diff --git a/QDMA/linux-kernel/RELEASE b/QDMA/linux-kernel/RELEASE index bab000f15c6442ec549cb901d87a39e7141c4587..08e2c83197de634a2c27e82228fbd7e63cf376e9 100755 --- a/QDMA/linux-kernel/RELEASE +++ b/QDMA/linux-kernel/RELEASE @@ -1,4 +1,4 @@ -RELEASE: 2023.1.0 +RELEASE: 2023.1.1 ================= This release is validated @@ -132,14 +132,15 @@ CPM5 - Updated the queue list command for >2048 Q's. - Added support to accomodate H2C & C2H Q's offset with fixed intervals for dma-perf application. +2023.1.1 Updates +---------------- +- Optimized the driver code and HW register settings for performance improvements. + KNOWN ISSUES: ============= - CPM5 Only - When >10VFs are attached to PF and FLR is issued, mailbox timeouts are observed. - - Performace optimizations are not finalized, Performance report with optimizations will be available in next patch release. - -- QDAM5.0 - - Performace optimizations are not finalized, Performance report with optimizations will be available in next patch release. + - Performace optimizations are not finalized, Performance report with optimizations will be available in next patch release. - All Designs - In interrupt mode, Sometimes completions are not received when C2H PIDX updates are held for 64 descriptors diff --git a/QDMA/linux-kernel/driver/libqdma/qdma_access/eqdma_cpm5_access/eqdma_cpm5_access.c b/QDMA/linux-kernel/driver/libqdma/qdma_access/eqdma_cpm5_access/eqdma_cpm5_access.c index 29c3cac146ffebce8d74615ee0872a9d9479d2cb..8e8a064ac1c3503e8793a4e59ac8f7621c6227a0 100755 --- a/QDMA/linux-kernel/driver/libqdma/qdma_access/eqdma_cpm5_access/eqdma_cpm5_access.c +++ b/QDMA/linux-kernel/driver/libqdma/qdma_access/eqdma_cpm5_access/eqdma_cpm5_access.c @@ -83,6 +83,8 @@ #define EQDMA_CPM5_GLBL2_FLR_PRESENT_MASK BIT(1) #define EQDMA_CPM5_GLBL2_MAILBOX_EN_MASK BIT(0) +#define EQDMA_CPM5_DEFAULT_C2H_INTR_TIMER_TICK 50 +#define PREFETCH_QUEUE_COUNT_STEP 4 /* TODO: This is work around and this needs to be auto generated from ODS */ /** EQDMA_CPM5_IND_REG_SEL_FMAP */ @@ -1718,6 +1720,307 @@ static uint32_t eqdma_cpm5_intr_context_buf_len(void) return len; } +/* + * eqdma_cpm5_set_perf_opt() - Helper function to set the + * cpm5 perf optimizations. 
+ * + */ +static void eqdma_cpm5_set_perf_opt(void *dev_hndl) +{ + uint32_t reg_val = 0; + uint32_t pftch_cache_depth = 0; + uint32_t pftch_qcnt = 0; + uint32_t pftch_evnt_qcnt_th = 0; + uint32_t crdt_coal_fifo_th = 0; + uint32_t crdt_coal_crdt_th = 0; + + /* C2H interrupt timer tick */ + qdma_reg_write(dev_hndl, EQDMA_CPM5_C2H_INT_TIMER_TICK_ADDR, + EQDMA_CPM5_DEFAULT_C2H_INTR_TIMER_TICK); + +/* + * #define EQDMA_CPM5_C2H_PFCH_CACHE_DEPTH_ADDR 0xBE0 + * #define C2H_PFCH_CACHE_DEPTH_MAX_STBUF_MASK GENMASK(23, 16) + * #define C2H_PFCH_CACHE_DEPTH_MASK GENMASK(7, 0) + */ + reg_val = qdma_reg_read(dev_hndl, EQDMA_CPM5_C2H_PFCH_CACHE_DEPTH_ADDR); + pftch_cache_depth = FIELD_GET(C2H_PFCH_CACHE_DEPTH_MASK, reg_val); + +/* + * #define EQDMA_CPM5_GLBL_DSC_CFG_ADDR 0x250 + * #define GLBL_DSC_CFG_RSVD_1_MASK GENMASK(31, 10) + * #define GLBL_DSC_CFG_UNC_OVR_COR_MASK BIT(9) + * #define GLBL_DSC_CFG_CTXT_FER_DIS_MASK BIT(8) + * #define GLBL_DSC_CFG_RSVD_2_MASK GENMASK(7, 6) + * #define GLBL_DSC_CFG_MAXFETCH_MASK GENMASK(5, 3) + * #define GLBL_DSC_CFG_WB_ACC_INT_MASK GENMASK(2, 0) + */ +#define GLBL_DSC_CFG_RSVD_1_DFLT 0 +#define GLBL_DSC_CFG_UNC_OVR_COR_DFLT 0 +#define GLBL_DSC_CFG_CTXT_FER_DIS_DFLT 0 +#define GLBL_DSC_CFG_RSVD_2_DFLT 0 +/* =IF(Internal mode, 2,5) */ +#define GLBL_DSC_CFG_MAXFETCH 2 +#define GLBL_DSC_CFG_WB_ACC_INT 5 + reg_val = + FIELD_SET(GLBL_DSC_CFG_RSVD_1_MASK, GLBL_DSC_CFG_RSVD_1_DFLT) | + FIELD_SET(GLBL_DSC_CFG_UNC_OVR_COR_MASK, + GLBL_DSC_CFG_UNC_OVR_COR_DFLT) | + FIELD_SET(GLBL_DSC_CFG_CTXT_FER_DIS_MASK, + GLBL_DSC_CFG_CTXT_FER_DIS_DFLT) | + FIELD_SET(GLBL_DSC_CFG_RSVD_2_MASK, GLBL_DSC_CFG_RSVD_2_DFLT) | + FIELD_SET(GLBL_DSC_CFG_MAXFETCH_MASK, + GLBL_DSC_CFG_MAXFETCH) | + FIELD_SET(GLBL_DSC_CFG_WB_ACC_INT_MASK, + GLBL_DSC_CFG_WB_ACC_INT); + qdma_reg_write(dev_hndl, EQDMA_CPM5_GLBL_DSC_CFG_ADDR, reg_val); + reg_val = qdma_reg_read(dev_hndl, EQDMA_CPM5_GLBL_DSC_CFG_ADDR); + qdma_log_info("%s: reg = 0x%08X val = 0x%08X\n", + __func__, EQDMA_CPM5_GLBL_DSC_CFG_ADDR, reg_val); + +/* + * #define EQDMA_CPM5_CFG_BLK_MISC_CTL_ADDR 0x4C + * #define CFG_BLK_MISC_CTL_RSVD_1_MASK GENMASK(31, 24) + * #define CFG_BLK_MISC_CTL_10B_TAG_EN_MASK BIT(23) + * #define CFG_BLK_MISC_CTL_RSVD_2_MASK BIT(22) + * #define CFG_BLK_MISC_CTL_AXI_WBK_MASK BIT(21) + * #define CFG_BLK_MISC_CTL_AXI_DSC_MASK BIT(20) + * #define CFG_BLK_MISC_CTL_NUM_TAG_MASK GENMASK(19, 8) + * #define CFG_BLK_MISC_CTL_RSVD_3_MASK GENMASK(7, 5) + * #define CFG_BLK_MISC_CTL_RQ_METERING_MULTIPLIER_MASK GENMASK(4, 0) + */ +#define CFG_BLK_MISC_CTL_RSVD_1_DFLT 0 +#define CFG_BLK_MISC_CTL_RSVD_2_DFLT 0 +#define CFG_BLK_MISC_CTL_AXI_WBK_DFLT 0 +#define CFG_BLK_MISC_CTL_AXI_DSC_DFLT 0 +/* IF(10bit tag enabled, 512,256) */ +#ifdef EQDMA_CPM5_10BIT_TAG_ENABLE +#define CFG_BLK_MISC_CTL_10B_TAG_DFLT 1 +#define CFG_BLK_MISC_CTL_NUM_TAG_DFLT 512 +#else +#define CFG_BLK_MISC_CTL_10B_TAG_DFLT 0 +#define CFG_BLK_MISC_CTL_NUM_TAG_DFLT 256 +#endif +#define CFG_BLK_MISC_CTL_RSVD_3_DFLT 0 +#define EQDMA_CFG_BLK_MISC_CTL_RQ_METERING_MUL 31 + reg_val = + FIELD_SET(CFG_BLK_MISC_CTL_RSVD_1_MASK, + CFG_BLK_MISC_CTL_RSVD_1_DFLT) | + FIELD_SET(CFG_BLK_MISC_CTL_10B_TAG_EN_MASK, + CFG_BLK_MISC_CTL_10B_TAG_DFLT) | + FIELD_SET(CFG_BLK_MISC_CTL_RSVD_2_MASK, + CFG_BLK_MISC_CTL_RSVD_2_DFLT) | + FIELD_SET(CFG_BLK_MISC_CTL_AXI_WBK_MASK, + CFG_BLK_MISC_CTL_AXI_WBK_DFLT) | + FIELD_SET(CFG_BLK_MISC_CTL_AXI_DSC_MASK, + CFG_BLK_MISC_CTL_AXI_DSC_DFLT) | + FIELD_SET(CFG_BLK_MISC_CTL_NUM_TAG_MASK, + CFG_BLK_MISC_CTL_NUM_TAG_DFLT) | + FIELD_SET(CFG_BLK_MISC_CTL_RSVD_3_MASK, + 
CFG_BLK_MISC_CTL_RSVD_3_DFLT) | + FIELD_SET(CFG_BLK_MISC_CTL_RQ_METERING_MULTIPLIER_MASK, + EQDMA_CFG_BLK_MISC_CTL_RQ_METERING_MUL); + qdma_reg_write(dev_hndl, EQDMA_CPM5_CFG_BLK_MISC_CTL_ADDR, reg_val); + reg_val = qdma_reg_read(dev_hndl, EQDMA_CPM5_CFG_BLK_MISC_CTL_ADDR); + qdma_log_info("%s: reg = 0x%08X val = 0x%08X\n", + __func__, EQDMA_CPM5_CFG_BLK_MISC_CTL_ADDR, reg_val); + +/* + * #define EQDMA_CPM5_C2H_PFCH_CFG_ADDR 0xB08 + * #define C2H_PFCH_CFG_EVTFL_TH_MASK GENMASK(31, 16) + * #define C2H_PFCH_CFG_FL_TH_MASK GENMASK(15, 0) + */ +#define EQDMA_PFTCH_CFG_EVT_PFTH_FL_TH 256 +#define C2H_PFCH_CFG_FL_TH_DFLT 256 + reg_val = + FIELD_SET(C2H_PFCH_CFG_EVTFL_TH_MASK, + EQDMA_PFTCH_CFG_EVT_PFTH_FL_TH) | + FIELD_SET(C2H_PFCH_CFG_FL_TH_MASK, + C2H_PFCH_CFG_FL_TH_DFLT); + + qdma_reg_write(dev_hndl, EQDMA_CPM5_C2H_PFCH_CFG_ADDR, reg_val); + reg_val = qdma_reg_read(dev_hndl, EQDMA_CPM5_C2H_PFCH_CFG_ADDR); + qdma_log_info("%s: reg = 0x%08X val = 0x%08X\n", + __func__, EQDMA_CPM5_C2H_PFCH_CFG_ADDR, reg_val); + +/* + * #define EQDMA_CPM5_C2H_PFCH_CFG_1_ADDR 0xA80 + * #define C2H_PFCH_CFG_1_EVT_QCNT_TH_MASK GENMASK(31, 16) + * #define C2H_PFCH_CFG_1_QCNT_MASK GENMASK(15, 0) + */ + pftch_qcnt = pftch_cache_depth - PREFETCH_QUEUE_COUNT_STEP; + pftch_evnt_qcnt_th = pftch_qcnt - PREFETCH_QUEUE_COUNT_STEP; + reg_val = + FIELD_SET(C2H_PFCH_CFG_1_EVT_QCNT_TH_MASK, pftch_evnt_qcnt_th) | + FIELD_SET(C2H_PFCH_CFG_1_QCNT_MASK, pftch_qcnt); + qdma_reg_write(dev_hndl, EQDMA_CPM5_C2H_PFCH_CFG_1_ADDR, reg_val); + reg_val = qdma_reg_read(dev_hndl, EQDMA_CPM5_C2H_PFCH_CFG_1_ADDR); + qdma_log_info("%s: reg = 0x%08X val = 0x%08X\n", + __func__, EQDMA_CPM5_C2H_PFCH_CFG_1_ADDR, reg_val); + +/* + * #define EQDMA_CPM5_C2H_PFCH_CFG_2_ADDR 0xA84 + * #define C2H_PFCH_CFG_2_FENCE_MASK BIT(31) + * #define C2H_PFCH_CFG_2_RSVD_MASK GENMASK(30, 29) + * #define C2H_PFCH_CFG_2_VAR_DESC_NO_DROP_MASK BIT(28) + * #define C2H_PFCH_CFG_2_LL_SZ_TH_MASK GENMASK(27, 12) + * #define C2H_PFCH_CFG_2_VAR_DESC_NUM_MASK GENMASK(11, 6) + * #define C2H_PFCH_CFG_2_NUM_MASK GENMASK(5, 0) + */ +#define C2H_PFCH_CFG_2_FENCE_EN 1 +#define C2H_PFCH_CFG_2_RSVD_DFLT 0 +#define C2H_PFCH_CFG_2_VAR_DESC_NO_DROP_DFLT 0 +#define C2H_PFCH_CFG_2_LL_SZ_TH_DFLT 1024 +#define C2H_PFCH_CFG_2_VAR_DESC_NUM 15 +#define C2H_PFCH_CFG_2_NUM_PFCH_DFLT 16 + reg_val = + FIELD_SET(C2H_PFCH_CFG_2_FENCE_MASK, + C2H_PFCH_CFG_2_FENCE_EN) | + FIELD_SET(C2H_PFCH_CFG_2_RSVD_MASK, + C2H_PFCH_CFG_2_RSVD_DFLT) | + FIELD_SET(C2H_PFCH_CFG_2_VAR_DESC_NO_DROP_MASK, + C2H_PFCH_CFG_2_VAR_DESC_NO_DROP_DFLT) | + FIELD_SET(C2H_PFCH_CFG_2_LL_SZ_TH_MASK, + C2H_PFCH_CFG_2_LL_SZ_TH_DFLT) | + FIELD_SET(C2H_PFCH_CFG_2_VAR_DESC_NUM_MASK, + C2H_PFCH_CFG_2_VAR_DESC_NUM) | + FIELD_SET(C2H_PFCH_CFG_2_NUM_MASK, + C2H_PFCH_CFG_2_NUM_PFCH_DFLT); + qdma_reg_write(dev_hndl, EQDMA_CPM5_C2H_PFCH_CFG_2_ADDR, reg_val); + reg_val = qdma_reg_read(dev_hndl, EQDMA_CPM5_C2H_PFCH_CFG_2_ADDR); + qdma_log_info("%s: reg = 0x%08X val = 0x%08X\n", + __func__, EQDMA_CPM5_C2H_PFCH_CFG_2_ADDR, reg_val); + +/* Registers Not Applicable for CPM5 + * #define EQDMA_PFCH_CFG_3_ADDR 0x147C + * #define EQDMA_PFCH_CFG_4_ADDR 0x1484 + */ + +/* + * #define EQDMA_CPM5_C2H_CRDT_COAL_CFG_1_ADDR 0x1400 + * #define C2H_CRDT_COAL_CFG_1_RSVD_1_MASK GENMASK(31, 18) + * #define C2H_CRDT_COAL_CFG_1_PLD_FIFO_TH_MASK GENMASK(17, 10) + * #define C2H_CRDT_COAL_CFG_1_TIMER_TH_MASK GENMASK(9, 0)4 + */ +#define C2H_CRDT_COAL_CFG_1_RSVD_1_DFLT 0 +#define C2H_CRDT_COAL_CFG_1_PLD_FIFO_TH_DFLT 16 +#define C2H_CRDT_COAL_CFG_1_TIMER_TH 16 + reg_val = + 
FIELD_SET(C2H_CRDT_COAL_CFG_1_RSVD_1_MASK, + C2H_CRDT_COAL_CFG_1_RSVD_1_DFLT) | + FIELD_SET(C2H_CRDT_COAL_CFG_1_PLD_FIFO_TH_MASK, + C2H_CRDT_COAL_CFG_1_PLD_FIFO_TH_DFLT) | + FIELD_SET(C2H_CRDT_COAL_CFG_1_TIMER_TH_MASK, + C2H_CRDT_COAL_CFG_1_TIMER_TH); + qdma_reg_write(dev_hndl, EQDMA_CPM5_C2H_CRDT_COAL_CFG_1_ADDR, reg_val); + reg_val = qdma_reg_read(dev_hndl, EQDMA_CPM5_C2H_CRDT_COAL_CFG_1_ADDR); + qdma_log_info("%s: reg = 0x%08X val = 0x%08X\n", + __func__, EQDMA_CPM5_C2H_CRDT_COAL_CFG_1_ADDR, reg_val); + +/* + * #define EQDMA_CPM5_C2H_CRDT_COAL_CFG_2_ADDR 0x1404 + * #define C2H_CRDT_COAL_CFG_2_RSVD_1_MASK GENMASK(31, 24) + * #define C2H_CRDT_COAL_CFG_2_FIFO_TH_MASK GENMASK(23, 16) + * #define C2H_CRDT_COAL_CFG_2_RESERVED1_MASK GENMASK(15, 11) + * #define C2H_CRDT_COAL_CFG_2_NT_TH_MASK GENMASK(10, 0) + */ +#define C2H_CRDT_COAL_CFG_2_RSVD_1_DFLT 0 +#define C2H_CRDT_COAL_CFG_2_RESERVED1_DFLT 0 +#define C2H_CRDT_COAL_CFG_2_CRDT_CNT_TH_DFLT 156 + crdt_coal_fifo_th = pftch_cache_depth - 8; + crdt_coal_crdt_th = C2H_CRDT_COAL_CFG_2_CRDT_CNT_TH_DFLT; + reg_val = + FIELD_SET(C2H_CRDT_COAL_CFG_2_RSVD_1_MASK, + C2H_CRDT_COAL_CFG_2_RSVD_1_DFLT) | + FIELD_SET(C2H_CRDT_COAL_CFG_2_FIFO_TH_MASK, + crdt_coal_fifo_th) | + FIELD_SET(C2H_CRDT_COAL_CFG_2_RESERVED1_MASK, + C2H_CRDT_COAL_CFG_2_RESERVED1_DFLT) | + FIELD_SET(C2H_CRDT_COAL_CFG_2_NT_TH_MASK, + crdt_coal_crdt_th); + qdma_reg_write(dev_hndl, EQDMA_CPM5_C2H_CRDT_COAL_CFG_2_ADDR, reg_val); + reg_val = qdma_reg_read(dev_hndl, EQDMA_CPM5_C2H_CRDT_COAL_CFG_2_ADDR); + qdma_log_info("%s: reg = 0x%08X val = 0x%08X\n", + __func__, EQDMA_CPM5_C2H_CRDT_COAL_CFG_2_ADDR, reg_val); + +/* + * #define EQDMA_CPM5_H2C_REQ_THROT_PCIE_ADDR 0xE24 + * #define H2C_REQ_THROT_PCIE_EN_REQ_MASK BIT(31) + * #define H2C_REQ_THROT_PCIE_MASK GENMASK(30, 19) + * #define H2C_REQ_THROT_PCIE_EN_DATA_MASK BIT(18) + * #define H2C_REQ_THROT_PCIE_DATA_THRESH_MASK GENMASK(17, 0) + */ +#define H2C_REQ_THROT_PCIE_EN_REQ 1 +/* IF(10bit tag enabled, 512-64, 192) */ +#ifdef EQDMA_CPM5_10BIT_TAG_ENABLE +#define H2C_REQ_THROT_PCIE_REQ_TH 448 +#else +#define H2C_REQ_THROT_PCIE_REQ_TH 192 +#endif +#define H2C_REQ_THROT_PCIE_EN_DATA 1 +#define H2C_REQ_THROT_PCIE_DATA_TH 57344 + reg_val = + FIELD_SET(H2C_REQ_THROT_PCIE_EN_REQ_MASK, + H2C_REQ_THROT_PCIE_EN_REQ) | + FIELD_SET(H2C_REQ_THROT_PCIE_MASK, + H2C_REQ_THROT_PCIE_REQ_TH) | + FIELD_SET(H2C_REQ_THROT_PCIE_EN_DATA_MASK, + H2C_REQ_THROT_PCIE_EN_DATA) | + FIELD_SET(H2C_REQ_THROT_PCIE_DATA_THRESH_MASK, + H2C_REQ_THROT_PCIE_DATA_TH); + qdma_reg_write(dev_hndl, EQDMA_CPM5_H2C_REQ_THROT_PCIE_ADDR, reg_val); + reg_val = qdma_reg_read(dev_hndl, EQDMA_CPM5_H2C_REQ_THROT_PCIE_ADDR); + qdma_log_info("%s: reg = 0x%08X val = 0x%08X\n", + __func__, EQDMA_CPM5_H2C_REQ_THROT_PCIE_ADDR, reg_val); + +/* + * #define EQDMA_CPM5_H2C_REQ_THROT_AXIMM_ADDR 0xE2C + * #define H2C_REQ_THROT_AXIMM_EN_REQ_MASK BIT(31) + * #define H2C_REQ_THROT_AXIMM_MASK GENMASK(30, 19) + * #define H2C_REQ_THROT_AXIMM_EN_DATA_MASK BIT(18) + * #define H2C_REQ_THROT_AXIMM_DATA_THRESH_MASK GENMASK(17, 0) + */ +#define H2C_REQ_THROT_AXIMM_EN_REQ 0 +/* IF(10bit tag en=1, 512-64, 192) */ +#ifdef EQDMA_CPM5_10BIT_TAG_ENABLE +#define H2C_REQ_THROT_AXIMM_REQ_TH 448 +#else +#define H2C_REQ_THROT_AXIMM_REQ_TH 192 +#endif +#define H2C_REQ_THROT_AXIMM_EN_DATA 0 +#define H2C_REQ_THROT_AXIMM_DATA_TH 65536 + reg_val = + FIELD_SET(H2C_REQ_THROT_AXIMM_EN_REQ_MASK, + H2C_REQ_THROT_AXIMM_EN_REQ) | + FIELD_SET(H2C_REQ_THROT_AXIMM_MASK, + H2C_REQ_THROT_AXIMM_REQ_TH) | + 
FIELD_SET(H2C_REQ_THROT_AXIMM_EN_DATA_MASK, + H2C_REQ_THROT_AXIMM_EN_DATA) | + FIELD_SET(H2C_REQ_THROT_AXIMM_DATA_THRESH_MASK, + H2C_REQ_THROT_AXIMM_DATA_TH); + qdma_reg_write(dev_hndl, EQDMA_CPM5_H2C_REQ_THROT_AXIMM_ADDR, reg_val); + reg_val = qdma_reg_read(dev_hndl, EQDMA_CPM5_H2C_REQ_THROT_AXIMM_ADDR); + qdma_log_info("%s: reg = 0x%08X val = 0x%08X\n", + __func__, EQDMA_CPM5_H2C_REQ_THROT_AXIMM_ADDR, reg_val); + +#define EQDMA_CPM5_H2C_MM_DATA_THROTTLE_ADDR 0x12EC +#define H2C_MM_DATA_THROTTLE_RSVD_1_MASK GENMASK(31, 17) +#define H2C_MM_DATA_THROTTLE_DAT_EN_MASK BIT(16) +#define H2C_MM_DATA_THROTTLE_DAT_MASK GENMASK(15, 0) +#define H2C_MM_DATA_THROTTLE_RSVD_1_DFLT 0 +#define H2C_MM_DATA_TH_EN 1 +#define H2C_MM_DATA_TH 57344 + reg_val = + FIELD_SET(H2C_MM_DATA_THROTTLE_RSVD_1_MASK, + H2C_MM_DATA_THROTTLE_RSVD_1_DFLT) | + FIELD_SET(H2C_MM_DATA_THROTTLE_DAT_EN_MASK, H2C_MM_DATA_TH_EN) | + FIELD_SET(H2C_MM_DATA_THROTTLE_DAT_MASK, H2C_MM_DATA_TH); + qdma_reg_write(dev_hndl, EQDMA_CPM5_H2C_MM_DATA_THROTTLE_ADDR, reg_val); + reg_val = qdma_reg_read(dev_hndl, EQDMA_CPM5_H2C_MM_DATA_THROTTLE_ADDR); + qdma_log_info("%s: reg = 0x%08X val = 0x%08X\n", + __func__, EQDMA_CPM5_H2C_MM_DATA_THROTTLE_ADDR, reg_val); +} + /* * eqdma_cpm5_indirect_reg_invalidate() - helper function to invalidate * indirect context registers. @@ -2131,27 +2434,6 @@ int eqdma_cpm5_set_default_global_csr(void *dev_hndl) qdma_write_csr_values(dev_hndl, EQDMA_CPM5_C2H_BUF_SZ_ADDR, 0, QDMA_NUM_C2H_BUFFER_SIZES, buf_sz); - /* Prefetch Configuration */ - reg_val = qdma_reg_read(dev_hndl, - EQDMA_CPM5_C2H_PFCH_CACHE_DEPTH_ADDR); - cfg_val = FIELD_GET(C2H_PFCH_CACHE_DEPTH_MASK, reg_val); - - reg_val = FIELD_SET(C2H_PFCH_CFG_1_QCNT_MASK, (cfg_val >> 2)) | - FIELD_SET(C2H_PFCH_CFG_1_EVT_QCNT_TH_MASK, - ((cfg_val >> 2) - 4)); - qdma_reg_write(dev_hndl, - EQDMA_CPM5_C2H_PFCH_CFG_1_ADDR, reg_val); - - reg_val = qdma_reg_read(dev_hndl, - EQDMA_CPM5_C2H_PFCH_CFG_2_ADDR); - reg_val |= FIELD_SET(C2H_PFCH_CFG_2_FENCE_MASK, 1); - qdma_reg_write(dev_hndl, - EQDMA_CPM5_C2H_PFCH_CFG_2_ADDR, reg_val); - - /* C2H interrupt timer tick */ - qdma_reg_write(dev_hndl, EQDMA_CPM5_C2H_INT_TIMER_TICK_ADDR, - DEFAULT_C2H_INTR_TIMER_TICK); - /* C2h Completion Coalesce Configuration */ cfg_val = qdma_reg_read(dev_hndl, EQDMA_CPM5_C2H_WRB_COAL_BUF_DEPTH_ADDR); @@ -2163,22 +2445,9 @@ int eqdma_cpm5_set_default_global_csr(void *dev_hndl) FIELD_SET(C2H_WRB_COAL_CFG_MAX_BUF_SZ_MASK, cfg_val); qdma_reg_write(dev_hndl, EQDMA_CPM5_C2H_WRB_COAL_CFG_ADDR, reg_val); - - /* H2C throttle Configuration*/ - - reg_val = - FIELD_SET(H2C_REQ_THROT_PCIE_DATA_THRESH_MASK, - EQDMA_CPM5_H2C_THROT_DATA_THRESH) | - FIELD_SET(H2C_REQ_THROT_PCIE_EN_DATA_MASK, - EQDMA_CPM5_THROT_EN_DATA) | - FIELD_SET(H2C_REQ_THROT_PCIE_MASK, - EQDMA_CPM5_H2C_THROT_REQ_THRESH) | - FIELD_SET(H2C_REQ_THROT_PCIE_EN_REQ_MASK, - EQDMA_CPM5_THROT_EN_REQ); - qdma_reg_write(dev_hndl, EQDMA_CPM5_H2C_REQ_THROT_PCIE_ADDR, - reg_val); } + eqdma_cpm5_set_perf_opt(dev_hndl); return QDMA_SUCCESS; } @@ -3548,7 +3817,7 @@ static int eqdma_cpm5_cmpt_context_read(void *dev_hndl, uint16_t hw_qid, ctxt->en_int = FIELD_GET(CMPL_CTXT_DATA_W0_EN_INT_MASK, cmpt_ctxt[0]); ctxt->trig_mode = FIELD_GET(CMPL_CTXT_DATA_W0_TRIG_MODE_MASK, cmpt_ctxt[0]); - ctxt->fnc_id = FIELD_GET(CMPL_CTXT_DATA_W0_FNC_ID_MASK, cmpt_ctxt[0]); + ctxt->fnc_id = FIELD_GET(CMPL_CTXT_DATA_W0_FNC_ID_MASK, cmpt_ctxt[0]); ctxt->counter_idx = (uint8_t)(FIELD_GET(CMPL_CTXT_DATA_W0_CNTER_IX_MASK, cmpt_ctxt[0])); @@ -5245,7 +5514,7 @@ int 
eqdma_cpm5_hw_error_enable(void *dev_hndl, uint32_t err_idx) if (err_idx > EQDMA_CPM5_ERRS_ALL) { qdma_log_error("%s: err_idx=%d is invalid, err:%d\n", __func__, - (enum eqdma_cpm5_error_idx)err_idx, + (enum eqdma_cpm5_error_idx)err_idx, -QDMA_ERR_INV_PARAM); return -QDMA_ERR_INV_PARAM; } diff --git a/QDMA/linux-kernel/driver/libqdma/qdma_access/eqdma_soft_access/eqdma_soft_access.c b/QDMA/linux-kernel/driver/libqdma/qdma_access/eqdma_soft_access/eqdma_soft_access.c index 59051a8ea28c0486f9a93f8af14a5594a8923e2d..3404e9ae0a85e97ccbb93d525875876efedfe8f2 100755 --- a/QDMA/linux-kernel/driver/libqdma/qdma_access/eqdma_soft_access/eqdma_soft_access.c +++ b/QDMA/linux-kernel/driver/libqdma/qdma_access/eqdma_soft_access/eqdma_soft_access.c @@ -50,6 +50,9 @@ #define EQDMA_MM_C2H_ERR_ALL_MASK 0X70000003 #define EQDMA_MM_H2C0_ERR_ALL_MASK 0X3041013E + + + /* H2C Throttle settings for QDMA 4.0 */ #define EQDMA_H2C_THROT_DATA_THRESH 0x5000 #define EQDMA_THROT_EN_DATA 1 @@ -1716,6 +1719,7 @@ int eqdma_context_buf_len(uint8_t st, return 0; } + static uint32_t eqdma_intr_context_buf_len(void) { uint32_t len = 0; @@ -1726,6 +1730,312 @@ static uint32_t eqdma_intr_context_buf_len(void) return len; } + +static void eqdma_set_perf_opt(void *dev_hndl) +{ + uint32_t reg_val = 0, data_th = 0, pfch_cache_dpth = 0; + /**** + * TODO: All the below settings are for QDMA5.0 + * Need to add the QDMA4.0 settings + */ +#define EQDMA_PFTCH_CACHE_DEPTH 64 +#define GLBL_DSC_CFG_RSVD_1_DFLT 0 +#define EQDMA_GLBL_DSC_CFG_C2H_UODSC_LIMIT 5 +#define EQDMA_GLBL_DSC_CFG_H2C_UODSC_LIMIT 8 +#define GLBL_DSC_CFG_UNC_OVR_COR_DFLT 0 +#define GLBL_DSC_CFG_CTXT_FER_DIS_DFLT 0 +#define GLBL_DSC_CFG_RSVD_2_DFLT 0 +#define EQDMA_GLBL_DSC_CFG_MAXFETCH 2 +#define EQDMA_GLBL_DSC_CFG_WB_ACC_INT 5 + + reg_val = + FIELD_SET(GLBL_DSC_CFG_RSVD_1_MASK, GLBL_DSC_CFG_RSVD_1_DFLT) | + FIELD_SET(GLBL_DSC_CFG_C2H_UODSC_LIMIT_MASK, + EQDMA_GLBL_DSC_CFG_C2H_UODSC_LIMIT) | + FIELD_SET(GLBL_DSC_CFG_H2C_UODSC_LIMIT_MASK, + EQDMA_GLBL_DSC_CFG_H2C_UODSC_LIMIT) | + FIELD_SET(GLBL_DSC_CFG_UNC_OVR_COR_MASK, + GLBL_DSC_CFG_UNC_OVR_COR_DFLT) | + FIELD_SET(GLBL_DSC_CFG_CTXT_FER_DIS_MASK, + GLBL_DSC_CFG_CTXT_FER_DIS_DFLT) | + FIELD_SET(GLBL_DSC_CFG_RSVD_2_MASK, GLBL_DSC_CFG_RSVD_2_DFLT) | + FIELD_SET(GLBL_DSC_CFG_MAXFETCH_MASK, + EQDMA_GLBL_DSC_CFG_MAXFETCH) | + FIELD_SET(GLBL_DSC_CFG_WB_ACC_INT_MASK, + EQDMA_GLBL_DSC_CFG_WB_ACC_INT); + qdma_reg_write(dev_hndl, EQDMA_GLBL_DSC_CFG_ADDR, reg_val); + reg_val = qdma_reg_read(dev_hndl, EQDMA_GLBL_DSC_CFG_ADDR); + qdma_log_info("%s: reg = 0x%08X val = 0x%08X\n", + __func__, EQDMA_GLBL_DSC_CFG_ADDR, reg_val); + +#define CFG_BLK_MISC_CTL_RSVD_1_DFLT 0 +#define CFG_BLK_MISC_CTL_10B_TAG_DFLT 0 +#define CFG_BLK_MISC_CTL_RSVD_2_DFLT 0 +#define CFG_BLK_MISC_CTL_AXI_WBK_DFLT 0 +#define CFG_BLK_MISC_CTL_AXI_DSC_DFLT 0 +#define CFG_BLK_MISC_CTL_NUM_TAG_DFLT 256 +#define CFG_BLK_MISC_CTL_RSVD_3_DFLT 0 +#define EQDMA_CFG_BLK_MISC_CTL_RQ_METERING_MUL 9 + + + reg_val = + FIELD_SET(CFG_BLK_MISC_CTL_RSVD_1_MASK, + CFG_BLK_MISC_CTL_RSVD_1_DFLT) | + FIELD_SET(CFG_BLK_MISC_CTL_10B_TAG_EN_MASK, + CFG_BLK_MISC_CTL_10B_TAG_DFLT) | + FIELD_SET(CFG_BLK_MISC_CTL_RSVD_2_MASK, + CFG_BLK_MISC_CTL_RSVD_2_DFLT) | + FIELD_SET(CFG_BLK_MISC_CTL_AXI_WBK_MASK, + CFG_BLK_MISC_CTL_AXI_WBK_DFLT) | + FIELD_SET(CFG_BLK_MISC_CTL_AXI_DSC_MASK, + CFG_BLK_MISC_CTL_AXI_DSC_DFLT) | + FIELD_SET(CFG_BLK_MISC_CTL_NUM_TAG_MASK, + CFG_BLK_MISC_CTL_NUM_TAG_DFLT) | + FIELD_SET(CFG_BLK_MISC_CTL_RSVD_3_MASK, + CFG_BLK_MISC_CTL_RSVD_3_DFLT) | + 
FIELD_SET(CFG_BLK_MISC_CTL_RQ_METERING_MULTIPLIER_MASK, + EQDMA_CFG_BLK_MISC_CTL_RQ_METERING_MUL); + qdma_reg_write(dev_hndl, EQDMA_CFG_BLK_MISC_CTL_ADDR, reg_val); + reg_val = qdma_reg_read(dev_hndl, EQDMA_CFG_BLK_MISC_CTL_ADDR); + qdma_log_info("%s: reg = 0x%08X val = 0x%08X\n", + __func__, EQDMA_CFG_BLK_MISC_CTL_ADDR, reg_val); + +#define EQDMA_PFTCH_CFG_EVT_PFTH_FL_TH 256 +#define C2H_PFCH_CFG_FL_TH_DFLT 256 + + reg_val = + FIELD_SET(C2H_PFCH_CFG_EVTFL_TH_MASK, + EQDMA_PFTCH_CFG_EVT_PFTH_FL_TH) | + FIELD_SET(C2H_PFCH_CFG_FL_TH_MASK, C2H_PFCH_CFG_FL_TH_DFLT); + + qdma_reg_write(dev_hndl, EQDMA_C2H_PFCH_CFG_ADDR, reg_val); + reg_val = qdma_reg_read(dev_hndl, EQDMA_C2H_PFCH_CFG_ADDR); + qdma_log_info("%s: reg = 0x%08X val = 0x%08X\n", + __func__, EQDMA_C2H_PFCH_CFG_ADDR, reg_val); + +#define EQDMA_C2H_PFCH_CFG_1_QCNT_MASK (EQDMA_PFTCH_CACHE_DEPTH - 4) +#define EQDMA_C2H_PFCH_CFG_1_EVNT_QCNT_TH EQDMA_C2H_PFCH_CFG_1_QCNT_MASK + pfch_cache_dpth = qdma_reg_read(dev_hndl, + EQDMA_C2H_PFCH_CACHE_DEPTH_ADDR); + + reg_val = + FIELD_SET(C2H_PFCH_CFG_1_EVT_QCNT_TH_MASK, + (pfch_cache_dpth - 4)) | + FIELD_SET(C2H_PFCH_CFG_1_QCNT_MASK, (pfch_cache_dpth - 4)); + qdma_reg_write(dev_hndl, EQDMA_C2H_PFCH_CFG_1_ADDR, reg_val); + reg_val = qdma_reg_read(dev_hndl, EQDMA_C2H_PFCH_CFG_1_ADDR); + qdma_log_info("%s: reg = 0x%08X val = 0x%08X\n", + __func__, EQDMA_C2H_PFCH_CFG_1_ADDR, reg_val); + +#define EQDMA_C2H_PFCH_CFG_2_FENCE_EN 1 +#define C2H_PFCH_CFG_2_RSVD_DFLT 0 +#define C2H_PFCH_CFG_2_VAR_DESC_NO_DROP_DFLT 0 +#define C2H_PFCH_CFG_2_LL_SZ_TH_DFLT 1024 +#define C2H_PFCH_CFG_2_VAR_DESC_NUM 15 +#define C2H_PFCH_CFG_2_NUM_DFLT 8 + + reg_val = + FIELD_SET(C2H_PFCH_CFG_2_FENCE_MASK, + EQDMA_C2H_PFCH_CFG_2_FENCE_EN) | + FIELD_SET(C2H_PFCH_CFG_2_RSVD_MASK, C2H_PFCH_CFG_2_RSVD_DFLT) | + FIELD_SET(C2H_PFCH_CFG_2_VAR_DESC_NO_DROP_MASK, + C2H_PFCH_CFG_2_VAR_DESC_NO_DROP_DFLT) | + FIELD_SET(C2H_PFCH_CFG_2_LL_SZ_TH_MASK, + C2H_PFCH_CFG_2_LL_SZ_TH_DFLT) | + FIELD_SET(C2H_PFCH_CFG_2_VAR_DESC_NUM_MASK, + C2H_PFCH_CFG_2_VAR_DESC_NUM) | + FIELD_SET(C2H_PFCH_CFG_2_NUM_MASK, C2H_PFCH_CFG_2_NUM_DFLT); + qdma_reg_write(dev_hndl, EQDMA_C2H_PFCH_CFG_2_ADDR, reg_val); + reg_val = qdma_reg_read(dev_hndl, EQDMA_C2H_PFCH_CFG_2_ADDR); + qdma_log_info("%s: reg = 0x%08X val = 0x%08X\n", + __func__, EQDMA_C2H_PFCH_CFG_2_ADDR, reg_val); +#define PFCH_CFG_3_RSVD_DFLT 0 +#define PFCH_CFG_3_VAR_DESC_FL_FREE_CNT_TH_DFLT 256 +#define PFCH_CFG_3_VAR_DESC_LG_PKT_CAM_CN_TH_DFLT 0 + + + reg_val = + FIELD_SET(PFCH_CFG_3_RSVD_MASK, PFCH_CFG_3_RSVD_DFLT) | + FIELD_SET(PFCH_CFG_3_VAR_DESC_FL_FREE_CNT_TH_MASK, + PFCH_CFG_3_VAR_DESC_FL_FREE_CNT_TH_DFLT) | + FIELD_SET(PFCH_CFG_3_VAR_DESC_LG_PKT_CAM_CN_TH_MASK, + PFCH_CFG_3_VAR_DESC_LG_PKT_CAM_CN_TH_DFLT); + qdma_reg_write(dev_hndl, EQDMA_PFCH_CFG_3_ADDR, reg_val); + reg_val = qdma_reg_read(dev_hndl, EQDMA_PFCH_CFG_3_ADDR); + qdma_log_info("%s: reg = 0x%08X val = 0x%08X\n", + __func__, EQDMA_C2H_PFCH_CFG_2_ADDR, reg_val); +#define EQDMA_PFCH_CFG_4_GLB_EVT_TIMER_TICK 64 +#define PFCH_CFG_4_DISABLE_GLB_EVT_TIMER_DFLT 0 +#define EQDMA_PFCH_CFG_4_EVT_TIMER_TICK 400 +#define PFCH_CFG_4_DISABLE_EVT_TIMER_DFLT 0 + + + reg_val = + FIELD_SET(PFCH_CFG_4_GLB_EVT_TIMER_TICK_MASK, + EQDMA_PFCH_CFG_4_GLB_EVT_TIMER_TICK) | + FIELD_SET(PFCH_CFG_4_DISABLE_GLB_EVT_TIMER_MASK, + PFCH_CFG_4_DISABLE_GLB_EVT_TIMER_DFLT) | + FIELD_SET(PFCH_CFG_4_EVT_TIMER_TICK_MASK, + EQDMA_PFCH_CFG_4_EVT_TIMER_TICK) | + FIELD_SET(PFCH_CFG_4_DISABLE_EVT_TIMER_MASK, + PFCH_CFG_4_DISABLE_EVT_TIMER_DFLT); + qdma_reg_write(dev_hndl, 
EQDMA_PFCH_CFG_4_ADDR, reg_val); + reg_val = qdma_reg_read(dev_hndl, EQDMA_PFCH_CFG_4_ADDR); + qdma_log_info("%s: reg = 0x%08X val = 0x%08X\n", + __func__, EQDMA_PFCH_CFG_4_ADDR, reg_val); +/**************** SET_2 *******************/ +#define C2H_CRDT_COAL_CFG_1_RSVD_1_DFLT 0 +#define C2H_CRDT_COAL_CFG_1_PLD_FIFO_TH_DFLT 16 +#define EQDMA_C2H_CRDT_COAL_CFG_1_TIMER_TH 16 //64 + + + reg_val = + FIELD_SET(C2H_CRDT_COAL_CFG_1_RSVD_1_MASK, + C2H_CRDT_COAL_CFG_1_RSVD_1_DFLT) | + FIELD_SET(C2H_CRDT_COAL_CFG_1_PLD_FIFO_TH_MASK, + C2H_CRDT_COAL_CFG_1_PLD_FIFO_TH_DFLT) | + FIELD_SET(C2H_CRDT_COAL_CFG_1_TIMER_TH_MASK, + EQDMA_C2H_CRDT_COAL_CFG_1_TIMER_TH); + qdma_reg_write(dev_hndl, EQDMA_C2H_CRDT_COAL_CFG_1_ADDR, reg_val); + reg_val = qdma_reg_read(dev_hndl, EQDMA_C2H_CRDT_COAL_CFG_1_ADDR); + qdma_log_info("%s: reg = 0x%08X val = 0x%08X\n", + __func__, EQDMA_C2H_CRDT_COAL_CFG_1_ADDR, reg_val); +#define C2H_CRDT_COAL_CFG_2_RSVD_1_DFLT 0 +#define EQDMA_C2H_CRDT_COAL_CFG_2_FIFO_TH (EQDMA_PFTCH_CACHE_DEPTH - 8) +#define C2H_CRDT_COAL_CFG_2_RESERVED1_DFLT 0 +#define EQDMA_C2H_CRDT_COAL_CFG_2_CRDT_TH 96 + + reg_val = + FIELD_SET(C2H_CRDT_COAL_CFG_2_RSVD_1_MASK, + C2H_CRDT_COAL_CFG_2_RSVD_1_DFLT) | + FIELD_SET(C2H_CRDT_COAL_CFG_2_FIFO_TH_MASK, + (pfch_cache_dpth - 8)) | + FIELD_SET(C2H_CRDT_COAL_CFG_2_RESERVED1_MASK, + C2H_CRDT_COAL_CFG_2_RESERVED1_DFLT) | + FIELD_SET(C2H_CRDT_COAL_CFG_2_NT_TH_MASK, + EQDMA_C2H_CRDT_COAL_CFG_2_CRDT_TH); + qdma_reg_write(dev_hndl, EQDMA_C2H_CRDT_COAL_CFG_2_ADDR, reg_val); + reg_val = qdma_reg_read(dev_hndl, EQDMA_C2H_CRDT_COAL_CFG_2_ADDR); + qdma_log_info("%s: reg = 0x%08X val = 0x%08X\n", + __func__, EQDMA_C2H_CRDT_COAL_CFG_2_ADDR, reg_val); + +/**************** SET_3 *******************/ +#define EQDMA_GLBL2_RRQ_PCIE_THROT_REQ_EN 0 +#define GLBL2_RRQ_PCIE_THROT_REQ_DFLT 192 +#define GLBL2_RRQ_PCIE_THROT_DAT_EN_DFLT 1 +#define GLBL2_RRQ_PCIE_THROT_DAT_DFLT 20480 + + + reg_val = + FIELD_SET(GLBL2_RRQ_PCIE_THROT_REQ_EN_MASK, + EQDMA_GLBL2_RRQ_PCIE_THROT_REQ_EN) | + FIELD_SET(GLBL2_RRQ_PCIE_THROT_REQ_MASK, + GLBL2_RRQ_PCIE_THROT_REQ_DFLT) | + FIELD_SET(GLBL2_RRQ_PCIE_THROT_DAT_EN_MASK, + GLBL2_RRQ_PCIE_THROT_DAT_EN_DFLT) | + FIELD_SET(GLBL2_RRQ_PCIE_THROT_DAT_MASK, + GLBL2_RRQ_PCIE_THROT_DAT_DFLT); + qdma_reg_write(dev_hndl, EQDMA_GLBL2_RRQ_PCIE_THROT_ADDR, reg_val); + reg_val = qdma_reg_read(dev_hndl, EQDMA_GLBL2_RRQ_PCIE_THROT_ADDR); + qdma_log_info("%s: reg = 0x%08X val = 0x%08X\n", + __func__, EQDMA_GLBL2_RRQ_PCIE_THROT_ADDR, reg_val); +#define GLBL2_RRQ_AXIMM_THROT_REQ_EN_DFLT 0 +#define GLBL2_RRQ_AXIMM_THROT_REQ_DFLT 0 +#define GLBL2_RRQ_AXIMM_THROT_DAT_EN_DFLT 0 +#define GLBL2_RRQ_AXIMM_THROT_DAT_DFLT 0 + + reg_val = + FIELD_SET(GLBL2_RRQ_AXIMM_THROT_REQ_EN_MASK, + GLBL2_RRQ_AXIMM_THROT_REQ_EN_DFLT) | + FIELD_SET(GLBL2_RRQ_AXIMM_THROT_REQ_MASK, + GLBL2_RRQ_AXIMM_THROT_REQ_DFLT) | + FIELD_SET(GLBL2_RRQ_AXIMM_THROT_DAT_EN_MASK, + GLBL2_RRQ_AXIMM_THROT_DAT_EN_DFLT) | + FIELD_SET(GLBL2_RRQ_AXIMM_THROT_DAT_MASK, + GLBL2_RRQ_AXIMM_THROT_DAT_DFLT); + qdma_reg_write(dev_hndl, EQDMA_GLBL2_RRQ_AXIMM_THROT_ADDR, reg_val); + reg_val = qdma_reg_read(dev_hndl, EQDMA_GLBL2_RRQ_AXIMM_THROT_ADDR); + qdma_log_info("%s: reg = 0x%08X val = 0x%08X\n", + __func__, EQDMA_GLBL2_RRQ_AXIMM_THROT_ADDR, reg_val); +#define GLBL2_RRQ_BRG_THROT_REQ_EN_DFLT 1 +#define GLBL2_RRQ_BRG_THROT_REQ_DFLT GLBL2_RRQ_PCIE_THROT_REQ_DFLT +#define GLBL2_RRQ_BRG_THROT_DAT_EN_DFLT 1 + + + reg_val = qdma_reg_read(dev_hndl, EQDMA_GLBL2_RRQ_PCIE_THROT_ADDR); + qdma_log_info("%s: BF reg = 0x%08X val = 0x%08X\n", + 
__func__, EQDMA_GLBL2_RRQ_PCIE_THROT_ADDR, reg_val); + data_th = FIELD_GET(GLBL2_RRQ_PCIE_THROT_DAT_MASK, reg_val); + + reg_val = + FIELD_SET(GLBL2_RRQ_BRG_THROT_REQ_EN_MASK, + GLBL2_RRQ_BRG_THROT_REQ_EN_DFLT) | + FIELD_SET(GLBL2_RRQ_BRG_THROT_REQ_MASK, + GLBL2_RRQ_BRG_THROT_REQ_DFLT) | + FIELD_SET(GLBL2_RRQ_BRG_THROT_DAT_EN_MASK, + GLBL2_RRQ_BRG_THROT_DAT_EN_DFLT) | + FIELD_SET(GLBL2_RRQ_BRG_THROT_DAT_MASK, data_th); + qdma_reg_write(dev_hndl, EQDMA_GLBL2_RRQ_BRG_THROT_ADDR, reg_val); + reg_val = qdma_reg_read(dev_hndl, EQDMA_GLBL2_RRQ_BRG_THROT_ADDR); + qdma_log_info("%s: reg = 0x%08X val = 0x%08X\n", + __func__, EQDMA_GLBL2_RRQ_BRG_THROT_ADDR, reg_val); + +/******************* SET_4 *************************/ +#define EQDMA_H2C_REQ_THROT_PCIE_EN_REQ 1 +#define EQDMA_H2C_REQ_THROT_PCIE_REQ_TH GLBL2_RRQ_PCIE_THROT_REQ_DFLT +#define EQDMA_H2C_REQ_THROT_PCIE_EN_DATA 1 +#define EQDMA_H2C_REQ_THROT_PCIE_DATA_TH 24576 + + reg_val = + FIELD_SET(H2C_REQ_THROT_PCIE_EN_REQ_MASK, + EQDMA_H2C_REQ_THROT_PCIE_EN_REQ) | + FIELD_SET(H2C_REQ_THROT_PCIE_MASK, + EQDMA_H2C_REQ_THROT_PCIE_REQ_TH) | + FIELD_SET(H2C_REQ_THROT_PCIE_EN_DATA_MASK, + EQDMA_H2C_REQ_THROT_PCIE_EN_DATA) | + FIELD_SET(H2C_REQ_THROT_PCIE_DATA_THRESH_MASK, + EQDMA_H2C_REQ_THROT_PCIE_DATA_TH); + qdma_reg_write(dev_hndl, EQDMA_H2C_REQ_THROT_PCIE_ADDR, reg_val); + reg_val = qdma_reg_read(dev_hndl, EQDMA_H2C_REQ_THROT_PCIE_ADDR); + qdma_log_info("%s: reg = 0x%08X val = 0x%08X\n", + __func__, EQDMA_H2C_REQ_THROT_PCIE_ADDR, reg_val); +#define EQDMA_H2C_REQ_THROT_AXIMM_EN_REQ 1 +#define EQDMA_H2C_REQ_THROT_AXIMM_REQ_TH 64 +#define EQDMA_H2C_REQ_THROT_AXIMM_EN_DATA 1 +#define EQDMA_H2C_REQ_THROT_AXIMM_DATA_TH 16384 + + reg_val = + FIELD_SET(H2C_REQ_THROT_AXIMM_EN_REQ_MASK, + EQDMA_H2C_REQ_THROT_AXIMM_EN_REQ) | + FIELD_SET(H2C_REQ_THROT_AXIMM_MASK, + EQDMA_H2C_REQ_THROT_AXIMM_REQ_TH) | + FIELD_SET(H2C_REQ_THROT_AXIMM_EN_DATA_MASK, + EQDMA_H2C_REQ_THROT_AXIMM_EN_DATA) | + FIELD_SET(H2C_REQ_THROT_AXIMM_DATA_THRESH_MASK, + EQDMA_H2C_REQ_THROT_AXIMM_DATA_TH); + qdma_reg_write(dev_hndl, EQDMA_H2C_REQ_THROT_AXIMM_ADDR, reg_val); + reg_val = qdma_reg_read(dev_hndl, EQDMA_H2C_REQ_THROT_AXIMM_ADDR); + qdma_log_info("%s: reg = 0x%08X val = 0x%08X\n", + __func__, EQDMA_H2C_REQ_THROT_AXIMM_ADDR, reg_val); + +#define H2C_MM_DATA_THROTTLE_RSVD_1_DFLT 0 +#define EQDMA_H2C_MM_DATA_TH_EN GLBL2_RRQ_PCIE_THROT_DAT_EN_DFLT +#define EQDMA_H2C_MM_DATA_TH GLBL2_RRQ_PCIE_THROT_DAT_DFLT + + reg_val = + FIELD_SET(H2C_MM_DATA_THROTTLE_RSVD_1_MASK, + H2C_MM_DATA_THROTTLE_RSVD_1_DFLT) | + FIELD_SET(H2C_MM_DATA_THROTTLE_DAT_EN_MASK, + EQDMA_H2C_MM_DATA_TH_EN) | + FIELD_SET(H2C_MM_DATA_THROTTLE_DAT_MASK, EQDMA_H2C_MM_DATA_TH); + qdma_reg_write(dev_hndl, EQDMA_H2C_MM_DATA_THROTTLE_ADDR, reg_val); + reg_val = qdma_reg_read(dev_hndl, EQDMA_H2C_MM_DATA_THROTTLE_ADDR); + qdma_log_info("%s: reg = 0x%08X val = 0x%08X\n", + __func__, EQDMA_H2C_MM_DATA_THROTTLE_ADDR, reg_val); + +} + + /* * eqdma_indirect_reg_invalidate() - helper function to invalidate indirect * context registers. 
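
Both new helpers, eqdma_cpm5_set_perf_opt() earlier in this patch and eqdma_set_perf_opt() just above, follow the same recipe: read back any field that scales another setting (for example the prefetch cache depth), build the full register image with FIELD_SET() over the GENMASK-defined fields, write it, then read the register back and log it. The standalone sketch below shows only that compose-and-decode step; the GENMASK/FIELD_SET/FIELD_GET definitions here are simplified stand-ins for the driver's qdma_access helpers, and the cache-depth value of 64 is an illustrative number, not a value read from hardware.

/*
 * Standalone sketch (not part of the patch): composing a prefetch
 * configuration image the way the new perf-opt helpers do.
 */
#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l) \
	(((~0U) << (l)) & (~0U >> (31 - (h))))

/* shift of a field = position of the lowest set bit of its mask */
static inline uint32_t field_shift(uint32_t mask)
{
	uint32_t shift = 0;

	while (!(mask & 1U)) {
		mask >>= 1;
		shift++;
	}
	return shift;
}

#define FIELD_SET(mask, val) (((val) << field_shift(mask)) & (mask))
#define FIELD_GET(mask, reg) (((reg) & (mask)) >> field_shift(mask))

/* field layout taken from the PFCH_CFG_1 comment in the patch */
#define C2H_PFCH_CFG_1_EVT_QCNT_TH_MASK	GENMASK(31, 16)
#define C2H_PFCH_CFG_1_QCNT_MASK	GENMASK(15, 0)

int main(void)
{
	/* example only: pretend the cache-depth register reported 64 */
	uint32_t pftch_cache_depth = 64;
	uint32_t pftch_qcnt = pftch_cache_depth - 4;
	uint32_t pftch_evnt_qcnt_th = pftch_qcnt - 4;
	uint32_t reg_val;

	reg_val = FIELD_SET(C2H_PFCH_CFG_1_EVT_QCNT_TH_MASK,
			    pftch_evnt_qcnt_th) |
		  FIELD_SET(C2H_PFCH_CFG_1_QCNT_MASK, pftch_qcnt);

	printf("PFCH_CFG_1 image = 0x%08X (qcnt=%u, evt_th=%u)\n",
	       reg_val,
	       FIELD_GET(C2H_PFCH_CFG_1_QCNT_MASK, reg_val),
	       FIELD_GET(C2H_PFCH_CFG_1_EVT_QCNT_TH_MASK, reg_val));
	return 0;
}

Writing the whole image in one shot, rather than read-modify-write per field, is what lets the helpers pin the reserved fields explicitly to their documented default of zero.
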
@@ -2153,26 +2463,10 @@ int eqdma_set_default_global_csr(void *dev_hndl) DEFAULT_MAX_DSC_FETCH) | FIELD_SET(GLBL_DSC_CFG_WB_ACC_INT_MASK, DEFAULT_WRB_INT); - } else if (eqdma_ip_version == EQDMA_IP_VERSION_5) { - /* For QDMA4.0 and QDMA5.0, HW design and register map - * is same except some performance optimizations - */ - reg_val = - FIELD_SET(GLBL_DSC_CFG_C2H_UODSC_LIMIT_MASK, - EQDMA5_DEFAULT_C2H_UODSC_LIMIT) | - FIELD_SET(GLBL_DSC_CFG_H2C_UODSC_LIMIT_MASK, - EQDMA5_DEFAULT_H2C_UODSC_LIMIT) | - FIELD_SET(GLBL_DSC_CFG_MAXFETCH_MASK, - EQDMA5_DEFAULT_MAX_DSC_FETCH) | - FIELD_SET(GLBL_DSC_CFG_WB_ACC_INT_MASK, - EQDMA5_DEFAULT_WRB_INT); - } else { - qdma_log_error("%s: ip_type = %d is invalid, err:%d\n", - __func__, eqdma_ip_version, - -QDMA_ERR_INV_PARAM); - return -QDMA_ERR_INV_PARAM; + + qdma_reg_write(dev_hndl, EQDMA_GLBL_DSC_CFG_ADDR, + reg_val); } - qdma_reg_write(dev_hndl, EQDMA_GLBL_DSC_CFG_ADDR, reg_val); } if (dev_cap.st_en) { @@ -2189,18 +2483,16 @@ int eqdma_set_default_global_csr(void *dev_hndl) (cfg_val >> 2)) | FIELD_SET(C2H_PFCH_CFG_1_EVT_QCNT_TH_MASK, ((cfg_val >> 2) - 4)); - } else { - /* Performance optimization for EQDMA5.0. */ - reg_val = FIELD_SET(C2H_PFCH_CFG_1_QCNT_MASK, - EQDMA5_DEFAULT_C2H_PFCH_QCNT) | - FIELD_SET(C2H_PFCH_CFG_1_EVT_QCNT_TH_MASK, - EQDMA5_DEFAULT_C2H_EVT_QCNT_TH); - } - qdma_reg_write(dev_hndl, EQDMA_C2H_PFCH_CFG_1_ADDR, reg_val); - reg_val = qdma_reg_read(dev_hndl, EQDMA_C2H_PFCH_CFG_2_ADDR); - reg_val |= FIELD_SET(C2H_PFCH_CFG_2_FENCE_MASK, 1); - qdma_reg_write(dev_hndl, EQDMA_C2H_PFCH_CFG_2_ADDR, reg_val); + qdma_reg_write(dev_hndl, EQDMA_C2H_PFCH_CFG_1_ADDR, + reg_val); + + reg_val = qdma_reg_read(dev_hndl, + EQDMA_C2H_PFCH_CFG_2_ADDR); + reg_val |= FIELD_SET(C2H_PFCH_CFG_2_FENCE_MASK, 1); + qdma_reg_write(dev_hndl, EQDMA_C2H_PFCH_CFG_2_ADDR, + reg_val); + } /* C2H interrupt timer tick */ qdma_reg_write(dev_hndl, EQDMA_C2H_INT_TIMER_TICK_ADDR, @@ -2228,29 +2520,14 @@ int eqdma_set_default_global_csr(void *dev_hndl) EQDMA_H2C_THROT_REQ_THRESH) | FIELD_SET(H2C_REQ_THROT_PCIE_EN_REQ_MASK, EQDMA_THROT_EN_REQ); - } else if (eqdma_ip_version == EQDMA_IP_VERSION_5) { - /* For QDMA4.0 and QDMA5.0, HW design and register map - * is same except some performance optimizations - */ - reg_val = - FIELD_SET(H2C_REQ_THROT_PCIE_DATA_THRESH_MASK, - EQDMA5_H2C_THROT_DATA_THRESH) | - FIELD_SET(H2C_REQ_THROT_PCIE_EN_DATA_MASK, - EQDMA5_THROT_EN_DATA) | - FIELD_SET(H2C_REQ_THROT_PCIE_MASK, - EQDMA5_H2C_THROT_REQ_THRESH) | - FIELD_SET(H2C_REQ_THROT_PCIE_EN_REQ_MASK, - EQDMA5_THROT_EN_REQ); - } else { - qdma_log_error("%s: ip_type = %d is invalid, err:%d\n", - __func__, eqdma_ip_version, - -QDMA_ERR_INV_PARAM); - return -QDMA_ERR_INV_PARAM; + + qdma_reg_write(dev_hndl, EQDMA_H2C_REQ_THROT_PCIE_ADDR, + reg_val); } - qdma_reg_write(dev_hndl, EQDMA_H2C_REQ_THROT_PCIE_ADDR, - reg_val); } + if (eqdma_ip_version == EQDMA_IP_VERSION_5) + eqdma_set_perf_opt(dev_hndl); return QDMA_SUCCESS; } diff --git a/QDMA/linux-kernel/driver/libqdma/qdma_access/qdma_access_version.h b/QDMA/linux-kernel/driver/libqdma/qdma_access/qdma_access_version.h index 311e9468e1426f739af9b94a278e5b0eb58276cc..4dfef275441e47346c0eecafccc433284a973a9f 100755 --- a/QDMA/linux-kernel/driver/libqdma/qdma_access/qdma_access_version.h +++ b/QDMA/linux-kernel/driver/libqdma/qdma_access/qdma_access_version.h @@ -21,7 +21,7 @@ #define QDMA_VERSION_MAJOR 2023 #define QDMA_VERSION_MINOR 1 -#define QDMA_VERSION_PATCH 0 +#define QDMA_VERSION_PATCH 1 #define QDMA_VERSION_STR \ __stringify(QDMA_VERSION_MAJOR) "." 
\ diff --git a/QDMA/linux-kernel/driver/libqdma/qdma_access/qdma_soft_access/qdma_soft_access.c b/QDMA/linux-kernel/driver/libqdma/qdma_access/qdma_soft_access/qdma_soft_access.c index a7b3467f99b849a264dd9288ed112e6484cc245a..57042d0184ce9e7e5f5abed7a713c73137c0e0ae 100755 --- a/QDMA/linux-kernel/driver/libqdma/qdma_access/qdma_soft_access/qdma_soft_access.c +++ b/QDMA/linux-kernel/driver/libqdma/qdma_access/qdma_soft_access/qdma_soft_access.c @@ -4006,16 +4006,9 @@ int qdma_queue_pidx_update(void *dev_hndl, uint8_t is_vf, uint16_t qid, uint32_t reg_addr = 0; uint32_t reg_val = 0; - if (!dev_hndl) { - qdma_log_error("%s: dev_handle is NULL, err:%d\n", - __func__, - -QDMA_ERR_INV_PARAM); - return -QDMA_ERR_INV_PARAM; - } - if (!reg_info) { - qdma_log_error("%s: reg_info is NULL, err:%d\n", - __func__, - -QDMA_ERR_INV_PARAM); + if (!dev_hndl || !reg_info) { + qdma_log_error("%s: dev_handle is (%p), reg_info is (%p), err:%d\n", + __func__, dev_hndl, reg_info, -QDMA_ERR_INV_PARAM); return -QDMA_ERR_INV_PARAM; } @@ -4061,17 +4054,9 @@ int qdma_queue_cmpt_cidx_update(void *dev_hndl, uint8_t is_vf, QDMA_OFFSET_DMAP_SEL_CMPT_CIDX; uint32_t reg_val = 0; - if (!dev_hndl) { - qdma_log_error("%s: dev_handle is NULL, err:%d\n", - __func__, - -QDMA_ERR_INV_PARAM); - return -QDMA_ERR_INV_PARAM; - } - - if (!reg_info) { - qdma_log_error("%s: reg_info is NULL, err:%d\n", - __func__, - -QDMA_ERR_INV_PARAM); + if (!dev_hndl || !reg_info) { + qdma_log_error("%s: dev_handle (%p) reg_info (%p) , err:%d\n", + __func__, dev_hndl, reg_info, -QDMA_ERR_INV_PARAM); return -QDMA_ERR_INV_PARAM; } @@ -4118,15 +4103,9 @@ int qdma_queue_intr_cidx_update(void *dev_hndl, uint8_t is_vf, QDMA_OFFSET_DMAP_SEL_INT_CIDX; uint32_t reg_val = 0; - if (!dev_hndl) { - qdma_log_error("%s: dev_handle is NULL, err:%d\n", - __func__, -QDMA_ERR_INV_PARAM); - return -QDMA_ERR_INV_PARAM; - } - - if (!reg_info) { - qdma_log_error("%s: reg_info is NULL, err:%d\n", - __func__, -QDMA_ERR_INV_PARAM); + if (!dev_hndl || !reg_info) { + qdma_log_error("%s: dev_handle (%p) reg_info (%p), err:%d\n", + __func__, dev_hndl, reg_info, -QDMA_ERR_INV_PARAM); return -QDMA_ERR_INV_PARAM; }
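
The final hunks in qdma_soft_access.c fold the separate dev_hndl and reg_info NULL checks in the pidx/cidx update helpers into a single branch that logs both pointer values. The sketch below shows the shape of that check in isolation; qdma_log_error() and QDMA_ERR_INV_PARAM are stand-ins for the driver's own definitions, and update_reg() is a hypothetical wrapper, not a function from the patch.

/*
 * Standalone sketch (not part of the patch): combined argument
 * validation in the style now used by the doorbell update helpers.
 */
#include <stdio.h>

#define QDMA_ERR_INV_PARAM	1
#define qdma_log_error(...)	fprintf(stderr, __VA_ARGS__)

static int update_reg(void *dev_hndl, void *reg_info)
{
	/* one branch, one log line carrying both pointer values */
	if (!dev_hndl || !reg_info) {
		qdma_log_error("%s: dev_handle (%p) reg_info (%p), err:%d\n",
			       __func__, dev_hndl, reg_info,
			       -QDMA_ERR_INV_PARAM);
		return -QDMA_ERR_INV_PARAM;
	}

	/* the real helpers compose and write the doorbell register here */
	return 0;
}

int main(void)
{
	int dummy = 0;

	/* exercises the error path: reg_info is NULL */
	return update_reg(&dummy, NULL) ? 1 : 0;
}

Collapsing the two branches keeps a single log line that still identifies which argument was NULL, which is all the original pair of messages conveyed.
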