diff --git a/QDMA/DPDK/RELEASE b/QDMA/DPDK/RELEASE
index 4642030b356660ed96ff33e08d622221edcfb6e2..9409e12195904337ca5728ec884ac92a4966fd42 100755
--- a/QDMA/DPDK/RELEASE
+++ b/QDMA/DPDK/RELEASE
@@ -1,4 +1,4 @@
-RELEASE: 2023.1.0
+RELEASE: 2023.1.1
 =================
 
 This release is based on DPDK v20.11, v21.11 and v22.11 and
@@ -117,13 +117,14 @@ CPM5
 - Enabled 128 bit SIMD vectorization for Intel and AMD platforms by default for qdma Receive and Transmit APIs
 - Added IO memory barriers for H2C/C2H producer index and completion ring consumer index updates
 
+2023.1.1 Updates
+----------------
+- Optimized the driver code and HW register settings for performance improvements
+
 KNOWN ISSUE:
 ============
 - CPM5:
-	- Performance optimizations are not finalized, Performance report with optimizations will be available in next patch release.
-
-- QDMA5.0:
-	- Performance optimizations are not finalized, Performance report with optimizations will be available in next patch release.
+	- Performance optimizations are not finalized. An updated performance report with further optimizations will be available in the next patch release.
 
 - All Designs
 	- Function Level Reset(FLR) of PF device when VFs are attached to this PF results in mailbox communication failure
diff --git a/QDMA/DPDK/drivers/net/qdma/qdma.h b/QDMA/DPDK/drivers/net/qdma/qdma.h
index 182bd97fe85d222483366376a639776f0353d284..3e139665d2fa8be4da8a005c22c3a5e9e5220ed9 100755
--- a/QDMA/DPDK/drivers/net/qdma/qdma.h
+++ b/QDMA/DPDK/drivers/net/qdma/qdma.h
@@ -166,12 +166,13 @@ struct qdma_cmpt_queue {
  * Structure associated with each RX queue.
  */
 struct qdma_rx_queue {
+	/* Keep the most frequently accessed members in the first cacheline */
 	struct rte_mempool	*mb_pool; /**< mbuf pool to populate RX ring. */
 	void			*rx_ring; /**< RX ring virtual address */
 	union qdma_ul_st_cmpt_ring	*cmpt_ring;
 	struct wb_status	*wb_status;
 	struct rte_mbuf		**sw_ring; /**< address of RX software ring. */
-	struct rte_eth_dev	*dev;
+	enum rte_pmd_qdma_bypass_desc_len	bypass_desc_sz:7;
 
 	uint16_t		rx_tail;
 	uint16_t		cmpt_desc_len;
@@ -185,6 +186,8 @@ struct qdma_rx_queue {
 	struct qdma_q_cmpt_cidx_reg_info cmpt_cidx_info;
 	struct qdma_pkt_stats	stats;
 
+	struct rte_eth_dev	*dev;
+
 	uint16_t		port_id; /**< Device port identifier. */
 	uint8_t			status:1;
 	uint8_t			err:1;
@@ -198,7 +201,6 @@ struct qdma_rx_queue {
 
 	union qdma_ul_st_cmpt_ring cmpt_data[QDMA_MAX_BURST_SIZE];
 
-	enum rte_pmd_qdma_bypass_desc_len	bypass_desc_sz:7;
 	uint8_t			func_id; /**< RX queue index. */
 	uint64_t		ep_addr;
 
@@ -231,22 +233,25 @@ struct qdma_rx_queue {
  * Structure associated with each TX queue.
  */
 struct qdma_tx_queue {
+	/* Keep the most frequently accessed members in the first cacheline */
+	enum rte_pmd_qdma_bypass_desc_len		bypass_desc_sz:7;
+	uint16_t			tx_fl_tail;
 	void				*tx_ring; /* TX ring virtual address*/
+	struct qdma_q_pidx_reg_info	q_pidx_info;
+
 	struct wb_status		*wb_status;
 	struct rte_mbuf			**sw_ring;/* SW ring virtual address*/
-	struct rte_eth_dev		*dev;
-	uint16_t			tx_fl_tail;
 	uint16_t			tx_desc_pend;
 	uint16_t			nb_tx_desc; /* No of TX descriptors.*/
 	rte_spinlock_t			pidx_update_lock;
-	struct qdma_q_pidx_reg_info	q_pidx_info;
 	uint64_t			offloads; /* Tx offloads */
 
+	struct rte_eth_dev		*dev;
+
 	uint8_t				st_mode:1;/* dma-mode: MM or ST */
 	uint8_t				tx_deferred_start:1;
 	uint8_t				en_bypass:1;
 	uint8_t				status:1;
-	enum rte_pmd_qdma_bypass_desc_len		bypass_desc_sz:7;
 	uint16_t			port_id; /* Device port identifier. */
 	uint8_t				func_id; /* RX queue index. */
 	int8_t				ringszidx;
@@ -278,10 +283,10 @@ struct queue_info {
 };
 
 struct qdma_pci_dev {
+	void *bar_addr[QDMA_NUM_BARS]; /* memory mapped I/O addr for BARs */
 	int config_bar_idx;
 	int user_bar_idx;
 	int bypass_bar_idx;
-	void *bar_addr[QDMA_NUM_BARS]; /* memory mapped I/O addr for BARs */
 
 	/* Driver Attributes */
 	uint32_t qsets_en;  /* no. of queue pairs enabled */
diff --git a/QDMA/DPDK/drivers/net/qdma/qdma_access/eqdma_cpm5_access/eqdma_cpm5_access.c b/QDMA/DPDK/drivers/net/qdma/qdma_access/eqdma_cpm5_access/eqdma_cpm5_access.c
index 504973383e189b3fabae722ecdd9c0ecf2a8a25d..ee516ee088715167ce36d005b63a2d10f4143ccb 100755
--- a/QDMA/DPDK/drivers/net/qdma/qdma_access/eqdma_cpm5_access/eqdma_cpm5_access.c
+++ b/QDMA/DPDK/drivers/net/qdma/qdma_access/eqdma_cpm5_access/eqdma_cpm5_access.c
@@ -99,6 +99,8 @@
 #define EQDMA_CPM5_GLBL2_FLR_PRESENT_MASK			BIT(1)
 #define EQDMA_CPM5_GLBL2_MAILBOX_EN_MASK			BIT(0)
 
+#define EQDMA_CPM5_DEFAULT_C2H_INTR_TIMER_TICK  50
+#define PREFETCH_QUEUE_COUNT_STEP               4
 
 /* TODO: This is work around and this needs to be auto generated from ODS */
 /** EQDMA_CPM5_IND_REG_SEL_FMAP */
@@ -1734,6 +1736,307 @@ static uint32_t eqdma_cpm5_intr_context_buf_len(void)
 	return len;
 }
 
+/*
+ * eqdma_cpm5_set_perf_opt() - Helper function to apply the CPM5
+ *				performance optimizations.
+ */
+static void eqdma_cpm5_set_perf_opt(void *dev_hndl)
+{
+	uint32_t reg_val = 0;
+	uint32_t pftch_cache_depth = 0;
+	uint32_t pftch_qcnt = 0;
+	uint32_t pftch_evnt_qcnt_th = 0;
+	uint32_t crdt_coal_fifo_th = 0;
+	uint32_t crdt_coal_crdt_th = 0;
+
+	/* C2H interrupt timer tick */
+	qdma_reg_write(dev_hndl, EQDMA_CPM5_C2H_INT_TIMER_TICK_ADDR,
+		EQDMA_CPM5_DEFAULT_C2H_INTR_TIMER_TICK);
+
+/*
+ * #define EQDMA_CPM5_C2H_PFCH_CACHE_DEPTH_ADDR    0xBE0
+ * #define C2H_PFCH_CACHE_DEPTH_MAX_STBUF_MASK     GENMASK(23, 16)
+ * #define C2H_PFCH_CACHE_DEPTH_MASK               GENMASK(7, 0)
+ */
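+	/* Read the prefetch cache depth; it sizes the prefetch and
+	 * credit-coalescing thresholds programmed below.
+	 */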
+	reg_val = qdma_reg_read(dev_hndl, EQDMA_CPM5_C2H_PFCH_CACHE_DEPTH_ADDR);
+	pftch_cache_depth = FIELD_GET(C2H_PFCH_CACHE_DEPTH_MASK, reg_val);
+
+/*
+ * #define EQDMA_CPM5_GLBL_DSC_CFG_ADDR      0x250
+ * #define GLBL_DSC_CFG_RSVD_1_MASK          GENMASK(31, 10)
+ * #define GLBL_DSC_CFG_UNC_OVR_COR_MASK     BIT(9)
+ * #define GLBL_DSC_CFG_CTXT_FER_DIS_MASK    BIT(8)
+ * #define GLBL_DSC_CFG_RSVD_2_MASK          GENMASK(7, 6)
+ * #define GLBL_DSC_CFG_MAXFETCH_MASK        GENMASK(5, 3)
+ * #define GLBL_DSC_CFG_WB_ACC_INT_MASK      GENMASK(2, 0)
+ */
+#define GLBL_DSC_CFG_RSVD_1_DFLT        0
+#define GLBL_DSC_CFG_UNC_OVR_COR_DFLT   0
+#define GLBL_DSC_CFG_CTXT_FER_DIS_DFLT  0
+#define GLBL_DSC_CFG_RSVD_2_DFLT        0
+/* MAXFETCH: 2 in internal mode, else 5 */
+#define GLBL_DSC_CFG_MAXFETCH           2
+#define GLBL_DSC_CFG_WB_ACC_INT         5
+	reg_val =
+		FIELD_SET(GLBL_DSC_CFG_RSVD_1_MASK, GLBL_DSC_CFG_RSVD_1_DFLT) |
+		FIELD_SET(GLBL_DSC_CFG_UNC_OVR_COR_MASK,
+					GLBL_DSC_CFG_UNC_OVR_COR_DFLT) |
+		FIELD_SET(GLBL_DSC_CFG_CTXT_FER_DIS_MASK,
+					GLBL_DSC_CFG_CTXT_FER_DIS_DFLT) |
+		FIELD_SET(GLBL_DSC_CFG_RSVD_2_MASK, GLBL_DSC_CFG_RSVD_2_DFLT) |
+		FIELD_SET(GLBL_DSC_CFG_MAXFETCH_MASK,
+					GLBL_DSC_CFG_MAXFETCH) |
+		FIELD_SET(GLBL_DSC_CFG_WB_ACC_INT_MASK,
+					GLBL_DSC_CFG_WB_ACC_INT);
+	qdma_reg_write(dev_hndl, EQDMA_CPM5_GLBL_DSC_CFG_ADDR, reg_val);
+	reg_val = qdma_reg_read(dev_hndl, EQDMA_CPM5_GLBL_DSC_CFG_ADDR);
+	qdma_log_info("%s: reg = 0x%08X val = 0x%08X\n",
+		__func__, EQDMA_CPM5_GLBL_DSC_CFG_ADDR, reg_val);
+
+/*
+ * #define EQDMA_CPM5_CFG_BLK_MISC_CTL_ADDR               0x4C
+ * #define CFG_BLK_MISC_CTL_RSVD_1_MASK                   GENMASK(31, 24)
+ * #define CFG_BLK_MISC_CTL_10B_TAG_EN_MASK               BIT(23)
+ * #define CFG_BLK_MISC_CTL_RSVD_2_MASK                   BIT(22)
+ * #define CFG_BLK_MISC_CTL_AXI_WBK_MASK                  BIT(21)
+ * #define CFG_BLK_MISC_CTL_AXI_DSC_MASK                  BIT(20)
+ * #define CFG_BLK_MISC_CTL_NUM_TAG_MASK                  GENMASK(19, 8)
+ * #define CFG_BLK_MISC_CTL_RSVD_3_MASK                   GENMASK(7, 5)
+ * #define CFG_BLK_MISC_CTL_RQ_METERING_MULTIPLIER_MASK   GENMASK(4, 0)
+ */
+#define CFG_BLK_MISC_CTL_RSVD_1_DFLT             0
+#define CFG_BLK_MISC_CTL_RSVD_2_DFLT             0
+#define CFG_BLK_MISC_CTL_AXI_WBK_DFLT            0
+#define CFG_BLK_MISC_CTL_AXI_DSC_DFLT            0
+/* 10-bit tag enabled: NUM_TAG = 512; otherwise NUM_TAG = 256 */
+#ifdef EQDMA_CPM5_10BIT_TAG_ENABLE
+#define CFG_BLK_MISC_CTL_10B_TAG_DFLT            1
+#define CFG_BLK_MISC_CTL_NUM_TAG_DFLT            512
+#else
+#define CFG_BLK_MISC_CTL_10B_TAG_DFLT            0
+#define CFG_BLK_MISC_CTL_NUM_TAG_DFLT            256
+#endif
+#define CFG_BLK_MISC_CTL_RSVD_3_DFLT             0
+#define EQDMA_CFG_BLK_MISC_CTL_RQ_METERING_MUL   31
+	reg_val =
+		FIELD_SET(CFG_BLK_MISC_CTL_RSVD_1_MASK,
+					CFG_BLK_MISC_CTL_RSVD_1_DFLT) |
+		FIELD_SET(CFG_BLK_MISC_CTL_10B_TAG_EN_MASK,
+					CFG_BLK_MISC_CTL_10B_TAG_DFLT) |
+		FIELD_SET(CFG_BLK_MISC_CTL_RSVD_2_MASK,
+					CFG_BLK_MISC_CTL_RSVD_2_DFLT) |
+		FIELD_SET(CFG_BLK_MISC_CTL_AXI_WBK_MASK,
+					CFG_BLK_MISC_CTL_AXI_WBK_DFLT) |
+		FIELD_SET(CFG_BLK_MISC_CTL_AXI_DSC_MASK,
+					CFG_BLK_MISC_CTL_AXI_DSC_DFLT) |
+		FIELD_SET(CFG_BLK_MISC_CTL_NUM_TAG_MASK,
+					CFG_BLK_MISC_CTL_NUM_TAG_DFLT) |
+		FIELD_SET(CFG_BLK_MISC_CTL_RSVD_3_MASK,
+					CFG_BLK_MISC_CTL_RSVD_3_DFLT) |
+		FIELD_SET(CFG_BLK_MISC_CTL_RQ_METERING_MULTIPLIER_MASK,
+					EQDMA_CFG_BLK_MISC_CTL_RQ_METERING_MUL);
+	qdma_reg_write(dev_hndl, EQDMA_CPM5_CFG_BLK_MISC_CTL_ADDR, reg_val);
+	reg_val = qdma_reg_read(dev_hndl, EQDMA_CPM5_CFG_BLK_MISC_CTL_ADDR);
+	qdma_log_info("%s: reg = 0x%08X val = 0x%08X\n",
+			__func__, EQDMA_CPM5_CFG_BLK_MISC_CTL_ADDR, reg_val);
+
+/*
+ * #define EQDMA_CPM5_C2H_PFCH_CFG_ADDR        0xB08
+ * #define C2H_PFCH_CFG_EVTFL_TH_MASK          GENMASK(31, 16)
+ * #define C2H_PFCH_CFG_FL_TH_MASK             GENMASK(15, 0)
+ */
+#define EQDMA_PFTCH_CFG_EVT_PFTH_FL_TH         256
+#define C2H_PFCH_CFG_FL_TH_DFLT                256
+	reg_val =
+		FIELD_SET(C2H_PFCH_CFG_EVTFL_TH_MASK,
+					EQDMA_PFTCH_CFG_EVT_PFTH_FL_TH) |
+		FIELD_SET(C2H_PFCH_CFG_FL_TH_MASK,
+					C2H_PFCH_CFG_FL_TH_DFLT);
+
+	qdma_reg_write(dev_hndl, EQDMA_CPM5_C2H_PFCH_CFG_ADDR, reg_val);
+	reg_val = qdma_reg_read(dev_hndl, EQDMA_CPM5_C2H_PFCH_CFG_ADDR);
+	qdma_log_info("%s: reg = 0x%08X val = 0x%08X\n",
+			__func__, EQDMA_CPM5_C2H_PFCH_CFG_ADDR, reg_val);
+
+/*
+ * #define EQDMA_CPM5_C2H_PFCH_CFG_1_ADDR       0xA80
+ * #define C2H_PFCH_CFG_1_EVT_QCNT_TH_MASK      GENMASK(31, 16)
+ * #define C2H_PFCH_CFG_1_QCNT_MASK             GENMASK(15, 0)
+ */
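+	/* Keep the queue-count threshold one step below the cache depth
+	 * and the event-queue-count threshold one step below that.
+	 */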
+	pftch_qcnt = pftch_cache_depth - PREFETCH_QUEUE_COUNT_STEP;
+	pftch_evnt_qcnt_th = pftch_qcnt - PREFETCH_QUEUE_COUNT_STEP;
+	reg_val =
+		FIELD_SET(C2H_PFCH_CFG_1_EVT_QCNT_TH_MASK, pftch_evnt_qcnt_th) |
+		FIELD_SET(C2H_PFCH_CFG_1_QCNT_MASK, pftch_qcnt);
+	qdma_reg_write(dev_hndl, EQDMA_CPM5_C2H_PFCH_CFG_1_ADDR, reg_val);
+	reg_val = qdma_reg_read(dev_hndl, EQDMA_CPM5_C2H_PFCH_CFG_1_ADDR);
+	qdma_log_info("%s: reg = 0x%08X val = 0x%08X\n",
+			__func__, EQDMA_CPM5_C2H_PFCH_CFG_1_ADDR, reg_val);
+
+/*
+ * #define EQDMA_CPM5_C2H_PFCH_CFG_2_ADDR          0xA84
+ * #define C2H_PFCH_CFG_2_FENCE_MASK               BIT(31)
+ * #define C2H_PFCH_CFG_2_RSVD_MASK                GENMASK(30, 29)
+ * #define C2H_PFCH_CFG_2_VAR_DESC_NO_DROP_MASK    BIT(28)
+ * #define C2H_PFCH_CFG_2_LL_SZ_TH_MASK            GENMASK(27, 12)
+ * #define C2H_PFCH_CFG_2_VAR_DESC_NUM_MASK        GENMASK(11, 6)
+ * #define C2H_PFCH_CFG_2_NUM_MASK                 GENMASK(5, 0)
+ */
+#define C2H_PFCH_CFG_2_FENCE_EN                1
+#define C2H_PFCH_CFG_2_RSVD_DFLT               0
+#define C2H_PFCH_CFG_2_VAR_DESC_NO_DROP_DFLT   0
+#define C2H_PFCH_CFG_2_LL_SZ_TH_DFLT           1024
+#define C2H_PFCH_CFG_2_VAR_DESC_NUM            15
+#define C2H_PFCH_CFG_2_NUM_PFCH_DFLT           16
+	reg_val =
+		FIELD_SET(C2H_PFCH_CFG_2_FENCE_MASK,
+				C2H_PFCH_CFG_2_FENCE_EN) |
+		FIELD_SET(C2H_PFCH_CFG_2_RSVD_MASK,
+				C2H_PFCH_CFG_2_RSVD_DFLT) |
+		FIELD_SET(C2H_PFCH_CFG_2_VAR_DESC_NO_DROP_MASK,
+				C2H_PFCH_CFG_2_VAR_DESC_NO_DROP_DFLT) |
+		FIELD_SET(C2H_PFCH_CFG_2_LL_SZ_TH_MASK,
+				C2H_PFCH_CFG_2_LL_SZ_TH_DFLT) |
+		FIELD_SET(C2H_PFCH_CFG_2_VAR_DESC_NUM_MASK,
+				C2H_PFCH_CFG_2_VAR_DESC_NUM) |
+		FIELD_SET(C2H_PFCH_CFG_2_NUM_MASK,
+				C2H_PFCH_CFG_2_NUM_PFCH_DFLT);
+	qdma_reg_write(dev_hndl, EQDMA_CPM5_C2H_PFCH_CFG_2_ADDR, reg_val);
+	reg_val = qdma_reg_read(dev_hndl, EQDMA_CPM5_C2H_PFCH_CFG_2_ADDR);
+	qdma_log_info("%s: reg = 0x%08X val = 0x%08X\n",
+			__func__, EQDMA_CPM5_C2H_PFCH_CFG_2_ADDR, reg_val);
+
+/* Registers Not Applicable for CPM5
+ * #define EQDMA_PFCH_CFG_3_ADDR           0x147C
+ * #define EQDMA_PFCH_CFG_4_ADDR           0x1484
+ */
+
+/*
+ * #define EQDMA_CPM5_C2H_CRDT_COAL_CFG_1_ADDR     0x1400
+ * #define C2H_CRDT_COAL_CFG_1_RSVD_1_MASK         GENMASK(31, 18)
+ * #define C2H_CRDT_COAL_CFG_1_PLD_FIFO_TH_MASK    GENMASK(17, 10)
+ * #define C2H_CRDT_COAL_CFG_1_TIMER_TH_MASK       GENMASK(9, 0)
+ */
+#define C2H_CRDT_COAL_CFG_1_RSVD_1_DFLT            0
+#define C2H_CRDT_COAL_CFG_1_PLD_FIFO_TH_DFLT       16
+#define C2H_CRDT_COAL_CFG_1_TIMER_TH               16
+	reg_val =
+		FIELD_SET(C2H_CRDT_COAL_CFG_1_RSVD_1_MASK,
+				C2H_CRDT_COAL_CFG_1_RSVD_1_DFLT) |
+		FIELD_SET(C2H_CRDT_COAL_CFG_1_PLD_FIFO_TH_MASK,
+				C2H_CRDT_COAL_CFG_1_PLD_FIFO_TH_DFLT) |
+		FIELD_SET(C2H_CRDT_COAL_CFG_1_TIMER_TH_MASK,
+				C2H_CRDT_COAL_CFG_1_TIMER_TH);
+	qdma_reg_write(dev_hndl, EQDMA_CPM5_C2H_CRDT_COAL_CFG_1_ADDR, reg_val);
+	reg_val = qdma_reg_read(dev_hndl, EQDMA_CPM5_C2H_CRDT_COAL_CFG_1_ADDR);
+	qdma_log_info("%s: reg = 0x%08X val = 0x%08X\n",
+			__func__, EQDMA_CPM5_C2H_CRDT_COAL_CFG_1_ADDR, reg_val);
+
+/*
+ * #define EQDMA_CPM5_C2H_CRDT_COAL_CFG_2_ADDR     0x1404
+ * #define C2H_CRDT_COAL_CFG_2_RSVD_1_MASK         GENMASK(31, 24)
+ * #define C2H_CRDT_COAL_CFG_2_FIFO_TH_MASK        GENMASK(23, 16)
+ * #define C2H_CRDT_COAL_CFG_2_RESERVED1_MASK      GENMASK(15, 11)
+ * #define C2H_CRDT_COAL_CFG_2_NT_TH_MASK          GENMASK(10, 0)
+ */
+#define C2H_CRDT_COAL_CFG_2_RSVD_1_DFLT            0
+#define C2H_CRDT_COAL_CFG_2_RESERVED1_DFLT         0
+#define C2H_CRDT_COAL_CFG_2_CRDT_CNT_TH_DFLT       156
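+	/* The FIFO threshold tracks the prefetch cache depth with eight
+	 * entries of headroom; the credit-count threshold is a fixed
+	 * default.
+	 */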
+	crdt_coal_fifo_th = pftch_cache_depth - 8;
+	crdt_coal_crdt_th = C2H_CRDT_COAL_CFG_2_CRDT_CNT_TH_DFLT;
+	reg_val =
+		FIELD_SET(C2H_CRDT_COAL_CFG_2_RSVD_1_MASK,
+				C2H_CRDT_COAL_CFG_2_RSVD_1_DFLT) |
+		FIELD_SET(C2H_CRDT_COAL_CFG_2_FIFO_TH_MASK,
+				crdt_coal_fifo_th) |
+		FIELD_SET(C2H_CRDT_COAL_CFG_2_RESERVED1_MASK,
+				C2H_CRDT_COAL_CFG_2_RESERVED1_DFLT) |
+		FIELD_SET(C2H_CRDT_COAL_CFG_2_NT_TH_MASK,
+				crdt_coal_crdt_th);
+	qdma_reg_write(dev_hndl, EQDMA_CPM5_C2H_CRDT_COAL_CFG_2_ADDR, reg_val);
+	reg_val = qdma_reg_read(dev_hndl, EQDMA_CPM5_C2H_CRDT_COAL_CFG_2_ADDR);
+	qdma_log_info("%s: reg = 0x%08X val = 0x%08X\n",
+			__func__, EQDMA_CPM5_C2H_CRDT_COAL_CFG_2_ADDR, reg_val);
+
+/*
+ * #define EQDMA_CPM5_H2C_REQ_THROT_PCIE_ADDR      0xE24
+ * #define H2C_REQ_THROT_PCIE_EN_REQ_MASK          BIT(31)
+ * #define H2C_REQ_THROT_PCIE_MASK                 GENMASK(30, 19)
+ * #define H2C_REQ_THROT_PCIE_EN_DATA_MASK         BIT(18)
+ * #define H2C_REQ_THROT_PCIE_DATA_THRESH_MASK     GENMASK(17, 0)
+ */
+#define H2C_REQ_THROT_PCIE_EN_REQ    1
+/* REQ_TH: 448 (512 - 64) with the 10-bit tag enabled, else 192 */
+#ifdef EQDMA_CPM5_10BIT_TAG_ENABLE
+#define H2C_REQ_THROT_PCIE_REQ_TH    448
+#else
+#define H2C_REQ_THROT_PCIE_REQ_TH    192
+#endif
+#define H2C_REQ_THROT_PCIE_EN_DATA   1
+#define H2C_REQ_THROT_PCIE_DATA_TH   57344
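+	/* Enable both request-count and data-count throttling for H2C
+	 * requests toward PCIe.
+	 */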
+	reg_val =
+		FIELD_SET(H2C_REQ_THROT_PCIE_EN_REQ_MASK,
+					H2C_REQ_THROT_PCIE_EN_REQ) |
+		FIELD_SET(H2C_REQ_THROT_PCIE_MASK,
+					H2C_REQ_THROT_PCIE_REQ_TH) |
+		FIELD_SET(H2C_REQ_THROT_PCIE_EN_DATA_MASK,
+					H2C_REQ_THROT_PCIE_EN_DATA) |
+		FIELD_SET(H2C_REQ_THROT_PCIE_DATA_THRESH_MASK,
+					H2C_REQ_THROT_PCIE_DATA_TH);
+	qdma_reg_write(dev_hndl, EQDMA_CPM5_H2C_REQ_THROT_PCIE_ADDR, reg_val);
+	reg_val = qdma_reg_read(dev_hndl, EQDMA_CPM5_H2C_REQ_THROT_PCIE_ADDR);
+	qdma_log_info("%s: reg = 0x%08X val = 0x%08X\n",
+			__func__, EQDMA_CPM5_H2C_REQ_THROT_PCIE_ADDR, reg_val);
+
+/*
+ * #define EQDMA_CPM5_H2C_REQ_THROT_AXIMM_ADDR    0xE2C
+ * #define H2C_REQ_THROT_AXIMM_EN_REQ_MASK        BIT(31)
+ * #define H2C_REQ_THROT_AXIMM_MASK               GENMASK(30, 19)
+ * #define H2C_REQ_THROT_AXIMM_EN_DATA_MASK       BIT(18)
+ * #define H2C_REQ_THROT_AXIMM_DATA_THRESH_MASK   GENMASK(17, 0)
+ */
+#define H2C_REQ_THROT_AXIMM_EN_REQ      0
+/* REQ_TH: 448 (512 - 64) with the 10-bit tag enabled, else 192 */
+#ifdef EQDMA_CPM5_10BIT_TAG_ENABLE
+#define H2C_REQ_THROT_AXIMM_REQ_TH      448
+#else
+#define H2C_REQ_THROT_AXIMM_REQ_TH      192
+#endif
+#define H2C_REQ_THROT_AXIMM_EN_DATA     0
+#define H2C_REQ_THROT_AXIMM_DATA_TH     65536
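+	/* Program the AXI-MM throttle thresholds but leave both enable
+	 * bits clear.
+	 */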
+	reg_val =
+		FIELD_SET(H2C_REQ_THROT_AXIMM_EN_REQ_MASK,
+				H2C_REQ_THROT_AXIMM_EN_REQ) |
+		FIELD_SET(H2C_REQ_THROT_AXIMM_MASK,
+				H2C_REQ_THROT_AXIMM_REQ_TH) |
+		FIELD_SET(H2C_REQ_THROT_AXIMM_EN_DATA_MASK,
+				H2C_REQ_THROT_AXIMM_EN_DATA) |
+		FIELD_SET(H2C_REQ_THROT_AXIMM_DATA_THRESH_MASK,
+				H2C_REQ_THROT_AXIMM_DATA_TH);
+	qdma_reg_write(dev_hndl, EQDMA_CPM5_H2C_REQ_THROT_AXIMM_ADDR, reg_val);
+	reg_val = qdma_reg_read(dev_hndl, EQDMA_CPM5_H2C_REQ_THROT_AXIMM_ADDR);
+	qdma_log_info("%s: reg = 0x%08X val = 0x%08X\n",
+			__func__, EQDMA_CPM5_H2C_REQ_THROT_AXIMM_ADDR, reg_val);
+
+#define EQDMA_CPM5_H2C_MM_DATA_THROTTLE_ADDR    0x12EC
+#define H2C_MM_DATA_THROTTLE_RSVD_1_MASK        GENMASK(31, 17)
+#define H2C_MM_DATA_THROTTLE_DAT_EN_MASK        BIT(16)
+#define H2C_MM_DATA_THROTTLE_DAT_MASK           GENMASK(15, 0)
+#define H2C_MM_DATA_THROTTLE_RSVD_1_DFLT        0
+#define H2C_MM_DATA_TH_EN                       1
+#define H2C_MM_DATA_TH                          57344
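+	/* Enable H2C MM data throttling with the threshold above. */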
+	reg_val =
+		FIELD_SET(H2C_MM_DATA_THROTTLE_RSVD_1_MASK,
+					H2C_MM_DATA_THROTTLE_RSVD_1_DFLT) |
+		FIELD_SET(H2C_MM_DATA_THROTTLE_DAT_EN_MASK, H2C_MM_DATA_TH_EN) |
+		FIELD_SET(H2C_MM_DATA_THROTTLE_DAT_MASK, H2C_MM_DATA_TH);
+	qdma_reg_write(dev_hndl, EQDMA_CPM5_H2C_MM_DATA_THROTTLE_ADDR, reg_val);
+	reg_val = qdma_reg_read(dev_hndl, EQDMA_CPM5_H2C_MM_DATA_THROTTLE_ADDR);
+	qdma_log_info("%s: reg = 0x%08X val = 0x%08X\n",
+		__func__, EQDMA_CPM5_H2C_MM_DATA_THROTTLE_ADDR, reg_val);
+}
+
 /*
  * eqdma_cpm5_indirect_reg_invalidate() - helper function to invalidate
  * indirect context registers.
@@ -2147,27 +2450,6 @@ int eqdma_cpm5_set_default_global_csr(void *dev_hndl)
 		qdma_write_csr_values(dev_hndl, EQDMA_CPM5_C2H_BUF_SZ_ADDR,
 				0, QDMA_NUM_C2H_BUFFER_SIZES, buf_sz);
 
-		/* Prefetch Configuration */
-		reg_val = qdma_reg_read(dev_hndl,
-				EQDMA_CPM5_C2H_PFCH_CACHE_DEPTH_ADDR);
-		cfg_val = FIELD_GET(C2H_PFCH_CACHE_DEPTH_MASK, reg_val);
-
-		reg_val = FIELD_SET(C2H_PFCH_CFG_1_QCNT_MASK, (cfg_val >> 2)) |
-				  FIELD_SET(C2H_PFCH_CFG_1_EVT_QCNT_TH_MASK,
-						((cfg_val >> 2) - 4));
-		qdma_reg_write(dev_hndl,
-				EQDMA_CPM5_C2H_PFCH_CFG_1_ADDR, reg_val);
-
-		reg_val = qdma_reg_read(dev_hndl,
-					EQDMA_CPM5_C2H_PFCH_CFG_2_ADDR);
-		reg_val |= FIELD_SET(C2H_PFCH_CFG_2_FENCE_MASK, 1);
-		qdma_reg_write(dev_hndl,
-				EQDMA_CPM5_C2H_PFCH_CFG_2_ADDR, reg_val);
-
-		/* C2H interrupt timer tick */
-		qdma_reg_write(dev_hndl, EQDMA_CPM5_C2H_INT_TIMER_TICK_ADDR,
-				DEFAULT_C2H_INTR_TIMER_TICK);
-
 		/* C2h Completion Coalesce Configuration */
 		cfg_val = qdma_reg_read(dev_hndl,
 				EQDMA_CPM5_C2H_WRB_COAL_BUF_DEPTH_ADDR);
@@ -2179,22 +2461,9 @@ int eqdma_cpm5_set_default_global_csr(void *dev_hndl)
 			FIELD_SET(C2H_WRB_COAL_CFG_MAX_BUF_SZ_MASK, cfg_val);
 		qdma_reg_write(dev_hndl, EQDMA_CPM5_C2H_WRB_COAL_CFG_ADDR,
 				reg_val);
-
-		/* H2C throttle Configuration*/
-
-		reg_val =
-			FIELD_SET(H2C_REQ_THROT_PCIE_DATA_THRESH_MASK,
-					EQDMA_CPM5_H2C_THROT_DATA_THRESH) |
-			FIELD_SET(H2C_REQ_THROT_PCIE_EN_DATA_MASK,
-					EQDMA_CPM5_THROT_EN_DATA) |
-			FIELD_SET(H2C_REQ_THROT_PCIE_MASK,
-					EQDMA_CPM5_H2C_THROT_REQ_THRESH) |
-			FIELD_SET(H2C_REQ_THROT_PCIE_EN_REQ_MASK,
-					EQDMA_CPM5_THROT_EN_REQ);
-		qdma_reg_write(dev_hndl, EQDMA_CPM5_H2C_REQ_THROT_PCIE_ADDR,
-			reg_val);
 	}
 
+	eqdma_cpm5_set_perf_opt(dev_hndl);
 	return QDMA_SUCCESS;
 }
 
@@ -3564,7 +3833,7 @@ static int eqdma_cpm5_cmpt_context_read(void *dev_hndl, uint16_t hw_qid,
 	ctxt->en_int = FIELD_GET(CMPL_CTXT_DATA_W0_EN_INT_MASK, cmpt_ctxt[0]);
 	ctxt->trig_mode =
 		FIELD_GET(CMPL_CTXT_DATA_W0_TRIG_MODE_MASK, cmpt_ctxt[0]);
-	ctxt->fnc_id = FIELD_GET(CMPL_CTXT_DATA_W0_FNC_ID_MASK,	cmpt_ctxt[0]);
+	ctxt->fnc_id = FIELD_GET(CMPL_CTXT_DATA_W0_FNC_ID_MASK, cmpt_ctxt[0]);
 	ctxt->counter_idx =
 		(uint8_t)(FIELD_GET(CMPL_CTXT_DATA_W0_CNTER_IX_MASK,
 			cmpt_ctxt[0]));
@@ -5261,7 +5530,7 @@ int eqdma_cpm5_hw_error_enable(void *dev_hndl, uint32_t err_idx)
 	if (err_idx > EQDMA_CPM5_ERRS_ALL) {
 		qdma_log_error("%s: err_idx=%d is invalid, err:%d\n",
 				__func__,
-			       (enum eqdma_cpm5_error_idx)err_idx,
+				(enum eqdma_cpm5_error_idx)err_idx,
 				-QDMA_ERR_INV_PARAM);
 		return -QDMA_ERR_INV_PARAM;
 	}
diff --git a/QDMA/DPDK/drivers/net/qdma/qdma_access/eqdma_soft_access/eqdma_soft_access.c b/QDMA/DPDK/drivers/net/qdma/qdma_access/eqdma_soft_access/eqdma_soft_access.c
index 80a1ffb6b00097b1767ff757e91368ee19a64d50..bf5eab1c00cb824163f400ae04e2a478661fd2f3 100755
--- a/QDMA/DPDK/drivers/net/qdma/qdma_access/eqdma_soft_access/eqdma_soft_access.c
+++ b/QDMA/DPDK/drivers/net/qdma/qdma_access/eqdma_soft_access/eqdma_soft_access.c
@@ -1742,6 +1746,312 @@ static uint32_t eqdma_intr_context_buf_len(void)
 	return len;
 }
 
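+/*
+ * eqdma_set_perf_opt() - Helper function to apply the QDMA5.0
+ *			performance optimizations.
+ */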
+static void eqdma_set_perf_opt(void *dev_hndl)
+{
+	uint32_t reg_val = 0, data_th = 0, pfch_cache_dpth = 0;
+	/*
+	 * TODO: All the settings below are for QDMA5.0;
+	 * the QDMA4.0 settings still need to be added.
+	 */
+#define EQDMA_PFTCH_CACHE_DEPTH				64
+#define GLBL_DSC_CFG_RSVD_1_DFLT			0
+#define EQDMA_GLBL_DSC_CFG_C2H_UODSC_LIMIT		5
+#define EQDMA_GLBL_DSC_CFG_H2C_UODSC_LIMIT              8
+#define GLBL_DSC_CFG_UNC_OVR_COR_DFLT                   0
+#define GLBL_DSC_CFG_CTXT_FER_DIS_DFLT			0
+#define GLBL_DSC_CFG_RSVD_2_DFLT                        0
+#define EQDMA_GLBL_DSC_CFG_MAXFETCH                     2
+#define EQDMA_GLBL_DSC_CFG_WB_ACC_INT			5
+
+	reg_val =
+		FIELD_SET(GLBL_DSC_CFG_RSVD_1_MASK, GLBL_DSC_CFG_RSVD_1_DFLT) |
+		FIELD_SET(GLBL_DSC_CFG_C2H_UODSC_LIMIT_MASK,
+					EQDMA_GLBL_DSC_CFG_C2H_UODSC_LIMIT) |
+		FIELD_SET(GLBL_DSC_CFG_H2C_UODSC_LIMIT_MASK,
+					EQDMA_GLBL_DSC_CFG_H2C_UODSC_LIMIT) |
+		FIELD_SET(GLBL_DSC_CFG_UNC_OVR_COR_MASK,
+					GLBL_DSC_CFG_UNC_OVR_COR_DFLT) |
+		FIELD_SET(GLBL_DSC_CFG_CTXT_FER_DIS_MASK,
+					GLBL_DSC_CFG_CTXT_FER_DIS_DFLT) |
+		FIELD_SET(GLBL_DSC_CFG_RSVD_2_MASK, GLBL_DSC_CFG_RSVD_2_DFLT) |
+		FIELD_SET(GLBL_DSC_CFG_MAXFETCH_MASK,
+				EQDMA_GLBL_DSC_CFG_MAXFETCH) |
+		FIELD_SET(GLBL_DSC_CFG_WB_ACC_INT_MASK,
+				EQDMA_GLBL_DSC_CFG_WB_ACC_INT);
+	qdma_reg_write(dev_hndl, EQDMA_GLBL_DSC_CFG_ADDR, reg_val);
+	reg_val = qdma_reg_read(dev_hndl, EQDMA_GLBL_DSC_CFG_ADDR);
+	qdma_log_info("%s: reg = 0x%08X val = 0x%08X\n",
+			__func__, EQDMA_GLBL_DSC_CFG_ADDR, reg_val);
+
+#define CFG_BLK_MISC_CTL_RSVD_1_DFLT                       0
+#define CFG_BLK_MISC_CTL_10B_TAG_DFLT                      0
+#define CFG_BLK_MISC_CTL_RSVD_2_DFLT                       0
+#define CFG_BLK_MISC_CTL_AXI_WBK_DFLT                      0
+#define CFG_BLK_MISC_CTL_AXI_DSC_DFLT                      0
+#define CFG_BLK_MISC_CTL_NUM_TAG_DFLT                      256
+#define CFG_BLK_MISC_CTL_RSVD_3_DFLT                       0
+#define EQDMA_CFG_BLK_MISC_CTL_RQ_METERING_MUL             9
+
+	reg_val =
+		FIELD_SET(CFG_BLK_MISC_CTL_RSVD_1_MASK,
+				CFG_BLK_MISC_CTL_RSVD_1_DFLT) |
+		FIELD_SET(CFG_BLK_MISC_CTL_10B_TAG_EN_MASK,
+					CFG_BLK_MISC_CTL_10B_TAG_DFLT) |
+		FIELD_SET(CFG_BLK_MISC_CTL_RSVD_2_MASK,
+				CFG_BLK_MISC_CTL_RSVD_2_DFLT) |
+		FIELD_SET(CFG_BLK_MISC_CTL_AXI_WBK_MASK,
+					CFG_BLK_MISC_CTL_AXI_WBK_DFLT) |
+		FIELD_SET(CFG_BLK_MISC_CTL_AXI_DSC_MASK,
+					CFG_BLK_MISC_CTL_AXI_DSC_DFLT) |
+		FIELD_SET(CFG_BLK_MISC_CTL_NUM_TAG_MASK,
+					CFG_BLK_MISC_CTL_NUM_TAG_DFLT) |
+		FIELD_SET(CFG_BLK_MISC_CTL_RSVD_3_MASK,
+				CFG_BLK_MISC_CTL_RSVD_3_DFLT) |
+		FIELD_SET(CFG_BLK_MISC_CTL_RQ_METERING_MULTIPLIER_MASK,
+				EQDMA_CFG_BLK_MISC_CTL_RQ_METERING_MUL);
+	qdma_reg_write(dev_hndl, EQDMA_CFG_BLK_MISC_CTL_ADDR, reg_val);
+	reg_val = qdma_reg_read(dev_hndl, EQDMA_CFG_BLK_MISC_CTL_ADDR);
+	qdma_log_info("%s: reg = 0x%08X val = 0x%08X\n",
+			__func__, EQDMA_CFG_BLK_MISC_CTL_ADDR, reg_val);
+
+#define EQDMA_PFTCH_CFG_EVT_PFTH_FL_TH                    256
+#define C2H_PFCH_CFG_FL_TH_DFLT                           256
+
+	reg_val =
+		FIELD_SET(C2H_PFCH_CFG_EVTFL_TH_MASK,
+				EQDMA_PFTCH_CFG_EVT_PFTH_FL_TH) |
+		FIELD_SET(C2H_PFCH_CFG_FL_TH_MASK, C2H_PFCH_CFG_FL_TH_DFLT);
+
+	qdma_reg_write(dev_hndl, EQDMA_C2H_PFCH_CFG_ADDR, reg_val);
+	reg_val = qdma_reg_read(dev_hndl, EQDMA_C2H_PFCH_CFG_ADDR);
+	qdma_log_info("%s: reg = 0x%08X val = 0x%08X\n",
+			__func__, EQDMA_C2H_PFCH_CFG_ADDR, reg_val);
+
+#define EQDMA_C2H_PFCH_CFG_1_QCNT_MASK		(EQDMA_PFTCH_CACHE_DEPTH - 4)
+#define EQDMA_C2H_PFCH_CFG_1_EVNT_QCNT_TH	EQDMA_C2H_PFCH_CFG_1_QCNT_MASK
+	pfch_cache_dpth = qdma_reg_read(dev_hndl,
+			EQDMA_C2H_PFCH_CACHE_DEPTH_ADDR);
+
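+	/* Queue-count and event thresholds sit four entries below the
+	 * prefetch cache depth read above.
+	 */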
+	reg_val =
+		FIELD_SET(C2H_PFCH_CFG_1_EVT_QCNT_TH_MASK,
+				(pfch_cache_dpth - 4)) |
+		FIELD_SET(C2H_PFCH_CFG_1_QCNT_MASK, (pfch_cache_dpth - 4));
+	qdma_reg_write(dev_hndl, EQDMA_C2H_PFCH_CFG_1_ADDR, reg_val);
+	reg_val = qdma_reg_read(dev_hndl, EQDMA_C2H_PFCH_CFG_1_ADDR);
+	qdma_log_info("%s: reg = 0x%08X val = 0x%08X\n",
+			__func__, EQDMA_C2H_PFCH_CFG_1_ADDR, reg_val);
+
+#define EQDMA_C2H_PFCH_CFG_2_FENCE_EN               1
+#define C2H_PFCH_CFG_2_RSVD_DFLT                    0
+#define C2H_PFCH_CFG_2_VAR_DESC_NO_DROP_DFLT        0
+#define C2H_PFCH_CFG_2_LL_SZ_TH_DFLT                1024
+#define C2H_PFCH_CFG_2_VAR_DESC_NUM                 15
+#define C2H_PFCH_CFG_2_NUM_DFLT                     8
+
+	reg_val =
+		FIELD_SET(C2H_PFCH_CFG_2_FENCE_MASK,
+				EQDMA_C2H_PFCH_CFG_2_FENCE_EN) |
+		FIELD_SET(C2H_PFCH_CFG_2_RSVD_MASK, C2H_PFCH_CFG_2_RSVD_DFLT) |
+		FIELD_SET(C2H_PFCH_CFG_2_VAR_DESC_NO_DROP_MASK,
+					C2H_PFCH_CFG_2_VAR_DESC_NO_DROP_DFLT) |
+		FIELD_SET(C2H_PFCH_CFG_2_LL_SZ_TH_MASK,
+				C2H_PFCH_CFG_2_LL_SZ_TH_DFLT) |
+		FIELD_SET(C2H_PFCH_CFG_2_VAR_DESC_NUM_MASK,
+					C2H_PFCH_CFG_2_VAR_DESC_NUM) |
+		FIELD_SET(C2H_PFCH_CFG_2_NUM_MASK, C2H_PFCH_CFG_2_NUM_DFLT);
+	qdma_reg_write(dev_hndl, EQDMA_C2H_PFCH_CFG_2_ADDR, reg_val);
+	reg_val = qdma_reg_read(dev_hndl, EQDMA_C2H_PFCH_CFG_2_ADDR);
+	qdma_log_info("%s: reg = 0x%08X val = 0x%08X\n",
+			__func__, EQDMA_C2H_PFCH_CFG_2_ADDR, reg_val);
+#define PFCH_CFG_3_RSVD_DFLT                               0
+#define PFCH_CFG_3_VAR_DESC_FL_FREE_CNT_TH_DFLT            256
+#define PFCH_CFG_3_VAR_DESC_LG_PKT_CAM_CN_TH_DFLT          0
+
+	reg_val =
+		FIELD_SET(PFCH_CFG_3_RSVD_MASK, PFCH_CFG_3_RSVD_DFLT) |
+		FIELD_SET(PFCH_CFG_3_VAR_DESC_FL_FREE_CNT_TH_MASK,
+				PFCH_CFG_3_VAR_DESC_FL_FREE_CNT_TH_DFLT) |
+		FIELD_SET(PFCH_CFG_3_VAR_DESC_LG_PKT_CAM_CN_TH_MASK,
+				PFCH_CFG_3_VAR_DESC_LG_PKT_CAM_CN_TH_DFLT);
+	qdma_reg_write(dev_hndl, EQDMA_PFCH_CFG_3_ADDR, reg_val);
+	reg_val = qdma_reg_read(dev_hndl, EQDMA_PFCH_CFG_3_ADDR);
+	qdma_log_info("%s: reg = 0x%08X val = 0x%08X\n",
+			__func__, EQDMA_PFCH_CFG_3_ADDR, reg_val);
+#define EQDMA_PFCH_CFG_4_GLB_EVT_TIMER_TICK             64
+#define PFCH_CFG_4_DISABLE_GLB_EVT_TIMER_DFLT           0
+#define EQDMA_PFCH_CFG_4_EVT_TIMER_TICK                 400
+#define PFCH_CFG_4_DISABLE_EVT_TIMER_DFLT               0
+
+	reg_val =
+		FIELD_SET(PFCH_CFG_4_GLB_EVT_TIMER_TICK_MASK,
+				EQDMA_PFCH_CFG_4_GLB_EVT_TIMER_TICK) |
+		FIELD_SET(PFCH_CFG_4_DISABLE_GLB_EVT_TIMER_MASK,
+				PFCH_CFG_4_DISABLE_GLB_EVT_TIMER_DFLT) |
+		FIELD_SET(PFCH_CFG_4_EVT_TIMER_TICK_MASK,
+				EQDMA_PFCH_CFG_4_EVT_TIMER_TICK) |
+		FIELD_SET(PFCH_CFG_4_DISABLE_EVT_TIMER_MASK,
+				PFCH_CFG_4_DISABLE_EVT_TIMER_DFLT);
+	qdma_reg_write(dev_hndl, EQDMA_PFCH_CFG_4_ADDR, reg_val);
+	reg_val = qdma_reg_read(dev_hndl, EQDMA_PFCH_CFG_4_ADDR);
+	qdma_log_info("%s: reg = 0x%08X val = 0x%08X\n",
+			__func__, EQDMA_PFCH_CFG_4_ADDR, reg_val);
+/**************** SET_2 *******************/
+#define C2H_CRDT_COAL_CFG_1_RSVD_1_DFLT             0
+#define C2H_CRDT_COAL_CFG_1_PLD_FIFO_TH_DFLT        16
+#define EQDMA_C2H_CRDT_COAL_CFG_1_TIMER_TH          16
+
+	reg_val =
+		FIELD_SET(C2H_CRDT_COAL_CFG_1_RSVD_1_MASK,
+				C2H_CRDT_COAL_CFG_1_RSVD_1_DFLT) |
+		FIELD_SET(C2H_CRDT_COAL_CFG_1_PLD_FIFO_TH_MASK,
+				C2H_CRDT_COAL_CFG_1_PLD_FIFO_TH_DFLT) |
+		FIELD_SET(C2H_CRDT_COAL_CFG_1_TIMER_TH_MASK,
+				EQDMA_C2H_CRDT_COAL_CFG_1_TIMER_TH);
+	qdma_reg_write(dev_hndl, EQDMA_C2H_CRDT_COAL_CFG_1_ADDR, reg_val);
+	reg_val = qdma_reg_read(dev_hndl, EQDMA_C2H_CRDT_COAL_CFG_1_ADDR);
+	qdma_log_info("%s: reg = 0x%08X val = 0x%08X\n",
+			__func__, EQDMA_C2H_CRDT_COAL_CFG_1_ADDR, reg_val);
+#define C2H_CRDT_COAL_CFG_2_RSVD_1_DFLT                   0
+#define EQDMA_C2H_CRDT_COAL_CFG_2_FIFO_TH	(EQDMA_PFTCH_CACHE_DEPTH - 8)
+#define C2H_CRDT_COAL_CFG_2_RESERVED1_DFLT                0
+#define EQDMA_C2H_CRDT_COAL_CFG_2_CRDT_TH                 96
+
+	reg_val =
+		FIELD_SET(C2H_CRDT_COAL_CFG_2_RSVD_1_MASK,
+					C2H_CRDT_COAL_CFG_2_RSVD_1_DFLT) |
+		FIELD_SET(C2H_CRDT_COAL_CFG_2_FIFO_TH_MASK,
+					(pfch_cache_dpth - 8)) |
+		FIELD_SET(C2H_CRDT_COAL_CFG_2_RESERVED1_MASK,
+					C2H_CRDT_COAL_CFG_2_RESERVED1_DFLT) |
+		FIELD_SET(C2H_CRDT_COAL_CFG_2_NT_TH_MASK,
+					EQDMA_C2H_CRDT_COAL_CFG_2_CRDT_TH);
+	qdma_reg_write(dev_hndl, EQDMA_C2H_CRDT_COAL_CFG_2_ADDR, reg_val);
+	reg_val = qdma_reg_read(dev_hndl, EQDMA_C2H_CRDT_COAL_CFG_2_ADDR);
+	qdma_log_info("%s: reg = 0x%08X val = 0x%08X\n",
+			__func__, EQDMA_C2H_CRDT_COAL_CFG_2_ADDR, reg_val);
+
+/**************** SET_3 *******************/
+#define EQDMA_GLBL2_RRQ_PCIE_THROT_REQ_EN                  0
+#define GLBL2_RRQ_PCIE_THROT_REQ_DFLT                      192
+#define GLBL2_RRQ_PCIE_THROT_DAT_EN_DFLT                   1
+#define GLBL2_RRQ_PCIE_THROT_DAT_DFLT                      20480
+
+	reg_val =
+		FIELD_SET(GLBL2_RRQ_PCIE_THROT_REQ_EN_MASK,
+					EQDMA_GLBL2_RRQ_PCIE_THROT_REQ_EN) |
+		FIELD_SET(GLBL2_RRQ_PCIE_THROT_REQ_MASK,
+					GLBL2_RRQ_PCIE_THROT_REQ_DFLT) |
+		FIELD_SET(GLBL2_RRQ_PCIE_THROT_DAT_EN_MASK,
+					GLBL2_RRQ_PCIE_THROT_DAT_EN_DFLT) |
+		FIELD_SET(GLBL2_RRQ_PCIE_THROT_DAT_MASK,
+					GLBL2_RRQ_PCIE_THROT_DAT_DFLT);
+	qdma_reg_write(dev_hndl, EQDMA_GLBL2_RRQ_PCIE_THROT_ADDR, reg_val);
+	reg_val = qdma_reg_read(dev_hndl, EQDMA_GLBL2_RRQ_PCIE_THROT_ADDR);
+	qdma_log_info("%s: reg = 0x%08X val = 0x%08X\n",
+			__func__, EQDMA_GLBL2_RRQ_PCIE_THROT_ADDR, reg_val);
+#define GLBL2_RRQ_AXIMM_THROT_REQ_EN_DFLT                  0
+#define GLBL2_RRQ_AXIMM_THROT_REQ_DFLT                     0
+#define GLBL2_RRQ_AXIMM_THROT_DAT_EN_DFLT                  0
+#define GLBL2_RRQ_AXIMM_THROT_DAT_DFLT                     0
+
+	reg_val =
+		FIELD_SET(GLBL2_RRQ_AXIMM_THROT_REQ_EN_MASK,
+					GLBL2_RRQ_AXIMM_THROT_REQ_EN_DFLT) |
+		FIELD_SET(GLBL2_RRQ_AXIMM_THROT_REQ_MASK,
+					GLBL2_RRQ_AXIMM_THROT_REQ_DFLT) |
+		FIELD_SET(GLBL2_RRQ_AXIMM_THROT_DAT_EN_MASK,
+					GLBL2_RRQ_AXIMM_THROT_DAT_EN_DFLT) |
+		FIELD_SET(GLBL2_RRQ_AXIMM_THROT_DAT_MASK,
+					GLBL2_RRQ_AXIMM_THROT_DAT_DFLT);
+	qdma_reg_write(dev_hndl, EQDMA_GLBL2_RRQ_AXIMM_THROT_ADDR, reg_val);
+	reg_val = qdma_reg_read(dev_hndl, EQDMA_GLBL2_RRQ_AXIMM_THROT_ADDR);
+	qdma_log_info("%s: reg = 0x%08X val = 0x%08X\n",
+			__func__, EQDMA_GLBL2_RRQ_AXIMM_THROT_ADDR, reg_val);
+#define GLBL2_RRQ_BRG_THROT_REQ_EN_DFLT                    1
+#define GLBL2_RRQ_BRG_THROT_REQ_DFLT             GLBL2_RRQ_PCIE_THROT_REQ_DFLT
+#define GLBL2_RRQ_BRG_THROT_DAT_EN_DFLT                    1
+
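+	/* Reuse the PCIe data threshold programmed above as the bridge
+	 * throttle data threshold.
+	 */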
+	reg_val = qdma_reg_read(dev_hndl, EQDMA_GLBL2_RRQ_PCIE_THROT_ADDR);
+	qdma_log_info("%s: BF reg = 0x%08X val = 0x%08X\n",
+			__func__, EQDMA_GLBL2_RRQ_PCIE_THROT_ADDR, reg_val);
+	data_th = FIELD_GET(GLBL2_RRQ_PCIE_THROT_DAT_MASK, reg_val);
+
+	reg_val =
+		FIELD_SET(GLBL2_RRQ_BRG_THROT_REQ_EN_MASK,
+				GLBL2_RRQ_BRG_THROT_REQ_EN_DFLT) |
+		FIELD_SET(GLBL2_RRQ_BRG_THROT_REQ_MASK,
+				GLBL2_RRQ_BRG_THROT_REQ_DFLT) |
+		FIELD_SET(GLBL2_RRQ_BRG_THROT_DAT_EN_MASK,
+				GLBL2_RRQ_BRG_THROT_DAT_EN_DFLT) |
+		FIELD_SET(GLBL2_RRQ_BRG_THROT_DAT_MASK, data_th);
+	qdma_reg_write(dev_hndl, EQDMA_GLBL2_RRQ_BRG_THROT_ADDR, reg_val);
+	reg_val = qdma_reg_read(dev_hndl, EQDMA_GLBL2_RRQ_BRG_THROT_ADDR);
+	qdma_log_info("%s: reg = 0x%08X val = 0x%08X\n",
+			__func__, EQDMA_GLBL2_RRQ_BRG_THROT_ADDR, reg_val);
+
+/******************* SET_4 *************************/
+#define EQDMA_H2C_REQ_THROT_PCIE_EN_REQ                     1
+#define EQDMA_H2C_REQ_THROT_PCIE_REQ_TH          GLBL2_RRQ_PCIE_THROT_REQ_DFLT
+#define EQDMA_H2C_REQ_THROT_PCIE_EN_DATA                    1
+#define EQDMA_H2C_REQ_THROT_PCIE_DATA_TH                    24576
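+	/* Enable H2C PCIe request and data throttling; the request
+	 * threshold mirrors the RRQ PCIe setting above.
+	 */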
+
+	reg_val =
+		FIELD_SET(H2C_REQ_THROT_PCIE_EN_REQ_MASK,
+				EQDMA_H2C_REQ_THROT_PCIE_EN_REQ) |
+		FIELD_SET(H2C_REQ_THROT_PCIE_MASK,
+				EQDMA_H2C_REQ_THROT_PCIE_REQ_TH) |
+		FIELD_SET(H2C_REQ_THROT_PCIE_EN_DATA_MASK,
+				EQDMA_H2C_REQ_THROT_PCIE_EN_DATA) |
+		FIELD_SET(H2C_REQ_THROT_PCIE_DATA_THRESH_MASK,
+				EQDMA_H2C_REQ_THROT_PCIE_DATA_TH);
+	qdma_reg_write(dev_hndl, EQDMA_H2C_REQ_THROT_PCIE_ADDR, reg_val);
+	reg_val = qdma_reg_read(dev_hndl, EQDMA_H2C_REQ_THROT_PCIE_ADDR);
+	qdma_log_info("%s: reg = 0x%08X val = 0x%08X\n",
+			__func__, EQDMA_H2C_REQ_THROT_PCIE_ADDR, reg_val);
+#define EQDMA_H2C_REQ_THROT_AXIMM_EN_REQ            1
+#define EQDMA_H2C_REQ_THROT_AXIMM_REQ_TH            64
+#define EQDMA_H2C_REQ_THROT_AXIMM_EN_DATA           1
+#define EQDMA_H2C_REQ_THROT_AXIMM_DATA_TH           16384
+
+	reg_val =
+		FIELD_SET(H2C_REQ_THROT_AXIMM_EN_REQ_MASK,
+					EQDMA_H2C_REQ_THROT_AXIMM_EN_REQ) |
+		FIELD_SET(H2C_REQ_THROT_AXIMM_MASK,
+					EQDMA_H2C_REQ_THROT_AXIMM_REQ_TH) |
+		FIELD_SET(H2C_REQ_THROT_AXIMM_EN_DATA_MASK,
+					EQDMA_H2C_REQ_THROT_AXIMM_EN_DATA) |
+		FIELD_SET(H2C_REQ_THROT_AXIMM_DATA_THRESH_MASK,
+					EQDMA_H2C_REQ_THROT_AXIMM_DATA_TH);
+	qdma_reg_write(dev_hndl, EQDMA_H2C_REQ_THROT_AXIMM_ADDR, reg_val);
+	reg_val = qdma_reg_read(dev_hndl, EQDMA_H2C_REQ_THROT_AXIMM_ADDR);
+	qdma_log_info("%s: reg = 0x%08X val = 0x%08X\n",
+			__func__, EQDMA_H2C_REQ_THROT_AXIMM_ADDR, reg_val);
+
+#define H2C_MM_DATA_THROTTLE_RSVD_1_DFLT        0
+#define EQDMA_H2C_MM_DATA_TH_EN		      GLBL2_RRQ_PCIE_THROT_DAT_EN_DFLT
+#define EQDMA_H2C_MM_DATA_TH		      GLBL2_RRQ_PCIE_THROT_DAT_DFLT
+
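+	/* Mirror the RRQ PCIe data-throttle enable and threshold for
+	 * H2C MM traffic.
+	 */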
+	reg_val =
+		FIELD_SET(H2C_MM_DATA_THROTTLE_RSVD_1_MASK,
+				H2C_MM_DATA_THROTTLE_RSVD_1_DFLT) |
+		FIELD_SET(H2C_MM_DATA_THROTTLE_DAT_EN_MASK,
+				EQDMA_H2C_MM_DATA_TH_EN) |
+		FIELD_SET(H2C_MM_DATA_THROTTLE_DAT_MASK, EQDMA_H2C_MM_DATA_TH);
+	qdma_reg_write(dev_hndl, EQDMA_H2C_MM_DATA_THROTTLE_ADDR, reg_val);
+	reg_val = qdma_reg_read(dev_hndl, EQDMA_H2C_MM_DATA_THROTTLE_ADDR);
+	qdma_log_info("%s: reg = 0x%08X val = 0x%08X\n",
+			__func__, EQDMA_H2C_MM_DATA_THROTTLE_ADDR, reg_val);
+
+}
+
 /*
  * eqdma_indirect_reg_invalidate() - helper function to invalidate indirect
  *					context registers.
@@ -2169,26 +2479,10 @@ int eqdma_set_default_global_csr(void *dev_hndl)
 						DEFAULT_MAX_DSC_FETCH) |
 				FIELD_SET(GLBL_DSC_CFG_WB_ACC_INT_MASK,
 						DEFAULT_WRB_INT);
-		} else if (eqdma_ip_version == EQDMA_IP_VERSION_5) {
-			/* For QDMA4.0 and QDMA5.0, HW design and register map
-			 * is same except some performance optimizations
-			 */
-			reg_val =
-				FIELD_SET(GLBL_DSC_CFG_C2H_UODSC_LIMIT_MASK,
-					EQDMA5_DEFAULT_C2H_UODSC_LIMIT) |
-				FIELD_SET(GLBL_DSC_CFG_H2C_UODSC_LIMIT_MASK,
-					EQDMA5_DEFAULT_H2C_UODSC_LIMIT) |
-				FIELD_SET(GLBL_DSC_CFG_MAXFETCH_MASK,
-					EQDMA5_DEFAULT_MAX_DSC_FETCH) |
-				FIELD_SET(GLBL_DSC_CFG_WB_ACC_INT_MASK,
-					EQDMA5_DEFAULT_WRB_INT);
-		} else {
-			qdma_log_error("%s: ip_type = %d is invalid, err:%d\n",
-				__func__, eqdma_ip_version,
-				-QDMA_ERR_INV_PARAM);
-			return -QDMA_ERR_INV_PARAM;
+
+			qdma_reg_write(dev_hndl, EQDMA_GLBL_DSC_CFG_ADDR,
+					reg_val);
 		}
-		qdma_reg_write(dev_hndl, EQDMA_GLBL_DSC_CFG_ADDR, reg_val);
 	}
 
 	if (dev_cap.st_en) {
@@ -2205,18 +2499,16 @@ int eqdma_set_default_global_csr(void *dev_hndl)
 					(cfg_val >> 2)) |
 				FIELD_SET(C2H_PFCH_CFG_1_EVT_QCNT_TH_MASK,
 						((cfg_val >> 2) - 4));
-		} else {
-			/* Performance optimization for EQDMA5.0. */
-			reg_val = FIELD_SET(C2H_PFCH_CFG_1_QCNT_MASK,
-						EQDMA5_DEFAULT_C2H_PFCH_QCNT) |
-				  FIELD_SET(C2H_PFCH_CFG_1_EVT_QCNT_TH_MASK,
-						EQDMA5_DEFAULT_C2H_EVT_QCNT_TH);
-		}
-		qdma_reg_write(dev_hndl, EQDMA_C2H_PFCH_CFG_1_ADDR, reg_val);
 
-		reg_val = qdma_reg_read(dev_hndl, EQDMA_C2H_PFCH_CFG_2_ADDR);
-		reg_val |= FIELD_SET(C2H_PFCH_CFG_2_FENCE_MASK, 1);
-		qdma_reg_write(dev_hndl, EQDMA_C2H_PFCH_CFG_2_ADDR, reg_val);
+			qdma_reg_write(dev_hndl, EQDMA_C2H_PFCH_CFG_1_ADDR,
+					reg_val);
+
+			reg_val = qdma_reg_read(dev_hndl,
+					EQDMA_C2H_PFCH_CFG_2_ADDR);
+			reg_val |= FIELD_SET(C2H_PFCH_CFG_2_FENCE_MASK, 1);
+			qdma_reg_write(dev_hndl, EQDMA_C2H_PFCH_CFG_2_ADDR,
+					reg_val);
+		}
 
 		/* C2H interrupt timer tick */
 		qdma_reg_write(dev_hndl, EQDMA_C2H_INT_TIMER_TICK_ADDR,
@@ -2244,29 +2536,14 @@ int eqdma_set_default_global_csr(void *dev_hndl)
 						EQDMA_H2C_THROT_REQ_THRESH) |
 				FIELD_SET(H2C_REQ_THROT_PCIE_EN_REQ_MASK,
 						EQDMA_THROT_EN_REQ);
-		} else if (eqdma_ip_version == EQDMA_IP_VERSION_5) {
-			/* For QDMA4.0 and QDMA5.0, HW design and register map
-			 * is same except some performance optimizations
-			 */
-			reg_val =
-				FIELD_SET(H2C_REQ_THROT_PCIE_DATA_THRESH_MASK,
-						EQDMA5_H2C_THROT_DATA_THRESH) |
-				FIELD_SET(H2C_REQ_THROT_PCIE_EN_DATA_MASK,
-						EQDMA5_THROT_EN_DATA) |
-				FIELD_SET(H2C_REQ_THROT_PCIE_MASK,
-						EQDMA5_H2C_THROT_REQ_THRESH) |
-				FIELD_SET(H2C_REQ_THROT_PCIE_EN_REQ_MASK,
-						EQDMA5_THROT_EN_REQ);
-		} else {
-			qdma_log_error("%s: ip_type = %d is invalid, err:%d\n",
-						__func__, eqdma_ip_version,
-					   -QDMA_ERR_INV_PARAM);
-			return -QDMA_ERR_INV_PARAM;
+
+			qdma_reg_write(dev_hndl, EQDMA_H2C_REQ_THROT_PCIE_ADDR,
+					reg_val);
 		}
-		qdma_reg_write(dev_hndl, EQDMA_H2C_REQ_THROT_PCIE_ADDR,
-			reg_val);
 	}
 
+	if (eqdma_ip_version == EQDMA_IP_VERSION_5)
+		eqdma_set_perf_opt(dev_hndl);
 	return QDMA_SUCCESS;
 }
 
diff --git a/QDMA/DPDK/drivers/net/qdma/qdma_access/qdma_access_version.h b/QDMA/DPDK/drivers/net/qdma/qdma_access/qdma_access_version.h
index 602defd1a8808baa9eef338426f37df9f2a1fca1..94bec64b54a7ba14f58444e15414623911406175 100755
--- a/QDMA/DPDK/drivers/net/qdma/qdma_access/qdma_access_version.h
+++ b/QDMA/DPDK/drivers/net/qdma/qdma_access/qdma_access_version.h
@@ -37,7 +37,7 @@
 
 #define QDMA_VERSION_MAJOR	2023
 #define QDMA_VERSION_MINOR	1
-#define QDMA_VERSION_PATCH	0
+#define QDMA_VERSION_PATCH	1
 
 #define QDMA_VERSION_STR	\
 	__stringify(QDMA_VERSION_MAJOR) "." \
diff --git a/QDMA/DPDK/drivers/net/qdma/qdma_access/qdma_soft_access/qdma_soft_access.c b/QDMA/DPDK/drivers/net/qdma/qdma_access/qdma_soft_access/qdma_soft_access.c
index b8f1a7ecc1d0c1061954fb898961d2d0343ad22e..f26a707f15ea7b67a1ef42b72425a5e5ef6b9dc8 100755
--- a/QDMA/DPDK/drivers/net/qdma/qdma_access/qdma_soft_access/qdma_soft_access.c
+++ b/QDMA/DPDK/drivers/net/qdma/qdma_access/qdma_soft_access/qdma_soft_access.c
@@ -4022,16 +4022,9 @@ int qdma_queue_pidx_update(void *dev_hndl, uint8_t is_vf, uint16_t qid,
 	uint32_t reg_addr = 0;
 	uint32_t reg_val = 0;
 
-	if (!dev_hndl) {
-		qdma_log_error("%s: dev_handle is NULL, err:%d\n",
-						__func__,
-					   -QDMA_ERR_INV_PARAM);
-		return -QDMA_ERR_INV_PARAM;
-	}
-	if (!reg_info) {
-		qdma_log_error("%s: reg_info is NULL, err:%d\n",
-						__func__,
-					   -QDMA_ERR_INV_PARAM);
+	if (!dev_hndl || !reg_info) {
+		qdma_log_error("%s: dev_handle is (%p), reg_info is (%p), err:%d\n",
+			__func__, dev_hndl, reg_info, -QDMA_ERR_INV_PARAM);
 		return -QDMA_ERR_INV_PARAM;
 	}
 
@@ -4077,17 +4070,9 @@ int qdma_queue_cmpt_cidx_update(void *dev_hndl, uint8_t is_vf,
 		QDMA_OFFSET_DMAP_SEL_CMPT_CIDX;
 	uint32_t reg_val = 0;
 
-	if (!dev_hndl) {
-		qdma_log_error("%s: dev_handle is NULL, err:%d\n",
-						__func__,
-					   -QDMA_ERR_INV_PARAM);
-		return -QDMA_ERR_INV_PARAM;
-	}
-
-	if (!reg_info) {
-		qdma_log_error("%s: reg_info is NULL, err:%d\n",
-						__func__,
-					   -QDMA_ERR_INV_PARAM);
+	if (!dev_hndl || !reg_info) {
+		qdma_log_error("%s: dev_handle (%p) reg_info (%p) , err:%d\n",
+			__func__, dev_hndl, reg_info, -QDMA_ERR_INV_PARAM);
 		return -QDMA_ERR_INV_PARAM;
 	}
 
@@ -4134,15 +4119,9 @@ int qdma_queue_intr_cidx_update(void *dev_hndl, uint8_t is_vf,
 		QDMA_OFFSET_DMAP_SEL_INT_CIDX;
 	uint32_t reg_val = 0;
 
-	if (!dev_hndl) {
-		qdma_log_error("%s: dev_handle is NULL, err:%d\n",
-				__func__, -QDMA_ERR_INV_PARAM);
-		return -QDMA_ERR_INV_PARAM;
-	}
-
-	if (!reg_info) {
-		qdma_log_error("%s: reg_info is NULL, err:%d\n",
-					__func__, -QDMA_ERR_INV_PARAM);
+	if (!dev_hndl || !reg_info) {
+		qdma_log_error("%s: dev_handle (%p) reg_info (%p), err:%d\n",
+			__func__, dev_hndl, reg_info, -QDMA_ERR_INV_PARAM);
 		return -QDMA_ERR_INV_PARAM;
 	}
 
diff --git a/QDMA/DPDK/drivers/net/qdma/qdma_devops.c b/QDMA/DPDK/drivers/net/qdma/qdma_devops.c
index 648db10f2c032fba27313d0b734d340812f5d3a2..5b2fbbc1fe7c956fdefdb370a68ae8e85a9c0542 100755
--- a/QDMA/DPDK/drivers/net/qdma/qdma_devops.c
+++ b/QDMA/DPDK/drivers/net/qdma/qdma_devops.c
@@ -233,25 +233,10 @@ int qdma_dev_notify_qdel(struct rte_eth_dev *dev, uint32_t qidx_hw,
 
 uint8_t qmda_get_desc_sz_idx(enum rte_pmd_qdma_bypass_desc_len size)
 {
-	uint8_t ret;
-	switch (size) {
-	case RTE_PMD_QDMA_BYPASS_DESC_LEN_8B:
-		ret = 0;
-		break;
-	case RTE_PMD_QDMA_BYPASS_DESC_LEN_16B:
-		ret = 1;
-		break;
-	case RTE_PMD_QDMA_BYPASS_DESC_LEN_32B:
-		ret = 2;
-		break;
-	case RTE_PMD_QDMA_BYPASS_DESC_LEN_64B:
-		ret = 3;
-		break;
-	default:
-		/* Suppress compiler warnings*/
-		ret = 0;
-	}
-	return ret;
+	return ((size == RTE_PMD_QDMA_BYPASS_DESC_LEN_64B) ? 3 :
+			(size == RTE_PMD_QDMA_BYPASS_DESC_LEN_32B) ? 2 :
+			(size == RTE_PMD_QDMA_BYPASS_DESC_LEN_16B) ? 1 :
+			/* (size == RTE_PMD_QDMA_BYPASS_DESC_LEN_8B) */0);
 }
 
 static inline int
@@ -393,6 +378,7 @@ int qdma_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
 	rxq->mb_pool = mb_pool;
 	rxq->dev = dev;
 	rxq->st_mode = qdma_dev->q_info[rx_queue_id].queue_mode;
+
 	rxq->nb_rx_desc = (nb_rx_desc + 1);
 	/* <= 2018.2 IP
 	 * double the cmpl ring size to avoid run out of cmpl entry while
@@ -765,6 +751,7 @@ int qdma_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
 	}
 
 	txq->st_mode = qdma_dev->q_info[tx_queue_id].queue_mode;
+
 	txq->en_bypass = (qdma_dev->q_info[tx_queue_id].tx_bypass_mode) ? 1 : 0;
 	txq->bypass_desc_sz = qdma_dev->q_info[tx_queue_id].tx_bypass_desc_sz;
 
@@ -1010,7 +997,9 @@ int qdma_dev_link_update(struct rte_eth_dev *dev,
 {
 	dev->data->dev_link.link_status = ETH_LINK_UP;
 	dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
-	dev->data->dev_link.link_speed = ETH_SPEED_NUM_100G;
+
+	/* TODO: Configure link speed by reading hardware capabilities */
+	dev->data->dev_link.link_speed = ETH_SPEED_NUM_200G;
 
 	PMD_DRV_LOG(INFO, "Link update done\n");
 	return 0;
@@ -1610,14 +1599,14 @@ int qdma_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t qid)
 		if (!(qdma_dev->ip_type == EQDMA_SOFT_IP)) {
 			while (rxq->wb_status->pidx !=
 					rxq->cmpt_cidx_info.wrb_cidx) {
-				usleep(10);
+				rte_delay_us_block(10);
 				if (cnt++ > 10000)
 					break;
 			}
 		}
 	} else { /* MM mode */
 		while (rxq->wb_status->cidx != rxq->q_pidx_info.pidx) {
-			usleep(10);
+			rte_delay_us_block(10);
 			if (cnt++ > 10000)
 				break;
 		}
@@ -1666,7 +1655,7 @@ int qdma_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t qid)
 	txq->status = RTE_ETH_QUEUE_STATE_STOPPED;
 	/* Wait for TXQ to send out all packets. */
 	while (txq->wb_status->cidx != txq->q_pidx_info.pidx) {
-		usleep(10);
+		rte_delay_us_block(10);
 		if (cnt++ > 10000)
 			break;
 	}
diff --git a/QDMA/DPDK/drivers/net/qdma/qdma_dpdk_compat.c b/QDMA/DPDK/drivers/net/qdma/qdma_dpdk_compat.c
index 74ec7435ce8ba3e6998c242ff41f93441c3e9038..1b4e76717e38f83c5ce93b63082d98ba30fb8f6a 100755
--- a/QDMA/DPDK/drivers/net/qdma/qdma_dpdk_compat.c
+++ b/QDMA/DPDK/drivers/net/qdma/qdma_dpdk_compat.c
@@ -22,7 +22,6 @@
 #include "qdma_platform.h"
 #include "qdma_devops.h"
 
-#define UNUSED(param)	param
 
 #if defined(QDMA_DPDK_21_11) || defined(QDMA_DPDK_22_11)
 
@@ -218,13 +217,14 @@ void rte_pmd_qdma_compat_memzone_reserve_aligned(void)
 					sizeof(*rte_eth_devices));
 }
 
-void rte_pmd_qdma_get_bdf(uint32_t m_id, uint32_t *bus, uint32_t *dev, uint32_t *fn)
+void rte_pmd_qdma_get_bdf(uint32_t m_id, uint32_t *bus,
+		uint32_t *dev, uint32_t *fn)
 {
-    struct rte_pci_device *pci_dev;
-    pci_dev = RTE_ETH_DEV_TO_PCI(&rte_eth_devices[m_id]);
-    *bus = pci_dev->addr.bus;
-    *dev = pci_dev->addr.devid;
-    *fn = pci_dev->addr.function;
+	struct rte_pci_device *pci_dev;
+	pci_dev = RTE_ETH_DEV_TO_PCI(&rte_eth_devices[m_id]);
+	*bus = pci_dev->addr.bus;
+	*dev = pci_dev->addr.devid;
+	*fn = pci_dev->addr.function;
 }
 
 int rte_pmd_qdma_dev_remove(int port_id)
@@ -234,31 +234,44 @@ int rte_pmd_qdma_dev_remove(int port_id)
 	return rte_dev_remove(dev);
 }
 
-struct rte_device* rte_pmd_qdma_get_device(int port_id)
+struct rte_device *rte_pmd_qdma_get_device(int port_id)
 {
 	struct rte_device *dev;
 	dev = rte_eth_devices[port_id].device;
 	return dev;
 }
 
+bool rte_pmd_qdma_validate_dev(int port_id)
+{
+	struct rte_device *device = rte_pmd_qdma_get_device(port_id);
+
+	return (device &&
+		(!strcmp(device->driver->name, "net_qdma") ||
+		 !strcmp(device->driver->name, "net_qdma_vf")));
+}
+
 uint16_t rte_pmd_qdma_get_dev_id(int port_id)
 {
 	struct rte_pci_device *pci_dev;
 	pci_dev = RTE_ETH_DEV_TO_PCI(&rte_eth_devices[port_id]);
-	return (pci_dev->id.device_id);
+	return pci_dev->id.device_id;
 }
 
-struct rte_pci_device* rte_pmd_qdma_eth_dev_to_pci(int port_id)
+struct rte_pci_device *rte_pmd_qdma_eth_dev_to_pci(int port_id)
 {
 	return RTE_ETH_DEV_TO_PCI(&rte_eth_devices[port_id]);
 }
 
-unsigned int rte_pmd_qdma_compat_pci_read_reg(int port_id, unsigned int bar, unsigned int offset)
+unsigned int rte_pmd_qdma_compat_pci_read_reg(int port_id,
+		unsigned int bar, unsigned int offset)
 {
 	return qdma_pci_read_reg(&rte_eth_devices[port_id], bar, offset);
 }
 
-void rte_pmd_qdma_compat_pci_write_reg(int port_id, uint32_t bar, uint32_t offset, uint32_t reg_val)
+void rte_pmd_qdma_compat_pci_write_reg(int port_id, uint32_t bar,
+		uint32_t offset, uint32_t reg_val)
 {
 	qdma_pci_write_reg(&rte_eth_devices[port_id], bar, offset, reg_val);
 }
@@ -273,34 +286,31 @@ void rte_pmd_qdma_dev_started(int port_id, bool status)
 int rte_pmd_qdma_dev_fp_ops_config(int port_id)
 {
 #if (defined(QDMA_DPDK_21_11) || defined(QDMA_DPDK_22_11))
+	struct rte_eth_dev *dev;
+	struct rte_eth_fp_ops *fpo = rte_eth_fp_ops;
 
-        struct rte_eth_dev *dev;
-        struct rte_eth_fp_ops *fpo = rte_eth_fp_ops;
-
-        if (port_id < 0 || port_id >= rte_eth_dev_count_avail()) {
-                PMD_DRV_LOG(ERR, "%s:%d Wrong port id %d\n", __func__, __LINE__,
-                        port_id);
-                return -ENOTSUP;
-        }
-        dev = &rte_eth_devices[port_id];
-
-        fpo[port_id].rx_pkt_burst = dev->rx_pkt_burst;
-        fpo[port_id].tx_pkt_burst = dev->tx_pkt_burst;
-        fpo[port_id].rx_queue_count = dev->rx_queue_count;
-        fpo[port_id].rx_descriptor_status = dev->rx_descriptor_status;
-        fpo[port_id].tx_descriptor_status = dev->tx_descriptor_status;
-        fpo[port_id].rxq.data = dev->data->rx_queues;
-        fpo[port_id].txq.data = dev->data->tx_queues;
+	if (port_id < 0 || port_id >= rte_eth_dev_count_avail()) {
+		PMD_DRV_LOG(ERR,
+			"%s:%d Wrong port id %d\n",
+			__func__, __LINE__, port_id);
+		return -ENOTSUP;
+	}
+	dev = &rte_eth_devices[port_id];
 
-        return 0;
+	fpo[port_id].rx_pkt_burst = dev->rx_pkt_burst;
+	fpo[port_id].tx_pkt_burst = dev->tx_pkt_burst;
+	fpo[port_id].rx_queue_count = dev->rx_queue_count;
+	fpo[port_id].rx_descriptor_status = dev->rx_descriptor_status;
+	fpo[port_id].tx_descriptor_status = dev->tx_descriptor_status;
+	fpo[port_id].rxq.data = dev->data->rx_queues;
+	fpo[port_id].txq.data = dev->data->tx_queues;
 
+	return 0;
 #endif
 
 #ifdef QDMA_DPDK_20_11
-
-        UNUSED(port_id);
+	RTE_SET_USED(port_id);
 	return 0;
-
 #endif
+}
 
-}
\ No newline at end of file
diff --git a/QDMA/DPDK/drivers/net/qdma/qdma_dpdk_compat.h b/QDMA/DPDK/drivers/net/qdma/qdma_dpdk_compat.h
index d719010817d7a4e90943d3498a5b60b91bb4fe73..dfe35864849bfd70d662daf21818ef6a6216f5d4 100755
--- a/QDMA/DPDK/drivers/net/qdma/qdma_dpdk_compat.h
+++ b/QDMA/DPDK/drivers/net/qdma/qdma_dpdk_compat.h
@@ -7,7 +7,7 @@
 
 #define ETH_LINK_UP RTE_ETH_LINK_UP
 #define ETH_LINK_FULL_DUPLEX RTE_ETH_LINK_FULL_DUPLEX
-#define ETH_SPEED_NUM_100G RTE_ETH_SPEED_NUM_100G
+#define ETH_SPEED_NUM_200G RTE_ETH_SPEED_NUM_200G
 #define pci_dev_intr_handle pci_dev->intr_handle
 #define qdma_dev_rx_queue_count qdma_dev_rx_queue_count_v2122
 #define qdma_dev_rx_queue_release qdma_dev_rx_queue_release_v2122
@@ -63,7 +63,7 @@ void qdma_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id);
 
 #include <rte_ethdev_pci.h>
 #include <rte_ethdev_driver.h>
-#define pci_dev_intr_handle &pci_dev->intr_handle
+#define pci_dev_intr_handle (&pci_dev->intr_handle)
 #define	qdma_dev_rx_queue_count qdma_dev_rx_queue_count_v2011
 #define qdma_dev_rx_queue_release qdma_dev_rx_queue_release_v2011
 #define qdma_dev_tx_queue_release qdma_dev_tx_queue_release_v2011
diff --git a/QDMA/DPDK/drivers/net/qdma/qdma_rxtx.c b/QDMA/DPDK/drivers/net/qdma/qdma_rxtx.c
index 7ec2b5a3e7c7915ff5b1a85f39cf10a283fe3fa7..e3e55739843a6ea664909f0e0f28c8d2bd497877 100755
--- a/QDMA/DPDK/drivers/net/qdma/qdma_rxtx.c
+++ b/QDMA/DPDK/drivers/net/qdma/qdma_rxtx.c
@@ -1132,8 +1132,8 @@ static int rearm_c2h_ring(struct qdma_rx_queue *rxq, uint16_t num_desc)
 }
 
 /* Receive API for Streaming mode */
-uint16_t qdma_recv_pkts_st(struct qdma_rx_queue *rxq, struct rte_mbuf **rx_pkts,
-				uint16_t nb_pkts)
+uint16_t qdma_recv_pkts_st(struct qdma_rx_queue *rxq,
+		struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 {
 	uint16_t count_pkts;
 	struct wb_status *wb_status;
@@ -1176,11 +1176,7 @@ uint16_t qdma_recv_pkts_st(struct qdma_rx_queue *rxq, struct rte_mbuf **rx_pkts,
 		return 0;
 	}
 
-	if (nb_pkts > QDMA_MAX_BURST_SIZE)
-		nb_pkts = QDMA_MAX_BURST_SIZE;
-
-	if (nb_pkts > nb_pkts_avail)
-		nb_pkts = nb_pkts_avail;
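+	/* Clamp the burst to the completions available and the PMD's
+	 * maximum burst size.
+	 */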
+	nb_pkts = RTE_MIN(nb_pkts, RTE_MIN(nb_pkts_avail, QDMA_MAX_BURST_SIZE));
 
 #ifdef DUMP_MEMPOOL_USAGE_STATS
 	PMD_DRV_LOG(DEBUG, "%s(): %d: queue id = %d, mbuf_avail_count = %d, "
@@ -1196,7 +1192,9 @@ uint16_t qdma_recv_pkts_st(struct qdma_rx_queue *rxq, struct rte_mbuf **rx_pkts,
 #ifdef QDMA_LATENCY_OPTIMIZED
 	adapt_update_counter(rxq, nb_pkts_avail);
 #endif //QDMA_LATENCY_OPTIMIZED
-	if (process_cmpt_ring(rxq, nb_pkts) != 0)
+
+	int ret = process_cmpt_ring(rxq, nb_pkts);
+	if (unlikely(ret))
 		return 0;
 
 	if (rxq->status != RTE_ETH_QUEUE_STATE_STARTED) {
@@ -1236,8 +1234,8 @@ uint16_t qdma_recv_pkts_st(struct qdma_rx_queue *rxq, struct rte_mbuf **rx_pkts,
 }
 
 /* Receive API for Memory mapped mode */
-uint16_t qdma_recv_pkts_mm(struct qdma_rx_queue *rxq, struct rte_mbuf **rx_pkts,
-			uint16_t nb_pkts)
+uint16_t qdma_recv_pkts_mm(struct qdma_rx_queue *rxq,
+		struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 {
 	struct rte_mbuf *mb;
 	uint32_t count, id;
@@ -1434,8 +1432,8 @@ qdma_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
 }
 
 /* Transmit API for Streaming mode */
-uint16_t qdma_xmit_pkts_st(struct qdma_tx_queue *txq, struct rte_mbuf **tx_pkts,
-			uint16_t nb_pkts)
+uint16_t qdma_xmit_pkts_st(struct qdma_tx_queue *txq,
+		struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 {
 	struct rte_mbuf *mb;
 	uint64_t pkt_len = 0;
@@ -1443,6 +1441,7 @@ uint16_t qdma_xmit_pkts_st(struct qdma_tx_queue *txq, struct rte_mbuf **tx_pkts,
 	uint16_t cidx = 0;
 	uint16_t count = 0, id;
 	struct qdma_pci_dev *qdma_dev = txq->dev->data->dev_private;
+
 #ifdef TEST_64B_DESC_BYPASS
 	int bypass_desc_sz_idx = qmda_get_desc_sz_idx(txq->bypass_desc_sz);
 
@@ -1476,7 +1475,8 @@ uint16_t qdma_xmit_pkts_st(struct qdma_tx_queue *txq, struct rte_mbuf **tx_pkts,
 	 * Hence, DMA won't happen with new descriptors.
 	 */
 	avail = txq->nb_tx_desc - 2 - in_use;
-	if (!avail) {
+
+	if (unlikely(!avail)) {
 		PMD_DRV_LOG(DEBUG, "Tx queue full, in_use = %d", in_use);
 		return 0;
 	}
@@ -1501,7 +1501,7 @@ uint16_t qdma_xmit_pkts_st(struct qdma_tx_queue *txq, struct rte_mbuf **tx_pkts,
 #else
 		ret = qdma_ul_update_st_h2c_desc(txq, txq->offloads, mb);
 #endif //RTE_ARCH_X86_64
-		if (ret < 0)
+		if (unlikely(ret < 0))
 			break;
 	}
 
@@ -1532,8 +1532,8 @@ uint16_t qdma_xmit_pkts_st(struct qdma_tx_queue *txq, struct rte_mbuf **tx_pkts,
 }
 
 /* Transmit API for Memory mapped mode */
-uint16_t qdma_xmit_pkts_mm(struct qdma_tx_queue *txq, struct rte_mbuf **tx_pkts,
-			uint16_t nb_pkts)
+uint16_t qdma_xmit_pkts_mm(struct qdma_tx_queue *txq,
+		struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 {
 	struct rte_mbuf *mb;
 	uint32_t count, id;
diff --git a/QDMA/DPDK/drivers/net/qdma/qdma_user.c b/QDMA/DPDK/drivers/net/qdma/qdma_user.c
index 222cf49bd5c0edd250d058e32d4954ee6a73b1d9..a487de6676d36b924da78d05dd1451b9f355cf9f 100755
--- a/QDMA/DPDK/drivers/net/qdma/qdma_user.c
+++ b/QDMA/DPDK/drivers/net/qdma/qdma_user.c
@@ -183,24 +183,26 @@ int qdma_ul_update_st_h2c_desc(void *qhndl, uint64_t q_offloads,
 		desc_info->src_addr = mb->buf_iova + mb->data_off;
 		desc_info->flags = (S_H2C_DESC_F_SOP | S_H2C_DESC_F_EOP);
 		desc_info->cdh_flags = 0;
-	} else {
-		while (nsegs && mb) {
-			desc_info = get_st_h2c_desc(qhndl);
+		return 0;
+	}
 
-			desc_info->len = rte_pktmbuf_data_len(mb);
-			desc_info->pld_len = desc_info->len;
-			desc_info->src_addr = mb->buf_iova + mb->data_off;
-			desc_info->flags = 0;
-			if (nsegs == pkt_segs)
-				desc_info->flags |= S_H2C_DESC_F_SOP;
-			if (nsegs == 1)
-				desc_info->flags |= S_H2C_DESC_F_EOP;
-			desc_info->cdh_flags = 0;
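+	/* Multi-segment packet: set SOP on the first descriptor and
+	 * EOP on the last.
+	 */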
+	while (nsegs && mb) {
+		desc_info = get_st_h2c_desc(qhndl);
 
-			nsegs--;
-			mb = mb->next;
-		}
+		desc_info->len = rte_pktmbuf_data_len(mb);
+		desc_info->pld_len = desc_info->len;
+		desc_info->src_addr = mb->buf_iova + mb->data_off;
+		desc_info->flags = 0;
+
+		desc_info->flags |= (nsegs == pkt_segs) ? S_H2C_DESC_F_SOP : 0;
+		desc_info->flags |= (nsegs == 1) ? S_H2C_DESC_F_EOP : 0;
+
+		desc_info->cdh_flags = 0;
+
+		nsegs--;
+		mb = mb->next;
 	}
+
 	return 0;
 }
 
diff --git a/QDMA/DPDK/drivers/net/qdma/qdma_vf_ethdev.c b/QDMA/DPDK/drivers/net/qdma/qdma_vf_ethdev.c
index ea94e2ca495e8b95c78c95d837088b68885e2de5..4c4fce57d4a864fad5bc36b05ee14db9f7db715d 100755
--- a/QDMA/DPDK/drivers/net/qdma/qdma_vf_ethdev.c
+++ b/QDMA/DPDK/drivers/net/qdma/qdma_vf_ethdev.c
@@ -572,7 +572,9 @@ static int qdma_vf_dev_link_update(struct rte_eth_dev *dev,
 {
 	dev->data->dev_link.link_status = ETH_LINK_UP;
 	dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
-	dev->data->dev_link.link_speed = ETH_SPEED_NUM_100G;
+
+	/* TODO: Configure link speed by reading hardware capabilities */
+	dev->data->dev_link.link_speed = ETH_SPEED_NUM_200G;
 
 	PMD_DRV_LOG(INFO, "Link update done\n");
 
diff --git a/QDMA/DPDK/drivers/net/qdma/rte_pmd_qdma.h b/QDMA/DPDK/drivers/net/qdma/rte_pmd_qdma.h
index cfff15381fdb19e6976d53d6073573e5c8954d34..e1774c52a8447adedac7fcbf0884f8ef25700d7e 100755
--- a/QDMA/DPDK/drivers/net/qdma/rte_pmd_qdma.h
+++ b/QDMA/DPDK/drivers/net/qdma/rte_pmd_qdma.h
@@ -757,7 +757,8 @@ int rte_pmd_qdma_dev_fp_ops_config(int port_id);
  * @return  pci_read_reg value
  *
  ******************************************************************************/
-unsigned int rte_pmd_qdma_compat_pci_read_reg(int port_id, unsigned int bar, unsigned int offset);
+unsigned int rte_pmd_qdma_compat_pci_read_reg(int port_id,
+		unsigned int bar, unsigned int offset);
 
 /*****************************************************************************/
 /**
@@ -769,7 +770,8 @@ unsigned int rte_pmd_qdma_compat_pci_read_reg(int port_id, unsigned int bar, uns
  * @param   reg_val Value which needs to write
  *
  ******************************************************************************/
-void rte_pmd_qdma_compat_pci_write_reg(int port_id, uint32_t bar, uint32_t offset, uint32_t reg_val);
+void rte_pmd_qdma_compat_pci_write_reg(int port_id, uint32_t bar,
+		uint32_t offset, uint32_t reg_val);
 
 /*****************************************************************************/
 /**
@@ -781,7 +783,8 @@ void rte_pmd_qdma_compat_pci_write_reg(int port_id, uint32_t bar, uint32_t offse
  * @param   fn Function
  *
  ******************************************************************************/
-void rte_pmd_qdma_get_bdf(uint32_t m_id, uint32_t *bus, uint32_t *dev, uint32_t *fn);
+void rte_pmd_qdma_get_bdf(uint32_t m_id, uint32_t *bus,
+		uint32_t *dev, uint32_t *fn);
 
 /*****************************************************************************/
 /**
@@ -829,7 +832,7 @@ void rte_pmd_qdma_dev_started(int port_id, bool status);
  *
  ******************************************************************************/
 
-struct rte_pci_device* rte_pmd_qdma_eth_dev_to_pci(int port_id);
+struct rte_pci_device *rte_pmd_qdma_eth_dev_to_pci(int port_id);
 
 /*****************************************************************************/
 /**
@@ -839,7 +842,17 @@ struct rte_pci_device* rte_pmd_qdma_eth_dev_to_pci(int port_id);
  * @return  rte_device* rte_device
  *
  ******************************************************************************/
-struct rte_device* rte_pmd_qdma_get_device(int port_id);
+struct rte_device *rte_pmd_qdma_get_device(int port_id);
+
+/*****************************************************************************/
+/**
+ * DPDK PMD compatibility function to validate the rte device for a port
+ *
+ * @param   port_id Port ID
+ * @return  true if the port has a valid rte device, false otherwise
+ *
+ ******************************************************************************/
+bool rte_pmd_qdma_validate_dev(int port_id);
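+
+/*
+ * Illustrative usage sketch (not part of the exported API): gate the
+ * compat helpers on ports that may not be bound to the QDMA PMD. The
+ * BAR and offset values below are placeholders.
+ *
+ *   if (rte_pmd_qdma_validate_dev(port_id))
+ *       val = rte_pmd_qdma_compat_pci_read_reg(port_id, 0, 0x0);
+ */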
 
 #ifdef __cplusplus
 }
diff --git a/QDMA/DPDK/drivers/net/qdma/version.h b/QDMA/DPDK/drivers/net/qdma/version.h
index 0962b0fcea6395771b2b89cff6d3e5e09ad8364e..14dac1fd2d458d5ac4c9c806a8c28bb494344f17 100755
--- a/QDMA/DPDK/drivers/net/qdma/version.h
+++ b/QDMA/DPDK/drivers/net/qdma/version.h
@@ -39,7 +39,7 @@
 
 #define QDMA_PMD_MAJOR		2023
 #define QDMA_PMD_MINOR		1
-#define QDMA_PMD_PATCHLEVEL	0
+#define QDMA_PMD_PATCHLEVEL	1
 
 #define QDMA_PMD_VERSION      \
 	qdma_stringify(QDMA_PMD_MAJOR) "." \
diff --git a/QDMA/DPDK/drivers/net/qdma/version.map b/QDMA/DPDK/drivers/net/qdma/version.map
index f866b5e1d03782cfa07af5281f40a6f34df11d00..1b9c22d34eafa93f7acfe3ad11941c3fdf9b7f31 100755
--- a/QDMA/DPDK/drivers/net/qdma/version.map
+++ b/QDMA/DPDK/drivers/net/qdma/version.map
@@ -109,6 +109,7 @@ DPDK_22 {
         rte_pmd_qdma_dev_started;
         rte_pmd_qdma_eth_dev_to_pci;
 	rte_pmd_qdma_get_device;
+        rte_pmd_qdma_validate_dev;
 
 
         local: *;
@@ -155,6 +156,7 @@ DPDK_23 {
         rte_pmd_qdma_dev_started;
         rte_pmd_qdma_eth_dev_to_pci;
 	rte_pmd_qdma_get_device;
+        rte_pmd_qdma_validate_dev;
 
 
         local: *;
diff --git a/QDMA/DPDK/tools/0001-PKTGEN-22.04.1-Patch-to-add-Jumbo-packet-support.patch b/QDMA/DPDK/tools/0001-PKTGEN-22.04.1-Patch-to-add-Jumbo-packet-support.patch
index b493b52906a8a2926fee04da7ab612e07361e77c..e50d80d8340c60d090e9ab4b45834fa46296da45 100755
--- a/QDMA/DPDK/tools/0001-PKTGEN-22.04.1-Patch-to-add-Jumbo-packet-support.patch
+++ b/QDMA/DPDK/tools/0001-PKTGEN-22.04.1-Patch-to-add-Jumbo-packet-support.patch
@@ -1,7 +1,7 @@
-From fce606c78b3c6be2b9615c46975b8c5430e97924 Mon Sep 17 00:00:00 2001
+From fb3ffa15e6d1f6b1a576b393ff69317707351aae Mon Sep 17 00:00:00 2001
 From: Prasad Pardeshi <prasadp@xilinx.com>
-Date: Wed, 1 Feb 2023 02:40:17 +0530
-Subject: [PATCH] PKTGEN-22.04.1: Patch to add Jumbo packet support
+Date: Tue, 28 Feb 2023 17:37:06 +0530
+Subject: [PATCH] PKTGEN-22.04.1: Patch to add Jumbo packet support
 MIME-Version: 1.0
 Content-Type: text/plain; charset=UTF-8
 Content-Transfer-Encoding: 8bit
@@ -11,7 +12,7 @@ This patch include:
 2. Increase default number of RX_DESC to 2K.
 3. Disable RX classification.
 4. Set user provided packet size as DMA packet size i.e. not to remove
-CRC bytes Signed-off-by: Kumar Sanghvi <kumars@xilinx.com>
+CRC bytes Signed-off-by: Kumar Sanghvi <kumars@xilinx.com>
 Signed-off-by: Nikhil Agarwal <nagarwal@xilinx.com>
 Signed-off-by: Pankaj Darak <pankajd@xilinx.com>
 Signed-off-by: Thanneeru Srinivasulu <sthannee@xilinx.com>
@@ -19,20 +20,21 @@ Signed-off-by: tarakr <tarakr@xilinx.com>
 Signed-off-by: Suryanarayana Raju Sangani <ssangani@xilinx.com>
 ---
  app/lpktgenlib.c        |  2 ++
+ app/meson.build         |  1 +
  app/pktgen-cmds.c       | 18 ++++++++++++++----
  app/pktgen-constants.h  |  4 ++--
  app/pktgen-latency.c    |  2 ++
- app/pktgen-main.c       | 28 ++++++++++++++++++++--------
+ app/pktgen-main.c       | 25 ++++++++++++++++++-------
  app/pktgen-port-cfg.c   | 15 ++++++++++-----
  app/pktgen-port-cfg.h   |  2 +-
  app/pktgen-range.c      |  3 ++-
  app/pktgen-rate.c       |  2 ++
  app/pktgen-stats.c      |  2 ++
- app/pktgen.c            | 21 ++++++++++++++++++---
+ app/pktgen.c            | 19 +++++++++++++++++--
  app/pktgen.h            |  5 +++--
  lib/cli/cli_map.c       |  1 +
  lib/common/pg_strings.c |  1 +
- 14 files changed, 80 insertions(+), 26 deletions(-)
+ 15 files changed, 78 insertions(+), 24 deletions(-)
 
 diff --git a/app/lpktgenlib.c b/app/lpktgenlib.c
 index bc24433..add5a16 100644
@@ -47,6 +49,18 @@ index bc24433..add5a16 100644
  #include <stdint.h>
  #include <netinet/in.h>
  
+diff --git a/app/meson.build b/app/meson.build
+index 66087a5..1027997 100644
+--- a/app/meson.build
++++ b/app/meson.build
+@@ -35,6 +35,7 @@ deps += [cc.find_library('rte_net_i40e', required: false)]
+ deps += [cc.find_library('rte_net_ixgbe', required: false)]
+ deps += [cc.find_library('rte_net_ice', required: false)]
+ deps += [cc.find_library('rte_bus_vdev', required: false)]
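++# pktgen now calls rte_pmd_qdma_* APIs directly, so the QDMA PMD is required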
++deps += [cc.find_library('rte_net_qdma', required: true)]
+ 
+ deps += [dependency('threads')]
+ deps += [cc.find_library('numa', required: true)]
 diff --git a/app/pktgen-cmds.c b/app/pktgen-cmds.c
 index 9708b28..f92b890 100644
 --- a/app/pktgen-cmds.c
@@ -143,18 +157,19 @@ index bafaf1c..7787a6f 100644
  
  #include "pktgen-cmds.h"
 diff --git a/app/pktgen-main.c b/app/pktgen-main.c
-index 7debdb3..33eac3a 100644
+index 7debdb3..11a118a 100644
 --- a/app/pktgen-main.c
 +++ b/app/pktgen-main.c
-@@ -10,6 +10,7 @@
- #include <signal.h>
- #include <locale.h>
+@@ -29,6 +29,8 @@
+ #include "pktgen-log.h"
+ #include "cli-functions.h"
  
-+#include "ethdev_driver.h"
- #include <lua_config.h>
- #ifdef LUA_ENABLED
- #include <lua_socket.h>
-@@ -206,7 +207,7 @@ pktgen_parse_args(int argc, char **argv)
++#include <rte_pmd_qdma.h>
++
+ /* Offset to the mbuf dynamic field holding pktgen data. */
+ int pktgen_dynfield_offset = -1;
+ 
+@@ -206,7 +208,7 @@ pktgen_parse_args(int argc, char **argv)
      pktgen.mbuf_buf_size = RTE_MBUF_DEFAULT_BUF_SIZE;
  
      pktgen.verbose = 0;
@@ -163,7 +178,7 @@ index 7debdb3..33eac3a 100644
             EOF)
          switch (opt) {
          case 't':
-@@ -315,7 +316,12 @@ pktgen_parse_args(int argc, char **argv)
+@@ -315,7 +317,12 @@ pktgen_parse_args(int argc, char **argv)
  
          case 'h': /* print out the help message */
              pktgen_usage(prgname);
@@ -177,7 +192,7 @@ index 7debdb3..33eac3a 100644
  
          case 0: /* crc-strip for all ports */
              printf(">>> Strip CRC in hardware is the default\n");
-@@ -421,8 +427,10 @@ RTE_FINI(pktgen_fini)
+@@ -421,7 +428,8 @@ RTE_FINI(pktgen_fini)
  int
  main(int argc, char **argv)
  {
@@ -185,11 +200,9 @@ index 7debdb3..33eac3a 100644
 +    uint32_t nb_ports;
 +    int32_t i;
      int32_t ret;
-+    struct rte_device *dev;
  
      setlocale(LC_ALL, "");
- 
-@@ -574,12 +582,16 @@ main(int argc, char **argv)
+@@ -574,10 +582,13 @@ main(int argc, char **argv)
      /* Wait for all of the cores to stop running and exit. */
      rte_eal_mp_wait_lcore();
  
@@ -197,20 +210,16 @@ index 7debdb3..33eac3a 100644
 -    {
 -        rte_eth_dev_stop(i);
 -        rte_delay_us_sleep(100 * 1000);
--    }
 +    nb_ports = rte_eth_dev_count_avail();
 +    for(i = nb_ports-1; i >= 0; i--) {
-+            dev = rte_eth_devices[i].device;
-+	    if (rte_dev_remove(dev))
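++	    /* Detach through the QDMA PMD helper; assumes ports are QDMA devices */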
++	    if (rte_pmd_qdma_dev_remove(i))
 +	            printf("Failed to detach port '%d'\n", i);
 +	    else
 +		    printf("successfully removed port '%d'\n", i);
++
+     }
  
-+    }
-+    
      cli_destroy();
- 
-     return 0;
 diff --git a/app/pktgen-port-cfg.c b/app/pktgen-port-cfg.c
 index 7a61db3..3579d07 100644
 --- a/app/pktgen-port-cfg.c
@@ -326,7 +335,7 @@ index e7f27ef..7840b08 100644
  #include <lua_config.h>
  
 diff --git a/app/pktgen.c b/app/pktgen.c
-index 448cc7f..508d650 100644
+index 448cc7f..14441f7 100644
 --- a/app/pktgen.c
 +++ b/app/pktgen.c
 @@ -75,6 +75,7 @@ pktgen_wire_size(port_info_t *info)
@@ -337,17 +346,14 @@ index 448cc7f..508d650 100644
      return size;
  }
  
-@@ -297,8 +298,9 @@ pktgen_send_burst(port_info_t *info, uint16_t qid)
+@@ -297,6 +298,7 @@ pktgen_send_burst(port_info_t *info, uint16_t qid)
      struct qstats_s *qstats = &info->qstats[qid];
      uint32_t ret, cnt, tap, rnd, tstamp, i;
      int32_t seq_idx;
 +    pkt_seq_t *pkt;
  
--    tap = pktgen_tst_port_flags(info, PROCESS_TX_TAP_PKTS);
-+    tap = pktgen_tst_port_flags(info, PROCESS_TX_TAP_PKTS);	
+     tap = pktgen_tst_port_flags(info, PROCESS_TX_TAP_PKTS);
  
-     if ((cnt = mtab->len) == 0)
-         goto special_send;
 @@ -313,6 +315,10 @@ pktgen_send_burst(port_info_t *info, uint16_t qid)
      else
          seq_idx = SINGLE_PKT;