2023-05-19 07:22:33

by Ratheesh Kannoth

Subject: [PATCH net-next v3] octeontx2-pf: Add support for page pool

Using a page pool for each rx queue enhances rx side performance
by reclaiming buffers back to each queue-specific pool. DMA
mapping is done only for the first allocation of buffers.
Since subsequent buffer allocations avoid DMA mapping,
this results in a performance improvement.

Image            | Performance
-----------------|------------
Vanilla          | 3 Mpps
With this change | 42 Mpps
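
For context, the buffer lifecycle this change sets up is summarised in
the sketch below (a condensed view of the calls the diff adds; names
mirror the driver code and error handling is omitted):

	struct page_pool_params pp_params = {
		.flags     = PP_FLAG_PAGE_FRAG | PP_FLAG_DMA_MAP,
		.pool_size = numptrs,	/* one slot per RQ buffer */
		.nid       = NUMA_NO_NODE,
		.dev       = pfvf->dev,
		.dma_dir   = DMA_FROM_DEVICE,
	};
	size_t sz = ALIGN(SKB_DATA_ALIGN(pool->rbsize), OTX2_ALIGN);
	unsigned int offset = 0;
	struct page *page;
	dma_addr_t dma;

	/* Pool creation, one per RX queue: the pool DMA-maps pages once
	 * and they keep that mapping while they recycle through it.
	 */
	pool->page_pool = page_pool_create(&pp_params);

	/* Refill fast path: a recycled frag already carries its DMA
	 * address, so there is no per-buffer dma_map_page().
	 */
	page = page_pool_alloc_frag(pool->page_pool, &offset, sz, GFP_ATOMIC);
	dma = page_pool_get_dma_addr(page) + offset;

	/* Teardown path: hand the page back to the pool instead of
	 * unmapping and freeing it.
	 */
	page_pool_put_full_page(pool->page_pool, page, true);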

Signed-off-by: Ratheesh Kannoth <[email protected]>
---

ChangeLog

v2 -> v3:
* Modified commit message
* Fixed nit comments.

v1 -> v2:
* Removed GFP_DMA flag
* Returned correct err value

v0 -> v1:
* Removed CONFIG_PAGE_POOL #ifdefs in code
* Used compound page APIs
* Replaced page_pool_put_page API with page_pool_put_full_page API
---
.../net/ethernet/marvell/octeontx2/Kconfig | 1 +
.../marvell/octeontx2/nic/otx2_common.c | 74 ++++++++++++++++---
.../marvell/octeontx2/nic/otx2_common.h | 6 +-
.../ethernet/marvell/octeontx2/nic/otx2_pf.c | 11 ++-
.../marvell/octeontx2/nic/otx2_txrx.c | 19 +++--
.../marvell/octeontx2/nic/otx2_txrx.h | 1 +
.../ethernet/marvell/octeontx2/nic/qos_sq.c | 2 +-
7 files changed, 93 insertions(+), 21 deletions(-)

diff --git a/drivers/net/ethernet/marvell/octeontx2/Kconfig b/drivers/net/ethernet/marvell/octeontx2/Kconfig
index 993ac180a5db..a32d85d6f599 100644
--- a/drivers/net/ethernet/marvell/octeontx2/Kconfig
+++ b/drivers/net/ethernet/marvell/octeontx2/Kconfig
@@ -32,6 +32,7 @@ config OCTEONTX2_PF
tristate "Marvell OcteonTX2 NIC Physical Function driver"
select OCTEONTX2_MBOX
select NET_DEVLINK
+ select PAGE_POOL
depends on (64BIT && COMPILE_TEST) || ARM64
select DIMLIB
depends on PCI
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index f9286648e45c..4e3a5de358a8 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -518,11 +518,32 @@ void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx)
(pfvf->hw.cq_ecount_wait - 1));
}

+static int otx2_alloc_pool_buf(struct otx2_nic *pfvf, struct otx2_pool *pool,
+ dma_addr_t *dma)
+{
+ unsigned int offset = 0;
+ struct page *page;
+ size_t sz;
+
+ sz = SKB_DATA_ALIGN(pool->rbsize);
+ sz = ALIGN(sz, OTX2_ALIGN);
+
+ page = page_pool_alloc_frag(pool->page_pool, &offset, sz, GFP_ATOMIC);
+ if (unlikely(!page))
+ return -ENOMEM;
+
+ *dma = page_pool_get_dma_addr(page) + offset;
+ return 0;
+}
+
static int __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
dma_addr_t *dma)
{
u8 *buf;

+ if (pool->page_pool)
+ return otx2_alloc_pool_buf(pfvf, pool, dma);
+
buf = napi_alloc_frag_align(pool->rbsize, OTX2_ALIGN);
if (unlikely(!buf))
return -ENOMEM;
@@ -1205,10 +1226,28 @@ void otx2_sq_free_sqbs(struct otx2_nic *pfvf)
}
}

+void otx2_free_bufs(struct otx2_nic *pfvf, struct otx2_pool *pool,
+ u64 iova, int size)
+{
+ u64 pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
+ struct page *page = virt_to_head_page(phys_to_virt(pa));
+
+ if (pool->page_pool) {
+ page_pool_put_full_page(pool->page_pool, page, true);
+ } else {
+ dma_unmap_page_attrs(pfvf->dev, iova, size,
+ DMA_FROM_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
+
+ put_page(page);
+ }
+}
+
void otx2_free_aura_ptr(struct otx2_nic *pfvf, int type)
{
int pool_id, pool_start = 0, pool_end = 0, size = 0;
- u64 iova, pa;
+ struct otx2_pool *pool;
+ u64 iova;

if (type == AURA_NIX_SQ) {
pool_start = otx2_get_pool_idx(pfvf, type, 0);
@@ -1224,15 +1263,13 @@ void otx2_free_aura_ptr(struct otx2_nic *pfvf, int type)
/* Free SQB and RQB pointers from the aura pool */
for (pool_id = pool_start; pool_id < pool_end; pool_id++) {
iova = otx2_aura_allocptr(pfvf, pool_id);
+ pool = &pfvf->qset.pool[pool_id];
while (iova) {
if (type == AURA_NIX_RQ)
iova -= OTX2_HEAD_ROOM;

- pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
- dma_unmap_page_attrs(pfvf->dev, iova, size,
- DMA_FROM_DEVICE,
- DMA_ATTR_SKIP_CPU_SYNC);
- put_page(virt_to_page(phys_to_virt(pa)));
+ otx2_free_bufs(pfvf, pool, iova, size);
+
iova = otx2_aura_allocptr(pfvf, pool_id);
}
}
@@ -1250,6 +1287,8 @@ void otx2_aura_pool_free(struct otx2_nic *pfvf)
pool = &pfvf->qset.pool[pool_id];
qmem_free(pfvf->dev, pool->stack);
qmem_free(pfvf->dev, pool->fc_addr);
+ page_pool_destroy(pool->page_pool);
+ pool->page_pool = NULL;
}
devm_kfree(pfvf->dev, pfvf->qset.pool);
pfvf->qset.pool = NULL;
@@ -1333,8 +1372,9 @@ int otx2_aura_init(struct otx2_nic *pfvf, int aura_id,
}

int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
- int stack_pages, int numptrs, int buf_size)
+ int stack_pages, int numptrs, int buf_size, int type)
{
+ struct page_pool_params pp_params = { 0 };
struct npa_aq_enq_req *aq;
struct otx2_pool *pool;
int err;
@@ -1378,6 +1418,22 @@ int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
aq->ctype = NPA_AQ_CTYPE_POOL;
aq->op = NPA_AQ_INSTOP_INIT;

+ if (type != AURA_NIX_RQ) {
+ pool->page_pool = NULL;
+ return 0;
+ }
+
+ pp_params.flags = PP_FLAG_PAGE_FRAG | PP_FLAG_DMA_MAP;
+ pp_params.pool_size = numptrs;
+ pp_params.nid = NUMA_NO_NODE;
+ pp_params.dev = pfvf->dev;
+ pp_params.dma_dir = DMA_FROM_DEVICE;
+ pool->page_pool = page_pool_create(&pp_params);
+ if (IS_ERR(pool->page_pool)) {
+ netdev_err(pfvf->netdev, "Creation of page pool failed\n");
+ return PTR_ERR(pool->page_pool);
+ }
+
return 0;
}

@@ -1412,7 +1468,7 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf)

/* Initialize pool context */
err = otx2_pool_init(pfvf, pool_id, stack_pages,
- num_sqbs, hw->sqb_size);
+ num_sqbs, hw->sqb_size, AURA_NIX_SQ);
if (err)
goto fail;
}
@@ -1475,7 +1531,7 @@ int otx2_rq_aura_pool_init(struct otx2_nic *pfvf)
}
for (pool_id = 0; pool_id < hw->rqpool_cnt; pool_id++) {
err = otx2_pool_init(pfvf, pool_id, stack_pages,
- num_ptrs, pfvf->rbsize);
+ num_ptrs, pfvf->rbsize, AURA_NIX_RQ);
if (err)
goto fail;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index b2267c8bec37..a9ed15d1793a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -976,7 +976,7 @@ int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable);
void otx2_ctx_disable(struct mbox *mbox, int type, bool npa);
int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable);
-void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq);
+void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, int qidx);
void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq);
int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura);
int otx2_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura);
@@ -984,7 +984,7 @@ int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura);
int otx2_alloc_buffer(struct otx2_nic *pfvf, struct otx2_cq_queue *cq,
dma_addr_t *dma);
int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
- int stack_pages, int numptrs, int buf_size);
+ int stack_pages, int numptrs, int buf_size, int type);
int otx2_aura_init(struct otx2_nic *pfvf, int aura_id,
int pool_id, int numptrs);

@@ -1054,6 +1054,8 @@ u16 otx2_get_max_mtu(struct otx2_nic *pfvf);
int otx2_handle_ntuple_tc_features(struct net_device *netdev,
netdev_features_t features);
int otx2_smq_flush(struct otx2_nic *pfvf, int smq);
+void otx2_free_bufs(struct otx2_nic *pfvf, struct otx2_pool *pool,
+ u64 iova, int size);

/* tc support */
int otx2_init_tc(struct otx2_nic *nic);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index e1883c3edda3..db3fcab1c8cd 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -1555,7 +1555,9 @@ static void otx2_free_hw_resources(struct otx2_nic *pf)
struct nix_lf_free_req *free_req;
struct mbox *mbox = &pf->mbox;
struct otx2_cq_queue *cq;
+ struct otx2_pool *pool;
struct msg_req *req;
+ int pool_id;
int qidx;

/* Ensure all SQE are processed */
@@ -1584,7 +1586,7 @@ static void otx2_free_hw_resources(struct otx2_nic *pf)
for (qidx = 0; qidx < qset->cq_cnt; qidx++) {
cq = &qset->cq[qidx];
if (cq->cq_type == CQ_RX)
- otx2_cleanup_rx_cqes(pf, cq);
+ otx2_cleanup_rx_cqes(pf, cq, qidx);
else
otx2_cleanup_tx_cqes(pf, cq);
}
@@ -1594,6 +1596,13 @@ static void otx2_free_hw_resources(struct otx2_nic *pf)
/* Free RQ buffer pointers*/
otx2_free_aura_ptr(pf, AURA_NIX_RQ);

+ for (qidx = 0; qidx < pf->hw.rx_queues; qidx++) {
+ pool_id = otx2_get_pool_idx(pf, AURA_NIX_RQ, qidx);
+ pool = &pf->qset.pool[pool_id];
+ page_pool_destroy(pool->page_pool);
+ pool->page_pool = NULL;
+ }
+
otx2_free_cq_res(pf);

/* Free all ingress bandwidth profiles allocated */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index e288f46b23a8..37d4e4b73816 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -217,9 +217,6 @@ static bool otx2_skb_add_frag(struct otx2_nic *pfvf, struct sk_buff *skb,
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
va - page_address(page) + off,
len - off, pfvf->rbsize);
-
- otx2_dma_unmap_page(pfvf, iova - OTX2_HEAD_ROOM,
- pfvf->rbsize, DMA_FROM_DEVICE);
return true;
}

@@ -382,6 +379,8 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
if (pfvf->netdev->features & NETIF_F_RXCSUM)
skb->ip_summed = CHECKSUM_UNNECESSARY;

+ skb_mark_for_recycle(skb);
+
napi_gro_frags(napi);
}

@@ -1186,11 +1185,13 @@ bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
}
EXPORT_SYMBOL(otx2_sq_append_skb);

-void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
+void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, int qidx)
{
struct nix_cqe_rx_s *cqe;
int processed_cqe = 0;
- u64 iova, pa;
+ struct otx2_pool *pool;
+ u16 pool_id;
+ u64 iova;

if (pfvf->xdp_prog)
xdp_rxq_info_unreg(&cq->xdp_rxq);
@@ -1198,6 +1199,9 @@ void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
return;

+ pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_RQ, qidx);
+ pool = &pfvf->qset.pool[pool_id];
+
while (cq->pend_cqe) {
cqe = (struct nix_cqe_rx_s *)otx2_get_next_cqe(cq);
processed_cqe++;
@@ -1210,9 +1214,8 @@ void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
continue;
}
iova = cqe->sg.seg_addr - OTX2_HEAD_ROOM;
- pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
- otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize, DMA_FROM_DEVICE);
- put_page(virt_to_page(phys_to_virt(pa)));
+
+ otx2_free_bufs(pfvf, pool, iova, pfvf->rbsize);
}

/* Free CQEs to HW */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
index 7ab6db9a986f..b5d689eeff80 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
@@ -118,6 +118,7 @@ struct otx2_cq_poll {
struct otx2_pool {
struct qmem *stack;
struct qmem *fc_addr;
+ struct page_pool *page_pool;
u16 rbsize;
};

diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c b/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c
index d96ed29c1567..9d887bfc3108 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c
@@ -63,7 +63,7 @@ static int otx2_qos_sq_aura_pool_init(struct otx2_nic *pfvf, int qidx)

/* Initialize pool context */
err = otx2_pool_init(pfvf, pool_id, stack_pages,
- num_sqbs, hw->sqb_size);
+ num_sqbs, hw->sqb_size, AURA_NIX_SQ);
if (err)
goto aura_free;

--
2.25.1



2023-05-19 07:43:04

by Yunsheng Lin

Subject: Re: [PATCH net-next v3] octeontx2-pf: Add support for page pool

On 2023/5/19 15:13, Ratheesh Kannoth wrote:
> Using a page pool for each rx queue enhances rx side performance
> by reclaiming buffers back to each queue-specific pool. DMA
> mapping is done only for the first allocation of buffers.
> Since subsequent buffer allocations avoid DMA mapping,
> this results in a performance improvement.
>
> Image            | Performance
> -----------------|------------
> Vanilla          | 3 Mpps
> With this change | 42 Mpps

LGTM.
Reviewed-by: Yunsheng Lin <[email protected]>

>
> Signed-off-by: Ratheesh Kannoth <[email protected]>
> ---


2023-05-19 10:17:29

by Simon Horman

Subject: Re: [PATCH net-next v3] octeontx2-pf: Add support for page pool

On Fri, May 19, 2023 at 12:43:52PM +0530, Ratheesh Kannoth wrote:
> Using a page pool for each rx queue enhances rx side performance
> by reclaiming buffers back to each queue-specific pool. DMA
> mapping is done only for the first allocation of buffers.
> Since subsequent buffer allocations avoid DMA mapping,
> this results in a performance improvement.
>
> Image            | Performance
> -----------------|------------
> Vanilla          | 3 Mpps
> With this change | 42 Mpps
>
> Signed-off-by: Ratheesh Kannoth <[email protected]>

...

> @@ -1205,10 +1226,28 @@ void otx2_sq_free_sqbs(struct otx2_nic *pfvf)
> }
> }
>
> +void otx2_free_bufs(struct otx2_nic *pfvf, struct otx2_pool *pool,
> + u64 iova, int size)
> +{
> + u64 pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
> + struct page *page = virt_to_head_page(phys_to_virt(pa));

nit: please arrange local variables in networking code in reverse xmas tree
order - longest line to shortest.

u64 pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
struct page *page;

page = virt_to_head_page(phys_to_virt(pa));

The following tool can check this:

https://github.com/ecree-solarflare/xmastree

...

> @@ -1186,11 +1185,13 @@ bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
> }
> EXPORT_SYMBOL(otx2_sq_append_skb);
>
> -void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
> +void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, int qidx)
> {
> struct nix_cqe_rx_s *cqe;
> int processed_cqe = 0;
> - u64 iova, pa;
> + struct otx2_pool *pool;
> + u16 pool_id;
> + u64 iova;

Likewise here.
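
i.e., for this function the locals might become (same variables,
reordered longest line to shortest):

	struct nix_cqe_rx_s *cqe;
	struct otx2_pool *pool;
	int processed_cqe = 0;
	u16 pool_id;
	u64 iova;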

>
> if (pfvf->xdp_prog)
> xdp_rxq_info_unreg(&cq->xdp_rxq);

...

2023-05-19 10:40:47

by Ratheesh Kannoth

Subject: RE: Re: [PATCH net-next v3] octeontx2-pf: Add support for page pool


> -----Original Message-----
> From: Simon Horman <[email protected]>
> Sent: Friday, May 19, 2023 3:33 PM
> To: Ratheesh Kannoth <[email protected]>
> Cc: [email protected]; [email protected]; Sunil Kovvuri
> Goutham <[email protected]>; [email protected];
> [email protected]; [email protected]; [email protected];
> [email protected]; Subbaraya Sundeep Bhatta
> <[email protected]>; Geethasowjanya Akula <[email protected]>;
> Srujana Challa <[email protected]>; Hariprasad Kelam
> <[email protected]>
> Subject: [EXT] Re: [PATCH net-next v3] octeontx2-pf: Add support for page
> pool
>
> On Fri, May 19, 2023 at 12:43:52PM +0530, Ratheesh Kannoth wrote:
> > Using a page pool for each rx queue enhances rx side performance by
> > reclaiming buffers back to each queue-specific pool. DMA mapping is
> > done only for the first allocation of buffers. Since subsequent
> > buffer allocations avoid DMA mapping, this results in a performance
> > improvement.
> >
> > Image            | Performance
> > -----------------|------------
> > Vanilla          | 3 Mpps
> > With this change | 42 Mpps
> >
> > Signed-off-by: Ratheesh Kannoth <[email protected]>
>
> ...
>
> > @@ -1205,10 +1226,28 @@ void otx2_sq_free_sqbs(struct otx2_nic *pfvf)
> > }
> > }
> >
> > +void otx2_free_bufs(struct otx2_nic *pfvf, struct otx2_pool *pool,
> > + u64 iova, int size)
> > +{
> > + u64 pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
> > + struct page *page = virt_to_head_page(phys_to_virt(pa));
>
> nit: please arrange local variables in networking code in reverse xmas tree
> order - longest line to shortest.
Variable "pa" is used in second line. Are you suggesting to defer assignment later; and only declare variables here in reverse xmas tree style ?


>
> u64 pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
> struct page *page;
>
> page = virt_to_head_page(phys_to_virt(pa));
>
> The following tool can check this:
>
> https://github.com/ecree-solarflare/xmastree
>
> ...
>
> > @@ -1186,11 +1185,13 @@ bool otx2_sq_append_skb(struct net_device
> > *netdev, struct otx2_snd_queue *sq, }
> > EXPORT_SYMBOL(otx2_sq_append_skb);
> >
> > -void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue
> > *cq)
> > +void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue
> > +*cq, int qidx)
> > {
> > struct nix_cqe_rx_s *cqe;
> > int processed_cqe = 0;
> > - u64 iova, pa;
> > + struct otx2_pool *pool;
> > + u16 pool_id;
> > + u64 iova;
>
> Likewise here.
Okay
>
> >
> > if (pfvf->xdp_prog)
> > xdp_rxq_info_unreg(&cq->xdp_rxq);
>

-Ratheesh

2023-05-19 13:30:43

by Simon Horman

Subject: Re: Re: [PATCH net-next v3] octeontx2-pf: Add support for page pool

On Fri, May 19, 2023 at 10:21:44AM +0000, Ratheesh Kannoth wrote:
>
> > -----Original Message-----
> > From: Simon Horman <[email protected]>
> > Sent: Friday, May 19, 2023 3:33 PM
> > To: Ratheesh Kannoth <[email protected]>
> > Cc: [email protected]; [email protected]; Sunil Kovvuri
> > Goutham <[email protected]>; [email protected];
> > [email protected]; [email protected]; [email protected];
> > [email protected]; Subbaraya Sundeep Bhatta
> > <[email protected]>; Geethasowjanya Akula <[email protected]>;
> > Srujana Challa <[email protected]>; Hariprasad Kelam
> > <[email protected]>
> > Subject: [EXT] Re: [PATCH net-next v3] octeontx2-pf: Add support for page
> > pool
> >
> > On Fri, May 19, 2023 at 12:43:52PM +0530, Ratheesh Kannoth wrote:
> > > Using a page pool for each rx queue enhances rx side performance by
> > > reclaiming buffers back to each queue-specific pool. DMA mapping is
> > > done only for the first allocation of buffers. Since subsequent
> > > buffer allocations avoid DMA mapping, this results in a performance
> > > improvement.
> > >
> > > Image            | Performance
> > > -----------------|------------
> > > Vanilla          | 3 Mpps
> > > With this change | 42 Mpps
> > >
> > > Signed-off-by: Ratheesh Kannoth <[email protected]>
> >
> > ...
> >
> > > @@ -1205,10 +1226,28 @@ void otx2_sq_free_sqbs(struct otx2_nic *pfvf)
> > > }
> > > }
> > >
> > > +void otx2_free_bufs(struct otx2_nic *pfvf, struct otx2_pool *pool,
> > > + u64 iova, int size)
> > > +{
> > > + u64 pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
> > > + struct page *page = virt_to_head_page(phys_to_virt(pa));
> >
> > nit: please arrange local variables in networking code in reverse xmas tree
> > order - longest line to shortest.
> Variable "pa" is used in second line. Are you suggesting to defer assignment later; and only declare variables here in reverse xmas tree style ?

Yes, that is my suggestion.
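
For reference, the reworked function could then look like this (a
sketch only; behaviour is unchanged, the declaration and assignment
are simply split):

	void otx2_free_bufs(struct otx2_nic *pfvf, struct otx2_pool *pool,
			    u64 iova, int size)
	{
		struct page *page;
		u64 pa;

		pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
		page = virt_to_head_page(phys_to_virt(pa));

		if (pool->page_pool) {
			page_pool_put_full_page(pool->page_pool, page, true);
		} else {
			dma_unmap_page_attrs(pfvf->dev, iova, size,
					     DMA_FROM_DEVICE,
					     DMA_ATTR_SKIP_CPU_SYNC);
			put_page(page);
		}
	}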