2023-07-28 23:50:20

by Haiyang Zhang

Subject: [PATCH V4,net-next] net: mana: Add page pool for RX buffers

Add a page pool for RX buffers to speed up buffer recycling and reduce CPU
usage.

The standard page pool API is used.
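
In outline, the API usage the diff below adds is (a simplified sketch of the
changes, not a verbatim copy of the driver code):

	/* One pool per RX queue, sized to the ring and bound to the
	 * device's NUMA node and the queue's NAPI context.
	 */
	struct page_pool_params pprm = {};

	pprm.pool_size = RX_BUFFERS_PER_QUEUE;
	pprm.nid = gc->numa_node;
	pprm.napi = &rxq->rx_cq.napi;
	rxq->page_pool = page_pool_create(&pprm);

	/* Refill path: allocate from the pool instead of dev_alloc_page(). */
	page = page_pool_dev_alloc_pages(rxq->page_pool);

	/* Pages handed to the stack are recycled via skb_mark_for_recycle();
	 * XDP_DROP'ed pages go straight back with page_pool_recycle_direct().
	 */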

Signed-off-by: Haiyang Zhang <[email protected]>
---
V4:
Add nid setting, remove page_pool_nid_changed(), as suggested by
Jesper Dangaard Brouer
V3:
Update xdp mem model, pool param, alloc as suggested by Jakub Kicinski
V2:
Use the standard page pool API as suggested by Jesper Dangaard Brouer

---
 drivers/net/ethernet/microsoft/mana/mana_en.c | 90 +++++++++++++++----
 include/net/mana/mana.h                       |  3 +
2 files changed, 77 insertions(+), 16 deletions(-)

diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index ac2acc9aca9d..83f2ac132990 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -1414,8 +1414,8 @@ static struct sk_buff *mana_build_skb(struct mana_rxq *rxq, void *buf_va,
return skb;
}

-static void mana_rx_skb(void *buf_va, struct mana_rxcomp_oob *cqe,
- struct mana_rxq *rxq)
+static void mana_rx_skb(void *buf_va, bool from_pool,
+ struct mana_rxcomp_oob *cqe, struct mana_rxq *rxq)
{
struct mana_stats_rx *rx_stats = &rxq->stats;
struct net_device *ndev = rxq->ndev;
@@ -1448,6 +1448,9 @@ static void mana_rx_skb(void *buf_va, struct mana_rxcomp_oob *cqe,
if (!skb)
goto drop;

+ if (from_pool)
+ skb_mark_for_recycle(skb);
+
skb->dev = napi->dev;

skb->protocol = eth_type_trans(skb, ndev);
@@ -1498,9 +1501,14 @@ static void mana_rx_skb(void *buf_va, struct mana_rxcomp_oob *cqe,
u64_stats_update_end(&rx_stats->syncp);

drop:
- WARN_ON_ONCE(rxq->xdp_save_va);
- /* Save for reuse */
- rxq->xdp_save_va = buf_va;
+ if (from_pool) {
+ page_pool_recycle_direct(rxq->page_pool,
+ virt_to_head_page(buf_va));
+ } else {
+ WARN_ON_ONCE(rxq->xdp_save_va);
+ /* Save for reuse */
+ rxq->xdp_save_va = buf_va;
+ }

++ndev->stats.rx_dropped;

@@ -1508,11 +1516,13 @@ static void mana_rx_skb(void *buf_va, struct mana_rxcomp_oob *cqe,
}

static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev,
- dma_addr_t *da, bool is_napi)
+ dma_addr_t *da, bool *from_pool, bool is_napi)
{
struct page *page;
void *va;

+ *from_pool = false;
+
/* Reuse XDP dropped page if available */
if (rxq->xdp_save_va) {
va = rxq->xdp_save_va;
@@ -1533,17 +1543,22 @@ static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev,
return NULL;
}
} else {
- page = dev_alloc_page();
+ page = page_pool_dev_alloc_pages(rxq->page_pool);
if (!page)
return NULL;

+ *from_pool = true;
va = page_to_virt(page);
}

*da = dma_map_single(dev, va + rxq->headroom, rxq->datasize,
DMA_FROM_DEVICE);
if (dma_mapping_error(dev, *da)) {
- put_page(virt_to_head_page(va));
+ if (*from_pool)
+ page_pool_put_full_page(rxq->page_pool, page, is_napi);
+ else
+ put_page(virt_to_head_page(va));
+
return NULL;
}

@@ -1552,21 +1567,25 @@ static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev,

/* Allocate frag for rx buffer, and save the old buf */
static void mana_refill_rx_oob(struct device *dev, struct mana_rxq *rxq,
- struct mana_recv_buf_oob *rxoob, void **old_buf)
+ struct mana_recv_buf_oob *rxoob, void **old_buf,
+ bool *old_fp)
{
+ bool from_pool;
dma_addr_t da;
void *va;

- va = mana_get_rxfrag(rxq, dev, &da, true);
+ va = mana_get_rxfrag(rxq, dev, &da, &from_pool, true);
if (!va)
return;

dma_unmap_single(dev, rxoob->sgl[0].address, rxq->datasize,
DMA_FROM_DEVICE);
*old_buf = rxoob->buf_va;
+ *old_fp = rxoob->from_pool;

rxoob->buf_va = va;
rxoob->sgl[0].address = da;
+ rxoob->from_pool = from_pool;
}

static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
@@ -1580,6 +1599,7 @@ static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
struct device *dev = gc->dev;
void *old_buf = NULL;
u32 curr, pktlen;
+ bool old_fp;

apc = netdev_priv(ndev);

@@ -1622,12 +1642,12 @@ static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
rxbuf_oob = &rxq->rx_oobs[curr];
WARN_ON_ONCE(rxbuf_oob->wqe_inf.wqe_size_in_bu != 1);

- mana_refill_rx_oob(dev, rxq, rxbuf_oob, &old_buf);
+ mana_refill_rx_oob(dev, rxq, rxbuf_oob, &old_buf, &old_fp);

/* Unsuccessful refill will have old_buf == NULL.
* In this case, mana_rx_skb() will drop the packet.
*/
- mana_rx_skb(old_buf, oob, rxq);
+ mana_rx_skb(old_buf, old_fp, oob, rxq);

drop:
mana_move_wq_tail(rxq->gdma_rq, rxbuf_oob->wqe_inf.wqe_size_in_bu);
@@ -1887,6 +1907,7 @@ static void mana_destroy_rxq(struct mana_port_context *apc,
struct mana_recv_buf_oob *rx_oob;
struct device *dev = gc->dev;
struct napi_struct *napi;
+ struct page *page;
int i;

if (!rxq)
@@ -1919,10 +1940,18 @@ static void mana_destroy_rxq(struct mana_port_context *apc,
dma_unmap_single(dev, rx_oob->sgl[0].address,
rx_oob->sgl[0].size, DMA_FROM_DEVICE);

- put_page(virt_to_head_page(rx_oob->buf_va));
+ page = virt_to_head_page(rx_oob->buf_va);
+
+ if (rx_oob->from_pool)
+ page_pool_put_full_page(rxq->page_pool, page, false);
+ else
+ put_page(page);
+
rx_oob->buf_va = NULL;
}

+ page_pool_destroy(rxq->page_pool);
+
if (rxq->gdma_rq)
mana_gd_destroy_queue(gc, rxq->gdma_rq);

@@ -1933,18 +1962,20 @@ static int mana_fill_rx_oob(struct mana_recv_buf_oob *rx_oob, u32 mem_key,
struct mana_rxq *rxq, struct device *dev)
{
struct mana_port_context *mpc = netdev_priv(rxq->ndev);
+ bool from_pool = false;
dma_addr_t da;
void *va;

if (mpc->rxbufs_pre)
va = mana_get_rxbuf_pre(rxq, &da);
else
- va = mana_get_rxfrag(rxq, dev, &da, false);
+ va = mana_get_rxfrag(rxq, dev, &da, &from_pool, false);

if (!va)
return -ENOMEM;

rx_oob->buf_va = va;
+ rx_oob->from_pool = from_pool;

rx_oob->sgl[0].address = da;
rx_oob->sgl[0].size = rxq->datasize;
@@ -2014,6 +2045,26 @@ static int mana_push_wqe(struct mana_rxq *rxq)
return 0;
}

+static int mana_create_page_pool(struct mana_rxq *rxq, struct gdma_context *gc)
+{
+ struct page_pool_params pprm = {};
+ int ret;
+
+ pprm.pool_size = RX_BUFFERS_PER_QUEUE;
+ pprm.nid = gc->numa_node;
+ pprm.napi = &rxq->rx_cq.napi;
+
+ rxq->page_pool = page_pool_create(&pprm);
+
+ if (IS_ERR(rxq->page_pool)) {
+ ret = PTR_ERR(rxq->page_pool);
+ rxq->page_pool = NULL;
+ return ret;
+ }
+
+ return 0;
+}
+
static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
u32 rxq_idx, struct mana_eq *eq,
struct net_device *ndev)
@@ -2043,6 +2094,13 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
mana_get_rxbuf_cfg(ndev->mtu, &rxq->datasize, &rxq->alloc_size,
&rxq->headroom);

+ /* Create page pool for RX queue */
+ err = mana_create_page_pool(rxq, gc);
+ if (err) {
+ netdev_err(ndev, "Create page pool err:%d\n", err);
+ goto out;
+ }
+
err = mana_alloc_rx_wqe(apc, rxq, &rq_size, &cq_size);
if (err)
goto out;
@@ -2114,8 +2172,8 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,

WARN_ON(xdp_rxq_info_reg(&rxq->xdp_rxq, ndev, rxq_idx,
cq->napi.napi_id));
- WARN_ON(xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq,
- MEM_TYPE_PAGE_SHARED, NULL));
+ WARN_ON(xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
+ rxq->page_pool));

napi_enable(&cq->napi);

diff --git a/include/net/mana/mana.h b/include/net/mana/mana.h
index 024ad8ddb27e..b12859511839 100644
--- a/include/net/mana/mana.h
+++ b/include/net/mana/mana.h
@@ -280,6 +280,7 @@ struct mana_recv_buf_oob {
struct gdma_wqe_request wqe_req;

void *buf_va;
+ bool from_pool; /* allocated from a page pool */

/* SGL of the buffer going to be sent has part of the work request. */
u32 num_sge;
@@ -330,6 +331,8 @@ struct mana_rxq {
bool xdp_flush;
int xdp_rc; /* XDP redirect return code */

+ struct page_pool *page_pool;
+
/* MUST BE THE LAST MEMBER:
* Each receive buffer has an associated mana_recv_buf_oob.
*/
--
2.25.1



2023-08-01 02:06:15

by Jakub Kicinski

Subject: Re: [PATCH V4,net-next] net: mana: Add page pool for RX buffers

On Fri, 28 Jul 2023 14:46:07 -0700 Haiyang Zhang wrote:
> static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev,
> - dma_addr_t *da, bool is_napi)
> + dma_addr_t *da, bool *from_pool, bool is_napi)
> {
> struct page *page;
> void *va;
>
> + *from_pool = false;
> +
> /* Reuse XDP dropped page if available */
> if (rxq->xdp_save_va) {
> va = rxq->xdp_save_va;
> @@ -1533,17 +1543,22 @@ static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev,
> return NULL;
> }
> } else {
> - page = dev_alloc_page();
> + page = page_pool_dev_alloc_pages(rxq->page_pool);
> if (!page)
> return NULL;
>
> + *from_pool = true;
> va = page_to_virt(page);
> }
>
> *da = dma_map_single(dev, va + rxq->headroom, rxq->datasize,
> DMA_FROM_DEVICE);
> if (dma_mapping_error(dev, *da)) {
> - put_page(virt_to_head_page(va));
> + if (*from_pool)
> + page_pool_put_full_page(rxq->page_pool, page, is_napi);

AFAICT you only pass the is_napi to recycle in case of error?
It's fine to always pass in false, passing true enables some
optimizations but it's not worth trying to optimize error paths.

Otherwise you may be passing in true, even tho budget was 0,
see the recently added warnings in this doc:

https://www.kernel.org/doc/html/next/networking/napi.html
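
(For illustration, the error path with that change would read roughly as
below; a sketch of the hunk quoted above, not the final patch:

	*da = dma_map_single(dev, va + rxq->headroom, rxq->datasize,
			     DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *da)) {
		if (*from_pool)
			/* Error path: always pass allow_direct=false; it is
			 * safe regardless of NAPI budget and only skips a
			 * fast-path optimization.
			 */
			page_pool_put_full_page(rxq->page_pool, page, false);
		else
			put_page(virt_to_head_page(va));

		return NULL;
	}
)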

In general the driver seems to be processing Rx regardless
of budget? This looks like a bug which should be fixed with
a separate patch for the net tree..
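
(For reference, a budget-respecting poll handler follows the pattern sketched
below; the function and helper names are illustrative, not taken from the
mana driver:

static int example_poll(struct napi_struct *napi, int budget)
{
	struct example_rxq *rxq = container_of(napi, struct example_rxq, napi);
	int work_done = 0;

	/* Process at most 'budget' Rx completions; with budget == 0 the
	 * loop must not run at all, and nothing may be recycled directly.
	 */
	while (work_done < budget && example_rx_pending(rxq)) {
		example_process_one_rx(rxq);
		work_done++;
	}

	if (work_done < budget && napi_complete_done(napi, work_done))
		example_enable_irq(rxq);

	return work_done;
}
)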
--
pw-bot: cr

2023-08-01 16:25:41

by Haiyang Zhang

Subject: RE: [PATCH V4,net-next] net: mana: Add page pool for RX buffers



> -----Original Message-----
> From: Jakub Kicinski <[email protected]>
> Sent: Monday, July 31, 2023 8:31 PM
> To: Haiyang Zhang <[email protected]>
> Cc: [email protected]; [email protected]; Dexuan Cui
> <[email protected]>; KY Srinivasan <[email protected]>; Paul Rosswurm
> <[email protected]>; [email protected]; [email protected];
> [email protected]; [email protected]; [email protected];
> [email protected]; [email protected]; Long Li <[email protected]>;
> [email protected]; [email protected];
> [email protected]; [email protected]; [email protected];
> [email protected]; Ajay Sharma <[email protected]>; [email protected];
> [email protected]; [email protected]; linux-
> [email protected]
> Subject: Re: [PATCH V4,net-next] net: mana: Add page pool for RX buffers
>
> On Fri, 28 Jul 2023 14:46:07 -0700 Haiyang Zhang wrote:
> > static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev,
> > - dma_addr_t *da, bool is_napi)
> > + dma_addr_t *da, bool *from_pool, bool is_napi)
> > {
> > struct page *page;
> > void *va;
> >
> > + *from_pool = false;
> > +
> > /* Reuse XDP dropped page if available */
> > if (rxq->xdp_save_va) {
> > va = rxq->xdp_save_va;
> > @@ -1533,17 +1543,22 @@ static void *mana_get_rxfrag(struct mana_rxq
> *rxq, struct device *dev,
> > return NULL;
> > }
> > } else {
> > - page = dev_alloc_page();
> > + page = page_pool_dev_alloc_pages(rxq->page_pool);
> > if (!page)
> > return NULL;
> >
> > + *from_pool = true;
> > va = page_to_virt(page);
> > }
> >
> > *da = dma_map_single(dev, va + rxq->headroom, rxq->datasize,
> > DMA_FROM_DEVICE);
> > if (dma_mapping_error(dev, *da)) {
> > - put_page(virt_to_head_page(va));
> > + if (*from_pool)
> > + page_pool_put_full_page(rxq->page_pool, page,
> is_napi);
>
> AFAICT you only pass the is_napi to recycle in case of error?
> It's fine to always pass in false, passing true enables some
> optimizations but it's not worth trying to optimize error paths.

Yes, this place is only for the error path. I will pass in false.

>
> Otherwise you may be passing in true, even tho budget was 0,
> see the recently added warnings in this doc:
>
> https://www.kernel.org/doc/html/next/networking/napi.html
>
> In general the driver seems to be processing Rx regardless
> of budget? This looks like a bug which should be fixed with
> a separate patch for the net tree..

Thanks, I will look into it and fix this in a separate patch.

- Haiyang