2021-04-13 03:43:58

by Ong Boon Leong

Subject: [PATCH net-next 0/7] stmmac: add XDP ZC support

Hi,

This is the v1 patch series to add XDP zero-copy (ZC) support to the
stmmac driver. The changes are summarized below:

1-4/7: Refactor RX & TX buffer allocation and initialization to prepare
the stmmac driver for XSK RX & TX pool enabling and disabling (a rough
sketch of the per-queue pool attach pattern follows this summary).

5/7: Refactor stmmac_xdp_run_prog() for XDP ZC use, which does not need
to check whether an XDP program is loaded.

6-7/7: XDP ZC RX and TX enabling.
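
For reference, the per-queue XSK pool attach that 6-7/7 build on follows
the same pattern other drivers (e.g. i40e, ixgbe) use. The sketch below
is illustration only: the rx_queue[queue].xsk_pool field name and the
queue stop/start steps are assumptions, not necessarily what the actual
patches do.

/* Sketch only: attach an XSK buffer pool to one queue. Assumes the
 * stmmac private structures from stmmac.h and <net/xdp_sock_drv.h>.
 */
static int sketch_stmmac_xsk_pool_enable(struct stmmac_priv *priv,
                                         struct xsk_buff_pool *pool,
                                         u16 queue)
{
        int err;

        /* DMA-map the UMEM pages backing the pool for this device */
        err = xsk_pool_dma_map(pool, priv->device, 0);
        if (err)
                return err;

        /* Stop only this RX/TX queue pair, switch its buffer source to
         * the XSK pool, then re-init and restart it. The per-queue
         * alloc/init/free helpers introduced in 1-4/7 are what make
         * this possible without touching the other queues.
         */
        priv->rx_queue[queue].xsk_pool = pool;  /* assumed field name */

        return 0;
}

Disabling is the reverse: detach the pool from the queue, re-init the
queue with page_pool buffers, and call xsk_pool_dma_unmap() on the pool.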

This patch series has been tested using the xdpsock app in samples/bpf
on an Intel mGbE controller. The DUT receives burst traffic generated
by pktgen_sample03_burst_single_flow.sh in samples/pktgen.
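
For anyone who wants to reproduce the numbers without the full sample
app, what xdpsock essentially does is roughly the following minimal
sketch using the libbpf xsk.h helpers (interface name, queue id and
frame count are arbitrary here, and error handling is trimmed):

#include <stdlib.h>
#include <unistd.h>
#include <bpf/xsk.h>            /* libbpf AF_XDP helpers */
#include <linux/if_xdp.h>       /* XDP_ZEROCOPY / XDP_COPY bind flags */

#define NUM_FRAMES      4096
#define FRAME_SIZE      XSK_UMEM__DEFAULT_FRAME_SIZE

int main(void)
{
        struct xsk_socket_config cfg = {
                .rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS,
                .tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
                .bind_flags = XDP_ZEROCOPY,     /* -z; XDP_COPY for -c */
        };
        struct xsk_ring_prod fq, tx;
        struct xsk_ring_cons cq, rx;
        struct xsk_socket *xsk;
        struct xsk_umem *umem;
        void *bufs;

        /* UMEM: page-aligned buffer area shared with the kernel */
        if (posix_memalign(&bufs, getpagesize(), NUM_FRAMES * FRAME_SIZE))
                return 1;

        if (xsk_umem__create(&umem, bufs, NUM_FRAMES * FRAME_SIZE,
                             &fq, &cq, NULL))
                return 1;

        /* Bind an AF_XDP socket to eth0 queue 0 in zero-copy mode */
        if (xsk_socket__create(&xsk, "eth0", 0, umem, &rx, &tx, &cfg))
                return 1;

        /* ... populate the fill ring, then service the rx/tx rings ... */

        xsk_socket__delete(xsk);
        xsk_umem__delete(umem);
        free(bufs);
        return 0;
}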

########################################################################

==========
A) RX-Only
==========

root@intel-corei7-64:~ $ ./xdpsock -i eth0 -r -S

sock0@eth0:0 rxdrop xdp-skb
        pps         pkts        1.00
rx      112161      12229475
tx      0           0

sock0@eth0:0 rxdrop xdp-skb
        pps         pkts        1.00
rx      112280      12341779
tx      0           0

sock0@eth0:0 rxdrop xdp-skb
        pps         pkts        1.00
rx      112358      12454155
tx      0           0

====================================================

root@intel-corei7-64:~ $ ./xdpsock -i eth0 -r -N -c

sock0@eth0:0 rxdrop xdp-drv
        pps         pkts        1.00
rx      681082      2616133
tx      0           0

sock0@eth0:0 rxdrop xdp-drv
        pps         pkts        1.00
rx      681205      3297415
tx      0           0

sock0@eth0:0 rxdrop xdp-drv
        pps         pkts        1.00
rx      681386      3978873
tx      0           0

====================================================

root@intel-corei7-64:~ $ ./xdpsock -i eth0 -r -z

sock0@eth0:0 rxdrop xdp-drv
        pps         pkts        1.00
rx      703915      19579779
tx      0           0

sock0@eth0:0 rxdrop xdp-drv
        pps         pkts        1.00
rx      703766      20283768
tx      0           0

sock0@eth0:0 rxdrop xdp-drv
        pps         pkts        1.00
rx      703383      20987229
tx      0           0

==========
B) TX-Only
==========

root@intel-corei7-64:~ $ ./xdpsock -i eth0 -t -S

sock0@eth0:0 txonly xdp-skb
        pps         pkts        1.00
rx      0           0
tx      140269      4326720

sock0@eth0:0 txonly xdp-skb
        pps         pkts        1.00
rx      0           0
tx      140514      4467264

sock0@eth0:0 txonly xdp-skb
        pps         pkts        1.00
rx      0           0
tx      140009      4607296

====================================================

root@intel-corei7-64:~ $ ./xdpsock -i eth0 -t -N -c

sock0@eth0:0 txonly xdp-drv
        pps         pkts        1.00
rx      0           0
tx      138222      3108160

sock0@eth0:0 txonly xdp-drv
        pps         pkts        1.00
rx      0           0
tx      139629      3247872

sock0@eth0:0 txonly xdp-drv
        pps         pkts        1.00
rx      0           0
tx      139821      3387712

====================================================

root@intel-corei7-64:~ $ ./xdpsock -i eth0 -t -z

sock0@eth0:0 txonly xdp-drv
        pps         pkts        1.00
rx      0           0
tx      447382      13390848

sock0@eth0:0 txonly xdp-drv
        pps         pkts        1.00
rx      0           0
tx      447384      13838272

sock0@eth0:0 txonly xdp-drv
        pps         pkts        1.00
rx      0           0
tx      447384      14285696

================
C) L2 Forwarding
================

root@intel-corei7-64:~ $ ./xdpsock -i eth0 -l -S

sock0@eth0:0 l2fwd xdp-skb
        pps         pkts        1.00
rx      85021       7363434
tx      85021       7363434

sock0@eth0:0 l2fwd xdp-skb
        pps         pkts        1.00
rx      85003       7448446
tx      85003       7448446

sock0@eth0:0 l2fwd xdp-skb
        pps         pkts        1.00
rx      84946       7533403
tx      84946       7533403

====================================================

root@intel-corei7-64:~ $ ./xdpsock -i eth0 -l -N -c

sock0@eth0:0 l2fwd xdp-drv
        pps         pkts        1.00
rx      132136      1092673
tx      132072      1092609

sock0@eth0:0 l2fwd xdp-drv
        pps         pkts        1.00
rx      132428      1225118
tx      132428      1225054

sock0@eth0:0 l2fwd xdp-drv
        pps         pkts        1.00
rx      132623      1357757
tx      132623      1357693

====================================================

root@intel-corei7-64:~ $ ./xdpsock -i eth0 -l -z

sock0@eth0:0 l2fwd xdp-drv
        pps         pkts        1.00
rx      468476      43619530
tx      468476      43619466

sock0@eth0:0 l2fwd xdp-drv
        pps         pkts        1.00
rx      468633      44088218
tx      468633      44088154

sock0@eth0:0 l2fwd xdp-drv
        pps         pkts        1.00
rx      468439      44556775
tx      468439      44556711

########################################################################

The results obtained from the xdpsock test cases above look promising.
It would be great if the community could review and test this patch
series on your respective platforms and provide feedback for any
improvements.

Thank you very much,
Boon Leong

Ong Boon Leong (7):
net: stmmac: rearrange RX buffer allocation and free functions
net: stmmac: introduce dma_recycle_rx_skbufs for
stmmac_reinit_rx_buffers
net: stmmac: refactor stmmac_init_rx_buffers for
stmmac_reinit_rx_buffers
net: stmmac: rearrange RX and TX desc init into per-queue basis
net: stmmac: Refactor __stmmac_xdp_run_prog for XDP ZC
net: stmmac: Enable RX via AF_XDP zero-copy
net: stmmac: Add TX via XDP zero-copy socket

drivers/net/ethernet/stmicro/stmmac/stmmac.h | 24 +-
.../net/ethernet/stmicro/stmmac/stmmac_main.c | 1698 +++++++++++++----
.../net/ethernet/stmicro/stmmac/stmmac_xdp.c | 94 +
.../net/ethernet/stmicro/stmmac/stmmac_xdp.h | 3 +
4 files changed, 1396 insertions(+), 423 deletions(-)

--
2.25.1


2021-04-13 03:45:57

by Ong Boon Leong

Subject: [PATCH net-next 4/7] net: stmmac: rearrange RX and TX desc init into per-queue basis

The functions below are made per-queue in preparation for XDP ZC:

__init_dma_rx_desc_rings(struct stmmac_priv *priv, u32 queue, gfp_t flags)
__init_dma_tx_desc_rings(struct stmmac_priv *priv, u32 queue)

The original functions below are kept and now iterate over all queues:

init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
init_dma_tx_desc_rings(struct net_device *dev)

Signed-off-by: Ong Boon Leong <[email protected]>
---
.../net/ethernet/stmicro/stmmac/stmmac_main.c | 180 ++++++++++--------
1 file changed, 100 insertions(+), 80 deletions(-)

diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 7e889ef0c7b5..0804674e628e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1575,60 +1575,70 @@ static void stmmac_reinit_rx_buffers(struct stmmac_priv *priv)
}

/**
- * init_dma_rx_desc_rings - init the RX descriptor rings
- * @dev: net device structure
+ * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
+ * @priv: driver private structure
+ * @queue: RX queue index
* @flags: gfp flag.
* Description: this function initializes the DMA RX descriptors
* and allocates the socket buffers. It supports the chained and ring
* modes.
*/
-static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
+static int __init_dma_rx_desc_rings(struct stmmac_priv *priv, u32 queue, gfp_t flags)
{
- struct stmmac_priv *priv = netdev_priv(dev);
- u32 rx_count = priv->plat->rx_queues_to_use;
- int ret = -ENOMEM;
- int queue;
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ int ret;

- /* RX INITIALIZATION */
netif_dbg(priv, probe, priv->dev,
- "SKB addresses:\nskb\t\tskb data\tdma data\n");
+ "(%s) dma_rx_phy=0x%08x\n", __func__,
+ (u32)rx_q->dma_rx_phy);

- for (queue = 0; queue < rx_count; queue++) {
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ stmmac_clear_rx_descriptors(priv, queue);

+ WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
+ MEM_TYPE_PAGE_POOL,
+ rx_q->page_pool));

- netif_dbg(priv, probe, priv->dev,
- "(%s) dma_rx_phy=0x%08x\n", __func__,
- (u32)rx_q->dma_rx_phy);
+ netdev_info(priv->dev,
+ "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
+ rx_q->queue_index);

- stmmac_clear_rx_descriptors(priv, queue);
+ ret = stmmac_alloc_rx_buffers(priv, queue, flags);
+ if (ret < 0)
+ return -ENOMEM;

- WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
- MEM_TYPE_PAGE_POOL,
- rx_q->page_pool));
+ rx_q->cur_rx = 0;
+ rx_q->dirty_rx = 0;

- netdev_info(priv->dev,
- "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
- rx_q->queue_index);
+ /* Setup the chained descriptor addresses */
+ if (priv->mode == STMMAC_CHAIN_MODE) {
+ if (priv->extend_desc)
+ stmmac_mode_init(priv, rx_q->dma_erx,
+ rx_q->dma_rx_phy,
+ priv->dma_rx_size, 1);
+ else
+ stmmac_mode_init(priv, rx_q->dma_rx,
+ rx_q->dma_rx_phy,
+ priv->dma_rx_size, 0);
+ }

- ret = stmmac_alloc_rx_buffers(priv, queue, flags);
- if (ret < 0)
- goto err_init_rx_buffers;
+ return 0;
+}

- rx_q->cur_rx = 0;
- rx_q->dirty_rx = 0;
+static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ u32 rx_count = priv->plat->rx_queues_to_use;
+ u32 queue;
+ int ret;

- /* Setup the chained descriptor addresses */
- if (priv->mode == STMMAC_CHAIN_MODE) {
- if (priv->extend_desc)
- stmmac_mode_init(priv, rx_q->dma_erx,
- rx_q->dma_rx_phy,
- priv->dma_rx_size, 1);
- else
- stmmac_mode_init(priv, rx_q->dma_rx,
- rx_q->dma_rx_phy,
- priv->dma_rx_size, 0);
- }
+ /* RX INITIALIZATION */
+ netif_dbg(priv, probe, priv->dev,
+ "SKB addresses:\nskb\t\tskb data\tdma data\n");
+
+ for (queue = 0; queue < rx_count; queue++) {
+ ret = __init_dma_rx_desc_rings(priv, queue, flags);
+ if (ret)
+ goto err_init_rx_buffers;
}

return 0;
@@ -1647,63 +1657,73 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
}

/**
- * init_dma_tx_desc_rings - init the TX descriptor rings
- * @dev: net device structure.
+ * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
+ * @priv: driver private structure
+ * @queue : TX queue index
* Description: this function initializes the DMA TX descriptors
* and allocates the socket buffers. It supports the chained and ring
* modes.
*/
-static int init_dma_tx_desc_rings(struct net_device *dev)
+static int __init_dma_tx_desc_rings(struct stmmac_priv *priv, u32 queue)
{
- struct stmmac_priv *priv = netdev_priv(dev);
- u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
- u32 queue;
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
int i;

- for (queue = 0; queue < tx_queue_cnt; queue++) {
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
-
- netif_dbg(priv, probe, priv->dev,
- "(%s) dma_tx_phy=0x%08x\n", __func__,
- (u32)tx_q->dma_tx_phy);
-
- /* Setup the chained descriptor addresses */
- if (priv->mode == STMMAC_CHAIN_MODE) {
- if (priv->extend_desc)
- stmmac_mode_init(priv, tx_q->dma_etx,
- tx_q->dma_tx_phy,
- priv->dma_tx_size, 1);
- else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
- stmmac_mode_init(priv, tx_q->dma_tx,
- tx_q->dma_tx_phy,
- priv->dma_tx_size, 0);
- }
+ netif_dbg(priv, probe, priv->dev,
+ "(%s) dma_tx_phy=0x%08x\n", __func__,
+ (u32)tx_q->dma_tx_phy);

- for (i = 0; i < priv->dma_tx_size; i++) {
- struct dma_desc *p;
- if (priv->extend_desc)
- p = &((tx_q->dma_etx + i)->basic);
- else if (tx_q->tbs & STMMAC_TBS_AVAIL)
- p = &((tx_q->dma_entx + i)->basic);
- else
- p = tx_q->dma_tx + i;
+ /* Setup the chained descriptor addresses */
+ if (priv->mode == STMMAC_CHAIN_MODE) {
+ if (priv->extend_desc)
+ stmmac_mode_init(priv, tx_q->dma_etx,
+ tx_q->dma_tx_phy,
+ priv->dma_tx_size, 1);
+ else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
+ stmmac_mode_init(priv, tx_q->dma_tx,
+ tx_q->dma_tx_phy,
+ priv->dma_tx_size, 0);
+ }

- stmmac_clear_desc(priv, p);
+ for (i = 0; i < priv->dma_tx_size; i++) {
+ struct dma_desc *p;

- tx_q->tx_skbuff_dma[i].buf = 0;
- tx_q->tx_skbuff_dma[i].map_as_page = false;
- tx_q->tx_skbuff_dma[i].len = 0;
- tx_q->tx_skbuff_dma[i].last_segment = false;
- tx_q->tx_skbuff[i] = NULL;
- }
+ if (priv->extend_desc)
+ p = &((tx_q->dma_etx + i)->basic);
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ p = &((tx_q->dma_entx + i)->basic);
+ else
+ p = tx_q->dma_tx + i;

- tx_q->dirty_tx = 0;
- tx_q->cur_tx = 0;
- tx_q->mss = 0;
+ stmmac_clear_desc(priv, p);

- netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
+ tx_q->tx_skbuff_dma[i].buf = 0;
+ tx_q->tx_skbuff_dma[i].map_as_page = false;
+ tx_q->tx_skbuff_dma[i].len = 0;
+ tx_q->tx_skbuff_dma[i].last_segment = false;
+ tx_q->tx_skbuff[i] = NULL;
}

+ tx_q->dirty_tx = 0;
+ tx_q->cur_tx = 0;
+ tx_q->mss = 0;
+
+ netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
+
+ return 0;
+}
+
+static int init_dma_tx_desc_rings(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ u32 tx_queue_cnt;
+ u32 queue;
+
+ tx_queue_cnt = priv->plat->tx_queues_to_use;
+
+ for (queue = 0; queue < tx_queue_cnt; queue++)
+ __init_dma_tx_desc_rings(priv, queue);
+
return 0;
}

--
2.25.1

2021-04-13 04:29:02

by Ong Boon Leong

Subject: [PATCH net-next 2/7] net: stmmac: introduce dma_recycle_rx_skbufs for stmmac_reinit_rx_buffers

Move the RX buffer page_pool recycling logic into dma_recycle_rx_skbufs(),
so that stmmac_reinit_rx_buffers() is prepared for the XSK pool expansion.

Signed-off-by: Ong Boon Leong <[email protected]>
---
.../net/ethernet/stmicro/stmmac/stmmac_main.c | 44 ++++++++++++-------
1 file changed, 27 insertions(+), 17 deletions(-)

diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index f6d3d26ce45a..a6c3414fd231 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1512,6 +1512,31 @@ static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv, u32 queue,
return 0;
}

+/**
+ * dma_recycle_rx_skbufs - recycle RX dma buffers
+ * @priv: private structure
+ * @queue: RX queue index
+ */
+static void dma_recycle_rx_skbufs(struct stmmac_priv *priv, u32 queue)
+{
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ int i;
+
+ for (i = 0; i < priv->dma_rx_size; i++) {
+ struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
+
+ if (buf->page) {
+ page_pool_recycle_direct(rx_q->page_pool, buf->page);
+ buf->page = NULL;
+ }
+
+ if (priv->sph && buf->sec_page) {
+ page_pool_recycle_direct(rx_q->page_pool, buf->sec_page);
+ buf->sec_page = NULL;
+ }
+ }
+}
+
/**
* stmmac_reinit_rx_buffers - reinit the RX descriptor buffer.
* @priv: driver private structure
@@ -1524,23 +1549,8 @@ static void stmmac_reinit_rx_buffers(struct stmmac_priv *priv)
u32 queue;
int i;

- for (queue = 0; queue < rx_count; queue++) {
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
-
- for (i = 0; i < priv->dma_rx_size; i++) {
- struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
-
- if (buf->page) {
- page_pool_recycle_direct(rx_q->page_pool, buf->page);
- buf->page = NULL;
- }
-
- if (priv->sph && buf->sec_page) {
- page_pool_recycle_direct(rx_q->page_pool, buf->sec_page);
- buf->sec_page = NULL;
- }
- }
- }
+ for (queue = 0; queue < rx_count; queue++)
+ dma_recycle_rx_skbufs(priv, queue);

for (queue = 0; queue < rx_count; queue++) {
struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
--
2.25.1

2021-04-13 04:29:02

by Ong Boon Leong

Subject: [PATCH net-next 1/7] net: stmmac: rearrange RX buffer allocation and free functions

This patch moves the per-RX-queue buffer allocation from page_pool into
a new helper, stmmac_alloc_rx_buffers().

We also move dma_free_rx_skbufs() earlier so that init_dma_rx_desc_rings()
can use it to free RX buffers when page_pool allocation fails, replacing
the slightly more efficient open-coded loop used before. The replacement
is needed to make the RX buffer alloc and free paths scalable to XDP ZC
xsk_pool alloc and free later.
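
To make that direction concrete, below is a rough sketch (not code from
this series) of how the per-queue allocator can later grow an XSK
variant that pulls buffers from the xsk_buff_pool instead of page_pool.
The rx_q->xsk_pool and buf->xdp names are assumptions about the later
XDP ZC patches:

/* Sketch only; assumes <net/xdp_sock_drv.h> and xsk_pool/xdp fields
 * added to the RX queue and buffer structures by the later patches.
 */
static int sketch_alloc_rx_buffers_zc(struct stmmac_priv *priv, u32 queue)
{
        struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
        int i;

        for (i = 0; i < priv->dma_rx_size; i++) {
                struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
                dma_addr_t dma_addr;
                struct dma_desc *p;

                if (priv->extend_desc)
                        p = &((rx_q->dma_erx + i)->basic);
                else
                        p = rx_q->dma_rx + i;

                /* Buffer comes from the XSK pool, already DMA-mapped */
                buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
                if (!buf->xdp)
                        return -ENOMEM;

                dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
                stmmac_set_desc_addr(priv, p, dma_addr);
        }

        return 0;
}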

Signed-off-by: Ong Boon Leong <[email protected]>
---
.../net/ethernet/stmicro/stmmac/stmmac_main.c | 84 +++++++++++--------
1 file changed, 47 insertions(+), 37 deletions(-)

diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 77285646c5fc..f6d3d26ce45a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1475,6 +1475,43 @@ static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
tx_q->tx_skbuff_dma[i].map_as_page = false;
}

+/**
+ * dma_free_rx_skbufs - free RX dma buffers
+ * @priv: private structure
+ * @queue: RX queue index
+ */
+static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
+{
+ int i;
+
+ for (i = 0; i < priv->dma_rx_size; i++)
+ stmmac_free_rx_buffer(priv, queue, i);
+}
+
+static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv, u32 queue,
+ gfp_t flags)
+{
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ int i;
+
+ for (i = 0; i < priv->dma_rx_size; i++) {
+ struct dma_desc *p;
+ int ret;
+
+ if (priv->extend_desc)
+ p = &((rx_q->dma_erx + i)->basic);
+ else
+ p = rx_q->dma_rx + i;
+
+ ret = stmmac_init_rx_buffers(priv, p, i, flags,
+ queue);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
/**
* stmmac_reinit_rx_buffers - reinit the RX descriptor buffer.
* @priv: driver private structure
@@ -1547,15 +1584,14 @@ static void stmmac_reinit_rx_buffers(struct stmmac_priv *priv)
return;

err_reinit_rx_buffers:
- do {
- while (--i >= 0)
- stmmac_free_rx_buffer(priv, queue, i);
+ while (queue >= 0) {
+ dma_free_rx_skbufs(priv, queue);

if (queue == 0)
break;

- i = priv->dma_rx_size;
- } while (queue-- > 0);
+ queue--;
+ }
}

/**
@@ -1572,7 +1608,6 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
u32 rx_count = priv->plat->rx_queues_to_use;
int ret = -ENOMEM;
int queue;
- int i;

/* RX INITIALIZATION */
netif_dbg(priv, probe, priv->dev,
@@ -1580,7 +1615,7 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)

for (queue = 0; queue < rx_count; queue++) {
struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
- int ret;
+

netif_dbg(priv, probe, priv->dev,
"(%s) dma_rx_phy=0x%08x\n", __func__,
@@ -1596,22 +1631,12 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
"Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
rx_q->queue_index);

- for (i = 0; i < priv->dma_rx_size; i++) {
- struct dma_desc *p;
-
- if (priv->extend_desc)
- p = &((rx_q->dma_erx + i)->basic);
- else
- p = rx_q->dma_rx + i;
-
- ret = stmmac_init_rx_buffers(priv, p, i, flags,
- queue);
- if (ret)
- goto err_init_rx_buffers;
- }
+ ret = stmmac_alloc_rx_buffers(priv, queue, flags);
+ if (ret < 0)
+ goto err_init_rx_buffers;

rx_q->cur_rx = 0;
- rx_q->dirty_rx = (unsigned int)(i - priv->dma_rx_size);
+ rx_q->dirty_rx = 0;

/* Setup the chained descriptor addresses */
if (priv->mode == STMMAC_CHAIN_MODE) {
@@ -1630,13 +1655,11 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)

err_init_rx_buffers:
while (queue >= 0) {
- while (--i >= 0)
- stmmac_free_rx_buffer(priv, queue, i);
+ dma_free_rx_skbufs(priv, queue);

if (queue == 0)
break;

- i = priv->dma_rx_size;
queue--;
}

@@ -1731,19 +1754,6 @@ static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
return ret;
}

-/**
- * dma_free_rx_skbufs - free RX dma buffers
- * @priv: private structure
- * @queue: RX queue index
- */
-static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
-{
- int i;
-
- for (i = 0; i < priv->dma_rx_size; i++)
- stmmac_free_rx_buffer(priv, queue, i);
-}
-
/**
* dma_free_tx_skbufs - free TX dma buffers
* @priv: private structure
--
2.25.1