2022-11-15 16:42:08

by Shenwei Wang

Subject: [PATCH v4 2/2] net: fec: add xdp and page pool statistics

Add XDP and page pool statistics, and expose them through the standard
ethtool statistics interface.
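
With the patch applied, the new counters appear in the ethtool stats
output next to the existing MIB counters. Illustrative example (the
values are made up; the rx_pp_* names come from the core page pool
ethtool helpers):

  $ ethtool -S eth0
  ...
  rx_xdp_redirect: 0
  rx_xdp_pass: 20481
  rx_xdp_drop: 3
  rx_xdp_tx: 0
  rx_xdp_tx_errors: 0
  tx_xdp_xmit: 0
  tx_xdp_xmit_errors: 0
  rx_pp_alloc_fast: 20484
  rx_pp_alloc_slow: 12
  ...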

Signed-off-by: Shenwei Wang <[email protected]>
Reported-by: kernel test robot <[email protected]>
---
drivers/net/ethernet/freescale/fec.h | 15 ++++
drivers/net/ethernet/freescale/fec_main.c | 100 ++++++++++++++++++++--
2 files changed, 109 insertions(+), 6 deletions(-)

diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 61e847b18343..adbe661552be 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -526,6 +526,19 @@ struct fec_enet_priv_txrx_info {
struct sk_buff *skb;
};

+enum {
+ RX_XDP_REDIRECT = 0,
+ RX_XDP_PASS,
+ RX_XDP_DROP,
+ RX_XDP_TX,
+ RX_XDP_TX_ERRORS,
+ TX_XDP_XMIT,
+ TX_XDP_XMIT_ERRORS,
+
+ /* The following must be the last one */
+ XDP_STATS_TOTAL,
+};
+
struct fec_enet_priv_tx_q {
struct bufdesc_prop bd;
unsigned char *tx_bounce[TX_RING_SIZE];
@@ -546,6 +559,8 @@ struct fec_enet_priv_rx_q {
/* page_pool */
struct page_pool *page_pool;
struct xdp_rxq_info xdp_rxq;
+ struct u64_stats_sync syncp;
+ u64 stats[XDP_STATS_TOTAL];

/* rx queue number, in the range 0-7 */
u8 id;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 616eea712ca8..4706cbc8ec5c 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1507,7 +1507,8 @@ static void fec_enet_update_cbd(struct fec_enet_priv_rx_q *rxq,

static u32
fec_enet_run_xdp(struct fec_enet_private *fep, struct bpf_prog *prog,
- struct xdp_buff *xdp, struct fec_enet_priv_rx_q *rxq, int index)
+ struct xdp_buff *xdp, struct fec_enet_priv_rx_q *rxq,
+ u32 stats[], int index)
{
unsigned int sync, len = xdp->data_end - xdp->data;
u32 ret = FEC_ENET_XDP_PASS;
@@ -1523,10 +1524,12 @@ fec_enet_run_xdp(struct fec_enet_private *fep, struct bpf_prog *prog,

switch (act) {
case XDP_PASS:
+ stats[RX_XDP_PASS]++;
ret = FEC_ENET_XDP_PASS;
break;

case XDP_REDIRECT:
+ stats[RX_XDP_REDIRECT]++;
err = xdp_do_redirect(fep->netdev, xdp, prog);
if (!err) {
ret = FEC_ENET_XDP_REDIR;
@@ -1549,6 +1552,7 @@ fec_enet_run_xdp(struct fec_enet_private *fep, struct bpf_prog *prog,
fallthrough; /* handle aborts by dropping packet */

case XDP_DROP:
+ stats[RX_XDP_DROP]++;
ret = FEC_ENET_XDP_CONSUMED;
page = virt_to_head_page(xdp->data);
page_pool_put_page(rxq->page_pool, page, sync, true);
@@ -1582,6 +1586,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
struct bpf_prog *xdp_prog = READ_ONCE(fep->xdp_prog);
u32 ret, xdp_result = FEC_ENET_XDP_PASS;
u32 data_start = FEC_ENET_XDP_HEADROOM;
+ u32 xdp_stats[XDP_STATS_TOTAL];
struct xdp_buff xdp;
struct page *page;
u32 sub_len = 4;
@@ -1656,11 +1661,13 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
fec_enet_update_cbd(rxq, bdp, index);

if (xdp_prog) {
+ memset(xdp_stats, 0, sizeof(xdp_stats));
xdp_buff_clear_frags_flag(&xdp);
/* subtract 16bit shift and FCS */
xdp_prepare_buff(&xdp, page_address(page),
data_start, pkt_len - sub_len, false);
- ret = fec_enet_run_xdp(fep, xdp_prog, &xdp, rxq, index);
+ ret = fec_enet_run_xdp(fep, xdp_prog, &xdp, rxq,
+ xdp_stats, index);
xdp_result |= ret;
if (ret != FEC_ENET_XDP_PASS)
goto rx_processing_done;
@@ -1762,6 +1769,15 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
if (xdp_result & FEC_ENET_XDP_REDIR)
xdp_do_flush_map();

+ if (xdp_prog) {
+ int i;
+
+ u64_stats_update_begin(&rxq->syncp);
+ for (i = 0; i < XDP_STATS_TOTAL; i++)
+ rxq->stats[i] += xdp_stats[i];
+ u64_stats_update_end(&rxq->syncp);
+ }
+
return pkt_received;
}

@@ -2701,6 +2717,16 @@ static const struct fec_stat {

#define FEC_STATS_SIZE (ARRAY_SIZE(fec_stats) * sizeof(u64))

+static const char *fec_xdp_stat_strs[XDP_STATS_TOTAL] = {
+ "rx_xdp_redirect", /* RX_XDP_REDIRECT = 0, */
+ "rx_xdp_pass", /* RX_XDP_PASS, */
+ "rx_xdp_drop", /* RX_XDP_DROP, */
+ "rx_xdp_tx", /* RX_XDP_TX, */
+ "rx_xdp_tx_errors", /* RX_XDP_TX_ERRORS, */
+ "tx_xdp_xmit", /* TX_XDP_XMIT, */
+ "tx_xdp_xmit_errors", /* TX_XDP_XMIT_ERRORS, */
+};
+
static void fec_enet_update_ethtool_stats(struct net_device *dev)
{
struct fec_enet_private *fep = netdev_priv(dev);
@@ -2710,6 +2736,44 @@ static void fec_enet_update_ethtool_stats(struct net_device *dev)
fep->ethtool_stats[i] = readl(fep->hwp + fec_stats[i].offset);
}

+static void fec_enet_get_xdp_stats(struct fec_enet_private *fep, u64 *data)
+{
+ u64 xdp_stats[XDP_STATS_TOTAL] = { 0 };
+ struct fec_enet_priv_rx_q *rxq;
+ unsigned int start;
+ int i, j;
+
+ for (i = fep->num_rx_queues - 1; i >= 0; i--) {
+ rxq = fep->rx_queue[i];
+
+ do {
+ start = u64_stats_fetch_begin(&rxq->syncp);
+ for (j = 0; j < XDP_STATS_TOTAL; j++)
+ xdp_stats[j] += rxq->stats[j];
+ } while (u64_stats_fetch_retry(&rxq->syncp, start));
+ }
+
+ memcpy(data, xdp_stats, sizeof(xdp_stats));
+}
+
+static void fec_enet_page_pool_stats(struct fec_enet_private *fep, u64 *data)
+{
+ struct page_pool_stats stats = {};
+ struct fec_enet_priv_rx_q *rxq;
+ int i;
+
+ for (i = fep->num_rx_queues - 1; i >= 0; i--) {
+ rxq = fep->rx_queue[i];
+
+ if (!rxq->page_pool)
+ continue;
+
+ page_pool_get_stats(rxq->page_pool, &stats);
+ }
+
+ page_pool_ethtool_stats_get(data, &stats);
+}
+
static void fec_enet_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
@@ -2719,6 +2783,12 @@ static void fec_enet_get_ethtool_stats(struct net_device *dev,
fec_enet_update_ethtool_stats(dev);

memcpy(data, fep->ethtool_stats, FEC_STATS_SIZE);
+ data += FEC_STATS_SIZE / sizeof(u64);
+
+ fec_enet_get_xdp_stats(fep, data);
+ data += XDP_STATS_TOTAL;
+
+ fec_enet_page_pool_stats(fep, data);
}

static void fec_enet_get_strings(struct net_device *netdev,
@@ -2727,9 +2797,15 @@ static void fec_enet_get_strings(struct net_device *netdev,
int i;
switch (stringset) {
case ETH_SS_STATS:
- for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
- memcpy(data + i * ETH_GSTRING_LEN,
- fec_stats[i].name, ETH_GSTRING_LEN);
+ for (i = 0; i < ARRAY_SIZE(fec_stats); i++) {
+ memcpy(data, fec_stats[i].name, ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < ARRAY_SIZE(fec_xdp_stat_strs); i++) {
+ strscpy(data, fec_xdp_stat_strs[i], ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+ page_pool_ethtool_stats_get_strings(data);
break;
case ETH_SS_TEST:
net_selftest_get_strings(data);
@@ -2739,9 +2815,13 @@ static void fec_enet_get_strings(struct net_device *netdev,

static int fec_enet_get_sset_count(struct net_device *dev, int sset)
{
+ int count;
+
switch (sset) {
case ETH_SS_STATS:
- return ARRAY_SIZE(fec_stats);
+ count = ARRAY_SIZE(fec_stats) + XDP_STATS_TOTAL;
+ count += page_pool_ethtool_stats_get_count();
+ return count;
case ETH_SS_TEST:
return net_selftest_get_count();
default:
@@ -2752,6 +2832,7 @@ static int fec_enet_get_sset_count(struct net_device *dev, int sset)
static void fec_enet_clear_ethtool_stats(struct net_device *dev)
{
struct fec_enet_private *fep = netdev_priv(dev);
+ struct fec_enet_priv_rx_q *rxq;
int i;

/* Disable MIB statistics counters */
@@ -2760,6 +2841,11 @@ static void fec_enet_clear_ethtool_stats(struct net_device *dev)
for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
writel(0, fep->hwp + fec_stats[i].offset);

+ for (i = fep->num_rx_queues - 1; i >= 0; i--) {
+ rxq = fep->rx_queue[i];
+ memset(rxq->stats, 0, sizeof(rxq->stats));
+ }
+
/* Don't disable MIB statistics counters */
writel(0, fep->hwp + FEC_MIB_CTRLSTAT);
}
@@ -3126,6 +3212,8 @@ static void fec_enet_free_buffers(struct net_device *ndev)
for (i = 0; i < rxq->bd.ring_size; i++)
page_pool_release_page(rxq->page_pool, rxq->rx_skb_info[i].page);

+ memset(rxq->stats, 0, sizeof(rxq->stats));
+
if (xdp_rxq_info_is_reg(&rxq->xdp_rxq))
xdp_rxq_info_unreg(&rxq->xdp_rxq);
page_pool_destroy(rxq->page_pool);
--
2.34.1



2022-11-15 17:46:29

by Andrew Lunn

Subject: Re: [PATCH v4 2/2] net: fec: add xdp and page pool statistics

> @@ -1582,6 +1586,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
> struct bpf_prog *xdp_prog = READ_ONCE(fep->xdp_prog);
> u32 ret, xdp_result = FEC_ENET_XDP_PASS;
> u32 data_start = FEC_ENET_XDP_HEADROOM;
> + u32 xdp_stats[XDP_STATS_TOTAL];
> struct xdp_buff xdp;
> struct page *page;
> u32 sub_len = 4;
> @@ -1656,11 +1661,13 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
> fec_enet_update_cbd(rxq, bdp, index);
>
> if (xdp_prog) {
> + memset(xdp_stats, 0, sizeof(xdp_stats));
> xdp_buff_clear_frags_flag(&xdp);
> /* subtract 16bit shift and FCS */
> xdp_prepare_buff(&xdp, page_address(page),
> data_start, pkt_len - sub_len, false);
> - ret = fec_enet_run_xdp(fep, xdp_prog, &xdp, rxq, index);
> + ret = fec_enet_run_xdp(fep, xdp_prog, &xdp, rxq,
> + xdp_stats, index);
> xdp_result |= ret;
> if (ret != FEC_ENET_XDP_PASS)
> goto rx_processing_done;
> @@ -1762,6 +1769,15 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
> if (xdp_result & FEC_ENET_XDP_REDIR)
> xdp_do_flush_map();
>
> + if (xdp_prog) {
> + int i;
> +
> + u64_stats_update_begin(&rxq->syncp);
> + for (i = 0; i < XDP_STATS_TOTAL; i++)
> + rxq->stats[i] += xdp_stats[i];
> + u64_stats_update_end(&rxq->syncp);
> + }
> +

This looks wrong. You are processing up to the NAPI budget, 64 frames,
in a loop. The memset to 0 happens inside the loop, but you do the
accumulation outside the loop?
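
i.e. I would expect the scratch counters to be zeroed once per NAPI
poll, before the ring is walked, and folded into the u64 queue counters
once after the walk. Rough, untested sketch against the code in this
patch, with the unrelated lines elided:

	u32 xdp_stats[XDP_STATS_TOTAL];
	int i;

	/* zero the per-poll scratch counters once, before the ring loop */
	memset(xdp_stats, 0, sizeof(xdp_stats));

	while (/* descriptors left && pkt_received < budget */) {
		...
		if (xdp_prog) {
			ret = fec_enet_run_xdp(fep, xdp_prog, &xdp, rxq,
					       xdp_stats, index);
			...
		}
		...
	}

	/* fold the per-poll counters into the 64-bit queue stats once */
	if (xdp_prog) {
		u64_stats_update_begin(&rxq->syncp);
		for (i = 0; i < XDP_STATS_TOTAL; i++)
			rxq->stats[i] += xdp_stats[i];
		u64_stats_update_end(&rxq->syncp);
	}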

This patch is getting pretty big. Please break it up, at least into
one patch for XDP stats and one for page pool stats.

Andrew

2022-11-15 18:10:04

by Shenwei Wang

Subject: RE: [EXT] Re: [PATCH v4 2/2] net: fec: add xdp and page pool statistics



> This looks wrong. You are processing up to the NAPI budget, 64 frames, in a loop.
> The memset to 0 happens inside the loop, but you do the accumulation outside
> the loop?
>

My bad. The memset should be moved outside the loop.

Thanks,
Shenwei

> This patch is getting pretty big. Please break it up, at least into one patch for XDP
> stats and one for page pool stats.
>
> Andrew