2018-03-02 23:44:46

by Antoine Tenart

Subject: [PATCH net-next 0/5] net: mvpp2: jumbo frames support

Hi all,

This series enables jumbo frames support in the Marvell PPv2 driver. The
first two patches rework the buffer management, then two patches prepare
for the final patch, which adds the jumbo frames support to the driver.

This is based on top of net-next, and was tested on a mcbin.

Thanks!
Antoine

Antoine Tenart (1):
net: mvpp2: enable UDP/TCP checksum over IPv6

Stefan Chulski (3):
net: mvpp2: use the same buffer pool for all ports
net: mvpp2: update the BM buffer free/destroy logic
net: mvpp2: jumbo frames support

Yan Markman (1):
net: mvpp2: use a data size of 10kB for Tx FIFO on port 0

drivers/net/ethernet/marvell/mvpp2.c | 231 +++++++++++++++++++++++++----------
1 file changed, 165 insertions(+), 66 deletions(-)

--
2.14.3



2018-03-02 16:25:47

by Antoine Tenart

Subject: [PATCH net-next 1/5] net: mvpp2: use the same buffer pool for all ports

From: Stefan Chulski <[email protected]>

This patch configures the buffer manager long pool for all ports that are
part of the same CP. Long pool separation between ports is redundant,
since there is no performance improvement when different pools are used.

Signed-off-by: Stefan Chulski <[email protected]>
[Antoine: cosmetic cleanup, commit message]
Signed-off-by: Antoine Tenart <[email protected]>
---
drivers/net/ethernet/marvell/mvpp2.c | 76 ++++++++++++++++++------------------
1 file changed, 37 insertions(+), 39 deletions(-)

diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index 2acf499fc317..ca349d520d22 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -808,23 +808,23 @@ enum mvpp2_prs_l3_cast {
#define MVPP22_RSS_TABLE_ENTRIES 32

/* BM constants */
-#define MVPP2_BM_POOLS_NUM 8
#define MVPP2_BM_LONG_BUF_NUM 1024
#define MVPP2_BM_SHORT_BUF_NUM 2048
#define MVPP2_BM_POOL_SIZE_MAX (16*1024 - MVPP2_BM_POOL_PTR_ALIGN/4)
#define MVPP2_BM_POOL_PTR_ALIGN 128
-#define MVPP2_BM_SWF_LONG_POOL(port) ((port > 2) ? 2 : port)
-#define MVPP2_BM_SWF_SHORT_POOL 3

/* BM cookie (32 bits) definition */
#define MVPP2_BM_COOKIE_POOL_OFFS 8
#define MVPP2_BM_COOKIE_CPU_OFFS 24

+#define MVPP2_BM_SHORT_FRAME_SIZE 512
+#define MVPP2_BM_LONG_FRAME_SIZE 2048
/* BM short pool packet size
* These value assure that for SWF the total number
* of bytes allocated for each buffer will be 512
*/
-#define MVPP2_BM_SHORT_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(512)
+#define MVPP2_BM_SHORT_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(MVPP2_BM_SHORT_FRAME_SIZE)
+#define MVPP2_BM_LONG_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(MVPP2_BM_LONG_FRAME_SIZE)

#define MVPP21_ADDR_SPACE_SZ 0
#define MVPP22_ADDR_SPACE_SZ SZ_64K
@@ -832,12 +832,17 @@ enum mvpp2_prs_l3_cast {
#define MVPP2_MAX_THREADS 8
#define MVPP2_MAX_QVECS MVPP2_MAX_THREADS

-enum mvpp2_bm_type {
- MVPP2_BM_FREE,
- MVPP2_BM_SWF_LONG,
- MVPP2_BM_SWF_SHORT
+enum mvpp2_bm_pool_log_num {
+ MVPP2_BM_SHORT,
+ MVPP2_BM_LONG,
+ MVPP2_BM_POOLS_NUM
};

+static struct {
+ int pkt_size;
+ int buf_num;
+} mvpp2_pools[MVPP2_BM_POOLS_NUM];
+
/* GMAC MIB Counters register definitions */
#define MVPP21_MIB_COUNTERS_OFFSET 0x1000
#define MVPP21_MIB_COUNTERS_PORT_SZ 0x400
@@ -1266,7 +1271,6 @@ struct mvpp2_cls_lookup_entry {
struct mvpp2_bm_pool {
/* Pool number in the range 0-7 */
int id;
- enum mvpp2_bm_type type;

/* Buffer Pointers Pool External (BPPE) size */
int size;
@@ -4195,7 +4199,6 @@ static int mvpp2_bm_pool_create(struct platform_device *pdev,
val |= MVPP2_BM_START_MASK;
mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);

- bm_pool->type = MVPP2_BM_FREE;
bm_pool->size = size;
bm_pool->pkt_size = 0;
bm_pool->buf_num = 0;
@@ -4345,6 +4348,17 @@ static int mvpp2_bm_init(struct platform_device *pdev, struct mvpp2 *priv)
return 0;
}

+static void mvpp2_setup_bm_pool(void)
+{
+ /* Short pool */
+ mvpp2_pools[MVPP2_BM_SHORT].buf_num = MVPP2_BM_SHORT_BUF_NUM;
+ mvpp2_pools[MVPP2_BM_SHORT].pkt_size = MVPP2_BM_SHORT_PKT_SIZE;
+
+ /* Long pool */
+ mvpp2_pools[MVPP2_BM_LONG].buf_num = MVPP2_BM_LONG_BUF_NUM;
+ mvpp2_pools[MVPP2_BM_LONG].pkt_size = MVPP2_BM_LONG_PKT_SIZE;
+}
+
/* Attach long pool to rxq */
static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
int lrxq, int long_pool)
@@ -4483,13 +4497,11 @@ static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
bm_pool->buf_num += i;

netdev_dbg(port->dev,
- "%s pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n",
- bm_pool->type == MVPP2_BM_SWF_SHORT ? "short" : " long",
+ "pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n",
bm_pool->id, bm_pool->pkt_size, buf_size, total_size);

netdev_dbg(port->dev,
- "%s pool %d: %d of %d buffers added\n",
- bm_pool->type == MVPP2_BM_SWF_SHORT ? "short" : " long",
+ "pool %d: %d of %d buffers added\n",
bm_pool->id, i, buf_num);
return i;
}
@@ -4498,25 +4510,20 @@ static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
* pool pointer on success
*/
static struct mvpp2_bm_pool *
-mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
- int pkt_size)
+mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, int pkt_size)
{
struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
int num;

- if (new_pool->type != MVPP2_BM_FREE && new_pool->type != type) {
- netdev_err(port->dev, "mixing pool types is forbidden\n");
+ if (pool < MVPP2_BM_SHORT || pool > MVPP2_BM_LONG) {
+ netdev_err(port->dev, "Invalid pool %d\n", pool);
return NULL;
}

- if (new_pool->type == MVPP2_BM_FREE)
- new_pool->type = type;
-
/* Allocate buffers in case BM pool is used as long pool, but packet
* size doesn't match MTU or BM pool hasn't being used yet
*/
- if (((type == MVPP2_BM_SWF_LONG) && (pkt_size > new_pool->pkt_size)) ||
- (new_pool->pkt_size == 0)) {
+ if (new_pool->pkt_size == 0) {
int pkts_num;

/* Set default buffer number or free all the buffers in case
@@ -4524,9 +4531,7 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
*/
pkts_num = new_pool->buf_num;
if (pkts_num == 0)
- pkts_num = type == MVPP2_BM_SWF_LONG ?
- MVPP2_BM_LONG_BUF_NUM :
- MVPP2_BM_SHORT_BUF_NUM;
+ pkts_num = mvpp2_pools[pool].buf_num;
else
mvpp2_bm_bufs_free(port->dev->dev.parent,
port->priv, new_pool);
@@ -4558,9 +4563,8 @@ static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)

if (!port->pool_long) {
port->pool_long =
- mvpp2_bm_pool_use(port, MVPP2_BM_SWF_LONG_POOL(port->id),
- MVPP2_BM_SWF_LONG,
- port->pkt_size);
+ mvpp2_bm_pool_use(port, MVPP2_BM_LONG,
+ mvpp2_pools[MVPP2_BM_LONG].pkt_size);
if (!port->pool_long)
return -ENOMEM;

@@ -4572,9 +4576,8 @@ static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)

if (!port->pool_short) {
port->pool_short =
- mvpp2_bm_pool_use(port, MVPP2_BM_SWF_SHORT_POOL,
- MVPP2_BM_SWF_SHORT,
- MVPP2_BM_SHORT_PKT_SIZE);
+ mvpp2_bm_pool_use(port, MVPP2_BM_SHORT,
+ mvpp2_pools[MVPP2_BM_SHORT].pkt_size);
if (!port->pool_short)
return -ENOMEM;

@@ -4593,7 +4596,6 @@ static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
struct mvpp2_port *port = netdev_priv(dev);
struct mvpp2_bm_pool *port_pool = port->pool_long;
int num, pkts_num = port_pool->buf_num;
- int pkt_size = MVPP2_RX_PKT_SIZE(mtu);

/* Update BM pool with new buffer size */
mvpp2_bm_bufs_free(dev->dev.parent, port->priv, port_pool);
@@ -4602,18 +4604,12 @@ static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
return -EIO;
}

- port_pool->pkt_size = pkt_size;
- port_pool->frag_size = SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
- MVPP2_SKB_SHINFO_SIZE;
num = mvpp2_bm_bufs_add(port, port_pool, pkts_num);
if (num != pkts_num) {
WARN(1, "pool %d: %d of %d allocated\n",
port_pool->id, num, pkts_num);
return -EIO;
}
-
- mvpp2_bm_pool_bufsize_set(port->priv, port_pool,
- MVPP2_RX_BUF_SIZE(port_pool->pkt_size));
dev->mtu = mtu;
netdev_update_features(dev);
return 0;
@@ -8630,6 +8626,8 @@ static int mvpp2_probe(struct platform_device *pdev)
priv->sysctrl_base = NULL;
}

+ mvpp2_setup_bm_pool();
+
for (i = 0; i < MVPP2_MAX_THREADS; i++) {
u32 addr_space_sz;

--
2.14.3


2018-03-02 16:27:56

by Antoine Tenart

Subject: [PATCH net-next 5/5] net: mvpp2: jumbo frames support

From: Stefan Chulski <[email protected]>

This patch adds support for jumbo frames in the Marvell PPv2 driver. A
third buffer pool is added with 10KB buffers, which is used for packets
larger than 1518B when the MTU is higher than 1518B. Please note that
only port 0 supports hardware checksum offload in this case, due to the
Tx FIFO size limitation.

Signed-off-by: Stefan Chulski <[email protected]>
[Antoine: cosmetic cleanup, commit message]
Signed-off-by: Antoine Tenart <[email protected]>
---
drivers/net/ethernet/marvell/mvpp2.c | 96 +++++++++++++++++++++++++++---------
1 file changed, 74 insertions(+), 22 deletions(-)

diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index 31a2c3039e2e..543e8c11a47f 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -815,6 +815,7 @@ enum mvpp2_prs_l3_cast {
#define MVPP22_RSS_TABLE_ENTRIES 32

/* BM constants */
+#define MVPP2_BM_JUMBO_BUF_NUM 512
#define MVPP2_BM_LONG_BUF_NUM 1024
#define MVPP2_BM_SHORT_BUF_NUM 2048
#define MVPP2_BM_POOL_SIZE_MAX (16*1024 - MVPP2_BM_POOL_PTR_ALIGN/4)
@@ -826,12 +827,14 @@ enum mvpp2_prs_l3_cast {

#define MVPP2_BM_SHORT_FRAME_SIZE 512
#define MVPP2_BM_LONG_FRAME_SIZE 2048
+#define MVPP2_BM_JUMBO_FRAME_SIZE 10240
/* BM short pool packet size
* These value assure that for SWF the total number
* of bytes allocated for each buffer will be 512
*/
#define MVPP2_BM_SHORT_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(MVPP2_BM_SHORT_FRAME_SIZE)
#define MVPP2_BM_LONG_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(MVPP2_BM_LONG_FRAME_SIZE)
+#define MVPP2_BM_JUMBO_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(MVPP2_BM_JUMBO_FRAME_SIZE)

#define MVPP21_ADDR_SPACE_SZ 0
#define MVPP22_ADDR_SPACE_SZ SZ_64K
@@ -842,6 +845,7 @@ enum mvpp2_prs_l3_cast {
enum mvpp2_bm_pool_log_num {
MVPP2_BM_SHORT,
MVPP2_BM_LONG,
+ MVPP2_BM_JUMBO,
MVPP2_BM_POOLS_NUM
};

@@ -4393,6 +4397,10 @@ static void mvpp2_setup_bm_pool(void)
/* Long pool */
mvpp2_pools[MVPP2_BM_LONG].buf_num = MVPP2_BM_LONG_BUF_NUM;
mvpp2_pools[MVPP2_BM_LONG].pkt_size = MVPP2_BM_LONG_PKT_SIZE;
+
+ /* Jumbo pool */
+ mvpp2_pools[MVPP2_BM_JUMBO].buf_num = MVPP2_BM_JUMBO_BUF_NUM;
+ mvpp2_pools[MVPP2_BM_JUMBO].pkt_size = MVPP2_BM_JUMBO_PKT_SIZE;
}

/* Attach long pool to rxq */
@@ -4551,7 +4559,7 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, int pkt_size)
struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
int num;

- if (pool < MVPP2_BM_SHORT || pool > MVPP2_BM_LONG) {
+ if (pool < MVPP2_BM_SHORT || pool > MVPP2_BM_JUMBO) {
netdev_err(port->dev, "Invalid pool %d\n", pool);
return NULL;
}
@@ -4596,11 +4604,24 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, int pkt_size)
static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
{
int rxq;
+ enum mvpp2_bm_pool_log_num long_log_pool, short_log_pool;
+
+ /* If port pkt_size is higher than 1518B:
+ * HW Long pool - SW Jumbo pool, HW Short pool - SW Short pool
+ * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool
+ */
+ if (port->pkt_size > MVPP2_BM_LONG_PKT_SIZE) {
+ long_log_pool = MVPP2_BM_JUMBO;
+ short_log_pool = MVPP2_BM_LONG;
+ } else {
+ long_log_pool = MVPP2_BM_LONG;
+ short_log_pool = MVPP2_BM_SHORT;
+ }

if (!port->pool_long) {
port->pool_long =
- mvpp2_bm_pool_use(port, MVPP2_BM_LONG,
- mvpp2_pools[MVPP2_BM_LONG].pkt_size);
+ mvpp2_bm_pool_use(port, long_log_pool,
+ mvpp2_pools[long_log_pool].pkt_size);
if (!port->pool_long)
return -ENOMEM;

@@ -4612,8 +4633,8 @@ static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)

if (!port->pool_short) {
port->pool_short =
- mvpp2_bm_pool_use(port, MVPP2_BM_SHORT,
- mvpp2_pools[MVPP2_BM_SHORT].pkt_size);
+ mvpp2_bm_pool_use(port, short_log_pool,
+ mvpp2_pools[short_log_pool].pkt_size);
if (!port->pool_short)
return -ENOMEM;

@@ -4630,24 +4651,49 @@ static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
{
struct mvpp2_port *port = netdev_priv(dev);
- struct mvpp2_bm_pool *port_pool = port->pool_long;
- int num, pkts_num = port_pool->buf_num;
+ enum mvpp2_bm_pool_log_num new_long_pool;
+ int pkt_size = MVPP2_RX_PKT_SIZE(mtu);

- /* Update BM pool with new buffer size */
- mvpp2_bm_bufs_free(dev->dev.parent, port->priv, port_pool,
- port_pool->buf_num);
- if (port_pool->buf_num) {
- WARN(1, "cannot free all buffers in pool %d\n", port_pool->id);
- return -EIO;
+ /* If port MTU is higher than 1518B:
+ * HW Long pool - SW Jumbo pool, HW Short pool - SW Short pool
+ * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool
+ */
+ if (pkt_size > MVPP2_BM_LONG_PKT_SIZE)
+ new_long_pool = MVPP2_BM_JUMBO;
+ else
+ new_long_pool = MVPP2_BM_LONG;
+
+ if (new_long_pool != port->pool_long->id) {
+ /* Remove port from old short & long pool */
+ port->pool_long = mvpp2_bm_pool_use(port, port->pool_long->id,
+ port->pool_long->pkt_size);
+ port->pool_long->port_map &= ~(1 << port->id);
+ port->pool_long = NULL;
+
+ port->pool_short = mvpp2_bm_pool_use(port, port->pool_short->id,
+ port->pool_short->pkt_size);
+ port->pool_short->port_map &= ~(1 << port->id);
+ port->pool_short = NULL;
+
+ port->pkt_size = pkt_size;
+
+ /* Add port to new short & long pool */
+ mvpp2_swf_bm_pool_init(port);
+
+ /* Update L4 checksum when jumbo enable/disable on port */
+ if (new_long_pool == MVPP2_BM_JUMBO && port->id != 0) {
+ dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
+ dev->hw_features &= ~(NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM);
+ } else {
+ dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ }
}

- num = mvpp2_bm_bufs_add(port, port_pool, pkts_num);
- if (num != pkts_num) {
- WARN(1, "pool %d: %d of %d allocated\n",
- port_pool->id, num, pkts_num);
- return -EIO;
- }
dev->mtu = mtu;
+ dev->wanted_features = dev->features;
+
netdev_update_features(dev);
return 0;
}
@@ -8326,13 +8372,19 @@ static int mvpp2_port_probe(struct platform_device *pdev,
dev->features = features | NETIF_F_RXCSUM;
dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO |
NETIF_F_HW_VLAN_CTAG_FILTER;
+
+ if (port->pool_long->id == MVPP2_BM_JUMBO && port->id != 0) {
+ dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
+ dev->hw_features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
+ }
+
dev->vlan_features |= features;
dev->gso_max_segs = MVPP2_MAX_TSO_SEGS;

- /* MTU range: 68 - 9676 */
+ /* MTU range: 68 - 9704 */
dev->min_mtu = ETH_MIN_MTU;
- /* 9676 == 9700 - 20 and rounding to 8 */
- dev->max_mtu = 9676;
+ /* 9704 == 9728 - 20 and rounding to 8 */
+ dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE;

err = register_netdev(dev);
if (err < 0) {
--
2.14.3


2018-03-02 16:37:04

by Thomas Petazzoni

Subject: Re: [PATCH net-next 1/5] net: mvpp2: use the same buffer pool for all ports

Hello,

On Fri, 2 Mar 2018 16:40:40 +0100, Antoine Tenart wrote:
> +static struct {
> + int pkt_size;
> + int buf_num;
> +} mvpp2_pools[MVPP2_BM_POOLS_NUM];

Any reason for not doing:

} mvpp2_pools[MVPP2_BM_POOLS_NUM] = {
[MVPP2_BM_SHORT] = {
.pkt_size = MVPP2_BM_SHORT_PKT_SIZE,
.buf_num = MVPP2_BM_SHORT_BUF_NUM
},
[MVPP2_BM_LONG] = {
.pkt_size = MVPP2_BM_LONG_PKT_SIZE,
.buf_num = MVPP2_BM_LONG_BUF_NUM,
},
};

And get rid of:

> +static void mvpp2_setup_bm_pool(void)
> +{
> + /* Short pool */
> + mvpp2_pools[MVPP2_BM_SHORT].buf_num = MVPP2_BM_SHORT_BUF_NUM;
> + mvpp2_pools[MVPP2_BM_SHORT].pkt_size = MVPP2_BM_SHORT_PKT_SIZE;
> +
> + /* Long pool */
> + mvpp2_pools[MVPP2_BM_LONG].buf_num = MVPP2_BM_LONG_BUF_NUM;
> + mvpp2_pools[MVPP2_BM_LONG].pkt_size = MVPP2_BM_LONG_PKT_SIZE;
> +}

?

Best regards,

Thomas
--
Thomas Petazzoni, CTO, Bootlin (formerly Free Electrons)
Embedded Linux and Kernel engineering
http://bootlin.com

2018-03-02 19:25:54

by Antoine Tenart

Subject: [PATCH net-next 3/5] net: mvpp2: use a data size of 10kB for Tx FIFO on port 0

From: Yan Markman <[email protected]>

This patch sets the Tx FIFO data size on port 0 to 10kB. This prepares
the PPv2 driver for the jumbo frame support addition, as the hardware
will need large enough Tx FIFO buffers when dealing with frames going
through an interface with an MTU of 9000.

Signed-off-by: Yan Markman <[email protected]>
[Antoine: commit message, small reworks.]
Signed-off-by: Antoine Tenart <[email protected]>
---
drivers/net/ethernet/marvell/mvpp2.c | 27 ++++++++++++++++++++++-----
1 file changed, 22 insertions(+), 5 deletions(-)

diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index db511dd4249d..39635de51dd7 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -44,6 +44,7 @@
#define MVPP2_RX_ATTR_FIFO_SIZE_REG(port) (0x20 + 4 * (port))
#define MVPP2_RX_MIN_PKT_SIZE_REG 0x60
#define MVPP2_RX_FIFO_INIT_REG 0x64
+#define MVPP22_TX_FIFO_THRESH_REG(port) (0x8840 + 4 * (port))
#define MVPP22_TX_FIFO_SIZE_REG(port) (0x8860 + 4 * (port))

/* RX DMA Top Registers */
@@ -542,6 +543,11 @@
/* TX FIFO constants */
#define MVPP22_TX_FIFO_DATA_SIZE_10KB 0xa
#define MVPP22_TX_FIFO_DATA_SIZE_3KB 0x3
+#define MVPP2_TX_FIFO_THRESHOLD_MIN 256
+#define MVPP2_TX_FIFO_THRESHOLD_10KB \
+ (MVPP22_TX_FIFO_DATA_SIZE_10KB * 1024 - MVPP2_TX_FIFO_THRESHOLD_MIN)
+#define MVPP2_TX_FIFO_THRESHOLD_3KB \
+ (MVPP22_TX_FIFO_DATA_SIZE_3KB * 1024 - MVPP2_TX_FIFO_THRESHOLD_MIN)

/* RX buffer constants */
#define MVPP2_SKB_SHINFO_SIZE \
@@ -8456,14 +8462,25 @@ static void mvpp22_rx_fifo_init(struct mvpp2 *priv)
mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
}

-/* Initialize Tx FIFO's */
+/* Initialize Tx FIFO's
+ * The CP110's total tx-fifo size is 19kB.
+ * Use large-size 10kB for fast port but 3kB for others.
+ */
static void mvpp22_tx_fifo_init(struct mvpp2 *priv)
{
- int port;
+ int port, size, thrs;

- for (port = 0; port < MVPP2_MAX_PORTS; port++)
- mvpp2_write(priv, MVPP22_TX_FIFO_SIZE_REG(port),
- MVPP22_TX_FIFO_DATA_SIZE_3KB);
+ for (port = 0; port < MVPP2_MAX_PORTS; port++) {
+ if (port == 0) {
+ size = MVPP22_TX_FIFO_DATA_SIZE_10KB;
+ thrs = MVPP2_TX_FIFO_THRESHOLD_10KB;
+ } else {
+ size = MVPP22_TX_FIFO_DATA_SIZE_3KB;
+ thrs = MVPP2_TX_FIFO_THRESHOLD_3KB;
+ }
+ mvpp2_write(priv, MVPP22_TX_FIFO_SIZE_REG(port), size);
+ mvpp2_write(priv, MVPP22_TX_FIFO_THRESH_REG(port), thrs);
+ }
}

static void mvpp2_axi_init(struct mvpp2 *priv)
--
2.14.3


2018-03-02 19:26:35

by Antoine Tenart

Subject: [PATCH net-next 2/5] net: mvpp2: update the BM buffer free/destroy logic

From: Stefan Chulski <[email protected]>

The buffer free routine is updated to release only a given number of
buffers, and the destroy routine now checks the actual number of buffers
in the (BPPI and BPPE) HW counters before draining the pools. This
change helps with adding jumbo frames support.

Signed-off-by: Stefan Chulski <[email protected]>
[Antoine: cosmetic cleanup, commit message]
Signed-off-by: Antoine Tenart <[email protected]>
---
drivers/net/ethernet/marvell/mvpp2.c | 45 ++++++++++++++++++++++++++++++------
1 file changed, 38 insertions(+), 7 deletions(-)

diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index ca349d520d22..db511dd4249d 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -258,6 +258,7 @@
#define MVPP2_BM_BPPI_READ_PTR_REG(pool) (0x6100 + ((pool) * 4))
#define MVPP2_BM_BPPI_PTRS_NUM_REG(pool) (0x6140 + ((pool) * 4))
#define MVPP2_BM_BPPI_PTR_NUM_MASK 0x7ff
+#define MVPP22_BM_POOL_PTRS_NUM_MASK 0xfff8
#define MVPP2_BM_BPPI_PREFETCH_FULL_MASK BIT(16)
#define MVPP2_BM_POOL_CTRL_REG(pool) (0x6200 + ((pool) * 4))
#define MVPP2_BM_START_MASK BIT(0)
@@ -4251,11 +4252,17 @@ static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv,

/* Free all buffers from the pool */
static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
- struct mvpp2_bm_pool *bm_pool)
+ struct mvpp2_bm_pool *bm_pool, int buf_num)
{
int i;

- for (i = 0; i < bm_pool->buf_num; i++) {
+ if (buf_num > bm_pool->buf_num) {
+ WARN(1, "Pool does not have so many bufs pool(%d) bufs(%d)\n",
+ bm_pool->id, buf_num);
+ buf_num = bm_pool->buf_num;
+ }
+
+ for (i = 0; i < buf_num; i++) {
dma_addr_t buf_dma_addr;
phys_addr_t buf_phys_addr;
void *data;
@@ -4277,16 +4284,39 @@ static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
bm_pool->buf_num -= i;
}

+/* Check number of buffers in BM pool */
+int mvpp2_check_hw_buf_num(struct mvpp2 *priv, struct mvpp2_bm_pool *bm_pool)
+{
+ int buf_num = 0;
+
+ buf_num += mvpp2_read(priv, MVPP2_BM_POOL_PTRS_NUM_REG(bm_pool->id)) &
+ MVPP22_BM_POOL_PTRS_NUM_MASK;
+ buf_num += mvpp2_read(priv, MVPP2_BM_BPPI_PTRS_NUM_REG(bm_pool->id)) &
+ MVPP2_BM_BPPI_PTR_NUM_MASK;
+
+ /* HW has one buffer ready which is not reflected in the counters */
+ if (buf_num)
+ buf_num += 1;
+
+ return buf_num;
+}
+
/* Cleanup pool */
static int mvpp2_bm_pool_destroy(struct platform_device *pdev,
struct mvpp2 *priv,
struct mvpp2_bm_pool *bm_pool)
{
+ int buf_num;
u32 val;

- mvpp2_bm_bufs_free(&pdev->dev, priv, bm_pool);
- if (bm_pool->buf_num) {
- WARN(1, "cannot free all buffers in pool %d\n", bm_pool->id);
+ buf_num = mvpp2_check_hw_buf_num(priv, bm_pool);
+ mvpp2_bm_bufs_free(&pdev->dev, priv, bm_pool, buf_num);
+
+ /* Check buffer counters after free */
+ buf_num = mvpp2_check_hw_buf_num(priv, bm_pool);
+ if (buf_num) {
+ WARN(1, "cannot free all buffers in pool %d, buf_num left %d\n",
+ bm_pool->id, bm_pool->buf_num);
return 0;
}

@@ -4534,7 +4564,7 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, int pkt_size)
pkts_num = mvpp2_pools[pool].buf_num;
else
mvpp2_bm_bufs_free(port->dev->dev.parent,
- port->priv, new_pool);
+ port->priv, new_pool, pkts_num);

new_pool->pkt_size = pkt_size;
new_pool->frag_size =
@@ -4598,7 +4628,8 @@ static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
int num, pkts_num = port_pool->buf_num;

/* Update BM pool with new buffer size */
- mvpp2_bm_bufs_free(dev->dev.parent, port->priv, port_pool);
+ mvpp2_bm_bufs_free(dev->dev.parent, port->priv, port_pool,
+ port_pool->buf_num);
if (port_pool->buf_num) {
WARN(1, "cannot free all buffers in pool %d\n", port_pool->id);
return -EIO;
--
2.14.3


2018-03-02 19:51:36

by Thomas Petazzoni

Subject: Re: [PATCH net-next 3/5] net: mvpp2: use a data size of 10kB for Tx FIFO on port 0

Hello,

On Fri, 2 Mar 2018 16:40:42 +0100, Antoine Tenart wrote:

> -/* Initialize Tx FIFO's */
> +/* Initialize Tx FIFO's
> + * The CP110's total tx-fifo size is 19kB.
> + * Use large-size 10kB for fast port but 3kB for others.
> + */

Is there a reason to hardcode 10KB for port 0, and 3KB for the other
ports ? Would there be use cases where the user may want different
configurations ?

It's just that it feels very "hardcoded" to enforce specifically those
numbers.

Also, does it make sense to mention the CP110 here ? Is this 19 KB
limitation a limit of the PPv2.2 IP, or of the CP110 ?

Thomas
--
Thomas Petazzoni, CTO, Bootlin (formerly Free Electrons)
Embedded Linux and Kernel engineering
http://bootlin.com

2018-03-02 23:44:40

by Antoine Tenart

Subject: [PATCH net-next 4/5] net: mvpp2: enable UDP/TCP checksum over IPv6

This patch adds the NETIF_F_IPV6_CSUM flag to the driver's features to
enable UDP/TCP checksum over IPv6. No extra configuration of the engine
is needed on top of the IPv4 counterpart, which is already in the
features list (NETIF_F_IP_CSUM).

Signed-off-by: Antoine Tenart <[email protected]>
---
drivers/net/ethernet/marvell/mvpp2.c | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index 39635de51dd7..31a2c3039e2e 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -8321,7 +8321,8 @@ static int mvpp2_port_probe(struct platform_device *pdev,
}
}

- features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
+ features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_TSO;
dev->features = features | NETIF_F_RXCSUM;
dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO |
NETIF_F_HW_VLAN_CTAG_FILTER;
--
2.14.3


2018-03-03 00:14:16

by Thomas Petazzoni

Subject: Re: [PATCH net-next 5/5] net: mvpp2: jumbo frames support

Hello,

On Fri, 2 Mar 2018 16:40:44 +0100, Antoine Tenart wrote:

> /* Attach long pool to rxq */
> @@ -4551,7 +4559,7 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, int pkt_size)
> struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
> int num;
>
> - if (pool < MVPP2_BM_SHORT || pool > MVPP2_BM_LONG) {
> + if (pool < MVPP2_BM_SHORT || pool > MVPP2_BM_JUMBO) {

pool could be an unsigned, which would avoid the need for
MVPP2_BM_SHORT.

And for the upper limit, you have a convenient MVPP2_BM_POOLS_NUM in
your mvpp2_bm_pool_log_num enum, so why not use it?
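
Something along these lines, for instance (just a sketch of the idea,
not tested; only the check changes):

static struct mvpp2_bm_pool *
mvpp2_bm_pool_use(struct mvpp2_port *port, unsigned int pool, int pkt_size)
{
	struct mvpp2_bm_pool *new_pool;

	/* With an unsigned index, a single upper-bound check against
	 * MVPP2_BM_POOLS_NUM covers both ends of the range.
	 */
	if (pool >= MVPP2_BM_POOLS_NUM) {
		netdev_err(port->dev, "Invalid pool %u\n", pool);
		return NULL;
	}

	new_pool = &port->priv->bm_pools[pool];
	...
}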



> netdev_err(port->dev, "Invalid pool %d\n", pool);
> return NULL;
> }
> @@ -4596,11 +4604,24 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, int pkt_size)
> static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
> {
> int rxq;
> + enum mvpp2_bm_pool_log_num long_log_pool, short_log_pool;
> +
> + /* If port pkt_size is higher than 1518B:
> + * HW Long pool - SW Jumbo pool, HW Short pool - SW Short pool

The comment is wrong. In this case, the HW short pool is the SW long
pool.

> + * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool
> + */
> + if (port->pkt_size > MVPP2_BM_LONG_PKT_SIZE) {
> + long_log_pool = MVPP2_BM_JUMBO;
> + short_log_pool = MVPP2_BM_LONG;

See here.

> + } else {
> + long_log_pool = MVPP2_BM_LONG;
> + short_log_pool = MVPP2_BM_SHORT;
> + }


> + /* If port MTU is higher than 1518B:
> + * HW Long pool - SW Jumbo pool, HW Short pool - SW Short pool

And the comment is wrong here as well :)

> + * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool
> + */
> + if (pkt_size > MVPP2_BM_LONG_PKT_SIZE)
> + new_long_pool = MVPP2_BM_JUMBO;
> + else
> + new_long_pool = MVPP2_BM_LONG;
> +
> + if (new_long_pool != port->pool_long->id) {
> + /* Remove port from old short & long pool */
> + port->pool_long = mvpp2_bm_pool_use(port, port->pool_long->id,
> + port->pool_long->pkt_size);
> + port->pool_long->port_map &= ~(1 << port->id);

BIT(port->id) ?

> + port->pool_long = NULL;
> +
> + port->pool_short = mvpp2_bm_pool_use(port, port->pool_short->id,
> + port->pool_short->pkt_size);
> + port->pool_short->port_map &= ~(1 << port->id);

Ditto.

> + if (port->pool_long->id == MVPP2_BM_JUMBO && port->id != 0) {

Again, all over the place we hardcode the fact that Jumbo frames can
only be used on port 0. I know port 0 is the only one that can do 10G,
but are there possibly some use cases where you may want Jumbo frame on
another port ?

This all really feels very hardcoded to me.

> + dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
> + dev->hw_features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
> + }
> +
> dev->vlan_features |= features;
> dev->gso_max_segs = MVPP2_MAX_TSO_SEGS;
>
> - /* MTU range: 68 - 9676 */
> + /* MTU range: 68 - 9704 */
> dev->min_mtu = ETH_MIN_MTU;
> - /* 9676 == 9700 - 20 and rounding to 8 */
> - dev->max_mtu = 9676;

How come we already had a max_mtu of 9676 without Jumbo frame support ?

> + /* 9704 == 9728 - 20 and rounding to 8 */
> + dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE;

Is this correct for all ports ? Shouldn't the maximum MTU be different
between port 0 (that supports Jumbo frames) and the other ports ?

Thanks!

Thomas
--
Thomas Petazzoni, CTO, Bootlin (formerly Free Electrons)
Embedded Linux and Kernel engineering
http://bootlin.com

2018-03-04 06:32:56

by Stefan Chulski

Subject: RE: [PATCH net-next 3/5] net: mvpp2: use a data size of 10kB for Tx FIFO on port 0


> On Fri, 2 Mar 2018 16:40:42 +0100, Antoine Tenart wrote:
>
> > -/* Initialize Tx FIFO's */
> > +/* Initialize Tx FIFO's
> > + * The CP110's total tx-fifo size is 19kB.
> > + * Use large-size 10kB for fast port but 3kB for others.
> > + */
>
> Is there a reason to hardcode 10KB for port 0, and 3KB for the other
> ports ? Would there be use cases where the user may want different
> configurations ?
>

The design requirement is a 10KB TX FIFO for the 10Gb/s port and 2.5KB for the 2.5Gb/s ports.
Since only port 0 supports 10Gb/s and ports 1&2 support up to 2.5Gb/s,
I don't see any reason to change this configuration.
Also, the TX FIFO size can only be set during probe.

> It's just that it feels very "hardcoded" to enforce specifically those numbers.
>
> Also, does it make sense to mention the CP110 here ? Is this 19 KB limitation
> a limit of the PPv2.2 IP, or of the CP110 ?

The PPv2.2 IP is part of the CP110 communication processor.
The next communication processor will have a different packet processor, or a next generation of PPv2.x.
The limit is in the PPv2.2 TX FIFO.

Stefan.

2018-03-04 06:57:05

by Stefan Chulski

Subject: RE: [PATCH net-next 5/5] net: mvpp2: jumbo frames support

> > netdev_err(port->dev, "Invalid pool %d\n", pool);
> > return NULL;
> > }
> > @@ -4596,11 +4604,24 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, int pkt_size)
> > static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
> > {
> > int rxq;
> > + enum mvpp2_bm_pool_log_num long_log_pool, short_log_pool;
> > +
> > + /* If port pkt_size is higher than 1518B:
> > + * HW Long pool - SW Jumbo pool, HW Short pool - SW Short pool
>
> The comment is wrong. In this case, the HW short pool is the SW long pool.

You right. Comment is wrong.

> > + if (port->pool_long->id == MVPP2_BM_JUMBO && port->id != 0) {
>
> Again, all over the place we hardcode the fact that Jumbo frames can
> only be used on port 0. I know port 0 is the only one that can do 10G,
> but are there possibly some use cases where you may want Jumbo frame on
> another port ?
>
> This all really feels very hardcoded to me.
>

All ports support jumbo frames.
But only port 0 can do TX HW checksum offload (due to the TX FIFO size).

The packet processor 2.2 has only 19KB of TX FIFO.
So the TX FIFO config code assigns 10KB to port 0, and 3KB each to ports 1 and 2.

To perform checksum in HW, the HW obviously should work in store and forward mode: store the whole frame in the TX FIFO and then compute the checksum.
If the MTU is 1500B, everything is fine and all ports can do this.

If the MTU is 9KB and a 9KB frame is transmitted, port 0 can still do the HW checksum, but ports 1 and 2 don't have enough FIFO for this.
So we cannot offload this feature and SW should perform the checksum.

> > + /* 9704 == 9728 - 20 and rounding to 8 */
> > + dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE;
>
> Is this correct for all ports ? Shouldn't the maximum MTU be different
> between port 0 (that supports Jumbo frames) and the other ports ?

This is correct for all ports. All ports can support Jumbo frames.

Stefan.

2018-03-04 07:00:23

by Stefan Chulski

Subject: RE: [PATCH net-next 1/5] net: mvpp2: use the same buffer pool for all ports

> Hello,
>
> On Fri, 2 Mar 2018 16:40:40 +0100, Antoine Tenart wrote:
> > +static struct {
> > + int pkt_size;
> > + int buf_num;
> > +} mvpp2_pools[MVPP2_BM_POOLS_NUM];
>
> Any reason for not doing:
>
> } mvpp2_pools[MVPP2_BM_POOLS_NUM] = {
> [MVPP2_BM_SHORT] = {
> .pkt_size = MVPP2_BM_SHORT_PKT_SIZE,
> .buf_num = MVPP2_BM_SHORT_BUF_NUM
> },
> [MVPP2_BM_LONG] = {
> .pkt_size = MVPP2_BM_LONG_PKT_SIZE,
> .buf_num = MVPP2_BM_LONG_BUF_NUM,
> },
> };
>
> And get rid of:
>
> > +static void mvpp2_setup_bm_pool(void)
> > +{
> > + /* Short pool */
> > + mvpp2_pools[MVPP2_BM_SHORT].buf_num = MVPP2_BM_SHORT_BUF_NUM;
> > + mvpp2_pools[MVPP2_BM_SHORT].pkt_size = MVPP2_BM_SHORT_PKT_SIZE;
> > +
> > + /* Long pool */
> > + mvpp2_pools[MVPP2_BM_LONG].buf_num = MVPP2_BM_LONG_BUF_NUM;
> > + mvpp2_pools[MVPP2_BM_LONG].pkt_size = MVPP2_BM_LONG_PKT_SIZE;
> > +}
>
> ?
>

No, we can change it.

Stefan.

2018-03-04 09:43:22

by Thomas Petazzoni

Subject: Re: [PATCH net-next 3/5] net: mvpp2: use a data size of 10kB for Tx FIFO on port 0

Hello,

On Sun, 4 Mar 2018 06:29:59 +0000, Stefan Chulski wrote:

> > Is there a reason to hardcode 10KB for port 0, and 3KB for the other
> > ports ? Would there be use cases where the user may want different
> > configurations ?
>
> The design requirement is a 10KB TX FIFO for the 10Gb/s port and 2.5KB
> for the 2.5Gb/s ports.

What is a "design requirement" ? Is it a HW design limitation ?

> Since only port 0 supports 10Gb/s and ports 1&2 support up to 2.5Gb/s,
> I don't see any reason to change this configuration.
> Also, the TX FIFO size can only be set during probe.
>
> > It's just that it feels very "hardcoded" to enforce specifically those numbers.
> >
> > Also, does it make sense to mention the CP110 here ? Is this 19 KB limitation
> > a limit of the PPv2.2 IP, or of the CP110 ?
>
> The PPv2.2 IP is part of the CP110 communication processor.

Thanks, I know this :-)

> The next communication processor will have a different packet
> processor, or a next generation of PPv2.x.
> The limit is in the PPv2.2 TX FIFO.

So, the limitation has nothing to do with CP110 really, it's just a
limitation of PPv2.2, and mentioning CP110 in the comment doesn't make
much sense, correct ?

Best regards,

Thomas
--
Thomas Petazzoni, CTO, Bootlin (formerly Free Electrons)
Embedded Linux and Kernel engineering
http://bootlin.com

2018-03-04 09:43:22

by Thomas Petazzoni

Subject: Re: [PATCH net-next 5/5] net: mvpp2: jumbo frames support

Hello,

On Sun, 4 Mar 2018 06:56:02 +0000, Stefan Chulski wrote:

> > > + if (port->pool_long->id == MVPP2_BM_JUMBO && port->id != 0) {
> >
> > Again, all over the place we hardcode the fact that Jumbo frames can
> > only be used on port 0. I know port 0 is the only one that can do
> > 10G, but are there possibly some use cases where you may want Jumbo
> > frame on another port ?
> >
> > This all really feels very hardcoded to me.
> >
>
> All ports support jumbo frames.
> But only port 0 can do TX HW checksum offload (due to the TX FIFO size).
>
> The packet processor 2.2 has only 19KB of TX FIFO.
> So the TX FIFO config code assigns 10KB to port 0, and 3KB each to
> ports 1 and 2.

Yes, but I was also questioning whether hardcoding this configuration
was correct.

> To perform checksum in HW, the HW obviously should work in store and
> forward mode: store the whole frame in the TX FIFO and then compute the
> checksum. If the MTU is 1500B, everything is fine and all ports can do
> this.
>
> If the MTU is 9KB and a 9KB frame is transmitted, port 0 can still do
> the HW checksum, but ports 1 and 2 don't have enough FIFO for this.
> So we cannot offload this feature and SW should perform the checksum.

So perhaps the real check should not be "port 0", but whether the MTU
is higher or lower than the TX FIFO size assigned to the current port.
This would express in a much better way the reason why HW checksum can be
used or not.
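
For example, something like this (a sketch only: the tx_fifo_size[]
array is hypothetical, it would have to be recorded by
mvpp22_tx_fifo_init() at probe time):

/* Store and forward checksumming requires the whole frame to fit in the
 * Tx FIFO assigned to the port, so derive the capability from the FIFO
 * size instead of hardcoding the port number.
 */
static bool mvpp2_port_supports_tx_csum(struct mvpp2_port *port, int mtu)
{
	int pkt_size = MVPP2_RX_PKT_SIZE(mtu);

	/* tx_fifo_size[] is assumed to hold the per-port size in kB. */
	return pkt_size <= port->priv->tx_fifo_size[port->id] * 1024;
}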

> > > + /* 9704 == 9728 - 20 and rounding to 8 */
> > > + dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE;
> >
> > Is this correct for all ports ? Shouldn't the maximum MTU be different
> > between port 0 (that supports Jumbo frames) and the other ports ?
>
> This is correct for all ports. All ports can support Jumbo frames.

OK. With your explanation above, I understand better.

Best regards,

Thomas
--
Thomas Petazzoni, CTO, Bootlin (formerly Free Electrons)
Embedded Linux and Kernel engineering
http://bootlin.com

2018-03-04 09:43:22

by Stefan Chulski

Subject: RE: [PATCH net-next 3/5] net: mvpp2: use a data size of 10kB for Tx FIFO on port 0



>
> On Sun, 4 Mar 2018 06:29:59 +0000, Stefan Chulski wrote:
>
> > > Is there a reason to hardcode 10KB for port 0, and 3KB for the other ports
> ?
> > > Would there be use cases where the user may want different
> > > configurations ?
> >
> > Design requirement are 10KB TX FIFO for the 10Gb/sec and 2.5KB for the
> 2.5Gb/sec.
>
> What is a "design requirement" ? Is it a HW design limitation ?

We can call it a HW design limitation. Anyway, to support 10Gb/s, a port should have at least a 10KB TX FIFO.

> So, the limitation has nothing to do with CP110 really, it's just a limitation of
> PPv2.2, and mentioning CP110 in the comment doesn't make much sense,
> correct ?

I will change it.

Stefan.



2018-03-04 09:51:12

by Stefan Chulski

Subject: RE: [PATCH net-next 5/5] net: mvpp2: jumbo frames support

> > To perform checksum in HW, the HW obviously should work in store and
> > forward mode: store the whole frame in the TX FIFO and then compute
> > the checksum. If the MTU is 1500B, everything is fine and all ports
> > can do this.
> >
> > If the MTU is 9KB and a 9KB frame is transmitted, port 0 can still do
> > the HW checksum, but ports 1 and 2 don't have enough FIFO for this.
> > So we cannot offload this feature and SW should perform the checksum.
>
> So perhaps the real check should not be "port 0", but whether the MTU is
> higher or lower than the TX FIFO size assigned to the current port.
> This would express in a much better way the reason why HW checksum can be
> used or not.

I really don't want to involve the MTU size here: for each packet we should add to the MTU the overhead added by the HW (offset, CRC, DSA tags, etc.).
I prefer to just check: the port's TX FIFO size is 10KB -> the port can support HW checksum offload.
Do you suggest keeping some shadow table with the ports' TX FIFO sizes for this?

Thanks,
Stefan.

2018-03-05 10:50:46

by Antoine Tenart

Subject: Re: [PATCH net-next 1/5] net: mvpp2: use the same buffer pool for all ports

Hi Thomas,

On Fri, Mar 02, 2018 at 05:01:59PM +0100, Thomas Petazzoni wrote:
> On Fri, 2 Mar 2018 16:40:40 +0100, Antoine Tenart wrote:
> > +static struct {
> > + int pkt_size;
> > + int buf_num;
> > +} mvpp2_pools[MVPP2_BM_POOLS_NUM];
>
> Any reason for not doing:
>
> } mvpp2_pools[MVPP2_BM_POOLS_NUM] = {
> [MVPP2_BM_SHORT] = {
> .pkt_size = MVPP2_BM_SHORT_PKT_SIZE,
> .buf_num = MVPP2_BM_SHORT_BUF_NUM
> },
> [MVPP2_BM_LONG] = {
> .pkt_size = MVPP2_BM_LONG_PKT_SIZE,
> .buf_num = MVPP2_BM_LONG_BUF_NUM,
> },
> };
>
> And get rid of:
>
> > +static void mvpp2_setup_bm_pool(void)
> > +{
> > + /* Short pool */
> > + mvpp2_pools[MVPP2_BM_SHORT].buf_num = MVPP2_BM_SHORT_BUF_NUM;
> > + mvpp2_pools[MVPP2_BM_SHORT].pkt_size = MVPP2_BM_SHORT_PKT_SIZE;
> > +
> > + /* Long pool */
> > + mvpp2_pools[MVPP2_BM_LONG].buf_num = MVPP2_BM_LONG_BUF_NUM;
> > + mvpp2_pools[MVPP2_BM_LONG].pkt_size = MVPP2_BM_LONG_PKT_SIZE;
> > +}
>
> ?

I wanted to do this, but it's not possible, as MVPP2_BM_SHORT_PKT_SIZE
and MVPP2_BM_LONG_PKT_SIZE use a core definition which expands at some
point to __max(...), which has to be called from within a function.

That's why I kept mvpp2_setup_bm_pool().
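
For the record, the chain expands roughly to this (simplified from
include/linux/skbuff.h and include/linux/kernel.h):

#define NET_SKB_PAD	max(32, L1_CACHE_BYTES)

/* max() is built on __max(), a statement-expression macro, roughly: */
#define __max(t1, t2, max1, max2, x, y) ({	\
	t1 max1 = (x);				\
	t2 max2 = (y);				\
	max1 > max2 ? max1 : max2; })

A statement expression is not a constant expression in C, so a
file-scope initializer such as:

static struct {
	int pkt_size;
	int buf_num;
} mvpp2_pools[MVPP2_BM_POOLS_NUM] = {
	[MVPP2_BM_LONG] = {
		/* error: initializer element is not constant */
		.pkt_size = MVPP2_BM_LONG_PKT_SIZE,
		.buf_num = MVPP2_BM_LONG_BUF_NUM,
	},
};

does not build, while the same assignments inside mvpp2_setup_bm_pool()
are fine.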

Thanks!
Antoine

--
Antoine Ténart, Bootlin (formerly Free Electrons)
Embedded Linux and Kernel engineering
https://bootlin.com

2018-03-05 14:15:45

by Thomas Petazzoni

Subject: Re: [PATCH net-next 1/5] net: mvpp2: use the same buffer pool for all ports

Hello,

On Mon, 5 Mar 2018 11:48:13 +0100, Antoine Tenart wrote:

> > > +static void mvpp2_setup_bm_pool(void)
> > > +{
> > > + /* Short pool */
> > > + mvpp2_pools[MVPP2_BM_SHORT].buf_num = MVPP2_BM_SHORT_BUF_NUM;
> > > + mvpp2_pools[MVPP2_BM_SHORT].pkt_size = MVPP2_BM_SHORT_PKT_SIZE;
> > > +
> > > + /* Long pool */
> > > + mvpp2_pools[MVPP2_BM_LONG].buf_num = MVPP2_BM_LONG_BUF_NUM;
> > > + mvpp2_pools[MVPP2_BM_LONG].pkt_size = MVPP2_BM_LONG_PKT_SIZE;
> > > +}
> >
> > ?
>
> I wanted to do this, but it's not possible, as MVPP2_BM_SHORT_PKT_SIZE and
> MVPP2_BM_LONG_PKT_SIZE use a core definition which expands at some point
> to __max(...) which has to be called from within a function.

Hum, weird:

#define MVPP2_BM_LONG_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(MVPP2_BM_LONG_FRAME_SIZE)
#define MVPP2_BM_LONG_FRAME_SIZE 2048

#define MVPP2_RX_MAX_PKT_SIZE(total_size) \
((total_size) - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE)

#define MVPP2_SKB_SHINFO_SIZE \
SKB_DATA_ALIGN(sizeof(struct skb_shared_info))

#define SKB_DATA_ALIGN(X) ALIGN(X, SMP_CACHE_BYTES)

I don't really see a __max(...) call.

And if this value really expands depending on other values, then it
isn't really a constant, and shouldn't be considered as a constant, no?

Thomas
--
Thomas Petazzoni, CTO, Bootlin (formerly Free Electrons)
Embedded Linux and Kernel engineering
http://bootlin.com

2018-03-05 14:17:15

by Antoine Tenart

Subject: Re: [PATCH net-next 1/5] net: mvpp2: use the same buffer pool for all ports

Hi Thomas,

On Mon, Mar 05, 2018 at 01:41:48PM +0100, Thomas Petazzoni wrote:
> On Mon, 5 Mar 2018 11:48:13 +0100, Antoine Tenart wrote:
>
> > > > +static void mvpp2_setup_bm_pool(void)
> > > > +{
> > > > + /* Short pool */
> > > > + mvpp2_pools[MVPP2_BM_SHORT].buf_num = MVPP2_BM_SHORT_BUF_NUM;
> > > > + mvpp2_pools[MVPP2_BM_SHORT].pkt_size = MVPP2_BM_SHORT_PKT_SIZE;
> > > > +
> > > > + /* Long pool */
> > > > + mvpp2_pools[MVPP2_BM_LONG].buf_num = MVPP2_BM_LONG_BUF_NUM;
> > > > + mvpp2_pools[MVPP2_BM_LONG].pkt_size = MVPP2_BM_LONG_PKT_SIZE;
> > > > +}
> > >
> > > ?
> >
> > I wanted to do this, but it's not possible, as MVPP2_BM_SHORT_PKT_SIZE and
> > MVPP2_BM_LONG_PKT_SIZE use a core definition which expands at some point
> > to __max(...) which has to be called from within a function.
>
> Hum, weird:
>
> #define MVPP2_BM_LONG_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(MVPP2_BM_LONG_FRAME_SIZE)
> #define MVPP2_BM_LONG_FRAME_SIZE 2048
>
> #define MVPP2_RX_MAX_PKT_SIZE(total_size) \
> ((total_size) - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE)
>
> #define MVPP2_SKB_SHINFO_SIZE \
> SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
>
> #define SKB_DATA_ALIGN(X) ALIGN(X, SMP_CACHE_BYTES)
>
> I don't really see a __max(...) call.

NET_SKB_PAD expands to __max(...).

> And if this value really expands depending on other values, then it
> isn't really a constant, and should be considered as a constant, no?

Well, the padding isn't truly a constant for some reasons; you can see
an explanation here:
https://elixir.bootlin.com/linux/latest/source/include/linux/skbuff.h#L2446

But this value can only change at compile time, so it really depends on
what the definition of a constant is :)

Antoine

--
Antoine Ténart, Bootlin (formerly Free Electrons)
Embedded Linux and Kernel engineering
https://bootlin.com