2019-07-03 10:21:04

by Ivan Khoronzhuk

Subject: [PATCH v6 net-next 0/5] net: ethernet: ti: cpsw: Add XDP support

This patchset adds XDP support to the TI cpsw driver and bases it on the
page_pool allocator. It was verified with AF_XDP socket drop, AF_XDP
l2fwd, and the eBPF XDP_DROP, XDP_REDIRECT, XDP_PASS and XDP_TX actions.
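
For reference, a minimal XDP_DROP program of the kind used in such
verification looks as follows (an illustrative sketch, not part of the
patchset; built with clang for the bpf target and loadable with ip or a
libbpf-based loader):

  #include <linux/bpf.h>
  #include <bpf/bpf_helpers.h>

  SEC("xdp")
  int xdp_drop(struct xdp_md *ctx)
  {
          /* Drop every frame as early as the driver allows. */
          return XDP_DROP;
  }

  char _license[] SEC("license") = "GPL";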

It was verified with the following configs enabled:
CONFIG_JIT=y
CONFIG_BPFILTER=y
CONFIG_BPF_SYSCALL=y
CONFIG_XDP_SOCKETS=y
CONFIG_BPF_EVENTS=y
CONFIG_HAVE_EBPF_JIT=y
CONFIG_BPF_JIT=y
CONFIG_CGROUP_BPF=y

Link to the previous v5:
https://lkml.org/lkml/2019/6/30/89

Regular iperf2 tests were also run to verify the impact on normal
netstack performance, compared against the base commit:
https://pastebin.com/JSMT0iZ4

v5..v6:
- changes to ensure rx_dev is kept the same across the redirect/flush cycle
- dropped net: ethernet: ti: davinci_cpdma: return handler status
- other changes are described in the individual patches

v4..v5:
- added two preliminary patches:
net: ethernet: ti: davinci_cpdma: allow desc split while down
net: ethernet: ti: cpsw_ethtool: allow res split while down
- added xdp allocator refcnt at the xdp level, avoiding the page pool refcnt
- moved flush status as separate argument for cpdma_chan_process
- reworked cpsw code according to last changes to allocator
- added a missing statistics counter

v3..v4:
- added page pool user counter
- use the same pool for both ndevs in dual-mac mode
- restructured page pool create/destroy according to the latest API changes

v2..v3:
- each rxq and ndev has its own page pool

v1..v2:
- combined the xdp_xmit functions
- used page allocation without refcount juggling
- unmapped the page handed to the skb netstack
- moved rxq/page pool allocation to the open/close pair
- added several preliminary patches:
net: page_pool: add helper function to retrieve dma addresses
net: page_pool: add helper function to unmap dma addresses
net: ethernet: ti: cpsw: use cpsw as drv data
net: ethernet: ti: cpsw_ethtool: simplify slave loops
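
For context on the two page_pool helper patches listed above: the pool
records the DMA address in struct page when it maps a buffer, so the
retrieve helper reduces to returning that field. A hedged sketch of the
shape those helpers take (see the page_pool patches themselves for the
final form):

  /* Return the bus address page_pool stored at map time. */
  static inline dma_addr_t page_pool_get_dma_addr(struct page *page)
  {
          return page->dma_addr;
  }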

Ivan Khoronzhuk (5):
xdp: allow same allocator usage
net: ethernet: ti: davinci_cpdma: add dma mapped submit
net: ethernet: ti: davinci_cpdma: allow desc split while down
net: ethernet: ti: cpsw_ethtool: allow res split while down
net: ethernet: ti: cpsw: add XDP support

drivers/net/ethernet/ti/Kconfig | 1 +
drivers/net/ethernet/ti/cpsw.c | 485 +++++++++++++++++++++---
drivers/net/ethernet/ti/cpsw_ethtool.c | 76 +++-
drivers/net/ethernet/ti/cpsw_priv.h | 7 +
drivers/net/ethernet/ti/davinci_cpdma.c | 99 ++++-
drivers/net/ethernet/ti/davinci_cpdma.h | 7 +-
include/net/xdp_priv.h | 2 +
net/core/xdp.c | 55 +++
8 files changed, 656 insertions(+), 76 deletions(-)

--
2.17.1


2019-07-03 10:21:31

by Ivan Khoronzhuk

Subject: [PATCH v6 net-next 2/5] net: ethernet: ti: davinci_cpdma: add dma mapped submit

If a DMA-mapped packet needs to be sent, as with the XDP page pool,
the "mapped" submit can be used. This patch adds a DMA-mapped submit
variant based on the regular one.
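
A hedged sketch of how a driver-side caller might use the new API for a
page_pool-backed XDP frame ("txch" and "port" stand in for the driver's
tx channel and directed-port value; this is illustrative, not code from
this series):

  static int xmit_mapped_frame(struct cpdma_chan *txch,
                               struct xdp_frame *xdpf, int port)
  {
          struct page *page = virt_to_page(xdpf->data);
          dma_addr_t dma;

          /* page_pool mapped the page once at allocation time; reuse
           * that mapping instead of calling dma_map_single() per packet.
           */
          dma = page_pool_get_dma_addr(page) + offset_in_page(xdpf->data);
          return cpdma_chan_submit_mapped(txch, xdpf, dma, xdpf->len, port);
  }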

Signed-off-by: Ivan Khoronzhuk <[email protected]>
---
drivers/net/ethernet/ti/davinci_cpdma.c | 89 ++++++++++++++++++++++---
drivers/net/ethernet/ti/davinci_cpdma.h | 4 ++
2 files changed, 83 insertions(+), 10 deletions(-)

diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index 5cf1758d425b..8da46394c0e7 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -139,6 +139,7 @@ struct submit_info {
int directed;
void *token;
void *data;
+ int flags;
int len;
};

@@ -184,6 +185,8 @@ static struct cpdma_control_info controls[] = {
(directed << CPDMA_TO_PORT_SHIFT)); \
} while (0)

+#define CPDMA_DMA_EXT_MAP BIT(16)
+
static void cpdma_desc_pool_destroy(struct cpdma_ctlr *ctlr)
{
struct cpdma_desc_pool *pool = ctlr->pool;
@@ -1015,6 +1018,7 @@ static int cpdma_chan_submit_si(struct submit_info *si)
struct cpdma_chan *chan = si->chan;
struct cpdma_ctlr *ctlr = chan->ctlr;
int len = si->len;
+ int swlen = len;
struct cpdma_desc __iomem *desc;
dma_addr_t buffer;
u32 mode;
@@ -1036,16 +1040,22 @@ static int cpdma_chan_submit_si(struct submit_info *si)
chan->stats.runt_transmit_buff++;
}

- buffer = dma_map_single(ctlr->dev, si->data, len, chan->dir);
- ret = dma_mapping_error(ctlr->dev, buffer);
- if (ret) {
- cpdma_desc_free(ctlr->pool, desc, 1);
- return -EINVAL;
- }
-
mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;
cpdma_desc_to_port(chan, mode, si->directed);

+ if (si->flags & CPDMA_DMA_EXT_MAP) {
+ buffer = (u32)si->data;
+ dma_sync_single_for_device(ctlr->dev, buffer, len, chan->dir);
+ swlen |= CPDMA_DMA_EXT_MAP;
+ } else {
+ buffer = dma_map_single(ctlr->dev, si->data, len, chan->dir);
+ ret = dma_mapping_error(ctlr->dev, buffer);
+ if (ret) {
+ cpdma_desc_free(ctlr->pool, desc, 1);
+ return -EINVAL;
+ }
+ }
+
/* Relaxed IO accessors can be used here as there is read barrier
* at the end of write sequence.
*/
@@ -1055,7 +1065,7 @@ static int cpdma_chan_submit_si(struct submit_info *si)
writel_relaxed(mode | len, &desc->hw_mode);
writel_relaxed((uintptr_t)si->token, &desc->sw_token);
writel_relaxed(buffer, &desc->sw_buffer);
- writel_relaxed(len, &desc->sw_len);
+ writel_relaxed(swlen, &desc->sw_len);
desc_read(desc, sw_len);

__cpdma_chan_submit(chan, desc);
@@ -1079,6 +1089,32 @@ int cpdma_chan_idle_submit(struct cpdma_chan *chan, void *token, void *data,
si.data = data;
si.len = len;
si.directed = directed;
+ si.flags = 0;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ if (chan->state == CPDMA_STATE_TEARDOWN) {
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return -EINVAL;
+ }
+
+ ret = cpdma_chan_submit_si(&si);
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return ret;
+}
+
+int cpdma_chan_idle_submit_mapped(struct cpdma_chan *chan, void *token,
+ dma_addr_t data, int len, int directed)
+{
+ struct submit_info si;
+ unsigned long flags;
+ int ret;
+
+ si.chan = chan;
+ si.token = token;
+ si.data = (void *)(u32)data;
+ si.len = len;
+ si.directed = directed;
+ si.flags = CPDMA_DMA_EXT_MAP;

spin_lock_irqsave(&chan->lock, flags);
if (chan->state == CPDMA_STATE_TEARDOWN) {
@@ -1103,6 +1139,32 @@ int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
si.data = data;
si.len = len;
si.directed = directed;
+ si.flags = 0;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ if (chan->state != CPDMA_STATE_ACTIVE) {
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return -EINVAL;
+ }
+
+ ret = cpdma_chan_submit_si(&si);
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return ret;
+}
+
+int cpdma_chan_submit_mapped(struct cpdma_chan *chan, void *token,
+ dma_addr_t data, int len, int directed)
+{
+ struct submit_info si;
+ unsigned long flags;
+ int ret;
+
+ si.chan = chan;
+ si.token = token;
+ si.data = (void *)(u32)data;
+ si.len = len;
+ si.directed = directed;
+ si.flags = CPDMA_DMA_EXT_MAP;

spin_lock_irqsave(&chan->lock, flags);
if (chan->state != CPDMA_STATE_ACTIVE) {
@@ -1140,10 +1202,17 @@ static void __cpdma_chan_free(struct cpdma_chan *chan,
uintptr_t token;

token = desc_read(desc, sw_token);
- buff_dma = desc_read(desc, sw_buffer);
origlen = desc_read(desc, sw_len);

- dma_unmap_single(ctlr->dev, buff_dma, origlen, chan->dir);
+ buff_dma = desc_read(desc, sw_buffer);
+ if (origlen & CPDMA_DMA_EXT_MAP) {
+ origlen &= ~CPDMA_DMA_EXT_MAP;
+ dma_sync_single_for_cpu(ctlr->dev, buff_dma, origlen,
+ chan->dir);
+ } else {
+ dma_unmap_single(ctlr->dev, buff_dma, origlen, chan->dir);
+ }
+
cpdma_desc_free(pool, desc, 1);
(*chan->handler)((void *)token, outlen, status);
}
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.h b/drivers/net/ethernet/ti/davinci_cpdma.h
index 9343c8c73c1b..0271a20c2e09 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.h
+++ b/drivers/net/ethernet/ti/davinci_cpdma.h
@@ -77,8 +77,12 @@ int cpdma_chan_stop(struct cpdma_chan *chan);

int cpdma_chan_get_stats(struct cpdma_chan *chan,
struct cpdma_chan_stats *stats);
+int cpdma_chan_submit_mapped(struct cpdma_chan *chan, void *token,
+ dma_addr_t data, int len, int directed);
int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
int len, int directed);
+int cpdma_chan_idle_submit_mapped(struct cpdma_chan *chan, void *token,
+ dma_addr_t data, int len, int directed);
int cpdma_chan_idle_submit(struct cpdma_chan *chan, void *token, void *data,
int len, int directed);
int cpdma_chan_process(struct cpdma_chan *chan, int quota);
--
2.17.1

2019-07-03 10:21:47

by Ivan Khoronzhuk

Subject: [PATCH v6 net-next 3/5] net: ethernet: ti: davinci_cpdma: allow desc split while down

It is possible to set ring params while the interfaces are down. When
an interface comes up, it uses the number of descriptors to fill the rx
queue, and later changes use it to create the rx pools. Usually this
resplit can happen after the PHY is up, but it can also be needed before
that, so allow it to happen when the number of rx descriptors is set
while the interfaces are down. Also, since the resplit no longer depends
on interface state, move it to the cpdma layer, where it belongs.
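
With this change the usual ethtool sequence works even before the
interface is brought up, e.g. (assuming eth0 is a cpsw port):

  # ip link set eth0 down
  # ethtool -G eth0 rx 1024    # descriptor pool is resplit immediately
  # ip link set eth0 up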

Signed-off-by: Ivan Khoronzhuk <[email protected]>
---
drivers/net/ethernet/ti/cpsw_ethtool.c | 9 ++++-----
drivers/net/ethernet/ti/davinci_cpdma.c | 10 +++++++++-
drivers/net/ethernet/ti/davinci_cpdma.h | 3 +--
3 files changed, 14 insertions(+), 8 deletions(-)

diff --git a/drivers/net/ethernet/ti/cpsw_ethtool.c b/drivers/net/ethernet/ti/cpsw_ethtool.c
index f60dc1dfc443..08d7aaee8299 100644
--- a/drivers/net/ethernet/ti/cpsw_ethtool.c
+++ b/drivers/net/ethernet/ti/cpsw_ethtool.c
@@ -664,15 +664,14 @@ int cpsw_set_ringparam(struct net_device *ndev,

cpsw_suspend_data_pass(ndev);

- cpdma_set_num_rx_descs(cpsw->dma, ering->rx_pending);
-
- if (cpsw->usage_count)
- cpdma_chan_split_pool(cpsw->dma);
+ ret = cpdma_set_num_rx_descs(cpsw->dma, ering->rx_pending);
+ if (ret)
+ goto err;

ret = cpsw_resume_data_pass(ndev);
if (!ret)
return 0;
-
+err:
dev_err(cpsw->dev, "cannot set ring params, closing device\n");
dev_close(ndev);
return ret;
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index 8da46394c0e7..4167b0b77c8e 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -1423,8 +1423,16 @@ int cpdma_get_num_tx_descs(struct cpdma_ctlr *ctlr)
return ctlr->num_tx_desc;
}

-void cpdma_set_num_rx_descs(struct cpdma_ctlr *ctlr, int num_rx_desc)
+int cpdma_set_num_rx_descs(struct cpdma_ctlr *ctlr, int num_rx_desc)
{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&ctlr->lock, flags);
ctlr->num_rx_desc = num_rx_desc;
ctlr->num_tx_desc = ctlr->pool->num_desc - ctlr->num_rx_desc;
+ ret = cpdma_chan_split_pool(ctlr);
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+
+ return ret;
}
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.h b/drivers/net/ethernet/ti/davinci_cpdma.h
index 0271a20c2e09..d3cfe234d16a 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.h
+++ b/drivers/net/ethernet/ti/davinci_cpdma.h
@@ -116,8 +116,7 @@ enum cpdma_control {
int cpdma_control_get(struct cpdma_ctlr *ctlr, int control);
int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value);
int cpdma_get_num_rx_descs(struct cpdma_ctlr *ctlr);
-void cpdma_set_num_rx_descs(struct cpdma_ctlr *ctlr, int num_rx_desc);
+int cpdma_set_num_rx_descs(struct cpdma_ctlr *ctlr, int num_rx_desc);
int cpdma_get_num_tx_descs(struct cpdma_ctlr *ctlr);
-int cpdma_chan_split_pool(struct cpdma_ctlr *ctlr);

#endif
--
2.17.1

2019-07-05 19:55:26

by kernel test robot

Subject: Re: [PATCH v6 net-next 2/5] net: ethernet: ti: davinci_cpdma: add dma mapped submit

Hi Ivan,

Thank you for the patch! Perhaps something to improve:

[auto build test WARNING on net-next/master]

url: https://github.com/0day-ci/linux/commits/Ivan-Khoronzhuk/xdp-allow-same-allocator-usage/20190706-003850
config: arm64-allmodconfig (attached as .config)
compiler: aarch64-linux-gcc (GCC) 7.4.0
reproduce:
wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
chmod +x ~/bin/make.cross
# save the attached .config to linux build tree
GCC_VERSION=7.4.0 make.cross ARCH=arm64

If you fix the issue, kindly add the following tag:
Reported-by: kbuild test robot <[email protected]>

All warnings (new ones prefixed by >>):

drivers/net//ethernet/ti/davinci_cpdma.c: In function 'cpdma_chan_submit_si':
>> drivers/net//ethernet/ti/davinci_cpdma.c:1047:12: warning: cast from pointer to integer of different size [-Wpointer-to-int-cast]
buffer = (u32)si->data;
^
drivers/net//ethernet/ti/davinci_cpdma.c: In function 'cpdma_chan_idle_submit_mapped':
>> drivers/net//ethernet/ti/davinci_cpdma.c:1114:12: warning: cast to pointer from integer of different size [-Wint-to-pointer-cast]
si.data = (void *)(u32)data;
^
drivers/net//ethernet/ti/davinci_cpdma.c: In function 'cpdma_chan_submit_mapped':
drivers/net//ethernet/ti/davinci_cpdma.c:1164:12: warning: cast to pointer from integer of different size [-Wint-to-pointer-cast]
si.data = (void *)(u32)data;
^
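
These warnings come from casting between a 64-bit pointer and u32 on
arm64. A conventional way to keep the same semantics warning-free is to
cast through uintptr_t instead of u32 (a sketch of one possible fix, not
necessarily the one the author will pick):

          /* submit side: recover the bus address stashed in the pointer */
          buffer = (dma_addr_t)(uintptr_t)si->data;

          /* caller side: stash the bus address without truncating to u32 */
          si.data = (void *)(uintptr_t)data;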

vim +1047 drivers/net//ethernet/ti/davinci_cpdma.c

1015
1016 static int cpdma_chan_submit_si(struct submit_info *si)
1017 {
1018 struct cpdma_chan *chan = si->chan;
1019 struct cpdma_ctlr *ctlr = chan->ctlr;
1020 int len = si->len;
1021 int swlen = len;
1022 struct cpdma_desc __iomem *desc;
1023 dma_addr_t buffer;
1024 u32 mode;
1025 int ret;
1026
1027 if (chan->count >= chan->desc_num) {
1028 chan->stats.desc_alloc_fail++;
1029 return -ENOMEM;
1030 }
1031
1032 desc = cpdma_desc_alloc(ctlr->pool);
1033 if (!desc) {
1034 chan->stats.desc_alloc_fail++;
1035 return -ENOMEM;
1036 }
1037
1038 if (len < ctlr->params.min_packet_size) {
1039 len = ctlr->params.min_packet_size;
1040 chan->stats.runt_transmit_buff++;
1041 }
1042
1043 mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;
1044 cpdma_desc_to_port(chan, mode, si->directed);
1045
1046 if (si->flags & CPDMA_DMA_EXT_MAP) {
> 1047 buffer = (u32)si->data;
1048 dma_sync_single_for_device(ctlr->dev, buffer, len, chan->dir);
1049 swlen |= CPDMA_DMA_EXT_MAP;
1050 } else {
1051 buffer = dma_map_single(ctlr->dev, si->data, len, chan->dir);
1052 ret = dma_mapping_error(ctlr->dev, buffer);
1053 if (ret) {
1054 cpdma_desc_free(ctlr->pool, desc, 1);
1055 return -EINVAL;
1056 }
1057 }
1058
1059 /* Relaxed IO accessors can be used here as there is read barrier
1060 * at the end of write sequence.
1061 */
1062 writel_relaxed(0, &desc->hw_next);
1063 writel_relaxed(buffer, &desc->hw_buffer);
1064 writel_relaxed(len, &desc->hw_len);
1065 writel_relaxed(mode | len, &desc->hw_mode);
1066 writel_relaxed((uintptr_t)si->token, &desc->sw_token);
1067 writel_relaxed(buffer, &desc->sw_buffer);
1068 writel_relaxed(swlen, &desc->sw_len);
1069 desc_read(desc, sw_len);
1070
1071 __cpdma_chan_submit(chan, desc);
1072
1073 if (chan->state == CPDMA_STATE_ACTIVE && chan->rxfree)
1074 chan_write(chan, rxfree, 1);
1075
1076 chan->count++;
1077 return 0;
1078 }
1079
1080 int cpdma_chan_idle_submit(struct cpdma_chan *chan, void *token, void *data,
1081 int len, int directed)
1082 {
1083 struct submit_info si;
1084 unsigned long flags;
1085 int ret;
1086
1087 si.chan = chan;
1088 si.token = token;
1089 si.data = data;
1090 si.len = len;
1091 si.directed = directed;
1092 si.flags = 0;
1093
1094 spin_lock_irqsave(&chan->lock, flags);
1095 if (chan->state == CPDMA_STATE_TEARDOWN) {
1096 spin_unlock_irqrestore(&chan->lock, flags);
1097 return -EINVAL;
1098 }
1099
1100 ret = cpdma_chan_submit_si(&si);
1101 spin_unlock_irqrestore(&chan->lock, flags);
1102 return ret;
1103 }
1104
1105 int cpdma_chan_idle_submit_mapped(struct cpdma_chan *chan, void *token,
1106 dma_addr_t data, int len, int directed)
1107 {
1108 struct submit_info si;
1109 unsigned long flags;
1110 int ret;
1111
1112 si.chan = chan;
1113 si.token = token;
> 1114 si.data = (void *)(u32)data;
1115 si.len = len;
1116 si.directed = directed;
1117 si.flags = CPDMA_DMA_EXT_MAP;
1118
1119 spin_lock_irqsave(&chan->lock, flags);
1120 if (chan->state == CPDMA_STATE_TEARDOWN) {
1121 spin_unlock_irqrestore(&chan->lock, flags);
1122 return -EINVAL;
1123 }
1124
1125 ret = cpdma_chan_submit_si(&si);
1126 spin_unlock_irqrestore(&chan->lock, flags);
1127 return ret;
1128 }
1129

---
0-DAY kernel test infrastructure Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all Intel Corporation

