2020-07-22 15:57:28

by Alexander Lobakin

Subject: [PATCH net-next 00/15] qed/qede: improve chain API and add XDP_REDIRECT support

This series adds the missing XDP_REDIRECT case handling to the QLogic
Everest Ethernet driver, along with all the necessary prerequisites and ops.
qede Tx relies heavily on the chain API, so first make sure that API is in
its best shape.
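
For context, a qed "chain" is the drivers' ring-buffer abstraction over
DMA-coherent pages; the fast path claims and retires ring elements roughly
like this (a loose sketch, not lifted verbatim from the driver):

	bd = qed_chain_produce(&txq->tx_pbl);	/* claim the next Tx element */
	/* ... fill the BD, advance the producer, ring the doorbell ... */
	qed_chain_consume(&txq->tx_pbl);	/* retire it upon completion */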

Alexander Lobakin (15):
qed: reformat "qed_chain.h" a bit
qed: reformat Makefile
qed: move chain methods to a separate file
qed: prevent possible double-frees of the chains
qed: sanitize PBL chains allocation
qed: move chain initialization inlines next to allocation functions
qed: simplify initialization of the chains with an external PBL
qed: simplify chain allocation with init params struct
qed: add support for different page sizes for chains
qed: optimize common chain accessors
qed: introduce qed_chain_get_elem_used{,u32}()
qede: reformat several structures in "qede.h"
qede: reformat net_device_ops declarations
qede: refactor XDP Tx processing
qede: add .ndo_xdp_xmit() and XDP_REDIRECT support

drivers/infiniband/hw/qedr/main.c | 20 +-
drivers/infiniband/hw/qedr/verbs.c | 97 ++---
drivers/net/ethernet/qlogic/qed/Makefile | 37 +-
drivers/net/ethernet/qlogic/qed/qed_chain.c | 367 ++++++++++++++++++
drivers/net/ethernet/qlogic/qed/qed_dev.c | 273 -------------
drivers/net/ethernet/qlogic/qed/qed_dev_api.h | 32 +-
drivers/net/ethernet/qlogic/qed/qed_iscsi.c | 39 +-
drivers/net/ethernet/qlogic/qed/qed_ll2.c | 44 ++-
.../net/ethernet/qlogic/qed/qed_sp_commands.c | 4 +-
drivers/net/ethernet/qlogic/qed/qed_spq.c | 90 +++--
drivers/net/ethernet/qlogic/qede/qede.h | 175 +++++----
drivers/net/ethernet/qlogic/qede/qede_fp.c | 174 ++++++---
drivers/net/ethernet/qlogic/qede/qede_main.c | 185 +++++----
include/linux/qed/qed_chain.h | 328 ++++++----------
include/linux/qed/qed_if.h | 9 +-
15 files changed, 1016 insertions(+), 858 deletions(-)
create mode 100644 drivers/net/ethernet/qlogic/qed/qed_chain.c

--

Netdev folks, could you please take the entire series through your tree
after the necessary acks and reviews? Patches 8-9 also touch the qedr
driver under the rdma tree, but these changes can't be separated, as doing
so would break incremental buildability and bisecting.

--
2.25.1


2020-07-22 15:57:32

by Alexander Lobakin

Subject: [PATCH net-next 13/15] qede: reformat net_device_ops declarations

Correct the indentation of the net_device_ops declarations for a fancier look.

Signed-off-by: Alexander Lobakin <[email protected]>
Signed-off-by: Igor Russkikh <[email protected]>
Signed-off-by: Michal Kalderon <[email protected]>
---
drivers/net/ethernet/qlogic/qede/qede_main.c | 122 +++++++++----------
1 file changed, 61 insertions(+), 61 deletions(-)

diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index b5a95f165520..92bcdfa27961 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -639,79 +639,79 @@ qede_setup_tc_offload(struct net_device *dev, enum tc_setup_type type,
}

static const struct net_device_ops qede_netdev_ops = {
- .ndo_open = qede_open,
- .ndo_stop = qede_close,
- .ndo_start_xmit = qede_start_xmit,
- .ndo_select_queue = qede_select_queue,
- .ndo_set_rx_mode = qede_set_rx_mode,
- .ndo_set_mac_address = qede_set_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_change_mtu = qede_change_mtu,
- .ndo_do_ioctl = qede_ioctl,
- .ndo_tx_timeout = qede_tx_timeout,
+ .ndo_open = qede_open,
+ .ndo_stop = qede_close,
+ .ndo_start_xmit = qede_start_xmit,
+ .ndo_select_queue = qede_select_queue,
+ .ndo_set_rx_mode = qede_set_rx_mode,
+ .ndo_set_mac_address = qede_set_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_change_mtu = qede_change_mtu,
+ .ndo_do_ioctl = qede_ioctl,
+ .ndo_tx_timeout = qede_tx_timeout,
#ifdef CONFIG_QED_SRIOV
- .ndo_set_vf_mac = qede_set_vf_mac,
- .ndo_set_vf_vlan = qede_set_vf_vlan,
- .ndo_set_vf_trust = qede_set_vf_trust,
+ .ndo_set_vf_mac = qede_set_vf_mac,
+ .ndo_set_vf_vlan = qede_set_vf_vlan,
+ .ndo_set_vf_trust = qede_set_vf_trust,
#endif
- .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
- .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
- .ndo_fix_features = qede_fix_features,
- .ndo_set_features = qede_set_features,
- .ndo_get_stats64 = qede_get_stats64,
+ .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
+ .ndo_fix_features = qede_fix_features,
+ .ndo_set_features = qede_set_features,
+ .ndo_get_stats64 = qede_get_stats64,
#ifdef CONFIG_QED_SRIOV
- .ndo_set_vf_link_state = qede_set_vf_link_state,
- .ndo_set_vf_spoofchk = qede_set_vf_spoofchk,
- .ndo_get_vf_config = qede_get_vf_config,
- .ndo_set_vf_rate = qede_set_vf_rate,
+ .ndo_set_vf_link_state = qede_set_vf_link_state,
+ .ndo_set_vf_spoofchk = qede_set_vf_spoofchk,
+ .ndo_get_vf_config = qede_get_vf_config,
+ .ndo_set_vf_rate = qede_set_vf_rate,
#endif
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
- .ndo_features_check = qede_features_check,
- .ndo_bpf = qede_xdp,
+ .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
+ .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
+ .ndo_features_check = qede_features_check,
+ .ndo_bpf = qede_xdp,
#ifdef CONFIG_RFS_ACCEL
- .ndo_rx_flow_steer = qede_rx_flow_steer,
+ .ndo_rx_flow_steer = qede_rx_flow_steer,
#endif
- .ndo_setup_tc = qede_setup_tc_offload,
+ .ndo_setup_tc = qede_setup_tc_offload,
};

static const struct net_device_ops qede_netdev_vf_ops = {
- .ndo_open = qede_open,
- .ndo_stop = qede_close,
- .ndo_start_xmit = qede_start_xmit,
- .ndo_select_queue = qede_select_queue,
- .ndo_set_rx_mode = qede_set_rx_mode,
- .ndo_set_mac_address = qede_set_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_change_mtu = qede_change_mtu,
- .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
- .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
- .ndo_fix_features = qede_fix_features,
- .ndo_set_features = qede_set_features,
- .ndo_get_stats64 = qede_get_stats64,
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
- .ndo_features_check = qede_features_check,
+ .ndo_open = qede_open,
+ .ndo_stop = qede_close,
+ .ndo_start_xmit = qede_start_xmit,
+ .ndo_select_queue = qede_select_queue,
+ .ndo_set_rx_mode = qede_set_rx_mode,
+ .ndo_set_mac_address = qede_set_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_change_mtu = qede_change_mtu,
+ .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
+ .ndo_fix_features = qede_fix_features,
+ .ndo_set_features = qede_set_features,
+ .ndo_get_stats64 = qede_get_stats64,
+ .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
+ .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
+ .ndo_features_check = qede_features_check,
};

static const struct net_device_ops qede_netdev_vf_xdp_ops = {
- .ndo_open = qede_open,
- .ndo_stop = qede_close,
- .ndo_start_xmit = qede_start_xmit,
- .ndo_select_queue = qede_select_queue,
- .ndo_set_rx_mode = qede_set_rx_mode,
- .ndo_set_mac_address = qede_set_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_change_mtu = qede_change_mtu,
- .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
- .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
- .ndo_fix_features = qede_fix_features,
- .ndo_set_features = qede_set_features,
- .ndo_get_stats64 = qede_get_stats64,
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
- .ndo_features_check = qede_features_check,
- .ndo_bpf = qede_xdp,
+ .ndo_open = qede_open,
+ .ndo_stop = qede_close,
+ .ndo_start_xmit = qede_start_xmit,
+ .ndo_select_queue = qede_select_queue,
+ .ndo_set_rx_mode = qede_set_rx_mode,
+ .ndo_set_mac_address = qede_set_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_change_mtu = qede_change_mtu,
+ .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
+ .ndo_fix_features = qede_fix_features,
+ .ndo_set_features = qede_set_features,
+ .ndo_get_stats64 = qede_get_stats64,
+ .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
+ .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
+ .ndo_features_check = qede_features_check,
+ .ndo_bpf = qede_xdp,
};

/* -------------------------------------------------------------------------
--
2.25.1

2020-07-22 15:57:47

by Alexander Lobakin

Subject: [PATCH net-next 15/15] qede: add .ndo_xdp_xmit() and XDP_REDIRECT support

Add XDP_REDIRECT case handling and the corresponding NDO to support
redirecting XDP frames. This also includes registering the driver's memory
model (currently order-0 page mode) with the BPF subsystem.
The total number of XDP queues usually maps 1:1 to the number of Rx queues.

Signed-off-by: Alexander Lobakin <[email protected]>
Signed-off-by: Igor Russkikh <[email protected]>
Signed-off-by: Michal Kalderon <[email protected]>
---
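Two design notes, illustrated with a minimal sketch (simplified from the
diff below; error handling trimmed):

	/* 1) Rx setup: the XDP core must know how the ring's buffers are
	 *    allocated so that xdp_return_frame() can free redirected
	 *    frames correctly; order-0 pages go straight back to the page
	 *    allocator.
	 */
	WARN_ON(xdp_rxq_info_reg(&fp->rxq->xdp_rxq, edev->ndev,
				 fp->rxq->rxq_id) < 0);
	xdp_rxq_info_reg_mem_model(&fp->rxq->xdp_rxq,
				   MEM_TYPE_PAGE_ORDER0, NULL);

	/* 2) Tx side: .ndo_xdp_xmit() may run on any CPU, since frames can
	 *    be redirected from other interfaces. Unlike the NAPI-bound
	 *    XDP_TX path, the target queue is picked by CPU id and
	 *    serialized with a per-queue spinlock.
	 */
	i = smp_processor_id() % edev->total_xdp_queues;
	xdp_tx = edev->fp_array[i].xdp_tx;

	spin_lock(&xdp_tx->xdp_tx_lock);
	/* ... DMA-map and enqueue each frame, kick the doorbell on flush ... */
	spin_unlock(&xdp_tx->xdp_tx_lock);
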
drivers/net/ethernet/qlogic/qede/qede.h | 8 ++
drivers/net/ethernet/qlogic/qede/qede_fp.c | 97 +++++++++++++++++++-
drivers/net/ethernet/qlogic/qede/qede_main.c | 18 ++++
3 files changed, 118 insertions(+), 5 deletions(-)

diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index 308c66a5f98f..803c1fcca8ad 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -199,6 +199,7 @@ struct qede_dev {
u8 fp_num_rx;
u16 req_queues;
u16 num_queues;
+ u16 total_xdp_queues;

#define QEDE_QUEUE_CNT(edev) ((edev)->num_queues)
#define QEDE_RSS_COUNT(edev) ((edev)->num_queues - (edev)->fp_num_tx)
@@ -381,6 +382,7 @@ struct sw_tx_bd {

struct sw_tx_xdp {
struct page *page;
+ struct xdp_frame *xdpf;
dma_addr_t mapping;
};

@@ -403,6 +405,9 @@ struct qede_tx_queue {
void __iomem *doorbell_addr;
union db_prod tx_db;

+ /* Spinlock for XDP queues in case of XDP_REDIRECT */
+ spinlock_t xdp_tx_lock;
+
int index; /* Slowpath only */
#define QEDE_TXQ_XDP_TO_IDX(edev, txq) ((txq)->index - \
QEDE_MAX_TSS_CNT(edev))
@@ -456,6 +461,7 @@ struct qede_fastpath {

u8 xdp_xmit;
#define QEDE_XDP_TX BIT(0)
+#define QEDE_XDP_REDIRECT BIT(1)

struct napi_struct napi;
struct qed_sb_info *sb_info;
@@ -516,6 +522,8 @@ struct qede_reload_args {

/* Datapath functions definition */
netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev);
+int qede_xdp_transmit(struct net_device *dev, int n_frames,
+ struct xdp_frame **frames, u32 flags);
u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev);
netdev_features_t qede_features_check(struct sk_buff *skb,
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
index c80bf6d37b89..a2494bf85007 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -303,7 +303,7 @@ static inline void qede_update_tx_producer(struct qede_tx_queue *txq)
}

static int qede_xdp_xmit(struct qede_tx_queue *txq, dma_addr_t dma, u16 pad,
- u16 len, struct page *page)
+ u16 len, struct page *page, struct xdp_frame *xdpf)
{
struct eth_tx_1st_bd *bd;
struct sw_tx_xdp *xdp;
@@ -330,12 +330,66 @@ static int qede_xdp_xmit(struct qede_tx_queue *txq, dma_addr_t dma, u16 pad,
xdp = txq->sw_tx_ring.xdp + txq->sw_tx_prod;
xdp->mapping = dma;
xdp->page = page;
+ xdp->xdpf = xdpf;

txq->sw_tx_prod = (txq->sw_tx_prod + 1) % txq->num_tx_buffers;

return 0;
}

+int qede_xdp_transmit(struct net_device *dev, int n_frames,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ struct device *dmadev = &edev->pdev->dev;
+ struct qede_tx_queue *xdp_tx;
+ struct xdp_frame *xdpf;
+ dma_addr_t mapping;
+ int i, drops = 0;
+ u16 xdp_prod;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ if (unlikely(!netif_running(dev)))
+ return -ENETDOWN;
+
+ i = smp_processor_id() % edev->total_xdp_queues;
+ xdp_tx = edev->fp_array[i].xdp_tx;
+
+ spin_lock(&xdp_tx->xdp_tx_lock);
+
+ for (i = 0; i < n_frames; i++) {
+ xdpf = frames[i];
+
+ mapping = dma_map_single(dmadev, xdpf->data, xdpf->len,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dmadev, mapping))) {
+ xdp_return_frame_rx_napi(xdpf);
+ drops++;
+
+ continue;
+ }
+
+ if (unlikely(qede_xdp_xmit(xdp_tx, mapping, 0, xdpf->len,
+ NULL, xdpf))) {
+ xdp_return_frame_rx_napi(xdpf);
+ drops++;
+ }
+ }
+
+ if (flags & XDP_XMIT_FLUSH) {
+ xdp_prod = qed_chain_get_prod_idx(&xdp_tx->tx_pbl);
+
+ xdp_tx->tx_db.data.bd_prod = cpu_to_le16(xdp_prod);
+ qede_update_tx_producer(xdp_tx);
+ }
+
+ spin_unlock(&xdp_tx->xdp_tx_lock);
+
+ return n_frames - drops;
+}
+
int qede_txq_has_work(struct qede_tx_queue *txq)
{
u16 hw_bd_cons;
@@ -353,6 +407,7 @@ static void qede_xdp_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)
{
struct sw_tx_xdp *xdp_info, *xdp_arr = txq->sw_tx_ring.xdp;
struct device *dev = &edev->pdev->dev;
+ struct xdp_frame *xdpf;
u16 hw_bd_cons;

hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
@@ -360,10 +415,19 @@ static void qede_xdp_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)

while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) {
xdp_info = xdp_arr + txq->sw_tx_cons;
+ xdpf = xdp_info->xdpf;
+
+ if (xdpf) {
+ dma_unmap_single(dev, xdp_info->mapping, xdpf->len,
+ DMA_TO_DEVICE);
+ xdp_return_frame(xdpf);

- dma_unmap_page(dev, xdp_info->mapping, PAGE_SIZE,
- DMA_BIDIRECTIONAL);
- __free_page(xdp_info->page);
+ xdp_info->xdpf = NULL;
+ } else {
+ dma_unmap_page(dev, xdp_info->mapping, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ __free_page(xdp_info->page);
+ }

qed_chain_consume(&txq->tx_pbl);
txq->sw_tx_cons = (txq->sw_tx_cons + 1) % txq->num_tx_buffers;
@@ -1065,7 +1129,8 @@ static bool qede_rx_xdp(struct qede_dev *edev,
* throw current buffer, as replacement was already allocated.
*/
if (unlikely(qede_xdp_xmit(fp->xdp_tx, bd->mapping,
- *data_offset, *len, bd->data))) {
+ *data_offset, *len, bd->data,
+ NULL))) {
dma_unmap_page(rxq->dev, bd->mapping, PAGE_SIZE,
rxq->data_direction);
__free_page(bd->data);
@@ -1079,6 +1144,25 @@ static bool qede_rx_xdp(struct qede_dev *edev,
}

/* Regardless, we've consumed an Rx BD */
+ qede_rx_bd_ring_consume(rxq);
+ break;
+ case XDP_REDIRECT:
+ /* We need the replacement buffer before transmit. */
+ if (unlikely(qede_alloc_rx_buffer(rxq, true))) {
+ qede_recycle_rx_bd_ring(rxq, 1);
+
+ trace_xdp_exception(edev->ndev, prog, act);
+ break;
+ }
+
+ dma_unmap_page(rxq->dev, bd->mapping, PAGE_SIZE,
+ rxq->data_direction);
+
+ if (unlikely(xdp_do_redirect(edev->ndev, &xdp, prog)))
+ DP_NOTICE(edev, "Failed to redirect the packet\n");
+ else
+ fp->xdp_xmit |= QEDE_XDP_REDIRECT;
+
qede_rx_bd_ring_consume(rxq);
break;
default:
@@ -1387,6 +1471,9 @@ int qede_poll(struct napi_struct *napi, int budget)
qede_update_tx_producer(fp->xdp_tx);
}

+ if (fp->xdp_xmit & QEDE_XDP_REDIRECT)
+ xdp_do_flush_map();
+
return rx_work_done;
}

diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 92bcdfa27961..1aaae3203f5a 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -672,6 +672,7 @@ static const struct net_device_ops qede_netdev_ops = {
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = qede_rx_flow_steer,
#endif
+ .ndo_xdp_xmit = qede_xdp_transmit,
.ndo_setup_tc = qede_setup_tc_offload,
};

@@ -712,6 +713,7 @@ static const struct net_device_ops qede_netdev_vf_xdp_ops = {
.ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_features_check = qede_features_check,
.ndo_bpf = qede_xdp,
+ .ndo_xdp_xmit = qede_xdp_transmit,
};

/* -------------------------------------------------------------------------
@@ -1712,6 +1714,7 @@ static void qede_init_fp(struct qede_dev *edev)
{
int queue_id, rxq_index = 0, txq_index = 0;
struct qede_fastpath *fp;
+ bool init_xdp = false;

for_each_queue(queue_id) {
fp = &edev->fp_array[queue_id];
@@ -1723,6 +1726,9 @@ static void qede_init_fp(struct qede_dev *edev)
fp->xdp_tx->index = QEDE_TXQ_IDX_TO_XDP(edev,
rxq_index);
fp->xdp_tx->is_xdp = 1;
+
+ spin_lock_init(&fp->xdp_tx->xdp_tx_lock);
+ init_xdp = true;
}

if (fp->type & QEDE_FASTPATH_RX) {
@@ -1738,6 +1744,13 @@ static void qede_init_fp(struct qede_dev *edev)
/* Driver have no error path from here */
WARN_ON(xdp_rxq_info_reg(&fp->rxq->xdp_rxq, edev->ndev,
fp->rxq->rxq_id) < 0);
+
+ if (xdp_rxq_info_reg_mem_model(&fp->rxq->xdp_rxq,
+ MEM_TYPE_PAGE_ORDER0,
+ NULL)) {
+ DP_NOTICE(edev,
+ "Failed to register XDP memory model\n");
+ }
}

if (fp->type & QEDE_FASTPATH_TX) {
@@ -1763,6 +1776,11 @@ static void qede_init_fp(struct qede_dev *edev)
snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
edev->ndev->name, queue_id);
}
+
+ if (init_xdp) {
+ edev->total_xdp_queues = QEDE_RSS_COUNT(edev);
+ DP_INFO(edev, "Total XDP queues: %u\n", edev->total_xdp_queues);
+ }
}

static int qede_set_real_num_queues(struct qede_dev *edev)
--
2.25.1

2020-07-22 15:58:06

by Alexander Lobakin

Subject: [PATCH net-next 14/15] qede: refactor XDP Tx processing

The current XDP Tx logic is suboptimal and can't be reused for the
XDP_REDIRECT path.
Make qede_xdp_{tx_int,xmit}() more universal and efficient in general to
allow future expansion.

Misc: use unlikely() hints where appropriate and replace "fall through"
comments with the fallthrough pseudo-keyword.

Signed-off-by: Alexander Lobakin <[email protected]>
Signed-off-by: Igor Russkikh <[email protected]>
Signed-off-by: Michal Kalderon <[email protected]>
---
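The main behavioural change: fp->xdp_xmit becomes a bitmask of per-poll
verdicts, so the XDP Tx doorbell is rung once per NAPI cycle rather than
once per frame. A condensed sketch of that pattern (names as in the diff,
everything else trimmed):

	fp->xdp_xmit = 0;	/* reset at the top of qede_poll() */

	/* ... Rx processing; qede_rx_xdp() sets QEDE_XDP_TX once at least
	 * one frame has been queued on the XDP Tx ring ...
	 */

	if (fp->xdp_xmit & QEDE_XDP_TX) {
		xdp_prod = qed_chain_get_prod_idx(&fp->xdp_tx->tx_pbl);

		fp->xdp_tx->tx_db.data.bd_prod = cpu_to_le16(xdp_prod);
		qede_update_tx_producer(fp->xdp_tx);
	}
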
drivers/net/ethernet/qlogic/qede/qede.h | 1 +
drivers/net/ethernet/qlogic/qede/qede_fp.c | 89 +++++++++++-----------
2 files changed, 45 insertions(+), 45 deletions(-)

diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index e8ed0bb94ee0..308c66a5f98f 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -455,6 +455,7 @@ struct qede_fastpath {
u8 id;

u8 xdp_xmit;
+#define QEDE_XDP_TX BIT(0)

struct napi_struct napi;
struct qed_sb_info *sb_info;
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
index 1c4ece0713f8..c80bf6d37b89 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -302,48 +302,37 @@ static inline void qede_update_tx_producer(struct qede_tx_queue *txq)
wmb();
}

-static int qede_xdp_xmit(struct qede_dev *edev, struct qede_fastpath *fp,
- struct sw_rx_data *metadata, u16 padding, u16 length)
+static int qede_xdp_xmit(struct qede_tx_queue *txq, dma_addr_t dma, u16 pad,
+ u16 len, struct page *page)
{
- struct qede_tx_queue *txq = fp->xdp_tx;
- struct eth_tx_1st_bd *first_bd;
- u16 idx = txq->sw_tx_prod;
+ struct eth_tx_1st_bd *bd;
+ struct sw_tx_xdp *xdp;
u16 val;

- if (!qed_chain_get_elem_left(&txq->tx_pbl)) {
+ if (unlikely(qed_chain_get_elem_used(&txq->tx_pbl) >=
+ txq->num_tx_buffers)) {
txq->stopped_cnt++;
return -ENOMEM;
}

- first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl);
+ bd = qed_chain_produce(&txq->tx_pbl);
+ bd->data.nbds = 1;
+ bd->data.bd_flags.bitfields = BIT(ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT);

- memset(first_bd, 0, sizeof(*first_bd));
- first_bd->data.bd_flags.bitfields =
- BIT(ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT);
-
- val = (length & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
+ val = (len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;

- first_bd->data.bitfields |= cpu_to_le16(val);
- first_bd->data.nbds = 1;
+ bd->data.bitfields = cpu_to_le16(val);

/* We can safely ignore the offset, as it's 0 for XDP */
- BD_SET_UNMAP_ADDR_LEN(first_bd, metadata->mapping + padding, length);
+ BD_SET_UNMAP_ADDR_LEN(bd, dma + pad, len);

- /* Synchronize the buffer back to device, as program [probably]
- * has changed it.
- */
- dma_sync_single_for_device(&edev->pdev->dev,
- metadata->mapping + padding,
- length, PCI_DMA_TODEVICE);
+ xdp = txq->sw_tx_ring.xdp + txq->sw_tx_prod;
+ xdp->mapping = dma;
+ xdp->page = page;

- txq->sw_tx_ring.xdp[idx].page = metadata->data;
- txq->sw_tx_ring.xdp[idx].mapping = metadata->mapping;
txq->sw_tx_prod = (txq->sw_tx_prod + 1) % txq->num_tx_buffers;

- /* Mark the fastpath for future XDP doorbell */
- fp->xdp_xmit = 1;
-
return 0;
}

@@ -362,20 +351,21 @@ int qede_txq_has_work(struct qede_tx_queue *txq)

static void qede_xdp_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)
{
- u16 hw_bd_cons, idx;
+ struct sw_tx_xdp *xdp_info, *xdp_arr = txq->sw_tx_ring.xdp;
+ struct device *dev = &edev->pdev->dev;
+ u16 hw_bd_cons;

hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
barrier();

while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) {
- qed_chain_consume(&txq->tx_pbl);
- idx = txq->sw_tx_cons;
+ xdp_info = xdp_arr + txq->sw_tx_cons;

- dma_unmap_page(&edev->pdev->dev,
- txq->sw_tx_ring.xdp[idx].mapping,
- PAGE_SIZE, DMA_BIDIRECTIONAL);
- __free_page(txq->sw_tx_ring.xdp[idx].page);
+ dma_unmap_page(dev, xdp_info->mapping, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ __free_page(xdp_info->page);

+ qed_chain_consume(&txq->tx_pbl);
txq->sw_tx_cons = (txq->sw_tx_cons + 1) % txq->num_tx_buffers;
txq->xmit_pkts++;
}
@@ -1064,32 +1054,39 @@ static bool qede_rx_xdp(struct qede_dev *edev,
switch (act) {
case XDP_TX:
/* We need the replacement buffer before transmit. */
- if (qede_alloc_rx_buffer(rxq, true)) {
+ if (unlikely(qede_alloc_rx_buffer(rxq, true))) {
qede_recycle_rx_bd_ring(rxq, 1);
+
trace_xdp_exception(edev->ndev, prog, act);
- return false;
+ break;
}

/* Now if there's a transmission problem, we'd still have to
* throw current buffer, as replacement was already allocated.
*/
- if (qede_xdp_xmit(edev, fp, bd, *data_offset, *len)) {
- dma_unmap_page(rxq->dev, bd->mapping,
- PAGE_SIZE, DMA_BIDIRECTIONAL);
+ if (unlikely(qede_xdp_xmit(fp->xdp_tx, bd->mapping,
+ *data_offset, *len, bd->data))) {
+ dma_unmap_page(rxq->dev, bd->mapping, PAGE_SIZE,
+ rxq->data_direction);
__free_page(bd->data);
+
trace_xdp_exception(edev->ndev, prog, act);
+ } else {
+ dma_sync_single_for_device(rxq->dev,
+ bd->mapping + *data_offset,
+ *len, rxq->data_direction);
+ fp->xdp_xmit |= QEDE_XDP_TX;
}

/* Regardless, we've consumed an Rx BD */
qede_rx_bd_ring_consume(rxq);
- return false;
-
+ break;
default:
bpf_warn_invalid_xdp_action(act);
- /* Fall through */
+ fallthrough;
case XDP_ABORTED:
trace_xdp_exception(edev->ndev, prog, act);
- /* Fall through */
+ fallthrough;
case XDP_DROP:
qede_recycle_rx_bd_ring(rxq, cqe->bd_num);
}
@@ -1353,6 +1350,9 @@ int qede_poll(struct napi_struct *napi, int budget)
napi);
struct qede_dev *edev = fp->edev;
int rx_work_done = 0;
+ u16 xdp_prod;
+
+ fp->xdp_xmit = 0;

if (likely(fp->type & QEDE_FASTPATH_TX)) {
int cos;
@@ -1380,10 +1380,9 @@ int qede_poll(struct napi_struct *napi, int budget)
}
}

- if (fp->xdp_xmit) {
- u16 xdp_prod = qed_chain_get_prod_idx(&fp->xdp_tx->tx_pbl);
+ if (fp->xdp_xmit & QEDE_XDP_TX) {
+ xdp_prod = qed_chain_get_prod_idx(&fp->xdp_tx->tx_pbl);

- fp->xdp_xmit = 0;
fp->xdp_tx->tx_db.data.bd_prod = cpu_to_le16(xdp_prod);
qede_update_tx_producer(fp->xdp_tx);
}
--
2.25.1

2020-07-22 15:58:08

by Alexander Lobakin

Subject: [PATCH net-next 03/15] qed: move chain methods to a separate file

Move the chain allocation/freeing functions to a new file to avoid mixing
them with hardware-related code.

Signed-off-by: Alexander Lobakin <[email protected]>
Signed-off-by: Igor Russkikh <[email protected]>
Signed-off-by: Michal Kalderon <[email protected]>
---
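For reference, the allocation API being moved still has its long positional
prototype at this point in the series (it gets reworked in patch 8). A
hypothetical caller allocating an Rx BD ring would look like:

	struct qed_chain chain;
	int rc;

	rc = qed_chain_alloc(cdev,
			     QED_CHAIN_USE_TO_CONSUME_PRODUCE,
			     QED_CHAIN_MODE_NEXT_PTR,
			     QED_CHAIN_CNT_TYPE_U16,
			     num_descs,			/* element count */
			     sizeof(struct eth_rx_bd),	/* element size */
			     &chain,
			     NULL);			/* no external PBL */
	if (rc)
		return rc;

	/* ... produce/consume elements on the chain ... */

	qed_chain_free(cdev, &chain);
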
drivers/net/ethernet/qlogic/qed/Makefile | 1 +
drivers/net/ethernet/qlogic/qed/qed_chain.c | 300 ++++++++++++++++++++
drivers/net/ethernet/qlogic/qed/qed_dev.c | 273 ------------------
3 files changed, 301 insertions(+), 273 deletions(-)
create mode 100644 drivers/net/ethernet/qlogic/qed/qed_chain.c

diff --git a/drivers/net/ethernet/qlogic/qed/Makefile b/drivers/net/ethernet/qlogic/qed/Makefile
index 3c75e4fa9b02..f947b105cf14 100644
--- a/drivers/net/ethernet/qlogic/qed/Makefile
+++ b/drivers/net/ethernet/qlogic/qed/Makefile
@@ -4,6 +4,7 @@
obj-$(CONFIG_QED) := qed.o

qed-y := \
+ qed_chain.o \
qed_cxt.o \
qed_dcbx.o \
qed_debug.o \
diff --git a/drivers/net/ethernet/qlogic/qed/qed_chain.c b/drivers/net/ethernet/qlogic/qed/qed_chain.c
new file mode 100644
index 000000000000..40cc26f7f20b
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_chain.c
@@ -0,0 +1,300 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+/* Copyright (c) 2020 Marvell International Ltd. */
+
+#include <linux/qed/qed_chain.h>
+
+#include "qed_dev_api.h"
+
+static void qed_chain_free_next_ptr(struct qed_dev *cdev,
+ struct qed_chain *chain)
+{
+ struct device *dev = &cdev->pdev->dev;
+ struct qed_chain_next *next;
+ dma_addr_t phys, phys_next;
+ void *virt, *virt_next;
+ u32 size, i;
+
+ size = chain->elem_size * chain->usable_per_page;
+ virt = chain->p_virt_addr;
+ phys = chain->p_phys_addr;
+
+ for (i = 0; i < chain->page_cnt; i++) {
+ if (!virt)
+ break;
+
+ next = virt + size;
+ virt_next = next->next_virt;
+ phys_next = HILO_DMA_REGPAIR(next->next_phys);
+
+ dma_free_coherent(dev, QED_CHAIN_PAGE_SIZE, virt, phys);
+
+ virt = virt_next;
+ phys = phys_next;
+ }
+}
+
+static void qed_chain_free_single(struct qed_dev *cdev,
+ struct qed_chain *chain)
+{
+ if (!chain->p_virt_addr)
+ return;
+
+ dma_free_coherent(&cdev->pdev->dev, QED_CHAIN_PAGE_SIZE,
+ chain->p_virt_addr, chain->p_phys_addr);
+}
+
+static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *chain)
+{
+ struct device *dev = &cdev->pdev->dev;
+ struct addr_tbl_entry *entry;
+ u32 pbl_size, i;
+
+ if (!chain->pbl.pp_addr_tbl)
+ return;
+
+ for (i = 0; i < chain->page_cnt; i++) {
+ entry = chain->pbl.pp_addr_tbl + i;
+ if (!entry->virt_addr)
+ break;
+
+ dma_free_coherent(dev, QED_CHAIN_PAGE_SIZE, entry->virt_addr,
+ entry->dma_map);
+ }
+
+ pbl_size = chain->page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
+
+ if (!chain->b_external_pbl)
+ dma_free_coherent(dev, pbl_size, chain->pbl_sp.p_virt_table,
+ chain->pbl_sp.p_phys_table);
+
+ vfree(chain->pbl.pp_addr_tbl);
+ chain->pbl.pp_addr_tbl = NULL;
+}
+
+/**
+ * qed_chain_free() - Free chain DMA memory.
+ *
+ * @cdev: Main device structure.
+ * @chain: Chain to free.
+ */
+void qed_chain_free(struct qed_dev *cdev, struct qed_chain *chain)
+{
+ switch (chain->mode) {
+ case QED_CHAIN_MODE_NEXT_PTR:
+ qed_chain_free_next_ptr(cdev, chain);
+ break;
+ case QED_CHAIN_MODE_SINGLE:
+ qed_chain_free_single(cdev, chain);
+ break;
+ case QED_CHAIN_MODE_PBL:
+ qed_chain_free_pbl(cdev, chain);
+ break;
+ default:
+ break;
+ }
+}
+
+static int
+qed_chain_alloc_sanity_check(struct qed_dev *cdev,
+ enum qed_chain_cnt_type cnt_type,
+ size_t elem_size, u32 page_cnt)
+{
+ u64 chain_size = ELEMS_PER_PAGE(elem_size) * page_cnt;
+
+ /* The actual chain size can be larger than the maximal possible value
+ * after rounding up the requested elements number to pages, and after
+ * taking into account the unusable elements (next-ptr elements).
+ * The size of a "u16" chain can be (U16_MAX + 1) since the chain
+ * size/capacity fields are of u32 type.
+ */
+ switch (cnt_type) {
+ case QED_CHAIN_CNT_TYPE_U16:
+ if (chain_size > U16_MAX + 1)
+ break;
+
+ return 0;
+ case QED_CHAIN_CNT_TYPE_U32:
+ if (chain_size > U32_MAX)
+ break;
+
+ return 0;
+ default:
+ return -EINVAL;
+ }
+
+ DP_NOTICE(cdev,
+ "The actual chain size (0x%llx) is larger than the maximal possible value\n",
+ chain_size);
+
+ return -EINVAL;
+}
+
+static int qed_chain_alloc_next_ptr(struct qed_dev *cdev,
+ struct qed_chain *chain)
+{
+ struct device *dev = &cdev->pdev->dev;
+ void *virt, *virt_prev = NULL;
+ dma_addr_t phys;
+ u32 i;
+
+ for (i = 0; i < chain->page_cnt; i++) {
+ virt = dma_alloc_coherent(dev, QED_CHAIN_PAGE_SIZE, &phys,
+ GFP_KERNEL);
+ if (!virt)
+ return -ENOMEM;
+
+ if (i == 0) {
+ qed_chain_init_mem(chain, virt, phys);
+ qed_chain_reset(chain);
+ } else {
+ qed_chain_init_next_ptr_elem(chain, virt_prev, virt,
+ phys);
+ }
+
+ virt_prev = virt;
+ }
+
+ /* Last page's next element should point to the beginning of the
+ * chain.
+ */
+ qed_chain_init_next_ptr_elem(chain, virt_prev, chain->p_virt_addr,
+ chain->p_phys_addr);
+
+ return 0;
+}
+
+static int qed_chain_alloc_single(struct qed_dev *cdev,
+ struct qed_chain *chain)
+{
+ dma_addr_t phys;
+ void *virt;
+
+ virt = dma_alloc_coherent(&cdev->pdev->dev, QED_CHAIN_PAGE_SIZE,
+ &phys, GFP_KERNEL);
+ if (!virt)
+ return -ENOMEM;
+
+ qed_chain_init_mem(chain, virt, phys);
+ qed_chain_reset(chain);
+
+ return 0;
+}
+
+static int qed_chain_alloc_pbl(struct qed_dev *cdev, struct qed_chain *chain,
+ struct qed_chain_ext_pbl *ext_pbl)
+{
+ struct device *dev = &cdev->pdev->dev;
+ struct addr_tbl_entry *addr_tbl;
+ dma_addr_t phys, pbl_phys;
+ void *pbl_virt;
+ u32 page_cnt, i;
+ size_t size;
+ void *virt;
+
+ page_cnt = chain->page_cnt;
+
+ size = array_size(page_cnt, sizeof(*addr_tbl));
+ if (unlikely(size == SIZE_MAX))
+ return -EOVERFLOW;
+
+ addr_tbl = vzalloc(size);
+ if (!addr_tbl)
+ return -ENOMEM;
+
+ chain->pbl.pp_addr_tbl = addr_tbl;
+
+ if (ext_pbl) {
+ size = 0;
+ pbl_virt = ext_pbl->p_pbl_virt;
+ pbl_phys = ext_pbl->p_pbl_phys;
+
+ chain->b_external_pbl = true;
+ } else {
+ size = array_size(page_cnt, QED_CHAIN_PBL_ENTRY_SIZE);
+ if (unlikely(size == SIZE_MAX))
+ return -EOVERFLOW;
+
+ pbl_virt = dma_alloc_coherent(dev, size, &pbl_phys,
+ GFP_KERNEL);
+ }
+
+ if (!pbl_virt)
+ return -ENOMEM;
+
+ chain->pbl_sp.p_virt_table = pbl_virt;
+ chain->pbl_sp.p_phys_table = pbl_phys;
+
+ for (i = 0; i < page_cnt; i++) {
+ virt = dma_alloc_coherent(dev, QED_CHAIN_PAGE_SIZE, &phys,
+ GFP_KERNEL);
+ if (!virt)
+ return -ENOMEM;
+
+ if (i == 0) {
+ qed_chain_init_mem(chain, virt, phys);
+ qed_chain_reset(chain);
+ }
+
+ /* Fill the PBL table with the physical address of the page */
+ *(dma_addr_t *)pbl_virt = phys;
+ pbl_virt += QED_CHAIN_PBL_ENTRY_SIZE;
+
+ /* Keep the virtual address of the page */
+ addr_tbl[i].virt_addr = virt;
+ addr_tbl[i].dma_map = phys;
+ }
+
+ return 0;
+}
+
+int qed_chain_alloc(struct qed_dev *cdev,
+ enum qed_chain_use_mode intended_use,
+ enum qed_chain_mode mode,
+ enum qed_chain_cnt_type cnt_type,
+ u32 num_elems,
+ size_t elem_size,
+ struct qed_chain *chain,
+ struct qed_chain_ext_pbl *ext_pbl)
+{
+ u32 page_cnt;
+ int rc;
+
+ if (mode == QED_CHAIN_MODE_SINGLE)
+ page_cnt = 1;
+ else
+ page_cnt = QED_CHAIN_PAGE_CNT(num_elems, elem_size, mode);
+
+ rc = qed_chain_alloc_sanity_check(cdev, cnt_type, elem_size, page_cnt);
+ if (rc) {
+ DP_NOTICE(cdev,
+ "Cannot allocate a chain with the given arguments:\n");
+ DP_NOTICE(cdev,
+ "[use_mode %d, mode %d, cnt_type %d, num_elems %d, elem_size %zu]\n",
+ intended_use, mode, cnt_type, num_elems, elem_size);
+ return rc;
+ }
+
+ qed_chain_init_params(chain, page_cnt, elem_size, intended_use, mode,
+ cnt_type);
+
+ switch (mode) {
+ case QED_CHAIN_MODE_NEXT_PTR:
+ rc = qed_chain_alloc_next_ptr(cdev, chain);
+ break;
+ case QED_CHAIN_MODE_SINGLE:
+ rc = qed_chain_alloc_single(cdev, chain);
+ break;
+ case QED_CHAIN_MODE_PBL:
+ rc = qed_chain_alloc_pbl(cdev, chain, ext_pbl);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (!rc)
+ return 0;
+
+ qed_chain_free(cdev, chain);
+
+ return rc;
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 6516a1f921da..d9c7a1a6be94 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -4716,279 +4716,6 @@ void qed_hw_remove(struct qed_dev *cdev)
qed_mcp_nvm_info_free(p_hwfn);
}

-static void qed_chain_free_next_ptr(struct qed_dev *cdev,
- struct qed_chain *p_chain)
-{
- void *p_virt = p_chain->p_virt_addr, *p_virt_next = NULL;
- dma_addr_t p_phys = p_chain->p_phys_addr, p_phys_next = 0;
- struct qed_chain_next *p_next;
- u32 size, i;
-
- if (!p_virt)
- return;
-
- size = p_chain->elem_size * p_chain->usable_per_page;
-
- for (i = 0; i < p_chain->page_cnt; i++) {
- if (!p_virt)
- break;
-
- p_next = (struct qed_chain_next *)((u8 *)p_virt + size);
- p_virt_next = p_next->next_virt;
- p_phys_next = HILO_DMA_REGPAIR(p_next->next_phys);
-
- dma_free_coherent(&cdev->pdev->dev,
- QED_CHAIN_PAGE_SIZE, p_virt, p_phys);
-
- p_virt = p_virt_next;
- p_phys = p_phys_next;
- }
-}
-
-static void qed_chain_free_single(struct qed_dev *cdev,
- struct qed_chain *p_chain)
-{
- if (!p_chain->p_virt_addr)
- return;
-
- dma_free_coherent(&cdev->pdev->dev,
- QED_CHAIN_PAGE_SIZE,
- p_chain->p_virt_addr, p_chain->p_phys_addr);
-}
-
-static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *p_chain)
-{
- struct addr_tbl_entry *pp_addr_tbl = p_chain->pbl.pp_addr_tbl;
- u32 page_cnt = p_chain->page_cnt, i, pbl_size;
-
- if (!pp_addr_tbl)
- return;
-
- for (i = 0; i < page_cnt; i++) {
- if (!pp_addr_tbl[i].virt_addr || !pp_addr_tbl[i].dma_map)
- break;
-
- dma_free_coherent(&cdev->pdev->dev,
- QED_CHAIN_PAGE_SIZE,
- pp_addr_tbl[i].virt_addr,
- pp_addr_tbl[i].dma_map);
- }
-
- pbl_size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
-
- if (!p_chain->b_external_pbl)
- dma_free_coherent(&cdev->pdev->dev,
- pbl_size,
- p_chain->pbl_sp.p_virt_table,
- p_chain->pbl_sp.p_phys_table);
-
- vfree(p_chain->pbl.pp_addr_tbl);
- p_chain->pbl.pp_addr_tbl = NULL;
-}
-
-void qed_chain_free(struct qed_dev *cdev, struct qed_chain *p_chain)
-{
- switch (p_chain->mode) {
- case QED_CHAIN_MODE_NEXT_PTR:
- qed_chain_free_next_ptr(cdev, p_chain);
- break;
- case QED_CHAIN_MODE_SINGLE:
- qed_chain_free_single(cdev, p_chain);
- break;
- case QED_CHAIN_MODE_PBL:
- qed_chain_free_pbl(cdev, p_chain);
- break;
- }
-}
-
-static int
-qed_chain_alloc_sanity_check(struct qed_dev *cdev,
- enum qed_chain_cnt_type cnt_type,
- size_t elem_size, u32 page_cnt)
-{
- u64 chain_size = ELEMS_PER_PAGE(elem_size) * page_cnt;
-
- /* The actual chain size can be larger than the maximal possible value
- * after rounding up the requested elements number to pages, and after
- * taking into acount the unusuable elements (next-ptr elements).
- * The size of a "u16" chain can be (U16_MAX + 1) since the chain
- * size/capacity fields are of a u32 type.
- */
- if ((cnt_type == QED_CHAIN_CNT_TYPE_U16 &&
- chain_size > ((u32)U16_MAX + 1)) ||
- (cnt_type == QED_CHAIN_CNT_TYPE_U32 && chain_size > U32_MAX)) {
- DP_NOTICE(cdev,
- "The actual chain size (0x%llx) is larger than the maximal possible value\n",
- chain_size);
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int
-qed_chain_alloc_next_ptr(struct qed_dev *cdev, struct qed_chain *p_chain)
-{
- void *p_virt = NULL, *p_virt_prev = NULL;
- dma_addr_t p_phys = 0;
- u32 i;
-
- for (i = 0; i < p_chain->page_cnt; i++) {
- p_virt = dma_alloc_coherent(&cdev->pdev->dev,
- QED_CHAIN_PAGE_SIZE,
- &p_phys, GFP_KERNEL);
- if (!p_virt)
- return -ENOMEM;
-
- if (i == 0) {
- qed_chain_init_mem(p_chain, p_virt, p_phys);
- qed_chain_reset(p_chain);
- } else {
- qed_chain_init_next_ptr_elem(p_chain, p_virt_prev,
- p_virt, p_phys);
- }
-
- p_virt_prev = p_virt;
- }
- /* Last page's next element should point to the beginning of the
- * chain.
- */
- qed_chain_init_next_ptr_elem(p_chain, p_virt_prev,
- p_chain->p_virt_addr,
- p_chain->p_phys_addr);
-
- return 0;
-}
-
-static int
-qed_chain_alloc_single(struct qed_dev *cdev, struct qed_chain *p_chain)
-{
- dma_addr_t p_phys = 0;
- void *p_virt = NULL;
-
- p_virt = dma_alloc_coherent(&cdev->pdev->dev,
- QED_CHAIN_PAGE_SIZE, &p_phys, GFP_KERNEL);
- if (!p_virt)
- return -ENOMEM;
-
- qed_chain_init_mem(p_chain, p_virt, p_phys);
- qed_chain_reset(p_chain);
-
- return 0;
-}
-
-static int
-qed_chain_alloc_pbl(struct qed_dev *cdev,
- struct qed_chain *p_chain,
- struct qed_chain_ext_pbl *ext_pbl)
-{
- u32 page_cnt = p_chain->page_cnt, size, i;
- dma_addr_t p_phys = 0, p_pbl_phys = 0;
- struct addr_tbl_entry *pp_addr_tbl;
- u8 *p_pbl_virt = NULL;
- void *p_virt = NULL;
-
- size = page_cnt * sizeof(*pp_addr_tbl);
- pp_addr_tbl = vzalloc(size);
- if (!pp_addr_tbl)
- return -ENOMEM;
-
- /* The allocation of the PBL table is done with its full size, since it
- * is expected to be successive.
- * qed_chain_init_pbl_mem() is called even in a case of an allocation
- * failure, since tbl was previously allocated, and it
- * should be saved to allow its freeing during the error flow.
- */
- size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
-
- if (!ext_pbl) {
- p_pbl_virt = dma_alloc_coherent(&cdev->pdev->dev,
- size, &p_pbl_phys, GFP_KERNEL);
- } else {
- p_pbl_virt = ext_pbl->p_pbl_virt;
- p_pbl_phys = ext_pbl->p_pbl_phys;
- p_chain->b_external_pbl = true;
- }
-
- qed_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys, pp_addr_tbl);
- if (!p_pbl_virt)
- return -ENOMEM;
-
- for (i = 0; i < page_cnt; i++) {
- p_virt = dma_alloc_coherent(&cdev->pdev->dev,
- QED_CHAIN_PAGE_SIZE,
- &p_phys, GFP_KERNEL);
- if (!p_virt)
- return -ENOMEM;
-
- if (i == 0) {
- qed_chain_init_mem(p_chain, p_virt, p_phys);
- qed_chain_reset(p_chain);
- }
-
- /* Fill the PBL table with the physical address of the page */
- *(dma_addr_t *)p_pbl_virt = p_phys;
- /* Keep the virtual address of the page */
- p_chain->pbl.pp_addr_tbl[i].virt_addr = p_virt;
- p_chain->pbl.pp_addr_tbl[i].dma_map = p_phys;
-
- p_pbl_virt += QED_CHAIN_PBL_ENTRY_SIZE;
- }
-
- return 0;
-}
-
-int qed_chain_alloc(struct qed_dev *cdev,
- enum qed_chain_use_mode intended_use,
- enum qed_chain_mode mode,
- enum qed_chain_cnt_type cnt_type,
- u32 num_elems,
- size_t elem_size,
- struct qed_chain *p_chain,
- struct qed_chain_ext_pbl *ext_pbl)
-{
- u32 page_cnt;
- int rc = 0;
-
- if (mode == QED_CHAIN_MODE_SINGLE)
- page_cnt = 1;
- else
- page_cnt = QED_CHAIN_PAGE_CNT(num_elems, elem_size, mode);
-
- rc = qed_chain_alloc_sanity_check(cdev, cnt_type, elem_size, page_cnt);
- if (rc) {
- DP_NOTICE(cdev,
- "Cannot allocate a chain with the given arguments:\n");
- DP_NOTICE(cdev,
- "[use_mode %d, mode %d, cnt_type %d, num_elems %d, elem_size %zu]\n",
- intended_use, mode, cnt_type, num_elems, elem_size);
- return rc;
- }
-
- qed_chain_init_params(p_chain, page_cnt, (u8) elem_size, intended_use,
- mode, cnt_type);
-
- switch (mode) {
- case QED_CHAIN_MODE_NEXT_PTR:
- rc = qed_chain_alloc_next_ptr(cdev, p_chain);
- break;
- case QED_CHAIN_MODE_SINGLE:
- rc = qed_chain_alloc_single(cdev, p_chain);
- break;
- case QED_CHAIN_MODE_PBL:
- rc = qed_chain_alloc_pbl(cdev, p_chain, ext_pbl);
- break;
- }
- if (rc)
- goto nomem;
-
- return 0;
-
-nomem:
- qed_chain_free(cdev, p_chain);
- return rc;
-}
-
int qed_fw_l2_queue(struct qed_hwfn *p_hwfn, u16 src_id, u16 *dst_id)
{
if (src_id >= RESC_NUM(p_hwfn, QED_L2_QUEUE)) {
--
2.25.1

2020-07-22 15:58:48

by Alexander Lobakin

Subject: [PATCH net-next 08/15] qed: simplify chain allocation with init params struct

To simplify the qed_chain_alloc() prototype and its call sites, introduce
struct qed_chain_init_params for specifying the chain parameters, and pass
a pointer to a filled struct to qed_chain_alloc() instead of a long list
of separate arguments.

Signed-off-by: Alexander Lobakin <[email protected]>
Signed-off-by: Igor Russkikh <[email protected]>
Signed-off-by: Michal Kalderon <[email protected]>
---
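The conversion pattern applied at each call site, shown on a generic
example (field values here are placeholders):

	/* Before: eight positional arguments */
	rc = qed_chain_alloc(cdev, QED_CHAIN_USE_TO_CONSUME_PRODUCE,
			     QED_CHAIN_MODE_PBL, QED_CHAIN_CNT_TYPE_U16,
			     num_elems, elem_size, &chain, NULL);

	/* After: self-describing designated initializers; unset fields,
	 * e.g. the external PBL pointers, default to zero/NULL.
	 */
	struct qed_chain_init_params params = {
		.mode		= QED_CHAIN_MODE_PBL,
		.intended_use	= QED_CHAIN_USE_TO_CONSUME_PRODUCE,
		.cnt_type	= QED_CHAIN_CNT_TYPE_U16,
		.num_elems	= num_elems,
		.elem_size	= elem_size,
	};

	rc = qed_chain_alloc(cdev, &chain, &params);
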
drivers/infiniband/hw/qedr/main.c | 20 ++--
drivers/infiniband/hw/qedr/verbs.c | 95 +++++++++----------
drivers/net/ethernet/qlogic/qed/qed_chain.c | 80 +++++++++-------
drivers/net/ethernet/qlogic/qed/qed_dev_api.h | 32 +------
drivers/net/ethernet/qlogic/qed/qed_iscsi.c | 39 ++++----
drivers/net/ethernet/qlogic/qed/qed_ll2.c | 44 +++++----
drivers/net/ethernet/qlogic/qed/qed_spq.c | 90 +++++++++++-------
drivers/net/ethernet/qlogic/qede/qede_main.c | 45 ++++-----
include/linux/qed/qed_chain.h | 21 ++--
include/linux/qed/qed_if.h | 9 +-
10 files changed, 242 insertions(+), 233 deletions(-)

diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
index ccaedfd53e49..b1de8d608e4d 100644
--- a/drivers/infiniband/hw/qedr/main.c
+++ b/drivers/infiniband/hw/qedr/main.c
@@ -346,9 +346,14 @@ static void qedr_free_resources(struct qedr_dev *dev)

static int qedr_alloc_resources(struct qedr_dev *dev)
{
+ struct qed_chain_init_params params = {
+ .mode = QED_CHAIN_MODE_PBL,
+ .intended_use = QED_CHAIN_USE_TO_CONSUME,
+ .cnt_type = QED_CHAIN_CNT_TYPE_U16,
+ .elem_size = sizeof(struct regpair *),
+ };
struct qedr_cnq *cnq;
__le16 *cons_pi;
- u16 n_entries;
int i, rc;

dev->sgid_tbl = kcalloc(QEDR_MAX_SGID, sizeof(union ib_gid),
@@ -382,7 +387,9 @@ static int qedr_alloc_resources(struct qedr_dev *dev)
dev->sb_start = dev->ops->rdma_get_start_sb(dev->cdev);

/* Allocate CNQ PBLs */
- n_entries = min_t(u32, QED_RDMA_MAX_CNQ_SIZE, QEDR_ROCE_MAX_CNQ_SIZE);
+ params.num_elems = min_t(u32, QED_RDMA_MAX_CNQ_SIZE,
+ QEDR_ROCE_MAX_CNQ_SIZE);
+
for (i = 0; i < dev->num_cnq; i++) {
cnq = &dev->cnq_array[i];

@@ -391,13 +398,8 @@ static int qedr_alloc_resources(struct qedr_dev *dev)
if (rc)
goto err3;

- rc = dev->ops->common->chain_alloc(dev->cdev,
- QED_CHAIN_USE_TO_CONSUME,
- QED_CHAIN_MODE_PBL,
- QED_CHAIN_CNT_TYPE_U16,
- n_entries,
- sizeof(struct regpair *),
- &cnq->pbl, NULL);
+ rc = dev->ops->common->chain_alloc(dev->cdev, &cnq->pbl,
+ &params);
if (rc)
goto err4;

diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 9b9e80266367..6737895a0d68 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -891,6 +891,12 @@ int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
udata, struct qedr_ucontext, ibucontext);
struct qed_rdma_destroy_cq_out_params destroy_oparams;
struct qed_rdma_destroy_cq_in_params destroy_iparams;
+ struct qed_chain_init_params chain_params = {
+ .mode = QED_CHAIN_MODE_PBL,
+ .intended_use = QED_CHAIN_USE_TO_CONSUME,
+ .cnt_type = QED_CHAIN_CNT_TYPE_U32,
+ .elem_size = sizeof(union rdma_cqe),
+ };
struct qedr_dev *dev = get_qedr_dev(ibdev);
struct qed_rdma_create_cq_in_params params;
struct qedr_create_cq_ureq ureq = {};
@@ -917,6 +923,7 @@ int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,

chain_entries = qedr_align_cq_entries(entries);
chain_entries = min_t(int, chain_entries, QEDR_MAX_CQES);
+ chain_params.num_elems = chain_entries;

/* calc db offset. user will add DPI base, kernel will add db addr */
db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
@@ -951,13 +958,8 @@ int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
} else {
cq->cq_type = QEDR_CQ_TYPE_KERNEL;

- rc = dev->ops->common->chain_alloc(dev->cdev,
- QED_CHAIN_USE_TO_CONSUME,
- QED_CHAIN_MODE_PBL,
- QED_CHAIN_CNT_TYPE_U32,
- chain_entries,
- sizeof(union rdma_cqe),
- &cq->pbl, NULL);
+ rc = dev->ops->common->chain_alloc(dev->cdev, &cq->pbl,
+ &chain_params);
if (rc)
goto err0;

@@ -1446,6 +1448,12 @@ static int qedr_alloc_srq_kernel_params(struct qedr_srq *srq,
struct ib_srq_init_attr *init_attr)
{
struct qedr_srq_hwq_info *hw_srq = &srq->hw_srq;
+ struct qed_chain_init_params params = {
+ .mode = QED_CHAIN_MODE_PBL,
+ .intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE,
+ .cnt_type = QED_CHAIN_CNT_TYPE_U32,
+ .elem_size = QEDR_SRQ_WQE_ELEM_SIZE,
+ };
dma_addr_t phy_prod_pair_addr;
u32 num_elems;
void *va;
@@ -1464,13 +1472,9 @@ static int qedr_alloc_srq_kernel_params(struct qedr_srq *srq,
hw_srq->virt_prod_pair_addr = va;

num_elems = init_attr->attr.max_wr * RDMA_MAX_SRQ_WQE_SIZE;
- rc = dev->ops->common->chain_alloc(dev->cdev,
- QED_CHAIN_USE_TO_CONSUME_PRODUCE,
- QED_CHAIN_MODE_PBL,
- QED_CHAIN_CNT_TYPE_U32,
- num_elems,
- QEDR_SRQ_WQE_ELEM_SIZE,
- &hw_srq->pbl, NULL);
+ params.num_elems = num_elems;
+
+ rc = dev->ops->common->chain_alloc(dev->cdev, &hw_srq->pbl, &params);
if (rc)
goto err0;

@@ -1901,29 +1905,28 @@ qedr_roce_create_kernel_qp(struct qedr_dev *dev,
u32 n_sq_elems, u32 n_rq_elems)
{
struct qed_rdma_create_qp_out_params out_params;
+ struct qed_chain_init_params params = {
+ .mode = QED_CHAIN_MODE_PBL,
+ .cnt_type = QED_CHAIN_CNT_TYPE_U32,
+ };
int rc;

- rc = dev->ops->common->chain_alloc(dev->cdev,
- QED_CHAIN_USE_TO_PRODUCE,
- QED_CHAIN_MODE_PBL,
- QED_CHAIN_CNT_TYPE_U32,
- n_sq_elems,
- QEDR_SQE_ELEMENT_SIZE,
- &qp->sq.pbl, NULL);
+ params.intended_use = QED_CHAIN_USE_TO_PRODUCE;
+ params.num_elems = n_sq_elems;
+ params.elem_size = QEDR_SQE_ELEMENT_SIZE;

+ rc = dev->ops->common->chain_alloc(dev->cdev, &qp->sq.pbl, &params);
if (rc)
return rc;

in_params->sq_num_pages = qed_chain_get_page_cnt(&qp->sq.pbl);
in_params->sq_pbl_ptr = qed_chain_get_pbl_phys(&qp->sq.pbl);

- rc = dev->ops->common->chain_alloc(dev->cdev,
- QED_CHAIN_USE_TO_CONSUME_PRODUCE,
- QED_CHAIN_MODE_PBL,
- QED_CHAIN_CNT_TYPE_U32,
- n_rq_elems,
- QEDR_RQE_ELEMENT_SIZE,
- &qp->rq.pbl, NULL);
+ params.intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE;
+ params.num_elems = n_rq_elems;
+ params.elem_size = QEDR_RQE_ELEMENT_SIZE;
+
+ rc = dev->ops->common->chain_alloc(dev->cdev, &qp->rq.pbl, &params);
if (rc)
return rc;

@@ -1949,7 +1952,10 @@ qedr_iwarp_create_kernel_qp(struct qedr_dev *dev,
u32 n_sq_elems, u32 n_rq_elems)
{
struct qed_rdma_create_qp_out_params out_params;
- struct qed_chain_ext_pbl ext_pbl;
+ struct qed_chain_init_params params = {
+ .mode = QED_CHAIN_MODE_PBL,
+ .cnt_type = QED_CHAIN_CNT_TYPE_U32,
+ };
int rc;

in_params->sq_num_pages = QED_CHAIN_PAGE_CNT(n_sq_elems,
@@ -1966,31 +1972,24 @@ qedr_iwarp_create_kernel_qp(struct qedr_dev *dev,
return -EINVAL;

/* Now we allocate the chain */
- ext_pbl.p_pbl_virt = out_params.sq_pbl_virt;
- ext_pbl.p_pbl_phys = out_params.sq_pbl_phys;

- rc = dev->ops->common->chain_alloc(dev->cdev,
- QED_CHAIN_USE_TO_PRODUCE,
- QED_CHAIN_MODE_PBL,
- QED_CHAIN_CNT_TYPE_U32,
- n_sq_elems,
- QEDR_SQE_ELEMENT_SIZE,
- &qp->sq.pbl, &ext_pbl);
+ params.intended_use = QED_CHAIN_USE_TO_PRODUCE;
+ params.num_elems = n_sq_elems;
+ params.elem_size = QEDR_SQE_ELEMENT_SIZE;
+ params.ext_pbl_virt = out_params.sq_pbl_virt;
+ params.ext_pbl_phys = out_params.sq_pbl_phys;

+ rc = dev->ops->common->chain_alloc(dev->cdev, &qp->sq.pbl, &params);
if (rc)
goto err;

- ext_pbl.p_pbl_virt = out_params.rq_pbl_virt;
- ext_pbl.p_pbl_phys = out_params.rq_pbl_phys;
-
- rc = dev->ops->common->chain_alloc(dev->cdev,
- QED_CHAIN_USE_TO_CONSUME_PRODUCE,
- QED_CHAIN_MODE_PBL,
- QED_CHAIN_CNT_TYPE_U32,
- n_rq_elems,
- QEDR_RQE_ELEMENT_SIZE,
- &qp->rq.pbl, &ext_pbl);
+ params.intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE;
+ params.num_elems = n_rq_elems;
+ params.elem_size = QEDR_RQE_ELEMENT_SIZE;
+ params.ext_pbl_virt = out_params.rq_pbl_virt;
+ params.ext_pbl_phys = out_params.rq_pbl_phys;

+ rc = dev->ops->common->chain_alloc(dev->cdev, &qp->rq.pbl, &params);
if (rc)
goto err;

diff --git a/drivers/net/ethernet/qlogic/qed/qed_chain.c b/drivers/net/ethernet/qlogic/qed/qed_chain.c
index 2a61007442ae..6e8dc6cefa5d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_chain.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_chain.c
@@ -5,23 +5,22 @@

#include "qed_dev_api.h"

-static void qed_chain_init_params(struct qed_chain *chain,
- u32 page_cnt, u8 elem_size,
- enum qed_chain_use_mode intended_use,
- enum qed_chain_mode mode,
- enum qed_chain_cnt_type cnt_type,
- const struct qed_chain_ext_pbl *ext_pbl)
+static void qed_chain_init(struct qed_chain *chain,
+ const struct qed_chain_init_params *params,
+ u32 page_cnt)
{
memset(chain, 0, sizeof(*chain));

- chain->elem_size = elem_size;
- chain->intended_use = intended_use;
- chain->mode = mode;
- chain->cnt_type = cnt_type;
+ chain->elem_size = params->elem_size;
+ chain->intended_use = params->intended_use;
+ chain->mode = params->mode;
+ chain->cnt_type = params->cnt_type;

- chain->elem_per_page = ELEMS_PER_PAGE(elem_size);
- chain->usable_per_page = USABLE_ELEMS_PER_PAGE(elem_size, mode);
- chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(elem_size, mode);
+ chain->elem_per_page = ELEMS_PER_PAGE(params->elem_size);
+ chain->usable_per_page = USABLE_ELEMS_PER_PAGE(params->elem_size,
+ params->mode);
+ chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(params->elem_size,
+ params->mode);

chain->elem_per_page_mask = chain->elem_per_page - 1;
chain->next_page_mask = chain->usable_per_page &
@@ -31,9 +30,9 @@ static void qed_chain_init_params(struct qed_chain *chain,
chain->capacity = chain->usable_per_page * page_cnt;
chain->size = chain->elem_per_page * page_cnt;

- if (ext_pbl && ext_pbl->p_pbl_virt) {
- chain->pbl_sp.table_virt = ext_pbl->p_pbl_virt;
- chain->pbl_sp.table_phys = ext_pbl->p_pbl_phys;
+ if (params->ext_pbl_virt) {
+ chain->pbl_sp.table_virt = params->ext_pbl_virt;
+ chain->pbl_sp.table_phys = params->ext_pbl_phys;

chain->b_external_pbl = true;
}
@@ -152,10 +151,16 @@ void qed_chain_free(struct qed_dev *cdev, struct qed_chain *chain)

static int
qed_chain_alloc_sanity_check(struct qed_dev *cdev,
- enum qed_chain_cnt_type cnt_type,
- size_t elem_size, u32 page_cnt)
+ const struct qed_chain_init_params *params,
+ u32 page_cnt)
{
- u64 chain_size = ELEMS_PER_PAGE(elem_size) * page_cnt;
+ u64 chain_size;
+
+ chain_size = ELEMS_PER_PAGE(params->elem_size);
+ chain_size *= page_cnt;
+
+ if (!chain_size)
+ return -EINVAL;

/* The actual chain size can be larger than the maximal possible value
* after rounding up the requested elements number to pages, and after
@@ -163,7 +168,7 @@ qed_chain_alloc_sanity_check(struct qed_dev *cdev,
* The size of a "u16" chain can be (U16_MAX + 1) since the chain
* size/capacity fields are of u32 type.
*/
- switch (cnt_type) {
+ switch (params->cnt_type) {
case QED_CHAIN_CNT_TYPE_U16:
if (chain_size > U16_MAX + 1)
break;
@@ -296,37 +301,42 @@ static int qed_chain_alloc_pbl(struct qed_dev *cdev, struct qed_chain *chain)
return 0;
}

-int qed_chain_alloc(struct qed_dev *cdev,
- enum qed_chain_use_mode intended_use,
- enum qed_chain_mode mode,
- enum qed_chain_cnt_type cnt_type,
- u32 num_elems,
- size_t elem_size,
- struct qed_chain *chain,
- struct qed_chain_ext_pbl *ext_pbl)
+/**
+ * qed_chain_alloc() - Allocate and initialize a chain.
+ *
+ * @cdev: Main device structure.
+ * @chain: Chain to be processed.
+ * @params: Chain initialization parameters.
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+int qed_chain_alloc(struct qed_dev *cdev, struct qed_chain *chain,
+ struct qed_chain_init_params *params)
{
u32 page_cnt;
int rc;

- if (mode == QED_CHAIN_MODE_SINGLE)
+ if (params->mode == QED_CHAIN_MODE_SINGLE)
page_cnt = 1;
else
- page_cnt = QED_CHAIN_PAGE_CNT(num_elems, elem_size, mode);
+ page_cnt = QED_CHAIN_PAGE_CNT(params->num_elems,
+ params->elem_size,
+ params->mode);

- rc = qed_chain_alloc_sanity_check(cdev, cnt_type, elem_size, page_cnt);
+ rc = qed_chain_alloc_sanity_check(cdev, params, page_cnt);
if (rc) {
DP_NOTICE(cdev,
"Cannot allocate a chain with the given arguments:\n");
DP_NOTICE(cdev,
"[use_mode %d, mode %d, cnt_type %d, num_elems %d, elem_size %zu]\n",
- intended_use, mode, cnt_type, num_elems, elem_size);
+ params->intended_use, params->mode, params->cnt_type,
+ params->num_elems, params->elem_size);
return rc;
}

- qed_chain_init_params(chain, page_cnt, elem_size, intended_use, mode,
- cnt_type, ext_pbl);
+ qed_chain_init(chain, params, page_cnt);

- switch (mode) {
+ switch (params->mode) {
case QED_CHAIN_MODE_NEXT_PTR:
rc = qed_chain_alloc_next_ptr(cdev, chain);
break;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
index 395d4932c262..d3c1f3879be8 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
@@ -254,35 +254,9 @@ int qed_dmae_host2host(struct qed_hwfn *p_hwfn,
dma_addr_t dest_addr,
u32 size_in_dwords, struct qed_dmae_params *p_params);

-/**
- * @brief qed_chain_alloc - Allocate and initialize a chain
- *
- * @param p_hwfn
- * @param intended_use
- * @param mode
- * @param num_elems
- * @param elem_size
- * @param p_chain
- * @param ext_pbl - a possible external PBL
- *
- * @return int
- */
-int
-qed_chain_alloc(struct qed_dev *cdev,
- enum qed_chain_use_mode intended_use,
- enum qed_chain_mode mode,
- enum qed_chain_cnt_type cnt_type,
- u32 num_elems,
- size_t elem_size,
- struct qed_chain *p_chain, struct qed_chain_ext_pbl *ext_pbl);
-
-/**
- * @brief qed_chain_free - Free chain DMA memory
- *
- * @param p_hwfn
- * @param p_chain
- */
-void qed_chain_free(struct qed_dev *cdev, struct qed_chain *p_chain);
+int qed_chain_alloc(struct qed_dev *cdev, struct qed_chain *chain,
+ struct qed_chain_init_params *params);
+void qed_chain_free(struct qed_dev *cdev, struct qed_chain *chain);

/**
* @@brief qed_fw_l2_queue - Get absolute L2 queue ID
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
index 25d2c882d7ac..4eae4ee3538f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
@@ -684,9 +684,13 @@ static int qed_iscsi_setup_connection(struct qed_iscsi_conn *p_conn)
static int qed_iscsi_allocate_connection(struct qed_hwfn *p_hwfn,
struct qed_iscsi_conn **p_out_conn)
{
- u16 uhq_num_elements = 0, xhq_num_elements = 0, r2tq_num_elements = 0;
struct scsi_terminate_extra_params *p_q_cnts = NULL;
struct qed_iscsi_pf_params *p_params = NULL;
+ struct qed_chain_init_params params = {
+ .mode = QED_CHAIN_MODE_PBL,
+ .intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE,
+ .cnt_type = QED_CHAIN_CNT_TYPE_U16,
+ };
struct tcp_upload_params *p_tcp = NULL;
struct qed_iscsi_conn *p_conn = NULL;
int rc = 0;
@@ -727,34 +731,25 @@ static int qed_iscsi_allocate_connection(struct qed_hwfn *p_hwfn,
goto nomem_upload_param;
p_conn->tcp_upload_params_virt_addr = p_tcp;

- r2tq_num_elements = p_params->num_r2tq_pages_in_ring *
- QED_CHAIN_PAGE_SIZE / 0x80;
- rc = qed_chain_alloc(p_hwfn->cdev,
- QED_CHAIN_USE_TO_CONSUME_PRODUCE,
- QED_CHAIN_MODE_PBL,
- QED_CHAIN_CNT_TYPE_U16,
- r2tq_num_elements, 0x80, &p_conn->r2tq, NULL);
+ params.num_elems = p_params->num_r2tq_pages_in_ring *
+ QED_CHAIN_PAGE_SIZE / sizeof(struct iscsi_wqe);
+ params.elem_size = sizeof(struct iscsi_wqe);
+
+ rc = qed_chain_alloc(p_hwfn->cdev, &p_conn->r2tq, &params);
if (rc)
goto nomem_r2tq;

- uhq_num_elements = p_params->num_uhq_pages_in_ring *
+ params.num_elems = p_params->num_uhq_pages_in_ring *
QED_CHAIN_PAGE_SIZE / sizeof(struct iscsi_uhqe);
- rc = qed_chain_alloc(p_hwfn->cdev,
- QED_CHAIN_USE_TO_CONSUME_PRODUCE,
- QED_CHAIN_MODE_PBL,
- QED_CHAIN_CNT_TYPE_U16,
- uhq_num_elements,
- sizeof(struct iscsi_uhqe), &p_conn->uhq, NULL);
+ params.elem_size = sizeof(struct iscsi_uhqe);
+
+ rc = qed_chain_alloc(p_hwfn->cdev, &p_conn->uhq, &params);
if (rc)
goto nomem_uhq;

- xhq_num_elements = uhq_num_elements;
- rc = qed_chain_alloc(p_hwfn->cdev,
- QED_CHAIN_USE_TO_CONSUME_PRODUCE,
- QED_CHAIN_MODE_PBL,
- QED_CHAIN_CNT_TYPE_U16,
- xhq_num_elements,
- sizeof(struct iscsi_xhqe), &p_conn->xhq, NULL);
+ params.elem_size = sizeof(struct iscsi_xhqe);
+
+ rc = qed_chain_alloc(p_hwfn->cdev, &p_conn->xhq, &params);
if (rc)
goto nomem;

diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index 6f4aec339cd4..0452b728c527 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -1125,6 +1125,12 @@ static int
qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_info)
{
+ struct qed_chain_init_params params = {
+ .intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE,
+ .cnt_type = QED_CHAIN_CNT_TYPE_U16,
+ .num_elems = p_ll2_info->input.rx_num_desc,
+ };
+ struct qed_dev *cdev = p_hwfn->cdev;
struct qed_ll2_rx_packet *p_descq;
u32 capacity;
int rc = 0;
@@ -1132,13 +1138,10 @@ qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn,
if (!p_ll2_info->input.rx_num_desc)
goto out;

- rc = qed_chain_alloc(p_hwfn->cdev,
- QED_CHAIN_USE_TO_CONSUME_PRODUCE,
- QED_CHAIN_MODE_NEXT_PTR,
- QED_CHAIN_CNT_TYPE_U16,
- p_ll2_info->input.rx_num_desc,
- sizeof(struct core_rx_bd),
- &p_ll2_info->rx_queue.rxq_chain, NULL);
+ params.mode = QED_CHAIN_MODE_NEXT_PTR;
+ params.elem_size = sizeof(struct core_rx_bd);
+
+ rc = qed_chain_alloc(cdev, &p_ll2_info->rx_queue.rxq_chain, &params);
if (rc) {
DP_NOTICE(p_hwfn, "Failed to allocate ll2 rxq chain\n");
goto out;
@@ -1154,13 +1157,10 @@ qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn,
}
p_ll2_info->rx_queue.descq_array = p_descq;

- rc = qed_chain_alloc(p_hwfn->cdev,
- QED_CHAIN_USE_TO_CONSUME_PRODUCE,
- QED_CHAIN_MODE_PBL,
- QED_CHAIN_CNT_TYPE_U16,
- p_ll2_info->input.rx_num_desc,
- sizeof(struct core_rx_fast_path_cqe),
- &p_ll2_info->rx_queue.rcq_chain, NULL);
+ params.mode = QED_CHAIN_MODE_PBL;
+ params.elem_size = sizeof(struct core_rx_fast_path_cqe);
+
+ rc = qed_chain_alloc(cdev, &p_ll2_info->rx_queue.rcq_chain, &params);
if (rc) {
DP_NOTICE(p_hwfn, "Failed to allocate ll2 rcq chain\n");
goto out;
@@ -1177,6 +1177,13 @@ qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn,
static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_info)
{
+ struct qed_chain_init_params params = {
+ .mode = QED_CHAIN_MODE_PBL,
+ .intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE,
+ .cnt_type = QED_CHAIN_CNT_TYPE_U16,
+ .num_elems = p_ll2_info->input.tx_num_desc,
+ .elem_size = sizeof(struct core_tx_bd),
+ };
struct qed_ll2_tx_packet *p_descq;
u32 desc_size;
u32 capacity;
@@ -1185,13 +1192,8 @@ static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn,
if (!p_ll2_info->input.tx_num_desc)
goto out;

- rc = qed_chain_alloc(p_hwfn->cdev,
- QED_CHAIN_USE_TO_CONSUME_PRODUCE,
- QED_CHAIN_MODE_PBL,
- QED_CHAIN_CNT_TYPE_U16,
- p_ll2_info->input.tx_num_desc,
- sizeof(struct core_tx_bd),
- &p_ll2_info->tx_queue.txq_chain, NULL);
+ rc = qed_chain_alloc(p_hwfn->cdev, &p_ll2_info->tx_queue.txq_chain,
+ &params);
if (rc)
goto out;

diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index 92ab029789e5..0bc1a0aeb56e 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -382,22 +382,26 @@ int qed_eq_completion(struct qed_hwfn *p_hwfn, void *cookie)

int qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem)
{
+ struct qed_chain_init_params params = {
+ .mode = QED_CHAIN_MODE_PBL,
+ .intended_use = QED_CHAIN_USE_TO_PRODUCE,
+ .cnt_type = QED_CHAIN_CNT_TYPE_U16,
+ .num_elems = num_elem,
+ .elem_size = sizeof(union event_ring_element),
+ };
struct qed_eq *p_eq;
+ int ret;

/* Allocate EQ struct */
p_eq = kzalloc(sizeof(*p_eq), GFP_KERNEL);
if (!p_eq)
return -ENOMEM;

- /* Allocate and initialize EQ chain*/
- if (qed_chain_alloc(p_hwfn->cdev,
- QED_CHAIN_USE_TO_PRODUCE,
- QED_CHAIN_MODE_PBL,
- QED_CHAIN_CNT_TYPE_U16,
- num_elem,
- sizeof(union event_ring_element),
- &p_eq->chain, NULL))
+ ret = qed_chain_alloc(p_hwfn->cdev, &p_eq->chain, &params);
+ if (ret) {
+ DP_NOTICE(p_hwfn, "Failed to allocate EQ chain\n");
goto eq_allocate_fail;
+ }

/* register EQ completion on the SP SB */
qed_int_register_cb(p_hwfn, qed_eq_completion,
@@ -408,7 +412,8 @@ int qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem)

eq_allocate_fail:
kfree(p_eq);
- return -ENOMEM;
+
+ return ret;
}

void qed_eq_setup(struct qed_hwfn *p_hwfn)
@@ -529,33 +534,40 @@ void qed_spq_setup(struct qed_hwfn *p_hwfn)

int qed_spq_alloc(struct qed_hwfn *p_hwfn)
{
+ struct qed_chain_init_params params = {
+ .mode = QED_CHAIN_MODE_SINGLE,
+ .intended_use = QED_CHAIN_USE_TO_PRODUCE,
+ .cnt_type = QED_CHAIN_CNT_TYPE_U16,
+ .elem_size = sizeof(struct slow_path_element),
+ };
+ struct qed_dev *cdev = p_hwfn->cdev;
struct qed_spq_entry *p_virt = NULL;
struct qed_spq *p_spq = NULL;
dma_addr_t p_phys = 0;
u32 capacity;
+ int ret;

/* SPQ struct */
p_spq = kzalloc(sizeof(struct qed_spq), GFP_KERNEL);
if (!p_spq)
return -ENOMEM;

- /* SPQ ring */
- if (qed_chain_alloc(p_hwfn->cdev,
- QED_CHAIN_USE_TO_PRODUCE,
- QED_CHAIN_MODE_SINGLE,
- QED_CHAIN_CNT_TYPE_U16,
- 0, /* N/A when the mode is SINGLE */
- sizeof(struct slow_path_element),
- &p_spq->chain, NULL))
- goto spq_allocate_fail;
+ /* SPQ ring */
+ ret = qed_chain_alloc(cdev, &p_spq->chain, &params);
+ if (ret) {
+ DP_NOTICE(p_hwfn, "Failed to allocate SPQ chain\n");
+ goto spq_chain_alloc_fail;
+ }

/* allocate and fill the SPQ elements (incl. ramrod data list) */
capacity = qed_chain_get_capacity(&p_spq->chain);
- p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ ret = -ENOMEM;
+
+ p_virt = dma_alloc_coherent(&cdev->pdev->dev,
capacity * sizeof(struct qed_spq_entry),
&p_phys, GFP_KERNEL);
if (!p_virt)
- goto spq_allocate_fail;
+ goto spq_alloc_fail;

p_spq->p_virt = p_virt;
p_spq->p_phys = p_phys;
@@ -563,10 +575,12 @@ int qed_spq_alloc(struct qed_hwfn *p_hwfn)

return 0;

-spq_allocate_fail:
- qed_chain_free(p_hwfn->cdev, &p_spq->chain);
+spq_alloc_fail:
+ qed_chain_free(cdev, &p_spq->chain);
+spq_chain_alloc_fail:
kfree(p_spq);
- return -ENOMEM;
+
+ return ret;
}

void qed_spq_free(struct qed_hwfn *p_hwfn)
@@ -967,30 +981,40 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
return 0;
}

+#define QED_SPQ_CONSQ_ELEM_SIZE 0x80
+
int qed_consq_alloc(struct qed_hwfn *p_hwfn)
{
+ struct qed_chain_init_params params = {
+ .mode = QED_CHAIN_MODE_PBL,
+ .intended_use = QED_CHAIN_USE_TO_PRODUCE,
+ .cnt_type = QED_CHAIN_CNT_TYPE_U16,
+ .num_elems = QED_CHAIN_PAGE_SIZE / QED_SPQ_CONSQ_ELEM_SIZE,
+ .elem_size = QED_SPQ_CONSQ_ELEM_SIZE,
+ };
struct qed_consq *p_consq;
+ int ret;

/* Allocate ConsQ struct */
p_consq = kzalloc(sizeof(*p_consq), GFP_KERNEL);
if (!p_consq)
return -ENOMEM;

- /* Allocate and initialize EQ chain*/
- if (qed_chain_alloc(p_hwfn->cdev,
- QED_CHAIN_USE_TO_PRODUCE,
- QED_CHAIN_MODE_PBL,
- QED_CHAIN_CNT_TYPE_U16,
- QED_CHAIN_PAGE_SIZE / 0x80,
- 0x80, &p_consq->chain, NULL))
- goto consq_allocate_fail;
+ /* Allocate and initialize ConsQ chain */
+ ret = qed_chain_alloc(p_hwfn->cdev, &p_consq->chain, &params);
+ if (ret) {
+ DP_NOTICE(p_hwfn, "Failed to allocate ConsQ chain\n");
+ goto consq_alloc_fail;
+ }

p_hwfn->p_consq = p_consq;
+
return 0;

-consq_allocate_fail:
+consq_alloc_fail:
kfree(p_consq);
- return -ENOMEM;
+
+ return ret;
}

void qed_consq_setup(struct qed_hwfn *p_hwfn)
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 6f2171dc0dea..b5a95f165520 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -1442,6 +1442,11 @@ static void qede_set_tpa_param(struct qede_rx_queue *rxq)
/* This function allocates all memory needed per Rx queue */
static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
{
+ struct qed_chain_init_params params = {
+ .cnt_type = QED_CHAIN_CNT_TYPE_U16,
+ .num_elems = RX_RING_SIZE,
+ };
+ struct qed_dev *cdev = edev->cdev;
int i, rc, size;

rxq->num_rx_buffers = edev->q_num_rx_buffers;
@@ -1477,24 +1482,20 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
}

/* Allocate FW Rx ring */
- rc = edev->ops->common->chain_alloc(edev->cdev,
- QED_CHAIN_USE_TO_CONSUME_PRODUCE,
- QED_CHAIN_MODE_NEXT_PTR,
- QED_CHAIN_CNT_TYPE_U16,
- RX_RING_SIZE,
- sizeof(struct eth_rx_bd),
- &rxq->rx_bd_ring, NULL);
+ params.mode = QED_CHAIN_MODE_NEXT_PTR;
+ params.intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE;
+ params.elem_size = sizeof(struct eth_rx_bd);
+
+ rc = edev->ops->common->chain_alloc(cdev, &rxq->rx_bd_ring, &params);
if (rc)
goto err;

/* Allocate FW completion ring */
- rc = edev->ops->common->chain_alloc(edev->cdev,
- QED_CHAIN_USE_TO_CONSUME,
- QED_CHAIN_MODE_PBL,
- QED_CHAIN_CNT_TYPE_U16,
- RX_RING_SIZE,
- sizeof(union eth_rx_cqe),
- &rxq->rx_comp_ring, NULL);
+ params.mode = QED_CHAIN_MODE_PBL;
+ params.intended_use = QED_CHAIN_USE_TO_CONSUME;
+ params.elem_size = sizeof(union eth_rx_cqe);
+
+ rc = edev->ops->common->chain_alloc(cdev, &rxq->rx_comp_ring, &params);
if (rc)
goto err;

@@ -1531,7 +1532,13 @@ static void qede_free_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
/* This function allocates all memory needed per Tx queue */
static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
{
- union eth_tx_bd_types *p_virt;
+ struct qed_chain_init_params params = {
+ .mode = QED_CHAIN_MODE_PBL,
+ .intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE,
+ .cnt_type = QED_CHAIN_CNT_TYPE_U16,
+ .num_elems = edev->q_num_tx_buffers,
+ .elem_size = sizeof(union eth_tx_bd_types),
+ };
int size, rc;

txq->num_tx_buffers = edev->q_num_tx_buffers;
@@ -1549,13 +1556,7 @@ static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
goto err;
}

- rc = edev->ops->common->chain_alloc(edev->cdev,
- QED_CHAIN_USE_TO_CONSUME_PRODUCE,
- QED_CHAIN_MODE_PBL,
- QED_CHAIN_CNT_TYPE_U16,
- txq->num_tx_buffers,
- sizeof(*p_virt),
- &txq->tx_pbl, NULL);
+ rc = edev->ops->common->chain_alloc(edev->cdev, &txq->tx_pbl, &params);
if (rc)
goto err;

diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h
index a0d83095dc73..f5cfee0934e5 100644
--- a/include/linux/qed/qed_chain.h
+++ b/include/linux/qed/qed_chain.h
@@ -54,11 +54,6 @@ struct qed_chain_pbl_u32 {
u32 cons_page_idx;
};

-struct qed_chain_ext_pbl {
- dma_addr_t p_pbl_phys;
- void *p_pbl_virt;
-};
-
struct qed_chain_u16 {
/* Cyclic index of next element to produce/consume */
u16 prod_idx;
@@ -119,7 +114,7 @@ struct qed_chain {
u16 usable_per_page;
u8 elem_unusable;

- u8 cnt_type;
+ enum qed_chain_cnt_type cnt_type;

/* Slowpath of the chain - required for initialization and destruction,
* but isn't involved in regular functionality.
@@ -142,11 +137,23 @@ struct qed_chain {
/* Total number of elements [for entire chain] */
u32 size;

- u8 intended_use;
+ enum qed_chain_use_mode intended_use;

bool b_external_pbl;
};

+struct qed_chain_init_params {
+ enum qed_chain_mode mode;
+ enum qed_chain_use_mode intended_use;
+ enum qed_chain_cnt_type cnt_type;
+
+ u32 num_elems;
+ size_t elem_size;
+
+ void *ext_pbl_virt;
+ dma_addr_t ext_pbl_phys;
+};
+
#define QED_CHAIN_PAGE_SIZE 0x1000

#define ELEMS_PER_PAGE(elem_size) \
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
index a5c6854343e6..cd6a5c7e56eb 100644
--- a/include/linux/qed/qed_if.h
+++ b/include/linux/qed/qed_if.h
@@ -948,13 +948,8 @@ struct qed_common_ops {
u8 dp_level);

int (*chain_alloc)(struct qed_dev *cdev,
- enum qed_chain_use_mode intended_use,
- enum qed_chain_mode mode,
- enum qed_chain_cnt_type cnt_type,
- u32 num_elems,
- size_t elem_size,
- struct qed_chain *p_chain,
- struct qed_chain_ext_pbl *ext_pbl);
+ struct qed_chain *chain,
+ struct qed_chain_init_params *params);

void (*chain_free)(struct qed_dev *cdev,
struct qed_chain *p_chain);
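
For readers following the conversion, here is a minimal sketch of a
call site after this change (the function name, element count and
element type are illustrative only, not taken from the series):

	/* Allocate a 256-element PBL chain using a single designated-
	 * initializer struct instead of seven positional arguments.
	 */
	static int example_chain_alloc(struct qed_dev *cdev,
				       struct qed_chain *chain)
	{
		struct qed_chain_init_params params = {
			.mode		= QED_CHAIN_MODE_PBL,
			.intended_use	= QED_CHAIN_USE_TO_CONSUME_PRODUCE,
			.cnt_type	= QED_CHAIN_CNT_TYPE_U16,
			.num_elems	= 256,
			.elem_size	= sizeof(struct core_tx_bd),
		};

		/* Fields that are left out, e.g. ext_pbl_virt/ext_pbl_phys,
		 * stay zeroed, so the common case needs no external-PBL
		 * boilerplate.
		 */
		return qed_chain_alloc(cdev, chain, &params);
	}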
--
2.25.1

2020-07-22 15:58:49

by Alexander Lobakin

[permalink] [raw]
Subject: [PATCH net-next 02/15] qed: reformat Makefile

List one entry per line and sort the entries alphabetically to make
adding new ones simpler.

Signed-off-by: Alexander Lobakin <[email protected]>
Signed-off-by: Igor Russkikh <[email protected]>
Signed-off-by: Michal Kalderon <[email protected]>
---
drivers/net/ethernet/qlogic/qed/Makefile | 36 +++++++++++++++++++-----
1 file changed, 29 insertions(+), 7 deletions(-)

diff --git a/drivers/net/ethernet/qlogic/qed/Makefile b/drivers/net/ethernet/qlogic/qed/Makefile
index 4176bbf2a22b..3c75e4fa9b02 100644
--- a/drivers/net/ethernet/qlogic/qed/Makefile
+++ b/drivers/net/ethernet/qlogic/qed/Makefile
@@ -3,12 +3,34 @@

obj-$(CONFIG_QED) := qed.o

-qed-y := qed_cxt.o qed_dev.o qed_hw.o qed_init_fw_funcs.o qed_init_ops.o \
- qed_int.o qed_main.o qed_mcp.o qed_sp_commands.o qed_spq.o qed_l2.o \
- qed_selftest.o qed_dcbx.o qed_debug.o qed_ptp.o qed_mng_tlv.o
-qed-$(CONFIG_QED_SRIOV) += qed_sriov.o qed_vf.o
-qed-$(CONFIG_QED_LL2) += qed_ll2.o
-qed-$(CONFIG_QED_RDMA) += qed_roce.o qed_rdma.o qed_iwarp.o
-qed-$(CONFIG_QED_ISCSI) += qed_iscsi.o
+qed-y := \
+ qed_cxt.o \
+ qed_dcbx.o \
+ qed_debug.o \
+ qed_dev.o \
+ qed_hw.o \
+ qed_init_fw_funcs.o \
+ qed_init_ops.o \
+ qed_int.o \
+ qed_l2.o \
+ qed_main.o \
+ qed_mcp.o \
+ qed_mng_tlv.o \
+ qed_ptp.o \
+ qed_selftest.o \
+ qed_sp_commands.o \
+ qed_spq.o
+
qed-$(CONFIG_QED_FCOE) += qed_fcoe.o
+qed-$(CONFIG_QED_ISCSI) += qed_iscsi.o
+qed-$(CONFIG_QED_LL2) += qed_ll2.o
qed-$(CONFIG_QED_OOO) += qed_ooo.o
+
+qed-$(CONFIG_QED_RDMA) += \
+ qed_iwarp.o \
+ qed_rdma.o \
+ qed_roce.o
+
+qed-$(CONFIG_QED_SRIOV) += \
+ qed_sriov.o \
+ qed_vf.o
--
2.25.1

2020-07-22 16:00:23

by Alexander Lobakin

[permalink] [raw]
Subject: [PATCH net-next 04/15] qed: prevent possible double-frees of the chains

Zero-initialize the chain memory on qed_chain_free() so that a chain
can't be freed twice and provoke undefined behaviour.
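
A rough illustration of the effect (hypothetical call site; assumes the
mode-specific free helpers bail out once the addresses are cleared,
which is what this change is for):

	qed_chain_free(cdev, &chain);	/* frees the pages, then resets the
					 * chain memory via qed_chain_init_mem()
					 */
	qed_chain_free(cdev, &chain);	/* a repeated free is now harmless
					 * instead of undefined behaviour
					 */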

Signed-off-by: Alexander Lobakin <[email protected]>
Signed-off-by: Igor Russkikh <[email protected]>
Signed-off-by: Michal Kalderon <[email protected]>
---
drivers/net/ethernet/qlogic/qed/qed_chain.c | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/drivers/net/ethernet/qlogic/qed/qed_chain.c b/drivers/net/ethernet/qlogic/qed/qed_chain.c
index 40cc26f7f20b..fd2d985793dc 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_chain.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_chain.c
@@ -90,8 +90,10 @@ void qed_chain_free(struct qed_dev *cdev, struct qed_chain *chain)
qed_chain_free_pbl(cdev, chain);
break;
default:
- break;
+ return;
}
+
+ qed_chain_init_mem(chain, NULL, 0);
}

static int
--
2.25.1

2020-07-22 16:00:47

by Alexander Lobakin

[permalink] [raw]
Subject: [PATCH net-next 10/15] qed: optimize common chain accessors

Constify chain pointers and refactor qed_chain_get_elem_left{,_u32}() a bit.
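
Since the accessors take const pointers now, read-only helpers built on
top of them can be const-correct as well. A small sketch (hypothetical
helper, not part of the patch):

	/* Check whether the chain has room for @needed more elements,
	 * without modifying the chain itself.
	 */
	static bool example_chain_has_room(const struct qed_chain *chain,
					   u16 needed)
	{
		return qed_chain_get_elem_left(chain) >= needed;
	}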

Signed-off-by: Alexander Lobakin <[email protected]>
Signed-off-by: Igor Russkikh <[email protected]>
Signed-off-by: Michal Kalderon <[email protected]>
---
include/linux/qed/qed_chain.h | 60 +++++++++++++++++++----------------
1 file changed, 33 insertions(+), 27 deletions(-)

diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h
index 8a96c361cc19..434479e2ab65 100644
--- a/include/linux/qed/qed_chain.h
+++ b/include/linux/qed/qed_chain.h
@@ -182,73 +182,79 @@ struct qed_chain_init_params {
((p)->cnt_type == QED_CHAIN_CNT_TYPE_U32)

/* Accessors */
-static inline u16 qed_chain_get_prod_idx(struct qed_chain *p_chain)
+
+static inline u16 qed_chain_get_prod_idx(const struct qed_chain *chain)
+{
+ return chain->u.chain16.prod_idx;
+}
+
+static inline u16 qed_chain_get_cons_idx(const struct qed_chain *chain)
{
- return p_chain->u.chain16.prod_idx;
+ return chain->u.chain16.cons_idx;
}

-static inline u16 qed_chain_get_cons_idx(struct qed_chain *p_chain)
+static inline u32 qed_chain_get_prod_idx_u32(const struct qed_chain *chain)
{
- return p_chain->u.chain16.cons_idx;
+ return chain->u.chain32.prod_idx;
}

-static inline u32 qed_chain_get_cons_idx_u32(struct qed_chain *p_chain)
+static inline u32 qed_chain_get_cons_idx_u32(const struct qed_chain *chain)
{
- return p_chain->u.chain32.cons_idx;
+ return chain->u.chain32.cons_idx;
}

-static inline u16 qed_chain_get_elem_left(struct qed_chain *p_chain)
+static inline u16 qed_chain_get_elem_left(const struct qed_chain *chain)
{
- u16 elem_per_page = p_chain->elem_per_page;
- u32 prod = p_chain->u.chain16.prod_idx;
- u32 cons = p_chain->u.chain16.cons_idx;
+ u32 prod = qed_chain_get_prod_idx(chain);
+ u32 cons = qed_chain_get_cons_idx(chain);
+ u16 elem_per_page = chain->elem_per_page;
u16 used;

if (prod < cons)
prod += (u32)U16_MAX + 1;

used = (u16)(prod - cons);
- if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR)
- used -= prod / elem_per_page - cons / elem_per_page;
+ if (chain->mode == QED_CHAIN_MODE_NEXT_PTR)
+ used -= (u16)(prod / elem_per_page - cons / elem_per_page);

- return (u16)(p_chain->capacity - used);
+ return (u16)(chain->capacity - used);
}

-static inline u32 qed_chain_get_elem_left_u32(struct qed_chain *p_chain)
+static inline u32 qed_chain_get_elem_left_u32(const struct qed_chain *chain)
{
- u16 elem_per_page = p_chain->elem_per_page;
- u64 prod = p_chain->u.chain32.prod_idx;
- u64 cons = p_chain->u.chain32.cons_idx;
+ u64 prod = qed_chain_get_prod_idx_u32(chain);
+ u64 cons = qed_chain_get_cons_idx_u32(chain);
+ u16 elem_per_page = chain->elem_per_page;
u32 used;

if (prod < cons)
prod += (u64)U32_MAX + 1;

used = (u32)(prod - cons);
- if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR)
+ if (chain->mode == QED_CHAIN_MODE_NEXT_PTR)
used -= (u32)(prod / elem_per_page - cons / elem_per_page);

- return p_chain->capacity - used;
+ return chain->capacity - used;
}

-static inline u16 qed_chain_get_usable_per_page(struct qed_chain *p_chain)
+static inline u16 qed_chain_get_usable_per_page(const struct qed_chain *chain)
{
- return p_chain->usable_per_page;
+ return chain->usable_per_page;
}

-static inline u8 qed_chain_get_unusable_per_page(struct qed_chain *p_chain)
+static inline u8 qed_chain_get_unusable_per_page(const struct qed_chain *chain)
{
- return p_chain->elem_unusable;
+ return chain->elem_unusable;
}

-static inline u32 qed_chain_get_page_cnt(struct qed_chain *p_chain)
+static inline u32 qed_chain_get_page_cnt(const struct qed_chain *chain)
{
- return p_chain->page_cnt;
+ return chain->page_cnt;
}

-static inline dma_addr_t qed_chain_get_pbl_phys(struct qed_chain *p_chain)
+static inline dma_addr_t qed_chain_get_pbl_phys(const struct qed_chain *chain)
{
- return p_chain->pbl_sp.table_phys;
+ return chain->pbl_sp.table_phys;
}

/**
--
2.25.1

2020-07-22 21:54:17

by Alexander Lobakin

[permalink] [raw]
Subject: Re: [PATCH net-next 03/15] qed: move chain methods to a separate file

The kbuild test robot reported a build error on Alpha, sorry. Will send
v2 soon.

Al