Hello Kalle and all,
Here are two patches adding support for the new revision of the QSR10g chip.
The major change from the driver perspective is a PCIe data path update.
Sergey Matyukevich (2):
qtnfmac: refactor data path to prepare for the next QSR10G chip revision
qtnfmac: add support for the new revision of QSR10g chip
drivers/net/wireless/quantenna/qtnfmac/bus.h | 3 +-
drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c | 4 +-
.../wireless/quantenna/qtnfmac/pcie/pearl_pcie.c | 629 ++++++++++++++++-----
.../quantenna/qtnfmac/pcie/pearl_pcie_ipc.h | 3 +
.../quantenna/qtnfmac/pcie/pearl_pcie_regs.h | 33 +-
.../net/wireless/quantenna/qtnfmac/qtn_hw_ids.h | 1 +
drivers/net/wireless/quantenna/qtnfmac/util.c | 2 +
7 files changed, 517 insertions(+), 158 deletions(-)
--
2.11.0
Data path operations may differ between chip revisions. Extract such
operations and settings into a separate structure in order to support
multiple QSR10G chip revisions with a single module.
Remove data path counters specific to a single chip revision.
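For illustration, the per-revision hooks end up being selected once at
probe time and then invoked through the ops pointer, roughly as follows
(a sketch using only names introduced by this patch):

	switch (bus->chipid) {
	case QTN_CHIP_ID_PEARL:
	case QTN_CHIP_ID_PEARL_B:
		ps->hdp_ops = &hdp_ops_rev_b;
		break;
	default:
		return -ENOTSUPP;
	}

	/* callers no longer need to know which revision they drive */
	ps->hdp_ops->hdp_set_queues(ps, tx_bd_size, rx_bd_size);
	ps->hdp_ops->hdp_hhbm_init(ps);
	ret = ps->hdp_ops->hdp_alloc_bd_table(ps);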
Signed-off-by: Sergey Matyukevich <[email protected]>
---
drivers/net/wireless/quantenna/qtnfmac/bus.h | 3 +-
drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c | 3 +-
.../wireless/quantenna/qtnfmac/pcie/pearl_pcie.c | 356 +++++++++++++--------
3 files changed, 220 insertions(+), 142 deletions(-)
diff --git a/drivers/net/wireless/quantenna/qtnfmac/bus.h b/drivers/net/wireless/quantenna/qtnfmac/bus.h
index 87d048df09d1..b8e1049e7e21 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/bus.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/bus.h
@@ -52,8 +52,7 @@ struct qtnf_bus_ops {
struct qtnf_bus {
struct device *dev;
enum qtnf_fw_state fw_state;
- u32 chip;
- u32 chiprev;
+ u32 chipid;
struct qtnf_bus_ops *bus_ops;
struct qtnf_wmac *mac[QTNF_MAX_MAC];
struct qtnf_qlink_transport trans;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c
index 5337e67092ca..1a1896c4c042 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c
@@ -335,10 +335,11 @@ static int qtnf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (!bus)
return -ENOMEM;
+ bus->fw_state = QTNF_FW_STATE_DETACHED;
+ bus->chipid = chipid;
pcie_priv = get_bus_priv(bus);
pci_set_drvdata(pdev, bus);
bus->dev = &pdev->dev;
- bus->fw_state = QTNF_FW_STATE_DETACHED;
pcie_priv->pdev = pdev;
pcie_priv->tx_stopped = 0;
pcie_priv->flashboot = flashboot;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
index 8e0d8018208a..32506f700cca 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
@@ -23,9 +23,6 @@
#include "shm_ipc.h"
#include "debug.h"
-#define PEARL_TX_BD_SIZE_DEFAULT 32
-#define PEARL_RX_BD_SIZE_DEFAULT 256
-
struct qtnf_pearl_bda {
__le16 bda_len;
__le16 bda_version;
@@ -73,8 +70,28 @@ struct qtnf_pearl_fw_hdr {
__le32 crc;
} __packed;
+struct qtnf_pcie_pearl_state;
+
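+/*
+ * Revision-specific host data path (HDP) hooks: descriptor table layout,
+ * queue sizing and the register accesses that differ between PEARL chip
+ * revisions are hidden behind this ops table.
+ */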
+struct qtnf_pcie_pearl_hdp_ops {
+ u16 hdp_rx_bd_size_default;
+ u16 hdp_tx_bd_size_default;
+ int (*hdp_alloc_bd_table)(struct qtnf_pcie_pearl_state *ps);
+ void (*hdp_init)(struct qtnf_pcie_pearl_state *ps);
+ void (*hdp_hhbm_init)(struct qtnf_pcie_pearl_state *ps);
+ void (*hdp_set_queues)(struct qtnf_pcie_pearl_state *ps,
+ unsigned int tx_bd_size,
+ unsigned int rx_bd_size);
+ void (*hdp_rbd_attach)(struct qtnf_pcie_pearl_state *ps, u16 index,
+ dma_addr_t paddr);
+ u32 (*hdp_get_tx_done_index)(struct qtnf_pcie_pearl_state *ps);
+ void (*hdp_tx_hw_push)(struct qtnf_pcie_pearl_state *ps, int index,
+ dma_addr_t paddr);
+
+};
+
struct qtnf_pcie_pearl_state {
struct qtnf_pcie_bus_priv base;
+ const struct qtnf_pcie_pearl_hdp_ops *hdp_ops;
/* lock for irq configuration changes */
spinlock_t irq_lock;
@@ -97,6 +114,180 @@ struct qtnf_pcie_pearl_state {
u32 pcie_irq_uf_count;
};
+/* HDP common ops */
+
+static void hdp_set_queues_common(struct qtnf_pcie_pearl_state *ps,
+ unsigned int tx_bd_size,
+ unsigned int rx_bd_size)
+{
+ struct qtnf_pcie_bus_priv *priv = &ps->base;
+
+ if (tx_bd_size == 0) {
+ tx_bd_size = ps->hdp_ops->hdp_tx_bd_size_default;
+ } else if (!is_power_of_2(tx_bd_size)) {
+ pr_warn("invalid tx_bd_size value %u, use default %u\n",
+ tx_bd_size, ps->hdp_ops->hdp_tx_bd_size_default);
+ tx_bd_size = ps->hdp_ops->hdp_tx_bd_size_default;
+ }
+
+ if (rx_bd_size == 0) {
+ rx_bd_size = ps->hdp_ops->hdp_rx_bd_size_default;
+ } else if (!is_power_of_2(rx_bd_size)) {
+ pr_warn("invalid rx_bd_size value %u, use default %u\n",
+			rx_bd_size, ps->hdp_ops->hdp_rx_bd_size_default);
+ rx_bd_size = ps->hdp_ops->hdp_rx_bd_size_default;
+ }
+
+ priv->tx_bd_num = tx_bd_size;
+ priv->rx_bd_num = rx_bd_size;
+}
+
+/* HDP ops: rev B */
+
+static int hdp_alloc_bd_table_rev_b(struct qtnf_pcie_pearl_state *ps)
+{
+ struct qtnf_pcie_bus_priv *priv = &ps->base;
+ dma_addr_t paddr;
+ void *vaddr;
+ int len;
+
+ len = priv->tx_bd_num * sizeof(struct qtnf_pearl_tx_bd) +
+ priv->rx_bd_num * sizeof(struct qtnf_pearl_rx_bd);
+
+ vaddr = dmam_alloc_coherent(&priv->pdev->dev, len, &paddr, GFP_KERNEL);
+ if (!vaddr)
+ return -ENOMEM;
+
+ /* tx bd */
+
+ ps->bd_table_vaddr = vaddr;
+ ps->bd_table_paddr = paddr;
+ ps->bd_table_len = len;
+
+ ps->tx_bd_vbase = vaddr;
+ ps->tx_bd_pbase = paddr;
+
+ pr_debug("TX descriptor table: vaddr=0x%p paddr=%pad\n", vaddr, &paddr);
+
+ /* rx bd */
+
+ vaddr = ((struct qtnf_pearl_tx_bd *)vaddr) + priv->tx_bd_num;
+ paddr += priv->tx_bd_num * sizeof(struct qtnf_pearl_tx_bd);
+
+ ps->rx_bd_vbase = vaddr;
+ ps->rx_bd_pbase = paddr;
+
+ pr_debug("RX descriptor table: vaddr=0x%p paddr=%pad\n", vaddr, &paddr);
+
+ return 0;
+}
+
+static void hdp_rbd_attach_rev_b(struct qtnf_pcie_pearl_state *ps, u16 index,
+ dma_addr_t paddr)
+{
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ writel(QTN_HOST_HI32(paddr),
+ PCIE_HDP_HHBM_BUF_PTR_H(ps->pcie_reg_base));
+#endif
+ writel(QTN_HOST_LO32(paddr),
+ PCIE_HDP_HHBM_BUF_PTR(ps->pcie_reg_base));
+
+ writel(index, PCIE_HDP_TX_HOST_Q_WR_PTR(ps->pcie_reg_base));
+}
+
+static void hdp_hhbm_init_rev_b(struct qtnf_pcie_pearl_state *ps)
+{
+ u32 val;
+
+ val = readl(PCIE_HHBM_CONFIG(ps->pcie_reg_base));
+ val |= HHBM_CONFIG_SOFT_RESET;
+ writel(val, PCIE_HHBM_CONFIG(ps->pcie_reg_base));
+ usleep_range(50, 100);
+ val &= ~HHBM_CONFIG_SOFT_RESET;
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ val |= HHBM_64BIT;
+#endif
+ writel(val, PCIE_HHBM_CONFIG(ps->pcie_reg_base));
+ writel(ps->base.rx_bd_num, PCIE_HHBM_Q_LIMIT_REG(ps->pcie_reg_base));
+}
+
+static void hdp_init_rev_b(struct qtnf_pcie_pearl_state *ps)
+{
+ struct qtnf_pcie_bus_priv *priv = &ps->base;
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ writel(QTN_HOST_HI32(ps->rx_bd_pbase),
+ PCIE_HDP_TX_HOST_Q_BASE_H(ps->pcie_reg_base));
+#endif
+ writel(QTN_HOST_LO32(ps->rx_bd_pbase),
+ PCIE_HDP_TX_HOST_Q_BASE_L(ps->pcie_reg_base));
+ writel(priv->rx_bd_num | (sizeof(struct qtnf_pearl_rx_bd)) << 16,
+ PCIE_HDP_TX_HOST_Q_SZ_CTRL(ps->pcie_reg_base));
+}
+
+static void hdp_set_queues_rev_b(struct qtnf_pcie_pearl_state *ps,
+ unsigned int tx_bd_size,
+ unsigned int rx_bd_size)
+{
+ struct qtnf_pcie_bus_priv *priv = &ps->base;
+ u32 val;
+
+ hdp_set_queues_common(ps, tx_bd_size, rx_bd_size);
+
+	val = priv->tx_bd_num * sizeof(struct qtnf_pearl_tx_bd);
+	if (val > PCIE_HHBM_MAX_SIZE) {
+		pr_warn("invalid tx_bd_size value %u, use default %u\n",
+			priv->tx_bd_num, ps->hdp_ops->hdp_tx_bd_size_default);
+		priv->tx_bd_num = ps->hdp_ops->hdp_tx_bd_size_default;
+	}
+
+	val = priv->rx_bd_num * sizeof(dma_addr_t);
+	if (val > PCIE_HHBM_MAX_SIZE) {
+		pr_warn("invalid rx_bd_size value %u, use default %u\n",
+			priv->rx_bd_num, ps->hdp_ops->hdp_rx_bd_size_default);
+		priv->rx_bd_num = ps->hdp_ops->hdp_rx_bd_size_default;
+	}
+}
+
+static u32 hdp_get_tx_done_index_rev_b(struct qtnf_pcie_pearl_state *ps)
+{
+ struct qtnf_pcie_bus_priv *priv = &ps->base;
+ u32 v;
+
+ v = readl(PCIE_HDP_RX0DMA_CNT(ps->pcie_reg_base))
+ & (priv->tx_bd_num - 1);
+
+ return v;
+}
+
+static void hdp_tx_hw_push_rev_b(struct qtnf_pcie_pearl_state *ps, int index,
+ dma_addr_t paddr)
+{
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ writel(QTN_HOST_HI32(paddr),
+ PCIE_HDP_HOST_WR_DESC0_H(ps->pcie_reg_base));
+#endif
+ writel(QTN_HOST_LO32(paddr),
+ PCIE_HDP_HOST_WR_DESC0(ps->pcie_reg_base));
+}
+
+static const struct qtnf_pcie_pearl_hdp_ops hdp_ops_rev_b = {
+ .hdp_tx_bd_size_default = 32,
+ .hdp_rx_bd_size_default = 256,
+ .hdp_alloc_bd_table = hdp_alloc_bd_table_rev_b,
+ .hdp_init = hdp_init_rev_b,
+ .hdp_hhbm_init = hdp_hhbm_init_rev_b,
+ .hdp_set_queues = hdp_set_queues_rev_b,
+ .hdp_rbd_attach = hdp_rbd_attach_rev_b,
+ .hdp_get_tx_done_index = hdp_get_tx_done_index_rev_b,
+ .hdp_tx_hw_push = hdp_tx_hw_push_rev_b,
+};
+
+/* common */
+
static inline void qtnf_init_hdp_irqs(struct qtnf_pcie_pearl_state *ps)
{
unsigned long flags;
@@ -229,56 +420,6 @@ static int qtnf_poll_state(__le32 __iomem *reg, u32 state, u32 delay_in_ms)
return 0;
}
-static int pearl_alloc_bd_table(struct qtnf_pcie_pearl_state *ps)
-{
- struct qtnf_pcie_bus_priv *priv = &ps->base;
- dma_addr_t paddr;
- void *vaddr;
- int len;
-
- len = priv->tx_bd_num * sizeof(struct qtnf_pearl_tx_bd) +
- priv->rx_bd_num * sizeof(struct qtnf_pearl_rx_bd);
-
- vaddr = dmam_alloc_coherent(&priv->pdev->dev, len, &paddr, GFP_KERNEL);
- if (!vaddr)
- return -ENOMEM;
-
- /* tx bd */
-
- ps->bd_table_vaddr = vaddr;
- ps->bd_table_paddr = paddr;
- ps->bd_table_len = len;
-
- ps->tx_bd_vbase = vaddr;
- ps->tx_bd_pbase = paddr;
-
- pr_debug("TX descriptor table: vaddr=0x%p paddr=%pad\n", vaddr, &paddr);
-
- priv->tx_bd_r_index = 0;
- priv->tx_bd_w_index = 0;
-
- /* rx bd */
-
- vaddr = ((struct qtnf_pearl_tx_bd *)vaddr) + priv->tx_bd_num;
- paddr += priv->tx_bd_num * sizeof(struct qtnf_pearl_tx_bd);
-
- ps->rx_bd_vbase = vaddr;
- ps->rx_bd_pbase = paddr;
-
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- writel(QTN_HOST_HI32(paddr),
- PCIE_HDP_TX_HOST_Q_BASE_H(ps->pcie_reg_base));
-#endif
- writel(QTN_HOST_LO32(paddr),
- PCIE_HDP_TX_HOST_Q_BASE_L(ps->pcie_reg_base));
- writel(priv->rx_bd_num | (sizeof(struct qtnf_pearl_rx_bd)) << 16,
- PCIE_HDP_TX_HOST_Q_SZ_CTRL(ps->pcie_reg_base));
-
- pr_debug("RX descriptor table: vaddr=0x%p paddr=%pad\n", vaddr, &paddr);
-
- return 0;
-}
-
static int pearl_skb2rbd_attach(struct qtnf_pcie_pearl_state *ps, u16 index)
{
struct qtnf_pcie_bus_priv *priv = &ps->base;
@@ -312,14 +453,8 @@ static int pearl_skb2rbd_attach(struct qtnf_pcie_pearl_state *ps, u16 index)
/* sync up all descriptor updates */
wmb();
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- writel(QTN_HOST_HI32(paddr),
- PCIE_HDP_HHBM_BUF_PTR_H(ps->pcie_reg_base));
-#endif
- writel(QTN_HOST_LO32(paddr),
- PCIE_HDP_HHBM_BUF_PTR(ps->pcie_reg_base));
+ ps->hdp_ops->hdp_rbd_attach(ps, index, paddr);
- writel(index, PCIE_HDP_TX_HOST_Q_WR_PTR(ps->pcie_reg_base));
return 0;
}
@@ -379,66 +514,15 @@ static void qtnf_pearl_free_xfer_buffers(struct qtnf_pcie_pearl_state *ps)
}
}
-static int pearl_hhbm_init(struct qtnf_pcie_pearl_state *ps)
-{
- u32 val;
-
- val = readl(PCIE_HHBM_CONFIG(ps->pcie_reg_base));
- val |= HHBM_CONFIG_SOFT_RESET;
- writel(val, PCIE_HHBM_CONFIG(ps->pcie_reg_base));
- usleep_range(50, 100);
- val &= ~HHBM_CONFIG_SOFT_RESET;
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- val |= HHBM_64BIT;
-#endif
- writel(val, PCIE_HHBM_CONFIG(ps->pcie_reg_base));
- writel(ps->base.rx_bd_num, PCIE_HHBM_Q_LIMIT_REG(ps->pcie_reg_base));
-
- return 0;
-}
-
static int qtnf_pcie_pearl_init_xfer(struct qtnf_pcie_pearl_state *ps,
unsigned int tx_bd_size,
unsigned int rx_bd_size)
{
struct qtnf_pcie_bus_priv *priv = &ps->base;
int ret;
- u32 val;
- if (tx_bd_size == 0)
- tx_bd_size = PEARL_TX_BD_SIZE_DEFAULT;
-
- val = tx_bd_size * sizeof(struct qtnf_pearl_tx_bd);
-
- if (!is_power_of_2(tx_bd_size) || val > PCIE_HHBM_MAX_SIZE) {
- pr_warn("invalid tx_bd_size value %u, use default %u\n",
- tx_bd_size, PEARL_TX_BD_SIZE_DEFAULT);
- priv->tx_bd_num = PEARL_TX_BD_SIZE_DEFAULT;
- } else {
- priv->tx_bd_num = tx_bd_size;
- }
-
- if (rx_bd_size == 0)
- rx_bd_size = PEARL_RX_BD_SIZE_DEFAULT;
-
- val = rx_bd_size * sizeof(dma_addr_t);
-
- if (!is_power_of_2(rx_bd_size) || val > PCIE_HHBM_MAX_SIZE) {
- pr_warn("invalid rx_bd_size value %u, use default %u\n",
- rx_bd_size, PEARL_RX_BD_SIZE_DEFAULT);
- priv->rx_bd_num = PEARL_RX_BD_SIZE_DEFAULT;
- } else {
- priv->rx_bd_num = rx_bd_size;
- }
-
- priv->rx_bd_w_index = 0;
- priv->rx_bd_r_index = 0;
-
- ret = pearl_hhbm_init(ps);
- if (ret) {
- pr_err("failed to init h/w queues\n");
- return ret;
- }
+ ps->hdp_ops->hdp_set_queues(ps, tx_bd_size, rx_bd_size);
+ ps->hdp_ops->hdp_hhbm_init(ps);
ret = qtnf_pcie_alloc_skb_array(priv);
if (ret) {
@@ -446,7 +530,7 @@ static int qtnf_pcie_pearl_init_xfer(struct qtnf_pcie_pearl_state *ps,
return ret;
}
- ret = pearl_alloc_bd_table(ps);
+ ret = ps->hdp_ops->hdp_alloc_bd_table(ps);
if (ret) {
pr_err("failed to allocate bd table\n");
return ret;
@@ -458,6 +542,8 @@ static int qtnf_pcie_pearl_init_xfer(struct qtnf_pcie_pearl_state *ps,
return ret;
}
+ ps->hdp_ops->hdp_init(ps);
+
return ret;
}
@@ -474,9 +560,7 @@ static void qtnf_pearl_data_tx_reclaim(struct qtnf_pcie_pearl_state *ps)
spin_lock_irqsave(&priv->tx_reclaim_lock, flags);
- tx_done_index = readl(PCIE_HDP_RX0DMA_CNT(ps->pcie_reg_base))
- & (priv->tx_bd_num - 1);
-
+ tx_done_index = ps->hdp_ops->hdp_get_tx_done_index(ps);
i = priv->tx_bd_r_index;
while (CIRC_CNT(tx_done_index, i, priv->tx_bd_num)) {
@@ -580,18 +664,13 @@ static int qtnf_pcie_skb_send(struct qtnf_bus *bus, struct sk_buff *skb)
/* write new TX descriptor to PCIE_RX_FIFO on EP */
txbd_paddr = ps->tx_bd_pbase + i * sizeof(struct qtnf_pearl_tx_bd);
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- writel(QTN_HOST_HI32(txbd_paddr),
- PCIE_HDP_HOST_WR_DESC0_H(ps->pcie_reg_base));
-#endif
- writel(QTN_HOST_LO32(txbd_paddr),
- PCIE_HDP_HOST_WR_DESC0(ps->pcie_reg_base));
-
if (++i >= priv->tx_bd_num)
i = 0;
priv->tx_bd_w_index = i;
+ ps->hdp_ops->hdp_tx_hw_push(ps, i, txbd_paddr);
+
tx_done:
if (ret && skb) {
pr_err_ratelimited("drop skb\n");
@@ -739,7 +818,7 @@ static int qtnf_pcie_pearl_rx_poll(struct napi_struct *napi, int budget)
consume = 0;
}
- if (skb && (skb_tailroom(skb) < psize)) {
+ if (skb && (skb_tailroom(skb) < psize)) {
pr_err("skip packet with invalid length: %u > %u\n",
psize, skb_tailroom(skb));
consume = 0;
@@ -777,7 +856,7 @@ static int qtnf_pcie_pearl_rx_poll(struct napi_struct *napi, int budget)
priv->rx_bd_r_index = r_idx;
- /* repalce processed buffer by a new one */
+ /* replace processed buffer by a new one */
w_idx = priv->rx_bd_w_index;
while (CIRC_SPACE(priv->rx_bd_w_index, priv->rx_bd_r_index,
priv->rx_bd_num) > 0) {
@@ -884,22 +963,10 @@ static int qtnf_dbg_hdp_stats(struct seq_file *s, void *data)
seq_printf(s, "tx_reclaim_req(%u)\n", priv->tx_reclaim_req);
seq_printf(s, "tx_bd_r_index(%u)\n", priv->tx_bd_r_index);
- seq_printf(s, "tx_bd_p_index(%u)\n",
- readl(PCIE_HDP_RX0DMA_CNT(ps->pcie_reg_base))
- & (priv->tx_bd_num - 1));
seq_printf(s, "tx_bd_w_index(%u)\n", priv->tx_bd_w_index);
- seq_printf(s, "tx queue len(%u)\n",
- CIRC_CNT(priv->tx_bd_w_index, priv->tx_bd_r_index,
- priv->tx_bd_num));
seq_printf(s, "rx_bd_r_index(%u)\n", priv->rx_bd_r_index);
- seq_printf(s, "rx_bd_p_index(%u)\n",
- readl(PCIE_HDP_TX0DMA_CNT(ps->pcie_reg_base))
- & (priv->rx_bd_num - 1));
seq_printf(s, "rx_bd_w_index(%u)\n", priv->rx_bd_w_index);
- seq_printf(s, "rx alloc queue len(%u)\n",
- CIRC_SPACE(priv->rx_bd_w_index, priv->rx_bd_r_index,
- priv->rx_bd_num));
return 0;
}
@@ -1108,7 +1175,8 @@ static u64 qtnf_pearl_dma_mask_get(void)
#endif
}
-static int qtnf_pcie_pearl_probe(struct qtnf_bus *bus, unsigned int tx_bd_size,
+static int qtnf_pcie_pearl_probe(struct qtnf_bus *bus,
+ unsigned int tx_bd_size,
unsigned int rx_bd_size)
{
struct qtnf_shm_ipc_int ipc_int;
@@ -1120,6 +1188,16 @@ static int qtnf_pcie_pearl_probe(struct qtnf_bus *bus, unsigned int tx_bd_size,
spin_lock_init(&ps->irq_lock);
INIT_WORK(&bus->fw_work, qtnf_pearl_fw_work_handler);
+ switch (bus->chipid) {
+ case QTN_CHIP_ID_PEARL:
+ case QTN_CHIP_ID_PEARL_B:
+ ps->hdp_ops = &hdp_ops_rev_b;
+ break;
+ default:
+ pr_err("unsupported PEARL chip ID 0x%x\n", bus->chipid);
+ return -ENOTSUPP;
+ }
+
ps->pcie_reg_base = ps->base.dmareg_bar;
ps->bda = ps->base.epmem_bar;
writel(ps->base.msi_enabled, &ps->bda->bda_rc_msi_enabled);
--
2.11.0
Add support for the new minor revision of the QSR10g chip. Major changes from
the driver perspective include PCIe data path modifications. Setup is now
more complicated, but more of the work is offloaded to hardware. As a
result, fewer driver boilerplate operations are needed after the Tx/Rx
descriptor queues have been configured. In addition, restrictions on
descriptor queue lengths have been relaxed.
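As one example of the reduced boilerplate, a sketch reusing names from
this patch (idx is only a placeholder): the tx completion index is now
read from a counter kept in coherent host memory, whose DMA address is
programmed into the new PCIE_HDP_RX2_DEV_PTR_ADDR register, instead of
being read back from an HDP register:

	/* rev B: completion index read back from a device register */
	idx = readl(PCIE_HDP_RX0DMA_CNT(ps->pcie_reg_base)) &
	      (priv->tx_bd_num - 1);

	/* rev C: completion counter updated in coherent host memory */
	idx = le32_to_cpu(*(__le32 *)ps->tx_dma_cnt_vaddr) &
	      (priv->tx_bd_num - 1);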
Signed-off-by: Sergey Matyukevich <[email protected]>
---
drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c | 1 +
.../wireless/quantenna/qtnfmac/pcie/pearl_pcie.c | 273 +++++++++++++++++++--
.../quantenna/qtnfmac/pcie/pearl_pcie_ipc.h | 3 +
.../quantenna/qtnfmac/pcie/pearl_pcie_regs.h | 33 ++-
.../net/wireless/quantenna/qtnfmac/qtn_hw_ids.h | 1 +
drivers/net/wireless/quantenna/qtnfmac/util.c | 2 +
6 files changed, 297 insertions(+), 16 deletions(-)
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c
index 1a1896c4c042..45bb84007bd5 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c
@@ -322,6 +322,7 @@ static int qtnf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
case QTN_CHIP_ID_PEARL:
case QTN_CHIP_ID_PEARL_B:
case QTN_CHIP_ID_PEARL_C:
+ case QTN_CHIP_ID_PEARL_C1:
bus = qtnf_pcie_pearl_alloc(pdev);
break;
case QTN_CHIP_ID_TOPAZ:
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
index 32506f700cca..7b01fa7fab1c 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
@@ -57,8 +57,6 @@ struct qtnf_pearl_rx_bd {
__le32 addr_h;
__le32 info;
__le32 info_h;
- __le32 next_ptr;
- __le32 next_ptr_h;
} __packed;
struct qtnf_pearl_fw_hdr {
@@ -78,12 +76,15 @@ struct qtnf_pcie_pearl_hdp_ops {
int (*hdp_alloc_bd_table)(struct qtnf_pcie_pearl_state *ps);
void (*hdp_init)(struct qtnf_pcie_pearl_state *ps);
void (*hdp_hhbm_init)(struct qtnf_pcie_pearl_state *ps);
+ void (*hdp_enable)(struct qtnf_pcie_pearl_state *ps);
+ void (*hdp_disable)(struct qtnf_pcie_pearl_state *ps);
void (*hdp_set_queues)(struct qtnf_pcie_pearl_state *ps,
unsigned int tx_bd_size,
unsigned int rx_bd_size);
void (*hdp_rbd_attach)(struct qtnf_pcie_pearl_state *ps, u16 index,
dma_addr_t paddr);
u32 (*hdp_get_tx_done_index)(struct qtnf_pcie_pearl_state *ps);
+ void (*hdp_tx_done_wrap)(struct qtnf_pcie_pearl_state *ps);
void (*hdp_tx_hw_push)(struct qtnf_pcie_pearl_state *ps, int index,
dma_addr_t paddr);
@@ -105,8 +106,19 @@ struct qtnf_pcie_pearl_state {
struct qtnf_pearl_rx_bd *rx_bd_vbase;
dma_addr_t rx_bd_pbase;
+ dma_addr_t rx_dma_cnt_paddr;
+ void *rx_dma_cnt_vaddr;
+
+ dma_addr_t tx_dma_cnt_paddr;
+ void *tx_dma_cnt_vaddr;
+
dma_addr_t bd_table_paddr;
void *bd_table_vaddr;
+
+ u32 tx_bd_ack_wrap;
+ u16 rx_bd_h_index;
+ u16 tx_bd_h_index;
+
u32 bd_table_len;
u32 pcie_irq_mask;
u32 pcie_irq_rx_count;
@@ -280,12 +292,234 @@ static const struct qtnf_pcie_pearl_hdp_ops hdp_ops_rev_b = {
.hdp_alloc_bd_table = hdp_alloc_bd_table_rev_b,
.hdp_init = hdp_init_rev_b,
.hdp_hhbm_init = hdp_hhbm_init_rev_b,
+ .hdp_enable = NULL,
+ .hdp_disable = NULL,
.hdp_set_queues = hdp_set_queues_rev_b,
.hdp_rbd_attach = hdp_rbd_attach_rev_b,
.hdp_get_tx_done_index = hdp_get_tx_done_index_rev_b,
+ .hdp_tx_done_wrap = NULL,
.hdp_tx_hw_push = hdp_tx_hw_push_rev_b,
};
+/* HDP ops: rev C */
+
+static int hdp_alloc_bd_table_rev_c(struct qtnf_pcie_pearl_state *ps)
+{
+ struct qtnf_pcie_bus_priv *priv = &ps->base;
+ dma_addr_t paddr;
+ void *vaddr;
+ int len;
+
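+	/* one coherent allocation: tx ring, rx ring, then the rx and tx
+	 * DMA completion counters used by this chip revision
+	 */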
+ len = priv->tx_bd_num * sizeof(struct qtnf_pearl_tx_bd) +
+ priv->rx_bd_num * sizeof(struct qtnf_pearl_rx_bd) +
+ 2 * QTN_HDP_DMA_PTR_SIZE;
+
+ vaddr = dmam_alloc_coherent(&priv->pdev->dev, len, &paddr, GFP_KERNEL);
+ if (!vaddr)
+ return -ENOMEM;
+
+ /* tx bd */
+
+ ps->bd_table_vaddr = vaddr;
+ ps->bd_table_paddr = paddr;
+ ps->bd_table_len = len;
+
+ ps->tx_bd_vbase = vaddr;
+ ps->tx_bd_pbase = paddr;
+
+ pr_debug("TX descriptor table: vaddr=0x%p paddr=%pad\n", vaddr, &paddr);
+
+ /* rx bd */
+
+ vaddr = ((struct qtnf_pearl_tx_bd *)vaddr) + priv->tx_bd_num;
+ paddr += priv->tx_bd_num * sizeof(struct qtnf_pearl_tx_bd);
+
+ ps->rx_bd_vbase = vaddr;
+ ps->rx_bd_pbase = paddr;
+
+ pr_debug("RX descriptor table: vaddr=0x%p paddr=%pad\n", vaddr, &paddr);
+
+ /* dma completion counters */
+
+ vaddr = ((struct qtnf_pearl_rx_bd *)vaddr) + priv->rx_bd_num;
+ paddr += priv->rx_bd_num * sizeof(struct qtnf_pearl_rx_bd);
+
+ ps->rx_dma_cnt_vaddr = vaddr;
+ ps->rx_dma_cnt_paddr = paddr;
+
+ vaddr += QTN_HDP_DMA_PTR_SIZE;
+ paddr += QTN_HDP_DMA_PTR_SIZE;
+
+ ps->tx_dma_cnt_vaddr = vaddr;
+ ps->tx_dma_cnt_paddr = paddr;
+
+ return 0;
+}
+
+static void hdp_rbd_attach_rev_c(struct qtnf_pcie_pearl_state *ps, u16 index,
+ dma_addr_t paddr)
+{
+ u16 ihw;
+
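+	/* carry the current wrap bit and toggle it whenever the new write
+	 * pointer is numerically lower than the previous one, i.e. the rx
+	 * descriptor ring has wrapped around
+	 */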
+ ihw = index | (ps->rx_bd_h_index & QTN_HDP_BD_WRAP);
+ if (ihw < ps->rx_bd_h_index)
+ ihw ^= QTN_HDP_BD_WRAP;
+
+ writel(ihw | ((ihw ^ QTN_HDP_BD_WRAP) << 16),
+ PCIE_HDP_TX0_DESC_Q_WR_PTR(ps->pcie_reg_base));
+
+ ps->rx_bd_h_index = ihw;
+}
+
+static void hdp_hhbm_init_rev_c(struct qtnf_pcie_pearl_state *ps)
+{
+ u32 val;
+
+ val = readl(PCIE_HHBM_CONFIG(ps->pcie_reg_base));
+ val |= HHBM_CONFIG_SOFT_RESET;
+ writel(val, PCIE_HHBM_CONFIG(ps->pcie_reg_base));
+ usleep_range(50, 100);
+}
+
+static void hdp_init_rev_c(struct qtnf_pcie_pearl_state *ps)
+{
+ struct qtnf_pcie_bus_priv *priv = &ps->base;
+ int mrrs = pcie_get_readrq(priv->pdev);
+ int mps = pcie_get_mps(priv->pdev);
+ u32 val;
+
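+	/* enable 32-beat AXI read bursts only if the max read request size
+	 * exceeds PCIE_HDP_AXI_BURST32_SIZE, and write bursts only if the
+	 * max payload size does
+	 */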
+ val = readl(PCIE_HDP_AXI_MASTER_CTRL(ps->pcie_reg_base));
+
+ if (mrrs > PCIE_HDP_AXI_BURST32_SIZE)
+ val |= PCIE_HDP_AXI_EN_BURST32_READ;
+ else
+ val &= ~PCIE_HDP_AXI_EN_BURST32_READ;
+
+ if (mps > PCIE_HDP_AXI_BURST32_SIZE)
+ val |= PCIE_HDP_AXI_EN_BURST32_WRITE;
+ else
+ val &= ~PCIE_HDP_AXI_EN_BURST32_WRITE;
+
+ writel(val, PCIE_HDP_AXI_MASTER_CTRL(ps->pcie_reg_base));
+
+ /* HDP Tx init */
+
+ writel(PCIE_HDP_RXDMA_INTERLEAVE | PCIE_HDP_RXDMA_NEW |
+ PCIE_HDP_RXDMA_WPTR, PCIE_HDP_RXDMA_CTRL(ps->pcie_reg_base));
+ writel(PCIE_HDP_TXDMA_NEW, PCIE_HDP_TX_DMA_CTRL(ps->pcie_reg_base));
+
+ writel(QTN_HOST_LO32(ps->tx_bd_pbase),
+ PCIE_HDP_RX2_DESC_BASE_ADDR(ps->pcie_reg_base));
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ writel(QTN_HOST_HI32(ps->tx_bd_pbase),
+ PCIE_HDP_RX2_DESC_BASE_ADDR_H(ps->pcie_reg_base));
+#endif
+
+ writel(priv->tx_bd_num | (sizeof(struct qtnf_pearl_tx_bd) << 16),
+ PCIE_HDP_RX2_DESC_Q_CTRL(ps->pcie_reg_base));
+
+ writel(QTN_HOST_LO32(ps->tx_dma_cnt_paddr),
+ PCIE_HDP_RX2_DEV_PTR_ADDR(ps->pcie_reg_base));
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ writel(QTN_HOST_HI32(ps->tx_dma_cnt_paddr),
+ PCIE_HDP_RX2_DEV_PTR_ADDR_H(ps->pcie_reg_base));
+#endif
+ writel(ps->tx_bd_h_index,
+ PCIE_HDP_RX2_DESC_Q_WR_PTR(ps->pcie_reg_base));
+
+ /* HDP Rx init */
+
+ writel(QTN_HOST_LO32(ps->rx_bd_pbase),
+ PCIE_HDP_TX0_DESC_BASE_ADDR(ps->pcie_reg_base));
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ writel(QTN_HOST_HI32(ps->rx_bd_pbase),
+ PCIE_HDP_TX0_DESC_BASE_ADDR_H(ps->pcie_reg_base));
+#endif
+ writel(priv->rx_bd_num | (sizeof(struct qtnf_pearl_rx_bd) << 16),
+ PCIE_HDP_TX0_DESC_Q_CTRL(ps->pcie_reg_base));
+
+ writel(QTN_HOST_LO32(ps->rx_dma_cnt_paddr),
+ PCIE_HDP_TX0_DEV_PTR_ADDR(ps->pcie_reg_base));
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ writel(QTN_HOST_HI32(ps->rx_dma_cnt_paddr),
+ PCIE_HDP_TX0_DEV_PTR_ADDR_H(ps->pcie_reg_base));
+#endif
+}
+
+static u32 hdp_get_tx_done_index_rev_c(struct qtnf_pcie_pearl_state *ps)
+{
+ struct qtnf_pcie_bus_priv *priv = &ps->base;
+ u32 v;
+
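+	/* the tx completion counter is kept in coherent host memory; its
+	 * DMA address is programmed into PCIE_HDP_RX2_DEV_PTR_ADDR in
+	 * hdp_init_rev_c, so read it from there rather than from a register
+	 */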
+ v = le32_to_cpu(*((__le32 *)ps->tx_dma_cnt_vaddr)) &
+ (priv->tx_bd_num - 1);
+
+ return v;
+}
+
+static void hdp_tx_done_wrap_rev_c(struct qtnf_pcie_pearl_state *ps)
+{
+ ps->tx_bd_ack_wrap ^= (QTN_HDP_BD_WRAP << 16);
+}
+
+static void hdp_tx_hw_push_rev_c(struct qtnf_pcie_pearl_state *ps, int index,
+ dma_addr_t paddr)
+{
+ struct qtnf_pcie_bus_priv *priv = &ps->base;
+ u32 ihw;
+
+ ihw = index | (ps->tx_bd_h_index & QTN_HDP_BD_WRAP);
+
+ if (ihw < ps->tx_bd_h_index)
+ ihw ^= QTN_HDP_BD_WRAP;
+
+ writel(ihw | (priv->tx_bd_r_index << 16) | ps->tx_bd_ack_wrap,
+ PCIE_HDP_RX2_DESC_Q_WR_PTR(ps->pcie_reg_base));
+
+ ps->tx_bd_h_index = ihw;
+}
+
+static void hdp_enable_rev_c(struct qtnf_pcie_pearl_state *ps)
+{
+ u32 val;
+
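+	/* let the HDP engine start fetching descriptors on both the tx
+	 * (HDP RX2) and rx (HDP TX0) queues
+	 */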
+ val = readl(PCIE_HDP_RX2_DESC_Q_CTRL(ps->pcie_reg_base));
+ val |= PCIE_HDP_DESC_FETCH_EN;
+ writel(val, PCIE_HDP_RX2_DESC_Q_CTRL(ps->pcie_reg_base));
+
+ val = readl(PCIE_HDP_TX0_DESC_Q_CTRL(ps->pcie_reg_base));
+ val |= PCIE_HDP_DESC_FETCH_EN;
+ writel(val, PCIE_HDP_TX0_DESC_Q_CTRL(ps->pcie_reg_base));
+}
+
+static void hdp_disable_rev_c(struct qtnf_pcie_pearl_state *ps)
+{
+ u32 val;
+
+ val = readl(PCIE_HDP_RX2_DESC_Q_CTRL(ps->pcie_reg_base));
+ val &= ~PCIE_HDP_DESC_FETCH_EN;
+ writel(val, PCIE_HDP_RX2_DESC_Q_CTRL(ps->pcie_reg_base));
+
+ val = readl(PCIE_HDP_TX0_DESC_Q_CTRL(ps->pcie_reg_base));
+ val &= ~PCIE_HDP_DESC_FETCH_EN;
+ writel(val, PCIE_HDP_TX0_DESC_Q_CTRL(ps->pcie_reg_base));
+}
+
+static const struct qtnf_pcie_pearl_hdp_ops hdp_ops_rev_c = {
+ .hdp_rx_bd_size_default = 512,
+ .hdp_tx_bd_size_default = 512,
+ .hdp_alloc_bd_table = hdp_alloc_bd_table_rev_c,
+ .hdp_init = hdp_init_rev_c,
+ .hdp_hhbm_init = hdp_hhbm_init_rev_c,
+ .hdp_enable = hdp_enable_rev_c,
+ .hdp_disable = hdp_disable_rev_c,
+ .hdp_set_queues = hdp_set_queues_common,
+ .hdp_rbd_attach = hdp_rbd_attach_rev_c,
+ .hdp_get_tx_done_index = hdp_get_tx_done_index_rev_c,
+ .hdp_tx_done_wrap = hdp_tx_done_wrap_rev_c,
+ .hdp_tx_hw_push = hdp_tx_hw_push_rev_c,
+};
+
/* common */
static inline void qtnf_init_hdp_irqs(struct qtnf_pcie_pearl_state *ps)
@@ -586,8 +820,11 @@ static void qtnf_pearl_data_tx_reclaim(struct qtnf_pcie_pearl_state *ps)
priv->tx_skb[i] = NULL;
count++;
- if (++i >= priv->tx_bd_num)
+ if (++i >= priv->tx_bd_num) {
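+			/* let the revision-specific code toggle its hardware
+			 * wrap bit when the reclaim index wraps around
+			 */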
+ if (ps->hdp_ops->hdp_tx_done_wrap)
+ ps->hdp_ops->hdp_tx_done_wrap(ps);
i = 0;
+ }
}
priv->tx_reclaim_done += count;
@@ -727,11 +964,17 @@ static irqreturn_t qtnf_pcie_pearl_interrupt(int irq, void *data)
u32 status;
priv->pcie_irq_count++;
- status = readl(PCIE_HDP_INT_STATUS(ps->pcie_reg_base));
qtnf_shm_ipc_irq_handler(&priv->shm_ipc_ep_in);
qtnf_shm_ipc_irq_handler(&priv->shm_ipc_ep_out);
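+	/* mask all HDP interrupts while the status is read, ack the
+	 * reported sources and re-enable only the sources that were not
+	 * reported in this pass
+	 */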
+ writel(0x0, PCIE_HDP_INT_EN(ps->pcie_reg_base));
+ status = readl(PCIE_HDP_INT_STATUS(ps->pcie_reg_base));
+ writel(status & ps->pcie_irq_mask,
+ PCIE_HDP_INT_STATUS(ps->pcie_reg_base));
+ writel(ps->pcie_irq_mask & (~status),
+ PCIE_HDP_INT_EN(ps->pcie_reg_base));
+
if (!(status & ps->pcie_irq_mask))
goto irq_done;
@@ -744,20 +987,13 @@ static irqreturn_t qtnf_pcie_pearl_interrupt(int irq, void *data)
if (status & PCIE_HDP_INT_HHBM_UF)
ps->pcie_irq_uf_count++;
- if (status & PCIE_HDP_INT_RX_BITS) {
- qtnf_dis_rxdone_irq(ps);
+ if (status & PCIE_HDP_INT_RX_BITS)
napi_schedule(&bus->mux_napi);
- }
- if (status & PCIE_HDP_INT_TX_BITS) {
- qtnf_dis_txdone_irq(ps);
+ if (status & PCIE_HDP_INT_TX_BITS)
tasklet_hi_schedule(&priv->reclaim_tq);
- }
irq_done:
- /* H/W workaround: clean all bits, not only enabled */
- qtnf_non_posted_write(~0U, PCIE_HDP_INT_STATUS(ps->pcie_reg_base));
-
if (!priv->msi_enabled)
qtnf_deassert_intx(ps);
@@ -896,6 +1132,8 @@ static void qtnf_pcie_data_rx_start(struct qtnf_bus *bus)
struct qtnf_pcie_pearl_state *ps = (void *)get_bus_priv(bus);
qtnf_enable_hdp_irqs(ps);
+ if (ps->hdp_ops->hdp_enable)
+ ps->hdp_ops->hdp_enable(ps);
napi_enable(&bus->mux_napi);
}
@@ -904,6 +1142,8 @@ static void qtnf_pcie_data_rx_stop(struct qtnf_bus *bus)
struct qtnf_pcie_pearl_state *ps = (void *)get_bus_priv(bus);
napi_disable(&bus->mux_napi);
+ if (ps->hdp_ops->hdp_disable)
+ ps->hdp_ops->hdp_disable(ps);
qtnf_disable_hdp_irqs(ps);
}
@@ -1124,7 +1364,8 @@ static void qtnf_pearl_fw_work_handler(struct work_struct *work)
} else {
pr_info("starting firmware upload: %s\n", fwname);
-
+ if (ps->hdp_ops->hdp_enable)
+ ps->hdp_ops->hdp_enable(ps);
ret = qtnf_ep_fw_load(ps, fw->data, fw->size);
release_firmware(fw);
if (ret) {
@@ -1193,6 +1434,10 @@ static int qtnf_pcie_pearl_probe(struct qtnf_bus *bus,
case QTN_CHIP_ID_PEARL_B:
ps->hdp_ops = &hdp_ops_rev_b;
break;
+ case QTN_CHIP_ID_PEARL_C:
+ case QTN_CHIP_ID_PEARL_C1:
+ ps->hdp_ops = &hdp_ops_rev_c;
+ break;
default:
pr_err("unsupported PEARL chip ID 0x%x\n", bus->chipid);
return -ENOTSUPP;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie_ipc.h b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie_ipc.h
index 634480fe6a64..42a67d66d9e8 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie_ipc.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie_ipc.h
@@ -61,6 +61,9 @@
#define QTN_ENET_ADDR_LENGTH 6
+#define QTN_HDP_BD_WRAP 0x8000
+#define QTN_HDP_DMA_PTR_SIZE (4 * sizeof(u64))
+
#define QTN_TXDONE_MASK ((u32)0x80000000)
#define QTN_GET_LEN(x) ((x) & 0xFFFF)
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie_regs.h b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie_regs.h
index 6e9a5c61d46f..945d27b36852 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie_regs.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie_regs.h
@@ -4,7 +4,7 @@
#ifndef __PEARL_PCIE_H
#define __PEARL_PCIE_H
-/* Pearl PCIe HDP registers */
+/* Pearl rev B PCIe HDP registers */
#define PCIE_HDP_CTRL(base) ((base) + 0x2c00)
#define PCIE_HDP_AXI_CTRL(base) ((base) + 0x2c04)
#define PCIE_HDP_HOST_WR_DESC0(base) ((base) + 0x2c10)
@@ -60,7 +60,6 @@
#define PCIE_HDP_RX3DMA_CNT(base) ((base) + 0x2d1c)
#define PCIE_HDP_TX0DMA_CNT(base) ((base) + 0x2d20)
#define PCIE_HDP_TX1DMA_CNT(base) ((base) + 0x2d24)
-#define PCIE_HDP_RXDMA_CTRL(base) ((base) + 0x2d28)
#define PCIE_HDP_TX_HOST_Q_SZ_CTRL(base) ((base) + 0x2d2c)
#define PCIE_HDP_TX_HOST_Q_BASE_L(base) ((base) + 0x2d30)
#define PCIE_HDP_TX_HOST_Q_BASE_H(base) ((base) + 0x2d34)
@@ -68,6 +67,36 @@
#define PCIE_HDP_TX_HOST_Q_RD_PTR(base) ((base) + 0x2d3c)
#define PCIE_HDP_TX_HOST_Q_STS(base) ((base) + 0x2d40)
+#define PCIE_HDP_TX_DMA_CTRL(base) ((base) + 0x2dcc)
+#define PCIE_HDP_TXDMA_NEW (BIT(8))
+
+#define PCIE_HDP_RXDMA_CTRL(base) ((base) + 0x2d28)
+#define PCIE_HDP_RXDMA_WPTR (BIT(27))
+#define PCIE_HDP_RXDMA_NEW (BIT(29))
+#define PCIE_HDP_RXDMA_INTERLEAVE (BIT(30))
+
+/* Pearl rev C PCIe HDP registers */
+#define PCIE_HDP_TX0_DEV_PTR_ADDR(base) ((base) + 0x2db0)
+#define PCIE_HDP_TX0_DEV_PTR_ADDR_H(base) ((base) + 0x2db4)
+#define PCIE_HDP_TX0_DESC_Q_WR_PTR(base) ((base) + 0x2da4)
+#define PCIE_HDP_TX0_DESC_BASE_ADDR(base) ((base) + 0x2dac)
+#define PCIE_HDP_TX0_DESC_BASE_ADDR_H(base) ((base) + 0x2da8)
+
+#define PCIE_HDP_RX2_DESC_BASE_ADDR(base) ((base) + 0x2c20)
+#define PCIE_HDP_RX2_DESC_BASE_ADDR_H(base) ((base) + 0x2c24)
+#define PCIE_HDP_RX2_DESC_Q_WR_PTR(base) ((base) + 0x2d84)
+#define PCIE_HDP_RX2_DEV_PTR_ADDR(base) ((base) + 0x2dd8)
+#define PCIE_HDP_RX2_DEV_PTR_ADDR_H(base) ((base) + 0x2ddc)
+
+#define PCIE_HDP_TX0_DESC_Q_CTRL(base) ((base) + 0x2da0)
+#define PCIE_HDP_RX2_DESC_Q_CTRL(base) ((base) + 0x2d80)
+#define PCIE_HDP_DESC_FETCH_EN (BIT(31))
+
+#define PCIE_HDP_AXI_MASTER_CTRL(base) ((base) + 0x2de0)
+#define PCIE_HDP_AXI_EN_BURST32_READ (BIT(3) | BIT(7))
+#define PCIE_HDP_AXI_EN_BURST32_WRITE BIT(11)
+#define PCIE_HDP_AXI_BURST32_SIZE (32 * 8)
+
/* Pearl PCIe HBM pool registers */
#define PCIE_HHBM_CSR_REG(base) ((base) + 0x2e00)
#define PCIE_HHBM_Q_BASE_REG(base) ((base) + 0x2e04)
diff --git a/drivers/net/wireless/quantenna/qtnfmac/qtn_hw_ids.h b/drivers/net/wireless/quantenna/qtnfmac/qtn_hw_ids.h
index 82d879950b62..d962126602cd 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/qtn_hw_ids.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/qtn_hw_ids.h
@@ -18,6 +18,7 @@
#define QTN_CHIP_ID_PEARL 0x50
#define QTN_CHIP_ID_PEARL_B 0x60
#define QTN_CHIP_ID_PEARL_C 0x70
+#define QTN_CHIP_ID_PEARL_C1 0x80
/* FW names */
diff --git a/drivers/net/wireless/quantenna/qtnfmac/util.c b/drivers/net/wireless/quantenna/qtnfmac/util.c
index cda6f5f3f38a..afad12ce3ba5 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/util.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/util.c
@@ -116,6 +116,8 @@ const char *qtnf_chipid_to_string(unsigned long chip_id)
return "Pearl revB";
case QTN_CHIP_ID_PEARL_C:
return "Pearl revC";
+ case QTN_CHIP_ID_PEARL_C1:
+ return "Pearl revC1";
default:
return "unknown";
}
--
2.11.0
Sergey Matyukevich <[email protected]> writes:
> Add support for the new minor revision of QSR10g chip. Major changes from
> the driver perspective include PCIe data path modifications. Setup is now
> more complicated, but finally more things have been offloaded to hardware.
> As a result, less driver boilerplate operations are needed after Tx/Rx
> descriptors queues have been configured. Besides, restrictions on
> descriptors queue lengths have been relaxed.
>
> Signed-off-by: Sergey Matyukevich <[email protected]>
What about the firmware, is that available for this new revision?
--
https://wireless.wiki.kernel.org/en/developers/documentation/submittingpatches
> > Add support for the new minor revision of QSR10g chip. Major changes from
> > the driver perspective include PCIe data path modifications. Setup is now
> > more complicated, but finally more things have been offloaded to hardware.
> > As a result, less driver boilerplate operations are needed after Tx/Rx
> > descriptors queues have been configured. Besides, restrictions on
> > descriptors queue lengths have been relaxed.
> >
> > Signed-off-by: Sergey Matyukevich <[email protected]>
>
> What about the firmware, is that available for this new revision?
Hello Kalle,
There are two drivers: pearl_qtnfmac for QSR10G and topaz_qtnfmac for
QSR1000. Firmware for QSR1000 chips has a higher priority since those
devices have been in production for quite a while now and there are
multiple products available. From the engineering perspective we are
ready to release firmware and SDK for QSR1000/QSR2000 devices. Now we
are waiting for an ACK from the legal team. This was delayed by the
acquisition of Quantenna by On Semiconductor.
As for the latest QSR10G chips, we are not yet ready to release the SDK.
The main reason is that the platform is under active development.
Regards,
Sergey
> > > Add support for the new minor revision of QSR10g chip. Major changes from
> > > the driver perspective include PCIe data path modifications. Setup is now
> > > more complicated, but finally more things have been offloaded to hardware.
> > > As a result, less driver boilerplate operations are needed after Tx/Rx
> > > descriptors queues have been configured. Besides, restrictions on
> > > descriptors queue lengths have been relaxed.
> > >
> > > Signed-off-by: Sergey Matyukevich <[email protected]>
> >
> > What about the firmware, is that available for this new revision?
>
> Hello Kalle,
>
> There are two drivers: pearl_qtnfmac for QSR10G and topaz_qtnfmac for
> QSR1000. Firmware for QSR1000 chips has a higher priority since those
> devices have been in production for quite a while now and there are
> multiple products available. From the engineering perspective we are
> ready to release firmware and SDK for QSR1000/QSR2000 devices. Now we
> are waiting for the ACK from legal team. This was delayed by the
> acquisition of Quantenna by On Semiconductor.
>
> As for the latest QSR10G chips, we are not yet ready to release SDK.
> The main reason is that platform is under active development.
Hello Kalle,
I noticed that you marked these two patches as deferred in patchwork.
Is there anything else I have to do here?
Regards,
Sergey
> > > > Add support for the new minor revision of QSR10g chip. Major changes from
> > > > the driver perspective include PCIe data path modifications. Setup is now
> > > > more complicated, but finally more things have been offloaded to hardware.
> > > > As a result, less driver boilerplate operations are needed after Tx/Rx
> > > > descriptors queues have been configured. Besides, restrictions on
> > > > descriptors queue lengths have been relaxed.
> > > >
> > > > Signed-off-by: Sergey Matyukevich <[email protected]>
> > >
> > > What about the firmware, is that available for this new revision?
> >
> > Hello Kalle,
> >
> > There are two drivers: pearl_qtnfmac for QSR10G and topaz_qtnfmac for
> > QSR1000. Firmware for QSR1000 chips has a higher priority since those
> > devices have been in production for quite a while now and there are
> > multiple products available. From the engineering perspective we are
> > ready to release firmware and SDK for QSR1000/QSR2000 devices. Now we
> > are waiting for the ACK from legal team. This was delayed by the
> > acquisition of Quantenna by On Semiconductor.
> >
> > As for the latest QSR10G chips, we are not yet ready to release SDK.
> > The main reason is that platform is under active development.
>
> Hello Kalle,
>
> I noticed that you marked these two patches as deferred in patchwork.
> Is there anything else I have to do here ?
>
> Regards,
> Sergey
Hello Kalle,
Could you please clarify your expectations regarding this functionality?
Am I correct in assuming that you implicitly tie acceptance of these patches
to the promised release of firmware and SDK for the QSR1000/2000 family?
Regards,
Sergey
Sergey Matyukevich <[email protected]> writes:
>> > > > Add support for the new minor revision of QSR10g chip. Major changes from
>> > > > the driver perspective include PCIe data path modifications. Setup is now
>> > > > more complicated, but finally more things have been offloaded to hardware.
>> > > > As a result, less driver boilerplate operations are needed after Tx/Rx
>> > > > descriptors queues have been configured. Besides, restrictions on
>> > > > descriptors queue lengths have been relaxed.
>> > > >
>> > > > Signed-off-by: Sergey Matyukevich <[email protected]>
>> > >
>> > > What about the firmware, is that available for this new revision?
>> >
>> > Hello Kalle,
>> >
>> > There are two drivers: pearl_qtnfmac for QSR10G and topaz_qtnfmac for
>> > QSR1000. Firmware for QSR1000 chips has a higher priority since those
>> > devices have been in production for quite a while now and there are
>> > multiple products available. From the engineering perspective we are
>> > ready to release firmware and SDK for QSR1000/QSR2000 devices. Now we
>> > are waiting for the ACK from legal team. This was delayed by the
>> > acquisition of Quantenna by On Semiconductor.
>> >
>> > As for the latest QSR10G chips, we are not yet ready to release SDK.
>> > The main reason is that platform is under active development.
>>
>> Hello Kalle,
>>
>> I noticed that you marked these two patches as deferred in patchwork.
>> Is there anything else I have to do here ?
>>
>> Regards,
>> Sergey
>
> Hello Kalle,
>
> Could you please clarify your expectations regarding this functionality.
> Am I correct assuming that you implicitly tie acceptance of these patches
> with the promised release of firmware and SDK for QSR1000/2000 family ?
Sorry for the delay, I wanted to check the qtnfmac firmware status
before responding. And it didn't look good. The wiki page[1] mentions
nothing about the firmware, neither does Kconfig, and even a quick Google
search didn't make me any wiser. So I have no clue what the current
situation with the firmware is.
I don't like this at all. All upstream drivers are supposed to be used
by _anyone_ and the firmware should be publicly available, with a very
strong preference for having the firmware in the linux-firmware repo. I made an
exception with qtnfmac and didn't require it to be in linux-firmware,
IIRC the reason being there were some problems with the firmware license
(something related to GPL?).
Upstream drivers need to have the firmware available. If Quantenna does
not want to release the firmware, I'm not willing to accept patches for
new hardware either. I will accept patches for hardware already in
upstream, but any patches adding new hardware support will be
automatically rejected until the firmware issue is resolved.
[1] https://wireless.wiki.kernel.org/en/users/drivers/qtnfmac
--
https://patchwork.kernel.org/project/linux-wireless/list/
https://wireless.wiki.kernel.org/en/developers/documentation/submittingpatches