2017-07-13 21:22:02

by Moritz Fischer

Subject: [PATCH 1/2] dt-bindings: net: Add bindings for National Instruments XGE netdev

This adds bindings for the NI XGE 1G/10G network device.

Signed-off-by: Moritz Fischer <[email protected]>
---
Documentation/devicetree/bindings/net/nixge.txt | 32 +++++++++++++++++++++++++++
1 file changed, 32 insertions(+)
create mode 100644 Documentation/devicetree/bindings/net/nixge.txt

diff --git a/Documentation/devicetree/bindings/net/nixge.txt b/Documentation/devicetree/bindings/net/nixge.txt
new file mode 100644
index 0000000..9fff5a7
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/nixge.txt
@@ -0,0 +1,32 @@
+* NI XGE Ethernet controller
+
+Required properties:
+- compatible: Should be "ni,xge-enet-2.00"
+- reg: Address and length of the register set for the device
+- interrupts: Should contain tx and rx interrupt
+- interrupt-names: Should be "rx-irq" and "tx-irq"
+- phy-mode: See ethernet.txt file in the same directory.
+- nvmem-cells: Phandle of nvmem cell containing the mac address
+- nvmem-cell-names: Should be "address"
+
+Example (10G generic PHY):
+ nixge0: ethernet@40000000 {
+ compatible = "ni,xge-enet-2.00";
+ reg = <0x40000000 0x6000>;
+
+ nvmem-cells = <&eth1_addr>;
+ nvmem-cell-names = "address";
+
+ interrupts = <0 29 4>, <0 30 4>;
+ interrupt-names = "rx-irq", "tx-irq";
+ interrupt-parent = <&intc>;
+
+ phy-mode = "xgmii";
+ phy-handle = <&ethernet_phy1>;
+
+ ethernet_phy1: ethernet-phy@4 {
+ compatible = "ethernet-phy-ieee802.3-c45";
+ reg = <4>;
+ devices = <0xa>;
+ };
+ };
--
2.7.4


2017-07-13 21:22:08

by Moritz Fischer

Subject: [PATCH 2/2] net: ethernet: nixge: Add support for National Instruments XGE netdev

Add support for the National Instruments XGE 1/10G network device.

It uses the EEPROM on the board via NVMEM.

Signed-off-by: Moritz Fischer <[email protected]>
---
drivers/net/ethernet/Kconfig | 1 +
drivers/net/ethernet/Makefile | 1 +
drivers/net/ethernet/ni/Kconfig | 26 +
drivers/net/ethernet/ni/Makefile | 1 +
drivers/net/ethernet/ni/nixge.c | 1246 ++++++++++++++++++++++++++++++++++++++
5 files changed, 1275 insertions(+)
create mode 100644 drivers/net/ethernet/ni/Kconfig
create mode 100644 drivers/net/ethernet/ni/Makefile
create mode 100644 drivers/net/ethernet/ni/nixge.c

diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index edae15ac..2021806 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -127,6 +127,7 @@ config FEALNX

source "drivers/net/ethernet/natsemi/Kconfig"
source "drivers/net/ethernet/netronome/Kconfig"
+source "drivers/net/ethernet/ni/Kconfig"
source "drivers/net/ethernet/8390/Kconfig"

config NET_NETX
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index bf7f450..68f49f7 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -58,6 +58,7 @@ obj-$(CONFIG_NET_VENDOR_MYRI) += myricom/
obj-$(CONFIG_FEALNX) += fealnx.o
obj-$(CONFIG_NET_VENDOR_NATSEMI) += natsemi/
obj-$(CONFIG_NET_VENDOR_NETRONOME) += netronome/
+obj-$(CONFIG_NET_VENDOR_NI) += ni/
obj-$(CONFIG_NET_NETX) += netx-eth.o
obj-$(CONFIG_NET_VENDOR_NUVOTON) += nuvoton/
obj-$(CONFIG_NET_VENDOR_NVIDIA) += nvidia/
diff --git a/drivers/net/ethernet/ni/Kconfig b/drivers/net/ethernet/ni/Kconfig
new file mode 100644
index 0000000..a74ffeb
--- /dev/null
+++ b/drivers/net/ethernet/ni/Kconfig
@@ -0,0 +1,26 @@
+#
+# National Instruments network device configuration
+#
+
+config NET_VENDOR_NI
+ bool "National Instruments Devices"
+ default y
+ ---help---
+ If you have a network (Ethernet) device belonging to this class, say Y.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about National Instruments devices.
+ If you say Y, you will be asked for your specific device in the
+ following questions.
+
+if NET_VENDOR_NI
+
+config NI_XGE_MANAGEMENT_ENET
+ tristate "National Instruments XGE management enet support"
+ select PHYLIB
+ ---help---
+ Simple LAN device for debug or management purposes. Can
+ support either 10G or 1G PHYs via SFP+ ports.
+
+endif
diff --git a/drivers/net/ethernet/ni/Makefile b/drivers/net/ethernet/ni/Makefile
new file mode 100644
index 0000000..99c6646
--- /dev/null
+++ b/drivers/net/ethernet/ni/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_NI_XGE_MANAGEMENT_ENET) += nixge.o
diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c
new file mode 100644
index 0000000..85b213c
--- /dev/null
+++ b/drivers/net/ethernet/ni/nixge.c
@@ -0,0 +1,1246 @@
+/*
+ * Copyright (c) 2016-2017, National Instruments Corp.
+ *
+ * Network Driver for Ettus Research XGE MAC
+ *
+ * This is largely based on the Xilinx AXI Ethernet Driver,
+ * and uses the same DMA engine in the FPGA
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/etherdevice.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/of_platform.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/phy.h>
+#include <linux/mii.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include <linux/iopoll.h>
+
+#define TX_BD_NUM 64
+#define RX_BD_NUM 128
+
+#define DELAY_OF_ONE_MILLISEC 1000
+
+/* Axi DMA Register definitions */
+
+#define XAXIDMA_TX_CR_OFFSET 0x00000000 /* Channel control */
+#define XAXIDMA_TX_SR_OFFSET 0x00000004 /* Status */
+#define XAXIDMA_TX_CDESC_OFFSET 0x00000008 /* Current descriptor pointer */
+#define XAXIDMA_TX_TDESC_OFFSET 0x00000010 /* Tail descriptor pointer */
+
+#define XAXIDMA_RX_CR_OFFSET 0x00000030 /* Channel control */
+#define XAXIDMA_RX_SR_OFFSET 0x00000034 /* Status */
+#define XAXIDMA_RX_CDESC_OFFSET 0x00000038 /* Current descriptor pointer */
+#define XAXIDMA_RX_TDESC_OFFSET 0x00000040 /* Tail descriptor pointer */
+
+#define XAXIDMA_CR_RUNSTOP_MASK 0x00000001 /* Start/stop DMA channel */
+#define XAXIDMA_CR_RESET_MASK 0x00000004 /* Reset DMA engine */
+
+#define XAXIDMA_BD_NDESC_OFFSET 0x00 /* Next descriptor pointer */
+#define XAXIDMA_BD_BUFA_OFFSET 0x08 /* Buffer address */
+#define XAXIDMA_BD_CTRL_LEN_OFFSET 0x18 /* Control/buffer length */
+#define XAXIDMA_BD_STS_OFFSET 0x1C /* Status */
+#define XAXIDMA_BD_USR0_OFFSET 0x20 /* User IP specific word0 */
+#define XAXIDMA_BD_USR1_OFFSET 0x24 /* User IP specific word1 */
+#define XAXIDMA_BD_USR2_OFFSET 0x28 /* User IP specific word2 */
+#define XAXIDMA_BD_USR3_OFFSET 0x2C /* User IP specific word3 */
+#define XAXIDMA_BD_USR4_OFFSET 0x30 /* User IP specific word4 */
+#define XAXIDMA_BD_ID_OFFSET 0x34 /* Sw ID */
+#define XAXIDMA_BD_HAS_STSCNTRL_OFFSET 0x38 /* Whether has stscntrl strm */
+#define XAXIDMA_BD_HAS_DRE_OFFSET 0x3C /* Whether has DRE */
+
+#define XAXIDMA_BD_HAS_DRE_SHIFT 8 /* Whether has DRE shift */
+#define XAXIDMA_BD_HAS_DRE_MASK 0xF00 /* Whether has DRE mask */
+#define XAXIDMA_BD_WORDLEN_MASK 0xFF /* Word length mask */
+
+#define XAXIDMA_BD_CTRL_LENGTH_MASK 0x007FFFFF /* Requested len */
+#define XAXIDMA_BD_CTRL_TXSOF_MASK 0x08000000 /* First tx packet */
+#define XAXIDMA_BD_CTRL_TXEOF_MASK 0x04000000 /* Last tx packet */
+#define XAXIDMA_BD_CTRL_ALL_MASK 0x0C000000 /* All control bits */
+
+#define XAXIDMA_DELAY_MASK 0xFF000000 /* Delay timeout counter */
+#define XAXIDMA_COALESCE_MASK 0x00FF0000 /* Coalesce counter */
+
+#define XAXIDMA_DELAY_SHIFT 24
+#define XAXIDMA_COALESCE_SHIFT 16
+
+#define XAXIDMA_IRQ_IOC_MASK 0x00001000 /* Completion intr */
+#define XAXIDMA_IRQ_DELAY_MASK 0x00002000 /* Delay interrupt */
+#define XAXIDMA_IRQ_ERROR_MASK 0x00004000 /* Error interrupt */
+#define XAXIDMA_IRQ_ALL_MASK 0x00007000 /* All interrupts */
+
+/* Default TX/RX Threshold and waitbound values for SGDMA mode */
+#define XAXIDMA_DFT_TX_THRESHOLD 24
+#define XAXIDMA_DFT_TX_WAITBOUND 254
+#define XAXIDMA_DFT_RX_THRESHOLD 24
+#define XAXIDMA_DFT_RX_WAITBOUND 254
+
+#define XAXIDMA_BD_CTRL_TXSOF_MASK 0x08000000 /* First tx packet */
+#define XAXIDMA_BD_CTRL_TXEOF_MASK 0x04000000 /* Last tx packet */
+#define XAXIDMA_BD_CTRL_ALL_MASK 0x0C000000 /* All control bits */
+
+#define XAXIDMA_BD_STS_ACTUAL_LEN_MASK 0x007FFFFF /* Actual len */
+#define XAXIDMA_BD_STS_COMPLETE_MASK 0x80000000 /* Completed */
+#define XAXIDMA_BD_STS_DEC_ERR_MASK 0x40000000 /* Decode error */
+#define XAXIDMA_BD_STS_SLV_ERR_MASK 0x20000000 /* Slave error */
+#define XAXIDMA_BD_STS_INT_ERR_MASK 0x10000000 /* Internal err */
+#define XAXIDMA_BD_STS_ALL_ERR_MASK 0x70000000 /* All errors */
+#define XAXIDMA_BD_STS_RXSOF_MASK 0x08000000 /* First rx pkt */
+#define XAXIDMA_BD_STS_RXEOF_MASK 0x04000000 /* Last rx pkt */
+#define XAXIDMA_BD_STS_ALL_MASK 0xFC000000 /* All status bits */
+
+#define XAXIDMA_BD_MINIMUM_ALIGNMENT 0x40
+
+#define NIXGE_REG_MDIO_DATA 0x10
+#define NIXGE_REG_MDIO_ADDR 0x14
+#define NIXGE_REG_MDIO_OP 0x18
+#define NIXGE_REG_MDIO_CTRL 0x1c
+
+#define NIXGE_MDIO_CLAUSE45 BIT(12)
+#define NIXGE_MDIO_CLAUSE22 0
+#define NIXGE_MDIO_OP(n) (((n) & 0x3) << 10)
+#define NIXGE_MDIO_OP_ADDRESS 0
+#define NIXGE_MDIO_OP_WRITE BIT(0)
+#define NIXGE_MDIO_OP_READ (BIT(1) | BIT(0))
+#define MDIO_C22_WRITE BIT(0)
+#define MDIO_C22_READ BIT(1)
+#define MDIO_READ_POST 2
+#define NIXGE_MDIO_ADDR(n) (((n) & 0x1f) << 5)
+#define NIXGE_MDIO_MMD(n) (((n) & 0x1f) << 0)
+
+#define NIXGE_MAX_PHY_ADDR 32
+
+#define NIXGE_REG_MAC_LSB 0x1000
+#define NIXGE_REG_MAC_MSB 0x1004
+
+/* Packet size info */
+#define NIXGE_HDR_SIZE 14 /* Size of Ethernet header */
+#define NIXGE_TRL_SIZE 4 /* Size of Ethernet trailer (FCS) */
+#define NIXGE_MTU 1500 /* Max MTU of an Ethernet frame */
+#define NIXGE_JUMBO_MTU 9000 /* Max MTU of a jumbo Eth. frame */
+
+#define NIXGE_MAX_FRAME_SIZE (NIXGE_MTU + NIXGE_HDR_SIZE + NIXGE_TRL_SIZE)
+#define NIXGE_MAX_VLAN_FRAME_SIZE (NIXGE_MTU + VLAN_ETH_HLEN + NIXGE_TRL_SIZE)
+#define NIXGE_MAX_JUMBO_FRAME_SIZE \
+ (NIXGE_JUMBO_MTU + NIXGE_HDR_SIZE + NIXGE_TRL_SIZE)
+
+struct nixge_dma_bd {
+ u32 next; /* Physical address of next buffer descriptor */
+ u32 reserved1;
+ u32 phys;
+ u32 reserved2;
+ u32 reserved3;
+ u32 reserved4;
+ u32 cntrl;
+ u32 status;
+ u32 app0;
+ u32 app1; /* TX start << 16 | insert */
+ u32 app2; /* TX csum seed */
+ u32 app3;
+ u32 app4;
+ u32 sw_id_offset;
+ u32 reserved5;
+ u32 reserved6;
+};
+
+struct nixge_priv {
+ struct net_device *ndev;
+ struct device *dev;
+
+ /* Connection to PHY device */
+ struct phy_device *phy_dev;
+ phy_interface_t phy_interface;
+ /* protecting link parameters */
+ spinlock_t lock;
+ int link;
+ int speed;
+ int duplex;
+
+ /* MDIO bus data */
+ struct mii_bus *mii_bus; /* MII bus reference */
+
+ /* IO registers, dma functions and IRQs */
+ void __iomem *ctrl_regs;
+ void __iomem *dma_regs;
+
+ struct tasklet_struct dma_err_tasklet;
+
+ int tx_irq;
+ int rx_irq;
+ u32 phy_type;
+
+ u32 last_link;
+ u32 features;
+
+ /* Buffer descriptors */
+ struct nixge_dma_bd *tx_bd_v;
+ dma_addr_t tx_bd_p;
+ struct nixge_dma_bd *rx_bd_v;
+ dma_addr_t rx_bd_p;
+ u32 tx_bd_ci;
+ u32 tx_bd_tail;
+ u32 rx_bd_ci;
+
+ u32 max_frm_size;
+ u32 rxmem;
+
+ int csum_offload_on_tx_path;
+ int csum_offload_on_rx_path;
+
+ u32 coalesce_count_rx;
+ u32 coalesce_count_tx;
+};
+
+static inline void nixge_dma_write_reg(struct nixge_priv *priv, off_t offset,
+ u32 val)
+{
+ writel(val, priv->dma_regs + offset);
+}
+
+static inline u32 nixge_dma_read_reg(const struct nixge_priv *priv,
+ off_t offset)
+{
+ return readl(priv->dma_regs + offset);
+}
+
+static inline void nixge_ctrl_write_reg(struct nixge_priv *priv, off_t offset,
+ u32 val)
+{
+ writel(val, priv->ctrl_regs + offset);
+}
+
+static inline u32 nixge_ctrl_read_reg(struct nixge_priv *priv, off_t offset)
+{
+ return readl(priv->ctrl_regs + offset);
+}
+
+#define nixge_ctrl_poll_timeout(priv, addr, val, cond, sleep_us, timeout_us) \
+ readl_poll_timeout((priv)->ctrl_regs + (addr), (val), cond, \
+ (sleep_us), (timeout_us))
+
+static void nixge_dma_bd_release(struct net_device *ndev)
+{
+ int i;
+ struct nixge_priv *priv = netdev_priv(ndev);
+
+ for (i = 0; i < RX_BD_NUM; i++) {
+ dma_unmap_single(ndev->dev.parent, priv->rx_bd_v[i].phys,
+ priv->max_frm_size, DMA_FROM_DEVICE);
+ dev_kfree_skb((struct sk_buff *)
+ (priv->rx_bd_v[i].sw_id_offset));
+ }
+
+ if (priv->rx_bd_v) {
+ dma_free_coherent(ndev->dev.parent,
+ sizeof(*priv->rx_bd_v) * RX_BD_NUM,
+ priv->rx_bd_v,
+ priv->rx_bd_p);
+ }
+ if (priv->tx_bd_v) {
+ dma_free_coherent(ndev->dev.parent,
+ sizeof(*priv->tx_bd_v) * TX_BD_NUM,
+ priv->tx_bd_v,
+ priv->tx_bd_p);
+ }
+}
+
+static int nixge_dma_bd_init(struct net_device *ndev)
+{
+ u32 cr;
+ int i;
+ struct sk_buff *skb;
+ struct nixge_priv *priv = netdev_priv(ndev);
+
+ /* Reset the indexes which are used for accessing the BDs */
+ priv->tx_bd_ci = 0;
+ priv->tx_bd_tail = 0;
+ priv->rx_bd_ci = 0;
+
+ /* Allocate the Tx and Rx buffer descriptors. */
+ priv->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
+ sizeof(*priv->tx_bd_v) * TX_BD_NUM,
+ &priv->tx_bd_p, GFP_KERNEL);
+ if (!priv->tx_bd_v)
+ goto out;
+
+ priv->rx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
+ sizeof(*priv->rx_bd_v) * RX_BD_NUM,
+ &priv->rx_bd_p, GFP_KERNEL);
+ if (!priv->rx_bd_v)
+ goto out;
+
+ for (i = 0; i < TX_BD_NUM; i++) {
+ priv->tx_bd_v[i].next = priv->tx_bd_p +
+ sizeof(*priv->tx_bd_v) *
+ ((i + 1) % TX_BD_NUM);
+ }
+
+ for (i = 0; i < RX_BD_NUM; i++) {
+ priv->rx_bd_v[i].next = priv->rx_bd_p +
+ sizeof(*priv->rx_bd_v) *
+ ((i + 1) % RX_BD_NUM);
+
+ skb = netdev_alloc_skb_ip_align(ndev, priv->max_frm_size);
+ if (!skb)
+ goto out;
+
+ priv->rx_bd_v[i].sw_id_offset = (u32)skb;
+ priv->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent,
+ skb->data,
+ priv->max_frm_size,
+ DMA_FROM_DEVICE);
+ priv->rx_bd_v[i].cntrl = priv->max_frm_size;
+ }
+
+ /* Start updating the Rx channel control register */
+ cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
+ /* Update the interrupt coalesce count */
+ cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
+ ((priv->coalesce_count_rx) << XAXIDMA_COALESCE_SHIFT));
+ /* Update the delay timer count */
+ cr = ((cr & ~XAXIDMA_DELAY_MASK) |
+ (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
+ /* Enable coalesce, delay timer and error interrupts */
+ cr |= XAXIDMA_IRQ_ALL_MASK;
+ /* Write to the Rx channel control register */
+ nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr);
+
+ /* Start updating the Tx channel control register */
+ cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
+ /* Update the interrupt coalesce count */
+ cr = (((cr & ~XAXIDMA_COALESCE_MASK)) |
+ ((priv->coalesce_count_tx) << XAXIDMA_COALESCE_SHIFT));
+ /* Update the delay timer count */
+ cr = (((cr & ~XAXIDMA_DELAY_MASK)) |
+ (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
+ /* Enable coalesce, delay timer and error interrupts */
+ cr |= XAXIDMA_IRQ_ALL_MASK;
+ /* Write to the Tx channel control register */
+ nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET, cr);
+
+ /* Populate the tail pointer and bring the Rx Axi DMA engine out of
+ * halted state. This will make the Rx side ready for reception.
+ */
+ nixge_dma_write_reg(priv, XAXIDMA_RX_CDESC_OFFSET, priv->rx_bd_p);
+ cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
+ nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET,
+ cr | XAXIDMA_CR_RUNSTOP_MASK);
+ nixge_dma_write_reg(priv, XAXIDMA_RX_TDESC_OFFSET, priv->rx_bd_p +
+ (sizeof(*priv->rx_bd_v) * (RX_BD_NUM - 1)));
+
+ /* Write to the RS (Run-stop) bit in the Tx channel control register.
+ * Tx channel is now ready to run. But only after we write to the
+ * tail pointer register that the Tx channel will start transmitting.
+ */
+ nixge_dma_write_reg(priv, XAXIDMA_TX_CDESC_OFFSET, priv->tx_bd_p);
+ cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
+ nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET,
+ cr | XAXIDMA_CR_RUNSTOP_MASK);
+
+ return 0;
+out:
+ nixge_dma_bd_release(ndev);
+ return -ENOMEM;
+}
+
+static void __nixge_device_reset(struct nixge_priv *priv, off_t offset)
+{
+ u32 timeout;
+ /* Reset Axi DMA. This would reset NIXGE Ethernet core as well.
+ * The reset process of Axi DMA takes a while to complete as all
+ * pending commands/transfers will be flushed or completed during
+ * this reset process.
+ */
+ nixge_dma_write_reg(priv, offset, XAXIDMA_CR_RESET_MASK);
+ timeout = DELAY_OF_ONE_MILLISEC;
+ while (nixge_dma_read_reg(priv, offset) & XAXIDMA_CR_RESET_MASK) {
+ udelay(1);
+ if (--timeout == 0) {
+ netdev_err(priv->ndev, "%s: DMA reset timeout!\n",
+ __func__);
+ break;
+ }
+ }
+}
+
+static void nixge_device_reset(struct net_device *ndev)
+{
+ struct nixge_priv *priv = netdev_priv(ndev);
+
+ __nixge_device_reset(priv, XAXIDMA_TX_CR_OFFSET);
+ __nixge_device_reset(priv, XAXIDMA_RX_CR_OFFSET);
+
+ priv->max_frm_size = NIXGE_MAX_VLAN_FRAME_SIZE;
+
+ if ((ndev->mtu > NIXGE_MTU) &&
+ (ndev->mtu <= NIXGE_JUMBO_MTU)) {
+ priv->max_frm_size = ndev->mtu + VLAN_ETH_HLEN +
+ NIXGE_TRL_SIZE;
+ }
+
+ if (nixge_dma_bd_init(ndev)) {
+ netdev_err(ndev, "%s: descriptor allocation failed\n",
+ __func__);
+ }
+
+ netif_trans_update(ndev);
+}
+
+static void nixge_handle_link_change(struct net_device *ndev)
+{
+ struct nixge_priv *priv = netdev_priv(ndev);
+ struct phy_device *phydev = ndev->phydev;
+ unsigned long flags;
+ int status_change = 0;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ if (phydev->link != priv->link) {
+ if (!phydev->link) {
+ priv->speed = 0;
+ priv->duplex = -1;
+ }
+ priv->link = phydev->link;
+
+ status_change = 1;
+ }
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ if (status_change) {
+ if (phydev->link) {
+ netif_carrier_on(ndev);
+ netdev_info(ndev, "link up (%d/%s)\n",
+ phydev->speed,
+ phydev->duplex == DUPLEX_FULL ?
+ "Full" : "Half");
+ } else {
+ netif_carrier_off(ndev);
+ netdev_info(ndev, "link down\n");
+ }
+ }
+}
+
+static void nixge_start_xmit_done(struct net_device *ndev)
+{
+ u32 size = 0;
+ u32 packets = 0;
+ struct nixge_priv *priv = netdev_priv(ndev);
+ struct nixge_dma_bd *cur_p;
+ unsigned int status = 0;
+
+ cur_p = &priv->tx_bd_v[priv->tx_bd_ci];
+ status = cur_p->status;
+
+ while (status & XAXIDMA_BD_STS_COMPLETE_MASK) {
+ dma_unmap_single(ndev->dev.parent, cur_p->phys,
+ (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
+ DMA_TO_DEVICE);
+ if (cur_p->app4)
+ dev_kfree_skb_irq((struct sk_buff *)cur_p->app4);
+ /*cur_p->phys = 0;*/
+ cur_p->app0 = 0;
+ cur_p->app1 = 0;
+ cur_p->app2 = 0;
+ cur_p->app4 = 0;
+ cur_p->status = 0;
+
+ size += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
+ packets++;
+
+ ++priv->tx_bd_ci;
+ priv->tx_bd_ci %= TX_BD_NUM;
+ cur_p = &priv->tx_bd_v[priv->tx_bd_ci];
+ status = cur_p->status;
+ }
+
+ ndev->stats.tx_packets += packets;
+ ndev->stats.tx_bytes += size;
+ netif_wake_queue(ndev);
+}
+
+static inline int nixge_check_tx_bd_space(struct nixge_priv *priv,
+ int num_frag)
+{
+ struct nixge_dma_bd *cur_p;
+
+ cur_p = &priv->tx_bd_v[(priv->tx_bd_tail + num_frag) % TX_BD_NUM];
+ if (cur_p->status & XAXIDMA_BD_STS_ALL_MASK)
+ return NETDEV_TX_BUSY;
+ return 0;
+}
+
+static int nixge_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ u32 ii;
+ u32 num_frag;
+ skb_frag_t *frag;
+ dma_addr_t tail_p;
+ struct nixge_priv *priv = netdev_priv(ndev);
+ struct nixge_dma_bd *cur_p;
+
+ num_frag = skb_shinfo(skb)->nr_frags;
+ cur_p = &priv->tx_bd_v[priv->tx_bd_tail];
+
+ if (nixge_check_tx_bd_space(priv, num_frag)) {
+ if (!netif_queue_stopped(ndev))
+ netif_stop_queue(ndev);
+ return NETDEV_TX_BUSY;
+ }
+
+ cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;
+ cur_p->phys = dma_map_single(ndev->dev.parent, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+
+ for (ii = 0; ii < num_frag; ii++) {
+ ++priv->tx_bd_tail;
+ priv->tx_bd_tail %= TX_BD_NUM;
+ cur_p = &priv->tx_bd_v[priv->tx_bd_tail];
+ frag = &skb_shinfo(skb)->frags[ii];
+ cur_p->phys = dma_map_single(ndev->dev.parent,
+ skb_frag_address(frag),
+ skb_frag_size(frag),
+ DMA_TO_DEVICE);
+ cur_p->cntrl = skb_frag_size(frag);
+ }
+
+ cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;
+ cur_p->app4 = (unsigned long)skb;
+
+ tail_p = priv->tx_bd_p + sizeof(*priv->tx_bd_v) * priv->tx_bd_tail;
+ /* Start the transfer */
+ nixge_dma_write_reg(priv, XAXIDMA_TX_TDESC_OFFSET, tail_p);
+ ++priv->tx_bd_tail;
+ priv->tx_bd_tail %= TX_BD_NUM;
+
+ return NETDEV_TX_OK;
+}
+
+static void nixge_recv(struct net_device *ndev)
+{
+ u32 length;
+ u32 size = 0;
+ u32 packets = 0;
+ dma_addr_t tail_p = 0;
+ struct nixge_priv *priv = netdev_priv(ndev);
+ struct sk_buff *skb, *new_skb;
+ struct nixge_dma_bd *cur_p;
+
+ cur_p = &priv->rx_bd_v[priv->rx_bd_ci];
+
+ while ((cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
+ tail_p = priv->rx_bd_p
+ + sizeof(*priv->rx_bd_v) * priv->rx_bd_ci;
+ skb = (struct sk_buff *)(cur_p->sw_id_offset);
+
+ length = cur_p->status & 0x7fffff;
+ dma_unmap_single(ndev->dev.parent, cur_p->phys,
+ priv->max_frm_size,
+ DMA_FROM_DEVICE);
+
+ skb_put(skb, length);
+
+ skb->protocol = eth_type_trans(skb, ndev);
+ /*skb_checksum_none_assert(skb);*/
+
+ /* For now mark them as CHECKSUM_NONE since
+ * we don't have offload capabilities
+ */
+ skb->ip_summed = CHECKSUM_NONE;
+
+ netif_rx(skb);
+
+ size += length;
+ packets++;
+
+ new_skb = netdev_alloc_skb_ip_align(ndev, priv->max_frm_size);
+ if (!new_skb)
+ return;
+
+ cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data,
+ priv->max_frm_size,
+ DMA_FROM_DEVICE);
+ cur_p->cntrl = priv->max_frm_size;
+ cur_p->status = 0;
+ cur_p->sw_id_offset = (u32)new_skb;
+
+ ++priv->rx_bd_ci;
+ priv->rx_bd_ci %= RX_BD_NUM;
+ cur_p = &priv->rx_bd_v[priv->rx_bd_ci];
+ }
+
+ ndev->stats.rx_packets += packets;
+ ndev->stats.rx_bytes += size;
+
+ if (tail_p)
+ nixge_dma_write_reg(priv, XAXIDMA_RX_TDESC_OFFSET, tail_p);
+}
+
+static irqreturn_t nixge_tx_irq(int irq, void *_ndev)
+{
+ u32 cr;
+ unsigned int status;
+ struct net_device *ndev = _ndev;
+ struct nixge_priv *priv = netdev_priv(ndev);
+
+ status = nixge_dma_read_reg(priv, XAXIDMA_TX_SR_OFFSET);
+ if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
+ nixge_dma_write_reg(priv, XAXIDMA_TX_SR_OFFSET, status);
+ nixge_start_xmit_done(priv->ndev);
+ goto out;
+ }
+ if (!(status & XAXIDMA_IRQ_ALL_MASK))
+ dev_err(&ndev->dev, "No interrupts asserted in Tx path\n");
+ if (status & XAXIDMA_IRQ_ERROR_MASK) {
+ dev_err(&ndev->dev, "DMA Tx error 0x%x\n", status);
+ dev_err(&ndev->dev, "Current BD is at: 0x%x\n",
+ (priv->tx_bd_v[priv->tx_bd_ci]).phys);
+
+ cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
+ /* Disable coalesce, delay timer and error interrupts */
+ cr &= (~XAXIDMA_IRQ_ALL_MASK);
+ /* Write to the Tx channel control register */
+ nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET, cr);
+
+ cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
+ /* Disable coalesce, delay timer and error interrupts */
+ cr &= (~XAXIDMA_IRQ_ALL_MASK);
+ /* Write to the Rx channel control register */
+ nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr);
+
+ tasklet_schedule(&priv->dma_err_tasklet);
+ nixge_dma_write_reg(priv, XAXIDMA_TX_SR_OFFSET, status);
+ }
+out:
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t nixge_rx_irq(int irq, void *_ndev)
+{
+ u32 cr;
+ unsigned int status;
+ struct net_device *ndev = _ndev;
+ struct nixge_priv *priv = netdev_priv(ndev);
+
+ status = nixge_dma_read_reg(priv, XAXIDMA_RX_SR_OFFSET);
+ if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
+ nixge_dma_write_reg(priv, XAXIDMA_RX_SR_OFFSET, status);
+ nixge_recv(priv->ndev);
+ goto out;
+ }
+ if (!(status & XAXIDMA_IRQ_ALL_MASK))
+ dev_err(&ndev->dev, "No interrupts asserted in Rx path\n");
+ if (status & XAXIDMA_IRQ_ERROR_MASK) {
+ dev_err(&ndev->dev, "DMA Rx error 0x%x\n", status);
+ dev_err(&ndev->dev, "Current BD is at: 0x%x\n",
+ (priv->rx_bd_v[priv->rx_bd_ci]).phys);
+
+ cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
+ /* Disable coalesce, delay timer and error interrupts */
+ cr &= (~XAXIDMA_IRQ_ALL_MASK);
+ /* Finally write to the Tx channel control register */
+ nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET, cr);
+
+ cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
+ /* Disable coalesce, delay timer and error interrupts */
+ cr &= (~XAXIDMA_IRQ_ALL_MASK);
+ /* write to the Rx channel control register */
+ nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr);
+
+ tasklet_schedule(&priv->dma_err_tasklet);
+ nixge_dma_write_reg(priv, XAXIDMA_RX_SR_OFFSET, status);
+ }
+out:
+ return IRQ_HANDLED;
+}
+
+static void nixge_dma_err_handler(unsigned long data)
+{
+ u32 cr, i;
+ struct nixge_priv *lp = (struct nixge_priv *)data;
+ struct net_device *ndev = lp->ndev;
+ struct nixge_dma_bd *cur_p;
+
+ __nixge_device_reset(lp, XAXIDMA_TX_CR_OFFSET);
+ __nixge_device_reset(lp, XAXIDMA_RX_CR_OFFSET);
+
+ for (i = 0; i < TX_BD_NUM; i++) {
+ cur_p = &lp->tx_bd_v[i];
+ if (cur_p->phys)
+ dma_unmap_single(ndev->dev.parent, cur_p->phys,
+ (cur_p->cntrl &
+ XAXIDMA_BD_CTRL_LENGTH_MASK),
+ DMA_TO_DEVICE);
+ if (cur_p->app4)
+ dev_kfree_skb_irq((struct sk_buff *)cur_p->app4);
+ cur_p->phys = 0;
+ cur_p->cntrl = 0;
+ cur_p->status = 0;
+ cur_p->app0 = 0;
+ cur_p->app1 = 0;
+ cur_p->app2 = 0;
+ cur_p->app3 = 0;
+ cur_p->app4 = 0;
+ cur_p->sw_id_offset = 0;
+ }
+
+ for (i = 0; i < RX_BD_NUM; i++) {
+ cur_p = &lp->rx_bd_v[i];
+ cur_p->status = 0;
+ cur_p->app0 = 0;
+ cur_p->app1 = 0;
+ cur_p->app2 = 0;
+ cur_p->app3 = 0;
+ cur_p->app4 = 0;
+ }
+
+ lp->tx_bd_ci = 0;
+ lp->tx_bd_tail = 0;
+ lp->rx_bd_ci = 0;
+
+ /* Start updating the Rx channel control register */
+ cr = nixge_dma_read_reg(lp, XAXIDMA_RX_CR_OFFSET);
+ /* Update the interrupt coalesce count */
+ cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
+ (XAXIDMA_DFT_RX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
+ /* Update the delay timer count */
+ cr = ((cr & ~XAXIDMA_DELAY_MASK) |
+ (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
+ /* Enable coalesce, delay timer and error interrupts */
+ cr |= XAXIDMA_IRQ_ALL_MASK;
+ /* Finally write to the Rx channel control register */
+ nixge_dma_write_reg(lp, XAXIDMA_RX_CR_OFFSET, cr);
+
+ /* Start updating the Tx channel control register */
+ cr = nixge_dma_read_reg(lp, XAXIDMA_TX_CR_OFFSET);
+ /* Update the interrupt coalesce count */
+ cr = (((cr & ~XAXIDMA_COALESCE_MASK)) |
+ (XAXIDMA_DFT_TX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
+ /* Update the delay timer count */
+ cr = (((cr & ~XAXIDMA_DELAY_MASK)) |
+ (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
+ /* Enable coalesce, delay timer and error interrupts */
+ cr |= XAXIDMA_IRQ_ALL_MASK;
+ /* Finally write to the Tx channel control register */
+ nixge_dma_write_reg(lp, XAXIDMA_TX_CR_OFFSET, cr);
+
+ /* Populate the tail pointer and bring the Rx Axi DMA engine out of
+ * halted state. This will make the Rx side ready for reception.
+ */
+ nixge_dma_write_reg(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
+ cr = nixge_dma_read_reg(lp, XAXIDMA_RX_CR_OFFSET);
+ nixge_dma_write_reg(lp, XAXIDMA_RX_CR_OFFSET,
+ cr | XAXIDMA_CR_RUNSTOP_MASK);
+ nixge_dma_write_reg(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
+ (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));
+
+ /* Write to the RS (Run-stop) bit in the Tx channel control register.
+ * Tx channel is now ready to run. But only after we write to the
+ * tail pointer register that the Tx channel will start transmitting
+ */
+ nixge_dma_write_reg(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
+ cr = nixge_dma_read_reg(lp, XAXIDMA_TX_CR_OFFSET);
+ nixge_dma_write_reg(lp, XAXIDMA_TX_CR_OFFSET,
+ cr | XAXIDMA_CR_RUNSTOP_MASK);
+}
+
+static int nixge_open(struct net_device *ndev)
+{
+ struct nixge_priv *priv = netdev_priv(ndev);
+ int ret;
+
+ nixge_device_reset(ndev);
+
+ /* start netif carrier down */
+ netif_carrier_off(ndev);
+
+ if (!ndev->phydev)
+ netdev_err(ndev, "no phy, phy_start() failed\n");
+ else
+ phy_start(ndev->phydev);
+
+ /* Enable tasklets for Axi DMA error handling */
+ tasklet_init(&priv->dma_err_tasklet, nixge_dma_err_handler,
+ (unsigned long)priv);
+
+ /* Enable interrupts for Axi DMA Tx */
+ ret = request_irq(priv->tx_irq, nixge_tx_irq, 0, ndev->name, ndev);
+ if (ret)
+ goto err_tx_irq;
+ /* Enable interrupts for Axi DMA Rx */
+ ret = request_irq(priv->rx_irq, nixge_rx_irq, 0, ndev->name, ndev);
+ if (ret)
+ goto err_rx_irq;
+
+ return 0;
+
+err_rx_irq:
+ free_irq(priv->tx_irq, ndev);
+err_tx_irq:
+ tasklet_kill(&priv->dma_err_tasklet);
+ dev_err(priv->dev, "request_irq() failed\n");
+ return ret;
+}
+
+static int nixge_stop(struct net_device *ndev)
+{
+ u32 cr;
+ struct nixge_priv *priv = netdev_priv(ndev);
+
+ dev_dbg(&ndev->dev, "nixge_stop()\n");
+
+ if (ndev->phydev)
+ phy_stop(ndev->phydev);
+
+ cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
+ nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET,
+ cr & (~XAXIDMA_CR_RUNSTOP_MASK));
+ cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
+ nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET,
+ cr & (~XAXIDMA_CR_RUNSTOP_MASK));
+
+ tasklet_kill(&priv->dma_err_tasklet);
+
+ free_irq(priv->tx_irq, ndev);
+ free_irq(priv->rx_irq, ndev);
+
+ nixge_dma_bd_release(ndev);
+ return 0;
+}
+
+static int nixge_change_mtu(struct net_device *ndev, int new_mtu)
+{
+ struct nixge_priv *priv = netdev_priv(ndev);
+
+ if (netif_running(ndev))
+ return -EBUSY;
+
+ if ((new_mtu + VLAN_ETH_HLEN +
+ NIXGE_TRL_SIZE) > priv->rxmem)
+ return -EINVAL;
+
+ ndev->mtu = new_mtu;
+
+ return 0;
+}
+
+static s32 __nixge_set_mac_address(struct net_device *ndev, const void *addr)
+{
+ struct nixge_priv *priv = netdev_priv(ndev);
+
+ if (addr)
+ memcpy(ndev->dev_addr, addr, ETH_ALEN);
+ if (!is_valid_ether_addr(ndev->dev_addr))
+ eth_random_addr(ndev->dev_addr);
+
+ nixge_ctrl_write_reg(priv, NIXGE_REG_MAC_LSB,
+ (ndev->dev_addr[2]) << 24 |
+ (ndev->dev_addr[3] << 16) |
+ (ndev->dev_addr[4] << 8) |
+ (ndev->dev_addr[5] << 0));
+
+ nixge_ctrl_write_reg(priv, NIXGE_REG_MAC_MSB,
+ (ndev->dev_addr[1] | (ndev->dev_addr[0] << 8)));
+
+ return 0;
+}
+
+static int nixge_net_set_mac_address(struct net_device *ndev, void *p)
+{
+ struct sockaddr *addr = p;
+
+ __nixge_set_mac_address(ndev, addr->sa_data);
+ return 0;
+}
+
+static const struct net_device_ops nixge_netdev_ops = {
+ .ndo_open = nixge_open,
+ .ndo_stop = nixge_stop,
+ .ndo_start_xmit = nixge_start_xmit,
+ .ndo_change_mtu = nixge_change_mtu,
+ .ndo_set_mac_address = nixge_net_set_mac_address,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+static void nixge_ethtools_get_drvinfo(struct net_device *ndev,
+ struct ethtool_drvinfo *ed)
+{
+ strlcpy(ed->driver, "nixge", sizeof(ed->driver));
+ strlcpy(ed->version, "1.00a", sizeof(ed->version));
+}
+
+static int nixge_ethtools_get_coalesce(struct net_device *ndev,
+ struct ethtool_coalesce *ecoalesce)
+{
+ u32 regval = 0;
+ struct nixge_priv *priv = netdev_priv(ndev);
+
+ regval = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
+ ecoalesce->rx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
+ >> XAXIDMA_COALESCE_SHIFT;
+ regval = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
+ ecoalesce->tx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
+ >> XAXIDMA_COALESCE_SHIFT;
+ return 0;
+}
+
+static int nixge_ethtools_set_coalesce(struct net_device *ndev,
+ struct ethtool_coalesce *ecoalesce)
+{
+ struct nixge_priv *priv = netdev_priv(ndev);
+
+ if (netif_running(ndev)) {
+ netdev_err(ndev,
+ "Please stop netif before applying configuration\n");
+ return -EFAULT;
+ }
+
+ if ((ecoalesce->rx_coalesce_usecs) ||
+ (ecoalesce->rx_coalesce_usecs_irq) ||
+ (ecoalesce->rx_max_coalesced_frames_irq) ||
+ (ecoalesce->tx_coalesce_usecs) ||
+ (ecoalesce->tx_coalesce_usecs_irq) ||
+ (ecoalesce->tx_max_coalesced_frames_irq) ||
+ (ecoalesce->stats_block_coalesce_usecs) ||
+ (ecoalesce->use_adaptive_rx_coalesce) ||
+ (ecoalesce->use_adaptive_tx_coalesce) ||
+ (ecoalesce->pkt_rate_low) ||
+ (ecoalesce->rx_coalesce_usecs_low) ||
+ (ecoalesce->rx_max_coalesced_frames_low) ||
+ (ecoalesce->tx_coalesce_usecs_low) ||
+ (ecoalesce->tx_max_coalesced_frames_low) ||
+ (ecoalesce->pkt_rate_high) ||
+ (ecoalesce->rx_coalesce_usecs_high) ||
+ (ecoalesce->rx_max_coalesced_frames_high) ||
+ (ecoalesce->tx_coalesce_usecs_high) ||
+ (ecoalesce->tx_max_coalesced_frames_high) ||
+ (ecoalesce->rate_sample_interval))
+ return -EOPNOTSUPP;
+ if (ecoalesce->rx_max_coalesced_frames)
+ priv->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
+ if (ecoalesce->tx_max_coalesced_frames)
+ priv->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames;
+
+ return 0;
+}
+
+static const struct ethtool_ops nixge_ethtool_ops = {
+ .get_drvinfo = nixge_ethtools_get_drvinfo,
+ .get_coalesce = nixge_ethtools_get_coalesce,
+ .set_coalesce = nixge_ethtools_set_coalesce,
+};
+
+int nixge_mdio_read(struct mii_bus *bus, int phy_id, int reg)
+{
+ struct nixge_priv *priv = bus->priv;
+ u32 status, tmp;
+ int err;
+ u16 device;
+
+ if (reg & MII_ADDR_C45) {
+ device = (reg >> 16) & 0x1f;
+
+ nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_ADDR, reg & 0xffff);
+
+ tmp = NIXGE_MDIO_CLAUSE45 | NIXGE_MDIO_OP(NIXGE_MDIO_OP_ADDRESS)
+ | NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);
+
+ nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
+ nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1);
+
+ err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
+ !status, 10, 1000);
+ if (err) {
+ dev_err(priv->dev, "timeout setting address");
+ return -ETIMEDOUT;
+ }
+
+ tmp = NIXGE_MDIO_CLAUSE45 | NIXGE_MDIO_OP(NIXGE_MDIO_OP_READ) |
+ NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);
+ } else {
+ device = reg & 0x1f;
+
+ tmp = NIXGE_MDIO_CLAUSE22 | NIXGE_MDIO_OP(MDIO_C22_READ) |
+ NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);
+ }
+
+ nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
+ nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1);
+
+ err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
+ !status, 10, 1000);
+ if (err) {
+ dev_err(priv->dev, "timeout setting read command");
+ return -ETIMEDOUT;
+ }
+
+ status = nixge_ctrl_read_reg(priv, NIXGE_REG_MDIO_DATA);
+
+ dev_dbg(priv->dev, "%s: phy_id = %x reg = %x got %x\n", __func__,
+ phy_id, reg & 0xffff, status);
+
+ return status;
+}
+
+int nixge_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 val)
+{
+ struct nixge_priv *priv = bus->priv;
+ u32 status, tmp;
+ int err;
+ u16 device;
+
+ /* FIXME: Currently don't do writes */
+ if (reg & MII_ADDR_C45)
+ return 0;
+
+ device = reg & 0x1f;
+
+ tmp = NIXGE_MDIO_CLAUSE22 | NIXGE_MDIO_OP(MDIO_C22_WRITE) |
+ NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);
+
+ nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_DATA, val);
+ nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
+ nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1);
+
+ err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
+ !status, 10, 1000);
+ if (err) {
+ dev_err(priv->dev, "timeout setting write command");
+ return -ETIMEDOUT;
+ }
+
+ dev_dbg(priv->dev, "%x %x <- %x\n", phy_id, reg, val);
+
+ return 0;
+}
+
+int nixge_mdio_setup(struct nixge_priv *priv, struct device_node *np)
+{
+ struct mii_bus *bus;
+ struct resource res;
+ int err;
+
+ bus = mdiobus_alloc();
+ if (!bus)
+ return -ENOMEM;
+
+ of_address_to_resource(np, 0, &res);
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%.8llx",
+ (unsigned long long)res.start);
+ bus->priv = priv;
+ bus->name = "NIXGE_MAC_mii_bus";
+ bus->read = nixge_mdio_read;
+ bus->write = nixge_mdio_write;
+ bus->parent = priv->dev;
+
+ priv->mii_bus = bus;
+ err = of_mdiobus_register(bus, np);
+ if (err)
+ goto err_register;
+
+ dev_info(priv->dev, "MDIO bus registered\n");
+
+ return 0;
+
+err_register:
+ mdiobus_free(bus);
+ return err;
+}
+
+static void *nixge_get_nvmem_address(struct device *dev)
+{
+ struct nvmem_cell *cell;
+ size_t cell_size;
+ char *mac;
+
+ cell = nvmem_cell_get(dev, "address");
+ if (IS_ERR(cell))
+ return cell;
+
+ mac = nvmem_cell_read(cell, &cell_size);
+ nvmem_cell_put(cell);
+
+ if (IS_ERR(mac))
+ return mac;
+
+ return mac;
+}
+
+static int nixge_probe(struct platform_device *pdev)
+{
+ int err;
+ struct nixge_priv *priv;
+ struct net_device *ndev;
+ struct resource *dmares;
+ const char *mac_addr;
+
+ ndev = alloc_etherdev(sizeof(*priv));
+ if (!ndev)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, ndev);
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+
+ ndev->flags &= ~IFF_MULTICAST; /* clear multicast */
+ ndev->features = NETIF_F_SG;
+ ndev->netdev_ops = &nixge_netdev_ops;
+ ndev->ethtool_ops = &nixge_ethtool_ops;
+
+ /* MTU range: 64 - 9000 */
+ ndev->min_mtu = 64;
+ ndev->max_mtu = NIXGE_JUMBO_MTU;
+
+ mac_addr = nixge_get_nvmem_address(&pdev->dev);
+ if (mac_addr)
+ ether_addr_copy(ndev->dev_addr, mac_addr);
+ else
+ eth_hw_addr_random(ndev);
+
+ priv = netdev_priv(ndev);
+ priv->ndev = ndev;
+ priv->dev = &pdev->dev;
+
+ priv->features = 0;
+ /* default to this for now ... */
+ priv->rxmem = 10000;
+
+ dmares = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->dma_regs = devm_ioremap_resource(&pdev->dev, dmares);
+ if (IS_ERR(priv->dma_regs)) {
+ dev_err(&pdev->dev, "failed to map dma regs\n");
+ return PTR_ERR(priv->dma_regs);
+ }
+ priv->ctrl_regs = priv->dma_regs + 0x4000;
+ __nixge_set_mac_address(ndev, mac_addr);
+
+ priv->tx_irq = platform_get_irq_byname(pdev, "tx-irq");
+ if (priv->tx_irq < 0) {
+ dev_err(&pdev->dev, "no tx irq available");
+ return priv->tx_irq;
+ }
+
+ priv->rx_irq = platform_get_irq_byname(pdev, "rx-irq");
+ if (priv->rx_irq < 0) {
+ dev_err(&pdev->dev, "no rx irq available");
+ return priv->rx_irq;
+ }
+
+ priv->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
+ priv->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;
+
+ spin_lock_init(&priv->lock);
+
+ err = nixge_mdio_setup(priv, pdev->dev.of_node);
+ if (err) {
+ dev_warn(&pdev->dev, "error registering mdio bus");
+ goto free_netdev;
+ }
+
+ priv->phy_dev = phy_find_first(priv->mii_bus);
+ if (!priv->phy_dev) {
+ dev_err(&pdev->dev, "error finding a phy ...");
+ goto free_netdev;
+ }
+
+ err = register_netdev(priv->ndev);
+ if (err) {
+ dev_err(priv->dev, "register_netdev() error (%i)\n", err);
+ goto free_netdev;
+ }
+
+ err = phy_connect_direct(ndev, priv->phy_dev, &nixge_handle_link_change,
+ priv->phy_interface);
+ if (err) {
+ dev_err(&pdev->dev, "failed to attach to phy ...");
+ goto unregister_mdio;
+ }
+
+ /* not sure if this is the correct way of dealing with this ... */
+ ndev->phydev->supported &= ~(SUPPORTED_Autoneg);
+ ndev->phydev->advertising = ndev->phydev->supported;
+ ndev->phydev->autoneg = AUTONEG_DISABLE;
+
+ phy_attached_info(ndev->phydev);
+
+ netdev_info(ndev, "NIXGE_MAC rev 0x%08x at %p and %p,tx_irq %d rx_irq %d(%pM)\n",
+ 0x200000, priv->dma_regs, priv->ctrl_regs, priv->tx_irq,
+ priv->rx_irq, ndev->dev_addr);
+
+ return 0;
+
+unregister_mdio:
+ phy_disconnect(ndev->phydev);
+ mdiobus_unregister(priv->mii_bus);
+ mdiobus_free(priv->mii_bus);
+
+free_netdev:
+ free_netdev(ndev);
+
+ return err;
+}
+
+static int nixge_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct nixge_priv *priv = netdev_priv(ndev);
+
+ if (ndev->phydev)
+ phy_disconnect(ndev->phydev);
+ ndev->phydev = NULL;
+
+ mdiobus_unregister(priv->mii_bus);
+ mdiobus_free(priv->mii_bus);
+ priv->mii_bus = NULL;
+
+ unregister_netdev(ndev);
+
+ free_netdev(ndev);
+
+ return 0;
+}
+
+static int __maybe_unused nixge_suspend(struct device *dev)
+{
+ return 0;
+}
+
+static int __maybe_unused nixge_resume(struct device *dev)
+{
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(nixge_pm_ops, nixge_suspend, nixge_resume);
+
+/* Match table for of_platform binding */
+static const struct of_device_id nixge_dt_ids[] = {
+ { .compatible = "ni,xge-enet-2.00", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, nixge_dt_ids);
+
+static struct platform_driver nixge_driver = {
+ .probe = nixge_probe,
+ .remove = nixge_remove,
+ .driver = {
+ .name = "nixge",
+ .of_match_table = of_match_ptr(nixge_dt_ids),
+ .pm = &nixge_pm_ops,
+ },
+};
+module_platform_driver(nixge_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("National Instruments XGE Management MAC");
+MODULE_AUTHOR("Moritz Fischer <[email protected]>");
--
2.7.4

2017-07-13 22:36:43

by Andrew Lunn

Subject: Re: [PATCH 2/2] net: ethernet: nixge: Add support for National Instruments XGE netdev

> +++ b/drivers/net/ethernet/ni/nixge.c
> @@ -0,0 +1,1246 @@
> +/*
> + * Copyright (c) 2016-2017, National Instruments Corp.
> + *
> + * Network Driver for Ettus Research XGE MAC
> + *
> + * This is largely based on the Xilinx AXI Ethernet Driver,
> + * and uses the same DMA engine in the FPGA

Hi Moritz

Is the DMA code the same as in the AXI driver? Should it be pulled out
into a library and shared?

> +struct nixge_priv {
> + struct net_device *ndev;
> + struct device *dev;
> +
> + /* Connection to PHY device */
> + struct phy_device *phy_dev;
> + phy_interface_t phy_interface;

> + /* protecting link parameters */
> + spinlock_t lock;
> + int link;
> + int speed;
> + int duplex;

All these seem to be pointless. They are set, but never used.

> +
> +static inline void nixge_dma_write_reg(struct nixge_priv *priv, off_t offset,
> + u32 val)

Please leave it up to the compiler to inline.

> +static void __nixge_device_reset(struct nixge_priv *priv, off_t offset)
> +{
> + u32 timeout;
> + /* Reset Axi DMA. This would reset NIXGE Ethernet core as well.
> + * The reset process of Axi DMA takes a while to complete as all
> + * pending commands/transfers will be flushed or completed during
> + * this reset process.
> + */
> + nixge_dma_write_reg(priv, offset, XAXIDMA_CR_RESET_MASK);
> + timeout = DELAY_OF_ONE_MILLISEC;
> + while (nixge_dma_read_reg(priv, offset) & XAXIDMA_CR_RESET_MASK) {
> + udelay(1);

There is a link between the 1 and the value of DELAY_OF_ONE_MILLISEC.
It would be good to try to link these two together.

> + if (--timeout == 0) {
> + netdev_err(priv->ndev, "%s: DMA reset timeout!\n",
> + __func__);
> + break;
> + }
> + }
> +}
> +

> +static void nixge_handle_link_change(struct net_device *ndev)
> +{
> + struct nixge_priv *priv = netdev_priv(ndev);
> + struct phy_device *phydev = ndev->phydev;
> + unsigned long flags;
> + int status_change = 0;
> +
> + spin_lock_irqsave(&priv->lock, flags);
> +
> + if (phydev->link != priv->link) {
> + if (!phydev->link) {
> + priv->speed = 0;
> + priv->duplex = -1;
> + }
> + priv->link = phydev->link;
> +
> + status_change = 1;
> + }
> +
> + spin_unlock_irqrestore(&priv->lock, flags);
> +
> + if (status_change) {
> + if (phydev->link) {
> + netif_carrier_on(ndev);
> + netdev_info(ndev, "link up (%d/%s)\n",
> + phydev->speed,
> + phydev->duplex == DUPLEX_FULL ?
> + "Full" : "Half");
> + } else {
> + netif_carrier_off(ndev);
> + netdev_info(ndev, "link down\n");
> + }

phy_print_status() should be used.

Also, the phylib will handle netif_carrier_off/on for you.
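
Roughly, and untested, the whole handler could then shrink to something
like this, since the link/speed/duplex bookkeeping above goes away:

static void nixge_handle_link_change(struct net_device *ndev)
{
	/* phylib already keeps ndev->phydev and the carrier state up to
	 * date, so all that is left to do is report the new status
	 */
	phy_print_status(ndev->phydev);
}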

> +static int nixge_open(struct net_device *ndev)
> +{
> + struct nixge_priv *priv = netdev_priv(ndev);
> + int ret;
> +
> + nixge_device_reset(ndev);
> +
> + /* start netif carrier down */
> + netif_carrier_off(ndev);
> +
> + if (!ndev->phydev)
> + netdev_err(ndev, "no phy, phy_start() failed\n");

Not really correct. You don't call phy_start(). And phy_start() cannot
indicate a failure, it is a void function.

It would be a lot better to bail out if there is no phy. Probably
during probe.

> +static s32 __nixge_set_mac_address(struct net_device *ndev, const void *addr)
> +{
> + struct nixge_priv *priv = netdev_priv(ndev);
> +
> + if (addr)
> + memcpy(ndev->dev_addr, addr, ETH_ALEN);
> + if (!is_valid_ether_addr(ndev->dev_addr))
> + eth_random_addr(ndev->dev_addr);

Messy. I would change this. Make addr mandatory. If it is invalid,
return an error. That will make nixge_net_set_mac_address() do the
right thing. nixge_probe() should verify what it gets from the nvmem,
and if that is invalid, pass a random MAC address.
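
Untested sketch of what I mean, reusing your register writes:

static int __nixge_set_mac_address(struct net_device *ndev, const void *addr)
{
	struct nixge_priv *priv = netdev_priv(ndev);

	if (!is_valid_ether_addr(addr))
		return -EADDRNOTAVAIL;

	memcpy(ndev->dev_addr, addr, ETH_ALEN);

	nixge_ctrl_write_reg(priv, NIXGE_REG_MAC_LSB,
			     (ndev->dev_addr[2]) << 24 |
			     (ndev->dev_addr[3] << 16) |
			     (ndev->dev_addr[4] << 8) |
			     (ndev->dev_addr[5] << 0));

	nixge_ctrl_write_reg(priv, NIXGE_REG_MAC_MSB,
			     (ndev->dev_addr[1] | (ndev->dev_addr[0] << 8)));

	return 0;
}

nixge_probe() can then fall back to a random address whenever this
returns an error.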

> +
> + nixge_ctrl_write_reg(priv, NIXGE_REG_MAC_LSB,
> + (ndev->dev_addr[2]) << 24 |
> + (ndev->dev_addr[3] << 16) |
> + (ndev->dev_addr[4] << 8) |
> + (ndev->dev_addr[5] << 0));
> +
> + nixge_ctrl_write_reg(priv, NIXGE_REG_MAC_MSB,
> + (ndev->dev_addr[1] | (ndev->dev_addr[0] << 8)));
> +
> + return 0;
> +}
> +

> +static void nixge_ethtools_get_drvinfo(struct net_device *ndev,
> + struct ethtool_drvinfo *ed)
> +{
> + strlcpy(ed->driver, "nixge", sizeof(ed->driver));
> + strlcpy(ed->version, "1.00a", sizeof(ed->version));

Driver version is pretty pointless. What does 1.00a mean? Say it gets
backported into F26. Is it still 1.00a even though lots of things
around it have changed?


> +int nixge_mdio_read(struct mii_bus *bus, int phy_id, int reg)
> +{
> + struct nixge_priv *priv = bus->priv;
> + u32 status, tmp;
> + int err;
> + u16 device;
> +
> + if (reg & MII_ADDR_C45) {
> + device = (reg >> 16) & 0x1f;
> +
> + nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_ADDR, reg & 0xffff);
> +
> + tmp = NIXGE_MDIO_CLAUSE45 | NIXGE_MDIO_OP(NIXGE_MDIO_OP_ADDRESS)
> + | NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);
> +
> + nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
> + nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1);
> +
> + err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
> + !status, 10, 1000);
> + if (err) {
> + dev_err(priv->dev, "timeout setting address");
> + return -ETIMEDOUT;

Better to return err.

> + }
> +
> + tmp = NIXGE_MDIO_CLAUSE45 | NIXGE_MDIO_OP(NIXGE_MDIO_OP_READ) |
> + NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);
> + } else {
> + device = reg & 0x1f;
> +
> + tmp = NIXGE_MDIO_CLAUSE22 | NIXGE_MDIO_OP(MDIO_C22_READ) |
> + NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);
> + }
> +
> + nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
> + nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1);
> +
> + err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
> + !status, 10, 1000);
> + if (err) {
> + dev_err(priv->dev, "timeout setting read command");
> + return -ETIMEDOUT;

Again, return err.

> + }
> +
> + status = nixge_ctrl_read_reg(priv, NIXGE_REG_MDIO_DATA);
> +
> + dev_dbg(priv->dev, "%s: phy_id = %x reg = %x got %x\n", __func__,
> + phy_id, reg & 0xffff, status);
> +
> + return status;
> +}
> +
> +int nixge_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 val)
> +{
> + struct nixge_priv *priv = bus->priv;
> + u32 status, tmp;
> + int err;
> + u16 device;
> +
> + /* FIXME: Currently don't do writes */
> + if (reg & MII_ADDR_C45)
> + return 0;

-EOPNOTSUPP would be better.

> +
> + device = reg & 0x1f;
> +
> + tmp = NIXGE_MDIO_CLAUSE22 | NIXGE_MDIO_OP(MDIO_C22_WRITE) |
> + NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);
> +
> + nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_DATA, val);
> + nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
> + nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1);
> +
> + err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
> + !status, 10, 1000);
> + if (err) {
> + dev_err(priv->dev, "timeout setting write command");
> + return -ETIMEDOUT;
> + }
> +
> + dev_dbg(priv->dev, "%x %x <- %x\n", phy_id, reg, val);
> +
> + return 0;
> +}
> +
> +int nixge_mdio_setup(struct nixge_priv *priv, struct device_node *np)
> +{
> + struct mii_bus *bus;
> + struct resource res;
> + int err;
> +
> + bus = mdiobus_alloc();
> + if (!bus)
> + return -ENOMEM;
> +
> + of_address_to_resource(np, 0, &res);
> + snprintf(bus->id, MII_BUS_ID_SIZE, "%.8llx",
> + (unsigned long long)res.start);

There are more meaningful things you could use, e.g. dev_name(priv->dev)
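
e.g. something along these lines (untested), which also keeps the id
unique per device:

	snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(priv->dev));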

> + bus->priv = priv;
> + bus->name = "NIXGE_MAC_mii_bus";
> + bus->read = nixge_mdio_read;
> + bus->write = nixge_mdio_write;
> + bus->parent = priv->dev;
> +
> + priv->mii_bus = bus;
> + err = of_mdiobus_register(bus, np);
> + if (err)
> + goto err_register;
> +
> + dev_info(priv->dev, "MDIO bus registered\n");
> +
> + return 0;
> +
> +err_register:
> + mdiobus_free(bus);
> + return err;
> +}
> +
> +static void *nixge_get_nvmem_address(struct device *dev)
> +{
> + struct nvmem_cell *cell;
> + size_t cell_size;
> + char *mac;
> +
> + cell = nvmem_cell_get(dev, "address");
> + if (IS_ERR(cell))
> + return cell;
> +
> + mac = nvmem_cell_read(cell, &cell_size);
> + nvmem_cell_put(cell);
> +
> + if (IS_ERR(mac))
> + return mac;
> +
> + return mac;

Pointless if()

> +}
> +
> +static int nixge_probe(struct platform_device *pdev)
> +{
> + int err;
> + struct nixge_priv *priv;
> + struct net_device *ndev;
> + struct resource *dmares;
> + const char *mac_addr;
> +
> + ndev = alloc_etherdev(sizeof(*priv));
> + if (!ndev)
> + return -ENOMEM;
> +
> + platform_set_drvdata(pdev, ndev);
> + SET_NETDEV_DEV(ndev, &pdev->dev);
> +
> + ndev->flags &= ~IFF_MULTICAST; /* clear multicast */
> + ndev->features = NETIF_F_SG;
> + ndev->netdev_ops = &nixge_netdev_ops;
> + ndev->ethtool_ops = &nixge_ethtool_ops;
> +
> + /* MTU range: 64 - 9000 */
> + ndev->min_mtu = 64;
> + ndev->max_mtu = NIXGE_JUMBO_MTU;
> +
> + mac_addr = nixge_get_nvmem_address(&pdev->dev);
> + if (mac_addr)
> + ether_addr_copy(ndev->dev_addr, mac_addr);
> + else
> + eth_hw_addr_random(ndev);
> +
> + priv = netdev_priv(ndev);
> + priv->ndev = ndev;
> + priv->dev = &pdev->dev;
> +
> + priv->features = 0;
> + /* default to this for now ... */
> + priv->rxmem = 10000;
> +
> + dmares = platform_get_resource(pdev, IORESOURCE_MEM, 0);
> + priv->dma_regs = devm_ioremap_resource(&pdev->dev, dmares);
> + if (IS_ERR(priv->dma_regs)) {
> + dev_err(&pdev->dev, "failed to map dma regs\n");
> + return PTR_ERR(priv->dma_regs);
> + }
> + priv->ctrl_regs = priv->dma_regs + 0x4000;
> + __nixge_set_mac_address(ndev, mac_addr);
> +
> + priv->tx_irq = platform_get_irq_byname(pdev, "tx-irq");
> + if (priv->tx_irq < 0) {
> + dev_err(&pdev->dev, "no tx irq available");
> + return priv->tx_irq;
> + }
> +
> + priv->rx_irq = platform_get_irq_byname(pdev, "rx-irq");
> + if (priv->rx_irq < 0) {
> + dev_err(&pdev->dev, "no rx irq available");
> + return priv->rx_irq;
> + }
> +
> + priv->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
> + priv->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;
> +
> + spin_lock_init(&priv->lock);
> +
> + err = nixge_mdio_setup(priv, pdev->dev.of_node);
> + if (err) {
> + dev_warn(&pdev->dev, "error registering mdio bus");
> + goto free_netdev;
> + }
> +
> + priv->phy_dev = phy_find_first(priv->mii_bus);
> + if (!priv->phy_dev) {
> + dev_err(&pdev->dev, "error finding a phy ...");
> + goto free_netdev;
> + }

I don't recommend this. Enforce that the binding has a phy-handle.

> +
> + err = register_netdev(priv->ndev);
> + if (err) {
> + dev_err(priv->dev, "register_netdev() error (%i)\n", err);
> + goto free_netdev;
> + }
> +
> + err = phy_connect_direct(ndev, priv->phy_dev, &nixge_handle_link_change,
> + priv->phy_interface);

and here use of_phy_connect().

And where do you set phy_interface? You should be reading it from
device tree.
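
Untested sketch of the direction I mean, assuming the binding gains a
mandatory phy-handle (error handling omitted for brevity):

	struct device_node *phy_node;
	struct phy_device *phydev;

	/* phy-mode comes from DT instead of being left uninitialized */
	priv->phy_interface = of_get_phy_mode(pdev->dev.of_node);

	phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
	phydev = of_phy_connect(ndev, phy_node, nixge_handle_link_change,
				0, priv->phy_interface);
	of_node_put(phy_node);
	if (!phydev)
		goto unregister_mdio;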

> + if (err) {
> + dev_err(&pdev->dev, "failed to attach to phy ...");
> + goto unregister_mdio;
> + }
> +
> + /* not sure if this is the correct way of dealing with this ... */
> + ndev->phydev->supported &= ~(SUPPORTED_Autoneg);
> + ndev->phydev->advertising = ndev->phydev->supported;
> + ndev->phydev->autoneg = AUTONEG_DISABLE;

What are you trying to achieve?

Andrew

2017-07-14 00:31:17

by Moritz Fischer

Subject: Re: [PATCH 2/2] net: ethernet: nixge: Add support for National Instruments XGE netdev

Hi Andrew,

thanks for the quick response.

On Fri, Jul 14, 2017 at 12:36:36AM +0200, Andrew Lunn wrote:
> > +++ b/drivers/net/ethernet/ni/nixge.c
> > @@ -0,0 +1,1246 @@
> > +/*
> > + * Copyright (c) 2016-2017, National Instruments Corp.
> > + *
> > + * Network Driver for Ettus Research XGE MAC
> > + *
> > + * This is largely based on the Xilinx AXI Ethernet Driver,
> > + * and uses the same DMA engine in the FPGA
>
> Hi Moritz
>
> Is the DMA code the same as in the AXI driver? Should it be pulled out
> into a library and shared?

Mostly, I'll see what I can do. At least the register definitions and
common structures can be pulled out into a common header file.

>
> > +struct nixge_priv {
> > + struct net_device *ndev;
> > + struct device *dev;
> > +
> > + /* Connection to PHY device */
> > + struct phy_device *phy_dev;
> > + phy_interface_t phy_interface;
>
> > + /* protecting link parameters */
> > + spinlock_t lock;
> > + int link;
> > + int speed;
> > + int duplex;
>
> All these seem to be pointless. They are set, but never used.

Will fix.
>
> > +
> > +static inline void nixge_dma_write_reg(struct nixge_priv *priv, off_t offset,
> > + u32 val)
>
> Please leave it up to the compiler to inline.

Will fix.
>
> > +static void __nixge_device_reset(struct nixge_priv *priv, off_t offset)
> > +{
> > + u32 timeout;
> > + /* Reset Axi DMA. This would reset NIXGE Ethernet core as well.
> > + * The reset process of Axi DMA takes a while to complete as all
> > + * pending commands/transfers will be flushed or completed during
> > + * this reset process.
> > + */
> > + nixge_dma_write_reg(priv, offset, XAXIDMA_CR_RESET_MASK);
> > + timeout = DELAY_OF_ONE_MILLISEC;
> > + while (nixge_dma_read_reg(priv, offset) & XAXIDMA_CR_RESET_MASK) {
> > + udelay(1);
>
> There is a link between the 1 and the value of DELAY_OF_ONE_MILLISEC.
> It would be good to try to link these two together.

D'oh ... Seems like a good candidate for iopoll ...
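
Something along these lines (untested sketch, with local status/err
variables assumed) is what I have in mind, which also ties the 1us poll
interval to DELAY_OF_ONE_MILLISEC as the overall timeout:

	u32 status;
	int err;

	nixge_dma_write_reg(priv, offset, XAXIDMA_CR_RESET_MASK);

	/* poll the reset bit instead of open-coding udelay() + counter */
	err = readl_poll_timeout(priv->dma_regs + offset, status,
				 !(status & XAXIDMA_CR_RESET_MASK),
				 1, DELAY_OF_ONE_MILLISEC);
	if (err)
		netdev_err(priv->ndev, "%s: DMA reset timeout!\n", __func__);
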
>
> > + if (--timeout == 0) {
> > + netdev_err(priv->ndev, "%s: DMA reset timeout!\n",
> > + __func__);
> > + break;
> > + }
> > + }
> > +}
> > +
>
> > +static void nixge_handle_link_change(struct net_device *ndev)
> > +{
> > + struct nixge_priv *priv = netdev_priv(ndev);
> > + struct phy_device *phydev = ndev->phydev;
> > + unsigned long flags;
> > + int status_change = 0;
> > +
> > + spin_lock_irqsave(&priv->lock, flags);
> > +
> > + if (phydev->link != priv->link) {
> > + if (!phydev->link) {
> > + priv->speed = 0;
> > + priv->duplex = -1;
> > + }
> > + priv->link = phydev->link;
> > +
> > + status_change = 1;
> > + }
> > +
> > + spin_unlock_irqrestore(&priv->lock, flags);
> > +
> > + if (status_change) {
> > + if (phydev->link) {
> > + netif_carrier_on(ndev);
> > + netdev_info(ndev, "link up (%d/%s)\n",
> > + phydev->speed,
> > + phydev->duplex == DUPLEX_FULL ?
> > + "Full" : "Half");
> > + } else {
> > + netif_carrier_off(ndev);
> > + netdev_info(ndev, "link down\n");
> > + }
>
> phy_print_status() should be used.

Will do.
>
> Also, the phylib will handle netif_carrier_off/on for you.

Good to know :)
>
> > +static int nixge_open(struct net_device *ndev)
> > +{
> > + struct nixge_priv *priv = netdev_priv(ndev);
> > + int ret;
> > +
> > + nixge_device_reset(ndev);
> > +
> > + /* start netif carrier down */
> > + netif_carrier_off(ndev);
> > +
> > + if (!ndev->phydev)
> > + netdev_err(ndev, "no phy, phy_start() failed\n");
>
> Not really correct. You don't call phy_start(). And phy_start() cannot
> indicate a failure, it is a void function.

Will fix.
>
> It would be a lot better to bail out if there is no phy. Probably
> during probe.

Yeah.
>
> > +static s32 __nixge_set_mac_address(struct net_device *ndev, const void *addr)
> > +{
> > + struct nixge_priv *priv = netdev_priv(ndev);
> > +
> > + if (addr)
> > + memcpy(ndev->dev_addr, addr, ETH_ALEN);
> > + if (!is_valid_ether_addr(ndev->dev_addr))
> > + eth_random_addr(ndev->dev_addr);
>
> Messy. I would change this. Make addr mandatory. If it is invalid,
> return an error. That will make nixge_net_set_mac_address() do the
> right thing. nixge_probe() should verify what it gets from the nvmem,
> and if that is invalid, pass a random MAC address.

Will fix.
>
> > +
> > + nixge_ctrl_write_reg(priv, NIXGE_REG_MAC_LSB,
> > + (ndev->dev_addr[2]) << 24 |
> > + (ndev->dev_addr[3] << 16) |
> > + (ndev->dev_addr[4] << 8) |
> > + (ndev->dev_addr[5] << 0));
> > +
> > + nixge_ctrl_write_reg(priv, NIXGE_REG_MAC_MSB,
> > + (ndev->dev_addr[1] | (ndev->dev_addr[0] << 8)));
> > +
> > + return 0;
> > +}
> > +
>
> > +static void nixge_ethtools_get_drvinfo(struct net_device *ndev,
> > + struct ethtool_drvinfo *ed)
> > +{
> > + strlcpy(ed->driver, "nixge", sizeof(ed->driver));
> > + strlcpy(ed->version, "1.00a", sizeof(ed->version));
>
> Driver version is pretty pointless. What does 1.00a mean? Say it gets
> backported into F26. Is it still 1.00a even though lots of things
> around it have changed?

Maybe I can just drop drvinfo?
>
>
> > +int nixge_mdio_read(struct mii_bus *bus, int phy_id, int reg)
> > +{
> > + struct nixge_priv *priv = bus->priv;
> > + u32 status, tmp;
> > + int err;
> > + u16 device;
> > +
> > + if (reg & MII_ADDR_C45) {
> > + device = (reg >> 16) & 0x1f;
> > +
> > + nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_ADDR, reg & 0xffff);
> > +
> > + tmp = NIXGE_MDIO_CLAUSE45 | NIXGE_MDIO_OP(NIXGE_MDIO_OP_ADDRESS)
> > + | NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);
> > +
> > + nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
> > + nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1);
> > +
> > + err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
> > + !status, 10, 1000);
> > + if (err) {
> > + dev_err(priv->dev, "timeout setting address");
> > + return -ETIMEDOUT;
>
> Better to return err.

Agreed.
>
> > + }
> > +
> > + tmp = NIXGE_MDIO_CLAUSE45 | NIXGE_MDIO_OP(NIXGE_MDIO_OP_READ) |
> > + NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);
> > + } else {
> > + device = reg & 0x1f;
> > +
> > + tmp = NIXGE_MDIO_CLAUSE22 | NIXGE_MDIO_OP(MDIO_C22_READ) |
> > + NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);
> > + }
> > +
> > + nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
> > + nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1);
> > +
> > + err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
> > + !status, 10, 1000);
> > + if (err) {
> > + dev_err(priv->dev, "timeout setting read command");
> > + return -ETIMEDOUT;
>
> Again, return err.

Agreed.
>
> > + }
> > +
> > + status = nixge_ctrl_read_reg(priv, NIXGE_REG_MDIO_DATA);
> > +
> > + dev_dbg(priv->dev, "%s: phy_id = %x reg = %x got %x\n", __func__,
> > + phy_id, reg & 0xffff, status);
> > +
> > + return status;
> > +}
> > +
> > +int nixge_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 val)
> > +{
> > + struct nixge_priv *priv = bus->priv;
> > + u32 status, tmp;
> > + int err;
> > + u16 device;
> > +
> > + /* FIXME: Currently don't do writes */
> > + if (reg & MII_ADDR_C45)
> > + return 0;
>
> -EOPNOTSUPP would be better.

Agreed, ultimately I wanna implement that.
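
Until then the stub could at least say so (sketch):

        /* Clause 45 writes are not implemented yet; report that instead
         * of pretending the write succeeded.
         */
        if (reg & MII_ADDR_C45)
                return -EOPNOTSUPP;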
>
> > +
> > + device = reg & 0x1f;
> > +
> > + tmp = NIXGE_MDIO_CLAUSE22 | NIXGE_MDIO_OP(MDIO_C22_WRITE) |
> > + NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);
> > +
> > + nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_DATA, val);
> > + nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
> > + nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1);
> > +
> > + err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
> > + !status, 10, 1000);
> > + if (err) {
> > + dev_err(priv->dev, "timeout setting write command");
> > + return -ETIMEDOUT;
> > + }
> > +
> > + dev_dbg(priv->dev, "%x %x <- %x\n", phy_id, reg, val);
> > +
> > + return 0;
> > +}
> > +
> > +int nixge_mdio_setup(struct nixge_priv *priv, struct device_node *np)
> > +{
> > + struct mii_bus *bus;
> > + struct resource res;
> > + int err;
> > +
> > + bus = mdiobus_alloc();
> > + if (!bus)
> > + return -ENOMEM;
> > +
> > + of_address_to_resource(np, 0, &res);
> > + snprintf(bus->id, MII_BUS_ID_SIZE, "%.8llx",
> > + (unsigned long long)res.start);
>
> There are more meaningful things you could use, e.g. dev_name(priv->dev)

Agreed.
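
Something like this, which also lets the of_address_to_resource() lookup go
away (sketch):

        /* Derive the bus id from the parent device name instead of the
         * raw register address.
         */
        snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(priv->dev));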
>
> > + bus->priv = priv;
> > + bus->name = "NIXGE_MAC_mii_bus";
> > + bus->read = nixge_mdio_read;
> > + bus->write = nixge_mdio_write;
> > + bus->parent = priv->dev;
> > +
> > + priv->mii_bus = bus;
> > + err = of_mdiobus_register(bus, np);
> > + if (err)
> > + goto err_register;
> > +
> > + dev_info(priv->dev, "MDIO bus registered\n");
> > +
> > + return 0;
> > +
> > +err_register:
> > + mdiobus_free(bus);
> > + return err;
> > +}
> > +
> > +static void *nixge_get_nvmem_address(struct device *dev)
> > +{
> > + struct nvmem_cell *cell;
> > + size_t cell_size;
> > + char *mac;
> > +
> > + cell = nvmem_cell_get(dev, "address");
> > + if (IS_ERR(cell))
> > + return cell;
> > +
> > + mac = nvmem_cell_read(cell, &cell_size);
> > + nvmem_cell_put(cell);
> > +
> > + if (IS_ERR(mac))
> > + return mac;
> > +
> > + return mac;
>
> Pointless if()

D'oh ... will fix.
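
i.e. simply (sketch; callers then check with IS_ERR() rather than a NULL
test):

static void *nixge_get_nvmem_address(struct device *dev)
{
        struct nvmem_cell *cell;
        size_t cell_size;
        char *mac;

        cell = nvmem_cell_get(dev, "address");
        if (IS_ERR(cell))
                return cell;

        mac = nvmem_cell_read(cell, &cell_size);
        nvmem_cell_put(cell);

        return mac;     /* may itself be an ERR_PTR */
}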
>
> > +}
> > +
> > +static int nixge_probe(struct platform_device *pdev)
> > +{
> > + int err;
> > + struct nixge_priv *priv;
> > + struct net_device *ndev;
> > + struct resource *dmares;
> > + const char *mac_addr;
> > +
> > + ndev = alloc_etherdev(sizeof(*priv));
> > + if (!ndev)
> > + return -ENOMEM;
> > +
> > + platform_set_drvdata(pdev, ndev);
> > + SET_NETDEV_DEV(ndev, &pdev->dev);
> > +
> > + ndev->flags &= ~IFF_MULTICAST; /* clear multicast */
> > + ndev->features = NETIF_F_SG;
> > + ndev->netdev_ops = &nixge_netdev_ops;
> > + ndev->ethtool_ops = &nixge_ethtool_ops;
> > +
> > + /* MTU range: 64 - 9000 */
> > + ndev->min_mtu = 64;
> > + ndev->max_mtu = NIXGE_JUMBO_MTU;
> > +
> > + mac_addr = nixge_get_nvmem_address(&pdev->dev);
> > + if (mac_addr)
> > + ether_addr_copy(ndev->dev_addr, mac_addr);
> > + else
> > + eth_hw_addr_random(ndev);
> > +
> > + priv = netdev_priv(ndev);
> > + priv->ndev = ndev;
> > + priv->dev = &pdev->dev;
> > +
> > + priv->features = 0;
> > + /* default to this for now ... */
> > + priv->rxmem = 10000;
> > +
> > + dmares = platform_get_resource(pdev, IORESOURCE_MEM, 0);
> > + priv->dma_regs = devm_ioremap_resource(&pdev->dev, dmares);
> > + if (IS_ERR(priv->dma_regs)) {
> > + dev_err(&pdev->dev, "failed to map dma regs\n");
> > + return PTR_ERR(priv->dma_regs);
> > + }
> > + priv->ctrl_regs = priv->dma_regs + 0x4000;
> > + __nixge_set_mac_address(ndev, mac_addr);
> > +
> > + priv->tx_irq = platform_get_irq_byname(pdev, "tx-irq");
> > + if (priv->tx_irq < 0) {
> > + dev_err(&pdev->dev, "no tx irq available");
> > + return priv->tx_irq;
> > + }
> > +
> > + priv->rx_irq = platform_get_irq_byname(pdev, "rx-irq");
> > + if (priv->rx_irq < 0) {
> > + dev_err(&pdev->dev, "no rx irq available");
> > + return priv->rx_irq;
> > + }
> > +
> > + priv->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
> > + priv->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;
> > +
> > + spin_lock_init(&priv->lock);
> > +
> > + err = nixge_mdio_setup(priv, pdev->dev.of_node);
> > + if (err) {
> > + dev_warn(&pdev->dev, "error registering mdio bus");
> > + goto free_netdev;
> > + }
> > +
> > + priv->phy_dev = phy_find_first(priv->mii_bus);
> > + if (!priv->phy_dev) {
> > + dev_err(&pdev->dev, "error finding a phy ...");
> > + goto free_netdev;
> > + }
>
> I don't recommend this. Enforce that the binding has a phy-handle.

Yeah, will move the of_phy_connect into open() and just check for
phandle here.
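
Roughly (sketch only; this assumes a new priv->phy_node field filled from the
phy-handle property):

        /* nixge_probe(): enforce that the binding points at a PHY */
        priv->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
        if (!priv->phy_node) {
                dev_err(&pdev->dev, "not connected to a PHY\n");
                err = -ENODEV;
                goto free_netdev;
        }

        /* nixge_open(): do the actual connect */
        phydev = of_phy_connect(ndev, priv->phy_node,
                                &nixge_handle_link_change, 0,
                                priv->phy_interface);
        if (!phydev) {
                netdev_err(ndev, "could not connect to PHY\n");
                return -ENODEV;
        }
        phy_start(phydev);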
>
> > +
> > + err = register_netdev(priv->ndev);
> > + if (err) {
> > + dev_err(priv->dev, "register_netdev() error (%i)\n", err);
> > + goto free_netdev;
> > + }
> > +
> > + err = phy_connect_direct(ndev, priv->phy_dev, &nixge_handle_link_change,
> > + priv->phy_interface);
>
> and here use of_phy_connect().

I'll probably move that to the open()
>
> And where do you set phy_interface? You should be reading it from
> device tree.

True.
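
Something like this in nixge_probe(), then (sketch; as of v4.12
of_get_phy_mode() returns the mode or a negative errno):

        err = of_get_phy_mode(pdev->dev.of_node);
        if (err < 0) {
                dev_err(&pdev->dev, "missing or invalid \"phy-mode\" property\n");
                goto free_netdev;
        }
        priv->phy_interface = err;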
>
> > + if (err) {
> > + dev_err(&pdev->dev, "failed to attach to phy ...");
> > + goto unregister_mdio;
> > + }
> > +
> > + /* not sure if this is the correct way of dealing with this ... */
> > + ndev->phydev->supported &= ~(SUPPORTED_Autoneg);
> > + ndev->phydev->advertising = ndev->phydev->supported;
> > + ndev->phydev->autoneg = AUTONEG_DISABLE;
>
> What are you trying to achieve?

Basically can't do Autoneg, I'll need to take a closer look.

>
> Andrew

Thanks for your feedback,

Moritz



2017-07-14 00:36:24

by Moritz Fischer

[permalink] [raw]
Subject: Re: [PATCH 1/2] dt-bindings: net: Add bindings for National Instruments XGE netdev

Hi Yuan,

On Thu, Jul 13, 2017 at 5:33 PM, YUAN Linyu
<[email protected]> wrote:
>
>
>> -----Original Message-----
>> From: [email protected] [mailto:[email protected]]
>> On Behalf Of Moritz Fischer
>> Sent: Friday, July 14, 2017 5:22 AM
>> To: [email protected]
>> Cc: [email protected]; [email protected];
>> [email protected]; [email protected]; [email protected]; Moritz
>> Fischer
>> Subject: [PATCH 1/2] dt-bindings: net: Add bindings for National Instruments
>> XGE netdev
>>
>> This adds bindings for the NI XGE 1G/10G network device.
>>
>> Signed-off-by: Moritz Fischer <[email protected]>
>> ---
>> Documentation/devicetree/bindings/net/nixge.c | 32
>> +++++++++++++++++++++++++++
>> 1 file changed, 32 insertions(+)
>> create mode 100644 Documentation/devicetree/bindings/net/nixge.c
>
> It should be a text file, nixge.txt

You are absolutely right ... I need to have my head examined.
>
>>
>> diff --git a/Documentation/devicetree/bindings/net/nixge.c
>> b/Documentation/devicetree/bindings/net/nixge.c
>> new file mode 100644
>> index 0000000..9fff5a7

2017-07-14 00:48:22

by YUAN Linyu

[permalink] [raw]
Subject: RE: [PATCH 1/2] dt-bindings: net: Add bindings for National Instruments XGE netdev



> -----Original Message-----
> From: [email protected] [mailto:[email protected]]
> On Behalf Of Moritz Fischer
> Sent: Friday, July 14, 2017 5:22 AM
> To: [email protected]
> Cc: [email protected]; [email protected];
> [email protected]; [email protected]; [email protected]; Moritz
> Fischer
> Subject: [PATCH 1/2] dt-bindings: net: Add bindings for National Instruments
> XGE netdev
>
> This adds bindings for the NI XGE 1G/10G network device.
>
> Signed-off-by: Moritz Fischer <[email protected]>
> ---
> Documentation/devicetree/bindings/net/nixge.c | 32
> +++++++++++++++++++++++++++
> 1 file changed, 32 insertions(+)
> create mode 100644 Documentation/devicetree/bindings/net/nixge.c

It should be a text file, nixge.txt

>
> diff --git a/Documentation/devicetree/bindings/net/nixge.c
> b/Documentation/devicetree/bindings/net/nixge.c
> new file mode 100644
> index 0000000..9fff5a7

2017-07-14 01:34:47

by Andrew Lunn

[permalink] [raw]
Subject: Re: [PATCH 2/2] net: ethernet: nixge: Add support for National Instruments XGE netdev

> > > + /* not sure if this is the correct way of dealing with this ... */
> > > + ndev->phydev->supported &= ~(SUPPORTED_Autoneg);
> > > + ndev->phydev->advertising = ndev->phydev->supported;
> > > + ndev->phydev->autoneg = AUTONEG_DISABLE;
> >
> > What are you trying to achieve?
>
> Basically can't do Autoneg, I'll need to take a closer look.

Hi Moritz

What I actually think you mean is that it can only do 1Gbps. So you could
autoneg, but only advertise 1Gbps. Look at masking out
PHY_10BT_FEATURES and PHY_100BT_FEATURES.
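
I.e. something along the lines of (just a sketch; PHY_10BT_FEATURES and
PHY_100BT_FEATURES are the SUPPORTED_* bitmask helpers from linux/phy.h):

        /* Keep autoneg, but only advertise the gigabit modes */
        phydev->supported &= ~(PHY_10BT_FEATURES | PHY_100BT_FEATURES);
        phydev->advertising = phydev->supported;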

Take a look at:

http://elixir.free-electrons.com/linux/latest/source/drivers/net/ethernet/renesas/ravb_main.c#L1045

It might actually make sense to add a phy_set_min_speed(), a mirror to
phy_set_max_speed().

Andrew

2017-07-14 03:36:33

by Moritz Fischer

[permalink] [raw]
Subject: Re: [PATCH 2/2] net: ethernet: nixge: Add support for National Instruments XGE netdev

Hi Andrew,

On Thu, Jul 13, 2017 at 6:34 PM, Andrew Lunn <[email protected]> wrote:
>> > > + /* not sure if this is the correct way of dealing with this ... */
>> > > + ndev->phydev->supported &= ~(SUPPORTED_Autoneg);
>> > > + ndev->phydev->advertising = ndev->phydev->supported;
>> > > + ndev->phydev->autoneg = AUTONEG_DISABLE;
>> >
>> > What are you trying to achieve?
>>
>> Basically can't do Autoneg, I'll need to take a closer look.
>
> Hi Moritz
>
> What I actually think you mean is that it can only do 1Gbps. So you could
> autoneg, but only advertise 1Gbps. Look at masking out
> PHY_10BT_FEATURES and PHY_100BT_FEATURES.

It does either 1Gbps or 10Gbps (over SFP+), depending on which bitstream is
loaded into the FPGA. In the current setup I could also just have two different
compatible strings, since neither setup supports the other rate, but that might
change.

It seems getting rid of that part (the default values) now works, too.

I'll need to take a closer look tomorrow (and I need to retest with 1Gbps)

>
> Take a look at:
>
> http://elixir.free-electrons.com/linux/latest/source/drivers/net/ethernet/renesas/ravb_main.c#L1045

Will do.

Thanks for feedback,

Moritz

2017-07-14 05:55:08

by kernel test robot

[permalink] [raw]
Subject: Re: [PATCH 2/2] net: ethernet: nixge: Add support for National Instruments XGE netdev

Hi Moritz,

[auto build test WARNING on net-next/master]
[also build test WARNING on v4.12 next-20170713]
[if your patch is applied to the wrong git tree, please drop us a note to help improve the system]

url: https://github.com/0day-ci/linux/commits/Moritz-Fischer/dt-bindings-net-Add-bindings-for-National-Instruments-XGE-netdev/20170714-125718
config: ia64-allmodconfig (attached as .config)
compiler: ia64-linux-gcc (GCC) 6.2.0
reproduce:
wget https://raw.githubusercontent.com/01org/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
chmod +x ~/bin/make.cross
# save the attached .config to linux build tree
make.cross ARCH=ia64

All warnings (new ones prefixed by >>):

In file included from include/linux/if_ether.h:23:0,
from include/linux/etherdevice.h:25,
from drivers/net/ethernet/ni/nixge.c:14:
drivers/net/ethernet/ni/nixge.c: In function 'nixge_dma_bd_release':
>> drivers/net/ethernet/ni/nixge.c:241:17: warning: cast to pointer from integer of different size [-Wint-to-pointer-cast]
dev_kfree_skb((struct sk_buff *)
^
include/linux/skbuff.h:977:38: note: in definition of macro 'dev_kfree_skb'
#define dev_kfree_skb(a) consume_skb(a)
^
drivers/net/ethernet/ni/nixge.c: In function 'nixge_dma_bd_init':
>> drivers/net/ethernet/ni/nixge.c:299:35: warning: cast from pointer to integer of different size [-Wpointer-to-int-cast]
priv->rx_bd_v[i].sw_id_offset = (u32)skb;
^
drivers/net/ethernet/ni/nixge.c: In function 'nixge_start_xmit_done':
drivers/net/ethernet/ni/nixge.c:452:22: warning: cast to pointer from integer of different size [-Wint-to-pointer-cast]
dev_kfree_skb_irq((struct sk_buff *)cur_p->app4);
^
drivers/net/ethernet/ni/nixge.c: In function 'nixge_recv':
drivers/net/ethernet/ni/nixge.c:546:9: warning: cast to pointer from integer of different size [-Wint-to-pointer-cast]
skb = (struct sk_buff *)(cur_p->sw_id_offset);
^
drivers/net/ethernet/ni/nixge.c:577:25: warning: cast from pointer to integer of different size [-Wpointer-to-int-cast]
cur_p->sw_id_offset = (u32)new_skb;
^
drivers/net/ethernet/ni/nixge.c: In function 'nixge_dma_err_handler':
drivers/net/ethernet/ni/nixge.c:687:22: warning: cast to pointer from integer of different size [-Wint-to-pointer-cast]
dev_kfree_skb_irq((struct sk_buff *)cur_p->app4);
^

vim +241 drivers/net/ethernet/ni/nixge.c

228
229 #define nixge_ctrl_poll_timeout(priv, addr, val, cond, sleep_us, timeout_us) \
230 readl_poll_timeout((priv)->ctrl_regs + (addr), (val), cond, \
231 (sleep_us), (timeout_us))
232
233 static void nixge_dma_bd_release(struct net_device *ndev)
234 {
235 int i;
236 struct nixge_priv *priv = netdev_priv(ndev);
237
238 for (i = 0; i < RX_BD_NUM; i++) {
239 dma_unmap_single(ndev->dev.parent, priv->rx_bd_v[i].phys,
240 priv->max_frm_size, DMA_FROM_DEVICE);
> 241 dev_kfree_skb((struct sk_buff *)
242 (priv->rx_bd_v[i].sw_id_offset));
243 }
244
245 if (priv->rx_bd_v) {
246 dma_free_coherent(ndev->dev.parent,
247 sizeof(*priv->rx_bd_v) * RX_BD_NUM,
248 priv->rx_bd_v,
249 priv->rx_bd_p);
250 }
251 if (priv->tx_bd_v) {
252 dma_free_coherent(ndev->dev.parent,
253 sizeof(*priv->tx_bd_v) * TX_BD_NUM,
254 priv->tx_bd_v,
255 priv->tx_bd_p);
256 }
257 }
258
259 static int nixge_dma_bd_init(struct net_device *ndev)
260 {
261 u32 cr;
262 int i;
263 struct sk_buff *skb;
264 struct nixge_priv *priv = netdev_priv(ndev);
265
266 /* Reset the indexes which are used for accessing the BDs */
267 priv->tx_bd_ci = 0;
268 priv->tx_bd_tail = 0;
269 priv->rx_bd_ci = 0;
270
271 /* Allocate the Tx and Rx buffer descriptors. */
272 priv->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
273 sizeof(*priv->tx_bd_v) * TX_BD_NUM,
274 &priv->tx_bd_p, GFP_KERNEL);
275 if (!priv->tx_bd_v)
276 goto out;
277
278 priv->rx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
279 sizeof(*priv->rx_bd_v) * RX_BD_NUM,
280 &priv->rx_bd_p, GFP_KERNEL);
281 if (!priv->rx_bd_v)
282 goto out;
283
284 for (i = 0; i < TX_BD_NUM; i++) {
285 priv->tx_bd_v[i].next = priv->tx_bd_p +
286 sizeof(*priv->tx_bd_v) *
287 ((i + 1) % TX_BD_NUM);
288 }
289
290 for (i = 0; i < RX_BD_NUM; i++) {
291 priv->rx_bd_v[i].next = priv->rx_bd_p +
292 sizeof(*priv->rx_bd_v) *
293 ((i + 1) % RX_BD_NUM);
294
295 skb = netdev_alloc_skb_ip_align(ndev, priv->max_frm_size);
296 if (!skb)
297 goto out;
298
> 299 priv->rx_bd_v[i].sw_id_offset = (u32)skb;
300 priv->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent,
301 skb->data,
302 priv->max_frm_size,
303 DMA_FROM_DEVICE);
304 priv->rx_bd_v[i].cntrl = priv->max_frm_size;
305 }
306
307 /* Start updating the Rx channel control register */
308 cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
309 /* Update the interrupt coalesce count */
310 cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
311 ((priv->coalesce_count_rx) << XAXIDMA_COALESCE_SHIFT));
312 /* Update the delay timer count */
313 cr = ((cr & ~XAXIDMA_DELAY_MASK) |
314 (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
315 /* Enable coalesce, delay timer and error interrupts */
316 cr |= XAXIDMA_IRQ_ALL_MASK;
317 /* Write to the Rx channel control register */
318 nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr);
319
320 /* Start updating the Tx channel control register */
321 cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
322 /* Update the interrupt coalesce count */
323 cr = (((cr & ~XAXIDMA_COALESCE_MASK)) |
324 ((priv->coalesce_count_tx) << XAXIDMA_COALESCE_SHIFT));
325 /* Update the delay timer count */
326 cr = (((cr & ~XAXIDMA_DELAY_MASK)) |
327 (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
328 /* Enable coalesce, delay timer and error interrupts */
329 cr |= XAXIDMA_IRQ_ALL_MASK;
330 /* Write to the Tx channel control register */
331 nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET, cr);
332
333 /* Populate the tail pointer and bring the Rx Axi DMA engine out of
334 * halted state. This will make the Rx side ready for reception.
335 */
336 nixge_dma_write_reg(priv, XAXIDMA_RX_CDESC_OFFSET, priv->rx_bd_p);
337 cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
338 nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET,
339 cr | XAXIDMA_CR_RUNSTOP_MASK);
340 nixge_dma_write_reg(priv, XAXIDMA_RX_TDESC_OFFSET, priv->rx_bd_p +
341 (sizeof(*priv->rx_bd_v) * (RX_BD_NUM - 1)));
342
343 /* Write to the RS (Run-stop) bit in the Tx channel control register.
344 * Tx channel is now ready to run. But only after we write to the
345 * tail pointer register that the Tx channel will start transmitting.
346 */
347 nixge_dma_write_reg(priv, XAXIDMA_TX_CDESC_OFFSET, priv->tx_bd_p);
348 cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
349 nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET,
350 cr | XAXIDMA_CR_RUNSTOP_MASK);
351
352 return 0;
353 out:
354 nixge_dma_bd_release(ndev);
355 return -ENOMEM;
356 }
357

---
0-DAY kernel test infrastructure Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all Intel Corporation



2017-07-15 14:37:29

by kernel test robot

[permalink] [raw]
Subject: Re: [PATCH 2/2] net: ethernet: nixge: Add support for National Instruments XGE netdev

Hi Moritz,

[auto build test ERROR on net-next/master]
[also build test ERROR on v4.12 next-20170714]
[if your patch is applied to the wrong git tree, please drop us a note to help improve the system]

url: https://github.com/0day-ci/linux/commits/Moritz-Fischer/dt-bindings-net-Add-bindings-for-National-Instruments-XGE-netdev/20170714-125718
config: um-allyesconfig (attached as .config)
compiler: gcc-6 (Debian 6.2.0-3) 6.2.0 20160901
reproduce:
# save the attached .config to linux build tree
make ARCH=um

All errors (new ones prefixed by >>):

arch/um/drivers/built-in.o: In function `vde_open_real':
(.text+0xc9f1): warning: Using 'getgrnam' in statically linked applications requires at runtime the shared libraries from the glibc version used for linking
arch/um/drivers/built-in.o: In function `vde_open_real':
(.text+0xc83c): warning: Using 'getpwuid' in statically linked applications requires at runtime the shared libraries from the glibc version used for linking
arch/um/drivers/built-in.o: In function `vde_open_real':
(.text+0xcb55): warning: Using 'getaddrinfo' in statically linked applications requires at runtime the shared libraries from the glibc version used for linking
arch/um/drivers/built-in.o: In function `pcap_nametoaddr':
(.text+0x1d5e5): warning: Using 'gethostbyname' in statically linked applications requires at runtime the shared libraries from the glibc version used for linking
arch/um/drivers/built-in.o: In function `pcap_nametonetaddr':
(.text+0x1d685): warning: Using 'getnetbyname' in statically linked applications requires at runtime the shared libraries from the glibc version used for linking
arch/um/drivers/built-in.o: In function `pcap_nametoproto':
(.text+0x1d8a5): warning: Using 'getprotobyname' in statically linked applications requires at runtime the shared libraries from the glibc version used for linking
arch/um/drivers/built-in.o: In function `pcap_nametoport':
(.text+0x1d6d7): warning: Using 'getservbyname' in statically linked applications requires at runtime the shared libraries from the glibc version used for linking
drivers/built-in.o: In function `nixge_start_xmit':
>> include/linux/dma-mapping.h:210: undefined reference to `bad_dma_ops'
drivers/built-in.o: In function `nixge_dma_bd_release':
>> drivers/net/ethernet/ni/nixge.c:234: undefined reference to `bad_dma_ops'
drivers/built-in.o: In function `nixge_dma_bd_release':
include/linux/dma-mapping.h:504: undefined reference to `bad_dma_ops'
include/linux/dma-mapping.h:510: undefined reference to `bad_dma_ops'
include/linux/dma-mapping.h:504: undefined reference to `bad_dma_ops'
drivers/built-in.o:drivers/net/ethernet/ni/nixge.c:252: more undefined references to `bad_dma_ops' follow
drivers/built-in.o: In function `nixge_mdio_setup':
>> drivers/net/ethernet/ni/nixge.c:1039: undefined reference to `of_address_to_resource'
drivers/built-in.o: In function `nixge_probe':
>> drivers/net/ethernet/ni/nixge.c:1120: undefined reference to `devm_ioremap_resource'
drivers/built-in.o: In function `img_ascii_lcd_probe':
drivers/auxdisplay/img-ascii-lcd.c:386: undefined reference to `devm_ioremap_resource'
collect2: error: ld returned 1 exit status

vim +234 drivers/net/ethernet/ni/nixge.c

228
229 #define nixge_ctrl_poll_timeout(priv, addr, val, cond, sleep_us, timeout_us) \
230 readl_poll_timeout((priv)->ctrl_regs + (addr), (val), cond, \
231 (sleep_us), (timeout_us))
232
233 static void nixge_dma_bd_release(struct net_device *ndev)
> 234 {
235 int i;
236 struct nixge_priv *priv = netdev_priv(ndev);
237
238 for (i = 0; i < RX_BD_NUM; i++) {
239 dma_unmap_single(ndev->dev.parent, priv->rx_bd_v[i].phys,
240 priv->max_frm_size, DMA_FROM_DEVICE);
241 dev_kfree_skb((struct sk_buff *)
242 (priv->rx_bd_v[i].sw_id_offset));
243 }
244
245 if (priv->rx_bd_v) {
246 dma_free_coherent(ndev->dev.parent,
247 sizeof(*priv->rx_bd_v) * RX_BD_NUM,
248 priv->rx_bd_v,
249 priv->rx_bd_p);
250 }
251 if (priv->tx_bd_v) {
252 dma_free_coherent(ndev->dev.parent,
253 sizeof(*priv->tx_bd_v) * TX_BD_NUM,
254 priv->tx_bd_v,
255 priv->tx_bd_p);
256 }
257 }
258

---
0-DAY kernel test infrastructure Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all Intel Corporation

