2015-11-19 19:13:50

by Ondrej Zary

Subject: [PATCH 1/3 v2] dl2k: Handle memory allocation errors in alloc_list

If memory allocation fails in alloc_list(), free the already allocated
memory and return -ENOMEM. In rio_open(), call alloc_list() first and
abort if it fails. Move HW access (set RFDListPtr) out of alloc_list().

Signed-off-by: Ondrej Zary <[email protected]>
---
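For readers skimming the diff, the resulting rio_open() flow is roughly this
(a condensed sketch assembled from the hunks below, not a literal hunk; the HW
setup between request_irq() and the RFDListPtr writes is elided):

/* Sketch of rio_open() after this patch: allocate first, undo on failure */
static int rio_open(struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *ioaddr = np->ioaddr;
        const int irq = np->pdev->irq;
        int err;

        err = alloc_list(dev);          /* may fail with -ENOMEM... */
        if (err)
                return err;             /* ...and there is nothing to undo yet */

        err = request_irq(irq, rio_interrupt, IRQF_SHARED, dev->name, dev);
        if (err) {
                free_list(dev);         /* unwind the ring and skb allocation */
                return err;
        }

        /* chip reset and the rest of the HW setup (unchanged) go here */

        /* RFDListPtr is now programmed from rio_open(), not from alloc_list() */
        dw32(RFDListPtr0, np->rx_ring_dma);
        dw32(RFDListPtr1, 0);

        /* remainder of rio_open() is unchanged */
        return 0;
}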
drivers/net/ethernet/dlink/dl2k.c | 182 ++++++++++++++++++++-----------------
1 file changed, 97 insertions(+), 85 deletions(-)

diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index ccca479..ec0eb7f 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -70,7 +70,6 @@ static const int multicast_filter_limit = 0x40;
static int rio_open (struct net_device *dev);
static void rio_timer (unsigned long data);
static void rio_tx_timeout (struct net_device *dev);
-static void alloc_list (struct net_device *dev);
static netdev_tx_t start_xmit (struct sk_buff *skb, struct net_device *dev);
static irqreturn_t rio_interrupt (int irq, void *dev_instance);
static void rio_free_tx (struct net_device *dev, int irq);
@@ -446,6 +445,92 @@ static void rio_set_led_mode(struct net_device *dev)
dw32(ASICCtrl, mode);
}

+static inline dma_addr_t desc_to_dma(struct netdev_desc *desc)
+{
+ return le64_to_cpu(desc->fraginfo) & DMA_BIT_MASK(48);
+}
+
+static void free_list(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ struct sk_buff *skb;
+ int i;
+
+ /* Free all the skbuffs in the queue. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ skb = np->rx_skbuff[i];
+ if (skb) {
+ pci_unmap_single(np->pdev, desc_to_dma(&np->rx_ring[i]),
+ skb->len, PCI_DMA_FROMDEVICE);
+ dev_kfree_skb(skb);
+ np->rx_skbuff[i] = NULL;
+ }
+ np->rx_ring[i].status = 0;
+ np->rx_ring[i].fraginfo = 0;
+ }
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ skb = np->tx_skbuff[i];
+ if (skb) {
+ pci_unmap_single(np->pdev, desc_to_dma(&np->tx_ring[i]),
+ skb->len, PCI_DMA_TODEVICE);
+ dev_kfree_skb(skb);
+ np->tx_skbuff[i] = NULL;
+ }
+ }
+}
+
+ /* allocate and initialize Tx and Rx descriptors */
+static int alloc_list(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ int i;
+
+ np->cur_rx = np->cur_tx = 0;
+ np->old_rx = np->old_tx = 0;
+ np->rx_buf_sz = (dev->mtu <= 1500 ? PACKET_SIZE : dev->mtu + 32);
+
+ /* Initialize Tx descriptors, TFDListPtr leaves in start_xmit(). */
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ np->tx_skbuff[i] = NULL;
+ np->tx_ring[i].status = cpu_to_le64(TFDDone);
+ np->tx_ring[i].next_desc = cpu_to_le64(np->tx_ring_dma +
+ ((i + 1) % TX_RING_SIZE) *
+ sizeof(struct netdev_desc));
+ }
+
+ /* Initialize Rx descriptors */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ np->rx_ring[i].next_desc = cpu_to_le64(np->rx_ring_dma +
+ ((i + 1) % RX_RING_SIZE) *
+ sizeof(struct netdev_desc));
+ np->rx_ring[i].status = 0;
+ np->rx_ring[i].fraginfo = 0;
+ np->rx_skbuff[i] = NULL;
+ }
+
+ /* Allocate the rx buffers */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ /* Allocated fixed size of skbuff */
+ struct sk_buff *skb;
+
+ skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz);
+ np->rx_skbuff[i] = skb;
+ if (!skb) {
+ free_list(dev);
+ return -ENOMEM;
+ }
+
+ /* Rubicon now supports 40 bits of addressing space. */
+ np->rx_ring[i].fraginfo =
+ cpu_to_le64(pci_map_single(
+ np->pdev, skb->data, np->rx_buf_sz,
+ PCI_DMA_FROMDEVICE));
+ np->rx_ring[i].fraginfo |= cpu_to_le64((u64)np->rx_buf_sz << 48);
+ }
+
+ return 0;
+}
+
static int
rio_open (struct net_device *dev)
{
@@ -455,10 +540,16 @@ rio_open (struct net_device *dev)
int i;
u16 macctrl;

- i = request_irq(irq, rio_interrupt, IRQF_SHARED, dev->name, dev);
+ i = alloc_list(dev);
if (i)
return i;

+ i = request_irq(irq, rio_interrupt, IRQF_SHARED, dev->name, dev);
+ if (i) {
+ free_list(dev);
+ return i;
+ }
+
/* Reset all logic functions */
dw16(ASICCtrl + 2,
GlobalReset | DMAReset | FIFOReset | NetworkReset | HostReset);
@@ -473,7 +564,9 @@ rio_open (struct net_device *dev)
if (np->jumbo != 0)
dw16(MaxFrameSize, MAX_JUMBO+14);

- alloc_list (dev);
+ /* Set RFDListPtr */
+ dw32(RFDListPtr0, np->rx_ring_dma);
+ dw32(RFDListPtr1, 0);

/* Set station address */
/* 16 or 32-bit access is required by TC9020 datasheet but 8-bit works
@@ -586,60 +679,6 @@ rio_tx_timeout (struct net_device *dev)
dev->trans_start = jiffies; /* prevent tx timeout */
}

- /* allocate and initialize Tx and Rx descriptors */
-static void
-alloc_list (struct net_device *dev)
-{
- struct netdev_private *np = netdev_priv(dev);
- void __iomem *ioaddr = np->ioaddr;
- int i;
-
- np->cur_rx = np->cur_tx = 0;
- np->old_rx = np->old_tx = 0;
- np->rx_buf_sz = (dev->mtu <= 1500 ? PACKET_SIZE : dev->mtu + 32);
-
- /* Initialize Tx descriptors, TFDListPtr leaves in start_xmit(). */
- for (i = 0; i < TX_RING_SIZE; i++) {
- np->tx_skbuff[i] = NULL;
- np->tx_ring[i].status = cpu_to_le64 (TFDDone);
- np->tx_ring[i].next_desc = cpu_to_le64 (np->tx_ring_dma +
- ((i+1)%TX_RING_SIZE) *
- sizeof (struct netdev_desc));
- }
-
- /* Initialize Rx descriptors */
- for (i = 0; i < RX_RING_SIZE; i++) {
- np->rx_ring[i].next_desc = cpu_to_le64 (np->rx_ring_dma +
- ((i + 1) % RX_RING_SIZE) *
- sizeof (struct netdev_desc));
- np->rx_ring[i].status = 0;
- np->rx_ring[i].fraginfo = 0;
- np->rx_skbuff[i] = NULL;
- }
-
- /* Allocate the rx buffers */
- for (i = 0; i < RX_RING_SIZE; i++) {
- /* Allocated fixed size of skbuff */
- struct sk_buff *skb;
-
- skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz);
- np->rx_skbuff[i] = skb;
- if (skb == NULL)
- break;
-
- /* Rubicon now supports 40 bits of addressing space. */
- np->rx_ring[i].fraginfo =
- cpu_to_le64 ( pci_map_single (
- np->pdev, skb->data, np->rx_buf_sz,
- PCI_DMA_FROMDEVICE));
- np->rx_ring[i].fraginfo |= cpu_to_le64((u64)np->rx_buf_sz << 48);
- }
-
- /* Set RFDListPtr */
- dw32(RFDListPtr0, np->rx_ring_dma);
- dw32(RFDListPtr1, 0);
-}
-
static netdev_tx_t
start_xmit (struct sk_buff *skb, struct net_device *dev)
{
@@ -748,11 +787,6 @@ rio_interrupt (int irq, void *dev_instance)
return IRQ_RETVAL(handled);
}

-static inline dma_addr_t desc_to_dma(struct netdev_desc *desc)
-{
- return le64_to_cpu(desc->fraginfo) & DMA_BIT_MASK(48);
-}
-
static void
rio_free_tx (struct net_device *dev, int irq)
{
@@ -1733,8 +1767,6 @@ rio_close (struct net_device *dev)
void __iomem *ioaddr = np->ioaddr;

struct pci_dev *pdev = np->pdev;
- struct sk_buff *skb;
- int i;

netif_stop_queue (dev);

@@ -1747,27 +1779,7 @@ rio_close (struct net_device *dev)
free_irq(pdev->irq, dev);
del_timer_sync (&np->timer);

- /* Free all the skbuffs in the queue. */
- for (i = 0; i < RX_RING_SIZE; i++) {
- skb = np->rx_skbuff[i];
- if (skb) {
- pci_unmap_single(pdev, desc_to_dma(&np->rx_ring[i]),
- skb->len, PCI_DMA_FROMDEVICE);
- dev_kfree_skb (skb);
- np->rx_skbuff[i] = NULL;
- }
- np->rx_ring[i].status = 0;
- np->rx_ring[i].fraginfo = 0;
- }
- for (i = 0; i < TX_RING_SIZE; i++) {
- skb = np->tx_skbuff[i];
- if (skb) {
- pci_unmap_single(pdev, desc_to_dma(&np->tx_ring[i]),
- skb->len, PCI_DMA_TODEVICE);
- dev_kfree_skb (skb);
- np->tx_skbuff[i] = NULL;
- }
- }
+ free_list(dev);

return 0;
}
--
Ondrej Zary


2015-11-19 19:13:21

by Ondrej Zary

Subject: [PATCH 2/3] dl2k: Reorder and cleanup initialization

Move HW init and stop into separate functions.
Request IRQ only after the HW has been reset (so interrupts are
disabled and no stale interrupts are pending).

Signed-off-by: Ondrej Zary <[email protected]>
---
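The net effect on the open path (a condensed sketch assembled from the hunks
below, not a literal hunk):

/* Sketch of the reordered rio_open() after this patch */
static int rio_open(struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        const int irq = np->pdev->irq;
        int err;

        err = alloc_list(dev);
        if (err)
                return err;

        rio_hw_init(dev);       /* full chip reset + setup: interrupts are
                                 * masked and no stale interrupt is pending */

        err = request_irq(irq, rio_interrupt, IRQF_SHARED, dev->name, dev);
        if (err) {
                rio_hw_stop(dev);       /* mask interrupts, stop Tx/Rx */
                free_list(dev);
                return err;
        }

        setup_timer(&np->timer, rio_timer, (unsigned long)dev);
        np->timer.expires = jiffies + 1 * HZ;
        add_timer(&np->timer);

        netif_start_queue(dev);

        /* the tail of rio_open() is unchanged by this patch */
        return 0;
}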
drivers/net/ethernet/dlink/dl2k.c | 95 ++++++++++++++++++++++---------------
1 file changed, 56 insertions(+), 39 deletions(-)

diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index ec0eb7f..9e9baa0 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -252,19 +252,6 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto err_out_unmap_rx;

- if (np->chip_id == CHIP_IP1000A &&
- (np->pdev->revision == 0x40 || np->pdev->revision == 0x41)) {
- /* PHY magic taken from ipg driver, undocumented registers */
- mii_write(dev, np->phy_addr, 31, 0x0001);
- mii_write(dev, np->phy_addr, 27, 0x01e0);
- mii_write(dev, np->phy_addr, 31, 0x0002);
- mii_write(dev, np->phy_addr, 27, 0xeb8e);
- mii_write(dev, np->phy_addr, 31, 0x0000);
- mii_write(dev, np->phy_addr, 30, 0x005e);
- /* advertise 1000BASE-T half & full duplex, prefer MASTER */
- mii_write(dev, np->phy_addr, MII_CTRL1000, 0x0700);
- }
-
/* Fiber device? */
np->phy_media = (dr16(ASICCtrl) & PhyMedia) ? 1 : 0;
np->link_status = 0;
@@ -274,13 +261,11 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
if (np->an_enable == 2) {
np->an_enable = 1;
}
- mii_set_media_pcs (dev);
} else {
/* Auto-Negotiation is mandatory for 1000BASE-T,
IEEE 802.3ab Annex 28D page 14 */
if (np->speed == 1000)
np->an_enable = 1;
- mii_set_media (dev);
}

err = register_netdev (dev);
@@ -531,25 +516,13 @@ static int alloc_list(struct net_device *dev)
return 0;
}

-static int
-rio_open (struct net_device *dev)
+static void rio_hw_init(struct net_device *dev)
{
struct netdev_private *np = netdev_priv(dev);
void __iomem *ioaddr = np->ioaddr;
- const int irq = np->pdev->irq;
int i;
u16 macctrl;

- i = alloc_list(dev);
- if (i)
- return i;
-
- i = request_irq(irq, rio_interrupt, IRQF_SHARED, dev->name, dev);
- if (i) {
- free_list(dev);
- return i;
- }
-
/* Reset all logic functions */
dw16(ASICCtrl + 2,
GlobalReset | DMAReset | FIFOReset | NetworkReset | HostReset);
@@ -560,6 +533,24 @@ rio_open (struct net_device *dev)
/* DebugCtrl bit 4, 5, 9 must set */
dw32(DebugCtrl, dr32(DebugCtrl) | 0x0230);

+ if (np->chip_id == CHIP_IP1000A &&
+ (np->pdev->revision == 0x40 || np->pdev->revision == 0x41)) {
+ /* PHY magic taken from ipg driver, undocumented registers */
+ mii_write(dev, np->phy_addr, 31, 0x0001);
+ mii_write(dev, np->phy_addr, 27, 0x01e0);
+ mii_write(dev, np->phy_addr, 31, 0x0002);
+ mii_write(dev, np->phy_addr, 27, 0xeb8e);
+ mii_write(dev, np->phy_addr, 31, 0x0000);
+ mii_write(dev, np->phy_addr, 30, 0x005e);
+ /* advertise 1000BASE-T half & full duplex, prefer MASTER */
+ mii_write(dev, np->phy_addr, MII_CTRL1000, 0x0700);
+ }
+
+ if (np->phy_media)
+ mii_set_media_pcs(dev);
+ else
+ mii_set_media(dev);
+
/* Jumbo frame */
if (np->jumbo != 0)
dw16(MaxFrameSize, MAX_JUMBO+14);
@@ -602,10 +593,6 @@ rio_open (struct net_device *dev)
dw32(MACCtrl, dr32(MACCtrl) | AutoVLANuntagging);
}

- setup_timer(&np->timer, rio_timer, (unsigned long)dev);
- np->timer.expires = jiffies + 1*HZ;
- add_timer (&np->timer);
-
/* Start Tx/Rx */
dw32(MACCtrl, dr32(MACCtrl) | StatsEnable | RxEnable | TxEnable);

@@ -615,6 +602,42 @@ rio_open (struct net_device *dev)
macctrl |= (np->tx_flow) ? TxFlowControlEnable : 0;
macctrl |= (np->rx_flow) ? RxFlowControlEnable : 0;
dw16(MACCtrl, macctrl);
+}
+
+static void rio_hw_stop(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->ioaddr;
+
+ /* Disable interrupts */
+ dw16(IntEnable, 0);
+
+ /* Stop Tx and Rx logics */
+ dw32(MACCtrl, TxDisable | RxDisable | StatsDisable);
+}
+
+static int rio_open(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ const int irq = np->pdev->irq;
+ int i;
+
+ i = alloc_list(dev);
+ if (i)
+ return i;
+
+ rio_hw_init(dev);
+
+ i = request_irq(irq, rio_interrupt, IRQF_SHARED, dev->name, dev);
+ if (i) {
+ rio_hw_stop(dev);
+ free_list(dev);
+ return i;
+ }
+
+ setup_timer(&np->timer, rio_timer, (unsigned long)dev);
+ np->timer.expires = jiffies + 1 * HZ;
+ add_timer(&np->timer);

netif_start_queue (dev);

@@ -1764,17 +1787,11 @@ static int
rio_close (struct net_device *dev)
{
struct netdev_private *np = netdev_priv(dev);
- void __iomem *ioaddr = np->ioaddr;
-
struct pci_dev *pdev = np->pdev;

netif_stop_queue (dev);

- /* Disable interrupts */
- dw16(IntEnable, 0);
-
- /* Stop Tx and Rx logics */
- dw32(MACCtrl, TxDisable | RxDisable | StatsDisable);
+ rio_hw_stop(dev);

free_irq(pdev->irq, dev);
del_timer_sync (&np->timer);
--
Ondrej Zary

2015-11-19 19:13:20

by Ondrej Zary

Subject: [PATCH 3/3 v3] dl2k: Implement suspend

Add suspend/resume support to the dl2k driver.
This requires the RX/TX rings to be reset, so split the required
functionality out of alloc_list() into a new rio_reset_ring().

Tested on Asus NX1101 (IP1000A) and D-Link DGE-550T (DL-2000).

Signed-off-by: Ondrej Zary <[email protected]>
---
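The resume path then rewinds the rings and redoes the rio_open()-style
bring-up (a condensed, commented sketch of the hunks below):

/* Sketch of the resume path added by this patch */
static int rio_resume(struct device *device)
{
        struct net_device *dev = dev_get_drvdata(device);
        struct netdev_private *np = netdev_priv(dev);

        if (!netif_running(dev))
                return 0;               /* interface was down; nothing to restore */

        rio_reset_ring(np);             /* rewind ring indices, mark TFDs done and
                                         * RFDs empty; the skbs mapped by alloc_list()
                                         * stay allocated across suspend */
        rio_hw_init(dev);               /* same chip bring-up as rio_open() */

        np->timer.expires = jiffies + 1 * HZ;
        add_timer(&np->timer);          /* restart the link timer stopped in suspend */
        netif_device_attach(dev);
        dl2k_enable_int(np);

        return 0;
}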
drivers/net/ethernet/dlink/dl2k.c | 79 ++++++++++++++++++++++++++++++-------
1 file changed, 65 insertions(+), 14 deletions(-)

diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index 9e9baa0..f92b6d9 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -464,36 +464,40 @@ static void free_list(struct net_device *dev)
}
}

+static void rio_reset_ring(struct netdev_private *np)
+{
+ int i;
+
+ np->cur_rx = 0;
+ np->cur_tx = 0;
+ np->old_rx = 0;
+ np->old_tx = 0;
+
+ for (i = 0; i < TX_RING_SIZE; i++)
+ np->tx_ring[i].status = cpu_to_le64(TFDDone);
+
+ for (i = 0; i < RX_RING_SIZE; i++)
+ np->rx_ring[i].status = 0;
+}
+
/* allocate and initialize Tx and Rx descriptors */
static int alloc_list(struct net_device *dev)
{
struct netdev_private *np = netdev_priv(dev);
int i;

- np->cur_rx = np->cur_tx = 0;
- np->old_rx = np->old_tx = 0;
+ rio_reset_ring(np);
np->rx_buf_sz = (dev->mtu <= 1500 ? PACKET_SIZE : dev->mtu + 32);

/* Initialize Tx descriptors, TFDListPtr leaves in start_xmit(). */
for (i = 0; i < TX_RING_SIZE; i++) {
np->tx_skbuff[i] = NULL;
- np->tx_ring[i].status = cpu_to_le64(TFDDone);
np->tx_ring[i].next_desc = cpu_to_le64(np->tx_ring_dma +
((i + 1) % TX_RING_SIZE) *
sizeof(struct netdev_desc));
}

- /* Initialize Rx descriptors */
- for (i = 0; i < RX_RING_SIZE; i++) {
- np->rx_ring[i].next_desc = cpu_to_le64(np->rx_ring_dma +
- ((i + 1) % RX_RING_SIZE) *
- sizeof(struct netdev_desc));
- np->rx_ring[i].status = 0;
- np->rx_ring[i].fraginfo = 0;
- np->rx_skbuff[i] = NULL;
- }
-
- /* Allocate the rx buffers */
+ /* Initialize Rx descriptors & allocate buffers */
for (i = 0; i < RX_RING_SIZE; i++) {
/* Allocated fixed size of skbuff */
struct sk_buff *skb;
@@ -505,6 +509,9 @@ static int alloc_list(struct net_device *dev)
return -ENOMEM;
}

+ np->rx_ring[i].next_desc = cpu_to_le64(np->rx_ring_dma +
+ ((i + 1) % RX_RING_SIZE) *
+ sizeof(struct netdev_desc));
/* Rubicon now supports 40 bits of addressing space. */
np->rx_ring[i].fraginfo =
cpu_to_le64(pci_map_single(
@@ -1824,11 +1831,55 @@ rio_remove1 (struct pci_dev *pdev)
}
}

+#ifdef CONFIG_PM_SLEEP
+static int rio_suspend(struct device *device)
+{
+ struct net_device *dev = dev_get_drvdata(device);
+ struct netdev_private *np = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return 0;
+
+ netif_device_detach(dev);
+ del_timer_sync(&np->timer);
+ rio_hw_stop(dev);
+
+ return 0;
+}
+
+static int rio_resume(struct device *device)
+{
+ struct net_device *dev = dev_get_drvdata(device);
+ struct netdev_private *np = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return 0;
+
+ rio_reset_ring(np);
+ rio_hw_init(dev);
+ np->timer.expires = jiffies + 1 * HZ;
+ add_timer(&np->timer);
+ netif_device_attach(dev);
+ dl2k_enable_int(np);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(rio_pm_ops, rio_suspend, rio_resume);
+#define RIO_PM_OPS (&rio_pm_ops)
+
+#else
+
+#define RIO_PM_OPS NULL
+
+#endif /* CONFIG_PM_SLEEP */
+
static struct pci_driver rio_driver = {
.name = "dl2k",
.id_table = rio_pci_tbl,
.probe = rio_probe1,
.remove = rio_remove1,
+ .driver.pm = RIO_PM_OPS,
};

module_pci_driver(rio_driver);
--
Ondrej Zary

2015-11-20 19:49:06

by David Miller

Subject: Re: [PATCH 1/3 v2] dl2k: Handle memory allocation errors in alloc_list

From: Ondrej Zary <[email protected]>
Date: Thu, 19 Nov 2015 20:13:04 +0100

> If memory allocation fails in alloc_list(), free the already allocated
> memory and return -ENOMEM. In rio_open(), call alloc_list() first and
> abort if it fails. Move HW access (set RFDListPtr) out of alloc_list().
>
> Signed-off-by: Ondrej Zary <[email protected]>

Applied to net-next.

2015-11-20 19:49:12

by David Miller

Subject: Re: [PATCH 2/3] dl2k: Reorder and cleanup initialization

From: Ondrej Zary <[email protected]>
Date: Thu, 19 Nov 2015 20:13:05 +0100

> Move HW init and stop into separate functions.
> Request IRQ only after the HW has been reset (so interrupts are
> disabled and no stale interrupts are pending).
>
> Signed-off-by: Ondrej Zary <[email protected]>

Applied to net-next.

2015-11-20 19:49:21

by David Miller

Subject: Re: [PATCH 3/3 v3] dl2k: Implement suspend

From: Ondrej Zary <[email protected]>
Date: Thu, 19 Nov 2015 20:13:06 +0100

> Add suspend/resume support to the dl2k driver.
> This requires the RX/TX rings to be reset, so split the required
> functionality out of alloc_list() into a new rio_reset_ring().
>
> Tested on Asus NX1101 (IP1000A) and D-Link DGE-550T (DL-2000).
>
> Signed-off-by: Ondrej Zary <[email protected]>

Applied to net-next.