From: Thomas Bogendoerfer <tbogendoerfer@suse.de>
To: Jonathan Corbet, Ralf Baechle, Paul Burton, James Hogan,
	Dmitry Torokhov, Lee Jones, "David S. Miller",
	Srinivas Kandagatla, Alessandro Zummo, Alexandre Belloni,
	Greg Kroah-Hartman, Jiri Slaby, Evgeniy Polyakov,
	linux-doc@vger.kernel.org, linux-kernel@vger.kernel.org,
	linux-mips@vger.kernel.org, linux-input@vger.kernel.org,
	netdev@vger.kernel.org, linux-rtc@vger.kernel.org,
	linux-serial@vger.kernel.org
Subject: [PATCH v5 12/17] net: sgi: ioc3-eth: use dma-direct for dma allocations
Date: Mon, 19 Aug 2019 18:31:35 +0200
Message-Id: <20190819163144.3478-13-tbogendoerfer@suse.de>
In-Reply-To: <20190819163144.3478-1-tbogendoerfer@suse.de>
References: <20190819163144.3478-1-tbogendoerfer@suse.de>

Replace the homegrown DMA memory allocation, which works only on
SGI-IP27 machines, with generic DMA allocations.

Signed-off-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
---
 drivers/net/ethernet/sgi/ioc3-eth.c | 107 ++++++++++++++++++++++++++----------
 1 file changed, 77 insertions(+), 30 deletions(-)
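Note on the API used below: dma_direct_alloc_pages() returns the ring's
kernel virtual address and, through its third argument, the bus address
(dma_addr_t) the IOC3 has to be programmed with; dma_direct_free_pages()
releases both. A minimal sketch of that alloc/free pairing, mirroring the
calls in the hunks below ("ring"/"ring_dma" are hypothetical names and
error handling is trimmed):

	#include <linux/dma-direct.h>

	static void *ring;		/* CPU virtual address of the ring */
	static dma_addr_t ring_dma;	/* bus address the device DMAs to  */

	static int ring_alloc(struct device *dma_dev, size_t size)
	{
		/* hands back both addresses; the final 0 means no DMA attrs */
		ring = dma_direct_alloc_pages(dma_dev, size, &ring_dma,
					      GFP_KERNEL | __GFP_ZERO, 0);
		return ring ? 0 : -ENOMEM;
	}

	static void ring_free(struct device *dma_dev, size_t size)
	{
		dma_direct_free_pages(dma_dev, size, ring, ring_dma, 0);
	}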
diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c
index 7f85a3bfef14..647e3926bd71 100644
--- a/drivers/net/ethernet/sgi/ioc3-eth.c
+++ b/drivers/net/ethernet/sgi/ioc3-eth.c
@@ -38,7 +38,6 @@
 #include
 #include
 #include
-#include
 #include
 
 #ifdef CONFIG_SERIAL_8250
@@ -51,6 +50,8 @@
 #include
 #include
 #include
+#include
+
 #include
 #include
@@ -66,10 +67,12 @@
 #define RX_BUFFS		64
 #define RX_RING_ENTRIES		512	/* fixed in hardware */
 #define RX_RING_MASK		(RX_RING_ENTRIES - 1)
+#define RX_RING_SIZE		(RX_RING_ENTRIES * sizeof(u64))
 
 /* 128 TX buffers (not tunable) */
 #define TX_RING_ENTRIES		128
 #define TX_RING_MASK		(TX_RING_ENTRIES - 1)
+#define TX_RING_SIZE		(TX_RING_ENTRIES * sizeof(struct ioc3_etxd))
 
 /* BEWARE: The IOC3 documentation documents the size of rx buffers as
  * 1644 while it's actually 1664. This one was nasty to track down...
@@ -84,9 +87,12 @@
 struct ioc3_private {
 	struct ioc3_ethregs *regs;
 	struct ioc3 *all_regs;
+	struct device *dma_dev;
 	u32 *ssram;
 	unsigned long *rxr;		/* pointer to receiver ring */
 	struct ioc3_etxd *txr;
+	dma_addr_t rxr_dma;
+	dma_addr_t txr_dma;
 	struct sk_buff *rx_skbs[RX_RING_ENTRIES];
 	struct sk_buff *tx_skbs[TX_RING_ENTRIES];
 	int rx_ci;			/* RX consumer index */
@@ -116,18 +122,22 @@ static void ioc3_init(struct net_device *dev);
 static const char ioc3_str[] = "IOC3 Ethernet";
 static const struct ethtool_ops ioc3_ethtool_ops;
 
-static inline unsigned long ioc3_map(void *ptr, unsigned long vdev)
+#ifdef CONFIG_PCI_XTALK_BRIDGE
+static inline unsigned long ioc3_map(dma_addr_t addr, unsigned long attr)
 {
-#ifdef CONFIG_SGI_IP27
-	vdev <<= 57;   /* Shift to PCI64_ATTR_VIRTUAL */
+	return (addr & ~PCI64_ATTR_BAR) | attr;
+}
 
-	return vdev | (0xaUL << PCI64_ATTR_TARG_SHFT) | PCI64_ATTR_PREF |
-	       ((unsigned long)ptr & TO_PHYS_MASK);
+#define ERBAR_VAL	(ERBAR_BARRIER_BIT << ERBAR_RXBARR_SHIFT)
 #else
-	return virt_to_bus(ptr);
-#endif
+static inline unsigned long ioc3_map(dma_addr_t addr, unsigned long attr)
+{
+	return addr;
 }
+#define ERBAR_VAL	0
+#endif
+
 #define IOC3_SIZE 0x100000
@@ -494,6 +504,7 @@ static inline void ioc3_rx(struct net_device *dev)
 	int rx_entry, n_entry, len;
 	struct ioc3_erxbuf *rxb;
 	unsigned long *rxr;
+	dma_addr_t d;
 	u32 w0, err;
 
 	rxr = ip->rxr;		/* Ring base */
@@ -550,7 +561,9 @@ static inline void ioc3_rx(struct net_device *dev)
 		dev->stats.rx_frame_errors++;
next:
 		ip->rx_skbs[n_entry] = new_skb;
-		rxr[n_entry] = cpu_to_be64(ioc3_map(rxb, 1));
+		d = dma_map_single(ip->dma_dev, rxb, RX_BUF_SIZE,
+				   DMA_FROM_DEVICE);
+		rxr[n_entry] = cpu_to_be64(ioc3_map(d, PCI64_ATTR_BAR));
 		rxb->w0 = 0;				/* Clear valid flag */
 
 		n_entry = (n_entry + 1) & RX_RING_MASK; /* Update erpir */
@@ -754,6 +767,26 @@ static inline void ioc3_clean_rx_ring(struct ioc3_private *ip)
 	}
 }
 
+static inline void ioc3_tx_unmap(struct ioc3_private *ip, int entry)
+{
+	struct ioc3_etxd *desc;
+	u32 cmd, bufcnt, len;
+
+	desc = &ip->txr[entry];
+	cmd = be32_to_cpu(desc->cmd);
+	bufcnt = be32_to_cpu(desc->bufcnt);
+	if (cmd & ETXD_B1V) {
+		len = (bufcnt & ETXD_B1CNT_MASK) >> ETXD_B1CNT_SHIFT;
+		dma_unmap_single(ip->dma_dev, be64_to_cpu(desc->p1),
+				 len, DMA_TO_DEVICE);
+	}
+	if (cmd & ETXD_B2V) {
+		len = (bufcnt & ETXD_B2CNT_MASK) >> ETXD_B2CNT_SHIFT;
+		dma_unmap_single(ip->dma_dev, be64_to_cpu(desc->p2),
+				 len, DMA_TO_DEVICE);
+	}
+}
+
 static inline void ioc3_clean_tx_ring(struct ioc3_private *ip)
 {
 	struct sk_buff *skb;
@@ -762,6 +795,7 @@ static inline void ioc3_clean_tx_ring(struct ioc3_private *ip)
 	for (i = 0; i < TX_RING_ENTRIES; i++) {
 		skb = ip->tx_skbs[i];
 		if (skb) {
+			ioc3_tx_unmap(ip, i);
 			ip->tx_skbs[i] = NULL;
 			dev_kfree_skb_any(skb);
 		}
@@ -778,7 +812,8 @@ static void ioc3_free_rings(struct ioc3_private *ip)
 
 	if (ip->txr) {
 		ioc3_clean_tx_ring(ip);
-		free_pages((unsigned long)ip->txr, 2);
+		dma_direct_free_pages(ip->dma_dev, TX_RING_SIZE, ip->txr,
+				      ip->txr_dma, 0);
 		ip->txr = NULL;
 	}
@@ -788,12 +823,17 @@ static void ioc3_free_rings(struct ioc3_private *ip)
 
 		while (n_entry != rx_entry) {
 			skb = ip->rx_skbs[n_entry];
-			if (skb)
+			if (skb) {
+				dma_unmap_single(ip->dma_dev,
+						 be64_to_cpu(ip->rxr[n_entry]),
+						 RX_BUF_SIZE, DMA_FROM_DEVICE);
 				dev_kfree_skb_any(skb);
+			}
 
 			n_entry = (n_entry + 1) & RX_RING_MASK;
 		}
-		free_page((unsigned long)ip->rxr);
+		dma_direct_free_pages(ip->dma_dev, RX_RING_SIZE, ip->rxr,
+				      ip->rxr_dma, 0);
 		ip->rxr = NULL;
 	}
 }
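The RX buffers and TX fragments above are streaming mappings: every
dma_map_single() must later be matched by a dma_unmap_single() with the
same device, length and direction, which is exactly what ioc3_tx_unmap()
and the reworked ioc3_free_rings() provide. A minimal sketch of the
pairing ("buf" and BUF_SIZE are hypothetical stand-ins; the DMA API
documentation also asks for a dma_mapping_error() check, which this
patch does not add):

	dma_addr_t d;

	d = dma_map_single(ip->dma_dev, buf, BUF_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(ip->dma_dev, d))
		return -ENOMEM;			/* mapping failed */
	/* ... point a descriptor at d and let the chip DMA into it ... */
	dma_unmap_single(ip->dma_dev, d, BUF_SIZE, DMA_FROM_DEVICE);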
@@ -801,16 +841,19 @@ static void ioc3_free_rings(struct ioc3_private *ip)
 static void ioc3_alloc_rings(struct net_device *dev)
 {
 	struct ioc3_private *ip = netdev_priv(dev);
-	struct ioc3_erxbuf *rxb;
 	unsigned long *rxr;
+	dma_addr_t rxb;
 	int i;
 
 	if (!ip->rxr) {
 		/* Allocate and initialize rx ring.  4kb = 512 entries */
-		ip->rxr = (unsigned long *)get_zeroed_page(GFP_ATOMIC);
+		ip->rxr = dma_direct_alloc_pages(ip->dma_dev, RX_RING_SIZE,
+						 &ip->rxr_dma, GFP_ATOMIC, 0);
 		rxr = ip->rxr;
-		if (!rxr)
+		if (!rxr) {
 			pr_err("%s: get_zeroed_page() failed!\n", __func__);
+			return;
+		}
 
 		/* Now the rx buffers. The RX ring may be larger but
 		 * we only allocate 16 buffers for now. Need to tune
@@ -828,8 +871,9 @@ static void ioc3_alloc_rings(struct net_device *dev)
 
 			ip->rx_skbs[i] = skb;
 
-			rxb = (struct ioc3_erxbuf *)skb->data;
-			rxr[i] = cpu_to_be64(ioc3_map(rxb, 1));
+			rxb = dma_map_single(ip->dma_dev, skb->data,
+					     RX_BUF_SIZE, DMA_BIDIRECTIONAL);
+			rxr[i] = cpu_to_be64(ioc3_map(rxb, PCI64_ATTR_BAR));
 			skb_reserve(skb, RX_OFFSET);
 		}
 		ip->rx_ci = 0;
@@ -838,7 +882,9 @@ static void ioc3_alloc_rings(struct net_device *dev)
 
 	if (!ip->txr) {
 		/* Allocate and initialize tx rings.  16kb = 128 bufs. */
-		ip->txr = (struct ioc3_etxd *)__get_free_pages(GFP_KERNEL, 2);
+		ip->txr = dma_direct_alloc_pages(ip->dma_dev, TX_RING_SIZE,
+						 &ip->txr_dma,
+						 GFP_KERNEL | __GFP_ZERO, 0);
 		if (!ip->txr)
 			pr_err("%s: __get_free_pages() failed!\n", __func__);
 		ip->tx_pi = 0;
@@ -859,13 +905,13 @@ static void ioc3_init_rings(struct net_device *dev)
 	ioc3_clean_tx_ring(ip);
 
 	/* Now the rx ring base, consume & produce registers.  */
-	ring = ioc3_map(ip->rxr, 0);
+	ring = ioc3_map(ip->rxr_dma, PCI64_ATTR_PREC);
 	writel(ring >> 32, &regs->erbr_h);
 	writel(ring & 0xffffffff, &regs->erbr_l);
 	writel(ip->rx_ci << 3, &regs->ercir);
 	writel((ip->rx_pi << 3) | ERPIR_ARM, &regs->erpir);
 
-	ring = ioc3_map(ip->txr, 0);
+	ring = ioc3_map(ip->txr_dma, PCI64_ATTR_PREC);
 
 	ip->txqlen = 0;			/* nothing queued */
@@ -915,13 +961,7 @@ static void ioc3_init(struct net_device *dev)
 	readl(&regs->emcr);
 
 	/* Misc registers  */
-#ifdef CONFIG_SGI_IP27
-	/* Barrier on last store */
-	writel(PCI64_ATTR_BAR >> 32, &regs->erbar);
-#else
-	/* Let PCI API get it right */
-	writel(0, &regs->erbar);
-#endif
+	writel(ERBAR_VAL, &regs->erbar);
 	readl(&regs->etcdc);			/* Clear on read */
 	writel(15, &regs->ercsr);		/* RX low watermark */
 	writel(0, &regs->ertr);			/* Interrupt immediately */
@@ -1187,6 +1227,7 @@ static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	ip = netdev_priv(dev);
 	ip->dev = dev;
+	ip->dma_dev = &pdev->dev;
 
 	dev->irq = pdev->irq;
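The final hunk touches the TX path. An IOC3 TX descriptor carries two
buffer pointers, so a packet whose data crosses a 16 kB boundary is split
at that boundary and each fragment is mapped on its own. A worked example
of the split arithmetic (the addresses are illustrative only, not taken
from the driver):

	unsigned long data = 0x12343f00UL;		/* packet start */
	unsigned long len  = 0x200UL;			/* 512-byte packet */
	unsigned long b2   = (data | 0x3fffUL) + 1UL;	/* 0x12344000: next 16 kB boundary */
	unsigned long s1   = b2 - data;			/* 0x100 bytes before the boundary */
	unsigned long s2   = data + len - b2;		/* 0x100 bytes after the boundary */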
@@ -1386,18 +1427,24 @@ static netdev_tx_t ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		unsigned long b2 = (data | 0x3fffUL) + 1UL;
 		unsigned long s1 = b2 - data;
 		unsigned long s2 = data + len - b2;
+		dma_addr_t d;
 
 		desc->cmd = cpu_to_be32(len | ETXD_INTWHENDONE |
 					ETXD_B1V | ETXD_B2V | w0);
 		desc->bufcnt = cpu_to_be32((s1 << ETXD_B1CNT_SHIFT) |
 					   (s2 << ETXD_B2CNT_SHIFT));
-		desc->p1 = cpu_to_be64(ioc3_map(skb->data, 1));
-		desc->p2 = cpu_to_be64(ioc3_map((void *)b2, 1));
+		d = dma_map_single(ip->dma_dev, skb->data, s1, DMA_TO_DEVICE);
+		desc->p1 = cpu_to_be64(ioc3_map(d, PCI64_ATTR_PREF));
+		d = dma_map_single(ip->dma_dev, (void *)b2, s2, DMA_TO_DEVICE);
+		desc->p2 = cpu_to_be64(ioc3_map(d, PCI64_ATTR_PREF));
 	} else {
+		dma_addr_t d;
+
 		/* Normal sized packet that doesn't cross a page boundary.
 		 */
 		desc->cmd = cpu_to_be32(len | ETXD_INTWHENDONE | ETXD_B1V | w0);
 		desc->bufcnt = cpu_to_be32(len << ETXD_B1CNT_SHIFT);
-		desc->p1 = cpu_to_be64(ioc3_map(skb->data, 1));
+		d = dma_map_single(ip->dma_dev, skb->data, len, DMA_TO_DEVICE);
+		desc->p1 = cpu_to_be64(ioc3_map(d, PCI64_ATTR_PREF));
 	}
 
 	mb(); /* make sure all descriptor changes are visible */
-- 
2.13.7