2019-10-08 12:12:54

by Madalin-cristian Bucur

Subject: [PATCH 19/20] dpaa_eth: add dpaa_dma_to_virt()

Centralize the phys_to_virt() calls.

Signed-off-by: Madalin Bucur <[email protected]>
---
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 45 ++++++++++++++------------
1 file changed, 24 insertions(+), 21 deletions(-)

diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 77edf2e026e8..c178f1b8c5e5 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -189,6 +189,15 @@ static int dpaa_rx_extra_headroom;
#define dpaa_get_max_mtu() \
(dpaa_max_frm - (VLAN_ETH_HLEN + ETH_FCS_LEN))

+static void *dpaa_dma_to_virt(struct device *dev, dma_addr_t addr)
+{
+	if (!addr) {
+		dev_err(dev, "Invalid address\n");
+		return NULL;
+	}
+	return phys_to_virt(addr);
+}
+
static int dpaa_netdev_init(struct net_device *net_dev,
const struct net_device_ops *dpaa_ops,
u16 tx_timeout)
@@ -1307,7 +1316,8 @@ static void dpaa_fd_release(const struct net_device *net_dev,
return;

if (qm_fd_get_format(fd) == qm_fd_sg) {
- vaddr = phys_to_virt(qm_fd_addr(fd));
+ vaddr = dpaa_dma_to_virt(dpaa_bp->priv->rx_dma_dev,
+ qm_fd_addr(fd));
sgt = vaddr + qm_fd_get_offset(fd);

dma_unmap_page(dpaa_bp->priv->rx_dma_dev, qm_fd_addr(fd),
@@ -1588,12 +1598,13 @@ static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
struct device *dev = priv->net_dev->dev.parent;
struct skb_shared_hwtstamps shhwtstamps;
dma_addr_t addr = qm_fd_addr(fd);
- void *vaddr = phys_to_virt(addr);
const struct qm_sg_entry *sgt;
struct sk_buff *skb;
+ void *vaddr;
u64 ns;
int i;

+ vaddr = dpaa_dma_to_virt(priv->rx_dma_dev, addr);
if (unlikely(qm_fd_get_format(fd) == qm_fd_sg)) {
dma_unmap_page(priv->tx_dma_dev, addr,
qm_fd_get_offset(fd) + DPAA_SGT_SIZE,
@@ -1667,16 +1678,11 @@ static u8 rx_csum_offload(const struct dpaa_priv *priv, const struct qm_fd *fd)
* accommodate the shared info area of the skb.
*/
static struct sk_buff *contig_fd_to_skb(const struct dpaa_priv *priv,
- const struct qm_fd *fd)
+ const struct qm_fd *fd, void *vaddr)
{
ssize_t fd_off = qm_fd_get_offset(fd);
- dma_addr_t addr = qm_fd_addr(fd);
struct dpaa_bp *dpaa_bp;
struct sk_buff *skb;
- void *vaddr;
-
- vaddr = phys_to_virt(addr);
- WARN_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));

dpaa_bp = dpaa_bpid2pool(fd->bpid);
if (!dpaa_bp)
@@ -1705,14 +1711,13 @@ static struct sk_buff *contig_fd_to_skb(const struct dpaa_priv *priv,
* The page fragment holding the S/G Table is recycled here.
*/
static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
- const struct qm_fd *fd)
+ const struct qm_fd *fd, void *vaddr)
{
ssize_t fd_off = qm_fd_get_offset(fd);
- dma_addr_t addr = qm_fd_addr(fd);
const struct qm_sg_entry *sgt;
struct page *page, *head_page;
struct dpaa_bp *dpaa_bp;
- void *vaddr, *sg_vaddr;
+ void *sg_vaddr;
int frag_off, frag_len;
struct sk_buff *skb;
dma_addr_t sg_addr;
@@ -1721,9 +1726,6 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
int *count_ptr;
int i;

- vaddr = phys_to_virt(addr);
- WARN_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));
-
/* Iterate through the SGT entries and add data buffers to the skb */
sgt = vaddr + fd_off;
skb = NULL;
@@ -1732,7 +1734,7 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
WARN_ON(qm_sg_entry_is_ext(&sgt[i]));

sg_addr = qm_sg_addr(&sgt[i]);
- sg_vaddr = phys_to_virt(sg_addr);
+ sg_vaddr = dpaa_dma_to_virt(priv->rx_dma_dev, sg_addr);
WARN_ON(!IS_ALIGNED((unsigned long)sg_vaddr,
SMP_CACHE_BYTES));

@@ -1811,7 +1813,7 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
/* free all the SG entries */
for (i = 0; i < DPAA_SGT_MAX_ENTRIES ; i++) {
sg_addr = qm_sg_addr(&sgt[i]);
- sg_vaddr = phys_to_virt(sg_addr);
+ sg_vaddr = dpaa_dma_to_virt(priv->rx_dma_dev, sg_addr);
free_pages((unsigned long)sg_vaddr, 0);
dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
if (dpaa_bp) {
@@ -2281,11 +2283,12 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
return qman_cb_dqrr_consume;
}

- dma_unmap_page(dpaa_bp->priv->rx_dma_dev, addr, DPAA_BP_RAW_SIZE,
+ vaddr = dpaa_dma_to_virt(priv->rx_dma_dev, addr);
+
+ dma_unmap_page(priv->rx_dma_dev, addr, DPAA_BP_RAW_SIZE,
DMA_FROM_DEVICE);

/* prefetch the first 64 bytes of the frame or the SGT start */
- vaddr = phys_to_virt(addr);
prefetch(vaddr + qm_fd_get_offset(fd));

/* The only FD types that we may receive are contig and S/G */
@@ -2298,9 +2301,9 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
(*count_ptr)--;

if (likely(fd_format == qm_fd_contig))
- skb = contig_fd_to_skb(priv, fd);
+ skb = contig_fd_to_skb(priv, fd, vaddr);
else
- skb = sg_fd_to_skb(priv, fd);
+ skb = sg_fd_to_skb(priv, fd, vaddr);
if (!skb)
return qman_cb_dqrr_consume;

@@ -2640,7 +2643,7 @@ static inline void dpaa_bp_free_pf(const struct dpaa_bp *bp,
dma_unmap_page(bp->priv->rx_dma_dev, addr, DPAA_BP_RAW_SIZE,
DMA_FROM_DEVICE);

- skb_free_frag(phys_to_virt(addr));
+ skb_free_frag(dpaa_dma_to_virt(bp->priv->rx_dma_dev, addr));
}

/* Alloc the dpaa_bp struct and configure default values */
--
2.1.0


2019-10-09 07:42:07

by Christoph Hellwig

Subject: Re: [PATCH 19/20] dpaa_eth: add dpaa_dma_to_virt()

On Tue, Oct 08, 2019 at 03:10:40PM +0300, Madalin Bucur wrote:
> Centralize the phys_to_virt() calls.

You don't need to centralize those, you need to fix them. Calling
phys_to_virt on a dma_addr is completely bogus.
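The dma_addr_t cookie returned by the DMA API is a bus/IOVA address; once an
IOMMU or swiotlb bounce buffering is involved it has no relation to a CPU
physical address. The usual pattern is to keep the kernel virtual address
from mapping time instead of trying to translate the cookie back, roughly
like this (sketch only, rx_buf_map() is just an example name, not existing
driver code):

static void *rx_buf_map(struct device *dev, struct page *page,
			dma_addr_t *busaddr)
{
	void *vaddr = page_address(page);	/* CPU virtual address, keep it */

	*busaddr = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *busaddr))
		return NULL;

	/* hand *busaddr to the hardware; on completion use vaddr directly,
	 * do not call phys_to_virt(*busaddr) - the cookie is not a physical
	 * address once an IOMMU or swiotlb sits in the path.
	 */
	return vaddr;
}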

2019-10-09 10:08:51

by Laurentiu Tudor

Subject: Re: [PATCH 19/20] dpaa_eth: add dpaa_dma_to_virt()

On 09.10.2019 10:39, Christoph Hellwig wrote:
> On Tue, Oct 08, 2019 at 03:10:40PM +0300, Madalin Bucur wrote:
>> Centralize the phys_to_virt() calls.
>
> You don't need to centralize those, you need to fix them. Calling
> phys_to_virt on a dma_addr is completely bogus.
>

Yeah, that's been on my TODO list [1] for quite a while, among other things.
I'll return to it as soon as I'm done with the burning stuff I'm currently on.

[1] https://patchwork.kernel.org/patch/10968947/

---
Best Regards, Laurentiu

2019-10-09 10:57:53

by Madalin-cristian Bucur

Subject: RE: [PATCH 19/20] dpaa_eth: add dpaa_dma_to_virt()

> -----Original Message-----
> From: Christoph Hellwig <[email protected]>
> Sent: Wednesday, October 9, 2019 10:39 AM
> To: Madalin-cristian Bucur <[email protected]>
> Cc: [email protected]; [email protected]; Roy Pledge
> <[email protected]>; Laurentiu Tudor <[email protected]>; linux-
> [email protected]
> Subject: Re: [PATCH 19/20] dpaa_eth: add dpaa_dma_to_virt()
>
> On Tue, Oct 08, 2019 at 03:10:40PM +0300, Madalin Bucur wrote:
> > Centralize the phys_to_virt() calls.
>
> You don't need to centralize those, you need to fix them. Calling
> phys_to_virt on a dma_addr is completely bogus.

Hi Christoph, thank you for your input. I'm aware that the current code only
supports a limited set of scenarios (SMMU disabled/bypassed), and that the
existing customers using the DPAA platforms cannot make use of the SMMU
features until this is fixed. The problem is that there is no quick path to
fixing it: the performance requirements of the existing use-cases preclude
the approaches recommended so far. I'm moving all these phys_to_virt() calls
into one central location precisely because of this, as the lack of progress
on the SMMU fix has been preventing me from upstreaming other driver
changes/fixes. Keeping this contained lets it follow a separate path towards
a solution, while allowing me to address issues for the current users of the
DPAA hardware with minimal interference. To illustrate how the DPAA driver
code changes are decoupled from the IOVA handling fix, the only change to
the driver code required to make it work with the SMMU enabled would look
similar to this:

 static void *dpaa_dma_to_virt(struct device *dev, dma_addr_t addr)
 {
+	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
+
+	if (domain)
+		return phys_to_virt(iommu_iova_to_phys(domain, addr));
+
 	return phys_to_virt(addr);
 }

Any further refinements regarding the actual APIs to be used would only
affect this code area.
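
For instance (purely an illustration, the iommu_domain field on dpaa_priv
below is hypothetical), the domain lookup could be done once at probe time
and cached, keeping the per-packet cost of the helper low:

static void *dpaa_dma_to_virt(const struct dpaa_priv *priv, dma_addr_t addr)
{
	/* priv->iommu_domain cached at probe via iommu_get_domain_for_dev() */
	if (priv->iommu_domain)
		return phys_to_virt(iommu_iova_to_phys(priv->iommu_domain,
						       addr));

	return phys_to_virt(addr);
}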

Thank you,
Madalin