From: Divy Le Ray <[email protected]>
Offload packets may be DMAed long after their SGE Tx descriptors are done
so they must remain mapped until they are freed rather than until their
descriptors are freed. Unmap such packets through an skb destructor.
Signed-off-by: Divy Le Ray <[email protected]>
---
drivers/net/cxgb3/sge.c | 63 ++++++++++++++++++++++++++++++++++++++++++++++-
1 files changed, 61 insertions(+), 2 deletions(-)
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index daef7fd..d563f7a 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -85,6 +85,15 @@ struct unmap_info { /* packet unmapping
};
/*
+ * Holds unmapping information for Tx packets that need deferred unmapping.
+ * This structure lives at skb->head and must be allocated by callers.
+ */
+struct deferred_unmap_info {
+ struct pci_dev *pdev;
+ dma_addr_t addr[MAX_SKB_FRAGS + 1];
+};
+
+/*
* Maps a number of flits to the number of Tx descriptors that can hold them.
* The formula is
*
@@ -232,10 +241,13 @@ static void free_tx_desc(struct adapter
struct pci_dev *pdev = adapter->pdev;
unsigned int cidx = q->cidx;
+ const int need_unmap = need_skb_unmap() &&
+ q->cntxt_id >= FW_TUNNEL_SGEEC_START;
+
d = &q->sdesc[cidx];
while (n--) {
if (d->skb) { /* an SGL is present */
- if (need_skb_unmap())
+ if (need_unmap)
unmap_skb(d->skb, q, cidx, pdev);
if (d->skb->priority == cidx)
kfree_skb(d->skb);
@@ -1207,6 +1219,50 @@ int t3_mgmt_tx(struct adapter *adap, str
}
/**
+ * deferred_unmap_destructor - unmap a packet when it is freed
+ * @skb: the packet
+ *
+ * This is the packet destructor used for Tx packets that need to remain
+ * mapped until they are freed rather than until their Tx descriptors are
+ * freed.
+ */
+static void deferred_unmap_destructor(struct sk_buff *skb)
+{
+ int i;
+ const dma_addr_t *p;
+ const struct skb_shared_info *si;
+ const struct deferred_unmap_info *dui;
+ const struct unmap_info *ui = (struct unmap_info *)skb->cb;
+
+ dui = (struct deferred_unmap_info *)skb->head;
+ p = dui->addr;
+
+ if (ui->len)
+ pci_unmap_single(dui->pdev, *p++, ui->len, PCI_DMA_TODEVICE);
+
+ si = skb_shinfo(skb);
+ for (i = 0; i < si->nr_frags; i++)
+ pci_unmap_page(dui->pdev, *p++, si->frags[i].size,
+ PCI_DMA_TODEVICE);
+}
+
+static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev,
+ const struct sg_ent *sgl, int sgl_flits)
+{
+ dma_addr_t *p;
+ struct deferred_unmap_info *dui;
+
+ dui = (struct deferred_unmap_info *)skb->head;
+ dui->pdev = pdev;
+ for (p = dui->addr; sgl_flits >= 3; sgl++, sgl_flits -= 3) {
+ *p++ = be64_to_cpu(sgl->addr[0]);
+ *p++ = be64_to_cpu(sgl->addr[1]);
+ }
+ if (sgl_flits)
+ *p = be64_to_cpu(sgl->addr[0]);
+}
+
+/**
* write_ofld_wr - write an offload work request
* @adap: the adapter
* @skb: the packet to send
@@ -1242,8 +1298,11 @@ static void write_ofld_wr(struct adapter
sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
sgl_flits = make_sgl(skb, sgp, skb->h.raw, skb->tail - skb->h.raw,
adap->pdev);
- if (need_skb_unmap())
+ if (need_skb_unmap()) {
+ setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits);
+ skb->destructor = deferred_unmap_destructor;
((struct unmap_info *)skb->cb)->len = skb->tail - skb->h.raw;
+ }
write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits,
gen, from->wr_hi, from->wr_lo);
Divy Le Ray wrote:
> From: Divy Le Ray <[email protected]>
>
> Offload packets may be DMAed long after their SGE Tx descriptors are done
> so they must remain mapped until they are freed rather than until their
> descriptors are freed. Unmap such packets through an skb destructor.
>
> Signed-off-by: Divy Le Ray <[email protected]>
this is questionable and sounds like you are working around a bug. I
can't think of another driver that uses skb destructors, and I could
have sworn that skb destructors were not for drivers to use anyway
Jeff
Jeff Garzik wrote:
> Divy Le Ray wrote:
>> From: Divy Le Ray <[email protected]>
>>
>> Offload packets may be DMAed long after their SGE Tx descriptors are
>> done
>> so they must remain mapped until they are freed rather than until their
>> descriptors are freed. Unmap such packets through an skb destructor.
>>
>> Signed-off-by: Divy Le Ray <[email protected]>
>
> this is questionable and sounds like you are working around a bug. I
> can't think of another driver that uses skb destructors, and I could
> have sworn that skb destructors were not for drivers to use anyway
It's not a bug workaround. The payload of RDMA packets is DMAed lazily,
potentially well after the DMA descriptors themselves have been
processed and released. On platforms w/ IOMMUs, the packets need to
remain mapped in the IOMMU until their payload is fetched, at which time
the Scatter/Gather Engine may or may not still have knowledge of those
packets. So the SGE maps them for the IOMMU, then sets up the
destructor so they can be unmapped whenever they complete. It's true
that drivers don't typically set up destructors, but the lazy DMA this
device does is also uncommon. Packets carrying these destructors are
exchanged between the RDMA driver and the low-level driver and aren't
visible to any parts of the stack that may have different expectations
of destructors. Using the destructor is the simplest, most convenient
way to solve this problem.
Cheers,
Divy