When frames are sent with very small fragments, the DMA engine appears to
lock up and transmit attempts time out. Fix this by detecting the presence
of small fragments and using skb_gso_segment + skb_linearize to deal with
them.

Signed-off-by: Felix Fietkau <[email protected]>
---
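Note for reviewers (below the --- line, so not part of the commit message):
skb_gso_segment() software-segments a GSO skb into a list of MTU-sized skbs
linked through skb->next, and skb_list_walk_safe() iterates such a list
while the current entry may be freed or handed off. A minimal sketch of the
pattern used in this patch, assuming a generic ndo_start_xmit;
my_dev_has_small_frag() and my_dev_tx_one() are hypothetical placeholders,
not functions from this driver:

	#include <linux/err.h>
	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	static netdev_tx_t my_start_xmit(struct sk_buff *skb,
					 struct net_device *dev)
	{
		struct sk_buff *segs, *next;

		if (skb_is_gso(skb) && my_dev_has_small_frag(skb)) {
			/* Resegment in software with all TSO features
			 * masked, so the resulting skbs are plain
			 * (non-GSO) frames.
			 */
			segs = skb_gso_segment(skb,
					dev->features & ~NETIF_F_ALL_TSO);
			if (IS_ERR(segs)) {
				dev_kfree_skb_any(skb);
				return NETDEV_TX_OK;
			}
			if (segs) {
				/* Original skb is now redundant */
				consume_skb(skb);
				skb = segs;	/* head of segment list */
			}
		}

		skb_list_walk_safe(skb, skb, next) {
			/* Linearize only segments that still carry a
			 * too-small fragment; mapping each segment
			 * separately avoids one huge contiguous copy.
			 */
			if (my_dev_has_small_frag(skb) &&
			    skb_linearize(skb)) {
				dev->stats.tx_dropped++;
				dev_kfree_skb_any(skb);
				continue;
			}
			/* Hypothetical per-skb DMA mapping and TX */
			my_dev_tx_one(skb);
		}

		return NETDEV_TX_OK;
	}

The change below follows the same structure, mapping each segment with
mtk_tx_map() and keeping the existing drop path for segmentation errors.
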
drivers/net/ethernet/mediatek/mtk_eth_soc.c | 36 +++++++++++++++++++--
1 file changed, 34 insertions(+), 2 deletions(-)
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index f7b1d39bf936..a9a270317ff4 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -1442,12 +1442,28 @@ static void mtk_wake_queue(struct mtk_eth *eth)
 	}
 }
 
+static bool mtk_skb_has_small_frag(struct sk_buff *skb)
+{
+	int min_size = 16;
+	int i;
+
+	if (skb_headlen(skb) < min_size)
+		return true;
+
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
+		if (skb_frag_size(&skb_shinfo(skb)->frags[i]) < min_size)
+			return true;
+
+	return false;
+}
+
 static netdev_tx_t mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct mtk_mac *mac = netdev_priv(dev);
 	struct mtk_eth *eth = mac->hw;
 	struct mtk_tx_ring *ring = &eth->tx_ring;
 	struct net_device_stats *stats = &dev->stats;
+	struct sk_buff *segs, *next;
 	bool gso = false;
 	int tx_num;
 
@@ -1469,6 +1485,17 @@ static netdev_tx_t mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		return NETDEV_TX_BUSY;
 	}
 
+	if (skb_is_gso(skb) && mtk_skb_has_small_frag(skb)) {
+		segs = skb_gso_segment(skb, dev->features & ~NETIF_F_ALL_TSO);
+		if (IS_ERR(segs))
+			goto drop;
+
+		if (segs) {
+			consume_skb(skb);
+			skb = segs;
+		}
+	}
+
 	/* TSO: fill MSS info in tcp checksum field */
 	if (skb_is_gso(skb)) {
 		if (skb_cow_head(skb, 0)) {
@@ -1484,8 +1511,13 @@ static netdev_tx_t mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		}
 	}
 
-	if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
-		goto drop;
+	skb_list_walk_safe(skb, skb, next) {
+		if ((mtk_skb_has_small_frag(skb) && skb_linearize(skb)) ||
+		    mtk_tx_map(skb, dev, tx_num, ring, gso) < 0) {
+			stats->tx_dropped++;
+			dev_kfree_skb_any(skb);
+		}
+	}
 
 	if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
 		netif_tx_stop_all_queues(dev);
--
2.38.1