Enable the tx/rx aggregation, which allows each bulk in/out transfer
to contain one or more packets. This reduces the load on the host
controller by issuing fewer bulk transfers.
The rx packets in the bulk in buffer should be 8-byte aligned, and
the tx packets in the bulk out buffer should be 4-byte aligned.
Signed-off-by: Hayes Wang <[email protected]>
---
drivers/net/usb/r8152.c | 627 +++++++++++++++++++++++++++++++++++-------------
1 file changed, 466 insertions(+), 161 deletions(-)
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 11c51f2..c6c5aa2 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -19,9 +19,10 @@
#include <linux/crc32.h>
#include <linux/if_vlan.h>
#include <linux/uaccess.h>
+#include <linux/list.h>
/* Version Information */
-#define DRIVER_VERSION "v1.0.0 (2013/05/03)"
+#define DRIVER_VERSION "v1.01.0 (2013/08/12)"
#define DRIVER_AUTHOR "Realtek linux nic maintainers <[email protected]>"
#define DRIVER_DESC "Realtek RTL8152 Based USB 2.0 Ethernet Adapters"
#define MODULENAME "r8152"
@@ -267,6 +268,9 @@ enum rtl_register_content {
FULL_DUP = 0x01,
};
+#define RTL8152_MAX_TX 10
+#define RTL8152_MAX_RX 10
+
#define RTL8152_REQT_READ 0xc0
#define RTL8152_REQT_WRITE 0x40
#define RTL8152_REQ_GET_REGS 0x05
@@ -285,7 +289,6 @@ enum rtl_register_content {
/* rtl8152 flags */
enum rtl8152_flags {
RTL8152_UNPLUG = 0,
- RX_URB_FAIL,
RTL8152_SET_RX_MODE,
WORK_ENABLE
};
@@ -315,13 +318,36 @@ struct tx_desc {
u32 opts2;
};
+struct rx_agg {
+ struct list_head list;
+ struct urb *urb;
+ void *context;
+ void *buffer;
+ void *head;
+ u8 *data;
+};
+
+struct tx_agg {
+ struct list_head list;
+ struct urb *urb;
+ void *context;
+ void *buffer;
+ void *head;
+ u8 *data;
+ u32 skb_num;
+ u32 skb_len;
+};
+
struct r8152 {
unsigned long flags;
struct usb_device *udev;
struct tasklet_struct tl;
struct net_device *netdev;
- struct urb *rx_urb, *tx_urb;
- struct sk_buff *tx_skb, *rx_skb;
+ struct tx_agg tx_info[RTL8152_MAX_TX];
+ struct rx_agg rx_info[RTL8152_MAX_RX];
+ struct list_head rx_done, tx_free;
+ struct sk_buff_head tx_queue;
+ spinlock_t rx_lock, tx_lock;
struct delayed_work schedule;
struct mii_if_info mii;
u32 msg_enable;
@@ -340,6 +366,7 @@ enum rtl_version {
* The RTL chips use a 64 element hash table based on the Ethernet CRC.
*/
static const int multicast_filter_limit = 32;
+static unsigned int rx_buf_sz = 16384;
static
int get_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
@@ -686,6 +713,9 @@ static void ocp_reg_write(struct r8152 *tp, u16 addr, u16 data)
ocp_write_word(tp, MCU_TYPE_PLA, ocp_index, data);
}
+static
+int r8152_submit_rx(struct r8152 *tp, struct rx_agg *agg, gfp_t mem_flags);
+
static inline void set_ethernet_addr(struct r8152 *tp)
{
struct net_device *dev = tp->netdev;
@@ -716,26 +746,6 @@ static int rtl8152_set_mac_address(struct net_device *netdev, void *p)
return 0;
}
-static int alloc_all_urbs(struct r8152 *tp)
-{
- tp->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!tp->rx_urb)
- return 0;
- tp->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!tp->tx_urb) {
- usb_free_urb(tp->rx_urb);
- return 0;
- }
-
- return 1;
-}
-
-static void free_all_urbs(struct r8152 *tp)
-{
- usb_free_urb(tp->rx_urb);
- usb_free_urb(tp->tx_urb);
-}
-
static struct net_device_stats *rtl8152_get_stats(struct net_device *dev)
{
return &dev->stats;
@@ -743,129 +753,417 @@ static struct net_device_stats *rtl8152_get_stats(struct net_device *dev)
static void read_bulk_callback(struct urb *urb)
{
- struct r8152 *tp;
- unsigned pkt_len;
- struct sk_buff *skb;
struct net_device *netdev;
- struct net_device_stats *stats;
+ unsigned long lockflags;
int status = urb->status;
+ struct rx_agg *agg;
+ struct r8152 *tp;
int result;
- struct rx_desc *rx_desc;
- tp = urb->context;
+ agg = urb->context;
+ if (!agg)
+ return;
+
+ tp = agg->context;
if (!tp)
return;
+
if (test_bit(RTL8152_UNPLUG, &tp->flags))
return;
+
netdev = tp->netdev;
if (!netif_device_present(netdev))
return;
- stats = rtl8152_get_stats(netdev);
switch (status) {
case 0:
- break;
+ if (urb->actual_length < ETH_ZLEN)
+ break;
+
+ spin_lock_irqsave(&tp->rx_lock, lockflags);
+ list_add_tail(&agg->list, &tp->rx_done);
+ spin_unlock_irqrestore(&tp->rx_lock, lockflags);
+ tasklet_schedule(&tp->tl);
+ return;
case -ESHUTDOWN:
set_bit(RTL8152_UNPLUG, &tp->flags);
netif_device_detach(tp->netdev);
+ return;
case -ENOENT:
return; /* the urb is in unlink state */
case -ETIME:
pr_warn_ratelimited("may be reset is needed?..\n");
- goto goon;
+ break;
default:
pr_warn_ratelimited("Rx status %d\n", status);
- goto goon;
+ break;
}
- /* protect against short packets (tell me why we got some?!?) */
- if (urb->actual_length < sizeof(*rx_desc))
- goto goon;
-
-
- rx_desc = (struct rx_desc *)urb->transfer_buffer;
- pkt_len = le32_to_cpu(rx_desc->opts1) & RX_LEN_MASK;
- if (urb->actual_length < sizeof(struct rx_desc) + pkt_len)
- goto goon;
-
- skb = netdev_alloc_skb_ip_align(netdev, pkt_len);
- if (!skb)
- goto goon;
-
- memcpy(skb->data, tp->rx_skb->data + sizeof(struct rx_desc), pkt_len);
- skb_put(skb, pkt_len);
- skb->protocol = eth_type_trans(skb, netdev);
- netif_rx(skb);
- stats->rx_packets++;
- stats->rx_bytes += pkt_len;
-goon:
- usb_fill_bulk_urb(tp->rx_urb, tp->udev, usb_rcvbulkpipe(tp->udev, 1),
- tp->rx_skb->data, RTL8152_RMS + sizeof(struct rx_desc),
- (usb_complete_t)read_bulk_callback, tp);
- result = usb_submit_urb(tp->rx_urb, GFP_ATOMIC);
+ result = r8152_submit_rx(tp, agg, GFP_ATOMIC);
if (result == -ENODEV) {
netif_device_detach(tp->netdev);
} else if (result) {
- set_bit(RX_URB_FAIL, &tp->flags);
- goto resched;
- } else {
- clear_bit(RX_URB_FAIL, &tp->flags);
+ spin_lock_irqsave(&tp->rx_lock, lockflags);
+ list_add_tail(&agg->list, &tp->rx_done);
+ spin_unlock_irqrestore(&tp->rx_lock, lockflags);
+ tasklet_schedule(&tp->tl);
}
-
- return;
-resched:
- tasklet_schedule(&tp->tl);
}
-static void rx_fixup(unsigned long data)
+static void write_bulk_callback(struct urb *urb)
{
+ struct net_device_stats *stats;
+ unsigned long lockflags;
+ struct tx_agg *agg;
struct r8152 *tp;
- int status;
+ int status = urb->status;
- tp = (struct r8152 *)data;
- if (!test_bit(WORK_ENABLE, &tp->flags))
+ agg = urb->context;
+ if (!agg)
return;
- status = usb_submit_urb(tp->rx_urb, GFP_ATOMIC);
- if (status == -ENODEV) {
- netif_device_detach(tp->netdev);
- } else if (status) {
- set_bit(RX_URB_FAIL, &tp->flags);
- goto tlsched;
+ tp = agg->context;
+ if (!tp)
+ return;
+
+ stats = rtl8152_get_stats(tp->netdev);
+ if (status) {
+ pr_warn_ratelimited("Tx status %d\n", status);
+ stats->tx_errors += agg->skb_num;
} else {
- clear_bit(RX_URB_FAIL, &tp->flags);
+ stats->tx_packets += agg->skb_num;
+ stats->tx_bytes += agg->skb_len;
}
- return;
-tlsched:
- tasklet_schedule(&tp->tl);
+ if (!netif_device_present(tp->netdev))
+ return;
+
+ agg->data = agg->head;
+ agg->skb_num = agg->skb_len = 0;
+ spin_lock_irqsave(&tp->tx_lock, lockflags);
+ list_add_tail(&agg->list, &tp->tx_free);
+ spin_unlock_irqrestore(&tp->tx_lock, lockflags);
+
+ if (!skb_queue_empty(&tp->tx_queue))
+ tasklet_schedule(&tp->tl);
}
-static void write_bulk_callback(struct urb *urb)
+static inline void *rx_agg_align(void *data)
{
- struct r8152 *tp;
- int status = urb->status;
+ return (void *)ALIGN((uintptr_t)data, 8);
+}
- tp = urb->context;
- if (!tp)
+static inline void *tx_agg_align(void *data)
+{
+ return (void *)ALIGN((uintptr_t)data, 4);
+}
+
+static void free_all_mem(struct r8152 *tp)
+{
+ int i;
+
+ for (i = 0; i < RTL8152_MAX_RX; i++) {
+ if (tp->rx_info[i].urb) {
+ usb_free_urb(tp->rx_info[i].urb);
+ tp->rx_info[i].urb = NULL;
+ }
+
+ if (tp->rx_info[i].buffer) {
+ kfree(tp->rx_info[i].buffer);
+ tp->rx_info[i].buffer = NULL;
+ tp->rx_info[i].head = tp->rx_info[i].data = NULL;
+ }
+ }
+
+ for (i = 0; i < RTL8152_MAX_TX; i++) {
+ if (tp->tx_info[i].urb) {
+ usb_free_urb(tp->tx_info[i].urb);
+ tp->tx_info[i].urb = NULL;
+ }
+
+ if (tp->tx_info[i].buffer) {
+ kfree(tp->tx_info[i].buffer);
+ tp->tx_info[i].buffer = NULL;
+ tp->tx_info[i].head = tp->tx_info[i].data = NULL;
+ }
+ }
+}
+
+static int alloc_all_mem(struct r8152 *tp)
+{
+ struct net_device *netdev = tp->netdev;
+ struct urb *urb;
+ int node, i;
+ u8 *buf;
+
+ node = netdev->dev.parent ? dev_to_node(netdev->dev.parent) : -1;
+
+ spin_lock_init(&tp->rx_lock);
+ INIT_LIST_HEAD(&tp->rx_done);
+
+ for (i = 0; i < RTL8152_MAX_RX; i++) {
+ buf = kmalloc_node(rx_buf_sz, GFP_KERNEL, node);
+ if (!buf)
+ goto err1;
+
+ if (buf != rx_agg_align(buf)) {
+ kfree(buf);
+ buf = kmalloc_node(rx_buf_sz + 8, GFP_KERNEL, node);
+ if (!buf)
+ goto err1;
+ }
+
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!urb) {
+ kfree(buf);
+ goto err1;
+ }
+
+ INIT_LIST_HEAD(&tp->rx_info[i].list);
+ tp->rx_info[i].context = tp;
+ tp->rx_info[i].urb = urb;
+ tp->rx_info[i].buffer = buf;
+ tp->rx_info[i].head = tp->rx_info[i].data = rx_agg_align(buf);
+ }
+
+ spin_lock_init(&tp->tx_lock);
+ INIT_LIST_HEAD(&tp->tx_free);
+ skb_queue_head_init(&tp->tx_queue);
+
+ for (i = 0; i < RTL8152_MAX_TX; i++) {
+ buf = kmalloc_node(rx_buf_sz, GFP_KERNEL, node);
+ if (!buf)
+ goto err1;
+
+ if (buf != tx_agg_align(buf)) {
+ kfree(buf);
+ buf = kmalloc_node(rx_buf_sz + 4, GFP_KERNEL, node);
+ if (!buf)
+ goto err1;
+ }
+
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!urb) {
+ kfree(buf);
+ goto err1;
+ }
+
+ INIT_LIST_HEAD(&tp->tx_info[i].list);
+ tp->tx_info[i].context = tp;
+ tp->tx_info[i].urb = urb;
+ tp->tx_info[i].buffer = buf;
+ tp->tx_info[i].head = tp->tx_info[i].data = tx_agg_align(buf);
+
+ list_add_tail(&tp->tx_info[i].list, &tp->tx_free);
+ }
+
+ return 0;
+
+err1:
+ free_all_mem(tp);
+ return -ENOMEM;
+}
+
+static void rx_bottom(struct r8152 *tp)
+{
+ struct net_device_stats *stats;
+ struct net_device *netdev;
+ struct rx_agg *agg;
+ struct rx_desc *rx_desc;
+ unsigned long lockflags;
+ struct list_head *cursor, *next;
+ struct sk_buff *skb;
+ struct urb *urb;
+ unsigned pkt_len;
+ int len_used;
+ u8 *rx_data;
+ int ret;
+
+ netdev = tp->netdev;
+
+ if (!netif_running(netdev))
return;
- dev_kfree_skb_irq(tp->tx_skb);
- if (!netif_device_present(tp->netdev))
+
+ stats = rtl8152_get_stats(netdev);
+
+ spin_lock_irqsave(&tp->rx_lock, lockflags);
+ list_for_each_safe(cursor, next, &tp->rx_done) {
+ list_del_init(cursor);
+ spin_unlock_irqrestore(&tp->rx_lock, lockflags);
+
+ agg = list_entry(cursor, struct rx_agg, list);
+ urb = agg->urb;
+ if (urb->actual_length < ETH_ZLEN) {
+ ret = r8152_submit_rx(tp, agg, GFP_ATOMIC);
+ spin_lock_irqsave(&tp->rx_lock, lockflags);
+ if (ret && ret != -ENODEV) {
+ list_add_tail(&agg->list, next);
+ tasklet_schedule(&tp->tl);
+ }
+ continue;
+ }
+
+ len_used = 0;
+ rx_desc = agg->head;
+ rx_data = agg->head;
+ smp_wmb();
+ pkt_len = le32_to_cpu(rx_desc->opts1) & RX_LEN_MASK;
+ len_used += sizeof(struct rx_desc) + pkt_len;
+
+ while (urb->actual_length >= len_used) {
+ if (pkt_len < ETH_ZLEN)
+ break;
+
+ pkt_len -= 4; /* CRC */
+ rx_data += sizeof(struct rx_desc);
+
+ skb = netdev_alloc_skb_ip_align(netdev, pkt_len);
+ if (!skb) {
+ stats->rx_dropped++;
+ break;
+ }
+ memcpy(skb->data, rx_data, pkt_len);
+ skb_put(skb, pkt_len);
+ skb->protocol = eth_type_trans(skb, netdev);
+ netif_rx(skb);
+ stats->rx_packets++;
+ stats->rx_bytes += pkt_len;
+
+ rx_data = rx_agg_align(rx_data + pkt_len + 4);
+ rx_desc = (struct rx_desc *)rx_data;
+ smp_wmb();
+ pkt_len = le32_to_cpu(rx_desc->opts1) & RX_LEN_MASK;
+ len_used = (int)(rx_data - (u8 *)agg->head);
+ len_used += sizeof(struct rx_desc) + pkt_len;
+ }
+
+ ret = r8152_submit_rx(tp, agg, GFP_ATOMIC);
+ spin_lock_irqsave(&tp->rx_lock, lockflags);
+ if (ret && ret != -ENODEV) {
+ list_add_tail(&agg->list, next);
+ tasklet_schedule(&tp->tl);
+ }
+ }
+ spin_unlock_irqrestore(&tp->rx_lock, lockflags);
+}
+
+static void tx_bottom(struct r8152 *tp)
+{
+ struct net_device_stats *stats;
+ struct net_device *netdev;
+ struct tx_agg *agg;
+ unsigned long lockflags;
+ u32 remain, total;
+ int res;
+
+ netdev = tp->netdev;
+
+ if (!netif_running(netdev))
return;
- if (status)
- dev_info(&urb->dev->dev, "%s: Tx status %d\n",
- tp->netdev->name, status);
- tp->netdev->trans_start = jiffies;
- netif_wake_queue(tp->netdev);
+
+next_agg:
+ agg = NULL;
+ spin_lock_irqsave(&tp->tx_lock, lockflags);
+ if (!skb_queue_empty(&tp->tx_queue) && !list_empty(&tp->tx_free)) {
+ struct list_head *cursor;
+
+ cursor = tp->tx_free.next;
+ list_del_init(cursor);
+ agg = list_entry(cursor, struct tx_agg, list);
+ }
+ spin_unlock_irqrestore(&tp->tx_lock, lockflags);
+
+ if (!agg)
+ return;
+
+ agg->data = agg->head;
+ agg->skb_num = agg->skb_len = 0;
+ remain = rx_buf_sz - sizeof(struct tx_desc);
+ total = 0;
+
+ while (remain >= ETH_ZLEN) {
+ struct tx_desc *tx_desc;
+ struct sk_buff *skb;
+ unsigned int len;
+
+ skb = skb_dequeue(&tp->tx_queue);
+ if (!skb)
+ break;
+
+ len = skb->len;
+ if (remain < len) {
+ skb_queue_head(&tp->tx_queue, skb);
+ break;
+ }
+
+ agg->data = tx_agg_align(agg->data);
+ tx_desc = (struct tx_desc *)agg->data;
+ agg->data += sizeof(*tx_desc);
+
+ tx_desc->opts1 = cpu_to_le32((skb->len & TX_LEN_MASK) | TX_FS |
+ TX_LS);
+ memcpy(agg->data, skb->data, len);
+ agg->skb_num++;
+ agg->skb_len += len;
+ dev_kfree_skb_any(skb);
+
+ agg->data += len;
+ remain = rx_buf_sz - sizeof(*tx_desc) -
+ (u32)(tx_agg_align(agg->data) - agg->head);
+ }
+
+ usb_fill_bulk_urb(agg->urb, tp->udev, usb_sndbulkpipe(tp->udev, 2),
+ agg->head, (int)(agg->data - (u8 *)agg->head),
+ (usb_complete_t)write_bulk_callback, agg);
+ res = usb_submit_urb(agg->urb, GFP_ATOMIC);
+
+ stats = rtl8152_get_stats(netdev);
+
+ if (res) {
+ /* Can we get/handle EPIPE here? */
+ if (res == -ENODEV) {
+ netif_device_detach(tp->netdev);
+ } else {
+ netif_warn(tp, tx_err, netdev,
+ "failed tx_urb %d\n", res);
+ stats->tx_dropped += agg->skb_num;
+ agg->data = agg->head;
+ spin_lock_irqsave(&tp->tx_lock, lockflags);
+ list_add_tail(&agg->list, &tp->tx_free);
+ spin_unlock_irqrestore(&tp->tx_lock, lockflags);
+ }
+ return;
+ }
+ goto next_agg;
+}
+
+static void bottom_half(unsigned long data)
+{
+ struct r8152 *tp;
+
+ tp = (struct r8152 *)data;
+ rx_bottom(tp);
+ tx_bottom(tp);
+}
+
+static
+int r8152_submit_rx(struct r8152 *tp, struct rx_agg *agg, gfp_t mem_flags)
+{
+ usb_fill_bulk_urb(agg->urb, tp->udev, usb_rcvbulkpipe(tp->udev, 1),
+ agg->head, rx_buf_sz,
+ (usb_complete_t)read_bulk_callback, agg);
+
+ return usb_submit_urb(agg->urb, mem_flags);
}
static void rtl8152_tx_timeout(struct net_device *netdev)
{
struct r8152 *tp = netdev_priv(netdev);
- struct net_device_stats *stats = rtl8152_get_stats(netdev);
+ int i;
+
netif_warn(tp, tx_err, netdev, "Tx timeout.\n");
- usb_unlink_urb(tp->tx_urb);
- stats->tx_errors++;
+ for (i = 0; i < RTL8152_MAX_TX; i++)
+ usb_unlink_urb(tp->tx_info[i].urb);
}
static void rtl8152_set_rx_mode(struct net_device *netdev)
@@ -923,33 +1221,43 @@ static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb,
{
struct r8152 *tp = netdev_priv(netdev);
struct net_device_stats *stats = rtl8152_get_stats(netdev);
+ unsigned long lockflags;
+ struct tx_agg *agg = NULL;
struct tx_desc *tx_desc;
unsigned int len;
int res;
- netif_stop_queue(netdev);
- len = skb->len;
- if (skb_header_cloned(skb) || skb_headroom(skb) < sizeof(*tx_desc)) {
- struct sk_buff *tx_skb;
+ skb_tx_timestamp(skb);
- tx_skb = skb_copy_expand(skb, sizeof(*tx_desc), 0, GFP_ATOMIC);
- dev_kfree_skb_any(skb);
- if (!tx_skb) {
- stats->tx_dropped++;
- netif_wake_queue(netdev);
- return NETDEV_TX_OK;
- }
- skb = tx_skb;
+ spin_lock_irqsave(&tp->tx_lock, lockflags);
+ if (!list_empty(&tp->tx_free) && skb_queue_empty(&tp->tx_queue)) {
+ struct list_head *cursor;
+
+ cursor = tp->tx_free.next;
+ list_del_init(cursor);
+ agg = list_entry(cursor, struct tx_agg, list);
}
- tx_desc = (struct tx_desc *)skb_push(skb, sizeof(*tx_desc));
- memset(tx_desc, 0, sizeof(*tx_desc));
- tx_desc->opts1 = cpu_to_le32((len & TX_LEN_MASK) | TX_FS | TX_LS);
- tp->tx_skb = skb;
- skb_tx_timestamp(skb);
- usb_fill_bulk_urb(tp->tx_urb, tp->udev, usb_sndbulkpipe(tp->udev, 2),
- skb->data, skb->len,
- (usb_complete_t)write_bulk_callback, tp);
- res = usb_submit_urb(tp->tx_urb, GFP_ATOMIC);
+ spin_unlock_irqrestore(&tp->tx_lock, lockflags);
+
+ if (!agg) {
+ skb_queue_tail(&tp->tx_queue, skb);
+ return NETDEV_TX_OK;
+ }
+
+ tx_desc = (struct tx_desc *)agg->head;
+ agg->data = agg->head + sizeof(*tx_desc);
+ agg->skb_num = agg->skb_len = 0;
+
+ len = skb->len;
+ tx_desc->opts1 = cpu_to_le32((skb->len & TX_LEN_MASK) | TX_FS | TX_LS);
+ memcpy(agg->data, skb->data, len);
+ dev_kfree_skb_any(skb);
+ agg->skb_num++;
+ agg->skb_len += len;
+ usb_fill_bulk_urb(agg->urb, tp->udev, usb_sndbulkpipe(tp->udev, 2),
+ agg->head, len + sizeof(*tx_desc),
+ (usb_complete_t)write_bulk_callback, agg);
+ res = usb_submit_urb(agg->urb, GFP_ATOMIC);
if (res) {
/* Can we get/handle EPIPE here? */
if (res == -ENODEV) {
@@ -957,12 +1265,12 @@ static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb,
} else {
netif_warn(tp, tx_err, netdev,
"failed tx_urb %d\n", res);
- stats->tx_errors++;
- netif_start_queue(netdev);
+ stats->tx_dropped++;
+ agg->data = agg->head;
+ spin_lock_irqsave(&tp->tx_lock, lockflags);
+ list_add_tail(&agg->list, &tp->tx_free);
+ spin_unlock_irqrestore(&tp->tx_lock, lockflags);
}
- } else {
- stats->tx_packets++;
- stats->tx_bytes += skb->len;
}
return NETDEV_TX_OK;
@@ -999,17 +1307,18 @@ static inline u8 rtl8152_get_speed(struct r8152 *tp)
static int rtl8152_enable(struct r8152 *tp)
{
- u32 ocp_data;
+ u32 ocp_data;
+ int i, ret;
u8 speed;
speed = rtl8152_get_speed(tp);
- if (speed & _100bps) {
+ if (speed & _10bps) {
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR);
- ocp_data &= ~EEEP_CR_EEEP_TX;
+ ocp_data |= EEEP_CR_EEEP_TX;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR, ocp_data);
} else {
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR);
- ocp_data |= EEEP_CR_EEEP_TX;
+ ocp_data &= ~EEEP_CR_EEEP_TX;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR, ocp_data);
}
@@ -1023,23 +1332,26 @@ static int rtl8152_enable(struct r8152 *tp)
ocp_data &= ~RXDY_GATED_EN;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_MISC_1, ocp_data);
- usb_fill_bulk_urb(tp->rx_urb, tp->udev, usb_rcvbulkpipe(tp->udev, 1),
- tp->rx_skb->data, RTL8152_RMS + sizeof(struct rx_desc),
- (usb_complete_t)read_bulk_callback, tp);
+ for (i = 0; i < RTL8152_MAX_RX; i++) {
+ ret = r8152_submit_rx(tp, &tp->rx_info[i], GFP_KERNEL);
+ if (ret)
+ break;
+ }
- return usb_submit_urb(tp->rx_urb, GFP_KERNEL);
+ return ret;
}
static void rtl8152_disable(struct r8152 *tp)
{
- u32 ocp_data;
- int i;
+ u32 ocp_data;
+ int i;
ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR);
ocp_data &= ~RCR_ACPT_ALL;
ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
- usb_kill_urb(tp->tx_urb);
+ for (i = 0; i < RTL8152_MAX_TX; i++)
+ usb_kill_urb(tp->tx_info[i].urb);
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MISC_1);
ocp_data |= RXDY_GATED_EN;
@@ -1058,7 +1370,8 @@ static void rtl8152_disable(struct r8152 *tp)
mdelay(1);
}
- usb_kill_urb(tp->rx_urb);
+ for (i = 0; i < RTL8152_MAX_RX; i++)
+ usb_kill_urb(tp->rx_info[i].urb);
rtl8152_nic_reset(tp);
}
@@ -1429,8 +1742,8 @@ static void r8152b_hw_phy_cfg(struct r8152 *tp)
static void r8152b_init(struct r8152 *tp)
{
- u32 ocp_data;
- int i;
+ u32 ocp_data;
+ int i;
rtl_clear_bp(tp);
@@ -1475,9 +1788,9 @@ static void r8152b_init(struct r8152 *tp)
break;
}
- /* disable rx aggregation */
+ /* enable rx aggregation */
ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL);
- ocp_data |= RX_AGG_DISABLE;
+ ocp_data &= ~RX_AGG_DISABLE;
ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data);
}
@@ -1619,6 +1932,7 @@ static int rtl8152_probe(struct usb_interface *intf,
struct usb_device *udev = interface_to_usbdev(intf);
struct r8152 *tp;
struct net_device *netdev;
+ int ret;
if (udev->actconfig->desc.bConfigurationValue != 1) {
usb_driver_set_configuration(udev, 1);
@@ -1631,10 +1945,12 @@ static int rtl8152_probe(struct usb_interface *intf,
return -ENOMEM;
}
+ SET_NETDEV_DEV(netdev, &intf->dev);
tp = netdev_priv(netdev);
+ memset(tp, 0, sizeof(*tp));
tp->msg_enable = 0x7FFF;
- tasklet_init(&tp->tl, rx_fixup, (unsigned long)tp);
+ tasklet_init(&tp->tl, bottom_half, (unsigned long)tp);
INIT_DELAYED_WORK(&tp->schedule, rtl_work_func_t);
tp->udev = udev;
@@ -1657,37 +1973,27 @@ static int rtl8152_probe(struct usb_interface *intf,
r8152b_init(tp);
set_ethernet_addr(tp);
- if (!alloc_all_urbs(tp)) {
- netif_err(tp, probe, netdev, "out of memory");
+ ret = alloc_all_mem(tp);
+ if (ret)
goto out;
- }
-
- tp->rx_skb = netdev_alloc_skb(netdev,
- RTL8152_RMS + sizeof(struct rx_desc));
- if (!tp->rx_skb)
- goto out1;
usb_set_intfdata(intf, tp);
- SET_NETDEV_DEV(netdev, &intf->dev);
-
- if (register_netdev(netdev) != 0) {
+ ret = register_netdev(netdev);
+ if (ret != 0) {
netif_err(tp, probe, netdev, "couldn't register the device");
- goto out2;
+ goto out1;
}
netif_info(tp, probe, netdev, "%s", DRIVER_VERSION);
return 0;
-out2:
- usb_set_intfdata(intf, NULL);
- dev_kfree_skb(tp->rx_skb);
out1:
- free_all_urbs(tp);
+ usb_set_intfdata(intf, NULL);
out:
free_netdev(netdev);
- return -EIO;
+ return ret;
}
static void rtl8152_unload(struct r8152 *tp)
@@ -1715,9 +2021,7 @@ static void rtl8152_disconnect(struct usb_interface *intf)
tasklet_kill(&tp->tl);
unregister_netdev(tp->netdev);
rtl8152_unload(tp);
- free_all_urbs(tp);
- if (tp->rx_skb)
- dev_kfree_skb(tp->rx_skb);
+ free_all_mem(tp);
free_netdev(tp->netdev);
}
}
@@ -1732,11 +2036,12 @@ MODULE_DEVICE_TABLE(usb, rtl8152_table);
static struct usb_driver rtl8152_driver = {
.name = MODULENAME,
+ .id_table = rtl8152_table,
.probe = rtl8152_probe,
.disconnect = rtl8152_disconnect,
- .id_table = rtl8152_table,
.suspend = rtl8152_suspend,
- .resume = rtl8152_resume
+ .resume = rtl8152_resume,
+ .reset_resume = rtl8152_resume,
};
module_usb_driver(rtl8152_driver);
--
1.8.3.1
Use an interrupt transfer instead of polling the link status. Delay
updating the link-down status, because some link-down events merely
result from a change of speed.
Signed-off-by: Hayes Wang <[email protected]>
---
drivers/net/usb/r8152.c | 134 ++++++++++++++++++++++++++++++++++++++----------
1 file changed, 107 insertions(+), 27 deletions(-)
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 5d9d949..32af41a 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -272,6 +272,9 @@ enum rtl_register_content {
#define RTL8152_MAX_TX 10
#define RTL8152_MAX_RX 10
+#define INTBUFSIZE 2
+
+#define INTR_LINK 0x0004
#define RTL8152_REQT_READ 0xc0
#define RTL8152_REQT_WRITE 0x40
@@ -292,7 +295,8 @@ enum rtl_register_content {
enum rtl8152_flags {
RTL8152_UNPLUG = 0,
RTL8152_SET_RX_MODE,
- WORK_ENABLE
+ WORK_ENABLE,
+ RTL8152_LINK_CHG,
};
/* Define these values to match your device */
@@ -349,7 +353,9 @@ struct r8152 {
unsigned long flags;
struct usb_device *udev;
struct tasklet_struct tl;
+ struct usb_interface *intf;
struct net_device *netdev;
+ struct urb *intr_urb;
struct tx_agg tx_info[RTL8152_MAX_TX];
struct rx_agg rx_info[RTL8152_MAX_RX];
struct list_head rx_done, tx_free;
@@ -357,8 +363,10 @@ struct r8152 {
spinlock_t rx_lock, tx_lock;
struct delayed_work schedule;
struct mii_if_info mii;
+ int intr_interval;
u32 msg_enable;
u16 ocp_base;
+ u8 *intr_buff;
u8 version;
u8 speed;
};
@@ -855,6 +863,56 @@ static void write_bulk_callback(struct urb *urb)
tasklet_schedule(&tp->tl);
}
+static void intr_callback(struct urb *urb)
+{
+ struct r8152 *tp;
+ __u16 *d;
+ int status = urb->status;
+ int res;
+
+ tp = urb->context;
+ if (!tp)
+ return;
+
+ switch (status) {
+ case 0: /* success */
+ break;
+ case -ECONNRESET: /* unlink */
+ case -ESHUTDOWN:
+ netif_device_detach(tp->netdev);
+ case -ENOENT:
+ return;
+ case -EOVERFLOW:
+ netif_info(tp, intr, tp->netdev, "intr status -EOVERFLOW\n");
+ goto resubmit;
+ /* -EPIPE: should clear the halt */
+ default:
+ netif_info(tp, intr, tp->netdev, "intr status %d\n", status);
+ goto resubmit;
+ }
+
+ d = urb->transfer_buffer;
+ if (INTR_LINK & __le16_to_cpu(d[0])) {
+ if (!(tp->speed & LINK_STATUS)) {
+ set_bit(RTL8152_LINK_CHG, &tp->flags);
+ schedule_delayed_work(&tp->schedule, 0);
+ }
+ } else {
+ if (tp->speed & LINK_STATUS) {
+ set_bit(RTL8152_LINK_CHG, &tp->flags);
+ schedule_delayed_work(&tp->schedule, 5 * HZ);
+ }
+ }
+
+resubmit:
+ res = usb_submit_urb(urb, GFP_ATOMIC);
+ if (res == -ENODEV)
+ netif_device_detach(tp->netdev);
+ else if (res)
+ netif_err(tp, intr, tp->netdev,
+ "can't resubmit intr, status %d\n", res);
+}
+
static inline void *rx_agg_align(void *data)
{
return (void *)ALIGN((uintptr_t)data, 8);
@@ -894,11 +952,24 @@ static void free_all_mem(struct r8152 *tp)
tp->tx_info[i].head = tp->tx_info[i].data = NULL;
}
}
+
+ if (tp->intr_urb) {
+ usb_free_urb(tp->intr_urb);
+ tp->intr_urb = NULL;
+ }
+
+ if (tp->intr_buff) {
+ kfree(tp->intr_buff);
+ tp->intr_buff = NULL;
+ }
}
static int alloc_all_mem(struct r8152 *tp)
{
struct net_device *netdev = tp->netdev;
+ struct usb_interface *intf = tp->intf;
+ struct usb_host_interface *alt = intf->cur_altsetting;
+ struct usb_host_endpoint *ep_intr = alt->endpoint + 2;
struct urb *urb;
int node, i;
u8 *buf;
@@ -964,6 +1035,19 @@ static int alloc_all_mem(struct r8152 *tp)
list_add_tail(&tp->tx_info[i].list, &tp->tx_free);
}
+ tp->intr_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!tp->intr_urb)
+ goto err1;
+
+ tp->intr_buff = kmalloc(INTBUFSIZE, GFP_KERNEL);
+ if (!tp->intr_buff)
+ goto err1;
+
+ tp->intr_interval = (int)ep_intr->desc.bInterval;
+ usb_fill_int_urb(tp->intr_urb, tp->udev, usb_rcvintpipe(tp->udev, 3),
+ tp->intr_buff, INTBUFSIZE, intr_callback,
+ tp, tp->intr_interval);
+
return 0;
err1:
@@ -1221,8 +1305,10 @@ static void rtl8152_set_rx_mode(struct net_device *netdev)
{
struct r8152 *tp = netdev_priv(netdev);
- if (tp->speed & LINK_STATUS)
+ if (tp->speed & LINK_STATUS) {
set_bit(RTL8152_SET_RX_MODE, &tp->flags);
+ schedule_delayed_work(&tp->schedule, 0);
+ }
}
static void _rtl8152_set_rx_mode(struct net_device *netdev)
@@ -1633,7 +1719,6 @@ static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u16 speed, u8 duplex)
r8152_mdio_write(tp, MII_BMCR, bmcr);
out:
- schedule_delayed_work(&tp->schedule, 5 * HZ);
return ret;
}
@@ -1656,6 +1741,7 @@ static void set_carrier(struct r8152 *tp)
struct net_device *netdev = tp->netdev;
u8 speed;
+ clear_bit(RTL8152_LINK_CHG, &tp->flags);
speed = rtl8152_get_speed(tp);
if (speed & LINK_STATUS) {
@@ -1683,13 +1769,12 @@ static void rtl_work_func_t(struct work_struct *work)
if (test_bit(RTL8152_UNPLUG, &tp->flags))
goto out1;
- set_carrier(tp);
+ if (test_bit(RTL8152_LINK_CHG, &tp->flags))
+ set_carrier(tp);
if (test_bit(RTL8152_SET_RX_MODE, &tp->flags))
_rtl8152_set_rx_mode(tp->netdev);
- schedule_delayed_work(&tp->schedule, HZ);
-
out1:
return;
}
@@ -1699,28 +1784,20 @@ static int rtl8152_open(struct net_device *netdev)
struct r8152 *tp = netdev_priv(netdev);
int res = 0;
- tp->speed = rtl8152_get_speed(tp);
- if (tp->speed & LINK_STATUS) {
- res = rtl8152_enable(tp);
- if (res) {
- if (res == -ENODEV)
- netif_device_detach(tp->netdev);
-
- netif_err(tp, ifup, netdev,
- "rtl8152_open failed: %d\n", res);
- return res;
- }
-
- netif_carrier_on(netdev);
- } else {
- netif_stop_queue(netdev);
- netif_carrier_off(netdev);
+ res = usb_submit_urb(tp->intr_urb, GFP_KERNEL);
+ if (res) {
+ if (res == -ENODEV)
+ netif_device_detach(tp->netdev);
+ netif_warn(tp, ifup, netdev,
+ "intr_urb submit failed: %d\n", res);
+ return res;
}
rtl8152_set_speed(tp, AUTONEG_ENABLE, SPEED_100, DUPLEX_FULL);
+ tp->speed = 0;
+ netif_carrier_off(netdev);
netif_start_queue(netdev);
set_bit(WORK_ENABLE, &tp->flags);
- schedule_delayed_work(&tp->schedule, 0);
return res;
}
@@ -1730,6 +1807,7 @@ static int rtl8152_close(struct net_device *netdev)
struct r8152 *tp = netdev_priv(netdev);
int res = 0;
+ usb_kill_urb(tp->intr_urb);
clear_bit(WORK_ENABLE, &tp->flags);
cancel_delayed_work_sync(&tp->schedule);
netif_stop_queue(netdev);
@@ -1852,6 +1930,7 @@ static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
netif_device_detach(tp->netdev);
if (netif_running(tp->netdev)) {
+ usb_kill_urb(tp->intr_urb);
clear_bit(WORK_ENABLE, &tp->flags);
cancel_delayed_work_sync(&tp->schedule);
}
@@ -1868,10 +1947,11 @@ static int rtl8152_resume(struct usb_interface *intf)
r8152b_init(tp);
netif_device_attach(tp->netdev);
if (netif_running(tp->netdev)) {
- rtl8152_enable(tp);
+ rtl8152_set_speed(tp, AUTONEG_ENABLE, SPEED_100, DUPLEX_FULL);
+ tp->speed = 0;
+ netif_carrier_off(tp->netdev);
set_bit(WORK_ENABLE, &tp->flags);
- set_bit(RTL8152_SET_RX_MODE, &tp->flags);
- schedule_delayed_work(&tp->schedule, 0);
+ usb_submit_urb(tp->intr_urb, GFP_KERNEL);
}
return 0;
@@ -2006,13 +2086,13 @@ static int rtl8152_probe(struct usb_interface *intf,
tp->udev = udev;
tp->netdev = netdev;
+ tp->intf = intf;
netdev->netdev_ops = &rtl8152_netdev_ops;
netdev->watchdog_timeo = RTL8152_TX_TIMEOUT;
netdev->features |= NETIF_F_IP_CSUM;
netdev->hw_features = NETIF_F_IP_CSUM;
SET_ETHTOOL_OPS(netdev, &ops);
- tp->speed = 0;
tp->mii.dev = netdev;
tp->mii.mdio_read = read_mii_word;
--
1.8.3.1
Enable tx checksum.
Signed-off-by: Hayes Wang <[email protected]>
---
drivers/net/usb/r8152.c | 63 +++++++++++++++++++++++++++++++++++++++++++++----
1 file changed, 58 insertions(+), 5 deletions(-)
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index c6c5aa2..5d9d949 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -20,6 +20,8 @@
#include <linux/if_vlan.h>
#include <linux/uaccess.h>
#include <linux/list.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
/* Version Information */
#define DRIVER_VERSION "v1.01.0 (2013/08/12)"
@@ -314,8 +316,13 @@ struct tx_desc {
u32 opts1;
#define TX_FS (1 << 31) /* First segment of a packet */
#define TX_LS (1 << 30) /* Final segment of a packet */
-#define TX_LEN_MASK 0xffff
+#define TX_LEN_MASK 0x3ffff
+
u32 opts2;
+#define UDP_CS (1 << 31) /* Calculate UDP/IP checksum */
+#define TCP_CS (1 << 30) /* Calculate TCP/IP checksum */
+#define IPV4_CS (1 << 29) /* Calculate IPv4 checksum */
+#define IPV6_CS (1 << 28) /* Calculate IPv6 checksum */
};
struct rx_agg {
@@ -964,6 +971,51 @@ err1:
return -ENOMEM;
}
+static void
+r8152_tx_csum(struct r8152 *tp, struct tx_desc *desc, struct sk_buff *skb)
+{
+ memset(desc, 0, sizeof(*desc));
+
+ desc->opts1 = cpu_to_le32((skb->len & TX_LEN_MASK) | TX_FS | TX_LS);
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ __be16 protocol;
+ u8 ip_protocol;
+ u32 opts2 = 0;
+
+ if (skb->protocol == htons(ETH_P_8021Q))
+ protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
+ else
+ protocol = skb->protocol;
+
+ switch (protocol) {
+ case htons(ETH_P_IP):
+ opts2 |= IPV4_CS;
+ ip_protocol = ip_hdr(skb)->protocol;
+ break;
+
+ case htons(ETH_P_IPV6):
+ opts2 |= IPV6_CS;
+ ip_protocol = ipv6_hdr(skb)->nexthdr;
+ break;
+
+ default:
+ ip_protocol = IPPROTO_RAW;
+ break;
+ }
+
+ if (ip_protocol == IPPROTO_TCP) {
+ opts2 |= TCP_CS;
+ opts2 |= (skb_transport_offset(skb) & 0x7fff) << 17;
+ } else if (ip_protocol == IPPROTO_UDP) {
+ opts2 |= UDP_CS;
+ } else
+ WARN_ON_ONCE(1);
+
+ desc->opts2 = cpu_to_le32(opts2);
+ }
+}
+
static void rx_bottom(struct r8152 *tp)
{
struct net_device_stats *stats;
@@ -1100,8 +1152,7 @@ next_agg:
tx_desc = (struct tx_desc *)agg->data;
agg->data += sizeof(*tx_desc);
- tx_desc->opts1 = cpu_to_le32((skb->len & TX_LEN_MASK) | TX_FS |
- TX_LS);
+ r8152_tx_csum(tp, tx_desc, skb);
memcpy(agg->data, skb->data, len);
agg->skb_num++;
agg->skb_len += len;
@@ -1249,7 +1300,7 @@ static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb,
agg->skb_num = agg->skb_len = 0;
len = skb->len;
- tx_desc->opts1 = cpu_to_le32((skb->len & TX_LEN_MASK) | TX_FS | TX_LS);
+ r8152_tx_csum(tp, tx_desc, skb);
memcpy(agg->data, skb->data, len);
dev_kfree_skb_any(skb);
agg->skb_num++;
@@ -1957,7 +2008,9 @@ static int rtl8152_probe(struct usb_interface *intf,
tp->netdev = netdev;
netdev->netdev_ops = &rtl8152_netdev_ops;
netdev->watchdog_timeo = RTL8152_TX_TIMEOUT;
- netdev->features &= ~NETIF_F_IP_CSUM;
+
+ netdev->features |= NETIF_F_IP_CSUM;
+ netdev->hw_features = NETIF_F_IP_CSUM;
SET_ETHTOOL_OPS(netdev, &ops);
tp->speed = 0;
--
1.8.3.1
On Tue, 2013-08-13 at 15:28 +0800, Hayes Wang wrote:
> +
> +static void rx_bottom(struct r8152 *tp)
> +{
> + struct net_device_stats *stats;
> + struct net_device *netdev;
> + struct rx_agg *agg;
> + struct rx_desc *rx_desc;
> + unsigned long lockflags;
> + struct list_head *cursor, *next;
> + struct sk_buff *skb;
> + struct urb *urb;
> + unsigned pkt_len;
> + int len_used;
> + u8 *rx_data;
> + int ret;
> +
> + netdev = tp->netdev;
> +
> + if (!netif_running(netdev))
> return;
> - dev_kfree_skb_irq(tp->tx_skb);
> - if (!netif_device_present(tp->netdev))
> +
> + stats = rtl8152_get_stats(netdev);
> +
> + spin_lock_irqsave(&tp->rx_lock, lockflags);
> + list_for_each_safe(cursor, next, &tp->rx_done) {
> + list_del_init(cursor);
> + spin_unlock_irqrestore(&tp->rx_lock, lockflags);
> +
> + agg = list_entry(cursor, struct rx_agg, list);
> + urb = agg->urb;
> + if (urb->actual_length < ETH_ZLEN) {
> + ret = r8152_submit_rx(tp, agg, GFP_ATOMIC);
> + spin_lock_irqsave(&tp->rx_lock, lockflags);
> + if (ret && ret != -ENODEV) {
> + list_add_tail(&agg->list, next);
> + tasklet_schedule(&tp->tl);
> + }
> + continue;
> + }
> +
> + len_used = 0;
> + rx_desc = agg->head;
> + rx_data = agg->head;
> + smp_wmb();
> + pkt_len = le32_to_cpu(rx_desc->opts1) & RX_LEN_MASK;
> + len_used += sizeof(struct rx_desc) + pkt_len;
> +
> + while (urb->actual_length >= len_used) {
> + if (pkt_len < ETH_ZLEN)
> + break;
> +
> + pkt_len -= 4; /* CRC */
> + rx_data += sizeof(struct rx_desc);
> +
> + skb = netdev_alloc_skb_ip_align(netdev,
> pkt_len);
> + if (!skb) {
> + stats->rx_dropped++;
> + break;
> + }
> + memcpy(skb->data, rx_data, pkt_len);
> + skb_put(skb, pkt_len);
> + skb->protocol = eth_type_trans(skb, netdev);
> + netif_rx(skb);
> + stats->rx_packets++;
> + stats->rx_bytes += pkt_len;
> +
> + rx_data = rx_agg_align(rx_data + pkt_len + 4);
> + rx_desc = (struct rx_desc *)rx_data;
> + smp_wmb();
Against what is the memory barrier?
> + pkt_len = le32_to_cpu(rx_desc->opts1) &
> RX_LEN_MASK;
> + len_used = (int)(rx_data - (u8 *)agg->head);
> + len_used += sizeof(struct rx_desc) + pkt_len;
> + }
> +
> + ret = r8152_submit_rx(tp, agg, GFP_ATOMIC);
> + spin_lock_irqsave(&tp->rx_lock, lockflags);
> + if (ret && ret != -ENODEV) {
> + list_add_tail(&agg->list, next);
> + tasklet_schedule(&tp->tl);
> + }
> + }
> + spin_unlock_irqrestore(&tp->rx_lock, lockflags);
> +}
Oliver Neukum [mailto:[email protected]]
> Sent: Tuesday, August 13, 2013 4:49 PM
> To: Hayeswang
> Cc: [email protected]; [email protected];
> [email protected]
> Subject: Re: [PATCH net-next 1/3] net/usb/r8152: support aggregation
>
[...]
> > + len_used = 0;
> > + rx_desc = agg->head;
> > + rx_data = agg->head;
> > + smp_wmb();
> > + pkt_len = le32_to_cpu(rx_desc->opts1) & RX_LEN_MASK;
> > + len_used += sizeof(struct rx_desc) + pkt_len;
> > +
> > + while (urb->actual_length >= len_used) {
> > + if (pkt_len < ETH_ZLEN)
> > + break;
> > +
> > + pkt_len -= 4; /* CRC */
> > + rx_data += sizeof(struct rx_desc);
> > +
> > + skb = netdev_alloc_skb_ip_align(netdev,
> > pkt_len);
> > + if (!skb) {
> > + stats->rx_dropped++;
> > + break;
> > + }
> > + memcpy(skb->data, rx_data, pkt_len);
> > + skb_put(skb, pkt_len);
> > + skb->protocol = eth_type_trans(skb, netdev);
> > + netif_rx(skb);
> > + stats->rx_packets++;
> > + stats->rx_bytes += pkt_len;
> > +
> > + rx_data = rx_agg_align(rx_data +
> pkt_len + 4);
> > + rx_desc = (struct rx_desc *)rx_data;
> > + smp_wmb();
>
> Against what is the memory barrier?
Excuse me. I don't understand your question. Do you mean the function should not
be used here?
Best Regards,
Hayes
Hello.
On 08/13/2013 11:28 AM, Hayes Wang wrote:
> Enable tx checksum.
> Signed-off-by: Hayes Wang <[email protected]>
> ---
> drivers/net/usb/r8152.c | 63 +++++++++++++++++++++++++++++++++++++++++++++----
> 1 file changed, 58 insertions(+), 5 deletions(-)
> diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
> index c6c5aa2..5d9d949 100644
> --- a/drivers/net/usb/r8152.c
> +++ b/drivers/net/usb/r8152.c
[...]
> @@ -964,6 +971,51 @@ err1:
> return -ENOMEM;
> }
>
> +static void
> +r8152_tx_csum(struct r8152 *tp, struct tx_desc *desc, struct sk_buff *skb)
> +{
[...]
> + if (ip_protocol == IPPROTO_TCP) {
> + opts2 |= TCP_CS;
> + opts2 |= (skb_transport_offset(skb) & 0x7fff) << 17;
> + } else if (ip_protocol == IPPROTO_UDP) {
> + opts2 |= UDP_CS;
> + } else
> + WARN_ON_ONCE(1);
Strange, why does the *else if* branch have {} while the *else* branch doesn't? It
should, according to Documentation/CodingStyle.
> +
> + desc->opts2 = cpu_to_le32(opts2);
> + }
> +}
> +
WBR, Sergei
On Tue, 2013-08-13 at 20:32 +0800, hayeswang wrote:
> Oliver Neukum [mailto:[email protected]]
> > Sent: Tuesday, August 13, 2013 4:49 PM
> > To: Hayeswang
> > Cc: [email protected]; [email protected];
> > [email protected]
> > Subject: Re: [PATCH net-next 1/3] net/usb/r8152: support aggregation
> >
> [...]
> > > + len_used = 0;
> > > + rx_desc = agg->head;
> > > + rx_data = agg->head;
> > > + smp_wmb();
> > > + pkt_len = le32_to_cpu(rx_desc->opts1) & RX_LEN_MASK;
> > > + len_used += sizeof(struct rx_desc) + pkt_len;
> > > +
> > > + while (urb->actual_length >= len_used) {
> > > + if (pkt_len < ETH_ZLEN)
> > > + break;
> > > +
> > > + pkt_len -= 4; /* CRC */
> > > + rx_data += sizeof(struct rx_desc);
> > > +
> > > + skb = netdev_alloc_skb_ip_align(netdev,
> > > pkt_len);
> > > + if (!skb) {
> > > + stats->rx_dropped++;
> > > + break;
> > > + }
> > > + memcpy(skb->data, rx_data, pkt_len);
> > > + skb_put(skb, pkt_len);
> > > + skb->protocol = eth_type_trans(skb, netdev);
> > > + netif_rx(skb);
> > > + stats->rx_packets++;
> > > + stats->rx_bytes += pkt_len;
> > > +
> > > + rx_data = rx_agg_align(rx_data +
> > pkt_len + 4);
> > > + rx_desc = (struct rx_desc *)rx_data;
> > > + smp_wmb();
> >
> > Against what is the memory barrier?
>
> Excuse me. I don't understand your question. Do you mean the function should not
> be used here?
I don't understand what problem the function is supposed to fix. As long
as I don't understand it I cannot say for sure whether it is correct.
There seems no obvious reason for a memory barrier, but there may be a
hidden reason I don't see.
Regards
Oliver
From: Oliver Neukum <[email protected]>
Date: Tue, 13 Aug 2013 17:17:10 +0200
> On Tue, 2013-08-13 at 20:32 +0800, hayeswang wrote:
>> Oliver Neukum [mailto:[email protected]]
>> > Sent: Tuesday, August 13, 2013 4:49 PM
>> > To: Hayeswang
>> > Cc: [email protected]; [email protected];
>> > [email protected]
>> > Subject: Re: [PATCH net-next 1/3] net/usb/r8152: support aggregation
>> >
>> [...]
>> > > + len_used = 0;
>> > > + rx_desc = agg->head;
>> > > + rx_data = agg->head;
>> > > + smp_wmb();
>> > > + pkt_len = le32_to_cpu(rx_desc->opts1) & RX_LEN_MASK;
>> > > + len_used += sizeof(struct rx_desc) + pkt_len;
>> > > +
>> > > + while (urb->actual_length >= len_used) {
>> > > + if (pkt_len < ETH_ZLEN)
>> > > + break;
>> > > +
>> > > + pkt_len -= 4; /* CRC */
>> > > + rx_data += sizeof(struct rx_desc);
>> > > +
>> > > + skb = netdev_alloc_skb_ip_align(netdev,
>> > > pkt_len);
>> > > + if (!skb) {
>> > > + stats->rx_dropped++;
>> > > + break;
>> > > + }
>> > > + memcpy(skb->data, rx_data, pkt_len);
>> > > + skb_put(skb, pkt_len);
>> > > + skb->protocol = eth_type_trans(skb, netdev);
>> > > + netif_rx(skb);
>> > > + stats->rx_packets++;
>> > > + stats->rx_bytes += pkt_len;
>> > > +
>> > > + rx_data = rx_agg_align(rx_data +
>> > pkt_len + 4);
>> > > + rx_desc = (struct rx_desc *)rx_data;
>> > > + smp_wmb();
>> >
>> > Against what is the memory barrier?
>>
>> Excuse me. I don't understand your question. Do you mean the function should not
>> be used here?
>
> I don't understand what problem the function is supposed to fix. As long
> as I don't understand it I cannot say for sure whether it is correct.
> There seems no obvious reason for a memory barrier, but there may be a
> hidden reason I don't see.
Hayes, when Oliver asks you "Against what is the memory barrier?" he is asking
you which memory operations you are trying to order.
You do not explain this in your commit message, nor do you explain it with a
suitable comment. This is not acceptable.
It is absolutely critical, that any time you add a memory barrier, you add a
comment above the new memory barrier explaining exactly what the barrier is
trying to achieve.
In fact, this is required by our coding standards.
David Miller [mailto:[email protected]]
> Sent: Wednesday, August 14, 2013 7:41 AM
> To: [email protected]
> Cc: Hayeswang; [email protected];
> [email protected]; [email protected]
> Subject: Re: [PATCH net-next 1/3] net/usb/r8152: support aggregation
>
[...]
> > I don't understand what problem the function is supposed to
> fix. As long
> > as I don't understand it I cannot say for sure whether it
> is correct.
> > There seems no obvious reason for a memory barrier, but
> there may be a
> > hidden reason I don't see.
>
> Hayes, when Oliver asks you "Against what is the memory
> barrier?" he is asking
> you which memory operations you are trying to order.
>
> You do not explain this in your commit message, nor do you
> explain it with a
> suitable comment. This is not acceptable.
>
> It is absolutely critical, that any time you add a memory
> barrier, you add a
> comment above the new memory barrier explaining exactly what
> the barrier is
> trying to achieve.
>
> In fact, this is required by our coding standards.
I just want to make sure the rx_desc and rx_data are set correctly before
they are used. However, I studied some examples and information from the internet,
and I think that the memory barriers are not necessary here. Therefore, I will
remove them later.
Best Regards,
Hayes
Enable tx checksum.
Signed-off-by: Hayes Wang <[email protected]>
---
drivers/net/usb/r8152.c | 64 +++++++++++++++++++++++++++++++++++++++++++++----
1 file changed, 59 insertions(+), 5 deletions(-)
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index abb0b9f..4d938a7 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -20,6 +20,8 @@
#include <linux/if_vlan.h>
#include <linux/uaccess.h>
#include <linux/list.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
/* Version Information */
#define DRIVER_VERSION "v1.01.0 (2013/08/12)"
@@ -314,8 +316,13 @@ struct tx_desc {
u32 opts1;
#define TX_FS (1 << 31) /* First segment of a packet */
#define TX_LS (1 << 30) /* Final segment of a packet */
-#define TX_LEN_MASK 0xffff
+#define TX_LEN_MASK 0x3ffff
+
u32 opts2;
+#define UDP_CS (1 << 31) /* Calculate UDP/IP checksum */
+#define TCP_CS (1 << 30) /* Calculate TCP/IP checksum */
+#define IPV4_CS (1 << 29) /* Calculate IPv4 checksum */
+#define IPV6_CS (1 << 28) /* Calculate IPv6 checksum */
};
struct rx_agg {
@@ -968,6 +975,52 @@ err1:
return -ENOMEM;
}
+static void
+r8152_tx_csum(struct r8152 *tp, struct tx_desc *desc, struct sk_buff *skb)
+{
+ memset(desc, 0, sizeof(*desc));
+
+ desc->opts1 = cpu_to_le32((skb->len & TX_LEN_MASK) | TX_FS | TX_LS);
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ __be16 protocol;
+ u8 ip_protocol;
+ u32 opts2 = 0;
+
+ if (skb->protocol == htons(ETH_P_8021Q))
+ protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
+ else
+ protocol = skb->protocol;
+
+ switch (protocol) {
+ case htons(ETH_P_IP):
+ opts2 |= IPV4_CS;
+ ip_protocol = ip_hdr(skb)->protocol;
+ break;
+
+ case htons(ETH_P_IPV6):
+ opts2 |= IPV6_CS;
+ ip_protocol = ipv6_hdr(skb)->nexthdr;
+ break;
+
+ default:
+ ip_protocol = IPPROTO_RAW;
+ break;
+ }
+
+ if (ip_protocol == IPPROTO_TCP) {
+ opts2 |= TCP_CS;
+ opts2 |= (skb_transport_offset(skb) & 0x7fff) << 17;
+ } else if (ip_protocol == IPPROTO_UDP) {
+ opts2 |= UDP_CS;
+ } else {
+ WARN_ON_ONCE(1);
+ }
+
+ desc->opts2 = cpu_to_le32(opts2);
+ }
+}
+
static void rx_bottom(struct r8152 *tp)
{
struct net_device_stats *stats;
@@ -1097,8 +1150,7 @@ next_agg:
tx_desc = (struct tx_desc *)tx_data;
tx_data += sizeof(*tx_desc);
- tx_desc->opts1 = cpu_to_le32((skb->len & TX_LEN_MASK) | TX_FS |
- TX_LS);
+ r8152_tx_csum(tp, tx_desc, skb);
memcpy(tx_data, skb->data, len);
agg->skb_num++;
agg->skb_len += len;
@@ -1256,7 +1308,7 @@ static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb,
agg->skb_num = agg->skb_len = 0;
len = skb->len;
- tx_desc->opts1 = cpu_to_le32((skb->len & TX_LEN_MASK) | TX_FS | TX_LS);
+ r8152_tx_csum(tp, tx_desc, skb);
memcpy(tx_data, skb->data, len);
dev_kfree_skb_any(skb);
agg->skb_num++;
@@ -1977,7 +2029,9 @@ static int rtl8152_probe(struct usb_interface *intf,
tp->netdev = netdev;
netdev->netdev_ops = &rtl8152_netdev_ops;
netdev->watchdog_timeo = RTL8152_TX_TIMEOUT;
- netdev->features &= ~NETIF_F_IP_CSUM;
+
+ netdev->features |= NETIF_F_IP_CSUM;
+ netdev->hw_features = NETIF_F_IP_CSUM;
SET_ETHTOOL_OPS(netdev, &ops);
tp->speed = 0;
--
1.8.3.1
Use the interrupt transfer to replace polling the link status.
Signed-off-by: Hayes Wang <[email protected]>
---
drivers/net/usb/r8152.c | 140 ++++++++++++++++++++++++++++++++++++++----------
1 file changed, 113 insertions(+), 27 deletions(-)
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 4d938a7..ef2498c 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -272,6 +272,9 @@ enum rtl_register_content {
#define RTL8152_MAX_TX 10
#define RTL8152_MAX_RX 10
+#define INTBUFSIZE 2
+
+#define INTR_LINK 0x0004
#define RTL8152_REQT_READ 0xc0
#define RTL8152_REQT_WRITE 0x40
@@ -292,7 +295,8 @@ enum rtl_register_content {
enum rtl8152_flags {
RTL8152_UNPLUG = 0,
RTL8152_SET_RX_MODE,
- WORK_ENABLE
+ WORK_ENABLE,
+ RTL8152_LINK_CHG,
};
/* Define these values to match your device */
@@ -347,7 +351,9 @@ struct r8152 {
unsigned long flags;
struct usb_device *udev;
struct tasklet_struct tl;
+ struct usb_interface *intf;
struct net_device *netdev;
+ struct urb *intr_urb;
struct tx_agg tx_info[RTL8152_MAX_TX];
struct rx_agg rx_info[RTL8152_MAX_RX];
struct list_head rx_done, tx_free;
@@ -355,8 +361,10 @@ struct r8152 {
spinlock_t rx_lock, tx_lock;
struct delayed_work schedule;
struct mii_if_info mii;
+ int intr_interval;
u32 msg_enable;
u16 ocp_base;
+ u8 *intr_buff;
u8 version;
u8 speed;
};
@@ -860,6 +868,62 @@ static void write_bulk_callback(struct urb *urb)
tasklet_schedule(&tp->tl);
}
+static void intr_callback(struct urb *urb)
+{
+ struct r8152 *tp;
+ __u16 *d;
+ int status = urb->status;
+ int res;
+
+ tp = urb->context;
+ if (!tp)
+ return;
+
+ if (!test_bit(WORK_ENABLE, &tp->flags))
+ return;
+
+ if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ return;
+
+ switch (status) {
+ case 0: /* success */
+ break;
+ case -ECONNRESET: /* unlink */
+ case -ESHUTDOWN:
+ netif_device_detach(tp->netdev);
+ case -ENOENT:
+ return;
+ case -EOVERFLOW:
+ netif_info(tp, intr, tp->netdev, "intr status -EOVERFLOW\n");
+ goto resubmit;
+ /* -EPIPE: should clear the halt */
+ default:
+ netif_info(tp, intr, tp->netdev, "intr status %d\n", status);
+ goto resubmit;
+ }
+
+ d = urb->transfer_buffer;
+ if (INTR_LINK & __le16_to_cpu(d[0])) {
+ if (!(tp->speed & LINK_STATUS)) {
+ set_bit(RTL8152_LINK_CHG, &tp->flags);
+ schedule_delayed_work(&tp->schedule, 0);
+ }
+ } else {
+ if (tp->speed & LINK_STATUS) {
+ set_bit(RTL8152_LINK_CHG, &tp->flags);
+ schedule_delayed_work(&tp->schedule, 0);
+ }
+ }
+
+resubmit:
+ res = usb_submit_urb(urb, GFP_ATOMIC);
+ if (res == -ENODEV)
+ netif_device_detach(tp->netdev);
+ else if (res)
+ netif_err(tp, intr, tp->netdev,
+ "can't resubmit intr, status %d\n", res);
+}
+
static inline void *rx_agg_align(void *data)
{
return (void *)ALIGN((uintptr_t)data, 8);
@@ -899,11 +963,24 @@ static void free_all_mem(struct r8152 *tp)
tp->tx_info[i].head = NULL;
}
}
+
+ if (tp->intr_urb) {
+ usb_free_urb(tp->intr_urb);
+ tp->intr_urb = NULL;
+ }
+
+ if (tp->intr_buff) {
+ kfree(tp->intr_buff);
+ tp->intr_buff = NULL;
+ }
}
static int alloc_all_mem(struct r8152 *tp)
{
struct net_device *netdev = tp->netdev;
+ struct usb_interface *intf = tp->intf;
+ struct usb_host_interface *alt = intf->cur_altsetting;
+ struct usb_host_endpoint *ep_intr = alt->endpoint + 2;
struct urb *urb;
int node, i;
u8 *buf;
@@ -968,6 +1045,19 @@ static int alloc_all_mem(struct r8152 *tp)
list_add_tail(&tp->tx_info[i].list, &tp->tx_free);
}
+ tp->intr_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!tp->intr_urb)
+ goto err1;
+
+ tp->intr_buff = kmalloc(INTBUFSIZE, GFP_KERNEL);
+ if (!tp->intr_buff)
+ goto err1;
+
+ tp->intr_interval = (int)ep_intr->desc.bInterval;
+ usb_fill_int_urb(tp->intr_urb, tp->udev, usb_rcvintpipe(tp->udev, 3),
+ tp->intr_buff, INTBUFSIZE, intr_callback,
+ tp, tp->intr_interval);
+
return 0;
err1:
@@ -1228,8 +1318,10 @@ static void rtl8152_set_rx_mode(struct net_device *netdev)
{
struct r8152 *tp = netdev_priv(netdev);
- if (tp->speed & LINK_STATUS)
+ if (tp->speed & LINK_STATUS) {
set_bit(RTL8152_SET_RX_MODE, &tp->flags);
+ schedule_delayed_work(&tp->schedule, 0);
+ }
}
static void _rtl8152_set_rx_mode(struct net_device *netdev)
@@ -1648,7 +1740,6 @@ static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u16 speed, u8 duplex)
r8152_mdio_write(tp, MII_BMCR, bmcr);
out:
- schedule_delayed_work(&tp->schedule, 5 * HZ);
return ret;
}
@@ -1671,6 +1762,7 @@ static void set_carrier(struct r8152 *tp)
struct net_device *netdev = tp->netdev;
u8 speed;
+ clear_bit(RTL8152_LINK_CHG, &tp->flags);
speed = rtl8152_get_speed(tp);
if (speed & LINK_STATUS) {
@@ -1700,13 +1792,12 @@ static void rtl_work_func_t(struct work_struct *work)
if (test_bit(RTL8152_UNPLUG, &tp->flags))
goto out1;
- set_carrier(tp);
+ if (test_bit(RTL8152_LINK_CHG, &tp->flags))
+ set_carrier(tp);
if (test_bit(RTL8152_SET_RX_MODE, &tp->flags))
_rtl8152_set_rx_mode(tp->netdev);
- schedule_delayed_work(&tp->schedule, HZ);
-
out1:
return;
}
@@ -1716,28 +1807,20 @@ static int rtl8152_open(struct net_device *netdev)
struct r8152 *tp = netdev_priv(netdev);
int res = 0;
- tp->speed = rtl8152_get_speed(tp);
- if (tp->speed & LINK_STATUS) {
- res = rtl8152_enable(tp);
- if (res) {
- if (res == -ENODEV)
- netif_device_detach(tp->netdev);
-
- netif_err(tp, ifup, netdev,
- "rtl8152_open failed: %d\n", res);
- return res;
- }
-
- netif_carrier_on(netdev);
- } else {
- netif_stop_queue(netdev);
- netif_carrier_off(netdev);
+ res = usb_submit_urb(tp->intr_urb, GFP_KERNEL);
+ if (res) {
+ if (res == -ENODEV)
+ netif_device_detach(tp->netdev);
+ netif_warn(tp, ifup, netdev,
+ "intr_urb submit failed: %d\n", res);
+ return res;
}
rtl8152_set_speed(tp, AUTONEG_ENABLE, SPEED_100, DUPLEX_FULL);
+ tp->speed = 0;
+ netif_carrier_off(netdev);
netif_start_queue(netdev);
set_bit(WORK_ENABLE, &tp->flags);
- schedule_delayed_work(&tp->schedule, 0);
return res;
}
@@ -1747,6 +1830,7 @@ static int rtl8152_close(struct net_device *netdev)
struct r8152 *tp = netdev_priv(netdev);
int res = 0;
+ usb_kill_urb(tp->intr_urb);
clear_bit(WORK_ENABLE, &tp->flags);
cancel_delayed_work_sync(&tp->schedule);
netif_stop_queue(netdev);
@@ -1872,6 +1956,7 @@ static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
if (netif_running(tp->netdev)) {
clear_bit(WORK_ENABLE, &tp->flags);
+ usb_kill_urb(tp->intr_urb);
cancel_delayed_work_sync(&tp->schedule);
tasklet_disable(&tp->tl);
}
@@ -1888,10 +1973,11 @@ static int rtl8152_resume(struct usb_interface *intf)
r8152b_init(tp);
netif_device_attach(tp->netdev);
if (netif_running(tp->netdev)) {
- rtl8152_enable(tp);
+ rtl8152_set_speed(tp, AUTONEG_ENABLE, SPEED_100, DUPLEX_FULL);
+ tp->speed = 0;
+ netif_carrier_off(tp->netdev);
set_bit(WORK_ENABLE, &tp->flags);
- set_bit(RTL8152_SET_RX_MODE, &tp->flags);
- schedule_delayed_work(&tp->schedule, 0);
+ usb_submit_urb(tp->intr_urb, GFP_KERNEL);
tasklet_enable(&tp->tl);
}
@@ -2027,13 +2113,13 @@ static int rtl8152_probe(struct usb_interface *intf,
tp->udev = udev;
tp->netdev = netdev;
+ tp->intf = intf;
netdev->netdev_ops = &rtl8152_netdev_ops;
netdev->watchdog_timeo = RTL8152_TX_TIMEOUT;
netdev->features |= NETIF_F_IP_CSUM;
netdev->hw_features = NETIF_F_IP_CSUM;
SET_ETHTOOL_OPS(netdev, &ops);
- tp->speed = 0;
tp->mii.dev = netdev;
tp->mii.mdio_read = read_mii_word;
--
1.8.3.1
Enable the tx/rx aggregation which could contain one or more packets
for each bulk in/out. This could reduce the load on the host
controller by sending fewer bulk transfers.
The rx packets in the bulk in buffer should be 8-byte aligned, and
the tx packets in the bulk out buffer should be 4-byte aligned.
Signed-off-by: Hayes Wang <[email protected]>
---
drivers/net/usb/r8152.c | 647 ++++++++++++++++++++++++++++++++++++------------
1 file changed, 486 insertions(+), 161 deletions(-)
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 11c51f2..abb0b9f 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -19,9 +19,10 @@
#include <linux/crc32.h>
#include <linux/if_vlan.h>
#include <linux/uaccess.h>
+#include <linux/list.h>
/* Version Information */
-#define DRIVER_VERSION "v1.0.0 (2013/05/03)"
+#define DRIVER_VERSION "v1.01.0 (2013/08/12)"
#define DRIVER_AUTHOR "Realtek linux nic maintainers <[email protected]>"
#define DRIVER_DESC "Realtek RTL8152 Based USB 2.0 Ethernet Adapters"
#define MODULENAME "r8152"
@@ -267,6 +268,9 @@ enum rtl_register_content {
FULL_DUP = 0x01,
};
+#define RTL8152_MAX_TX 10
+#define RTL8152_MAX_RX 10
+
#define RTL8152_REQT_READ 0xc0
#define RTL8152_REQT_WRITE 0x40
#define RTL8152_REQ_GET_REGS 0x05
@@ -285,7 +289,6 @@ enum rtl_register_content {
/* rtl8152 flags */
enum rtl8152_flags {
RTL8152_UNPLUG = 0,
- RX_URB_FAIL,
RTL8152_SET_RX_MODE,
WORK_ENABLE
};
@@ -315,13 +318,34 @@ struct tx_desc {
u32 opts2;
};
+struct rx_agg {
+ struct list_head list;
+ struct urb *urb;
+ void *context;
+ void *buffer;
+ void *head;
+};
+
+struct tx_agg {
+ struct list_head list;
+ struct urb *urb;
+ void *context;
+ void *buffer;
+ void *head;
+ u32 skb_num;
+ u32 skb_len;
+};
+
struct r8152 {
unsigned long flags;
struct usb_device *udev;
struct tasklet_struct tl;
struct net_device *netdev;
- struct urb *rx_urb, *tx_urb;
- struct sk_buff *tx_skb, *rx_skb;
+ struct tx_agg tx_info[RTL8152_MAX_TX];
+ struct rx_agg rx_info[RTL8152_MAX_RX];
+ struct list_head rx_done, tx_free;
+ struct sk_buff_head tx_queue;
+ spinlock_t rx_lock, tx_lock;
struct delayed_work schedule;
struct mii_if_info mii;
u32 msg_enable;
@@ -340,6 +364,7 @@ enum rtl_version {
* The RTL chips use a 64 element hash table based on the Ethernet CRC.
*/
static const int multicast_filter_limit = 32;
+static unsigned int rx_buf_sz = 16384;
static
int get_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
@@ -686,6 +711,9 @@ static void ocp_reg_write(struct r8152 *tp, u16 addr, u16 data)
ocp_write_word(tp, MCU_TYPE_PLA, ocp_index, data);
}
+static
+int r8152_submit_rx(struct r8152 *tp, struct rx_agg *agg, gfp_t mem_flags);
+
static inline void set_ethernet_addr(struct r8152 *tp)
{
struct net_device *dev = tp->netdev;
@@ -716,26 +744,6 @@ static int rtl8152_set_mac_address(struct net_device *netdev, void *p)
return 0;
}
-static int alloc_all_urbs(struct r8152 *tp)
-{
- tp->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!tp->rx_urb)
- return 0;
- tp->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!tp->tx_urb) {
- usb_free_urb(tp->rx_urb);
- return 0;
- }
-
- return 1;
-}
-
-static void free_all_urbs(struct r8152 *tp)
-{
- usb_free_urb(tp->rx_urb);
- usb_free_urb(tp->tx_urb);
-}
-
static struct net_device_stats *rtl8152_get_stats(struct net_device *dev)
{
return &dev->stats;
@@ -743,129 +751,425 @@ static struct net_device_stats *rtl8152_get_stats(struct net_device *dev)
static void read_bulk_callback(struct urb *urb)
{
- struct r8152 *tp;
- unsigned pkt_len;
- struct sk_buff *skb;
struct net_device *netdev;
- struct net_device_stats *stats;
+ unsigned long lockflags;
int status = urb->status;
+ struct rx_agg *agg;
+ struct r8152 *tp;
int result;
- struct rx_desc *rx_desc;
- tp = urb->context;
+ agg = urb->context;
+ if (!agg)
+ return;
+
+ tp = agg->context;
if (!tp)
return;
+
if (test_bit(RTL8152_UNPLUG, &tp->flags))
return;
+
+ if (!test_bit(WORK_ENABLE, &tp->flags))
+ return;
+
netdev = tp->netdev;
- if (!netif_device_present(netdev))
+ if (!netif_carrier_ok(netdev))
return;
- stats = rtl8152_get_stats(netdev);
switch (status) {
case 0:
- break;
+ if (urb->actual_length < ETH_ZLEN)
+ break;
+
+ spin_lock_irqsave(&tp->rx_lock, lockflags);
+ list_add_tail(&agg->list, &tp->rx_done);
+ spin_unlock_irqrestore(&tp->rx_lock, lockflags);
+ tasklet_schedule(&tp->tl);
+ return;
case -ESHUTDOWN:
set_bit(RTL8152_UNPLUG, &tp->flags);
netif_device_detach(tp->netdev);
+ return;
case -ENOENT:
return; /* the urb is in unlink state */
case -ETIME:
pr_warn_ratelimited("may be reset is needed?..\n");
- goto goon;
+ break;
default:
pr_warn_ratelimited("Rx status %d\n", status);
- goto goon;
+ break;
}
- /* protect against short packets (tell me why we got some?!?) */
- if (urb->actual_length < sizeof(*rx_desc))
- goto goon;
-
-
- rx_desc = (struct rx_desc *)urb->transfer_buffer;
- pkt_len = le32_to_cpu(rx_desc->opts1) & RX_LEN_MASK;
- if (urb->actual_length < sizeof(struct rx_desc) + pkt_len)
- goto goon;
-
- skb = netdev_alloc_skb_ip_align(netdev, pkt_len);
- if (!skb)
- goto goon;
-
- memcpy(skb->data, tp->rx_skb->data + sizeof(struct rx_desc), pkt_len);
- skb_put(skb, pkt_len);
- skb->protocol = eth_type_trans(skb, netdev);
- netif_rx(skb);
- stats->rx_packets++;
- stats->rx_bytes += pkt_len;
-goon:
- usb_fill_bulk_urb(tp->rx_urb, tp->udev, usb_rcvbulkpipe(tp->udev, 1),
- tp->rx_skb->data, RTL8152_RMS + sizeof(struct rx_desc),
- (usb_complete_t)read_bulk_callback, tp);
- result = usb_submit_urb(tp->rx_urb, GFP_ATOMIC);
+ result = r8152_submit_rx(tp, agg, GFP_ATOMIC);
if (result == -ENODEV) {
netif_device_detach(tp->netdev);
} else if (result) {
- set_bit(RX_URB_FAIL, &tp->flags);
- goto resched;
- } else {
- clear_bit(RX_URB_FAIL, &tp->flags);
+ spin_lock_irqsave(&tp->rx_lock, lockflags);
+ list_add_tail(&agg->list, &tp->rx_done);
+ spin_unlock_irqrestore(&tp->rx_lock, lockflags);
+ tasklet_schedule(&tp->tl);
}
-
- return;
-resched:
- tasklet_schedule(&tp->tl);
}
-static void rx_fixup(unsigned long data)
+static void write_bulk_callback(struct urb *urb)
{
+ struct net_device_stats *stats;
+ unsigned long lockflags;
+ struct tx_agg *agg;
struct r8152 *tp;
- int status;
+ int status = urb->status;
- tp = (struct r8152 *)data;
- if (!test_bit(WORK_ENABLE, &tp->flags))
+ agg = urb->context;
+ if (!agg)
return;
- status = usb_submit_urb(tp->rx_urb, GFP_ATOMIC);
- if (status == -ENODEV) {
- netif_device_detach(tp->netdev);
- } else if (status) {
- set_bit(RX_URB_FAIL, &tp->flags);
- goto tlsched;
+ tp = agg->context;
+ if (!tp)
+ return;
+
+ stats = rtl8152_get_stats(tp->netdev);
+ if (status) {
+ pr_warn_ratelimited("Tx status %d\n", status);
+ stats->tx_errors += agg->skb_num;
} else {
- clear_bit(RX_URB_FAIL, &tp->flags);
+ stats->tx_packets += agg->skb_num;
+ stats->tx_bytes += agg->skb_len;
}
- return;
-tlsched:
- tasklet_schedule(&tp->tl);
+ spin_lock_irqsave(&tp->tx_lock, lockflags);
+ list_add_tail(&agg->list, &tp->tx_free);
+ spin_unlock_irqrestore(&tp->tx_lock, lockflags);
+
+ if (!netif_carrier_ok(tp->netdev))
+ return;
+
+ if (!test_bit(WORK_ENABLE, &tp->flags))
+ return;
+
+ if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ return;
+
+ if (!skb_queue_empty(&tp->tx_queue))
+ tasklet_schedule(&tp->tl);
}
-static void write_bulk_callback(struct urb *urb)
+static inline void *rx_agg_align(void *data)
+{
+ return (void *)ALIGN((uintptr_t)data, 8);
+}
+
+static inline void *tx_agg_align(void *data)
+{
+ return (void *)ALIGN((uintptr_t)data, 4);
+}
+
+static void free_all_mem(struct r8152 *tp)
+{
+ int i;
+
+ for (i = 0; i < RTL8152_MAX_RX; i++) {
+ if (tp->rx_info[i].urb) {
+ usb_free_urb(tp->rx_info[i].urb);
+ tp->rx_info[i].urb = NULL;
+ }
+
+ if (tp->rx_info[i].buffer) {
+ kfree(tp->rx_info[i].buffer);
+ tp->rx_info[i].buffer = NULL;
+ tp->rx_info[i].head = NULL;
+ }
+ }
+
+ for (i = 0; i < RTL8152_MAX_TX; i++) {
+ if (tp->tx_info[i].urb) {
+ usb_free_urb(tp->tx_info[i].urb);
+ tp->tx_info[i].urb = NULL;
+ }
+
+ if (tp->tx_info[i].buffer) {
+ kfree(tp->tx_info[i].buffer);
+ tp->tx_info[i].buffer = NULL;
+ tp->tx_info[i].head = NULL;
+ }
+ }
+}
+
+static int alloc_all_mem(struct r8152 *tp)
+{
+ struct net_device *netdev = tp->netdev;
+ struct urb *urb;
+ int node, i;
+ u8 *buf;
+
+ node = netdev->dev.parent ? dev_to_node(netdev->dev.parent) : -1;
+
+ spin_lock_init(&tp->rx_lock);
+ spin_lock_init(&tp->tx_lock);
+ INIT_LIST_HEAD(&tp->rx_done);
+ INIT_LIST_HEAD(&tp->tx_free);
+ skb_queue_head_init(&tp->tx_queue);
+
+ for (i = 0; i < RTL8152_MAX_RX; i++) {
+ buf = kmalloc_node(rx_buf_sz, GFP_KERNEL, node);
+ if (!buf)
+ goto err1;
+
+ if (buf != rx_agg_align(buf)) {
+ kfree(buf);
+ buf = kmalloc_node(rx_buf_sz + 8, GFP_KERNEL, node);
+ if (!buf)
+ goto err1;
+ }
+
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!urb) {
+ kfree(buf);
+ goto err1;
+ }
+
+ INIT_LIST_HEAD(&tp->rx_info[i].list);
+ tp->rx_info[i].context = tp;
+ tp->rx_info[i].urb = urb;
+ tp->rx_info[i].buffer = buf;
+ tp->rx_info[i].head = rx_agg_align(buf);
+ }
+
+ for (i = 0; i < RTL8152_MAX_TX; i++) {
+ buf = kmalloc_node(rx_buf_sz, GFP_KERNEL, node);
+ if (!buf)
+ goto err1;
+
+ if (buf != tx_agg_align(buf)) {
+ kfree(buf);
+ buf = kmalloc_node(rx_buf_sz + 4, GFP_KERNEL, node);
+ if (!buf)
+ goto err1;
+ }
+
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!urb) {
+ kfree(buf);
+ goto err1;
+ }
+
+ INIT_LIST_HEAD(&tp->tx_info[i].list);
+ tp->tx_info[i].context = tp;
+ tp->tx_info[i].urb = urb;
+ tp->tx_info[i].buffer = buf;
+ tp->tx_info[i].head = tx_agg_align(buf);
+
+ list_add_tail(&tp->tx_info[i].list, &tp->tx_free);
+ }
+
+ return 0;
+
+err1:
+ free_all_mem(tp);
+ return -ENOMEM;
+}
+
+static void rx_bottom(struct r8152 *tp)
+{
+ struct net_device_stats *stats;
+ struct net_device *netdev;
+ struct rx_agg *agg;
+ struct rx_desc *rx_desc;
+ unsigned long lockflags;
+ struct list_head *cursor, *next;
+ struct sk_buff *skb;
+ struct urb *urb;
+ unsigned pkt_len;
+ int len_used;
+ u8 *rx_data;
+ int ret;
+
+ netdev = tp->netdev;
+
+ stats = rtl8152_get_stats(netdev);
+
+ spin_lock_irqsave(&tp->rx_lock, lockflags);
+ list_for_each_safe(cursor, next, &tp->rx_done) {
+ list_del_init(cursor);
+ spin_unlock_irqrestore(&tp->rx_lock, lockflags);
+
+ agg = list_entry(cursor, struct rx_agg, list);
+ urb = agg->urb;
+ if (urb->actual_length < ETH_ZLEN) {
+ ret = r8152_submit_rx(tp, agg, GFP_ATOMIC);
+ spin_lock_irqsave(&tp->rx_lock, lockflags);
+ if (ret && ret != -ENODEV) {
+ list_add_tail(&agg->list, next);
+ tasklet_schedule(&tp->tl);
+ }
+ continue;
+ }
+
+ len_used = 0;
+ rx_desc = agg->head;
+ rx_data = agg->head;
+ pkt_len = le32_to_cpu(rx_desc->opts1) & RX_LEN_MASK;
+ len_used += sizeof(struct rx_desc) + pkt_len;
+
+ while (urb->actual_length >= len_used) {
+ if (pkt_len < ETH_ZLEN)
+ break;
+
+ pkt_len -= 4; /* CRC */
+ rx_data += sizeof(struct rx_desc);
+
+ skb = netdev_alloc_skb_ip_align(netdev, pkt_len);
+ if (!skb) {
+ stats->rx_dropped++;
+ break;
+ }
+ memcpy(skb->data, rx_data, pkt_len);
+ skb_put(skb, pkt_len);
+ skb->protocol = eth_type_trans(skb, netdev);
+ netif_rx(skb);
+ stats->rx_packets++;
+ stats->rx_bytes += pkt_len;
+
+ rx_data = rx_agg_align(rx_data + pkt_len + 4);
+ rx_desc = (struct rx_desc *)rx_data;
+ pkt_len = le32_to_cpu(rx_desc->opts1) & RX_LEN_MASK;
+ len_used = (int)(rx_data - (u8 *)agg->head);
+ len_used += sizeof(struct rx_desc) + pkt_len;
+ }
+
+ ret = r8152_submit_rx(tp, agg, GFP_ATOMIC);
+ spin_lock_irqsave(&tp->rx_lock, lockflags);
+ if (ret && ret != -ENODEV) {
+ list_add_tail(&agg->list, next);
+ tasklet_schedule(&tp->tl);
+ }
+ }
+ spin_unlock_irqrestore(&tp->rx_lock, lockflags);
+}
+
+static void tx_bottom(struct r8152 *tp)
+{
+ struct net_device_stats *stats;
+ struct net_device *netdev;
+ struct tx_agg *agg;
+ unsigned long lockflags;
+ u32 remain, total;
+ u8 *tx_data;
+ int res;
+
+ netdev = tp->netdev;
+
+next_agg:
+ agg = NULL;
+ spin_lock_irqsave(&tp->tx_lock, lockflags);
+ if (!skb_queue_empty(&tp->tx_queue) && !list_empty(&tp->tx_free)) {
+ struct list_head *cursor;
+
+ cursor = tp->tx_free.next;
+ list_del_init(cursor);
+ agg = list_entry(cursor, struct tx_agg, list);
+ }
+ spin_unlock_irqrestore(&tp->tx_lock, lockflags);
+
+ if (!agg)
+ return;
+
+ tx_data = agg->head;
+ agg->skb_num = agg->skb_len = 0;
+ remain = rx_buf_sz - sizeof(struct tx_desc);
+ total = 0;
+
+ while (remain >= ETH_ZLEN) {
+ struct tx_desc *tx_desc;
+ struct sk_buff *skb;
+ unsigned int len;
+
+ skb = skb_dequeue(&tp->tx_queue);
+ if (!skb)
+ break;
+
+ len = skb->len;
+ if (remain < len) {
+ skb_queue_head(&tp->tx_queue, skb);
+ break;
+ }
+
+ tx_data = tx_agg_align(tx_data);
+ tx_desc = (struct tx_desc *)tx_data;
+ tx_data += sizeof(*tx_desc);
+
+ tx_desc->opts1 = cpu_to_le32((skb->len & TX_LEN_MASK) | TX_FS |
+ TX_LS);
+ memcpy(tx_data, skb->data, len);
+ agg->skb_num++;
+ agg->skb_len += len;
+ dev_kfree_skb_any(skb);
+
+ tx_data += len;
+ remain = rx_buf_sz - sizeof(*tx_desc) -
+ (u32)(tx_agg_align(tx_data) - agg->head);
+ }
+
+ usb_fill_bulk_urb(agg->urb, tp->udev, usb_sndbulkpipe(tp->udev, 2),
+ agg->head, (int)(tx_data - (u8 *)agg->head),
+ (usb_complete_t)write_bulk_callback, agg);
+ res = usb_submit_urb(agg->urb, GFP_ATOMIC);
+
+ stats = rtl8152_get_stats(netdev);
+
+ if (res) {
+ /* Can we get/handle EPIPE here? */
+ if (res == -ENODEV) {
+ netif_device_detach(netdev);
+ } else {
+ netif_warn(tp, tx_err, netdev,
+ "failed tx_urb %d\n", res);
+ stats->tx_dropped += agg->skb_num;
+ spin_lock_irqsave(&tp->tx_lock, lockflags);
+ list_add_tail(&agg->list, &tp->tx_free);
+ spin_unlock_irqrestore(&tp->tx_lock, lockflags);
+ }
+ return;
+ }
+ goto next_agg;
+}
+
+static void bottom_half(unsigned long data)
{
struct r8152 *tp;
- int status = urb->status;
- tp = urb->context;
- if (!tp)
+ tp = (struct r8152 *)data;
+
+ if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ return;
+
+ if (!test_bit(WORK_ENABLE, &tp->flags))
return;
- dev_kfree_skb_irq(tp->tx_skb);
- if (!netif_device_present(tp->netdev))
+
+ if (!netif_carrier_ok(tp->netdev))
return;
- if (status)
- dev_info(&urb->dev->dev, "%s: Tx status %d\n",
- tp->netdev->name, status);
- tp->netdev->trans_start = jiffies;
- netif_wake_queue(tp->netdev);
+
+ rx_bottom(tp);
+ tx_bottom(tp);
+}
+
+static
+int r8152_submit_rx(struct r8152 *tp, struct rx_agg *agg, gfp_t mem_flags)
+{
+ usb_fill_bulk_urb(agg->urb, tp->udev, usb_rcvbulkpipe(tp->udev, 1),
+ agg->head, rx_buf_sz,
+ (usb_complete_t)read_bulk_callback, agg);
+
+ return usb_submit_urb(agg->urb, mem_flags);
}
static void rtl8152_tx_timeout(struct net_device *netdev)
{
struct r8152 *tp = netdev_priv(netdev);
- struct net_device_stats *stats = rtl8152_get_stats(netdev);
+ int i;
+
netif_warn(tp, tx_err, netdev, "Tx timeout.\n");
- usb_unlink_urb(tp->tx_urb);
- stats->tx_errors++;
+ for (i = 0; i < RTL8152_MAX_TX; i++)
+ usb_unlink_urb(tp->tx_info[i].urb);
}
static void rtl8152_set_rx_mode(struct net_device *netdev)
@@ -923,33 +1227,44 @@ static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb,
{
struct r8152 *tp = netdev_priv(netdev);
struct net_device_stats *stats = rtl8152_get_stats(netdev);
+ unsigned long lockflags;
+ struct tx_agg *agg = NULL;
struct tx_desc *tx_desc;
unsigned int len;
+ u8 *tx_data;
int res;
- netif_stop_queue(netdev);
- len = skb->len;
- if (skb_header_cloned(skb) || skb_headroom(skb) < sizeof(*tx_desc)) {
- struct sk_buff *tx_skb;
+ skb_tx_timestamp(skb);
- tx_skb = skb_copy_expand(skb, sizeof(*tx_desc), 0, GFP_ATOMIC);
- dev_kfree_skb_any(skb);
- if (!tx_skb) {
- stats->tx_dropped++;
- netif_wake_queue(netdev);
- return NETDEV_TX_OK;
- }
- skb = tx_skb;
+ spin_lock_irqsave(&tp->tx_lock, lockflags);
+ if (!list_empty(&tp->tx_free) && skb_queue_empty(&tp->tx_queue)) {
+ struct list_head *cursor;
+
+ cursor = tp->tx_free.next;
+ list_del_init(cursor);
+ agg = list_entry(cursor, struct tx_agg, list);
}
- tx_desc = (struct tx_desc *)skb_push(skb, sizeof(*tx_desc));
- memset(tx_desc, 0, sizeof(*tx_desc));
- tx_desc->opts1 = cpu_to_le32((len & TX_LEN_MASK) | TX_FS | TX_LS);
- tp->tx_skb = skb;
- skb_tx_timestamp(skb);
- usb_fill_bulk_urb(tp->tx_urb, tp->udev, usb_sndbulkpipe(tp->udev, 2),
- skb->data, skb->len,
- (usb_complete_t)write_bulk_callback, tp);
- res = usb_submit_urb(tp->tx_urb, GFP_ATOMIC);
+ spin_unlock_irqrestore(&tp->tx_lock, lockflags);
+
+ if (!agg) {
+ skb_queue_tail(&tp->tx_queue, skb);
+ return NETDEV_TX_OK;
+ }
+
+ tx_desc = (struct tx_desc *)agg->head;
+ tx_data = agg->head + sizeof(*tx_desc);
+ agg->skb_num = agg->skb_len = 0;
+
+ len = skb->len;
+ tx_desc->opts1 = cpu_to_le32((skb->len & TX_LEN_MASK) | TX_FS | TX_LS);
+ memcpy(tx_data, skb->data, len);
+ dev_kfree_skb_any(skb);
+ agg->skb_num++;
+ agg->skb_len += len;
+ usb_fill_bulk_urb(agg->urb, tp->udev, usb_sndbulkpipe(tp->udev, 2),
+ agg->head, len + sizeof(*tx_desc),
+ (usb_complete_t)write_bulk_callback, agg);
+ res = usb_submit_urb(agg->urb, GFP_ATOMIC);
if (res) {
/* Can we get/handle EPIPE here? */
if (res == -ENODEV) {
@@ -957,12 +1272,11 @@ static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb,
} else {
netif_warn(tp, tx_err, netdev,
"failed tx_urb %d\n", res);
- stats->tx_errors++;
- netif_start_queue(netdev);
+ stats->tx_dropped++;
+ spin_lock_irqsave(&tp->tx_lock, lockflags);
+ list_add_tail(&agg->list, &tp->tx_free);
+ spin_unlock_irqrestore(&tp->tx_lock, lockflags);
}
- } else {
- stats->tx_packets++;
- stats->tx_bytes += skb->len;
}
return NETDEV_TX_OK;
@@ -999,17 +1313,18 @@ static inline u8 rtl8152_get_speed(struct r8152 *tp)
static int rtl8152_enable(struct r8152 *tp)
{
- u32 ocp_data;
+ u32 ocp_data;
+ int i, ret;
u8 speed;
speed = rtl8152_get_speed(tp);
- if (speed & _100bps) {
+ if (speed & _10bps) {
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR);
- ocp_data &= ~EEEP_CR_EEEP_TX;
+ ocp_data |= EEEP_CR_EEEP_TX;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR, ocp_data);
} else {
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR);
- ocp_data |= EEEP_CR_EEEP_TX;
+ ocp_data &= ~EEEP_CR_EEEP_TX;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR, ocp_data);
}
@@ -1023,23 +1338,34 @@ static int rtl8152_enable(struct r8152 *tp)
ocp_data &= ~RXDY_GATED_EN;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_MISC_1, ocp_data);
- usb_fill_bulk_urb(tp->rx_urb, tp->udev, usb_rcvbulkpipe(tp->udev, 1),
- tp->rx_skb->data, RTL8152_RMS + sizeof(struct rx_desc),
- (usb_complete_t)read_bulk_callback, tp);
+ INIT_LIST_HEAD(&tp->rx_done);
+ ret = 0;
+ for (i = 0; i < RTL8152_MAX_RX; i++) {
+ INIT_LIST_HEAD(&tp->rx_info[i].list);
+ ret |= r8152_submit_rx(tp, &tp->rx_info[i], GFP_KERNEL);
+ }
- return usb_submit_urb(tp->rx_urb, GFP_KERNEL);
+ return ret;
}
static void rtl8152_disable(struct r8152 *tp)
{
- u32 ocp_data;
- int i;
+ struct net_device_stats *stats = rtl8152_get_stats(tp->netdev);
+ struct sk_buff *skb;
+ u32 ocp_data;
+ int i;
ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR);
ocp_data &= ~RCR_ACPT_ALL;
ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
- usb_kill_urb(tp->tx_urb);
+ while ((skb = skb_dequeue(&tp->tx_queue))) {
+ dev_kfree_skb(skb);
+ stats->tx_dropped++;
+ }
+
+ for (i = 0; i < RTL8152_MAX_TX; i++)
+ usb_kill_urb(tp->tx_info[i].urb);
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MISC_1);
ocp_data |= RXDY_GATED_EN;
@@ -1058,7 +1384,8 @@ static void rtl8152_disable(struct r8152 *tp)
mdelay(1);
}
- usb_kill_urb(tp->rx_urb);
+ for (i = 0; i < RTL8152_MAX_RX; i++)
+ usb_kill_urb(tp->rx_info[i].urb);
rtl8152_nic_reset(tp);
}
@@ -1303,7 +1630,9 @@ static void set_carrier(struct r8152 *tp)
} else {
if (tp->speed & LINK_STATUS) {
netif_carrier_off(netdev);
+ tasklet_disable(&tp->tl);
rtl8152_disable(tp);
+ tasklet_enable(&tp->tl);
}
}
tp->speed = speed;
@@ -1369,7 +1698,9 @@ static int rtl8152_close(struct net_device *netdev)
clear_bit(WORK_ENABLE, &tp->flags);
cancel_delayed_work_sync(&tp->schedule);
netif_stop_queue(netdev);
+ tasklet_disable(&tp->tl);
rtl8152_disable(tp);
+ tasklet_enable(&tp->tl);
return res;
}
@@ -1429,8 +1760,8 @@ static void r8152b_hw_phy_cfg(struct r8152 *tp)
static void r8152b_init(struct r8152 *tp)
{
- u32 ocp_data;
- int i;
+ u32 ocp_data;
+ int i;
rtl_clear_bp(tp);
@@ -1475,9 +1806,9 @@ static void r8152b_init(struct r8152 *tp)
break;
}
- /* disable rx aggregation */
+ /* enable rx aggregation */
ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL);
- ocp_data |= RX_AGG_DISABLE;
+ ocp_data &= ~RX_AGG_DISABLE;
ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data);
}
@@ -1490,6 +1821,7 @@ static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
if (netif_running(tp->netdev)) {
clear_bit(WORK_ENABLE, &tp->flags);
cancel_delayed_work_sync(&tp->schedule);
+ tasklet_disable(&tp->tl);
}
rtl8152_down(tp);
@@ -1508,6 +1840,7 @@ static int rtl8152_resume(struct usb_interface *intf)
set_bit(WORK_ENABLE, &tp->flags);
set_bit(RTL8152_SET_RX_MODE, &tp->flags);
schedule_delayed_work(&tp->schedule, 0);
+ tasklet_enable(&tp->tl);
}
return 0;
@@ -1619,6 +1952,7 @@ static int rtl8152_probe(struct usb_interface *intf,
struct usb_device *udev = interface_to_usbdev(intf);
struct r8152 *tp;
struct net_device *netdev;
+ int ret;
if (udev->actconfig->desc.bConfigurationValue != 1) {
usb_driver_set_configuration(udev, 1);
@@ -1631,10 +1965,12 @@ static int rtl8152_probe(struct usb_interface *intf,
return -ENOMEM;
}
+ SET_NETDEV_DEV(netdev, &intf->dev);
tp = netdev_priv(netdev);
+ memset(tp, 0, sizeof(*tp));
tp->msg_enable = 0x7FFF;
- tasklet_init(&tp->tl, rx_fixup, (unsigned long)tp);
+ tasklet_init(&tp->tl, bottom_half, (unsigned long)tp);
INIT_DELAYED_WORK(&tp->schedule, rtl_work_func_t);
tp->udev = udev;
@@ -1657,37 +1993,27 @@ static int rtl8152_probe(struct usb_interface *intf,
r8152b_init(tp);
set_ethernet_addr(tp);
- if (!alloc_all_urbs(tp)) {
- netif_err(tp, probe, netdev, "out of memory");
+ ret = alloc_all_mem(tp);
+ if (ret)
goto out;
- }
-
- tp->rx_skb = netdev_alloc_skb(netdev,
- RTL8152_RMS + sizeof(struct rx_desc));
- if (!tp->rx_skb)
- goto out1;
usb_set_intfdata(intf, tp);
- SET_NETDEV_DEV(netdev, &intf->dev);
-
- if (register_netdev(netdev) != 0) {
+ ret = register_netdev(netdev);
+ if (ret != 0) {
netif_err(tp, probe, netdev, "couldn't register the device");
- goto out2;
+ goto out1;
}
netif_info(tp, probe, netdev, "%s", DRIVER_VERSION);
return 0;
-out2:
- usb_set_intfdata(intf, NULL);
- dev_kfree_skb(tp->rx_skb);
out1:
- free_all_urbs(tp);
+ usb_set_intfdata(intf, NULL);
out:
free_netdev(netdev);
- return -EIO;
+ return ret;
}
static void rtl8152_unload(struct r8152 *tp)
@@ -1715,9 +2041,7 @@ static void rtl8152_disconnect(struct usb_interface *intf)
tasklet_kill(&tp->tl);
unregister_netdev(tp->netdev);
rtl8152_unload(tp);
- free_all_urbs(tp);
- if (tp->rx_skb)
- dev_kfree_skb(tp->rx_skb);
+ free_all_mem(tp);
free_netdev(tp->netdev);
}
}
@@ -1732,11 +2056,12 @@ MODULE_DEVICE_TABLE(usb, rtl8152_table);
static struct usb_driver rtl8152_driver = {
.name = MODULENAME,
+ .id_table = rtl8152_table,
.probe = rtl8152_probe,
.disconnect = rtl8152_disconnect,
- .id_table = rtl8152_table,
.suspend = rtl8152_suspend,
- .resume = rtl8152_resume
+ .resume = rtl8152_resume,
+ .reset_resume = rtl8152_resume,
};
module_usb_driver(rtl8152_driver);
--
1.8.3.1
From: Hayes Wang <[email protected]>
Date: Wed, 14 Aug 2013 20:54:38 +0800
> Enable the tx/rx aggregation which could contain one or more packets
> for each bulk in/out. This could reduce the loading of the host
> controller by sending less bulk transfer.
>
> The rx packets in the bulk in buffer should be 8-byte aligned, and
> the tx packets in the bulk out buffer should be 4-byte aligned.
>
> Signed-off-by: Hayes Wang <[email protected]>
Applied.
From: Hayes Wang <[email protected]>
Date: Wed, 14 Aug 2013 20:54:39 +0800
> Enable tx checksum.
>
> Signed-off-by: Hayes Wang <[email protected]>
Applied.
From: Hayes Wang <[email protected]>
Date: Wed, 14 Aug 2013 20:54:40 +0800
> Use the interrupt transfer to replace polling link status.
>
> Signed-off-by: Hayes Wang <[email protected]>
Applied.
Hayes Wang <[email protected]> :
[...]
> diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
> index 11c51f2..abb0b9f 100644
> --- a/drivers/net/usb/r8152.c
> +++ b/drivers/net/usb/r8152.c
[...]
> @@ -315,13 +318,34 @@ struct tx_desc {
> u32 opts2;
> };
>
> +struct rx_agg {
> + struct list_head list;
> + struct urb *urb;
> + void *context;
void * is not needed: context is always struct r8152 *.
> + void *buffer;
> + void *head;
> +};
> +
> +struct tx_agg {
> + struct list_head list;
> + struct urb *urb;
> + void *context;
void * is not needed: context is always struct r8152 *.
> + void *buffer;
> + void *head;
> + u32 skb_num;
> + u32 skb_len;
> +};
> +
> struct r8152 {
> unsigned long flags;
> struct usb_device *udev;
> struct tasklet_struct tl;
> struct net_device *netdev;
> - struct urb *rx_urb, *tx_urb;
> - struct sk_buff *tx_skb, *rx_skb;
> + struct tx_agg tx_info[RTL8152_MAX_TX];
> + struct rx_agg rx_info[RTL8152_MAX_RX];
> + struct list_head rx_done, tx_free;
> + struct sk_buff_head tx_queue;
> + spinlock_t rx_lock, tx_lock;
You may group rx data on one side and tx data on the other side to be
more cache friendly.
[...]
> @@ -686,6 +711,9 @@ static void ocp_reg_write(struct r8152 *tp, u16 addr, u16 data)
> ocp_write_word(tp, MCU_TYPE_PLA, ocp_index, data);
> }
>
> +static
> +int r8152_submit_rx(struct r8152 *tp, struct rx_agg *agg, gfp_t mem_flags);
> +
It's a new function of fewer than 10 lines, without driver-internal dependencies.
The forward declaration is not needed.
[...]
> @@ -743,129 +751,425 @@ static struct net_device_stats *rtl8152_get_stats(struct net_device *dev)
>
> static void read_bulk_callback(struct urb *urb)
No rtl8152_ prefix ?
> {
[...]
> - if (!netif_device_present(netdev))
> + if (!netif_carrier_ok(netdev))
> return;
How is it related to the subject of the patch ?
[...]
> +static void rx_bottom(struct r8152 *tp)
> +{
> + struct net_device_stats *stats;
> + struct net_device *netdev;
> + struct rx_agg *agg;
> + struct rx_desc *rx_desc;
> + unsigned long lockflags;
Idiom: 'flags'.
> + struct list_head *cursor, *next;
> + struct sk_buff *skb;
> + struct urb *urb;
> + unsigned pkt_len;
> + int len_used;
> + u8 *rx_data;
> + int ret;
The scope of these variables is needlessly wide.
> +
> + netdev = tp->netdev;
> +
> + stats = rtl8152_get_stats(netdev);
> +
> + spin_lock_irqsave(&tp->rx_lock, lockflags);
> + list_for_each_safe(cursor, next, &tp->rx_done) {
> + list_del_init(cursor);
> + spin_unlock_irqrestore(&tp->rx_lock, lockflags);
> +
> + agg = list_entry(cursor, struct rx_agg, list);
> + urb = agg->urb;
> + if (urb->actual_length < ETH_ZLEN) {
goto submit;
> + ret = r8152_submit_rx(tp, agg, GFP_ATOMIC);
> + spin_lock_irqsave(&tp->rx_lock, lockflags);
> + if (ret && ret != -ENODEV) {
> + list_add_tail(&agg->list, next);
> + tasklet_schedule(&tp->tl);
> + }
> + continue;
> + }
(remove the line above)
[...]
> + rx_data = rx_agg_align(rx_data + pkt_len + 4);
> + rx_desc = (struct rx_desc *)rx_data;
> + pkt_len = le32_to_cpu(rx_desc->opts1) & RX_LEN_MASK;
> + len_used = (int)(rx_data - (u8 *)agg->head);
> + len_used += sizeof(struct rx_desc) + pkt_len;
> + }
> +
submit:
> + ret = r8152_submit_rx(tp, agg, GFP_ATOMIC);
> + spin_lock_irqsave(&tp->rx_lock, lockflags);
> + if (ret && ret != -ENODEV) {
> + list_add_tail(&agg->list, next);
> + tasklet_schedule(&tp->tl);
> + }
> + }
> + spin_unlock_irqrestore(&tp->rx_lock, lockflags);
> +}
It should be possible to retrieve more items in the spinlocked section
so as to have a chance to batch more work. I have not thought too deeply
about it.
> +
> +static void tx_bottom(struct r8152 *tp)
> +{
> + struct net_device_stats *stats;
> + struct net_device *netdev;
> + struct tx_agg *agg;
> + unsigned long lockflags;
> + u32 remain, total;
> + u8 *tx_data;
> + int res;
> +
> + netdev = tp->netdev;
> +
> +next_agg:
Use a real for / while loop and split this function as you experience
line length problem.
[...]
> +static void bottom_half(unsigned long data)
> {
> struct r8152 *tp;
> - int status = urb->status;
>
> - tp = urb->context;
> - if (!tp)
> + tp = (struct r8152 *)data;
struct r8152 *tp = (struct r8152 *)data;
[...]
> - if (!netif_device_present(tp->netdev))
> +
> + if (!netif_carrier_ok(tp->netdev))
> return;
It deserves an explanation.
[...]
> @@ -923,33 +1227,44 @@ static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb,
> {
[...]
> + spin_lock_irqsave(&tp->tx_lock, lockflags);
> + if (!list_empty(&tp->tx_free) && skb_queue_empty(&tp->tx_queue)) {
> + struct list_head *cursor;
> +
> + cursor = tp->tx_free.next;
> + list_del_init(cursor);
> + agg = list_entry(cursor, struct tx_agg, list);
Duplicated in tx_bottom.
[...]
> @@ -999,17 +1313,18 @@ static inline u8 rtl8152_get_speed(struct r8152 *tp)
>
> static int rtl8152_enable(struct r8152 *tp)
> {
> - u32 ocp_data;
> + u32 ocp_data;
> + int i, ret;
> u8 speed;
>
> speed = rtl8152_get_speed(tp);
> - if (speed & _100bps) {
> + if (speed & _10bps) {
> ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR);
> - ocp_data &= ~EEEP_CR_EEEP_TX;
> + ocp_data |= EEEP_CR_EEEP_TX;
> ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR, ocp_data);
> } else {
> ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR);
> - ocp_data |= EEEP_CR_EEEP_TX;
> + ocp_data &= ~EEEP_CR_EEEP_TX;
Call me "Mickey Mouse" if this is related to the subject of the patch.
[...]
> @@ -1631,10 +1965,12 @@ static int rtl8152_probe(struct usb_interface *intf,
> return -ENOMEM;
> }
>
> + SET_NETDEV_DEV(netdev, &intf->dev);
> tp = netdev_priv(netdev);
> + memset(tp, 0, sizeof(*tp));
Useless, see kzalloc in net/core/dev.c::alloc_netdev_mqs
--
Ueimor
Francois Romieu [mailto:[email protected]]
> Sent: Thursday, August 15, 2013 8:26 PM
> To: Hayeswang
> Cc: [email protected]; nic_swsd;
> [email protected]; [email protected]; David Miller
> Subject: Re: [PATCH net-next v2 1/3] net/usb/r8152: support
> aggregation
>
[...]
> > +static
> > +int r8152_submit_rx(struct r8152 *tp, struct rx_agg *agg,
> gfp_t mem_flags);
> > +
>
> It's a new, less than 10 lines function without driver
> internal dependencies.
>
> The forward declaration is not needed.
The r8152_submit_rx() needs the declaration of read_bulk_callback(), and the
read_bulk_callback() needs the declaration of r8152_submit_rx(), too. It is like
a deadlock, so I have no idea how to do it without another declaration.
[...]
> > - if (!netif_device_present(netdev))
> > + if (!netif_carrier_ok(netdev))
> > return;
>
> How is it related to the subject of the patch ?
When the link is down, the driver cancels all bulk transfers. This avoids
re-submitting the bulk.
> [...]
> > +static void rx_bottom(struct r8152 *tp)
> > +{
> > + struct net_device_stats *stats;
> > + struct net_device *netdev;
> > + struct rx_agg *agg;
> > + struct rx_desc *rx_desc;
> > + unsigned long lockflags;
>
> Idiom: 'flags'.
>
> > + struct list_head *cursor, *next;
> > + struct sk_buff *skb;
> > + struct urb *urb;
> > + unsigned pkt_len;
> > + int len_used;
> > + u8 *rx_data;
> > + int ret;
>
> The scope of these variables is needlessly wide.
>
> > +
> > + netdev = tp->netdev;
> > +
> > + stats = rtl8152_get_stats(netdev);
> > +
> > + spin_lock_irqsave(&tp->rx_lock, lockflags);
> > + list_for_each_safe(cursor, next, &tp->rx_done) {
> > + list_del_init(cursor);
> > + spin_unlock_irqrestore(&tp->rx_lock, lockflags);
> > +
> > + agg = list_entry(cursor, struct rx_agg, list);
> > + urb = agg->urb;
> > + if (urb->actual_length < ETH_ZLEN) {
>
> goto submit;
>
> > + ret = r8152_submit_rx(tp, agg, GFP_ATOMIC);
> > + spin_lock_irqsave(&tp->rx_lock, lockflags);
> > + if (ret && ret != -ENODEV) {
> > + list_add_tail(&agg->list, next);
> > + tasklet_schedule(&tp->tl);
> > + }
> > + continue;
> > + }
>
> (remove the line above)
>
> [...]
> > + rx_data = rx_agg_align(rx_data + pkt_len + 4);
> > + rx_desc = (struct rx_desc *)rx_data;
> > + pkt_len = le32_to_cpu(rx_desc->opts1) &
> RX_LEN_MASK;
> > + len_used = (int)(rx_data - (u8 *)agg->head);
> > + len_used += sizeof(struct rx_desc) + pkt_len;
> > + }
> > +
>
> submit:
>
> > + ret = r8152_submit_rx(tp, agg, GFP_ATOMIC);
> > + spin_lock_irqsave(&tp->rx_lock, lockflags);
> > + if (ret && ret != -ENODEV) {
> > + list_add_tail(&agg->list, next);
> > + tasklet_schedule(&tp->tl);
> > + }
> > + }
> > + spin_unlock_irqrestore(&tp->rx_lock, lockflags);
> > +}
>
> It should be possible to retrieve more items in the spinlocked section
> so as to have a chance to batch more work. I have not thought
> too deeply
> about it.
I only take the lock when I want to remove from or insert into the agg list, and
unlock as soon as possible. I don't think I hold the lock for a long time.
Best Regards,
Hayes
hayeswang <[email protected]> :
> Francois Romieu [mailto:[email protected]]
[...]
> > The forward declaration is not needed.
>
> The r8152_submit_rx() needs the declaration of read_bulk_callback(), and the
> read_bulk_callback() needs the declaration of r8152_submit_rx(), too. It is like
> a deadlock, so I have no idea how to do it without another declaration.
Ok, send me a brain for Xmas.
[...]
> > How is it related to the subject of the patch ?
>
> When the link is down, the driver cancels all bulk transfers. This avoids
> re-submitting the bulk.
It's quite orthogonal to aggregation and could thus have been done in its own
patch for readability's sake.
[...]
> > It should be possible to retrieve more items in the spinlocked section
> > so as to have a chance to batch more work. I have not thought
> > too deeply
> > about it.
>
> I only take the lock when I want to remove from or insert into the agg list, and
> unlock as soon as possible. I don't think I hold the lock for a long time.
Sure. It doesn't make a difference if tp->rx_done doesn't grow much.
Thanks.
--
Ueimor