Hi,
This series adds a network driver for the Modem Host Interface (MHI) endpoint
devices that provides network interfaces to the PCIe based Qualcomm endpoint
devices supporting MHI bus (like Modems). This driver allows the MHI endpoint
devices to establish IP communication with the host machines (x86, ARM64) over
MHI bus.
On the host side, the existing mhi_net driver provides the network connectivity
to the host.
- Mani
Changes in v2:
* Fixed kfree(skb) with kfree_skb(skb)
* Reworded the Kconfig text slightly
* Dropped the MTU increase patch as it turned out only a few devices support 32K
MTU
Manivannan Sadhasivam (2):
net: Add MHI Endpoint network driver
MAINTAINERS: Add entry for MHI networking drivers under MHI bus
MAINTAINERS | 1 +
drivers/net/Kconfig | 9 ++
drivers/net/Makefile | 1 +
drivers/net/mhi_ep_net.c | 331 +++++++++++++++++++++++++++++++++++++++
4 files changed, 342 insertions(+)
create mode 100644 drivers/net/mhi_ep_net.c
base-commit: e7214663e023be5e518e8d0d8f2dca6848731652
--
2.25.1
The host MHI net driver was not listed earlier. So let's add both host and
endpoint MHI net drivers under MHI bus.
Cc: Loic Poulain <[email protected]>
Signed-off-by: Manivannan Sadhasivam <[email protected]>
---
MAINTAINERS | 1 +
1 file changed, 1 insertion(+)
diff --git a/MAINTAINERS b/MAINTAINERS
index 081eb65ef865..14d9e15ae360 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -13632,6 +13632,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/mani/mhi.git
F: Documentation/ABI/stable/sysfs-bus-mhi
F: Documentation/mhi/
F: drivers/bus/mhi/
+F: drivers/net/mhi_*
F: include/linux/mhi.h
MICROBLAZE ARCHITECTURE
--
2.25.1
Add a network driver for the Modem Host Interface (MHI) endpoint devices
that provides network interfaces to the PCIe based Qualcomm endpoint
devices supporting MHI bus. This driver allows the MHI endpoint devices to
establish IP communication with the host machines (x86, ARM64) over MHI
bus.
The driver currently supports only IP_SW0 MHI channel that can be used
to route IP traffic from the endpoint CPU to host machine.
Signed-off-by: Manivannan Sadhasivam <[email protected]>
---
drivers/net/Kconfig | 9 ++
drivers/net/Makefile | 1 +
drivers/net/mhi_ep_net.c | 331 +++++++++++++++++++++++++++++++++++++++
3 files changed, 341 insertions(+)
create mode 100644 drivers/net/mhi_ep_net.c
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 368c6f5b327e..36b628e2e49f 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -452,6 +452,15 @@ config MHI_NET
QCOM based WWAN modems for IP or QMAP/rmnet protocol (like SDX55).
Say Y or M.
+config MHI_EP_NET
+ tristate "MHI Endpoint network driver"
+ depends on MHI_BUS_EP
+ help
+ This is the network driver for MHI bus implementation in endpoint
+ devices. It is used to provide the network interface for QCOM endpoint
+ devices such as SDX55 modems.
+ Say Y or M.
+
endif # NET_CORE
config SUNGEM_PHY
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index e26f98f897c5..b8e706a4150e 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -40,6 +40,7 @@ obj-$(CONFIG_NLMON) += nlmon.o
obj-$(CONFIG_NET_VRF) += vrf.o
obj-$(CONFIG_VSOCKMON) += vsockmon.o
obj-$(CONFIG_MHI_NET) += mhi_net.o
+obj-$(CONFIG_MHI_EP_NET) += mhi_ep_net.o
#
# Networking Drivers
diff --git a/drivers/net/mhi_ep_net.c b/drivers/net/mhi_ep_net.c
new file mode 100644
index 000000000000..0d7939caefc7
--- /dev/null
+++ b/drivers/net/mhi_ep_net.c
@@ -0,0 +1,331 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MHI Endpoint Network driver
+ *
+ * Based on drivers/net/mhi_net.c
+ *
+ * Copyright (c) 2023, Linaro Ltd.
+ * Author: Manivannan Sadhasivam <[email protected]>
+ */
+
+#include <linux/if_arp.h>
+#include <linux/mhi_ep.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/u64_stats_sync.h>
+
+#define MHI_NET_MIN_MTU ETH_MIN_MTU
+#define MHI_NET_MAX_MTU 0xffff
+
+/* Per-device counters, split into tx/rx groups each guarded by its own
+ * u64_stats_sync so readers in mhi_ndo_get_stats64() get consistent values.
+ */
+struct mhi_ep_net_stats {
+ u64_stats_t rx_packets;
+ u64_stats_t rx_bytes;
+ u64_stats_t rx_errors;
+ u64_stats_t tx_packets;
+ u64_stats_t tx_bytes;
+ u64_stats_t tx_errors;
+ u64_stats_t tx_dropped;
+ struct u64_stats_sync tx_syncp; /* guards the tx_* counters above */
+ struct u64_stats_sync rx_syncp; /* guards the rx_* counters above */
+};
+
+/* Driver state, stored as netdev_priv() of ndev and as drvdata of mdev */
+struct mhi_ep_net_dev {
+ struct mhi_ep_device *mdev; /* parent MHI endpoint device */
+ struct net_device *ndev; /* registered network interface */
+ struct mhi_ep_net_stats stats;
+ struct workqueue_struct *xmit_wq; /* runs xmit_work */
+ struct work_struct xmit_work; /* flushes tx_buffers to the host */
+ struct sk_buff_head tx_buffers; /* skbs queued by ndo_start_xmit */
+ spinlock_t tx_lock; /* Lock for protecting tx_buffers */
+ u32 mru; /* controller MRU; sizes RX skbs when non-zero */
+};
+
+/* Work handler: drain skbs queued by ndo_start_xmit into the MHI channel.
+ * Stops the netdev queue when the host runs out of DL buffers; the DL
+ * completion callback restarts both the queue and this work.
+ */
+static void mhi_ep_net_dev_process_queue_packets(struct work_struct *work)
+{
+ struct mhi_ep_net_dev *mhi_ep_netdev = container_of(work,
+ struct mhi_ep_net_dev, xmit_work);
+ struct mhi_ep_device *mdev = mhi_ep_netdev->mdev;
+ struct sk_buff_head q;
+ struct sk_buff *skb;
+ int ret;
+
+ if (mhi_ep_queue_is_empty(mdev, DMA_FROM_DEVICE)) {
+ netif_stop_queue(mhi_ep_netdev->ndev);
+ return;
+ }
+
+ __skb_queue_head_init(&q);
+
+ spin_lock_bh(&mhi_ep_netdev->tx_lock);
+ skb_queue_splice_init(&mhi_ep_netdev->tx_buffers, &q);
+ spin_unlock_bh(&mhi_ep_netdev->tx_lock);
+
+ while ((skb = __skb_dequeue(&q))) {
+ ret = mhi_ep_queue_skb(mdev, skb);
+ if (ret) {
+ kfree_skb(skb);
+ goto exit_drop;
+ }
+
+ u64_stats_update_begin(&mhi_ep_netdev->stats.tx_syncp);
+ u64_stats_inc(&mhi_ep_netdev->stats.tx_packets);
+ u64_stats_add(&mhi_ep_netdev->stats.tx_bytes, skb->len);
+ u64_stats_update_end(&mhi_ep_netdev->stats.tx_syncp);
+
+ /* Consume the skb before the early break below, so a sent skb
+ * is never leaked when the host queue runs empty mid-loop.
+ */
+ consume_skb(skb);
+
+ /* Check if queue is empty */
+ if (mhi_ep_queue_is_empty(mdev, DMA_FROM_DEVICE)) {
+ netif_stop_queue(mhi_ep_netdev->ndev);
+ break;
+ }
+
+ cond_resched();
+ }
+
+ /* Push back any unsent skbs (in order) instead of leaking the local
+ * list; they will be retried when the DL callback requeues the work.
+ */
+ if (!skb_queue_empty(&q)) {
+ spin_lock_bh(&mhi_ep_netdev->tx_lock);
+ skb_queue_splice(&q, &mhi_ep_netdev->tx_buffers);
+ spin_unlock_bh(&mhi_ep_netdev->tx_lock);
+ }
+
+ return;
+
+exit_drop:
+ /* Account for the failed skb plus everything still pending in 'q',
+ * which is freed below rather than leaked.
+ */
+ u64_stats_update_begin(&mhi_ep_netdev->stats.tx_syncp);
+ u64_stats_add(&mhi_ep_netdev->stats.tx_dropped, skb_queue_len(&q) + 1);
+ u64_stats_update_end(&mhi_ep_netdev->stats.tx_syncp);
+ __skb_queue_purge(&q);
+}
+
+/* ndo_open: report carrier and let the stack start submitting packets */
+static int mhi_ndo_open(struct net_device *ndev)
+{
+ netif_carrier_on(ndev);
+ netif_start_queue(ndev);
+
+ return 0;
+}
+
+/* ndo_stop: mirror of mhi_ndo_open — stop the queue, then drop carrier */
+static int mhi_ndo_stop(struct net_device *ndev)
+{
+ netif_stop_queue(ndev);
+ netif_carrier_off(ndev);
+
+ return 0;
+}
+
+/* ndo_start_xmit: queue the skb and defer the MHI transfer to the
+ * workqueue, since queuing over the bus may sleep.
+ */
+static netdev_tx_t mhi_ndo_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct mhi_ep_net_dev *mhi_ep_netdev = netdev_priv(ndev);
+
+ /* ndo_start_xmit runs with BH disabled, so plain spin_lock here pairs
+ * with the spin_lock_bh() users of tx_lock in the work handler.
+ */
+ spin_lock(&mhi_ep_netdev->tx_lock);
+ skb_queue_tail(&mhi_ep_netdev->tx_buffers, skb);
+ spin_unlock(&mhi_ep_netdev->tx_lock);
+
+ queue_work(mhi_ep_netdev->xmit_wq, &mhi_ep_netdev->xmit_work);
+
+ return NETDEV_TX_OK;
+}
+
+/* ndo_get_stats64: snapshot the per-device counters. Each fetch loop
+ * retries until it observes a consistent generation of its sync point.
+ */
+static void mhi_ndo_get_stats64(struct net_device *ndev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct mhi_ep_net_dev *mhi_ep_netdev = netdev_priv(ndev);
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin(&mhi_ep_netdev->stats.rx_syncp);
+ stats->rx_packets = u64_stats_read(&mhi_ep_netdev->stats.rx_packets);
+ stats->rx_bytes = u64_stats_read(&mhi_ep_netdev->stats.rx_bytes);
+ stats->rx_errors = u64_stats_read(&mhi_ep_netdev->stats.rx_errors);
+ } while (u64_stats_fetch_retry(&mhi_ep_netdev->stats.rx_syncp, start));
+
+ do {
+ start = u64_stats_fetch_begin(&mhi_ep_netdev->stats.tx_syncp);
+ stats->tx_packets = u64_stats_read(&mhi_ep_netdev->stats.tx_packets);
+ stats->tx_bytes = u64_stats_read(&mhi_ep_netdev->stats.tx_bytes);
+ stats->tx_errors = u64_stats_read(&mhi_ep_netdev->stats.tx_errors);
+ stats->tx_dropped = u64_stats_read(&mhi_ep_netdev->stats.tx_dropped);
+ } while (u64_stats_fetch_retry(&mhi_ep_netdev->stats.tx_syncp, start));
+}
+
+/* Netdev callbacks; no ndo_change_mtu, so the core default applies within
+ * the min_mtu/max_mtu bounds set in mhi_ep_net_setup().
+ */
+static const struct net_device_ops mhi_ep_netdev_ops = {
+ .ndo_open = mhi_ndo_open,
+ .ndo_stop = mhi_ndo_stop,
+ .ndo_start_xmit = mhi_ndo_xmit,
+ .ndo_get_stats64 = mhi_ndo_get_stats64,
+};
+
+/* alloc_netdev() setup callback: raw-IP point-to-point link, no L2 header */
+static void mhi_ep_net_setup(struct net_device *ndev)
+{
+ ndev->header_ops = NULL; /* No header */
+ ndev->type = ARPHRD_RAWIP;
+ ndev->hard_header_len = 0;
+ ndev->addr_len = 0;
+ ndev->flags = IFF_POINTOPOINT | IFF_NOARP;
+ ndev->netdev_ops = &mhi_ep_netdev_ops;
+ /* MHI_EP_DEFAULT_MTU comes from <linux/mhi_ep.h> */
+ ndev->mtu = MHI_EP_DEFAULT_MTU;
+ ndev->min_mtu = MHI_NET_MIN_MTU;
+ ndev->max_mtu = MHI_NET_MAX_MTU;
+ ndev->tx_queue_len = 1000;
+}
+
+/* UL transfer completion: copy the received payload into a fresh skb and
+ * hand it to the stack. The protocol is inferred from the IP version
+ * nibble; anything else is passed up as QMAP (ETH_P_MAP).
+ */
+static void mhi_ep_net_ul_callback(struct mhi_ep_device *mhi_dev,
+ struct mhi_result *mhi_res)
+{
+ struct mhi_ep_net_dev *mhi_ep_netdev = dev_get_drvdata(&mhi_dev->dev);
+ struct net_device *ndev = mhi_ep_netdev->ndev;
+ struct sk_buff *skb;
+ size_t size;
+
+ /* Prefer the controller-provided MRU; fall back to the current MTU */
+ size = mhi_ep_netdev->mru ? mhi_ep_netdev->mru : READ_ONCE(ndev->mtu);
+
+ skb = netdev_alloc_skb(ndev, size);
+ if (unlikely(!skb)) {
+ u64_stats_update_begin(&mhi_ep_netdev->stats.rx_syncp);
+ u64_stats_inc(&mhi_ep_netdev->stats.rx_errors);
+ u64_stats_update_end(&mhi_ep_netdev->stats.rx_syncp);
+ return;
+ }
+
+ /* Do not assign skb->len directly here: skb_put() below already
+ * accounts the payload length, and setting it twice would make
+ * skb->len twice the real payload size.
+ */
+ skb_copy_to_linear_data(skb, mhi_res->buf_addr, mhi_res->bytes_xferd);
+ skb->dev = mhi_ep_netdev->ndev;
+
+ if (unlikely(mhi_res->transaction_status)) {
+ switch (mhi_res->transaction_status) {
+ case -ENOTCONN:
+ /* MHI layer stopping/resetting the UL channel */
+ dev_kfree_skb_any(skb);
+ return;
+ default:
+ /* Unknown error, simply drop */
+ dev_kfree_skb_any(skb);
+ u64_stats_update_begin(&mhi_ep_netdev->stats.rx_syncp);
+ u64_stats_inc(&mhi_ep_netdev->stats.rx_errors);
+ u64_stats_update_end(&mhi_ep_netdev->stats.rx_syncp);
+ }
+ } else {
+ skb_put(skb, mhi_res->bytes_xferd);
+
+ switch (skb->data[0] & 0xf0) {
+ case 0x40:
+ skb->protocol = htons(ETH_P_IP);
+ break;
+ case 0x60:
+ skb->protocol = htons(ETH_P_IPV6);
+ break;
+ default:
+ skb->protocol = htons(ETH_P_MAP);
+ break;
+ }
+
+ u64_stats_update_begin(&mhi_ep_netdev->stats.rx_syncp);
+ u64_stats_inc(&mhi_ep_netdev->stats.rx_packets);
+ u64_stats_add(&mhi_ep_netdev->stats.rx_bytes, skb->len);
+ u64_stats_update_end(&mhi_ep_netdev->stats.rx_syncp);
+ netif_rx(skb);
+ }
+}
+
+/* DL completion: the host consumed a buffer, so room is available again.
+ * Wake the netdev queue if it was stopped and kick the xmit work.
+ */
+static void mhi_ep_net_dl_callback(struct mhi_ep_device *mhi_dev,
+ struct mhi_result *mhi_res)
+{
+ struct mhi_ep_net_dev *mhi_ep_netdev = dev_get_drvdata(&mhi_dev->dev);
+
+ /* Channel is stopping/resetting; nothing to resume */
+ if (unlikely(mhi_res->transaction_status == -ENOTCONN))
+ return;
+
+ /* Since we got enough buffers to queue, wake the queue if stopped */
+ if (netif_queue_stopped(mhi_ep_netdev->ndev)) {
+ netif_wake_queue(mhi_ep_netdev->ndev);
+ queue_work(mhi_ep_netdev->xmit_wq, &mhi_ep_netdev->xmit_work);
+ }
+}
+
+/* Initialize driver state for ndev and register it. On failure, all
+ * partially-acquired resources (workqueue, drvdata) are released.
+ */
+static int mhi_ep_net_newlink(struct mhi_ep_device *mhi_dev, struct net_device *ndev)
+{
+ struct mhi_ep_net_dev *mhi_ep_netdev;
+ int ret;
+
+ mhi_ep_netdev = netdev_priv(ndev);
+
+ dev_set_drvdata(&mhi_dev->dev, mhi_ep_netdev);
+ mhi_ep_netdev->ndev = ndev;
+ mhi_ep_netdev->mdev = mhi_dev;
+ mhi_ep_netdev->mru = mhi_dev->mhi_cntrl->mru;
+
+ skb_queue_head_init(&mhi_ep_netdev->tx_buffers);
+ spin_lock_init(&mhi_ep_netdev->tx_lock);
+
+ u64_stats_init(&mhi_ep_netdev->stats.rx_syncp);
+ u64_stats_init(&mhi_ep_netdev->stats.tx_syncp);
+
+ /* WQ_HIGHPRI is a workqueue flag, so it belongs in the second (flags)
+ * argument; 0 for max_active selects the default. The allocation can
+ * fail and must be checked before INIT_WORK()/queue_work() use it.
+ */
+ mhi_ep_netdev->xmit_wq = alloc_workqueue("mhi_ep_net_xmit_wq", WQ_HIGHPRI, 0);
+ if (!mhi_ep_netdev->xmit_wq) {
+ ret = -ENOMEM;
+ goto err_clear_drvdata;
+ }
+
+ INIT_WORK(&mhi_ep_netdev->xmit_work, mhi_ep_net_dev_process_queue_packets);
+
+ ret = register_netdev(ndev);
+ if (ret)
+ goto err_destroy_wq;
+
+ return 0;
+
+err_destroy_wq:
+ destroy_workqueue(mhi_ep_netdev->xmit_wq);
+err_clear_drvdata:
+ /* Keep teardown consistent with mhi_ep_net_dellink() */
+ dev_set_drvdata(&mhi_dev->dev, NULL);
+ return ret;
+}
+
+/* Tear down in the reverse order of mhi_ep_net_newlink(). */
+static void mhi_ep_net_dellink(struct mhi_ep_device *mhi_dev, struct net_device *ndev)
+{
+ struct mhi_ep_net_dev *mhi_ep_netdev = netdev_priv(ndev);
+
+ /* Unregister first so ndo_start_xmit can no longer queue new work,
+ * then drain and destroy the workqueue. Destroying the workqueue
+ * before unregistering would let xmit race against a dead workqueue.
+ */
+ unregister_netdev(ndev);
+ destroy_workqueue(mhi_ep_netdev->xmit_wq);
+
+ /* Clear drvdata before freeing the netdev it points into */
+ dev_set_drvdata(&mhi_dev->dev, NULL);
+ free_netdev(ndev);
+}
+
+/* Bus probe: allocate the netdev (interface name template comes from the
+ * matched id's driver_data, e.g. "mhi_swip%d") and bring the link up.
+ */
+static int mhi_ep_net_probe(struct mhi_ep_device *mhi_dev, const struct mhi_device_id *id)
+{
+ struct net_device *ndev;
+ int ret;
+
+ ndev = alloc_netdev(sizeof(struct mhi_ep_net_dev), (const char *)id->driver_data,
+ NET_NAME_PREDICTABLE, mhi_ep_net_setup);
+ if (!ndev)
+ return -ENOMEM;
+
+ SET_NETDEV_DEV(ndev, &mhi_dev->dev);
+
+ ret = mhi_ep_net_newlink(mhi_dev, ndev);
+ if (ret) {
+ free_netdev(ndev);
+ return ret;
+ }
+
+ return 0;
+}
+
+/* Bus remove: undo mhi_ep_net_probe(); dellink also frees the netdev */
+static void mhi_ep_net_remove(struct mhi_ep_device *mhi_dev)
+{
+ struct mhi_ep_net_dev *mhi_ep_netdev = dev_get_drvdata(&mhi_dev->dev);
+
+ mhi_ep_net_dellink(mhi_dev, mhi_ep_netdev->ndev);
+}
+
+static const struct mhi_device_id mhi_ep_net_id_table[] = {
+ /* Software data PATH (from endpoint CPU) */
+ { .chan = "IP_SW0", .driver_data = (kernel_ulong_t)"mhi_swip%d" },
+ {}
+};
+MODULE_DEVICE_TABLE(mhi, mhi_ep_net_id_table);
+
+static struct mhi_ep_driver mhi_ep_net_driver = {
+ .probe = mhi_ep_net_probe,
+ .remove = mhi_ep_net_remove,
+ .dl_xfer_cb = mhi_ep_net_dl_callback,
+ .ul_xfer_cb = mhi_ep_net_ul_callback,
+ .id_table = mhi_ep_net_id_table,
+ .driver = {
+ .name = "mhi_ep_net",
+ .owner = THIS_MODULE,
+ },
+};
+
+module_mhi_ep_driver(mhi_ep_net_driver);
+
+MODULE_AUTHOR("Manivannan Sadhasivam <[email protected]>");
+MODULE_DESCRIPTION("MHI Endpoint Network driver");
+MODULE_LICENSE("GPL");
--
2.25.1
On 6/7/2023 9:24 AM, Manivannan Sadhasivam wrote:
> Add a network driver for the Modem Host Interface (MHI) endpoint devices
> that provides network interfaces to the PCIe based Qualcomm endpoint
> devices supporting MHI bus. This driver allows the MHI endpoint devices to
> establish IP communication with the host machines (x86, ARM64) over MHI
> bus.
>
> The driver currently supports only IP_SW0 MHI channel that can be used
> to route IP traffic from the endpoint CPU to host machine.
>
> Signed-off-by: Manivannan Sadhasivam <[email protected]>
> ---
> drivers/net/Kconfig | 9 ++
> drivers/net/Makefile | 1 +
> drivers/net/mhi_ep_net.c | 331 +++++++++++++++++++++++++++++++++++++++
> 3 files changed, 341 insertions(+)
> create mode 100644 drivers/net/mhi_ep_net.c
>
> diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
> index 368c6f5b327e..36b628e2e49f 100644
> --- a/drivers/net/Kconfig
> +++ b/drivers/net/Kconfig
> @@ -452,6 +452,15 @@ config MHI_NET
> QCOM based WWAN modems for IP or QMAP/rmnet protocol (like SDX55).
> Say Y or M.
>
> +config MHI_EP_NET
> + tristate "MHI Endpoint network driver"
> + depends on MHI_BUS_EP
> + help
> + This is the network driver for MHI bus implementation in endpoint
> + devices. It is used provide the network interface for QCOM endpoint
> + devices such as SDX55 modems.
> + Say Y or M.
What will the module be called if "m" is selected?
> +
> endif # NET_CORE
>
> config SUNGEM_PHY
> diff --git a/drivers/net/Makefile b/drivers/net/Makefile
> index e26f98f897c5..b8e706a4150e 100644
> --- a/drivers/net/Makefile
> +++ b/drivers/net/Makefile
> @@ -40,6 +40,7 @@ obj-$(CONFIG_NLMON) += nlmon.o
> obj-$(CONFIG_NET_VRF) += vrf.o
> obj-$(CONFIG_VSOCKMON) += vsockmon.o
> obj-$(CONFIG_MHI_NET) += mhi_net.o
> +obj-$(CONFIG_MHI_EP_NET) += mhi_ep_net.o
>
> #
> # Networking Drivers
> diff --git a/drivers/net/mhi_ep_net.c b/drivers/net/mhi_ep_net.c
> new file mode 100644
> index 000000000000..0d7939caefc7
> --- /dev/null
> +++ b/drivers/net/mhi_ep_net.c
> @@ -0,0 +1,331 @@
> +// SPDX-License-Identifier: GPL-2.0-or-later
> +/*
> + * MHI Endpoint Network driver
> + *
> + * Based on drivers/net/mhi_net.c
> + *
> + * Copyright (c) 2023, Linaro Ltd.
> + * Author: Manivannan Sadhasivam <[email protected]>
> + */
> +
> +#include <linux/if_arp.h>
> +#include <linux/mhi_ep.h>
> +#include <linux/mod_devicetable.h>
> +#include <linux/module.h>
> +#include <linux/netdevice.h>
> +#include <linux/skbuff.h>
> +#include <linux/u64_stats_sync.h>
> +
> +#define MHI_NET_MIN_MTU ETH_MIN_MTU
> +#define MHI_NET_MAX_MTU 0xffff
> +
> +struct mhi_ep_net_stats {
> + u64_stats_t rx_packets;
> + u64_stats_t rx_bytes;
> + u64_stats_t rx_errors;
> + u64_stats_t tx_packets;
> + u64_stats_t tx_bytes;
> + u64_stats_t tx_errors;
> + u64_stats_t tx_dropped;
> + struct u64_stats_sync tx_syncp;
> + struct u64_stats_sync rx_syncp;
> +};
> +
> +struct mhi_ep_net_dev {
> + struct mhi_ep_device *mdev;
> + struct net_device *ndev;
> + struct mhi_ep_net_stats stats;
> + struct workqueue_struct *xmit_wq;
> + struct work_struct xmit_work;
> + struct sk_buff_head tx_buffers;
> + spinlock_t tx_lock; /* Lock for protecting tx_buffers */
> + u32 mru;
> +};
> +
> +static void mhi_ep_net_dev_process_queue_packets(struct work_struct *work)
> +{
> + struct mhi_ep_net_dev *mhi_ep_netdev = container_of(work,
> + struct mhi_ep_net_dev, xmit_work);
Looks like this can fit all on one line to me.
On 6/7/2023 9:24 AM, Manivannan Sadhasivam wrote:
> The host MHI net driver was not listed earlier. So let's add both host and
> endpoint MHI net drivers under MHI bus.
>
> Cc: Loic Poulain <[email protected]>
> Signed-off-by: Manivannan Sadhasivam <[email protected]>
Reviewed-by: Jeffrey Hugo <[email protected]>
On Wed, 7 Jun 2023 20:54:25 +0530 Manivannan Sadhasivam wrote:
> This series adds a network driver for the Modem Host Interface (MHI) endpoint
> devices that provides network interfaces to the PCIe based Qualcomm endpoint
> devices supporting MHI bus (like Modems). This driver allows the MHI endpoint
> devices to establish IP communication with the host machines (x86, ARM64) over
> MHI bus.
>
> On the host side, the existing mhi_net driver provides the network connectivity
> to the host.
Why are you posting the next version before the discussion on the
previous one concluded? :|
In any case, I'm opposed to reuse of the networking stack to talk
to firmware. It's a local device. The networking subsystem doesn't
have to cater to fake networks. Please carry:
Nacked-by: Jakub Kicinski <[email protected]>
if there are future submissions.
On 6/7/2023 10:27 AM, Jeffrey Hugo wrote:
> On 6/7/2023 9:24 AM, Manivannan Sadhasivam wrote:
>> Add a network driver for the Modem Host Interface (MHI) endpoint devices
>> that provides network interfaces to the PCIe based Qualcomm endpoint
>> devices supporting MHI bus. This driver allows the MHI endpoint
>> devices to
>> establish IP communication with the host machines (x86, ARM64) over MHI
>> bus.
>>
>> The driver currently supports only IP_SW0 MHI channel that can be used
>> to route IP traffic from the endpoint CPU to host machine.
>>
>> Signed-off-by: Manivannan Sadhasivam <[email protected]>
>> ---
>> drivers/net/Kconfig | 9 ++
>> drivers/net/Makefile | 1 +
>> drivers/net/mhi_ep_net.c | 331 +++++++++++++++++++++++++++++++++++++++
>> 3 files changed, 341 insertions(+)
>> create mode 100644 drivers/net/mhi_ep_net.c
>>
>> diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
>> index 368c6f5b327e..36b628e2e49f 100644
>> --- a/drivers/net/Kconfig
>> +++ b/drivers/net/Kconfig
>> @@ -452,6 +452,15 @@ config MHI_NET
>> QCOM based WWAN modems for IP or QMAP/rmnet protocol (like
>> SDX55).
>> Say Y or M.
>> +config MHI_EP_NET
>> + tristate "MHI Endpoint network driver"
>> + depends on MHI_BUS_EP
>> + help
>> + This is the network driver for MHI bus implementation in endpoint
>> + devices. It is used provide the network interface for QCOM
>> endpoint
>> + devices such as SDX55 modems.
>> + Say Y or M.
>
> What will the module be called if "m" is selected?
>
>> +
>> endif # NET_CORE
>> config SUNGEM_PHY
>> diff --git a/drivers/net/Makefile b/drivers/net/Makefile
>> index e26f98f897c5..b8e706a4150e 100644
>> --- a/drivers/net/Makefile
>> +++ b/drivers/net/Makefile
>> @@ -40,6 +40,7 @@ obj-$(CONFIG_NLMON) += nlmon.o
>> obj-$(CONFIG_NET_VRF) += vrf.o
>> obj-$(CONFIG_VSOCKMON) += vsockmon.o
>> obj-$(CONFIG_MHI_NET) += mhi_net.o
>> +obj-$(CONFIG_MHI_EP_NET) += mhi_ep_net.o
>> #
>> # Networking Drivers
>> diff --git a/drivers/net/mhi_ep_net.c b/drivers/net/mhi_ep_net.c
>> new file mode 100644
>> index 000000000000..0d7939caefc7
>> --- /dev/null
>> +++ b/drivers/net/mhi_ep_net.c
>> @@ -0,0 +1,331 @@
>> +// SPDX-License-Identifier: GPL-2.0-or-later
>> +/*
>> + * MHI Endpoint Network driver
>> + *
>> + * Based on drivers/net/mhi_net.c
>> + *
>> + * Copyright (c) 2023, Linaro Ltd.
>> + * Author: Manivannan Sadhasivam <[email protected]>
>> + */
>> +
>> +#include <linux/if_arp.h>
>> +#include <linux/mhi_ep.h>
>> +#include <linux/mod_devicetable.h>
>> +#include <linux/module.h>
>> +#include <linux/netdevice.h>
>> +#include <linux/skbuff.h>
>> +#include <linux/u64_stats_sync.h>
>> +
>> +#define MHI_NET_MIN_MTU ETH_MIN_MTU
>> +#define MHI_NET_MAX_MTU 0xffff
ETH_MAX_MTU ?
Personal preference thing. If you think 0xffff is really the superior
option, so be it. Personally, it takes me a second to figure out that
is 64k - 1 and then relate it to the MHI packet size limit. Also seems
really odd with this line of code right next to, and related to,
ETH_MIN_MTU. Feels like a non-magic number here will make things more
maintainable.
Alternatively move MHI_MAX_MTU out of host/internal.h into something
that is convenient for this driver to include and use? It is a
fundamental constant for the MHI protocol, we just haven't yet had a
need for it to be used outside of the MHI bus implementation code.
>> +
>> +struct mhi_ep_net_stats {
>> + u64_stats_t rx_packets;
>> + u64_stats_t rx_bytes;
>> + u64_stats_t rx_errors;
>> + u64_stats_t tx_packets;
>> + u64_stats_t tx_bytes;
>> + u64_stats_t tx_errors;
>> + u64_stats_t tx_dropped;
>> + struct u64_stats_sync tx_syncp;
>> + struct u64_stats_sync rx_syncp;
>> +};
>> +
>> +struct mhi_ep_net_dev {
>> + struct mhi_ep_device *mdev;
>> + struct net_device *ndev;
>> + struct mhi_ep_net_stats stats;
>> + struct workqueue_struct *xmit_wq;
>> + struct work_struct xmit_work;
>> + struct sk_buff_head tx_buffers;
>> + spinlock_t tx_lock; /* Lock for protecting tx_buffers */
>> + u32 mru;
>> +};
>> +
>> +static void mhi_ep_net_dev_process_queue_packets(struct work_struct
>> *work)
>> +{
>> + struct mhi_ep_net_dev *mhi_ep_netdev = container_of(work,
>> + struct mhi_ep_net_dev, xmit_work);
>
> Looks like this can fit all on one line to me.
>
>
Le 07/06/2023 à 17:24, Manivannan Sadhasivam a écrit :
> Add a network driver for the Modem Host Interface (MHI) endpoint devices
> that provides network interfaces to the PCIe based Qualcomm endpoint
> devices supporting MHI bus. This driver allows the MHI endpoint devices to
> establish IP communication with the host machines (x86, ARM64) over MHI
> bus.
>
> The driver currently supports only IP_SW0 MHI channel that can be used
> to route IP traffic from the endpoint CPU to host machine.
>
> Signed-off-by: Manivannan Sadhasivam <[email protected]>
> ---
[...]
> +static int mhi_ep_net_newlink(struct mhi_ep_device *mhi_dev, struct net_device *ndev)
> +{
> + struct mhi_ep_net_dev *mhi_ep_netdev;
> + int ret;
> +
> + mhi_ep_netdev = netdev_priv(ndev);
> +
> + dev_set_drvdata(&mhi_dev->dev, mhi_ep_netdev);
> + mhi_ep_netdev->ndev = ndev;
> + mhi_ep_netdev->mdev = mhi_dev;
> + mhi_ep_netdev->mru = mhi_dev->mhi_cntrl->mru;
> +
> + skb_queue_head_init(&mhi_ep_netdev->tx_buffers);
> + spin_lock_init(&mhi_ep_netdev->tx_lock);
> +
> + u64_stats_init(&mhi_ep_netdev->stats.rx_syncp);
> + u64_stats_init(&mhi_ep_netdev->stats.tx_syncp);
> +
> + mhi_ep_netdev->xmit_wq = alloc_workqueue("mhi_ep_net_xmit_wq", 0, WQ_HIGHPRI);
if (!mhi_ep_netdev->xmit_wq)
return -ENOMEM;
> + INIT_WORK(&mhi_ep_netdev->xmit_work, mhi_ep_net_dev_process_queue_packets);
> +
> + ret = register_netdev(ndev);
> + if (ret) {
> + destroy_workqueue(mhi_ep_netdev->xmit_wq);
I don't really think it is needed, but to be consistent with
mhi_ep_net_dellink(), maybe:
dev_set_drvdata(&mhi_dev->dev, NULL);
CJ
> + return ret;
> + }
> +
> + return 0;
> +}
[...]
On Wed, Jun 07, 2023 at 10:27:47AM -0600, Jeffrey Hugo wrote:
> On 6/7/2023 9:24 AM, Manivannan Sadhasivam wrote:
> > Add a network driver for the Modem Host Interface (MHI) endpoint devices
> > that provides network interfaces to the PCIe based Qualcomm endpoint
> > devices supporting MHI bus. This driver allows the MHI endpoint devices to
> > establish IP communication with the host machines (x86, ARM64) over MHI
> > bus.
> >
> > The driver currently supports only IP_SW0 MHI channel that can be used
> > to route IP traffic from the endpoint CPU to host machine.
> >
> > Signed-off-by: Manivannan Sadhasivam <[email protected]>
> > ---
> > drivers/net/Kconfig | 9 ++
> > drivers/net/Makefile | 1 +
> > drivers/net/mhi_ep_net.c | 331 +++++++++++++++++++++++++++++++++++++++
> > 3 files changed, 341 insertions(+)
> > create mode 100644 drivers/net/mhi_ep_net.c
> >
> > diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
> > index 368c6f5b327e..36b628e2e49f 100644
> > --- a/drivers/net/Kconfig
> > +++ b/drivers/net/Kconfig
> > @@ -452,6 +452,15 @@ config MHI_NET
> > QCOM based WWAN modems for IP or QMAP/rmnet protocol (like SDX55).
> > Say Y or M.
> > +config MHI_EP_NET
> > + tristate "MHI Endpoint network driver"
> > + depends on MHI_BUS_EP
> > + help
> > + This is the network driver for MHI bus implementation in endpoint
> > + devices. It is used provide the network interface for QCOM endpoint
> > + devices such as SDX55 modems.
> > + Say Y or M.
>
> What will the module be called if "m" is selected?
>
Will add that info.
> > +
> > endif # NET_CORE
> > config SUNGEM_PHY
> > diff --git a/drivers/net/Makefile b/drivers/net/Makefile
> > index e26f98f897c5..b8e706a4150e 100644
> > --- a/drivers/net/Makefile
> > +++ b/drivers/net/Makefile
> > @@ -40,6 +40,7 @@ obj-$(CONFIG_NLMON) += nlmon.o
> > obj-$(CONFIG_NET_VRF) += vrf.o
> > obj-$(CONFIG_VSOCKMON) += vsockmon.o
> > obj-$(CONFIG_MHI_NET) += mhi_net.o
> > +obj-$(CONFIG_MHI_EP_NET) += mhi_ep_net.o
> > #
> > # Networking Drivers
> > diff --git a/drivers/net/mhi_ep_net.c b/drivers/net/mhi_ep_net.c
> > new file mode 100644
> > index 000000000000..0d7939caefc7
> > --- /dev/null
> > +++ b/drivers/net/mhi_ep_net.c
> > @@ -0,0 +1,331 @@
> > +// SPDX-License-Identifier: GPL-2.0-or-later
> > +/*
> > + * MHI Endpoint Network driver
> > + *
> > + * Based on drivers/net/mhi_net.c
> > + *
> > + * Copyright (c) 2023, Linaro Ltd.
> > + * Author: Manivannan Sadhasivam <[email protected]>
> > + */
> > +
> > +#include <linux/if_arp.h>
> > +#include <linux/mhi_ep.h>
> > +#include <linux/mod_devicetable.h>
> > +#include <linux/module.h>
> > +#include <linux/netdevice.h>
> > +#include <linux/skbuff.h>
> > +#include <linux/u64_stats_sync.h>
> > +
> > +#define MHI_NET_MIN_MTU ETH_MIN_MTU
> > +#define MHI_NET_MAX_MTU 0xffff
> > +
> > +struct mhi_ep_net_stats {
> > + u64_stats_t rx_packets;
> > + u64_stats_t rx_bytes;
> > + u64_stats_t rx_errors;
> > + u64_stats_t tx_packets;
> > + u64_stats_t tx_bytes;
> > + u64_stats_t tx_errors;
> > + u64_stats_t tx_dropped;
> > + struct u64_stats_sync tx_syncp;
> > + struct u64_stats_sync rx_syncp;
> > +};
> > +
> > +struct mhi_ep_net_dev {
> > + struct mhi_ep_device *mdev;
> > + struct net_device *ndev;
> > + struct mhi_ep_net_stats stats;
> > + struct workqueue_struct *xmit_wq;
> > + struct work_struct xmit_work;
> > + struct sk_buff_head tx_buffers;
> > + spinlock_t tx_lock; /* Lock for protecting tx_buffers */
> > + u32 mru;
> > +};
> > +
> > +static void mhi_ep_net_dev_process_queue_packets(struct work_struct *work)
> > +{
> > + struct mhi_ep_net_dev *mhi_ep_netdev = container_of(work,
> > + struct mhi_ep_net_dev, xmit_work);
>
> Looks like this can fit all on one line to me.
>
That's me trying to stick to both 100 and 80 column width :/ Will fix it.
- Mani
--
மணிவண்ணன் சதாசிவம்
On Wed, Jun 07, 2023 at 09:49:22AM -0700, Jakub Kicinski wrote:
> On Wed, 7 Jun 2023 20:54:25 +0530 Manivannan Sadhasivam wrote:
> > This series adds a network driver for the Modem Host Interface (MHI) endpoint
> > devices that provides network interfaces to the PCIe based Qualcomm endpoint
> > devices supporting MHI bus (like Modems). This driver allows the MHI endpoint
> > devices to establish IP communication with the host machines (x86, ARM64) over
> > MHI bus.
> >
> > On the host side, the existing mhi_net driver provides the network connectivity
> > to the host.
>
> Why are you posting the next version before the discussion on the
> previous one concluded? :|
>
Previous discussion doesn't sound any controversial to me, so I thought of
respinning. Maybe I should've waited...
> In any case, I'm opposed to reuse of the networking stack to talk
> to firmware. It's a local device. The networking subsystem doesn't
> have to cater to fake networks. Please carry:
>
> Nacked-by: Jakub Kicinski <[email protected]>
>
> if there are future submissions.
Why shouldn't it be? With this kind of setup one could share the data connectivity
available in the device with the host over IP tunneling. If the IP source in the
device (like modem DSP) has no way to be shared with the host, then those IP
packets could be tunneled through this interface for providing connectivity to
the host.
I believe this is a common usecase among the PCIe based wireless endpoint
devices.
- Mani
--
மணிவண்ணன் சதாசிவம்
On Wed, 7 Jun 2023 22:41:53 +0530 Manivannan Sadhasivam wrote:
> > In any case, I'm opposed to reuse of the networking stack to talk
> > to firmware. It's a local device. The networking subsystem doesn't
> > have to cater to fake networks. Please carry:
> >
> > Nacked-by: Jakub Kicinski <[email protected]>
> >
> > if there are future submissions.
>
> Why shouldn't it be? With this kind of setup one could share the data connectivity
> available in the device with the host over IP tunneling. If the IP source in the
> device (like modem DSP) has no way to be shared with the host, then those IP
> packets could be tunneled through this interface for providing connectivity to
> the host.
>
> I believe this is a common usecase among the PCIe based wireless endpoint
> devices.
We can handwave our way into many scenarios and terrible architectures.
I don't see any compelling reason to merge this.
On Wed, Jun 07, 2023 at 09:49:22AM -0700, Jakub Kicinski wrote:
> On Wed, 7 Jun 2023 20:54:25 +0530 Manivannan Sadhasivam wrote:
> > This series adds a network driver for the Modem Host Interface (MHI) endpoint
> > devices that provides network interfaces to the PCIe based Qualcomm endpoint
> > devices supporting MHI bus (like Modems). This driver allows the MHI endpoint
> > devices to establish IP communication with the host machines (x86, ARM64) over
> > MHI bus.
> >
> > On the host side, the existing mhi_net driver provides the network connectivity
> > to the host.
>
> Why are you posting the next version before the discussion on the
> previous one concluded? :|
>
> In any case, I'm opposed to reuse of the networking stack to talk
> to firmware. It's a local device. The networking subsystem doesn't
> have to cater to fake networks. Please carry:
>
> Nacked-by: Jakub Kicinski <[email protected]>
Remote Processor Messaging (rpmsg) Framework does seem to be what is
supposed to be used for these sorts of situations. Not that i know
much about it.
Andrew
On Wed, Jun 07, 2023 at 10:52:54AM -0600, Jeffrey Hugo wrote:
> On 6/7/2023 10:27 AM, Jeffrey Hugo wrote:
> > On 6/7/2023 9:24 AM, Manivannan Sadhasivam wrote:
> > > Add a network driver for the Modem Host Interface (MHI) endpoint devices
> > > that provides network interfaces to the PCIe based Qualcomm endpoint
> > > devices supporting MHI bus. This driver allows the MHI endpoint
> > > devices to
> > > establish IP communication with the host machines (x86, ARM64) over MHI
> > > bus.
> > >
> > > The driver currently supports only IP_SW0 MHI channel that can be used
> > > to route IP traffic from the endpoint CPU to host machine.
> > >
> > > Signed-off-by: Manivannan Sadhasivam <[email protected]>
> > > ---
> > > drivers/net/Kconfig | 9 ++
> > > drivers/net/Makefile | 1 +
> > > drivers/net/mhi_ep_net.c | 331 +++++++++++++++++++++++++++++++++++++++
> > > 3 files changed, 341 insertions(+)
> > > create mode 100644 drivers/net/mhi_ep_net.c
> > >
> > > diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
> > > index 368c6f5b327e..36b628e2e49f 100644
> > > --- a/drivers/net/Kconfig
> > > +++ b/drivers/net/Kconfig
> > > @@ -452,6 +452,15 @@ config MHI_NET
> > > QCOM based WWAN modems for IP or QMAP/rmnet protocol (like
> > > SDX55).
> > > Say Y or M.
> > > +config MHI_EP_NET
> > > + tristate "MHI Endpoint network driver"
> > > + depends on MHI_BUS_EP
> > > + help
> > > + This is the network driver for MHI bus implementation in endpoint
> > > + devices. It is used provide the network interface for QCOM
> > > endpoint
> > > + devices such as SDX55 modems.
> > > + Say Y or M.
> >
> > What will the module be called if "m" is selected?
> >
> > > +
> > > endif # NET_CORE
> > > config SUNGEM_PHY
> > > diff --git a/drivers/net/Makefile b/drivers/net/Makefile
> > > index e26f98f897c5..b8e706a4150e 100644
> > > --- a/drivers/net/Makefile
> > > +++ b/drivers/net/Makefile
> > > @@ -40,6 +40,7 @@ obj-$(CONFIG_NLMON) += nlmon.o
> > > obj-$(CONFIG_NET_VRF) += vrf.o
> > > obj-$(CONFIG_VSOCKMON) += vsockmon.o
> > > obj-$(CONFIG_MHI_NET) += mhi_net.o
> > > +obj-$(CONFIG_MHI_EP_NET) += mhi_ep_net.o
> > > #
> > > # Networking Drivers
> > > diff --git a/drivers/net/mhi_ep_net.c b/drivers/net/mhi_ep_net.c
> > > new file mode 100644
> > > index 000000000000..0d7939caefc7
> > > --- /dev/null
> > > +++ b/drivers/net/mhi_ep_net.c
> > > @@ -0,0 +1,331 @@
> > > +// SPDX-License-Identifier: GPL-2.0-or-later
> > > +/*
> > > + * MHI Endpoint Network driver
> > > + *
> > > + * Based on drivers/net/mhi_net.c
> > > + *
> > > + * Copyright (c) 2023, Linaro Ltd.
> > > + * Author: Manivannan Sadhasivam <[email protected]>
> > > + */
> > > +
> > > +#include <linux/if_arp.h>
> > > +#include <linux/mhi_ep.h>
> > > +#include <linux/mod_devicetable.h>
> > > +#include <linux/module.h>
> > > +#include <linux/netdevice.h>
> > > +#include <linux/skbuff.h>
> > > +#include <linux/u64_stats_sync.h>
> > > +
> > > +#define MHI_NET_MIN_MTU ETH_MIN_MTU
> > > +#define MHI_NET_MAX_MTU 0xffff
>
> ETH_MAX_MTU ?
>
> Personal preference thing. If you think 0xffff is really the superior
> option, so be it. Personally, it takes me a second to figure out that is
> 64k - 1 and then relate it to the MHI packet size limit. Also seems really
> odd with this line of code right next to, and related to, ETH_MIN_MTU.
> Feels like a non-magic number here will make things more maintainable.
>
TBH, I tried to stick to mhi_net, so just carried this macro. And I agree, it
could be made more understandable.
> Alternatively move MHI_MAX_MTU out of host/internal.h into something that is
> convenient for this driver to include and use? It is a fundamental constant
> for the MHI protocol, we just haven't yet had a need for it to be used
> outside of the MHI bus implementation code.
>
At some point I had a patch for moving this macro to mhi.h but it fell through
the cracks... Will bring it back.
- Mani
> > > +
> > > +struct mhi_ep_net_stats {
> > > + u64_stats_t rx_packets;
> > > + u64_stats_t rx_bytes;
> > > + u64_stats_t rx_errors;
> > > + u64_stats_t tx_packets;
> > > + u64_stats_t tx_bytes;
> > > + u64_stats_t tx_errors;
> > > + u64_stats_t tx_dropped;
> > > + struct u64_stats_sync tx_syncp;
> > > + struct u64_stats_sync rx_syncp;
> > > +};
> > > +
> > > +struct mhi_ep_net_dev {
> > > + struct mhi_ep_device *mdev;
> > > + struct net_device *ndev;
> > > + struct mhi_ep_net_stats stats;
> > > + struct workqueue_struct *xmit_wq;
> > > + struct work_struct xmit_work;
> > > + struct sk_buff_head tx_buffers;
> > > + spinlock_t tx_lock; /* Lock for protecting tx_buffers */
> > > + u32 mru;
> > > +};
> > > +
> > > +static void mhi_ep_net_dev_process_queue_packets(struct work_struct
> > > *work)
> > > +{
> > > + struct mhi_ep_net_dev *mhi_ep_netdev = container_of(work,
> > > + struct mhi_ep_net_dev, xmit_work);
> >
> > Looks like this can fit all on one line to me.
> >
> >
>
--
மணிவண்ணன் சதாசிவம்
On Wed, Jun 07, 2023 at 10:43:50AM -0700, Jakub Kicinski wrote:
> On Wed, 7 Jun 2023 22:41:53 +0530 Manivannan Sadhasivam wrote:
> > > In any case, I'm opposed to reuse of the networking stack to talk
> > > to firmware. It's a local device. The networking subsystem doesn't
> > > have to cater to fake networks. Please carry:
> > >
> > > Nacked-by: Jakub Kicinski <[email protected]>
> > >
> > > if there are future submissions.
> >
> > Why shouldn't it be? With this kind of setup one could share the data connectivity
> > available in the device with the host over IP tunneling. If the IP source in the
> > device (like modem DSP) has no way to be shared with the host, then those IP
> > packets could be tunneled through this interface for providing connectivity to
> > the host.
> >
> > I believe this is a common usecase among the PCIe based wireless endpoint
> > devices.
>
> We can handwave our way into many scenarios and terrible architectures.
> I don't see any compelling reason to merge this.
These kinds of use cases exist in the products out there in the market. Regarding
your comment on "opposed to reuse of the network stack to talk to firmware", it
is not just the firmware, it is the device in general that is talking to the
host over this interface. And I don't see how different it is from the host
perspective.
And these kinds of scenarios exist with all types of interfaces like usb-gadget,
virtio etc... So I'm not sure why the rule is different for the networking subsystem.
- Mani
--
மணிவண்ணன் சதாசிவம்
On Wed, Jun 07, 2023 at 08:13:32PM +0200, Andrew Lunn wrote:
> On Wed, Jun 07, 2023 at 09:49:22AM -0700, Jakub Kicinski wrote:
> > On Wed, 7 Jun 2023 20:54:25 +0530 Manivannan Sadhasivam wrote:
> > > This series adds a network driver for the Modem Host Interface (MHI) endpoint
> > > devices that provides network interfaces to the PCIe based Qualcomm endpoint
> > > devices supporting MHI bus (like Modems). This driver allows the MHI endpoint
> > > devices to establish IP communication with the host machines (x86, ARM64) over
> > > MHI bus.
> > >
> > > On the host side, the existing mhi_net driver provides the network connectivity
> > > to the host.
> >
> > Why are you posting the next version before the discussion on the
> > previous one concluded? :|
> >
> > In any case, I'm opposed to reuse of the networking stack to talk
> > to firmware. It's a local device. The networking subsystem doesn't
> > have to cater to fake networks. Please carry:
> >
> > Nacked-by: Jakub Kicinski <[email protected]>
>
> Remote Processor Messaging (rpmsg) Framework does seem to be what is
> supposed to be used for these sorts of situations. Not that i know
> much about it.
>
Rpmsg is another messaging protocol used for talking to the remote processor.
MHI is somewhat similar in terms of usecase but it is a proprietary protocol
used by Qcom for their devices.
- Mani
> Andrew
--
மணிவண்ணன் சதாசிவம்
Hi Jakub,
On Thu, Jun 08, 2023 at 06:07:20PM +0530, Manivannan Sadhasivam wrote:
> On Wed, Jun 07, 2023 at 10:43:50AM -0700, Jakub Kicinski wrote:
> > On Wed, 7 Jun 2023 22:41:53 +0530 Manivannan Sadhasivam wrote:
> > > > In any case, I'm opposed to reuse of the networking stack to talk
> > > > to firmware. It's a local device. The networking subsystem doesn't
> > > > have to cater to fake networks. Please carry:
> > > >
> > > > Nacked-by: Jakub Kicinski <[email protected]>
> > > >
> > > > if there are future submissions.
> > >
> > > Why shouldn't it be? With this kind of setup one could share the data connectivity
> > > available in the device with the host over IP tunneling. If the IP source in the
> > > device (like modem DSP) has no way to be shared with the host, then those IP
> > > packets could be tunneled through this interface for providing connectivity to
> > > the host.
> > >
> > > I believe this is a common usecase among the PCIe based wireless endpoint
> > > devices.
> >
> > We can handwave our way into many scenarios and terrible architectures.
> > I don't see any compelling reason to merge this.
>
> These kind of usecases exist in the products out there in the market. Regarding
> your comment on "opposed to reuse of the network stack to talk to firmware", it
> not the just the firmware, it is the device in general that is talking to the
> host over this interface. And I don't see how different it is from the host
> perspective.
>
> And these kind of scenarios exist with all types of interfaces like usb-gadget,
> virtio etc... So not sure why the rule is different for networking subsystem.
>
Sorry to revive this old thread, this discussion seems to have fallen through the
cracks...
As I explained above, other interfaces also expose this kind of functionality
between host and the device. One of the credible use cases for this driver is
sharing the network connectivity available in either the host or the device with the
other end.
To make it clear, we have 2 kind of channels exposed by MHI for networking.
1. IP_SW0
2. IP_HW0
IP_SW0 is useful in scenarios I explained above and IP_HW0 is purely used to
provide data connectivity to the host machines with the help of modem IP in the
device. And the host side stack is already well supported in mainline. With the
proposed driver, Linux can run on the device itself and it will give Qcom a
chance to get rid of their proprietary firmware used on the PCIe endpoint
devices like modems, etc...
- Mani
> - Mani
>
> --
> மணிவண்ணன் சதாசிவம்
--
மணிவண்ணன் சதாசிவம்
On Fri, 17 Nov 2023 12:36:02 +0530 Manivannan Sadhasivam wrote:
> Sorry to revive this old thread, this discussion seems to have fell through the
> cracks...
It did not fall thru the cracks, you got a nack. Here it is in a more
official form:
Nacked-by: Jakub Kicinski <[email protected]>
Please make sure you keep this tag and CC me if you ever post any form
of these patches again.
On Fri, Nov 17, 2023 at 04:26:38PM -0800, Jakub Kicinski wrote:
> On Fri, 17 Nov 2023 12:36:02 +0530 Manivannan Sadhasivam wrote:
> > Sorry to revive this old thread, this discussion seems to have fell through the
> > cracks...
>
> It did not fall thru the cracks, you got a nack. Here it is in a more
> official form:
>
> Nacked-by: Jakub Kicinski <[email protected]>
>
> Please make sure you keep this tag and CC me if you ever post any form
> of these patches again.
Thanks for the NACK. Could you please respond to my reply justifying this driver
(the part you just snipped)? I'm posting it below:
> As I explained above, other interfaces also expose this kind of functionality
> between host and the device. One of the credible usecase with this driver is
> sharing the network connectivity available in either host or the device with the
> other end.
>
> To make it clear, we have 2 kind of channels exposed by MHI for networking.
>
> 1. IP_SW0
> 2. IP_HW0
>
> IP_SW0 is useful in scenarios I explained above and IP_HW0 is purely used to
> provide data connectivity to the host machines with the help of modem IP in the
> device. And the host side stack is already well supported in mainline. With the
> proposed driver, Linux can run on the device itself and it will give Qcom a
> chance to get rid of their proprietary firmware used on the PCIe endpoint
> devices like modems, etc...
I think you made up your mind that this driver is exposing the network interface
to the firmware on the device. I ought to clarify that the device running this
driver need not necessarily be a modem but can be a PCIe endpoint instance that uses the
netdev exposed by this driver to share data connectivity with another device.
This concept is not new and is supported by other protocols such as Virtio
etc...
- Mani
--
மணிவண்ணன் சதாசிவம்
On Mon, 27 Nov 2023 11:34:39 +0530 Manivannan Sadhasivam wrote:
> I think you made up your mind that this driver is exposing the network interface
> to the firmware on the device. I ought to clearify that the device running this
> driver doesn't necessarily be a modem but a PCIe endpoint instance that uses the
> netdev exposed by this driver to share data connectivity with another device.
Doesn't matter how many legit use cases you can come up with.
Using netdev as a device comm channel is something I am
fundamentally opposed to.
> This concept is not new and being supported by other protocols such as Virtio
> etc...
Yes. Use virtio, please.
On Mon, 27 Nov 2023 at 18:46, Jakub Kicinski <[email protected]> wrote:
>
> On Mon, 27 Nov 2023 11:34:39 +0530 Manivannan Sadhasivam wrote:
> > I think you made up your mind that this driver is exposing the network interface
> > to the firmware on the device. I ought to clearify that the device running this
> > driver doesn't necessarily be a modem but a PCIe endpoint instance that uses the
> > netdev exposed by this driver to share data connectivity with another device.
>
> Doesn't matter how many legit use cases you can come up with.
> Using netdev as a device comm channel is something I am
> fundamentally opposed to.
>
> > This concept is not new and being supported by other protocols such as Virtio
> > etc...
>
> Yes. Use virtio, please.
We can try using virtio if we control both sides of the link. However
there are use cases of upstream Linux running on the modem (PCIe
EP) side and other systems (Win, Android) running on the RC side. In
such cases we have to provide the interface that is expected by the
host driver, which unfortunately is MHI. Not to mention that one of
the PCIe EP regions contains registers which target the MHI
protocol. I am not sure how the hardware will react if we bypass this
completely and implement VirtIO or NTB instead.
Also, please excuse me if this was already answered, just for my understanding:
- If we limit functionality to just networking channels which are used
to pass IP data between host and EP, will that be accepted?
- If we were to implement the PCIe networking card running Linux (e.g.
using Freescale PowerQUICC or Cavium Octeon chips), would you also be
opposed to implementing the EP side of the link as the netdev?
--
With best wishes
Dmitry
On Tue, 28 Nov 2023 22:35:50 +0200 Dmitry Baryshkov wrote:
> Also, please excuse me if this was already answered, just for my understanding:
> - If we limit functionality to just networking channels which are used
> to pass IP data between host and EP, will that be accepted?
That's too hard to enforce. We have 200+ drivers, we can't carefully
review every single line of code to make sure you stick to the "just
networking" promise you make us. Plus the next guy will come and tell
us "but you let the company X do it".
> - If we were to implement the PCIe networking card running Linux (e.g.
> using Freescale PowerQUICC or Cavium Octeon chips), would you also be
> opposed to implementing the EP side of the link as the netdev?
Yes.
It's very tempting to reuse existing code, written for traffic to build
a control channel. This becomes painful because:
- the lifetime rules for interfaces to configure vs to pass traffic
are different, which inevitably leads to bugs in common code,
- the use cases are different, which leads to hacks / abuse,
and then it's a lot harder for us to refactor and optimize core
code / data structures,
- IDK how "channel to talk to FW" fits with the normal IP stack...
The "FW channel netdevs" exist for decades now, and are very popular
with middle box SDKs, I know. Your choices are:
- keep the code out of tree,
- use a generic interface with a strong standard definition, like
virtio, and expect that no customizations will be allowed.
On Tue, 28 Nov 2023 at 22:58, Jakub Kicinski <[email protected]> wrote:
>
> On Tue, 28 Nov 2023 22:35:50 +0200 Dmitry Baryshkov wrote:
> > Also, please excuse me if this was already answered, just for my understanding:
> > - If we limit functionality to just networking channels which are used
> > to pass IP data between host and EP, will that be accepted?
>
> That's too hard to enforce. We have 200+ drivers, we can't carefully
> review every single line of code to make sure you stick to the "just
> networking" promise you make us. Plus the next guy will come and tell
> us "but you let the company X do it".
>
> > - If we were to implement the PCIe networking card running Linux (e.g.
> > using Freescale PowerQUICC or Cavium Octeon chips), would you also be
> > opposed to implementing the EP side of the link as the netdev?
>
> Yes.
>
> It's very tempting to reuse existing code, written for traffic to build
> a control channel. This becomes painful because:
> - the lifetime rules for interfaces to configure vs to pass traffic
> are different, which inevitably leads to bugs in common code,
> - the use cases are different, which leads to hacks / abuse,
> and then it's a lot harder for us to refactor and optimize core
> code / data structures,
> - IDK how "channel to talk to FW" fits with the normal IP stack...
Ok, here you are talking about the control path. I can then assume
that you consider it to be fine to use netdev for the EP data path, if
the control path is kept separate and those two can not be mixed. Does
that sound correct?
>
> The "FW channel netdevs" exist for decades now, and are very popular
> with middle box SDKs, I know. Your choices are:
> - keep the code out of tree,
> - use a generic interface with a strong standard definition, like
> virtio, and expect that no customizations will be allowed.
--
With best wishes
Dmitry
On Mon, 4 Dec 2023 14:12:12 +0200 Dmitry Baryshkov wrote:
> Ok, here you are talking about the control path. I can then assume
> that you consider it to be fine to use netdev for the EP data path, if
> the control path is kept separate and those two can not be mixed. Does
> that sound correct?
If datapath == traffic which is intended to leave the card via
the external port, then yes.
On Mon, 4 Dec 2023 at 18:12, Jakub Kicinski <[email protected]> wrote:
>
> On Mon, 4 Dec 2023 14:12:12 +0200 Dmitry Baryshkov wrote:
> > Ok, here you are talking about the control path. I can then assume
> > that you consider it to be fine to use netdev for the EP data path, if
> > the control path is kept separate and those two can not be mixed. Does
> > that sound correct?
>
> If datapath == traffic which is intended to leave the card via
> the external port, then yes.
Then I think I understand what causes the confusion.
The MHI netdev is used to deliver network traffic to the modem CPU,
but it is not the control path.
For the control path we have non-IP MHI channels (QMI, IPCR, etc).
This can be the traffic targeting e.g. SSH or HTTP server running on
the EP side of the link.
I probably fail to see the difference between this scenario and the
proper virtio netdev which also allows us to send the same IPv4/v6
traffic to the CPU on the EP side.
--
With best wishes
Dmitry