2023-10-12 06:01:43

by Takeru Hayasaka

Subject: [PATCH net-next v2] ethtool: ice: Support for RSS settings to GTP from ethtool

This is a patch that enables RSS functionality for GTP packets using
ethtool.
A user can include the TEID and make RSS work for GTP-U over IPv4 by
doing the following:
`ethtool -N ens3 rx-flow-hash gtpu4 sde`
In addition to gtpu(4|6), we now support gtpc(4|6), gtpc(4|6)t,
gtpu(4|6)e, gtpu(4|6)u, and gtpu(4|6)d.

GTP generates a flow that includes an ID called the TEID to identify
the tunnel. This tunnel is created for each UE (User Equipment).
By performing RSS based on this flow, it is possible to apply RSS per
unit of communication from a UE.
Without this, RSS would only be effective within the range of IP
addresses.
For instance, the PGW can only perform RSS within the IP range of the
SGW, which is problematic from a load distribution perspective,
especially if there is a bias in the terminals connected to a
particular base station.
This case can be solved by using this patch.

Signed-off-by: Takeru Hayasaka <[email protected]>
---
Added commit messages and options based on reviews
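
For illustration, the other new flow types follow the same pattern,
e.g. (the interface name is only an example; 'e' selects the TEID as
in the command above and assumes the matching userspace ethtool change):

  ethtool -N ens3 rx-flow-hash gtpc4t sde   # GTP-C with TEID
  ethtool -N ens3 rx-flow-hash gtpu4e sde   # GTP-U with PDU session container (EH)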

drivers/net/ethernet/intel/ice/ice_ethtool.c | 116 ++++++++++++++++++-
drivers/net/ethernet/intel/ice/ice_flow.h | 15 +++
drivers/net/ethernet/intel/ice/ice_lib.c | 84 ++++++++++++++
include/uapi/linux/ethtool.h | 13 +++
4 files changed, 226 insertions(+), 2 deletions(-)

diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index ad4d4702129f..8d8bf1b41049 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -2404,6 +2404,24 @@ static u32 ice_parse_hdrs(struct ethtool_rxnfc *nfc)
case SCTP_V4_FLOW:
hdrs |= ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4;
break;
+ case GTPU_V4_FLOW:
+ hdrs |= ICE_FLOW_SEG_HDR_GTPU_IP | ICE_FLOW_SEG_HDR_IPV4;
+ break;
+ case GTPC_V4_FLOW:
+ hdrs |= ICE_FLOW_SEG_HDR_GTPC | ICE_FLOW_SEG_HDR_IPV4;
+ break;
+ case GTPC_TEID_V4_FLOW:
+ hdrs |= ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_IPV4;
+ break;
+ case GTPU_EH_V4_FLOW:
+ hdrs |= ICE_FLOW_SEG_HDR_GTPU_EH | ICE_FLOW_SEG_HDR_IPV4;
+ break;
+ case GTPU_UL_V4_FLOW:
+ hdrs |= ICE_FLOW_SEG_HDR_GTPU_UP | ICE_FLOW_SEG_HDR_IPV4;
+ break;
+ case GTPU_DL_V4_FLOW:
+ hdrs |= ICE_FLOW_SEG_HDR_GTPU_DWN | ICE_FLOW_SEG_HDR_IPV4;
+ break;
case TCP_V6_FLOW:
hdrs |= ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6;
break;
@@ -2413,9 +2431,28 @@ static u32 ice_parse_hdrs(struct ethtool_rxnfc *nfc)
case SCTP_V6_FLOW:
hdrs |= ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6;
break;
+ case GTPU_V6_FLOW:
+ hdrs |= ICE_FLOW_SEG_HDR_GTPU_IP | ICE_FLOW_SEG_HDR_IPV6;
+ break;
+ case GTPC_V6_FLOW:
+ hdrs |= ICE_FLOW_SEG_HDR_GTPC | ICE_FLOW_SEG_HDR_IPV6;
+ break;
+ case GTPC_TEID_V6_FLOW:
+ hdrs |= ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_IPV6;
+ break;
+ case GTPU_EH_V6_FLOW:
+ hdrs |= ICE_FLOW_SEG_HDR_GTPU_EH | ICE_FLOW_SEG_HDR_IPV6;
+ break;
+ case GTPU_UL_V6_FLOW:
+ hdrs |= ICE_FLOW_SEG_HDR_GTPU_UP | ICE_FLOW_SEG_HDR_IPV6;
+ break;
+ case GTPU_DL_V6_FLOW:
+ hdrs |= ICE_FLOW_SEG_HDR_GTPU_DWN | ICE_FLOW_SEG_HDR_IPV6;
+ break;
default:
break;
}
+
return hdrs;
}

@@ -2431,6 +2468,12 @@ static u32 ice_parse_hdrs(struct ethtool_rxnfc *nfc)
BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT)
#define ICE_FLOW_HASH_FLD_SCTP_DST_PORT \
BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT)
+#define ICE_FLOW_HASH_FLD_GTPC_TEID BIT_ULL(ICE_FLOW_FIELD_IDX_GTPC_TEID)
+#define ICE_FLOW_HASH_FLD_GTPU_IP_TEID BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_IP_TEID)
+#define ICE_FLOW_HASH_FLD_GTPU_EH_TEID BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_EH_TEID)
+#define ICE_FLOW_HASH_FLD_GTPU_EH_QFI BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_EH_QFI)
+#define ICE_FLOW_HASH_FLD_GTPU_UP_TEID BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_UP_TEID)
+#define ICE_FLOW_HASH_FLD_GTPU_DWN_TEID BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID)

/**
* ice_parse_hash_flds - parses hash fields from RSS hash input
@@ -2448,6 +2491,12 @@ static u64 ice_parse_hash_flds(struct ethtool_rxnfc *nfc)
case TCP_V4_FLOW:
case UDP_V4_FLOW:
case SCTP_V4_FLOW:
+ case GTPU_V4_FLOW:
+ case GTPC_V4_FLOW:
+ case GTPC_TEID_V4_FLOW:
+ case GTPU_EH_V4_FLOW:
+ case GTPU_UL_V4_FLOW:
+ case GTPU_DL_V4_FLOW:
if (nfc->data & RXH_IP_SRC)
hfld |= ICE_FLOW_HASH_FLD_IPV4_SA;
if (nfc->data & RXH_IP_DST)
@@ -2456,6 +2505,12 @@ static u64 ice_parse_hash_flds(struct ethtool_rxnfc *nfc)
case TCP_V6_FLOW:
case UDP_V6_FLOW:
case SCTP_V6_FLOW:
+ case GTPU_V6_FLOW:
+ case GTPC_V6_FLOW:
+ case GTPC_TEID_V6_FLOW:
+ case GTPU_EH_V6_FLOW:
+ case GTPU_UL_V6_FLOW:
+ case GTPU_DL_V6_FLOW:
if (nfc->data & RXH_IP_SRC)
hfld |= ICE_FLOW_HASH_FLD_IPV6_SA;
if (nfc->data & RXH_IP_DST)
@@ -2489,6 +2544,48 @@ static u64 ice_parse_hash_flds(struct ethtool_rxnfc *nfc)
if (nfc->data & RXH_L4_B_2_3)
hfld |= ICE_FLOW_HASH_FLD_SCTP_DST_PORT;
break;
+ case GTPU_V4_FLOW:
+ case GTPU_V6_FLOW:
+ case GTPU_EH_V4_FLOW:
+ case GTPU_EH_V6_FLOW:
+ case GTPU_UL_V4_FLOW:
+ case GTPU_UL_V6_FLOW:
+ case GTPU_DL_V4_FLOW:
+ case GTPU_DL_V6_FLOW:
+ if (nfc->data & RXH_L4_B_0_1)
+ hfld |= ICE_FLOW_HASH_FLD_TCP_SRC_PORT |
+ ICE_FLOW_HASH_FLD_UDP_SRC_PORT;
+ if (nfc->data & RXH_L4_B_2_3)
+ hfld |= ICE_FLOW_HASH_FLD_TCP_DST_PORT |
+ ICE_FLOW_HASH_FLD_UDP_DST_PORT;
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (nfc->data & RXH_GTP_TEID) {
+ switch (nfc->flow_type) {
+ case GTPC_TEID_V4_FLOW:
+ case GTPC_TEID_V6_FLOW:
+ hfld |= ICE_FLOW_HASH_FLD_GTPC_TEID;
+ break;
+ case GTPU_V4_FLOW:
+ case GTPU_V6_FLOW:
+ hfld |= ICE_FLOW_HASH_FLD_GTPU_IP_TEID;
+ break;
+ case GTPU_EH_V4_FLOW:
+ case GTPU_EH_V6_FLOW:
+ hfld |= ICE_FLOW_HASH_FLD_GTPU_EH_TEID;
+ break;
+ case GTPU_UL_V4_FLOW:
+ case GTPU_UL_V6_FLOW:
+ hfld |= ICE_FLOW_HASH_FLD_GTPU_UP_TEID;
+ break;
+ case GTPU_DL_V4_FLOW:
+ case GTPU_DL_V6_FLOW:
+ hfld |= ICE_FLOW_HASH_FLD_GTPU_DWN_TEID;
+ break;
default:
break;
}
@@ -2590,13 +2687,28 @@ ice_get_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc)

if (hash_flds & ICE_FLOW_HASH_FLD_TCP_SRC_PORT ||
hash_flds & ICE_FLOW_HASH_FLD_UDP_SRC_PORT ||
- hash_flds & ICE_FLOW_HASH_FLD_SCTP_SRC_PORT)
+ hash_flds & ICE_FLOW_HASH_FLD_SCTP_SRC_PORT ||
+ hash_flds & ICE_FLOW_HASH_FLD_GTPU_IP_TEID ||
+ hash_flds & ICE_FLOW_HASH_FLD_GTPU_EH_TEID ||
+ hash_flds & ICE_FLOW_HASH_FLD_GTPU_UP_TEID ||
+ hash_flds & ICE_FLOW_HASH_FLD_GTPU_DWN_TEID)
nfc->data |= (u64)RXH_L4_B_0_1;

if (hash_flds & ICE_FLOW_HASH_FLD_TCP_DST_PORT ||
hash_flds & ICE_FLOW_HASH_FLD_UDP_DST_PORT ||
- hash_flds & ICE_FLOW_HASH_FLD_SCTP_DST_PORT)
+ hash_flds & ICE_FLOW_HASH_FLD_SCTP_DST_PORT ||
+ hash_flds & ICE_FLOW_HASH_FLD_GTPU_IP_TEID ||
+ hash_flds & ICE_FLOW_HASH_FLD_GTPU_EH_TEID ||
+ hash_flds & ICE_FLOW_HASH_FLD_GTPU_UP_TEID ||
+ hash_flds & ICE_FLOW_HASH_FLD_GTPU_DWN_TEID)
nfc->data |= (u64)RXH_L4_B_2_3;
+
+ if (hash_flds & ICE_FLOW_HASH_FLD_GTPC_TEID ||
+ hash_flds & ICE_FLOW_HASH_FLD_GTPU_IP_TEID ||
+ hash_flds & ICE_FLOW_HASH_FLD_GTPU_EH_TEID ||
+ hash_flds & ICE_FLOW_HASH_FLD_GTPU_UP_TEID ||
+ hash_flds & ICE_FLOW_HASH_FLD_GTPU_DWN_TEID)
+ nfc->data |= (u64)RXH_GTP_TEID;
}

/**
diff --git a/drivers/net/ethernet/intel/ice/ice_flow.h b/drivers/net/ethernet/intel/ice/ice_flow.h
index b465d27d9b80..7ddf6ce994e7 100644
--- a/drivers/net/ethernet/intel/ice/ice_flow.h
+++ b/drivers/net/ethernet/intel/ice/ice_flow.h
@@ -64,6 +64,21 @@
(ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_GTP_U_EH_TEID | \
ICE_FLOW_HASH_GTP_U_EH_QFI)

+#define ICE_FLOW_HASH_GTP_U_UP_TEID \
+ (BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_UP_TEID))
+#define ICE_FLOW_HASH_GTP_U_DWN_TEID \
+ (BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID))
+
+#define ICE_FLOW_HASH_GTP_U_IPV4_UP \
+ (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_GTP_U_UP_TEID)
+#define ICE_FLOW_HASH_GTP_U_IPV6_UP \
+ (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_GTP_U_UP_TEID)
+
+#define ICE_FLOW_HASH_GTP_U_IPV4_DWN \
+ (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_GTP_U_DWN_TEID)
+#define ICE_FLOW_HASH_GTP_U_IPV6_DWN \
+ (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_GTP_U_DWN_TEID)
+
#define ICE_FLOW_HASH_PPPOE_SESS_ID \
(BIT_ULL(ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID))

diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 201570cd2e0b..17852f24ff7d 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -1667,6 +1667,48 @@ static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi)
dev_dbg(dev, "ice_add_rss_cfg failed for sctp4 flow, vsi = %d, error = %d\n",
vsi_num, status);

+ /* configure RSS for gtpc4 with input set IPv4 src/dst */
+ status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV4,
+ ICE_FLOW_SEG_HDR_GTPC | ICE_FLOW_SEG_HDR_IPV4);
+ if (status)
+ dev_dbg(dev, "ice_add_rss_cfg failed for gtpc4 flow, vsi = %d, error = %d\n",
+ vsi_num, status);
+
+ /* configure RSS for gtpc4t with input set IPv4 src/dst */
+ status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_GTP_IPV4_TEID,
+ ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_IPV4);
+ if (status)
+ dev_dbg(dev, "ice_add_rss_cfg failed for gtpc4t flow, vsi = %d, error = %d\n",
+ vsi_num, status);
+
+ /* configure RSS for gtpu4 with input set IPv4 src/dst */
+ status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_GTP_U_IPV4_TEID,
+ ICE_FLOW_SEG_HDR_GTPU_IP | ICE_FLOW_SEG_HDR_IPV4);
+ if (status)
+ dev_dbg(dev, "ice_add_rss_cfg failed for gtpu4 flow, vsi = %d, error = %d\n",
+ vsi_num, status);
+
+ /* configure RSS for gtpu4e with input set IPv4 src/dst */
+ status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_GTP_U_IPV4_EH,
+ ICE_FLOW_SEG_HDR_GTPU_EH | ICE_FLOW_SEG_HDR_IPV4);
+ if (status)
+ dev_dbg(dev, "ice_add_rss_cfg failed for gtpu4e flow, vsi = %d, error = %d\n",
+ vsi_num, status);
+
+ /* configure RSS for gtpu4u with input set IPv4 src/dst */
+ status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_GTP_U_IPV4_UP,
+ ICE_FLOW_SEG_HDR_GTPU_UP | ICE_FLOW_SEG_HDR_IPV4);
+ if (status)
+ dev_dbg(dev, "ice_add_rss_cfg failed for gtpu4u flow, vsi = %d, error = %d\n",
+ vsi_num, status);
+
+ /* configure RSS for gtpu4d with input set IPv4 src/dst */
+ status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_GTP_U_IPV4_DWN,
+ ICE_FLOW_SEG_HDR_GTPU_DWN | ICE_FLOW_SEG_HDR_IPV4);
+ if (status)
+ dev_dbg(dev, "ice_add_rss_cfg failed for gtpu4d flow, vsi = %d, error = %d\n",
+ vsi_num, status);
+
/* configure RSS for tcp6 with input set IPv6 src/dst, TCP src/dst */
status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_TCP_IPV6,
ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6);
@@ -1688,6 +1730,48 @@ static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi)
dev_dbg(dev, "ice_add_rss_cfg failed for sctp6 flow, vsi = %d, error = %d\n",
vsi_num, status);

+ /* configure RSS for gtpc6 with input set IPv6 src/dst */
+ status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV6,
+ ICE_FLOW_SEG_HDR_GTPC | ICE_FLOW_SEG_HDR_IPV6);
+ if (status)
+ dev_dbg(dev, "ice_add_rss_cfg failed for gtpc6 flow, vsi = %d, error = %d\n",
+ vsi_num, status);
+
+ /* configure RSS for gtpc6t with input set IPv6 src/dst */
+ status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_GTP_IPV6_TEID,
+ ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_IPV6);
+ if (status)
+ dev_dbg(dev, "ice_add_rss_cfg failed for gtpc6t flow, vsi = %d, error = %d\n",
+ vsi_num, status);
+
+ /* configure RSS for gtpu6 with input set IPv6 src/dst */
+ status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_GTP_U_IPV6_TEID,
+ ICE_FLOW_SEG_HDR_GTPU_IP | ICE_FLOW_SEG_HDR_IPV6);
+ if (status)
+ dev_dbg(dev, "ice_add_rss_cfg failed for gtpu6 flow, vsi = %d, error = %d\n",
+ vsi_num, status);
+
+ /* configure RSS for gtpu6e with input set IPv6 src/dst */
+ status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_GTP_U_IPV6_EH,
+ ICE_FLOW_SEG_HDR_GTPU_EH | ICE_FLOW_SEG_HDR_IPV6);
+ if (status)
+ dev_dbg(dev, "ice_add_rss_cfg failed for gtpu6e flow, vsi = %d, error = %d\n",
+ vsi_num, status);
+
+ /* configure RSS for gtpu6u with input set IPv6 src/dst */
+ status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_GTP_U_IPV6_UP,
+ ICE_FLOW_SEG_HDR_GTPU_UP | ICE_FLOW_SEG_HDR_IPV6);
+ if (status)
+ dev_dbg(dev, "ice_add_rss_cfg failed for gtpu6u flow, vsi = %d, error = %d\n",
+ vsi_num, status);
+
+ /* configure RSS for gtpu6d with input set IPv6 src/dst */
+ status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_GTP_U_IPV6_DWN,
+ ICE_FLOW_SEG_HDR_GTPU_DWN | ICE_FLOW_SEG_HDR_IPV6);
+ if (status)
+ dev_dbg(dev, "ice_add_rss_cfg failed for gtpu6d flow, vsi = %d, error = %d\n",
+ vsi_num, status);
+
status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_ESP_SPI,
ICE_FLOW_SEG_HDR_ESP);
if (status)
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index f7fba0dc87e5..a2d4f2081cf3 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -2011,6 +2011,18 @@ static inline int ethtool_validate_duplex(__u8 duplex)
#define IPV4_FLOW 0x10 /* hash only */
#define IPV6_FLOW 0x11 /* hash only */
#define ETHER_FLOW 0x12 /* spec only (ether_spec) */
+#define GTPU_V4_FLOW 0x13 /* hash only */
+#define GTPU_V6_FLOW 0x14 /* hash only */
+#define GTPC_V4_FLOW 0x15 /* hash only */
+#define GTPC_V6_FLOW 0x16 /* hash only */
+#define GTPC_TEID_V4_FLOW 0x17 /* hash only */
+#define GTPC_TEID_V6_FLOW 0x18 /* hash only */
+#define GTPU_EH_V4_FLOW 0x19 /* hash only */
+#define GTPU_EH_V6_FLOW 0x20 /* hash only */
+#define GTPU_UL_V4_FLOW 0x21 /* hash only */
+#define GTPU_UL_V6_FLOW 0x22 /* hash only */
+#define GTPU_DL_V4_FLOW 0x23 /* hash only */
+#define GTPU_DL_V6_FLOW 0x24 /* hash only */
/* Flag to enable additional fields in struct ethtool_rx_flow_spec */
#define FLOW_EXT 0x80000000
#define FLOW_MAC_EXT 0x40000000
@@ -2025,6 +2037,7 @@ static inline int ethtool_validate_duplex(__u8 duplex)
#define RXH_IP_DST (1 << 5)
#define RXH_L4_B_0_1 (1 << 6) /* src port in case of TCP/UDP/SCTP */
#define RXH_L4_B_2_3 (1 << 7) /* dst port in case of TCP/UDP/SCTP */
+#define RXH_GTP_TEID (1 << 8) /* teid in case of GTP */
#define RXH_DISCARD (1 << 31)

#define RX_CLS_FLOW_DISC 0xffffffffffffffffULL
--
2.34.1


2023-10-16 09:27:29

by Simon Horman

Subject: Re: [PATCH net-next v2] ethtool: ice: Support for RSS settings to GTP from ethtool

On Thu, Oct 12, 2023 at 06:01:15AM +0000, Takeru Hayasaka wrote:
> This is a patch that enables RSS functionality for GTP packets using
> ethtool.
> A user can include the TEID and make RSS work for GTP-U over IPv4 by
> doing the following:
> `ethtool -N ens3 rx-flow-hash gtpu4 sde`
> In addition to gtpu(4|6), we now support gtpc(4|6), gtpc(4|6)t,
> gtpu(4|6)e, gtpu(4|6)u, and gtpu(4|6)d.
>
> GTP generates a flow that includes an ID called the TEID to identify
> the tunnel. This tunnel is created for each UE (User Equipment).
> By performing RSS based on this flow, it is possible to apply RSS per
> unit of communication from a UE.
> Without this, RSS would only be effective within the range of IP
> addresses.
> For instance, the PGW can only perform RSS within the IP range of the
> SGW, which is problematic from a load distribution perspective,
> especially if there is a bias in the terminals connected to a
> particular base station.
> This case can be solved by using this patch.
>
> Signed-off-by: Takeru Hayasaka <[email protected]>
> ---
> Added commit messages and options based on reviews

Thanks Hayasaka-san,

Overall this looks good to me. And I see that the review of v1
has been addressed - by adding information about the need for
this to the commit message.

Reviewed-by: Simon Horman <[email protected]>

2023-10-16 22:23:50

by Jakub Kicinski

Subject: Re: [PATCH net-next v2] ethtool: ice: Support for RSS settings to GTP from ethtool

Thanks for the v2!

Adding Willem, Pablo, and Harald to CC (please CC them on future
versions).

On Thu, 12 Oct 2023 06:01:15 +0000 Takeru Hayasaka wrote:
> diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
> index f7fba0dc87e5..a2d4f2081cf3 100644
> --- a/include/uapi/linux/ethtool.h
> +++ b/include/uapi/linux/ethtool.h
> @@ -2011,6 +2011,18 @@ static inline int ethtool_validate_duplex(__u8 duplex)
> #define IPV4_FLOW 0x10 /* hash only */
> #define IPV6_FLOW 0x11 /* hash only */
> #define ETHER_FLOW 0x12 /* spec only (ether_spec) */
> +#define GTPU_V4_FLOW 0x13 /* hash only */
> +#define GTPU_V6_FLOW 0x14 /* hash only */
> +#define GTPC_V4_FLOW 0x15 /* hash only */
> +#define GTPC_V6_FLOW 0x16 /* hash only */
> +#define GTPC_TEID_V4_FLOW 0x17 /* hash only */
> +#define GTPC_TEID_V6_FLOW 0x18 /* hash only */
> +#define GTPU_EH_V4_FLOW 0x19 /* hash only */
> +#define GTPU_EH_V6_FLOW 0x20 /* hash only */

nit: please note that these are hex numbers,
next value after 0x19 is 0x1a, not 0x20.

> +#define GTPU_UL_V4_FLOW 0x21 /* hash only */
> +#define GTPU_UL_V6_FLOW 0x22 /* hash only */
> +#define GTPU_DL_V4_FLOW 0x23 /* hash only */
> +#define GTPU_DL_V6_FLOW 0x24 /* hash only */
> /* Flag to enable additional fields in struct ethtool_rx_flow_spec */
> #define FLOW_EXT 0x80000000
> #define FLOW_MAC_EXT 0x40000000

What gives me pause here is the number of flow sub-types we define
for GTP hashing.

My understanding of GTP is limited to what I just read on Wikipedia.

IIUC the GTPC vs GTPU distinction comes down to the UDP port on
which the protocol runs? Are the frames also different?

I'm guessing UL/DL are uplink/downlink but what's EH?

How do GTPU_V4_FLOW, GTPU_EH_V4_FLOW, GTPU_UL_V4_FLOW, and
GTPU_DL_V4_FLOW differ?

Key question is - are there reasonable use cases that you can think of
for enabling GTP hashing for each one of those bits individually or can
we combine some of them?

> @@ -2025,6 +2037,7 @@ static inline int ethtool_validate_duplex(__u8 duplex)
> #define RXH_IP_DST (1 << 5)
> #define RXH_L4_B_0_1 (1 << 6) /* src port in case of TCP/UDP/SCTP */
> #define RXH_L4_B_2_3 (1 << 7) /* dst port in case of TCP/UDP/SCTP */
> +#define RXH_GTP_TEID (1 << 8) /* teid in case of GTP */
> #define RXH_DISCARD (1 << 31)

2023-10-17 06:16:08

by Harald Welte

Subject: Re: [PATCH net-next v2] ethtool: ice: Support for RSS settings to GTP from ethtool

Hi Jakub and others,

On Mon, Oct 16, 2023 at 03:23:43PM -0700, Jakub Kicinski wrote:
> Adding Willem, Pablo, and Harald to CC (please CC them on future
> versions).

thanks for that Cc, it's much appreciated!

> My understanding of GTP is limited to what I just read on Wikipedia.

If there are any specific questions, I'm very happy to respond to them.

> IIUC the GTPC vs GTPU distinction comes down to the UDP port on
> which the protocol runs?

That is a convention, similar to any other well-known port.

> Are the frames also different?

Yes, the message type is different. There is one specific message type used for GTP-U
and lots of others for GTP-C.

> I'm guessing UL/DL are uplink/downlink but what's EH?

Surprisingly, I am also not familiar with "EH" in the context of GTP. It's an acronym
I don't recall reading in any related 3GPP spec.

> Key question is - are there reasonable use cases that you can think of
> for enabling GTP hashing for each one of those bits individually or can
> we combine some of them?

I cannot really comment on that, as I haven't yet thought about how RSS
might potentially be used in GTPU use cases. I would also appreciate
some enlightenment on that. What kind of network element/function are we talking
about (my guess is a UPF)? And what does its architecture look like when spreading
GTPU flows across CPUs using RSS?

This is, by the way, something that I've also been seeing with patches
against the kernel GTP code in recent years: people submit patches without
explaining the use cases, so it's hard to judge how relevant they
really are to most users.

--
- Harald Welte <[email protected]> https://laforge.gnumonks.org/
============================================================================
"Privacy in residential applications is a desirable marketing option."
(ETSI EN 300 175-7 Ch. A6)

2023-10-17 06:46:17

by Harald Welte

Subject: Re: [PATCH net-next v2] ethtool: ice: Support for RSS settings to GTP from ethtool

Hi again,

On Tue, Oct 17, 2023 at 08:11:28AM +0200, Harald Welte wrote:
> I cannot really comment on that, as I haven't yet thought about how RSS
> might potentially be used in GTPU use cases. I would also appreciate
> some enlightenment on that. What kind of network element/function are we talking
> about (my guess is a UPF)? And what does its architecture look like when spreading
> GTPU flows across CPUs using RSS?

Thinking about this for a few more minutes: in my opinion the usual use case
would be to perform RSS distribution based on (a hash of) the TEID,
possibly in combination with the destination IP(v4/v6) address of the
outer IP header, and possibly also including the [outer] destination UDP
port number.

The latter could likely always be included in the hash: either it is
the standard port (as in all public standard GTPU traffic) and would
hence not contribute to the distribution across the hash function, or it
is a non-standard port number in some kind of private/custom
deployment, and then you would want to use it to differentiate traffic,
as otherwise you wouldn't use non-standard ports.
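
Just to sketch how that would map onto the interface proposed in this
patch (the device name is arbitrary, and I'm borrowing the 'e' letter
for the new TEID bit from the example in the commit message; whether
the address here ends up being the outer or the inner one is of course
up to the driver):

  # hash GTP-U flows on the TEID plus the destination IP address
  ethtool -N eth0 rx-flow-hash gtpu4 de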

> +#define GTPU_V4_FLOW 0x13 /* hash only */
> +#define GTPU_V6_FLOW 0x14 /* hash only */

so if I'm guessing correctly, those would be hashing only on the V4/V6
destination address? Why would that be GTP specific? The IPv4/v6
header in front of the GTP header is a normal IP header.

> +#define GTPC_V4_FLOW 0x15 /* hash only */
> +#define GTPC_V6_FLOW 0x16 /* hash only */

Are there really deployments where the *very limited* GTP-C control
traffic needs RSS for scalability? The control plane GTP-C traffic
during session setup or mobility is extremely small compared to GTP-U.

Also, the same question applies: why is hashing the v4/v6 destination address
GTP-specific and not generic like for any other IP packet?

> +#define GTPC_TEID_V4_FLOW 0x17 /* hash only */
> +#define GTPC_TEID_V6_FLOW 0x18 /* hash only */

Why do we have TEID based hashing only in GTP-C? The User plane in
GTP-U is normally what you'd want to load-share across CPUs/nodes/...
That's where you have thousands to millions more packets than GTP-C.
What am I missing?

> +#define GTPU_EH_V4_FLOW 0x19 /* hash only */
> +#define GTPU_EH_V6_FLOW 0x20 /* hash only */
> +#define GTPU_UL_V4_FLOW 0x21 /* hash only */
> +#define GTPU_UL_V6_FLOW 0x22 /* hash only */
> +#define GTPU_DL_V4_FLOW 0x23 /* hash only */
> +#define GTPU_DL_V6_FLOW 0x24 /* hash only */

Can you explain what those are supposed to do? What exactly are they
hashing on?

IMHO that kind of explanation should be in the comment next to the
#define (for all of them) rather than "hash only". That way it's
obvious to the reader what they do, rather than having to guess.

--
- Harald Welte <[email protected]> https://laforge.gnumonks.org/
============================================================================
"Privacy in residential applications is a desirable marketing option."
(ETSI EN 300 175-7 Ch. A6)

2023-10-17 14:18:37

by Takeru Hayasaka

Subject: Re: [PATCH net-next v2] ethtool: ice: Support for RSS settings to GTP from ethtool

Hi Jakub-san and Simon-san
Thank you for reviewing again!

> Reviewed-by: Simon Horman <[email protected]>
Thanks;)

> Adding Willem, Pablo, and Harald to CC (please CC them on future
> versions).

of course. thanks!

> nit: please note that these are hex numbers,
> next value after 0x19 is 0x1a, not 0x20.

!!!!! I'm so embarrassed.... I will fix this in the next version.

> What gives me pause here is the number of flow sub-types we define
> for GTP hashing.
>
> My understanding of GTP is limited to what I just read on Wikipedia.
>
> IIUC the GTPC vs GTPU distinction comes down to the UDP port on
> which the protocol runs? Are the frames also different?
>
> I'm guessing UL/DL are uplink/downlink but what's EH?
>
> How do GTPU_V4_FLOW, GTPU_EH_V4_FLOW, GTPU_UL_V4_FLOW, and
> GTPU_DL_V4_FLOW differ?
>
> Key question is - are there reasonable use cases that you can think of
> for enabling GTP hashing for each one of those bits individually or can
> we combine some of them?

Firstly, what I want to convey is that the structure of each of these
packets is entirely different. In ethtool terms, packets with the same
structure are considered one flow, so I understand that it is necessary
to define these as separate flow types (I actually think that the
people at Intel are doing it that way).

Let me first explain the difference between GTPC and GTPU.
GTPC and GTPU use different UDP ports.
A further difference is that GTPC uses GTPv2-C while GTPU uses GTPv1-U,
which are the mainstream versions in current mobile networks.

In particular, what makes GTPC traffic unique varies according to
the processing phase.
A CSR (Create Session Request) starts processing from a state where no
TEID is present. Therefore, the cases are separated into packets that
have a TEID and packets that don't.
Of course, there are cases where we want to treat only the TEID-less
traffic specially, and session creation is one of the more vulnerable
parts of the mobile network.

EH stands for Extension Header.
This is the case for GTPU packets compatible with 5G. With Flow
Director, it reads a QoS-related parameter called the QFI.
Without this, it is impossible to process 5G-compatible GTPv1 packets.
Furthermore, this Extension Header, the PDU Session Container, has
parts whose shape differs depending on UL/DL.

Specific use cases basically apply to services that terminate GTP itself.

The way RSS has been configured with ethtool so far is to select a
fixed packet shape and the parameters of that packet to hash on.
Conforming to that model is why the number of flow types is so large.


2023-10-17 14:38:30

by Takeru Hayasaka

Subject: Re: [PATCH net-next v2] ethtool: ice: Support for RSS settings to GTP from ethtool

Hi Harald-san

Thanks for your review!

> so if I'm guessing correctly, those would be hashing only on the V4/V6
destination address? Why would that be GTP specific? The IPv4/v6
header in front of the GTP header is a normal IP header.

This is not correct. The TEID and the src port/dst port of the inner
packet are also included.

> Are there really deployments where the *very limited* GTP-C control
I also think that it should not be limited to GTP-C. However, as I
wrote in the email earlier, all the flows written are different in
packet structure, including GTP-C. In the semantics of ethtool, I
thought it was correct to pass a fixed packet structure and the
controllable parameters for it. At least, the Intel ice driver that I
modified is already like that.

> IMHO that kind of explanation should be in the comment next to the
> #define (for all of them) rather than "hash only". That way it's
> obvious to the reader what they do, rather than having to guess.

Regarding what should be hashed, this is a complex case. This also
answers other questions, but for example, if you read the Intel ice
driver, there are cases where you can control hashing on the ports of
the inner packet. I think this varies depending on the driver being
implemented.

Note that these comments follow the existing code of ethtool.

FYI: I think it will be helpful for you!
https://www.intel.com/content/www/us/en/content-details/617015/intel-ethernet-controller-e810-dynamic-device-personalization-ddp-technology-guide.html
(cf. Table 8. Patterns and Input Sets for iavf RSS)

2023-10-17 16:49:37

by Takeru Hayasaka

Subject: Re: [PATCH net-next v2] ethtool: ice: Support for RSS settings to GTP from ethtool

I'm not very proficient in English, so I'm worried whether I can
explain it well.
Therefore, I will try to briefly explain the flow and what kind of
cases these are in a straightforward manner.

> +#define GTPU_V4_FLOW 0x13 /* hash only */
> +#define GTPU_V6_FLOW 0x14 /* hash only */

These are the cases for GTPv1-U over IPv4 and v6. The GTP packet
format here only includes elements like the TEID and the GTP version.
They are mainly expected to be used for UE data communication.

> +#define GTPC_V4_FLOW 0x15 /* hash only */
> +#define GTPC_V6_FLOW 0x16 /* hash only */

These are the cases for GTPv2-C over IPv4 and v6. The format of these
GTP packets does not include a TEID. They are mainly expected to be
used for the signalling that creates sessions for UE data
communication, commonly referred to as CSR.

> +#define GTPC_TEID_V4_FLOW 0x17 /* hash only */
> +#define GTPC_TEID_V6_FLOW 0x18 /* hash only */

These are the cases for GTPv2-C over IPv4 and v6. Unlike the previous
ones, the format of these GTP packets includes a TEID. Packets take
this form after session creation, and it is mainly used for requests
such as UE handovers.

> +#define GTPU_EH_V4_FLOW 0x19 /* hash only */
> +#define GTPU_EH_V6_FLOW 0x20 /* hash only */

These are the cases of GTPv1-U with the Extension Header for the PDU
Session Container, over IPv4 and v6. The format of these GTP packets
includes a TEID and a QFI. In 5G, data traffic through the UPF and
similar functions is carried with this extension header.

> +#define GTPU_UL_V4_FLOW 0x21 /* hash only */
> +#define GTPU_UL_V6_FLOW 0x22 /* hash only */
> +#define GTPU_DL_V4_FLOW 0x23 /* hash only */
> +#define GTPU_DL_V6_FLOW 0x24 /* hash only */

These are also cases of GTPv1-U with the Extension Header for the PDU
Session Container, over IPv4 and v6. The difference from the previous
ones is that uplink and downlink are distinguished by looking at the
PDU Session Container. The data it contains differs between DL and UL,
which can be used to tell the packets apart.

All of the functionality explained up to this point is useful when
you want to process data traffic from the mobile network at a UPF,
PGW, etc.
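
To make this concrete, here is roughly how each of these types would
be driven from ethtool (the interface name is only an example, and the
'e' field letter for the TEID follows the example in my commit
message; the matching userspace ethtool change is assumed):

  ethtool -N ens3 rx-flow-hash gtpu4  sde   # GTPv1-U: IP addresses + TEID
  ethtool -N ens3 rx-flow-hash gtpc4  sd    # GTPv2-C without TEID: IP addresses only
  ethtool -N ens3 rx-flow-hash gtpc4t sde   # GTPv2-C with TEID
  ethtool -N ens3 rx-flow-hash gtpu4e sde   # GTPv1-U + PDU Session Container EH
  ethtool -N ens3 rx-flow-hash gtpu4u sde   # as above, uplink only
  ethtool -N ens3 rx-flow-hash gtpu4d sde   # as above, downlink only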

Jakub-san might be concerned because there are many flow types. If
their number seems too large, it might be good to drop only the EH
UL/DL cases and the plain GTPC case.
I made this implementation to fully utilize the GTP features of Intel
ice for RSS, which is why there are so many flow types.
The reason is that UL/DL can be substituted with EH (looking at the
Intel ice implementation, both UL and DL seem to be treated as
don't-care), and for GTPC without a TEID, the hash should originally
be based on the IMSI etc. when doing RSS, but that does not seem to be
done right now.

In other words, if it seems better to reduce them, it might be good to
only incorporate gtp4|6, gtpc4|6, gtpue4|6.

I would be happy to receive your feedback :)

2023-10-17 23:49:29

by Jakub Kicinski

Subject: Re: [PATCH net-next v2] ethtool: ice: Support for RSS settings to GTP from ethtool

On Tue, 17 Oct 2023 23:37:57 +0900 takeru hayasaka wrote:
> > Are there really deployments where the *very limited* GTP-C control
> I also think that it should not be limited to GTP-C. However, as I
> wrote in the email earlier, all the flows written are different in
> packet structure, including GTP-C. In the semantics of ethtool, I
> thought it was correct to pass a fixed packet structure and the
> controllable parameters for it. At least, the Intel ice driver that I
> modified is already like that.

I may be wrong (this API predates my involvement in Linux by a decade)
but I think that the current ethtool API is not all that precise in
terms of exact packet headers.

For example the TCPv6 flow includes IPv6 and TCP headers, but the
packet may or may not have any number of encapsulation headers in place.
VLAN, VXLAN, GENEVE etc. If the NIC can parse them - it will extract
the inner-most IPv6 and TCP src/dst and hash on that.

In a way TCP or IP headers may also differ by e.g. including options.
But as long as the fields we care about (source / dst) are in place,
we treat all variants of the header the same.

The question really is how much we should extend this sort of thinking
to GTP and say - we treat all GTP flows with extractable TEID the same;
and how much the user can actually benefit from controlling particular
sub-category of GTP flows. Or knowing that NIC supports a particular
sub-category.

Let's forget about capabilities of Intel NICs for now - can you as a
user think of practical use cases where we'd want to turn on hashing
based on TEID for, e.g. gtpu6 and not gtpc6?

2023-10-18 01:53:36

by Takeru Hayasaka

Subject: Re: [PATCH net-next v2] ethtool: ice: Support for RSS settings to GTP from ethtool

Hi Jakub-san

Thank you for your continued review!

> I may be wrong (this API predates my involvement in Linux by a decade)
> but I think that the current ethtool API is not all that precise in
> terms of exact packet headers.
>
> For example the TCPv6 flow includes IPv6 and TCP headers, but the
> packet may or may not have any number of encapsulation headers in place.
> VLAN, VXLAN, GENEVE etc. If the NIC can parse them - it will extract
> the inner-most IPv6 and TCP src/dst and hash on that.
>
> In a way TCP or IP headers may also differ by e.g. including options.
> But as long as the fields we care about (source / dst) are in place,
> we treat all variants of the header the same.
>
> The question really is how much we should extend this sort of thinking
> to GTP and say - we treat all GTP flows with extractable TEID the same;
> and how much the user can actually benefit from controlling particular
> sub-category of GTP flows. Or knowing that NIC supports a particular
> sub-category.
>
> Let's forget about capabilities of Intel NICs for now - can you as a
> user think of practical use cases where we'd want to turn on hashing
> based on TEID for, e.g. gtpu6 and not gtpc6?

of course!
There are clearly cases where we would want to use gtpu4|6 instead of gtpc4|6.

For instance, there are PGWs that have the capability to separate the
termination of communication of 4G LTE users into Control and User
planes (C/U).
This is quite convenient from a scalability perspective. In fact, in
5G UPF, the communication is explicitly only on the User plane
(Uplane).

Therefore, services are expected to receive only GTPU traffic (e.g.,
PGW-U, UPF) or only GTPC traffic (e.g., PGW-C). Hence, there arises a
necessity to use only GTPU.

If we do not distinguish packets into control/user (C/U) with options
like gtp4|6, I can imagine scenarios where performance tuning becomes
difficult.
For example, there are cases where we want to process only the control
traffic (GTPC) on specific CPUs using Flow Director, while processing
GTPU on the remaining cores.
In scenarios like IoT, where user traffic is minimal but the number of
devices is vast, the control traffic could increase substantially, so
the reverse split is also conceivable.
In short, this pertains to being mindful of CPU core affinity.
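
As a rough sketch of that kind of tuning with this patch (the device
name and field letters are only examples, and the actual Flow Director
steering of GTPC to specific queues would be configured separately; it
is not part of this patch):

  # spread GTP-U across queues per tunnel
  ethtool -N ens3 rx-flow-hash gtpu4 sde
  # keep GTP-C hashing coarse (IP addresses only)
  ethtool -N ens3 rx-flow-hash gtpc4 sd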

If we were to propose again, setting aside considerations specific to
Intel, I believe, considering the users of ethtool, the smallest units
should be gtpu4|6 and gtpc4|6.
Regarding Extension Headers and such, I think it would be more
straightforward to handle them implicitly.

What does everyone else think?

2023-10-18 08:16:19

by Harald Welte

Subject: Re: [PATCH net-next v2] ethtool: ice: Support for RSS settings to GTP from ethtool

Dear Takeru, Jakub and list,

On Wed, Oct 18, 2023 at 10:53:02AM +0900, takeru hayasaka wrote:
> > Let's forget about capabilities of Intel NICs for now - can you as a
> > user think of practical use cases where we'd want to turn on hashing
> > based on TEID for, e.g. gtpu6 and not gtpc6?
>
> of course!
> There are clearly cases where we would want to use gtpu4|6 instead of gtpc4|6.
>
> For instance, there are PGWs that have the capability to separate the
> termination of communication of 4G LTE users into Control and User
> planes (C/U).

I would argue it is the standard case in any PGW (or SMF+UPF) to process
GTP-C differently than GTP-U. That's what the entire CUPS (control/user plane
split) architecture is meant for.

Now the question is how one implements that. As the related signaling protocols
all allow specifying different IP addresses for GTPv1/v2-C (v1 for 2G/3G
and v2 for 4G) and GTPv1-U (v1 used everywhere), it is always possible to
use separate IP addresses for the control and user planes. It's even normal
that a single GTP-C instance (on one IP) manages multiple GTP-U
instances on other IP addresses. Those IPs are then handled by
completely different physical servers/systems/VMs/...

So only in the case where the user intentionally configures their network to use
the same IP address for GTP-C and GTP-U traffic will one need to start
distinguishing GTP-C and GTP-U on one host/NIC with the RSS mechanism:
steer the GTP-C traffic to the control plane instance on one CPU and
spread the GTP-U traffic via the hash function to multiple other
queues/CPUs. I personally think it's short-sighted to use identical IPs
for control and user plane, as it means you can never scale out to
multiple machines without introducing some kind of dedicated load
balancer in front. But assuming some people still want to do it that
way: yes, then you need the feature to split GTP-C from GTP-U via RSS to
scale well.

What I'm somewhat more wondering about is the usefulness of load-balancing
GTP-C traffic over multiple queues/cores. As stated earlier, that's
just signaling.

> If we were to propose again, setting aside considerations specific to
> Intel, I believe, considering the users of ethtool, the smallest units
> should be gtpu4|6 and gtpc4|6.

agreed. Though I'm not entirely sure one would usually want to treat v4
different from v6. I'd assume they would usually both follow the same
RSS scheme?

> Regarding Extension Headers and such, I think it would be more
> straightforward to handle them implicitly.

I would also agree to that.
--
- Harald Welte <[email protected]> https://laforge.gnumonks.org/
============================================================================
"Privacy in residential applications is a desirable marketing option."
(ETSI EN 300 175-7 Ch. A6)

2023-10-18 08:27:49

by Harald Welte

Subject: Re: [PATCH net-next v2] ethtool: ice: Support for RSS settings to GTP from ethtool

Hi Takeru,

On Wed, Oct 18, 2023 at 01:49:08AM +0900, takeru hayasaka wrote:
> I'm not very proficient in English, so I'm worried whether I can
> explain it well.

Don't worry, you were very clear in this e-mail.

> Therefore, I will try to briefly explain the flow and what kind of
> cases these are in a straightforward manner.

Thanks for taking the time. As stated, I think it would be best to have
these or some other brief comments about the different flow types
in the source code (and especially the documentation) of ethtool.

Based on your explanation, I agree that indeed those are all different
flow types that occur in real-life on PGW/UPF and other 3GPP network
elements/functions. I can also very well imagine that there are use
cases to steer all of those separately, including the EH and UL/DL types
you mentioned.

So I'm supporting your patch with all its many different flow types for RSS.

Thanks,
Harald
--
- Harald Welte <[email protected]> https://laforge.gnumonks.org/
============================================================================
"Privacy in residential applications is a desirable marketing option."
(ETSI EN 300 175-7 Ch. A6)

2023-10-18 16:20:58

by Takeru Hayasaka

Subject: Re: [PATCH net-next v2] ethtool: ice: Support for RSS settings to GTP from ethtool

Hi Harald-san and all.

Thank you for the review and comment!

> So only in case the user intentionally configures their network to use
> the same IP address for GTP-C and GTP-U traffic one will need to start
> distinguishing GTP-C and GTP-U on one host/NIC with the RSS mechanism:
> Steer the GTP-C traffic to the control plane instance on one CPU and
> spread the GTP-U traffic via hash function to multiple other
> queues/CPUs. I personally think it's short-sighted to use identical IPs
> for control and user plane, as it means you can never scale out to
> multiple machines without introducing some kind of dedicated load
> balancer in front. But assuming some people still want to do it that
> way: Yes, then you need the feature to split GTP-C from GTP-U via RSS to
> scale well.

I don't deny that using the same IP is short-sighted. However, in
environments such as Private 5G and Private LTE, a small mobile core
OSS (e.g., NextEPC, Free5GC) may well be deployed. Even if the
addresses are separated, handling both planes on the same compute
instance is a realistic scenario, so there are practical use cases.

> agreed. Though I'm not entirely sure one would usually want to treat v4
> different from v6. I'd assume they would usually both follow the same
> RSS scheme?

Indeed, you might want them to be treated in the same way, but this
follows the existing design of ethtool. The existing flow types, such
as tcp4 and tcp6, have the L3 version appended to the name, i.e. the
interface is expressed as IPv4|6 + L4. I don't know why the existing
implementation is split into IPv4 and IPv6.
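
For reference, a trimmed illustration of that pattern from
include/uapi/linux/ethtool.h; the TCP/UDP lines are the existing
definitions, and the GTP lines only show where the proposed names would
slot in following the same v4/v6 convention (those values are
placeholders, not a final assignment).

/* Trimmed from include/uapi/linux/ethtool.h: the flow type name already
 * carries the L3 version. */
#define	TCP_V4_FLOW	0x01	/* hash or spec (tcp_ip4_spec) */
#define	UDP_V4_FLOW	0x02	/* hash or spec (udp_ip4_spec) */
#define	TCP_V6_FLOW	0x05	/* hash or spec (tcp_ip6_spec) */
#define	UDP_V6_FLOW	0x06	/* hash or spec (udp_ip6_spec) */

/* The proposed GTP flow types would follow the same convention; the
 * values below are placeholders only. */
#define	GTPU_V4_FLOW	0x13	/* hash only */
#define	GTPU_V6_FLOW	0x14	/* hash only */
#define	GTPC_V4_FLOW	0x15	/* hash only */
#define	GTPC_V6_FLOW	0x16	/* hash only */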

> Don't worry, you were very clear in this e-mail.

Thank you for your kind comment :)

> Thanks for taking the time. As stated, I think it would be best to have
> these or some other brief comments about the different flow types
> in the source code (and especially the documentation) of ethtool.

Understood. I'm thinking of adding definitions and brief comments for
these flow types to the ethtool header in the next version of the patch :)

> Based on your explanation, I agree that indeed those are all different
> flow types that occur in real-life on PGW/UPF and other 3GPP network
> elements/functions. I can also very well imagine that there are use
> cases to steer all of those separately, including the EH and UL/DL types
> you mentioned.

Thanks. I'm glad you understood. I appreciate your review and comments.

I've now been able to organize the various comments, and I think the
behavior of the patch I sent is well understood.

Now, here, I’d like to propose two policies for the next version of the patch.

1. Keep this patch as is and write the necessary supplementary
comments (the nits will of course be fixed).
The advantage is that it can handle detailed use cases (as Harald-san
understood); there might even be use cases it covers better than
expected. The disadvantage is the multitude of flow types, which may
grow a bit more once 6G is considered.

2. Limit the rx-flow-hash flow types to gtpu4|6 and gtpc4|6, and
rewrite the patch so that the behavior of the other variants is applied
implicitly. We will still add comments (fewer than in plan 1).

In other words, for Intel ice, the proposal would have the following
semantics (a rough sketch follows below):
gtpu4|6: GTPU_V(4|6)_FLOW + GTPU_EH_V(4|6)_FLOW
gtpc4|6: GTPC_V(4|6)_FLOW + GTPC_TEID_V(4|6)_FLOW

The advantage is that it seems easy for users to use, and the set of
GTP-related flow types is less likely to grow or shrink in the future.
The disadvantage is that it may not be able to handle detailed use cases.
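
To make plan 2 a bit more concrete, here is a rough sketch (not the
code I posted) of how the combined IPv4 mapping could look, assuming
the existing ICE_FLOW_SEG_HDR_* flags from ice_flow.h; the IPv6 cases
would follow the same pattern.

/* Sketch of plan 2 only (not the posted patch): a single gtpu4 / gtpc4
 * flow type implicitly covers the EH / TEID variants.  Flag names
 * assume the existing ICE_FLOW_SEG_HDR_* definitions in ice_flow.h. */
static u32 ice_parse_gtp_hdrs_plan2(u32 flow_type)
{
	u32 hdrs = ICE_FLOW_SEG_HDR_NONE;

	switch (flow_type) {
	case GTPU_V4_FLOW:
		/* gtpu4: plain GTP-U plus GTP-U with extension header */
		hdrs |= ICE_FLOW_SEG_HDR_GTPU_IP | ICE_FLOW_SEG_HDR_GTPU_EH |
			ICE_FLOW_SEG_HDR_IPV4;
		break;
	case GTPC_V4_FLOW:
		/* gtpc4: GTP-C with and without TEID */
		hdrs |= ICE_FLOW_SEG_HDR_GTPC | ICE_FLOW_SEG_HDR_GTPC_TEID |
			ICE_FLOW_SEG_HDR_IPV4;
		break;
	default:
		break;
	}

	return hdrs;
}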

Please let me know which one, 1 or 2, you prefer.
I would also be glad to receive any further feedback!

Thanks


2023-10-18 17:38:36

by Jakub Kicinski

[permalink] [raw]
Subject: Re: [PATCH net-next v2] ethtool: ice: Support for RSS settings to GTP from ethtool

On Wed, 18 Oct 2023 10:53:02 +0900 takeru hayasaka wrote:
> For instance, there are PGWs that have the capability to separate the
> termination of communication of 4G LTE users into Control and User
> planes (C/U).
> This is quite convenient from a scalability perspective. In fact, in
> 5G UPF, the communication is explicitly only on the User plane
> (Uplane).
>
> Therefore, services are expected to receive only GTPU traffic (e.g.,
> PGW-U, UPF) or only GTPC traffic (e.g., PGW-C). Hence, there arises a
> necessity to use only GTPU.
>
> If we do not distinguish packets into Control/User (C/U) with options
> like gtp4|6, I can conceive scenarios where performance tuning becomes
> challenging.
> For example, in cases where we want to process only the control
> communication (GTPC) using Flow Director on specific CPUs, while
> processing GTPU on the remaining cores.
> In scenarios like IoT, where user communication is minimal but the
> volume of devices is vast, the control traffic could substantially
> increase. Thus, this might also be possible in reverse.
> In short, this pertains to being mindful of CPU core affinity.
>
> If we were to propose again, setting aside considerations specific to
> Intel, I believe, considering the users of ethtool, the smallest units
> should be gtpu4|6 and gtpc4|6.
> Regarding Extension Headers and such, I think it would be more
> straightforward to handle them implicitly.
>
> What does everyone else think?

Harald went further and questioned use of the same IP addresses for
-U and -C traffic, but even within one endpoint aren't these running
on a different port? Can someone reasonably use the same UDP port
for both types of traffic?

2023-10-18 17:40:50

by Jakub Kicinski

[permalink] [raw]
Subject: Re: [PATCH net-next v2] ethtool: ice: Support for RSS settings to GTP from ethtool

On Wed, 18 Oct 2023 10:12:44 +0200 Harald Welte wrote:
> > If we were to propose again, setting aside considerations specific to
> > Intel, I believe, considering the users of ethtool, the smallest units
> > should be gtpu4|6 and gtpc4|6.
>
> agreed. Though I'm not entirely sure one would usually want to treat v4
> different from v6. I'd assume they would usually both follow the same
> RSS scheme?

FWIW I had the same thought. But if we do add flow matching
support for GTP one day we'll have to define a struct like
struct ethtool_tcpip4_spec, which means the size of the address
matters?
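
Purely as a strawman to illustrate that point (nothing like this exists
in the uapi today), such a spec would presumably have to mirror
struct ethtool_tcpip4_spec per address family:

/* Hypothetical only, to illustrate why the address size forces
 * per-family structs; not part of this patch or the current uapi. */
#include <linux/types.h>

struct ethtool_gtpip4_spec {
	__be32	ip4src;
	__be32	ip4dst;
	__be16	psrc;
	__be16	pdst;
	__be32	teid;	/* GTP tunnel endpoint ID */
};

struct ethtool_gtpip6_spec {
	__be32	ip6src[4];
	__be32	ip6dst[4];
	__be16	psrc;
	__be16	pdst;
	__be32	teid;
};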

2023-10-18 18:09:19

by Harald Welte

[permalink] [raw]
Subject: Re: [PATCH net-next v2] ethtool: ice: Support for RSS settings to GTP from ethtool

Hi Jakub,

On Wed, Oct 18, 2023 at 10:37:03AM -0700, Jakub Kicinski wrote:
> Harald went further and questioned use of the same IP addresses for
> -U and -C traffic, but even within one endpoint aren't these running
> on a different port?

yes.

> Can someone reasonably use the same UDP port for both types of traffic?

I don't think so. In the entire 3GPP protocol world, the UDP port numbers
for GTP-U and GTP-C are fixed. The various signaling protocols allow you to
communicate the IPv4/v6 address and TEID of tunnel endpoints, but never allow
you to communicate the port number - which hence must always be the
well-known ports (2123 for GTP-C and 2152 for GTP-U).
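
Just to illustrate: since the ports are fixed, nothing GTP-aware is
needed to split -C from -U on one host. A minimal userspace sketch of
an ntuple rule steering GTP-C to one RX queue follows; interface name,
queue and rule slot are made up, and driver support for such rules is
of course required.

/* Minimal sketch, userspace side: steer GTP-C (well-known UDP port
 * 2123) to RX queue 0 with an ntuple rule, leaving GTP-U (port 2152)
 * to the RSS hash.  Interface name, queue and rule slot are made up. */
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_rxnfc nfc = {
		.cmd = ETHTOOL_SRXCLSRLINS,
		.fs = {
			.flow_type = UDP_V4_FLOW,
			.h_u.udp_ip4_spec.pdst = htons(2123),	/* GTP-C */
			.m_u.udp_ip4_spec.pdst = htons(0xffff),	/* exact port match */
			.ring_cookie = 0,			/* RX queue 0 */
			.location = 0,				/* rule slot, driver-dependent */
		},
	};
	struct ifreq ifr = {0};
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	strncpy(ifr.ifr_name, "ens3", IFNAMSIZ - 1);	/* illustrative ifname */
	ifr.ifr_data = (void *)&nfc;
	if (fd < 0 || ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		perror("ETHTOOL_SRXCLSRLINS");
	return 0;
}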

Of course somebody could do whatever they want on some kind of internal
interface not required to interoperate with any other
equipment/implementation/operator, but I'd consider that to fall
outside your question of "reasonable use".

Regards,
Harald

--
- Harald Welte <[email protected]> https://laforge.gnumonks.org/
============================================================================
"Privacy in residential applications is a desirable marketing option."
(ETSI EN 300 175-7 Ch. A6)