2022-11-23 10:53:29

by wangchuanlei

[permalink] [raw]
Subject: [PATCH] [openvswitch v4] openvswitch: Add support to count upcall packets

Add support to count upcall packets: when the openvswitch kernel
module (kmod) upcalls to userspace, count the number of packets for
which the upcall succeeded or failed. This gives a better view of how
many packets are upcalled to userspace (ovs-vswitchd) on every
interface.

This version reworks the code based on the review comments on v3.

Changes since v3:
- use nested NLA_NESTED attribute in netlink message

Changes since v2:
- add count of upcall failed packets

Changes since v1:
- add count of upcall succeed packets

Signed-off-by: wangchuanlei <[email protected]>
---
include/uapi/linux/openvswitch.h | 19 ++++++++++++
net/openvswitch/datapath.c | 52 ++++++++++++++++++++++++++++++++
net/openvswitch/datapath.h | 12 ++++++++
net/openvswitch/vport.c | 48 +++++++++++++++++++++++++++++
net/openvswitch/vport.h | 6 ++++
5 files changed, 137 insertions(+)

diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index 94066f87e9ee..fa13bce15fae 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -126,6 +126,11 @@ struct ovs_vport_stats {
__u64 tx_dropped; /* no space available in linux */
};

+struct ovs_vport_upcall_stats {
+ uint64_t upcall_success; /* total packets upcalls succeed */
+ uint64_t upcall_fail; /* total packets upcalls failed */
+};
+
/* Allow last Netlink attribute to be unaligned */
#define OVS_DP_F_UNALIGNED (1 << 0)

@@ -277,11 +282,25 @@ enum ovs_vport_attr {
OVS_VPORT_ATTR_PAD,
OVS_VPORT_ATTR_IFINDEX,
OVS_VPORT_ATTR_NETNSID,
+ OVS_VPORT_ATTR_UPCALL_STATS, /* struct ovs_vport_upcall_stats */
__OVS_VPORT_ATTR_MAX
};

#define OVS_VPORT_ATTR_MAX (__OVS_VPORT_ATTR_MAX - 1)

+/**
+ * enum ovs_vport_upcall_attr - attributes for %OVS_VPORT_UPCALL* commands
+ * @OVS_VPORT_UPCALL_SUCCESS: 64-bit upcall success packets.
+ * @OVS_VPORT_UPCALL_FAIL: 64-bit upcall fail packets.
+ */
+enum ovs_vport_upcall_attr {
+ OVS_VPORT_UPCALL_SUCCESS, /* 64-bit upcall success packets */
+ OVS_VPORT_UPCALL_FAIL, /* 64-bit upcall fail packets */
+ __OVS_VPORT_UPCALL_MAX
+};
+
+#define OVS_VPORT_UPCALL_MAX (__OVS_VPORT_UPCALL_MAX-1)
+
enum {
OVS_VXLAN_EXT_UNSPEC,
OVS_VXLAN_EXT_GBP, /* Flag or __u32 */
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index c8a9075ddd0a..5254c51cfa60 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -209,6 +209,25 @@ static struct vport *new_vport(const struct vport_parms *parms)
return vport;
}

+static void ovs_vport_upcalls(struct sk_buff *skb,
+ const struct dp_upcall_info *upcall_info,
+ bool upcall_success)
+{
+ if (upcall_info->cmd == OVS_PACKET_CMD_MISS ||
+ upcall_info->cmd == OVS_PACKET_CMD_ACTION) {
+ const struct vport *p = OVS_CB(skb)->input_vport;
+ struct vport_upcall_stats_percpu *vport_stats;
+
+ vport_stats = this_cpu_ptr(p->vport_upcall_stats_percpu);
+ u64_stats_update_begin(&vport_stats->syncp);
+ if (upcall_success)
+ u64_stats_inc(&vport_stats->n_upcall_success);
+ else
+ u64_stats_inc(&vport_stats->n_upcall_fail);
+ u64_stats_update_end(&vport_stats->syncp);
+ }
+}
+
void ovs_dp_detach_port(struct vport *p)
{
ASSERT_OVSL();
@@ -216,6 +235,9 @@ void ovs_dp_detach_port(struct vport *p)
/* First drop references to device. */
hlist_del_rcu(&p->dp_hash_node);

+ /* Free percpu memory */
+ free_percpu(p->vport_upcall_stats_percpu);
+
/* Then destroy it. */
ovs_vport_del(p);
}
@@ -305,6 +327,8 @@ int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
err = queue_userspace_packet(dp, skb, key, upcall_info, cutlen);
else
err = queue_gso_packets(dp, skb, key, upcall_info, cutlen);
+
+ ovs_vport_upcalls(skb, upcall_info, !err);
if (err)
goto err;

@@ -1825,6 +1849,13 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
goto err_destroy_portids;
}

+ vport->vport_upcall_stats_percpu =
+ netdev_alloc_pcpu_stats(struct vport_upcall_stats_percpu);
+ if (!vport->vport_upcall_stats_percpu) {
+ err = -ENOMEM;
+ goto err_destroy_upcall_stats;
+ }
+
err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
info->snd_seq, 0, OVS_DP_CMD_NEW);
BUG_ON(err < 0);
@@ -1837,6 +1868,7 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
ovs_notify(&dp_datapath_genl_family, reply, info);
return 0;

+err_destroy_upcall_stats:
err_destroy_portids:
kfree(rcu_dereference_raw(dp->upcall_portids));
err_unlock_and_destroy_meters:
@@ -2068,6 +2100,8 @@ static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
{
struct ovs_header *ovs_header;
struct ovs_vport_stats vport_stats;
+ struct ovs_vport_upcall_stats stat;
+ struct nlattr *nla;
int err;

ovs_header = genlmsg_put(skb, portid, seq, &dp_vport_genl_family,
@@ -2097,6 +2131,15 @@ static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
OVS_VPORT_ATTR_PAD))
goto nla_put_failure;

+ nla = nla_nest_start_noflag(skb, OVS_VPORT_ATTR_UPCALL_STATS);
+ if (!nla)
+ goto nla_put_failure;
+
+ ovs_vport_get_upcall_stats(vport, &stat);
+ if (ovs_vport_put_upcall_stats(skb, &stat))
+ goto nla_put_failure;
+ nla_nest_end(skb, nla);
+
if (ovs_vport_get_upcall_portids(vport, skb))
goto nla_put_failure;

@@ -2278,6 +2321,14 @@ static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
goto exit_unlock_free;
}

+ vport->vport_upcall_stats_percpu =
+ netdev_alloc_pcpu_stats(struct vport_upcall_stats_percpu);
+
+ if (!vport->vport_upcall_stats_percpu) {
+ err = -ENOMEM;
+ goto exit_unlock_free;
+ }
+
err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
info->snd_portid, info->snd_seq, 0,
OVS_VPORT_CMD_NEW, GFP_KERNEL);
@@ -2507,6 +2558,7 @@ static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
[OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
[OVS_VPORT_ATTR_IFINDEX] = { .type = NLA_U32 },
[OVS_VPORT_ATTR_NETNSID] = { .type = NLA_S32 },
+ [OVS_VPORT_ATTR_UPCALL_STATS] = { .type = NLA_NESTED },
};

static const struct genl_small_ops dp_vport_genl_ops[] = {
diff --git a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h
index 0cd29971a907..933dec5e4175 100644
--- a/net/openvswitch/datapath.h
+++ b/net/openvswitch/datapath.h
@@ -50,6 +50,18 @@ struct dp_stats_percpu {
struct u64_stats_sync syncp;
};

+/**
+ * struct vport_upcall_stats_percpu - per-cpu packet upcall statistics for
+ * a given vport.
+ * @n_upcall_success: Number of packets that upcall to userspace succeed.
+ * @n_upcall_fail: Number of packets that upcall to userspace failed.
+ */
+struct vport_upcall_stats_percpu {
+ u64_stats_t n_upcall_success;
+ u64_stats_t n_upcall_fail;
+ struct u64_stats_sync syncp;
+};
+
/**
* struct dp_nlsk_pids - array of netlink portids of for a datapath.
* This is used when OVS_DP_F_DISPATCH_UPCALL_PER_CPU
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c
index 82a74f998966..a69c9356b57c 100644
--- a/net/openvswitch/vport.c
+++ b/net/openvswitch/vport.c
@@ -284,6 +284,54 @@ void ovs_vport_get_stats(struct vport *vport, struct ovs_vport_stats *stats)
stats->tx_packets = dev_stats->tx_packets;
}

+/**
+ * ovs_vport_get_upcall_stats - retrieve upcall stats
+ *
+ * @vport: vport from which to retrieve the stats
+ * @ovs_vport_upcall_stats: location to store stats
+ *
+ * Retrieves upcall stats for the given device.
+ *
+ * Must be called with ovs_mutex or rcu_read_lock.
+ */
+void ovs_vport_get_upcall_stats(struct vport *vport, struct ovs_vport_upcall_stats *stats)
+{
+ int i;
+
+ stats->upcall_success = 0;
+ stats->upcall_fail = 0;
+
+ for_each_possible_cpu(i) {
+ const struct vport_upcall_stats_percpu *percpu_upcall_stats;
+ unsigned int start;
+
+ percpu_upcall_stats = per_cpu_ptr(vport->vport_upcall_stats_percpu, i);
+ do {
+ start = u64_stats_fetch_begin(&percpu_upcall_stats->syncp);
+ stats->upcall_success +=
+ u64_stats_read(&percpu_upcall_stats->n_upcall_success);
+ stats->upcall_fail += u64_stats_read(&percpu_upcall_stats->n_upcall_fail);
+ } while (u64_stats_fetch_retry(&percpu_upcall_stats->syncp, start));
+ }
+}
+
+int ovs_vport_put_upcall_stats(struct sk_buff *skb,
+ struct ovs_vport_upcall_stats *stats)
+{
+ if (nla_put_u64_64bit(skb, OVS_VPORT_UPCALL_SUCCESS, stats->upcall_success,
+ OVS_VPORT_ATTR_PAD))
+ goto nla_put_failure;
+
+ if (nla_put_u64_64bit(skb, OVS_VPORT_UPCALL_FAIL, stats->upcall_fail,
+ OVS_VPORT_ATTR_PAD))
+ goto nla_put_failure;
+
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
/**
* ovs_vport_get_options - retrieve device options
*
diff --git a/net/openvswitch/vport.h b/net/openvswitch/vport.h
index 7d276f60c000..02cf8c589588 100644
--- a/net/openvswitch/vport.h
+++ b/net/openvswitch/vport.h
@@ -32,6 +32,11 @@ struct vport *ovs_vport_locate(const struct net *net, const char *name);

void ovs_vport_get_stats(struct vport *, struct ovs_vport_stats *);

+void ovs_vport_get_upcall_stats(struct vport *vport,
+ struct ovs_vport_upcall_stats *stats);
+int ovs_vport_put_upcall_stats(struct sk_buff *skb,
+ struct ovs_vport_upcall_stats *stats);
+
int ovs_vport_set_options(struct vport *, struct nlattr *options);
int ovs_vport_get_options(const struct vport *, struct sk_buff *);

@@ -78,6 +83,7 @@ struct vport {
struct hlist_node hash_node;
struct hlist_node dp_hash_node;
const struct vport_ops *ops;
+ struct vport_upcall_stats_percpu __percpu *vport_upcall_stats_percpu;

struct list_head detach_list;
struct rcu_head rcu;
--
2.27.0


2022-11-23 19:00:23

by Alexander Lobakin

[permalink] [raw]
Subject: Re: [PATCH] [openvswitch v4] openvswitch: Add support to count upcall packets

From: wangchuanlei <[email protected]>
Date: Wed, 23 Nov 2022 04:18:43 -0500

> Add support to count upall packets, when kmod of openvswitch
> upcall to userspace , here count the number of packets for
> upcall succeed and failed, which is a better way to see how
> many packets upcalled to userspace(ovs-vswitchd) on every
> interfaces.
>
> Here optimize the function used by comments of v3.
>
> Changes since v3:
> - use nested NLA_NESTED attribute in netlink message
>
> Changes since v2:
> - add count of upcall failed packets
>
> Changes since v1:
> - add count of upcall succeed packets
>
> Signed-off-by: wangchuanlei <[email protected]>
> ---
> include/uapi/linux/openvswitch.h | 19 ++++++++++++
> net/openvswitch/datapath.c | 52 ++++++++++++++++++++++++++++++++
> net/openvswitch/datapath.h | 12 ++++++++
> net/openvswitch/vport.c | 48 +++++++++++++++++++++++++++++
> net/openvswitch/vport.h | 6 ++++
> 5 files changed, 137 insertions(+)
>
> diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
> index 94066f87e9ee..fa13bce15fae 100644
> --- a/include/uapi/linux/openvswitch.h
> +++ b/include/uapi/linux/openvswitch.h
> @@ -126,6 +126,11 @@ struct ovs_vport_stats {
> __u64 tx_dropped; /* no space available in linux */
> };
>
> +struct ovs_vport_upcall_stats {
> + uint64_t upcall_success; /* total packets upcalls succeed */
> + uint64_t upcall_fail; /* total packets upcalls failed */

Please no uint64_t in the UAPI headers. __u64 as above.
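I.e. something like this (just to illustrate the type change, not a
tested replacement):

struct ovs_vport_upcall_stats {
	__u64 upcall_success;	/* total packets for which upcall succeeded */
	__u64 upcall_fail;	/* total packets for which upcall failed */
};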

> +};
> +
> /* Allow last Netlink attribute to be unaligned */
> #define OVS_DP_F_UNALIGNED (1 << 0)
>
> @@ -277,11 +282,25 @@ enum ovs_vport_attr {
> OVS_VPORT_ATTR_PAD,
> OVS_VPORT_ATTR_IFINDEX,
> OVS_VPORT_ATTR_NETNSID,
> + OVS_VPORT_ATTR_UPCALL_STATS, /* struct ovs_vport_upcall_stats */
> __OVS_VPORT_ATTR_MAX
> };
>
> #define OVS_VPORT_ATTR_MAX (__OVS_VPORT_ATTR_MAX - 1)
>
> +/**
> + * enum ovs_vport_upcall_attr - attributes for %OVS_VPORT_UPCALL* commands
> + * @OVS_VPORT_UPCALL_SUCCESS: 64-bit upcall success packets.
> + * @OVS_VPORT_UPCALL_FAIL: 64-bit upcall fail packets.
> + */
> +enum ovs_vport_upcall_attr {
> + OVS_VPORT_UPCALL_SUCCESS, /* 64-bit upcall success packets */
> + OVS_VPORT_UPCALL_FAIL, /* 64-bit upcall fail packets */
> + __OVS_VPORT_UPCALL_MAX
> +};
> +
> +#define OVS_VPORT_UPCALL_MAX (__OVS_VPORT_UPCALL_MAX-1)

Spaces around arithm operator ('-').

> +
> enum {
> OVS_VXLAN_EXT_UNSPEC,
> OVS_VXLAN_EXT_GBP, /* Flag or __u32 */
> diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
> index c8a9075ddd0a..5254c51cfa60 100644
> --- a/net/openvswitch/datapath.c
> +++ b/net/openvswitch/datapath.c
> @@ -209,6 +209,25 @@ static struct vport *new_vport(const struct vport_parms *parms)
> return vport;
> }
>
> +static void ovs_vport_upcalls(struct sk_buff *skb,
> + const struct dp_upcall_info *upcall_info,
> + bool upcall_success)

^^^^^^^^^^^^^^^^^^^

Just `bool success`? It's clear that it's about upcalls, I don't see
a need to repeat it in every argument's name.

> +{
> + if (upcall_info->cmd == OVS_PACKET_CMD_MISS ||
> + upcall_info->cmd == OVS_PACKET_CMD_ACTION) {

if (cmd != MISS && cmd != ACTION)
return;

Saves 1 indent level.
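I.e. the helper could look roughly like this (untested, just your code
restructured with the early return and the shorter argument name):

static void ovs_vport_upcalls(struct sk_buff *skb,
			      const struct dp_upcall_info *upcall_info,
			      bool success)
{
	const struct vport *p = OVS_CB(skb)->input_vport;
	struct vport_upcall_stats_percpu *stats;

	if (upcall_info->cmd != OVS_PACKET_CMD_MISS &&
	    upcall_info->cmd != OVS_PACKET_CMD_ACTION)
		return;

	stats = this_cpu_ptr(p->vport_upcall_stats_percpu);
	u64_stats_update_begin(&stats->syncp);
	if (success)
		u64_stats_inc(&stats->n_upcall_success);
	else
		u64_stats_inc(&stats->n_upcall_fail);
	u64_stats_update_end(&stats->syncp);
}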

> + const struct vport *p = OVS_CB(skb)->input_vport;
> + struct vport_upcall_stats_percpu *vport_stats;
> +
> + vport_stats = this_cpu_ptr(p->vport_upcall_stats_percpu);

Why make a separate structure? You can just expand dp_stats_percpu,
this function would then be just a couple lines in ovs_dp_upcall().

> + u64_stats_update_begin(&vport_stats->syncp);
> + if (upcall_success)
> + u64_stats_inc(&vport_stats->n_upcall_success);
> + else
> + u64_stats_inc(&vport_stats->n_upcall_fail);
> + u64_stats_update_end(&vport_stats->syncp);
> + }
> +}
> +
> void ovs_dp_detach_port(struct vport *p)
> {
> ASSERT_OVSL();
> @@ -216,6 +235,9 @@ void ovs_dp_detach_port(struct vport *p)
> /* First drop references to device. */
> hlist_del_rcu(&p->dp_hash_node);
>
> + /* Free percpu memory */
> + free_percpu(p->vport_upcall_stats_percpu);
> +
> /* Then destroy it. */
> ovs_vport_del(p);
> }
> @@ -305,6 +327,8 @@ int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
> err = queue_userspace_packet(dp, skb, key, upcall_info, cutlen);
> else
> err = queue_gso_packets(dp, skb, key, upcall_info, cutlen);
> +
> + ovs_vport_upcalls(skb, upcall_info, !err);
> if (err)
> goto err;

Also, as you may see, your ::upcall_fail counter will always be
exactly the same as stats->n_lost. So there's no point introducing
a new one.
However, you can expand the structure dp_stats_percpu and add a new
field there which would store the number of successful upcalls.
...but I don't see a reason for this to be honest. From my PoV,
it's better to count the number of successfully processed packets
at the end of queue_userspace_packet() right before the 'out:'
label[0]. But please make sure then you don't duplicate some other
counter (I'm not deep into OvS, so can't say for sure if there's
anything similar to what you want).
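Roughly like this, assuming err at that point already holds the result
of genlmsg_unicast() (I haven't double-checked the exact context around
the 'out:' label, so treat it as a sketch only):

	/* at the end of queue_userspace_packet(), once err is final */
	if (!err)
		ovs_vport_upcalls(skb, upcall_info, true);
out:
	/* existing cleanup, return err */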

>
> @@ -1825,6 +1849,13 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
> goto err_destroy_portids;
> }
>
> + vport->vport_upcall_stats_percpu =

This can be at least twice shorter, e.g. 'upcall_stats'. Don't try
to describe every detail in symbol names.

> + netdev_alloc_pcpu_stats(struct vport_upcall_stats_percpu);
> + if (!vport->vport_upcall_stats_percpu) {
> + err = -ENOMEM;
> + goto err_destroy_upcall_stats;

I know you followed the previous label logic, but you actually
aren't destroying the stats under this label. Here you should
have `goto err_destroy_portids` as that's what you're actually doing
on that error path.
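Something like (also using a shorter field name, e.g. 'upcall_stats',
as suggested above):

	vport->upcall_stats =
		netdev_alloc_pcpu_stats(struct vport_upcall_stats_percpu);
	if (!vport->upcall_stats) {
		err = -ENOMEM;
		goto err_destroy_portids;
	}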

> + }
> +
> err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
> info->snd_seq, 0, OVS_DP_CMD_NEW);
> BUG_ON(err < 0);

[...]

> @@ -2278,6 +2321,14 @@ static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
> goto exit_unlock_free;
> }
>
> + vport->vport_upcall_stats_percpu =
> + netdev_alloc_pcpu_stats(struct vport_upcall_stats_percpu);
> +
> + if (!vport->vport_upcall_stats_percpu) {
> + err = -ENOMEM;
> + goto exit_unlock_free;
> + }

Why do you allocate them twice?

> +
> err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
> info->snd_portid, info->snd_seq, 0,
> OVS_VPORT_CMD_NEW, GFP_KERNEL);

[...]

> @@ -50,6 +50,18 @@ struct dp_stats_percpu {
> struct u64_stats_sync syncp;
> };
>
> +/**
> + * struct vport_upcall_stats_percpu - per-cpu packet upcall statistics for
> + * a given vport.
> + * @n_upcall_success: Number of packets that upcall to userspace succeed.
> + * @n_upcall_fail: Number of packets that upcall to userspace failed.
> + */
> +struct vport_upcall_stats_percpu {
> + u64_stats_t n_upcall_success;
> + u64_stats_t n_upcall_fail;
> + struct u64_stats_sync syncp;

Nit: syncp would feel better at the start. You could then sort the
structure by the field hit probability and reduce cache misses %)
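I.e. (same fields as in your patch, just reordered):

struct vport_upcall_stats_percpu {
	struct u64_stats_sync syncp;
	u64_stats_t n_upcall_success;
	u64_stats_t n_upcall_fail;
};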

> +};
> +
> /**
> * struct dp_nlsk_pids - array of netlink portids of for a datapath.
> * This is used when OVS_DP_F_DISPATCH_UPCALL_PER_CPU
> diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c
> index 82a74f998966..a69c9356b57c 100644
> --- a/net/openvswitch/vport.c
> +++ b/net/openvswitch/vport.c
> @@ -284,6 +284,54 @@ void ovs_vport_get_stats(struct vport *vport, struct ovs_vport_stats *stats)
> stats->tx_packets = dev_stats->tx_packets;
> }
>
> +/**
> + * ovs_vport_get_upcall_stats - retrieve upcall stats
> + *
> + * @vport: vport from which to retrieve the stats
> + * @ovs_vport_upcall_stats: location to store stats
> + *
> + * Retrieves upcall stats for the given device.
> + *
> + * Must be called with ovs_mutex or rcu_read_lock.
> + */
> +void ovs_vport_get_upcall_stats(struct vport *vport, struct ovs_vport_upcall_stats *stats)
> +{
> + int i;
> +
> + stats->upcall_success = 0;
> + stats->upcall_fail = 0;
> +
> + for_each_possible_cpu(i) {
> + const struct vport_upcall_stats_percpu *percpu_upcall_stats;

You wouldn't need to linewrap the lines below if you didn't make its
name so huge.

> + unsigned int start;
> +
> + percpu_upcall_stats = per_cpu_ptr(vport->vport_upcall_stats_percpu, i);
> + do {
> + start = u64_stats_fetch_begin(&percpu_upcall_stats->syncp);
> + stats->upcall_success +=
> + u64_stats_read(&percpu_upcall_stats->n_upcall_success);
> + stats->upcall_fail += u64_stats_read(&percpu_upcall_stats->n_upcall_fail);
> + } while (u64_stats_fetch_retry(&percpu_upcall_stats->syncp, start));
> + }
> +}
> +
> +int ovs_vport_put_upcall_stats(struct sk_buff *skb,
> + struct ovs_vport_upcall_stats *stats)
> +{
> + if (nla_put_u64_64bit(skb, OVS_VPORT_UPCALL_SUCCESS, stats->upcall_success,
> + OVS_VPORT_ATTR_PAD))
> + goto nla_put_failure;
> +
> + if (nla_put_u64_64bit(skb, OVS_VPORT_UPCALL_FAIL, stats->upcall_fail,
> + OVS_VPORT_ATTR_PAD))
> + goto nla_put_failure;
> +
> + return 0;
> +
> +nla_put_failure:
> + return -EMSGSIZE;

goto with only one action makes no sense, just exit directly.
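I.e. (untested):

int ovs_vport_put_upcall_stats(struct sk_buff *skb,
			       struct ovs_vport_upcall_stats *stats)
{
	if (nla_put_u64_64bit(skb, OVS_VPORT_UPCALL_SUCCESS,
			      stats->upcall_success, OVS_VPORT_ATTR_PAD))
		return -EMSGSIZE;

	if (nla_put_u64_64bit(skb, OVS_VPORT_UPCALL_FAIL,
			      stats->upcall_fail, OVS_VPORT_ATTR_PAD))
		return -EMSGSIZE;

	return 0;
}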

> +}
> +
> /**
> * ovs_vport_get_options - retrieve device options
> *
> diff --git a/net/openvswitch/vport.h b/net/openvswitch/vport.h
> index 7d276f60c000..02cf8c589588 100644
> --- a/net/openvswitch/vport.h
> +++ b/net/openvswitch/vport.h
> @@ -32,6 +32,11 @@ struct vport *ovs_vport_locate(const struct net *net, const char *name);
>
> void ovs_vport_get_stats(struct vport *, struct ovs_vport_stats *);
>
> +void ovs_vport_get_upcall_stats(struct vport *vport,
> + struct ovs_vport_upcall_stats *stats);
> +int ovs_vport_put_upcall_stats(struct sk_buff *skb,
> + struct ovs_vport_upcall_stats *stats);
> +
> int ovs_vport_set_options(struct vport *, struct nlattr *options);
> int ovs_vport_get_options(const struct vport *, struct sk_buff *);
>
> @@ -78,6 +83,7 @@ struct vport {
> struct hlist_node hash_node;
> struct hlist_node dp_hash_node;
> const struct vport_ops *ops;
> + struct vport_upcall_stats_percpu __percpu *vport_upcall_stats_percpu;

Almost 80 columns in one field definition :D

>
> struct list_head detach_list;
> struct rcu_head rcu;
> --
> 2.27.0

[0] https://elixir.bootlin.com/linux/v6.1-rc6/source/net/openvswitch/datapath.c#L557

Thanks,
Olek

2022-11-24 02:39:46

by wangchuanlei

[permalink] [raw]
Subject: Re: [PATCH] [openvswitch v4] openvswitch: Add support to count upcall packets

Hi,
Thank you for the review! I will send a new version of the patch based on your comments;
my reply to each of your comments is inline below.

Best regards!
wangchuanlei

From: Alexander Lobakin [mailto:[email protected]]
To: [email protected]

> From: wangchuanlei <[email protected]>
> Date: Wed, 23 Nov 2022 04:18:43 -0500

> Add support to count upall packets, when kmod of openvswitch upcall to
> userspace , here count the number of packets for upcall succeed and
> failed, which is a better way to see how many packets upcalled to
> userspace(ovs-vswitchd) on every interfaces.
>
> Here optimize the function used by comments of v3.
>
> Changes since v3:
> - use nested NLA_NESTED attribute in netlink message
>
> Changes since v2:
> - add count of upcall failed packets
>
> Changes since v1:
> - add count of upcall succeed packets
>
> Signed-off-by: wangchuanlei <[email protected]>
> ---
> include/uapi/linux/openvswitch.h | 19 ++++++++++++
> net/openvswitch/datapath.c | 52 ++++++++++++++++++++++++++++++++
> net/openvswitch/datapath.h | 12 ++++++++
> net/openvswitch/vport.c | 48 +++++++++++++++++++++++++++++
> net/openvswitch/vport.h | 6 ++++
> 5 files changed, 137 insertions(+)
>
> diff --git a/include/uapi/linux/openvswitch.h
> b/include/uapi/linux/openvswitch.h
> index 94066f87e9ee..fa13bce15fae 100644
> --- a/include/uapi/linux/openvswitch.h
> +++ b/include/uapi/linux/openvswitch.h
> @@ -126,6 +126,11 @@ struct ovs_vport_stats {
> __u64 tx_dropped; /* no space available in linux */
> };
>
> +struct ovs_vport_upcall_stats {
> + uint64_t upcall_success; /* total packets upcalls succeed */
> + uint64_t upcall_fail; /* total packets upcalls failed */

Please no uint64_t int the UAPI headers. __u64 as above. --Yes !

> +};
> +
> /* Allow last Netlink attribute to be unaligned */
> #define OVS_DP_F_UNALIGNED (1 << 0)
>
> @@ -277,11 +282,25 @@ enum ovs_vport_attr {
> OVS_VPORT_ATTR_PAD,
> OVS_VPORT_ATTR_IFINDEX,
> OVS_VPORT_ATTR_NETNSID,
> + OVS_VPORT_ATTR_UPCALL_STATS, /* struct ovs_vport_upcall_stats */
> __OVS_VPORT_ATTR_MAX
> };
>
> #define OVS_VPORT_ATTR_MAX (__OVS_VPORT_ATTR_MAX - 1)
>
> +/**
> + * enum ovs_vport_upcall_attr - attributes for %OVS_VPORT_UPCALL*
> +commands
> + * @OVS_VPORT_UPCALL_SUCCESS: 64-bit upcall success packets.
> + * @OVS_VPORT_UPCALL_FAIL: 64-bit upcall fail packets.
> + */
> +enum ovs_vport_upcall_attr {
> + OVS_VPORT_UPCALL_SUCCESS, /* 64-bit upcall success packets */
> + OVS_VPORT_UPCALL_FAIL, /* 64-bit upcall fail packets */
> + __OVS_VPORT_UPCALL_MAX
> +};
> +
> +#define OVS_VPORT_UPCALL_MAX (__OVS_VPORT_UPCALL_MAX-1)

Spaces around arithm operator ('-'). --Yes !

> +
> enum {
> OVS_VXLAN_EXT_UNSPEC,
> OVS_VXLAN_EXT_GBP, /* Flag or __u32 */
> diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
> index c8a9075ddd0a..5254c51cfa60 100644
> --- a/net/openvswitch/datapath.c
> +++ b/net/openvswitch/datapath.c
> @@ -209,6 +209,25 @@ static struct vport *new_vport(const struct vport_parms *parms)
> return vport;
> }
>
> +static void ovs_vport_upcalls(struct sk_buff *skb,
> + const struct dp_upcall_info *upcall_info,
> + bool upcall_success)

^^^^^^^^^^^^^^^^^^^

Just `bool success`? It's clear that is's about upcalls, I don't see a need to repeat it in every argument's name.
--Yes !
> +{
> + if (upcall_info->cmd == OVS_PACKET_CMD_MISS ||
> + upcall_info->cmd == OVS_PACKET_CMD_ACTION) {

if (cmd != MISS && cmd != ACTION)
return;

Saves 1 indent level. --you are right!

> + const struct vport *p = OVS_CB(skb)->input_vport;
> + struct vport_upcall_stats_percpu *vport_stats;
> +
> + vport_stats = this_cpu_ptr(p->vport_upcall_stats_percpu);

Why make a separate structure? You can just expand dp_stats_percpu, this function would then be just a couple lines in ovs_dp_upcall().
-- Hmm, because these statistics are kept per vport, the new structure needs to be embedded in "struct vport".


> + u64_stats_update_begin(&vport_stats->syncp);
> + if (upcall_success)
> + u64_stats_inc(&vport_stats->n_upcall_success);
> + else
> + u64_stats_inc(&vport_stats->n_upcall_fail);
> + u64_stats_update_end(&vport_stats->syncp);
> + }
> +}
> +
> void ovs_dp_detach_port(struct vport *p) {
> ASSERT_OVSL();
> @@ -216,6 +235,9 @@ void ovs_dp_detach_port(struct vport *p)
> /* First drop references to device. */
> hlist_del_rcu(&p->dp_hash_node);
>
> + /* Free percpu memory */
> + free_percpu(p->vport_upcall_stats_percpu);
> +
> /* Then destroy it. */
> ovs_vport_del(p);
> }
> @@ -305,6 +327,8 @@ int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
> err = queue_userspace_packet(dp, skb, key, upcall_info, cutlen);
> else
> err = queue_gso_packets(dp, skb, key, upcall_info, cutlen);
> +
> + ovs_vport_upcalls(skb, upcall_info, !err);
> if (err)
> goto err;

Also, as you may see, your ::upcall_fail counter will be always exactly the same as stats->n_lost. So there's no point introducing a new one.
However, you can expand the structure dp_stats_percpu and add a new field there which would store the number of successfull upcalls.
...but I don't see a reason for this to be honest. From my PoV, it's better to count the number of successfully processed packets at the end of queue_userspace_packet() right before the 'out:'
label[0]. But please make sure then you don't duplicate some other counter (I'm not deep into OvS, so can't say for sure if there's anything similar to what you want).
-- In OVS, stats->n_lost only counts the total across all ports, not per individual port, so expanding the dp_stats_percpu structure would not be suitable.
-- Counting upcall-failed packets is also useful, because not all upcalled packets are sent successfully.

>
> @@ -1825,6 +1849,13 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
> goto err_destroy_portids;
> }
>
> + vport->vport_upcall_stats_percpu =

This can be at least twice shorter, e.g. 'upcall_stats'. Don't try to describe every detail in symbol names.
--yes!
> + netdev_alloc_pcpu_stats(struct vport_upcall_stats_percpu);
> + if (!vport->vport_upcall_stats_percpu) {
> + err = -ENOMEM;
> + goto err_destroy_upcall_stats;

I know you followed the previous label logics, but you actually aren't destroying the stats under this label. Here you should have `goto err_destroy_portids` as that's what you're actually doing on that error path.
-- This just keeps the format of the existing code; it has no effect on the function.

> + }
> +
> err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
> info->snd_seq, 0, OVS_DP_CMD_NEW);
> BUG_ON(err < 0);

[...]

> @@ -2278,6 +2321,14 @@ static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
> goto exit_unlock_free;
> }
>
> + vport->vport_upcall_stats_percpu =
> + netdev_alloc_pcpu_stats(struct vport_upcall_stats_percpu);
> +
> + if (!vport->vport_upcall_stats_percpu) {
> + err = -ENOMEM;
> + goto exit_unlock_free;
> + }

Why do you allocate them twice?
-- These are in different code paths: this one is in ovs_vport_cmd_new(), the other is in ovs_dp_cmd_new(), so they do not collide.

> +
> err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
> info->snd_portid, info->snd_seq, 0,
> OVS_VPORT_CMD_NEW, GFP_KERNEL);

[...]

> @@ -50,6 +50,18 @@ struct dp_stats_percpu {
> struct u64_stats_sync syncp;
> };
>
> +/**
> + * struct vport_upcall_stats_percpu - per-cpu packet upcall
> +statistics for
> + * a given vport.
> + * @n_upcall_success: Number of packets that upcall to userspace succeed.
> + * @n_upcall_fail: Number of packets that upcall to userspace failed.
> + */
> +struct vport_upcall_stats_percpu {
> + u64_stats_t n_upcall_success;
> + u64_stats_t n_upcall_fail;
> + struct u64_stats_sync syncp;

Nit: syncp would feel better at the start. You could then sort the structure by the field hit probability and reduce cache misses %)
--ok

> +};
> +
> /**
> * struct dp_nlsk_pids - array of netlink portids of for a datapath.
> * This is used when OVS_DP_F_DISPATCH_UPCALL_PER_CPU
> diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c index
> 82a74f998966..a69c9356b57c 100644
> --- a/net/openvswitch/vport.c
> +++ b/net/openvswitch/vport.c
> @@ -284,6 +284,54 @@ void ovs_vport_get_stats(struct vport *vport, struct ovs_vport_stats *stats)
> stats->tx_packets = dev_stats->tx_packets; }
>
> +/**
> + * ovs_vport_get_upcall_stats - retrieve upcall stats
> + *
> + * @vport: vport from which to retrieve the stats
> + * @ovs_vport_upcall_stats: location to store stats
> + *
> + * Retrieves upcall stats for the given device.
> + *
> + * Must be called with ovs_mutex or rcu_read_lock.
> + */
> +void ovs_vport_get_upcall_stats(struct vport *vport, struct
> +ovs_vport_upcall_stats *stats) {
> + int i;
> +
> + stats->upcall_success = 0;
> + stats->upcall_fail = 0;
> +
> + for_each_possible_cpu(i) {
> + const struct vport_upcall_stats_percpu *percpu_upcall_stats;

You wouldn't need to linewrap the lines below if you didn't make its name so huge.
-- You are right, I will change the name!
> + unsigned int start;
> +
> + percpu_upcall_stats = per_cpu_ptr(vport->vport_upcall_stats_percpu, i);
> + do {
> + start = u64_stats_fetch_begin(&percpu_upcall_stats->syncp);
> + stats->upcall_success +=
> + u64_stats_read(&percpu_upcall_stats->n_upcall_success);
> + stats->upcall_fail += u64_stats_read(&percpu_upcall_stats->n_upcall_fail);
> + } while (u64_stats_fetch_retry(&percpu_upcall_stats->syncp, start));
> + }
> +}
> +
> +int ovs_vport_put_upcall_stats(struct sk_buff *skb,
> + struct ovs_vport_upcall_stats *stats) {
> + if (nla_put_u64_64bit(skb, OVS_VPORT_UPCALL_SUCCESS, stats->upcall_success,
> + OVS_VPORT_ATTR_PAD))
> + goto nla_put_failure;
> +
> + if (nla_put_u64_64bit(skb, OVS_VPORT_UPCALL_FAIL, stats->upcall_fail,
> + OVS_VPORT_ATTR_PAD))
> + goto nla_put_failure;
> +
> + return 0;
> +
> +nla_put_failure:
> + return -EMSGSIZE;

goto with only one action makes no sense, just exit directly.
--you are right!
> +}
> +
> /**
> * ovs_vport_get_options - retrieve device options
> *
> diff --git a/net/openvswitch/vport.h b/net/openvswitch/vport.h index
> 7d276f60c000..02cf8c589588 100644
> --- a/net/openvswitch/vport.h
> +++ b/net/openvswitch/vport.h
> @@ -32,6 +32,11 @@ struct vport *ovs_vport_locate(const struct net
> *net, const char *name);
>
> void ovs_vport_get_stats(struct vport *, struct ovs_vport_stats *);
>
> +void ovs_vport_get_upcall_stats(struct vport *vport,
> + struct ovs_vport_upcall_stats *stats); int
> +ovs_vport_put_upcall_stats(struct sk_buff *skb,
> + struct ovs_vport_upcall_stats *stats);
> +
> int ovs_vport_set_options(struct vport *, struct nlattr *options);
> int ovs_vport_get_options(const struct vport *, struct sk_buff *);
>
> @@ -78,6 +83,7 @@ struct vport {
> struct hlist_node hash_node;
> struct hlist_node dp_hash_node;
> const struct vport_ops *ops;
> + struct vport_upcall_stats_percpu __percpu
> +*vport_upcall_stats_percpu;

Almost 80 columns in one field definition :D
-- Yes, I will change the name!

>
> struct list_head detach_list;
> struct rcu_head rcu;
> --
> 2.27.0

[0] https://elixir.bootlin.com/linux/v6.1-rc6/source/net/openvswitch/datapath.c#L557

Thanks,
Olek


2022-11-24 08:44:55

by wangchuanlei

[permalink] [raw]
Subject: Re: [PATCH] [openvswitch v4] openvswitch: Add support to count upcall packets

Thank you for the review, Eelco Chaudron!
I will send a new version of this patch soon based on your and Alexander's comments.

Best regards!
wangchuanlei


On 23 Nov 2022, at 10:18, wangchuanlei wrote:

> Add support to count upall packets, when kmod of openvswitch upcall to
> userspace , here count the number of packets for upcall succeed and
> failed, which is a better way to see how many packets upcalled to
> userspace(ovs-vswitchd) on every interfaces.
>
> Here optimize the function used by comments of v3.
>
> Changes since v3:
> - use nested NLA_NESTED attribute in netlink message
>
> Changes since v2:
> - add count of upcall failed packets
>
> Changes since v1:
> - add count of upcall succeed packets

There is already a review from Alexander, so I only commented on some things that caught my attention after glancing over the patch.
I will do a full review of the next revisions.

//Eelco


> Signed-off-by: wangchuanlei <[email protected]>
> ---
> include/uapi/linux/openvswitch.h | 19 ++++++++++++
> net/openvswitch/datapath.c | 52 ++++++++++++++++++++++++++++++++
> net/openvswitch/datapath.h | 12 ++++++++
> net/openvswitch/vport.c | 48 +++++++++++++++++++++++++++++
> net/openvswitch/vport.h | 6 ++++
> 5 files changed, 137 insertions(+)
>
> diff --git a/include/uapi/linux/openvswitch.h
> b/include/uapi/linux/openvswitch.h
> index 94066f87e9ee..fa13bce15fae 100644
> --- a/include/uapi/linux/openvswitch.h
> +++ b/include/uapi/linux/openvswitch.h
> @@ -126,6 +126,11 @@ struct ovs_vport_stats {
> __u64 tx_dropped; /* no space available in linux */
> };
>
> +struct ovs_vport_upcall_stats {
> + uint64_t upcall_success; /* total packets upcalls succeed */
> + uint64_t upcall_fail; /* total packets upcalls failed */
> +};

This is no longer a user API data structure, so it should be removed from this include.

> +
> /* Allow last Netlink attribute to be unaligned */
> #define OVS_DP_F_UNALIGNED (1 << 0)
>
> @@ -277,11 +282,25 @@ enum ovs_vport_attr {
> OVS_VPORT_ATTR_PAD,
> OVS_VPORT_ATTR_IFINDEX,
> OVS_VPORT_ATTR_NETNSID,
> + OVS_VPORT_ATTR_UPCALL_STATS, /* struct ovs_vport_upcall_stats */
> __OVS_VPORT_ATTR_MAX
> };
>
> #define OVS_VPORT_ATTR_MAX (__OVS_VPORT_ATTR_MAX - 1)
>
> +/**
> + * enum ovs_vport_upcall_attr - attributes for %OVS_VPORT_UPCALL*
> +commands
> + * @OVS_VPORT_UPCALL_SUCCESS: 64-bit upcall success packets.
> + * @OVS_VPORT_UPCALL_FAIL: 64-bit upcall fail packets.
> + */
> +enum ovs_vport_upcall_attr {
> + OVS_VPORT_UPCALL_SUCCESS, /* 64-bit upcall success packets */
> + OVS_VPORT_UPCALL_FAIL, /* 64-bit upcall fail packets */
> + __OVS_VPORT_UPCALL_MAX
> +};

Here you have comments ending with and without a dot (.), maybe make it uniform.
Maybe the comment on the structure can be removed as they are explained right above?


> +
> +#define OVS_VPORT_UPCALL_MAX (__OVS_VPORT_UPCALL_MAX-1)
> +
> enum {
> OVS_VXLAN_EXT_UNSPEC,
> OVS_VXLAN_EXT_GBP, /* Flag or __u32 */
> diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
> index c8a9075ddd0a..5254c51cfa60 100644
> --- a/net/openvswitch/datapath.c
> +++ b/net/openvswitch/datapath.c
> @@ -209,6 +209,25 @@ static struct vport *new_vport(const struct vport_parms *parms)
> return vport;
> }
>
> +static void ovs_vport_upcalls(struct sk_buff *skb,
> + const struct dp_upcall_info *upcall_info,
> + bool upcall_success)
> +{
> + if (upcall_info->cmd == OVS_PACKET_CMD_MISS ||
> + upcall_info->cmd == OVS_PACKET_CMD_ACTION) {
> + const struct vport *p = OVS_CB(skb)->input_vport;
> + struct vport_upcall_stats_percpu *vport_stats;
> +
> + vport_stats = this_cpu_ptr(p->vport_upcall_stats_percpu);
> + u64_stats_update_begin(&vport_stats->syncp);
> + if (upcall_success)
> + u64_stats_inc(&vport_stats->n_upcall_success);
> + else
> + u64_stats_inc(&vport_stats->n_upcall_fail);
> + u64_stats_update_end(&vport_stats->syncp);
> + }
> +}
> +
> void ovs_dp_detach_port(struct vport *p) {
> ASSERT_OVSL();
> @@ -216,6 +235,9 @@ void ovs_dp_detach_port(struct vport *p)
> /* First drop references to device. */
> hlist_del_rcu(&p->dp_hash_node);
>
> + /* Free percpu memory */
> + free_percpu(p->vport_upcall_stats_percpu);
> +
> /* Then destroy it. */
> ovs_vport_del(p);
> }
> @@ -305,6 +327,8 @@ int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
> err = queue_userspace_packet(dp, skb, key, upcall_info, cutlen);
> else
> err = queue_gso_packets(dp, skb, key, upcall_info, cutlen);
> +
> + ovs_vport_upcalls(skb, upcall_info, !err);
> if (err)
> goto err;
>
> @@ -1825,6 +1849,13 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
> goto err_destroy_portids;
> }
>
> + vport->vport_upcall_stats_percpu =
> + netdev_alloc_pcpu_stats(struct vport_upcall_stats_percpu);
> + if (!vport->vport_upcall_stats_percpu) {
> + err = -ENOMEM;
> + goto err_destroy_upcall_stats;
> + }
> +
> err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
> info->snd_seq, 0, OVS_DP_CMD_NEW);
> BUG_ON(err < 0);
> @@ -1837,6 +1868,7 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
> ovs_notify(&dp_datapath_genl_family, reply, info);
> return 0;
>
> +err_destroy_upcall_stats:
> err_destroy_portids:
> kfree(rcu_dereference_raw(dp->upcall_portids));
> err_unlock_and_destroy_meters:
> @@ -2068,6 +2100,8 @@ static int ovs_vport_cmd_fill_info(struct vport
> *vport, struct sk_buff *skb, {
> struct ovs_header *ovs_header;
> struct ovs_vport_stats vport_stats;
> + struct ovs_vport_upcall_stats stat;
> + struct nlattr *nla;
> int err;
>
> ovs_header = genlmsg_put(skb, portid, seq, &dp_vport_genl_family, @@
> -2097,6 +2131,15 @@ static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
> OVS_VPORT_ATTR_PAD))
> goto nla_put_failure;
>
> + nla = nla_nest_start_noflag(skb, OVS_VPORT_ATTR_UPCALL_STATS);
> + if (!nla)
> + goto nla_put_failure;
> +
> + ovs_vport_get_upcall_stats(vport, &stat);
> + if (ovs_vport_put_upcall_stats(skb, &stat))
> + goto nla_put_failure;
> + nla_nest_end(skb, nla);
> +
> if (ovs_vport_get_upcall_portids(vport, skb))
> goto nla_put_failure;
>
> @@ -2278,6 +2321,14 @@ static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
> goto exit_unlock_free;
> }
>
> + vport->vport_upcall_stats_percpu =
> + netdev_alloc_pcpu_stats(struct vport_upcall_stats_percpu);
> +
> + if (!vport->vport_upcall_stats_percpu) {
> + err = -ENOMEM;
> + goto exit_unlock_free;
> + }
> +
> err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
> info->snd_portid, info->snd_seq, 0,
> OVS_VPORT_CMD_NEW, GFP_KERNEL); @@ -2507,6 +2558,7 @@
> static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
> [OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
> [OVS_VPORT_ATTR_IFINDEX] = { .type = NLA_U32 },
> [OVS_VPORT_ATTR_NETNSID] = { .type = NLA_S32 },
> + [OVS_VPORT_ATTR_UPCALL_STATS] = { .type = NLA_NESTED },
> };
>
> static const struct genl_small_ops dp_vport_genl_ops[] = { diff --git
> a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h index
> 0cd29971a907..933dec5e4175 100644
> --- a/net/openvswitch/datapath.h
> +++ b/net/openvswitch/datapath.h
> @@ -50,6 +50,18 @@ struct dp_stats_percpu {
> struct u64_stats_sync syncp;
> };
>
> +/**
> + * struct vport_upcall_stats_percpu - per-cpu packet upcall
> +statistics for
> + * a given vport.
> + * @n_upcall_success: Number of packets that upcall to userspace succeed.
> + * @n_upcall_fail: Number of packets that upcall to userspace failed.
> + */
> +struct vport_upcall_stats_percpu {
> + u64_stats_t n_upcall_success;
> + u64_stats_t n_upcall_fail;
> + struct u64_stats_sync syncp;
> +};
> +
> /**
> * struct dp_nlsk_pids - array of netlink portids of for a datapath.
> * This is used when OVS_DP_F_DISPATCH_UPCALL_PER_CPU
> diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c index
> 82a74f998966..a69c9356b57c 100644
> --- a/net/openvswitch/vport.c
> +++ b/net/openvswitch/vport.c
> @@ -284,6 +284,54 @@ void ovs_vport_get_stats(struct vport *vport, struct ovs_vport_stats *stats)
> stats->tx_packets = dev_stats->tx_packets; }
>
> +/**
> + * ovs_vport_get_upcall_stats - retrieve upcall stats
> + *
> + * @vport: vport from which to retrieve the stats
> + * @ovs_vport_upcall_stats: location to store stats
> + *
> + * Retrieves upcall stats for the given device.
> + *
> + * Must be called with ovs_mutex or rcu_read_lock.
> + */
> +void ovs_vport_get_upcall_stats(struct vport *vport, struct
> +ovs_vport_upcall_stats *stats) {
> + int i;
> +
> + stats->upcall_success = 0;
> + stats->upcall_fail = 0;
> +
> + for_each_possible_cpu(i) {
> + const struct vport_upcall_stats_percpu *percpu_upcall_stats;
> + unsigned int start;
> +
> + percpu_upcall_stats = per_cpu_ptr(vport->vport_upcall_stats_percpu, i);
> + do {
> + start = u64_stats_fetch_begin(&percpu_upcall_stats->syncp);
> + stats->upcall_success +=
> + u64_stats_read(&percpu_upcall_stats->n_upcall_success);
> + stats->upcall_fail += u64_stats_read(&percpu_upcall_stats->n_upcall_fail);
> + } while (u64_stats_fetch_retry(&percpu_upcall_stats->syncp, start));
> + }
> +}
> +
> +int ovs_vport_put_upcall_stats(struct sk_buff *skb,
> + struct ovs_vport_upcall_stats *stats) {
> + if (nla_put_u64_64bit(skb, OVS_VPORT_UPCALL_SUCCESS, stats->upcall_success,
> + OVS_VPORT_ATTR_PAD))
> + goto nla_put_failure;
> +
> + if (nla_put_u64_64bit(skb, OVS_VPORT_UPCALL_FAIL, stats->upcall_fail,
> + OVS_VPORT_ATTR_PAD))
> + goto nla_put_failure;
> +
> + return 0;
> +
> +nla_put_failure:
> + return -EMSGSIZE;
> +}
> +
> /**
> * ovs_vport_get_options - retrieve device options
> *
> diff --git a/net/openvswitch/vport.h b/net/openvswitch/vport.h index
> 7d276f60c000..02cf8c589588 100644
> --- a/net/openvswitch/vport.h
> +++ b/net/openvswitch/vport.h
> @@ -32,6 +32,11 @@ struct vport *ovs_vport_locate(const struct net
> *net, const char *name);
>
> void ovs_vport_get_stats(struct vport *, struct ovs_vport_stats *);
>
> +void ovs_vport_get_upcall_stats(struct vport *vport,
> + struct ovs_vport_upcall_stats *stats); int
> +ovs_vport_put_upcall_stats(struct sk_buff *skb,
> + struct ovs_vport_upcall_stats *stats);
> +
> int ovs_vport_set_options(struct vport *, struct nlattr *options);
> int ovs_vport_get_options(const struct vport *, struct sk_buff *);
>
> @@ -78,6 +83,7 @@ struct vport {
> struct hlist_node hash_node;
> struct hlist_node dp_hash_node;
> const struct vport_ops *ops;
> + struct vport_upcall_stats_percpu __percpu
> +*vport_upcall_stats_percpu;
>
> struct list_head detach_list;
> struct rcu_head rcu;
> --
> 2.27.0

2022-11-24 09:07:16

by Eelco Chaudron

[permalink] [raw]
Subject: Re: [PATCH] [openvswitch v4] openvswitch: Add support to count upcall packets



On 23 Nov 2022, at 10:18, wangchuanlei wrote:

> Add support to count upall packets, when kmod of openvswitch
> upcall to userspace , here count the number of packets for
> upcall succeed and failed, which is a better way to see how
> many packets upcalled to userspace(ovs-vswitchd) on every
> interfaces.
>
> Here optimize the function used by comments of v3.
>
> Changes since v3:
> - use nested NLA_NESTED attribute in netlink message
>
> Changes since v2:
> - add count of upcall failed packets
>
> Changes since v1:
> - add count of upcall succeed packets

There is already a review from Alexander, so I only commented on some things that caught my attention after glancing over the patch.
I will do a full review of the next revisions.

//Eelco


> Signed-off-by: wangchuanlei <[email protected]>
> ---
> include/uapi/linux/openvswitch.h | 19 ++++++++++++
> net/openvswitch/datapath.c | 52 ++++++++++++++++++++++++++++++++
> net/openvswitch/datapath.h | 12 ++++++++
> net/openvswitch/vport.c | 48 +++++++++++++++++++++++++++++
> net/openvswitch/vport.h | 6 ++++
> 5 files changed, 137 insertions(+)
>
> diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
> index 94066f87e9ee..fa13bce15fae 100644
> --- a/include/uapi/linux/openvswitch.h
> +++ b/include/uapi/linux/openvswitch.h
> @@ -126,6 +126,11 @@ struct ovs_vport_stats {
> __u64 tx_dropped; /* no space available in linux */
> };
>
> +struct ovs_vport_upcall_stats {
> + uint64_t upcall_success; /* total packets upcalls succeed */
> + uint64_t upcall_fail; /* total packets upcalls failed */
> +};

This is no longer a user API data structure, so it should be removed from this include.
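E.g. the aggregate could live in a kernel header such as
net/openvswitch/vport.h next to the get/put helpers, with plain kernel
types (just a sketch of the idea):

/* Kernel-internal aggregate of the per-vport upcall counters; userspace
 * only sees the nested OVS_VPORT_UPCALL_SUCCESS / OVS_VPORT_UPCALL_FAIL
 * attributes.
 */
struct ovs_vport_upcall_stats {
	u64 upcall_success;
	u64 upcall_fail;
};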

> +
> /* Allow last Netlink attribute to be unaligned */
> #define OVS_DP_F_UNALIGNED (1 << 0)
>
> @@ -277,11 +282,25 @@ enum ovs_vport_attr {
> OVS_VPORT_ATTR_PAD,
> OVS_VPORT_ATTR_IFINDEX,
> OVS_VPORT_ATTR_NETNSID,
> + OVS_VPORT_ATTR_UPCALL_STATS, /* struct ovs_vport_upcall_stats */
> __OVS_VPORT_ATTR_MAX
> };
>
> #define OVS_VPORT_ATTR_MAX (__OVS_VPORT_ATTR_MAX - 1)
>
> +/**
> + * enum ovs_vport_upcall_attr - attributes for %OVS_VPORT_UPCALL* commands
> + * @OVS_VPORT_UPCALL_SUCCESS: 64-bit upcall success packets.
> + * @OVS_VPORT_UPCALL_FAIL: 64-bit upcall fail packets.
> + */
> +enum ovs_vport_upcall_attr {
> + OVS_VPORT_UPCALL_SUCCESS, /* 64-bit upcall success packets */
> + OVS_VPORT_UPCALL_FAIL, /* 64-bit upcall fail packets */
> + __OVS_VPORT_UPCALL_MAX
> +};

Here you have comments ending with and without a dot (.), maybe make it uniform.
Maybe the comment on the structure can be removed as they are explained right above?


> +
> +#define OVS_VPORT_UPCALL_MAX (__OVS_VPORT_UPCALL_MAX-1)
> +
> enum {
> OVS_VXLAN_EXT_UNSPEC,
> OVS_VXLAN_EXT_GBP, /* Flag or __u32 */
> diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
> index c8a9075ddd0a..5254c51cfa60 100644
> --- a/net/openvswitch/datapath.c
> +++ b/net/openvswitch/datapath.c
> @@ -209,6 +209,25 @@ static struct vport *new_vport(const struct vport_parms *parms)
> return vport;
> }
>
> +static void ovs_vport_upcalls(struct sk_buff *skb,
> + const struct dp_upcall_info *upcall_info,
> + bool upcall_success)
> +{
> + if (upcall_info->cmd == OVS_PACKET_CMD_MISS ||
> + upcall_info->cmd == OVS_PACKET_CMD_ACTION) {
> + const struct vport *p = OVS_CB(skb)->input_vport;
> + struct vport_upcall_stats_percpu *vport_stats;
> +
> + vport_stats = this_cpu_ptr(p->vport_upcall_stats_percpu);
> + u64_stats_update_begin(&vport_stats->syncp);
> + if (upcall_success)
> + u64_stats_inc(&vport_stats->n_upcall_success);
> + else
> + u64_stats_inc(&vport_stats->n_upcall_fail);
> + u64_stats_update_end(&vport_stats->syncp);
> + }
> +}
> +
> void ovs_dp_detach_port(struct vport *p)
> {
> ASSERT_OVSL();
> @@ -216,6 +235,9 @@ void ovs_dp_detach_port(struct vport *p)
> /* First drop references to device. */
> hlist_del_rcu(&p->dp_hash_node);
>
> + /* Free percpu memory */
> + free_percpu(p->vport_upcall_stats_percpu);
> +
> /* Then destroy it. */
> ovs_vport_del(p);
> }
> @@ -305,6 +327,8 @@ int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
> err = queue_userspace_packet(dp, skb, key, upcall_info, cutlen);
> else
> err = queue_gso_packets(dp, skb, key, upcall_info, cutlen);
> +
> + ovs_vport_upcalls(skb, upcall_info, !err);
> if (err)
> goto err;
>
> @@ -1825,6 +1849,13 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
> goto err_destroy_portids;
> }
>
> + vport->vport_upcall_stats_percpu =
> + netdev_alloc_pcpu_stats(struct vport_upcall_stats_percpu);
> + if (!vport->vport_upcall_stats_percpu) {
> + err = -ENOMEM;
> + goto err_destroy_upcall_stats;
> + }
> +
> err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
> info->snd_seq, 0, OVS_DP_CMD_NEW);
> BUG_ON(err < 0);
> @@ -1837,6 +1868,7 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
> ovs_notify(&dp_datapath_genl_family, reply, info);
> return 0;
>
> +err_destroy_upcall_stats:
> err_destroy_portids:
> kfree(rcu_dereference_raw(dp->upcall_portids));
> err_unlock_and_destroy_meters:
> @@ -2068,6 +2100,8 @@ static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
> {
> struct ovs_header *ovs_header;
> struct ovs_vport_stats vport_stats;
> + struct ovs_vport_upcall_stats stat;
> + struct nlattr *nla;
> int err;
>
> ovs_header = genlmsg_put(skb, portid, seq, &dp_vport_genl_family,
> @@ -2097,6 +2131,15 @@ static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
> OVS_VPORT_ATTR_PAD))
> goto nla_put_failure;
>
> + nla = nla_nest_start_noflag(skb, OVS_VPORT_ATTR_UPCALL_STATS);
> + if (!nla)
> + goto nla_put_failure;
> +
> + ovs_vport_get_upcall_stats(vport, &stat);
> + if (ovs_vport_put_upcall_stats(skb, &stat))
> + goto nla_put_failure;
> + nla_nest_end(skb, nla);
> +
> if (ovs_vport_get_upcall_portids(vport, skb))
> goto nla_put_failure;
>
> @@ -2278,6 +2321,14 @@ static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
> goto exit_unlock_free;
> }
>
> + vport->vport_upcall_stats_percpu =
> + netdev_alloc_pcpu_stats(struct vport_upcall_stats_percpu);
> +
> + if (!vport->vport_upcall_stats_percpu) {
> + err = -ENOMEM;
> + goto exit_unlock_free;
> + }
> +
> err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
> info->snd_portid, info->snd_seq, 0,
> OVS_VPORT_CMD_NEW, GFP_KERNEL);
> @@ -2507,6 +2558,7 @@ static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
> [OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
> [OVS_VPORT_ATTR_IFINDEX] = { .type = NLA_U32 },
> [OVS_VPORT_ATTR_NETNSID] = { .type = NLA_S32 },
> + [OVS_VPORT_ATTR_UPCALL_STATS] = { .type = NLA_NESTED },
> };
>
> static const struct genl_small_ops dp_vport_genl_ops[] = {
> diff --git a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h
> index 0cd29971a907..933dec5e4175 100644
> --- a/net/openvswitch/datapath.h
> +++ b/net/openvswitch/datapath.h
> @@ -50,6 +50,18 @@ struct dp_stats_percpu {
> struct u64_stats_sync syncp;
> };
>
> +/**
> + * struct vport_upcall_stats_percpu - per-cpu packet upcall statistics for
> + * a given vport.
> + * @n_upcall_success: Number of packets that upcall to userspace succeed.
> + * @n_upcall_fail: Number of packets that upcall to userspace failed.
> + */
> +struct vport_upcall_stats_percpu {
> + u64_stats_t n_upcall_success;
> + u64_stats_t n_upcall_fail;
> + struct u64_stats_sync syncp;
> +};
> +
> /**
> * struct dp_nlsk_pids - array of netlink portids of for a datapath.
> * This is used when OVS_DP_F_DISPATCH_UPCALL_PER_CPU
> diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c
> index 82a74f998966..a69c9356b57c 100644
> --- a/net/openvswitch/vport.c
> +++ b/net/openvswitch/vport.c
> @@ -284,6 +284,54 @@ void ovs_vport_get_stats(struct vport *vport, struct ovs_vport_stats *stats)
> stats->tx_packets = dev_stats->tx_packets;
> }
>
> +/**
> + * ovs_vport_get_upcall_stats - retrieve upcall stats
> + *
> + * @vport: vport from which to retrieve the stats
> + * @ovs_vport_upcall_stats: location to store stats
> + *
> + * Retrieves upcall stats for the given device.
> + *
> + * Must be called with ovs_mutex or rcu_read_lock.
> + */
> +void ovs_vport_get_upcall_stats(struct vport *vport, struct ovs_vport_upcall_stats *stats)
> +{
> + int i;
> +
> + stats->upcall_success = 0;
> + stats->upcall_fail = 0;
> +
> + for_each_possible_cpu(i) {
> + const struct vport_upcall_stats_percpu *percpu_upcall_stats;
> + unsigned int start;
> +
> + percpu_upcall_stats = per_cpu_ptr(vport->vport_upcall_stats_percpu, i);
> + do {
> + start = u64_stats_fetch_begin(&percpu_upcall_stats->syncp);
> + stats->upcall_success +=
> + u64_stats_read(&percpu_upcall_stats->n_upcall_success);
> + stats->upcall_fail += u64_stats_read(&percpu_upcall_stats->n_upcall_fail);
> + } while (u64_stats_fetch_retry(&percpu_upcall_stats->syncp, start));
> + }
> +}
> +
> +int ovs_vport_put_upcall_stats(struct sk_buff *skb,
> + struct ovs_vport_upcall_stats *stats)
> +{
> + if (nla_put_u64_64bit(skb, OVS_VPORT_UPCALL_SUCCESS, stats->upcall_success,
> + OVS_VPORT_ATTR_PAD))
> + goto nla_put_failure;
> +
> + if (nla_put_u64_64bit(skb, OVS_VPORT_UPCALL_FAIL, stats->upcall_fail,
> + OVS_VPORT_ATTR_PAD))
> + goto nla_put_failure;
> +
> + return 0;
> +
> +nla_put_failure:
> + return -EMSGSIZE;
> +}
> +
> /**
> * ovs_vport_get_options - retrieve device options
> *
> diff --git a/net/openvswitch/vport.h b/net/openvswitch/vport.h
> index 7d276f60c000..02cf8c589588 100644
> --- a/net/openvswitch/vport.h
> +++ b/net/openvswitch/vport.h
> @@ -32,6 +32,11 @@ struct vport *ovs_vport_locate(const struct net *net, const char *name);
>
> void ovs_vport_get_stats(struct vport *, struct ovs_vport_stats *);
>
> +void ovs_vport_get_upcall_stats(struct vport *vport,
> + struct ovs_vport_upcall_stats *stats);
> +int ovs_vport_put_upcall_stats(struct sk_buff *skb,
> + struct ovs_vport_upcall_stats *stats);
> +
> int ovs_vport_set_options(struct vport *, struct nlattr *options);
> int ovs_vport_get_options(const struct vport *, struct sk_buff *);
>
> @@ -78,6 +83,7 @@ struct vport {
> struct hlist_node hash_node;
> struct hlist_node dp_hash_node;
> const struct vport_ops *ops;
> + struct vport_upcall_stats_percpu __percpu *vport_upcall_stats_percpu;
>
> struct list_head detach_list;
> struct rcu_head rcu;
> --
> 2.27.0

2022-11-24 17:40:09

by Alexander Lobakin

[permalink] [raw]
Subject: Re: [PATCH] [openvswitch v4] openvswitch: Add support to count upcall packets

From: wangchuanlei <[email protected]>
Date: Wed, 23 Nov 2022 21:24:16 -0500

> Hi,
> Thank you for the review! I will send a new version of the patch based on your comments,
> and I give an explanation for each of your comments; please see below!

Oh, just noticed, the subject prefix [openvswitch] is not correct,
please use [PATCH net-next v5] next time.

>
> Best regards!
> wangchuanlei

[...]

> > + const struct vport *p = OVS_CB(skb)->input_vport;
> > + struct vport_upcall_stats_percpu *vport_stats;
> > +
> > + vport_stats = this_cpu_ptr(p->vport_upcall_stats_percpu);
>
> Why make a separate structure? You can just expand dp_stats_percpu, this function would then be just a couple lines in ovs_dp_upcall().
> -- emm, because these statistics are per-vport, the new structure should be inserted into "struct vport"

Ah, my bad. Didn't notice that :')

>
>
> > + u64_stats_update_begin(&vport_stats->syncp);
> > + if (upcall_success)
> > + u64_stats_inc(&vport_stats->n_upcall_success);
> > + else
> > + u64_stats_inc(&vport_stats->n_upcall_fail);
> > + u64_stats_update_end(&vport_stats->syncp);
> > + }
> > +}
> > +
> > void ovs_dp_detach_port(struct vport *p) {
> > ASSERT_OVSL();
> > @@ -216,6 +235,9 @@ void ovs_dp_detach_port(struct vport *p)
> > /* First drop references to device. */
> > hlist_del_rcu(&p->dp_hash_node);
> >
> > + /* Free percpu memory */
> > + free_percpu(p->vport_upcall_stats_percpu);
> > +
> > /* Then destroy it. */
> > ovs_vport_del(p);
> > }
> > @@ -305,6 +327,8 @@ int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
> > err = queue_userspace_packet(dp, skb, key, upcall_info, cutlen);
> > else
> > err = queue_gso_packets(dp, skb, key, upcall_info, cutlen);
> > +
> > + ovs_vport_upcalls(skb, upcall_info, !err);
> > if (err)
> > goto err;
>
> Also, as you may see, your ::upcall_fail counter will be always exactly the same as stats->n_lost. So there's no point introducing a new one.
> However, you can expand the structure dp_stats_percpu and add a new field there which would store the number of successful upcalls.
> ...but I don't see a reason for this to be honest. From my PoV, it's better to count the number of successfully processed packets at the end of queue_userspace_packet() right before the 'out:'
> label[0]. But please make sure then you don't duplicate some other counter (I'm not deep into OvS, so can't say for sure if there's anything similar to what you want).
> --in ovs, stats->n_lost only counts the sum of packets across all ports, not per individual port, so expanding the structure dp_stats_percpu may not be suitable
> --and counting upcall-failed packets is useful because not all upcall packets are successfully sent.

Yes, I see now, thanks for the explanation! I think it's a good idea
in general to introduce OvS per-vport stats. There are some, but
they're stored in net_device::dev_stats, which I'm not a fan of :D

>
> >
> > @@ -1825,6 +1849,13 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
> > goto err_destroy_portids;
> > }
> >
> > + vport->vport_upcall_stats_percpu =
>
> This can be at least twice shorter, e.g. 'upcall_stats'. Don't try to describe every detail in symbol names.
> --yes!
> > + netdev_alloc_pcpu_stats(struct vport_upcall_stats_percpu);
> > + if (!vport->vport_upcall_stats_percpu) {
> > + err = -ENOMEM;
> > + goto err_destroy_upcall_stats;
>
> I know you followed the previous label logic, but you actually aren't destroying the stats under this label. Here you should have `goto err_destroy_portids` as that's what you're actually doing on that error path.
> --this is just to keep the format of the code consistent; it has no influence on the functionality

Correct, so you can use the already existing label here.
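In other words, a minimal sketch of the error path being suggested (surrounding code abbreviated; v5 of the patch ends up doing exactly this, with the shorter 'upcall_stats' field name):

	vport->upcall_stats = netdev_alloc_pcpu_stats(struct vport_upcall_stats_percpu);
	if (!vport->upcall_stats) {
		err = -ENOMEM;
		goto err_destroy_portids;	/* reuse the existing label */
	}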

>
> > + }
> > +
> > err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
> > info->snd_seq, 0, OVS_DP_CMD_NEW);
> > BUG_ON(err < 0);
>
> [...]
>
> > @@ -2278,6 +2321,14 @@ static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
> > goto exit_unlock_free;
> > }
> >
> > + vport->vport_upcall_stats_percpu =
> > + netdev_alloc_pcpu_stats(struct vport_upcall_stats_percpu);
> > +
> > + if (!vport->vport_upcall_stats_percpu) {
> > + err = -ENOMEM;
> > + goto exit_unlock_free;
> > + }
>
> Why do you allocate them twice?
> -- these are in different code segments: one is in vport_cmd_new, the other is in dp_cmd_new; they have no collisions

+ (resolved)

>
> > +
> > err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
> > info->snd_portid, info->snd_seq, 0,
> > OVS_VPORT_CMD_NEW, GFP_KERNEL);

[...]

> > --
> > 2.27.0
>
> [0] https://elixir.bootlin.com/linux/v6.1-rc6/source/net/openvswitch/datapath.c#L557
>
> Thanks,
> Olek

Thanks,
Olek

2022-11-26 04:46:30

by kernel test robot

[permalink] [raw]
Subject: Re: [PATCH] [openvswitch v4] openvswitch: Add support to count upcall packets

Hi wangchuanlei,

Thank you for the patch! Yet something to improve:

[auto build test ERROR on net-next/master]
[also build test ERROR on net/master linus/master v6.1-rc6 next-20221125]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url: https://github.com/intel-lab-lkp/linux/commits/wangchuanlei/openvswitch-Add-support-to-count-upcall-packets/20221123-172156
patch link: https://lore.kernel.org/r/20221123091843.3414856-1-wangchuanlei%40inspur.com
patch subject: [PATCH] [openvswitch v4] openvswitch: Add support to count upcall packets
config: x86_64-randconfig-a015
compiler: gcc-11 (Debian 11.3.0-8) 11.3.0
reproduce (this is a W=1 build):
# https://github.com/intel-lab-lkp/linux/commit/2a933adf45aad43350c7074e0f9a6a12e7f41986
git remote add linux-review https://github.com/intel-lab-lkp/linux
git fetch --no-tags linux-review wangchuanlei/openvswitch-Add-support-to-count-upcall-packets/20221123-172156
git checkout 2a933adf45aad43350c7074e0f9a6a12e7f41986
# save the config file
mkdir build_dir && cp config build_dir/.config
make W=1 O=build_dir ARCH=x86_64 SHELL=/bin/bash

If you fix the issue, kindly add following tag where applicable
| Reported-by: kernel test robot <[email protected]>

All errors (new ones prefixed by >>):

In file included from <command-line>:
>> ./usr/include/linux/openvswitch.h:130:9: error: unknown type name 'uint64_t'
130 | uint64_t upcall_success; /* total packets upcalls succeed */
| ^~~~~~~~
./usr/include/linux/openvswitch.h:131:9: error: unknown type name 'uint64_t'
131 | uint64_t upcall_fail; /* total packets upcalls failed */
| ^~~~~~~~
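The error arises because exported UAPI headers are also compiled in a bare userspace context where the C99 uint64_t name is not guaranteed to be visible; exported headers are expected to use the kernel's __u64 from <linux/types.h> instead. A minimal sketch of the fix (v5 of the patch switches to __u64, and also renames the fields):

	#include <linux/types.h>

	struct ovs_vport_upcall_stats {
		__u64 upcall_success;	/* total packets for which upcall succeeded */
		__u64 upcall_fail;	/* total packets for which upcall failed */
	};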

--
0-DAY CI Kernel Test Service
https://01.org/lkp


Attachments:
(No filename) (1.94 kB)
config (138.25 kB)
Download all attachments

2022-11-30 06:54:11

by wangchuanlei

[permalink] [raw]
Subject: Re: [PATCH] [openvswitch v4] openvswitch: Add support to count upcall packets

Hi, Eelco Chaudron

> +struct ovs_vport_upcall_stats {
> + uint64_t upcall_success; /* total packets upcalls succeed */
> + uint64_t upcall_fail; /* total packets upcalls failed */
> +};

This is no longer a user API data structure, so it should be removed from this include.
--This structure will be used in userspace; ovs-vswitchd will use it.
-- and that will be another patch for ovs-vswitchd, so should it be kept here?
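As a side note, userspace does not strictly need the UAPI struct to consume the new attribute; it can read the two nested u64 attributes directly. A rough kernel-style sketch for illustration only (ovs-vswitchd has its own netlink helpers, so its real code would look different; 'upcall_attr' here is a hypothetical pointer to the OVS_VPORT_ATTR_UPCALL_STATS nested attribute):

	/* Sketch: read the nested upcall counters from OVS_VPORT_ATTR_UPCALL_STATS. */
	static void read_upcall_stats(const struct nlattr *upcall_attr,
				      __u64 *n_success, __u64 *n_fail)
	{
		struct nlattr *a;
		int rem;

		*n_success = 0;
		*n_fail = 0;
		nla_for_each_nested(a, upcall_attr, rem) {
			switch (nla_type(a)) {
			case OVS_VPORT_UPCALL_SUCCESS:
				*n_success = nla_get_u64(a);
				break;
			case OVS_VPORT_UPCALL_FAIL:
				*n_fail = nla_get_u64(a);
				break;
			}
		}
	}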


>Thank you for the review, Eelco Chaudron,
>I will send a new version of this patch soon based on your and Alexander's comments.

Best regards!
wangchuanlei


On 23 Nov 2022, at 10:18, wangchuanlei wrote:

> Add support to count upall packets, when kmod of openvswitch upcall to
> userspace , here count the number of packets for upcall succeed and
> failed, which is a better way to see how many packets upcalled to
> userspace(ovs-vswitchd) on every interfaces.
>
> Here optimize the function used by comments of v3.
>
> Changes since v3:
> - use nested NLA_NESTED attribute in netlink message
>
> Changes since v2:
> - add count of upcall failed packets
>
> Changes since v1:
> - add count of upcall succeed packets

There is already a review from Alexander, so I only commented on some things that caught my attention after glancing over the patch.
I will do a full review of the next revisions.

//Eelco


> Signed-off-by: wangchuanlei <[email protected]>
> ---
> include/uapi/linux/openvswitch.h | 19 ++++++++++++
> net/openvswitch/datapath.c | 52 ++++++++++++++++++++++++++++++++
> net/openvswitch/datapath.h | 12 ++++++++
> net/openvswitch/vport.c | 48 +++++++++++++++++++++++++++++
> net/openvswitch/vport.h | 6 ++++
> 5 files changed, 137 insertions(+)
>
> diff --git a/include/uapi/linux/openvswitch.h
> b/include/uapi/linux/openvswitch.h
> index 94066f87e9ee..fa13bce15fae 100644
> --- a/include/uapi/linux/openvswitch.h
> +++ b/include/uapi/linux/openvswitch.h
> @@ -126,6 +126,11 @@ struct ovs_vport_stats {
> __u64 tx_dropped; /* no space available in linux */
> };
>
> +struct ovs_vport_upcall_stats {
> + uint64_t upcall_success; /* total packets upcalls succeed */
> + uint64_t upcall_fail; /* total packets upcalls failed */
> +};

This is no longer a user API data structure, so it should be removed from this include.

> +
> /* Allow last Netlink attribute to be unaligned */
> #define OVS_DP_F_UNALIGNED (1 << 0)
>
> @@ -277,11 +282,25 @@ enum ovs_vport_attr {
> OVS_VPORT_ATTR_PAD,
> OVS_VPORT_ATTR_IFINDEX,
> OVS_VPORT_ATTR_NETNSID,
> + OVS_VPORT_ATTR_UPCALL_STATS, /* struct ovs_vport_upcall_stats */
> __OVS_VPORT_ATTR_MAX
> };
>
> #define OVS_VPORT_ATTR_MAX (__OVS_VPORT_ATTR_MAX - 1)
>
> +/**
> + * enum ovs_vport_upcall_attr - attributes for %OVS_VPORT_UPCALL*
> +commands
> + * @OVS_VPORT_UPCALL_SUCCESS: 64-bit upcall success packets.
> + * @OVS_VPORT_UPCALL_FAIL: 64-bit upcall fail packets.
> + */
> +enum ovs_vport_upcall_attr {
> + OVS_VPORT_UPCALL_SUCCESS, /* 64-bit upcall success packets */
> + OVS_VPORT_UPCALL_FAIL, /* 64-bit upcall fail packets */
> + __OVS_VPORT_UPCALL_MAX
> +};

Here some comments end with a dot (.) and others do not; maybe make this uniform.
Maybe the comments inside the enum can be removed, as the values are already explained in the kernel-doc right above?


> +
> +#define OVS_VPORT_UPCALL_MAX (__OVS_VPORT_UPCALL_MAX-1)
> +
> enum {
> OVS_VXLAN_EXT_UNSPEC,
> OVS_VXLAN_EXT_GBP, /* Flag or __u32 */
> diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
> index c8a9075ddd0a..5254c51cfa60 100644
> --- a/net/openvswitch/datapath.c
> +++ b/net/openvswitch/datapath.c
> @@ -209,6 +209,25 @@ static struct vport *new_vport(const struct vport_parms *parms)
> return vport;
> }
>
> +static void ovs_vport_upcalls(struct sk_buff *skb,
> + const struct dp_upcall_info *upcall_info,
> + bool upcall_success)
> +{
> + if (upcall_info->cmd == OVS_PACKET_CMD_MISS ||
> + upcall_info->cmd == OVS_PACKET_CMD_ACTION) {
> + const struct vport *p = OVS_CB(skb)->input_vport;
> + struct vport_upcall_stats_percpu *vport_stats;
> +
> + vport_stats = this_cpu_ptr(p->vport_upcall_stats_percpu);
> + u64_stats_update_begin(&vport_stats->syncp);
> + if (upcall_success)
> + u64_stats_inc(&vport_stats->n_upcall_success);
> + else
> + u64_stats_inc(&vport_stats->n_upcall_fail);
> + u64_stats_update_end(&vport_stats->syncp);
> + }
> +}
> +
> void ovs_dp_detach_port(struct vport *p) {
> ASSERT_OVSL();
> @@ -216,6 +235,9 @@ void ovs_dp_detach_port(struct vport *p)
> /* First drop references to device. */
> hlist_del_rcu(&p->dp_hash_node);
>
> + /* Free percpu memory */
> + free_percpu(p->vport_upcall_stats_percpu);
> +
> /* Then destroy it. */
> ovs_vport_del(p);
> }
> @@ -305,6 +327,8 @@ int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
> err = queue_userspace_packet(dp, skb, key, upcall_info, cutlen);
> else
> err = queue_gso_packets(dp, skb, key, upcall_info, cutlen);
> +
> + ovs_vport_upcalls(skb, upcall_info, !err);
> if (err)
> goto err;
>
> @@ -1825,6 +1849,13 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
> goto err_destroy_portids;
> }
>
> + vport->vport_upcall_stats_percpu =
> + netdev_alloc_pcpu_stats(struct vport_upcall_stats_percpu);
> + if (!vport->vport_upcall_stats_percpu) {
> + err = -ENOMEM;
> + goto err_destroy_upcall_stats;
> + }
> +
> err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
> info->snd_seq, 0, OVS_DP_CMD_NEW);
> BUG_ON(err < 0);
> @@ -1837,6 +1868,7 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
> ovs_notify(&dp_datapath_genl_family, reply, info);
> return 0;
>
> +err_destroy_upcall_stats:
> err_destroy_portids:
> kfree(rcu_dereference_raw(dp->upcall_portids));
> err_unlock_and_destroy_meters:
> @@ -2068,6 +2100,8 @@ static int ovs_vport_cmd_fill_info(struct vport
> *vport, struct sk_buff *skb, {
> struct ovs_header *ovs_header;
> struct ovs_vport_stats vport_stats;
> + struct ovs_vport_upcall_stats stat;
> + struct nlattr *nla;
> int err;
>
> ovs_header = genlmsg_put(skb, portid, seq, &dp_vport_genl_family, @@
> -2097,6 +2131,15 @@ static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
> OVS_VPORT_ATTR_PAD))
> goto nla_put_failure;
>
> + nla = nla_nest_start_noflag(skb, OVS_VPORT_ATTR_UPCALL_STATS);
> + if (!nla)
> + goto nla_put_failure;
> +
> + ovs_vport_get_upcall_stats(vport, &stat);
> + if (ovs_vport_put_upcall_stats(skb, &stat))
> + goto nla_put_failure;
> + nla_nest_end(skb, nla);
> +
> if (ovs_vport_get_upcall_portids(vport, skb))
> goto nla_put_failure;
>
> @@ -2278,6 +2321,14 @@ static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
> goto exit_unlock_free;
> }
>
> + vport->vport_upcall_stats_percpu =
> + netdev_alloc_pcpu_stats(struct vport_upcall_stats_percpu);
> +
> + if (!vport->vport_upcall_stats_percpu) {
> + err = -ENOMEM;
> + goto exit_unlock_free;
> + }
> +
> err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
> info->snd_portid, info->snd_seq, 0,
> OVS_VPORT_CMD_NEW, GFP_KERNEL); @@ -2507,6 +2558,7 @@
> static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
> [OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
> [OVS_VPORT_ATTR_IFINDEX] = { .type = NLA_U32 },
> [OVS_VPORT_ATTR_NETNSID] = { .type = NLA_S32 },
> + [OVS_VPORT_ATTR_UPCALL_STATS] = { .type = NLA_NESTED },
> };
>
> static const struct genl_small_ops dp_vport_genl_ops[] = { diff --git
> a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h index
> 0cd29971a907..933dec5e4175 100644
> --- a/net/openvswitch/datapath.h
> +++ b/net/openvswitch/datapath.h
> @@ -50,6 +50,18 @@ struct dp_stats_percpu {
> struct u64_stats_sync syncp;
> };
>
> +/**
> + * struct vport_upcall_stats_percpu - per-cpu packet upcall
> +statistics for
> + * a given vport.
> + * @n_upcall_success: Number of packets that upcall to userspace succeed.
> + * @n_upcall_fail: Number of packets that upcall to userspace failed.
> + */
> +struct vport_upcall_stats_percpu {
> + u64_stats_t n_upcall_success;
> + u64_stats_t n_upcall_fail;
> + struct u64_stats_sync syncp;
> +};
> +
> /**
> * struct dp_nlsk_pids - array of netlink portids of for a datapath.
> * This is used when OVS_DP_F_DISPATCH_UPCALL_PER_CPU
> diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c index
> 82a74f998966..a69c9356b57c 100644
> --- a/net/openvswitch/vport.c
> +++ b/net/openvswitch/vport.c
> @@ -284,6 +284,54 @@ void ovs_vport_get_stats(struct vport *vport, struct ovs_vport_stats *stats)
> stats->tx_packets = dev_stats->tx_packets; }
>
> +/**
> + * ovs_vport_get_upcall_stats - retrieve upcall stats
> + *
> + * @vport: vport from which to retrieve the stats
> + * @ovs_vport_upcall_stats: location to store stats
> + *
> + * Retrieves upcall stats for the given device.
> + *
> + * Must be called with ovs_mutex or rcu_read_lock.
> + */
> +void ovs_vport_get_upcall_stats(struct vport *vport, struct
> +ovs_vport_upcall_stats *stats) {
> + int i;
> +
> + stats->upcall_success = 0;
> + stats->upcall_fail = 0;
> +
> + for_each_possible_cpu(i) {
> + const struct vport_upcall_stats_percpu *percpu_upcall_stats;
> + unsigned int start;
> +
> + percpu_upcall_stats = per_cpu_ptr(vport->vport_upcall_stats_percpu, i);
> + do {
> + start = u64_stats_fetch_begin(&percpu_upcall_stats->syncp);
> + stats->upcall_success +=
> + u64_stats_read(&percpu_upcall_stats->n_upcall_success);
> + stats->upcall_fail += u64_stats_read(&percpu_upcall_stats->n_upcall_fail);
> + } while (u64_stats_fetch_retry(&percpu_upcall_stats->syncp, start));
> + }
> +}
> +
> +int ovs_vport_put_upcall_stats(struct sk_buff *skb,
> + struct ovs_vport_upcall_stats *stats) {
> + if (nla_put_u64_64bit(skb, OVS_VPORT_UPCALL_SUCCESS, stats->upcall_success,
> + OVS_VPORT_ATTR_PAD))
> + goto nla_put_failure;
> +
> + if (nla_put_u64_64bit(skb, OVS_VPORT_UPCALL_FAIL, stats->upcall_fail,
> + OVS_VPORT_ATTR_PAD))
> + goto nla_put_failure;
> +
> + return 0;
> +
> +nla_put_failure:
> + return -EMSGSIZE;
> +}
> +
> /**
> * ovs_vport_get_options - retrieve device options
> *
> diff --git a/net/openvswitch/vport.h b/net/openvswitch/vport.h index
> 7d276f60c000..02cf8c589588 100644
> --- a/net/openvswitch/vport.h
> +++ b/net/openvswitch/vport.h
> @@ -32,6 +32,11 @@ struct vport *ovs_vport_locate(const struct net
> *net, const char *name);
>
> void ovs_vport_get_stats(struct vport *, struct ovs_vport_stats *);
>
> +void ovs_vport_get_upcall_stats(struct vport *vport,
> + struct ovs_vport_upcall_stats *stats); int
> +ovs_vport_put_upcall_stats(struct sk_buff *skb,
> + struct ovs_vport_upcall_stats *stats);
> +
> int ovs_vport_set_options(struct vport *, struct nlattr *options);
> int ovs_vport_get_options(const struct vport *, struct sk_buff *);
>
> @@ -78,6 +83,7 @@ struct vport {
> struct hlist_node hash_node;
> struct hlist_node dp_hash_node;
> const struct vport_ops *ops;
> + struct vport_upcall_stats_percpu __percpu
> +*vport_upcall_stats_percpu;
>
> struct list_head detach_list;
> struct rcu_head rcu;
> --
> 2.27.0

2022-11-30 09:32:43

by wangchuanlei

[permalink] [raw]
Subject: Re: [PATCH] [openvswitch v4] openvswitch: Add support to count upcall packets

Hi, Eelco Chaudron

> +struct ovs_vport_upcall_stats {
> + __u64 tx_success; /* total packets upcalls succeed */
> + __u64 tx_fail; /* total packets upcalls failed */
> +};
> +

=> This is no longer a user API data structure, so it should be removed from this include.
=> --This structure will be used in userspace; ovs-vswitchd will use it.
=> -- and that will be another patch for ovs-vswitchd, so should it be kept here?

The above was your response to v4. However, as this structure is not part of the UAPI
from the Linux side, it should not be exposed. If you need a similar structure in OVS one should be defined there.

--Yes, I modified it here; v6 will be pushed in several minutes. Thanks for the review!
Best regards!
wangchuanlei

On 30 Nov 2022, at 8:25, wangchuanlei wrote:

> Add support to count upall packets, when kmod of openvswitch upcall to
> userspace , here count the number of packets for upcall succeed and
> failed, which is a better way to see how many packets upcalled to
> userspace(ovs-vswitchd) on every interfaces.
>
> Here modify format of code used by comments of v4.
>
> Changes since v4:
> - optimize the function used by comments
>
> Changes since v3:
> - use nested NLA_NESTED attribute in netlink message
>
> Changes since v2:
> - add count of upcall failed packets
>
> Changes since v1:
> - add count of upcall succeed packets
>
> Signed-off-by: wangchuanlei <[email protected]>
> ---
> include/uapi/linux/openvswitch.h | 19 ++++++++++++
> net/openvswitch/datapath.c | 50 ++++++++++++++++++++++++++++++++
> net/openvswitch/vport.c | 44 ++++++++++++++++++++++++++++
> net/openvswitch/vport.h | 19 ++++++++++++
> 4 files changed, 132 insertions(+)
>
> diff --git a/include/uapi/linux/openvswitch.h
> b/include/uapi/linux/openvswitch.h
> index 94066f87e9ee..ad7cea9827cc 100644
> --- a/include/uapi/linux/openvswitch.h
> +++ b/include/uapi/linux/openvswitch.h
> @@ -126,6 +126,11 @@ struct ovs_vport_stats {
> __u64 tx_dropped; /* no space available in linux */
> };
>
> +struct ovs_vport_upcall_stats {
> + __u64 tx_success; /* total packets upcalls succeed */
> + __u64 tx_fail; /* total packets upcalls failed */
> +};
> +

=> This is no longer a user API data structure, so it should be removed from this include.
=> --This structure will be used in userspace; ovs-vswitchd will use it.
=> -- and that will be another patch for ovs-vswitchd, so should it be kept here?

The above was your response to v4. However, as this structure is not part of the UAPI from the Linux side, it should not be exposed. If you need a similar structure in OVS one should be defined there.
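A purely hypothetical sketch of what a userspace-side equivalent could look like in the OVS tree instead (the struct name, field names, and location are illustrative assumptions, not an existing OVS definition):

	#include <stdint.h>

	/* Hypothetical ovs-vswitchd-side mirror of the upcall counters. */
	struct dpif_upcall_stats {
		uint64_t n_success;	/* packets successfully upcalled */
		uint64_t n_fail;	/* packets for which the upcall failed */
	};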


> /* Allow last Netlink attribute to be unaligned */
> #define OVS_DP_F_UNALIGNED (1 << 0)
>
> @@ -277,11 +282,25 @@ enum ovs_vport_attr {
> OVS_VPORT_ATTR_PAD,
> OVS_VPORT_ATTR_IFINDEX,
> OVS_VPORT_ATTR_NETNSID,
> + OVS_VPORT_ATTR_UPCALL_STATS, /* struct ovs_vport_upcall_stats */
> __OVS_VPORT_ATTR_MAX
> };
>
> #define OVS_VPORT_ATTR_MAX (__OVS_VPORT_ATTR_MAX - 1)
>
> +/**
> + * enum ovs_vport_upcall_attr - attributes for %OVS_VPORT_UPCALL*
> +commands
> + * @OVS_VPORT_UPCALL_SUCCESS: 64-bit upcall success packets.
> + * @OVS_VPORT_UPCALL_FAIL: 64-bit upcall fail packets.
> + */
> +enum ovs_vport_upcall_attr {
> + OVS_VPORT_UPCALL_SUCCESS,
> + OVS_VPORT_UPCALL_FAIL,
> + __OVS_VPORT_UPCALL_MAX
> +};
> +
> +#define OVS_VPORT_UPCALL_MAX (__OVS_VPORT_UPCALL_MAX - 1)
> +
> enum {
> OVS_VXLAN_EXT_UNSPEC,
> OVS_VXLAN_EXT_GBP, /* Flag or __u32 */
> diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
> index c8a9075ddd0a..f9279aee2adb 100644
> --- a/net/openvswitch/datapath.c
> +++ b/net/openvswitch/datapath.c
> @@ -209,6 +209,26 @@ static struct vport *new_vport(const struct vport_parms *parms)
> return vport;
> }
>
> +static void ovs_vport_upcalls(struct sk_buff *skb,
> + const struct dp_upcall_info *upcall_info,
> + bool upcall_result)
> +{
> + struct vport *p = OVS_CB(skb)->input_vport;
> + struct vport_upcall_stats_percpu *vport_stats;
> +
> + if (upcall_info->cmd != OVS_PACKET_CMD_MISS &&
> + upcall_info->cmd != OVS_PACKET_CMD_ACTION)
> + return;
> +
> + vport_stats = this_cpu_ptr(p->upcall_stats);
> + u64_stats_update_begin(&vport_stats->syncp);
> + if (upcall_result)
> + u64_stats_inc(&vport_stats->n_success);
> + else
> + u64_stats_inc(&vport_stats->n_fail);
> + u64_stats_update_end(&vport_stats->syncp);
> +}
> +
> void ovs_dp_detach_port(struct vport *p) {
> ASSERT_OVSL();
> @@ -216,6 +236,9 @@ void ovs_dp_detach_port(struct vport *p)
> /* First drop references to device. */
> hlist_del_rcu(&p->dp_hash_node);
>
> + /* Free percpu memory */
> + free_percpu(p->upcall_stats);
> +
> /* Then destroy it. */
> ovs_vport_del(p);
> }
> @@ -305,6 +328,8 @@ int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
> err = queue_userspace_packet(dp, skb, key, upcall_info, cutlen);
> else
> err = queue_gso_packets(dp, skb, key, upcall_info, cutlen);
> +
> + ovs_vport_upcalls(skb, upcall_info, !err);
> if (err)
> goto err;
>
> @@ -1825,6 +1850,12 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
> goto err_destroy_portids;
> }
>
> + vport->upcall_stats = netdev_alloc_pcpu_stats(struct vport_upcall_stats_percpu);
> + if (!vport->upcall_stats) {
> + err = -ENOMEM;
> + goto err_destroy_portids;
> + }
> +
> err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
> info->snd_seq, 0, OVS_DP_CMD_NEW);
> BUG_ON(err < 0);
> @@ -2068,6 +2099,8 @@ static int ovs_vport_cmd_fill_info(struct vport
> *vport, struct sk_buff *skb, {
> struct ovs_header *ovs_header;
> struct ovs_vport_stats vport_stats;
> + struct ovs_vport_upcall_stats stat;
> + struct nlattr *nla;
> int err;
>
> ovs_header = genlmsg_put(skb, portid, seq, &dp_vport_genl_family,
> @@ -2097,6 +2130,15 @@ static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
> OVS_VPORT_ATTR_PAD))
> goto nla_put_failure;
>
> + nla = nla_nest_start_noflag(skb, OVS_VPORT_ATTR_UPCALL_STATS);
> + if (!nla)
> + goto nla_put_failure;
> +
> + ovs_vport_get_upcall_stats(vport, &stat);
> + if (ovs_vport_put_upcall_stats(skb, &stat))
> + goto nla_put_failure;
> + nla_nest_end(skb, nla);
> +
> if (ovs_vport_get_upcall_portids(vport, skb))
> goto nla_put_failure;
>
> @@ -2278,6 +2320,13 @@ static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
> goto exit_unlock_free;
> }
>
> + vport->upcall_stats = netdev_alloc_pcpu_stats(struct vport_upcall_stats_percpu);
> +
> + if (!vport->upcall_stats) {
> + err = -ENOMEM;
> + goto exit_unlock_free;
> + }
> +
> err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
> info->snd_portid, info->snd_seq, 0,
> OVS_VPORT_CMD_NEW, GFP_KERNEL);
> @@ -2507,6 +2556,7 @@ static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
> [OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
> [OVS_VPORT_ATTR_IFINDEX] = { .type = NLA_U32 },
> [OVS_VPORT_ATTR_NETNSID] = { .type = NLA_S32 },
> + [OVS_VPORT_ATTR_UPCALL_STATS] = { .type = NLA_NESTED },
> };
>
> static const struct genl_small_ops dp_vport_genl_ops[] = {
> diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c
> index 82a74f998966..fd95536b35ef 100644
> --- a/net/openvswitch/vport.c
> +++ b/net/openvswitch/vport.c
> @@ -284,6 +284,50 @@ void ovs_vport_get_stats(struct vport *vport, struct ovs_vport_stats *stats)
> stats->tx_packets = dev_stats->tx_packets; }
>
> +/**
> + * ovs_vport_get_upcall_stats - retrieve upcall stats
> + *
> + * @vport: vport from which to retrieve the stats
> + * @ovs_vport_upcall_stats: location to store stats
> + *
> + * Retrieves upcall stats for the given device.
> + *
> + * Must be called with ovs_mutex or rcu_read_lock.
> + */
> +void ovs_vport_get_upcall_stats(struct vport *vport, struct ovs_vport_upcall_stats *stats)
> +{
> + int i;
> +
> + stats->tx_success = 0;
> + stats->tx_fail = 0;
> +
> + for_each_possible_cpu(i) {
> + const struct vport_upcall_stats_percpu *upcall_stats;
> + unsigned int start;
> +
> + upcall_stats = per_cpu_ptr(vport->upcall_stats, i);
> + do {
> + start = u64_stats_fetch_begin(&upcall_stats->syncp);
> + stats->tx_success += u64_stats_read(&upcall_stats->n_success);
> + stats->tx_fail += u64_stats_read(&upcall_stats->n_fail);
> + } while (u64_stats_fetch_retry(&upcall_stats->syncp, start));
> + }
> +}
> +
> +int ovs_vport_put_upcall_stats(struct sk_buff *skb,
> + struct ovs_vport_upcall_stats *stats) {
> + if (nla_put_u64_64bit(skb, OVS_VPORT_UPCALL_SUCCESS, stats->tx_success,
> + OVS_VPORT_ATTR_PAD))
> + return -EMSGSIZE;
> +
> + if (nla_put_u64_64bit(skb, OVS_VPORT_UPCALL_FAIL, stats->tx_fail,
> + OVS_VPORT_ATTR_PAD))
> + return -EMSGSIZE;
> +
> + return 0;
> +}
> +
> /**
> * ovs_vport_get_options - retrieve device options
> *
> diff --git a/net/openvswitch/vport.h b/net/openvswitch/vport.h index
> 7d276f60c000..b872117af763 100644
> --- a/net/openvswitch/vport.h
> +++ b/net/openvswitch/vport.h
> @@ -32,6 +32,11 @@ struct vport *ovs_vport_locate(const struct net
> *net, const char *name);
>
> void ovs_vport_get_stats(struct vport *, struct ovs_vport_stats *);
>
> +void ovs_vport_get_upcall_stats(struct vport *vport,
> + struct ovs_vport_upcall_stats *stats);
> +int ovs_vport_put_upcall_stats(struct sk_buff *skb,
> + struct ovs_vport_upcall_stats *stats);
> +
> int ovs_vport_set_options(struct vport *, struct nlattr *options);
> int ovs_vport_get_options(const struct vport *, struct sk_buff *);
>
> @@ -65,6 +70,7 @@ struct vport_portids {
> * @hash_node: Element in @dev_table hash table in vport.c.
> * @dp_hash_node: Element in @datapath->ports hash table in datapath.c.
> * @ops: Class structure.
> + * @upcall_stats: Upcall stats of every ports.
> * @detach_list: list used for detaching vport in net-exit call.
> * @rcu: RCU callback head for deferred destruction.
> */
> @@ -78,6 +84,7 @@ struct vport {
> struct hlist_node hash_node;
> struct hlist_node dp_hash_node;
> const struct vport_ops *ops;
> + struct vport_upcall_stats_percpu __percpu *upcall_stats;
>
> struct list_head detach_list;
> struct rcu_head rcu;
> @@ -137,6 +144,18 @@ struct vport_ops {
> struct list_head list;
> };
>
> +/**
> + * struct vport_upcall_stats_percpu - per-cpu packet upcall
> +statistics for
> + * a given vport.
> + * @n_success: Number of packets that upcall to userspace succeed.
> + * @n_fail: Number of packets that upcall to userspace failed.
> + */
> +struct vport_upcall_stats_percpu {
> + struct u64_stats_sync syncp;
> + u64_stats_t n_success;
> + u64_stats_t n_fail;
> +};
> +
> struct vport *ovs_vport_alloc(int priv_size, const struct vport_ops *,
> const struct vport_parms *);
> void ovs_vport_free(struct vport *);
> --
> 2.27.0