2016-03-19 02:11:42

by Bob Copeland

[permalink] [raw]
Subject: [PATCH 0/5] mesh struct cleanups

This series makes a few minor mesh cleanups after the rhashtable
rework -- mostly some reductions in padding and allocation waste.

Bob Copeland (5):
  mac80211: mesh: handle failed alloc for rmc cache
  mac80211: mesh: use hlist for rmc cache
  mac80211: mesh: embed gates hlist head directly
  mac80211: mesh: reorder structure members
  mac80211: mesh: fix mesh path kerneldoc

 net/mac80211/mesh.c         | 21 +++++++++++++--------
 net/mac80211/mesh.h         | 31 +++++++++++++++++--------------
 net/mac80211/mesh_pathtbl.c | 18 ++++--------------
 3 files changed, 34 insertions(+), 36 deletions(-)

--
2.6.1



2016-03-19 02:11:43

by Bob Copeland

[permalink] [raw]
Subject: [PATCH 4/5] mac80211: mesh: reorder structure members

Reduce padding waste in struct mesh_table and struct rmc_entry by
moving the smaller fields to the end.

Signed-off-by: Bob Copeland <[email protected]>
---
net/mac80211/mesh.h | 5 ++---
1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index 46b540a25d9d..4a59c034cc6d 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -133,11 +133,10 @@ struct mesh_path {
* @rhash: the rhashtable containing struct mesh_paths, keyed by dest addr
*/
struct mesh_table {
- atomic_t entries; /* Up to MAX_MESH_NEIGHBOURS */
struct hlist_head known_gates;
spinlock_t gates_lock;
-
struct rhashtable rhead;
+ atomic_t entries; /* Up to MAX_MESH_NEIGHBOURS */
};

/* Recent multicast cache */
@@ -159,8 +158,8 @@ struct mesh_table {
*/
struct rmc_entry {
struct hlist_node list;
- u32 seqnum;
unsigned long exp_time;
+ u32 seqnum;
u8 sa[ETH_ALEN];
};

--
2.6.1


2016-03-19 02:11:43

by Bob Copeland

[permalink] [raw]
Subject: [PATCH 5/5] mac80211: mesh: fix mesh path kerneldoc

Several of the mesh path fields are undocumented and some
of the documentation is no longer correct or relevant after
the switch to rhashtable. Clean up the kernel doc
accordingly and reorder some fields to match the structure
layout.

Signed-off-by: Bob Copeland <[email protected]>
---
net/mac80211/mesh.h | 20 ++++++++++++--------
1 file changed, 12 insertions(+), 8 deletions(-)

diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index 4a59c034cc6d..f298987228c9 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -21,8 +21,6 @@
/**
* enum mesh_path_flags - mac80211 mesh path flags
*
- *
- *
* @MESH_PATH_ACTIVE: the mesh path can be used for forwarding
* @MESH_PATH_RESOLVING: the discovery process is running for this mesh path
* @MESH_PATH_SN_VALID: the mesh path contains a valid destination sequence
@@ -70,12 +68,16 @@ enum mesh_deferred_task_flags {
* struct mesh_path - mac80211 mesh path structure
*
* @dst: mesh path destination mac address
+ * @mpp: mesh proxy mac address
+ * @rhash: rhashtable list pointer
+ * @gate_list: list pointer for known gates list
* @sdata: mesh subif
* @next_hop: mesh neighbor to which frames for this destination will be
* forwarded
* @timer: mesh path discovery timer
* @frame_queue: pending queue for frames sent to this destination while the
* path is unresolved
+ * @rcu: rcu head for freeing mesh path
* @sn: target sequence number
* @metric: current metric to this destination
* @hop_count: hops to destination
@@ -94,10 +96,10 @@ enum mesh_deferred_task_flags {
* @is_gate: the destination station of this path is a mesh gate
*
*
- * The combination of dst and sdata is unique in the mesh path table. Since the
- * next_hop STA is only protected by RCU as well, deleting the STA must also
- * remove/substitute the mesh_path structure and wait until that is no longer
- * reachable before destroying the STA completely.
+ * The dst address is unique in the mesh path table. Since the mesh_path is
+ * protected by RCU, deleting the next_hop STA must remove / substitute the
+ * mesh_path structure and wait until that is no longer reachable before
+ * destroying the STA completely.
*/
struct mesh_path {
u8 dst[ETH_ALEN];
@@ -127,10 +129,11 @@ struct mesh_path {
/**
* struct mesh_table
*
- * @entries: number of entries in the table
* @known_gates: list of known mesh gates and their mpaths by the station. The
* gate's mpath may or may not be resolved and active.
- * @rhash: the rhashtable containing struct mesh_paths, keyed by dest addr
+ * @gates_lock: protects updates to known_gates
+ * @rhead: the rhashtable containing struct mesh_paths, keyed by dest addr
+ * @entries: number of entries in the table
*/
struct mesh_table {
struct hlist_head known_gates;
@@ -151,6 +154,7 @@ struct mesh_table {
* @seqnum: mesh sequence number of the frame
* @exp_time: expiration time of the entry, in jiffies
* @sa: source address of the frame
+ * @list: hashtable list pointer
*
* The Recent Multicast Cache keeps track of the latest multicast frames that
* have been received by a mesh interface and discards received multicast frames
--
2.6.1


2016-03-19 02:11:42

by Bob Copeland

[permalink] [raw]
Subject: [PATCH 1/5] mac80211: mesh: handle failed alloc for rmc cache

In the unlikely case that mesh_rmc_init() fails with -ENOMEM,
the rmc pointer will be left as NULL but the interface is still
operational because ieee80211_mesh_init_sdata() is not allowed
to fail.

If this happens, we would blindly dereference rmc when checking
whether a multicast frame is in the cache. Instead just drop the
frames in the forwarding path.

Signed-off-by: Bob Copeland <[email protected]>
---
net/mac80211/mesh.c | 3 +++
1 file changed, 3 insertions(+)

diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index a216c439b6f2..d0d8eeaa8129 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -220,6 +220,9 @@ int mesh_rmc_check(struct ieee80211_sub_if_data *sdata,
u8 idx;
struct rmc_entry *p, *n;

+ if (!rmc)
+ return -1;
+
/* Don't care about endianness since only match matters */
memcpy(&seqnum, &mesh_hdr->seqnum, sizeof(mesh_hdr->seqnum));
idx = le32_to_cpu(mesh_hdr->seqnum) & rmc->idx_mask;
--
2.6.1


2016-03-19 02:11:50

by Bob Copeland

[permalink] [raw]
Subject: [PATCH 2/5] mac80211: mesh: use hlist for rmc cache

The RMC cache has 256 list heads plus a u32, which puts it at the
unfortunate size of 4104 bytes with padding. kmalloc() will then
round this up to the next power-of-two, so we wind up actually
using two pages here where most of the second is wasted.

Switch to hlist heads here to reduce the structure size down to
fit within a page.

Signed-off-by: Bob Copeland <[email protected]>
---
net/mac80211/mesh.c | 18 ++++++++++--------
net/mac80211/mesh.h | 4 ++--
2 files changed, 12 insertions(+), 10 deletions(-)

diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index d0d8eeaa8129..1a2aaf461e98 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -174,22 +174,23 @@ int mesh_rmc_init(struct ieee80211_sub_if_data *sdata)
return -ENOMEM;
sdata->u.mesh.rmc->idx_mask = RMC_BUCKETS - 1;
for (i = 0; i < RMC_BUCKETS; i++)
- INIT_LIST_HEAD(&sdata->u.mesh.rmc->bucket[i]);
+ INIT_HLIST_HEAD(&sdata->u.mesh.rmc->bucket[i]);
return 0;
}

void mesh_rmc_free(struct ieee80211_sub_if_data *sdata)
{
struct mesh_rmc *rmc = sdata->u.mesh.rmc;
- struct rmc_entry *p, *n;
+ struct rmc_entry *p;
+ struct hlist_node *n;
int i;

if (!sdata->u.mesh.rmc)
return;

for (i = 0; i < RMC_BUCKETS; i++) {
- list_for_each_entry_safe(p, n, &rmc->bucket[i], list) {
- list_del(&p->list);
+ hlist_for_each_entry_safe(p, n, &rmc->bucket[i], list) {
+ hlist_del(&p->list);
kmem_cache_free(rm_cache, p);
}
}
@@ -218,7 +219,8 @@ int mesh_rmc_check(struct ieee80211_sub_if_data *sdata,
u32 seqnum = 0;
int entries = 0;
u8 idx;
- struct rmc_entry *p, *n;
+ struct rmc_entry *p;
+ struct hlist_node *n;

if (!rmc)
return -1;
@@ -226,11 +228,11 @@ int mesh_rmc_check(struct ieee80211_sub_if_data *sdata,
/* Don't care about endianness since only match matters */
memcpy(&seqnum, &mesh_hdr->seqnum, sizeof(mesh_hdr->seqnum));
idx = le32_to_cpu(mesh_hdr->seqnum) & rmc->idx_mask;
- list_for_each_entry_safe(p, n, &rmc->bucket[idx], list) {
+ hlist_for_each_entry_safe(p, n, &rmc->bucket[idx], list) {
++entries;
if (time_after(jiffies, p->exp_time) ||
entries == RMC_QUEUE_MAX_LEN) {
- list_del(&p->list);
+ hlist_del(&p->list);
kmem_cache_free(rm_cache, p);
--entries;
} else if ((seqnum == p->seqnum) && ether_addr_equal(sa, p->sa))
@@ -244,7 +246,7 @@ int mesh_rmc_check(struct ieee80211_sub_if_data *sdata,
p->seqnum = seqnum;
p->exp_time = jiffies + RMC_TIMEOUT;
memcpy(p->sa, sa, ETH_ALEN);
- list_add(&p->list, &rmc->bucket[idx]);
+ hlist_add_head(&p->list, &rmc->bucket[idx]);
return 0;
}

diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index e1415c952e9c..bc3f9a32b5a4 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -158,14 +158,14 @@ struct mesh_table {
* that are found in the cache.
*/
struct rmc_entry {
- struct list_head list;
+ struct hlist_node list;
u32 seqnum;
unsigned long exp_time;
u8 sa[ETH_ALEN];
};

struct mesh_rmc {
- struct list_head bucket[RMC_BUCKETS];
+ struct hlist_head bucket[RMC_BUCKETS];
u32 idx_mask;
};

--
2.6.1


2016-03-19 02:11:41

by Bob Copeland

[permalink] [raw]
Subject: [PATCH 3/5] mac80211: mesh: embed gates hlist head directly

Since we have converted the mesh path tables to rhashtable, we are
no longer swapping out the entire mesh_pathtbl pointer with RCU.
As a result, we no longer need indirection to the hlist head for
the gates list and can simply embed it, saving a pair of
pointer-sized allocations.

Signed-off-by: Bob Copeland <[email protected]>
---
net/mac80211/mesh.h | 2 +-
net/mac80211/mesh_pathtbl.c | 18 ++++--------------
2 files changed, 5 insertions(+), 15 deletions(-)

diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index bc3f9a32b5a4..46b540a25d9d 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -134,7 +134,7 @@ struct mesh_path {
*/
struct mesh_table {
atomic_t entries; /* Up to MAX_MESH_NEIGHBOURS */
- struct hlist_head *known_gates;
+ struct hlist_head known_gates;
spinlock_t gates_lock;

struct rhashtable rhead;
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 1c9412a29ca3..6db2ddfa0695 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -58,12 +58,7 @@ static struct mesh_table *mesh_table_alloc(void)
if (!newtbl)
return NULL;

- newtbl->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
- if (!newtbl->known_gates) {
- kfree(newtbl);
- return NULL;
- }
- INIT_HLIST_HEAD(newtbl->known_gates);
+ INIT_HLIST_HEAD(&newtbl->known_gates);
atomic_set(&newtbl->entries, 0);
spin_lock_init(&newtbl->gates_lock);

@@ -341,7 +336,7 @@ int mesh_path_add_gate(struct mesh_path *mpath)
mpath->sdata->u.mesh.num_gates++;

spin_lock(&tbl->gates_lock);
- hlist_add_head_rcu(&mpath->gate_list, tbl->known_gates);
+ hlist_add_head_rcu(&mpath->gate_list, &tbl->known_gates);
spin_unlock(&tbl->gates_lock);

spin_unlock_bh(&mpath->state_lock);
@@ -759,16 +754,11 @@ int mesh_path_send_to_gates(struct mesh_path *mpath)
struct mesh_path *from_mpath = mpath;
struct mesh_path *gate;
bool copy = false;
- struct hlist_head *known_gates;

tbl = sdata->u.mesh.mesh_paths;
- known_gates = tbl->known_gates;
-
- if (!known_gates)
- return -EHOSTUNREACH;

rcu_read_lock();
- hlist_for_each_entry_rcu(gate, known_gates, gate_list) {
+ hlist_for_each_entry_rcu(gate, &tbl->known_gates, gate_list) {
if (gate->flags & MESH_PATH_ACTIVE) {
mpath_dbg(sdata, "Forwarding to %pM\n", gate->dst);
mesh_path_move_to_queue(gate, from_mpath, copy);
@@ -781,7 +771,7 @@ int mesh_path_send_to_gates(struct mesh_path *mpath)
}
}

- hlist_for_each_entry_rcu(gate, known_gates, gate_list) {
+ hlist_for_each_entry_rcu(gate, &tbl->known_gates, gate_list) {
mpath_dbg(sdata, "Sending to %pM\n", gate->dst);
mesh_path_tx_pending(gate);
}
--
2.6.1


2016-04-05 10:22:37

by Johannes Berg

[permalink] [raw]
Subject: Re: [PATCH 0/5] mesh struct cleanups

On Fri, 2016-03-18 at 22:11 -0400, Bob Copeland wrote:
> This series makes a few minor mesh cleanups after the rhashtable
> rework -- mostly some reductions in padding and allocation waste.
>

Also applied.

johannes