2008-02-04 21:24:49

by Luis Carlos Cobo

Subject: [PATCH 12/13] o11s: support for on-demand Hybrid Wireless Mesh Protocol

This file implements the on-demand Hybrid Wireless Mesh Protocol, for now using
hop count as the metric. When no mesh path exists for a given destination, or
the mesh path is not active, frames addressed to that destination are queued
and a Path Request (PREQ) frame is sent. Queued frames are sent once the path
is resolved (usually after reception of a Path Reply, PREP) or discarded if
discovery times out. Path Requests are also sent to refresh paths that are in
use and close to expiring.
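
From the transmit path the expected usage is roughly the following (a minimal
sketch only; the 802.11 header construction around it is outside this patch
and 'hdr' is a placeholder for the header being built):

    u8 next_hop[ETH_ALEN];

    if (mesh_lookup_nexthop(next_hop, skb, dev) == 0) {
            /* active path: address the frame to the next hop */
            memcpy(hdr->addr1, next_hop, ETH_ALEN);
    } else {
            /* no active path: the frame has been queued (or dropped) and a
             * PREQ has been scheduled, so the caller must not free skb
             */
            return;
    }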

A Path Error (PERR) is sent when a path discovery, triggered by an attempt to
forward a frame originated at a different Mesh Point, times out. We do not
currently keep track of per-destination transmission error rates; once we do, a
Path Error will also be sent when a peer link is deemed unreachable.
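
For illustration, frame discard on discovery timeout could report the failure
along these lines (a sketch only; everything except mesh_send_path_error() is
an assumption about code outside this patch):

    static void mesh_path_discovery_failed(struct mesh_path *mpath,
                                           struct net_device *dev,
                                           bool forwarded_frames_queued)
    {
            /* frames queued for forwarding cannot be delivered, so report
             * the destination as unreachable to our neighbours
             */
            if (forwarded_frames_queued)
                    mesh_send_path_error(mpath->dst, cpu_to_le32(mpath->dsn),
                                         dev->broadcast, dev);
    }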

Support for multiple destinations in Path Requests and Path Errors, as well as
precursors, has not been implemented yet.

Signed-off-by: Luis Carlos Cobo <[email protected]>
---
net/mac80211/mesh_hwmp.c | 813 ++++++++++++++++++++++++++++++++++++++++++++++
1 files changed, 813 insertions(+), 0 deletions(-)
create mode 100644 net/mac80211/mesh_hwmp.c

diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
new file mode 100644
index 0000000..89c1959
--- /dev/null
+++ b/net/mac80211/mesh_hwmp.c
@@ -0,0 +1,813 @@
+/*
+ * Copyright (c) 2008 open80211s Ltd.
+ * Author: Luis Carlos Cobo <[email protected]>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "mesh.h"
+
+#define IEEE80211_FC(type, stype) cpu_to_le16(type | stype)
+
+/* Number of frames buffered per destination for unresolved destinations */
+#define MESH_FRAME_QUEUE_LEN 10
+#define MAX_PREQ_QUEUE_LEN 64
+
+/* Destination only */
+#define MP_F_DO 0x1
+/* Reply and forward */
+#define MP_F_RF 0x2
+
+/* HWMP IE processing macros */
+#define AE_F (1<<6)
+#define AE_F_SET(x) (*x & AE_F)
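+/*
+ * PREQ element layout used by the offsets below: flags (1), hop count (1),
+ * TTL (1), PREQ ID (4), originator address (6), originator DSN (4),
+ * originator external address (6, present only if the AE flag is set),
+ * lifetime (4), metric (4), destination count (1), destination flags (1),
+ * destination address (6), destination DSN (4).
+ */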
+#define PREQ_IE_FLAGS(x) (*(x))
+#define PREQ_IE_HOPCOUNT(x) (*(x + 1))
+#define PREQ_IE_TTL(x) (*(x + 2))
+#define PREQ_IE_PREQ_ID(x) le32_to_cpu(*((u32 *) (x + 3)))
+#define PREQ_IE_ORIG_ADDR(x) (x + 7)
+#define PREQ_IE_ORIG_DSN(x) le32_to_cpu(*((u32 *) (x + 13)))
+#define PREQ_IE_LIFETIME(x) le32_to_cpu(*((u32 *) \
+ (AE_F_SET(x) ? x + 23 : x + 17)))
+#define PREQ_IE_METRIC(x) le32_to_cpu(*((u32 *) \
+ (AE_F_SET(x) ? x + 27 : x + 21)))
+#define PREQ_IE_DST_F(x) (*(AE_F_SET(x) ? x + 32 : x + 26))
+#define PREQ_IE_DST_ADDR(x) (AE_F_SET(x) ? x + 33 : x + 27)
+#define PREQ_IE_DST_DSN(x) le32_to_cpu(*((u32 *) \
+ (AE_F_SET(x) ? x + 39 : x + 33)))
+
+
+#define PREP_IE_FLAGS(x) PREQ_IE_FLAGS(x)
+#define PREP_IE_HOPCOUNT(x) PREQ_IE_HOPCOUNT(x)
+#define PREP_IE_TTL(x) PREQ_IE_TTL(x)
+#define PREP_IE_ORIG_ADDR(x) (x + 3)
+#define PREP_IE_ORIG_DSN(x) le32_to_cpu(*((u32 *) (x + 9)))
+#define PREP_IE_LIFETIME(x) le32_to_cpu(*((u32 *) \
+ (AE_F_SET(x) ? x + 19 : x + 13)))
+#define PREP_IE_METRIC(x) le32_to_cpu(*((u32 *) \
+ (AE_F_SET(x) ? x + 23 : x + 17)))
+#define PREP_IE_DST_ADDR(x) (AE_F_SET(x) ? x + 27 : x + 21)
+#define PREP_IE_DST_DSN(x) le32_to_cpu(*((u32 *) \
+ (AE_F_SET(x) ? x + 33 : x + 27)))
+
+#define PERR_IE_DST_ADDR(x) (x + 2)
+#define PERR_IE_DST_DSN(x) le32_to_cpu(*((u32 *) (x + 8)))
+
+#define TU_TO_LOCAL(x) (jiffies + msecs_to_jiffies((x) * 1024 / 1000))
+#define MSEC_TO_TU(x) ((x) * 1000 / 1024)
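+/* Serial-number comparison of destination sequence numbers (DSNs), robust
+ * against wrap-around
+ */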
+#define DSN_GT(x, y) ((s32)((y) - (x)) < 0)
+#define DSN_LT(x, y) ((s32)((x) - (y)) < 0)
+
+#define net_traversal_jiffies(s) \
+ msecs_to_jiffies(s->u.sta.mshcfg.dot11MeshHWMPnetDiameterTraversalTime)
+#define default_lifetime(s) \
+ MSEC_TO_TU(s->u.sta.mshcfg.dot11MeshHWMPactivePathTimeout)
+#define min_preq_int_jiff(s) \
+ (msecs_to_jiffies(s->u.sta.mshcfg.dot11MeshHWMPpreqMinInterval))
+#define max_preq_retries(s) (s->u.sta.mshcfg.dot11MeshHWMPmaxPREQretries)
+#define disc_timeout_jiff(s) \
+ msecs_to_jiffies(s->u.sta.mshcfg.min_discovery_timeout)
+
+enum mpath_frame_type {
+ MPATH_PREQ = 0,
+ MPATH_PREP,
+ MPATH_PERR
+};
+
+static int mesh_send_path_sel_frame(enum mpath_frame_type action, u8 flags,
+ u8 *orig_addr, __le32 orig_dsn, u8 dst_flags, u8 *dst,
+ __le32 dst_dsn, u8 *da, u8 hop_count, u8 ttl, __le32 lifetime,
+ __le32 metric, __le32 preq_id, struct net_device *dev)
+{
+ struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
+ struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400);
+ struct ieee80211_mgmt *mgmt;
+ u8 *pos;
+ int ie_len;
+
+ if (!skb)
+ return -1;
+ skb_reserve(skb, local->hw.extra_tx_headroom);
+ /* 25 is the size of the common mgmt part (24) plus the size of the
+ * common action part (1)
+ */
+ mgmt = (struct ieee80211_mgmt *)
+ skb_put(skb, 25 + sizeof(mgmt->u.action.u.mesh_action));
+ memset(mgmt, 0, 25 + sizeof(mgmt->u.action.u.mesh_action));
+ mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT,
+ IEEE80211_STYPE_ACTION);
+
+ memcpy(mgmt->da, da, ETH_ALEN);
+ memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN);
+ /* BSSID is left zeroed, wildcard value */
+ mgmt->u.action.category = MESH_PATH_SEL_CATEGORY;
+ mgmt->u.action.u.mesh_action.action_code = action;
+
+ switch (action) {
+ case MPATH_PREQ:
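+ /* 37 octets: one destination, no AE (external address) subfield */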
+ ie_len = 37;
+ pos = skb_put(skb, 2 + ie_len);
+ *pos++ = WLAN_EID_PREQ;
+ break;
+ case MPATH_PREP:
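+ /* 31 octets: no AE (external address) subfield */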
+ ie_len = 31;
+ pos = skb_put(skb, 2 + ie_len);
+ *pos++ = WLAN_EID_PREP;
+ break;
+ default:
+ kfree_skb(skb);
+ return -ENOTSUPP;
+ }
+ *pos++ = ie_len;
+ *pos++ = flags;
+ *pos++ = hop_count;
+ *pos++ = ttl;
+ if (action == MPATH_PREQ) {
+ memcpy(pos, &preq_id, 4);
+ pos += 4;
+ }
+ memcpy(pos, orig_addr, ETH_ALEN);
+ pos += ETH_ALEN;
+ memcpy(pos, &orig_dsn, 4);
+ pos += 4;
+ memcpy(pos, &lifetime, 4);
+ pos += 4;
+ memcpy(pos, &metric, 4);
+ pos += 4;
+ if (action == MPATH_PREQ) {
+ /* destination count */
+ *pos++ = 1;
+ *pos++ = dst_flags;
+ }
+ memcpy(pos, dst, ETH_ALEN);
+ pos += ETH_ALEN;
+ memcpy(pos, &dst_dsn, 4);
+
+ ieee80211_sta_tx(dev, skb, 0);
+ return 0;
+}
+
+/**
+ * mesh_send_path_error - Sends a PERR mesh management frame
+ *
+ * @dst: broken destination
+ * @dst_dsn: dsn of the broken destination
+ * @ra: node this frame is addressed to
+ * @dev: local mesh interface
+ */
+int mesh_send_path_error(u8 *dst, __le32 dst_dsn, u8 *ra,
+ struct net_device *dev)
+{
+ struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
+ struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400);
+ struct ieee80211_mgmt *mgmt;
+ u8 *pos;
+ int ie_len;
+
+ if (!skb)
+ return -1;
+ skb_reserve(skb, local->hw.extra_tx_headroom);
+ /* 25 is the size of the common mgmt part (24) plus the size of the
+ * common action part (1)
+ */
+ mgmt = (struct ieee80211_mgmt *)
+ skb_put(skb, 25 + sizeof(mgmt->u.action.u.mesh_action));
+ memset(mgmt, 0, 25 + sizeof(mgmt->u.action.u.mesh_action));
+ mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT,
+ IEEE80211_STYPE_ACTION);
+
+ memcpy(mgmt->da, ra, ETH_ALEN);
+ memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN);
+ /* BSSID is left zeroed, wildcard value */
+ mgmt->u.action.category = MESH_PATH_SEL_CATEGORY;
+ mgmt->u.action.u.mesh_action.action_code = MPATH_PERR;
+ ie_len = 12;
+ pos = skb_put(skb, 2 + ie_len);
+ *pos++ = WLAN_EID_PERR;
+ *pos++ = ie_len;
+ /* mode flags, reserved */
+ *pos++ = 0;
+ /* number of destinations */
+ *pos++ = 1;
+ memcpy(pos, dst, ETH_ALEN);
+ pos += ETH_ALEN;
+ memcpy(pos, &dst_dsn, 4);
+
+ ieee80211_sta_tx(dev, skb, 0);
+ return 0;
+}
+
+/**
+ * hwmp_get_rinfo_from_ie - Update routing information towards the originator
+ * and transmitter of an HWMP (PREQ, PREP) frame.
+ * @dev: local mesh interface
+ * @mgmt: mesh action frame containing the HWMP element
+ * @hwmp_ie: pointer to the contents of the HWMP information element
+ *
+ * Returns: last hop metric, or 0 if the frame should not be processed further
+ *
+ * Notes: apart from direct user configuration, this is the only place where
+ * path information is updated.
+ */
+static u32 hwmp_get_rinfo_from_ie(struct net_device *dev,
+ struct ieee80211_mgmt *mgmt,
+ u8 *hwmp_ie)
+{
+ struct mesh_path *mpath;
+ struct mesh_plink *mpl;
+ bool fresh_info;
+ u8 *orig_addr, *ta;
+ u32 orig_dsn, orig_metric;
+ unsigned long orig_lifetime, conv_lifetime;
+ u32 last_hop_metric;
+ bool process = true;
+ u8 action = mgmt->u.action.u.mesh_action.action_code;
+
+ rcu_read_lock();
+ mpl = mesh_plink_lookup(mgmt->sa, dev);
+ if (!mpl) {
+ rcu_read_unlock();
+ return 0;
+ }
+
+ last_hop_metric = mpl->hop_metric;
+ /* Update and check originator routing info */
+ fresh_info = true;
+
+ switch (action) {
+ case MPATH_PREQ:
+ orig_addr = PREQ_IE_ORIG_ADDR(hwmp_ie);
+ orig_dsn = PREQ_IE_ORIG_DSN(hwmp_ie);
+ orig_lifetime = PREQ_IE_LIFETIME(hwmp_ie);
+ orig_metric = PREQ_IE_METRIC(hwmp_ie);
+ break;
+ case MPATH_PREP:
+ /* Originator here refers to the MP that was the destination in
+ * the Path Request. The draft refers to it as the destination
+ * address, even though it is usually the origin of the PREP
+ * frame itself. We divert from the nomenclature in the draft so
+ * that we can use a single function to gather path information
+ * from both PREQ and PREP frames.
+ */
+ orig_addr = PREP_IE_ORIG_ADDR(hwmp_ie);
+ orig_dsn = PREP_IE_ORIG_DSN(hwmp_ie);
+ orig_lifetime = PREP_IE_LIFETIME(hwmp_ie);
+ orig_metric = PREP_IE_METRIC(hwmp_ie);
+ break;
+ default:
+ rcu_read_unlock();
+ return 0;
+ }
+ conv_lifetime = TU_TO_LOCAL(orig_lifetime);
+
+ if (memcmp(orig_addr, dev->dev_addr, ETH_ALEN) == 0) {
+ /* This MP is the originator, we are not interested in this
+ * frame, except for updating transmitter's path info.
+ */
+ process = false;
+ fresh_info = false;
+ } else {
+ mpath = mesh_path_lookup(orig_addr, dev);
+ if (mpath) {
+ spin_lock_bh(&mpath->state_lock);
+ if (mpath->flags & MESH_PATH_FIXED)
+ fresh_info = false;
+ else if ((mpath->flags & MESH_PATH_ACTIVE) &&
+ (mpath->flags & MESH_PATH_DSN_VALID)) {
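+ /* Ignore the frame's routing info if it carries an older
+ * DSN than ours, or the same DSN with a worse metric
+ * (PREQ only)
+ */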
+ if (DSN_GT(mpath->dsn, orig_dsn) ||
+ (mpath->dsn == orig_dsn &&
+ action == MPATH_PREQ &&
+ orig_metric + last_hop_metric
+ > mpath->metric)) {
+ process = false;
+ fresh_info = false;
+ }
+ }
+ } else {
+ add_mesh_path(orig_addr, dev);
+ mpath = mesh_path_lookup(orig_addr, dev);
+ if (!mpath) {
+ rcu_read_unlock();
+ return 0;
+ }
+ spin_lock_bh(&mpath->state_lock);
+ }
+
+ if (fresh_info) {
+ mpath->next_hop = mpl;
+ mpath->flags |= MESH_PATH_DSN_VALID;
+ mpath->metric = orig_metric + last_hop_metric;
+ mpath->dsn = orig_dsn;
+ mpath->lifetime = time_after(mpath->lifetime,
+ conv_lifetime) ?
+ mpath->lifetime : conv_lifetime;
+ activate_path(mpath);
+ spin_unlock_bh(&mpath->state_lock);
+ send_path_pending_frames(mpath);
+ /* the draft says the preq_id should be saved too, but there
+ * does not seem to be any use for it, so skip it for now
+ */
+ } else
+ spin_unlock_bh(&mpath->state_lock);
+ }
+
+ /* Update and check transmitter routing info */
+ ta = mgmt->sa;
+ if (memcmp(orig_addr, ta, ETH_ALEN) == 0)
+ fresh_info = false;
+ else {
+ fresh_info = true;
+
+ mpath = mesh_path_lookup(ta, dev);
+ if (mpath) {
+ spin_lock_bh(&mpath->state_lock);
+ if ((mpath->flags & MESH_PATH_FIXED) ||
+ ((mpath->flags & MESH_PATH_ACTIVE) &&
+ (last_hop_metric > mpath->metric)))
+ fresh_info = false;
+ } else {
+ add_mesh_path(ta, dev);
+ mpath = mesh_path_lookup(ta, dev);
+ if (!mpath) {
+ rcu_read_unlock();
+ return 0;
+ }
+ spin_lock_bh(&mpath->state_lock);
+ }
+
+ if (fresh_info) {
+ mpath->next_hop = mpl;
+ mpath->flags &= ~MESH_PATH_DSN_VALID;
+ mpath->metric = last_hop_metric;
+ mpath->lifetime = time_after(mpath->lifetime,
+ conv_lifetime) ?
+ mpath->lifetime : conv_lifetime;
+ activate_path(mpath);
+ spin_unlock_bh(&mpath->state_lock);
+ send_path_pending_frames(mpath);
+ } else
+ spin_unlock_bh(&mpath->state_lock);
+ }
+
+ rcu_read_unlock();
+
+ return process ? last_hop_metric : 0;
+}
+
+static void hwmp_process_preq_frame(struct net_device *dev,
+ struct ieee80211_mgmt *mgmt,
+ u8 *preq_elem, u32 last_hop_metric) {
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ struct ieee80211_if_sta *ifsta = &sdata->u.sta;
+ struct mesh_path *mpath;
+ u8 *dst_addr, *orig_addr;
+ u8 dst_flags, ttl;
+ u32 orig_dsn, dst_dsn, metric, lifetime;
+ bool reply = false;
+ bool forward = true;
+
+ /* Update destination DSN, if present */
+ dst_addr = PREQ_IE_DST_ADDR(preq_elem);
+ orig_addr = PREQ_IE_ORIG_ADDR(preq_elem);
+ dst_dsn = PREQ_IE_DST_DSN(preq_elem);
+ orig_dsn = PREQ_IE_ORIG_DSN(preq_elem);
+ dst_flags = PREQ_IE_DST_F(preq_elem);
+
+ if (memcmp(dst_addr, dev->dev_addr, ETH_ALEN) == 0) {
+ forward = false;
+ reply = true;
+ metric = 0;
+ if (time_after(jiffies, ifsta->last_dsn_update +
+ net_traversal_jiffies(sdata)) ||
+ time_before(jiffies, ifsta->last_dsn_update)) {
+ dst_dsn = ++ifsta->dsn;
+ ifsta->last_dsn_update = jiffies;
+ }
+ } else {
+ rcu_read_lock();
+ mpath = mesh_path_lookup(dst_addr, dev);
+ if (mpath) {
+ if ((!(mpath->flags & MESH_PATH_DSN_VALID)) ||
+ DSN_LT(mpath->dsn, dst_dsn)) {
+ mpath->dsn = dst_dsn;
+ mpath->flags |= MESH_PATH_DSN_VALID;
+ } else if ((!(dst_flags & MP_F_DO)) &&
+ (mpath->flags & MESH_PATH_ACTIVE)) {
+ reply = true;
+ metric = mpath->metric;
+ dst_dsn = mpath->dsn;
+ if (dst_flags & MP_F_RF)
+ dst_flags |= MP_F_DO;
+ else
+ forward = false;
+ }
+ }
+ rcu_read_unlock();
+ }
+
+ if (reply) {
+ lifetime = PREQ_IE_LIFETIME(preq_elem);
+ ttl = ifsta->mshcfg.dot11MeshTTL;
+ if (ttl != 0)
+ mesh_send_path_sel_frame(MPATH_PREP, 0, dst_addr,
+ __cpu_to_le32(dst_dsn), 0, orig_addr,
+ __cpu_to_le32(orig_dsn), mgmt->sa, 0, ttl,
+ __cpu_to_le32(lifetime), __cpu_to_le32(metric),
+ 0, dev);
+ else
+ ifsta->mshstats.dropped_frames_ttl++;
+ }
+
+ if (forward) {
+ u32 preq_id;
+ u8 hopcount, flags;
+
+ ttl = PREQ_IE_TTL(preq_elem);
+ lifetime = PREQ_IE_LIFETIME(preq_elem);
+ if (ttl <= 1) {
+ ifsta->mshstats.dropped_frames_ttl++;
+ return;
+ }
+ --ttl;
+ flags = PREQ_IE_FLAGS(preq_elem);
+ metric = PREQ_IE_METRIC(preq_elem) + last_hop_metric;
+ preq_id = PREQ_IE_PREQ_ID(preq_elem);
+ hopcount = PREQ_IE_HOPCOUNT(preq_elem) + 1;
+ mesh_send_path_sel_frame(MPATH_PREQ, flags, orig_addr,
+ __cpu_to_le32(orig_dsn), dst_flags, dst_addr,
+ __cpu_to_le32(dst_dsn), dev->broadcast,
+ hopcount, ttl, __cpu_to_le32(lifetime),
+ __cpu_to_le32(metric), __cpu_to_le32(preq_id),
+ dev);
+ ifsta->mshstats.fwded_frames++;
+ }
+}
+
+
+static void hwmp_process_prep_frame(struct net_device *dev,
+ struct ieee80211_mgmt *mgmt,
+ u8 *prep_elem, u32 last_hop_metric)
+{
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ struct mesh_path *mpath;
+ u8 *dst_addr, *orig_addr;
+ u8 ttl, hopcount, flags;
+ u8 next_hop[ETH_ALEN];
+ u32 dst_dsn, orig_dsn, metric, lifetime;
+
+ /* Note that we divert from the draft nomenclature and denominate
+ * destination what the draft refers to as originator. So in this
+ * function destination refers to the final destination of the PREP,
+ * which corresponds to the originator of the PREQ which this PREP
+ * replies to.
+ */
+ dst_addr = PREP_IE_DST_ADDR(prep_elem);
+ if (memcmp(dst_addr, dev->dev_addr, ETH_ALEN) == 0)
+ /* destination, no forwarding required */
+ return;
+
+ ttl = PREP_IE_TTL(prep_elem);
+ if (ttl <= 1) {
+ sdata->u.sta.mshstats.dropped_frames_ttl++;
+ return;
+ }
+
+ rcu_read_lock();
+ mpath = mesh_path_lookup(dst_addr, dev);
+ if (mpath)
+ spin_lock_bh(&mpath->state_lock);
+ else
+ goto fail;
+ if (!(mpath->flags & MESH_PATH_ACTIVE)) {
+ spin_unlock_bh(&mpath->state_lock);
+ goto fail;
+ }
+ memcpy(next_hop, mpath->next_hop->ha, ETH_ALEN);
+ spin_unlock_bh(&mpath->state_lock);
+ --ttl;
+ flags = PREP_IE_FLAGS(prep_elem);
+ metric = PREP_IE_METRIC(prep_elem) + last_hop_metric;
+ lifetime = PREP_IE_LIFETIME(prep_elem);
+ hopcount = PREP_IE_HOPCOUNT(prep_elem) + 1;
+ orig_addr = PREP_IE_ORIG_ADDR(prep_elem);
+ dst_dsn = PREP_IE_DST_DSN(prep_elem);
+ orig_dsn = PREP_IE_ORIG_DSN(prep_elem);
+
+ mesh_send_path_sel_frame(MPATH_PREP, flags, orig_addr,
+ __cpu_to_le32(orig_dsn), 0, dst_addr,
+ __cpu_to_le32(dst_dsn), next_hop, hopcount, ttl,
+ __cpu_to_le32(lifetime), __cpu_to_le32(metric),
+ 0, dev);
+ rcu_read_unlock();
+ sdata->u.sta.mshstats.fwded_frames++;
+ return;
+
+fail:
+ rcu_read_unlock();
+ sdata->u.sta.mshstats.dropped_frames_no_route++;
+ return;
+}
+
+static void hwmp_process_perr_frame(struct net_device *dev,
+ struct ieee80211_mgmt *mgmt, u8 *perr_elem)
+{
+ struct mesh_path *mpath;
+ u8 *ta, *dst_addr;
+ u32 dst_dsn;
+
+ ta = mgmt->sa;
+ dst_addr = PERR_IE_DST_ADDR(perr_elem);
+ dst_dsn = PERR_IE_DST_DSN(perr_elem);
+ rcu_read_lock();
+ mpath = mesh_path_lookup(dst_addr, dev);
+ if (mpath) {
+ spin_lock_bh(&mpath->state_lock);
+ if (mpath->flags & MESH_PATH_ACTIVE &&
+ memcmp(ta, mpath->next_hop->ha, ETH_ALEN) == 0 &&
+ (!(mpath->flags & MESH_PATH_DSN_VALID) ||
+ DSN_GT(dst_dsn, mpath->dsn))) {
+ mpath->flags &= ~MESH_PATH_ACTIVE;
+ mpath->dsn = dst_dsn;
+ spin_unlock_bh(&mpath->state_lock);
+ mesh_send_path_error(dst_addr, dst_dsn, dev->broadcast,
+ dev);
+ } else
+ spin_unlock_bh(&mpath->state_lock);
+ }
+ rcu_read_unlock();
+}
+
+
+
+void mesh_rx_path_sel_frame(struct net_device *dev,
+ struct ieee80211_mgmt *mgmt,
+ size_t len)
+{
+ struct ieee802_11_elems elems;
+ size_t baselen;
+ u32 last_hop_metric;
+
+ baselen = (u8 *) mgmt->u.action.u.mesh_action.variable - (u8 *) mgmt;
+ ieee802_11_parse_elems(mgmt->u.action.u.mesh_action.variable,
+ len - baselen, &elems);
+
+ switch (mgmt->u.action.u.mesh_action.action_code) {
+ case MPATH_PREQ:
+ if (!elems.preq || elems.preq_len != 37)
+ /* Right now we support just 1 destination and no AE */
+ return;
+ last_hop_metric = hwmp_get_rinfo_from_ie(dev, mgmt, elems.preq);
+ if (!last_hop_metric)
+ return;
+ hwmp_process_preq_frame(dev, mgmt, elems.preq, last_hop_metric);
+ break;
+ case MPATH_PREP:
+ if (!elems.prep || elems.prep_len != 31)
+ /* Right now we support no AE */
+ return;
+ last_hop_metric = hwmp_get_rinfo_from_ie(dev, mgmt, elems.prep);
+ if (!last_hop_metric)
+ return;
+ hwmp_process_prep_frame(dev, mgmt, elems.prep, last_hop_metric);
+ break;
+ case MPATH_PERR:
+ if (!elems.perr || elems.perr_len != 12)
+ /* Right now we support only one destination per PERR */
+ return;
+ hwmp_process_perr_frame(dev, mgmt, elems.perr);
+ break;
+ default:
+ return;
+ }
+
+}
+
+/**
+ * mesh_hwmp_queue_preq - queues a PREQ to a given destination
+ * @mpath: mesh path to discover
+ * @flags: special attributes of the PREQ to be sent
+ *
+ * Locking: the function must be called from within a rcu read lock block.
+ *
+ */
+static void mesh_hwmp_queue_preq(struct mesh_path *mpath, u8 flags)
+{
+ struct ieee80211_sub_if_data *sdata =
+ IEEE80211_DEV_TO_SUB_IF(mpath->dev);
+ struct ieee80211_if_sta *ifsta = &sdata->u.sta;
+ struct mesh_preq_queue *preq_node;
+
+ preq_node = kmalloc(sizeof(struct mesh_preq_queue), GFP_ATOMIC);
+ if (!preq_node) {
+ printk(KERN_DEBUG "Mesh HWMP: could not allocate PREQ node\n");
+ return;
+ }
+
+ spin_lock(&ifsta->mesh_preq_queue_lock);
+ if (ifsta->preq_queue_len == MAX_PREQ_QUEUE_LEN) {
+ spin_unlock(&ifsta->mesh_preq_queue_lock);
+ kfree(preq_node);
+ if (printk_ratelimit())
+ printk(KERN_DEBUG "Mesh HWMP: PREQ node queue full\n");
+ return;
+ }
+
+ memcpy(preq_node->dst, mpath->dst, ETH_ALEN);
+ preq_node->flags = flags;
+
+ list_add_tail(&preq_node->list, &ifsta->preq_queue.list);
+ ++ifsta->preq_queue_len;
+ spin_unlock(&ifsta->mesh_preq_queue_lock);
+
+ if (time_after(jiffies, ifsta->last_preq + min_preq_int_jiff(sdata)))
+ queue_work(sdata->local->hw.workqueue, &ifsta->work);
+
+ else if (time_before(jiffies, ifsta->last_preq)) {
+ /* avoid a long wait if we did not send PREQs for a long time
+ * and jiffies wrapped around
+ */
+ ifsta->last_preq = jiffies - min_preq_int_jiff(sdata) - 1;
+ queue_work(sdata->local->hw.workqueue, &ifsta->work);
+ } else
+ mod_timer(&ifsta->mesh_path_timer, ifsta->last_preq +
+ min_preq_int_jiff(sdata));
+}
+
+void mesh_start_path_discovery(struct net_device *dev)
+{
+ struct ieee80211_sub_if_data *sdata =
+ IEEE80211_DEV_TO_SUB_IF(dev);
+ struct ieee80211_if_sta *ifsta = &sdata->u.sta;
+ struct mesh_preq_queue *preq_node;
+ struct mesh_path *mpath;
+ u8 ttl, dst_flags;
+ u32 lifetime;
+
+ spin_lock(&ifsta->mesh_preq_queue_lock);
+ if (!ifsta->preq_queue_len ||
+ time_before(jiffies, ifsta->last_preq +
+ min_preq_int_jiff(sdata))) {
+ spin_unlock(&ifsta->mesh_preq_queue_lock);
+ return;
+ }
+
+ preq_node = list_first_entry(&ifsta->preq_queue.list,
+ struct mesh_preq_queue, list);
+ list_del(&preq_node->list);
+ --ifsta->preq_queue_len;
+ spin_unlock(&ifsta->mesh_preq_queue_lock);
+
+ rcu_read_lock();
+ mpath = mesh_path_lookup(preq_node->dst, dev);
+ if (!mpath)
+ goto enddiscovery;
+
+ spin_lock_bh(&mpath->state_lock);
+ if (preq_node->flags & PREQ_Q_F_START) {
+ if (mpath->flags & MESH_PATH_RESOLVING) {
+ spin_unlock_bh(&mpath->state_lock);
+ goto enddiscovery;
+ } else {
+ mpath->flags &= ~MESH_PATH_RESOLVED;
+ mpath->flags |= MESH_PATH_RESOLVING;
+ mpath->discovery_timeout = disc_timeout_jiff(sdata);
+ }
+ } else if (!(mpath->flags & MESH_PATH_RESOLVING) ||
+ mpath->flags & MESH_PATH_RESOLVED) {
+ mpath->flags &= ~MESH_PATH_RESOLVING;
+ spin_unlock_bh(&mpath->state_lock);
+ goto enddiscovery;
+ }
+
+ ifsta->last_preq = jiffies;
+
+ if (time_after(jiffies, ifsta->last_dsn_update +
+ net_traversal_jiffies(sdata)) ||
+ time_before(jiffies, ifsta->last_dsn_update)) {
+ ++ifsta->dsn;
+ ifsta->last_dsn_update = jiffies;
+ }
+ lifetime = default_lifetime(sdata);
+ ttl = sdata->u.sta.mshcfg.dot11MeshTTL;
+ if (ttl == 0) {
+ sdata->u.sta.mshstats.dropped_frames_ttl++;
+ spin_unlock_bh(&mpath->state_lock);
+ goto enddiscovery;
+ }
+
+ if (preq_node->flags & PREQ_Q_F_REFRESH)
+ dst_flags = MP_F_DO;
+ else
+ dst_flags = MP_F_RF;
+
+ spin_unlock_bh(&mpath->state_lock);
+ mesh_send_path_sel_frame(MPATH_PREQ, 0, dev->dev_addr,
+ __cpu_to_le32(ifsta->dsn), dst_flags, mpath->dst,
+ __cpu_to_le32(mpath->dsn), dev->broadcast, 0,
+ ttl, __cpu_to_le32(lifetime), 0,
+ __cpu_to_le32(ifsta->preq_id++), dev);
+ mod_timer(&mpath->timer, jiffies + mpath->discovery_timeout);
+
+enddiscovery:
+ rcu_read_unlock();
+ kfree(preq_node);
+}
+
+/**
+ * mesh_lookup_nexthop - Put the appropriate next hop on a mesh frame
+ * @next_hop: output argument for next hop address
+ * @skb: frame to be sent
+ * @dev: network device the frame will be sent through
+ *
+ * Returns: 0 if the next hop was found. Nonzero otherwise. If no next hop is
+ * found, the function will start a path discovery and queue the frame so it is
+ * sent when the path is available, so the caller must not free skb in this
+ * case.
+ */
+int mesh_lookup_nexthop(u8 *next_hop, struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ struct sk_buff *skb_to_free = NULL;
+ struct mesh_path *mpath;
+ int err = 0;
+
+ rcu_read_lock();
+ mpath = mesh_path_lookup(skb->data, dev);
+
+ if (!mpath) {
+ add_mesh_path(skb->data, dev);
+ mpath = mesh_path_lookup(skb->data, dev);
+ if (!mpath) {
+ dev_kfree_skb(skb);
+ sdata->u.sta.mshstats.dropped_frames_no_route++;
+ err = -ENOSPC;
+ goto endlookup;
+ }
+ }
+
+ if (mpath->flags & MESH_PATH_ACTIVE) {
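+ /* Refresh the path with a PREQ if it is in use and about to
+ * expire
+ */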
+ if (time_after(jiffies, mpath->lifetime -
+ msecs_to_jiffies(sdata->u.sta.mshcfg.path_refresh_time))
+ && skb->pkt_type != PACKET_OTHERHOST
+ && !(mpath->flags & MESH_PATH_RESOLVING)
+ && !(mpath->flags & MESH_PATH_FIXED)) {
+ mesh_hwmp_queue_preq(mpath,
+ PREQ_Q_F_START | PREQ_Q_F_REFRESH);
+ }
+ memcpy(next_hop, rcu_dereference(mpath->next_hop)->ha,
+ ETH_ALEN);
+ } else {
+ if (!(mpath->flags & MESH_PATH_RESOLVING)) {
+ /* Start discovery only if it is not running yet */
+ mesh_hwmp_queue_preq(mpath, PREQ_Q_F_START);
+ }
+
+ if (skb_queue_len(&mpath->frame_queue) >=
+ MESH_FRAME_QUEUE_LEN) {
+ skb_to_free = mpath->frame_queue.next;
+ skb_unlink(skb_to_free, &mpath->frame_queue);
+ }
+
+ skb_queue_tail(&mpath->frame_queue, skb);
+ if (skb_to_free)
+ mesh_path_discard_frame(skb_to_free, dev);
+ err = -ENOENT;
+ }
+
+endlookup:
+ rcu_read_unlock();
+ return err;
+}
+
+void mpath_timer(unsigned long data)
+{
+ struct ieee80211_sub_if_data *sdata;
+ struct mesh_path *mpath;
+ bool delete = false;
+
+ rcu_read_lock();
+ mpath = (struct mesh_path *) data;
+ mpath = rcu_dereference(mpath);
+ if (!mpath)
+ goto endmpathtimer;
+ spin_lock_bh(&mpath->state_lock);
+ sdata = IEEE80211_DEV_TO_SUB_IF(mpath->dev);
+ if (mpath->flags & MESH_PATH_DELETE) {
+ mpath->flags = 0;
+ delete = true;
+ } else if (mpath->flags & MESH_PATH_RESOLVED ||
+ (!(mpath->flags & MESH_PATH_RESOLVING)))
+ mpath->flags &= ~(MESH_PATH_RESOLVING | MESH_PATH_RESOLVED);
+ else if (mpath->discovery_retries < max_preq_retries(sdata)) {
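+ /* discovery timed out, retry with a doubled timeout */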
+ ++mpath->discovery_retries;
+ mpath->discovery_timeout *= 2;
+ mesh_hwmp_queue_preq(mpath, 0);
+ } else {
+ mpath->flags = 0;
+ mpath->lifetime = jiffies;
+ mpath_empty_pending_queue(mpath);
+ }
+
+ spin_unlock_bh(&mpath->state_lock);
+endmpathtimer:
+ rcu_read_unlock();
+ if (delete)
+ del_mesh_path(mpath->dst, mpath->dev);
+}
--
1.5.2.5