This patch series fixes issues with the usage of a few of the spinlocks
used by the driver. To summarise, it fixes the following issues:
1. The driver accesses the elements returned by list lookup functions
without holding the lock.
2. The driver releases the lock during iteration of a list and
reacquires it before starting the next iteration.
3. The driver releases the lock while the element is still being
processed.
A minimal sketch of the locking pattern the series moves toward is
shown below.
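For reference, here is an illustrative sketch (not part of any patch;
declarations omitted) of that pattern: the lookup helpers no longer take
the lock themselves, so the caller holds it across both the lookup and
any use of the returned element.

spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags);
tbl = mwifiex_get_ba_tbl(priv, tid, ra);        /* lookup no longer locks internally */
if (tbl)
        mwifiex_send_addba(priv, tbl->tid, tbl->ra);    /* element still protected */
spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock, flags);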
Karthik Ananthapadmanabha (3):
mwifiex: cleanup rx_pkt_lock usage in 11n_rxreorder.c
mwifiex: cleanup tx_ba_stream_tbl_lock usage
mwifiex: cleanup rx_reorder_tbl_lock usage
drivers/net/wireless/marvell/mwifiex/11n.c | 45 +++++++++++++---------
.../net/wireless/marvell/mwifiex/11n_rxreorder.c | 42 +++++++++++++-------
drivers/net/wireless/marvell/mwifiex/uap_txrx.c | 3 ++
3 files changed, 58 insertions(+), 32 deletions(-)
--
1.9.1
From: Karthik Ananthapadmanabha <[email protected]>
Fix the incorrect usage of the tx_ba_stream_tbl_lock spinlock:
1. We must not access the fields of the elements returned by
mwifiex_get_ba_status() and mwifiex_get_ba_tbl() after we have
released the spinlock. To fix this, acquire the spinlock before
calling these functions and release it only after processing of
the corresponding element is complete.
2. Move tx_ba_stream_tbl_lock out of mwifiex_del_ba_tbl() to
avoid recursive locking; acquire the spinlock explicitly before
calling mwifiex_del_ba_tbl(). A short sketch of the recursive-lock
hazard is shown below.
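To illustrate point 2 (sketch only; declarations omitted, mirroring the
callers changed by the diff below): these spinlocks are not recursive,
so if mwifiex_del_ba_tbl() kept taking tx_ba_stream_tbl_lock internally,
a caller that now holds the lock around the lookup would deadlock on the
second acquisition.

spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags);
tx_ba_tbl = mwifiex_get_ba_status(priv, BA_SETUP_INPROGRESS);
if (tx_ba_tbl)
        /* would deadlock here if the helper re-took the same lock */
        mwifiex_del_ba_tbl(priv, tx_ba_tbl->tid, tx_ba_tbl->ra,
                           TYPE_DELBA_SENT, true);
spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock, flags);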
Signed-off-by: Karthik Ananthapadmanabha <[email protected]>
Signed-off-by: Ganapathi Bhat <[email protected]>
---
drivers/net/wireless/marvell/mwifiex/11n.c | 45 +++++++++++++---------
.../net/wireless/marvell/mwifiex/11n_rxreorder.c | 3 --
2 files changed, 26 insertions(+), 22 deletions(-)
diff --git a/drivers/net/wireless/marvell/mwifiex/11n.c b/drivers/net/wireless/marvell/mwifiex/11n.c
index 8772e39..38d7b48 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n.c
@@ -77,24 +77,19 @@ int mwifiex_fill_cap_info(struct mwifiex_private *priv, u8 radio_type,
/*
* This function returns the pointer to an entry in BA Stream
- * table which matches the requested BA status.
+ * table which matches the requested BA status. The caller
+ * must hold tx_ba_stream_tbl_lock before calling this function.
*/
static struct mwifiex_tx_ba_stream_tbl *
mwifiex_get_ba_status(struct mwifiex_private *priv,
enum mwifiex_ba_status ba_status)
{
struct mwifiex_tx_ba_stream_tbl *tx_ba_tsr_tbl;
- unsigned long flags;
- spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags);
list_for_each_entry(tx_ba_tsr_tbl, &priv->tx_ba_stream_tbl_ptr, list) {
- if (tx_ba_tsr_tbl->ba_status == ba_status) {
- spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock,
- flags);
+ if (tx_ba_tsr_tbl->ba_status == ba_status)
return tx_ba_tsr_tbl;
- }
}
- spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock, flags);
return NULL;
}
@@ -114,9 +109,11 @@ int mwifiex_ret_11n_delba(struct mwifiex_private *priv,
struct mwifiex_tx_ba_stream_tbl *tx_ba_tbl;
struct host_cmd_ds_11n_delba *del_ba = &resp->params.del_ba;
uint16_t del_ba_param_set = le16_to_cpu(del_ba->del_ba_param_set);
+ unsigned long flags;
tid = del_ba_param_set >> DELBA_TID_POS;
if (del_ba->del_result == BA_RESULT_SUCCESS) {
+ spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags);
mwifiex_del_ba_tbl(priv, tid, del_ba->peer_mac_addr,
TYPE_DELBA_SENT,
INITIATOR_BIT(del_ba_param_set));
@@ -125,6 +122,7 @@ int mwifiex_ret_11n_delba(struct mwifiex_private *priv,
if (tx_ba_tbl)
mwifiex_send_addba(priv, tx_ba_tbl->tid,
tx_ba_tbl->ra);
+ spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock, flags);
} else { /*
* In case of failure, recreate the deleted stream in case
* we initiated the ADDBA
@@ -135,11 +133,13 @@ int mwifiex_ret_11n_delba(struct mwifiex_private *priv,
mwifiex_create_ba_tbl(priv, del_ba->peer_mac_addr, tid,
BA_SETUP_INPROGRESS);
+ spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags);
tx_ba_tbl = mwifiex_get_ba_status(priv, BA_SETUP_INPROGRESS);
if (tx_ba_tbl)
mwifiex_del_ba_tbl(priv, tx_ba_tbl->tid, tx_ba_tbl->ra,
TYPE_DELBA_SENT, true);
+ spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock, flags);
}
return 0;
@@ -160,6 +160,7 @@ int mwifiex_ret_11n_addba_req(struct mwifiex_private *priv,
struct host_cmd_ds_11n_addba_rsp *add_ba_rsp = &resp->params.add_ba_rsp;
struct mwifiex_tx_ba_stream_tbl *tx_ba_tbl;
struct mwifiex_ra_list_tbl *ra_list;
+ unsigned long flags;
u16 block_ack_param_set = le16_to_cpu(add_ba_rsp->block_ack_param_set);
add_ba_rsp->ssn = cpu_to_le16((le16_to_cpu(add_ba_rsp->ssn))
@@ -176,14 +177,17 @@ int mwifiex_ret_11n_addba_req(struct mwifiex_private *priv,
ra_list->ba_status = BA_SETUP_NONE;
ra_list->amsdu_in_ampdu = false;
}
+ spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags);
mwifiex_del_ba_tbl(priv, tid, add_ba_rsp->peer_mac_addr,
TYPE_DELBA_SENT, true);
+ spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock, flags);
if (add_ba_rsp->add_rsp_result != BA_RESULT_TIMEOUT)
priv->aggr_prio_tbl[tid].ampdu_ap =
BA_STREAM_NOT_ALLOWED;
return 0;
}
+ spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags);
tx_ba_tbl = mwifiex_get_ba_tbl(priv, tid, add_ba_rsp->peer_mac_addr);
if (tx_ba_tbl) {
mwifiex_dbg(priv->adapter, EVENT, "info: BA stream complete\n");
@@ -201,6 +205,7 @@ int mwifiex_ret_11n_addba_req(struct mwifiex_private *priv,
} else {
mwifiex_dbg(priv->adapter, ERROR, "BA stream not created\n");
}
+ spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock, flags);
return 0;
}
@@ -501,24 +506,20 @@ void mwifiex_11n_delete_all_tx_ba_stream_tbl(struct mwifiex_private *priv)
/*
* This function returns the pointer to an entry in BA Stream
- * table which matches the given RA/TID pair.
+ * table which matches the given RA/TID pair. The caller must
+ * hold tx_ba_stream_tbl_lock before calling this function.
*/
struct mwifiex_tx_ba_stream_tbl *
mwifiex_get_ba_tbl(struct mwifiex_private *priv, int tid, u8 *ra)
{
struct mwifiex_tx_ba_stream_tbl *tx_ba_tsr_tbl;
- unsigned long flags;
- spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags);
list_for_each_entry(tx_ba_tsr_tbl, &priv->tx_ba_stream_tbl_ptr, list) {
if (ether_addr_equal_unaligned(tx_ba_tsr_tbl->ra, ra) &&
- tx_ba_tsr_tbl->tid == tid) {
- spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock,
- flags);
+ tx_ba_tsr_tbl->tid == tid)
return tx_ba_tsr_tbl;
- }
}
- spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock, flags);
+
return NULL;
}
@@ -534,11 +535,15 @@ void mwifiex_create_ba_tbl(struct mwifiex_private *priv, u8 *ra, int tid,
unsigned long flags;
int tid_down;
+ spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags);
if (!mwifiex_get_ba_tbl(priv, tid, ra)) {
new_node = kzalloc(sizeof(struct mwifiex_tx_ba_stream_tbl),
GFP_ATOMIC);
- if (!new_node)
+ if (!new_node) {
+ spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock,
+ flags);
return;
+ }
tid_down = mwifiex_wmm_downgrade_tid(priv, tid);
ra_list = mwifiex_wmm_get_ralist_node(priv, tid_down, ra);
@@ -552,10 +557,9 @@ void mwifiex_create_ba_tbl(struct mwifiex_private *priv, u8 *ra, int tid,
new_node->ba_status = ba_status;
memcpy(new_node->ra, ra, ETH_ALEN);
- spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags);
list_add_tail(&new_node->list, &priv->tx_ba_stream_tbl_ptr);
- spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock, flags);
}
+ spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock, flags);
}
/*
@@ -680,11 +684,14 @@ void mwifiex_11n_delete_ba_stream(struct mwifiex_private *priv, u8 *del_ba)
(struct host_cmd_ds_11n_delba *) del_ba;
uint16_t del_ba_param_set = le16_to_cpu(cmd_del_ba->del_ba_param_set);
int tid;
+ unsigned long flags;
tid = del_ba_param_set >> DELBA_TID_POS;
+ spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags);
mwifiex_del_ba_tbl(priv, tid, cmd_del_ba->peer_mac_addr,
TYPE_DELBA_RECEIVE, INITIATOR_BIT(del_ba_param_set));
+ spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock, flags);
}
/*
diff --git a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
index 0149c4a..4631bc2 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
@@ -682,7 +682,6 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
struct mwifiex_tx_ba_stream_tbl *ptx_tbl;
struct mwifiex_ra_list_tbl *ra_list;
u8 cleanup_rx_reorder_tbl;
- unsigned long flags;
int tid_down;
if (type == TYPE_DELBA_RECEIVE)
@@ -716,9 +715,7 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
ra_list->amsdu_in_ampdu = false;
ra_list->ba_status = BA_SETUP_NONE;
}
- spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags);
mwifiex_11n_delete_tx_ba_stream_tbl_entry(priv, ptx_tbl);
- spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock, flags);
}
}
--
1.9.1
From: Karthik Ananthapadmanabha <[email protected]>
Fix the incorrect usage of the rx_pkt_lock spinlock. Releasing the
spinlock while the element is still being processed is unsafe. The
lock must be released only after processing of the element is
complete.
Signed-off-by: Karthik Ananthapadmanabha <[email protected]>
Signed-off-by: Ganapathi Bhat <[email protected]>
---
drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
index 1edcdda..0149c4a 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
@@ -124,9 +124,10 @@ static int mwifiex_11n_dispatch_pkt(struct mwifiex_private *priv, void *payload)
rx_tmp_ptr = tbl->rx_reorder_ptr[i];
tbl->rx_reorder_ptr[i] = NULL;
}
- spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
if (rx_tmp_ptr)
mwifiex_11n_dispatch_pkt(priv, rx_tmp_ptr);
+
+ spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
}
spin_lock_irqsave(&priv->rx_pkt_lock, flags);
@@ -167,8 +168,8 @@ static int mwifiex_11n_dispatch_pkt(struct mwifiex_private *priv, void *payload)
}
rx_tmp_ptr = tbl->rx_reorder_ptr[i];
tbl->rx_reorder_ptr[i] = NULL;
- spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
mwifiex_11n_dispatch_pkt(priv, rx_tmp_ptr);
+ spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
}
spin_lock_irqsave(&priv->rx_pkt_lock, flags);
--
1.9.1
From: Karthik Ananthapadmanabha <[email protected]>
Fix the incorrect usage of the rx_reorder_tbl_lock spinlock:
1. We must not access the fields of the elements returned by
mwifiex_11n_get_rx_reorder_tbl() after we have released the
spinlock. To fix this, acquire the spinlock before calling this
function and release it only after processing of the corresponding
element is complete.
2. Releasing the spinlock during iteration of the list and
reacquiring it before the next iteration is unsafe. Fix this by
releasing the lock only after iteration of the list is complete;
a sketch of the safe-iteration pattern is shown below.
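A minimal, generic sketch of the safe-iteration pattern referred to in
point 2 (illustrative only; it assumes the loop body does not itself try
to take rx_reorder_tbl_lock). Dropping the lock inside a
list_for_each_entry_safe() walk is unsafe because the cached next
pointer can be freed by another context before the next iteration
dereferences it, so the lock is held for the whole walk.

spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
list_for_each_entry_safe(tbl, tmp, &priv->rx_reorder_tbl_ptr, list) {
        /* process/unlink the entry here; the lock stays held, so the
         * iterator's cached next pointer cannot go stale */
        list_del(&tbl->list);
}
spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);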
Signed-off-by: Karthik Ananthapadmanabha <[email protected]>
Signed-off-by: Ganapathi Bhat <[email protected]>
---
.../net/wireless/marvell/mwifiex/11n_rxreorder.c | 34 +++++++++++++++++-----
drivers/net/wireless/marvell/mwifiex/uap_txrx.c | 3 ++
2 files changed, 29 insertions(+), 8 deletions(-)
diff --git a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
index 4631bc2..b99ace8 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
@@ -234,23 +234,19 @@ static int mwifiex_11n_dispatch_pkt(struct mwifiex_private *priv, void *payload)
/*
* This function returns the pointer to an entry in Rx reordering
- * table which matches the given TA/TID pair.
+ * table which matches the given TA/TID pair. The caller must
+ * hold rx_reorder_tbl_lock, before calling this function.
*/
struct mwifiex_rx_reorder_tbl *
mwifiex_11n_get_rx_reorder_tbl(struct mwifiex_private *priv, int tid, u8 *ta)
{
struct mwifiex_rx_reorder_tbl *tbl;
- unsigned long flags;
- spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
list_for_each_entry(tbl, &priv->rx_reorder_tbl_ptr, list) {
if (!memcmp(tbl->ta, ta, ETH_ALEN) && tbl->tid == tid) {
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
- flags);
return tbl;
}
}
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
return NULL;
}
@@ -355,11 +351,14 @@ void mwifiex_11n_del_rx_reorder_tbl_by_ta(struct mwifiex_private *priv, u8 *ta)
* If we get a TID, ta pair which is already present dispatch all the
* the packets and move the window size until the ssn
*/
+ spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta);
if (tbl) {
mwifiex_11n_dispatch_pkt_until_start_win(priv, tbl, seq_num);
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
return;
}
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
/* if !tbl then create one */
new_node = kzalloc(sizeof(struct mwifiex_rx_reorder_tbl), GFP_KERNEL);
if (!new_node)
@@ -571,15 +570,19 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
u16 pkt_index;
bool init_window_shift = false;
int ret = 0;
+ unsigned long flags;
+ spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta);
if (!tbl) {
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
if (pkt_type != PKT_TYPE_BAR)
mwifiex_11n_dispatch_pkt(priv, payload);
return ret;
}
if ((pkt_type == PKT_TYPE_AMSDU) && !tbl->amsdu) {
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
mwifiex_11n_dispatch_pkt(priv, payload);
return ret;
}
@@ -666,6 +669,8 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
if (!tbl->timer_context.timer_is_set ||
prev_start_win != tbl->start_win)
mwifiex_11n_rxreorder_timer_restart(tbl);
+
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
return ret;
}
@@ -683,6 +688,7 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
struct mwifiex_ra_list_tbl *ra_list;
u8 cleanup_rx_reorder_tbl;
int tid_down;
+ unsigned long flags;
if (type == TYPE_DELBA_RECEIVE)
cleanup_rx_reorder_tbl = (initiator) ? true : false;
@@ -693,14 +699,18 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
peer_mac, tid, initiator);
if (cleanup_rx_reorder_tbl) {
+ spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
peer_mac);
if (!tbl) {
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
+ flags);
mwifiex_dbg(priv->adapter, EVENT,
"event: TID, TA not found in table\n");
return;
}
mwifiex_del_rx_reorder_entry(priv, tbl);
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
} else {
ptx_tbl = mwifiex_get_ba_tbl(priv, tid, peer_mac);
if (!ptx_tbl) {
@@ -732,6 +742,7 @@ int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv,
int tid, win_size;
struct mwifiex_rx_reorder_tbl *tbl;
uint16_t block_ack_param_set;
+ unsigned long flags;
block_ack_param_set = le16_to_cpu(add_ba_rsp->block_ack_param_set);
@@ -745,17 +756,20 @@ int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv,
mwifiex_dbg(priv->adapter, ERROR, "ADDBA RSP: failed %pM tid=%d)\n",
add_ba_rsp->peer_mac_addr, tid);
+ spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
add_ba_rsp->peer_mac_addr);
if (tbl)
mwifiex_del_rx_reorder_entry(priv, tbl);
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
return 0;
}
win_size = (block_ack_param_set & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK)
>> BLOCKACKPARAM_WINSIZE_POS;
+ spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
add_ba_rsp->peer_mac_addr);
if (tbl) {
@@ -766,6 +780,7 @@ int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv,
else
tbl->amsdu = false;
}
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
mwifiex_dbg(priv->adapter, CMD,
"cmd: ADDBA RSP: %pM tid=%d ssn=%d win_size=%d\n",
@@ -806,9 +821,7 @@ void mwifiex_11n_cleanup_reorder_tbl(struct mwifiex_private *priv)
spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
list_for_each_entry_safe(del_tbl_ptr, tmp_node,
&priv->rx_reorder_tbl_ptr, list) {
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
mwifiex_del_rx_reorder_entry(priv, del_tbl_ptr);
- spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
}
INIT_LIST_HEAD(&priv->rx_reorder_tbl_ptr);
spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
@@ -933,6 +946,7 @@ void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv,
int tlv_buf_left = len;
int ret;
u8 *tmp;
+ unsigned long flags;
mwifiex_dbg_dump(priv->adapter, EVT_D, "RXBA_SYNC event:",
event_buf, len);
@@ -952,14 +966,18 @@ void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv,
tlv_rxba->mac, tlv_rxba->tid, tlv_seq_num,
tlv_bitmap_len);
+ spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
rx_reor_tbl_ptr =
mwifiex_11n_get_rx_reorder_tbl(priv, tlv_rxba->tid,
tlv_rxba->mac);
if (!rx_reor_tbl_ptr) {
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
+ flags);
mwifiex_dbg(priv->adapter, ERROR,
"Can not find rx_reorder_tbl!");
return;
}
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
for (i = 0; i < tlv_bitmap_len; i++) {
for (j = 0 ; j < 8; j++) {
diff --git a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
index 1e6a62c..21dc14f 100644
--- a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
+++ b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
@@ -421,12 +421,15 @@ int mwifiex_process_uap_rx_packet(struct mwifiex_private *priv,
spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
}
+ spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
if (!priv->ap_11n_enabled ||
(!mwifiex_11n_get_rx_reorder_tbl(priv, uap_rx_pd->priority, ta) &&
(le16_to_cpu(uap_rx_pd->rx_pkt_type) != PKT_TYPE_AMSDU))) {
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
ret = mwifiex_handle_uap_rx_forward(priv, skb);
return ret;
}
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
/* Reorder and send to kernel */
pkt_type = (u8)le16_to_cpu(uap_rx_pd->rx_pkt_type);
--
1.9.1
Hi,
On Tue, Oct 31, 2017 at 03:12:45PM +0530, Ganapathi Bhat wrote:
> From: Karthik Ananthapadmanabha <[email protected]>
>
> Fix the incorrect usage of rx_pkt_lock spinlock. Realsing the
> spinlock while the element is still in process is unsafe. The
> lock must be released only after the element processing is
> complete.
>
> Signed-off-by: Karthik Ananthapadmanabha <[email protected]>
> Signed-off-by: Ganapathi Bhat <[email protected]>
> ---
> drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c | 5 +++--
> 1 file changed, 3 insertions(+), 2 deletions(-)
>
> diff --git a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
> index 1edcdda..0149c4a 100644
> --- a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
> +++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
> @@ -124,9 +124,10 @@ static int mwifiex_11n_dispatch_pkt(struct mwifiex_private *priv, void *payload)
> rx_tmp_ptr = tbl->rx_reorder_ptr[i];
> tbl->rx_reorder_ptr[i] = NULL;
> }
> - spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
So, what is this lock protecting? Is it for protecting the packet
itself, or for protecting the ring buffer? As I read it, it's just the
latter, since once we've removed it from the ring buffer (and are about
to dispatch it up toward the networking layer), it's handled only by a
single context (or else, protected via some other common lock(s)).
If I'm correct above, then I think the current code here is actually
safe, since it holds the lock while removing from the ring buffer --
after it's removed from the ring, there's no way another thread can find
this payload, and so we can release the lock.
On a related note: I don't actually see the ring buffer *insertion*
being protected by this rx_pkt_lock, so is it really useful? See
mwifiex_11n_rx_reorder_pkt(). Perhaps the correct lock is actually
rx_reorder_tbl_lock()? Except that serves multiple purposes it seems...
Another thought: from what I can tell, all of these operations are
*only* performed on the main thread, so there's actually no concurrency
involved at all. So we also could probably drop this lock entirely...
I guess the real point is: can you explain better what you intend this
lock to do? Then we can review whether you're accomplishing your
intention, or whether we should improve the usage of this lock in some
other way, or perhaps even kill it entirely.
Thanks,
Brian
> if (rx_tmp_ptr)
> mwifiex_11n_dispatch_pkt(priv, rx_tmp_ptr);
> +
> + spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
> }
>
> spin_lock_irqsave(&priv->rx_pkt_lock, flags);
> @@ -167,8 +168,8 @@ static int mwifiex_11n_dispatch_pkt(struct mwifiex_private *priv, void *payload)
> }
> rx_tmp_ptr = tbl->rx_reorder_ptr[i];
> tbl->rx_reorder_ptr[i] = NULL;
> - spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
> mwifiex_11n_dispatch_pkt(priv, rx_tmp_ptr);
> + spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
> }
>
> spin_lock_irqsave(&priv->rx_pkt_lock, flags);
> --
> 1.9.1
>
Hi,
On Tue, Nov 7, 2017 at 8:25 AM, Ganapathi Bhat <[email protected]> wrote:
> Hi Doug/Brian,
>
> Thanks a lot for the comments and the discussion. First of all we will abort the change added by this patch as we don't need rx_pkt_lock acquired to protect the deleted item. Next, we will prepare below changes to address the concerns discussed:
> 1. Move rx_pkt_lock from mwifiex_private to rx_reorder_tbl
...or, possibly, remove rx_pkt_lock completely. See my other response...
-Doug
Hi,
On Mon, Nov 6, 2017 at 6:25 PM, Brian Norris <[email protected]> wrote:
> Hi Doug and Ganapathi,
>
> On Mon, Nov 06, 2017 at 04:42:20PM -0800, Doug Anderson wrote:
>> Hi,
>>
>> Please take the review below with a grain of salt since I'm not
>> terribly familiar with this driver. I thought I might be able to help
>> since I had previously looked at some of the spinlock stuff, but after
>> digging through the below I'm not 100% convinced I understand this
>> driver enough to do a quick review...
>>
>> On Thu, Nov 2, 2017 at 3:30 AM, Ganapathi Bhat <[email protected]> wrote:
>> > Hi Brian,
>> >
>> >> On Tue, Oct 31, 2017 at 03:12:45PM +0530, Ganapathi Bhat wrote:
>> >> > From: Karthik Ananthapadmanabha <[email protected]>
>> >> >
>> >> > Fix the incorrect usage of rx_pkt_lock spinlock. Realsing the spinlock
>> >> > while the element is still in process is unsafe. The lock must be
>> >> > released only after the element processing is complete.
>> >> >
>> >> > Signed-off-by: Karthik Ananthapadmanabha <[email protected]>
>> >> > Signed-off-by: Ganapathi Bhat <[email protected]>
>> >> > ---
>> >> > drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c | 5 +++--
>> >> > 1 file changed, 3 insertions(+), 2 deletions(-)
>> >> >
>> >> > diff --git a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
>> >> > b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
>> >> > index 1edcdda..0149c4a 100644
>> >> > --- a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
>> >> > +++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
>> >> > @@ -124,9 +124,10 @@ static int mwifiex_11n_dispatch_pkt(struct
>> >> mwifiex_private *priv, void *payload)
>> >> > rx_tmp_ptr = tbl->rx_reorder_ptr[i];
>> >> > tbl->rx_reorder_ptr[i] = NULL;
>> >> > }
>> >> > - spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
>> >>
>> >> So, what is this lock protecting? Is it for protecting the packet itself, or for
>> >> protecting the ring buffer?
>> >
>> > This lock is protecting the ring buffer but with the current change, we are trying also to protect the packet too.
>>
>> What are you trying to protect the packet from? In other words: what
>> other code is trying to make changes to the same structure? I'd agree
>
> Agreed on this question. I don't understand what, specifically, about
> "the packet" you're trying to protect, nor what other code might be
> accessing it.
>
>> with Brian that on first glance nobody else can have a reference to
>> this packet since we've removed it from the global list. If you're
>
> Right, if we're safely removing it from the pseudo ring buffer, then
> nobody else should have access to it now.
>
>> trying to protect something internal to this structure or something
>> global to mwifiex then it seems like you'd want a different lock...
>>
>>
>> >> As I read it, it's just the latter, since once we've
>> >> removed it from the ring buffer (and are about to dispatch it up toward the
>> >> networking layer), it's handled only by a single context (or else, protected via
>> >> some other common lock(s)).
>> >>
>> >> If I'm correct above, then I think the current code here is actually safe, since
>> >> it holds the lock while removing from the ring buffer -- after it's removed
>> >> from the ring, there's no way another thread can find this payload, and so we
>> >> can release the lock.
>> >
>> > There are cases where the ring buffer is iterated by cfg80211 thread:
>> > mwifiex_cfg80211_tdls_oper => mwifiex_tdls_oper => mwifiex_tdls_process_disable_link => mwifiex_11n_cleanup_reorder_tbl => mwifiex_del_rx_reorder_entry => mwifiex_11n_dispatch_pkt_until_start_win => {iterates the ring buffer}
>> > So, at worst case we can have two threads (main & cfg80211) running mwifiex_11n_dispatch_pkt_until_start_win(), iterating the ring buffer.
>>
>> Let me see if I'm understanding properly.
>>
>> There's a global (one for the whole driver) list called the
>
> Just a point of order: it's a single list for each 'mwifiex_private' --
> there can actually be multiple virtual interfaces attached to each
> physical wiphy interface, and each virtual interface gets a
> 'mwifiex_private' instance. But otherwise, your descriptions are
> accurate I believe.
Ah, thanks for clarification! :)
>> "rx_reorder_tbl_ptr". Insertion, deletion, and iteration of elements
>> into this list are protected by the global "rx_reorder_tbl_lock" so we
>> don't corrupt the linked list structure.
>>
>> The actual elements in the "rx_reorder_tbl_ptr" are _not_ protected by
>> the global "rx_reorder_tbl_lock". I make this statement because I see
>> mwifiex_11n_get_rx_reorder_tbl() which drops the lock before returning
>> an entry.
>
> Actually, patch 3 is changing that one :)
The patch 3 that you said introduces a deadlock? ;)
>> ...and it seems sane that there could be a separate (more
>> fine grained) lock protecting these elements. It is OK to keep a
>> pointer to an element in this list without holding the lock--even if
>> it's removed from the list the pointer is still valid. Presumably
>> full deletion of the element (and any other access of members of the
>> element) is protected by some other lock.
>
> Aiee! You have too much faith. mwifiex_11n_del_rx_reorder_tbl_by_ta() ->
> mwifiex_del_rx_reorder_entry(), for instance, has *very* bad handling of
> this lock. It looks like:
> (a) it does a series of acquire/release pairs on this lock while still
> retaining the mwifiex_rx_reorder_tbl pointer. That's bad.
> (b) it doesn't have any other lock to protect deletion; it *hopes* that
> it cleared out the remaining packets in its buffer
> (mwifiex_11n_dispatch_pkt_until_start_win()); but it doesn't hold any
> other relevant locks between that and this:
>
> spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
> list_del(&tbl->list);
> spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
>
> kfree(tbl->rx_reorder_ptr);
> kfree(tbl);
>
> That's pretty bad.
>
> I think it's best to work toward holding that lock while anyone is accessing
> the entry. So we should only drop the lock when we're done accessing the
> element, or when we've removed it from the list.
OK, sounds good. I guess that'll have to be prototyped so we can see
if it's possible to do without running into deadlocks.
One question I have: do we ever actually do anything serious with one
of these elements when it's _not_ in the list? As far as I can tell
entries are added to "priv->rx_reorder_tbl_ptr" at creation time and
removed at deletion time.
If that's true and we start holding the "rx_reorder_tbl_lock" whenever
we're accessing the linked list _or_ the contents then we can actually
just get rid of the "rx_pkt_lock" because whenever we grab rx_pkt_lock
we're guaranteed to already be holding the rx_reorder_tbl_lock.
I guess I'd have to see this prototyped to see if it really works.
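If that idea holds, a prototype might look roughly like this (sketch
only, declarations omitted): every access to an entry's contents happens
under rx_reorder_tbl_lock, at which point the inner rx_pkt_lock no
longer adds anything, since it would only ever be taken while the outer
lock is already held.

spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta);
if (tbl) {
        rx_tmp_ptr = tbl->rx_reorder_ptr[i];    /* contents covered by the same lock */
        tbl->rx_reorder_ptr[i] = NULL;
}
spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);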
>> Each element in the "rx_reorder_tbl_ptr" list is a "struct
>> mwifiex_rx_reorder_tbl". It contains a pseudo-circular buffer
>> (win_size elements big) "tbl->rx_reorder_ptr[]". Insertions and
>> deletions from the pseudo-circular buffer are supposed to be protected
>> by the global "rx_pkt_lock".
>
> Seems right to me.
>
>> In general if you hold an "index" into
>> this buffer you should be holding the "rx_pkt_lock",
>
> Sure.
>
>> but if you hold
>> one of the elements (each element is a pointer) then you presumably
>> don't need the "rx_pkt_lock" (if you do, I'd be curious as to why).
>
> You mean, "if you hold one of these elements [and have removed it from the ring
> buffer]" then you don't need the lock? If so, I agree.
>
>> Did I get that right?
>>
>>
>> So overall the "rx_pkt_lock" seems a bit fishy to me.
>>
>> My first clue that something seems wrong is that "rx_pkt_lock" is
>> global (one for the whole driver) but seems to be protecting something
>> that is more fine-grained. Locks really ought to be part of the
>> structure whose elements they are protecting. Said another way, I'd
>> expect "rx_pkt_lock" NOT to be in "priv" but for there to be a
>> separate instance of "rx_pkt_lock" inside each "struct
>> mwifiex_rx_reorder_tbl". That would make it much more obvious what
>> was being protected and also would allow different tables to be worked
>> on at the same time.
>>
>> NOTE: it's probably not the end of the world if there's a single
>> global "rx_pkt_lock", it's just less ideal and not all that different
>> than having one big lock protecting everything in the driver.
>
> Agreed on the above 2 paragraphs. At a minimum, rx_pkt_lock should be
> documented better. But part of that documentation is typically putting the lock
> near what it's protecting.
>
> But overall, I'm not sure we're getting much interesting concurrency
> here at all, and so I'm not sure if all the (poort attempt at)
> fine-grained locking is even helpful. Ganapathi only pointed out
> cfg80211 TDLS operations as the "2nd thread of execution" besides the
> mwifiex "main thread" -- this thread is only used for configuration
> (e.g., setup, teardown, and configuration of TDLS links). And any time
> we're holding the "fine grained" rx_pkt_lock, we already are holdling
> the larger table lock.
>
>> The other thing that is fishy is that "rx_pkt_lock" doesn't seem to be
>> reliably protecting, again as Brian pointed out. Specifically:
>>
>> * It seems like "start_win" is supposed to be protected by
>> "rx_pkt_lock" too, but it's not in
>> mwifiex_11n_dispatch_pkt_until_start_win() (and many other places, I
>> didn't check). In general any place where we're holding an _index_
>> into the table seems like it should have the "rx_pkt_lock" and
>> "start_win" is an index. Why do I say this? If you are holding an
>> _index_ and something is deleted from the list (or shuffled) then the
>> index just won't be valid anymore. Thus if others can be inserting /
>> removing / shuffling the list then holding an index isn't safe without
>> the lock.
>
> Agreed.
>
>> * There are several places that access "rx_reorder_ptr" without
>> grabbing "rx_pkt_lock". That seems wrong. For instance,
>> mwifiex_get_rx_reorder_tbl(), but probably other places too.
>
> Ah, yes.
>
>> * Functions like mwifiex_11n_find_last_seq_num() seem quite confused.
>> They act on a table entry but for some reason grab the
>> "rx_reorder_tbl_lock", which was supposed to be protecting the linked
>> list (I think).
>
> Agreed.
>
>> Worse yet mwifiex_11n_find_last_seq_num() returns an
>> _index_ into the pseudo-queue. I don't think that is a super valid
>> thing to do unless you assume that the caller has the
>> rx_reorder_tbl_lock, which it doesn't. Probably
>> mwifiex_11n_find_last_seq_num() should require the caller hold
>> "rx_pkt_lock".
>
> I'm not sure the right solution here, but it does look wrong today. The
> only saving grace here would be if we don't care about this being
> slightly inaccurate. For instance, I *think* it's fine to return some of
> this out of order.
>
> On a similar note: why does mwifiex_11n_dispatch_pkt_until_start_win()
> release rx_pkt_lock in between each index (i.e., the "for (i = 0; i <
> pkt_to_send; ++i)" loop)? That means that the entire ring buffer
> indexing could change underneath our feet. I'm not sure this is
> technically unsafe (we do still grab the lock and do NULL checks), but
> it might still cause undesireable results around what packets get
> dispatched or now -- i.e., we might not *really* be honoring the window
> arguments that were passed in.
Yeah, it's pretty silly. It would get fixed if you held the lock
during the whole time you held onto an index (since "i" is an index).
...but maybe we'll just get rid of the rx_pkt_lock anyway in which
case it won't matter?
Speaking of silly things in that function though, right now we have:
spin_lock_irqsave(&priv->rx_pkt_lock, flags);
rx_tmp_ptr = NULL;
if (tbl->rx_reorder_ptr[i]) {
        rx_tmp_ptr = tbl->rx_reorder_ptr[i];
        tbl->rx_reorder_ptr[i] = NULL;
}
spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
if (rx_tmp_ptr)
        mwifiex_11n_dispatch_pkt(priv, rx_tmp_ptr);
Since mwifiex_11n_dispatch_pkt() actually checks for a NULL parameter
and there's no reason to say (a = NULL; if b != NULL then a = b), the
above is actually just:
spin_lock_irqsave(&priv->rx_pkt_lock, flags);
rx_tmp_ptr = tbl->rx_reorder_ptr[i];
tbl->rx_reorder_ptr[i] = NULL;
spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
mwifiex_11n_dispatch_pkt(priv, rx_tmp_ptr);
...but that's just an optimization.
>> >> On a related note: I don't actually see the ring buffer *insertion* being
>> >> protected by this rx_pkt_lock, so is it really useful? See
>> >> mwifiex_11n_rx_reorder_pkt(). Perhaps the correct lock is actually
>> >> rx_reorder_tbl_lock()? Except that serves multiple purposes it seems...
>> >
>> > Right, ring buffer insertion is not protected. I think we should be using both rx_reorder_tbl_lock(which is already present) and rx_pkt_lock(we need to add) during insertion (mwifiex_11n_rx_reorder_pkt()).
>>
>> As far as I see mwifiex_11n_rx_reorder_pkt() doesn't hold
>> "rx_reorder_tbl_lock" for the whole function. It just grabs it to
>> search the linked list and then drops it. Is that right? Seems OK
>> given my current (limited) understanding but it sounded like you were
>> expecting it to be held for the whole function?
>
> I think patch 3 fixes that? It tries to grab the lock for (almost) the
> entire function.
>
>> IMHO then it needs to grab "rx_pkt_lock" before the access to
>> tbl->start_win, but I haven't looked at everything fully.
>>
>>
>> > Also, we should be using rx_pkt_lock instead of rx_reorder_tbl_lock in mwifiex_11n_find_last_seq_num().
>> >>
>> >> Another thought: from what I can tell, all of these operations are
>> >> *only* performed on the main thread, so there's actually no concurrency
>> >> involved at all. So we also could probably drop this lock entirely...
>> >
>> > Let us know your inputs on above observations.
>>
>> Not sure if my thoughts above made sense or were useful. Hopefully I
>> didn't sound too stupid... ;)
>
> Not stupid to me. Thanks for looking.
>
> Brian
>
>> >> I guess the real point is: can you explain better what you intend this lock to
>> >> do? Then we can review whether you're accomplishing your intention, or
>> >> whether we should improve the usage of this lock in some other way, or
>> >> perhaps even kill it entirely.
>> >>
>> >> Thanks,
>> >> Brian
>> >>
>> >> > if (rx_tmp_ptr)
>> >> > mwifiex_11n_dispatch_pkt(priv, rx_tmp_ptr);
>> >> > +
>> >> > + spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
>> >> > }
>> >> >
>> >> > spin_lock_irqsave(&priv->rx_pkt_lock, flags); @@ -167,8 +168,8 @@
>> >> > static int mwifiex_11n_dispatch_pkt(struct mwifiex_private *priv, void
>> >> *payload)
>> >> > }
>> >> > rx_tmp_ptr = tbl->rx_reorder_ptr[i];
>> >> > tbl->rx_reorder_ptr[i] = NULL;
>> >> > - spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
>> >> > mwifiex_11n_dispatch_pkt(priv, rx_tmp_ptr);
>> >> > + spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
>> >> > }
>> >> >
>> >> > spin_lock_irqsave(&priv->rx_pkt_lock, flags);
>> >> > --
>> >> > 1.9.1
>> >> >
>> >
>> > Regards,
>> > Ganapathi
Hi Doug and Ganapathi,
On Mon, Nov 06, 2017 at 04:42:20PM -0800, Doug Anderson wrote:
> Hi,
>
> Please take the review below with a grain of salt since I'm not
> terribly familiar with this driver. I thought I might be able to help
> since I had previously looked at some of the spinlock stuff, but after
> digging through the below I'm not 100% convinced I understand this
> driver enough to do a quick review...
>
> On Thu, Nov 2, 2017 at 3:30 AM, Ganapathi Bhat <[email protected]> wrote:
> > Hi Brian,
> >
> >> On Tue, Oct 31, 2017 at 03:12:45PM +0530, Ganapathi Bhat wrote:
> >> > From: Karthik Ananthapadmanabha <[email protected]>
> >> >
> >> > Fix the incorrect usage of rx_pkt_lock spinlock. Realsing the spinlock
> >> > while the element is still in process is unsafe. The lock must be
> >> > released only after the element processing is complete.
> >> >
> >> > Signed-off-by: Karthik Ananthapadmanabha <[email protected]>
> >> > Signed-off-by: Ganapathi Bhat <[email protected]>
> >> > ---
> >> > drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c | 5 +++--
> >> > 1 file changed, 3 insertions(+), 2 deletions(-)
> >> >
> >> > diff --git a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
> >> > b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
> >> > index 1edcdda..0149c4a 100644
> >> > --- a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
> >> > +++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
> >> > @@ -124,9 +124,10 @@ static int mwifiex_11n_dispatch_pkt(struct
> >> mwifiex_private *priv, void *payload)
> >> > rx_tmp_ptr = tbl->rx_reorder_ptr[i];
> >> > tbl->rx_reorder_ptr[i] = NULL;
> >> > }
> >> > - spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
> >>
> >> So, what is this lock protecting? Is it for protecting the packet itself, or for
> >> protecting the ring buffer?
> >
> > This lock is protecting the ring buffer but with the current change, we are trying also to protect the packet too.
>
> What are you trying to protect the packet from? In other words: what
> other code is trying to make changes to the same structure? I'd agree
Agreed on this question. I don't understand what, specifically, about
"the packet" you're trying to protect, nor what other code might be
accessing it.
> with Brian that on first glance nobody else can have a reference to
> this packet since we've removed it from the global list. If you're
Right, if we're safely removing it from the pseudo ring buffer, then
nobody else should have access to it now.
> trying to protect something internal to this structure or something
> global to mwifiex then it seems like you'd want a different lock...
>
>
> >> As I read it, it's just the latter, since once we've
> >> removed it from the ring buffer (and are about to dispatch it up toward the
> >> networking layer), it's handled only by a single context (or else, protected via
> >> some other common lock(s)).
> >>
> >> If I'm correct above, then I think the current code here is actually safe, since
> >> it holds the lock while removing from the ring buffer -- after it's removed
> >> from the ring, there's no way another thread can find this payload, and so we
> >> can release the lock.
> >
> > There are cases where the ring buffer is iterated by cfg80211 thread:
> > mwifiex_cfg80211_tdls_oper => mwifiex_tdls_oper => mwifiex_tdls_process_disable_link => mwifiex_11n_cleanup_reorder_tbl => mwifiex_del_rx_reorder_entry => mwifiex_11n_dispatch_pkt_until_start_win => {iterates the ring buffer}
> > So, at worst case we can have two threads (main & cfg80211) running mwifiex_11n_dispatch_pkt_until_start_win(), iterating the ring buffer.
>
> Let me see if I'm understanding properly.
>
> There's a global (one for the whole driver) list called the
Just a point of order: it's a single list for each 'mwifiex_private' --
there can actually be multiple virtual interfaces attached to each
physical wiphy interface, and each virtual interface gets a
'mwifiex_private' instance. But otherwise, your descriptions are
accurate I believe.
> "rx_reorder_tbl_ptr". Insertion, deletion, and iteration of elements
> into this list are protected by the global "rx_reorder_tbl_lock" so we
> don't corrupt the linked list structure.
>
> The actual elements in the "rx_reorder_tbl_ptr" are _not_ protected by
> the global "rx_reorder_tbl_lock". I make this statement because I see
> mwifiex_11n_get_rx_reorder_tbl() which drops the lock before returning
> an entry.
Actually, patch 3 is changing that one :)
> ...and it seems sane that there could be a separate (more
> fine grained) lock protecting these elements. It is OK to keep a
> pointer to an element in this list without holding the lock--even if
> it's removed from the list the pointer is still valid. Presumably
> full deletion of the element (and any other access of members of the
> element) is protected by some other lock.
Aiee! You have too much faith. mwifiex_11n_del_rx_reorder_tbl_by_ta() ->
mwifiex_del_rx_reorder_entry(), for instance, has *very* bad handling of
this lock. It looks like:
(a) it does a series of acquire/release pairs on this lock while still
retaining the mwifiex_rx_reorder_tbl pointer. That's bad.
(b) it doesn't have any other lock to protect deletion; it *hopes* that
it cleared out the remaining packets in its buffer
(mwifiex_11n_dispatch_pkt_until_start_win()); but it doesn't hold any
other relevant locks between that and this:
spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
list_del(&tbl->list);
spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
kfree(tbl->rx_reorder_ptr);
kfree(tbl);
That's pretty bad.
I think it's best to work toward holding that lock while anyone is accessing
the entry. So we should only drop the lock when we're done accessing the
element, or when we've removed it from the list.
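Roughly, the deletion path would then look something like this (sketch
only; it glosses over whether dispatching packets up the stack while
holding the lock is acceptable):

spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
mwifiex_11n_dispatch_pkt_until_start_win(priv, tbl, start_win);
list_del(&tbl->list);           /* entry can no longer be found via the list */
spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);

kfree(tbl->rx_reorder_ptr);     /* safe: only this context still holds tbl */
kfree(tbl);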
> Each element in the "rx_reorder_tbl_ptr" list is a "struct
> mwifiex_rx_reorder_tbl". It contains a pseudo-circular buffer
> (win_size elements big) "tbl->rx_reorder_ptr[]". Insertions and
> deletions from the pseudo-circular buffer are supposed to be protected
> by the global "rx_pkt_lock".
Seems right to me.
> In general if you hold an "index" into
> this buffer you should be holding the "rx_pkt_lock",
Sure.
> but if you hold
> one of the elements (each element is a pointer) then you presumably
> don't need the "rx_pkt_lock" (if you do, I'd be curious as to why).
You mean, "if you hold one of these elements [and have removed it from the ring
buffer]" then you don't need the lock? If so, I agree.
> Did I get that right?
>
>
> So overall the "rx_pkt_lock" seems a bit fishy to me.
>
> My first clue that something seems wrong is that "rx_pkt_lock" is
> global (one for the whole driver) but seems to be protecting something
> that is more fine-grained. Locks really ought to be part of the
> structure whose elements they are protecting. Said another way, I'd
> expect "rx_pkt_lock" NOT to be in "priv" but for there to be a
> separate instance of "rx_pkt_lock" inside each "struct
> mwifiex_rx_reorder_tbl". That would make it much more obvious what
> was being protected and also would allow different tables to be worked
> on at the same time.
>
> NOTE: it's probably not the end of the world if there's a single
> global "rx_pkt_lock", it's just less ideal and not all that different
> than having one big lock protecting everything in the driver.
Agreed on the above 2 paragraphs. At a minimum, rx_pkt_lock should be
documented better. But part of that documentation is typically putting the lock
near what it's protecting.
But overall, I'm not sure we're getting much interesting concurrency
here at all, and so I'm not sure if all the (poor attempt at)
fine-grained locking is even helpful. Ganapathi only pointed out
cfg80211 TDLS operations as the "2nd thread of execution" besides the
mwifiex "main thread" -- this thread is only used for configuration
(e.g., setup, teardown, and configuration of TDLS links). And any time
we're holding the "fine grained" rx_pkt_lock, we already are holding
the larger table lock.
> The other thing that is fishy is that "rx_pkt_lock" doesn't seem to be
> reliably protecting, again as Brian pointed out. Specifically:
>
> * It seems like "start_win" is supposed to be protected by
> "rx_pkt_lock" too, but it's not in
> mwifiex_11n_dispatch_pkt_until_start_win() (and many other places, I
> didn't check). In general any place where we're holding an _index_
> into the table seems like it should have the "rx_pkt_lock" and
> "start_win" is an index. Why do I say this? If you are holding an
> _index_ and something is deleted from the list (or shuffled) then the
> index just won't be valid anymore. Thus if others can be inserting /
> removing / shuffling the list then holding an index isn't safe without
> the lock.
Agreed.
> * There are several places that access "rx_reorder_ptr" without
> grabbing "rx_pkt_lock". That seems wrong. For instance,
> mwifiex_get_rx_reorder_tbl(), but probably other places too.
Ah, yes.
> * Functions like mwifiex_11n_find_last_seq_num() seem quite confused.
> They act on a table entry but for some reason grab the
> "rx_reorder_tbl_lock", which was supposed to be protecting the linked
> list (I think).
Agreed.
> Worse yet mwifiex_11n_find_last_seq_num() returns an
> _index_ into the pseudo-queue. I don't think that is a super valid
> thing to do unless you assume that the caller has the
> rx_reorder_tbl_lock, which it doesn't. Probably
> mwifiex_11n_find_last_seq_num() should require the caller hold
> "rx_pkt_lock".
I'm not sure the right solution here, but it does look wrong today. The
only saving grace here would be if we don't care about this being
slightly inaccurate. For instance, I *think* it's fine to return some of
this out of order.
On a similar note: why does mwifiex_11n_dispatch_pkt_until_start_win()
release rx_pkt_lock in between each index (i.e., the "for (i = 0; i <
pkt_to_send; ++i)" loop)? That means that the entire ring buffer
indexing could change underneath our feet. I'm not sure this is
technically unsafe (we do still grab the lock and do NULL checks), but
it might still cause undesirable results around what packets get
dispatched or not -- i.e., we might not *really* be honoring the window
arguments that were passed in.
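Holding the lock across the whole window walk would avoid that (sketch
only; whether mwifiex_11n_dispatch_pkt() may be called with the lock
held would need to be checked):

spin_lock_irqsave(&priv->rx_pkt_lock, flags);
for (i = 0; i < pkt_to_send; ++i) {
        rx_tmp_ptr = tbl->rx_reorder_ptr[i];
        tbl->rx_reorder_ptr[i] = NULL;
        if (rx_tmp_ptr)
                mwifiex_11n_dispatch_pkt(priv, rx_tmp_ptr);
}
spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);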
> >> On a related note: I don't actually see the ring buffer *insertion* being
> >> protected by this rx_pkt_lock, so is it really useful? See
> >> mwifiex_11n_rx_reorder_pkt(). Perhaps the correct lock is actually
> >> rx_reorder_tbl_lock()? Except that serves multiple purposes it seems...
> >
> > Right, ring buffer insertion is not protected. I think we should be using both rx_reorder_tbl_lock(which is already present) and rx_pkt_lock(we need to add) during insertion (mwifiex_11n_rx_reorder_pkt()).
>
> As far as I see mwifiex_11n_rx_reorder_pkt() doesn't hold
> "rx_reorder_tbl_lock" for the whole function. It just grabs it to
> search the linked list and then drops it. Is that right? Seems OK
> given my current (limited) understanding but it sounded like you were
> expecting it to be held for the whole function?
I think patch 3 fixes that? It tries to grab the lock for (almost) the
entire function.
> IMHO then it needs to grab "rx_pkt_lock" before the access to
> tbl->start_win, but I haven't looked at everything fully.
>
>
> > Also, we should be using rx_pkt_lock instead of rx_reorder_tbl_lock in mwifiex_11n_find_last_seq_num().
> >>
> >> Another thought: from what I can tell, all of these operations are
> >> *only* performed on the main thread, so there's actually no concurrency
> >> involved at all. So we also could probably drop this lock entirely...
> >
> > Let us know your inputs on above observations.
>
> Not sure if my thoughts above made sense or were useful. Hopefully I
> didn't sound too stupid... ;)
Not stupid to me. Thanks for looking.
Brian
> >> I guess the real point is: can you explain better what you intend this lock to
> >> do? Then we can review whether you're accomplishing your intention, or
> >> whether we should improve the usage of this lock in some other way, or
> >> perhaps even kill it entirely.
> >>
> >> Thanks,
> >> Brian
> >>
> >> > if (rx_tmp_ptr)
> >> > mwifiex_11n_dispatch_pkt(priv, rx_tmp_ptr);
> >> > +
> >> > + spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
> >> > }
> >> >
> >> > spin_lock_irqsave(&priv->rx_pkt_lock, flags); @@ -167,8 +168,8 @@
> >> > static int mwifiex_11n_dispatch_pkt(struct mwifiex_private *priv, void
> >> *payload)
> >> > }
> >> > rx_tmp_ptr = tbl->rx_reorder_ptr[i];
> >> > tbl->rx_reorder_ptr[i] = NULL;
> >> > - spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
> >> > mwifiex_11n_dispatch_pkt(priv, rx_tmp_ptr);
> >> > + spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
> >> > }
> >> >
> >> > spin_lock_irqsave(&priv->rx_pkt_lock, flags);
> >> > --
> >> > 1.9.1
> >> >
> >
> > Regards,
> > Ganapathi
Hi Doug/Brian,
Thanks a lot for the comments and the discussion. First of all, we will drop the change added by this patch, since we don't need to hold rx_pkt_lock to protect the deleted item. Next, we will prepare the following changes to address the concerns discussed:
1. Move rx_pkt_lock from mwifiex_private to rx_reorder_tbl (see the sketch after this list)
2. Protect all (missing) cases of rx_reorder_ptr array (creation/insertion/iteration) with rx_pkt_lock
3. Protect start_win with rx_pkt_lock
4. Use rx_pkt_lock instead of rx_reorder_tbl_lock in mwifiex_11n_find_last_seq_num()
5. Protect mwifiex_11n_find_last_seq_num() with rx_pkt_lock
6. Hold rx_pkt_lock for the entire function mwifiex_11n_dispatch_pkt_until_start_win()
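For change #1, a rough sketch of what moving the lock into the entry
might look like (field names and placement are illustrative only):

struct mwifiex_rx_reorder_tbl {
        struct list_head list;
        spinlock_t rx_pkt_lock; /* protects this entry's rx_reorder_ptr[] and start_win */
        int start_win;
        int win_size;
        void **rx_reorder_ptr;
        /* ... */
};

This would also make it clearer which data the lock protects and would
allow different reorder tables to be worked on concurrently.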
The above changes are mentioned below under their respective comments. Kindly go through:
> Subject: Re: [EXT] Re: [PATCH 1/3] mwifiex: cleanup rx_pkt_lock usage in
> 11n_rxreorder.c
>
> Hi Doug and Ganapathi,
>
> On Mon, Nov 06, 2017 at 04:42:20PM -0800, Doug Anderson wrote:
> > Hi,
> >
> > Please take the review below with a grain of salt since I'm not
> > terribly familiar with this driver. I thought I might be able to help
> > since I had previously looked at some of the spinlock stuff, but after
> > digging through the below I'm not 100% convinced I understand this
> > driver enough to do a quick review...
> >
> > On Thu, Nov 2, 2017 at 3:30 AM, Ganapathi Bhat <[email protected]>
> wrote:
> > > Hi Brian,
> > >
> > >> On Tue, Oct 31, 2017 at 03:12:45PM +0530, Ganapathi Bhat wrote:
> > >> > From: Karthik Ananthapadmanabha <[email protected]>
> > >> >
> > >> > Fix the incorrect usage of rx_pkt_lock spinlock. Realsing the
> > >> > spinlock while the element is still in process is unsafe. The
> > >> > lock must be released only after the element processing is complete.
> > >> >
> > >> > Signed-off-by: Karthik Ananthapadmanabha <[email protected]>
> > >> > Signed-off-by: Ganapathi Bhat <[email protected]>
> > >> > ---
> > >> > drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c | 5 +++--
> > >> > 1 file changed, 3 insertions(+), 2 deletions(-)
> > >> >
> > >> > diff --git a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
> > >> > b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
> > >> > index 1edcdda..0149c4a 100644
> > >> > --- a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
> > >> > +++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
> > >> > @@ -124,9 +124,10 @@ static int mwifiex_11n_dispatch_pkt(struct
> > >> mwifiex_private *priv, void *payload)
> > >> > rx_tmp_ptr = tbl->rx_reorder_ptr[i];
> > >> > tbl->rx_reorder_ptr[i] = NULL;
> > >> > }
> > >> > - spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
> > >>
> > >> So, what is this lock protecting? Is it for protecting the packet
> > >> itself, or for protecting the ring buffer?
> > >
> > > This lock is protecting the ring buffer but with the current change, we are
> trying also to protect the packet too.
> >
> > What are you trying to protect the packet from? In other words: what
> > other code is trying to make changes to the same structure? I'd agree
>
> Agreed on this question. I don't understand what, specifically, about "the
> packet" you're trying to protect, nor what other code might be accessing it.
>
> > with Brian that on first glance nobody else can have a reference to
> > this packet since we've removed it from the global list. If you're
>
> Right, if we're safely removing it from the pseudo ring buffer, then nobody
> else should have access to it now.
>
> > trying to protect something internal to this structure or something
> > global to mwifiex then it seems like you'd want a different lock...
> >
> >
> > >> As I read it, it's just the latter, since once we've removed it
> > >> from the ring buffer (and are about to dispatch it up toward the
> > >> networking layer), it's handled only by a single context (or else,
> > >> protected via some other common lock(s)).
> > >>
> > >> If I'm correct above, then I think the current code here is
> > >> actually safe, since it holds the lock while removing from the ring
> > >> buffer -- after it's removed from the ring, there's no way another
> > >> thread can find this payload, and so we can release the lock.
> > >
> > > There are cases where the ring buffer is iterated by cfg80211 thread:
> > > mwifiex_cfg80211_tdls_oper => mwifiex_tdls_oper =>
> > > mwifiex_tdls_process_disable_link =>
> > >mwifiex_11n_cleanup_reorder_tbl => mwifiex_del_rx_reorder_entry =>
> > >mwifiex_11n_dispatch_pkt_until_start_win => {iterates the ring buffer} So,
> > >at worst case we can have two threads (main & cfg80211) running
> > >mwifiex_11n_dispatch_pkt_until_start_win(), iterating the ring buffer.
> >
> > Let me see if I'm understanding properly.
> >
> > There's a global (one for the whole driver) list called the
>
> Just a point of order: it's a single list for each 'mwifiex_private' -- there can
> actually be multiple virtual interfaces attached to each physical wiphy
> interface, and each virtual interface gets a 'mwifiex_private' instance. But
> otherwise, your descriptions are accurate I believe.
>
> > "rx_reorder_tbl_ptr". Insertion, deletion, and iteration of elements
> > into this list are protected by the global "rx_reorder_tbl_lock" so we
> > don't corrupt the linked list structure.
> >
> > The actual elements in the "rx_reorder_tbl_ptr" are _not_ protected by
> > the global "rx_reorder_tbl_lock". I make this statement because I see
> > mwifiex_11n_get_rx_reorder_tbl() which drops the lock before returning
> > an entry.
>
> Actually, patch 3 is changing that one :)
>
> > ...and it seems sane that there could be a separate (more fine
> > grained) lock protecting these elements. It is OK to keep a pointer
> > to an element in this list without holding the lock--even if it's
> > removed from the list the pointer is still valid. Presumably full
> > deletion of the element (and any other access of members of the
> > element) is protected by some other lock.
>
> Aiee! You have too much faith. mwifiex_11n_del_rx_reorder_tbl_by_ta() ->
> mwifiex_del_rx_reorder_entry(), for instance, has *very* bad handling of
> this lock. It looks like:
> (a) it does a series of acquire/release pairs on this lock while still retaining the
> mwifiex_rx_reorder_tbl pointer. That's bad.
> (b) it doesn't have any other lock to protect deletion; it *hopes* that it
> cleared out the remaining packets in its buffer
> (mwifiex_11n_dispatch_pkt_until_start_win()); but it doesn't hold any other
> relevant locks between that and this:
>
> spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
> list_del(&tbl->list);
> spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
>
> kfree(tbl->rx_reorder_ptr);
> kfree(tbl);
>
> That's pretty bad.
>
> I think it's best to work toward holding that lock while anyone is accessing the
> entry. So we should only drop the lock when we're done accessing the
> element, or when we've removed it from the list.
>
> > Each element in the "rx_reorder_tbl_ptr" list is a "struct
> > mwifiex_rx_reorder_tbl". It contains a pseudo-circular buffer
> > (win_size elements big) "tbl->rx_reorder_ptr[]". Insertions and
> > deletions from the pseudo-circular buffer are supposed to be protected
> > by the global "rx_pkt_lock".
>
> Seems right to me.
>
> > In general if you hold an "index" into this buffer you should be
> > holding the "rx_pkt_lock",
>
> Sure.
>
> > but if you hold
> > one of the elements (each element is a pointer) then you presumably
> > don't need the "rx_pkt_lock" (if you do, I'd be curious as to why).
>
> You mean, "if you hold one of these elements [and have removed it from
> the ring buffer]" then you don't need the lock? If so, I agree.
>
> > Did I get that right?
> >
Yes. Actually, I get it now: we removed the element from the ring buffer, so no further locking is needed for it. Sorry, Brian, for the unnecessary confusion; somehow I missed the point that the element is being deleted.
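For reference, the pattern that is already safe (paraphrased from the current dispatch code, without the change from this patch):

        spin_lock_irqsave(&priv->rx_pkt_lock, flags);
        rx_tmp_ptr = tbl->rx_reorder_ptr[i];
        tbl->rx_reorder_ptr[i] = NULL;          /* slot cleared under the lock */
        spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);

        /* nobody else can reach rx_tmp_ptr now, so dispatch without the lock */
        if (rx_tmp_ptr)
                mwifiex_11n_dispatch_pkt(priv, rx_tmp_ptr);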
> >
> > So overall the "rx_pkt_lock" seems a bit fishy to me.
> >
> > My first clue that something seems wrong is that "rx_pkt_lock" is
> > global (one for the whole driver) but seems to be protecting something
> > that is more fine-grained. Locks really ought to be part of the
> > structure whose elements they are protecting. Said another way, I'd
> > expect "rx_pkt_lock" NOT to be in "priv" but for there to be a
> > separate instance of "rx_pkt_lock" inside each "struct
> > mwifiex_rx_reorder_tbl". That would make it much more obvious what
> > was being protected and also would allow different tables to be worked
> > on at the same time.
> >
> > NOTE: it's probably not the end of the world if there's a single
> > global "rx_pkt_lock", it's just less ideal and not all that different
> > than having one big lock protecting everything in the driver.
>
> Agreed on the above 2 paragraphs. At a minimum, rx_pkt_lock should be
> documented better. But part of that documentation is typically putting the
> lock near what it's protecting.
>
Will address this with change #1
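For change #1, the rough shape we have in mind (field placement and init site only; a sketch, not the final patch):

        struct mwifiex_rx_reorder_tbl {
                struct list_head list;
                /* ... existing fields ... */
                void **rx_reorder_ptr;
                /* protects rx_reorder_ptr[] and start_win of this table only */
                spinlock_t rx_pkt_lock;
        };

        /* wherever the table is kzalloc'ed (new_node in the current code): */
        spin_lock_init(&new_node->rx_pkt_lock);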
> But overall, I'm not sure we're getting much interesting concurrency here at
> all, and so I'm not sure if all the (poort attempt at) fine-grained locking is
> even helpful. Ganapathi only pointed out
> cfg80211 TDLS operations as the "2nd thread of execution" besides the
> mwifiex "main thread" -- this thread is only used for configuration (e.g.,
> setup, teardown, and configuration of TDLS links). And any time we're
> holding the "fine grained" rx_pkt_lock, we already are holding the larger
> table lock.
>
> > The other thing that is fishy is that "rx_pkt_lock" doesn't seem to be
> > reliably protecting, again as Brian pointed out. Specifically:
> >
> > * It seems like "start_win" is supposed to be protected by
> > "rx_pkt_lock" too, but it's not in
> > mwifiex_11n_dispatch_pkt_until_start_win() (and many other places, I
> > didn't check). In general any place where we're holding an _index_
> > into the table seems like it should have the "rx_pkt_lock" and
> > "start_win" is an index. Why do I say this? If you are holding an
> > _index_ and something is deleted from the list (or shuffled) then the
> > index just won't be valid anymore. Thus if others can be inserting /
> > removing / shuffling the list then holding an index isn't safe without
> > the lock.
>
> Agreed.
Will address this with change #3
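For change #3, roughly (new_start_win stands in for whatever value the function computes; until change #1 lands the lock is still the one in priv):

        spin_lock_irqsave(&priv->rx_pkt_lock, flags);
        start_win = tbl->start_win;
        /* ... walk/shift slots relative to start_win ... */
        tbl->start_win = new_start_win; /* update while still holding the lock */
        spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);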
>
> > * There are several places that access "rx_reorder_ptr" without
> > grabbing "rx_pkt_lock". That seems wrong. For instance,
> > mwifiex_get_rx_reorder_tbl(), but probably other places too.
>
> Ah, yes.
>
Will address this with change #2
> > * Functions like mwifiex_11n_find_last_seq_num() seem quite confused.
> > They act on a table entry but for some reason grab the
> > "rx_reorder_tbl_lock", which was supposed to be protecting the linked
> > list (I think).
>
> Agreed.
>
Will address this with change #4
> > Worse yet mwifiex_11n_find_last_seq_num() returns an _index_ into the
> > pseudo-queue. I don't think that is a super valid thing to do unless
> > you assume that the caller has the rx_reorder_tbl_lock, which it
> > doesn't. Probably
> > mwifiex_11n_find_last_seq_num() should require the caller hold
> > "rx_pkt_lock".
>
Will address this with change #5
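For changes #4/#5, the idea is to document and enforce the caller-held lock (signature simplified for illustration; assumes the per-table lock from change #1):

        /* Caller must hold tbl->rx_pkt_lock; the returned index is only
         * meaningful while that lock is still held.
         */
        static int mwifiex_11n_find_last_seq_num(struct mwifiex_rx_reorder_tbl *tbl)
        {
                int i;

                lockdep_assert_held(&tbl->rx_pkt_lock);

                for (i = tbl->win_size - 1; i >= 0; i--)
                        if (tbl->rx_reorder_ptr[i])
                                return i;

                return -1;
        }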
> I'm not sure what the right solution is here, but it does look wrong today. The only
> saving grace here would be if we don't care about this being slightly
> inaccurate. For instance, I *think* it's fine to return some of this out of order.
>
> On a similar note: why does mwifiex_11n_dispatch_pkt_until_start_win()
> release rx_pkt_lock in between each index (i.e., the "for (i = 0; i <
> pkt_to_send; ++i)" loop)? That means that the entire ring buffer indexing
> could change underneath our feet. I'm not sure this is technically unsafe (we
> do still grab the lock and do NULL checks), but it might still cause undesirable
> results around which packets get dispatched or not -- i.e., we might not
> *really* be honoring the window arguments that were passed in.
>
Will address this with change #6
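For change #6, one possible shape is to detach everything in a single pass under the lock and dispatch afterwards (dispatch[] is just a hypothetical local scratch array used to illustrate the idea):

        spin_lock_irqsave(&priv->rx_pkt_lock, flags);
        for (i = 0; i < pkt_to_send; ++i) {
                /* detach every slot in one pass so the ring indexing
                 * cannot shift underneath us */
                dispatch[i] = tbl->rx_reorder_ptr[i];
                tbl->rx_reorder_ptr[i] = NULL;
        }
        spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);

        for (i = 0; i < pkt_to_send; ++i)
                if (dispatch[i])
                        mwifiex_11n_dispatch_pkt(priv, dispatch[i]);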
> > >> On a related note: I don't actually see the ring buffer *insertion*
> > >> being protected by this rx_pkt_lock, so is it really useful? See
> > >> mwifiex_11n_rx_reorder_pkt(). Perhaps the correct lock is actually
> > >> rx_reorder_tbl_lock()? Except that serves multiple purposes it seems...
> > >
> > > Right, ring buffer insertion is not protected. I think we should be using
> both rx_reorder_tbl_lock (which is already present) and rx_pkt_lock (which
> we need to add) during insertion (mwifiex_11n_rx_reorder_pkt()).
> >
> > As far as I see mwifiex_11n_rx_reorder_pkt() doesn't hold
> > "rx_reorder_tbl_lock" for the whole function. It just grabs it to
> > search the linked list and then drops it. Is that right? Seems OK
> > given my current (limited) understanding but it sounded like you were
> > expecting it to be held for the whole function?
>
> I think patch 3 fixes that? It tries to grab the lock for (almost) the entire
> function.
>
> > IMHO then it needs to grab "rx_pkt_lock" before the access to
> > tbl->start_win, but I haven't looked at everything fully.
> >
> >
> > > Also, we should be using rx_pkt_lock instead of rx_reorder_tbl_lock in
> mwifiex_11n_find_last_seq_num().
> > >>
> > >> Another thought: from what I can tell, all of these operations are
> > >> *only* performed on the main thread, so there's actually no
> > >> concurrency involved at all. So we also could probably drop this lock
> entirely...
> > >
> > > Let us know your inputs on above observations.
> >
> > Not sure if my thoughts above made sense or were useful. Hopefully I
> > didn't sound too stupid... ;)
>
> Not stupid to me. Thanks for looking.
>
Thanks for all the reviews.
> Brian
>
> > >> I guess the real point is: can you explain better what you intend
> > >> this lock to do? Then we can review whether you're accomplishing
> > >> your intention, or whether we should improve the usage of this lock
> > >> in some other way, or perhaps even kill it entirely.
Brian,
I got it. This change is not needed.
Also, let us know whether the above changes are all that is needed, or whether they still fail to address the issues raised here.
> > >>
> > >> Thanks,
> > >> Brian
> > >>
> > >> > if (rx_tmp_ptr)
> > >> > mwifiex_11n_dispatch_pkt(priv, rx_tmp_ptr);
> > >> > +
> > >> > + spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
> > >> > }
> > >> >
> > >> > spin_lock_irqsave(&priv->rx_pkt_lock, flags); @@ -167,8
> > >> > +168,8 @@ static int mwifiex_11n_dispatch_pkt(struct
> > >> > mwifiex_private *priv, void
> > >> *payload)
> > >> > }
> > >> > rx_tmp_ptr = tbl->rx_reorder_ptr[i];
> > >> > tbl->rx_reorder_ptr[i] = NULL;
> > >> > - spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
> > >> > mwifiex_11n_dispatch_pkt(priv, rx_tmp_ptr);
> > >> > + spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
> > >> > }
> > >> >
> > >> > spin_lock_irqsave(&priv->rx_pkt_lock, flags);
> > >> > --
> > >> > 1.9.1
> > >> >
> > >
> > > Regards,
> > > Ganapathi
Hi,
I haven't reviewed this patch in its entirety, but I'm pretty sure
you're introducing a reliable AA deadlock. See below.
Are you sure you're actually testing these codepaths?
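Reduced to a sketch, the pattern this patch creates in mwifiex_11n_cleanup_reorder_tbl() is:

        spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
        list_for_each_entry_safe(del_tbl_ptr, tmp_node,
                                 &priv->rx_reorder_tbl_ptr, list)
                /* mwifiex_del_rx_reorder_entry() still takes
                 * rx_reorder_tbl_lock internally, so the same CPU tries to
                 * acquire a lock it already holds */
                mwifiex_del_rx_reorder_entry(priv, del_tbl_ptr);
        spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);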
On Tue, Oct 31, 2017 at 03:12:47PM +0530, Ganapathi Bhat wrote:
> From: Karthik Ananthapadmanabha <[email protected]>
>
> Fix the incorrect usage of rx_reorder_tbl_lock spinlock:
>
> 1. We shouldn't access the fields of the elements returned by
> mwifiex_11n_get_rx_reorder_tbl() after we have released the spin
> lock. To fix this, acquire the spinlock before calling this
> function and release the lock only after processing of the
> corresponding element is complete.
>
> 2. Releasing the spinlock during iteration of the list and
> re-acquiring it before the next iteration is unsafe. Fix it by
> releasing the lock only after iteration of the list is complete.
>
> Signed-off-by: Karthik Ananthapadmanabha <[email protected]>
> Signed-off-by: Ganapathi Bhat <[email protected]>
> ---
> .../net/wireless/marvell/mwifiex/11n_rxreorder.c | 34 +++++++++++++++++-----
> drivers/net/wireless/marvell/mwifiex/uap_txrx.c | 3 ++
> 2 files changed, 29 insertions(+), 8 deletions(-)
>
> diff --git a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
> index 4631bc2..b99ace8 100644
> --- a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
> +++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
> @@ -234,23 +234,19 @@ static int mwifiex_11n_dispatch_pkt(struct mwifiex_private *priv, void *payload)
>
> /*
> * This function returns the pointer to an entry in Rx reordering
> - * table which matches the given TA/TID pair.
> + * table which matches the given TA/TID pair. The caller must
> + * hold rx_reorder_tbl_lock, before calling this function.
> */
> struct mwifiex_rx_reorder_tbl *
> mwifiex_11n_get_rx_reorder_tbl(struct mwifiex_private *priv, int tid, u8 *ta)
> {
> struct mwifiex_rx_reorder_tbl *tbl;
> - unsigned long flags;
>
> - spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
> list_for_each_entry(tbl, &priv->rx_reorder_tbl_ptr, list) {
> if (!memcmp(tbl->ta, ta, ETH_ALEN) && tbl->tid == tid) {
> - spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
> - flags);
> return tbl;
> }
> }
> - spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
>
> return NULL;
> }
> @@ -355,11 +351,14 @@ void mwifiex_11n_del_rx_reorder_tbl_by_ta(struct mwifiex_private *priv, u8 *ta)
> * If we get a TID, ta pair which is already present dispatch all the
> * the packets and move the window size until the ssn
> */
> + spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
> tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta);
> if (tbl) {
> mwifiex_11n_dispatch_pkt_until_start_win(priv, tbl, seq_num);
> + spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
> return;
> }
> + spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
> /* if !tbl then create one */
> new_node = kzalloc(sizeof(struct mwifiex_rx_reorder_tbl), GFP_KERNEL);
> if (!new_node)
> @@ -571,15 +570,19 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
> u16 pkt_index;
> bool init_window_shift = false;
> int ret = 0;
> + unsigned long flags;
>
> + spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
> tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta);
> if (!tbl) {
> + spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
> if (pkt_type != PKT_TYPE_BAR)
> mwifiex_11n_dispatch_pkt(priv, payload);
> return ret;
> }
>
> if ((pkt_type == PKT_TYPE_AMSDU) && !tbl->amsdu) {
> + spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
> mwifiex_11n_dispatch_pkt(priv, payload);
> return ret;
> }
> @@ -666,6 +669,8 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
> if (!tbl->timer_context.timer_is_set ||
> prev_start_win != tbl->start_win)
> mwifiex_11n_rxreorder_timer_restart(tbl);
> +
> + spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
> return ret;
> }
>
> @@ -683,6 +688,7 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
> struct mwifiex_ra_list_tbl *ra_list;
> u8 cleanup_rx_reorder_tbl;
> int tid_down;
> + unsigned long flags;
>
> if (type == TYPE_DELBA_RECEIVE)
> cleanup_rx_reorder_tbl = (initiator) ? true : false;
> @@ -693,14 +699,18 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
> peer_mac, tid, initiator);
>
> if (cleanup_rx_reorder_tbl) {
> + spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
> tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
> peer_mac);
> if (!tbl) {
> + spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
> + flags);
> mwifiex_dbg(priv->adapter, EVENT,
> "event: TID, TA not found in table\n");
> return;
> }
> mwifiex_del_rx_reorder_entry(priv, tbl);
> + spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
> } else {
> ptx_tbl = mwifiex_get_ba_tbl(priv, tid, peer_mac);
> if (!ptx_tbl) {
> @@ -732,6 +742,7 @@ int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv,
> int tid, win_size;
> struct mwifiex_rx_reorder_tbl *tbl;
> uint16_t block_ack_param_set;
> + unsigned long flags;
>
> block_ack_param_set = le16_to_cpu(add_ba_rsp->block_ack_param_set);
>
> @@ -745,17 +756,20 @@ int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv,
> mwifiex_dbg(priv->adapter, ERROR, "ADDBA RSP: failed %pM tid=%d)\n",
> add_ba_rsp->peer_mac_addr, tid);
>
> + spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
> tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
> add_ba_rsp->peer_mac_addr);
> if (tbl)
> mwifiex_del_rx_reorder_entry(priv, tbl);
>
> + spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
> return 0;
> }
>
> win_size = (block_ack_param_set & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK)
> >> BLOCKACKPARAM_WINSIZE_POS;
>
> + spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
> tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
> add_ba_rsp->peer_mac_addr);
> if (tbl) {
> @@ -766,6 +780,7 @@ int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv,
> else
> tbl->amsdu = false;
> }
> + spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
>
> mwifiex_dbg(priv->adapter, CMD,
> "cmd: ADDBA RSP: %pM tid=%d ssn=%d win_size=%d\n",
> @@ -806,9 +821,7 @@ void mwifiex_11n_cleanup_reorder_tbl(struct mwifiex_private *priv)
> spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
^^ Acquisition
> list_for_each_entry_safe(del_tbl_ptr, tmp_node,
> &priv->rx_reorder_tbl_ptr, list) {
> - spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
^^ No longer dropping the lock
> mwifiex_del_rx_reorder_entry(priv, del_tbl_ptr);
^^ This function also acquires the lock
Deadlock!
Brian
> - spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
> }
> INIT_LIST_HEAD(&priv->rx_reorder_tbl_ptr);
> spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
> @@ -933,6 +946,7 @@ void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv,
> int tlv_buf_left = len;
> int ret;
> u8 *tmp;
> + unsigned long flags;
>
> mwifiex_dbg_dump(priv->adapter, EVT_D, "RXBA_SYNC event:",
> event_buf, len);
> @@ -952,14 +966,18 @@ void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv,
> tlv_rxba->mac, tlv_rxba->tid, tlv_seq_num,
> tlv_bitmap_len);
>
> + spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
> rx_reor_tbl_ptr =
> mwifiex_11n_get_rx_reorder_tbl(priv, tlv_rxba->tid,
> tlv_rxba->mac);
> if (!rx_reor_tbl_ptr) {
> + spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
> + flags);
> mwifiex_dbg(priv->adapter, ERROR,
> "Can not find rx_reorder_tbl!");
> return;
> }
> + spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
>
> for (i = 0; i < tlv_bitmap_len; i++) {
> for (j = 0 ; j < 8; j++) {
> diff --git a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
> index 1e6a62c..21dc14f 100644
> --- a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
> +++ b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
> @@ -421,12 +421,15 @@ int mwifiex_process_uap_rx_packet(struct mwifiex_private *priv,
> spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
> }
>
> + spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
> if (!priv->ap_11n_enabled ||
> (!mwifiex_11n_get_rx_reorder_tbl(priv, uap_rx_pd->priority, ta) &&
> (le16_to_cpu(uap_rx_pd->rx_pkt_type) != PKT_TYPE_AMSDU))) {
> + spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
> ret = mwifiex_handle_uap_rx_forward(priv, skb);
> return ret;
> }
> + spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
>
> /* Reorder and send to kernel */
> pkt_type = (u8)le16_to_cpu(uap_rx_pd->rx_pkt_type);
> --
> 1.9.1
>
Hi Brian,
> On Tue, Oct 31, 2017 at 03:12:45PM +0530, Ganapathi Bhat wrote:
> > From: Karthik Ananthapadmanabha <[email protected]>
> >
> > Fix the incorrect usage of rx_pkt_lock spinlock. Releasing the spinlock
> > while the element is still being processed is unsafe. The lock must be
> > released only after the element processing is complete.
> >
> > Signed-off-by: Karthik Ananthapadmanabha <[email protected]>
> > Signed-off-by: Ganapathi Bhat <[email protected]>
> > ---
> > drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c | 5 +++--
> > 1 file changed, 3 insertions(+), 2 deletions(-)
> >
> > diff --git a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
> > b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
> > index 1edcdda..0149c4a 100644
> > --- a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
> > +++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
> > @@ -124,9 +124,10 @@ static int mwifiex_11n_dispatch_pkt(struct
> mwifiex_private *priv, void *payload)
> > rx_tmp_ptr = tbl->rx_reorder_ptr[i];
> > tbl->rx_reorder_ptr[i] = NULL;
> > }
> > - spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
>
> So, what is this lock protecting? Is it for protecting the packet itself, or for
> protecting the ring buffer?
This lock is protecting the ring buffer, but with the current change we are trying to protect the packet as well.
> As I read it, it's just the latter, since once we've
> removed it from the ring buffer (and are about to dispatch it up toward the
> networking layer), it's handled only by a single context (or else, protected via
> some other common lock(s)).
>
> If I'm correct above, then I think the current code here is actually safe, since
> it holds the lock while removing from the ring buffer -- after it's removed
> from the ring, there's no way another thread can find this payload, and so we
> can release the lock.
There are cases where the ring buffer is iterated by cfg80211 thread:
mwifiex_cfg80211_tdls_oper => mwifiex_tdls_oper => mwifiex_tdls_process_disable_link => mwifiex_11n_cleanup_reorder_tbl => mwifiex_del_rx_reorder_entry => mwifiex_11n_dispatch_pkt_until_start_win => {iterates the ring buffer}
So, at worst case we can have two threads (main & cfg80211) running mwifiex_11n_dispatch_pkt_until_start_win(), iterating the ring buffer.
>
> On a related note: I don't actually see the ring buffer *insertion* being
> protected by this rx_pkt_lock, so is it really useful? See
> mwifiex_11n_rx_reorder_pkt(). Perhaps the correct lock is actually
> rx_reorder_tbl_lock()? Except that serves multiple purposes it seems...
Right, ring buffer insertion is not protected. I think we should be using both rx_reorder_tbl_lock (which is already present) and rx_pkt_lock (which we need to add) during insertion (mwifiex_11n_rx_reorder_pkt()).
Also, we should be using rx_pkt_lock instead of rx_reorder_tbl_lock in mwifiex_11n_find_last_seq_num().
>
> Another thought: from what I can tell, all of these operations are
> *only* performed on the main thread, so there's actually no concurrency
> involved at all. So we also could probably drop this lock entirely...
Let us know your inputs on above observations.
>
> I guess the real point is: can you explain better what you intend this lock to
> do? Then we can review whether you're accomplishing your intention, or
> whether we should improve the usage of this lock in some other way, or
> perhaps even kill it entirely.
>
> Thanks,
> Brian
>
> > if (rx_tmp_ptr)
> > mwifiex_11n_dispatch_pkt(priv, rx_tmp_ptr);
> > +
> > + spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
> > }
> >
> > spin_lock_irqsave(&priv->rx_pkt_lock, flags); @@ -167,8 +168,8 @@
> > static int mwifiex_11n_dispatch_pkt(struct mwifiex_private *priv, void
> *payload)
> > }
> > rx_tmp_ptr = tbl->rx_reorder_ptr[i];
> > tbl->rx_reorder_ptr[i] = NULL;
> > - spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
> > mwifiex_11n_dispatch_pkt(priv, rx_tmp_ptr);
> > + spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
> > }
> >
> > spin_lock_irqsave(&priv->rx_pkt_lock, flags);
> > --
> > 1.9.1
> >
Regards,
Ganapathi
Hi,
Please take the review below with a grain of salt since I'm not
terribly familiar with this driver. I thought I might be able to help
since I had previously looked at some of the spinlock stuff, but after
digging through the below I'm not 100% convinced I understand this
driver enough to do a quick review...
On Thu, Nov 2, 2017 at 3:30 AM, Ganapathi Bhat <[email protected]> wrote:
> Hi Brian,
>
>> On Tue, Oct 31, 2017 at 03:12:45PM +0530, Ganapathi Bhat wrote:
>> > From: Karthik Ananthapadmanabha <[email protected]>
>> >
>> > Fix the incorrect usage of rx_pkt_lock spinlock. Releasing the spinlock
>> > while the element is still being processed is unsafe. The lock must be
>> > released only after the element processing is complete.
>> >
>> > Signed-off-by: Karthik Ananthapadmanabha <[email protected]>
>> > Signed-off-by: Ganapathi Bhat <[email protected]>
>> > ---
>> > drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c | 5 +++--
>> > 1 file changed, 3 insertions(+), 2 deletions(-)
>> >
>> > diff --git a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
>> > b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
>> > index 1edcdda..0149c4a 100644
>> > --- a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
>> > +++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
>> > @@ -124,9 +124,10 @@ static int mwifiex_11n_dispatch_pkt(struct
>> mwifiex_private *priv, void *payload)
>> > rx_tmp_ptr = tbl->rx_reorder_ptr[i];
>> > tbl->rx_reorder_ptr[i] = NULL;
>> > }
>> > - spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
>>
>> So, what is this lock protecting? Is it for protecting the packet itself, or for
>> protecting the ring buffer?
>
> This lock is protecting the ring buffer, but with the current change we are trying to protect the packet as well.
What are you trying to protect the packet from? In other words: what
other code is trying to make changes to the same structure? I'd agree
with Brian that on first glance nobody else can have a reference to
this packet since we've removed it from the global list. If you're
trying to protect something internal to this structure or something
global to mwifiex then it seems like you'd want a different lock...
>> As I read it, it's just the latter, since once we've
>> removed it from the ring buffer (and are about to dispatch it up toward the
>> networking layer), it's handled only by a single context (or else, protected via
>> some other common lock(s)).
>>
>> If I'm correct above, then I think the current code here is actually safe, since
>> it holds the lock while removing from the ring buffer -- after it's removed
>> from the ring, there's no way another thread can find this payload, and so we
>> can release the lock.
>
> There are cases where the ring buffer is iterated by cfg80211 thread:
> mwifiex_cfg80211_tdls_oper => mwifiex_tdls_oper => mwifiex_tdls_process_disable_link => mwifiex_11n_cleanup_reorder_tbl => mwifiex_del_rx_reorder_entry => mwifiex_11n_dispatch_pkt_until_start_win => {iterates the ring buffer}
> So, at worst case we can have two threads (main & cfg80211) running mwifiex_11n_dispatch_pkt_until_start_win(), iterating the ring buffer.
Let me see if I'm understanding properly.
There's a global (one for the whole driver) list called the
"rx_reorder_tbl_ptr". Insertion, deletion, and iteration of elements
into this list are protected by the global "rx_reorder_tbl_lock" so we
don't corrupt the linked list structure.
The actual elements in the "rx_reorder_tbl_ptr" are _not_ protected by
the global "rx_reorder_tbl_lock". I make this statement because I see
mwifiex_11n_get_rx_reorder_tbl() which drops the lock before returning
an entry. ...and it seems sane that there could be a separate (more
fine grained) lock protecting these elements. It is OK to keep a
pointer to an element in this list without holding the lock--even if
it's removed from the list the pointer is still valid. Presumably
full deletion of the element (and any other access of members of the
element) is protected by some other lock.
Each element in the "rx_reorder_tbl_ptr" list is a "struct
mwifiex_rx_reorder_tbl". It contains a pseudo-circular buffer
(win_size elements big) "tbl->rx_reorder_ptr[]". Insertions and
deletions from the pseudo-circular buffer are supposed to be protected
by the global "rx_pkt_lock". In general if you hold an "index" into
this buffer you should be holding the "rx_pkt_lock", but if you hold
one of the elements (each element is a pointer) then you presumably
don't need the "rx_pkt_lock" (if you do, I'd be curious as to why).
Did I get that right?
So overall the "rx_pkt_lock" seems a bit fishy to me.
My first clue that something seems wrong is that "rx_pkt_lock" is
global (one for the whole driver) but seems to be protecting something
that is more fine-grained. Locks really ought to be part of the
structure whose elements they are protecting. Said another way, I'd
expect "rx_pkt_lock" NOT to be in "priv" but for there to be a
separate instance of "rx_pkt_lock" inside each "struct
mwifiex_rx_reorder_tbl". That would make it much more obvious what
was being protected and also would allow different tables to be worked
on at the same time.
NOTE: it's probably not the end of the world if there's a single
global "rx_pkt_lock", it's just less ideal and not all that different
than having one big lock protecting everything in the driver.
The other thing that is fishy is that "rx_pkt_lock" doesn't seem to be
reliably protecting, again as Brian pointed out. Specifically:
* It seems like "start_win" is supposed to be protected by
"rx_pkt_lock" too, but it's not in
mwifiex_11n_dispatch_pkt_until_start_win() (and many other places, I
didn't check). In general any place where we're holding an _index_
into the table seems like it should have the "rx_pkt_lock" and
"start_win" is an index. Why do I say this? If you are holding an
_index_ and something is deleted from the list (or shuffled) then the
index just won't be valid anymore. Thus if others can be inserting /
removing / shuffling the list then holding an index isn't safe without
the lock.
* There are several places that access "rx_reorder_ptr" without
grabbing "rx_pkt_lock". That seems wrong. For instance,
mwifiex_get_rx_reorder_tbl(), but probably other places too.
* Functions like mwifiex_11n_find_last_seq_num() seem quite confused.
They act on a table entry but for some reason grab the
"rx_reorder_tbl_lock", which was supposed to be protecting the linked
list (I think). Worse yet mwifiex_11n_find_last_seq_num() returns an
_index_ into the pseudo-queue. I don't think that is a super valid
thing to do unless you assume that the caller has the
rx_reorder_tbl_lock, which it doesn't. Probably
mwifiex_11n_find_last_seq_num() should require the caller hold
"rx_pkt_lock".
>> On a related note: I don't actually see the ring buffer *insertion* being
>> protected by this rx_pkt_lock, so is it really useful? See
>> mwifiex_11n_rx_reorder_pkt(). Perhaps the correct lock is actually
>> rx_reorder_tbl_lock()? Except that serves multiple purposes it seems...
>
> Right, ring buffer insertion is not protected. I think we should be using both rx_reorder_tbl_lock (which is already present) and rx_pkt_lock (which we need to add) during insertion (mwifiex_11n_rx_reorder_pkt()).
As far as I see mwifiex_11n_rx_reorder_pkt() doesn't hold
"rx_reorder_tbl_lock" for the whole function. It just grabs it to
search the linked list and then drops it. Is that right? Seems OK
given my current (limited) understanding but it sounded like you were
expecting it to be held for the whole function?
IMHO then it needs to grab "rx_pkt_lock" before the access to
tbl->start_win, but I haven't looked at everything fully.
> Also, we should be using rx_pkt_lock instead of rx_reorder_tbl_lock in mwifiex_11n_find_last_seq_num().
>>
>> Another thought: from what I can tell, all of these operations are
>> *only* performed on the main thread, so there's actually no concurrency
>> involved at all. So we also could probably drop this lock entirely...
>
> Let us know your inputs on above observations.
Not sure if my thoughts above made sense or were useful. Hopefully I
didn't sound too stupid... ;)
>> I guess the real point is: can you explain better what you intend this lock to
>> do? Then we can review whether you're accomplishing your intention, or
>> whether we should improve the usage of this lock in some other way, or
>> perhaps even kill it entirely.
>>
>> Thanks,
>> Brian
>>
>> > if (rx_tmp_ptr)
>> > mwifiex_11n_dispatch_pkt(priv, rx_tmp_ptr);
>> > +
>> > + spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
>> > }
>> >
>> > spin_lock_irqsave(&priv->rx_pkt_lock, flags); @@ -167,8 +168,8 @@
>> > static int mwifiex_11n_dispatch_pkt(struct mwifiex_private *priv, void
>> *payload)
>> > }
>> > rx_tmp_ptr = tbl->rx_reorder_ptr[i];
>> > tbl->rx_reorder_ptr[i] = NULL;
>> > - spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
>> > mwifiex_11n_dispatch_pkt(priv, rx_tmp_ptr);
>> > + spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
>> > }
>> >
>> > spin_lock_irqsave(&priv->rx_pkt_lock, flags);
>> > --
>> > 1.9.1
>> >
>
> Regards,
> Ganapathi
Hi Doug/Brian

> -----Original Message-----
> From: [email protected] [mailto:[email protected]] On Behalf Of
> Doug Anderson
> Sent: Wednesday, November 08, 2017 2:45 AM
> To: Ganapathi Bhat
> Cc: Brian Norris; linux-wireless@vger.kernel.org; Cathy Luo; Xinming Hu;
> Zhiyuan Yang; James Cao; Mangesh Malusare; Karthik Doddayennegere
> Ananthapadmanabha
> Subject: Re: [EXT] Re: [PATCH 1/3] mwifiex: cleanup rx_pkt_lock usage in
> 11n_rxreorder.c
>
> Hi,
>
> On Tue, Nov 7, 2017 at 8:25 AM, Ganapathi Bhat <[email protected]>
> wrote:
> > Hi Doug/Brian,
> >
> > Thanks a lot for the comments and the discussion. First of all we will abort
> the change added by this patch as we don't need rx_pkt_lock acquired to
> protect the deleted item. Next, we will prepare below changes to address
> the concerns discussed:
> > 1. Move rx_pkt_lock from mwifiex_private to rx_reorder_tbl
>
> ...or, possibly, remove rx_pkt_lock completely. See my other response...
After checking the flow, I am thinking of removing the below 3 spin locks:
rx_reorder_tbl_lock
rx_pkt_lock
tx_ba_stream_tbl_lock
As you both have pointed out earlier, they are used by the main thread and occasionally by cfg80211 (for TDLS configuration).
Can you please comment on this change? Is it OK to ignore cfg80211 here?
>
> -Doug

Regards,
Ganapathi
Hi Doug/Brian,
> > > 1. Move rx_pkt_lock from mwifiex_private to rx_reorder_tbl
> >
> > ...or, possibly, remove rx_pkt_lock completely. See my other response...
> After checking the flow, I am thinking of removing the below 3 spin locks:
> rx_reorder_tbl_lock
> rx_pkt_lock
> tx_ba_stream_tbl_lock
> As you both have pointed out earlier, they are used by the main thread and
> occasionally by cfg80211 (for TDLS configuration).
> Can you please comment on this change? Is it OK to ignore cfg80211 here?
We have worked on the change discussed in this thread and have come up with 2 patches to clean up the rx_reorder_tbl lock.
Kindly review the patches upstreamed below:
https://patchwork.kernel.org/patch/10485369/
https://patchwork.kernel.org/patch/10485367/
> >
> > -Doug
>
> Regards,
> Ganapathi
Thanks & Regards,
Ganapathi