2014-09-09 21:22:20

by Thomas Huehn

Subject: [PATCH v2 0/2] Unify & improve Minstrel & Minstrel_HT rate control

This patch series consolidates the rate statistics variables between Minstrel and
Minstrel_HT. It improves the sorting function that finds the max throughput rate
set in Minstrel_HT.

---
v2: fix coding style, thx to Felix Fietkau



2014-09-09 21:22:20

by Thomas Huehn

Subject: [PATCH v2 1/2] mac80211: Unify rate statistic variables between Minstrel & Minstrel_HT

Minstrel and Minstrel_HT used their own structs to keep track of rate
statistics. Unify those variables in struct minstrel_rate_stats and
move it to rc80211_minstrel.h for common usage. This is a clean-up
patch to prepare the Minstrel and Minstrel_HT codebase for upcoming TPC.

Signed-off-by: Thomas Huehn <[email protected]>
---
net/mac80211/rc80211_minstrel.c | 98 +++++++++++++++++----------------
net/mac80211/rc80211_minstrel.h | 43 ++++++++-------
net/mac80211/rc80211_minstrel_debugfs.c | 19 ++++---
net/mac80211/rc80211_minstrel_ht.h | 22 --------
4 files changed, 85 insertions(+), 97 deletions(-)

diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c
index 1c1469c..2baa7ed 100644
--- a/net/mac80211/rc80211_minstrel.c
+++ b/net/mac80211/rc80211_minstrel.c
@@ -75,7 +75,7 @@ minstrel_sort_best_tp_rates(struct minstrel_sta_info *mi, int i, u8 *tp_list)
{
int j = MAX_THR_RATES;

- while (j > 0 && mi->r[i].cur_tp > mi->r[tp_list[j - 1]].cur_tp)
+ while (j > 0 && mi->r[i].stats.cur_tp > mi->r[tp_list[j - 1]].stats.cur_tp)
j--;
if (j < MAX_THR_RATES - 1)
memmove(&tp_list[j + 1], &tp_list[j], MAX_THR_RATES - (j + 1));
@@ -92,7 +92,7 @@ minstrel_set_rate(struct minstrel_sta_info *mi, struct ieee80211_sta_rates *rate
ratetbl->rate[offset].idx = r->rix;
ratetbl->rate[offset].count = r->adjusted_retry_count;
ratetbl->rate[offset].count_cts = r->retry_count_cts;
- ratetbl->rate[offset].count_rts = r->retry_count_rtscts;
+ ratetbl->rate[offset].count_rts = r->stats.retry_count_rtscts;
}

static void
@@ -140,44 +140,46 @@ minstrel_update_stats(struct minstrel_priv *mp, struct minstrel_sta_info *mi)

for (i = 0; i < mi->n_rates; i++) {
struct minstrel_rate *mr = &mi->r[i];
+ struct minstrel_rate_stats *mrs = &mi->r[i].stats;

usecs = mr->perfect_tx_time;
if (!usecs)
usecs = 1000000;

- if (unlikely(mr->attempts > 0)) {
- mr->sample_skipped = 0;
- mr->cur_prob = MINSTREL_FRAC(mr->success, mr->attempts);
- mr->succ_hist += mr->success;
- mr->att_hist += mr->attempts;
- mr->probability = minstrel_ewma(mr->probability,
- mr->cur_prob,
- EWMA_LEVEL);
+ if (unlikely(mrs->attempts > 0)) {
+ mrs->sample_skipped = 0;
+ mrs->cur_prob = MINSTREL_FRAC(mrs->success,
+ mrs->attempts);
+ mrs->succ_hist += mrs->success;
+ mrs->att_hist += mrs->attempts;
+ mrs->probability = minstrel_ewma(mrs->probability,
+ mrs->cur_prob,
+ EWMA_LEVEL);
} else
- mr->sample_skipped++;
+ mrs->sample_skipped++;

- mr->last_success = mr->success;
- mr->last_attempts = mr->attempts;
- mr->success = 0;
- mr->attempts = 0;
+ mrs->last_success = mrs->success;
+ mrs->last_attempts = mrs->attempts;
+ mrs->success = 0;
+ mrs->attempts = 0;

/* Update throughput per rate, reset thr. below 10% success */
- if (mr->probability < MINSTREL_FRAC(10, 100))
- mr->cur_tp = 0;
+ if (mrs->probability < MINSTREL_FRAC(10, 100))
+ mrs->cur_tp = 0;
else
- mr->cur_tp = mr->probability * (1000000 / usecs);
+ mrs->cur_tp = mrs->probability * (1000000 / usecs);

/* Sample less often below the 10% chance of success.
* Sample less often above the 95% chance of success. */
- if (mr->probability > MINSTREL_FRAC(95, 100) ||
- mr->probability < MINSTREL_FRAC(10, 100)) {
- mr->adjusted_retry_count = mr->retry_count >> 1;
+ if (mrs->probability > MINSTREL_FRAC(95, 100) ||
+ mrs->probability < MINSTREL_FRAC(10, 100)) {
+ mr->adjusted_retry_count = mrs->retry_count >> 1;
if (mr->adjusted_retry_count > 2)
mr->adjusted_retry_count = 2;
mr->sample_limit = 4;
} else {
mr->sample_limit = -1;
- mr->adjusted_retry_count = mr->retry_count;
+ mr->adjusted_retry_count = mrs->retry_count;
}
if (!mr->adjusted_retry_count)
mr->adjusted_retry_count = 2;
@@ -190,11 +192,11 @@ minstrel_update_stats(struct minstrel_priv *mp, struct minstrel_sta_info *mi)
* choose the maximum throughput rate as max_prob_rate
* (2) if all success probabilities < 95%, the rate with
* highest success probability is choosen as max_prob_rate */
- if (mr->probability >= MINSTREL_FRAC(95, 100)) {
- if (mr->cur_tp >= mi->r[tmp_prob_rate].cur_tp)
+ if (mrs->probability >= MINSTREL_FRAC(95, 100)) {
+ if (mrs->cur_tp >= mi->r[tmp_prob_rate].stats.cur_tp)
tmp_prob_rate = i;
} else {
- if (mr->probability >= mi->r[tmp_prob_rate].probability)
+ if (mrs->probability >= mi->r[tmp_prob_rate].stats.probability)
tmp_prob_rate = i;
}
}
@@ -240,14 +242,14 @@ minstrel_tx_status(void *priv, struct ieee80211_supported_band *sband,
if (ndx < 0)
continue;

- mi->r[ndx].attempts += ar[i].count;
+ mi->r[ndx].stats.attempts += ar[i].count;

if ((i != IEEE80211_TX_MAX_RATES - 1) && (ar[i + 1].idx < 0))
- mi->r[ndx].success += success;
+ mi->r[ndx].stats.success += success;
}

if ((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) && (i >= 0))
- mi->sample_count++;
+ mi->sample_packets++;

if (mi->sample_deferred > 0)
mi->sample_deferred--;
@@ -265,7 +267,7 @@ minstrel_get_retry_count(struct minstrel_rate *mr,
unsigned int retry = mr->adjusted_retry_count;

if (info->control.use_rts)
- retry = max(2U, min(mr->retry_count_rtscts, retry));
+ retry = max(2U, min(mr->stats.retry_count_rtscts, retry));
else if (info->control.use_cts_prot)
retry = max(2U, min(mr->retry_count_cts, retry));
return retry;
@@ -317,15 +319,15 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta,
sampling_ratio = mp->lookaround_rate;

/* increase sum packet counter */
- mi->packet_count++;
+ mi->total_packets++;

#ifdef CONFIG_MAC80211_DEBUGFS
if (mp->fixed_rate_idx != -1)
return;
#endif

- delta = (mi->packet_count * sampling_ratio / 100) -
- (mi->sample_count + mi->sample_deferred / 2);
+ delta = (mi->total_packets * sampling_ratio / 100) -
+ (mi->sample_packets + mi->sample_deferred / 2);

/* delta < 0: no sampling required */
prev_sample = mi->prev_sample;
@@ -333,10 +335,10 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta,
if (delta < 0 || (!mrr_capable && prev_sample))
return;

- if (mi->packet_count >= 10000) {
+ if (mi->total_packets >= 10000) {
mi->sample_deferred = 0;
- mi->sample_count = 0;
- mi->packet_count = 0;
+ mi->sample_packets = 0;
+ mi->total_packets = 0;
} else if (delta > mi->n_rates * 2) {
/* With multi-rate retry, not every planned sample
* attempt actually gets used, due to the way the retry
@@ -347,7 +349,7 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta,
* starts getting worse, minstrel would start bursting
* out lots of sampling frames, which would result
* in a large throughput loss. */
- mi->sample_count += (delta - mi->n_rates * 2);
+ mi->sample_packets += (delta - mi->n_rates * 2);
}

/* get next random rate sample */
@@ -361,7 +363,7 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta,
*/
if (mrr_capable &&
msr->perfect_tx_time > mr->perfect_tx_time &&
- msr->sample_skipped < 20) {
+ msr->stats.sample_skipped < 20) {
/* Only use IEEE80211_TX_CTL_RATE_CTRL_PROBE to mark
* packets that have the sampling rate deferred to the
* second MRR stage. Increase the sample counter only
@@ -375,7 +377,7 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta,
if (!msr->sample_limit != 0)
return;

- mi->sample_count++;
+ mi->sample_packets++;
if (msr->sample_limit > 0)
msr->sample_limit--;
}
@@ -384,7 +386,7 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta,
* has a probability of >95%, we shouldn't be attempting
* to use it, as this only wastes precious airtime */
if (!mrr_capable &&
- (mi->r[ndx].probability > MINSTREL_FRAC(95, 100)))
+ (mi->r[ndx].stats.probability > MINSTREL_FRAC(95, 100)))
return;

mi->prev_sample = true;
@@ -459,6 +461,7 @@ minstrel_rate_init(void *priv, struct ieee80211_supported_band *sband,

for (i = 0; i < sband->n_bitrates; i++) {
struct minstrel_rate *mr = &mi->r[n];
+ struct minstrel_rate_stats *mrs = &mi->r[n].stats;
unsigned int tx_time = 0, tx_time_cts = 0, tx_time_rtscts = 0;
unsigned int tx_time_single;
unsigned int cw = mp->cw_min;
@@ -471,6 +474,7 @@ minstrel_rate_init(void *priv, struct ieee80211_supported_band *sband,

n++;
memset(mr, 0, sizeof(*mr));
+ memset(mrs, 0, sizeof(*mrs));

mr->rix = i;
shift = ieee80211_chandef_get_shift(chandef);
@@ -482,9 +486,9 @@ minstrel_rate_init(void *priv, struct ieee80211_supported_band *sband,
/* calculate maximum number of retransmissions before
* fallback (based on maximum segment size) */
mr->sample_limit = -1;
- mr->retry_count = 1;
+ mrs->retry_count = 1;
mr->retry_count_cts = 1;
- mr->retry_count_rtscts = 1;
+ mrs->retry_count_rtscts = 1;
tx_time = mr->perfect_tx_time + mi->sp_ack_dur;
do {
/* add one retransmission */
@@ -501,13 +505,13 @@ minstrel_rate_init(void *priv, struct ieee80211_supported_band *sband,
(mr->retry_count_cts < mp->max_retry))
mr->retry_count_cts++;
if ((tx_time_rtscts < mp->segment_size) &&
- (mr->retry_count_rtscts < mp->max_retry))
- mr->retry_count_rtscts++;
+ (mrs->retry_count_rtscts < mp->max_retry))
+ mrs->retry_count_rtscts++;
} while ((tx_time < mp->segment_size) &&
- (++mr->retry_count < mp->max_retry));
- mr->adjusted_retry_count = mr->retry_count;
+ (++mr->stats.retry_count < mp->max_retry));
+ mr->adjusted_retry_count = mrs->retry_count;
if (!(sband->bitrates[i].flags & IEEE80211_RATE_ERP_G))
- mr->retry_count_cts = mr->retry_count;
+ mr->retry_count_cts = mrs->retry_count;
}

for (i = n; i < sband->n_bitrates; i++) {
@@ -665,7 +669,7 @@ static u32 minstrel_get_expected_throughput(void *priv_sta)
/* convert pkt per sec in kbps (1200 is the average pkt size used for
* computing cur_tp
*/
- return MINSTREL_TRUNC(mi->r[idx].cur_tp) * 1200 * 8 / 1024;
+ return MINSTREL_TRUNC(mi->r[idx].stats.cur_tp) * 1200 * 8 / 1024;
}

const struct rate_control_ops mac80211_minstrel = {
diff --git a/net/mac80211/rc80211_minstrel.h b/net/mac80211/rc80211_minstrel.h
index 046d1bd..97eca86 100644
--- a/net/mac80211/rc80211_minstrel.h
+++ b/net/mac80211/rc80211_minstrel.h
@@ -31,6 +31,27 @@ minstrel_ewma(int old, int new, int weight)
return (new * (EWMA_DIV - weight) + old * weight) / EWMA_DIV;
}

+struct minstrel_rate_stats {
+ /* current / last sampling period attempts/success counters */
+ unsigned int attempts, last_attempts;
+ unsigned int success, last_success;
+
+ /* total attempts/success counters */
+ u64 att_hist, succ_hist;
+
+ /* current throughput */
+ unsigned int cur_tp;
+
+ /* packet delivery probabilities */
+ unsigned int cur_prob, probability;
+
+ /* maximum retry counts */
+ unsigned int retry_count;
+ unsigned int retry_count_rtscts;
+
+ u8 sample_skipped;
+ bool retry_updated;
+};

struct minstrel_rate {
int bitrate;
@@ -40,26 +61,10 @@ struct minstrel_rate {
unsigned int ack_time;

int sample_limit;
- unsigned int retry_count;
unsigned int retry_count_cts;
- unsigned int retry_count_rtscts;
unsigned int adjusted_retry_count;

- u32 success;
- u32 attempts;
- u32 last_attempts;
- u32 last_success;
- u8 sample_skipped;
-
- /* parts per thousand */
- u32 cur_prob;
- u32 probability;
-
- /* per-rate throughput */
- u32 cur_tp;
-
- u64 succ_hist;
- u64 att_hist;
+ struct minstrel_rate_stats stats;
};

struct minstrel_sta_info {
@@ -73,8 +78,8 @@ struct minstrel_sta_info {

u8 max_tp_rate[MAX_THR_RATES];
u8 max_prob_rate;
- unsigned int packet_count;
- unsigned int sample_count;
+ unsigned int total_packets;
+ unsigned int sample_packets;
int sample_deferred;

unsigned int sample_row;
diff --git a/net/mac80211/rc80211_minstrel_debugfs.c b/net/mac80211/rc80211_minstrel_debugfs.c
index fd0b9ca..edde723 100644
--- a/net/mac80211/rc80211_minstrel_debugfs.c
+++ b/net/mac80211/rc80211_minstrel_debugfs.c
@@ -72,6 +72,7 @@ minstrel_stats_open(struct inode *inode, struct file *file)
"this succ/attempt success attempts\n");
for (i = 0; i < mi->n_rates; i++) {
struct minstrel_rate *mr = &mi->r[i];
+ struct minstrel_rate_stats *mrs = &mi->r[i].stats;

*(p++) = (i == mi->max_tp_rate[0]) ? 'A' : ' ';
*(p++) = (i == mi->max_tp_rate[1]) ? 'B' : ' ';
@@ -81,24 +82,24 @@ minstrel_stats_open(struct inode *inode, struct file *file)
p += sprintf(p, "%3u%s", mr->bitrate / 2,
(mr->bitrate & 1 ? ".5" : " "));

- tp = MINSTREL_TRUNC(mr->cur_tp / 10);
- prob = MINSTREL_TRUNC(mr->cur_prob * 1000);
- eprob = MINSTREL_TRUNC(mr->probability * 1000);
+ tp = MINSTREL_TRUNC(mrs->cur_tp / 10);
+ prob = MINSTREL_TRUNC(mrs->cur_prob * 1000);
+ eprob = MINSTREL_TRUNC(mrs->probability * 1000);

p += sprintf(p, " %6u.%1u %6u.%1u %6u.%1u "
" %3u(%3u) %8llu %8llu\n",
tp / 10, tp % 10,
eprob / 10, eprob % 10,
prob / 10, prob % 10,
- mr->last_success,
- mr->last_attempts,
- (unsigned long long)mr->succ_hist,
- (unsigned long long)mr->att_hist);
+ mrs->last_success,
+ mrs->last_attempts,
+ (unsigned long long)mrs->succ_hist,
+ (unsigned long long)mrs->att_hist);
}
p += sprintf(p, "\nTotal packet count:: ideal %d "
"lookaround %d\n\n",
- mi->packet_count - mi->sample_count,
- mi->sample_count);
+ mi->total_packets - mi->sample_packets,
+ mi->sample_packets);
ms->len = p - ms->buf;

return 0;
diff --git a/net/mac80211/rc80211_minstrel_ht.h b/net/mac80211/rc80211_minstrel_ht.h
index d655586..5fee938 100644
--- a/net/mac80211/rc80211_minstrel_ht.h
+++ b/net/mac80211/rc80211_minstrel_ht.h
@@ -26,28 +26,6 @@ struct mcs_group {

extern const struct mcs_group minstrel_mcs_groups[];

-struct minstrel_rate_stats {
- /* current / last sampling period attempts/success counters */
- unsigned int attempts, last_attempts;
- unsigned int success, last_success;
-
- /* total attempts/success counters */
- u64 att_hist, succ_hist;
-
- /* current throughput */
- unsigned int cur_tp;
-
- /* packet delivery probabilities */
- unsigned int cur_prob, probability;
-
- /* maximum retry counts */
- unsigned int retry_count;
- unsigned int retry_count_rtscts;
-
- bool retry_updated;
- u8 sample_skipped;
-};
-
struct minstrel_mcs_group_data {
u8 index;
u8 column;
--
2.1.0
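
A minimal userspace sketch of the per-rate statistics update that the shared
struct minstrel_rate_stats carries for both algorithms, modeled on
minstrel_update_stats() above: the interval counters are folded into cur_prob,
smoothed into probability via the EWMA, and turned into a throughput estimate.
The fixed-point scale and the EWMA constants used here are illustrative
assumptions of the sketch, not the kernel's definitions.

/* Standalone sketch of the per-rate bookkeeping in minstrel_rate_stats.
 * SCALE, EWMA_DIV and EWMA_LEVEL are illustrative assumptions; only the
 * update pattern itself mirrors the patch. */
#include <stdio.h>

#define SCALE      16                      /* assumed fixed-point shift */
#define FRAC(v, d) (((v) << SCALE) / (d))  /* mirrors MINSTREL_FRAC()   */
#define EWMA_DIV   128                     /* assumed EWMA denominator  */
#define EWMA_LEVEL 96                      /* assumed EWMA weight       */

struct rate_stats {                  /* trimmed-down minstrel_rate_stats */
        unsigned int attempts, success;
        unsigned int cur_prob, probability;  /* scaled by 1 << SCALE */
        unsigned int cur_tp;                 /* scaled packets per second */
};

/* EWMA as in minstrel_ewma(): new sample weighted against old average */
static int ewma(int old, int new, int weight)
{
        return (new * (EWMA_DIV - weight) + old * weight) / EWMA_DIV;
}

/* One statistics interval: derive cur_prob from the counters, smooth it
 * into probability, and convert it into a throughput estimate, resetting
 * the throughput below 10% success probability. */
static void update_stats(struct rate_stats *rs, unsigned int perfect_tx_time_us)
{
        if (rs->attempts > 0) {
                rs->cur_prob = FRAC(rs->success, rs->attempts);
                rs->probability = ewma(rs->probability, rs->cur_prob,
                                       EWMA_LEVEL);
        }
        rs->success = rs->attempts = 0;

        if (rs->probability < FRAC(10, 100))
                rs->cur_tp = 0;
        else
                rs->cur_tp = rs->probability * (1000000 / perfect_tx_time_us);
}

int main(void)
{
        struct rate_stats rs = { .attempts = 100, .success = 80 };

        update_stats(&rs, 300);  /* e.g. ~300 us per perfectly sent frame */
        printf("probability = %u / %u, cur_tp = %u (scaled pkt/s)\n",
               rs.probability, 1u << SCALE, rs.cur_tp);
        return 0;
}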


2014-09-11 10:09:45

by Johannes Berg

Subject: Re: [PATCH v2 2/2] mac80211: improve minstrel_ht rate sorting by throughput & probability

On Tue, 2014-09-09 at 23:22 +0200, Thomas Huehn wrote:
> This patch improves the way minstrel_ht sorts rates according to throughput
> and success probability. The 3 FOR-loops across the entire rate and MCS group
> set in minstrel_ht_update_stats(), which were used to determine the fastest,
> second fastest and most robust rate, are reduced to 2 FOR-loops.
>
> The sorted list of rates according to throughput is extended to the best four
> rates, as we need them in the upcoming joint rate and power control. The
> sorting is done via the new function minstrel_ht_sort_best_tp_rates(). The
> annotation of those 4 best throughput rates in the debugfs file rc-stats is
> changed to "A,B,C,D", where A is the fastest rate and D the 4th fastest.
>
> ---
> v2: fix coding style issues, thx to Felix Fietkau
>
> Signed-off-by: Thomas Huehn <[email protected]>
> Tested-by: Stefan Venz <[email protected]>

The S-o-b etc. should be *before* the --- cutoff ... I'll fix it
manually this time.

johannes


2014-09-09 21:36:41

by Felix Fietkau

Subject: Re: [PATCH v2 0/2] Unify & improve Minstrel & Minstrel_HT rate control

On 2014-09-09 23:22, Thomas Huehn wrote:
> This patch series consolidates the rate statistics variables between Minstrel and
> Minstrel_HT. It improves the sorting function that finds the max throughput rate
> set in Minstrel_HT.
For this series:

Acked-by: Felix Fietkau <[email protected]>

2014-09-11 10:09:16

by Johannes Berg

Subject: Re: [PATCH v2 1/2] mac80211: Unify rate statistic variables between Minstrel & Minstrel_HT

On Tue, 2014-09-09 at 23:22 +0200, Thomas Huehn wrote:
> Minstrel and Minstrel_HT used their own structs to keep track of rate
> statistics. Unify those variables in struct minstrel_rate_stats and
> move it to rc80211_minstrel.h for common usage. This is a clean-up
> patch to prepare the Minstrel and Minstrel_HT codebase for upcoming TPC.

Applied, thanks Thomas & Felix.

johannes


2014-09-28 22:08:21

by Karl Beldan

Subject: Re: [PATCH v2 2/2] mac80211: improve minstrel_ht rate sorting by throughput & probability

On Tue, Sep 09, 2014 at 11:22:14PM +0200, Thomas Huehn wrote:
> This patch improves the way minstrel_ht sorts rates according to throughput
> and success probability. The 3 FOR-loops across the entire rate and MCS group
> set in minstrel_ht_update_stats(), which were used to determine the fastest,
> second fastest and most robust rate, are reduced to 2 FOR-loops.
>
> The sorted list of rates according to throughput is extended to the best four
> rates, as we need them in the upcoming joint rate and power control. The
> sorting is done via the new function minstrel_ht_sort_best_tp_rates(). The
> annotation of those 4 best throughput rates in the debugfs file rc-stats is
> changed to "A,B,C,D", where A is the fastest rate and D the 4th fastest.
>
> ---
> v2: fix coding style issues, thx to Felix Fietkau
>
> Signed-off-by: Thomas Huehn <[email protected]>
> Tested-by: Stefan Venz <[email protected]>
> ---
> net/mac80211/rc80211_minstrel_ht.c | 303 +++++++++++++++++++----------
> net/mac80211/rc80211_minstrel_ht.h | 19 +-
> net/mac80211/rc80211_minstrel_ht_debugfs.c | 10 +-
> 3 files changed, 213 insertions(+), 119 deletions(-)
>
> diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
> index 85c1e74..df90ce2 100644
> --- a/net/mac80211/rc80211_minstrel_ht.c
> +++ b/net/mac80211/rc80211_minstrel_ht.c
> @@ -135,7 +135,7 @@ minstrel_ht_update_rates(struct minstrel_priv *mp, struct minstrel_ht_sta *mi);
> static int
> minstrel_ht_get_group_idx(struct ieee80211_tx_rate *rate)
> {
> - return GROUP_IDX((rate->idx / 8) + 1,
> + return GROUP_IDX((rate->idx / MCS_GROUP_RATES) + 1,
> !!(rate->flags & IEEE80211_TX_RC_SHORT_GI),
> !!(rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH));
> }

Hi,

Here you should have kept 8; GROUP_IDX requires the number of streams.
Tonight I will rebase the VHT RFC I sent a year ago and also send the
fix for the 7a5e3fa breakage.

Karl
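
A small illustration of the distinction Karl raises, assuming (as his reply
states) that GROUP_IDX() takes the number of spatial streams as its first
argument: HT MCS indexes come in blocks of 8 per stream, so idx / 8 + 1 is the
stream count. MCS_GROUP_RATES happens to be 8 for the HT groups as well, which
is why the two divisions currently agree; the distinction matters once a group
no longer holds exactly 8 rates, e.g. in the VHT work Karl mentions.

/* Sketch: stream count from an HT MCS index.  That idx / 8 + 1 equals
 * idx / MCS_GROUP_RATES + 1 here relies on MCS_GROUP_RATES being 8 for
 * HT; GROUP_IDX() wants the stream count itself (per Karl's reply). */
#include <stdio.h>

static int ht_streams(int mcs_idx)
{
        return mcs_idx / 8 + 1;  /* 8 MCS rates per spatial stream in HT */
}

int main(void)
{
        int idx;

        for (idx = 0; idx < 32; idx += 8)
                printf("MCS %2d -> %d stream(s)\n", idx, ht_streams(idx));
        return 0;
}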

2014-09-09 21:22:20

by Thomas Huehn

Subject: [PATCH v2 2/2] mac80211: improve minstrel_ht rate sorting by throughput & probability

This patch improves the way minstrel_ht sorts rates according to throughput
and success probability. The 3 FOR-loops across the entire rate and MCS group
set in minstrel_ht_update_stats(), which were used to determine the fastest,
second fastest and most robust rate, are reduced to 2 FOR-loops.

The sorted list of rates according to throughput is extended to the best four
rates, as we need them in the upcoming joint rate and power control. The
sorting is done via the new function minstrel_ht_sort_best_tp_rates(). The
annotation of those 4 best throughput rates in the debugfs file rc-stats is
changed to "A,B,C,D", where A is the fastest rate and D the 4th fastest.

---
v2: fix coding style issues, thx to Felix Fietkau

Signed-off-by: Thomas Huehn <[email protected]>
Tested-by: Stefan Venz <[email protected]>
---
net/mac80211/rc80211_minstrel_ht.c | 303 +++++++++++++++++++----------
net/mac80211/rc80211_minstrel_ht.h | 19 +-
net/mac80211/rc80211_minstrel_ht_debugfs.c | 10 +-
3 files changed, 213 insertions(+), 119 deletions(-)

diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index 85c1e74..df90ce2 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -135,7 +135,7 @@ minstrel_ht_update_rates(struct minstrel_priv *mp, struct minstrel_ht_sta *mi);
static int
minstrel_ht_get_group_idx(struct ieee80211_tx_rate *rate)
{
- return GROUP_IDX((rate->idx / 8) + 1,
+ return GROUP_IDX((rate->idx / MCS_GROUP_RATES) + 1,
!!(rate->flags & IEEE80211_TX_RC_SHORT_GI),
!!(rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH));
}
@@ -233,12 +233,151 @@ minstrel_ht_calc_tp(struct minstrel_ht_sta *mi, int group, int rate)
}

/*
+ * Find & sort topmost throughput rates
+ *
+ * If multiple rates provide equal throughput the sorting is based on their
+ * current success probability. Higher success probability is preferred among
+ * MCS groups, CCK rates do not provide aggregation and are therefore at last.
+ */
+static void
+minstrel_ht_sort_best_tp_rates(struct minstrel_ht_sta *mi, u8 index,
+ u8 *tp_list)
+{
+ int cur_group, cur_idx, cur_thr, cur_prob;
+ int tmp_group, tmp_idx, tmp_thr, tmp_prob;
+ int j = MAX_THR_RATES;
+
+ cur_group = index / MCS_GROUP_RATES;
+ cur_idx = index % MCS_GROUP_RATES;
+ cur_thr = mi->groups[cur_group].rates[cur_idx].cur_tp;
+ cur_prob = mi->groups[cur_group].rates[cur_idx].probability;
+
+ tmp_group = tp_list[j - 1] / MCS_GROUP_RATES;
+ tmp_idx = tp_list[j - 1] % MCS_GROUP_RATES;
+ tmp_thr = mi->groups[tmp_group].rates[tmp_idx].cur_tp;
+ tmp_prob = mi->groups[tmp_group].rates[tmp_idx].probability;
+
+ while (j > 0 && (cur_thr > tmp_thr ||
+ (cur_thr == tmp_thr && cur_prob > tmp_prob))) {
+ j--;
+ tmp_group = tp_list[j - 1] / MCS_GROUP_RATES;
+ tmp_idx = tp_list[j - 1] % MCS_GROUP_RATES;
+ tmp_thr = mi->groups[tmp_group].rates[tmp_idx].cur_tp;
+ tmp_prob = mi->groups[tmp_group].rates[tmp_idx].probability;
+ }
+
+ if (j < MAX_THR_RATES - 1) {
+ memmove(&tp_list[j + 1], &tp_list[j], (sizeof(*tp_list) *
+ (MAX_THR_RATES - (j + 1))));
+ }
+ if (j < MAX_THR_RATES)
+ tp_list[j] = index;
+}
+
+/*
+ * Find and set the topmost probability rate per sta and per group
+ */
+static void
+minstrel_ht_set_best_prob_rate(struct minstrel_ht_sta *mi, u8 index)
+{
+ struct minstrel_mcs_group_data *mg;
+ struct minstrel_rate_stats *mr;
+ int tmp_group, tmp_idx, tmp_tp, tmp_prob, max_tp_group;
+
+ mg = &mi->groups[index / MCS_GROUP_RATES];
+ mr = &mg->rates[index % MCS_GROUP_RATES];
+
+ tmp_group = mi->max_prob_rate / MCS_GROUP_RATES;
+ tmp_idx = mi->max_prob_rate % MCS_GROUP_RATES;
+ tmp_tp = mi->groups[tmp_group].rates[tmp_idx].cur_tp;
+ tmp_prob = mi->groups[tmp_group].rates[tmp_idx].probability;
+
+ /* if max_tp_rate[0] is from MCS_GROUP max_prob_rate get selected from
+ * MCS_GROUP as well as CCK_GROUP rates do not allow aggregation */
+ max_tp_group = mi->max_tp_rate[0] / MCS_GROUP_RATES;
+ if((index / MCS_GROUP_RATES == MINSTREL_CCK_GROUP) &&
+ (max_tp_group != MINSTREL_CCK_GROUP))
+ return;
+
+ if (mr->probability > MINSTREL_FRAC(75, 100)) {
+ if (mr->cur_tp > tmp_tp)
+ mi->max_prob_rate = index;
+ if (mr->cur_tp > mg->rates[mg->max_group_prob_rate].cur_tp)
+ mg->max_group_prob_rate = index;
+ } else {
+ if (mr->probability > tmp_prob)
+ mi->max_prob_rate = index;
+ if (mr->probability > mg->rates[mg->max_group_prob_rate].probability)
+ mg->max_group_prob_rate = index;
+ }
+}
+
+
+/*
+ * Assign new rate set per sta and use CCK rates only if the fastest
+ * rate (max_tp_rate[0]) is from CCK group. This prohibits such sorted
+ * rate sets where MCS and CCK rates are mixed, because CCK rates can
+ * not use aggregation.
+ */
+static void
+minstrel_ht_assign_best_tp_rates(struct minstrel_ht_sta *mi,
+ u8 tmp_mcs_tp_rate[MAX_THR_RATES],
+ u8 tmp_cck_tp_rate[MAX_THR_RATES])
+{
+ unsigned int tmp_group, tmp_idx, tmp_cck_tp, tmp_mcs_tp;
+ int i;
+
+ tmp_group = tmp_cck_tp_rate[0] / MCS_GROUP_RATES;
+ tmp_idx = tmp_cck_tp_rate[0] % MCS_GROUP_RATES;
+ tmp_cck_tp = mi->groups[tmp_group].rates[tmp_idx].cur_tp;
+
+ tmp_group = tmp_mcs_tp_rate[0] / MCS_GROUP_RATES;
+ tmp_idx = tmp_mcs_tp_rate[0] % MCS_GROUP_RATES;
+ tmp_mcs_tp = mi->groups[tmp_group].rates[tmp_idx].cur_tp;
+
+ if (tmp_cck_tp > tmp_mcs_tp) {
+ for(i = 0; i < MAX_THR_RATES; i++) {
+ minstrel_ht_sort_best_tp_rates(mi, tmp_cck_tp_rate[i],
+ tmp_mcs_tp_rate);
+ }
+ }
+
+}
+
+/*
+ * Try to increase robustness of max_prob rate by decrease number of
+ * streams if possible.
+ */
+static inline void
+minstrel_ht_prob_rate_reduce_streams(struct minstrel_ht_sta *mi)
+{
+ struct minstrel_mcs_group_data *mg;
+ struct minstrel_rate_stats *mr;
+ int tmp_max_streams, group;
+ int tmp_tp = 0;
+
+ tmp_max_streams = minstrel_mcs_groups[mi->max_tp_rate[0] /
+ MCS_GROUP_RATES].streams;
+ for (group = 0; group < ARRAY_SIZE(minstrel_mcs_groups); group++) {
+ mg = &mi->groups[group];
+ if (!mg->supported || group == MINSTREL_CCK_GROUP)
+ continue;
+ mr = minstrel_get_ratestats(mi, mg->max_group_prob_rate);
+ if (tmp_tp < mr->cur_tp &&
+ (minstrel_mcs_groups[group].streams < tmp_max_streams)) {
+ mi->max_prob_rate = mg->max_group_prob_rate;
+ tmp_tp = mr->cur_tp;
+ }
+ }
+}
+
+/*
* Update rate statistics and select new primary rates
*
* Rules for rate selection:
* - max_prob_rate must use only one stream, as a tradeoff between delivery
* probability and throughput during strong fluctuations
- * - as long as the max prob rate has a probability of more than 3/4, pick
+ * - as long as the max prob rate has a probability of more than 75%, pick
* higher throughput rates, even if the probablity is a bit lower
*/
static void
@@ -246,9 +385,9 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
{
struct minstrel_mcs_group_data *mg;
struct minstrel_rate_stats *mr;
- int cur_prob, cur_prob_tp, cur_tp, cur_tp2;
- int group, i, index;
- bool mi_rates_valid = false;
+ int group, i, j;
+ u8 tmp_mcs_tp_rate[MAX_THR_RATES], tmp_group_tp_rate[MAX_THR_RATES];
+ u8 tmp_cck_tp_rate[MAX_THR_RATES], index;

if (mi->ampdu_packets > 0) {
mi->avg_ampdu_len = minstrel_ewma(mi->avg_ampdu_len,
@@ -260,13 +399,14 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
mi->sample_slow = 0;
mi->sample_count = 0;

- for (group = 0; group < ARRAY_SIZE(minstrel_mcs_groups); group++) {
- bool mg_rates_valid = false;
+ /* Initialize global rate indexes */
+ for(j = 0; j < MAX_THR_RATES; j++){
+ tmp_mcs_tp_rate[j] = 0;
+ tmp_cck_tp_rate[j] = 0;
+ }

- cur_prob = 0;
- cur_prob_tp = 0;
- cur_tp = 0;
- cur_tp2 = 0;
+ /* Find best rate sets within all MCS groups*/
+ for (group = 0; group < ARRAY_SIZE(minstrel_mcs_groups); group++) {

mg = &mi->groups[group];
if (!mg->supported)
@@ -274,24 +414,16 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)

mi->sample_count++;

+ /* (re)Initialize group rate indexes */
+ for(j = 0; j < MAX_THR_RATES; j++)
+ tmp_group_tp_rate[j] = group;
+
for (i = 0; i < MCS_GROUP_RATES; i++) {
if (!(mg->supported & BIT(i)))
continue;

index = MCS_GROUP_RATES * group + i;

- /* initialize rates selections starting indexes */
- if (!mg_rates_valid) {
- mg->max_tp_rate = mg->max_tp_rate2 =
- mg->max_prob_rate = i;
- if (!mi_rates_valid) {
- mi->max_tp_rate = mi->max_tp_rate2 =
- mi->max_prob_rate = index;
- mi_rates_valid = true;
- }
- mg_rates_valid = true;
- }
-
mr = &mg->rates[i];
mr->retry_updated = false;
minstrel_calc_rate_ewma(mr);
@@ -300,82 +432,47 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
if (!mr->cur_tp)
continue;

- if ((mr->cur_tp > cur_prob_tp && mr->probability >
- MINSTREL_FRAC(3, 4)) || mr->probability > cur_prob) {
- mg->max_prob_rate = index;
- cur_prob = mr->probability;
- cur_prob_tp = mr->cur_tp;
- }
-
- if (mr->cur_tp > cur_tp) {
- swap(index, mg->max_tp_rate);
- cur_tp = mr->cur_tp;
- mr = minstrel_get_ratestats(mi, index);
- }
-
- if (index >= mg->max_tp_rate)
- continue;
-
- if (mr->cur_tp > cur_tp2) {
- mg->max_tp_rate2 = index;
- cur_tp2 = mr->cur_tp;
+ /* Find max throughput rate set */
+ if (group != MINSTREL_CCK_GROUP) {
+ minstrel_ht_sort_best_tp_rates(mi, index,
+ tmp_mcs_tp_rate);
+ } else if (group == MINSTREL_CCK_GROUP) {
+ minstrel_ht_sort_best_tp_rates(mi, index,
+ tmp_cck_tp_rate);
}
- }
- }

- /* try to sample all available rates during each interval */
- mi->sample_count *= 8;
+ /* Find max throughput rate set within a group */
+ minstrel_ht_sort_best_tp_rates(mi, index,
+ tmp_group_tp_rate);

- cur_prob = 0;
- cur_prob_tp = 0;
- cur_tp = 0;
- cur_tp2 = 0;
- for (group = 0; group < ARRAY_SIZE(minstrel_mcs_groups); group++) {
- mg = &mi->groups[group];
- if (!mg->supported)
- continue;
-
- mr = minstrel_get_ratestats(mi, mg->max_tp_rate);
- if (cur_tp < mr->cur_tp) {
- mi->max_tp_rate2 = mi->max_tp_rate;
- cur_tp2 = cur_tp;
- mi->max_tp_rate = mg->max_tp_rate;
- cur_tp = mr->cur_tp;
- mi->max_prob_streams = minstrel_mcs_groups[group].streams - 1;
+ /* Find max probability rate per group and global */
+ minstrel_ht_set_best_prob_rate(mi, index);
}

- mr = minstrel_get_ratestats(mi, mg->max_tp_rate2);
- if (cur_tp2 < mr->cur_tp) {
- mi->max_tp_rate2 = mg->max_tp_rate2;
- cur_tp2 = mr->cur_tp;
- }
+ memcpy(mg->max_group_tp_rate, tmp_group_tp_rate,
+ sizeof(mg->max_group_tp_rate));
}

- if (mi->max_prob_streams < 1)
- mi->max_prob_streams = 1;
+ /* Assign new rate set per sta */
+ minstrel_ht_assign_best_tp_rates(mi, tmp_mcs_tp_rate, tmp_cck_tp_rate);
+ memcpy(mi->max_tp_rate, tmp_mcs_tp_rate, sizeof(mi->max_tp_rate));

- for (group = 0; group < ARRAY_SIZE(minstrel_mcs_groups); group++) {
- mg = &mi->groups[group];
- if (!mg->supported)
- continue;
- mr = minstrel_get_ratestats(mi, mg->max_prob_rate);
- if (cur_prob_tp < mr->cur_tp &&
- minstrel_mcs_groups[group].streams <= mi->max_prob_streams) {
- mi->max_prob_rate = mg->max_prob_rate;
- cur_prob = mr->cur_prob;
- cur_prob_tp = mr->cur_tp;
- }
- }
+ /* Try to increase robustness of max_prob_rate*/
+ minstrel_ht_prob_rate_reduce_streams(mi);
+
+ /* try to sample all available rates during each interval */
+ mi->sample_count *= 8;

#ifdef CONFIG_MAC80211_DEBUGFS
/* use fixed index if set */
if (mp->fixed_rate_idx != -1) {
- mi->max_tp_rate = mp->fixed_rate_idx;
- mi->max_tp_rate2 = mp->fixed_rate_idx;
+ for (i = 0; i < 4; i++)
+ mi->max_tp_rate[i] = mp->fixed_rate_idx;
mi->max_prob_rate = mp->fixed_rate_idx;
}
#endif

+ /* Reset update timer */
mi->stats_update = jiffies;
}

@@ -420,8 +517,7 @@ minstrel_next_sample_idx(struct minstrel_ht_sta *mi)
}

static void
-minstrel_downgrade_rate(struct minstrel_ht_sta *mi, unsigned int *idx,
- bool primary)
+minstrel_downgrade_rate(struct minstrel_ht_sta *mi, u8 *idx, bool primary)
{
int group, orig_group;

@@ -437,9 +533,9 @@ minstrel_downgrade_rate(struct minstrel_ht_sta *mi, unsigned int *idx,
continue;

if (primary)
- *idx = mi->groups[group].max_tp_rate;
+ *idx = mi->groups[group].max_group_tp_rate[0];
else
- *idx = mi->groups[group].max_tp_rate2;
+ *idx = mi->groups[group].max_group_tp_rate[1];
break;
}
}
@@ -524,19 +620,19 @@ minstrel_ht_tx_status(void *priv, struct ieee80211_supported_band *sband,
* check for sudden death of spatial multiplexing,
* downgrade to a lower number of streams if necessary.
*/
- rate = minstrel_get_ratestats(mi, mi->max_tp_rate);
+ rate = minstrel_get_ratestats(mi, mi->max_tp_rate[0]);
if (rate->attempts > 30 &&
MINSTREL_FRAC(rate->success, rate->attempts) <
MINSTREL_FRAC(20, 100)) {
- minstrel_downgrade_rate(mi, &mi->max_tp_rate, true);
+ minstrel_downgrade_rate(mi, &mi->max_tp_rate[0], true);
update = true;
}

- rate2 = minstrel_get_ratestats(mi, mi->max_tp_rate2);
+ rate2 = minstrel_get_ratestats(mi, mi->max_tp_rate[1]);
if (rate2->attempts > 30 &&
MINSTREL_FRAC(rate2->success, rate2->attempts) <
MINSTREL_FRAC(20, 100)) {
- minstrel_downgrade_rate(mi, &mi->max_tp_rate2, false);
+ minstrel_downgrade_rate(mi, &mi->max_tp_rate[1], false);
update = true;
}

@@ -661,12 +757,12 @@ minstrel_ht_update_rates(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
if (!rates)
return;

- /* Start with max_tp_rate */
- minstrel_ht_set_rate(mp, mi, rates, i++, mi->max_tp_rate);
+ /* Start with max_tp_rate[0] */
+ minstrel_ht_set_rate(mp, mi, rates, i++, mi->max_tp_rate[0]);

if (mp->hw->max_rates >= 3) {
- /* At least 3 tx rates supported, use max_tp_rate2 next */
- minstrel_ht_set_rate(mp, mi, rates, i++, mi->max_tp_rate2);
+ /* At least 3 tx rates supported, use max_tp_rate[1] next */
+ minstrel_ht_set_rate(mp, mi, rates, i++, mi->max_tp_rate[1]);
}

if (mp->hw->max_rates >= 2) {
@@ -691,7 +787,7 @@ minstrel_get_sample_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
{
struct minstrel_rate_stats *mr;
struct minstrel_mcs_group_data *mg;
- unsigned int sample_dur, sample_group;
+ unsigned int sample_dur, sample_group, cur_max_tp_streams;
int sample_idx = 0;

if (mi->sample_wait > 0) {
@@ -718,8 +814,8 @@ minstrel_get_sample_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
* to the frame. Hence, don't use sampling for the currently
* used rates.
*/
- if (sample_idx == mi->max_tp_rate ||
- sample_idx == mi->max_tp_rate2 ||
+ if (sample_idx == mi->max_tp_rate[0] ||
+ sample_idx == mi->max_tp_rate[1] ||
sample_idx == mi->max_prob_rate)
return -1;

@@ -734,9 +830,12 @@ minstrel_get_sample_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
* Make sure that lower rates get sampled only occasionally,
* if the link is working perfectly.
*/
+
+ cur_max_tp_streams = minstrel_mcs_groups[mi->max_tp_rate[0] /
+ MCS_GROUP_RATES].streams;
sample_dur = minstrel_get_duration(sample_idx);
- if (sample_dur >= minstrel_get_duration(mi->max_tp_rate2) &&
- (mi->max_prob_streams <
+ if (sample_dur >= minstrel_get_duration(mi->max_tp_rate[1]) &&
+ (cur_max_tp_streams - 1 <
minstrel_mcs_groups[sample_group].streams ||
sample_dur >= minstrel_get_duration(mi->max_prob_rate))) {
if (mr->sample_skipped < 20)
@@ -1041,8 +1140,8 @@ static u32 minstrel_ht_get_expected_throughput(void *priv_sta)
if (!msp->is_ht)
return mac80211_minstrel.get_expected_throughput(priv_sta);

- i = mi->max_tp_rate / MCS_GROUP_RATES;
- j = mi->max_tp_rate % MCS_GROUP_RATES;
+ i = mi->max_tp_rate[0] / MCS_GROUP_RATES;
+ j = mi->max_tp_rate[0] % MCS_GROUP_RATES;

/* convert cur_tp from pkt per second in kbps */
return mi->groups[i].rates[j].cur_tp * AVG_PKT_SIZE * 8 / 1024;
diff --git a/net/mac80211/rc80211_minstrel_ht.h b/net/mac80211/rc80211_minstrel_ht.h
index 5fee938..01570e0 100644
--- a/net/mac80211/rc80211_minstrel_ht.h
+++ b/net/mac80211/rc80211_minstrel_ht.h
@@ -33,10 +33,9 @@ struct minstrel_mcs_group_data {
/* bitfield of supported MCS rates of this group */
u8 supported;

- /* selected primary rates */
- unsigned int max_tp_rate;
- unsigned int max_tp_rate2;
- unsigned int max_prob_rate;
+ /* sorted rate set within a MCS group*/
+ u8 max_group_tp_rate[MAX_THR_RATES];
+ u8 max_group_prob_rate;

/* MCS rate statistics */
struct minstrel_rate_stats rates[MCS_GROUP_RATES];
@@ -52,15 +51,9 @@ struct minstrel_ht_sta {
/* ampdu length (EWMA) */
unsigned int avg_ampdu_len;

- /* best throughput rate */
- unsigned int max_tp_rate;
-
- /* second best throughput rate */
- unsigned int max_tp_rate2;
-
- /* best probability rate */
- unsigned int max_prob_rate;
- unsigned int max_prob_streams;
+ /* overall sorted rate set */
+ u8 max_tp_rate[MAX_THR_RATES];
+ u8 max_prob_rate;

/* time of last status update */
unsigned long stats_update;
diff --git a/net/mac80211/rc80211_minstrel_ht_debugfs.c b/net/mac80211/rc80211_minstrel_ht_debugfs.c
index 3e7d793..a72ad46 100644
--- a/net/mac80211/rc80211_minstrel_ht_debugfs.c
+++ b/net/mac80211/rc80211_minstrel_ht_debugfs.c
@@ -46,8 +46,10 @@ minstrel_ht_stats_dump(struct minstrel_ht_sta *mi, int i, char *p)
else
p += sprintf(p, "HT%c0/%cGI ", htmode, gimode);

- *(p++) = (idx == mi->max_tp_rate) ? 'T' : ' ';
- *(p++) = (idx == mi->max_tp_rate2) ? 't' : ' ';
+ *(p++) = (idx == mi->max_tp_rate[0]) ? 'A' : ' ';
+ *(p++) = (idx == mi->max_tp_rate[1]) ? 'B' : ' ';
+ *(p++) = (idx == mi->max_tp_rate[2]) ? 'C' : ' ';
+ *(p++) = (idx == mi->max_tp_rate[3]) ? 'D' : ' ';
*(p++) = (idx == mi->max_prob_rate) ? 'P' : ' ';

if (i == max_mcs) {
@@ -100,8 +102,8 @@ minstrel_ht_stats_open(struct inode *inode, struct file *file)

file->private_data = ms;
p = ms->buf;
- p += sprintf(p, "type rate throughput ewma prob this prob "
- "retry this succ/attempt success attempts\n");
+ p += sprintf(p, "type rate throughput ewma prob "
+ "this prob retry this succ/attempt success attempts\n");

p = minstrel_ht_stats_dump(mi, max_mcs, p);
for (i = 0; i < max_mcs; i++)
--
2.1.0
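
The core of the new sorting is an insertion step that keeps a fixed-size list
of rate indexes ordered by throughput, with success probability as the
tie-breaker. Below is a minimal userspace sketch of that step; the flat
cur_tp/probability arrays and the made-up statistics in main() are assumptions
of the sketch and stand in for the per-group minstrel_rate_stats that the
kernel code reads.

/* Standalone sketch of the "keep the best MAX_THR_RATES rates sorted by
 * throughput, tie-broken by success probability" insertion performed by
 * minstrel_ht_sort_best_tp_rates().  Rates are identified by a flat
 * index; the arrays below are assumptions of this sketch. */
#include <stdio.h>
#include <string.h>

#define MAX_THR_RATES 4
#define NUM_RATES     12

static unsigned int cur_tp[NUM_RATES];       /* throughput per rate     */
static unsigned int probability[NUM_RATES];  /* delivery prob. per rate */

/* Insert `index` into tp_list so the list stays sorted by throughput,
 * preferring higher probability on equal throughput. */
static void sort_best_tp_rates(unsigned char index, unsigned char *tp_list)
{
        int j = MAX_THR_RATES;

        while (j > 0 &&
               (cur_tp[index] > cur_tp[tp_list[j - 1]] ||
                (cur_tp[index] == cur_tp[tp_list[j - 1]] &&
                 probability[index] > probability[tp_list[j - 1]])))
                j--;

        if (j < MAX_THR_RATES - 1)
                memmove(&tp_list[j + 1], &tp_list[j],
                        MAX_THR_RATES - (j + 1));
        if (j < MAX_THR_RATES)
                tp_list[j] = index;
}

int main(void)
{
        unsigned char tp_list[MAX_THR_RATES] = { 0, 0, 0, 0 };
        unsigned char i;

        for (i = 0; i < NUM_RATES; i++) {
                cur_tp[i] = (i * 7) % 50;    /* made-up statistics */
                probability[i] = 100 - i;
                sort_best_tp_rates(i, tp_list);
        }

        printf("best rates: %d %d %d %d\n",
               tp_list[0], tp_list[1], tp_list[2], tp_list[3]);
        return 0;
}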


2014-10-07 08:44:44

by Karl Beldan

Subject: Re: [PATCH v2 2/2] mac80211: improve minstrel_ht rate sorting by throughput & probability

Hi,

On Tue, Sep 09, 2014 at 11:22:14PM +0200, Thomas Huehn wrote:
> @@ -260,13 +399,14 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
> mi->sample_slow = 0;
> mi->sample_count = 0;
>
> - for (group = 0; group < ARRAY_SIZE(minstrel_mcs_groups); group++) {
> - bool mg_rates_valid = false;
> + /* Initialize global rate indexes */
> + for(j = 0; j < MAX_THR_RATES; j++){
> + tmp_mcs_tp_rate[j] = 0;
> + tmp_cck_tp_rate[j] = 0;
> + }
>

This is a problem if a driver doesn't support the first entry.
Maybe using a mi->min_ridx set in minstrel_ht_update_caps would be
practical.

> @@ -274,24 +414,16 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
>
> mi->sample_count++;
>
> + /* (re)Initialize group rate indexes */
> + for(j = 0; j < MAX_THR_RATES; j++)
> + tmp_group_tp_rate[j] = group;
> +

You meant group * MCS_GROUP_RATES, no?
And group * MCS_GROUP_RATES + ffs(mg->supported) - 1 would be correct,
but this corner case is not easily triggerable ATM.

--
Karl
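
To make the remark concrete: the patch addresses rates by a flat index,
index = group * MCS_GROUP_RATES + rate-within-group, and decodes it with
/ and %. A tiny sketch of that encoding with made-up numbers shows why
seeding a tp_list entry with the bare group number decodes back to group 0:

/* Sketch of the flat rate index used throughout the patch.  The group
 * and rate numbers are made up for illustration. */
#include <stdio.h>

#define MCS_GROUP_RATES 8

int main(void)
{
        int group = 3, i = 5;
        int index = group * MCS_GROUP_RATES + i;      /* encode: 29 */

        printf("index %d -> group %d, rate %d\n",
               index, index / MCS_GROUP_RATES, index % MCS_GROUP_RATES);

        /* seeding with the bare group number decodes incorrectly */
        printf("seed  %d -> group %d, rate %d\n",
               group, group / MCS_GROUP_RATES, group % MCS_GROUP_RATES);
        return 0;
}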

2014-11-16 20:53:14

by Jouni Malinen

Subject: Re: [PATCH v2 2/2] mac80211: improve minstrel_ht rate sorting by throughput & probability

On Tue, Sep 09, 2014 at 11:22:14PM +0200, Thomas Huehn wrote:
> This patch improves the way minstrel_ht sorts rates according to throughput
> and success probability. The 3 FOR-loops across the entire rate and MCS group
> set in minstrel_ht_update_stats(), which were used to determine the fastest,
> second fastest and most robust rate, are reduced to 2 FOR-loops.
>
> The sorted list of rates according to throughput is extended to the best four
> rates, as we need them in the upcoming joint rate and power control. The
> sorting is done via the new function minstrel_ht_sort_best_tp_rates(). The
> annotation of those 4 best throughput rates in the debugfs file rc-stats is
> changed to "A,B,C,D", where A is the fastest rate and D the 4th fastest.

I'm not sure whether this was triggered by this specific commit or a
more recent rc80211_minstrel_ht.c change, but I'm seeing relatively
frequent kernel panics in mac80211_hwsim test cases in the
minstrel_ht_sort_best_tp_rates() function that was added here. Any idea
what could be causing this? Details of the crash below:

> +static void
> +minstrel_ht_sort_best_tp_rates(struct minstrel_ht_sta *mi, u8 index,
> + u8 *tp_list)
> +{
> + int cur_group, cur_idx, cur_thr, cur_prob;
> + int tmp_group, tmp_idx, tmp_thr, tmp_prob;
> + int j = MAX_THR_RATES;
> +
> + cur_group = index / MCS_GROUP_RATES;
> + cur_idx = index % MCS_GROUP_RATES;
> + cur_thr = mi->groups[cur_group].rates[cur_idx].cur_tp;
> + cur_prob = mi->groups[cur_group].rates[cur_idx].probability;
> +
> + tmp_group = tp_list[j - 1] / MCS_GROUP_RATES;
> + tmp_idx = tp_list[j - 1] % MCS_GROUP_RATES;
> + tmp_thr = mi->groups[tmp_group].rates[tmp_idx].cur_tp;
> + tmp_prob = mi->groups[tmp_group].rates[tmp_idx].probability;
> +
> + while (j > 0 && (cur_thr > tmp_thr ||
> + (cur_thr == tmp_thr && cur_prob > tmp_prob))) {
> + j--;
> + tmp_group = tp_list[j - 1] / MCS_GROUP_RATES;
> + tmp_idx = tp_list[j - 1] % MCS_GROUP_RATES;
> + tmp_thr = mi->groups[tmp_group].rates[tmp_idx].cur_tp;

This tmp_thr assignment line is the one that the RIP
(minstrel_ht_sort_best_tp_rates+0xfe/0x160) resolves to.

That's net/mac80211/rc80211_minstrel_ht.c:407 in the current
wireless-testing.git snapshot.

> + tmp_prob = mi->groups[tmp_group].rates[tmp_idx].probability;
> + }
> +
> + if (j < MAX_THR_RATES - 1) {
> + memmove(&tp_list[j + 1], &tp_list[j], (sizeof(*tp_list) *
> + (MAX_THR_RATES - (j + 1))));
> + }
> + if (j < MAX_THR_RATES)
> + tp_list[j] = index;
> +}


Kernel log looks like this:

[ 231.605848] wlan1: authenticate with 02:00:00:00:03:00
[ 231.606511] wlan1: capabilities/regulatory prevented using AP HT/VHT configuration, downgraded
[ 231.607293] wlan1: send auth to 02:00:00:00:03:00 (try 1/3)
[ 231.609349] wlan1: authenticated
[ 231.620137] wlan1: associate with 02:00:00:00:03:00 (try 1/3)
[ 231.624999] wlan1: RX AssocResp from 02:00:00:00:03:00 (capab=0x411 status=0 aid=2)
[ 231.626416] wlan1: associated
[ 231.683007] BUG: unable to handle kernel paging request at ffff8800201019d0
[ 231.684283] IP: [<ffffffff814c0dbe>] minstrel_ht_sort_best_tp_rates+0xfe/0x160
[ 231.685261] PGD 282f067 PUD 2830067 PMD 0
[ 231.685854] Oops: 0000 [#1] PREEMPT SMP
[ 231.686434] CPU: 0 PID: 585 Comm: wpa_supplicant Not tainted 3.18.0-rc4-wl+ #358
[ 231.687338] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Bochs 01/01/2011
[ 231.688413] task: ffff88001df02040 ti: ffff88001dd14000 task.ti: ffff88001dd14000
[ 231.689213] RIP: 0010:[<ffffffff814c0dbe>] [<ffffffff814c0dbe>] minstrel_ht_sort_best_tp_rates+0xfe/0x160
[ 231.690432] RSP: 0018:ffff88001fc03d38 EFLAGS: 00010246
[ 231.691102] RAX: ffff8800201019c8 RBX: 0000000000000006 RCX: 0000000000000118
[ 231.692032] RDX: ffff88001fc03d90 RSI: 0000000000010000 RDI: ffff88001fd68000
[ 231.692859] RBP: ffff88001fc03d48 R08: 0000000000001999 R09: 0000000000000000
[ 231.692905] R10: ffff88001fc03d8c R11: 0000000000000cb2 R12: ffff88001fd681c8
[ 231.692905] R13: 0000000000000006 R14: 0000000000000006 R15: ffff88001fd68198
[ 231.692905] FS: 00007f336cdee740(0000) GS:ffff88001fc00000(0000) knlGS:0000000000000000
[ 231.692905] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[ 231.692905] CR2: ffff8800201019d0 CR3: 000000001dcf9000 CR4: 00000000000006b0
[ 231.692905] Stack:
[ 231.692905] ffff88001fd6804a ffff88001fd681c8 ffff88001fc03dd8 ffffffff814c16a8
[ 231.692905] ffff88001fc03d78 ffff88001e157e2c ffff88001fd68000 0006ff8800000001
[ 231.692905] 0000000000000000 0000000000000006 ffff88001fc03e08 0000000000000000
[ 231.692905] Call Trace:
[ 231.692905] <IRQ>
[ 231.692905] [<ffffffff814c16a8>] minstrel_ht_update_stats.isra.7+0x208/0x5d0
[ 231.692905] [<ffffffff814c1f9c>] minstrel_ht_tx_status+0x52c/0x5e0
[ 231.692905] [<ffffffff81434b8b>] ieee80211_tx_status+0x99b/0x1450
[ 231.692905] [<ffffffff81434770>] ? ieee80211_tx_status+0x580/0x1450
[ 231.692905] [<ffffffff814e29a5>] ? _raw_spin_unlock_irqrestore+0x55/0x80
[ 231.692905] [<ffffffff81430d32>] ieee80211_tasklet_handler+0x62/0xe0
[ 231.692905] [<ffffffff8103f3d7>] tasklet_action+0xe7/0xf0
[ 231.692905] [<ffffffff8103e780>] __do_softirq+0x150/0x610
[ 231.692905] [<ffffffff812ea74a>] ? __dev_queue_xmit+0x2aa/0x8e0
[ 231.692905] [<ffffffff814e518c>] do_softirq_own_stack+0x1c/0x30
[ 231.692905] <EOI>
[ 231.692905] [<ffffffff8103ed2d>] do_softirq+0x7d/0x90
[ 231.692905] [<ffffffff8103ee27>] __local_bh_enable_ip+0xe7/0xf0
[ 231.692905] [<ffffffff812ea773>] __dev_queue_xmit+0x2d3/0x8e0
[ 231.692905] [<ffffffff812ea4f0>] ? __dev_queue_xmit+0x50/0x8e0
[ 231.692905] [<ffffffff812eae00>] dev_queue_xmit+0x10/0x20
[ 231.692905] [<ffffffff813cdcba>] packet_sendmsg+0xdfa/0x10a0
[ 231.692905] [<ffffffff812cec59>] sock_sendmsg+0x69/0x90
[ 231.692905] [<ffffffff812ce8f5>] ? move_addr_to_kernel+0x45/0x70
[ 231.692905] [<ffffffff812d02e2>] SyS_sendto+0x112/0x150
[ 231.692905] [<ffffffff814e3755>] ? sysret_check+0x22/0x5d
[ 231.692905] [<ffffffff81080de5>] ? trace_hardirqs_on_caller+0x105/0x1d0
[ 231.692905] [<ffffffff811e7eab>] ? trace_hardirqs_on_thunk+0x3a/0x3f
[ 231.692905] [<ffffffff814e3729>] system_call_fastpath+0x12/0x17
[ 231.692905] Code: 00 01 c0 29 c1 0f b7 c9 48 8d 04 cd 00 00 00 00 48 c1 e1 06 48 29 c1 4b 8d 04 c0 48 c1 e0 06 48 01 c8 41 83 e9 01 48 8d 44 07 70 <8b> 48 08 8b 40 10 75 9d bf 02 00 00 00 31 f6 b8 06 00 00 00 4c
[ 231.692905] RIP [<ffffffff814c0dbe>] minstrel_ht_sort_best_tp_rates+0xfe/0x160
[ 231.692905] RSP <ffff88001fc03d38>
[ 231.692905] CR2: ffff8800201019d0
[ 231.692905] ---[ end trace ab3554bcb77c14a7 ]---
[ 231.692905] Kernel panic - not syncing: Fatal exception in interrupt

--
Jouni Malinen PGP id EFC895FA