mac80211: add an improved moving average algorithm to minstrel

Improves rate control responsiveness and performance

Signed-off-by: Felix Fietkau <nbd@nbd.name>
[reworked to apply on 4.19.79 mac80211 + renumbered + refreshed]
Signed-off-by: Koen Vandeputte <koen.vandeputte@ncentric.com>

@@ -0,0 +1,61 @@
From: Felix Fietkau <nbd@nbd.name>
Date: Sat, 28 Sep 2019 15:44:06 +0200
Subject: [PATCH] mac80211: minstrel: remove divisions in tx status path
Use a slightly different threshold for downgrading spatial streams to
make it easier to calculate without divisions.
Slightly reduces CPU overhead.
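As an illustration (not part of the patch), here is a minimal user-space sketch of the two checks. SCALE and FRAC mirror the kernel's MINSTREL_SCALE/MINSTREL_FRAC, and the sample values are arbitrary. The old test divides to build a 20% fractional threshold; the new one compares against attempts / 4, a 25% threshold that the compiler turns into a shift:

/* standalone sketch of the threshold change, not kernel code */
#include <stdbool.h>
#include <stdio.h>

#define SCALE 12	/* MINSTREL_SCALE in the kernel; exact value irrelevant here */
#define FRAC(val, div) (((val) << SCALE) / (div))	/* mirrors MINSTREL_FRAC */

static bool downgrade_old(unsigned int success, unsigned int attempts)
{
	/* old check: 20% threshold, needs a division per evaluation */
	return attempts > 30 && FRAC(success, attempts) < FRAC(20, 100);
}

static bool downgrade_new(unsigned int success, unsigned int attempts)
{
	/* new check: 25% threshold, division by 4 becomes a shift */
	return attempts > 30 && success < attempts / 4;
}

int main(void)
{
	/* 7/32 = 21.9%: above the old 20% cutoff, below the new 25% one */
	printf("old=%d new=%d\n", downgrade_old(7, 32), downgrade_new(7, 32));
	return 0;
}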
Signed-off-by: Felix Fietkau <nbd@nbd.name>
---
--- a/net/mac80211/rc80211_minstrel.c
+++ b/net/mac80211/rc80211_minstrel.c
@@ -295,8 +295,7 @@ minstrel_tx_status(void *priv, struct ie
if (mi->sample_deferred > 0)
mi->sample_deferred--;
- if (time_after(jiffies, mi->last_stats_update +
- (mp->update_interval * HZ) / 1000))
+ if (time_after(jiffies, mi->last_stats_update + mp->update_interval))
minstrel_update_stats(mp, mi);
}
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -937,23 +937,21 @@ minstrel_ht_tx_status(void *priv, struct
*/
rate = minstrel_get_ratestats(mi, mi->max_tp_rate[0]);
if (rate->attempts > 30 &&
- MINSTREL_FRAC(rate->success, rate->attempts) <
- MINSTREL_FRAC(20, 100)) {
+ rate->success < rate->attempts / 4) {
minstrel_downgrade_rate(mi, &mi->max_tp_rate[0], true);
update = true;
}
rate2 = minstrel_get_ratestats(mi, mi->max_tp_rate[1]);
if (rate2->attempts > 30 &&
- MINSTREL_FRAC(rate2->success, rate2->attempts) <
- MINSTREL_FRAC(20, 100)) {
+ rate2->success < rate2->attempts / 4) {
minstrel_downgrade_rate(mi, &mi->max_tp_rate[1], false);
update = true;
}
}
if (time_after(jiffies, mi->last_stats_update +
- (mp->update_interval / 2 * HZ) / 1000)) {
+ mp->update_interval / 2)) {
update = true;
minstrel_ht_update_stats(mp, mi, true);
}
@@ -1640,7 +1638,7 @@ minstrel_ht_alloc(struct ieee80211_hw *h
mp->has_mrr = true;
mp->hw = hw;
- mp->update_interval = 100;
+ mp->update_interval = HZ / 10;
#ifdef CPTCFG_MAC80211_DEBUGFS
mp->fixed_rate_idx = (u32) -1;

@@ -0,0 +1,235 @@
From: Felix Fietkau <nbd@nbd.name>
Date: Sat, 28 Sep 2019 15:46:06 +0200
Subject: [PATCH] mac80211: minstrel_ht: replace rate stats ewma with a
better moving average
Rate success probability usually fluctuates a lot under normal conditions.
With a simple EWMA, noise and fluctuation can be reduced by increasing the
window length, but that comes at the cost of introducing lag on sudden
changes.
This change replaces the EWMA implementation with a moving average that's
designed to significantly reduce lag while keeping a bigger window size
by being better at filtering out noise.
It is only slightly more expensive than the simple EWMA and still avoids
divisions in its calculation.
The algorithm is adapted from an implementation intended for a completely
different field (stock market trading), where the tradeoff of lag vs
noise filtering is equally important.
The algorithm works in the same way as the "smoothing filter" from
http://www.stockspotter.com/files/PredictiveIndicators.pdf, adapted for
fixed-point math with some constants, using only addition, bit shifts
and multiplication.
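For reference, a user-space sketch (my illustration, not kernel code) of how the coefficients follow from the formulas and of one fixed-point update step of the recurrence out = (coeff1*in + coeff2*out[-1] + coeff3*out[-2]) >> scale. It reproduces the MINSTREL_AVG_COEFF2/MINSTREL_AVG_COEFF3 constants added below when the scale is taken as 12 bits (the MINSTREL_SCALE value, assumed here):

/* illustrative coefficient derivation and one filter step, user space only */
#include <math.h>
#include <stdio.h>

#define SCALE 12	/* assumed equal to MINSTREL_SCALE */

int main(void)
{
	const double pi = 3.14159265358979323846;
	double period = 16.0;
	double a1 = exp(-pi * sqrt(2.0) / period);
	double c2 = 2.0 * a1 * cos(sqrt(2.0) * 2.0 * pi / period);
	double c3 = -a1 * a1;

	/* fixed-point constants, truncated toward zero */
	int coeff2 = (int)(c2 * (1 << SCALE));		/* 0x1499 */
	int coeff3 = (int)(c3 * (1 << SCALE));		/* -0x92e */
	int coeff1 = (1 << SCALE) - coeff2 - coeff3;

	/* one update step: previous outputs 3000, new input 4096 (== 100%) */
	int out_1 = 3000, out_2 = 3000, in = 1 << SCALE;
	int val = (coeff1 * in + coeff2 * out_1 + coeff3 * out_2) >> SCALE;

	printf("coeff1=%d coeff2=%#x coeff3=%d next=%d\n",
	       coeff1, coeff2, coeff3, val);
	return 0;
}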
To better make use of the filtering and bigger window size, the update
interval is cut in half.
For testing, the algorithm can be reverted to the older one via debugfs.
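A rough way to eyeball the lag/noise tradeoff (again an illustration, not part of the patch) is to feed both estimators a step in success probability and compare how quickly each converges. The EWMA weighting below (75% old / 25% new) and the clamping are simplified stand-ins; the filter uses the same recurrence and constants as minstrel_filter_avg_add():

/* user-space comparison of the two estimators on a step input */
#include <stdio.h>

#define SCALE 12
#define ONE (1 << SCALE)

static int ewma(int old, int cur)
{
	/* simple EWMA, 75% old / 25% new (illustrative weighting) */
	return (3 * old + cur) / 4;
}

static int filter(int *prev, int in)
{
	/* two-pole smoothing filter, same recurrence as minstrel_filter_avg_add() */
	const int c2 = 0x1499, c3 = -0x92e, c1 = ONE - c2 - c3;
	int val;

	if (!prev[0]) {			/* first sample: seed the filter */
		prev[1] = prev[0] = in;
		return in;
	}
	val = (c1 * in + c2 * prev[0] + c3 * prev[1]) >> SCALE;
	if (val < 0)
		val = 1;
	if (val > ONE)
		val = ONE;
	prev[1] = prev[0];
	prev[0] = val;
	return val;
}

int main(void)
{
	int avg = ONE / 4, prev[2] = { 0, 0 };
	int i;

	/* success probability jumps from 25% to 100% at iteration 8 */
	for (i = 0; i < 24; i++) {
		int in = (i < 8) ? ONE / 4 : ONE;

		avg = ewma(avg, in);
		printf("%2d in=%4d ewma=%4d filter=%4d\n",
		       i, in, avg, filter(prev, in));
	}
	return 0;
}

At runtime, the new_avg debugfs bool added by this patch switches between the two estimators, so regressions can be checked on live traffic without rebuilding.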
Signed-off-by: Felix Fietkau <nbd@nbd.name>
---
--- a/net/mac80211/rc80211_minstrel.c
+++ b/net/mac80211/rc80211_minstrel.c
@@ -157,14 +157,18 @@ minstrel_update_rates(struct minstrel_pr
* Recalculate statistics and counters of a given rate
*/
void
-minstrel_calc_rate_stats(struct minstrel_rate_stats *mrs)
+minstrel_calc_rate_stats(struct minstrel_priv *mp,
+ struct minstrel_rate_stats *mrs)
{
unsigned int cur_prob;
if (unlikely(mrs->attempts > 0)) {
mrs->sample_skipped = 0;
cur_prob = MINSTREL_FRAC(mrs->success, mrs->attempts);
- if (unlikely(!mrs->att_hist)) {
+ if (mp->new_avg) {
+ mrs->prob_ewma = minstrel_filter_avg_add(&mrs->avg,
+ cur_prob);
+ } else if (unlikely(!mrs->att_hist)) {
mrs->prob_ewma = cur_prob;
} else {
/* update exponential weighted moving variance */
@@ -206,7 +210,7 @@ minstrel_update_stats(struct minstrel_pr
struct minstrel_rate_stats *tmp_mrs = &mi->r[tmp_prob_rate].stats;
/* Update statistics of success probability per rate */
- minstrel_calc_rate_stats(mrs);
+ minstrel_calc_rate_stats(mp, mrs);
/* Sample less often below the 10% chance of success.
* Sample less often above the 95% chance of success. */
@@ -295,7 +299,8 @@ minstrel_tx_status(void *priv, struct ie
if (mi->sample_deferred > 0)
mi->sample_deferred--;
- if (time_after(jiffies, mi->last_stats_update + mp->update_interval))
+ if (time_after(jiffies, mi->last_stats_update +
+ mp->update_interval / (mp->new_avg ? 2 : 1)))
minstrel_update_stats(mp, mi);
}
--- a/net/mac80211/rc80211_minstrel.h
+++ b/net/mac80211/rc80211_minstrel.h
@@ -22,6 +22,21 @@
#define MAX_THR_RATES 4
/*
+ * Coefficients for moving average with noise filter (period=16),
+ * scaled by 10 bits
+ *
+ * a1 = exp(-pi * sqrt(2) / period)
+ * coeff2 = 2 * a1 * cos(sqrt(2) * 2 * pi / period)
+ * coeff3 = -sqr(a1)
+ * coeff1 = 1 - coeff2 - coeff3
+ */
+#define MINSTREL_AVG_COEFF1 (MINSTREL_FRAC(1, 1) - \
+ MINSTREL_AVG_COEFF2 - \
+ MINSTREL_AVG_COEFF3)
+#define MINSTREL_AVG_COEFF2 0x00001499
+#define MINSTREL_AVG_COEFF3 -0x0000092e
+
+/*
* Perform EWMA (Exponentially Weighted Moving Average) calculation
*/
static inline int
@@ -48,6 +63,41 @@ minstrel_ewmv(int old_ewmv, int cur_prob
return weight * (old_ewmv + MINSTREL_TRUNC(diff * incr)) / EWMA_DIV;
}
+struct minstrel_avg_ctx {
+ s32 prev[2];
+};
+
+static inline int minstrel_filter_avg_add(struct minstrel_avg_ctx *ctx, s32 in)
+{
+ s32 out_1 = ctx->prev[0];
+ s32 out_2 = ctx->prev[1];
+ s32 val;
+
+ if (!in)
+ in += 1;
+
+ if (!out_1) {
+ val = out_1 = in;
+ goto out;
+ }
+
+ val = MINSTREL_AVG_COEFF1 * in;
+ val += MINSTREL_AVG_COEFF2 * out_1;
+ val += MINSTREL_AVG_COEFF3 * out_2;
+ val >>= MINSTREL_SCALE;
+
+ if (val > 1 << MINSTREL_SCALE)
+ val = 1 << MINSTREL_SCALE;
+ if (val < 0)
+ val = 1;
+
+out:
+ ctx->prev[1] = out_1;
+ ctx->prev[0] = val;
+
+ return val;
+}
+
struct minstrel_rate_stats {
/* current / last sampling period attempts/success counters */
u16 attempts, last_attempts;
@@ -56,6 +106,8 @@ struct minstrel_rate_stats {
/* total attempts/success counters */
u32 att_hist, succ_hist;
+ struct minstrel_avg_ctx avg;
+
/* statistis of packet delivery probability
* prob_ewma - exponential weighted moving average of prob
* prob_ewmsd - exp. weighted moving standard deviation of prob */
@@ -114,6 +166,7 @@ struct minstrel_sta_info {
struct minstrel_priv {
struct ieee80211_hw *hw;
bool has_mrr;
+ bool new_avg;
u32 sample_switch;
unsigned int cw_min;
unsigned int cw_max;
@@ -153,7 +206,8 @@ extern const struct rate_control_ops mac
void minstrel_add_sta_debugfs(void *priv, void *priv_sta, struct dentry *dir);
/* Recalculate success probabilities and counters for a given rate using EWMA */
-void minstrel_calc_rate_stats(struct minstrel_rate_stats *mrs);
+void minstrel_calc_rate_stats(struct minstrel_priv *mp,
+ struct minstrel_rate_stats *mrs);
int minstrel_get_tp_avg(struct minstrel_rate *mr, int prob_ewma);
/* debugfs */
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -704,7 +704,7 @@ minstrel_ht_update_stats(struct minstrel
mrs = &mg->rates[i];
mrs->retry_updated = false;
- minstrel_calc_rate_stats(mrs);
+ minstrel_calc_rate_stats(mp, mrs);
cur_prob = mrs->prob_ewma;
if (minstrel_ht_get_tp_avg(mi, group, i, cur_prob) == 0)
@@ -740,6 +740,8 @@ minstrel_ht_update_stats(struct minstrel
/* try to sample all available rates during each interval */
mi->sample_count *= 8;
+ if (mp->new_avg)
+ mi->sample_count /= 2;
if (sample)
minstrel_ht_rate_sample_switch(mp, mi);
@@ -856,6 +858,7 @@ minstrel_ht_tx_status(void *priv, struct
struct ieee80211_tx_rate *ar = info->status.rates;
struct minstrel_rate_stats *rate, *rate2, *rate_sample = NULL;
struct minstrel_priv *mp = priv;
+ u32 update_interval = mp->update_interval / 2;
bool last, update = false;
bool sample_status = false;
int i;
@@ -910,6 +913,10 @@ minstrel_ht_tx_status(void *priv, struct
switch (mi->sample_mode) {
case MINSTREL_SAMPLE_IDLE:
+ if (mp->new_avg &&
+ (mp->hw->max_rates > 1 ||
+ mi->total_packets_cur < SAMPLE_SWITCH_THR))
+ update_interval /= 2;
break;
case MINSTREL_SAMPLE_ACTIVE:
@@ -950,8 +957,7 @@ minstrel_ht_tx_status(void *priv, struct
}
}
- if (time_after(jiffies, mi->last_stats_update +
- mp->update_interval / 2)) {
+ if (time_after(jiffies, mi->last_stats_update + update_interval)) {
update = true;
minstrel_ht_update_stats(mp, mi, true);
}
@@ -1639,6 +1645,7 @@ minstrel_ht_alloc(struct ieee80211_hw *h
mp->hw = hw;
mp->update_interval = HZ / 10;
+ mp->new_avg = true;
#ifdef CPTCFG_MAC80211_DEBUGFS
mp->fixed_rate_idx = (u32) -1;
@@ -1646,6 +1653,8 @@ minstrel_ht_alloc(struct ieee80211_hw *h
&mp->fixed_rate_idx);
debugfs_create_u32("sample_switch", S_IRUGO | S_IWUSR, debugfsdir,
&mp->sample_switch);
+ debugfs_create_bool("new_avg", S_IRUGO | S_IWUSR, debugfsdir,
+ &mp->new_avg);
#endif
minstrel_ht_init_cck_rates(mp);

@@ -0,0 +1,437 @@
From: Felix Fietkau <nbd@nbd.name>
Date: Tue, 8 Oct 2019 18:54:46 +0200
Subject: [PATCH] mac80211: minstrel_ht: rename prob_ewma to prob_avg, use it
for the new average
Reduces per-rate data structure size
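As a rough illustration of the size effect (a hypothetical field subset, not the real struct, which has many more members): the previous patch kept a struct minstrel_avg_ctx with two s32 values next to the u16 prob_ewma, while after this rename prob_avg doubles as both the published average and the newest filter output, with prob_avg_1 holding the older one:

/* illustrative size comparison of just the probability fields */
#include <stdio.h>

struct avg_ctx { int prev[2]; };	/* two s32 of filter state (8 bytes) */

struct before {				/* previous patch in this series */
	struct avg_ctx avg;
	unsigned short prob_ewma;
	unsigned short prob_ewmv;
};

struct after {				/* after the rename */
	unsigned short prob_ewmv;
	unsigned short prob_avg;	/* average, also newest filter output */
	unsigned short prob_avg_1;	/* previous filter output */
};

int main(void)
{
	printf("before: %zu bytes, after: %zu bytes\n",
	       sizeof(struct before), sizeof(struct after));
	return 0;
}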
Signed-off-by: Felix Fietkau <nbd@nbd.name>
---
--- a/net/mac80211/rc80211_minstrel.c
+++ b/net/mac80211/rc80211_minstrel.c
@@ -70,7 +70,7 @@ rix_to_ndx(struct minstrel_sta_info *mi,
}
/* return current EMWA throughput */
-int minstrel_get_tp_avg(struct minstrel_rate *mr, int prob_ewma)
+int minstrel_get_tp_avg(struct minstrel_rate *mr, int prob_avg)
{
int usecs;
@@ -79,13 +79,13 @@ int minstrel_get_tp_avg(struct minstrel_
usecs = 1000000;
/* reset thr. below 10% success */
- if (mr->stats.prob_ewma < MINSTREL_FRAC(10, 100))
+ if (mr->stats.prob_avg < MINSTREL_FRAC(10, 100))
return 0;
- if (prob_ewma > MINSTREL_FRAC(90, 100))
+ if (prob_avg > MINSTREL_FRAC(90, 100))
return MINSTREL_TRUNC(100000 * (MINSTREL_FRAC(90, 100) / usecs));
else
- return MINSTREL_TRUNC(100000 * (prob_ewma / usecs));
+ return MINSTREL_TRUNC(100000 * (prob_avg / usecs));
}
/* find & sort topmost throughput rates */
@@ -98,8 +98,8 @@ minstrel_sort_best_tp_rates(struct minst
for (j = MAX_THR_RATES; j > 0; --j) {
tmp_mrs = &mi->r[tp_list[j - 1]].stats;
- if (minstrel_get_tp_avg(&mi->r[i], cur_mrs->prob_ewma) <=
- minstrel_get_tp_avg(&mi->r[tp_list[j - 1]], tmp_mrs->prob_ewma))
+ if (minstrel_get_tp_avg(&mi->r[i], cur_mrs->prob_avg) <=
+ minstrel_get_tp_avg(&mi->r[tp_list[j - 1]], tmp_mrs->prob_avg))
break;
}
@@ -166,21 +166,21 @@ minstrel_calc_rate_stats(struct minstrel
mrs->sample_skipped = 0;
cur_prob = MINSTREL_FRAC(mrs->success, mrs->attempts);
if (mp->new_avg) {
- mrs->prob_ewma = minstrel_filter_avg_add(&mrs->avg,
- cur_prob);
+ minstrel_filter_avg_add(&mrs->prob_avg,
+ &mrs->prob_avg_1, cur_prob);
} else if (unlikely(!mrs->att_hist)) {
- mrs->prob_ewma = cur_prob;
+ mrs->prob_avg = cur_prob;
} else {
/* update exponential weighted moving variance */
mrs->prob_ewmv = minstrel_ewmv(mrs->prob_ewmv,
cur_prob,
- mrs->prob_ewma,
+ mrs->prob_avg,
EWMA_LEVEL);
/*update exponential weighted moving avarage */
- mrs->prob_ewma = minstrel_ewma(mrs->prob_ewma,
- cur_prob,
- EWMA_LEVEL);
+ mrs->prob_avg = minstrel_ewma(mrs->prob_avg,
+ cur_prob,
+ EWMA_LEVEL);
}
mrs->att_hist += mrs->attempts;
mrs->succ_hist += mrs->success;
@@ -214,8 +214,8 @@ minstrel_update_stats(struct minstrel_pr
/* Sample less often below the 10% chance of success.
* Sample less often above the 95% chance of success. */
- if (mrs->prob_ewma > MINSTREL_FRAC(95, 100) ||
- mrs->prob_ewma < MINSTREL_FRAC(10, 100)) {
+ if (mrs->prob_avg > MINSTREL_FRAC(95, 100) ||
+ mrs->prob_avg < MINSTREL_FRAC(10, 100)) {
mr->adjusted_retry_count = mrs->retry_count >> 1;
if (mr->adjusted_retry_count > 2)
mr->adjusted_retry_count = 2;
@@ -235,14 +235,14 @@ minstrel_update_stats(struct minstrel_pr
* choose the maximum throughput rate as max_prob_rate
* (2) if all success probabilities < 95%, the rate with
* highest success probability is chosen as max_prob_rate */
- if (mrs->prob_ewma >= MINSTREL_FRAC(95, 100)) {
- tmp_cur_tp = minstrel_get_tp_avg(mr, mrs->prob_ewma);
+ if (mrs->prob_avg >= MINSTREL_FRAC(95, 100)) {
+ tmp_cur_tp = minstrel_get_tp_avg(mr, mrs->prob_avg);
tmp_prob_tp = minstrel_get_tp_avg(&mi->r[tmp_prob_rate],
- tmp_mrs->prob_ewma);
+ tmp_mrs->prob_avg);
if (tmp_cur_tp >= tmp_prob_tp)
tmp_prob_rate = i;
} else {
- if (mrs->prob_ewma >= tmp_mrs->prob_ewma)
+ if (mrs->prob_avg >= tmp_mrs->prob_avg)
tmp_prob_rate = i;
}
}
@@ -436,7 +436,7 @@ minstrel_get_rate(void *priv, struct iee
* has a probability of >95%, we shouldn't be attempting
* to use it, as this only wastes precious airtime */
if (!mrr_capable &&
- (mi->r[ndx].stats.prob_ewma > MINSTREL_FRAC(95, 100)))
+ (mi->r[ndx].stats.prob_avg > MINSTREL_FRAC(95, 100)))
return;
mi->prev_sample = true;
@@ -587,7 +587,7 @@ static u32 minstrel_get_expected_through
* computing cur_tp
*/
tmp_mrs = &mi->r[idx].stats;
- tmp_cur_tp = minstrel_get_tp_avg(&mi->r[idx], tmp_mrs->prob_ewma) * 10;
+ tmp_cur_tp = minstrel_get_tp_avg(&mi->r[idx], tmp_mrs->prob_avg) * 10;
tmp_cur_tp = tmp_cur_tp * 1200 * 8 / 1024;
return tmp_cur_tp;
--- a/net/mac80211/rc80211_minstrel.h
+++ b/net/mac80211/rc80211_minstrel.h
@@ -63,14 +63,10 @@ minstrel_ewmv(int old_ewmv, int cur_prob
return weight * (old_ewmv + MINSTREL_TRUNC(diff * incr)) / EWMA_DIV;
}
-struct minstrel_avg_ctx {
- s32 prev[2];
-};
-
-static inline int minstrel_filter_avg_add(struct minstrel_avg_ctx *ctx, s32 in)
+static inline int minstrel_filter_avg_add(u16 *prev_1, u16 *prev_2, s32 in)
{
- s32 out_1 = ctx->prev[0];
- s32 out_2 = ctx->prev[1];
+ s32 out_1 = *prev_1;
+ s32 out_2 = *prev_2;
s32 val;
if (!in)
@@ -92,8 +88,8 @@ static inline int minstrel_filter_avg_ad
val = 1;
out:
- ctx->prev[1] = out_1;
- ctx->prev[0] = val;
+ *prev_2 = out_1;
+ *prev_1 = val;
return val;
}
@@ -106,14 +102,15 @@ struct minstrel_rate_stats {
/* total attempts/success counters */
u32 att_hist, succ_hist;
- struct minstrel_avg_ctx avg;
-
/* statistis of packet delivery probability
* prob_ewma - exponential weighted moving average of prob
* prob_ewmsd - exp. weighted moving standard deviation of prob */
- u16 prob_ewma;
u16 prob_ewmv;
+ /* prob_avg - moving average of prob */
+ u16 prob_avg;
+ u16 prob_avg_1;
+
/* maximum retry counts */
u8 retry_count;
u8 retry_count_rtscts;
@@ -208,7 +205,7 @@ void minstrel_add_sta_debugfs(void *priv
/* Recalculate success probabilities and counters for a given rate using EWMA */
void minstrel_calc_rate_stats(struct minstrel_priv *mp,
struct minstrel_rate_stats *mrs);
-int minstrel_get_tp_avg(struct minstrel_rate *mr, int prob_ewma);
+int minstrel_get_tp_avg(struct minstrel_rate *mr, int prob_avg);
/* debugfs */
int minstrel_stats_open(struct inode *inode, struct file *file);
--- a/net/mac80211/rc80211_minstrel_debugfs.c
+++ b/net/mac80211/rc80211_minstrel_debugfs.c
@@ -91,8 +91,9 @@ minstrel_stats_open(struct inode *inode,
p += sprintf(p, "%6u ", mr->perfect_tx_time);
tp_max = minstrel_get_tp_avg(mr, MINSTREL_FRAC(100,100));
- tp_avg = minstrel_get_tp_avg(mr, mrs->prob_ewma);
- eprob = MINSTREL_TRUNC(mrs->prob_ewma * 1000);
+ tp_avg = minstrel_get_tp_avg(mr, mrs->prob_avg);
+ eprob = MINSTREL_TRUNC(mrs->prob_avg * 1000);
+
prob_ewmsd = minstrel_get_ewmsd10(mrs);
p += sprintf(p, "%4u.%1u %4u.%1u %3u.%1u %3u.%1u"
@@ -151,8 +152,8 @@ minstrel_stats_csv_open(struct inode *in
p += sprintf(p, "%u,",mr->perfect_tx_time);
tp_max = minstrel_get_tp_avg(mr, MINSTREL_FRAC(100,100));
- tp_avg = minstrel_get_tp_avg(mr, mrs->prob_ewma);
- eprob = MINSTREL_TRUNC(mrs->prob_ewma * 1000);
+ tp_avg = minstrel_get_tp_avg(mr, mrs->prob_avg);
+ eprob = MINSTREL_TRUNC(mrs->prob_avg * 1000);
prob_ewmsd = minstrel_get_ewmsd10(mrs);
p += sprintf(p, "%u.%u,%u.%u,%u.%u,%u.%u,%u,%u,%u,"
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -313,12 +313,12 @@ minstrel_ht_avg_ampdu_len(struct minstre
*/
int
minstrel_ht_get_tp_avg(struct minstrel_ht_sta *mi, int group, int rate,
- int prob_ewma)
+ int prob_avg)
{
unsigned int nsecs = 0;
/* do not account throughput if sucess prob is below 10% */
- if (prob_ewma < MINSTREL_FRAC(10, 100))
+ if (prob_avg < MINSTREL_FRAC(10, 100))
return 0;
if (group != MINSTREL_CCK_GROUP)
@@ -332,11 +332,11 @@ minstrel_ht_get_tp_avg(struct minstrel_h
* account for collision related packet error rate fluctuation
* (prob is scaled - see MINSTREL_FRAC above)
*/
- if (prob_ewma > MINSTREL_FRAC(90, 100))
+ if (prob_avg > MINSTREL_FRAC(90, 100))
return MINSTREL_TRUNC(100000 * ((MINSTREL_FRAC(90, 100) * 1000)
/ nsecs));
else
- return MINSTREL_TRUNC(100000 * ((prob_ewma * 1000) / nsecs));
+ return MINSTREL_TRUNC(100000 * ((prob_avg * 1000) / nsecs));
}
/*
@@ -356,13 +356,13 @@ minstrel_ht_sort_best_tp_rates(struct mi
cur_group = index / MCS_GROUP_RATES;
cur_idx = index % MCS_GROUP_RATES;
- cur_prob = mi->groups[cur_group].rates[cur_idx].prob_ewma;
+ cur_prob = mi->groups[cur_group].rates[cur_idx].prob_avg;
cur_tp_avg = minstrel_ht_get_tp_avg(mi, cur_group, cur_idx, cur_prob);
do {
tmp_group = tp_list[j - 1] / MCS_GROUP_RATES;
tmp_idx = tp_list[j - 1] % MCS_GROUP_RATES;
- tmp_prob = mi->groups[tmp_group].rates[tmp_idx].prob_ewma;
+ tmp_prob = mi->groups[tmp_group].rates[tmp_idx].prob_avg;
tmp_tp_avg = minstrel_ht_get_tp_avg(mi, tmp_group, tmp_idx,
tmp_prob);
if (cur_tp_avg < tmp_tp_avg ||
@@ -399,7 +399,7 @@ minstrel_ht_set_best_prob_rate(struct mi
tmp_group = mi->max_prob_rate / MCS_GROUP_RATES;
tmp_idx = mi->max_prob_rate % MCS_GROUP_RATES;
- tmp_prob = mi->groups[tmp_group].rates[tmp_idx].prob_ewma;
+ tmp_prob = mi->groups[tmp_group].rates[tmp_idx].prob_avg;
tmp_tp_avg = minstrel_ht_get_tp_avg(mi, tmp_group, tmp_idx, tmp_prob);
/* if max_tp_rate[0] is from MCS_GROUP max_prob_rate get selected from
@@ -411,11 +411,11 @@ minstrel_ht_set_best_prob_rate(struct mi
max_gpr_group = mg->max_group_prob_rate / MCS_GROUP_RATES;
max_gpr_idx = mg->max_group_prob_rate % MCS_GROUP_RATES;
- max_gpr_prob = mi->groups[max_gpr_group].rates[max_gpr_idx].prob_ewma;
+ max_gpr_prob = mi->groups[max_gpr_group].rates[max_gpr_idx].prob_avg;
- if (mrs->prob_ewma > MINSTREL_FRAC(75, 100)) {
+ if (mrs->prob_avg > MINSTREL_FRAC(75, 100)) {
cur_tp_avg = minstrel_ht_get_tp_avg(mi, cur_group, cur_idx,
- mrs->prob_ewma);
+ mrs->prob_avg);
if (cur_tp_avg > tmp_tp_avg)
mi->max_prob_rate = index;
@@ -425,9 +425,9 @@ minstrel_ht_set_best_prob_rate(struct mi
if (cur_tp_avg > max_gpr_tp_avg)
mg->max_group_prob_rate = index;
} else {
- if (mrs->prob_ewma > tmp_prob)
+ if (mrs->prob_avg > tmp_prob)
mi->max_prob_rate = index;
- if (mrs->prob_ewma > max_gpr_prob)
+ if (mrs->prob_avg > max_gpr_prob)
mg->max_group_prob_rate = index;
}
}
@@ -449,12 +449,12 @@ minstrel_ht_assign_best_tp_rates(struct
tmp_group = tmp_cck_tp_rate[0] / MCS_GROUP_RATES;
tmp_idx = tmp_cck_tp_rate[0] % MCS_GROUP_RATES;
- tmp_prob = mi->groups[tmp_group].rates[tmp_idx].prob_ewma;
+ tmp_prob = mi->groups[tmp_group].rates[tmp_idx].prob_avg;
tmp_cck_tp = minstrel_ht_get_tp_avg(mi, tmp_group, tmp_idx, tmp_prob);
tmp_group = tmp_mcs_tp_rate[0] / MCS_GROUP_RATES;
tmp_idx = tmp_mcs_tp_rate[0] % MCS_GROUP_RATES;
- tmp_prob = mi->groups[tmp_group].rates[tmp_idx].prob_ewma;
+ tmp_prob = mi->groups[tmp_group].rates[tmp_idx].prob_avg;
tmp_mcs_tp = minstrel_ht_get_tp_avg(mi, tmp_group, tmp_idx, tmp_prob);
if (tmp_cck_tp_rate && tmp_cck_tp > tmp_mcs_tp) {
@@ -485,7 +485,7 @@ minstrel_ht_prob_rate_reduce_streams(str
continue;
tmp_idx = mg->max_group_prob_rate % MCS_GROUP_RATES;
- tmp_prob = mi->groups[group].rates[tmp_idx].prob_ewma;
+ tmp_prob = mi->groups[group].rates[tmp_idx].prob_avg;
if (tmp_tp < minstrel_ht_get_tp_avg(mi, group, tmp_idx, tmp_prob) &&
(minstrel_mcs_groups[group].streams < tmp_max_streams)) {
@@ -590,7 +590,7 @@ minstrel_ht_rate_sample_switch(struct mi
* If that fails, look again for a rate that is at least as fast
*/
mrs = minstrel_get_ratestats(mi, mi->max_tp_rate[0]);
- faster_rate = mrs->prob_ewma > MINSTREL_FRAC(75, 100);
+ faster_rate = mrs->prob_avg > MINSTREL_FRAC(75, 100);
minstrel_ht_find_probe_rates(mi, rates, &n_rates, faster_rate);
if (!n_rates && faster_rate)
minstrel_ht_find_probe_rates(mi, rates, &n_rates, false);
@@ -705,7 +705,7 @@ minstrel_ht_update_stats(struct minstrel
mrs = &mg->rates[i];
mrs->retry_updated = false;
minstrel_calc_rate_stats(mp, mrs);
- cur_prob = mrs->prob_ewma;
+ cur_prob = mrs->prob_avg;
if (minstrel_ht_get_tp_avg(mi, group, i, cur_prob) == 0)
continue;
@@ -979,7 +979,7 @@ minstrel_calc_retransmit(struct minstrel
unsigned int overhead = 0, overhead_rtscts = 0;
mrs = minstrel_get_ratestats(mi, index);
- if (mrs->prob_ewma < MINSTREL_FRAC(1, 10)) {
+ if (mrs->prob_avg < MINSTREL_FRAC(1, 10)) {
mrs->retry_count = 1;
mrs->retry_count_rtscts = 1;
return;
@@ -1036,7 +1036,7 @@ minstrel_ht_set_rate(struct minstrel_pri
if (!mrs->retry_updated)
minstrel_calc_retransmit(mp, mi, index);
- if (mrs->prob_ewma < MINSTREL_FRAC(20, 100) || !mrs->retry_count) {
+ if (mrs->prob_avg < MINSTREL_FRAC(20, 100) || !mrs->retry_count) {
ratetbl->rate[offset].count = 2;
ratetbl->rate[offset].count_rts = 2;
ratetbl->rate[offset].count_cts = 2;
@@ -1070,11 +1070,11 @@ minstrel_ht_set_rate(struct minstrel_pri
}
static inline int
-minstrel_ht_get_prob_ewma(struct minstrel_ht_sta *mi, int rate)
+minstrel_ht_get_prob_avg(struct minstrel_ht_sta *mi, int rate)
{
int group = rate / MCS_GROUP_RATES;
rate %= MCS_GROUP_RATES;
- return mi->groups[group].rates[rate].prob_ewma;
+ return mi->groups[group].rates[rate].prob_avg;
}
static int
@@ -1086,7 +1086,7 @@ minstrel_ht_get_max_amsdu_len(struct min
unsigned int duration;
/* Disable A-MSDU if max_prob_rate is bad */
- if (mi->groups[group].rates[rate].prob_ewma < MINSTREL_FRAC(50, 100))
+ if (mi->groups[group].rates[rate].prob_avg < MINSTREL_FRAC(50, 100))
return 1;
duration = g->duration[rate];
@@ -1109,7 +1109,7 @@ minstrel_ht_get_max_amsdu_len(struct min
* data packet size
*/
if (duration > MCS_DURATION(1, 0, 260) ||
- (minstrel_ht_get_prob_ewma(mi, mi->max_tp_rate[0]) <
+ (minstrel_ht_get_prob_avg(mi, mi->max_tp_rate[0]) <
MINSTREL_FRAC(75, 100)))
return 3200;
@@ -1216,7 +1216,7 @@ minstrel_get_sample_rate(struct minstrel
* rate, to avoid wasting airtime.
*/
sample_dur = minstrel_get_duration(sample_idx);
- if (mrs->prob_ewma > MINSTREL_FRAC(95, 100) ||
+ if (mrs->prob_avg > MINSTREL_FRAC(95, 100) ||
minstrel_get_duration(mi->max_prob_rate) * 3 < sample_dur)
return -1;
@@ -1679,7 +1679,7 @@ static u32 minstrel_ht_get_expected_thro
i = mi->max_tp_rate[0] / MCS_GROUP_RATES;
j = mi->max_tp_rate[0] % MCS_GROUP_RATES;
- prob = mi->groups[i].rates[j].prob_ewma;
+ prob = mi->groups[i].rates[j].prob_avg;
/* convert tp_avg from pkt per second in kbps */
tp_avg = minstrel_ht_get_tp_avg(mi, i, j, prob) * 10;
--- a/net/mac80211/rc80211_minstrel_ht.h
+++ b/net/mac80211/rc80211_minstrel_ht.h
@@ -122,6 +122,6 @@ struct minstrel_ht_sta_priv {
void minstrel_ht_add_sta_debugfs(void *priv, void *priv_sta, struct dentry *dir);
int minstrel_ht_get_tp_avg(struct minstrel_ht_sta *mi, int group, int rate,
- int prob_ewma);
+ int prob_avg);
#endif
--- a/net/mac80211/rc80211_minstrel_ht_debugfs.c
+++ b/net/mac80211/rc80211_minstrel_ht_debugfs.c
@@ -102,8 +102,8 @@ minstrel_ht_stats_dump(struct minstrel_h
p += sprintf(p, "%6u ", tx_time);
tp_max = minstrel_ht_get_tp_avg(mi, i, j, MINSTREL_FRAC(100, 100));
- tp_avg = minstrel_ht_get_tp_avg(mi, i, j, mrs->prob_ewma);
- eprob = MINSTREL_TRUNC(mrs->prob_ewma * 1000);
+ tp_avg = minstrel_ht_get_tp_avg(mi, i, j, mrs->prob_avg);
+ eprob = MINSTREL_TRUNC(mrs->prob_avg * 1000);
prob_ewmsd = minstrel_get_ewmsd10(mrs);
p += sprintf(p, "%4u.%1u %4u.%1u %3u.%1u %3u.%1u"
@@ -250,8 +250,8 @@ minstrel_ht_stats_csv_dump(struct minstr
p += sprintf(p, "%u,", tx_time);
tp_max = minstrel_ht_get_tp_avg(mi, i, j, MINSTREL_FRAC(100, 100));
- tp_avg = minstrel_ht_get_tp_avg(mi, i, j, mrs->prob_ewma);
- eprob = MINSTREL_TRUNC(mrs->prob_ewma * 1000);
+ tp_avg = minstrel_ht_get_tp_avg(mi, i, j, mrs->prob_avg);
+ eprob = MINSTREL_TRUNC(mrs->prob_avg * 1000);
prob_ewmsd = minstrel_get_ewmsd10(mrs);
p += sprintf(p, "%u.%u,%u.%u,%u.%u,%u.%u,%u,%u,"