v1->v2:
- migrated api to random.h, ccing lkml
- dropped second patch for now
Daniel Borkmann (2):
random: add prandom_u32_range and prandom_u32_max helpers
net: migrate direct users to prandom_u32_max
drivers/net/team/team_mode_random.c | 8 +-------
include/linux/random.h | 31 ++++++++++++++++++++++++++++++-
include/net/red.h | 2 +-
net/802/garp.c | 3 ++-
net/802/mrp.c | 3 ++-
net/packet/af_packet.c | 2 +-
net/sched/sch_choke.c | 8 +-------
7 files changed, 38 insertions(+), 19 deletions(-)
--
1.7.11.7
We have implemented the same function over and over, so introduce
generic helpers that unify these implementations in order to migrate
such code to use them. Make the API similar to randomize_range()
for consistency. prandom_u32_range() generates numbers in [start, end]
interval and prandom_u32_max() generates numbers in [0, end] interval.
Signed-off-by: Daniel Borkmann <[email protected]>
Cc: Theodore Ts'o <[email protected]>
Cc: Joe Perches <[email protected]>
Cc: [email protected]
---
include/linux/random.h | 31 ++++++++++++++++++++++++++++++-
1 file changed, 30 insertions(+), 1 deletion(-)
diff --git a/include/linux/random.h b/include/linux/random.h
index 3b9377d..17c91c2 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -8,7 +8,6 @@
#include <uapi/linux/random.h>
-
extern void add_device_randomness(const void *, unsigned int);
extern void add_input_randomness(unsigned int type, unsigned int code,
unsigned int value);
@@ -32,6 +31,36 @@ void prandom_seed(u32 seed);
u32 prandom_u32_state(struct rnd_state *);
void prandom_bytes_state(struct rnd_state *state, void *buf, int nbytes);
+/**
+ * prandom_u32_range - return a random number in interval [start, end]
+ * @start: lower interval endpoint
+ * @end: higher interval endpoint
+ *
+ * Returns a number that is in the given interval:
+ *
+ * [...... <range> .....]
+ * start end
+ *
+ * Callers need to make sure that start <= end. Note that the result
+ * depends on PRNG being well distributed in [0, ~0U] space. Here we
+ * use maximally equidistributed combined Tausworthe generator.
+ */
+static inline u32 prandom_u32_range(u32 start, u32 end)
+{
+ return (u32)(((u64) prandom_u32() * (end + 1 - start)) >> 32) + start;
+}
+
+/**
+ * prandom_u32_max - return a random number in interval [0, end]
+ * @end: higher interval endpoint
+ *
+ * Returns a number that is in interval [0, end].
+ */
+static inline u32 prandom_u32_max(u32 end)
+{
+ return prandom_u32_range(0, end);
+}
+
/*
* Handle minimum values for seeds
*/
--
1.7.11.7
Users that directly use or reimplement what we have in prandom_u32_max()
can be migrated for now to use it directly, so that we can reduce code size
and avoid reimplementations. That's obvious, follow-up patches could inspect
modulo use cases for possible migration as well.
Signed-off-by: Daniel Borkmann <[email protected]>
---
drivers/net/team/team_mode_random.c | 8 +-------
include/net/red.h | 2 +-
net/802/garp.c | 3 ++-
net/802/mrp.c | 3 ++-
net/packet/af_packet.c | 2 +-
net/sched/sch_choke.c | 8 +-------
6 files changed, 8 insertions(+), 18 deletions(-)
diff --git a/drivers/net/team/team_mode_random.c b/drivers/net/team/team_mode_random.c
index 7f032e2..0dbd1eb 100644
--- a/drivers/net/team/team_mode_random.c
+++ b/drivers/net/team/team_mode_random.c
@@ -13,20 +13,14 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/skbuff.h>
-#include <linux/reciprocal_div.h>
#include <linux/if_team.h>
-static u32 random_N(unsigned int N)
-{
- return reciprocal_divide(prandom_u32(), N);
-}
-
static bool rnd_transmit(struct team *team, struct sk_buff *skb)
{
struct team_port *port;
int port_index;
- port_index = random_N(team->en_port_count);
+ port_index = prandom_u32_max(team->en_port_count - 1);
port = team_get_port_by_index_rcu(team, port_index);
if (unlikely(!port))
goto drop;
diff --git a/include/net/red.h b/include/net/red.h
index ef46058..56f3c0c 100644
--- a/include/net/red.h
+++ b/include/net/red.h
@@ -303,7 +303,7 @@ static inline unsigned long red_calc_qavg(const struct red_parms *p,
static inline u32 red_random(const struct red_parms *p)
{
- return reciprocal_divide(net_random(), p->max_P_reciprocal);
+ return prandom_u32_max(p->max_P_reciprocal - 1);
}
static inline int red_mark_probability(const struct red_parms *p,
diff --git a/net/802/garp.c b/net/802/garp.c
index 5d9630a..b4be421 100644
--- a/net/802/garp.c
+++ b/net/802/garp.c
@@ -397,7 +397,8 @@ static void garp_join_timer_arm(struct garp_applicant *app)
{
unsigned long delay;
- delay = (u64)msecs_to_jiffies(garp_join_time) * net_random() >> 32;
+ delay = prandom_u32_max(msecs_to_jiffies(garp_join_time) - 1);
+
mod_timer(&app->join_timer, jiffies + delay);
}
diff --git a/net/802/mrp.c b/net/802/mrp.c
index 1eb05d8..1a08ae7 100644
--- a/net/802/mrp.c
+++ b/net/802/mrp.c
@@ -578,7 +578,8 @@ static void mrp_join_timer_arm(struct mrp_applicant *app)
{
unsigned long delay;
- delay = (u64)msecs_to_jiffies(mrp_join_time) * net_random() >> 32;
+ delay = prandom_u32_max(msecs_to_jiffies(mrp_join_time) - 1);
+
mod_timer(&app->join_timer, jiffies + delay);
}
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 2e8286b..1c1ccf9 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1162,7 +1162,7 @@ static unsigned int fanout_demux_rnd(struct packet_fanout *f,
struct sk_buff *skb,
unsigned int num)
{
- return reciprocal_divide(prandom_u32(), num);
+ return prandom_u32_max(num - 1);
}
static unsigned int fanout_demux_rollover(struct packet_fanout *f,
diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
index ef53ab8..7a73fbf 100644
--- a/net/sched/sch_choke.c
+++ b/net/sched/sch_choke.c
@@ -77,12 +77,6 @@ struct choke_sched_data {
struct sk_buff **tab;
};
-/* deliver a random number between 0 and N - 1 */
-static u32 random_N(unsigned int N)
-{
- return reciprocal_divide(prandom_u32(), N);
-}
-
/* number of elements in queue including holes */
static unsigned int choke_len(const struct choke_sched_data *q)
{
@@ -233,7 +227,7 @@ static struct sk_buff *choke_peek_random(const struct choke_sched_data *q,
int retrys = 3;
do {
- *pidx = (q->head + random_N(choke_len(q))) & q->tab_mask;
+ *pidx = (q->head + prandom_u32_max(choke_len(q) - 1)) & q->tab_mask;
skb = q->tab[*pidx];
if (skb)
return skb;
--
1.7.11.7
On Wed, 2013-09-04 at 14:37 +0200, Daniel Borkmann wrote:
> Users that directly use or reimplement what we have in prandom_u32_max()
> can be migrated for now to use it directly, so that we can reduce code size
> and avoid reimplementations. That's obvious, follow-up patches could inspect
> modulo use cases for possible migration as well.
>
> Signed-off-by: Daniel Borkmann <[email protected]>
> ---
> drivers/net/team/team_mode_random.c | 8 +-------
> include/net/red.h | 2 +-
> net/802/garp.c | 3 ++-
> net/802/mrp.c | 3 ++-
> net/packet/af_packet.c | 2 +-
> net/sched/sch_choke.c | 8 +-------
> 6 files changed, 8 insertions(+), 18 deletions(-)
>
> diff --git a/drivers/net/team/team_mode_random.c b/drivers/net/team/team_mode_random.c
> index 7f032e2..0dbd1eb 100644
> --- a/drivers/net/team/team_mode_random.c
> +++ b/drivers/net/team/team_mode_random.c
> @@ -13,20 +13,14 @@
> #include <linux/module.h>
> #include <linux/init.h>
> #include <linux/skbuff.h>
> -#include <linux/reciprocal_div.h>
> #include <linux/if_team.h>
>
> -static u32 random_N(unsigned int N)
> -{
> - return reciprocal_divide(prandom_u32(), N);
> -}
> -
> static bool rnd_transmit(struct team *team, struct sk_buff *skb)
> {
> struct team_port *port;
> int port_index;
>
> - port_index = random_N(team->en_port_count);
> + port_index = prandom_u32_max(team->en_port_count - 1);
Note that random_N(0) gave 0, while prandom_u32_max(0 - 1) can return any
number in [0 ... ~0U].
On 09/04/2013 02:52 PM, Eric Dumazet wrote:
> On Wed, 2013-09-04 at 14:37 +0200, Daniel Borkmann wrote:
>> Users that directly use or reimplement what we have in prandom_u32_max()
>> can be migrated for now to use it directly, so that we can reduce code size
>> and avoid reimplementations. That's obvious, follow-up patches could inspect
>> modulo use cases for possible migration as well.
>>
>> Signed-off-by: Daniel Borkmann <[email protected]>
>> ---
>> drivers/net/team/team_mode_random.c | 8 +-------
>> include/net/red.h | 2 +-
>> net/802/garp.c | 3 ++-
>> net/802/mrp.c | 3 ++-
>> net/packet/af_packet.c | 2 +-
>> net/sched/sch_choke.c | 8 +-------
>> 6 files changed, 8 insertions(+), 18 deletions(-)
>>
>> diff --git a/drivers/net/team/team_mode_random.c b/drivers/net/team/team_mode_random.c
>> index 7f032e2..0dbd1eb 100644
>> --- a/drivers/net/team/team_mode_random.c
>> +++ b/drivers/net/team/team_mode_random.c
>> @@ -13,20 +13,14 @@
>> #include <linux/module.h>
>> #include <linux/init.h>
>> #include <linux/skbuff.h>
>> -#include <linux/reciprocal_div.h>
>> #include <linux/if_team.h>
>>
>> -static u32 random_N(unsigned int N)
>> -{
>> - return reciprocal_divide(prandom_u32(), N);
>> -}
>> -
>> static bool rnd_transmit(struct team *team, struct sk_buff *skb)
>> {
>> struct team_port *port;
>> int port_index;
>>
>> - port_index = random_N(team->en_port_count);
>> + port_index = prandom_u32_max(team->en_port_count - 1);
>
>
> Note the random_N(0) gave 0, while prandom_u32_max(0 - 1) can return any
> number in [0 ... ~0U]
Very true, that was stupid. Thanks for catching!
On Wed, 2013-09-04 at 14:37 +0200, Daniel Borkmann wrote:
> We have implemented the same function over and over, so introduce
> generic helpers that unify these implementations in order to migrate
> such code to use them. Make the API similarly to randomize_range()
> for consistency. prandom_u32_range() generates numbers in [start, end]
> interval and prandom_u32_max() generates numbers in [0, end] interval.
I think these helpers can in many cases cause
poorer compiler-generated object code.
> +/**
> + * prandom_u32_range - return a random number in interval [start, end]
> + * @start: lower interval endpoint
> + * @end: higher interval endpoint
> + *
> + * Returns a number that is in the given interval:
> + *
> + * [...... <range> .....]
> + * start end
> + *
> + * Callers need to make sure that start <= end. Note that the result
> + * depends on PRNG being well distributed in [0, ~0U] space. Here we
> + * use maximally equidistributed combined Tausworthe generator.
> + */
> +static inline u32 prandom_u32_range(u32 start, u32 end)
> +{
> + return (u32)(((u64) prandom_u32() * (end + 1 - start)) >> 32) + start;
> +}
This is effectively:
return (prandom_u32() % (end + 1 - start)) + start;
and if start and end are constant, gcc can optimize the
division by constant to a 32 bit multiply/shift/add.
I think if you add __builtin_constant_p tests for start
and end and expand the code a little you can still get
the optimizations done.