2020-03-28 16:46:48

by George Spelvin

Subject: [RFC PATCH v1 07/50] mm/slab: Use simpler Fisher-Yates shuffle

In addition to using reciprocal_scale() rather than %, use the
initialize-while-shuffling ("inside-out") form of the Fisher-Yates shuffle.
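
(For reference, reciprocal_scale() maps a 32-bit value into [0, ep_ro)
with a multiply and a shift instead of a division; it is roughly

	static inline u32 reciprocal_scale(u32 val, u32 ep_ro)
	{
		return (u32)(((u64)val * ep_ro) >> 32);
	}

The result is about as (non-)uniform as val % ep_ro, but it avoids a
hardware divide in the per-object loop.)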

Rather than swapping list[i] and list[rand] immediately after
initializing list[i] = i, copy list[i] = list[rand] and then
initialize list[rand] = i.

Note that 0 <= rand <= i, so if rand == i, the first step copies
uninitialized memory to itself before the second step initializes it.
The garbage value is immediately overwritten, so this is harmless.
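
To make the flow concrete, here is a throwaway userspace sketch of the
same loop (rand() % n merely stands in for the kernel's
reciprocal_scale(prandom_u32_state(...), n), and COUNT is arbitrary):

	#include <stdio.h>
	#include <stdlib.h>

	#define COUNT 10

	int main(void)
	{
		unsigned int list[COUNT], i;

		/* Inside-out Fisher-Yates: initialize and shuffle in one pass */
		list[0] = 0;
		for (i = 1; i < COUNT; i++) {
			unsigned int j = rand() % (i + 1);	/* 0 <= j <= i */

			list[i] = list[j];	/* garbage self-copy when j == i ... */
			list[j] = i;		/* ... immediately overwritten here  */
		}

		for (i = 0; i < COUNT; i++)
			printf("%u ", list[i]);
		printf("\n");
		return 0;
	}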

This whole pre-computed shuffle list algorithm really needs a more
extensive overhaul. It's basically a very-special-purpose PRNG
created to amortize the overhead of get_random_int(). But there
are more efficient ways to use the 32 random bits it returns
than just choosing a random starting point modulo cachep->num.
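
Purely as an illustration of the kind of thing a rework could do (an
assumption about future direction, not something this patch implements):
a single 32-bit word can supply several small bounded draws if it is
treated as a fixed-point fraction, as long as the product of the bounds
stays well below 2^32:

	/*
	 * Sketch: return a value in [0, bound) and leave the low product
	 * bits behind in *x as the "remaining" randomness for the next
	 * draw.  Only approximately uniform/independent while the bounds
	 * multiplied together stay much smaller than 2^32.
	 */
	static u32 draw(u32 *x, u32 bound)
	{
		u64 prod = (u64)*x * bound;

		*x = (u32)prod;		/* fractional (low) bits */
		return prod >> 32;	/* bounded draw (high bits) */
	}

e.g. x = get_random_int(); a = draw(&x, n); b = draw(&x, m); ...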

Signed-off-by: George Spelvin <[email protected]>
Cc: Thomas Garnier <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: [email protected]
---
 mm/slab.c        | 25 +++++++++----------------
 mm/slab_common.c | 15 +++++++--------
 2 files changed, 16 insertions(+), 24 deletions(-)

diff --git a/mm/slab.c b/mm/slab.c
index a89633603b2d7..d9499d54afa59 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2404,7 +2404,7 @@ static bool freelist_state_initialize(union freelist_init_state *state,
 	} else {
 		state->list = cachep->random_seq;
 		state->count = count;
-		state->pos = rand % count;
+		state->pos = reciprocal_scale(rand, count);
 		ret = true;
 	}
 	return ret;
@@ -2418,20 +2418,13 @@ static freelist_idx_t next_random_slot(union freelist_init_state *state)
 	return state->list[state->pos++];
 }
 
-/* Swap two freelist entries */
-static void swap_free_obj(struct page *page, unsigned int a, unsigned int b)
-{
-	swap(((freelist_idx_t *)page->freelist)[a],
-	     ((freelist_idx_t *)page->freelist)[b]);
-}
-
 /*
  * Shuffle the freelist initialization state based on pre-computed lists.
  * return true if the list was successfully shuffled, false otherwise.
  */
 static bool shuffle_freelist(struct kmem_cache *cachep, struct page *page)
 {
-	unsigned int objfreelist = 0, i, rand, count = cachep->num;
+	unsigned int objfreelist = 0, i, count = cachep->num;
 	union freelist_init_state state;
 	bool precomputed;
 
@@ -2456,14 +2449,14 @@ static bool shuffle_freelist(struct kmem_cache *cachep, struct page *page)
 	 * Later use a pre-computed list for speed.
 	 */
 	if (!precomputed) {
-		for (i = 0; i < count; i++)
-			set_free_obj(page, i, i);
-
 		/* Fisher-Yates shuffle */
-		for (i = count - 1; i > 0; i--) {
-			rand = prandom_u32_state(&state.rnd_state);
-			rand %= (i + 1);
-			swap_free_obj(page, i, rand);
+		set_free_obj(page, 0, 0);
+		for (i = 1; i < count; i++) {
+			unsigned int rand = prandom_u32_state(&state.rnd_state);
+
+			rand = reciprocal_scale(rand, i + 1);
+			set_free_obj(page, i, get_free_obj(page, rand));
+			set_free_obj(page, rand, i);
 		}
 	} else {
 		for (i = 0; i < count; i++)
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 0d95ddea13b0d..67908fc842d98 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -1349,17 +1349,16 @@ EXPORT_SYMBOL(kmalloc_order_trace);
 static void freelist_randomize(struct rnd_state *state, unsigned int *list,
 			       unsigned int count)
 {
-	unsigned int rand;
 	unsigned int i;
 
-	for (i = 0; i < count; i++)
-		list[i] = i;
-
 	/* Fisher-Yates shuffle */
-	for (i = count - 1; i > 0; i--) {
-		rand = prandom_u32_state(state);
-		rand %= (i + 1);
-		swap(list[i], list[rand]);
+	list[0] = 0;
+	for (i = 1; i < count; i++) {
+		unsigned int rand = prandom_u32_state(state);
+
+		rand = reciprocal_scale(rand, i + 1);
+		list[i] = list[rand];
+		list[rand] = i;
 	}
 }

--
2.26.0