The parameter kfence_sample_interval can be set via the
kfence.sample_interval boot parameter and changed later at runtime
from the shell, which is convenient for automated testing and for
tuning KFENCE. However, the KFENCE test suite only uses the
compile-time CONFIG_KFENCE_SAMPLE_INTERVAL, so it does not run with
the sample interval users actually configured. Export
kfence_sample_interval so that the test suite can use the
runtime-set sample interval.
Signed-off-by: Peng Liu <[email protected]>
---
v2->v3:
- Revise change log description
v1->v2:
- Use EXPORT_SYMBOL_GPL instead of EXPORT_SYMBOL
include/linux/kfence.h | 2 ++
mm/kfence/core.c | 3 ++-
mm/kfence/kfence_test.c | 8 ++++----
3 files changed, 8 insertions(+), 5 deletions(-)
diff --git a/include/linux/kfence.h b/include/linux/kfence.h
index 4b5e3679a72c..f49e64222628 100644
--- a/include/linux/kfence.h
+++ b/include/linux/kfence.h
@@ -17,6 +17,8 @@
#include <linux/atomic.h>
#include <linux/static_key.h>
+extern unsigned long kfence_sample_interval;
+
/*
* We allocate an even number of pages, as it simplifies calculations to map
* address to metadata indices; effectively, the very first page serves as an
diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index 5ad40e3add45..13128fa13062 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -47,7 +47,8 @@
static bool kfence_enabled __read_mostly;
-static unsigned long kfence_sample_interval __read_mostly = CONFIG_KFENCE_SAMPLE_INTERVAL;
+unsigned long kfence_sample_interval __read_mostly = CONFIG_KFENCE_SAMPLE_INTERVAL;
+EXPORT_SYMBOL_GPL(kfence_sample_interval); /* Export for test modules. */
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
diff --git a/mm/kfence/kfence_test.c b/mm/kfence/kfence_test.c
index a22b1af85577..50dbb815a2a8 100644
--- a/mm/kfence/kfence_test.c
+++ b/mm/kfence/kfence_test.c
@@ -268,13 +268,13 @@ static void *test_alloc(struct kunit *test, size_t size, gfp_t gfp, enum allocat
* 100x the sample interval should be more than enough to ensure we get
* a KFENCE allocation eventually.
*/
- timeout = jiffies + msecs_to_jiffies(100 * CONFIG_KFENCE_SAMPLE_INTERVAL);
+ timeout = jiffies + msecs_to_jiffies(100 * kfence_sample_interval);
/*
* Especially for non-preemption kernels, ensure the allocation-gate
* timer can catch up: after @resched_after, every failed allocation
* attempt yields, to ensure the allocation-gate timer is scheduled.
*/
- resched_after = jiffies + msecs_to_jiffies(CONFIG_KFENCE_SAMPLE_INTERVAL);
+ resched_after = jiffies + msecs_to_jiffies(kfence_sample_interval);
do {
if (test_cache)
alloc = kmem_cache_alloc(test_cache, gfp);
@@ -608,7 +608,7 @@ static void test_gfpzero(struct kunit *test)
int i;
/* Skip if we think it'd take too long. */
- KFENCE_TEST_REQUIRES(test, CONFIG_KFENCE_SAMPLE_INTERVAL <= 100);
+ KFENCE_TEST_REQUIRES(test, kfence_sample_interval <= 100);
setup_test_cache(test, size, 0, NULL);
buf1 = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
@@ -739,7 +739,7 @@ static void test_memcache_alloc_bulk(struct kunit *test)
* 100x the sample interval should be more than enough to ensure we get
* a KFENCE allocation eventually.
*/
- timeout = jiffies + msecs_to_jiffies(100 * CONFIG_KFENCE_SAMPLE_INTERVAL);
+ timeout = jiffies + msecs_to_jiffies(100 * kfence_sample_interval);
do {
void *objects[100];
int i, num = kmem_cache_alloc_bulk(test_cache, GFP_ATOMIC, ARRAY_SIZE(objects),
--
2.18.0.huawei.25
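
For reference, a minimal sketch (a hypothetical out-of-tree module, not
part of this patch) of how a GPL-licensed module could consume the newly
exported symbol. It mirrors the 100x-sample-interval timeout heuristic the
in-tree KUnit test uses, and it needs MODULE_LICENSE("GPL") because the
symbol is exported with EXPORT_SYMBOL_GPL:

/* Hypothetical demo module, not part of this patch. */
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/kfence.h>
#include <linux/module.h>
#include <linux/printk.h>

static int __init kfence_interval_demo_init(void)
{
	/* Wait up to 100 sample periods, as the in-tree KUnit test does. */
	unsigned long timeout = jiffies + msecs_to_jiffies(100 * kfence_sample_interval);

	pr_info("kfence: sample interval %lu ms, giving up at jiffies=%lu\n",
		kfence_sample_interval, timeout);
	return 0;
}

static void __exit kfence_interval_demo_exit(void)
{
}

module_init(kfence_interval_demo_init);
module_exit(kfence_interval_demo_exit);
MODULE_LICENSE("GPL");	/* required to resolve an EXPORT_SYMBOL_GPL symbol */
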
On Mon, 7 Feb 2022 at 04:29, 'Peng Liu' via kasan-dev
<[email protected]> wrote:
> Signed-off-by: Peng Liu <[email protected]>
Reviewed-by: Marco Elver <[email protected]>
Thank you.