From: Borislav Petkov <[email protected]>
Commit
e2b32e678513 ("x86, kaslr: randomize module base load address")
made module base address randomization unconditional and didn't take into
account that KASLR may have been disabled by CONFIG_HIBERNATION or by the
"nokaslr" command line option. For more info, see the (now reverted) commit:
f47233c2d34f ("x86/mm/ASLR: Propagate base load address calculation")
In order to propagate the ASLR status to kernel proper, we need a single
bit in boot_params.hdr.loadflags. We've chosen bit 1, thus leaving the
top-down allocated bits for use by the bootloader.
Originally-by: Jiri Kosina <[email protected]>
Suggested-by: "H. Peter Anvin" <[email protected]>
Cc: Kees Cook <[email protected]>
Cc: Ingo Molnar <[email protected]>
Signed-off-by: Borislav Petkov <[email protected]>
---
Documentation/x86/boot.txt | 6 ++++++
arch/x86/boot/compressed/aslr.c | 5 ++++-
arch/x86/boot/compressed/misc.c | 5 ++++-
arch/x86/boot/compressed/misc.h | 6 ++++--
arch/x86/include/asm/setup.h | 5 +++++
arch/x86/include/uapi/asm/bootparam.h | 1 +
arch/x86/kernel/module.c | 11 ++---------
arch/x86/kernel/setup.c | 12 ++++++++----
8 files changed, 34 insertions(+), 17 deletions(-)
diff --git a/Documentation/x86/boot.txt b/Documentation/x86/boot.txt
index a75e3adaa39d..0e1756e680c1 100644
--- a/Documentation/x86/boot.txt
+++ b/Documentation/x86/boot.txt
@@ -406,6 +406,12 @@ Protocol: 2.00+
- If 0, the protected-mode code is loaded at 0x10000.
- If 1, the protected-mode code is loaded at 0x100000.
+ Bit 1 (read): ASLR_FLAG
+ - Used internally by the compressed kernel to communicate
+ ASLR status to kernel proper.
+ If 1, ASLR enabled.
+ If 0, ASLR disabled.
+
Bit 5 (write): QUIET_FLAG
- If 0, print early messages.
- If 1, suppress early messages.
diff --git a/arch/x86/boot/compressed/aslr.c b/arch/x86/boot/compressed/aslr.c
index bb1376381985..370e47d763b0 100644
--- a/arch/x86/boot/compressed/aslr.c
+++ b/arch/x86/boot/compressed/aslr.c
@@ -295,7 +295,8 @@ static unsigned long find_random_addr(unsigned long minimum,
return slots_fetch_random();
}
-unsigned char *choose_kernel_location(unsigned char *input,
+unsigned char *choose_kernel_location(struct boot_params *boot_params,
+ unsigned char *input,
unsigned long input_size,
unsigned char *output,
unsigned long output_size)
@@ -315,6 +316,8 @@ unsigned char *choose_kernel_location(unsigned char *input,
}
#endif
+ boot_params->hdr.loadflags |= ASLR_FLAG;
+
/* Record the various known unsafe memory ranges. */
mem_avoid_init((unsigned long)input, input_size,
(unsigned long)output, output_size);
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index a950864a64da..ca83518e405e 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -377,6 +377,9 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
real_mode = rmode;
+ /* Clear it for solely in-kernel use */
+ real_mode->hdr.loadflags &= ~ASLR_FLAG;
+
sanitize_boot_params(real_mode);
if (real_mode->screen_info.orig_video_mode == 7) {
@@ -401,7 +404,7 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
* the entire decompressed kernel plus relocation table, or the
* entire decompressed kernel plus .bss and .brk sections.
*/
- output = choose_kernel_location(input_data, input_len, output,
+ output = choose_kernel_location(real_mode, input_data, input_len, output,
output_len > run_size ? output_len
: run_size);
diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h
index 04477d68403f..89dd0d78013a 100644
--- a/arch/x86/boot/compressed/misc.h
+++ b/arch/x86/boot/compressed/misc.h
@@ -57,7 +57,8 @@ int cmdline_find_option_bool(const char *option);
#if CONFIG_RANDOMIZE_BASE
/* aslr.c */
-unsigned char *choose_kernel_location(unsigned char *input,
+unsigned char *choose_kernel_location(struct boot_params *boot_params,
+ unsigned char *input,
unsigned long input_size,
unsigned char *output,
unsigned long output_size);
@@ -65,7 +66,8 @@ unsigned char *choose_kernel_location(unsigned char *input,
bool has_cpuflag(int flag);
#else
static inline
-unsigned char *choose_kernel_location(unsigned char *input,
+unsigned char *choose_kernel_location(struct boot_params *boot_params,
+ unsigned char *input,
unsigned long input_size,
unsigned char *output,
unsigned long output_size)
diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h
index ff4e7b236e21..87097428a44d 100644
--- a/arch/x86/include/asm/setup.h
+++ b/arch/x86/include/asm/setup.h
@@ -66,6 +66,11 @@ static inline void x86_ce4100_early_setup(void) { }
*/
extern struct boot_params boot_params;
+static inline bool kaslr_enabled(void)
+{
+ return !!(boot_params.hdr.loadflags & ASLR_FLAG);
+}
+
/*
* Do NOT EVER look at the BIOS memory size location.
* It does not work on many machines.
diff --git a/arch/x86/include/uapi/asm/bootparam.h b/arch/x86/include/uapi/asm/bootparam.h
index 225b0988043a..cd495e5d7d9d 100644
--- a/arch/x86/include/uapi/asm/bootparam.h
+++ b/arch/x86/include/uapi/asm/bootparam.h
@@ -15,6 +15,7 @@
/* loadflags */
#define LOADED_HIGH (1<<0)
+#define ASLR_FLAG (1<<1)
#define QUIET_FLAG (1<<5)
#define KEEP_SEGMENTS (1<<6)
#define CAN_USE_HEAP (1<<7)
diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
index d1ac80b72c72..005c03e93fc5 100644
--- a/arch/x86/kernel/module.c
+++ b/arch/x86/kernel/module.c
@@ -33,6 +33,7 @@
#include <asm/page.h>
#include <asm/pgtable.h>
+#include <asm/setup.h>
#if 0
#define DEBUGP(fmt, ...) \
@@ -47,21 +48,13 @@ do { \
#ifdef CONFIG_RANDOMIZE_BASE
static unsigned long module_load_offset;
-static int randomize_modules = 1;
/* Mutex protects the module_load_offset. */
static DEFINE_MUTEX(module_kaslr_mutex);
-static int __init parse_nokaslr(char *p)
-{
- randomize_modules = 0;
- return 0;
-}
-early_param("nokaslr", parse_nokaslr);
-
static unsigned long int get_module_load_offset(void)
{
- if (randomize_modules) {
+ if (kaslr_enabled()) {
mutex_lock(&module_kaslr_mutex);
/*
* Calculate the module_load_offset the first time this
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 5437af09f204..b14cedb1e1ee 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -832,10 +832,14 @@ static void __init trim_low_memory_range(void)
static int
dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
{
- pr_emerg("Kernel Offset: 0x%lx from 0x%lx "
- "(relocation range: 0x%lx-0x%lx)\n",
- (unsigned long)&_text - __START_KERNEL, __START_KERNEL,
- __START_KERNEL_map, MODULES_VADDR-1);
+ if (kaslr_enabled())
+ pr_emerg("Kernel Offset: 0x%lx from 0x%lx (relocation range: 0x%lx-0x%lx)\n",
+ (unsigned long)&_text - __START_KERNEL,
+ __START_KERNEL,
+ __START_KERNEL_map,
+ MODULES_VADDR-1);
+ else
+ pr_emerg("Kernel Offset: disabled\n");
return 0;
}
--
2.3.3
From: Borislav Petkov <[email protected]>
Date: Wed, 1 Apr 2015 12:49:52 +0200
Subject: [PATCH v1.1] x86/mm/ASLR: Propagate ASLR status to kernel proper
Commit
e2b32e678513 ("x86, kaslr: randomize module base load address")
made module base address randomization unconditional and didn't take into
account that KASLR may have been disabled by CONFIG_HIBERNATION or by the
"nokaslr" command line option. For more info, see the (now reverted) commit:
f47233c2d34f ("x86/mm/ASLR: Propagate base load address calculation")
In order to propagate the ASLR status to kernel proper, we need a single
bit in boot_params.hdr.loadflags. We've chosen bit 1, thus leaving the
top-down allocated bits for use by the bootloader.
Originally-by: Jiri Kosina <[email protected]>
Suggested-by: "H. Peter Anvin" <[email protected]>
Cc: Kees Cook <[email protected]>
Cc: Ingo Molnar <[email protected]>
Signed-off-by: Borislav Petkov <[email protected]>
---
v1.1: Correct ASLR_FLAG bit type in boot.txt
Documentation/x86/boot.txt | 6 ++++++
arch/x86/boot/compressed/aslr.c | 5 ++++-
arch/x86/boot/compressed/misc.c | 5 ++++-
arch/x86/boot/compressed/misc.h | 6 ++++--
arch/x86/include/asm/setup.h | 5 +++++
arch/x86/include/uapi/asm/bootparam.h | 1 +
arch/x86/kernel/module.c | 11 ++---------
arch/x86/kernel/setup.c | 12 ++++++++----
8 files changed, 34 insertions(+), 17 deletions(-)
diff --git a/Documentation/x86/boot.txt b/Documentation/x86/boot.txt
index a75e3adaa39d..f84a03eea773 100644
--- a/Documentation/x86/boot.txt
+++ b/Documentation/x86/boot.txt
@@ -406,6 +406,12 @@ Protocol: 2.00+
- If 0, the protected-mode code is loaded at 0x10000.
- If 1, the protected-mode code is loaded at 0x100000.
+ Bit 1 (kernel internal): ASLR_FLAG
+ - Used internally by the compressed kernel to communicate
+ ASLR status to kernel proper.
+ If 1, ASLR enabled.
+ If 0, ASLR disabled.
+
Bit 5 (write): QUIET_FLAG
- If 0, print early messages.
- If 1, suppress early messages.
diff --git a/arch/x86/boot/compressed/aslr.c b/arch/x86/boot/compressed/aslr.c
index bb1376381985..370e47d763b0 100644
--- a/arch/x86/boot/compressed/aslr.c
+++ b/arch/x86/boot/compressed/aslr.c
@@ -295,7 +295,8 @@ static unsigned long find_random_addr(unsigned long minimum,
return slots_fetch_random();
}
-unsigned char *choose_kernel_location(unsigned char *input,
+unsigned char *choose_kernel_location(struct boot_params *boot_params,
+ unsigned char *input,
unsigned long input_size,
unsigned char *output,
unsigned long output_size)
@@ -315,6 +316,8 @@ unsigned char *choose_kernel_location(unsigned char *input,
}
#endif
+ boot_params->hdr.loadflags |= ASLR_FLAG;
+
/* Record the various known unsafe memory ranges. */
mem_avoid_init((unsigned long)input, input_size,
(unsigned long)output, output_size);
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index a950864a64da..ca83518e405e 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -377,6 +377,9 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
real_mode = rmode;
+ /* Clear it for solely in-kernel use */
+ real_mode->hdr.loadflags &= ~ASLR_FLAG;
+
sanitize_boot_params(real_mode);
if (real_mode->screen_info.orig_video_mode == 7) {
@@ -401,7 +404,7 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
* the entire decompressed kernel plus relocation table, or the
* entire decompressed kernel plus .bss and .brk sections.
*/
- output = choose_kernel_location(input_data, input_len, output,
+ output = choose_kernel_location(real_mode, input_data, input_len, output,
output_len > run_size ? output_len
: run_size);
diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h
index 04477d68403f..89dd0d78013a 100644
--- a/arch/x86/boot/compressed/misc.h
+++ b/arch/x86/boot/compressed/misc.h
@@ -57,7 +57,8 @@ int cmdline_find_option_bool(const char *option);
#if CONFIG_RANDOMIZE_BASE
/* aslr.c */
-unsigned char *choose_kernel_location(unsigned char *input,
+unsigned char *choose_kernel_location(struct boot_params *boot_params,
+ unsigned char *input,
unsigned long input_size,
unsigned char *output,
unsigned long output_size);
@@ -65,7 +66,8 @@ unsigned char *choose_kernel_location(unsigned char *input,
bool has_cpuflag(int flag);
#else
static inline
-unsigned char *choose_kernel_location(unsigned char *input,
+unsigned char *choose_kernel_location(struct boot_params *boot_params,
+ unsigned char *input,
unsigned long input_size,
unsigned char *output,
unsigned long output_size)
diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h
index ff4e7b236e21..87097428a44d 100644
--- a/arch/x86/include/asm/setup.h
+++ b/arch/x86/include/asm/setup.h
@@ -66,6 +66,11 @@ static inline void x86_ce4100_early_setup(void) { }
*/
extern struct boot_params boot_params;
+static inline bool kaslr_enabled(void)
+{
+ return !!(boot_params.hdr.loadflags & ASLR_FLAG);
+}
+
/*
* Do NOT EVER look at the BIOS memory size location.
* It does not work on many machines.
diff --git a/arch/x86/include/uapi/asm/bootparam.h b/arch/x86/include/uapi/asm/bootparam.h
index 225b0988043a..cd495e5d7d9d 100644
--- a/arch/x86/include/uapi/asm/bootparam.h
+++ b/arch/x86/include/uapi/asm/bootparam.h
@@ -15,6 +15,7 @@
/* loadflags */
#define LOADED_HIGH (1<<0)
+#define ASLR_FLAG (1<<1)
#define QUIET_FLAG (1<<5)
#define KEEP_SEGMENTS (1<<6)
#define CAN_USE_HEAP (1<<7)
diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
index d1ac80b72c72..005c03e93fc5 100644
--- a/arch/x86/kernel/module.c
+++ b/arch/x86/kernel/module.c
@@ -33,6 +33,7 @@
#include <asm/page.h>
#include <asm/pgtable.h>
+#include <asm/setup.h>
#if 0
#define DEBUGP(fmt, ...) \
@@ -47,21 +48,13 @@ do { \
#ifdef CONFIG_RANDOMIZE_BASE
static unsigned long module_load_offset;
-static int randomize_modules = 1;
/* Mutex protects the module_load_offset. */
static DEFINE_MUTEX(module_kaslr_mutex);
-static int __init parse_nokaslr(char *p)
-{
- randomize_modules = 0;
- return 0;
-}
-early_param("nokaslr", parse_nokaslr);
-
static unsigned long int get_module_load_offset(void)
{
- if (randomize_modules) {
+ if (kaslr_enabled()) {
mutex_lock(&module_kaslr_mutex);
/*
* Calculate the module_load_offset the first time this
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 5437af09f204..b14cedb1e1ee 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -832,10 +832,14 @@ static void __init trim_low_memory_range(void)
static int
dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
{
- pr_emerg("Kernel Offset: 0x%lx from 0x%lx "
- "(relocation range: 0x%lx-0x%lx)\n",
- (unsigned long)&_text - __START_KERNEL, __START_KERNEL,
- __START_KERNEL_map, MODULES_VADDR-1);
+ if (kaslr_enabled())
+ pr_emerg("Kernel Offset: 0x%lx from 0x%lx (relocation range: 0x%lx-0x%lx)\n",
+ (unsigned long)&_text - __START_KERNEL,
+ __START_KERNEL,
+ __START_KERNEL_map,
+ MODULES_VADDR-1);
+ else
+ pr_emerg("Kernel Offset: disabled\n");
return 0;
}
--
2.3.3
--
Regards/Gruss,
Boris.
ECO tip #101: Trim your mails when you reply.
--
* Borislav Petkov <[email protected]> wrote:
> --- a/arch/x86/kernel/module.c
> +++ b/arch/x86/kernel/module.c
> @@ -33,6 +33,7 @@
>
> #include <asm/page.h>
> #include <asm/pgtable.h>
> +#include <asm/setup.h>
>
> #if 0
> #define DEBUGP(fmt, ...) \
> @@ -47,21 +48,13 @@ do { \
>
> #ifdef CONFIG_RANDOMIZE_BASE
> static unsigned long module_load_offset;
> -static int randomize_modules = 1;
>
> /* Mutex protects the module_load_offset. */
> static DEFINE_MUTEX(module_kaslr_mutex);
>
> -static int __init parse_nokaslr(char *p)
> -{
> - randomize_modules = 0;
> - return 0;
> -}
> -early_param("nokaslr", parse_nokaslr);
So doesn't a 'nokaslr' boot option still make sense, to be able to
debug KASLR failures and such?
> + if (kaslr_enabled())
> + pr_emerg("Kernel Offset: 0x%lx from 0x%lx (relocation range: 0x%lx-0x%lx)\n",
> + (unsigned long)&_text - __START_KERNEL,
> + __START_KERNEL,
> + __START_KERNEL_map,
> + MODULES_VADDR-1);
> + else
> + pr_emerg("Kernel Offset: disabled\n");
Nit: curly braces for multi-line statements and so on.
Thanks,
Ingo
On Thu, Apr 02, 2015 at 01:07:13PM +0200, Ingo Molnar wrote:
> So doesn't a 'nokaslr' boot option still make sense, to be able to
> debug KASLR failures and such?
That's still parsed in arch/x86/boot/compressed/aslr.c
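(For the record, that check uses the cmdline_find_option_bool() helper
declared in misc.h; roughly - quoting from memory, not from this patch -
it looks like:

	#ifdef CONFIG_HIBERNATION
		/* with hibernation, KASLR stays off unless "kaslr" is passed */
		if (!cmdline_find_option_bool("kaslr"))
			goto out;
	#else
		/* otherwise, "nokaslr" turns it off */
		if (cmdline_find_option_bool("nokaslr"))
			goto out;
	#endif

so "nokaslr" keeps working for debugging - only the duplicated parse in
kernel proper goes away.)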
> > + if (kaslr_enabled())
> > + pr_emerg("Kernel Offset: 0x%lx from 0x%lx (relocation range: 0x%lx-0x%lx)\n",
> > + (unsigned long)&_text - __START_KERNEL,
> > + __START_KERNEL,
> > + __START_KERNEL_map,
> > + MODULES_VADDR-1);
> > + else
> > + pr_emerg("Kernel Offset: disabled\n");
>
> Nit: curly braces for multi-line statements and so on.
I guess by multi-line you mean multiple source lines...
--
Regards/Gruss,
Boris.
ECO tip #101: Trim your mails when you reply.
--
* Borislav Petkov <[email protected]> wrote:
> On Thu, Apr 02, 2015 at 01:07:13PM +0200, Ingo Molnar wrote:
> > So doesn't a 'nokaslr' boot option still make sense, to be able to
> > debug KASLR failures and such?
>
> That's still parsed in arch/x86/boot/compressed/aslr.c
So was this duplication dead code in essence?
>
> > > + if (kaslr_enabled())
> > > + pr_emerg("Kernel Offset: 0x%lx from 0x%lx (relocation range: 0x%lx-0x%lx)\n",
> > > + (unsigned long)&_text - __START_KERNEL,
> > > + __START_KERNEL,
> > > + __START_KERNEL_map,
> > > + MODULES_VADDR-1);
> > > + else
> > > + pr_emerg("Kernel Offset: disabled\n");
> >
> > Nit: curly braces for multi-line statements and so on.
>
> I guess by multi-line you mean multiple source lines...
Yeah.
Thanks,
Ingo
On Thu, Apr 02, 2015 at 01:29:30PM +0200, Ingo Molnar wrote:
> So was this duplication dead code in essence?
See e2b32e678513.
Looks like it was parsing the cmdline option a second time in the kernel
proper (vs. the first parse, which we still do in the compressed kernel).
hpa said that we probably could solve it this way here too but using a
bit in loadflags is cleaner and nicer. IMO :)
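Condensed, the difference is: instead of kernel proper re-parsing the
option itself (the code being removed below),

	static int __init parse_nokaslr(char *p)
	{
		randomize_modules = 0;
		return 0;
	}
	early_param("nokaslr", parse_nokaslr);

it now just tests the bit the decompressor has already set (the helper
added to asm/setup.h below):

	static inline bool kaslr_enabled(void)
	{
		return !!(boot_params.hdr.loadflags & ASLR_FLAG);
	}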
> > I guess by multi-line you mean multiple source lines...
>
> Yeah.
Here you go:
---
From: Borislav Petkov <[email protected]>
Subject: [PATCH v1.2] x86/mm/ASLR: Propagate ASLR status to kernel proper
Commit
e2b32e678513 ("x86, kaslr: randomize module base load address")
made module base address randomization unconditional and didn't take into
account that KASLR may have been disabled by CONFIG_HIBERNATION or by the
"nokaslr" command line option. For more info, see the (now reverted) commit:
f47233c2d34f ("x86/mm/ASLR: Propagate base load address calculation")
In order to propagate the ASLR status to kernel proper, we need a single
bit in boot_params.hdr.loadflags. We've chosen bit 1, thus leaving the
top-down allocated bits for use by the bootloader.
Originally-by: Jiri Kosina <[email protected]>
Suggested-by: "H. Peter Anvin" <[email protected]>
Cc: Kees Cook <[email protected]>
Cc: Ingo Molnar <[email protected]>
Signed-off-by: Borislav Petkov <[email protected]>
---
v1.1: Correct ASLR_FLAG bit type in boot.txt
v1.2: Add braces around multiline statement in dump_kernel_offset()
Documentation/x86/boot.txt | 6 ++++++
arch/x86/boot/compressed/aslr.c | 5 ++++-
arch/x86/boot/compressed/misc.c | 5 ++++-
arch/x86/boot/compressed/misc.h | 6 ++++--
arch/x86/include/asm/setup.h | 5 +++++
arch/x86/include/uapi/asm/bootparam.h | 1 +
arch/x86/kernel/module.c | 11 ++---------
arch/x86/kernel/setup.c | 12 ++++++++----
8 files changed, 34 insertions(+), 17 deletions(-)
diff --git a/Documentation/x86/boot.txt b/Documentation/x86/boot.txt
index a75e3adaa39d..f84a03eea773 100644
--- a/Documentation/x86/boot.txt
+++ b/Documentation/x86/boot.txt
@@ -406,6 +406,12 @@ Protocol: 2.00+
- If 0, the protected-mode code is loaded at 0x10000.
- If 1, the protected-mode code is loaded at 0x100000.
+ Bit 1 (kernel internal): ASLR_FLAG
+ - Used internally by the compressed kernel to communicate
+ ASLR status to kernel proper.
+ If 1, ASLR enabled.
+ If 0, ASLR disabled.
+
Bit 5 (write): QUIET_FLAG
- If 0, print early messages.
- If 1, suppress early messages.
diff --git a/arch/x86/boot/compressed/aslr.c b/arch/x86/boot/compressed/aslr.c
index bb1376381985..370e47d763b0 100644
--- a/arch/x86/boot/compressed/aslr.c
+++ b/arch/x86/boot/compressed/aslr.c
@@ -295,7 +295,8 @@ static unsigned long find_random_addr(unsigned long minimum,
return slots_fetch_random();
}
-unsigned char *choose_kernel_location(unsigned char *input,
+unsigned char *choose_kernel_location(struct boot_params *boot_params,
+ unsigned char *input,
unsigned long input_size,
unsigned char *output,
unsigned long output_size)
@@ -315,6 +316,8 @@ unsigned char *choose_kernel_location(unsigned char *input,
}
#endif
+ boot_params->hdr.loadflags |= ASLR_FLAG;
+
/* Record the various known unsafe memory ranges. */
mem_avoid_init((unsigned long)input, input_size,
(unsigned long)output, output_size);
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index a950864a64da..ca83518e405e 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -377,6 +377,9 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
real_mode = rmode;
+ /* Clear it for solely in-kernel use */
+ real_mode->hdr.loadflags &= ~ASLR_FLAG;
+
sanitize_boot_params(real_mode);
if (real_mode->screen_info.orig_video_mode == 7) {
@@ -401,7 +404,7 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
* the entire decompressed kernel plus relocation table, or the
* entire decompressed kernel plus .bss and .brk sections.
*/
- output = choose_kernel_location(input_data, input_len, output,
+ output = choose_kernel_location(real_mode, input_data, input_len, output,
output_len > run_size ? output_len
: run_size);
diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h
index 04477d68403f..89dd0d78013a 100644
--- a/arch/x86/boot/compressed/misc.h
+++ b/arch/x86/boot/compressed/misc.h
@@ -57,7 +57,8 @@ int cmdline_find_option_bool(const char *option);
#if CONFIG_RANDOMIZE_BASE
/* aslr.c */
-unsigned char *choose_kernel_location(unsigned char *input,
+unsigned char *choose_kernel_location(struct boot_params *boot_params,
+ unsigned char *input,
unsigned long input_size,
unsigned char *output,
unsigned long output_size);
@@ -65,7 +66,8 @@ unsigned char *choose_kernel_location(unsigned char *input,
bool has_cpuflag(int flag);
#else
static inline
-unsigned char *choose_kernel_location(unsigned char *input,
+unsigned char *choose_kernel_location(struct boot_params *boot_params,
+ unsigned char *input,
unsigned long input_size,
unsigned char *output,
unsigned long output_size)
diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h
index ff4e7b236e21..87097428a44d 100644
--- a/arch/x86/include/asm/setup.h
+++ b/arch/x86/include/asm/setup.h
@@ -66,6 +66,11 @@ static inline void x86_ce4100_early_setup(void) { }
*/
extern struct boot_params boot_params;
+static inline bool kaslr_enabled(void)
+{
+ return !!(boot_params.hdr.loadflags & ASLR_FLAG);
+}
+
/*
* Do NOT EVER look at the BIOS memory size location.
* It does not work on many machines.
diff --git a/arch/x86/include/uapi/asm/bootparam.h b/arch/x86/include/uapi/asm/bootparam.h
index 225b0988043a..cd495e5d7d9d 100644
--- a/arch/x86/include/uapi/asm/bootparam.h
+++ b/arch/x86/include/uapi/asm/bootparam.h
@@ -15,6 +15,7 @@
/* loadflags */
#define LOADED_HIGH (1<<0)
+#define ASLR_FLAG (1<<1)
#define QUIET_FLAG (1<<5)
#define KEEP_SEGMENTS (1<<6)
#define CAN_USE_HEAP (1<<7)
diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
index d1ac80b72c72..005c03e93fc5 100644
--- a/arch/x86/kernel/module.c
+++ b/arch/x86/kernel/module.c
@@ -33,6 +33,7 @@
#include <asm/page.h>
#include <asm/pgtable.h>
+#include <asm/setup.h>
#if 0
#define DEBUGP(fmt, ...) \
@@ -47,21 +48,13 @@ do { \
#ifdef CONFIG_RANDOMIZE_BASE
static unsigned long module_load_offset;
-static int randomize_modules = 1;
/* Mutex protects the module_load_offset. */
static DEFINE_MUTEX(module_kaslr_mutex);
-static int __init parse_nokaslr(char *p)
-{
- randomize_modules = 0;
- return 0;
-}
-early_param("nokaslr", parse_nokaslr);
-
static unsigned long int get_module_load_offset(void)
{
- if (randomize_modules) {
+ if (kaslr_enabled()) {
mutex_lock(&module_kaslr_mutex);
/*
* Calculate the module_load_offset the first time this
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 5437af09f204..5aa119453c41 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -832,10 +832,14 @@ static void __init trim_low_memory_range(void)
static int
dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
{
- pr_emerg("Kernel Offset: 0x%lx from 0x%lx "
- "(relocation range: 0x%lx-0x%lx)\n",
- (unsigned long)&_text - __START_KERNEL, __START_KERNEL,
- __START_KERNEL_map, MODULES_VADDR-1);
+ if (kaslr_enabled()) {
+ pr_emerg("Kernel Offset: 0x%lx from 0x%lx (relocation range: 0x%lx-0x%lx)\n",
+ (unsigned long)&_text - __START_KERNEL,
+ __START_KERNEL,
+ __START_KERNEL_map,
+ MODULES_VADDR-1);
+ } else
+ pr_emerg("Kernel Offset: disabled\n");
return 0;
}
--
2.3.3
--
Regards/Gruss,
Boris.
ECO tip #101: Trim your mails when you reply.
--
On Thu, Apr 2, 2015 at 4:50 AM, Borislav Petkov <[email protected]> wrote:
> On Thu, Apr 02, 2015 at 01:29:30PM +0200, Ingo Molnar wrote:
>> So was this duplication dead code in essence?
>
> See e2b32e678513.
>
> Looks like it was parsing the cmdline option for a second time in the
> kernel proper (vs first one which we still parse in the compressed
> kernel).
>
> hpa said that we probably could solve it this way here too but using a
> bit in loadflags is cleaner and nicer. IMO :)
>
>> > I guess by multi-line you mean multiple source lines...
>>
>> Yeah.
>
> Here you go:
>
> ---
> From: Borislav Petkov <[email protected]>
> Subject: [PATCH v1.2] x86/mm/ASLR: Propagate ASLR status to kernel proper
>
> Commit
>
> e2b32e678513 ("x86, kaslr: randomize module base load address")
>
> made module base address randomization unconditional and didn't take into
> account that KASLR may have been disabled by CONFIG_HIBERNATION or by the
> "nokaslr" command line option. For more info, see the (now reverted) commit:
>
> f47233c2d34f ("x86/mm/ASLR: Propagate base load address calculation")
>
> In order to propagate the ASLR status to kernel proper, we need a single
> bit in boot_params.hdr.loadflags. We've chosen bit 1, thus leaving the
> top-down allocated bits for use by the bootloader.
>
> diff --git a/arch/x86/boot/compressed/aslr.c b/arch/x86/boot/compressed/aslr.c
> index bb1376381985..370e47d763b0 100644
> --- a/arch/x86/boot/compressed/aslr.c
> +++ b/arch/x86/boot/compressed/aslr.c
> @@ -295,7 +295,8 @@ static unsigned long find_random_addr(unsigned long minimum,
> return slots_fetch_random();
> }
>
> -unsigned char *choose_kernel_location(unsigned char *input,
> +unsigned char *choose_kernel_location(struct boot_params *boot_params,
> + unsigned char *input,
> unsigned long input_size,
> unsigned char *output,
> unsigned long output_size)
> @@ -315,6 +316,8 @@ unsigned char *choose_kernel_location(unsigned char *input,
> }
> #endif
>
> + boot_params->hdr.loadflags |= ASLR_FLAG;
> +
> /* Record the various known unsafe memory ranges. */
> mem_avoid_init((unsigned long)input, input_size,
> (unsigned long)output, output_size);
> diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
> index a950864a64da..ca83518e405e 100644
> --- a/arch/x86/boot/compressed/misc.c
> +++ b/arch/x86/boot/compressed/misc.c
> @@ -377,6 +377,9 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
>
> real_mode = rmode;
>
> + /* Clear it for solely in-kernel use */
> + real_mode->hdr.loadflags &= ~ASLR_FLAG;
> +
> sanitize_boot_params(real_mode);
>
> if (real_mode->screen_info.orig_video_mode == 7) {
> @@ -401,7 +404,7 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
> * the entire decompressed kernel plus relocation table, or the
> * entire decompressed kernel plus .bss and .brk sections.
> */
> - output = choose_kernel_location(input_data, input_len, output,
> + output = choose_kernel_location(real_mode, input_data, input_len, output,
> output_len > run_size ? output_len
> : run_size);
>
> diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h
> index 04477d68403f..89dd0d78013a 100644
> --- a/arch/x86/boot/compressed/misc.h
> +++ b/arch/x86/boot/compressed/misc.h
> @@ -57,7 +57,8 @@ int cmdline_find_option_bool(const char *option);
>
> #if CONFIG_RANDOMIZE_BASE
> /* aslr.c */
> -unsigned char *choose_kernel_location(unsigned char *input,
> +unsigned char *choose_kernel_location(struct boot_params *boot_params,
> + unsigned char *input,
> unsigned long input_size,
> unsigned char *output,
> unsigned long output_size);
> @@ -65,7 +66,8 @@ unsigned char *choose_kernel_location(unsigned char *input,
> bool has_cpuflag(int flag);
> #else
> static inline
> -unsigned char *choose_kernel_location(unsigned char *input,
> +unsigned char *choose_kernel_location(struct boot_params *boot_params,
> + unsigned char *input,
> unsigned long input_size,
> unsigned char *output,
> unsigned long output_size)
Why do you need to pass a pointer to boot_params around?
real_mode is a global variable.
Yinghai