Syzbot reported a shift-out-of-bounds bug in profile_init().
The problem was an incorrect prof_shift. Since the prof_shift value comes from
userspace, we need to check this value to avoid a too-large shift.
Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
Reported-and-tested-by: [email protected]
Suggested-by: Tetsuo Handa <[email protected]>
Signed-off-by: Pavel Skripkin <[email protected]>
---
kernel/profile.c | 9 +++++----
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/kernel/profile.c b/kernel/profile.c
index c2ebddb5e974..c905931e3c3b 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -42,6 +42,7 @@ struct profile_hit {
static atomic_t *prof_buffer;
static unsigned long prof_len, prof_shift;
+#define MAX_PROF_SHIFT (sizeof(prof_shift) * 8)
int prof_on __read_mostly;
EXPORT_SYMBOL_GPL(prof_on);
@@ -67,7 +68,7 @@ int profile_setup(char *str)
if (str[strlen(sleepstr)] == ',')
str += strlen(sleepstr) + 1;
if (get_option(&str, &par))
- prof_shift = par;
+ prof_shift = clamp(par, 0, (int) MAX_PROF_SHIFT - 1);
pr_info("kernel sleep profiling enabled (shift: %ld)\n",
prof_shift);
#else
@@ -78,7 +79,7 @@ int profile_setup(char *str)
if (str[strlen(schedstr)] == ',')
str += strlen(schedstr) + 1;
if (get_option(&str, &par))
- prof_shift = par;
+ prof_shift = clamp(par, 0, (int) MAX_PROF_SHIFT - 1);
pr_info("kernel schedule profiling enabled (shift: %ld)\n",
prof_shift);
} else if (!strncmp(str, kvmstr, strlen(kvmstr))) {
@@ -86,11 +87,11 @@ int profile_setup(char *str)
if (str[strlen(kvmstr)] == ',')
str += strlen(kvmstr) + 1;
if (get_option(&str, &par))
- prof_shift = par;
+ prof_shift = clamp(par, 0, (int) MAX_PROF_SHIFT - 1);
pr_info("kernel KVM profiling enabled (shift: %ld)\n",
prof_shift);
} else if (get_option(&str, &par)) {
- prof_shift = par;
+ prof_shift = clamp(par, 0, (int) MAX_PROF_SHIFT - 1);
prof_on = CPU_PROFILING;
pr_info("kernel profiling enabled (shift: %ld)\n",
prof_shift);
--
2.32.0
On 2021/07/17 4:04, Pavel Skripkin wrote:
> Syzbot reported shift-out-of-bounds bug in profile_init().
> The problem was in incorrect prof_shift. Since prof_shift value comes from
> userspace we need to check this value to avoid too big shift.
>
> Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
> Reported-and-tested-by: [email protected]
> Suggested-by: Tetsuo Handa <[email protected]>
> Signed-off-by: Pavel Skripkin <[email protected]>
> ---
> kernel/profile.c | 9 +++++----
> 1 file changed, 5 insertions(+), 4 deletions(-)
>
> diff --git a/kernel/profile.c b/kernel/profile.c
> index c2ebddb5e974..c905931e3c3b 100644
> --- a/kernel/profile.c
> +++ b/kernel/profile.c
> @@ -42,6 +42,7 @@ struct profile_hit {
>
> static atomic_t *prof_buffer;
> static unsigned long prof_len, prof_shift;
> +#define MAX_PROF_SHIFT (sizeof(prof_shift) * 8)
I came to think that we should directly use BITS_PER_LONG, for
the integer value which is subjected to shift operation is e.g.
(_etext - _stext)
part of
prof_len = (_etext - _stext) >> prof_shift;
in profile_init().
Since "unsigned char" will be sufficient for holding BITS_PER_LONG - 1,
defining MAX_PROF_SHIFT based on size of prof_shift is incorrect.
Also, there is
unsigned int sample_step = 1 << prof_shift;
in read_profile(). This may result in shift-out-of-bounds on BITS_PER_LONG == 64
architecture. Shouldn't this variable changed from "unsigned int" to "unsigned long"
and use 1UL instead of 1 ?
On 8/8/21 2:21 PM, Tetsuo Handa wrote:
> On 2021/07/17 4:04, Pavel Skripkin wrote:
>> Syzbot reported shift-out-of-bounds bug in profile_init().
>> The problem was in incorrect prof_shift. Since prof_shift value comes from
>> userspace we need to check this value to avoid too big shift.
>>
>> Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
>> Reported-and-tested-by: [email protected]
>> Suggested-by: Tetsuo Handa <[email protected]>
>> Signed-off-by: Pavel Skripkin <[email protected]>
>> ---
>> kernel/profile.c | 9 +++++----
>> 1 file changed, 5 insertions(+), 4 deletions(-)
>>
>> diff --git a/kernel/profile.c b/kernel/profile.c
>> index c2ebddb5e974..c905931e3c3b 100644
>> --- a/kernel/profile.c
>> +++ b/kernel/profile.c
>> @@ -42,6 +42,7 @@ struct profile_hit {
>>
>> static atomic_t *prof_buffer;
>> static unsigned long prof_len, prof_shift;
>> +#define MAX_PROF_SHIFT (sizeof(prof_shift) * 8)
>
> I came to think that we should directly use BITS_PER_LONG, for
> the integer value which is subjected to shift operation is e.g.
>
> (_etext - _stext)
>
> part of
>
> prof_len = (_etext - _stext) >> prof_shift;
>
> in profile_init().
>
> Since "unsigned char" will be sufficient for holding BITS_PER_LONG - 1,
> defining MAX_PROF_SHIFT based on size of prof_shift is incorrect.
>
I don't get it, sorry. Do you mean that
#define MAX_PROF_SHIFT BITS_PER_LONG
is a better solution here? And as I understand it, we can change the prof_shift
type from "unsigned long" to "unsigned short", right?
> Also, there is
>
> unsigned int sample_step = 1 << prof_shift;
>
> in read_profile(). This may result in shift-out-of-bounds on BITS_PER_LONG == 64
> architecture. Shouldn't this variable changed from "unsigned int" to "unsigned long"
> and use 1UL instead of 1 ?
>
Yep, it's necessary. Will do it in v2
With regards,
Pavel Skripkin
On 2021/08/13 19:56, Pavel Skripkin wrote:
> I don't get it, sorry. Do you mean, that
>
> #define MAX_PROF_SHIFT BITS_PER_LONG
>
> is better solution here?
Yes, but I feel we don't need to define MAX_PROF_SHIFT because
the type of the integer value which is subjected to shift operation
will be always "unsigned long" and will unlikely change in the future.
> And as I understand we can change prof_shift type from "unsigned long" to "unsigned short", rigth?
Yes, "unsigned int" or "unsigned short int", or even "u8" (I don't know
whether compilers generate bad code if "u8" is used). At least, since
get_option() stores result into "int", receiving via "unsigned int" will
be enough.
Syzbot reported a shift-out-of-bounds bug in profile_init().
The problem was an incorrect prof_shift. Since the prof_shift value comes from
userspace, we need to clamp this value into [0, BITS_PER_LONG - 1]
boundaries.
A second possible shift-out-of-bounds was found by Tetsuo:
the sample_step local variable in read_profile() had "unsigned int" type,
but prof_shift allows a shift of up to BITS_PER_LONG. So, to prevent a
possible shift-out-of-bounds, the sample_step type was changed to
"unsigned long".
Also, "unsigned short int" will be sufficient for storing
[0, BITS_PER_LONG] value, that's why there is no need for
"unsigned long" prof_shift.
Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
Reported-and-tested-by: [email protected]
Suggested-by: Tetsuo Handa <[email protected]>
Signed-off-by: Pavel Skripkin <[email protected]>
---
Changes in v2:
1. Fixed possible shift-out-of-bounds in read_profile()
(Reported by Tetsuo)
2. Changed prof_shift type from "unsigned long" to
"unsigned short int"
---
kernel/profile.c | 21 +++++++++++----------
1 file changed, 11 insertions(+), 10 deletions(-)
diff --git a/kernel/profile.c b/kernel/profile.c
index c2ebddb5e974..eb9c7f0f5ac5 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -41,7 +41,8 @@ struct profile_hit {
#define NR_PROFILE_GRP (NR_PROFILE_HIT/PROFILE_GRPSZ)
static atomic_t *prof_buffer;
-static unsigned long prof_len, prof_shift;
+static unsigned long prof_len;
+static unsigned short int prof_shift;
int prof_on __read_mostly;
EXPORT_SYMBOL_GPL(prof_on);
@@ -67,8 +68,8 @@ int profile_setup(char *str)
if (str[strlen(sleepstr)] == ',')
str += strlen(sleepstr) + 1;
if (get_option(&str, &par))
- prof_shift = par;
- pr_info("kernel sleep profiling enabled (shift: %ld)\n",
+ prof_shift = clamp(par, 0, BITS_PER_LONG - 1);
+ pr_info("kernel sleep profiling enabled (shift: %u)\n",
prof_shift);
#else
pr_warn("kernel sleep profiling requires CONFIG_SCHEDSTATS\n");
@@ -78,21 +79,21 @@ int profile_setup(char *str)
if (str[strlen(schedstr)] == ',')
str += strlen(schedstr) + 1;
if (get_option(&str, &par))
- prof_shift = par;
- pr_info("kernel schedule profiling enabled (shift: %ld)\n",
+ prof_shift = clamp(par, 0, BITS_PER_LONG - 1);
+ pr_info("kernel schedule profiling enabled (shift: %u)\n",
prof_shift);
} else if (!strncmp(str, kvmstr, strlen(kvmstr))) {
prof_on = KVM_PROFILING;
if (str[strlen(kvmstr)] == ',')
str += strlen(kvmstr) + 1;
if (get_option(&str, &par))
- prof_shift = par;
- pr_info("kernel KVM profiling enabled (shift: %ld)\n",
+ prof_shift = clamp(par, 0, BITS_PER_LONG - 1);
+ pr_info("kernel KVM profiling enabled (shift: %u)\n",
prof_shift);
} else if (get_option(&str, &par)) {
- prof_shift = par;
+ prof_shift = clamp(par, 0, BITS_PER_LONG - 1);
prof_on = CPU_PROFILING;
- pr_info("kernel profiling enabled (shift: %ld)\n",
+ pr_info("kernel profiling enabled (shift: %u)\n",
prof_shift);
}
return 1;
@@ -468,7 +469,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
unsigned long p = *ppos;
ssize_t read;
char *pnt;
- unsigned int sample_step = 1 << prof_shift;
+ unsigned long sample_step = 1UL << prof_shift;
profile_flip_buffers();
if (p >= (prof_len+1)*sizeof(unsigned int))
--
2.32.0
Who's taking this patch? Or should Andrew just take it through his tree?
-- Steve
On Fri, 13 Aug 2021 17:00:22 +0300
Pavel Skripkin <[email protected]> wrote:
> Syzbot reported shift-out-of-bounds bug in profile_init().
> The problem was in incorrect prof_shift. Since prof_shift value comes from
> userspace we need to clamp this value into [0, BITS_PER_LONG -1]
> boundaries.
>
> Second possible shiht-out-of-bounds was found by Tetsuo:
> sample_step local variable in read_profile() had "unsigned int" type,
> but prof_shift allows to make a BITS_PER_LONG shift. So, to prevent
> possible shiht-out-of-bounds sample_step type was changed to
> "unsigned long".
>
> Also, "unsigned short int" will be sufficient for storing
> [0, BITS_PER_LONG] value, that's why there is no need for
> "unsigned long" prof_shift.
>
> Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
> Reported-and-tested-by: [email protected]
> Suggested-by: Tetsuo Handa <[email protected]>
> Signed-off-by: Pavel Skripkin <[email protected]>
> ---
>
> Changes in v2:
> 1. Fixed possible shiht-out-of-bounds in read_profile()
> (Reported by Tetsuo)
>
> 2. Changed prof_shift type from "unsigned long" to
> "unsigned short int"
>
> ---
> kernel/profile.c | 21 +++++++++++----------
> 1 file changed, 11 insertions(+), 10 deletions(-)
>
> diff --git a/kernel/profile.c b/kernel/profile.c
> index c2ebddb5e974..eb9c7f0f5ac5 100644
> --- a/kernel/profile.c
> +++ b/kernel/profile.c
> @@ -41,7 +41,8 @@ struct profile_hit {
> #define NR_PROFILE_GRP (NR_PROFILE_HIT/PROFILE_GRPSZ)
>
> static atomic_t *prof_buffer;
> -static unsigned long prof_len, prof_shift;
> +static unsigned long prof_len;
> +static unsigned short int prof_shift;
>
> int prof_on __read_mostly;
> EXPORT_SYMBOL_GPL(prof_on);
> @@ -67,8 +68,8 @@ int profile_setup(char *str)
> if (str[strlen(sleepstr)] == ',')
> str += strlen(sleepstr) + 1;
> if (get_option(&str, &par))
> - prof_shift = par;
> - pr_info("kernel sleep profiling enabled (shift: %ld)\n",
> + prof_shift = clamp(par, 0, BITS_PER_LONG - 1);
> + pr_info("kernel sleep profiling enabled (shift: %u)\n",
> prof_shift);
> #else
> pr_warn("kernel sleep profiling requires CONFIG_SCHEDSTATS\n");
> @@ -78,21 +79,21 @@ int profile_setup(char *str)
> if (str[strlen(schedstr)] == ',')
> str += strlen(schedstr) + 1;
> if (get_option(&str, &par))
> - prof_shift = par;
> - pr_info("kernel schedule profiling enabled (shift: %ld)\n",
> + prof_shift = clamp(par, 0, BITS_PER_LONG - 1);
> + pr_info("kernel schedule profiling enabled (shift: %u)\n",
> prof_shift);
> } else if (!strncmp(str, kvmstr, strlen(kvmstr))) {
> prof_on = KVM_PROFILING;
> if (str[strlen(kvmstr)] == ',')
> str += strlen(kvmstr) + 1;
> if (get_option(&str, &par))
> - prof_shift = par;
> - pr_info("kernel KVM profiling enabled (shift: %ld)\n",
> + prof_shift = clamp(par, 0, BITS_PER_LONG - 1);
> + pr_info("kernel KVM profiling enabled (shift: %u)\n",
> prof_shift);
> } else if (get_option(&str, &par)) {
> - prof_shift = par;
> + prof_shift = clamp(par, 0, BITS_PER_LONG - 1);
> prof_on = CPU_PROFILING;
> - pr_info("kernel profiling enabled (shift: %ld)\n",
> + pr_info("kernel profiling enabled (shift: %u)\n",
> prof_shift);
> }
> return 1;
> @@ -468,7 +469,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
> unsigned long p = *ppos;
> ssize_t read;
> char *pnt;
> - unsigned int sample_step = 1 << prof_shift;
> + unsigned long sample_step = 1UL << prof_shift;
>
> profile_flip_buffers();
> if (p >= (prof_len+1)*sizeof(unsigned int))
Andrew, can you take this patch?
On 2021/08/20 5:46, Steven Rostedt wrote:
>
> Who's taking this patch? Or should Andrew just take it through his tree?
>
> -- Steve
>