When CONFIG_PREEMPT is not set, cond_resched() is how kernel code
voluntarily yields the CPU; with CONFIG_PREEMPT=y it becomes a no-op,
because the kernel is then expected to preempt itself. To keep a
kernel thread from monopolizing the CPU when CONFIG_PREEMPT=y, the
exception return path must check TIF_NEED_RESCHED and reschedule
kernel-mode execution, just as it already does for user threads.
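For reference, a rough sketch of the relevant definitions, paraphrased
from include/linux/sched.h of this era (an illustration only, not
verbatim kernel source; details vary by kernel version):

  /* Paraphrased: how cond_resched() degenerates under CONFIG_PREEMPT. */
  #ifndef CONFIG_PREEMPT
  extern int _cond_resched(void);        /* may enter the scheduler */
  #else
  static inline int _cond_resched(void)
  {
          return 0;                      /* no-op: preemption handles it */
  }
  #endif

  #define cond_resched() ({                        \
          ___might_sleep(__FILE__, __LINE__, 0);   \
          _cond_resched();                         \
  })

With CONFIG_PREEMPT=y the macro therefore never reschedules, so the
exception return path has to do that work instead.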
Signed-off-by: Vincent Chen <[email protected]>
---
arch/riscv/kernel/asm-offsets.c | 1 +
arch/riscv/kernel/entry.S | 18 +++++++++++++++++-
2 files changed, 18 insertions(+), 1 deletions(-)
diff --git a/arch/riscv/kernel/asm-offsets.c b/arch/riscv/kernel/asm-offsets.c
index 6a92a2f..dac9834 100644
--- a/arch/riscv/kernel/asm-offsets.c
+++ b/arch/riscv/kernel/asm-offsets.c
@@ -39,6 +39,7 @@ void asm_offsets(void)
OFFSET(TASK_STACK, task_struct, stack);
OFFSET(TASK_TI, task_struct, thread_info);
OFFSET(TASK_TI_FLAGS, task_struct, thread_info.flags);
+ OFFSET(TASK_TI_PREEMPT_COUNT, task_struct, thread_info.preempt_count);
OFFSET(TASK_TI_KERNEL_SP, task_struct, thread_info.kernel_sp);
OFFSET(TASK_TI_USER_SP, task_struct, thread_info.user_sp);
OFFSET(TASK_TI_CPU, task_struct, thread_info.cpu);
diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
index 13d4826..728b72d 100644
--- a/arch/riscv/kernel/entry.S
+++ b/arch/riscv/kernel/entry.S
@@ -144,6 +144,10 @@ _save_context:
REG_L x2, PT_SP(sp)
.endm

+#if !IS_ENABLED(CONFIG_PREEMPT)
+#define resume_kernel restore_all
+#endif
+
ENTRY(handle_exception)
SAVE_ALL

@@ -228,7 +232,7 @@ ret_from_exception:
REG_L s0, PT_SSTATUS(sp)
csrc sstatus, SR_SIE
andi s0, s0, SR_SPP
- bnez s0, restore_all
+ bnez s0, resume_kernel

resume_userspace:
/* Interrupts must be disabled here so flags are checked atomically */
@@ -250,6 +254,18 @@ restore_all:
RESTORE_ALL
sret

+#if IS_ENABLED(CONFIG_PREEMPT)
+resume_kernel:
+ REG_L s0, TASK_TI_PREEMPT_COUNT(tp)
+ bnez s0, restore_all
+need_resched:
+ REG_L s0, TASK_TI_FLAGS(tp)
+ andi s0, s0, _TIF_NEED_RESCHED
+ beqz s0, restore_all
+ call preempt_schedule_irq
+ j need_resched
+#endif
+
work_pending:
/* Enter slow path for supplementary processing */
la ra, ret_from_exception
--
1.7.1
On Thu, Jan 03, 2019 at 11:32:33AM +0800, Vincent Chen wrote:
> When CONFIG_PREEMPT is not set, cond_resched() is how kernel code
> voluntarily yields the CPU; with CONFIG_PREEMPT=y it becomes a no-op,
> because the kernel is then expected to preempt itself. To keep a
> kernel thread from monopolizing the CPU when CONFIG_PREEMPT=y, the
> exception return path must check TIF_NEED_RESCHED and reschedule
> kernel-mode execution, just as it already does for user threads.
>
> Signed-off-by: Vincent Chen <[email protected]>
This patch seems to do the trick. I no longer see a problem with
CONFIG_PREEMPT=y and the various lock torture tests enabled, as
previously reported.
Nice catch and fix.
Tested-by: Guenter Roeck <[email protected]>
Guenter
On Wed, 02 Jan 2019 21:45:55 PST (-0800), [email protected] wrote:
> On Thu, Jan 03, 2019 at 11:32:33AM +0800, Vincent Chen wrote:
>> [...]
>> @@ -144,6 +144,10 @@ _save_context:
>> REG_L x2, PT_SP(sp)
>> .endm
>>
>> +#if !IS_ENABLED(CONFIG_PREEMPT)
>> +#define resume_kernel restore_all
>> +#endif
>> +
I don't like preprocessor stuff if we can avoid it. Are you OK if I squash
in the following diff:
diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
index cfbad2f689c3..fd9b57c8b4ce 100644
--- a/arch/riscv/kernel/entry.S
+++ b/arch/riscv/kernel/entry.S
@@ -145,7 +145,7 @@ _save_context:
.endm

#if !IS_ENABLED(CONFIG_PREEMPT)
-#define resume_kernel restore_all
+.set resume_kernel, restore_all
#endif

ENTRY(handle_exception)
I think that should do the same thing, but resolved by the assembler at
build time instead of by textual substitution in the preprocessor -- that
makes it a bit less likely to bite us in the future.
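As a standalone illustration of the directive (hypothetical labels, not
part of the patch):

  bar:
  	ret
  	.set	foo, bar	# foo is now an assembler-level symbol alias for bar
  	# "call foo" assembles exactly like "call bar"; the alias is
  	# resolved by the assembler, not by textual substitution.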
I'm just going to assume you're OK with the squash and drop this into my
plans for the next RC; let me know if that's not OK.
Thanks for fixing this!
On Wed, Jan 23, 2019 at 02:58:51AM +0800, Palmer Dabbelt wrote:
> I don't like preprocessor stuff if we can avoid it. Are you OK if I squash
> in the following diff:
>
> [...]
>
> I'm just going to assume you're OK with the squash and drop this into my
> plans for the next RC; let me know if that's not OK.
OK, that's fine with me.
Vincent