With halt-polling supported, pending events or interrupts are checked
when a vCPU executes the idle instruction. Pending interrupts include
injected SW interrupts and pass-through HW interrupts such as the HW
timer interrupt, since the HW timer keeps running even after the vCPU
exits from VM mode.

Since a pending HW timer interrupt is set directly in the CSR ESTAT
register, and the vCPU block-checking path already checks for a pending
HW timer interrupt, it is not necessary to switch to the SW timer
during halt-polling. This patch disables preemption in
kvm_cpu_has_pending_timer() and removes the SW timer switch from the
idle instruction emulation function.
Signed-off-by: Bibo Mao <[email protected]>
---
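Note (illustrative only, not part of the patch): the halt-polling path
that this change relies on keeps re-evaluating the runnable/block check
while polling and again before actually sleeping; per the commit
message, that check consults the pending HW timer state, so an expiring
hardware timer ends the poll without any SW timer hand-over. A minimal
sketch of that idea follows, with simplified, assumed control flow
rather than the exact generic KVM code:

#include <linux/ktime.h>
#include <linux/kvm_host.h>

/* Illustrative sketch only, not the actual virt/kvm/kvm_main.c code. */
static void halt_poll_sketch(struct kvm_vcpu *vcpu)
{
	ktime_t stop = ktime_add_ns(ktime_get(), vcpu->halt_poll_ns);

	do {
		/* a pending SW irq or the HW timer bit ends the poll */
		if (kvm_arch_vcpu_runnable(vcpu))
			return;
		cpu_relax();
	} while (ktime_before(ktime_get(), stop));

	kvm_vcpu_block(vcpu);	/* nothing pending: actually go to sleep */
}
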
arch/loongarch/kvm/exit.c | 13 ++-----------
arch/loongarch/kvm/timer.c | 13 ++++++++++---
arch/loongarch/kvm/vcpu.c | 9 ++++++++-
3 files changed, 20 insertions(+), 15 deletions(-)
diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c
index ce8de3fa472c..e708a1786d6b 100644
--- a/arch/loongarch/kvm/exit.c
+++ b/arch/loongarch/kvm/exit.c
@@ -200,17 +200,8 @@ int kvm_emu_idle(struct kvm_vcpu *vcpu)
++vcpu->stat.idle_exits;
trace_kvm_exit_idle(vcpu, KVM_TRACE_EXIT_IDLE);
- if (!kvm_arch_vcpu_runnable(vcpu)) {
- /*
- * Switch to the software timer before halt-polling/blocking as
- * the guest's timer may be a break event for the vCPU, and the
- * hypervisor timer runs only when the CPU is in guest mode.
- * Switch before halt-polling so that KVM recognizes an expired
- * timer before blocking.
- */
- kvm_save_timer(vcpu);
- kvm_vcpu_block(vcpu);
- }
+ if (!kvm_arch_vcpu_runnable(vcpu))
+ kvm_vcpu_halt(vcpu);
return EMULATE_DONE;
}
diff --git a/arch/loongarch/kvm/timer.c b/arch/loongarch/kvm/timer.c
index 284bf553fefe..437e960d8fdb 100644
--- a/arch/loongarch/kvm/timer.c
+++ b/arch/loongarch/kvm/timer.c
@@ -155,11 +155,18 @@ static void _kvm_save_timer(struct kvm_vcpu *vcpu)
*/
hrtimer_cancel(&vcpu->arch.swtimer);
hrtimer_start(&vcpu->arch.swtimer, expire, HRTIMER_MODE_ABS_PINNED);
- } else
+ } else if (vcpu->stat.generic.blocking) {
/*
- * Inject timer interrupt so that hall polling can dectect and exit
+ * Inject a timer interrupt so that halt-polling can detect it and
+ * exit.
+ * The vCPU is already scheduled out and sleeping in the rcuwait
+ * queue, and will not poll pending events again; kvm_queue_irq is
+ * not enough here, the hrtimer-based SW timer must be used.
*/
- kvm_queue_irq(vcpu, INT_TI);
+ expire = ktime_add_ns(ktime_get(), 10); // 10ns is enough here?
+ vcpu->arch.expire = expire;
+ hrtimer_start(&vcpu->arch.swtimer, expire, HRTIMER_MODE_ABS_PINNED);
+ }
}
/*
diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c
index 73d0c2b9c1a5..42663a345bd1 100644
--- a/arch/loongarch/kvm/vcpu.c
+++ b/arch/loongarch/kvm/vcpu.c
@@ -187,8 +187,15 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
- return kvm_pending_timer(vcpu) ||
+ int ret;
+
+ /* protect from TOD sync and vcpu_load/put */
+ preempt_disable();
+ ret = kvm_pending_timer(vcpu) ||
kvm_read_hw_gcsr(LOONGARCH_CSR_ESTAT) & (1 << INT_TI);
+ preempt_enable();
+
+ return ret;
}
int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
--
2.39.3
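
Regarding the timer.c hunk above: once the vCPU has entered the rcuwait
sleep it no longer polls pending events, so setting the pending bit with
kvm_queue_irq() alone would leave it asleep until some unrelated wakeup;
the hrtimer is what lifts it out of the wait. A minimal, hypothetical
sketch of the expiry handler's shape (the names below are assumptions
for illustration, not taken from this patch):

#include <linux/hrtimer.h>
#include <linux/kvm_host.h>

/* Hypothetical wakeup path of the SW timer, for illustration only.
 * kvm_queue_irq() and INT_TI are the LoongArch-internal helpers already
 * used in arch/loongarch/kvm/timer.c.
 */
static enum hrtimer_restart swtimer_wakeup_sketch(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.swtimer);
	kvm_queue_irq(vcpu, INT_TI);	/* mark the timer interrupt pending */
	kvm_vcpu_wake_up(vcpu);		/* lift the vCPU out of the rcuwait sleep */

	return HRTIMER_NORESTART;
}
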
Reviewed-by: Tianrui Zhao <[email protected]>
This series looks good to me. If Paolo agrees, I will apply it to
loongarch-next after [1] is taken into the kvm tree (otherwise there
will be build errors).
[1] https://lore.kernel.org/loongarch/CAAhV-H63QkfSw+Esn8oW2PDEsCnTRPFqkj8X-x8i9cH3AS0k9w@mail.gmail.com/T/#t
On Mon, Dec 4, 2023 at 4:45 PM zhaotianrui <[email protected]> wrote:
>
> Reviewed-by: Tianrui Zhao <[email protected]>
>
On 2023/12/5 10:20 AM, Huacai Chen wrote:
> This series looks good to me. If Paolo agrees, I will apply it to
> loongarch-next after [1] is taken into the kvm tree (otherwise there
> will be build errors).
>
> [1] https://lore.kernel.org/loongarch/CAAhV-H63QkfSw+Esn8oW2PDEsCnTRPFqkj8X-x8i9cH3AS0k9w@mail.gmail.com/T/#t
>
Got it, and thanks for the information.
Regards
Bibo Mao