While discussing AP changes, we discovered that we will have to force
a CPU using the vSIE to regenerate/reload shadow data structures. For now,
we have no mechanism for that.
E.g. when clearing AP masks later, we could still have a vSIE CPU making
use of AP adapters as the masks might not be considered yet in the vSIE
data structures. We need a way to block entering the vSIE and regenerate
all shadow data structures once done.
Looks like we can achieve that by simply simulating an ordinary SIE
entry/exit in the VCPU sie control block (while entering the vSIE loop).
This way, we can support blocking and also synchronous CPU requests.
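In essence (condensed from patch 1 below, so this mirrors the actual diff),
the vSIE loop brackets itself with the same PROG_IN_SIE handling that the
real SIE entry/exit in sie64a uses:

	/* in vsie_run(), before entering the vSIE loop */
	vcpu->arch.sie_block->prog0c |= PROG_IN_SIE;	/* simulate SIE entry */
	barrier();
	if (kvm_s390_vcpu_sie_inhibited(vcpu))	/* blocked / request pending */
		return 0;
	/* ... vSIE execution loop ... */
	barrier();
	vcpu->arch.sie_block->prog0c &= ~PROG_IN_SIE;	/* simulate SIE exit */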
Only compile tested.
David Hildenbrand (2):
KVM: s390: vsie: simulate VCPU SIE entry/exit
KVM: s390: introduce and use KVM_REQ_VSIE_RESTART
arch/s390/include/asm/kvm_host.h | 1 +
arch/s390/kvm/kvm-s390.c | 16 ++++++++++++++--
arch/s390/kvm/kvm-s390.h | 1 +
arch/s390/kvm/vsie.c | 20 ++++++++++++++++++--
4 files changed, 34 insertions(+), 4 deletions(-)
--
2.17.1
VCPU requests and VCPU blocking right now don't take care of the vSIE
(as it was not necessary until now). But we want to have VCPU requests
that will also be handled before running the vSIE again.
So let's simulate a SIE entry when entering the vSIE loop and check
for PROG_ flags. The existing infrastructure (e.g. exit_sie()) will then
detect that the SIE (in form of the vSIE execution loop) is running and
properly kick the vSIE CPU, resulting in it leaving the vSIE loop and
therefore the vSIE interception handler, allowing it to handle VCPU
requests.
E.g. if we want to modify the crycb of the VCPU and make sure that any
masks also get applied to the VSIE crycb shadow (which uses masks from the
VCPU crycb), we will need a way to hinder the vSIE from running and make
sure to process the updated crycb before reentering the vSIE again.
Signed-off-by: David Hildenbrand <[email protected]>
---
arch/s390/kvm/kvm-s390.c | 9 ++++++++-
arch/s390/kvm/kvm-s390.h | 1 +
arch/s390/kvm/vsie.c | 20 ++++++++++++++++++--
3 files changed, 27 insertions(+), 3 deletions(-)
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 91ad4a9425c0..c87734a31fdb 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -2766,18 +2766,25 @@ static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
exit_sie(vcpu);
}
+bool kvm_s390_vcpu_sie_inhibited(struct kvm_vcpu *vcpu)
+{
+ return atomic_read(&vcpu->arch.sie_block->prog20) &
+ (PROG_BLOCK_SIE | PROG_REQUEST);
+}
+
static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
{
atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
}
/*
- * Kick a guest cpu out of SIE and wait until SIE is not running.
+ * Kick a guest cpu out of (v)SIE and wait until (v)SIE is not running.
* If the CPU is not running (e.g. waiting as idle) the function will
* return immediately. */
void exit_sie(struct kvm_vcpu *vcpu)
{
kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
+ kvm_s390_vsie_kick(vcpu);
while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
cpu_relax();
}
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index 981e3ba97461..1f6e36cdce0d 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -290,6 +290,7 @@ void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu);
+bool kvm_s390_vcpu_sie_inhibited(struct kvm_vcpu *vcpu);
void exit_sie(struct kvm_vcpu *vcpu);
void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu);
int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu);
diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
index 63844b95c22c..faac06886f77 100644
--- a/arch/s390/kvm/vsie.c
+++ b/arch/s390/kvm/vsie.c
@@ -989,6 +989,17 @@ static int vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
int rc = 0;
+ /*
+ * Simulate a SIE entry of the VCPU (see sie64a), so VCPU blocking
+ * and VCPU requests can hinder the whole vSIE loop from running
+ * and lead to an immediate exit. We do it at this point (not
+ * earlier), so kvm_s390_vsie_kick() works correctly already.
+ */
+ vcpu->arch.sie_block->prog0c |= PROG_IN_SIE;
+ barrier();
+ if (kvm_s390_vcpu_sie_inhibited(vcpu))
+ return 0;
+
while (1) {
rc = acquire_gmap_shadow(vcpu, vsie_page);
if (!rc)
@@ -1004,10 +1015,14 @@ static int vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
if (rc == -EAGAIN)
rc = 0;
if (rc || scb_s->icptcode || signal_pending(current) ||
- kvm_s390_vcpu_has_irq(vcpu, 0))
+ kvm_s390_vcpu_has_irq(vcpu, 0) ||
+ kvm_s390_vcpu_sie_inhibited(vcpu))
break;
}
+ barrier();
+ vcpu->arch.sie_block->prog0c &= ~PROG_IN_SIE;
+
if (rc == -EFAULT) {
/*
* Addressing exceptions are always presented as intercepts.
@@ -1121,7 +1136,8 @@ int kvm_s390_handle_vsie(struct kvm_vcpu *vcpu)
if (unlikely(scb_addr & 0x1ffUL))
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
- if (signal_pending(current) || kvm_s390_vcpu_has_irq(vcpu, 0))
+ if (signal_pending(current) || kvm_s390_vcpu_has_irq(vcpu, 0) ||
+ kvm_s390_vcpu_sie_inhibited(vcpu))
return 0;
vsie_page = get_vsie_page(vcpu->kvm, scb_addr);
--
2.17.1
When we change the crycb (or execution controls), we also have to make sure
that the vSIE shadow data structures properly consider the changed
values before rerunning the vSIE. We can achieve that by simply using a
VCPU request now.
This has to be a synchronous request (== handled before entering the
(v)SIE again).
The request will make sure that the vSIE handler is left, and that the
request will be processed (NOP), therefore forcing a reload of all
vSIE data (including rebuilding the crycb) when re-entering the vSIE
interception handler the next time.
Signed-off-by: David Hildenbrand <[email protected]>
---
arch/s390/include/asm/kvm_host.h | 1 +
arch/s390/kvm/kvm-s390.c | 7 ++++++-
2 files changed, 7 insertions(+), 1 deletion(-)
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 29c940bf8506..75d39628f21d 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -44,6 +44,7 @@
#define KVM_REQ_ICPT_OPEREXC KVM_ARCH_REQ(2)
#define KVM_REQ_START_MIGRATION KVM_ARCH_REQ(3)
#define KVM_REQ_STOP_MIGRATION KVM_ARCH_REQ(4)
+#define KVM_REQ_VSIE_RESTART KVM_ARCH_REQ(5)
#define SIGP_CTRL_C 0x80
#define SIGP_CTRL_SCN_MASK 0x3f
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index c87734a31fdb..2fdc017d91f0 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -842,8 +842,11 @@ void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm)
kvm_s390_vcpu_block_all(kvm);
- kvm_for_each_vcpu(i, vcpu, kvm)
+ kvm_for_each_vcpu(i, vcpu, kvm) {
kvm_s390_vcpu_crypto_setup(vcpu);
+ /* recreate the shadow crycb by leaving the VSIE handler */
+ kvm_s390_sync_request(KVM_REQ_VSIE_RESTART, vcpu);
+ }
kvm_s390_vcpu_unblock_all(kvm);
}
@@ -3201,6 +3204,8 @@ static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
/* nothing to do, just clear the request */
kvm_clear_request(KVM_REQ_UNHALT, vcpu);
+ /* we left the vsie handler, nothing to do, just clear the request */
+ kvm_clear_request(KVM_REQ_VSIE_RESTART, vcpu);
return 0;
}
--
2.17.1
On 07/08/2018 14:51, David Hildenbrand wrote:
> While discussing AP changes, we discovered that we will have to force
> a CPU using the vSIE to regenerate/reload shadow data structures. For now,
> we have no mechanism for that.
>
> E.g. when clearing AP masks later, we could still have a vSIE CPU making
> use of AP adapters as the masks might not be considered yet in the vSIE
> data structures. We need a way to block entering the vSIE and regenerate
> all shadow data structures once done.
>
> Looks like we can achieve that by simply simulating an ordinary SIE
> entry/exit in the VCPU sie control block (while entering the vSIE loop).
>
> This way, we can support blocking and also synchronous CPU requests.
>
> Only compile tested.
>
> David Hildenbrand (2):
> KVM: s390: vsie: simulate VCPU SIE entry/exit
> KVM: s390: introduce and use KVM_REQ_VSIE_RESTART
>
> arch/s390/include/asm/kvm_host.h | 1 +
> arch/s390/kvm/kvm-s390.c | 16 ++++++++++++++--
> arch/s390/kvm/kvm-s390.h | 1 +
> arch/s390/kvm/vsie.c | 20 ++++++++++++++++++--
> 4 files changed, 34 insertions(+), 4 deletions(-)
>
Thanks, David, for your patches.
In my tests, they work fine for our AP use case.
Best regards,
Pierre
--
Pierre Morel
Linux/KVM/QEMU in Böblingen - Germany
On 07.08.2018 19:15, Pierre Morel wrote:
> On 07/08/2018 14:51, David Hildenbrand wrote:
>> While discussing AP changes, we discovered that we will have to force
>> a CPU using the vSIE to regenerate/reload shadow data structures. For now,
>> we have no mechanism for that.
>>
>> E.g. when clearing AP masks later, we could still have a vSIE CPU making
>> use of AP adapters as the masks might not be considered yet in the vSIE
>> data structures. We need a way to block entering the vSIE and regenerate
>> all shadow data structures once done.
>>
>> Looks like we can achieve that by simply simulating an ordinary SIE
>> entry/exit in the VCPU sie control block (while entering the vSIE loop).
>>
>> This way, we can support blocking and also synchronous CPU requests.
>>
>> Only compile tested.
>>
>> David Hildenbrand (2):
>> KVM: s390: vsie: simulate VCPU SIE entry/exit
>> KVM: s390: introduce and use KVM_REQ_VSIE_RESTART
>>
>> arch/s390/include/asm/kvm_host.h | 1 +
>> arch/s390/kvm/kvm-s390.c | 16 ++++++++++++++--
>> arch/s390/kvm/kvm-s390.h | 1 +
>> arch/s390/kvm/vsie.c | 20 ++++++++++++++++++--
>> 4 files changed, 34 insertions(+), 4 deletions(-)
>>
> Thanks, David, for your patches.
>
> In my tests, they work fine for our AP use case.
Perfect, let's wait for some more comments. You can carry them along in
the AP series for now; maybe Christian/Janosch will decide to pick them
up earlier.
If there is some more work to be done due to review comments, I'll jump in.
>
> Best regards,
>
> Pierre
>
>
--
Thanks,
David / dhildenb
On 07/08/2018 14:51, David Hildenbrand wrote:
> When we change the crycb (or execution controls), we also have to make sure
> that the vSIE shadow data structures properly consider the changed
> values before rerunning the vSIE. We can achieve that by simply using a
> VCPU request now.
>
> This has to be a synchronous request (== handled before entering the
> (v)SIE again).
>
> The request will make sure that the vSIE handler is left, and that the
> request will be processed (NOP), therefore forcing a reload of all
> vSIE data (including rebuilding the crycb) when re-entering the vSIE
> interception handler the next time.
>
> Signed-off-by: David Hildenbrand <[email protected]>
> ---
> arch/s390/include/asm/kvm_host.h | 1 +
> arch/s390/kvm/kvm-s390.c | 7 ++++++-
> 2 files changed, 7 insertions(+), 1 deletion(-)
>
> diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
> index 29c940bf8506..75d39628f21d 100644
> --- a/arch/s390/include/asm/kvm_host.h
> +++ b/arch/s390/include/asm/kvm_host.h
> @@ -44,6 +44,7 @@
> #define KVM_REQ_ICPT_OPEREXC KVM_ARCH_REQ(2)
> #define KVM_REQ_START_MIGRATION KVM_ARCH_REQ(3)
> #define KVM_REQ_STOP_MIGRATION KVM_ARCH_REQ(4)
> +#define KVM_REQ_VSIE_RESTART KVM_ARCH_REQ(5)
>
> #define SIGP_CTRL_C 0x80
> #define SIGP_CTRL_SCN_MASK 0x3f
> diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
> index c87734a31fdb..2fdc017d91f0 100644
> --- a/arch/s390/kvm/kvm-s390.c
> +++ b/arch/s390/kvm/kvm-s390.c
> @@ -842,8 +842,11 @@ void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm)
>
> kvm_s390_vcpu_block_all(kvm);
>
> - kvm_for_each_vcpu(i, vcpu, kvm)
> + kvm_for_each_vcpu(i, vcpu, kvm) {
> kvm_s390_vcpu_crypto_setup(vcpu);
> + /* recreate the shadow crycb by leaving the VSIE handler */
> + kvm_s390_sync_request(KVM_REQ_VSIE_RESTART, vcpu);
> + }
>
> kvm_s390_vcpu_unblock_all(kvm);
> }
> @@ -3201,6 +3204,8 @@ static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
>
> /* nothing to do, just clear the request */
> kvm_clear_request(KVM_REQ_UNHALT, vcpu);
> + /* we left the vsie handler, nothing to do, just clear the request */
> + kvm_clear_request(KVM_REQ_VSIE_RESTART, vcpu);
>
> return 0;
> }
Reviewed-by: Pierre Morel <[email protected]>
Tested-by: Pierre Morel <[email protected]>
--
Pierre Morel
Linux/KVM/QEMU in Böblingen - Germany
On 07/08/2018 14:51, David Hildenbrand wrote:
> VCPU requests and VCPU blocking right now don't take care of the vSIE
> (as it was not necessary until now). But we want to have VCPU requests
> that will also be handled before running the vSIE again.
>
> So let's simulate a SIE entry when entering the vSIE loop and check
> for PROG_ flags. The existing infrastructure (e.g. exit_sie()) will then
> detect that the SIE (in form of the vSIE execution loop) is running and
> properly kick the vSIE CPU, resulting in it leaving the vSIE loop and
> therefore the vSIE interception handler, allowing it to handle VCPU
> requests.
>
> E.g. if we want to modify the crycb of the VCPU and make sure that any
> masks also get applied to the VSIE crycb shadow (which uses masks from the
> VCPU crycb), we will need a way to hinder the vSIE from running and make
> sure to process the updated crycb before reentering the vSIE again.
>
> Signed-off-by: David Hildenbrand <[email protected]>
> ---
> arch/s390/kvm/kvm-s390.c | 9 ++++++++-
> arch/s390/kvm/kvm-s390.h | 1 +
> arch/s390/kvm/vsie.c | 20 ++++++++++++++++++--
> 3 files changed, 27 insertions(+), 3 deletions(-)
>
> diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
> index 91ad4a9425c0..c87734a31fdb 100644
> --- a/arch/s390/kvm/kvm-s390.c
> +++ b/arch/s390/kvm/kvm-s390.c
> @@ -2766,18 +2766,25 @@ static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
> exit_sie(vcpu);
> }
>
> +bool kvm_s390_vcpu_sie_inhibited(struct kvm_vcpu *vcpu)
> +{
> + return atomic_read(&vcpu->arch.sie_block->prog20) &
> + (PROG_BLOCK_SIE | PROG_REQUEST);
> +}
> +
> static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
> {
> atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
> }
>
> /*
> - * Kick a guest cpu out of SIE and wait until SIE is not running.
> + * Kick a guest cpu out of (v)SIE and wait until (v)SIE is not running.
> * If the CPU is not running (e.g. waiting as idle) the function will
> * return immediately. */
> void exit_sie(struct kvm_vcpu *vcpu)
> {
> kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
> + kvm_s390_vsie_kick(vcpu);
> while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
> cpu_relax();
> }
> diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
> index 981e3ba97461..1f6e36cdce0d 100644
> --- a/arch/s390/kvm/kvm-s390.h
> +++ b/arch/s390/kvm/kvm-s390.h
> @@ -290,6 +290,7 @@ void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu);
> void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu);
> void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu);
> void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu);
> +bool kvm_s390_vcpu_sie_inhibited(struct kvm_vcpu *vcpu);
> void exit_sie(struct kvm_vcpu *vcpu);
> void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu);
> int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu);
> diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
> index 63844b95c22c..faac06886f77 100644
> --- a/arch/s390/kvm/vsie.c
> +++ b/arch/s390/kvm/vsie.c
> @@ -989,6 +989,17 @@ static int vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
> struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
> int rc = 0;
>
> + /*
> + * Simulate a SIE entry of the VCPU (see sie64a), so VCPU blocking
> + * and VCPU requests can hinder the whole vSIE loop from running
> + * and lead to an immediate exit. We do it at this point (not
> + * earlier), so kvm_s390_vsie_kick() works correctly already.
> + */
> + vcpu->arch.sie_block->prog0c |= PROG_IN_SIE;
> + barrier();
> + if (kvm_s390_vcpu_sie_inhibited(vcpu))
> + return 0;
> +
> while (1) {
> rc = acquire_gmap_shadow(vcpu, vsie_page);
> if (!rc)
> @@ -1004,10 +1015,14 @@ static int vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
> if (rc == -EAGAIN)
> rc = 0;
> if (rc || scb_s->icptcode || signal_pending(current) ||
> - kvm_s390_vcpu_has_irq(vcpu, 0))
> + kvm_s390_vcpu_has_irq(vcpu, 0) ||
> + kvm_s390_vcpu_sie_inhibited(vcpu))
> break;
> }
>
> + barrier();
> + vcpu->arch.sie_block->prog0c &= ~PROG_IN_SIE;
> +
> if (rc == -EFAULT) {
> /*
> * Addressing exceptions are always presented as intercepts.
> @@ -1121,7 +1136,8 @@ int kvm_s390_handle_vsie(struct kvm_vcpu *vcpu)
> if (unlikely(scb_addr & 0x1ffUL))
> return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
>
> - if (signal_pending(current) || kvm_s390_vcpu_has_irq(vcpu, 0))
> + if (signal_pending(current) || kvm_s390_vcpu_has_irq(vcpu, 0) ||
> + kvm_s390_vcpu_sie_inhibited(vcpu))
> return 0;
>
> vsie_page = get_vsie_page(vcpu->kvm, scb_addr);
Reviewed-by: Pierre Morel <[email protected]>
Tested-by: Pierre Morel <[email protected]>
--
Pierre Morel
Linux/KVM/QEMU in Böblingen - Germany
On Tue, 7 Aug 2018 14:51:30 +0200
David Hildenbrand <[email protected]> wrote:
> VCPU requests and VCPU blocking right now don't take care of the vSIE
> (as it was not necessary until now). But we want to have VCPU requests
> that will also be handled before running the vSIE again.
>
> So let's simulate a SIE entry when entering the vSIE loop and check
> for PROG_ flags. The existing infrastructure (e.g. exit_sie()) will then
> detect that the SIE (in form of the vSIE execution loop) is running and
> properly kick the vSIE CPU, resulting in it leaving the vSIE loop and
> therefore the vSIE interception handler, allowing it to handle VCPU
> requests.
>
> E.g. if we want to modify the crycb of the VCPU and make sure that any
> masks also get applied to the VSIE crycb shadow (which uses masks from the
> VCPU crycb), we will need a way to hinder the vSIE from running and make
> sure to process the updated crycb before reentering the vSIE again.
>
> Signed-off-by: David Hildenbrand <[email protected]>
> ---
> arch/s390/kvm/kvm-s390.c | 9 ++++++++-
> arch/s390/kvm/kvm-s390.h | 1 +
> arch/s390/kvm/vsie.c | 20 ++++++++++++++++++--
> 3 files changed, 27 insertions(+), 3 deletions(-)
Reviewed-by: Cornelia Huck <[email protected]>
On Tue, 7 Aug 2018 14:51:31 +0200
David Hildenbrand <[email protected]> wrote:
> When we change the crycb (or execution controls), we also have to make sure
> that the vSIE shadow data structures properly consider the changed
> values before rerunning the vSIE. We can achieve that by simply using a
> VCPU request now.
Is this actually a concrete problem right now, or does this only become
a real concern with vfio-ap?
>
> This has to be a synchronous request (== handled before entering the
> (v)SIE again).
>
> The request will make sure that the vSIE handler is left, and that the
> request will be processed (NOP), therefore forcing a reload of all
> vSIE data (including rebuilding the crycb) when re-entering the vSIE
> interception handler the next time.
>
> Signed-off-by: David Hildenbrand <[email protected]>
> ---
> arch/s390/include/asm/kvm_host.h | 1 +
> arch/s390/kvm/kvm-s390.c | 7 ++++++-
> 2 files changed, 7 insertions(+), 1 deletion(-)
Reviewed-by: Cornelia Huck <[email protected]>
On 08.08.2018 14:47, Cornelia Huck wrote:
> On Tue, 7 Aug 2018 14:51:31 +0200
> David Hildenbrand <[email protected]> wrote:
>
>> When we change the crycb (or execution controls), we also have to make sure
>> that the vSIE shadow data structures properly consider the changed
>> values before rerunning the vSIE. We can achieve that by simply using a
>> VCPU request now.
>
> Is this actually a concrete problem right now, or does this only become
> a real concern with vfio-ap?
The use case I implemented is not a real problem right now, at least not
in practice.
In QEMU, s390_crypto_reset() triggers this code, but it is only called
when all VCPUs are stopped (== no one in vSIE).
So this change is the right thing to do ("who knows what user space
does"), but not critical (we're only dealing with wrapping masks right
now). It becomes a problem once we allow access to adapters (vfio-ap).
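Just to illustrate (hypothetical, not part of this series): a vfio-ap-style
mask update could then force every vSIE CPU to drop out and rebuild its
shadow crycb along these lines:

	unsigned int i;
	struct kvm_vcpu *vcpu;

	/* hypothetical caller: AP masks in the VCPU crycb were just changed;
	 * the request is synchronous, kvm_s390_sync_request() only returns
	 * once the VCPU has left the (v)SIE */
	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_s390_sync_request(KVM_REQ_VSIE_RESTART, vcpu);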
>
>>
>> This has to be a synchronous request (== handled before entering the
>> (v)SIE again).
>>
>> The request will make sure that the vSIE handler is left, and that the
>> request will be processed (NOP), therefore forcing a reload of all
>> vSIE data (including rebuilding the crycb) when re-entering the vSIE
>> interception handler the next time.
>>
>> Signed-off-by: David Hildenbrand <[email protected]>
>> ---
>> arch/s390/include/asm/kvm_host.h | 1 +
>> arch/s390/kvm/kvm-s390.c | 7 ++++++-
>> 2 files changed, 7 insertions(+), 1 deletion(-)
>
> Reviewed-by: Cornelia Huck <[email protected]>
>
--
Thanks,
David / dhildenb
On 07.08.2018 14:51, David Hildenbrand wrote:
> VCPU requests and VCPU blocking right now don't take care of the vSIE
> (as it was not necessary until now). But we want to have VCPU requests
> that will also be handled before running the vSIE again.
>
> So let's simulate a SIE entry when entering the vSIE loop and check
> for PROG_ flags. The existing infrastructure (e.g. exit_sie()) will then
> detect that the SIE (in form of the vSIE execution loop) is running and
> properly kick the vSIE CPU, resulting in it leaving the vSIE loop and
> therefore the vSIE interception handler, allowing it to handle VCPU
> requests.
>
> E.g. if we want to modify the crycb of the VCPU and make sure that any
> masks also get applied to the VSIE crycb shadow (which uses masks from the
> VCPU crycb), we will need a way to hinder the vSIE from running and make
> sure to process the updated crycb before reentering the vSIE again.
>
> Signed-off-by: David Hildenbrand <[email protected]>
Finally found some time:
Reviewed-by: Janosch Frank <[email protected]>
As the first user will be AP, I guess the patches will be queued with
them. Thanks for helping out :)
On 07.08.2018 14:51, David Hildenbrand wrote:
> When we change the crycb (or execution controls), we also have to make sure
> that the vSIE shadow data structures properly consider the changed
> values before rerunning the vSIE. We can achieve that by simply using a
> VCPU request now.
>
> This has to be a synchronous request (== handled before entering the
> (v)SIE again).
>
> The request will make sure that the vSIE handler is left, and that the
> request will be processed (NOP), therefore forcing a reload of all
> vSIE data (including rebuilding the crycb) when re-entering the vSIE
> interception handler the next time.
>
> Signed-off-by: David Hildenbrand <[email protected]>
Reviewed-by: Janosch Frank <[email protected]>
On 07.08.2018 14:51, David Hildenbrand wrote:
> VCPU requests and VCPU blocking right now don't take care of the vSIE
> (as it was not necessary until now). But we want to have VCPU requests
> that will also be handled before running the vSIE again.
>
> So let's simulate a SIE entry when entering the vSIE loop and check
> for PROG_ flags. The existing infrastructure (e.g. exit_sie()) will then
> detect that the SIE (in form of the vSIE execution loop) is running and
> properly kick the vSIE CPU, resulting in it leaving the vSIE loop and
> therefore the vSIE interception handler, allowing it to handle VCPU
> requests.
>
> E.g. if we want to modify the crycb of the VCPU and make sure that any
> masks also get applied to the VSIE crycb shadow (which uses masks from the
> VCPU crycb), we will need a way to hinder the vSIE from running and make
> sure to process the updated crycb before reentering the vSIE again.
>
> Signed-off-by: David Hildenbrand <[email protected]>
> ---
> arch/s390/kvm/kvm-s390.c | 9 ++++++++-
> arch/s390/kvm/kvm-s390.h | 1 +
> arch/s390/kvm/vsie.c | 20 ++++++++++++++++++--
> 3 files changed, 27 insertions(+), 3 deletions(-)
>
> diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
> index 91ad4a9425c0..c87734a31fdb 100644
> --- a/arch/s390/kvm/kvm-s390.c
> +++ b/arch/s390/kvm/kvm-s390.c
> @@ -2766,18 +2766,25 @@ static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
> exit_sie(vcpu);
> }
>
> +bool kvm_s390_vcpu_sie_inhibited(struct kvm_vcpu *vcpu)
> +{
> + return atomic_read(&vcpu->arch.sie_block->prog20) &
> + (PROG_BLOCK_SIE | PROG_REQUEST);
> +}
> +
> static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
> {
> atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
> }
>
> /*
> - * Kick a guest cpu out of SIE and wait until SIE is not running.
> + * Kick a guest cpu out of (v)SIE and wait until (v)SIE is not running.
> * If the CPU is not running (e.g. waiting as idle) the function will
> * return immediately. */
> void exit_sie(struct kvm_vcpu *vcpu)
> {
> kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
> + kvm_s390_vsie_kick(vcpu);
> while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
> cpu_relax();
> }
> diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
> index 981e3ba97461..1f6e36cdce0d 100644
> --- a/arch/s390/kvm/kvm-s390.h
> +++ b/arch/s390/kvm/kvm-s390.h
> @@ -290,6 +290,7 @@ void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu);
> void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu);
> void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu);
> void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu);
> +bool kvm_s390_vcpu_sie_inhibited(struct kvm_vcpu *vcpu);
> void exit_sie(struct kvm_vcpu *vcpu);
> void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu);
> int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu);
> diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
> index 63844b95c22c..faac06886f77 100644
> --- a/arch/s390/kvm/vsie.c
> +++ b/arch/s390/kvm/vsie.c
> @@ -989,6 +989,17 @@ static int vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
> struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
> int rc = 0;
>
> + /*
> + * Simulate a SIE entry of the VCPU (see sie64a), so VCPU blocking
> + * and VCPU requests can hinder the whole vSIE loop from running
> + * and lead to an immediate exit. We do it at this point (not
> + * earlier), so kvm_s390_vsie_kick() works correctly already.
> + */
> + vcpu->arch.sie_block->prog0c |= PROG_IN_SIE;
> + barrier();
> + if (kvm_s390_vcpu_sie_inhibited(vcpu))
> + return 0;
> +
> while (1) {
> rc = acquire_gmap_shadow(vcpu, vsie_page);
> if (!rc)
> @@ -1004,10 +1015,14 @@ static int vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
> if (rc == -EAGAIN)
> rc = 0;
> if (rc || scb_s->icptcode || signal_pending(current) ||
> - kvm_s390_vcpu_has_irq(vcpu, 0))
> + kvm_s390_vcpu_has_irq(vcpu, 0) ||
> + kvm_s390_vcpu_sie_inhibited(vcpu))
> break;
> }
>
> + barrier();
> + vcpu->arch.sie_block->prog0c &= ~PROG_IN_SIE;
> +
I am thinking about moving this down to the actual sie64a call. We
eventually take locks and even call into MM code (to resolve faults)
inside do_vsie_run(). I think this extra overhead can be avoided
(otherwise any caller, e.g. on prefix unmaps, has to wait for all of that).
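Roughly like this (only a sketch of the idea, not tested): simulate the
entry/exit directly around the hardware SIE call in do_vsie_run():

	int rc = 0;

	/* sketch: mark "in SIE" only around the actual hardware entry */
	vcpu->arch.sie_block->prog0c |= PROG_IN_SIE;
	barrier();
	if (!kvm_s390_vcpu_sie_inhibited(vcpu))
		rc = sie64a(scb_s, vcpu->run->s.regs.gprs);
	barrier();
	vcpu->arch.sie_block->prog0c &= ~PROG_IN_SIE;

That way, taking locks and resolving faults in do_vsie_run() would no longer
make a kicker (e.g. on prefix unmap) spin in exit_sie().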
--
Thanks,
David / dhildenb