2020-07-29 13:23:27

by Joerg Roedel

Subject: [PATCH 1/4] KVM: SVM: nested: Don't allocate VMCB structures on stack

From: Joerg Roedel <[email protected]>

Do not allocate a vmcb_control_area and a vmcb_save_area on the stack,
as these structures will become larger with future extensions of
SVM and the stack frame of svm_set_nested_state() would thus grow
too large.

Signed-off-by: Joerg Roedel <[email protected]>
---
arch/x86/kvm/svm/nested.c | 44 ++++++++++++++++++++++++++-------------
1 file changed, 30 insertions(+), 14 deletions(-)

diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 61378a3c2ce4..f3c3c4e1ca7f 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -1061,8 +1061,9 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
struct vmcb *hsave = svm->nested.hsave;
struct vmcb __user *user_vmcb = (struct vmcb __user *)
&user_kvm_nested_state->data.svm[0];
- struct vmcb_control_area ctl;
- struct vmcb_save_area save;
+ struct vmcb_control_area *ctl;
+ struct vmcb_save_area *save;
+ int ret;
u32 cr0;

if (kvm_state->format != KVM_STATE_NESTED_FORMAT_SVM)
@@ -1096,13 +1097,22 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
return -EINVAL;
if (kvm_state->size < sizeof(*kvm_state) + KVM_STATE_NESTED_SVM_VMCB_SIZE)
return -EINVAL;
- if (copy_from_user(&ctl, &user_vmcb->control, sizeof(ctl)))
- return -EFAULT;
- if (copy_from_user(&save, &user_vmcb->save, sizeof(save)))
- return -EFAULT;

- if (!nested_vmcb_check_controls(&ctl))
- return -EINVAL;
+ ret = -ENOMEM;
+ ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
+ save = kzalloc(sizeof(*save), GFP_KERNEL);
+ if (!ctl || !save)
+ goto out_free;
+
+ ret = -EFAULT;
+ if (copy_from_user(ctl, &user_vmcb->control, sizeof(ctl)))
+ goto out_free;
+ if (copy_from_user(save, &user_vmcb->save, sizeof(save)))
+ goto out_free;
+
+ ret = -EINVAL;
+ if (!nested_vmcb_check_controls(ctl))
+ goto out_free;

/*
* Processor state contains L2 state. Check that it is
@@ -1110,15 +1120,15 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
*/
cr0 = kvm_read_cr0(vcpu);
if (((cr0 & X86_CR0_CD) == 0) && (cr0 & X86_CR0_NW))
- return -EINVAL;
+ goto out_free;

/*
* Validate host state saved from before VMRUN (see
* nested_svm_check_permissions).
* TODO: validate reserved bits for all saved state.
*/
- if (!(save.cr0 & X86_CR0_PG))
- return -EINVAL;
+ if (!(save->cr0 & X86_CR0_PG))
+ goto out_free;

/*
* All checks done, we can enter guest mode. L1 control fields
@@ -1127,15 +1137,21 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
* contains saved L1 state.
*/
copy_vmcb_control_area(&hsave->control, &svm->vmcb->control);
- hsave->save = save;
+ hsave->save = *save;

svm->nested.vmcb = kvm_state->hdr.svm.vmcb_pa;
- load_nested_vmcb_control(svm, &ctl);
+ load_nested_vmcb_control(svm, ctl);
nested_prepare_vmcb_control(svm);

out_set_gif:
svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET));
- return 0;
+
+ ret = 0;
+out_free:
+ kfree(save);
+ kfree(ctl);
+
+ return ret;
}

struct kvm_x86_nested_ops svm_nested_ops = {
--
2.17.1


2020-07-29 15:17:52

by Sean Christopherson

Subject: Re: [PATCH 1/4] KVM: SVM: nested: Don't allocate VMCB structures on stack

On Wed, Jul 29, 2020 at 03:22:31PM +0200, Joerg Roedel wrote:
> From: Joerg Roedel <[email protected]>
>
> Do not allocate a vmcb_control_area and a vmcb_save_area on the stack,
> as these structures will become larger with future extensions of
> SVM and the stack frame of svm_set_nested_state() would thus grow
> too large.

Speaking of too large, would it be overly paranoid to add:

BUILD_BUG_ON(sizeof(struct vmcb_control_area) + sizeof(struct vmcb_save_area) <
	     KVM_STATE_NESTED_SVM_VMCB_SIZE)

More so for documentation than for any real concern that the SVM architecture
will do something silly, e.g. to make it obvious that patch 2 in this series
won't break backwards compatibility.

> Signed-off-by: Joerg Roedel <[email protected]>
> ---
> arch/x86/kvm/svm/nested.c | 44 ++++++++++++++++++++++++++-------------
> 1 file changed, 30 insertions(+), 14 deletions(-)
>
> diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
> index 61378a3c2ce4..f3c3c4e1ca7f 100644
> --- a/arch/x86/kvm/svm/nested.c
> +++ b/arch/x86/kvm/svm/nested.c
> @@ -1061,8 +1061,9 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
> struct vmcb *hsave = svm->nested.hsave;
> struct vmcb __user *user_vmcb = (struct vmcb __user *)
> &user_kvm_nested_state->data.svm[0];
> - struct vmcb_control_area ctl;
> - struct vmcb_save_area save;
> + struct vmcb_control_area *ctl;
> + struct vmcb_save_area *save;
> + int ret;
> u32 cr0;
>
> if (kvm_state->format != KVM_STATE_NESTED_FORMAT_SVM)
> @@ -1096,13 +1097,22 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
> return -EINVAL;
> if (kvm_state->size < sizeof(*kvm_state) + KVM_STATE_NESTED_SVM_VMCB_SIZE)
> return -EINVAL;
> - if (copy_from_user(&ctl, &user_vmcb->control, sizeof(ctl)))
> - return -EFAULT;
> - if (copy_from_user(&save, &user_vmcb->save, sizeof(save)))
> - return -EFAULT;
>
> - if (!nested_vmcb_check_controls(&ctl))
> - return -EINVAL;
> + ret = -ENOMEM;
> + ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
> + save = kzalloc(sizeof(*save), GFP_KERNEL);
> + if (!ctl || !save)
> + goto out_free;
> +
> + ret = -EFAULT;
> + if (copy_from_user(ctl, &user_vmcb->control, sizeof(ctl)))

The sizeof() calc is wrong; this is now calculating the size of the pointer,
not the size of the struct. It'd need to be sizeof(*ctl).
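
For illustration, the corrected line would presumably look like this (sketch
only, keeping everything else as in your patch):

	if (copy_from_user(ctl, &user_vmcb->control, sizeof(*ctl)))
		goto out_free;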

> + goto out_free;
> + if (copy_from_user(save, &user_vmcb->save, sizeof(save)))

Same bug here.

> + goto out_free;
> +
> + ret = -EINVAL;
> + if (!nested_vmcb_check_controls(ctl))
> + goto out_free;
>
> /*
> * Processor state contains L2 state. Check that it is
> @@ -1110,15 +1120,15 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
> */
> cr0 = kvm_read_cr0(vcpu);
> if (((cr0 & X86_CR0_CD) == 0) && (cr0 & X86_CR0_NW))
> - return -EINVAL;
> + goto out_free;
>
> /*
> * Validate host state saved from before VMRUN (see
> * nested_svm_check_permissions).
> * TODO: validate reserved bits for all saved state.
> */
> - if (!(save.cr0 & X86_CR0_PG))
> - return -EINVAL;
> + if (!(save->cr0 & X86_CR0_PG))
> + goto out_free;
>
> /*
> * All checks done, we can enter guest mode. L1 control fields
> @@ -1127,15 +1137,21 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
> * contains saved L1 state.
> */
> copy_vmcb_control_area(&hsave->control, &svm->vmcb->control);
> - hsave->save = save;
> + hsave->save = *save;
>
> svm->nested.vmcb = kvm_state->hdr.svm.vmcb_pa;
> - load_nested_vmcb_control(svm, &ctl);
> + load_nested_vmcb_control(svm, ctl);
> nested_prepare_vmcb_control(svm);
>
> out_set_gif:
> svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET));
> - return 0;
> +
> + ret = 0;
> +out_free:
> + kfree(save);
> + kfree(ctl);
> +
> + return ret;
> }
>
> struct kvm_x86_nested_ops svm_nested_ops = {
> --
> 2.17.1
>

2020-07-30 12:39:35

by Joerg Roedel

Subject: Re: [PATCH 1/4] KVM: SVM: nested: Don't allocate VMCB structures on stack

Hi Sean,

thanks for your review!

On Wed, Jul 29, 2020 at 08:14:55AM -0700, Sean Christopherson wrote:
> On Wed, Jul 29, 2020 at 03:22:31PM +0200, Joerg Roedel wrote:
> Speaking of too large, would it be overly paranoid to add:
>
> BUILD_BUG_ON(sizeof(struct vmcb_control_area) + sizeof(struct vmcb_save_area) <
> 	     KVM_STATE_NESTED_SVM_VMCB_SIZE)
>
> More so for documentation than for any real concern that the SVM architecture
> will do something silly, e.g. to make it obvious that patch 2 in this series
> won't break backwards compatibility.

The check should actually be '>', but then it makes sense. The control-
and save-area together are still way smaller than 4k. I will add the
check for '>' to this patch.
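
For illustration, the assertion with the flipped comparison would look roughly
like this (sketch; the exact placement inside svm_set_nested_state() is still
to be decided):

	/* Both structures must fit into the 4k VMCB state blob. */
	BUILD_BUG_ON(sizeof(struct vmcb_control_area) + sizeof(struct vmcb_save_area) >
		     KVM_STATE_NESTED_SVM_VMCB_SIZE);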

> > + ret = -EFAULT;
> > + if (copy_from_user(ctl, &user_vmcb->control, sizeof(ctl)))
>
> The sizeof() calc is wrong; this is now calculating the size of the pointer,
> not the size of the struct. It'd need to be sizeof(*ctl).
>
> > + goto out_free;
> > + if (copy_from_user(save, &user_vmcb->save, sizeof(save)))
>
> Same bug here.

Thanks, fixed that.

Regards,

Joerg