2020-06-25 08:04:05

by Joerg Roedel

Subject: [PATCH 0/4] KVM: SVM: Code move follow-up

From: Joerg Roedel <[email protected]>

Hi,

here is a small series following up on the review comments for moving
the kvm-amd module code to its own sub-directory. The comments were
only about renaming structs and symbols, so there are no functional
changes in these patches.

The comments addressed here are all from [1].

Regards,

Joerg

[1] https://lore.kernel.org/lkml/[email protected]/

Joerg Roedel (4):
KVM: SVM: Rename struct nested_state to svm_nested_state
KVM: SVM: Add vmcb_ prefix to mark_*() functions
KVM: SVM: Add svm_ prefix to set/clr/is_intercept()
KVM: SVM: Rename svm_nested_virtualize_tpr() to
nested_svm_virtualize_tpr()

arch/x86/kvm/svm/avic.c | 2 +-
arch/x86/kvm/svm/nested.c | 8 +--
arch/x86/kvm/svm/sev.c | 2 +-
arch/x86/kvm/svm/svm.c | 138 +++++++++++++++++++-------------------
arch/x86/kvm/svm/svm.h | 20 +++---
5 files changed, 85 insertions(+), 85 deletions(-)

--
2.27.0


2020-06-25 08:04:11

by Joerg Roedel

Subject: [PATCH 4/4] KVM: SVM: Rename svm_nested_virtualize_tpr() to nested_svm_virtualize_tpr()

From: Joerg Roedel <[email protected]>

Match the naming of the other nested SVM functions.

No functional changes.

Signed-off-by: Joerg Roedel <[email protected]>
---
arch/x86/kvm/svm/svm.c | 6 +++---
arch/x86/kvm/svm/svm.h | 2 +-
2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index e0b0321d95d4..b0d551bcf2a0 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -3040,7 +3040,7 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
{
struct vcpu_svm *svm = to_svm(vcpu);

- if (svm_nested_virtualize_tpr(vcpu))
+ if (nested_svm_virtualize_tpr(vcpu))
return;

clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
@@ -3234,7 +3234,7 @@ static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);

- if (svm_nested_virtualize_tpr(vcpu))
+ if (nested_svm_virtualize_tpr(vcpu))
return;

if (!is_cr_intercept(svm, INTERCEPT_CR8_WRITE)) {
@@ -3248,7 +3248,7 @@ static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
struct vcpu_svm *svm = to_svm(vcpu);
u64 cr8;

- if (svm_nested_virtualize_tpr(vcpu) ||
+ if (nested_svm_virtualize_tpr(vcpu) ||
kvm_vcpu_apicv_active(vcpu))
return;

diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 003e89008331..8907efda0b0a 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -365,7 +365,7 @@ void svm_set_gif(struct vcpu_svm *svm, bool value);
#define NESTED_EXIT_DONE 1 /* Exit caused nested vmexit */
#define NESTED_EXIT_CONTINUE 2 /* Further checks needed */

-static inline bool svm_nested_virtualize_tpr(struct kvm_vcpu *vcpu)
+static inline bool nested_svm_virtualize_tpr(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);

--
2.27.0
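
For context, the helper renamed here only reports whether the L1 guest requested
V_INTR_MASKING, i.e. whether the TPR is being virtualized on behalf of the nested
guest; it does not itself virtualize anything. The hunk above cuts off before the
return statement, so the following is only a rough sketch of the full inline,
reconstructed from the surrounding code and not part of the patch:

static inline bool nested_svm_virtualize_tpr(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	/* TPR is virtualized only while running a nested guest with V_INTR_MASKING set. */
	return is_guest_mode(vcpu) &&
	       (svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK);
}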

2020-06-25 08:06:21

by Joerg Roedel

Subject: [PATCH 2/4] KVM: SVM: Add vmcb_ prefix to mark_*() functions

From: Joerg Roedel <[email protected]>

Make it clearer which data structure these functions operate on.

No functional changes.

Signed-off-by: Joerg Roedel <[email protected]>
---
arch/x86/kvm/svm/avic.c | 2 +-
arch/x86/kvm/svm/nested.c | 6 +++---
arch/x86/kvm/svm/sev.c | 2 +-
arch/x86/kvm/svm/svm.c | 44 +++++++++++++++++++--------------------
arch/x86/kvm/svm/svm.h | 8 +++----
5 files changed, 31 insertions(+), 31 deletions(-)

diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c
index e80daa98682f..ac830cd50830 100644
--- a/arch/x86/kvm/svm/avic.c
+++ b/arch/x86/kvm/svm/avic.c
@@ -665,7 +665,7 @@ void svm_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
} else {
vmcb->control.int_ctl &= ~AVIC_ENABLE_MASK;
}
- mark_dirty(vmcb, VMCB_AVIC);
+ vmcb_mark_dirty(vmcb, VMCB_AVIC);

svm_set_pi_irte_mode(vcpu, activated);
}
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 6bceafb19108..9845fab381bc 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -106,7 +106,7 @@ void recalc_intercepts(struct vcpu_svm *svm)
{
struct vmcb_control_area *c, *h, *g;

- mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
+ vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);

if (!is_guest_mode(&svm->vcpu))
return;
@@ -375,7 +375,7 @@ static void nested_prepare_vmcb_control(struct vcpu_svm *svm)
*/
recalc_intercepts(svm);

- mark_all_dirty(svm->vmcb);
+ vmcb_mark_all_dirty(svm->vmcb);
}

void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
@@ -598,7 +598,7 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
svm->vmcb->save.cpl = 0;
svm->vmcb->control.exit_int_info = 0;

- mark_all_dirty(svm->vmcb);
+ vmcb_mark_all_dirty(svm->vmcb);

trace_kvm_nested_vmexit_inject(nested_vmcb->control.exit_code,
nested_vmcb->control.exit_info_1,
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 5573a97f1520..5eebfdcdbf4c 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -1186,5 +1186,5 @@ void pre_sev_run(struct vcpu_svm *svm, int cpu)
svm->last_cpu = cpu;
sd->sev_vmcbs[asid] = svm->vmcb;
svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
- mark_dirty(svm->vmcb, VMCB_ASID);
+ vmcb_mark_dirty(svm->vmcb, VMCB_ASID);
}
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index c0da4dd78ac5..4d9e19bb888f 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -282,7 +282,7 @@ void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
}

svm->vmcb->save.efer = efer | EFER_SVME;
- mark_dirty(svm->vmcb, VMCB_CR);
+ vmcb_mark_dirty(svm->vmcb, VMCB_CR);
}

static int is_external_interrupt(u32 info)
@@ -713,7 +713,7 @@ static void grow_ple_window(struct kvm_vcpu *vcpu)
pause_filter_count_max);

if (control->pause_filter_count != old) {
- mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
+ vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
trace_kvm_ple_window_update(vcpu->vcpu_id,
control->pause_filter_count, old);
}
@@ -731,7 +731,7 @@ static void shrink_ple_window(struct kvm_vcpu *vcpu)
pause_filter_count_shrink,
pause_filter_count);
if (control->pause_filter_count != old) {
- mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
+ vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
trace_kvm_ple_window_update(vcpu->vcpu_id,
control->pause_filter_count, old);
}
@@ -966,7 +966,7 @@ static u64 svm_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)

svm->vmcb->control.tsc_offset = offset + g_tsc_offset;

- mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
+ vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
return svm->vmcb->control.tsc_offset;
}

@@ -1123,7 +1123,7 @@ static void init_vmcb(struct vcpu_svm *svm)
clr_exception_intercept(svm, UD_VECTOR);
}

- mark_all_dirty(svm->vmcb);
+ vmcb_mark_all_dirty(svm->vmcb);

enable_gif(svm);

@@ -1257,7 +1257,7 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)

if (unlikely(cpu != vcpu->cpu)) {
svm->asid_generation = 0;
- mark_all_dirty(svm->vmcb);
+ vmcb_mark_all_dirty(svm->vmcb);
}

#ifdef CONFIG_X86_64
@@ -1367,7 +1367,7 @@ static void svm_set_vintr(struct vcpu_svm *svm)
control->int_ctl &= ~V_INTR_PRIO_MASK;
control->int_ctl |= V_IRQ_MASK |
((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
- mark_dirty(svm->vmcb, VMCB_INTR);
+ vmcb_mark_dirty(svm->vmcb, VMCB_INTR);
}

static void svm_clear_vintr(struct vcpu_svm *svm)
@@ -1385,7 +1385,7 @@ static void svm_clear_vintr(struct vcpu_svm *svm)
svm->vmcb->control.int_ctl |= svm->nested.ctl.int_ctl & ~mask;
}

- mark_dirty(svm->vmcb, VMCB_INTR);
+ vmcb_mark_dirty(svm->vmcb, VMCB_INTR);
}

static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
@@ -1503,7 +1503,7 @@ static void svm_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)

svm->vmcb->save.idtr.limit = dt->size;
svm->vmcb->save.idtr.base = dt->address ;
- mark_dirty(svm->vmcb, VMCB_DT);
+ vmcb_mark_dirty(svm->vmcb, VMCB_DT);
}

static void svm_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
@@ -1520,7 +1520,7 @@ static void svm_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)

svm->vmcb->save.gdtr.limit = dt->size;
svm->vmcb->save.gdtr.base = dt->address ;
- mark_dirty(svm->vmcb, VMCB_DT);
+ vmcb_mark_dirty(svm->vmcb, VMCB_DT);
}

static void update_cr0_intercept(struct vcpu_svm *svm)
@@ -1531,7 +1531,7 @@ static void update_cr0_intercept(struct vcpu_svm *svm)
*hcr0 = (*hcr0 & ~SVM_CR0_SELECTIVE_MASK)
| (gcr0 & SVM_CR0_SELECTIVE_MASK);

- mark_dirty(svm->vmcb, VMCB_CR);
+ vmcb_mark_dirty(svm->vmcb, VMCB_CR);

if (gcr0 == *hcr0) {
clr_cr_intercept(svm, INTERCEPT_CR0_READ);
@@ -1572,7 +1572,7 @@ void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
svm->vmcb->save.cr0 = cr0;
- mark_dirty(svm->vmcb, VMCB_CR);
+ vmcb_mark_dirty(svm->vmcb, VMCB_CR);
update_cr0_intercept(svm);
}

@@ -1592,7 +1592,7 @@ int svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
cr4 |= X86_CR4_PAE;
cr4 |= host_cr4_mce;
to_svm(vcpu)->vmcb->save.cr4 = cr4;
- mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
+ vmcb_mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
return 0;
}

@@ -1624,7 +1624,7 @@ static void svm_set_segment(struct kvm_vcpu *vcpu,
/* This is symmetric with svm_get_segment() */
svm->vmcb->save.cpl = (var->dpl & 3);

- mark_dirty(svm->vmcb, VMCB_SEG);
+ vmcb_mark_dirty(svm->vmcb, VMCB_SEG);
}

static void update_bp_intercept(struct kvm_vcpu *vcpu)
@@ -1651,7 +1651,7 @@ static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
svm->asid_generation = sd->asid_generation;
svm->vmcb->control.asid = sd->next_asid++;

- mark_dirty(svm->vmcb, VMCB_ASID);
+ vmcb_mark_dirty(svm->vmcb, VMCB_ASID);
}

static void svm_set_dr6(struct vcpu_svm *svm, unsigned long value)
@@ -1660,7 +1660,7 @@ static void svm_set_dr6(struct vcpu_svm *svm, unsigned long value)

if (unlikely(value != vmcb->save.dr6)) {
vmcb->save.dr6 = value;
- mark_dirty(vmcb, VMCB_DR);
+ vmcb_mark_dirty(vmcb, VMCB_DR);
}
}

@@ -1687,7 +1687,7 @@ static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value)
struct vcpu_svm *svm = to_svm(vcpu);

svm->vmcb->save.dr7 = value;
- mark_dirty(svm->vmcb, VMCB_DR);
+ vmcb_mark_dirty(svm->vmcb, VMCB_DR);
}

static int pf_interception(struct vcpu_svm *svm)
@@ -2512,7 +2512,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
return 1;
vcpu->arch.pat = data;
svm->vmcb->save.g_pat = data;
- mark_dirty(svm->vmcb, VMCB_NPT);
+ vmcb_mark_dirty(svm->vmcb, VMCB_NPT);
break;
case MSR_IA32_SPEC_CTRL:
if (!msr->host_initiated &&
@@ -2617,7 +2617,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
return 1;

svm->vmcb->save.dbgctl = data;
- mark_dirty(svm->vmcb, VMCB_LBR);
+ vmcb_mark_dirty(svm->vmcb, VMCB_LBR);
if (data & (1ULL<<0))
svm_enable_lbrv(svm);
else
@@ -3477,7 +3477,7 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
SVM_EXIT_EXCP_BASE + MC_VECTOR))
svm_handle_mce(svm);

- mark_all_clean(svm->vmcb);
+ vmcb_mark_all_clean(svm->vmcb);
return exit_fastpath;
}

@@ -3489,7 +3489,7 @@ static void svm_load_mmu_pgd(struct kvm_vcpu *vcpu, unsigned long root)
cr3 = __sme_set(root);
if (npt_enabled) {
svm->vmcb->control.nested_cr3 = cr3;
- mark_dirty(svm->vmcb, VMCB_NPT);
+ vmcb_mark_dirty(svm->vmcb, VMCB_NPT);

/* Loading L2's CR3 is handled by enter_svm_guest_mode. */
if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
@@ -3498,7 +3498,7 @@ static void svm_load_mmu_pgd(struct kvm_vcpu *vcpu, unsigned long root)
}

svm->vmcb->save.cr3 = cr3;
- mark_dirty(svm->vmcb, VMCB_CR);
+ vmcb_mark_dirty(svm->vmcb, VMCB_CR);
}

static int is_disabled(void)
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 6a1864a1f029..76116f5b0d01 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -188,18 +188,18 @@ static inline struct kvm_svm *to_kvm_svm(struct kvm *kvm)
return container_of(kvm, struct kvm_svm, kvm);
}

-static inline void mark_all_dirty(struct vmcb *vmcb)
+static inline void vmcb_mark_all_dirty(struct vmcb *vmcb)
{
vmcb->control.clean = 0;
}

-static inline void mark_all_clean(struct vmcb *vmcb)
+static inline void vmcb_mark_all_clean(struct vmcb *vmcb)
{
vmcb->control.clean = ((1 << VMCB_DIRTY_MAX) - 1)
& ~VMCB_ALWAYS_DIRTY_MASK;
}

-static inline void mark_dirty(struct vmcb *vmcb, int bit)
+static inline void vmcb_mark_dirty(struct vmcb *vmcb, int bit)
{
vmcb->control.clean &= ~(1 << bit);
}
@@ -420,7 +420,7 @@ extern int avic;
static inline void avic_update_vapic_bar(struct vcpu_svm *svm, u64 data)
{
svm->vmcb->control.avic_vapic_bar = data & VMCB_AVIC_APIC_BAR_MASK;
- mark_dirty(svm->vmcb, VMCB_AVIC);
+ vmcb_mark_dirty(svm->vmcb, VMCB_AVIC);
}

static inline bool avic_vcpu_is_running(struct kvm_vcpu *vcpu)
--
2.27.0
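
For reference, these helpers drive the VMCB "clean bits" mechanism: whenever KVM
modifies a VMCB field it clears the corresponding bit in vmcb->control.clean, so
the CPU reloads that field on the next VMRUN instead of using its cached copy.
A minimal usage sketch (the function name is only illustrative, mirroring
avic_update_vapic_bar() from the hunk above):

static inline void example_set_vapic_bar(struct vcpu_svm *svm, u64 data)
{
	/* Write an AVIC-related VMCB field ... */
	svm->vmcb->control.avic_vapic_bar = data & VMCB_AVIC_APIC_BAR_MASK;
	/* ... and invalidate the cached VMCB_AVIC state for the next VMRUN. */
	vmcb_mark_dirty(svm->vmcb, VMCB_AVIC);
}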

2020-06-25 08:06:28

by Joerg Roedel

Subject: [PATCH 1/4] KVM: SVM: Rename struct nested_state to svm_nested_state

From: Joerg Roedel <[email protected]>

Renaming is only needed in the svm.h header file.

No functional changes.

Signed-off-by: Joerg Roedel <[email protected]>
---
arch/x86/kvm/svm/svm.h | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 6ac4c00a5d82..6a1864a1f029 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -81,7 +81,7 @@ struct kvm_svm {

struct kvm_vcpu;

-struct nested_state {
+struct svm_nested_state {
struct vmcb *hsave;
u64 hsave_msr;
u64 vm_cr_msr;
@@ -133,7 +133,7 @@ struct vcpu_svm {

ulong nmi_iret_rip;

- struct nested_state nested;
+ struct svm_nested_state nested;

bool nmi_singlestep;
u64 nmi_singlestep_guest_rflags;
--
2.27.0

2020-06-25 08:08:03

by Joerg Roedel

Subject: [PATCH 3/4] KVM: SVM: Add svm_ prefix to set/clr/is_intercept()

From: Joerg Roedel <[email protected]>

Make it clear that these symbols belong to the SVM code when they are built in.

No functional changes.

Signed-off-by: Joerg Roedel <[email protected]>
---
arch/x86/kvm/svm/nested.c | 2 +-
arch/x86/kvm/svm/svm.c | 88 +++++++++++++++++++--------------------
arch/x86/kvm/svm/svm.h | 6 +--
3 files changed, 48 insertions(+), 48 deletions(-)

diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 9845fab381bc..8fc59ce75182 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -258,7 +258,7 @@ void sync_nested_vmcb_control(struct vcpu_svm *svm)
/* Only a few fields of int_ctl are written by the processor. */
mask = V_IRQ_MASK | V_TPR_MASK;
if (!(svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK) &&
- is_intercept(svm, INTERCEPT_VINTR)) {
+ svm_is_intercept(svm, INTERCEPT_VINTR)) {
/*
* In order to request an interrupt window, L0 is usurping
* svm->vmcb->control.int_ctl and possibly setting V_IRQ
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 4d9e19bb888f..e0b0321d95d4 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -1002,38 +1002,38 @@ static void init_vmcb(struct vcpu_svm *svm)
if (enable_vmware_backdoor)
set_exception_intercept(svm, GP_VECTOR);

- set_intercept(svm, INTERCEPT_INTR);
- set_intercept(svm, INTERCEPT_NMI);
- set_intercept(svm, INTERCEPT_SMI);
- set_intercept(svm, INTERCEPT_SELECTIVE_CR0);
- set_intercept(svm, INTERCEPT_RDPMC);
- set_intercept(svm, INTERCEPT_CPUID);
- set_intercept(svm, INTERCEPT_INVD);
- set_intercept(svm, INTERCEPT_INVLPG);
- set_intercept(svm, INTERCEPT_INVLPGA);
- set_intercept(svm, INTERCEPT_IOIO_PROT);
- set_intercept(svm, INTERCEPT_MSR_PROT);
- set_intercept(svm, INTERCEPT_TASK_SWITCH);
- set_intercept(svm, INTERCEPT_SHUTDOWN);
- set_intercept(svm, INTERCEPT_VMRUN);
- set_intercept(svm, INTERCEPT_VMMCALL);
- set_intercept(svm, INTERCEPT_VMLOAD);
- set_intercept(svm, INTERCEPT_VMSAVE);
- set_intercept(svm, INTERCEPT_STGI);
- set_intercept(svm, INTERCEPT_CLGI);
- set_intercept(svm, INTERCEPT_SKINIT);
- set_intercept(svm, INTERCEPT_WBINVD);
- set_intercept(svm, INTERCEPT_XSETBV);
- set_intercept(svm, INTERCEPT_RDPRU);
- set_intercept(svm, INTERCEPT_RSM);
+ svm_set_intercept(svm, INTERCEPT_INTR);
+ svm_set_intercept(svm, INTERCEPT_NMI);
+ svm_set_intercept(svm, INTERCEPT_SMI);
+ svm_set_intercept(svm, INTERCEPT_SELECTIVE_CR0);
+ svm_set_intercept(svm, INTERCEPT_RDPMC);
+ svm_set_intercept(svm, INTERCEPT_CPUID);
+ svm_set_intercept(svm, INTERCEPT_INVD);
+ svm_set_intercept(svm, INTERCEPT_INVLPG);
+ svm_set_intercept(svm, INTERCEPT_INVLPGA);
+ svm_set_intercept(svm, INTERCEPT_IOIO_PROT);
+ svm_set_intercept(svm, INTERCEPT_MSR_PROT);
+ svm_set_intercept(svm, INTERCEPT_TASK_SWITCH);
+ svm_set_intercept(svm, INTERCEPT_SHUTDOWN);
+ svm_set_intercept(svm, INTERCEPT_VMRUN);
+ svm_set_intercept(svm, INTERCEPT_VMMCALL);
+ svm_set_intercept(svm, INTERCEPT_VMLOAD);
+ svm_set_intercept(svm, INTERCEPT_VMSAVE);
+ svm_set_intercept(svm, INTERCEPT_STGI);
+ svm_set_intercept(svm, INTERCEPT_CLGI);
+ svm_set_intercept(svm, INTERCEPT_SKINIT);
+ svm_set_intercept(svm, INTERCEPT_WBINVD);
+ svm_set_intercept(svm, INTERCEPT_XSETBV);
+ svm_set_intercept(svm, INTERCEPT_RDPRU);
+ svm_set_intercept(svm, INTERCEPT_RSM);

if (!kvm_mwait_in_guest(svm->vcpu.kvm)) {
- set_intercept(svm, INTERCEPT_MONITOR);
- set_intercept(svm, INTERCEPT_MWAIT);
+ svm_set_intercept(svm, INTERCEPT_MONITOR);
+ svm_set_intercept(svm, INTERCEPT_MWAIT);
}

if (!kvm_hlt_in_guest(svm->vcpu.kvm))
- set_intercept(svm, INTERCEPT_HLT);
+ svm_set_intercept(svm, INTERCEPT_HLT);

control->iopm_base_pa = __sme_set(iopm_base);
control->msrpm_base_pa = __sme_set(__pa(svm->msrpm));
@@ -1077,7 +1077,7 @@ static void init_vmcb(struct vcpu_svm *svm)
if (npt_enabled) {
/* Setup VMCB for Nested Paging */
control->nested_ctl |= SVM_NESTED_CTL_NP_ENABLE;
- clr_intercept(svm, INTERCEPT_INVLPG);
+ svm_clr_intercept(svm, INTERCEPT_INVLPG);
clr_exception_intercept(svm, PF_VECTOR);
clr_cr_intercept(svm, INTERCEPT_CR3_READ);
clr_cr_intercept(svm, INTERCEPT_CR3_WRITE);
@@ -1094,9 +1094,9 @@ static void init_vmcb(struct vcpu_svm *svm)
control->pause_filter_count = pause_filter_count;
if (pause_filter_thresh)
control->pause_filter_thresh = pause_filter_thresh;
- set_intercept(svm, INTERCEPT_PAUSE);
+ svm_set_intercept(svm, INTERCEPT_PAUSE);
} else {
- clr_intercept(svm, INTERCEPT_PAUSE);
+ svm_clr_intercept(svm, INTERCEPT_PAUSE);
}

if (kvm_vcpu_apicv_active(&svm->vcpu))
@@ -1107,14 +1107,14 @@ static void init_vmcb(struct vcpu_svm *svm)
* in VMCB and clear intercepts to avoid #VMEXIT.
*/
if (vls) {
- clr_intercept(svm, INTERCEPT_VMLOAD);
- clr_intercept(svm, INTERCEPT_VMSAVE);
+ svm_clr_intercept(svm, INTERCEPT_VMLOAD);
+ svm_clr_intercept(svm, INTERCEPT_VMSAVE);
svm->vmcb->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;
}

if (vgif) {
- clr_intercept(svm, INTERCEPT_STGI);
- clr_intercept(svm, INTERCEPT_CLGI);
+ svm_clr_intercept(svm, INTERCEPT_STGI);
+ svm_clr_intercept(svm, INTERCEPT_CLGI);
svm->vmcb->control.int_ctl |= V_GIF_ENABLE_MASK;
}

@@ -1356,7 +1356,7 @@ static void svm_set_vintr(struct vcpu_svm *svm)

/* The following fields are ignored when AVIC is enabled */
WARN_ON(kvm_vcpu_apicv_active(&svm->vcpu));
- set_intercept(svm, INTERCEPT_VINTR);
+ svm_set_intercept(svm, INTERCEPT_VINTR);

/*
* This is just a dummy VINTR to actually cause a vmexit to happen.
@@ -1373,7 +1373,7 @@ static void svm_set_vintr(struct vcpu_svm *svm)
static void svm_clear_vintr(struct vcpu_svm *svm)
{
const u32 mask = V_TPR_MASK | V_GIF_ENABLE_MASK | V_GIF_MASK | V_INTR_MASKING_MASK;
- clr_intercept(svm, INTERCEPT_VINTR);
+ svm_clr_intercept(svm, INTERCEPT_VINTR);

/* Drop int_ctl fields related to VINTR injection. */
svm->vmcb->control.int_ctl &= mask;
@@ -2000,8 +2000,8 @@ void svm_set_gif(struct vcpu_svm *svm, bool value)
* again while processing KVM_REQ_EVENT if needed.
*/
if (vgif_enabled(svm))
- clr_intercept(svm, INTERCEPT_STGI);
- if (is_intercept(svm, INTERCEPT_VINTR))
+ svm_clr_intercept(svm, INTERCEPT_STGI);
+ if (svm_is_intercept(svm, INTERCEPT_VINTR))
svm_clear_vintr(svm);

enable_gif(svm);
@@ -2162,7 +2162,7 @@ static int cpuid_interception(struct vcpu_svm *svm)
static int iret_interception(struct vcpu_svm *svm)
{
++svm->vcpu.stat.nmi_window_exits;
- clr_intercept(svm, INTERCEPT_IRET);
+ svm_clr_intercept(svm, INTERCEPT_IRET);
svm->vcpu.arch.hflags |= HF_IRET_MASK;
svm->nmi_iret_rip = kvm_rip_read(&svm->vcpu);
kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
@@ -3019,7 +3019,7 @@ static void svm_inject_nmi(struct kvm_vcpu *vcpu)

svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
vcpu->arch.hflags |= HF_NMI_MASK;
- set_intercept(svm, INTERCEPT_IRET);
+ svm_set_intercept(svm, INTERCEPT_IRET);
++vcpu->stat.nmi_injections;
}

@@ -3096,10 +3096,10 @@ static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)

if (masked) {
svm->vcpu.arch.hflags |= HF_NMI_MASK;
- set_intercept(svm, INTERCEPT_IRET);
+ svm_set_intercept(svm, INTERCEPT_IRET);
} else {
svm->vcpu.arch.hflags &= ~HF_NMI_MASK;
- clr_intercept(svm, INTERCEPT_IRET);
+ svm_clr_intercept(svm, INTERCEPT_IRET);
}
}

@@ -3179,7 +3179,7 @@ static void enable_nmi_window(struct kvm_vcpu *vcpu)

if (!gif_set(svm)) {
if (vgif_enabled(svm))
- set_intercept(svm, INTERCEPT_STGI);
+ svm_set_intercept(svm, INTERCEPT_STGI);
return; /* STGI will cause a vm exit */
}

@@ -3863,7 +3863,7 @@ static void enable_smi_window(struct kvm_vcpu *vcpu)

if (!gif_set(svm)) {
if (vgif_enabled(svm))
- set_intercept(svm, INTERCEPT_STGI);
+ svm_set_intercept(svm, INTERCEPT_STGI);
/* STGI will cause a vm exit */
} else {
/* We must be in SMM; RSM will cause a vmexit anyway. */
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 76116f5b0d01..003e89008331 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -293,7 +293,7 @@ static inline void clr_exception_intercept(struct vcpu_svm *svm, int bit)
recalc_intercepts(svm);
}

-static inline void set_intercept(struct vcpu_svm *svm, int bit)
+static inline void svm_set_intercept(struct vcpu_svm *svm, int bit)
{
struct vmcb *vmcb = get_host_vmcb(svm);

@@ -302,7 +302,7 @@ static inline void set_intercept(struct vcpu_svm *svm, int bit)
recalc_intercepts(svm);
}

-static inline void clr_intercept(struct vcpu_svm *svm, int bit)
+static inline void svm_clr_intercept(struct vcpu_svm *svm, int bit)
{
struct vmcb *vmcb = get_host_vmcb(svm);

@@ -311,7 +311,7 @@ static inline void clr_intercept(struct vcpu_svm *svm, int bit)
recalc_intercepts(svm);
}

-static inline bool is_intercept(struct vcpu_svm *svm, int bit)
+static inline bool svm_is_intercept(struct vcpu_svm *svm, int bit)
{
return (svm->vmcb->control.intercept & (1ULL << bit)) != 0;
}
--
2.27.0
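
For reference, the renamed helpers set or clear a bit in the intercept vector of
the host VMCB (the L1 control area while a nested guest runs) and then call
recalc_intercepts() to recompute the effective intercepts, while svm_is_intercept()
tests the currently active VMCB. A small sketch of the usage pattern (the toggle
function is hypothetical, not part of the patch):

static void example_toggle_hlt_intercept(struct vcpu_svm *svm, bool want_exit)
{
	if (want_exit)
		svm_set_intercept(svm, INTERCEPT_HLT);	/* HLT now causes a #VMEXIT */
	else
		svm_clr_intercept(svm, INTERCEPT_HLT);	/* let the guest halt directly */
}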

2020-06-25 10:00:11

by Vitaly Kuznetsov

Subject: Re: [PATCH 0/4] KVM: SVM: Code move follow-up

Joerg Roedel <[email protected]> writes:

> From: Joerg Roedel <[email protected]>
>
> Hi,
>
> here is small series to follow-up on the review comments for moving
> the kvm-amd module code to its own sub-directory. The comments were
> only about renaming structs and symbols, so there are no functional
> changes in these patches.
>
> The comments addressed here are all from [1].
>
> Regards,
>
> Joerg
>
> [1] https://lore.kernel.org/lkml/[email protected]/
>

Thank you for the follow-up!

> Joerg Roedel (4):
> KVM: SVM: Rename struct nested_state to svm_nested_state
> KVM: SVM: Add vmcb_ prefix to mark_*() functions
> KVM: SVM: Add svm_ prefix to set/clr/is_intercept()
> KVM: SVM: Rename svm_nested_virtualize_tpr() to
> nested_svm_virtualize_tpr()
>
> arch/x86/kvm/svm/avic.c | 2 +-
> arch/x86/kvm/svm/nested.c | 8 +--
> arch/x86/kvm/svm/sev.c | 2 +-
> arch/x86/kvm/svm/svm.c | 138 +++++++++++++++++++-------------------
> arch/x86/kvm/svm/svm.h | 20 +++---
> 5 files changed, 85 insertions(+), 85 deletions(-)

Series:
Reviewed-by: Vitaly Kuznetsov <[email protected]>

--
Vitaly

2020-06-25 12:24:39

by Paolo Bonzini

Subject: Re: [PATCH 0/4] KVM: SVM: Code move follow-up

On 25/06/20 11:34, Vitaly Kuznetsov wrote:
> Joerg Roedel <[email protected]> writes:
>
>> From: Joerg Roedel <[email protected]>
>>
>> Hi,
>>
>> here is small series to follow-up on the review comments for moving
>> the kvm-amd module code to its own sub-directory. The comments were
>> only about renaming structs and symbols, so there are no functional
>> changes in these patches.
>>
>> The comments addressed here are all from [1].
>>
>> Regards,
>>
>> Joerg
>>
>> [1] https://lore.kernel.org/lkml/[email protected]/
>>

Queued, thanks.

Paolo

> Thank you for the follow-up!
>
>> Joerg Roedel (4):
>> KVM: SVM: Rename struct nested_state to svm_nested_state
>> KVM: SVM: Add vmcb_ prefix to mark_*() functions
>> KVM: SVM: Add svm_ prefix to set/clr/is_intercept()
>> KVM: SVM: Rename svm_nested_virtualize_tpr() to
>> nested_svm_virtualize_tpr()
>>
>> arch/x86/kvm/svm/avic.c | 2 +-
>> arch/x86/kvm/svm/nested.c | 8 +--
>> arch/x86/kvm/svm/sev.c | 2 +-
>> arch/x86/kvm/svm/svm.c | 138 +++++++++++++++++++-------------------
>> arch/x86/kvm/svm/svm.h | 20 +++---
>> 5 files changed, 85 insertions(+), 85 deletions(-)
>
> Series:
> Reviewed-by: Vitaly Kuznetsov <[email protected]>
>

2020-06-26 00:01:01

by Krish Sadhukhan

Subject: Re: [PATCH 0/4] KVM: SVM: Code move follow-up


On 6/25/20 1:03 AM, Joerg Roedel wrote:
> From: Joerg Roedel <[email protected]>
>
> Hi,
>
> here is small series to follow-up on the review comments for moving
> the kvm-amd module code to its own sub-directory. The comments were
> only about renaming structs and symbols, so there are no functional
> changes in these patches.
>
> The comments addressed here are all from [1].
>
> Regards,
>
> Joerg
>
> [1] https://lore.kernel.org/lkml/[email protected]/
>
> Joerg Roedel (4):
> KVM: SVM: Rename struct nested_state to svm_nested_state
> KVM: SVM: Add vmcb_ prefix to mark_*() functions
> KVM: SVM: Add svm_ prefix to set/clr/is_intercept()
> KVM: SVM: Rename svm_nested_virtualize_tpr() to
> nested_svm_virtualize_tpr()
>
> arch/x86/kvm/svm/avic.c | 2 +-
> arch/x86/kvm/svm/nested.c | 8 +--
> arch/x86/kvm/svm/sev.c | 2 +-
> arch/x86/kvm/svm/svm.c | 138 +++++++++++++++++++-------------------
> arch/x86/kvm/svm/svm.h | 20 +++---
> 5 files changed, 85 insertions(+), 85 deletions(-)
>
Reviewed-by: Krish Sadhukhan <[email protected]>

2020-06-26 00:01:28

by Krish Sadhukhan

Subject: Re: [PATCH 4/4] KVM: SVM: Rename svm_nested_virtualize_tpr() to nested_svm_virtualize_tpr()


On 6/25/20 1:03 AM, Joerg Roedel wrote:
> From: Joerg Roedel <[email protected]>
>
> Match the naming with other nested svm functions.
>
> No functional changes.
>
> Signed-off-by: Joerg Roedel <[email protected]>
> ---
> arch/x86/kvm/svm/svm.c | 6 +++---
> arch/x86/kvm/svm/svm.h | 2 +-
> 2 files changed, 4 insertions(+), 4 deletions(-)
>
> diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
> index e0b0321d95d4..b0d551bcf2a0 100644
> --- a/arch/x86/kvm/svm/svm.c
> +++ b/arch/x86/kvm/svm/svm.c
> @@ -3040,7 +3040,7 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
> {
> struct vcpu_svm *svm = to_svm(vcpu);
>
> - if (svm_nested_virtualize_tpr(vcpu))
> + if (nested_svm_virtualize_tpr(vcpu))
> return;
>
> clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
> @@ -3234,7 +3234,7 @@ static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
> {
> struct vcpu_svm *svm = to_svm(vcpu);
>
> - if (svm_nested_virtualize_tpr(vcpu))
> + if (nested_svm_virtualize_tpr(vcpu))
> return;
>
> if (!is_cr_intercept(svm, INTERCEPT_CR8_WRITE)) {
> @@ -3248,7 +3248,7 @@ static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
> struct vcpu_svm *svm = to_svm(vcpu);
> u64 cr8;
>
> - if (svm_nested_virtualize_tpr(vcpu) ||
> + if (nested_svm_virtualize_tpr(vcpu) ||
> kvm_vcpu_apicv_active(vcpu))
> return;
>
> diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
> index 003e89008331..8907efda0b0a 100644
> --- a/arch/x86/kvm/svm/svm.h
> +++ b/arch/x86/kvm/svm/svm.h
> @@ -365,7 +365,7 @@ void svm_set_gif(struct vcpu_svm *svm, bool value);
> #define NESTED_EXIT_DONE 1 /* Exit caused nested vmexit */
> #define NESTED_EXIT_CONTINUE 2 /* Further checks needed */
>
> -static inline bool svm_nested_virtualize_tpr(struct kvm_vcpu *vcpu)
> +static inline bool nested_svm_virtualize_tpr(struct kvm_vcpu *vcpu)
> {
> struct vcpu_svm *svm = to_svm(vcpu);
>
Should the name be changed to svm_nested_virtualized_tpr? Because
svm_nested_virtualize_tpr implies that the action of the function is to
virtualize the TPR.