2022-12-02 19:18:49

by Paolo Bonzini

Subject: [PATCH] KVM: x86: remove unnecessary exports

Several symbols are not used by vendor modules but still exported.
Removing them ensures that new coupling between kvm.ko and kvm-*.ko
is noticed and reviewed.
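
For context, a minimal sketch of the mechanism this relies on (hypothetical
file and function names, not actual KVM code): a symbol compiled into kvm.ko
is only reachable from kvm-intel.ko/kvm-amd.ko if it carries an export, so
any new vendor-module use of an unexported symbol fails at module link time
and the export has to be re-added in a patch, which is what forces the new
coupling to be noticed and reviewed.

	/* core.c -- compiled into kvm.ko (illustrative only) */
	#include <linux/export.h>
	#include <linux/module.h>

	int example_core_helper(int x)
	{
		return x + 1;
	}
	/* Dropping this line hides the symbol from other modules. */
	EXPORT_SYMBOL_GPL(example_core_helper);

	/* vendor.c -- compiled into a separate module, e.g. kvm-amd.ko */
	#include <linux/errno.h>
	#include <linux/module.h>

	int example_core_helper(int x);

	static int __init example_vendor_init(void)
	{
		/*
		 * Resolving this call needs the EXPORT_SYMBOL_GPL() above;
		 * without it, modpost flags the symbol as undefined and the
		 * module cannot be loaded.
		 */
		return example_core_helper(41) == 42 ? 0 : -EINVAL;
	}
	module_init(example_vendor_init);
	MODULE_LICENSE("GPL");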

Co-developed-by: Sean Christopherson <[email protected]>
Co-developed-by: Like Xu <[email protected]>
Signed-off-by: Sean Christopherson <[email protected]>
Signed-off-by: Like Xu <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
---
arch/x86/kvm/hyperv.c | 1 -
arch/x86/kvm/irq.c | 2 --
arch/x86/kvm/lapic.c | 3 ---
arch/x86/kvm/x86.c | 8 --------
4 files changed, 14 deletions(-)

diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 2c7f2a26421e..cc3e8c7d0850 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -898,7 +898,6 @@ bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu)
return false;
return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED;
}
-EXPORT_SYMBOL_GPL(kvm_hv_assist_page_enabled);

int kvm_hv_get_assist_page(struct kvm_vcpu *vcpu)
{
diff --git a/arch/x86/kvm/irq.c b/arch/x86/kvm/irq.c
index d8d50558f165..a70952eca905 100644
--- a/arch/x86/kvm/irq.c
+++ b/arch/x86/kvm/irq.c
@@ -31,7 +31,6 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)

return r;
}
-EXPORT_SYMBOL(kvm_cpu_has_pending_timer);

/*
* check if there is a pending userspace external interrupt
@@ -150,7 +149,6 @@ void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu)
if (kvm_xen_timer_enabled(vcpu))
kvm_xen_inject_timer_irqs(vcpu);
}
-EXPORT_SYMBOL_GPL(kvm_inject_pending_timer_irqs);

void __kvm_migrate_timers(struct kvm_vcpu *vcpu)
{
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 8224ac8b617a..4efdb4a4d72c 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -160,7 +160,6 @@ bool kvm_can_use_hv_timer(struct kvm_vcpu *vcpu)
&& !(kvm_mwait_in_guest(vcpu->kvm) ||
kvm_can_post_timer_interrupt(vcpu));
}
-EXPORT_SYMBOL_GPL(kvm_can_use_hv_timer);

static bool kvm_use_posted_timer_interrupt(struct kvm_vcpu *vcpu)
{
@@ -1914,7 +1913,6 @@ bool kvm_lapic_hv_timer_in_use(struct kvm_vcpu *vcpu)

return vcpu->arch.apic->lapic_timer.hv_timer_in_use;
}
-EXPORT_SYMBOL_GPL(kvm_lapic_hv_timer_in_use);

static void cancel_hv_timer(struct kvm_lapic *apic)
{
@@ -2432,7 +2430,6 @@ void kvm_apic_update_apicv(struct kvm_vcpu *vcpu)
apic->isr_count = count_vectors(apic->regs + APIC_ISR);
}
}
-EXPORT_SYMBOL_GPL(kvm_apic_update_apicv);

void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
{
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 152ea4993b76..4825773886f9 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -463,7 +463,6 @@ u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
{
return vcpu->arch.apic_base;
}
-EXPORT_SYMBOL_GPL(kvm_get_apic_base);

enum lapic_mode kvm_get_apic_mode(struct kvm_vcpu *vcpu)
{
@@ -491,7 +490,6 @@ int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
kvm_recalculate_apic_map(vcpu->kvm);
return 0;
}
-EXPORT_SYMBOL_GPL(kvm_set_apic_base);

/*
* Handle a fault on a hardware virtualization (VMX or SVM) instruction.
@@ -782,7 +780,6 @@ void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
kvm_queue_exception_e_p(vcpu, PF_VECTOR, fault->error_code,
fault->address);
}
-EXPORT_SYMBOL_GPL(kvm_inject_page_fault);

void kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu,
struct x86_exception *fault)
@@ -811,7 +808,6 @@ void kvm_inject_nmi(struct kvm_vcpu *vcpu)
atomic_inc(&vcpu->arch.nmi_queued);
kvm_make_request(KVM_REQ_NMI, vcpu);
}
-EXPORT_SYMBOL_GPL(kvm_inject_nmi);

void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
@@ -836,7 +832,6 @@ bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl)
kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
return false;
}
-EXPORT_SYMBOL_GPL(kvm_require_cpl);

bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr)
{
@@ -2069,7 +2064,6 @@ int kvm_emulate_as_nop(struct kvm_vcpu *vcpu)
{
return kvm_skip_emulated_instruction(vcpu);
}
-EXPORT_SYMBOL_GPL(kvm_emulate_as_nop);

int kvm_emulate_invd(struct kvm_vcpu *vcpu)
{
@@ -2515,7 +2509,6 @@ u64 kvm_scale_tsc(u64 tsc, u64 ratio)

return _tsc;
}
-EXPORT_SYMBOL_GPL(kvm_scale_tsc);

static u64 kvm_compute_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
{
@@ -12068,7 +12061,6 @@ bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu)
{
return vcpu->kvm->arch.bsp_vcpu_id == vcpu->vcpu_id;
}
-EXPORT_SYMBOL_GPL(kvm_vcpu_is_reset_bsp);

bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
{
--
2.31.1


2022-12-05 12:06:22

by Like Xu

Subject: Re: [PATCH] KVM: x86: remove unnecessary exports

On 3/12/2022 2:37 am, Paolo Bonzini wrote:
> --- a/arch/x86/kvm/hyperv.c
> +++ b/arch/x86/kvm/hyperv.c
> @@ -898,7 +898,6 @@ bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu)
> return false;
> return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED;
> }
> -EXPORT_SYMBOL_GPL(kvm_hv_assist_page_enabled);

kvm_hv_verify_vp_assist(), which calls kvm_hv_assist_page_enabled(), is
used by the SVM code, so deleting this export now causes a link failure
for kvm-amd.ko. The first version of this patch did not hit such a simple
error, so I assume in-flight changes from another discussion thread were
merged in on top.
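
To make the failing dependency concrete, a simplified sketch (hypothetical
helper names; here the check is assumed to be a static inline in a header
shared with the vendor modules): the inline body is expanded into its
callers, so the call to kvm_hv_assist_page_enabled() is emitted inside
kvm-amd.ko itself, which is why kvm-amd.ko needs the export even though
the function is defined in kvm.ko.

	/* shared header (sketch), included by both kvm.ko and kvm-amd.ko */
	struct kvm_vcpu;
	bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu); /* in kvm.ko */

	static inline bool example_verify_vp_assist(struct kvm_vcpu *vcpu)
	{
		/*
		 * The inline body is compiled into every caller, so the
		 * reference to kvm_hv_assist_page_enabled() ends up in the
		 * calling module.
		 */
		return kvm_hv_assist_page_enabled(vcpu);
	}

	/* SVM-side caller (sketch), compiled into kvm-amd.ko */
	static bool example_nested_check(struct kvm_vcpu *vcpu)
	{
		/* Links only while kvm_hv_assist_page_enabled() is exported. */
		return example_verify_vp_assist(vcpu);
	}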

We could simply add this export back to fix the fallout of that merge.