From: David Matlack
Date: Mon, 8 Jun 2015 17:36:16 -0700
Subject: Re: [PATCH 12/15] KVM: MTRR: introduce mtrr_for_each_mem_type
To: Xiao Guangrong
Cc: Paolo Bonzini, Gleb Natapov, Marcelo Tosatti, kvm list, linux-kernel@vger.kernel.org
In-Reply-To: <1432983566-15773-13-git-send-email-guangrong.xiao@linux.intel.com>

On Sat, May 30, 2015 at 3:59 AM, Xiao Guangrong wrote:
> It walks all MTRRs and gets all the memory cache type settings for the
> specified range; it also checks whether the range is fully covered by
> MTRRs.
>
> Signed-off-by: Xiao Guangrong
> ---
>  arch/x86/kvm/mtrr.c | 183 ++++++++++++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 183 insertions(+)
>
> diff --git a/arch/x86/kvm/mtrr.c b/arch/x86/kvm/mtrr.c
> index e59d138..35f86303 100644
> --- a/arch/x86/kvm/mtrr.c
> +++ b/arch/x86/kvm/mtrr.c
> @@ -395,6 +395,189 @@ void kvm_vcpu_mtrr_init(struct kvm_vcpu *vcpu)
>  	INIT_LIST_HEAD(&vcpu->arch.mtrr_state.head);
>  }
>
> +struct mtrr_looker {
> +	/* input fields. */
> +	struct kvm_mtrr *mtrr_state;
> +	u64 start;
> +	u64 end;
> +
> +	/* output fields. */
> +	int mem_type;
> +	/* [start, end) is fully covered in MTRRs? */

s/fully/not fully/ ?

> +	bool partial_map;
> +
> +	/* private fields. */
> +	union {
> +		/* used for fixed MTRRs. */
> +		struct {
> +			int index;
> +			int seg;
> +		};
> +
> +		/* used for var MTRRs. */
> +		struct {
> +			struct kvm_mtrr_range *range;
> +			/* max address has been covered in var MTRRs. */
> +			u64 start_max;
> +		};
> +	};
> +
> +	bool fixed;
> +};
> +
> +static void mtrr_lookup_init(struct mtrr_looker *looker,
> +			     struct kvm_mtrr *mtrr_state, u64 start, u64 end)
> +{
> +	looker->mtrr_state = mtrr_state;
> +	looker->start = start;
> +	looker->end = end;
> +}
> +
> +static u64 fixed_mtrr_range_end_addr(int seg, int index)
> +{
> +	struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg];
> +
> +	return mtrr_seg->start + mtrr_seg->range_size * index;

Should be (index + 1)?
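If so, something like this is probably what's wanted (untested sketch on
my end, using the names from this patch):

static u64 fixed_mtrr_range_end_addr(int seg, int index)
{
	struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg];

	/* range 'index' ends one full range_size past its own start. */
	return mtrr_seg->start + mtrr_seg->range_size * (index + 1);
}

Unless I'm misreading, the current version returns the *start* of range
'index' (segment start plus 'index' full ranges), so
mtrr_lookup_fixed_start() below would record the start of the matched
range in looker->start rather than its end, and the walk would not
advance past it.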
> +}
> +
> +static bool mtrr_lookup_fixed_start(struct mtrr_looker *looker)
> +{
> +	int seg, index;
> +
> +	if (!looker->mtrr_state->fixed_mtrr_enabled)
> +		return false;
> +
> +	seg = fixed_mtrr_addr_to_seg(looker->start);
> +	if (seg < 0)
> +		return false;
> +
> +	looker->fixed = true;
> +	index = fixed_mtrr_addr_seg_to_range_index(looker->start, seg);
> +	looker->index = index;
> +	looker->seg = seg;
> +	looker->mem_type = looker->mtrr_state->fixed_ranges[index];
> +	looker->start = fixed_mtrr_range_end_addr(seg, index);
> +	return true;
> +}
> +
> +static bool match_var_range(struct mtrr_looker *looker,
> +			    struct kvm_mtrr_range *range)
> +{
> +	u64 start, end;
> +
> +	var_mtrr_range(range, &start, &end);
> +	if (!(start >= looker->end || end <= looker->start)) {
> +		looker->range = range;
> +		looker->mem_type = range->base & 0xff;
> +
> +		/*
> +		 * the function is called when we do kvm_mtrr.head walking
> +		 * that means range has the minimum base address interleaves
> +		 * with [looker->start_max, looker->end).
> +		 */

I'm having trouble understanding this comment. I think this is what you
are trying to say:

  this function is called when we do kvm_mtrr.head walking. range has
  the minimum base address which interleaves [looker->start_max,
  looker->end).

Let me know if I parsed it wrong.
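If that reading is right, maybe the comment could say it directly,
something along these lines (just a suggested rewording on my part,
assuming kvm_mtrr.head is kept sorted by base address):

		/*
		 * Called while walking the sorted kvm_mtrr.head list, so
		 * 'range' is the variable MTRR with the lowest base
		 * address that overlaps [looker->start_max, looker->end).
		 */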
> +		looker->partial_map |= looker->start_max < start;
> +
> +		/* update the max address has been covered. */
> +		looker->start_max = max(looker->start_max, end);
> +		return true;
> +	}
> +
> +	return false;
> +}
> +
> +static void mtrr_lookup_var_start(struct mtrr_looker *looker)
> +{
> +	struct kvm_mtrr *mtrr_state = looker->mtrr_state;
> +	struct kvm_mtrr_range *range;
> +
> +	looker->fixed = false;
> +	looker->partial_map = false;
> +	looker->start_max = looker->start;
> +	looker->mem_type = -1;
> +
> +	list_for_each_entry(range, &mtrr_state->head, node)
> +		if (match_var_range(looker, range))
> +			return;
> +
> +	looker->partial_map = true;
> +}
> +
> +static void mtrr_lookup_fixed_next(struct mtrr_looker *looker)
> +{
> +	struct fixed_mtrr_segment *eseg = &fixed_seg_table[looker->seg];
> +	struct kvm_mtrr *mtrr_state = looker->mtrr_state;
> +	u64 end;
> +
> +	if (looker->start >= looker->end) {
> +		looker->mem_type = -1;
> +		looker->partial_map = false;
> +		return;
> +	}
> +
> +	WARN_ON(!looker->fixed);
> +
> +	looker->index++;
> +	end = fixed_mtrr_range_end_addr(looker->seg, looker->index);
> +
> +	/* switch to next segment. */
> +	if (end >= eseg->end) {
> +		looker->seg++;
> +		looker->index = 0;
> +
> +		/* have looked up for all fixed MTRRs. */
> +		if (looker->seg >= ARRAY_SIZE(fixed_seg_table))
> +			return mtrr_lookup_var_start(looker);
> +
> +		end = fixed_mtrr_range_end_addr(looker->seg, looker->index);
> +	}
> +
> +	looker->mem_type = mtrr_state->fixed_ranges[looker->index];
> +	looker->start = end;
> +}
> +
> +static void mtrr_lookup_var_next(struct mtrr_looker *looker)
> +{
> +	struct kvm_mtrr *mtrr_state = looker->mtrr_state;
> +
> +	WARN_ON(looker->fixed);
> +
> +	looker->mem_type = -1;
> +
> +	list_for_each_entry_continue(looker->range, &mtrr_state->head, node)
> +		if (match_var_range(looker, looker->range))
> +			return;
> +
> +	looker->partial_map |= looker->start_max < looker->end;
> +}
> +
> +static void mtrr_lookup_start(struct mtrr_looker *looker)
> +{
> +	looker->mem_type = -1;
> +
> +	if (!looker->mtrr_state->mtrr_enabled) {
> +		looker->partial_map = true;
> +		return;
> +	}
> +
> +	if (!mtrr_lookup_fixed_start(looker))
> +		mtrr_lookup_var_start(looker);
> +}
> +
> +static void mtrr_lookup_next(struct mtrr_looker *looker)
> +{
> +	WARN_ON(looker->mem_type == -1);
> +
> +	if (looker->fixed)
> +		mtrr_lookup_fixed_next(looker);
> +	else
> +		mtrr_lookup_var_next(looker);
> +}
> +
> +#define mtrr_for_each_mem_type(_looker_, _mtrr_, _gpa_start_, _gpa_end_) \
> +	for (mtrr_lookup_init(_looker_, _mtrr_, _gpa_start_, _gpa_end_), \
> +	     mtrr_lookup_start(_looker_); (_looker_)->mem_type != -1; \
> +	     mtrr_lookup_next(_looker_))
> +
>  u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
>  {
>  	struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
> --
> 2.1.0
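One more thing, mostly to check my understanding of the iterator's
contract. I'd expect a caller to look roughly like this (hypothetical
sketch, not from this series; the function name and the UC fallback are
my own):

u8 lookup_range_type(struct kvm_vcpu *vcpu, u64 start, u64 end)
{
	struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
	struct mtrr_looker looker;
	int type = -1;

	mtrr_for_each_mem_type(&looker, mtrr_state, start, end) {
		if (type == -1) {
			/* first overlapping range decides the type. */
			type = looker.mem_type;
			continue;
		}

		/* conflicting types within [start, end): be conservative. */
		if (type != looker.mem_type)
			return MTRR_TYPE_UNCACHABLE;
	}

	/* partial_map is only meaningful once the walk has finished. */
	if (looker.partial_map || type == -1)
		return MTRR_TYPE_UNCACHABLE;

	return type;
}

If that matches the intent (mem_type == -1 is the loop's termination
condition, and partial_map should only be consulted after the loop), it
might be worth a short comment above the macro spelling that out.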