From: "Aneesh Kumar K.V"
To: Michael Neuling, greg@kroah.com, arnd@arndb.de, mpe@ellerman.id.au,
	benh@kernel.crashing.org
Cc: mikey@neuling.org, anton@samba.org, linux-kernel@vger.kernel.org,
	linuxppc-dev@ozlabs.org, jk@ozlabs.org, imunsie@au1.ibm.com,
	cbe-oss-dev@lists.ozlabs.org
Subject: Re: [PATCH v4 02/16] powerpc/cell: Move data segment faulting code out of cell platform
In-Reply-To: <1412758505-23495-3-git-send-email-mikey@neuling.org>
References: <1412758505-23495-1-git-send-email-mikey@neuling.org>
	<1412758505-23495-3-git-send-email-mikey@neuling.org>
Date: Thu, 09 Oct 2014 20:34:26 +0530
Message-ID: <877g092sjp.fsf@linux.vnet.ibm.com>

Michael Neuling writes:

> From: Ian Munsie
>
> __spu_trap_data_seg() currently contains code to determine the VSID and ESID
> required for a particular EA and mm struct.
>
> This code is generically useful for other co-processors. This moves the code
> out of the cell platform so it can be used by other powerpc code. It also
> adds 1TB segment handling, which Cell didn't support. The new function is
> called copro_calculate_slb().
>
> This also moves the internal struct spu_slb to a generic struct copro_slb,
> which is now used in the Cell and copro code. We use this new struct instead
> of passing around esid and vsid parameters.
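
For other co-processor drivers following along, usage would look roughly
like the sketch below. This is not part of the patch: "my_copro" and its
register layout are made up for illustration, modelled on the SPU path
(spu_load_slb) further down in the diff.

	#include <linux/mm_types.h>
	#include <asm/copro.h>
	#include <asm/io.h>

	/* Hypothetical device state; a real driver has its own layout. */
	struct my_copro_priv {
		u64 __iomem *slb_esid;
		u64 __iomem *slb_vsid;
	};

	static int my_copro_segment_fault(struct my_copro_priv *priv,
					  struct mm_struct *mm, u64 ea)
	{
		struct copro_slb slb;
		int ret;

		/* Translate the faulting EA into an ESID/VSID pair. */
		ret = copro_calculate_slb(mm, ea, &slb);
		if (ret)
			return ret;	/* invalid region: propagate the fault */

		/*
		 * Load the pair into the device's SLB. The VSID word is
		 * written first; the valid bit (SLB_ESID_V) lives in the
		 * ESID word, so writing ESID last publishes a complete
		 * entry, as the SPU code does.
		 */
		out_be64(priv->slb_vsid, slb.vsid);
		out_be64(priv->slb_esid, slb.esid);
		return 0;
	}
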
> Reviewed-by: Aneesh Kumar K.V
> Signed-off-by: Ian Munsie
> Signed-off-by: Michael Neuling
> ---
>  arch/powerpc/include/asm/copro.h       |  7 +++++
>  arch/powerpc/include/asm/mmu-hash64.h  |  7 +++++
>  arch/powerpc/mm/copro_fault.c          | 46 ++++++++++++++++++++++++++++
>  arch/powerpc/mm/slb.c                  |  3 --
>  arch/powerpc/platforms/cell/spu_base.c | 55 ++++++----------------------------
>  5 files changed, 69 insertions(+), 49 deletions(-)
>
> diff --git a/arch/powerpc/include/asm/copro.h b/arch/powerpc/include/asm/copro.h
> index 51cae85..b0e6a18 100644
> --- a/arch/powerpc/include/asm/copro.h
> +++ b/arch/powerpc/include/asm/copro.h
> @@ -10,7 +10,14 @@
>  #ifndef _ASM_POWERPC_COPRO_H
>  #define _ASM_POWERPC_COPRO_H
>
> +struct copro_slb
> +{
> +	u64 esid, vsid;
> +};
> +
>  int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
>  			  unsigned long dsisr, unsigned *flt);
>
> +int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb);
> +
>  #endif /* _ASM_POWERPC_COPRO_H */
> diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
> index d765144..aeabd02 100644
> --- a/arch/powerpc/include/asm/mmu-hash64.h
> +++ b/arch/powerpc/include/asm/mmu-hash64.h
> @@ -190,6 +190,13 @@ static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
>
>  #ifndef __ASSEMBLY__
>
> +static inline int slb_vsid_shift(int ssize)
> +{
> +	if (ssize == MMU_SEGSIZE_256M)
> +		return SLB_VSID_SHIFT;
> +	return SLB_VSID_SHIFT_1T;
> +}
> +
>  static inline int segment_shift(int ssize)
>  {
>  	if (ssize == MMU_SEGSIZE_256M)
> diff --git a/arch/powerpc/mm/copro_fault.c b/arch/powerpc/mm/copro_fault.c
> index ba7df14..a15a23e 100644
> --- a/arch/powerpc/mm/copro_fault.c
> +++ b/arch/powerpc/mm/copro_fault.c
> @@ -24,6 +24,7 @@
>  #include <linux/sched.h>
>  #include <linux/mm.h>
>  #include <linux/export.h>
> +#include <asm/copro.h>
>
>  /*
>   * This ought to be kept in sync with the powerpc specific do_page_fault
> @@ -90,3 +91,48 @@ out_unlock:
>  	return ret;
>  }
>  EXPORT_SYMBOL_GPL(copro_handle_mm_fault);
> +
> +int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb)
> +{
> +	u64 vsid;
> +	int psize, ssize;
> +
> +	slb->esid = (ea & ESID_MASK) | SLB_ESID_V;
> +
> +	switch (REGION_ID(ea)) {
> +	case USER_REGION_ID:
> +		pr_devel("%s: 0x%llx -- USER_REGION_ID\n", __func__, ea);
> +		psize = get_slice_psize(mm, ea);
> +		ssize = user_segment_size(ea);
> +		vsid = get_vsid(mm->context.id, ea, ssize);
> +		break;
> +	case VMALLOC_REGION_ID:
> +		pr_devel("%s: 0x%llx -- VMALLOC_REGION_ID\n", __func__, ea);
> +		if (ea < VMALLOC_END)
> +			psize = mmu_vmalloc_psize;
> +		else
> +			psize = mmu_io_psize;
> +		ssize = mmu_kernel_ssize;
> +		vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
> +		break;
> +	case KERNEL_REGION_ID:
> +		pr_devel("%s: 0x%llx -- KERNEL_REGION_ID\n", __func__, ea);
> +		psize = mmu_linear_psize;
> +		ssize = mmu_kernel_ssize;
> +		vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
> +		break;
> +	default:
> +		pr_debug("%s: invalid region access at %016llx\n", __func__, ea);
> +		return 1;
> +	}
> +
> +	vsid = (vsid << slb_vsid_shift(ssize)) | SLB_VSID_USER;
> +
> +	vsid |= mmu_psize_defs[psize].sllp |
> +		((ssize == MMU_SEGSIZE_1T) ? SLB_VSID_B_1T : 0);
> +

s/0/SLB_VSID_B_256M/

> +	slb->vsid = vsid;
> +
> +	return 0;
> +}
> +EXPORT_SYMBOL_GPL(copro_calculate_slb);
> diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
> index 0399a67..6e450ca 100644
> --- a/arch/powerpc/mm/slb.c
> +++ b/arch/powerpc/mm/slb.c
> @@ -46,9 +46,6 @@ static inline unsigned long mk_esid_data(unsigned long ea, int ssize,
>  	return (ea & slb_esid_mask(ssize)) | SLB_ESID_V | slot;
>  }
>
> -#define slb_vsid_shift(ssize)	\
> -	((ssize) == MMU_SEGSIZE_256M? SLB_VSID_SHIFT: SLB_VSID_SHIFT_1T)
> -
>  static inline unsigned long mk_vsid_data(unsigned long ea, int ssize,
>  					 unsigned long flags)
>  {
> diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
> index 2930d1e..ffcbd24 100644
> --- a/arch/powerpc/platforms/cell/spu_base.c
> +++ b/arch/powerpc/platforms/cell/spu_base.c
> @@ -76,10 +76,6 @@ static LIST_HEAD(spu_full_list);
>  static DEFINE_SPINLOCK(spu_full_list_lock);
>  static DEFINE_MUTEX(spu_full_list_mutex);
>
> -struct spu_slb {
> -	u64 esid, vsid;
> -};
> -
>  void spu_invalidate_slbs(struct spu *spu)
>  {
>  	struct spu_priv2 __iomem *priv2 = spu->priv2;
> @@ -149,7 +145,7 @@ static void spu_restart_dma(struct spu *spu)
>  	}
>  }
>
> -static inline void spu_load_slb(struct spu *spu, int slbe, struct spu_slb *slb)
> +static inline void spu_load_slb(struct spu *spu, int slbe, struct copro_slb *slb)
>  {
>  	struct spu_priv2 __iomem *priv2 = spu->priv2;
>
> @@ -167,45 +163,12 @@ static inline void spu_load_slb(struct spu *spu, int slbe, struct spu_slb *slb)
>
>  static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
>  {
> -	struct mm_struct *mm = spu->mm;
> -	struct spu_slb slb;
> -	int psize;
> -
> -	pr_debug("%s\n", __func__);
> -
> -	slb.esid = (ea & ESID_MASK) | SLB_ESID_V;
> +	struct copro_slb slb;
> +	int ret;
>
> -	switch(REGION_ID(ea)) {
> -	case USER_REGION_ID:
> -#ifdef CONFIG_PPC_MM_SLICES
> -		psize = get_slice_psize(mm, ea);
> -#else
> -		psize = mm->context.user_psize;
> -#endif
> -		slb.vsid = (get_vsid(mm->context.id, ea, MMU_SEGSIZE_256M)
> -				<< SLB_VSID_SHIFT) | SLB_VSID_USER;
> -		break;
> -	case VMALLOC_REGION_ID:
> -		if (ea < VMALLOC_END)
> -			psize = mmu_vmalloc_psize;
> -		else
> -			psize = mmu_io_psize;
> -		slb.vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M)
> -				<< SLB_VSID_SHIFT) | SLB_VSID_KERNEL;
> -		break;
> -	case KERNEL_REGION_ID:
> -		psize = mmu_linear_psize;
> -		slb.vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M)
> -				<< SLB_VSID_SHIFT) | SLB_VSID_KERNEL;
> -		break;
> -	default:
> -		/* Future: support kernel segments so that drivers
> -		 * can use SPUs.
> -		 */
> -		pr_debug("invalid region access at %016lx\n", ea);
> -		return 1;
> -	}
> -	slb.vsid |= mmu_psize_defs[psize].sllp;
> +	ret = copro_calculate_slb(spu->mm, ea, &slb);
> +	if (ret)
> +		return ret;
>
>  	spu_load_slb(spu, spu->slb_replace, &slb);
>
> @@ -253,7 +216,7 @@ static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
>  	return 0;
>  }
>
> -static void __spu_kernel_slb(void *addr, struct spu_slb *slb)
> +static void __spu_kernel_slb(void *addr, struct copro_slb *slb)
>  {
>  	unsigned long ea = (unsigned long)addr;
>  	u64 llp;
> @@ -272,7 +235,7 @@ static void __spu_kernel_slb(void *addr, struct spu_slb *slb)
>   * Given an array of @nr_slbs SLB entries, @slbs, return non-zero if the
>   * address @new_addr is present.
>   */
> -static inline int __slb_present(struct spu_slb *slbs, int nr_slbs,
> +static inline int __slb_present(struct copro_slb *slbs, int nr_slbs,
>  				void *new_addr)
>  {
>  	unsigned long ea = (unsigned long)new_addr;
> @@ -297,7 +260,7 @@ static inline int __slb_present(struct spu_slb *slbs, int nr_slbs,
>  void spu_setup_kernel_slbs(struct spu *spu, struct spu_lscsa *lscsa,
>  			   void *code, int code_size)
>  {
> -	struct spu_slb slbs[4];
> +	struct copro_slb slbs[4];
>  	int i, nr_slbs = 0;
>  	/* start and end addresses of both mappings */
>  	void *addrs[] = {
> --
> 1.9.1
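
To spell out the substitution suggested above: SLB_VSID_B_256M is
defined as 0, so the change is purely cosmetic. It names the 256M
encoding of the segment-size (B) field instead of leaving a bare 0 in
the second arm of the ternary. For reference, the relevant constants
from arch/powerpc/include/asm/mmu-hash64.h:

	#define SLB_VSID_B		ASM_CONST(0xc000000000000000)
	#define SLB_VSID_B_256M		ASM_CONST(0x0000000000000000)
	#define SLB_VSID_B_1T		ASM_CONST(0x4000000000000000)

With that spelled out, the composed line would read:

	vsid |= mmu_psize_defs[psize].sllp |
		((ssize == MMU_SEGSIZE_1T) ? SLB_VSID_B_1T : SLB_VSID_B_256M);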