From: monstr@monstr.eu
To: linux-kernel@vger.kernel.org
Cc: monstr@seznam.cz, arnd@arndb.de, linux-arch@vger.kernel.org,
        stephen.neuendorffer@xilinx.com, John.Linn@xilinx.com,
        john.williams@petalogix.com, matthew@wil.cx, will.newton@gmail.com,
        drepper@redhat.com, microblaze-uclinux@itee.uq.edu.au,
        grant.likely@secretlab.ca, vapier.adi@gmail.com,
        alan@lxorguk.ukuu.org.uk, hpa@zytor.com, lethal@linux-sh.org,
        florian@openwrt.org, Michal Simek <monstr@monstr.eu>
Subject: [PATCH 10/58] microblaze_v5: cache support
Date: Tue, 8 Jul 2008 13:59:10 +0200
In-Reply-To: <4409daf2ac356e902a8f091bb5908eb8a90218bc.1215517976.git.monstr@monstr.eu>
References: <1215518398-5057-1-git-send-email-monstr@monstr.eu>
        <80a2e46f2fb93812ab12bf79c703e8e2d6b0faa0.1215517976.git.monstr@monstr.eu>
        <58f35f498bac29e7105c589c06567e86c5a42dd5.1215517976.git.monstr@monstr.eu>
        <810775b1bb678003923039726a9153ee34fb67b4.1215517976.git.monstr@monstr.eu>
        <2a24e5bc2cfbd349613ef10c716a28f04ce24a9f.1215517976.git.monstr@monstr.eu>
        <3171c5cf21eefc79665165f4a14bc5b68dd03f95.1215517976.git.monstr@monstr.eu>
        <9be4eff2f4d015023c453eaec3b3473a44380491.1215517976.git.monstr@monstr.eu>
        <4409daf2ac356e902a8f091bb5908eb8a90218bc.1215517976.git.monstr@monstr.eu>

From: Michal Simek <monstr@monstr.eu>

Acked-by: Stephen Neuendorffer <stephen.neuendorffer@xilinx.com>
Signed-off-by: Michal Simek <monstr@monstr.eu>
---
 arch/microblaze/kernel/cpu/cache.c  | 255 +++++++++++++++++++++++++++++++++++
 include/asm-microblaze/cache.h      |  43 ++++++
 include/asm-microblaze/cacheflush.h |  75 ++++++++++
 3 files changed, 373 insertions(+), 0 deletions(-)
 create mode 100644 arch/microblaze/kernel/cpu/cache.c
 create mode 100644 include/asm-microblaze/cache.h
 create mode 100644 include/asm-microblaze/cacheflush.h

diff --git a/arch/microblaze/kernel/cpu/cache.c b/arch/microblaze/kernel/cpu/cache.c
new file mode 100644
index 0000000..9dcf3e9
--- /dev/null
+++ b/arch/microblaze/kernel/cpu/cache.c
@@ -0,0 +1,255 @@
+/*
+ * Cache control for MicroBlaze cache memories
+ *
+ * Copyright (C) 2007 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2007 PetaLogix
+ * Copyright (C) 2007 John Williams <john.williams@petalogix.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file COPYING in the main directory of this
+ * archive for more details.
+ *
+ */
+
+#include <asm/cacheflush.h>
+#include <asm/cache.h>
+#include <asm/cpuinfo.h>
+
+/* Exported functions */
+
+void _enable_icache(void)
+{
+	if (cpuinfo.use_icache) {
+#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
+		__asm__ __volatile__ ("			\
+				msrset	r0, %0;		\
+				nop; "			\
+				:			\
+				: "i" (MSR_ICE)		\
+				: "memory");
+#else
+		__asm__ __volatile__ ("			\
+				mfs	r12, rmsr;	\
+				ori	r12, r12, %0;	\
+				mts	rmsr, r12;	\
+				nop; "			\
+				:			\
+				: "i" (MSR_ICE)		\
+				: "memory", "r12");
+#endif
+	}
+}
+
+void _disable_icache(void)
+{
+	if (cpuinfo.use_icache) {
+#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
+		__asm__ __volatile__ ("			\
+				msrclr	r0, %0;		\
+				nop; "			\
+				:			\
+				: "i" (MSR_ICE)		\
+				: "memory");
+#else
+		__asm__ __volatile__ ("			\
+				mfs	r12, rmsr;	\
+				andi	r12, r12, ~%0;	\
+				mts	rmsr, r12;	\
+				nop; "			\
+				:			\
+				: "i" (MSR_ICE)		\
+				: "memory", "r12");
+#endif
+	}
+}
+
+void _invalidate_icache(unsigned int addr)
+{
+	if (cpuinfo.use_icache) {
+		__asm__ __volatile__ ("			\
+				wic	%0, r0"		\
+				:			\
+				: "r" (addr));
+	}
+}
+
+void _enable_dcache(void)
+{
+	if (cpuinfo.use_dcache) {
+#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
+		__asm__ __volatile__ ("			\
+				msrset	r0, %0;		\
+				nop; "			\
+				:			\
+				: "i" (MSR_DCE)		\
+				: "memory");
+#else
+		__asm__ __volatile__ ("			\
+				mfs	r12, rmsr;	\
+				ori	r12, r12, %0;	\
+				mts	rmsr, r12;	\
+				nop; "			\
+				:			\
+				: "i" (MSR_DCE)		\
+				: "memory", "r12");
+#endif
+	}
+}
+
+void _disable_dcache(void)
+{
+	if (cpuinfo.use_dcache) {
+#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
+		__asm__ __volatile__ ("			\
+				msrclr	r0, %0;		\
+				nop; "			\
+				:			\
+				: "i" (MSR_DCE)		\
+				: "memory");
+#else
+		__asm__ __volatile__ ("			\
+				mfs	r12, rmsr;	\
+				andi	r12, r12, ~%0;	\
+				mts	rmsr, r12;	\
+				nop; "			\
+				:			\
+				: "i" (MSR_DCE)		\
+				: "memory", "r12");
+#endif
+	}
+}
+
+void _invalidate_dcache(unsigned int addr)
+{
+	if (cpuinfo.use_dcache)
+		__asm__ __volatile__ ("			\
+				wdc	%0, r0"		\
+				:			\
+				: "r" (addr));
+}
+
+void __invalidate_icache_all(void)
+{
+	unsigned int i;
+	unsigned flags;
+
+	if (cpuinfo.use_icache) {
+		local_irq_save(flags);
+		__disable_icache();
+
+		/* Just loop through cache size and invalidate, no need to add
+		   CACHE_BASE address */
+		for (i = 0; i < cpuinfo.icache_size;
+					i += cpuinfo.icache_line)
+			__invalidate_icache(i);
+
+		__enable_icache();
+		local_irq_restore(flags);
+	}
+}
+
+void __invalidate_icache_range(unsigned long start, unsigned long end)
+{
+	unsigned int i;
+	unsigned flags;
+	unsigned int align;
+
+	if (cpuinfo.use_icache) {
+		/*
+		 * No need to cover entire cache range,
+		 * just cover cache footprint
+		 */
+		end = min(start + cpuinfo.icache_size, end);
+		align = ~(cpuinfo.icache_line - 1);
+		start &= align;	/* Make sure we are aligned */
+		/* Push end up to the next cache line */
+		end = ((end & align) + cpuinfo.icache_line);
+
+		local_irq_save(flags);
+		__disable_icache();
+
+		for (i = start; i < end; i += cpuinfo.icache_line)
+			__invalidate_icache(i);
+
+		__enable_icache();
+		local_irq_restore(flags);
+	}
+}
+
+void __invalidate_icache_page(struct vm_area_struct *vma, struct page *page)
+{
+	__invalidate_icache_all();
+}
+
+void __invalidate_icache_user_range(struct vm_area_struct *vma,
+				struct page *page, unsigned long adr,
+				int len)
+{
+	__invalidate_icache_all();
+}
+
+void __invalidate_cache_sigtramp(unsigned long addr)
+{
+	__invalidate_icache_range(addr, addr + 8);
+}
+
+void __invalidate_dcache_all(void)
+{
+	unsigned int i;
+	unsigned flags;
+
+	if (cpuinfo.use_dcache) {
+		local_irq_save(flags);
+		__disable_dcache();
+
+		/*
+		 * Just loop through cache size and invalidate,
+		 * no need to add CACHE_BASE address
+		 */
+		for (i = 0; i < cpuinfo.dcache_size;
+					i += cpuinfo.dcache_line)
+			__invalidate_dcache(i);
+
+		__enable_dcache();
+		local_irq_restore(flags);
+	}
+}
+
+void __invalidate_dcache_range(unsigned long start, unsigned long end)
+{
+	unsigned int i;
+	unsigned flags;
+	unsigned int align;
+
+	if (cpuinfo.use_dcache) {
+		/*
+		 * No need to cover entire cache range,
+		 * just cover cache footprint
+		 */
+		end = min(start + cpuinfo.dcache_size, end);
+		align = ~(cpuinfo.dcache_line - 1);
+		start &= align;	/* Make sure we are aligned */
+		/* Push end up to the next cache line */
+		end = ((end & align) + cpuinfo.dcache_line);
+		local_irq_save(flags);
+		__disable_dcache();
+
+		for (i = start; i < end; i += cpuinfo.dcache_line)
+			__invalidate_dcache(i);
+
+		__enable_dcache();
+		local_irq_restore(flags);
+	}
+}
+
+void __invalidate_dcache_page(struct vm_area_struct *vma, struct page *page)
+{
+	__invalidate_dcache_all();
+}
+
+void __invalidate_dcache_user_range(struct vm_area_struct *vma,
+				struct page *page, unsigned long adr,
+				int len)
+{
+	__invalidate_dcache_all();
+}
diff --git a/include/asm-microblaze/cache.h b/include/asm-microblaze/cache.h
new file mode 100644
index 0000000..16a647d
--- /dev/null
+++ b/include/asm-microblaze/cache.h
@@ -0,0 +1,43 @@
+/*
+ * Cache operations
+ *
+ * Copyright (C) 2007 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2003 John Williams <john.williams@petalogix.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file COPYING in the main directory of this
+ * archive for more details.
+ */
+
+#ifndef _ASM_MICROBLAZE_CACHE_H
+#define _ASM_MICROBLAZE_CACHE_H
+
+#include <asm/registers.h>
+
+#ifndef L1_CACHE_BYTES
+/* word-granular cache in microblaze */
+#define L1_CACHE_BYTES	4
+#endif
+
+void _enable_icache(void);
+void _disable_icache(void);
+void _invalidate_icache(unsigned int addr);
+
+#define __enable_icache()		_enable_icache()
+#define __disable_icache()		_disable_icache()
+#define __invalidate_icache(addr)	_invalidate_icache(addr)
+
+void _enable_dcache(void);
+void _disable_dcache(void);
+void _invalidate_dcache(unsigned int addr);
+
+#define __enable_dcache()		_enable_dcache()
+#define __disable_dcache()		_disable_dcache()
+#define __invalidate_dcache(addr)	_invalidate_dcache(addr)
+
+/* FIXME - I don't think this is right */
+#ifdef CONFIG_XILINX_UNCACHED_SHADOW
+#define UNCACHED_SHADOW_MASK (CONFIG_XILINX_ERAM_SIZE)
+#endif
+
+#endif /* _ASM_MICROBLAZE_CACHE_H */
diff --git a/include/asm-microblaze/cacheflush.h b/include/asm-microblaze/cacheflush.h
new file mode 100644
index 0000000..6afe017
--- /dev/null
+++ b/include/asm-microblaze/cacheflush.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2007 PetaLogix
+ * Copyright (C) 2007 John Williams <john.williams@petalogix.com>
+ * based on v850 version which was
+ * Copyright (C) 2001,02,03 NEC Electronics Corporation
+ * Copyright (C) 2001,02,03 Miles Bader
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file COPYING in the main directory of this
+ * archive for more details.
+ *
+ */
+
+#ifndef _ASM_MICROBLAZE_CACHEFLUSH_H
+#define _ASM_MICROBLAZE_CACHEFLUSH_H
+
+/* Somebody depends on this; sigh... */
+#include <linux/mm.h>
+
+/*
+ * Cache handling functions.
+ * Microblaze has a write-through data cache, and no icache snooping of dcache
+ */
+
+#define flush_cache_all()			__invalidate_cache_all()
+#define flush_cache_mm(mm)			do { } while (0)
+#define flush_cache_range(vma, start, end)	__invalidate_cache_all()
+#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
+
+#define flush_dcache_range(start, end)	__invalidate_dcache_range(start, end)
+#define flush_dcache_page(page)			do { } while (0)
+#define flush_dcache_mmap_lock(mapping)		do { } while (0)
+#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
+
+#define flush_icache_range(start, len)	__invalidate_icache_range(start, len)
+#define flush_icache_page(vma, pg)		do { } while (0)
+
+#define flush_cache_vmap(start, end)		do { } while (0)
+#define flush_cache_vunmap(start, end)		do { } while (0)
+
+struct page;
+struct mm_struct;
+struct vm_area_struct;
+
+/* see arch/microblaze/kernel/cache.c */
+extern void __invalidate_icache_all(void);
+extern void __invalidate_icache_range(unsigned long start, unsigned long end);
+extern void __invalidate_icache_page(struct vm_area_struct *vma,
+				struct page *page);
+extern void __invalidate_icache_user_range(struct vm_area_struct *vma,
+				struct page *page,
+				unsigned long adr, int len);
+extern void __invalidate_cache_sigtramp(unsigned long addr);
+
+extern void __invalidate_dcache_all(void);
+extern void __invalidate_dcache_range(unsigned long start, unsigned long end);
+extern void __invalidate_dcache_page(struct vm_area_struct *vma,
+				struct page *page);
+extern void __invalidate_dcache_user_range(struct vm_area_struct *vma,
+				struct page *page,
+				unsigned long adr, int len);
+
+extern inline void __invalidate_cache_all(void)
+{
+	__invalidate_icache_all();
+	__invalidate_dcache_all();
+}
+
+#define copy_to_user_page(vma, page, vaddr, dst, src, len)		\
+do {	memcpy((dst), (src), (len));					\
+	flush_icache_range((unsigned) (dst), (unsigned) (dst) + (len)); \
+} while (0)
+
+#define copy_from_user_page(vma, page, vaddr, dst, src, len)	\
+	memcpy((dst), (src), (len))
+
+#endif /* _ASM_MICROBLAZE_CACHEFLUSH_H */
-- 
1.5.4.GIT
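
The comment at the top of cacheflush.h states the constraint that shapes this whole interface: the MicroBlaze data cache is write-through, so stores reach memory immediately, but the instruction cache does not snoop the data side, so code written through the data path can still be stale in the icache. A minimal sketch of how a caller would use the interface from this patch follows; the helper name install_code() and its arguments are hypothetical, only flush_icache_range() comes from the headers above.

	#include <linux/string.h>	/* memcpy() */
	#include <asm/cacheflush.h>	/* flush_icache_range() from this patch */

	/*
	 * Hypothetical helper: copy freshly generated instructions into
	 * 'dst' and make sure the CPU fetches the new code rather than
	 * stale icache lines.  The write-through dcache means the stores
	 * are already in memory; only the icache needs invalidating.
	 */
	static void install_code(void *dst, const void *src, unsigned long len)
	{
		memcpy(dst, src, len);

		/* Drop any icache lines covering the freshly written range */
		flush_icache_range((unsigned long)dst, (unsigned long)dst + len);
	}

This is the same pattern the copy_to_user_page() macro above follows: write the bytes, then invalidate the icache over exactly that range.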