asm-generic/percpu.h is *NOT* included by every asm/percpu.h out there.
Namely, alpha can't use it since it needs the var name in
SHIFT_PERCPU_PTR - &per_cpu_var(var) won't do at all. So adding stuff
to asm-generic/percpu.h and expecting it to be picked by everything
is not going to work.
Frankly, I'd rather have SHIFT_PERCPU_PTR() calling conventions changed,
but for now the patch below will do.
Signed-off-by: Al Viro <[email protected]>
---
diff --git a/arch/alpha/include/asm/percpu.h b/arch/alpha/include/asm/percpu.h
index 3495e8e..fffec74 100644
--- a/arch/alpha/include/asm/percpu.h
+++ b/arch/alpha/include/asm/percpu.h
@@ -75,4 +75,30 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
#define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu_var(name)
+# define percpu_read(var) \
+ ({ \
+ typeof(per_cpu_var(var)) __tmp_var__; \
+ __tmp_var__ = get_cpu_var(var); \
+ put_cpu_var(var); \
+ __tmp_var__; \
+ })
+
+#define __percpu_generic_to_op(var, val, op) \
+do { \
+ get_cpu_var(var) op val; \
+ put_cpu_var(var); \
+} while (0)
+
+# define percpu_write(var, val) __percpu_generic_to_op(var, (val), =)
+
+# define percpu_add(var, val) __percpu_generic_to_op(var, (val), +=)
+
+# define percpu_sub(var, val) __percpu_generic_to_op(var, (val), -=)
+
+# define percpu_and(var, val) __percpu_generic_to_op(var, (val), &=)
+
+# define percpu_or(var, val) __percpu_generic_to_op(var, (val), |=)
+
+# define percpu_xor(var, val) __percpu_generic_to_op(var, (val), ^=)
+
#endif /* __ALPHA_PERCPU_H */
On Fri, Apr 10, 2009 at 04:12:24PM +0100, Al Viro wrote:
> asm-generic/percpu.h is *NOT* included by every asm/percpu.h out there.
> Namely, alpha can't use it since it needs the var name in
> SHIFT_PERCPU_PTR - &per_cpu_var(var) won't do at all. So adding stuff
> to asm-generic/percpu.h and expecting it to be picked by everything
> is not going to work.
>
> Frankly, I'd rather have SHIFT_PERCPU_PTR() calling conventions changed,
> but for now the patch below will do.
>
> Signed-off-by: Al Viro <[email protected]>
Alternative patch (changing SHIFT_PERCPU_PTR() arguments and switching
alpha to asm-generic/percpu.h) would be this (only build-tested, though):
diff --git a/arch/alpha/include/asm/percpu.h b/arch/alpha/include/asm/percpu.h
index 3495e8e..1c52a2d 100644
--- a/arch/alpha/include/asm/percpu.h
+++ b/arch/alpha/include/asm/percpu.h
@@ -3,33 +3,8 @@
#include <linux/compiler.h>
#include <linux/threads.h>
-/*
- * Determine the real variable name from the name visible in the
- * kernel sources.
- */
-#define per_cpu_var(var) per_cpu__##var
-
#ifdef CONFIG_SMP
-
-/*
- * per_cpu_offset() is the offset that has to be added to a
- * percpu variable to get to the instance for a certain processor.
- */
-extern unsigned long __per_cpu_offset[NR_CPUS];
-
-#define per_cpu_offset(x) (__per_cpu_offset[x])
-
-#define __my_cpu_offset per_cpu_offset(raw_smp_processor_id())
-#ifdef CONFIG_DEBUG_PREEMPT
-#define my_cpu_offset per_cpu_offset(smp_processor_id())
-#else
-#define my_cpu_offset __my_cpu_offset
-#endif
-
-#ifndef MODULE
-#define SHIFT_PERCPU_PTR(var, offset) RELOC_HIDE(&per_cpu_var(var), (offset))
-#define PER_CPU_ATTRIBUTES
-#else
+#ifdef MODULE
/*
* To calculate addresses of locally defined variables, GCC uses 32-bit
* displacement from the GP. Which doesn't work for per cpu variables in
@@ -39,40 +14,15 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
* ldq instruction with a 'literal' relocation.
*/
#define SHIFT_PERCPU_PTR(var, offset) ({ \
- extern int simple_identifier_##var(void); \
unsigned long __ptr, tmp_gp; \
asm ( "br %1, 1f \n\
1: ldgp %1, 0(%1) \n\
ldq %0, per_cpu__" #var"(%1)\t!literal" \
: "=&r"(__ptr), "=&r"(tmp_gp)); \
(typeof(&per_cpu_var(var)))(__ptr + (offset)); })
-
-#define PER_CPU_ATTRIBUTES __used
-
#endif /* MODULE */
-
-/*
- * A percpu variable may point to a discarded regions. The following are
- * established ways to produce a usable pointer from the percpu variable
- * offset.
- */
-#define per_cpu(var, cpu) \
- (*SHIFT_PERCPU_PTR(var, per_cpu_offset(cpu)))
-#define __get_cpu_var(var) \
- (*SHIFT_PERCPU_PTR(var, my_cpu_offset))
-#define __raw_get_cpu_var(var) \
- (*SHIFT_PERCPU_PTR(var, __my_cpu_offset))
-
-#else /* ! SMP */
-
-#define per_cpu(var, cpu) (*((void)(cpu), &per_cpu_var(var)))
-#define __get_cpu_var(var) per_cpu_var(var)
-#define __raw_get_cpu_var(var) per_cpu_var(var)
-
-#define PER_CPU_ATTRIBUTES
-
#endif /* SMP */
-#define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu_var(name)
+#include <asm-generic/percpu.h>
#endif /* __ALPHA_PERCPU_H */
diff --git a/arch/s390/include/asm/percpu.h b/arch/s390/include/asm/percpu.h
index 408d60b..6a71d73 100644
--- a/arch/s390/include/asm/percpu.h
+++ b/arch/s390/include/asm/percpu.h
@@ -13,20 +13,18 @@
*/
#if defined(__s390x__) && defined(MODULE)
-#define SHIFT_PERCPU_PTR(ptr,offset) (({ \
- extern int simple_identifier_##var(void); \
+#define SHIFT_PERCPU_PTR(var,offset) (({ \
unsigned long *__ptr; \
asm ( "larl %0, %1@GOTENT" \
- : "=a" (__ptr) : "X" (ptr) ); \
- (typeof(ptr))((*__ptr) + (offset)); }))
+ : "=a" (__ptr) : "X" (&per_cpu_var(var)) ); \
+ (typeof(&per_cpu_var(var)))((*__ptr) + (offset)); }))
#else
-#define SHIFT_PERCPU_PTR(ptr, offset) (({ \
- extern int simple_identifier_##var(void); \
+#define SHIFT_PERCPU_PTR(var, offset) (({ \
unsigned long __ptr; \
- asm ( "" : "=a" (__ptr) : "0" (ptr) ); \
- (typeof(ptr)) (__ptr + (offset)); }))
+ asm ( "" : "=a" (__ptr) : "0" (&per_cpu_var(var)) ); \
+ (typeof(&per_cpu_var(var))) (__ptr + (offset)); }))
#endif
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index 00f45ff..7463700 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -42,10 +42,10 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
/*
* Add a offset to a pointer but keep the pointer as is.
*
- * Only S390 provides its own means of moving the pointer.
+ * Only S390 and Alpha provide their own means of moving the pointer.
*/
#ifndef SHIFT_PERCPU_PTR
-#define SHIFT_PERCPU_PTR(__p, __offset) RELOC_HIDE((__p), (__offset))
+#define SHIFT_PERCPU_PTR(__v, __offset) RELOC_HIDE(&per_cpu_var(__v), (__offset))
#endif
/*
@@ -54,11 +54,11 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
* offset.
*/
#define per_cpu(var, cpu) \
- (*SHIFT_PERCPU_PTR(&per_cpu_var(var), per_cpu_offset(cpu)))
+ (*SHIFT_PERCPU_PTR(var, per_cpu_offset(cpu)))
#define __get_cpu_var(var) \
- (*SHIFT_PERCPU_PTR(&per_cpu_var(var), my_cpu_offset))
+ (*SHIFT_PERCPU_PTR(var, my_cpu_offset))
#define __raw_get_cpu_var(var) \
- (*SHIFT_PERCPU_PTR(&per_cpu_var(var), __my_cpu_offset))
+ (*SHIFT_PERCPU_PTR(var, __my_cpu_offset))
#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
Hmm. I certainly personally _much_ prefer this version.
It looks like this actually simplifies things for S390 too (no more
simple_identifier_##var games), and generally just looks better. And
seeing that the S390 special case is no longer S390-specific is also a
good sign, imho.
Sorry for including the whole message, but I'm adding s390 and percpu
people to the cc. Guys - original uncorrupted patch on lkml.
Linus
---
On Fri, 10 Apr 2009, Al Viro wrote:
> On Fri, Apr 10, 2009 at 04:12:24PM +0100, Al Viro wrote:
> > asm-generic/percpu.h is *NOT* included by every asm/percpu.h out there.
> > Namely, alpha can't use it since it needs the var name in
> > SHIFT_PERCPU_PTR - &per_cpu_var(var) won't do at all. So adding stuff
> > to asm-generic/percpu.h and expecting it to be picked by everything
> > is not going to work.
> >
> > Frankly, I'd rather have SHIFT_PERCPU_PTR() calling conventions changed,
> > but for now the patch below will do.
> >
> > Signed-off-by: Al Viro <[email protected]>
>
> Alternative patch (changing SHIFT_PERCPU_PTR() arguments and switching
> alpha to asm-generic/percpu.h) would be this (only build-tested, though):
>
> diff --git a/arch/alpha/include/asm/percpu.h b/arch/alpha/include/asm/percpu.h
> index 3495e8e..1c52a2d 100644
> --- a/arch/alpha/include/asm/percpu.h
> +++ b/arch/alpha/include/asm/percpu.h
> @@ -3,33 +3,8 @@
> #include <linux/compiler.h>
> #include <linux/threads.h>
>
> -/*
> - * Determine the real variable name from the name visible in the
> - * kernel sources.
> - */
> -#define per_cpu_var(var) per_cpu__##var
> -
> #ifdef CONFIG_SMP
> -
> -/*
> - * per_cpu_offset() is the offset that has to be added to a
> - * percpu variable to get to the instance for a certain processor.
> - */
> -extern unsigned long __per_cpu_offset[NR_CPUS];
> -
> -#define per_cpu_offset(x) (__per_cpu_offset[x])
> -
> -#define __my_cpu_offset per_cpu_offset(raw_smp_processor_id())
> -#ifdef CONFIG_DEBUG_PREEMPT
> -#define my_cpu_offset per_cpu_offset(smp_processor_id())
> -#else
> -#define my_cpu_offset __my_cpu_offset
> -#endif
> -
> -#ifndef MODULE
> -#define SHIFT_PERCPU_PTR(var, offset) RELOC_HIDE(&per_cpu_var(var), (offset))
> -#define PER_CPU_ATTRIBUTES
> -#else
> +#ifdef MODULE
> /*
> * To calculate addresses of locally defined variables, GCC uses 32-bit
> * displacement from the GP. Which doesn't work for per cpu variables in
> @@ -39,40 +14,15 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
> * ldq instruction with a 'literal' relocation.
> */
> #define SHIFT_PERCPU_PTR(var, offset) ({ \
> - extern int simple_identifier_##var(void); \
> unsigned long __ptr, tmp_gp; \
> asm ( "br %1, 1f \n\
> 1: ldgp %1, 0(%1) \n\
> ldq %0, per_cpu__" #var"(%1)\t!literal" \
> : "=&r"(__ptr), "=&r"(tmp_gp)); \
> (typeof(&per_cpu_var(var)))(__ptr + (offset)); })
> -
> -#define PER_CPU_ATTRIBUTES __used
> -
> #endif /* MODULE */
> -
> -/*
> - * A percpu variable may point to a discarded regions. The following are
> - * established ways to produce a usable pointer from the percpu variable
> - * offset.
> - */
> -#define per_cpu(var, cpu) \
> - (*SHIFT_PERCPU_PTR(var, per_cpu_offset(cpu)))
> -#define __get_cpu_var(var) \
> - (*SHIFT_PERCPU_PTR(var, my_cpu_offset))
> -#define __raw_get_cpu_var(var) \
> - (*SHIFT_PERCPU_PTR(var, __my_cpu_offset))
> -
> -#else /* ! SMP */
> -
> -#define per_cpu(var, cpu) (*((void)(cpu), &per_cpu_var(var)))
> -#define __get_cpu_var(var) per_cpu_var(var)
> -#define __raw_get_cpu_var(var) per_cpu_var(var)
> -
> -#define PER_CPU_ATTRIBUTES
> -
> #endif /* SMP */
>
> -#define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu_var(name)
> +#include <asm-generic/percpu.h>
>
> #endif /* __ALPHA_PERCPU_H */
> diff --git a/arch/s390/include/asm/percpu.h b/arch/s390/include/asm/percpu.h
> index 408d60b..6a71d73 100644
> --- a/arch/s390/include/asm/percpu.h
> +++ b/arch/s390/include/asm/percpu.h
> @@ -13,20 +13,18 @@
> */
> #if defined(__s390x__) && defined(MODULE)
>
> -#define SHIFT_PERCPU_PTR(ptr,offset) (({ \
> - extern int simple_identifier_##var(void); \
> +#define SHIFT_PERCPU_PTR(var,offset) (({ \
> unsigned long *__ptr; \
> asm ( "larl %0, %1@GOTENT" \
> - : "=a" (__ptr) : "X" (ptr) ); \
> - (typeof(ptr))((*__ptr) + (offset)); }))
> + : "=a" (__ptr) : "X" (&per_cpu_var(var)) ); \
> + (typeof(&per_cpu_var(var)))((*__ptr) + (offset)); }))
>
> #else
>
> -#define SHIFT_PERCPU_PTR(ptr, offset) (({ \
> - extern int simple_identifier_##var(void); \
> +#define SHIFT_PERCPU_PTR(var, offset) (({ \
> unsigned long __ptr; \
> - asm ( "" : "=a" (__ptr) : "0" (ptr) ); \
> - (typeof(ptr)) (__ptr + (offset)); }))
> + asm ( "" : "=a" (__ptr) : "0" (&per_cpu_var(var)) ); \
> + (typeof(&per_cpu_var(var))) (__ptr + (offset)); }))
>
> #endif
>
> diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
> index 00f45ff..7463700 100644
> --- a/include/asm-generic/percpu.h
> +++ b/include/asm-generic/percpu.h
> @@ -42,10 +42,10 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
> /*
> * Add a offset to a pointer but keep the pointer as is.
> *
> - * Only S390 provides its own means of moving the pointer.
> + * Only S390 and Alpha provide their own means of moving the pointer.
> */
> #ifndef SHIFT_PERCPU_PTR
> -#define SHIFT_PERCPU_PTR(__p, __offset) RELOC_HIDE((__p), (__offset))
> +#define SHIFT_PERCPU_PTR(__v, __offset) RELOC_HIDE(&per_cpu_var(__v), (__offset))
> #endif
>
> /*
> @@ -54,11 +54,11 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
> * offset.
> */
> #define per_cpu(var, cpu) \
> - (*SHIFT_PERCPU_PTR(&per_cpu_var(var), per_cpu_offset(cpu)))
> + (*SHIFT_PERCPU_PTR(var, per_cpu_offset(cpu)))
> #define __get_cpu_var(var) \
> - (*SHIFT_PERCPU_PTR(&per_cpu_var(var), my_cpu_offset))
> + (*SHIFT_PERCPU_PTR(var, my_cpu_offset))
> #define __raw_get_cpu_var(var) \
> - (*SHIFT_PERCPU_PTR(&per_cpu_var(var), __my_cpu_offset))
> + (*SHIFT_PERCPU_PTR(var, __my_cpu_offset))
>
>
> #ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
>
On Fri, Apr 10, 2009 at 09:21:08AM -0700, Linus Torvalds wrote:
>
> Hmm. I certainly personally _much_ prefer this version.
>
> It looks like this actually simplifies things for S390 too (no more
> simple_identifier_##var games), and generally just looks better. And
> seeing that the S390 special case is no longer S390-specific is also a
> good sign, imho.
simple_identifier_##var has been debris from back when Ingo forced
s390 to use asm-generic/percpu.h; if you look at it, you'll see that
a) it hadn't worked (look at the argument list carefully) and
b) get_cpu_var() has the working version anyway
Less obvious part is that if it actually *would* work, we'd be screwed -
unlike get_cpu_var() this sucker is called for things that are not
simple variables; didn't happen for alpha, but did for s390.
IOW, that's just an old piece of junk that got removed now.
On Fri, Apr 10, 2009 at 05:50:30PM +0100, Al Viro wrote:
> On Fri, Apr 10, 2009 at 09:21:08AM -0700, Linus Torvalds wrote:
> >
> > Hmm. I certainly personally _much_ prefer this version.
> >
> > It looks like this actually simplifies things for S390 too (no more
> > simple_identifier_##var games), and generally just looks better. And
> > seeing that the S390 special case is no longer S390-specific is also a
> > good sign, imho.
>
> simple_identifier_##var has been debris from back when Ingo forced
> s390 to use asm-generic/percpu.h; if you look at it, you'll see that
> a) it hadn't worked (look at the argument list carefully) and
> b) get_cpu_var() has the working version anyway
>
> Less obvious part is that if it actually *would* work, we'd be screwed -
> unlike get_cpu_var() this sucker is called for things that are not
> simple variables; didn't happen for alpha, but did for s390.
>
> IOW, that's just an old piece of junk that got removed now.
Fsck... It breaks per_cpu_ptr() :-/ OK, back to square one, then...
On Fri, Apr 10, 2009 at 06:05:07PM +0100, Al Viro wrote:
> On Fri, Apr 10, 2009 at 05:50:30PM +0100, Al Viro wrote:
> > On Fri, Apr 10, 2009 at 09:21:08AM -0700, Linus Torvalds wrote:
> > >
> > > Hmm. I certainly personally _much_ prefer this version.
> > >
> > > It looks like this actually simplifies things for S390 too (no more
> > > simple_identifier_##var games), and generally just looks better. And
> > > seeing that the S390 special case is no longer S390-specific is also a
> > > good sign, imho.
> >
> > simple_identifier_##var has been debris from back when Ingo forced
> > s390 to use asm-generic/percpu.h; if you look at it, you'll see that
> > a) it hadn't worked (look at the argument list carefully) and
> > b) get_cpu_var() has the working version anyway
> >
> > Less obvious part is that if it actually *would* work, we'd be screwed -
> > unlike get_cpu_var() this sucker is called for things that are not
> > simple variables; didn't happen for alpha, but did for s390.
> >
> > IOW, that's just an old piece of junk that got removed now.
>
> Fsck... It breaks per_cpu_ptr() :-/ OK, back to square one, then...
... and the reason we didn't step into that one on alpha et al. is that it
doesn't have HAVE_DYNAMIC_PER_CPU_AREA - only x86 does. It's *still*
fixable, but we need a different primitive for that one.
diff --git a/arch/alpha/include/asm/percpu.h b/arch/alpha/include/asm/percpu.h
index 3495e8e..1c52a2d 100644
--- a/arch/alpha/include/asm/percpu.h
+++ b/arch/alpha/include/asm/percpu.h
@@ -3,33 +3,8 @@
#include <linux/compiler.h>
#include <linux/threads.h>
-/*
- * Determine the real variable name from the name visible in the
- * kernel sources.
- */
-#define per_cpu_var(var) per_cpu__##var
-
#ifdef CONFIG_SMP
-
-/*
- * per_cpu_offset() is the offset that has to be added to a
- * percpu variable to get to the instance for a certain processor.
- */
-extern unsigned long __per_cpu_offset[NR_CPUS];
-
-#define per_cpu_offset(x) (__per_cpu_offset[x])
-
-#define __my_cpu_offset per_cpu_offset(raw_smp_processor_id())
-#ifdef CONFIG_DEBUG_PREEMPT
-#define my_cpu_offset per_cpu_offset(smp_processor_id())
-#else
-#define my_cpu_offset __my_cpu_offset
-#endif
-
-#ifndef MODULE
-#define SHIFT_PERCPU_PTR(var, offset) RELOC_HIDE(&per_cpu_var(var), (offset))
-#define PER_CPU_ATTRIBUTES
-#else
+#ifdef MODULE
/*
* To calculate addresses of locally defined variables, GCC uses 32-bit
* displacement from the GP. Which doesn't work for per cpu variables in
@@ -39,40 +14,15 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
* ldq instruction with a 'literal' relocation.
*/
#define SHIFT_PERCPU_PTR(var, offset) ({ \
- extern int simple_identifier_##var(void); \
unsigned long __ptr, tmp_gp; \
asm ( "br %1, 1f \n\
1: ldgp %1, 0(%1) \n\
ldq %0, per_cpu__" #var"(%1)\t!literal" \
: "=&r"(__ptr), "=&r"(tmp_gp)); \
(typeof(&per_cpu_var(var)))(__ptr + (offset)); })
-
-#define PER_CPU_ATTRIBUTES __used
-
#endif /* MODULE */
-
-/*
- * A percpu variable may point to a discarded regions. The following are
- * established ways to produce a usable pointer from the percpu variable
- * offset.
- */
-#define per_cpu(var, cpu) \
- (*SHIFT_PERCPU_PTR(var, per_cpu_offset(cpu)))
-#define __get_cpu_var(var) \
- (*SHIFT_PERCPU_PTR(var, my_cpu_offset))
-#define __raw_get_cpu_var(var) \
- (*SHIFT_PERCPU_PTR(var, __my_cpu_offset))
-
-#else /* ! SMP */
-
-#define per_cpu(var, cpu) (*((void)(cpu), &per_cpu_var(var)))
-#define __get_cpu_var(var) per_cpu_var(var)
-#define __raw_get_cpu_var(var) per_cpu_var(var)
-
-#define PER_CPU_ATTRIBUTES
-
#endif /* SMP */
-#define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu_var(name)
+#include <asm-generic/percpu.h>
#endif /* __ALPHA_PERCPU_H */
diff --git a/arch/s390/include/asm/percpu.h b/arch/s390/include/asm/percpu.h
index 408d60b..6a71d73 100644
--- a/arch/s390/include/asm/percpu.h
+++ b/arch/s390/include/asm/percpu.h
@@ -13,20 +13,18 @@
*/
#if defined(__s390x__) && defined(MODULE)
-#define SHIFT_PERCPU_PTR(ptr,offset) (({ \
- extern int simple_identifier_##var(void); \
+#define SHIFT_PERCPU_PTR(var,offset) (({ \
unsigned long *__ptr; \
asm ( "larl %0, %1@GOTENT" \
- : "=a" (__ptr) : "X" (ptr) ); \
- (typeof(ptr))((*__ptr) + (offset)); }))
+ : "=a" (__ptr) : "X" (&per_cpu_var(var)) ); \
+ (typeof(&per_cpu_var(var)))((*__ptr) + (offset)); }))
#else
-#define SHIFT_PERCPU_PTR(ptr, offset) (({ \
- extern int simple_identifier_##var(void); \
+#define SHIFT_PERCPU_PTR(var, offset) (({ \
unsigned long __ptr; \
- asm ( "" : "=a" (__ptr) : "0" (ptr) ); \
- (typeof(ptr)) (__ptr + (offset)); }))
+ asm ( "" : "=a" (__ptr) : "0" (&per_cpu_var(var)) ); \
+ (typeof(&per_cpu_var(var))) (__ptr + (offset)); }))
#endif
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index aee103b..5ff94d2 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -202,4 +202,6 @@ DECLARE_PER_CPU(unsigned long, this_cpu_off);
#endif /* !CONFIG_SMP */
+#define PERCPU_PTR RELOC_HIDE
+
#endif /* _ASM_X86_PERCPU_H */
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index 00f45ff..4543078 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -42,10 +42,10 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
/*
* Add a offset to a pointer but keep the pointer as is.
*
- * Only S390 provides its own means of moving the pointer.
+ * Only S390 and Alpha provide their own means of moving the pointer.
*/
#ifndef SHIFT_PERCPU_PTR
-#define SHIFT_PERCPU_PTR(__p, __offset) RELOC_HIDE((__p), (__offset))
+#define SHIFT_PERCPU_PTR(__v, __offset) RELOC_HIDE((&per_cpu_var(__v)), (__offset))
#endif
/*
@@ -54,11 +54,11 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
* offset.
*/
#define per_cpu(var, cpu) \
- (*SHIFT_PERCPU_PTR(&per_cpu_var(var), per_cpu_offset(cpu)))
+ (*SHIFT_PERCPU_PTR(var, per_cpu_offset(cpu)))
#define __get_cpu_var(var) \
- (*SHIFT_PERCPU_PTR(&per_cpu_var(var), my_cpu_offset))
+ (*SHIFT_PERCPU_PTR(var, my_cpu_offset))
#define __raw_get_cpu_var(var) \
- (*SHIFT_PERCPU_PTR(&per_cpu_var(var), __my_cpu_offset))
+ (*SHIFT_PERCPU_PTR(var, __my_cpu_offset))
#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index ee5615d..11a8a11 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -120,7 +120,7 @@ extern ssize_t __init pcpu_embed_first_chunk(
* dynamically allocated. Non-atomic access to the current CPU's
* version should probably be combined with get_cpu()/put_cpu().
*/
-#define per_cpu_ptr(ptr, cpu) SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))
+#define per_cpu_ptr(ptr, cpu) PERCPU_PTR((ptr), per_cpu_offset((cpu)))
extern void *__alloc_reserved_percpu(size_t size, size_t align);
On Fri, Apr 10, 2009 at 06:14:15PM +0100, Al Viro wrote:
> ... and the reason we didn't step into that one on alpha et.al. is that it
> doesn't have HAVE_DYNAMIC_PER_CPU_AREA - only x86 does. It's *still*
> fixable, but we need a different primitive for that one.
NACKed, better patch appended, which makes &per_cpu__foo a real pointer
without asm hacks.
Actually, all of this has been discussed on lkml; here is the latest
variant that makes everybody more or less happy, at least there are
no objections from percpu folks and Martin (similar fix should work
for s390 as well).
Ivan.
---
alpha: fix for static percpu variables
Work around 32-bit GP-relative addressing of local per-cpu variables
in modules. This is needed to make the dynamic per-cpu allocator
work on alpha.
Signed-off-by: Ivan Kokshaysky <[email protected]>
---
arch/alpha/Makefile | 1 +
arch/alpha/include/asm/percpu.h | 76 +++++++--------------------------------
arch/alpha/kernel/module.lds | 3 ++
include/linux/percpu.h | 2 +
4 files changed, 19 insertions(+), 63 deletions(-)
diff --git a/arch/alpha/Makefile b/arch/alpha/Makefile
index 4759fe7..d7b3a3f 100644
--- a/arch/alpha/Makefile
+++ b/arch/alpha/Makefile
@@ -11,6 +11,7 @@
NM := $(NM) -B
LDFLAGS_vmlinux := -static -N #-relax
+LDFLAGS_MODULE += -T $(srctree)/arch/alpha/kernel/module.lds
CHECKFLAGS += -D__alpha__ -m64
cflags-y := -pipe -mno-fp-regs -ffixed-8 -msmall-data
cflags-y += $(call cc-option, -fno-jump-tables)
diff --git a/arch/alpha/include/asm/percpu.h b/arch/alpha/include/asm/percpu.h
index 3495e8e..d49b8d4 100644
--- a/arch/alpha/include/asm/percpu.h
+++ b/arch/alpha/include/asm/percpu.h
@@ -1,78 +1,28 @@
#ifndef __ALPHA_PERCPU_H
#define __ALPHA_PERCPU_H
#include <linux/compiler.h>
-#include <linux/threads.h>
-/*
- * Determine the real variable name from the name visible in the
- * kernel sources.
- */
-#define per_cpu_var(var) per_cpu__##var
-
-#ifdef CONFIG_SMP
-
-/*
- * per_cpu_offset() is the offset that has to be added to a
- * percpu variable to get to the instance for a certain processor.
- */
-extern unsigned long __per_cpu_offset[NR_CPUS];
-
-#define per_cpu_offset(x) (__per_cpu_offset[x])
-
-#define __my_cpu_offset per_cpu_offset(raw_smp_processor_id())
-#ifdef CONFIG_DEBUG_PREEMPT
-#define my_cpu_offset per_cpu_offset(smp_processor_id())
-#else
-#define my_cpu_offset __my_cpu_offset
-#endif
-
-#ifndef MODULE
-#define SHIFT_PERCPU_PTR(var, offset) RELOC_HIDE(&per_cpu_var(var), (offset))
-#define PER_CPU_ATTRIBUTES
-#else
+#if defined(MODULE) && defined(CONFIG_SMP)
/*
* To calculate addresses of locally defined variables, GCC uses 32-bit
* displacement from the GP. Which doesn't work for per cpu variables in
* modules, as an offset to the kernel per cpu area is way above 4G.
*
* This forces allocation of a GOT entry for per cpu variable using
- * ldq instruction with a 'literal' relocation.
+ * "weak" attribute (as the compiler must assume an external reference);
+ * to make this work we have to neutralize possible "static" storage
+ * class specifier with a dummy variable.
*/
-#define SHIFT_PERCPU_PTR(var, offset) ({ \
- extern int simple_identifier_##var(void); \
- unsigned long __ptr, tmp_gp; \
- asm ( "br %1, 1f \n\
- 1: ldgp %1, 0(%1) \n\
- ldq %0, per_cpu__" #var"(%1)\t!literal" \
- : "=&r"(__ptr), "=&r"(tmp_gp)); \
- (typeof(&per_cpu_var(var)))(__ptr + (offset)); })
-
-#define PER_CPU_ATTRIBUTES __used
-
-#endif /* MODULE */
-
-/*
- * A percpu variable may point to a discarded regions. The following are
- * established ways to produce a usable pointer from the percpu variable
- * offset.
- */
-#define per_cpu(var, cpu) \
- (*SHIFT_PERCPU_PTR(var, per_cpu_offset(cpu)))
-#define __get_cpu_var(var) \
- (*SHIFT_PERCPU_PTR(var, my_cpu_offset))
-#define __raw_get_cpu_var(var) \
- (*SHIFT_PERCPU_PTR(var, __my_cpu_offset))
-
-#else /* ! SMP */
-
-#define per_cpu(var, cpu) (*((void)(cpu), &per_cpu_var(var)))
-#define __get_cpu_var(var) per_cpu_var(var)
-#define __raw_get_cpu_var(var) per_cpu_var(var)
-
-#define PER_CPU_ATTRIBUTES
+#define DEFINE_PER_CPU_SECTION(type, name, section) \
+ __attribute__((__section__(".discard"), __unused__)) \
+ char __dummy__##name; \
+ __attribute__((__section__(".discard"))) \
+ char __per_cpu_multiple_def__##name; \
+ __attribute__((__section__(PER_CPU_BASE_SECTION section))) \
+ __weak __typeof__(type) per_cpu__##name
-#endif /* SMP */
+#endif /* MODULE && SMP */
-#define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu_var(name)
+#include <asm-generic/percpu.h>
#endif /* __ALPHA_PERCPU_H */
diff --git a/arch/alpha/kernel/module.lds b/arch/alpha/kernel/module.lds
new file mode 100644
index 0000000..bffc6d3
--- /dev/null
+++ b/arch/alpha/kernel/module.lds
@@ -0,0 +1,3 @@
+SECTIONS {
+ /DISCARD/ : { *(.discard) }
+}
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index ee5615d..865f749 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -33,9 +33,11 @@
#endif
+#ifndef DEFINE_PER_CPU_SECTION
#define DEFINE_PER_CPU_SECTION(type, name, section) \
__attribute__((__section__(PER_CPU_BASE_SECTION section))) \
PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name
+#endif
#define DEFINE_PER_CPU(type, name) \
DEFINE_PER_CPU_SECTION(type, name, "")
On Fri, 10 Apr 2009, Ivan Kokshaysky wrote:
>
> Actually, all of this has been discussed on lkml; here is the latest
> variant that makes everybody more or less happy, at least there are
> no objections from percpu folks and Martin (similar fix should work
> for s390 as well).
Is there any reason why this version of DEFINE_PER_CPU_SECTION wouldn't
work on _any_ architecture? IOW, do we even need the #ifdef's and
per-arch #define?
Linus
Linus Torvalds wrote:
>
> On Fri, 10 Apr 2009, Ivan Kokshaysky wrote:
>> Actually, all of this has been discussed on lkml; here is the latest
>> variant that makes everybody more or less happy, at least there are
>> no objections from percpu folks and Martin (similar fix should work
>> for s390 as well).
>
> Is there any reason why this version of DEFINE_PER_CPU_SECTION wouldn't
> work on _any_ architecture? IOW, do we even need the #ifdef's and
> per-arch #define?
It should work for all archs but only alpha and s390 require __weak__
and other archs can use actual static or global definitions, but then
again we'll need to add the __per_cpu_multiple_def__ thing to make sure
no two static definitions clash anyway, so there isn't much point in
keeping things separate.
I was waiting for responses on the original thread. Is everyone okay
with having the 'static per-cpu variables in different compile units
can't have the same name' restriction?
Thanks.
--
tejun
Long term, I think what Ivan proposed with a bit of addition so that
combination of DECLARE_PER_CPU() and static DEFINE_PER_CPU() triggers
compile error but I think it's better to do it when converting non-x86
archs to dynamic percpu allocator which is scheduled for the next
merge window. For the time being, how about just moving the generic
percpu_*() accessors to linux/percpu.h? asm-generic/percpu.h is meant
to carry generic stuff for low level stuff - declarations, definitions
and pointer offset calculation and so on but not for generic
interface.
I currently can't build or test stuff so the patch is not tested at
all. Ingo, can you please test whether this would work?
Thanks.
---
include/asm-generic/percpu.h | 52 -------------------------------------------
include/linux/percpu.h | 52 +++++++++++++++++++++++++++++++++++++++++++
2 files changed, 52 insertions(+), 52 deletions(-)
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index 00f45ff..b0e63c6 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -80,56 +80,4 @@ extern void setup_per_cpu_areas(void);
#define DECLARE_PER_CPU(type, name) extern PER_CPU_ATTRIBUTES \
__typeof__(type) per_cpu_var(name)
-/*
- * Optional methods for optimized non-lvalue per-cpu variable access.
- *
- * @var can be a percpu variable or a field of it and its size should
- * equal char, int or long. percpu_read() evaluates to a lvalue and
- * all others to void.
- *
- * These operations are guaranteed to be atomic w.r.t. preemption.
- * The generic versions use plain get/put_cpu_var(). Archs are
- * encouraged to implement single-instruction alternatives which don't
- * require preemption protection.
- */
-#ifndef percpu_read
-# define percpu_read(var) \
- ({ \
- typeof(per_cpu_var(var)) __tmp_var__; \
- __tmp_var__ = get_cpu_var(var); \
- put_cpu_var(var); \
- __tmp_var__; \
- })
-#endif
-
-#define __percpu_generic_to_op(var, val, op) \
-do { \
- get_cpu_var(var) op val; \
- put_cpu_var(var); \
-} while (0)
-
-#ifndef percpu_write
-# define percpu_write(var, val) __percpu_generic_to_op(var, (val), =)
-#endif
-
-#ifndef percpu_add
-# define percpu_add(var, val) __percpu_generic_to_op(var, (val), +=)
-#endif
-
-#ifndef percpu_sub
-# define percpu_sub(var, val) __percpu_generic_to_op(var, (val), -=)
-#endif
-
-#ifndef percpu_and
-# define percpu_and(var, val) __percpu_generic_to_op(var, (val), &=)
-#endif
-
-#ifndef percpu_or
-# define percpu_or(var, val) __percpu_generic_to_op(var, (val), |=)
-#endif
-
-#ifndef percpu_xor
-# define percpu_xor(var, val) __percpu_generic_to_op(var, (val), ^=)
-#endif
-
#endif /* _ASM_GENERIC_PERCPU_H_ */
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index ee5615d..cfda2d5 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -168,4 +168,56 @@ static inline void free_percpu(void *p)
#define alloc_percpu(type) (type *)__alloc_percpu(sizeof(type), \
__alignof__(type))
+/*
+ * Optional methods for optimized non-lvalue per-cpu variable access.
+ *
+ * @var can be a percpu variable or a field of it and its size should
+ * equal char, int or long. percpu_read() evaluates to a lvalue and
+ * all others to void.
+ *
+ * These operations are guaranteed to be atomic w.r.t. preemption.
+ * The generic versions use plain get/put_cpu_var(). Archs are
+ * encouraged to implement single-instruction alternatives which don't
+ * require preemption protection.
+ */
+#ifndef percpu_read
+# define percpu_read(var) \
+ ({ \
+ typeof(per_cpu_var(var)) __tmp_var__; \
+ __tmp_var__ = get_cpu_var(var); \
+ put_cpu_var(var); \
+ __tmp_var__; \
+ })
+#endif
+
+#define __percpu_generic_to_op(var, val, op) \
+do { \
+ get_cpu_var(var) op val; \
+ put_cpu_var(var); \
+} while (0)
+
+#ifndef percpu_write
+# define percpu_write(var, val) __percpu_generic_to_op(var, (val), =)
+#endif
+
+#ifndef percpu_add
+# define percpu_add(var, val) __percpu_generic_to_op(var, (val), +=)
+#endif
+
+#ifndef percpu_sub
+# define percpu_sub(var, val) __percpu_generic_to_op(var, (val), -=)
+#endif
+
+#ifndef percpu_and
+# define percpu_and(var, val) __percpu_generic_to_op(var, (val), &=)
+#endif
+
+#ifndef percpu_or
+# define percpu_or(var, val) __percpu_generic_to_op(var, (val), |=)
+#endif
+
+#ifndef percpu_xor
+# define percpu_xor(var, val) __percpu_generic_to_op(var, (val), ^=)
+#endif
+
#endif /* __LINUX_PERCPU_H */
* Tejun Heo <[email protected]> wrote:
> Long term, I think what Ivan proposed with a bit of addition so
> that combination of DECLARE_PER_CPU() and static DEFINE_PER_CPU()
> triggers compile error but I think it's better to do it when
> converting non-x86 archs to dynamic percpu allocator which is
> scheduled for the next merge window. For the time being, how
> about just moving the generic percpu_*() accessors to
> linux/percpu.h? asm-generic/percpu.h is meant to carry generic
> stuff for low level stuff - declarations, definitions and pointer
> offset calculation and so on but not for generic interface.
>
> I currently can't build or test stuff so the patch is not tested
> at all. Ingo, can you please test whether this would work?
Yes - but i'm somewhat confused - i frequently cross-built Alpha and
other architectures as well.
So exactly what problem do we have here - has some devel tree grown
new use of these APIs in generic code, without waiting for all
arches to be properly converted?
(sounds of rummaging around)
Oh drat:
| commit 4e69489a0ac11a9b62a25923975bfc370a30eae5
| Author: Eric Dumazet <[email protected]>
| Date: Sat Apr 4 16:41:09 2009 -0700
|
| socket: use percpu_add() while updating sockets_in_use
Now i understand the rush ...
Yes, moving the APIs to the generic header should properly expose
the wrapped default implementations. I'll test your patch in a
minute.
Ingo
* Ingo Molnar <[email protected]> wrote:
> Now i understand the rush ...
>
> Yes, moving the APIs to the generic header should properly expose
> the wrapped default implementations. I'll test your patch in a
> minute.
Alpha builds fine with your patch:
LD .tmp_vmlinux2
KSYM .tmp_kallsyms2.S
AS .tmp_kallsyms2.o
LD vmlinux
SYSMAP System.map
Linus,
If that patch is OK to you too, please pull the latest
core-fixes-for-linus git tree from:
git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip.git core-fixes-for-linus
it includes this fix and a pending mutex fixlet.
Thanks,
Ingo
------------------>
Heiko Carstens (1):
mutex: have non-spinning mutexes on s390 by default
Tejun Heo (1):
percpu: unbreak alpha percpu
arch/Kconfig | 3 ++
arch/s390/Kconfig | 1 +
include/asm-generic/percpu.h | 52 ------------------------------------------
include/linux/percpu.h | 52 ++++++++++++++++++++++++++++++++++++++++++
kernel/mutex.c | 3 +-
5 files changed, 58 insertions(+), 53 deletions(-)
diff --git a/arch/Kconfig b/arch/Kconfig
index dc81b34..78a35e9 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -109,3 +109,6 @@ config HAVE_CLK
config HAVE_DMA_API_DEBUG
bool
+
+config HAVE_DEFAULT_NO_SPIN_MUTEXES
+ bool
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index dcb667c..2eca5fe 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -82,6 +82,7 @@ config S390
select USE_GENERIC_SMP_HELPERS if SMP
select HAVE_SYSCALL_WRAPPERS
select HAVE_FUNCTION_TRACER
+ select HAVE_DEFAULT_NO_SPIN_MUTEXES
select HAVE_OPROFILE
select HAVE_KPROBES
select HAVE_KRETPROBES
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index 00f45ff..b0e63c6 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -80,56 +80,4 @@ extern void setup_per_cpu_areas(void);
#define DECLARE_PER_CPU(type, name) extern PER_CPU_ATTRIBUTES \
__typeof__(type) per_cpu_var(name)
-/*
- * Optional methods for optimized non-lvalue per-cpu variable access.
- *
- * @var can be a percpu variable or a field of it and its size should
- * equal char, int or long. percpu_read() evaluates to a lvalue and
- * all others to void.
- *
- * These operations are guaranteed to be atomic w.r.t. preemption.
- * The generic versions use plain get/put_cpu_var(). Archs are
- * encouraged to implement single-instruction alternatives which don't
- * require preemption protection.
- */
-#ifndef percpu_read
-# define percpu_read(var) \
- ({ \
- typeof(per_cpu_var(var)) __tmp_var__; \
- __tmp_var__ = get_cpu_var(var); \
- put_cpu_var(var); \
- __tmp_var__; \
- })
-#endif
-
-#define __percpu_generic_to_op(var, val, op) \
-do { \
- get_cpu_var(var) op val; \
- put_cpu_var(var); \
-} while (0)
-
-#ifndef percpu_write
-# define percpu_write(var, val) __percpu_generic_to_op(var, (val), =)
-#endif
-
-#ifndef percpu_add
-# define percpu_add(var, val) __percpu_generic_to_op(var, (val), +=)
-#endif
-
-#ifndef percpu_sub
-# define percpu_sub(var, val) __percpu_generic_to_op(var, (val), -=)
-#endif
-
-#ifndef percpu_and
-# define percpu_and(var, val) __percpu_generic_to_op(var, (val), &=)
-#endif
-
-#ifndef percpu_or
-# define percpu_or(var, val) __percpu_generic_to_op(var, (val), |=)
-#endif
-
-#ifndef percpu_xor
-# define percpu_xor(var, val) __percpu_generic_to_op(var, (val), ^=)
-#endif
-
#endif /* _ASM_GENERIC_PERCPU_H_ */
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index ee5615d..cfda2d5 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -168,4 +168,56 @@ static inline void free_percpu(void *p)
#define alloc_percpu(type) (type *)__alloc_percpu(sizeof(type), \
__alignof__(type))
+/*
+ * Optional methods for optimized non-lvalue per-cpu variable access.
+ *
+ * @var can be a percpu variable or a field of it and its size should
+ * equal char, int or long. percpu_read() evaluates to a lvalue and
+ * all others to void.
+ *
+ * These operations are guaranteed to be atomic w.r.t. preemption.
+ * The generic versions use plain get/put_cpu_var(). Archs are
+ * encouraged to implement single-instruction alternatives which don't
+ * require preemption protection.
+ */
+#ifndef percpu_read
+# define percpu_read(var) \
+ ({ \
+ typeof(per_cpu_var(var)) __tmp_var__; \
+ __tmp_var__ = get_cpu_var(var); \
+ put_cpu_var(var); \
+ __tmp_var__; \
+ })
+#endif
+
+#define __percpu_generic_to_op(var, val, op) \
+do { \
+ get_cpu_var(var) op val; \
+ put_cpu_var(var); \
+} while (0)
+
+#ifndef percpu_write
+# define percpu_write(var, val) __percpu_generic_to_op(var, (val), =)
+#endif
+
+#ifndef percpu_add
+# define percpu_add(var, val) __percpu_generic_to_op(var, (val), +=)
+#endif
+
+#ifndef percpu_sub
+# define percpu_sub(var, val) __percpu_generic_to_op(var, (val), -=)
+#endif
+
+#ifndef percpu_and
+# define percpu_and(var, val) __percpu_generic_to_op(var, (val), &=)
+#endif
+
+#ifndef percpu_or
+# define percpu_or(var, val) __percpu_generic_to_op(var, (val), |=)
+#endif
+
+#ifndef percpu_xor
+# define percpu_xor(var, val) __percpu_generic_to_op(var, (val), ^=)
+#endif
+
#endif /* __LINUX_PERCPU_H */
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 5d79781..507cf2b 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -148,7 +148,8 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
preempt_disable();
mutex_acquire(&lock->dep_map, subclass, 0, ip);
-#if defined(CONFIG_SMP) && !defined(CONFIG_DEBUG_MUTEXES)
+#if defined(CONFIG_SMP) && !defined(CONFIG_DEBUG_MUTEXES) && \
+ !defined(CONFIG_HAVE_DEFAULT_NO_SPIN_MUTEXES)
/*
* Optimistic spinning.
*
On Fri, 10 Apr 2009 09:21:08 -0700 (PDT)
Linus Torvalds <[email protected]> wrote:
>
> Hmm. I certainly personally _much_ prefer this version.
>
> It looks like this actually simplifies things for S390 too (no more
> simple_identifier_##var games), and generally just looks better. And
> seeing that the S390 special case is no longer S390-specific is also a
> good sign, imho.
>
> Sorry for including the whole message, but I'm adding s390 and percpu
> people to the cc. Guys - original uncorrupted patch on lkml.
> > diff --git a/arch/s390/include/asm/percpu.h b/arch/s390/include/asm/percpu.h
> > index 408d60b..6a71d73 100644
> > --- a/arch/s390/include/asm/percpu.h
> > +++ b/arch/s390/include/asm/percpu.h
> > @@ -13,20 +13,18 @@
> > */
> > #if defined(__s390x__) && defined(MODULE)
> >
> > -#define SHIFT_PERCPU_PTR(ptr,offset) (({ \
> > - extern int simple_identifier_##var(void); \
> > +#define SHIFT_PERCPU_PTR(var,offset) (({ \
> > unsigned long *__ptr; \
> > asm ( "larl %0, %1@GOTENT" \
> > - : "=a" (__ptr) : "X" (ptr) ); \
> > - (typeof(ptr))((*__ptr) + (offset)); }))
> > + : "=a" (__ptr) : "X" (&per_cpu_var(var)) ); \
> > + (typeof(&per_cpu_var(var)))((*__ptr) + (offset)); }))
> >
> > #else
> >
> > -#define SHIFT_PERCPU_PTR(ptr, offset) (({ \
> > - extern int simple_identifier_##var(void); \
> > +#define SHIFT_PERCPU_PTR(var, offset) (({ \
> > unsigned long __ptr; \
> > - asm ( "" : "=a" (__ptr) : "0" (ptr) ); \
> > - (typeof(ptr)) (__ptr + (offset)); }))
> > + asm ( "" : "=a" (__ptr) : "0" (&per_cpu_var(var)) ); \
> > + (typeof(&per_cpu_var(var))) (__ptr + (offset)); }))
> >
> > #endif
> >
I would like to get rid of that SHIFT_PERCPU_PTR mess. The patch from
Ivan will allow this, it uses a dummy variable to void the effect of a
static modifier for percpu variables in modules. The percpu variable
itself will be defined non-static, my gut feeling is that this is a
dirty little trick that might bite us in the future.
Another solution which I personally would prefer is to ban the use of
static percpu variables. Then the compiler will use the GOT to get the
address of percpu variables without any dirty tricks.
--
blue skies,
Martin.
"Reality continues to ruin my life." - Calvin.
Hello, Martin.
Martin Schwidefsky wrote:
> I would like to get rid of that SHIFT_PERCPU_PTR mess. The patch from
> Ivan will allow this, it uses a dummy variable to void the effect of a
> static modifier for percpu variables in modules. The percpu variable
> itself will be defined non-static, my gut feeling is that this is a
> dirty little trick that might bite us in the future.
> Another solution which I personally would prefer is to ban the use of
> static percpu variables. Then the compiler will use the GOT to get the
> address of percpu variables without any dirty tricks.
Hmmm... even if we can make combination of DECLARE_PER_CPU() and
static DEFINE_PER_CPU() trigger compile error? That pretty much
implements most of what the programmer intends by "static" but yeah
it's a bit convoluted.
Thanks.
--
tejun
On Fri, Apr 10, 2009 at 09:40:55PM +0200, Ingo Molnar wrote:
>
> * Ingo Molnar <[email protected]> wrote:
>
> > Now i understand the rush ...
> >
> > Yes, moving the APIs to the generic header should properly expose
> > the wrapped default implementations. I'll test your patch in a
> > minute.
>
> Alpha builds fine with your patch:
>
> LD .tmp_vmlinux2
> KSYM .tmp_kallsyms2.S
> AS .tmp_kallsyms2.o
> LD vmlinux
> SYSMAP System.map
>
> Linus,
>
> If that patch is OK to you too, please pull the latest
> core-fixes-for-linus git tree from:
>
> git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip.git core-fixes-for-linus
>
> it includes this fix and a pending mutex fixlet.
(Belated) ACK. As for unification of that stuff, I'll gladly leave that
as SEP...