The goal is to have vmstat_shepherd transfer the per-CPU
counters to the global counters remotely, i.e. from a
different CPU. For this, an atomic this_cpu_cmpxchg is
necessary.
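
As an illustration only (the function names below are
hypothetical, and the actual vmstat changes come later in
this series), an atomic this_cpu_cmpxchg lets a remote drain
race safely with the local fast path:

	/* Sketch, not part of this patch: local fast path on the
	 * counter's own CPU.  Retries if it races or is migrated
	 * between the read and the cmpxchg. */
	static s8 drain_local(s8 __percpu *counter)
	{
		s8 old;

		do {
			old = this_cpu_read(*counter);
		} while (this_cpu_cmpxchg(*counter, old, 0) != old);

		return old;
	}

	/* Sketch: remote drain, e.g. vmstat_shepherd running on a
	 * housekeeping CPU draining CPU @cpu's counter. */
	static s8 drain_remote(s8 __percpu *counter, int cpu)
	{
		s8 *ptr = per_cpu_ptr(counter, cpu);
		s8 old;

		do {
			old = READ_ONCE(*ptr);
		} while (cmpxchg(ptr, old, 0) != old);

		return old;
	}

The remote side relies on this_cpu_cmpxchg_{1,2,4,8} being
implemented with the fully ordered cmpxchg, so that both
paths contend on the same address with the same atomic.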
Following the kernel convention for cmpxchg/cmpxchg_local,
change arm64's this_cpu_cmpxchg_ helpers to be fully atomic,
and add this_cpu_cmpxchg_local_ helpers which keep the
relaxed form and are not guaranteed to be atomic with
respect to other CPUs.
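
Callers that only need atomicity against the local CPU
(preemption/interrupts) can keep the cheaper relaxed
operation via the _local variants. A minimal usage sketch,
assuming the generic this_cpu_cmpxchg_local() wrapper that
dispatches by size to these helpers (introduced separately
in this series); the example itself is hypothetical:

	/* A flag only ever accessed by its owning CPU, so no
	 * cross-CPU atomicity or ordering is needed. */
	static DEFINE_PER_CPU(int, busy_token);

	static bool claim_token(void)
	{
		return this_cpu_cmpxchg_local(busy_token, 0, 1) == 0;
	}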
Signed-off-by: Marcelo Tosatti <[email protected]>
Index: linux-vmstat-remote/arch/arm64/include/asm/percpu.h
===================================================================
--- linux-vmstat-remote.orig/arch/arm64/include/asm/percpu.h
+++ linux-vmstat-remote/arch/arm64/include/asm/percpu.h
@@ -232,13 +232,23 @@ PERCPU_RET_OP(add, add, ldadd)
 	_pcp_protect_return(xchg_relaxed, pcp, val)
 #define this_cpu_cmpxchg_1(pcp, o, n)	\
-	_pcp_protect_return(cmpxchg_relaxed, pcp, o, n)
+	_pcp_protect_return(cmpxchg, pcp, o, n)
 #define this_cpu_cmpxchg_2(pcp, o, n)	\
-	_pcp_protect_return(cmpxchg_relaxed, pcp, o, n)
+	_pcp_protect_return(cmpxchg, pcp, o, n)
 #define this_cpu_cmpxchg_4(pcp, o, n)	\
-	_pcp_protect_return(cmpxchg_relaxed, pcp, o, n)
+	_pcp_protect_return(cmpxchg, pcp, o, n)
 #define this_cpu_cmpxchg_8(pcp, o, n)	\
+	_pcp_protect_return(cmpxchg, pcp, o, n)
+
+#define this_cpu_cmpxchg_local_1(pcp, o, n)	\
 	_pcp_protect_return(cmpxchg_relaxed, pcp, o, n)
+#define this_cpu_cmpxchg_local_2(pcp, o, n)	\
+	_pcp_protect_return(cmpxchg_relaxed, pcp, o, n)
+#define this_cpu_cmpxchg_local_4(pcp, o, n)	\
+	_pcp_protect_return(cmpxchg_relaxed, pcp, o, n)
+#define this_cpu_cmpxchg_local_8(pcp, o, n)	\
+	_pcp_protect_return(cmpxchg_relaxed, pcp, o, n)
+
 #ifdef __KVM_NVHE_HYPERVISOR__
 extern unsigned long __hyp_per_cpu_offset(unsigned int cpu);