This series adds live patching support for powerpc (ppc64le only ATM).
It's unchanged since the version I posted on March 24, with the exception that
I've dropped the first patch, which was a testing-only patch.
If there's no further comments I'll put this in a topic branch in the next day
or two and Jiri & I will both merge that into next.
cheers
Michael Ellerman (5):
ftrace: Make ftrace_location_range() global
livepatch: Allow architectures to specify an alternate ftrace location
powerpc/livepatch: Add livepatch header
powerpc/livepatch: Add livepatch stack to struct thread_info
powerpc/livepatch: Add live patching support on ppc64le
arch/powerpc/Kconfig | 3 ++
arch/powerpc/include/asm/livepatch.h | 62 ++++++++++++++++++++++
arch/powerpc/include/asm/thread_info.h | 4 +-
arch/powerpc/kernel/asm-offsets.c | 4 ++
arch/powerpc/kernel/entry_64.S | 97 ++++++++++++++++++++++++++++++++++
arch/powerpc/kernel/irq.c | 3 ++
arch/powerpc/kernel/process.c | 6 ++-
arch/powerpc/kernel/setup_64.c | 17 +++---
include/linux/ftrace.h | 1 +
kernel/livepatch/core.c | 34 ++++++++++--
kernel/trace/ftrace.c | 14 ++++-
11 files changed, 232 insertions(+), 13 deletions(-)
create mode 100644 arch/powerpc/include/asm/livepatch.h
--
2.5.0
In order to support live patching on powerpc we would like to call
ftrace_location_range(), so make it global.
Signed-off-by: Torsten Duwe <[email protected]>
Signed-off-by: Balbir Singh <[email protected]>
Signed-off-by: Michael Ellerman <[email protected]>
---
include/linux/ftrace.h | 1 +
kernel/trace/ftrace.c | 14 +++++++++++++-
2 files changed, 14 insertions(+), 1 deletion(-)
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index dea12a6e413b..66a36a815f0a 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -455,6 +455,7 @@ int ftrace_update_record(struct dyn_ftrace *rec, int enable);
int ftrace_test_record(struct dyn_ftrace *rec, int enable);
void ftrace_run_stop_machine(int command);
unsigned long ftrace_location(unsigned long ip);
+unsigned long ftrace_location_range(unsigned long start, unsigned long end);
unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec);
unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec);
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index b1870fbd2b67..7e8d792da963 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1530,7 +1530,19 @@ static int ftrace_cmp_recs(const void *a, const void *b)
return 0;
}
-static unsigned long ftrace_location_range(unsigned long start, unsigned long end)
+/**
+ * ftrace_location_range - return the first address of a traced location
+ * if it touches the given ip range
+ * @start: start of range to search.
+ * @end: end of range to search (inclusive). @end points to the last byte
+ * to check.
+ *
+ * Returns rec->ip if the related ftrace location is at least partly within
+ * the given address range. That is, the first address of the instruction
+ * that is either a NOP or call to the function tracer. It checks the ftrace
+ * internal tables to determine if the address belongs or not.
+ */
+unsigned long ftrace_location_range(unsigned long start, unsigned long end)
{
struct ftrace_page *pg;
struct dyn_ftrace *rec;
--
2.5.0
In order to support live patching we need to maintain an alternate
stack of TOC & LR values. We use the base of the stack for this, and
store the "live patch stack pointer" in struct thread_info.
Unlike the other fields of thread_info, we can not statically initialise
that value, so it must be done at run time.
This patch just adds the code to support that, it is not enabled until
the next patch which actually adds live patch support.
Signed-off-by: Michael Ellerman <[email protected]>
Acked-by: Balbir Singh <[email protected]>
---
arch/powerpc/include/asm/livepatch.h | 8 ++++++++
arch/powerpc/include/asm/thread_info.h | 4 +++-
arch/powerpc/kernel/irq.c | 3 +++
arch/powerpc/kernel/process.c | 6 +++++-
arch/powerpc/kernel/setup_64.c | 17 ++++++++++-------
5 files changed, 29 insertions(+), 9 deletions(-)
diff --git a/arch/powerpc/include/asm/livepatch.h b/arch/powerpc/include/asm/livepatch.h
index ad36e8e34fa1..a402f7f94896 100644
--- a/arch/powerpc/include/asm/livepatch.h
+++ b/arch/powerpc/include/asm/livepatch.h
@@ -49,6 +49,14 @@ static inline unsigned long klp_get_ftrace_location(unsigned long faddr)
*/
return ftrace_location_range(faddr, faddr + 16);
}
+
+static inline void klp_init_thread_info(struct thread_info *ti)
+{
+ /* + 1 to account for STACK_END_MAGIC */
+ ti->livepatch_sp = (unsigned long *)(ti + 1) + 1;
+}
+#else
+static void klp_init_thread_info(struct thread_info *ti) { }
#endif /* CONFIG_LIVEPATCH */
#endif /* _ASM_POWERPC_LIVEPATCH_H */
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index 7efee4a3240b..8febc3f66d53 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -43,7 +43,9 @@ struct thread_info {
int preempt_count; /* 0 => preemptable,
<0 => BUG */
unsigned long local_flags; /* private flags for thread */
-
+#ifdef CONFIG_LIVEPATCH
+ unsigned long *livepatch_sp;
+#endif
/* low level flags - has atomic operations done on it */
unsigned long flags ____cacheline_aligned_in_smp;
};
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 290559df1e8b..3cb46a3b1de7 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -66,6 +66,7 @@
#include <asm/udbg.h>
#include <asm/smp.h>
#include <asm/debug.h>
+#include <asm/livepatch.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
@@ -607,10 +608,12 @@ void irq_ctx_init(void)
memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
tp = softirq_ctx[i];
tp->cpu = i;
+ klp_init_thread_info(tp);
memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
tp = hardirq_ctx[i];
tp->cpu = i;
+ klp_init_thread_info(tp);
}
}
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index b8500b4ac7fe..2a9280b945e0 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -55,6 +55,8 @@
#include <asm/firmware.h>
#endif
#include <asm/code-patching.h>
+#include <asm/livepatch.h>
+
#include <linux/kprobes.h>
#include <linux/kdebug.h>
@@ -1400,13 +1402,15 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
extern void ret_from_kernel_thread(void);
void (*f)(void);
unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;
+ struct thread_info *ti = task_thread_info(p);
+
+ klp_init_thread_info(ti);
/* Copy registers */
sp -= sizeof(struct pt_regs);
childregs = (struct pt_regs *) sp;
if (unlikely(p->flags & PF_KTHREAD)) {
/* kernel thread */
- struct thread_info *ti = (void *)task_stack_page(p);
memset(childregs, 0, sizeof(struct pt_regs));
childregs->gpr[1] = sp + sizeof(struct pt_regs);
/* function */
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index f98be8383a39..96d4a2b23d0f 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -69,6 +69,7 @@
#include <asm/kvm_ppc.h>
#include <asm/hugetlb.h>
#include <asm/epapr_hcalls.h>
+#include <asm/livepatch.h>
#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
@@ -667,16 +668,16 @@ static void __init emergency_stack_init(void)
limit = min(safe_stack_limit(), ppc64_rma_size);
for_each_possible_cpu(i) {
- unsigned long sp;
- sp = memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit);
- sp += THREAD_SIZE;
- paca[i].emergency_sp = __va(sp);
+ struct thread_info *ti;
+ ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
+ klp_init_thread_info(ti);
+ paca[i].emergency_sp = (void *)ti + THREAD_SIZE;
#ifdef CONFIG_PPC_BOOK3S_64
/* emergency stack for machine check exception handling. */
- sp = memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit);
- sp += THREAD_SIZE;
- paca[i].mc_emergency_sp = __va(sp);
+ ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
+ klp_init_thread_info(ti);
+ paca[i].mc_emergency_sp = (void *)ti + THREAD_SIZE;
#endif
}
}
@@ -700,6 +701,8 @@ void __init setup_arch(char **cmdline_p)
if (ppc_md.panic)
setup_panic();
+ klp_init_thread_info(&init_thread_info);
+
init_mm.start_code = (unsigned long)_stext;
init_mm.end_code = (unsigned long) _etext;
init_mm.end_data = (unsigned long) _edata;
--
2.5.0
Add the kconfig logic & assembly support for handling live patched
functions. This depends on DYNAMIC_FTRACE_WITH_REGS, which in turn
depends on the new -mprofile-kernel ftrace ABI, which is only supported
currently on ppc64le.
Live patching is handled by a special ftrace handler. This means it runs
from ftrace_caller(). The live patch handler modifies the NIP so as to
redirect the return from ftrace_caller() to the new patched function.
However there is one particularly tricky case we need to handle.
If a function A calls another function B, and it is known at link time
that they share the same TOC, then A will not save or restore its TOC,
and will call the local entry point of B.
When we live patch B, we replace it with a new function C, which may
not have the same TOC as A. At live patch time it's too late to modify A
to do the TOC save/restore, so the live patching code must interpose
itself between A and C, and do the TOC save/restore that A omitted.
An additional complication is that the livepatch code can not create a
stack frame in order to save the TOC. That is because if C takes > 8
arguments, or is varargs, A will have written the arguments for C in
A's stack frame.
To solve this, we introduce a "livepatch stack" which grows upward from
the base of the regular stack, and is used to store the TOC & LR when
calling a live patched function.
When the patched function returns, we retrieve the real LR & TOC from
the livepatch stack, restore them, and pop the livepatch "stack frame".
Signed-off-by: Michael Ellerman <[email protected]>
Reviewed-by: Torsten Duwe <[email protected]>
Reviewed-by: Balbir Singh <[email protected]>
---
arch/powerpc/Kconfig | 3 ++
arch/powerpc/kernel/asm-offsets.c | 4 ++
arch/powerpc/kernel/entry_64.S | 97 +++++++++++++++++++++++++++++++++++++++
3 files changed, 104 insertions(+)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 7cd32c038286..ed0603102442 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -160,6 +160,7 @@ config PPC
select HAVE_ARCH_SECCOMP_FILTER
select ARCH_HAS_UBSAN_SANITIZE_ALL
select ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT
+ select HAVE_LIVEPATCH if HAVE_DYNAMIC_FTRACE_WITH_REGS
config GENERIC_CSUM
def_bool CPU_LITTLE_ENDIAN
@@ -1107,3 +1108,5 @@ config PPC_LIB_RHEAP
bool
source "arch/powerpc/kvm/Kconfig"
+
+source "kernel/livepatch/Kconfig"
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 0d0183d3180a..c9370d4e36bd 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -86,6 +86,10 @@ int main(void)
DEFINE(KSP_LIMIT, offsetof(struct thread_struct, ksp_limit));
#endif /* CONFIG_PPC64 */
+#ifdef CONFIG_LIVEPATCH
+ DEFINE(TI_livepatch_sp, offsetof(struct thread_info, livepatch_sp));
+#endif
+
DEFINE(KSP, offsetof(struct thread_struct, ksp));
DEFINE(PT_REGS, offsetof(struct thread_struct, regs));
#ifdef CONFIG_BOOKE
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 9916d150b28c..39a79c89a4b6 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -20,6 +20,7 @@
#include <linux/errno.h>
#include <linux/err.h>
+#include <linux/magic.h>
#include <asm/unistd.h>
#include <asm/processor.h>
#include <asm/page.h>
@@ -1248,6 +1249,9 @@ _GLOBAL(ftrace_caller)
addi r3,r3,function_trace_op@toc@l
ld r5,0(r3)
+#ifdef CONFIG_LIVEPATCH
+ mr r14,r7 /* remember old NIP */
+#endif
/* Calculate ip from nip-4 into r3 for call below */
subi r3, r7, MCOUNT_INSN_SIZE
@@ -1272,6 +1276,9 @@ ftrace_call:
/* Load ctr with the possibly modified NIP */
ld r3, _NIP(r1)
mtctr r3
+#ifdef CONFIG_LIVEPATCH
+ cmpd r14,r3 /* has NIP been altered? */
+#endif
/* Restore gprs */
REST_8GPRS(0,r1)
@@ -1289,6 +1296,11 @@ ftrace_call:
ld r0, LRSAVE(r1)
mtlr r0
+#ifdef CONFIG_LIVEPATCH
+ /* Based on the cmpd above, if the NIP was altered handle livepatch */
+ bne- livepatch_handler
+#endif
+
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
stdu r1, -112(r1)
.globl ftrace_graph_call
@@ -1305,6 +1317,91 @@ _GLOBAL(ftrace_graph_stub)
_GLOBAL(ftrace_stub)
blr
+
+#ifdef CONFIG_LIVEPATCH
+ /*
+ * This function runs in the mcount context, between two functions. As
+ * such it can only clobber registers which are volatile and used in
+ * function linkage.
+ *
+ * We get here when a function A, calls another function B, but B has
+ * been live patched with a new function C.
+ *
+ * On entry:
+ * - we have no stack frame and can not allocate one
+ * - LR points back to the original caller (in A)
+ * - CTR holds the new NIP in C
+ * - r0 & r12 are free
+ *
+ * r0 can't be used as the base register for a DS-form load or store, so
+ * we temporarily shuffle r1 (stack pointer) into r0 and then put it back.
+ */
+livepatch_handler:
+ CURRENT_THREAD_INFO(r12, r1)
+
+ /* Save stack pointer into r0 */
+ mr r0, r1
+
+ /* Allocate 3 x 8 bytes */
+ ld r1, TI_livepatch_sp(r12)
+ addi r1, r1, 24
+ std r1, TI_livepatch_sp(r12)
+
+ /* Save toc & real LR on livepatch stack */
+ std r2, -24(r1)
+ mflr r12
+ std r12, -16(r1)
+
+ /* Store stack end marker */
+ lis r12, STACK_END_MAGIC@h
+ ori r12, r12, STACK_END_MAGIC@l
+ std r12, -8(r1)
+
+ /* Restore real stack pointer */
+ mr r1, r0
+
+ /* Put ctr in r12 for global entry and branch there */
+ mfctr r12
+ bctrl
+
+ /*
+ * Now we are returning from the patched function to the original
+ * caller A. We are free to use r0 and r12, and we can use r2 until we
+ * restore it.
+ */
+
+ CURRENT_THREAD_INFO(r12, r1)
+
+ /* Save stack pointer into r0 */
+ mr r0, r1
+
+ ld r1, TI_livepatch_sp(r12)
+
+ /* Check stack marker hasn't been trashed */
+ lis r2, STACK_END_MAGIC@h
+ ori r2, r2, STACK_END_MAGIC@l
+ ld r12, -8(r1)
+1: tdne r12, r2
+ EMIT_BUG_ENTRY 1b, __FILE__, __LINE__ - 1, 0
+
+ /* Restore LR & toc from livepatch stack */
+ ld r12, -16(r1)
+ mtlr r12
+ ld r2, -24(r1)
+
+ /* Pop livepatch stack frame */
+ CURRENT_THREAD_INFO(r12, r0)
+ subi r1, r1, 24
+ std r1, TI_livepatch_sp(r12)
+
+ /* Restore real stack pointer */
+ mr r1, r0
+
+ /* Return to original caller of live patched function */
+ blr
+#endif
+
+
#else
_GLOBAL_TOC(_mcount)
/* Taken from output of objdump from lib64/glibc */
--
2.5.0
When livepatch tries to patch a function it takes the function address
and asks ftrace to install the livepatch handler at that location.
ftrace will look for an mcount call site at that exact address.
On powerpc the mcount location is not the first instruction of the
function, and in fact it's not at a constant offset from the start of
the function. To accommodate this add a hook which arch code can
override to customise the behaviour.
Signed-off-by: Torsten Duwe <[email protected]>
Signed-off-by: Balbir Singh <[email protected]>
Signed-off-by: Petr Mladek <[email protected]>
Signed-off-by: Michael Ellerman <[email protected]>
---
kernel/livepatch/core.c | 34 +++++++++++++++++++++++++++++++---
1 file changed, 31 insertions(+), 3 deletions(-)
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index d68fbf63b083..b0476bb30f92 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -298,6 +298,19 @@ unlock:
rcu_read_unlock();
}
+/*
+ * Convert a function address into the appropriate ftrace location.
+ *
+ * Usually this is just the address of the function, but on some architectures
+ * it's more complicated so allow them to provide a custom behaviour.
+ */
+#ifndef klp_get_ftrace_location
+static unsigned long klp_get_ftrace_location(unsigned long faddr)
+{
+ return faddr;
+}
+#endif
+
static void klp_disable_func(struct klp_func *func)
{
struct klp_ops *ops;
@@ -312,8 +325,14 @@ static void klp_disable_func(struct klp_func *func)
return;
if (list_is_singular(&ops->func_stack)) {
+ unsigned long ftrace_loc;
+
+ ftrace_loc = klp_get_ftrace_location(func->old_addr);
+ if (WARN_ON(!ftrace_loc))
+ return;
+
WARN_ON(unregister_ftrace_function(&ops->fops));
- WARN_ON(ftrace_set_filter_ip(&ops->fops, func->old_addr, 1, 0));
+ WARN_ON(ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0));
list_del_rcu(&func->stack_node);
list_del(&ops->node);
@@ -338,6 +357,15 @@ static int klp_enable_func(struct klp_func *func)
ops = klp_find_ops(func->old_addr);
if (!ops) {
+ unsigned long ftrace_loc;
+
+ ftrace_loc = klp_get_ftrace_location(func->old_addr);
+ if (!ftrace_loc) {
+ pr_err("failed to find location for function '%s'\n",
+ func->old_name);
+ return -EINVAL;
+ }
+
ops = kzalloc(sizeof(*ops), GFP_KERNEL);
if (!ops)
return -ENOMEM;
@@ -352,7 +380,7 @@ static int klp_enable_func(struct klp_func *func)
INIT_LIST_HEAD(&ops->func_stack);
list_add_rcu(&func->stack_node, &ops->func_stack);
- ret = ftrace_set_filter_ip(&ops->fops, func->old_addr, 0, 0);
+ ret = ftrace_set_filter_ip(&ops->fops, ftrace_loc, 0, 0);
if (ret) {
pr_err("failed to set ftrace filter for function '%s' (%d)\n",
func->old_name, ret);
@@ -363,7 +391,7 @@ static int klp_enable_func(struct klp_func *func)
if (ret) {
pr_err("failed to register ftrace handler for function '%s' (%d)\n",
func->old_name, ret);
- ftrace_set_filter_ip(&ops->fops, func->old_addr, 1, 0);
+ ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0);
goto err;
}
--
2.5.0
Add the powerpc specific livepatch definitions. In particular we provide
a non-default implementation of klp_get_ftrace_location().
This is required because the location of the mcount call is not constant
when using -mprofile-kernel (which we always do for live patching).
Signed-off-by: Torsten Duwe <[email protected]>
Signed-off-by: Balbir Singh <[email protected]>
Signed-off-by: Michael Ellerman <[email protected]>
---
arch/powerpc/include/asm/livepatch.h | 54 ++++++++++++++++++++++++++++++++++++
1 file changed, 54 insertions(+)
create mode 100644 arch/powerpc/include/asm/livepatch.h
diff --git a/arch/powerpc/include/asm/livepatch.h b/arch/powerpc/include/asm/livepatch.h
new file mode 100644
index 000000000000..ad36e8e34fa1
--- /dev/null
+++ b/arch/powerpc/include/asm/livepatch.h
@@ -0,0 +1,54 @@
+/*
+ * livepatch.h - powerpc-specific Kernel Live Patching Core
+ *
+ * Copyright (C) 2015-2016, SUSE, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef _ASM_POWERPC_LIVEPATCH_H
+#define _ASM_POWERPC_LIVEPATCH_H
+
+#include <linux/module.h>
+#include <linux/ftrace.h>
+
+#ifdef CONFIG_LIVEPATCH
+static inline int klp_check_compiler_support(void)
+{
+ return 0;
+}
+
+static inline int klp_write_module_reloc(struct module *mod, unsigned long
+ type, unsigned long loc, unsigned long value)
+{
+ /* This requires infrastructure changes; we need the loadinfos. */
+ return -ENOSYS;
+}
+
+static inline void klp_arch_set_pc(struct pt_regs *regs, unsigned long ip)
+{
+ regs->nip = ip;
+}
+
+#define klp_get_ftrace_location klp_get_ftrace_location
+static inline unsigned long klp_get_ftrace_location(unsigned long faddr)
+{
+ /*
+ * Live patch works only with -mprofile-kernel on PPC. In this case,
+ * the ftrace location is always within the first 16 bytes.
+ */
+ return ftrace_location_range(faddr, faddr + 16);
+}
+#endif /* CONFIG_LIVEPATCH */
+
+#endif /* _ASM_POWERPC_LIVEPATCH_H */
--
2.5.0
On Wed, 13 Apr 2016, Michael Ellerman wrote:
> This series adds live patching support for powerpc (ppc64le only ATM).
>
> It's unchanged since the version I posted on March 24, with the exception that
> I've dropped the first patch, which was a testing-only patch.
>
> If there's no further comments I'll put this in a topic branch in the next day
> or two and Jiri & I will both merge that into next.
Hi,
I'll definitely give it a proper look today or tomorrow, but there is one
thing that needs to be solved. The patch set from Jessica reworking
relocations for live patching is now merged in our for-next branch. This
means that we need to find out if there is something in struct
mod_arch_specific for powerpc which needs to be preserved and do it.
Regards,
Miroslav
On Wed, 13 Apr 2016, Miroslav Benes wrote:
> > This series adds live patching support for powerpc (ppc64le only ATM).
> >
> > It's unchanged since the version I posted on March 24, with the exception that
> > I've dropped the first patch, which was a testing-only patch.
> >
> > If there's no further comments I'll put this in a topic branch in the next day
> > or two and Jiri & I will both merge that into next.
>
> Hi,
>
> I'll definitely give it a proper look today or tomorrow, but there is one
> thing that needs to be solved. The patch set from Jessica reworking
> relocations for live patching is now merged in our for-next branch. This
> means that we need to find out if there is something in struct
> mod_arch_specific for powerpc which needs to be preserved and do it.
Michael, if the plan is still the original one, i.e. you push it to your
branch, and I merge it to livepatching (and resolve any dependencies on
the relocations code during the merge) and push it to Linus from
livepatching.git, then there shouldn't be anything to do on your side.
Alternatively, you can rebase on top of livepatching.git#for-next, and
I'll take it directly.
Thanks,
--
Jiri Kosina
SUSE Labs
+++ Miroslav Benes [13/04/16 15:01 +0200]:
>On Wed, 13 Apr 2016, Michael Ellerman wrote:
>
>> This series adds live patching support for powerpc (ppc64le only ATM).
>>
>> It's unchanged since the version I posted on March 24, with the exception that
>> I've dropped the first patch, which was a testing-only patch.
>>
>> If there's no further comments I'll put this in a topic branch in the next day
>> or two and Jiri & I will both merge that into next.
>
>Hi,
>
>I'll definitely give it a proper look today or tomorrow, but there is one
>thing that needs to be solved. The patch set from Jessica reworking
>relocations for live patching is now merged in our for-next branch. This
>means that we need to find out if there is something in struct
>mod_arch_specific for powerpc which needs to be preserved and do it.
>
I took a look around the powerpc module.c code and it looks like the
mod_arch_specific stuff should be fine, since it is statically allocated
in the module struct (unlike the situation in s390, where
mod->arch.syminfo was vmalloc'd and we had to delay the free).
However I'm not familiar with the powerpc code so I need to dig around
a bit more to be 100% sure.
A second concern I have is that apply_relocate_add() relies on
sections like .stubs and .toc (for 64-bit) and .init.plt and .plt
sections (for 32-bit). In order for apply_relocate_add() to work for
livepatch, we must make sure these sections aren't thrown away and are
not in init module memory since this memory will be freed at the end
of module load (see how INIT_OFFSET_MASK is used in kernel/module.c).
As long as these sections are placed in module core memory, we will be
OK. I need to think about this a bit more.
Third and unrelated comment: the klp_write_module_reloc stub isn't
needed anymore :-)
Thanks,
Jessica
On Wed, 2016-04-13 at 15:22 +0200, Jiri Kosina wrote:
> On Wed, 13 Apr 2016, Miroslav Benes wrote:
> > > This series adds live patching support for powerpc (ppc64le only ATM).
> > >
> > > It's unchanged since the version I posted on March 24, with the exception that
> > > I've dropped the first patch, which was a testing-only patch.
> > >
> > > If there's no further comments I'll put this in a topic branch in the next day
> > > or two and Jiri & I will both merge that into next.
> >
> > Hi,
> >
> > I'll definitely give it a proper look today or tomorrow, but there is one
> > thing that needs to be solved. The patch set from Jessica reworking
> > relocations for live patching is now merged in our for-next branch. This
> > means that we need to find out if there is something in struct
> > mod_arch_specific for powerpc which needs to be preserved and do it.
>
> Michael, if the plan is still the original one, i.e. you push it to your
> branch, and I merge it to livepatching (and resolve any dependencies on
> the relocations code during the merge) and push it to Linus from
> livepatching.git, then there shouldn't be anything do to on your side.
That is my plan yeah.
Topic branch here:
https://git.kernel.org/cgit/linux/kernel/git/powerpc/linux.git/log/?h=topic/livepatch
I will merge that before Monday (my time) if I don't hear any objections.
cheers
On Wed, 13 Apr 2016, Michael Ellerman wrote:
> When livepatch tries to patch a function it takes the function address
> and asks ftrace to install the livepatch handler at that location.
> ftrace will look for an mcount call site at that exact address.
>
> On powerpc the mcount location is not the first instruction of the
> function, and in fact it's not at a constant offset from the start of
> the function. To accommodate this add a hook which arch code can
> override to customise the behaviour.
>
> Signed-off-by: Torsten Duwe <[email protected]>
> Signed-off-by: Balbir Singh <[email protected]>
> Signed-off-by: Petr Mladek <[email protected]>
> Signed-off-by: Michael Ellerman <[email protected]>
> ---
> kernel/livepatch/core.c | 34 +++++++++++++++++++++++++++++++---
> 1 file changed, 31 insertions(+), 3 deletions(-)
>
> diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
> index d68fbf63b083..b0476bb30f92 100644
> --- a/kernel/livepatch/core.c
> +++ b/kernel/livepatch/core.c
> @@ -298,6 +298,19 @@ unlock:
> rcu_read_unlock();
> }
>
> +/*
> + * Convert a function address into the appropriate ftrace location.
> + *
> + * Usually this is just the address of the function, but on some architectures
> + * it's more complicated so allow them to provide a custom behaviour.
> + */
> +#ifndef klp_get_ftrace_location
> +static unsigned long klp_get_ftrace_location(unsigned long faddr)
> +{
> + return faddr;
> +}
> +#endif
Whoah, what an ugly hack :)
Note to my future self - This is what you want to do if you need a weak
static inline function.
static inline is probably unnecessary here so __weak function would be
enough. It would introduce powerpc-specific livepatch.c though because of
it and this is not worth it.
> static void klp_disable_func(struct klp_func *func)
> {
> struct klp_ops *ops;
> @@ -312,8 +325,14 @@ static void klp_disable_func(struct klp_func *func)
> return;
>
> if (list_is_singular(&ops->func_stack)) {
> + unsigned long ftrace_loc;
This is a nit, but could you move the definition up to have them all in
one place to be consistent with the rest of the code? The same applies to
klp_enable_func() below.
> +
> + ftrace_loc = klp_get_ftrace_location(func->old_addr);
> + if (WARN_ON(!ftrace_loc))
> + return;
> +
> WARN_ON(unregister_ftrace_function(&ops->fops));
> - WARN_ON(ftrace_set_filter_ip(&ops->fops, func->old_addr, 1, 0));
> + WARN_ON(ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0));
>
> list_del_rcu(&func->stack_node);
> list_del(&ops->node);
> @@ -338,6 +357,15 @@ static int klp_enable_func(struct klp_func *func)
>
> ops = klp_find_ops(func->old_addr);
> if (!ops) {
> + unsigned long ftrace_loc;
Here.
> +
> + ftrace_loc = klp_get_ftrace_location(func->old_addr);
> + if (!ftrace_loc) {
> + pr_err("failed to find location for function '%s'\n",
> + func->old_name);
> + return -EINVAL;
> + }
> +
> ops = kzalloc(sizeof(*ops), GFP_KERNEL);
> if (!ops)
> return -ENOMEM;
> @@ -352,7 +380,7 @@ static int klp_enable_func(struct klp_func *func)
> INIT_LIST_HEAD(&ops->func_stack);
> list_add_rcu(&func->stack_node, &ops->func_stack);
>
> - ret = ftrace_set_filter_ip(&ops->fops, func->old_addr, 0, 0);
> + ret = ftrace_set_filter_ip(&ops->fops, ftrace_loc, 0, 0);
> if (ret) {
> pr_err("failed to set ftrace filter for function '%s' (%d)\n",
> func->old_name, ret);
> @@ -363,7 +391,7 @@ static int klp_enable_func(struct klp_func *func)
> if (ret) {
> pr_err("failed to register ftrace handler for function '%s' (%d)\n",
> func->old_name, ret);
> - ftrace_set_filter_ip(&ops->fops, func->old_addr, 1, 0);
> + ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0);
> goto err;
> }
Otherwise it is ok.
Miroslav
On Wed, 13 Apr 2016, Michael Ellerman wrote:
> Add the powerpc specific livepatch definitions. In particular we provide
> a non-default implementation of klp_get_ftrace_location().
>
> This is required because the location of the mcount call is not constant
> when using -mprofile-kernel (which we always do for live patching).
>
> Signed-off-by: Torsten Duwe <[email protected]>
> Signed-off-by: Balbir Singh <[email protected]>
> Signed-off-by: Michael Ellerman <[email protected]>
> ---
> arch/powerpc/include/asm/livepatch.h | 54 ++++++++++++++++++++++++++++++++++++
> 1 file changed, 54 insertions(+)
> create mode 100644 arch/powerpc/include/asm/livepatch.h
>
> diff --git a/arch/powerpc/include/asm/livepatch.h b/arch/powerpc/include/asm/livepatch.h
> new file mode 100644
> index 000000000000..ad36e8e34fa1
> --- /dev/null
> +++ b/arch/powerpc/include/asm/livepatch.h
> @@ -0,0 +1,54 @@
> +/*
> + * livepatch.h - powerpc-specific Kernel Live Patching Core
> + *
> + * Copyright (C) 2015-2016, SUSE, IBM Corp.
> + *
> + * This program is free software; you can redistribute it and/or
> + * modify it under the terms of the GNU General Public License
> + * as published by the Free Software Foundation; either version 2
> + * of the License, or (at your option) any later version.
> + *
> + * This program is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
> + * GNU General Public License for more details.
> + *
> + * You should have received a copy of the GNU General Public License
> + * along with this program; if not, see <http://www.gnu.org/licenses/>.
> + */
> +#ifndef _ASM_POWERPC_LIVEPATCH_H
> +#define _ASM_POWERPC_LIVEPATCH_H
> +
> +#include <linux/module.h>
> +#include <linux/ftrace.h>
> +
> +#ifdef CONFIG_LIVEPATCH
We don't use these guards in our header files since 335e073faacc ("klp:
remove CONFIG_LIVEPATCH dependency from klp headers").
> +static inline int klp_check_compiler_support(void)
> +{
> + return 0;
> +}
> +
> +static inline int klp_write_module_reloc(struct module *mod, unsigned long
> + type, unsigned long loc, unsigned long value)
> +{
> + /* This requires infrastructure changes; we need the loadinfos. */
> + return -ENOSYS;
> +}
And this is not needed anymore as Jessica pointed out.
> +static inline void klp_arch_set_pc(struct pt_regs *regs, unsigned long ip)
> +{
> + regs->nip = ip;
> +}
> +
> +#define klp_get_ftrace_location klp_get_ftrace_location
> +static inline unsigned long klp_get_ftrace_location(unsigned long faddr)
> +{
> + /*
> + * Live patch works only with -mprofile-kernel on PPC. In this case,
> + * the ftrace location is always within the first 16 bytes.
> + */
> + return ftrace_location_range(faddr, faddr + 16);
> +}
> +#endif /* CONFIG_LIVEPATCH */
> +
> +#endif /* _ASM_POWERPC_LIVEPATCH_H */
> --
> 2.5.0
>
On Thu, 14 Apr 2016, Miroslav Benes wrote:
> On Wed, 13 Apr 2016, Michael Ellerman wrote:
>
> > Add the powerpc specific livepatch definitions. In particular we provide
> > a non-default implementation of klp_get_ftrace_location().
> >
> > This is required because the location of the mcount call is not constant
> > when using -mprofile-kernel (which we always do for live patching).
> >
> > Signed-off-by: Torsten Duwe <[email protected]>
> > Signed-off-by: Balbir Singh <[email protected]>
> > Signed-off-by: Michael Ellerman <[email protected]>
> > ---
> > arch/powerpc/include/asm/livepatch.h | 54 ++++++++++++++++++++++++++++++++++++
> > 1 file changed, 54 insertions(+)
> > create mode 100644 arch/powerpc/include/asm/livepatch.h
> >
> > diff --git a/arch/powerpc/include/asm/livepatch.h b/arch/powerpc/include/asm/livepatch.h
> > new file mode 100644
> > index 000000000000..ad36e8e34fa1
> > --- /dev/null
> > +++ b/arch/powerpc/include/asm/livepatch.h
> > @@ -0,0 +1,54 @@
> > +/*
> > + * livepatch.h - powerpc-specific Kernel Live Patching Core
> > + *
> > + * Copyright (C) 2015-2016, SUSE, IBM Corp.
> > + *
> > + * This program is free software; you can redistribute it and/or
> > + * modify it under the terms of the GNU General Public License
> > + * as published by the Free Software Foundation; either version 2
> > + * of the License, or (at your option) any later version.
> > + *
> > + * This program is distributed in the hope that it will be useful,
> > + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> > + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
> > + * GNU General Public License for more details.
> > + *
> > + * You should have received a copy of the GNU General Public License
> > + * along with this program; if not, see <http://www.gnu.org/licenses/>.
> > + */
> > +#ifndef _ASM_POWERPC_LIVEPATCH_H
> > +#define _ASM_POWERPC_LIVEPATCH_H
> > +
> > +#include <linux/module.h>
> > +#include <linux/ftrace.h>
> > +
> > +#ifdef CONFIG_LIVEPATCH
>
> We don't use these guards in our header files since 335e073faacc ("klp:
> remove CONFIG_LIVEPATCH dependency from klp headers").
...but you're gonna need it in the next patch...
On Thu, Apr 14, 2016 at 04:49:50PM +1000, Michael Ellerman wrote:
> On Wed, 2016-04-13 at 15:22 +0200, Jiri Kosina wrote:
> > On Wed, 13 Apr 2016, Miroslav Benes wrote:
> > > > This series adds live patching support for powerpc (ppc64le only ATM).
> > > >
> > > > It's unchanged since the version I posted on March 24, with the exception that
> > > > I've dropped the first patch, which was a testing-only patch.
Confirmed. And it still works on top of 4.6-rc3, even with the additional testing.
> > > > If there's no further comments I'll put this in a topic branch in the next day
> > > > or two and Jiri & I will both merge that into next.
"Go" from my side.
FTR: then I still have a few ppc64 hunks floating around to support certain consistency
models...
Torsten
On Thu, 2016-04-14 at 14:01 +0200, Miroslav Benes wrote:
> On Wed, 13 Apr 2016, Michael Ellerman wrote:
> > diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
> > index d68fbf63b083..b0476bb30f92 100644
> > --- a/kernel/livepatch/core.c
> > +++ b/kernel/livepatch/core.c
> > @@ -298,6 +298,19 @@ unlock:
> > rcu_read_unlock();
> > }
> >
> > +/*
> > + * Convert a function address into the appropriate ftrace location.
> > + *
> > + * Usually this is just the address of the function, but on some architectures
> > + * it's more complicated so allow them to provide a custom behaviour.
> > + */
> > +#ifndef klp_get_ftrace_location
> > +static unsigned long klp_get_ftrace_location(unsigned long faddr)
> > +{
> > + return faddr;
> > +}
> > +#endif
>
> Whoah, what an ugly hack :)
Hey it's a "cool trick" ;)
> Note to my future self - This is what you want to do if you need a weak
> static inline function.
>
> static inline is probably unnecessary here so __weak function would be
> enough. It would introduce powerpc-specific livepatch.c though because of
> it and this is not worth it.
Yeah that was my logic, at least for now. We can always change it in future
to be weak if anyone cares deeply.
> > static void klp_disable_func(struct klp_func *func)
> > {
> > struct klp_ops *ops;
> > @@ -312,8 +325,14 @@ static void klp_disable_func(struct klp_func *func)
> > return;
> >
> > if (list_is_singular(&ops->func_stack)) {
> > + unsigned long ftrace_loc;
>
> This is a nit, but could you move the definition up to have them all in
> one place to be consistent with the rest of the code? The same applies to
> klp_enable_func() below.
Hmm, actually I moved it in there because you pointed out we only needed it
inside the if:
http://lkml.kernel.org/r/[email protected]
Thinking about it, we need ftrace_loc only in cases where we call
ftrace_set_filter_ip() right? So we can move klp_get_ftrace_location()
call to appropriate if branch both in klp_disable_func() and
klp_enable_func().
But I guess you meant the function call, not the variable declaration.
Personally I think it's better this way, as the variable is in scope for the
shortest possible amount of time, but I can change it if you want me to.
cheers
On Thu, 2016-04-14 at 14:57 +0200, Torsten Duwe wrote:
> On Thu, Apr 14, 2016 at 04:49:50PM +1000, Michael Ellerman wrote:
> > On Wed, 2016-04-13 at 15:22 +0200, Jiri Kosina wrote:
> > > On Wed, 13 Apr 2016, Miroslav Benes wrote:
> > > > > This series adds live patching support for powerpc (ppc64le only ATM).
> > > > >
> > > > > It's unchanged since the version I posted on March 24, with the exception that
> > > > > I've dropped the first patch, which was a testing-only patch.
>
> Confirmed. And it still works on top of 4.6-rc3, even with the additional testing.
Thanks. Yeah I tested on top of rc3 as well as back on the topic branch (4.5-rc).
> > > > > If there's no further comments I'll put this in a topic branch in the next day
> > > > > or two and Jiri & I will both merge that into next.
>
> "Go" from my side.
Throttle up!
> FTR: then I still have a few ppc64 hunks floating around to support certain consistency
> models...
OK. I'm not quite sure what you mean but post them and we'll see I guess :)
cheers
On Thu, 2016-04-14 at 14:23 +0200, Miroslav Benes wrote:
> On Thu, 14 Apr 2016, Miroslav Benes wrote:
> > On Wed, 13 Apr 2016, Michael Ellerman wrote:
> > > diff --git a/arch/powerpc/include/asm/livepatch.h b/arch/powerpc/include/asm/livepatch.h
> > > new file mode 100644
> > > index 000000000000..ad36e8e34fa1
> > > --- /dev/null
> > > +++ b/arch/powerpc/include/asm/livepatch.h
> > > @@ -0,0 +1,54 @@
...
> > > +#ifndef _ASM_POWERPC_LIVEPATCH_H
> > > +#define _ASM_POWERPC_LIVEPATCH_H
> > > +
> > > +#include <linux/module.h>
> > > +#include <linux/ftrace.h>
> > > +
> > > +#ifdef CONFIG_LIVEPATCH
> >
> > We don't use these guards in our header files since 335e073faacc ("klp:
> > remove CONFIG_LIVEPATCH dependency from klp headers").
>
> ...but you're gonna need it in the next patch...
Yeah I know I said at one point those #ifdefs were unneeded, but then it turns
out we did want it on powerpc for other reasons.
cheers
On Wed, 13 Apr 2016, Jessica Yu wrote:
> +++ Miroslav Benes [13/04/16 15:01 +0200]:
> > On Wed, 13 Apr 2016, Michael Ellerman wrote:
> >
> > > This series adds live patching support for powerpc (ppc64le only ATM).
> > >
> > > It's unchanged since the version I posted on March 24, with the exception
> > > that
> > > I've dropped the first patch, which was a testing-only patch.
> > >
> > > If there's no further comments I'll put this in a topic branch in the next
> > > day
> > > or two and Jiri & I will both merge that into next.
> >
> > Hi,
> >
> > I'll definitely give it a proper look today or tomorrow, but there is one
> > thing that needs to be solved. The patch set from Jessica reworking
> > relocations for live patching is now merged in our for-next branch. This
> > means that we need to find out if there is something in struct
> > mod_arch_specific for powerpc which needs to be preserved and do it.
> >
>
> I took a look around the powerpc module.c code and it looks like the
> mod_arch_specific stuff should be fine, since it is statically allocated
> in the module struct (unlike the situation in s390, where
> mod->arch.syminfo was vmalloc'd and we had to delay the free).
> However I'm not familiar with the powerpc code so I need to dig around
> a bit more to be 100% sure.
I came to the same conclusion. There is only struct bug_entry *bug_table
in mod_arch_specific but it looks unimportant wrt relocations.
> A second concern I have is that apply_relocate_add() relies on
> sections like .stubs and .toc (for 64-bit) and .init.plt and .plt
> sections (for 32-bit). In order for apply_relocate_add() to work for
> livepatch, we must make sure these sections aren't thrown away and are
> not in init module memory since this memory will be freed at the end
> of module load (see how INIT_OFFSET_MASK is used in kernel/module.c).
> As long as these sections are placed in module core memory, we will be
> OK. I need to think about this a bit more.
I knew I shouldn't have opened arch/powerpc/kernel/module*.c.
We could always hack sh_flags of those sections in
module_arch_frob_sections() to make them stay.
Miroslav
>
> Third and unrelated comment: the klp_write_module_reloc stub isn't
> needed anymore :-)
>
> Thanks,
> Jessica
>
On Thu, 14 Apr 2016, Michael Ellerman wrote:
> On Thu, 2016-04-14 at 14:01 +0200, Miroslav Benes wrote:
> > On Wed, 13 Apr 2016, Michael Ellerman wrote:
>
> > > static void klp_disable_func(struct klp_func *func)
> > > {
> > > struct klp_ops *ops;
> > > @@ -312,8 +325,14 @@ static void klp_disable_func(struct klp_func *func)
> > > return;
> > >
> > > if (list_is_singular(&ops->func_stack)) {
> > > + unsigned long ftrace_loc;
> >
> > This is a nit, but could you move the definition up to have them all in
> > one place to be consistent with the rest of the code? The same applies to
> > klp_enable_func() below.
>
> Hmm, actually I moved it in there because you pointed out we only needed it
> inside the if:
>
> http://lkml.kernel.org/r/[email protected]
>
> Thinking about it, we need ftrace_loc only in cases where we call
> ftrace_set_filter_ip() right? So we can move klp_get_ftrace_location()
> call to appropriate if branch both in klp_disable_func() and
> klp_enable_func().
>
> But I guess you meant the function call, not the variable declaration.
Exactly.
> Personally I think it's better this way, as the variable is in scope for the
> shortest possible amount of time, but I can change it if you want me to.
No, it is nothing I would insist on.
Thanks,
Miroslav
On Thu, 14 Apr 2016, Torsten Duwe wrote:
> > > > > It's unchanged since the version I posted on March 24, with the exception that
> > > > > I've dropped the first patch, which was a testing-only patch.
>
> Confirmed. And it still works on top of 4.6-rc3, even with the
> additional testing.
Thanks a lot for testing.
The important part here is testing on top of
livepatching.git#for-4.7/arch-independent-klp-relocations as well.
I am pretty sure there will be adjustments needed for the merge, as we'll
have to figure out which parts of ELF can't be thrown away and need to be
preserved in order for the relocation entry to be successfully
constructed.
Michael, I think this is an additional reason why the whole final pile
will have to go through livepatching.git, as the merge with what we have
in for-4.7/arch-independent-klp-relocations might not be completely
trivial.
--
Jiri Kosina
SUSE Labs
On Thu, Apr 14, 2016 at 11:08:02PM +1000, Michael Ellerman wrote:
> On Thu, 2016-04-14 at 14:57 +0200, Torsten Duwe wrote:
>
> > FTR: then I still have a few ppc64 hunks floating around to support certain consistency
> > models...
>
> OK. I'm not quite sure what you mean but post them and we'll see I guess :)
It's *roughly* the ppc64 equivalent of Josh Poimboeuf's Mar 25
| [RFC PATCH v1.9 14/14] livepatch: update task universe when exiting kernel
which only considers x86.
It's forward ported from an earlier code base; there's some glue missing,
but here it is, for reference.
Signed-off-by: Torsten Duwe <[email protected]>
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index b034ecd..3e749f4 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -92,6 +92,7 @@ static inline struct thread_info *current_thread_info(void)
TIF_NEED_RESCHED */
#define TIF_32BIT 4 /* 32 bit binary */
#define TIF_RESTORE_TM 5 /* need to restore TM FP/VEC/VSX */
+#define TIF_KLP_NEED_UPDATE 6 /* kGraft patching in progress */
#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
#define TIF_SINGLESTEP 8 /* singlestepping active */
#define TIF_NOHZ 9 /* in adaptive nohz mode */
@@ -115,8 +116,10 @@ static inline struct thread_info *current_thread_info(void)
#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
#define _TIF_32BIT (1<<TIF_32BIT)
#define _TIF_RESTORE_TM (1<<TIF_RESTORE_TM)
+#define _TIF_KLP_NEED_UPDATE (1<<TIF_KLP_NEED_UPDATE)
#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
#define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP)
+#define _TIF_NOHZ (1<<TIF_NOHZ)
#define _TIF_SECCOMP (1<<TIF_SECCOMP)
#define _TIF_RESTOREALL (1<<TIF_RESTOREALL)
#define _TIF_NOERROR (1<<TIF_NOERROR)
@@ -124,7 +127,7 @@ static inline struct thread_info *current_thread_info(void)
#define _TIF_UPROBE (1<<TIF_UPROBE)
#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
#define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
-#define _TIF_NOHZ (1<<TIF_NOHZ)
+
#define _TIF_SYSCALL_DOTRACE (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
_TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
_TIF_NOHZ)
@@ -132,7 +135,8 @@ static inline struct thread_info *current_thread_info(void)
#define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
_TIF_NOTIFY_RESUME | _TIF_UPROBE | \
_TIF_RESTORE_TM)
-#define _TIF_PERSYSCALL_MASK (_TIF_RESTOREALL|_TIF_NOERROR)
+
+#define _TIF_PERSYSCALL_MASK (_TIF_RESTOREALL|_TIF_NOERROR|_TIF_KLP_NEED_UPDATE)
/* Bits in local_flags */
/* Don't move TLF_NAPPING without adjusting the code in entry_32.S */
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 5bbd1bc..17f8a18 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -151,8 +151,8 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
CURRENT_THREAD_INFO(r11, r1)
ld r10,TI_FLAGS(r11)
- andi. r11,r10,_TIF_SYSCALL_DOTRACE
- bne syscall_dotrace /* does not return */
+ andi. r10,r10,(_TIF_SYSCALL_DOTRACE|_TIF_KLP_NEED_UPDATE)
+ bne- syscall_precall /* does not return */
cmpldi 0,r0,NR_syscalls
bge- syscall_enosys
@@ -245,6 +245,17 @@ syscall_error:
neg r3,r3
std r5,_CCR(r1)
b .Lsyscall_error_cont
+
+syscall_precall:
+ andi. r10,r10,(_TIF_KLP_NEED_UPDATE)
+ beq+ syscall_dotrace
+
+ addi r11,r11,TI_FLAGS
+1: ldarx r12,0,r11
+ andc r12,r12,r10
+ stdcx. r12,0,r11
+ bne- 1b
+ subi r11,r11,TI_FLAGS
/* Traced system call support */
syscall_dotrace:
On Thu, Apr 14, 2016 at 05:20:29PM +0200, Torsten Duwe wrote:
> On Thu, Apr 14, 2016 at 11:08:02PM +1000, Michael Ellerman wrote:
> > On Thu, 2016-04-14 at 14:57 +0200, Torsten Duwe wrote:
> >
> > > FTR: then I still have a few ppc64 hunks floating around to support certain consistency
> > > models...
> >
> > OK. I'm not quite sure what you mean but post them and we'll see I guess :)
>
> It's *roughly* the ppc64 equivalent of Josh Poimboeuf's Mar 25
> | [RFC PATCH v1.9 14/14] livepatch: update task universe when exiting kernel
> which only considers x86.
>
> It's forward ported from an earlier code base; there's some glue missing,
> but here it is, for reference.
>
> Signed-off-by: Torsten Duwe <[email protected]>
Hi Torsten,
Thanks for sharing. This is quite fortuitous as Miroslav just today
mentioned to me that we would need something like this. If you don't
mind, I may pull this patch or some variant of it into v2 of the
consistency model.
>
>
> diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
> index b034ecd..3e749f4 100644
> --- a/arch/powerpc/include/asm/thread_info.h
> +++ b/arch/powerpc/include/asm/thread_info.h
> @@ -92,6 +92,7 @@ static inline struct thread_info *current_thread_info(void)
> TIF_NEED_RESCHED */
> #define TIF_32BIT 4 /* 32 bit binary */
> #define TIF_RESTORE_TM 5 /* need to restore TM FP/VEC/VSX */
> +#define TIF_KLP_NEED_UPDATE 6 /* kGraft patching in progress */
> #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
> #define TIF_SINGLESTEP 8 /* singlestepping active */
> #define TIF_NOHZ 9 /* in adaptive nohz mode */
> @@ -115,8 +116,10 @@ static inline struct thread_info *current_thread_info(void)
> #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
> #define _TIF_32BIT (1<<TIF_32BIT)
> #define _TIF_RESTORE_TM (1<<TIF_RESTORE_TM)
> +#define _TIF_KLP_NEED_UPDATE (1<<TIF_KLP_NEED_UPDATE)
> #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
> #define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP)
> +#define _TIF_NOHZ (1<<TIF_NOHZ)
> #define _TIF_SECCOMP (1<<TIF_SECCOMP)
> #define _TIF_RESTOREALL (1<<TIF_RESTOREALL)
> #define _TIF_NOERROR (1<<TIF_NOERROR)
> @@ -124,7 +127,7 @@ static inline struct thread_info *current_thread_info(void)
> #define _TIF_UPROBE (1<<TIF_UPROBE)
> #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
> #define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
> -#define _TIF_NOHZ (1<<TIF_NOHZ)
> +
> #define _TIF_SYSCALL_DOTRACE (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
> _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
> _TIF_NOHZ)
> @@ -132,7 +135,8 @@ static inline struct thread_info *current_thread_info(void)
> #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
> _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
> _TIF_RESTORE_TM)
> -#define _TIF_PERSYSCALL_MASK (_TIF_RESTOREALL|_TIF_NOERROR)
> +
> +#define _TIF_PERSYSCALL_MASK (_TIF_RESTOREALL|_TIF_NOERROR|_TIF_KLP_NEED_UPDATE)
>
> /* Bits in local_flags */
> /* Don't move TLF_NAPPING without adjusting the code in entry_32.S */
> diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
> index 5bbd1bc..17f8a18 100644
> --- a/arch/powerpc/kernel/entry_64.S
> +++ b/arch/powerpc/kernel/entry_64.S
> @@ -151,8 +151,8 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
>
> CURRENT_THREAD_INFO(r11, r1)
> ld r10,TI_FLAGS(r11)
> - andi. r11,r10,_TIF_SYSCALL_DOTRACE
> - bne syscall_dotrace /* does not return */
> + andi. r10,r10,(_TIF_SYSCALL_DOTRACE|_TIF_KLP_NEED_UPDATE)
> + bne- syscall_precall /* does not return */
> cmpldi 0,r0,NR_syscalls
> bge- syscall_enosys
>
> @@ -245,6 +245,17 @@ syscall_error:
> neg r3,r3
> std r5,_CCR(r1)
> b .Lsyscall_error_cont
> +
> +syscall_precall:
> + andi. r10,r10,(_TIF_KLP_NEED_UPDATE)
> + beq+ syscall_dotrace
> +
> + addi r11,r11,TI_FLAGS
> +1: ldarx r12,0,r11
> + andc r12,r12,r10
> + stdcx. r12,0,r11
> + bne- 1b
> + subi r11,r11,TI_FLAGS
>
> /* Traced system call support */
> syscall_dotrace:
--
Josh
+++ Miroslav Benes [14/04/16 15:28 +0200]:
>On Wed, 13 Apr 2016, Jessica Yu wrote:
>
>> +++ Miroslav Benes [13/04/16 15:01 +0200]:
>> > On Wed, 13 Apr 2016, Michael Ellerman wrote:
>> >
>> > > This series adds live patching support for powerpc (ppc64le only ATM).
>> > >
>> > > It's unchanged since the version I posted on March 24, with the exception
>> > > that
>> > > I've dropped the first patch, which was a testing-only patch.
>> > >
>> > > If there's no further comments I'll put this in a topic branch in the next
>> > > day
>> > > or two and Jiri & I will both merge that into next.
>> >
>> > Hi,
>> >
>> > I'll definitely give it a proper look today or tomorrow, but there is one
>> > thing that needs to be solved. The patch set from Jessica reworking
>> > relocations for live patching is now merged in our for-next branch. This
>> > means that we need to find out if there is something in struct
>> > mod_arch_specific for powerpc which needs to be preserved and do it.
>> >
>>
>> I took a look around the powerpc module.c code and it looks like the
>> mod_arch_specific stuff should be fine, since it is statically allocated
>> in the module struct (unlike the situation in s390, where
>> mod->arch.syminfo was vmalloc'd and we had to delay the free).
>> However I'm not familiar with the powerpc code so I need to dig around
>> a bit more to be 100% sure.
>
>I came to the same conclusion. There is only struct bug_entry *bug_table
>in mod_arch_specific but it looks unimportant wrt relocations.
Yeah, I think we are fine. As long as none of the values in
mod_arch_specific are "cleared," and I don't see that happening
anywhere.
>> A second concern I have is that apply_relocate_add() relies on
>> sections like .stubs and .toc (for 64-bit) and .init.plt and .plt
>> sections (for 32-bit). In order for apply_relocate_add() to work for
>> livepatch, we must make sure these sections aren't thrown away and are
>> not in init module memory since this memory will be freed at the end
>> of module load (see how INIT_OFFSET_MASK is used in kernel/module.c).
>> As long as these sections are placed in module core memory, we will be
>> OK. I need to think about this a bit more.
>
>I knew I shouldn't have opened arch/powerpc/kernel/module*.c.
>
>We could always hack sh_flags of those sections in
>module_arch_frob_sections() to make them stay.
>
I think we are fine here too. The onus would be on the patch build
tool (e.g., kpatch) to set the sh_flags to SHF_ALLOC, like we
already do to keep the klp relocation sections in memory :-)
For the 32-bit module code, I don't believe we would need to preserve
the .init.plt section for livepatch's call to apply_relocate_add(),
since relocations to init sections should've been applied during
module initialization, and we don't patch those types of functions.
Please correct me if my understanding is off.
Jessica
On Thu, 14 Apr 2016, Jessica Yu wrote:
> +++ Miroslav Benes [14/04/16 15:28 +0200]:
> > On Wed, 13 Apr 2016, Jessica Yu wrote:
>
> > > A second concern I have is that apply_relocate_add() relies on
> > > sections like .stubs and .toc (for 64-bit) and .init.plt and .plt
> > > sections (for 32-bit). In order for apply_relocate_add() to work for
> > > livepatch, we must make sure these sections aren't thrown away and are
> > > not in init module memory since this memory will be freed at the end
> > > of module load (see how INIT_OFFSET_MASK is used in kernel/module.c).
> > > As long as these sections are placed in module core memory, we will be
> > > OK. I need to think about this a bit more.
> >
> > I knew I shouldn't have opened arch/powerpc/kernel/module*.c.
> >
> > We could always hack sh_flags of those sections in
> > module_arch_frob_sections() to make them stay.
> >
>
> I think we are fine here too. The onus would be on the patch build
> tool (e.g., kpatch) to set the sh_flags to SHF_ALLOC, like we
> already do to keep the klp relocation sections in memory :-)
Yes, this is probably the best way.
> For the 32-bit module code, I don't believe we would need to preserve
> the .init.plt section for livepatch's call to apply_relocate_add(),
> since relocations to init sections should've been applied during
> module initialization, and we don't patch those types of functions.
> Please correct me if my understanding is off.
I think you are right, but I also think we don't have to worry about
32-bit powerpc anyway. This patch set supports ppc64le only so we can
leave it for now.
Miroslav
On Fri, 2016-04-15 at 10:28 +0200, Miroslav Benes wrote:
> On Thu, 14 Apr 2016, Jessica Yu wrote:
> > For the 32-bit module code, I don't believe we would need to preserve
> > the .init.plt section for livepatch's call to apply_relocate_add(),
> > since relocations to init sections should've been applied during
> > module initialization, and we don't patch those types of functions.
> > Please correct me if my understanding is off.
>
> I think you are right, but I also think we don't have to worry about
> 32-bit powerpc anyway. This patch set supports ppc64le only so we can
> leave it for now.
Yep. Whoever wants to do 32-bit live patch support can cross that bridge when
they come to it.
cheers
On Thu, 2016-04-14 at 11:41 -0500, Josh Poimboeuf wrote:
> On Thu, Apr 14, 2016 at 05:20:29PM +0200, Torsten Duwe wrote:
> > On Thu, Apr 14, 2016 at 11:08:02PM +1000, Michael Ellerman wrote:
> > > On Thu, 2016-04-14 at 14:57 +0200, Torsten Duwe wrote:
> > > > FTR: then I still have a few ppc64 hunks floating around to support certain consistency
> > > > models...
> > >
> > > OK. I'm not quite sure what you mean but post them and we'll see I guess :)
> >
> > It's *roughly* the ppc64 equivalent of Josh Poimboeuf's Mar 25
> > > [RFC PATCH v1.9 14/14] livepatch: update task universe when exiting kernel
> > which only considers x86.
> >
> > It's forward ported from an earlier code base; there's some glue missing,
> > but here it is, for reference.
> >
> > Signed-off-by: Torsten Duwe <[email protected]>
>
> Hi Torsten,
>
> Thanks for sharing. This is quite fortuitous as Miroslav just today
> mentioned to me that we would need something like this. If you don't
> mind, I may pull this patch or some variant of it into v2 of the
> consistency model.
Well please wait for me to review & ack it before you pull it into anything
permanent.
At a quick glance it seems OK but I'd probably do it a little differently. I'll
try and have a closer look next week.
cheers
On Thu, 2016-04-14 at 16:34 +0200, Jiri Kosina wrote:
> On Thu, 14 Apr 2016, Torsten Duwe wrote:
> > > > > > It's unchanged since the version I posted on March 24, with the exception that
> > > > > > I've dropped the first patch, which was a testing-only patch.
> >
> > Confirmed. And it still works on top of 4.6-rc3, even with the
> > additional testing.
>
> Thanks a lot for testing.
>
> The important part here is testing on top of
> livepatching.git#for-4.7/arch-independent-klp-relocations as well.
>
> I am pretty sure there will be adjustments needed for the merge, as we'll
> have to figure out which parts of ELF can't be thrown away and need to be
> preserved in order for the relocation entry to be successfully
> constructed.
>
> Michael, I think this is an additional reason why the whole final pile
> will have to go through livepatching.git, as the merge with what we have
> in for-4.7/arch-independent-klp-relocations might not be completely
> trivial.
Well it will go through both :)
I'll merge it into powerpc#next, and you can merge it into livepatching and do
what ever else is needed as part of, or after, that merge.
cheers
On Fri, Apr 15, 2016 at 09:22:49PM +1000, Michael Ellerman wrote:
> On Thu, 2016-04-14 at 11:41 -0500, Josh Poimboeuf wrote:
> > On Thu, Apr 14, 2016 at 05:20:29PM +0200, Torsten Duwe wrote:
> > > On Thu, Apr 14, 2016 at 11:08:02PM +1000, Michael Ellerman wrote:
> > > > On Thu, 2016-04-14 at 14:57 +0200, Torsten Duwe wrote:
> > > > > FTR: then I still have a few ppc64 hunks floating around to support certain consistency
> > > > > models...
> > > >
> > > > OK. I'm not quite sure what you mean but post them and we'll see I guess :)
> > >
> > > It's *roughly* the ppc64 equivalent of Josh Poimboeuf's Mar 25
>
> > > > [RFC PATCH v1.9 14/14] livepatch: update task universe when exiting kernel
> > > which only considers x86.
> > >
> > > It's forward ported from an earlier code base; there's some glue missing,
> > > but here it is, for reference.
> > >
> > > Signed-off-by: Torsten Duwe <[email protected]>
> >
> > Hi Torsten,
> >
> > Thanks for sharing. This is quite fortuitous as Miroslav just today
> > mentioned to me that we would need something like this. If you don't
> > mind, I may pull this patch or some variant of it into v2 of the
> > consistency model.
>
> Well please wait for me to review & ack it before you pull it into anything
> permanent.
>
> At a quick glance it seems OK but I'd probably do it a little differently. I'll
> try and have a closer look next week.
Sure, no problem. The consistency model patches are still in
development so they won't be merged anytime soon. And we wouldn't merge
any powerpc code without maintainer acks anyway. I'm just glad you guys
are looking at it so I don't have to butcher it ;-)
--
Josh
On Fri, 2016-04-15 at 07:59 -0500, Josh Poimboeuf wrote:
> On Fri, Apr 15, 2016 at 09:22:49PM +1000, Michael Ellerman wrote:
> > On Thu, 2016-04-14 at 11:41 -0500, Josh Poimboeuf wrote:
> > > On Thu, Apr 14, 2016 at 05:20:29PM +0200, Torsten Duwe wrote:
> > > > On Thu, Apr 14, 2016 at 11:08:02PM +1000, Michael Ellerman wrote:
> > > > > On Thu, 2016-04-14 at 14:57 +0200, Torsten Duwe wrote:
> > > > > > FTR: then I still have a few ppc64 hunks floating around to support certain consistency
> > > > > > models...
> > > > >
> > > > > OK. I'm not quite sure what you mean but post them and we'll see I guess :)
> > > >
> > > > It's *roughly* the ppc64 equivalent of Josh Poimboeuf's Mar 25
> > > > > [RFC PATCH v1.9 14/14] livepatch: update task universe when exiting kernel
> > > > which only considers x86.
> > > >
> > > > It's forward ported from an earlier code base; there's some glue missing,
> > > > but here it is, for reference.
> > > >
> > > > Signed-off-by: Torsten Duwe <[email protected]>
> > >
> > > Hi Torsten,
> > >
> > > Thanks for sharing. This is quite fortuitous as Miroslav just today
> > > mentioned to me that we would need something like this. If you don't
> > > mind, I may pull this patch or some variant of it into v2 of the
> > > consistency model.
> >
> > Well please wait for me to review & ack it before you pull it into anything
> > permanent.
> >
> > At a quick glance it seems OK but I'd probably do it a little differently. I'll
> > try and have a closer look next week.
>
> Sure, no problem. The consistency model patches are still in
> development so they won't be merged anytime soon. And we wouldn't merge
> any powerpc code without maintainer acks anyway. I'm just glad you guys
> are looking at it so I don't have to butcher it ;-)
I'm paid to butcher it ;)
cheers
On Thu, 14 Apr 2016, Michael Ellerman wrote:
> Topic branch here:
>
> https://git.kernel.org/cgit/linux/kernel/git/powerpc/linux.git/log/?h=topic/livepatch
>
> I will merge that before Monday (my time) if I don't hear any objections.
I've now pulled this into livepatching.git#for-4.7/livepatching-ppc64 and
merged that branch into for-next as well.
That branch already contains all the relocation changes queued for 4.7, so
as much testing of the merged result as possible on ppc64 would be
appreciated.
Thanks everybody,
--
Jiri Kosina
SUSE Labs
On Wed, 2016-04-13 at 12:53:23 UTC, Michael Ellerman wrote:
> Add the kconfig logic & assembly support for handling live patched
> functions. This depends on DYNAMIC_FTRACE_WITH_REGS, which in turn
> depends on the new -mprofile-kernel ftrace ABI, which is only supported
> currently on ppc64le.
...
>
> Signed-off-by: Michael Ellerman <[email protected]>
> Reviewed-by: Torsten Duwe <[email protected]>
> Reviewed-by: Balbir Singh <[email protected]>
Applied to powerpc next.
https://git.kernel.org/powerpc/c/85baa095497f3e590df9f6c893
cheers
On Wed, 2016-04-13 at 12:53:19 UTC, Michael Ellerman wrote:
> In order to support live patching on powerpc we would like to call
> ftrace_location_range(), so make it global.
>
> Signed-off-by: Torsten Duwe <[email protected]>
> Signed-off-by: Balbir Singh <[email protected]>
> Signed-off-by: Michael Ellerman <[email protected]>
Applied to powerpc next.
https://git.kernel.org/powerpc/c/04cf31a759ef575f750a63777c
cheers
On Wed, 2016-04-13 at 12:53:20 UTC, Michael Ellerman wrote:
> When livepatch tries to patch a function it takes the function address
> and asks ftrace to install the livepatch handler at that location.
> ftrace will look for an mcount call site at that exact address.
>
> On powerpc the mcount location is not the first instruction of the
> function, and in fact it's not at a constant offset from the start of
> the function. To accommodate this add a hook which arch code can
> override to customise the behaviour.
>
> Signed-off-by: Torsten Duwe <[email protected]>
> Signed-off-by: Balbir Singh <[email protected]>
> Signed-off-by: Petr Mladek <[email protected]>
> Signed-off-by: Michael Ellerman <[email protected]>
Applied to powerpc next.
https://git.kernel.org/powerpc/c/28e7cbd3e0f5fefec892842d13
cheers
On Wed, 2016-04-13 at 12:53:22 UTC, Michael Ellerman wrote:
> In order to support live patching we need to maintain an alternate
> stack of TOC & LR values. We use the base of the stack for this, and
> store the "live patch stack pointer" in struct thread_info.
>
> Unlike the other fields of thread_info, we can not statically initialise
> that value, so it must be done at run time.
>
> This patch just adds the code to support that, it is not enabled until
> the next patch which actually adds live patch support.
>
> Signed-off-by: Michael Ellerman <[email protected]>
> Acked-by: Balbir Singh <[email protected]>
Applied to powerpc next.
https://git.kernel.org/powerpc/c/5d31a96e6c0187f2c5d7004e00
cheers
On Wed, 2016-04-13 at 12:53:21 UTC, Michael Ellerman wrote:
> Add the powerpc specific livepatch definitions. In particular we provide
> a non-default implementation of klp_get_ftrace_location().
>
> This is required because the location of the mcount call is not constant
> when using -mprofile-kernel (which we always do for live patching).
>
> Signed-off-by: Torsten Duwe <[email protected]>
> Signed-off-by: Balbir Singh <[email protected]>
> Signed-off-by: Michael Ellerman <[email protected]>
Applied to powerpc next.
https://git.kernel.org/powerpc/c/f63e6d89876034c21ecd18bb1c
cheers
On 16/04/16 01:07, Jiri Kosina wrote:
> On Thu, 14 Apr 2016, Michael Ellerman wrote:
>
>> Topic branch here:
>>
>> https://git.kernel.org/cgit/linux/kernel/git/powerpc/linux.git/log/?h=topic/livepatch
>>
>> I will merge that before Monday (my time) if I don't hear any objections.
>
> I've now pulled this into livepatching.git#for-4.7/livepatching-ppc64 and
> merged that branch into for-next as well.
>
> That branch already contains all the relocation changes queued for 4.7, so
> as much testing of the merged result as possible on ppc64 would be
> appreciated.
Thanks, do we have a summary of what the relocation changes look like?
Balbir Singh.
On Wed, 20 Apr 2016, Balbir Singh wrote:
> Thanks, do we have a summary of what the relocation changes look like?
This work is queued in
livepatching.git#for-4.7/arch-independent-klp-relocations
--
Jiri Kosina
SUSE Labs