Return-Path: Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1757757AbcCCQwZ (ORCPT ); Thu, 3 Mar 2016 11:52:25 -0500 Received: from mx2.suse.de ([195.135.220.15]:35006 "EHLO mx2.suse.de" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1754608AbcCCQwX (ORCPT ); Thu, 3 Mar 2016 11:52:23 -0500 From: Petr Mladek To: linuxppc-dev@ozlabs.org Cc: Balbir Singh , duwe@lst.de, linux-kernel@vger.kernel.org, rostedt@goodmis.org, kamalesh@linux.vnet.ibm.com, jeyu@redhat.com, jkosina@suse.cz, live-patching@vger.kernel.org, mbenes@suse.cz, Torsten Duwe , Petr Mladek Subject: [PATCH][v4] livepatch/ppc: Enable livepatching on powerpc Date: Thu, 3 Mar 2016 17:52:01 +0100 Message-Id: <1457023921-2051-1-git-send-email-pmladek@suse.com> X-Mailer: git-send-email 1.8.5.6 Sender: linux-kernel-owner@vger.kernel.org List-ID: X-Mailing-List: linux-kernel@vger.kernel.org Content-Length: 13053 Lines: 379 From: Balbir Singh Changelog v4: 1. Renamed klp_matchaddr() to klp_get_ftrace_location() and used it just to convert the function address. 2. Synced klp_write_module_reloc() with s390(); made it inline, no error message, return -ENOSYS 3. Added an error message when including powerpc/include/asm/livepatch.h without HAVE_LIVEPATCH 4. Update some comments. Changelog v3: 1. Moved -ENOSYS to -EINVAL in klp_write_module_reloc 2. Moved klp_matchaddr to use ftrace_location_range Changelog v2: 1. Implement review comments by Michael 2. The previous version compared _NIP from the wrong location to check for whether we are going to a patched location This applies on top of the patches posted by Michael https://patchwork.ozlabs.org/patch/589791/ It enables livepatching. This takes patch 6/8 and 7/8 of v8 as the base. (See the reference [1] below) and adds logic for checking offset ranges in livepatch with ftrace_location_range. 
I tested the sample in the livepatch Signed-off-by: Torsten Duwe Signed-off-by: Balbir Singh Signed-off-by: Petr Mladek --- arch/powerpc/Kconfig | 3 +++ arch/powerpc/include/asm/livepatch.h | 47 +++++++++++++++++++++++++++++++++ arch/powerpc/kernel/Makefile | 1 + arch/powerpc/kernel/entry_64.S | 50 ++++++++++++++++++++++++++++++++++++ arch/powerpc/kernel/livepatch.c | 29 +++++++++++++++++++++ include/linux/ftrace.h | 1 + include/linux/livepatch.h | 2 ++ kernel/livepatch/core.c | 28 +++++++++++++++++--- kernel/trace/ftrace.c | 14 +++++++++- 9 files changed, 171 insertions(+), 4 deletions(-) create mode 100644 arch/powerpc/include/asm/livepatch.h create mode 100644 arch/powerpc/kernel/livepatch.c diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 792e169c1516..8278e5ef0dfb 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -159,6 +159,7 @@ config PPC select ARCH_HAS_DEVMEM_IS_ALLOWED select HAVE_ARCH_SECCOMP_FILTER select ARCH_HAS_UBSAN_SANITIZE_ALL + select HAVE_LIVEPATCH if HAVE_DYNAMIC_FTRACE_WITH_REGS config GENERIC_CSUM def_bool CPU_LITTLE_ENDIAN @@ -1109,3 +1110,5 @@ config PPC_LIB_RHEAP bool source "arch/powerpc/kvm/Kconfig" + +source "kernel/livepatch/Kconfig" diff --git a/arch/powerpc/include/asm/livepatch.h b/arch/powerpc/include/asm/livepatch.h new file mode 100644 index 000000000000..b9856ceaa0cf --- /dev/null +++ b/arch/powerpc/include/asm/livepatch.h @@ -0,0 +1,47 @@ +/* + * livepatch.h - powerpc-specific Kernel Live Patching Core + * + * Copyright (C) 2015 SUSE + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see <http://www.gnu.org/licenses/>. + */ +#ifndef _ASM_POWERPC64_LIVEPATCH_H +#define _ASM_POWERPC64_LIVEPATCH_H + +#include <linux/module.h> + +#ifdef CONFIG_LIVEPATCH + +static inline int klp_check_compiler_support(void) +{ + return 0; +} + +static inline int klp_write_module_reloc(struct module *mod, unsigned long + type, unsigned long loc, unsigned long value) +{ + /* This requires infrastructure changes; we need the loadinfos. */ + return -ENOSYS; +} + +static inline void klp_arch_set_pc(struct pt_regs *regs, unsigned long ip) +{ + regs->nip = ip; +} + +#else /* CONFIG_LIVEPATCH */ +#error Include linux/livepatch.h, not asm/livepatch.h +#endif /* CONFIG_LIVEPATCH */ + +#endif /* _ASM_POWERPC64_LIVEPATCH_H */ diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index 2da380fcc34c..b767e140f040 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -119,6 +119,7 @@ obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o obj-$(CONFIG_TRACING) += trace_clock.o +obj-$(CONFIG_LIVEPATCH) += livepatch.o ifneq ($(CONFIG_PPC_INDIRECT_PIO),y) obj-y += iomap.o diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index ec7f8aada697..2d5333c228f1 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -1224,6 +1224,9 @@ _GLOBAL(ftrace_caller) addi r3,r3,function_trace_op@toc@l ld r5,0(r3) +#ifdef CONFIG_LIVEPATCH + mr r14,r7 /* remember old NIP */ +#endif /* Calculate ip from nip-4 into r3 for call below */ subi r3, r7, MCOUNT_INSN_SIZE @@ -1248,6 +1251,9 @@ ftrace_call: /* Load ctr with the possibly modified NIP */ ld r3, _NIP(r1) mtctr r3 +#ifdef CONFIG_LIVEPATCH + cmpd r14,r3 /* has NIP been altered? 
*/ +#endif /* Restore gprs */ REST_8GPRS(0,r1) @@ -1265,6 +1271,31 @@ ftrace_call: ld r0, LRSAVE(r1) mtlr r0 +#ifdef CONFIG_LIVEPATCH + beq+ 4f /* likely(old_NIP == new_NIP) */ + /* + * For a local call, restore this TOC after calling the patch function. + * For a global call, it does not matter what we restore here, + * since the global caller does its own restore right afterwards, + * anyway. Just insert a klp_return_helper frame in any case, + * so a patch function can always count on the changed stack offsets. + * The patch introduces a frame such that from the patched function + * we return back to klp_return helper. For ABI compliance r12, + * lr and LRSAVE(r1) contain the address of klp_return_helper. + * We loaded ctr with the address of the patched function earlier + */ + stdu r1, -32(r1) /* open new mini stack frame */ + std r2, 24(r1) /* save TOC now, unconditionally. */ + bl 5f +5: mflr r12 + addi r12, r12, (klp_return_helper + 4 - .)@l + std r12, LRSAVE(r1) + mtlr r12 + mfctr r12 /* allow for TOC calculation in newfunc */ + bctr +4: +#endif + #ifdef CONFIG_FUNCTION_GRAPH_TRACER stdu r1, -112(r1) .globl ftrace_graph_call @@ -1281,6 +1312,25 @@ _GLOBAL(ftrace_graph_stub) _GLOBAL(ftrace_stub) blr +#ifdef CONFIG_LIVEPATCH +/* Helper function for local calls that are becoming global + * due to live patching. + * We can't simply patch the NOP after the original call, + * because, depending on the consistency model, some kernel + * threads may still have called the original, local function + * *without* saving their TOC in the respective stack frame slot, + * so the decision is made per-thread during function return by + * maybe inserting a klp_return_helper frame or not. 
+*/ +klp_return_helper: + ld r2, 24(r1) /* restore TOC (saved by ftrace_caller) */ + addi r1, r1, 32 /* destroy mini stack frame */ + ld r0, LRSAVE(r1) /* get the real return address */ + mtlr r0 + blr +#endif + + #else _GLOBAL_TOC(_mcount) /* Taken from output of objdump from lib64/glibc */ diff --git a/arch/powerpc/kernel/livepatch.c b/arch/powerpc/kernel/livepatch.c new file mode 100644 index 000000000000..267ce0fdd89b --- /dev/null +++ b/arch/powerpc/kernel/livepatch.c @@ -0,0 +1,29 @@ +/* + * livepatch.c - powerpc-specific Kernel Live Patching Core + * + * Copyright (C) 2015 SUSE + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see <http://www.gnu.org/licenses/>. + */ +#include <linux/module.h> +#include <linux/ftrace.h> + +/* + * LivePatch works only with -mprofile-kernel on PPC. In this case, + * the ftrace location is always within the first 16 bytes. 
+ */ +unsigned long klp_get_ftrace_location(unsigned long faddr) +{ + return ftrace_location_range(faddr, faddr + 16); +} diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index c2b340e23f62..fb13cd3e68f9 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -455,6 +455,7 @@ int ftrace_update_record(struct dyn_ftrace *rec, int enable); int ftrace_test_record(struct dyn_ftrace *rec, int enable); void ftrace_run_stop_machine(int command); unsigned long ftrace_location(unsigned long ip); +unsigned long ftrace_location_range(unsigned long start, unsigned long end); unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec); unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec); diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h index a8828652f794..25a267bcb01f 100644 --- a/include/linux/livepatch.h +++ b/include/linux/livepatch.h @@ -134,6 +134,8 @@ int klp_unregister_patch(struct klp_patch *); int klp_enable_patch(struct klp_patch *); int klp_disable_patch(struct klp_patch *); +unsigned long klp_get_ftrace_location(unsigned long faddr); + #endif /* CONFIG_LIVEPATCH */ #endif /* _LINUX_LIVEPATCH_H_ */ diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c index bc2c85c064c1..9ad597faa57f 100644 --- a/kernel/livepatch/core.c +++ b/kernel/livepatch/core.c @@ -298,22 +298,39 @@ unlock: rcu_read_unlock(); } +/* + * Convert a function address into the appropriate ftrace location. + * + * The given address is returned on most architectures. LivePatching + * usually works only when the ftrace location is the first instruction + * in the function. 
+ */ +unsigned long __weak klp_get_ftrace_location(unsigned long faddr) +{ + return faddr; +} + static void klp_disable_func(struct klp_func *func) { struct klp_ops *ops; + unsigned long ftrace_loc; if (WARN_ON(func->state != KLP_ENABLED)) return; if (WARN_ON(!func->old_addr)) return; + ftrace_loc = klp_get_ftrace_location(func->old_addr); + if (WARN_ON(!ftrace_loc)) + return; + ops = klp_find_ops(func->old_addr); if (WARN_ON(!ops)) return; if (list_is_singular(&ops->func_stack)) { WARN_ON(unregister_ftrace_function(&ops->fops)); - WARN_ON(ftrace_set_filter_ip(&ops->fops, func->old_addr, 1, 0)); + WARN_ON(ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0)); list_del_rcu(&func->stack_node); list_del(&ops->node); @@ -328,6 +345,7 @@ static void klp_disable_func(struct klp_func *func) static int klp_enable_func(struct klp_func *func) { struct klp_ops *ops; + unsigned long ftrace_loc; int ret; if (WARN_ON(!func->old_addr)) @@ -336,6 +354,10 @@ static int klp_enable_func(struct klp_func *func) if (WARN_ON(func->state != KLP_DISABLED)) return -EINVAL; + ftrace_loc = klp_get_ftrace_location(func->old_addr); + if (WARN_ON(!ftrace_loc)) + return -EINVAL; + ops = klp_find_ops(func->old_addr); if (!ops) { ops = kzalloc(sizeof(*ops), GFP_KERNEL); @@ -352,7 +374,7 @@ static int klp_enable_func(struct klp_func *func) INIT_LIST_HEAD(&ops->func_stack); list_add_rcu(&func->stack_node, &ops->func_stack); - ret = ftrace_set_filter_ip(&ops->fops, func->old_addr, 0, 0); + ret = ftrace_set_filter_ip(&ops->fops, ftrace_loc, 0, 0); if (ret) { pr_err("failed to set ftrace filter for function '%s' (%d)\n", func->old_name, ret); @@ -363,7 +385,7 @@ static int klp_enable_func(struct klp_func *func) if (ret) { pr_err("failed to register ftrace handler for function '%s' (%d)\n", func->old_name, ret); - ftrace_set_filter_ip(&ops->fops, func->old_addr, 1, 0); + ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0); goto err; } diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 
57a6eea84694..f4e6aae6ebe7 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -1533,7 +1533,19 @@ static int ftrace_cmp_recs(const void *a, const void *b) return 0; } -static unsigned long ftrace_location_range(unsigned long start, unsigned long end) +/** + * ftrace_location_range - return the first address of a traced location + * if it touches the given ip range + * @start: start of range to search. + * @end: end of range to search (inclusive). @end points to the last byte + * to check. + * + * Returns rec->ip if the related ftrace location is a least partly within + * the given address range. That is, the first address of the instruction + * that is either a NOP or call to the function tracer. It checks the ftrace + * internal tables to determine if the address belongs or not. + */ +unsigned long ftrace_location_range(unsigned long start, unsigned long end) { struct ftrace_page *pg; struct dyn_ftrace *rec; -- 1.8.5.6