2019-05-01 00:15:41

by kernel test robot

[permalink] [raw]
Subject: [tip:x86/mm 14/35] kernel/trace/bpf_trace.c:179:16: error: implicit declaration of function 'nmi_uaccess_okay'; did you mean '__access_ok'?

tree: https://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/mm
head: 3950746d9d8ef981c1cb842384e0e86e8d1aad76
commit: c7b6f29b6257532792fc722b68fcc0e00b5a856c [14/35] bpf: Fail bpf_probe_write_user() while mm is switched
config: s390-defconfig (attached as .config)
compiler: s390x-linux-gnu-gcc (Debian 7.2.0-11) 7.2.0
reproduce:
wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
chmod +x ~/bin/make.cross
git checkout c7b6f29b6257532792fc722b68fcc0e00b5a856c
# save the attached .config to linux build tree
GCC_VERSION=7.2.0 make.cross ARCH=s390

If you fix the issue, kindly add following tag
Reported-by: kbuild test robot <[email protected]>

All errors (new ones prefixed by >>):

In file included from include/linux/kernel.h:11:0,
from kernel/trace/bpf_trace.c:5:
kernel/trace/bpf_trace.c: In function '____bpf_probe_write_user':
>> kernel/trace/bpf_trace.c:179:16: error: implicit declaration of function 'nmi_uaccess_okay'; did you mean '__access_ok'? [-Werror=implicit-function-declaration]
if (unlikely(!nmi_uaccess_okay()))
^
include/linux/compiler.h:77:42: note: in definition of macro 'unlikely'
# define unlikely(x) __builtin_expect(!!(x), 0)
^
cc1: some warnings being treated as errors

vim +179 kernel/trace/bpf_trace.c

> 5 #include <linux/kernel.h>
6 #include <linux/types.h>
7 #include <linux/slab.h>
8 #include <linux/bpf.h>
9 #include <linux/bpf_perf_event.h>
10 #include <linux/filter.h>
11 #include <linux/uaccess.h>
12 #include <linux/ctype.h>
13 #include <linux/kprobes.h>
14 #include <linux/syscalls.h>
15 #include <linux/error-injection.h>
16
17 #include <asm/tlb.h>
18
19 #include "trace_probe.h"
20 #include "trace.h"
21
22 #ifdef CONFIG_MODULES
23 struct bpf_trace_module {
24 struct module *module;
25 struct list_head list;
26 };
27
28 static LIST_HEAD(bpf_trace_modules);
29 static DEFINE_MUTEX(bpf_module_mutex);
30
31 static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
32 {
33 struct bpf_raw_event_map *btp, *ret = NULL;
34 struct bpf_trace_module *btm;
35 unsigned int i;
36
37 mutex_lock(&bpf_module_mutex);
38 list_for_each_entry(btm, &bpf_trace_modules, list) {
39 for (i = 0; i < btm->module->num_bpf_raw_events; ++i) {
40 btp = &btm->module->bpf_raw_events[i];
41 if (!strcmp(btp->tp->name, name)) {
42 if (try_module_get(btm->module))
43 ret = btp;
44 goto out;
45 }
46 }
47 }
48 out:
49 mutex_unlock(&bpf_module_mutex);
50 return ret;
51 }
52 #else
53 static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
54 {
55 return NULL;
56 }
57 #endif /* CONFIG_MODULES */
58
59 u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
60 u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
61
62 /**
63 * trace_call_bpf - invoke BPF program
64 * @call: tracepoint event
65 * @ctx: opaque context pointer
66 *
67 * kprobe handlers execute BPF programs via this helper.
68 * Can be used from static tracepoints in the future.
69 *
70 * Return: BPF programs always return an integer which is interpreted by
71 * kprobe handler as:
72 * 0 - return from kprobe (event is filtered out)
73 * 1 - store kprobe event into ring buffer
74 * Other values are reserved and currently alias to 1
75 */
76 unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
77 {
78 unsigned int ret;
79
80 if (in_nmi()) /* not supported yet */
81 return 1;
82
83 preempt_disable();
84
85 if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
86 /*
87 * since some bpf program is already running on this cpu,
88 * don't call into another bpf program (same or different)
89 * and don't send kprobe event into ring-buffer,
90 * so return zero here
91 */
92 ret = 0;
93 goto out;
94 }
95
96 /*
97 * Instead of moving rcu_read_lock/rcu_dereference/rcu_read_unlock
98 * to all call sites, we did a bpf_prog_array_valid() there to check
99 * whether call->prog_array is empty or not, which is
99 * a heuristic to speed up execution.
101 *
102 * If bpf_prog_array_valid() fetched prog_array was
103 * non-NULL, we go into trace_call_bpf() and do the actual
104 * proper rcu_dereference() under RCU lock.
105 * If it turns out that prog_array is NULL then, we bail out.
106 * For the opposite, if the bpf_prog_array_valid() fetched pointer
107 * was NULL, you'll skip the prog_array with the risk of missing
108 * out on events when it was updated in between this and the
109 * rcu_dereference() which is accepted risk.
110 */
111 ret = BPF_PROG_RUN_ARRAY_CHECK(call->prog_array, ctx, BPF_PROG_RUN);
112
113 out:
114 __this_cpu_dec(bpf_prog_active);
115 preempt_enable();
116
117 return ret;
118 }
119 EXPORT_SYMBOL_GPL(trace_call_bpf);
120
121 #ifdef CONFIG_BPF_KPROBE_OVERRIDE
122 BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
123 {
124 regs_set_return_value(regs, rc);
125 override_function_with_return(regs);
126 return 0;
127 }
128
129 static const struct bpf_func_proto bpf_override_return_proto = {
130 .func = bpf_override_return,
131 .gpl_only = true,
132 .ret_type = RET_INTEGER,
133 .arg1_type = ARG_PTR_TO_CTX,
134 .arg2_type = ARG_ANYTHING,
135 };
136 #endif
137
138 BPF_CALL_3(bpf_probe_read, void *, dst, u32, size, const void *, unsafe_ptr)
139 {
140 int ret;
141
142 ret = probe_kernel_read(dst, unsafe_ptr, size);
143 if (unlikely(ret < 0))
144 memset(dst, 0, size);
145
146 return ret;
147 }
148
149 static const struct bpf_func_proto bpf_probe_read_proto = {
150 .func = bpf_probe_read,
151 .gpl_only = true,
152 .ret_type = RET_INTEGER,
153 .arg1_type = ARG_PTR_TO_UNINIT_MEM,
154 .arg2_type = ARG_CONST_SIZE_OR_ZERO,
155 .arg3_type = ARG_ANYTHING,
156 };
157
158 BPF_CALL_3(bpf_probe_write_user, void *, unsafe_ptr, const void *, src,
159 u32, size)
160 {
161 /*
162 * Ensure we're in user context which is safe for the helper to
163 * run. This helper has no business in a kthread.
164 *
165 * access_ok() should prevent writing to non-user memory, but in
166 * some situations (nommu, temporary switch, etc) access_ok() does
167 * not provide enough validation, hence the check on KERNEL_DS.
168 *
169 * nmi_uaccess_okay() ensures the probe is not run in an interim
170 * state, when the task or mm are switched. This is specifically
171 * required to prevent the use of temporary mm.
172 */
173
174 if (unlikely(in_interrupt() ||
175 current->flags & (PF_KTHREAD | PF_EXITING)))
176 return -EPERM;
177 if (unlikely(uaccess_kernel()))
178 return -EPERM;
> 179 if (unlikely(!nmi_uaccess_okay()))
180 return -EPERM;
181 if (!access_ok(unsafe_ptr, size))
182 return -EPERM;
183
184 return probe_kernel_write(unsafe_ptr, src, size);
185 }
186

---
0-DAY kernel test infrastructure Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all Intel Corporation


Attachments:
(No filename) (7.82 kB)
.config.gz (10.82 kB)
Download all attachments

2019-05-01 04:20:32

by Nadav Amit

[permalink] [raw]
Subject: Re: [tip:x86/mm 14/35] kernel/trace/bpf_trace.c:179:16: error: implicit declaration of function 'nmi_uaccess_okay'; did you mean '__access_ok'?

> On Apr 30, 2019, at 5:13 PM, kbuild test robot <[email protected]> wrote:
>
> tree: https://nam04.safelinks.protection.outlook.com/?url=https%3A%2F%2Fgit.kernel.org%2Fpub%2Fscm%2Flinux%2Fkernel%2Fgit%2Ftip%2Ftip.git&amp;data=02%7C01%7Cnamit%40vmware.com%7C8a380b9453f249fd924a08d6cdc9e8cd%7Cb39138ca3cee4b4aa4d6cd83d9dd62f0%7C0%7C0%7C636922664483201432&amp;sdata=NWhVtiLZ4l7LvLKsNNDrR4A8PlpXIeH1tErfFz6EmcM%3D&amp;reserved=0 x86/mm
> head: 3950746d9d8ef981c1cb842384e0e86e8d1aad76
> commit: c7b6f29b6257532792fc722b68fcc0e00b5a856c [14/35] bpf: Fail bpf_probe_write_user() while mm is switched
> config: s390-defconfig (attached as .config)
> compiler: s390x-linux-gnu-gcc (Debian 7.2.0-11) 7.2.0
> reproduce:
> wget https://nam04.safelinks.protection.outlook.com/?url=https%3A%2F%2Fraw.githubusercontent.com%2Fintel%2Flkp-tests%2Fmaster%2Fsbin%2Fmake.cross&amp;data=02%7C01%7Cnamit%40vmware.com%7C8a380b9453f249fd924a08d6cdc9e8cd%7Cb39138ca3cee4b4aa4d6cd83d9dd62f0%7C0%7C0%7C636922664483201432&amp;sdata=Sm%2FfjL7UqETv7HEEr32M2v3XmGwdAD10Wyr8ZmoQX50%3D&amp;reserved=0 -O ~/bin/make.cross
> chmod +x ~/bin/make.cross
> git checkout c7b6f29b6257532792fc722b68fcc0e00b5a856c
> # save the attached .config to linux build tree
> GCC_VERSION=7.2.0 make.cross ARCH=s390
>
> If you fix the issue, kindly add following tag
> Reported-by: kbuild test robot <[email protected]>
>
> All errors (new ones prefixed by >>):
>
> In file included from include/linux/kernel.h:11:0,
> from kernel/trace/bpf_trace.c:5:
> kernel/trace/bpf_trace.c: In function '____bpf_probe_write_user':
>>> kernel/trace/bpf_trace.c:179:16: error: implicit declaration of function 'nmi_uaccess_okay'; did you mean '__access_ok'? [-Werror=implicit-function-declaration]
> if (unlikely(!nmi_uaccess_okay()))

So s390 does not use the generic TLB gather architecture, which triggered
the problem.

Unfortunately, reproducing the failed build caused (other) errors. But
worse, fixing this issue “cleanly” is hard due to the dependencies between
the header files.

The best I managed to do without over-complicating the solution is the
following, which might not be super clean. Let me know whether to submit a
separate patch (on top or instead of the current one).

-- >8 --

From bc60dfc415f9ecc01771b388dacd41adc976929c Mon Sep 17 00:00:00 2001
From: Nadav Amit <[email protected]>
Date: Tue, 30 Apr 2019 13:48:48 -0700
Subject: [PATCH] mm/tlb: Fix "Provide default nmi_uaccess_okay()"

Signed-off-by: Nadav Amit <[email protected]>
---
arch/x86/include/asm/tlbflush.h | 4 +---
arch/x86/include/asm/uaccess.h | 2 ++
include/asm-generic/tlb.h | 9 ---------
include/linux/uaccess.h | 12 ++++++++++++
4 files changed, 15 insertions(+), 12 deletions(-)

diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index dee375831962..765e6b01eefd 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -249,7 +249,7 @@ DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
* interrupted some kernel code that was temporarily using a
* different mm.
*/
-static inline bool nmi_uaccess_okay(void)
+static inline bool arch_nmi_uaccess_okay(void)
{
struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
struct mm_struct *current_mm = current->mm;
@@ -274,8 +274,6 @@ static inline bool nmi_uaccess_okay(void)
return true;
}

-#define nmi_uaccess_okay nmi_uaccess_okay
-
/* Initialize cr4 shadow for this CPU. */
static inline void cr4_init_shadow(void)
{
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 22ba683afdc2..d4b487d2441c 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -698,6 +698,8 @@ extern struct movsl_mask {
*/
#define __copy_from_user_nmi __copy_from_user_inatomic

+#define nmi_uaccess_okay() arch_nmi_uaccess_okay()
+
/*
* The "unsafe" user accesses aren't really "unsafe", but the naming
* is a big fat warning: you have to not only do the access_ok()
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 480e5b2a5748..b9edc7608d90 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -21,15 +21,6 @@
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

-/*
- * Blindly accessing user memory from NMI context can be dangerous
- * if we're in the middle of switching the current user task or switching
- * the loaded mm.
- */
-#ifndef nmi_uaccess_okay
-# define nmi_uaccess_okay() true
-#endif
-
#ifdef CONFIG_MMU

/*
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 2b70130af585..e2e12945deab 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -280,4 +280,16 @@ void __noreturn usercopy_abort(const char *name, const char *detail,
unsigned long len);
#endif

+/*
+ * Blindly accessing user memory from NMI context can be dangerous if we're in
+ * the middle of switching the current user task or switching the loaded mm.
+ * Provide a default implementation for architectures that do not provide one.
+ */
+#ifndef nmi_uaccess_okay
+static inline bool nmi_uaccess_okay(void)
+{
+ return true;
+}
+#endif
+
#endif /* __LINUX_UACCESS_H__ */
--
2.17.1