2013-05-02 22:30:41

by Xi Wang

[permalink] [raw]
Subject: [PATCH v3 -next 0/2] seccomp filter JIT for x86

This patchset is rebased against linux-next, on top of the seccomp JIT
interface from Nicolas Schichan:

00afda30 "bpf: add comments explaining the schedule_work() operation"
c2cf3192 "ARM: net: bpf_jit: add support for jitted seccomp filters."
8ef6cd9d "ARM: net: bpf_jit: make code generation less dependent on
struct sk_filter."
747416f4 "seccomp: add generic code for jitted seccomp filters."

Patch 1/2 implements JIT by calling seccomp_bpf_load(). It introduces
SEEN_SKBREF, as suggested by Eric Dumazet, to ensure that seccomp
filters don't use skb (%rdi).

Patch 2/2 eliminates the call to seccomp_bpf_load() and instead emits
instructions directly. It doesn't have to emit prologue/epilogue either.

Both patches have been tested using vsftpd, openssh, and the seccomp
filter examples.

My main worry about 2/2 is that it's way harder to maintain compared
to 1/2. I'm posting it here for completeness, but I'd suggest considering
patch 1/2 only for now.

Xi Wang (2):
x86: bpf_jit_comp: support BPF_S_ANC_SECCOMP_LD_W
x86: bpf_jit_comp: optimize BPF_S_ANC_SECCOMP_LD_W

arch/x86/Kconfig | 1 +
arch/x86/net/bpf_jit_comp.c | 220 +++++++++++++++++++++++++++++++++++++++-----
2 files changed, 198 insertions(+), 23 deletions(-)

--
1.8.1.2


2013-05-02 22:30:48

by Xi Wang

[permalink] [raw]
Subject: [PATCH v3 -next 2/2] x86: bpf_jit_comp: optimize BPF_S_ANC_SECCOMP_LD_W

This patch further optimizes JIT for seccomp filters. It removes the
call to seccomp_bpf_load() and directly emits instructions instead.

Signed-off-by: Xi Wang <[email protected]>
Cc: Daniel Borkmann <[email protected]>
Cc: Heiko Carstens <[email protected]>
Cc: Will Drewry <[email protected]>
Cc: Eric Dumazet <[email protected]>
Cc: Russell King <[email protected]>
Cc: David Laight <[email protected]>
Cc: "David S. Miller" <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: Nicolas Schichan <[email protected]>
---
arch/x86/net/bpf_jit_comp.c | 126 ++++++++++++++++++++++++++++++++++++++++----
1 file changed, 116 insertions(+), 10 deletions(-)

diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 64c72aa..08b024b 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -8,10 +8,11 @@
* of the License.
*/
#include <linux/moduleloader.h>
-#include <asm/cacheflush.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
+#include <asm/cacheflush.h>
+#include <asm/syscall.h>

/*
* Conventions :
@@ -113,7 +114,7 @@ do { \
#define SEEN_SKBREF (1 << 3) /* use pointer to skb */
#define SEEN_SECCOMP (1 << 4) /* seccomp filters */

-#define NEED_PERILOGUE(_seen) ((_seen) & (SEEN_XREG | SEEN_MEM | SEEN_DATAREF | SEEN_SECCOMP))
+#define NEED_PERILOGUE(_seen) ((_seen) & (SEEN_XREG | SEEN_MEM | SEEN_DATAREF))

static inline void bpf_flush_icache(void *start, void *end)
{
@@ -148,6 +149,25 @@ static int pkt_type_offset(void)
return -1;
}

+/* helper to find the offset in struct seccomp_data */
+#define BPF_DATA(_name) offsetof(struct seccomp_data, _name)
+
+/* helper to find the negative offset from the end of struct pt_regs */
+#define roffsetof(_type, _member) ((int)(offsetof(_type, _member) - sizeof(_type)))
+#define PT_REGS(_name) roffsetof(struct pt_regs, _name)
+
+#define EMIT_REGS_LOAD(offset) \
+do { \
+ if (is_imm8(offset)) { \
+ /* mov off8(%r8),%eax */ \
+ EMIT4(0x41, 0x8b, 0x40, offset); \
+ } else { \
+ /* mov off32(%r8),%eax */ \
+ EMIT3(0x41, 0x8b, 0x80); \
+ EMIT(offset, 4); \
+ } \
+} while (0)
+
static void *__bpf_jit_compile(struct sock_filter *filter, unsigned int flen, u8 seen_all)
{
u8 temp[64];
@@ -229,12 +249,44 @@ static void *__bpf_jit_compile(struct sock_filter *filter, unsigned int flen, u8
}

#ifdef CONFIG_SECCOMP_FILTER_JIT
+ /* For seccomp filters, load :
+ * r9 = current
+ * r8 = current->thread.sp0
+ * edi = task_thread_info(current)->status & TS_COMPAT
+ *
+ * r8 points to the end of struct pt_regs, task_pt_regs(current) + 1
+ */
if (seen_or_pass0 & SEEN_SECCOMP) {
/* seccomp filters: skb must be NULL */
if (seen_or_pass0 & (SEEN_SKBREF | SEEN_DATAREF)) {
pr_err_once("seccomp filters shouldn't use skb");
goto out;
}
+
+ /* r9 = current */
+ EMIT1(0x65);EMIT4(0x4c, 0x8b, 0x0c, 0x25); /* mov %gs:imm32,%r9 */
+ EMIT((u32)(unsigned long)&current_task, 4);
+
+ /* r8 = current->thread.sp0 */
+ EMIT3(0x4d, 0x8b, 0x81); /* mov off32(%r9),%r8 */
+ EMIT(offsetof(struct task_struct, thread.sp0), 4);
+
+ /* edi = task_thread_info(current)->status & TS_COMPAT */
+#ifdef CONFIG_IA32_EMULATION
+ /* task_thread_info(current): current->stack */
+ BUILD_BUG_ON(!is_imm8(offsetof(struct task_struct, stack)));
+ /* mov off8(%r9),%rdi */
+ EMIT4(0x49, 0x8b, 0x79, offsetof(struct task_struct, stack));
+ /* task_thread_info(current)->status */
+ BUILD_BUG_ON(!is_imm8(offsetof(struct thread_info, status)));
+ BUILD_BUG_ON(FIELD_SIZEOF(struct thread_info, status) != 4);
+ /* mov off8(%rdi),%edi */
+ EMIT3(0x8b, 0x7f, offsetof(struct thread_info, status));
+ /* task_thread_info(current)->status & TS_COMPAT */
+ BUILD_BUG_ON(!is_imm8(TS_COMPAT));
+ /* and imm8,%edi */
+ EMIT3(0x83, 0xe7, TS_COMPAT);
+#endif /* CONFIG_IA32_EMULATION */
}
#endif /* CONFIG_SECCOMP_FILTER_JIT */

@@ -709,14 +761,68 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
#ifdef CONFIG_SECCOMP_FILTER_JIT
case BPF_S_ANC_SECCOMP_LD_W:
seen |= SEEN_SECCOMP;
- func = (u8 *)seccomp_bpf_load;
- t_offset = func - (image + addrs[i]);
- /* seccomp filters don't use %rdi, %r8, %r9
- * it is safe to not save them
- */
- EMIT1_off32(0xbf, K); /* mov imm32,%edi */
- EMIT1_off32(0xe8, t_offset); /* call seccomp_bpf_load */
- break;
+ if (K == BPF_DATA(nr)) {
+ /* A = task_pt_regs(current)->orig_ax */
+ EMIT_REGS_LOAD(PT_REGS(orig_ax));
+ break;
+ }
+ if (K == BPF_DATA(arch)) {
+ /* A = AUDIT_ARCH_X86_64 */
+ EMIT1_off32(0xb8, AUDIT_ARCH_X86_64); /* mov imm32,%eax */
+#ifdef CONFIG_IA32_EMULATION
+ /* A = compat ? AUDIT_ARCH_I386 : AUDIT_ARCH_X86_64 */
+ EMIT1_off32(0xb9, AUDIT_ARCH_I386); /* mov imm32,%ecx */
+ EMIT2(0x85, 0xff); /* test %edi,%edi */
+ EMIT3(0x0f, 0x45, 0xc1); /* cmovne %ecx,%eax*/
+#endif /* CONFIG_IA32_EMULATION */
+ break;
+ }
+ if (K >= BPF_DATA(args[0]) && K < BPF_DATA(args[6])) {
+ int arg = (K - BPF_DATA(args[0])) / sizeof(u64);
+ int off = K % sizeof(u64);
+
+ switch (arg) {
+ case 0: off += PT_REGS(di); break;
+ case 1: off += PT_REGS(si); break;
+ case 2: off += PT_REGS(dx); break;
+ case 3: off += PT_REGS(r10); break;
+ case 4: off += PT_REGS(r8); break;
+ case 5: off += PT_REGS(r9); break;
+ }
+ EMIT_REGS_LOAD(off);
+#ifdef CONFIG_IA32_EMULATION
+ off = K % sizeof(u64);
+ switch (arg) {
+ case 0: off += PT_REGS(bx); break;
+ case 1: off += PT_REGS(cx); break;
+ case 2: off += PT_REGS(dx); break;
+ case 3: off += PT_REGS(si); break;
+ case 4: off += PT_REGS(di); break;
+ case 5: off += PT_REGS(bp); break;
+ }
+ if (is_imm8(off)) {
+ /* mov off8(%r8),%ecx */
+ EMIT4(0x41, 0x8b, 0x48, off);
+ } else {
+ /* mov off32(%r8),%ecx */
+ EMIT3(0x41, 0x8b, 0x88);
+ EMIT(off, 4);
+ }
+ EMIT2(0x85, 0xff); /* test %edi,%edi */
+ EMIT3(0x0f, 0x45, 0xc1); /* cmovne %ecx,%eax*/
+#endif /* CONFIG_IA32_EMULATION */
+ break;
+ }
+ if (K == BPF_DATA(instruction_pointer)) {
+ /* A = task_pt_regs(current)->ip */
+ EMIT_REGS_LOAD(PT_REGS(ip));
+ break;
+ }
+ if (K == BPF_DATA(instruction_pointer) + sizeof(u32)) {
+ EMIT_REGS_LOAD(PT_REGS(ip) + 4);
+ break;
+ }
+ goto out;
#endif /* CONFIG_SECCOMP_FILTER_JIT */
default:
/* hmm, too complex filter, give up with jit compiler */
--
1.8.1.2

2013-05-02 22:31:03

by Xi Wang

[permalink] [raw]
Subject: [PATCH v3 -next 1/2] x86: bpf_jit_comp: support BPF_S_ANC_SECCOMP_LD_W

This patch implements the seccomp BPF_S_ANC_SECCOMP_LD_W instruction
in x86 JIT, by simply calling seccomp_bpf_load().

SEEN_SKBREF was suggested by Eric Dumazet. SEEN_SKBREF shouldn't be
set in seccomp filters.

Signed-off-by: Xi Wang <[email protected]>
Cc: Daniel Borkmann <[email protected]>
Cc: Heiko Carstens <[email protected]>
Cc: Will Drewry <[email protected]>
Cc: Eric Dumazet <[email protected]>
Cc: Russell King <[email protected]>
Cc: David Laight <[email protected]>
Cc: "David S. Miller" <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: Nicolas Schichan <[email protected]>
---
arch/x86/Kconfig | 1 +
arch/x86/net/bpf_jit_comp.c | 112 +++++++++++++++++++++++++++++++++++---------
2 files changed, 91 insertions(+), 22 deletions(-)

diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index e8fff2f4..f7e1848 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -93,6 +93,7 @@ config X86
select IRQ_FORCED_THREADING
select USE_GENERIC_SMP_HELPERS if SMP
select HAVE_BPF_JIT if X86_64
+ select HAVE_SECCOMP_FILTER_JIT if X86_64
select HAVE_ARCH_TRANSPARENT_HUGEPAGE
select CLKEVT_I8253
select ARCH_HAVE_NMI_SAFE_CMPXCHG
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 9659817..64c72aa 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -107,9 +107,13 @@ do { \
goto cond_branch


-#define SEEN_DATAREF 1 /* might call external helpers */
-#define SEEN_XREG 2 /* ebx is used */
-#define SEEN_MEM 4 /* use mem[] for temporary storage */
+#define SEEN_DATAREF (1 << 0) /* might call external skb helpers */
+#define SEEN_XREG (1 << 1) /* ebx is used */
+#define SEEN_MEM (1 << 2) /* use mem[] for temporary storage */
+#define SEEN_SKBREF (1 << 3) /* use pointer to skb */
+#define SEEN_SECCOMP (1 << 4) /* seccomp filters */
+
+#define NEED_PERILOGUE(_seen) ((_seen) & (SEEN_XREG | SEEN_MEM | SEEN_DATAREF | SEEN_SECCOMP))

static inline void bpf_flush_icache(void *start, void *end)
{
@@ -144,7 +148,7 @@ static int pkt_type_offset(void)
return -1;
}

-void bpf_jit_compile(struct sk_filter *fp)
+static void *__bpf_jit_compile(struct sock_filter *filter, unsigned int flen, u8 seen_all)
{
u8 temp[64];
u8 *prog;
@@ -157,15 +161,14 @@ void bpf_jit_compile(struct sk_filter *fp)
int pc_ret0 = -1; /* bpf index of first RET #0 instruction (if any) */
unsigned int cleanup_addr; /* epilogue code offset */
unsigned int *addrs;
- const struct sock_filter *filter = fp->insns;
- int flen = fp->len;
+ void *bpf_func = NULL;

if (!bpf_jit_enable)
- return;
+ return bpf_func;

addrs = kmalloc(flen * sizeof(*addrs), GFP_KERNEL);
if (addrs == NULL)
- return;
+ return bpf_func;

/* Before first pass, make a rough estimation of addrs[]
* each bpf instruction is translated to less than 64 bytes
@@ -177,12 +180,12 @@ void bpf_jit_compile(struct sk_filter *fp)
cleanup_addr = proglen; /* epilogue address */

for (pass = 0; pass < 10; pass++) {
- u8 seen_or_pass0 = (pass == 0) ? (SEEN_XREG | SEEN_DATAREF | SEEN_MEM) : seen;
+ u8 seen_or_pass0 = (pass == 0) ? seen_all : seen;
/* no prologue/epilogue for trivial filters (RET something) */
proglen = 0;
prog = temp;

- if (seen_or_pass0) {
+ if (NEED_PERILOGUE(seen_or_pass0)) {
EMIT4(0x55, 0x48, 0x89, 0xe5); /* push %rbp; mov %rsp,%rbp */
EMIT4(0x48, 0x83, 0xec, 96); /* subq $96,%rsp */
/* note : must save %rbx in case bpf_error is hit */
@@ -225,6 +228,16 @@ void bpf_jit_compile(struct sk_filter *fp)
}
}

+#ifdef CONFIG_SECCOMP_FILTER_JIT
+ if (seen_or_pass0 & SEEN_SECCOMP) {
+ /* seccomp filters: skb must be NULL */
+ if (seen_or_pass0 & (SEEN_SKBREF | SEEN_DATAREF)) {
+ pr_err_once("seccomp filters shouldn't use skb");
+ goto out;
+ }
+ }
+#endif /* CONFIG_SECCOMP_FILTER_JIT */
+
switch (filter[0].code) {
case BPF_S_RET_K:
case BPF_S_LD_W_LEN:
@@ -237,6 +250,7 @@ void bpf_jit_compile(struct sk_filter *fp)
case BPF_S_ANC_VLAN_TAG_PRESENT:
case BPF_S_ANC_QUEUE:
case BPF_S_ANC_PKTTYPE:
+ case BPF_S_ANC_SECCOMP_LD_W:
case BPF_S_LD_W_ABS:
case BPF_S_LD_H_ABS:
case BPF_S_LD_B_ABS:
@@ -408,7 +422,7 @@ void bpf_jit_compile(struct sk_filter *fp)
}
/* fallinto */
case BPF_S_RET_A:
- if (seen_or_pass0) {
+ if (NEED_PERILOGUE(seen_or_pass0)) {
if (i != flen - 1) {
EMIT_JMP(cleanup_addr - addrs[i]);
break;
@@ -458,6 +472,7 @@ void bpf_jit_compile(struct sk_filter *fp)
break;
case BPF_S_LD_W_LEN: /* A = skb->len; */
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
+ seen |= SEEN_SKBREF;
if (is_imm8(offsetof(struct sk_buff, len)))
/* mov off8(%rdi),%eax */
EMIT3(0x8b, 0x47, offsetof(struct sk_buff, len));
@@ -467,7 +482,7 @@ void bpf_jit_compile(struct sk_filter *fp)
}
break;
case BPF_S_LDX_W_LEN: /* X = skb->len; */
- seen |= SEEN_XREG;
+ seen |= SEEN_XREG | SEEN_SKBREF;
if (is_imm8(offsetof(struct sk_buff, len)))
/* mov off8(%rdi),%ebx */
EMIT3(0x8b, 0x5f, offsetof(struct sk_buff, len));
@@ -478,6 +493,7 @@ void bpf_jit_compile(struct sk_filter *fp)
break;
case BPF_S_ANC_PROTOCOL: /* A = ntohs(skb->protocol); */
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);
+ seen |= SEEN_SKBREF;
if (is_imm8(offsetof(struct sk_buff, protocol))) {
/* movzwl off8(%rdi),%eax */
EMIT4(0x0f, 0xb7, 0x47, offsetof(struct sk_buff, protocol));
@@ -488,6 +504,7 @@ void bpf_jit_compile(struct sk_filter *fp)
EMIT2(0x86, 0xc4); /* ntohs() : xchg %al,%ah */
break;
case BPF_S_ANC_IFINDEX:
+ seen |= SEEN_SKBREF;
if (is_imm8(offsetof(struct sk_buff, dev))) {
/* movq off8(%rdi),%rax */
EMIT4(0x48, 0x8b, 0x47, offsetof(struct sk_buff, dev));
@@ -503,6 +520,7 @@ void bpf_jit_compile(struct sk_filter *fp)
break;
case BPF_S_ANC_MARK:
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
+ seen |= SEEN_SKBREF;
if (is_imm8(offsetof(struct sk_buff, mark))) {
/* mov off8(%rdi),%eax */
EMIT3(0x8b, 0x47, offsetof(struct sk_buff, mark));
@@ -513,6 +531,7 @@ void bpf_jit_compile(struct sk_filter *fp)
break;
case BPF_S_ANC_RXHASH:
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, rxhash) != 4);
+ seen |= SEEN_SKBREF;
if (is_imm8(offsetof(struct sk_buff, rxhash))) {
/* mov off8(%rdi),%eax */
EMIT3(0x8b, 0x47, offsetof(struct sk_buff, rxhash));
@@ -523,6 +542,7 @@ void bpf_jit_compile(struct sk_filter *fp)
break;
case BPF_S_ANC_QUEUE:
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2);
+ seen |= SEEN_SKBREF;
if (is_imm8(offsetof(struct sk_buff, queue_mapping))) {
/* movzwl off8(%rdi),%eax */
EMIT4(0x0f, 0xb7, 0x47, offsetof(struct sk_buff, queue_mapping));
@@ -542,6 +562,7 @@ void bpf_jit_compile(struct sk_filter *fp)
case BPF_S_ANC_VLAN_TAG:
case BPF_S_ANC_VLAN_TAG_PRESENT:
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
+ seen |= SEEN_SKBREF;
if (is_imm8(offsetof(struct sk_buff, vlan_tci))) {
/* movzwl off8(%rdi),%eax */
EMIT4(0x0f, 0xb7, 0x47, offsetof(struct sk_buff, vlan_tci));
@@ -563,6 +584,7 @@ void bpf_jit_compile(struct sk_filter *fp)

if (off < 0)
goto out;
+ seen |= SEEN_SKBREF;
if (is_imm8(off)) {
/* movzbl off8(%rdi),%eax */
EMIT4(0x0f, 0xb6, 0x47, off);
@@ -576,7 +598,7 @@ void bpf_jit_compile(struct sk_filter *fp)
}
case BPF_S_LD_W_ABS:
func = CHOOSE_LOAD_FUNC(K, sk_load_word);
-common_load: seen |= SEEN_DATAREF;
+common_load: seen |= SEEN_SKBREF | SEEN_DATAREF;
t_offset = func - (image + addrs[i]);
EMIT1_off32(0xbe, K); /* mov imm32,%esi */
EMIT1_off32(0xe8, t_offset); /* call */
@@ -589,14 +611,14 @@ common_load: seen |= SEEN_DATAREF;
goto common_load;
case BPF_S_LDX_B_MSH:
func = CHOOSE_LOAD_FUNC(K, sk_load_byte_msh);
- seen |= SEEN_DATAREF | SEEN_XREG;
+ seen |= SEEN_XREG | SEEN_SKBREF | SEEN_DATAREF;
t_offset = func - (image + addrs[i]);
EMIT1_off32(0xbe, K); /* mov imm32,%esi */
EMIT1_off32(0xe8, t_offset); /* call sk_load_byte_msh */
break;
case BPF_S_LD_W_IND:
func = sk_load_word;
-common_load_ind: seen |= SEEN_DATAREF | SEEN_XREG;
+common_load_ind: seen |= SEEN_XREG | SEEN_SKBREF | SEEN_DATAREF;
t_offset = func - (image + addrs[i]);
if (K) {
if (is_imm8(K)) {
@@ -684,6 +706,18 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
}
EMIT_COND_JMP(f_op, f_offset);
break;
+#ifdef CONFIG_SECCOMP_FILTER_JIT
+ case BPF_S_ANC_SECCOMP_LD_W:
+ seen |= SEEN_SECCOMP;
+ func = (u8 *)seccomp_bpf_load;
+ t_offset = func - (image + addrs[i]);
+ /* seccomp filters don't use %rdi, %r8, %r9
+ * it is safe to not save them
+ */
+ EMIT1_off32(0xbf, K); /* mov imm32,%edi */
+ EMIT1_off32(0xe8, t_offset); /* call seccomp_bpf_load */
+ break;
+#endif /* CONFIG_SECCOMP_FILTER_JIT */
default:
/* hmm, too complex filter, give up with jit compiler */
goto out;
@@ -694,7 +728,7 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
pr_err("bpb_jit_compile fatal error\n");
kfree(addrs);
module_free(NULL, image);
- return;
+ return bpf_func;
}
memcpy(image + proglen, temp, ilen);
}
@@ -706,7 +740,7 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
* use it to give the cleanup instruction(s) addr
*/
cleanup_addr = proglen - 1; /* ret */
- if (seen_or_pass0)
+ if (NEED_PERILOGUE(seen_or_pass0))
cleanup_addr -= 1; /* leaveq */
if (seen_or_pass0 & SEEN_XREG)
cleanup_addr -= 4; /* mov -8(%rbp),%rbx */
@@ -731,11 +765,11 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];

if (image) {
bpf_flush_icache(image, image + proglen);
- fp->bpf_func = (void *)image;
+ bpf_func = image;
}
out:
kfree(addrs);
- return;
+ return bpf_func;
}

static void jit_free_defer(struct work_struct *arg)
@@ -746,16 +780,50 @@ static void jit_free_defer(struct work_struct *arg)
/* run from softirq, we must use a work_struct to call
* module_free() from process context
*/
-void bpf_jit_free(struct sk_filter *fp)
+static void __bpf_jit_free(void *bpf_func)
{
- if (fp->bpf_func != sk_run_filter) {
+ if (bpf_func != sk_run_filter) {
/*
* bpf_jit_free() can be called from softirq; module_free()
* requires process context.
*/
- struct work_struct *work = (struct work_struct *)fp->bpf_func;
+ struct work_struct *work = (struct work_struct *)bpf_func;

INIT_WORK(work, jit_free_defer);
schedule_work(work);
}
}
+
+void bpf_jit_compile(struct sk_filter *fp)
+{
+ u8 seen_all = SEEN_XREG | SEEN_MEM | SEEN_SKBREF | SEEN_DATAREF;
+ void *bpf_func = __bpf_jit_compile(fp->insns, fp->len, seen_all);
+
+ if (bpf_func)
+ fp->bpf_func = bpf_func;
+}
+
+void bpf_jit_free(struct sk_filter *fp)
+{
+ __bpf_jit_free(fp->bpf_func);
+}
+
+#ifdef CONFIG_SECCOMP_FILTER_JIT
+void seccomp_jit_compile(struct seccomp_filter *fp)
+{
+ struct sock_filter *filter = seccomp_filter_get_insns(fp);
+ unsigned int flen = seccomp_filter_get_len(fp);
+ u8 seen_all = SEEN_XREG | SEEN_MEM | SEEN_SECCOMP;
+ void *bpf_func = __bpf_jit_compile(filter, flen, seen_all);
+
+ if (bpf_func)
+ seccomp_filter_set_bpf_func(fp, bpf_func);
+}
+
+void seccomp_jit_free(struct seccomp_filter *fp)
+{
+ void *bpf_func = seccomp_filter_get_bpf_func(fp);
+
+ __bpf_jit_free(bpf_func);
+}
+#endif /* CONFIG_SECCOMP_FILTER_JIT */
--
1.8.1.2