2022-09-15 12:23:33

by Peter Zijlstra

[permalink] [raw]
Subject: [PATCH v3 55/59] x86/bpf: Emit call depth accounting if required

From: Thomas Gleixner <[email protected]>

Ensure that calls in BPF JITed programs emit call depth accounting when
enabled, to keep the call/return accounting balanced. The return thunk jump is
already injected due to the earlier retbleed mitigations.

Signed-off-by: Thomas Gleixner <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Cc: Alexei Starovoitov <[email protected]>
Cc: Daniel Borkmann <[email protected]>
---
arch/x86/include/asm/alternative.h | 6 ++++++
arch/x86/kernel/callthunks.c | 19 +++++++++++++++++++
arch/x86/net/bpf_jit_comp.c | 32 +++++++++++++++++++++++---------
3 files changed, 48 insertions(+), 9 deletions(-)

--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -93,6 +93,7 @@ extern void callthunks_patch_module_call
struct module *mod);
extern void *callthunks_translate_call_dest(void *dest);
extern bool is_callthunk(void *addr);
+extern int x86_call_depth_emit_accounting(u8 **pprog, void *func);
#else
static __always_inline void callthunks_patch_builtin_calls(void) {}
static __always_inline void
@@ -106,6 +107,11 @@ static __always_inline bool is_callthunk
{
return false;
}
+static __always_inline int x86_call_depth_emit_accounting(u8 **pprog,
+ void *func)
+{
+ return 0;
+}
#endif

#ifdef CONFIG_SMP
--- a/arch/x86/kernel/callthunks.c
+++ b/arch/x86/kernel/callthunks.c
@@ -305,6 +305,25 @@ bool is_callthunk(void *addr)
return !bcmp((void *)(dest - tmpl_size), tmpl, tmpl_size);
}

+#ifdef CONFIG_BPF_JIT
+int x86_call_depth_emit_accounting(u8 **pprog, void *func)
+{
+ unsigned int tmpl_size = SKL_TMPL_SIZE;
+ void *tmpl = skl_call_thunk_template;
+
+ if (!thunks_initialized)
+ return 0;
+
+ /* Is function call target a thunk? */
+ if (is_callthunk(func))
+ return 0;
+
+ memcpy(*pprog, tmpl, tmpl_size);
+ *pprog += tmpl_size;
+ return tmpl_size;
+}
+#endif
+
#ifdef CONFIG_MODULES
void noinline callthunks_patch_module_calls(struct callthunk_sites *cs,
struct module *mod)
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -340,6 +340,13 @@ static int emit_call(u8 **pprog, void *f
return emit_patch(pprog, func, ip, 0xE8);
}

+static int emit_rsb_call(u8 **pprog, void *func, void *ip)
+{
+ OPTIMIZER_HIDE_VAR(func);
+ x86_call_depth_emit_accounting(pprog, func);
+ return emit_patch(pprog, func, ip, 0xE8);
+}
+
static int emit_jump(u8 **pprog, void *func, void *ip)
{
return emit_patch(pprog, func, ip, 0xE9);
@@ -1434,19 +1441,26 @@ st: if (is_imm8(insn->off))
break;

/* call */
- case BPF_JMP | BPF_CALL:
+ case BPF_JMP | BPF_CALL: {
+ int offs;
+
func = (u8 *) __bpf_call_base + imm32;
if (tail_call_reachable) {
/* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */
EMIT3_off32(0x48, 0x8B, 0x85,
-round_up(bpf_prog->aux->stack_depth, 8) - 8);
- if (!imm32 || emit_call(&prog, func, image + addrs[i - 1] + 7))
+ if (!imm32)
return -EINVAL;
+ offs = 7 + x86_call_depth_emit_accounting(&prog, func);
} else {
- if (!imm32 || emit_call(&prog, func, image + addrs[i - 1]))
+ if (!imm32)
return -EINVAL;
+ offs = x86_call_depth_emit_accounting(&prog, func);
}
+ if (emit_call(&prog, func, image + addrs[i - 1] + offs))
+ return -EINVAL;
break;
+ }

case BPF_JMP | BPF_TAIL_CALL:
if (imm32)
@@ -1823,7 +1837,7 @@ static int invoke_bpf_prog(const struct
/* arg2: lea rsi, [rbp - ctx_cookie_off] */
EMIT4(0x48, 0x8D, 0x75, -run_ctx_off);

- if (emit_call(&prog, enter, prog))
+ if (emit_rsb_call(&prog, enter, prog))
return -EINVAL;
/* remember prog start time returned by __bpf_prog_enter */
emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0);
@@ -1844,7 +1858,7 @@ static int invoke_bpf_prog(const struct
(long) p->insnsi >> 32,
(u32) (long) p->insnsi);
/* call JITed bpf program or interpreter */
- if (emit_call(&prog, p->bpf_func, prog))
+ if (emit_rsb_call(&prog, p->bpf_func, prog))
return -EINVAL;

/*
@@ -1868,7 +1882,7 @@ static int invoke_bpf_prog(const struct
emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6);
/* arg3: lea rdx, [rbp - run_ctx_off] */
EMIT4(0x48, 0x8D, 0x55, -run_ctx_off);
- if (emit_call(&prog, exit, prog))
+ if (emit_rsb_call(&prog, exit, prog))
return -EINVAL;

*pprog = prog;
@@ -2109,7 +2123,7 @@ int arch_prepare_bpf_trampoline(struct b
if (flags & BPF_TRAMP_F_CALL_ORIG) {
/* arg1: mov rdi, im */
emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
- if (emit_call(&prog, __bpf_tramp_enter, prog)) {
+ if (emit_rsb_call(&prog, __bpf_tramp_enter, prog)) {
ret = -EINVAL;
goto cleanup;
}
@@ -2141,7 +2155,7 @@ int arch_prepare_bpf_trampoline(struct b
EMIT2(0xff, 0xd0); /* call *rax */
} else {
/* call original function */
- if (emit_call(&prog, orig_call, prog)) {
+ if (emit_rsb_call(&prog, orig_call, prog)) {
ret = -EINVAL;
goto cleanup;
}
@@ -2185,7 +2199,7 @@ int arch_prepare_bpf_trampoline(struct b
im->ip_epilogue = prog;
/* arg1: mov rdi, im */
emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
- if (emit_call(&prog, __bpf_tramp_exit, prog)) {
+ if (emit_rsb_call(&prog, __bpf_tramp_exit, prog)) {
ret = -EINVAL;
goto cleanup;
}



2022-10-17 15:18:09

by tip-bot2 for Jacob Pan

[permalink] [raw]
Subject: [tip: x86/core] x86/bpf: Emit call depth accounting if required

The following commit has been merged into the x86/core branch of tip:

Commit-ID: b2e9dfe54be4d023124d588d6f03d16a9c0d2507
Gitweb: https://git.kernel.org/tip/b2e9dfe54be4d023124d588d6f03d16a9c0d2507
Author: Thomas Gleixner <[email protected]>
AuthorDate: Thu, 15 Sep 2022 13:11:34 +02:00
Committer: Peter Zijlstra <[email protected]>
CommitterDate: Mon, 17 Oct 2022 16:41:18 +02:00

x86/bpf: Emit call depth accounting if required

Ensure that calls in BPF JITed programs emit call depth accounting when
enabled, to keep the call/return accounting balanced. The return thunk jump is
already injected due to the earlier retbleed mitigations.

Signed-off-by: Thomas Gleixner <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
---
arch/x86/include/asm/alternative.h | 6 +++++-
arch/x86/kernel/callthunks.c | 19 +++++++++++++++++-
arch/x86/net/bpf_jit_comp.c | 32 ++++++++++++++++++++---------
3 files changed, 48 insertions(+), 9 deletions(-)

diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index 4b8cd25..664c077 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -93,6 +93,7 @@ extern void callthunks_patch_module_calls(struct callthunk_sites *sites,
struct module *mod);
extern void *callthunks_translate_call_dest(void *dest);
extern bool is_callthunk(void *addr);
+extern int x86_call_depth_emit_accounting(u8 **pprog, void *func);
#else
static __always_inline void callthunks_patch_builtin_calls(void) {}
static __always_inline void
@@ -106,6 +107,11 @@ static __always_inline bool is_callthunk(void *addr)
{
return false;
}
+static __always_inline int x86_call_depth_emit_accounting(u8 **pprog,
+ void *func)
+{
+ return 0;
+}
#endif

#ifdef CONFIG_SMP
diff --git a/arch/x86/kernel/callthunks.c b/arch/x86/kernel/callthunks.c
index 7f97881..a03d646 100644
--- a/arch/x86/kernel/callthunks.c
+++ b/arch/x86/kernel/callthunks.c
@@ -306,6 +306,25 @@ bool is_callthunk(void *addr)
return !bcmp((void *)(dest - tmpl_size), tmpl, tmpl_size);
}

+#ifdef CONFIG_BPF_JIT
+int x86_call_depth_emit_accounting(u8 **pprog, void *func)
+{
+ unsigned int tmpl_size = SKL_TMPL_SIZE;
+ void *tmpl = skl_call_thunk_template;
+
+ if (!thunks_initialized)
+ return 0;
+
+ /* Is function call target a thunk? */
+ if (is_callthunk(func))
+ return 0;
+
+ memcpy(*pprog, tmpl, tmpl_size);
+ *pprog += tmpl_size;
+ return tmpl_size;
+}
+#endif
+
#ifdef CONFIG_MODULES
void noinline callthunks_patch_module_calls(struct callthunk_sites *cs,
struct module *mod)
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index ad8cb7f..a6b4674 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -340,6 +340,13 @@ static int emit_call(u8 **pprog, void *func, void *ip)
return emit_patch(pprog, func, ip, 0xE8);
}

+static int emit_rsb_call(u8 **pprog, void *func, void *ip)
+{
+ OPTIMIZER_HIDE_VAR(func);
+ x86_call_depth_emit_accounting(pprog, func);
+ return emit_patch(pprog, func, ip, 0xE8);
+}
+
static int emit_jump(u8 **pprog, void *func, void *ip)
{
return emit_patch(pprog, func, ip, 0xE9);
@@ -1436,19 +1443,26 @@ st: if (is_imm8(insn->off))
break;

/* call */
- case BPF_JMP | BPF_CALL:
+ case BPF_JMP | BPF_CALL: {
+ int offs;
+
func = (u8 *) __bpf_call_base + imm32;
if (tail_call_reachable) {
/* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */
EMIT3_off32(0x48, 0x8B, 0x85,
-round_up(bpf_prog->aux->stack_depth, 8) - 8);
- if (!imm32 || emit_call(&prog, func, image + addrs[i - 1] + 7))
+ if (!imm32)
return -EINVAL;
+ offs = 7 + x86_call_depth_emit_accounting(&prog, func);
} else {
- if (!imm32 || emit_call(&prog, func, image + addrs[i - 1]))
+ if (!imm32)
return -EINVAL;
+ offs = x86_call_depth_emit_accounting(&prog, func);
}
+ if (emit_call(&prog, func, image + addrs[i - 1] + offs))
+ return -EINVAL;
break;
+ }

case BPF_JMP | BPF_TAIL_CALL:
if (imm32)
@@ -1854,7 +1868,7 @@ static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
/* arg2: lea rsi, [rbp - ctx_cookie_off] */
EMIT4(0x48, 0x8D, 0x75, -run_ctx_off);

- if (emit_call(&prog, enter, prog))
+ if (emit_rsb_call(&prog, enter, prog))
return -EINVAL;
/* remember prog start time returned by __bpf_prog_enter */
emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0);
@@ -1875,7 +1889,7 @@ static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
(long) p->insnsi >> 32,
(u32) (long) p->insnsi);
/* call JITed bpf program or interpreter */
- if (emit_call(&prog, p->bpf_func, prog))
+ if (emit_rsb_call(&prog, p->bpf_func, prog))
return -EINVAL;

/*
@@ -1899,7 +1913,7 @@ static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6);
/* arg3: lea rdx, [rbp - run_ctx_off] */
EMIT4(0x48, 0x8D, 0x55, -run_ctx_off);
- if (emit_call(&prog, exit, prog))
+ if (emit_rsb_call(&prog, exit, prog))
return -EINVAL;

*pprog = prog;
@@ -2147,7 +2161,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
if (flags & BPF_TRAMP_F_CALL_ORIG) {
/* arg1: mov rdi, im */
emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
- if (emit_call(&prog, __bpf_tramp_enter, prog)) {
+ if (emit_rsb_call(&prog, __bpf_tramp_enter, prog)) {
ret = -EINVAL;
goto cleanup;
}
@@ -2179,7 +2193,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
EMIT2(0xff, 0xd0); /* call *rax */
} else {
/* call original function */
- if (emit_call(&prog, orig_call, prog)) {
+ if (emit_rsb_call(&prog, orig_call, prog)) {
ret = -EINVAL;
goto cleanup;
}
@@ -2223,7 +2237,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
im->ip_epilogue = prog;
/* arg1: mov rdi, im */
emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
- if (emit_call(&prog, __bpf_tramp_exit, prog)) {
+ if (emit_rsb_call(&prog, __bpf_tramp_exit, prog)) {
ret = -EINVAL;
goto cleanup;
}

2023-01-05 22:06:32

by Joan Bruguera Micó

[permalink] [raw]
Subject: Re: [PATCH v3 55/59] x86/bpf: Emit call depth accounting if required

From: Joan Bruguera Micó <[email protected]>

I'm observing a kernel panic on boot on both Arch Linux and Fedora Rawhide,
reproduced on a real Intel i5-7200U (with `retbleed=stuff`) and also an AMD
5700G (with `retbleed=stuff,force`), on both real hardware and QEMU+KVM with
`-cpu=host`.

The panic seems to be triggered by systemd attempting to load a BPF program
(see "bpf-lsm: LSM BPF program attached" in the trace below).
Disabling BPF using `lsm=...` fixes the panic and the system boots.

It can be reproduced with Fedora-Cloud-Base-Rawhide-20230105.n.0.x86_64.qcow2
(from https://dl.fedoraproject.org/pub/fedora/linux/development/rawhide/Cloud)
launching QEMU with:
qemu-system-x86_64 -enable-kvm -cpu host -m 1024 -serial stdio -hda \
Fedora-Cloud-Base-Rawhide-20230105.n.0.x86_64.qcow2
and appending `retbleed=stuff,force` on the GRUB command line.

I'm experimenting with a patch that adjusts `ip` (in `emit_rsb_call`) after
calling `x86_call_depth_emit_accounting`, but it needs a bit more testing.

PS: Sorry if this has already been discussed elsewhere — I can't find it
(but it's been a while since this patch was posted and merged).

Sample kernel panic log:

...
:: running hook [keymap]
:: Loading keymap...kbd_mode: KDSKBMODE: Inappropriate ioctl for device
done.
:: performing fsck on '/dev/sda2'
/dev/sda2: clean, 54442/643376 files, 659509/2568704 blocks
:: mounting '/dev/sda2' on real root
[ 0.856081] EXT4-fs (sda2): mounted filesystem b11fd1e6-2bc2-43b4-ab2c-09a60647bd0b with ordered data mode. Quota mode: none.
:: running cleanup hook [udev]
[ 0.937171] systemd[1]: systemd 252.4-2-arch running in system mode (+PAM +AUDIT -SELINUX -APPARMOR -IMA +SMACK +SECCOMP +GCRYPT +GNUTLS +OPENSSL +ACL +BLKID +CURL +ELFUTILS +FIDO2 +IDN2 ...
-IDN +IPTC +KMOD +LIBCRYPTSETUP +LIBFDISK +PCRE2 -PWQUALITY +P11KIT -QRENCODE +TPM2 +BZIP2 +LZ4 +XZ +ZLIB +ZSTD +BPF_FRAMEWORK +XKBCOMMON +UTMP -SYSVINIT default-hierarchy=unified)
[ 0.939588] systemd[1]: Detected virtualization kvm.
[ 0.939925] systemd[1]: Detected architecture x86-64.

Welcome to Arch Linux!

[ 1.325709] systemd[1]: bpf-lsm: LSM BPF program attached
[ 1.326283] BUG: kernel NULL pointer dereference, address: 000000000000007a
[ 1.326762] #PF: supervisor write access in kernel mode
[ 1.327101] #PF: error_code(0x0002) - not-present page
[ 1.327463] PGD 0 P4D 0
[ 1.327634] Oops: 0002 [#1] PREEMPT SMP NOPTI
[ 1.327926] CPU: 0 PID: 1 Comm: systemd Not tainted 6.2.0-rc2-1-mainline #1 fe8e69619c49cfdd8b05f798d0c88b25f1808a3f
[ 1.328603] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Arch Linux 1.16.1-1-1 04/01/2014
[ 1.329002] RIP: 0010:__bpf_tramp_enter+0xc/0x40
[ 1.329002] Code: ff e9 08 22 ad 00 0f 1f 84 00 00 00 00 00 90 90 90 90 90 90 90 90 90 90 90 90 90 90 90 90 f3 0f 1e fa 53 48 89 fb e8 59 04 ee <ff> 48 8b 83 60 02 00 00 a8 03 75 0a 65 48 ...
ff 00 5b e9 4e 4d ee ff
[ 1.329002] RSP: 0018:ffff9cbe00013b88 EFLAGS: 00010282
[ 1.329002] RAX: 00000000000000ef RBX: ffffffffa9cdad88 RCX: 0000000500000000
[ 1.329002] RDX: 0000000000000000 RSI: 0000000000000064 RDI: ffff8ba14121b800
[ 1.329002] RBP: ffff9cbe00013bc0 R08: ffff8ba1439a6ca0 R09: ffff8ba140429a40
[ 1.329002] R10: 0000000000000000 R11: 0000000000000002 R12: ffff9cbe00013cc0
[ 1.329002] R13: 0000000000000000 R14: ffff8ba142157f10 R15: ffff8ba142157f00
[ 1.329002] FS: 00007f39b636da40(0000) GS:ffff8ba277a00000(0000) knlGS:0000000000000000
[ 1.329002] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[ 1.329002] CR2: 000000000000007a CR3: 0000000104d20000 CR4: 0000000000750ef0
[ 1.329002] PKRU: 55555554
[ 1.329002] Call Trace:
[ 1.329002] <TASK>
[ 1.329002] ? bpf_trampoline_6442538647_0+0x3d/0x1000
[ 1.329002] ? bpf_lsm_file_open+0x9/0x10
[ 1.329002] ? security_file_open+0x30/0x50
[ 1.329002] ? do_dentry_open+0xf8/0x460
[ 1.329002] ? path_openat+0xd8f/0x1260
[ 1.329002] ? inode_permission+0x3d/0x1e0
[ 1.329002] ? __pfx_bpf_lsm_inode_permission+0x10/0x10
[ 1.329002] ? security_inode_permission+0x3e/0x60
[ 1.329002] ? do_filp_open+0xb3/0x160
[ 1.329002] ? do_sys_openat2+0xaf/0x170
[ 1.329002] ? __x64_sys_openat+0x6e/0xa0
[ 1.329002] ? do_syscall_64+0x5f/0x90
[ 1.329002] ? kmem_cache_free+0x19/0x360
[ 1.329002] ? do_mkdirat+0xed/0x180
[ 1.329002] ? __x86_return_skl+0x71/0x88
[ 1.329002] ? __x86_return_skl+0x6b/0x88
[ 1.329002] ? __x86_return_skl+0x65/0x88
[ 1.329002] ? __x86_return_skl+0x5f/0x88
[ 1.329002] ? __x86_return_skl+0x59/0x88
[ 1.329002] ? __x86_return_skl+0x53/0x88
[ 1.329002] ? __x86_return_skl+0x4d/0x88
[ 1.329002] ? __x86_return_skl+0x47/0x88
[ 1.329002] ? __x86_return_skl+0x41/0x88
[ 1.329002] ? __x86_return_skl+0x3b/0x88
[ 1.329002] ? __x86_return_skl+0x35/0x88
[ 1.329002] ? __x86_return_skl+0x2f/0x88
[ 1.329002] ? __x86_return_skl+0x29/0x88
[ 1.329002] ? __x86_return_skl+0x23/0x88
[ 1.329002] ? entry_SYSCALL_64_after_hwframe+0x72/0xdc
[ 1.329002] </TASK>
[ 1.329002] Modules linked in: bpf_preload qemu_fw_cfg ip_tables x_tables ext4 crc32c_generic crc16 mbcache jbd2 virtio_net virtio_gpu ata_generic serio_raw net_failover virtio_dma_buf ...
virtio_rng failover pata_acpi atkbd libps2 vivaldi_fmap virtio_pci i8042 virtio_pci_legacy_dev crc32c_intel floppy serio ata_piix virtio_pci_modern_dev
[ 1.329002] CR2: 000000000000007a
[ 1.329002] ---[ end trace 0000000000000000 ]---
[ 1.329002] RIP: 0010:__bpf_tramp_enter+0xc/0x40
[ 1.329002] Code: ff e9 08 22 ad 00 0f 1f 84 00 00 00 00 00 90 90 90 90 90 90 90 90 90 90 90 90 90 90 90 90 f3 0f 1e fa 53 48 89 fb e8 59 04 ee <ff> 48 8b 83 60 02 00 00 a8 03 75 0a 65 48...
ff 00 5b e9 4e 4d ee ff
[ 1.329002] RSP: 0018:ffff9cbe00013b88 EFLAGS: 00010282
[ 1.329002] RAX: 00000000000000ef RBX: ffffffffa9cdad88 RCX: 0000000500000000
[ 1.329002] RDX: 0000000000000000 RSI: 0000000000000064 RDI: ffff8ba14121b800
[ 1.329002] RBP: ffff9cbe00013bc0 R08: ffff8ba1439a6ca0 R09: ffff8ba140429a40
[ 1.329002] R10: 0000000000000000 R11: 0000000000000002 R12: ffff9cbe00013cc0
[ 1.329002] R13: 0000000000000000 R14: ffff8ba142157f10 R15: ffff8ba142157f00
[ 1.329002] FS: 00007f39b636da40(0000) GS:ffff8ba277a00000(0000) knlGS:0000000000000000
[ 1.329002] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[ 1.329002] CR2: 000000000000007a CR3: 0000000104d20000 CR4: 0000000000750ef0
[ 1.329002] PKRU: 55555554
[ 1.352911] Kernel panic - not syncing: Attempted to kill init! exitcode=0x00000009
[ 1.353529] Kernel Offset: 0x27600000 from 0xffffffff81000000 (relocation range: 0xffffffff80000000-0xffffffffbfffffff)
[ 1.354255] ---[ end Kernel panic - not syncing: Attempted to kill init! exitcode=0x00000009 ]---