Reserve memory for quick kexec on arm64, requested via the
"quickkexec=" kernel command-line parameter.
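For example, booting with "quickkexec=128M" reserves 128MB of memory
below ARCH_LOW_ADDRESS_LIMIT for quick kexec.
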
Signed-off-by: Sang Yan <[email protected]>
---
arch/arm64/kernel/setup.c | 6 ++++++
arch/arm64/mm/init.c | 43 +++++++++++++++++++++++++++++++++++++++
2 files changed, 49 insertions(+)
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 77c4c9bad1b8..2a5dc032d95e 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -369,6 +369,12 @@ void __init __no_sanitize_address setup_arch(char **cmdline_p)
*/
init_task.thread_info.ttbr0 = __pa_symbol(empty_zero_page);
#endif
+#ifdef CONFIG_QUICK_KEXEC
+ if (quick_kexec_res.end &&
+ quick_kexec_res.start >= res->start &&
+ quick_kexec_res.end <= res->end)
+ request_resource(res, &quick_kexec_res);
+#endif
if (boot_args[1] || boot_args[2] || boot_args[3]) {
pr_err("WARNING: x1-x3 nonzero in violation of boot protocol:\n"
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 481d22c32a2e..579acb93728f 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -130,6 +130,45 @@ static void __init reserve_crashkernel(void)
}
#endif /* CONFIG_KEXEC_CORE */
+#ifdef CONFIG_QUICK_KEXEC
+static int __init parse_quick_kexec(char *p)
+{
+ if (!p)
+ return 0;
+
+ quick_kexec_res.end = PAGE_ALIGN(memparse(p, NULL));
+
+ return 0;
+}
+early_param("quickkexec", parse_quick_kexec);
+
+static void __init reserve_quick_kexec(void)
+{
+ unsigned long long mem_start, mem_len;
+
+ mem_len = quick_kexec_res.end;
+ if (mem_len == 0)
+ return;
+
+ /* Current arm64 boot protocol requires 2MB alignment */
+ mem_start = memblock_find_in_range(0, ARCH_LOW_ADDRESS_LIMIT,
+ mem_len, CRASH_ALIGN);
+ if (mem_start == 0) {
+ pr_warn("cannot allocate quick kexec mem (size:0x%llx)\n",
+ mem_len);
+ quick_kexec_res.end = 0;
+ return;
+ }
+
+ memblock_reserve(mem_start, mem_len);
+ pr_info("quick kexec mem reserved: 0x%016llx - 0x%016llx (%lld MB)\n",
+ mem_start, mem_start + mem_len, mem_len >> 20);
+
+ quick_kexec_res.start = mem_start;
+ quick_kexec_res.end = mem_start + mem_len - 1;
+}
+#endif
+
#ifdef CONFIG_CRASH_DUMP
static int __init early_init_dt_scan_elfcorehdr(unsigned long node,
const char *uname, int depth, void *data)
@@ -399,6 +438,10 @@ void __init arm64_memblock_init(void)
reserve_crashkernel();
+#ifdef CONFIG_QUICK_KEXEC
+ reserve_quick_kexec();
+#endif
+
reserve_elfcorehdr();
high_memory = __va(memblock_end_of_DRAM() - 1) + 1;
--
2.19.1
Hi Sang,
Thank you for the patch! Yet something to improve:
[auto build test ERROR on arm64/for-next/core]
[also build test ERROR on linux/master linus/master v5.8 next-20200814]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting a patch, we suggest using '--base' as documented in
https://git-scm.com/docs/git-format-patch]
url: https://github.com/0day-ci/linux/commits/Sang-Yan/kexec-Add-quick-kexec-support-for-kernel/20200814-142840
base: https://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux.git for-next/core
config: arm64-randconfig-r011-20200816 (attached as .config)
compiler: aarch64-linux-gcc (GCC) 9.3.0
reproduce (this is a W=1 build):
wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
chmod +x ~/bin/make.cross
# save the attached .config to linux build tree
COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-9.3.0 make.cross ARCH=arm64
If you fix the issue, kindly add the following tag as appropriate:
Reported-by: kernel test robot <[email protected]>
All errors (new ones prefixed by >>):
arch/arm64/kernel/setup.c: In function 'setup_arch':
>> arch/arm64/kernel/setup.c:374:32: error: 'res' undeclared (first use in this function)
374 | quick_kexec_res.start >= res->start &&
| ^~~
arch/arm64/kernel/setup.c:374:32: note: each undeclared identifier is reported only once for each function it appears in
--
arch/arm64/mm/init.c: In function 'reserve_quick_kexec':
>> arch/arm64/mm/init.c:155:13: error: 'CRASH_ALIGN' undeclared (first use in this function); did you mean 'CMSG_ALIGN'?
155 | mem_len, CRASH_ALIGN);
| ^~~~~~~~~~~
| CMSG_ALIGN
arch/arm64/mm/init.c:155:13: note: each undeclared identifier is reported only once for each function it appears in
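
For the CRASH_ALIGN error: CRASH_ALIGN is not defined in arch/arm64/mm/init.c,
so the build fails. A minimal sketch of a fix, assuming the 2MB alignment
stated in the comment above the call is what is intended (mirroring the
existing crashkernel reservation in this file), would be to pass SZ_2M
directly:

	/* Current arm64 boot protocol requires 2MB alignment */
	mem_start = memblock_find_in_range(0, ARCH_LOW_ADDRESS_LIMIT,
					   mem_len, SZ_2M);
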
vim +/res +374 arch/arm64/kernel/setup.c
284
285 void __init setup_arch(char **cmdline_p)
286 {
287 init_mm.start_code = (unsigned long) _text;
288 init_mm.end_code = (unsigned long) _etext;
289 init_mm.end_data = (unsigned long) _edata;
290 init_mm.brk = (unsigned long) _end;
291
292 *cmdline_p = boot_command_line;
293
294 /*
295 * If know now we are going to need KPTI then use non-global
296 * mappings from the start, avoiding the cost of rewriting
297 * everything later.
298 */
299 arm64_use_ng_mappings = kaslr_requires_kpti();
300
301 early_fixmap_init();
302 early_ioremap_init();
303
304 setup_machine_fdt(__fdt_pointer);
305
306 /*
307 * Initialise the static keys early as they may be enabled by the
308 * cpufeature code and early parameters.
309 */
310 jump_label_init();
311 parse_early_param();
312
313 /*
314 * Unmask asynchronous aborts and fiq after bringing up possible
315 * earlycon. (Report possible System Errors once we can report this
316 * occurred).
317 */
318 local_daif_restore(DAIF_PROCCTX_NOIRQ);
319
320 /*
321 * TTBR0 is only used for the identity mapping at this stage. Make it
322 * point to zero page to avoid speculatively fetching new entries.
323 */
324 cpu_uninstall_idmap();
325
326 xen_early_init();
327 efi_init();
328
329 if (!efi_enabled(EFI_BOOT) && ((u64)_text % MIN_KIMG_ALIGN) != 0)
330 pr_warn(FW_BUG "Kernel image misaligned at boot, please fix your bootloader!");
331
332 arm64_memblock_init();
333
334 paging_init();
335
336 acpi_table_upgrade();
337
338 /* Parse the ACPI tables for possible boot-time configuration */
339 acpi_boot_table_init();
340
341 if (acpi_disabled)
342 unflatten_device_tree();
343
344 bootmem_init();
345
346 kasan_init();
347
348 request_standard_resources();
349
350 early_ioremap_reset();
351
352 if (acpi_disabled)
353 psci_dt_init();
354 else
355 psci_acpi_init();
356
357 init_bootcpu_ops();
358 smp_init_cpus();
359 smp_build_mpidr_hash();
360
361 /* Init percpu seeds for random tags after cpus are set up. */
362 kasan_init_tags();
363
364 #ifdef CONFIG_ARM64_SW_TTBR0_PAN
365 /*
366 * Make sure init_thread_info.ttbr0 always generates translation
367 * faults in case uaccess_enable() is inadvertently called by the init
368 * thread.
369 */
370 init_task.thread_info.ttbr0 = __pa_symbol(empty_zero_page);
371 #endif
372 #ifdef CONFIG_QUICK_KEXEC
373 if (quick_kexec_res.end &&
> 374 quick_kexec_res.start >= res->start &&
375 quick_kexec_res.end <= res->end)
376 request_resource(res, &quick_kexec_res);
377 #endif
378
379 if (boot_args[1] || boot_args[2] || boot_args[3]) {
380 pr_err("WARNING: x1-x3 nonzero in violation of boot protocol:\n"
381 "\tx1: %016llx\n\tx2: %016llx\n\tx3: %016llx\n"
382 "This indicates a broken bootloader or old kernel\n",
383 boot_args[1], boot_args[2], boot_args[3]);
384 }
385 }
386
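For the 'res' error: there is no variable named 'res' in scope in setup_arch().
In this file the memory resources are set up in request_standard_resources(),
which already requests crashk_res as a child of the matching "System RAM"
resource under CONFIG_KEXEC_CORE. A sketch of one possible fix, assuming the
quick kexec region should be exported to /proc/iomem the same way, is to move
the hunk into that loop, right after the crashk_res handling:

#ifdef CONFIG_QUICK_KEXEC
		/*
		 * Export the reserved region as a child of the matching
		 * System RAM resource, like "Crash kernel" above.
		 */
		if (quick_kexec_res.end &&
		    quick_kexec_res.start >= res->start &&
		    quick_kexec_res.end <= res->end)
			request_resource(res, &quick_kexec_res);
#endif
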
---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/[email protected]