Received: by 2002:ac0:a5a6:0:0:0:0:0 with SMTP id m35-v6csp4335357imm; Tue, 11 Sep 2018 10:17:09 -0700 (PDT) X-Google-Smtp-Source: ANB0VdY5NIi0BOtiJAEyhaMSsVZWEWTtYJC47BZ5sc+2EEFxRVAmXnXyO7WbLwxrcX0zSXZV6042 X-Received: by 2002:a17:902:d898:: with SMTP id b24-v6mr8852242plz.218.1536686229922; Tue, 11 Sep 2018 10:17:09 -0700 (PDT) ARC-Seal: i=1; a=rsa-sha256; t=1536686229; cv=none; d=google.com; s=arc-20160816; b=TdiP++SliyL1Pi8UuLSpiyYnVy4pEByQBPvAUwiU56mhV2GOSISQNAt0PKsZkCEvF5 kqIgE2iR3fEddy9B7Vda+3R5vJD1jgW8TanIFeowni+d0Ufcaf059zrAamfLJiISdDj2 SvDwyv1mNvmPhH2vqBkSAvsilOQX8ONNn8L2Ax7IFofmealaXjGSVvKePc7/AwxsNZjJ qlP9FwIS1QZIvxm/xvGV21lAYZaBUj3tzr8FTb4yFcOMsLI4GVBuufigtBlSR1Tln1YW QzymKh6GMXLW6Vgdd1f2YeNmCX+z8N4x+JyYoLBq4eWVcE9VcP3OBaEqeIDici2XBX9j 4C/w== ARC-Message-Signature: i=1; a=rsa-sha256; c=relaxed/relaxed; d=google.com; s=arc-20160816; h=list-id:precedence:sender:references:in-reply-to:message-id:date :subject:cc:to:from; bh=bQxIwvGceSRoMXf4bc539ZBaHnBwLERLbumvLJQ9xzA=; b=S9yomo+wwnijJLoiFcXnaIlwWcLTAwOI/7vsx4miTUGcT1YmbIbVJXC95Bjv/9v1M/ g/lrp1BJVW+0h0e8LBG+vl2UETFwA0zbdO0G5Sk3OmnxXHaLpSPdZa0hdm2kSMnsma67 oj1QO+F4IecWrA2RpBhndFcSBu3/AIYKeD/qDdzyY/XXJW1MsRIIxkHcHczt0U31UNHa lwAhs9LpOISHR3p6XAcXEsDcpiDUqdfrOV/abQs8CTi36CB+YDI5w4CLBBKxRKRRk8Ee RV1JDRE6mKtZd6GJTohX1js3Awu5V/HxOezR5EGXdu7suTPLJ+K/bssXz/QB9qb2vzai WmqQ== ARC-Authentication-Results: i=1; mx.google.com; spf=pass (google.com: best guess record for domain of linux-kernel-owner@vger.kernel.org designates 209.132.180.67 as permitted sender) smtp.mailfrom=linux-kernel-owner@vger.kernel.org; dmarc=fail (p=NONE sp=NONE dis=NONE) header.from=intel.com Return-Path: Received: from vger.kernel.org (vger.kernel.org. 
[209.132.180.67]) by mx.google.com with ESMTP id y28-v6si22360325pga.485.2018.09.11.10.16.55; Tue, 11 Sep 2018 10:17:09 -0700 (PDT) Received-SPF: pass (google.com: best guess record for domain of linux-kernel-owner@vger.kernel.org designates 209.132.180.67 as permitted sender) client-ip=209.132.180.67; Authentication-Results: mx.google.com; spf=pass (google.com: best guess record for domain of linux-kernel-owner@vger.kernel.org designates 209.132.180.67 as permitted sender) smtp.mailfrom=linux-kernel-owner@vger.kernel.org; dmarc=fail (p=NONE sp=NONE dis=NONE) header.from=intel.com Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1728356AbeIKWPZ (ORCPT + 99 others); Tue, 11 Sep 2018 18:15:25 -0400 Received: from mga02.intel.com ([134.134.136.20]:31116 "EHLO mga02.intel.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1728299AbeIKWPQ (ORCPT ); Tue, 11 Sep 2018 18:15:16 -0400 X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from fmsmga006.fm.intel.com ([10.253.24.20]) by orsmga101.jf.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384; 11 Sep 2018 10:15:00 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.53,361,1531810800"; d="scan'208";a="262579658" Received: from chenyu-desktop.sh.intel.com ([10.239.160.116]) by fmsmga006.fm.intel.com with ESMTP; 11 Sep 2018 10:14:56 -0700 From: Chen Yu To: Thomas Gleixner , "Rafael J. 
Wysocki" Cc: Pavel Machek , Rui Zhang , Chen Yu , Zhimin Gu , Len Brown , linux-kernel@vger.kernel.org, x86@kernel.org, linux-pm@vger.kernel.org Subject: [PATCH 4/4][v2] x86, hibernate: Backport several fixes from 64bits to 32bits hibernation Date: Wed, 12 Sep 2018 01:21:00 +0800 Message-Id: <6c1b31e24b78b0366c7ca9fa6ac75bdb463d8fc0.1536685746.git.yu.c.chen@intel.com> X-Mailer: git-send-email 2.17.1 In-Reply-To: References: Sender: linux-kernel-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: linux-kernel@vger.kernel.org From: Zhimin Gu Currently there are mainly three bugs on 32-bit systems when doing hibernation: 1. The page copy code is not running in a safe page, which might cause a hang during resume. 2. There's no text mapping for the final jump address of the original kernel, which might cause the system to jump to an illegal address and hang during resume. 3. The restore kernel switches to its own kernel page table (swapper_pg_dir) rather than the original kernel page table after all the pages have been copied back, which might cause an invalid virtual-physical mapping issue during resume. To solve these problems: 1. Copy the code core_restore_code to a safe page, to avoid the instruction code being overwritten while the image kernel pages are being copied. 2. Set up a temporary text mapping for the image kernel's jump address, so that after all the pages have been copied back, the system can jump to this address. 3. Switch to the original kernel page table during resume. Furthermore, the MD5 hash check for the e820 map is also backported from the 64-bit implementation. Acked-by: Chen Yu Cc: "Rafael J. 
Wysocki" Cc: Thomas Gleixner Signed-off-by: Zhimin Gu --- arch/x86/Kconfig | 2 +- arch/x86/include/asm/suspend_32.h | 4 +++ arch/x86/power/hibernate.c | 2 -- arch/x86/power/hibernate_32.c | 37 +++++++++++++++++++++++ arch/x86/power/hibernate_asm_32.S | 49 +++++++++++++++++++++++++------ 5 files changed, 82 insertions(+), 12 deletions(-) diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 1a0be022f91d..e8de5de1057f 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -2422,7 +2422,7 @@ menu "Power management and ACPI options" config ARCH_HIBERNATION_HEADER def_bool y - depends on X86_64 && HIBERNATION + depends on HIBERNATION source "kernel/power/Kconfig" diff --git a/arch/x86/include/asm/suspend_32.h b/arch/x86/include/asm/suspend_32.h index 8be6afb58471..fdbd9d7b7bca 100644 --- a/arch/x86/include/asm/suspend_32.h +++ b/arch/x86/include/asm/suspend_32.h @@ -32,4 +32,8 @@ struct saved_context { unsigned long return_address; } __attribute__((packed)); +/* routines for saving/restoring kernel state */ +extern char core_restore_code[]; +extern char restore_registers[]; + #endif /* _ASM_X86_SUSPEND_32_H */ diff --git a/arch/x86/power/hibernate.c b/arch/x86/power/hibernate.c index 6aeac4d3c9df..d719d156114b 100644 --- a/arch/x86/power/hibernate.c +++ b/arch/x86/power/hibernate.c @@ -69,7 +69,6 @@ int pfn_is_nosave(unsigned long pfn) return pfn >= nosave_begin_pfn && pfn < nosave_end_pfn; } -#ifdef CONFIG_X86_64 static int relocate_restore_code(void) { pgd_t *pgd; @@ -262,4 +261,3 @@ int arch_hibernation_header_restore(void *addr) return 0; } -#endif diff --git a/arch/x86/power/hibernate_32.c b/arch/x86/power/hibernate_32.c index e0e7b9aea22a..d692700047bf 100644 --- a/arch/x86/power/hibernate_32.c +++ b/arch/x86/power/hibernate_32.c @@ -135,6 +135,32 @@ static inline void resume_init_first_level_page_table(pgd_t *pg_dir) #endif } +static int set_up_temporary_text_mapping(pgd_t *pgd_base) +{ + pgd_t *pgd; + pmd_t *pmd; + pte_t *pte; + + pgd = pgd_base + 
pgd_index(restore_jump_address); + + pmd = resume_one_md_table_init(pgd); + if (!pmd) + return -ENOMEM; + + if (boot_cpu_has(X86_FEATURE_PSE)) { + set_pmd(pmd + pmd_index(restore_jump_address), + __pmd((jump_address_phys & PMD_MASK) | pgprot_val(PAGE_KERNEL_LARGE_EXEC))); + } else { + pte = resume_one_page_table_init(pmd); + if (!pte) + return -ENOMEM; + set_pte(pte + pte_index(restore_jump_address), + __pte((jump_address_phys & PAGE_MASK) | pgprot_val(PAGE_KERNEL_EXEC))); + } + + return 0; +} + asmlinkage int swsusp_arch_resume(void) { int error; @@ -144,10 +170,21 @@ asmlinkage int swsusp_arch_resume(void) return -ENOMEM; resume_init_first_level_page_table(resume_pg_dir); + + error = set_up_temporary_text_mapping(resume_pg_dir); + if (error) + return error; + error = resume_physical_mapping_init(resume_pg_dir); if (error) return error; + temp_pgt = __pa(resume_pg_dir); + + error = relocate_restore_code(); + if (error) + return error; + /* We have got enough memory and from now on we cannot recover */ restore_image(); return 0; diff --git a/arch/x86/power/hibernate_asm_32.S b/arch/x86/power/hibernate_asm_32.S index 6e56815e13a0..a53b4a41e09a 100644 --- a/arch/x86/power/hibernate_asm_32.S +++ b/arch/x86/power/hibernate_asm_32.S @@ -24,21 +24,40 @@ ENTRY(swsusp_arch_suspend) pushfl popl saved_context_eflags + /* save cr3 */ + movl %cr3, %eax + movl %eax, restore_cr3 + call swsusp_save ret +ENDPROC(swsusp_arch_suspend) ENTRY(restore_image) - movl mmu_cr4_features, %ecx - movl resume_pg_dir, %eax - subl $__PAGE_OFFSET, %eax + /* prepare to jump to the image kernel */ + movl restore_jump_address, %ebx + movl restore_cr3, %ebp + + movl mmu_cr4_features, %edx + + /* jump to relocated restore code */ + movl relocated_restore_code, %eax + jmpl *%eax + + /* code below has been relocated to a safe page */ +ENTRY(core_restore_code) + movl temp_pgt, %eax movl %eax, %cr3 + /* flush TLB */ + movl %edx, %ecx jecxz 1f # cr4 Pentium and higher, skip if zero andl $~(X86_CR4_PGE), 
%ecx movl %ecx, %cr4; # turn off PGE movl %cr3, %eax; # flush TLB movl %eax, %cr3 + movl %edx, %cr4; # turn PGE back on 1: + /* prepare to copy image data to their original locations */ movl restore_pblist, %edx .p2align 4,,7 @@ -49,7 +68,7 @@ copy_loop: movl pbe_address(%edx), %esi movl pbe_orig_address(%edx), %edi - movl $1024, %ecx + movl $(PAGE_SIZE >> 2), %ecx rep movsl @@ -58,13 +77,22 @@ copy_loop: .p2align 4,,7 done: + jmpl *%ebx + .align PAGE_SIZE + +ENTRY(restore_registers) /* go back to the original page tables */ - movl $swapper_pg_dir, %eax - subl $__PAGE_OFFSET, %eax - movl %eax, %cr3 - movl mmu_cr4_features, %ecx + movl %ebp, %cr3 + + /* flush TLB */ + movl mmu_cr4_features, %edx + movl %edx, %ecx jecxz 1f # cr4 Pentium and higher, skip if zero - movl %ecx, %cr4; # turn PGE back on + andl $~(X86_CR4_PGE), %ecx + movl %ecx, %cr4; # turn off PGE + movl %cr3, %ecx; # flush TLB + movl %ecx, %cr3; + movl %edx, %cr4; # turn PGE back on 1: movl saved_context_esp, %esp @@ -82,4 +110,7 @@ done: xorl %eax, %eax + movl %eax, in_suspend + ret +ENDPROC(restore_registers) -- 2.17.1