The following changes since commit c1fd1b43831fa20c91cdd461342af8edf2e87c2f:
Pekka Enberg (1):
x86, mm: Unify kernel_physical_mapping_init() API
are available in the git repository at:
git://xenbits.xensource.com/people/ianc/linux-2.6.git for-x86/mm
Ian Campbell (3):
xen: disable highmem PTE allocation even when CONFIG_HIGHPTE=y
vmi: disable highmem PTE allocation even when CONFIG_HIGHPTE=y
x86: remove kmap_atomic_pte paravirt op.
arch/x86/include/asm/highmem.h | 4 ---
arch/x86/include/asm/paravirt.h | 9 --------
arch/x86/include/asm/paravirt_types.h | 4 ---
arch/x86/include/asm/pgtable_32.h | 4 +-
arch/x86/kernel/paravirt.c | 4 ---
arch/x86/kernel/vmi_32.c | 35 ++++++--------------------------
arch/x86/xen/enlighten.c | 7 ++++++
arch/x86/xen/mmu.c | 21 -------------------
8 files changed, 16 insertions(+), 72 deletions(-)
There's a path in the pagefault code where the kernel deliberately
breaks its own locking rules by kmapping a high pte page without
holding the pagetable lock (in at least page_check_address). This
breaks Xen's ability to track the pinned/unpinned state of the
page. There does not appear to be a viable workaround for this
behaviour so simply disable HIGHPTE for all Xen guests.
Signed-off-by: Ian Campbell <[email protected]>
Cc: Jeremy Fitzhardinge <[email protected]>
Cc: H. Peter Anvin <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Pasi Kärkkäinen <[email protected]>
Cc: <[email protected]> # .32.x: 14315592: Allow highmem user page tables to be disabled at boot time
Cc: <[email protected]> # .32.x
Cc: <[email protected]>
---
arch/x86/xen/enlighten.c | 7 +++++++
arch/x86/xen/mmu.c | 11 ++++++-----
2 files changed, 13 insertions(+), 5 deletions(-)
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 36daccb..b607239 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -50,6 +50,7 @@
#include <asm/traps.h>
#include <asm/setup.h>
#include <asm/desc.h>
+#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/reboot.h>
@@ -1094,6 +1095,12 @@ asmlinkage void __init xen_start_kernel(void)
__supported_pte_mask |= _PAGE_IOMAP;
+ /*
+ * Prevent page tables from being allocated in highmem, even
+ * if CONFIG_HIGHPTE is enabled.
+ */
+ __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
+
/* Work out if we support NX */
x86_configure_nx();
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index bf4cd6b..350a3de 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1432,14 +1432,15 @@ static void *xen_kmap_atomic_pte(struct page *page, enum km_type type)
{
pgprot_t prot = PAGE_KERNEL;
+ /*
+ * We disable highmem allocations for page tables so we should never
+ * see any calls to kmap_atomic_pte on a highmem page.
+ */
+ BUG_ON(PageHighMem(page));
+
if (PagePinned(page))
prot = PAGE_KERNEL_RO;
- if (0 && PageHighMem(page))
- printk("mapping highpte %lx type %d prot %s\n",
- page_to_pfn(page), type,
- (unsigned long)pgprot_val(prot) & _PAGE_RW ? "WRITE" : "READ");
-
return kmap_atomic_prot(page, type, prot);
}
#endif
--
1.5.6.5
Preventing HIGHPTE allocations under VMI will allow us to remove the
kmap_atomic_pte paravirt op.
Signed-off-by: Ian Campbell <[email protected]>
Cc: Alok Kataria <[email protected]>
Cc: H. Peter Anvin <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Jeremy Fitzhardinge <[email protected]>
---
arch/x86/kernel/vmi_32.c | 23 +++++++++++------------
1 files changed, 11 insertions(+), 12 deletions(-)
diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
index d430e4c..58aca86 100644
--- a/arch/x86/kernel/vmi_32.c
+++ b/arch/x86/kernel/vmi_32.c
@@ -33,6 +33,7 @@
#include <asm/fixmap.h>
#include <asm/apicdef.h>
#include <asm/apic.h>
+#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/timer.h>
#include <asm/vmi_time.h>
@@ -272,19 +273,11 @@ static void *vmi_kmap_atomic_pte(struct page *page, enum km_type type)
void *va = kmap_atomic(page, type);
/*
- * Internally, the VMI ROM must map virtual addresses to physical
- * addresses for processing MMU updates. By the time MMU updates
- * are issued, this information is typically already lost.
- * Fortunately, the VMI provides a cache of mapping slots for active
- * page tables.
- *
- * We use slot zero for the linear mapping of physical memory, and
- * in HIGHPTE kernels, slot 1 and 2 for KM_PTE0 and KM_PTE1.
- *
- * args: SLOT VA COUNT PFN
+ * We disable highmem allocations for page tables so we should never
+ * see any calls to kmap_atomic_pte on a highmem page.
*/
- BUG_ON(type != KM_PTE0 && type != KM_PTE1);
- vmi_ops.set_linear_mapping((type - KM_PTE0)+1, va, 1, page_to_pfn(page));
+
+ BUG_ON(PageHighMem(page));
return va;
}
@@ -640,6 +633,12 @@ static inline int __init activate_vmi(void)
u64 reloc;
const struct vmi_relocation_info *rel = (struct vmi_relocation_info *)&reloc;
+ /*
+ * Prevent page tables from being allocated in highmem, even if
+ * CONFIG_HIGHPTE is enabled.
+ */
+ __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
+
if (call_vrom_func(vmi_rom, vmi_init) != 0) {
printk(KERN_ERR "VMI ROM failed to initialize!");
return 0;
--
1.5.6.5
Now that both Xen and VMI disable allocations of PTE pages from high
memory this paravirt op serves no further purpose.
This effectively reverts ce6234b5 "add kmap_atomic_pte for mapping
highpte pages".
Signed-off-by: Ian Campbell <[email protected]>
Cc: Alok Kataria <[email protected]>
Cc: H. Peter Anvin <[email protected]>
Cc: Jeremy Fitzhardinge <[email protected]>
Cc: Ingo Molnar <[email protected]>
---
arch/x86/include/asm/highmem.h | 4 ----
arch/x86/include/asm/paravirt.h | 9 ---------
arch/x86/include/asm/paravirt_types.h | 4 ----
arch/x86/include/asm/pgtable_32.h | 4 ++--
arch/x86/kernel/paravirt.c | 4 ----
arch/x86/kernel/vmi_32.c | 20 --------------------
arch/x86/xen/mmu.c | 22 ----------------------
7 files changed, 2 insertions(+), 65 deletions(-)
diff --git a/arch/x86/include/asm/highmem.h b/arch/x86/include/asm/highmem.h
index 014c2b8..a726650 100644
--- a/arch/x86/include/asm/highmem.h
+++ b/arch/x86/include/asm/highmem.h
@@ -66,10 +66,6 @@ void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
struct page *kmap_atomic_to_page(void *ptr);
-#ifndef CONFIG_PARAVIRT
-#define kmap_atomic_pte(page, type) kmap_atomic(page, type)
-#endif
-
#define flush_cache_kmaps() do { } while (0)
extern void add_highpages_with_active_regions(int nid, unsigned long start_pfn,
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index dd59a85..5653f43 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -435,15 +435,6 @@ static inline void paravirt_release_pud(unsigned long pfn)
PVOP_VCALL1(pv_mmu_ops.release_pud, pfn);
}
-#ifdef CONFIG_HIGHPTE
-static inline void *kmap_atomic_pte(struct page *page, enum km_type type)
-{
- unsigned long ret;
- ret = PVOP_CALL2(unsigned long, pv_mmu_ops.kmap_atomic_pte, page, type);
- return (void *)ret;
-}
-#endif
-
static inline void pte_update(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
{
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index b1e70d5..db9ef55 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -304,10 +304,6 @@ struct pv_mmu_ops {
#endif /* PAGETABLE_LEVELS == 4 */
#endif /* PAGETABLE_LEVELS >= 3 */
-#ifdef CONFIG_HIGHPTE
- void *(*kmap_atomic_pte)(struct page *page, enum km_type type);
-#endif
-
struct pv_lazy_ops lazy_mode;
/* dom0 ops */
diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
index 01fd946..b422d22 100644
--- a/arch/x86/include/asm/pgtable_32.h
+++ b/arch/x86/include/asm/pgtable_32.h
@@ -54,10 +54,10 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
in_irq() ? KM_IRQ_PTE : \
KM_PTE0)
#define pte_offset_map(dir, address) \
- ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)), __KM_PTE) + \
+ ((pte_t *)kmap_atomic(pmd_page(*(dir)), __KM_PTE) + \
pte_index((address)))
#define pte_offset_map_nested(dir, address) \
- ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)), KM_PTE1) + \
+ ((pte_t *)kmap_atomic(pmd_page(*(dir)), KM_PTE1) + \
pte_index((address)))
#define pte_unmap(pte) kunmap_atomic((pte), __KM_PTE)
#define pte_unmap_nested(pte) kunmap_atomic((pte), KM_PTE1)
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 1b1739d..1db183e 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -428,10 +428,6 @@ struct pv_mmu_ops pv_mmu_ops = {
.ptep_modify_prot_start = __ptep_modify_prot_start,
.ptep_modify_prot_commit = __ptep_modify_prot_commit,
-#ifdef CONFIG_HIGHPTE
- .kmap_atomic_pte = kmap_atomic,
-#endif
-
#if PAGETABLE_LEVELS >= 3
#ifdef CONFIG_X86_PAE
.set_pte_atomic = native_set_pte_atomic,
diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
index 58aca86..7dd599d 100644
--- a/arch/x86/kernel/vmi_32.c
+++ b/arch/x86/kernel/vmi_32.c
@@ -267,22 +267,6 @@ static void vmi_nop(void)
{
}
-#ifdef CONFIG_HIGHPTE
-static void *vmi_kmap_atomic_pte(struct page *page, enum km_type type)
-{
- void *va = kmap_atomic(page, type);
-
- /*
- * We disable highmem allocations for page tables so we should never
- * see any calls to kmap_atomic_pte on a highmem page.
- */
-
- BUG_ON(PageHighMem(page));
-
- return va;
-}
-#endif
-
static void vmi_allocate_pte(struct mm_struct *mm, unsigned long pfn)
{
vmi_ops.allocate_page(pfn, VMI_PAGE_L1, 0, 0, 0);
@@ -777,10 +761,6 @@ static inline int __init activate_vmi(void)
/* Set linear is needed in all cases */
vmi_ops.set_linear_mapping = vmi_get_function(VMI_CALL_SetLinearMapping);
-#ifdef CONFIG_HIGHPTE
- if (vmi_ops.set_linear_mapping)
- pv_mmu_ops.kmap_atomic_pte = vmi_kmap_atomic_pte;
-#endif
/*
* These MUST always be patched. Don't support indirect jumps
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 350a3de..f9eb7de 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1427,24 +1427,6 @@ static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
#endif
}
-#ifdef CONFIG_HIGHPTE
-static void *xen_kmap_atomic_pte(struct page *page, enum km_type type)
-{
- pgprot_t prot = PAGE_KERNEL;
-
- /*
- * We disable highmem allocations for page tables so we should never
- * see any calls to kmap_atomic_pte on a highmem page.
- */
- BUG_ON(PageHighMem(page));
-
- if (PagePinned(page))
- prot = PAGE_KERNEL_RO;
-
- return kmap_atomic_prot(page, type, prot);
-}
-#endif
-
#ifdef CONFIG_X86_32
static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
{
@@ -1903,10 +1885,6 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = {
.alloc_pmd_clone = paravirt_nop,
.release_pmd = xen_release_pmd_init,
-#ifdef CONFIG_HIGHPTE
- .kmap_atomic_pte = xen_kmap_atomic_pte,
-#endif
-
#ifdef CONFIG_X86_64
.set_pte = xen_set_pte,
#else
--
1.5.6.5
On Fri, 2010-02-26 at 09:16 -0800, Ian Campbell wrote:
> Now that both Xen and VMI disable allocations of PTE pages from high
> memory this paravirt op serves no further purpose.
Acked-by: Alok N Kataria <[email protected]>
Thanks for doing this.
Alok
>
> This effectively reverts ce6234b5 "add kmap_atomic_pte for mapping
> highpte pages".
>
> Signed-off-by: Ian Campbell <[email protected]>
> Cc: Alok Kataria <[email protected]>
> Cc: H. Peter Anvin <[email protected]>
> Cc: Jeremy Fitzhardinge <[email protected]>
> Cc: Ingo Molnar <[email protected]>
> ---
> arch/x86/include/asm/highmem.h | 4 ----
> arch/x86/include/asm/paravirt.h | 9 ---------
> arch/x86/include/asm/paravirt_types.h | 4 ----
> arch/x86/include/asm/pgtable_32.h | 4 ++--
> arch/x86/kernel/paravirt.c | 4 ----
> arch/x86/kernel/vmi_32.c | 20 --------------------
> arch/x86/xen/mmu.c | 22 ----------------------
> 7 files changed, 2 insertions(+), 65 deletions(-)
>
> diff --git a/arch/x86/include/asm/highmem.h b/arch/x86/include/asm/highmem.h
> index 014c2b8..a726650 100644
> --- a/arch/x86/include/asm/highmem.h
> +++ b/arch/x86/include/asm/highmem.h
> @@ -66,10 +66,6 @@ void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
> void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
> struct page *kmap_atomic_to_page(void *ptr);
>
> -#ifndef CONFIG_PARAVIRT
> -#define kmap_atomic_pte(page, type) kmap_atomic(page, type)
> -#endif
> -
> #define flush_cache_kmaps() do { } while (0)
>
> extern void add_highpages_with_active_regions(int nid, unsigned long start_pfn,
> diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
> index dd59a85..5653f43 100644
> --- a/arch/x86/include/asm/paravirt.h
> +++ b/arch/x86/include/asm/paravirt.h
> @@ -435,15 +435,6 @@ static inline void paravirt_release_pud(unsigned long pfn)
> PVOP_VCALL1(pv_mmu_ops.release_pud, pfn);
> }
>
> -#ifdef CONFIG_HIGHPTE
> -static inline void *kmap_atomic_pte(struct page *page, enum km_type type)
> -{
> - unsigned long ret;
> - ret = PVOP_CALL2(unsigned long, pv_mmu_ops.kmap_atomic_pte, page, type);
> - return (void *)ret;
> -}
> -#endif
> -
> static inline void pte_update(struct mm_struct *mm, unsigned long addr,
> pte_t *ptep)
> {
> diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
> index b1e70d5..db9ef55 100644
> --- a/arch/x86/include/asm/paravirt_types.h
> +++ b/arch/x86/include/asm/paravirt_types.h
> @@ -304,10 +304,6 @@ struct pv_mmu_ops {
> #endif /* PAGETABLE_LEVELS == 4 */
> #endif /* PAGETABLE_LEVELS >= 3 */
>
> -#ifdef CONFIG_HIGHPTE
> - void *(*kmap_atomic_pte)(struct page *page, enum km_type type);
> -#endif
> -
> struct pv_lazy_ops lazy_mode;
>
> /* dom0 ops */
> diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
> index 01fd946..b422d22 100644
> --- a/arch/x86/include/asm/pgtable_32.h
> +++ b/arch/x86/include/asm/pgtable_32.h
> @@ -54,10 +54,10 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
> in_irq() ? KM_IRQ_PTE : \
> KM_PTE0)
> #define pte_offset_map(dir, address) \
> - ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)), __KM_PTE) + \
> + ((pte_t *)kmap_atomic(pmd_page(*(dir)), __KM_PTE) + \
> pte_index((address)))
> #define pte_offset_map_nested(dir, address) \
> - ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)), KM_PTE1) + \
> + ((pte_t *)kmap_atomic(pmd_page(*(dir)), KM_PTE1) + \
> pte_index((address)))
> #define pte_unmap(pte) kunmap_atomic((pte), __KM_PTE)
> #define pte_unmap_nested(pte) kunmap_atomic((pte), KM_PTE1)
> diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
> index 1b1739d..1db183e 100644
> --- a/arch/x86/kernel/paravirt.c
> +++ b/arch/x86/kernel/paravirt.c
> @@ -428,10 +428,6 @@ struct pv_mmu_ops pv_mmu_ops = {
> .ptep_modify_prot_start = __ptep_modify_prot_start,
> .ptep_modify_prot_commit = __ptep_modify_prot_commit,
>
> -#ifdef CONFIG_HIGHPTE
> - .kmap_atomic_pte = kmap_atomic,
> -#endif
> -
> #if PAGETABLE_LEVELS >= 3
> #ifdef CONFIG_X86_PAE
> .set_pte_atomic = native_set_pte_atomic,
> diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
> index 58aca86..7dd599d 100644
> --- a/arch/x86/kernel/vmi_32.c
> +++ b/arch/x86/kernel/vmi_32.c
> @@ -267,22 +267,6 @@ static void vmi_nop(void)
> {
> }
>
> -#ifdef CONFIG_HIGHPTE
> -static void *vmi_kmap_atomic_pte(struct page *page, enum km_type type)
> -{
> - void *va = kmap_atomic(page, type);
> -
> - /*
> - * We disable highmem allocations for page tables so we should never
> - * see any calls to kmap_atomic_pte on a highmem page.
> - */
> -
> - BUG_ON(PageHighMem(page));
> -
> - return va;
> -}
> -#endif
> -
> static void vmi_allocate_pte(struct mm_struct *mm, unsigned long pfn)
> {
> vmi_ops.allocate_page(pfn, VMI_PAGE_L1, 0, 0, 0);
> @@ -777,10 +761,6 @@ static inline int __init activate_vmi(void)
>
> /* Set linear is needed in all cases */
> vmi_ops.set_linear_mapping = vmi_get_function(VMI_CALL_SetLinearMapping);
> -#ifdef CONFIG_HIGHPTE
> - if (vmi_ops.set_linear_mapping)
> - pv_mmu_ops.kmap_atomic_pte = vmi_kmap_atomic_pte;
> -#endif
>
> /*
> * These MUST always be patched. Don't support indirect jumps
> diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
> index 350a3de..f9eb7de 100644
> --- a/arch/x86/xen/mmu.c
> +++ b/arch/x86/xen/mmu.c
> @@ -1427,24 +1427,6 @@ static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
> #endif
> }
>
> -#ifdef CONFIG_HIGHPTE
> -static void *xen_kmap_atomic_pte(struct page *page, enum km_type type)
> -{
> - pgprot_t prot = PAGE_KERNEL;
> -
> - /*
> - * We disable highmem allocations for page tables so we should never
> - * see any calls to kmap_atomic_pte on a highmem page.
> - */
> - BUG_ON(PageHighMem(page));
> -
> - if (PagePinned(page))
> - prot = PAGE_KERNEL_RO;
> -
> - return kmap_atomic_prot(page, type, prot);
> -}
> -#endif
> -
> #ifdef CONFIG_X86_32
> static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
> {
> @@ -1903,10 +1885,6 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = {
> .alloc_pmd_clone = paravirt_nop,
> .release_pmd = xen_release_pmd_init,
>
> -#ifdef CONFIG_HIGHPTE
> - .kmap_atomic_pte = xen_kmap_atomic_pte,
> -#endif
> -
> #ifdef CONFIG_X86_64
> .set_pte = xen_set_pte,
> #else
On Fri, 2010-02-26 at 09:16 -0800, Ian Campbell wrote:
> Preventing HIGHPTE allocations under VMI will allow us to remove the
> kmap_atomic_pte paravirt op.
Acked-by: Alok N Kataria <[email protected]>
Thanks,
Alok
>
> Signed-off-by: Ian Campbell <[email protected]>
> Cc: Alok Kataria <[email protected]>
> Cc: H. Peter Anvin <[email protected]>
> Cc: Ingo Molnar <[email protected]>
> Cc: Jeremy Fitzhardinge <[email protected]>
> ---
> arch/x86/kernel/vmi_32.c | 23 +++++++++++------------
> 1 files changed, 11 insertions(+), 12 deletions(-)
>
> diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
> index d430e4c..58aca86 100644
> --- a/arch/x86/kernel/vmi_32.c
> +++ b/arch/x86/kernel/vmi_32.c
> @@ -33,6 +33,7 @@
> #include <asm/fixmap.h>
> #include <asm/apicdef.h>
> #include <asm/apic.h>
> +#include <asm/pgalloc.h>
> #include <asm/processor.h>
> #include <asm/timer.h>
> #include <asm/vmi_time.h>
> @@ -272,19 +273,11 @@ static void *vmi_kmap_atomic_pte(struct page *page, enum km_type type)
> void *va = kmap_atomic(page, type);
>
> /*
> - * Internally, the VMI ROM must map virtual addresses to physical
> - * addresses for processing MMU updates. By the time MMU updates
> - * are issued, this information is typically already lost.
> - * Fortunately, the VMI provides a cache of mapping slots for active
> - * page tables.
> - *
> - * We use slot zero for the linear mapping of physical memory, and
> - * in HIGHPTE kernels, slot 1 and 2 for KM_PTE0 and KM_PTE1.
> - *
> - * args: SLOT VA COUNT PFN
> + * We disable highmem allocations for page tables so we should never
> + * see any calls to kmap_atomic_pte on a highmem page.
> */
> - BUG_ON(type != KM_PTE0 && type != KM_PTE1);
> - vmi_ops.set_linear_mapping((type - KM_PTE0)+1, va, 1, page_to_pfn(page));
> +
> + BUG_ON(PageHighMem(page));
>
> return va;
> }
> @@ -640,6 +633,12 @@ static inline int __init activate_vmi(void)
> u64 reloc;
> const struct vmi_relocation_info *rel = (struct vmi_relocation_info *)&reloc;
>
> + /*
> + * Prevent page tables from being allocated in highmem, even if
> + * CONFIG_HIGHPTE is enabled.
> + */
> + __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
> +
> if (call_vrom_func(vmi_rom, vmi_init) != 0) {
> printk(KERN_ERR "VMI ROM failed to initialize!");
> return 0;
Commit-ID: dad52fc01161afcb8798c609e009aed4d104927f
Gitweb: http://git.kernel.org/tip/dad52fc01161afcb8798c609e009aed4d104927f
Author: Ian Campbell <[email protected]>
AuthorDate: Fri, 26 Feb 2010 17:16:02 +0000
Committer: H. Peter Anvin <[email protected]>
CommitDate: Sat, 27 Feb 2010 14:41:35 -0800
x86, paravirt: Remove kmap_atomic_pte paravirt op.
Now that both Xen and VMI disable allocations of PTE pages from high
memory this paravirt op serves no further purpose.
This effectively reverts ce6234b5 "add kmap_atomic_pte for mapping
highpte pages".
Signed-off-by: Ian Campbell <[email protected]>
LKML-Reference: <[email protected]>
Acked-by: Alok Kataria <[email protected]>
Cc: Jeremy Fitzhardinge <[email protected]>
Cc: Ingo Molnar <[email protected]>
Signed-off-by: H. Peter Anvin <[email protected]>
---
arch/x86/include/asm/highmem.h | 4 ----
arch/x86/include/asm/paravirt.h | 9 ---------
arch/x86/include/asm/paravirt_types.h | 4 ----
arch/x86/include/asm/pgtable_32.h | 4 ++--
arch/x86/kernel/paravirt.c | 4 ----
arch/x86/kernel/vmi_32.c | 20 --------------------
arch/x86/xen/mmu.c | 22 ----------------------
7 files changed, 2 insertions(+), 65 deletions(-)
diff --git a/arch/x86/include/asm/highmem.h b/arch/x86/include/asm/highmem.h
index 014c2b8..a726650 100644
--- a/arch/x86/include/asm/highmem.h
+++ b/arch/x86/include/asm/highmem.h
@@ -66,10 +66,6 @@ void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
struct page *kmap_atomic_to_page(void *ptr);
-#ifndef CONFIG_PARAVIRT
-#define kmap_atomic_pte(page, type) kmap_atomic(page, type)
-#endif
-
#define flush_cache_kmaps() do { } while (0)
extern void add_highpages_with_active_regions(int nid, unsigned long start_pfn,
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index dd59a85..5653f43 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -435,15 +435,6 @@ static inline void paravirt_release_pud(unsigned long pfn)
PVOP_VCALL1(pv_mmu_ops.release_pud, pfn);
}
-#ifdef CONFIG_HIGHPTE
-static inline void *kmap_atomic_pte(struct page *page, enum km_type type)
-{
- unsigned long ret;
- ret = PVOP_CALL2(unsigned long, pv_mmu_ops.kmap_atomic_pte, page, type);
- return (void *)ret;
-}
-#endif
-
static inline void pte_update(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
{
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index b1e70d5..db9ef55 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -304,10 +304,6 @@ struct pv_mmu_ops {
#endif /* PAGETABLE_LEVELS == 4 */
#endif /* PAGETABLE_LEVELS >= 3 */
-#ifdef CONFIG_HIGHPTE
- void *(*kmap_atomic_pte)(struct page *page, enum km_type type);
-#endif
-
struct pv_lazy_ops lazy_mode;
/* dom0 ops */
diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
index 01fd946..b422d22 100644
--- a/arch/x86/include/asm/pgtable_32.h
+++ b/arch/x86/include/asm/pgtable_32.h
@@ -54,10 +54,10 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
in_irq() ? KM_IRQ_PTE : \
KM_PTE0)
#define pte_offset_map(dir, address) \
- ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)), __KM_PTE) + \
+ ((pte_t *)kmap_atomic(pmd_page(*(dir)), __KM_PTE) + \
pte_index((address)))
#define pte_offset_map_nested(dir, address) \
- ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)), KM_PTE1) + \
+ ((pte_t *)kmap_atomic(pmd_page(*(dir)), KM_PTE1) + \
pte_index((address)))
#define pte_unmap(pte) kunmap_atomic((pte), __KM_PTE)
#define pte_unmap_nested(pte) kunmap_atomic((pte), KM_PTE1)
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 1b1739d..1db183e 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -428,10 +428,6 @@ struct pv_mmu_ops pv_mmu_ops = {
.ptep_modify_prot_start = __ptep_modify_prot_start,
.ptep_modify_prot_commit = __ptep_modify_prot_commit,
-#ifdef CONFIG_HIGHPTE
- .kmap_atomic_pte = kmap_atomic,
-#endif
-
#if PAGETABLE_LEVELS >= 3
#ifdef CONFIG_X86_PAE
.set_pte_atomic = native_set_pte_atomic,
diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
index 58aca86..7dd599d 100644
--- a/arch/x86/kernel/vmi_32.c
+++ b/arch/x86/kernel/vmi_32.c
@@ -267,22 +267,6 @@ static void vmi_nop(void)
{
}
-#ifdef CONFIG_HIGHPTE
-static void *vmi_kmap_atomic_pte(struct page *page, enum km_type type)
-{
- void *va = kmap_atomic(page, type);
-
- /*
- * We disable highmem allocations for page tables so we should never
- * see any calls to kmap_atomic_pte on a highmem page.
- */
-
- BUG_ON(PageHighMem(page));
-
- return va;
-}
-#endif
-
static void vmi_allocate_pte(struct mm_struct *mm, unsigned long pfn)
{
vmi_ops.allocate_page(pfn, VMI_PAGE_L1, 0, 0, 0);
@@ -777,10 +761,6 @@ static inline int __init activate_vmi(void)
/* Set linear is needed in all cases */
vmi_ops.set_linear_mapping = vmi_get_function(VMI_CALL_SetLinearMapping);
-#ifdef CONFIG_HIGHPTE
- if (vmi_ops.set_linear_mapping)
- pv_mmu_ops.kmap_atomic_pte = vmi_kmap_atomic_pte;
-#endif
/*
* These MUST always be patched. Don't support indirect jumps
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 350a3de..f9eb7de 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1427,24 +1427,6 @@ static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
#endif
}
-#ifdef CONFIG_HIGHPTE
-static void *xen_kmap_atomic_pte(struct page *page, enum km_type type)
-{
- pgprot_t prot = PAGE_KERNEL;
-
- /*
- * We disable highmem allocations for page tables so we should never
- * see any calls to kmap_atomic_pte on a highmem page.
- */
- BUG_ON(PageHighMem(page));
-
- if (PagePinned(page))
- prot = PAGE_KERNEL_RO;
-
- return kmap_atomic_prot(page, type, prot);
-}
-#endif
-
#ifdef CONFIG_X86_32
static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
{
@@ -1903,10 +1885,6 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = {
.alloc_pmd_clone = paravirt_nop,
.release_pmd = xen_release_pmd_init,
-#ifdef CONFIG_HIGHPTE
- .kmap_atomic_pte = xen_kmap_atomic_pte,
-#endif
-
#ifdef CONFIG_X86_64
.set_pte = xen_set_pte,
#else
Commit-ID: 3249b7e1df6380e9d7bb3238f64f445bf614f787
Gitweb: http://git.kernel.org/tip/3249b7e1df6380e9d7bb3238f64f445bf614f787
Author: Ian Campbell <[email protected]>
AuthorDate: Fri, 26 Feb 2010 17:16:01 +0000
Committer: H. Peter Anvin <[email protected]>
CommitDate: Sat, 27 Feb 2010 14:41:16 -0800
x86, vmi: Disable highmem PTE allocation even when CONFIG_HIGHPTE=y
Preventing HIGHPTE allocations under VMI will allow us to remove the
kmap_atomic_pte paravirt op.
Signed-off-by: Ian Campbell <[email protected]>
LKML-Reference: <[email protected]>
Acked-by: Alok Kataria <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Jeremy Fitzhardinge <[email protected]>
Signed-off-by: H. Peter Anvin <[email protected]>
---
arch/x86/kernel/vmi_32.c | 23 +++++++++++------------
1 files changed, 11 insertions(+), 12 deletions(-)
diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
index d430e4c..58aca86 100644
--- a/arch/x86/kernel/vmi_32.c
+++ b/arch/x86/kernel/vmi_32.c
@@ -33,6 +33,7 @@
#include <asm/fixmap.h>
#include <asm/apicdef.h>
#include <asm/apic.h>
+#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/timer.h>
#include <asm/vmi_time.h>
@@ -272,19 +273,11 @@ static void *vmi_kmap_atomic_pte(struct page *page, enum km_type type)
void *va = kmap_atomic(page, type);
/*
- * Internally, the VMI ROM must map virtual addresses to physical
- * addresses for processing MMU updates. By the time MMU updates
- * are issued, this information is typically already lost.
- * Fortunately, the VMI provides a cache of mapping slots for active
- * page tables.
- *
- * We use slot zero for the linear mapping of physical memory, and
- * in HIGHPTE kernels, slot 1 and 2 for KM_PTE0 and KM_PTE1.
- *
- * args: SLOT VA COUNT PFN
+ * We disable highmem allocations for page tables so we should never
+ * see any calls to kmap_atomic_pte on a highmem page.
*/
- BUG_ON(type != KM_PTE0 && type != KM_PTE1);
- vmi_ops.set_linear_mapping((type - KM_PTE0)+1, va, 1, page_to_pfn(page));
+
+ BUG_ON(PageHighMem(page));
return va;
}
@@ -640,6 +633,12 @@ static inline int __init activate_vmi(void)
u64 reloc;
const struct vmi_relocation_info *rel = (struct vmi_relocation_info *)&reloc;
+ /*
+ * Prevent page tables from being allocated in highmem, even if
+ * CONFIG_HIGHPTE is enabled.
+ */
+ __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
+
if (call_vrom_func(vmi_rom, vmi_init) != 0) {
printk(KERN_ERR "VMI ROM failed to initialize!");
return 0;