2020-08-07 08:40:45

by Jürgen Groß

Subject: [PATCH v3 3/7] x86/xen: drop tests for highmem in pv code

With support for 32-bit pv guests gone, pure pv code no longer needs
to test for highmem. Dropping those tests removes the need for
flushing in some places.

Signed-off-by: Juergen Gross <[email protected]>
---
arch/x86/xen/enlighten_pv.c |  11 ++-
arch/x86/xen/mmu_pv.c       | 138 ++++++++++++++----------------------
2 files changed, 57 insertions(+), 92 deletions(-)
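
A quick note on the shape of the change, since the same edit repeats
through all of the walkers below: the highmem case in xen_pin_page()
was the only callback that could ever request a flush from the walkers,
so with it gone the int "flush" accumulator OR-ed up through every
walker level is a constant 0 and the whole chain can drop its return
value. A minimal userspace toy of that pattern follows (illustration
only, not kernel code; every identifier in it is invented for this
sketch):

/*
 * Toy model of the refactoring: a tree walker that used to OR a
 * "flush needed" flag up through its levels loses the accumulator
 * once no callback can request a flush anymore.
 */
#include <stdbool.h>
#include <stdio.h>

/* Old shape: each callback reports whether a kmap flush is needed. */
static int pin_cb_old(bool page_is_highmem)
{
	return page_is_highmem;	/* unpinned highmem page => flush */
}

static int walk_old(const bool *highmem, int n)
{
	int i, flush = 0;

	for (i = 0; i < n; i++)
		flush |= pin_cb_old(highmem[i]);
	return flush;	/* caller: if (flush) kmap_flush_unused(); */
}

/* New shape: 64-bit only, no highmem, nothing to report. */
static void pin_cb_new(void)
{
	/* pin the page; a flush can never be required */
}

static void walk_new(int n)
{
	int i;

	for (i = 0; i < n; i++)
		pin_cb_new();
}

int main(void)
{
	const bool highmem[4] = { false, true, false, false };

	printf("old walker wants flush: %d\n", walk_old(highmem, 4));
	walk_new(4);	/* same traversal, no flush bookkeeping */
	return 0;
}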

diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index 7d90b3da8bb4..9fec952f84f3 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -347,6 +347,7 @@ static void set_aliased_prot(void *v, pgprot_t prot)
unsigned long pfn;
struct page *page;
unsigned char dummy;
+ void *av;

ptep = lookup_address((unsigned long)v, &level);
BUG_ON(ptep == NULL);
@@ -383,14 +384,10 @@ static void set_aliased_prot(void *v, pgprot_t prot)
if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0))
BUG();

- if (!PageHighMem(page)) {
- void *av = __va(PFN_PHYS(pfn));
+ av = __va(PFN_PHYS(pfn));

- if (av != v)
- if (HYPERVISOR_update_va_mapping((unsigned long)av, pte, 0))
- BUG();
- } else
- kmap_flush_unused();
+ if (av != v && HYPERVISOR_update_va_mapping((unsigned long)av, pte, 0))
+ BUG();

preempt_enable();
}
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index 1f9500d0b839..3774fa6d2ef7 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -537,25 +537,26 @@ __visible p4d_t xen_make_p4d(p4dval_t p4d)
PV_CALLEE_SAVE_REGS_THUNK(xen_make_p4d);
#endif /* CONFIG_PGTABLE_LEVELS >= 5 */

-static int xen_pmd_walk(struct mm_struct *mm, pmd_t *pmd,
- int (*func)(struct mm_struct *mm, struct page *, enum pt_level),
- bool last, unsigned long limit)
+static void xen_pmd_walk(struct mm_struct *mm, pmd_t *pmd,
+ void (*func)(struct mm_struct *mm, struct page *,
+ enum pt_level),
+ bool last, unsigned long limit)
{
- int i, nr, flush = 0;
+ int i, nr;

nr = last ? pmd_index(limit) + 1 : PTRS_PER_PMD;
for (i = 0; i < nr; i++) {
if (!pmd_none(pmd[i]))
- flush |= (*func)(mm, pmd_page(pmd[i]), PT_PTE);
+ (*func)(mm, pmd_page(pmd[i]), PT_PTE);
}
- return flush;
}

-static int xen_pud_walk(struct mm_struct *mm, pud_t *pud,
- int (*func)(struct mm_struct *mm, struct page *, enum pt_level),
- bool last, unsigned long limit)
+static void xen_pud_walk(struct mm_struct *mm, pud_t *pud,
+ void (*func)(struct mm_struct *mm, struct page *,
+ enum pt_level),
+ bool last, unsigned long limit)
{
- int i, nr, flush = 0;
+ int i, nr;

nr = last ? pud_index(limit) + 1 : PTRS_PER_PUD;
for (i = 0; i < nr; i++) {
@@ -566,29 +567,26 @@ static int xen_pud_walk(struct mm_struct *mm, pud_t *pud,

pmd = pmd_offset(&pud[i], 0);
if (PTRS_PER_PMD > 1)
- flush |= (*func)(mm, virt_to_page(pmd), PT_PMD);
- flush |= xen_pmd_walk(mm, pmd, func,
- last && i == nr - 1, limit);
+ (*func)(mm, virt_to_page(pmd), PT_PMD);
+ xen_pmd_walk(mm, pmd, func, last && i == nr - 1, limit);
}
- return flush;
}

-static int xen_p4d_walk(struct mm_struct *mm, p4d_t *p4d,
- int (*func)(struct mm_struct *mm, struct page *, enum pt_level),
- bool last, unsigned long limit)
+static void xen_p4d_walk(struct mm_struct *mm, p4d_t *p4d,
+ void (*func)(struct mm_struct *mm, struct page *,
+ enum pt_level),
+ bool last, unsigned long limit)
{
- int flush = 0;
pud_t *pud;


if (p4d_none(*p4d))
- return flush;
+ return;

pud = pud_offset(p4d, 0);
if (PTRS_PER_PUD > 1)
- flush |= (*func)(mm, virt_to_page(pud), PT_PUD);
- flush |= xen_pud_walk(mm, pud, func, last, limit);
- return flush;
+ (*func)(mm, virt_to_page(pud), PT_PUD);
+ xen_pud_walk(mm, pud, func, last, limit);
}

/*
@@ -603,12 +601,12 @@ static int xen_p4d_walk(struct mm_struct *mm, p4d_t *p4d,
* We must skip the Xen hole in the middle of the address space, just after
* the big x86-64 virtual hole.
*/
-static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
- int (*func)(struct mm_struct *mm, struct page *,
- enum pt_level),
- unsigned long limit)
+static void __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
+ void (*func)(struct mm_struct *mm, struct page *,
+ enum pt_level),
+ unsigned long limit)
{
- int i, nr, flush = 0;
+ int i, nr;
unsigned hole_low = 0, hole_high = 0;

/* The limit is the last byte to be touched */
@@ -633,22 +631,20 @@ static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
continue;

p4d = p4d_offset(&pgd[i], 0);
- flush |= xen_p4d_walk(mm, p4d, func, i == nr - 1, limit);
+ xen_p4d_walk(mm, p4d, func, i == nr - 1, limit);
}

/* Do the top level last, so that the callbacks can use it as
a cue to do final things like tlb flushes. */
- flush |= (*func)(mm, virt_to_page(pgd), PT_PGD);
-
- return flush;
+ (*func)(mm, virt_to_page(pgd), PT_PGD);
}

-static int xen_pgd_walk(struct mm_struct *mm,
- int (*func)(struct mm_struct *mm, struct page *,
- enum pt_level),
- unsigned long limit)
+static void xen_pgd_walk(struct mm_struct *mm,
+ void (*func)(struct mm_struct *mm, struct page *,
+ enum pt_level),
+ unsigned long limit)
{
- return __xen_pgd_walk(mm, mm->pgd, func, limit);
+ __xen_pgd_walk(mm, mm->pgd, func, limit);
}

/* If we're using split pte locks, then take the page's lock and
@@ -681,26 +677,17 @@ static void xen_do_pin(unsigned level, unsigned long pfn)
xen_extend_mmuext_op(&op);
}

-static int xen_pin_page(struct mm_struct *mm, struct page *page,
- enum pt_level level)
+static void xen_pin_page(struct mm_struct *mm, struct page *page,
+ enum pt_level level)
{
unsigned pgfl = TestSetPagePinned(page);
- int flush;
-
- if (pgfl)
- flush = 0; /* already pinned */
- else if (PageHighMem(page))
- /* kmaps need flushing if we found an unpinned
- highpage */
- flush = 1;
- else {
+
+ if (!pgfl) {
void *pt = lowmem_page_address(page);
unsigned long pfn = page_to_pfn(page);
struct multicall_space mcs = __xen_mc_entry(0);
spinlock_t *ptl;

- flush = 0;
-
/*
* We need to hold the pagetable lock between the time
* we make the pagetable RO and when we actually pin
@@ -737,8 +724,6 @@ static int xen_pin_page(struct mm_struct *mm, struct page *page,
xen_mc_callback(xen_pte_unlock, ptl);
}
}
-
- return flush;
}

/* This is called just after a mm has been created, but it has not
@@ -752,14 +737,7 @@ static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)

xen_mc_batch();

- if (__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT)) {
- /* re-enable interrupts for flushing */
- xen_mc_issue(0);
-
- kmap_flush_unused();
-
- xen_mc_batch();
- }
+ __xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT);

xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd)));

@@ -803,11 +781,10 @@ void xen_mm_pin_all(void)
spin_unlock(&pgd_lock);
}

-static int __init xen_mark_pinned(struct mm_struct *mm, struct page *page,
- enum pt_level level)
+static void __init xen_mark_pinned(struct mm_struct *mm, struct page *page,
+ enum pt_level level)
{
SetPagePinned(page);
- return 0;
}

/*
@@ -823,12 +800,12 @@ static void __init xen_after_bootmem(void)
xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP);
}

-static int xen_unpin_page(struct mm_struct *mm, struct page *page,
- enum pt_level level)
+static void xen_unpin_page(struct mm_struct *mm, struct page *page,
+ enum pt_level level)
{
unsigned pgfl = TestClearPagePinned(page);

- if (pgfl && !PageHighMem(page)) {
+ if (pgfl) {
void *pt = lowmem_page_address(page);
unsigned long pfn = page_to_pfn(page);
spinlock_t *ptl = NULL;
@@ -859,8 +836,6 @@ static int xen_unpin_page(struct mm_struct *mm, struct page *page,
xen_mc_callback(xen_pte_unlock, ptl);
}
}
-
- return 0; /* never need to flush on unpin */
}

/* Release a pagetables pages back as normal RW */
@@ -1554,20 +1529,14 @@ static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn,
if (static_branch_likely(&xen_struct_pages_ready))
SetPagePinned(page);

- if (!PageHighMem(page)) {
- xen_mc_batch();
+ xen_mc_batch();

- __set_pfn_prot(pfn, PAGE_KERNEL_RO);
+ __set_pfn_prot(pfn, PAGE_KERNEL_RO);

- if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
- __pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
+ if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
+ __pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);

- xen_mc_issue(PARAVIRT_LAZY_MMU);
- } else {
- /* make sure there are no stray mappings of
- this page */
- kmap_flush_unused();
- }
+ xen_mc_issue(PARAVIRT_LAZY_MMU);
}
}

@@ -1590,16 +1559,15 @@ static inline void xen_release_ptpage(unsigned long pfn, unsigned level)
trace_xen_mmu_release_ptpage(pfn, level, pinned);

if (pinned) {
- if (!PageHighMem(page)) {
- xen_mc_batch();
+ xen_mc_batch();

- if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
- __pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
+ if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
+ __pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);

- __set_pfn_prot(pfn, PAGE_KERNEL);
+ __set_pfn_prot(pfn, PAGE_KERNEL);
+
+ xen_mc_issue(PARAVIRT_LAZY_MMU);

- xen_mc_issue(PARAVIRT_LAZY_MMU);
- }
ClearPagePinned(page);
}
}
--
2.26.2


2020-08-07 12:15:30

by kernel test robot

Subject: Re: [PATCH v3 3/7] x86/xen: drop tests for highmem in pv code

Hi Juergen,

I love your patch! Perhaps something to improve:

[auto build test WARNING on tip/x86/core]
[also build test WARNING on tip/x86/asm v5.8 next-20200806]
[cannot apply to xen-tip/linux-next tip/x86/vdso]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patches, we suggest using '--base' as documented in
https://git-scm.com/docs/git-format-patch]

url: https://github.com/0day-ci/linux/commits/Juergen-Gross/Remove-32-bit-Xen-PV-guest-support/20200807-164058
base: https://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git ef2ff0f5d6008d325c9a068e20981c0d0acc4d6b
config: x86_64-allyesconfig (attached as .config)
compiler: gcc-9 (Debian 9.3.0-15) 9.3.0
reproduce (this is a W=1 build):
# save the attached .config to linux build tree
make W=1 ARCH=x86_64

If you fix the issue, kindly add the following tag as appropriate
Reported-by: kernel test robot <[email protected]>

All warnings (new ones prefixed by >>):

arch/x86/xen/enlighten_pv.c: In function 'set_aliased_prot':
>> arch/x86/xen/enlighten_pv.c:348:15: warning: variable 'page' set but not used [-Wunused-but-set-variable]
348 | struct page *page;
| ^~~~
arch/x86/xen/enlighten_pv.c: At top level:
arch/x86/xen/enlighten_pv.c:1198:34: warning: no previous prototype for 'xen_start_kernel' [-Wmissing-prototypes]
1198 | asmlinkage __visible void __init xen_start_kernel(void)
| ^~~~~~~~~~~~~~~~

vim +/page +348 arch/x86/xen/enlighten_pv.c

e1dab14cf68d1e0 Vitaly Kuznetsov 2017-03-14 335
e1dab14cf68d1e0 Vitaly Kuznetsov 2017-03-14 336 /*
e1dab14cf68d1e0 Vitaly Kuznetsov 2017-03-14 337 * Set the page permissions for a particular virtual address. If the
e1dab14cf68d1e0 Vitaly Kuznetsov 2017-03-14 338 * address is a vmalloc mapping (or other non-linear mapping), then
e1dab14cf68d1e0 Vitaly Kuznetsov 2017-03-14 339 * find the linear mapping of the page and also set its protections to
e1dab14cf68d1e0 Vitaly Kuznetsov 2017-03-14 340 * match.
e1dab14cf68d1e0 Vitaly Kuznetsov 2017-03-14 341 */
e1dab14cf68d1e0 Vitaly Kuznetsov 2017-03-14 342 static void set_aliased_prot(void *v, pgprot_t prot)
e1dab14cf68d1e0 Vitaly Kuznetsov 2017-03-14 343 {
e1dab14cf68d1e0 Vitaly Kuznetsov 2017-03-14 344 int level;
e1dab14cf68d1e0 Vitaly Kuznetsov 2017-03-14 345 pte_t *ptep;
e1dab14cf68d1e0 Vitaly Kuznetsov 2017-03-14 346 pte_t pte;
e1dab14cf68d1e0 Vitaly Kuznetsov 2017-03-14 347 unsigned long pfn;
e1dab14cf68d1e0 Vitaly Kuznetsov 2017-03-14 @348 struct page *page;
e1dab14cf68d1e0 Vitaly Kuznetsov 2017-03-14 349 unsigned char dummy;
64aef3716eab524 Juergen Gross 2020-08-07 350 void *av;
e1dab14cf68d1e0 Vitaly Kuznetsov 2017-03-14 351
e1dab14cf68d1e0 Vitaly Kuznetsov 2017-03-14 352 ptep = lookup_address((unsigned long)v, &level);
e1dab14cf68d1e0 Vitaly Kuznetsov 2017-03-14 353 BUG_ON(ptep == NULL);
e1dab14cf68d1e0 Vitaly Kuznetsov 2017-03-14 354
e1dab14cf68d1e0 Vitaly Kuznetsov 2017-03-14 355 pfn = pte_pfn(*ptep);
e1dab14cf68d1e0 Vitaly Kuznetsov 2017-03-14 356 page = pfn_to_page(pfn);
e1dab14cf68d1e0 Vitaly Kuznetsov 2017-03-14 357
e1dab14cf68d1e0 Vitaly Kuznetsov 2017-03-14 358 pte = pfn_pte(pfn, prot);
e1dab14cf68d1e0 Vitaly Kuznetsov 2017-03-14 359
e1dab14cf68d1e0 Vitaly Kuznetsov 2017-03-14 360 /*
e1dab14cf68d1e0 Vitaly Kuznetsov 2017-03-14 361 * Careful: update_va_mapping() will fail if the virtual address
e1dab14cf68d1e0 Vitaly Kuznetsov 2017-03-14 362 * we're poking isn't populated in the page tables. We don't
e1dab14cf68d1e0 Vitaly Kuznetsov 2017-03-14 363 * need to worry about the direct map (that's always in the page
e1dab14cf68d1e0 Vitaly Kuznetsov 2017-03-14 364 * tables), but we need to be careful about vmap space. In
e1dab14cf68d1e0 Vitaly Kuznetsov 2017-03-14 365 * particular, the top level page table can lazily propagate
e1dab14cf68d1e0 Vitaly Kuznetsov 2017-03-14 366 * entries between processes, so if we've switched mms since we
e1dab14cf68d1e0 Vitaly Kuznetsov 2017-03-14 367 * vmapped the target in the first place, we might not have the
e1dab14cf68d1e0 Vitaly Kuznetsov 2017-03-14 368 * top-level page table entry populated.
e1dab14cf68d1e0 Vitaly Kuznetsov 2017-03-14 369 *
e1dab14cf68d1e0 Vitaly Kuznetsov 2017-03-14 370 * We disable preemption because we want the same mm active when
e1dab14cf68d1e0 Vitaly Kuznetsov 2017-03-14 371 * we probe the target and when we issue the hypercall. We'll
e1dab14cf68d1e0 Vitaly Kuznetsov 2017-03-14 372 * have the same nominal mm, but if we're a kernel thread, lazy
e1dab14cf68d1e0 Vitaly Kuznetsov 2017-03-14 373 * mm dropping could change our pgd.
e1dab14cf68d1e0 Vitaly Kuznetsov 2017-03-14 374 *
e1dab14cf68d1e0 Vitaly Kuznetsov 2017-03-14 375 * Out of an abundance of caution, this uses __get_user() to fault
e1dab14cf68d1e0 Vitaly Kuznetsov 2017-03-14 376 * in the target address just in case there's some obscure case
e1dab14cf68d1e0 Vitaly Kuznetsov 2017-03-14 377 * in which the target address isn't readable.
e1dab14cf68d1e0 Vitaly Kuznetsov 2017-03-14 378 */
e1dab14cf68d1e0 Vitaly Kuznetsov 2017-03-14 379
e1dab14cf68d1e0 Vitaly Kuznetsov 2017-03-14 380 preempt_disable();
e1dab14cf68d1e0 Vitaly Kuznetsov 2017-03-14 381
fe557319aa06c23 Christoph Hellwig 2020-06-17 382 copy_from_kernel_nofault(&dummy, v, 1);
e1dab14cf68d1e0 Vitaly Kuznetsov 2017-03-14 383
e1dab14cf68d1e0 Vitaly Kuznetsov 2017-03-14 384 if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0))
e1dab14cf68d1e0 Vitaly Kuznetsov 2017-03-14 385 BUG();
e1dab14cf68d1e0 Vitaly Kuznetsov 2017-03-14 386
64aef3716eab524 Juergen Gross 2020-08-07 387 av = __va(PFN_PHYS(pfn));
e1dab14cf68d1e0 Vitaly Kuznetsov 2017-03-14 388
64aef3716eab524 Juergen Gross 2020-08-07 389 if (av != v && HYPERVISOR_update_va_mapping((unsigned long)av, pte, 0))
e1dab14cf68d1e0 Vitaly Kuznetsov 2017-03-14 390 BUG();
e1dab14cf68d1e0 Vitaly Kuznetsov 2017-03-14 391
e1dab14cf68d1e0 Vitaly Kuznetsov 2017-03-14 392 preempt_enable();
e1dab14cf68d1e0 Vitaly Kuznetsov 2017-03-14 393 }
e1dab14cf68d1e0 Vitaly Kuznetsov 2017-03-14 394
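
The warning is a direct fallout of the hunk above: the dropped
PageHighMem(page) test was the only reader of 'page', so
set_aliased_prot() now computes the variable without ever using it.
The obvious fixup (a sketch of the minimal fix, not necessarily the
exact delta folded into the next revision) is to drop the local
together with its assignment:

diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ static void set_aliased_prot(void *v, pgprot_t prot)
 int level;
 pte_t *ptep;
 pte_t pte;
 unsigned long pfn;
- struct page *page;
 unsigned char dummy;
 void *av;
@@ static void set_aliased_prot(void *v, pgprot_t prot)
 pfn = pte_pfn(*ptep);
- page = pfn_to_page(pfn);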

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/[email protected]



2020-08-09 02:27:38

by Boris Ostrovsky

Subject: Re: [PATCH v3 3/7] x86/xen: drop tests for highmem in pv code

On 8/7/20 4:38 AM, Juergen Gross wrote:
> With support for 32-bit pv guests gone, pure pv code no longer needs
> to test for highmem. Dropping those tests removes the need for
> flushing in some places.
>
> Signed-off-by: Juergen Gross <[email protected]>


Reviewed-by: Boris Ostrovsky <[email protected]>


with a suggestion


> ---
> arch/x86/xen/enlighten_pv.c |  11 ++-
> arch/x86/xen/mmu_pv.c       | 138 ++++++++++++++----------------------
> 2 files changed, 57 insertions(+), 92 deletions(-)
>
> diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
> index 7d90b3da8bb4..9fec952f84f3 100644
> --- a/arch/x86/xen/enlighten_pv.c
> +++ b/arch/x86/xen/enlighten_pv.c
> @@ -347,6 +347,7 @@ static void set_aliased_prot(void *v, pgprot_t prot)
> unsigned long pfn;
> struct page *page;
> unsigned char dummy;
> + void *av;


to rename this to va since you are modifying those lines anyway.


>
> ptep = lookup_address((unsigned long)v, &level);
> BUG_ON(ptep == NULL);
> @@ -383,14 +384,10 @@ static void set_aliased_prot(void *v, pgprot_t prot)
> if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0))
> BUG();
>
> - if (!PageHighMem(page)) {
> - void *av = __va(PFN_PHYS(pfn));
> + av = __va(PFN_PHYS(pfn));
>


2020-08-10 04:38:58

by Jürgen Groß

Subject: Re: [PATCH v3 3/7] x86/xen: drop tests for highmem in pv code

On 09.08.20 04:22, Boris Ostrovsky wrote:
> On 8/7/20 4:38 AM, Juergen Gross wrote:
>> With support for 32-bit pv guests gone, pure pv code no longer needs
>> to test for highmem. Dropping those tests removes the need for
>> flushing in some places.
>>
>> Signed-off-by: Juergen Gross <[email protected]>
>
>
> Reviewed-by: Boris Ostrovsky <[email protected]>
>
>
> with a suggestion
>
>
>> ---
>> arch/x86/xen/enlighten_pv.c |  11 ++-
>> arch/x86/xen/mmu_pv.c       | 138 ++++++++++++++----------------------
>> 2 files changed, 57 insertions(+), 92 deletions(-)
>>
>> diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
>> index 7d90b3da8bb4..9fec952f84f3 100644
>> --- a/arch/x86/xen/enlighten_pv.c
>> +++ b/arch/x86/xen/enlighten_pv.c
>> @@ -347,6 +347,7 @@ static void set_aliased_prot(void *v, pgprot_t prot)
>> unsigned long pfn;
>> struct page *page;
>> unsigned char dummy;
>> + void *av;
>
>
> to rename this to va since you are modifying those lines anyway.

Yes.


Juergen