2018-11-07 11:31:02

by Jürgen Groß

Subject: [PATCH] x86/xen: fix pv boot

Commit 9da3f2b7405440 ("x86/fault: BUG() when uaccess helpers fault on
kernel addresses") introduced a regression for booting Xen PV guests.

Xen PV guests use __put_user() and __get_user() to access the
p2m map (physical to machine frame number map), as accesses might
fault in not yet populated areas of the map.

With the above commit, using __put_user() and __get_user() to access
kernel pages is no longer valid. So replace the Xen hack with
dedicated p2m access functions that use the default fixup handler.

Fixes: 9da3f2b7405440 ("x86/fault: BUG() when uaccess helpers fault on kernel addresses")
Signed-off-by: Juergen Gross <[email protected]>
---
arch/x86/include/asm/xen/page.h | 39 ++++++++++++++++++++++++++++++++++++---
arch/x86/xen/p2m.c              |  3 +--
2 files changed, 37 insertions(+), 5 deletions(-)

diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index 123e669bf363..a39015cb2f3f 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -9,7 +9,7 @@
#include <linux/mm.h>
#include <linux/device.h>

-#include <linux/uaccess.h>
+#include <asm/extable.h>
#include <asm/page.h>
#include <asm/pgtable.h>

@@ -91,15 +91,48 @@ clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
* Helper functions to write or read unsigned long values to/from
* memory, when the access may fault.
*/
+#ifdef CONFIG_X86_32
+#define __i "l"
+#define __r "k"
+#else
+#define __i "q"
+#define __r ""
+#endif
static inline int xen_safe_write_ulong(unsigned long *addr, unsigned long val)
{
- return __put_user(val, (unsigned long __user *)addr);
+ int ret = 0;
+
+ asm volatile("1: mov"__i" %"__r"1,%2\n"
+ "2:\n"
+ ".section .fixup,\"ax\"\n"
+ "3: mov %3,%0\n"
+ " jmp 2b\n"
+ ".previous\n"
+ _ASM_EXTABLE(1b, 3b)
+ : "=r"(ret)
+ : "r" (val), "m" (*addr), "i" (-1), "0" (ret));
+ return ret;
}

static inline int xen_safe_read_ulong(unsigned long *addr, unsigned long *val)
{
- return __get_user(*val, (unsigned long __user *)addr);
+ int ret = 0;
+ unsigned long rval;
+
+ asm volatile("1: mov"__i" %2,%"__r"1\n"
+ "2:\n"
+ ".section .fixup,\"ax\"\n"
+ "3: mov %3,%0\n"
+ " jmp 2b\n"
+ ".previous\n"
+ _ASM_EXTABLE(1b, 3b)
+ : "=r" (ret), "=r" (rval)
+ : "m" (*addr), "i" (-1), "0" (ret));
+ *val = rval;
+ return ret;
}
+#undef __i
+#undef __r

#ifdef CONFIG_XEN_PV
/*
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index b06731705529..055e37e43541 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -656,8 +656,7 @@ bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)

/*
* The interface requires atomic updates on p2m elements.
- * xen_safe_write_ulong() is using __put_user which does an atomic
- * store via asm().
+ * xen_safe_write_ulong() is using an atomic store via asm().
*/
if (likely(!xen_safe_write_ulong(xen_p2m_addr + pfn, mfn)))
return true;
--
2.16.4



2018-11-07 11:57:42

by Andrew Cooper

Subject: Re: [Xen-devel] [PATCH] x86/xen: fix pv boot

On 07/11/18 11:30, Juergen Gross wrote:
> Commit 9da3f2b7405440 ("x86/fault: BUG() when uaccess helpers fault on
> kernel addresses") introduced a regression for booting Xen PV guests.
>
> Xen PV guests use __put_user() and __get_user() to access the
> p2m map (physical to machine frame number map), as accesses might
> fault in not yet populated areas of the map.
>
> With the above commit, using __put_user() and __get_user() to access
> kernel pages is no longer valid. So replace the Xen hack with
> dedicated p2m access functions that use the default fixup handler.
>
> Fixes: 9da3f2b7405440 ("x86/fault: BUG() when uaccess helpers fault on kernel addresses")
> Signed-off-by: Juergen Gross <[email protected]>
> ---
> arch/x86/include/asm/xen/page.h | 39 ++++++++++++++++++++++++++++++++++++---
> arch/x86/xen/p2m.c              |  3 +--
> 2 files changed, 37 insertions(+), 5 deletions(-)
>
> diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
> index 123e669bf363..a39015cb2f3f 100644
> --- a/arch/x86/include/asm/xen/page.h
> +++ b/arch/x86/include/asm/xen/page.h
> @@ -9,7 +9,7 @@
> #include <linux/mm.h>
> #include <linux/device.h>
>
> -#include <linux/uaccess.h>
> +#include <asm/extable.h>
> #include <asm/page.h>
> #include <asm/pgtable.h>
>
> @@ -91,15 +91,48 @@ clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
> * Helper functions to write or read unsigned long values to/from
> * memory, when the access may fault.
> */
> +#ifdef CONFIG_X86_32
> +#define __i "l"
> +#define __r "k"
> +#else
> +#define __i "q"
> +#define __r ""
> +#endif

You don't need any of these.  GCC and Clang can derive the correct
encoding from the types of the operands passed in, and will DTRT given
unsigned longs.

Furthermore, dropping them makes the asm easier to read.
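
E.g. with unsigned long operands a bare mov already does the right
thing, because the compiler substitutes 32- or 64-bit register names
and the assembler sizes the instruction from those. A minimal sketch
(not the patch itself):

static inline void store_demo(unsigned long *addr, unsigned long val)
{
    /* Emits "mov %eax, (%edx)"-style code on 32-bit and
     * "mov %rax, (%rdx)"-style code on 64-bit - no l/q suffix
     * or register modifier needed. */
    asm volatile("mov %1, %0" : "=m" (*addr) : "r" (val));
}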

> static inline int xen_safe_write_ulong(unsigned long *addr, unsigned long val)
> {
> - return __put_user(val, (unsigned long __user *)addr);
> + int ret = 0;
> +
> + asm volatile("1: mov"__i" %"__r"1,%2\n"
> + "2:\n"
> + ".section .fixup,\"ax\"\n"
> + "3: mov %3,%0\n"
> + " jmp 2b\n"
> + ".previous\n"
> + _ASM_EXTABLE(1b, 3b)
> + : "=r"(ret)
> + : "r" (val), "m" (*addr), "i" (-1), "0" (ret));

This constraint hides the write to *addr from the compiler.
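
i.e. the "m" input tells the compiler that the asm only reads *addr,
so it may cache or reorder around the store. Roughly, instead of:

    : "=r" (ret)
    : "r" (val), "m" (*addr), ...

the store target needs to be an output:

    : "=r" (ret), "=m" (*addr)
    : "r" (val), ...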

> + return ret;
> }
>
> static inline int xen_safe_read_ulong(unsigned long *addr, unsigned long *val)
> {
> - return __get_user(*val, (unsigned long __user *)addr);
> + int ret = 0;
> + unsigned long rval;
> +
> + asm volatile("1: mov"__i" %2,%"__r"1\n"
> + "2:\n"
> + ".section .fixup,\"ax\"\n"
> + "3: mov %3,%0\n"
> + " jmp 2b\n"
> + ".previous\n"
> + _ASM_EXTABLE(1b, 3b)
> + : "=r" (ret), "=r" (rval)
> + : "m" (*addr), "i" (-1), "0" (ret));
> + *val = rval;
> + return ret;

This also uses rval uninitialised if a fault occurs.

Overall, how about:

static inline int xen_safe_write_ulong(unsigned long *addr,
                                       unsigned long val)
{
    int ret = 0;

    asm volatile("1: mov %[val], %[ptr]\n"
                 "2:\n"
                 ".section .fixup, \"ax\"\n"
                 "3: mov %[err], %[ret]\n"
                 "   jmp 2b\n"
                 ".previous\n"
                 _ASM_EXTABLE(1b, 3b)
                 : [ret] "+r" (ret), [ptr] "=m" (*addr)
                 : [val] "r" (val), [err] "i" (-1));

    return ret;
}

static inline int xen_safe_read_ulong(const unsigned long *addr,
                                      unsigned long *val)
{
    int ret = 0;
    unsigned long rval = ~0ul;

    asm volatile("1: mov %[ptr], %[rval]\n"
                 "2:\n"
                 ".section .fixup, \"ax\"\n"
                 "3: mov %[err], %[ret]\n"
                 "   jmp 2b\n"
                 ".previous\n"
                 _ASM_EXTABLE(1b, 3b)
                 : [ret] "+r" (ret), [rval] "+r" (rval)
                 : [ptr] "m" (*addr), [err] "i" (-1));
    *val = rval;

    return ret;
}

?

~Andrew

2018-11-07 12:03:49

by Andrew Cooper

Subject: Re: [Xen-devel] [PATCH] x86/xen: fix pv boot

On 07/11/18 11:55, Andrew Cooper wrote:
> On 07/11/18 11:30, Juergen Gross wrote:
>> + return ret;
>> }
>>
>> static inline int xen_safe_read_ulong(unsigned long *addr, unsigned long *val)
>> {
>> - return __get_user(*val, (unsigned long __user *)addr);
>> + int ret = 0;
>> + unsigned long rval;
>> +
>> + asm volatile("1: mov"__i" %2,%"__r"1\n"
>> + "2:\n"
>> + ".section .fixup,\"ax\"\n"
>> + "3: mov %3,%0\n"
>> + " jmp 2b\n"
>> + ".previous\n"
>> + _ASM_EXTABLE(1b, 3b)
>> + : "=r" (ret), "=r" (rval)
>> + : "m" (*addr), "i" (-1), "0" (ret));
>> + *val = rval;
>> + return ret;
> This also uses rval uninitialised if a fault occurs.
>
> Overall, how about:
>
> static inline int xen_safe_write_ulong(unsigned long *addr,
>                                        unsigned long val)
> {
>     int ret = 0;
>
>     asm volatile("1: mov %[val], %[ptr]\n"
>                  "2:\n"
>                  ".section .fixup, \"ax\"\n"
>                  "3: mov %[err], %[ret]\n"

Actually, if you want a shorter fixup path, `sub $1, %[ret]` would also
do, letting you drop the [err] constraint.
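
i.e. the fixup section would shrink to something like:

             ".section .fixup, \"ax\"\n"
             "3: sub $1, %[ret]\n"    /* ret enters as 0, leaves as -1 */
             "   jmp 2b\n"
             ".previous\n"

with [err] "i" (-1) dropped from the input list.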

~Andrew

2018-11-07 12:17:26

by Jürgen Groß

Subject: Re: [Xen-devel] [PATCH] x86/xen: fix pv boot

On 07/11/2018 13:02, Andrew Cooper wrote:
> On 07/11/18 11:55, Andrew Cooper wrote:
>> On 07/11/18 11:30, Juergen Gross wrote:
>>> + return ret;
>>> }
>>>
>>> static inline int xen_safe_read_ulong(unsigned long *addr, unsigned long *val)
>>> {
>>> - return __get_user(*val, (unsigned long __user *)addr);
>>> + int ret = 0;
>>> + unsigned long rval;
>>> +
>>> + asm volatile("1: mov"__i" %2,%"__r"1\n"
>>> + "2:\n"
>>> + ".section .fixup,\"ax\"\n"
>>> + "3: mov %3,%0\n"
>>> + " jmp 2b\n"
>>> + ".previous\n"
>>> + _ASM_EXTABLE(1b, 3b)
>>> + : "=r" (ret), "=r" (rval)
>>> + : "m" (*addr), "i" (-1), "0" (ret));
>>> + *val = rval;
>>> + return ret;
>> This also uses rval uninitialised if a fault occurs.
>>
>> Overall, how about:
>>
>> static inline int xen_safe_write_ulong(unsigned long *addr,
>>                                        unsigned long val)
>> {
>>     int ret = 0;
>>
>>     asm volatile("1: mov %[val], %[ptr]\n"
>>                  "2:\n"
>>                  ".section .fixup, \"ax\"\n"
>>                  "3: mov %[err], %[ret]\n"
>
> Actually, if you want a shorter fixup path, `sub $1, %[ret]` would also
> do, and drop the [err] constraint.

Thanks for the review.

Will send V2 soon.


Juergen