First of all, on short copies __copy_{to,from}_user() return the number
of bytes left uncopied, *not* -EFAULT. get_user() and put_user() are
expected to return -EFAULT on failure.
Another problem is get_user(v32, (__u64 __user *)p); that should
fetch a 64-bit value and then assign it to v32, truncating it in the process.
The current code, OTOH, reads 8 bytes of data and stores them at the
address of v32, stomping on the 4 bytes that follow v32 itself.
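For illustration only, the difference boils down to something like the
userspace sketch below; fake_copy_from_user() is a made-up stand-in that,
like __copy_from_user(), returns the number of bytes left uncopied, and it
is not the kernel code itself:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* hypothetical stand-in: like __copy_from_user(), returns bytes NOT copied */
static unsigned long fake_copy_from_user(void *dst, const void *src,
					 unsigned long n)
{
	memcpy(dst, src, n);		/* pretend the access always succeeds */
	return 0;
}

int main(void)
{
	uint64_t user_val = 0x1122334455667788ULL;	/* "userspace" value */

	/* old behaviour: 8 bytes land at &s.v32, deliberately overrunning
	   the member and clobbering whatever follows it */
	struct { uint32_t v32; uint32_t neighbour; } s = { 0, 0xdeadbeef };
	fake_copy_from_user(&s.v32, &user_val, 8);
	printf("neighbour after old-style read: %#x\n", s.neighbour);

	/* intended behaviour: read into a 64-bit temporary, then assign
	   (truncate) to the 32-bit destination; convert "bytes left" to -EFAULT */
	uint32_t v32;
	uint64_t tmp = 0;
	long err = fake_copy_from_user(&tmp, &user_val, 8) ? -14 /* -EFAULT */ : 0;
	v32 = (uint32_t)tmp;
	printf("err=%ld v32=%#x, neighbour untouched\n", err, v32);
	return 0;
}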
Signed-off-by: Al Viro <[email protected]>
--
diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h
index 6792928ba84a..155174ddb7ae 100644
--- a/arch/xtensa/include/asm/uaccess.h
+++ b/arch/xtensa/include/asm/uaccess.h
@@ -100,7 +100,7 @@ do { \
case 4: __put_user_asm(x, ptr, retval, 4, "s32i", __cb); break; \
case 8: { \
__typeof__(*ptr) __v64 = x; \
- retval = __copy_to_user(ptr, &__v64, 8); \
+ retval = __copy_to_user(ptr, &__v64, 8) ? -EFAULT : 0; \
break; \
} \
default: __put_user_bad(); \
@@ -198,7 +198,12 @@ do { \
case 1: __get_user_asm(x, ptr, retval, 1, "l8ui", __cb); break;\
case 2: __get_user_asm(x, ptr, retval, 2, "l16ui", __cb); break;\
case 4: __get_user_asm(x, ptr, retval, 4, "l32i", __cb); break;\
- case 8: retval = __copy_from_user(&x, ptr, 8); break; \
+ case 8: { \
+ __u64 __x = 0; \
+ retval = __copy_from_user(&__x, ptr, 8) ? -EFAULT : 0; \
+ (x) = *(__force __typeof__(*(ptr)) *) &__x; \
+ break; \
+ } \
default: (x) = __get_user_bad(); \
} \
} while (0)
On Wed, Oct 9, 2019 at 12:21 PM Al Viro <[email protected]> wrote:
>
> First of all, on short copies __copy_{to,from}_user() return the number
> of bytes left uncopied, *not* -EFAULT. get_user() and put_user() are
> expected to return -EFAULT on failure.
>
> Another problem is get_user(v32, (__u64 __user *)p); that should
> fetch a 64-bit value and then assign it to v32, truncating it in the process.
> The current code, OTOH, reads 8 bytes of data and stores them at the
> address of v32, stomping on the 4 bytes that follow v32 itself.
>
> Signed-off-by: Al Viro <[email protected]>
> --
> diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h
> index 6792928ba84a..155174ddb7ae 100644
> --- a/arch/xtensa/include/asm/uaccess.h
> +++ b/arch/xtensa/include/asm/uaccess.h
> @@ -100,7 +100,7 @@ do { \
> case 4: __put_user_asm(x, ptr, retval, 4, "s32i", __cb); break; \
> case 8: { \
> __typeof__(*ptr) __v64 = x; \
> - retval = __copy_to_user(ptr, &__v64, 8); \
> + retval = __copy_to_user(ptr, &__v64, 8) ? -EFAULT : 0; \
Sure, I agree with that.
> break; \
> } \
> default: __put_user_bad(); \
> @@ -198,7 +198,12 @@ do { \
> case 1: __get_user_asm(x, ptr, retval, 1, "l8ui", __cb); break;\
> case 2: __get_user_asm(x, ptr, retval, 2, "l16ui", __cb); break;\
> case 4: __get_user_asm(x, ptr, retval, 4, "l32i", __cb); break;\
> - case 8: retval = __copy_from_user(&x, ptr, 8); break; \
> + case 8: { \
> + __u64 __x = 0; \
> + retval = __copy_from_user(&__x, ptr, 8) ? -EFAULT : 0; \
> + (x) = *(__force __typeof__(*(ptr)) *) &__x; \
> + break; \
> + } \
There's also the following code in the callers of this macro, e.g. in
__get_user_nocheck:
long __gu_err, __gu_val; \
__get_user_size(__gu_val, (ptr), (size), __gu_err); \
(x) = (__force __typeof__(*(ptr)))__gu_val; \
the last line is important for sizes 1..4, because it takes care of
sign extension of the value loaded by the assembly.
At the same time the first line doesn't make sense for the size 8
as it will result in value truncation.
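The sign-extension part can be seen with a trivial userspace model (plain
variables stand in for __gu_val and x; l8ui's zero extension is modelled
with an unsigned char cast):

#include <stdio.h>

int main(void)
{
	signed char user_byte = -1;		/* what sits in user memory */

	/* l8ui zero-extends, so the raw value in the long temporary is 255 */
	long gu_val = (unsigned char)user_byte;

	/* the cast through __typeof__(*(ptr)) turns it back into -1 */
	signed char x = (signed char)gu_val;

	printf("raw temporary = %ld, after the cast: %d\n", gu_val, x);
	return 0;
}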
How about the following change instead:
diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h
index 6792928ba84a..c54893651d69 100644
--- a/arch/xtensa/include/asm/uaccess.h
+++ b/arch/xtensa/include/asm/uaccess.h
@@ -100,7 +100,7 @@ do { \
case 4: __put_user_asm(x, ptr, retval, 4, "s32i", __cb); break; \
case 8: { \
__typeof__(*ptr) __v64 = x; \
- retval = __copy_to_user(ptr, &__v64, 8); \
+ retval = __copy_to_user(ptr, &__v64, 8) ? -EFAULT : 0; \
break; \
} \
default: __put_user_bad(); \
@@ -172,7 +172,8 @@ __asm__ __volatile__( \
#define __get_user_nocheck(x, ptr, size) \
({ \
- long __gu_err, __gu_val; \
+ long __gu_err; \
+ __typeof__(*(ptr) + 0) __gu_val; \
__get_user_size(__gu_val, (ptr), (size), __gu_err); \
(x) = (__force __typeof__(*(ptr)))__gu_val; \
__gu_err; \
@@ -180,7 +181,8 @@ __asm__ __volatile__( \
#define __get_user_check(x, ptr, size) \
({ \
- long __gu_err = -EFAULT, __gu_val = 0; \
+ long __gu_err = -EFAULT; \
+ __typeof__(*(ptr) + 0) __gu_val = 0; \
const __typeof__(*(ptr)) *__gu_addr = (ptr); \
if (access_ok(__gu_addr, size)) \
__get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
@@ -198,7 +200,7 @@ do { \
case 1: __get_user_asm(x, ptr, retval, 1, "l8ui", __cb); break;\
case 2: __get_user_asm(x, ptr, retval, 2, "l16ui", __cb); break;\
case 4: __get_user_asm(x, ptr, retval, 4, "l32i", __cb); break;\
- case 8: retval = __copy_from_user(&x, ptr, 8); break; \
+ case 8: retval = __copy_from_user(&x, ptr, 8) ? -EFAULT : 0; break; \
default: (x) = __get_user_bad(); \
} \
} while (0)
Here __typeof__(*(ptr) + 0) makes enough room for all cases
in the __get_user_size and the "+0" part takes care of pointers
to const data.
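A quick way to see what that typeof expression yields (compiles with GCC;
the pointers are only used inside __typeof__ and never dereferenced):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t *p64 = 0;
	const signed char *p8 = 0;

	/* "+ 0" promotes the value and drops the const qualifier, so the
	   temporary is writable and wide enough for the 8-byte case */
	__typeof__(*(p64) + 0) v64 = 0;		/* uint64_t, not long */
	__typeof__(*(p8) + 0) v8 = 0;		/* int, via integer promotion */

	printf("sizeof v64 = %zu, sizeof v8 = %zu\n", sizeof v64, sizeof v8);
	return 0;
}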
--
Thanks.
-- Max
On Wed, Oct 09, 2019 at 06:38:12PM -0700, Max Filippov wrote:
> There's also the following code in the callers of this macro, e.g. in
> __get_user_nocheck:
>
> long __gu_err, __gu_val; \
> __get_user_size(__gu_val, (ptr), (size), __gu_err); \
> (x) = (__force __typeof__(*(ptr)))__gu_val; \
>
> the last line is important for sizes 1..4, because it takes care of
> sign extension of the value loaded by the assembly.
> At the same time the first line doesn't make sense for the size 8
> as it will result in value truncation.
Right you are...
> + long __gu_err; \
> + __typeof__(*(ptr) + 0) __gu_val; \
What would __u64 __gu_val; end up with for smaller sizes? I don't have
an xtensa cross-toolchain at the moment, so I can't check it easily;
what does the =r constraint generate in such a case?
Another thing is, you want to zero it on failure, to avoid an uninitialized
value ending up someplace interesting....
> @@ -198,7 +200,7 @@ do { \
> case 1: __get_user_asm(x, ptr, retval, 1, "l8ui", __cb); break;\
> case 2: __get_user_asm(x, ptr, retval, 2, "l16ui", __cb); break;\
> case 4: __get_user_asm(x, ptr, retval, 4, "l32i", __cb); break;\
> - case 8: retval = __copy_from_user(&x, ptr, 8); break; \
> + case 8: retval = __copy_from_user(&x, ptr, 8) ? -EFAULT : 0; break; \
> default: (x) = __get_user_bad(); \
> } \
> } while (0)
>
> Here __typeof__(*(ptr) + 0) makes enough room for all cases
> in the __get_user_size and the "+0" part takes care of pointers
> to const data.
On Wed, Oct 9, 2019 at 6:56 PM Al Viro <[email protected]> wrote:
>
> On Wed, Oct 09, 2019 at 06:38:12PM -0700, Max Filippov wrote:
>
> > There's also the following code in the callers of this macro, e.g. in
> > __get_user_nocheck:
> >
> > long __gu_err, __gu_val; \
> > __get_user_size(__gu_val, (ptr), (size), __gu_err); \
> > (x) = (__force __typeof__(*(ptr)))__gu_val; \
> >
> > the last line is important for sizes 1..4, because it takes care of
> > sign extension of the value loaded by the assembly.
> > At the same time the first line doesn't make sense for the size 8
> > as it will result in value truncation.
>
> Right you are...
>
> > + long __gu_err; \
> > + __typeof__(*(ptr) + 0) __gu_val; \
>
> What would __u64 __gu_val; end up with for smaller sizes?
It does a good job with little-endian cores, i.e. the generated code is
the same in both cases, but on big endian it looks into the wrong register
of the register pair that represents __gu_val. E.g.:
foo_in_s8_out_s8:
entry sp, 32 #
mov.n a7, sp # a5,
# /home/jcmvbkbc/ws/tensilica/linux/linux-xtensa/arch/xtensa/kernel/signal.c:518: gen_outs(8)
movi.n a8, 0 # __gu_err,
#APP
# 518 "/home/jcmvbkbc/ws/tensilica/linux/linux-xtensa/arch/xtensa/kernel/signal.c" 1
1: l8ui a10, a2, 0 # __gu_val, p
2:
.section .fixup,"ax"
.align 4
.literal_position
5:
movi a2, 2b # __cb
movi a10, 0 # __gu_val
movi a8, -14 # __gu_err,
jx a2 # __cb
.previous
.section __ex_table,"a"
.long 1b, 5b
.previous
# 0 "" 2
#NO_APP
extui a2, a11, 0, 8 #, __gu_val
retw.n
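Roughly the same effect can be modelled in plain C with memory instead of
the register pair (just an analogy, not what the compiler actually does
with registers):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint64_t gu_val = 0;
	uint32_t loaded = 0x7f;		/* the value the load would produce */

	/* write it into the "first" half of the 64-bit temporary only */
	memcpy(&gu_val, &loaded, sizeof(loaded));

	/* little endian: 0x000000000000007f (the low half, as intended)
	   big endian:    0x0000007f00000000 (the wrong half)           */
	printf("gu_val = 0x%016llx\n", (unsigned long long)gu_val);
	return 0;
}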
> I don't have
> an xtensa cross-toolchain at the moment, so I can't check it easily;
> what does the =r constraint generate in such a case?
Lower register of the register pair.
> Another thing is, you want to zero it on failure, to avoid an uninitialized
> value ending up someplace interesting....
Ok, this?
diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h
index 6792928ba84a..0bdaadf1636e 100644
--- a/arch/xtensa/include/asm/uaccess.h
+++ b/arch/xtensa/include/asm/uaccess.h
@@ -100,7 +100,7 @@ do { \
case 4: __put_user_asm(x, ptr, retval, 4, "s32i", __cb); break; \
case 8: { \
__typeof__(*ptr) __v64 = x; \
- retval = __copy_to_user(ptr, &__v64, 8); \
+ retval = __copy_to_user(ptr, &__v64, 8) ? -EFAULT : 0; \
break; \
} \
default: __put_user_bad(); \
@@ -172,7 +172,8 @@ __asm__ __volatile__( \
#define __get_user_nocheck(x, ptr, size) \
({ \
- long __gu_err, __gu_val; \
+ long __gu_err; \
+ __typeof__(*(ptr) + 0) __gu_val = 0; \
__get_user_size(__gu_val, (ptr), (size), __gu_err); \
(x) = (__force __typeof__(*(ptr)))__gu_val; \
__gu_err; \
@@ -180,7 +181,8 @@ __asm__ __volatile__( \
#define __get_user_check(x, ptr, size) \
({ \
- long __gu_err = -EFAULT, __gu_val = 0; \
+ long __gu_err = -EFAULT; \
+ __typeof__(*(ptr) + 0) __gu_val = 0; \
const __typeof__(*(ptr)) *__gu_addr = (ptr); \
if (access_ok(__gu_addr, size)) \
__get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
@@ -198,7 +200,7 @@ do { \
case 1: __get_user_asm(x, ptr, retval, 1, "l8ui", __cb); break;\
case 2: __get_user_asm(x, ptr, retval, 2, "l16ui", __cb); break;\
case 4: __get_user_asm(x, ptr, retval, 4, "l32i", __cb); break;\
- case 8: retval = __copy_from_user(&x, ptr, 8); break; \
+ case 8: retval = __copy_from_user(&x, ptr, 8) ? -EFAULT : 0; break; \
default: (x) = __get_user_bad(); \
} \
} while (0)
--
Thanks.
-- Max
On Wed, Oct 09, 2019 at 07:11:40PM -0700, Max Filippov wrote:
> > I don't have
> > an xtensa cross-toolchain at the moment, so I can't check it easily;
> > what does the =r constraint generate in such a case?
>
> Lower register of the register pair.
OK...
> > Another thing is, you want to zero it on failure, to avoid an uninitialized
> > value ending up someplace interesting....
>
> Ok, this?
> #define __get_user_nocheck(x, ptr, size) \
> ({ \
> - long __gu_err, __gu_val; \
> + long __gu_err; \
> + __typeof__(*(ptr) + 0) __gu_val = 0; \
> __get_user_size(__gu_val, (ptr), (size), __gu_err); \
> (x) = (__force __typeof__(*(ptr)))__gu_val; \
> __gu_err; \
> @@ -180,7 +181,8 @@ __asm__ __volatile__( \
>
> #define __get_user_check(x, ptr, size) \
> ({ \
> - long __gu_err = -EFAULT, __gu_val = 0; \
> + long __gu_err = -EFAULT; \
> + __typeof__(*(ptr) + 0) __gu_val = 0; \
> const __typeof__(*(ptr)) *__gu_addr = (ptr); \
> if (access_ok(__gu_addr, size)) \
> __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
> @@ -198,7 +200,7 @@ do { \
> case 1: __get_user_asm(x, ptr, retval, 1, "l8ui", __cb); break;\
> case 2: __get_user_asm(x, ptr, retval, 2, "l16ui", __cb); break;\
> case 4: __get_user_asm(x, ptr, retval, 4, "l32i", __cb); break;\
> - case 8: retval = __copy_from_user(&x, ptr, 8); break; \
> + case 8: retval = __copy_from_user(&x, ptr, 8) ? -EFAULT : 0; break; \
> default: (x) = __get_user_bad(); \
> } \
> } while (0)
Hmm... Looking at __get_user_size(), we have retval = 0; very early
in it. So I wonder if it should simply be
#define __get_user_size(x, ptr, size, retval) \
do { \
int __cb; \
retval = 0; \
switch (size) { \
case 1: __get_user_asm(x, ptr, retval, 1, "l8ui", __cb); break;\
case 2: __get_user_asm(x, ptr, retval, 2, "l16ui", __cb); break;\
case 4: __get_user_asm(x, ptr, retval, 4, "l32i", __cb); break;\
case 8: if (unlikely(__copy_from_user(&x, ptr, 8))) { \
retval = -EFAULT; \
x = 0; \
} \
break; \
default: (x) = __get_user_bad(); \
} \
} while (0)
so that the 64-bit case is closer to the others in that respect (i.e. zeroing
done on failure and out of line). No?
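A userspace model of that case-8 branch, with a deliberately failing stand-in
for __copy_from_user(), just to show the caller-visible result, i.e. a zeroed
destination and -EFAULT:

#include <stdint.h>
#include <stdio.h>

/* hypothetical stand-in that always "faults": returns bytes left uncopied */
static unsigned long failing_copy_from_user(void *dst, const void *src,
					    unsigned long n)
{
	(void)dst; (void)src;
	return n;			/* nothing copied */
}

int main(void)
{
	uint64_t x = 0xdeadbeefdeadbeefULL;	/* pretend this is stack garbage */
	uint64_t dummy_user = 0;
	long retval = 0;

	if (failing_copy_from_user(&x, &dummy_user, 8)) {
		retval = -14;			/* -EFAULT */
		x = 0;				/* don't leak the garbage */
	}

	printf("retval=%ld x=%llu\n", retval, (unsigned long long)x);
	return 0;
}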
On Thu, Oct 10, 2019 at 7:29 AM Al Viro <[email protected]> wrote:
> Hmm... Looking at __get_user_size(), we have retval = 0; very early
> in it. So I wonder if it should simply be
> #define __get_user_size(x, ptr, size, retval) \
> do { \
> int __cb; \
> retval = 0; \
> switch (size) { \
> case 1: __get_user_asm(x, ptr, retval, 1, "l8ui", __cb); break;\
> case 2: __get_user_asm(x, ptr, retval, 2, "l16ui", __cb); break;\
> case 4: __get_user_asm(x, ptr, retval, 4, "l32i", __cb); break;\
> case 8: if (unlikely(__copy_from_user(&x, ptr, 8))) { \
> retval = -EFAULT; \
> x = 0; \
> } \
> break; \
> default: (x) = __get_user_bad(); \
> } \
> } while (0)
> so that the 64-bit case is closer to the others in that respect (i.e. zeroing
> done on failure and out of line). No?
Ok, I agree.
The intermediate __gu_val in __get_user_[no]check doesn't work well
with some data types used in the kernel, unfortunately. I'll post a series
with something close to your initial patch on top of a rearranged
__get_user_[no]check.
--
Thanks.
-- Max