In the optimized x86 version of the copy-with-checksum helpers, use
instrument_*() before accessing buffers from assembly code so that KASAN
and KCSAN don't have blind spots there.
Signed-off-by: Jann Horn <[email protected]>
---
Notes:
v2: use instrument_copy_{from,to}_user instead of instrument_{read,write}
where appropriate (dvyukov)
arch/x86/lib/csum-partial_64.c | 3 +++
arch/x86/lib/csum-wrappers_64.c | 9 +++++++++
2 files changed, 12 insertions(+)
diff --git a/arch/x86/lib/csum-partial_64.c b/arch/x86/lib/csum-partial_64.c
index 1f8a8f895173..8b0c353cd212 100644
--- a/arch/x86/lib/csum-partial_64.c
+++ b/arch/x86/lib/csum-partial_64.c
@@ -8,6 +8,7 @@
#include <linux/compiler.h>
#include <linux/export.h>
+#include <linux/instrumented.h>
#include <asm/checksum.h>
#include <asm/word-at-a-time.h>
@@ -37,6 +38,8 @@ __wsum csum_partial(const void *buff, int len, __wsum sum)
u64 temp64 = (__force u64)sum;
unsigned odd, result;
+ instrument_read(buff, len);
+
odd = 1 & (unsigned long) buff;
if (unlikely(odd)) {
if (unlikely(len == 0))
diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
index 189344924a2b..c44973b8f255 100644
--- a/arch/x86/lib/csum-wrappers_64.c
+++ b/arch/x86/lib/csum-wrappers_64.c
@@ -6,6 +6,8 @@
*/
#include <asm/checksum.h>
#include <linux/export.h>
+#include <linux/in6.h>
+#include <linux/instrumented.h>
#include <linux/uaccess.h>
#include <asm/smap.h>
@@ -26,6 +28,7 @@ csum_and_copy_from_user(const void __user *src, void *dst, int len)
__wsum sum;
might_sleep();
+ instrument_copy_from_user(dst, src, len);
if (!user_access_begin(src, len))
return 0;
sum = csum_partial_copy_generic((__force const void *)src, dst, len);
@@ -51,6 +54,7 @@ csum_and_copy_to_user(const void *src, void __user *dst, int len)
__wsum sum;
might_sleep();
+ instrument_copy_to_user(dst, src, len);
if (!user_access_begin(dst, len))
return 0;
sum = csum_partial_copy_generic(src, (void __force *)dst, len);
@@ -71,6 +75,8 @@ EXPORT_SYMBOL(csum_and_copy_to_user);
__wsum
csum_partial_copy_nocheck(const void *src, void *dst, int len)
{
+ instrument_write(dst, len);
+ instrument_read(src, len);
return csum_partial_copy_generic(src, dst, len);
}
EXPORT_SYMBOL(csum_partial_copy_nocheck);
@@ -81,6 +87,9 @@ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
{
__u64 rest, sum64;
+ instrument_read(saddr, sizeof(*saddr));
+ instrument_read(daddr, sizeof(*daddr));
+
rest = (__force __u64)htonl(len) + (__force __u64)htons(proto) +
(__force __u64)sum;
base-commit: 0280e3c58f92b2fe0e8fbbdf8d386449168de4a8
--
2.35.0.rc0.227.g00780c9af4-goog
On Fri, 28 Jan 2022 at 01:08, Jann Horn <[email protected]> wrote:
>
> In the optimized X86 version of the copy-with-checksum helpers, use
> instrument_*() before accessing buffers from assembly code so that KASAN
> and KCSAN don't have blind spots there.
>
> Signed-off-by: Jann Horn <[email protected]>
Reviewed-by: Dmitry Vyukov <[email protected]>
Thanks
> ---
>
> Notes:
> v2: use instrument_copy_{from,to}_user instead of instrument_{read,write}
> where appropriate (dvyukov)
>
> arch/x86/lib/csum-partial_64.c | 3 +++
> arch/x86/lib/csum-wrappers_64.c | 9 +++++++++
> 2 files changed, 12 insertions(+)
>
> diff --git a/arch/x86/lib/csum-partial_64.c b/arch/x86/lib/csum-partial_64.c
> index 1f8a8f895173..8b0c353cd212 100644
> --- a/arch/x86/lib/csum-partial_64.c
> +++ b/arch/x86/lib/csum-partial_64.c
> @@ -8,6 +8,7 @@
>
> #include <linux/compiler.h>
> #include <linux/export.h>
> +#include <linux/instrumented.h>
> #include <asm/checksum.h>
> #include <asm/word-at-a-time.h>
>
> @@ -37,6 +38,8 @@ __wsum csum_partial(const void *buff, int len, __wsum sum)
> u64 temp64 = (__force u64)sum;
> unsigned odd, result;
>
> + instrument_read(buff, len);
> +
> odd = 1 & (unsigned long) buff;
> if (unlikely(odd)) {
> if (unlikely(len == 0))
> diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
> index 189344924a2b..c44973b8f255 100644
> --- a/arch/x86/lib/csum-wrappers_64.c
> +++ b/arch/x86/lib/csum-wrappers_64.c
> @@ -6,6 +6,8 @@
> */
> #include <asm/checksum.h>
> #include <linux/export.h>
> +#include <linux/in6.h>
> +#include <linux/instrumented.h>
> #include <linux/uaccess.h>
> #include <asm/smap.h>
>
> @@ -26,6 +28,7 @@ csum_and_copy_from_user(const void __user *src, void *dst, int len)
> __wsum sum;
>
> might_sleep();
> + instrument_copy_from_user(dst, src, len);
> if (!user_access_begin(src, len))
> return 0;
> sum = csum_partial_copy_generic((__force const void *)src, dst, len);
> @@ -51,6 +54,7 @@ csum_and_copy_to_user(const void *src, void __user *dst, int len)
> __wsum sum;
>
> might_sleep();
> + instrument_copy_to_user(dst, src, len);
> if (!user_access_begin(dst, len))
> return 0;
> sum = csum_partial_copy_generic(src, (void __force *)dst, len);
> @@ -71,6 +75,8 @@ EXPORT_SYMBOL(csum_and_copy_to_user);
> __wsum
> csum_partial_copy_nocheck(const void *src, void *dst, int len)
> {
> + instrument_write(dst, len);
> + instrument_read(src, len);
> return csum_partial_copy_generic(src, dst, len);
> }
> EXPORT_SYMBOL(csum_partial_copy_nocheck);
> @@ -81,6 +87,9 @@ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
> {
> __u64 rest, sum64;
>
> + instrument_read(saddr, sizeof(*saddr));
> + instrument_read(daddr, sizeof(*daddr));
> +
> rest = (__force __u64)htonl(len) + (__force __u64)htons(proto) +
> (__force __u64)sum;
>
>
> base-commit: 0280e3c58f92b2fe0e8fbbdf8d386449168de4a8
> --
> 2.35.0.rc0.227.g00780c9af4-goog
>