Hi,
As part of enabling the signed integer overflow sanitizer for production
use, we have to annotate the atomics that expect to use wrapping signed
values. Do this for x86, arm64, and the generic fallbacks. Additionally
annotate the first place anyone will trip over signed integer
wrap-around: ipv4, which has traditionally carried a comment hinting at
how to debug sanitizer issues.
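For context, the annotations used throughout the series follow roughly
this shape (a sketch of the helpers in <linux/compiler_types.h> and
<linux/overflow.h>; the exact definitions in the tree may differ):

	/* Exempt an entire function from signed-overflow instrumentation. */
	#ifdef CONFIG_UBSAN_SIGNED_WRAP
	# define __signed_wrap __attribute__((no_sanitize("signed-integer-overflow")))
	#else
	# define __signed_wrap
	#endif

	/*
	 * Wrap a single operation: __builtin_add_overflow() performs a
	 * well-defined two's-complement addition and is not instrumented.
	 */
	#define wrapping_add(type, a, b)			\
		({						\
			type __val;				\
			__builtin_add_overflow(a, b, &__val);	\
			__val;					\
		})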
Since this touches 2 architectures and netdev, I think it might be
easiest if I carry this in the hardening tree, or maybe via the netdev
tree. Thoughts?
Thanks!
-Kees
Kees Cook (4):
locking/atomic/x86: Silence intentional wrapping addition
arm64: atomics: lse: Silence intentional wrapping addition
locking/atomic: Annotate generic atomics with wrapping
ipv4: Silence intentional wrapping addition
arch/arm64/include/asm/atomic_lse.h | 10 ++++++----
arch/x86/include/asm/atomic.h | 3 ++-
arch/x86/include/asm/atomic64_32.h | 2 +-
arch/x86/include/asm/atomic64_64.h | 2 +-
include/asm-generic/atomic.h | 6 +++---
include/asm-generic/atomic64.h | 6 +++---
include/linux/atomic/atomic-arch-fallback.h | 19 ++++++++++---------
include/linux/atomic/atomic-instrumented.h | 3 ++-
include/linux/atomic/atomic-long.h | 3 ++-
include/net/ip.h | 4 ++--
lib/atomic64.c | 10 +++++-----
net/ipv4/route.c | 10 +++++-----
scripts/atomic/fallbacks/dec_if_positive | 2 +-
scripts/atomic/fallbacks/dec_unless_positive | 2 +-
scripts/atomic/fallbacks/fetch_add_unless | 2 +-
scripts/atomic/fallbacks/inc_unless_negative | 2 +-
scripts/atomic/gen-atomic-fallback.sh | 1 +
scripts/atomic/gen-atomic-instrumented.sh | 1 +
scripts/atomic/gen-atomic-long.sh | 1 +
19 files changed, 49 insertions(+), 40 deletions(-)
--
2.34.1
Because atomics depend on signed wrap-around, we need to use helpers to
perform the operations so that they are not instrumented by the signed
wrap-around sanitizer.
Refresh generated files by running scripts/atomic/gen-atomics.sh.
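For illustration (not part of this patch), with the sanitizer enabled
the helper is what makes the difference between a report and a silent,
well-defined wrap:

	int c = INT_MAX;
	int plain   = c + 1;                   /* instrumented: UBSAN report */
	int wrapped = wrapping_add(int, c, 1); /* wraps to INT_MIN, silently */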
Signed-off-by: Kees Cook <[email protected]>
---
Cc: Mark Rutland <[email protected]>
Cc: Will Deacon <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Boqun Feng <[email protected]>
Cc: Arnd Bergmann <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: [email protected]
---
include/asm-generic/atomic.h | 6 +++---
include/asm-generic/atomic64.h | 6 +++---
include/linux/atomic/atomic-arch-fallback.h | 19 ++++++++++---------
include/linux/atomic/atomic-instrumented.h | 3 ++-
include/linux/atomic/atomic-long.h | 3 ++-
lib/atomic64.c | 10 +++++-----
scripts/atomic/fallbacks/dec_if_positive | 2 +-
scripts/atomic/fallbacks/dec_unless_positive | 2 +-
scripts/atomic/fallbacks/fetch_add_unless | 2 +-
scripts/atomic/fallbacks/inc_unless_negative | 2 +-
scripts/atomic/gen-atomic-fallback.sh | 1 +
scripts/atomic/gen-atomic-instrumented.sh | 1 +
scripts/atomic/gen-atomic-long.sh | 1 +
13 files changed, 32 insertions(+), 26 deletions(-)
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
index 22142c71d35a..1b54e9b1cd02 100644
--- a/include/asm-generic/atomic.h
+++ b/include/asm-generic/atomic.h
@@ -55,7 +55,7 @@ static inline int generic_atomic_fetch_##op(int i, atomic_t *v) \
#include <linux/irqflags.h>
#define ATOMIC_OP(op, c_op) \
-static inline void generic_atomic_##op(int i, atomic_t *v) \
+static inline void __signed_wrap generic_atomic_##op(int i, atomic_t *v)\
{ \
unsigned long flags; \
\
@@ -65,7 +65,7 @@ static inline void generic_atomic_##op(int i, atomic_t *v) \
}
#define ATOMIC_OP_RETURN(op, c_op) \
-static inline int generic_atomic_##op##_return(int i, atomic_t *v) \
+static inline int __signed_wrap generic_atomic_##op##_return(int i, atomic_t *v)\
{ \
unsigned long flags; \
int ret; \
@@ -78,7 +78,7 @@ static inline int generic_atomic_##op##_return(int i, atomic_t *v) \
}
#define ATOMIC_FETCH_OP(op, c_op) \
-static inline int generic_atomic_fetch_##op(int i, atomic_t *v) \
+static inline int __signed_wrap generic_atomic_fetch_##op(int i, atomic_t *v)\
{ \
unsigned long flags; \
int ret; \
diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
index 100d24b02e52..0084867fe399 100644
--- a/include/asm-generic/atomic64.h
+++ b/include/asm-generic/atomic64.h
@@ -19,13 +19,13 @@ extern s64 generic_atomic64_read(const atomic64_t *v);
extern void generic_atomic64_set(atomic64_t *v, s64 i);
#define ATOMIC64_OP(op) \
-extern void generic_atomic64_##op(s64 a, atomic64_t *v);
+extern void __signed_wrap generic_atomic64_##op(s64 a, atomic64_t *v);
#define ATOMIC64_OP_RETURN(op) \
-extern s64 generic_atomic64_##op##_return(s64 a, atomic64_t *v);
+extern s64 __signed_wrap generic_atomic64_##op##_return(s64 a, atomic64_t *v);
#define ATOMIC64_FETCH_OP(op) \
-extern s64 generic_atomic64_fetch_##op(s64 a, atomic64_t *v);
+extern s64 __signed_wrap generic_atomic64_fetch_##op(s64 a, atomic64_t *v);
#define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op) ATOMIC64_FETCH_OP(op)
diff --git a/include/linux/atomic/atomic-arch-fallback.h b/include/linux/atomic/atomic-arch-fallback.h
index 956bcba5dbf2..2d2ebb4e0f8f 100644
--- a/include/linux/atomic/atomic-arch-fallback.h
+++ b/include/linux/atomic/atomic-arch-fallback.h
@@ -7,6 +7,7 @@
#define _LINUX_ATOMIC_FALLBACK_H
#include <linux/compiler.h>
+#include <linux/overflow.h>
#if defined(arch_xchg)
#define raw_xchg arch_xchg
@@ -2428,7 +2429,7 @@ raw_atomic_fetch_add_unless(atomic_t *v, int a, int u)
do {
if (unlikely(c == u))
break;
- } while (!raw_atomic_try_cmpxchg(v, &c, c + a));
+ } while (!raw_atomic_try_cmpxchg(v, &c, wrapping_add(int, c, a)));
return c;
#endif
@@ -2500,7 +2501,7 @@ raw_atomic_inc_unless_negative(atomic_t *v)
do {
if (unlikely(c < 0))
return false;
- } while (!raw_atomic_try_cmpxchg(v, &c, c + 1));
+ } while (!raw_atomic_try_cmpxchg(v, &c, wrapping_add(int, c, 1)));
return true;
#endif
@@ -2528,7 +2529,7 @@ raw_atomic_dec_unless_positive(atomic_t *v)
do {
if (unlikely(c > 0))
return false;
- } while (!raw_atomic_try_cmpxchg(v, &c, c - 1));
+ } while (!raw_atomic_try_cmpxchg(v, &c, wrapping_sub(int, c, 1)));
return true;
#endif
@@ -2554,7 +2555,7 @@ raw_atomic_dec_if_positive(atomic_t *v)
int dec, c = raw_atomic_read(v);
do {
- dec = c - 1;
+ dec = wrapping_sub(int, c, 1);
if (unlikely(dec < 0))
break;
} while (!raw_atomic_try_cmpxchg(v, &c, dec));
@@ -4554,7 +4555,7 @@ raw_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
do {
if (unlikely(c == u))
break;
- } while (!raw_atomic64_try_cmpxchg(v, &c, c + a));
+ } while (!raw_atomic64_try_cmpxchg(v, &c, wrapping_add(s64, c, a)));
return c;
#endif
@@ -4626,7 +4627,7 @@ raw_atomic64_inc_unless_negative(atomic64_t *v)
do {
if (unlikely(c < 0))
return false;
- } while (!raw_atomic64_try_cmpxchg(v, &c, c + 1));
+ } while (!raw_atomic64_try_cmpxchg(v, &c, wrapping_add(s64, c, 1)));
return true;
#endif
@@ -4654,7 +4655,7 @@ raw_atomic64_dec_unless_positive(atomic64_t *v)
do {
if (unlikely(c > 0))
return false;
- } while (!raw_atomic64_try_cmpxchg(v, &c, c - 1));
+ } while (!raw_atomic64_try_cmpxchg(v, &c, wrapping_sub(s64, c, 1)));
return true;
#endif
@@ -4680,7 +4681,7 @@ raw_atomic64_dec_if_positive(atomic64_t *v)
s64 dec, c = raw_atomic64_read(v);
do {
- dec = c - 1;
+ dec = wrapping_sub(s64, c, 1);
if (unlikely(dec < 0))
break;
} while (!raw_atomic64_try_cmpxchg(v, &c, dec));
@@ -4690,4 +4691,4 @@ raw_atomic64_dec_if_positive(atomic64_t *v)
}
#endif /* _LINUX_ATOMIC_FALLBACK_H */
-// 14850c0b0db20c62fdc78ccd1d42b98b88d76331
+// 1278e3a674d0a36c2f0eb9f5fd0ddfcbf3690406
diff --git a/include/linux/atomic/atomic-instrumented.h b/include/linux/atomic/atomic-instrumented.h
index debd487fe971..af103189bd7d 100644
--- a/include/linux/atomic/atomic-instrumented.h
+++ b/include/linux/atomic/atomic-instrumented.h
@@ -15,6 +15,7 @@
#include <linux/build_bug.h>
#include <linux/compiler.h>
#include <linux/instrumented.h>
+#include <linux/overflow.h>
/**
* atomic_read() - atomic load with relaxed ordering
@@ -5050,4 +5051,4 @@ atomic_long_dec_if_positive(atomic_long_t *v)
#endif /* _LINUX_ATOMIC_INSTRUMENTED_H */
-// ce5b65e0f1f8a276268b667194581d24bed219d4
+// b9cd8314e11c4c818fb469dbd18c7390fcaf9b3c
diff --git a/include/linux/atomic/atomic-long.h b/include/linux/atomic/atomic-long.h
index 3ef844b3ab8a..07c1625a2d92 100644
--- a/include/linux/atomic/atomic-long.h
+++ b/include/linux/atomic/atomic-long.h
@@ -7,6 +7,7 @@
#define _LINUX_ATOMIC_LONG_H
#include <linux/compiler.h>
+#include <linux/overflow.h>
#include <asm/types.h>
#ifdef CONFIG_64BIT
@@ -1809,4 +1810,4 @@ raw_atomic_long_dec_if_positive(atomic_long_t *v)
}
#endif /* _LINUX_ATOMIC_LONG_H */
-// 1c4a26fc77f345342953770ebe3c4d08e7ce2f9a
+// 01a5fe70d091e84c1de5eea7e9c09ebeaf7799b3
diff --git a/lib/atomic64.c b/lib/atomic64.c
index caf895789a1e..25cc8993d7da 100644
--- a/lib/atomic64.c
+++ b/lib/atomic64.c
@@ -67,7 +67,7 @@ void generic_atomic64_set(atomic64_t *v, s64 i)
EXPORT_SYMBOL(generic_atomic64_set);
#define ATOMIC64_OP(op, c_op) \
-void generic_atomic64_##op(s64 a, atomic64_t *v) \
+void __signed_wrap generic_atomic64_##op(s64 a, atomic64_t *v) \
{ \
unsigned long flags; \
raw_spinlock_t *lock = lock_addr(v); \
@@ -79,7 +79,7 @@ void generic_atomic64_##op(s64 a, atomic64_t *v) \
EXPORT_SYMBOL(generic_atomic64_##op);
#define ATOMIC64_OP_RETURN(op, c_op) \
-s64 generic_atomic64_##op##_return(s64 a, atomic64_t *v) \
+s64 __signed_wrap generic_atomic64_##op##_return(s64 a, atomic64_t *v) \
{ \
unsigned long flags; \
raw_spinlock_t *lock = lock_addr(v); \
@@ -93,7 +93,7 @@ s64 generic_atomic64_##op##_return(s64 a, atomic64_t *v) \
EXPORT_SYMBOL(generic_atomic64_##op##_return);
#define ATOMIC64_FETCH_OP(op, c_op) \
-s64 generic_atomic64_fetch_##op(s64 a, atomic64_t *v) \
+s64 __signed_wrap generic_atomic64_fetch_##op(s64 a, atomic64_t *v) \
{ \
unsigned long flags; \
raw_spinlock_t *lock = lock_addr(v); \
@@ -135,7 +135,7 @@ s64 generic_atomic64_dec_if_positive(atomic64_t *v)
s64 val;
raw_spin_lock_irqsave(lock, flags);
- val = v->counter - 1;
+ val = wrapping_sub(typeof(val), v->counter, 1);
if (val >= 0)
v->counter = val;
raw_spin_unlock_irqrestore(lock, flags);
@@ -181,7 +181,7 @@ s64 generic_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
raw_spin_lock_irqsave(lock, flags);
val = v->counter;
if (val != u)
- v->counter += a;
+ wrapping_assign_add(v->counter, a);
raw_spin_unlock_irqrestore(lock, flags);
return val;
diff --git a/scripts/atomic/fallbacks/dec_if_positive b/scripts/atomic/fallbacks/dec_if_positive
index f65c11b4b85b..910a6d4ef398 100755
--- a/scripts/atomic/fallbacks/dec_if_positive
+++ b/scripts/atomic/fallbacks/dec_if_positive
@@ -2,7 +2,7 @@ cat <<EOF
${int} dec, c = raw_${atomic}_read(v);
do {
- dec = c - 1;
+ dec = wrapping_sub(${int}, c, 1);
if (unlikely(dec < 0))
break;
} while (!raw_${atomic}_try_cmpxchg(v, &c, dec));
diff --git a/scripts/atomic/fallbacks/dec_unless_positive b/scripts/atomic/fallbacks/dec_unless_positive
index d025361d7b85..327451527825 100755
--- a/scripts/atomic/fallbacks/dec_unless_positive
+++ b/scripts/atomic/fallbacks/dec_unless_positive
@@ -4,7 +4,7 @@ cat <<EOF
do {
if (unlikely(c > 0))
return false;
- } while (!raw_${atomic}_try_cmpxchg(v, &c, c - 1));
+ } while (!raw_${atomic}_try_cmpxchg(v, &c, wrapping_sub(${int}, c, 1)));
return true;
EOF
diff --git a/scripts/atomic/fallbacks/fetch_add_unless b/scripts/atomic/fallbacks/fetch_add_unless
index 8db7e9e17fac..a9a11675a4d7 100755
--- a/scripts/atomic/fallbacks/fetch_add_unless
+++ b/scripts/atomic/fallbacks/fetch_add_unless
@@ -4,7 +4,7 @@ cat << EOF
do {
if (unlikely(c == u))
break;
- } while (!raw_${atomic}_try_cmpxchg(v, &c, c + a));
+ } while (!raw_${atomic}_try_cmpxchg(v, &c, wrapping_add(${int}, c, a)));
return c;
EOF
diff --git a/scripts/atomic/fallbacks/inc_unless_negative b/scripts/atomic/fallbacks/inc_unless_negative
index 7b4b09868842..0275d3c683eb 100755
--- a/scripts/atomic/fallbacks/inc_unless_negative
+++ b/scripts/atomic/fallbacks/inc_unless_negative
@@ -4,7 +4,7 @@ cat <<EOF
do {
if (unlikely(c < 0))
return false;
- } while (!raw_${atomic}_try_cmpxchg(v, &c, c + 1));
+ } while (!raw_${atomic}_try_cmpxchg(v, &c, wrapping_add(${int}, c, 1)));
return true;
EOF
diff --git a/scripts/atomic/gen-atomic-fallback.sh b/scripts/atomic/gen-atomic-fallback.sh
index f80d69cfeb1f..60f5adf3a022 100755
--- a/scripts/atomic/gen-atomic-fallback.sh
+++ b/scripts/atomic/gen-atomic-fallback.sh
@@ -297,6 +297,7 @@ cat << EOF
#define _LINUX_ATOMIC_FALLBACK_H
#include <linux/compiler.h>
+#include <linux/overflow.h>
EOF
diff --git a/scripts/atomic/gen-atomic-instrumented.sh b/scripts/atomic/gen-atomic-instrumented.sh
index 592f3ec89b5f..fbc6c2f0abd3 100755
--- a/scripts/atomic/gen-atomic-instrumented.sh
+++ b/scripts/atomic/gen-atomic-instrumented.sh
@@ -146,6 +146,7 @@ cat << EOF
#include <linux/build_bug.h>
#include <linux/compiler.h>
#include <linux/instrumented.h>
+#include <linux/overflow.h>
EOF
diff --git a/scripts/atomic/gen-atomic-long.sh b/scripts/atomic/gen-atomic-long.sh
index 9826be3ba986..ae6d549c9079 100755
--- a/scripts/atomic/gen-atomic-long.sh
+++ b/scripts/atomic/gen-atomic-long.sh
@@ -75,6 +75,7 @@ cat << EOF
#define _LINUX_ATOMIC_LONG_H
#include <linux/compiler.h>
+#include <linux/overflow.h>
#include <asm/types.h>
#ifdef CONFIG_64BIT
--
2.34.1
The overflow sanitizer quickly noticed what appears to have been an old
sore spot involving intentional wrap-around:
[ 22.192362] ------------[ cut here ]------------
[ 22.193329] UBSAN: signed-integer-overflow in ../arch/x86/include/asm/atomic.h:85:11
[ 22.194844] 1469769800 + 1671667352 cannot be represented in type 'int'
[ 22.195975] CPU: 2 PID: 2260 Comm: nmbd Not tainted 6.7.0 #1
[ 22.196927] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 0.0.0 02/06/2015
[ 22.198231] Call Trace:
[ 22.198641] <TASK>
[ 22.198641] dump_stack_lvl+0x64/0x80
[ 22.199533] handle_overflow+0x152/0x1a0
[ 22.200382] __ip_select_ident+0xe3/0x100
Explicitly mark ip_idents_reserve() as performing wrapping signed
arithmetic. Change the type of the segs argument to u32, since that is
how it is used: callers pass either a u16 or a literal "1", but it
participates in wrapping 32-bit arithmetic, so it is effectively a u32.
Update the comment to mention the annotation instead of
-fno-strict-overflow, which is no longer the issue.
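For reference, this matches how callers already pass segs (a sketch
based on existing call sites; the exact code may differ):

	/* net/ipv4/ip_output.c: gso_segs is a u16 */
	ip_select_ident_segs(net, skb, sk,
			     skb_shinfo(skb)->gso_segs ?: 1);

	/* include/net/ip.h: the non-GSO path passes a literal 1 */
	static inline void ip_select_ident(struct net *net,
					   struct sk_buff *skb, struct sock *sk)
	{
		ip_select_ident_segs(net, skb, sk, 1);
	}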
Signed-off-by: Kees Cook <[email protected]>
---
Cc: Jakub Kicinski <[email protected]>
Cc: "David S. Miller" <[email protected]>
Cc: David Ahern <[email protected]>
Cc: Eric Dumazet <[email protected]>
Cc: Paolo Abeni <[email protected]>
Cc: [email protected]
---
include/net/ip.h | 4 ++--
net/ipv4/route.c | 10 +++++-----
2 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/include/net/ip.h b/include/net/ip.h
index 25cb688bdc62..09d502a0ae30 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -537,10 +537,10 @@ void ip_dst_metrics_put(struct dst_entry *dst)
kfree(p);
}
-void __ip_select_ident(struct net *net, struct iphdr *iph, int segs);
+void __ip_select_ident(struct net *net, struct iphdr *iph, u32 segs);
static inline void ip_select_ident_segs(struct net *net, struct sk_buff *skb,
- struct sock *sk, int segs)
+ struct sock *sk, u32 segs)
{
struct iphdr *iph = ip_hdr(skb);
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index c8f76f56dc16..400e7a16fdba 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -458,7 +458,7 @@ static u32 *ip_tstamps __read_mostly;
* if one generator is seldom used. This makes hard for an attacker
* to infer how many packets were sent between two points in time.
*/
-static u32 ip_idents_reserve(u32 hash, int segs)
+static __signed_wrap u32 ip_idents_reserve(u32 hash, u32 segs)
{
u32 bucket, old, now = (u32)jiffies;
atomic_t *p_id;
@@ -473,14 +473,14 @@ static u32 ip_idents_reserve(u32 hash, int segs)
if (old != now && cmpxchg(p_tstamp, old, now) == old)
delta = get_random_u32_below(now - old);
- /* If UBSAN reports an error there, please make sure your compiler
- * supports -fno-strict-overflow before reporting it that was a bug
- * in UBSAN, and it has been fixed in GCC-8.
+ /* If UBSAN reports an error here, please make sure your arch's
+ * atomic_add_return() implementation has been annotated with
+ * __signed_wrap or uses wrapping_add() internally.
*/
return atomic_add_return(segs + delta, p_id) - segs;
}
-void __ip_select_ident(struct net *net, struct iphdr *iph, int segs)
+void __ip_select_ident(struct net *net, struct iphdr *iph, u32 segs)
{
u32 hash, id;
--
2.34.1