2022-03-25 10:59:35

by Artem Savkov

[permalink] [raw]
Subject: [PATCH v2 0/2] Upper bound mode for kernel timers

As previously discussed [1] we had a report of a regression in TCP keepalive
timer where timers were up to 4 minutes late resulting in disconnects.

This patchset tries to fix the problem by introducing upper bound kernel timers
and making tcp keepalive timer use those.

[1] https://lore.kernel.org/all/20210302001054.4qgrvnkltvkgikzr@treble/T/#u

---

Changes in v2:
- TIMER_UPPER_BOUND flag description added as a comment in timer.h
- Code style fixes
- More elaborate commit message in timer commit

Artem Savkov (2):
timer: introduce upper bound timers
net: make tcp keepalive timer upper bound

include/linux/timer.h | 6 +++++-
kernel/time/timer.c | 36 ++++++++++++++++++++-------------
net/ipv4/inet_connection_sock.c | 2 +-
3 files changed, 28 insertions(+), 16 deletions(-)

--
2.34.1


2022-03-25 11:06:06

by Artem Savkov

[permalink] [raw]
Subject: [PATCH v2 2/2] net: make tcp keepalive timer upper bound

Make sure the TCP keepalive timer does not expire late. Switching to upper
bound timers means it can fire early, but in the case of keepalive the
tcp_keepalive_timer() handler checks the elapsed time and resets the timer
if it was triggered early. This results in the timer "cascading" to a
higher precision and ending up just a couple of milliseconds off its
original mark.

Signed-off-by: Artem Savkov <[email protected]>
---
net/ipv4/inet_connection_sock.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index fc2a985f6064..a8fea958960b 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -564,7 +564,7 @@ void inet_csk_init_xmit_timers(struct sock *sk,

timer_setup(&icsk->icsk_retransmit_timer, retransmit_handler, 0);
timer_setup(&icsk->icsk_delack_timer, delack_handler, 0);
- timer_setup(&sk->sk_timer, keepalive_handler, 0);
+ timer_setup(&sk->sk_timer, keepalive_handler, TIMER_UPPER_BOUND);
icsk->icsk_pending = icsk->icsk_ack.pending = 0;
}
EXPORT_SYMBOL(inet_csk_init_xmit_timers);
--
2.34.1

2022-03-25 11:59:12

by Artem Savkov

[permalink] [raw]
Subject: [PATCH v2 1/2] timer: introduce upper bound timers

The current timer wheel implementation is optimized for performance and
energy usage but lacks precision. Normally this is not a problem, as
most timers that use the timer wheel are used for timeouts and thus rarely
expire; instead they often get canceled or modified before expiration.
Even when they don't, expiring a bit late is not an issue for timeout
timers.

The TCP keepalive timer is a special case: its aim is to prevent timeouts,
so triggering earlier rather than later is the desired behavior. In a
reported case the user had a 3600s keepalive timer for preventing firewall
disconnects (on a 3650s interval). They observed keepalive timers coming
in up to four minutes late, causing unexpected disconnects.

This commit adds TIMER_UPPER_BOUND flag which allows creation of timers
that would expire at most at specified time or earlier.

This was previously discussed here:
https://lore.kernel.org/all/20210302001054.4qgrvnkltvkgikzr@treble/T/#u

Suggested-by: Josh Poimboeuf <[email protected]>
Signed-off-by: Artem Savkov <[email protected]>
---
include/linux/timer.h | 6 +++++-
kernel/time/timer.c | 36 ++++++++++++++++++++++--------------
2 files changed, 27 insertions(+), 15 deletions(-)

diff --git a/include/linux/timer.h b/include/linux/timer.h
index fda13c9d1256..4b2456501be6 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -60,6 +60,9 @@ struct timer_list {
* function is invoked via mod_timer() or add_timer(). If the timer
* should be placed on a particular CPU, then add_timer_on() has to be
* used.
+ *
+ * @TIMER_UPPER_BOUND: Unlike normal timers which trigger at specified time or
+ * later, upper bound timer will expire at most at specified time or earlier.
*/
#define TIMER_CPUMASK 0x0003FFFF
#define TIMER_MIGRATING 0x00040000
@@ -67,7 +70,8 @@ struct timer_list {
#define TIMER_DEFERRABLE 0x00080000
#define TIMER_PINNED 0x00100000
#define TIMER_IRQSAFE 0x00200000
-#define TIMER_INIT_FLAGS (TIMER_DEFERRABLE | TIMER_PINNED | TIMER_IRQSAFE)
+#define TIMER_UPPER_BOUND 0x00400000
+#define TIMER_INIT_FLAGS (TIMER_DEFERRABLE | TIMER_PINNED | TIMER_IRQSAFE | TIMER_UPPER_BOUND)
#define TIMER_ARRAYSHIFT 22
#define TIMER_ARRAYMASK 0xFFC00000

diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 85f1021ad459..f4965644d728 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -491,7 +491,7 @@ static inline void timer_set_idx(struct timer_list *timer, unsigned int idx)
* time.
*/
static inline unsigned calc_index(unsigned long expires, unsigned lvl,
- unsigned long *bucket_expiry)
+ unsigned long *bucket_expiry, bool upper_bound)
{

/*
@@ -501,34 +501,39 @@ static inline unsigned calc_index(unsigned long expires, unsigned lvl,
* - Truncation of the expiry time in the outer wheel levels
*
* Round up with level granularity to prevent this.
+ * Do not perform round up in case of upper bound timer.
*/
- expires = (expires + LVL_GRAN(lvl)) >> LVL_SHIFT(lvl);
+ if (upper_bound)
+ expires = expires >> LVL_SHIFT(lvl);
+ else
+ expires = (expires + LVL_GRAN(lvl)) >> LVL_SHIFT(lvl);
+
*bucket_expiry = expires << LVL_SHIFT(lvl);
return LVL_OFFS(lvl) + (expires & LVL_MASK);
}

static int calc_wheel_index(unsigned long expires, unsigned long clk,
- unsigned long *bucket_expiry)
+ unsigned long *bucket_expiry, bool upper_bound)
{
unsigned long delta = expires - clk;
unsigned int idx;

if (delta < LVL_START(1)) {
- idx = calc_index(expires, 0, bucket_expiry);
+ idx = calc_index(expires, 0, bucket_expiry, upper_bound);
} else if (delta < LVL_START(2)) {
- idx = calc_index(expires, 1, bucket_expiry);
+ idx = calc_index(expires, 1, bucket_expiry, upper_bound);
} else if (delta < LVL_START(3)) {
- idx = calc_index(expires, 2, bucket_expiry);
+ idx = calc_index(expires, 2, bucket_expiry, upper_bound);
} else if (delta < LVL_START(4)) {
- idx = calc_index(expires, 3, bucket_expiry);
+ idx = calc_index(expires, 3, bucket_expiry, upper_bound);
} else if (delta < LVL_START(5)) {
- idx = calc_index(expires, 4, bucket_expiry);
+ idx = calc_index(expires, 4, bucket_expiry, upper_bound);
} else if (delta < LVL_START(6)) {
- idx = calc_index(expires, 5, bucket_expiry);
+ idx = calc_index(expires, 5, bucket_expiry, upper_bound);
} else if (delta < LVL_START(7)) {
- idx = calc_index(expires, 6, bucket_expiry);
+ idx = calc_index(expires, 6, bucket_expiry, upper_bound);
} else if (LVL_DEPTH > 8 && delta < LVL_START(8)) {
- idx = calc_index(expires, 7, bucket_expiry);
+ idx = calc_index(expires, 7, bucket_expiry, upper_bound);
} else if ((long) delta < 0) {
idx = clk & LVL_MASK;
*bucket_expiry = clk;
@@ -540,7 +545,8 @@ static int calc_wheel_index(unsigned long expires, unsigned long clk,
if (delta >= WHEEL_TIMEOUT_CUTOFF)
expires = clk + WHEEL_TIMEOUT_MAX;

- idx = calc_index(expires, LVL_DEPTH - 1, bucket_expiry);
+ idx = calc_index(expires, LVL_DEPTH - 1, bucket_expiry,
+ upper_bound);
}
return idx;
}
@@ -607,7 +613,8 @@ static void internal_add_timer(struct timer_base *base, struct timer_list *timer
unsigned long bucket_expiry;
unsigned int idx;

- idx = calc_wheel_index(timer->expires, base->clk, &bucket_expiry);
+ idx = calc_wheel_index(timer->expires, base->clk, &bucket_expiry,
+ timer->flags & TIMER_UPPER_BOUND);
enqueue_timer(base, timer, idx, bucket_expiry);
}

@@ -1000,7 +1007,8 @@ __mod_timer(struct timer_list *timer, unsigned long expires, unsigned int option
}

clk = base->clk;
- idx = calc_wheel_index(expires, clk, &bucket_expiry);
+ idx = calc_wheel_index(expires, clk, &bucket_expiry,
+ timer->flags & TIMER_UPPER_BOUND);

/*
* Retrieve and compare the array index of the pending
--
2.34.1