Adding the same element to a linked list multiple times
seems to be a rather common programming mistake. To debug
those I've more than once written some code to check a
linked list for duplicates.
Since re-inventing the wheel over and over again is a bad
idea this patch tries to add some common code which allows
to check linked lists for duplicates while adding new
elements.
When list debugging is enabled we currently already check
the previous and next element if they are identical to the
new one. This patch now adds a configuration option to
check N elements before and after the desired position.
By default we still only test one item since testing more
means quite a large CPU overhead. This can be overwritten
on a per C file basis by defining DEBUG_LIST_DOUBLE_ADD
before including list.h.
A new kunit test is also added to the existing list tests
which intentionally triggers the debug functionality.
Signed-off-by: Christian König <[email protected]>
---
include/linux/list.h | 18 +++++++++++++++---
include/linux/rculist.h | 2 +-
lib/Kconfig.debug | 14 ++++++++++++++
lib/list-test.c | 27 +++++++++++++++++++++++++++
lib/list_debug.c | 26 +++++++++++++++++++++-----
5 files changed, 78 insertions(+), 9 deletions(-)
diff --git a/include/linux/list.h b/include/linux/list.h
index 89bdc92e75c3..e772e5e7c96d 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -37,14 +37,26 @@ static inline void INIT_LIST_HEAD(struct list_head *list)
}
#ifdef CONFIG_DEBUG_LIST
+
+#ifndef DEBUG_LIST_DOUBLE_ADD
+#define DEBUG_LIST_DOUBLE_ADD CONFIG_DEBUG_LIST_DOUBLE_ADD
+#endif
+
extern bool __list_add_valid(struct list_head *new,
struct list_head *prev,
- struct list_head *next);
+ struct list_head *next,
+ int num_entries);
extern bool __list_del_entry_valid(struct list_head *entry);
#else
+
+#ifndef DEBUG_LIST_DOUBLE_ADD
+#define DEBUG_LIST_DOUBLE_ADD 0
+#endif
+
static inline bool __list_add_valid(struct list_head *new,
struct list_head *prev,
- struct list_head *next)
+ struct list_head *next,
+ int num_entries)
{
return true;
}
@@ -64,7 +76,7 @@ static inline void __list_add(struct list_head *new,
struct list_head *prev,
struct list_head *next)
{
- if (!__list_add_valid(new, prev, next))
+ if (!__list_add_valid(new, prev, next, DEBUG_LIST_DOUBLE_ADD))
return;
next->prev = new;
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index f8633d37e358..186618ad11d9 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -85,7 +85,7 @@ static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
static inline void __list_add_rcu(struct list_head *new,
struct list_head *prev, struct list_head *next)
{
- if (!__list_add_valid(new, prev, next))
+ if (!__list_add_valid(new, prev, next, DEBUG_LIST_DOUBLE_ADD))
return;
new->next = next;
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index e6e58b26e888..ded75214ea76 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1508,6 +1508,20 @@ config DEBUG_LIST
If unsure, say N.
+config DEBUG_LIST_DOUBLE_ADD
+ int "How many list entries are checked for double adds"
+ depends on DEBUG_LIST
+ default 1
+ help
+ Controls how many list entries are checked while adding new ones.
+ Larger values catch more double adds, but reduce the performance
+ massively.
+
+ Can be overwritten locally in a C file by defining
+ DEBUG_LIST_DOUBLE_ADD to an arbitrary value.
+
+ If unsure, say 1.
+
config DEBUG_PLIST
bool "Debug priority linked list manipulation"
depends on DEBUG_KERNEL
diff --git a/lib/list-test.c b/lib/list-test.c
index ee09505df16f..05174051977b 100644
--- a/lib/list-test.c
+++ b/lib/list-test.c
@@ -5,6 +5,9 @@
* Copyright (C) 2019, Google LLC.
* Author: David Gow <[email protected]>
*/
+
+#define DEBUG_LIST_DOUBLE_ADD 4
+
#include <kunit/test.h>
#include <linux/list.h>
@@ -698,6 +701,27 @@ static void list_test_list_for_each_entry_reverse(struct kunit *test)
KUNIT_EXPECT_EQ(test, i, -1);
}
+
+#ifdef CONFIG_DEBUG_LIST
+static void list_test_list_double_add(struct kunit *test)
+{
+ struct list_head entries[DEBUG_LIST_DOUBLE_ADD];
+ LIST_HEAD(list);
+ int i;
+
+ for (i = 0; i < DEBUG_LIST_DOUBLE_ADD; ++i)
+ list_add_tail(&entries[i], &list);
+
+ /* Intentionally double add the first one, this should be caught and
+ * prevented by the list debug code.
+ */
+ list_add_tail(&entries[0], &list);
+
+ KUNIT_EXPECT_PTR_EQ(test, entries[0].next, &entries[1]);
+ KUNIT_EXPECT_PTR_EQ(test, entries[0].prev, &list);
+}
+#endif
+
static struct kunit_case list_test_cases[] = {
KUNIT_CASE(list_test_list_init),
KUNIT_CASE(list_test_list_add),
@@ -735,6 +759,9 @@ static struct kunit_case list_test_cases[] = {
KUNIT_CASE(list_test_list_for_each_prev_safe),
KUNIT_CASE(list_test_list_for_each_entry),
KUNIT_CASE(list_test_list_for_each_entry_reverse),
+#ifdef CONFIG_DEBUG_LIST
+ KUNIT_CASE(list_test_list_double_add),
+#endif
{},
};
diff --git a/lib/list_debug.c b/lib/list_debug.c
index 5d5424b51b74..df378b6ad3df 100644
--- a/lib/list_debug.c
+++ b/lib/list_debug.c
@@ -18,19 +18,35 @@
*/
bool __list_add_valid(struct list_head *new, struct list_head *prev,
- struct list_head *next)
+ struct list_head *next, int num_entries)
{
+ struct list_head *tmp;
+ int cnt;
+
if (CHECK_DATA_CORRUPTION(next->prev != prev,
"list_add corruption. next->prev should be prev (%px), but was %px. (next=%px).\n",
prev, next->prev, next) ||
CHECK_DATA_CORRUPTION(prev->next != next,
"list_add corruption. prev->next should be next (%px), but was %px. (prev=%px).\n",
- next, prev->next, prev) ||
- CHECK_DATA_CORRUPTION(new == prev || new == next,
- "list_add double add: new=%px, prev=%px, next=%px.\n",
- new, prev, next))
+ next, prev->next, prev))
return false;
+ for (cnt = 0, tmp = prev; cnt < num_entries && tmp != next;
+ ++cnt, tmp = tmp->prev)
+ if (CHECK_DATA_CORRUPTION(new == tmp,
+ "list_add double add: new=%px, prev=%px, next=%px.\n",
+ new, prev, next))
+ return false;
+
+ if (tmp == next)
+ return true;
+
+ for (cnt = 0, tmp = next; cnt < num_entries; ++cnt, tmp = tmp->next)
+ if (CHECK_DATA_CORRUPTION(new == tmp,
+ "list_add double add: new=%px, prev=%px, next=%px.\n",
+ new, prev, next))
+ return false;
+
return true;
}
EXPORT_SYMBOL(__list_add_valid);
--
2.25.1
On Mon, Feb 01, 2021 at 02:52:51PM +0100, Christian König wrote:
> Adding the same element to a linked list multiple times
> seems to be a rather common programming mistake. To debug
> those I've more than once written some code to check a
> linked list for duplicates.
>
> Since re-inventing the wheel over and over again is a bad
> idea this patch tries to add some common code which allows
> to check linked lists for duplicates while adding new
> elements.
>
> When list debugging is enabled we currently already check
> the previous and next element if they are identical to the
> new one. This patch now adds a configuration option to
> check N elements before and after the desired position.
>
> By default we still only test one item since testing more
> means quite a large CPU overhead. This can be overwritten
> on a per C file bases by defining DEBUG_LIST_DOUBLE_ADD
> before including list.h.
I'm not sure it is a good idea. Currently the implementation is *generic*.
You are customizing it w/o letting caller know.
Create a derivative implementation and name it exlist (exclusive list) and use
whenever it makes sense.
And I think if you are still pushing to modify the generic one, the default
must be 0 in order not to alter the current behaviour.
> A new kunit test is also added to the existing list tests
> which intentionally triggers the debug functionality.
--
With Best Regards,
Andy Shevchenko
On Mon, Feb 01, 2021 at 06:08:03PM +0200, Andy Shevchenko wrote:
> On Mon, Feb 01, 2021 at 02:52:51PM +0100, Christian König wrote:
> > Adding the same element to a linked list multiple times
> > seems to be a rather common programming mistake. To debug
> > those I've more than once written some code to check a
> > linked list for duplicates.
> >
> > Since re-inventing the wheel over and over again is a bad
> > idea this patch tries to add some common code which allows
> > to check linked lists for duplicates while adding new
> > elements.
> >
> > When list debugging is enabled we currently already check
> > the previous and next element if they are identical to the
> > new one. This patch now adds a configuration option to
> > check N elements before and after the desired position.
> >
> > By default we still only test one item since testing more
> > means quite a large CPU overhead. This can be overwritten
> > on a per C file bases by defining DEBUG_LIST_DOUBLE_ADD
> > before including list.h.
>
> I'm not sure it is a good idea. Currently the implementation is *generic*.
> You are customizing it w/o letting caller know.
>
> Create a derivative implementation and name it exlist (exclusive list) and use
> whenever it makes sense.
And make the depth a runtime parameter available to the user.
> And I think if you are still pushing to modify generic one the default must be
> 0 in order not altering current behaviour.
>
> > A new kunit test is also added to the existing list tests
> > which intentionally triggers the debug functionality.
--
With Best Regards,
Andy Shevchenko
On Mon, Feb 01 2021, Andy Shevchenko wrote:
> On Mon, Feb 01, 2021 at 02:52:51PM +0100, Christian König wrote:
>> Adding the same element to a linked list multiple times
>> seems to be a rather common programming mistake. To debug
>> those I've more than once written some code to check a
>> linked list for duplicates.
>>
>> Since re-inventing the wheel over and over again is a bad
>> idea this patch tries to add some common code which allows
>> to check linked lists for duplicates while adding new
>> elements.
>>
>> When list debugging is enabled we currently already check
>> the previous and next element if they are identical to the
>> new one. This patch now adds a configuration option to
>> check N elements before and after the desired position.
>>
>> By default we still only test one item since testing more
>> means quite a large CPU overhead. This can be overwritten
>> on a per C file bases by defining DEBUG_LIST_DOUBLE_ADD
>> before including list.h.
>
> I'm not sure it is a good idea. Currently the implementation is *generic*.
> You are customizing it w/o letting caller know.
>
> Create a derivative implementation and name it exlist (exclusive list) and use
> whenever it makes sense.
>
> And I think if you are still pushing to modify generic one the default must be
> 0 in order not altering current behaviour.
I don't understand your complaint.
The extra checks are also completely *generic*. It can never make sense
to add something to a list if it is already on the list. All lists are
exclusive lists.
The code ALREADY tests if the inserted object is already present on
either side of the insertion point. This patch just extends
it somewhat.
I myself have never had, or heard of, a bug due to double insertion so
I'm not strongly in favour of this patch for that reason.
But I *am* in favour of making the platform more resilient in general,
and if others have experienced this sort of bug, then I'm in favour of
making that easier to detect in future.
NeilBrown
>
>> A new kunit test is also added to the existing list tests
>> which intentionally triggers the debug functionality.
>
> --
> With Best Regards,
> Andy Shevchenko
Am 01.02.21 um 23:16 schrieb NeilBrown:
> On Mon, Feb 01 2021, Andy Shevchenko wrote:
>
>> On Mon, Feb 01, 2021 at 02:52:51PM +0100, Christian König wrote:
>>> Adding the same element to a linked list multiple times
>>> seems to be a rather common programming mistake. To debug
>>> those I've more than once written some code to check a
>>> linked list for duplicates.
>>>
>>> Since re-inventing the wheel over and over again is a bad
>>> idea this patch tries to add some common code which allows
>>> to check linked lists for duplicates while adding new
>>> elements.
>>>
>>> When list debugging is enabled we currently already check
>>> the previous and next element if they are identical to the
>>> new one. This patch now adds a configuration option to
>>> check N elements before and after the desired position.
>>>
>>> By default we still only test one item since testing more
>>> means quite a large CPU overhead. This can be overwritten
>>> on a per C file bases by defining DEBUG_LIST_DOUBLE_ADD
>>> before including list.h.
>> I'm not sure it is a good idea. Currently the implementation is *generic*.
>> You are customizing it w/o letting caller know.
>>
>> Create a derivative implementation and name it exlist (exclusive list) and use
>> whenever it makes sense.
>>
>> And I think if you are still pushing to modify generic one the default must be
>> 0 in order not altering current behaviour.
> I don't understand your complaint.
> The extra checks are also completely *generic*. It can never make sense
> to add sometime to a list if it is already on the list. All lists are
> exclusive lists.
> The code ALREADY tests if the inserted object is already present either
> side of the insert side of the insertion point. This patch just extends
> it somewhat.
Correct, we are just checking for obvious bugs. The bigger problem is
the usability and potentially performance impact.
In other words, when you set this value too high the list_add() function
will use so much time that the kernel thinks that the CPU is stuck. I
was already able to trigger this.
Would it be more acceptable if I drop the config option and only allow
the check to be overridden on a per C file basis?
> I myself have never had, or heard of, a bug due to double insertion so
> I'm no strongly in favour of this patch for that reason.
> But I *am* in favour of making the platform more resilient in general,
> and if others have experienced this sort of bug, then I'm in favour of
> make that easier to detect in future.
I have seen plenty of those. Especially when you implement state
machines where a certain object needs to move from state to state
triggered by external events.
For example it seems to be a common mistake to do a list_del_init, drop
a lock and then assume a list_add should do it once you have re-acquired the
lock. In reality you have a very small window where a device interrupt
could have already added the item to the list again between the locks.
Thanks,
Christian.
>
> NeilBrown
>
>
>>> A new kunit test is also added to the existing list tests
>>> which intentionally triggers the debug functionality.
>> --
>> With Best Regards,
>> Andy Shevchenko
Greeting,
FYI, we noticed the following commit (built with gcc-9):
commit: b9dc2e095274fa65d5b96231bb121a4e4616b5d6 ("list: add more extensive double add check")
https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git Christian-K-nig/list-add-more-extensive-double-add-check/20210201-215918
in testcase: locktorture
version:
with following parameters:
runtime: 300s
test: cpuhotplug
test-description: This torture test consists of creating a number of kernel threads which acquire the lock and hold it for specific amount of time, thus simulating different critical region behaviors.
test-url: https://www.kernel.org/doc/Documentation/locking/locktorture.txt
on test machine: qemu-system-i386 -enable-kvm -cpu SandyBridge -smp 2 -m 8G
caused below changes (please refer to attached dmesg/kmsg for entire log/backtrace):
+-------------------------------------------------------------+------------+------------+
| | 4826dea261 | b9dc2e0952 |
+-------------------------------------------------------------+------------+------------+
| WARNING:at_lib/list_debug.c:#__list_add_valid | 0 | 12 |
| EIP:__list_add_valid | 0 | 12 |
+-------------------------------------------------------------+------------+------------+
If you fix the issue, kindly add following tag
Reported-by: kernel test robot <[email protected]>
[ 106.277211] WARNING: CPU: 1 PID: 218 at lib/list_debug.c:36 __list_add_valid (kbuild/src/consumer/lib/list_debug.c:36 (discriminator 5))
[ 106.278116] Modules linked in:
[ 106.278458] CPU: 1 PID: 218 Comm: kunit_try_catch Tainted: G W 5.11.0-rc3-00219-gb9dc2e095274 #1
[ 106.279527] EIP: __list_add_valid (kbuild/src/consumer/lib/list_debug.c:36 (discriminator 5))
[ 106.280000] Code: da 51 52 56 68 d0 c8 ed 4b 83 05 88 18 06 4d 01 83 15 8c 18 06 4d 00 e8 4d 31 25 01 83 05 90 18 06 4d 01 83 15 94 18 06 4d 00 <0f> 0b 83 05 98 18 06 4d 01 83 15 9c 18 06 4d 00 83 c4 10 31 c0 e9
All code
========
0: da 51 52 ficoml 0x52(%rcx)
3: 56 push %rsi
4: 68 d0 c8 ed 4b pushq $0x4bedc8d0
9: 83 05 88 18 06 4d 01 addl $0x1,0x4d061888(%rip) # 0x4d061898
10: 83 15 8c 18 06 4d 00 adcl $0x0,0x4d06188c(%rip) # 0x4d0618a3
17: e8 4d 31 25 01 callq 0x1253169
1c: 83 05 90 18 06 4d 01 addl $0x1,0x4d061890(%rip) # 0x4d0618b3
23: 83 15 94 18 06 4d 00 adcl $0x0,0x4d061894(%rip) # 0x4d0618be
2a:* 0f 0b ud2 <-- trapping instruction
2c: 83 05 98 18 06 4d 01 addl $0x1,0x4d061898(%rip) # 0x4d0618cb
33: 83 15 9c 18 06 4d 00 adcl $0x0,0x4d06189c(%rip) # 0x4d0618d6
3a: 83 c4 10 add $0x10,%esp
3d: 31 c0 xor %eax,%eax
3f: e9 .byte 0xe9
Code starting with the faulting instruction
===========================================
0: 0f 0b ud2
2: 83 05 98 18 06 4d 01 addl $0x1,0x4d061898(%rip) # 0x4d0618a1
9: 83 15 9c 18 06 4d 00 adcl $0x0,0x4d06189c(%rip) # 0x4d0618ac
10: 83 c4 10 add $0x10,%esp
13: 31 c0 xor %eax,%eax
15: e9 .byte 0xe9
[ 106.282024] EAX: 00000040 EBX: becf7f01 ECX: 00000000 EDX: ffffffff
[ 106.282698] ESI: becf7f08 EDI: 00000003 EBP: becf7ed8 ESP: becf7eb8
[ 106.283362] DS: 007b ES: 007b FS: 00d8 GS: 0000 SS: 0068 EFLAGS: 00010246
[ 106.284059] CR0: 80050033 CR2: 00000000 CR3: 0cb60000 CR4: 000406f0
[ 106.284729] DR0: 00000000 DR1: 00000000 DR2: 00000000 DR3: 00000000
[ 106.285406] DR6: fffe0ff0 DR7: 00000400
[ 106.285797] Call Trace:
[ 106.286060] __list_add (kbuild/src/consumer/include/linux/list.h:79)
[ 106.286419] list_add_tail (kbuild/src/consumer/include/linux/list.h:112)
[ 106.286804] list_test_list_double_add (kbuild/src/consumer/lib/list-test.c:720)
[ 106.287292] ? find_held_lock (kbuild/src/consumer/kernel/locking/lockdep.c:4935)
[ 106.287702] ? __kthread_parkme (kbuild/src/consumer/arch/x86/include/asm/bitops.h:207 kbuild/src/consumer/include/asm-generic/bitops/instrumented-non-atomic.h:135 kbuild/src/consumer/kernel/kthread.c:222)
[ 106.288112] ? lock_release (kbuild/src/consumer/kernel/locking/lockdep.c:5120 kbuild/src/consumer/kernel/locking/lockdep.c:5457)
[ 106.288542] kunit_try_run_case (kbuild/src/consumer/lib/kunit/test.c:242 kbuild/src/consumer/lib/kunit/test.c:285)
[ 106.288995] ? kunit_catch_run_case (kbuild/src/consumer/lib/kunit/test.c:270)
[ 106.289458] kunit_generic_run_threadfn_adapter (kbuild/src/consumer/lib/kunit/try-catch.c:30)
[ 106.290035] kthread (kbuild/src/consumer/kernel/kthread.c:292)
[ 106.290393] ? kunit_ptr_not_err_assert_format (kbuild/src/consumer/lib/kunit/try-catch.c:25)
[ 106.290966] ? list_del_init (kbuild/src/consumer/arch/x86/events/intel/uncore.c:1349)
[ 106.291413] ret_from_fork (kbuild/src/consumer/arch/x86/entry/entry_32.S:856)
[ 106.291812] ---[ end trace 45c83c6d92ae08ff ]---
[ 106.292406] ok 37 - list_test_list_double_add
[ 106.292416] kunit_try_catch (218) used greatest stack depth: 6748 bytes left
[ 106.292420] ok 9 - list-kunit-test
[ 106.310005] Freeing unused kernel image (initmem) memory: 1580K
[ 106.311600] Write protecting kernel text and read-only data: 37892k
[ 106.312308] NX-protecting the kernel data: 24708k
[ 106.321393] Run /init as init process
[ 106.321862] with arguments:
[ 106.322185] /init
[ 106.322439] with environment:
[ 106.322785] HOME=/
[ 106.323048] TERM=linux
[ 106.323345] user=lkp
[ 106.323627] job=/lkp/jobs/scheduled/vm-snb-i386-120/locktorture-300s-cpuhotplug-debian-i386-20191205.cgz-b9dc2e095274fa65d5b96231bb121a4e4616b5d6-20210203-5969-yuy83m-3.yaml
[ 106.325290] ARCH=i386
[ 106.325570] kconfig=i386-randconfig-a002-20210202
[ 106.326097] branch=linux-review/Christian-K-nig/list-add-more-extensive-double-add-check/20210201-215918
[ 106.327103] commit=b9dc2e095274fa65d5b96231bb121a4e4616b5d6
[ 106.327767] BOOT_IMAGE=/pkg/linux/i386-randconfig-a002-20210202/gcc-9/b9dc2e095274fa65d5b96231bb121a4e4616b5d6/vmlinuz-5.11.0-rc3-00219-gb9dc2e095274
[ 106.329186] max_uptime=2100
[ 106.329527] RESULT_ROOT=/result/locktorture/300s-cpuhotplug/vm-snb-i386/debian-i386-20191205.cgz/i386-randconfig-a002-20210202/gcc-9/b9dc2e095274fa65d5b96231bb121a4e4616b5d6/3
[ 106.331227] LKP_SERVER=internal-lkp-server
[ 106.331718] selinux=0
[ 106.332016] softlockup_panic=1
[ 106.332398] prompt_ramdisk=0
[ 106.332732] vga=normal
[ 106.372270] systemd[1]: RTC configured in localtime, applying delta of 0 minutes to system time.
[ 106.413240] random: systemd: uninitialized urandom read (16 bytes read)
Welcome to Debian GNU/Linux 9 (stretch)!
[ 106.566125] random: systemd: uninitialized urandom read (16 bytes read)
[ 106.635196] random: systemd-cryptse: uninitialized urandom read (16 bytes read)
[ OK ] Listening on Syslog Socket.
[ OK ] Started Dispatch Password Requests to Console Directory Watch.
[ OK ] Created slice System Slice.
Mounting RPC Pipe File System...
[ OK ] Started Forward Password Requests to Wall Directory Watch.
[ OK ] Listening on /dev/initctl Compatibility Named Pipe.
[ OK ] Listening on udev Control Socket.
[ OK ] Reached target Paths.
[ OK ] Listening on Journal Socket (/dev/log).
[ OK ] Created slice User and Session Slice.
[ OK ] Reached target Slices.
[ OK ] Reached target Encrypted Volumes.
[ OK ] Created slice system-getty.slice.
[ OK ] Listening on udev Kernel Socket.
[ OK ] Reached target Swap.
[ OK ] Listening on Journal Socket.
Starting Load Kernel Modules...
Starting Remount Root and Kernel File Systems...
Starting Create Static Device Nodes in /dev...
Starting Journal Service...
Mounting Debug File System...
[ OK ] Listening on RPCbind Server Activation Socket.
[ OK ] Mounted RPC Pipe File System.
[ OK ] Started Load Kernel Modules.
[ OK ] Started Remount Root and Kernel File Systems.
[ OK ] Started Create Static Device Nodes in /dev.
[ OK ] Mounted Debug File System.
Starting udev Kernel Device Manager...
Starting Load/Save Random Seed...
[ OK ] Reached target Local File Systems (Pre).
[ OK ] Reached target Local File Systems.
Starting Preprocess NFS configuration...
Starting udev Coldplug all Devices...
Starting Apply Kernel Variables...
Mounting Configuration File System...
[ OK ] Started udev Kernel Device Manager.
[ OK ] Started Load/Save Random Seed.
[ OK ] Started Preprocess NFS configuration.
[ OK ] Reached target NFS client services.
[ OK ] Started Apply Kernel Variables.
Starting Raise network interfaces...
[ OK ] Mounted Configuration File System.
[ 107.714356] ip (263) used greatest stack depth: 6516 bytes left
[ 108.208565] systemctl (279) used greatest stack depth: 6404 bytes left
[ OK ] Started Raise network interfaces.
[ OK ] Reached target Network.
[ OK ] Reached target Network is Online.
[ OK ] Started Journal Service.
Starting Flush Journal to Persistent Storage...
[ OK ] Started Flush Journal to Persistent Storage.
Starting Create Volatile Files and Directories...
[ OK ] Started Create Volatile Files and Directories.
Starting RPC bind portmap service...
Starting Network Time Synchronization...
Starting Update UTMP about System Boot/Shutdown...
[ OK ] Started RPC bind portmap service.
[ OK ] Reached target Remote File Systems (Pre).
[ OK ] Reached target Remote File Systems.
[ OK ] Reached target RPC Port Mapper.
[ OK ] Started Update UTMP about System Boot/Shutdown.
[ OK ] Started Network Time Synchronization.
[ OK ] Reached target System Time Synchronized.
[ OK ] Started udev Coldplug all Devices.
[ OK ] Reached target System Initialization.
[ OK ] Started Daily apt download activities.
[ OK ] Started Daily apt upgrade and clean activities.
[ 118.549503] random: fast init done
To reproduce:
# build kernel
cd linux
cp config-5.11.0-rc3-00219-gb9dc2e095274 .config
make HOSTCC=gcc-9 CC=gcc-9 ARCH=i386 olddefconfig prepare modules_prepare bzImage modules
make HOSTCC=gcc-9 CC=gcc-9 ARCH=i386 INSTALL_MOD_PATH=<mod-install-dir> modules_install
cd <mod-install-dir>
find lib/ | cpio -o -H newc --quiet | gzip > modules.cgz
git clone https://github.com/intel/lkp-tests.git
cd lkp-tests
bin/lkp qemu -k <bzImage> -m modules.cgz job-script # job-script is attached in this email
Thanks,
Oliver Sang