Subject: [PATCH 0/7] Create and populate toplevel tests/ directory

The following series of patches creates and populates the toplevel tests/
directory, which will henceforth be the home of all in-kernel tests.

All patches are against 2.6.24-rc6-mm1.

Ananth


Subject: [PATCH 1/7] Add tests/ directory

From: Ananth N Mavinakayanahalli <[email protected]>

Create a toplevel tests/ directory to house in-kernel, subsystem-specific
tests.

PS: I am not sure if I've gotten the Makefile change right.

Signed-off-by: Ananth N Mavinakayanahalli <[email protected]>
---
Makefile | 3 +++
lib/Kconfig.debug | 2 ++
tests/Kconfig | 11 +++++++++++
tests/Makefile | 3 +++
4 files changed, 19 insertions(+)
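
For illustration, here is roughly how a future test would hook into this
directory. The names MY_SUBSYS_TEST and my-subsys-test.c below are made up
for the example and are not part of this patch: the config entry would go
inside the "if KERNEL_TESTS" block in tests/Kconfig, the object would be
listed in tests/Makefile, and the test itself can be a plain initcall:

/*
 * Hypothetical tests/my-subsys-test.c -- example only, not in this series.
 *
 * tests/Kconfig (inside "if KERNEL_TESTS") would gain:
 *	config MY_SUBSYS_TEST
 *		bool "My subsystem boot-time test"
 *
 * tests/Makefile would gain:
 *	obj-$(CONFIG_MY_SUBSYS_TEST) += my-subsys-test.o
 */
#include <linux/kernel.h>
#include <linux/init.h>

static int __init my_subsys_test_init(void)
{
	/* Exercise the subsystem under test and report the outcome. */
	printk(KERN_INFO "my_subsys_test: running\n");
	return 0;
}
late_initcall(my_subsys_test_init);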

Index: linux-2.6.24-rc6/lib/Kconfig.debug
===================================================================
--- linux-2.6.24-rc6.orig/lib/Kconfig.debug
+++ linux-2.6.24-rc6/lib/Kconfig.debug
@@ -596,3 +596,5 @@ config PROVIDE_OHCI1394_DMA_INIT
See Documentation/debugging-via-ohci1394.txt for more information.

source "samples/Kconfig"
+
+source "tests/Kconfig"
Index: linux-2.6.24-rc6/tests/Kconfig
===================================================================
--- /dev/null
+++ linux-2.6.24-rc6/tests/Kconfig
@@ -0,0 +1,11 @@
+# tests/Kconfig
+
+menuconfig KERNEL_TESTS
+ bool "Kernel subsystem tests"
+ help
+ You can build kernel subsystem specific tests.
+
+if KERNEL_TESTS
+
+endif # KERNEL_TESTS
+
Index: linux-2.6.24-rc6/tests/Makefile
===================================================================
--- /dev/null
+++ linux-2.6.24-rc6/tests/Makefile
@@ -0,0 +1,3 @@
+#
+# Makefile for kernel subsystem specific tests
+#
Index: linux-2.6.24-rc6/Makefile
===================================================================
--- linux-2.6.24-rc6.orig/Makefile
+++ linux-2.6.24-rc6/Makefile
@@ -598,6 +598,9 @@ export mod_strip_cmd

ifeq ($(KBUILD_EXTMOD),)
core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
+ifdef CONFIG_KERNEL_TESTS
+core-y += tests/
+endif

vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
$(core-y) $(core-m) $(drivers-y) $(drivers-m) \

Subject: [PATCH 2/7] Move locking-selftest to tests/

From: Ananth N Mavinakayanahalli <[email protected]>

Move the locking-selftest infrastructure from lib/ to tests/. The code is
moved verbatim; only the build hooks (lib/Makefile, lib/Kconfig.debug) change,
and the CONFIG_DEBUG_LOCKING_API_SELFTESTS symbol keeps its name, now living
under the new KERNEL_TESTS menu.

Signed-off-by: Ananth N Mavinakayanahalli <[email protected]>
---
lib/locking-selftest-hardirq.h | 9
lib/locking-selftest-mutex.h | 11
lib/locking-selftest-rlock-hardirq.h | 2
lib/locking-selftest-rlock-softirq.h | 2
lib/locking-selftest-rlock.h | 14
lib/locking-selftest-rsem.h | 14
lib/locking-selftest-softirq.h | 9
lib/locking-selftest-spin-hardirq.h | 2
lib/locking-selftest-spin-softirq.h | 2
lib/locking-selftest-spin.h | 11
lib/locking-selftest-wlock-hardirq.h | 2
lib/locking-selftest-wlock-softirq.h | 2
lib/locking-selftest-wlock.h | 14
lib/locking-selftest-wsem.h | 14
lib/locking-selftest.c | 1218 ---------------------------------
lib/Kconfig.debug | 11
lib/Makefile | 1
tests/Kconfig | 11
tests/Makefile | 2
tests/locking-selftest-hardirq.h | 9
tests/locking-selftest-mutex.h | 11
tests/locking-selftest-rlock-hardirq.h | 2
tests/locking-selftest-rlock-softirq.h | 2
tests/locking-selftest-rlock.h | 14
tests/locking-selftest-rsem.h | 14
tests/locking-selftest-softirq.h | 9
tests/locking-selftest-spin-hardirq.h | 2
tests/locking-selftest-spin-softirq.h | 2
tests/locking-selftest-spin.h | 11
tests/locking-selftest-wlock-hardirq.h | 2
tests/locking-selftest-wlock-softirq.h | 2
tests/locking-selftest-wlock.h | 14
tests/locking-selftest-wsem.h | 14
tests/locking-selftest.c | 1218 +++++++++++++++++++++++++++++++++
34 files changed, 1339 insertions(+), 1338 deletions(-)
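
A note for reviewers who have not read this code before: the selftest
generates its many testcases by redefining LOCK/UNLOCK/INIT in the small
locking-selftest-*.h headers and then re-including the same E() /
GENERATE_TESTCASE() bodies once per lock flavour. Written out by hand (so
treat the expansion as approximate), the AA_spin case from the diff below
comes out as roughly:

/*
 * After #include "locking-selftest-spin.h", LOCK(x) is L(x), and L(x) is
 * spin_lock(&lock_##x), so GENERATE_TESTCASE(AA_spin) expands to:
 */
static void AA_spin(void)
{
	spin_lock(&lock_X1);
	spin_lock(&lock_X2);	/* X1 and X2 share a lock class: lockdep should flag this */
}

The boot-time hook should be unaffected by the move: if I remember correctly,
locking_selftest() is declared via <linux/debug_locks.h> and called from
start_kernel(), both keyed off the same CONFIG_DEBUG_LOCKING_API_SELFTESTS
symbol, so only the build location changes.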

Index: linux-2.6.24-rc6/lib/Makefile
===================================================================
--- linux-2.6.24-rc6.orig/lib/Makefile
+++ linux-2.6.24-rc6/lib/Makefile
@@ -25,7 +25,6 @@ lib-$(CONFIG_HOTPLUG) += kobject_uevent.
obj-$(CONFIG_GENERIC_IOMAP) += iomap.o
obj-$(CONFIG_HAS_IOMEM) += iomap_copy.o devres.o
obj-$(CONFIG_CHECK_SIGNATURE) += check_signature.o
-obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o
obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
Index: linux-2.6.24-rc6/lib/locking-selftest-hardirq.h
===================================================================
--- linux-2.6.24-rc6.orig/lib/locking-selftest-hardirq.h
+++ /dev/null
@@ -1,9 +0,0 @@
-#undef IRQ_DISABLE
-#undef IRQ_ENABLE
-#undef IRQ_ENTER
-#undef IRQ_EXIT
-
-#define IRQ_ENABLE HARDIRQ_ENABLE
-#define IRQ_DISABLE HARDIRQ_DISABLE
-#define IRQ_ENTER HARDIRQ_ENTER
-#define IRQ_EXIT HARDIRQ_EXIT
Index: linux-2.6.24-rc6/lib/locking-selftest-mutex.h
===================================================================
--- linux-2.6.24-rc6.orig/lib/locking-selftest-mutex.h
+++ /dev/null
@@ -1,11 +0,0 @@
-#undef LOCK
-#define LOCK ML
-
-#undef UNLOCK
-#define UNLOCK MU
-
-#undef RLOCK
-#undef WLOCK
-
-#undef INIT
-#define INIT MI
Index: linux-2.6.24-rc6/lib/locking-selftest-rlock-hardirq.h
===================================================================
--- linux-2.6.24-rc6.orig/lib/locking-selftest-rlock-hardirq.h
+++ /dev/null
@@ -1,2 +0,0 @@
-#include "locking-selftest-rlock.h"
-#include "locking-selftest-hardirq.h"
Index: linux-2.6.24-rc6/lib/locking-selftest-rlock-softirq.h
===================================================================
--- linux-2.6.24-rc6.orig/lib/locking-selftest-rlock-softirq.h
+++ /dev/null
@@ -1,2 +0,0 @@
-#include "locking-selftest-rlock.h"
-#include "locking-selftest-softirq.h"
Index: linux-2.6.24-rc6/lib/locking-selftest-rlock.h
===================================================================
--- linux-2.6.24-rc6.orig/lib/locking-selftest-rlock.h
+++ /dev/null
@@ -1,14 +0,0 @@
-#undef LOCK
-#define LOCK RL
-
-#undef UNLOCK
-#define UNLOCK RU
-
-#undef RLOCK
-#define RLOCK RL
-
-#undef WLOCK
-#define WLOCK WL
-
-#undef INIT
-#define INIT RWI
Index: linux-2.6.24-rc6/lib/locking-selftest-rsem.h
===================================================================
--- linux-2.6.24-rc6.orig/lib/locking-selftest-rsem.h
+++ /dev/null
@@ -1,14 +0,0 @@
-#undef LOCK
-#define LOCK RSL
-
-#undef UNLOCK
-#define UNLOCK RSU
-
-#undef RLOCK
-#define RLOCK RSL
-
-#undef WLOCK
-#define WLOCK WSL
-
-#undef INIT
-#define INIT RWSI
Index: linux-2.6.24-rc6/lib/locking-selftest-softirq.h
===================================================================
--- linux-2.6.24-rc6.orig/lib/locking-selftest-softirq.h
+++ /dev/null
@@ -1,9 +0,0 @@
-#undef IRQ_DISABLE
-#undef IRQ_ENABLE
-#undef IRQ_ENTER
-#undef IRQ_EXIT
-
-#define IRQ_DISABLE SOFTIRQ_DISABLE
-#define IRQ_ENABLE SOFTIRQ_ENABLE
-#define IRQ_ENTER SOFTIRQ_ENTER
-#define IRQ_EXIT SOFTIRQ_EXIT
Index: linux-2.6.24-rc6/lib/locking-selftest-spin-hardirq.h
===================================================================
--- linux-2.6.24-rc6.orig/lib/locking-selftest-spin-hardirq.h
+++ /dev/null
@@ -1,2 +0,0 @@
-#include "locking-selftest-spin.h"
-#include "locking-selftest-hardirq.h"
Index: linux-2.6.24-rc6/lib/locking-selftest-spin-softirq.h
===================================================================
--- linux-2.6.24-rc6.orig/lib/locking-selftest-spin-softirq.h
+++ /dev/null
@@ -1,2 +0,0 @@
-#include "locking-selftest-spin.h"
-#include "locking-selftest-softirq.h"
Index: linux-2.6.24-rc6/lib/locking-selftest-spin.h
===================================================================
--- linux-2.6.24-rc6.orig/lib/locking-selftest-spin.h
+++ /dev/null
@@ -1,11 +0,0 @@
-#undef LOCK
-#define LOCK L
-
-#undef UNLOCK
-#define UNLOCK U
-
-#undef RLOCK
-#undef WLOCK
-
-#undef INIT
-#define INIT SI
Index: linux-2.6.24-rc6/lib/locking-selftest-wlock-hardirq.h
===================================================================
--- linux-2.6.24-rc6.orig/lib/locking-selftest-wlock-hardirq.h
+++ /dev/null
@@ -1,2 +0,0 @@
-#include "locking-selftest-wlock.h"
-#include "locking-selftest-hardirq.h"
Index: linux-2.6.24-rc6/lib/locking-selftest-wlock-softirq.h
===================================================================
--- linux-2.6.24-rc6.orig/lib/locking-selftest-wlock-softirq.h
+++ /dev/null
@@ -1,2 +0,0 @@
-#include "locking-selftest-wlock.h"
-#include "locking-selftest-softirq.h"
Index: linux-2.6.24-rc6/lib/locking-selftest-wlock.h
===================================================================
--- linux-2.6.24-rc6.orig/lib/locking-selftest-wlock.h
+++ /dev/null
@@ -1,14 +0,0 @@
-#undef LOCK
-#define LOCK WL
-
-#undef UNLOCK
-#define UNLOCK WU
-
-#undef RLOCK
-#define RLOCK RL
-
-#undef WLOCK
-#define WLOCK WL
-
-#undef INIT
-#define INIT RWI
Index: linux-2.6.24-rc6/lib/locking-selftest-wsem.h
===================================================================
--- linux-2.6.24-rc6.orig/lib/locking-selftest-wsem.h
+++ /dev/null
@@ -1,14 +0,0 @@
-#undef LOCK
-#define LOCK WSL
-
-#undef UNLOCK
-#define UNLOCK WSU
-
-#undef RLOCK
-#define RLOCK RSL
-
-#undef WLOCK
-#define WLOCK WSL
-
-#undef INIT
-#define INIT RWSI
Index: linux-2.6.24-rc6/lib/locking-selftest.c
===================================================================
--- linux-2.6.24-rc6.orig/lib/locking-selftest.c
+++ /dev/null
@@ -1,1218 +0,0 @@
-/*
- * lib/locking-selftest.c
- *
- * Testsuite for various locking APIs: spinlocks, rwlocks,
- * mutexes and rw-semaphores.
- *
- * It is checking both false positives and false negatives.
- *
- * Started by Ingo Molnar:
- *
- * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <[email protected]>
- */
-#include <linux/rwsem.h>
-#include <linux/mutex.h>
-#include <linux/sched.h>
-#include <linux/delay.h>
-#include <linux/module.h>
-#include <linux/lockdep.h>
-#include <linux/spinlock.h>
-#include <linux/kallsyms.h>
-#include <linux/interrupt.h>
-#include <linux/debug_locks.h>
-#include <linux/irqflags.h>
-
-/*
- * Change this to 1 if you want to see the failure printouts:
- */
-static unsigned int debug_locks_verbose;
-
-static int __init setup_debug_locks_verbose(char *str)
-{
- get_option(&str, &debug_locks_verbose);
-
- return 1;
-}
-
-__setup("debug_locks_verbose=", setup_debug_locks_verbose);
-
-#define FAILURE 0
-#define SUCCESS 1
-
-#define LOCKTYPE_SPIN 0x1
-#define LOCKTYPE_RWLOCK 0x2
-#define LOCKTYPE_MUTEX 0x4
-#define LOCKTYPE_RWSEM 0x8
-
-/*
- * Normal standalone locks, for the circular and irq-context
- * dependency tests:
- */
-static DEFINE_SPINLOCK(lock_A);
-static DEFINE_SPINLOCK(lock_B);
-static DEFINE_SPINLOCK(lock_C);
-static DEFINE_SPINLOCK(lock_D);
-
-static DEFINE_RWLOCK(rwlock_A);
-static DEFINE_RWLOCK(rwlock_B);
-static DEFINE_RWLOCK(rwlock_C);
-static DEFINE_RWLOCK(rwlock_D);
-
-static DEFINE_MUTEX(mutex_A);
-static DEFINE_MUTEX(mutex_B);
-static DEFINE_MUTEX(mutex_C);
-static DEFINE_MUTEX(mutex_D);
-
-static DECLARE_RWSEM(rwsem_A);
-static DECLARE_RWSEM(rwsem_B);
-static DECLARE_RWSEM(rwsem_C);
-static DECLARE_RWSEM(rwsem_D);
-
-/*
- * Locks that we initialize dynamically as well so that
- * e.g. X1 and X2 becomes two instances of the same class,
- * but X* and Y* are different classes. We do this so that
- * we do not trigger a real lockup:
- */
-static DEFINE_SPINLOCK(lock_X1);
-static DEFINE_SPINLOCK(lock_X2);
-static DEFINE_SPINLOCK(lock_Y1);
-static DEFINE_SPINLOCK(lock_Y2);
-static DEFINE_SPINLOCK(lock_Z1);
-static DEFINE_SPINLOCK(lock_Z2);
-
-static DEFINE_RWLOCK(rwlock_X1);
-static DEFINE_RWLOCK(rwlock_X2);
-static DEFINE_RWLOCK(rwlock_Y1);
-static DEFINE_RWLOCK(rwlock_Y2);
-static DEFINE_RWLOCK(rwlock_Z1);
-static DEFINE_RWLOCK(rwlock_Z2);
-
-static DEFINE_MUTEX(mutex_X1);
-static DEFINE_MUTEX(mutex_X2);
-static DEFINE_MUTEX(mutex_Y1);
-static DEFINE_MUTEX(mutex_Y2);
-static DEFINE_MUTEX(mutex_Z1);
-static DEFINE_MUTEX(mutex_Z2);
-
-static DECLARE_RWSEM(rwsem_X1);
-static DECLARE_RWSEM(rwsem_X2);
-static DECLARE_RWSEM(rwsem_Y1);
-static DECLARE_RWSEM(rwsem_Y2);
-static DECLARE_RWSEM(rwsem_Z1);
-static DECLARE_RWSEM(rwsem_Z2);
-
-/*
- * non-inlined runtime initializers, to let separate locks share
- * the same lock-class:
- */
-#define INIT_CLASS_FUNC(class) \
-static noinline void \
-init_class_##class(spinlock_t *lock, rwlock_t *rwlock, struct mutex *mutex, \
- struct rw_semaphore *rwsem) \
-{ \
- spin_lock_init(lock); \
- rwlock_init(rwlock); \
- mutex_init(mutex); \
- init_rwsem(rwsem); \
-}
-
-INIT_CLASS_FUNC(X)
-INIT_CLASS_FUNC(Y)
-INIT_CLASS_FUNC(Z)
-
-static void init_shared_classes(void)
-{
- init_class_X(&lock_X1, &rwlock_X1, &mutex_X1, &rwsem_X1);
- init_class_X(&lock_X2, &rwlock_X2, &mutex_X2, &rwsem_X2);
-
- init_class_Y(&lock_Y1, &rwlock_Y1, &mutex_Y1, &rwsem_Y1);
- init_class_Y(&lock_Y2, &rwlock_Y2, &mutex_Y2, &rwsem_Y2);
-
- init_class_Z(&lock_Z1, &rwlock_Z1, &mutex_Z1, &rwsem_Z1);
- init_class_Z(&lock_Z2, &rwlock_Z2, &mutex_Z2, &rwsem_Z2);
-}
-
-/*
- * For spinlocks and rwlocks we also do hardirq-safe / softirq-safe tests.
- * The following functions use a lock from a simulated hardirq/softirq
- * context, causing the locks to be marked as hardirq-safe/softirq-safe:
- */
-
-#define HARDIRQ_DISABLE local_irq_disable
-#define HARDIRQ_ENABLE local_irq_enable
-
-#define HARDIRQ_ENTER() \
- local_irq_disable(); \
- irq_enter(); \
- WARN_ON(!in_irq());
-
-#define HARDIRQ_EXIT() \
- __irq_exit(); \
- local_irq_enable();
-
-#define SOFTIRQ_DISABLE local_bh_disable
-#define SOFTIRQ_ENABLE local_bh_enable
-
-#define SOFTIRQ_ENTER() \
- local_bh_disable(); \
- local_irq_disable(); \
- trace_softirq_enter(); \
- WARN_ON(!in_softirq());
-
-#define SOFTIRQ_EXIT() \
- trace_softirq_exit(); \
- local_irq_enable(); \
- local_bh_enable();
-
-/*
- * Shortcuts for lock/unlock API variants, to keep
- * the testcases compact:
- */
-#define L(x) spin_lock(&lock_##x)
-#define U(x) spin_unlock(&lock_##x)
-#define LU(x) L(x); U(x)
-#define SI(x) spin_lock_init(&lock_##x)
-
-#define WL(x) write_lock(&rwlock_##x)
-#define WU(x) write_unlock(&rwlock_##x)
-#define WLU(x) WL(x); WU(x)
-
-#define RL(x) read_lock(&rwlock_##x)
-#define RU(x) read_unlock(&rwlock_##x)
-#define RLU(x) RL(x); RU(x)
-#define RWI(x) rwlock_init(&rwlock_##x)
-
-#define ML(x) mutex_lock(&mutex_##x)
-#define MU(x) mutex_unlock(&mutex_##x)
-#define MI(x) mutex_init(&mutex_##x)
-
-#define WSL(x) down_write(&rwsem_##x)
-#define WSU(x) up_write(&rwsem_##x)
-
-#define RSL(x) down_read(&rwsem_##x)
-#define RSU(x) up_read(&rwsem_##x)
-#define RWSI(x) init_rwsem(&rwsem_##x)
-
-#define LOCK_UNLOCK_2(x,y) LOCK(x); LOCK(y); UNLOCK(y); UNLOCK(x)
-
-/*
- * Generate different permutations of the same testcase, using
- * the same basic lock-dependency/state events:
- */
-
-#define GENERATE_TESTCASE(name) \
- \
-static void name(void) { E(); }
-
-#define GENERATE_PERMUTATIONS_2_EVENTS(name) \
- \
-static void name##_12(void) { E1(); E2(); } \
-static void name##_21(void) { E2(); E1(); }
-
-#define GENERATE_PERMUTATIONS_3_EVENTS(name) \
- \
-static void name##_123(void) { E1(); E2(); E3(); } \
-static void name##_132(void) { E1(); E3(); E2(); } \
-static void name##_213(void) { E2(); E1(); E3(); } \
-static void name##_231(void) { E2(); E3(); E1(); } \
-static void name##_312(void) { E3(); E1(); E2(); } \
-static void name##_321(void) { E3(); E2(); E1(); }
-
-/*
- * AA deadlock:
- */
-
-#define E() \
- \
- LOCK(X1); \
- LOCK(X2); /* this one should fail */
-
-/*
- * 6 testcases:
- */
-#include "locking-selftest-spin.h"
-GENERATE_TESTCASE(AA_spin)
-#include "locking-selftest-wlock.h"
-GENERATE_TESTCASE(AA_wlock)
-#include "locking-selftest-rlock.h"
-GENERATE_TESTCASE(AA_rlock)
-#include "locking-selftest-mutex.h"
-GENERATE_TESTCASE(AA_mutex)
-#include "locking-selftest-wsem.h"
-GENERATE_TESTCASE(AA_wsem)
-#include "locking-selftest-rsem.h"
-GENERATE_TESTCASE(AA_rsem)
-
-#undef E
-
-/*
- * Special-case for read-locking, they are
- * allowed to recurse on the same lock class:
- */
-static void rlock_AA1(void)
-{
- RL(X1);
- RL(X1); // this one should NOT fail
-}
-
-static void rlock_AA1B(void)
-{
- RL(X1);
- RL(X2); // this one should NOT fail
-}
-
-static void rsem_AA1(void)
-{
- RSL(X1);
- RSL(X1); // this one should fail
-}
-
-static void rsem_AA1B(void)
-{
- RSL(X1);
- RSL(X2); // this one should fail
-}
-/*
- * The mixing of read and write locks is not allowed:
- */
-static void rlock_AA2(void)
-{
- RL(X1);
- WL(X2); // this one should fail
-}
-
-static void rsem_AA2(void)
-{
- RSL(X1);
- WSL(X2); // this one should fail
-}
-
-static void rlock_AA3(void)
-{
- WL(X1);
- RL(X2); // this one should fail
-}
-
-static void rsem_AA3(void)
-{
- WSL(X1);
- RSL(X2); // this one should fail
-}
-
-/*
- * ABBA deadlock:
- */
-
-#define E() \
- \
- LOCK_UNLOCK_2(A, B); \
- LOCK_UNLOCK_2(B, A); /* fail */
-
-/*
- * 6 testcases:
- */
-#include "locking-selftest-spin.h"
-GENERATE_TESTCASE(ABBA_spin)
-#include "locking-selftest-wlock.h"
-GENERATE_TESTCASE(ABBA_wlock)
-#include "locking-selftest-rlock.h"
-GENERATE_TESTCASE(ABBA_rlock)
-#include "locking-selftest-mutex.h"
-GENERATE_TESTCASE(ABBA_mutex)
-#include "locking-selftest-wsem.h"
-GENERATE_TESTCASE(ABBA_wsem)
-#include "locking-selftest-rsem.h"
-GENERATE_TESTCASE(ABBA_rsem)
-
-#undef E
-
-/*
- * AB BC CA deadlock:
- */
-
-#define E() \
- \
- LOCK_UNLOCK_2(A, B); \
- LOCK_UNLOCK_2(B, C); \
- LOCK_UNLOCK_2(C, A); /* fail */
-
-/*
- * 6 testcases:
- */
-#include "locking-selftest-spin.h"
-GENERATE_TESTCASE(ABBCCA_spin)
-#include "locking-selftest-wlock.h"
-GENERATE_TESTCASE(ABBCCA_wlock)
-#include "locking-selftest-rlock.h"
-GENERATE_TESTCASE(ABBCCA_rlock)
-#include "locking-selftest-mutex.h"
-GENERATE_TESTCASE(ABBCCA_mutex)
-#include "locking-selftest-wsem.h"
-GENERATE_TESTCASE(ABBCCA_wsem)
-#include "locking-selftest-rsem.h"
-GENERATE_TESTCASE(ABBCCA_rsem)
-
-#undef E
-
-/*
- * AB CA BC deadlock:
- */
-
-#define E() \
- \
- LOCK_UNLOCK_2(A, B); \
- LOCK_UNLOCK_2(C, A); \
- LOCK_UNLOCK_2(B, C); /* fail */
-
-/*
- * 6 testcases:
- */
-#include "locking-selftest-spin.h"
-GENERATE_TESTCASE(ABCABC_spin)
-#include "locking-selftest-wlock.h"
-GENERATE_TESTCASE(ABCABC_wlock)
-#include "locking-selftest-rlock.h"
-GENERATE_TESTCASE(ABCABC_rlock)
-#include "locking-selftest-mutex.h"
-GENERATE_TESTCASE(ABCABC_mutex)
-#include "locking-selftest-wsem.h"
-GENERATE_TESTCASE(ABCABC_wsem)
-#include "locking-selftest-rsem.h"
-GENERATE_TESTCASE(ABCABC_rsem)
-
-#undef E
-
-/*
- * AB BC CD DA deadlock:
- */
-
-#define E() \
- \
- LOCK_UNLOCK_2(A, B); \
- LOCK_UNLOCK_2(B, C); \
- LOCK_UNLOCK_2(C, D); \
- LOCK_UNLOCK_2(D, A); /* fail */
-
-/*
- * 6 testcases:
- */
-#include "locking-selftest-spin.h"
-GENERATE_TESTCASE(ABBCCDDA_spin)
-#include "locking-selftest-wlock.h"
-GENERATE_TESTCASE(ABBCCDDA_wlock)
-#include "locking-selftest-rlock.h"
-GENERATE_TESTCASE(ABBCCDDA_rlock)
-#include "locking-selftest-mutex.h"
-GENERATE_TESTCASE(ABBCCDDA_mutex)
-#include "locking-selftest-wsem.h"
-GENERATE_TESTCASE(ABBCCDDA_wsem)
-#include "locking-selftest-rsem.h"
-GENERATE_TESTCASE(ABBCCDDA_rsem)
-
-#undef E
-
-/*
- * AB CD BD DA deadlock:
- */
-#define E() \
- \
- LOCK_UNLOCK_2(A, B); \
- LOCK_UNLOCK_2(C, D); \
- LOCK_UNLOCK_2(B, D); \
- LOCK_UNLOCK_2(D, A); /* fail */
-
-/*
- * 6 testcases:
- */
-#include "locking-selftest-spin.h"
-GENERATE_TESTCASE(ABCDBDDA_spin)
-#include "locking-selftest-wlock.h"
-GENERATE_TESTCASE(ABCDBDDA_wlock)
-#include "locking-selftest-rlock.h"
-GENERATE_TESTCASE(ABCDBDDA_rlock)
-#include "locking-selftest-mutex.h"
-GENERATE_TESTCASE(ABCDBDDA_mutex)
-#include "locking-selftest-wsem.h"
-GENERATE_TESTCASE(ABCDBDDA_wsem)
-#include "locking-selftest-rsem.h"
-GENERATE_TESTCASE(ABCDBDDA_rsem)
-
-#undef E
-
-/*
- * AB CD BC DA deadlock:
- */
-#define E() \
- \
- LOCK_UNLOCK_2(A, B); \
- LOCK_UNLOCK_2(C, D); \
- LOCK_UNLOCK_2(B, C); \
- LOCK_UNLOCK_2(D, A); /* fail */
-
-/*
- * 6 testcases:
- */
-#include "locking-selftest-spin.h"
-GENERATE_TESTCASE(ABCDBCDA_spin)
-#include "locking-selftest-wlock.h"
-GENERATE_TESTCASE(ABCDBCDA_wlock)
-#include "locking-selftest-rlock.h"
-GENERATE_TESTCASE(ABCDBCDA_rlock)
-#include "locking-selftest-mutex.h"
-GENERATE_TESTCASE(ABCDBCDA_mutex)
-#include "locking-selftest-wsem.h"
-GENERATE_TESTCASE(ABCDBCDA_wsem)
-#include "locking-selftest-rsem.h"
-GENERATE_TESTCASE(ABCDBCDA_rsem)
-
-#undef E
-
-/*
- * Double unlock:
- */
-#define E() \
- \
- LOCK(A); \
- UNLOCK(A); \
- UNLOCK(A); /* fail */
-
-/*
- * 6 testcases:
- */
-#include "locking-selftest-spin.h"
-GENERATE_TESTCASE(double_unlock_spin)
-#include "locking-selftest-wlock.h"
-GENERATE_TESTCASE(double_unlock_wlock)
-#include "locking-selftest-rlock.h"
-GENERATE_TESTCASE(double_unlock_rlock)
-#include "locking-selftest-mutex.h"
-GENERATE_TESTCASE(double_unlock_mutex)
-#include "locking-selftest-wsem.h"
-GENERATE_TESTCASE(double_unlock_wsem)
-#include "locking-selftest-rsem.h"
-GENERATE_TESTCASE(double_unlock_rsem)
-
-#undef E
-
-/*
- * Bad unlock ordering:
- */
-#define E() \
- \
- LOCK(A); \
- LOCK(B); \
- UNLOCK(A); /* fail */ \
- UNLOCK(B);
-
-/*
- * 6 testcases:
- */
-#include "locking-selftest-spin.h"
-GENERATE_TESTCASE(bad_unlock_order_spin)
-#include "locking-selftest-wlock.h"
-GENERATE_TESTCASE(bad_unlock_order_wlock)
-#include "locking-selftest-rlock.h"
-GENERATE_TESTCASE(bad_unlock_order_rlock)
-#include "locking-selftest-mutex.h"
-GENERATE_TESTCASE(bad_unlock_order_mutex)
-#include "locking-selftest-wsem.h"
-GENERATE_TESTCASE(bad_unlock_order_wsem)
-#include "locking-selftest-rsem.h"
-GENERATE_TESTCASE(bad_unlock_order_rsem)
-
-#undef E
-
-/*
- * initializing a held lock:
- */
-#define E() \
- \
- LOCK(A); \
- INIT(A); /* fail */
-
-/*
- * 6 testcases:
- */
-#include "locking-selftest-spin.h"
-GENERATE_TESTCASE(init_held_spin)
-#include "locking-selftest-wlock.h"
-GENERATE_TESTCASE(init_held_wlock)
-#include "locking-selftest-rlock.h"
-GENERATE_TESTCASE(init_held_rlock)
-#include "locking-selftest-mutex.h"
-GENERATE_TESTCASE(init_held_mutex)
-#include "locking-selftest-wsem.h"
-GENERATE_TESTCASE(init_held_wsem)
-#include "locking-selftest-rsem.h"
-GENERATE_TESTCASE(init_held_rsem)
-
-#undef E
-
-/*
- * locking an irq-safe lock with irqs enabled:
- */
-#define E1() \
- \
- IRQ_ENTER(); \
- LOCK(A); \
- UNLOCK(A); \
- IRQ_EXIT();
-
-#define E2() \
- \
- LOCK(A); \
- UNLOCK(A);
-
-/*
- * Generate 24 testcases:
- */
-#include "locking-selftest-spin-hardirq.h"
-GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_spin)
-
-#include "locking-selftest-rlock-hardirq.h"
-GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_rlock)
-
-#include "locking-selftest-wlock-hardirq.h"
-GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_wlock)
-
-#include "locking-selftest-spin-softirq.h"
-GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_spin)
-
-#include "locking-selftest-rlock-softirq.h"
-GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_rlock)
-
-#include "locking-selftest-wlock-softirq.h"
-GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_wlock)
-
-#undef E1
-#undef E2
-
-/*
- * Enabling hardirqs with a softirq-safe lock held:
- */
-#define E1() \
- \
- SOFTIRQ_ENTER(); \
- LOCK(A); \
- UNLOCK(A); \
- SOFTIRQ_EXIT();
-
-#define E2() \
- \
- HARDIRQ_DISABLE(); \
- LOCK(A); \
- HARDIRQ_ENABLE(); \
- UNLOCK(A);
-
-/*
- * Generate 12 testcases:
- */
-#include "locking-selftest-spin.h"
-GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2A_spin)
-
-#include "locking-selftest-wlock.h"
-GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2A_wlock)
-
-#include "locking-selftest-rlock.h"
-GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2A_rlock)
-
-#undef E1
-#undef E2
-
-/*
- * Enabling irqs with an irq-safe lock held:
- */
-#define E1() \
- \
- IRQ_ENTER(); \
- LOCK(A); \
- UNLOCK(A); \
- IRQ_EXIT();
-
-#define E2() \
- \
- IRQ_DISABLE(); \
- LOCK(A); \
- IRQ_ENABLE(); \
- UNLOCK(A);
-
-/*
- * Generate 24 testcases:
- */
-#include "locking-selftest-spin-hardirq.h"
-GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_spin)
-
-#include "locking-selftest-rlock-hardirq.h"
-GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_rlock)
-
-#include "locking-selftest-wlock-hardirq.h"
-GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_wlock)
-
-#include "locking-selftest-spin-softirq.h"
-GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_spin)
-
-#include "locking-selftest-rlock-softirq.h"
-GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_rlock)
-
-#include "locking-selftest-wlock-softirq.h"
-GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_wlock)
-
-#undef E1
-#undef E2
-
-/*
- * Acquiring a irq-unsafe lock while holding an irq-safe-lock:
- */
-#define E1() \
- \
- LOCK(A); \
- LOCK(B); \
- UNLOCK(B); \
- UNLOCK(A); \
-
-#define E2() \
- \
- LOCK(B); \
- UNLOCK(B);
-
-#define E3() \
- \
- IRQ_ENTER(); \
- LOCK(A); \
- UNLOCK(A); \
- IRQ_EXIT();
-
-/*
- * Generate 36 testcases:
- */
-#include "locking-selftest-spin-hardirq.h"
-GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_spin)
-
-#include "locking-selftest-rlock-hardirq.h"
-GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_rlock)
-
-#include "locking-selftest-wlock-hardirq.h"
-GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_wlock)
-
-#include "locking-selftest-spin-softirq.h"
-GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_spin)
-
-#include "locking-selftest-rlock-softirq.h"
-GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_rlock)
-
-#include "locking-selftest-wlock-softirq.h"
-GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_wlock)
-
-#undef E1
-#undef E2
-#undef E3
-
-/*
- * If a lock turns into softirq-safe, but earlier it took
- * a softirq-unsafe lock:
- */
-
-#define E1() \
- IRQ_DISABLE(); \
- LOCK(A); \
- LOCK(B); \
- UNLOCK(B); \
- UNLOCK(A); \
- IRQ_ENABLE();
-
-#define E2() \
- LOCK(B); \
- UNLOCK(B);
-
-#define E3() \
- IRQ_ENTER(); \
- LOCK(A); \
- UNLOCK(A); \
- IRQ_EXIT();
-
-/*
- * Generate 36 testcases:
- */
-#include "locking-selftest-spin-hardirq.h"
-GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_spin)
-
-#include "locking-selftest-rlock-hardirq.h"
-GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_rlock)
-
-#include "locking-selftest-wlock-hardirq.h"
-GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_wlock)
-
-#include "locking-selftest-spin-softirq.h"
-GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_spin)
-
-#include "locking-selftest-rlock-softirq.h"
-GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_rlock)
-
-#include "locking-selftest-wlock-softirq.h"
-GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_wlock)
-
-#undef E1
-#undef E2
-#undef E3
-
-/*
- * read-lock / write-lock irq inversion.
- *
- * Deadlock scenario:
- *
- * CPU#1 is at #1, i.e. it has write-locked A, but has not
- * taken B yet.
- *
- * CPU#2 is at #2, i.e. it has locked B.
- *
- * Hardirq hits CPU#2 at point #2 and is trying to read-lock A.
- *
- * The deadlock occurs because CPU#1 will spin on B, and CPU#2
- * will spin on A.
- */
-
-#define E1() \
- \
- IRQ_DISABLE(); \
- WL(A); \
- LOCK(B); \
- UNLOCK(B); \
- WU(A); \
- IRQ_ENABLE();
-
-#define E2() \
- \
- LOCK(B); \
- UNLOCK(B);
-
-#define E3() \
- \
- IRQ_ENTER(); \
- RL(A); \
- RU(A); \
- IRQ_EXIT();
-
-/*
- * Generate 36 testcases:
- */
-#include "locking-selftest-spin-hardirq.h"
-GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_hard_spin)
-
-#include "locking-selftest-rlock-hardirq.h"
-GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_hard_rlock)
-
-#include "locking-selftest-wlock-hardirq.h"
-GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_hard_wlock)
-
-#include "locking-selftest-spin-softirq.h"
-GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_soft_spin)
-
-#include "locking-selftest-rlock-softirq.h"
-GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_soft_rlock)
-
-#include "locking-selftest-wlock-softirq.h"
-GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_soft_wlock)
-
-#undef E1
-#undef E2
-#undef E3
-
-/*
- * read-lock / write-lock recursion that is actually safe.
- */
-
-#define E1() \
- \
- IRQ_DISABLE(); \
- WL(A); \
- WU(A); \
- IRQ_ENABLE();
-
-#define E2() \
- \
- RL(A); \
- RU(A); \
-
-#define E3() \
- \
- IRQ_ENTER(); \
- RL(A); \
- L(B); \
- U(B); \
- RU(A); \
- IRQ_EXIT();
-
-/*
- * Generate 12 testcases:
- */
-#include "locking-selftest-hardirq.h"
-GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_hard)
-
-#include "locking-selftest-softirq.h"
-GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft)
-
-#undef E1
-#undef E2
-#undef E3
-
-/*
- * read-lock / write-lock recursion that is unsafe.
- */
-
-#define E1() \
- \
- IRQ_DISABLE(); \
- L(B); \
- WL(A); \
- WU(A); \
- U(B); \
- IRQ_ENABLE();
-
-#define E2() \
- \
- RL(A); \
- RU(A); \
-
-#define E3() \
- \
- IRQ_ENTER(); \
- L(B); \
- U(B); \
- IRQ_EXIT();
-
-/*
- * Generate 12 testcases:
- */
-#include "locking-selftest-hardirq.h"
-// GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion2_hard)
-
-#include "locking-selftest-softirq.h"
-// GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion2_soft)
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define I_SPINLOCK(x) lockdep_reset_lock(&lock_##x.dep_map)
-# define I_RWLOCK(x) lockdep_reset_lock(&rwlock_##x.dep_map)
-# define I_MUTEX(x) lockdep_reset_lock(&mutex_##x.dep_map)
-# define I_RWSEM(x) lockdep_reset_lock(&rwsem_##x.dep_map)
-#else
-# define I_SPINLOCK(x)
-# define I_RWLOCK(x)
-# define I_MUTEX(x)
-# define I_RWSEM(x)
-#endif
-
-#define I1(x) \
- do { \
- I_SPINLOCK(x); \
- I_RWLOCK(x); \
- I_MUTEX(x); \
- I_RWSEM(x); \
- } while (0)
-
-#define I2(x) \
- do { \
- spin_lock_init(&lock_##x); \
- rwlock_init(&rwlock_##x); \
- mutex_init(&mutex_##x); \
- init_rwsem(&rwsem_##x); \
- } while (0)
-
-static void reset_locks(void)
-{
- local_irq_disable();
- I1(A); I1(B); I1(C); I1(D);
- I1(X1); I1(X2); I1(Y1); I1(Y2); I1(Z1); I1(Z2);
- lockdep_reset();
- I2(A); I2(B); I2(C); I2(D);
- init_shared_classes();
- local_irq_enable();
-}
-
-#undef I
-
-static int testcase_total;
-static int testcase_successes;
-static int expected_testcase_failures;
-static int unexpected_testcase_failures;
-
-static void dotest(void (*testcase_fn)(void), int expected, int lockclass_mask)
-{
- unsigned long saved_preempt_count = preempt_count();
- int expected_failure = 0;
-
- WARN_ON(irqs_disabled());
-
- testcase_fn();
- /*
- * Filter out expected failures:
- */
-#ifndef CONFIG_PROVE_LOCKING
- if ((lockclass_mask & LOCKTYPE_SPIN) && debug_locks != expected)
- expected_failure = 1;
- if ((lockclass_mask & LOCKTYPE_RWLOCK) && debug_locks != expected)
- expected_failure = 1;
- if ((lockclass_mask & LOCKTYPE_MUTEX) && debug_locks != expected)
- expected_failure = 1;
- if ((lockclass_mask & LOCKTYPE_RWSEM) && debug_locks != expected)
- expected_failure = 1;
-#endif
- if (debug_locks != expected) {
- if (expected_failure) {
- expected_testcase_failures++;
- printk("failed|");
- } else {
- unexpected_testcase_failures++;
-
- printk("FAILED|");
- dump_stack();
- }
- } else {
- testcase_successes++;
- printk(" ok |");
- }
- testcase_total++;
-
- if (debug_locks_verbose)
- printk(" lockclass mask: %x, debug_locks: %d, expected: %d\n",
- lockclass_mask, debug_locks, expected);
- /*
- * Some tests (e.g. double-unlock) might corrupt the preemption
- * count, so restore it:
- */
- preempt_count() = saved_preempt_count;
-#ifdef CONFIG_TRACE_IRQFLAGS
- if (softirq_count())
- current->softirqs_enabled = 0;
- else
- current->softirqs_enabled = 1;
-#endif
-
- reset_locks();
-}
-
-static inline void print_testname(const char *testname)
-{
- printk("%33s:", testname);
-}
-
-#define DO_TESTCASE_1(desc, name, nr) \
- print_testname(desc"/"#nr); \
- dotest(name##_##nr, SUCCESS, LOCKTYPE_RWLOCK); \
- printk("\n");
-
-#define DO_TESTCASE_1B(desc, name, nr) \
- print_testname(desc"/"#nr); \
- dotest(name##_##nr, FAILURE, LOCKTYPE_RWLOCK); \
- printk("\n");
-
-#define DO_TESTCASE_3(desc, name, nr) \
- print_testname(desc"/"#nr); \
- dotest(name##_spin_##nr, FAILURE, LOCKTYPE_SPIN); \
- dotest(name##_wlock_##nr, FAILURE, LOCKTYPE_RWLOCK); \
- dotest(name##_rlock_##nr, SUCCESS, LOCKTYPE_RWLOCK); \
- printk("\n");
-
-#define DO_TESTCASE_3RW(desc, name, nr) \
- print_testname(desc"/"#nr); \
- dotest(name##_spin_##nr, FAILURE, LOCKTYPE_SPIN|LOCKTYPE_RWLOCK);\
- dotest(name##_wlock_##nr, FAILURE, LOCKTYPE_RWLOCK); \
- dotest(name##_rlock_##nr, SUCCESS, LOCKTYPE_RWLOCK); \
- printk("\n");
-
-#define DO_TESTCASE_6(desc, name) \
- print_testname(desc); \
- dotest(name##_spin, FAILURE, LOCKTYPE_SPIN); \
- dotest(name##_wlock, FAILURE, LOCKTYPE_RWLOCK); \
- dotest(name##_rlock, FAILURE, LOCKTYPE_RWLOCK); \
- dotest(name##_mutex, FAILURE, LOCKTYPE_MUTEX); \
- dotest(name##_wsem, FAILURE, LOCKTYPE_RWSEM); \
- dotest(name##_rsem, FAILURE, LOCKTYPE_RWSEM); \
- printk("\n");
-
-#define DO_TESTCASE_6_SUCCESS(desc, name) \
- print_testname(desc); \
- dotest(name##_spin, SUCCESS, LOCKTYPE_SPIN); \
- dotest(name##_wlock, SUCCESS, LOCKTYPE_RWLOCK); \
- dotest(name##_rlock, SUCCESS, LOCKTYPE_RWLOCK); \
- dotest(name##_mutex, SUCCESS, LOCKTYPE_MUTEX); \
- dotest(name##_wsem, SUCCESS, LOCKTYPE_RWSEM); \
- dotest(name##_rsem, SUCCESS, LOCKTYPE_RWSEM); \
- printk("\n");
-
-/*
- * 'read' variant: rlocks must not trigger.
- */
-#define DO_TESTCASE_6R(desc, name) \
- print_testname(desc); \
- dotest(name##_spin, FAILURE, LOCKTYPE_SPIN); \
- dotest(name##_wlock, FAILURE, LOCKTYPE_RWLOCK); \
- dotest(name##_rlock, SUCCESS, LOCKTYPE_RWLOCK); \
- dotest(name##_mutex, FAILURE, LOCKTYPE_MUTEX); \
- dotest(name##_wsem, FAILURE, LOCKTYPE_RWSEM); \
- dotest(name##_rsem, FAILURE, LOCKTYPE_RWSEM); \
- printk("\n");
-
-#define DO_TESTCASE_2I(desc, name, nr) \
- DO_TESTCASE_1("hard-"desc, name##_hard, nr); \
- DO_TESTCASE_1("soft-"desc, name##_soft, nr);
-
-#define DO_TESTCASE_2IB(desc, name, nr) \
- DO_TESTCASE_1B("hard-"desc, name##_hard, nr); \
- DO_TESTCASE_1B("soft-"desc, name##_soft, nr);
-
-#define DO_TESTCASE_6I(desc, name, nr) \
- DO_TESTCASE_3("hard-"desc, name##_hard, nr); \
- DO_TESTCASE_3("soft-"desc, name##_soft, nr);
-
-#define DO_TESTCASE_6IRW(desc, name, nr) \
- DO_TESTCASE_3RW("hard-"desc, name##_hard, nr); \
- DO_TESTCASE_3RW("soft-"desc, name##_soft, nr);
-
-#define DO_TESTCASE_2x3(desc, name) \
- DO_TESTCASE_3(desc, name, 12); \
- DO_TESTCASE_3(desc, name, 21);
-
-#define DO_TESTCASE_2x6(desc, name) \
- DO_TESTCASE_6I(desc, name, 12); \
- DO_TESTCASE_6I(desc, name, 21);
-
-#define DO_TESTCASE_6x2(desc, name) \
- DO_TESTCASE_2I(desc, name, 123); \
- DO_TESTCASE_2I(desc, name, 132); \
- DO_TESTCASE_2I(desc, name, 213); \
- DO_TESTCASE_2I(desc, name, 231); \
- DO_TESTCASE_2I(desc, name, 312); \
- DO_TESTCASE_2I(desc, name, 321);
-
-#define DO_TESTCASE_6x2B(desc, name) \
- DO_TESTCASE_2IB(desc, name, 123); \
- DO_TESTCASE_2IB(desc, name, 132); \
- DO_TESTCASE_2IB(desc, name, 213); \
- DO_TESTCASE_2IB(desc, name, 231); \
- DO_TESTCASE_2IB(desc, name, 312); \
- DO_TESTCASE_2IB(desc, name, 321);
-
-#define DO_TESTCASE_6x6(desc, name) \
- DO_TESTCASE_6I(desc, name, 123); \
- DO_TESTCASE_6I(desc, name, 132); \
- DO_TESTCASE_6I(desc, name, 213); \
- DO_TESTCASE_6I(desc, name, 231); \
- DO_TESTCASE_6I(desc, name, 312); \
- DO_TESTCASE_6I(desc, name, 321);
-
-#define DO_TESTCASE_6x6RW(desc, name) \
- DO_TESTCASE_6IRW(desc, name, 123); \
- DO_TESTCASE_6IRW(desc, name, 132); \
- DO_TESTCASE_6IRW(desc, name, 213); \
- DO_TESTCASE_6IRW(desc, name, 231); \
- DO_TESTCASE_6IRW(desc, name, 312); \
- DO_TESTCASE_6IRW(desc, name, 321);
-
-
-void locking_selftest(void)
-{
- /*
- * Got a locking failure before the selftest ran?
- */
- if (!debug_locks) {
- printk("----------------------------------\n");
- printk("| Locking API testsuite disabled |\n");
- printk("----------------------------------\n");
- return;
- }
-
- /*
- * Run the testsuite:
- */
- printk("------------------------\n");
- printk("| Locking API testsuite:\n");
- printk("----------------------------------------------------------------------------\n");
- printk(" | spin |wlock |rlock |mutex | wsem | rsem |\n");
- printk(" --------------------------------------------------------------------------\n");
-
- init_shared_classes();
- debug_locks_silent = !debug_locks_verbose;
-
- DO_TESTCASE_6R("A-A deadlock", AA);
- DO_TESTCASE_6R("A-B-B-A deadlock", ABBA);
- DO_TESTCASE_6R("A-B-B-C-C-A deadlock", ABBCCA);
- DO_TESTCASE_6R("A-B-C-A-B-C deadlock", ABCABC);
- DO_TESTCASE_6R("A-B-B-C-C-D-D-A deadlock", ABBCCDDA);
- DO_TESTCASE_6R("A-B-C-D-B-D-D-A deadlock", ABCDBDDA);
- DO_TESTCASE_6R("A-B-C-D-B-C-D-A deadlock", ABCDBCDA);
- DO_TESTCASE_6("double unlock", double_unlock);
- DO_TESTCASE_6("initialize held", init_held);
- DO_TESTCASE_6_SUCCESS("bad unlock order", bad_unlock_order);
-
- printk(" --------------------------------------------------------------------------\n");
- print_testname("recursive read-lock");
- printk(" |");
- dotest(rlock_AA1, SUCCESS, LOCKTYPE_RWLOCK);
- printk(" |");
- dotest(rsem_AA1, FAILURE, LOCKTYPE_RWSEM);
- printk("\n");
-
- print_testname("recursive read-lock #2");
- printk(" |");
- dotest(rlock_AA1B, SUCCESS, LOCKTYPE_RWLOCK);
- printk(" |");
- dotest(rsem_AA1B, FAILURE, LOCKTYPE_RWSEM);
- printk("\n");
-
- print_testname("mixed read-write-lock");
- printk(" |");
- dotest(rlock_AA2, FAILURE, LOCKTYPE_RWLOCK);
- printk(" |");
- dotest(rsem_AA2, FAILURE, LOCKTYPE_RWSEM);
- printk("\n");
-
- print_testname("mixed write-read-lock");
- printk(" |");
- dotest(rlock_AA3, FAILURE, LOCKTYPE_RWLOCK);
- printk(" |");
- dotest(rsem_AA3, FAILURE, LOCKTYPE_RWSEM);
- printk("\n");
-
- printk(" --------------------------------------------------------------------------\n");
-
- /*
- * irq-context testcases:
- */
- DO_TESTCASE_2x6("irqs-on + irq-safe-A", irqsafe1);
- DO_TESTCASE_2x3("sirq-safe-A => hirqs-on", irqsafe2A);
- DO_TESTCASE_2x6("safe-A + irqs-on", irqsafe2B);
- DO_TESTCASE_6x6("safe-A + unsafe-B #1", irqsafe3);
- DO_TESTCASE_6x6("safe-A + unsafe-B #2", irqsafe4);
- DO_TESTCASE_6x6RW("irq lock-inversion", irq_inversion);
-
- DO_TESTCASE_6x2("irq read-recursion", irq_read_recursion);
-// DO_TESTCASE_6x2B("irq read-recursion #2", irq_read_recursion2);
-
- if (unexpected_testcase_failures) {
- printk("-----------------------------------------------------------------\n");
- debug_locks = 0;
- printk("BUG: %3d unexpected failures (out of %3d) - debugging disabled! |\n",
- unexpected_testcase_failures, testcase_total);
- printk("-----------------------------------------------------------------\n");
- } else if (expected_testcase_failures && testcase_successes) {
- printk("--------------------------------------------------------\n");
- printk("%3d out of %3d testcases failed, as expected. |\n",
- expected_testcase_failures, testcase_total);
- printk("----------------------------------------------------\n");
- debug_locks = 1;
- } else if (expected_testcase_failures && !testcase_successes) {
- printk("--------------------------------------------------------\n");
- printk("All %3d testcases failed, as expected. |\n",
- expected_testcase_failures);
- printk("----------------------------------------\n");
- debug_locks = 1;
- } else {
- printk("-------------------------------------------------------\n");
- printk("Good, all %3d testcases passed! |\n",
- testcase_successes);
- printk("---------------------------------\n");
- debug_locks = 1;
- }
- debug_locks_silent = 0;
-}
Index: linux-2.6.24-rc6/tests/Kconfig
===================================================================
--- linux-2.6.24-rc6.orig/tests/Kconfig
+++ linux-2.6.24-rc6/tests/Kconfig
@@ -7,5 +7,16 @@ menuconfig KERNEL_TESTS

if KERNEL_TESTS

+config DEBUG_LOCKING_API_SELFTESTS
+ bool "Locking API boot-time self-tests"
+ depends on DEBUG_KERNEL
+ help
+ Say Y here if you want the kernel to run a short self-test during
+ bootup. The self-test checks whether common types of locking bugs
+ are detected by debugging mechanisms or not. (if you disable
+ lock debugging then those bugs wont be detected of course.)
+ The following locking APIs are covered: spinlocks, rwlocks,
+ mutexes and rwsems.
+
endif # KERNEL_TESTS

Index: linux-2.6.24-rc6/tests/Makefile
===================================================================
--- linux-2.6.24-rc6.orig/tests/Makefile
+++ linux-2.6.24-rc6/tests/Makefile
@@ -1,3 +1,5 @@
#
# Makefile for kernel subsystem specific tests
#
+
+obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o
Index: linux-2.6.24-rc6/tests/locking-selftest-hardirq.h
===================================================================
--- /dev/null
+++ linux-2.6.24-rc6/tests/locking-selftest-hardirq.h
@@ -0,0 +1,9 @@
+#undef IRQ_DISABLE
+#undef IRQ_ENABLE
+#undef IRQ_ENTER
+#undef IRQ_EXIT
+
+#define IRQ_ENABLE HARDIRQ_ENABLE
+#define IRQ_DISABLE HARDIRQ_DISABLE
+#define IRQ_ENTER HARDIRQ_ENTER
+#define IRQ_EXIT HARDIRQ_EXIT
Index: linux-2.6.24-rc6/tests/locking-selftest-mutex.h
===================================================================
--- /dev/null
+++ linux-2.6.24-rc6/tests/locking-selftest-mutex.h
@@ -0,0 +1,11 @@
+#undef LOCK
+#define LOCK ML
+
+#undef UNLOCK
+#define UNLOCK MU
+
+#undef RLOCK
+#undef WLOCK
+
+#undef INIT
+#define INIT MI
Index: linux-2.6.24-rc6/tests/locking-selftest-rlock-hardirq.h
===================================================================
--- /dev/null
+++ linux-2.6.24-rc6/tests/locking-selftest-rlock-hardirq.h
@@ -0,0 +1,2 @@
+#include "locking-selftest-rlock.h"
+#include "locking-selftest-hardirq.h"
Index: linux-2.6.24-rc6/tests/locking-selftest-rlock-softirq.h
===================================================================
--- /dev/null
+++ linux-2.6.24-rc6/tests/locking-selftest-rlock-softirq.h
@@ -0,0 +1,2 @@
+#include "locking-selftest-rlock.h"
+#include "locking-selftest-softirq.h"
Index: linux-2.6.24-rc6/tests/locking-selftest-rlock.h
===================================================================
--- /dev/null
+++ linux-2.6.24-rc6/tests/locking-selftest-rlock.h
@@ -0,0 +1,14 @@
+#undef LOCK
+#define LOCK RL
+
+#undef UNLOCK
+#define UNLOCK RU
+
+#undef RLOCK
+#define RLOCK RL
+
+#undef WLOCK
+#define WLOCK WL
+
+#undef INIT
+#define INIT RWI
Index: linux-2.6.24-rc6/tests/locking-selftest-rsem.h
===================================================================
--- /dev/null
+++ linux-2.6.24-rc6/tests/locking-selftest-rsem.h
@@ -0,0 +1,14 @@
+#undef LOCK
+#define LOCK RSL
+
+#undef UNLOCK
+#define UNLOCK RSU
+
+#undef RLOCK
+#define RLOCK RSL
+
+#undef WLOCK
+#define WLOCK WSL
+
+#undef INIT
+#define INIT RWSI
Index: linux-2.6.24-rc6/tests/locking-selftest-softirq.h
===================================================================
--- /dev/null
+++ linux-2.6.24-rc6/tests/locking-selftest-softirq.h
@@ -0,0 +1,9 @@
+#undef IRQ_DISABLE
+#undef IRQ_ENABLE
+#undef IRQ_ENTER
+#undef IRQ_EXIT
+
+#define IRQ_DISABLE SOFTIRQ_DISABLE
+#define IRQ_ENABLE SOFTIRQ_ENABLE
+#define IRQ_ENTER SOFTIRQ_ENTER
+#define IRQ_EXIT SOFTIRQ_EXIT
Index: linux-2.6.24-rc6/tests/locking-selftest-spin-hardirq.h
===================================================================
--- /dev/null
+++ linux-2.6.24-rc6/tests/locking-selftest-spin-hardirq.h
@@ -0,0 +1,2 @@
+#include "locking-selftest-spin.h"
+#include "locking-selftest-hardirq.h"
Index: linux-2.6.24-rc6/tests/locking-selftest-spin-softirq.h
===================================================================
--- /dev/null
+++ linux-2.6.24-rc6/tests/locking-selftest-spin-softirq.h
@@ -0,0 +1,2 @@
+#include "locking-selftest-spin.h"
+#include "locking-selftest-softirq.h"
Index: linux-2.6.24-rc6/tests/locking-selftest-spin.h
===================================================================
--- /dev/null
+++ linux-2.6.24-rc6/tests/locking-selftest-spin.h
@@ -0,0 +1,11 @@
+#undef LOCK
+#define LOCK L
+
+#undef UNLOCK
+#define UNLOCK U
+
+#undef RLOCK
+#undef WLOCK
+
+#undef INIT
+#define INIT SI
Index: linux-2.6.24-rc6/tests/locking-selftest-wlock-hardirq.h
===================================================================
--- /dev/null
+++ linux-2.6.24-rc6/tests/locking-selftest-wlock-hardirq.h
@@ -0,0 +1,2 @@
+#include "locking-selftest-wlock.h"
+#include "locking-selftest-hardirq.h"
Index: linux-2.6.24-rc6/tests/locking-selftest-wlock-softirq.h
===================================================================
--- /dev/null
+++ linux-2.6.24-rc6/tests/locking-selftest-wlock-softirq.h
@@ -0,0 +1,2 @@
+#include "locking-selftest-wlock.h"
+#include "locking-selftest-softirq.h"
Index: linux-2.6.24-rc6/tests/locking-selftest-wlock.h
===================================================================
--- /dev/null
+++ linux-2.6.24-rc6/tests/locking-selftest-wlock.h
@@ -0,0 +1,14 @@
+#undef LOCK
+#define LOCK WL
+
+#undef UNLOCK
+#define UNLOCK WU
+
+#undef RLOCK
+#define RLOCK RL
+
+#undef WLOCK
+#define WLOCK WL
+
+#undef INIT
+#define INIT RWI
Index: linux-2.6.24-rc6/tests/locking-selftest-wsem.h
===================================================================
--- /dev/null
+++ linux-2.6.24-rc6/tests/locking-selftest-wsem.h
@@ -0,0 +1,14 @@
+#undef LOCK
+#define LOCK WSL
+
+#undef UNLOCK
+#define UNLOCK WSU
+
+#undef RLOCK
+#define RLOCK RSL
+
+#undef WLOCK
+#define WLOCK WSL
+
+#undef INIT
+#define INIT RWSI
Index: linux-2.6.24-rc6/tests/locking-selftest.c
===================================================================
--- /dev/null
+++ linux-2.6.24-rc6/tests/locking-selftest.c
@@ -0,0 +1,1218 @@
+/*
+ * lib/locking-selftest.c
+ *
+ * Testsuite for various locking APIs: spinlocks, rwlocks,
+ * mutexes and rw-semaphores.
+ *
+ * It is checking both false positives and false negatives.
+ *
+ * Started by Ingo Molnar:
+ *
+ * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <[email protected]>
+ */
+#include <linux/rwsem.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/lockdep.h>
+#include <linux/spinlock.h>
+#include <linux/kallsyms.h>
+#include <linux/interrupt.h>
+#include <linux/debug_locks.h>
+#include <linux/irqflags.h>
+
+/*
+ * Change this to 1 if you want to see the failure printouts:
+ */
+static unsigned int debug_locks_verbose;
+
+static int __init setup_debug_locks_verbose(char *str)
+{
+ get_option(&str, &debug_locks_verbose);
+
+ return 1;
+}
+
+__setup("debug_locks_verbose=", setup_debug_locks_verbose);
+
+#define FAILURE 0
+#define SUCCESS 1
+
+#define LOCKTYPE_SPIN 0x1
+#define LOCKTYPE_RWLOCK 0x2
+#define LOCKTYPE_MUTEX 0x4
+#define LOCKTYPE_RWSEM 0x8
+
+/*
+ * Normal standalone locks, for the circular and irq-context
+ * dependency tests:
+ */
+static DEFINE_SPINLOCK(lock_A);
+static DEFINE_SPINLOCK(lock_B);
+static DEFINE_SPINLOCK(lock_C);
+static DEFINE_SPINLOCK(lock_D);
+
+static DEFINE_RWLOCK(rwlock_A);
+static DEFINE_RWLOCK(rwlock_B);
+static DEFINE_RWLOCK(rwlock_C);
+static DEFINE_RWLOCK(rwlock_D);
+
+static DEFINE_MUTEX(mutex_A);
+static DEFINE_MUTEX(mutex_B);
+static DEFINE_MUTEX(mutex_C);
+static DEFINE_MUTEX(mutex_D);
+
+static DECLARE_RWSEM(rwsem_A);
+static DECLARE_RWSEM(rwsem_B);
+static DECLARE_RWSEM(rwsem_C);
+static DECLARE_RWSEM(rwsem_D);
+
+/*
+ * Locks that we initialize dynamically as well so that
+ * e.g. X1 and X2 becomes two instances of the same class,
+ * but X* and Y* are different classes. We do this so that
+ * we do not trigger a real lockup:
+ */
+static DEFINE_SPINLOCK(lock_X1);
+static DEFINE_SPINLOCK(lock_X2);
+static DEFINE_SPINLOCK(lock_Y1);
+static DEFINE_SPINLOCK(lock_Y2);
+static DEFINE_SPINLOCK(lock_Z1);
+static DEFINE_SPINLOCK(lock_Z2);
+
+static DEFINE_RWLOCK(rwlock_X1);
+static DEFINE_RWLOCK(rwlock_X2);
+static DEFINE_RWLOCK(rwlock_Y1);
+static DEFINE_RWLOCK(rwlock_Y2);
+static DEFINE_RWLOCK(rwlock_Z1);
+static DEFINE_RWLOCK(rwlock_Z2);
+
+static DEFINE_MUTEX(mutex_X1);
+static DEFINE_MUTEX(mutex_X2);
+static DEFINE_MUTEX(mutex_Y1);
+static DEFINE_MUTEX(mutex_Y2);
+static DEFINE_MUTEX(mutex_Z1);
+static DEFINE_MUTEX(mutex_Z2);
+
+static DECLARE_RWSEM(rwsem_X1);
+static DECLARE_RWSEM(rwsem_X2);
+static DECLARE_RWSEM(rwsem_Y1);
+static DECLARE_RWSEM(rwsem_Y2);
+static DECLARE_RWSEM(rwsem_Z1);
+static DECLARE_RWSEM(rwsem_Z2);
+
+/*
+ * non-inlined runtime initializers, to let separate locks share
+ * the same lock-class:
+ */
+#define INIT_CLASS_FUNC(class) \
+static noinline void \
+init_class_##class(spinlock_t *lock, rwlock_t *rwlock, struct mutex *mutex, \
+ struct rw_semaphore *rwsem) \
+{ \
+ spin_lock_init(lock); \
+ rwlock_init(rwlock); \
+ mutex_init(mutex); \
+ init_rwsem(rwsem); \
+}
+
+INIT_CLASS_FUNC(X)
+INIT_CLASS_FUNC(Y)
+INIT_CLASS_FUNC(Z)
+
+static void init_shared_classes(void)
+{
+ init_class_X(&lock_X1, &rwlock_X1, &mutex_X1, &rwsem_X1);
+ init_class_X(&lock_X2, &rwlock_X2, &mutex_X2, &rwsem_X2);
+
+ init_class_Y(&lock_Y1, &rwlock_Y1, &mutex_Y1, &rwsem_Y1);
+ init_class_Y(&lock_Y2, &rwlock_Y2, &mutex_Y2, &rwsem_Y2);
+
+ init_class_Z(&lock_Z1, &rwlock_Z1, &mutex_Z1, &rwsem_Z1);
+ init_class_Z(&lock_Z2, &rwlock_Z2, &mutex_Z2, &rwsem_Z2);
+}
+
+/*
+ * For spinlocks and rwlocks we also do hardirq-safe / softirq-safe tests.
+ * The following functions use a lock from a simulated hardirq/softirq
+ * context, causing the locks to be marked as hardirq-safe/softirq-safe:
+ */
+
+#define HARDIRQ_DISABLE local_irq_disable
+#define HARDIRQ_ENABLE local_irq_enable
+
+#define HARDIRQ_ENTER() \
+ local_irq_disable(); \
+ irq_enter(); \
+ WARN_ON(!in_irq());
+
+#define HARDIRQ_EXIT() \
+ __irq_exit(); \
+ local_irq_enable();
+
+#define SOFTIRQ_DISABLE local_bh_disable
+#define SOFTIRQ_ENABLE local_bh_enable
+
+#define SOFTIRQ_ENTER() \
+ local_bh_disable(); \
+ local_irq_disable(); \
+ trace_softirq_enter(); \
+ WARN_ON(!in_softirq());
+
+#define SOFTIRQ_EXIT() \
+ trace_softirq_exit(); \
+ local_irq_enable(); \
+ local_bh_enable();
+
+/*
+ * Shortcuts for lock/unlock API variants, to keep
+ * the testcases compact:
+ */
+#define L(x) spin_lock(&lock_##x)
+#define U(x) spin_unlock(&lock_##x)
+#define LU(x) L(x); U(x)
+#define SI(x) spin_lock_init(&lock_##x)
+
+#define WL(x) write_lock(&rwlock_##x)
+#define WU(x) write_unlock(&rwlock_##x)
+#define WLU(x) WL(x); WU(x)
+
+#define RL(x) read_lock(&rwlock_##x)
+#define RU(x) read_unlock(&rwlock_##x)
+#define RLU(x) RL(x); RU(x)
+#define RWI(x) rwlock_init(&rwlock_##x)
+
+#define ML(x) mutex_lock(&mutex_##x)
+#define MU(x) mutex_unlock(&mutex_##x)
+#define MI(x) mutex_init(&mutex_##x)
+
+#define WSL(x) down_write(&rwsem_##x)
+#define WSU(x) up_write(&rwsem_##x)
+
+#define RSL(x) down_read(&rwsem_##x)
+#define RSU(x) up_read(&rwsem_##x)
+#define RWSI(x) init_rwsem(&rwsem_##x)
+
+#define LOCK_UNLOCK_2(x,y) LOCK(x); LOCK(y); UNLOCK(y); UNLOCK(x)
+
+/*
+ * Generate different permutations of the same testcase, using
+ * the same basic lock-dependency/state events:
+ */
+
+#define GENERATE_TESTCASE(name) \
+ \
+static void name(void) { E(); }
+
+#define GENERATE_PERMUTATIONS_2_EVENTS(name) \
+ \
+static void name##_12(void) { E1(); E2(); } \
+static void name##_21(void) { E2(); E1(); }
+
+#define GENERATE_PERMUTATIONS_3_EVENTS(name) \
+ \
+static void name##_123(void) { E1(); E2(); E3(); } \
+static void name##_132(void) { E1(); E3(); E2(); } \
+static void name##_213(void) { E2(); E1(); E3(); } \
+static void name##_231(void) { E2(); E3(); E1(); } \
+static void name##_312(void) { E3(); E1(); E2(); } \
+static void name##_321(void) { E3(); E2(); E1(); }
+
+/*
+ * AA deadlock:
+ */
+
+#define E() \
+ \
+ LOCK(X1); \
+ LOCK(X2); /* this one should fail */
+
+/*
+ * 6 testcases:
+ */
+#include "locking-selftest-spin.h"
+GENERATE_TESTCASE(AA_spin)
+#include "locking-selftest-wlock.h"
+GENERATE_TESTCASE(AA_wlock)
+#include "locking-selftest-rlock.h"
+GENERATE_TESTCASE(AA_rlock)
+#include "locking-selftest-mutex.h"
+GENERATE_TESTCASE(AA_mutex)
+#include "locking-selftest-wsem.h"
+GENERATE_TESTCASE(AA_wsem)
+#include "locking-selftest-rsem.h"
+GENERATE_TESTCASE(AA_rsem)
+
+#undef E
+
+/*
+ * Special-case for read-locking, they are
+ * allowed to recurse on the same lock class:
+ */
+static void rlock_AA1(void)
+{
+ RL(X1);
+ RL(X1); // this one should NOT fail
+}
+
+static void rlock_AA1B(void)
+{
+ RL(X1);
+ RL(X2); // this one should NOT fail
+}
+
+static void rsem_AA1(void)
+{
+ RSL(X1);
+ RSL(X1); // this one should fail
+}
+
+static void rsem_AA1B(void)
+{
+ RSL(X1);
+ RSL(X2); // this one should fail
+}
+/*
+ * The mixing of read and write locks is not allowed:
+ */
+static void rlock_AA2(void)
+{
+ RL(X1);
+ WL(X2); // this one should fail
+}
+
+static void rsem_AA2(void)
+{
+ RSL(X1);
+ WSL(X2); // this one should fail
+}
+
+static void rlock_AA3(void)
+{
+ WL(X1);
+ RL(X2); // this one should fail
+}
+
+static void rsem_AA3(void)
+{
+ WSL(X1);
+ RSL(X2); // this one should fail
+}
+
+/*
+ * ABBA deadlock:
+ */
+
+#define E() \
+ \
+ LOCK_UNLOCK_2(A, B); \
+ LOCK_UNLOCK_2(B, A); /* fail */
+
+/*
+ * 6 testcases:
+ */
+#include "locking-selftest-spin.h"
+GENERATE_TESTCASE(ABBA_spin)
+#include "locking-selftest-wlock.h"
+GENERATE_TESTCASE(ABBA_wlock)
+#include "locking-selftest-rlock.h"
+GENERATE_TESTCASE(ABBA_rlock)
+#include "locking-selftest-mutex.h"
+GENERATE_TESTCASE(ABBA_mutex)
+#include "locking-selftest-wsem.h"
+GENERATE_TESTCASE(ABBA_wsem)
+#include "locking-selftest-rsem.h"
+GENERATE_TESTCASE(ABBA_rsem)
+
+#undef E
+
+/*
+ * AB BC CA deadlock:
+ */
+
+#define E() \
+ \
+ LOCK_UNLOCK_2(A, B); \
+ LOCK_UNLOCK_2(B, C); \
+ LOCK_UNLOCK_2(C, A); /* fail */
+
+/*
+ * 6 testcases:
+ */
+#include "locking-selftest-spin.h"
+GENERATE_TESTCASE(ABBCCA_spin)
+#include "locking-selftest-wlock.h"
+GENERATE_TESTCASE(ABBCCA_wlock)
+#include "locking-selftest-rlock.h"
+GENERATE_TESTCASE(ABBCCA_rlock)
+#include "locking-selftest-mutex.h"
+GENERATE_TESTCASE(ABBCCA_mutex)
+#include "locking-selftest-wsem.h"
+GENERATE_TESTCASE(ABBCCA_wsem)
+#include "locking-selftest-rsem.h"
+GENERATE_TESTCASE(ABBCCA_rsem)
+
+#undef E
+
+/*
+ * AB CA BC deadlock:
+ */
+
+#define E() \
+ \
+ LOCK_UNLOCK_2(A, B); \
+ LOCK_UNLOCK_2(C, A); \
+ LOCK_UNLOCK_2(B, C); /* fail */
+
+/*
+ * 6 testcases:
+ */
+#include "locking-selftest-spin.h"
+GENERATE_TESTCASE(ABCABC_spin)
+#include "locking-selftest-wlock.h"
+GENERATE_TESTCASE(ABCABC_wlock)
+#include "locking-selftest-rlock.h"
+GENERATE_TESTCASE(ABCABC_rlock)
+#include "locking-selftest-mutex.h"
+GENERATE_TESTCASE(ABCABC_mutex)
+#include "locking-selftest-wsem.h"
+GENERATE_TESTCASE(ABCABC_wsem)
+#include "locking-selftest-rsem.h"
+GENERATE_TESTCASE(ABCABC_rsem)
+
+#undef E
+
+/*
+ * AB BC CD DA deadlock:
+ */
+
+#define E() \
+ \
+ LOCK_UNLOCK_2(A, B); \
+ LOCK_UNLOCK_2(B, C); \
+ LOCK_UNLOCK_2(C, D); \
+ LOCK_UNLOCK_2(D, A); /* fail */
+
+/*
+ * 6 testcases:
+ */
+#include "locking-selftest-spin.h"
+GENERATE_TESTCASE(ABBCCDDA_spin)
+#include "locking-selftest-wlock.h"
+GENERATE_TESTCASE(ABBCCDDA_wlock)
+#include "locking-selftest-rlock.h"
+GENERATE_TESTCASE(ABBCCDDA_rlock)
+#include "locking-selftest-mutex.h"
+GENERATE_TESTCASE(ABBCCDDA_mutex)
+#include "locking-selftest-wsem.h"
+GENERATE_TESTCASE(ABBCCDDA_wsem)
+#include "locking-selftest-rsem.h"
+GENERATE_TESTCASE(ABBCCDDA_rsem)
+
+#undef E
+
+/*
+ * AB CD BD DA deadlock:
+ */
+#define E() \
+ \
+ LOCK_UNLOCK_2(A, B); \
+ LOCK_UNLOCK_2(C, D); \
+ LOCK_UNLOCK_2(B, D); \
+ LOCK_UNLOCK_2(D, A); /* fail */
+
+/*
+ * 6 testcases:
+ */
+#include "locking-selftest-spin.h"
+GENERATE_TESTCASE(ABCDBDDA_spin)
+#include "locking-selftest-wlock.h"
+GENERATE_TESTCASE(ABCDBDDA_wlock)
+#include "locking-selftest-rlock.h"
+GENERATE_TESTCASE(ABCDBDDA_rlock)
+#include "locking-selftest-mutex.h"
+GENERATE_TESTCASE(ABCDBDDA_mutex)
+#include "locking-selftest-wsem.h"
+GENERATE_TESTCASE(ABCDBDDA_wsem)
+#include "locking-selftest-rsem.h"
+GENERATE_TESTCASE(ABCDBDDA_rsem)
+
+#undef E
+
+/*
+ * AB CD BC DA deadlock:
+ */
+#define E() \
+ \
+ LOCK_UNLOCK_2(A, B); \
+ LOCK_UNLOCK_2(C, D); \
+ LOCK_UNLOCK_2(B, C); \
+ LOCK_UNLOCK_2(D, A); /* fail */
+
+/*
+ * 6 testcases:
+ */
+#include "locking-selftest-spin.h"
+GENERATE_TESTCASE(ABCDBCDA_spin)
+#include "locking-selftest-wlock.h"
+GENERATE_TESTCASE(ABCDBCDA_wlock)
+#include "locking-selftest-rlock.h"
+GENERATE_TESTCASE(ABCDBCDA_rlock)
+#include "locking-selftest-mutex.h"
+GENERATE_TESTCASE(ABCDBCDA_mutex)
+#include "locking-selftest-wsem.h"
+GENERATE_TESTCASE(ABCDBCDA_wsem)
+#include "locking-selftest-rsem.h"
+GENERATE_TESTCASE(ABCDBCDA_rsem)
+
+#undef E
+
+/*
+ * Double unlock:
+ */
+#define E() \
+ \
+ LOCK(A); \
+ UNLOCK(A); \
+ UNLOCK(A); /* fail */
+
+/*
+ * 6 testcases:
+ */
+#include "locking-selftest-spin.h"
+GENERATE_TESTCASE(double_unlock_spin)
+#include "locking-selftest-wlock.h"
+GENERATE_TESTCASE(double_unlock_wlock)
+#include "locking-selftest-rlock.h"
+GENERATE_TESTCASE(double_unlock_rlock)
+#include "locking-selftest-mutex.h"
+GENERATE_TESTCASE(double_unlock_mutex)
+#include "locking-selftest-wsem.h"
+GENERATE_TESTCASE(double_unlock_wsem)
+#include "locking-selftest-rsem.h"
+GENERATE_TESTCASE(double_unlock_rsem)
+
+#undef E
+
+/*
+ * Bad unlock ordering:
+ */
+#define E() \
+ \
+ LOCK(A); \
+ LOCK(B); \
+ UNLOCK(A); /* fail */ \
+ UNLOCK(B);
+
+/*
+ * 6 testcases:
+ */
+#include "locking-selftest-spin.h"
+GENERATE_TESTCASE(bad_unlock_order_spin)
+#include "locking-selftest-wlock.h"
+GENERATE_TESTCASE(bad_unlock_order_wlock)
+#include "locking-selftest-rlock.h"
+GENERATE_TESTCASE(bad_unlock_order_rlock)
+#include "locking-selftest-mutex.h"
+GENERATE_TESTCASE(bad_unlock_order_mutex)
+#include "locking-selftest-wsem.h"
+GENERATE_TESTCASE(bad_unlock_order_wsem)
+#include "locking-selftest-rsem.h"
+GENERATE_TESTCASE(bad_unlock_order_rsem)
+
+#undef E
+
+/*
+ * initializing a held lock:
+ */
+#define E() \
+ \
+ LOCK(A); \
+ INIT(A); /* fail */
+
+/*
+ * 6 testcases:
+ */
+#include "locking-selftest-spin.h"
+GENERATE_TESTCASE(init_held_spin)
+#include "locking-selftest-wlock.h"
+GENERATE_TESTCASE(init_held_wlock)
+#include "locking-selftest-rlock.h"
+GENERATE_TESTCASE(init_held_rlock)
+#include "locking-selftest-mutex.h"
+GENERATE_TESTCASE(init_held_mutex)
+#include "locking-selftest-wsem.h"
+GENERATE_TESTCASE(init_held_wsem)
+#include "locking-selftest-rsem.h"
+GENERATE_TESTCASE(init_held_rsem)
+
+#undef E
+
+/*
+ * locking an irq-safe lock with irqs enabled:
+ */
+#define E1() \
+ \
+ IRQ_ENTER(); \
+ LOCK(A); \
+ UNLOCK(A); \
+ IRQ_EXIT();
+
+#define E2() \
+ \
+ LOCK(A); \
+ UNLOCK(A);
+
+/*
+ * Generate 24 testcases:
+ */
+#include "locking-selftest-spin-hardirq.h"
+GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_spin)
+
+#include "locking-selftest-rlock-hardirq.h"
+GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_rlock)
+
+#include "locking-selftest-wlock-hardirq.h"
+GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_wlock)
+
+#include "locking-selftest-spin-softirq.h"
+GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_spin)
+
+#include "locking-selftest-rlock-softirq.h"
+GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_rlock)
+
+#include "locking-selftest-wlock-softirq.h"
+GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_wlock)
+
+#undef E1
+#undef E2
+
+/*
+ * Enabling hardirqs with a softirq-safe lock held:
+ */
+#define E1() \
+ \
+ SOFTIRQ_ENTER(); \
+ LOCK(A); \
+ UNLOCK(A); \
+ SOFTIRQ_EXIT();
+
+#define E2() \
+ \
+ HARDIRQ_DISABLE(); \
+ LOCK(A); \
+ HARDIRQ_ENABLE(); \
+ UNLOCK(A);
+
+/*
+ * Generate 12 testcases:
+ */
+#include "locking-selftest-spin.h"
+GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2A_spin)
+
+#include "locking-selftest-wlock.h"
+GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2A_wlock)
+
+#include "locking-selftest-rlock.h"
+GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2A_rlock)
+
+#undef E1
+#undef E2
+
+/*
+ * Enabling irqs with an irq-safe lock held:
+ */
+#define E1() \
+ \
+ IRQ_ENTER(); \
+ LOCK(A); \
+ UNLOCK(A); \
+ IRQ_EXIT();
+
+#define E2() \
+ \
+ IRQ_DISABLE(); \
+ LOCK(A); \
+ IRQ_ENABLE(); \
+ UNLOCK(A);
+
+/*
+ * Generate 24 testcases:
+ */
+#include "locking-selftest-spin-hardirq.h"
+GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_spin)
+
+#include "locking-selftest-rlock-hardirq.h"
+GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_rlock)
+
+#include "locking-selftest-wlock-hardirq.h"
+GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_wlock)
+
+#include "locking-selftest-spin-softirq.h"
+GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_spin)
+
+#include "locking-selftest-rlock-softirq.h"
+GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_rlock)
+
+#include "locking-selftest-wlock-softirq.h"
+GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_wlock)
+
+#undef E1
+#undef E2
+
+/*
+ * Acquiring a irq-unsafe lock while holding an irq-safe-lock:
+ */
+#define E1() \
+ \
+ LOCK(A); \
+ LOCK(B); \
+ UNLOCK(B); \
+ UNLOCK(A); \
+
+#define E2() \
+ \
+ LOCK(B); \
+ UNLOCK(B);
+
+#define E3() \
+ \
+ IRQ_ENTER(); \
+ LOCK(A); \
+ UNLOCK(A); \
+ IRQ_EXIT();
+
+/*
+ * Generate 36 testcases:
+ */
+#include "locking-selftest-spin-hardirq.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_spin)
+
+#include "locking-selftest-rlock-hardirq.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_rlock)
+
+#include "locking-selftest-wlock-hardirq.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_wlock)
+
+#include "locking-selftest-spin-softirq.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_spin)
+
+#include "locking-selftest-rlock-softirq.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_rlock)
+
+#include "locking-selftest-wlock-softirq.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_wlock)
+
+#undef E1
+#undef E2
+#undef E3
+
+/*
+ * If a lock turns into softirq-safe, but earlier it took
+ * a softirq-unsafe lock:
+ */
+
+#define E1() \
+ IRQ_DISABLE(); \
+ LOCK(A); \
+ LOCK(B); \
+ UNLOCK(B); \
+ UNLOCK(A); \
+ IRQ_ENABLE();
+
+#define E2() \
+ LOCK(B); \
+ UNLOCK(B);
+
+#define E3() \
+ IRQ_ENTER(); \
+ LOCK(A); \
+ UNLOCK(A); \
+ IRQ_EXIT();
+
+/*
+ * Generate 36 testcases:
+ */
+#include "locking-selftest-spin-hardirq.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_spin)
+
+#include "locking-selftest-rlock-hardirq.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_rlock)
+
+#include "locking-selftest-wlock-hardirq.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_wlock)
+
+#include "locking-selftest-spin-softirq.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_spin)
+
+#include "locking-selftest-rlock-softirq.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_rlock)
+
+#include "locking-selftest-wlock-softirq.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_wlock)
+
+#undef E1
+#undef E2
+#undef E3
+
+/*
+ * read-lock / write-lock irq inversion.
+ *
+ * Deadlock scenario:
+ *
+ * CPU#1 is at #1, i.e. it has write-locked A, but has not
+ * taken B yet.
+ *
+ * CPU#2 is at #2, i.e. it has locked B.
+ *
+ * Hardirq hits CPU#2 at point #2 and is trying to read-lock A.
+ *
+ * The deadlock occurs because CPU#1 will spin on B, and CPU#2
+ * will spin on A.
+ */
+
+#define E1() \
+ \
+ IRQ_DISABLE(); \
+ WL(A); \
+ LOCK(B); \
+ UNLOCK(B); \
+ WU(A); \
+ IRQ_ENABLE();
+
+#define E2() \
+ \
+ LOCK(B); \
+ UNLOCK(B);
+
+#define E3() \
+ \
+ IRQ_ENTER(); \
+ RL(A); \
+ RU(A); \
+ IRQ_EXIT();
+
+/*
+ * Generate 36 testcases:
+ */
+#include "locking-selftest-spin-hardirq.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_hard_spin)
+
+#include "locking-selftest-rlock-hardirq.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_hard_rlock)
+
+#include "locking-selftest-wlock-hardirq.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_hard_wlock)
+
+#include "locking-selftest-spin-softirq.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_soft_spin)
+
+#include "locking-selftest-rlock-softirq.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_soft_rlock)
+
+#include "locking-selftest-wlock-softirq.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_soft_wlock)
+
+#undef E1
+#undef E2
+#undef E3
+
+/*
+ * read-lock / write-lock recursion that is actually safe.
+ */
+
+#define E1() \
+ \
+ IRQ_DISABLE(); \
+ WL(A); \
+ WU(A); \
+ IRQ_ENABLE();
+
+#define E2() \
+ \
+ RL(A); \
+ RU(A); \
+
+#define E3() \
+ \
+ IRQ_ENTER(); \
+ RL(A); \
+ L(B); \
+ U(B); \
+ RU(A); \
+ IRQ_EXIT();
+
+/*
+ * Generate 12 testcases:
+ */
+#include "locking-selftest-hardirq.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_hard)
+
+#include "locking-selftest-softirq.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft)
+
+#undef E1
+#undef E2
+#undef E3
+
+/*
+ * read-lock / write-lock recursion that is unsafe.
+ */
+
+#define E1() \
+ \
+ IRQ_DISABLE(); \
+ L(B); \
+ WL(A); \
+ WU(A); \
+ U(B); \
+ IRQ_ENABLE();
+
+#define E2() \
+ \
+ RL(A); \
+ RU(A); \
+
+#define E3() \
+ \
+ IRQ_ENTER(); \
+ L(B); \
+ U(B); \
+ IRQ_EXIT();
+
+/*
+ * Generate 12 testcases:
+ */
+#include "locking-selftest-hardirq.h"
+// GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion2_hard)
+
+#include "locking-selftest-softirq.h"
+// GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion2_soft)
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define I_SPINLOCK(x) lockdep_reset_lock(&lock_##x.dep_map)
+# define I_RWLOCK(x) lockdep_reset_lock(&rwlock_##x.dep_map)
+# define I_MUTEX(x) lockdep_reset_lock(&mutex_##x.dep_map)
+# define I_RWSEM(x) lockdep_reset_lock(&rwsem_##x.dep_map)
+#else
+# define I_SPINLOCK(x)
+# define I_RWLOCK(x)
+# define I_MUTEX(x)
+# define I_RWSEM(x)
+#endif
+
+#define I1(x) \
+ do { \
+ I_SPINLOCK(x); \
+ I_RWLOCK(x); \
+ I_MUTEX(x); \
+ I_RWSEM(x); \
+ } while (0)
+
+#define I2(x) \
+ do { \
+ spin_lock_init(&lock_##x); \
+ rwlock_init(&rwlock_##x); \
+ mutex_init(&mutex_##x); \
+ init_rwsem(&rwsem_##x); \
+ } while (0)
+
+static void reset_locks(void)
+{
+ local_irq_disable();
+ I1(A); I1(B); I1(C); I1(D);
+ I1(X1); I1(X2); I1(Y1); I1(Y2); I1(Z1); I1(Z2);
+ lockdep_reset();
+ I2(A); I2(B); I2(C); I2(D);
+ init_shared_classes();
+ local_irq_enable();
+}
+
+#undef I
+
+static int testcase_total;
+static int testcase_successes;
+static int expected_testcase_failures;
+static int unexpected_testcase_failures;
+
+static void dotest(void (*testcase_fn)(void), int expected, int lockclass_mask)
+{
+ unsigned long saved_preempt_count = preempt_count();
+ int expected_failure = 0;
+
+ WARN_ON(irqs_disabled());
+
+ testcase_fn();
+ /*
+ * Filter out expected failures:
+ */
+#ifndef CONFIG_PROVE_LOCKING
+ if ((lockclass_mask & LOCKTYPE_SPIN) && debug_locks != expected)
+ expected_failure = 1;
+ if ((lockclass_mask & LOCKTYPE_RWLOCK) && debug_locks != expected)
+ expected_failure = 1;
+ if ((lockclass_mask & LOCKTYPE_MUTEX) && debug_locks != expected)
+ expected_failure = 1;
+ if ((lockclass_mask & LOCKTYPE_RWSEM) && debug_locks != expected)
+ expected_failure = 1;
+#endif
+ if (debug_locks != expected) {
+ if (expected_failure) {
+ expected_testcase_failures++;
+ printk("failed|");
+ } else {
+ unexpected_testcase_failures++;
+
+ printk("FAILED|");
+ dump_stack();
+ }
+ } else {
+ testcase_successes++;
+ printk(" ok |");
+ }
+ testcase_total++;
+
+ if (debug_locks_verbose)
+ printk(" lockclass mask: %x, debug_locks: %d, expected: %d\n",
+ lockclass_mask, debug_locks, expected);
+ /*
+ * Some tests (e.g. double-unlock) might corrupt the preemption
+ * count, so restore it:
+ */
+ preempt_count() = saved_preempt_count;
+#ifdef CONFIG_TRACE_IRQFLAGS
+ if (softirq_count())
+ current->softirqs_enabled = 0;
+ else
+ current->softirqs_enabled = 1;
+#endif
+
+ reset_locks();
+}
+
+static inline void print_testname(const char *testname)
+{
+ printk("%33s:", testname);
+}
+
+#define DO_TESTCASE_1(desc, name, nr) \
+ print_testname(desc"/"#nr); \
+ dotest(name##_##nr, SUCCESS, LOCKTYPE_RWLOCK); \
+ printk("\n");
+
+#define DO_TESTCASE_1B(desc, name, nr) \
+ print_testname(desc"/"#nr); \
+ dotest(name##_##nr, FAILURE, LOCKTYPE_RWLOCK); \
+ printk("\n");
+
+#define DO_TESTCASE_3(desc, name, nr) \
+ print_testname(desc"/"#nr); \
+ dotest(name##_spin_##nr, FAILURE, LOCKTYPE_SPIN); \
+ dotest(name##_wlock_##nr, FAILURE, LOCKTYPE_RWLOCK); \
+ dotest(name##_rlock_##nr, SUCCESS, LOCKTYPE_RWLOCK); \
+ printk("\n");
+
+#define DO_TESTCASE_3RW(desc, name, nr) \
+ print_testname(desc"/"#nr); \
+ dotest(name##_spin_##nr, FAILURE, LOCKTYPE_SPIN|LOCKTYPE_RWLOCK);\
+ dotest(name##_wlock_##nr, FAILURE, LOCKTYPE_RWLOCK); \
+ dotest(name##_rlock_##nr, SUCCESS, LOCKTYPE_RWLOCK); \
+ printk("\n");
+
+#define DO_TESTCASE_6(desc, name) \
+ print_testname(desc); \
+ dotest(name##_spin, FAILURE, LOCKTYPE_SPIN); \
+ dotest(name##_wlock, FAILURE, LOCKTYPE_RWLOCK); \
+ dotest(name##_rlock, FAILURE, LOCKTYPE_RWLOCK); \
+ dotest(name##_mutex, FAILURE, LOCKTYPE_MUTEX); \
+ dotest(name##_wsem, FAILURE, LOCKTYPE_RWSEM); \
+ dotest(name##_rsem, FAILURE, LOCKTYPE_RWSEM); \
+ printk("\n");
+
+#define DO_TESTCASE_6_SUCCESS(desc, name) \
+ print_testname(desc); \
+ dotest(name##_spin, SUCCESS, LOCKTYPE_SPIN); \
+ dotest(name##_wlock, SUCCESS, LOCKTYPE_RWLOCK); \
+ dotest(name##_rlock, SUCCESS, LOCKTYPE_RWLOCK); \
+ dotest(name##_mutex, SUCCESS, LOCKTYPE_MUTEX); \
+ dotest(name##_wsem, SUCCESS, LOCKTYPE_RWSEM); \
+ dotest(name##_rsem, SUCCESS, LOCKTYPE_RWSEM); \
+ printk("\n");
+
+/*
+ * 'read' variant: rlocks must not trigger.
+ */
+#define DO_TESTCASE_6R(desc, name) \
+ print_testname(desc); \
+ dotest(name##_spin, FAILURE, LOCKTYPE_SPIN); \
+ dotest(name##_wlock, FAILURE, LOCKTYPE_RWLOCK); \
+ dotest(name##_rlock, SUCCESS, LOCKTYPE_RWLOCK); \
+ dotest(name##_mutex, FAILURE, LOCKTYPE_MUTEX); \
+ dotest(name##_wsem, FAILURE, LOCKTYPE_RWSEM); \
+ dotest(name##_rsem, FAILURE, LOCKTYPE_RWSEM); \
+ printk("\n");
+
+#define DO_TESTCASE_2I(desc, name, nr) \
+ DO_TESTCASE_1("hard-"desc, name##_hard, nr); \
+ DO_TESTCASE_1("soft-"desc, name##_soft, nr);
+
+#define DO_TESTCASE_2IB(desc, name, nr) \
+ DO_TESTCASE_1B("hard-"desc, name##_hard, nr); \
+ DO_TESTCASE_1B("soft-"desc, name##_soft, nr);
+
+#define DO_TESTCASE_6I(desc, name, nr) \
+ DO_TESTCASE_3("hard-"desc, name##_hard, nr); \
+ DO_TESTCASE_3("soft-"desc, name##_soft, nr);
+
+#define DO_TESTCASE_6IRW(desc, name, nr) \
+ DO_TESTCASE_3RW("hard-"desc, name##_hard, nr); \
+ DO_TESTCASE_3RW("soft-"desc, name##_soft, nr);
+
+#define DO_TESTCASE_2x3(desc, name) \
+ DO_TESTCASE_3(desc, name, 12); \
+ DO_TESTCASE_3(desc, name, 21);
+
+#define DO_TESTCASE_2x6(desc, name) \
+ DO_TESTCASE_6I(desc, name, 12); \
+ DO_TESTCASE_6I(desc, name, 21);
+
+#define DO_TESTCASE_6x2(desc, name) \
+ DO_TESTCASE_2I(desc, name, 123); \
+ DO_TESTCASE_2I(desc, name, 132); \
+ DO_TESTCASE_2I(desc, name, 213); \
+ DO_TESTCASE_2I(desc, name, 231); \
+ DO_TESTCASE_2I(desc, name, 312); \
+ DO_TESTCASE_2I(desc, name, 321);
+
+#define DO_TESTCASE_6x2B(desc, name) \
+ DO_TESTCASE_2IB(desc, name, 123); \
+ DO_TESTCASE_2IB(desc, name, 132); \
+ DO_TESTCASE_2IB(desc, name, 213); \
+ DO_TESTCASE_2IB(desc, name, 231); \
+ DO_TESTCASE_2IB(desc, name, 312); \
+ DO_TESTCASE_2IB(desc, name, 321);
+
+#define DO_TESTCASE_6x6(desc, name) \
+ DO_TESTCASE_6I(desc, name, 123); \
+ DO_TESTCASE_6I(desc, name, 132); \
+ DO_TESTCASE_6I(desc, name, 213); \
+ DO_TESTCASE_6I(desc, name, 231); \
+ DO_TESTCASE_6I(desc, name, 312); \
+ DO_TESTCASE_6I(desc, name, 321);
+
+#define DO_TESTCASE_6x6RW(desc, name) \
+ DO_TESTCASE_6IRW(desc, name, 123); \
+ DO_TESTCASE_6IRW(desc, name, 132); \
+ DO_TESTCASE_6IRW(desc, name, 213); \
+ DO_TESTCASE_6IRW(desc, name, 231); \
+ DO_TESTCASE_6IRW(desc, name, 312); \
+ DO_TESTCASE_6IRW(desc, name, 321);
+
+
+void locking_selftest(void)
+{
+ /*
+ * Got a locking failure before the selftest ran?
+ */
+ if (!debug_locks) {
+ printk("----------------------------------\n");
+ printk("| Locking API testsuite disabled |\n");
+ printk("----------------------------------\n");
+ return;
+ }
+
+ /*
+ * Run the testsuite:
+ */
+ printk("------------------------\n");
+ printk("| Locking API testsuite:\n");
+ printk("----------------------------------------------------------------------------\n");
+ printk(" | spin |wlock |rlock |mutex | wsem | rsem |\n");
+ printk(" --------------------------------------------------------------------------\n");
+
+ init_shared_classes();
+ debug_locks_silent = !debug_locks_verbose;
+
+ DO_TESTCASE_6R("A-A deadlock", AA);
+ DO_TESTCASE_6R("A-B-B-A deadlock", ABBA);
+ DO_TESTCASE_6R("A-B-B-C-C-A deadlock", ABBCCA);
+ DO_TESTCASE_6R("A-B-C-A-B-C deadlock", ABCABC);
+ DO_TESTCASE_6R("A-B-B-C-C-D-D-A deadlock", ABBCCDDA);
+ DO_TESTCASE_6R("A-B-C-D-B-D-D-A deadlock", ABCDBDDA);
+ DO_TESTCASE_6R("A-B-C-D-B-C-D-A deadlock", ABCDBCDA);
+ DO_TESTCASE_6("double unlock", double_unlock);
+ DO_TESTCASE_6("initialize held", init_held);
+ DO_TESTCASE_6_SUCCESS("bad unlock order", bad_unlock_order);
+
+ printk(" --------------------------------------------------------------------------\n");
+ print_testname("recursive read-lock");
+ printk(" |");
+ dotest(rlock_AA1, SUCCESS, LOCKTYPE_RWLOCK);
+ printk(" |");
+ dotest(rsem_AA1, FAILURE, LOCKTYPE_RWSEM);
+ printk("\n");
+
+ print_testname("recursive read-lock #2");
+ printk(" |");
+ dotest(rlock_AA1B, SUCCESS, LOCKTYPE_RWLOCK);
+ printk(" |");
+ dotest(rsem_AA1B, FAILURE, LOCKTYPE_RWSEM);
+ printk("\n");
+
+ print_testname("mixed read-write-lock");
+ printk(" |");
+ dotest(rlock_AA2, FAILURE, LOCKTYPE_RWLOCK);
+ printk(" |");
+ dotest(rsem_AA2, FAILURE, LOCKTYPE_RWSEM);
+ printk("\n");
+
+ print_testname("mixed write-read-lock");
+ printk(" |");
+ dotest(rlock_AA3, FAILURE, LOCKTYPE_RWLOCK);
+ printk(" |");
+ dotest(rsem_AA3, FAILURE, LOCKTYPE_RWSEM);
+ printk("\n");
+
+ printk(" --------------------------------------------------------------------------\n");
+
+ /*
+ * irq-context testcases:
+ */
+ DO_TESTCASE_2x6("irqs-on + irq-safe-A", irqsafe1);
+ DO_TESTCASE_2x3("sirq-safe-A => hirqs-on", irqsafe2A);
+ DO_TESTCASE_2x6("safe-A + irqs-on", irqsafe2B);
+ DO_TESTCASE_6x6("safe-A + unsafe-B #1", irqsafe3);
+ DO_TESTCASE_6x6("safe-A + unsafe-B #2", irqsafe4);
+ DO_TESTCASE_6x6RW("irq lock-inversion", irq_inversion);
+
+ DO_TESTCASE_6x2("irq read-recursion", irq_read_recursion);
+// DO_TESTCASE_6x2B("irq read-recursion #2", irq_read_recursion2);
+
+ if (unexpected_testcase_failures) {
+ printk("-----------------------------------------------------------------\n");
+ debug_locks = 0;
+ printk("BUG: %3d unexpected failures (out of %3d) - debugging disabled! |\n",
+ unexpected_testcase_failures, testcase_total);
+ printk("-----------------------------------------------------------------\n");
+ } else if (expected_testcase_failures && testcase_successes) {
+ printk("--------------------------------------------------------\n");
+ printk("%3d out of %3d testcases failed, as expected. |\n",
+ expected_testcase_failures, testcase_total);
+ printk("----------------------------------------------------\n");
+ debug_locks = 1;
+ } else if (expected_testcase_failures && !testcase_successes) {
+ printk("--------------------------------------------------------\n");
+ printk("All %3d testcases failed, as expected. |\n",
+ expected_testcase_failures);
+ printk("----------------------------------------\n");
+ debug_locks = 1;
+ } else {
+ printk("-------------------------------------------------------\n");
+ printk("Good, all %3d testcases passed! |\n",
+ testcase_successes);
+ printk("---------------------------------\n");
+ debug_locks = 1;
+ }
+ debug_locks_silent = 0;
+}
Index: linux-2.6.24-rc6/lib/Kconfig.debug
===================================================================
--- linux-2.6.24-rc6.orig/lib/Kconfig.debug
+++ linux-2.6.24-rc6/lib/Kconfig.debug
@@ -350,17 +350,6 @@ config DEBUG_SPINLOCK_SLEEP
If you say Y here, various routines which may sleep will become very
noisy if they are called with a spinlock held.

-config DEBUG_LOCKING_API_SELFTESTS
- bool "Locking API boot-time self-tests"
- depends on DEBUG_KERNEL
- help
- Say Y here if you want the kernel to run a short self-test during
- bootup. The self-test checks whether common types of locking bugs
- are detected by debugging mechanisms or not. (if you disable
- lock debugging then those bugs wont be detected of course.)
- The following locking APIs are covered: spinlocks, rwlocks,
- mutexes and rwsems.
-
config STACKTRACE
bool
depends on DEBUG_KERNEL

Subject: [PATCH 3/7] Move rcutorture to tests/

From: Ananth N Mavinakayanahalli <[email protected]>

Move the rcutorture infrastructure from kernel/ to tests/, along with its
Kconfig and Makefile entries.

Signed-off-by: Ananth N Mavinakayanahalli <[email protected]>
---
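[ Note: this is intended as a pure code move; rcutorture should build and
  behave exactly as before, only from tests/ instead of kernel/. As a
  quick sanity check (illustrative invocation only; the parameter values
  are arbitrary, the parameters themselves are the existing module
  parameters declared in rcutorture.c):

	modprobe rcutorture stat_interval=30 verbose=1
	rmmod rcutorture
	dmesg | grep torture

  The log should still show the usual "rcu-torture:" statistics lines and
  the "End of test: SUCCESS" summary, the same as with the old location. ]
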
kernel/rcutorture.c | 995 ----------------------------------------------------
kernel/Makefile | 1
lib/Kconfig.debug | 13
tests/Kconfig | 13
tests/Makefile | 1
tests/rcutorture.c | 995 ++++++++++++++++++++++++++++++++++++++++++++++++++++
6 files changed, 1009 insertions(+), 1009 deletions(-)

Index: linux-2.6.24-rc6/kernel/Makefile
===================================================================
--- linux-2.6.24-rc6.orig/kernel/Makefile
+++ linux-2.6.24-rc6/kernel/Makefile
@@ -56,7 +56,6 @@ obj-$(CONFIG_DETECT_SOFTLOCKUP) += softl
obj-$(CONFIG_GENERIC_HARDIRQS) += irq/
obj-$(CONFIG_SECCOMP) += seccomp.o
obj-$(CONFIG_DEBUG_SYNCHRO_TEST) += synchro-test.o
-obj-$(CONFIG_RCU_TORTURE_TEST) += rcutorture.o
obj-$(CONFIG_CLASSIC_RCU) += rcuclassic.o
obj-$(CONFIG_PREEMPT_RCU) += rcupreempt.o
ifeq ($(CONFIG_PREEMPT_RCU),y)
Index: linux-2.6.24-rc6/kernel/rcutorture.c
===================================================================
--- linux-2.6.24-rc6.orig/kernel/rcutorture.c
+++ /dev/null
@@ -1,995 +0,0 @@
-/*
- * Read-Copy Update module-based torture test facility
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * Copyright (C) IBM Corporation, 2005, 2006
- *
- * Authors: Paul E. McKenney <[email protected]>
- * Josh Triplett <[email protected]>
- *
- * See also: Documentation/RCU/torture.txt
- */
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/kthread.h>
-#include <linux/err.h>
-#include <linux/spinlock.h>
-#include <linux/smp.h>
-#include <linux/rcupdate.h>
-#include <linux/interrupt.h>
-#include <linux/sched.h>
-#include <asm/atomic.h>
-#include <linux/bitops.h>
-#include <linux/completion.h>
-#include <linux/moduleparam.h>
-#include <linux/percpu.h>
-#include <linux/notifier.h>
-#include <linux/freezer.h>
-#include <linux/cpu.h>
-#include <linux/delay.h>
-#include <linux/byteorder/swabb.h>
-#include <linux/stat.h>
-#include <linux/srcu.h>
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Paul E. McKenney <[email protected]> and "
- "Josh Triplett <[email protected]>");
-
-static int nreaders = -1; /* # reader threads, defaults to 2*ncpus */
-static int nfakewriters = 4; /* # fake writer threads */
-static int stat_interval; /* Interval between stats, in seconds. */
- /* Defaults to "only at end of test". */
-static int verbose; /* Print more debug info. */
-static int test_no_idle_hz; /* Test RCU's support for tickless idle CPUs. */
-static int shuffle_interval = 5; /* Interval between shuffles (in sec)*/
-static char *torture_type = "rcu"; /* What RCU implementation to torture. */
-
-module_param(nreaders, int, 0444);
-MODULE_PARM_DESC(nreaders, "Number of RCU reader threads");
-module_param(nfakewriters, int, 0444);
-MODULE_PARM_DESC(nfakewriters, "Number of RCU fake writer threads");
-module_param(stat_interval, int, 0444);
-MODULE_PARM_DESC(stat_interval, "Number of seconds between stats printk()s");
-module_param(verbose, bool, 0444);
-MODULE_PARM_DESC(verbose, "Enable verbose debugging printk()s");
-module_param(test_no_idle_hz, bool, 0444);
-MODULE_PARM_DESC(test_no_idle_hz, "Test support for tickless idle CPUs");
-module_param(shuffle_interval, int, 0444);
-MODULE_PARM_DESC(shuffle_interval, "Number of seconds between shuffles");
-module_param(torture_type, charp, 0444);
-MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, rcu_bh, srcu)");
-
-#define TORTURE_FLAG "-torture:"
-#define PRINTK_STRING(s) \
- do { printk(KERN_ALERT "%s" TORTURE_FLAG s "\n", torture_type); } while (0)
-#define VERBOSE_PRINTK_STRING(s) \
- do { if (verbose) printk(KERN_ALERT "%s" TORTURE_FLAG s "\n", torture_type); } while (0)
-#define VERBOSE_PRINTK_ERRSTRING(s) \
- do { if (verbose) printk(KERN_ALERT "%s" TORTURE_FLAG "!!! " s "\n", torture_type); } while (0)
-
-static char printk_buf[4096];
-
-static int nrealreaders;
-static struct task_struct *writer_task;
-static struct task_struct **fakewriter_tasks;
-static struct task_struct **reader_tasks;
-static struct task_struct *stats_task;
-static struct task_struct *shuffler_task;
-
-#define RCU_TORTURE_PIPE_LEN 10
-
-struct rcu_torture {
- struct rcu_head rtort_rcu;
- int rtort_pipe_count;
- struct list_head rtort_free;
- int rtort_mbtest;
-};
-
-static int fullstop = 0; /* stop generating callbacks at test end. */
-static LIST_HEAD(rcu_torture_freelist);
-static struct rcu_torture *rcu_torture_current = NULL;
-static long rcu_torture_current_version = 0;
-static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
-static DEFINE_SPINLOCK(rcu_torture_lock);
-static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
- { 0 };
-static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
- { 0 };
-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
-static atomic_t n_rcu_torture_alloc;
-static atomic_t n_rcu_torture_alloc_fail;
-static atomic_t n_rcu_torture_free;
-static atomic_t n_rcu_torture_mberror;
-static atomic_t n_rcu_torture_error;
-static struct list_head rcu_torture_removed;
-
-/*
- * Allocate an element from the rcu_tortures pool.
- */
-static struct rcu_torture *
-rcu_torture_alloc(void)
-{
- struct list_head *p;
-
- spin_lock_bh(&rcu_torture_lock);
- if (list_empty(&rcu_torture_freelist)) {
- atomic_inc(&n_rcu_torture_alloc_fail);
- spin_unlock_bh(&rcu_torture_lock);
- return NULL;
- }
- atomic_inc(&n_rcu_torture_alloc);
- p = rcu_torture_freelist.next;
- list_del_init(p);
- spin_unlock_bh(&rcu_torture_lock);
- return container_of(p, struct rcu_torture, rtort_free);
-}
-
-/*
- * Free an element to the rcu_tortures pool.
- */
-static void
-rcu_torture_free(struct rcu_torture *p)
-{
- atomic_inc(&n_rcu_torture_free);
- spin_lock_bh(&rcu_torture_lock);
- list_add_tail(&p->rtort_free, &rcu_torture_freelist);
- spin_unlock_bh(&rcu_torture_lock);
-}
-
-struct rcu_random_state {
- unsigned long rrs_state;
- long rrs_count;
-};
-
-#define RCU_RANDOM_MULT 39916801 /* prime */
-#define RCU_RANDOM_ADD 479001701 /* prime */
-#define RCU_RANDOM_REFRESH 10000
-
-#define DEFINE_RCU_RANDOM(name) struct rcu_random_state name = { 0, 0 }
-
-/*
- * Crude but fast random-number generator. Uses a linear congruential
- * generator, with occasional help from cpu_clock().
- */
-static unsigned long
-rcu_random(struct rcu_random_state *rrsp)
-{
- if (--rrsp->rrs_count < 0) {
- rrsp->rrs_state +=
- (unsigned long)cpu_clock(raw_smp_processor_id());
- rrsp->rrs_count = RCU_RANDOM_REFRESH;
- }
- rrsp->rrs_state = rrsp->rrs_state * RCU_RANDOM_MULT + RCU_RANDOM_ADD;
- return swahw32(rrsp->rrs_state);
-}
-
-/*
- * Operations vector for selecting different types of tests.
- */
-
-struct rcu_torture_ops {
- void (*init)(void);
- void (*cleanup)(void);
- int (*readlock)(void);
- void (*readdelay)(struct rcu_random_state *rrsp);
- void (*readunlock)(int idx);
- int (*completed)(void);
- void (*deferredfree)(struct rcu_torture *p);
- void (*sync)(void);
- int (*stats)(char *page);
- char *name;
-};
-static struct rcu_torture_ops *cur_ops = NULL;
-
-/*
- * Definitions for rcu torture testing.
- */
-
-static int rcu_torture_read_lock(void) __acquires(RCU)
-{
- rcu_read_lock();
- return 0;
-}
-
-static void rcu_read_delay(struct rcu_random_state *rrsp)
-{
- long delay;
- const long longdelay = 200;
-
- /* We want there to be long-running readers, but not all the time. */
-
- delay = rcu_random(rrsp) % (nrealreaders * 2 * longdelay);
- if (!delay)
- udelay(longdelay);
-}
-
-static void rcu_torture_read_unlock(int idx) __releases(RCU)
-{
- rcu_read_unlock();
-}
-
-static int rcu_torture_completed(void)
-{
- return rcu_batches_completed();
-}
-
-static void
-rcu_torture_cb(struct rcu_head *p)
-{
- int i;
- struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);
-
- if (fullstop) {
- /* Test is ending, just drop callbacks on the floor. */
- /* The next initialization will pick up the pieces. */
- return;
- }
- i = rp->rtort_pipe_count;
- if (i > RCU_TORTURE_PIPE_LEN)
- i = RCU_TORTURE_PIPE_LEN;
- atomic_inc(&rcu_torture_wcount[i]);
- if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
- rp->rtort_mbtest = 0;
- rcu_torture_free(rp);
- } else
- cur_ops->deferredfree(rp);
-}
-
-static void rcu_torture_deferred_free(struct rcu_torture *p)
-{
- call_rcu(&p->rtort_rcu, rcu_torture_cb);
-}
-
-static struct rcu_torture_ops rcu_ops = {
- .init = NULL,
- .cleanup = NULL,
- .readlock = rcu_torture_read_lock,
- .readdelay = rcu_read_delay,
- .readunlock = rcu_torture_read_unlock,
- .completed = rcu_torture_completed,
- .deferredfree = rcu_torture_deferred_free,
- .sync = synchronize_rcu,
- .stats = NULL,
- .name = "rcu"
-};
-
-static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
-{
- int i;
- struct rcu_torture *rp;
- struct rcu_torture *rp1;
-
- cur_ops->sync();
- list_add(&p->rtort_free, &rcu_torture_removed);
- list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
- i = rp->rtort_pipe_count;
- if (i > RCU_TORTURE_PIPE_LEN)
- i = RCU_TORTURE_PIPE_LEN;
- atomic_inc(&rcu_torture_wcount[i]);
- if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
- rp->rtort_mbtest = 0;
- list_del(&rp->rtort_free);
- rcu_torture_free(rp);
- }
- }
-}
-
-static void rcu_sync_torture_init(void)
-{
- INIT_LIST_HEAD(&rcu_torture_removed);
-}
-
-static struct rcu_torture_ops rcu_sync_ops = {
- .init = rcu_sync_torture_init,
- .cleanup = NULL,
- .readlock = rcu_torture_read_lock,
- .readdelay = rcu_read_delay,
- .readunlock = rcu_torture_read_unlock,
- .completed = rcu_torture_completed,
- .deferredfree = rcu_sync_torture_deferred_free,
- .sync = synchronize_rcu,
- .stats = NULL,
- .name = "rcu_sync"
-};
-
-/*
- * Definitions for rcu_bh torture testing.
- */
-
-static int rcu_bh_torture_read_lock(void) __acquires(RCU_BH)
-{
- rcu_read_lock_bh();
- return 0;
-}
-
-static void rcu_bh_torture_read_unlock(int idx) __releases(RCU_BH)
-{
- rcu_read_unlock_bh();
-}
-
-static int rcu_bh_torture_completed(void)
-{
- return rcu_batches_completed_bh();
-}
-
-static void rcu_bh_torture_deferred_free(struct rcu_torture *p)
-{
- call_rcu_bh(&p->rtort_rcu, rcu_torture_cb);
-}
-
-struct rcu_bh_torture_synchronize {
- struct rcu_head head;
- struct completion completion;
-};
-
-static void rcu_bh_torture_wakeme_after_cb(struct rcu_head *head)
-{
- struct rcu_bh_torture_synchronize *rcu;
-
- rcu = container_of(head, struct rcu_bh_torture_synchronize, head);
- complete(&rcu->completion);
-}
-
-static void rcu_bh_torture_synchronize(void)
-{
- struct rcu_bh_torture_synchronize rcu;
-
- init_completion(&rcu.completion);
- call_rcu_bh(&rcu.head, rcu_bh_torture_wakeme_after_cb);
- wait_for_completion(&rcu.completion);
-}
-
-static struct rcu_torture_ops rcu_bh_ops = {
- .init = NULL,
- .cleanup = NULL,
- .readlock = rcu_bh_torture_read_lock,
- .readdelay = rcu_read_delay, /* just reuse rcu's version. */
- .readunlock = rcu_bh_torture_read_unlock,
- .completed = rcu_bh_torture_completed,
- .deferredfree = rcu_bh_torture_deferred_free,
- .sync = rcu_bh_torture_synchronize,
- .stats = NULL,
- .name = "rcu_bh"
-};
-
-static struct rcu_torture_ops rcu_bh_sync_ops = {
- .init = rcu_sync_torture_init,
- .cleanup = NULL,
- .readlock = rcu_bh_torture_read_lock,
- .readdelay = rcu_read_delay, /* just reuse rcu's version. */
- .readunlock = rcu_bh_torture_read_unlock,
- .completed = rcu_bh_torture_completed,
- .deferredfree = rcu_sync_torture_deferred_free,
- .sync = rcu_bh_torture_synchronize,
- .stats = NULL,
- .name = "rcu_bh_sync"
-};
-
-/*
- * Definitions for srcu torture testing.
- */
-
-static struct srcu_struct srcu_ctl;
-
-static void srcu_torture_init(void)
-{
- init_srcu_struct(&srcu_ctl);
- rcu_sync_torture_init();
-}
-
-static void srcu_torture_cleanup(void)
-{
- synchronize_srcu(&srcu_ctl);
- cleanup_srcu_struct(&srcu_ctl);
-}
-
-static int srcu_torture_read_lock(void) __acquires(&srcu_ctl)
-{
- return srcu_read_lock(&srcu_ctl);
-}
-
-static void srcu_read_delay(struct rcu_random_state *rrsp)
-{
- long delay;
- const long uspertick = 1000000 / HZ;
- const long longdelay = 10;
-
- /* We want there to be long-running readers, but not all the time. */
-
- delay = rcu_random(rrsp) % (nrealreaders * 2 * longdelay * uspertick);
- if (!delay)
- schedule_timeout_interruptible(longdelay);
-}
-
-static void srcu_torture_read_unlock(int idx) __releases(&srcu_ctl)
-{
- srcu_read_unlock(&srcu_ctl, idx);
-}
-
-static int srcu_torture_completed(void)
-{
- return srcu_batches_completed(&srcu_ctl);
-}
-
-static void srcu_torture_synchronize(void)
-{
- synchronize_srcu(&srcu_ctl);
-}
-
-static int srcu_torture_stats(char *page)
-{
- int cnt = 0;
- int cpu;
- int idx = srcu_ctl.completed & 0x1;
-
- cnt += sprintf(&page[cnt], "%s%s per-CPU(idx=%d):",
- torture_type, TORTURE_FLAG, idx);
- for_each_possible_cpu(cpu) {
- cnt += sprintf(&page[cnt], " %d(%d,%d)", cpu,
- per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[!idx],
- per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[idx]);
- }
- cnt += sprintf(&page[cnt], "\n");
- return cnt;
-}
-
-static struct rcu_torture_ops srcu_ops = {
- .init = srcu_torture_init,
- .cleanup = srcu_torture_cleanup,
- .readlock = srcu_torture_read_lock,
- .readdelay = srcu_read_delay,
- .readunlock = srcu_torture_read_unlock,
- .completed = srcu_torture_completed,
- .deferredfree = rcu_sync_torture_deferred_free,
- .sync = srcu_torture_synchronize,
- .stats = srcu_torture_stats,
- .name = "srcu"
-};
-
-/*
- * Definitions for sched torture testing.
- */
-
-static int sched_torture_read_lock(void)
-{
- preempt_disable();
- return 0;
-}
-
-static void sched_torture_read_unlock(int idx)
-{
- preempt_enable();
-}
-
-static int sched_torture_completed(void)
-{
- return 0;
-}
-
-static void sched_torture_synchronize(void)
-{
- synchronize_sched();
-}
-
-static struct rcu_torture_ops sched_ops = {
- .init = rcu_sync_torture_init,
- .cleanup = NULL,
- .readlock = sched_torture_read_lock,
- .readdelay = rcu_read_delay, /* just reuse rcu's version. */
- .readunlock = sched_torture_read_unlock,
- .completed = sched_torture_completed,
- .deferredfree = rcu_sync_torture_deferred_free,
- .sync = sched_torture_synchronize,
- .stats = NULL,
- .name = "sched"
-};
-
-/*
- * RCU torture writer kthread. Repeatedly substitutes a new structure
- * for that pointed to by rcu_torture_current, freeing the old structure
- * after a series of grace periods (the "pipeline").
- */
-static int
-rcu_torture_writer(void *arg)
-{
- int i;
- long oldbatch = rcu_batches_completed();
- struct rcu_torture *rp;
- struct rcu_torture *old_rp;
- static DEFINE_RCU_RANDOM(rand);
-
- VERBOSE_PRINTK_STRING("rcu_torture_writer task started");
- set_user_nice(current, 19);
-
- do {
- schedule_timeout_uninterruptible(1);
- if ((rp = rcu_torture_alloc()) == NULL)
- continue;
- rp->rtort_pipe_count = 0;
- udelay(rcu_random(&rand) & 0x3ff);
- old_rp = rcu_torture_current;
- rp->rtort_mbtest = 1;
- rcu_assign_pointer(rcu_torture_current, rp);
- smp_wmb();
- if (old_rp) {
- i = old_rp->rtort_pipe_count;
- if (i > RCU_TORTURE_PIPE_LEN)
- i = RCU_TORTURE_PIPE_LEN;
- atomic_inc(&rcu_torture_wcount[i]);
- old_rp->rtort_pipe_count++;
- cur_ops->deferredfree(old_rp);
- }
- rcu_torture_current_version++;
- oldbatch = cur_ops->completed();
- } while (!kthread_should_stop() && !fullstop);
- VERBOSE_PRINTK_STRING("rcu_torture_writer task stopping");
- while (!kthread_should_stop())
- schedule_timeout_uninterruptible(1);
- return 0;
-}
-
-/*
- * RCU torture fake writer kthread. Repeatedly calls sync, with a random
- * delay between calls.
- */
-static int
-rcu_torture_fakewriter(void *arg)
-{
- DEFINE_RCU_RANDOM(rand);
-
- VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task started");
- set_user_nice(current, 19);
-
- do {
- schedule_timeout_uninterruptible(1 + rcu_random(&rand)%10);
- udelay(rcu_random(&rand) & 0x3ff);
- cur_ops->sync();
- } while (!kthread_should_stop() && !fullstop);
-
- VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task stopping");
- while (!kthread_should_stop())
- schedule_timeout_uninterruptible(1);
- return 0;
-}
-
-/*
- * RCU torture reader kthread. Repeatedly dereferences rcu_torture_current,
- * incrementing the corresponding element of the pipeline array. The
- * counter in the element should never be greater than 1, otherwise, the
- * RCU implementation is broken.
- */
-static int
-rcu_torture_reader(void *arg)
-{
- int completed;
- int idx;
- DEFINE_RCU_RANDOM(rand);
- struct rcu_torture *p;
- int pipe_count;
-
- VERBOSE_PRINTK_STRING("rcu_torture_reader task started");
- set_user_nice(current, 19);
-
- do {
- idx = cur_ops->readlock();
- completed = cur_ops->completed();
- p = rcu_dereference(rcu_torture_current);
- if (p == NULL) {
- /* Wait for rcu_torture_writer to get underway */
- cur_ops->readunlock(idx);
- schedule_timeout_interruptible(HZ);
- continue;
- }
- if (p->rtort_mbtest == 0)
- atomic_inc(&n_rcu_torture_mberror);
- cur_ops->readdelay(&rand);
- preempt_disable();
- pipe_count = p->rtort_pipe_count;
- if (pipe_count > RCU_TORTURE_PIPE_LEN) {
- /* Should not happen, but... */
- pipe_count = RCU_TORTURE_PIPE_LEN;
- }
- ++__get_cpu_var(rcu_torture_count)[pipe_count];
- completed = cur_ops->completed() - completed;
- if (completed > RCU_TORTURE_PIPE_LEN) {
- /* Should not happen, but... */
- completed = RCU_TORTURE_PIPE_LEN;
- }
- ++__get_cpu_var(rcu_torture_batch)[completed];
- preempt_enable();
- cur_ops->readunlock(idx);
- schedule();
- } while (!kthread_should_stop() && !fullstop);
- VERBOSE_PRINTK_STRING("rcu_torture_reader task stopping");
- while (!kthread_should_stop())
- schedule_timeout_uninterruptible(1);
- return 0;
-}
-
-/*
- * Create an RCU-torture statistics message in the specified buffer.
- */
-static int
-rcu_torture_printk(char *page)
-{
- int cnt = 0;
- int cpu;
- int i;
- long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
- long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
-
- for_each_possible_cpu(cpu) {
- for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
- pipesummary[i] += per_cpu(rcu_torture_count, cpu)[i];
- batchsummary[i] += per_cpu(rcu_torture_batch, cpu)[i];
- }
- }
- for (i = RCU_TORTURE_PIPE_LEN - 1; i >= 0; i--) {
- if (pipesummary[i] != 0)
- break;
- }
- cnt += sprintf(&page[cnt], "%s%s ", torture_type, TORTURE_FLAG);
- cnt += sprintf(&page[cnt],
- "rtc: %p ver: %ld tfle: %d rta: %d rtaf: %d rtf: %d "
- "rtmbe: %d",
- rcu_torture_current,
- rcu_torture_current_version,
- list_empty(&rcu_torture_freelist),
- atomic_read(&n_rcu_torture_alloc),
- atomic_read(&n_rcu_torture_alloc_fail),
- atomic_read(&n_rcu_torture_free),
- atomic_read(&n_rcu_torture_mberror));
- if (atomic_read(&n_rcu_torture_mberror) != 0)
- cnt += sprintf(&page[cnt], " !!!");
- cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
- if (i > 1) {
- cnt += sprintf(&page[cnt], "!!! ");
- atomic_inc(&n_rcu_torture_error);
- }
- cnt += sprintf(&page[cnt], "Reader Pipe: ");
- for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
- cnt += sprintf(&page[cnt], " %ld", pipesummary[i]);
- cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
- cnt += sprintf(&page[cnt], "Reader Batch: ");
- for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
- cnt += sprintf(&page[cnt], " %ld", batchsummary[i]);
- cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
- cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
- for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
- cnt += sprintf(&page[cnt], " %d",
- atomic_read(&rcu_torture_wcount[i]));
- }
- cnt += sprintf(&page[cnt], "\n");
- if (cur_ops->stats)
- cnt += cur_ops->stats(&page[cnt]);
- return cnt;
-}
-
-/*
- * Print torture statistics. Caller must ensure that there is only
- * one call to this function at a given time!!! This is normally
- * accomplished by relying on the module system to only have one copy
- * of the module loaded, and then by giving the rcu_torture_stats
- * kthread full control (or the init/cleanup functions when rcu_torture_stats
- * thread is not running).
- */
-static void
-rcu_torture_stats_print(void)
-{
- int cnt;
-
- cnt = rcu_torture_printk(printk_buf);
- printk(KERN_ALERT "%s", printk_buf);
-}
-
-/*
- * Periodically prints torture statistics, if periodic statistics printing
- * was specified via the stat_interval module parameter.
- *
- * No need to worry about fullstop here, since this one doesn't reference
- * volatile state or register callbacks.
- */
-static int
-rcu_torture_stats(void *arg)
-{
- VERBOSE_PRINTK_STRING("rcu_torture_stats task started");
- do {
- schedule_timeout_interruptible(stat_interval * HZ);
- rcu_torture_stats_print();
- } while (!kthread_should_stop());
- VERBOSE_PRINTK_STRING("rcu_torture_stats task stopping");
- return 0;
-}
-
-static int rcu_idle_cpu; /* Force all torture tasks off this CPU */
-
-/* Shuffle tasks such that we allow @rcu_idle_cpu to become idle. A special case
- * is when @rcu_idle_cpu = -1, when we allow the tasks to run on all CPUs.
- */
-static void rcu_torture_shuffle_tasks(void)
-{
- cpumask_t tmp_mask = CPU_MASK_ALL;
- int i;
-
- get_online_cpus();
-
- /* No point in shuffling if there is only one online CPU (ex: UP) */
- if (num_online_cpus() == 1) {
- put_online_cpus();
- return;
- }
-
- if (rcu_idle_cpu != -1)
- cpu_clear(rcu_idle_cpu, tmp_mask);
-
- set_cpus_allowed(current, tmp_mask);
-
- if (reader_tasks) {
- for (i = 0; i < nrealreaders; i++)
- if (reader_tasks[i])
- set_cpus_allowed(reader_tasks[i], tmp_mask);
- }
-
- if (fakewriter_tasks) {
- for (i = 0; i < nfakewriters; i++)
- if (fakewriter_tasks[i])
- set_cpus_allowed(fakewriter_tasks[i], tmp_mask);
- }
-
- if (writer_task)
- set_cpus_allowed(writer_task, tmp_mask);
-
- if (stats_task)
- set_cpus_allowed(stats_task, tmp_mask);
-
- if (rcu_idle_cpu == -1)
- rcu_idle_cpu = num_online_cpus() - 1;
- else
- rcu_idle_cpu--;
-
- put_online_cpus();
-}
-
-/* Shuffle tasks across CPUs, with the intent of allowing each CPU in the
- * system to become idle at a time and cut off its timer ticks. This is meant
- * to test the support for such tickless idle CPU in RCU.
- */
-static int
-rcu_torture_shuffle(void *arg)
-{
- VERBOSE_PRINTK_STRING("rcu_torture_shuffle task started");
- do {
- schedule_timeout_interruptible(shuffle_interval * HZ);
- rcu_torture_shuffle_tasks();
- } while (!kthread_should_stop());
- VERBOSE_PRINTK_STRING("rcu_torture_shuffle task stopping");
- return 0;
-}
-
-static inline void
-rcu_torture_print_module_parms(char *tag)
-{
- printk(KERN_ALERT "%s" TORTURE_FLAG
- "--- %s: nreaders=%d nfakewriters=%d "
- "stat_interval=%d verbose=%d test_no_idle_hz=%d "
- "shuffle_interval = %d\n",
- torture_type, tag, nrealreaders, nfakewriters,
- stat_interval, verbose, test_no_idle_hz, shuffle_interval);
-}
-
-static void
-rcu_torture_cleanup(void)
-{
- int i;
-
- fullstop = 1;
- if (shuffler_task) {
- VERBOSE_PRINTK_STRING("Stopping rcu_torture_shuffle task");
- kthread_stop(shuffler_task);
- }
- shuffler_task = NULL;
-
- if (writer_task) {
- VERBOSE_PRINTK_STRING("Stopping rcu_torture_writer task");
- kthread_stop(writer_task);
- }
- writer_task = NULL;
-
- if (reader_tasks) {
- for (i = 0; i < nrealreaders; i++) {
- if (reader_tasks[i]) {
- VERBOSE_PRINTK_STRING(
- "Stopping rcu_torture_reader task");
- kthread_stop(reader_tasks[i]);
- }
- reader_tasks[i] = NULL;
- }
- kfree(reader_tasks);
- reader_tasks = NULL;
- }
- rcu_torture_current = NULL;
-
- if (fakewriter_tasks) {
- for (i = 0; i < nfakewriters; i++) {
- if (fakewriter_tasks[i]) {
- VERBOSE_PRINTK_STRING(
- "Stopping rcu_torture_fakewriter task");
- kthread_stop(fakewriter_tasks[i]);
- }
- fakewriter_tasks[i] = NULL;
- }
- kfree(fakewriter_tasks);
- fakewriter_tasks = NULL;
- }
-
- if (stats_task) {
- VERBOSE_PRINTK_STRING("Stopping rcu_torture_stats task");
- kthread_stop(stats_task);
- }
- stats_task = NULL;
-
- /* Wait for all RCU callbacks to fire. */
- rcu_barrier();
-
- rcu_torture_stats_print(); /* -After- the stats thread is stopped! */
-
- if (cur_ops->cleanup)
- cur_ops->cleanup();
- if (atomic_read(&n_rcu_torture_error))
- rcu_torture_print_module_parms("End of test: FAILURE");
- else
- rcu_torture_print_module_parms("End of test: SUCCESS");
-}
-
-static int __init
-rcu_torture_init(void)
-{
- int i;
- int cpu;
- int firsterr = 0;
- static struct rcu_torture_ops *torture_ops[] =
- { &rcu_ops, &rcu_sync_ops, &rcu_bh_ops, &rcu_bh_sync_ops,
- &srcu_ops, &sched_ops, };
-
- /* Process args and tell the world that the torturer is on the job. */
- for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
- cur_ops = torture_ops[i];
- if (strcmp(torture_type, cur_ops->name) == 0)
- break;
- }
- if (i == ARRAY_SIZE(torture_ops)) {
- printk(KERN_ALERT "rcutorture: invalid torture type: \"%s\"\n",
- torture_type);
- return (-EINVAL);
- }
- if (cur_ops->init)
- cur_ops->init(); /* no "goto unwind" prior to this point!!! */
-
- if (nreaders >= 0)
- nrealreaders = nreaders;
- else
- nrealreaders = 2 * num_online_cpus();
- rcu_torture_print_module_parms("Start of test");
- fullstop = 0;
-
- /* Set up the freelist. */
-
- INIT_LIST_HEAD(&rcu_torture_freelist);
- for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) {
- rcu_tortures[i].rtort_mbtest = 0;
- list_add_tail(&rcu_tortures[i].rtort_free,
- &rcu_torture_freelist);
- }
-
- /* Initialize the statistics so that each run gets its own numbers. */
-
- rcu_torture_current = NULL;
- rcu_torture_current_version = 0;
- atomic_set(&n_rcu_torture_alloc, 0);
- atomic_set(&n_rcu_torture_alloc_fail, 0);
- atomic_set(&n_rcu_torture_free, 0);
- atomic_set(&n_rcu_torture_mberror, 0);
- atomic_set(&n_rcu_torture_error, 0);
- for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
- atomic_set(&rcu_torture_wcount[i], 0);
- for_each_possible_cpu(cpu) {
- for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
- per_cpu(rcu_torture_count, cpu)[i] = 0;
- per_cpu(rcu_torture_batch, cpu)[i] = 0;
- }
- }
-
- /* Start up the kthreads. */
-
- VERBOSE_PRINTK_STRING("Creating rcu_torture_writer task");
- writer_task = kthread_run(rcu_torture_writer, NULL,
- "rcu_torture_writer");
- if (IS_ERR(writer_task)) {
- firsterr = PTR_ERR(writer_task);
- VERBOSE_PRINTK_ERRSTRING("Failed to create writer");
- writer_task = NULL;
- goto unwind;
- }
- fakewriter_tasks = kzalloc(nfakewriters * sizeof(fakewriter_tasks[0]),
- GFP_KERNEL);
- if (fakewriter_tasks == NULL) {
- VERBOSE_PRINTK_ERRSTRING("out of memory");
- firsterr = -ENOMEM;
- goto unwind;
- }
- for (i = 0; i < nfakewriters; i++) {
- VERBOSE_PRINTK_STRING("Creating rcu_torture_fakewriter task");
- fakewriter_tasks[i] = kthread_run(rcu_torture_fakewriter, NULL,
- "rcu_torture_fakewriter");
- if (IS_ERR(fakewriter_tasks[i])) {
- firsterr = PTR_ERR(fakewriter_tasks[i]);
- VERBOSE_PRINTK_ERRSTRING("Failed to create fakewriter");
- fakewriter_tasks[i] = NULL;
- goto unwind;
- }
- }
- reader_tasks = kzalloc(nrealreaders * sizeof(reader_tasks[0]),
- GFP_KERNEL);
- if (reader_tasks == NULL) {
- VERBOSE_PRINTK_ERRSTRING("out of memory");
- firsterr = -ENOMEM;
- goto unwind;
- }
- for (i = 0; i < nrealreaders; i++) {
- VERBOSE_PRINTK_STRING("Creating rcu_torture_reader task");
- reader_tasks[i] = kthread_run(rcu_torture_reader, NULL,
- "rcu_torture_reader");
- if (IS_ERR(reader_tasks[i])) {
- firsterr = PTR_ERR(reader_tasks[i]);
- VERBOSE_PRINTK_ERRSTRING("Failed to create reader");
- reader_tasks[i] = NULL;
- goto unwind;
- }
- }
- if (stat_interval > 0) {
- VERBOSE_PRINTK_STRING("Creating rcu_torture_stats task");
- stats_task = kthread_run(rcu_torture_stats, NULL,
- "rcu_torture_stats");
- if (IS_ERR(stats_task)) {
- firsterr = PTR_ERR(stats_task);
- VERBOSE_PRINTK_ERRSTRING("Failed to create stats");
- stats_task = NULL;
- goto unwind;
- }
- }
- if (test_no_idle_hz) {
- rcu_idle_cpu = num_online_cpus() - 1;
- /* Create the shuffler thread */
- shuffler_task = kthread_run(rcu_torture_shuffle, NULL,
- "rcu_torture_shuffle");
- if (IS_ERR(shuffler_task)) {
- firsterr = PTR_ERR(shuffler_task);
- VERBOSE_PRINTK_ERRSTRING("Failed to create shuffler");
- shuffler_task = NULL;
- goto unwind;
- }
- }
- return 0;
-
-unwind:
- rcu_torture_cleanup();
- return firsterr;
-}
-
-module_init(rcu_torture_init);
-module_exit(rcu_torture_cleanup);
Index: linux-2.6.24-rc6/lib/Kconfig.debug
===================================================================
--- linux-2.6.24-rc6.orig/lib/Kconfig.debug
+++ linux-2.6.24-rc6/lib/Kconfig.debug
@@ -487,19 +487,6 @@ config DEBUG_SYNCHRO_TEST

See Documentation/synchro-test.txt.

-config RCU_TORTURE_TEST
- tristate "torture tests for RCU"
- depends on DEBUG_KERNEL
- depends on m
- default n
- help
- This option provides a kernel module that runs torture tests
- on the RCU infrastructure. The kernel module may be built
- after the fact on the running kernel to be tested, if desired.
-
- Say M if you want the RCU torture tests to build as a module.
- Say N if you are unsure.
-
config LKDTM
tristate "Linux Kernel Dump Test Tool Module"
depends on DEBUG_KERNEL
Index: linux-2.6.24-rc6/tests/Kconfig
===================================================================
--- linux-2.6.24-rc6.orig/tests/Kconfig
+++ linux-2.6.24-rc6/tests/Kconfig
@@ -18,5 +18,18 @@ config DEBUG_LOCKING_API_SELFTESTS
The following locking APIs are covered: spinlocks, rwlocks,
mutexes and rwsems.

+config RCU_TORTURE_TEST
+ tristate "torture tests for RCU"
+ depends on DEBUG_KERNEL
+ depends on m
+ default n
+ help
+ This option provides a kernel module that runs torture tests
+ on the RCU infrastructure. The kernel module may be built
+ after the fact on the running kernel to be tested, if desired.
+
+ Say M if you want the RCU torture tests to build as a module.
+ Say N if you are unsure.
+
endif # KERNEL_TESTS

Index: linux-2.6.24-rc6/tests/Makefile
===================================================================
--- linux-2.6.24-rc6.orig/tests/Makefile
+++ linux-2.6.24-rc6/tests/Makefile
@@ -3,3 +3,4 @@
#

obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o
+obj-$(CONFIG_RCU_TORTURE_TEST) += rcutorture.o
Index: linux-2.6.24-rc6/tests/rcutorture.c
===================================================================
--- /dev/null
+++ linux-2.6.24-rc6/tests/rcutorture.c
@@ -0,0 +1,995 @@
+/*
+ * Read-Copy Update module-based torture test facility
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2005, 2006
+ *
+ * Authors: Paul E. McKenney <[email protected]>
+ * Josh Triplett <[email protected]>
+ *
+ * See also: Documentation/RCU/torture.txt
+ */
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kthread.h>
+#include <linux/err.h>
+#include <linux/spinlock.h>
+#include <linux/smp.h>
+#include <linux/rcupdate.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <asm/atomic.h>
+#include <linux/bitops.h>
+#include <linux/completion.h>
+#include <linux/moduleparam.h>
+#include <linux/percpu.h>
+#include <linux/notifier.h>
+#include <linux/freezer.h>
+#include <linux/cpu.h>
+#include <linux/delay.h>
+#include <linux/byteorder/swabb.h>
+#include <linux/stat.h>
+#include <linux/srcu.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Paul E. McKenney <[email protected]> and "
+ "Josh Triplett <[email protected]>");
+
+static int nreaders = -1; /* # reader threads, defaults to 2*ncpus */
+static int nfakewriters = 4; /* # fake writer threads */
+static int stat_interval; /* Interval between stats, in seconds. */
+ /* Defaults to "only at end of test". */
+static int verbose; /* Print more debug info. */
+static int test_no_idle_hz; /* Test RCU's support for tickless idle CPUs. */
+static int shuffle_interval = 5; /* Interval between shuffles (in sec)*/
+static char *torture_type = "rcu"; /* What RCU implementation to torture. */
+
+module_param(nreaders, int, 0444);
+MODULE_PARM_DESC(nreaders, "Number of RCU reader threads");
+module_param(nfakewriters, int, 0444);
+MODULE_PARM_DESC(nfakewriters, "Number of RCU fake writer threads");
+module_param(stat_interval, int, 0444);
+MODULE_PARM_DESC(stat_interval, "Number of seconds between stats printk()s");
+module_param(verbose, bool, 0444);
+MODULE_PARM_DESC(verbose, "Enable verbose debugging printk()s");
+module_param(test_no_idle_hz, bool, 0444);
+MODULE_PARM_DESC(test_no_idle_hz, "Test support for tickless idle CPUs");
+module_param(shuffle_interval, int, 0444);
+MODULE_PARM_DESC(shuffle_interval, "Number of seconds between shuffles");
+module_param(torture_type, charp, 0444);
+MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, rcu_bh, srcu)");
+
+#define TORTURE_FLAG "-torture:"
+#define PRINTK_STRING(s) \
+ do { printk(KERN_ALERT "%s" TORTURE_FLAG s "\n", torture_type); } while (0)
+#define VERBOSE_PRINTK_STRING(s) \
+ do { if (verbose) printk(KERN_ALERT "%s" TORTURE_FLAG s "\n", torture_type); } while (0)
+#define VERBOSE_PRINTK_ERRSTRING(s) \
+ do { if (verbose) printk(KERN_ALERT "%s" TORTURE_FLAG "!!! " s "\n", torture_type); } while (0)
+
+static char printk_buf[4096];
+
+static int nrealreaders;
+static struct task_struct *writer_task;
+static struct task_struct **fakewriter_tasks;
+static struct task_struct **reader_tasks;
+static struct task_struct *stats_task;
+static struct task_struct *shuffler_task;
+
+#define RCU_TORTURE_PIPE_LEN 10
+
+struct rcu_torture {
+ struct rcu_head rtort_rcu;
+ int rtort_pipe_count;
+ struct list_head rtort_free;
+ int rtort_mbtest;
+};
+
+static int fullstop = 0; /* stop generating callbacks at test end. */
+static LIST_HEAD(rcu_torture_freelist);
+static struct rcu_torture *rcu_torture_current = NULL;
+static long rcu_torture_current_version = 0;
+static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
+static DEFINE_SPINLOCK(rcu_torture_lock);
+static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
+ { 0 };
+static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
+ { 0 };
+static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
+static atomic_t n_rcu_torture_alloc;
+static atomic_t n_rcu_torture_alloc_fail;
+static atomic_t n_rcu_torture_free;
+static atomic_t n_rcu_torture_mberror;
+static atomic_t n_rcu_torture_error;
+static struct list_head rcu_torture_removed;
+
+/*
+ * Allocate an element from the rcu_tortures pool.
+ */
+static struct rcu_torture *
+rcu_torture_alloc(void)
+{
+ struct list_head *p;
+
+ spin_lock_bh(&rcu_torture_lock);
+ if (list_empty(&rcu_torture_freelist)) {
+ atomic_inc(&n_rcu_torture_alloc_fail);
+ spin_unlock_bh(&rcu_torture_lock);
+ return NULL;
+ }
+ atomic_inc(&n_rcu_torture_alloc);
+ p = rcu_torture_freelist.next;
+ list_del_init(p);
+ spin_unlock_bh(&rcu_torture_lock);
+ return container_of(p, struct rcu_torture, rtort_free);
+}
+
+/*
+ * Free an element to the rcu_tortures pool.
+ */
+static void
+rcu_torture_free(struct rcu_torture *p)
+{
+ atomic_inc(&n_rcu_torture_free);
+ spin_lock_bh(&rcu_torture_lock);
+ list_add_tail(&p->rtort_free, &rcu_torture_freelist);
+ spin_unlock_bh(&rcu_torture_lock);
+}
+
+struct rcu_random_state {
+ unsigned long rrs_state;
+ long rrs_count;
+};
+
+#define RCU_RANDOM_MULT 39916801 /* prime */
+#define RCU_RANDOM_ADD 479001701 /* prime */
+#define RCU_RANDOM_REFRESH 10000
+
+#define DEFINE_RCU_RANDOM(name) struct rcu_random_state name = { 0, 0 }
+
+/*
+ * Crude but fast random-number generator. Uses a linear congruential
+ * generator, with occasional help from cpu_clock().
+ */
+static unsigned long
+rcu_random(struct rcu_random_state *rrsp)
+{
+ if (--rrsp->rrs_count < 0) {
+ rrsp->rrs_state +=
+ (unsigned long)cpu_clock(raw_smp_processor_id());
+ rrsp->rrs_count = RCU_RANDOM_REFRESH;
+ }
+ rrsp->rrs_state = rrsp->rrs_state * RCU_RANDOM_MULT + RCU_RANDOM_ADD;
+ return swahw32(rrsp->rrs_state);
+}
+
+/*
+ * Operations vector for selecting different types of tests.
+ */
+
+struct rcu_torture_ops {
+ void (*init)(void);
+ void (*cleanup)(void);
+ int (*readlock)(void);
+ void (*readdelay)(struct rcu_random_state *rrsp);
+ void (*readunlock)(int idx);
+ int (*completed)(void);
+ void (*deferredfree)(struct rcu_torture *p);
+ void (*sync)(void);
+ int (*stats)(char *page);
+ char *name;
+};
+static struct rcu_torture_ops *cur_ops = NULL;
+
+/*
+ * Definitions for rcu torture testing.
+ */
+
+static int rcu_torture_read_lock(void) __acquires(RCU)
+{
+ rcu_read_lock();
+ return 0;
+}
+
+static void rcu_read_delay(struct rcu_random_state *rrsp)
+{
+ long delay;
+ const long longdelay = 200;
+
+ /* We want there to be long-running readers, but not all the time. */
+
+ delay = rcu_random(rrsp) % (nrealreaders * 2 * longdelay);
+ if (!delay)
+ udelay(longdelay);
+}
+
+static void rcu_torture_read_unlock(int idx) __releases(RCU)
+{
+ rcu_read_unlock();
+}
+
+static int rcu_torture_completed(void)
+{
+ return rcu_batches_completed();
+}
+
+static void
+rcu_torture_cb(struct rcu_head *p)
+{
+ int i;
+ struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);
+
+ if (fullstop) {
+ /* Test is ending, just drop callbacks on the floor. */
+ /* The next initialization will pick up the pieces. */
+ return;
+ }
+ i = rp->rtort_pipe_count;
+ if (i > RCU_TORTURE_PIPE_LEN)
+ i = RCU_TORTURE_PIPE_LEN;
+ atomic_inc(&rcu_torture_wcount[i]);
+ if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
+ rp->rtort_mbtest = 0;
+ rcu_torture_free(rp);
+ } else
+ cur_ops->deferredfree(rp);
+}
+
+static void rcu_torture_deferred_free(struct rcu_torture *p)
+{
+ call_rcu(&p->rtort_rcu, rcu_torture_cb);
+}
+
+static struct rcu_torture_ops rcu_ops = {
+ .init = NULL,
+ .cleanup = NULL,
+ .readlock = rcu_torture_read_lock,
+ .readdelay = rcu_read_delay,
+ .readunlock = rcu_torture_read_unlock,
+ .completed = rcu_torture_completed,
+ .deferredfree = rcu_torture_deferred_free,
+ .sync = synchronize_rcu,
+ .stats = NULL,
+ .name = "rcu"
+};
+
+static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
+{
+ int i;
+ struct rcu_torture *rp;
+ struct rcu_torture *rp1;
+
+ cur_ops->sync();
+ list_add(&p->rtort_free, &rcu_torture_removed);
+ list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
+ i = rp->rtort_pipe_count;
+ if (i > RCU_TORTURE_PIPE_LEN)
+ i = RCU_TORTURE_PIPE_LEN;
+ atomic_inc(&rcu_torture_wcount[i]);
+ if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
+ rp->rtort_mbtest = 0;
+ list_del(&rp->rtort_free);
+ rcu_torture_free(rp);
+ }
+ }
+}
+
+static void rcu_sync_torture_init(void)
+{
+ INIT_LIST_HEAD(&rcu_torture_removed);
+}
+
+static struct rcu_torture_ops rcu_sync_ops = {
+ .init = rcu_sync_torture_init,
+ .cleanup = NULL,
+ .readlock = rcu_torture_read_lock,
+ .readdelay = rcu_read_delay,
+ .readunlock = rcu_torture_read_unlock,
+ .completed = rcu_torture_completed,
+ .deferredfree = rcu_sync_torture_deferred_free,
+ .sync = synchronize_rcu,
+ .stats = NULL,
+ .name = "rcu_sync"
+};
+
+/*
+ * Definitions for rcu_bh torture testing.
+ */
+
+static int rcu_bh_torture_read_lock(void) __acquires(RCU_BH)
+{
+ rcu_read_lock_bh();
+ return 0;
+}
+
+static void rcu_bh_torture_read_unlock(int idx) __releases(RCU_BH)
+{
+ rcu_read_unlock_bh();
+}
+
+static int rcu_bh_torture_completed(void)
+{
+ return rcu_batches_completed_bh();
+}
+
+static void rcu_bh_torture_deferred_free(struct rcu_torture *p)
+{
+ call_rcu_bh(&p->rtort_rcu, rcu_torture_cb);
+}
+
+struct rcu_bh_torture_synchronize {
+ struct rcu_head head;
+ struct completion completion;
+};
+
+static void rcu_bh_torture_wakeme_after_cb(struct rcu_head *head)
+{
+ struct rcu_bh_torture_synchronize *rcu;
+
+ rcu = container_of(head, struct rcu_bh_torture_synchronize, head);
+ complete(&rcu->completion);
+}
+
+static void rcu_bh_torture_synchronize(void)
+{
+ struct rcu_bh_torture_synchronize rcu;
+
+ init_completion(&rcu.completion);
+ call_rcu_bh(&rcu.head, rcu_bh_torture_wakeme_after_cb);
+ wait_for_completion(&rcu.completion);
+}
+
+static struct rcu_torture_ops rcu_bh_ops = {
+ .init = NULL,
+ .cleanup = NULL,
+ .readlock = rcu_bh_torture_read_lock,
+ .readdelay = rcu_read_delay, /* just reuse rcu's version. */
+ .readunlock = rcu_bh_torture_read_unlock,
+ .completed = rcu_bh_torture_completed,
+ .deferredfree = rcu_bh_torture_deferred_free,
+ .sync = rcu_bh_torture_synchronize,
+ .stats = NULL,
+ .name = "rcu_bh"
+};
+
+static struct rcu_torture_ops rcu_bh_sync_ops = {
+ .init = rcu_sync_torture_init,
+ .cleanup = NULL,
+ .readlock = rcu_bh_torture_read_lock,
+ .readdelay = rcu_read_delay, /* just reuse rcu's version. */
+ .readunlock = rcu_bh_torture_read_unlock,
+ .completed = rcu_bh_torture_completed,
+ .deferredfree = rcu_sync_torture_deferred_free,
+ .sync = rcu_bh_torture_synchronize,
+ .stats = NULL,
+ .name = "rcu_bh_sync"
+};
+
+/*
+ * Definitions for srcu torture testing.
+ */
+
+static struct srcu_struct srcu_ctl;
+
+static void srcu_torture_init(void)
+{
+ init_srcu_struct(&srcu_ctl);
+ rcu_sync_torture_init();
+}
+
+static void srcu_torture_cleanup(void)
+{
+ synchronize_srcu(&srcu_ctl);
+ cleanup_srcu_struct(&srcu_ctl);
+}
+
+static int srcu_torture_read_lock(void) __acquires(&srcu_ctl)
+{
+ return srcu_read_lock(&srcu_ctl);
+}
+
+static void srcu_read_delay(struct rcu_random_state *rrsp)
+{
+ long delay;
+ const long uspertick = 1000000 / HZ;
+ const long longdelay = 10;
+
+ /* We want there to be long-running readers, but not all the time. */
+
+ delay = rcu_random(rrsp) % (nrealreaders * 2 * longdelay * uspertick);
+ if (!delay)
+ schedule_timeout_interruptible(longdelay);
+}
+
+static void srcu_torture_read_unlock(int idx) __releases(&srcu_ctl)
+{
+ srcu_read_unlock(&srcu_ctl, idx);
+}
+
+static int srcu_torture_completed(void)
+{
+ return srcu_batches_completed(&srcu_ctl);
+}
+
+static void srcu_torture_synchronize(void)
+{
+ synchronize_srcu(&srcu_ctl);
+}
+
+static int srcu_torture_stats(char *page)
+{
+ int cnt = 0;
+ int cpu;
+ int idx = srcu_ctl.completed & 0x1;
+
+ cnt += sprintf(&page[cnt], "%s%s per-CPU(idx=%d):",
+ torture_type, TORTURE_FLAG, idx);
+ for_each_possible_cpu(cpu) {
+ cnt += sprintf(&page[cnt], " %d(%d,%d)", cpu,
+ per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[!idx],
+ per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[idx]);
+ }
+ cnt += sprintf(&page[cnt], "\n");
+ return cnt;
+}
+
+static struct rcu_torture_ops srcu_ops = {
+ .init = srcu_torture_init,
+ .cleanup = srcu_torture_cleanup,
+ .readlock = srcu_torture_read_lock,
+ .readdelay = srcu_read_delay,
+ .readunlock = srcu_torture_read_unlock,
+ .completed = srcu_torture_completed,
+ .deferredfree = rcu_sync_torture_deferred_free,
+ .sync = srcu_torture_synchronize,
+ .stats = srcu_torture_stats,
+ .name = "srcu"
+};
+
+/*
+ * Definitions for sched torture testing.
+ */
+
+static int sched_torture_read_lock(void)
+{
+ preempt_disable();
+ return 0;
+}
+
+static void sched_torture_read_unlock(int idx)
+{
+ preempt_enable();
+}
+
+static int sched_torture_completed(void)
+{
+ return 0;
+}
+
+static void sched_torture_synchronize(void)
+{
+ synchronize_sched();
+}
+
+static struct rcu_torture_ops sched_ops = {
+ .init = rcu_sync_torture_init,
+ .cleanup = NULL,
+ .readlock = sched_torture_read_lock,
+ .readdelay = rcu_read_delay, /* just reuse rcu's version. */
+ .readunlock = sched_torture_read_unlock,
+ .completed = sched_torture_completed,
+ .deferredfree = rcu_sync_torture_deferred_free,
+ .sync = sched_torture_synchronize,
+ .stats = NULL,
+ .name = "sched"
+};
+
+/*
+ * RCU torture writer kthread. Repeatedly substitutes a new structure
+ * for that pointed to by rcu_torture_current, freeing the old structure
+ * after a series of grace periods (the "pipeline").
+ */
+static int
+rcu_torture_writer(void *arg)
+{
+ int i;
+ long oldbatch = rcu_batches_completed();
+ struct rcu_torture *rp;
+ struct rcu_torture *old_rp;
+ static DEFINE_RCU_RANDOM(rand);
+
+ VERBOSE_PRINTK_STRING("rcu_torture_writer task started");
+ set_user_nice(current, 19);
+
+ do {
+ schedule_timeout_uninterruptible(1);
+ if ((rp = rcu_torture_alloc()) == NULL)
+ continue;
+ rp->rtort_pipe_count = 0;
+ udelay(rcu_random(&rand) & 0x3ff);
+ old_rp = rcu_torture_current;
+ rp->rtort_mbtest = 1;
+ rcu_assign_pointer(rcu_torture_current, rp);
+ smp_wmb();
+ if (old_rp) {
+ i = old_rp->rtort_pipe_count;
+ if (i > RCU_TORTURE_PIPE_LEN)
+ i = RCU_TORTURE_PIPE_LEN;
+ atomic_inc(&rcu_torture_wcount[i]);
+ old_rp->rtort_pipe_count++;
+ cur_ops->deferredfree(old_rp);
+ }
+ rcu_torture_current_version++;
+ oldbatch = cur_ops->completed();
+ } while (!kthread_should_stop() && !fullstop);
+ VERBOSE_PRINTK_STRING("rcu_torture_writer task stopping");
+ while (!kthread_should_stop())
+ schedule_timeout_uninterruptible(1);
+ return 0;
+}
+
+/*
+ * RCU torture fake writer kthread. Repeatedly calls sync, with a random
+ * delay between calls.
+ */
+static int
+rcu_torture_fakewriter(void *arg)
+{
+ DEFINE_RCU_RANDOM(rand);
+
+ VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task started");
+ set_user_nice(current, 19);
+
+ do {
+ schedule_timeout_uninterruptible(1 + rcu_random(&rand)%10);
+ udelay(rcu_random(&rand) & 0x3ff);
+ cur_ops->sync();
+ } while (!kthread_should_stop() && !fullstop);
+
+ VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task stopping");
+ while (!kthread_should_stop())
+ schedule_timeout_uninterruptible(1);
+ return 0;
+}
+
+/*
+ * RCU torture reader kthread. Repeatedly dereferences rcu_torture_current,
+ * incrementing the corresponding element of the pipeline array. The
+ * counter in the element should never be greater than 1, otherwise, the
+ * RCU implementation is broken.
+ */
+static int
+rcu_torture_reader(void *arg)
+{
+ int completed;
+ int idx;
+ DEFINE_RCU_RANDOM(rand);
+ struct rcu_torture *p;
+ int pipe_count;
+
+ VERBOSE_PRINTK_STRING("rcu_torture_reader task started");
+ set_user_nice(current, 19);
+
+ do {
+ idx = cur_ops->readlock();
+ completed = cur_ops->completed();
+ p = rcu_dereference(rcu_torture_current);
+ if (p == NULL) {
+ /* Wait for rcu_torture_writer to get underway */
+ cur_ops->readunlock(idx);
+ schedule_timeout_interruptible(HZ);
+ continue;
+ }
+ if (p->rtort_mbtest == 0)
+ atomic_inc(&n_rcu_torture_mberror);
+ cur_ops->readdelay(&rand);
+ preempt_disable();
+ pipe_count = p->rtort_pipe_count;
+ if (pipe_count > RCU_TORTURE_PIPE_LEN) {
+ /* Should not happen, but... */
+ pipe_count = RCU_TORTURE_PIPE_LEN;
+ }
+ ++__get_cpu_var(rcu_torture_count)[pipe_count];
+ completed = cur_ops->completed() - completed;
+ if (completed > RCU_TORTURE_PIPE_LEN) {
+ /* Should not happen, but... */
+ completed = RCU_TORTURE_PIPE_LEN;
+ }
+ ++__get_cpu_var(rcu_torture_batch)[completed];
+ preempt_enable();
+ cur_ops->readunlock(idx);
+ schedule();
+ } while (!kthread_should_stop() && !fullstop);
+ VERBOSE_PRINTK_STRING("rcu_torture_reader task stopping");
+ while (!kthread_should_stop())
+ schedule_timeout_uninterruptible(1);
+ return 0;
+}
+
+/*
+ * Create an RCU-torture statistics message in the specified buffer.
+ */
+static int
+rcu_torture_printk(char *page)
+{
+ int cnt = 0;
+ int cpu;
+ int i;
+ long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
+ long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
+
+ for_each_possible_cpu(cpu) {
+ for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
+ pipesummary[i] += per_cpu(rcu_torture_count, cpu)[i];
+ batchsummary[i] += per_cpu(rcu_torture_batch, cpu)[i];
+ }
+ }
+ for (i = RCU_TORTURE_PIPE_LEN - 1; i >= 0; i--) {
+ if (pipesummary[i] != 0)
+ break;
+ }
+ cnt += sprintf(&page[cnt], "%s%s ", torture_type, TORTURE_FLAG);
+ cnt += sprintf(&page[cnt],
+ "rtc: %p ver: %ld tfle: %d rta: %d rtaf: %d rtf: %d "
+ "rtmbe: %d",
+ rcu_torture_current,
+ rcu_torture_current_version,
+ list_empty(&rcu_torture_freelist),
+ atomic_read(&n_rcu_torture_alloc),
+ atomic_read(&n_rcu_torture_alloc_fail),
+ atomic_read(&n_rcu_torture_free),
+ atomic_read(&n_rcu_torture_mberror));
+ if (atomic_read(&n_rcu_torture_mberror) != 0)
+ cnt += sprintf(&page[cnt], " !!!");
+ cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
+ if (i > 1) {
+ cnt += sprintf(&page[cnt], "!!! ");
+ atomic_inc(&n_rcu_torture_error);
+ }
+ cnt += sprintf(&page[cnt], "Reader Pipe: ");
+ for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
+ cnt += sprintf(&page[cnt], " %ld", pipesummary[i]);
+ cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
+ cnt += sprintf(&page[cnt], "Reader Batch: ");
+ for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
+ cnt += sprintf(&page[cnt], " %ld", batchsummary[i]);
+ cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
+ cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
+ for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
+ cnt += sprintf(&page[cnt], " %d",
+ atomic_read(&rcu_torture_wcount[i]));
+ }
+ cnt += sprintf(&page[cnt], "\n");
+ if (cur_ops->stats)
+ cnt += cur_ops->stats(&page[cnt]);
+ return cnt;
+}
+
+/*
+ * Print torture statistics. Caller must ensure that there is only
+ * one call to this function at a given time!!! This is normally
+ * accomplished by relying on the module system to only have one copy
+ * of the module loaded, and then by giving the rcu_torture_stats
+ * kthread full control (or the init/cleanup functions when rcu_torture_stats
+ * thread is not running).
+ */
+static void
+rcu_torture_stats_print(void)
+{
+ int cnt;
+
+ cnt = rcu_torture_printk(printk_buf);
+ printk(KERN_ALERT "%s", printk_buf);
+}
+
+/*
+ * Periodically prints torture statistics, if periodic statistics printing
+ * was specified via the stat_interval module parameter.
+ *
+ * No need to worry about fullstop here, since this one doesn't reference
+ * volatile state or register callbacks.
+ */
+static int
+rcu_torture_stats(void *arg)
+{
+ VERBOSE_PRINTK_STRING("rcu_torture_stats task started");
+ do {
+ schedule_timeout_interruptible(stat_interval * HZ);
+ rcu_torture_stats_print();
+ } while (!kthread_should_stop());
+ VERBOSE_PRINTK_STRING("rcu_torture_stats task stopping");
+ return 0;
+}
+
+static int rcu_idle_cpu; /* Force all torture tasks off this CPU */
+
+/* Shuffle tasks such that we allow @rcu_idle_cpu to become idle. A special case
+ * is when @rcu_idle_cpu = -1, when we allow the tasks to run on all CPUs.
+ */
+static void rcu_torture_shuffle_tasks(void)
+{
+ cpumask_t tmp_mask = CPU_MASK_ALL;
+ int i;
+
+ get_online_cpus();
+
+ /* No point in shuffling if there is only one online CPU (ex: UP) */
+ if (num_online_cpus() == 1) {
+ put_online_cpus();
+ return;
+ }
+
+ if (rcu_idle_cpu != -1)
+ cpu_clear(rcu_idle_cpu, tmp_mask);
+
+ set_cpus_allowed(current, tmp_mask);
+
+ if (reader_tasks) {
+ for (i = 0; i < nrealreaders; i++)
+ if (reader_tasks[i])
+ set_cpus_allowed(reader_tasks[i], tmp_mask);
+ }
+
+ if (fakewriter_tasks) {
+ for (i = 0; i < nfakewriters; i++)
+ if (fakewriter_tasks[i])
+ set_cpus_allowed(fakewriter_tasks[i], tmp_mask);
+ }
+
+ if (writer_task)
+ set_cpus_allowed(writer_task, tmp_mask);
+
+ if (stats_task)
+ set_cpus_allowed(stats_task, tmp_mask);
+
+ if (rcu_idle_cpu == -1)
+ rcu_idle_cpu = num_online_cpus() - 1;
+ else
+ rcu_idle_cpu--;
+
+ put_online_cpus();
+}
+
+/* Shuffle tasks across CPUs, with the intent of allowing each CPU in the
+ * system to become idle at a time and cut off its timer ticks. This is meant
+ * to test the support for such tickless idle CPU in RCU.
+ */
+static int
+rcu_torture_shuffle(void *arg)
+{
+ VERBOSE_PRINTK_STRING("rcu_torture_shuffle task started");
+ do {
+ schedule_timeout_interruptible(shuffle_interval * HZ);
+ rcu_torture_shuffle_tasks();
+ } while (!kthread_should_stop());
+ VERBOSE_PRINTK_STRING("rcu_torture_shuffle task stopping");
+ return 0;
+}
+
+static inline void
+rcu_torture_print_module_parms(char *tag)
+{
+ printk(KERN_ALERT "%s" TORTURE_FLAG
+ "--- %s: nreaders=%d nfakewriters=%d "
+ "stat_interval=%d verbose=%d test_no_idle_hz=%d "
+ "shuffle_interval = %d\n",
+ torture_type, tag, nrealreaders, nfakewriters,
+ stat_interval, verbose, test_no_idle_hz, shuffle_interval);
+}
+
+static void
+rcu_torture_cleanup(void)
+{
+ int i;
+
+ fullstop = 1;
+ if (shuffler_task) {
+ VERBOSE_PRINTK_STRING("Stopping rcu_torture_shuffle task");
+ kthread_stop(shuffler_task);
+ }
+ shuffler_task = NULL;
+
+ if (writer_task) {
+ VERBOSE_PRINTK_STRING("Stopping rcu_torture_writer task");
+ kthread_stop(writer_task);
+ }
+ writer_task = NULL;
+
+ if (reader_tasks) {
+ for (i = 0; i < nrealreaders; i++) {
+ if (reader_tasks[i]) {
+ VERBOSE_PRINTK_STRING(
+ "Stopping rcu_torture_reader task");
+ kthread_stop(reader_tasks[i]);
+ }
+ reader_tasks[i] = NULL;
+ }
+ kfree(reader_tasks);
+ reader_tasks = NULL;
+ }
+ rcu_torture_current = NULL;
+
+ if (fakewriter_tasks) {
+ for (i = 0; i < nfakewriters; i++) {
+ if (fakewriter_tasks[i]) {
+ VERBOSE_PRINTK_STRING(
+ "Stopping rcu_torture_fakewriter task");
+ kthread_stop(fakewriter_tasks[i]);
+ }
+ fakewriter_tasks[i] = NULL;
+ }
+ kfree(fakewriter_tasks);
+ fakewriter_tasks = NULL;
+ }
+
+ if (stats_task) {
+ VERBOSE_PRINTK_STRING("Stopping rcu_torture_stats task");
+ kthread_stop(stats_task);
+ }
+ stats_task = NULL;
+
+ /* Wait for all RCU callbacks to fire. */
+ rcu_barrier();
+
+ rcu_torture_stats_print(); /* -After- the stats thread is stopped! */
+
+ if (cur_ops->cleanup)
+ cur_ops->cleanup();
+ if (atomic_read(&n_rcu_torture_error))
+ rcu_torture_print_module_parms("End of test: FAILURE");
+ else
+ rcu_torture_print_module_parms("End of test: SUCCESS");
+}
+
+static int __init
+rcu_torture_init(void)
+{
+ int i;
+ int cpu;
+ int firsterr = 0;
+ static struct rcu_torture_ops *torture_ops[] =
+ { &rcu_ops, &rcu_sync_ops, &rcu_bh_ops, &rcu_bh_sync_ops,
+ &srcu_ops, &sched_ops, };
+
+ /* Process args and tell the world that the torturer is on the job. */
+ for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
+ cur_ops = torture_ops[i];
+ if (strcmp(torture_type, cur_ops->name) == 0)
+ break;
+ }
+ if (i == ARRAY_SIZE(torture_ops)) {
+ printk(KERN_ALERT "rcutorture: invalid torture type: \"%s\"\n",
+ torture_type);
+ return (-EINVAL);
+ }
+ if (cur_ops->init)
+ cur_ops->init(); /* no "goto unwind" prior to this point!!! */
+
+ if (nreaders >= 0)
+ nrealreaders = nreaders;
+ else
+ nrealreaders = 2 * num_online_cpus();
+ rcu_torture_print_module_parms("Start of test");
+ fullstop = 0;
+
+ /* Set up the freelist. */
+
+ INIT_LIST_HEAD(&rcu_torture_freelist);
+ for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) {
+ rcu_tortures[i].rtort_mbtest = 0;
+ list_add_tail(&rcu_tortures[i].rtort_free,
+ &rcu_torture_freelist);
+ }
+
+ /* Initialize the statistics so that each run gets its own numbers. */
+
+ rcu_torture_current = NULL;
+ rcu_torture_current_version = 0;
+ atomic_set(&n_rcu_torture_alloc, 0);
+ atomic_set(&n_rcu_torture_alloc_fail, 0);
+ atomic_set(&n_rcu_torture_free, 0);
+ atomic_set(&n_rcu_torture_mberror, 0);
+ atomic_set(&n_rcu_torture_error, 0);
+ for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
+ atomic_set(&rcu_torture_wcount[i], 0);
+ for_each_possible_cpu(cpu) {
+ for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
+ per_cpu(rcu_torture_count, cpu)[i] = 0;
+ per_cpu(rcu_torture_batch, cpu)[i] = 0;
+ }
+ }
+
+ /* Start up the kthreads. */
+
+ VERBOSE_PRINTK_STRING("Creating rcu_torture_writer task");
+ writer_task = kthread_run(rcu_torture_writer, NULL,
+ "rcu_torture_writer");
+ if (IS_ERR(writer_task)) {
+ firsterr = PTR_ERR(writer_task);
+ VERBOSE_PRINTK_ERRSTRING("Failed to create writer");
+ writer_task = NULL;
+ goto unwind;
+ }
+ fakewriter_tasks = kzalloc(nfakewriters * sizeof(fakewriter_tasks[0]),
+ GFP_KERNEL);
+ if (fakewriter_tasks == NULL) {
+ VERBOSE_PRINTK_ERRSTRING("out of memory");
+ firsterr = -ENOMEM;
+ goto unwind;
+ }
+ for (i = 0; i < nfakewriters; i++) {
+ VERBOSE_PRINTK_STRING("Creating rcu_torture_fakewriter task");
+ fakewriter_tasks[i] = kthread_run(rcu_torture_fakewriter, NULL,
+ "rcu_torture_fakewriter");
+ if (IS_ERR(fakewriter_tasks[i])) {
+ firsterr = PTR_ERR(fakewriter_tasks[i]);
+ VERBOSE_PRINTK_ERRSTRING("Failed to create fakewriter");
+ fakewriter_tasks[i] = NULL;
+ goto unwind;
+ }
+ }
+ reader_tasks = kzalloc(nrealreaders * sizeof(reader_tasks[0]),
+ GFP_KERNEL);
+ if (reader_tasks == NULL) {
+ VERBOSE_PRINTK_ERRSTRING("out of memory");
+ firsterr = -ENOMEM;
+ goto unwind;
+ }
+ for (i = 0; i < nrealreaders; i++) {
+ VERBOSE_PRINTK_STRING("Creating rcu_torture_reader task");
+ reader_tasks[i] = kthread_run(rcu_torture_reader, NULL,
+ "rcu_torture_reader");
+ if (IS_ERR(reader_tasks[i])) {
+ firsterr = PTR_ERR(reader_tasks[i]);
+ VERBOSE_PRINTK_ERRSTRING("Failed to create reader");
+ reader_tasks[i] = NULL;
+ goto unwind;
+ }
+ }
+ if (stat_interval > 0) {
+ VERBOSE_PRINTK_STRING("Creating rcu_torture_stats task");
+ stats_task = kthread_run(rcu_torture_stats, NULL,
+ "rcu_torture_stats");
+ if (IS_ERR(stats_task)) {
+ firsterr = PTR_ERR(stats_task);
+ VERBOSE_PRINTK_ERRSTRING("Failed to create stats");
+ stats_task = NULL;
+ goto unwind;
+ }
+ }
+ if (test_no_idle_hz) {
+ rcu_idle_cpu = num_online_cpus() - 1;
+ /* Create the shuffler thread */
+ shuffler_task = kthread_run(rcu_torture_shuffle, NULL,
+ "rcu_torture_shuffle");
+ if (IS_ERR(shuffler_task)) {
+ firsterr = PTR_ERR(shuffler_task);
+ VERBOSE_PRINTK_ERRSTRING("Failed to create shuffler");
+ shuffler_task = NULL;
+ goto unwind;
+ }
+ }
+ return 0;
+
+unwind:
+ rcu_torture_cleanup();
+ return firsterr;
+}
+
+module_init(rcu_torture_init);
+module_exit(rcu_torture_cleanup);
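
For anyone wanting a quick smoke test of the relocated module, something like
the following should do (the parameter names are taken from the code above;
the values are only illustrative):

	modprobe rcutorture torture_type=rcu nreaders=4 stat_interval=30
	dmesg | grep torture	# periodic "Reader Pipe"/"Reader Batch" statistics
	rmmod rcutorture	# cleanup logs "End of test: SUCCESS" or FAILURE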

Subject: [PATCH 4/7] Move rtmutex-tester under tests/

From: Ananth N Mavinakayanahalli <[email protected]>

Move the rtmutex-tester infrastructure to tests/
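
The tester's sysfs interface is unchanged by the move. As a rough sketch of
how it is driven (the /sys path is assumed from the "rttest" sysdev class the
module registers, and the opcodes are those of enum test_opcodes in the file;
the values are only illustrative):

	# make test thread 0 SCHED_FIFO prio 80, then take and release mutex 0
	echo "2:80" > /sys/devices/system/rttest/rttest0/command
	echo "3:0"  > /sys/devices/system/rttest/rttest0/command
	echo "8:0"  > /sys/devices/system/rttest/rttest0/command
	cat /sys/devices/system/rttest/rttest0/status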

Signed-off-by: Ananth N Mavinakayanahalli <[email protected]>
---
kernel/rtmutex-tester.c | 442 ------------------------------------------------
kernel/Makefile | 1
lib/Kconfig.debug | 6
tests/Kconfig | 6
tests/Makefile | 1
tests/rtmutex-tester.c | 442 ++++++++++++++++++++++++++++++++++++++++++++++++
6 files changed, 449 insertions(+), 449 deletions(-)

Index: linux-2.6.24-rc6/kernel/Makefile
===================================================================
--- linux-2.6.24-rc6.orig/kernel/Makefile
+++ linux-2.6.24-rc6/kernel/Makefile
@@ -25,7 +25,6 @@ obj-$(CONFIG_FUTEX) += futex_compat.o
endif
obj-$(CONFIG_RT_MUTEXES) += rtmutex.o
obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o
-obj-$(CONFIG_RT_MUTEX_TESTER) += rtmutex-tester.o
obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o
obj-$(CONFIG_SMP) += cpu.o spinlock.o
obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o
Index: linux-2.6.24-rc6/kernel/rtmutex-tester.c
===================================================================
--- linux-2.6.24-rc6.orig/kernel/rtmutex-tester.c
+++ /dev/null
@@ -1,442 +0,0 @@
-/*
- * RT-Mutex-tester: scriptable tester for rt mutexes
- *
- * started by Thomas Gleixner:
- *
- * Copyright (C) 2006, Timesys Corp., Thomas Gleixner <[email protected]>
- *
- */
-#include <linux/kthread.h>
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/smp_lock.h>
-#include <linux/spinlock.h>
-#include <linux/sysdev.h>
-#include <linux/timer.h>
-#include <linux/freezer.h>
-
-#include "rtmutex.h"
-
-#define MAX_RT_TEST_THREADS 8
-#define MAX_RT_TEST_MUTEXES 8
-
-static spinlock_t rttest_lock;
-static atomic_t rttest_event;
-
-struct test_thread_data {
- int opcode;
- int opdata;
- int mutexes[MAX_RT_TEST_MUTEXES];
- int bkl;
- int event;
- struct sys_device sysdev;
-};
-
-static struct test_thread_data thread_data[MAX_RT_TEST_THREADS];
-static struct task_struct *threads[MAX_RT_TEST_THREADS];
-static struct rt_mutex mutexes[MAX_RT_TEST_MUTEXES];
-
-enum test_opcodes {
- RTTEST_NOP = 0,
- RTTEST_SCHEDOT, /* 1 Sched other, data = nice */
- RTTEST_SCHEDRT, /* 2 Sched fifo, data = prio */
- RTTEST_LOCK, /* 3 Lock uninterruptible, data = lockindex */
- RTTEST_LOCKNOWAIT, /* 4 Lock uninterruptible no wait in wakeup, data = lockindex */
- RTTEST_LOCKINT, /* 5 Lock interruptible, data = lockindex */
- RTTEST_LOCKINTNOWAIT, /* 6 Lock interruptible no wait in wakeup, data = lockindex */
- RTTEST_LOCKCONT, /* 7 Continue locking after the wakeup delay */
- RTTEST_UNLOCK, /* 8 Unlock, data = lockindex */
- RTTEST_LOCKBKL, /* 9 Lock BKL */
- RTTEST_UNLOCKBKL, /* 10 Unlock BKL */
- RTTEST_SIGNAL, /* 11 Signal other test thread, data = thread id */
- RTTEST_RESETEVENT = 98, /* 98 Reset event counter */
- RTTEST_RESET = 99, /* 99 Reset all pending operations */
-};
-
-static int handle_op(struct test_thread_data *td, int lockwakeup)
-{
- int i, id, ret = -EINVAL;
-
- switch(td->opcode) {
-
- case RTTEST_NOP:
- return 0;
-
- case RTTEST_LOCKCONT:
- td->mutexes[td->opdata] = 1;
- td->event = atomic_add_return(1, &rttest_event);
- return 0;
-
- case RTTEST_RESET:
- for (i = 0; i < MAX_RT_TEST_MUTEXES; i++) {
- if (td->mutexes[i] == 4) {
- rt_mutex_unlock(&mutexes[i]);
- td->mutexes[i] = 0;
- }
- }
-
- if (!lockwakeup && td->bkl == 4) {
- unlock_kernel();
- td->bkl = 0;
- }
- return 0;
-
- case RTTEST_RESETEVENT:
- atomic_set(&rttest_event, 0);
- return 0;
-
- default:
- if (lockwakeup)
- return ret;
- }
-
- switch(td->opcode) {
-
- case RTTEST_LOCK:
- case RTTEST_LOCKNOWAIT:
- id = td->opdata;
- if (id < 0 || id >= MAX_RT_TEST_MUTEXES)
- return ret;
-
- td->mutexes[id] = 1;
- td->event = atomic_add_return(1, &rttest_event);
- rt_mutex_lock(&mutexes[id]);
- td->event = atomic_add_return(1, &rttest_event);
- td->mutexes[id] = 4;
- return 0;
-
- case RTTEST_LOCKINT:
- case RTTEST_LOCKINTNOWAIT:
- id = td->opdata;
- if (id < 0 || id >= MAX_RT_TEST_MUTEXES)
- return ret;
-
- td->mutexes[id] = 1;
- td->event = atomic_add_return(1, &rttest_event);
- ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
- td->event = atomic_add_return(1, &rttest_event);
- td->mutexes[id] = ret ? 0 : 4;
- return ret ? -EINTR : 0;
-
- case RTTEST_UNLOCK:
- id = td->opdata;
- if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
- return ret;
-
- td->event = atomic_add_return(1, &rttest_event);
- rt_mutex_unlock(&mutexes[id]);
- td->event = atomic_add_return(1, &rttest_event);
- td->mutexes[id] = 0;
- return 0;
-
- case RTTEST_LOCKBKL:
- if (td->bkl)
- return 0;
- td->bkl = 1;
- lock_kernel();
- td->bkl = 4;
- return 0;
-
- case RTTEST_UNLOCKBKL:
- if (td->bkl != 4)
- break;
- unlock_kernel();
- td->bkl = 0;
- return 0;
-
- default:
- break;
- }
- return ret;
-}
-
-/*
- * Schedule replacement for rtsem_down(). Only called for threads with
- * PF_MUTEX_TESTER set.
- *
- * This allows us to have finegrained control over the event flow.
- *
- */
-void schedule_rt_mutex_test(struct rt_mutex *mutex)
-{
- int tid, op, dat;
- struct test_thread_data *td;
-
- /* We have to lookup the task */
- for (tid = 0; tid < MAX_RT_TEST_THREADS; tid++) {
- if (threads[tid] == current)
- break;
- }
-
- BUG_ON(tid == MAX_RT_TEST_THREADS);
-
- td = &thread_data[tid];
-
- op = td->opcode;
- dat = td->opdata;
-
- switch (op) {
- case RTTEST_LOCK:
- case RTTEST_LOCKINT:
- case RTTEST_LOCKNOWAIT:
- case RTTEST_LOCKINTNOWAIT:
- if (mutex != &mutexes[dat])
- break;
-
- if (td->mutexes[dat] != 1)
- break;
-
- td->mutexes[dat] = 2;
- td->event = atomic_add_return(1, &rttest_event);
- break;
-
- case RTTEST_LOCKBKL:
- default:
- break;
- }
-
- schedule();
-
-
- switch (op) {
- case RTTEST_LOCK:
- case RTTEST_LOCKINT:
- if (mutex != &mutexes[dat])
- return;
-
- if (td->mutexes[dat] != 2)
- return;
-
- td->mutexes[dat] = 3;
- td->event = atomic_add_return(1, &rttest_event);
- break;
-
- case RTTEST_LOCKNOWAIT:
- case RTTEST_LOCKINTNOWAIT:
- if (mutex != &mutexes[dat])
- return;
-
- if (td->mutexes[dat] != 2)
- return;
-
- td->mutexes[dat] = 1;
- td->event = atomic_add_return(1, &rttest_event);
- return;
-
- case RTTEST_LOCKBKL:
- return;
- default:
- return;
- }
-
- td->opcode = 0;
-
- for (;;) {
- set_current_state(TASK_INTERRUPTIBLE);
-
- if (td->opcode > 0) {
- int ret;
-
- set_current_state(TASK_RUNNING);
- ret = handle_op(td, 1);
- set_current_state(TASK_INTERRUPTIBLE);
- if (td->opcode == RTTEST_LOCKCONT)
- break;
- td->opcode = ret;
- }
-
- /* Wait for the next command to be executed */
- schedule();
- }
-
- /* Restore previous command and data */
- td->opcode = op;
- td->opdata = dat;
-}
-
-static int test_func(void *data)
-{
- struct test_thread_data *td = data;
- int ret;
-
- current->flags |= PF_MUTEX_TESTER;
- set_freezable();
- allow_signal(SIGHUP);
-
- for(;;) {
-
- set_current_state(TASK_INTERRUPTIBLE);
-
- if (td->opcode > 0) {
- set_current_state(TASK_RUNNING);
- ret = handle_op(td, 0);
- set_current_state(TASK_INTERRUPTIBLE);
- td->opcode = ret;
- }
-
- /* Wait for the next command to be executed */
- schedule();
- try_to_freeze();
-
- if (signal_pending(current))
- flush_signals(current);
-
- if(kthread_should_stop())
- break;
- }
- return 0;
-}
-
-/**
- * sysfs_test_command - interface for test commands
- * @dev: thread reference
- * @buf: command for actual step
- * @count: length of buffer
- *
- * command syntax:
- *
- * opcode:data
- */
-static ssize_t sysfs_test_command(struct sys_device *dev, const char *buf,
- size_t count)
-{
- struct sched_param schedpar;
- struct test_thread_data *td;
- char cmdbuf[32];
- int op, dat, tid, ret;
-
- td = container_of(dev, struct test_thread_data, sysdev);
- tid = td->sysdev.id;
-
- /* strings from sysfs write are not 0 terminated! */
- if (count >= sizeof(cmdbuf))
- return -EINVAL;
-
- /* strip of \n: */
- if (buf[count-1] == '\n')
- count--;
- if (count < 1)
- return -EINVAL;
-
- memcpy(cmdbuf, buf, count);
- cmdbuf[count] = 0;
-
- if (sscanf(cmdbuf, "%d:%d", &op, &dat) != 2)
- return -EINVAL;
-
- switch (op) {
- case RTTEST_SCHEDOT:
- schedpar.sched_priority = 0;
- ret = sched_setscheduler(threads[tid], SCHED_NORMAL, &schedpar);
- if (ret)
- return ret;
- set_user_nice(current, 0);
- break;
-
- case RTTEST_SCHEDRT:
- schedpar.sched_priority = dat;
- ret = sched_setscheduler(threads[tid], SCHED_FIFO, &schedpar);
- if (ret)
- return ret;
- break;
-
- case RTTEST_SIGNAL:
- send_sig(SIGHUP, threads[tid], 0);
- break;
-
- default:
- if (td->opcode > 0)
- return -EBUSY;
- td->opdata = dat;
- td->opcode = op;
- wake_up_process(threads[tid]);
- }
-
- return count;
-}
-
-/**
- * sysfs_test_status - sysfs interface for rt tester
- * @dev: thread to query
- * @buf: char buffer to be filled with thread status info
- */
-static ssize_t sysfs_test_status(struct sys_device *dev, char *buf)
-{
- struct test_thread_data *td;
- struct task_struct *tsk;
- char *curr = buf;
- int i;
-
- td = container_of(dev, struct test_thread_data, sysdev);
- tsk = threads[td->sysdev.id];
-
- spin_lock(&rttest_lock);
-
- curr += sprintf(curr,
- "O: %4d, E:%8d, S: 0x%08lx, P: %4d, N: %4d, B: %p, K: %d, M:",
- td->opcode, td->event, tsk->state,
- (MAX_RT_PRIO - 1) - tsk->prio,
- (MAX_RT_PRIO - 1) - tsk->normal_prio,
- tsk->pi_blocked_on, td->bkl);
-
- for (i = MAX_RT_TEST_MUTEXES - 1; i >=0 ; i--)
- curr += sprintf(curr, "%d", td->mutexes[i]);
-
- spin_unlock(&rttest_lock);
-
- curr += sprintf(curr, ", T: %p, R: %p\n", tsk,
- mutexes[td->sysdev.id].owner);
-
- return curr - buf;
-}
-
-static SYSDEV_ATTR(status, 0600, sysfs_test_status, NULL);
-static SYSDEV_ATTR(command, 0600, NULL, sysfs_test_command);
-
-static struct sysdev_class rttest_sysclass = {
- .name = "rttest",
-};
-
-static int init_test_thread(int id)
-{
- thread_data[id].sysdev.cls = &rttest_sysclass;
- thread_data[id].sysdev.id = id;
-
- threads[id] = kthread_run(test_func, &thread_data[id], "rt-test-%d", id);
- if (IS_ERR(threads[id]))
- return PTR_ERR(threads[id]);
-
- return sysdev_register(&thread_data[id].sysdev);
-}
-
-static int init_rttest(void)
-{
- int ret, i;
-
- spin_lock_init(&rttest_lock);
-
- for (i = 0; i < MAX_RT_TEST_MUTEXES; i++)
- rt_mutex_init(&mutexes[i]);
-
- ret = sysdev_class_register(&rttest_sysclass);
- if (ret)
- return ret;
-
- for (i = 0; i < MAX_RT_TEST_THREADS; i++) {
- ret = init_test_thread(i);
- if (ret)
- break;
- ret = sysdev_create_file(&thread_data[i].sysdev, &attr_status);
- if (ret)
- break;
- ret = sysdev_create_file(&thread_data[i].sysdev, &attr_command);
- if (ret)
- break;
- }
-
- printk("Initializing RT-Tester: %s\n", ret ? "Failed" : "OK" );
-
- return ret;
-}
-
-device_initcall(init_rttest);
Index: linux-2.6.24-rc6/lib/Kconfig.debug
===================================================================
--- linux-2.6.24-rc6.orig/lib/Kconfig.debug
+++ linux-2.6.24-rc6/lib/Kconfig.debug
@@ -219,12 +219,6 @@ config DEBUG_PI_LIST
default y
depends on DEBUG_RT_MUTEXES

-config RT_MUTEX_TESTER
- bool "Built-in scriptable tester for rt-mutexes"
- depends on DEBUG_KERNEL && RT_MUTEXES
- help
- This option enables a rt-mutex tester.
-
config DEBUG_SPINLOCK
bool "Spinlock and rw-lock debugging: basic checks"
depends on DEBUG_KERNEL
Index: linux-2.6.24-rc6/tests/Kconfig
===================================================================
--- linux-2.6.24-rc6.orig/tests/Kconfig
+++ linux-2.6.24-rc6/tests/Kconfig
@@ -31,5 +31,11 @@ config RCU_TORTURE_TEST
Say M if you want the RCU torture tests to build as a module.
Say N if you are unsure.

+config RT_MUTEX_TESTER
+ bool "Built-in scriptable tester for rt-mutexes"
+ depends on DEBUG_KERNEL && RT_MUTEXES
+ help
+ This option enables a rt-mutex tester.
+
endif # KERNEL_TESTS

Index: linux-2.6.24-rc6/tests/Makefile
===================================================================
--- linux-2.6.24-rc6.orig/tests/Makefile
+++ linux-2.6.24-rc6/tests/Makefile
@@ -4,3 +4,4 @@

obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o
obj-$(CONFIG_RCU_TORTURE_TEST) += rcutorture.o
+obj-$(CONFIG_RT_MUTEX_TESTER) += rtmutex-tester.o
Index: linux-2.6.24-rc6/tests/rtmutex-tester.c
===================================================================
--- /dev/null
+++ linux-2.6.24-rc6/tests/rtmutex-tester.c
@@ -0,0 +1,442 @@
+/*
+ * RT-Mutex-tester: scriptable tester for rt mutexes
+ *
+ * started by Thomas Gleixner:
+ *
+ * Copyright (C) 2006, Timesys Corp., Thomas Gleixner <[email protected]>
+ *
+ */
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/smp_lock.h>
+#include <linux/spinlock.h>
+#include <linux/sysdev.h>
+#include <linux/timer.h>
+#include <linux/freezer.h>
+
+#include "../kernel/rtmutex.h"
+
+#define MAX_RT_TEST_THREADS 8
+#define MAX_RT_TEST_MUTEXES 8
+
+static spinlock_t rttest_lock;
+static atomic_t rttest_event;
+
+struct test_thread_data {
+ int opcode;
+ int opdata;
+ int mutexes[MAX_RT_TEST_MUTEXES];
+ int bkl;
+ int event;
+ struct sys_device sysdev;
+};
+
+static struct test_thread_data thread_data[MAX_RT_TEST_THREADS];
+static struct task_struct *threads[MAX_RT_TEST_THREADS];
+static struct rt_mutex mutexes[MAX_RT_TEST_MUTEXES];
+
+enum test_opcodes {
+ RTTEST_NOP = 0,
+ RTTEST_SCHEDOT, /* 1 Sched other, data = nice */
+ RTTEST_SCHEDRT, /* 2 Sched fifo, data = prio */
+ RTTEST_LOCK, /* 3 Lock uninterruptible, data = lockindex */
+ RTTEST_LOCKNOWAIT, /* 4 Lock uninterruptible no wait in wakeup, data = lockindex */
+ RTTEST_LOCKINT, /* 5 Lock interruptible, data = lockindex */
+ RTTEST_LOCKINTNOWAIT, /* 6 Lock interruptible no wait in wakeup, data = lockindex */
+ RTTEST_LOCKCONT, /* 7 Continue locking after the wakeup delay */
+ RTTEST_UNLOCK, /* 8 Unlock, data = lockindex */
+ RTTEST_LOCKBKL, /* 9 Lock BKL */
+ RTTEST_UNLOCKBKL, /* 10 Unlock BKL */
+ RTTEST_SIGNAL, /* 11 Signal other test thread, data = thread id */
+ RTTEST_RESETEVENT = 98, /* 98 Reset event counter */
+ RTTEST_RESET = 99, /* 99 Reset all pending operations */
+};
+
+static int handle_op(struct test_thread_data *td, int lockwakeup)
+{
+ int i, id, ret = -EINVAL;
+
+ switch(td->opcode) {
+
+ case RTTEST_NOP:
+ return 0;
+
+ case RTTEST_LOCKCONT:
+ td->mutexes[td->opdata] = 1;
+ td->event = atomic_add_return(1, &rttest_event);
+ return 0;
+
+ case RTTEST_RESET:
+ for (i = 0; i < MAX_RT_TEST_MUTEXES; i++) {
+ if (td->mutexes[i] == 4) {
+ rt_mutex_unlock(&mutexes[i]);
+ td->mutexes[i] = 0;
+ }
+ }
+
+ if (!lockwakeup && td->bkl == 4) {
+ unlock_kernel();
+ td->bkl = 0;
+ }
+ return 0;
+
+ case RTTEST_RESETEVENT:
+ atomic_set(&rttest_event, 0);
+ return 0;
+
+ default:
+ if (lockwakeup)
+ return ret;
+ }
+
+ switch(td->opcode) {
+
+ case RTTEST_LOCK:
+ case RTTEST_LOCKNOWAIT:
+ id = td->opdata;
+ if (id < 0 || id >= MAX_RT_TEST_MUTEXES)
+ return ret;
+
+ td->mutexes[id] = 1;
+ td->event = atomic_add_return(1, &rttest_event);
+ rt_mutex_lock(&mutexes[id]);
+ td->event = atomic_add_return(1, &rttest_event);
+ td->mutexes[id] = 4;
+ return 0;
+
+ case RTTEST_LOCKINT:
+ case RTTEST_LOCKINTNOWAIT:
+ id = td->opdata;
+ if (id < 0 || id >= MAX_RT_TEST_MUTEXES)
+ return ret;
+
+ td->mutexes[id] = 1;
+ td->event = atomic_add_return(1, &rttest_event);
+ ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
+ td->event = atomic_add_return(1, &rttest_event);
+ td->mutexes[id] = ret ? 0 : 4;
+ return ret ? -EINTR : 0;
+
+ case RTTEST_UNLOCK:
+ id = td->opdata;
+ if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
+ return ret;
+
+ td->event = atomic_add_return(1, &rttest_event);
+ rt_mutex_unlock(&mutexes[id]);
+ td->event = atomic_add_return(1, &rttest_event);
+ td->mutexes[id] = 0;
+ return 0;
+
+ case RTTEST_LOCKBKL:
+ if (td->bkl)
+ return 0;
+ td->bkl = 1;
+ lock_kernel();
+ td->bkl = 4;
+ return 0;
+
+ case RTTEST_UNLOCKBKL:
+ if (td->bkl != 4)
+ break;
+ unlock_kernel();
+ td->bkl = 0;
+ return 0;
+
+ default:
+ break;
+ }
+ return ret;
+}
+
+/*
+ * Schedule replacement for rtsem_down(). Only called for threads with
+ * PF_MUTEX_TESTER set.
+ *
+ * This allows us to have finegrained control over the event flow.
+ *
+ */
+void schedule_rt_mutex_test(struct rt_mutex *mutex)
+{
+ int tid, op, dat;
+ struct test_thread_data *td;
+
+ /* We have to lookup the task */
+ for (tid = 0; tid < MAX_RT_TEST_THREADS; tid++) {
+ if (threads[tid] == current)
+ break;
+ }
+
+ BUG_ON(tid == MAX_RT_TEST_THREADS);
+
+ td = &thread_data[tid];
+
+ op = td->opcode;
+ dat = td->opdata;
+
+ switch (op) {
+ case RTTEST_LOCK:
+ case RTTEST_LOCKINT:
+ case RTTEST_LOCKNOWAIT:
+ case RTTEST_LOCKINTNOWAIT:
+ if (mutex != &mutexes[dat])
+ break;
+
+ if (td->mutexes[dat] != 1)
+ break;
+
+ td->mutexes[dat] = 2;
+ td->event = atomic_add_return(1, &rttest_event);
+ break;
+
+ case RTTEST_LOCKBKL:
+ default:
+ break;
+ }
+
+ schedule();
+
+
+ switch (op) {
+ case RTTEST_LOCK:
+ case RTTEST_LOCKINT:
+ if (mutex != &mutexes[dat])
+ return;
+
+ if (td->mutexes[dat] != 2)
+ return;
+
+ td->mutexes[dat] = 3;
+ td->event = atomic_add_return(1, &rttest_event);
+ break;
+
+ case RTTEST_LOCKNOWAIT:
+ case RTTEST_LOCKINTNOWAIT:
+ if (mutex != &mutexes[dat])
+ return;
+
+ if (td->mutexes[dat] != 2)
+ return;
+
+ td->mutexes[dat] = 1;
+ td->event = atomic_add_return(1, &rttest_event);
+ return;
+
+ case RTTEST_LOCKBKL:
+ return;
+ default:
+ return;
+ }
+
+ td->opcode = 0;
+
+ for (;;) {
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ if (td->opcode > 0) {
+ int ret;
+
+ set_current_state(TASK_RUNNING);
+ ret = handle_op(td, 1);
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (td->opcode == RTTEST_LOCKCONT)
+ break;
+ td->opcode = ret;
+ }
+
+ /* Wait for the next command to be executed */
+ schedule();
+ }
+
+ /* Restore previous command and data */
+ td->opcode = op;
+ td->opdata = dat;
+}
+
+static int test_func(void *data)
+{
+ struct test_thread_data *td = data;
+ int ret;
+
+ current->flags |= PF_MUTEX_TESTER;
+ set_freezable();
+ allow_signal(SIGHUP);
+
+ for(;;) {
+
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ if (td->opcode > 0) {
+ set_current_state(TASK_RUNNING);
+ ret = handle_op(td, 0);
+ set_current_state(TASK_INTERRUPTIBLE);
+ td->opcode = ret;
+ }
+
+ /* Wait for the next command to be executed */
+ schedule();
+ try_to_freeze();
+
+ if (signal_pending(current))
+ flush_signals(current);
+
+ if(kthread_should_stop())
+ break;
+ }
+ return 0;
+}
+
+/**
+ * sysfs_test_command - interface for test commands
+ * @dev: thread reference
+ * @buf: command for actual step
+ * @count: length of buffer
+ *
+ * command syntax:
+ *
+ * opcode:data
+ */
+static ssize_t sysfs_test_command(struct sys_device *dev, const char *buf,
+ size_t count)
+{
+ struct sched_param schedpar;
+ struct test_thread_data *td;
+ char cmdbuf[32];
+ int op, dat, tid, ret;
+
+ td = container_of(dev, struct test_thread_data, sysdev);
+ tid = td->sysdev.id;
+
+ /* strings from sysfs write are not 0 terminated! */
+ if (count >= sizeof(cmdbuf))
+ return -EINVAL;
+
+ /* strip of \n: */
+ if (buf[count-1] == '\n')
+ count--;
+ if (count < 1)
+ return -EINVAL;
+
+ memcpy(cmdbuf, buf, count);
+ cmdbuf[count] = 0;
+
+ if (sscanf(cmdbuf, "%d:%d", &op, &dat) != 2)
+ return -EINVAL;
+
+ switch (op) {
+ case RTTEST_SCHEDOT:
+ schedpar.sched_priority = 0;
+ ret = sched_setscheduler(threads[tid], SCHED_NORMAL, &schedpar);
+ if (ret)
+ return ret;
+ set_user_nice(current, 0);
+ break;
+
+ case RTTEST_SCHEDRT:
+ schedpar.sched_priority = dat;
+ ret = sched_setscheduler(threads[tid], SCHED_FIFO, &schedpar);
+ if (ret)
+ return ret;
+ break;
+
+ case RTTEST_SIGNAL:
+ send_sig(SIGHUP, threads[tid], 0);
+ break;
+
+ default:
+ if (td->opcode > 0)
+ return -EBUSY;
+ td->opdata = dat;
+ td->opcode = op;
+ wake_up_process(threads[tid]);
+ }
+
+ return count;
+}
+
+/**
+ * sysfs_test_status - sysfs interface for rt tester
+ * @dev: thread to query
+ * @buf: char buffer to be filled with thread status info
+ */
+static ssize_t sysfs_test_status(struct sys_device *dev, char *buf)
+{
+ struct test_thread_data *td;
+ struct task_struct *tsk;
+ char *curr = buf;
+ int i;
+
+ td = container_of(dev, struct test_thread_data, sysdev);
+ tsk = threads[td->sysdev.id];
+
+ spin_lock(&rttest_lock);
+
+ curr += sprintf(curr,
+ "O: %4d, E:%8d, S: 0x%08lx, P: %4d, N: %4d, B: %p, K: %d, M:",
+ td->opcode, td->event, tsk->state,
+ (MAX_RT_PRIO - 1) - tsk->prio,
+ (MAX_RT_PRIO - 1) - tsk->normal_prio,
+ tsk->pi_blocked_on, td->bkl);
+
+ for (i = MAX_RT_TEST_MUTEXES - 1; i >=0 ; i--)
+ curr += sprintf(curr, "%d", td->mutexes[i]);
+
+ spin_unlock(&rttest_lock);
+
+ curr += sprintf(curr, ", T: %p, R: %p\n", tsk,
+ mutexes[td->sysdev.id].owner);
+
+ return curr - buf;
+}
+
+static SYSDEV_ATTR(status, 0600, sysfs_test_status, NULL);
+static SYSDEV_ATTR(command, 0600, NULL, sysfs_test_command);
+
+static struct sysdev_class rttest_sysclass = {
+ .name = "rttest",
+};
+
+static int init_test_thread(int id)
+{
+ thread_data[id].sysdev.cls = &rttest_sysclass;
+ thread_data[id].sysdev.id = id;
+
+ threads[id] = kthread_run(test_func, &thread_data[id], "rt-test-%d", id);
+ if (IS_ERR(threads[id]))
+ return PTR_ERR(threads[id]);
+
+ return sysdev_register(&thread_data[id].sysdev);
+}
+
+static int init_rttest(void)
+{
+ int ret, i;
+
+ spin_lock_init(&rttest_lock);
+
+ for (i = 0; i < MAX_RT_TEST_MUTEXES; i++)
+ rt_mutex_init(&mutexes[i]);
+
+ ret = sysdev_class_register(&rttest_sysclass);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < MAX_RT_TEST_THREADS; i++) {
+ ret = init_test_thread(i);
+ if (ret)
+ break;
+ ret = sysdev_create_file(&thread_data[i].sysdev, &attr_status);
+ if (ret)
+ break;
+ ret = sysdev_create_file(&thread_data[i].sysdev, &attr_command);
+ if (ret)
+ break;
+ }
+
+ printk("Initializing RT-Tester: %s\n", ret ? "Failed" : "OK" );
+
+ return ret;
+}
+
+device_initcall(init_rttest);

Subject: [PATCH 5/7] Move synchro-test under tests/

From: Ananth N Mavinakayanahalli <[email protected]>

Move the synchro-test infrastructure to tests/
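
Usage is unchanged; the module still takes its parameters at load time, as
documented in the header comment of the file (the only difference is that the
object is now built from tests/):

	insmod synchro-test.ko rd=2 wr=2
	insmod synchro-test.ko mx=1

See Documentation/synchro-test.txt for the full parameter list.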

Signed-off-by: Ananth N Mavinakayanahalli <[email protected]>
---
kernel/synchro-test.c | 526 --------------------------------------------------
kernel/Makefile | 1
lib/Kconfig.debug | 14 -
tests/Kconfig | 14 +
tests/Makefile | 1
tests/synchro-test.c | 526 ++++++++++++++++++++++++++++++++++++++++++++++++++
6 files changed, 541 insertions(+), 541 deletions(-)

Index: linux-2.6.24-rc6/kernel/Makefile
===================================================================
--- linux-2.6.24-rc6.orig/kernel/Makefile
+++ linux-2.6.24-rc6/kernel/Makefile
@@ -54,7 +54,6 @@ obj-$(CONFIG_SYSFS) += ksysfs.o
obj-$(CONFIG_DETECT_SOFTLOCKUP) += softlockup.o
obj-$(CONFIG_GENERIC_HARDIRQS) += irq/
obj-$(CONFIG_SECCOMP) += seccomp.o
-obj-$(CONFIG_DEBUG_SYNCHRO_TEST) += synchro-test.o
obj-$(CONFIG_CLASSIC_RCU) += rcuclassic.o
obj-$(CONFIG_PREEMPT_RCU) += rcupreempt.o
ifeq ($(CONFIG_PREEMPT_RCU),y)
Index: linux-2.6.24-rc6/kernel/synchro-test.c
===================================================================
--- linux-2.6.24-rc6.orig/kernel/synchro-test.c
+++ /dev/null
@@ -1,526 +0,0 @@
-/* synchro-test.c: run some threads to test the synchronisation primitives
- *
- * Copyright (C) 2005, 2006 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells ([email protected])
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- * The module should be run as something like:
- *
- * insmod synchro-test.ko rd=2 wr=2
- * insmod synchro-test.ko mx=1
- * insmod synchro-test.ko sm=2 ism=1
- * insmod synchro-test.ko sm=2 ism=2
- *
- * See Documentation/synchro-test.txt for more information.
- */
-
-#include <linux/module.h>
-#include <linux/poll.h>
-#include <linux/moduleparam.h>
-#include <linux/stat.h>
-#include <linux/init.h>
-#include <asm/atomic.h>
-#include <linux/personality.h>
-#include <linux/smp_lock.h>
-#include <linux/delay.h>
-#include <linux/timer.h>
-#include <linux/completion.h>
-#include <linux/mutex.h>
-
-#define MAX_THREADS 64
-
-/*
- * Turn on self-validation if we do a one-shot boot-time test:
- */
-#ifndef MODULE
-# define VALIDATE_OPERATORS
-#endif
-
-static int nummx;
-static int numsm, seminit = 4;
-static int numrd, numwr, numdg;
-static int elapse = 5, load = 2, do_sched, interval = 2;
-static int verbose = 0;
-
-MODULE_AUTHOR("David Howells");
-MODULE_DESCRIPTION("Synchronisation primitive test demo");
-MODULE_LICENSE("GPL");
-
-module_param_named(v, verbose, int, 0);
-MODULE_PARM_DESC(verbose, "Verbosity");
-
-module_param_named(mx, nummx, int, 0);
-MODULE_PARM_DESC(nummx, "Number of mutex threads");
-
-module_param_named(sm, numsm, int, 0);
-MODULE_PARM_DESC(numsm, "Number of semaphore threads");
-
-module_param_named(ism, seminit, int, 0);
-MODULE_PARM_DESC(seminit, "Initial semaphore value");
-
-module_param_named(rd, numrd, int, 0);
-MODULE_PARM_DESC(numrd, "Number of reader threads");
-
-module_param_named(wr, numwr, int, 0);
-MODULE_PARM_DESC(numwr, "Number of writer threads");
-
-module_param_named(dg, numdg, int, 0);
-MODULE_PARM_DESC(numdg, "Number of downgrader threads");
-
-module_param(elapse, int, 0);
-MODULE_PARM_DESC(elapse, "Number of seconds to run for");
-
-module_param(load, int, 0);
-MODULE_PARM_DESC(load, "Length of load in uS");
-
-module_param(interval, int, 0);
-MODULE_PARM_DESC(interval, "Length of interval in uS before re-getting lock");
-
-module_param(do_sched, int, 0);
-MODULE_PARM_DESC(do_sched, "True if each thread should schedule regularly");
-
-/* the semaphores under test */
-static struct mutex ____cacheline_aligned mutex;
-static struct semaphore ____cacheline_aligned sem;
-static struct rw_semaphore ____cacheline_aligned rwsem;
-
-static atomic_t ____cacheline_aligned do_stuff = ATOMIC_INIT(0);
-
-#ifdef VALIDATE_OPERATORS
-static atomic_t ____cacheline_aligned mutexes = ATOMIC_INIT(0);
-static atomic_t ____cacheline_aligned semaphores = ATOMIC_INIT(0);
-static atomic_t ____cacheline_aligned readers = ATOMIC_INIT(0);
-static atomic_t ____cacheline_aligned writers = ATOMIC_INIT(0);
-#endif
-
-static unsigned int ____cacheline_aligned mutexes_taken [MAX_THREADS];
-static unsigned int ____cacheline_aligned semaphores_taken [MAX_THREADS];
-static unsigned int ____cacheline_aligned reads_taken [MAX_THREADS];
-static unsigned int ____cacheline_aligned writes_taken [MAX_THREADS];
-static unsigned int ____cacheline_aligned downgrades_taken [MAX_THREADS];
-
-static struct completion ____cacheline_aligned mx_comp[MAX_THREADS];
-static struct completion ____cacheline_aligned sm_comp[MAX_THREADS];
-static struct completion ____cacheline_aligned rd_comp[MAX_THREADS];
-static struct completion ____cacheline_aligned wr_comp[MAX_THREADS];
-static struct completion ____cacheline_aligned dg_comp[MAX_THREADS];
-
-static struct timer_list ____cacheline_aligned timer;
-
-#define ACCOUNT(var, N) var##_taken[N]++;
-
-#ifdef VALIDATE_OPERATORS
-#define TRACK(var, dir) atomic_##dir(&(var))
-
-#define CHECK(var, cond, val) \
-do { \
- int x = atomic_read(&(var)); \
- if (unlikely(!(x cond (val)))) \
- printk("check [%s %s %d, == %d] failed in %s\n", \
- #var, #cond, (val), x, __func__); \
-} while (0)
-
-#else
-#define TRACK(var, dir) do {} while(0)
-#define CHECK(var, cond, val) do {} while(0)
-#endif
-
-static inline void do_mutex_lock(unsigned int N)
-{
- mutex_lock(&mutex);
-
- ACCOUNT(mutexes, N);
- TRACK(mutexes, inc);
- CHECK(mutexes, ==, 1);
-}
-
-static inline void do_mutex_unlock(unsigned int N)
-{
- CHECK(mutexes, ==, 1);
- TRACK(mutexes, dec);
-
- mutex_unlock(&mutex);
-}
-
-static inline void do_down(unsigned int N)
-{
- CHECK(mutexes, <, seminit);
-
- down(&sem);
-
- ACCOUNT(semaphores, N);
- TRACK(semaphores, inc);
-}
-
-static inline void do_up(unsigned int N)
-{
- CHECK(semaphores, >, 0);
- TRACK(semaphores, dec);
-
- up(&sem);
-}
-
-static inline void do_down_read(unsigned int N)
-{
- down_read(&rwsem);
-
- ACCOUNT(reads, N);
- TRACK(readers, inc);
- CHECK(readers, >, 0);
- CHECK(writers, ==, 0);
-}
-
-static inline void do_up_read(unsigned int N)
-{
- CHECK(readers, >, 0);
- CHECK(writers, ==, 0);
- TRACK(readers, dec);
-
- up_read(&rwsem);
-}
-
-static inline void do_down_write(unsigned int N)
-{
- down_write(&rwsem);
-
- ACCOUNT(writes, N);
- TRACK(writers, inc);
- CHECK(writers, ==, 1);
- CHECK(readers, ==, 0);
-}
-
-static inline void do_up_write(unsigned int N)
-{
- CHECK(writers, ==, 1);
- CHECK(readers, ==, 0);
- TRACK(writers, dec);
-
- up_write(&rwsem);
-}
-
-static inline void do_downgrade_write(unsigned int N)
-{
- CHECK(writers, ==, 1);
- CHECK(readers, ==, 0);
- TRACK(writers, dec);
- TRACK(readers, inc);
-
- downgrade_write(&rwsem);
-
- ACCOUNT(downgrades, N);
-}
-
-static inline void sched(void)
-{
- if (do_sched)
- schedule();
-}
-
-static int mutexer(void *arg)
-{
- unsigned int N = (unsigned long) arg;
-
- daemonize("Mutex%u", N);
- set_user_nice(current, 19);
-
- while (atomic_read(&do_stuff)) {
- do_mutex_lock(N);
- if (load)
- udelay(load);
- do_mutex_unlock(N);
- sched();
- if (interval)
- udelay(interval);
- }
-
- if (verbose >= 2)
- printk("%s: done\n", current->comm);
- complete_and_exit(&mx_comp[N], 0);
-}
-
-static int semaphorer(void *arg)
-{
- unsigned int N = (unsigned long) arg;
-
- daemonize("Sem%u", N);
- set_user_nice(current, 19);
-
- while (atomic_read(&do_stuff)) {
- do_down(N);
- if (load)
- udelay(load);
- do_up(N);
- sched();
- if (interval)
- udelay(interval);
- }
-
- if (verbose >= 2)
- printk("%s: done\n", current->comm);
- complete_and_exit(&sm_comp[N], 0);
-}
-
-static int reader(void *arg)
-{
- unsigned int N = (unsigned long) arg;
-
- daemonize("Read%u", N);
- set_user_nice(current, 19);
-
- while (atomic_read(&do_stuff)) {
- do_down_read(N);
-#ifdef LOAD_TEST
- if (load)
- udelay(load);
-#endif
- do_up_read(N);
- sched();
- if (interval)
- udelay(interval);
- }
-
- if (verbose >= 2)
- printk("%s: done\n", current->comm);
- complete_and_exit(&rd_comp[N], 0);
-}
-
-static int writer(void *arg)
-{
- unsigned int N = (unsigned long) arg;
-
- daemonize("Write%u", N);
- set_user_nice(current, 19);
-
- while (atomic_read(&do_stuff)) {
- do_down_write(N);
-#ifdef LOAD_TEST
- if (load)
- udelay(load);
-#endif
- do_up_write(N);
- sched();
- if (interval)
- udelay(interval);
- }
-
- if (verbose >= 2)
- printk("%s: done\n", current->comm);
- complete_and_exit(&wr_comp[N], 0);
-}
-
-static int downgrader(void *arg)
-{
- unsigned int N = (unsigned long) arg;
-
- daemonize("Down%u", N);
- set_user_nice(current, 19);
-
- while (atomic_read(&do_stuff)) {
- do_down_write(N);
-#ifdef LOAD_TEST
- if (load)
- udelay(load);
-#endif
- do_downgrade_write(N);
-#ifdef LOAD_TEST
- if (load)
- udelay(load);
-#endif
- do_up_read(N);
- sched();
- if (interval)
- udelay(interval);
- }
-
- if (verbose >= 2)
- printk("%s: done\n", current->comm);
- complete_and_exit(&dg_comp[N], 0);
-}
-
-static void stop_test(unsigned long dummy)
-{
- atomic_set(&do_stuff, 0);
-}
-
-static unsigned int total(const char *what, unsigned int counts[], int num)
-{
- unsigned int tot = 0, max = 0, min = UINT_MAX, zeros = 0, cnt;
- int loop;
-
- for (loop = 0; loop < num; loop++) {
- cnt = counts[loop];
-
- if (cnt == 0) {
- zeros++;
- min = 0;
- continue;
- }
-
- tot += cnt;
- if (tot > max)
- max = tot;
- if (tot < min)
- min = tot;
- }
-
- if (verbose && tot > 0) {
- printk("%s:", what);
-
- for (loop = 0; loop < num; loop++) {
- cnt = counts[loop];
-
- if (cnt == 0)
- printk(" zzz");
- else
- printk(" %d%%", cnt * 100 / tot);
- }
-
- printk("\n");
- }
-
- return tot;
-}
-
-/*****************************************************************************/
-/*
- *
- */
-static int __init do_tests(void)
-{
- unsigned long loop;
- unsigned int mutex_total, sem_total, rd_total, wr_total, dg_total;
-
- if (nummx < 0 || nummx > MAX_THREADS ||
- numsm < 0 || numsm > MAX_THREADS ||
- numrd < 0 || numrd > MAX_THREADS ||
- numwr < 0 || numwr > MAX_THREADS ||
- numdg < 0 || numdg > MAX_THREADS ||
- seminit < 1 ||
- elapse < 1 ||
- load < 0 || load > 999 ||
- interval < 0 || interval > 999
- ) {
- printk("Parameter out of range\n");
- return -ERANGE;
- }
-
- if ((nummx | numsm | numrd | numwr | numdg) == 0) {
- int num = num_online_cpus();
-
- if (num > MAX_THREADS)
- num = MAX_THREADS;
- nummx = numsm = numrd = numwr = numdg = num;
-
- load = 1;
- interval = 1;
- do_sched = 1;
- printk("No parameters - using defaults.\n");
- }
-
- if (verbose)
- printk("\nStarting synchronisation primitive tests...\n");
-
- mutex_init(&mutex);
- sema_init(&sem, seminit);
- init_rwsem(&rwsem);
- atomic_set(&do_stuff, 1);
-
- /* kick off all the children */
- for (loop = 0; loop < MAX_THREADS; loop++) {
- if (loop < nummx) {
- init_completion(&mx_comp[loop]);
- kernel_thread(mutexer, (void *) loop, 0);
- }
-
- if (loop < numsm) {
- init_completion(&sm_comp[loop]);
- kernel_thread(semaphorer, (void *) loop, 0);
- }
-
- if (loop < numrd) {
- init_completion(&rd_comp[loop]);
- kernel_thread(reader, (void *) loop, 0);
- }
-
- if (loop < numwr) {
- init_completion(&wr_comp[loop]);
- kernel_thread(writer, (void *) loop, 0);
- }
-
- if (loop < numdg) {
- init_completion(&dg_comp[loop]);
- kernel_thread(downgrader, (void *) loop, 0);
- }
- }
-
- /* set a stop timer */
- init_timer(&timer);
- timer.function = stop_test;
- timer.expires = jiffies + elapse * HZ;
- add_timer(&timer);
-
- /* now wait until it's all done */
- for (loop = 0; loop < nummx; loop++)
- wait_for_completion(&mx_comp[loop]);
-
- for (loop = 0; loop < numsm; loop++)
- wait_for_completion(&sm_comp[loop]);
-
- for (loop = 0; loop < numrd; loop++)
- wait_for_completion(&rd_comp[loop]);
-
- for (loop = 0; loop < numwr; loop++)
- wait_for_completion(&wr_comp[loop]);
-
- for (loop = 0; loop < numdg; loop++)
- wait_for_completion(&dg_comp[loop]);
-
- atomic_set(&do_stuff, 0);
- del_timer(&timer);
-
- if (mutex_is_locked(&mutex))
- printk(KERN_ERR "Mutex is still locked!\n");
-
- /* count up */
- mutex_total = total("MTX", mutexes_taken, nummx);
- sem_total = total("SEM", semaphores_taken, numsm);
- rd_total = total("RD ", reads_taken, numrd);
- wr_total = total("WR ", writes_taken, numwr);
- dg_total = total("DG ", downgrades_taken, numdg);
-
- /* print the results */
- if (verbose) {
- printk("mutexes taken: %u\n", mutex_total);
- printk("semaphores taken: %u\n", sem_total);
- printk("reads taken: %u\n", rd_total);
- printk("writes taken: %u\n", wr_total);
- printk("downgrades taken: %u\n", dg_total);
- }
- else {
- char buf[30];
-
- sprintf(buf, "%d/%d", interval, load);
-
- printk("%3d %3d %3d %3d %3d %c %5s %9u %9u %9u %9u %9u\n",
- nummx, numsm, numrd, numwr, numdg,
- do_sched ? 's' : '-',
- buf,
- mutex_total,
- sem_total,
- rd_total,
- wr_total,
- dg_total);
- }
-
- /* tell insmod to discard the module */
- if (verbose)
- printk("Tests complete\n");
- return -ENOANO;
-
-} /* end do_tests() */
-
-module_init(do_tests);
Index: linux-2.6.24-rc6/lib/Kconfig.debug
===================================================================
--- linux-2.6.24-rc6.orig/lib/Kconfig.debug
+++ linux-2.6.24-rc6/lib/Kconfig.debug
@@ -467,20 +467,6 @@ config BOOT_PRINTK_DELAY
BOOT_PRINTK_DELAY also may cause DETECT_SOFTLOCKUP to detect
what it believes to be lockup conditions.

-config DEBUG_SYNCHRO_TEST
- tristate "Synchronisation primitive testing module"
- depends on DEBUG_KERNEL
- default n
- help
- This option provides a kernel module that can thrash the sleepable
- synchronisation primitives (mutexes and semaphores).
-
- You should say N or M here. Whilst the module can be built in, it's
- not recommended as it requires module parameters supplying to get it
- to do anything.
-
- See Documentation/synchro-test.txt.
-
config LKDTM
tristate "Linux Kernel Dump Test Tool Module"
depends on DEBUG_KERNEL
Index: linux-2.6.24-rc6/tests/Kconfig
===================================================================
--- linux-2.6.24-rc6.orig/tests/Kconfig
+++ linux-2.6.24-rc6/tests/Kconfig
@@ -37,5 +37,19 @@ config RT_MUTEX_TESTER
help
This option enables a rt-mutex tester.

+config DEBUG_SYNCHRO_TEST
+ tristate "Synchronisation primitive testing module"
+ depends on DEBUG_KERNEL
+ default n
+ help
+ This option provides a kernel module that can thrash the sleepable
+ synchronisation primitives (mutexes and semaphores).
+
+ You should say N or M here. Whilst the module can be built in, it's
+ not recommended as it requires module parameters supplying to get it
+ to do anything.
+
+ See Documentation/synchro-test.txt.
+
endif # KERNEL_TESTS

Index: linux-2.6.24-rc6/tests/Makefile
===================================================================
--- linux-2.6.24-rc6.orig/tests/Makefile
+++ linux-2.6.24-rc6/tests/Makefile
@@ -5,3 +5,4 @@
obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o
obj-$(CONFIG_RCU_TORTURE_TEST) += rcutorture.o
obj-$(CONFIG_RT_MUTEX_TESTER) += rtmutex-tester.o
+obj-$(CONFIG_DEBUG_SYNCHRO_TEST) += synchro-test.o
Index: linux-2.6.24-rc6/tests/synchro-test.c
===================================================================
--- /dev/null
+++ linux-2.6.24-rc6/tests/synchro-test.c
@@ -0,0 +1,526 @@
+/* synchro-test.c: run some threads to test the synchronisation primitives
+ *
+ * Copyright (C) 2005, 2006 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells ([email protected])
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * The module should be run as something like:
+ *
+ * insmod synchro-test.ko rd=2 wr=2
+ * insmod synchro-test.ko mx=1
+ * insmod synchro-test.ko sm=2 ism=1
+ * insmod synchro-test.ko sm=2 ism=2
+ *
+ * See Documentation/synchro-test.txt for more information.
+ */
+
+#include <linux/module.h>
+#include <linux/poll.h>
+#include <linux/moduleparam.h>
+#include <linux/stat.h>
+#include <linux/init.h>
+#include <asm/atomic.h>
+#include <linux/personality.h>
+#include <linux/smp_lock.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/completion.h>
+#include <linux/mutex.h>
+
+#define MAX_THREADS 64
+
+/*
+ * Turn on self-validation if we do a one-shot boot-time test:
+ */
+#ifndef MODULE
+# define VALIDATE_OPERATORS
+#endif
+
+static int nummx;
+static int numsm, seminit = 4;
+static int numrd, numwr, numdg;
+static int elapse = 5, load = 2, do_sched, interval = 2;
+static int verbose = 0;
+
+MODULE_AUTHOR("David Howells");
+MODULE_DESCRIPTION("Synchronisation primitive test demo");
+MODULE_LICENSE("GPL");
+
+module_param_named(v, verbose, int, 0);
+MODULE_PARM_DESC(verbose, "Verbosity");
+
+module_param_named(mx, nummx, int, 0);
+MODULE_PARM_DESC(nummx, "Number of mutex threads");
+
+module_param_named(sm, numsm, int, 0);
+MODULE_PARM_DESC(numsm, "Number of semaphore threads");
+
+module_param_named(ism, seminit, int, 0);
+MODULE_PARM_DESC(seminit, "Initial semaphore value");
+
+module_param_named(rd, numrd, int, 0);
+MODULE_PARM_DESC(numrd, "Number of reader threads");
+
+module_param_named(wr, numwr, int, 0);
+MODULE_PARM_DESC(numwr, "Number of writer threads");
+
+module_param_named(dg, numdg, int, 0);
+MODULE_PARM_DESC(numdg, "Number of downgrader threads");
+
+module_param(elapse, int, 0);
+MODULE_PARM_DESC(elapse, "Number of seconds to run for");
+
+module_param(load, int, 0);
+MODULE_PARM_DESC(load, "Length of load in uS");
+
+module_param(interval, int, 0);
+MODULE_PARM_DESC(interval, "Length of interval in uS before re-getting lock");
+
+module_param(do_sched, int, 0);
+MODULE_PARM_DESC(do_sched, "True if each thread should schedule regularly");
+
+/* the semaphores under test */
+static struct mutex ____cacheline_aligned mutex;
+static struct semaphore ____cacheline_aligned sem;
+static struct rw_semaphore ____cacheline_aligned rwsem;
+
+static atomic_t ____cacheline_aligned do_stuff = ATOMIC_INIT(0);
+
+#ifdef VALIDATE_OPERATORS
+static atomic_t ____cacheline_aligned mutexes = ATOMIC_INIT(0);
+static atomic_t ____cacheline_aligned semaphores = ATOMIC_INIT(0);
+static atomic_t ____cacheline_aligned readers = ATOMIC_INIT(0);
+static atomic_t ____cacheline_aligned writers = ATOMIC_INIT(0);
+#endif
+
+static unsigned int ____cacheline_aligned mutexes_taken [MAX_THREADS];
+static unsigned int ____cacheline_aligned semaphores_taken [MAX_THREADS];
+static unsigned int ____cacheline_aligned reads_taken [MAX_THREADS];
+static unsigned int ____cacheline_aligned writes_taken [MAX_THREADS];
+static unsigned int ____cacheline_aligned downgrades_taken [MAX_THREADS];
+
+static struct completion ____cacheline_aligned mx_comp[MAX_THREADS];
+static struct completion ____cacheline_aligned sm_comp[MAX_THREADS];
+static struct completion ____cacheline_aligned rd_comp[MAX_THREADS];
+static struct completion ____cacheline_aligned wr_comp[MAX_THREADS];
+static struct completion ____cacheline_aligned dg_comp[MAX_THREADS];
+
+static struct timer_list ____cacheline_aligned timer;
+
+#define ACCOUNT(var, N) var##_taken[N]++;
+
+#ifdef VALIDATE_OPERATORS
+#define TRACK(var, dir) atomic_##dir(&(var))
+
+#define CHECK(var, cond, val) \
+do { \
+ int x = atomic_read(&(var)); \
+ if (unlikely(!(x cond (val)))) \
+ printk("check [%s %s %d, == %d] failed in %s\n", \
+ #var, #cond, (val), x, __func__); \
+} while (0)
+
+#else
+#define TRACK(var, dir) do {} while(0)
+#define CHECK(var, cond, val) do {} while(0)
+#endif
+
+static inline void do_mutex_lock(unsigned int N)
+{
+ mutex_lock(&mutex);
+
+ ACCOUNT(mutexes, N);
+ TRACK(mutexes, inc);
+ CHECK(mutexes, ==, 1);
+}
+
+static inline void do_mutex_unlock(unsigned int N)
+{
+ CHECK(mutexes, ==, 1);
+ TRACK(mutexes, dec);
+
+ mutex_unlock(&mutex);
+}
+
+static inline void do_down(unsigned int N)
+{
+ CHECK(mutexes, <, seminit);
+
+ down(&sem);
+
+ ACCOUNT(semaphores, N);
+ TRACK(semaphores, inc);
+}
+
+static inline void do_up(unsigned int N)
+{
+ CHECK(semaphores, >, 0);
+ TRACK(semaphores, dec);
+
+ up(&sem);
+}
+
+static inline void do_down_read(unsigned int N)
+{
+ down_read(&rwsem);
+
+ ACCOUNT(reads, N);
+ TRACK(readers, inc);
+ CHECK(readers, >, 0);
+ CHECK(writers, ==, 0);
+}
+
+static inline void do_up_read(unsigned int N)
+{
+ CHECK(readers, >, 0);
+ CHECK(writers, ==, 0);
+ TRACK(readers, dec);
+
+ up_read(&rwsem);
+}
+
+static inline void do_down_write(unsigned int N)
+{
+ down_write(&rwsem);
+
+ ACCOUNT(writes, N);
+ TRACK(writers, inc);
+ CHECK(writers, ==, 1);
+ CHECK(readers, ==, 0);
+}
+
+static inline void do_up_write(unsigned int N)
+{
+ CHECK(writers, ==, 1);
+ CHECK(readers, ==, 0);
+ TRACK(writers, dec);
+
+ up_write(&rwsem);
+}
+
+static inline void do_downgrade_write(unsigned int N)
+{
+ CHECK(writers, ==, 1);
+ CHECK(readers, ==, 0);
+ TRACK(writers, dec);
+ TRACK(readers, inc);
+
+ downgrade_write(&rwsem);
+
+ ACCOUNT(downgrades, N);
+}
+
+static inline void sched(void)
+{
+ if (do_sched)
+ schedule();
+}
+
+static int mutexer(void *arg)
+{
+ unsigned int N = (unsigned long) arg;
+
+ daemonize("Mutex%u", N);
+ set_user_nice(current, 19);
+
+ while (atomic_read(&do_stuff)) {
+ do_mutex_lock(N);
+ if (load)
+ udelay(load);
+ do_mutex_unlock(N);
+ sched();
+ if (interval)
+ udelay(interval);
+ }
+
+ if (verbose >= 2)
+ printk("%s: done\n", current->comm);
+ complete_and_exit(&mx_comp[N], 0);
+}
+
+static int semaphorer(void *arg)
+{
+ unsigned int N = (unsigned long) arg;
+
+ daemonize("Sem%u", N);
+ set_user_nice(current, 19);
+
+ while (atomic_read(&do_stuff)) {
+ do_down(N);
+ if (load)
+ udelay(load);
+ do_up(N);
+ sched();
+ if (interval)
+ udelay(interval);
+ }
+
+ if (verbose >= 2)
+ printk("%s: done\n", current->comm);
+ complete_and_exit(&sm_comp[N], 0);
+}
+
+static int reader(void *arg)
+{
+ unsigned int N = (unsigned long) arg;
+
+ daemonize("Read%u", N);
+ set_user_nice(current, 19);
+
+ while (atomic_read(&do_stuff)) {
+ do_down_read(N);
+#ifdef LOAD_TEST
+ if (load)
+ udelay(load);
+#endif
+ do_up_read(N);
+ sched();
+ if (interval)
+ udelay(interval);
+ }
+
+ if (verbose >= 2)
+ printk("%s: done\n", current->comm);
+ complete_and_exit(&rd_comp[N], 0);
+}
+
+static int writer(void *arg)
+{
+ unsigned int N = (unsigned long) arg;
+
+ daemonize("Write%u", N);
+ set_user_nice(current, 19);
+
+ while (atomic_read(&do_stuff)) {
+ do_down_write(N);
+#ifdef LOAD_TEST
+ if (load)
+ udelay(load);
+#endif
+ do_up_write(N);
+ sched();
+ if (interval)
+ udelay(interval);
+ }
+
+ if (verbose >= 2)
+ printk("%s: done\n", current->comm);
+ complete_and_exit(&wr_comp[N], 0);
+}
+
+static int downgrader(void *arg)
+{
+ unsigned int N = (unsigned long) arg;
+
+ daemonize("Down%u", N);
+ set_user_nice(current, 19);
+
+ while (atomic_read(&do_stuff)) {
+ do_down_write(N);
+#ifdef LOAD_TEST
+ if (load)
+ udelay(load);
+#endif
+ do_downgrade_write(N);
+#ifdef LOAD_TEST
+ if (load)
+ udelay(load);
+#endif
+ do_up_read(N);
+ sched();
+ if (interval)
+ udelay(interval);
+ }
+
+ if (verbose >= 2)
+ printk("%s: done\n", current->comm);
+ complete_and_exit(&dg_comp[N], 0);
+}
+
+static void stop_test(unsigned long dummy)
+{
+ atomic_set(&do_stuff, 0);
+}
+
+static unsigned int total(const char *what, unsigned int counts[], int num)
+{
+ unsigned int tot = 0, max = 0, min = UINT_MAX, zeros = 0, cnt;
+ int loop;
+
+ for (loop = 0; loop < num; loop++) {
+ cnt = counts[loop];
+
+ if (cnt == 0) {
+ zeros++;
+ min = 0;
+ continue;
+ }
+
+ tot += cnt;
+ if (tot > max)
+ max = tot;
+ if (tot < min)
+ min = tot;
+ }
+
+ if (verbose && tot > 0) {
+ printk("%s:", what);
+
+ for (loop = 0; loop < num; loop++) {
+ cnt = counts[loop];
+
+ if (cnt == 0)
+ printk(" zzz");
+ else
+ printk(" %d%%", cnt * 100 / tot);
+ }
+
+ printk("\n");
+ }
+
+ return tot;
+}
+
+/*****************************************************************************/
+/*
+ *
+ */
+static int __init do_tests(void)
+{
+ unsigned long loop;
+ unsigned int mutex_total, sem_total, rd_total, wr_total, dg_total;
+
+ if (nummx < 0 || nummx > MAX_THREADS ||
+ numsm < 0 || numsm > MAX_THREADS ||
+ numrd < 0 || numrd > MAX_THREADS ||
+ numwr < 0 || numwr > MAX_THREADS ||
+ numdg < 0 || numdg > MAX_THREADS ||
+ seminit < 1 ||
+ elapse < 1 ||
+ load < 0 || load > 999 ||
+ interval < 0 || interval > 999
+ ) {
+ printk("Parameter out of range\n");
+ return -ERANGE;
+ }
+
+ if ((nummx | numsm | numrd | numwr | numdg) == 0) {
+ int num = num_online_cpus();
+
+ if (num > MAX_THREADS)
+ num = MAX_THREADS;
+ nummx = numsm = numrd = numwr = numdg = num;
+
+ load = 1;
+ interval = 1;
+ do_sched = 1;
+ printk("No parameters - using defaults.\n");
+ }
+
+ if (verbose)
+ printk("\nStarting synchronisation primitive tests...\n");
+
+ mutex_init(&mutex);
+ sema_init(&sem, seminit);
+ init_rwsem(&rwsem);
+ atomic_set(&do_stuff, 1);
+
+ /* kick off all the children */
+ for (loop = 0; loop < MAX_THREADS; loop++) {
+ if (loop < nummx) {
+ init_completion(&mx_comp[loop]);
+ kernel_thread(mutexer, (void *) loop, 0);
+ }
+
+ if (loop < numsm) {
+ init_completion(&sm_comp[loop]);
+ kernel_thread(semaphorer, (void *) loop, 0);
+ }
+
+ if (loop < numrd) {
+ init_completion(&rd_comp[loop]);
+ kernel_thread(reader, (void *) loop, 0);
+ }
+
+ if (loop < numwr) {
+ init_completion(&wr_comp[loop]);
+ kernel_thread(writer, (void *) loop, 0);
+ }
+
+ if (loop < numdg) {
+ init_completion(&dg_comp[loop]);
+ kernel_thread(downgrader, (void *) loop, 0);
+ }
+ }
+
+ /* set a stop timer */
+ init_timer(&timer);
+ timer.function = stop_test;
+ timer.expires = jiffies + elapse * HZ;
+ add_timer(&timer);
+
+ /* now wait until it's all done */
+ for (loop = 0; loop < nummx; loop++)
+ wait_for_completion(&mx_comp[loop]);
+
+ for (loop = 0; loop < numsm; loop++)
+ wait_for_completion(&sm_comp[loop]);
+
+ for (loop = 0; loop < numrd; loop++)
+ wait_for_completion(&rd_comp[loop]);
+
+ for (loop = 0; loop < numwr; loop++)
+ wait_for_completion(&wr_comp[loop]);
+
+ for (loop = 0; loop < numdg; loop++)
+ wait_for_completion(&dg_comp[loop]);
+
+ atomic_set(&do_stuff, 0);
+ del_timer(&timer);
+
+ if (mutex_is_locked(&mutex))
+ printk(KERN_ERR "Mutex is still locked!\n");
+
+ /* count up */
+ mutex_total = total("MTX", mutexes_taken, nummx);
+ sem_total = total("SEM", semaphores_taken, numsm);
+ rd_total = total("RD ", reads_taken, numrd);
+ wr_total = total("WR ", writes_taken, numwr);
+ dg_total = total("DG ", downgrades_taken, numdg);
+
+ /* print the results */
+ if (verbose) {
+ printk("mutexes taken: %u\n", mutex_total);
+ printk("semaphores taken: %u\n", sem_total);
+ printk("reads taken: %u\n", rd_total);
+ printk("writes taken: %u\n", wr_total);
+ printk("downgrades taken: %u\n", dg_total);
+ }
+ else {
+ char buf[30];
+
+ sprintf(buf, "%d/%d", interval, load);
+
+ printk("%3d %3d %3d %3d %3d %c %5s %9u %9u %9u %9u %9u\n",
+ nummx, numsm, numrd, numwr, numdg,
+ do_sched ? 's' : '-',
+ buf,
+ mutex_total,
+ sem_total,
+ rd_total,
+ wr_total,
+ dg_total);
+ }
+
+ /* tell insmod to discard the module */
+ if (verbose)
+ printk("Tests complete\n");
+ return -ENOANO;
+
+} /* end do_tests() */
+
+module_init(do_tests);
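(For reference, the compact one-line result printed by do_tests() above follows
its printk format string, so the columns read, left to right:

    mx  sm  rd  wr  dg  s/-  interval/load  mutexes  semaphores  reads  writes  downgrades

i.e. the five thread counts, an 's' or '-' reflecting the do_sched parameter,
the interval/load settings in microseconds, and then the per-primitive totals
computed by total().)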

Subject: [PATCH 6/7] Move lkdtm under tests/

From: Ananth N Mavinakayanahalli <[email protected]>

Move the lkdtm infrastructure to tests/

Signed-off-by: Ananth N Mavinakayanahalli <[email protected]>
---
drivers/misc/lkdtm.c | 345 --------------------------------------------------
drivers/misc/Makefile | 1
lib/Kconfig.debug | 15 --
tests/Kconfig | 15 ++
tests/Makefile | 1
tests/lkdtm.c | 345 ++++++++++++++++++++++++++++++++++++++++++++++++++
6 files changed, 361 insertions(+), 361 deletions(-)

Index: linux-2.6.24-rc6/drivers/misc/Makefile
===================================================================
--- linux-2.6.24-rc6.orig/drivers/misc/Makefile
+++ linux-2.6.24-rc6/drivers/misc/Makefile
@@ -8,7 +8,6 @@ obj-$(CONFIG_HDPU_FEATURES) += hdpuftrs/
obj-$(CONFIG_MSI_LAPTOP) += msi-laptop.o
obj-$(CONFIG_ASUS_LAPTOP) += asus-laptop.o
obj-$(CONFIG_ATMEL_SSC) += atmel-ssc.o
-obj-$(CONFIG_LKDTM) += lkdtm.o
obj-$(CONFIG_TIFM_CORE) += tifm_core.o
obj-$(CONFIG_TIFM_7XX1) += tifm_7xx1.o
obj-$(CONFIG_PHANTOM) += phantom.o
Index: linux-2.6.24-rc6/drivers/misc/lkdtm.c
===================================================================
--- linux-2.6.24-rc6.orig/drivers/misc/lkdtm.c
+++ /dev/null
@@ -1,345 +0,0 @@
-/*
- * Kprobe module for testing crash dumps
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * Copyright (C) IBM Corporation, 2006
- *
- * Author: Ankita Garg <[email protected]>
- *
- * This module induces system failures at predefined crashpoints to
- * evaluate the reliability of crash dumps obtained using different dumping
- * solutions.
- *
- * It is adapted from the Linux Kernel Dump Test Tool by
- * Fernando Luis Vazquez Cao <http://lkdtt.sourceforge.net>
- *
- * Usage : insmod lkdtm.ko [recur_count={>0}] cpoint_name=<> cpoint_type=<>
- * [cpoint_count={>0}]
- *
- * recur_count : Recursion level for the stack overflow test. Default is 10.
- *
- * cpoint_name : Crash point where the kernel is to be crashed. It can be
- * one of INT_HARDWARE_ENTRY, INT_HW_IRQ_EN, INT_TASKLET_ENTRY,
- * FS_DEVRW, MEM_SWAPOUT, TIMERADD, SCSI_DISPATCH_CMD,
- * IDE_CORE_CP
- *
- * cpoint_type : Indicates the action to be taken on hitting the crash point.
- * It can be one of PANIC, BUG, EXCEPTION, LOOP, OVERFLOW
- *
- * cpoint_count : Indicates the number of times the crash point is to be hit
- * to trigger an action. The default is 10.
- */
-
-#include <linux/kernel.h>
-#include <linux/fs.h>
-#include <linux/module.h>
-#include <linux/buffer_head.h>
-#include <linux/kprobes.h>
-#include <linux/list.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/hrtimer.h>
-#include <scsi/scsi_cmnd.h>
-
-#ifdef CONFIG_IDE
-#include <linux/ide.h>
-#endif
-
-#define NUM_CPOINTS 8
-#define NUM_CPOINT_TYPES 5
-#define DEFAULT_COUNT 10
-#define REC_NUM_DEFAULT 10
-
-enum cname {
- INVALID,
- INT_HARDWARE_ENTRY,
- INT_HW_IRQ_EN,
- INT_TASKLET_ENTRY,
- FS_DEVRW,
- MEM_SWAPOUT,
- TIMERADD,
- SCSI_DISPATCH_CMD,
- IDE_CORE_CP
-};
-
-enum ctype {
- NONE,
- PANIC,
- BUG,
- EXCEPTION,
- LOOP,
- OVERFLOW
-};
-
-static char* cp_name[] = {
- "INT_HARDWARE_ENTRY",
- "INT_HW_IRQ_EN",
- "INT_TASKLET_ENTRY",
- "FS_DEVRW",
- "MEM_SWAPOUT",
- "TIMERADD",
- "SCSI_DISPATCH_CMD",
- "IDE_CORE_CP"
-};
-
-static char* cp_type[] = {
- "PANIC",
- "BUG",
- "EXCEPTION",
- "LOOP",
- "OVERFLOW"
-};
-
-static struct jprobe lkdtm;
-
-static int lkdtm_parse_commandline(void);
-static void lkdtm_handler(void);
-
-static char* cpoint_name;
-static char* cpoint_type;
-static int cpoint_count = DEFAULT_COUNT;
-static int recur_count = REC_NUM_DEFAULT;
-
-static enum cname cpoint = INVALID;
-static enum ctype cptype = NONE;
-static int count = DEFAULT_COUNT;
-
-module_param(recur_count, int, 0644);
-MODULE_PARM_DESC(recur_count, " Recursion level for the stack overflow test, "\
- "default is 10");
-module_param(cpoint_name, charp, 0644);
-MODULE_PARM_DESC(cpoint_name, " Crash Point, where kernel is to be crashed");
-module_param(cpoint_type, charp, 0644);
-MODULE_PARM_DESC(cpoint_type, " Crash Point Type, action to be taken on "\
- "hitting the crash point");
-module_param(cpoint_count, int, 0644);
-MODULE_PARM_DESC(cpoint_count, " Crash Point Count, number of times the "\
- "crash point is to be hit to trigger action");
-
-static unsigned int jp_do_irq(unsigned int irq)
-{
- lkdtm_handler();
- jprobe_return();
- return 0;
-}
-
-static irqreturn_t jp_handle_irq_event(unsigned int irq,
- struct irqaction *action)
-{
- lkdtm_handler();
- jprobe_return();
- return 0;
-}
-
-static void jp_tasklet_action(struct softirq_action *a)
-{
- lkdtm_handler();
- jprobe_return();
-}
-
-static void jp_ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
-{
- lkdtm_handler();
- jprobe_return();
-}
-
-struct scan_control;
-
-static unsigned long jp_shrink_inactive_list(unsigned long max_scan,
- struct zone *zone,
- struct scan_control *sc)
-{
- lkdtm_handler();
- jprobe_return();
- return 0;
-}
-
-static int jp_hrtimer_start(struct hrtimer *timer, ktime_t tim,
- const enum hrtimer_mode mode)
-{
- lkdtm_handler();
- jprobe_return();
- return 0;
-}
-
-static int jp_scsi_dispatch_cmd(struct scsi_cmnd *cmd)
-{
- lkdtm_handler();
- jprobe_return();
- return 0;
-}
-
-#ifdef CONFIG_IDE
-int jp_generic_ide_ioctl(ide_drive_t *drive, struct file *file,
- struct block_device *bdev, unsigned int cmd,
- unsigned long arg)
-{
- lkdtm_handler();
- jprobe_return();
- return 0;
-}
-#endif
-
-static int lkdtm_parse_commandline(void)
-{
- int i;
-
- if (cpoint_name == INVALID || cpoint_type == NONE ||
- cpoint_count < 1 || recur_count < 1)
- return -EINVAL;
-
- for (i = 0; i < NUM_CPOINTS; ++i) {
- if (!strcmp(cpoint_name, cp_name[i])) {
- cpoint = i + 1;
- break;
- }
- }
-
- for (i = 0; i < NUM_CPOINT_TYPES; ++i) {
- if (!strcmp(cpoint_type, cp_type[i])) {
- cptype = i + 1;
- break;
- }
- }
-
- if (cpoint == INVALID || cptype == NONE)
- return -EINVAL;
-
- count = cpoint_count;
-
- return 0;
-}
-
-static int recursive_loop(int a)
-{
- char buf[1024];
-
- memset(buf,0xFF,1024);
- recur_count--;
- if (!recur_count)
- return 0;
- else
- return recursive_loop(a);
-}
-
-void lkdtm_handler(void)
-{
- printk(KERN_INFO "lkdtm : Crash point %s of type %s hit\n",
- cpoint_name, cpoint_type);
- --count;
-
- if (count == 0) {
- switch (cptype) {
- case NONE:
- break;
- case PANIC:
- printk(KERN_INFO "lkdtm : PANIC\n");
- panic("dumptest");
- break;
- case BUG:
- printk(KERN_INFO "lkdtm : BUG\n");
- BUG();
- break;
- case EXCEPTION:
- printk(KERN_INFO "lkdtm : EXCEPTION\n");
- *((int *) 0) = 0;
- break;
- case LOOP:
- printk(KERN_INFO "lkdtm : LOOP\n");
- for (;;);
- break;
- case OVERFLOW:
- printk(KERN_INFO "lkdtm : OVERFLOW\n");
- (void) recursive_loop(0);
- break;
- default:
- break;
- }
- count = cpoint_count;
- }
-}
-
-static int __init lkdtm_module_init(void)
-{
- int ret;
-
- if (lkdtm_parse_commandline() == -EINVAL) {
- printk(KERN_INFO "lkdtm : Invalid command\n");
- return -EINVAL;
- }
-
- switch (cpoint) {
- case INT_HARDWARE_ENTRY:
- lkdtm.kp.symbol_name = "__do_IRQ";
- lkdtm.entry = (kprobe_opcode_t*) jp_do_irq;
- break;
- case INT_HW_IRQ_EN:
- lkdtm.kp.symbol_name = "handle_IRQ_event";
- lkdtm.entry = (kprobe_opcode_t*) jp_handle_irq_event;
- break;
- case INT_TASKLET_ENTRY:
- lkdtm.kp.symbol_name = "tasklet_action";
- lkdtm.entry = (kprobe_opcode_t*) jp_tasklet_action;
- break;
- case FS_DEVRW:
- lkdtm.kp.symbol_name = "ll_rw_block";
- lkdtm.entry = (kprobe_opcode_t*) jp_ll_rw_block;
- break;
- case MEM_SWAPOUT:
- lkdtm.kp.symbol_name = "shrink_inactive_list";
- lkdtm.entry = (kprobe_opcode_t*) jp_shrink_inactive_list;
- break;
- case TIMERADD:
- lkdtm.kp.symbol_name = "hrtimer_start";
- lkdtm.entry = (kprobe_opcode_t*) jp_hrtimer_start;
- break;
- case SCSI_DISPATCH_CMD:
- lkdtm.kp.symbol_name = "scsi_dispatch_cmd";
- lkdtm.entry = (kprobe_opcode_t*) jp_scsi_dispatch_cmd;
- break;
- case IDE_CORE_CP:
-#ifdef CONFIG_IDE
- lkdtm.kp.symbol_name = "generic_ide_ioctl";
- lkdtm.entry = (kprobe_opcode_t*) jp_generic_ide_ioctl;
-#else
- printk(KERN_INFO "lkdtm : Crash point not available\n");
-#endif
- break;
- default:
- printk(KERN_INFO "lkdtm : Invalid Crash Point\n");
- break;
- }
-
- if ((ret = register_jprobe(&lkdtm)) < 0) {
- printk(KERN_INFO "lkdtm : Couldn't register jprobe\n");
- return ret;
- }
-
- printk(KERN_INFO "lkdtm : Crash point %s of type %s registered\n",
- cpoint_name, cpoint_type);
- return 0;
-}
-
-static void __exit lkdtm_module_exit(void)
-{
- unregister_jprobe(&lkdtm);
- printk(KERN_INFO "lkdtm : Crash point unregistered\n");
-}
-
-module_init(lkdtm_module_init);
-module_exit(lkdtm_module_exit);
-
-MODULE_LICENSE("GPL");
Index: linux-2.6.24-rc6/lib/Kconfig.debug
===================================================================
--- linux-2.6.24-rc6.orig/lib/Kconfig.debug
+++ linux-2.6.24-rc6/lib/Kconfig.debug
@@ -467,21 +467,6 @@ config BOOT_PRINTK_DELAY
BOOT_PRINTK_DELAY also may cause DETECT_SOFTLOCKUP to detect
what it believes to be lockup conditions.

-config LKDTM
- tristate "Linux Kernel Dump Test Tool Module"
- depends on DEBUG_KERNEL
- depends on KPROBES
- default n
- help
- This module enables testing of the different dumping mechanisms by
- inducing system failures at predefined crash points.
- If you don't need it: say N
- Choose M here to compile this code as a module. The module will be
- called lkdtm.
-
- Documentation on how to use the module can be found in
- drivers/misc/lkdtm.c
-
config FAULT_INJECTION
bool "Fault-injection framework"
depends on DEBUG_KERNEL
Index: linux-2.6.24-rc6/tests/Kconfig
===================================================================
--- linux-2.6.24-rc6.orig/tests/Kconfig
+++ linux-2.6.24-rc6/tests/Kconfig
@@ -51,5 +51,20 @@ config DEBUG_SYNCHRO_TEST

See Documentation/synchro-test.txt.

+config LKDTM
+ tristate "Linux Kernel Dump Test Tool Module"
+ depends on DEBUG_KERNEL
+ depends on KPROBES
+ default n
+ help
+ This module enables testing of the different dumping mechanisms by
+ inducing system failures at predefined crash points.
+ If you don't need it: say N
+ Choose M here to compile this code as a module. The module will be
+ called lkdtm.
+
+ Documentation on how to use the module can be found in
+ drivers/misc/lkdtm.c
+
endif # KERNEL_TESTS

Index: linux-2.6.24-rc6/tests/Makefile
===================================================================
--- linux-2.6.24-rc6.orig/tests/Makefile
+++ linux-2.6.24-rc6/tests/Makefile
@@ -6,3 +6,4 @@ obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS
obj-$(CONFIG_RCU_TORTURE_TEST) += rcutorture.o
obj-$(CONFIG_RT_MUTEX_TESTER) += rtmutex-tester.o
obj-$(CONFIG_DEBUG_SYNCHRO_TEST) += synchro-test.o
+obj-$(CONFIG_LKDTM) += lkdtm.o
Index: linux-2.6.24-rc6/tests/lkdtm.c
===================================================================
--- /dev/null
+++ linux-2.6.24-rc6/tests/lkdtm.c
@@ -0,0 +1,345 @@
+/*
+ * Kprobe module for testing crash dumps
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2006
+ *
+ * Author: Ankita Garg <[email protected]>
+ *
+ * This module induces system failures at predefined crashpoints to
+ * evaluate the reliability of crash dumps obtained using different dumping
+ * solutions.
+ *
+ * It is adapted from the Linux Kernel Dump Test Tool by
+ * Fernando Luis Vazquez Cao <http://lkdtt.sourceforge.net>
+ *
+ * Usage : insmod lkdtm.ko [recur_count={>0}] cpoint_name=<> cpoint_type=<>
+ * [cpoint_count={>0}]
+ *
+ * recur_count : Recursion level for the stack overflow test. Default is 10.
+ *
+ * cpoint_name : Crash point where the kernel is to be crashed. It can be
+ * one of INT_HARDWARE_ENTRY, INT_HW_IRQ_EN, INT_TASKLET_ENTRY,
+ * FS_DEVRW, MEM_SWAPOUT, TIMERADD, SCSI_DISPATCH_CMD,
+ * IDE_CORE_CP
+ *
+ * cpoint_type : Indicates the action to be taken on hitting the crash point.
+ * It can be one of PANIC, BUG, EXCEPTION, LOOP, OVERFLOW
+ *
+ * cpoint_count : Indicates the number of times the crash point is to be hit
+ * to trigger an action. The default is 10.
+ */
+
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/buffer_head.h>
+#include <linux/kprobes.h>
+#include <linux/list.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/hrtimer.h>
+#include <scsi/scsi_cmnd.h>
+
+#ifdef CONFIG_IDE
+#include <linux/ide.h>
+#endif
+
+#define NUM_CPOINTS 8
+#define NUM_CPOINT_TYPES 5
+#define DEFAULT_COUNT 10
+#define REC_NUM_DEFAULT 10
+
+enum cname {
+ INVALID,
+ INT_HARDWARE_ENTRY,
+ INT_HW_IRQ_EN,
+ INT_TASKLET_ENTRY,
+ FS_DEVRW,
+ MEM_SWAPOUT,
+ TIMERADD,
+ SCSI_DISPATCH_CMD,
+ IDE_CORE_CP
+};
+
+enum ctype {
+ NONE,
+ PANIC,
+ BUG,
+ EXCEPTION,
+ LOOP,
+ OVERFLOW
+};
+
+static char* cp_name[] = {
+ "INT_HARDWARE_ENTRY",
+ "INT_HW_IRQ_EN",
+ "INT_TASKLET_ENTRY",
+ "FS_DEVRW",
+ "MEM_SWAPOUT",
+ "TIMERADD",
+ "SCSI_DISPATCH_CMD",
+ "IDE_CORE_CP"
+};
+
+static char* cp_type[] = {
+ "PANIC",
+ "BUG",
+ "EXCEPTION",
+ "LOOP",
+ "OVERFLOW"
+};
+
+static struct jprobe lkdtm;
+
+static int lkdtm_parse_commandline(void);
+static void lkdtm_handler(void);
+
+static char* cpoint_name;
+static char* cpoint_type;
+static int cpoint_count = DEFAULT_COUNT;
+static int recur_count = REC_NUM_DEFAULT;
+
+static enum cname cpoint = INVALID;
+static enum ctype cptype = NONE;
+static int count = DEFAULT_COUNT;
+
+module_param(recur_count, int, 0644);
+MODULE_PARM_DESC(recur_count, " Recursion level for the stack overflow test, "\
+ "default is 10");
+module_param(cpoint_name, charp, 0644);
+MODULE_PARM_DESC(cpoint_name, " Crash Point, where kernel is to be crashed");
+module_param(cpoint_type, charp, 0644);
+MODULE_PARM_DESC(cpoint_type, " Crash Point Type, action to be taken on "\
+ "hitting the crash point");
+module_param(cpoint_count, int, 0644);
+MODULE_PARM_DESC(cpoint_count, " Crash Point Count, number of times the "\
+ "crash point is to be hit to trigger action");
+
+static unsigned int jp_do_irq(unsigned int irq)
+{
+ lkdtm_handler();
+ jprobe_return();
+ return 0;
+}
+
+static irqreturn_t jp_handle_irq_event(unsigned int irq,
+ struct irqaction *action)
+{
+ lkdtm_handler();
+ jprobe_return();
+ return 0;
+}
+
+static void jp_tasklet_action(struct softirq_action *a)
+{
+ lkdtm_handler();
+ jprobe_return();
+}
+
+static void jp_ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
+{
+ lkdtm_handler();
+ jprobe_return();
+}
+
+struct scan_control;
+
+static unsigned long jp_shrink_inactive_list(unsigned long max_scan,
+ struct zone *zone,
+ struct scan_control *sc)
+{
+ lkdtm_handler();
+ jprobe_return();
+ return 0;
+}
+
+static int jp_hrtimer_start(struct hrtimer *timer, ktime_t tim,
+ const enum hrtimer_mode mode)
+{
+ lkdtm_handler();
+ jprobe_return();
+ return 0;
+}
+
+static int jp_scsi_dispatch_cmd(struct scsi_cmnd *cmd)
+{
+ lkdtm_handler();
+ jprobe_return();
+ return 0;
+}
+
+#ifdef CONFIG_IDE
+int jp_generic_ide_ioctl(ide_drive_t *drive, struct file *file,
+ struct block_device *bdev, unsigned int cmd,
+ unsigned long arg)
+{
+ lkdtm_handler();
+ jprobe_return();
+ return 0;
+}
+#endif
+
+static int lkdtm_parse_commandline(void)
+{
+ int i;
+
+ if (cpoint_name == INVALID || cpoint_type == NONE ||
+ cpoint_count < 1 || recur_count < 1)
+ return -EINVAL;
+
+ for (i = 0; i < NUM_CPOINTS; ++i) {
+ if (!strcmp(cpoint_name, cp_name[i])) {
+ cpoint = i + 1;
+ break;
+ }
+ }
+
+ for (i = 0; i < NUM_CPOINT_TYPES; ++i) {
+ if (!strcmp(cpoint_type, cp_type[i])) {
+ cptype = i + 1;
+ break;
+ }
+ }
+
+ if (cpoint == INVALID || cptype == NONE)
+ return -EINVAL;
+
+ count = cpoint_count;
+
+ return 0;
+}
+
+static int recursive_loop(int a)
+{
+ char buf[1024];
+
+ memset(buf,0xFF,1024);
+ recur_count--;
+ if (!recur_count)
+ return 0;
+ else
+ return recursive_loop(a);
+}
+
+void lkdtm_handler(void)
+{
+ printk(KERN_INFO "lkdtm : Crash point %s of type %s hit\n",
+ cpoint_name, cpoint_type);
+ --count;
+
+ if (count == 0) {
+ switch (cptype) {
+ case NONE:
+ break;
+ case PANIC:
+ printk(KERN_INFO "lkdtm : PANIC\n");
+ panic("dumptest");
+ break;
+ case BUG:
+ printk(KERN_INFO "lkdtm : BUG\n");
+ BUG();
+ break;
+ case EXCEPTION:
+ printk(KERN_INFO "lkdtm : EXCEPTION\n");
+ *((int *) 0) = 0;
+ break;
+ case LOOP:
+ printk(KERN_INFO "lkdtm : LOOP\n");
+ for (;;);
+ break;
+ case OVERFLOW:
+ printk(KERN_INFO "lkdtm : OVERFLOW\n");
+ (void) recursive_loop(0);
+ break;
+ default:
+ break;
+ }
+ count = cpoint_count;
+ }
+}
+
+static int __init lkdtm_module_init(void)
+{
+ int ret;
+
+ if (lkdtm_parse_commandline() == -EINVAL) {
+ printk(KERN_INFO "lkdtm : Invalid command\n");
+ return -EINVAL;
+ }
+
+ switch (cpoint) {
+ case INT_HARDWARE_ENTRY:
+ lkdtm.kp.symbol_name = "__do_IRQ";
+ lkdtm.entry = (kprobe_opcode_t*) jp_do_irq;
+ break;
+ case INT_HW_IRQ_EN:
+ lkdtm.kp.symbol_name = "handle_IRQ_event";
+ lkdtm.entry = (kprobe_opcode_t*) jp_handle_irq_event;
+ break;
+ case INT_TASKLET_ENTRY:
+ lkdtm.kp.symbol_name = "tasklet_action";
+ lkdtm.entry = (kprobe_opcode_t*) jp_tasklet_action;
+ break;
+ case FS_DEVRW:
+ lkdtm.kp.symbol_name = "ll_rw_block";
+ lkdtm.entry = (kprobe_opcode_t*) jp_ll_rw_block;
+ break;
+ case MEM_SWAPOUT:
+ lkdtm.kp.symbol_name = "shrink_inactive_list";
+ lkdtm.entry = (kprobe_opcode_t*) jp_shrink_inactive_list;
+ break;
+ case TIMERADD:
+ lkdtm.kp.symbol_name = "hrtimer_start";
+ lkdtm.entry = (kprobe_opcode_t*) jp_hrtimer_start;
+ break;
+ case SCSI_DISPATCH_CMD:
+ lkdtm.kp.symbol_name = "scsi_dispatch_cmd";
+ lkdtm.entry = (kprobe_opcode_t*) jp_scsi_dispatch_cmd;
+ break;
+ case IDE_CORE_CP:
+#ifdef CONFIG_IDE
+ lkdtm.kp.symbol_name = "generic_ide_ioctl";
+ lkdtm.entry = (kprobe_opcode_t*) jp_generic_ide_ioctl;
+#else
+ printk(KERN_INFO "lkdtm : Crash point not available\n");
+#endif
+ break;
+ default:
+ printk(KERN_INFO "lkdtm : Invalid Crash Point\n");
+ break;
+ }
+
+ if ((ret = register_jprobe(&lkdtm)) < 0) {
+ printk(KERN_INFO "lkdtm : Couldn't register jprobe\n");
+ return ret;
+ }
+
+ printk(KERN_INFO "lkdtm : Crash point %s of type %s registered\n",
+ cpoint_name, cpoint_type);
+ return 0;
+}
+
+static void __exit lkdtm_module_exit(void)
+{
+ unregister_jprobe(&lkdtm);
+ printk(KERN_INFO "lkdtm : Crash point unregistered\n");
+}
+
+module_init(lkdtm_module_init);
+module_exit(lkdtm_module_exit);
+
+MODULE_LICENSE("GPL");
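(As a concrete example of the usage documented in the header comment above, an
invocation using one of the listed crash point names and types would look
something like:

    insmod lkdtm.ko cpoint_name=INT_HARDWARE_ENTRY cpoint_type=BUG cpoint_count=5

with recur_count only relevant to the OVERFLOW type.)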

Subject: [PATCH 7/7] Add kprobes smoke tests under tests/

From: Ananth N Mavinakayanahalli <[email protected]>

Here is a quick and naive smoke test for kprobes. It is intended simply to
verify whether some unrelated change has broken the *probes subsystem. It is
self-contained, architecture-agnostic and isn't of any great use by itself.

This needs to be built into the kernel and runs a basic set of tests to
verify that kprobes, jprobes and kretprobes run fine on the kernel. In case
of an error, it prints a message with a "BUG" prefix.

This is a start; we intend to add more tests to this bucket over time.

Thanks to Jim Keniston and Masami Hiramatsu for comments and suggestions.

Tested on x86 (32/64) and powerpc.

PS: This can't be built as a module (yet), as noinline doesn't always work
reliably and I have yet to figure out a (non-varargs based) solution that
lets the jprobes tests work seamlessly.

Updated to live under tests/

Signed-off-by: Ananth N Mavinakayanahalli <[email protected]>
Acked-by: Masami Hiramatsu <[email protected]>
---
include/linux/kprobes.h | 9 +
kernel/kprobes.c | 2
tests/Kconfig | 12 ++
tests/Makefile | 1
tests/test_kprobes.c | 218 ++++++++++++++++++++++++++++++++++++++++++++++++
5 files changed, 242 insertions(+)
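(For reference, given the Kconfig hunk below, enabling the test on top of this
series amounts to a config fragment along these lines:

    CONFIG_DEBUG_KERNEL=y
    CONFIG_KPROBES=y
    CONFIG_KERNEL_TESTS=y
    CONFIG_KPROBES_SANITY_TEST=y

since KPROBES_SANITY_TEST is a bool living under the KERNEL_TESTS menu and
depending on DEBUG_KERNEL and KPROBES.)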

Index: linux-2.6.24-rc6/tests/Kconfig
===================================================================
--- linux-2.6.24-rc6.orig/tests/Kconfig
+++ linux-2.6.24-rc6/tests/Kconfig
@@ -66,5 +66,17 @@ config LKDTM
Documentation on how to use the module can be found in
drivers/misc/lkdtm.c

+config KPROBES_SANITY_TEST
+ bool "Kprobes sanity tests"
+ depends on DEBUG_KERNEL
+ depends on KPROBES
+ default n
+ help
+ This option provides for testing basic kprobes functionality on
+ boot. A sample kprobe, jprobe and kretprobe are inserted and
+ verified for functionality.
+
+ Say N if you are unsure.
+
endif # KERNEL_TESTS

Index: linux-2.6.24-rc6/tests/Makefile
===================================================================
--- linux-2.6.24-rc6.orig/tests/Makefile
+++ linux-2.6.24-rc6/tests/Makefile
@@ -7,3 +7,4 @@ obj-$(CONFIG_RCU_TORTURE_TEST) += rcutor
obj-$(CONFIG_RT_MUTEX_TESTER) += rtmutex-tester.o
obj-$(CONFIG_DEBUG_SYNCHRO_TEST) += synchro-test.o
obj-$(CONFIG_LKDTM) += lkdtm.o
+obj-$(CONFIG_KPROBES_SANITY_TEST) += test_kprobes.o
Index: linux-2.6.24-rc6/tests/test_kprobes.c
===================================================================
--- /dev/null
+++ linux-2.6.24-rc6/tests/test_kprobes.c
@@ -0,0 +1,218 @@
+/*
+ * test_kprobes.c - simple sanity test for *probes
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/kprobes.h>
+#include <linux/random.h>
+
+#define div_factor 3
+
+static u32 rand1, preh_val, posth_val, jph_val;
+static int errors, handler_errors, num_tests;
+
+static noinline u32 kprobe_target(u32 value)
+{
+ /*
+ * gcc ignores noinline on some architectures unless we stuff
+ * sufficient lard into the function. The get_kprobe() here is
+ * just for that.
+ *
+ * NOTE: We aren't concerned about the correctness of get_kprobe()
+ * here; hence, this call is neither under !preempt nor with the
+ * kprobe_mutex held. This is fine(tm)
+ */
+ if (get_kprobe((void *)0xdeadbeef))
+ printk(KERN_INFO "Kprobe smoke test: probe on 0xdeadbeef!\n");
+
+ return (value / div_factor);
+}
+
+static int kp_pre_handler(struct kprobe *p, struct pt_regs *regs)
+{
+ preh_val = (rand1 / div_factor);
+ return 0;
+}
+
+static void kp_post_handler(struct kprobe *p, struct pt_regs *regs,
+ unsigned long flags)
+{
+ if (preh_val != (rand1 / div_factor)) {
+ handler_errors++;
+ printk(KERN_ERR "Kprobe smoke test failed: "
+ "incorrect value in post_handler\n");
+ }
+ posth_val = preh_val + div_factor;
+}
+
+static struct kprobe kp = {
+ .symbol_name = "kprobe_target",
+ .pre_handler = kp_pre_handler,
+ .post_handler = kp_post_handler
+};
+
+static int test_kprobe(void)
+{
+ int ret;
+
+ ret = register_kprobe(&kp);
+ if (ret < 0) {
+ printk(KERN_ERR "Kprobe smoke test failed: "
+ "register_kprobe returned %d\n", ret);
+ return ret;
+ }
+
+ ret = kprobe_target(rand1);
+ unregister_kprobe(&kp);
+
+ if (preh_val == 0) {
+ printk(KERN_ERR "Kprobe smoke test failed: "
+ "kprobe pre_handler not called\n");
+ handler_errors++;
+ }
+
+ if (posth_val == 0) {
+ printk(KERN_ERR "Kprobe smoke test failed: "
+ "kprobe post_handler not called\n");
+ handler_errors++;
+ }
+
+ return 0;
+}
+
+static u32 j_kprobe_target(u32 value)
+{
+ if (value != rand1) {
+ handler_errors++;
+ printk(KERN_ERR "Kprobe smoke test failed: "
+ "incorrect value in jprobe handler\n");
+ }
+
+ jph_val = rand1;
+ jprobe_return();
+ return 0;
+}
+
+static struct jprobe jp = {
+ .entry = j_kprobe_target,
+ .kp.symbol_name = "kprobe_target"
+};
+
+static int test_jprobe(void)
+{
+ int ret;
+
+ ret = register_jprobe(&jp);
+ if (ret < 0) {
+ printk(KERN_ERR "Kprobe smoke test failed: "
+ "register_jprobe returned %d\n", ret);
+ return ret;
+ }
+
+ ret = kprobe_target(rand1);
+ unregister_jprobe(&jp);
+ if (jph_val == 0) {
+ /* Pre-handler was never called */
+ printk(KERN_ERR "Kprobe smoke test failed: "
+ "jprobe handler not called\n");
+ handler_errors++;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_KRETPROBES
+static u32 krph_val;
+
+static int return_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
+{
+ unsigned long ret = regs_return_value(regs);
+
+ if (ret != (rand1 / div_factor)) {
+ handler_errors++;
+ printk(KERN_ERR "Kprobe smoke test failed: "
+ "incorrect value in kretprobe handler\n");
+ }
+
+ krph_val = (rand1 / div_factor);
+ return 0;
+}
+
+static struct kretprobe rp = {
+ .handler = return_handler,
+ .kp.symbol_name = "kprobe_target"
+};
+
+static int test_kretprobe(void)
+{
+ int ret;
+
+ ret = register_kretprobe(&rp);
+ if (ret < 0) {
+ printk(KERN_ERR "Kprobe smoke test failed: "
+ "register_kretprobe returned %d\n", ret);
+ return ret;
+ }
+
+ ret = kprobe_target(rand1);
+ unregister_kretprobe(&rp);
+ if (krph_val == 0) {
+ /* Pre-handler was never called */
+ printk(KERN_ERR "Kprobe smoke test failed: "
+ "kretprobe handler not called\n");
+ handler_errors++;
+ }
+
+ return 0;
+}
+#endif /* CONFIG_KRETPROBES */
+
+int init_test_probes(void)
+{
+ int ret;
+
+ do {
+ rand1 = random32();
+ } while (rand1 <= div_factor);
+
+ printk(KERN_INFO "Kprobe smoke test started\n");
+ num_tests++;
+ ret = test_kprobe();
+ if (ret < 0)
+ errors++;
+
+ num_tests++;
+ ret = test_jprobe();
+ if (ret < 0)
+ errors++;
+
+#ifdef CONFIG_KRETPROBES
+ num_tests++;
+ ret = test_kretprobe();
+ if (ret < 0)
+ errors++;
+#endif /* CONFIG_KRETPROBES */
+
+ if (errors)
+ printk(KERN_ERR "BUG: Kprobe smoke test: %d out of "
+ "%d tests failed\n", errors, num_tests);
+ else if (handler_errors)
+ printk(KERN_ERR "BUG: Kprobe smoke test: %d error(s) "
+ "running handlers\n", handler_errors);
+ else
+ printk(KERN_INFO "Kprobe smoke test passed successfully\n");
+
+ return 0;
+}
Index: linux-2.6.24-rc6/include/linux/kprobes.h
===================================================================
--- linux-2.6.24-rc6.orig/include/linux/kprobes.h
+++ linux-2.6.24-rc6/include/linux/kprobes.h
@@ -182,6 +182,15 @@ static inline void kretprobe_assert(stru
}
}

+#ifdef CONFIG_KPROBES_SANITY_TEST
+extern int init_test_probes(void);
+#else
+static inline int init_test_probes(void)
+{
+ return 0;
+}
+#endif /* CONFIG_KPROBES_SANITY_TEST */
+
extern spinlock_t kretprobe_lock;
extern struct mutex kprobe_mutex;
extern int arch_prepare_kprobe(struct kprobe *p);
Index: linux-2.6.24-rc6/kernel/kprobes.c
===================================================================
--- linux-2.6.24-rc6.orig/kernel/kprobes.c
+++ linux-2.6.24-rc6/kernel/kprobes.c
@@ -824,6 +824,8 @@ static int __init init_kprobes(void)
if (!err)
err = register_die_notifier(&kprobe_exceptions_nb);

+ if (!err)
+ init_test_probes();
return err;
}
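(With the option enabled, a successful boot-time run of the above shows up in
the kernel log as the messages printed by init_test_probes():

    Kprobe smoke test started
    Kprobe smoke test passed successfully

while failures are reported with the "BUG:"-prefixed messages instead.)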

From: Sam Ravnborg
Date: 2008-01-10 15:16:53
Subject: Re: [PATCH 1/7] Add tests/ directory

On Thu, Jan 10, 2008 at 05:25:37PM +0530, Ananth N Mavinakayanahalli wrote:
> From: Ananth N Mavinakayanahalli <[email protected]>
>
> Create a toplevel tests/ directory to house in-kernel subsystem specific
> tests.
>
> PS: I am not sure if I've gotten the Makefile change right.
>
> Signed-off-by: Ananth N Mavinakayanahalli <[email protected]>
> ---
> Makefile | 3 +++
> lib/Kconfig.debug | 2 ++
> tests/Kconfig | 11 +++++++++++
> tests/Makefile | 3 +++
> 4 files changed, 19 insertions(+)
>
> Index: linux-2.6.24-rc6/lib/Kconfig.debug
> ===================================================================
> --- linux-2.6.24-rc6.orig/lib/Kconfig.debug
> +++ linux-2.6.24-rc6/lib/Kconfig.debug
> @@ -596,3 +596,5 @@ config PROVIDE_OHCI1394_DMA_INIT
> See Documentation/debugging-via-ohci1394.txt for more information.
>
> source "samples/Kconfig"
> +
> +source "tests/Kconfig"
> Index: linux-2.6.24-rc6/tests/Kconfig
> ===================================================================
> --- /dev/null
> +++ linux-2.6.24-rc6/tests/Kconfig
> @@ -0,0 +1,11 @@
> +# tests/Kconfig
> +
> +menuconfig KERNEL_TESTS
> + bool "Kernel subsystem tests"
> + help
> + You can build kernel subsystem specific tests.
> +
> +if KERNEL_TESTS
> +
> +endif # KERNEL_TESTS
> +
> Index: linux-2.6.24-rc6/tests/Makefile
> ===================================================================
> --- /dev/null
> +++ linux-2.6.24-rc6/tests/Makefile
> @@ -0,0 +1,3 @@
> +#
> +# Makefile for kernel subsystem specific tests
> +#
> Index: linux-2.6.24-rc6/Makefile
> ===================================================================
> --- linux-2.6.24-rc6.orig/Makefile
> +++ linux-2.6.24-rc6/Makefile
> @@ -598,6 +598,9 @@ export mod_strip_cmd
>
> ifeq ($(KBUILD_EXTMOD),)
> core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
> +ifdef CONFIG_KERNEL_TESTS
> +core-y += tests/
> +endif

CONFIG_KERNEL_TESTS is a bool so this should be written as:
core-$(CONFIG_KERNEL_TESTS) += tests/

With this change:
Acked-by: Sam Ravnborg <[email protected]>

Sam
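
(For context, the idiom Sam suggests works because KERNEL_TESTS is a bool:
$(CONFIG_KERNEL_TESTS) expands to "y" when the option is set and to nothing
when it is not, so the line becomes either

    core-y += tests/

or

    core- += tests/

and kbuild simply ignores the latter, which makes the ifdef unnecessary.)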

From: Sam Ravnborg
Date: 2008-01-10 15:17:59
Subject: Re: [PATCH 0/7] Create and populate toplevel tests/ directory

Hi Ananth.

On Thu, Jan 10, 2008 at 05:24:28PM +0530, Ananth N Mavinakayanahalli wrote:
> The following series of patches create and populate the toplevel tests/
> directory. This will henceforth be the place where all in-kernel tests
> live.

Thanks for creating these patches.
Look good - one small comment in a follow-up mail.

Sam

Subject: Re: [PATCH 1/7] Add tests/ directory

On Thu, Jan 10, 2008 at 04:16:35PM +0100, Sam Ravnborg wrote:
> On Thu, Jan 10, 2008 at 05:25:37PM +0530, Ananth N Mavinakayanahalli wrote:
> > From: Ananth N Mavinakayanahalli <[email protected]>
> >
> > Create a toplevel tests/ directory to house in-kernel subsystem specific
> > tests.
> >
> > PS: I am not sure if I've gotten the Makefile change right.
> >
> > Signed-off-by: Ananth N Mavinakayanahalli <[email protected]>
> > ---
> > Makefile | 3 +++
> > lib/Kconfig.debug | 2 ++
> > tests/Kconfig | 11 +++++++++++
> > tests/Makefile | 3 +++
> > 4 files changed, 19 insertions(+)
> >
> > Index: linux-2.6.24-rc6/lib/Kconfig.debug
> > ===================================================================
> > --- linux-2.6.24-rc6.orig/lib/Kconfig.debug
> > +++ linux-2.6.24-rc6/lib/Kconfig.debug
> > @@ -596,3 +596,5 @@ config PROVIDE_OHCI1394_DMA_INIT
> > See Documentation/debugging-via-ohci1394.txt for more information.
> >
> > source "samples/Kconfig"
> > +
> > +source "tests/Kconfig"
> > Index: linux-2.6.24-rc6/tests/Kconfig
> > ===================================================================
> > --- /dev/null
> > +++ linux-2.6.24-rc6/tests/Kconfig
> > @@ -0,0 +1,11 @@
> > +# tests/Kconfig
> > +
> > +menuconfig KERNEL_TESTS
> > + bool "Kernel subsystem tests"
> > + help
> > + You can build kernel subsystem specific tests.
> > +
> > +if KERNEL_TESTS
> > +
> > +endif # KERNEL_TESTS
> > +
> > Index: linux-2.6.24-rc6/tests/Makefile
> > ===================================================================
> > --- /dev/null
> > +++ linux-2.6.24-rc6/tests/Makefile
> > @@ -0,0 +1,3 @@
> > +#
> > +# Makefile for kernel subsystem specific tests
> > +#
> > Index: linux-2.6.24-rc6/Makefile
> > ===================================================================
> > --- linux-2.6.24-rc6.orig/Makefile
> > +++ linux-2.6.24-rc6/Makefile
> > @@ -598,6 +598,9 @@ export mod_strip_cmd
> >
> > ifeq ($(KBUILD_EXTMOD),)
> > core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
> > +ifdef CONFIG_KERNEL_TESTS
> > +core-y += tests/
> > +endif
>
> CONFIG_KERNEL_TESTS is a bool so this should be written as:
> core-$(CONFIG_KERNEL_TESTS) += tests/
>
> With this change:
> Acked-by: Sam Ravnborg <[email protected]>

Thanks Sam!

Updated patch below...

From: Ananth N Mavinakayanahalli <[email protected]>

Create a toplevel tests/ directory to house in-kernel subsystem specific
tests.

PS: I am not sure if I've gotten the Makefile change right.

Signed-off-by: Ananth N Mavinakayanahalli <[email protected]>
Acked-by: Sam Ravnborg <[email protected]>
---
Makefile | 1 +
lib/Kconfig.debug | 2 ++
tests/Kconfig | 11 +++++++++++
tests/Makefile | 3 +++
4 files changed, 17 insertions(+)

Index: linux-2.6.24-rc6/lib/Kconfig.debug
===================================================================
--- linux-2.6.24-rc6.orig/lib/Kconfig.debug
+++ linux-2.6.24-rc6/lib/Kconfig.debug
@@ -596,3 +596,5 @@ config PROVIDE_OHCI1394_DMA_INIT
See Documentation/debugging-via-ohci1394.txt for more information.

source "samples/Kconfig"
+
+source "tests/Kconfig"
Index: linux-2.6.24-rc6/tests/Kconfig
===================================================================
--- /dev/null
+++ linux-2.6.24-rc6/tests/Kconfig
@@ -0,0 +1,11 @@
+# tests/Kconfig
+
+menuconfig KERNEL_TESTS
+ bool "Kernel subsystem tests"
+ help
+ You can build kernel subsystem specific tests.
+
+if KERNEL_TESTS
+
+endif # KERNEL_TESTS
+
Index: linux-2.6.24-rc6/tests/Makefile
===================================================================
--- /dev/null
+++ linux-2.6.24-rc6/tests/Makefile
@@ -0,0 +1,3 @@
+#
+# Makefile for kernel subsystem specific tests
+#
Index: linux-2.6.24-rc6/Makefile
===================================================================
--- linux-2.6.24-rc6.orig/Makefile
+++ linux-2.6.24-rc6/Makefile
@@ -598,6 +598,7 @@ export mod_strip_cmd

ifeq ($(KBUILD_EXTMOD),)
core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
+core-$(CONFIG_KERNEL_TESTS) += tests/

vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
$(core-y) $(core-m) $(drivers-y) $(drivers-m) \

From: Josh Triplett
Date: 2008-01-10 23:21:35
Subject: Re: [PATCH 3/7] Move rcutorture to tests/

On Thu, 2008-01-10 at 17:27 +0530, Ananth N Mavinakayanahalli wrote:
> From: Ananth N Mavinakayanahalli <[email protected]>
>
> Move the rcutorture infrastructure to tests/
>
> Signed-off-by: Ananth N Mavinakayanahalli <[email protected]>

Fine by me.
Acked-by: Josh Triplett <[email protected]>

- Josh Triplett



From: Paul E. McKenney
Date: 2008-01-14 07:01:56
Subject: Re: [PATCH 3/7] Move rcutorture to tests/

On Thu, Jan 10, 2008 at 03:14:48PM -0800, Josh Triplett wrote:
> On Thu, 2008-01-10 at 17:27 +0530, Ananth N Mavinakayanahalli wrote:
> > From: Ananth N Mavinakayanahalli <[email protected]>
> >
> > Move the rcutorture infrastructure to tests/
> >
> > Signed-off-by: Ananth N Mavinakayanahalli <[email protected]>
>
> Fine by me.
> Acked-by: Josh Triplett <[email protected]>

And me.

Acked-by: Paul E. McKenney <[email protected]>

Thanx, Paul

From: Matt Mackall
Date: 2008-01-15 16:51:58
Subject: Re: [PATCH 0/7] Create and populate toplevel tests/ directory


On Thu, 2008-01-10 at 17:24 +0530, Ananth N Mavinakayanahalli wrote:
> The following series of patches create and populate the toplevel tests/
> directory. This will henceforth be the place where all in-kernel tests
> live.
>
> All patches against 2.6.24-rc6-mm1

There's a small test I stuck at the end of lib/sort.c, if you're in the
mood. And there's another large set of tests in the cryptoapi.

--
Mathematics is the supreme nostalgia of our time.