Add the ability to run one or more groups of vm tests (specified
by the environment variable TEST_ITEMS). Preserve the existing default
behavior of running all tests when TEST_ITEMS is empty or "default".
Documentation of test groups is included in the patch as follows:
# ./run_vmtests.sh [ -h || --help ]
usage: ./tools/testing/selftests/vm/run_vmtests.sh [ -h ]
-h: display this message
The default behavior is to run all tests.
Alternatively, specific groups of tests can be run by defining
the TEST_ITEMS shell variable with a space-separated string
of one or more of the following:
- mmap
tests for mmap(2)
- gup_test
tests for gup using gup_test interface
- userfaultfd
tests for userfaultfd(2)
- compaction
a test for the patch "Allow compaction of unevictable pages"
- mlock
tests for mlock(2)
- mremap
tests for mremap(2)
- hugevm
tests for very large virtual address space
- vmalloc
vmalloc smoke tests
- hmm
hmm smoke tests
- madv_populate
test madvise(2) MADV_POPULATE_{READ,WRITE} options
- memfd_secret
test memfd_secret(2)
- process_mrelease
test process_mrelease(2)
- ksm
ksm tests that do not require >=2 NUMA nodes
- ksm_numa
ksm tests that require >=2 NUMA nodes
example: TEST_ITEMS="hmm mmap ksm" ./run_vmtests.sh
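
In addition to the example above, the default behavior can be requested
explicitly; either of the following invocations runs the full suite, which
is the same as leaving TEST_ITEMS unset:

	TEST_ITEMS="default" ./run_vmtests.sh
	./run_vmtests.sh
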
Signed-off-by: Joel Savitz <[email protected]>
---
tools/testing/selftests/vm/run_vmtests.sh | 224 +++++++++++++++-------
1 file changed, 150 insertions(+), 74 deletions(-)
diff --git a/tools/testing/selftests/vm/run_vmtests.sh b/tools/testing/selftests/vm/run_vmtests.sh
index 41fce8bea929..d31fc66205f8 100755
--- a/tools/testing/selftests/vm/run_vmtests.sh
+++ b/tools/testing/selftests/vm/run_vmtests.sh
@@ -1,22 +1,74 @@
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
-#please run as root
+# Please run as root
# Kselftest framework requirement - SKIP code is 4.
ksft_skip=4
mnt=./huge
exitcode=0
-
-#get huge pagesize and freepages from /proc/meminfo
-while read -r name size unit; do
- if [ "$name" = "HugePages_Free:" ]; then
- freepgs="$size"
- fi
- if [ "$name" = "Hugepagesize:" ]; then
- hpgsize_KB="$size"
+nr_tests_ran=0
+
+if [ ${1:-0} == "-h" ] || [ ${1:-0} == "--help" ]
+then
+ cat <<EOF
+usage: ${BASH_SOURCE[0]:-$0} [ -h ]
+ -h: display this message
+
+The default behavior is to run all tests.
+
+Alternatively, specific groups of tests can be run by defining
+the TEST_ITEMS shell variable with a space-separated string
+of one or more of the following:
+- mmap
+ tests for mmap(2)
+- gup_test
+ tests for gup using gup_test interface
+- userfaultfd
+ tests for userfaultfd(2)
+- compaction
+ a test for the patch "Allow compaction of unevictable pages"
+- mlock
+ tests for mlock(2)
+- mremap
+ tests for mremap(2)
+- hugevm
+ tests for very large virtual address space
+- vmalloc
+ vmalloc smoke tests
+- hmm
+ hmm smoke tests
+- madv_populate
+ test madvise(2) MADV_POPULATE_{READ,WRITE} options
+- memfd_secret
+ test memfd_secret(2)
+- process_mrelease
+ test process_mrelease(2)
+- ksm
+ ksm tests that do not require >=2 NUMA nodes
+- ksm_numa
+ ksm tests that require >=2 NUMA nodes
+example: TEST_ITEMS="hmm mmap ksm" ./run_vmtests.sh
+EOF
+ exit 0
+fi # $1 == -h || $1 == --help
+
+# default behavior: run all tests
+TEST_ITEMS=${TEST_ITEMS:-default}
+
+echo "Selected test items: ${TEST_ITEMS}"
+
+test_selected() {
+ if [ "$TEST_ITEMS" == "default" ]; then
+ # If no TEST_ITEMS are specified, run all tests
+ return 0
fi
-done < /proc/meminfo
+	echo "${TEST_ITEMS}" | grep "${1}" >/dev/null 2>&1
+ return ${?}
+}
+
+# Hugepage setup only needed for hugetlb tests
+if test_selected "hugetlb"; then
# Simple hugetlbfs tests have a hardcoded minimum requirement of
# huge pages totaling 256MB (262144KB) in size. The userfaultfd
@@ -28,7 +80,17 @@ hpgsize_MB=$((hpgsize_KB / 1024))
half_ufd_size_MB=$((((nr_cpus * hpgsize_MB + 127) / 128) * 128))
needmem_KB=$((half_ufd_size_MB * 2 * 1024))
-#set proper nr_hugepages
+# get huge pagesize and freepages from /proc/meminfo
+while read -r name size unit; do
+ if [ "$name" = "HugePages_Free:" ]; then
+ freepgs="$size"
+ fi
+ if [ "$name" = "Hugepagesize:" ]; then
+ hpgsize_KB="$size"
+ fi
+done < /proc/meminfo
+
+# set proper nr_hugepages
if [ -n "$freepgs" ] && [ -n "$hpgsize_KB" ]; then
nr_hugepgs=$(cat /proc/sys/vm/nr_hugepages)
needpgs=$((needmem_KB / hpgsize_KB))
@@ -57,126 +119,140 @@ else
exit 1
fi
-#filter 64bit architectures
+fi # test_selected "hugetlb"
+
+# filter 64bit architectures
ARCH64STR="arm64 ia64 mips64 parisc64 ppc64 ppc64le riscv64 s390x sh64 sparc64 x86_64"
if [ -z "$ARCH" ]; then
ARCH=$(uname -m 2>/dev/null | sed -e 's/aarch64.*/arm64/')
fi
VADDR64=0
-echo "$ARCH64STR" | grep "$ARCH" && VADDR64=1
+echo "$ARCH64STR" | grep "$ARCH" &>/dev/null && VADDR64=1
# Usage: run_test [test binary] [arbitrary test arguments...]
run_test() {
- local title="running $*"
- local sep=$(echo -n "$title" | tr "[:graph:][:space:]" -)
- printf "%s\n%s\n%s\n" "$sep" "$title" "$sep"
-
- "$@"
- local ret=$?
- if [ $ret -eq 0 ]; then
- echo "[PASS]"
- elif [ $ret -eq $ksft_skip ]; then
- echo "[SKIP]"
- exitcode=$ksft_skip
- else
- echo "[FAIL]"
- exitcode=1
- fi
+ if test_selected ${CATEGORY}; then
+ local title="running $*"
+ local sep=$(echo -n "$title" | tr "[:graph:][:space:]" -)
+ printf "%s\n%s\n%s\n" "$sep" "$title" "$sep"
+
+ "$@"
+ local ret=$?
+ if [ $ret -eq 0 ]; then
+ echo "[PASS]"
+ elif [ $ret -eq $ksft_skip ]; then
+ echo "[SKIP]"
+ exitcode=$ksft_skip
+ else
+ echo "[FAIL]"
+ exitcode=1
+ fi
+ nr_tests_ran=$((nr_tests_ran + 1))
+ fi # test_selected
}
-mkdir "$mnt"
-mount -t hugetlbfs none "$mnt"
+# setup only needed for hugetlb tests
+if test_selected "hugetlb"; then
+ mkdir "$mnt"
+ mount -t hugetlbfs none "$mnt"
+fi
-run_test ./hugepage-mmap
+CATEGORY="hugetlb" run_test ./hugepage-mmap
shmmax=$(cat /proc/sys/kernel/shmmax)
shmall=$(cat /proc/sys/kernel/shmall)
echo 268435456 > /proc/sys/kernel/shmmax
echo 4194304 > /proc/sys/kernel/shmall
-run_test ./hugepage-shm
+CATEGORY="hugetlb" run_test ./hugepage-shm
echo "$shmmax" > /proc/sys/kernel/shmmax
echo "$shmall" > /proc/sys/kernel/shmall
-run_test ./map_hugetlb
+CATEGORY="hugetlb" run_test ./map_hugetlb
-run_test ./hugepage-mremap "$mnt"/huge_mremap
-rm -f "$mnt"/huge_mremap
+CATEGORY="hugetlb" run_test ./hugepage-mremap "$mnt"/huge_mremap
+test_selected "hugetlb" && rm -f "$mnt"/huge_mremap
-run_test ./hugepage-vmemmap
+CATEGORY="hugetlb" run_test ./hugepage-vmemmap
-run_test ./hugetlb-madvise "$mnt"/madvise-test
-rm -f "$mnt"/madvise-test
+CATEGORY="hugetlb" run_test ./hugetlb-madvise "$mnt"/madvise-test
+test_selected "hugetlb" && rm -f "$mnt"/madvise-test
-echo "NOTE: The above hugetlb tests provide minimal coverage. Use"
-echo " https://github.com/libhugetlbfs/libhugetlbfs.git for"
-echo " hugetlb regression testing."
+if test_selected "hugetlb"; then
+ echo "NOTE: These hugetlb tests provide minimal coverage. Use"
+ echo " https://github.com/libhugetlbfs/libhugetlbfs.git for"
+ echo " hugetlb regression testing."
+fi
-run_test ./map_fixed_noreplace
+CATEGORY="mmap" run_test ./map_fixed_noreplace
# get_user_pages_fast() benchmark
-run_test ./gup_test -u
+CATEGORY="gup_test" run_test ./gup_test -u
# pin_user_pages_fast() benchmark
-run_test ./gup_test -a
+CATEGORY="gup_test" run_test ./gup_test -a
# Dump pages 0, 19, and 4096, using pin_user_pages:
-run_test ./gup_test -ct -F 0x1 0 19 0x1000
+CATEGORY="gup_test" run_test ./gup_test -ct -F 0x1 0 19 0x1000
-run_test ./userfaultfd anon 20 16
+CATEGORY="userfaultfd" run_test ./userfaultfd anon 20 16
# Test requires source and destination huge pages. Size of source
# (half_ufd_size_MB) is passed as argument to test.
-run_test ./userfaultfd hugetlb "$half_ufd_size_MB" 32
-run_test ./userfaultfd shmem 20 16
-
-#cleanup
-umount "$mnt"
-rm -rf "$mnt"
-echo "$nr_hugepgs" > /proc/sys/vm/nr_hugepages
+CATEGORY="userfaultfd" run_test ./userfaultfd hugetlb "$half_ufd_size_MB" 32
+CATEGORY="userfaultfd" run_test ./userfaultfd shmem 20 16
+
+# cleanup (only needed when running hugetlb tests)
+if test_selected "hugetlb"; then
+ umount "$mnt"
+ rm -rf "$mnt"
+ echo "$nr_hugepgs" > /proc/sys/vm/nr_hugepages
+fi
-run_test ./compaction_test
+CATEGORY="compaction" run_test ./compaction_test
-run_test sudo -u nobody ./on-fault-limit
+CATEGORY="mlock" run_test sudo -u nobody ./on-fault-limit
-run_test ./map_populate
+CATEGORY="mmap" run_test ./map_populate
-run_test ./mlock-random-test
+CATEGORY="mlock" run_test ./mlock-random-test
-run_test ./mlock2-tests
+CATEGORY="mlock" run_test ./mlock2-tests
-run_test ./mrelease_test
+CATEGORY="process_mrelease" run_test ./mrelease_test
-run_test ./mremap_test
+CATEGORY="mremap" run_test ./mremap_test
-run_test ./thuge-gen
+CATEGORY="hugetlb" run_test ./thuge-gen
if [ $VADDR64 -ne 0 ]; then
- run_test ./virtual_address_range
+ CATEGORY="hugevm" run_test ./virtual_address_range
# virtual address 128TB switch test
- run_test ./va_128TBswitch
+ CATEGORY="hugevm" run_test ./va_128TBswitch
fi # VADDR64
# vmalloc stability smoke test
-run_test ./test_vmalloc.sh smoke
+CATEGORY="vmalloc" run_test ./test_vmalloc.sh smoke
-run_test ./mremap_dontunmap
+CATEGORY="mremap" run_test ./mremap_dontunmap
-run_test ./test_hmm.sh smoke
+CATEGORY="hmm" run_test ./test_hmm.sh smoke
# MADV_POPULATE_READ and MADV_POPULATE_WRITE tests
-run_test ./madv_populate
+CATEGORY="madv_populate" run_test ./madv_populate
-run_test ./memfd_secret
+CATEGORY="memfd_secret" run_test ./memfd_secret
# KSM MADV_MERGEABLE test with 10 identical pages
-run_test ./ksm_tests -M -p 10
+CATEGORY="ksm" run_test ./ksm_tests -M -p 10
# KSM unmerge test
-run_test ./ksm_tests -U
+CATEGORY="ksm" run_test ./ksm_tests -U
# KSM test with 10 zero pages and use_zero_pages = 0
-run_test ./ksm_tests -Z -p 10 -z 0
+CATEGORY="ksm" run_test ./ksm_tests -Z -p 10 -z 0
# KSM test with 10 zero pages and use_zero_pages = 1
-run_test ./ksm_tests -Z -p 10 -z 1
+CATEGORY="ksm" run_test ./ksm_tests -Z -p 10 -z 1
# KSM test with 2 NUMA nodes and merge_across_nodes = 1
-run_test ./ksm_tests -N -m 1
+CATEGORY="ksm_numa" run_test ./ksm_tests -N -m 1
# KSM test with 2 NUMA nodes and merge_across_nodes = 0
-run_test ./ksm_tests -N -m 0
+CATEGORY="ksm_numa" run_test ./ksm_tests -N -m 0
+
+echo "Ran $nr_tests_ran tests"
exit $exitcode
--
2.31.1
Greetings,
FYI, we noticed the following commit (built with gcc-11):
commit: c5fac7744cf417920552cb4eec2600fb53048b0b ("[RFC PATCH] selftests/vm: enable running select groups of tests")
url: https://github.com/intel-lab-lkp/linux/commits/Joel-Savitz/selftests-vm-enable-running-select-groups-of-tests/20220610-082232
base: https://git.kernel.org/cgit/linux/kernel/git/shuah/linux-kselftest.git next
patch link: https://lore.kernel.org/lkml/[email protected]
in testcase: kernel-selftests
version: kernel-selftests-x86_64-cebf67a3-1_20220612
with the following parameters:
sc_nr_hugepages: 2
group: vm
ucode: 0xec
test-description: The kernel contains a set of "self tests" under the tools/testing/selftests/ directory. These are intended to be small unit tests to exercise individual code paths in the kernel.
test-url: https://www.kernel.org/doc/Documentation/kselftest.txt
on test machine: 12 threads 1 sockets Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz with 16G memory
caused the changes below (please refer to the attached dmesg/kmsg for the entire log/backtrace):
If you fix the issue, kindly add the following tag
Reported-by: kernel test robot <[email protected]>
Please note: besides kernel-selftests.vm.run_vmtests.sh../hugepage_mmap.fail,
we also observed other tests that fail on this commit but pass on its parent.
Here we list all tests run on both the parent and this commit, even those
with the same results. FYI
=========================================================================================
compiler/group/kconfig/rootfs/sc_nr_hugepages/tbox_group/testcase/ucode:
gcc-11/vm/x86_64-rhel-8.3-kselftests/debian-11.1-x86_64-20220510.cgz/2/lkp-cfl-d1/kernel-selftests/0xec
v5.19-rc1 c5fac7744cf417920552cb4eec2
---------------- ---------------------------
fail:runs %reproduction fail:runs
| | |
18:18 -67% 6:6 kernel-selftests.vm.madv_populate.fail
18:18 -67% 6:6 kernel-selftests.vm.run_vmtests.sh../compaction_test.pass
18:18 -67% 6:6 kernel-selftests.vm.run_vmtests.sh../gup_test_a.pass
18:18 -67% 6:6 kernel-selftests.vm.run_vmtests.sh../gup_test_ct_F_0x1_0_19_0x1000.pass
18:18 -67% 6:6 kernel-selftests.vm.run_vmtests.sh../gup_test_u.pass
:18 33% 6:6 kernel-selftests.vm.run_vmtests.sh../hugepage_mmap.fail
18:18 -100% :6 kernel-selftests.vm.run_vmtests.sh../hugepage_mmap.pass
:18 33% 6:6 kernel-selftests.vm.run_vmtests.sh../hugepage_mremap_./huge/huge_mremap.fail
18:18 -100% :6 kernel-selftests.vm.run_vmtests.sh../hugepage_mremap_./huge/huge_mremap.pass
:18 33% 6:6 kernel-selftests.vm.run_vmtests.sh../hugepage_shm.fail
18:18 -100% :6 kernel-selftests.vm.run_vmtests.sh../hugepage_shm.pass
18:18 -67% 6:6 kernel-selftests.vm.run_vmtests.sh../hugepage_vmemmap.pass
:18 33% 6:6 kernel-selftests.vm.run_vmtests.sh../hugetlb_madvise_./huge/madvise_test.fail
18:18 -100% :6 kernel-selftests.vm.run_vmtests.sh../hugetlb_madvise_./huge/madvise_test.pass
18:18 -67% 6:6 kernel-selftests.vm.run_vmtests.sh../map_fixed_noreplace.pass
:18 33% 6:6 kernel-selftests.vm.run_vmtests.sh../map_hugetlb.fail
18:18 -100% :6 kernel-selftests.vm.run_vmtests.sh../map_hugetlb.pass
18:18 -67% 6:6 kernel-selftests.vm.run_vmtests.sh../map_populate.pass
18:18 -67% 6:6 kernel-selftests.vm.run_vmtests.sh../mlock2_tests.pass
18:18 -67% 6:6 kernel-selftests.vm.run_vmtests.sh../mlock_random_test.pass
18:18 -67% 6:6 kernel-selftests.vm.run_vmtests.sh../mrelease_test.fail
18:18 -67% 6:6 kernel-selftests.vm.run_vmtests.sh../mremap_test.pass
18:18 -67% 6:6 kernel-selftests.vm.run_vmtests.sh../thuge_gen.pass
18:18 -67% 6:6 kernel-selftests.vm.run_vmtests.sh../userfaultfd_anon_20_16.pass
:18 33% 6:6 kernel-selftests.vm.run_vmtests.sh../userfaultfd_hugetlb_0_32.fail
18:18 -100% :6 kernel-selftests.vm.run_vmtests.sh../userfaultfd_hugetlb_128_32.pass
18:18 -67% 6:6 kernel-selftests.vm.run_vmtests.sh../userfaultfd_shmem_20_16.pass
18:18 -67% 6:6 kernel-selftests.vm.run_vmtests.sh../va_128TBswitch.fail
18:18 -67% 6:6 kernel-selftests.vm.run_vmtests.sh../virtual_address_range.pass
18:18 -67% 6:6 kernel-selftests.vm.run_vmtests.sh.fail
18:18 -67% 6:6 kernel-selftests.vm.run_vmtests.sh.sudo_u_nobody_./on_fault_limit.pass
18:18 -67% 6:6 kernel-selftests.vm.soft-dirty.pass
18:18 -67% 6:6 kernel-selftests.vm.split_huge_page_test.pass
# selftests: vm: run_vmtests.sh
# Selected test items: default
# -----------------------
# running ./hugepage-mmap
# -----------------------
# mmap: Cannot allocate memory
# [FAIL]
# ----------------------
# running ./hugepage-shm
# ----------------------
# shmget: Cannot allocate memory
# [FAIL]
# ---------------------
# running ./map_hugetlb
# ---------------------
# mmap: Cannot allocate memory
# Default size hugepages
# Mapping 256 Mbytes
# [FAIL]
# --------------------------------------------
# running ./hugepage-mremap ./huge/huge_mremap
# --------------------------------------------
# mmap1: Cannot allocate memory
# Map haddr: Returned address is 0xffffffffffffffff
# [FAIL]
# --------------------------
# running ./hugepage-vmemmap
# --------------------------
# Returned address is 0x7f4d3c600000 whose pfn is 1bc800
# [PASS]
# ---------------------------------------------
# running ./hugetlb-madvise ./huge/madvise-test
# ---------------------------------------------
# Not enough free huge pages to test, exiting!
# [FAIL]
# NOTE: These hugetlb tests provide minimal coverage. Use
# https://github.com/libhugetlbfs/libhugetlbfs.git for
# hugetlb regression testing.
...
# ----------------------------------
# running ./userfaultfd hugetlb 0 32
# ----------------------------------
# ERROR: invalid MiB (errno=0, line=1662)
#
# Usage: ./userfaultfd <test type> <MiB> <bounces> [hugetlbfs_file]
#
# Supported <test type>: anon, hugetlb, hugetlb_shared, shmem
#
# Examples:
#
# # Run anonymous memory test on 100MiB region with 99999 bounces:
# ./userfaultfd anon 100 99999
#
# # Run share memory test on 1GiB region with 99 bounces:
# ./userfaultfd shmem 1000 99
#
# # Run hugetlb memory test on 256MiB region with 50 bounces:
# ./userfaultfd hugetlb 256 50
#
# # Run the same hugetlb test but using shared file:
# ./userfaultfd hugetlb_shared 256 50 /dev/hugepages/hugefile
#
# # 10MiB-~6GiB 999 bounces anonymous test, continue forever unless an error triggers
# while ./userfaultfd anon $[RANDOM % 6000 + 10] 999; do true; done
#
# [FAIL]
...
To reproduce:
git clone https://github.com/intel/lkp-tests.git
cd lkp-tests
sudo bin/lkp install job.yaml # job file is attached in this email
bin/lkp split-job --compatible job.yaml # generate the yaml file for lkp run
sudo bin/lkp run generated-yaml-file
# if you come across any failure that blocks the test,
# please remove the ~/.lkp and /lkp directories to run from a clean state.
--
0-DAY CI Kernel Test Service
https://01.org/lkp
Hey Joel,
good work on this! I don't see anything wrong with it, and the added
functionality is very nice.
I reviewed the code changes and everything seems correct.
I also ran the script with a few examples, both correct and incorrect,
and was not able to break it.
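
For reference, the invocations I tried were roughly along these lines
(reconstructed from memory, so treat this as a sketch rather than an exact
transcript; the bogus group name is just a placeholder):

	# valid selection: run only these three groups
	TEST_ITEMS="hmm mmap ksm" ./run_vmtests.sh

	# unknown group name: nothing matches, the script prints "Ran 0 tests"
	TEST_ITEMS="no_such_group" ./run_vmtests.sh

	# TEST_ITEMS unset: default behavior, run everything
	./run_vmtests.sh

	# print the usage text
	./run_vmtests.sh -h
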
Cheers,
-- Nico
Tested-by: Nico Pache <[email protected]>
Acked-by: Nico Pache <[email protected]>
On Thu, Jun 9, 2022 at 8:20 PM Joel Savitz <[email protected]> wrote:
>
> Add the ability to run one or more groups of vm tests (specified
> by the environment variable TEST_ITEMS). Preserve the existing default
> behavior of running all tests when TEST_ITEMS is empty or "default".
>
> [rest of the original patch snipped; quoted in full above]