2023-05-17 16:17:47

by Jiaqi Yan

[permalink] [raw]
Subject: [PATCH v1 0/3] Improve hugetlbfs read on HWPOISON hugepages

Today when hardware memory is corrupted in a hugetlb hugepage,
kernel leaves the hugepage in pagecache [1]; otherwise future mmap or
read would be subject to silent data corruption. This is implemented by
returning -EIO from hugetlb_read_iter immediately if the hugepage has
HWPOISON flag set.

Since memory_failure already tracks the raw HWPOISON subpages in a
hugepage, a natural improvement is possible: if userspace only asks for
healthy subpages in the pagecache, kernel can return these data.

This patchset implements this improvement. It consists of three parts.
The 1st commit exports the functionality to tell if a subpage inside a
hugetlb hugepage is a raw HWPOISON page. The 2nd commit teaches
hugetlbfs_read_iter to return as many healthy bytes as possible.
The 3rd commit properly tests this new feature.

[1] commit 8625147cafaa ("hugetlbfs: don't delete error page from pagecache")

Jiaqi Yan (3):
mm/hwpoison: find subpage in hugetlb HWPOISON list
hugetlbfs: improve read HWPOISON hugepage
selftests/mm: add tests for HWPOISON hugetlbfs read

fs/hugetlbfs/inode.c | 62 +++-
include/linux/mm.h | 23 ++
mm/memory-failure.c | 26 +-
tools/testing/selftests/mm/.gitignore | 1 +
tools/testing/selftests/mm/Makefile | 1 +
.../selftests/mm/hugetlb-read-hwpoison.c | 322 ++++++++++++++++++
6 files changed, 419 insertions(+), 16 deletions(-)
create mode 100644 tools/testing/selftests/mm/hugetlb-read-hwpoison.c

--
2.40.1.606.ga4b1b128d6-goog



2023-05-17 16:19:10

by Jiaqi Yan

[permalink] [raw]
Subject: [PATCH v1 3/3] selftests/mm: add tests for HWPOISON hugetlbfs read

Add tests for the improvement made to read operations on HWPOISON
hugetlb page with different read granularities.

0) Simple regression test on read.
1) Sequential reads, page by page, should succeed until they encounter
the 1st raw HWPOISON subpage.
2) After skipping the raw HWPOISON subpage via lseek, reads always succeed.

Signed-off-by: Jiaqi Yan <[email protected]>
---
tools/testing/selftests/mm/.gitignore | 1 +
tools/testing/selftests/mm/Makefile | 1 +
.../selftests/mm/hugetlb-read-hwpoison.c | 322 ++++++++++++++++++
3 files changed, 324 insertions(+)
create mode 100644 tools/testing/selftests/mm/hugetlb-read-hwpoison.c

diff --git a/tools/testing/selftests/mm/.gitignore b/tools/testing/selftests/mm/.gitignore
index 8917455f4f51..fe8224d2ee06 100644
--- a/tools/testing/selftests/mm/.gitignore
+++ b/tools/testing/selftests/mm/.gitignore
@@ -5,6 +5,7 @@ hugepage-mremap
hugepage-shm
hugepage-vmemmap
hugetlb-madvise
+hugetlb-read-hwpoison
khugepaged
map_hugetlb
map_populate
diff --git a/tools/testing/selftests/mm/Makefile b/tools/testing/selftests/mm/Makefile
index 23af4633f0f4..6cc63668c50e 100644
--- a/tools/testing/selftests/mm/Makefile
+++ b/tools/testing/selftests/mm/Makefile
@@ -37,6 +37,7 @@ TEST_GEN_PROGS += compaction_test
TEST_GEN_PROGS += gup_test
TEST_GEN_PROGS += hmm-tests
TEST_GEN_PROGS += hugetlb-madvise
+TEST_GEN_PROGS += hugetlb-read-hwpoison
TEST_GEN_PROGS += hugepage-mmap
TEST_GEN_PROGS += hugepage-mremap
TEST_GEN_PROGS += hugepage-shm
diff --git a/tools/testing/selftests/mm/hugetlb-read-hwpoison.c b/tools/testing/selftests/mm/hugetlb-read-hwpoison.c
new file mode 100644
index 000000000000..2f8e84eceb3d
--- /dev/null
+++ b/tools/testing/selftests/mm/hugetlb-read-hwpoison.c
@@ -0,0 +1,322 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#define _GNU_SOURCE
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+
+#include <linux/magic.h>
+#include <sys/mman.h>
+#include <sys/statfs.h>
+#include <errno.h>
+#include <stdbool.h>
+
+#include "../kselftest.h"
+
+#define PREFIX " ... "
+#define ERROR_PREFIX " !!! "
+
+#define MAX_WRITE_READ_CHUNK_SIZE (getpagesize() * 16)
+#define MAX(a, b) (((a) > (b)) ? (a) : (b))
+
+/*
+ * Tri-state outcome for each sub-test.  SKIPPED is used when test setup
+ * (ftruncate/mmap) fails, so environment problems are distinguishable
+ * from real regressions; only FAILED aborts the whole run in main().
+ */
+enum test_status {
+ TEST_PASSED = 0,
+ TEST_FAILED = 1,
+ TEST_SKIPPED = 2,
+};
+
+/*
+ * Map an enum test_status to a printable name for the summary lines.
+ * The default arm defensively covers out-of-range values.
+ */
+static char *status_to_str(enum test_status status)
+{
+ switch (status) {
+ case TEST_PASSED:
+ return "TEST_PASSED";
+ case TEST_FAILED:
+ return "TEST_FAILED";
+ case TEST_SKIPPED:
+ return "TEST_SKIPPED";
+ default:
+ return "TEST_???";
+ }
+}
+
+/*
+ * Fill the mapping with a verifiable pattern: the k-th wr_chunk_size
+ * chunk (0-based) is memset to the byte value k+1 (iter is incremented
+ * before each memset).  The readers below reconstruct and check this
+ * pattern per chunk.
+ *
+ * NOTE(review): the int return is always 0 and no caller checks it --
+ * could be void.  Also, iter is a char and wraps after 255 chunks;
+ * presumably fine for one 2MB hugepage at the chunk sizes used here,
+ * but verify before testing larger files.
+ */
+static int setup_filemap(char *filemap, size_t len, size_t wr_chunk_size)
+{
+ char iter = 0;
+
+ for (size_t offset = 0; offset < len;
+ offset += wr_chunk_size) {
+ iter++;
+ memset(filemap + offset, iter, wr_chunk_size);
+ }
+
+ return 0;
+}
+
+/*
+ * Check that every byte in buf[0..len) equals val (the pattern written
+ * by setup_filemap for this chunk).  Prints the first mismatch and
+ * returns false; returns true if the whole chunk matches.
+ *
+ * NOTE(review): %lu assumes size_t == unsigned long; %zu would be the
+ * portable specifier.
+ */
+static bool verify_chunk(char *buf, size_t len, char val)
+{
+ size_t i;
+
+ for (i = 0; i < len; ++i) {
+ if (buf[i] != val) {
+ printf(ERROR_PREFIX "check fail: buf[%lu] = %u != %u\n",
+ i, buf[i], val);
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/*
+ * lseek() to offset, then read() the file in wr_chunk_size chunks,
+ * verifying each chunk against the pattern laid down by setup_filemap.
+ * Returns true iff exactly `expected` bytes were read and all verified.
+ *
+ * NOTE(review): the `+ offset % wr_chunk_size` term in the val
+ * initializer is zero only when offset is chunk-aligned; for an
+ * unaligned offset the computed start value looks wrong -- confirm all
+ * callers pass chunk-aligned offsets (main() currently does, since
+ * len/2 and pagesize multiples are multiples of every wr_chunk_size
+ * tested).
+ *
+ * NOTE(review): caller must keep wr_chunk_size <=
+ * MAX_WRITE_READ_CHUNK_SIZE or buf overflows; printf uses %lx for
+ * off_t/ssize_t and %lx for size_t -- %jd/%zd/%zx would be portable.
+ */
+static bool seek_read_hugepage_filemap(int fd, size_t len, size_t wr_chunk_size,
+ off_t offset, size_t expected)
+{
+ char buf[MAX_WRITE_READ_CHUNK_SIZE];
+ ssize_t ret_count = 0;
+ ssize_t total_ret_count = 0;
+ /* Pattern value of the chunk just before offset; ++val below yields
+ * the expected value of the first chunk actually read. */
+ char val = offset / wr_chunk_size + offset % wr_chunk_size;
+
+ printf(PREFIX "init val=%u with offset=0x%lx\n", val, offset);
+ printf(PREFIX "expect to read 0x%lx bytes of data in total\n",
+ expected);
+ if (lseek(fd, offset, SEEK_SET) < 0) {
+ perror(ERROR_PREFIX "seek failed");
+ return false;
+ }
+
+ while (offset + total_ret_count < len) {
+ ret_count = read(fd, buf, wr_chunk_size);
+ if (ret_count == 0) {
+ /* EOF before len: let the final count check decide. */
+ printf(PREFIX "read reach end of the file\n");
+ break;
+ } else if (ret_count < 0) {
+ /* Expected at the first raw HWPOISON subpage (-EIO). */
+ perror(ERROR_PREFIX "read failed");
+ break;
+ }
+ ++val;
+ if (!verify_chunk(buf, ret_count, val))
+ return false;
+
+ total_ret_count += ret_count;
+ }
+ printf(PREFIX "actually read 0x%lx bytes of data in total\n",
+ total_ret_count);
+
+ return total_ret_count == expected;
+}
+
+/*
+ * read() the file from its current position (start of file for all
+ * current callers) in wr_chunk_size chunks, verifying each chunk
+ * against setup_filemap's pattern.  Returns true iff exactly
+ * `expected` bytes were read and all chunks verified.
+ *
+ * NOTE(review): `total_ret_count < len` compares ssize_t with size_t
+ * (signed/unsigned); harmless here since total_ret_count is kept
+ * non-negative, but worth an explicit cast.  buf requires
+ * wr_chunk_size <= MAX_WRITE_READ_CHUNK_SIZE, as in
+ * seek_read_hugepage_filemap.
+ */
+static bool read_hugepage_filemap(int fd, size_t len,
+ size_t wr_chunk_size, size_t expected)
+{
+ char buf[MAX_WRITE_READ_CHUNK_SIZE];
+ ssize_t ret_count = 0;
+ ssize_t total_ret_count = 0;
+ char val = 0;
+
+ printf(PREFIX "expect to read 0x%lx bytes of data in total\n",
+ expected);
+ while (total_ret_count < len) {
+ ret_count = read(fd, buf, wr_chunk_size);
+ if (ret_count == 0) {
+ printf(PREFIX "read reach end of the file\n");
+ break;
+ } else if (ret_count < 0) {
+ /* Expected at the first raw HWPOISON subpage (-EIO). */
+ perror(ERROR_PREFIX "read failed");
+ break;
+ }
+ ++val;
+ if (!verify_chunk(buf, ret_count, val))
+ return false;
+
+ total_ret_count += ret_count;
+ }
+ printf(PREFIX "actually read 0x%lx bytes of data in total\n",
+ total_ret_count);
+
+ return total_ret_count == expected;
+}
+
+/*
+ * Regression test: with no poison injected, a read() of the whole
+ * hugepage-backed file must return all `len` bytes with the expected
+ * pattern.  Returns SKIPPED if setup (ftruncate/mmap) fails, FAILED if
+ * the read or final cleanup truncate fails, PASSED otherwise.
+ */
+static enum test_status
+test_hugetlb_read(int fd, size_t len, size_t wr_chunk_size)
+{
+ enum test_status status = TEST_SKIPPED;
+ char *filemap = NULL;
+
+ if (ftruncate(fd, len) < 0) {
+ perror(ERROR_PREFIX "ftruncate failed");
+ return status;
+ }
+
+ /* MAP_POPULATE faults the hugepage in so setup_filemap can write it. */
+ filemap = mmap(NULL, len, PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_POPULATE, fd, 0);
+ if (filemap == MAP_FAILED) {
+ perror(ERROR_PREFIX "mmap for primary mapping failed");
+ goto done;
+ }
+
+ setup_filemap(filemap, len, wr_chunk_size);
+ /* Setup succeeded: from here on, anything short of success is FAILED. */
+ status = TEST_FAILED;
+
+ if (read_hugepage_filemap(fd, len, wr_chunk_size, len))
+ status = TEST_PASSED;
+
+ munmap(filemap, len);
+done:
+ /* Shrink back to 0 so the next test starts from an empty file. */
+ if (ftruncate(fd, 0) < 0) {
+ perror(ERROR_PREFIX "ftruncate back to 0 failed");
+ status = TEST_FAILED;
+ }
+
+ return status;
+}
+
+/*
+ * Poison one raw subpage inside the hugepage (via MADV_HWPOISON), then:
+ *  - skip_hwpoison_page == false: read from the start and expect exactly
+ *    the healthy prefix (len/2 + one page) before read() hits -EIO;
+ *  - skip_hwpoison_page == true: lseek past the poisoned page and expect
+ *    the remaining healthy suffix to be fully readable.
+ * Returns SKIPPED on setup failure, PASSED/FAILED otherwise.
+ *
+ * Requires CAP_SYS_ADMIN (or the test run as root) for MADV_HWPOISON.
+ *
+ * NOTE(review): the MAX(2 * pagesize, wr_chunk_size) in the skip case
+ * appears to round the resume offset up so it is chunk-aligned and past
+ * the poisoned page; confirm this matches seek_read_hugepage_filemap's
+ * alignment assumption for every wr_chunk_size in main().
+ */
+static enum test_status
+test_hugetlb_read_hwpoison(int fd, size_t len, size_t wr_chunk_size,
+ bool skip_hwpoison_page)
+{
+ enum test_status status = TEST_SKIPPED;
+ char *filemap = NULL;
+ char *hwp_addr = NULL;
+ const unsigned long pagesize = getpagesize();
+
+ if (ftruncate(fd, len) < 0) {
+ perror(ERROR_PREFIX "ftruncate failed");
+ return status;
+ }
+
+ filemap = mmap(NULL, len, PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_POPULATE, fd, 0);
+ if (filemap == MAP_FAILED) {
+ perror(ERROR_PREFIX "mmap for primary mapping failed");
+ goto done;
+ }
+
+ setup_filemap(filemap, len, wr_chunk_size);
+ status = TEST_FAILED;
+
+ /*
+ * Poisoned hugetlb page layout (assume hugepagesize=2MB):
+ * |<---------------------- 1MB ---------------------->|
+ * |<---- healthy page ---->|<---- HWPOISON page ----->|
+ * |<------------------- (1MB - 8KB) ----------------->|
+ */
+ hwp_addr = filemap + len / 2 + pagesize;
+ if (madvise(hwp_addr, pagesize, MADV_HWPOISON) < 0) {
+ perror(ERROR_PREFIX "MADV_HWPOISON failed");
+ goto unmap;
+ }
+
+ if (!skip_hwpoison_page) {
+ /*
+ * Userspace should be able to read (1MB + 1 page) from
+ * the beginning of the HWPOISONed hugepage.
+ */
+ if (read_hugepage_filemap(fd, len, wr_chunk_size,
+ len / 2 + pagesize))
+ status = TEST_PASSED;
+ } else {
+ /*
+ * Userspace should be able to read (1MB - 2 pages) from
+ * HWPOISONed hugepage.
+ */
+ if (seek_read_hugepage_filemap(fd, len, wr_chunk_size,
+ len / 2 + MAX(2 * pagesize, wr_chunk_size),
+ len / 2 - MAX(2 * pagesize, wr_chunk_size)))
+ status = TEST_PASSED;
+ }
+
+unmap:
+ munmap(filemap, len);
+done:
+ /* Truncating to 0 also releases the poisoned hugepage from pagecache. */
+ if (ftruncate(fd, 0) < 0) {
+ perror(ERROR_PREFIX "ftruncate back to 0 failed");
+ status = TEST_FAILED;
+ }
+
+ return status;
+}
+
+/*
+ * Create a hugetlb-backed fd: despite the name, this uses
+ * memfd_create(MFD_HUGETLB) rather than opening a mounted hugetlbfs
+ * path, then confirms via fstatfs that the fd really lives on hugetlbfs
+ * (f_type == HUGETLBFS_MAGIC).  On success, *file_stat is filled (the
+ * caller uses f_bsize as the hugepage size) and the fd is returned;
+ * on any failure, returns -1 with the fd closed.
+ *
+ * NOTE(review): the `close:` label shadows the close(2) function name
+ * -- legal but easy to misread; a name like `out_close` would be
+ * clearer.  Also, memfd_create/close/fstatfs need <unistd.h> (not
+ * included at the top of this file) -- presumably pulled in indirectly;
+ * verify the build on non-glibc toolchains.
+ */
+static int create_hugetlbfs_file(struct statfs *file_stat)
+{
+ int fd;
+
+ fd = memfd_create("hugetlb_tmp", MFD_HUGETLB);
+ if (fd < 0) {
+ perror(ERROR_PREFIX "could not open hugetlbfs file");
+ return -1;
+ }
+
+ memset(file_stat, 0, sizeof(*file_stat));
+ if (fstatfs(fd, file_stat)) {
+ perror(ERROR_PREFIX "fstatfs failed");
+ goto close;
+ }
+ if (file_stat->f_type != HUGETLBFS_MAGIC) {
+ printf(ERROR_PREFIX "not hugetlbfs file\n");
+ goto close;
+ }
+
+ return fd;
+close:
+ close(fd);
+ return -1;
+}
+
+/*
+ * Run the three tests (regression read, read-into-poison, seek-past-
+ * poison) at several read/write chunk granularities.  A fresh hugetlb
+ * memfd is created per test so poison/truncation state never leaks
+ * between tests.  Exit is -1 on any TEST_FAILED or setup failure;
+ * TEST_SKIPPED (e.g. no free hugepages) does not fail the run.
+ *
+ * NOTE(review): each chunk-size iteration consumes hugepages poisoned
+ * by the previous one; on systems with few reserved hugepages the
+ * later iterations will report mmap ENOMEM and SKIP (as seen in the
+ * 0-day robot report) -- consider documenting the required
+ * nr_hugepages for a full run.
+ */
+int main(void)
+{
+ int fd;
+ struct statfs file_stat;
+ enum test_status status;
+ /* Test read() in different granularity. */
+ size_t wr_chunk_sizes[] = {
+ getpagesize() / 2, getpagesize(),
+ getpagesize() * 2, getpagesize() * 4
+ };
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE(wr_chunk_sizes); ++i) {
+ printf(PREFIX "Write/read chunk size=0x%lx\n",
+ wr_chunk_sizes[i]);
+
+ fd = create_hugetlbfs_file(&file_stat);
+ if (fd < 0)
+ goto create_failure;
+ printf("HugeTLB read regression test...\n");
+ status = test_hugetlb_read(fd, file_stat.f_bsize,
+ wr_chunk_sizes[i]);
+ printf("HugeTLB read regression test...%s\n",
+ status_to_str(status));
+ close(fd);
+ if (status == TEST_FAILED)
+ return -1;
+
+ fd = create_hugetlbfs_file(&file_stat);
+ if (fd < 0)
+ goto create_failure;
+ printf("HugeTLB read HWPOISON test...\n");
+ status = test_hugetlb_read_hwpoison(fd, file_stat.f_bsize,
+ wr_chunk_sizes[i], false);
+ printf("HugeTLB read HWPOISON test...%s\n",
+ status_to_str(status));
+ close(fd);
+ if (status == TEST_FAILED)
+ return -1;
+
+ fd = create_hugetlbfs_file(&file_stat);
+ if (fd < 0)
+ goto create_failure;
+ printf("HugeTLB seek then read HWPOISON test...\n");
+ status = test_hugetlb_read_hwpoison(fd, file_stat.f_bsize,
+ wr_chunk_sizes[i], true);
+ printf("HugeTLB seek then read HWPOISON test...%s\n",
+ status_to_str(status));
+ close(fd);
+ if (status == TEST_FAILED)
+ return -1;
+ }
+
+ return 0;
+
+create_failure:
+ printf(ERROR_PREFIX "Abort test: failed to create hugetlbfs file\n");
+ return -1;
+}
--
2.40.1.606.ga4b1b128d6-goog


2023-05-18 00:12:04

by Mike Kravetz

[permalink] [raw]
Subject: Re: [PATCH v1 0/3] Improve hugetlbfs read on HWPOISON hugepages

On 05/17/23 16:09, Jiaqi Yan wrote:
> Today when hardware memory is corrupted in a hugetlb hugepage,
> kernel leaves the hugepage in pagecache [1]; otherwise future mmap or
> read will suject to silent data corruption. This is implemented by
> returning -EIO from hugetlb_read_iter immediately if the hugepage has
> HWPOISON flag set.
>
> Since memory_failure already tracks the raw HWPOISON subpages in a
> hugepage, a natural improvement is possible: if userspace only asks for
> healthy subpages in the pagecache, kernel can return these data.

Thanks for putting this together.

I recall discussing this some time back, and deciding to wait and see
how HGM would progress. Since it may be some time before HGM goes
upstream, it would be reasonable to consider this again.

One quick question.
Do you have an actual use case for this? It certainly is an improvement
over existing functionality. However, I am not aware of too many (?any?)
users actually doing read() calls on hugetlb files.
--
Mike Kravetz

> This patchset implements this improvement. It consist of three parts.
> The 1st commit exports the functionality to tell if a subpage inside a
> hugetlb hugepage is a raw HWPOISON page. The 2nd commit teaches
> hugetlbfs_read_iter to return as many healthy bytes as possible.
> The 3rd commit properly tests this new feature.
>
> [1] commit 8625147cafaa ("hugetlbfs: don't delete error page from pagecache")
>
> Jiaqi Yan (3):
> mm/hwpoison: find subpage in hugetlb HWPOISON list
> hugetlbfs: improve read HWPOISON hugepage
> selftests/mm: add tests for HWPOISON hugetlbfs read
>
> fs/hugetlbfs/inode.c | 62 +++-
> include/linux/mm.h | 23 ++
> mm/memory-failure.c | 26 +-
> tools/testing/selftests/mm/.gitignore | 1 +
> tools/testing/selftests/mm/Makefile | 1 +
> .../selftests/mm/hugetlb-read-hwpoison.c | 322 ++++++++++++++++++
> 6 files changed, 419 insertions(+), 16 deletions(-)
> create mode 100644 tools/testing/selftests/mm/hugetlb-read-hwpoison.c
>
> --
> 2.40.1.606.ga4b1b128d6-goog
>

2023-05-18 16:18:20

by Jiaqi Yan

[permalink] [raw]
Subject: Re: [PATCH v1 0/3] Improve hugetlbfs read on HWPOISON hugepages

On Wed, May 17, 2023 at 4:30 PM Mike Kravetz <[email protected]> wrote:
>
> On 05/17/23 16:09, Jiaqi Yan wrote:
> > Today when hardware memory is corrupted in a hugetlb hugepage,
> > kernel leaves the hugepage in pagecache [1]; otherwise future mmap or
> > read will suject to silent data corruption. This is implemented by
> > returning -EIO from hugetlb_read_iter immediately if the hugepage has
> > HWPOISON flag set.
> >
> > Since memory_failure already tracks the raw HWPOISON subpages in a
> > hugepage, a natural improvement is possible: if userspace only asks for
> > healthy subpages in the pagecache, kernel can return these data.
>
> Thanks for putting this together.
>
> I recall discussing this some time back, and deciding to wait and see
> how HGM would progress. Since it may be some time before HGM goes
> upstream, it would be reasonable to consider this again.

This improvement actually does NOT depend on HGM at all. No page table
related stuff involved here. The other RFC [2] I sent earlier DOES
require HGM. This improvement was brought up by James when we were
working on [2]. In "Future Work" section of the cover letter, I
thought HGM was needed but soon when I code it up, I found I was
wrong.

>
> One quick question.
> Do you have an actual use case for this? It certainly is an improvement
> over existing functionality. However, I am not aware of too many (?any?)
> users actually doing read() calls on hugetlb files.

I don't have any use case. I did search on Github for around half a
hour and all the hugetlb usages are done via mmap.

> --
> Mike Kravetz
>
> > This patchset implements this improvement. It consist of three parts.
> > The 1st commit exports the functionality to tell if a subpage inside a
> > hugetlb hugepage is a raw HWPOISON page. The 2nd commit teaches
> > hugetlbfs_read_iter to return as many healthy bytes as possible.
> > The 3rd commit properly tests this new feature.
> >
> > [1] commit 8625147cafaa ("hugetlbfs: don't delete error page from pagecache")

[2] https://lore.kernel.org/linux-mm/[email protected]/T/#m97c6edef8ad0cc9b064e1fd9369b8521dcfa43de

> >
> > Jiaqi Yan (3):
> > mm/hwpoison: find subpage in hugetlb HWPOISON list
> > hugetlbfs: improve read HWPOISON hugepage
> > selftests/mm: add tests for HWPOISON hugetlbfs read
> >
> > fs/hugetlbfs/inode.c | 62 +++-
> > include/linux/mm.h | 23 ++
> > mm/memory-failure.c | 26 +-
> > tools/testing/selftests/mm/.gitignore | 1 +
> > tools/testing/selftests/mm/Makefile | 1 +
> > .../selftests/mm/hugetlb-read-hwpoison.c | 322 ++++++++++++++++++
> > 6 files changed, 419 insertions(+), 16 deletions(-)
> > create mode 100644 tools/testing/selftests/mm/hugetlb-read-hwpoison.c
> >
> > --
> > 2.40.1.606.ga4b1b128d6-goog
> >

(Sorry if you received twice, was sent in a wrong way a while ago)

2023-05-18 22:35:13

by Mike Kravetz

[permalink] [raw]
Subject: Re: [PATCH v1 0/3] Improve hugetlbfs read on HWPOISON hugepages

On 05/18/23 09:10, Jiaqi Yan wrote:
> On Wed, May 17, 2023 at 4:30 PM Mike Kravetz <[email protected]> wrote:
> >
> > On 05/17/23 16:09, Jiaqi Yan wrote:
> > > Today when hardware memory is corrupted in a hugetlb hugepage,
> > > kernel leaves the hugepage in pagecache [1]; otherwise future mmap or
> > > read will suject to silent data corruption. This is implemented by
> > > returning -EIO from hugetlb_read_iter immediately if the hugepage has
> > > HWPOISON flag set.
> > >
> > > Since memory_failure already tracks the raw HWPOISON subpages in a
> > > hugepage, a natural improvement is possible: if userspace only asks for
> > > healthy subpages in the pagecache, kernel can return these data.
> >
> > Thanks for putting this together.
> >
> > I recall discussing this some time back, and deciding to wait and see
> > how HGM would progress. Since it may be some time before HGM goes
> > upstream, it would be reasonable to consider this again.
>
> This improvement actually does NOT depend on HGM at all. No page table
> related stuff involved here. The other RFC [2] I sent earlier DOES
> require HGM. This improvement was brought up by James when we were
> working on [2]. In "Future Work" section of the cover letter, I
> thought HGM was needed but soon when I code it up, I found I was
> wrong.

Right, this has no HGM dependencies and is actually the only way I can think
of for users to extract some information from a poisoned hugetlb page.

> >
> > One quick question.
> > Do you have an actual use case for this? It certainly is an improvement
> > over existing functionality. However, I am not aware of too many (?any?)
> > users actually doing read() calls on hugetlb files.
>
> I don't have any use case. I did search on Github for around half a
> hour and all the hugetlb usages are done via mmap.
>

Ok, I was mostly curious as mmap seems to be the most common way of
accessing hugetlb pages.

Even though there is not a known use case today, I think this could be
useful for the reason above: extracting data from a poisoned hugetlb page.
Without HGM this is the only way to extract such data.

Unfortunately, read() is not an option for sysV shared memory or private
mappings. HGM would help there.
--
Mike Kravetz

2023-05-23 07:46:55

by kernel test robot

[permalink] [raw]
Subject: Re: [PATCH v1 3/3] selftests/mm: add tests for HWPOISON hugetlbfs read



Hello,

kernel test robot noticed "kernel-selftests.mm.hugepage-vmemmap.fail" on:

(we know that this commit adds hugetlb-read-hwpoison test, so we actually
want to seek some advice by this report, below will mention further)

commit: d84de15119b74f10be3c0a369561ca9b452d07d7 ("[PATCH v1 3/3] selftests/mm: add tests for HWPOISON hugetlbfs read")
url: https://github.com/intel-lab-lkp/linux/commits/Jiaqi-Yan/mm-hwpoison-find-subpage-in-hugetlb-HWPOISON-list/20230518-003149
base: https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git f1fcbaa18b28dec10281551dfe6ed3a3ed80e3d6
patch link: https://lore.kernel.org/all/[email protected]/
patch subject: [PATCH v1 3/3] selftests/mm: add tests for HWPOISON hugetlbfs read

in testcase: kernel-selftests
version: kernel-selftests-x86_64-60acb023-1_20230329
with following parameters:

sc_nr_hugepages: 2
group: mm

test-description: The kernel contains a set of "self tests" under the tools/testing/selftests/ directory. These are intended to be small unit tests to exercise individual code paths in the kernel.
test-url: https://www.kernel.org/doc/Documentation/kselftest.txt


compiler: gcc-11
test machine: 36 threads 1 sockets Intel(R) Core(TM) i9-10980XE CPU @ 3.00GHz (Cascade Lake) with 32G memory

(please refer to attached dmesg/kmsg for entire log/backtrace)


If you fix the issue, kindly add following tag
| Reported-by: kernel test robot <[email protected]>
| Closes: https://lore.kernel.org/oe-lkp/[email protected]


from below log [1], the new added hugetlb-read-hwpoison test passed.
but will fail while running hugepage-vmemmap test later as:

# selftests: mm: hugepage-vmemmap
# mmap: Cannot allocate memory
not ok 10 selftests: mm: hugepage-vmemmap # exit=1

hugepage-vmemmap can pass while testing with parent kernel, as below [2]

at the same time, we observed lots of
"Not enough free huge pages to test, exiting!"
for various testing.

any advice for this behavior?

BTW, the system upon which we did this test has 32G memory.
not sure if there is a recommended memory size to run these tests?

Thanks!

[1]
(from test log for this d84de15119 kernel)

# selftests: mm: hugetlb-madvise
# Not enough free huge pages to test, exiting!
not ok 5 selftests: mm: hugetlb-madvise # exit=1
# selftests: mm: hugetlb-read-hwpoison
# !!! read failed: Input/output error
# !!! mmap for primary mapping failed: Cannot allocate memory
# !!! mmap for primary mapping failed: Cannot allocate memory
# !!! mmap for primary mapping failed: Cannot allocate memory
# !!! mmap for primary mapping failed: Cannot allocate memory
# !!! mmap for primary mapping failed: Cannot allocate memory
# !!! mmap for primary mapping failed: Cannot allocate memory
# !!! mmap for primary mapping failed: Cannot allocate memory
# !!! mmap for primary mapping failed: Cannot allocate memory
# !!! mmap for primary mapping failed: Cannot allocate memory
# ... Write/read chunk size=0x800
# HugeTLB read regression test...
# ... expect to read 0x200000 bytes of data in total
# ... actually read 0x200000 bytes of data in total
# HugeTLB read regression test...TEST_PASSED
# HugeTLB read HWPOISON test...
# ... expect to read 0x101000 bytes of data in total
# ... actually read 0x101000 bytes of data in total
# HugeTLB read HWPOISON test...TEST_PASSED
# HugeTLB seek then read HWPOISON test...
# ... init val=4 with offset=0x102000
# ... expect to read 0xfe000 bytes of data in total
# ... actually read 0xfe000 bytes of data in total
# HugeTLB seek then read HWPOISON test...TEST_PASSED
# ... Write/read chunk size=0x1000
# HugeTLB read regression test...
# HugeTLB read regression test...TEST_SKIPPED
# HugeTLB read HWPOISON test...
# HugeTLB read HWPOISON test...TEST_SKIPPED
# HugeTLB seek then read HWPOISON test...
# HugeTLB seek then read HWPOISON test...TEST_SKIPPED
# ... Write/read chunk size=0x2000
# HugeTLB read regression test...
# HugeTLB read regression test...TEST_SKIPPED
# HugeTLB read HWPOISON test...
# HugeTLB read HWPOISON test...TEST_SKIPPED
# HugeTLB seek then read HWPOISON test...
# HugeTLB seek then read HWPOISON test...TEST_SKIPPED
# ... Write/read chunk size=0x4000
# HugeTLB read regression test...
# HugeTLB read regression test...TEST_SKIPPED
# HugeTLB read HWPOISON test...
# HugeTLB read HWPOISON test...TEST_SKIPPED
# HugeTLB seek then read HWPOISON test...
# HugeTLB seek then read HWPOISON test...TEST_SKIPPED
ok 6 selftests: mm: hugetlb-read-hwpoison
# selftests: mm: hugepage-mmap
# mmap: Cannot allocate memory
not ok 7 selftests: mm: hugepage-mmap # exit=1
# selftests: mm: hugepage-mremap
# mmap1: Cannot allocate memory
# Map haddr: Returned address is 0xffffffffffffffff
not ok 8 selftests: mm: hugepage-mremap # exit=1
# selftests: mm: hugepage-shm
# shmget: Cannot allocate memory
not ok 9 selftests: mm: hugepage-shm # exit=1
# selftests: mm: hugepage-vmemmap
# mmap: Cannot allocate memory
not ok 10 selftests: mm: hugepage-vmemmap # exit=1


[2]
(from test log for parent kernel)

# selftests: mm: hugetlb-madvise
# Not enough free huge pages to test, exiting!
not ok 5 selftests: mm: hugetlb-madvise # exit=1
# selftests: mm: hugepage-mmap
# mmap: Cannot allocate memory
not ok 6 selftests: mm: hugepage-mmap # exit=1
# selftests: mm: hugepage-mremap
# mmap1: Cannot allocate memory
# Map haddr: Returned address is 0xffffffffffffffff
not ok 7 selftests: mm: hugepage-mremap # exit=1
# selftests: mm: hugepage-shm
# shmget: Cannot allocate memory
not ok 8 selftests: mm: hugepage-shm # exit=1
# selftests: mm: hugepage-vmemmap
# Returned address is 0x7f6024e00000 whose pfn is 1b8600
ok 9 selftests: mm: hugepage-vmemmap


To reproduce:

git clone https://github.com/intel/lkp-tests.git
cd lkp-tests
sudo bin/lkp install job.yaml # job file is attached in this email
bin/lkp split-job --compatible job.yaml # generate the yaml file for lkp run
sudo bin/lkp run generated-yaml-file

# if come across any failure that blocks the test,
# please remove ~/.lkp and /lkp dir to run from a clean state.



--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki



Attachments:
(No filename) (6.23 kB)
config-6.4.0-rc2-00003-gd84de15119b7 (164.71 kB)
job-script (5.99 kB)
kmsg.xz (66.82 kB)
kernel-selftests (579.57 kB)
job.yaml (5.01 kB)
reproduce (284.00 B)
Download all attachments