2023-11-30 17:14:43

by Marco Pagani

Subject: [PATCH v5] drm/test: add a test suite for GEM objects backed by shmem

This patch introduces an initial KUnit test suite for GEM objects
backed by shmem buffers.

Suggested-by: Javier Martinez Canillas <[email protected]>
Signed-off-by: Marco Pagani <[email protected]>

v5:
- using __drm_kunit_helper_alloc_drm_device() to avoid local struct
v4:
- Add missing MMU dependency for DRM_GEM_SHMEM_HELPER (kernel test robot)
v3:
- Explicitly cast pointers in the helpers
- Removed unused pointer to parent dev in struct fake_dev
- Test entries reordering in Kconfig and Makefile sent as a separate patch
v2:
- Improved description of test cases
- Cleaner error handling using KUnit actions
- Alphabetical order in Kconfig and Makefile
---
drivers/gpu/drm/Kconfig | 3 +-
drivers/gpu/drm/tests/Makefile | 1 +
drivers/gpu/drm/tests/drm_gem_shmem_test.c | 383 +++++++++++++++++++++
3 files changed, 386 insertions(+), 1 deletion(-)
create mode 100644 drivers/gpu/drm/tests/drm_gem_shmem_test.c

diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 740c1c0bd068..b7abd436455f 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -74,12 +74,13 @@ config DRM_KUNIT_TEST_HELPERS

config DRM_KUNIT_TEST
tristate "KUnit tests for DRM" if !KUNIT_ALL_TESTS
- depends on DRM && KUNIT
+ depends on DRM && KUNIT && MMU
select DRM_BUDDY
select DRM_DISPLAY_DP_HELPER
select DRM_DISPLAY_HELPER
select DRM_EXEC
select DRM_EXPORT_FOR_TESTS if m
+ select DRM_GEM_SHMEM_HELPER
select DRM_KMS_HELPER
select DRM_KUNIT_TEST_HELPERS
select DRM_LIB_RANDOM
diff --git a/drivers/gpu/drm/tests/Makefile b/drivers/gpu/drm/tests/Makefile
index 2645af241ff0..d6183b3d7688 100644
--- a/drivers/gpu/drm/tests/Makefile
+++ b/drivers/gpu/drm/tests/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_DRM_KUNIT_TEST) += \
drm_format_helper_test.o \
drm_format_test.o \
drm_framebuffer_test.o \
+ drm_gem_shmem_test.o \
drm_managed_test.o \
drm_mm_test.o \
drm_modes_test.o \
diff --git a/drivers/gpu/drm/tests/drm_gem_shmem_test.c b/drivers/gpu/drm/tests/drm_gem_shmem_test.c
new file mode 100644
index 000000000000..91202e40cde9
--- /dev/null
+++ b/drivers/gpu/drm/tests/drm_gem_shmem_test.c
@@ -0,0 +1,383 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit test suite for GEM objects backed by shmem buffers
+ *
+ * Copyright (C) 2023 Red Hat, Inc.
+ *
+ * Author: Marco Pagani <[email protected]>
+ */
+
+#include <linux/dma-buf.h>
+#include <linux/iosys-map.h>
+#include <linux/sizes.h>
+
+#include <kunit/test.h>
+
+#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_kunit_helpers.h>
+
+#define TEST_SIZE SZ_1M
+#define TEST_BYTE 0xae
+
+/*
+ * Wrappers to avoid explicit type casting when passing action
+ * functions to kunit_add_action().
+ */
+static void kfree_wrapper(void *ptr)
+{
+ const void *obj = ptr;
+
+ kfree(obj);
+}
+
+static void sg_free_table_wrapper(void *ptr)
+{
+ struct sg_table *sgt = ptr;
+
+ sg_free_table(sgt);
+}
+
+static void drm_gem_shmem_free_wrapper(void *ptr)
+{
+ struct drm_gem_shmem_object *shmem = ptr;
+
+ drm_gem_shmem_free(shmem);
+}
+
+/*
+ * Test creating a shmem GEM object backed by a shmem buffer. The test
+ * case succeeds if the GEM object is successfully allocated with the
+ * shmem file node and object functions attributes set, and its size
+ * attribute equals the requested size.
+ */
+static void drm_gem_shmem_test_obj_create(struct kunit *test)
+{
+ struct drm_device *drm_dev = test->priv;
+ struct drm_gem_shmem_object *shmem;
+
+ shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);
+ KUNIT_EXPECT_EQ(test, shmem->base.size, TEST_SIZE);
+ KUNIT_EXPECT_NOT_NULL(test, shmem->base.filp);
+ KUNIT_EXPECT_NOT_NULL(test, shmem->base.funcs);
+
+ drm_gem_shmem_free(shmem);
+}
+
+/*
+ * Test creating a shmem GEM object from a scatter/gather table exported
+ * via a DMA-BUF. The test case succeeds if the GEM object is successfully
+ * created with the shmem file node attribute set to NULL and the sgt
+ * attribute pointing to the imported scatter/gather table.
+ */
+static void drm_gem_shmem_test_obj_create_private(struct kunit *test)
+{
+ struct drm_device *drm_dev = test->priv;
+ struct drm_gem_shmem_object *shmem;
+ struct drm_gem_object *gem_obj;
+ struct dma_buf buf_mock;
+ struct dma_buf_attachment attach_mock;
+ struct sg_table *sgt;
+ char *buf;
+ int ret;
+
+ /* Create a mock scatter/gather table */
+ buf = kunit_kzalloc(test, TEST_SIZE, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, buf);
+
+ sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, sgt);
+
+ ret = kunit_add_action_or_reset(test, kfree_wrapper, sgt);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ ret = kunit_add_action_or_reset(test, sg_free_table_wrapper, sgt);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ sg_init_one(sgt->sgl, buf, TEST_SIZE);
+
+ /* Init a mock DMA-BUF */
+ buf_mock.size = TEST_SIZE;
+ attach_mock.dmabuf = &buf_mock;
+
+ gem_obj = drm_gem_shmem_prime_import_sg_table(drm_dev, &attach_mock, sgt);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gem_obj);
+ KUNIT_EXPECT_EQ(test, gem_obj->size, TEST_SIZE);
+ KUNIT_EXPECT_NULL(test, gem_obj->filp);
+ KUNIT_EXPECT_NOT_NULL(test, gem_obj->funcs);
+
+ /* The scatter/gather table will be freed by drm_gem_shmem_free */
+ kunit_remove_action(test, sg_free_table_wrapper, sgt);
+ kunit_remove_action(test, kfree_wrapper, sgt);
+
+ shmem = to_drm_gem_shmem_obj(gem_obj);
+ KUNIT_EXPECT_PTR_EQ(test, shmem->sgt, sgt);
+
+ drm_gem_shmem_free(shmem);
+}
+
+/*
+ * Test pinning backing pages for a shmem GEM object. The test case
+ * succeeds if a suitable number of backing pages are allocated, and
+ * the pages use-count attribute is incremented by one.
+ */
+static void drm_gem_shmem_test_pin_pages(struct kunit *test)
+{
+ struct drm_device *drm_dev = test->priv;
+ struct drm_gem_shmem_object *shmem;
+ int i, ret;
+
+ shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);
+ KUNIT_EXPECT_NULL(test, shmem->pages);
+ KUNIT_EXPECT_EQ(test, shmem->pages_use_count, 0);
+
+ ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ ret = drm_gem_shmem_pin(shmem);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+ KUNIT_ASSERT_NOT_NULL(test, shmem->pages);
+ KUNIT_EXPECT_EQ(test, shmem->pages_use_count, 1);
+
+ for (i = 0; i < (shmem->base.size >> PAGE_SHIFT); i++)
+ KUNIT_ASSERT_NOT_NULL(test, shmem->pages[i]);
+
+ drm_gem_shmem_unpin(shmem);
+ KUNIT_EXPECT_NULL(test, shmem->pages);
+ KUNIT_EXPECT_EQ(test, shmem->pages_use_count, 0);
+}
+
+/*
+ * Test creating a virtual mapping for a shmem GEM object. The test
+ * case succeeds if the backing memory is mapped and the reference
+ * counter for virtual mapping is increased by one. Moreover, the test
+ * case writes and then reads a test pattern over the mapped memory.
+ */
+static void drm_gem_shmem_test_vmap(struct kunit *test)
+{
+ struct drm_device *drm_dev = test->priv;
+ struct drm_gem_shmem_object *shmem;
+ struct iosys_map map;
+ int ret, i;
+
+ shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);
+ KUNIT_EXPECT_NULL(test, shmem->vaddr);
+ KUNIT_EXPECT_EQ(test, shmem->vmap_use_count, 0);
+
+ ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ ret = drm_gem_shmem_vmap(shmem, &map);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+ KUNIT_ASSERT_NOT_NULL(test, shmem->vaddr);
+ KUNIT_ASSERT_FALSE(test, iosys_map_is_null(&map));
+ KUNIT_EXPECT_EQ(test, shmem->vmap_use_count, 1);
+
+ iosys_map_memset(&map, 0, TEST_BYTE, TEST_SIZE);
+ for (i = 0; i < TEST_SIZE; i++)
+ KUNIT_EXPECT_EQ(test, iosys_map_rd(&map, i, u8), TEST_BYTE);
+
+ drm_gem_shmem_vunmap(shmem, &map);
+ KUNIT_EXPECT_NULL(test, shmem->vaddr);
+ KUNIT_EXPECT_EQ(test, shmem->vmap_use_count, 0);
+}
+
+/*
+ * Test exporting a scatter/gather table of pinned pages suitable for
+ * PRIME usage from a shmem GEM object. The test case succeeds if a
+ * scatter/gather table large enough to accommodate the backing memory
+ * is successfully exported.
+ */
+static void drm_gem_shmem_test_get_pages_sgt(struct kunit *test)
+{
+ struct drm_device *drm_dev = test->priv;
+ struct drm_gem_shmem_object *shmem;
+ struct sg_table *sgt;
+ struct scatterlist *sg;
+ unsigned int si, len = 0;
+ int ret;
+
+ shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);
+
+ ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ ret = drm_gem_shmem_pin(shmem);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ sgt = drm_gem_shmem_get_sg_table(shmem);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sgt);
+ KUNIT_EXPECT_NULL(test, shmem->sgt);
+
+ ret = kunit_add_action_or_reset(test, sg_free_table_wrapper, sgt);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ for_each_sgtable_sg(sgt, sg, si) {
+ KUNIT_EXPECT_NOT_NULL(test, sg);
+ len += sg->length;
+ }
+
+ KUNIT_EXPECT_GE(test, len, TEST_SIZE);
+}
+
+/*
+ * Test pinning pages and exporting a scatter/gather table suitable for
+ * driver usage from a shmem GEM object. The test case succeeds if the
+ * backing pages are pinned and a scatter/gather table large enough to
+ * accommodate the backing memory is successfully exported.
+ */
+static void drm_gem_shmem_test_get_sg_table(struct kunit *test)
+{
+ struct drm_device *drm_dev = test->priv;
+ struct drm_gem_shmem_object *shmem;
+ struct sg_table *sgt;
+ struct scatterlist *sg;
+ unsigned int si, ret, len = 0;
+
+ shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);
+
+ ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ /* The scatter/gather table will be freed by drm_gem_shmem_free */
+ sgt = drm_gem_shmem_get_pages_sgt(shmem);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sgt);
+ KUNIT_ASSERT_NOT_NULL(test, shmem->pages);
+ KUNIT_EXPECT_EQ(test, shmem->pages_use_count, 1);
+ KUNIT_EXPECT_PTR_EQ(test, sgt, shmem->sgt);
+
+ for_each_sgtable_sg(sgt, sg, si) {
+ KUNIT_EXPECT_NOT_NULL(test, sg);
+ len += sg->length;
+ }
+
+ KUNIT_EXPECT_GE(test, len, TEST_SIZE);
+}
+
+/*
+ * Test updating the madvise state of a shmem GEM object. The test
+ * case checks that the function for setting madv updates it only if
+ * its current value is greater than or equal to zero and returns false
+ * if it is negative.
+ */
+static void drm_gem_shmem_test_madvise(struct kunit *test)
+{
+ struct drm_device *drm_dev = test->priv;
+ struct drm_gem_shmem_object *shmem;
+ int ret;
+
+ shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);
+ KUNIT_ASSERT_EQ(test, shmem->madv, 0);
+
+ ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ ret = drm_gem_shmem_madvise(shmem, 1);
+ KUNIT_EXPECT_TRUE(test, ret);
+ KUNIT_ASSERT_EQ(test, shmem->madv, 1);
+
+ /* Set madv to a negative value */
+ ret = drm_gem_shmem_madvise(shmem, -1);
+ KUNIT_EXPECT_FALSE(test, ret);
+ KUNIT_ASSERT_EQ(test, shmem->madv, -1);
+
+ /* Check that madv cannot be set back to a positive value */
+ ret = drm_gem_shmem_madvise(shmem, 0);
+ KUNIT_EXPECT_FALSE(test, ret);
+ KUNIT_ASSERT_EQ(test, shmem->madv, -1);
+}
+
+/*
+ * Test purging a shmem GEM object. First, assert that a newly created
+ * shmem GEM object is not purgeable. Then, set madvise to a positive
+ * value and call drm_gem_shmem_get_pages_sgt() to pin and dma-map the
+ * backing pages. Finally, assert that the shmem GEM object is now
+ * purgeable and purge it.
+ */
+static void drm_gem_shmem_test_purge(struct kunit *test)
+{
+ struct drm_device *drm_dev = test->priv;
+ struct drm_gem_shmem_object *shmem;
+ struct sg_table *sgt;
+ int ret;
+
+ shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);
+
+ ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ ret = drm_gem_shmem_is_purgeable(shmem);
+ KUNIT_EXPECT_FALSE(test, ret);
+
+ ret = drm_gem_shmem_madvise(shmem, 1);
+ KUNIT_EXPECT_TRUE(test, ret);
+
+ /* The scatter/gather table will be freed by drm_gem_shmem_free */
+ sgt = drm_gem_shmem_get_pages_sgt(shmem);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sgt);
+
+ ret = drm_gem_shmem_is_purgeable(shmem);
+ KUNIT_EXPECT_TRUE(test, ret);
+
+ drm_gem_shmem_purge(shmem);
+ KUNIT_EXPECT_NULL(test, shmem->pages);
+ KUNIT_EXPECT_NULL(test, shmem->sgt);
+ KUNIT_EXPECT_EQ(test, shmem->madv, -1);
+}
+
+static int drm_gem_shmem_test_init(struct kunit *test)
+{
+ struct device *dev;
+ struct drm_device *drm_dev;
+
+ /* Allocate a parent device */
+ dev = drm_kunit_helper_alloc_device(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+
+ /*
+ * The DRM core will automatically initialize the GEM core and create
+ * a DRM Memory Manager object which provides an address space pool
+ * for GEM object allocation.
+ */
+ drm_dev = __drm_kunit_helper_alloc_drm_device(test, dev, sizeof(*drm_dev),
+ 0, DRIVER_GEM);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, drm_dev);
+
+ test->priv = drm_dev;
+
+ return 0;
+}
+
+static struct kunit_case drm_gem_shmem_test_cases[] = {
+ KUNIT_CASE(drm_gem_shmem_test_obj_create),
+ KUNIT_CASE(drm_gem_shmem_test_obj_create_private),
+ KUNIT_CASE(drm_gem_shmem_test_pin_pages),
+ KUNIT_CASE(drm_gem_shmem_test_vmap),
+ KUNIT_CASE(drm_gem_shmem_test_get_pages_sgt),
+ KUNIT_CASE(drm_gem_shmem_test_get_sg_table),
+ KUNIT_CASE(drm_gem_shmem_test_madvise),
+ KUNIT_CASE(drm_gem_shmem_test_purge),
+ {}
+};
+
+static struct kunit_suite drm_gem_shmem_suite = {
+ .name = "drm_gem_shmem",
+ .init = drm_gem_shmem_test_init,
+ .test_cases = drm_gem_shmem_test_cases
+};
+
+kunit_test_suite(drm_gem_shmem_suite);
+
+MODULE_LICENSE("GPL");

base-commit: a13fee31f56449fc600d9e064c7b32302f92dcef
--
2.43.0
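
For reference, a quick way to exercise this suite is the KUnit tool running
under UML, assuming the in-tree .kunitconfig for the DRM tests is present:

	./tools/testing/kunit/kunit.py run \
		--kunitconfig=drivers/gpu/drm/tests 'drm_gem_shmem*'

The trailing glob restricts the run to this suite; dropping it runs all DRM
KUnit tests selected by that config.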


2023-12-01 08:38:46

by Maxime Ripard

Subject: Re: [PATCH v5] drm/test: add a test suite for GEM objects backed by shmem

On Thu, Nov 30, 2023 at 06:14:16PM +0100, Marco Pagani wrote:
> This patch introduces an initial KUnit test suite for GEM objects
> backed by shmem buffers.
>
> Suggested-by: Javier Martinez Canillas <[email protected]>
> Signed-off-by: Marco Pagani <[email protected]>
>
> v5:
> - using __drm_kunit_helper_alloc_drm_device() to avoid local struct
> v4:
> - Add missing MMU dependency for DRM_GEM_SHMEM_HELPER (kernel test robot)
> v3:
> - Explicitly cast pointers in the helpers
> - Removed unused pointer to parent dev in struct fake_dev
> - Test entries reordering in Kconfig and Makefile sent as a separate patch
> v2:
> - Improved description of test cases
> - Cleaner error handling using KUnit actions
> - Alphabetical order in Kconfig and Makefile

Applied to drm-misc-next, thanks!
Maxime



2024-02-18 15:49:41

by Guenter Roeck

Subject: Re: [PATCH v5] drm/test: add a test suite for GEM objects backed by shmem

Hi,

On Thu, Nov 30, 2023 at 06:14:16PM +0100, Marco Pagani wrote:
> This patch introduces an initial KUnit test suite for GEM objects
> backed by shmem buffers.
>
> Suggested-by: Javier Martinez Canillas <[email protected]>
> Signed-off-by: Marco Pagani <[email protected]>

When running this in qemu, I get lots of warning backtraces in the drm
core.

WARNING: CPU: 0 PID: 1341 at drivers/gpu/drm/drm_gem_shmem_helper.c:327
WARNING: CPU: 0 PID: 1341 at drivers/gpu/drm/drm_gem_shmem_helper.c:173
WARNING: CPU: 0 PID: 1341 at drivers/gpu/drm/drm_gem_shmem_helper.c:385
WARNING: CPU: 0 PID: 1341 at drivers/gpu/drm/drm_gem_shmem_helper.c:211
WARNING: CPU: 0 PID: 1345 at kernel/dma/mapping.c:194
WARNING: CPU: 0 PID: 1347 at drivers/gpu/drm/drm_gem_shmem_helper.c:429
WARNING: CPU: 0 PID: 1349 at drivers/gpu/drm/drm_gem_shmem_helper.c:445

It looks like dma_resv_assert_held() asserts each time it is executed.
The backtrace in kernel/dma/mapping.c is triggered by
    if (WARN_ON_ONCE(!dev->dma_mask))
        return 0;
in __dma_map_sg_attrs().

Is this a possible problem in the test code, or can it be caused by
some limitations or bugs in the qemu emulation ? If so, do you have any
thoughts or ideas what those limitations / bugs might be ?

Thanks,
Guenter

2024-02-22 15:32:37

by Marco Pagani

Subject: Re: [PATCH v5] drm/test: add a test suite for GEM objects backed by shmem



On 2024-02-18 16:49, Guenter Roeck wrote:
> Hi,
>
> On Thu, Nov 30, 2023 at 06:14:16PM +0100, Marco Pagani wrote:
>> This patch introduces an initial KUnit test suite for GEM objects
>> backed by shmem buffers.
>>
>> Suggested-by: Javier Martinez Canillas <[email protected]>
>> Signed-off-by: Marco Pagani <[email protected]>
>
> When running this in qemu, I get lots of warnings backtraces in the drm
> core.
>
> WARNING: CPU: 0 PID: 1341 at drivers/gpu/drm/drm_gem_shmem_helper.c:327
> WARNING: CPU: 0 PID: 1341 at drivers/gpu/drm/drm_gem_shmem_helper.c:173
> WARNING: CPU: 0 PID: 1341 at drivers/gpu/drm/drm_gem_shmem_helper.c:385
> WARNING: CPU: 0 PID: 1341 at drivers/gpu/drm/drm_gem_shmem_helper.c:211
> WARNING: CPU: 0 PID: 1345 at kernel/dma/mapping.c:194
> WARNING: CPU: 0 PID: 1347 at drivers/gpu/drm/drm_gem_shmem_helper.c:429
> WARNING: CPU: 0 PID: 1349 at drivers/gpu/drm/drm_gem_shmem_helper.c:445
>
> It looks like dma_resv_assert_held() asserts each time it is executed.
> The backtrace in kernel/dma/mapping.c is triggered by
>     if (WARN_ON_ONCE(!dev->dma_mask))
>         return 0;
> in __dma_map_sg_attrs().
>
> Is this a possible problem in the test code, or can it be caused by
> some limitations or bugs in the qemu emulation ? If so, do you have any
> thoughts or ideas what those limitations / bugs might be ?

Hi Guenter,

Thanks for reporting this issue. As you correctly noted, the warnings appear to
be caused by the dma_mask in the mock device being uninitialized. I'll send a
patch to fix it soon.

Marco
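
A minimal sketch of such a fix, assuming it ends up next to the shared KUnit
mock-device helper (the actual patch may differ, and the helper name
drm_kunit_mock_set_dma_mask() below is hypothetical), could look like this:

	#include <linux/dma-mapping.h>
	#include <kunit/test.h>

	/*
	 * Hypothetical helper: give the KUnit mock device a DMA mask so
	 * that dma_map_sgtable() and friends no longer trip
	 * WARN_ON_ONCE(!dev->dma_mask) when the tests map sg tables.
	 */
	static void drm_kunit_mock_set_dma_mask(struct kunit *test,
						struct device *dev)
	{
		int ret;

		/* Point dev->dma_mask at the coherent mask, set both to 64 bits */
		ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
		KUNIT_ASSERT_EQ(test, ret, 0);
	}

Calling such a helper from drm_gem_shmem_test_init(), right after
drm_kunit_helper_alloc_device(), would be one way to wire it in if the
allocation helper itself is left unchanged.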


2024-02-22 15:53:36

by Guenter Roeck

Subject: Re: [PATCH v5] drm/test: add a test suite for GEM objects backed by shmem

Hi Marco,

On 2/22/24 07:32, Marco Pagani wrote:
>
>
> On 2024-02-18 16:49, Guenter Roeck wrote:
>> Hi,
>>
>> On Thu, Nov 30, 2023 at 06:14:16PM +0100, Marco Pagani wrote:
>>> This patch introduces an initial KUnit test suite for GEM objects
>>> backed by shmem buffers.
>>>
>>> Suggested-by: Javier Martinez Canillas <[email protected]>
>>> Signed-off-by: Marco Pagani <[email protected]>
>>
>> When running this in qemu, I get lots of warnings backtraces in the drm
>> core.
>>
>> WARNING: CPU: 0 PID: 1341 at drivers/gpu/drm/drm_gem_shmem_helper.c:327
>> WARNING: CPU: 0 PID: 1341 at drivers/gpu/drm/drm_gem_shmem_helper.c:173
>> WARNING: CPU: 0 PID: 1341 at drivers/gpu/drm/drm_gem_shmem_helper.c:385
>> WARNING: CPU: 0 PID: 1341 at drivers/gpu/drm/drm_gem_shmem_helper.c:211
>> WARNING: CPU: 0 PID: 1345 at kernel/dma/mapping.c:194
>> WARNING: CPU: 0 PID: 1347 at drivers/gpu/drm/drm_gem_shmem_helper.c:429
>> WARNING: CPU: 0 PID: 1349 at drivers/gpu/drm/drm_gem_shmem_helper.c:445
>>
>> It looks like dma_resv_assert_held() asserts each time it is executed.
>> The backtrace in kernel/dma/mapping.c is triggered by
>>     if (WARN_ON_ONCE(!dev->dma_mask))
>>         return 0;
>> in __dma_map_sg_attrs().
>>
>> Is this a possible problem in the test code, or can it be caused by
>> some limitations or bugs in the qemu emulation ? If so, do you have any
>> thoughts or ideas what those limitations / bugs might be ?
>
> Hi Guenter,
>
> Thanks for reporting this issue. As you correctly noted, the warnings appear to
> be caused by the dma_mask in the mock device being uninitialized. I'll send a
> patch to fix it soon.
>

Thanks a lot for the update.

In this context, the TTM unit tests fail as well in qemu, with worse results:
it seems there is some bad cleanup after a failed test case, causing list
corruption in the drm core and ultimately a crash. I don't know if this
is also caused by the missing dma_mask initialization.

Thanks,
Guenter


2024-02-22 16:34:07

by Marco Pagani

Subject: Re: [PATCH v5] drm/test: add a test suite for GEM objects backed by shmem



On 2024-02-22 16:52, Guenter Roeck wrote:
> Hi Marco,
>
> On 2/22/24 07:32, Marco Pagani wrote:
>>
>>
>> On 2024-02-18 16:49, Guenter Roeck wrote:
>>> Hi,
>>>
>>> On Thu, Nov 30, 2023 at 06:14:16PM +0100, Marco Pagani wrote:
>>>> This patch introduces an initial KUnit test suite for GEM objects
>>>> backed by shmem buffers.
>>>>
>>>> Suggested-by: Javier Martinez Canillas <[email protected]>
>>>> Signed-off-by: Marco Pagani <[email protected]>
>>>
>>> When running this in qemu, I get lots of warnings backtraces in the drm
>>> core.
>>>
>>> WARNING: CPU: 0 PID: 1341 at drivers/gpu/drm/drm_gem_shmem_helper.c:327
>>> WARNING: CPU: 0 PID: 1341 at drivers/gpu/drm/drm_gem_shmem_helper.c:173
>>> WARNING: CPU: 0 PID: 1341 at drivers/gpu/drm/drm_gem_shmem_helper.c:385
>>> WARNING: CPU: 0 PID: 1341 at drivers/gpu/drm/drm_gem_shmem_helper.c:211
>>> WARNING: CPU: 0 PID: 1345 at kernel/dma/mapping.c:194
>>> WARNING: CPU: 0 PID: 1347 at drivers/gpu/drm/drm_gem_shmem_helper.c:429
>>> WARNING: CPU: 0 PID: 1349 at drivers/gpu/drm/drm_gem_shmem_helper.c:445
>>>
>>> It looks like dma_resv_assert_held() asserts each time it is executed.
>>> The backtrace in kernel/dma/mapping.c is triggered by
>>>     if (WARN_ON_ONCE(!dev->dma_mask))
>>>         return 0;
>>> in __dma_map_sg_attrs().
>>>
>>> Is this a possible problem in the test code, or can it be caused by
>>> some limitations or bugs in the qemu emulation ? If so, do you have any
>>> thoughts or ideas what those limitations / bugs might be ?
>>
>> Hi Guenter,
>>
>> Thanks for reporting this issue. As you correctly noted, the warnings appear to
>> be caused by the dma_mask in the mock device being uninitialized. I'll send a
>> patch to fix it soon.
>>
>
> Thanks a lot for the update.
>
> In this context, the TTM unit tests fail as well in qemu, with worse result:
> It seems there is some bad cleanup after a failed test case, causing list
> corruptions in the drm core and ultimately a crash. I don't know if this
> is also caused by the missing dma_mask initialization.
>

That's interesting. Which --arch argument are you using to run the
tests with QEMU?
Thanks,
Marco


2024-02-22 18:56:19

by Guenter Roeck

Subject: Re: [PATCH v5] drm/test: add a test suite for GEM objects backed by shmem

On Thu, Feb 22, 2024 at 05:33:48PM +0100, Marco Pagani wrote:
> >
> > In this context, the TTM unit tests fail as well in qemu, with worse result:
> > It seems there is some bad cleanup after a failed test case, causing list
> > corruptions in the drm core and ultimately a crash. I don't know if this
> > is also caused by the missing dma_mask initialization.
> >
>
> That's interesting. Which --arch argument are you using to run the
> tests with QEMU?

Example (I am not sure if any of those parameters matter; it is just one
of my tests):

qemu-system-x86_64 -kernel arch/x86/boot/bzImage -M q35 -cpu IvyBridge \
-no-reboot -snapshot -smp 2 \
-device e1000,netdev=net0 -netdev user,id=net0 -m 512 \
-drive file=rootfs.ext2,format=raw,if=ide \
--append "earlycon=uart8250,io,0x3f8,9600n8 root=/dev/sda1 console=ttyS0" \
-d unimp,guest_errors -nographic -monitor none

This results in:

[ ... ]
[ 5.989496] KTAP version 1
[ 5.989639] # Subtest: ttm_device
[ 5.989711] # module: ttm_device_test
[ 5.989760] 1..5
[ 6.002044] ok 1 ttm_device_init_basic
[ 6.013557] ok 2 ttm_device_init_multiple
ILLOPC: ffffffffb8ac9350: 0f 0b
[ 6.022481] ok 3 ttm_device_fini_basic
[ 6.026172] ------------[ cut here ]------------
[ 6.026315] WARNING: CPU: 1 PID: 1575 at drivers/gpu/drm/ttm/ttm_device.c:206 ttm_device_init+0x170/0x190
..
[ 6.135016] ok 3 Above the allocation limit
[ 6.138759] ------------[ cut here ]------------
[ 6.138925] WARNING: CPU: 1 PID: 1595 at kernel/dma/mapping.c:503 dma_alloc_attrs+0xf6/0x100
..
[ 6.143850] # ttm_pool_alloc_basic: ASSERTION FAILED at drivers/gpu/drm/ttm/tests/ttm_pool_test.c:162
[ 6.143850] Expected err == 0, but
[ 6.143850] err == -12 (0xfffffffffffffff4)
[ 6.148824] not ok 4 One page, with coherent DMA mappings enabled

From there things go downhill.

[ 6.152821] list_add corruption. prev->next should be next (ffffffffbbd53950), but was 0000000000000000. (prev=ffff8af1c38f9e20).

and so on until the emulation crashes.

Guenter