Subject: [PATCH v5 7/9] drm: Add a drm_get_unmapped_area() helper

From: Thomas Hellstrom <[email protected]>

Unaligned virtual addresses make it unlikely that huge page-table entries
can be used.
So align virtual buffer object addresses to huge page boundaries that
match the underlying physical address huge page boundaries, taking
buffer object sizes into account to determine when it might be possible
to use huge page-table entries.

Cc: Andrew Morton <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: "Matthew Wilcox (Oracle)" <[email protected]>
Cc: "Kirill A. Shutemov" <[email protected]>
Cc: Ralph Campbell <[email protected]>
Cc: "Jérôme Glisse" <[email protected]>
Cc: "Christian König" <[email protected]>
Cc: Dan Williams <[email protected]>
Signed-off-by: Thomas Hellstrom <[email protected]>
Reviewed-by: Roland Scheidegger <[email protected]>
Acked-by: Christian König <[email protected]>
---
drivers/gpu/drm/drm_file.c | 136 +++++++++++++++++++++++++++++++++++++
include/drm/drm_file.h | 5 ++
2 files changed, 141 insertions(+)

diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index 92d16724f949..40fae356d202 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -39,10 +39,13 @@
#include <linux/poll.h>
#include <linux/slab.h>

+#include <uapi/asm/mman.h>
+
#include <drm/drm_client.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_print.h>
+#include <drm/drm_vma_manager.h>

#include "drm_crtc_internal.h"
#include "drm_internal.h"
@@ -796,3 +799,136 @@ struct file *mock_drm_getfile(struct drm_minor *minor, unsigned int flags)
return file;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(mock_drm_getfile);
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+/*
+ * drm_addr_inflate() attempts to construct an aligned area by inflating
+ * the area size and skipping the unaligned start of the area.
+ * adapted from shmem_get_unmapped_area()
+ */
+static unsigned long drm_addr_inflate(unsigned long addr,
+ unsigned long len,
+ unsigned long pgoff,
+ unsigned long flags,
+ unsigned long huge_size)
+{
+ unsigned long offset, inflated_len;
+ unsigned long inflated_addr;
+ unsigned long inflated_offset;
+
+ offset = (pgoff << PAGE_SHIFT) & (huge_size - 1);
+ if (offset && offset + len < 2 * huge_size)
+ return addr;
+ if ((addr & (huge_size - 1)) == offset)
+ return addr;
+
+ inflated_len = len + huge_size - PAGE_SIZE;
+ if (inflated_len > TASK_SIZE)
+ return addr;
+ if (inflated_len < len)
+ return addr;
+
+ inflated_addr = current->mm->get_unmapped_area(NULL, 0, inflated_len,
+ 0, flags);
+ if (IS_ERR_VALUE(inflated_addr))
+ return addr;
+ if (inflated_addr & ~PAGE_MASK)
+ return addr;
+
+ inflated_offset = inflated_addr & (huge_size - 1);
+ inflated_addr += offset - inflated_offset;
+ if (inflated_offset > offset)
+ inflated_addr += huge_size;
+
+ if (inflated_addr > TASK_SIZE - len)
+ return addr;
+
+ return inflated_addr;
+}
+
+/**
+ * drm_get_unmapped_area() - Get an unused user-space virtual memory area
+ * suitable for huge page table entries.
+ * @file: The struct file representing the address space being mmap()'d.
+ * @uaddr: Start address suggested by user-space.
+ * @len: Length of the area.
+ * @pgoff: The page offset into the address space.
+ * @flags: mmap flags
+ * @mgr: The address space manager used by the drm driver. This argument can
+ * probably be removed at some point when all drivers use the same
+ * address space manager.
+ *
+ * This function attempts to find an unused user-space virtual memory area
+ * that can accommodate the size we want to map, and that is properly
+ * aligned to facilitate huge page table entries matching actual
+ * huge pages or huge page aligned memory in buffer objects. Buffer objects
+ * are assumed to start at huge page boundary pfns (io memory) or be
+ * populated by huge pages aligned to the start of the buffer object
+ * (system- or coherent memory). Adapted from shmem_get_unmapped_area.
+ *
+ * Return: aligned user-space address.
+ */
+unsigned long drm_get_unmapped_area(struct file *file,
+ unsigned long uaddr, unsigned long len,
+ unsigned long pgoff, unsigned long flags,
+ struct drm_vma_offset_manager *mgr)
+{
+ unsigned long addr;
+ unsigned long inflated_addr;
+ struct drm_vma_offset_node *node;
+
+ if (len > TASK_SIZE)
+ return -ENOMEM;
+
+ /*
+ * @pgoff is the file page-offset the huge page boundaries of
+ * which typically aligns to physical address huge page boundaries.
+ * That's not true for DRM, however, where physical address huge
+ * page boundaries instead are aligned with the offset from
+ * buffer object start. So adjust @pgoff to be the offset from
+ * buffer object start.
+ */
+ drm_vma_offset_lock_lookup(mgr);
+ node = drm_vma_offset_lookup_locked(mgr, pgoff, 1);
+ if (node)
+ pgoff -= node->vm_node.start;
+ drm_vma_offset_unlock_lookup(mgr);
+
+ addr = current->mm->get_unmapped_area(file, uaddr, len, pgoff, flags);
+ if (IS_ERR_VALUE(addr))
+ return addr;
+ if (addr & ~PAGE_MASK)
+ return addr;
+ if (addr > TASK_SIZE - len)
+ return addr;
+
+ if (len < HPAGE_PMD_SIZE)
+ return addr;
+ if (flags & MAP_FIXED)
+ return addr;
+ /*
+ * Our priority is to support MAP_SHARED mapped hugely;
+ * and support MAP_PRIVATE mapped hugely too, until it is COWed.
+ * But if caller specified an address hint, respect that as before.
+ */
+ if (uaddr)
+ return addr;
+
+ inflated_addr = drm_addr_inflate(addr, len, pgoff, flags,
+ HPAGE_PMD_SIZE);
+
+ if (IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) &&
+ len >= HPAGE_PUD_SIZE)
+ inflated_addr = drm_addr_inflate(inflated_addr, len, pgoff,
+ flags, HPAGE_PUD_SIZE);
+ return inflated_addr;
+}
+#else /* CONFIG_TRANSPARENT_HUGEPAGE */
+unsigned long drm_get_unmapped_area(struct file *file,
+ unsigned long uaddr, unsigned long len,
+ unsigned long pgoff, unsigned long flags,
+ struct drm_vma_offset_manager *mgr)
+{
+ return current->mm->get_unmapped_area(file, uaddr, len, pgoff, flags);
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h
index 8b099b347817..06fe3da09b27 100644
--- a/include/drm/drm_file.h
+++ b/include/drm/drm_file.h
@@ -387,6 +387,11 @@ void drm_event_cancel_free(struct drm_device *dev,
struct drm_pending_event *p);
void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e);
void drm_send_event(struct drm_device *dev, struct drm_pending_event *e);
+struct drm_vma_offset_manager;
+unsigned long drm_get_unmapped_area(struct file *file,
+ unsigned long uaddr, unsigned long len,
+ unsigned long pgoff, unsigned long flags,
+ struct drm_vma_offset_manager *mgr);

struct file *mock_drm_getfile(struct drm_minor *minor, unsigned int flags);

--
2.21.1


2020-03-03 21:50:20

by kernel test robot

[permalink] [raw]
Subject: Re: [PATCH v5 7/9] drm: Add a drm_get_unmapped_area() helper

Hi Thomas,

I love your patch! Yet something to improve:

[auto build test ERROR on drm-exynos/exynos-drm-next]
[also build test ERROR on drm-intel/for-linux-next drm-tip/drm-tip linus/master v5.6-rc4 next-20200303]
[cannot apply to tegra-drm/drm/tegra/for-next]
[if your patch is applied to the wrong git tree, please drop us a note to help
improve the system. BTW, we also suggest to use '--base' option to specify the
base tree in git format-patch, please see https://stackoverflow.com/a/37406982]

url: https://github.com/0day-ci/linux/commits/Thomas-Hellstr-m-VMware/Huge-page-table-entries-for-TTM/20200304-022543
base: https://git.kernel.org/pub/scm/linux/kernel/git/daeinki/drm-exynos.git exynos-drm-next
config: c6x-allyesconfig (attached as .config)
compiler: c6x-elf-gcc (GCC) 7.5.0
reproduce:
wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
chmod +x ~/bin/make.cross
# save the attached .config to linux build tree
GCC_VERSION=7.5.0 make.cross ARCH=c6x

If you fix the issue, kindly add following tag
Reported-by: kbuild test robot <[email protected]>

All error/warnings (new ones prefixed by >>):

drivers/gpu/drm/drm_file.c: In function 'drm_get_unmapped_area':
>> drivers/gpu/drm/drm_file.c:966:20: error: 'struct mm_struct' has no member named 'get_unmapped_area'
return current->mm->get_unmapped_area(file, uaddr, len, pgoff, flags);
^~
>> drivers/gpu/drm/drm_file.c:967:1: warning: control reaches end of non-void function [-Wreturn-type]
}
^

vim +966 drivers/gpu/drm/drm_file.c

882
883 /**
884 * drm_get_unmapped_area() - Get an unused user-space virtual memory area
885 * suitable for huge page table entries.
886 * @file: The struct file representing the address space being mmap()'d.
887 * @uaddr: Start address suggested by user-space.
888 * @len: Length of the area.
889 * @pgoff: The page offset into the address space.
890 * @flags: mmap flags
891 * @mgr: The address space manager used by the drm driver. This argument can
892 * probably be removed at some point when all drivers use the same
893 * address space manager.
894 *
895 * This function attempts to find an unused user-space virtual memory area
896 * that can accommodate the size we want to map, and that is properly
897 * aligned to facilitate huge page table entries matching actual
898 * huge pages or huge page aligned memory in buffer objects. Buffer objects
899 * are assumed to start at huge page boundary pfns (io memory) or be
900 * populated by huge pages aligned to the start of the buffer object
901 * (system- or coherent memory). Adapted from shmem_get_unmapped_area.
902 *
903 * Return: aligned user-space address.
904 */
905 unsigned long drm_get_unmapped_area(struct file *file,
906 unsigned long uaddr, unsigned long len,
907 unsigned long pgoff, unsigned long flags,
908 struct drm_vma_offset_manager *mgr)
909 {
910 unsigned long addr;
911 unsigned long inflated_addr;
912 struct drm_vma_offset_node *node;
913
914 if (len > TASK_SIZE)
915 return -ENOMEM;
916
917 /*
918 * @pgoff is the file page-offset the huge page boundaries of
919 * which typically aligns to physical address huge page boundaries.
920 * That's not true for DRM, however, where physical address huge
921 * page boundaries instead are aligned with the offset from
922 * buffer object start. So adjust @pgoff to be the offset from
923 * buffer object start.
924 */
925 drm_vma_offset_lock_lookup(mgr);
926 node = drm_vma_offset_lookup_locked(mgr, pgoff, 1);
927 if (node)
928 pgoff -= node->vm_node.start;
929 drm_vma_offset_unlock_lookup(mgr);
930
931 addr = current->mm->get_unmapped_area(file, uaddr, len, pgoff, flags);
932 if (IS_ERR_VALUE(addr))
933 return addr;
934 if (addr & ~PAGE_MASK)
935 return addr;
936 if (addr > TASK_SIZE - len)
937 return addr;
938
939 if (len < HPAGE_PMD_SIZE)
940 return addr;
941 if (flags & MAP_FIXED)
942 return addr;
943 /*
944 * Our priority is to support MAP_SHARED mapped hugely;
945 * and support MAP_PRIVATE mapped hugely too, until it is COWed.
946 * But if caller specified an address hint, respect that as before.
947 */
948 if (uaddr)
949 return addr;
950
951 inflated_addr = drm_addr_inflate(addr, len, pgoff, flags,
952 HPAGE_PMD_SIZE);
953
954 if (IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) &&
955 len >= HPAGE_PUD_SIZE)
956 inflated_addr = drm_addr_inflate(inflated_addr, len, pgoff,
957 flags, HPAGE_PUD_SIZE);
958 return inflated_addr;
959 }
960 #else /* CONFIG_TRANSPARENT_HUGEPAGE */
961 unsigned long drm_get_unmapped_area(struct file *file,
962 unsigned long uaddr, unsigned long len,
963 unsigned long pgoff, unsigned long flags,
964 struct drm_vma_offset_manager *mgr)
965 {
> 966 return current->mm->get_unmapped_area(file, uaddr, len, pgoff, flags);
> 967 }

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/[email protected]


Attachments:
(No filename) (5.36 kB)
.config.gz (50.45 kB)
Download all attachments