Functionality was added specifically for the DRM TTM driver to support
mapping memory for VM_MIXEDMAP VMAs with customised protection flags;
however, this has since been rolled back as issues were found with the
approach.
This series removes the mm changes too, retaining some of the useful
comments.
Lorenzo Stoakes (3):
mm: remove unused vmf_insert_mixed_prot()
mm: Remove vmf_insert_pfn_xxx_prot() for huge page-table entries
drm/ttm: Remove comment referencing now-removed
vmf_insert_mixed_prot()
drivers/gpu/drm/ttm/ttm_bo_vm.c | 2 +-
include/linux/huge_mm.h | 39 ++--------------------
include/linux/mm.h | 2 --
include/linux/mm_types.h | 7 +---
mm/huge_memory.c | 31 ++++++++----------
mm/memory.c | 57 +++++++++++----------------------
6 files changed, 35 insertions(+), 103 deletions(-)
--
2.39.2
This functionality's sole user, the drm ttm module, removed support for it
in commit 0d979509539e ("drm/ttm: remove ttm_bo_vm_insert_huge()") as the
whole approach is currently unworkable without a PMD/PUD special bit and
updates to GUP.
Signed-off-by: Lorenzo Stoakes <[email protected]>
---
include/linux/huge_mm.h | 39 ++-------------------------------------
mm/huge_memory.c | 31 +++++++++++++------------------
2 files changed, 15 insertions(+), 55 deletions(-)
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 70bd867eba94..3c52fc9d85dc 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -39,44 +39,9 @@ bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
pmd_t *pmd, unsigned long addr, pgprot_t newprot,
unsigned long cp_flags);
-vm_fault_t vmf_insert_pfn_pmd_prot(struct vm_fault *vmf, pfn_t pfn,
- pgprot_t pgprot, bool write);
-/**
- * vmf_insert_pfn_pmd - insert a pmd size pfn
- * @vmf: Structure describing the fault
- * @pfn: pfn to insert
- * @pgprot: page protection to use
- * @write: whether it's a write fault
- *
- * Insert a pmd size pfn. See vmf_insert_pfn() for additional info.
- *
- * Return: vm_fault_t value.
- */
-static inline vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn,
- bool write)
-{
- return vmf_insert_pfn_pmd_prot(vmf, pfn, vmf->vma->vm_page_prot, write);
-}
-vm_fault_t vmf_insert_pfn_pud_prot(struct vm_fault *vmf, pfn_t pfn,
- pgprot_t pgprot, bool write);
-
-/**
- * vmf_insert_pfn_pud - insert a pud size pfn
- * @vmf: Structure describing the fault
- * @pfn: pfn to insert
- * @pgprot: page protection to use
- * @write: whether it's a write fault
- *
- * Insert a pud size pfn. See vmf_insert_pfn() for additional info.
- *
- * Return: vm_fault_t value.
- */
-static inline vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn,
- bool write)
-{
- return vmf_insert_pfn_pud_prot(vmf, pfn, vmf->vma->vm_page_prot, write);
-}
+vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write);
+vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write);
enum transparent_hugepage_flag {
TRANSPARENT_HUGEPAGE_NEVER_DAX,
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index b0ab247939e0..5a0e5e84ab13 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -889,23 +889,20 @@ static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
}
/**
- * vmf_insert_pfn_pmd_prot - insert a pmd size pfn
+ * vmf_insert_pfn_pmd - insert a pmd size pfn
* @vmf: Structure describing the fault
* @pfn: pfn to insert
- * @pgprot: page protection to use
* @write: whether it's a write fault
*
- * Insert a pmd size pfn. See vmf_insert_pfn() for additional info and
- * also consult the vmf_insert_mixed_prot() documentation when
- * @pgprot != @vmf->vma->vm_page_prot.
+ * Insert a pmd size pfn. See vmf_insert_pfn() for additional info.
*
* Return: vm_fault_t value.
*/
-vm_fault_t vmf_insert_pfn_pmd_prot(struct vm_fault *vmf, pfn_t pfn,
- pgprot_t pgprot, bool write)
+vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write)
{
unsigned long addr = vmf->address & PMD_MASK;
struct vm_area_struct *vma = vmf->vma;
+ pgprot_t pgprot = vma->vm_page_prot;
pgtable_t pgtable = NULL;
/*
@@ -933,7 +930,7 @@ vm_fault_t vmf_insert_pfn_pmd_prot(struct vm_fault *vmf, pfn_t pfn,
insert_pfn_pmd(vma, addr, vmf->pmd, pfn, pgprot, write, pgtable);
return VM_FAULT_NOPAGE;
}
-EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd_prot);
+EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd);
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma)
@@ -944,9 +941,10 @@ static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma)
}
static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
- pud_t *pud, pfn_t pfn, pgprot_t prot, bool write)
+ pud_t *pud, pfn_t pfn, bool write)
{
struct mm_struct *mm = vma->vm_mm;
+ pgprot_t prot = vma->vm_page_prot;
pud_t entry;
spinlock_t *ptl;
@@ -980,23 +978,20 @@ static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
}
/**
- * vmf_insert_pfn_pud_prot - insert a pud size pfn
+ * vmf_insert_pfn_pud - insert a pud size pfn
* @vmf: Structure describing the fault
* @pfn: pfn to insert
- * @pgprot: page protection to use
* @write: whether it's a write fault
*
- * Insert a pud size pfn. See vmf_insert_pfn() for additional info and
- * also consult the vmf_insert_mixed_prot() documentation when
- * @pgprot != @vmf->vma->vm_page_prot.
+ * Insert a pud size pfn. See vmf_insert_pfn() for additional info.
*
* Return: vm_fault_t value.
*/
-vm_fault_t vmf_insert_pfn_pud_prot(struct vm_fault *vmf, pfn_t pfn,
- pgprot_t pgprot, bool write)
+vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write)
{
unsigned long addr = vmf->address & PUD_MASK;
struct vm_area_struct *vma = vmf->vma;
+ pgprot_t pgprot = vma->vm_page_prot;
/*
* If we had pud_special, we could avoid all these restrictions,
@@ -1014,10 +1009,10 @@ vm_fault_t vmf_insert_pfn_pud_prot(struct vm_fault *vmf, pfn_t pfn,
track_pfn_insert(vma, &pgprot, pfn);
- insert_pfn_pud(vma, addr, vmf->pud, pfn, pgprot, write);
+ insert_pfn_pud(vma, addr, vmf->pud, pfn, write);
return VM_FAULT_NOPAGE;
}
-EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud_prot);
+EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud);
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
--
2.39.2
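For reference, this is roughly what a caller looks like against the simplified
interface after the patch above. A minimal sketch only: the huge_fault wiring
and the my_dev_addr_to_pfn() helper are hypothetical and assume a driver that
set VM_PFNMAP on the VMA at mmap() time; only the vmf_insert_pfn_pmd() /
vmf_insert_pfn_pud() calls reflect the signatures introduced here.

#include <linux/huge_mm.h>
#include <linux/mm.h>
#include <linux/pfn_t.h>

/* Hypothetical helper: resolve the backing pfn for this fault. */
static unsigned long my_dev_addr_to_pfn(struct vm_fault *vmf);

static vm_fault_t my_dev_huge_fault(struct vm_fault *vmf,
				    enum page_entry_size pe_size)
{
	unsigned long pfn = my_dev_addr_to_pfn(vmf);
	bool write = vmf->flags & FAULT_FLAG_WRITE;

	switch (pe_size) {
	case PE_SIZE_PMD:
		/* Protection bits now always come from vmf->vma->vm_page_prot. */
		return vmf_insert_pfn_pmd(vmf, pfn_to_pfn_t(pfn), write);
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
	case PE_SIZE_PUD:
		return vmf_insert_pfn_pud(vmf, pfn_to_pfn_t(pfn), write);
#endif
	default:
		return VM_FAULT_FALLBACK;
	}
}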
This function no longer exists; however, the discussion of the prot !=
vma->vm_page_prot case has been retained and moved to vmf_insert_pfn_prot(),
so refer to that instead.
Signed-off-by: Lorenzo Stoakes <[email protected]>
---
drivers/gpu/drm/ttm/ttm_bo_vm.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index ca7744b852f5..5df3edadb808 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -254,7 +254,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
* encryption bits. This is because the exact location of the
* data may not be known at mmap() time and may also change
* at arbitrary times while the data is mmap'ed.
- * See vmf_insert_mixed_prot() for a discussion.
+ * See vmf_insert_pfn_prot() for a discussion.
*/
ret = vmf_insert_pfn_prot(vma, address, pfn, prot);
--
2.39.2
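For reference, the pattern the updated comment points at is sketched below: a
fault handler that derives a per-fault pgprot from vma->vm_page_prot (here
switching to write-combining for I/O memory) and passes it explicitly to
vmf_insert_pfn_prot(). The my_vma_pfn() and my_vma_is_iomem() helpers are
made-up stand-ins for the driver's own lookup, not TTM code.

static vm_fault_t my_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	unsigned long pfn = my_vma_pfn(vma, vmf->address); /* hypothetical */
	pgprot_t prot = vma->vm_page_prot;

	/*
	 * The caching mode may only be known at fault time, so adjust the
	 * protection here rather than relying on what was cached in the
	 * VMA at mmap() time.
	 */
	if (my_vma_is_iomem(vma))	/* hypothetical predicate */
		prot = pgprot_writecombine(prot);

	return vmf_insert_pfn_prot(vma, vmf->address, pfn, prot);
}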
The sole user of vmf_insert_mixed_prot(), the drm ttm module, stopped using
this in commit f91142c62161 ("drm/ttm: nuke VM_MIXEDMAP on BO mappings v3"),
citing the use of VM_MIXEDMAP in this case as being terribly broken.
Remove this now-dead code and references to it, but retain the useful
description of the prot != vma->vm_page_prot case, moving it to
vmf_insert_pfn_prot() instead.
Signed-off-by: Lorenzo Stoakes <[email protected]>
---
include/linux/mm.h | 2 --
include/linux/mm_types.h | 7 +----
mm/memory.c | 57 +++++++++++++---------------------------
3 files changed, 19 insertions(+), 47 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index ce1590933995..ee755bb4e1c1 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -3330,8 +3330,6 @@ vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn, pgprot_t pgprot);
vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
pfn_t pfn);
-vm_fault_t vmf_insert_mixed_prot(struct vm_area_struct *vma, unsigned long addr,
- pfn_t pfn, pgprot_t pgprot);
vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
unsigned long addr, pfn_t pfn);
int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 3a028db80ffb..5ef0d0da328a 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -502,12 +502,7 @@ struct vm_area_struct {
};
struct mm_struct *vm_mm; /* The address space we belong to. */
-
- /*
- * Access permissions of this VMA.
- * See vmf_insert_mixed_prot() for discussion.
- */
- pgprot_t vm_page_prot;
+ pgprot_t vm_page_prot; /* Access permissions of this VMA. */
/*
* Flags, see mm.h.
diff --git a/mm/memory.c b/mm/memory.c
index 7ca7951adcf5..ee6bcd747867 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2147,8 +2147,20 @@ static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr,
* vmf_insert_pfn_prot should only be used if using multiple VMAs is
* impractical.
*
- * See vmf_insert_mixed_prot() for a discussion of the implication of using
- * a value of @pgprot different from that of @vma->vm_page_prot.
+ * pgprot typically only differs from @vma->vm_page_prot when drivers set
+ * caching- and encryption bits different than those of @vma->vm_page_prot,
+ * because the caching- or encryption mode may not be known at mmap() time.
+ *
+ * This is ok as long as @vma->vm_page_prot is not used by the core vm
+ * to set caching and encryption bits for those vmas (except for COW pages).
+ * This is ensured by core vm only modifying these page table entries using
+ * functions that don't touch caching- or encryption bits, using pte_modify()
+ * if needed. (See for example mprotect()).
+ *
+ * Also when new page-table entries are created, this is only done using the
+ * fault() callback, and never using the value of vma->vm_page_prot,
+ * except for page-table entries that point to anonymous pages as the result
+ * of COW.
*
* Context: Process context. May allocate using %GFP_KERNEL.
* Return: vm_fault_t value.
@@ -2223,9 +2235,9 @@ static bool vm_mixed_ok(struct vm_area_struct *vma, pfn_t pfn)
}
static vm_fault_t __vm_insert_mixed(struct vm_area_struct *vma,
- unsigned long addr, pfn_t pfn, pgprot_t pgprot,
- bool mkwrite)
+ unsigned long addr, pfn_t pfn, bool mkwrite)
{
+ pgprot_t pgprot = vma->vm_page_prot;
int err;
BUG_ON(!vm_mixed_ok(vma, pfn));
@@ -2268,43 +2280,10 @@ static vm_fault_t __vm_insert_mixed(struct vm_area_struct *vma,
return VM_FAULT_NOPAGE;
}
-/**
- * vmf_insert_mixed_prot - insert single pfn into user vma with specified pgprot
- * @vma: user vma to map to
- * @addr: target user address of this page
- * @pfn: source kernel pfn
- * @pgprot: pgprot flags for the inserted page
- *
- * This is exactly like vmf_insert_mixed(), except that it allows drivers
- * to override pgprot on a per-page basis.
- *
- * Typically this function should be used by drivers to set caching- and
- * encryption bits different than those of @vma->vm_page_prot, because
- * the caching- or encryption mode may not be known at mmap() time.
- * This is ok as long as @vma->vm_page_prot is not used by the core vm
- * to set caching and encryption bits for those vmas (except for COW pages).
- * This is ensured by core vm only modifying these page table entries using
- * functions that don't touch caching- or encryption bits, using pte_modify()
- * if needed. (See for example mprotect()).
- * Also when new page-table entries are created, this is only done using the
- * fault() callback, and never using the value of vma->vm_page_prot,
- * except for page-table entries that point to anonymous pages as the result
- * of COW.
- *
- * Context: Process context. May allocate using %GFP_KERNEL.
- * Return: vm_fault_t value.
- */
-vm_fault_t vmf_insert_mixed_prot(struct vm_area_struct *vma, unsigned long addr,
- pfn_t pfn, pgprot_t pgprot)
-{
- return __vm_insert_mixed(vma, addr, pfn, pgprot, false);
-}
-EXPORT_SYMBOL(vmf_insert_mixed_prot);
-
vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
pfn_t pfn)
{
- return __vm_insert_mixed(vma, addr, pfn, vma->vm_page_prot, false);
+ return __vm_insert_mixed(vma, addr, pfn, false);
}
EXPORT_SYMBOL(vmf_insert_mixed);
@@ -2316,7 +2295,7 @@ EXPORT_SYMBOL(vmf_insert_mixed);
vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
unsigned long addr, pfn_t pfn)
{
- return __vm_insert_mixed(vma, addr, pfn, vma->vm_page_prot, true);
+ return __vm_insert_mixed(vma, addr, pfn, true);
}
EXPORT_SYMBOL(vmf_insert_mixed_mkwrite);
--
2.39.2
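After this patch the remaining mixed-map entry points take no pgprot at all,
so a VM_MIXEDMAP fault handler reduces to something like the sketch below.
my_lookup_pfn() is purely illustrative, not a real helper.

static vm_fault_t my_mixed_fault(struct vm_fault *vmf)
{
	/* Hypothetical lookup of the backing pfn for this address. */
	pfn_t pfn = my_lookup_pfn(vmf->vma, vmf->address);

	/*
	 * The entry is always created with vmf->vma->vm_page_prot now;
	 * a driver that needs per-fault protection bits must use a
	 * VM_PFNMAP mapping and vmf_insert_pfn_prot() instead.
	 */
	return vmf_insert_mixed(vmf->vma, vmf->address, pfn);
}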
On 13.03.23 at 00:40, Lorenzo Stoakes wrote:
> This function no longer exists; however, the discussion of the prot !=
> vma->vm_page_prot case has been retained and moved to vmf_insert_pfn_prot(),
> so refer to that instead.
>
> Signed-off-by: Lorenzo Stoakes <[email protected]>
Reviewed-by: Christian König <[email protected]>
Feel free to add my acked-by to the other two patches in the series and
upstream through any branch you like.
Alternatively ping me when you get rbs for the first two patches from
the MM people and I can push this upstream through drm-misc.
Thanks,
Christian.
> ---
> drivers/gpu/drm/ttm/ttm_bo_vm.c | 2 +-
> 1 file changed, 1 insertion(+), 1 deletion(-)
>
> diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
> index ca7744b852f5..5df3edadb808 100644
> --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
> +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
> @@ -254,7 +254,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
> * encryption bits. This is because the exact location of the
> * data may not be known at mmap() time and may also change
> * at arbitrary times while the data is mmap'ed.
> - * See vmf_insert_mixed_prot() for a discussion.
> + * See vmf_insert_pfn_prot() for a discussion.
> */
> ret = vmf_insert_pfn_prot(vma, address, pfn, prot);
>
On Mon, Mar 13, 2023 at 08:47:21AM +0100, Christian König wrote:
> Am 13.03.23 um 00:40 schrieb Lorenzo Stoakes:
> > This function no longer exists; however, the discussion of the prot !=
> > vma->vm_page_prot case has been retained and moved to vmf_insert_pfn_prot(),
> > so refer to that instead.
> >
> > Signed-off-by: Lorenzo Stoakes <[email protected]>
>
> Reviewed-by: Christian König <[email protected]>
>
> Feel free to add my acked-by to the other two patches in the series and
> upstream through any branch you like.
>
> Alternatively ping me when you get rbs for the first two patches from the MM
> people and I can push this upstream through drm-misc.
>
> Thanks,
> Christian.
Thanks, much appreciated! I'd rather go through mm if possible, as Andrew has
already taken the series for mm-unstable pending further review, and that's
easiest for dealing with any mm feedback.
P.S. apologies for dropping the umlaut on your name in the initial patch; I really
need to sort out my terminal, which got horribly confused by non-ANSI characters :)
>
> > ---
> > drivers/gpu/drm/ttm/ttm_bo_vm.c | 2 +-
> > 1 file changed, 1 insertion(+), 1 deletion(-)
> >
> > diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
> > index ca7744b852f5..5df3edadb808 100644
> > --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
> > +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
> > @@ -254,7 +254,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
> > * encryption bits. This is because the exact location of the
> > * data may not be known at mmap() time and may also change
> > * at arbitrary times while the data is mmap'ed.
> > - * See vmf_insert_mixed_prot() for a discussion.
> > + * See vmf_insert_pfn_prot() for a discussion.
> > */
> > ret = vmf_insert_pfn_prot(vma, address, pfn, prot);
>