Kefeng Wang (5):
nios2: remove unused INIT_MMAP
x86/sgx: use VM_ACCESS_FLAGS
mm: mprotect: use VM_ACCESS_FLAGS
mm: debug_vm_pgtable: use VM_ACCESS_FLAGS
amdgpu: use VM_ACCESS_FLAGS
arch/nios2/include/asm/processor.h | 3 ---
arch/x86/kernel/cpu/sgx/encl.c | 4 ++--
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 2 +-
mm/debug_vm_pgtable.c | 8 ++------
mm/mprotect.c | 3 +--
5 files changed, 6 insertions(+), 14 deletions(-)
--
2.35.3
Directly use VM_ACCESS_FLAGS instead of VMFLAGS.
Signed-off-by: Kefeng Wang <[email protected]>
---
mm/debug_vm_pgtable.c | 8 ++------
1 file changed, 2 insertions(+), 6 deletions(-)
diff --git a/mm/debug_vm_pgtable.c b/mm/debug_vm_pgtable.c
index dc7df1254f0a..2b61fde8c38c 100644
--- a/mm/debug_vm_pgtable.c
+++ b/mm/debug_vm_pgtable.c
@@ -38,11 +38,7 @@
* Please refer Documentation/mm/arch_pgtable_helpers.rst for the semantics
* expectations that are being validated here. All future changes in here
* or the documentation need to be in sync.
- */
-
-#define VMFLAGS (VM_READ|VM_WRITE|VM_EXEC)
-
-/*
+ *
* On s390 platform, the lower 4 bits are used to identify given page table
* entry type. But these bits might affect the ability to clear entries with
* pxx_clear() because of how dynamic page table folding works on s390. So
@@ -1125,7 +1121,7 @@ static int __init init_args(struct pgtable_debug_args *args)
*/
memset(args, 0, sizeof(*args));
args->vaddr = get_random_vaddr();
- args->page_prot = vm_get_page_prot(VMFLAGS);
+ args->page_prot = vm_get_page_prot(VM_ACCESS_FLAGS);
args->page_prot_none = vm_get_page_prot(VM_NONE);
args->is_contiguous_page = false;
args->pud_pfn = ULONG_MAX;
--
2.35.3
Simplify VM_READ|VM_WRITE|VM_EXEC with VM_ACCESS_FLAGS.
Cc: Jarkko Sakkinen <[email protected]>
Cc: Dave Hansen <[email protected]>
Signed-off-by: Kefeng Wang <[email protected]>
---
arch/x86/kernel/cpu/sgx/encl.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/arch/x86/kernel/cpu/sgx/encl.c b/arch/x86/kernel/cpu/sgx/encl.c
index 1ec20807de1e..6225c525372d 100644
--- a/arch/x86/kernel/cpu/sgx/encl.c
+++ b/arch/x86/kernel/cpu/sgx/encl.c
@@ -268,7 +268,7 @@ static struct sgx_encl_page *sgx_encl_load_page_in_vma(struct sgx_encl *encl,
unsigned long addr,
unsigned long vm_flags)
{
- unsigned long vm_prot_bits = vm_flags & (VM_READ | VM_WRITE | VM_EXEC);
+ unsigned long vm_prot_bits = vm_flags & VM_ACCESS_FLAGS;
struct sgx_encl_page *entry;
entry = xa_load(&encl->page_array, PFN_DOWN(addr));
@@ -502,7 +502,7 @@ static void sgx_vma_open(struct vm_area_struct *vma)
int sgx_encl_may_map(struct sgx_encl *encl, unsigned long start,
unsigned long end, unsigned long vm_flags)
{
- unsigned long vm_prot_bits = vm_flags & (VM_READ | VM_WRITE | VM_EXEC);
+ unsigned long vm_prot_bits = vm_flags & VM_ACCESS_FLAGS;
struct sgx_encl_page *page;
unsigned long count = 0;
int ret = 0;
--
2.35.3
On Sun, Oct 23, 2022 at 11:07:47PM +0300, Jarkko Sakkinen wrote:
> On Wed, Oct 19, 2022 at 11:49:42AM +0800, Kefeng Wang wrote:
> > Simplify VM_READ|VM_WRITE|VM_EXEC with VM_ACCESS_FLAGS.
> >
> > Cc: Jarkko Sakkinen <[email protected]>
> > Cc: Dave Hansen <[email protected]>
> > Signed-off-by: Kefeng Wang <[email protected]>
> > ---
> > arch/x86/kernel/cpu/sgx/encl.c | 4 ++--
> > 1 file changed, 2 insertions(+), 2 deletions(-)
> >
> > diff --git a/arch/x86/kernel/cpu/sgx/encl.c b/arch/x86/kernel/cpu/sgx/encl.c
> > index 1ec20807de1e..6225c525372d 100644
> > --- a/arch/x86/kernel/cpu/sgx/encl.c
> > +++ b/arch/x86/kernel/cpu/sgx/encl.c
> > @@ -268,7 +268,7 @@ static struct sgx_encl_page *sgx_encl_load_page_in_vma(struct sgx_encl *encl,
> > unsigned long addr,
> > unsigned long vm_flags)
> > {
> > - unsigned long vm_prot_bits = vm_flags & (VM_READ | VM_WRITE | VM_EXEC);
> > + unsigned long vm_prot_bits = vm_flags & VM_ACCESS_FLAGS;
> > struct sgx_encl_page *entry;
> >
> > entry = xa_load(&encl->page_array, PFN_DOWN(addr));
> > @@ -502,7 +502,7 @@ static void sgx_vma_open(struct vm_area_struct *vma)
> > int sgx_encl_may_map(struct sgx_encl *encl, unsigned long start,
> > unsigned long end, unsigned long vm_flags)
> > {
> > - unsigned long vm_prot_bits = vm_flags & (VM_READ | VM_WRITE | VM_EXEC);
> > + unsigned long vm_prot_bits = vm_flags & VM_ACCESS_FLAGS;
> > struct sgx_encl_page *page;
> > unsigned long count = 0;
> > int ret = 0;
> > --
> > 2.35.3
> >
>
> Why?
The only benefit I see is a downside: you have to xref VM_ACCESS_FLAGS, which
is counter-productive. Zero gain.
BR, Jarkko
On Wed, Oct 19, 2022 at 11:49:42AM +0800, Kefeng Wang wrote:
> Simplify VM_READ|VM_WRITE|VM_EXEC with VM_ACCESS_FLAGS.
>
> Cc: Jarkko Sakkinen <[email protected]>
> Cc: Dave Hansen <[email protected]>
> Signed-off-by: Kefeng Wang <[email protected]>
> ---
> arch/x86/kernel/cpu/sgx/encl.c | 4 ++--
> 1 file changed, 2 insertions(+), 2 deletions(-)
>
> diff --git a/arch/x86/kernel/cpu/sgx/encl.c b/arch/x86/kernel/cpu/sgx/encl.c
> index 1ec20807de1e..6225c525372d 100644
> --- a/arch/x86/kernel/cpu/sgx/encl.c
> +++ b/arch/x86/kernel/cpu/sgx/encl.c
> @@ -268,7 +268,7 @@ static struct sgx_encl_page *sgx_encl_load_page_in_vma(struct sgx_encl *encl,
> unsigned long addr,
> unsigned long vm_flags)
> {
> - unsigned long vm_prot_bits = vm_flags & (VM_READ | VM_WRITE | VM_EXEC);
> + unsigned long vm_prot_bits = vm_flags & VM_ACCESS_FLAGS;
> struct sgx_encl_page *entry;
>
> entry = xa_load(&encl->page_array, PFN_DOWN(addr));
> @@ -502,7 +502,7 @@ static void sgx_vma_open(struct vm_area_struct *vma)
> int sgx_encl_may_map(struct sgx_encl *encl, unsigned long start,
> unsigned long end, unsigned long vm_flags)
> {
> - unsigned long vm_prot_bits = vm_flags & (VM_READ | VM_WRITE | VM_EXEC);
> + unsigned long vm_prot_bits = vm_flags & VM_ACCESS_FLAGS;
> struct sgx_encl_page *page;
> unsigned long count = 0;
> int ret = 0;
> --
> 2.35.3
>
Why?
BR, Jarkko