v2:
-- add copy_to_user_or_kernel() in lib/usercopy.c
-- define userbuf as bool type
Tiezhu Yang (2):
kdump: vmcore: remove copy_to() and add copy_to_user_or_kernel()
kdump: crashdump: use copy_to_user_or_kernel() to simplify code
arch/arm/kernel/crash_dump.c | 12 +++---------
arch/arm64/kernel/crash_dump.c | 12 +++---------
arch/ia64/kernel/crash_dump.c | 12 +++++-------
arch/mips/kernel/crash_dump.c | 11 +++--------
arch/powerpc/kernel/crash_dump.c | 11 ++++-------
arch/riscv/kernel/crash_dump.c | 11 +++--------
arch/sh/kernel/crash_dump.c | 11 +++--------
arch/x86/kernel/crash_dump_32.c | 11 +++--------
arch/x86/kernel/crash_dump_64.c | 15 +++++----------
fs/proc/vmcore.c | 32 +++++++++-----------------------
include/linux/crash_dump.h | 8 ++++----
include/linux/uaccess.h | 1 +
lib/usercopy.c | 15 +++++++++++++++
13 files changed, 61 insertions(+), 101 deletions(-)
--
2.1.0
In arch/*/kernel/crash_dump*.c, there is a lot of similar code
around copy_oldmem_page(); remove copy_to() in fs/proc/vmcore.c
and add copy_to_user_or_kernel() in lib/usercopy.c, then we can
use copy_to_user_or_kernel() to simplify the related code.
Signed-off-by: Tiezhu Yang <[email protected]>
---
fs/proc/vmcore.c | 28 +++++++---------------------
include/linux/uaccess.h | 1 +
lib/usercopy.c | 15 +++++++++++++++
3 files changed, 23 insertions(+), 21 deletions(-)
diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
index 509f851..f67fd77 100644
--- a/fs/proc/vmcore.c
+++ b/fs/proc/vmcore.c
@@ -238,22 +238,8 @@ copy_oldmem_page_encrypted(unsigned long pfn, char *buf, size_t csize,
return copy_oldmem_page(pfn, buf, csize, offset, userbuf);
}
-/*
- * Copy to either kernel or user space
- */
-static int copy_to(void *target, void *src, size_t size, int userbuf)
-{
- if (userbuf) {
- if (copy_to_user((char __user *) target, src, size))
- return -EFAULT;
- } else {
- memcpy(target, src, size);
- }
- return 0;
-}
-
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
-static int vmcoredd_copy_dumps(void *dst, u64 start, size_t size, int userbuf)
+static int vmcoredd_copy_dumps(void *dst, u64 start, size_t size, bool userbuf)
{
struct vmcoredd_node *dump;
u64 offset = 0;
@@ -266,7 +252,7 @@ static int vmcoredd_copy_dumps(void *dst, u64 start, size_t size, int userbuf)
if (start < offset + dump->size) {
tsz = min(offset + (u64)dump->size - start, (u64)size);
buf = dump->buf + start - offset;
- if (copy_to(dst, buf, tsz, userbuf)) {
+ if (copy_to_user_or_kernel(dst, buf, tsz, userbuf)) {
ret = -EFAULT;
goto out_unlock;
}
@@ -330,7 +316,7 @@ static int vmcoredd_mmap_dumps(struct vm_area_struct *vma, unsigned long dst,
* returned otherwise number of bytes read are returned.
*/
static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
- int userbuf)
+ bool userbuf)
{
ssize_t acc = 0, tmp;
size_t tsz;
@@ -347,7 +333,7 @@ static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
/* Read ELF core header */
if (*fpos < elfcorebuf_sz) {
tsz = min(elfcorebuf_sz - (size_t)*fpos, buflen);
- if (copy_to(buffer, elfcorebuf + *fpos, tsz, userbuf))
+ if (copy_to_user_or_kernel(buffer, elfcorebuf + *fpos, tsz, userbuf))
return -EFAULT;
buflen -= tsz;
*fpos += tsz;
@@ -395,7 +381,7 @@ static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
/* Read remaining elf notes */
tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)*fpos, buflen);
kaddr = elfnotes_buf + *fpos - elfcorebuf_sz - vmcoredd_orig_sz;
- if (copy_to(buffer, kaddr, tsz, userbuf))
+ if (copy_to_user_or_kernel(buffer, kaddr, tsz, userbuf))
return -EFAULT;
buflen -= tsz;
@@ -435,7 +421,7 @@ static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
static ssize_t read_vmcore(struct file *file, char __user *buffer,
size_t buflen, loff_t *fpos)
{
- return __read_vmcore((__force char *) buffer, buflen, fpos, 1);
+ return __read_vmcore((__force char *) buffer, buflen, fpos, true);
}
/*
@@ -461,7 +447,7 @@ static vm_fault_t mmap_vmcore_fault(struct vm_fault *vmf)
if (!PageUptodate(page)) {
offset = (loff_t) index << PAGE_SHIFT;
buf = __va((page_to_pfn(page) << PAGE_SHIFT));
- rc = __read_vmcore(buf, PAGE_SIZE, &offset, 0);
+ rc = __read_vmcore(buf, PAGE_SIZE, &offset, false);
if (rc < 0) {
unlock_page(page);
put_page(page);
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index ac03940..a25e682e 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -283,6 +283,7 @@ __copy_from_user_inatomic_nocache(void *to, const void __user *from,
#endif /* ARCH_HAS_NOCACHE_UACCESS */
extern __must_check int check_zeroed_user(const void __user *from, size_t size);
+extern __must_check int copy_to_user_or_kernel(void *target, void *src, size_t size, bool userbuf);
/**
* copy_struct_from_user: copy a struct from userspace
diff --git a/lib/usercopy.c b/lib/usercopy.c
index 7413dd3..7431b1b 100644
--- a/lib/usercopy.c
+++ b/lib/usercopy.c
@@ -90,3 +90,18 @@ int check_zeroed_user(const void __user *from, size_t size)
return -EFAULT;
}
EXPORT_SYMBOL(check_zeroed_user);
+
+/*
+ * Copy to either user or kernel space
+ */
+int copy_to_user_or_kernel(void *target, void *src, size_t size, bool userbuf)
+{
+ if (userbuf) {
+ if (copy_to_user((char __user *) target, src, size))
+ return -EFAULT;
+ } else {
+ memcpy(target, src, size);
+ }
+ return 0;
+}
+EXPORT_SYMBOL(copy_to_user_or_kernel);
--
2.1.0
Use copy_to_user_or_kernel() to simplify the related code about
copy_oldmem_page() in arch/*/kernel/crash_dump*.c files.
Signed-off-by: Tiezhu Yang <[email protected]>
---
arch/arm/kernel/crash_dump.c | 12 +++---------
arch/arm64/kernel/crash_dump.c | 12 +++---------
arch/ia64/kernel/crash_dump.c | 12 +++++-------
arch/mips/kernel/crash_dump.c | 11 +++--------
arch/powerpc/kernel/crash_dump.c | 11 ++++-------
arch/riscv/kernel/crash_dump.c | 11 +++--------
arch/sh/kernel/crash_dump.c | 11 +++--------
arch/x86/kernel/crash_dump_32.c | 11 +++--------
arch/x86/kernel/crash_dump_64.c | 15 +++++----------
fs/proc/vmcore.c | 4 ++--
include/linux/crash_dump.h | 8 ++++----
11 files changed, 38 insertions(+), 80 deletions(-)
diff --git a/arch/arm/kernel/crash_dump.c b/arch/arm/kernel/crash_dump.c
index 53cb924..a27c5df 100644
--- a/arch/arm/kernel/crash_dump.c
+++ b/arch/arm/kernel/crash_dump.c
@@ -29,7 +29,7 @@
*/
ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
size_t csize, unsigned long offset,
- int userbuf)
+ bool userbuf)
{
void *vaddr;
@@ -40,14 +40,8 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
if (!vaddr)
return -ENOMEM;
- if (userbuf) {
- if (copy_to_user(buf, vaddr + offset, csize)) {
- iounmap(vaddr);
- return -EFAULT;
- }
- } else {
- memcpy(buf, vaddr + offset, csize);
- }
+ if (copy_to_user_or_kernel(buf, vaddr + offset, csize, userbuf))
+ csize = -EFAULT;
iounmap(vaddr);
return csize;
diff --git a/arch/arm64/kernel/crash_dump.c b/arch/arm64/kernel/crash_dump.c
index 58303a9..d22988f 100644
--- a/arch/arm64/kernel/crash_dump.c
+++ b/arch/arm64/kernel/crash_dump.c
@@ -27,7 +27,7 @@
*/
ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
size_t csize, unsigned long offset,
- int userbuf)
+ bool userbuf)
{
void *vaddr;
@@ -38,14 +38,8 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
if (!vaddr)
return -ENOMEM;
- if (userbuf) {
- if (copy_to_user((char __user *)buf, vaddr + offset, csize)) {
- memunmap(vaddr);
- return -EFAULT;
- }
- } else {
- memcpy(buf, vaddr + offset, csize);
- }
+ if (copy_to_user_or_kernel(buf, vaddr + offset, csize, userbuf))
+ csize = -EFAULT;
memunmap(vaddr);
diff --git a/arch/ia64/kernel/crash_dump.c b/arch/ia64/kernel/crash_dump.c
index 0ed3c3d..12128f8 100644
--- a/arch/ia64/kernel/crash_dump.c
+++ b/arch/ia64/kernel/crash_dump.c
@@ -33,19 +33,17 @@
*/
ssize_t
copy_oldmem_page(unsigned long pfn, char *buf,
- size_t csize, unsigned long offset, int userbuf)
+ size_t csize, unsigned long offset, bool userbuf)
{
void *vaddr;
if (!csize)
return 0;
+
vaddr = __va(pfn<<PAGE_SHIFT);
- if (userbuf) {
- if (copy_to_user(buf, (vaddr + offset), csize)) {
- return -EFAULT;
- }
- } else
- memcpy(buf, (vaddr + offset), csize);
+ if (copy_to_user_or_kernel(buf, vaddr + offset, csize, userbuf))
+ return -EFAULT;
+
return csize;
}
diff --git a/arch/mips/kernel/crash_dump.c b/arch/mips/kernel/crash_dump.c
index 2e50f551..7670915 100644
--- a/arch/mips/kernel/crash_dump.c
+++ b/arch/mips/kernel/crash_dump.c
@@ -16,7 +16,7 @@
* in the current kernel.
*/
ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
- size_t csize, unsigned long offset, int userbuf)
+ size_t csize, unsigned long offset, bool userbuf)
{
void *vaddr;
@@ -24,13 +24,8 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
return 0;
vaddr = kmap_local_pfn(pfn);
-
- if (!userbuf) {
- memcpy(buf, vaddr + offset, csize);
- } else {
- if (copy_to_user(buf, vaddr + offset, csize))
- csize = -EFAULT;
- }
+ if (copy_to_user_or_kernel(buf, vaddr + offset, csize, userbuf))
+ csize = -EFAULT;
kunmap_local(vaddr);
diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c
index 5693e1c67..e2e9612 100644
--- a/arch/powerpc/kernel/crash_dump.c
+++ b/arch/powerpc/kernel/crash_dump.c
@@ -69,13 +69,10 @@ void __init setup_kdump_trampoline(void)
#endif /* CONFIG_NONSTATIC_KERNEL */
static size_t copy_oldmem_vaddr(void *vaddr, char *buf, size_t csize,
- unsigned long offset, int userbuf)
+ unsigned long offset, bool userbuf)
{
- if (userbuf) {
- if (copy_to_user((char __user *)buf, (vaddr + offset), csize))
- return -EFAULT;
- } else
- memcpy(buf, (vaddr + offset), csize);
+ if (copy_to_user_or_kernel(buf, vaddr + offset, csize, userbuf))
+ return -EFAULT;
return csize;
}
@@ -94,7 +91,7 @@ static size_t copy_oldmem_vaddr(void *vaddr, char *buf, size_t csize,
* in the current kernel. We stitch up a pte, similar to kmap_atomic.
*/
ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
- size_t csize, unsigned long offset, int userbuf)
+ size_t csize, unsigned long offset, bool userbuf)
{
void *vaddr;
phys_addr_t paddr;
diff --git a/arch/riscv/kernel/crash_dump.c b/arch/riscv/kernel/crash_dump.c
index 86cc0ad..4167437 100644
--- a/arch/riscv/kernel/crash_dump.c
+++ b/arch/riscv/kernel/crash_dump.c
@@ -22,7 +22,7 @@
*/
ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
size_t csize, unsigned long offset,
- int userbuf)
+ bool userbuf)
{
void *vaddr;
@@ -33,13 +33,8 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
if (!vaddr)
return -ENOMEM;
- if (userbuf) {
- if (copy_to_user((char __user *)buf, vaddr + offset, csize)) {
- memunmap(vaddr);
- return -EFAULT;
- }
- } else
- memcpy(buf, vaddr + offset, csize);
+ if (copy_to_user_or_kernel(buf, vaddr + offset, csize, userbuf))
+ csize = -EFAULT;
memunmap(vaddr);
return csize;
diff --git a/arch/sh/kernel/crash_dump.c b/arch/sh/kernel/crash_dump.c
index 5b41b59..4bc071a 100644
--- a/arch/sh/kernel/crash_dump.c
+++ b/arch/sh/kernel/crash_dump.c
@@ -24,7 +24,7 @@
* in the current kernel. We stitch up a pte, similar to kmap_atomic.
*/
ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
- size_t csize, unsigned long offset, int userbuf)
+ size_t csize, unsigned long offset, bool userbuf)
{
void __iomem *vaddr;
@@ -33,13 +33,8 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
- if (userbuf) {
- if (copy_to_user((void __user *)buf, (vaddr + offset), csize)) {
- iounmap(vaddr);
- return -EFAULT;
- }
- } else
- memcpy(buf, (vaddr + offset), csize);
+ if (copy_to_user_or_kernel(buf, vaddr + offset, csize, userbuf))
+ csize = -EFAULT;
iounmap(vaddr);
return csize;
diff --git a/arch/x86/kernel/crash_dump_32.c b/arch/x86/kernel/crash_dump_32.c
index 5fcac46..3eff124 100644
--- a/arch/x86/kernel/crash_dump_32.c
+++ b/arch/x86/kernel/crash_dump_32.c
@@ -43,7 +43,7 @@ static inline bool is_crashed_pfn_valid(unsigned long pfn)
* in the current kernel.
*/
ssize_t copy_oldmem_page(unsigned long pfn, char *buf, size_t csize,
- unsigned long offset, int userbuf)
+ unsigned long offset, bool userbuf)
{
void *vaddr;
@@ -54,13 +54,8 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf, size_t csize,
return -EFAULT;
vaddr = kmap_local_pfn(pfn);
-
- if (!userbuf) {
- memcpy(buf, vaddr + offset, csize);
- } else {
- if (copy_to_user(buf, vaddr + offset, csize))
- csize = -EFAULT;
- }
+ if (copy_to_user_or_kernel(buf, vaddr + offset, csize, userbuf))
+ csize = -EFAULT;
kunmap_local(vaddr);
diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c
index a7f617a..e8fffdf 100644
--- a/arch/x86/kernel/crash_dump_64.c
+++ b/arch/x86/kernel/crash_dump_64.c
@@ -13,7 +13,7 @@
#include <linux/cc_platform.h>
static ssize_t __copy_oldmem_page(unsigned long pfn, char *buf, size_t csize,
- unsigned long offset, int userbuf,
+ unsigned long offset, bool userbuf,
bool encrypted)
{
void *vaddr;
@@ -29,13 +29,8 @@ static ssize_t __copy_oldmem_page(unsigned long pfn, char *buf, size_t csize,
if (!vaddr)
return -ENOMEM;
- if (userbuf) {
- if (copy_to_user((void __user *)buf, vaddr + offset, csize)) {
- iounmap((void __iomem *)vaddr);
- return -EFAULT;
- }
- } else
- memcpy(buf, vaddr + offset, csize);
+ if (copy_to_user_or_kernel(buf, vaddr + offset, csize, userbuf))
+ csize = -EFAULT;
set_iounmap_nonlazy();
iounmap((void __iomem *)vaddr);
@@ -56,7 +51,7 @@ static ssize_t __copy_oldmem_page(unsigned long pfn, char *buf, size_t csize,
* mapped in the current kernel. We stitch up a pte, similar to kmap_atomic.
*/
ssize_t copy_oldmem_page(unsigned long pfn, char *buf, size_t csize,
- unsigned long offset, int userbuf)
+ unsigned long offset, bool userbuf)
{
return __copy_oldmem_page(pfn, buf, csize, offset, userbuf, false);
}
@@ -67,7 +62,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf, size_t csize,
* machines.
*/
ssize_t copy_oldmem_page_encrypted(unsigned long pfn, char *buf, size_t csize,
- unsigned long offset, int userbuf)
+ unsigned long offset, bool userbuf)
{
return __copy_oldmem_page(pfn, buf, csize, offset, userbuf, true);
}
diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
index f67fd77..bba52aa 100644
--- a/fs/proc/vmcore.c
+++ b/fs/proc/vmcore.c
@@ -133,7 +133,7 @@ static int open_vmcore(struct inode *inode, struct file *file)
/* Reads a page from the oldmem device from given offset. */
ssize_t read_from_oldmem(char *buf, size_t count,
- u64 *ppos, int userbuf,
+ u64 *ppos, bool userbuf,
bool encrypted)
{
unsigned long pfn, offset;
@@ -233,7 +233,7 @@ int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
*/
ssize_t __weak
copy_oldmem_page_encrypted(unsigned long pfn, char *buf, size_t csize,
- unsigned long offset, int userbuf)
+ unsigned long offset, bool userbuf)
{
return copy_oldmem_page(pfn, buf, csize, offset, userbuf);
}
diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
index 6208215..033448b 100644
--- a/include/linux/crash_dump.h
+++ b/include/linux/crash_dump.h
@@ -25,10 +25,10 @@ extern int remap_oldmem_pfn_range(struct vm_area_struct *vma,
unsigned long size, pgprot_t prot);
extern ssize_t copy_oldmem_page(unsigned long, char *, size_t,
- unsigned long, int);
+ unsigned long, bool);
extern ssize_t copy_oldmem_page_encrypted(unsigned long pfn, char *buf,
size_t csize, unsigned long offset,
- int userbuf);
+ bool userbuf);
void vmcore_cleanup(void);
@@ -136,11 +136,11 @@ static inline int vmcore_add_device_dump(struct vmcoredd_data *data)
#ifdef CONFIG_PROC_VMCORE
ssize_t read_from_oldmem(char *buf, size_t count,
- u64 *ppos, int userbuf,
+ u64 *ppos, bool userbuf,
bool encrypted);
#else
static inline ssize_t read_from_oldmem(char *buf, size_t count,
- u64 *ppos, int userbuf,
+ u64 *ppos, bool userbuf,
bool encrypted)
{
return -EOPNOTSUPP;
--
2.1.0
Le 11/12/2021 à 04:33, Tiezhu Yang a écrit :
> In arch/*/kernel/crash_dump*.c, there exist many similar code
> about copy_oldmem_page(), remove copy_to() in fs/proc/vmcore.c
> and add copy_to_user_or_kernel() in lib/usercopy.c, then we can
> use copy_to_user_or_kernel() to simplify the related code.
It should be an inline function in uaccess.h, see below why.
>
> Signed-off-by: Tiezhu Yang <[email protected]>
> ---
> fs/proc/vmcore.c | 28 +++++++---------------------
> include/linux/uaccess.h | 1 +
> lib/usercopy.c | 15 +++++++++++++++
> 3 files changed, 23 insertions(+), 21 deletions(-)
>
> diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
> index 509f851..f67fd77 100644
> --- a/fs/proc/vmcore.c
> +++ b/fs/proc/vmcore.c
> @@ -238,22 +238,8 @@ copy_oldmem_page_encrypted(unsigned long pfn, char *buf, size_t csize,
> return copy_oldmem_page(pfn, buf, csize, offset, userbuf);
> }
>
> -/*
> - * Copy to either kernel or user space
> - */
> -static int copy_to(void *target, void *src, size_t size, int userbuf)
> -{
> - if (userbuf) {
> - if (copy_to_user((char __user *) target, src, size))
> - return -EFAULT;
> - } else {
> - memcpy(target, src, size);
> - }
> - return 0;
> -}
> -
> #ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
> -static int vmcoredd_copy_dumps(void *dst, u64 start, size_t size, int userbuf)
> +static int vmcoredd_copy_dumps(void *dst, u64 start, size_t size, bool userbuf)
Changing int to bool in all the callers should be another patch. You can
have copy_to_user_or_kernel() take a bool in the patch while still
having all the callers using an int.
> {
> struct vmcoredd_node *dump;
> u64 offset = 0;
> @@ -266,7 +252,7 @@ static int vmcoredd_copy_dumps(void *dst, u64 start, size_t size, int userbuf)
> if (start < offset + dump->size) {
> tsz = min(offset + (u64)dump->size - start, (u64)size);
> buf = dump->buf + start - offset;
> - if (copy_to(dst, buf, tsz, userbuf)) {
> + if (copy_to_user_or_kernel(dst, buf, tsz, userbuf)) {
> ret = -EFAULT;
> goto out_unlock;
> }
> @@ -330,7 +316,7 @@ static int vmcoredd_mmap_dumps(struct vm_area_struct *vma, unsigned long dst,
> * returned otherwise number of bytes read are returned.
> */
> static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
> - int userbuf)
> + bool userbuf)
> {
> ssize_t acc = 0, tmp;
> size_t tsz;
> @@ -347,7 +333,7 @@ static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
> /* Read ELF core header */
> if (*fpos < elfcorebuf_sz) {
> tsz = min(elfcorebuf_sz - (size_t)*fpos, buflen);
> - if (copy_to(buffer, elfcorebuf + *fpos, tsz, userbuf))
> + if (copy_to_user_or_kernel(buffer, elfcorebuf + *fpos, tsz, userbuf))
> return -EFAULT;
> buflen -= tsz;
> *fpos += tsz;
> @@ -395,7 +381,7 @@ static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
> /* Read remaining elf notes */
> tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)*fpos, buflen);
> kaddr = elfnotes_buf + *fpos - elfcorebuf_sz - vmcoredd_orig_sz;
> - if (copy_to(buffer, kaddr, tsz, userbuf))
> + if (copy_to_user_or_kernel(buffer, kaddr, tsz, userbuf))
> return -EFAULT;
>
> buflen -= tsz;
> @@ -435,7 +421,7 @@ static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
> static ssize_t read_vmcore(struct file *file, char __user *buffer,
> size_t buflen, loff_t *fpos)
> {
> - return __read_vmcore((__force char *) buffer, buflen, fpos, 1);
> + return __read_vmcore((__force char *) buffer, buflen, fpos, true);
> }
>
> /*
> @@ -461,7 +447,7 @@ static vm_fault_t mmap_vmcore_fault(struct vm_fault *vmf)
> if (!PageUptodate(page)) {
> offset = (loff_t) index << PAGE_SHIFT;
> buf = __va((page_to_pfn(page) << PAGE_SHIFT));
> - rc = __read_vmcore(buf, PAGE_SIZE, &offset, 0);
> + rc = __read_vmcore(buf, PAGE_SIZE, &offset, false);
> if (rc < 0) {
> unlock_page(page);
> put_page(page);
> diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
> index ac03940..a25e682e 100644
> --- a/include/linux/uaccess.h
> +++ b/include/linux/uaccess.h
> @@ -283,6 +283,7 @@ __copy_from_user_inatomic_nocache(void *to, const void __user *from,
> #endif /* ARCH_HAS_NOCACHE_UACCESS */
>
> extern __must_check int check_zeroed_user(const void __user *from, size_t size);
> +extern __must_check int copy_to_user_or_kernel(void *target, void *src, size_t size, bool userbuf);
extern keyword is pointless for function prototypes, please don't add
new ones.
>
> /**
> * copy_struct_from_user: copy a struct from userspace
> diff --git a/lib/usercopy.c b/lib/usercopy.c
> index 7413dd3..7431b1b 100644
> --- a/lib/usercopy.c
> +++ b/lib/usercopy.c
> @@ -90,3 +90,18 @@ int check_zeroed_user(const void __user *from, size_t size)
> return -EFAULT;
> }
> EXPORT_SYMBOL(check_zeroed_user);
> +
> +/*
> + * Copy to either user or kernel space
> + */
> +int copy_to_user_or_kernel(void *target, void *src, size_t size, bool userbuf)
> +{
> + if (userbuf) {
> + if (copy_to_user((char __user *) target, src, size))
> + return -EFAULT;
> + } else {
> + memcpy(target, src, size);
> + }
> + return 0;
> +}
> +EXPORT_SYMBOL(copy_to_user_or_kernel);
>
Ref my answer to Andrew, I don't think outlining this function is
worth it. As shown in that mail, the size of the caller is increased by
4 instructions (which is in the noise) but also this new function is not
small. So I see no benefit in terms of size, and I don't think there is
any benefit in terms of performance either.
In this patch that's the same. Before the patch, read_vmcore() has a
size of 0x338.
With this patch, read_vmcore() has a size of 0x340. So that's 2
instructions more, so no benefit either.
So I think this should remain an inline function like in your first
patch (but with the new name).
000001a4 <copy_to_user_or_kernel>:
1a4: 2c 06 00 00 cmpwi r6,0
1a8: 94 21 ff f0 stwu r1,-16(r1)
1ac: 41 82 00 50 beq 1fc <copy_to_user_or_kernel+0x58>
1b0: 2c 05 00 00 cmpwi r5,0
1b4: 41 80 00 7c blt 230 <copy_to_user_or_kernel+0x8c>
1b8: 3d 00 b0 00 lis r8,-20480
1bc: 7f 83 40 40 cmplw cr7,r3,r8
1c0: 41 9c 00 14 blt cr7,1d4 <copy_to_user_or_kernel+0x30>
1c4: 40 82 00 64 bne 228 <copy_to_user_or_kernel+0x84>
1c8: 38 60 00 00 li r3,0
1cc: 38 21 00 10 addi r1,r1,16
1d0: 4e 80 00 20 blr
1d4: 7d 23 40 50 subf r9,r3,r8
1d8: 7f 85 48 40 cmplw cr7,r5,r9
1dc: 7c 08 02 a6 mflr r0
1e0: 90 01 00 14 stw r0,20(r1)
1e4: 41 9d 00 38 bgt cr7,21c <copy_to_user_or_kernel+0x78>
1e8: 48 00 00 01 bl 1e8 <copy_to_user_or_kernel+0x44>
1e8: R_PPC_REL24 __copy_tofrom_user
1ec: 80 01 00 14 lwz r0,20(r1)
1f0: 2c 03 00 00 cmpwi r3,0
1f4: 7c 08 03 a6 mtlr r0
1f8: 4b ff ff cc b 1c4 <copy_to_user_or_kernel+0x20>
1fc: 7c 08 02 a6 mflr r0
200: 90 01 00 14 stw r0,20(r1)
204: 48 00 00 01 bl 204 <copy_to_user_or_kernel+0x60>
204: R_PPC_REL24 memcpy
208: 38 60 00 00 li r3,0
20c: 80 01 00 14 lwz r0,20(r1)
210: 38 21 00 10 addi r1,r1,16
214: 7c 08 03 a6 mtlr r0
218: 4e 80 00 20 blr
21c: 80 01 00 14 lwz r0,20(r1)
220: 7c 08 03 a6 mtlr r0
224: 4b ff ff a0 b 1c4 <copy_to_user_or_kernel+0x20>
228: 38 60 ff f2 li r3,-14
22c: 4b ff ff a0 b 1cc <copy_to_user_or_kernel+0x28>
230: 0f e0 00 00 twui r0,0
234: 7c 08 02 a6 mflr r0
238: 90 01 00 14 stw r0,20(r1)
Also note that checkpatch.pl provides the following on your patch:
CHECK: No space is necessary after a cast
#88: FILE: fs/proc/vmcore.c:424:
+ return __read_vmcore((__force char *) buffer, buflen, fpos, true);
CHECK: extern prototypes should be avoided in .h files
#109: FILE: include/linux/uaccess.h:286:
+extern __must_check int copy_to_user_or_kernel(void *target, void *src,
size_t size, bool userbuf);
CHECK: No space is necessary after a cast
#128: FILE: lib/usercopy.c:100:
+ if (copy_to_user((char __user *) target, src, size))
total: 0 errors, 0 warnings, 3 checks, 96 lines checked
NOTE: For some of the reported defects, checkpatch may be able to
mechanically convert to the typical style using --fix or
--fix-inplace.
Commit 2c94767fa768 ("kdump: vmcore: remove copy_to() and add
copy_to_user_or_kernel()") has style problems, please review.
NOTE: If any of the errors are false positives, please report
them to the maintainer, see CHECKPATCH in MAINTAINERS.
Christophe
From: Tiezhu Yang
> Sent: 11 December 2021 03:33
>
> v2:
> -- add copy_to_user_or_kernel() in lib/usercopy.c
> -- define userbuf as bool type
Instead of having a flag to indicate whether the buffer is user or kernel,
would it be better to have two separate buffer pointers.
One for a user space buffer, the other for a kernel space buffer.
Exactly one of the buffers should always be NULL.
That way the flag is never incorrectly set.
David
-
Registered Address Lakeside, Bramley Road, Mount Farm, Milton Keynes, MK1 1PT, UK
Registration No: 1397386 (Wales)
Le 11/12/2021 à 18:53, David Laight a écrit :
> From: Tiezhu Yang
>> Sent: 11 December 2021 03:33
>>
>> v2:
>> -- add copy_to_user_or_kernel() in lib/usercopy.c
>> -- define userbuf as bool type
>
> Instead of having a flag to indicate whether the buffer is user or kernel,
> would it be better to have two separate buffer pointers.
> One for a user space buffer, the other for a kernel space buffer.
> Exactly one of the buffers should always be NULL.
>
> That way the flag is never incorrectly set.
>
It's a very good idea.
I was worried about the casts forcing the __user property away and back.
With that approach we will preserve the __user tags on user buffers and
enable sparse checking.
The only little drawback I see is that apparently GCC doesn't consider
the NULL value as a constant and therefore doesn't perform constant
folding on pointers. Not sure if this is a problem here.
Christophe
On Sat, Dec 11, 2021 at 05:53:46PM +0000, David Laight wrote:
> From: Tiezhu Yang
> > Sent: 11 December 2021 03:33
> >
> > v2:
> > -- add copy_to_user_or_kernel() in lib/usercopy.c
> > -- define userbuf as bool type
>
> Instead of having a flag to indicate whether the buffer is user or kernel,
> would it be better to have two separate buffer pointers.
> One for a user space buffer, the other for a kernel space buffer.
> Exactly one of the buffers should always be NULL.
No. You should be using an iov_iter instead. See
https://lore.kernel.org/all/[email protected]/
for a start on this.
From: Matthew Wilcox
> Sent: 12 December 2021 11:48
>
> On Sat, Dec 11, 2021 at 05:53:46PM +0000, David Laight wrote:
> > From: Tiezhu Yang
> > > Sent: 11 December 2021 03:33
> > >
> > > v2:
> > > -- add copy_to_user_or_kernel() in lib/usercopy.c
> > > -- define userbuf as bool type
> >
> > Instead of having a flag to indicate whether the buffer is user or kernel,
> > would it be better to have two separate buffer pointers.
> > One for a user space buffer, the other for a kernel space buffer.
> > Exactly one of the buffers should always be NULL.
>
> No. You should be using an iov_iter instead. See
> https://lore.kernel.org/all/[email protected]/
> for a start on this.
iov_iter gets horribly expensive...
David
-
Registered Address Lakeside, Bramley Road, Mount Farm, Milton Keynes, MK1 1PT, UK
Registration No: 1397386 (Wales)
On Mon, Dec 13, 2021 at 08:30:33AM +0000, David Laight wrote:
> From: Matthew Wilcox
> > Sent: 12 December 2021 11:48
> >
> > On Sat, Dec 11, 2021 at 05:53:46PM +0000, David Laight wrote:
> > > From: Tiezhu Yang
> > > > Sent: 11 December 2021 03:33
> > > >
> > > > v2:
> > > > -- add copy_to_user_or_kernel() in lib/usercopy.c
> > > > -- define userbuf as bool type
> > >
> > > Instead of having a flag to indicate whether the buffer is user or kernel,
> > > would it be better to have two separate buffer pointers.
> > > One for a user space buffer, the other for a kernel space buffer.
> > > Exactly one of the buffers should always be NULL.
> >
> > No. You should be using an iov_iter instead. See
> > https://lore.kernel.org/all/[email protected]/
> > for a start on this.
>
> iov_iter gets horribly expensive...
Oh, right. Reading the kcore is a high-performance path, my mistake.
On 12/13/2021 10:43 PM, Matthew Wilcox wrote:
> On Mon, Dec 13, 2021 at 08:30:33AM +0000, David Laight wrote:
>> From: Matthew Wilcox
>>> Sent: 12 December 2021 11:48
>>>
>>> On Sat, Dec 11, 2021 at 05:53:46PM +0000, David Laight wrote:
>>>> From: Tiezhu Yang
>>>>> Sent: 11 December 2021 03:33
>>>>>
>>>>> v2:
>>>>> -- add copy_to_user_or_kernel() in lib/usercopy.c
>>>>> -- define userbuf as bool type
>>>>
>>>> Instead of having a flag to indicate whether the buffer is user or kernel,
>>>> would it be better to have two separate buffer pointers.
>>>> One for a user space buffer, the other for a kernel space buffer.
>>>> Exactly one of the buffers should always be NULL.
>>>
>>> No. You should be using an iov_iter instead. See
>>> https://lore.kernel.org/all/[email protected]/
>>> for a start on this.
>>
>> iov_iter gets horribly expensive...
>
> Oh, right. Reading the kcore is a high-performance path, my mistake.
>
Hi,
Thank you for your discussions.
The intention of this patchset is to simplify the related code with no
functional changes and no side effects.
At this moment, if you are OK, I will send a v3 that uses an inline
function copy_to_user_or_kernel() to keep it simple; maybe more changes
can be done in the future if they have no side effects.
The v3 will contain the following three patches to make the changes
more clear:
kdump: vmcore: remove copy_to() and add copy_to_user_or_kernel()
kdump: crashdump: use copy_to_user_or_kernel() to simplify code
kdump: vmcore: crashdump: make variable type of userbuf as bool
On Tue, Dec 14, 2021 at 06:03:11PM +0800, Tiezhu Yang wrote:
> On 12/13/2021 10:43 PM, Matthew Wilcox wrote:
> > On Mon, Dec 13, 2021 at 08:30:33AM +0000, David Laight wrote:
> > > From: Matthew Wilcox
> > > > Sent: 12 December 2021 11:48
> > > >
> > > > On Sat, Dec 11, 2021 at 05:53:46PM +0000, David Laight wrote:
> > > > > From: Tiezhu Yang
> > > > > > Sent: 11 December 2021 03:33
> > > > > >
> > > > > > v2:
> > > > > > -- add copy_to_user_or_kernel() in lib/usercopy.c
> > > > > > -- define userbuf as bool type
> > > > >
> > > > > Instead of having a flag to indicate whether the buffer is user or kernel,
> > > > > would it be better to have two separate buffer pointers.
> > > > > One for a user space buffer, the other for a kernel space buffer.
> > > > > Exactly one of the buffers should always be NULL.
> > > >
> > > > No. You should be using an iov_iter instead. See
> > > > https://lore.kernel.org/all/[email protected]/
> > > > for a start on this.
> > >
> > > iov_iter gets horribly expensive...
> >
> > Oh, right. Reading the kcore is a high-performance path, my mistake.
> >
>
> Hi,
>
> Thank you for your discussions.
>
> The intention of this patchset is to simplify the related code with no
> functional changes and no side effects.
>
> At this moment, if you are OK, I will send v3 used with inline function
> copy_to_user_or_kernel() to keep it simple, maybe other more changes can
> be done in the future if no any side effect.
That would be pointless. I already sent a series to remove this,
which you were cc'd on.