It is needed for proper prefetch abort handling on ARMv7.
Signed-off-by: Kirill A. Shutemov <[email protected]>
---
arch/arm/include/asm/glue.h | 10 ++++++++--
arch/arm/kernel/entry-armv.S | 14 ++++----------
arch/arm/kernel/entry-common.S | 8 ++++++--
arch/arm/mm/fault.c | 2 +-
4 files changed, 19 insertions(+), 15 deletions(-)
diff --git a/arch/arm/include/asm/glue.h b/arch/arm/include/asm/glue.h
index a0e39d5..5bfaf6f 100644
--- a/arch/arm/include/asm/glue.h
+++ b/arch/arm/include/asm/glue.h
@@ -130,7 +130,10 @@
# ifdef CPU_PABORT_HANDLER
# define MULTI_PABORT 1
# else
-# define CPU_PABORT_HANDLER(reg, insn) mrc p15, 0, reg, cr6, cr0, 2
+# define CPU_PABORT_HANDLER(addr, ifsr, insn) \
+ mrc p15, 0, addr, c6, c0, 2 ; \
+ mrc p15, 0, ifsr, c5, c0, 1
+
# endif
#endif
@@ -138,7 +141,10 @@
# ifdef CPU_PABORT_HANDLER
# define MULTI_PABORT 1
# else
-# define CPU_PABORT_HANDLER(reg, insn) mov reg, insn
+#define __hash_5 #5
+# define CPU_PABORT_HANDLER(addr, ifsr, insn) \
+ mov addr, insn ; \
+ mov ifsr, __hash_5
# endif
#endif
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 3d727a8..335ae58 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -303,22 +303,16 @@ __pabt_svc:
tst r3, #PSR_I_BIT
biceq r9, r9, #PSR_I_BIT
- @
- @ set args, then call main handler
- @
- @ r0 - address of faulting instruction
- @ r1 - pointer to registers on stack
- @
#ifdef MULTI_PABORT
mov r0, r2 @ pass address of aborted instruction.
ldr r4, .LCprocfns
mov lr, pc
ldr pc, [r4, #PROCESSOR_PABT_FUNC]
#else
- CPU_PABORT_HANDLER(r0, r2)
+ CPU_PABORT_HANDLER(r0, r1, r2)
#endif
msr cpsr_c, r9 @ Maybe enable interrupts
- mov r1, sp @ regs
+ mov r2, sp @ regs
bl do_PrefetchAbort @ call abort handler
@
@@ -697,10 +691,10 @@ __pabt_usr:
mov lr, pc
ldr pc, [r4, #PROCESSOR_PABT_FUNC]
#else
- CPU_PABORT_HANDLER(r0, r2)
+ CPU_PABORT_HANDLER(r0, r1, r2)
#endif
enable_irq @ Enable interrupts
- mov r1, sp @ regs
+ mov r2, sp @ regs
bl do_PrefetchAbort @ call abort handler
UNWIND(.fnend )
/* fall through */
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 807cfeb..931ab9a 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -426,10 +426,14 @@ sys_mmap2:
ENDPROC(sys_mmap2)
ENTRY(pabort_ifar)
- mrc p15, 0, r0, cr6, cr0, 2
-ENTRY(pabort_noifar)
+ mrc p15, 0, r0, c6, c0, 2 @ get IFAR
+ mrc p15, 0, r1, c5, c0, 1 @ get IFSR
mov pc, lr
ENDPROC(pabort_ifar)
+ENTRY(pabort_noifar)
+ /* simulate IFSR with section translation fault status */
+ mov r1, #5
+ mov pc, lr
ENDPROC(pabort_noifar)
#ifdef CONFIG_OABI_COMPAT
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index cc8829d..5e27462 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -506,7 +506,7 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
}
asmlinkage void __exception
-do_PrefetchAbort(unsigned long addr, struct pt_regs *regs)
+do_PrefetchAbort(unsigned long addr, int ifsr, struct pt_regs *regs)
{
do_translation_fault(addr, 0, regs);
}
--
1.6.4.4
Currently, on ARMv7, if an application tries to execute code
(or garbage) on a non-executable page, it hangs. This is caused by incorrect
prefetch abort handling: at present, every prefetch abort is processed as
a translation fault.
To fix this we have to analyze the instruction fault status register
to figure out the reason why we got the abort, and process it
accordingly.
Signed-off-by: Kirill A. Shutemov <[email protected]>
---
arch/arm/mm/fault.c | 51 ++++++++++++++++++++++++++++++++++++++++++++++++++-
1 files changed, 50 insertions(+), 1 deletions(-)
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 5e27462..5abb9a5 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -505,9 +505,58 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
arm_notify_die("", regs, &info, fsr, 0);
}
+
+static struct fsr_info ifsr_info[] = {
+ { do_bad, SIGBUS, 0, "unknown 0" },
+ { do_bad, SIGBUS, 0, "unknown 1" },
+ { do_bad, SIGBUS, 0, "debug event" },
+ { do_bad, SIGSEGV, SEGV_ACCERR, "section access flag fault" },
+ { do_bad, SIGBUS, 0, "unknown 4" },
+ { do_translation_fault, SIGSEGV, SEGV_MAPERR, "section translation fault" },
+ { do_bad, SIGSEGV, SEGV_ACCERR, "page access flag fault" },
+ { do_page_fault, SIGSEGV, SEGV_MAPERR, "page translation fault" },
+ { do_bad, SIGBUS, 0, "external abort on non-linefetch" },
+ { do_bad, SIGSEGV, SEGV_ACCERR, "section domain fault" },
+ { do_bad, SIGBUS, 0, "unknown 10" },
+ { do_bad, SIGSEGV, SEGV_ACCERR, "page domain fault" },
+ { do_bad, SIGBUS, 0, "external abort on translation" },
+ { do_bad, SIGSEGV, SEGV_ACCERR, "section permission fault" },
+ { do_bad, SIGBUS, 0, "external abort on translation" },
+ { do_bad, SIGSEGV, SEGV_ACCERR, "page permission fault" },
+ { do_bad, SIGBUS, 0, "unknown 16" },
+ { do_bad, SIGBUS, 0, "unknown 17" },
+ { do_bad, SIGBUS, 0, "unknown 18" },
+ { do_bad, SIGBUS, 0, "unknown 19" },
+ { do_bad, SIGBUS, 0, "unknown 20" },
+ { do_bad, SIGBUS, 0, "unknown 21" },
+ { do_bad, SIGBUS, 0, "unknown 22" },
+ { do_bad, SIGBUS, 0, "unknown 23" },
+ { do_bad, SIGBUS, 0, "unknown 24" },
+ { do_bad, SIGBUS, 0, "unknown 25" },
+ { do_bad, SIGBUS, 0, "unknown 26" },
+ { do_bad, SIGBUS, 0, "unknown 27" },
+ { do_bad, SIGBUS, 0, "unknown 28" },
+ { do_bad, SIGBUS, 0, "unknown 29" },
+ { do_bad, SIGBUS, 0, "unknown 30" },
+ { do_bad, SIGBUS, 0, "unknown 31" },
+};
+
asmlinkage void __exception
do_PrefetchAbort(unsigned long addr, int ifsr, struct pt_regs *regs)
{
- do_translation_fault(addr, 0, regs);
+ const struct fsr_info *inf = ifsr_info + (ifsr & 15) + ((ifsr & (1 << 10)) >> 6);
+ struct siginfo info;
+
+ if (!inf->fn(addr, ifsr, regs))
+ return;
+
+ printk(KERN_ALERT "Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
+ inf->name, ifsr, addr);
+
+ info.si_signo = inf->sig;
+ info.si_errno = 0;
+ info.si_code = inf->code;
+ info.si_addr = (void __user *)addr;
+ arm_notify_die("", regs, &info, ifsr, 0);
}
--
1.6.4.4
On Fri, Sep 18, 2009 at 11:55:42PM +0300, Kirill A. Shutemov wrote:
> It needed for proper prefetch abort handling on ARMv7.
I think the only thing which is missing is an explanation about why
this is desirable given that only later CPUs can give this additional
information.
On Sat, Sep 19, 2009 at 12:03:16PM +0100, Russell King - ARM Linux wrote:
> On Fri, Sep 18, 2009 at 11:55:42PM +0300, Kirill A. Shutemov wrote:
> > It needed for proper prefetch abort handling on ARMv7.
>
> I think the only thing which is missing is an explaination about why
> this is desirable given that only later CPUs can give this additional
> information.
So you've posted it to the patch system, without further discussion here.
I think the solution is wrong - it makes instruction permission faults
unnecessarily noisy, which is not what the decoding table is supposed
to be doing. The decoding table's bad entries are there to catch those
_unexpected_ cases.
Instead, I suggest that you have a look at this:
if (fsr & (1 << 11)) /* write? */
mask = VM_WRITE;
else
mask = VM_READ|VM_EXEC|VM_WRITE;
fault = VM_FAULT_BADACCESS;
if (!(vma->vm_flags & mask))
goto out;
in __do_page_fault - if we are handling a prefetch abort, we really only
want to check that the VMA has VM_EXEC permission, not that it can be
read and written as well.
On Sun, Sep 20, 2009 at 11:15 AM, Russell King - ARM Linux
<[email protected]> wrote:
> On Sat, Sep 19, 2009 at 12:03:16PM +0100, Russell King - ARM Linux wrote:
>> On Fri, Sep 18, 2009 at 11:55:42PM +0300, Kirill A. Shutemov wrote:
>> > It needed for proper prefetch abort handling on ARMv7.
>>
>> I think the only thing which is missing is an explaination about why
>> this is desirable given that only later CPUs can give this additional
>> information.
>
> So you've posted it to the patch system, without further discussion here.
>
> I think the solution is wrong - it makes instruction permission faults
> unnecessarily noisy, which is not what the decoding table is supposed
> to be doing. The decoding table's bad entries are there to catch those
> _unexpected_ cases.
>
> Instead, I suggest that you have a look at this:
>
> if (fsr & (1 << 11)) /* write? */
> mask = VM_WRITE;
> else
> mask = VM_READ|VM_EXEC|VM_WRITE;
>
> fault = VM_FAULT_BADACCESS;
> if (!(vma->vm_flags & mask))
> goto out;
>
> in __do_page_fault - if we are handling a prefetch abort, we really only
> want to check that the VMA has VM_EXEC permission, not that it can be
> read and written as well.
>
Ok, so __do_page_fault() should know where we are: in data abort or in
prefetch abort. What is the right way to do it? Should we create one more
argument or use one of the reserved bits of IFSR?
On Sun, Sep 20, 2009 at 12:35:19PM +0300, Kirill A. Shutemov wrote:
> Ok, so __do_page_fault() should know where we are: in data abort or in
> prefetch abort. What is right way to do it? Should we create one more
> argument or use one of reserved bits IFSR?
Well, this is my solution to the problem - could you test it please?
This patch is actually the result of several patches merged together,
so I won't supply a proper description etc.
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index cc8829d..8fbb22d 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -25,6 +25,19 @@
#include "fault.h"
+/*
+ * Fault status register encodings. We steal bit 31 for our own purposes.
+ */
+#define FSR_LNX_PF (1 << 31)
+#define FSR_WRITE (1 << 11)
+#define FSR_FS4 (1 << 10)
+#define FSR_FS3_0 (15)
+
+static inline int fsr_fs(unsigned int fsr)
+{
+ return (fsr & FSR_FS3_0) | (fsr & FSR_FS4) >> 6;
+}
+
#ifdef CONFIG_MMU
#ifdef CONFIG_KPROBES
@@ -182,18 +195,35 @@ void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
#define VM_FAULT_BADMAP 0x010000
#define VM_FAULT_BADACCESS 0x020000
-static int
+/*
+ * Check that the permissions on the VMA allow for the fault which occurred.
+ * If we encountered a write fault, we must have write permission, otherwise
+ * we allow any permission.
+ */
+static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma)
+{
+ unsigned int mask = VM_READ | VM_WRITE | VM_EXEC;
+
+ if (fsr & FSR_WRITE)
+ mask = VM_WRITE;
+ if (fsr & FSR_LNX_PF)
+ mask = VM_EXEC;
+
+ return vma->vm_flags & mask ? false : true;
+}
+
+static noinline int __kprobes
__do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
struct task_struct *tsk)
{
struct vm_area_struct *vma;
- int fault, mask;
+ int fault;
vma = find_vma(mm, addr);
fault = VM_FAULT_BADMAP;
- if (!vma)
+ if (unlikely(!vma))
goto out;
- if (vma->vm_start > addr)
+ if (unlikely(vma->vm_start > addr))
goto check_stack;
/*
@@ -201,47 +231,24 @@ __do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
* memory access, so we can handle it.
*/
good_area:
- if (fsr & (1 << 11)) /* write? */
- mask = VM_WRITE;
- else
- mask = VM_READ|VM_EXEC|VM_WRITE;
-
- fault = VM_FAULT_BADACCESS;
- if (!(vma->vm_flags & mask))
+ if (access_error(fsr, vma)) {
+ fault = VM_FAULT_BADACCESS;
goto out;
+ }
/*
- * If for any reason at all we couldn't handle
- * the fault, make sure we exit gracefully rather
- * than endlessly redo the fault.
+ * If for any reason at all we couldn't handle the fault, make
+ * sure we exit gracefully rather than endlessly redo the fault.
*/
-survive:
- fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, (fsr & (1 << 11)) ? FAULT_FLAG_WRITE : 0);
- if (unlikely(fault & VM_FAULT_ERROR)) {
- if (fault & VM_FAULT_OOM)
- goto out_of_memory;
- else if (fault & VM_FAULT_SIGBUS)
- return fault;
- BUG();
- }
+ fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, (fsr & FSR_WRITE) ? FAULT_FLAG_WRITE : 0);
+ if (unlikely(fault & VM_FAULT_ERROR))
+ return fault;
if (fault & VM_FAULT_MAJOR)
tsk->maj_flt++;
else
tsk->min_flt++;
return fault;
-out_of_memory:
- if (!is_global_init(tsk))
- goto out;
-
- /*
- * If we are out of memory for pid1, sleep for a while and retry
- */
- up_read(&mm->mmap_sem);
- yield();
- down_read(&mm->mmap_sem);
- goto survive;
-
check_stack:
if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr))
goto good_area;
@@ -278,6 +285,13 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
if (!user_mode(regs) && !search_exception_tables(regs->ARM_pc))
goto no_context;
down_read(&mm->mmap_sem);
+ } else {
+ /*
+ * The above down_read_trylock() might have succeeded in
+ * which case, we'll have missed the might_sleep() from
+ * down_read()
+ */
+ might_sleep();
}
fault = __do_page_fault(mm, addr, fsr, tsk);
@@ -289,6 +303,16 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP | VM_FAULT_BADACCESS))))
return 0;
+ if (fault & VM_FAULT_OOM) {
+ /*
+ * We ran out of memory, call the OOM killer, and return to
+ * userspace (which will retry the fault, or kill us if we
+ * got oom-killed)
+ */
+ pagefault_out_of_memory();
+ return 0;
+ }
+
/*
* If we are in kernel mode at this point, we
* have no context to handle this fault with.
@@ -296,16 +320,6 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
if (!user_mode(regs))
goto no_context;
- if (fault & VM_FAULT_OOM) {
- /*
- * We ran out of memory, or some other thing
- * happened to us that made us unable to handle
- * the page fault gracefully.
- */
- printk("VM: killing process %s\n", tsk->comm);
- do_group_exit(SIGKILL);
- return 0;
- }
if (fault & VM_FAULT_SIGBUS) {
/*
* We had some memory, but were unable to
@@ -489,10 +503,10 @@ hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *)
asmlinkage void __exception
do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
- const struct fsr_info *inf = fsr_info + (fsr & 15) + ((fsr & (1 << 10)) >> 6);
+ const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
struct siginfo info;
- if (!inf->fn(addr, fsr, regs))
+ if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
return;
printk(KERN_ALERT "Unhandled fault: %s (0x%03x) at 0x%08lx\n",
@@ -508,6 +522,6 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
asmlinkage void __exception
do_PrefetchAbort(unsigned long addr, struct pt_regs *regs)
{
- do_translation_fault(addr, 0, regs);
+ do_translation_fault(addr, FSR_LNX_PF, regs);
}
On Sun, Sep 20, 2009 at 3:24 PM, Russell King - ARM Linux
<[email protected]> wrote:
> On Sun, Sep 20, 2009 at 12:35:19PM +0300, Kirill A. Shutemov wrote:
>> Ok, so __do_page_fault() should know where we are: in data abort or in
>> prefetch abort. What is right way to do it? Should we create one more
>> argument or use one of reserved bits IFSR?
>
> Well, this is my solution to the problem - could you test it please?
I'll test it on Monday.
Do you want to ignore IFSR? I don't think that it's a good idea. We will
get an infinite loop of faults on a type of fault the kernel does not expect,
like we have now for permission faults. It would be better to call do_bad() in this case.
> This patch is actually the result of several patches merged together,
> so I won't supply a proper description etc.
>
> diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
> index cc8829d..8fbb22d 100644
> --- a/arch/arm/mm/fault.c
> +++ b/arch/arm/mm/fault.c
> @@ -25,6 +25,19 @@
>
> #include "fault.h"
>
> +/*
> + * Fault status register encodings. We steal bit 31 for our own purposes.
> + */
> +#define FSR_LNX_PF (1 << 31)
> +#define FSR_WRITE (1 << 11)
> +#define FSR_FS4 (1 << 10)
> +#define FSR_FS3_0 (15)
> +
> +static inline int fsr_fs(unsigned int fsr)
> +{
> + return (fsr & FSR_FS3_0) | (fsr & FSR_FS4) >> 6;
> +}
> +
> #ifdef CONFIG_MMU
>
> #ifdef CONFIG_KPROBES
> @@ -182,18 +195,35 @@ void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
> #define VM_FAULT_BADMAP 0x010000
> #define VM_FAULT_BADACCESS 0x020000
>
> -static int
> +/*
> + * Check that the permissions on the VMA allow for the fault which occurred.
> + * If we encountered a write fault, we must have write permission, otherwise
> + * we allow any permission.
> + */
> +static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma)
> +{
> + unsigned int mask = VM_READ | VM_WRITE | VM_EXEC;
> +
> + if (fsr & FSR_WRITE)
> + mask = VM_WRITE;
> + if (fsr & FSR_LNX_PF)
> + mask = VM_EXEC;
> +
> + return vma->vm_flags & mask ? false : true;
> +}
> +
> +static noinline int __kprobes
> __do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
> struct task_struct *tsk)
> {
> struct vm_area_struct *vma;
> - int fault, mask;
> + int fault;
>
> vma = find_vma(mm, addr);
> fault = VM_FAULT_BADMAP;
> - if (!vma)
> + if (unlikely(!vma))
> goto out;
> - if (vma->vm_start > addr)
> + if (unlikely(vma->vm_start > addr))
> goto check_stack;
>
> /*
> @@ -201,47 +231,24 @@ __do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
> * memory access, so we can handle it.
> */
> good_area:
> - if (fsr & (1 << 11)) /* write? */
> - mask = VM_WRITE;
> - else
> - mask = VM_READ|VM_EXEC|VM_WRITE;
> -
> - fault = VM_FAULT_BADACCESS;
> - if (!(vma->vm_flags & mask))
> + if (access_error(fsr, vma)) {
> + fault = VM_FAULT_BADACCESS;
> goto out;
> + }
>
> /*
> - * If for any reason at all we couldn't handle
> - * the fault, make sure we exit gracefully rather
> - * than endlessly redo the fault.
> + * If for any reason at all we couldn't handle the fault, make
> + * sure we exit gracefully rather than endlessly redo the fault.
> */
> -survive:
> - fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, (fsr & (1 << 11)) ? FAULT_FLAG_WRITE : 0);
> - if (unlikely(fault & VM_FAULT_ERROR)) {
> - if (fault & VM_FAULT_OOM)
> - goto out_of_memory;
> - else if (fault & VM_FAULT_SIGBUS)
> - return fault;
> - BUG();
> - }
> + fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, (fsr & FSR_WRITE) ? FAULT_FLAG_WRITE : 0);
> + if (unlikely(fault & VM_FAULT_ERROR))
> + return fault;
> if (fault & VM_FAULT_MAJOR)
> tsk->maj_flt++;
> else
> tsk->min_flt++;
> return fault;
>
> -out_of_memory:
> - if (!is_global_init(tsk))
> - goto out;
> -
> - /*
> - * If we are out of memory for pid1, sleep for a while and retry
> - */
> - up_read(&mm->mmap_sem);
> - yield();
> - down_read(&mm->mmap_sem);
> - goto survive;
> -
> check_stack:
> if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr))
> goto good_area;
> @@ -278,6 +285,13 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
> if (!user_mode(regs) && !search_exception_tables(regs->ARM_pc))
> goto no_context;
> down_read(&mm->mmap_sem);
> + } else {
> + /*
> + * The above down_read_trylock() might have succeeded in
> + * which case, we'll have missed the might_sleep() from
> + * down_read()
> + */
> + might_sleep();
> }
>
> fault = __do_page_fault(mm, addr, fsr, tsk);
> @@ -289,6 +303,16 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
> if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP | VM_FAULT_BADACCESS))))
> return 0;
>
> + if (fault & VM_FAULT_OOM) {
> + /*
> + * We ran out of memory, call the OOM killer, and return to
> + * userspace (which will retry the fault, or kill us if we
> + * got oom-killed)
> + */
> + pagefault_out_of_memory();
> + return 0;
> + }
> +
> /*
> * If we are in kernel mode at this point, we
> * have no context to handle this fault with.
> @@ -296,16 +320,6 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
> if (!user_mode(regs))
> goto no_context;
>
> - if (fault & VM_FAULT_OOM) {
> - /*
> - * We ran out of memory, or some other thing
> - * happened to us that made us unable to handle
> - * the page fault gracefully.
> - */
> - printk("VM: killing process %s\n", tsk->comm);
> - do_group_exit(SIGKILL);
> - return 0;
> - }
> if (fault & VM_FAULT_SIGBUS) {
> /*
> * We had some memory, but were unable to
> @@ -489,10 +503,10 @@ hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *)
> asmlinkage void __exception
> do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
> {
> - const struct fsr_info *inf = fsr_info + (fsr & 15) + ((fsr & (1 << 10)) >> 6);
> + const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
> struct siginfo info;
>
> - if (!inf->fn(addr, fsr, regs))
> + if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
> return;
>
> printk(KERN_ALERT "Unhandled fault: %s (0x%03x) at 0x%08lx\n",
> @@ -508,6 +522,6 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
> asmlinkage void __exception
> do_PrefetchAbort(unsigned long addr, struct pt_regs *regs)
> {
> - do_translation_fault(addr, 0, regs);
> + do_translation_fault(addr, FSR_LNX_PF, regs);
> }
>
>
>
On Sun, Sep 20, 2009 at 3:24 PM, Russell King - ARM Linux
<[email protected]> wrote:
> On Sun, Sep 20, 2009 at 12:35:19PM +0300, Kirill A. Shutemov wrote:
>> Ok, so __do_page_fault() should know where we are: in data abort or in
>> prefetch abort. What is right way to do it? Should we create one more
>> argument or use one of reserved bits IFSR?
>
> Well, this is my solution to the problem - could you test it please?
Yes, it works. But I still think that ignoring IFSR is a mistake. I'll send
updated patches.