Throughout the OpenRISC kernel port a NULL VMA is passed when flushing
kernel TLB entries.  Somehow this case was missed when I was testing
c28b27416da9 ("openrisc: Implement proper SMP tlb flushing") and now the
SMP kernel fails to boot completely.

On OpenRISC the VMA is used only to determine which cores need to have
their TLB entries flushed.
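
For reference, this is the pre-patch flush_tlb_range() (removed in the
hunk below): the VMA is dereferenced only to pick up the mm cpumask, so
a NULL VMA hits a NULL pointer dereference before any flushing happens:

	void flush_tlb_range(struct vm_area_struct *vma,
			     unsigned long start, unsigned long end)
	{
		/* The VMA is only used to find the cores to flush on */
		smp_flush_tlb_range(mm_cpumask(vma->vm_mm), start, end);
	}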

This patch updates the logic to flush the TLBs on all cores when the
VMA is passed as NULL.  It also converts the places that were passing a
NULL VMA to use flush_tlb_kernel_range() instead.  Now the only place a
NULL VMA is passed is in the implementation of flush_tlb_kernel_range().
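
The kernel-range path then works roughly as sketched below.  This is
only an illustration: the exact flush_tlb_kernel_range() wrapper (a
macro or inline helper elsewhere in the port) is assumed here to simply
forward a NULL VMA, as described above:

	static inline void flush_tlb_kernel_range(unsigned long start,
						  unsigned long end)
	{
		/*
		 * No user mm to consult here; flush_tlb_range() sees the
		 * NULL VMA and now falls back to cpu_online_mask, so the
		 * range is flushed on every core.
		 */
		flush_tlb_range(NULL, start, end);
	}
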
Fixes: c28b27416da9 ("openrisc: Implement proper SMP tlb flushing")
Reported-by: Jan Henrik Weinstock <[email protected]>
Signed-off-by: Stafford Horne <[email protected]>
---
arch/openrisc/kernel/dma.c | 4 ++--
arch/openrisc/kernel/smp.c | 6 ++++--
2 files changed, 6 insertions(+), 4 deletions(-)
diff --git a/arch/openrisc/kernel/dma.c b/arch/openrisc/kernel/dma.c
index 1b16d97e7da7..a82b2caaa560 100644
--- a/arch/openrisc/kernel/dma.c
+++ b/arch/openrisc/kernel/dma.c
@@ -33,7 +33,7 @@ page_set_nocache(pte_t *pte, unsigned long addr,
 	 * Flush the page out of the TLB so that the new page flags get
 	 * picked up next time there's an access
 	 */
-	flush_tlb_page(NULL, addr);
+	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
 
 	/* Flush page out of dcache */
 	for (cl = __pa(addr); cl < __pa(next); cl += cpuinfo->dcache_block_size)
@@ -56,7 +56,7 @@ page_clear_nocache(pte_t *pte, unsigned long addr,
 	 * Flush the page out of the TLB so that the new page flags get
 	 * picked up next time there's an access
 	 */
-	flush_tlb_page(NULL, addr);
+	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
 
 	return 0;
 }
diff --git a/arch/openrisc/kernel/smp.c b/arch/openrisc/kernel/smp.c
index 415e209732a3..ba78766cf00b 100644
--- a/arch/openrisc/kernel/smp.c
+++ b/arch/openrisc/kernel/smp.c
@@ -272,7 +272,7 @@ static inline void ipi_flush_tlb_range(void *info)
 	local_flush_tlb_range(NULL, fd->addr1, fd->addr2);
 }
 
-static void smp_flush_tlb_range(struct cpumask *cmask, unsigned long start,
+static void smp_flush_tlb_range(const struct cpumask *cmask, unsigned long start,
 				unsigned long end)
 {
 	unsigned int cpuid;
@@ -320,7 +320,9 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
 void flush_tlb_range(struct vm_area_struct *vma,
 		     unsigned long start, unsigned long end)
 {
-	smp_flush_tlb_range(mm_cpumask(vma->vm_mm), start, end);
+	const struct cpumask *cmask = vma ? mm_cpumask(vma->vm_mm)
+					  : cpu_online_mask;
+	smp_flush_tlb_range(cmask, start, end);
 }
 
 /* Instruction cache invalidate - performed on each cpu */
--
2.31.1