2012-11-15 09:40:23

by “tiejun.chen”

Subject: [PATCH 0/6] powerpc/book3e: support kexec and kdump

This patch set adds support for kexec and kdump on book3e.

Tested on fsl-p5040 DS.

Tiejun Chen (6):
powerpc/book3e: support CONFIG_RELOCATABLE
book3e/kexec/kdump: enable kexec for kernel
book3e/kexec/kdump: create a 1:1 TLB mapping
book3e/kexec/kdump: introduce a kexec kernel flag
book3e/kexec/kdump: skip ppc32 kexec specifics
book3e/kexec/kdump: redefine VIRT_PHYS_OFFSET

arch/powerpc/Kconfig                     |  2 +-
arch/powerpc/include/asm/exception-64e.h |  8 ++++
arch/powerpc/include/asm/page.h          |  2 +
arch/powerpc/include/asm/smp.h           |  3 ++
arch/powerpc/kernel/exceptions-64e.S     | 15 ++++++-
arch/powerpc/kernel/head_64.S            | 43 +++++++++++++++++--
arch/powerpc/kernel/machine_kexec_64.c   |  6 +++
arch/powerpc/kernel/misc_64.S            | 67 +++++++++++++++++++++++++++++-
arch/powerpc/lib/feature-fixups.c        |  7 ++++
arch/powerpc/platforms/85xx/smp.c        | 26 ++++++++++++
10 files changed, 173 insertions(+), 6 deletions(-)

Tiejun


2012-11-15 09:40:33

by “tiejun.chen”

Subject: [PATCH 6/6] book3e/kexec/kdump: redefine VIRT_PHYS_OFFSET

Book3e always uses a 1GB-aligned TLB mapping, so we should use
(KERNELBASE - MEMORY_START) as VIRT_PHYS_OFFSET so that __pa()/__va()
translate properly while booting a kdump kernel.
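
A hedged worked example of the intent, with illustrative numbers that
are not from the patch: suppose the kdump kernel is loaded at 32MB.

	/* Sketch with hypothetical values -- not code from this series. */
	#define KERNELBASE       0xc000000000000000UL
	#define MEMORY_START     0x02000000UL  /* kdump kernel at 32MB */
	#define VIRT_PHYS_OFFSET (KERNELBASE - MEMORY_START)

	/* virt = phys + VIRT_PHYS_OFFSET, so:
	 *   __va(0x02000000) == 0xc000000000000000 (the linked base),
	 * i.e. __pa()/__va() stay consistent even though the kdump
	 * kernel does not run from physical address zero.
	 */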

Signed-off-by: Tiejun Chen <[email protected]>
---
arch/powerpc/include/asm/page.h | 2 ++
1 file changed, 2 insertions(+)

diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index f072e97..2cba08a 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -112,6 +112,8 @@ extern long long virt_phys_offset;
/* See Description below for VIRT_PHYS_OFFSET */
#ifdef CONFIG_RELOCATABLE_PPC32
#define VIRT_PHYS_OFFSET virt_phys_offset
+#elif defined(CONFIG_PPC_BOOK3E_64)
+#define VIRT_PHYS_OFFSET (KERNELBASE - MEMORY_START)
#else
#define VIRT_PHYS_OFFSET (KERNELBASE - PHYSICAL_START)
#endif
--
1.7.9.5

2012-11-15 09:40:47

by “tiejun.chen”

Subject: [PATCH 4/6] book3e/kexec/kdump: introduce a kexec kernel flag

We need to introduce a flag to indicate that we're already running a
kexec kernel, so that we can take the proper path. For example, we
shouldn't access the bootloader's spin table to bring up secondary
CPUs in a kexec kernel, since the kexec kernel already knows how to
jump to generic_secondary_smp_init.
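
In rough C-style pseudocode, the intended flow across the two kernels
looks like this (a sketch reusing this series' names; the real changes
below are in assembly plus 85xx/smp.c):

	/* Old kernel, in kexec_sequence(), before releasing secondaries: */
	__run_at_kexec = 1;

	/* New (kexec'ed) kernel, in smp_85xx_kick_cpu(): */
	if (!__run_at_kexec) {
		/* Cold boot: the bootloader owns the spin table. */
		out_be32(&spin_table->pir, hw_cpu);
		/* ...point spin_table->addr at generic_secondary_smp_init... */
	}
	/* else: secondaries already head to generic_secondary_smp_init,
	 * so leave the bootloader's spin table alone. */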

Signed-off-by: Tiejun Chen <[email protected]>
---
arch/powerpc/include/asm/smp.h    |  3 +++
arch/powerpc/kernel/head_64.S     | 12 ++++++++++++
arch/powerpc/kernel/misc_64.S     |  6 ++++++
arch/powerpc/platforms/85xx/smp.c | 14 ++++++++++++++
4 files changed, 35 insertions(+)

diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index e807e9d..aadbe9b 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -190,6 +190,9 @@ extern void generic_secondary_thread_init(void);
extern unsigned long __secondary_hold_spinloop;
extern unsigned long __secondary_hold_acknowledge;
extern char __secondary_hold;
+#ifdef CONFIG_KEXEC
+extern unsigned long __run_at_kexec;
+#endif

extern void __early_start(void);
#endif /* __ASSEMBLY__ */
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index d51ffc0..9c30d9f 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -89,6 +89,12 @@ __secondary_hold_spinloop:
__secondary_hold_acknowledge:
.llong 0x0

+#if defined(CONFIG_KEXEC) || defined(CONFIG_CRASH_DUMP)
+ .globl __run_at_kexec
+__run_at_kexec:
+ .llong 0x0 /* Flag telling the next kernel it was entered via kexec. */
+#endif
+
#ifdef CONFIG_RELOCATABLE
/* This flag is set to 1 by a loader if the kernel should run
* at the loaded address instead of the linked address. This
@@ -441,6 +447,12 @@ _STATIC(__after_prom_start)
#if defined(CONFIG_PPC_BOOK3E)
tovirt(r26,r26) /* on booke, we already run at PAGE_OFFSET */
#endif
+#if defined(CONFIG_KEXEC) || defined(CONFIG_CRASH_DUMP)
+ /* If relocated, restore this flag at the relocated address. */
+ ld r7,__run_at_kexec-_stext(r3)
+ std r7,__run_at_kexec-_stext(r26)
+#endif
+
lwz r7,__run_at_load-_stext(r26)
#if defined(CONFIG_PPC_BOOK3E)
tophys(r26,r26) /* Restore for the remaining code. */
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index ffe6043..b81f8ac 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -608,6 +608,12 @@ _GLOBAL(kexec_sequence)
bl .copy_and_flush /* (dest, src, copy limit, start offset) */
1: /* assume normal blr return */

+ /* Flag for SMP bring-up that we're entering a kexec kernel. */
+ LOAD_REG_ADDR(r3,__run_at_kexec)
+ li r4,1
+ std r4,0(r3)
+ sync
+
/* release other cpus to the new kernel secondary start at 0x60 */
mflr r5
li r6,1
diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
index 6fcfa12..c7febd5 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -137,6 +137,9 @@ static int __cpuinit smp_85xx_kick_cpu(int nr)
int hw_cpu = get_hard_smp_processor_id(nr);
int ioremappable;
int ret = 0;
+#if defined(CONFIG_KEXEC) || defined(CONFIG_CRASH_DUMP)
+ unsigned long *ptr;
+#endif

WARN_ON(nr < 0 || nr >= NR_CPUS);
WARN_ON(hw_cpu < 0 || hw_cpu >= NR_CPUS);
@@ -213,6 +216,14 @@ out:
#else
smp_generic_kick_cpu(nr);

+#if defined(CONFIG_KEXEC) || defined(CONFIG_CRASH_DUMP)
+ ptr = (unsigned long *)((unsigned long)&__run_at_kexec);
+ /* We shouldn't touch the bootloader's spin table to bring up a
+ * secondary cpu in a kexec kernel; the kexec kernel already knows
+ * how to jump to generic_secondary_smp_init.
+ */
+ if (!*ptr) {
+#endif
out_be32(&spin_table->pir, hw_cpu);
out_be64((u64 *)(&spin_table->addr_h),
__pa((u64)*((unsigned long long *)generic_secondary_smp_init)));
@@ -220,6 +231,9 @@ out:
if (!ioremappable)
flush_dcache_range((ulong)spin_table,
(ulong)spin_table + sizeof(struct epapr_spin_table));
+#if defined(CONFIG_KEXEC) || defined(CONFIG_CRASH_DUMP)
+ }
+#endif
#endif

local_irq_restore(flags);
--
1.7.9.5

2012-11-15 09:40:29

by “tiejun.chen”

Subject: [PATCH 2/6] book3e/kexec/kdump: enable kexec for kernel

We need to enable KEXEC for book3e, and bypass or convert the
non-book3e code covered by kexec.
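
The conversions fall into two buckets: hash-MMU teardown (book3e has
no hash page table, so ppc_md.hpte_clear_all and the htab device-tree
export are compiled out) and MSR handling (book3e disables external
interrupts with wrteei rather than a read-modify-write of the MSR).
A hedged C-level sketch of the second point, not kernel API:

	/* book3s: clear MSR_EE by read-modify-write (mfmsr/mtmsrd) */
	unsigned long msr;
	asm volatile("mfmsr %0" : "=r" (msr));
	msr &= ~MSR_EE;
	asm volatile("mtmsrd %0,1" : : "r" (msr) : "memory");

	/* book3e: one instruction writes MSR[EE] directly */
	asm volatile("wrteei 0" : : : "memory");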

Signed-off-by: Tiejun Chen <[email protected]>
---
arch/powerpc/Kconfig                   | 2 +-
arch/powerpc/kernel/machine_kexec_64.c | 6 ++++++
arch/powerpc/kernel/misc_64.S          | 6 ++++++
3 files changed, 13 insertions(+), 1 deletion(-)

diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index a902a5c..3000cab8 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -357,7 +357,7 @@ config ARCH_ENABLE_MEMORY_HOTREMOVE

config KEXEC
bool "kexec system call (EXPERIMENTAL)"
- depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP)) && EXPERIMENTAL
+ depends on (PPC_BOOK3S || FSL_BOOKE || PPC_BOOK3E || (44x && !SMP)) && EXPERIMENTAL
help
kexec is a system call that implements the ability to shutdown your
current kernel, and to start another kernel. It is like a reboot
diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c
index d7f6090..2c0cbf0 100644
--- a/arch/powerpc/kernel/machine_kexec_64.c
+++ b/arch/powerpc/kernel/machine_kexec_64.c
@@ -32,6 +32,7 @@
int default_machine_kexec_prepare(struct kimage *image)
{
int i;
+#ifndef CONFIG_PPC_BOOK3E
unsigned long begin, end; /* limits of segment */
unsigned long low, high; /* limits of blocked memory range */
struct device_node *node;
@@ -40,6 +41,7 @@ int default_machine_kexec_prepare(struct kimage *image)

if (!ppc_md.hpte_clear_all)
return -ENOENT;
+#endif

/*
* Since we use the kernel fault handlers and paging code to
@@ -50,6 +52,7 @@ int default_machine_kexec_prepare(struct kimage *image)
if (image->segment[i].mem < __pa(_end))
return -ETXTBSY;

+#ifndef CONFIG_PPC_BOOK3E
/*
* For non-LPAR, we absolutely can not overwrite the mmu hash
* table, since we are still using the bolted entries in it to
@@ -91,6 +94,7 @@ int default_machine_kexec_prepare(struct kimage *image)
return -ETXTBSY;
}
}
+#endif

return 0;
}
@@ -358,6 +362,7 @@ void default_machine_kexec(struct kimage *image)
/* NOTREACHED */
}

+#ifndef CONFIG_PPC_BOOK3E
/* Values we need to export to the second kernel via the device tree. */
static unsigned long htab_base;

@@ -402,3 +407,4 @@ static int __init export_htab_values(void)
return 0;
}
late_initcall(export_htab_values);
+#endif
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index 5cfa800..c2acf8c 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -532,9 +532,13 @@ _GLOBAL(kexec_sequence)
lhz r25,PACAHWCPUID(r13) /* get our phys cpu from paca */

/* disable interrupts, we are overwriting kernel data next */
+#ifndef CONFIG_PPC_BOOK3E
mfmsr r3
rlwinm r3,r3,0,17,15
mtmsrd r3,1
+#else
+ wrteei 0
+#endif

/* copy dest pages, flush whole dest image */
mr r3,r29
@@ -556,10 +560,12 @@ _GLOBAL(kexec_sequence)
li r6,1
stw r6,kexec_flag-1b(5)

+#ifndef CONFIG_PPC_BOOK3E
/* clear out hardware hash page table and tlb */
ld r5,0(r27) /* deref function descriptor */
mtctr r5
bctrl /* ppc_md.hpte_clear_all(void); */
+#endif

/*
* kexec image calling is:
--
1.7.9.5

2012-11-15 09:40:27

by “tiejun.chen”

Subject: [PATCH 5/6] book3e/kexec/kdump: skip ppc32 kexec specifics

The ppc64 kexec mechanism has a different implementation from ppc32,
so skip the ppc32-specific parts.

Signed-off-by: Tiejun Chen <[email protected]>
---
arch/powerpc/platforms/85xx/smp.c | 12 ++++++++++++
1 file changed, 12 insertions(+)

diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
index c7febd5..d3ec57c 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -257,6 +257,7 @@ struct smp_ops_t smp_85xx_ops = {
};

#ifdef CONFIG_KEXEC
+#ifdef CONFIG_PPC32
atomic_t kexec_down_cpus = ATOMIC_INIT(0);

void mpc85xx_smp_kexec_cpu_down(int crash_shutdown, int secondary)
@@ -275,6 +276,13 @@ static void mpc85xx_smp_kexec_down(void *arg)
if (ppc_md.kexec_cpu_down)
ppc_md.kexec_cpu_down(0,1);
}
+#else
+void mpc85xx_smp_kexec_cpu_down(int crash_shutdown, int secondary)
+{
+ local_irq_disable();
+ mpic_teardown_this_cpu(secondary);
+}
+#endif

static void map_and_flush(unsigned long paddr)
{
@@ -326,11 +334,14 @@ static void mpc85xx_smp_flush_dcache_kexec(struct kimage *image)

static void mpc85xx_smp_machine_kexec(struct kimage *image)
{
+#ifdef CONFIG_PPC32
int timeout = INT_MAX;
int i, num_cpus = num_present_cpus();
+#endif

mpc85xx_smp_flush_dcache_kexec(image);

+#ifdef CONFIG_PPC32
if (image->type == KEXEC_TYPE_DEFAULT)
smp_call_function(mpc85xx_smp_kexec_down, NULL, 0);

@@ -348,6 +359,7 @@ static void mpc85xx_smp_machine_kexec(struct kimage *image)
if ( i == smp_processor_id() ) continue;
mpic_reset_core(i);
}
+#endif

default_machine_kexec(image);
}
--
1.7.9.5

2012-11-15 09:40:22

by “tiejun.chen”

Subject: [PATCH 1/6] powerpc/book3e: support CONFIG_RELOCATABLE

book3e differs from book3s: book3s includes the exception vector code
in head_64.S, since that code relies on absolute addressing, which is
only possible within this compilation unit. So we have to get the
label address via the GOT.

And when booting a relocated kernel, we should set IVPR properly
again after .relocate.
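
For context, the two idioms differ in how a label's address is
materialized; paraphrasing the ppc64 macros from asm/ppc_asm.h of
this era (a sketch -- check the tree for the exact expansions):

	/* LOAD_REG_IMMEDIATE(r3, sym): a lis/ori/rldicr/oris/ori
	 * sequence that bakes in the link-time constant -- wrong once
	 * the kernel runs at a relocated address.
	 */

	/* LOAD_REG_ADDR(r3, sym): loads the address from the GOT,
	 * which the .relocate pass fixes up, so it is correct at the
	 * running address:
	 *	ld	r3,sym@got(r2)
	 */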

Signed-off-by: Tiejun Chen <[email protected]>
---
arch/powerpc/include/asm/exception-64e.h |  8 ++++++++
arch/powerpc/kernel/exceptions-64e.S     | 15 ++++++++++++++-
arch/powerpc/kernel/head_64.S            | 22 ++++++++++++++++++++++
arch/powerpc/lib/feature-fixups.c        |  7 +++++++
4 files changed, 51 insertions(+), 1 deletion(-)

diff --git a/arch/powerpc/include/asm/exception-64e.h b/arch/powerpc/include/asm/exception-64e.h
index 51fa43e..89e940d 100644
--- a/arch/powerpc/include/asm/exception-64e.h
+++ b/arch/powerpc/include/asm/exception-64e.h
@@ -214,10 +214,18 @@ exc_##label##_book3e:
#define TLB_MISS_STATS_SAVE_INFO_BOLTED
#endif

+#ifndef CONFIG_RELOCATABLE
#define SET_IVOR(vector_number, vector_offset) \
li r3,vector_offset@l; \
ori r3,r3,interrupt_base_book3e@l; \
mtspr SPRN_IVOR##vector_number,r3;
+#else
+#define SET_IVOR(vector_number, vector_offset) \
+ LOAD_REG_ADDR(r3,interrupt_base_book3e);\
+ rlwinm r3,r3,0,15,0; \
+ ori r3,r3,vector_offset@l; \
+ mtspr SPRN_IVOR##vector_number,r3;
+#endif

#endif /* _ASM_POWERPC_EXCEPTION_64E_H */

diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index 4e7083e..82be30b 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -1104,7 +1104,15 @@ skpinv: addi r6,r6,1 /* Increment */
* r4 = MAS0 w/TLBSEL & ESEL for the temp mapping
*/
/* Now we branch the new virtual address mapped by this entry */
+#ifdef CONFIG_RELOCATABLE
+ /* We have to compute our own address from LR. */
+ bl 1f /* Find our address */
+1: mflr r6
+ addi r6,r6,(2f - 1b)
+ tovirt(r6,r6)
+#else
LOAD_REG_IMMEDIATE(r6,2f)
+#endif
lis r7,MSR_KERNEL@h
ori r7,r7,MSR_KERNEL@l
mtspr SPRN_SRR0,r6
@@ -1355,9 +1363,14 @@ _GLOBAL(book3e_secondary_thread_init)
mflr r28
b 3b

-_STATIC(init_core_book3e)
+_GLOBAL(init_core_book3e)
/* Establish the interrupt vector base */
+#ifdef CONFIG_RELOCATABLE
+ tovirt(r2,r2)
+ LOAD_REG_ADDR(r3, interrupt_base_book3e)
+#else
LOAD_REG_IMMEDIATE(r3, interrupt_base_book3e)
+#endif
mtspr SPRN_IVPR,r3
sync
blr
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 9e07bd0..aa7df52 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -395,12 +395,22 @@ _STATIC(__after_prom_start)
/* process relocations for the final address of the kernel */
lis r25,PAGE_OFFSET@highest /* compute virtual base of kernel */
sldi r25,r25,32
+#if defined(CONFIG_PPC_BOOK3E)
+ tovirt(r26,r26) /* on booke, we already run at PAGE_OFFSET */
+#endif
lwz r7,__run_at_load-_stext(r26)
+#if defined(CONFIG_PPC_BOOK3E)
+ tophys(r26,r26) /* Restore for the remaining code. */
+#endif
cmplwi cr0,r7,1 /* flagged to stay where we are ? */
bne 1f
add r25,r25,r26
1: mr r3,r25
bl .relocate
+#if defined(CONFIG_PPC_BOOK3E)
+ /* We should set IVPR again after .relocate. */
+ bl .init_core_book3e
+#endif
#endif

/*
@@ -428,11 +438,23 @@ _STATIC(__after_prom_start)
* variable __run_at_load, if it is set the kernel is treated as relocatable
* kernel, otherwise it will be moved to PHYSICAL_START
*/
+#if defined(CONFIG_PPC_BOOK3E)
+ tovirt(r26,r26) /* on booke, we already run at PAGE_OFFSET */
+#endif
lwz r7,__run_at_load-_stext(r26)
+#if defined(CONFIG_PPC_BOOK3E)
+ tophys(r26,r26) /* Restore for the remaining code. */
+#endif
cmplwi cr0,r7,1
bne 3f

+#ifdef CONFIG_PPC_BOOK3E
+ LOAD_REG_ADDR(r5, interrupt_end_book3e)
+ LOAD_REG_ADDR(r11, _stext)
+ sub r5,r5,r11
+#else
li r5,__end_interrupts - _stext /* just copy interrupts */
+#endif
b 5f
3:
#endif
diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
index 7a8a748..13f20ed 100644
--- a/arch/powerpc/lib/feature-fixups.c
+++ b/arch/powerpc/lib/feature-fixups.c
@@ -135,13 +135,20 @@ void do_final_fixups(void)
#if defined(CONFIG_PPC64) && defined(CONFIG_RELOCATABLE)
int *src, *dest;
unsigned long length;
+#ifdef CONFIG_PPC_BOOK3E
+ extern char interrupt_end_book3e[];
+#endif

if (PHYSICAL_START == 0)
return;

src = (int *)(KERNELBASE + PHYSICAL_START);
dest = (int *)KERNELBASE;
+#ifdef CONFIG_PPC_BOOK3E
+ length = (interrupt_end_book3e - _stext) / sizeof(int);
+#else
length = (__end_interrupts - _stext) / sizeof(int);
+#endif

while (length--) {
patch_instruction(dest, *src);
--
1.7.9.5

2012-11-15 09:42:56

by “tiejun.chen”

Subject: [PATCH 3/6] book3e/kexec/kdump: create a 1:1 TLB mapping

book3e has no real MMU mode, so we have to create a 1:1 TLB mapping
to make sure we can access the real physical address. Also correct a
few things to support this pseudo real mode on book3e.
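
In C terms, the new kexec_create_tlb helper below does roughly the
following (a hedged sketch built from the MAS macros in
asm/mmu-book3e.h; the patch itself is assembly):

	/* Sketch only -- the real code is the asm added to misc_64.S. */
	unsigned int esel = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;

	mtspr(SPRN_MAS0, MAS0_TLBSEL(1) | MAS0_ESEL(esel)); /* last TLB1 slot */
	mtspr(SPRN_MAS1, MAS1_VALID | MAS1_IPROT |          /* valid, bolted */
			 MAS1_TSIZE(BOOK3E_PAGESZ_1GB));    /* 1GB page */
	mtspr(SPRN_MAS2, 0 | M_IF_SMP);         /* EPN = 0, M-bit if SMP */
	mtspr(SPRN_MAS3, MAS3_SR | MAS3_SW | MAS3_SX);  /* RPN = 0, RWX */
	mtspr(SPRN_MAS7, 0);                    /* upper RPN bits */
	asm volatile("tlbwe; isync" : : : "memory");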

Signed-off-by: Tiejun Chen <[email protected]>
---
arch/powerpc/kernel/head_64.S |  9 ++++---
arch/powerpc/kernel/misc_64.S | 55 ++++++++++++++++++++++++++++++++++++++++-
2 files changed, 60 insertions(+), 4 deletions(-)

diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index aa7df52..d51ffc0 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -425,12 +425,12 @@ _STATIC(__after_prom_start)
tovirt(r3,r3) /* on booke, we already run at PAGE_OFFSET */
#endif
mr. r4,r26 /* In some cases the loader may */
+#if defined(CONFIG_PPC_BOOK3E)
+ tovirt(r4,r4)
+#endif
beq 9f /* have already put us at zero */
li r6,0x100 /* Start offset, the first 0x100 */
/* bytes were copied earlier. */
-#ifdef CONFIG_PPC_BOOK3E
- tovirt(r6,r6) /* on booke, we already run at PAGE_OFFSET */
-#endif

#ifdef CONFIG_RELOCATABLE
/*
@@ -472,6 +472,9 @@ _STATIC(__after_prom_start)
p_end: .llong _end - _stext

4: /* Now copy the rest of the kernel up to _end */
+#if defined(CONFIG_PPC_BOOK3E)
+ tovirt(r26,r26)
+#endif
addis r5,r26,(p_end - _stext)@ha
ld r5,(p_end - _stext)@l(r5) /* get _end */
5: bl .copy_and_flush /* copy the rest */
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index c2acf8c..ffe6043 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -449,6 +449,49 @@ kexec_flag:


#ifdef CONFIG_KEXEC
+#ifdef CONFIG_PPC_BOOK3E
+/* BOOK3E has no real MMU mode, so we have to set up an initial TLB
+ * entry for a core to map v:0 to p:0 1:1. The current implementation
+ * assumes that 1GB is enough for kexec.
+ */
+#include <asm/mmu.h>
+kexec_create_tlb:
+ /* Invalidate all TLBs to avoid any TLB conflict. */
+ PPC_TLBILX_ALL(0,R0)
+ sync
+ isync
+
+ mfspr r10,SPRN_TLB1CFG
+ andi. r10,r10,TLBnCFG_N_ENTRY /* Extract # entries */
+ subi r10,r10,1 /* The last entry is often safe to use */
+ lis r9,MAS0_TLBSEL(1)@h
+ rlwimi r9,r10,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r10) */
+
+/* Setup a temp mapping v:0 to p:0 as 1:1 and return to it.
+ */
+#ifdef CONFIG_SMP
+#define M_IF_SMP MAS2_M
+#else
+#define M_IF_SMP 0
+#endif
+ mtspr SPRN_MAS0,r9
+
+ lis r9,(MAS1_VALID|MAS1_IPROT)@h
+ ori r9,r9,(MAS1_TSIZE(BOOK3E_PAGESZ_1GB))@l
+ mtspr SPRN_MAS1,r9
+
+ LOAD_REG_IMMEDIATE(r9, 0x0 | M_IF_SMP)
+ mtspr SPRN_MAS2,r9
+
+ LOAD_REG_IMMEDIATE(r9, 0x0 | MAS3_SR | MAS3_SW | MAS3_SX)
+ mtspr SPRN_MAS3,r9
+ li r9,0
+ mtspr SPRN_MAS7,r9
+
+ tlbwe
+ isync
+ blr
+#endif

/* kexec_smp_wait(void)
*
@@ -462,6 +505,10 @@ kexec_flag:
*/
_GLOBAL(kexec_smp_wait)
lhz r3,PACAHWCPUID(r13)
+#ifdef CONFIG_PPC_BOOK3E
+ /* Create a 1:1 mapping. */
+ bl kexec_create_tlb
+#endif
bl real_mode

li r4,KEXEC_STATE_REAL_MODE
@@ -478,6 +525,7 @@ _GLOBAL(kexec_smp_wait)
* don't overwrite r3 here, it is live for kexec_wait above.
*/
real_mode: /* assume normal blr return */
+#ifndef CONFIG_PPC_BOOK3E
1: li r9,MSR_RI
li r10,MSR_DR|MSR_IR
mflr r11 /* return address to SRR0 */
@@ -489,7 +537,10 @@ real_mode: /* assume normal blr return */
mtspr SPRN_SRR1,r10
mtspr SPRN_SRR0,r11
rfid
-
+#else
+ /* real mode is a no-op on book3e. */
+ blr
+#endif

/*
* kexec_sequence(newstack, start, image, control, clear_all())
@@ -538,6 +589,8 @@ _GLOBAL(kexec_sequence)
mtmsrd r3,1
#else
wrteei 0
+ /* Create a 1:1 mapping. */
+ bl kexec_create_tlb
#endif

/* copy dest pages, flush whole dest image */
--
1.7.9.5