In sleep PM mode, the clocks of the e500 core and unused IP blocks are
turned off. IP blocks which are allowed to wake up the processor
remain running.
Some Freescale chips, such as the MPC8536 and P1022, have a deep sleep
PM mode in addition to the sleep PM mode.
In deep sleep PM mode, the power supply is additionally removed from
the e500 core and most IP blocks. Only the blocks needed to wake the
chip out of deep sleep remain powered.
This patch supports both 32-bit and 36-bit physical address spaces.
Sleep mode corresponds to the Standby state in Linux. Deep sleep
mode corresponds to the Suspend-to-RAM state of Linux Power Management.
Command to enter sleep mode:
echo standby > /sys/power/state
Command to enter deep sleep mode:
echo mem > /sys/power/state
Signed-off-by: Dave Liu <[email protected]>
Signed-off-by: Li Yang <[email protected]>
Signed-off-by: Jin Qing <[email protected]>
Signed-off-by: Jerry Huang <[email protected]>
Cc: Scott Wood <[email protected]>
Signed-off-by: Zhao Chenhui <[email protected]>
---
arch/powerpc/Kconfig | 2 +-
arch/powerpc/include/asm/cacheflush.h | 2 +
arch/powerpc/kernel/Makefile | 1 +
arch/powerpc/kernel/cache_fsl.S | 57 +++
arch/powerpc/platforms/85xx/Makefile | 1 +
arch/powerpc/platforms/85xx/sleep.S | 621 +++++++++++++++++++++++++++++++++
arch/powerpc/sysdev/fsl_pmc.c | 98 +++++-
arch/powerpc/sysdev/fsl_soc.h | 5 +
8 files changed, 768 insertions(+), 19 deletions(-)
create mode 100644 arch/powerpc/kernel/cache_fsl.S
create mode 100644 arch/powerpc/platforms/85xx/sleep.S
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index d894069..d7b0517 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -667,7 +667,7 @@ config FSL_PCI
config FSL_PMC
bool
default y
- depends on SUSPEND && (PPC_85xx || PPC_86xx)
+ depends on SUSPEND && (PPC_85xx || PPC_86xx) && !PPC_E500MC
help
Freescale MPC85xx/MPC86xx power management controller support
(suspend/resume). For MPC83xx see platforms/83xx/suspend.c
diff --git a/arch/powerpc/include/asm/cacheflush.h b/arch/powerpc/include/asm/cacheflush.h
index b843e35..6c5f1c2 100644
--- a/arch/powerpc/include/asm/cacheflush.h
+++ b/arch/powerpc/include/asm/cacheflush.h
@@ -58,6 +58,8 @@ extern void flush_inval_dcache_range(unsigned long start, unsigned long stop);
extern void flush_dcache_phys_range(unsigned long start, unsigned long stop);
#endif
+extern void flush_dcache_L1(void);
+
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
memcpy(dst, src, len); \
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index bb282dd..21e025b 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -64,6 +64,7 @@ obj-$(CONFIG_FA_DUMP) += fadump.o
ifeq ($(CONFIG_PPC32),y)
obj-$(CONFIG_E500) += idle_e500.o
endif
+obj-y += cache_fsl.o
obj-$(CONFIG_6xx) += idle_6xx.o l2cr_6xx.o cpu_setup_6xx.o
obj-$(CONFIG_TAU) += tau_6xx.o
obj-$(CONFIG_HIBERNATION) += swsusp.o suspend.o
diff --git a/arch/powerpc/kernel/cache_fsl.S b/arch/powerpc/kernel/cache_fsl.S
new file mode 100644
index 0000000..25cd22e
--- /dev/null
+++ b/arch/powerpc/kernel/cache_fsl.S
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2009-2012 Freescale Semiconductor, Inc. All rights reserved.
+ * Scott Wood <[email protected]>
+ * Dave Liu <[email protected]>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <asm/reg.h>
+#include <asm/cputable.h>
+#include <asm/ppc_asm.h>
+#include <asm/asm-offsets.h>
+
+#define L2CTL_L2E 0x80000000
+#define L2CTL_L2I 0x40000000
+
+ .section .text
+
+#ifdef CONFIG_FSL_PMC
+ /* r3 = virtual address of L2 controller, WIMG = 01xx */
+_GLOBAL(flush_disable_L2)
+ /* It's a write-through cache, so only invalidation is needed. */
+ mbar
+ isync
+ lwz r4, 0(r3)
+ li r5, 1
+ rlwimi r4, r5, 30, L2CTL_L2E | L2CTL_L2I
+ stw r4, 0(r3)
+
+ /* Wait for the invalidate to finish */
+1: lwz r4, 0(r3)
+ andis. r4, r4, L2CTL_L2I@h
+ bne 1b
+ mbar
+
+ blr
+
+ /* r3 = virtual address of L2 controller, WIMG = 01xx */
+_GLOBAL(invalidate_enable_L2)
+ mbar
+ isync
+ lwz r4, 0(r3)
+ li r5, 3
+ rlwimi r4, r5, 30, L2CTL_L2E | L2CTL_L2I
+ stw r4, 0(r3)
+
+ /* Wait for the invalidate to finish */
+1: lwz r4, 0(r3)
+ andis. r4, r4, L2CTL_L2I@h
+ bne 1b
+ mbar
+
+ blr
+#endif
diff --git a/arch/powerpc/platforms/85xx/Makefile b/arch/powerpc/platforms/85xx/Makefile
index 76f679c..8a030a1 100644
--- a/arch/powerpc/platforms/85xx/Makefile
+++ b/arch/powerpc/platforms/85xx/Makefile
@@ -4,6 +4,7 @@
obj-$(CONFIG_SMP) += smp.o
obj-y += common.o
+obj-$(CONFIG_FSL_PMC) += sleep.o
obj-$(CONFIG_BSC9131_RDB) += bsc913x_rdb.o
obj-$(CONFIG_MPC8540_ADS) += mpc85xx_ads.o
diff --git a/arch/powerpc/platforms/85xx/sleep.S b/arch/powerpc/platforms/85xx/sleep.S
new file mode 100644
index 0000000..e6dfede
--- /dev/null
+++ b/arch/powerpc/platforms/85xx/sleep.S
@@ -0,0 +1,621 @@
+/*
+ * Enter and leave deep sleep/sleep state on MPC85xx
+ *
+ * Author: Scott Wood <[email protected]>
+ *
+ * Copyright (C) 2006-2012 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <asm/page.h>
+#include <asm/ppc_asm.h>
+#include <asm/reg.h>
+#include <asm/asm-offsets.h>
+#include <asm/mmu.h>
+
+#define CCSR_ADDR 0xf0000000
+
+#define L2C_OFFSET 0x20000 /* L2 Cache Controller offset */
+
+#define BPTR_OFFSET 0x20 /* Boot Page Translation Register */
+#define BPTR_EN 0x80000000
+
+#define PMRCCR_OFFSET 0xe0084
+#define PMRCCR_VRCNT_PRE_MASK 0x1f000000
+#define PMRCCR_VRCNT_MASK 0x00ff0000
+
+#define POWMGTSCR_OFFSET 0xe0080
+#define POWMGTSCR_DPSLP 0x00100000 /* deep sleep mode */
+
+#define SS_TB 0x00
+#define SS_HID 0x08 /* 2 HIDs */
+#define SS_IAC 0x10 /* 2 IACs */
+#define SS_DAC 0x18 /* 2 DACs */
+#define SS_DBCR 0x20 /* 3 DBCRs */
+#define SS_PID 0x2c /* 3 PIDs */
+#define SS_SPRG 0x38 /* 8 SPRGs */
+#define SS_IVOR 0x58 /* 20 interrupt vectors */
+#define SS_TCR 0xa8
+#define SS_BUCSR 0xac
+#define SS_L1CSR 0xb0 /* 2 L1CSRs */
+#define SS_MSR 0xb8
+#define SS_USPRG 0xbc
+#define SS_GPREG 0xc0 /* r12-r31 */
+#define SS_LR 0x110
+#define SS_CR 0x114
+#define SS_SP 0x118
+#define SS_CURRENT 0x11c
+#define SS_IVPR 0x120
+#define SS_BPTR 0x124
+
+#define STATE_SAVE_SIZE 0x128
+
+ .section .data
+ .align 5
+mpc85xx_sleep_save_area:
+ .space STATE_SAVE_SIZE
+ccsrbase_low:
+ .long 0
+ccsrbase_high:
+ .long 0
+powmgtreq:
+ .long 0
+
+ .section .text
+ .align 12
+
+ /*
+ * r3 = high word of physical address of CCSR
+ * r4 = low word of physical address of CCSR
+ * r5 = JOG or deep sleep request
+ * JOG-0x00200000, deep sleep-0x00100000
+ */
+_GLOBAL(mpc85xx_enter_deep_sleep)
+ lis r6, ccsrbase_low@ha
+ stw r4, ccsrbase_low@l(r6)
+ lis r6, ccsrbase_high@ha
+ stw r3, ccsrbase_high@l(r6)
+
+ lis r6, powmgtreq@ha
+ stw r5, powmgtreq@l(r6)
+
+ lis r10, mpc85xx_sleep_save_area@h
+ ori r10, r10, mpc85xx_sleep_save_area@l
+
+ mfspr r5, SPRN_HID0
+ mfspr r6, SPRN_HID1
+
+ stw r5, SS_HID+0(r10)
+ stw r6, SS_HID+4(r10)
+
+ mfspr r4, SPRN_IAC1
+ mfspr r5, SPRN_IAC2
+ mfspr r6, SPRN_DAC1
+ mfspr r7, SPRN_DAC2
+
+ stw r4, SS_IAC+0(r10)
+ stw r5, SS_IAC+4(r10)
+ stw r6, SS_DAC+0(r10)
+ stw r7, SS_DAC+4(r10)
+
+ mfspr r4, SPRN_DBCR0
+ mfspr r5, SPRN_DBCR1
+ mfspr r6, SPRN_DBCR2
+
+ stw r4, SS_DBCR+0(r10)
+ stw r5, SS_DBCR+4(r10)
+ stw r6, SS_DBCR+8(r10)
+
+ mfspr r4, SPRN_PID0
+ mfspr r5, SPRN_PID1
+ mfspr r6, SPRN_PID2
+
+ stw r4, SS_PID+0(r10)
+ stw r5, SS_PID+4(r10)
+ stw r6, SS_PID+8(r10)
+
+ mfspr r4, SPRN_SPRG0
+ mfspr r5, SPRN_SPRG1
+ mfspr r6, SPRN_SPRG2
+ mfspr r7, SPRN_SPRG3
+
+ stw r4, SS_SPRG+0x00(r10)
+ stw r5, SS_SPRG+0x04(r10)
+ stw r6, SS_SPRG+0x08(r10)
+ stw r7, SS_SPRG+0x0c(r10)
+
+ mfspr r4, SPRN_SPRG4
+ mfspr r5, SPRN_SPRG5
+ mfspr r6, SPRN_SPRG6
+ mfspr r7, SPRN_SPRG7
+
+ stw r4, SS_SPRG+0x10(r10)
+ stw r5, SS_SPRG+0x14(r10)
+ stw r6, SS_SPRG+0x18(r10)
+ stw r7, SS_SPRG+0x1c(r10)
+
+ mfspr r4, SPRN_IVPR
+ stw r4, SS_IVPR(r10)
+
+ mfspr r4, SPRN_IVOR0
+ mfspr r5, SPRN_IVOR1
+ mfspr r6, SPRN_IVOR2
+ mfspr r7, SPRN_IVOR3
+
+ stw r4, SS_IVOR+0x00(r10)
+ stw r5, SS_IVOR+0x04(r10)
+ stw r6, SS_IVOR+0x08(r10)
+ stw r7, SS_IVOR+0x0c(r10)
+
+ mfspr r4, SPRN_IVOR4
+ mfspr r5, SPRN_IVOR5
+ mfspr r6, SPRN_IVOR6
+ mfspr r7, SPRN_IVOR7
+
+ stw r4, SS_IVOR+0x10(r10)
+ stw r5, SS_IVOR+0x14(r10)
+ stw r6, SS_IVOR+0x18(r10)
+ stw r7, SS_IVOR+0x1c(r10)
+
+ mfspr r4, SPRN_IVOR8
+ mfspr r5, SPRN_IVOR9
+ mfspr r6, SPRN_IVOR10
+ mfspr r7, SPRN_IVOR11
+
+ stw r4, SS_IVOR+0x20(r10)
+ stw r5, SS_IVOR+0x24(r10)
+ stw r6, SS_IVOR+0x28(r10)
+ stw r7, SS_IVOR+0x2c(r10)
+
+ mfspr r4, SPRN_IVOR12
+ mfspr r5, SPRN_IVOR13
+ mfspr r6, SPRN_IVOR14
+ mfspr r7, SPRN_IVOR15
+
+ stw r4, SS_IVOR+0x30(r10)
+ stw r5, SS_IVOR+0x34(r10)
+ stw r6, SS_IVOR+0x38(r10)
+ stw r7, SS_IVOR+0x3c(r10)
+
+ mfspr r4, SPRN_IVOR32
+ mfspr r5, SPRN_IVOR33
+ mfspr r6, SPRN_IVOR34
+ mfspr r7, SPRN_IVOR35
+
+ stw r4, SS_IVOR+0x40(r10)
+ stw r5, SS_IVOR+0x44(r10)
+ stw r6, SS_IVOR+0x48(r10)
+ stw r7, SS_IVOR+0x4c(r10)
+
+ mfspr r4, SPRN_TCR
+ mfspr r5, SPRN_BUCSR
+ mfspr r6, SPRN_L1CSR0
+ mfspr r7, SPRN_L1CSR1
+ mfspr r8, SPRN_USPRG0
+
+ stw r4, SS_TCR(r10)
+ stw r5, SS_BUCSR(r10)
+ stw r6, SS_L1CSR+0(r10)
+ stw r7, SS_L1CSR+4(r10)
+ stw r8, SS_USPRG+0(r10)
+
+ stmw r12, SS_GPREG(r10)
+
+ mfmsr r4
+ mflr r5
+ mfcr r6
+
+ stw r4, SS_MSR(r10)
+ stw r5, SS_LR(r10)
+ stw r6, SS_CR(r10)
+ stw r1, SS_SP(r10)
+ stw r2, SS_CURRENT(r10)
+
+1: mftbu r4
+ mftb r5
+ mftbu r6
+ cmpw r4, r6
+ bne 1b
+
+ stw r4, SS_TB+0(r10)
+ stw r5, SS_TB+4(r10)
+
+ lis r5, ccsrbase_low@ha
+ lwz r4, ccsrbase_low@l(r5)
+ lis r5, ccsrbase_high@ha
+ lwz r3, ccsrbase_high@l(r5)
+
+ /* Disable machine checks and critical exceptions */
+ mfmsr r5
+ rlwinm r5, r5, 0, ~MSR_CE
+ rlwinm r5, r5, 0, ~MSR_ME
+ mtmsr r5
+ isync
+
+ /* Use TLB1[15] to map the CCSR at 0xf0000000 */
+ LOAD_REG_IMMEDIATE(r5, MAS0_TLBSEL(1) | MAS0_ESEL(15))
+ mtspr SPRN_MAS0, r5
+ LOAD_REG_IMMEDIATE(r5,
+ MAS1_VALID | MAS1_IPROT | MAS1_TSIZE(BOOK3E_PAGESZ_1M))
+ mtspr SPRN_MAS1, r5
+ LOAD_REG_IMMEDIATE(r5, CCSR_ADDR | MAS2_I | MAS2_M)
+ mtspr SPRN_MAS2, r5
+ rlwinm r5, r4, 0, MAS3_RPN
+ ori r5, r5, (MAS3_SW | MAS3_SR)
+ mtspr SPRN_MAS3, r5
+ mtspr SPRN_MAS7, r3
+ isync
+ tlbwe
+ isync
+
+ LOAD_REG_IMMEDIATE(r3, CCSR_ADDR + BPTR_OFFSET)
+ lwz r4, 0(r3)
+ stw r4, SS_BPTR(r10)
+
+ LOAD_REG_IMMEDIATE(r3, CCSR_ADDR + L2C_OFFSET)
+ bl flush_disable_L2
+ bl __flush_disable_L1
+
+ /* Enable I-cache, so as not to upset the bus
+ * with our loop.
+ */
+ mfspr r4, SPRN_L1CSR1
+ ori r4, r4, L1CSR1_ICE
+ mtspr SPRN_L1CSR1, r4
+ isync
+
+ /* Set boot page translation */
+ LOAD_REG_IMMEDIATE(r3, CCSR_ADDR + BPTR_OFFSET)
+ lis r4, (mpc85xx_deep_resume - PAGE_OFFSET)@h
+ ori r4, r4, (mpc85xx_deep_resume - PAGE_OFFSET)@l
+ rlwinm r4, r4, 20, 12, 31
+ oris r4, r4, BPTR_EN@h
+ stw r4, 0(r3)
+ lwz r4, 0(r3) /* read-back to flush write */
+ twi 0, r4, 0
+ isync
+
+ /* Disable the decrementer */
+ mfspr r4, SPRN_TCR
+ rlwinm r4, r4, 0, ~TCR_DIE
+ mtspr SPRN_TCR, r4
+
+ mfspr r4, SPRN_TSR
+ oris r4, r4, TSR_DIS@h
+ mtspr SPRN_TSR, r4
+
+ /* set PMRCCR[VRCNT] to wait 40ms for the power to stabilize */
+ LOAD_REG_IMMEDIATE(r3, CCSR_ADDR + PMRCCR_OFFSET)
+ lwz r4, 0(r3)
+ li r5, 0x12
+ rlwimi r4, r5, 0, PMRCCR_VRCNT_PRE_MASK
+ li r5, 0xa3
+ rlwimi r4, r5, 0, PMRCCR_VRCNT_MASK
+ stw r4, 0(r3)
+ lwz r4, 0(r3)
+
+ /* set deep sleep bit in POWMGTSCR */
+ lis r3, powmgtreq@ha
+ lwz r8, powmgtreq@l(r3)
+ LOAD_REG_IMMEDIATE(r3, CCSR_ADDR + POWMGTSCR_OFFSET)
+ lwz r4, 0(r3)
+ or r4, r4, r8
+ stw r4, 0(r3)
+ lwz r4, 0(r3) /* read-back to flush write */
+ twi 0, r4, 0
+ isync
+
+ mftb r5
+1: /* spin until either we enter deep sleep, or the sleep process is
+ * aborted due to a pending wakeup event. Wait some time between
+ * accesses, so we don't flood the bus and prevent the pmc from
+ * detecting an idle system.
+ */
+
+ mftb r4
+ subf r7, r5, r4
+ cmpwi r7, 1000
+ blt 1b
+ mr r5, r4
+
+ lwz r6, 0(r3)
+ andis. r6, r6, POWMGTSCR_DPSLP@h
+ bne 1b
+ b 2f
+
+2: mfspr r4, SPRN_PIR
+ andi. r4, r4, 1
+99: bne 99b
+
+ /* Establish a temporary 64MB 0->0 mapping in TLB1[1]. */
+ LOAD_REG_IMMEDIATE(r4, MAS0_TLBSEL(1) | MAS0_ESEL(1))
+ mtspr SPRN_MAS0, r4
+ LOAD_REG_IMMEDIATE(r4,
+ MAS1_VALID | MAS1_IPROT | MAS1_TSIZE(BOOK3E_PAGESZ_64M))
+ mtspr SPRN_MAS1, r4
+ li r4, 0
+ mtspr SPRN_MAS2, r4
+ li r4, (MAS3_SX | MAS3_SW | MAS3_SR)
+ mtspr SPRN_MAS3, r4
+ li r4, 0
+ mtspr SPRN_MAS7, r4
+ isync
+ tlbwe
+ isync
+
+ lis r3, (3f - PAGE_OFFSET)@h
+ ori r3, r3, (3f - PAGE_OFFSET)@l
+ mtctr r3
+ bctr
+
+ /* Locate the resume vector in the last word of the current page. */
+ . = mpc85xx_enter_deep_sleep + 0xffc
+mpc85xx_deep_resume:
+ b 2b
+
+3:
+ /* Restore the contents of TLB1[0]. It is assumed that it covers
+ * the currently executing code and the sleep save area, and that
+ * it does not alias our temporary mapping (which is at virtual zero).
+ */
+ lis r3, (TLBCAM - PAGE_OFFSET)@h
+ ori r3, r3, (TLBCAM - PAGE_OFFSET)@l
+
+ lwz r4, 0(r3)
+ lwz r5, 4(r3)
+ lwz r6, 8(r3)
+ lwz r7, 12(r3)
+ lwz r8, 16(r3)
+
+ mtspr SPRN_MAS0, r4
+ mtspr SPRN_MAS1, r5
+ mtspr SPRN_MAS2, r6
+ mtspr SPRN_MAS3, r7
+ mtspr SPRN_MAS7, r8
+
+ isync
+ tlbwe
+ isync
+
+ /* Access the ccsrbase address with TLB1[0] */
+ lis r5, ccsrbase_low@ha
+ lwz r4, ccsrbase_low@l(r5)
+ lis r5, ccsrbase_high@ha
+ lwz r3, ccsrbase_high@l(r5)
+
+ /* Use TLB1[15] to map the CCSR at 0xf0000000 */
+ LOAD_REG_IMMEDIATE(r5, MAS0_TLBSEL(1) | MAS0_ESEL(15))
+ mtspr SPRN_MAS0, r5
+ LOAD_REG_IMMEDIATE(r5,
+ MAS1_VALID | MAS1_IPROT | MAS1_TSIZE(BOOK3E_PAGESZ_1M))
+ mtspr SPRN_MAS1, r5
+ LOAD_REG_IMMEDIATE(r5, CCSR_ADDR | MAS2_I | MAS2_M)
+ mtspr SPRN_MAS2, r5
+ rlwinm r5, r4, 0, MAS3_RPN
+ ori r5, r5, (MAS3_SW | MAS3_SR)
+ mtspr SPRN_MAS3, r5
+ mtspr SPRN_MAS7, r3
+ isync
+ tlbwe
+ isync
+
+ LOAD_REG_IMMEDIATE(r3, CCSR_ADDR + L2C_OFFSET)
+ bl invalidate_enable_L2
+
+ /* Access the MEM(r10) with TLB1[0] */
+ lis r10, mpc85xx_sleep_save_area@h
+ ori r10, r10, mpc85xx_sleep_save_area@l
+
+ LOAD_REG_IMMEDIATE(r3, CCSR_ADDR + BPTR_OFFSET)
+ lwz r4, SS_BPTR(r10)
+ stw r4, 0(r3) /* restore BPTR */
+
+ /* Shift the running space back to PAGE_OFFSET */
+ mfmsr r3
+ lis r4, 1f@h
+ ori r4, r4, 1f@l
+
+ mtsrr1 r3
+ mtsrr0 r4
+ rfi
+
+1: /* Restore the rest of TLB1, in ascending order so that
+ * the TLB1[1] gets invalidated first.
+ *
+ * XXX: It's better to invalidate the temporary mapping
+ * TLB1[15] for CCSR before restoring any TLB1 entry, including 0.
+ */
+ LOAD_REG_IMMEDIATE(r4, MAS0_TLBSEL(1) | MAS0_ESEL(15))
+ mtspr SPRN_MAS0, r4
+ lis r4, 0
+ mtspr SPRN_MAS1, r4
+ isync
+ tlbwe
+ isync
+
+ lis r3, (TLBCAM + 5*4 - 4)@h
+ ori r3, r3, (TLBCAM + 5*4 - 4)@l
+ li r4, 15
+ mtctr r4
+
+2:
+ lwz r5, 4(r3)
+ lwz r6, 8(r3)
+ lwz r7, 12(r3)
+ lwz r8, 16(r3)
+ lwzu r9, 20(r3)
+
+ mtspr SPRN_MAS0, r5
+ mtspr SPRN_MAS1, r6
+ mtspr SPRN_MAS2, r7
+ mtspr SPRN_MAS3, r8
+ mtspr SPRN_MAS7, r9
+
+ isync
+ tlbwe
+ isync
+ bdnz 2b
+
+ lis r10, mpc85xx_sleep_save_area@h
+ ori r10, r10, mpc85xx_sleep_save_area@l
+
+ lwz r5, SS_HID+0(r10)
+ lwz r6, SS_HID+4(r10)
+
+ isync
+ mtspr SPRN_HID0, r5
+ isync
+
+ msync
+ mtspr SPRN_HID1, r6
+ isync
+
+ lwz r4, SS_IAC+0(r10)
+ lwz r5, SS_IAC+4(r10)
+ lwz r6, SS_DAC+0(r10)
+ lwz r7, SS_DAC+4(r10)
+
+ mtspr SPRN_IAC1, r4
+ mtspr SPRN_IAC2, r5
+ mtspr SPRN_DAC1, r6
+ mtspr SPRN_DAC2, r7
+
+ lwz r4, SS_DBCR+0(r10)
+ lwz r5, SS_DBCR+4(r10)
+ lwz r6, SS_DBCR+8(r10)
+
+ mtspr SPRN_DBCR0, r4
+ mtspr SPRN_DBCR1, r5
+ mtspr SPRN_DBCR2, r6
+
+ lwz r4, SS_PID+0(r10)
+ lwz r5, SS_PID+4(r10)
+ lwz r6, SS_PID+8(r10)
+
+ mtspr SPRN_PID0, r4
+ mtspr SPRN_PID1, r5
+ mtspr SPRN_PID2, r6
+
+ lwz r4, SS_SPRG+0x00(r10)
+ lwz r5, SS_SPRG+0x04(r10)
+ lwz r6, SS_SPRG+0x08(r10)
+ lwz r7, SS_SPRG+0x0c(r10)
+
+ mtspr SPRN_SPRG0, r4
+ mtspr SPRN_SPRG1, r5
+ mtspr SPRN_SPRG2, r6
+ mtspr SPRN_SPRG3, r7
+
+ lwz r4, SS_SPRG+0x10(r10)
+ lwz r5, SS_SPRG+0x14(r10)
+ lwz r6, SS_SPRG+0x18(r10)
+ lwz r7, SS_SPRG+0x1c(r10)
+
+ mtspr SPRN_SPRG4, r4
+ mtspr SPRN_SPRG5, r5
+ mtspr SPRN_SPRG6, r6
+ mtspr SPRN_SPRG7, r7
+
+ lwz r4, SS_IVPR(r10)
+ mtspr SPRN_IVPR, r4
+
+ lwz r4, SS_IVOR+0x00(r10)
+ lwz r5, SS_IVOR+0x04(r10)
+ lwz r6, SS_IVOR+0x08(r10)
+ lwz r7, SS_IVOR+0x0c(r10)
+
+ mtspr SPRN_IVOR0, r4
+ mtspr SPRN_IVOR1, r5
+ mtspr SPRN_IVOR2, r6
+ mtspr SPRN_IVOR3, r7
+
+ lwz r4, SS_IVOR+0x10(r10)
+ lwz r5, SS_IVOR+0x14(r10)
+ lwz r6, SS_IVOR+0x18(r10)
+ lwz r7, SS_IVOR+0x1c(r10)
+
+ mtspr SPRN_IVOR4, r4
+ mtspr SPRN_IVOR5, r5
+ mtspr SPRN_IVOR6, r6
+ mtspr SPRN_IVOR7, r7
+
+ lwz r4, SS_IVOR+0x20(r10)
+ lwz r5, SS_IVOR+0x24(r10)
+ lwz r6, SS_IVOR+0x28(r10)
+ lwz r7, SS_IVOR+0x2c(r10)
+
+ mtspr SPRN_IVOR8, r4
+ mtspr SPRN_IVOR9, r5
+ mtspr SPRN_IVOR10, r6
+ mtspr SPRN_IVOR11, r7
+
+ lwz r4, SS_IVOR+0x30(r10)
+ lwz r5, SS_IVOR+0x34(r10)
+ lwz r6, SS_IVOR+0x38(r10)
+ lwz r7, SS_IVOR+0x3c(r10)
+
+ mtspr SPRN_IVOR12, r4
+ mtspr SPRN_IVOR13, r5
+ mtspr SPRN_IVOR14, r6
+ mtspr SPRN_IVOR15, r7
+
+ lwz r4, SS_IVOR+0x40(r10)
+ lwz r5, SS_IVOR+0x44(r10)
+ lwz r6, SS_IVOR+0x48(r10)
+ lwz r7, SS_IVOR+0x4c(r10)
+
+ mtspr SPRN_IVOR32, r4
+ mtspr SPRN_IVOR33, r5
+ mtspr SPRN_IVOR34, r6
+ mtspr SPRN_IVOR35, r7
+
+ lwz r4, SS_TCR(r10)
+ lwz r5, SS_BUCSR(r10)
+ lwz r6, SS_L1CSR+0(r10)
+ lwz r7, SS_L1CSR+4(r10)
+ lwz r8, SS_USPRG+0(r10)
+
+ mtspr SPRN_TCR, r4
+ mtspr SPRN_BUCSR, r5
+
+ msync
+ isync
+ mtspr SPRN_L1CSR0, r6
+ isync
+
+ mtspr SPRN_L1CSR1, r7
+ isync
+
+ mtspr SPRN_USPRG0, r8
+
+ lmw r12, SS_GPREG(r10)
+
+ lwz r1, SS_SP(r10)
+ lwz r2, SS_CURRENT(r10)
+ lwz r4, SS_MSR(r10)
+ lwz r5, SS_LR(r10)
+ lwz r6, SS_CR(r10)
+
+ msync
+ mtmsr r4
+ isync
+
+ mtlr r5
+ mtcr r6
+
+ li r4, 0
+ mtspr SPRN_TBWL, r4
+
+ lwz r4, SS_TB+0(r10)
+ lwz r5, SS_TB+4(r10)
+
+ mtspr SPRN_TBWU, r4
+ mtspr SPRN_TBWL, r5
+
+ lis r3, 1
+ mtdec r3
+
+ blr
diff --git a/arch/powerpc/sysdev/fsl_pmc.c b/arch/powerpc/sysdev/fsl_pmc.c
index 592a0f8..45718c5 100644
--- a/arch/powerpc/sysdev/fsl_pmc.c
+++ b/arch/powerpc/sysdev/fsl_pmc.c
@@ -2,6 +2,7 @@
* Suspend/resume support
*
* Copyright 2009 MontaVista Software, Inc.
+ * Copyright 2010-2012 Freescale Semiconductor Inc.
*
* Author: Anton Vorontsov <[email protected]>
*
@@ -19,39 +20,89 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/of_platform.h>
+#include <linux/pm.h>
+#include <asm/cacheflush.h>
+#include <asm/switch_to.h>
+
+#include <sysdev/fsl_soc.h>
struct pmc_regs {
+ /* 0xe0070: Device disable control register */
__be32 devdisr;
+ /* 0xe0074: 2nd Device disable control register */
__be32 devdisr2;
- __be32 :32;
- __be32 :32;
- __be32 pmcsr;
-#define PMCSR_SLP (1 << 17)
+ __be32 res1;
+ /* 0xe007c: Power Management Jog Control Register */
+ __be32 pmjcr;
+ /* 0xe0080: Power management control and status register */
+ __be32 powmgtcsr;
+#define POWMGTCSR_SLP 0x00020000
+#define POWMGTCSR_DPSLP 0x00100000
+ __be32 res3[2];
+ /* 0xe008c: Power management clock disable register */
+ __be32 pmcdr;
};
-static struct device *pmc_dev;
static struct pmc_regs __iomem *pmc_regs;
+static unsigned int pmc_flag;
+
+#define PMC_SLEEP 0x1
+#define PMC_DEEP_SLEEP 0x2
static int pmc_suspend_enter(suspend_state_t state)
{
- int ret;
+ int ret = 0;
+
+ switch (state) {
+#ifdef CONFIG_PPC_85xx
+ case PM_SUSPEND_MEM:
+#ifdef CONFIG_SPE
+ enable_kernel_spe();
+#endif
+ enable_kernel_fp();
+
+ pr_debug("%s: Entering deep sleep\n", __func__);
+
+ local_irq_disable();
+ mpc85xx_enter_deep_sleep(get_immrbase(), POWMGTCSR_DPSLP);
+
+ pr_debug("%s: Resumed from deep sleep\n", __func__);
+ break;
+#endif
- setbits32(&pmc_regs->pmcsr, PMCSR_SLP);
- /* At this point, the CPU is asleep. */
+ case PM_SUSPEND_STANDBY:
+ local_irq_disable();
+#ifdef CONFIG_PPC_85xx
+ flush_dcache_L1();
+#endif
+ setbits32(&pmc_regs->powmgtcsr, POWMGTCSR_SLP);
+ /* At this point, the CPU is asleep. */
- /* Upon resume, wait for SLP bit to be clear. */
- ret = spin_event_timeout((in_be32(&pmc_regs->pmcsr) & PMCSR_SLP) == 0,
- 10000, 10) ? 0 : -ETIMEDOUT;
- if (ret)
- dev_err(pmc_dev, "tired waiting for SLP bit to clear\n");
+ /* Upon resume, wait for SLP bit to be clear. */
+ ret = spin_event_timeout(
+ (in_be32(&pmc_regs->powmgtcsr) & POWMGTCSR_SLP) == 0,
+ 10000, 10);
+ if (!ret) {
+ pr_err("%s: timeout waiting for SLP bit "
+ "to be cleared\n", __func__);
+ ret = -EINVAL;
+ }
+ break;
+
+ default:
+ ret = -EINVAL;
+
+ }
return ret;
}
static int pmc_suspend_valid(suspend_state_t state)
{
- if (state != PM_SUSPEND_STANDBY)
+ if (((pmc_flag & PMC_SLEEP) && (state == PM_SUSPEND_STANDBY)) ||
+ ((pmc_flag & PMC_DEEP_SLEEP) && (state == PM_SUSPEND_MEM)))
+ return 1;
+ else
return 0;
- return 1;
}
static const struct platform_suspend_ops pmc_suspend_ops = {
@@ -59,14 +110,25 @@ static const struct platform_suspend_ops pmc_suspend_ops = {
.enter = pmc_suspend_enter,
};
-static int pmc_probe(struct platform_device *ofdev)
+static int pmc_probe(struct platform_device *pdev)
{
- pmc_regs = of_iomap(ofdev->dev.of_node, 0);
+ struct device_node *np = pdev->dev.of_node;
+
+ pmc_regs = of_iomap(np, 0);
if (!pmc_regs)
return -ENOMEM;
- pmc_dev = &ofdev->dev;
+ pmc_flag = PMC_SLEEP;
+ if (of_device_is_compatible(np, "fsl,mpc8536-pmc"))
+ pmc_flag |= PMC_DEEP_SLEEP;
+
+ if (of_device_is_compatible(np, "fsl,p1022-pmc"))
+ pmc_flag |= PMC_DEEP_SLEEP;
+
suspend_set_ops(&pmc_suspend_ops);
+
+ pr_info("Freescale PMC driver: sleep(standby)%s\n",
+ (pmc_flag & PMC_DEEP_SLEEP) ? ", deep sleep(mem)" : "");
return 0;
}
diff --git a/arch/powerpc/sysdev/fsl_soc.h b/arch/powerpc/sysdev/fsl_soc.h
index c6d0073..11d9f94 100644
--- a/arch/powerpc/sysdev/fsl_soc.h
+++ b/arch/powerpc/sysdev/fsl_soc.h
@@ -48,5 +48,10 @@ extern struct platform_diu_data_ops diu_ops;
void fsl_hv_restart(char *cmd);
void fsl_hv_halt(void);
+/*
+ * ccsrbar is u64 rather than phys_addr_t so that the assembly
+ * code can be compatible with both 32-bit & 36-bit.
+ */
+extern void mpc85xx_enter_deep_sleep(u64 ccsrbar, u32 powmgtreq);
#endif
#endif
--
1.6.4.1
The mpc85xx cpufreq driver will temporarily disable/enable CPU hotplug.
Therefore, the related functions should be exported.
Signed-off-by: Zhao Chenhui <[email protected]>
---
include/linux/cpu.h | 4 ++++
1 files changed, 4 insertions(+), 0 deletions(-)
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index ce7a074..df8f73d 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -146,6 +146,8 @@ void notify_cpu_starting(unsigned int cpu);
extern void cpu_maps_update_begin(void);
extern void cpu_maps_update_done(void);
+extern void cpu_hotplug_disable_before_freeze(void);
+extern void cpu_hotplug_enable_after_thaw(void);
#else /* CONFIG_SMP */
#define cpu_notifier(fn, pri) do { (void)(fn); } while (0)
@@ -167,6 +169,8 @@ static inline void cpu_maps_update_done(void)
{
}
+static inline void cpu_hotplug_disable_before_freeze(void) {}
+static inline void cpu_hotplug_enable_after_thaw(void) {}
#endif /* CONFIG_SMP */
extern struct bus_type cpu_subsys;
--
1.6.4.1
Add APIs for setting the wakeup source and lossless Ethernet in low power modes.
These APIs can be used by the wake-on-packet feature.
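For illustration, a minimal sketch of how a device driver might call these APIs
from its suspend path; the driver and its suspend hook below are hypothetical,
only the two mpc85xx_pmc_* calls come from this patch:

#include <linux/device.h>
#include <sysdev/fsl_soc.h>

/* Hypothetical Ethernet driver suspend hook (sketch only). */
static int my_eth_suspend(struct device *dev)
{
	int ret;

	/* Keep this device clocked in (deep) sleep so it can raise
	 * a wake-on-packet event.
	 */
	ret = mpc85xx_pmc_set_wake(dev, true);
	if (ret)
		return ret;

	/* Ask the PMC to keep Ethernet traffic lossless while asleep. */
	mpc85xx_pmc_set_lossless_ethernet(1);
	return 0;
}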
Signed-off-by: Dave Liu <[email protected]>
Signed-off-by: Li Yang <[email protected]>
Signed-off-by: Jin Qing <[email protected]>
Signed-off-by: Zhao Chenhui <[email protected]>
---
arch/powerpc/sysdev/fsl_pmc.c | 77 ++++++++++++++++++++++++++++++++++++++++-
arch/powerpc/sysdev/fsl_soc.h | 12 ++++++
2 files changed, 88 insertions(+), 1 deletions(-)
diff --git a/arch/powerpc/sysdev/fsl_pmc.c b/arch/powerpc/sysdev/fsl_pmc.c
index 45718c5..b6c8c8f 100644
--- a/arch/powerpc/sysdev/fsl_pmc.c
+++ b/arch/powerpc/sysdev/fsl_pmc.c
@@ -38,6 +38,7 @@ struct pmc_regs {
__be32 powmgtcsr;
#define POWMGTCSR_SLP 0x00020000
#define POWMGTCSR_DPSLP 0x00100000
+#define POWMGTCSR_LOSSLESS 0x00400000
__be32 res3[2];
/* 0xe008c: Power management clock disable register */
__be32 pmcdr;
@@ -48,6 +49,77 @@ static unsigned int pmc_flag;
#define PMC_SLEEP 0x1
#define PMC_DEEP_SLEEP 0x2
+#define PMC_LOSSLESS 0x4
+
+#define PMCDR_MASK_INIT 0x00e008e0
+
+/**
+ * mpc85xx_pmc_set_wake - enable devices as wakeup event source
+ * @dev: a device affected
+ * @enable: True to enable event generation; false to disable
+ *
+ * This enables the device as a wakeup event source, or disables it.
+ *
+ * RETURN VALUE:
+ * 0 is returned on success.
+ * -EINVAL is returned if device is not supposed to wake up the system.
+ * -ENODEV is returned if PMC is unavailable.
+ * Error code depending on the platform is returned if both the platform and
+ * the native mechanism fail to enable the generation of wake-up events
+ */
+int mpc85xx_pmc_set_wake(struct device *dev, bool enable)
+{
+ int ret = 0;
+ struct device_node *clk_np;
+ const u32 *prop;
+ u32 pmcdr_mask;
+
+ if (!pmc_regs) {
+ pr_err("%s: PMC is unavailable\n", __func__);
+ return -ENODEV;
+ }
+
+ if (enable && !device_may_wakeup(dev))
+ return -EINVAL;
+
+ clk_np = of_parse_phandle(dev->of_node, "fsl,pmc-handle", 0);
+ if (!clk_np)
+ return -EINVAL;
+
+ prop = of_get_property(clk_np, "fsl,pmcdr-mask", NULL);
+ if (!prop) {
+ ret = -EINVAL;
+ goto out;
+ }
+ pmcdr_mask = be32_to_cpup(prop);
+
+ if (enable)
+ /* clear to enable clock in low power mode */
+ clrbits32(&pmc_regs->pmcdr, pmcdr_mask);
+ else
+ setbits32(&pmc_regs->pmcdr, pmcdr_mask);
+
+out:
+ of_node_put(clk_np);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mpc85xx_pmc_set_wake);
+
+/**
+ * mpc85xx_pmc_set_lossless_ethernet - enable lossless ethernet
+ * in (deep) sleep mode
+ * @enable: True to enable event generation; false to disable
+ */
+void mpc85xx_pmc_set_lossless_ethernet(int enable)
+{
+ if (pmc_flag & PMC_LOSSLESS) {
+ if (enable)
+ setbits32(&pmc_regs->powmgtcsr, POWMGTCSR_LOSSLESS);
+ else
+ clrbits32(&pmc_regs->powmgtcsr, POWMGTCSR_LOSSLESS);
+ }
+}
+EXPORT_SYMBOL_GPL(mpc85xx_pmc_set_lossless_ethernet);
static int pmc_suspend_enter(suspend_state_t state)
{
@@ -123,7 +195,10 @@ static int pmc_probe(struct platform_device *pdev)
pmc_flag |= PMC_DEEP_SLEEP;
if (of_device_is_compatible(np, "fsl,p1022-pmc"))
- pmc_flag |= PMC_DEEP_SLEEP;
+ pmc_flag |= PMC_DEEP_SLEEP | PMC_LOSSLESS;
+
+ /* Init the Power Management Clock Disable Register. */
+ setbits32(&pmc_regs->pmcdr, PMCDR_MASK_INIT);
suspend_set_ops(&pmc_suspend_ops);
diff --git a/arch/powerpc/sysdev/fsl_soc.h b/arch/powerpc/sysdev/fsl_soc.h
index 11d9f94..b1510ef 100644
--- a/arch/powerpc/sysdev/fsl_soc.h
+++ b/arch/powerpc/sysdev/fsl_soc.h
@@ -3,6 +3,7 @@
#ifdef __KERNEL__
#include <asm/mmu.h>
+#include <linux/platform_device.h>
struct spi_device;
@@ -21,6 +22,17 @@ struct device_node;
extern void fsl_rstcr_restart(char *cmd);
+#ifdef CONFIG_FSL_PMC
+extern int mpc85xx_pmc_set_wake(struct device *dev, bool enable);
+extern void mpc85xx_pmc_set_lossless_ethernet(int enable);
+#else
+static inline int mpc85xx_pmc_set_wake(struct device *dev, bool enable)
+{
+ return -ENODEV;
+}
+#define mpc85xx_pmc_set_lossless_ethernet(enable) do { } while (0)
+#endif
+
#if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE)
/* The different ports that the DIU can be connected to */
--
1.6.4.1
Some 85xx silicons, such as the MPC8536 and P1022, have a JOG feature, which
provides a dynamic mechanism to lower or raise the CPU core clock at runtime.
This patch adds support for changing the CPU frequency through the standard
cpufreq interface. The CORE-to-CCB ratio can be 1:1 (except on the MPC8536),
3:2, 2:1, 5:2, 3:1, 7:2 or 4:1.
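As a worked example of how a PLL index maps to the core frequency (a fragment
mirroring the driver's sysfreq * pll / 2 computation; the 533 MHz CCB clock is
only an assumed value):

	u32 sysfreq = 533333333;		/* CCB (platform) clock in Hz, assumed */
	unsigned int pll = 5;			/* PLL index, i.e. a 5:2 CORE:CCB ratio */
	u32 corefreq = sysfreq * pll / 2;	/* ~1333 MHz core clock */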
The two CPU cores on the P1022 must not be in a low power state during the
frequency transition. The driver uses an atomic counter to meet this requirement.
The jog mode frequency transition process on the MPC8536 is similar to
the deep sleep process. The driver needs to save the CPU state and restore
it after the CPU warm reset.
Note:
* The I/O peripherals such as PCIe and eTSEC may lose packets during
the jog mode frequency transition.
* The driver doesn't support MPC8536 Rev 1.0 due to a JOG erratum.
Subsequent revisions of MPC8536 have corrected the erratum.
Signed-off-by: Dave Liu <[email protected]>
Signed-off-by: Li Yang <[email protected]>
Signed-off-by: Jerry Huang <[email protected]>
Signed-off-by: Zhao Chenhui <[email protected]>
CC: Scott Wood <[email protected]>
---
arch/powerpc/platforms/85xx/Makefile | 1 +
arch/powerpc/platforms/85xx/cpufreq-jog.c | 388 +++++++++++++++++++++++++++++
arch/powerpc/platforms/Kconfig | 11 +
arch/powerpc/sysdev/fsl_pmc.c | 3 +
arch/powerpc/sysdev/fsl_soc.h | 2 +
5 files changed, 405 insertions(+), 0 deletions(-)
create mode 100644 arch/powerpc/platforms/85xx/cpufreq-jog.c
diff --git a/arch/powerpc/platforms/85xx/Makefile b/arch/powerpc/platforms/85xx/Makefile
index 8a030a1..6156849 100644
--- a/arch/powerpc/platforms/85xx/Makefile
+++ b/arch/powerpc/platforms/85xx/Makefile
@@ -5,6 +5,7 @@ obj-$(CONFIG_SMP) += smp.o
obj-y += common.o
obj-$(CONFIG_FSL_PMC) += sleep.o
+obj-$(CONFIG_MPC85xx_CPUFREQ) += cpufreq-jog.o
obj-$(CONFIG_BSC9131_RDB) += bsc913x_rdb.o
obj-$(CONFIG_MPC8540_ADS) += mpc85xx_ads.o
diff --git a/arch/powerpc/platforms/85xx/cpufreq-jog.c b/arch/powerpc/platforms/85xx/cpufreq-jog.c
new file mode 100644
index 0000000..ccc0c33
--- /dev/null
+++ b/arch/powerpc/platforms/85xx/cpufreq-jog.c
@@ -0,0 +1,388 @@
+/*
+ * Copyright (C) 2008-2012 Freescale Semiconductor, Inc.
+ * Author: Dave Liu <[email protected]>
+ * Modifier: Chenhui Zhao <[email protected]>
+ *
+ * The cpufreq driver is for Freescale 85xx processor,
+ * based on arch/powerpc/platforms/cell/cbe_cpufreq.c
+ * (C) Copyright IBM Deutschland Entwicklung GmbH 2005-2007
+ * Christian Krafft <[email protected]>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/cpufreq.h>
+#include <linux/of_platform.h>
+#include <linux/suspend.h>
+#include <linux/cpu.h>
+
+#include <asm/prom.h>
+#include <asm/time.h>
+#include <asm/reg.h>
+#include <asm/io.h>
+#include <asm/machdep.h>
+#include <asm/smp.h>
+
+#include <sysdev/fsl_soc.h>
+
+static DEFINE_MUTEX(mpc85xx_switch_mutex);
+static void __iomem *guts;
+
+static u32 sysfreq;
+static unsigned int max_pll[2];
+static atomic_t in_jog_process;
+static struct cpufreq_frequency_table *mpc85xx_freqs;
+static int (*set_pll)(unsigned int cpu, unsigned int pll);
+
+static struct cpufreq_frequency_table mpc8536_freqs_table[] = {
+ {3, 0},
+ {4, 0},
+ {5, 0},
+ {6, 0},
+ {7, 0},
+ {8, 0},
+ {0, CPUFREQ_TABLE_END},
+};
+
+static struct cpufreq_frequency_table p1022_freqs_table[] = {
+ {2, 0},
+ {3, 0},
+ {4, 0},
+ {5, 0},
+ {6, 0},
+ {7, 0},
+ {8, 0},
+ {0, CPUFREQ_TABLE_END},
+};
+
+#define FREQ_500MHz 500000000
+#define FREQ_800MHz 800000000
+
+#define CORE_RATIO_STRIDE 8
+#define CORE_RATIO_MASK 0x3f
+#define CORE_RATIO_SHIFT 16
+
+#define PORPLLSR 0x0 /* Power-On Reset PLL ratio status register */
+
+#define PMJCR 0x7c /* Power Management Jog Control Register */
+#define PMJCR_CORE0_SPD 0x00001000
+#define PMJCR_CORE_SPD 0x00002000
+
+#define POWMGTCSR 0x80 /* Power management control and status register */
+#define POWMGTCSR_JOG 0x00200000
+#define POWMGTCSR_INT_MASK 0x00000f00
+
+static void spin_while_jogging(void *dummy)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ atomic_inc(&in_jog_process);
+
+ while (atomic_read(&in_jog_process) != 0)
+ barrier();
+
+ local_irq_restore(flags);
+}
+
+static int get_pll(int hw_cpu)
+{
+ int shift;
+ u32 val = in_be32(guts + PORPLLSR);
+
+ shift = hw_cpu * CORE_RATIO_STRIDE + CORE_RATIO_SHIFT;
+
+ return (val >> shift) & CORE_RATIO_MASK;
+}
+
+static int mpc8536_set_pll(unsigned int cpu, unsigned int pll)
+{
+ u32 corefreq, val, mask;
+ unsigned int cur_pll = get_pll(0);
+ unsigned long flags;
+
+ if (pll == cur_pll)
+ return 0;
+
+ val = (pll & CORE_RATIO_MASK) << CORE_RATIO_SHIFT;
+
+ corefreq = sysfreq * pll / 2;
+ /*
+ * Set the COREx_SPD bit if the requested core frequency
+ * is larger than the threshold frequency.
+ */
+ if (corefreq > FREQ_800MHz)
+ val |= PMJCR_CORE_SPD;
+
+ mask = (CORE_RATIO_MASK << CORE_RATIO_SHIFT) | PMJCR_CORE_SPD;
+ clrsetbits_be32(guts + PMJCR, mask, val);
+
+ /* readback to sync write */
+ in_be32(guts + PMJCR);
+
+ local_irq_save(flags);
+ mpc85xx_enter_deep_sleep(get_immrbase(), POWMGTCSR_JOG);
+ local_irq_restore(flags);
+
+ /* verify */
+ cur_pll = get_pll(0);
+ if (cur_pll != pll) {
+ pr_err("%s: error. The current PLL is %d instead of %d.\n",
+ __func__, cur_pll, pll);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int p1022_set_pll(unsigned int cpu, unsigned int pll)
+{
+ int index, hw_cpu = get_hard_smp_processor_id(cpu);
+ int shift;
+ u32 corefreq, val, mask = 0;
+ unsigned int cur_pll = get_pll(hw_cpu);
+ unsigned long flags;
+ int ret = 0;
+
+ if (pll == cur_pll)
+ return 0;
+
+ shift = hw_cpu * CORE_RATIO_STRIDE + CORE_RATIO_SHIFT;
+ val = (pll & CORE_RATIO_MASK) << shift;
+
+ corefreq = sysfreq * pll / 2;
+ /*
+ * Set the COREx_SPD bit if the requested core frequency
+ * is larger than the threshold frequency.
+ */
+ if (corefreq > FREQ_500MHz)
+ val |= PMJCR_CORE0_SPD << hw_cpu;
+
+ mask = (CORE_RATIO_MASK << shift) | (PMJCR_CORE0_SPD << hw_cpu);
+ clrsetbits_be32(guts + PMJCR, mask, val);
+
+ /* readback to sync write */
+ in_be32(guts + PMJCR);
+
+ cpu_hotplug_disable_before_freeze();
+ /*
+ * A Jog request can not be asserted when any core is in a low
+ * power state on P1022. Before executing a jog request, any
+ * core which is in a low power state must be woken by an
+ * interrupt and kept awake until the sequence is
+ * finished.
+ */
+ for_each_present_cpu(index) {
+ if (!cpu_online(index)) {
+ cpu_hotplug_enable_after_thaw();
+ pr_err("%s: error, core%d is down.\n", __func__, index);
+ return -1;
+ }
+ }
+
+ atomic_set(&in_jog_process, 0);
+ smp_call_function(spin_while_jogging, NULL, 0);
+
+ local_irq_save(flags);
+
+ /* Wait for the other core to wake. */
+ if (!spin_event_timeout(atomic_read(&in_jog_process) == 1, 1000, 100)) {
+ pr_err("%s: timeout, the other core is not at running state.\n",
+ __func__);
+ ret = -1;
+ goto err;
+ }
+
+ out_be32(guts + POWMGTCSR, POWMGTCSR_JOG | POWMGTCSR_INT_MASK);
+
+ if (!spin_event_timeout(
+ (in_be32(guts + POWMGTCSR) & POWMGTCSR_JOG) == 0, 1000, 100)) {
+ pr_err("%s: timeout, fail to switch the core frequency.\n",
+ __func__);
+ ret = -1;
+ goto err;
+ }
+
+ clrbits32(guts + POWMGTCSR, POWMGTCSR_INT_MASK);
+ in_be32(guts + POWMGTCSR);
+
+ atomic_set(&in_jog_process, 0);
+err:
+ local_irq_restore(flags);
+ cpu_hotplug_enable_after_thaw();
+
+ /* verify */
+ cur_pll = get_pll(hw_cpu);
+ if (cur_pll != pll) {
+ pr_err("%s: error, the current PLL of core %d is %d instead of %d.\n",
+ __func__, hw_cpu, cur_pll, pll);
+ return -1;
+ }
+
+ return ret;
+}
+
+/*
+ * cpufreq functions
+ */
+static int mpc85xx_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+ unsigned int i, cur_pll;
+ int hw_cpu = get_hard_smp_processor_id(policy->cpu);
+
+ if (!cpu_present(policy->cpu))
+ return -ENODEV;
+
+ /* the latency of a transition, the unit is ns */
+ policy->cpuinfo.transition_latency = 2000;
+
+ cur_pll = get_pll(hw_cpu);
+
+ /* initialize frequency table */
+ pr_debug("core%d frequency table:\n", hw_cpu);
+ for (i = 0; mpc85xx_freqs[i].frequency != CPUFREQ_TABLE_END; i++) {
+ if (mpc85xx_freqs[i].index <= max_pll[hw_cpu]) {
+ /* The frequency unit is kHz. */
+ mpc85xx_freqs[i].frequency =
+ (sysfreq * mpc85xx_freqs[i].index / 2) / 1000;
+ } else {
+ mpc85xx_freqs[i].frequency = CPUFREQ_ENTRY_INVALID;
+ }
+
+ pr_debug("%d: %dkHz\n", i, mpc85xx_freqs[i].frequency);
+
+ if (mpc85xx_freqs[i].index == cur_pll)
+ policy->cur = mpc85xx_freqs[i].frequency;
+ }
+ pr_debug("current pll is at %d, and core freq is%d\n",
+ cur_pll, policy->cur);
+
+ cpufreq_frequency_table_get_attr(mpc85xx_freqs, policy->cpu);
+
+ /*
+ * This ensures that policy->cpuinfo_min
+ * and policy->cpuinfo_max are set correctly.
+ */
+ return cpufreq_frequency_table_cpuinfo(policy, mpc85xx_freqs);
+}
+
+static int mpc85xx_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+{
+ cpufreq_frequency_table_put_attr(policy->cpu);
+
+ return 0;
+}
+
+static int mpc85xx_cpufreq_verify(struct cpufreq_policy *policy)
+{
+ return cpufreq_frequency_table_verify(policy, mpc85xx_freqs);
+}
+
+static int mpc85xx_cpufreq_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+ struct cpufreq_freqs freqs;
+ unsigned int new;
+ int ret = 0;
+
+ if (!set_pll)
+ return -ENODEV;
+
+ cpufreq_frequency_table_target(policy,
+ mpc85xx_freqs,
+ target_freq,
+ relation,
+ &new);
+
+ freqs.old = policy->cur;
+ freqs.new = mpc85xx_freqs[new].frequency;
+ freqs.cpu = policy->cpu;
+
+ mutex_lock(&mpc85xx_switch_mutex);
+ cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+
+ ret = set_pll(policy->cpu, mpc85xx_freqs[new].index);
+ if (!ret) {
+ pr_info("cpufreq: Setting core%d frequency to %d kHz and PLL ratio to %d:2\n",
+ policy->cpu, mpc85xx_freqs[new].frequency,
+ mpc85xx_freqs[new].index);
+
+ ppc_proc_freq = freqs.new * 1000ul;
+ }
+ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+ mutex_unlock(&mpc85xx_switch_mutex);
+
+ return ret;
+}
+
+static struct cpufreq_driver mpc85xx_cpufreq_driver = {
+ .verify = mpc85xx_cpufreq_verify,
+ .target = mpc85xx_cpufreq_target,
+ .init = mpc85xx_cpufreq_cpu_init,
+ .exit = mpc85xx_cpufreq_cpu_exit,
+ .name = "mpc85xx-JOG",
+ .owner = THIS_MODULE,
+ .flags = CPUFREQ_CONST_LOOPS,
+};
+
+static struct of_device_id mpc85xx_jog_ids[] = {
+ { .compatible = "fsl,mpc8536-guts", },
+ { .compatible = "fsl,p1022-guts", },
+ {}
+};
+
+int mpc85xx_jog_probe(void)
+{
+ struct device_node *np;
+ unsigned int svr;
+
+ np = of_find_matching_node(NULL, mpc85xx_jog_ids);
+ if (!np)
+ return -ENODEV;
+
+ guts = of_iomap(np, 0);
+ if (!guts) {
+ of_node_put(np);
+ return -ENODEV;
+ }
+
+ sysfreq = fsl_get_sys_freq();
+
+ if (of_device_is_compatible(np, "fsl,mpc8536-guts")) {
+ svr = mfspr(SPRN_SVR);
+ if ((svr & 0x7fff) == 0x10) {
+ pr_err("MPC8536 Rev 1.0 does not support cpufreq(JOG).\n");
+ of_node_put(np);
+ return -ENODEV;
+ }
+ mpc85xx_freqs = mpc8536_freqs_table;
+ set_pll = mpc8536_set_pll;
+ max_pll[0] = get_pll(0);
+
+ } else if (of_device_is_compatible(np, "fsl,p1022-guts")) {
+ mpc85xx_freqs = p1022_freqs_table;
+ set_pll = p1022_set_pll;
+ max_pll[0] = get_pll(0);
+ max_pll[1] = get_pll(1);
+ }
+
+ pr_info("Freescale MPC85xx cpufreq(JOG) driver\n");
+
+ of_node_put(np);
+ return cpufreq_register_driver(&mpc85xx_cpufreq_driver);
+}
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index e7a896a..a1518af 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -213,6 +213,17 @@ config CPU_FREQ_PMAC64
This adds support for frequency switching on Apple iMac G5,
and some of the more recent desktop G5 machines as well.
+config MPC85xx_CPUFREQ
+ bool "Support for Freescale MPC85xx CPU freq"
+ depends on PPC_85xx && FSL_PMC
+ default n
+ select CPU_FREQ_TABLE
+ help
+ This adds support for dynamic frequency switching on
+ Freescale MPC85xx by cpufreq interface. MPC8536 and P1022
+ have a JOG feature, which provides a dynamic mechanism
+ to lower or raise the CPU core clock at runtime.
+
config PPC_PASEMI_CPUFREQ
bool "Support for PA Semi PWRficient"
depends on PPC_PASEMI
diff --git a/arch/powerpc/sysdev/fsl_pmc.c b/arch/powerpc/sysdev/fsl_pmc.c
index b6c8c8f..b809a1b 100644
--- a/arch/powerpc/sysdev/fsl_pmc.c
+++ b/arch/powerpc/sysdev/fsl_pmc.c
@@ -202,6 +202,9 @@ static int pmc_probe(struct platform_device *pdev)
suspend_set_ops(&pmc_suspend_ops);
+#ifdef CONFIG_MPC85xx_CPUFREQ
+ mpc85xx_jog_probe();
+#endif
pr_info("Freescale PMC driver: sleep(standby)%s\n",
(pmc_flag & PMC_DEEP_SLEEP) ? ", deep sleep(mem)" : "");
return 0;
diff --git a/arch/powerpc/sysdev/fsl_soc.h b/arch/powerpc/sysdev/fsl_soc.h
index b1510ef..25be25c 100644
--- a/arch/powerpc/sysdev/fsl_soc.h
+++ b/arch/powerpc/sysdev/fsl_soc.h
@@ -65,5 +65,7 @@ void fsl_hv_halt(void);
* code can be compatible with both 32-bit & 36-bit.
*/
extern void mpc85xx_enter_deep_sleep(u64 ccsrbar, u32 powmgtreq);
+
+extern int mpc85xx_jog_probe(void);
#endif
#endif
--
1.6.4.1
On Aug 7, 2012, at 3:43 AM, Zhao Chenhui wrote:
> The cpufreq driver of mpc85xx will disable/enable cpu hotplug temporarily.
> Therefore, the related functions should be exported.
>
> Signed-off-by: Zhao Chenhui <[email protected]>
> ---
> include/linux/cpu.h | 4 ++++
> 1 files changed, 4 insertions(+), 0 deletions(-)
Rafael, Srivatsa,
Wanted to get your ack on exporting these functions for direct calling by arch code.
- k
>
> diff --git a/include/linux/cpu.h b/include/linux/cpu.h
> index ce7a074..df8f73d 100644
> --- a/include/linux/cpu.h
> +++ b/include/linux/cpu.h
> @@ -146,6 +146,8 @@ void notify_cpu_starting(unsigned int cpu);
> extern void cpu_maps_update_begin(void);
> extern void cpu_maps_update_done(void);
>
> +extern void cpu_hotplug_disable_before_freeze(void);
> +extern void cpu_hotplug_enable_after_thaw(void);
> #else /* CONFIG_SMP */
>
> #define cpu_notifier(fn, pri) do { (void)(fn); } while (0)
> @@ -167,6 +169,8 @@ static inline void cpu_maps_update_done(void)
> {
> }
>
> +static inline void cpu_hotplug_disable_before_freeze(void) {}
> +static inline void cpu_hotplug_enable_after_thaw(void) {}
> #endif /* CONFIG_SMP */
> extern struct bus_type cpu_subsys;
>
> --
> 1.6.4.1
>
On 08/07/2012 11:21 PM, Kumar Gala wrote:
>
> On Aug 7, 2012, at 3:43 AM, Zhao Chenhui wrote:
>
>> The cpufreq driver of mpc85xx will disable/enable cpu hotplug temporarily.
>> Therefore, the related functions should be exported.
>>
>> Signed-off-by: Zhao Chenhui <[email protected]>
>> ---
>> include/linux/cpu.h | 4 ++++
>> 1 files changed, 4 insertions(+), 0 deletions(-)
>
> Rafael, Srivatsa,
>
> Wanted to get your ack on export these functions for direct calling by arch code.
>
Why not just use get_online_cpus()/put_online_cpus()?
In the case of suspend/resume/hibernation, we had introduced these CPU hotplug disable
functions because we would end up doing CPU hotplug ourselves, further down the path.
So if we did a get_online_cpus(), we would end up deadlocking ourselves. Whereas, the
patch 4/4 looks like a straightforward case of wanting to simply disable CPU hotplug..
I don't see where you are doing CPU hotplug yourself in the path. So IMO, just
get/put_online_cpus() should do.
Regards,
Srivatsa S. Bhat
>
>>
>> diff --git a/include/linux/cpu.h b/include/linux/cpu.h
>> index ce7a074..df8f73d 100644
>> --- a/include/linux/cpu.h
>> +++ b/include/linux/cpu.h
>> @@ -146,6 +146,8 @@ void notify_cpu_starting(unsigned int cpu);
>> extern void cpu_maps_update_begin(void);
>> extern void cpu_maps_update_done(void);
>>
>> +extern void cpu_hotplug_disable_before_freeze(void);
>> +extern void cpu_hotplug_enable_after_thaw(void);
>> #else /* CONFIG_SMP */
>>
>> #define cpu_notifier(fn, pri) do { (void)(fn); } while (0)
>> @@ -167,6 +169,8 @@ static inline void cpu_maps_update_done(void)
>> {
>> }
>>
>> +static inline void cpu_hotplug_disable_before_freeze(void) {}
>> +static inline void cpu_hotplug_enable_after_thaw(void) {}
>> #endif /* CONFIG_SMP */
>> extern struct bus_type cpu_subsys;
>>
>> --
>> 1.6.4.1
>>
>
On Wed, Aug 08, 2012 at 11:43:22AM +0530, Srivatsa S. Bhat wrote:
> On 08/07/2012 11:21 PM, Kumar Gala wrote:
> >
> > On Aug 7, 2012, at 3:43 AM, Zhao Chenhui wrote:
> >
> >> The cpufreq driver of mpc85xx will disable/enable cpu hotplug temporarily.
> >> Therefore, the related functions should be exported.
> >>
> >> Signed-off-by: Zhao Chenhui <[email protected]>
> >> ---
> >> include/linux/cpu.h | 4 ++++
> >> 1 files changed, 4 insertions(+), 0 deletions(-)
> >
> > Rafael, Srivatsa,
> >
> > Wanted to get your ack on export these functions for direct calling by arch code.
> >
>
> Why not just use get_online_cpus()/put_online_cpus()?
>
> In the case of suspend/resume/hibernation, we had introduced these CPU hotplug disable
> functions because we would end up doing CPU hotplug ourselves, further down the path.
> So if we did a get_online_cpus(), we would end up deadlocking ourselves. Whereas, the
> patch 4/4 looks like a straightforward case of wanting to simply disable CPU hotplug..
> I don't see where you are doing CPU hotplug yourself in the path. So IMO, just
> get/put_online_cpus() should do.
>
> Regards,
> Srivatsa S. Bhat
>
Thanks for your comment. I will try to use get/put_online_cpus() in my patch.
-Chenhui
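A minimal sketch of the suggested pattern, assuming it replaces the
cpu_hotplug_disable_before_freeze()/cpu_hotplug_enable_after_thaw() calls
around the jog sequence in p1022_set_pll():

	get_online_cpus();	/* hold off CPU hotplug while the jog request runs */
	/* ... verify all present cores are online, then issue the jog request ... */
	put_online_cpus();	/* allow CPU hotplug again */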
On Tue, Aug 07, 2012 at 04:43:25PM +0800, Zhao Chenhui wrote:
> The cpufreq driver of mpc85xx will disable/enable cpu hotplug temporarily.
> Therefore, the related functions should be exported.
>
> Signed-off-by: Zhao Chenhui <[email protected]>
> ---
> include/linux/cpu.h | 4 ++++
> 1 files changed, 4 insertions(+), 0 deletions(-)
>
> diff --git a/include/linux/cpu.h b/include/linux/cpu.h
> index ce7a074..df8f73d 100644
> --- a/include/linux/cpu.h
> +++ b/include/linux/cpu.h
> @@ -146,6 +146,8 @@ void notify_cpu_starting(unsigned int cpu);
> extern void cpu_maps_update_begin(void);
> extern void cpu_maps_update_done(void);
>
> +extern void cpu_hotplug_disable_before_freeze(void);
> +extern void cpu_hotplug_enable_after_thaw(void);
> #else /* CONFIG_SMP */
>
> #define cpu_notifier(fn, pri) do { (void)(fn); } while (0)
> @@ -167,6 +169,8 @@ static inline void cpu_maps_update_done(void)
> {
> }
>
> +static inline void cpu_hotplug_disable_before_freeze(void) {}
> +static inline void cpu_hotplug_enable_after_thaw(void) {}
> #endif /* CONFIG_SMP */
> extern struct bus_type cpu_subsys;
>
> --
> 1.6.4.1
>
Hi Kumar,
I will not use these APIs in the 4/4 patch. Please ignore this patch.
-Chenhui
On Tue, Aug 7, 2012 at 3:43 AM, Zhao Chenhui <[email protected]> wrote:
> +int mpc85xx_pmc_set_wake(struct device *dev, bool enable)
> +{
> + int ret = 0;
> + struct device_node *clk_np;
> + const u32 *prop;
> + u32 pmcdr_mask;
> +
> + if (!pmc_regs) {
> + pr_err("%s: PMC is unavailable\n", __func__);
You have a 'struct device', so please use dev_err instead.
> + return -ENODEV;
> + }
> +
> + if (enable && !device_may_wakeup(dev))
> + return -EINVAL;
> +
> + clk_np = of_parse_phandle(dev->of_node, "fsl,pmc-handle", 0);
> + if (!clk_np)
> + return -EINVAL;
> +
> + prop = of_get_property(clk_np, "fsl,pmcdr-mask", NULL);
> + if (!prop) {
> + ret = -EINVAL;
> + goto out;
> + }
> + pmcdr_mask = be32_to_cpup(prop);
> +
> + if (enable)
> + /* clear to enable clock in low power mode */
> + clrbits32(&pmc_regs->pmcdr, pmcdr_mask);
> + else
> + setbits32(&pmc_regs->pmcdr, pmcdr_mask);
> +
> +out:
> + of_node_put(clk_np);
> + return ret;
> +}
> +EXPORT_SYMBOL_GPL(mpc85xx_pmc_set_wake);
Use EXPORT_SYMBOL, not EXPORT_SYMBOL_GPL.
> +
> +/**
> + * mpc85xx_pmc_set_lossless_ethernet - enable lossless ethernet
> + * in (deep) sleep mode
> + * @enable: True to enable event generation; false to disable
> + */
> +void mpc85xx_pmc_set_lossless_ethernet(int enable)
Should this be 'bool enable'?
> @@ -21,6 +22,17 @@ struct device_node;
>
> extern void fsl_rstcr_restart(char *cmd);
>
> +#ifdef CONFIG_FSL_PMC
> +extern int mpc85xx_pmc_set_wake(struct device *dev, bool enable);
> +extern void mpc85xx_pmc_set_lossless_ethernet(int enable);
Don't use 'extern' for functions.
--
Timur Tabi
Linux kernel developer at Freescale
On Sat, Aug 11, 2012 at 08:19:43AM -0500, Tabi Timur-B04825 wrote:
> On Tue, Aug 7, 2012 at 3:43 AM, Zhao Chenhui <[email protected]> wrote:
>
> > + return -EINVAL;
> > +
> > + prop = of_get_property(clk_np, "fsl,pmcdr-mask", NULL);
> > + if (!prop) {
> > + ret = -EINVAL;
> > + goto out;
> > + }
> > + pmcdr_mask = be32_to_cpup(prop);
> > +
> > + if (enable)
> > + /* clear to enable clock in low power mode */
> > + clrbits32(&pmc_regs->pmcdr, pmcdr_mask);
> > + else
> > + setbits32(&pmc_regs->pmcdr, pmcdr_mask);
> > +
> > +out:
> > + of_node_put(clk_np);
> > + return ret;
> > +}
> > +EXPORT_SYMBOL_GPL(mpc85xx_pmc_set_wake);
>
> Use EXPORT_SYMBOL, not EXPORT_SYMBOL_GPL.
>
Hi Kumar,
Is that OK with upstream?
-Chenhui
On Sat, Aug 11, 2012 at 08:19:43AM -0500, Tabi Timur-B04825 wrote:
> On Tue, Aug 7, 2012 at 3:43 AM, Zhao Chenhui <[email protected]> wrote:
>
> > +int mpc85xx_pmc_set_wake(struct device *dev, bool enable)
> > +{
> > + int ret = 0;
> > + struct device_node *clk_np;
> > + const u32 *prop;
> > + u32 pmcdr_mask;
> > +
> > + if (!pmc_regs) {
> > + pr_err("%s: PMC is unavailable\n", __func__);
>
> You have a 'struct device', so please use dev_err instead.
>
> > + return -ENODEV;
> > + }
> > +
> > + if (enable && !device_may_wakeup(dev))
> > + return -EINVAL;
> > +
> > + clk_np = of_parse_phandle(dev->of_node, "fsl,pmc-handle", 0);
> > + if (!clk_np)
> > + return -EINVAL;
> > +
> > + prop = of_get_property(clk_np, "fsl,pmcdr-mask", NULL);
> > + if (!prop) {
> > + ret = -EINVAL;
> > + goto out;
> > + }
> > + pmcdr_mask = be32_to_cpup(prop);
> > +
> > + if (enable)
> > + /* clear to enable clock in low power mode */
> > + clrbits32(&pmc_regs->pmcdr, pmcdr_mask);
> > + else
> > + setbits32(&pmc_regs->pmcdr, pmcdr_mask);
> > +
> > +out:
> > + of_node_put(clk_np);
> > + return ret;
> > +}
> > +EXPORT_SYMBOL_GPL(mpc85xx_pmc_set_wake);
>
> Use EXPORT_SYMBOL, not EXPORT_SYMBOL_GPL.
>
> > +
> > +/**
> > + * mpc85xx_pmc_set_lossless_ethernet - enable lossless ethernet
> > + * in (deep) sleep mode
> > + * @enable: True to enable event generation; false to disable
> > + */
> > +void mpc85xx_pmc_set_lossless_ethernet(int enable)
>
> Should this be 'bool enable'?
>
> > @@ -21,6 +22,17 @@ struct device_node;
> >
> > extern void fsl_rstcr_restart(char *cmd);
> >
> > +#ifdef CONFIG_FSL_PMC
> > +extern int mpc85xx_pmc_set_wake(struct device *dev, bool enable);
> > +extern void mpc85xx_pmc_set_lossless_ethernet(int enable);
>
> Don't use 'extern' for functions.
>
Why? I think there is no difference.
-Chenhui
Zhao Chenhui wrote:
>>> > >+#ifdef CONFIG_FSL_PMC
>>> > >+extern int mpc85xx_pmc_set_wake(struct device *dev, bool enable);
>>> > >+extern void mpc85xx_pmc_set_lossless_ethernet(int enable);
>> >
>> >Don't use 'extern' for functions.
>> >
> Why? I think there is no difference.
It's unnecessary, and it makes the line longer.
--
Timur Tabi
Linux kernel developer at Freescale