We can simplify build_update_entries by unifying the case of 36 bit
physical addressing on a MIPS32 CPU with the general case: use the
pte_off_ variables in all cases and handle the trivial
_PAGE_GLOBAL_SHIFT == 0 case in build_convert_pte_to_entrylo. This
leaves XPA as the only special case.
Signed-off-by: Paul Burton <[email protected]>
---
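A note for reviewers, not intended for the commit message:
build_convert_pte_to_entrylo only needs to move _PAGE_GLOBAL down to the
EntryLo G bit (bit 0), so when _PAGE_GLOBAL_SHIFT == 0 the rotate or
shift would be by 0 bits & the function can simply return early. The
offsetof() adjustment below relies upon the pte_t layout used for 64 bit
physical addresses on a 32 bit kernel which, if I recall asm/page.h
correctly, is roughly:

    typedef struct { unsigned long pte_low, pte_high; } pte_t;

with the EntryLo-format bits held in pte_high, hence both pte_off_
variables being adjusted by the same offset.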
arch/mips/mm/tlbex.c | 38 +++++++++++++++++---------------------
1 file changed, 17 insertions(+), 21 deletions(-)
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 0bd3755..45234ad 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -626,6 +626,11 @@ static void build_tlb_write_entry(u32 **p, struct uasm_label **l,
static __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
unsigned int reg)
{
+ if (_PAGE_GLOBAL_SHIFT == 0) {
+ /* pte_t is already in EntryLo format */
+ return;
+ }
+
if (cpu_has_rixi && _PAGE_NO_EXEC) {
if (fill_includes_sw_bits) {
UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL));
@@ -1003,10 +1008,17 @@ static void build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
{
- if (config_enabled(CONFIG_XPA)) {
- int pte_off_even = sizeof(pte_t) / 2;
- int pte_off_odd = pte_off_even + sizeof(pte_t);
+ int pte_off_even = 0;
+ int pte_off_odd = sizeof(pte_t);
+
+ if (config_enabled(CONFIG_PHYS_ADDR_T_64BIT) &&
+ config_enabled(CONFIG_32BIT)) {
+ /* The low 32 bits of EntryLo are stored in pte_high */
+ pte_off_even += offsetof(pte_t, pte_high);
+ pte_off_odd += offsetof(pte_t, pte_high);
+ }
+ if (config_enabled(CONFIG_XPA)) {
uasm_i_lw(p, tmp, pte_off_even, ptep); /* even pte */
UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL));
UASM_i_MTC0(p, tmp, C0_ENTRYLO0);
@@ -1025,24 +1037,8 @@ static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
return;
}
- /*
- * 64bit address support (36bit on a 32bit CPU) in a 32bit
- * Kernel is a special case. Only a few CPUs use it.
- */
- if (config_enabled(CONFIG_PHYS_ADDR_T_64BIT) && !cpu_has_64bits) {
- int pte_off_even = sizeof(pte_t) / 2;
- int pte_off_odd = pte_off_even + sizeof(pte_t);
-
- uasm_i_lw(p, tmp, pte_off_even, ptep); /* even pte */
- UASM_i_MTC0(p, tmp, C0_ENTRYLO0);
-
- uasm_i_lw(p, ptep, pte_off_odd, ptep); /* odd pte */
- UASM_i_MTC0(p, ptep, C0_ENTRYLO1);
- return;
- }
-
- UASM_i_LW(p, tmp, 0, ptep); /* get even pte */
- UASM_i_LW(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
+ UASM_i_LW(p, tmp, pte_off_even, ptep); /* get even pte */
+ UASM_i_LW(p, ptep, pte_off_odd, ptep); /* get odd pte */
if (r45k_bvahwbug())
build_tlb_probe_entry(p);
build_convert_pte_to_entrylo(p, tmp);
--
2.8.0
On Fri, Apr 15, 2016 at 11:36:59AM +0100, Paul Burton wrote:
> We can simplify build_update_entries by unifying the case of 36 bit
> physical addressing on a MIPS32 CPU with the general case: use the
> pte_off_ variables in all cases and handle the trivial
> _PAGE_GLOBAL_SHIFT == 0 case in build_convert_pte_to_entrylo. This
> leaves XPA as the only special case.
>
> Signed-off-by: Paul Burton <[email protected]>
> ---
>
> arch/mips/mm/tlbex.c | 38 +++++++++++++++++---------------------
> 1 file changed, 17 insertions(+), 21 deletions(-)
>
> diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
> index 0bd3755..45234ad 100644
> --- a/arch/mips/mm/tlbex.c
> +++ b/arch/mips/mm/tlbex.c
> @@ -626,6 +626,11 @@ static void build_tlb_write_entry(u32 **p, struct uasm_label **l,
> static __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
> unsigned int reg)
> {
> + if (_PAGE_GLOBAL_SHIFT == 0) {
> + /* pte_t is already in EntryLo format */
> + return;
> + }
> +
> if (cpu_has_rixi && _PAGE_NO_EXEC) {
> if (fill_includes_sw_bits) {
> UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL));
> @@ -1003,10 +1008,17 @@ static void build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
>
> static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
> {
> - if (config_enabled(CONFIG_XPA)) {
> - int pte_off_even = sizeof(pte_t) / 2;
> - int pte_off_odd = pte_off_even + sizeof(pte_t);
> + int pte_off_even = 0;
> + int pte_off_odd = sizeof(pte_t);
> +
> + if (config_enabled(CONFIG_PHYS_ADDR_T_64BIT) &&
> + config_enabled(CONFIG_32BIT)) {
> + /* The low 32 bits of EntryLo are stored in pte_high */
> + pte_off_even += offsetof(pte_t, pte_high);
> + pte_off_odd += offsetof(pte_t, pte_high);
pte_high doesn't exist unless CONFIG_CPU_MIPS32=y (e.g. you
can set CONFIG_CPU_MIPS64=y, CONFIG_CPU_MIPS32=n and CONFIG_32BIT=y).
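Something like the below (completely untested) ought to do it. Since
pte_high doesn't exist at all in those configurations,
offsetof(pte_t, pte_high) won't even compile there, so I suspect this
wants to be a preprocessor guard rather than a config_enabled() check:

#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
	/* The low 32 bits of EntryLo are stored in pte_high */
	pte_off_even += offsetof(pte_t, pte_high);
	pte_off_odd += offsetof(pte_t, pte_high);
#endif

That also keeps the dead code out of the MIPS64 builds entirely.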
With that fixed it looks good to me.
Reviewed-by: James Hogan <[email protected]>
Cheers
James