Use asid2idx() and the ASID feature macros for cleanup.
No functional change.
Signed-off-by: Yunfeng Ye <[email protected]>
---
arch/arm64/mm/context.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index cd72576ae2b7..076f14a75bd5 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -50,10 +50,10 @@ static u32 get_cpu_asid_bits(void)
pr_warn("CPU%d: Unknown ASID size (%d); assuming 8-bit\n",
smp_processor_id(), fld);
fallthrough;
- case 0:
+ case ID_AA64MMFR0_ASID_8:
asid = 8;
break;
- case 2:
+ case ID_AA64MMFR0_ASID_16:
asid = 16;
}
@@ -162,7 +162,7 @@ static u64 new_context(struct mm_struct *mm)
 	u64 generation = atomic64_read(&asid_generation);
 
 	if (asid != 0) {
-		u64 newasid = generation | (asid & ~ASID_MASK);
+		u64 newasid = generation | asid2idx(asid);
 
 		/*
 		 * If our current ASID was active during a rollover, we
@@ -306,7 +306,7 @@ unsigned long arm64_mm_context_get(struct mm_struct *mm)
 out_unlock:
 	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
 
-	asid &= ~ASID_MASK;
+	asid = asid2idx(asid);
 
 	/* Set the equivalent of USER_ASID_BIT */
 	if (asid && arm64_kernel_unmapped_at_el0())
--
2.27.0
On Thu, Oct 28, 2021 at 08:27:23PM +0800, Yunfeng Ye wrote:
> Use asid2idx() and the ASID feature macros for cleanup.
>
> No functional change.
>
> Signed-off-by: Yunfeng Ye <[email protected]>
> ---
> arch/arm64/mm/context.c | 8 ++++----
> 1 file changed, 4 insertions(+), 4 deletions(-)
>
> diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
> index cd72576ae2b7..076f14a75bd5 100644
> --- a/arch/arm64/mm/context.c
> +++ b/arch/arm64/mm/context.c
> @@ -50,10 +50,10 @@ static u32 get_cpu_asid_bits(void)
> pr_warn("CPU%d: Unknown ASID size (%d); assuming 8-bit\n",
> smp_processor_id(), fld);
> fallthrough;
> - case 0:
> + case ID_AA64MMFR0_ASID_8:
> asid = 8;
> break;
> - case 2:
> + case ID_AA64MMFR0_ASID_16:
> asid = 16;
> }
I think this change is fine.
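(For reference, these are just the ID_AA64MMFR0_EL1.ASIDBits field encodings.
Roughly, from sysreg.h, sketched from memory, so the exact definitions may
differ slightly:

	#define ID_AA64MMFR0_ASID_SHIFT		4
	#define ID_AA64MMFR0_ASID_8		0x0
	#define ID_AA64MMFR0_ASID_16		0x2

so the case labels now match the architectural encodings by name rather than
by magic number.)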
> @@ -162,7 +162,7 @@ static u64 new_context(struct mm_struct *mm)
> u64 generation = atomic64_read(&asid_generation);
>
> if (asid != 0) {
> - u64 newasid = generation | (asid & ~ASID_MASK);
> + u64 newasid = generation | asid2idx(asid);
>
> /*
> * If our current ASID was active during a rollover, we
> @@ -306,7 +306,7 @@ unsigned long arm64_mm_context_get(struct mm_struct *mm)
> out_unlock:
> raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
>
> - asid &= ~ASID_MASK;
> + asid = asid2idx(asid);
While functionally the code is the same, I don't think this was the
intention of asid2idx(). It's meant to provide an index into asid_map,
while the ASID_MASK lines isolate the asid number and add a new
generation to it.
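For reference, the relevant definitions in context.c currently look roughly
like this (a sketch from memory, not copied from the tree):

	#define ASID_MASK		(~GENMASK(asid_bits - 1, 0))
	#define ASID_FIRST_VERSION	(1UL << asid_bits)
	#define NUM_USER_ASIDS		ASID_FIRST_VERSION
	#define asid2idx(asid)		((asid) & ~ASID_MASK)
	#define idx2asid(idx)		asid2idx(idx)

The expansions happen to coincide now that the KPTI ASID pairing is gone, but
asid2idx()/idx2asid() are named for indexing asid_map, not for constructing a
context id from an asid and a generation.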
--
Catalin
On 2021/12/7 0:23, Catalin Marinas wrote:
> On Thu, Oct 28, 2021 at 08:27:23PM +0800, Yunfeng Ye wrote:
> >> Use asid2idx() and the ASID feature macros for cleanup.
>>
>> No functional change.
>>
>> Signed-off-by: Yunfeng Ye <[email protected]>
>> ---
>> arch/arm64/mm/context.c | 8 ++++----
>> 1 file changed, 4 insertions(+), 4 deletions(-)
>>
>> diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
>> index cd72576ae2b7..076f14a75bd5 100644
>> --- a/arch/arm64/mm/context.c
>> +++ b/arch/arm64/mm/context.c
>> @@ -50,10 +50,10 @@ static u32 get_cpu_asid_bits(void)
>> pr_warn("CPU%d: Unknown ASID size (%d); assuming 8-bit\n",
>> smp_processor_id(), fld);
>> fallthrough;
>> - case 0:
>> + case ID_AA64MMFR0_ASID_8:
>> asid = 8;
>> break;
>> - case 2:
>> + case ID_AA64MMFR0_ASID_16:
>> asid = 16;
>> }
>
> I think this change is fine.
>
>> @@ -162,7 +162,7 @@ static u64 new_context(struct mm_struct *mm)
>> u64 generation = atomic64_read(&asid_generation);
>>
>> if (asid != 0) {
>> - u64 newasid = generation | (asid & ~ASID_MASK);
>> + u64 newasid = generation | asid2idx(asid);
>>
>> /*
>> * If our current ASID was active during a rollover, we
>> @@ -306,7 +306,7 @@ unsigned long arm64_mm_context_get(struct mm_struct *mm)
>> out_unlock:
>> raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
>>
>> - asid &= ~ASID_MASK;
>> + asid = asid2idx(asid);
>
> While functionally the code is the same, I don't think this was the
> intention of asid2idx(). It's meant to provide an index into asid_map,
> while the ASID_MASK lines isolate the asid number and add a new
> generation to it.
>
Commit 0c8ea531b774 ("arm64: mm: Allocate ASIDs in pairs") introduced the
asid2idx and idx2asid macros, but these macros are not really useful after
commit f88f42f853a8 ("arm64: context: Free up kernel ASIDs if KPTI is not in use").

I think "(asid & ~ASID_MASK)" can be replaced by a macro, and it is the same
code as asid2idx(). Could it be renamed instead? (e.g. ctxid2asid)
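A rough sketch of what I have in mind (name tentative, untested):

	#define ctxid2asid(asid)	((asid) & ~ASID_MASK)

Then new_context() would read "u64 newasid = generation | ctxid2asid(asid);",
arm64_mm_context_get() would use "asid = ctxid2asid(asid);", and
asid2idx()/idx2asid() would stay reserved for asid_map indexing.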
Thanks.
On Tue, Dec 07, 2021 at 10:21:26AM +0800, Yunfeng Ye wrote:
> On 2021/12/7 0:23, Catalin Marinas wrote:
> > On Thu, Oct 28, 2021 at 08:27:23PM +0800, Yunfeng Ye wrote:
> >> @@ -162,7 +162,7 @@ static u64 new_context(struct mm_struct *mm)
> >> u64 generation = atomic64_read(&asid_generation);
> >>
> >> if (asid != 0) {
> >> - u64 newasid = generation | (asid & ~ASID_MASK);
> >> + u64 newasid = generation | asid2idx(asid);
> >>
> >> /*
> >> * If our current ASID was active during a rollover, we
> >> @@ -306,7 +306,7 @@ unsigned long arm64_mm_context_get(struct mm_struct *mm)
> >> out_unlock:
> >> raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
> >>
> >> - asid &= ~ASID_MASK;
> >> + asid = asid2idx(asid);
> >
> > While functionally the code is the same, I don't think this was the
> > intention of asid2idx(). It's meant to provide an index into asid_map,
> > while the ASID_MASK lines isolate the asid number and add a new
> > generation to it.
>
> Commit 0c8ea531b774 ("arm64: mm: Allocate ASIDs in pairs") introduced the
> asid2idx and idx2asid macros, but these macros are not really useful after
> commit f88f42f853a8 ("arm64: context: Free up kernel ASIDs if KPTI is not in use").
>
> I think "(asid & ~ASID_MASK)" can be replaced by a macro, and it is the same
> code as asid2idx(). Could it be renamed instead? (e.g. ctxid2asid)
Yes, that would work.
--
Catalin