From: Yunfeng Ye
Subject: [PATCH v2 1/2] arm64: mm: Rename asid2idx() to ctxid2asid()
Date: Wed, 8 Dec 2021 17:16:45 +0800
Message-ID: <506863a9-b934-3af3-c70d-3284e14478de@huawei.com>
Cc: Hewenliang
X-Mailing-List: linux-kernel@vger.kernel.org

Commit 0c8ea531b774 ("arm64: mm: Allocate ASIDs in pairs") introduced
the asid2idx() and idx2asid() macros, but these macros have not been
really useful since commit f88f42f853a8 ("arm64: context: Free up
kernel ASIDs if KPTI is not in use").

The open-coded expression "(asid & ~ASID_MASK)" can be replaced by a
macro that is identical to asid2idx(), so rename asid2idx() to
ctxid2asid() to make its purpose clearer: it extracts the ASID from a
context ID. Also add an asid2ctxid() macro, which builds a context ID
from an ASID and a generation.

---
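[Not part of the patch: a minimal, standalone sketch of the new macro
semantics, assuming asid_bits == 16. The definitions below only mirror
the kernel's; the values in main() are made up, to show that
asid2ctxid() and ctxid2asid() are inverses of each other.]

#include <assert.h>
#include <stdio.h>

#define asid_bits		16
#define ASID_MASK		(~0ULL << asid_bits)	/* generation bits */
#define ASID_FIRST_VERSION	(1ULL << asid_bits)

#define ctxid2asid(asid)	((asid) & ~ASID_MASK)	/* strip the generation */
#define asid2ctxid(asid, genid)	((asid) | (genid))	/* combine ASID + generation */

int main(void)
{
	unsigned long long asid = 42;
	unsigned long long generation = 3 * ASID_FIRST_VERSION;
	unsigned long long ctxid = asid2ctxid(asid, generation);

	/* Extracting the ASID back out undoes the combine. */
	assert(ctxid2asid(ctxid) == asid);
	printf("ctxid=%#llx -> asid=%llu\n", ctxid, ctxid2asid(ctxid));
	return 0;
}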
 arch/arm64/mm/context.c | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index cd72576ae2b7..bbc2708fe928 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -35,8 +35,8 @@ static unsigned long *pinned_asid_map;
 #define ASID_FIRST_VERSION	(1UL << asid_bits)
 
 #define NUM_USER_ASIDS		ASID_FIRST_VERSION
-#define asid2idx(asid)		((asid) & ~ASID_MASK)
-#define idx2asid(idx)		asid2idx(idx)
+#define ctxid2asid(asid)	((asid) & ~ASID_MASK)
+#define asid2ctxid(asid, genid)	((asid) | (genid))
 
 /* Get the ASIDBits supported by the current CPU */
 static u32 get_cpu_asid_bits(void)
@@ -120,7 +120,7 @@ static void flush_context(void)
 		 */
 		if (asid == 0)
 			asid = per_cpu(reserved_asids, i);
-		__set_bit(asid2idx(asid), asid_map);
+		__set_bit(ctxid2asid(asid), asid_map);
 		per_cpu(reserved_asids, i) = asid;
 	}
 
@@ -162,7 +162,7 @@ static u64 new_context(struct mm_struct *mm)
 	u64 generation = atomic64_read(&asid_generation);
 
 	if (asid != 0) {
-		u64 newasid = generation | (asid & ~ASID_MASK);
+		u64 newasid = asid2ctxid(ctxid2asid(asid), generation);
 
 		/*
 		 * If our current ASID was active during a rollover, we
@@ -183,7 +183,7 @@ static u64 new_context(struct mm_struct *mm)
 		 * We had a valid ASID in a previous life, so try to re-use
 		 * it if possible.
 		 */
-		if (!__test_and_set_bit(asid2idx(asid), asid_map))
+		if (!__test_and_set_bit(ctxid2asid(asid), asid_map))
 			return newasid;
 	}
 
@@ -209,7 +209,7 @@ static u64 new_context(struct mm_struct *mm)
 set_asid:
 	__set_bit(asid, asid_map);
 	cur_idx = asid;
-	return idx2asid(asid) | generation;
+	return asid2ctxid(asid, generation);
 }
 
 void check_and_switch_context(struct mm_struct *mm)
@@ -300,13 +300,13 @@ unsigned long arm64_mm_context_get(struct mm_struct *mm)
 	}
 
 	nr_pinned_asids++;
-	__set_bit(asid2idx(asid), pinned_asid_map);
+	__set_bit(ctxid2asid(asid), pinned_asid_map);
 	refcount_set(&mm->context.pinned, 1);
 
 out_unlock:
 	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
 
-	asid &= ~ASID_MASK;
+	asid = ctxid2asid(asid);
 
 	/* Set the equivalent of USER_ASID_BIT */
 	if (asid && arm64_kernel_unmapped_at_el0())
@@ -327,7 +327,7 @@ void arm64_mm_context_put(struct mm_struct *mm)
 	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
 
 	if (refcount_dec_and_test(&mm->context.pinned)) {
-		__clear_bit(asid2idx(asid), pinned_asid_map);
+		__clear_bit(ctxid2asid(asid), pinned_asid_map);
 		nr_pinned_asids--;
 	}
-- 
2.27.0