From: guoren@kernel.org
To: guoren@kernel.org
Cc: linux-riscv@lists.infradead.org, linux-kernel@vger.kernel.org,
    Guo Ren, Catalin Marinas, Will Deacon, Peter Zijlstra,
    Palmer Dabbelt, Anup Patel, Arnd Bergmann
Subject: [PATCH] riscv: locks: introduce ticket-based spinlock implementation
Date: Wed, 24 Mar 2021 10:14:52 +0000
Message-Id: <1616580892-80815-1-git-send-email-guoren@kernel.org>
X-Mailer: git-send-email 2.7.4
X-Mailing-List: linux-kernel@vger.kernel.org

From: Guo Ren

This patch introduces a ticket lock implementation for riscv, along the
same lines as the implementations in arch/arm and arch/csky.

Signed-off-by: Guo Ren
Cc: Catalin Marinas
Cc: Will Deacon
Cc: Peter Zijlstra
Cc: Palmer Dabbelt
Cc: Anup Patel
Cc: Arnd Bergmann
---
 arch/riscv/Kconfig                      |   1 +
 arch/riscv/include/asm/Kbuild           |   1 +
 arch/riscv/include/asm/spinlock.h       | 158 ++++++++++++--------------------
 arch/riscv/include/asm/spinlock_types.h |  19 ++--
 4 files changed, 74 insertions(+), 105 deletions(-)

diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index 87d7b52..7c56a20 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -30,6 +30,7 @@ config RISCV
 	select ARCH_HAS_STRICT_KERNEL_RWX if MMU
 	select ARCH_OPTIONAL_KERNEL_RWX if ARCH_HAS_STRICT_KERNEL_RWX
 	select ARCH_OPTIONAL_KERNEL_RWX_DEFAULT
+	select ARCH_USE_QUEUED_RWLOCKS
 	select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
 	select ARCH_WANT_FRAME_POINTERS
 	select ARCH_WANT_HUGE_PMD_SHARE if 64BIT
diff --git a/arch/riscv/include/asm/Kbuild b/arch/riscv/include/asm/Kbuild
index 445ccc9..e57ef80 100644
--- a/arch/riscv/include/asm/Kbuild
+++ b/arch/riscv/include/asm/Kbuild
@@ -3,5 +3,6 @@ generic-y += early_ioremap.h
 generic-y += extable.h
 generic-y += flat.h
 generic-y += kvm_para.h
+generic-y += qrwlock.h
 generic-y += user.h
 generic-y += vmlinux.lds.h
diff --git a/arch/riscv/include/asm/spinlock.h b/arch/riscv/include/asm/spinlock.h
index f4f7fa1..2c81764 100644
--- a/arch/riscv/include/asm/spinlock.h
+++ b/arch/riscv/include/asm/spinlock.h
@@ -7,129 +7,91 @@
 #ifndef _ASM_RISCV_SPINLOCK_H
 #define _ASM_RISCV_SPINLOCK_H
 
-#include <linux/kernel.h>
-#include <asm/current.h>
-#include <asm/fence.h>
-
 /*
- * Simple spin lock operations. These provide no fairness guarantees.
+ * Ticket-based spin-locking.
  */
+static inline void arch_spin_lock(arch_spinlock_t *lock)
+{
+	arch_spinlock_t lockval;
+	u32 tmp;
+
+	asm volatile (
+		"1:	lr.w	%0, %2		\n"
+		"	mv	%1, %0		\n"
+		"	addw	%0, %0, %3	\n"
+		"	sc.w	%0, %0, %2	\n"
+		"	bnez	%0, 1b		\n"
+		: "=&r" (tmp), "=&r" (lockval), "+A" (lock->lock)
+		: "r" (1 << TICKET_NEXT)
+		: "memory");
 
-/* FIXME: Replace this with a ticket lock, like MIPS. */
-
-#define arch_spin_is_locked(x)	(READ_ONCE((x)->lock) != 0)
+	while (lockval.tickets.next != lockval.tickets.owner) {
+		/*
+		 * FIXME - we need wfi/wfe here to prevent:
+		 *  - cache line bouncing
+		 *  - saving cpu pipeline in multi-harts-per-core
+		 *    processor
+		 */
+		lockval.tickets.owner = READ_ONCE(lock->tickets.owner);
+	}
 
-static inline void arch_spin_unlock(arch_spinlock_t *lock)
-{
-	smp_store_release(&lock->lock, 0);
+	__atomic_acquire_fence();
 }
 
 static inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
-	int tmp = 1, busy;
-
-	__asm__ __volatile__ (
-		"	amoswap.w %0, %2, %1\n"
-		RISCV_ACQUIRE_BARRIER
-		: "=r" (busy), "+A" (lock->lock)
-		: "r" (tmp)
+	u32 tmp, contended, res;
+
+	do {
+		asm volatile (
+		"	lr.w	%0, %3		\n"
+		"	srliw	%1, %0, %5	\n"
+		"	slliw	%2, %0, %5	\n"
+		"	or	%1, %2, %1	\n"
+		"	li	%2, 0		\n"
+		"	sub	%1, %1, %0	\n"
+		"	bnez	%1, 1f		\n"
+		"	addw	%0, %0, %4	\n"
+		"	sc.w	%2, %0, %3	\n"
+		"1:				\n"
+		: "=&r" (tmp), "=&r" (contended), "=&r" (res),
+		  "+A" (lock->lock)
+		: "r" (1 << TICKET_NEXT), "I" (TICKET_NEXT)
 		: "memory");
+	} while (res);
 
-	return !busy;
-}
-
-static inline void arch_spin_lock(arch_spinlock_t *lock)
-{
-	while (1) {
-		if (arch_spin_is_locked(lock))
-			continue;
-
-		if (arch_spin_trylock(lock))
-			break;
+	if (!contended) {
+		__atomic_acquire_fence();
+		return 1;
+	} else {
+		return 0;
 	}
 }
 
-/***********************************************************/
-
-static inline void arch_read_lock(arch_rwlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
-	int tmp;
-
-	__asm__ __volatile__(
-		"1:	lr.w	%1, %0\n"
-		"	bltz	%1, 1b\n"
-		"	addi	%1, %1, 1\n"
-		"	sc.w	%1, %1, %0\n"
-		"	bnez	%1, 1b\n"
-		RISCV_ACQUIRE_BARRIER
-		: "+A" (lock->lock), "=&r" (tmp)
-		:: "memory");
+	smp_store_release(&lock->tickets.owner, lock->tickets.owner + 1);
+	/* FIXME - we need ipi/sev here to notify above */
 }
 
-static inline void arch_write_lock(arch_rwlock_t *lock)
+static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
 {
-	int tmp;
-
-	__asm__ __volatile__(
-		"1:	lr.w	%1, %0\n"
-		"	bnez	%1, 1b\n"
-		"	li	%1, -1\n"
-		"	sc.w	%1, %1, %0\n"
-		"	bnez	%1, 1b\n"
-		RISCV_ACQUIRE_BARRIER
-		: "+A" (lock->lock), "=&r" (tmp)
-		:: "memory");
+	return lock.tickets.owner == lock.tickets.next;
 }
 
-static inline int arch_read_trylock(arch_rwlock_t *lock)
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 {
-	int busy;
-
-	__asm__ __volatile__(
-		"1:	lr.w	%1, %0\n"
-		"	bltz	%1, 1f\n"
-		"	addi	%1, %1, 1\n"
-		"	sc.w	%1, %1, %0\n"
-		"	bnez	%1, 1b\n"
-		RISCV_ACQUIRE_BARRIER
-		"1:\n"
-		: "+A" (lock->lock), "=&r" (busy)
-		:: "memory");
-
-	return !busy;
+	return !arch_spin_value_unlocked(READ_ONCE(*lock));
 }
 
-static inline int arch_write_trylock(arch_rwlock_t *lock)
+static inline int arch_spin_is_contended(arch_spinlock_t *lock)
 {
-	int busy;
-
-	__asm__ __volatile__(
-		"1:	lr.w	%1, %0\n"
-		"	bnez	%1, 1f\n"
-		"	li	%1, -1\n"
-		"	sc.w	%1, %1, %0\n"
-		"	bnez	%1, 1b\n"
-		RISCV_ACQUIRE_BARRIER
-		"1:\n"
-		: "+A" (lock->lock), "=&r" (busy)
-		:: "memory");
+	struct __raw_tickets tickets = READ_ONCE(lock->tickets);
 
-	return !busy;
+	return (tickets.next - tickets.owner) > 1;
 }
+#define arch_spin_is_contended	arch_spin_is_contended
 
-static inline void arch_read_unlock(arch_rwlock_t *lock)
-{
-	__asm__ __volatile__(
-		RISCV_RELEASE_BARRIER
-		"	amoadd.w x0, %1, %0\n"
-		: "+A" (lock->lock)
-		: "r" (-1)
-		: "memory");
-}
-
-static inline void arch_write_unlock(arch_rwlock_t *lock)
-{
-	smp_store_release(&lock->lock, 0);
-}
+#include <asm/qrwlock.h>
 
 #endif /* _ASM_RISCV_SPINLOCK_H */
diff --git a/arch/riscv/include/asm/spinlock_types.h b/arch/riscv/include/asm/spinlock_types.h
index f398e76..d7b38bf 100644
--- a/arch/riscv/include/asm/spinlock_types.h
+++ b/arch/riscv/include/asm/spinlock_types.h
@@ -10,16 +10,21 @@
 # error "please don't include this file directly"
 #endif
 
+#define TICKET_NEXT	16
+
 typedef struct {
-	volatile unsigned int lock;
+	union {
+		u32 lock;
+		struct __raw_tickets {
+			/* little endian */
+			u16 owner;
+			u16 next;
+		} tickets;
+	};
 } arch_spinlock_t;
 
-#define __ARCH_SPIN_LOCK_UNLOCKED	{ 0 }
-
-typedef struct {
-	volatile unsigned int lock;
-} arch_rwlock_t;
+#define __ARCH_SPIN_LOCK_UNLOCKED	{ { 0 } }
 
-#define __ARCH_RW_LOCK_UNLOCKED		{ 0 }
+#include <asm-generic/qrwlock_types.h>
 
 #endif /* _ASM_RISCV_SPINLOCK_TYPES_H */
-- 
2.7.4
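
For readers who have not met a ticket lock before, here is a minimal, purely
illustrative sketch of the same next/owner scheme in portable C. It uses GCC
__atomic builtins rather than the kernel primitives and LR/SC assembly in the
patch, and the type and function names (ticket_lock_t, ticket_lock,
ticket_unlock) are invented for the example:

#include <stdint.h>

typedef struct {
	uint16_t owner;	/* ticket currently being served */
	uint16_t next;	/* ticket handed to the next arriving CPU */
} ticket_lock_t;

static void ticket_lock(ticket_lock_t *lock)
{
	/* Take a ticket; the patch does this with an LR/SC loop that
	 * adds (1 << TICKET_NEXT) to the combined 32-bit word. */
	uint16_t my = __atomic_fetch_add(&lock->next, 1, __ATOMIC_RELAXED);

	/* Spin until the owner counter reaches our ticket (FIFO order). */
	while (__atomic_load_n(&lock->owner, __ATOMIC_ACQUIRE) != my)
		;	/* the patch busy-waits here too, see its FIXME */
}

static void ticket_unlock(ticket_lock_t *lock)
{
	/* Only the lock holder writes owner, so a plain read is fine here.
	 * This mirrors smp_store_release(&lock->tickets.owner, owner + 1). */
	__atomic_store_n(&lock->owner, lock->owner + 1, __ATOMIC_RELEASE);
}

int main(void)
{
	ticket_lock_t l = { 0, 0 };

	ticket_lock(&l);
	/* critical section */
	ticket_unlock(&l);
	return 0;
}

Fairness comes from the FIFO hand-off: every CPU draws a unique ticket and is
served strictly in ticket order, which the old amoswap-based test-and-set lock
could not guarantee.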
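
Also purely illustrative: the union in spinlock_types.h lets both counters be
read and updated with a single 32-bit access. On little-endian RISC-V, adding
(1 << TICKET_NEXT) to the word increments only the upper (next) half, and
trylock can be expressed as one 32-bit compare-and-swap that succeeds only
while owner == next, which is the condition the srliw/slliw/or/sub sequence in
the asm tests. A sketch with invented names, assuming a little-endian layout:

#include <stdbool.h>
#include <stdint.h>

#define TICKET_NEXT	16

typedef union {
	uint32_t lock;			/* both halves in one word */
	struct {
		uint16_t owner;		/* low half (little endian) */
		uint16_t next;		/* high half */
	} tickets;
} ticket_lock_t;

/* Trylock as one 32-bit CAS: take a ticket only if owner == next. */
static bool ticket_trylock(ticket_lock_t *l)
{
	uint32_t old = __atomic_load_n(&l->lock, __ATOMIC_RELAXED);

	/* Held or contended if the two halves differ. */
	if ((uint16_t)old != (uint16_t)(old >> TICKET_NEXT))
		return false;

	/* Bump the next half; acquire semantics on success. */
	return __atomic_compare_exchange_n(&l->lock, &old,
					   old + (1u << TICKET_NEXT), false,
					   __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
}

int main(void)
{
	ticket_lock_t l = { .lock = 0 };

	return ticket_trylock(&l) && l.tickets.next == 1 ? 0 : 1;
}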