Received: by 2002:ac0:946b:0:0:0:0:0 with SMTP id j40csp2768328imj; Mon, 11 Feb 2019 08:11:02 -0800 (PST) X-Google-Smtp-Source: AHgI3IYbLGMvh963A0MasyiOuBjljrHRxcXF19ouLpnNWWTMDOEfiGYaDzUwHfrcHaKaAyClXusU X-Received: by 2002:a62:c42:: with SMTP id u63mr36793180pfi.73.1549901462101; Mon, 11 Feb 2019 08:11:02 -0800 (PST) ARC-Seal: i=1; a=rsa-sha256; t=1549901462; cv=none; d=google.com; s=arc-20160816; b=rCuEybZBe53wPOH/7q7fMj7GJouNfodKvi4j01fL9AnGW7LbUlw885o2q9iGj8944C tYWkiU86/Qvx5tU0h098dFpOXBQcLQMoUHd+XUyH5d6YI7b2ZoaBIC/DxBtSxkY1mnQq J+CYd/C/AIFfjLboF2L2BnggXOWMoZFbrkt4G1nBfvn6guyN+SGkjL+T3mnAyon8rIAw 3vq6wzhvTGpPxggpnUSfDkGxEMy7xaaWKDyOIfOwgyvx+/HynV1doZiAFXBbG6hr/BGz MQqvZoM8pHpOHg/94KZUZUsb03wXtlRDXGW0eGswEO3DsPBzul0/4128HVJO1HYpiB09 Gt2Q== ARC-Message-Signature: i=1; a=rsa-sha256; c=relaxed/relaxed; d=google.com; s=arc-20160816; h=list-id:precedence:sender:content-transfer-encoding:mime-version :user-agent:references:in-reply-to:message-id:date:subject:cc:to :from:dkim-signature; bh=bJw/Yc4rc2wOJQ75z4if8H7EZyAd2S5PSZ0S7PeZKwQ=; b=MyV+RJkhD6bDrpa55/xp86sXhYctwbf347KAnX9COi8QAQauJFVMdQPpuw6VBRtQAr elHjtHZG2ajP84UCh+1WjIUxeaJPbhCPPFQ10YsRLisYrFy9hJIzFeRpkL9rdnMY0Xy4 J59HZb9AajOSCoypdVe/Iq4x9blTRvWcJBVG4aTXjqF1hooaCjYkzzq2mBBfSgq3Yz6H ex9zAHGG01z07LCIcQ5xRx3FIgG2KqrzrcZBZShyEeFmpSlcbsSvh6VvQi6HIwsGO71n LYbVMyOo0uzUIXgwDfVDkZl/dQlNJOJuUoM1+S5R0oeoSPdffoTRRuyDkDS0diNvQkqu SokA== ARC-Authentication-Results: i=1; mx.google.com; dkim=pass header.i=@kernel.org header.s=default header.b=WVWbJOSg; spf=pass (google.com: best guess record for domain of linux-kernel-owner@vger.kernel.org designates 209.132.180.67 as permitted sender) smtp.mailfrom=linux-kernel-owner@vger.kernel.org Return-Path: Received: from vger.kernel.org (vger.kernel.org. 
[209.132.180.67]) by mx.google.com with ESMTP id g186si1759124pgc.320.2019.02.11.08.10.45; Mon, 11 Feb 2019 08:11:02 -0800 (PST) Received-SPF: pass (google.com: best guess record for domain of linux-kernel-owner@vger.kernel.org designates 209.132.180.67 as permitted sender) client-ip=209.132.180.67; Authentication-Results: mx.google.com; dkim=pass header.i=@kernel.org header.s=default header.b=WVWbJOSg; spf=pass (google.com: best guess record for domain of linux-kernel-owner@vger.kernel.org designates 209.132.180.67 as permitted sender) smtp.mailfrom=linux-kernel-owner@vger.kernel.org Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1729526AbfBKO0M (ORCPT + 99 others); Mon, 11 Feb 2019 09:26:12 -0500 Received: from mail.kernel.org ([198.145.29.99]:59868 "EHLO mail.kernel.org" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1729516AbfBKO0I (ORCPT ); Mon, 11 Feb 2019 09:26:08 -0500 Received: from localhost (5356596B.cm-6-7b.dynamic.ziggo.nl [83.86.89.107]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by mail.kernel.org (Postfix) with ESMTPSA id 0548020821; Mon, 11 Feb 2019 14:26:06 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=default; t=1549895167; bh=z23Rps77Q6VnImrSBG0VjQfh5g0s6MpfCTL3wiilGJg=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=WVWbJOSgFA2cjjXyQkmxkKvIefELnneM+DtHP4iAPPuT/8PERqLDXRniRrk/VIwkZ P1mkM9VfkTqKRXok/rIRbwEQ4Qjw4GIh+2+Fdh38wP1MTD6eeZtl97oIie/Qdps8Ty zPSR2wL7DGIhRL0Q67NnZ0RF9ZXtMDRpCkDLZ6xg= From: Greg Kroah-Hartman To: linux-kernel@vger.kernel.org Cc: Greg Kroah-Hartman , stable@vger.kernel.org, Benjamin Herrenschmidt , Arnd Bergmann , Will Deacon , Sasha Levin Subject: [PATCH 4.20 085/352] arm64: io: Ensure calls to delay routines are ordered against prior readX() Date: Mon, 11 Feb 2019 15:15:12 +0100 Message-Id: <20190211141851.378634614@linuxfoundation.org> X-Mailer: git-send-email 2.20.1 
In-Reply-To: <20190211141846.543045703@linuxfoundation.org> References: <20190211141846.543045703@linuxfoundation.org> User-Agent: quilt/0.65 X-stable: review X-Patchwork-Hint: ignore MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Sender: linux-kernel-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: linux-kernel@vger.kernel.org 4.20-stable review patch. If anyone has any objections, please let me know. ------------------ [ Upstream commit 6460d32014717686d3b7963595950ba2c6d1bb5e ] A relatively standard idiom for ensuring that a pair of MMIO writes to a device arrive at that device with a specified minimum delay between them is as follows: writel_relaxed(42, dev_base + CTL1); readl(dev_base + CTL1); udelay(10); writel_relaxed(42, dev_base + CTL2); the intention being that the read-back from the device will push the prior write to CTL1, and the udelay will hold up the write to CTL2 until at least 10us have elapsed. Unfortunately, on arm64 where the underlying delay loop is implemented as a read of the architected counter, the CPU does not guarantee ordering from the readl() to the delay loop and therefore the delay loop could in theory be speculated and not provide the desired interval between the two writes. Fix this in a similar manner to PowerPC by introducing a dummy control dependency on the output of readX() which, combined with the ISB in the read of the architected counter, guarantees that a subsequent delay loop can not be executed until the readX() has returned its result.
Cc: Benjamin Herrenschmidt Cc: Arnd Bergmann Signed-off-by: Will Deacon Signed-off-by: Sasha Levin --- arch/arm64/include/asm/io.h | 31 +++++++++++++++++++++++-------- 1 file changed, 23 insertions(+), 8 deletions(-) diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h index 9f8b915af3a7..d42d00d8d5b6 100644 --- a/arch/arm64/include/asm/io.h +++ b/arch/arm64/include/asm/io.h @@ -104,7 +104,22 @@ static inline u64 __raw_readq(const volatile void __iomem *addr) } /* IO barriers */ -#define __iormb() rmb() +#define __iormb(v) \ +({ \ + unsigned long tmp; \ + \ + rmb(); \ + \ + /* \ + * Create a dummy control dependency from the IO read to any \ + * later instructions. This ensures that a subsequent call to \ + * udelay() will be ordered due to the ISB in get_cycles(). \ + */ \ + asm volatile("eor %0, %1, %1\n" \ + "cbnz %0, ." \ + : "=r" (tmp) : "r" (v) : "memory"); \ +}) + #define __iowmb() wmb() #define mmiowb() do { } while (0) @@ -129,10 +144,10 @@ static inline u64 __raw_readq(const volatile void __iomem *addr) * following Normal memory access. Writes are ordered relative to any prior * Normal memory access. 
*/ -#define readb(c) ({ u8 __v = readb_relaxed(c); __iormb(); __v; }) -#define readw(c) ({ u16 __v = readw_relaxed(c); __iormb(); __v; }) -#define readl(c) ({ u32 __v = readl_relaxed(c); __iormb(); __v; }) -#define readq(c) ({ u64 __v = readq_relaxed(c); __iormb(); __v; }) +#define readb(c) ({ u8 __v = readb_relaxed(c); __iormb(__v); __v; }) +#define readw(c) ({ u16 __v = readw_relaxed(c); __iormb(__v); __v; }) +#define readl(c) ({ u32 __v = readl_relaxed(c); __iormb(__v); __v; }) +#define readq(c) ({ u64 __v = readq_relaxed(c); __iormb(__v); __v; }) #define writeb(v,c) ({ __iowmb(); writeb_relaxed((v),(c)); }) #define writew(v,c) ({ __iowmb(); writew_relaxed((v),(c)); }) @@ -183,9 +198,9 @@ extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size); /* * io{read,write}{16,32,64}be() macros */ -#define ioread16be(p) ({ __u16 __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(); __v; }) -#define ioread32be(p) ({ __u32 __v = be32_to_cpu((__force __be32)__raw_readl(p)); __iormb(); __v; }) -#define ioread64be(p) ({ __u64 __v = be64_to_cpu((__force __be64)__raw_readq(p)); __iormb(); __v; }) +#define ioread16be(p) ({ __u16 __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(__v); __v; }) +#define ioread32be(p) ({ __u32 __v = be32_to_cpu((__force __be32)__raw_readl(p)); __iormb(__v); __v; }) +#define ioread64be(p) ({ __u64 __v = be64_to_cpu((__force __be64)__raw_readq(p)); __iormb(__v); __v; }) #define iowrite16be(v,p) ({ __iowmb(); __raw_writew((__force __u16)cpu_to_be16(v), p); }) #define iowrite32be(v,p) ({ __iowmb(); __raw_writel((__force __u32)cpu_to_be32(v), p); }) -- 2.19.1