From: Greg Kroah-Hartman
To: linux-kernel@vger.kernel.org
Cc: Greg Kroah-Hartman, stable@vger.kernel.org, Benjamin Herrenschmidt, Arnd Bergmann, Will Deacon, Sasha Levin
Subject: [PATCH 4.14 047/205] arm64: io: Ensure calls to delay routines are ordered against prior readX()
Date: Mon, 11 Feb 2019 15:17:25 +0100
Message-Id: <20190211141831.958423973@linuxfoundation.org>
In-Reply-To: <20190211141827.214852402@linuxfoundation.org>
References: <20190211141827.214852402@linuxfoundation.org>

4.14-stable review patch.  If anyone has any objections, please let me know.

------------------

[ Upstream commit 6460d32014717686d3b7963595950ba2c6d1bb5e ]

A relatively standard idiom for ensuring that a pair of MMIO writes to
a device arrive at that device with a specified minimum delay between
them is as follows:

	writel_relaxed(42, dev_base + CTL1);
	readl(dev_base + CTL1);
	udelay(10);
	writel_relaxed(42, dev_base + CTL2);

the intention being that the read-back from the device will push the
prior write to CTL1, and the udelay will hold up the write to CTL2
until at least 10us have elapsed.

Unfortunately, on arm64, where the underlying delay loop is implemented
as a read of the architected counter, the CPU does not guarantee
ordering from the readl() to the delay loop, and therefore the delay
loop could in theory be speculated and not provide the desired interval
between the two writes.

Fix this in a similar manner to PowerPC by introducing a dummy control
dependency on the output of readX() which, combined with the ISB in the
read of the architected counter, guarantees that a subsequent delay loop
cannot be executed until the readX() has returned its result.

Cc: Benjamin Herrenschmidt
Cc: Arnd Bergmann
Signed-off-by: Will Deacon
Signed-off-by: Sasha Levin
---
 arch/arm64/include/asm/io.h | 31 +++++++++++++++++++++++--------
 1 file changed, 23 insertions(+), 8 deletions(-)

diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index 35b2e50f17fb..b2bc7dbc1fa6 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -106,7 +106,22 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
 }
 
 /* IO barriers */
-#define __iormb()		rmb()
+#define __iormb(v)							\
+({									\
+	unsigned long tmp;						\
+									\
+	rmb();								\
+									\
+	/*								\
+	 * Create a dummy control dependency from the IO read to any	\
+	 * later instructions. This ensures that a subsequent call to	\
+	 * udelay() will be ordered due to the ISB in get_cycles().	\
+	 */								\
+	asm volatile("eor	%0, %1, %1\n"				\
+		     "cbnz	%0, ."					\
+		     : "=r" (tmp) : "r" (v) : "memory");		\
+})
+
 #define __iowmb()		wmb()
 
 #define mmiowb()		do { } while (0)
@@ -131,10 +146,10 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
  * following Normal memory access. Writes are ordered relative to any prior
  * Normal memory access.
  */
-#define readb(c)		({ u8  __v = readb_relaxed(c); __iormb(); __v; })
-#define readw(c)		({ u16 __v = readw_relaxed(c); __iormb(); __v; })
-#define readl(c)		({ u32 __v = readl_relaxed(c); __iormb(); __v; })
-#define readq(c)		({ u64 __v = readq_relaxed(c); __iormb(); __v; })
+#define readb(c)		({ u8  __v = readb_relaxed(c); __iormb(__v); __v; })
+#define readw(c)		({ u16 __v = readw_relaxed(c); __iormb(__v); __v; })
+#define readl(c)		({ u32 __v = readl_relaxed(c); __iormb(__v); __v; })
+#define readq(c)		({ u64 __v = readq_relaxed(c); __iormb(__v); __v; })
 
 #define writeb(v,c)		({ __iowmb(); writeb_relaxed((v),(c)); })
 #define writew(v,c)		({ __iowmb(); writew_relaxed((v),(c)); })
@@ -185,9 +200,9 @@ extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size);
 /*
  * io{read,write}{16,32,64}be() macros
  */
-#define ioread16be(p)		({ __u16 __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(); __v; })
-#define ioread32be(p)		({ __u32 __v = be32_to_cpu((__force __be32)__raw_readl(p)); __iormb(); __v; })
-#define ioread64be(p)		({ __u64 __v = be64_to_cpu((__force __be64)__raw_readq(p)); __iormb(); __v; })
+#define ioread16be(p)		({ __u16 __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(__v); __v; })
+#define ioread32be(p)		({ __u32 __v = be32_to_cpu((__force __be32)__raw_readl(p)); __iormb(__v); __v; })
+#define ioread64be(p)		({ __u64 __v = be64_to_cpu((__force __be64)__raw_readq(p)); __iormb(__v); __v; })
 
 #define iowrite16be(v,p)	({ __iowmb(); __raw_writew((__force __u16)cpu_to_be16(v), p); })
 #define iowrite32be(v,p)	({ __iowmb(); __raw_writel((__force __u32)cpu_to_be32(v), p); })
-- 
2.19.1
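
For context, a minimal driver-side sketch of the idiom the commit message
describes, with the ordering the patch now guarantees spelled out in
comments. The device, the CTL1/CTL2 offsets, the helper name and the
value 42 are hypothetical, as in the commit message; this is an
illustration, not part of the patch:

	#include <linux/io.h>
	#include <linux/delay.h>

	/* Hypothetical register offsets, for illustration only. */
	#define CTL1	0x00
	#define CTL2	0x04

	static void example_paced_writes(void __iomem *dev_base)
	{
		/* May be relaxed: the read-back below pushes it out to the device. */
		writel_relaxed(42, dev_base + CTL1);

		/*
		 * readl() performs the read-back; with this patch, __iormb()
		 * adds a control dependency so the delay loop below cannot be
		 * speculated ahead of the read returning its value.
		 */
		readl(dev_base + CTL1);
		udelay(10);

		/* Reaches the device at least ~10us after the CTL1 write. */
		writel_relaxed(42, dev_base + CTL2);
	}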