From: Chris Wilson <chris@chris-wilson.co.uk>
To: linux-kernel@vger.kernel.org
Cc: x86@kernel.org, intel-gfx@lists.freedesktop.org,
	Chris Wilson <chris@chris-wilson.co.uk>,
	Thomas Gleixner, Ingo Molnar, "H. Peter Anvin"
Subject: [PATCH 2/3] x86-32: Expand static copy_to_user()
Date: Thu, 1 Jun 2017 07:58:42 +0100
Message-Id: <20170601065843.2392-3-chris@chris-wilson.co.uk>
In-Reply-To: <20170601065843.2392-1-chris@chris-wilson.co.uk>
References: <20170601065843.2392-1-chris@chris-wilson.co.uk>
X-Mailer: git-send-email 2.11.0
List-ID: <linux-kernel.vger.kernel.org>

For sizes that are fixed at compile time, teach x86-32 copy_to_user()
to convert the copy into the simpler put_user() and inline it, similar
to the optimisation already applied to copy_from_user() and already
used by x86-64.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Thomas Gleixner
Cc: Ingo Molnar
Cc: "H. Peter Anvin"
---
 arch/x86/include/asm/uaccess_32.h | 48 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 48 insertions(+)

diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
index 44d17d1ab07c..a02aa9db34ed 100644
--- a/arch/x86/include/asm/uaccess_32.h
+++ b/arch/x86/include/asm/uaccess_32.h
@@ -16,6 +16,54 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
 static __always_inline unsigned long __must_check
 raw_copy_to_user(void __user *to, const void *from, unsigned long n)
 {
+	if (__builtin_constant_p(n)) {
+		unsigned long ret = 0;
+
+		switch (n) {
+		case 1:
+			__uaccess_begin();
+			__put_user_asm(*(u8 *)from, to, ret,
+				       "b", "b", "iq", 1);
+			__uaccess_end();
+			return ret;
+		case 2:
+			__uaccess_begin();
+			__put_user_asm(*(u16 *)from, to, ret,
+				       "w", "w", "ir", 2);
+			__uaccess_end();
+			return ret;
+		case 4:
+			__uaccess_begin();
+			__put_user_asm(*(u32 *)from, to, ret,
+				       "l", "k", "ir", 4);
+			__uaccess_end();
+			return ret;
+		case 6:
+			__uaccess_begin();
+			__put_user_asm(*(u32 *)from, to, ret,
+				       "l", "k", "ir", 4);
+			if (likely(!ret)) {
+				asm("":::"memory");
+				__put_user_asm(*(u16 *)(4 + (char *)from),
+					       (u16 __user *)(4 + (char __user *)to),
+					       ret, "w", "w", "ir", 2);
+			}
+			__uaccess_end();
+			return ret;
+		case 8:
+			__uaccess_begin();
+			__put_user_asm(*(u32 *)from, to, ret,
+				       "l", "k", "ir", 4);
+			if (likely(!ret)) {
+				asm("":::"memory");
+				__put_user_asm(*(u32 *)(4 + (char *)from),
+					       (u32 __user *)(4 + (char __user *)to),
+					       ret, "l", "k", "ir", 4);
+			}
+			__uaccess_end();
+			return ret;
+		}
+	}
 	return __copy_user_ll((__force void *)to, from, n);
 }
 
-- 
2.11.0
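
For illustration, a minimal sketch of the kind of caller this helps;
the function and variable names below are hypothetical, not taken from
the kernel. With a compile-time-constant size, copy_to_user() (which
on x86 inlines down to raw_copy_to_user() after access_ok()) now
reduces to the case-4 branch above instead of calling out to
__copy_user_ll():

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/uaccess.h>

/*
 * Hypothetical caller, for illustration only. sizeof(val) is a
 * compile-time constant (4), so __builtin_constant_p(n) is true and
 * the copy collapses to a single inlined __put_user_asm() store
 * bracketed by __uaccess_begin()/__uaccess_end().
 */
static int example_write_u32(u32 __user *uptr, u32 val)
{
	if (copy_to_user(uptr, &val, sizeof(val)))
		return -EFAULT;
	return 0;
}

The 6- and 8-byte cases split the copy into two such stores, with a
compiler barrier between them keeping the second store ordered after
the error check on the first.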