Return-Path: 
Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand
	id S933585AbcCIRWj (ORCPT ); Wed, 9 Mar 2016 12:22:39 -0500
Received: from kanga.kvack.org ([205.233.56.17]:52584 "EHLO kanga.kvack.org"
	rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP
	id S1750929AbcCIRWb (ORCPT ); Wed, 9 Mar 2016 12:22:31 -0500
Date: Wed, 9 Mar 2016 12:22:25 -0500
From: Benjamin LaHaise 
To: Ingo Molnar , Russell King 
Cc: Thomas Gleixner , "H. Peter Anvin" , x86@kernel.org,
	linux-kernel@vger.kernel.org
Subject: [PATCH] x86_32: add support for 64 bit __get_user()
Message-ID: <20160309172225.GN12913@kvack.org>
Mime-Version: 1.0
Content-Type: text/plain; charset=us-ascii
Content-Disposition: inline
User-Agent: Mutt/1.4.2.2i
Sender: linux-kernel-owner@vger.kernel.org
List-ID: 
X-Mailing-List: linux-kernel@vger.kernel.org
Content-Length: 2263
Lines: 64

The existing __get_user() implementation does not support fetching
64 bit values on 32 bit x86.  Implement this in a way that does not
generate any incorrect warnings as cautioned by Russell King.  Test
code available at http://www.kvack.org/~bcrl/x86_32-get_user.tar .
Signed-off-by: Benjamin LaHaise diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index a4a30e4..2d0607a 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h @@ -333,7 +333,23 @@ do { \ } while (0) #ifdef CONFIG_X86_32 -#define __get_user_asm_u64(x, ptr, retval, errret) (x) = __get_user_bad() +#define __get_user_asm_u64(x, addr, err, errret) \ + asm volatile(ASM_STAC "\n" \ + "1: movl %2,%%eax\n" \ + "2: movl %3,%%edx\n" \ + "3: " ASM_CLAC "\n" \ + ".section .fixup,\"ax\"\n" \ + "4: mov %4,%0\n" \ + " xorl %%eax,%%eax\n" \ + " xorl %%edx,%%edx\n" \ + " jmp 3b\n" \ + ".previous\n" \ + _ASM_EXTABLE(1b, 4b) \ + _ASM_EXTABLE(2b, 4b) \ + : "=r" (err), "=A"(x) \ + : "m" (__m(addr)), "m" __m(((u32 *)(addr)) + 1), \ + "i" (errret), "0" (err)) + #define __get_user_asm_ex_u64(x, ptr) (x) = __get_user_bad() #else #define __get_user_asm_u64(x, ptr, retval, errret) \ @@ -420,11 +436,20 @@ do { \ #define __get_user_nocheck(x, ptr, size) \ ({ \ int __gu_err; \ - unsigned long __gu_val; \ - __uaccess_begin(); \ - __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \ - __uaccess_end(); \ - (x) = (__force __typeof__(*(ptr)))__gu_val; \ + if (size == 8) { \ + unsigned long __gu_val[2]; \ + __gu_err = 0; \ + __uaccess_begin(); \ + __get_user_asm_u64(__gu_val, ptr, __gu_err, -EFAULT); \ + __uaccess_end(); \ + (x) = *(__force __typeof__((ptr)))__gu_val; \ + } else { \ + unsigned long __gu_val; \ + __uaccess_begin(); \ + __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \ + __uaccess_end(); \ + (x) = (__force __typeof__(*(ptr)))__gu_val; \ + } \ __builtin_expect(__gu_err, 0); \ }) -- "Thought is the essence of where you are now."