Date: Fri, 31 Jul 2015 07:03:04 -0700
From: tip-bot for Brian Gerst
To: linux-tip-commits@vger.kernel.org
Cc: peterz@infradead.org, bp@alien8.de, linux-kernel@vger.kernel.org,
    tglx@linutronix.de, mingo@kernel.org, hpa@zytor.com, luto@amacapital.net,
    torvalds@linux-foundation.org, brgerst@gmail.com, dvlasenk@redhat.com,
    luto@kernel.org
In-Reply-To: <1438148483-11932-2-git-send-email-brgerst@gmail.com>
References: <1438148483-11932-2-git-send-email-brgerst@gmail.com>
Subject: [tip:x86/asm] x86/vm86: Move vm86 fields out of 'thread_struct'
Git-Commit-ID: 9fda6a0681e070b496235b132bc70ceb80300211

Commit-ID:  9fda6a0681e070b496235b132bc70ceb80300211
Gitweb:     http://git.kernel.org/tip/9fda6a0681e070b496235b132bc70ceb80300211
Author:     Brian Gerst <brgerst@gmail.com>
AuthorDate: Wed, 29 Jul 2015 01:41:16 -0400
Committer:  Ingo Molnar <mingo@kernel.org>
CommitDate: Fri, 31 Jul 2015 13:31:07 +0200

x86/vm86: Move vm86 fields out of 'thread_struct'

Allocate a separate structure for the vm86 fields.

Signed-off-by: Brian Gerst
Acked-by: Andy Lutomirski
Cc: Andy Lutomirski
Cc: Borislav Petkov
Cc: Denys Vlasenko
Cc: H. Peter Anvin
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Link: http://lkml.kernel.org/r/1438148483-11932-2-git-send-email-brgerst@gmail.com
[ Build fixes. ]
Signed-off-by: Ingo Molnar
---
 arch/x86/include/asm/processor.h | 11 +++-------
 arch/x86/include/asm/vm86.h      | 19 ++++++++++++++++-
 arch/x86/kernel/process.c        |  3 +++
 arch/x86/kernel/vm86_32.c        | 46 +++++++++++++++++++++++-----------------
 arch/x86/mm/fault.c              |  6 ++++--
 5 files changed, 55 insertions(+), 30 deletions(-)

diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index befc134..9615a4e 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -6,8 +6,8 @@
 /* Forward declaration, a strange C thing */
 struct task_struct;
 struct mm_struct;
+struct vm86;

-#include
 #include
 #include
 #include
@@ -400,13 +400,9 @@ struct thread_struct {
         unsigned long cr2;
         unsigned long trap_nr;
         unsigned long error_code;
-#ifdef CONFIG_X86_32
+#ifdef CONFIG_VM86
         /* Virtual 86 mode info */
-        struct vm86plus_struct __user *vm86_info;
-        unsigned long screen_bitmap;
-        unsigned long v86flags;
-        unsigned long v86mask;
-        unsigned long saved_sp0;
+        struct vm86 *vm86;
 #endif
         /* IO permissions: */
         unsigned long *io_bitmap_ptr;
@@ -718,7 +714,6 @@ static inline void spin_lock_prefetch(const void *x)
 #define INIT_THREAD {                                   \
         .sp0           = TOP_OF_INIT_STACK,             \
-        .vm86_info     = NULL,                          \
         .sysenter_cs   = __KERNEL_CS,                   \
         .io_bitmap_ptr = NULL,                          \
 }
diff --git a/arch/x86/include/asm/vm86.h b/arch/x86/include/asm/vm86.h
index 1d8de3f..20b43b7 100644
--- a/arch/x86/include/asm/vm86.h
+++ b/arch/x86/include/asm/vm86.h
@@ -1,7 +1,6 @@
 #ifndef _ASM_X86_VM86_H
 #define _ASM_X86_VM86_H

-
 #include
 #include
@@ -58,6 +57,14 @@ struct kernel_vm86_struct {
  */
 };

+struct vm86 {
+        struct vm86plus_struct __user *vm86_info;
+        unsigned long screen_bitmap;
+        unsigned long v86flags;
+        unsigned long v86mask;
+        unsigned long saved_sp0;
+};
+
 #ifdef CONFIG_VM86

 void handle_vm86_fault(struct kernel_vm86_regs *, long);
@@ -67,6 +74,14 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *);

 struct task_struct;
 void release_vm86_irqs(struct task_struct *);

+#define free_vm86(t) do {                       \
+        struct thread_struct *__t = (t);        \
+        if (__t->vm86 != NULL) {                \
+                kfree(__t->vm86);               \
+                __t->vm86 = NULL;               \
+        }                                       \
+} while (0)
+
 #else

 #define handle_vm86_fault(a, b)
@@ -77,6 +92,8 @@ static inline int handle_vm86_trap(struct kernel_vm86_regs *a, long b, int c)
         return 0;
 }

+#define free_vm86(t) do { } while(0)
+
 #endif /* CONFIG_VM86 */

 #endif /* _ASM_X86_VM86_H */
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 397688b..2199d9b 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -29,6 +29,7 @@
 #include
 #include
 #include
+#include

 /*
  * per-CPU TSS segments. Threads are completely 'soft' on Linux,
@@ -110,6 +111,8 @@ void exit_thread(void)
                 kfree(bp);
         }

+        free_vm86(t);
+
         fpu__drop(fpu);
 }
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index e6c2b47..bfa59b1 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -44,6 +44,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
@@ -81,8 +82,8 @@
 /*
  * virtual flags (16 and 32-bit versions)
  */
-#define VFLAGS  (*(unsigned short *)&(current->thread.v86flags))
-#define VEFLAGS (current->thread.v86flags)
+#define VFLAGS  (*(unsigned short *)&(current->thread.vm86->v86flags))
+#define VEFLAGS (current->thread.vm86->v86flags)

 #define set_flags(X, new, mask) \
 ((X) = ((X) & ~(mask)) | ((new) & (mask)))
@@ -96,6 +97,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
         struct pt_regs *ret;
         struct task_struct *tsk = current;
         struct vm86plus_struct __user *user;
+        struct vm86 *vm86 = current->thread.vm86;
         long err = 0;

         /*
@@ -105,12 +107,12 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
          */
         local_irq_enable();

-        if (!tsk->thread.vm86_info) {
+        if (!vm86 || !vm86->vm86_info) {
                 pr_alert("no vm86_info: BAD\n");
                 do_exit(SIGSEGV);
         }
-        set_flags(regs->pt.flags, VEFLAGS, X86_EFLAGS_VIF | tsk->thread.v86mask);
-        user = tsk->thread.vm86_info;
+        set_flags(regs->pt.flags, VEFLAGS, X86_EFLAGS_VIF | vm86->v86mask);
+        user = vm86->vm86_info;

         if (!access_ok(VERIFY_WRITE, user, VMPI.is_vm86pus ?
                        sizeof(struct vm86plus_struct) :
@@ -137,7 +139,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
                 put_user_ex(regs->fs, &user->regs.fs);
                 put_user_ex(regs->gs, &user->regs.gs);

-                put_user_ex(tsk->thread.screen_bitmap, &user->screen_bitmap);
+                put_user_ex(vm86->screen_bitmap, &user->screen_bitmap);
         } put_user_catch(err);
         if (err) {
                 pr_alert("could not access userspace vm86_info\n");
@@ -145,10 +147,10 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
         }

         tss = &per_cpu(cpu_tss, get_cpu());
-        tsk->thread.sp0 = tsk->thread.saved_sp0;
+        tsk->thread.sp0 = vm86->saved_sp0;
         tsk->thread.sysenter_cs = __KERNEL_CS;
         load_sp0(tss, &tsk->thread);
-        tsk->thread.saved_sp0 = 0;
+        vm86->saved_sp0 = 0;
         put_cpu();

         ret = KVM86->regs32;
@@ -242,9 +244,15 @@ static long do_sys_vm86(struct vm86plus_struct __user *v86, bool plus,
 {
         struct tss_struct *tss;
         struct task_struct *tsk = current;
+        struct vm86 *vm86 = tsk->thread.vm86;
         unsigned long err = 0;

-        if (tsk->thread.saved_sp0)
+        if (!vm86) {
+                if (!(vm86 = kzalloc(sizeof(*vm86), GFP_KERNEL)))
+                        return -ENOMEM;
+                tsk->thread.vm86 = vm86;
+        }
+        if (vm86->saved_sp0)
                 return -EPERM;
         if (!access_ok(VERIFY_READ, v86, plus ?
@@ -295,7 +303,7 @@ static long do_sys_vm86(struct vm86plus_struct __user *v86, bool plus,
         }

         info->regs32 = current_pt_regs();
-        tsk->thread.vm86_info = v86;
+        vm86->vm86_info = v86;

         /*
          * The flags register is also special: we cannot trust that the user
@@ -311,16 +319,16 @@ static long do_sys_vm86(struct vm86plus_struct __user *v86, bool plus,

         switch (info->cpu_type) {
         case CPU_286:
-                tsk->thread.v86mask = 0;
+                vm86->v86mask = 0;
                 break;
         case CPU_386:
-                tsk->thread.v86mask = X86_EFLAGS_NT | X86_EFLAGS_IOPL;
+                vm86->v86mask = X86_EFLAGS_NT | X86_EFLAGS_IOPL;
                 break;
         case CPU_486:
-                tsk->thread.v86mask = X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL;
+                vm86->v86mask = X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL;
                 break;
         default:
-                tsk->thread.v86mask = X86_EFLAGS_ID | X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL;
+                vm86->v86mask = X86_EFLAGS_ID | X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL;
                 break;
         }

@@ -328,7 +336,7 @@ static long do_sys_vm86(struct vm86plus_struct __user *v86, bool plus,
          * Save old state, set default return value (%ax) to 0 (VM86_SIGNAL)
          */
         info->regs32->ax = VM86_SIGNAL;
-        tsk->thread.saved_sp0 = tsk->thread.sp0;
+        vm86->saved_sp0 = tsk->thread.sp0;
         lazy_save_gs(info->regs32->gs);

         tss = &per_cpu(cpu_tss, get_cpu());
@@ -338,7 +346,7 @@ static long do_sys_vm86(struct vm86plus_struct __user *v86, bool plus,
         load_sp0(tss, &tsk->thread);
         put_cpu();

-        tsk->thread.screen_bitmap = info->screen_bitmap;
+        vm86->screen_bitmap = info->screen_bitmap;
         if (info->flags & VM86_SCREEN_BITMAP)
                 mark_screen_rdonly(tsk->mm);

@@ -408,7 +416,7 @@ static inline void clear_AC(struct kernel_vm86_regs *regs)
 static inline void set_vflags_long(unsigned long flags, struct kernel_vm86_regs *regs)
 {
-        set_flags(VEFLAGS, flags, current->thread.v86mask);
+        set_flags(VEFLAGS, flags, current->thread.vm86->v86mask);
         set_flags(regs->pt.flags, flags, SAFE_MASK);
         if (flags & X86_EFLAGS_IF)
                 set_IF(regs);
@@ -418,7 +426,7 @@ static inline void set_vflags_long(unsigned long flags, struct kernel_vm86_regs
 static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_regs *regs)
 {
-        set_flags(VFLAGS, flags, current->thread.v86mask);
+        set_flags(VFLAGS, flags, current->thread.vm86->v86mask);
         set_flags(regs->pt.flags, flags, SAFE_MASK);
         if (flags & X86_EFLAGS_IF)
                 set_IF(regs);
@@ -433,7 +441,7 @@ static inline unsigned long get_vflags(struct kernel_vm86_regs *regs)
         if (VEFLAGS & X86_EFLAGS_VIF)
                 flags |= X86_EFLAGS_IF;
         flags |= X86_EFLAGS_IOPL;
-        return flags | (VEFLAGS & current->thread.v86mask);
+        return flags | (VEFLAGS & current->thread.vm86->v86mask);
 }

 static inline int is_revectored(int nr, struct revectored_struct *bitmap)
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 9dc9098..34a368d 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -301,14 +301,16 @@ static inline void
 check_v8086_mode(struct pt_regs *regs, unsigned long address,
                  struct task_struct *tsk)
 {
+#ifdef CONFIG_VM86
         unsigned long bit;

-        if (!v8086_mode(regs))
+        if (!v8086_mode(regs) || !tsk->thread.vm86)
                 return;

         bit = (address - 0xA0000) >> PAGE_SHIFT;
         if (bit < 32)
-                tsk->thread.screen_bitmap |= 1 << bit;
+                tsk->thread.vm86->screen_bitmap |= 1 << bit;
+#endif
 }

 static bool low_pfn(unsigned long pfn)
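
The patch boils down to one pattern: the vm86 state becomes an optional, lazily
allocated per-thread object (kzalloc() on the first vm86 entry in do_sys_vm86(),
kfree() via free_vm86() from exit_thread()). Below is a minimal, self-contained
userspace sketch of that lifecycle, for illustration only: struct vm86_state,
struct thread_state, enter_vm86() and free_vm86_state() are made-up stand-ins
for the kernel structures and functions above, and calloc()/free() stand in for
kzalloc()/kfree().

/*
 * Userspace illustration only -- NOT kernel code.  Mimics the lazy
 * allocation of the per-thread vm86 state introduced by this patch:
 * allocate on first use, reject re-entry while saved_sp0 is set,
 * free on thread exit.
 */
#include <stdio.h>
#include <stdlib.h>

struct vm86_state {                     /* stand-in for the new 'struct vm86' */
        unsigned long screen_bitmap;
        unsigned long v86flags;
        unsigned long v86mask;
        unsigned long saved_sp0;
};

struct thread_state {                   /* stand-in for 'struct thread_struct' */
        struct vm86_state *vm86;        /* NULL until vm86 is first used */
};

/* Mirrors the kzalloc-on-first-use logic in do_sys_vm86(). */
static int enter_vm86(struct thread_state *t)
{
        if (!t->vm86) {
                t->vm86 = calloc(1, sizeof(*t->vm86));  /* kzalloc() stand-in */
                if (!t->vm86)
                        return -1;      /* -ENOMEM in the kernel */
        }
        if (t->vm86->saved_sp0)
                return -2;              /* -EPERM: vm86 mode already entered */
        t->vm86->saved_sp0 = 1;         /* placeholder for saving thread.sp0 */
        return 0;
}

/* Mirrors the free_vm86() macro called from exit_thread(). */
static void free_vm86_state(struct thread_state *t)
{
        free(t->vm86);
        t->vm86 = NULL;
}

int main(void)
{
        struct thread_state t = { .vm86 = NULL };

        printf("first call:  %d (state allocated: %s)\n",
               enter_vm86(&t), t.vm86 ? "yes" : "no");
        printf("second call: %d (rejected, still in vm86 mode)\n",
               enter_vm86(&t));
        free_vm86_state(&t);
        return 0;
}

The point of the indirection is visible in the sketch: threads that never enter
vm86 mode carry only a NULL pointer, instead of a user pointer plus four
unsigned longs embedded in every thread_struct.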