2015-07-29 05:41:36

by Brian Gerst

[permalink] [raw]
Subject: [PATCH v4] x86: vm86 cleanups

The goal of this set of patches is to change vm86 support to return to
userspace with the normal exit paths instead of leaving data on the kernel
stack and jumping directly into the exit asm routines. This fixes issues
like ptrace and syscall auditing not working with vm86, and makes possible
cleanups in the syscall exit work code.

Changes from v3:
- Removed patches already accepted
- Renamed some fields per Ingo
- Added patches to allow disabling hardware IRQ support and to fix includes
- Added helper macro for free_vm86 (Note: could not use inline function due
to header issues)

Changes from v2:
- Use gs slot of regs32 (present but unused in lazy mode)
- Add access_ok() checks before get_user_try/put_user_try

Changes from v1:
- Added first two patches
- Changed userspace access to copy each field explicitly instead of relying
on the structure members being laid out in the same order.


2015-07-29 05:43:27

by Brian Gerst

[permalink] [raw]
Subject: [PATCH 1/8] x86/vm86: Move vm86 fields out of thread_struct

Allocate a separate structure for the vm86 fields.

Signed-off-by: Brian Gerst <[email protected]>
---
arch/x86/include/asm/processor.h | 11 +++-------
arch/x86/include/asm/vm86.h | 19 ++++++++++++++++-
arch/x86/kernel/process.c | 3 +++
arch/x86/kernel/vm86_32.c | 46 +++++++++++++++++++++++-----------------
arch/x86/mm/fault.c | 4 ++--
5 files changed, 53 insertions(+), 30 deletions(-)

diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 42c460c..19577dd 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -6,8 +6,8 @@
/* Forward declaration, a strange C thing */
struct task_struct;
struct mm_struct;
+struct vm86;

-#include <asm/vm86.h>
#include <asm/math_emu.h>
#include <asm/segment.h>
#include <asm/types.h>
@@ -400,13 +400,9 @@ struct thread_struct {
unsigned long cr2;
unsigned long trap_nr;
unsigned long error_code;
-#ifdef CONFIG_X86_32
+#ifdef CONFIG_VM86
/* Virtual 86 mode info */
- struct vm86plus_struct __user *vm86_info;
- unsigned long screen_bitmap;
- unsigned long v86flags;
- unsigned long v86mask;
- unsigned long saved_sp0;
+ struct vm86 *vm86;
#endif
/* IO permissions: */
unsigned long *io_bitmap_ptr;
@@ -710,7 +706,6 @@ static inline void spin_lock_prefetch(const void *x)

#define INIT_THREAD { \
.sp0 = TOP_OF_INIT_STACK, \
- .vm86_info = NULL, \
.sysenter_cs = __KERNEL_CS, \
.io_bitmap_ptr = NULL, \
}
diff --git a/arch/x86/include/asm/vm86.h b/arch/x86/include/asm/vm86.h
index 1d8de3f..20b43b7 100644
--- a/arch/x86/include/asm/vm86.h
+++ b/arch/x86/include/asm/vm86.h
@@ -1,7 +1,6 @@
#ifndef _ASM_X86_VM86_H
#define _ASM_X86_VM86_H

-
#include <asm/ptrace.h>
#include <uapi/asm/vm86.h>

@@ -58,6 +57,14 @@ struct kernel_vm86_struct {
*/
};

+struct vm86 {
+ struct vm86plus_struct __user *vm86_info;
+ unsigned long screen_bitmap;
+ unsigned long v86flags;
+ unsigned long v86mask;
+ unsigned long saved_sp0;
+};
+
#ifdef CONFIG_VM86

void handle_vm86_fault(struct kernel_vm86_regs *, long);
@@ -67,6 +74,14 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *);
struct task_struct;
void release_vm86_irqs(struct task_struct *);

+#define free_vm86(t) do { \
+ struct thread_struct *__t = (t); \
+ if (__t->vm86 != NULL) { \
+ kfree(__t->vm86); \
+ __t->vm86 = NULL; \
+ } \
+} while (0)
+
#else

#define handle_vm86_fault(a, b)
@@ -77,6 +92,8 @@ static inline int handle_vm86_trap(struct kernel_vm86_regs *a, long b, int c)
return 0;
}

+#define free_vm86(t) do { } while(0)
+
#endif /* CONFIG_VM86 */

#endif /* _ASM_X86_VM86_H */
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 397688b..2199d9b 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -29,6 +29,7 @@
#include <asm/debugreg.h>
#include <asm/nmi.h>
#include <asm/tlbflush.h>
+#include <asm/vm86.h>

/*
* per-CPU TSS segments. Threads are completely 'soft' on Linux,
@@ -110,6 +111,8 @@ void exit_thread(void)
kfree(bp);
}

+ free_vm86(t);
+
fpu__drop(fpu);
}

diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index e6c2b47..bfa59b1 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -44,6 +44,7 @@
#include <linux/ptrace.h>
#include <linux/audit.h>
#include <linux/stddef.h>
+#include <linux/slab.h>

#include <asm/uaccess.h>
#include <asm/io.h>
@@ -81,8 +82,8 @@
/*
* virtual flags (16 and 32-bit versions)
*/
-#define VFLAGS (*(unsigned short *)&(current->thread.v86flags))
-#define VEFLAGS (current->thread.v86flags)
+#define VFLAGS (*(unsigned short *)&(current->thread.vm86->v86flags))
+#define VEFLAGS (current->thread.vm86->v86flags)

#define set_flags(X, new, mask) \
((X) = ((X) & ~(mask)) | ((new) & (mask)))
@@ -96,6 +97,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
struct pt_regs *ret;
struct task_struct *tsk = current;
struct vm86plus_struct __user *user;
+ struct vm86 *vm86 = current->thread.vm86;
long err = 0;

/*
@@ -105,12 +107,12 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
*/
local_irq_enable();

- if (!tsk->thread.vm86_info) {
+ if (!vm86 || !vm86->vm86_info) {
pr_alert("no vm86_info: BAD\n");
do_exit(SIGSEGV);
}
- set_flags(regs->pt.flags, VEFLAGS, X86_EFLAGS_VIF | tsk->thread.v86mask);
- user = tsk->thread.vm86_info;
+ set_flags(regs->pt.flags, VEFLAGS, X86_EFLAGS_VIF | vm86->v86mask);
+ user = vm86->vm86_info;

if (!access_ok(VERIFY_WRITE, user, VMPI.is_vm86pus ?
sizeof(struct vm86plus_struct) :
@@ -137,7 +139,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
put_user_ex(regs->fs, &user->regs.fs);
put_user_ex(regs->gs, &user->regs.gs);

- put_user_ex(tsk->thread.screen_bitmap, &user->screen_bitmap);
+ put_user_ex(vm86->screen_bitmap, &user->screen_bitmap);
} put_user_catch(err);
if (err) {
pr_alert("could not access userspace vm86_info\n");
@@ -145,10 +147,10 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
}

tss = &per_cpu(cpu_tss, get_cpu());
- tsk->thread.sp0 = tsk->thread.saved_sp0;
+ tsk->thread.sp0 = vm86->saved_sp0;
tsk->thread.sysenter_cs = __KERNEL_CS;
load_sp0(tss, &tsk->thread);
- tsk->thread.saved_sp0 = 0;
+ vm86->saved_sp0 = 0;
put_cpu();

ret = KVM86->regs32;
@@ -242,9 +244,15 @@ static long do_sys_vm86(struct vm86plus_struct __user *v86, bool plus,
{
struct tss_struct *tss;
struct task_struct *tsk = current;
+ struct vm86 *vm86 = tsk->thread.vm86;
unsigned long err = 0;

- if (tsk->thread.saved_sp0)
+ if (!vm86) {
+ if (!(vm86 = kzalloc(sizeof(*vm86), GFP_KERNEL)))
+ return -ENOMEM;
+ tsk->thread.vm86 = vm86;
+ }
+ if (vm86->saved_sp0)
return -EPERM;

if (!access_ok(VERIFY_READ, v86, plus ?
@@ -295,7 +303,7 @@ static long do_sys_vm86(struct vm86plus_struct __user *v86, bool plus,
}

info->regs32 = current_pt_regs();
- tsk->thread.vm86_info = v86;
+ vm86->vm86_info = v86;

/*
* The flags register is also special: we cannot trust that the user
@@ -311,16 +319,16 @@ static long do_sys_vm86(struct vm86plus_struct __user *v86, bool plus,

switch (info->cpu_type) {
case CPU_286:
- tsk->thread.v86mask = 0;
+ vm86->v86mask = 0;
break;
case CPU_386:
- tsk->thread.v86mask = X86_EFLAGS_NT | X86_EFLAGS_IOPL;
+ vm86->v86mask = X86_EFLAGS_NT | X86_EFLAGS_IOPL;
break;
case CPU_486:
- tsk->thread.v86mask = X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL;
+ vm86->v86mask = X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL;
break;
default:
- tsk->thread.v86mask = X86_EFLAGS_ID | X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL;
+ vm86->v86mask = X86_EFLAGS_ID | X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL;
break;
}

@@ -328,7 +336,7 @@ static long do_sys_vm86(struct vm86plus_struct __user *v86, bool plus,
* Save old state, set default return value (%ax) to 0 (VM86_SIGNAL)
*/
info->regs32->ax = VM86_SIGNAL;
- tsk->thread.saved_sp0 = tsk->thread.sp0;
+ vm86->saved_sp0 = tsk->thread.sp0;
lazy_save_gs(info->regs32->gs);

tss = &per_cpu(cpu_tss, get_cpu());
@@ -338,7 +346,7 @@ static long do_sys_vm86(struct vm86plus_struct __user *v86, bool plus,
load_sp0(tss, &tsk->thread);
put_cpu();

- tsk->thread.screen_bitmap = info->screen_bitmap;
+ vm86->screen_bitmap = info->screen_bitmap;
if (info->flags & VM86_SCREEN_BITMAP)
mark_screen_rdonly(tsk->mm);

@@ -408,7 +416,7 @@ static inline void clear_AC(struct kernel_vm86_regs *regs)

static inline void set_vflags_long(unsigned long flags, struct kernel_vm86_regs *regs)
{
- set_flags(VEFLAGS, flags, current->thread.v86mask);
+ set_flags(VEFLAGS, flags, current->thread.vm86->v86mask);
set_flags(regs->pt.flags, flags, SAFE_MASK);
if (flags & X86_EFLAGS_IF)
set_IF(regs);
@@ -418,7 +426,7 @@ static inline void set_vflags_long(unsigned long flags, struct kernel_vm86_regs

static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_regs *regs)
{
- set_flags(VFLAGS, flags, current->thread.v86mask);
+ set_flags(VFLAGS, flags, current->thread.vm86->v86mask);
set_flags(regs->pt.flags, flags, SAFE_MASK);
if (flags & X86_EFLAGS_IF)
set_IF(regs);
@@ -433,7 +441,7 @@ static inline unsigned long get_vflags(struct kernel_vm86_regs *regs)
if (VEFLAGS & X86_EFLAGS_VIF)
flags |= X86_EFLAGS_IF;
flags |= X86_EFLAGS_IOPL;
- return flags | (VEFLAGS & current->thread.v86mask);
+ return flags | (VEFLAGS & current->thread.vm86->v86mask);
}

static inline int is_revectored(int nr, struct revectored_struct *bitmap)
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 81dcebf..5196ac4 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -315,12 +315,12 @@ check_v8086_mode(struct pt_regs *regs, unsigned long address,
{
unsigned long bit;

- if (!v8086_mode(regs))
+ if (!v8086_mode(regs) || !tsk->thread.vm86)
return;

bit = (address - 0xA0000) >> PAGE_SHIFT;
if (bit < 32)
- tsk->thread.screen_bitmap |= 1 << bit;
+ tsk->thread.vm86->screen_bitmap |= 1 << bit;
}

static bool low_pfn(unsigned long pfn)
--
2.4.3

2015-07-29 05:42:51

by Brian Gerst

[permalink] [raw]
Subject: [PATCH 2/8] x86/vm86: Move fields from kernel_vm86_struct

Move the non-regs fields to the off-stack data.

Signed-off-by: Brian Gerst <[email protected]>
---
arch/x86/include/asm/vm86.h | 16 ++++++++--------
arch/x86/kernel/vm86_32.c | 42 ++++++++++++++++++++++--------------------
2 files changed, 30 insertions(+), 28 deletions(-)

diff --git a/arch/x86/include/asm/vm86.h b/arch/x86/include/asm/vm86.h
index 20b43b7..47c7648 100644
--- a/arch/x86/include/asm/vm86.h
+++ b/arch/x86/include/asm/vm86.h
@@ -37,13 +37,7 @@ struct kernel_vm86_struct {
* Therefore, pt_regs in fact points to a complete 'kernel_vm86_struct'
* in kernelspace, hence we need not reget the data from userspace.
*/
-#define VM86_TSS_ESP0 flags
- unsigned long flags;
- unsigned long screen_bitmap;
- unsigned long cpu_type;
- struct revectored_struct int_revectored;
- struct revectored_struct int21_revectored;
- struct vm86plus_info_struct vm86plus;
+#define VM86_TSS_ESP0 regs32
struct pt_regs *regs32; /* here we save the pointer to the old regs */
/*
* The below is not part of the structure, but the stack layout continues
@@ -59,10 +53,16 @@ struct kernel_vm86_struct {

struct vm86 {
struct vm86plus_struct __user *vm86_info;
- unsigned long screen_bitmap;
unsigned long v86flags;
unsigned long v86mask;
unsigned long saved_sp0;
+
+ unsigned long flags;
+ unsigned long screen_bitmap;
+ unsigned long cpu_type;
+ struct revectored_struct int_revectored;
+ struct revectored_struct int21_revectored;
+ struct vm86plus_info_struct vm86plus;
};

#ifdef CONFIG_VM86
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index bfa59b1..f71b4b9 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -68,7 +68,6 @@


#define KVM86 ((struct kernel_vm86_struct *)regs)
-#define VMPI KVM86->vm86plus


/*
@@ -114,7 +113,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
set_flags(regs->pt.flags, VEFLAGS, X86_EFLAGS_VIF | vm86->v86mask);
user = vm86->vm86_info;

- if (!access_ok(VERIFY_WRITE, user, VMPI.is_vm86pus ?
+ if (!access_ok(VERIFY_WRITE, user, vm86->vm86plus.is_vm86pus ?
sizeof(struct vm86plus_struct) :
sizeof(struct vm86_struct))) {
pr_alert("could not access userspace vm86_info\n");
@@ -282,25 +281,27 @@ static long do_sys_vm86(struct vm86plus_struct __user *v86, bool plus,
get_user_ex(info->regs.fs, &v86->regs.fs);
get_user_ex(info->regs.gs, &v86->regs.gs);

- get_user_ex(info->flags, &v86->flags);
- get_user_ex(info->screen_bitmap, &v86->screen_bitmap);
- get_user_ex(info->cpu_type, &v86->cpu_type);
+ get_user_ex(vm86->flags, &v86->flags);
+ get_user_ex(vm86->screen_bitmap, &v86->screen_bitmap);
+ get_user_ex(vm86->cpu_type, &v86->cpu_type);
} get_user_catch(err);
if (err)
return err;

- if (copy_from_user(&info->int_revectored, &v86->int_revectored,
+ if (copy_from_user(&vm86->int_revectored, &v86->int_revectored,
sizeof(struct revectored_struct)))
return -EFAULT;
- if (copy_from_user(&info->int21_revectored, &v86->int21_revectored,
+ if (copy_from_user(&vm86->int21_revectored, &v86->int21_revectored,
sizeof(struct revectored_struct)))
return -EFAULT;
if (plus) {
- if (copy_from_user(&info->vm86plus, &v86->vm86plus,
+ if (copy_from_user(&vm86->vm86plus, &v86->vm86plus,
sizeof(struct vm86plus_info_struct)))
return -EFAULT;
- info->vm86plus.is_vm86pus = 1;
- }
+ vm86->vm86plus.is_vm86pus = 1;
+ } else
+ memset(&vm86->vm86plus, 0,
+ sizeof(struct vm86plus_info_struct));

info->regs32 = current_pt_regs();
vm86->vm86_info = v86;
@@ -317,7 +318,7 @@ static long do_sys_vm86(struct vm86plus_struct __user *v86, bool plus,

info->regs.pt.orig_ax = info->regs32->orig_ax;

- switch (info->cpu_type) {
+ switch (vm86->cpu_type) {
case CPU_286:
vm86->v86mask = 0;
break;
@@ -346,8 +347,7 @@ static long do_sys_vm86(struct vm86plus_struct __user *v86, bool plus,
load_sp0(tss, &tsk->thread);
put_cpu();

- vm86->screen_bitmap = info->screen_bitmap;
- if (info->flags & VM86_SCREEN_BITMAP)
+ if (vm86->flags & VM86_SCREEN_BITMAP)
mark_screen_rdonly(tsk->mm);

/*call __audit_syscall_exit since we do not exit via the normal paths */
@@ -539,12 +539,13 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
{
unsigned long __user *intr_ptr;
unsigned long segoffs;
+ struct kernel_vm86_info *vm86 = current->thread.vm86;

if (regs->pt.cs == BIOSSEG)
goto cannot_handle;
- if (is_revectored(i, &KVM86->int_revectored))
+ if (is_revectored(i, &vm86->int_revectored))
goto cannot_handle;
- if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
+ if (i == 0x21 && is_revectored(AH(regs), &vm86->int21_revectored))
goto cannot_handle;
intr_ptr = (unsigned long __user *) (i << 2);
if (get_user(segoffs, intr_ptr))
@@ -568,7 +569,7 @@ cannot_handle:

int handle_vm86_trap(struct kernel_vm86_regs *regs, long error_code, int trapno)
{
- if (VMPI.is_vm86pus) {
+ if (current->thread.vm86->vm86plus.is_vm86pus) {
if ((trapno == 3) || (trapno == 1)) {
KVM86->regs32->ax = VM86_TRAP + (trapno << 8);
/* setting this flag forces the code in entry_32.S to
@@ -595,12 +596,13 @@ void handle_vm86_fault(struct kernel_vm86_regs *regs, long error_code)
unsigned char __user *ssp;
unsigned short ip, sp, orig_flags;
int data32, pref_done;
+ struct vm86plus_info_struct *vmpi = &current->thread.vm86->vm86plus;

#define CHECK_IF_IN_TRAP \
- if (VMPI.vm86dbg_active && VMPI.vm86dbg_TFpendig) \
+ if (vmpi->vm86dbg_active && vmpi->vm86dbg_TFpendig) \
newflags |= X86_EFLAGS_TF
#define VM86_FAULT_RETURN do { \
- if (VMPI.force_return_for_pic && (VEFLAGS & (X86_EFLAGS_IF | X86_EFLAGS_VIF))) \
+ if (vmpi->force_return_for_pic && (VEFLAGS & (X86_EFLAGS_IF | X86_EFLAGS_VIF))) \
return_to_32bit(regs, VM86_PICRETURN); \
if (orig_flags & X86_EFLAGS_TF) \
handle_vm86_trap(regs, 0, 1); \
@@ -670,8 +672,8 @@ void handle_vm86_fault(struct kernel_vm86_regs *regs, long error_code)
case 0xcd: {
int intno = popb(csp, ip, simulate_sigsegv);
IP(regs) = ip;
- if (VMPI.vm86dbg_active) {
- if ((1 << (intno & 7)) & VMPI.vm86dbg_intxxtab[intno >> 3])
+ if (vmpi->vm86dbg_active) {
+ if ((1 << (intno & 7)) & vmpi->vm86dbg_intxxtab[intno >> 3])
return_to_32bit(regs, VM86_INTx + (intno << 8));
}
do_int(regs, intno, ssp, sp);
--
2.4.3

2015-07-29 05:41:40

by Brian Gerst

[permalink] [raw]
Subject: [PATCH 3/8] x86/vm86: Eliminate kernel_vm86_struct

Now there is no vm86-specific data left on the kernel stack while in
userspace, except for the 32-bit regs.

Signed-off-by: Brian Gerst <[email protected]>
---
arch/x86/include/asm/vm86.h | 25 +-----------
arch/x86/kernel/vm86_32.c | 95 +++++++++++++++++++--------------------------
2 files changed, 42 insertions(+), 78 deletions(-)

diff --git a/arch/x86/include/asm/vm86.h b/arch/x86/include/asm/vm86.h
index 47c7648..226d6c1 100644
--- a/arch/x86/include/asm/vm86.h
+++ b/arch/x86/include/asm/vm86.h
@@ -27,32 +27,9 @@ struct kernel_vm86_regs {
unsigned short gs, __gsh;
};

-struct kernel_vm86_struct {
- struct kernel_vm86_regs regs;
-/*
- * the below part remains on the kernel stack while we are in VM86 mode.
- * 'tss.esp0' then contains the address of VM86_TSS_ESP0 below, and when we
- * get forced back from VM86, the CPU and "SAVE_ALL" will restore the above
- * 'struct kernel_vm86_regs' with the then actual values.
- * Therefore, pt_regs in fact points to a complete 'kernel_vm86_struct'
- * in kernelspace, hence we need not reget the data from userspace.
- */
-#define VM86_TSS_ESP0 regs32
- struct pt_regs *regs32; /* here we save the pointer to the old regs */
-/*
- * The below is not part of the structure, but the stack layout continues
- * this way. In front of 'return-eip' may be some data, depending on
- * compilation, so we don't rely on this and save the pointer to 'oldregs'
- * in 'regs32' above.
- * However, with GCC-2.7.2 and the current CFLAGS you see exactly this:
-
- long return-eip; from call to vm86()
- struct pt_regs oldregs; user space registers as saved by syscall
- */
-};
-
struct vm86 {
struct vm86plus_struct __user *vm86_info;
+ struct pt_regs *regs32;
unsigned long v86flags;
unsigned long v86mask;
unsigned long saved_sp0;
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index f71b4b9..696ef76 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -67,9 +67,6 @@
*/


-#define KVM86 ((struct kernel_vm86_struct *)regs)
-
-
/*
* 8- and 16-bit register defines..
*/
@@ -152,7 +149,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
vm86->saved_sp0 = 0;
put_cpu();

- ret = KVM86->regs32;
+ ret = vm86->regs32;

lazy_load_gs(ret->gs);

@@ -194,29 +191,16 @@ out:


static int do_vm86_irq_handling(int subfunction, int irqnumber);
-static long do_sys_vm86(struct vm86plus_struct __user *v86, bool plus,
- struct kernel_vm86_struct *info);
+static long do_sys_vm86(struct vm86plus_struct __user *v86, bool plus);

SYSCALL_DEFINE1(vm86old, struct vm86_struct __user *, v86)
{
- struct kernel_vm86_struct info; /* declare this _on top_,
- * this avoids wasting of stack space.
- * This remains on the stack until we
- * return to 32 bit user space.
- */
-
- return do_sys_vm86((struct vm86plus_struct __user *) v86, false, &info);
+ return do_sys_vm86((struct vm86plus_struct __user *) v86, false);
}


SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg)
{
- struct kernel_vm86_struct info; /* declare this _on top_,
- * this avoids wasting of stack space.
- * This remains on the stack until we
- * return to 32 bit user space.
- */
-
switch (cmd) {
case VM86_REQUEST_IRQ:
case VM86_FREE_IRQ:
@@ -234,16 +218,17 @@ SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg)
}

/* we come here only for functions VM86_ENTER, VM86_ENTER_NO_BYPASS */
- return do_sys_vm86((struct vm86plus_struct __user *) arg, true, &info);
+ return do_sys_vm86((struct vm86plus_struct __user *) arg, true);
}


-static long do_sys_vm86(struct vm86plus_struct __user *v86, bool plus,
- struct kernel_vm86_struct *info)
+static long do_sys_vm86(struct vm86plus_struct __user *v86, bool plus)
{
struct tss_struct *tss;
struct task_struct *tsk = current;
struct vm86 *vm86 = tsk->thread.vm86;
+ struct kernel_vm86_regs vm86regs;
+ struct pt_regs *regs32 = current_pt_regs();
unsigned long err = 0;

if (!vm86) {
@@ -259,27 +244,27 @@ static long do_sys_vm86(struct vm86plus_struct __user *v86, bool plus,
sizeof(struct vm86plus_struct)))
return -EFAULT;

- memset(info, 0, sizeof(*info));
+ memset(&vm86regs, 0, sizeof(vm86regs));
get_user_try {
unsigned short seg;
- get_user_ex(info->regs.pt.bx, &v86->regs.ebx);
- get_user_ex(info->regs.pt.cx, &v86->regs.ecx);
- get_user_ex(info->regs.pt.dx, &v86->regs.edx);
- get_user_ex(info->regs.pt.si, &v86->regs.esi);
- get_user_ex(info->regs.pt.di, &v86->regs.edi);
- get_user_ex(info->regs.pt.bp, &v86->regs.ebp);
- get_user_ex(info->regs.pt.ax, &v86->regs.eax);
- get_user_ex(info->regs.pt.ip, &v86->regs.eip);
+ get_user_ex(vm86regs.pt.bx, &v86->regs.ebx);
+ get_user_ex(vm86regs.pt.cx, &v86->regs.ecx);
+ get_user_ex(vm86regs.pt.dx, &v86->regs.edx);
+ get_user_ex(vm86regs.pt.si, &v86->regs.esi);
+ get_user_ex(vm86regs.pt.di, &v86->regs.edi);
+ get_user_ex(vm86regs.pt.bp, &v86->regs.ebp);
+ get_user_ex(vm86regs.pt.ax, &v86->regs.eax);
+ get_user_ex(vm86regs.pt.ip, &v86->regs.eip);
get_user_ex(seg, &v86->regs.cs);
- info->regs.pt.cs = seg;
- get_user_ex(info->regs.pt.flags, &v86->regs.eflags);
- get_user_ex(info->regs.pt.sp, &v86->regs.esp);
+ vm86regs.pt.cs = seg;
+ get_user_ex(vm86regs.pt.flags, &v86->regs.eflags);
+ get_user_ex(vm86regs.pt.sp, &v86->regs.esp);
get_user_ex(seg, &v86->regs.ss);
- info->regs.pt.ss = seg;
- get_user_ex(info->regs.es, &v86->regs.es);
- get_user_ex(info->regs.ds, &v86->regs.ds);
- get_user_ex(info->regs.fs, &v86->regs.fs);
- get_user_ex(info->regs.gs, &v86->regs.gs);
+ vm86regs.pt.ss = seg;
+ get_user_ex(vm86regs.es, &v86->regs.es);
+ get_user_ex(vm86regs.ds, &v86->regs.ds);
+ get_user_ex(vm86regs.fs, &v86->regs.fs);
+ get_user_ex(vm86regs.gs, &v86->regs.gs);

get_user_ex(vm86->flags, &v86->flags);
get_user_ex(vm86->screen_bitmap, &v86->screen_bitmap);
@@ -302,8 +287,7 @@ static long do_sys_vm86(struct vm86plus_struct __user *v86, bool plus,
} else
memset(&vm86->vm86plus, 0,
sizeof(struct vm86plus_info_struct));
-
- info->regs32 = current_pt_regs();
+ vm86->regs32 = regs32;
vm86->vm86_info = v86;

/*
@@ -311,12 +295,12 @@ static long do_sys_vm86(struct vm86plus_struct __user *v86, bool plus,
* has set it up safely, so this makes sure interrupt etc flags are
* inherited from protected mode.
*/
- VEFLAGS = info->regs.pt.flags;
- info->regs.pt.flags &= SAFE_MASK;
- info->regs.pt.flags |= info->regs32->flags & ~SAFE_MASK;
- info->regs.pt.flags |= X86_VM_MASK;
+ VEFLAGS = vm86regs.pt.flags;
+ vm86regs.pt.flags &= SAFE_MASK;
+ vm86regs.pt.flags |= regs32->flags & ~SAFE_MASK;
+ vm86regs.pt.flags |= X86_VM_MASK;

- info->regs.pt.orig_ax = info->regs32->orig_ax;
+ vm86regs.pt.orig_ax = regs32->orig_ax;

switch (vm86->cpu_type) {
case CPU_286:
@@ -336,12 +320,13 @@ static long do_sys_vm86(struct vm86plus_struct __user *v86, bool plus,
/*
* Save old state, set default return value (%ax) to 0 (VM86_SIGNAL)
*/
- info->regs32->ax = VM86_SIGNAL;
+ regs32->ax = VM86_SIGNAL;
vm86->saved_sp0 = tsk->thread.sp0;
- lazy_save_gs(info->regs32->gs);
+ lazy_save_gs(regs32->gs);

tss = &per_cpu(cpu_tss, get_cpu());
- tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
+ /* Set new sp0 right below 32-bit regs */
+ tsk->thread.sp0 = (unsigned long) regs32;
if (cpu_has_sep)
tsk->thread.sysenter_cs = 0;
load_sp0(tss, &tsk->thread);
@@ -364,7 +349,7 @@ static long do_sys_vm86(struct vm86plus_struct __user *v86, bool plus,
#endif
"jmp resume_userspace"
: /* no outputs */
- :"r" (&info->regs), "r" (task_thread_info(tsk)), "r" (0));
+ :"r" (&vm86regs), "r" (task_thread_info(tsk)), "r" (0));
unreachable(); /* we never return here */
}

@@ -539,7 +524,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
{
unsigned long __user *intr_ptr;
unsigned long segoffs;
- struct kernel_vm86_info *vm86 = current->thread.vm86;
+ struct vm86 *vm86 = current->thread.vm86;

if (regs->pt.cs == BIOSSEG)
goto cannot_handle;
@@ -569,12 +554,14 @@ cannot_handle:

int handle_vm86_trap(struct kernel_vm86_regs *regs, long error_code, int trapno)
{
- if (current->thread.vm86->vm86plus.is_vm86pus) {
+ struct vm86 *vm86 = current->thread.vm86;
+
+ if (vm86->vm86plus.is_vm86pus) {
if ((trapno == 3) || (trapno == 1)) {
- KVM86->regs32->ax = VM86_TRAP + (trapno << 8);
+ vm86->regs32->ax = VM86_TRAP + (trapno << 8);
/* setting this flag forces the code in entry_32.S to
the path where we call save_v86_state() and change
- the stack pointer to KVM86->regs32 */
+ the stack pointer to regs32 */
set_thread_flag(TIF_NOTIFY_RESUME);
return 0;
}
--
2.4.3

2015-07-29 05:42:49

by Brian Gerst

[permalink] [raw]
Subject: [PATCH 4/8] x86/vm86: Use the normal pt_regs area for vm86

Change to use the normal pt_regs area to enter and exit vm86 mode. This is
done by increasing the padding at the top of the stack to make room for the
extra vm86 segment slots in the IRET frame. It then saves the 32-bit regs
in the off-stack vm86 data, and copies in the vm86 regs. Exiting back to
32-bit mode does the reverse. This allows removing the hacks to jump directly
into the exit asm code due to having to change the stack pointer. Returning
normally from the vm86 syscall and the exception handlers allows things like
ptrace and auditing to work properly.

Signed-off-by: Brian Gerst <[email protected]>
---
arch/x86/entry/entry_32.S | 24 +-------
arch/x86/include/asm/thread_info.h | 11 ++--
arch/x86/include/asm/vm86.h | 6 +-
arch/x86/kernel/signal.c | 3 +
arch/x86/kernel/vm86_32.c | 110 +++++++++++++++----------------------
5 files changed, 60 insertions(+), 94 deletions(-)

diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index 21dc60a..f940e24 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -525,34 +525,12 @@ work_resched:

work_notifysig: # deal with pending signals and
# notify-resume requests
-#ifdef CONFIG_VM86
- testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
- movl %esp, %eax
- jnz work_notifysig_v86 # returning to kernel-space or
- # vm86-space
-1:
-#else
- movl %esp, %eax
-#endif
TRACE_IRQS_ON
ENABLE_INTERRUPTS(CLBR_NONE)
- movb PT_CS(%esp), %bl
- andb $SEGMENT_RPL_MASK, %bl
- cmpb $USER_RPL, %bl
- jb resume_kernel
+ movl %esp, %eax
xorl %edx, %edx
call do_notify_resume
jmp resume_userspace
-
-#ifdef CONFIG_VM86
- ALIGN
-work_notifysig_v86:
- pushl %ecx # save ti_flags for do_notify_resume
- call save_v86_state # %eax contains pt_regs pointer
- popl %ecx
- movl %eax, %esp
- jmp 1b
-#endif
END(work_pending)

# perform syscall exit tracing
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index 225ee54..fdad5c2 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -27,14 +27,17 @@
* Without this offset, that can result in a page fault. (We are
* careful that, in this case, the value we read doesn't matter.)
*
- * In vm86 mode, the hardware frame is much longer still, but we neither
- * access the extra members from NMI context, nor do we write such a
- * frame at sp0 at all.
+ * In vm86 mode, the hardware frame is much longer still, so add 16
+ * bytes to make room for the real-mode segments.
*
* x86_64 has a fixed-length stack frame.
*/
#ifdef CONFIG_X86_32
-# define TOP_OF_KERNEL_STACK_PADDING 8
+# ifdef CONFIG_VM86
+# define TOP_OF_KERNEL_STACK_PADDING 16
+# else
+# define TOP_OF_KERNEL_STACK_PADDING 8
+# endif
#else
# define TOP_OF_KERNEL_STACK_PADDING 0
#endif
diff --git a/arch/x86/include/asm/vm86.h b/arch/x86/include/asm/vm86.h
index 226d6c1..dd45aa1 100644
--- a/arch/x86/include/asm/vm86.h
+++ b/arch/x86/include/asm/vm86.h
@@ -29,7 +29,7 @@ struct kernel_vm86_regs {

struct vm86 {
struct vm86plus_struct __user *vm86_info;
- struct pt_regs *regs32;
+ struct pt_regs regs32;
unsigned long v86flags;
unsigned long v86mask;
unsigned long saved_sp0;
@@ -46,7 +46,7 @@ struct vm86 {

void handle_vm86_fault(struct kernel_vm86_regs *, long);
int handle_vm86_trap(struct kernel_vm86_regs *, long, int);
-struct pt_regs *save_v86_state(struct kernel_vm86_regs *);
+void save_v86_state(struct kernel_vm86_regs *, int);

struct task_struct;
void release_vm86_irqs(struct task_struct *);
@@ -69,6 +69,8 @@ static inline int handle_vm86_trap(struct kernel_vm86_regs *a, long b, int c)
return 0;
}

+static inline void save_v86_state(struct kernel_vm86_regs *, int) { }
+
#define free_vm86(t) do { } while(0)

#endif /* CONFIG_VM86 */
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index 7e88cc7..bfd736e 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -635,6 +635,9 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs)
bool stepping, failed;
struct fpu *fpu = &current->thread.fpu;

+ if (v8086_mode(regs))
+ save_v86_state((struct kernel_vm86_regs *) regs, VM86_SIGNAL);
+
/* Are we from a system call? */
if (syscall_get_nr(current, regs) >= 0) {
/* If so, check system call restarting.. */
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index 696ef76..ffe98ec 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -50,6 +50,7 @@
#include <asm/io.h>
#include <asm/tlbflush.h>
#include <asm/irq.h>
+#include <asm/traps.h>

/*
* Known problems:
@@ -87,10 +88,9 @@
#define SAFE_MASK (0xDD5)
#define RETURN_MASK (0xDFF)

-struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
+void save_v86_state(struct kernel_vm86_regs *regs, int retval)
{
struct tss_struct *tss;
- struct pt_regs *ret;
struct task_struct *tsk = current;
struct vm86plus_struct __user *user;
struct vm86 *vm86 = current->thread.vm86;
@@ -149,11 +149,11 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
vm86->saved_sp0 = 0;
put_cpu();

- ret = vm86->regs32;
+ memcpy(&regs->pt, &vm86->regs32, sizeof(struct pt_regs));

- lazy_load_gs(ret->gs);
+ lazy_load_gs(vm86->regs32.gs);

- return ret;
+ regs->pt.ax = retval;
}

static void mark_screen_rdonly(struct mm_struct *mm)
@@ -228,7 +228,7 @@ static long do_sys_vm86(struct vm86plus_struct __user *v86, bool plus)
struct task_struct *tsk = current;
struct vm86 *vm86 = tsk->thread.vm86;
struct kernel_vm86_regs vm86regs;
- struct pt_regs *regs32 = current_pt_regs();
+ struct pt_regs *regs = current_pt_regs();
unsigned long err = 0;

if (!vm86) {
@@ -287,7 +287,8 @@ static long do_sys_vm86(struct vm86plus_struct __user *v86, bool plus)
} else
memset(&vm86->vm86plus, 0,
sizeof(struct vm86plus_info_struct));
- vm86->regs32 = regs32;
+
+ memcpy(&vm86->regs32, regs, sizeof(struct pt_regs));
vm86->vm86_info = v86;

/*
@@ -297,10 +298,10 @@ static long do_sys_vm86(struct vm86plus_struct __user *v86, bool plus)
*/
VEFLAGS = vm86regs.pt.flags;
vm86regs.pt.flags &= SAFE_MASK;
- vm86regs.pt.flags |= regs32->flags & ~SAFE_MASK;
+ vm86regs.pt.flags |= regs->flags & ~SAFE_MASK;
vm86regs.pt.flags |= X86_VM_MASK;

- vm86regs.pt.orig_ax = regs32->orig_ax;
+ vm86regs.pt.orig_ax = regs->orig_ax;

switch (vm86->cpu_type) {
case CPU_286:
@@ -318,15 +319,14 @@ static long do_sys_vm86(struct vm86plus_struct __user *v86, bool plus)
}

/*
- * Save old state, set default return value (%ax) to 0 (VM86_SIGNAL)
+ * Save old state
*/
- regs32->ax = VM86_SIGNAL;
vm86->saved_sp0 = tsk->thread.sp0;
- lazy_save_gs(regs32->gs);
+ lazy_save_gs(vm86->regs32.gs);

tss = &per_cpu(cpu_tss, get_cpu());
- /* Set new sp0 right below 32-bit regs */
- tsk->thread.sp0 = (unsigned long) regs32;
+ /* make room for real-mode segments */
+ tsk->thread.sp0 += 16;
if (cpu_has_sep)
tsk->thread.sysenter_cs = 0;
load_sp0(tss, &tsk->thread);
@@ -335,41 +335,14 @@ static long do_sys_vm86(struct vm86plus_struct __user *v86, bool plus)
if (vm86->flags & VM86_SCREEN_BITMAP)
mark_screen_rdonly(tsk->mm);

- /*call __audit_syscall_exit since we do not exit via the normal paths */
-#ifdef CONFIG_AUDITSYSCALL
- if (unlikely(current->audit_context))
- __audit_syscall_exit(1, 0);
-#endif
-
- __asm__ __volatile__(
- "movl %0,%%esp\n\t"
- "movl %1,%%ebp\n\t"
-#ifdef CONFIG_X86_32_LAZY_GS
- "mov %2, %%gs\n\t"
-#endif
- "jmp resume_userspace"
- : /* no outputs */
- :"r" (&vm86regs), "r" (task_thread_info(tsk)), "r" (0));
- unreachable(); /* we never return here */
-}
-
-static inline void return_to_32bit(struct kernel_vm86_regs *regs16, int retval)
-{
- struct pt_regs *regs32;
-
- regs32 = save_v86_state(regs16);
- regs32->ax = retval;
- __asm__ __volatile__("movl %0,%%esp\n\t"
- "movl %1,%%ebp\n\t"
- "jmp resume_userspace"
- : : "r" (regs32), "r" (current_thread_info()));
+ memcpy((struct kernel_vm86_regs *)regs, &vm86regs, sizeof(vm86regs));
+ force_iret();
+ return regs->ax;
}

static inline void set_IF(struct kernel_vm86_regs *regs)
{
VEFLAGS |= X86_EFLAGS_VIF;
- if (VEFLAGS & X86_EFLAGS_VIP)
- return_to_32bit(regs, VM86_STI);
}

static inline void clear_IF(struct kernel_vm86_regs *regs)
@@ -549,7 +522,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
return;

cannot_handle:
- return_to_32bit(regs, VM86_INTx + (i << 8));
+ save_v86_state(regs, VM86_INTx + (i << 8));
}

int handle_vm86_trap(struct kernel_vm86_regs *regs, long error_code, int trapno)
@@ -558,11 +531,7 @@ int handle_vm86_trap(struct kernel_vm86_regs *regs, long error_code, int trapno)

if (vm86->vm86plus.is_vm86pus) {
if ((trapno == 3) || (trapno == 1)) {
- vm86->regs32->ax = VM86_TRAP + (trapno << 8);
- /* setting this flag forces the code in entry_32.S to
- the path where we call save_v86_state() and change
- the stack pointer to regs32 */
- set_thread_flag(TIF_NOTIFY_RESUME);
+ save_v86_state(regs, VM86_TRAP + (trapno << 8));
return 0;
}
do_int(regs, trapno, (unsigned char __user *) (regs->pt.ss << 4), SP(regs));
@@ -588,12 +557,6 @@ void handle_vm86_fault(struct kernel_vm86_regs *regs, long error_code)
#define CHECK_IF_IN_TRAP \
if (vmpi->vm86dbg_active && vmpi->vm86dbg_TFpendig) \
newflags |= X86_EFLAGS_TF
-#define VM86_FAULT_RETURN do { \
- if (vmpi->force_return_for_pic && (VEFLAGS & (X86_EFLAGS_IF | X86_EFLAGS_VIF))) \
- return_to_32bit(regs, VM86_PICRETURN); \
- if (orig_flags & X86_EFLAGS_TF) \
- handle_vm86_trap(regs, 0, 1); \
- return; } while (0)

orig_flags = *(unsigned short *)&regs->pt.flags;

@@ -632,7 +595,7 @@ void handle_vm86_fault(struct kernel_vm86_regs *regs, long error_code)
SP(regs) -= 2;
}
IP(regs) = ip;
- VM86_FAULT_RETURN;
+ goto vm86_fault_return;

/* popf */
case 0x9d:
@@ -652,7 +615,7 @@ void handle_vm86_fault(struct kernel_vm86_regs *regs, long error_code)
else
set_vflags_short(newflags, regs);

- VM86_FAULT_RETURN;
+ goto check_vip;
}

/* int xx */
@@ -660,8 +623,10 @@ void handle_vm86_fault(struct kernel_vm86_regs *regs, long error_code)
int intno = popb(csp, ip, simulate_sigsegv);
IP(regs) = ip;
if (vmpi->vm86dbg_active) {
- if ((1 << (intno & 7)) & vmpi->vm86dbg_intxxtab[intno >> 3])
- return_to_32bit(regs, VM86_INTx + (intno << 8));
+ if ((1 << (intno & 7)) & vmpi->vm86dbg_intxxtab[intno >> 3]) {
+ save_v86_state(regs, VM86_INTx + (intno << 8));
+ return;
+ }
}
do_int(regs, intno, ssp, sp);
return;
@@ -692,14 +657,14 @@ void handle_vm86_fault(struct kernel_vm86_regs *regs, long error_code)
} else {
set_vflags_short(newflags, regs);
}
- VM86_FAULT_RETURN;
+ goto check_vip;
}

/* cli */
case 0xfa:
IP(regs) = ip;
clear_IF(regs);
- VM86_FAULT_RETURN;
+ goto vm86_fault_return;

/* sti */
/*
@@ -711,12 +676,27 @@ void handle_vm86_fault(struct kernel_vm86_regs *regs, long error_code)
case 0xfb:
IP(regs) = ip;
set_IF(regs);
- VM86_FAULT_RETURN;
+ goto check_vip;

default:
- return_to_32bit(regs, VM86_UNKNOWN);
+ save_v86_state(regs, VM86_UNKNOWN);
+ }
+
+ return;
+
+check_vip:
+ if (VEFLAGS & X86_EFLAGS_VIP) {
+ save_v86_state(regs, VM86_STI);
+ return;
}

+vm86_fault_return:
+ if (vmpi->force_return_for_pic && (VEFLAGS & (X86_EFLAGS_IF | X86_EFLAGS_VIF))) {
+ save_v86_state(regs, VM86_PICRETURN);
+ return;
+ }
+ if (orig_flags & X86_EFLAGS_TF)
+ handle_vm86_trap(regs, 0, X86_TRAP_DB);
return;

simulate_sigsegv:
@@ -730,7 +710,7 @@ simulate_sigsegv:
* should be a mixture of the two, but how do we
* get the information? [KD]
*/
- return_to_32bit(regs, VM86_UNKNOWN);
+ save_v86_state(regs, VM86_UNKNOWN);
}

/* ---------------- vm86 special IRQ passing stuff ----------------- */
--
2.4.3

2015-07-29 05:42:31

by Brian Gerst

[permalink] [raw]
Subject: [PATCH 5/8] x86/vm86: Add a separate config option for hardware IRQ handling

Allow disabling hardware interrupt support for vm86.

Signed-off-by: Brian Gerst <[email protected]>
---
arch/x86/Kconfig | 8 ++++++++
arch/x86/include/asm/irq_vectors.h | 10 ----------
arch/x86/include/asm/vm86.h | 20 ++++++++++++++++++--
arch/x86/kernel/vm86_32.c | 12 ++++++++++--
4 files changed, 36 insertions(+), 14 deletions(-)

diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index cbd2d62..7c7ec31 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1067,6 +1067,14 @@ config VM86
bool
default X86_LEGACY_VM86

+config VM86_INTERRUPTS
+ bool "Enable VM86 interrupt support"
+ default y
+ depends on VM86
+ ---help---
+ This option allows VM86 programs to request interrupts for
+ real mode hardware drivers.
+
config X86_16BIT
bool "Enable support for 16-bit segments" if EXPERT
default y
diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h
index 4c2d2eb..6ca9fd6 100644
--- a/arch/x86/include/asm/irq_vectors.h
+++ b/arch/x86/include/asm/irq_vectors.h
@@ -117,16 +117,6 @@

#define FPU_IRQ 13

-#define FIRST_VM86_IRQ 3
-#define LAST_VM86_IRQ 15
-
-#ifndef __ASSEMBLY__
-static inline int invalid_vm86_irq(int irq)
-{
- return irq < FIRST_VM86_IRQ || irq > LAST_VM86_IRQ;
-}
-#endif
-
/*
* Size the maximum number of interrupts.
*
diff --git a/arch/x86/include/asm/vm86.h b/arch/x86/include/asm/vm86.h
index dd45aa1..05f6394 100644
--- a/arch/x86/include/asm/vm86.h
+++ b/arch/x86/include/asm/vm86.h
@@ -49,7 +49,6 @@ int handle_vm86_trap(struct kernel_vm86_regs *, long, int);
void save_v86_state(struct kernel_vm86_regs *, int);

struct task_struct;
-void release_vm86_irqs(struct task_struct *);

#define free_vm86(t) do { \
struct thread_struct *__t = (t); \
@@ -62,7 +61,6 @@ void release_vm86_irqs(struct task_struct *);
#else

#define handle_vm86_fault(a, b)
-#define release_vm86_irqs(a)

static inline int handle_vm86_trap(struct kernel_vm86_regs *a, long b, int c)
{
@@ -75,4 +73,22 @@ static inline void save_v86_state(struct kernel_vm86_regs *, int) { }

#endif /* CONFIG_VM86 */

+#ifdef CONFIG_VM86_INTERRUPTS
+
+#define FIRST_VM86_IRQ 3
+#define LAST_VM86_IRQ 15
+
+static inline int invalid_vm86_irq(int irq)
+{
+ return irq < FIRST_VM86_IRQ || irq > LAST_VM86_IRQ;
+}
+
+void release_vm86_irqs(struct task_struct *);
+
+#else /* CONFIG_VM86_INTERRUPTS */
+
+static inline void release_vm86_irqs(struct task_struct *tsk) { }
+
+#endif /* CONFIG_VM86_INTERRUPTS */
+
#endif /* _ASM_X86_VM86_H */
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index ffe98ec..9cdd33c 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -189,8 +189,15 @@ out:
}


-
+#ifdef CONFIG_VM86_INTERRUPTS
static int do_vm86_irq_handling(int subfunction, int irqnumber);
+#else
+static inline int do_vm86_irq_handling(int subfunction, int irqnumber)
+{
+ return -EINVAL;
+}
+#endif
+
static long do_sys_vm86(struct vm86plus_struct __user *v86, bool plus);

SYSCALL_DEFINE1(vm86old, struct vm86_struct __user *, v86)
@@ -713,6 +720,7 @@ simulate_sigsegv:
save_v86_state(regs, VM86_UNKNOWN);
}

+#ifdef CONFIG_VM86_INTERRUPTS
/* ---------------- vm86 special IRQ passing stuff ----------------- */

#define VM86_IRQNAME "vm86irq"
@@ -828,4 +836,4 @@ static int do_vm86_irq_handling(int subfunction, int irqnumber)
}
return -EINVAL;
}
-
+#endif
--
2.4.3

2015-07-29 05:41:42

by Brian Gerst

[permalink] [raw]
Subject: [PATCH 6/8] x86/vm86: Clean up vm86.h includes

vm86.h was being implicitly included in a lot of places via processor.h, which
in turn got it from math_emu.h. Break that chain and explicitly include vm86.h
in all files that need it. Also remove unused vm86 field from math_emu_info.

Signed-off-by: Brian Gerst <[email protected]>
---
arch/x86/include/asm/math_emu.h | 6 +-----
arch/x86/include/asm/syscalls.h | 1 +
arch/x86/kernel/process_32.c | 1 +
arch/x86/kernel/signal.c | 1 +
arch/x86/kernel/traps.c | 1 +
arch/x86/kernel/vm86_32.c | 1 +
arch/x86/math-emu/get_address.c | 1 +
arch/x86/mm/fault.c | 1 +
8 files changed, 8 insertions(+), 5 deletions(-)

diff --git a/arch/x86/include/asm/math_emu.h b/arch/x86/include/asm/math_emu.h
index 031f626..0d9b14f 100644
--- a/arch/x86/include/asm/math_emu.h
+++ b/arch/x86/include/asm/math_emu.h
@@ -2,7 +2,6 @@
#define _ASM_X86_MATH_EMU_H

#include <asm/ptrace.h>
-#include <asm/vm86.h>

/* This structure matches the layout of the data saved to the stack
following a device-not-present interrupt, part of it saved
@@ -10,9 +9,6 @@
*/
struct math_emu_info {
long ___orig_eip;
- union {
- struct pt_regs *regs;
- struct kernel_vm86_regs *vm86;
- };
+ struct pt_regs *regs;
};
#endif /* _ASM_X86_MATH_EMU_H */
diff --git a/arch/x86/include/asm/syscalls.h b/arch/x86/include/asm/syscalls.h
index 592a6a6..91dfcaf 100644
--- a/arch/x86/include/asm/syscalls.h
+++ b/arch/x86/include/asm/syscalls.h
@@ -37,6 +37,7 @@ asmlinkage long sys_get_thread_area(struct user_desc __user *);
asmlinkage unsigned long sys_sigreturn(void);

/* kernel/vm86_32.c */
+struct vm86_struct;
asmlinkage long sys_vm86old(struct vm86_struct __user *);
asmlinkage long sys_vm86(unsigned long, unsigned long);

diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index f73c962..c13df2c 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -53,6 +53,7 @@
#include <asm/syscalls.h>
#include <asm/debugreg.h>
#include <asm/switch_to.h>
+#include <asm/vm86.h>

asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
asmlinkage void ret_from_kernel_thread(void) __asm__("ret_from_kernel_thread");
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index bfd736e..07eb844 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -31,6 +31,7 @@
#include <asm/vdso.h>
#include <asm/mce.h>
#include <asm/sighandling.h>
+#include <asm/vm86.h>

#ifdef CONFIG_X86_64
#include <asm/proto.h>
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 8e65d8a..86a82ea 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -62,6 +62,7 @@
#include <asm/fpu/xstate.h>
#include <asm/trace/mpx.h>
#include <asm/mpx.h>
+#include <asm/vm86.h>

#ifdef CONFIG_X86_64
#include <asm/x86_init.h>
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index 9cdd33c..6fce378 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -51,6 +51,7 @@
#include <asm/tlbflush.h>
#include <asm/irq.h>
#include <asm/traps.h>
+#include <asm/vm86.h>

/*
* Known problems:
diff --git a/arch/x86/math-emu/get_address.c b/arch/x86/math-emu/get_address.c
index 6ef5e99..a2eefb1 100644
--- a/arch/x86/math-emu/get_address.c
+++ b/arch/x86/math-emu/get_address.c
@@ -21,6 +21,7 @@

#include <asm/uaccess.h>
#include <asm/desc.h>
+#include <asm/vm86.h>

#include "fpu_system.h"
#include "exception.h"
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 5196ac4..cf576f5 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -21,6 +21,7 @@
#include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
#include <asm/fixmap.h> /* VSYSCALL_ADDR */
#include <asm/vsyscall.h> /* emulate_vsyscall */
+#include <asm/vm86.h> /* struct vm86 */

#define CREATE_TRACE_POINTS
#include <asm/trace/exceptions.h>
--
2.4.3

2015-07-29 05:42:11

by Brian Gerst

[permalink] [raw]
Subject: [PATCH 7/8] x86/vm86: Rename vm86->vm86_info to user_vm86

Make it clearer that this is the pointer to the userspace vm86 state area.

Signed-off-by: Brian Gerst <[email protected]>
---
arch/x86/include/asm/vm86.h | 2 +-
arch/x86/kernel/vm86_32.c | 70 +++++++++++++++++++++++----------------------
2 files changed, 37 insertions(+), 35 deletions(-)

diff --git a/arch/x86/include/asm/vm86.h b/arch/x86/include/asm/vm86.h
index 05f6394..5c027c5 100644
--- a/arch/x86/include/asm/vm86.h
+++ b/arch/x86/include/asm/vm86.h
@@ -28,7 +28,7 @@ struct kernel_vm86_regs {
};

struct vm86 {
- struct vm86plus_struct __user *vm86_info;
+ struct vm86plus_struct __user *user_vm86;
struct pt_regs regs32;
unsigned long v86flags;
unsigned long v86mask;
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index 6fce378..aca499d 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -104,17 +104,17 @@ void save_v86_state(struct kernel_vm86_regs *regs, int retval)
*/
local_irq_enable();

- if (!vm86 || !vm86->vm86_info) {
- pr_alert("no vm86_info: BAD\n");
+ if (!vm86 || !vm86->user_vm86) {
+ pr_alert("no user_vm86: BAD\n");
do_exit(SIGSEGV);
}
set_flags(regs->pt.flags, VEFLAGS, X86_EFLAGS_VIF | vm86->v86mask);
- user = vm86->vm86_info;
+ user = vm86->user_vm86;

if (!access_ok(VERIFY_WRITE, user, vm86->vm86plus.is_vm86pus ?
sizeof(struct vm86plus_struct) :
sizeof(struct vm86_struct))) {
- pr_alert("could not access userspace vm86_info\n");
+ pr_alert("could not access userspace vm86 info\n");
do_exit(SIGSEGV);
}

@@ -139,7 +139,7 @@ void save_v86_state(struct kernel_vm86_regs *regs, int retval)
put_user_ex(vm86->screen_bitmap, &user->screen_bitmap);
} put_user_catch(err);
if (err) {
- pr_alert("could not access userspace vm86_info\n");
+ pr_alert("could not access userspace vm86 info\n");
do_exit(SIGSEGV);
}

@@ -199,11 +199,11 @@ static inline int do_vm86_irq_handling(int subfunction, int irqnumber)
}
#endif

-static long do_sys_vm86(struct vm86plus_struct __user *v86, bool plus);
+static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus);

-SYSCALL_DEFINE1(vm86old, struct vm86_struct __user *, v86)
+SYSCALL_DEFINE1(vm86old, struct vm86_struct __user *, user_vm86)
{
- return do_sys_vm86((struct vm86plus_struct __user *) v86, false);
+ return do_sys_vm86((struct vm86plus_struct __user *) user_vm86, false);
}


@@ -230,7 +230,7 @@ SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg)
}


-static long do_sys_vm86(struct vm86plus_struct __user *v86, bool plus)
+static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus)
{
struct tss_struct *tss;
struct task_struct *tsk = current;
@@ -247,7 +247,7 @@ static long do_sys_vm86(struct vm86plus_struct __user *v86, bool plus)
if (vm86->saved_sp0)
return -EPERM;

- if (!access_ok(VERIFY_READ, v86, plus ?
+ if (!access_ok(VERIFY_READ, user_vm86, plus ?
sizeof(struct vm86_struct) :
sizeof(struct vm86plus_struct)))
return -EFAULT;
@@ -255,40 +255,42 @@ static long do_sys_vm86(struct vm86plus_struct __user *v86, bool plus)
memset(&vm86regs, 0, sizeof(vm86regs));
get_user_try {
unsigned short seg;
- get_user_ex(vm86regs.pt.bx, &v86->regs.ebx);
- get_user_ex(vm86regs.pt.cx, &v86->regs.ecx);
- get_user_ex(vm86regs.pt.dx, &v86->regs.edx);
- get_user_ex(vm86regs.pt.si, &v86->regs.esi);
- get_user_ex(vm86regs.pt.di, &v86->regs.edi);
- get_user_ex(vm86regs.pt.bp, &v86->regs.ebp);
- get_user_ex(vm86regs.pt.ax, &v86->regs.eax);
- get_user_ex(vm86regs.pt.ip, &v86->regs.eip);
- get_user_ex(seg, &v86->regs.cs);
+ get_user_ex(vm86regs.pt.bx, &user_vm86->regs.ebx);
+ get_user_ex(vm86regs.pt.cx, &user_vm86->regs.ecx);
+ get_user_ex(vm86regs.pt.dx, &user_vm86->regs.edx);
+ get_user_ex(vm86regs.pt.si, &user_vm86->regs.esi);
+ get_user_ex(vm86regs.pt.di, &user_vm86->regs.edi);
+ get_user_ex(vm86regs.pt.bp, &user_vm86->regs.ebp);
+ get_user_ex(vm86regs.pt.ax, &user_vm86->regs.eax);
+ get_user_ex(vm86regs.pt.ip, &user_vm86->regs.eip);
+ get_user_ex(seg, &user_vm86->regs.cs);
vm86regs.pt.cs = seg;
- get_user_ex(vm86regs.pt.flags, &v86->regs.eflags);
- get_user_ex(vm86regs.pt.sp, &v86->regs.esp);
- get_user_ex(seg, &v86->regs.ss);
+ get_user_ex(vm86regs.pt.flags, &user_vm86->regs.eflags);
+ get_user_ex(vm86regs.pt.sp, &user_vm86->regs.esp);
+ get_user_ex(seg, &user_vm86->regs.ss);
vm86regs.pt.ss = seg;
- get_user_ex(vm86regs.es, &v86->regs.es);
- get_user_ex(vm86regs.ds, &v86->regs.ds);
- get_user_ex(vm86regs.fs, &v86->regs.fs);
- get_user_ex(vm86regs.gs, &v86->regs.gs);
-
- get_user_ex(vm86->flags, &v86->flags);
- get_user_ex(vm86->screen_bitmap, &v86->screen_bitmap);
- get_user_ex(vm86->cpu_type, &v86->cpu_type);
+ get_user_ex(vm86regs.es, &user_vm86->regs.es);
+ get_user_ex(vm86regs.ds, &user_vm86->regs.ds);
+ get_user_ex(vm86regs.fs, &user_vm86->regs.fs);
+ get_user_ex(vm86regs.gs, &user_vm86->regs.gs);
+
+ get_user_ex(vm86->flags, &user_vm86->flags);
+ get_user_ex(vm86->screen_bitmap, &user_vm86->screen_bitmap);
+ get_user_ex(vm86->cpu_type, &user_vm86->cpu_type);
} get_user_catch(err);
if (err)
return err;

- if (copy_from_user(&vm86->int_revectored, &v86->int_revectored,
+ if (copy_from_user(&vm86->int_revectored,
+ &user_vm86->int_revectored,
sizeof(struct revectored_struct)))
return -EFAULT;
- if (copy_from_user(&vm86->int21_revectored, &v86->int21_revectored,
+ if (copy_from_user(&vm86->int21_revectored,
+ &user_vm86->int21_revectored,
sizeof(struct revectored_struct)))
return -EFAULT;
if (plus) {
- if (copy_from_user(&vm86->vm86plus, &v86->vm86plus,
+ if (copy_from_user(&vm86->vm86plus, &user_vm86->vm86plus,
sizeof(struct vm86plus_info_struct)))
return -EFAULT;
vm86->vm86plus.is_vm86pus = 1;
@@ -297,7 +299,7 @@ static long do_sys_vm86(struct vm86plus_struct __user *v86, bool plus)
sizeof(struct vm86plus_info_struct));

memcpy(&vm86->regs32, regs, sizeof(struct pt_regs));
- vm86->vm86_info = v86;
+ vm86->user_vm86 = user_vm86;

/*
* The flags register is also special: we cannot trust that the user
--
2.4.3

2015-07-29 05:41:47

by Brian Gerst

[permalink] [raw]
Subject: [PATCH 8/8] x86/vm86: Rename vm86->v86flags and v86mask

Rename v86flags to veflags, and v86mask to veflags_mask.

Signed-off-by: Brian Gerst <[email protected]>
---
arch/x86/include/asm/vm86.h | 4 ++--
arch/x86/kernel/vm86_32.c | 20 ++++++++++----------
2 files changed, 12 insertions(+), 12 deletions(-)

diff --git a/arch/x86/include/asm/vm86.h b/arch/x86/include/asm/vm86.h
index 5c027c5..13f91a6 100644
--- a/arch/x86/include/asm/vm86.h
+++ b/arch/x86/include/asm/vm86.h
@@ -30,8 +30,8 @@ struct kernel_vm86_regs {
struct vm86 {
struct vm86plus_struct __user *user_vm86;
struct pt_regs regs32;
- unsigned long v86flags;
- unsigned long v86mask;
+ unsigned long veflags;
+ unsigned long veflags_mask;
unsigned long saved_sp0;

unsigned long flags;
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index aca499d..8b8f9de 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -80,8 +80,8 @@
/*
* virtual flags (16 and 32-bit versions)
*/
-#define VFLAGS (*(unsigned short *)&(current->thread.vm86->v86flags))
-#define VEFLAGS (current->thread.vm86->v86flags)
+#define VFLAGS (*(unsigned short *)&(current->thread.vm86->veflags))
+#define VEFLAGS (current->thread.vm86->veflags)

#define set_flags(X, new, mask) \
((X) = ((X) & ~(mask)) | ((new) & (mask)))
@@ -108,7 +108,7 @@ void save_v86_state(struct kernel_vm86_regs *regs, int retval)
pr_alert("no user_vm86: BAD\n");
do_exit(SIGSEGV);
}
- set_flags(regs->pt.flags, VEFLAGS, X86_EFLAGS_VIF | vm86->v86mask);
+ set_flags(regs->pt.flags, VEFLAGS, X86_EFLAGS_VIF | vm86->veflags_mask);
user = vm86->user_vm86;

if (!access_ok(VERIFY_WRITE, user, vm86->vm86plus.is_vm86pus ?
@@ -315,16 +315,16 @@ static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus)

switch (vm86->cpu_type) {
case CPU_286:
- vm86->v86mask = 0;
+ vm86->veflags_mask = 0;
break;
case CPU_386:
- vm86->v86mask = X86_EFLAGS_NT | X86_EFLAGS_IOPL;
+ vm86->veflags_mask = X86_EFLAGS_NT | X86_EFLAGS_IOPL;
break;
case CPU_486:
- vm86->v86mask = X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL;
+ vm86->veflags_mask = X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL;
break;
default:
- vm86->v86mask = X86_EFLAGS_ID | X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL;
+ vm86->veflags_mask = X86_EFLAGS_ID | X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL;
break;
}

@@ -384,7 +384,7 @@ static inline void clear_AC(struct kernel_vm86_regs *regs)

static inline void set_vflags_long(unsigned long flags, struct kernel_vm86_regs *regs)
{
- set_flags(VEFLAGS, flags, current->thread.vm86->v86mask);
+ set_flags(VEFLAGS, flags, current->thread.vm86->veflags_mask);
set_flags(regs->pt.flags, flags, SAFE_MASK);
if (flags & X86_EFLAGS_IF)
set_IF(regs);
@@ -394,7 +394,7 @@ static inline void set_vflags_long(unsigned long flags, struct kernel_vm86_regs

static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_regs *regs)
{
- set_flags(VFLAGS, flags, current->thread.vm86->v86mask);
+ set_flags(VFLAGS, flags, current->thread.vm86->veflags_mask);
set_flags(regs->pt.flags, flags, SAFE_MASK);
if (flags & X86_EFLAGS_IF)
set_IF(regs);
@@ -409,7 +409,7 @@ static inline unsigned long get_vflags(struct kernel_vm86_regs *regs)
if (VEFLAGS & X86_EFLAGS_VIF)
flags |= X86_EFLAGS_IF;
flags |= X86_EFLAGS_IOPL;
- return flags | (VEFLAGS & current->thread.vm86->v86mask);
+ return flags | (VEFLAGS & current->thread.vm86->veflags_mask);
}

static inline int is_revectored(int nr, struct revectored_struct *bitmap)
--
2.4.3

2015-07-29 15:24:32

by Andy Lutomirski

[permalink] [raw]
Subject: Re: [PATCH 1/8] x86/vm86: Move vm86 fields out of thread_struct

On Tue, Jul 28, 2015 at 10:41 PM, Brian Gerst <[email protected]> wrote:
> Allocate a separate structure for the vm86 fields.

Acked-by: Andy Lutomirski <[email protected]>

2015-07-29 15:32:57

by Andy Lutomirski

[permalink] [raw]
Subject: Re: [PATCH 2/8] x86/vm86: Move fields from kernel_vm86_struct

On Tue, Jul 28, 2015 at 10:41 PM, Brian Gerst <[email protected]> wrote:
> Move the non-regs fields to the off-stack data.

Acked-by: Andy Lutomirski <[email protected]>

Although I admit I haven't carefully reviewed the mindless conversion
part of the patch.

2015-07-29 15:34:08

by Andy Lutomirski

[permalink] [raw]
Subject: Re: [PATCH 3/8] x86/vm86: Eliminate kernel_vm86_struct

On Tue, Jul 28, 2015 at 10:41 PM, Brian Gerst <[email protected]> wrote:
> Now there is no vm86-specific data left on the kernel stack while in
> userspace, except for the 32-bit regs.

This is *so* much better.

Acked-by: Andy Lutomirski <[email protected]>

2015-07-29 15:50:37

by Andy Lutomirski

[permalink] [raw]
Subject: Re: [PATCH 4/8] x86/vm86: Use the normal pt_regs area for vm86

On Tue, Jul 28, 2015 at 10:41 PM, Brian Gerst <[email protected]> wrote:
> Change to use the normal pt_regs area to enter and exit vm86 mode. This is
> done by increasing the padding at the top of the stack to make room for the
> extra vm86 segment slots in the IRET frame. It then saves the 32-bit regs
> in the off-stack vm86 data, and copies in the vm86 regs. Exiting back to
> 32-bit mode does the reverse. This allows removing the hacks to jump directly
> into the exit asm code due to having to change the stack pointer. Returning
> normally from the vm86 syscall and the exception handlers allows things like
> ptrace and auditing to work properly.

With caveats below:

Acked-by: Andy Lutomirski <[email protected]>

> - movb PT_CS(%esp), %bl
> - andb $SEGMENT_RPL_MASK, %bl
> - cmpb $USER_RPL, %bl
> - jb resume_kernel

I agree with this hunk, and I have the same hunk in my tree. However,
it has nothing to do with vm86 and, on the off chance that this hunk
is wrong, I think it should be bisect-friendly.

If you want to preserve my bit of archeology, you could include:

https://git.kernel.org/cgit/linux/kernel/git/luto/linux.git/commit/?h=x86/entry&id=bb2a2fc1dc6423e87e7f1c3c9c4567a47f727b6e

in this series and stick your patch on top. (Or, Ingo, if you want to
apply just that one patch from my pile and merge this on top, you'd
get exactly the same result.)

> --- a/arch/x86/include/asm/thread_info.h
> +++ b/arch/x86/include/asm/thread_info.h
> @@ -27,14 +27,17 @@
> * Without this offset, that can result in a page fault. (We are
> * careful that, in this case, the value we read doesn't matter.)
> *
> - * In vm86 mode, the hardware frame is much longer still, but we neither
> - * access the extra members from NMI context, nor do we write such a
> - * frame at sp0 at all.
> + * In vm86 mode, the hardware frame is much longer still, so add 16
> + * bytes to make room for the real-mode segments.

I've now read this comment a few times, and I think it could be
better. How about:

In vm86 mode, the hardware frame is extended by 4 bytes for each of
ds, es, fs and gs. To allow room for those extra slots while still
keeping pt_regs in its normal location on entries from vm86 mode, we
need at least 16 bytes of padding. In the vm86 case, we don't need to
worry about the SYSENTER + NMI issue: we disallow SYSENTER from vm86
mode, so we don't need to worry about an NMI trying to read above the
top of the vm86 hardware frame.


--Andy

2015-07-29 17:14:56

by Brian Gerst

[permalink] [raw]
Subject: Re: [PATCH 4/8] x86/vm86: Use the normal pt_regs area for vm86

On Wed, Jul 29, 2015 at 11:50 AM, Andy Lutomirski <[email protected]> wrote:
> On Tue, Jul 28, 2015 at 10:41 PM, Brian Gerst <[email protected]> wrote:
>> Change to use the normal pt_regs area to enter and exit vm86 mode. This is
>> done by increasing the padding at the top of the stack to make room for the
>> extra vm86 segment slots in the IRET frame. It then saves the 32-bit regs
>> in the off-stack vm86 data, and copies in the vm86 regs. Exiting back to
>> 32-bit mode does the reverse. This allows removing the hacks to jump directly
>> into the exit asm code due to having to change the stack pointer. Returning
>> normally from the vm86 syscall and the exception handlers allows things like
>> ptrace and auditing to work properly.
>
> With caveats below:
>
> Acked-by: Andy Lutomirski <[email protected]>
>
>> - movb PT_CS(%esp), %bl
>> - andb $SEGMENT_RPL_MASK, %bl
>> - cmpb $USER_RPL, %bl
>> - jb resume_kernel
>
> I agree with this hunk, and I have the same hunk in my tree. However,
> it has nothing to do with vm86 and, on the off chance that this hunk
> is wrong, I think it should be bisect-friendly.

I think it was causing signal handling to fail, but I can't remember
exactly. If I kept the code it would have needed to add a test for
the VM flag. But I came to the same conclusion you did, that it was
unneeded.

> If you want to preserve my bit of archeology, you could include:
>
> https://git.kernel.org/cgit/linux/kernel/git/luto/linux.git/commit/?h=x86/entry&id=bb2a2fc1dc6423e87e7f1c3c9c4567a47f727b6e
>
> in this series and stick your patch on top. (Or, Ingo, if you want to
> apply just that one patch from my pile and merge this on top, you'd
> get exactly the same result.)

I'd be OK with that.

>> --- a/arch/x86/include/asm/thread_info.h
>> +++ b/arch/x86/include/asm/thread_info.h
>> @@ -27,14 +27,17 @@
>> * Without this offset, that can result in a page fault. (We are
>> * careful that, in this case, the value we read doesn't matter.)
>> *
>> - * In vm86 mode, the hardware frame is much longer still, but we neither
>> - * access the extra members from NMI context, nor do we write such a
>> - * frame at sp0 at all.
>> + * In vm86 mode, the hardware frame is much longer still, so add 16
>> + * bytes to make room for the real-mode segments.
>
> I've now read this comment a few times, and I think it could be
> better. How about:
>
> In vm86 mode, the hardware frame is extended by 4 bytes for each of
> ds, es, fs and gs. To allow room for those extra slots while still
> keeping pt_regs in its normal location on entries from vm86 mode, we
> need at least 16 bytes of padding. In the vm86 case, we don't need to
> worry about the SYSENTER + NMI issue: we disallow SYSENTER from vm86
> mode, so we don't need to worry about an NMI trying to read above the
> top of the vm86 hardware frame.

That looks good.

--
Brian Gerst

2015-07-29 17:16:40

by Linus Torvalds

[permalink] [raw]
Subject: Re: [PATCH 4/8] x86/vm86: Use the normal pt_regs area for vm86

On Wed, Jul 29, 2015 at 10:14 AM, Brian Gerst <[email protected]> wrote:
>
> I think it was causing signal handling to fail, but I can't remember
> exactly.

Ugh.

If that hunk made a difference, then there is something wrong with
your patch-series. So please double-check.

Linus

2015-07-29 17:36:55

by Brian Gerst

[permalink] [raw]
Subject: Re: [PATCH 4/8] x86/vm86: Use the normal pt_regs area for vm86

On Wed, Jul 29, 2015 at 1:16 PM, Linus Torvalds
<[email protected]> wrote:
> On Wed, Jul 29, 2015 at 10:14 AM, Brian Gerst <[email protected]> wrote:
>>
>> I think it was causing signal handling to fail, but I can't remember
>> exactly.
>
> Ugh.
>
> If that hunk made a difference, then there is something wrong with
> your patch-series. So please double-check.
>
> Linus

I think I remember now what the issue was. Since entering vm86 mode
uses force_iret(), the work_pending code path was being taken. I had
to move the call to save_v86_state out of here to handle_signal(),
otherwise it would just restore the 32-bit regs and exit the syscall
without ever entering vm86 mode. But that meant that the test for
kernel CS was seeing the vm86 regs instead of 32-bit regs, and was
failing because it didn't account for the VM bit (if the real-mode CS
looked like RPL 0). A fault would get stuck in a loop because it
couldn't exit to the signal handling code.

--
Brian Gerst

2015-07-29 17:47:19

by Linus Torvalds

[permalink] [raw]
Subject: Re: [PATCH 4/8] x86/vm86: Use the normal pt_regs area for vm86

On Wed, Jul 29, 2015 at 10:36 AM, Brian Gerst <[email protected]> wrote:
>
> I think I remember now what the issue was. Since entering vm86 mode
> uses force_iret(), the work_pending code path was being taken. I had
> to move the call to save_v86_state out of here to handle_signal(),
> otherwise it would just restore the 32-bit regs and exit the syscall
> without ever entering vm86 mode. But that meant that the test for
> kernel CS was seeing the vm86 regs instead of 32-bit regs, and was
> failing because it didn't account for the VM bit (if the real-mode CS
> looked like RPL 0). A fault would get stuck in a loop because it
> couldn't exit to the signal handling code.

Hmm. I think we'd want a big comment about that code then, and how it
is only used for user-space (including vm86 mode).

Linus

2015-07-29 17:50:45

by Andy Lutomirski

[permalink] [raw]
Subject: Re: [PATCH 4/8] x86/vm86: Use the normal pt_regs area for vm86

On Wed, Jul 29, 2015 at 10:47 AM, Linus Torvalds
<[email protected]> wrote:
> On Wed, Jul 29, 2015 at 10:36 AM, Brian Gerst <[email protected]> wrote:
>>
>> I think I remember now what the issue was. Since entering vm86 mode
>> uses force_iret(), the work_pending code path was being taken. I had
>> to move the call to save_v86_state out of here to handle_signal(),
>> otherwise it would just restore the 32-bit regs and exit the syscall
>> without ever entering vm86 mode. But that meant that the test for
>> kernel CS was seeing the vm86 regs instead of 32-bit regs, and was
>> failing because it didn't account for the VM bit (if the real-mode CS
>> looked like RPL 0). A fault would get stuck in a loop because it
>> couldn't exit to the signal handling code.
>
> Hmm. I think we'd want a big comment about that code then, and how it
> is only used for user-space (including vm86 mode).

I think that a very tiny comment might be sufficient, because I'm
planning to resubmit my patch series that removes all the work_xyz asm
entirely as soon as Brian's patches land in -tip :) I've just been
waiting for v86 mode to GTFO of the asm loop so the rest could be done
in C. I had a small patch to deal with vm86, but Brian's is much
better, so I decided to hold off.

--Andy

2015-07-31 08:57:58

by Ingo Molnar

[permalink] [raw]
Subject: Re: [PATCH 5/8] x86/vm86: Add a separate config option for hardware IRQ handling


* Brian Gerst <[email protected]> wrote:

> Allow disabling hardware interrupt support for vm86.
>
> Signed-off-by: Brian Gerst <[email protected]>
> ---
> arch/x86/Kconfig | 8 ++++++++
> arch/x86/include/asm/irq_vectors.h | 10 ----------
> arch/x86/include/asm/vm86.h | 20 ++++++++++++++++++--
> arch/x86/kernel/vm86_32.c | 12 ++++++++++--
> 4 files changed, 36 insertions(+), 14 deletions(-)
>
> diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
> index cbd2d62..7c7ec31 100644
> --- a/arch/x86/Kconfig
> +++ b/arch/x86/Kconfig
> @@ -1067,6 +1067,14 @@ config VM86
> bool
> default X86_LEGACY_VM86
>
> +config VM86_INTERRUPTS
> + bool "Enable VM86 interrupt support"
> + default y
> + depends on VM86
> + ---help---
> + This option allows VM86 programs to request interrupts for
> + real mode hardware drivers.

So I'm wondering what the justification for this is. People can disable vm86
already via CONFIG_X86_LEGACY_VM86. The extra config just uglifies the code
unnecessarily.

Thanks,

Ingo

2015-07-31 09:12:34

by Ingo Molnar

[permalink] [raw]
Subject: Re: [PATCH 4/8] x86/vm86: Use the normal pt_regs area for vm86


* Brian Gerst <[email protected]> wrote:

> --- a/arch/x86/include/asm/vm86.h
> +++ b/arch/x86/include/asm/vm86.h

> +static inline void save_v86_state(struct kernel_vm86_regs *, int) { }
> +

So this is not a valid inline function and the patches were clearly not built with
!VM86 such as 64-bit defconfig:

In file included from arch/x86/kernel/signal.c:34:0:
./arch/x86/include/asm/vm86.h: In function ‘save_v86_state’:
./arch/x86/include/asm/vm86.h:70:42: error: parameter name omitted
static inline void save_v86_state(struct kernel_vm86_regs *, int) { }
./arch/x86/include/asm/vm86.h:70:42: error: parameter name omitted

I fixed this up.

Thanks,

Ingo

2015-07-31 09:17:53

by Ingo Molnar

[permalink] [raw]
Subject: Re: [PATCH 4/8] x86/vm86: Use the normal pt_regs area for vm86


* Ingo Molnar <[email protected]> wrote:

>
> * Brian Gerst <[email protected]> wrote:
>
> > --- a/arch/x86/include/asm/vm86.h
> > +++ b/arch/x86/include/asm/vm86.h
>
> > +static inline void save_v86_state(struct kernel_vm86_regs *, int) { }
> > +
>
> So this is not a valid inline function and the patches were clearly not built with
> !VM86 such as 64-bit defconfig:
>
> In file included from arch/x86/kernel/signal.c:34:0:
> ./arch/x86/include/asm/vm86.h: In function ‘save_v86_state’:
> ./arch/x86/include/asm/vm86.h:70:42: error: parameter name omitted
> static inline void save_v86_state(struct kernel_vm86_regs *, int) { }
> ./arch/x86/include/asm/vm86.h:70:42: error: parameter name omitted
>
> I fixed this up.

Another build fix was needed for:

arch/x86/mm/fault.c: In function ‘check_v8086_mode’:
arch/x86/mm/fault.c:319:39: error: ‘struct thread_struct’ has no member named ‘vm86’

that's on 32-bit defconfig.

Thanks,

Ingo

============>

diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 714a52b79e69..eef44d9a3f77 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -302,6 +302,7 @@ static inline void
check_v8086_mode(struct pt_regs *regs, unsigned long address,
struct task_struct *tsk)
{
+#ifdef CONFIG_VM86
unsigned long bit;

if (!v8086_mode(regs) || !tsk->thread.vm86)
@@ -310,6 +311,7 @@ check_v8086_mode(struct pt_regs *regs, unsigned long address,
bit = (address - 0xA0000) >> PAGE_SHIFT;
if (bit < 32)
tsk->thread.vm86->screen_bitmap |= 1 << bit;
+#endif
}

static bool low_pfn(unsigned long pfn)

2015-07-31 09:47:50

by Ingo Molnar

[permalink] [raw]
Subject: Re: [PATCH 4/8] x86/vm86: Use the normal pt_regs area for vm86


* Ingo Molnar <[email protected]> wrote:

>
> > I fixed this up.
>
> Another build fix was needed for:
>
> arch/x86/mm/fault.c: In function ‘check_v8086_mode’:
> arch/x86/mm/fault.c:319:39: error: ‘struct thread_struct’ has no member named ‘vm86’
>
> that's on 32-bit defconfig.

A third one occurs in:

drivers/scsi/dpt_i2o.c:1933:7: error: ‘CPU_386’ undeclared (first use in this function)
drivers/scsi/dpt_i2o.c:1936:7: error: ‘CPU_486’ undeclared (first use in this function)
drivers/scsi/dpt_i2o.c:1939:7: error: ‘CPU_586’ undeclared (first use in this function)

due to the include file changes.

I fixed this up too.

Thanks,

Ingo

2015-07-31 12:10:53

by Brian Gerst

[permalink] [raw]
Subject: Re: [PATCH 5/8] x86/vm86: Add a separate config option for hardware IRQ handling

On Fri, Jul 31, 2015 at 4:57 AM, Ingo Molnar <[email protected]> wrote:
>
> * Brian Gerst <[email protected]> wrote:
>
>> Allow disabling hardware interrupt support for vm86.
>>
>> Signed-off-by: Brian Gerst <[email protected]>
>> ---
>> arch/x86/Kconfig | 8 ++++++++
>> arch/x86/include/asm/irq_vectors.h | 10 ----------
>> arch/x86/include/asm/vm86.h | 20 ++++++++++++++++++--
>> arch/x86/kernel/vm86_32.c | 12 ++++++++++--
>> 4 files changed, 36 insertions(+), 14 deletions(-)
>>
>> diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
>> index cbd2d62..7c7ec31 100644
>> --- a/arch/x86/Kconfig
>> +++ b/arch/x86/Kconfig
>> @@ -1067,6 +1067,14 @@ config VM86
>> bool
>> default X86_LEGACY_VM86
>>
>> +config VM86_INTERRUPTS
>> + bool "Enable VM86 interrupt support"
>> + default y
>> + depends on VM86
>> + ---help---
>> + This option allows VM86 programs to request interrupts for
>> + real mode hardware drivers.
>
> So I'm wondering what the justification for this is. People can disable vm86
> already via CONFIG_X86_LEGACY_VM86. The extra config just uglifies the code
> unnecessarily.
>
> Thanks,
>
> Ingo

Disabling even less-used code that could have system stability impact.
We've discouraged user-mode drivers for a very long time. Ironically,
other than being configured through the vm86 syscall, there isn't
really anything vm86-specific about it. All it does is register an
IRQ handler that sends a signal to the task.

--
Brian Gerst

2015-07-31 12:14:04

by Brian Gerst

[permalink] [raw]
Subject: Re: [PATCH 4/8] x86/vm86: Use the normal pt_regs area for vm86

On Fri, Jul 31, 2015 at 5:47 AM, Ingo Molnar <[email protected]> wrote:
>
> * Ingo Molnar <[email protected]> wrote:
>
>>
>> > I fixed this up.
>>
>> Another build fix was needed for:
>>
>> arch/x86/mm/fault.c: In function ‘check_v8086_mode’:
>> arch/x86/mm/fault.c:319:39: error: ‘struct thread_struct’ has no member named ‘vm86’
>>
>> that's on 32-bit defconfig.
>
> A third one occurs in:
>
> drivers/scsi/dpt_i2o.c:1933:7: error: ‘CPU_386’ undeclared (first use in this function)
> drivers/scsi/dpt_i2o.c:1936:7: error: ‘CPU_486’ undeclared (first use in this function)
> drivers/scsi/dpt_i2o.c:1939:7: error: ‘CPU_586’ undeclared (first use in this function)
>
> due to the include file changes.
>
> I fixed this up too.
>
> Thanks,
>
> Ingo

I'll admit that I forgot to test the 64-bit build, but the SCSI
driver... WTF is it doing with the cpu type?

--
Brian Gerst

2015-07-31 13:50:45

by Ingo Molnar

[permalink] [raw]
Subject: Re: [PATCH 5/8] x86/vm86: Add a separate config option for hardware IRQ handling


* Brian Gerst <[email protected]> wrote:

> On Fri, Jul 31, 2015 at 4:57 AM, Ingo Molnar <[email protected]> wrote:
> >
> > * Brian Gerst <[email protected]> wrote:
> >
> >> Allow disabling hardware interrupt support for vm86.
> >>
> >> Signed-off-by: Brian Gerst <[email protected]>
> >> ---
> >> arch/x86/Kconfig | 8 ++++++++
> >> arch/x86/include/asm/irq_vectors.h | 10 ----------
> >> arch/x86/include/asm/vm86.h | 20 ++++++++++++++++++--
> >> arch/x86/kernel/vm86_32.c | 12 ++++++++++--
> >> 4 files changed, 36 insertions(+), 14 deletions(-)
> >>
> >> diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
> >> index cbd2d62..7c7ec31 100644
> >> --- a/arch/x86/Kconfig
> >> +++ b/arch/x86/Kconfig
> >> @@ -1067,6 +1067,14 @@ config VM86
> >> bool
> >> default X86_LEGACY_VM86
> >>
> >> +config VM86_INTERRUPTS
> >> + bool "Enable VM86 interrupt support"
> >> + default y
> >> + depends on VM86
> >> + ---help---
> >> + This option allows VM86 programs to request interrupts for
> >> + real mode hardware drivers.
> >
> > So I'm wondering what the justification for this is. People can disable vm86
> > already via CONFIG_X86_LEGACY_VM86. The extra config just uglifies the code
> > unnecessarily.
> >
> > Thanks,
> >
> > Ingo
>
> Disabling even less-used code that could have system stability impact. We've
> discouraged user-mode drivers for a very long time. Ironically, other than
> being configured through the vm86 syscall, there isn't really anything
> vm86-specific about it. All it does is register an IRQ handler that sends a
> signal to the task.

So is this actually used by anything? Could we get away with disabling it, just to
see whether anything cares?

Thanks,

Ingo

2015-07-31 13:59:28

by Brian Gerst

[permalink] [raw]
Subject: Re: [PATCH 5/8] x86/vm86: Add a separate config option for hardware IRQ handling

On Fri, Jul 31, 2015 at 9:50 AM, Ingo Molnar <[email protected]> wrote:
>
> * Brian Gerst <[email protected]> wrote:
>
>> On Fri, Jul 31, 2015 at 4:57 AM, Ingo Molnar <[email protected]> wrote:
>> >
>> > * Brian Gerst <[email protected]> wrote:
>> >
>> >> Allow disabling hardware interrupt support for vm86.
>> >>
>> >> Signed-off-by: Brian Gerst <[email protected]>
>> >> ---
>> >> arch/x86/Kconfig | 8 ++++++++
>> >> arch/x86/include/asm/irq_vectors.h | 10 ----------
>> >> arch/x86/include/asm/vm86.h | 20 ++++++++++++++++++--
>> >> arch/x86/kernel/vm86_32.c | 12 ++++++++++--
>> >> 4 files changed, 36 insertions(+), 14 deletions(-)
>> >>
>> >> diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
>> >> index cbd2d62..7c7ec31 100644
>> >> --- a/arch/x86/Kconfig
>> >> +++ b/arch/x86/Kconfig
>> >> @@ -1067,6 +1067,14 @@ config VM86
>> >> bool
>> >> default X86_LEGACY_VM86
>> >>
>> >> +config VM86_INTERRUPTS
>> >> + bool "Enable VM86 interrupt support"
>> >> + default y
>> >> + depends on VM86
>> >> + ---help---
>> >> + This option allows VM86 programs to request interrupts for
>> >> + real mode hardware drivers.
>> >
>> > So I'm wondering what the justification for this is. People can disable vm86
>> > already via CONFIG_X86_LEGACY_VM86. The extra config just uglifies the code
>> > unnecessarily.
>> >
>> > Thanks,
>> >
>> > Ingo
>>
>> Disabling even less-used code that could have system stability impact. We've
>> discouraged user-mode drivers for a very long time. Ironically, other than
>> being configured through the vm86 syscall, there isn't really anything
>> vm86-specific about it. All it does is register an IRQ handler that sends a
>> signal to the task.
>
> So is this actually used by anything? Could we get away with disabling it, just to
> see whether anything cares?

My best guess would be some very old X11 drivers that needed
interrupts to run the Video BIOS code.

--
Brian Gerst

Subject: [tip:x86/asm] x86/vm86: Move vm86 fields out of 'thread_struct'

Commit-ID: 9fda6a0681e070b496235b132bc70ceb80300211
Gitweb: http://git.kernel.org/tip/9fda6a0681e070b496235b132bc70ceb80300211
Author: Brian Gerst <[email protected]>
AuthorDate: Wed, 29 Jul 2015 01:41:16 -0400
Committer: Ingo Molnar <[email protected]>
CommitDate: Fri, 31 Jul 2015 13:31:07 +0200

x86/vm86: Move vm86 fields out of 'thread_struct'

Allocate a separate structure for the vm86 fields.

Signed-off-by: Brian Gerst <[email protected]>
Acked-by: Andy Lutomirski <[email protected]>
Cc: Andy Lutomirski <[email protected]>
Cc: Borislav Petkov <[email protected]>
Cc: Denys Vlasenko <[email protected]>
Cc: H. Peter Anvin <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
[ Build fixes. ]
Signed-off-by: Ingo Molnar <[email protected]>
---
arch/x86/include/asm/processor.h | 11 +++-------
arch/x86/include/asm/vm86.h | 19 ++++++++++++++++-
arch/x86/kernel/process.c | 3 +++
arch/x86/kernel/vm86_32.c | 46 +++++++++++++++++++++++-----------------
arch/x86/mm/fault.c | 6 ++++--
5 files changed, 55 insertions(+), 30 deletions(-)

diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index befc134..9615a4e 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -6,8 +6,8 @@
/* Forward declaration, a strange C thing */
struct task_struct;
struct mm_struct;
+struct vm86;

-#include <asm/vm86.h>
#include <asm/math_emu.h>
#include <asm/segment.h>
#include <asm/types.h>
@@ -400,13 +400,9 @@ struct thread_struct {
unsigned long cr2;
unsigned long trap_nr;
unsigned long error_code;
-#ifdef CONFIG_X86_32
+#ifdef CONFIG_VM86
/* Virtual 86 mode info */
- struct vm86plus_struct __user *vm86_info;
- unsigned long screen_bitmap;
- unsigned long v86flags;
- unsigned long v86mask;
- unsigned long saved_sp0;
+ struct vm86 *vm86;
#endif
/* IO permissions: */
unsigned long *io_bitmap_ptr;
@@ -718,7 +714,6 @@ static inline void spin_lock_prefetch(const void *x)

#define INIT_THREAD { \
.sp0 = TOP_OF_INIT_STACK, \
- .vm86_info = NULL, \
.sysenter_cs = __KERNEL_CS, \
.io_bitmap_ptr = NULL, \
}
diff --git a/arch/x86/include/asm/vm86.h b/arch/x86/include/asm/vm86.h
index 1d8de3f..20b43b7 100644
--- a/arch/x86/include/asm/vm86.h
+++ b/arch/x86/include/asm/vm86.h
@@ -1,7 +1,6 @@
#ifndef _ASM_X86_VM86_H
#define _ASM_X86_VM86_H

-
#include <asm/ptrace.h>
#include <uapi/asm/vm86.h>

@@ -58,6 +57,14 @@ struct kernel_vm86_struct {
*/
};

+struct vm86 {
+ struct vm86plus_struct __user *vm86_info;
+ unsigned long screen_bitmap;
+ unsigned long v86flags;
+ unsigned long v86mask;
+ unsigned long saved_sp0;
+};
+
#ifdef CONFIG_VM86

void handle_vm86_fault(struct kernel_vm86_regs *, long);
@@ -67,6 +74,14 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *);
struct task_struct;
void release_vm86_irqs(struct task_struct *);

+#define free_vm86(t) do { \
+ struct thread_struct *__t = (t); \
+ if (__t->vm86 != NULL) { \
+ kfree(__t->vm86); \
+ __t->vm86 = NULL; \
+ } \
+} while (0)
+
#else

#define handle_vm86_fault(a, b)
@@ -77,6 +92,8 @@ static inline int handle_vm86_trap(struct kernel_vm86_regs *a, long b, int c)
return 0;
}

+#define free_vm86(t) do { } while(0)
+
#endif /* CONFIG_VM86 */

#endif /* _ASM_X86_VM86_H */
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 397688b..2199d9b 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -29,6 +29,7 @@
#include <asm/debugreg.h>
#include <asm/nmi.h>
#include <asm/tlbflush.h>
+#include <asm/vm86.h>

/*
* per-CPU TSS segments. Threads are completely 'soft' on Linux,
@@ -110,6 +111,8 @@ void exit_thread(void)
kfree(bp);
}

+ free_vm86(t);
+
fpu__drop(fpu);
}

diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index e6c2b47..bfa59b1 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -44,6 +44,7 @@
#include <linux/ptrace.h>
#include <linux/audit.h>
#include <linux/stddef.h>
+#include <linux/slab.h>

#include <asm/uaccess.h>
#include <asm/io.h>
@@ -81,8 +82,8 @@
/*
* virtual flags (16 and 32-bit versions)
*/
-#define VFLAGS (*(unsigned short *)&(current->thread.v86flags))
-#define VEFLAGS (current->thread.v86flags)
+#define VFLAGS (*(unsigned short *)&(current->thread.vm86->v86flags))
+#define VEFLAGS (current->thread.vm86->v86flags)

#define set_flags(X, new, mask) \
((X) = ((X) & ~(mask)) | ((new) & (mask)))
@@ -96,6 +97,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
struct pt_regs *ret;
struct task_struct *tsk = current;
struct vm86plus_struct __user *user;
+ struct vm86 *vm86 = current->thread.vm86;
long err = 0;

/*
@@ -105,12 +107,12 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
*/
local_irq_enable();

- if (!tsk->thread.vm86_info) {
+ if (!vm86 || !vm86->vm86_info) {
pr_alert("no vm86_info: BAD\n");
do_exit(SIGSEGV);
}
- set_flags(regs->pt.flags, VEFLAGS, X86_EFLAGS_VIF | tsk->thread.v86mask);
- user = tsk->thread.vm86_info;
+ set_flags(regs->pt.flags, VEFLAGS, X86_EFLAGS_VIF | vm86->v86mask);
+ user = vm86->vm86_info;

if (!access_ok(VERIFY_WRITE, user, VMPI.is_vm86pus ?
sizeof(struct vm86plus_struct) :
@@ -137,7 +139,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
put_user_ex(regs->fs, &user->regs.fs);
put_user_ex(regs->gs, &user->regs.gs);

- put_user_ex(tsk->thread.screen_bitmap, &user->screen_bitmap);
+ put_user_ex(vm86->screen_bitmap, &user->screen_bitmap);
} put_user_catch(err);
if (err) {
pr_alert("could not access userspace vm86_info\n");
@@ -145,10 +147,10 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
}

tss = &per_cpu(cpu_tss, get_cpu());
- tsk->thread.sp0 = tsk->thread.saved_sp0;
+ tsk->thread.sp0 = vm86->saved_sp0;
tsk->thread.sysenter_cs = __KERNEL_CS;
load_sp0(tss, &tsk->thread);
- tsk->thread.saved_sp0 = 0;
+ vm86->saved_sp0 = 0;
put_cpu();

ret = KVM86->regs32;
@@ -242,9 +244,15 @@ static long do_sys_vm86(struct vm86plus_struct __user *v86, bool plus,
{
struct tss_struct *tss;
struct task_struct *tsk = current;
+ struct vm86 *vm86 = tsk->thread.vm86;
unsigned long err = 0;

- if (tsk->thread.saved_sp0)
+ if (!vm86) {
+ if (!(vm86 = kzalloc(sizeof(*vm86), GFP_KERNEL)))
+ return -ENOMEM;
+ tsk->thread.vm86 = vm86;
+ }
+ if (vm86->saved_sp0)
return -EPERM;

if (!access_ok(VERIFY_READ, v86, plus ?
@@ -295,7 +303,7 @@ static long do_sys_vm86(struct vm86plus_struct __user *v86, bool plus,
}

info->regs32 = current_pt_regs();
- tsk->thread.vm86_info = v86;
+ vm86->vm86_info = v86;

/*
* The flags register is also special: we cannot trust that the user
@@ -311,16 +319,16 @@ static long do_sys_vm86(struct vm86plus_struct __user *v86, bool plus,

switch (info->cpu_type) {
case CPU_286:
- tsk->thread.v86mask = 0;
+ vm86->v86mask = 0;
break;
case CPU_386:
- tsk->thread.v86mask = X86_EFLAGS_NT | X86_EFLAGS_IOPL;
+ vm86->v86mask = X86_EFLAGS_NT | X86_EFLAGS_IOPL;
break;
case CPU_486:
- tsk->thread.v86mask = X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL;
+ vm86->v86mask = X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL;
break;
default:
- tsk->thread.v86mask = X86_EFLAGS_ID | X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL;
+ vm86->v86mask = X86_EFLAGS_ID | X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL;
break;
}

@@ -328,7 +336,7 @@ static long do_sys_vm86(struct vm86plus_struct __user *v86, bool plus,
* Save old state, set default return value (%ax) to 0 (VM86_SIGNAL)
*/
info->regs32->ax = VM86_SIGNAL;
- tsk->thread.saved_sp0 = tsk->thread.sp0;
+ vm86->saved_sp0 = tsk->thread.sp0;
lazy_save_gs(info->regs32->gs);

tss = &per_cpu(cpu_tss, get_cpu());
@@ -338,7 +346,7 @@ static long do_sys_vm86(struct vm86plus_struct __user *v86, bool plus,
load_sp0(tss, &tsk->thread);
put_cpu();

- tsk->thread.screen_bitmap = info->screen_bitmap;
+ vm86->screen_bitmap = info->screen_bitmap;
if (info->flags & VM86_SCREEN_BITMAP)
mark_screen_rdonly(tsk->mm);

@@ -408,7 +416,7 @@ static inline void clear_AC(struct kernel_vm86_regs *regs)

static inline void set_vflags_long(unsigned long flags, struct kernel_vm86_regs *regs)
{
- set_flags(VEFLAGS, flags, current->thread.v86mask);
+ set_flags(VEFLAGS, flags, current->thread.vm86->v86mask);
set_flags(regs->pt.flags, flags, SAFE_MASK);
if (flags & X86_EFLAGS_IF)
set_IF(regs);
@@ -418,7 +426,7 @@ static inline void set_vflags_long(unsigned long flags, struct kernel_vm86_regs

static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_regs *regs)
{
- set_flags(VFLAGS, flags, current->thread.v86mask);
+ set_flags(VFLAGS, flags, current->thread.vm86->v86mask);
set_flags(regs->pt.flags, flags, SAFE_MASK);
if (flags & X86_EFLAGS_IF)
set_IF(regs);
@@ -433,7 +441,7 @@ static inline unsigned long get_vflags(struct kernel_vm86_regs *regs)
if (VEFLAGS & X86_EFLAGS_VIF)
flags |= X86_EFLAGS_IF;
flags |= X86_EFLAGS_IOPL;
- return flags | (VEFLAGS & current->thread.v86mask);
+ return flags | (VEFLAGS & current->thread.vm86->v86mask);
}

static inline int is_revectored(int nr, struct revectored_struct *bitmap)
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 9dc9098..34a368d 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -301,14 +301,16 @@ static inline void
check_v8086_mode(struct pt_regs *regs, unsigned long address,
struct task_struct *tsk)
{
+#ifdef CONFIG_VM86
unsigned long bit;

- if (!v8086_mode(regs))
+ if (!v8086_mode(regs) || !tsk->thread.vm86)
return;

bit = (address - 0xA0000) >> PAGE_SHIFT;
if (bit < 32)
- tsk->thread.screen_bitmap |= 1 << bit;
+ tsk->thread.vm86->screen_bitmap |= 1 << bit;
+#endif
}

static bool low_pfn(unsigned long pfn)

Subject: [tip:x86/asm] x86/vm86: Move fields from 'struct kernel_vm86_struct' to 'struct vm86'

Commit-ID: d4ce0f26c790af8e829d3fad0a6787f40f98e24f
Gitweb: http://git.kernel.org/tip/d4ce0f26c790af8e829d3fad0a6787f40f98e24f
Author: Brian Gerst <[email protected]>
AuthorDate: Wed, 29 Jul 2015 01:41:17 -0400
Committer: Ingo Molnar <[email protected]>
CommitDate: Fri, 31 Jul 2015 13:31:08 +0200

x86/vm86: Move fields from 'struct kernel_vm86_struct' to 'struct vm86'

Move the non-regs fields to the off-stack data.

Signed-off-by: Brian Gerst <[email protected]>
Acked-by: Andy Lutomirski <[email protected]>
Cc: Andy Lutomirski <[email protected]>
Cc: Borislav Petkov <[email protected]>
Cc: Denys Vlasenko <[email protected]>
Cc: H. Peter Anvin <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
---
arch/x86/include/asm/vm86.h | 16 ++++++++--------
arch/x86/kernel/vm86_32.c | 42 ++++++++++++++++++++++--------------------
2 files changed, 30 insertions(+), 28 deletions(-)

diff --git a/arch/x86/include/asm/vm86.h b/arch/x86/include/asm/vm86.h
index 20b43b7..47c7648 100644
--- a/arch/x86/include/asm/vm86.h
+++ b/arch/x86/include/asm/vm86.h
@@ -37,13 +37,7 @@ struct kernel_vm86_struct {
* Therefore, pt_regs in fact points to a complete 'kernel_vm86_struct'
* in kernelspace, hence we need not reget the data from userspace.
*/
-#define VM86_TSS_ESP0 flags
- unsigned long flags;
- unsigned long screen_bitmap;
- unsigned long cpu_type;
- struct revectored_struct int_revectored;
- struct revectored_struct int21_revectored;
- struct vm86plus_info_struct vm86plus;
+#define VM86_TSS_ESP0 regs32
struct pt_regs *regs32; /* here we save the pointer to the old regs */
/*
* The below is not part of the structure, but the stack layout continues
@@ -59,10 +53,16 @@ struct kernel_vm86_struct {

struct vm86 {
struct vm86plus_struct __user *vm86_info;
- unsigned long screen_bitmap;
unsigned long v86flags;
unsigned long v86mask;
unsigned long saved_sp0;
+
+ unsigned long flags;
+ unsigned long screen_bitmap;
+ unsigned long cpu_type;
+ struct revectored_struct int_revectored;
+ struct revectored_struct int21_revectored;
+ struct vm86plus_info_struct vm86plus;
};

#ifdef CONFIG_VM86
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index bfa59b1..f71b4b9 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -68,7 +68,6 @@


#define KVM86 ((struct kernel_vm86_struct *)regs)
-#define VMPI KVM86->vm86plus


/*
@@ -114,7 +113,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
set_flags(regs->pt.flags, VEFLAGS, X86_EFLAGS_VIF | vm86->v86mask);
user = vm86->vm86_info;

- if (!access_ok(VERIFY_WRITE, user, VMPI.is_vm86pus ?
+ if (!access_ok(VERIFY_WRITE, user, vm86->vm86plus.is_vm86pus ?
sizeof(struct vm86plus_struct) :
sizeof(struct vm86_struct))) {
pr_alert("could not access userspace vm86_info\n");
@@ -282,25 +281,27 @@ static long do_sys_vm86(struct vm86plus_struct __user *v86, bool plus,
get_user_ex(info->regs.fs, &v86->regs.fs);
get_user_ex(info->regs.gs, &v86->regs.gs);

- get_user_ex(info->flags, &v86->flags);
- get_user_ex(info->screen_bitmap, &v86->screen_bitmap);
- get_user_ex(info->cpu_type, &v86->cpu_type);
+ get_user_ex(vm86->flags, &v86->flags);
+ get_user_ex(vm86->screen_bitmap, &v86->screen_bitmap);
+ get_user_ex(vm86->cpu_type, &v86->cpu_type);
} get_user_catch(err);
if (err)
return err;

- if (copy_from_user(&info->int_revectored, &v86->int_revectored,
+ if (copy_from_user(&vm86->int_revectored, &v86->int_revectored,
sizeof(struct revectored_struct)))
return -EFAULT;
- if (copy_from_user(&info->int21_revectored, &v86->int21_revectored,
+ if (copy_from_user(&vm86->int21_revectored, &v86->int21_revectored,
sizeof(struct revectored_struct)))
return -EFAULT;
if (plus) {
- if (copy_from_user(&info->vm86plus, &v86->vm86plus,
+ if (copy_from_user(&vm86->vm86plus, &v86->vm86plus,
sizeof(struct vm86plus_info_struct)))
return -EFAULT;
- info->vm86plus.is_vm86pus = 1;
- }
+ vm86->vm86plus.is_vm86pus = 1;
+ } else
+ memset(&vm86->vm86plus, 0,
+ sizeof(struct vm86plus_info_struct));

info->regs32 = current_pt_regs();
vm86->vm86_info = v86;
@@ -317,7 +318,7 @@ static long do_sys_vm86(struct vm86plus_struct __user *v86, bool plus,

info->regs.pt.orig_ax = info->regs32->orig_ax;

- switch (info->cpu_type) {
+ switch (vm86->cpu_type) {
case CPU_286:
vm86->v86mask = 0;
break;
@@ -346,8 +347,7 @@ static long do_sys_vm86(struct vm86plus_struct __user *v86, bool plus,
load_sp0(tss, &tsk->thread);
put_cpu();

- vm86->screen_bitmap = info->screen_bitmap;
- if (info->flags & VM86_SCREEN_BITMAP)
+ if (vm86->flags & VM86_SCREEN_BITMAP)
mark_screen_rdonly(tsk->mm);

/*call __audit_syscall_exit since we do not exit via the normal paths */
@@ -539,12 +539,13 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
{
unsigned long __user *intr_ptr;
unsigned long segoffs;
+ struct kernel_vm86_info *vm86 = current->thread.vm86;

if (regs->pt.cs == BIOSSEG)
goto cannot_handle;
- if (is_revectored(i, &KVM86->int_revectored))
+ if (is_revectored(i, &vm86->int_revectored))
goto cannot_handle;
- if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
+ if (i == 0x21 && is_revectored(AH(regs), &vm86->int21_revectored))
goto cannot_handle;
intr_ptr = (unsigned long __user *) (i << 2);
if (get_user(segoffs, intr_ptr))
@@ -568,7 +569,7 @@ cannot_handle:

int handle_vm86_trap(struct kernel_vm86_regs *regs, long error_code, int trapno)
{
- if (VMPI.is_vm86pus) {
+ if (current->thread.vm86->vm86plus.is_vm86pus) {
if ((trapno == 3) || (trapno == 1)) {
KVM86->regs32->ax = VM86_TRAP + (trapno << 8);
/* setting this flag forces the code in entry_32.S to
@@ -595,12 +596,13 @@ void handle_vm86_fault(struct kernel_vm86_regs *regs, long error_code)
unsigned char __user *ssp;
unsigned short ip, sp, orig_flags;
int data32, pref_done;
+ struct vm86plus_info_struct *vmpi = &current->thread.vm86->vm86plus;

#define CHECK_IF_IN_TRAP \
- if (VMPI.vm86dbg_active && VMPI.vm86dbg_TFpendig) \
+ if (vmpi->vm86dbg_active && vmpi->vm86dbg_TFpendig) \
newflags |= X86_EFLAGS_TF
#define VM86_FAULT_RETURN do { \
- if (VMPI.force_return_for_pic && (VEFLAGS & (X86_EFLAGS_IF | X86_EFLAGS_VIF))) \
+ if (vmpi->force_return_for_pic && (VEFLAGS & (X86_EFLAGS_IF | X86_EFLAGS_VIF))) \
return_to_32bit(regs, VM86_PICRETURN); \
if (orig_flags & X86_EFLAGS_TF) \
handle_vm86_trap(regs, 0, 1); \
@@ -670,8 +672,8 @@ void handle_vm86_fault(struct kernel_vm86_regs *regs, long error_code)
case 0xcd: {
int intno = popb(csp, ip, simulate_sigsegv);
IP(regs) = ip;
- if (VMPI.vm86dbg_active) {
- if ((1 << (intno & 7)) & VMPI.vm86dbg_intxxtab[intno >> 3])
+ if (vmpi->vm86dbg_active) {
+ if ((1 << (intno & 7)) & vmpi->vm86dbg_intxxtab[intno >> 3])
return_to_32bit(regs, VM86_INTx + (intno << 8));
}
do_int(regs, intno, ssp, sp);

Subject: [tip:x86/asm] x86/vm86: Eliminate 'struct kernel_vm86_struct'

Commit-ID: 90c6085a248f8f964588617f51329688bcc9f2bc
Gitweb: http://git.kernel.org/tip/90c6085a248f8f964588617f51329688bcc9f2bc
Author: Brian Gerst <[email protected]>
AuthorDate: Wed, 29 Jul 2015 01:41:18 -0400
Committer: Ingo Molnar <[email protected]>
CommitDate: Fri, 31 Jul 2015 13:31:08 +0200

x86/vm86: Eliminate 'struct kernel_vm86_struct'

Now there is no vm86-specific data left on the kernel stack
while in userspace, except for the 32-bit regs.

Signed-off-by: Brian Gerst <[email protected]>
Acked-by: Andy Lutomirski <[email protected]>
Cc: Andy Lutomirski <[email protected]>
Cc: Borislav Petkov <[email protected]>
Cc: Denys Vlasenko <[email protected]>
Cc: H. Peter Anvin <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
---
arch/x86/include/asm/vm86.h | 25 +-----------
arch/x86/kernel/vm86_32.c | 95 +++++++++++++++++++--------------------------
2 files changed, 42 insertions(+), 78 deletions(-)

diff --git a/arch/x86/include/asm/vm86.h b/arch/x86/include/asm/vm86.h
index 47c7648..226d6c1 100644
--- a/arch/x86/include/asm/vm86.h
+++ b/arch/x86/include/asm/vm86.h
@@ -27,32 +27,9 @@ struct kernel_vm86_regs {
unsigned short gs, __gsh;
};

-struct kernel_vm86_struct {
- struct kernel_vm86_regs regs;
-/*
- * the below part remains on the kernel stack while we are in VM86 mode.
- * 'tss.esp0' then contains the address of VM86_TSS_ESP0 below, and when we
- * get forced back from VM86, the CPU and "SAVE_ALL" will restore the above
- * 'struct kernel_vm86_regs' with the then actual values.
- * Therefore, pt_regs in fact points to a complete 'kernel_vm86_struct'
- * in kernelspace, hence we need not reget the data from userspace.
- */
-#define VM86_TSS_ESP0 regs32
- struct pt_regs *regs32; /* here we save the pointer to the old regs */
-/*
- * The below is not part of the structure, but the stack layout continues
- * this way. In front of 'return-eip' may be some data, depending on
- * compilation, so we don't rely on this and save the pointer to 'oldregs'
- * in 'regs32' above.
- * However, with GCC-2.7.2 and the current CFLAGS you see exactly this:
-
- long return-eip; from call to vm86()
- struct pt_regs oldregs; user space registers as saved by syscall
- */
-};
-
struct vm86 {
struct vm86plus_struct __user *vm86_info;
+ struct pt_regs *regs32;
unsigned long v86flags;
unsigned long v86mask;
unsigned long saved_sp0;
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index f71b4b9..696ef76 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -67,9 +67,6 @@
*/


-#define KVM86 ((struct kernel_vm86_struct *)regs)
-
-
/*
* 8- and 16-bit register defines..
*/
@@ -152,7 +149,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
vm86->saved_sp0 = 0;
put_cpu();

- ret = KVM86->regs32;
+ ret = vm86->regs32;

lazy_load_gs(ret->gs);

@@ -194,29 +191,16 @@ out:


static int do_vm86_irq_handling(int subfunction, int irqnumber);
-static long do_sys_vm86(struct vm86plus_struct __user *v86, bool plus,
- struct kernel_vm86_struct *info);
+static long do_sys_vm86(struct vm86plus_struct __user *v86, bool plus);

SYSCALL_DEFINE1(vm86old, struct vm86_struct __user *, v86)
{
- struct kernel_vm86_struct info; /* declare this _on top_,
- * this avoids wasting of stack space.
- * This remains on the stack until we
- * return to 32 bit user space.
- */
-
- return do_sys_vm86((struct vm86plus_struct __user *) v86, false, &info);
+ return do_sys_vm86((struct vm86plus_struct __user *) v86, false);
}


SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg)
{
- struct kernel_vm86_struct info; /* declare this _on top_,
- * this avoids wasting of stack space.
- * This remains on the stack until we
- * return to 32 bit user space.
- */
-
switch (cmd) {
case VM86_REQUEST_IRQ:
case VM86_FREE_IRQ:
@@ -234,16 +218,17 @@ SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg)
}

/* we come here only for functions VM86_ENTER, VM86_ENTER_NO_BYPASS */
- return do_sys_vm86((struct vm86plus_struct __user *) arg, true, &info);
+ return do_sys_vm86((struct vm86plus_struct __user *) arg, true);
}


-static long do_sys_vm86(struct vm86plus_struct __user *v86, bool plus,
- struct kernel_vm86_struct *info)
+static long do_sys_vm86(struct vm86plus_struct __user *v86, bool plus)
{
struct tss_struct *tss;
struct task_struct *tsk = current;
struct vm86 *vm86 = tsk->thread.vm86;
+ struct kernel_vm86_regs vm86regs;
+ struct pt_regs *regs32 = current_pt_regs();
unsigned long err = 0;

if (!vm86) {
@@ -259,27 +244,27 @@ static long do_sys_vm86(struct vm86plus_struct __user *v86, bool plus,
sizeof(struct vm86plus_struct)))
return -EFAULT;

- memset(info, 0, sizeof(*info));
+ memset(&vm86regs, 0, sizeof(vm86regs));
get_user_try {
unsigned short seg;
- get_user_ex(info->regs.pt.bx, &v86->regs.ebx);
- get_user_ex(info->regs.pt.cx, &v86->regs.ecx);
- get_user_ex(info->regs.pt.dx, &v86->regs.edx);
- get_user_ex(info->regs.pt.si, &v86->regs.esi);
- get_user_ex(info->regs.pt.di, &v86->regs.edi);
- get_user_ex(info->regs.pt.bp, &v86->regs.ebp);
- get_user_ex(info->regs.pt.ax, &v86->regs.eax);
- get_user_ex(info->regs.pt.ip, &v86->regs.eip);
+ get_user_ex(vm86regs.pt.bx, &v86->regs.ebx);
+ get_user_ex(vm86regs.pt.cx, &v86->regs.ecx);
+ get_user_ex(vm86regs.pt.dx, &v86->regs.edx);
+ get_user_ex(vm86regs.pt.si, &v86->regs.esi);
+ get_user_ex(vm86regs.pt.di, &v86->regs.edi);
+ get_user_ex(vm86regs.pt.bp, &v86->regs.ebp);
+ get_user_ex(vm86regs.pt.ax, &v86->regs.eax);
+ get_user_ex(vm86regs.pt.ip, &v86->regs.eip);
get_user_ex(seg, &v86->regs.cs);
- info->regs.pt.cs = seg;
- get_user_ex(info->regs.pt.flags, &v86->regs.eflags);
- get_user_ex(info->regs.pt.sp, &v86->regs.esp);
+ vm86regs.pt.cs = seg;
+ get_user_ex(vm86regs.pt.flags, &v86->regs.eflags);
+ get_user_ex(vm86regs.pt.sp, &v86->regs.esp);
get_user_ex(seg, &v86->regs.ss);
- info->regs.pt.ss = seg;
- get_user_ex(info->regs.es, &v86->regs.es);
- get_user_ex(info->regs.ds, &v86->regs.ds);
- get_user_ex(info->regs.fs, &v86->regs.fs);
- get_user_ex(info->regs.gs, &v86->regs.gs);
+ vm86regs.pt.ss = seg;
+ get_user_ex(vm86regs.es, &v86->regs.es);
+ get_user_ex(vm86regs.ds, &v86->regs.ds);
+ get_user_ex(vm86regs.fs, &v86->regs.fs);
+ get_user_ex(vm86regs.gs, &v86->regs.gs);

get_user_ex(vm86->flags, &v86->flags);
get_user_ex(vm86->screen_bitmap, &v86->screen_bitmap);
@@ -302,8 +287,7 @@ static long do_sys_vm86(struct vm86plus_struct __user *v86, bool plus,
} else
memset(&vm86->vm86plus, 0,
sizeof(struct vm86plus_info_struct));
-
- info->regs32 = current_pt_regs();
+ vm86->regs32 = regs32;
vm86->vm86_info = v86;

/*
@@ -311,12 +295,12 @@ static long do_sys_vm86(struct vm86plus_struct __user *v86, bool plus,
* has set it up safely, so this makes sure interrupt etc flags are
* inherited from protected mode.
*/
- VEFLAGS = info->regs.pt.flags;
- info->regs.pt.flags &= SAFE_MASK;
- info->regs.pt.flags |= info->regs32->flags & ~SAFE_MASK;
- info->regs.pt.flags |= X86_VM_MASK;
+ VEFLAGS = vm86regs.pt.flags;
+ vm86regs.pt.flags &= SAFE_MASK;
+ vm86regs.pt.flags |= regs32->flags & ~SAFE_MASK;
+ vm86regs.pt.flags |= X86_VM_MASK;

- info->regs.pt.orig_ax = info->regs32->orig_ax;
+ vm86regs.pt.orig_ax = regs32->orig_ax;

switch (vm86->cpu_type) {
case CPU_286:
@@ -336,12 +320,13 @@ static long do_sys_vm86(struct vm86plus_struct __user *v86, bool plus,
/*
* Save old state, set default return value (%ax) to 0 (VM86_SIGNAL)
*/
- info->regs32->ax = VM86_SIGNAL;
+ regs32->ax = VM86_SIGNAL;
vm86->saved_sp0 = tsk->thread.sp0;
- lazy_save_gs(info->regs32->gs);
+ lazy_save_gs(regs32->gs);

tss = &per_cpu(cpu_tss, get_cpu());
- tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
+ /* Set new sp0 right below 32-bit regs */
+ tsk->thread.sp0 = (unsigned long) regs32;
if (cpu_has_sep)
tsk->thread.sysenter_cs = 0;
load_sp0(tss, &tsk->thread);
@@ -364,7 +349,7 @@ static long do_sys_vm86(struct vm86plus_struct __user *v86, bool plus,
#endif
"jmp resume_userspace"
: /* no outputs */
- :"r" (&info->regs), "r" (task_thread_info(tsk)), "r" (0));
+ :"r" (&vm86regs), "r" (task_thread_info(tsk)), "r" (0));
unreachable(); /* we never return here */
}

@@ -539,7 +524,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
{
unsigned long __user *intr_ptr;
unsigned long segoffs;
- struct kernel_vm86_info *vm86 = current->thread.vm86;
+ struct vm86 *vm86 = current->thread.vm86;

if (regs->pt.cs == BIOSSEG)
goto cannot_handle;
@@ -569,12 +554,14 @@ cannot_handle:

int handle_vm86_trap(struct kernel_vm86_regs *regs, long error_code, int trapno)
{
- if (current->thread.vm86->vm86plus.is_vm86pus) {
+ struct vm86 *vm86 = current->thread.vm86;
+
+ if (vm86->vm86plus.is_vm86pus) {
if ((trapno == 3) || (trapno == 1)) {
- KVM86->regs32->ax = VM86_TRAP + (trapno << 8);
+ vm86->regs32->ax = VM86_TRAP + (trapno << 8);
/* setting this flag forces the code in entry_32.S to
the path where we call save_v86_state() and change
- the stack pointer to KVM86->regs32 */
+ the stack pointer to regs32 */
set_thread_flag(TIF_NOTIFY_RESUME);
return 0;
}

Subject: [tip:x86/asm] x86/vm86: Use the normal pt_regs area for vm86

Commit-ID: 5ed92a8ab71f8865ba07811429c988c72299b315
Gitweb: http://git.kernel.org/tip/5ed92a8ab71f8865ba07811429c988c72299b315
Author: Brian Gerst <[email protected]>
AuthorDate: Wed, 29 Jul 2015 01:41:19 -0400
Committer: Ingo Molnar <[email protected]>
CommitDate: Fri, 31 Jul 2015 13:31:09 +0200

x86/vm86: Use the normal pt_regs area for vm86

Change to use the normal pt_regs area to enter and exit vm86
mode. This is done by increasing the padding at the top of the
stack to make room for the extra vm86 segment slots in the IRET
frame. It then saves the 32-bit regs in the off-stack vm86
data, and copies in the vm86 regs. Exiting back to 32-bit mode
does the reverse. This allows removing the hacks to jump
directly into the exit asm code due to having to change the
stack pointer. Returning normally from the vm86 syscall and the
exception handlers allows things like ptrace and auditing to work properly.

Signed-off-by: Brian Gerst <[email protected]>
Acked-by: Andy Lutomirski <[email protected]>
Cc: Andy Lutomirski <[email protected]>
Cc: Borislav Petkov <[email protected]>
Cc: Denys Vlasenko <[email protected]>
Cc: H. Peter Anvin <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
---
arch/x86/entry/entry_32.S | 24 +-------
arch/x86/include/asm/thread_info.h | 11 ++--
arch/x86/include/asm/vm86.h | 6 +-
arch/x86/kernel/signal.c | 3 +
arch/x86/kernel/vm86_32.c | 110 +++++++++++++++----------------------
5 files changed, 60 insertions(+), 94 deletions(-)

diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index 21dc60a..f940e24 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -525,34 +525,12 @@ work_resched:

work_notifysig: # deal with pending signals and
# notify-resume requests
-#ifdef CONFIG_VM86
- testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
- movl %esp, %eax
- jnz work_notifysig_v86 # returning to kernel-space or
- # vm86-space
-1:
-#else
- movl %esp, %eax
-#endif
TRACE_IRQS_ON
ENABLE_INTERRUPTS(CLBR_NONE)
- movb PT_CS(%esp), %bl
- andb $SEGMENT_RPL_MASK, %bl
- cmpb $USER_RPL, %bl
- jb resume_kernel
+ movl %esp, %eax
xorl %edx, %edx
call do_notify_resume
jmp resume_userspace
-
-#ifdef CONFIG_VM86
- ALIGN
-work_notifysig_v86:
- pushl %ecx # save ti_flags for do_notify_resume
- call save_v86_state # %eax contains pt_regs pointer
- popl %ecx
- movl %eax, %esp
- jmp 1b
-#endif
END(work_pending)

# perform syscall exit tracing
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index 225ee54..fdad5c2 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -27,14 +27,17 @@
* Without this offset, that can result in a page fault. (We are
* careful that, in this case, the value we read doesn't matter.)
*
- * In vm86 mode, the hardware frame is much longer still, but we neither
- * access the extra members from NMI context, nor do we write such a
- * frame at sp0 at all.
+ * In vm86 mode, the hardware frame is much longer still, so add 16
+ * bytes to make room for the real-mode segments.
*
* x86_64 has a fixed-length stack frame.
*/
#ifdef CONFIG_X86_32
-# define TOP_OF_KERNEL_STACK_PADDING 8
+# ifdef CONFIG_VM86
+# define TOP_OF_KERNEL_STACK_PADDING 16
+# else
+# define TOP_OF_KERNEL_STACK_PADDING 8
+# endif
#else
# define TOP_OF_KERNEL_STACK_PADDING 0
#endif
diff --git a/arch/x86/include/asm/vm86.h b/arch/x86/include/asm/vm86.h
index 226d6c1..e45386e 100644
--- a/arch/x86/include/asm/vm86.h
+++ b/arch/x86/include/asm/vm86.h
@@ -29,7 +29,7 @@ struct kernel_vm86_regs {

struct vm86 {
struct vm86plus_struct __user *vm86_info;
- struct pt_regs *regs32;
+ struct pt_regs regs32;
unsigned long v86flags;
unsigned long v86mask;
unsigned long saved_sp0;
@@ -46,7 +46,7 @@ struct vm86 {

void handle_vm86_fault(struct kernel_vm86_regs *, long);
int handle_vm86_trap(struct kernel_vm86_regs *, long, int);
-struct pt_regs *save_v86_state(struct kernel_vm86_regs *);
+void save_v86_state(struct kernel_vm86_regs *, int);

struct task_struct;
void release_vm86_irqs(struct task_struct *);
@@ -69,6 +69,8 @@ static inline int handle_vm86_trap(struct kernel_vm86_regs *a, long b, int c)
return 0;
}

+static inline void save_v86_state(struct kernel_vm86_regs *a, int b) { }
+
#define free_vm86(t) do { } while(0)

#endif /* CONFIG_VM86 */
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index 7e88cc7..bfd736e 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -635,6 +635,9 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs)
bool stepping, failed;
struct fpu *fpu = &current->thread.fpu;

+ if (v8086_mode(regs))
+ save_v86_state((struct kernel_vm86_regs *) regs, VM86_SIGNAL);
+
/* Are we from a system call? */
if (syscall_get_nr(current, regs) >= 0) {
/* If so, check system call restarting.. */
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index 696ef76..ffe98ec 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -50,6 +50,7 @@
#include <asm/io.h>
#include <asm/tlbflush.h>
#include <asm/irq.h>
+#include <asm/traps.h>

/*
* Known problems:
@@ -87,10 +88,9 @@
#define SAFE_MASK (0xDD5)
#define RETURN_MASK (0xDFF)

-struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
+void save_v86_state(struct kernel_vm86_regs *regs, int retval)
{
struct tss_struct *tss;
- struct pt_regs *ret;
struct task_struct *tsk = current;
struct vm86plus_struct __user *user;
struct vm86 *vm86 = current->thread.vm86;
@@ -149,11 +149,11 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
vm86->saved_sp0 = 0;
put_cpu();

- ret = vm86->regs32;
+ memcpy(&regs->pt, &vm86->regs32, sizeof(struct pt_regs));

- lazy_load_gs(ret->gs);
+ lazy_load_gs(vm86->regs32.gs);

- return ret;
+ regs->pt.ax = retval;
}

static void mark_screen_rdonly(struct mm_struct *mm)
@@ -228,7 +228,7 @@ static long do_sys_vm86(struct vm86plus_struct __user *v86, bool plus)
struct task_struct *tsk = current;
struct vm86 *vm86 = tsk->thread.vm86;
struct kernel_vm86_regs vm86regs;
- struct pt_regs *regs32 = current_pt_regs();
+ struct pt_regs *regs = current_pt_regs();
unsigned long err = 0;

if (!vm86) {
@@ -287,7 +287,8 @@ static long do_sys_vm86(struct vm86plus_struct __user *v86, bool plus)
} else
memset(&vm86->vm86plus, 0,
sizeof(struct vm86plus_info_struct));
- vm86->regs32 = regs32;
+
+ memcpy(&vm86->regs32, regs, sizeof(struct pt_regs));
vm86->vm86_info = v86;

/*
@@ -297,10 +298,10 @@ static long do_sys_vm86(struct vm86plus_struct __user *v86, bool plus)
*/
VEFLAGS = vm86regs.pt.flags;
vm86regs.pt.flags &= SAFE_MASK;
- vm86regs.pt.flags |= regs32->flags & ~SAFE_MASK;
+ vm86regs.pt.flags |= regs->flags & ~SAFE_MASK;
vm86regs.pt.flags |= X86_VM_MASK;

- vm86regs.pt.orig_ax = regs32->orig_ax;
+ vm86regs.pt.orig_ax = regs->orig_ax;

switch (vm86->cpu_type) {
case CPU_286:
@@ -318,15 +319,14 @@ static long do_sys_vm86(struct vm86plus_struct __user *v86, bool plus)
}

/*
- * Save old state, set default return value (%ax) to 0 (VM86_SIGNAL)
+ * Save old state
*/
- regs32->ax = VM86_SIGNAL;
vm86->saved_sp0 = tsk->thread.sp0;
- lazy_save_gs(regs32->gs);
+ lazy_save_gs(vm86->regs32.gs);

tss = &per_cpu(cpu_tss, get_cpu());
- /* Set new sp0 right below 32-bit regs */
- tsk->thread.sp0 = (unsigned long) regs32;
+ /* make room for real-mode segments */
+ tsk->thread.sp0 += 16;
if (cpu_has_sep)
tsk->thread.sysenter_cs = 0;
load_sp0(tss, &tsk->thread);
@@ -335,41 +335,14 @@ static long do_sys_vm86(struct vm86plus_struct __user *v86, bool plus)
if (vm86->flags & VM86_SCREEN_BITMAP)
mark_screen_rdonly(tsk->mm);

- /*call __audit_syscall_exit since we do not exit via the normal paths */
-#ifdef CONFIG_AUDITSYSCALL
- if (unlikely(current->audit_context))
- __audit_syscall_exit(1, 0);
-#endif
-
- __asm__ __volatile__(
- "movl %0,%%esp\n\t"
- "movl %1,%%ebp\n\t"
-#ifdef CONFIG_X86_32_LAZY_GS
- "mov %2, %%gs\n\t"
-#endif
- "jmp resume_userspace"
- : /* no outputs */
- :"r" (&vm86regs), "r" (task_thread_info(tsk)), "r" (0));
- unreachable(); /* we never return here */
-}
-
-static inline void return_to_32bit(struct kernel_vm86_regs *regs16, int retval)
-{
- struct pt_regs *regs32;
-
- regs32 = save_v86_state(regs16);
- regs32->ax = retval;
- __asm__ __volatile__("movl %0,%%esp\n\t"
- "movl %1,%%ebp\n\t"
- "jmp resume_userspace"
- : : "r" (regs32), "r" (current_thread_info()));
+ memcpy((struct kernel_vm86_regs *)regs, &vm86regs, sizeof(vm86regs));
+ force_iret();
+ return regs->ax;
}

static inline void set_IF(struct kernel_vm86_regs *regs)
{
VEFLAGS |= X86_EFLAGS_VIF;
- if (VEFLAGS & X86_EFLAGS_VIP)
- return_to_32bit(regs, VM86_STI);
}

static inline void clear_IF(struct kernel_vm86_regs *regs)
@@ -549,7 +522,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
return;

cannot_handle:
- return_to_32bit(regs, VM86_INTx + (i << 8));
+ save_v86_state(regs, VM86_INTx + (i << 8));
}

int handle_vm86_trap(struct kernel_vm86_regs *regs, long error_code, int trapno)
@@ -558,11 +531,7 @@ int handle_vm86_trap(struct kernel_vm86_regs *regs, long error_code, int trapno)

if (vm86->vm86plus.is_vm86pus) {
if ((trapno == 3) || (trapno == 1)) {
- vm86->regs32->ax = VM86_TRAP + (trapno << 8);
- /* setting this flag forces the code in entry_32.S to
- the path where we call save_v86_state() and change
- the stack pointer to regs32 */
- set_thread_flag(TIF_NOTIFY_RESUME);
+ save_v86_state(regs, VM86_TRAP + (trapno << 8));
return 0;
}
do_int(regs, trapno, (unsigned char __user *) (regs->pt.ss << 4), SP(regs));
@@ -588,12 +557,6 @@ void handle_vm86_fault(struct kernel_vm86_regs *regs, long error_code)
#define CHECK_IF_IN_TRAP \
if (vmpi->vm86dbg_active && vmpi->vm86dbg_TFpendig) \
newflags |= X86_EFLAGS_TF
-#define VM86_FAULT_RETURN do { \
- if (vmpi->force_return_for_pic && (VEFLAGS & (X86_EFLAGS_IF | X86_EFLAGS_VIF))) \
- return_to_32bit(regs, VM86_PICRETURN); \
- if (orig_flags & X86_EFLAGS_TF) \
- handle_vm86_trap(regs, 0, 1); \
- return; } while (0)

orig_flags = *(unsigned short *)&regs->pt.flags;

@@ -632,7 +595,7 @@ void handle_vm86_fault(struct kernel_vm86_regs *regs, long error_code)
SP(regs) -= 2;
}
IP(regs) = ip;
- VM86_FAULT_RETURN;
+ goto vm86_fault_return;

/* popf */
case 0x9d:
@@ -652,7 +615,7 @@ void handle_vm86_fault(struct kernel_vm86_regs *regs, long error_code)
else
set_vflags_short(newflags, regs);

- VM86_FAULT_RETURN;
+ goto check_vip;
}

/* int xx */
@@ -660,8 +623,10 @@ void handle_vm86_fault(struct kernel_vm86_regs *regs, long error_code)
int intno = popb(csp, ip, simulate_sigsegv);
IP(regs) = ip;
if (vmpi->vm86dbg_active) {
- if ((1 << (intno & 7)) & vmpi->vm86dbg_intxxtab[intno >> 3])
- return_to_32bit(regs, VM86_INTx + (intno << 8));
+ if ((1 << (intno & 7)) & vmpi->vm86dbg_intxxtab[intno >> 3]) {
+ save_v86_state(regs, VM86_INTx + (intno << 8));
+ return;
+ }
}
do_int(regs, intno, ssp, sp);
return;
@@ -692,14 +657,14 @@ void handle_vm86_fault(struct kernel_vm86_regs *regs, long error_code)
} else {
set_vflags_short(newflags, regs);
}
- VM86_FAULT_RETURN;
+ goto check_vip;
}

/* cli */
case 0xfa:
IP(regs) = ip;
clear_IF(regs);
- VM86_FAULT_RETURN;
+ goto vm86_fault_return;

/* sti */
/*
@@ -711,12 +676,27 @@ void handle_vm86_fault(struct kernel_vm86_regs *regs, long error_code)
case 0xfb:
IP(regs) = ip;
set_IF(regs);
- VM86_FAULT_RETURN;
+ goto check_vip;

default:
- return_to_32bit(regs, VM86_UNKNOWN);
+ save_v86_state(regs, VM86_UNKNOWN);
+ }
+
+ return;
+
+check_vip:
+ if (VEFLAGS & X86_EFLAGS_VIP) {
+ save_v86_state(regs, VM86_STI);
+ return;
}

+vm86_fault_return:
+ if (vmpi->force_return_for_pic && (VEFLAGS & (X86_EFLAGS_IF | X86_EFLAGS_VIF))) {
+ save_v86_state(regs, VM86_PICRETURN);
+ return;
+ }
+ if (orig_flags & X86_EFLAGS_TF)
+ handle_vm86_trap(regs, 0, X86_TRAP_DB);
return;

simulate_sigsegv:
@@ -730,7 +710,7 @@ simulate_sigsegv:
* should be a mixture of the two, but how do we
* get the information? [KD]
*/
- return_to_32bit(regs, VM86_UNKNOWN);
+ save_v86_state(regs, VM86_UNKNOWN);
}

/* ---------------- vm86 special IRQ passing stuff ----------------- */

Subject: [tip:x86/asm] x86/vm86: Move the vm86 IRQ definitions to vm86.h

Commit-ID: af3e565a8542c4be699a0403b88fd6c691f5914f
Gitweb: http://git.kernel.org/tip/af3e565a8542c4be699a0403b88fd6c691f5914f
Author: Ingo Molnar <[email protected]>
AuthorDate: Fri, 31 Jul 2015 10:59:20 +0200
Committer: Ingo Molnar <[email protected]>
CommitDate: Fri, 31 Jul 2015 13:31:10 +0200

x86/vm86: Move the vm86 IRQ definitions to vm86.h

Move vm86 specific definitions from irq_vectors.h to vm86.h.

Based on patch from Brian Gerst.

Originally-from: Brian Gerst <[email protected]>
Cc: Andy Lutomirski <[email protected]>
Cc: Borislav Petkov <[email protected]>
Cc: Brian Gerst <[email protected]>
Cc: Denys Vlasenko <[email protected]>
Cc: H. Peter Anvin <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
---
arch/x86/include/asm/irq_vectors.h | 10 ----------
arch/x86/include/asm/vm86.h | 15 ++++++++++++++-
2 files changed, 14 insertions(+), 11 deletions(-)

diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h
index 4c2d2eb..6ca9fd6 100644
--- a/arch/x86/include/asm/irq_vectors.h
+++ b/arch/x86/include/asm/irq_vectors.h
@@ -117,16 +117,6 @@

#define FPU_IRQ 13

-#define FIRST_VM86_IRQ 3
-#define LAST_VM86_IRQ 15
-
-#ifndef __ASSEMBLY__
-static inline int invalid_vm86_irq(int irq)
-{
- return irq < FIRST_VM86_IRQ || irq > LAST_VM86_IRQ;
-}
-#endif
-
/*
* Size the maximum number of interrupts.
*
diff --git a/arch/x86/include/asm/vm86.h b/arch/x86/include/asm/vm86.h
index e45386e..b063196 100644
--- a/arch/x86/include/asm/vm86.h
+++ b/arch/x86/include/asm/vm86.h
@@ -49,7 +49,6 @@ int handle_vm86_trap(struct kernel_vm86_regs *, long, int);
void save_v86_state(struct kernel_vm86_regs *, int);

struct task_struct;
-void release_vm86_irqs(struct task_struct *);

#define free_vm86(t) do { \
struct thread_struct *__t = (t); \
@@ -59,6 +58,20 @@ void release_vm86_irqs(struct task_struct *);
} \
} while (0)

+/*
+ * Support for VM86 programs to request interrupts for
+ * real mode hardware drivers:
+ */
+#define FIRST_VM86_IRQ 3
+#define LAST_VM86_IRQ 15
+
+static inline int invalid_vm86_irq(int irq)
+{
+ return irq < FIRST_VM86_IRQ || irq > LAST_VM86_IRQ;
+}
+
+void release_vm86_irqs(struct task_struct *);
+
#else

#define handle_vm86_fault(a, b)

Subject: [tip:x86/asm] x86/vm86: Clean up vm86.h includes

Commit-ID: ba3e127ec105e790eeec4034d9769e018e4a1b54
Gitweb: http://git.kernel.org/tip/ba3e127ec105e790eeec4034d9769e018e4a1b54
Author: Brian Gerst <[email protected]>
AuthorDate: Wed, 29 Jul 2015 01:41:21 -0400
Committer: Ingo Molnar <[email protected]>
CommitDate: Fri, 31 Jul 2015 13:31:10 +0200

x86/vm86: Clean up vm86.h includes

vm86.h was being implicitly included in a lot of places via
processor.h, which in turn got it from math_emu.h. Break that
chain and explicitly include vm86.h in all files that need it.
Also remove unused vm86 field from math_emu_info.

Signed-off-by: Brian Gerst <[email protected]>
Cc: Andy Lutomirski <[email protected]>
Cc: Borislav Petkov <[email protected]>
Cc: Denys Vlasenko <[email protected]>
Cc: H. Peter Anvin <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
[ Fixed build failure. ]
Signed-off-by: Ingo Molnar <[email protected]>
---
arch/x86/include/asm/math_emu.h | 6 +-----
arch/x86/include/asm/syscalls.h | 1 +
arch/x86/kernel/process_32.c | 1 +
arch/x86/kernel/signal.c | 1 +
arch/x86/kernel/traps.c | 1 +
arch/x86/kernel/vm86_32.c | 1 +
arch/x86/math-emu/get_address.c | 1 +
arch/x86/mm/fault.c | 1 +
drivers/scsi/dpt_i2o.c | 3 +++
9 files changed, 11 insertions(+), 5 deletions(-)

diff --git a/arch/x86/include/asm/math_emu.h b/arch/x86/include/asm/math_emu.h
index 031f626..0d9b14f 100644
--- a/arch/x86/include/asm/math_emu.h
+++ b/arch/x86/include/asm/math_emu.h
@@ -2,7 +2,6 @@
#define _ASM_X86_MATH_EMU_H

#include <asm/ptrace.h>
-#include <asm/vm86.h>

/* This structure matches the layout of the data saved to the stack
following a device-not-present interrupt, part of it saved
@@ -10,9 +9,6 @@
*/
struct math_emu_info {
long ___orig_eip;
- union {
- struct pt_regs *regs;
- struct kernel_vm86_regs *vm86;
- };
+ struct pt_regs *regs;
};
#endif /* _ASM_X86_MATH_EMU_H */
diff --git a/arch/x86/include/asm/syscalls.h b/arch/x86/include/asm/syscalls.h
index 592a6a6..91dfcaf 100644
--- a/arch/x86/include/asm/syscalls.h
+++ b/arch/x86/include/asm/syscalls.h
@@ -37,6 +37,7 @@ asmlinkage long sys_get_thread_area(struct user_desc __user *);
asmlinkage unsigned long sys_sigreturn(void);

/* kernel/vm86_32.c */
+struct vm86_struct;
asmlinkage long sys_vm86old(struct vm86_struct __user *);
asmlinkage long sys_vm86(unsigned long, unsigned long);

diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index f73c962..c13df2c 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -53,6 +53,7 @@
#include <asm/syscalls.h>
#include <asm/debugreg.h>
#include <asm/switch_to.h>
+#include <asm/vm86.h>

asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
asmlinkage void ret_from_kernel_thread(void) __asm__("ret_from_kernel_thread");
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index bfd736e..07eb844 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -31,6 +31,7 @@
#include <asm/vdso.h>
#include <asm/mce.h>
#include <asm/sighandling.h>
+#include <asm/vm86.h>

#ifdef CONFIG_X86_64
#include <asm/proto.h>
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 8e65d8a..86a82ea 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -62,6 +62,7 @@
#include <asm/fpu/xstate.h>
#include <asm/trace/mpx.h>
#include <asm/mpx.h>
+#include <asm/vm86.h>

#ifdef CONFIG_X86_64
#include <asm/x86_init.h>
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index ffe98ec..0de1f66 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -51,6 +51,7 @@
#include <asm/tlbflush.h>
#include <asm/irq.h>
#include <asm/traps.h>
+#include <asm/vm86.h>

/*
* Known problems:
diff --git a/arch/x86/math-emu/get_address.c b/arch/x86/math-emu/get_address.c
index 6ef5e99..a2eefb1 100644
--- a/arch/x86/math-emu/get_address.c
+++ b/arch/x86/math-emu/get_address.c
@@ -21,6 +21,7 @@

#include <asm/uaccess.h>
#include <asm/desc.h>
+#include <asm/vm86.h>

#include "fpu_system.h"
#include "exception.h"
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 34a368d..eef44d9 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -20,6 +20,7 @@
#include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
#include <asm/fixmap.h> /* VSYSCALL_ADDR */
#include <asm/vsyscall.h> /* emulate_vsyscall */
+#include <asm/vm86.h> /* struct vm86 */

#define CREATE_TRACE_POINTS
#include <asm/trace/exceptions.h>
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index f35ed53..d4cda5e 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -1924,6 +1924,9 @@ static void adpt_alpha_info(sysInfo_S* si)
#endif

#if defined __i386__
+
+#include <uapi/asm/vm86.h>
+
static void adpt_i386_info(sysInfo_S* si)
{
// This is all the info we need for now

Subject: [tip:x86/asm] x86/vm86: Rename vm86->vm86_info to user_vm86

Commit-ID: 1342635638cba9b7c8eac776da5e54390d14d313
Gitweb: http://git.kernel.org/tip/1342635638cba9b7c8eac776da5e54390d14d313
Author: Brian Gerst <[email protected]>
AuthorDate: Wed, 29 Jul 2015 01:41:22 -0400
Committer: Ingo Molnar <[email protected]>
CommitDate: Fri, 31 Jul 2015 13:31:11 +0200

x86/vm86: Rename vm86->vm86_info to user_vm86

Make it clearer that this is the pointer to the userspace vm86
state area.

Signed-off-by: Brian Gerst <[email protected]>
Cc: Andy Lutomirski <[email protected]>
Cc: Borislav Petkov <[email protected]>
Cc: Denys Vlasenko <[email protected]>
Cc: H. Peter Anvin <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
---
arch/x86/include/asm/vm86.h | 2 +-
arch/x86/kernel/vm86_32.c | 70 +++++++++++++++++++++++----------------------
2 files changed, 37 insertions(+), 35 deletions(-)

diff --git a/arch/x86/include/asm/vm86.h b/arch/x86/include/asm/vm86.h
index b063196..c93ae73 100644
--- a/arch/x86/include/asm/vm86.h
+++ b/arch/x86/include/asm/vm86.h
@@ -28,7 +28,7 @@ struct kernel_vm86_regs {
};

struct vm86 {
- struct vm86plus_struct __user *vm86_info;
+ struct vm86plus_struct __user *user_vm86;
struct pt_regs regs32;
unsigned long v86flags;
unsigned long v86mask;
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index 0de1f66..52aa33e 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -104,17 +104,17 @@ void save_v86_state(struct kernel_vm86_regs *regs, int retval)
*/
local_irq_enable();

- if (!vm86 || !vm86->vm86_info) {
- pr_alert("no vm86_info: BAD\n");
+ if (!vm86 || !vm86->user_vm86) {
+ pr_alert("no user_vm86: BAD\n");
do_exit(SIGSEGV);
}
set_flags(regs->pt.flags, VEFLAGS, X86_EFLAGS_VIF | vm86->v86mask);
- user = vm86->vm86_info;
+ user = vm86->user_vm86;

if (!access_ok(VERIFY_WRITE, user, vm86->vm86plus.is_vm86pus ?
sizeof(struct vm86plus_struct) :
sizeof(struct vm86_struct))) {
- pr_alert("could not access userspace vm86_info\n");
+ pr_alert("could not access userspace vm86 info\n");
do_exit(SIGSEGV);
}

@@ -139,7 +139,7 @@ void save_v86_state(struct kernel_vm86_regs *regs, int retval)
put_user_ex(vm86->screen_bitmap, &user->screen_bitmap);
} put_user_catch(err);
if (err) {
- pr_alert("could not access userspace vm86_info\n");
+ pr_alert("could not access userspace vm86 info\n");
do_exit(SIGSEGV);
}

@@ -192,11 +192,11 @@ out:


static int do_vm86_irq_handling(int subfunction, int irqnumber);
-static long do_sys_vm86(struct vm86plus_struct __user *v86, bool plus);
+static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus);

-SYSCALL_DEFINE1(vm86old, struct vm86_struct __user *, v86)
+SYSCALL_DEFINE1(vm86old, struct vm86_struct __user *, user_vm86)
{
- return do_sys_vm86((struct vm86plus_struct __user *) v86, false);
+ return do_sys_vm86((struct vm86plus_struct __user *) user_vm86, false);
}


@@ -223,7 +223,7 @@ SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg)
}


-static long do_sys_vm86(struct vm86plus_struct __user *v86, bool plus)
+static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus)
{
struct tss_struct *tss;
struct task_struct *tsk = current;
@@ -240,7 +240,7 @@ static long do_sys_vm86(struct vm86plus_struct __user *v86, bool plus)
if (vm86->saved_sp0)
return -EPERM;

- if (!access_ok(VERIFY_READ, v86, plus ?
+ if (!access_ok(VERIFY_READ, user_vm86, plus ?
sizeof(struct vm86_struct) :
sizeof(struct vm86plus_struct)))
return -EFAULT;
@@ -248,40 +248,42 @@ static long do_sys_vm86(struct vm86plus_struct __user *v86, bool plus)
memset(&vm86regs, 0, sizeof(vm86regs));
get_user_try {
unsigned short seg;
- get_user_ex(vm86regs.pt.bx, &v86->regs.ebx);
- get_user_ex(vm86regs.pt.cx, &v86->regs.ecx);
- get_user_ex(vm86regs.pt.dx, &v86->regs.edx);
- get_user_ex(vm86regs.pt.si, &v86->regs.esi);
- get_user_ex(vm86regs.pt.di, &v86->regs.edi);
- get_user_ex(vm86regs.pt.bp, &v86->regs.ebp);
- get_user_ex(vm86regs.pt.ax, &v86->regs.eax);
- get_user_ex(vm86regs.pt.ip, &v86->regs.eip);
- get_user_ex(seg, &v86->regs.cs);
+ get_user_ex(vm86regs.pt.bx, &user_vm86->regs.ebx);
+ get_user_ex(vm86regs.pt.cx, &user_vm86->regs.ecx);
+ get_user_ex(vm86regs.pt.dx, &user_vm86->regs.edx);
+ get_user_ex(vm86regs.pt.si, &user_vm86->regs.esi);
+ get_user_ex(vm86regs.pt.di, &user_vm86->regs.edi);
+ get_user_ex(vm86regs.pt.bp, &user_vm86->regs.ebp);
+ get_user_ex(vm86regs.pt.ax, &user_vm86->regs.eax);
+ get_user_ex(vm86regs.pt.ip, &user_vm86->regs.eip);
+ get_user_ex(seg, &user_vm86->regs.cs);
vm86regs.pt.cs = seg;
- get_user_ex(vm86regs.pt.flags, &v86->regs.eflags);
- get_user_ex(vm86regs.pt.sp, &v86->regs.esp);
- get_user_ex(seg, &v86->regs.ss);
+ get_user_ex(vm86regs.pt.flags, &user_vm86->regs.eflags);
+ get_user_ex(vm86regs.pt.sp, &user_vm86->regs.esp);
+ get_user_ex(seg, &user_vm86->regs.ss);
vm86regs.pt.ss = seg;
- get_user_ex(vm86regs.es, &v86->regs.es);
- get_user_ex(vm86regs.ds, &v86->regs.ds);
- get_user_ex(vm86regs.fs, &v86->regs.fs);
- get_user_ex(vm86regs.gs, &v86->regs.gs);
-
- get_user_ex(vm86->flags, &v86->flags);
- get_user_ex(vm86->screen_bitmap, &v86->screen_bitmap);
- get_user_ex(vm86->cpu_type, &v86->cpu_type);
+ get_user_ex(vm86regs.es, &user_vm86->regs.es);
+ get_user_ex(vm86regs.ds, &user_vm86->regs.ds);
+ get_user_ex(vm86regs.fs, &user_vm86->regs.fs);
+ get_user_ex(vm86regs.gs, &user_vm86->regs.gs);
+
+ get_user_ex(vm86->flags, &user_vm86->flags);
+ get_user_ex(vm86->screen_bitmap, &user_vm86->screen_bitmap);
+ get_user_ex(vm86->cpu_type, &user_vm86->cpu_type);
} get_user_catch(err);
if (err)
return err;

- if (copy_from_user(&vm86->int_revectored, &v86->int_revectored,
+ if (copy_from_user(&vm86->int_revectored,
+ &user_vm86->int_revectored,
sizeof(struct revectored_struct)))
return -EFAULT;
- if (copy_from_user(&vm86->int21_revectored, &v86->int21_revectored,
+ if (copy_from_user(&vm86->int21_revectored,
+ &user_vm86->int21_revectored,
sizeof(struct revectored_struct)))
return -EFAULT;
if (plus) {
- if (copy_from_user(&vm86->vm86plus, &v86->vm86plus,
+ if (copy_from_user(&vm86->vm86plus, &user_vm86->vm86plus,
sizeof(struct vm86plus_info_struct)))
return -EFAULT;
vm86->vm86plus.is_vm86pus = 1;
@@ -290,7 +292,7 @@ static long do_sys_vm86(struct vm86plus_struct __user *v86, bool plus)
sizeof(struct vm86plus_info_struct));

memcpy(&vm86->regs32, regs, sizeof(struct pt_regs));
- vm86->vm86_info = v86;
+ vm86->user_vm86 = user_vm86;

/*
* The flags register is also special: we cannot trust that the user

Subject: [tip:x86/asm] x86/vm86: Rename vm86->v86flags and v86mask

Commit-ID: decd275e62d5eef4b947fab89652fa6afdadf2f2
Gitweb: http://git.kernel.org/tip/decd275e62d5eef4b947fab89652fa6afdadf2f2
Author: Brian Gerst <[email protected]>
AuthorDate: Wed, 29 Jul 2015 01:41:23 -0400
Committer: Ingo Molnar <[email protected]>
CommitDate: Fri, 31 Jul 2015 13:31:11 +0200

x86/vm86: Rename vm86->v86flags and v86mask

Rename v86flags to veflags, and v86mask to veflags_mask.

Signed-off-by: Brian Gerst <[email protected]>
Cc: Andy Lutomirski <[email protected]>
Cc: Borislav Petkov <[email protected]>
Cc: Denys Vlasenko <[email protected]>
Cc: H. Peter Anvin <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
---
arch/x86/include/asm/vm86.h | 4 ++--
arch/x86/kernel/vm86_32.c | 20 ++++++++++----------
2 files changed, 12 insertions(+), 12 deletions(-)

diff --git a/arch/x86/include/asm/vm86.h b/arch/x86/include/asm/vm86.h
index c93ae73..1e491f3 100644
--- a/arch/x86/include/asm/vm86.h
+++ b/arch/x86/include/asm/vm86.h
@@ -30,8 +30,8 @@ struct kernel_vm86_regs {
struct vm86 {
struct vm86plus_struct __user *user_vm86;
struct pt_regs regs32;
- unsigned long v86flags;
- unsigned long v86mask;
+ unsigned long veflags;
+ unsigned long veflags_mask;
unsigned long saved_sp0;

unsigned long flags;
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index 52aa33e..abd8b856 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -80,8 +80,8 @@
/*
* virtual flags (16 and 32-bit versions)
*/
-#define VFLAGS (*(unsigned short *)&(current->thread.vm86->v86flags))
-#define VEFLAGS (current->thread.vm86->v86flags)
+#define VFLAGS (*(unsigned short *)&(current->thread.vm86->veflags))
+#define VEFLAGS (current->thread.vm86->veflags)

#define set_flags(X, new, mask) \
((X) = ((X) & ~(mask)) | ((new) & (mask)))
@@ -108,7 +108,7 @@ void save_v86_state(struct kernel_vm86_regs *regs, int retval)
pr_alert("no user_vm86: BAD\n");
do_exit(SIGSEGV);
}
- set_flags(regs->pt.flags, VEFLAGS, X86_EFLAGS_VIF | vm86->v86mask);
+ set_flags(regs->pt.flags, VEFLAGS, X86_EFLAGS_VIF | vm86->veflags_mask);
user = vm86->user_vm86;

if (!access_ok(VERIFY_WRITE, user, vm86->vm86plus.is_vm86pus ?
@@ -308,16 +308,16 @@ static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus)

switch (vm86->cpu_type) {
case CPU_286:
- vm86->v86mask = 0;
+ vm86->veflags_mask = 0;
break;
case CPU_386:
- vm86->v86mask = X86_EFLAGS_NT | X86_EFLAGS_IOPL;
+ vm86->veflags_mask = X86_EFLAGS_NT | X86_EFLAGS_IOPL;
break;
case CPU_486:
- vm86->v86mask = X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL;
+ vm86->veflags_mask = X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL;
break;
default:
- vm86->v86mask = X86_EFLAGS_ID | X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL;
+ vm86->veflags_mask = X86_EFLAGS_ID | X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL;
break;
}

@@ -377,7 +377,7 @@ static inline void clear_AC(struct kernel_vm86_regs *regs)

static inline void set_vflags_long(unsigned long flags, struct kernel_vm86_regs *regs)
{
- set_flags(VEFLAGS, flags, current->thread.vm86->v86mask);
+ set_flags(VEFLAGS, flags, current->thread.vm86->veflags_mask);
set_flags(regs->pt.flags, flags, SAFE_MASK);
if (flags & X86_EFLAGS_IF)
set_IF(regs);
@@ -387,7 +387,7 @@ static inline void set_vflags_long(unsigned long flags, struct kernel_vm86_regs

static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_regs *regs)
{
- set_flags(VFLAGS, flags, current->thread.vm86->v86mask);
+ set_flags(VFLAGS, flags, current->thread.vm86->veflags_mask);
set_flags(regs->pt.flags, flags, SAFE_MASK);
if (flags & X86_EFLAGS_IF)
set_IF(regs);
@@ -402,7 +402,7 @@ static inline unsigned long get_vflags(struct kernel_vm86_regs *regs)
if (VEFLAGS & X86_EFLAGS_VIF)
flags |= X86_EFLAGS_IF;
flags |= X86_EFLAGS_IOPL;
- return flags | (VEFLAGS & current->thread.vm86->v86mask);
+ return flags | (VEFLAGS & current->thread.vm86->veflags_mask);
}

static inline int is_revectored(int nr, struct revectored_struct *bitmap)

2015-07-31 14:53:57

by Brian Gerst

[permalink] [raw]
Subject: Re: [tip:x86/asm] x86/vm86: Clean up vm86.h includes

On Fri, Jul 31, 2015 at 10:04 AM, tip-bot for Brian Gerst
<[email protected]> wrote:
> Commit-ID: ba3e127ec105e790eeec4034d9769e018e4a1b54
> Gitweb: http://git.kernel.org/tip/ba3e127ec105e790eeec4034d9769e018e4a1b54
> Author: Brian Gerst <[email protected]>
> AuthorDate: Wed, 29 Jul 2015 01:41:21 -0400
> Committer: Ingo Molnar <[email protected]>
> CommitDate: Fri, 31 Jul 2015 13:31:10 +0200
>
> x86/vm86: Clean up vm86.h includes
>
> vm86.h was being implicitly included in a lot of places via
> processor.h, which in turn got it from math_emu.h. Break that
> chain and explicitly include vm86.h in all files that need it.
> Also remove unused vm86 field from math_emu_info.
>
> Signed-off-by: Brian Gerst <[email protected]>
> Cc: Andy Lutomirski <[email protected]>
> Cc: Borislav Petkov <[email protected]>
> Cc: Denys Vlasenko <[email protected]>
> Cc: H. Peter Anvin <[email protected]>
> Cc: Linus Torvalds <[email protected]>
> Cc: Peter Zijlstra <[email protected]>
> Cc: Thomas Gleixner <[email protected]>
> Link: http://lkml.kernel.org/r/[email protected]
> [ Fixed build failure. ]
> Signed-off-by: Ingo Molnar <[email protected]>


> diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
> index f35ed53..d4cda5e 100644
> --- a/drivers/scsi/dpt_i2o.c
> +++ b/drivers/scsi/dpt_i2o.c
> @@ -1924,6 +1924,9 @@ static void adpt_alpha_info(sysInfo_S* si)
> #endif
>
> #if defined __i386__
> +
> +#include <uapi/asm/vm86.h>
> +
> static void adpt_i386_info(sysInfo_S* si)
> {
> // This is all the info we need for now

It would have been better to just remove the CPU_* defines from
this driver. boot_cpu_data.x86 is simply the family ID from CPUID,
and nowhere else uses defines like this.

--
Brian Gerst

2015-08-01 08:22:55

by Ingo Molnar

[permalink] [raw]
Subject: Re: [tip:x86/asm] x86/vm86: Clean up vm86.h includes


* Brian Gerst <[email protected]> wrote:

> On Fri, Jul 31, 2015 at 10:04 AM, tip-bot for Brian Gerst
> <[email protected]> wrote:
> > Commit-ID: ba3e127ec105e790eeec4034d9769e018e4a1b54
> > Gitweb: http://git.kernel.org/tip/ba3e127ec105e790eeec4034d9769e018e4a1b54
> > Author: Brian Gerst <[email protected]>
> > AuthorDate: Wed, 29 Jul 2015 01:41:21 -0400
> > Committer: Ingo Molnar <[email protected]>
> > CommitDate: Fri, 31 Jul 2015 13:31:10 +0200
> >
> > x86/vm86: Clean up vm86.h includes
> >
> > vm86.h was being implicitly included in a lot of places via
> > processor.h, which in turn got it from math_emu.h. Break that
> > chain and explicitly include vm86.h in all files that need it.
> > Also remove unused vm86 field from math_emu_info.
> >
> > Signed-off-by: Brian Gerst <[email protected]>
> > Cc: Andy Lutomirski <[email protected]>
> > Cc: Borislav Petkov <[email protected]>
> > Cc: Denys Vlasenko <[email protected]>
> > Cc: H. Peter Anvin <[email protected]>
> > Cc: Linus Torvalds <[email protected]>
> > Cc: Peter Zijlstra <[email protected]>
> > Cc: Thomas Gleixner <[email protected]>
> > Link: http://lkml.kernel.org/r/[email protected]
> > [ Fixed build failure. ]
> > Signed-off-by: Ingo Molnar <[email protected]>
>
>
> > diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
> > index f35ed53..d4cda5e 100644
> > --- a/drivers/scsi/dpt_i2o.c
> > +++ b/drivers/scsi/dpt_i2o.c
> > @@ -1924,6 +1924,9 @@ static void adpt_alpha_info(sysInfo_S* si)
> > #endif
> >
> > #if defined __i386__
> > +
> > +#include <uapi/asm/vm86.h>
> > +
> > static void adpt_i386_info(sysInfo_S* si)
> > {
> > // This is all the info we need for now
>
> It would have been better to just remove the CPU_* defines from
> this driver. boot_cpu_data.x86 is simply the family ID from CPUID,
> and nowhere else uses defines like this.

Please send a followup patch for that.

Thanks,

Ingo

2015-08-05 08:51:45

by Ingo Molnar

[permalink] [raw]
Subject: Re: [PATCH 5/8] x86/vm86: Add a separate config option for hardware IRQ handling


* Brian Gerst <[email protected]> wrote:

> >> Disabling even less-used code that could have system stability impact. We've
> >> discouraged user-mode drivers for a very long time. Ironically, other than
> >> being configured through the vm86 syscall, there isn't really anything
> >> vm86-specific about it. All it does is register an IRQ handler that sends a
> >> signal to the task.
> >
> > So is this actually used by anything? Could we get away with disabling it,
> > just to see whether anything cares?
>
> My best guess would be some very old X11 drivers that needed interrupts to run
> the Video BIOS code.

So let's keep it - but not complicate it with another layer of disabling logic.
People that rely on legacies will enable vm86 as a single block - they won't
necessarily know how deeply they rely on it.

What _would_ be useful is to have a 3-mode vm86 sysctl:

1: enabled
0: disabled
-1: disabled permanently (one-shot disabling after bootup)

That way a distro can permanently disable vm86 for a particular bootup by setting
it to -1 in /etc/sysctl.conf.

The kernel should default that setting to '0'.

Thanks,

Ingo