This patch changes assembly code that accesses thread_info to use %ebp
rather than %ebx.
This allows me to take advantage of the fact that, with the sysenter
register pop removal patch, %ebp is restored by user mode.
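For context, the point is that with that patch the user-side sysenter
stub saves and restores %ebp itself, so the kernel exit path no longer
has to. A sketch of such a stub (invented label, not the actual code
from that patch; the exact sequence may differ):

	/* Sketch of a user-mode sysenter stub: user code, not the
	 * kernel, saves and restores %ebp (and %ecx/%edx, which the
	 * sysenter/sysexit convention clobbers) around the call. */
	__asm__(
		"my_sysenter_stub:\n\t"
		"pushl %ecx\n\t"
		"pushl %edx\n\t"
		"pushl %ebp\n\t"
		"movl %esp, %ebp\n\t"	/* kernel finds the user stack here */
		"sysenter\n\t"
		/* the kernel arranges for sysexit to land here */
		"popl %ebp\n\t"
		"popl %edx\n\t"
		"popl %ecx\n\t"
		"ret\n"
	);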
diff --exclude-from=/home/ldb/src/exclude -urNdp linux-2.5.54-preldb/arch/i386/kernel/entry.S linux-2.5.54-ldb/arch/i386/kernel/entry.S
--- linux-2.5.54-preldb/arch/i386/kernel/entry.S 2003-01-06 16:01:40.000000000 +0100
+++ linux-2.5.54-ldb/arch/i386/kernel/entry.S 2003-01-06 04:54:58.000000000 +0100
@@ -145,16 +145,16 @@ ENTRY(lcall7)
# gates, which has to be cleaned up later..
pushl %eax
SAVE_ALL
- movl %esp, %ebx
- pushl %ebx
+ movl %esp, %ebp
+ pushl %ebp
pushl $0x7
do_lcall:
- movl EIP(%ebx), %eax # due to call gates, this is eflags, not eip..
- movl CS(%ebx), %edx # this is eip..
- movl EFLAGS(%ebx), %ecx # and this is cs..
- movl %eax,EFLAGS(%ebx) #
- movl %edx,EIP(%ebx) # Now we move them to their "normal" places
- movl %ecx,CS(%ebx) #
+ movl EIP(%ebp), %eax # due to call gates, this is eflags, not eip..
+ movl CS(%ebp), %edx # this is eip..
+ movl EFLAGS(%ebp), %ecx # and this is cs..
+ movl %eax,EFLAGS(%ebp) #
+ movl %edx,EIP(%ebp) # Now we move them to their "normal" places
+ movl %ecx,CS(%ebp) #
#
# Call gates don't clear TF and NT in eflags like
@@ -166,8 +166,8 @@ do_lcall:
pushl %eax
popfl
- andl $-8192, %ebx # GET_THREAD_INFO
- movl TI_EXEC_DOMAIN(%ebx), %edx # Get the execution domain
+ andl $-8192, %ebp # GET_THREAD_INFO
+ movl TI_EXEC_DOMAIN(%ebp), %edx # Get the execution domain
call *4(%edx) # Call the lcall7 handler for the domain
addl $4, %esp
popl %eax
@@ -178,8 +178,8 @@ ENTRY(lcall27)
# gates, which has to be cleaned up later..
pushl %eax
SAVE_ALL
- movl %esp, %ebx
- pushl %ebx
+ movl %esp, %ebp
+ pushl %ebp
pushl $0x27
jmp do_lcall
@@ -187,7 +187,7 @@ ENTRY(lcall27)
ENTRY(ret_from_fork)
# NOTE: this function takes a parameter but it's unused on x86.
call schedule_tail
- GET_THREAD_INFO(%ebx)
+ GET_THREAD_INFO(%ebp)
jmp syscall_exit
/*
@@ -202,7 +202,7 @@ ENTRY(ret_from_fork)
ret_from_exception:
preempt_stop
ret_from_intr:
- GET_THREAD_INFO(%ebx)
+ GET_THREAD_INFO(%ebp)
movl EFLAGS(%esp), %eax # mix EFLAGS and CS
movb CS(%esp), %al
testl $(VM_MASK | 3), %eax
@@ -211,7 +211,7 @@ ENTRY(resume_userspace)
cli # make sure we don't miss an interrupt
# setting need_resched or sigpending
# between sampling and the iret
- movl TI_FLAGS(%ebx), %ecx
+ movl TI_FLAGS(%ebp), %ecx
andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
# int/exception return?
jne work_pending
@@ -219,18 +219,18 @@ ENTRY(resume_userspace)
#ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
- cmpl $0,TI_PRE_COUNT(%ebx) # non-zero preempt_count ?
+ cmpl $0,TI_PRE_COUNT(%ebp) # non-zero preempt_count ?
jnz restore_all
need_resched:
- movl TI_FLAGS(%ebx), %ecx # need_resched set ?
+ movl TI_FLAGS(%ebp), %ecx # need_resched set ?
testb $_TIF_NEED_RESCHED, %cl
jz restore_all
testl $IF_MASK,EFLAGS(%esp) # interrupts off (execption path) ?
jz restore_all
- movl $PREEMPT_ACTIVE,TI_PRE_COUNT(%ebx)
+ movl $PREEMPT_ACTIVE,TI_PRE_COUNT(%ebp)
sti
call schedule
- movl $0,TI_PRE_COUNT(%ebx)
+ movl $0,TI_PRE_COUNT(%ebp)
cli
jmp need_resched
#endif
@@ -262,21 +262,21 @@ ENTRY(sysenter_entry)
pushl %eax
SAVE_ALL
- GET_THREAD_INFO(%ebx)
+ GET_THREAD_INFO(%ebp)
cmpl $(NR_syscalls), %eax
jae syscall_badsys
- testb $_TIF_SYSCALL_TRACE,TI_FLAGS(%ebx)
+ testb $_TIF_SYSCALL_TRACE,TI_FLAGS(%ebp)
jnz syscall_trace_entry
call *sys_call_table(,%eax,4)
movl %eax,EAX(%esp)
cli
- movl TI_FLAGS(%ebx), %ecx
+ movl TI_FLAGS(%ebp), %ecx
testw $_TIF_ALLWORK_MASK, %cx
jne syscall_exit_work
RESTORE_INT_REGS
@@ -286,11 +286,11 @@ ENTRY(sysenter_entry)
ENTRY(system_call)
pushl %eax # save orig_eax
SAVE_ALL
- GET_THREAD_INFO(%ebx)
+ GET_THREAD_INFO(%ebp)
cmpl $(NR_syscalls), %eax
jae syscall_badsys
# system call tracing in operation
- testb $_TIF_SYSCALL_TRACE,TI_FLAGS(%ebx)
+ testb $_TIF_SYSCALL_TRACE,TI_FLAGS(%ebp)
jnz syscall_trace_entry
syscall_call:
call *sys_call_table(,%eax,4)
@@ -299,7 +299,7 @@ syscall_exit:
cli # make sure we don't miss an interrupt
# setting need_resched or sigpending
# between sampling and the iret
- movl TI_FLAGS(%ebx), %ecx
+ movl TI_FLAGS(%ebp), %ecx
testw $_TIF_ALLWORK_MASK, %cx # current->work
jne syscall_exit_work
restore_all:
@@ -315,7 +315,7 @@ work_resched:
cli # make sure we don't miss an interrupt
# setting need_resched or sigpending
# between sampling and the iret
- movl TI_FLAGS(%ebx), %ecx
+ movl TI_FLAGS(%ebp), %ecx
andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
# than syscall tracing?
jz restore_all
@@ -370,7 +370,7 @@ syscall_exit_work:
syscall_fault:
pushl %eax # save orig_eax
SAVE_ALL
- GET_THREAD_INFO(%ebx)
+ GET_THREAD_INFO(%ebp)
movl $-EFAULT,EAX(%esp)
jmp resume_userspace
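The open-coded "andl $-8192" in the lcall7 path above is the same trick
GET_THREAD_INFO uses: the kernel stack is 8KB-sized and 8KB-aligned with
the thread_info at its bottom, so masking the low 13 bits of any stack
address yields the thread_info pointer. In C it is essentially what
include/asm-i386/thread_info.h does:

	/* Mask %esp down to the 8KB boundary where thread_info lives. */
	static inline struct thread_info *current_thread_info(void)
	{
		struct thread_info *ti;
		__asm__("andl %%esp, %0" : "=r" (ti) : "0" (~8191UL));
		return ti;
	}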
On Thu, 9 Jan 2003, Luca Barbieri wrote:
> This patch changes assembly code that accesses thread_info to use %ebp
> rather than %ebx.
>
> This allows me to take advantage of the fact that %ebp is restored by
> user mode in the sysenter register pop removal patch.
If you use EBP as a base register, e.g. "movl (%ebp), %eax", the access
will be relative to SS, not ES or DS. Is this what you want?
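For reference, the implicit segment depends only on the base register of
the memory operand: %ebp- and %esp-based accesses default to SS, anything
else defaults to DS, and an override prefix can always force a segment:

	/* Illustration only: default segments per base register. */
	__asm__(
		"movl (%ebx), %eax\n\t"		/* implicitly DS:(%ebx) */
		"movl (%ebp), %eax\n\t"		/* implicitly SS:(%ebp) */
		"movl %ds:(%ebp), %eax\n"	/* explicit override to DS */
	);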
Cheers,
Dick Johnson
On Thu, 9 Jan 2003, Luca Barbieri wrote:
>
> This patch changes assembly code that accesses thread_info to use %ebp
> rather than %ebx.
>
> This allows me to take advantage of the fact that %ebp is restored by
> user mode in the sysenter register pop removal patch.
Hmm.. Did you check what fork/execve/vm86 do? I know at least the vm86()
stuff sets up %ebx before calling the asm functions in entry.S; I bet
those need to be changed to use %ebp too with something like this.
Linus
On Thu, 9 Jan 2003, Richard B. Johnson wrote:
>
> If you use EBP as an index register, i.e., "movl (%ebp), %eax", it
> will be relative to the SS, not ES or DS. Is this what you want?
That's fine: both SS and DS are 32-bit flat segments everywhere in the
kernel (they have different descriptors, __KERNEL_DS vs __USER_DS, but
they do the same thing).
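A quick way to convince yourself is to decode the descriptors. The
values below are the cpu_gdt_table entries as they appear in
arch/i386/kernel/head.S around this time (treat the exact numbers as an
assumption and check your tree); both decode to base 0, limit 4GB:

	#include <stdio.h>

	/* Decode base and limit from an i386 segment descriptor. */
	static void decode(const char *name, unsigned long long d)
	{
		unsigned int base, limit;

		base = ((d >> 16) & 0xffffff) | (((d >> 56) & 0xff) << 24);
		limit = (d & 0xffff) | (((d >> 48) & 0xf) << 16);
		if ((d >> 55) & 1)	/* G=1: limit counted in 4KB pages */
			limit = (limit << 12) | 0xfff;
		printf("%s: base=0x%x limit=0x%x\n", name, base, limit);
	}

	int main(void)
	{
		decode("__KERNEL_DS (0x18)", 0x00cf92000000ffffULL);
		decode("__USER_DS  (0x2b)", 0x00cff2000000ffffULL);
		return 0;
	}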
Linus
> Hmm.. Did you check what fork
It only seems to use ret_from_fork, which sets up %ebp on its own.
> execve
Doesn't seem to use any function in entry.S.
> vm86 do? I know at least the vm86()
> stuff sets up %ebx before calling the asm functions in entry.S, I bet
> those need to be changed to use %ebp too with something like this.
Right. This should fix it (compiles but untested):
--- arch/i386/kernel/vm86.c~ 2003-01-02 04:21:07.000000000 +0100
+++ arch/i386/kernel/vm86.c 2003-01-09 21:24:58.000000000 +0100
@@ -298,9 +298,10 @@
__asm__ __volatile__(
"xorl %%eax,%%eax; movl %%eax,%%fs; movl %%eax,%%gs\n\t"
"movl %0,%%esp\n\t"
+ "movl %1,%%ebp\n\t"
"jmp resume_userspace"
: /* no outputs */
- :"r" (&info->regs), "b" (tsk->thread_info) : "ax");
+ :"r" (&info->regs), "r" (tsk->thread_info) : "ax");
/* we never return here */
}
@@ -311,8 +312,9 @@
regs32 = save_v86_state(regs16);
regs32->eax = retval;
__asm__ __volatile__("movl %0,%%esp\n\t"
+ "movl %1,%%ebp\n\t"
"jmp resume_userspace"
- : : "r" (regs32), "b" (current_thread_info()));
+ : : "r" (regs32), "r" (current_thread_info()));
}
static inline void set_IF(struct kernel_vm86_regs * regs)
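The explicit movl is there because GCC has a register constraint for
%ebx ("b") but none for %ebp, so the value has to be loaded into an
arbitrary register and moved by hand. A hypothetical alternative would
be a local register variable, but taking over the frame pointer that
way is fragile unless the function is compiled without one, which is
presumably why the explicit movl is the simpler choice:

	/* Untested sketch: pin thread_info into %ebp with a local
	 * register variable instead of an explicit movl. */
	{
		register struct thread_info *ti asm("ebp") = tsk->thread_info;

		__asm__ __volatile__(
			"movl %0,%%esp\n\t"
			"jmp resume_userspace"
			: /* no outputs */
			: "r" (&info->regs), "r" (ti)
			: "ax");
	}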