2007-11-21 10:19:45

by Roland McGrath

Subject: [PATCH 1/5] x86: get_desc_base


This defines the get_desc_base function in asm-x86/desc_64.h to match the
one in desc_32.h. If these two files ever get merged together, this
function could be the same in both.

Signed-off-by: Roland McGrath <[email protected]>

diff --git a/include/asm-x86/desc_64.h b/include/asm-x86/desc_64.h
index 7d9c938..70bd72f 100644
--- a/include/asm-x86/desc_64.h
+++ b/include/asm-x86/desc_64.h
@@ -199,6 +199,16 @@ static inline void load_LDT(mm_context_t *pc)

extern struct desc_ptr idt_descr;

+static inline unsigned long get_desc_base(const void *ptr)
+{
+ const u32 *desc = ptr;
+ unsigned long base;
+ base = ((desc[0] >> 16) & 0x0000ffff) |
+ ((desc[1] << 16) & 0x00ff0000) |
+ (desc[1] & 0xff000000);
+ return base;
+}
+
#endif /* !__ASSEMBLY__ */

#endif
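
A quick stand-alone illustration of the bit layout the new helper decodes
(just a sketch, not part of the patch; the set_desc_base() helper below is a
hypothetical stand-in, shown only to make the encoding visible):

#include <stdint.h>
#include <stdio.h>

/* Scatter a 32-bit base across the two 32-bit descriptor words,
 * leaving the limit/flag bits alone. */
static void set_desc_base(uint32_t desc[2], uint32_t base)
{
	desc[0] = (desc[0] & 0x0000ffff) | (base << 16);   /* base 15:0  */
	desc[1] = (desc[1] & 0x00ffff00) |
		  ((base >> 16) & 0x000000ff) |            /* base 23:16 */
		  (base & 0xff000000);                     /* base 31:24 */
}

/* Same calculation as the get_desc_base() added above. */
static uint32_t get_desc_base(const uint32_t desc[2])
{
	return ((desc[0] >> 16) & 0x0000ffff) |
	       ((desc[1] << 16) & 0x00ff0000) |
	       (desc[1] & 0xff000000);
}

int main(void)
{
	uint32_t desc[2] = { 0, 0 };

	set_desc_base(desc, 0x12345678);
	printf("%#x\n", get_desc_base(desc));   /* prints 0x12345678 */
	return 0;
}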


2007-11-21 10:20:51

by Roland McGrath

Subject: [PATCH 2/5] x86: use get_desc_base


This changes a couple of places to use the get_desc_base function.
They duplicated the same calculation with different but equivalent code.

Signed-off-by: Roland McGrath <[email protected]>

diff --git a/arch/x86/ia32/tls32.c b/arch/x86/ia32/tls32.c
index 1cc4340..5291596 100644
--- a/arch/x86/ia32/tls32.c
+++ b/arch/x86/ia32/tls32.c
@@ -85,11 +85,6 @@ asmlinkage long sys32_set_thread_area(struct user_desc __user *u_info)
* Get the current Thread-Local Storage area:
*/

-#define GET_BASE(desc) ( \
- (((desc)->a >> 16) & 0x0000ffff) | \
- (((desc)->b << 16) & 0x00ff0000) | \
- ( (desc)->b & 0xff000000) )
-
#define GET_LIMIT(desc) ( \
((desc)->a & 0x0ffff) | \
((desc)->b & 0xf0000) )
@@ -117,7 +112,7 @@ int do_get_thread_area(struct thread_struct *t, struct user_desc __user *u_info)

memset(&info, 0, sizeof(struct user_desc));
info.entry_number = idx;
- info.base_addr = GET_BASE(desc);
+ info.base_addr = get_desc_base(desc);
info.limit = GET_LIMIT(desc);
info.seg_32bit = GET_32BIT(desc);
info.contents = GET_CONTENTS(desc);
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 6309b27..b8d131d 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -453,11 +453,7 @@ static inline void set_32bit_tls(struct task_struct *t, int tls, u32 addr)

static inline u32 read_32bit_tls(struct task_struct *t, int tls)
{
- struct desc_struct *desc = (void *)t->thread.tls_array;
- desc += tls;
- return desc->base0 |
- (((u32)desc->base1) << 16) |
- (((u32)desc->base2) << 24);
+ return get_desc_base(&t->thread.tls_array[tls]);
}

/*

2007-11-21 10:22:20

by Roland McGrath

Subject: [PATCH 3/5] x86: ptrace fs/gs_base


The fs_base and gs_base fields are available in user_regs_struct.
But reading these via ptrace (PTRACE_GETREGS or PTRACE_PEEKUSR) does
not give a reliably useful value. The thread_struct fields are 0
when do_arch_prctl decided to use a GDT slot instead of MSR_FS_BASE,
which it does for a value under 1<<32.

This changes ptrace access to fs_base and gs_base to work like
PTRACE_ARCH_PRCTL does. That is, it reads the base address that
user-mode memory access using the fs/gs instruction prefixes will
use, regardless of how it's implemented in the kernel. The MSR vs
GDT choice is an implementation detail that is essentially hidden
from userland in actual use, and there is no reason for ptrace to
expose the internal implementation picture rather than the
user-mode semantic picture. When setting the value, this can
implicitly change the fsindex/gsindex value (also separately in
user_regs_struct), which is what happens when the thread calls
arch_prctl itself. In a PTRACE_SETREGS, the fs_base change is
applied after the fsindex change because of the order of the
struct, so a change the debugger made to fs_base has the intended
effect, though another part of the user_regs_struct will then read
back differently from what the debugger wrote.

This makes PTRACE_ARCH_PRCTL obsolete. We could consider declaring
it deprecated and removing it one day, though there is no hurry.
For the foreseeable future, debuggers have to assume they may be running on
an old kernel that does not report reliable fs_base/gs_base values in
user_regs_struct, and so will stick to PTRACE_ARCH_PRCTL anyway.

Signed-off-by: Roland McGrath <[email protected]>

diff --git a/arch/x86/kernel/ptrace_64.c b/arch/x86/kernel/ptrace_64.c
index 607085f..3ff7ddc 100644
--- a/arch/x86/kernel/ptrace_64.c
+++ b/arch/x86/kernel/ptrace_64.c
@@ -22,6 +22,7 @@
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
+#include <asm/prctl.h>
#include <asm/i387.h>
#include <asm/debugreg.h>
#include <asm/ldt.h>
@@ -260,12 +261,22 @@ static int putreg(struct task_struct *child,
case offsetof(struct user_regs_struct,fs_base):
if (value >= TASK_SIZE_OF(child))
return -EIO;
- child->thread.fs = value;
+ /*
+ * When changing the segment base, use do_arch_prctl
+ * to set either thread.fs or thread.fsindex and the
+ * corresponding GDT slot.
+ */
+ if (child->thread.fs != value)
+ return do_arch_prctl(child, ARCH_SET_FS, value);
return 0;
case offsetof(struct user_regs_struct,gs_base):
+ /*
+ * Exactly the same here as the %fs handling above.
+ */
if (value >= TASK_SIZE_OF(child))
return -EIO;
- child->thread.gs = value;
+ if (child->thread.gs != value)
+ return do_arch_prctl(child, ARCH_SET_GS, value);
return 0;
case offsetof(struct user_regs_struct, eflags):
value &= FLAG_MASK;
@@ -296,9 +307,25 @@ static unsigned long getreg(struct task_struct *child, unsigned long regno)
case offsetof(struct user_regs_struct, es):
return child->thread.es;
case offsetof(struct user_regs_struct, fs_base):
- return child->thread.fs;
+ /*
+ * do_arch_prctl may have used a GDT slot instead of
+ * the MSR. To userland, it appears the same either
+ * way, except the %fs segment selector might not be 0.
+ */
+ if (child->thread.fs != 0)
+ return child->thread.fs;
+ if (child->thread.fsindex != FS_TLS_SEL)
+ return 0;
+ return get_desc_base(&child->thread.tls_array[FS_TLS]);
case offsetof(struct user_regs_struct, gs_base):
- return child->thread.gs;
+ /*
+ * Exactly the same here as the %fs handling above.
+ */
+ if (child->thread.gs != 0)
+ return child->thread.gs;
+ if (child->thread.gsindex != GS_TLS_SEL)
+ return 0;
+ return get_desc_base(&child->thread.tls_array[GS_TLS]);
default:
regno = regno - sizeof(struct pt_regs);
val = get_stack_long(child, regno);
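
For a sense of what this buys a debugger, here is a hypothetical user-space
sketch (not from the patch; the read_fs_base() helper is made up for the
example) that reads the effective %fs base of a stopped 64-bit tracee via
PTRACE_PEEKUSER. With this change the returned value is meaningful whether
the kernel used MSR_FS_BASE or a GDT slot; on older kernels a debugger still
has to fall back to PTRACE_ARCH_PRCTL with ARCH_GET_FS, as noted above.

#include <errno.h>
#include <stddef.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/user.h>

/* Assumes pid is an already-attached, stopped tracee. */
static unsigned long read_fs_base(pid_t pid)
{
	long val;

	errno = 0;
	val = ptrace(PTRACE_PEEKUSER, pid,
		     (void *) offsetof(struct user, regs.fs_base), NULL);
	if (val == -1 && errno != 0)
		perror("PTRACE_PEEKUSER fs_base");
	return (unsigned long) val;
}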

2007-11-21 10:23:20

by Roland McGrath

Subject: [PATCH 4/5] x86: desc_empty


This replaces the desc_empty macro with an inline function. It now easily
handles any of the four different types used between the 32-bit and 64-bit
code to refer to these 8 bytes. It's identical in both asm-x86/processor_64.h
and asm-x86/processor_32.h, so if these files ever get merged this
function can be in the common code.

This also removes the desc_equal macro because nothing uses it.

Signed-off-by: Roland McGrath <[email protected]>

diff --git a/include/asm-x86/processor_32.h b/include/asm-x86/processor_32.h
index 13976b0..34e8063 100644
--- a/include/asm-x86/processor_32.h
+++ b/include/asm-x86/processor_32.h
@@ -30,11 +30,12 @@ struct desc_struct {
unsigned long a,b;
};

-#define desc_empty(desc) \
- (!((desc)->a | (desc)->b))
+static inline int desc_empty(const void *ptr)
+{
+ const u32 *desc = ptr;
+ return !(desc[0] | desc[1]);
+}

-#define desc_equal(desc1, desc2) \
- (((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b))
/*
* Default implementation of macro that returns current
* instruction pointer ("program counter").
diff --git a/include/asm-x86/processor_64.h b/include/asm-x86/processor_64.h
index e4f1997..2dd739a 100644
--- a/include/asm-x86/processor_64.h
+++ b/include/asm-x86/processor_64.h
@@ -32,11 +32,11 @@
#define VIP_MASK 0x00100000 /* virtual interrupt pending */
#define ID_MASK 0x00200000

-#define desc_empty(desc) \
- (!((desc)->a | (desc)->b))
-
-#define desc_equal(desc1, desc2) \
- (((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b))
+static inline int desc_empty(const void *ptr)
+{
+ const u32 *desc = ptr;
+ return !(desc[0] | desc[1]);
+}

/*
* Default implementation of macro that returns current
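
To see why the void-pointer signature helps, here is a tiny stand-alone
sketch (not kernel code; the two structs are simplified stand-ins for the
real descriptor types) showing that either flavor can be passed to
desc_empty() directly, with no casts or member-name dependence:

#include <stdio.h>

typedef unsigned int u32;

/* Simplified stand-ins for the kernel's descriptor types. */
struct desc_struct   { u32 a, b; };
struct n_desc_struct { u32 a, b; };

static inline int desc_empty(const void *ptr)
{
	const u32 *desc = ptr;
	return !(desc[0] | desc[1]);
}

int main(void)
{
	struct desc_struct   d  = { 0, 0 };
	struct n_desc_struct nd = { 0, 0x00409a00 };

	/* Either type can be passed directly, no casts needed. */
	printf("%d %d\n", desc_empty(&d), desc_empty(&nd));   /* prints "1 0" */
	return 0;
}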

2007-11-21 10:25:29

by Roland McGrath

Subject: [PATCH 5/5] x86: TLS cleanup


This consolidates the four different places that implemented the same
encoding magic for the GDT-slot 32-bit TLS support. The old tls32.c is
renamed and only slightly modified to be the shared implementation guts.

Signed-off-by: Roland McGrath <[email protected]>

diff --git a/arch/x86/ia32/Makefile b/arch/x86/ia32/Makefile
index e2edda2..3c8b746 100644
--- a/arch/x86/ia32/Makefile
+++ b/arch/x86/ia32/Makefile
@@ -2,7 +2,7 @@
# Makefile for the ia32 kernel emulation subsystem.
#

-obj-$(CONFIG_IA32_EMULATION) := ia32entry.o sys_ia32.o ia32_signal.o tls32.o \
+obj-$(CONFIG_IA32_EMULATION) := ia32entry.o sys_ia32.o ia32_signal.o \
ia32_binfmt.o fpu32.o ptrace32.o syscall32.o syscall32_syscall.o \
mmap32.o

diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
index df588f0..0a71fa9 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -644,8 +644,8 @@ ia32_sys_call_table:
.quad compat_sys_futex /* 240 */
.quad compat_sys_sched_setaffinity
.quad compat_sys_sched_getaffinity
- .quad sys32_set_thread_area
- .quad sys32_get_thread_area
+ .quad sys_set_thread_area
+ .quad sys_get_thread_area
.quad compat_sys_io_setup /* 245 */
.quad sys_io_destroy
.quad compat_sys_io_getevents
diff --git a/arch/x86/ia32/tls32.c b/arch/x86/ia32/tls32.c
deleted file mode 100644
index 5291596..0000000
--- a/arch/x86/ia32/tls32.c
+++ /dev/null
@@ -1,158 +0,0 @@
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/user.h>
-
-#include <asm/uaccess.h>
-#include <asm/desc.h>
-#include <asm/system.h>
-#include <asm/ldt.h>
-#include <asm/processor.h>
-#include <asm/proto.h>
-
-/*
- * sys_alloc_thread_area: get a yet unused TLS descriptor index.
- */
-static int get_free_idx(void)
-{
- struct thread_struct *t = &current->thread;
- int idx;
-
- for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
- if (desc_empty((struct n_desc_struct *)(t->tls_array) + idx))
- return idx + GDT_ENTRY_TLS_MIN;
- return -ESRCH;
-}
-
-/*
- * Set a given TLS descriptor:
- * When you want addresses > 32bit use arch_prctl()
- */
-int do_set_thread_area(struct thread_struct *t, struct user_desc __user *u_info)
-{
- struct user_desc info;
- struct n_desc_struct *desc;
- int cpu, idx;
-
- if (copy_from_user(&info, u_info, sizeof(info)))
- return -EFAULT;
-
- idx = info.entry_number;
-
- /*
- * index -1 means the kernel should try to find and
- * allocate an empty descriptor:
- */
- if (idx == -1) {
- idx = get_free_idx();
- if (idx < 0)
- return idx;
- if (put_user(idx, &u_info->entry_number))
- return -EFAULT;
- }
-
- if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
- return -EINVAL;
-
- desc = ((struct n_desc_struct *)t->tls_array) + idx - GDT_ENTRY_TLS_MIN;
-
- /*
- * We must not get preempted while modifying the TLS.
- */
- cpu = get_cpu();
-
- if (LDT_empty(&info)) {
- desc->a = 0;
- desc->b = 0;
- } else {
- desc->a = LDT_entry_a(&info);
- desc->b = LDT_entry_b(&info);
- }
- if (t == &current->thread)
- load_TLS(t, cpu);
-
- put_cpu();
- return 0;
-}
-
-asmlinkage long sys32_set_thread_area(struct user_desc __user *u_info)
-{
- return do_set_thread_area(&current->thread, u_info);
-}
-
-
-/*
- * Get the current Thread-Local Storage area:
- */
-
-#define GET_LIMIT(desc) ( \
- ((desc)->a & 0x0ffff) | \
- ((desc)->b & 0xf0000) )
-
-#define GET_32BIT(desc) (((desc)->b >> 22) & 1)
-#define GET_CONTENTS(desc) (((desc)->b >> 10) & 3)
-#define GET_WRITABLE(desc) (((desc)->b >> 9) & 1)
-#define GET_LIMIT_PAGES(desc) (((desc)->b >> 23) & 1)
-#define GET_PRESENT(desc) (((desc)->b >> 15) & 1)
-#define GET_USEABLE(desc) (((desc)->b >> 20) & 1)
-#define GET_LONGMODE(desc) (((desc)->b >> 21) & 1)
-
-int do_get_thread_area(struct thread_struct *t, struct user_desc __user *u_info)
-{
- struct user_desc info;
- struct n_desc_struct *desc;
- int idx;
-
- if (get_user(idx, &u_info->entry_number))
- return -EFAULT;
- if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
- return -EINVAL;
-
- desc = ((struct n_desc_struct *)t->tls_array) + idx - GDT_ENTRY_TLS_MIN;
-
- memset(&info, 0, sizeof(struct user_desc));
- info.entry_number = idx;
- info.base_addr = get_desc_base(desc);
- info.limit = GET_LIMIT(desc);
- info.seg_32bit = GET_32BIT(desc);
- info.contents = GET_CONTENTS(desc);
- info.read_exec_only = !GET_WRITABLE(desc);
- info.limit_in_pages = GET_LIMIT_PAGES(desc);
- info.seg_not_present = !GET_PRESENT(desc);
- info.useable = GET_USEABLE(desc);
- info.lm = GET_LONGMODE(desc);
-
- if (copy_to_user(u_info, &info, sizeof(info)))
- return -EFAULT;
- return 0;
-}
-
-asmlinkage long sys32_get_thread_area(struct user_desc __user *u_info)
-{
- return do_get_thread_area(&current->thread, u_info);
-}
-
-
-int ia32_child_tls(struct task_struct *p, struct pt_regs *childregs)
-{
- struct n_desc_struct *desc;
- struct user_desc info;
- struct user_desc __user *cp;
- int idx;
-
- cp = (void __user *)childregs->rsi;
- if (copy_from_user(&info, cp, sizeof(info)))
- return -EFAULT;
- if (LDT_empty(&info))
- return -EINVAL;
-
- idx = info.entry_number;
- if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
- return -EINVAL;
-
- desc = (struct n_desc_struct *)(p->thread.tls_array) + idx - GDT_ENTRY_TLS_MIN;
- desc->a = LDT_entry_a(&info);
- desc->b = LDT_entry_b(&info);
-
- return 0;
-}
diff --git a/arch/x86/kernel/Makefile_32 b/arch/x86/kernel/Makefile_32
index a7bc93c..e660584 100644
--- a/arch/x86/kernel/Makefile_32
+++ b/arch/x86/kernel/Makefile_32
@@ -10,6 +10,7 @@ obj-y := process_32.o signal_32.o entry_32.o traps_32.o irq_32.o \
pci-dma_32.o i386_ksyms_32.o i387_32.o bootflag.o e820_32.o\
quirks.o i8237.o topology.o alternative.o i8253.o tsc_32.o

+obj-y += tls.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-y += cpu/
obj-y += acpi/
diff --git a/arch/x86/kernel/Makefile_64 b/arch/x86/kernel/Makefile_64
index 5a88890..203a9d8 100644
--- a/arch/x86/kernel/Makefile_64
+++ b/arch/x86/kernel/Makefile_64
@@ -13,6 +13,8 @@ obj-y := process_64.o signal_64.o entry_64.o traps_64.o irq_64.o \
pci-dma_64.o pci-nommu_64.o alternative.o hpet.o tsc_64.o bugs_64.o \
i8253.o

+obj-$(CONFIG_IA32_EMULATION) += tls.o
+
obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-y += cpu/
obj-y += acpi/
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 7b89958..1521e6f 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -55,6 +55,7 @@

#include <asm/tlbflush.h>
#include <asm/cpu.h>
+#include <asm/proto.h>

asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");

@@ -480,33 +481,16 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
set_tsk_thread_flag(p, TIF_IO_BITMAP);
}

+ err = 0;
+
/*
* Set a new TLS for the child thread?
*/
- if (clone_flags & CLONE_SETTLS) {
- struct desc_struct *desc;
- struct user_desc info;
- int idx;
-
- err = -EFAULT;
- if (copy_from_user(&info, (void __user *)childregs->esi, sizeof(info)))
- goto out;
- err = -EINVAL;
- if (LDT_empty(&info))
- goto out;
-
- idx = info.entry_number;
- if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
- goto out;
-
- desc = p->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
- desc->a = LDT_entry_a(&info);
- desc->b = LDT_entry_b(&info);
- }
+ if (clone_flags & CLONE_SETTLS)
+ err = do_set_thread_area(p, -1,
+ (struct user_desc __user *)childregs->esi, 0);

- err = 0;
- out:
- if (err && p->thread.io_bitmap_ptr) {
+ if (err && p->thread.io_bitmap_ptr) {
kfree(p->thread.io_bitmap_ptr);
p->thread.io_bitmap_max = 0;
}
@@ -851,120 +835,6 @@ unsigned long get_wchan(struct task_struct *p)
return 0;
}

-/*
- * sys_alloc_thread_area: get a yet unused TLS descriptor index.
- */
-static int get_free_idx(void)
-{
- struct thread_struct *t = &current->thread;
- int idx;
-
- for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
- if (desc_empty(t->tls_array + idx))
- return idx + GDT_ENTRY_TLS_MIN;
- return -ESRCH;
-}
-
-/*
- * Set a given TLS descriptor:
- */
-asmlinkage int sys_set_thread_area(struct user_desc __user *u_info)
-{
- struct thread_struct *t = &current->thread;
- struct user_desc info;
- struct desc_struct *desc;
- int cpu, idx;
-
- if (copy_from_user(&info, u_info, sizeof(info)))
- return -EFAULT;
- idx = info.entry_number;
-
- /*
- * index -1 means the kernel should try to find and
- * allocate an empty descriptor:
- */
- if (idx == -1) {
- idx = get_free_idx();
- if (idx < 0)
- return idx;
- if (put_user(idx, &u_info->entry_number))
- return -EFAULT;
- }
-
- if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
- return -EINVAL;
-
- desc = t->tls_array + idx - GDT_ENTRY_TLS_MIN;
-
- /*
- * We must not get preempted while modifying the TLS.
- */
- cpu = get_cpu();
-
- if (LDT_empty(&info)) {
- desc->a = 0;
- desc->b = 0;
- } else {
- desc->a = LDT_entry_a(&info);
- desc->b = LDT_entry_b(&info);
- }
- load_TLS(t, cpu);
-
- put_cpu();
-
- return 0;
-}
-
-/*
- * Get the current Thread-Local Storage area:
- */
-
-#define GET_BASE(desc) ( \
- (((desc)->a >> 16) & 0x0000ffff) | \
- (((desc)->b << 16) & 0x00ff0000) | \
- ( (desc)->b & 0xff000000) )
-
-#define GET_LIMIT(desc) ( \
- ((desc)->a & 0x0ffff) | \
- ((desc)->b & 0xf0000) )
-
-#define GET_32BIT(desc) (((desc)->b >> 22) & 1)
-#define GET_CONTENTS(desc) (((desc)->b >> 10) & 3)
-#define GET_WRITABLE(desc) (((desc)->b >> 9) & 1)
-#define GET_LIMIT_PAGES(desc) (((desc)->b >> 23) & 1)
-#define GET_PRESENT(desc) (((desc)->b >> 15) & 1)
-#define GET_USEABLE(desc) (((desc)->b >> 20) & 1)
-
-asmlinkage int sys_get_thread_area(struct user_desc __user *u_info)
-{
- struct user_desc info;
- struct desc_struct *desc;
- int idx;
-
- if (get_user(idx, &u_info->entry_number))
- return -EFAULT;
- if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
- return -EINVAL;
-
- memset(&info, 0, sizeof(info));
-
- desc = current->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
-
- info.entry_number = idx;
- info.base_addr = GET_BASE(desc);
- info.limit = GET_LIMIT(desc);
- info.seg_32bit = GET_32BIT(desc);
- info.contents = GET_CONTENTS(desc);
- info.read_exec_only = !GET_WRITABLE(desc);
- info.limit_in_pages = GET_LIMIT_PAGES(desc);
- info.seg_not_present = !GET_PRESENT(desc);
- info.useable = GET_USEABLE(desc);
-
- if (copy_to_user(u_info, &info, sizeof(info)))
- return -EFAULT;
- return 0;
-}
-
unsigned long arch_align_stack(unsigned long sp)
{
if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index b8d131d..3fdbf78 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -513,7 +513,8 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long rsp,
if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_IA32_EMULATION
if (test_thread_flag(TIF_IA32))
- err = ia32_child_tls(p, childregs);
+ err = do_set_thread_area(p, -1,
+ (struct user_desc __user *)childregs->rsi, 0);
else
#endif
err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);
diff --git a/arch/x86/kernel/ptrace_32.c b/arch/x86/kernel/ptrace_32.c
index ff5431c..dd2e18b 100644
--- a/arch/x86/kernel/ptrace_32.c
+++ b/arch/x86/kernel/ptrace_32.c
@@ -24,6 +24,7 @@
#include <asm/debugreg.h>
#include <asm/ldt.h>
#include <asm/desc.h>
+#include <asm/proto.h>

/*
* does not yet catch signals sent when the child dies.
@@ -276,85 +277,6 @@ void ptrace_disable(struct task_struct *child)
clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
}

-/*
- * Perform get_thread_area on behalf of the traced child.
- */
-static int
-ptrace_get_thread_area(struct task_struct *child,
- int idx, struct user_desc __user *user_desc)
-{
- struct user_desc info;
- struct desc_struct *desc;
-
-/*
- * Get the current Thread-Local Storage area:
- */
-
-#define GET_BASE(desc) ( \
- (((desc)->a >> 16) & 0x0000ffff) | \
- (((desc)->b << 16) & 0x00ff0000) | \
- ( (desc)->b & 0xff000000) )
-
-#define GET_LIMIT(desc) ( \
- ((desc)->a & 0x0ffff) | \
- ((desc)->b & 0xf0000) )
-
-#define GET_32BIT(desc) (((desc)->b >> 22) & 1)
-#define GET_CONTENTS(desc) (((desc)->b >> 10) & 3)
-#define GET_WRITABLE(desc) (((desc)->b >> 9) & 1)
-#define GET_LIMIT_PAGES(desc) (((desc)->b >> 23) & 1)
-#define GET_PRESENT(desc) (((desc)->b >> 15) & 1)
-#define GET_USEABLE(desc) (((desc)->b >> 20) & 1)
-
- if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
- return -EINVAL;
-
- desc = child->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
-
- info.entry_number = idx;
- info.base_addr = GET_BASE(desc);
- info.limit = GET_LIMIT(desc);
- info.seg_32bit = GET_32BIT(desc);
- info.contents = GET_CONTENTS(desc);
- info.read_exec_only = !GET_WRITABLE(desc);
- info.limit_in_pages = GET_LIMIT_PAGES(desc);
- info.seg_not_present = !GET_PRESENT(desc);
- info.useable = GET_USEABLE(desc);
-
- if (copy_to_user(user_desc, &info, sizeof(info)))
- return -EFAULT;
-
- return 0;
-}
-
-/*
- * Perform set_thread_area on behalf of the traced child.
- */
-static int
-ptrace_set_thread_area(struct task_struct *child,
- int idx, struct user_desc __user *user_desc)
-{
- struct user_desc info;
- struct desc_struct *desc;
-
- if (copy_from_user(&info, user_desc, sizeof(info)))
- return -EFAULT;
-
- if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
- return -EINVAL;
-
- desc = child->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
- if (LDT_empty(&info)) {
- desc->a = 0;
- desc->b = 0;
- } else {
- desc->a = LDT_entry_a(&info);
- desc->b = LDT_entry_b(&info);
- }
-
- return 0;
-}
-
long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
struct user * dummy = NULL;
@@ -601,13 +523,17 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
}

case PTRACE_GET_THREAD_AREA:
- ret = ptrace_get_thread_area(child, addr,
- (struct user_desc __user *) data);
+ if (addr < 0)
+ return -EIO;
+ ret = do_get_thread_area(child, addr,
+ (struct user_desc __user *) data);
break;

case PTRACE_SET_THREAD_AREA:
- ret = ptrace_set_thread_area(child, addr,
- (struct user_desc __user *) data);
+ if (addr < 0)
+ return -EIO;
+ ret = do_set_thread_area(child, addr,
+ (struct user_desc __user *) data, 0);
break;

default:
diff --git a/arch/x86/kernel/ptrace_64.c b/arch/x86/kernel/ptrace_64.c
index 3ff7ddc..07de75d 100644
--- a/arch/x86/kernel/ptrace_64.c
+++ b/arch/x86/kernel/ptrace_64.c
@@ -474,23 +474,19 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
64bit debugger to fully examine them too. Better
don't use it against 64bit processes, use
PTRACE_ARCH_PRCTL instead. */
- case PTRACE_SET_THREAD_AREA: {
- struct user_desc __user *p;
- int old;
- p = (struct user_desc __user *)data;
- get_user(old, &p->entry_number);
- put_user(addr, &p->entry_number);
- ret = do_set_thread_area(&child->thread, p);
- put_user(old, &p->entry_number);
- break;
case PTRACE_GET_THREAD_AREA:
- p = (struct user_desc __user *)data;
- get_user(old, &p->entry_number);
- put_user(addr, &p->entry_number);
- ret = do_get_thread_area(&child->thread, p);
- put_user(old, &p->entry_number);
+ if (addr < 0)
+ return -EIO;
+ ret = do_get_thread_area(child, addr,
+ (struct user_desc __user *) data);
+
+ break;
+ case PTRACE_SET_THREAD_AREA:
+ if (addr < 0)
+ return -EIO;
+ ret = do_set_thread_area(child, addr,
+ (struct user_desc __user *) data, 0);
break;
- }
#endif
/* normal 64bit interface to access TLS data.
Works just like arch_prctl, except that the arguments
diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
new file mode 100644
index 0000000..0a1a744
--- /dev/null
+++ b/arch/x86/kernel/tls.c
@@ -0,0 +1,139 @@
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/user.h>
+
+#include <asm/uaccess.h>
+#include <asm/desc.h>
+#include <asm/system.h>
+#include <asm/ldt.h>
+#include <asm/processor.h>
+#include <asm/proto.h>
+
+/*
+ * sys_alloc_thread_area: get a yet unused TLS descriptor index.
+ */
+static int get_free_idx(void)
+{
+ struct thread_struct *t = &current->thread;
+ int idx;
+
+ for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
+ if (desc_empty(&t->tls_array[idx]))
+ return idx + GDT_ENTRY_TLS_MIN;
+ return -ESRCH;
+}
+
+/*
+ * Set a given TLS descriptor:
+ */
+int do_set_thread_area(struct task_struct *p, int idx,
+ struct user_desc __user *u_info,
+ int can_allocate)
+{
+ struct thread_struct *t = &p->thread;
+ struct user_desc info;
+ u32 *desc;
+ int cpu;
+
+ if (copy_from_user(&info, u_info, sizeof(info)))
+ return -EFAULT;
+
+ if (idx == -1)
+ idx = info.entry_number;
+
+ /*
+ * index -1 means the kernel should try to find and
+ * allocate an empty descriptor:
+ */
+ if (idx == -1 && can_allocate) {
+ idx = get_free_idx();
+ if (idx < 0)
+ return idx;
+ if (put_user(idx, &u_info->entry_number))
+ return -EFAULT;
+ }
+
+ if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
+ return -EINVAL;
+
+ desc = (u32 *) &t->tls_array[idx - GDT_ENTRY_TLS_MIN];
+
+ /*
+ * We must not get preempted while modifying the TLS.
+ */
+ cpu = get_cpu();
+
+ if (LDT_empty(&info)) {
+ desc[0] = 0;
+ desc[1] = 0;
+ } else {
+ desc[0] = LDT_entry_a(&info);
+ desc[1] = LDT_entry_b(&info);
+ }
+ if (t == &current->thread)
+ load_TLS(t, cpu);
+
+ put_cpu();
+ return 0;
+}
+
+asmlinkage int sys_set_thread_area(struct user_desc __user *u_info)
+{
+ return do_set_thread_area(current, -1, u_info, 1);
+}
+
+
+/*
+ * Get the current Thread-Local Storage area:
+ */
+
+#define GET_LIMIT(desc) ( \
+ ((desc)[0] & 0x0ffff) | \
+ ((desc)[1] & 0xf0000) )
+
+#define GET_32BIT(desc) (((desc)[1] >> 22) & 1)
+#define GET_CONTENTS(desc) (((desc)[1] >> 10) & 3)
+#define GET_WRITABLE(desc) (((desc)[1] >> 9) & 1)
+#define GET_LIMIT_PAGES(desc) (((desc)[1] >> 23) & 1)
+#define GET_PRESENT(desc) (((desc)[1] >> 15) & 1)
+#define GET_USEABLE(desc) (((desc)[1] >> 20) & 1)
+#define GET_LONGMODE(desc) (((desc)[1] >> 21) & 1)
+
+int do_get_thread_area(struct task_struct *p, int idx,
+ struct user_desc __user *u_info)
+{
+ struct thread_struct *t = &p->thread;
+ struct user_desc info;
+ u32 *desc;
+
+ if (idx == -1 && get_user(idx, &u_info->entry_number))
+ return -EFAULT;
+ if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
+ return -EINVAL;
+
+ desc = (u32 *) &t->tls_array[idx - GDT_ENTRY_TLS_MIN];
+
+ memset(&info, 0, sizeof(struct user_desc));
+ info.entry_number = idx;
+ info.base_addr = get_desc_base((void *)desc);
+ info.limit = GET_LIMIT(desc);
+ info.seg_32bit = GET_32BIT(desc);
+ info.contents = GET_CONTENTS(desc);
+ info.read_exec_only = !GET_WRITABLE(desc);
+ info.limit_in_pages = GET_LIMIT_PAGES(desc);
+ info.seg_not_present = !GET_PRESENT(desc);
+ info.useable = GET_USEABLE(desc);
+#ifdef CONFIG_X86_64
+ info.lm = GET_LONGMODE(desc);
+#endif
+
+ if (copy_to_user(u_info, &info, sizeof(info)))
+ return -EFAULT;
+ return 0;
+}
+
+asmlinkage int sys_get_thread_area(struct user_desc __user *u_info)
+{
+ return do_get_thread_area(current, -1, u_info);
+}
diff --git a/include/asm-x86/ia32.h b/include/asm-x86/ia32.h
index 0190b7c..363633b 100644
--- a/include/asm-x86/ia32.h
+++ b/include/asm-x86/ia32.h
@@ -159,12 +159,6 @@ struct ustat32 {
#define IA32_STACK_TOP IA32_PAGE_OFFSET

#ifdef __KERNEL__
-struct user_desc;
-struct siginfo_t;
-int do_get_thread_area(struct thread_struct *t, struct user_desc __user *info);
-int do_set_thread_area(struct thread_struct *t, struct user_desc __user *info);
-int ia32_child_tls(struct task_struct *p, struct pt_regs *childregs);
-
struct linux_binprm;
extern int ia32_setup_arg_pages(struct linux_binprm *bprm,
unsigned long stack_top, int exec_stack);
diff --git a/include/asm-x86/proto.h b/include/asm-x86/proto.h
index dabba55..a7aa17d 100644
--- a/include/asm-x86/proto.h
+++ b/include/asm-x86/proto.h
@@ -24,6 +24,11 @@ extern void ia32_syscall(void);
extern void ia32_cstar_target(void);
extern void ia32_sysenter_target(void);

+extern int do_get_thread_area(struct task_struct *p, int idx,
+ struct user_desc __user *info);
+extern int do_set_thread_area(struct task_struct *p, int idx,
+ struct user_desc __user *info, int can_allocate);
+
extern void config_acpi_tables(void);
extern void ia32_syscall(void);
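
For context on the interface the shared tls.c now backs on both 32-bit
kernels and ia32 tasks on 64-bit kernels, here is a hypothetical user-space
sketch (not part of the patch; build as a 32-bit program, and the descriptor
values are arbitrary examples) exercising set_thread_area/get_thread_area:

#include <asm/ldt.h>		/* struct user_desc */
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct user_desc ud;

	memset(&ud, 0, sizeof(ud));
	ud.entry_number = -1;		/* let the kernel pick a free GDT TLS slot */
	ud.base_addr = 0x1000;		/* arbitrary example base */
	ud.limit = 0xfffff;
	ud.seg_32bit = 1;
	ud.limit_in_pages = 1;
	ud.useable = 1;

	if (syscall(SYS_set_thread_area, &ud) != 0) {
		perror("set_thread_area");
		return 1;
	}
	printf("allocated GDT TLS entry %u\n", ud.entry_number);

	/* Read the same slot back through the companion call. */
	if (syscall(SYS_get_thread_area, &ud) == 0)
		printf("base %#x limit %#x\n", ud.base_addr, ud.limit);
	return 0;
}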

2007-11-21 11:59:20

by Thomas Gleixner

Subject: Re: [PATCH 5/5] x86: TLS cleanup

On Wed, 21 Nov 2007, Roland McGrath wrote:

>
> This consolidates the four different places that implemented the same
> encoding magic for the GDT-slot 32-bit TLS support. The old tls32.c is
> renamed and only slightly modified to be the shared implementation guts.

Cool patchset.

One minor nit: Please do not put stuff into proto.h anymore. I'm
desperately trying to get rid of it.

> diff --git a/include/asm-x86/proto.h b/include/asm-x86/proto.h
> index dabba55..a7aa17d 100644
> --- a/include/asm-x86/proto.h
> +++ b/include/asm-x86/proto.h
> @@ -24,6 +24,11 @@ extern void ia32_syscall(void);
> extern void ia32_cstar_target(void);
> extern void ia32_sysenter_target(void);
>
> +extern int do_get_thread_area(struct task_struct *p, int idx,
> + struct user_desc __user *info);
> +extern int do_set_thread_area(struct task_struct *p, int idx,
> + struct user_desc __user *info, int can_allocate);
> +
> extern void config_acpi_tables(void);
> extern void ia32_syscall(void);

I'll pull it into the x86 git tree and look for a better place for
do_[sg]et_thread_area().

Thanks,

tglx

2007-11-21 18:27:31

by Zachary Amsden

Subject: Re: [PATCH 2/5] x86: use get_desc_base

On Wed, 2007-11-21 at 02:20 -0800, Roland McGrath wrote:
> This changes a couple of places to use the get_desc_base function.
> They were duplicating the same calculation with different equivalent code.
>
> Signed-off-by: Roland McGrath <[email protected]>
>
> diff --git a/arch/x86/ia32/tls32.c b/arch/x86/ia32/tls32.c
> index 1cc4340..5291596 100644
> --- a/arch/x86/ia32/tls32.c
> +++ b/arch/x86/ia32/tls32.c
> @@ -85,11 +85,6 @@ asmlinkage long sys32_set_thread_area(struct user_desc __user *u_info)
> * Get the current Thread-Local Storage area:
> */
>
> -#define GET_BASE(desc) ( \
> - (((desc)->a >> 16) & 0x0000ffff) | \
> - (((desc)->b << 16) & 0x00ff0000) | \
> - ( (desc)->b & 0xff000000) )

Are you sure there still aren't more of these things floating around? I
recall ptrace and process.c even having one at one point; hopefully they
are all gone.

Nice cleanups,

Zach


2007-11-21 18:31:24

by Zachary Amsden

Subject: Re: [PATCH 5/5] x86: TLS cleanup

On Wed, 2007-11-21 at 02:25 -0800, Roland McGrath wrote:
> This consolidates the four different places that implemented the same
> encoding magic for the GDT-slot 32-bit TLS support. The old tls32.c is
> renamed and only slightly modified to be the shared implementation guts.

> -#define GET_BASE(desc) ( \
> - (((desc)->a >> 16) & 0x0000ffff) | \
> - (((desc)->b << 16) & 0x00ff0000) | \
> - ( (desc)->b & 0xff000000) )
> -
> -#define GET_LIMIT(desc) ( \
> - ((desc)->a & 0x0ffff) | \
> - ((desc)->b & 0xf0000) )

That was the other redundant definition, thanks.

I had a bit of trouble verifying correctness here because of so much
Brownian motion. Any possibility of a pure movement / fixup separation
to make it easier on the eyes?

Zach

2007-11-21 22:38:15

by Roland McGrath

Subject: Re: [PATCH 5/5] x86: TLS cleanup

> I had a bit of trouble verifying correctness here because of much
> brownian motion. Any possibility of a pure movement / fixup separation
> to make it easier on the eyes?

Yeah, sorry about that. It was late and the whole TLS thing was a sudden
afterthought while I was in the middle of doing something else, so I didn't
feel like slicing up the patch any more. And in my tree, git didn't even
do such a great job of noticing this rename. I'll send a replacement.

Thanks,
Roland

2007-11-21 22:39:17

by Roland McGrath

Subject: [replacement PATCH 5/6] x86: tls32 moved


This renames arch/x86/ia32/tls32.c to arch/x86/kernel/tls.c, which changes
nothing by itself but paves the way to consolidating this code for 32-bit too.

Signed-off-by: Roland McGrath <[email protected]>
---
arch/x86/ia32/Makefile | 2 +-
arch/x86/ia32/tls32.c | 158 -------------------------------------------
arch/x86/kernel/Makefile_64 | 2 +
arch/x86/kernel/tls.c | 158 +++++++++++++++++++++++++++++++++++++++++++
4 files changed, 161 insertions(+), 159 deletions(-)

diff --git a/arch/x86/ia32/Makefile b/arch/x86/ia32/Makefile
index e2edda2..3c8b746 100644
--- a/arch/x86/ia32/Makefile
+++ b/arch/x86/ia32/Makefile
@@ -2,7 +2,7 @@
# Makefile for the ia32 kernel emulation subsystem.
#

-obj-$(CONFIG_IA32_EMULATION) := ia32entry.o sys_ia32.o ia32_signal.o tls32.o \
+obj-$(CONFIG_IA32_EMULATION) := ia32entry.o sys_ia32.o ia32_signal.o \
ia32_binfmt.o fpu32.o ptrace32.o syscall32.o syscall32_syscall.o \
mmap32.o

diff --git a/arch/x86/ia32/tls32.c b/arch/x86/ia32/tls32.c
deleted file mode 100644
index 5291596..0000000
--- a/arch/x86/ia32/tls32.c
+++ /dev/null
@@ -1,158 +0,0 @@
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/user.h>
-
-#include <asm/uaccess.h>
-#include <asm/desc.h>
-#include <asm/system.h>
-#include <asm/ldt.h>
-#include <asm/processor.h>
-#include <asm/proto.h>
-
-/*
- * sys_alloc_thread_area: get a yet unused TLS descriptor index.
- */
-static int get_free_idx(void)
-{
- struct thread_struct *t = &current->thread;
- int idx;
-
- for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
- if (desc_empty((struct n_desc_struct *)(t->tls_array) + idx))
- return idx + GDT_ENTRY_TLS_MIN;
- return -ESRCH;
-}
-
-/*
- * Set a given TLS descriptor:
- * When you want addresses > 32bit use arch_prctl()
- */
-int do_set_thread_area(struct thread_struct *t, struct user_desc __user *u_info)
-{
- struct user_desc info;
- struct n_desc_struct *desc;
- int cpu, idx;
-
- if (copy_from_user(&info, u_info, sizeof(info)))
- return -EFAULT;
-
- idx = info.entry_number;
-
- /*
- * index -1 means the kernel should try to find and
- * allocate an empty descriptor:
- */
- if (idx == -1) {
- idx = get_free_idx();
- if (idx < 0)
- return idx;
- if (put_user(idx, &u_info->entry_number))
- return -EFAULT;
- }
-
- if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
- return -EINVAL;
-
- desc = ((struct n_desc_struct *)t->tls_array) + idx - GDT_ENTRY_TLS_MIN;
-
- /*
- * We must not get preempted while modifying the TLS.
- */
- cpu = get_cpu();
-
- if (LDT_empty(&info)) {
- desc->a = 0;
- desc->b = 0;
- } else {
- desc->a = LDT_entry_a(&info);
- desc->b = LDT_entry_b(&info);
- }
- if (t == &current->thread)
- load_TLS(t, cpu);
-
- put_cpu();
- return 0;
-}
-
-asmlinkage long sys32_set_thread_area(struct user_desc __user *u_info)
-{
- return do_set_thread_area(&current->thread, u_info);
-}
-
-
-/*
- * Get the current Thread-Local Storage area:
- */
-
-#define GET_LIMIT(desc) ( \
- ((desc)->a & 0x0ffff) | \
- ((desc)->b & 0xf0000) )
-
-#define GET_32BIT(desc) (((desc)->b >> 22) & 1)
-#define GET_CONTENTS(desc) (((desc)->b >> 10) & 3)
-#define GET_WRITABLE(desc) (((desc)->b >> 9) & 1)
-#define GET_LIMIT_PAGES(desc) (((desc)->b >> 23) & 1)
-#define GET_PRESENT(desc) (((desc)->b >> 15) & 1)
-#define GET_USEABLE(desc) (((desc)->b >> 20) & 1)
-#define GET_LONGMODE(desc) (((desc)->b >> 21) & 1)
-
-int do_get_thread_area(struct thread_struct *t, struct user_desc __user *u_info)
-{
- struct user_desc info;
- struct n_desc_struct *desc;
- int idx;
-
- if (get_user(idx, &u_info->entry_number))
- return -EFAULT;
- if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
- return -EINVAL;
-
- desc = ((struct n_desc_struct *)t->tls_array) + idx - GDT_ENTRY_TLS_MIN;
-
- memset(&info, 0, sizeof(struct user_desc));
- info.entry_number = idx;
- info.base_addr = get_desc_base(desc);
- info.limit = GET_LIMIT(desc);
- info.seg_32bit = GET_32BIT(desc);
- info.contents = GET_CONTENTS(desc);
- info.read_exec_only = !GET_WRITABLE(desc);
- info.limit_in_pages = GET_LIMIT_PAGES(desc);
- info.seg_not_present = !GET_PRESENT(desc);
- info.useable = GET_USEABLE(desc);
- info.lm = GET_LONGMODE(desc);
-
- if (copy_to_user(u_info, &info, sizeof(info)))
- return -EFAULT;
- return 0;
-}
-
-asmlinkage long sys32_get_thread_area(struct user_desc __user *u_info)
-{
- return do_get_thread_area(&current->thread, u_info);
-}
-
-
-int ia32_child_tls(struct task_struct *p, struct pt_regs *childregs)
-{
- struct n_desc_struct *desc;
- struct user_desc info;
- struct user_desc __user *cp;
- int idx;
-
- cp = (void __user *)childregs->rsi;
- if (copy_from_user(&info, cp, sizeof(info)))
- return -EFAULT;
- if (LDT_empty(&info))
- return -EINVAL;
-
- idx = info.entry_number;
- if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
- return -EINVAL;
-
- desc = (struct n_desc_struct *)(p->thread.tls_array) + idx - GDT_ENTRY_TLS_MIN;
- desc->a = LDT_entry_a(&info);
- desc->b = LDT_entry_b(&info);
-
- return 0;
-}
diff --git a/arch/x86/kernel/Makefile_64 b/arch/x86/kernel/Makefile_64
index 5a88890..203a9d8 100644
--- a/arch/x86/kernel/Makefile_64
+++ b/arch/x86/kernel/Makefile_64
@@ -13,6 +13,8 @@ obj-y := process_64.o signal_64.o entry_64.o traps_64.o irq_64.o \
pci-dma_64.o pci-nommu_64.o alternative.o hpet.o tsc_64.o bugs_64.o \
i8253.o

+obj-$(CONFIG_IA32_EMULATION) += tls.o
+
obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-y += cpu/
obj-y += acpi/
diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
new file mode 100644
index 0000000..5291596
--- /dev/null
+++ b/arch/x86/kernel/tls.c
@@ -0,0 +1,158 @@
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/user.h>
+
+#include <asm/uaccess.h>
+#include <asm/desc.h>
+#include <asm/system.h>
+#include <asm/ldt.h>
+#include <asm/processor.h>
+#include <asm/proto.h>
+
+/*
+ * sys_alloc_thread_area: get a yet unused TLS descriptor index.
+ */
+static int get_free_idx(void)
+{
+ struct thread_struct *t = &current->thread;
+ int idx;
+
+ for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
+ if (desc_empty((struct n_desc_struct *)(t->tls_array) + idx))
+ return idx + GDT_ENTRY_TLS_MIN;
+ return -ESRCH;
+}
+
+/*
+ * Set a given TLS descriptor:
+ * When you want addresses > 32bit use arch_prctl()
+ */
+int do_set_thread_area(struct thread_struct *t, struct user_desc __user *u_info)
+{
+ struct user_desc info;
+ struct n_desc_struct *desc;
+ int cpu, idx;
+
+ if (copy_from_user(&info, u_info, sizeof(info)))
+ return -EFAULT;
+
+ idx = info.entry_number;
+
+ /*
+ * index -1 means the kernel should try to find and
+ * allocate an empty descriptor:
+ */
+ if (idx == -1) {
+ idx = get_free_idx();
+ if (idx < 0)
+ return idx;
+ if (put_user(idx, &u_info->entry_number))
+ return -EFAULT;
+ }
+
+ if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
+ return -EINVAL;
+
+ desc = ((struct n_desc_struct *)t->tls_array) + idx - GDT_ENTRY_TLS_MIN;
+
+ /*
+ * We must not get preempted while modifying the TLS.
+ */
+ cpu = get_cpu();
+
+ if (LDT_empty(&info)) {
+ desc->a = 0;
+ desc->b = 0;
+ } else {
+ desc->a = LDT_entry_a(&info);
+ desc->b = LDT_entry_b(&info);
+ }
+ if (t == &current->thread)
+ load_TLS(t, cpu);
+
+ put_cpu();
+ return 0;
+}
+
+asmlinkage long sys32_set_thread_area(struct user_desc __user *u_info)
+{
+ return do_set_thread_area(&current->thread, u_info);
+}
+
+
+/*
+ * Get the current Thread-Local Storage area:
+ */
+
+#define GET_LIMIT(desc) ( \
+ ((desc)->a & 0x0ffff) | \
+ ((desc)->b & 0xf0000) )
+
+#define GET_32BIT(desc) (((desc)->b >> 22) & 1)
+#define GET_CONTENTS(desc) (((desc)->b >> 10) & 3)
+#define GET_WRITABLE(desc) (((desc)->b >> 9) & 1)
+#define GET_LIMIT_PAGES(desc) (((desc)->b >> 23) & 1)
+#define GET_PRESENT(desc) (((desc)->b >> 15) & 1)
+#define GET_USEABLE(desc) (((desc)->b >> 20) & 1)
+#define GET_LONGMODE(desc) (((desc)->b >> 21) & 1)
+
+int do_get_thread_area(struct thread_struct *t, struct user_desc __user *u_info)
+{
+ struct user_desc info;
+ struct n_desc_struct *desc;
+ int idx;
+
+ if (get_user(idx, &u_info->entry_number))
+ return -EFAULT;
+ if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
+ return -EINVAL;
+
+ desc = ((struct n_desc_struct *)t->tls_array) + idx - GDT_ENTRY_TLS_MIN;
+
+ memset(&info, 0, sizeof(struct user_desc));
+ info.entry_number = idx;
+ info.base_addr = get_desc_base(desc);
+ info.limit = GET_LIMIT(desc);
+ info.seg_32bit = GET_32BIT(desc);
+ info.contents = GET_CONTENTS(desc);
+ info.read_exec_only = !GET_WRITABLE(desc);
+ info.limit_in_pages = GET_LIMIT_PAGES(desc);
+ info.seg_not_present = !GET_PRESENT(desc);
+ info.useable = GET_USEABLE(desc);
+ info.lm = GET_LONGMODE(desc);
+
+ if (copy_to_user(u_info, &info, sizeof(info)))
+ return -EFAULT;
+ return 0;
+}
+
+asmlinkage long sys32_get_thread_area(struct user_desc __user *u_info)
+{
+ return do_get_thread_area(&current->thread, u_info);
+}
+
+
+int ia32_child_tls(struct task_struct *p, struct pt_regs *childregs)
+{
+ struct n_desc_struct *desc;
+ struct user_desc info;
+ struct user_desc __user *cp;
+ int idx;
+
+ cp = (void __user *)childregs->rsi;
+ if (copy_from_user(&info, cp, sizeof(info)))
+ return -EFAULT;
+ if (LDT_empty(&info))
+ return -EINVAL;
+
+ idx = info.entry_number;
+ if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
+ return -EINVAL;
+
+ desc = (struct n_desc_struct *)(p->thread.tls_array) + idx - GDT_ENTRY_TLS_MIN;
+ desc->a = LDT_entry_a(&info);
+ desc->b = LDT_entry_b(&info);
+
+ return 0;
+}

2007-11-21 22:40:11

by Roland McGrath

Subject: [replacement PATCH 6/6] x86: TLS cleanup


This consolidates the four different places that implemented the same
encoding magic for the GDT-slot 32-bit TLS support. The old tls32.c was
renamed and is now only slightly modified to be the shared implementation.

Signed-off-by: Roland McGrath <[email protected]>
---
arch/x86/ia32/ia32entry.S | 4 +-
arch/x86/kernel/Makefile_32 | 1 +
arch/x86/kernel/process_32.c | 143 ++----------------------------------------
arch/x86/kernel/process_64.c | 3 +-
arch/x86/kernel/ptrace_32.c | 91 +++------------------------
arch/x86/kernel/ptrace_64.c | 26 +++-----
arch/x86/kernel/tls.c | 97 +++++++++++-----------------
include/asm-x86/ia32.h | 6 --
include/asm-x86/ptrace.h | 11 +++
9 files changed, 80 insertions(+), 302 deletions(-)

diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
index df588f0..0a71fa9 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -644,8 +644,8 @@ ia32_sys_call_table:
.quad compat_sys_futex /* 240 */
.quad compat_sys_sched_setaffinity
.quad compat_sys_sched_getaffinity
- .quad sys32_set_thread_area
- .quad sys32_get_thread_area
+ .quad sys_set_thread_area
+ .quad sys_get_thread_area
.quad compat_sys_io_setup /* 245 */
.quad sys_io_destroy
.quad compat_sys_io_getevents
diff --git a/arch/x86/kernel/Makefile_32 b/arch/x86/kernel/Makefile_32
index a7bc93c..e660584 100644
--- a/arch/x86/kernel/Makefile_32
+++ b/arch/x86/kernel/Makefile_32
@@ -10,6 +10,7 @@ obj-y := process_32.o signal_32.o entry_32.o traps_32.o irq_32.o \
pci-dma_32.o i386_ksyms_32.o i387_32.o bootflag.o e820_32.o\
quirks.o i8237.o topology.o alternative.o i8253.o tsc_32.o

+obj-y += tls.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-y += cpu/
obj-y += acpi/
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 7b89958..ebbbfc5 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -480,33 +480,16 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
set_tsk_thread_flag(p, TIF_IO_BITMAP);
}

+ err = 0;
+
/*
* Set a new TLS for the child thread?
*/
- if (clone_flags & CLONE_SETTLS) {
- struct desc_struct *desc;
- struct user_desc info;
- int idx;
-
- err = -EFAULT;
- if (copy_from_user(&info, (void __user *)childregs->esi, sizeof(info)))
- goto out;
- err = -EINVAL;
- if (LDT_empty(&info))
- goto out;
-
- idx = info.entry_number;
- if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
- goto out;
-
- desc = p->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
- desc->a = LDT_entry_a(&info);
- desc->b = LDT_entry_b(&info);
- }
+ if (clone_flags & CLONE_SETTLS)
+ err = do_set_thread_area(p, -1,
+ (struct user_desc __user *)childregs->esi, 0);

- err = 0;
- out:
- if (err && p->thread.io_bitmap_ptr) {
+ if (err && p->thread.io_bitmap_ptr) {
kfree(p->thread.io_bitmap_ptr);
p->thread.io_bitmap_max = 0;
}
@@ -851,120 +834,6 @@ unsigned long get_wchan(struct task_struct *p)
return 0;
}

-/*
- * sys_alloc_thread_area: get a yet unused TLS descriptor index.
- */
-static int get_free_idx(void)
-{
- struct thread_struct *t = &current->thread;
- int idx;
-
- for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
- if (desc_empty(t->tls_array + idx))
- return idx + GDT_ENTRY_TLS_MIN;
- return -ESRCH;
-}
-
-/*
- * Set a given TLS descriptor:
- */
-asmlinkage int sys_set_thread_area(struct user_desc __user *u_info)
-{
- struct thread_struct *t = &current->thread;
- struct user_desc info;
- struct desc_struct *desc;
- int cpu, idx;
-
- if (copy_from_user(&info, u_info, sizeof(info)))
- return -EFAULT;
- idx = info.entry_number;
-
- /*
- * index -1 means the kernel should try to find and
- * allocate an empty descriptor:
- */
- if (idx == -1) {
- idx = get_free_idx();
- if (idx < 0)
- return idx;
- if (put_user(idx, &u_info->entry_number))
- return -EFAULT;
- }
-
- if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
- return -EINVAL;
-
- desc = t->tls_array + idx - GDT_ENTRY_TLS_MIN;
-
- /*
- * We must not get preempted while modifying the TLS.
- */
- cpu = get_cpu();
-
- if (LDT_empty(&info)) {
- desc->a = 0;
- desc->b = 0;
- } else {
- desc->a = LDT_entry_a(&info);
- desc->b = LDT_entry_b(&info);
- }
- load_TLS(t, cpu);
-
- put_cpu();
-
- return 0;
-}
-
-/*
- * Get the current Thread-Local Storage area:
- */
-
-#define GET_BASE(desc) ( \
- (((desc)->a >> 16) & 0x0000ffff) | \
- (((desc)->b << 16) & 0x00ff0000) | \
- ( (desc)->b & 0xff000000) )
-
-#define GET_LIMIT(desc) ( \
- ((desc)->a & 0x0ffff) | \
- ((desc)->b & 0xf0000) )
-
-#define GET_32BIT(desc) (((desc)->b >> 22) & 1)
-#define GET_CONTENTS(desc) (((desc)->b >> 10) & 3)
-#define GET_WRITABLE(desc) (((desc)->b >> 9) & 1)
-#define GET_LIMIT_PAGES(desc) (((desc)->b >> 23) & 1)
-#define GET_PRESENT(desc) (((desc)->b >> 15) & 1)
-#define GET_USEABLE(desc) (((desc)->b >> 20) & 1)
-
-asmlinkage int sys_get_thread_area(struct user_desc __user *u_info)
-{
- struct user_desc info;
- struct desc_struct *desc;
- int idx;
-
- if (get_user(idx, &u_info->entry_number))
- return -EFAULT;
- if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
- return -EINVAL;
-
- memset(&info, 0, sizeof(info));
-
- desc = current->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
-
- info.entry_number = idx;
- info.base_addr = GET_BASE(desc);
- info.limit = GET_LIMIT(desc);
- info.seg_32bit = GET_32BIT(desc);
- info.contents = GET_CONTENTS(desc);
- info.read_exec_only = !GET_WRITABLE(desc);
- info.limit_in_pages = GET_LIMIT_PAGES(desc);
- info.seg_not_present = !GET_PRESENT(desc);
- info.useable = GET_USEABLE(desc);
-
- if (copy_to_user(u_info, &info, sizeof(info)))
- return -EFAULT;
- return 0;
-}
-
unsigned long arch_align_stack(unsigned long sp)
{
if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index b8d131d..3fdbf78 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -513,7 +513,8 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long rsp,
if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_IA32_EMULATION
if (test_thread_flag(TIF_IA32))
- err = ia32_child_tls(p, childregs);
+ err = do_set_thread_area(p, -1,
+ (struct user_desc __user *)childregs->rsi, 0);
else
#endif
err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);
diff --git a/arch/x86/kernel/ptrace_32.c b/arch/x86/kernel/ptrace_32.c
index ff5431c..39a94a0 100644
--- a/arch/x86/kernel/ptrace_32.c
+++ b/arch/x86/kernel/ptrace_32.c
@@ -276,85 +276,6 @@ void ptrace_disable(struct task_struct *child)
clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
}

-/*
- * Perform get_thread_area on behalf of the traced child.
- */
-static int
-ptrace_get_thread_area(struct task_struct *child,
- int idx, struct user_desc __user *user_desc)
-{
- struct user_desc info;
- struct desc_struct *desc;
-
-/*
- * Get the current Thread-Local Storage area:
- */
-
-#define GET_BASE(desc) ( \
- (((desc)->a >> 16) & 0x0000ffff) | \
- (((desc)->b << 16) & 0x00ff0000) | \
- ( (desc)->b & 0xff000000) )
-
-#define GET_LIMIT(desc) ( \
- ((desc)->a & 0x0ffff) | \
- ((desc)->b & 0xf0000) )
-
-#define GET_32BIT(desc) (((desc)->b >> 22) & 1)
-#define GET_CONTENTS(desc) (((desc)->b >> 10) & 3)
-#define GET_WRITABLE(desc) (((desc)->b >> 9) & 1)
-#define GET_LIMIT_PAGES(desc) (((desc)->b >> 23) & 1)
-#define GET_PRESENT(desc) (((desc)->b >> 15) & 1)
-#define GET_USEABLE(desc) (((desc)->b >> 20) & 1)
-
- if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
- return -EINVAL;
-
- desc = child->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
-
- info.entry_number = idx;
- info.base_addr = GET_BASE(desc);
- info.limit = GET_LIMIT(desc);
- info.seg_32bit = GET_32BIT(desc);
- info.contents = GET_CONTENTS(desc);
- info.read_exec_only = !GET_WRITABLE(desc);
- info.limit_in_pages = GET_LIMIT_PAGES(desc);
- info.seg_not_present = !GET_PRESENT(desc);
- info.useable = GET_USEABLE(desc);
-
- if (copy_to_user(user_desc, &info, sizeof(info)))
- return -EFAULT;
-
- return 0;
-}
-
-/*
- * Perform set_thread_area on behalf of the traced child.
- */
-static int
-ptrace_set_thread_area(struct task_struct *child,
- int idx, struct user_desc __user *user_desc)
-{
- struct user_desc info;
- struct desc_struct *desc;
-
- if (copy_from_user(&info, user_desc, sizeof(info)))
- return -EFAULT;
-
- if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
- return -EINVAL;
-
- desc = child->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
- if (LDT_empty(&info)) {
- desc->a = 0;
- desc->b = 0;
- } else {
- desc->a = LDT_entry_a(&info);
- desc->b = LDT_entry_b(&info);
- }
-
- return 0;
-}
-
long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
struct user * dummy = NULL;
@@ -601,13 +522,17 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
}

case PTRACE_GET_THREAD_AREA:
- ret = ptrace_get_thread_area(child, addr,
- (struct user_desc __user *) data);
+ if (addr < 0)
+ return -EIO;
+ ret = do_get_thread_area(child, addr,
+ (struct user_desc __user *) data);
break;

case PTRACE_SET_THREAD_AREA:
- ret = ptrace_set_thread_area(child, addr,
- (struct user_desc __user *) data);
+ if (addr < 0)
+ return -EIO;
+ ret = do_set_thread_area(child, addr,
+ (struct user_desc __user *) data, 0);
break;

default:
diff --git a/arch/x86/kernel/ptrace_64.c b/arch/x86/kernel/ptrace_64.c
index 3ff7ddc..07de75d 100644
--- a/arch/x86/kernel/ptrace_64.c
+++ b/arch/x86/kernel/ptrace_64.c
@@ -474,23 +474,19 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
64bit debugger to fully examine them too. Better
don't use it against 64bit processes, use
PTRACE_ARCH_PRCTL instead. */
- case PTRACE_SET_THREAD_AREA: {
- struct user_desc __user *p;
- int old;
- p = (struct user_desc __user *)data;
- get_user(old, &p->entry_number);
- put_user(addr, &p->entry_number);
- ret = do_set_thread_area(&child->thread, p);
- put_user(old, &p->entry_number);
- break;
case PTRACE_GET_THREAD_AREA:
- p = (struct user_desc __user *)data;
- get_user(old, &p->entry_number);
- put_user(addr, &p->entry_number);
- ret = do_get_thread_area(&child->thread, p);
- put_user(old, &p->entry_number);
+ if (addr < 0)
+ return -EIO;
+ ret = do_get_thread_area(child, addr,
+ (struct user_desc __user *) data);
+
+ break;
+ case PTRACE_SET_THREAD_AREA:
+ if (addr < 0)
+ return -EIO;
+ ret = do_set_thread_area(child, addr,
+ (struct user_desc __user *) data, 0);
break;
- }
#endif
/* normal 64bit interface to access TLS data.
Works just like arch_prctl, except that the arguments
diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
index 5291596..0a1a744 100644
--- a/arch/x86/kernel/tls.c
+++ b/arch/x86/kernel/tls.c
@@ -19,31 +19,34 @@ static int get_free_idx(void)
int idx;

for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
- if (desc_empty((struct n_desc_struct *)(t->tls_array) + idx))
+ if (desc_empty(&t->tls_array[idx]))
return idx + GDT_ENTRY_TLS_MIN;
return -ESRCH;
}

/*
* Set a given TLS descriptor:
- * When you want addresses > 32bit use arch_prctl()
*/
-int do_set_thread_area(struct thread_struct *t, struct user_desc __user *u_info)
+int do_set_thread_area(struct task_struct *p, int idx,
+ struct user_desc __user *u_info,
+ int can_allocate)
{
+ struct thread_struct *t = &p->thread;
struct user_desc info;
- struct n_desc_struct *desc;
- int cpu, idx;
+ u32 *desc;
+ int cpu;

if (copy_from_user(&info, u_info, sizeof(info)))
return -EFAULT;

- idx = info.entry_number;
+ if (idx == -1)
+ idx = info.entry_number;

/*
* index -1 means the kernel should try to find and
* allocate an empty descriptor:
*/
- if (idx == -1) {
+ if (idx == -1 && can_allocate) {
idx = get_free_idx();
if (idx < 0)
return idx;
@@ -54,7 +57,7 @@ int do_set_thread_area(struct thread_struct *t, struct user_desc __user *u_info)
if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
return -EINVAL;

- desc = ((struct n_desc_struct *)t->tls_array) + idx - GDT_ENTRY_TLS_MIN;
+ desc = (u32 *) &t->tls_array[idx - GDT_ENTRY_TLS_MIN];

/*
* We must not get preempted while modifying the TLS.
@@ -62,11 +65,11 @@ int do_set_thread_area(struct thread_struct *t, struct user_desc __user *u_info)
cpu = get_cpu();

if (LDT_empty(&info)) {
- desc->a = 0;
- desc->b = 0;
+ desc[0] = 0;
+ desc[1] = 0;
} else {
- desc->a = LDT_entry_a(&info);
- desc->b = LDT_entry_b(&info);
+ desc[0] = LDT_entry_a(&info);
+ desc[1] = LDT_entry_b(&info);
}
if (t == &current->thread)
load_TLS(t, cpu);
@@ -75,9 +78,9 @@ int do_set_thread_area(struct thread_struct *t, struct user_desc __user *u_info)
return 0;
}

-asmlinkage long sys32_set_thread_area(struct user_desc __user *u_info)
+asmlinkage int sys_set_thread_area(struct user_desc __user *u_info)
{
- return do_set_thread_area(&current->thread, u_info);
+ return do_set_thread_area(current, -1, u_info, 1);
}


@@ -86,33 +89,34 @@ asmlinkage long sys32_set_thread_area(struct user_desc __user *u_info)
*/

#define GET_LIMIT(desc) ( \
- ((desc)->a & 0x0ffff) | \
- ((desc)->b & 0xf0000) )
-
-#define GET_32BIT(desc) (((desc)->b >> 22) & 1)
-#define GET_CONTENTS(desc) (((desc)->b >> 10) & 3)
-#define GET_WRITABLE(desc) (((desc)->b >> 9) & 1)
-#define GET_LIMIT_PAGES(desc) (((desc)->b >> 23) & 1)
-#define GET_PRESENT(desc) (((desc)->b >> 15) & 1)
-#define GET_USEABLE(desc) (((desc)->b >> 20) & 1)
-#define GET_LONGMODE(desc) (((desc)->b >> 21) & 1)
-
-int do_get_thread_area(struct thread_struct *t, struct user_desc __user *u_info)
+ ((desc)[0] & 0x0ffff) | \
+ ((desc)[1] & 0xf0000) )
+
+#define GET_32BIT(desc) (((desc)[1] >> 22) & 1)
+#define GET_CONTENTS(desc) (((desc)[1] >> 10) & 3)
+#define GET_WRITABLE(desc) (((desc)[1] >> 9) & 1)
+#define GET_LIMIT_PAGES(desc) (((desc)[1] >> 23) & 1)
+#define GET_PRESENT(desc) (((desc)[1] >> 15) & 1)
+#define GET_USEABLE(desc) (((desc)[1] >> 20) & 1)
+#define GET_LONGMODE(desc) (((desc)[1] >> 21) & 1)
+
+int do_get_thread_area(struct task_struct *p, int idx,
+ struct user_desc __user *u_info)
{
+ struct thread_struct *t = &p->thread;
struct user_desc info;
- struct n_desc_struct *desc;
- int idx;
+ u32 *desc;

- if (get_user(idx, &u_info->entry_number))
+ if (idx == -1 && get_user(idx, &u_info->entry_number))
return -EFAULT;
if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
return -EINVAL;

- desc = ((struct n_desc_struct *)t->tls_array) + idx - GDT_ENTRY_TLS_MIN;
+ desc = (u32 *) &t->tls_array[idx - GDT_ENTRY_TLS_MIN];

memset(&info, 0, sizeof(struct user_desc));
info.entry_number = idx;
- info.base_addr = get_desc_base(desc);
+ info.base_addr = get_desc_base((void *)desc);
info.limit = GET_LIMIT(desc);
info.seg_32bit = GET_32BIT(desc);
info.contents = GET_CONTENTS(desc);
@@ -120,39 +124,16 @@ int do_get_thread_area(struct thread_struct *t, struct user_desc __user *u_info)
info.limit_in_pages = GET_LIMIT_PAGES(desc);
info.seg_not_present = !GET_PRESENT(desc);
info.useable = GET_USEABLE(desc);
+#ifdef CONFIG_X86_64
info.lm = GET_LONGMODE(desc);
+#endif

if (copy_to_user(u_info, &info, sizeof(info)))
return -EFAULT;
return 0;
}

-asmlinkage long sys32_get_thread_area(struct user_desc __user *u_info)
-{
- return do_get_thread_area(&current->thread, u_info);
-}
-
-
-int ia32_child_tls(struct task_struct *p, struct pt_regs *childregs)
+asmlinkage int sys_get_thread_area(struct user_desc __user *u_info)
{
- struct n_desc_struct *desc;
- struct user_desc info;
- struct user_desc __user *cp;
- int idx;
-
- cp = (void __user *)childregs->rsi;
- if (copy_from_user(&info, cp, sizeof(info)))
- return -EFAULT;
- if (LDT_empty(&info))
- return -EINVAL;
-
- idx = info.entry_number;
- if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
- return -EINVAL;
-
- desc = (struct n_desc_struct *)(p->thread.tls_array) + idx - GDT_ENTRY_TLS_MIN;
- desc->a = LDT_entry_a(&info);
- desc->b = LDT_entry_b(&info);
-
- return 0;
+ return do_get_thread_area(current, -1, u_info);
}
diff --git a/include/asm-x86/ia32.h b/include/asm-x86/ia32.h
index 0190b7c..aa97332 100644
--- a/include/asm-x86/ia32.h
+++ b/include/asm-x86/ia32.h
@@ -159,12 +159,6 @@ struct ustat32 {
#define IA32_STACK_TOP IA32_PAGE_OFFSET

#ifdef __KERNEL__
-struct user_desc;
-struct siginfo_t;
-int do_get_thread_area(struct thread_struct *t, struct user_desc __user *info);
-int do_set_thread_area(struct thread_struct *t, struct user_desc __user *info);
-int ia32_child_tls(struct task_struct *p, struct pt_regs *childregs);
-
struct linux_binprm;
extern int ia32_setup_arg_pages(struct linux_binprm *bprm,
unsigned long stack_top, int exec_stack);
diff --git a/include/asm-x86/ptrace.h b/include/asm-x86/ptrace.h
index 51ddb25..105d153 100644
--- a/include/asm-x86/ptrace.h
+++ b/include/asm-x86/ptrace.h
@@ -137,6 +137,17 @@ enum {
};
#endif /* __KERNEL__ */
#endif /* !__i386__ */
+
+#ifdef __KERNEL__
+
+struct user_desc;
+extern int do_get_thread_area(struct task_struct *p, int idx,
+ struct user_desc __user *info);
+extern int do_set_thread_area(struct task_struct *p, int idx,
+ struct user_desc __user *info, int can_allocate);
+
+#endif /* __KERNEL__ */
+
#endif /* !__ASSEMBLY__ */

#endif

2007-11-22 09:10:58

by Thomas Gleixner

Subject: Re: [replacement PATCH 6/6] x86: TLS cleanup

On Wed, 21 Nov 2007, Roland McGrath wrote:

>
> This consolidates the four different places that implemented the same
> encoding magic for the GDT-slot 32-bit TLS support. The old tls32.c was
> renamed and is now only slightly modified to be the shared implementation.

Applied, thanks

tglx