Commit d987bdff authored by Shai Fultheim, committed by Linus Torvalds

[PATCH] percpu: init_tss

Use the percpu infrastructure rather than open-coded array[NR_CPUS].
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent cf0cf37d
...@@ -506,7 +506,7 @@ void __init early_cpu_init(void) ...@@ -506,7 +506,7 @@ void __init early_cpu_init(void)
void __init cpu_init (void) void __init cpu_init (void)
{ {
int cpu = smp_processor_id(); int cpu = smp_processor_id();
struct tss_struct * t = init_tss + cpu; struct tss_struct * t = &per_cpu(init_tss, cpu);
struct thread_struct *thread = &current->thread; struct thread_struct *thread = &current->thread;
if (test_and_set_bit(cpu, &cpu_initialized)) { if (test_and_set_bit(cpu, &cpu_initialized)) {
......
...@@ -40,10 +40,7 @@ EXPORT_SYMBOL(init_task); ...@@ -40,10 +40,7 @@ EXPORT_SYMBOL(init_task);
/* /*
* per-CPU TSS segments. Threads are completely 'soft' on Linux, * per-CPU TSS segments. Threads are completely 'soft' on Linux,
* no more per-task TSS's. The TSS size is kept cacheline-aligned * no more per-task TSS's.
* so they are allowed to end up in the .data.cacheline_aligned
* section. Since TSS's are completely CPU-local, we want them
* on exact cacheline boundaries, to eliminate cacheline ping-pong.
*/ */
struct tss_struct init_tss[NR_CPUS] __cacheline_aligned = { [0 ... NR_CPUS-1] = INIT_TSS }; DEFINE_PER_CPU(struct tss_struct, init_tss) ____cacheline_maxaligned_in_smp = INIT_TSS;
...@@ -87,7 +87,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on) ...@@ -87,7 +87,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
* because the ->io_bitmap_max value must match the bitmap * because the ->io_bitmap_max value must match the bitmap
* contents: * contents:
*/ */
tss = init_tss + get_cpu(); tss = &per_cpu(init_tss, get_cpu());
set_bitmap(t->io_bitmap_ptr, from, num, !turn_on); set_bitmap(t->io_bitmap_ptr, from, num, !turn_on);
......
...@@ -299,7 +299,7 @@ void exit_thread(void) ...@@ -299,7 +299,7 @@ void exit_thread(void)
/* The process may have allocated an io port bitmap... nuke it. */ /* The process may have allocated an io port bitmap... nuke it. */
if (unlikely(NULL != t->io_bitmap_ptr)) { if (unlikely(NULL != t->io_bitmap_ptr)) {
int cpu = get_cpu(); int cpu = get_cpu();
struct tss_struct *tss = init_tss + cpu; struct tss_struct *tss = &per_cpu(init_tss, cpu);
kfree(t->io_bitmap_ptr); kfree(t->io_bitmap_ptr);
t->io_bitmap_ptr = NULL; t->io_bitmap_ptr = NULL;
...@@ -517,7 +517,7 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas ...@@ -517,7 +517,7 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas
struct thread_struct *prev = &prev_p->thread, struct thread_struct *prev = &prev_p->thread,
*next = &next_p->thread; *next = &next_p->thread;
int cpu = smp_processor_id(); int cpu = smp_processor_id();
struct tss_struct *tss = init_tss + cpu; struct tss_struct *tss = &per_cpu(init_tss, cpu);
/* never put a printk in __switch_to... printk() calls wake_up*() indirectly */ /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
......
...@@ -24,7 +24,7 @@ extern asmlinkage void sysenter_entry(void); ...@@ -24,7 +24,7 @@ extern asmlinkage void sysenter_entry(void);
void enable_sep_cpu(void *info) void enable_sep_cpu(void *info)
{ {
int cpu = get_cpu(); int cpu = get_cpu();
struct tss_struct *tss = init_tss + cpu; struct tss_struct *tss = &per_cpu(init_tss, cpu);
tss->ss1 = __KERNEL_CS; tss->ss1 = __KERNEL_CS;
tss->esp1 = sizeof(struct tss_struct) + (unsigned long) tss; tss->esp1 = sizeof(struct tss_struct) + (unsigned long) tss;
......
...@@ -121,7 +121,7 @@ struct pt_regs * fastcall save_v86_state(struct kernel_vm86_regs * regs) ...@@ -121,7 +121,7 @@ struct pt_regs * fastcall save_v86_state(struct kernel_vm86_regs * regs)
do_exit(SIGSEGV); do_exit(SIGSEGV);
} }
tss = init_tss + get_cpu(); tss = &per_cpu(init_tss, get_cpu());
current->thread.esp0 = current->thread.saved_esp0; current->thread.esp0 = current->thread.saved_esp0;
current->thread.sysenter_cs = __KERNEL_CS; current->thread.sysenter_cs = __KERNEL_CS;
load_esp0(tss, &current->thread); load_esp0(tss, &current->thread);
...@@ -303,7 +303,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk ...@@ -303,7 +303,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
asm volatile("movl %%fs,%0":"=m" (tsk->thread.saved_fs)); asm volatile("movl %%fs,%0":"=m" (tsk->thread.saved_fs));
asm volatile("movl %%gs,%0":"=m" (tsk->thread.saved_gs)); asm volatile("movl %%gs,%0":"=m" (tsk->thread.saved_gs));
tss = init_tss + get_cpu(); tss = &per_cpu(init_tss, get_cpu());
tsk->thread.esp0 = (unsigned long) &info->VM86_TSS_ESP0; tsk->thread.esp0 = (unsigned long) &info->VM86_TSS_ESP0;
if (cpu_has_sep) if (cpu_has_sep)
tsk->thread.sysenter_cs = 0; tsk->thread.sysenter_cs = 0;
......
...@@ -83,7 +83,7 @@ do_fpu_end(void) ...@@ -83,7 +83,7 @@ do_fpu_end(void)
static void fix_processor_context(void) static void fix_processor_context(void)
{ {
int cpu = smp_processor_id(); int cpu = smp_processor_id();
struct tss_struct * t = init_tss + cpu; struct tss_struct * t = &per_cpu(init_tss, cpu);
set_tss_desc(cpu,t); /* This just modifies memory; should not be necessary. But... This is necessary, because 386 hardware has concept of busy TSS or some similar stupidity. */ set_tss_desc(cpu,t); /* This just modifies memory; should not be necessary. But... This is necessary, because 386 hardware has concept of busy TSS or some similar stupidity. */
per_cpu(cpu_gdt_table, cpu)[GDT_ENTRY_TSS].b &= 0xfffffdff; per_cpu(cpu_gdt_table, cpu)[GDT_ENTRY_TSS].b &= 0xfffffdff;
......
...@@ -19,6 +19,7 @@ ...@@ -19,6 +19,7 @@
#include <linux/cache.h> #include <linux/cache.h>
#include <linux/config.h> #include <linux/config.h>
#include <linux/threads.h> #include <linux/threads.h>
#include <asm/percpu.h>
/* flag for disabling the tsc */ /* flag for disabling the tsc */
extern int tsc_disable; extern int tsc_disable;
...@@ -84,8 +85,8 @@ struct cpuinfo_x86 { ...@@ -84,8 +85,8 @@ struct cpuinfo_x86 {
extern struct cpuinfo_x86 boot_cpu_data; extern struct cpuinfo_x86 boot_cpu_data;
extern struct cpuinfo_x86 new_cpu_data; extern struct cpuinfo_x86 new_cpu_data;
extern struct tss_struct init_tss[NR_CPUS];
extern struct tss_struct doublefault_tss; extern struct tss_struct doublefault_tss;
DECLARE_PER_CPU(struct tss_struct, init_tss);
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
extern struct cpuinfo_x86 cpu_data[]; extern struct cpuinfo_x86 cpu_data[];
...@@ -441,7 +442,6 @@ struct thread_struct { ...@@ -441,7 +442,6 @@ struct thread_struct {
#define INIT_TSS { \ #define INIT_TSS { \
.esp0 = sizeof(init_stack) + (long)&init_stack, \ .esp0 = sizeof(init_stack) + (long)&init_stack, \
.ss0 = __KERNEL_DS, \ .ss0 = __KERNEL_DS, \
.esp1 = sizeof(init_tss[0]) + (long)&init_tss[0], \
.ss1 = __KERNEL_CS, \ .ss1 = __KERNEL_CS, \
.ldt = GDT_ENTRY_LDT, \ .ldt = GDT_ENTRY_LDT, \
.io_bitmap_base = offsetof(struct tss_struct,io_bitmap), \ .io_bitmap_base = offsetof(struct tss_struct,io_bitmap), \
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.