Commit 251e6912 authored by Zachary Amsden, committed by Linus Torvalds

[PATCH] x86: add an accessor function for getting the per-CPU gdt

Add an accessor function for getting the per-CPU gdt.  Callee must already
have the CPU.
Signed-off-by: Zachary Amsden <zach@vmware.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 72e12b76
@@ -597,12 +597,14 @@ static u8 apm_bios_call(u32 func, u32 ebx_in, u32 ecx_in,
 	cpumask_t cpus;
 	int cpu;
 	struct desc_struct save_desc_40;
+	struct desc_struct *gdt;

 	cpus = apm_save_cpus();
 	cpu = get_cpu();
-	save_desc_40 = per_cpu(cpu_gdt_table, cpu)[0x40 / 8];
-	per_cpu(cpu_gdt_table, cpu)[0x40 / 8] = bad_bios_desc;
+	gdt = get_cpu_gdt_table(cpu);
+	save_desc_40 = gdt[0x40 / 8];
+	gdt[0x40 / 8] = bad_bios_desc;
 	local_save_flags(flags);
 	APM_DO_CLI;
@@ -610,7 +612,7 @@ static u8 apm_bios_call(u32 func, u32 ebx_in, u32 ecx_in,
 	apm_bios_call_asm(func, ebx_in, ecx_in, eax, ebx, ecx, edx, esi);
 	APM_DO_RESTORE_SEGS;
 	local_irq_restore(flags);
-	per_cpu(cpu_gdt_table, cpu)[0x40 / 8] = save_desc_40;
+	gdt[0x40 / 8] = save_desc_40;
 	put_cpu();
 	apm_restore_cpus(cpus);
@@ -639,13 +641,14 @@ static u8 apm_bios_call_simple(u32 func, u32 ebx_in, u32 ecx_in, u32 *eax)
 	cpumask_t cpus;
 	int cpu;
 	struct desc_struct save_desc_40;
+	struct desc_struct *gdt;

 	cpus = apm_save_cpus();
 	cpu = get_cpu();
-	save_desc_40 = per_cpu(cpu_gdt_table, cpu)[0x40 / 8];
-	per_cpu(cpu_gdt_table, cpu)[0x40 / 8] = bad_bios_desc;
+	gdt = get_cpu_gdt_table(cpu);
+	save_desc_40 = gdt[0x40 / 8];
+	gdt[0x40 / 8] = bad_bios_desc;
 	local_save_flags(flags);
 	APM_DO_CLI;
@@ -653,7 +656,7 @@ static u8 apm_bios_call_simple(u32 func, u32 ebx_in, u32 ecx_in, u32 *eax)
 	error = apm_bios_call_simple_asm(func, ebx_in, ecx_in, eax);
 	APM_DO_RESTORE_SEGS;
 	local_irq_restore(flags);
-	__get_cpu_var(cpu_gdt_table)[0x40 / 8] = save_desc_40;
+	gdt[0x40 / 8] = save_desc_40;
 	put_cpu();
 	apm_restore_cpus(cpus);
 	return error;
@@ -2295,35 +2298,36 @@ static int __init apm_init(void)
 	apm_bios_entry.segment = APM_CS;

 	for (i = 0; i < NR_CPUS; i++) {
-		set_base(per_cpu(cpu_gdt_table, i)[APM_CS >> 3],
+		struct desc_struct *gdt = get_cpu_gdt_table(i);
+		set_base(gdt[APM_CS >> 3],
 			 __va((unsigned long)apm_info.bios.cseg << 4));
-		set_base(per_cpu(cpu_gdt_table, i)[APM_CS_16 >> 3],
+		set_base(gdt[APM_CS_16 >> 3],
 			 __va((unsigned long)apm_info.bios.cseg_16 << 4));
-		set_base(per_cpu(cpu_gdt_table, i)[APM_DS >> 3],
+		set_base(gdt[APM_DS >> 3],
 			 __va((unsigned long)apm_info.bios.dseg << 4));
 #ifndef APM_RELAX_SEGMENTS
 		if (apm_info.bios.version == 0x100) {
 #endif
 			/* For ASUS motherboard, Award BIOS rev 110 (and others?) */
-			_set_limit((char *)&per_cpu(cpu_gdt_table, i)[APM_CS >> 3], 64 * 1024 - 1);
+			_set_limit((char *)&gdt[APM_CS >> 3], 64 * 1024 - 1);
 			/* For some unknown machine. */
-			_set_limit((char *)&per_cpu(cpu_gdt_table, i)[APM_CS_16 >> 3], 64 * 1024 - 1);
+			_set_limit((char *)&gdt[APM_CS_16 >> 3], 64 * 1024 - 1);
 			/* For the DEC Hinote Ultra CT475 (and others?) */
-			_set_limit((char *)&per_cpu(cpu_gdt_table, i)[APM_DS >> 3], 64 * 1024 - 1);
+			_set_limit((char *)&gdt[APM_DS >> 3], 64 * 1024 - 1);
 #ifndef APM_RELAX_SEGMENTS
 		} else {
-			_set_limit((char *)&per_cpu(cpu_gdt_table, i)[APM_CS >> 3],
+			_set_limit((char *)&gdt[APM_CS >> 3],
 				(apm_info.bios.cseg_len - 1) & 0xffff);
-			_set_limit((char *)&per_cpu(cpu_gdt_table, i)[APM_CS_16 >> 3],
+			_set_limit((char *)&gdt[APM_CS_16 >> 3],
 				(apm_info.bios.cseg_16_len - 1) & 0xffff);
-			_set_limit((char *)&per_cpu(cpu_gdt_table, i)[APM_DS >> 3],
+			_set_limit((char *)&gdt[APM_DS >> 3],
 				(apm_info.bios.dseg_len - 1) & 0xffff);
 			/* workaround for broken BIOSes */
 			if (apm_info.bios.cseg_len <= apm_info.bios.offset)
-				_set_limit((char *)&per_cpu(cpu_gdt_table, i)[APM_CS >> 3], 64 * 1024 -1);
+				_set_limit((char *)&gdt[APM_CS >> 3], 64 * 1024 -1);
 			if (apm_info.bios.dseg_len <= 0x40) { /* 0x40 * 4kB == 64kB */
 				/* for the BIOS that assumes granularity = 1 */
-				per_cpu(cpu_gdt_table, i)[APM_DS >> 3].b |= 0x800000;
+				gdt[APM_DS >> 3].b |= 0x800000;
 				printk(KERN_NOTICE "apm: we set the granularity of dseg.\n");
 			}
 		}
......
@@ -573,6 +573,7 @@ void __devinit cpu_init(void)
 	int cpu = smp_processor_id();
 	struct tss_struct * t = &per_cpu(init_tss, cpu);
 	struct thread_struct *thread = &current->thread;
+	struct desc_struct *gdt = get_cpu_gdt_table(cpu);
 	__u32 stk16_off = (__u32)&per_cpu(cpu_16bit_stack, cpu);

 	if (cpu_test_and_set(cpu, cpu_initialized)) {
@@ -594,18 +595,16 @@ void __devinit cpu_init(void)
 	 * Initialize the per-CPU GDT with the boot GDT,
 	 * and set up the GDT descriptor:
 	 */
-	memcpy(&per_cpu(cpu_gdt_table, cpu), cpu_gdt_table,
-	       GDT_SIZE);
+	memcpy(gdt, cpu_gdt_table, GDT_SIZE);

 	/* Set up GDT entry for 16bit stack */
-	*(__u64 *)&(per_cpu(cpu_gdt_table, cpu)[GDT_ENTRY_ESPFIX_SS]) |=
+	*(__u64 *)(&gdt[GDT_ENTRY_ESPFIX_SS]) |=
 		((((__u64)stk16_off) << 16) & 0x000000ffffff0000ULL) |
 		((((__u64)stk16_off) << 32) & 0xff00000000000000ULL) |
 		(CPU_16BIT_STACK_SIZE - 1);

 	cpu_gdt_descr[cpu].size = GDT_SIZE - 1;
-	cpu_gdt_descr[cpu].address =
-		(unsigned long)&per_cpu(cpu_gdt_table, cpu);
+	cpu_gdt_descr[cpu].address = (unsigned long)gdt;
 	load_gdt(&cpu_gdt_descr[cpu]);
 	load_idt(&idt_descr);
......
@@ -108,7 +108,7 @@ static inline unsigned long get_segment_eip(struct pt_regs *regs,
 		desc = (void *)desc + (seg & ~7);
 	} else {
 		/* Must disable preemption while reading the GDT. */
-		desc = (u32 *)&per_cpu(cpu_gdt_table, get_cpu());
+		desc = (u32 *)get_cpu_gdt_table(get_cpu());
 		desc = (void *)desc + (seg & ~7);
 	}
......
@@ -17,6 +17,8 @@
 extern struct desc_struct cpu_gdt_table[GDT_ENTRIES];
 DECLARE_PER_CPU(struct desc_struct, cpu_gdt_table[GDT_ENTRIES]);

+#define get_cpu_gdt_table(_cpu) (per_cpu(cpu_gdt_table,_cpu))
+
 DECLARE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]);

 struct Xgt_desc_struct {
@@ -60,7 +62,7 @@ __asm__ __volatile__ ("movw %w3,0(%2)\n\t" \

 static inline void __set_tss_desc(unsigned int cpu, unsigned int entry, void *addr)
 {
-	_set_tssldt_desc(&per_cpu(cpu_gdt_table, cpu)[entry], (int)addr,
+	_set_tssldt_desc(&get_cpu_gdt_table(cpu)[entry], (int)addr,
 		offsetof(struct tss_struct, __cacheline_filler) - 1, 0x89);
 }
@@ -68,7 +70,7 @@ static inline void __set_tss_desc(unsigned int cpu, unsigned int entry, void *addr)

 static inline void set_ldt_desc(unsigned int cpu, void *addr, unsigned int size)
 {
-	_set_tssldt_desc(&per_cpu(cpu_gdt_table, cpu)[GDT_ENTRY_LDT], (int)addr, ((size << 3)-1), 0x82);
+	_set_tssldt_desc(&get_cpu_gdt_table(cpu)[GDT_ENTRY_LDT], (int)addr, ((size << 3)-1), 0x82);
 }
#define LDT_entry_a(info) \
@@ -109,7 +111,7 @@ static inline void write_ldt_entry(void *ldt, int entry, __u32 entry_a, __u32 entry_b)

 static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
 {
-#define C(i) per_cpu(cpu_gdt_table, cpu)[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i]
+#define C(i) get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i]
 	C(0); C(1); C(2);
 #undef C
 }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment