Commit eca217b3 authored by Ingo Molnar

Merge branch 'x86/paravirt' into x86/apic

Conflicts:
	arch/x86/mach-voyager/voyager_smp.c
parents 54a353a0 e4d04071
@@ -768,7 +768,8 @@ extern int sysenter_setup(void);
extern struct desc_ptr early_gdt_descr;
extern void cpu_set_gdt(int);
-extern void switch_to_new_gdt(void);
+extern void switch_to_new_gdt(int);
+extern void load_percpu_segment(int);
extern void cpu_init(void);
static inline unsigned long get_debugctlmsr(void)
...
@@ -296,23 +296,28 @@ static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c)
__u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata;
+void load_percpu_segment(int cpu)
+{
+#ifdef CONFIG_X86_32
+	loadsegment(fs, __KERNEL_PERCPU);
+#else
+	loadsegment(gs, 0);
+	wrmsrl(MSR_GS_BASE, (unsigned long)per_cpu(irq_stack_union.gs_base, cpu));
+#endif
+}
/* Current gdt points %fs at the "master" per-cpu area: after this,
 * it's on the real one. */
-void switch_to_new_gdt(void)
+void switch_to_new_gdt(int cpu)
{
	struct desc_ptr gdt_descr;
-	int cpu = smp_processor_id();
	gdt_descr.address = (long)get_cpu_gdt_table(cpu);
	gdt_descr.size = GDT_SIZE - 1;
	load_gdt(&gdt_descr);
	/* Reload the per-cpu base */
-#ifdef CONFIG_X86_32
-	loadsegment(fs, __KERNEL_PERCPU);
-#else
-	loadsegment(gs, 0);
-	wrmsrl(MSR_GS_BASE, (unsigned long)per_cpu(irq_stack_union.gs_base, cpu));
-#endif
+	load_percpu_segment(cpu);
}
static struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};
@@ -1029,7 +1034,7 @@ void __cpuinit cpu_init(void)
	 * and set up the GDT descriptor:
	 */
-	switch_to_new_gdt();
+	switch_to_new_gdt(cpu);
	loadsegment(fs, 0);
	load_idt((const struct desc_ptr *)&idt_descr);
@@ -1131,7 +1136,7 @@ void __cpuinit cpu_init(void)
	clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
	load_idt(&idt_descr);
-	switch_to_new_gdt();
+	switch_to_new_gdt(cpu);
	/*
	 * Set up and load the per-CPU TSS and LDT
...
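The hunks above change switch_to_new_gdt() to take the CPU number from its caller instead of calling smp_processor_id() itself, and split the per-cpu segment reload out into the new load_percpu_segment() helper (%fs on 32-bit, the GS base MSR on 64-bit). This appears aimed at boot-time callers, which already know which CPU they are setting up before per-cpu accesses are fully usable. A minimal user-space sketch of the interface change only; the names below are stand-ins, not kernel code:

/* gdt_iface_demo.c - models the "pass the cpu in" interface change only */
#include <stdio.h>

/* stand-in for smp_processor_id() */
static int current_cpu_guess(void)
{
	return 0;
}

/* old style: the helper looked the CPU id up itself */
static void switch_gdt_old(void)
{
	int cpu = current_cpu_guess();
	printf("loading GDT + per-cpu segment for cpu %d\n", cpu);
}

/* new style: the caller, which already knows which CPU it is setting up,
 * passes the number down explicitly */
static void switch_gdt_new(int cpu)
{
	printf("loading GDT + per-cpu segment for cpu %d\n", cpu);
}

int main(void)
{
	switch_gdt_old();	/* depends on the id lookup already working */
	switch_gdt_new(3);	/* caller supplies the CPU during bring-up */
	return 0;
}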
@@ -1143,7 +1143,7 @@ ENTRY(native_load_gs_index)
	CFI_STARTPROC
	pushf
	CFI_ADJUST_CFA_OFFSET 8
-	DISABLE_INTERRUPTS(CLBR_ANY | ~(CLBR_RDI))
+	DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI)
	SWAPGS
gs_change:
	movl %edi,%gs
...
@@ -44,6 +44,17 @@ void _paravirt_nop(void)
{
}
+/* identity function, which can be inlined */
+u32 _paravirt_ident_32(u32 x)
+{
+	return x;
+}
+u64 _paravirt_ident_64(u64 x)
+{
+	return x;
+}
static void __init default_banner(void)
{
	printk(KERN_INFO "Booting paravirtualized kernel on %s\n",
@@ -138,9 +149,16 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
	if (opfunc == NULL)
		/* If there's no function, patch it with a ud2a (BUG) */
		ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
-	else if (opfunc == paravirt_nop)
+	else if (opfunc == _paravirt_nop)
		/* If the operation is a nop, then nop the callsite */
		ret = paravirt_patch_nop();
+	/* identity functions just return their single argument */
+	else if (opfunc == _paravirt_ident_32)
+		ret = paravirt_patch_ident_32(insnbuf, len);
+	else if (opfunc == _paravirt_ident_64)
+		ret = paravirt_patch_ident_64(insnbuf, len);
	else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
		 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
		 type == PARAVIRT_PATCH(pv_cpu_ops.usergs_sysret32) ||
@@ -292,10 +310,10 @@ struct pv_time_ops pv_time_ops = {
struct pv_irq_ops pv_irq_ops = {
	.init_IRQ = native_init_IRQ,
-	.save_fl = native_save_fl,
-	.restore_fl = native_restore_fl,
-	.irq_disable = native_irq_disable,
-	.irq_enable = native_irq_enable,
+	.save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
+	.restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
+	.irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
+	.irq_enable = __PV_IS_CALLEE_SAVE(native_irq_enable),
	.safe_halt = native_safe_halt,
	.halt = native_halt,
#ifdef CONFIG_X86_64
@@ -373,6 +391,14 @@ struct pv_apic_ops pv_apic_ops = {
#endif
};
+#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
+/* 32-bit pagetable entries */
+#define PTE_IDENT	__PV_IS_CALLEE_SAVE(_paravirt_ident_32)
+#else
+/* 64-bit pagetable entries */
+#define PTE_IDENT	__PV_IS_CALLEE_SAVE(_paravirt_ident_64)
+#endif
struct pv_mmu_ops pv_mmu_ops = {
#ifndef CONFIG_X86_64
	.pagetable_setup_start = native_pagetable_setup_start,
@@ -424,21 +450,23 @@ struct pv_mmu_ops pv_mmu_ops = {
	.pmd_clear = native_pmd_clear,
#endif
	.set_pud = native_set_pud,
-	.pmd_val = native_pmd_val,
-	.make_pmd = native_make_pmd,
+	.pmd_val = PTE_IDENT,
+	.make_pmd = PTE_IDENT,
#if PAGETABLE_LEVELS == 4
-	.pud_val = native_pud_val,
-	.make_pud = native_make_pud,
+	.pud_val = PTE_IDENT,
+	.make_pud = PTE_IDENT,
	.set_pgd = native_set_pgd,
#endif
#endif /* PAGETABLE_LEVELS >= 3 */
-	.pte_val = native_pte_val,
-	.pgd_val = native_pgd_val,
+	.pte_val = PTE_IDENT,
+	.pgd_val = PTE_IDENT,
-	.make_pte = native_make_pte,
-	.make_pgd = native_make_pgd,
+	.make_pte = PTE_IDENT,
+	.make_pgd = PTE_IDENT,
	.dup_mmap = paravirt_nop,
	.exit_mmap = paravirt_nop,
...
@@ -12,6 +12,18 @@ DEF_NATIVE(pv_mmu_ops, read_cr3, "mov %cr3, %eax");
DEF_NATIVE(pv_cpu_ops, clts, "clts");
DEF_NATIVE(pv_cpu_ops, read_tsc, "rdtsc");
+unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len)
+{
+	/* arg in %eax, return in %eax */
+	return 0;
+}
+unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len)
+{
+	/* arg in %edx:%eax, return in %edx:%eax */
+	return 0;
+}
unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
		      unsigned long addr, unsigned len)
{
...
@@ -19,6 +19,21 @@ DEF_NATIVE(pv_cpu_ops, usergs_sysret64, "swapgs; sysretq");
DEF_NATIVE(pv_cpu_ops, usergs_sysret32, "swapgs; sysretl");
DEF_NATIVE(pv_cpu_ops, swapgs, "swapgs");
+DEF_NATIVE(, mov32, "mov %edi, %eax");
+DEF_NATIVE(, mov64, "mov %rdi, %rax");
+unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len)
+{
+	return paravirt_patch_insns(insnbuf, len,
+				    start__mov32, end__mov32);
+}
+unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len)
+{
+	return paravirt_patch_insns(insnbuf, len,
+				    start__mov64, end__mov64);
+}
unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
		      unsigned long addr, unsigned len)
{
...
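On native hardware, conversions such as pte_val()/make_pte() are pure identity operations, so the hunks above route them through _paravirt_ident_32/64 (via PTE_IDENT) and teach paravirt_patch_default() to recognize those pointers. On 32-bit the argument and the return value already share %eax (or %edx:%eax), so paravirt_patch_ident_32() emits no instructions and the call site is left to be nop-padded; on 64-bit the argument arrives in %rdi and has to be moved into %rax, hence the mov32/mov64 templates. The standalone sketch below models only the dispatch idea (recognize "this op is the identity" and drop the indirect call); it is not kernel code, and its names are illustrative:

/* ident_patch_demo.c - illustrative model, not the kernel's patching code */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t (*pv_op64)(uint64_t);

/* stands in for _paravirt_ident_64 */
static uint64_t ident64(uint64_t x)
{
	return x;
}

/* stands in for a hypervisor-style pte_val hook that really does work */
static uint64_t hypervisor_pte_val(uint64_t x)
{
	return x & ~0x8000000000000000ull;
}

/* Decide how a call site for 'op' would be handled by the patcher. */
static const char *patch_decision(pv_op64 op)
{
	if (op == ident64)
		return "collapse call site to 'mov %rdi, %rax' (or plain nops on 32-bit)";
	return "keep the indirect call through the ops table";
}

int main(void)
{
	printf("native pte_val: %s\n", patch_decision(ident64));
	printf("xen    pte_val: %s\n", patch_decision(hypervisor_pte_val));
	return 0;
}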
@@ -122,7 +122,7 @@ void __init setup_per_cpu_areas(void)
	 * area.  Reload any changed state for the boot CPU.
	 */
	if (cpu == boot_cpu_id)
-		switch_to_new_gdt();
+		switch_to_new_gdt(cpu);
	DBG("PERCPU: cpu %4d %p\n", cpu, ptr);
}
...
@@ -1188,7 +1188,7 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
void __init native_smp_prepare_boot_cpu(void)
{
	int me = smp_processor_id();
-	switch_to_new_gdt();
+	switch_to_new_gdt(me);
	/* already set me in cpu_online_mask in boot_cpu_init() */
	cpumask_set_cpu(me, cpu_callout_mask);
	per_cpu(cpu_state, me) = CPU_ONLINE;
...
@@ -259,7 +259,7 @@ const struct cpumask *uv_flush_send_and_wait(int cpu, int this_blade,
	 * the cpu's, all of which are still in the mask.
	 */
	__get_cpu_var(ptcstats).ptc_i++;
-	return 0;
+	return flush_mask;
}
/*
...
@@ -670,10 +670,11 @@ static inline int __init activate_vmi(void)
	para_fill(pv_mmu_ops.write_cr2, SetCR2);
	para_fill(pv_mmu_ops.write_cr3, SetCR3);
	para_fill(pv_cpu_ops.write_cr4, SetCR4);
-	para_fill(pv_irq_ops.save_fl, GetInterruptMask);
-	para_fill(pv_irq_ops.restore_fl, SetInterruptMask);
-	para_fill(pv_irq_ops.irq_disable, DisableInterrupts);
-	para_fill(pv_irq_ops.irq_enable, EnableInterrupts);
+	para_fill(pv_irq_ops.save_fl.func, GetInterruptMask);
+	para_fill(pv_irq_ops.restore_fl.func, SetInterruptMask);
+	para_fill(pv_irq_ops.irq_disable.func, DisableInterrupts);
+	para_fill(pv_irq_ops.irq_enable.func, EnableInterrupts);
	para_fill(pv_cpu_ops.wbinvd, WBINVD);
	para_fill(pv_cpu_ops.read_tsc, RDTSC);
...
@@ -22,6 +22,7 @@ PHDRS {
#ifdef CONFIG_SMP
	percpu PT_LOAD FLAGS(7);	/* RWE */
#endif
+	data.init2 PT_LOAD FLAGS(7);	/* RWE */
	note PT_NOTE FLAGS(0);	/* ___ */
}
SECTIONS
@@ -215,7 +216,7 @@ SECTIONS
	/*
	 * percpu offsets are zero-based on SMP.  PERCPU_VADDR() changes the
	 * output PHDR, so the next output section - __data_nosave - should
-	 * switch it back to data.init. Also, pda should be at the head of
+	 * start another section data.init2. Also, pda should be at the head of
	 * percpu area.  Preallocate it and define the percpu offset symbol
	 * so that it can be accessed as a percpu variable.
	 */
@@ -232,7 +233,7 @@ SECTIONS
	__nosave_begin = .;
	.data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
		*(.data.nosave)
-	} :data.init	/* switch back to data.init, see PERCPU_VADDR() above */
+	} :data.init2	/* use another section data.init2, see PERCPU_VADDR() above */
	. = ALIGN(PAGE_SIZE);
	__nosave_end = .;
...
@@ -37,6 +37,7 @@ static unsigned long vsmp_save_fl(void)
		flags &= ~X86_EFLAGS_IF;
	return flags;
}
+PV_CALLEE_SAVE_REGS_THUNK(vsmp_save_fl);
static void vsmp_restore_fl(unsigned long flags)
{
@@ -46,6 +47,7 @@ static void vsmp_restore_fl(unsigned long flags)
		flags |= X86_EFLAGS_AC;
	native_restore_fl(flags);
}
+PV_CALLEE_SAVE_REGS_THUNK(vsmp_restore_fl);
static void vsmp_irq_disable(void)
{
@@ -53,6 +55,7 @@ static void vsmp_irq_disable(void)
	native_restore_fl((flags & ~X86_EFLAGS_IF) | X86_EFLAGS_AC);
}
+PV_CALLEE_SAVE_REGS_THUNK(vsmp_irq_disable);
static void vsmp_irq_enable(void)
{
@@ -60,6 +63,7 @@ static void vsmp_irq_enable(void)
	native_restore_fl((flags | X86_EFLAGS_IF) & (~X86_EFLAGS_AC));
}
+PV_CALLEE_SAVE_REGS_THUNK(vsmp_irq_enable);
static unsigned __init_or_module vsmp_patch(u8 type, u16 clobbers, void *ibuf,
				  unsigned long addr, unsigned len)
@@ -90,10 +94,10 @@ static void __init set_vsmp_pv_ops(void)
			cap, ctl);
	if (cap & ctl & (1 << 4)) {
		/* Setup irq ops and turn on vSMP  IRQ fastpath handling */
-		pv_irq_ops.irq_disable = vsmp_irq_disable;
-		pv_irq_ops.irq_enable = vsmp_irq_enable;
-		pv_irq_ops.save_fl = vsmp_save_fl;
-		pv_irq_ops.restore_fl = vsmp_restore_fl;
+		pv_irq_ops.irq_disable = PV_CALLEE_SAVE(vsmp_irq_disable);
+		pv_irq_ops.irq_enable = PV_CALLEE_SAVE(vsmp_irq_enable);
+		pv_irq_ops.save_fl = PV_CALLEE_SAVE(vsmp_save_fl);
+		pv_irq_ops.restore_fl = PV_CALLEE_SAVE(vsmp_restore_fl);
		pv_init_ops.patch = vsmp_patch;
		ctl &= ~(1 << 4);
...
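The irq-flag operations (save_fl/restore_fl/irq_disable/irq_enable) are converted in this merge from bare function pointers to callee-saved entries: C implementations get an assembly thunk generated by PV_CALLEE_SAVE_REGS_THUNK(), and every assignment now wraps the function with PV_CALLEE_SAVE() (or __PV_IS_CALLEE_SAVE() for the static native defaults), the point being that these very hot call sites no longer have to assume all caller-saved registers are clobbered. That is why vmi, vsmp, lguest and xen all change in the same pattern. A minimal user-space model of the wrapping idea only; the struct and macro names below are stand-ins, not the kernel's API:

/* callee_save_demo.c - illustrative model of the wrapper pattern only */
#include <stdio.h>

/* The op is no longer a bare pointer but a one-member struct, so every
 * backend must update its assignments explicitly (compare the .func
 * accesses in the vmi hunk above). */
struct callee_save_fn {
	unsigned long (*func)(void);
};

#define WRAP_CALLEE_SAVE(f)	((struct callee_save_fn){ .func = (f) })

struct demo_irq_ops {
	struct callee_save_fn save_fl;	/* was: unsigned long (*save_fl)(void); */
};

static unsigned long demo_native_save_fl(void)
{
	return 0x200;	/* pretend X86_EFLAGS_IF is set */
}

int main(void)
{
	struct demo_irq_ops ops = {
		.save_fl = WRAP_CALLEE_SAVE(demo_native_save_fl),
	};

	printf("flags = %#lx\n", ops.save_fl.func());
	return 0;
}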
@@ -173,24 +173,29 @@ static unsigned long save_fl(void)
{
	return lguest_data.irq_enabled;
}
+PV_CALLEE_SAVE_REGS_THUNK(save_fl);
/* restore_flags() just sets the flags back to the value given. */
static void restore_fl(unsigned long flags)
{
	lguest_data.irq_enabled = flags;
}
+PV_CALLEE_SAVE_REGS_THUNK(restore_fl);
/* Interrupts go off... */
static void irq_disable(void)
{
	lguest_data.irq_enabled = 0;
}
+PV_CALLEE_SAVE_REGS_THUNK(irq_disable);
/* Interrupts go on... */
static void irq_enable(void)
{
	lguest_data.irq_enabled = X86_EFLAGS_IF;
}
+PV_CALLEE_SAVE_REGS_THUNK(irq_enable);
/*:*/
/*M:003 Note that we don't check for outstanding interrupts when we re-enable
 * them (or when we unmask an interrupt).  This seems to work for the moment,
@@ -984,10 +989,10 @@ __init void lguest_init(void)
	/* interrupt-related operations */
	pv_irq_ops.init_IRQ = lguest_init_IRQ;
-	pv_irq_ops.save_fl = save_fl;
-	pv_irq_ops.restore_fl = restore_fl;
-	pv_irq_ops.irq_disable = irq_disable;
-	pv_irq_ops.irq_enable = irq_enable;
+	pv_irq_ops.save_fl = PV_CALLEE_SAVE(save_fl);
+	pv_irq_ops.restore_fl = PV_CALLEE_SAVE(restore_fl);
+	pv_irq_ops.irq_disable = PV_CALLEE_SAVE(irq_disable);
+	pv_irq_ops.irq_enable = PV_CALLEE_SAVE(irq_enable);
	pv_irq_ops.safe_halt = lguest_safe_halt;
	/* init-time operations */
...
@@ -1744,13 +1744,13 @@ static void __init voyager_smp_prepare_cpus(unsigned int max_cpus)
static void __cpuinit voyager_smp_prepare_boot_cpu(void)
{
-	switch_to_new_gdt();
+	int cpu = smp_processor_id();
+	switch_to_new_gdt(cpu);
	cpu_online_map = cpumask_of_cpu(smp_processor_id());
	cpu_callout_map = cpumask_of_cpu(smp_processor_id());
	cpu_callin_map = CPU_MASK_NONE;
	cpu_present_map = cpumask_of_cpu(smp_processor_id());
}
static int __cpuinit voyager_cpu_up(unsigned int cpu)
...
@@ -6,7 +6,8 @@ CFLAGS_REMOVE_irq.o = -pg
endif
obj-y		:= enlighten.o setup.o multicalls.o mmu.o irq.o \
-			time.o xen-asm_$(BITS).o grant-table.o suspend.o
+			time.o xen-asm.o xen-asm_$(BITS).o \
+			grant-table.o suspend.o
obj-$(CONFIG_SMP)		+= smp.o spinlock.o
obj-$(CONFIG_XEN_DEBUG_FS)	+= debugfs.o
\ No newline at end of file
@@ -50,6 +50,7 @@ static unsigned long xen_save_fl(void)
	 */
	return (-flags) & X86_EFLAGS_IF;
}
+PV_CALLEE_SAVE_REGS_THUNK(xen_save_fl);
static void xen_restore_fl(unsigned long flags)
{
@@ -76,6 +77,7 @@ static void xen_restore_fl(unsigned long flags)
			xen_force_evtchn_callback();
	}
}
+PV_CALLEE_SAVE_REGS_THUNK(xen_restore_fl);
static void xen_irq_disable(void)
{
@@ -86,6 +88,7 @@ static void xen_irq_disable(void)
	percpu_read(xen_vcpu)->evtchn_upcall_mask = 1;
	preempt_enable_no_resched();
}
+PV_CALLEE_SAVE_REGS_THUNK(xen_irq_disable);
static void xen_irq_enable(void)
{
@@ -106,6 +109,7 @@ static void xen_irq_enable(void)
	if (unlikely(vcpu->evtchn_upcall_pending))
		xen_force_evtchn_callback();
}
+PV_CALLEE_SAVE_REGS_THUNK(xen_irq_enable);
static void xen_safe_halt(void)
{
@@ -124,10 +128,12 @@ static void xen_halt(void)
static const struct pv_irq_ops xen_irq_ops __initdata = {
	.init_IRQ = __xen_init_IRQ,
-	.save_fl = xen_save_fl,
-	.restore_fl = xen_restore_fl,
-	.irq_disable = xen_irq_disable,
-	.irq_enable = xen_irq_enable,
+	.save_fl = PV_CALLEE_SAVE(xen_save_fl),
+	.restore_fl = PV_CALLEE_SAVE(xen_restore_fl),
+	.irq_disable = PV_CALLEE_SAVE(xen_irq_disable),
+	.irq_enable = PV_CALLEE_SAVE(xen_irq_enable),
	.safe_halt = xen_safe_halt,
	.halt = xen_halt,
#ifdef CONFIG_X86_64
...
@@ -54,4 +54,7 @@ pte_t xen_ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr, pte_t
void  xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
			       pte_t *ptep, pte_t pte);
+unsigned long xen_read_cr2_direct(void);
+extern const struct pv_mmu_ops xen_mmu_ops;
#endif	/* _XEN_MMU_H */
@@ -170,7 +170,7 @@ static void __init xen_smp_prepare_boot_cpu(void)
	/* We've switched to the "real" per-cpu gdt, so make sure the
	   old memory can be recycled */
-	make_lowmem_page_readwrite(&per_cpu_var(gdt_page));
+	make_lowmem_page_readwrite(xen_initial_gdt);
	xen_setup_vcpu_info_placement();
}
@@ -235,6 +235,8 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
	ctxt->user_regs.ss = __KERNEL_DS;
#ifdef CONFIG_X86_32
	ctxt->user_regs.fs = __KERNEL_PERCPU;
+#else
+	ctxt->gs_base_kernel = per_cpu_offset(cpu);
#endif
	ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
	ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
@@ -284,6 +286,9 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
	irq_ctx_init(cpu);
#else
	clear_tsk_thread_flag(idle, TIF_FORK);
+	per_cpu(kernel_stack, cpu) =
+		(unsigned long)task_stack_page(idle) -
+		KERNEL_STACK_OFFSET + THREAD_SIZE;
#endif
	xen_setup_timer(cpu);
	xen_init_lock_cpu(cpu);
...
/*
Asm versions of Xen pv-ops, suitable for either direct use or inlining.
The inline versions are the same as the direct-use versions, with the
pre- and post-amble chopped off.
This code is encoded for size rather than absolute efficiency,
with a view to being able to inline as much as possible.
We only bother with direct forms (ie, vcpu in percpu data) of
the operations here; the indirect forms are better handled in
C, since they're generally too large to inline anyway.
*/
#include <asm/asm-offsets.h>
#include <asm/percpu.h>
#include <asm/processor-flags.h>
#include "xen-asm.h"
/*
Enable events. This clears the event mask and tests the pending
event status with one and operation. If there are pending
events, then enter the hypervisor to get them handled.
*/
ENTRY(xen_irq_enable_direct)
/* Unmask events */
movb $0, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
/* Preempt here doesn't matter because that will deal with
any pending interrupts. The pending check may end up being
run on the wrong CPU, but that doesn't hurt. */
/* Test for pending */
testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
jz 1f
2: call check_events
1:
ENDPATCH(xen_irq_enable_direct)
ret
ENDPROC(xen_irq_enable_direct)
RELOC(xen_irq_enable_direct, 2b+1)
/*
Disabling events is simply a matter of making the event mask
non-zero.
*/
ENTRY(xen_irq_disable_direct)
movb $1, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
ENDPATCH(xen_irq_disable_direct)
ret
ENDPROC(xen_irq_disable_direct)
RELOC(xen_irq_disable_direct, 0)
/*
(xen_)save_fl is used to get the current interrupt enable status.
Callers expect the status to be in X86_EFLAGS_IF, and other bits
may be set in the return value. We take advantage of this by
making sure that X86_EFLAGS_IF has the right value (and other bits
in that byte are 0), but other bits in the return value are
undefined. We need to toggle the state of the bit, because
Xen and x86 use opposite senses (mask vs enable).
*/
ENTRY(xen_save_fl_direct)
testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
setz %ah
addb %ah,%ah
ENDPATCH(xen_save_fl_direct)
ret
ENDPROC(xen_save_fl_direct)
RELOC(xen_save_fl_direct, 0)
/*
In principle the caller should be passing us a value return
from xen_save_fl_direct, but for robustness sake we test only
the X86_EFLAGS_IF flag rather than the whole byte. After
setting the interrupt mask state, it checks for unmasked
pending events and enters the hypervisor to get them delivered
if so.
*/
ENTRY(xen_restore_fl_direct)
#ifdef CONFIG_X86_64
testw $X86_EFLAGS_IF, %di
#else
testb $X86_EFLAGS_IF>>8, %ah
#endif
setz PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
/* Preempt here doesn't matter because that will deal with
any pending interrupts. The pending check may end up being
run on the wrong CPU, but that doesn't hurt. */
/* check for unmasked and pending */
cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
jz 1f
2: call check_events
1:
ENDPATCH(xen_restore_fl_direct)
ret
ENDPROC(xen_restore_fl_direct)
RELOC(xen_restore_fl_direct, 2b+1)
/*
Force an event check by making a hypercall,
but preserve regs before making the call.
*/
check_events:
#ifdef CONFIG_X86_32
push %eax
push %ecx
push %edx
call xen_force_evtchn_callback
pop %edx
pop %ecx
pop %eax
#else
push %rax
push %rcx
push %rdx
push %rsi
push %rdi
push %r8
push %r9
push %r10
push %r11
call xen_force_evtchn_callback
pop %r11
pop %r10
pop %r9
pop %r8
pop %rdi
pop %rsi
pop %rdx
pop %rcx
pop %rax
#endif
ret
#ifndef _XEN_XEN_ASM_H
#define _XEN_XEN_ASM_H
#include <linux/linkage.h>
#define RELOC(x, v) .globl x##_reloc; x##_reloc=v
#define ENDPATCH(x) .globl x##_end; x##_end=.
/* Pseudo-flag used for virtual NMI, which we don't implement yet */
#define XEN_EFLAGS_NMI 0x80000000
#endif
@@ -11,101 +11,28 @@
   generally too large to inline anyway.
 */
-#include <linux/linkage.h>
-#include <asm/asm-offsets.h>
+//#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
-#include <asm/percpu.h>
#include <asm/processor-flags.h>
#include <asm/segment.h>
#include <xen/interface/xen.h>
-#define RELOC(x, v) .globl x##_reloc; x##_reloc=v
-#define ENDPATCH(x) .globl x##_end; x##_end=.
-/* Pseudo-flag used for virtual NMI, which we don't implement yet */
-#define XEN_EFLAGS_NMI 0x80000000
-/*
-	Enable events. This clears the event mask and tests the pending
-	event status with one and operation. If there are pending
-	events, then enter the hypervisor to get them handled.
- */
-ENTRY(xen_irq_enable_direct)
-	/* Unmask events */
-	movb $0, PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_mask
-	/* Preempt here doesn't matter because that will deal with
-	   any pending interrupts. The pending check may end up being
-	   run on the wrong CPU, but that doesn't hurt. */
-	/* Test for pending */
-	testb $0xff, PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_pending
-	jz 1f
-2:	call check_events
-1:
-ENDPATCH(xen_irq_enable_direct)
-	ret
-	ENDPROC(xen_irq_enable_direct)
-	RELOC(xen_irq_enable_direct, 2b+1)
-/*
-	Disabling events is simply a matter of making the event mask
-	non-zero.
- */
-ENTRY(xen_irq_disable_direct)
-	movb $1, PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_mask
-ENDPATCH(xen_irq_disable_direct)
-	ret
-	ENDPROC(xen_irq_disable_direct)
-	RELOC(xen_irq_disable_direct, 0)
-/*
-	(xen_)save_fl is used to get the current interrupt enable status.
-	Callers expect the status to be in X86_EFLAGS_IF, and other bits
-	may be set in the return value. We take advantage of this by
-	making sure that X86_EFLAGS_IF has the right value (and other bits
-	in that byte are 0), but other bits in the return value are
-	undefined. We need to toggle the state of the bit, because
-	Xen and x86 use opposite senses (mask vs enable).
- */
-ENTRY(xen_save_fl_direct)
-	testb $0xff, PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_mask
-	setz %ah
-	addb %ah,%ah
-ENDPATCH(xen_save_fl_direct)
-	ret
-	ENDPROC(xen_save_fl_direct)
-	RELOC(xen_save_fl_direct, 0)
-/*
-	In principle the caller should be passing us a value return
-	from xen_save_fl_direct, but for robustness sake we test only
-	the X86_EFLAGS_IF flag rather than the whole byte. After
-	setting the interrupt mask state, it checks for unmasked
-	pending events and enters the hypervisor to get them delivered
-	if so.
- */
-ENTRY(xen_restore_fl_direct)
-	testb $X86_EFLAGS_IF>>8, %ah
-	setz PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_mask
-	/* Preempt here doesn't matter because that will deal with
-	   any pending interrupts. The pending check may end up being
-	   run on the wrong CPU, but that doesn't hurt. */
-	/* check for unmasked and pending */
-	cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_pending
-	jz 1f
-2:	call check_events
-1:
-ENDPATCH(xen_restore_fl_direct)
-	ret
-	ENDPROC(xen_restore_fl_direct)
-	RELOC(xen_restore_fl_direct, 2b+1)
+#include "xen-asm.h"
+/*
+	Force an event check by making a hypercall,
+	but preserve regs before making the call.
+ */
+check_events:
+	push %eax
+	push %ecx
+	push %edx
+	call xen_force_evtchn_callback
+	pop %edx
+	pop %ecx
+	pop %eax
+	ret
/*
	We can't use sysexit directly, because we're not running in ring0.
@@ -289,17 +216,3 @@ ENTRY(xen_iret_crit_fixup)
	lea 4(%edi),%esp		/* point esp to new frame */
2:	jmp xen_do_upcall
-/*
-	Force an event check by making a hypercall,
-	but preserve regs before making the call.
- */
-check_events:
-	push %eax
-	push %ecx
-	push %edx
-	call xen_force_evtchn_callback
-	pop %edx
-	pop %ecx
-	pop %eax
-	ret
@@ -11,142 +11,14 @@
   generally too large to inline anyway.
 */
-#include <linux/linkage.h>
-#include <asm/asm-offsets.h>
-#include <asm/processor-flags.h>
#include <asm/errno.h>
-#include <asm/segment.h>
#include <asm/percpu.h>
+#include <asm/processor-flags.h>
+#include <asm/segment.h>
#include <xen/interface/xen.h>
-#define RELOC(x, v) .globl x##_reloc; x##_reloc=v
-#define ENDPATCH(x) .globl x##_end; x##_end=.
-/* Pseudo-flag used for virtual NMI, which we don't implement yet */
-#define XEN_EFLAGS_NMI 0x80000000
-#if 1
-/*
-	FIXME: x86_64 now can support direct access to percpu variables
-	via a segment override. Update xen accordingly.
- */
-#define BUG ud2a
-#endif
-/*
-	Enable events. This clears the event mask and tests the pending
-	event status with one and operation. If there are pending
-	events, then enter the hypervisor to get them handled.
- */
-ENTRY(xen_irq_enable_direct)
-	BUG
-	/* Unmask events */
-	movb $0, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
-	/* Preempt here doesn't matter because that will deal with
-	   any pending interrupts. The pending check may end up being
-	   run on the wrong CPU, but that doesn't hurt. */
-	/* Test for pending */
-	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
-	jz 1f
-2:	call check_events
-1:
-ENDPATCH(xen_irq_enable_direct)
-	ret
-	ENDPROC(xen_irq_enable_direct)
-	RELOC(xen_irq_enable_direct, 2b+1)
-/*
-	Disabling events is simply a matter of making the event mask
-	non-zero.
- */
-ENTRY(xen_irq_disable_direct)
-	BUG
-	movb $1, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
-ENDPATCH(xen_irq_disable_direct)
-	ret
-	ENDPROC(xen_irq_disable_direct)
-	RELOC(xen_irq_disable_direct, 0)
-/*
-	(xen_)save_fl is used to get the current interrupt enable status.
-	Callers expect the status to be in X86_EFLAGS_IF, and other bits
-	may be set in the return value. We take advantage of this by
-	making sure that X86_EFLAGS_IF has the right value (and other bits
-	in that byte are 0), but other bits in the return value are
-	undefined. We need to toggle the state of the bit, because
-	Xen and x86 use opposite senses (mask vs enable).
- */
-ENTRY(xen_save_fl_direct)
-	BUG
-	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
-	setz %ah
-	addb %ah,%ah
-ENDPATCH(xen_save_fl_direct)
-	ret
-	ENDPROC(xen_save_fl_direct)
-	RELOC(xen_save_fl_direct, 0)
-/*
-	In principle the caller should be passing us a value return
-	from xen_save_fl_direct, but for robustness sake we test only
-	the X86_EFLAGS_IF flag rather than the whole byte. After
-	setting the interrupt mask state, it checks for unmasked
-	pending events and enters the hypervisor to get them delivered
-	if so.
- */
-ENTRY(xen_restore_fl_direct)
-	BUG
-	testb $X86_EFLAGS_IF>>8, %ah
-	setz PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
-	/* Preempt here doesn't matter because that will deal with
-	   any pending interrupts. The pending check may end up being
-	   run on the wrong CPU, but that doesn't hurt. */
-	/* check for unmasked and pending */
-	cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
-	jz 1f
-2:	call check_events
-1:
-ENDPATCH(xen_restore_fl_direct)
-	ret
-	ENDPROC(xen_restore_fl_direct)
-	RELOC(xen_restore_fl_direct, 2b+1)
-/*
-	Force an event check by making a hypercall,
-	but preserve regs before making the call.
- */
-check_events:
-	push %rax
-	push %rcx
-	push %rdx
-	push %rsi
-	push %rdi
-	push %r8
-	push %r9
-	push %r10
-	push %r11
-	call xen_force_evtchn_callback
-	pop %r11
-	pop %r10
-	pop %r9
-	pop %r8
-	pop %rdi
-	pop %rsi
-	pop %rdx
-	pop %rcx
-	pop %rax
-	ret
+#include "xen-asm.h"
ENTRY(xen_adjust_exception_frame)
	mov 8+0(%rsp),%rcx
...
@@ -10,9 +10,12 @@
extern const char xen_hypervisor_callback[];
extern const char xen_failsafe_callback[];
+extern void *xen_initial_gdt;
struct trap_info;
void xen_copy_trap_info(struct trap_info *traps);
+DECLARE_PER_CPU(struct vcpu_info, xen_vcpu_info);
DECLARE_PER_CPU(unsigned long, xen_cr3);
DECLARE_PER_CPU(unsigned long, xen_current_cr3);
@@ -22,6 +25,13 @@ extern struct shared_info *HYPERVISOR_shared_info;
void xen_setup_mfn_list_list(void);
void xen_setup_shared_info(void);
+void xen_setup_machphys_mapping(void);
+pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn);
+void xen_ident_map_ISA(void);
+void xen_reserve_top(void);
+void xen_leave_lazy(void);
+void xen_post_allocator_init(void);
char * __init xen_memory_setup(void);
void __init xen_arch_setup(void);
...
@@ -445,24 +445,22 @@
 * section in the linker script will go there too.  @phdr should have
 * a leading colon.
 *
- * This macro defines three symbols, __per_cpu_load, __per_cpu_start
- * and __per_cpu_end.  The first one is the vaddr of loaded percpu
- * init data.  __per_cpu_start equals @vaddr and __per_cpu_end is the
- * end offset.
+ * Note that this macros defines __per_cpu_load as an absolute symbol.
+ * If there is no need to put the percpu section at a predetermined
+ * address, use PERCPU().
 */
#define PERCPU_VADDR(vaddr, phdr)					\
-	VMLINUX_SYMBOL(__per_cpu_load_abs) = .;				\
-	.data.percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load_abs)	\
+	VMLINUX_SYMBOL(__per_cpu_load) = .;				\
+	.data.percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load)		\
				- LOAD_OFFSET) {			\
		VMLINUX_SYMBOL(__per_cpu_start) = .;			\
-		VMLINUX_SYMBOL(__per_cpu_load) = LOADADDR(.data.percpu) + LOAD_OFFSET;\
		*(.data.percpu.first)					\
		*(.data.percpu.page_aligned)				\
		*(.data.percpu)						\
		*(.data.percpu.shared_aligned)				\
		VMLINUX_SYMBOL(__per_cpu_end) = .;			\
	} phdr								\
-	. = VMLINUX_SYMBOL(__per_cpu_load_abs) + SIZEOF(.data.percpu);
+	. = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data.percpu);
/**
 * PERCPU - define output section for percpu area, simple version
@@ -471,7 +469,20 @@
 * Align to @align and outputs output section for percpu area.  This
 * macro doesn't maniuplate @vaddr or @phdr and __per_cpu_load and
 * __per_cpu_start will be identical.
+ *
+ * This macro is equivalent to ALIGN(align); PERCPU_VADDR( , ) except
+ * that __per_cpu_load is defined as a relative symbol against
+ * .data.percpu which is required for relocatable x86_32
+ * configuration.
 */
#define PERCPU(align)							\
	. = ALIGN(align);						\
-	PERCPU_VADDR( , )
+	.data.percpu	: AT(ADDR(.data.percpu) - LOAD_OFFSET) {	\
+		VMLINUX_SYMBOL(__per_cpu_load) = .;			\
+		VMLINUX_SYMBOL(__per_cpu_start) = .;			\
+		*(.data.percpu.first)					\
+		*(.data.percpu.page_aligned)				\
+		*(.data.percpu)						\
+		*(.data.percpu.shared_aligned)				\
+		VMLINUX_SYMBOL(__per_cpu_end) = .;			\
+	}