Commit d936d2d4 authored by Linus Torvalds

Merge tag 'stable/for-linus-3.11-rc6-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull Xen bug-fixes from Konrad Rzeszutek Wilk:
 - On ARM, calls to get_cpu/put_cpu were not balanced.
 - Fix to make tboot + Xen + Linux work correctly.
 - Fix event channel VCPU binding issues.
 - Fix a vCPU online race where IPIs were sent to a not-yet-online vCPU.

* tag 'stable/for-linus-3.11-rc6-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  xen/smp: initialize IPI vectors before marking CPU online
  xen/events: mask events when changing their VCPU binding
  xen/events: initialize local per-cpu mask for all possible events
  x86/xen: do not identity map UNUSABLE regions in the machine E820
  xen/arm: missing put_cpu in xen_percpu_init
parents 0903391a fc78d343
@@ -170,6 +170,7 @@ static void __init xen_percpu_init(void *unused)
 	per_cpu(xen_vcpu, cpu) = vcpup;

 	enable_percpu_irq(xen_events_irq, 0);
+	put_cpu();
 }

 static void xen_restart(enum reboot_mode reboot_mode, const char *cmd)
...
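For readers unfamiliar with the pattern, the hunk above restores the invariant that every get_cpu() (which disables preemption) is paired with a put_cpu() on the way out of the function. Below is a minimal userspace sketch of that shape; the get_cpu()/put_cpu() stubs and percpu_init_sketch() are hypothetical stand-ins, not the kernel implementations.

#include <stdio.h>

/* Hypothetical stand-ins for the kernel's get_cpu()/put_cpu(),
 * which disable/re-enable preemption around per-CPU accesses. */
static int preempt_count;

static int get_cpu(void)
{
	preempt_count++;	/* kernel: disables preemption */
	return 0;		/* pretend we always run on CPU 0 */
}

static void put_cpu(void)
{
	preempt_count--;	/* kernel: re-enables preemption */
}

/* Sketch of the fixed shape of xen_percpu_init(): the get_cpu()
 * at the top is now balanced by a put_cpu() before returning. */
static void percpu_init_sketch(void)
{
	int cpu = get_cpu();
	printf("initializing per-CPU state for CPU %d\n", cpu);
	/* ... register per-CPU vcpu info, enable per-CPU IRQ ... */
	put_cpu();		/* the call this commit adds */
}

int main(void)
{
	percpu_init_sketch();
	/* A nonzero count here would mean an unbalanced get_cpu(). */
	printf("preempt_count after init: %d (expect 0)\n", preempt_count);
	return 0;
}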
@@ -313,6 +313,17 @@ static void xen_align_and_add_e820_region(u64 start, u64 size, int type)
 	e820_add_region(start, end - start, type);
 }

+void xen_ignore_unusable(struct e820entry *list, size_t map_size)
+{
+	struct e820entry *entry;
+	unsigned int i;
+
+	for (i = 0, entry = list; i < map_size; i++, entry++) {
+		if (entry->type == E820_UNUSABLE)
+			entry->type = E820_RAM;
+	}
+}
+
 /**
  * machine_specific_memory_setup - Hook for machine specific memory setup.
  **/
@@ -353,6 +364,17 @@ char * __init xen_memory_setup(void)
 	}
 	BUG_ON(rc);

+	/*
+	 * Xen won't allow a 1:1 mapping to be created to UNUSABLE
+	 * regions, so if we're using the machine memory map leave the
+	 * region as RAM as it is in the pseudo-physical map.
+	 *
+	 * UNUSABLE regions in domUs are not handled and will need
+	 * a patch in the future.
+	 */
+	if (xen_initial_domain())
+		xen_ignore_unusable(map, memmap.nr_entries);
+
 	/* Make sure the Xen-supplied memory map is well-ordered. */
 	sanitize_e820_map(map, memmap.nr_entries, &memmap.nr_entries);
...
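To make the new helper concrete, here is a standalone sketch of what xen_ignore_unusable() does to dom0's machine E820: walk the map and flip E820_UNUSABLE entries (such as the ones tboot inserts) back to E820_RAM, since Xen refuses to create 1:1 mappings for UNUSABLE regions. The struct layout, type constants, and sample map below are simplified stand-ins for the kernel's e820 definitions.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Simplified stand-ins for the kernel's e820 definitions. */
#define E820_RAM	1
#define E820_RESERVED	2
#define E820_UNUSABLE	5

struct e820entry {
	uint64_t addr;
	uint64_t size;
	uint32_t type;
};

/* Same walk as the new xen_ignore_unusable() in this commit. */
static void ignore_unusable(struct e820entry *list, size_t map_size)
{
	size_t i;

	for (i = 0; i < map_size; i++) {
		if (list[i].type == E820_UNUSABLE)
			list[i].type = E820_RAM;
	}
}

int main(void)
{
	struct e820entry map[] = {
		{ 0x0,      0x9f000, E820_RAM      },
		{ 0x9f000,  0x1000,  E820_RESERVED },
		{ 0x100000, 0x1000,  E820_UNUSABLE }, /* e.g. a tboot region */
		{ 0x101000, 0xff000, E820_RAM      },
	};
	size_t n = sizeof(map) / sizeof(map[0]), i;

	ignore_unusable(map, n);
	for (i = 0; i < n; i++)
		printf("%#010llx +%#llx type=%u\n",
		       (unsigned long long)map[i].addr,
		       (unsigned long long)map[i].size,
		       (unsigned)map[i].type);
	return 0;
}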
@@ -694,8 +694,15 @@ static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
 static int xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle)
 {
 	int rc;
-	rc = native_cpu_up(cpu, tidle);
-	WARN_ON (xen_smp_intr_init(cpu));
+	/*
+	 * xen_smp_intr_init() needs to run before native_cpu_up()
+	 * so that IPI vectors are set up on the booting CPU before
+	 * it is marked online in native_cpu_up().
+	 */
+	rc = xen_smp_intr_init(cpu);
+	WARN_ON(rc);
+	if (!rc)
+		rc = native_cpu_up(cpu, tidle);
 	return rc;
 }
...
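Below is a compilable sketch of the ordering this hunk enforces: IPI setup must succeed before the CPU is brought up and marked online, and a setup failure must short-circuit the bring-up so other CPUs never IPI a vCPU that has no vectors. smp_intr_init_sketch(), cpu_up_sketch(), and hvm_cpu_up_sketch() are hypothetical mocks, not kernel calls.

#include <stdio.h>

/* Hypothetical mock: set up the per-CPU IPI vectors. Returns 0 on
 * success, a negative errno-style value on failure. */
static int smp_intr_init_sketch(unsigned int cpu)
{
	printf("cpu%u: IPI vectors initialized\n", cpu);
	return 0;
}

/* Hypothetical mock for native_cpu_up(): once this returns, other
 * CPUs may see this CPU online and start sending it IPIs. */
static int cpu_up_sketch(unsigned int cpu)
{
	printf("cpu%u: marked online\n", cpu);
	return 0;
}

/* The fixed ordering from this commit: init IPIs first, and only
 * bring the CPU online if that succeeded. */
static int hvm_cpu_up_sketch(unsigned int cpu)
{
	int rc = smp_intr_init_sketch(cpu);

	if (rc) {
		fprintf(stderr, "cpu%u: intr init failed (%d)\n", cpu, rc);
		return rc;	/* CPU is never marked online */
	}
	return cpu_up_sketch(cpu);
}

int main(void)
{
	return hvm_cpu_up_sketch(1);
}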
@@ -348,7 +348,7 @@ static void init_evtchn_cpu_bindings(void)

 	for_each_possible_cpu(i)
 		memset(per_cpu(cpu_evtchn_mask, i),
-		       (i == 0) ? ~0 : 0, sizeof(*per_cpu(cpu_evtchn_mask, i)));
+		       (i == 0) ? ~0 : 0, NR_EVENT_CHANNELS/8);
 }

 static inline void clear_evtchn(int port)
@@ -1493,8 +1493,10 @@ void rebind_evtchn_irq(int evtchn, int irq)
 /* Rebind an evtchn so that it gets delivered to a specific cpu */
 static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
 {
+	struct shared_info *s = HYPERVISOR_shared_info;
 	struct evtchn_bind_vcpu bind_vcpu;
 	int evtchn = evtchn_from_irq(irq);
+	int masked;

 	if (!VALID_EVTCHN(evtchn))
 		return -1;
@@ -1510,6 +1512,12 @@ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
 	bind_vcpu.port = evtchn;
 	bind_vcpu.vcpu = tcpu;

+	/*
+	 * Mask the event while changing the VCPU binding to prevent
+	 * it being delivered on an unexpected VCPU.
+	 */
+	masked = sync_test_and_set_bit(evtchn, BM(s->evtchn_mask));
+
 	/*
 	 * If this fails, it usually just indicates that we're dealing with a
 	 * virq or IPI channel, which don't actually need to be rebound. Ignore
@@ -1518,6 +1526,9 @@ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
 	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
 		bind_evtchn_to_cpu(evtchn, tcpu);

+	if (!masked)
+		unmask_evtchn(evtchn);
+
 	return 0;
 }
...
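The rebind fix follows a common mask/modify/restore pattern: atomically set the mask bit while remembering whether it was already set, perform the change, then unmask only if we were the ones who masked it, so a pre-existing mask set by someone else is left alone. Below is a userspace sketch of that pattern using C11 atomics; the bitmap, the test_and_set_bit()/clear_bit() helpers, and rebind_sketch() are simplified stand-ins for the kernel's sync_test_and_set_bit() and unmask_evtchn().

#include <stdio.h>
#include <stdatomic.h>
#include <stdbool.h>

#define NUM_PORTS	64
#define BITS_PER_LONG	(8 * sizeof(unsigned long))

/* Stand-in for the shared_info evtchn_mask bitmap. */
static _Atomic unsigned long evtchn_mask[NUM_PORTS / BITS_PER_LONG];

/* Atomically set a bit and return its previous value, like the
 * kernel's sync_test_and_set_bit(). */
static bool test_and_set_bit(int nr, _Atomic unsigned long *addr)
{
	unsigned long bit = 1UL << (nr % BITS_PER_LONG);
	unsigned long old = atomic_fetch_or(&addr[nr / BITS_PER_LONG], bit);

	return (old & bit) != 0;
}

static void clear_bit(int nr, _Atomic unsigned long *addr)
{
	unsigned long bit = 1UL << (nr % BITS_PER_LONG);

	atomic_fetch_and(&addr[nr / BITS_PER_LONG], ~bit);
}

/* Mask/modify/restore, as in the fixed rebind_irq_to_cpu(). */
static void rebind_sketch(int port, unsigned tcpu)
{
	/* Mask the event so it cannot fire mid-rebind... */
	bool masked = test_and_set_bit(port, evtchn_mask);

	printf("port %d: rebinding to vcpu %u\n", port, tcpu);

	/* ...and unmask only if it was not already masked before. */
	if (!masked)
		clear_bit(port, evtchn_mask);
}

int main(void)
{
	rebind_sketch(3, 1);
	return 0;
}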