Commit c7a3589e authored by Mike Travis's avatar Mike Travis Committed by Ingo Molnar

Xen: reduce memory required for cpu_evtchn_mask

Impact: reduce memory usage.

Reduce the significant growth in memory usage that occurred
when NR_CPUS was bumped from 128 to 4096, by allocating the
array based on nr_cpu_ids:

    65536  +2031616   2097152 +3100%  cpu_evtchn_mask(.bss)
Signed-off-by: Mike Travis <travis@sgi.com>
Cc: Jeremy Fitzhardinge <jeremy@xensource.com>
Cc: Chris Wright <chrisw@sous-sol.org>
Cc: virtualization@lists.osdl.org
Cc: xen-devel@lists.xensource.com
parent d38b223c
...@@ -75,7 +75,14 @@ enum { ...@@ -75,7 +75,14 @@ enum {
static int evtchn_to_irq[NR_EVENT_CHANNELS] = { static int evtchn_to_irq[NR_EVENT_CHANNELS] = {
[0 ... NR_EVENT_CHANNELS-1] = -1 [0 ... NR_EVENT_CHANNELS-1] = -1
}; };
static unsigned long cpu_evtchn_mask[NR_CPUS][NR_EVENT_CHANNELS/BITS_PER_LONG]; struct cpu_evtchn_s {
unsigned long bits[NR_EVENT_CHANNELS/BITS_PER_LONG];
};
static struct cpu_evtchn_s *cpu_evtchn_mask_p;
/*
 * Return the event-channel bitmask for @cpu (one bit per event channel).
 * Replaces direct indexing of the old static cpu_evtchn_mask[NR_CPUS][]
 * array; the backing storage is now allocated in xen_init_IRQ() sized by
 * nr_cpu_ids, so this must not be called before that allocation.
 */
static inline unsigned long *cpu_evtchn_mask(int cpu)
{
return cpu_evtchn_mask_p[cpu].bits;
}
static u8 cpu_evtchn[NR_EVENT_CHANNELS]; static u8 cpu_evtchn[NR_EVENT_CHANNELS];
/* Reference counts for bindings to IRQs. */ /* Reference counts for bindings to IRQs. */
...@@ -115,7 +122,7 @@ static inline unsigned long active_evtchns(unsigned int cpu, ...@@ -115,7 +122,7 @@ static inline unsigned long active_evtchns(unsigned int cpu,
unsigned int idx) unsigned int idx)
{ {
return (sh->evtchn_pending[idx] & return (sh->evtchn_pending[idx] &
cpu_evtchn_mask[cpu][idx] & cpu_evtchn_mask(cpu)[idx] &
~sh->evtchn_mask[idx]); ~sh->evtchn_mask[idx]);
} }
...@@ -128,8 +135,8 @@ static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu) ...@@ -128,8 +135,8 @@ static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
cpumask_copy(irq_to_desc(irq)->affinity, cpumask_of(cpu)); cpumask_copy(irq_to_desc(irq)->affinity, cpumask_of(cpu));
#endif #endif
__clear_bit(chn, cpu_evtchn_mask[cpu_evtchn[chn]]); __clear_bit(chn, cpu_evtchn_mask(cpu_evtchn[chn]));
__set_bit(chn, cpu_evtchn_mask[cpu]); __set_bit(chn, cpu_evtchn_mask(cpu));
cpu_evtchn[chn] = cpu; cpu_evtchn[chn] = cpu;
} }
...@@ -147,7 +154,7 @@ static void init_evtchn_cpu_bindings(void) ...@@ -147,7 +154,7 @@ static void init_evtchn_cpu_bindings(void)
#endif #endif
memset(cpu_evtchn, 0, sizeof(cpu_evtchn)); memset(cpu_evtchn, 0, sizeof(cpu_evtchn));
memset(cpu_evtchn_mask[0], ~0, sizeof(cpu_evtchn_mask[0])); memset(cpu_evtchn_mask(0), ~0, sizeof(cpu_evtchn_mask(0)));
} }
static inline unsigned int cpu_from_evtchn(unsigned int evtchn) static inline unsigned int cpu_from_evtchn(unsigned int evtchn)
...@@ -822,6 +829,10 @@ static struct irq_chip xen_dynamic_chip __read_mostly = { ...@@ -822,6 +829,10 @@ static struct irq_chip xen_dynamic_chip __read_mostly = {
void __init xen_init_IRQ(void) void __init xen_init_IRQ(void)
{ {
int i; int i;
size_t size = nr_cpu_ids * sizeof(struct cpu_evtchn_s);
cpu_evtchn_mask_p = kmalloc(size, GFP_KERNEL);
BUG_ON(cpu_evtchn_mask_p == NULL);	/* check the allocation, not the function's address */
init_evtchn_cpu_bindings(); init_evtchn_cpu_bindings();
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment