Commit 780f36d8 authored by Christoph Lameter, committed by Tejun Heo

xen: Use this_cpu_ops

Use this_cpu_ops to reduce code size and simplify things in various places.

V3->V4:
	Move instance of this_cpu_inc_return to a later patchset so that
	this patch can be applied without infrastructure changes.
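
The conversion is mechanical: a read of __get_cpu_var(x) becomes
__this_cpu_read(x) (including reads of struct members, as in the
idt_desc.address change below), and an assignment becomes
__this_cpu_write(x, v). A minimal sketch of the idiom follows;
demo_counter and demo_bump are hypothetical names used only for
illustration, not part of this commit:

	#include <linux/percpu.h>

	static DEFINE_PER_CPU(unsigned long, demo_counter);

	/* Assumes the caller runs with preemption disabled, as the
	 * converted sites in this patch do. */
	static void demo_bump(void)
	{
		unsigned long v;

		/* Old idiom: __get_cpu_var() computes the address of
		 * this CPU's instance and dereferences it. */
		v = __get_cpu_var(demo_counter);
		__get_cpu_var(demo_counter) = v + 1;

		/* New idiom: this_cpu ops access the instance directly
		 * (a single segment-prefixed instruction on x86), which
		 * is what shrinks the code. */
		v = __this_cpu_read(demo_counter);
		__this_cpu_write(demo_counter, v + 1);
	}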

Cc: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Acked-by: H. Peter Anvin <hpa@zytor.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
parent c7b92516
arch/x86/xen/enlighten.c
@@ -574,8 +574,8 @@ static void xen_write_idt_entry(gate_desc *dt, int entrynum, const gate_desc *g)
 	preempt_disable();
-	start = __get_cpu_var(idt_desc).address;
-	end = start + __get_cpu_var(idt_desc).size + 1;
+	start = __this_cpu_read(idt_desc.address);
+	end = start + __this_cpu_read(idt_desc.size) + 1;
 	xen_mc_flush();
...
arch/x86/xen/multicalls.h
@@ -22,7 +22,7 @@ static inline void xen_mc_batch(void)
 	unsigned long flags;
 	/* need to disable interrupts until this entry is complete */
 	local_irq_save(flags);
-	__get_cpu_var(xen_mc_irq_flags) = flags;
+	__this_cpu_write(xen_mc_irq_flags, flags);
 }

 static inline struct multicall_space xen_mc_entry(size_t args)
...
arch/x86/xen/spinlock.c
@@ -159,8 +159,8 @@ static inline struct xen_spinlock *spinning_lock(struct xen_spinlock *xl)
 {
 	struct xen_spinlock *prev;
-	prev = __get_cpu_var(lock_spinners);
-	__get_cpu_var(lock_spinners) = xl;
+	prev = __this_cpu_read(lock_spinners);
+	__this_cpu_write(lock_spinners, xl);
 	wmb();		/* set lock of interest before count */
@@ -179,14 +179,14 @@ static inline void unspinning_lock(struct xen_spinlock *xl, struct xen_spinlock
 	asm(LOCK_PREFIX " decw %0"
 	    : "+m" (xl->spinners) : : "memory");
 	wmb();		/* decrement count before restoring lock */
-	__get_cpu_var(lock_spinners) = prev;
+	__this_cpu_write(lock_spinners, prev);
 }

 static noinline int xen_spin_lock_slow(struct arch_spinlock *lock, bool irq_enable)
 {
 	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
 	struct xen_spinlock *prev;
-	int irq = __get_cpu_var(lock_kicker_irq);
+	int irq = __this_cpu_read(lock_kicker_irq);
 	int ret;
 	u64 start;
...
arch/x86/xen/time.c
@@ -135,24 +135,24 @@ static void do_stolen_accounting(void)
 	/* Add the appropriate number of ticks of stolen time,
 	   including any left-overs from last time. */
-	stolen = runnable + offline + __get_cpu_var(xen_residual_stolen);
+	stolen = runnable + offline + __this_cpu_read(xen_residual_stolen);
 	if (stolen < 0)
 		stolen = 0;
 	ticks = iter_div_u64_rem(stolen, NS_PER_TICK, &stolen);
-	__get_cpu_var(xen_residual_stolen) = stolen;
+	__this_cpu_write(xen_residual_stolen, stolen);
 	account_steal_ticks(ticks);

 	/* Add the appropriate number of ticks of blocked time,
 	   including any left-overs from last time. */
-	blocked += __get_cpu_var(xen_residual_blocked);
+	blocked += __this_cpu_read(xen_residual_blocked);
 	if (blocked < 0)
 		blocked = 0;
 	ticks = iter_div_u64_rem(blocked, NS_PER_TICK, &blocked);
-	__get_cpu_var(xen_residual_blocked) = blocked;
+	__this_cpu_write(xen_residual_blocked, blocked);
 	account_idle_ticks(ticks);
 }
...
drivers/xen/events.c
@@ -355,7 +355,7 @@ static void unmask_evtchn(int port)
 		struct evtchn_unmask unmask = { .port = port };
 		(void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
 	} else {
-		struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);
+		struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
 		sync_clear_bit(port, &s->evtchn_mask[0]);
@@ -1101,7 +1101,7 @@ static void __xen_evtchn_do_upcall(void)
 {
 	int cpu = get_cpu();
 	struct shared_info *s = HYPERVISOR_shared_info;
-	struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);
+	struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
 	unsigned count;
 	do {
@@ -1141,8 +1141,8 @@ static void __xen_evtchn_do_upcall(void)
 		BUG_ON(!irqs_disabled());
-		count = __get_cpu_var(xed_nesting_count);
-		__get_cpu_var(xed_nesting_count) = 0;
+		count = __this_cpu_read(xed_nesting_count);
+		__this_cpu_write(xed_nesting_count, 0);
 	} while (count != 1 || vcpu_info->evtchn_upcall_pending);
out:
...