Commit b1d63f0c authored by Linus Torvalds

Merge tag 'powerpc-6.2-3' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux

Pull powerpc fixes from Michael Ellerman:

 - Fix a build failure with some versions of ld that have an odd version
   string

 - Fix incorrect use of mutex in the IMC PMU driver

Thanks to Kajol Jain, Michael Petlan, Ojaswin Mujoo, Peter Zijlstra, and
Yang Yingliang.

* tag 'powerpc-6.2-3' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux:
  powerpc/64s/hash: Make stress_hpt_timer_fn() static
  powerpc/imc-pmu: Fix use of mutex in IRQs disabled section
  powerpc/boot: Fix incorrect version calculation issue in ld_version
parents 7c698440 f12cd061
--- a/arch/powerpc/boot/wrapper
+++ b/arch/powerpc/boot/wrapper
@@ -210,6 +210,10 @@ ld_version()
 	gsub(".*version ", "");
 	gsub("-.*", "");
 	split($1,a, ".");
+	if( length(a[3]) == "8" )
+		# a[3] is probably a date of format yyyymmdd used for release snapshots. We
+		# can assume it to be zero as it does not signify a new version as such.
+		a[3] = 0;
 	print a[1]*100000000 + a[2]*1000000 + a[3]*10000;
 	exit
 }'
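The awk above packs (major, minor, patch) into one integer. A quick sketch of the failure mode this hunk fixes, using the ld_version shell function from this file and a hypothetical snapshot banner (the exact string is illustrative, not taken from the commit):

    $ printf 'GNU ld (GNU Binutils) 2.39.20221224\n' | ld_version
    239000000

With the 8-digit yyyymmdd field zeroed, the result is 2*100000000 + 39*1000000 + 0*10000. Before the fix the date itself was multiplied by 10000, producing a number around 2e11 and throwing off the wrapper's minimum-version comparisons.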
--- a/arch/powerpc/include/asm/imc-pmu.h
+++ b/arch/powerpc/include/asm/imc-pmu.h
@@ -137,7 +137,7 @@ struct imc_pmu {
  * are inited.
  */
 struct imc_pmu_ref {
-	struct mutex lock;
+	spinlock_t lock;
 	unsigned int id;
 	int refc;
 };
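Why the type change: these reference counts are manipulated from perf PMU callbacks and CPU-hotplug paths that can run with interrupts disabled, where taking a mutex (a sleeping lock) is invalid and typically produces a "BUG: sleeping function called from invalid context" splat; a spinlock is safe there. A minimal sketch of the corrected pattern (illustrative names, not the driver's code):

    #include <linux/spinlock.h>

    /* Hypothetical mirror of imc_pmu_ref: a refcount whose lock must be
     * usable from atomic (IRQs-disabled) context, hence a spinlock. */
    struct ref_sketch {
            spinlock_t lock;
            int refc;
    };

    static struct ref_sketch example_ref = {
            .lock = __SPIN_LOCK_INITIALIZER(example_ref.lock),
    };

    static void example_get(void)
    {
            spin_lock(&example_ref.lock);   /* never sleeps */
            example_ref.refc++;
            spin_unlock(&example_ref.lock);
    }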
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -1012,7 +1012,7 @@ static void __init hash_init_partition_table(phys_addr_t hash_table,
 void hpt_clear_stress(void);
 static struct timer_list stress_hpt_timer;
-void stress_hpt_timer_fn(struct timer_list *timer)
+static void stress_hpt_timer_fn(struct timer_list *timer)
 {
 	int next_cpu;
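This hunk only adds internal linkage: stress_hpt_timer_fn() is referenced solely through timer_setup() in the same file, so a non-static definition draws a missing-prototype style warning from sparse or W=1 builds (presumably what motivated the change). A small sketch of the same pattern, with hypothetical names:

    #include <linux/timer.h>
    #include <linux/jiffies.h>

    static struct timer_list demo_timer;

    /* static: only ever referenced via timer_setup() below, so the symbol
     * needs no external linkage and no header prototype. */
    static void demo_timer_fn(struct timer_list *timer)
    {
            /* do the periodic work, then re-arm */
            mod_timer(timer, jiffies + msecs_to_jiffies(100));
    }

    static void demo_start(void)
    {
            timer_setup(&demo_timer, demo_timer_fn, 0);
            mod_timer(&demo_timer, jiffies + msecs_to_jiffies(100));
    }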
--- a/arch/powerpc/perf/imc-pmu.c
+++ b/arch/powerpc/perf/imc-pmu.c
@@ -14,6 +14,7 @@
 #include <asm/cputhreads.h>
 #include <asm/smp.h>
 #include <linux/string.h>
+#include <linux/spinlock.h>

 /* Nest IMC data structures and variables */
@@ -21,7 +22,7 @@
  * Used to avoid races in counting the nest-pmu units during hotplug
  * register and unregister
  */
-static DEFINE_MUTEX(nest_init_lock);
+static DEFINE_SPINLOCK(nest_init_lock);
 static DEFINE_PER_CPU(struct imc_pmu_ref *, local_nest_imc_refc);
 static struct imc_pmu **per_nest_pmu_arr;
 static cpumask_t nest_imc_cpumask;
@@ -50,7 +51,7 @@ static int trace_imc_mem_size;
  * core and trace-imc
  */
 static struct imc_pmu_ref imc_global_refc = {
-	.lock = __MUTEX_INITIALIZER(imc_global_refc.lock),
+	.lock = __SPIN_LOCK_INITIALIZER(imc_global_refc.lock),
 	.id = 0,
 	.refc = 0,
 };
@@ -400,7 +401,7 @@ static int ppc_nest_imc_cpu_offline(unsigned int cpu)
 			get_hard_smp_processor_id(cpu));
 	/*
 	 * If this is the last cpu in this chip then, skip the reference
-	 * count mutex lock and make the reference count on this chip zero.
+	 * count lock and make the reference count on this chip zero.
 	 */
 	ref = get_nest_pmu_ref(cpu);
 	if (!ref)
@@ -462,15 +463,15 @@ static void nest_imc_counters_release(struct perf_event *event)
 	/*
 	 * See if we need to disable the nest PMU.
 	 * If no events are currently in use, then we have to take a
-	 * mutex to ensure that we don't race with another task doing
+	 * lock to ensure that we don't race with another task doing
 	 * enable or disable the nest counters.
 	 */
 	ref = get_nest_pmu_ref(event->cpu);
 	if (!ref)
 		return;

-	/* Take the mutex lock for this node and then decrement the reference count */
-	mutex_lock(&ref->lock);
+	/* Take the lock for this node and then decrement the reference count */
+	spin_lock(&ref->lock);
 	if (ref->refc == 0) {
 		/*
 		 * The scenario where this is true is, when perf session is
@@ -482,7 +483,7 @@ static void nest_imc_counters_release(struct perf_event *event)
 		 * an OPAL call to disable the engine in that node.
 		 *
 		 */
-		mutex_unlock(&ref->lock);
+		spin_unlock(&ref->lock);
 		return;
 	}
 	ref->refc--;
@@ -490,7 +491,7 @@ static void nest_imc_counters_release(struct perf_event *event)
 	rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_NEST,
 				    get_hard_smp_processor_id(event->cpu));
 	if (rc) {
-		mutex_unlock(&ref->lock);
+		spin_unlock(&ref->lock);
 		pr_err("nest-imc: Unable to stop the counters for core %d\n", node_id);
 		return;
 	}
@@ -498,7 +499,7 @@ static void nest_imc_counters_release(struct perf_event *event)
 		WARN(1, "nest-imc: Invalid event reference count\n");
 		ref->refc = 0;
 	}
-	mutex_unlock(&ref->lock);
+	spin_unlock(&ref->lock);
 }

 static int nest_imc_event_init(struct perf_event *event)
@@ -557,26 +558,25 @@ static int nest_imc_event_init(struct perf_event *event)
 	/*
 	 * Get the imc_pmu_ref struct for this node.
-	 * Take the mutex lock and then increment the count of nest pmu events
-	 * inited.
+	 * Take the lock and then increment the count of nest pmu events inited.
 	 */
 	ref = get_nest_pmu_ref(event->cpu);
 	if (!ref)
 		return -EINVAL;

-	mutex_lock(&ref->lock);
+	spin_lock(&ref->lock);
 	if (ref->refc == 0) {
 		rc = opal_imc_counters_start(OPAL_IMC_COUNTERS_NEST,
 					     get_hard_smp_processor_id(event->cpu));
 		if (rc) {
-			mutex_unlock(&ref->lock);
+			spin_unlock(&ref->lock);
 			pr_err("nest-imc: Unable to start the counters for node %d\n",
 			       node_id);
 			return rc;
 		}
 	}
 	++ref->refc;
-	mutex_unlock(&ref->lock);
+	spin_unlock(&ref->lock);

 	event->destroy = nest_imc_counters_release;
 	return 0;
@@ -612,9 +612,8 @@ static int core_imc_mem_init(int cpu, int size)
 		return -ENOMEM;
 	mem_info->vbase = page_address(page);

-	/* Init the mutex */
 	core_imc_refc[core_id].id = core_id;
-	mutex_init(&core_imc_refc[core_id].lock);
+	spin_lock_init(&core_imc_refc[core_id].lock);

 	rc = opal_imc_counters_init(OPAL_IMC_COUNTERS_CORE,
 				__pa((void *)mem_info->vbase),
@@ -703,9 +702,8 @@ static int ppc_core_imc_cpu_offline(unsigned int cpu)
 		perf_pmu_migrate_context(&core_imc_pmu->pmu, cpu, ncpu);
 	} else {
 		/*
-		 * If this is the last cpu in this core then, skip taking refernce
-		 * count mutex lock for this core and directly zero "refc" for
-		 * this core.
+		 * If this is the last cpu in this core then skip taking reference
+		 * count lock for this core and directly zero "refc" for this core.
 		 */
 		opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
 				       get_hard_smp_processor_id(cpu));
@@ -720,11 +718,11 @@ static int ppc_core_imc_cpu_offline(unsigned int cpu)
 		 * last cpu in this core and core-imc event running
 		 * in this cpu.
 		 */
-		mutex_lock(&imc_global_refc.lock);
+		spin_lock(&imc_global_refc.lock);
 		if (imc_global_refc.id == IMC_DOMAIN_CORE)
 			imc_global_refc.refc--;
-		mutex_unlock(&imc_global_refc.lock);
+		spin_unlock(&imc_global_refc.lock);
 	}
 	return 0;
 }
@@ -739,7 +737,7 @@ static int core_imc_pmu_cpumask_init(void)
 static void reset_global_refc(struct perf_event *event)
 {
-	mutex_lock(&imc_global_refc.lock);
+	spin_lock(&imc_global_refc.lock);
 	imc_global_refc.refc--;

 	/*
@@ -751,7 +749,7 @@ static void reset_global_refc(struct perf_event *event)
 		imc_global_refc.refc = 0;
 		imc_global_refc.id = 0;
 	}
-	mutex_unlock(&imc_global_refc.lock);
+	spin_unlock(&imc_global_refc.lock);
 }

 static void core_imc_counters_release(struct perf_event *event)
@@ -764,17 +762,17 @@ static void core_imc_counters_release(struct perf_event *event)
 	/*
 	 * See if we need to disable the IMC PMU.
 	 * If no events are currently in use, then we have to take a
-	 * mutex to ensure that we don't race with another task doing
+	 * lock to ensure that we don't race with another task doing
 	 * enable or disable the core counters.
 	 */
 	core_id = event->cpu / threads_per_core;

-	/* Take the mutex lock and decrement the refernce count for this core */
+	/* Take the lock and decrement the refernce count for this core */
 	ref = &core_imc_refc[core_id];
 	if (!ref)
 		return;

-	mutex_lock(&ref->lock);
+	spin_lock(&ref->lock);
 	if (ref->refc == 0) {
 		/*
 		 * The scenario where this is true is, when perf session is
@@ -786,7 +784,7 @@ static void core_imc_counters_release(struct perf_event *event)
 		 * an OPAL call to disable the engine in that core.
 		 *
 		 */
-		mutex_unlock(&ref->lock);
+		spin_unlock(&ref->lock);
 		return;
 	}
 	ref->refc--;
@@ -794,7 +792,7 @@ static void core_imc_counters_release(struct perf_event *event)
 	rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
 				    get_hard_smp_processor_id(event->cpu));
 	if (rc) {
-		mutex_unlock(&ref->lock);
+		spin_unlock(&ref->lock);
 		pr_err("IMC: Unable to stop the counters for core %d\n", core_id);
 		return;
 	}
@@ -802,7 +800,7 @@ static void core_imc_counters_release(struct perf_event *event)
 		WARN(1, "core-imc: Invalid event reference count\n");
 		ref->refc = 0;
 	}
-	mutex_unlock(&ref->lock);
+	spin_unlock(&ref->lock);

 	reset_global_refc(event);
 }
@@ -840,7 +838,6 @@ static int core_imc_event_init(struct perf_event *event)
 	if ((!pcmi->vbase))
 		return -ENODEV;

-	/* Get the core_imc mutex for this core */
 	ref = &core_imc_refc[core_id];
 	if (!ref)
 		return -EINVAL;
@@ -848,22 +845,22 @@ static int core_imc_event_init(struct perf_event *event)
 	/*
 	 * Core pmu units are enabled only when it is used.
 	 * See if this is triggered for the first time.
-	 * If yes, take the mutex lock and enable the core counters.
+	 * If yes, take the lock and enable the core counters.
 	 * If not, just increment the count in core_imc_refc struct.
 	 */
-	mutex_lock(&ref->lock);
+	spin_lock(&ref->lock);
 	if (ref->refc == 0) {
 		rc = opal_imc_counters_start(OPAL_IMC_COUNTERS_CORE,
 					     get_hard_smp_processor_id(event->cpu));
 		if (rc) {
-			mutex_unlock(&ref->lock);
+			spin_unlock(&ref->lock);
 			pr_err("core-imc: Unable to start the counters for core %d\n",
 			       core_id);
 			return rc;
 		}
 	}
 	++ref->refc;
-	mutex_unlock(&ref->lock);
+	spin_unlock(&ref->lock);

 	/*
 	 * Since the system can run either in accumulation or trace-mode
@@ -874,7 +871,7 @@ static int core_imc_event_init(struct perf_event *event)
 	 * to know whether any other trace/thread imc
 	 * events are running.
 	 */
-	mutex_lock(&imc_global_refc.lock);
+	spin_lock(&imc_global_refc.lock);
 	if (imc_global_refc.id == 0 || imc_global_refc.id == IMC_DOMAIN_CORE) {
 		/*
 		 * No other trace/thread imc events are running in
@@ -883,10 +880,10 @@ static int core_imc_event_init(struct perf_event *event)
 		imc_global_refc.id = IMC_DOMAIN_CORE;
 		imc_global_refc.refc++;
 	} else {
-		mutex_unlock(&imc_global_refc.lock);
+		spin_unlock(&imc_global_refc.lock);
 		return -EBUSY;
 	}
-	mutex_unlock(&imc_global_refc.lock);
+	spin_unlock(&imc_global_refc.lock);

 	event->hw.event_base = (u64)pcmi->vbase + (config & IMC_EVENT_OFFSET_MASK);
 	event->destroy = core_imc_counters_release;
@@ -958,10 +955,10 @@ static int ppc_thread_imc_cpu_offline(unsigned int cpu)
 	mtspr(SPRN_LDBAR, (mfspr(SPRN_LDBAR) & (~(1UL << 63))));

 	/* Reduce the refc if thread-imc event running on this cpu */
-	mutex_lock(&imc_global_refc.lock);
+	spin_lock(&imc_global_refc.lock);
 	if (imc_global_refc.id == IMC_DOMAIN_THREAD)
 		imc_global_refc.refc--;
-	mutex_unlock(&imc_global_refc.lock);
+	spin_unlock(&imc_global_refc.lock);

 	return 0;
 }
@@ -1001,7 +998,7 @@ static int thread_imc_event_init(struct perf_event *event)
 	if (!target)
 		return -EINVAL;

-	mutex_lock(&imc_global_refc.lock);
+	spin_lock(&imc_global_refc.lock);
 	/*
 	 * Check if any other trace/core imc events are running in the
 	 * system, if not set the global id to thread-imc.
@@ -1010,10 +1007,10 @@ static int thread_imc_event_init(struct perf_event *event)
 		imc_global_refc.id = IMC_DOMAIN_THREAD;
 		imc_global_refc.refc++;
 	} else {
-		mutex_unlock(&imc_global_refc.lock);
+		spin_unlock(&imc_global_refc.lock);
 		return -EBUSY;
 	}
-	mutex_unlock(&imc_global_refc.lock);
+	spin_unlock(&imc_global_refc.lock);

 	event->pmu->task_ctx_nr = perf_sw_context;
 	event->destroy = reset_global_refc;
@@ -1135,25 +1132,25 @@ static int thread_imc_event_add(struct perf_event *event, int flags)
 	/*
 	 * imc pmus are enabled only when it is used.
 	 * See if this is triggered for the first time.
-	 * If yes, take the mutex lock and enable the counters.
+	 * If yes, take the lock and enable the counters.
 	 * If not, just increment the count in ref count struct.
 	 */
 	ref = &core_imc_refc[core_id];
 	if (!ref)
 		return -EINVAL;

-	mutex_lock(&ref->lock);
+	spin_lock(&ref->lock);
 	if (ref->refc == 0) {
 		if (opal_imc_counters_start(OPAL_IMC_COUNTERS_CORE,
 		    get_hard_smp_processor_id(smp_processor_id()))) {
-			mutex_unlock(&ref->lock);
+			spin_unlock(&ref->lock);
 			pr_err("thread-imc: Unable to start the counter\
 				for core %d\n", core_id);
 			return -EINVAL;
 		}
 	}
 	++ref->refc;
-	mutex_unlock(&ref->lock);
+	spin_unlock(&ref->lock);
 	return 0;
 }
@@ -1170,12 +1167,12 @@ static void thread_imc_event_del(struct perf_event *event, int flags)
 		return;
 	}

-	mutex_lock(&ref->lock);
+	spin_lock(&ref->lock);
 	ref->refc--;
 	if (ref->refc == 0) {
 		if (opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
 		    get_hard_smp_processor_id(smp_processor_id()))) {
-			mutex_unlock(&ref->lock);
+			spin_unlock(&ref->lock);
 			pr_err("thread-imc: Unable to stop the counters\
 				for core %d\n", core_id);
 			return;
@@ -1183,7 +1180,7 @@ static void thread_imc_event_del(struct perf_event *event, int flags)
 	} else if (ref->refc < 0) {
 		ref->refc = 0;
 	}
-	mutex_unlock(&ref->lock);
+	spin_unlock(&ref->lock);

 	/* Set bit 0 of LDBAR to zero, to stop posting updates to memory */
 	mtspr(SPRN_LDBAR, (mfspr(SPRN_LDBAR) & (~(1UL << 63))));
@@ -1224,9 +1221,8 @@ static int trace_imc_mem_alloc(int cpu_id, int size)
 		}
 	}

-	/* Init the mutex, if not already */
 	trace_imc_refc[core_id].id = core_id;
-	mutex_init(&trace_imc_refc[core_id].lock);
+	spin_lock_init(&trace_imc_refc[core_id].lock);

 	mtspr(SPRN_LDBAR, 0);
 	return 0;
@@ -1246,10 +1242,10 @@ static int ppc_trace_imc_cpu_offline(unsigned int cpu)
 	 * Reduce the refc if any trace-imc event running
 	 * on this cpu.
 	 */
-	mutex_lock(&imc_global_refc.lock);
+	spin_lock(&imc_global_refc.lock);
 	if (imc_global_refc.id == IMC_DOMAIN_TRACE)
 		imc_global_refc.refc--;
-	mutex_unlock(&imc_global_refc.lock);
+	spin_unlock(&imc_global_refc.lock);

 	return 0;
 }
@@ -1371,17 +1367,17 @@ static int trace_imc_event_add(struct perf_event *event, int flags)
 	}

 	mtspr(SPRN_LDBAR, ldbar_value);
-	mutex_lock(&ref->lock);
+	spin_lock(&ref->lock);
 	if (ref->refc == 0) {
 		if (opal_imc_counters_start(OPAL_IMC_COUNTERS_TRACE,
 				get_hard_smp_processor_id(smp_processor_id()))) {
-			mutex_unlock(&ref->lock);
+			spin_unlock(&ref->lock);
 			pr_err("trace-imc: Unable to start the counters for core %d\n", core_id);
 			return -EINVAL;
 		}
 	}
 	++ref->refc;
-	mutex_unlock(&ref->lock);
+	spin_unlock(&ref->lock);
 	return 0;
 }
@@ -1414,19 +1410,19 @@ static void trace_imc_event_del(struct perf_event *event, int flags)
 		return;
 	}

-	mutex_lock(&ref->lock);
+	spin_lock(&ref->lock);
 	ref->refc--;
 	if (ref->refc == 0) {
 		if (opal_imc_counters_stop(OPAL_IMC_COUNTERS_TRACE,
 				get_hard_smp_processor_id(smp_processor_id()))) {
-			mutex_unlock(&ref->lock);
+			spin_unlock(&ref->lock);
 			pr_err("trace-imc: Unable to stop the counters for core %d\n", core_id);
 			return;
 		}
 	} else if (ref->refc < 0) {
 		ref->refc = 0;
 	}
-	mutex_unlock(&ref->lock);
+	spin_unlock(&ref->lock);

 	trace_imc_event_stop(event, flags);
 }
@@ -1448,7 +1444,7 @@ static int trace_imc_event_init(struct perf_event *event)
 	 * no other thread is running any core/thread imc
 	 * events
 	 */
-	mutex_lock(&imc_global_refc.lock);
+	spin_lock(&imc_global_refc.lock);
 	if (imc_global_refc.id == 0 || imc_global_refc.id == IMC_DOMAIN_TRACE) {
 		/*
 		 * No core/thread imc events are running in the
@@ -1457,10 +1453,10 @@ static int trace_imc_event_init(struct perf_event *event)
 		imc_global_refc.id = IMC_DOMAIN_TRACE;
 		imc_global_refc.refc++;
 	} else {
-		mutex_unlock(&imc_global_refc.lock);
+		spin_unlock(&imc_global_refc.lock);
 		return -EBUSY;
 	}
-	mutex_unlock(&imc_global_refc.lock);
+	spin_unlock(&imc_global_refc.lock);

 	event->hw.idx = -1;
@@ -1533,10 +1529,10 @@ static int init_nest_pmu_ref(void)
 	i = 0;
 	for_each_node(nid) {
 		/*
-		 * Mutex lock to avoid races while tracking the number of
+		 * Take the lock to avoid races while tracking the number of
 		 * sessions using the chip's nest pmu units.
 		 */
-		mutex_init(&nest_imc_refc[i].lock);
+		spin_lock_init(&nest_imc_refc[i].lock);

 		/*
 		 * Loop to init the "id" with the node_id. Variable "i" initialized to
@@ -1633,7 +1629,7 @@ static void imc_common_mem_free(struct imc_pmu *pmu_ptr)
 static void imc_common_cpuhp_mem_free(struct imc_pmu *pmu_ptr)
 {
 	if (pmu_ptr->domain == IMC_DOMAIN_NEST) {
-		mutex_lock(&nest_init_lock);
+		spin_lock(&nest_init_lock);
 		if (nest_pmus == 1) {
 			cpuhp_remove_state(CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE);
 			kfree(nest_imc_refc);
@@ -1643,7 +1639,7 @@ static void imc_common_cpuhp_mem_free(struct imc_pmu *pmu_ptr)
 		if (nest_pmus > 0)
 			nest_pmus--;
-		mutex_unlock(&nest_init_lock);
+		spin_unlock(&nest_init_lock);
 	}

 	/* Free core_imc memory */
@@ -1800,11 +1796,11 @@ int init_imc_pmu(struct device_node *parent, struct imc_pmu *pmu_ptr, int pmu_id
 		 * rest. To handle the cpuhotplug callback unregister, we track
 		 * the number of nest pmus in "nest_pmus".
 		 */
-		mutex_lock(&nest_init_lock);
+		spin_lock(&nest_init_lock);
 		if (nest_pmus == 0) {
 			ret = init_nest_pmu_ref();
 			if (ret) {
-				mutex_unlock(&nest_init_lock);
+				spin_unlock(&nest_init_lock);
 				kfree(per_nest_pmu_arr);
 				per_nest_pmu_arr = NULL;
 				goto err_free_mem;
@@ -1812,7 +1808,7 @@ int init_imc_pmu(struct device_node *parent, struct imc_pmu *pmu_ptr, int pmu_id
 			/* Register for cpu hotplug notification. */
 			ret = nest_pmu_cpumask_init();
 			if (ret) {
-				mutex_unlock(&nest_init_lock);
+				spin_unlock(&nest_init_lock);
 				kfree(nest_imc_refc);
 				kfree(per_nest_pmu_arr);
 				per_nest_pmu_arr = NULL;
@@ -1820,7 +1816,7 @@ int init_imc_pmu(struct device_node *parent, struct imc_pmu *pmu_ptr, int pmu_id
 		}
 		nest_pmus++;
-		mutex_unlock(&nest_init_lock);
+		spin_unlock(&nest_init_lock);
 		break;
 	case IMC_DOMAIN_CORE:
 		ret = core_imc_pmu_cpumask_init();