Commit 02602b3a (parent d7ee788d) authored by Stéphane Eranian, committed by David Mosberger

[PATCH] ia64: perfmon update

Here is a new perfmon patch. It is important because it
fixes the problem with close() when the file descriptor
is shared between two related processes. As a nice side
effect, it considerably simplifies the cleanup of the
sampling buffer.

Here is the ChangeLog:

- fix bug in pfm_close() when the descriptor is
  shared between related processes. Introduce
  a pfm_flush(), called on each invocation of
  close(); pfm_close() is only called for the last
  user (see the sketch below).
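
  For reference, the split leans on the standard VFS semantics:
  ->flush() runs on every close(fd), even while other descriptors
  still reference the file, while ->release() runs once from fput()
  when the last reference is dropped. A minimal sketch of the new
  wiring, mirroring the pfm_file_ops change in the diff below:

      static int pfm_flush(struct file *filp);                      /* per-close cleanup */
      static int pfm_close(struct inode *inode, struct file *filp); /* final teardown    */

      static struct file_operations pfm_file_ops = {
          .flush   = pfm_flush, /* every close(): unload if self-monitoring */
          .release = pfm_close, /* last user only: free context resources   */
      };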

- fix pfm_restore_monitoring() to also reload
  the debug registers. They could be modified
  while monitoring is masked.

- fix pfm_close() to clear ctx_fl_is_sampling.

- fix a bug in pfm_handle_work() which could cause
  the wrong PMD to be reset. 

- converted PROTECT_CTX/UNPROTECT_CTX into
  local_irq_save/restore to keep context protection
  but allow the IPI to proceed (sketched below).
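
  Concretely, the cross-CPU cleanup path now briefly opens the local
  interrupt mask while the context lock stays held, so the IPI can be
  serviced. A sketch of the pattern used in the new code:

      local_irq_restore(flags);           /* unmask: let the IPI through       */
      pfm_syswide_cleanup_other_cpu(ctx); /* smp_call_function_single(...)     */
      local_irq_save(flags);              /* re-mask before touching ctx again */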

- updated pfm_syswide_force_stop() to use
  local_irq_save/restore now that the context
  is protected from the caller side.

- updated pfm_mck_pmc_check() to check if context is 
  loaded before checking for special IBR/DBR combinations.
  Clearing the debug registers is not needed when the context
  is not yet loaded.

- updated perfmon.h to have the correct prototype
  definitions for the pfm_mod_*() functions (listed below).
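
  For reference, the prototypes as defined in perfmon.c (which
  perfmon.h must now match) are:

      int pfm_mod_write_pmcs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs);
      int pfm_mod_read_pmds(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs);
      int pfm_mod_write_ibrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs);
      int pfm_mod_write_dbrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs);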

- got rid of the PFM_CTX_TERMINATED state.

- cleaned up the DPRINT() statements to remove
  explicit output of current->pid. This is done
  systematically by the macros.

- added a sysctl entry (expert_mode) to bypass
  read/write checks on PMC/PMD. As its name indicates,
  this is for experts ONLY. You must be root to toggle
  the /proc/sys entry (see below).
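
  Given the existing pfm_sysctl_dir/pfm_sysctl_root tables, the entry
  should show up as /proc/sys/kernel/perfmon/expert_mode. The bypass
  is the same guard at every checker call site, e.g.:

      /* write checker skipped when expert_mode is set */
      if (pfm_sysctl.expert_mode == 0 && PMC_WR_FUNC(cnum)) {
          ret = PMC_WR_FUNC(cnum)(task, ctx, cnum, &value, regs);
          if (ret) goto error;
      }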

- corrected pfm_mod_*() to check against the current task.

- removed pfm_mod_fast_read_pmds(). It is never needed.

- added pfm_mod_write_ibrs() and pfm_mod_write_dbrs();
  a usage sketch follows.
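
  A typical caller would be a sampling-format module's overflow
  handler; the request values below are illustrative only:

      pfarg_dbreg_t req = { .dbreg_num = 0, .dbreg_value = dbr0_val };

      if (pfm_mod_write_dbrs(current, &req, 1, regs))
          DPRINT(("pfm_mod_write_dbrs failed\n"));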
@@ -57,7 +57,6 @@
 #define PFM_CTX_LOADED 2 /* context is loaded onto a task */
 #define PFM_CTX_MASKED 3 /* context is loaded but monitoring is masked due to overflow */
 #define PFM_CTX_ZOMBIE 4 /* owner of the context is closing it */
-#define PFM_CTX_TERMINATED 5 /* the task the context was loaded onto is gone */
 #define PFM_INVALID_ACTIVATION (~0UL)
@@ -473,6 +472,7 @@ typedef struct {
     int debug; /* turn on/off debugging via syslog */
     int debug_ovfl; /* turn on/off debug printk in overflow handler */
     int fastctxsw; /* turn on/off fast (unsecure) ctxsw */
+    int expert_mode; /* turn on/off value checking */
     int debug_pfm_read;
 } pfm_sysctl_t;
@@ -508,6 +508,7 @@ static ctl_table pfm_ctl_table[]={
     {1, "debug", &pfm_sysctl.debug, sizeof(int), 0666, NULL, &proc_dointvec, NULL,},
     {2, "debug_ovfl", &pfm_sysctl.debug_ovfl, sizeof(int), 0666, NULL, &proc_dointvec, NULL,},
     {3, "fastctxsw", &pfm_sysctl.fastctxsw, sizeof(int), 0600, NULL, &proc_dointvec, NULL,},
+    {4, "expert_mode", &pfm_sysctl.expert_mode, sizeof(int), 0600, NULL, &proc_dointvec, NULL,},
     { 0, },
 };
 static ctl_table pfm_sysctl_dir[] = {
@@ -520,11 +521,8 @@ static ctl_table pfm_sysctl_root[] = {
 };
 static struct ctl_table_header *pfm_sysctl_header;
-static void pfm_vm_close(struct vm_area_struct * area);
-static struct vm_operations_struct pfm_vm_ops={
-    close: pfm_vm_close
-};
+static int pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
+static int pfm_flush(struct file *filp);
 #define pfm_get_cpu_var(v) __ia64_per_cpu_var(v)
 #define pfm_get_cpu_data(a,b) per_cpu(a, b)
@@ -697,6 +695,28 @@ pfm_unfreeze_pmu(void)
     ia64_srlz_d();
 }
+static inline void
+pfm_restore_ibrs(unsigned long *ibrs, unsigned int nibrs)
+{
+    int i;
+    for (i=0; i < nibrs; i++) {
+        ia64_set_ibr(i, ibrs[i]);
+    }
+    ia64_srlz_i();
+}
+
+static inline void
+pfm_restore_dbrs(unsigned long *dbrs, unsigned int ndbrs)
+{
+    int i;
+    for (i=0; i < ndbrs; i++) {
+        ia64_set_dbr(i, dbrs[i]);
+    }
+    ia64_srlz_d();
+}
 /*
  * PMD[i] must be a counter. no check is made
  */
@@ -827,7 +847,10 @@ pfm_context_alloc(void)
 {
     pfm_context_t *ctx;
-    /* allocate context descriptor */
+    /*
+     * allocate context descriptor
+     * must be able to free with interrupts disabled
+     */
     ctx = kmalloc(sizeof(pfm_context_t), GFP_KERNEL);
     if (ctx) {
         memset(ctx, 0, sizeof(pfm_context_t));
@@ -853,7 +876,7 @@ pfm_mask_monitoring(struct task_struct *task)
     unsigned long mask, val, ovfl_mask;
     int i;
-    DPRINT_ovfl(("[%d] masking monitoring for [%d]\n", current->pid, task->pid));
+    DPRINT_ovfl(("masking monitoring for [%d]\n", task->pid));
     ovfl_mask = pmu_conf.ovfl_val;
     /*
@@ -996,6 +1019,15 @@ pfm_restore_monitoring(struct task_struct *task)
     }
     ia64_srlz_d();
+    /*
+     * must restore DBR/IBR because could be modified while masked
+     * XXX: need to optimize
+     */
+    if (ctx->ctx_fl_using_dbreg) {
+        pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf.num_ibrs);
+        pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf.num_dbrs);
+    }
+
     /*
      * now restore PSR
      */
@@ -1106,28 +1138,6 @@ pfm_restore_pmcs(unsigned long *pmcs, unsigned long mask)
     ia64_srlz_d();
 }
-static inline void
-pfm_restore_ibrs(unsigned long *ibrs, unsigned int nibrs)
-{
-    int i;
-    for (i=0; i < nibrs; i++) {
-        ia64_set_ibr(i, ibrs[i]);
-    }
-    ia64_srlz_i();
-}
-
-static inline void
-pfm_restore_dbrs(unsigned long *dbrs, unsigned int ndbrs)
-{
-    int i;
-    for (i=0; i < ndbrs; i++) {
-        ia64_set_dbr(i, dbrs[i]);
-    }
-    ia64_srlz_d();
-}
 static inline int
 pfm_uuid_cmp(pfm_uuid_t a, pfm_uuid_t b)
 {
@@ -1684,8 +1694,7 @@ pfm_fasync(int fd, struct file *filp, int on)
     ret = pfm_do_fasync(fd, filp, ctx, on);
-    DPRINT(("pfm_fasync called by [%d] on ctx_fd=%d on=%d async_queue=%p ret=%d\n",
-        current->pid,
+    DPRINT(("pfm_fasync called on ctx_fd=%d on=%d async_queue=%p ret=%d\n",
         fd,
         on,
         ctx->ctx_async_queue, ret));
@@ -1707,6 +1716,8 @@ pfm_syswide_force_stop(void *info)
     pfm_context_t *ctx = (pfm_context_t *)info;
     struct pt_regs *regs = ia64_task_regs(current);
     struct task_struct *owner;
+    unsigned long flags;
+    int ret;
     if (ctx->ctx_cpu != smp_processor_id()) {
         printk(KERN_ERR "perfmon: pfm_syswide_force_stop for CPU%d but on CPU%d\n",
@@ -1728,27 +1739,23 @@ pfm_syswide_force_stop(void *info)
         return;
     }
-    DPRINT(("[%d] on CPU%d forcing system wide stop for [%d]\n", current->pid, smp_processor_id(), ctx->ctx_task->pid));
+    DPRINT(("on CPU%d forcing system wide stop for [%d]\n", smp_processor_id(), ctx->ctx_task->pid));
     /*
-     * Update local PMU
+     * the context is already protected in pfm_close(), we simply
+     * need to mask interrupts to avoid a PMU interrupt race on
+     * this CPU
      */
-    ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP);
-    ia64_srlz_i();
-    /*
-     * update local cpuinfo
-     */
-    PFM_CPUINFO_CLEAR(PFM_CPUINFO_DCR_PP);
-    PFM_CPUINFO_CLEAR(PFM_CPUINFO_SYST_WIDE);
-    PFM_CPUINFO_CLEAR(PFM_CPUINFO_EXCL_IDLE);
-    pfm_clear_psr_pp();
+    local_irq_save(flags);
+    ret = pfm_context_unload(ctx, NULL, 0, regs);
+    if (ret) {
+        DPRINT(("context_unload returned %d\n", ret));
+    }
     /*
-     * also stop monitoring in the local interrupted task
+     * unmask interrupts, PMU interrupts are now spurious here
      */
-    ia64_psr(regs)->pp = 0;
-    SET_PMU_OWNER(NULL, NULL);
+    local_irq_restore(flags);
 }
 static void
@@ -1756,59 +1763,38 @@ pfm_syswide_cleanup_other_cpu(pfm_context_t *ctx)
 {
     int ret;
-    DPRINT(("[%d] calling CPU%d for cleanup\n", current->pid, ctx->ctx_cpu));
+    DPRINT(("calling CPU%d for cleanup\n", ctx->ctx_cpu));
     ret = smp_call_function_single(ctx->ctx_cpu, pfm_syswide_force_stop, ctx, 0, 1);
-    DPRINT(("[%d] called CPU%d for cleanup ret=%d\n", current->pid, ctx->ctx_cpu, ret));
+    DPRINT(("called CPU%d for cleanup ret=%d\n", ctx->ctx_cpu, ret));
 }
 #endif /* CONFIG_SMP */
 /*
- * called either on explicit close() or from exit_files().
- *
- * IMPORTANT: we get called ONLY when the refcnt on the file gets to zero (fput()),i.e,
- * last task to access the file. Nobody else can access the file at this point.
- *
- * When called from exit_files(), the VMA has been freed because exit_mm()
- * is executed before exit_files().
- *
- * When called from exit_files(), the current task is not yet ZOMBIE but we will
- * flush the PMU state to the context. This means that when we see the context
- * state as TERMINATED we are guranteed to have the latest PMU state available,
- * even if the task itself is in the middle of being ctxsw out.
+ * called for each close(). Partially free resources.
+ * When caller is self-monitoring, the context is unloaded.
  */
-static int pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
 static int
-pfm_close(struct inode *inode, struct file *filp)
+pfm_flush(struct file *filp)
 {
     pfm_context_t *ctx;
     struct task_struct *task;
     struct pt_regs *regs;
-    DECLARE_WAITQUEUE(wait, current);
     unsigned long flags;
     unsigned long smpl_buf_size = 0UL;
     void *smpl_buf_vaddr = NULL;
-    void *smpl_buf_addr = NULL;
-    int free_possible = 1;
     int state, is_system;
-    DPRINT(("pfm_close called private=%p\n", filp->private_data));
-    if (!inode) {
-        printk(KERN_ERR "pfm_close: NULL inode\n");
-        return 0;
-    }
     if (PFM_IS_FILE(filp) == 0) {
-        DPRINT(("bad magic for [%d]\n", current->pid));
+        DPRINT(("bad magic for\n"));
         return -EBADF;
     }
     ctx = (pfm_context_t *)filp->private_data;
     if (ctx == NULL) {
-        printk(KERN_ERR "perfmon: pfm_close: NULL ctx [%d]\n", current->pid);
+        printk(KERN_ERR "perfmon: pfm_flush: NULL ctx [%d]\n", current->pid);
         return -EBADF;
     }
     /*
      * remove our file from the async queue, if we use this mode.
      * This can be done without the context being protected. We come
@@ -1823,7 +1809,7 @@ pfm_close(struct inode *inode, struct file *filp)
      * signal will be sent. In both case, we are safe
      */
     if (filp->f_flags & FASYNC) {
-        DPRINT(("[%d] cleaning up async_queue=%p\n", current->pid, ctx->ctx_async_queue));
+        DPRINT(("cleaning up async_queue=%p\n", ctx->ctx_async_queue));
         pfm_do_fasync (-1, filp, ctx, 0);
     }
@@ -1833,23 +1819,18 @@ pfm_close(struct inode *inode, struct file *filp)
     is_system = ctx->ctx_fl_system;
     task = PFM_CTX_TASK(ctx);
     regs = ia64_task_regs(task);
-    DPRINT(("[%d] ctx_state=%d is_current=%d\n",
-        current->pid, state,
+    DPRINT(("ctx_state=%d is_current=%d\n",
+        state,
         task == current ? 1 : 0));
-    if (state == PFM_CTX_UNLOADED || state == PFM_CTX_TERMINATED) {
-        goto doit;
-    }
+    /*
+     * if state == UNLOADED, then task is NULL
+     */
     /*
-     * context still loaded/masked and self monitoring,
-     * we stop/unload and we destroy right here
-     *
-     * We always go here for system-wide sessions
+     * we must stop and unload because we are losing access to the context.
      */
     if (task == current) {
 #ifdef CONFIG_SMP
@@ -1862,46 +1843,134 @@ pfm_close(struct inode *inode, struct file *filp)
          */
         if (is_system && ctx->ctx_cpu != smp_processor_id()) {
-            DPRINT(("[%d] should be running on CPU%d\n", current->pid, ctx->ctx_cpu));
-            UNPROTECT_CTX(ctx, flags);
+            DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
+            /*
+             * keep context protected but unmask interrupt for IPI
+             */
+            local_irq_restore(flags);
             pfm_syswide_cleanup_other_cpu(ctx);
-            PROTECT_CTX(ctx, flags);
             /*
-             * short circuit pfm_context_unload();
+             * restore interrupt masking
              */
-            task->thread.pfm_context = NULL;
-            ctx->ctx_task = NULL;
-            ctx->ctx_state = state = PFM_CTX_UNLOADED;
-            pfm_unreserve_session(ctx, 1 , ctx->ctx_cpu);
-            /*
-             * context is unloaded at this point
-             */
+            local_irq_save(flags);
         } else
 #endif /* CONFIG_SMP */
         {
-            DPRINT(("forcing unload on [%d]\n", current->pid));
+            DPRINT(("forcing unload\n"));
             /*
              * stop and unload, returning with state UNLOADED
              * and session unreserved.
              */
             pfm_context_unload(ctx, NULL, 0, regs);
-            ctx->ctx_state = PFM_CTX_TERMINATED;
-            DPRINT(("[%d] ctx_state=%d\n", current->pid, ctx->ctx_state));
+            DPRINT(("ctx_state=%d\n", ctx->ctx_state));
         }
-        goto doit;
     }
+
+    /*
+     * remove virtual mapping, if any, for the calling task.
+     * cannot reset ctx field until last user is calling close().
+     *
+     * ctx_smpl_vaddr must never be cleared because it is needed
+     * by every task with access to the context
+     *
+     * When called from do_exit(), the mm context is gone already, therefore
+     * mm is NULL, i.e., the VMA is already gone and we do not have to
+     * do anything here
+     */
+    if (ctx->ctx_smpl_vaddr && current->mm) {
+        smpl_buf_vaddr = ctx->ctx_smpl_vaddr;
+        smpl_buf_size = ctx->ctx_smpl_size;
+    }
+
+    UNPROTECT_CTX(ctx, flags);
+
+    /*
+     * if there was a mapping, then we systematically remove it
+     * at this point. Cannot be done inside critical section
+     * because some VM function reenables interrupts.
+     *
+     */
+    if (smpl_buf_vaddr) pfm_remove_smpl_mapping(current, smpl_buf_vaddr, smpl_buf_size);
+
+    return 0;
+}
+
+/*
+ * called either on explicit close() or from exit_files().
+ * Only the LAST user of the file gets to this point, i.e., it is
+ * called only ONCE.
+ *
+ * IMPORTANT: we get called ONLY when the refcnt on the file gets to zero
+ * (fput()),i.e, last task to access the file. Nobody else can access the
+ * file at this point.
+ *
+ * When called from exit_files(), the VMA has been freed because exit_mm()
+ * is executed before exit_files().
+ *
+ * When called from exit_files(), the current task is not yet ZOMBIE but we
+ * flush the PMU state to the context.
+ */
+static int
+pfm_close(struct inode *inode, struct file *filp)
+{
+    pfm_context_t *ctx;
+    struct task_struct *task;
+    struct pt_regs *regs;
+    DECLARE_WAITQUEUE(wait, current);
+    unsigned long flags;
+    unsigned long smpl_buf_size = 0UL;
+    void *smpl_buf_addr = NULL;
+    int free_possible = 1;
+    int state, is_system;
+
+    DPRINT(("pfm_close called private=%p\n", filp->private_data));
+
+    if (PFM_IS_FILE(filp) == 0) {
+        DPRINT(("bad magic\n"));
+        return -EBADF;
+    }
+
+    ctx = (pfm_context_t *)filp->private_data;
+    if (ctx == NULL) {
+        printk(KERN_ERR "perfmon: pfm_close: NULL ctx [%d]\n", current->pid);
+        return -EBADF;
+    }
+
+    PROTECT_CTX(ctx, flags);
+
+    state = ctx->ctx_state;
+    is_system = ctx->ctx_fl_system;
+    task = PFM_CTX_TASK(ctx);
+    regs = ia64_task_regs(task);
+
+    DPRINT(("ctx_state=%d is_current=%d\n",
+        state,
+        task == current ? 1 : 0));
+
+    /*
+     * if task == current, then pfm_flush() unloaded the context
+     */
+    if (state == PFM_CTX_UNLOADED) goto doit;
+
+    /*
+     * context is loaded/masked and task != current, we need to
+     * either force an unload or go zombie
+     */
+
     /*
      * The task is currently blocked or will block after an overflow.
      * we must force it to wakeup to get out of the
-     * MASKED state and transition to the unloaded state by itself
+     * MASKED state and transition to the unloaded state by itself.
+     *
+     * This situation is only possible for per-task mode
      */
     if (state == PFM_CTX_MASKED && CTX_OVFL_NOBLOCK(ctx) == 0) {
@@ -1911,7 +1980,7 @@ pfm_close(struct inode *inode, struct file *filp)
      *
      * We cannot use the ZOMBIE state, because it is checked
      * by pfm_load_regs() which is called upon wakeup from down().
-     * In such cas, it would free the context and then we would
+     * In such case, it would free the context and then we would
      * return to pfm_handle_work() which would access the
      * stale context. Instead, we set a flag invisible to pfm_load_regs()
      * but visible to pfm_handle_work().
@@ -1926,7 +1995,7 @@ pfm_close(struct inode *inode, struct file *filp)
      */
     up(&ctx->ctx_restart_sem);
-    DPRINT(("waking up ctx_state=%d for [%d]\n", state, current->pid));
+    DPRINT(("waking up ctx_state=%d\n", state));
     /*
      * put ourself to sleep waiting for the other
@@ -1956,11 +2025,11 @@ pfm_close(struct inode *inode, struct file *filp)
     set_current_state(TASK_RUNNING);
     /*
-     * context is terminated at this point
+     * context is unloaded at this point
      */
-    DPRINT(("after zombie wakeup ctx_state=%d for [%d]\n", state, current->pid));
+    DPRINT(("after zombie wakeup ctx_state=%d for\n", state));
     }
-    else {
+    else if (task != current) {
 #ifdef CONFIG_SMP
     /*
      * switch context to zombie state
@@ -1978,8 +2047,7 @@ pfm_close(struct inode *inode, struct file *filp)
 #endif
     }
-doit: /* cannot assume task is defined from now on */
+doit:
     /* reload state, may have changed during opening of critical section */
     state = ctx->ctx_state;
@@ -1987,18 +2055,9 @@ pfm_close(struct inode *inode, struct file *filp)
      * the context is still attached to a task (possibly current)
      * we cannot destroy it right now
      */
-    /*
-     * remove virtual mapping, if any. will be NULL when
-     * called from exit_files().
-     */
-    if (ctx->ctx_smpl_vaddr) {
-        smpl_buf_vaddr = ctx->ctx_smpl_vaddr;
-        smpl_buf_size = ctx->ctx_smpl_size;
-        ctx->ctx_smpl_vaddr = NULL;
-    }
     /*
-     * we must fre the sampling buffer right here because
+     * we must free the sampling buffer right here because
      * we cannot rely on it being cleaned up later by the
      * monitored task. It is not possible to free vmalloc'ed
      * memory in pfm_load_regs(). Instead, we remove the buffer
@@ -2011,21 +2070,19 @@ pfm_close(struct inode *inode, struct file *filp)
         smpl_buf_size = ctx->ctx_smpl_size;
         /* no more sampling */
         ctx->ctx_smpl_hdr = NULL;
+        ctx->ctx_fl_is_sampling = 0;
     }
-    DPRINT(("[%d] ctx_state=%d free_possible=%d vaddr=%p addr=%p size=%lu\n",
-        current->pid,
+    DPRINT(("ctx_state=%d free_possible=%d addr=%p size=%lu\n",
         state,
         free_possible,
-        smpl_buf_vaddr,
         smpl_buf_addr,
         smpl_buf_size));
     if (smpl_buf_addr) pfm_exit_smpl_buffer(ctx->ctx_buf_fmt);
     /*
-     * UNLOADED and TERMINATED mean that the session has already been
-     * unreserved.
+     * UNLOADED that the session has already been unreserved.
      */
     if (state == PFM_CTX_ZOMBIE) {
         pfm_unreserve_session(ctx, ctx->ctx_fl_system , ctx->ctx_cpu);
@@ -2047,14 +2104,9 @@ pfm_close(struct inode *inode, struct file *filp)
     UNPROTECT_CTX(ctx, flags);
     /*
-     * if there was a mapping, then we systematically remove it
-     * at this point. Cannot be done inside critical section
-     * because some VM function reenables interrupts.
-     *
      * All memory free operations (especially for vmalloc'ed memory)
      * MUST be done with interrupts ENABLED.
      */
-    if (smpl_buf_vaddr) pfm_remove_smpl_mapping(current, smpl_buf_vaddr, smpl_buf_size);
     if (smpl_buf_addr) pfm_rvfree(smpl_buf_addr, smpl_buf_size);
     /*
@@ -2072,6 +2124,8 @@ pfm_no_open(struct inode *irrelevant, struct file *dontcare)
     return -ENXIO;
 }
 static struct file_operations pfm_file_ops = {
     .llseek = pfm_lseek,
     .read = pfm_read,
@@ -2080,7 +2134,8 @@ static struct file_operations pfm_file_ops = {
     .ioctl = pfm_ioctl,
     .open = pfm_no_open, /* special open code to disallow open via /proc */
     .fasync = pfm_fasync,
-    .release = pfm_close
+    .release = pfm_close,
+    .flush = pfm_flush
 };
 static int
@@ -2088,6 +2143,7 @@ pfmfs_delete_dentry(struct dentry *dentry)
 {
     return 1;
 }
 static struct dentry_operations pfmfs_dentry_operations = {
     .d_delete = pfmfs_delete_dentry,
 };
@@ -2172,27 +2228,6 @@ pfm_free_fd(int fd, struct file *file)
     put_unused_fd(fd);
 }
-/*
- * This function gets called from mm/mmap.c:exit_mmap() only when there is a sampling buffer
- * attached to the context AND the current task has a mapping for it, i.e., it is the original
- * creator of the context.
- *
- * This function is used to remember the fact that the vma describing the sampling buffer
- * has now been removed. It can only be called when no other tasks share the same mm context.
- *
- */
-static void
-pfm_vm_close(struct vm_area_struct *vma)
-{
-    pfm_context_t *ctx = (pfm_context_t *)vma->vm_private_data;
-    unsigned long flags;
-
-    PROTECT_CTX(ctx, flags);
-    ctx->ctx_smpl_vaddr = NULL;
-    UNPROTECT_CTX(ctx, flags);
-    DPRINT(("[%d] clearing vaddr for ctx %p\n", current->pid, ctx));
-}
 static int
 pfm_remap_buffer(struct vm_area_struct *vma, unsigned long buf, unsigned long addr, unsigned long size)
 {
@@ -2252,7 +2287,7 @@ pfm_smpl_buffer_alloc(struct task_struct *task, pfm_context_t *ctx, unsigned lon
         return -ENOMEM;
     }
-    DPRINT(("[%d] smpl_buf @%p\n", current->pid, smpl_buf));
+    DPRINT(("smpl_buf @%p\n", smpl_buf));
     /* allocate vma */
     vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
@@ -2268,12 +2303,12 @@ pfm_smpl_buffer_alloc(struct task_struct *task, pfm_context_t *ctx, unsigned lon
      * what we want.
      */
     vma->vm_mm = mm;
-    vma->vm_flags = VM_READ| VM_MAYREAD |VM_RESERVED|VM_DONTCOPY;
+    vma->vm_flags = VM_READ| VM_MAYREAD |VM_RESERVED;
     vma->vm_page_prot = PAGE_READONLY; /* XXX may need to change */
-    vma->vm_ops = &pfm_vm_ops;
+    vma->vm_ops = NULL;
     vma->vm_pgoff = 0;
     vma->vm_file = NULL;
-    vma->vm_private_data = ctx; /* information needed by the pfm_vm_close() function */
+    vma->vm_private_data = NULL;
     /*
      * Now we have everything we need and we can initialize
@@ -2342,8 +2377,7 @@ static int
 pfm_bad_permissions(struct task_struct *task)
 {
     /* inspired by ptrace_attach() */
-    DPRINT(("[%d] cur: uid=%d gid=%d task: euid=%d suid=%d uid=%d egid=%d sgid=%d\n",
-        current->pid,
+    DPRINT(("cur: uid=%d gid=%d task: euid=%d suid=%d uid=%d egid=%d sgid=%d\n",
         current->uid,
         current->gid,
         task->euid,
@@ -2532,11 +2566,11 @@ pfm_task_incompatible(pfm_context_t *ctx, struct task_struct *task)
      * no kernel task or task not owner by caller
      */
     if (task->mm == NULL) {
-        DPRINT(("[%d] task [%d] has not memory context (kernel thread)\n", current->pid, task->pid));
+        DPRINT(("task [%d] has not memory context (kernel thread)\n", task->pid));
         return -EPERM;
     }
     if (pfm_bad_permissions(task)) {
-        DPRINT(("[%d] no permission to attach to [%d]\n", current->pid, task->pid));
+        DPRINT(("no permission to attach to [%d]\n", task->pid));
         return -EPERM;
     }
     /*
@@ -2548,7 +2582,7 @@ pfm_task_incompatible(pfm_context_t *ctx, struct task_struct *task)
     }
     if (task->state == TASK_ZOMBIE) {
-        DPRINT(("[%d] cannot attach to zombie task [%d]\n", current->pid, task->pid));
+        DPRINT(("cannot attach to zombie task [%d]\n", task->pid));
         return -EBUSY;
     }
@@ -2558,7 +2592,7 @@ pfm_task_incompatible(pfm_context_t *ctx, struct task_struct *task)
     if (task == current) return 0;
     if (task->state != TASK_STOPPED) {
-        DPRINT(("[%d] cannot attach to non-stopped task [%d] state=%ld\n", current->pid, task->pid, task->state));
+        DPRINT(("cannot attach to non-stopped task [%d] state=%ld\n", task->pid, task->state));
         return -EBUSY;
     }
     /*
@@ -2835,7 +2869,7 @@ pfm_write_pmcs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
     task = ctx->ctx_task;
     impl_pmds = pmu_conf.impl_pmds[0];
-    if (state == PFM_CTX_TERMINATED || state == PFM_CTX_ZOMBIE) return -EINVAL;
+    if (state == PFM_CTX_ZOMBIE) return -EINVAL;
     if (is_loaded) {
         thread = &task->thread;
@@ -2845,7 +2879,7 @@ pfm_write_pmcs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
          * It does not have to be the owner (ctx_task) of the context per se.
          */
         if (is_system && ctx->ctx_cpu != smp_processor_id()) {
-            DPRINT(("[%d] should be running on CPU%d\n", current->pid, ctx->ctx_cpu));
+            DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
             return -EBUSY;
         }
         can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
@@ -2928,7 +2962,7 @@ pfm_write_pmcs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
         /*
          * execute write checker, if any
          */
-        if (PMC_WR_FUNC(cnum)) {
+        if (pfm_sysctl.expert_mode == 0 && PMC_WR_FUNC(cnum)) {
             ret = PMC_WR_FUNC(cnum)(task, ctx, cnum, &value, regs);
             if (ret) goto error;
             ret = -EINVAL;
@@ -3072,7 +3106,7 @@ pfm_write_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
     ovfl_mask = pmu_conf.ovfl_val;
     task = ctx->ctx_task;
-    if (unlikely(state == PFM_CTX_TERMINATED || state == PFM_CTX_ZOMBIE)) return -EINVAL;
+    if (unlikely(state == PFM_CTX_ZOMBIE)) return -EINVAL;
     /*
      * on both UP and SMP, we can only write to the PMC when the task is
@@ -3086,7 +3120,7 @@ pfm_write_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
      * It does not have to be the owner (ctx_task) of the context per se.
      */
     if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) {
-        DPRINT(("[%d] should be running on CPU%d\n", current->pid, ctx->ctx_cpu));
+        DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
         return -EBUSY;
     }
     can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
@@ -3106,7 +3140,7 @@ pfm_write_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
     /*
      * execute write checker, if any
      */
-    if (PMD_WR_FUNC(cnum)) {
+    if (pfm_sysctl.expert_mode == 0 && PMD_WR_FUNC(cnum)) {
         unsigned long v = value;
         ret = PMD_WR_FUNC(cnum)(task, ctx, cnum, &v, regs);
@@ -3279,7 +3313,7 @@ pfm_read_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
      * It does not have to be the owner (ctx_task) of the context per se.
      */
     if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) {
-        DPRINT(("[%d] should be running on CPU%d\n", current->pid, ctx->ctx_cpu));
+        DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
         return -EBUSY;
     }
     /*
@@ -3347,7 +3381,7 @@ pfm_read_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
     /*
      * execute read checker, if any
      */
-    if (unlikely(PMD_RD_FUNC(cnum))) {
+    if (unlikely(pfm_sysctl.expert_mode == 0 && PMD_RD_FUNC(cnum))) {
         unsigned long v = val;
         ret = PMD_RD_FUNC(cnum)(ctx->ctx_task, ctx, cnum, &v, regs);
         if (ret) goto error;
@@ -3376,14 +3410,14 @@ pfm_read_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
     return ret;
 }
-long
-pfm_mod_write_pmcs(struct task_struct *task, pfarg_reg_t *req, unsigned int nreq, struct pt_regs *regs)
+int
+pfm_mod_write_pmcs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
 {
     pfm_context_t *ctx;
-    if (task == NULL || req == NULL) return -EINVAL;
-    ctx = task->thread.pfm_context;
+    if (req == NULL) return -EINVAL;
+    ctx = GET_PMU_CTX();
     if (ctx == NULL) return -EINVAL;
@@ -3391,20 +3425,19 @@ pfm_mod_write_pmcs(struct task_struct *task, pfarg_reg_t *req, unsigned int nreq
      * for now limit to current task, which is enough when calling
      * from overflow handler
      */
-    if (task != current) return -EBUSY;
+    if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
     return pfm_write_pmcs(ctx, req, nreq, regs);
 }
 EXPORT_SYMBOL(pfm_mod_write_pmcs);
-long
-pfm_mod_read_pmds(struct task_struct *task, pfarg_reg_t *req, unsigned int nreq, struct pt_regs *regs)
+int
+pfm_mod_read_pmds(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
 {
     pfm_context_t *ctx;
-    if (task == NULL || req == NULL) return -EINVAL;
-    //ctx = task->thread.pfm_context;
+    if (req == NULL) return -EINVAL;
     ctx = GET_PMU_CTX();
     if (ctx == NULL) return -EINVAL;
@@ -3419,48 +3452,6 @@ pfm_mod_read_pmds(struct task_struct *task, void *req, unsigned int nreq,
 }
 EXPORT_SYMBOL(pfm_mod_read_pmds);
-long
-pfm_mod_fast_read_pmds(struct task_struct *task, unsigned long mask[4], unsigned long *addr, struct pt_regs *regs)
-{
-    pfm_context_t *ctx;
-    unsigned long m, val;
-    unsigned int j;
-
-    if (task == NULL || addr == NULL) return -EINVAL;
-
-    //ctx = task->thread.pfm_context;
-    ctx = GET_PMU_CTX();
-
-    if (ctx == NULL) return -EINVAL;
-
-    /*
-     * for now limit to current task, which is enough when calling
-     * from overflow handler
-     */
-    if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
-
-    m = mask[0];
-    for (j=0; m; m >>=1, j++) {
-        if ((m & 0x1) == 0) continue;
-        if (!(PMD_IS_IMPL(j) && CTX_IS_USED_PMD(ctx, j)) ) return -EINVAL;
-        if (PMD_IS_COUNTING(j)) {
-            val = pfm_read_soft_counter(ctx, j);
-        } else {
-            val = ia64_get_pmd(j);
-        }
-        *addr++ = val;
-        /* XXX: should call read checker routine? */
-        DPRINT(("single_read_pmd[%u]=0x%lx\n", j, val));
-    }
-    return 0;
-}
-EXPORT_SYMBOL(pfm_mod_fast_read_pmds);
 /*
  * Only call this function when a process it trying to
  * write the debug registers (reading is always allowed)
@@ -3565,9 +3556,6 @@ pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
         case PFM_CTX_ZOMBIE:
             DPRINT(("invalid state=%d\n", state));
             return -EBUSY;
-        case PFM_CTX_TERMINATED:
-            DPRINT(("context is terminated, nothing to do\n"));
-            return 0;
         default:
             DPRINT(("state=%d, cannot operate (no active_restart handler)\n", state));
             return -EINVAL;
@@ -3579,7 +3567,7 @@ pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
      * It does not have to be the owner (ctx_task) of the context per se.
      */
     if (is_system && ctx->ctx_cpu != smp_processor_id()) {
-        DPRINT(("[%d] should be running on CPU%d\n", current->pid, ctx->ctx_cpu));
+        DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
         return -EBUSY;
     }
@@ -3739,7 +3727,7 @@ pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_
     is_system = ctx->ctx_fl_system;
     task = ctx->ctx_task;
-    if (state == PFM_CTX_TERMINATED || state == PFM_CTX_ZOMBIE) return -EINVAL;
+    if (state == PFM_CTX_ZOMBIE) return -EINVAL;
     /*
      * on both UP and SMP, we can only write to the PMC when the task is
@@ -3753,7 +3741,7 @@ pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_
      * It does not have to be the owner (ctx_task) of the context per se.
      */
     if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) {
-        DPRINT(("[%d] should be running on CPU%d\n", current->pid, ctx->ctx_cpu));
+        DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
         return -EBUSY;
     }
     can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
@@ -3920,6 +3908,49 @@ pfm_write_dbrs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
     return pfm_write_ibr_dbr(PFM_DATA_RR, ctx, arg, count, regs);
 }
+int
+pfm_mod_write_ibrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
+{
+    pfm_context_t *ctx;
+
+    if (req == NULL) return -EINVAL;
+
+    ctx = GET_PMU_CTX();
+
+    if (ctx == NULL) return -EINVAL;
+
+    /*
+     * for now limit to current task, which is enough when calling
+     * from overflow handler
+     */
+    if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
+
+    return pfm_write_ibrs(ctx, req, nreq, regs);
+}
+EXPORT_SYMBOL(pfm_mod_write_ibrs);
+
+int
+pfm_mod_write_dbrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
+{
+    pfm_context_t *ctx;
+
+    if (req == NULL) return -EINVAL;
+
+    ctx = GET_PMU_CTX();
+
+    if (ctx == NULL) return -EINVAL;
+
+    /*
+     * for now limit to current task, which is enough when calling
+     * from overflow handler
+     */
+    if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
+
+    return pfm_write_dbrs(ctx, req, nreq, regs);
+}
+EXPORT_SYMBOL(pfm_mod_write_dbrs);
 static int
 pfm_get_features(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 {
@@ -3947,11 +3978,10 @@ pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
      * It does not have to be the owner (ctx_task) of the context per se.
      */
     if (is_system && ctx->ctx_cpu != smp_processor_id()) {
-        DPRINT(("[%d] should be running on CPU%d\n", current->pid, ctx->ctx_cpu));
+        DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
         return -EBUSY;
     }
-    DPRINT(("current [%d] task [%d] ctx_state=%d is_system=%d\n",
-        current->pid,
+    DPRINT(("task [%d] ctx_state=%d is_system=%d\n",
         PFM_CTX_TASK(ctx)->pid,
         state,
         is_system));
@@ -4010,7 +4040,7 @@ pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
          * monitoring disabled in kernel at next reschedule
          */
         ctx->ctx_saved_psr_up = 0;
-        DPRINT(("pfm_stop: current [%d] task=[%d]\n", current->pid, task->pid));
+        DPRINT(("task=[%d]\n", task->pid));
     }
     return 0;
 }
@@ -4033,7 +4063,7 @@ pfm_start(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
      * It does not have to be the owner (ctx_task) of the context per se.
      */
     if (is_system && ctx->ctx_cpu != smp_processor_id()) {
-        DPRINT(("[%d] should be running on CPU%d\n", current->pid, ctx->ctx_cpu));
+        DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
         return -EBUSY;
     }
@@ -4167,9 +4197,8 @@ pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
     /*
      * can only load from unloaded or terminated state
      */
-    if (state != PFM_CTX_UNLOADED && state != PFM_CTX_TERMINATED) {
-        DPRINT(("[%d] cannot load to [%d], invalid ctx_state=%d\n",
-            current->pid,
+    if (state != PFM_CTX_UNLOADED) {
+        DPRINT(("cannot load to [%d], invalid ctx_state=%d\n",
             req->load_pid,
             ctx->ctx_state));
         return -EINVAL;
@@ -4178,7 +4207,7 @@ pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
     DPRINT(("load_pid [%d] using_dbreg=%d\n", req->load_pid, ctx->ctx_fl_using_dbreg));
     if (CTX_OVFL_NOBLOCK(ctx) == 0 && req->load_pid == current->pid) {
-        DPRINT(("cannot use blocking mode on self for [%d]\n", current->pid));
+        DPRINT(("cannot use blocking mode on self\n"));
         return -EINVAL;
     }
@@ -4194,8 +4223,7 @@ pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
      * system wide is self monitoring only
      */
     if (is_system && task != current) {
-        DPRINT(("system wide is self monitoring only current=%d load_pid=%d\n",
-            current->pid,
+        DPRINT(("system wide is self monitoring only load_pid=%d\n",
             req->load_pid));
         goto error;
     }
@@ -4264,8 +4292,7 @@ pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
      *
      * XXX: needs to be atomic
      */
-    DPRINT(("[%d] before cmpxchg() old_ctx=%p new_ctx=%p\n",
-        current->pid,
+    DPRINT(("before cmpxchg() old_ctx=%p new_ctx=%p\n",
         thread->pfm_context, ctx));
     old = ia64_cmpxchg(acq, &thread->pfm_context, NULL, ctx, sizeof(pfm_context_t *));
@@ -4409,19 +4436,19 @@ pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *reg
 {
     struct task_struct *task = PFM_CTX_TASK(ctx);
     struct pt_regs *tregs;
-    int state, is_system;
+    int prev_state, is_system;
     int ret;
     DPRINT(("ctx_state=%d task [%d]\n", ctx->ctx_state, task ? task->pid : -1));
-    state = ctx->ctx_state;
+    prev_state = ctx->ctx_state;
     is_system = ctx->ctx_fl_system;
     /*
      * unload only when necessary
      */
-    if (state == PFM_CTX_TERMINATED || state == PFM_CTX_UNLOADED) {
-        DPRINT(("[%d] ctx_state=%d, nothing to do\n", current->pid, ctx->ctx_state));
+    if (prev_state == PFM_CTX_UNLOADED) {
+        DPRINT(("ctx_state=%d, nothing to do\n", prev_state));
         return 0;
     }
@@ -4431,7 +4458,7 @@ pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *reg
     ret = pfm_stop(ctx, NULL, 0, regs);
     if (ret) return ret;
-    ctx->ctx_state = state = PFM_CTX_UNLOADED;
+    ctx->ctx_state = PFM_CTX_UNLOADED;
     /*
      * in system mode, we need to update the PMU directly
@@ -4458,6 +4485,7 @@ pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *reg
      * at this point we are done with the PMU
      * so we can unreserve the resource.
      */
+    if (prev_state != PFM_CTX_ZOMBIE)
     pfm_unreserve_session(ctx, 1 , ctx->ctx_cpu);
     /*
@@ -4497,7 +4525,10 @@ pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *reg
     /*
      * at this point we are done with the PMU
      * so we can unreserve the resource.
+     *
+     * when state was ZOMBIE, we have already unreserved.
      */
+    if (prev_state != PFM_CTX_ZOMBIE)
     pfm_unreserve_session(ctx, 0 , ctx->ctx_cpu);
     /*
@@ -4549,12 +4580,14 @@ pfm_force_cleanup(pfm_context_t *ctx, struct pt_regs *regs)
     task->thread.pfm_context = NULL;
     task->thread.flags &= ~IA64_THREAD_PM_VALID;
-    DPRINT(("context <%d> force cleanup for [%d] by [%d]\n", ctx->ctx_fd, task->pid, current->pid));
+    DPRINT(("force cleanupf for [%d]\n", task->pid));
 }
 /*
  * called only from exit_thread(): task == current
+ * we come here only if current has a context attached (loaded or masked)
  */
 void
 pfm_exit_thread(struct task_struct *task)
@@ -4575,7 +4608,8 @@ pfm_exit_thread(struct task_struct *task)
     switch(state) {
         case PFM_CTX_UNLOADED:
             /*
-             * come here only if attached
+             * only comes to thios function if pfm_context is not NULL, i.e., cannot
+             * be in unloaded state
              */
             printk(KERN_ERR "perfmon: pfm_exit_thread [%d] ctx unloaded\n", task->pid);
             break;
@@ -4583,20 +4617,17 @@ pfm_exit_thread(struct task_struct *task)
         case PFM_CTX_MASKED:
             ret = pfm_context_unload(ctx, NULL, 0, regs);
             if (ret) {
-                printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task->pid, ctx->ctx_state, ret);
+                printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task->pid, state, ret);
             }
-            ctx->ctx_state = PFM_CTX_TERMINATED;
-            DPRINT(("ctx terminated by [%d]\n", task->pid));
+            DPRINT(("ctx unloaded for current state was %d\n", state));
             pfm_end_notify_user(ctx);
             break;
         case PFM_CTX_ZOMBIE:
-            pfm_clear_psr_up();
-            BUG_ON(ctx->ctx_smpl_hdr);
-            pfm_force_cleanup(ctx, regs);
+            ret = pfm_context_unload(ctx, NULL, 0, regs);
+            if (ret) {
+                printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task->pid, state, ret);
+            }
             free_ok = 1;
             break;
         default:
@@ -4696,7 +4727,7 @@ pfm_check_task_state(pfm_context_t *ctx, int cmd, unsigned long flags)
     if (task == current || ctx->ctx_fl_system) return 0;
     /*
-     * context is UNLOADED, MASKED, TERMINATED we are safe to go
+     * context is UNLOADED, MASKED we are safe to go
      */
     if (state != PFM_CTX_LOADED) return 0;
@@ -4749,7 +4780,7 @@ sys_perfmonctl (int fd, int cmd, void *arg, int count, long arg5, long arg6, lon
     if (unlikely(PFM_IS_DISABLED())) return -ENOSYS;
     if (unlikely(cmd < 0 || cmd >= PFM_CMD_COUNT)) {
-        DPRINT(("[%d] invalid cmd=%d\n", current->pid, cmd));
+        DPRINT(("invalid cmd=%d\n", cmd));
         return -EINVAL;
     }
@@ -4760,7 +4791,7 @@ sys_perfmonctl (int fd, int cmd, void *arg, int count, long arg5, long arg6, lon
     cmd_flags = pfm_cmd_tab[cmd].cmd_flags;
     if (unlikely(func == NULL)) {
-        DPRINT(("[%d] invalid cmd=%d\n", current->pid, cmd));
+        DPRINT(("invalid cmd=%d\n", cmd));
         return -EINVAL;
     }
@@ -4803,7 +4834,7 @@ sys_perfmonctl (int fd, int cmd, void *arg, int count, long arg5, long arg6, lon
      * assume sz = 0 for command without parameters
      */
     if (sz && copy_from_user(args_k, arg, sz)) {
-        DPRINT(("[%d] cannot copy_from_user %lu bytes @%p\n", current->pid, sz, arg));
+        DPRINT(("cannot copy_from_user %lu bytes @%p\n", sz, arg));
         goto error_args;
     }
@@ -4819,7 +4850,7 @@ sys_perfmonctl (int fd, int cmd, void *arg, int count, long arg5, long arg6, lon
     completed_args = 1;
-    DPRINT(("[%d] restart_args sz=%lu xtra_sz=%lu\n", current->pid, sz, xtra_sz));
+    DPRINT(("restart_args sz=%lu xtra_sz=%lu\n", sz, xtra_sz));
     /* retry if necessary */
     if (likely(xtra_sz)) goto restart_args;
@@ -4831,17 +4862,17 @@ sys_perfmonctl (int fd, int cmd, void *arg, int count, long arg5, long arg6, lon
     file = fget(fd);
     if (unlikely(file == NULL)) {
-        DPRINT(("[%d] invalid fd %d\n", current->pid, fd));
+        DPRINT(("invalid fd %d\n", fd));
         goto error_args;
     }
     if (unlikely(PFM_IS_FILE(file) == 0)) {
-        DPRINT(("[%d] fd %d not related to perfmon\n", current->pid, fd));
+        DPRINT(("fd %d not related to perfmon\n", fd));
         goto error_args;
     }
     ctx = (pfm_context_t *)file->private_data;
     if (unlikely(ctx == NULL)) {
-        DPRINT(("[%d] no context for fd %d\n", current->pid, fd));
+        DPRINT(("no context for fd %d\n", fd));
         goto error_args;
     }
     prefetch(&ctx->ctx_state);
@@ -4861,7 +4892,7 @@ sys_perfmonctl (int fd, int cmd, void *arg, int count, long arg5, long arg6, lon
 abort_locked:
     if (likely(ctx)) {
-        DPRINT(("[%d] context unlocked\n", current->pid));
+        DPRINT(("context unlocked\n"));
         UNPROTECT_CTX(ctx, flags);
         fput(file);
     }
...@@ -4945,12 +4976,7 @@ pfm_context_force_terminate(pfm_context_t *ctx, struct pt_regs *regs) ...@@ -4945,12 +4976,7 @@ pfm_context_force_terminate(pfm_context_t *ctx, struct pt_regs *regs)
current->thread.flags &= ~IA64_THREAD_PM_VALID; current->thread.flags &= ~IA64_THREAD_PM_VALID;
ctx->ctx_task = NULL; ctx->ctx_task = NULL;
/* DPRINT(("context terminated\n"));
* switch to terminated state
*/
ctx->ctx_state = PFM_CTX_TERMINATED;
DPRINT(("context <%d> terminated for [%d]\n", ctx->ctx_fd, current->pid));
/* /*
* and wakeup controlling task, indicating we are now disconnected * and wakeup controlling task, indicating we are now disconnected
...@@ -4995,15 +5021,15 @@ pfm_handle_work(void) ...@@ -4995,15 +5021,15 @@ pfm_handle_work(void)
*/ */
reason = ctx->ctx_fl_trap_reason; reason = ctx->ctx_fl_trap_reason;
ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_NONE; ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_NONE;
ovfl_regs = ctx->ctx_ovfl_regs[0];
DPRINT(("[%d] reason=%d state=%d\n", current->pid, reason, ctx->ctx_state)); DPRINT(("reason=%d state=%d\n", reason, ctx->ctx_state));
/* /*
* must be done before we check non-blocking mode * must be done before we check for simple-reset mode
*/ */
if (ctx->ctx_fl_going_zombie || ctx->ctx_state == PFM_CTX_ZOMBIE) goto do_zombie; if (ctx->ctx_fl_going_zombie || ctx->ctx_state == PFM_CTX_ZOMBIE) goto do_zombie;
ovfl_regs = ctx->ctx_ovfl_regs[0];
//if (CTX_OVFL_NOBLOCK(ctx)) goto skip_blocking; //if (CTX_OVFL_NOBLOCK(ctx)) goto skip_blocking;
if (reason == PFM_TRAP_REASON_RESET) goto skip_blocking; if (reason == PFM_TRAP_REASON_RESET) goto skip_blocking;
...@@ -5022,6 +5048,14 @@ pfm_handle_work(void) ...@@ -5022,6 +5048,14 @@ pfm_handle_work(void)
PROTECT_CTX(ctx, flags); PROTECT_CTX(ctx, flags);
/*
* we need to read the ovfl_regs only after wake-up
* because we may have had pfm_write_pmds() in between
* and that can changed PMD values and therefore
* ovfl_regs is reset for these new PMD values.
*/
ovfl_regs = ctx->ctx_ovfl_regs[0];
if (ctx->ctx_fl_going_zombie) { if (ctx->ctx_fl_going_zombie) {
do_zombie: do_zombie:
DPRINT(("context is zombie, bailing out\n")); DPRINT(("context is zombie, bailing out\n"));
...@@ -5050,7 +5084,7 @@ pfm_notify_user(pfm_context_t *ctx, pfm_msg_t *msg) ...@@ -5050,7 +5084,7 @@ pfm_notify_user(pfm_context_t *ctx, pfm_msg_t *msg)
return 0; return 0;
} }
DPRINT(("[%d] waking up somebody\n", current->pid)); DPRINT(("waking up somebody\n"));
if (msg) wake_up_interruptible(&ctx->ctx_msgq_wait); if (msg) wake_up_interruptible(&ctx->ctx_msgq_wait);
...@@ -5085,11 +5119,10 @@ pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds) ...@@ -5085,11 +5119,10 @@ pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds)
msg->pfm_ovfl_msg.msg_tstamp = 0UL; msg->pfm_ovfl_msg.msg_tstamp = 0UL;
} }
DPRINT(("ovfl msg: msg=%p no_msg=%d fd=%d pid=%d ovfl_pmds=0x%lx\n", DPRINT(("ovfl msg: msg=%p no_msg=%d fd=%d ovfl_pmds=0x%lx\n",
msg, msg,
ctx->ctx_fl_no_msg, ctx->ctx_fl_no_msg,
ctx->ctx_fd, ctx->ctx_fd,
current->pid,
ovfl_pmds)); ovfl_pmds));
return pfm_notify_user(ctx, msg); return pfm_notify_user(ctx, msg);
...@@ -5112,10 +5145,10 @@ pfm_end_notify_user(pfm_context_t *ctx) ...@@ -5112,10 +5145,10 @@ pfm_end_notify_user(pfm_context_t *ctx)
msg->pfm_end_msg.msg_ctx_fd = ctx->ctx_fd; msg->pfm_end_msg.msg_ctx_fd = ctx->ctx_fd;
msg->pfm_ovfl_msg.msg_tstamp = 0UL; msg->pfm_ovfl_msg.msg_tstamp = 0UL;
DPRINT(("end msg: msg=%p no_msg=%d ctx_fd=%d pid=%d\n", DPRINT(("end msg: msg=%p no_msg=%d ctx_fd=%d\n",
msg, msg,
ctx->ctx_fl_no_msg, ctx->ctx_fl_no_msg,
ctx->ctx_fd, current->pid)); ctx->ctx_fd));
return pfm_notify_user(ctx, msg); return pfm_notify_user(ctx, msg);
} }
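Both notification paths above follow the same shape: fill in a typed message, zero the timestamp, and hand it off so readers blocked on the context's message queue are woken. A self-contained sketch with stand-in names (end_msg_t, queue_and_wake) for pfm_msg_t and the ctx_msgq_wait wakeup:

#include <stdio.h>
#include <string.h>

enum { PFM_MSG_END = 2 };

typedef struct {
        int           msg_type;
        int           msg_ctx_fd;
        unsigned long msg_tstamp;
} end_msg_t;                              /* stand-in for the real pfm_msg_t */

/* stand-in for queuing the message and waking blocked readers */
static void queue_and_wake(end_msg_t *msg)
{
        printf("queued end msg for fd %d, waking readers\n", msg->msg_ctx_fd);
}

static void end_notify_sketch(int ctx_fd, int fl_no_msg)
{
        end_msg_t msg;

        memset(&msg, 0, sizeof(msg));
        msg.msg_type   = PFM_MSG_END;
        msg.msg_ctx_fd = ctx_fd;
        msg.msg_tstamp = 0UL;             /* timestamping left to the reader */

        if (!fl_no_msg)
                queue_and_wake(&msg);     /* fl_no_msg set: skip the message entirely */
}

int main(void) { end_notify_sketch(7, 0); return 0; }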
...@@ -5275,8 +5308,7 @@ pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, str ...@@ -5275,8 +5308,7 @@ pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, str
* when the module cannot handle the rest of the overflows, we abort right here * when the module cannot handle the rest of the overflows, we abort right here
*/ */
if (ret && pmd_mask) { if (ret && pmd_mask) {
DPRINT(("current [%d] handler aborts leftover ovfl_pmds=0x%lx\n", DPRINT(("handler aborts leftover ovfl_pmds=0x%lx\n",
current->pid,
pmd_mask<<PMU_FIRST_COUNTER)); pmd_mask<<PMU_FIRST_COUNTER));
} }
/* /*
...@@ -5298,8 +5330,7 @@ pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, str ...@@ -5298,8 +5330,7 @@ pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, str
if (ovfl_notify == 0) reset_pmds = ovfl_pmds; if (ovfl_notify == 0) reset_pmds = ovfl_pmds;
} }
DPRINT(("current [%d] ovfl_pmds=0x%lx reset_pmds=0x%lx\n", DPRINT(("ovfl_pmds=0x%lx reset_pmds=0x%lx\n",
current->pid,
ovfl_pmds, ovfl_pmds,
reset_pmds)); reset_pmds));
/* /*
...@@ -5341,8 +5372,7 @@ pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, str ...@@ -5341,8 +5372,7 @@ pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, str
must_notify = 1; must_notify = 1;
} }
DPRINT_ovfl(("current [%d] owner [%d] pending=%ld reason=%u ovfl_pmds=0x%lx ovfl_notify=0x%lx masked=%d\n", DPRINT_ovfl(("owner [%d] pending=%ld reason=%u ovfl_pmds=0x%lx ovfl_notify=0x%lx masked=%d\n",
current->pid,
GET_PMU_OWNER() ? GET_PMU_OWNER()->pid : -1, GET_PMU_OWNER() ? GET_PMU_OWNER()->pid : -1,
PFM_GET_WORK_PENDING(task), PFM_GET_WORK_PENDING(task),
ctx->ctx_fl_trap_reason, ctx->ctx_fl_trap_reason,
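The reset_pmds assignment above encodes the policy in one branch: with nobody to notify, overflowed counters are reset on the spot; otherwise the mask is parked in ovfl_regs for pfm_handle_work() to act on later. A toy rendering of that bookkeeping (the real handler's control flow is richer):

#include <stdio.h>

int main(void)
{
        unsigned long ovfl_pmds   = 0x30UL;   /* counters 4 and 5 overflowed */
        unsigned long ovfl_notify = 0x20UL;   /* counter 5 requested notification */
        unsigned long reset_pmds  = 0UL;
        unsigned long ovfl_regs   = 0UL;      /* consumed by pfm_handle_work() later */

        if (ovfl_notify == 0)
                reset_pmds = ovfl_pmds;       /* nobody to tell: reset everything now */
        else
                ovfl_regs = ovfl_pmds;        /* park the mask until the user restarts */

        printf("ovfl=0x%lx reset_now=0x%lx parked=0x%lx\n",
               ovfl_pmds, reset_pmds, ovfl_regs);
        return 0;
}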
...@@ -5521,6 +5551,7 @@ pfm_proc_info(char *page) ...@@ -5521,6 +5551,7 @@ pfm_proc_info(char *page)
p += sprintf(p, "perfmon version : %u.%u\n", PFM_VERSION_MAJ, PFM_VERSION_MIN); p += sprintf(p, "perfmon version : %u.%u\n", PFM_VERSION_MAJ, PFM_VERSION_MIN);
p += sprintf(p, "model : %s\n", pmu_conf.pmu_name); p += sprintf(p, "model : %s\n", pmu_conf.pmu_name);
p += sprintf(p, "fastctxsw : %s\n", pfm_sysctl.fastctxsw > 0 ? "Yes": "No"); p += sprintf(p, "fastctxsw : %s\n", pfm_sysctl.fastctxsw > 0 ? "Yes": "No");
p += sprintf(p, "expert mode : %s\n", pfm_sysctl.expert_mode > 0 ? "Yes": "No");
p += sprintf(p, "ovfl_mask : 0x%lx\n", pmu_conf.ovfl_val); p += sprintf(p, "ovfl_mask : 0x%lx\n", pmu_conf.ovfl_val);
for(i=0; i < NR_CPUS; i++) { for(i=0; i < NR_CPUS; i++) {
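The new expert-mode line reports the sysctl that bypasses PMC/PMD value checking. A compact sketch of how such a bypass typically sits in the write path; check_pmc_value() and write_pmc_checked() are made-up names, and the toy rule stands in for the model-specific checker:

#include <stdio.h>
#include <errno.h>

/* stand-in for the model-specific PMC value checker */
static int check_pmc_value(unsigned int cnum, unsigned long val)
{
        (void)cnum;
        return (val >> 60) ? -EINVAL : 0;     /* toy rule: reject high bits */
}

static int expert_mode;                       /* toggled via /proc/sys, root only */

static int write_pmc_checked(unsigned int cnum, unsigned long val)
{
        if (expert_mode == 0) {
                int ret = check_pmc_value(cnum, val);
                if (ret)
                        return ret;           /* non-experts get their values vetted */
        }
        /* ... program the PMC here ... */
        return 0;
}

int main(void)
{
        printf("checked: %d\n", write_pmc_checked(13, 1UL << 62));
        expert_mode = 1;
        printf("expert : %d\n", write_pmc_checked(13, 1UL << 62));
        return 0;
}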
...@@ -6490,7 +6521,7 @@ pfm_inherit(struct task_struct *task, struct pt_regs *regs) ...@@ -6490,7 +6521,7 @@ pfm_inherit(struct task_struct *task, struct pt_regs *regs)
{ {
struct thread_struct *thread; struct thread_struct *thread;
DPRINT(("perfmon: pfm_inherit clearing state for [%d] current [%d]\n", task->pid, current->pid)); DPRINT(("perfmon: pfm_inherit clearing state for [%d]\n", task->pid));
thread = &task->thread; thread = &task->thread;
......
...@@ -101,6 +101,7 @@ pfm_mck_pmc_check(struct task_struct *task, pfm_context_t *ctx, unsigned int cnu ...@@ -101,6 +101,7 @@ pfm_mck_pmc_check(struct task_struct *task, pfm_context_t *ctx, unsigned int cnu
{ {
int ret = 0, check_case1 = 0; int ret = 0, check_case1 = 0;
unsigned long val8 = 0, val14 = 0, val13 = 0; unsigned long val8 = 0, val14 = 0, val13 = 0;
int is_loaded;
/* first preserve the reserved fields */ /* first preserve the reserved fields */
pfm_mck_reserved(cnum, val, regs); pfm_mck_reserved(cnum, val, regs);
...@@ -108,6 +109,8 @@ pfm_mck_pmc_check(struct task_struct *task, pfm_context_t *ctx, unsigned int cnu ...@@ -108,6 +109,8 @@ pfm_mck_pmc_check(struct task_struct *task, pfm_context_t *ctx, unsigned int cnu
/* sanity check */ /* sanity check */
if (ctx == NULL) return -EINVAL; if (ctx == NULL) return -EINVAL;
is_loaded = ctx->ctx_state == PFM_CTX_LOADED || ctx->ctx_state == PFM_CTX_MASKED;
/* /*
* we must clear the debug registers if pmc13 has a value which enable * we must clear the debug registers if pmc13 has a value which enable
* memory pipeline event constraints. In this case we need to clear the * memory pipeline event constraints. In this case we need to clear the
...@@ -120,7 +123,9 @@ pfm_mck_pmc_check(struct task_struct *task, pfm_context_t *ctx, unsigned int cnu ...@@ -120,7 +123,9 @@ pfm_mck_pmc_check(struct task_struct *task, pfm_context_t *ctx, unsigned int cnu
* *
* For now, we just check on cfg_dbrXX != 0x3. * For now, we just check on cfg_dbrXX != 0x3.
*/ */
if (cnum == 13 && ((*val & 0x18181818UL) != 0x18181818UL) && ctx->ctx_fl_using_dbreg == 0) { DPRINT(("cnum=%u val=0x%lx, using_dbreg=%d loaded=%d\n", cnum, *val, ctx->ctx_fl_using_dbreg, is_loaded));
if (cnum == 13 && is_loaded && ((*val & 0x18181818UL) != 0x18181818UL) && ctx->ctx_fl_using_dbreg == 0) {
DPRINT(("pmc[%d]=0x%lx has active pmc13 settings, clearing dbr\n", cnum, *val)); DPRINT(("pmc[%d]=0x%lx has active pmc13 settings, clearing dbr\n", cnum, *val));
...@@ -131,14 +136,14 @@ pfm_mck_pmc_check(struct task_struct *task, pfm_context_t *ctx, unsigned int cnu ...@@ -131,14 +136,14 @@ pfm_mck_pmc_check(struct task_struct *task, pfm_context_t *ctx, unsigned int cnu
* a count of 0 will mark the debug registers as in use and also * a count of 0 will mark the debug registers as in use and also
* ensure that they are properly cleared. * ensure that they are properly cleared.
*/ */
ret = pfm_write_ibr_dbr(1, ctx, NULL, 0, regs); ret = pfm_write_ibr_dbr(PFM_DATA_RR, ctx, NULL, 0, regs);
if (ret) return ret; if (ret) return ret;
} }
/* /*
* we must clear the (instruction) debug registers if any pmc14.ibrpX bit is enabled * we must clear the (instruction) debug registers if any pmc14.ibrpX bit is enabled
* before they are (fl_using_dbreg==0) to avoid picking up stale information. * before they are (fl_using_dbreg==0) to avoid picking up stale information.
*/ */
if (cnum == 14 && ((*val & 0x2222UL) != 0x2222UL) && ctx->ctx_fl_using_dbreg == 0) { if (cnum == 14 && is_loaded && ((*val & 0x2222UL) != 0x2222UL) && ctx->ctx_fl_using_dbreg == 0) {
DPRINT(("pmc[%d]=0x%lx has active pmc14 settings, clearing ibr\n", cnum, *val)); DPRINT(("pmc[%d]=0x%lx has active pmc14 settings, clearing ibr\n", cnum, *val));
...@@ -149,7 +154,7 @@ pfm_mck_pmc_check(struct task_struct *task, pfm_context_t *ctx, unsigned int cnu ...@@ -149,7 +154,7 @@ pfm_mck_pmc_check(struct task_struct *task, pfm_context_t *ctx, unsigned int cnu
* a count of 0 will mark the debug registers as in use and also * a count of 0 will mark the debug registers as in use and also
* ensure that they are properly cleared. * ensure that they are properly cleared.
*/ */
ret = pfm_write_ibr_dbr(0, ctx, NULL, 0, regs); ret = pfm_write_ibr_dbr(PFM_CODE_RR, ctx, NULL, 0, regs);
if (ret) return ret; if (ret) return ret;
} }
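Both the pmc13 and pmc14 paths now take the same guard: clearing stale IBR/DBR state only makes sense once the context is loaded (or masked), since an unloaded context has no live debug registers to sanitize. Condensed into a predicate (illustrative names; the 0x18181818 mask is the pmc13 test from the hunk above):

#include <stdio.h>

enum ctx_state { CTX_UNLOADED, CTX_LOADED, CTX_MASKED };

/* 0x18181818UL: every cfg_dbrXX field still in its inactive encoding */
static int needs_dbr_clear(enum ctx_state st, unsigned long pmc13,
                           int using_dbreg)
{
        int is_loaded = (st == CTX_LOADED || st == CTX_MASKED);

        return is_loaded && using_dbreg == 0 &&
               (pmc13 & 0x18181818UL) != 0x18181818UL;
}

int main(void)
{
        printf("%d %d\n",
               needs_dbr_clear(CTX_LOADED,   0UL, 0),    /* 1: must clear   */
               needs_dbr_clear(CTX_UNLOADED, 0UL, 0));   /* 0: nothing live */
        return 0;
}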
......
...@@ -242,9 +242,10 @@ extern int pfm_unregister_buffer_fmt(pfm_uuid_t uuid); ...@@ -242,9 +242,10 @@ extern int pfm_unregister_buffer_fmt(pfm_uuid_t uuid);
/* /*
* perfmon interface exported to modules * perfmon interface exported to modules
*/ */
extern long pfm_mod_fast_read_pmds(struct task_struct *, unsigned long mask[4], unsigned long *addr, struct pt_regs *regs); extern int pfm_mod_read_pmds(struct task_struct *, void *req, unsigned int nreq, struct pt_regs *regs);
extern long pfm_mod_read_pmds(struct task_struct *, pfarg_reg_t *req, unsigned int nreq, struct pt_regs *regs); extern int pfm_mod_write_pmcs(struct task_struct *, void *req, unsigned int nreq, struct pt_regs *regs);
extern long pfm_mod_write_pmcs(struct task_struct *, pfarg_reg_t *req, unsigned int nreq, struct pt_regs *regs); extern int pfm_mod_write_ibrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs);
extern int pfm_mod_write_dbrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs);
/* /*
* describe the content of the local_cpu_data->pfm_syst_info field * describe the content of the local_cpu_data->pfm_syst_info field
......
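A hypothetical module-side caller of the updated export list; req_t here is a stand-in sketched after the pfarg_reg_t layout, and only the pfm_mod_write_pmcs() prototype is taken from the header above:

struct pt_regs;
struct task_struct;

/* request layout sketched after the pfarg_reg_t convention */
typedef struct { unsigned int reg_num; unsigned long reg_value; } req_t;

/* prototype as exported in the updated perfmon.h */
extern int pfm_mod_write_pmcs(struct task_struct *task, void *req,
                              unsigned int nreq, struct pt_regs *regs);

static int program_one_pmc(struct task_struct *task, struct pt_regs *regs)
{
        req_t req = { .reg_num = 4, .reg_value = 0UL };

        /* one-element batch; the interface checks the call against the current task */
        return pfm_mod_write_pmcs(task, &req, 1, regs);
}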