Commit 69aeca90 authored by David Mosberger's avatar David Mosberger

Merge tiger.hpl.hp.com:/data1/bk/vanilla/linux-2.5

into tiger.hpl.hp.com:/data1/bk/lia64/to-linus-2.5
parents 3dc567d8 84725a0e
...@@ -105,37 +105,37 @@ static: cmp.eq p6,p7=6,r28 /* PAL_PTCE_INFO */ ...@@ -105,37 +105,37 @@ static: cmp.eq p6,p7=6,r28 /* PAL_PTCE_INFO */
1: cmp.eq p6,p7=15,r28 /* PAL_PERF_MON_INFO */ 1: cmp.eq p6,p7=15,r28 /* PAL_PERF_MON_INFO */
(p7) br.cond.sptk.few 1f (p7) br.cond.sptk.few 1f
mov r8=0 /* status = 0 */ mov r8=0 /* status = 0 */
movl r9 =0x12082004 /* generic=4 width=32 retired=8 cycles=18 */ movl r9 =0x08122f04 /* generic=4 width=47 retired=8 cycles=18 */
mov r10=0 /* reserved */ mov r10=0 /* reserved */
mov r11=0 /* reserved */ mov r11=0 /* reserved */
mov r16=0xffff /* implemented PMC */ mov r16=0xffff /* implemented PMC */
mov r17=0xffff /* implemented PMD */ mov r17=0x3ffff /* implemented PMD */
add r18=8,r29 /* second index */ add r18=8,r29 /* second index */
;; ;;
st8 [r29]=r16,16 /* store implemented PMC */ st8 [r29]=r16,16 /* store implemented PMC */
st8 [r18]=r0,16 /* clear remaining bits */ st8 [r18]=r0,16 /* clear remaining bits */
;; ;;
st8 [r29]=r0,16 /* store implemented PMC */ st8 [r29]=r0,16 /* clear remaining bits */
st8 [r18]=r0,16 /* clear remaining bits */ st8 [r18]=r0,16 /* clear remaining bits */
;; ;;
st8 [r29]=r17,16 /* store implemented PMD */ st8 [r29]=r17,16 /* store implemented PMD */
st8 [r18]=r0,16 /* clear remaining bits */ st8 [r18]=r0,16 /* clear remaining bits */
mov r16=0xf0 /* cycles count capable PMC */ mov r16=0xf0 /* cycles count capable PMC */
;; ;;
st8 [r29]=r0,16 /* store implemented PMC */ st8 [r29]=r0,16 /* clear remaining bits */
st8 [r18]=r0,16 /* clear remaining bits */ st8 [r18]=r0,16 /* clear remaining bits */
mov r17=0x10 /* retired bundles capable PMC */ mov r17=0xf0 /* retired bundles capable PMC */
;; ;;
st8 [r29]=r16,16 /* store cycles capable */ st8 [r29]=r16,16 /* store cycles capable */
st8 [r18]=r0,16 /* clear remaining bits */ st8 [r18]=r0,16 /* clear remaining bits */
;; ;;
st8 [r29]=r0,16 /* store implemented PMC */ st8 [r29]=r0,16 /* clear remaining bits */
st8 [r18]=r0,16 /* clear remaining bits */ st8 [r18]=r0,16 /* clear remaining bits */
;; ;;
st8 [r29]=r17,16 /* store retired bundle capable */ st8 [r29]=r17,16 /* store retired bundle capable */
st8 [r18]=r0,16 /* clear remaining bits */ st8 [r18]=r0,16 /* clear remaining bits */
;; ;;
st8 [r29]=r0,16 /* store implemented PMC */ st8 [r29]=r0,16 /* clear remaining bits */
st8 [r18]=r0,16 /* clear remaining bits */ st8 [r18]=r0,16 /* clear remaining bits */
;; ;;
1: br.cond.sptk.few rp 1: br.cond.sptk.few rp
......
...@@ -39,7 +39,7 @@ extern efi_status_t efi_call_phys (void *, ...); ...@@ -39,7 +39,7 @@ extern efi_status_t efi_call_phys (void *, ...);
struct efi efi; struct efi efi;
EXPORT_SYMBOL(efi); EXPORT_SYMBOL(efi);
static efi_runtime_services_t *runtime; static efi_runtime_services_t *runtime;
static unsigned long mem_limit = ~0UL; static unsigned long mem_limit = ~0UL, max_addr = ~0UL;
#define efi_call_virt(f, args...) (*(f))(args) #define efi_call_virt(f, args...) (*(f))(args)
...@@ -290,6 +290,7 @@ efi_memmap_walk (efi_freemem_callback_t callback, void *arg) ...@@ -290,6 +290,7 @@ efi_memmap_walk (efi_freemem_callback_t callback, void *arg)
void *efi_map_start, *efi_map_end, *p, *q; void *efi_map_start, *efi_map_end, *p, *q;
efi_memory_desc_t *md, *check_md; efi_memory_desc_t *md, *check_md;
u64 efi_desc_size, start, end, granule_addr, last_granule_addr, first_non_wb_addr = 0; u64 efi_desc_size, start, end, granule_addr, last_granule_addr, first_non_wb_addr = 0;
unsigned long total_mem = 0;
efi_map_start = __va(ia64_boot_param->efi_memmap); efi_map_start = __va(ia64_boot_param->efi_memmap);
efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size; efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
...@@ -331,12 +332,18 @@ efi_memmap_walk (efi_freemem_callback_t callback, void *arg) ...@@ -331,12 +332,18 @@ efi_memmap_walk (efi_freemem_callback_t callback, void *arg)
trim_top(md, last_granule_addr); trim_top(md, last_granule_addr);
if (is_available_memory(md)) { if (is_available_memory(md)) {
if (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) > mem_limit) { if (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) > max_addr) {
if (md->phys_addr > mem_limit) if (md->phys_addr > max_addr)
continue; continue;
md->num_pages = (mem_limit - md->phys_addr) >> EFI_PAGE_SHIFT; md->num_pages = (max_addr - md->phys_addr) >> EFI_PAGE_SHIFT;
} }
if (total_mem >= mem_limit)
continue;
total_mem += (md->num_pages << EFI_PAGE_SHIFT);
if (total_mem > mem_limit)
md->num_pages -= ((total_mem - mem_limit) >> EFI_PAGE_SHIFT);
if (md->num_pages == 0) if (md->num_pages == 0)
continue; continue;
...@@ -470,7 +477,13 @@ efi_init (void) ...@@ -470,7 +477,13 @@ efi_init (void)
for (cp = saved_command_line; *cp; ) { for (cp = saved_command_line; *cp; ) {
if (memcmp(cp, "mem=", 4) == 0) { if (memcmp(cp, "mem=", 4) == 0) {
cp += 4; cp += 4;
mem_limit = memparse(cp, &end) - 1; mem_limit = memparse(cp, &end) - 2;
if (end != cp)
break;
cp = end;
} else if (memcmp(cp, "max_addr=", 9) == 0) {
cp += 9;
max_addr = memparse(cp, &end) - 1;
if (end != cp) if (end != cp)
break; break;
cp = end; cp = end;
...@@ -481,8 +494,8 @@ efi_init (void) ...@@ -481,8 +494,8 @@ efi_init (void)
++cp; ++cp;
} }
} }
if (mem_limit != ~0UL) if (max_addr != ~0UL)
printk(KERN_INFO "Ignoring memory above %luMB\n", mem_limit >> 20); printk(KERN_INFO "Ignoring memory above %luMB\n", max_addr >> 20);
efi.systab = __va(ia64_boot_param->efi_systab); efi.systab = __va(ia64_boot_param->efi_systab);
......
...@@ -574,6 +574,10 @@ GLOBAL_ENTRY(fsys_bubble_down) ...@@ -574,6 +574,10 @@ GLOBAL_ENTRY(fsys_bubble_down)
or r29=r8,r29 // construct cr.ipsr value to save or r29=r8,r29 // construct cr.ipsr value to save
addl r22=IA64_RBS_OFFSET,r2 // compute base of RBS addl r22=IA64_RBS_OFFSET,r2 // compute base of RBS
;; ;;
// GAS reports a spurious RAW hazard on the read of ar.rnat because it thinks
// we may be reading ar.itc after writing to psr.l. Avoid that message with
// this directive:
dv_serialize_data
mov.m r24=ar.rnat // read ar.rnat (5 cyc lat) mov.m r24=ar.rnat // read ar.rnat (5 cyc lat)
lfetch.fault.excl.nt1 [r22] lfetch.fault.excl.nt1 [r22]
adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r2 adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r2
......
...@@ -818,7 +818,8 @@ END(ia64_delay_loop) ...@@ -818,7 +818,8 @@ END(ia64_delay_loop)
GLOBAL_ENTRY(start_kernel_thread) GLOBAL_ENTRY(start_kernel_thread)
.prologue .prologue
.save rp, r0 // this is the end of the call-chain .save rp, r4 // this is the end of the call-chain
mov r4=r0
.body .body
alloc r2 = ar.pfs, 0, 0, 2, 0 alloc r2 = ar.pfs, 0, 0, 2, 0
mov out0 = r9 mov out0 = r9
......
...@@ -181,6 +181,12 @@ ENTRY(vhpt_miss) ...@@ -181,6 +181,12 @@ ENTRY(vhpt_miss)
(p7) itc.d r24 (p7) itc.d r24
;; ;;
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
/*
* Tell the assemblers dependency-violation checker that the above "itc" instructions
* cannot possibly affect the following loads:
*/
dv_serialize_data
/* /*
* Re-check L2 and L3 pagetable. If they changed, we may have received a ptc.g * Re-check L2 and L3 pagetable. If they changed, we may have received a ptc.g
* between reading the pagetable and the "itc". If so, flush the entry we * between reading the pagetable and the "itc". If so, flush the entry we
...@@ -229,6 +235,12 @@ ENTRY(itlb_miss) ...@@ -229,6 +235,12 @@ ENTRY(itlb_miss)
itc.i r18 itc.i r18
;; ;;
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
/*
* Tell the assemblers dependency-violation checker that the above "itc" instructions
* cannot possibly affect the following loads:
*/
dv_serialize_data
ld8 r19=[r17] // read L3 PTE again and see if same ld8 r19=[r17] // read L3 PTE again and see if same
mov r20=PAGE_SHIFT<<2 // setup page size for purge mov r20=PAGE_SHIFT<<2 // setup page size for purge
;; ;;
...@@ -267,6 +279,12 @@ dtlb_fault: ...@@ -267,6 +279,12 @@ dtlb_fault:
itc.d r18 itc.d r18
;; ;;
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
/*
* Tell the assemblers dependency-violation checker that the above "itc" instructions
* cannot possibly affect the following loads:
*/
dv_serialize_data
ld8 r19=[r17] // read L3 PTE again and see if same ld8 r19=[r17] // read L3 PTE again and see if same
mov r20=PAGE_SHIFT<<2 // setup page size for purge mov r20=PAGE_SHIFT<<2 // setup page size for purge
;; ;;
...@@ -504,6 +522,12 @@ ENTRY(dirty_bit) ...@@ -504,6 +522,12 @@ ENTRY(dirty_bit)
;; ;;
(p6) itc.d r25 // install updated PTE (p6) itc.d r25 // install updated PTE
;; ;;
/*
* Tell the assemblers dependency-violation checker that the above "itc" instructions
* cannot possibly affect the following loads:
*/
dv_serialize_data
ld8 r18=[r17] // read PTE again ld8 r18=[r17] // read PTE again
;; ;;
cmp.eq p6,p7=r18,r25 // is it same as the newly installed cmp.eq p6,p7=r18,r25 // is it same as the newly installed
...@@ -563,6 +587,12 @@ ENTRY(iaccess_bit) ...@@ -563,6 +587,12 @@ ENTRY(iaccess_bit)
;; ;;
(p6) itc.i r25 // install updated PTE (p6) itc.i r25 // install updated PTE
;; ;;
/*
* Tell the assemblers dependency-violation checker that the above "itc" instructions
* cannot possibly affect the following loads:
*/
dv_serialize_data
ld8 r18=[r17] // read PTE again ld8 r18=[r17] // read PTE again
;; ;;
cmp.eq p6,p7=r18,r25 // is it same as the newly installed cmp.eq p6,p7=r18,r25 // is it same as the newly installed
...@@ -610,6 +640,11 @@ ENTRY(daccess_bit) ...@@ -610,6 +640,11 @@ ENTRY(daccess_bit)
cmp.eq p6,p7=r26,r18 cmp.eq p6,p7=r26,r18
;; ;;
(p6) itc.d r25 // install updated PTE (p6) itc.d r25 // install updated PTE
/*
* Tell the assemblers dependency-violation checker that the above "itc" instructions
* cannot possibly affect the following loads:
*/
dv_serialize_data
;; ;;
ld8 r18=[r17] // read PTE again ld8 r18=[r17] // read PTE again
;; ;;
......
...@@ -86,27 +86,25 @@ ...@@ -86,27 +86,25 @@
#define PFM_REG_CONFIG (0x8<<4|PFM_REG_IMPL) /* configuration register */ #define PFM_REG_CONFIG (0x8<<4|PFM_REG_IMPL) /* configuration register */
#define PFM_REG_BUFFER (0xc<<4|PFM_REG_IMPL) /* PMD used as buffer */ #define PFM_REG_BUFFER (0xc<<4|PFM_REG_IMPL) /* PMD used as buffer */
#define PMC_IS_LAST(i) (pmu_conf.pmc_desc[i].type & PFM_REG_END) #define PMC_IS_LAST(i) (pmu_conf->pmc_desc[i].type & PFM_REG_END)
#define PMD_IS_LAST(i) (pmu_conf.pmd_desc[i].type & PFM_REG_END) #define PMD_IS_LAST(i) (pmu_conf->pmd_desc[i].type & PFM_REG_END)
#define PFM_IS_DISABLED() (pmu_conf.enabled == 0)
#define PMC_OVFL_NOTIFY(ctx, i) ((ctx)->ctx_pmds[i].flags & PFM_REGFL_OVFL_NOTIFY) #define PMC_OVFL_NOTIFY(ctx, i) ((ctx)->ctx_pmds[i].flags & PFM_REGFL_OVFL_NOTIFY)
/* i assumed unsigned */ /* i assumed unsigned */
#define PMC_IS_IMPL(i) (i< PMU_MAX_PMCS && (pmu_conf.pmc_desc[i].type & PFM_REG_IMPL)) #define PMC_IS_IMPL(i) (i< PMU_MAX_PMCS && (pmu_conf->pmc_desc[i].type & PFM_REG_IMPL))
#define PMD_IS_IMPL(i) (i< PMU_MAX_PMDS && (pmu_conf.pmd_desc[i].type & PFM_REG_IMPL)) #define PMD_IS_IMPL(i) (i< PMU_MAX_PMDS && (pmu_conf->pmd_desc[i].type & PFM_REG_IMPL))
/* XXX: these assume that register i is implemented */ /* XXX: these assume that register i is implemented */
#define PMD_IS_COUNTING(i) ((pmu_conf.pmd_desc[i].type & PFM_REG_COUNTING) == PFM_REG_COUNTING) #define PMD_IS_COUNTING(i) ((pmu_conf->pmd_desc[i].type & PFM_REG_COUNTING) == PFM_REG_COUNTING)
#define PMC_IS_COUNTING(i) ((pmu_conf.pmc_desc[i].type & PFM_REG_COUNTING) == PFM_REG_COUNTING) #define PMC_IS_COUNTING(i) ((pmu_conf->pmc_desc[i].type & PFM_REG_COUNTING) == PFM_REG_COUNTING)
#define PMC_IS_MONITOR(i) ((pmu_conf.pmc_desc[i].type & PFM_REG_MONITOR) == PFM_REG_MONITOR) #define PMC_IS_MONITOR(i) ((pmu_conf->pmc_desc[i].type & PFM_REG_MONITOR) == PFM_REG_MONITOR)
#define PMC_IS_CONTROL(i) ((pmu_conf.pmc_desc[i].type & PFM_REG_CONTROL) == PFM_REG_CONTROL) #define PMC_IS_CONTROL(i) ((pmu_conf->pmc_desc[i].type & PFM_REG_CONTROL) == PFM_REG_CONTROL)
#define PMC_DFL_VAL(i) pmu_conf.pmc_desc[i].default_value #define PMC_DFL_VAL(i) pmu_conf->pmc_desc[i].default_value
#define PMC_RSVD_MASK(i) pmu_conf.pmc_desc[i].reserved_mask #define PMC_RSVD_MASK(i) pmu_conf->pmc_desc[i].reserved_mask
#define PMD_PMD_DEP(i) pmu_conf.pmd_desc[i].dep_pmd[0] #define PMD_PMD_DEP(i) pmu_conf->pmd_desc[i].dep_pmd[0]
#define PMC_PMD_DEP(i) pmu_conf.pmc_desc[i].dep_pmd[0] #define PMC_PMD_DEP(i) pmu_conf->pmc_desc[i].dep_pmd[0]
#define PFM_NUM_IBRS IA64_NUM_DBG_REGS #define PFM_NUM_IBRS IA64_NUM_DBG_REGS
#define PFM_NUM_DBRS IA64_NUM_DBG_REGS #define PFM_NUM_DBRS IA64_NUM_DBG_REGS
...@@ -133,6 +131,8 @@ ...@@ -133,6 +131,8 @@
#define PFM_CPUINFO_SET(v) pfm_get_cpu_var(pfm_syst_info) |= (v) #define PFM_CPUINFO_SET(v) pfm_get_cpu_var(pfm_syst_info) |= (v)
#define PFM_CPUINFO_GET() pfm_get_cpu_var(pfm_syst_info) #define PFM_CPUINFO_GET() pfm_get_cpu_var(pfm_syst_info)
#define RDEP(x) (1UL<<(x))
/* /*
* context protection macros * context protection macros
* in SMP: * in SMP:
...@@ -374,26 +374,32 @@ typedef struct { ...@@ -374,26 +374,32 @@ typedef struct {
* dep_pmd[]: a bitmask of dependent PMD registers * dep_pmd[]: a bitmask of dependent PMD registers
* dep_pmc[]: a bitmask of dependent PMC registers * dep_pmc[]: a bitmask of dependent PMC registers
*/ */
typedef int (*pfm_reg_check_t)(struct task_struct *task, pfm_context_t *ctx, unsigned int cnum, unsigned long *val, struct pt_regs *regs);
typedef struct { typedef struct {
unsigned int type; unsigned int type;
int pm_pos; int pm_pos;
unsigned long default_value; /* power-on default value */ unsigned long default_value; /* power-on default value */
unsigned long reserved_mask; /* bitmask of reserved bits */ unsigned long reserved_mask; /* bitmask of reserved bits */
int (*read_check)(struct task_struct *task, pfm_context_t *ctx, unsigned int cnum, unsigned long *val, struct pt_regs *regs); pfm_reg_check_t read_check;
int (*write_check)(struct task_struct *task, pfm_context_t *ctx, unsigned int cnum, unsigned long *val, struct pt_regs *regs); pfm_reg_check_t write_check;
unsigned long dep_pmd[4]; unsigned long dep_pmd[4];
unsigned long dep_pmc[4]; unsigned long dep_pmc[4];
} pfm_reg_desc_t; } pfm_reg_desc_t;
/* assume cnum is a valid monitor */ /* assume cnum is a valid monitor */
#define PMC_PM(cnum, val) (((val) >> (pmu_conf.pmc_desc[cnum].pm_pos)) & 0x1) #define PMC_PM(cnum, val) (((val) >> (pmu_conf->pmc_desc[cnum].pm_pos)) & 0x1)
#define PMC_WR_FUNC(cnum) (pmu_conf.pmc_desc[cnum].write_check)
#define PMD_WR_FUNC(cnum) (pmu_conf.pmd_desc[cnum].write_check)
#define PMD_RD_FUNC(cnum) (pmu_conf.pmd_desc[cnum].read_check)
/* /*
* This structure is initialized at boot time and contains * This structure is initialized at boot time and contains
* a description of the PMU main characteristics. * a description of the PMU main characteristics.
*
* If the probe function is defined, detection is based
* on its return value:
* - 0 means recognized PMU
* - anything else means not supported
* When the probe function is not defined, then the pmu_family field
* is used and it must match the host CPU family such that:
* - cpu->family & config->pmu_family != 0
*/ */
typedef struct { typedef struct {
unsigned long ovfl_val; /* overflow value for counters */ unsigned long ovfl_val; /* overflow value for counters */
...@@ -407,15 +413,18 @@ typedef struct { ...@@ -407,15 +413,18 @@ typedef struct {
unsigned long impl_pmds[4]; /* bitmask of implemented PMDS */ unsigned long impl_pmds[4]; /* bitmask of implemented PMDS */
char *pmu_name; /* PMU family name */ char *pmu_name; /* PMU family name */
unsigned int enabled; /* indicates if perfmon initialized properly */
unsigned int pmu_family; /* cpuid family pattern used to identify pmu */ unsigned int pmu_family; /* cpuid family pattern used to identify pmu */
unsigned int flags; /* pmu specific flags */
unsigned int num_ibrs; /* number of IBRS: computed at init time */ unsigned int num_ibrs; /* number of IBRS: computed at init time */
unsigned int num_dbrs; /* number of DBRS: computed at init time */ unsigned int num_dbrs; /* number of DBRS: computed at init time */
unsigned int num_counters; /* PMC/PMD counting pairs : computed at init time */ unsigned int num_counters; /* PMC/PMD counting pairs : computed at init time */
int (*probe)(void); /* customized probe routine */
unsigned int use_rr_dbregs:1; /* set if debug registers used for range restriction */ unsigned int use_rr_dbregs:1; /* set if debug registers used for range restriction */
} pmu_config_t; } pmu_config_t;
/*
* PMU specific flags
*/
#define PFM_PMU_IRQ_RESEND 1 /* PMU needs explicit IRQ resend */
/* /*
* debug register related type definitions * debug register related type definitions
...@@ -500,6 +509,8 @@ static pfm_uuid_t pfm_null_uuid = {0,}; ...@@ -500,6 +509,8 @@ static pfm_uuid_t pfm_null_uuid = {0,};
static spinlock_t pfm_buffer_fmt_lock; static spinlock_t pfm_buffer_fmt_lock;
static LIST_HEAD(pfm_buffer_fmt_list); static LIST_HEAD(pfm_buffer_fmt_list);
static pmu_config_t *pmu_conf;
/* sysctl() controls */ /* sysctl() controls */
static pfm_sysctl_t pfm_sysctl; static pfm_sysctl_t pfm_sysctl;
int pfm_debug_var; int pfm_debug_var;
...@@ -620,20 +631,19 @@ static void pfm_lazy_save_regs (struct task_struct *ta); ...@@ -620,20 +631,19 @@ static void pfm_lazy_save_regs (struct task_struct *ta);
#endif #endif
void dump_pmu_state(const char *); void dump_pmu_state(const char *);
static int pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
/*
* the HP simulator must be first because
* CONFIG_IA64_HP_SIM is independent of CONFIG_MCKINLEY or CONFIG_ITANIUM
*/
#if defined(CONFIG_IA64_HP_SIM)
#include "perfmon_hpsim.h"
#elif defined(CONFIG_ITANIUM)
#include "perfmon_itanium.h" #include "perfmon_itanium.h"
#elif defined(CONFIG_MCKINLEY)
#include "perfmon_mckinley.h" #include "perfmon_mckinley.h"
#else
#include "perfmon_generic.h" #include "perfmon_generic.h"
#endif
static pmu_config_t *pmu_confs[]={
&pmu_conf_mck,
&pmu_conf_ita,
&pmu_conf_gen, /* must be last */
NULL
};
static int pfm_end_notify_user(pfm_context_t *ctx); static int pfm_end_notify_user(pfm_context_t *ctx);
...@@ -723,7 +733,7 @@ pfm_restore_dbrs(unsigned long *dbrs, unsigned int ndbrs) ...@@ -723,7 +733,7 @@ pfm_restore_dbrs(unsigned long *dbrs, unsigned int ndbrs)
static inline unsigned long static inline unsigned long
pfm_read_soft_counter(pfm_context_t *ctx, int i) pfm_read_soft_counter(pfm_context_t *ctx, int i)
{ {
return ctx->ctx_pmds[i].val + (ia64_get_pmd(i) & pmu_conf.ovfl_val); return ctx->ctx_pmds[i].val + (ia64_get_pmd(i) & pmu_conf->ovfl_val);
} }
/* /*
...@@ -732,7 +742,7 @@ pfm_read_soft_counter(pfm_context_t *ctx, int i) ...@@ -732,7 +742,7 @@ pfm_read_soft_counter(pfm_context_t *ctx, int i)
static inline void static inline void
pfm_write_soft_counter(pfm_context_t *ctx, int i, unsigned long val) pfm_write_soft_counter(pfm_context_t *ctx, int i, unsigned long val)
{ {
unsigned long ovfl_val = pmu_conf.ovfl_val; unsigned long ovfl_val = pmu_conf->ovfl_val;
ctx->ctx_pmds[i].val = val & ~ovfl_val; ctx->ctx_pmds[i].val = val & ~ovfl_val;
/* /*
...@@ -878,7 +888,7 @@ pfm_mask_monitoring(struct task_struct *task) ...@@ -878,7 +888,7 @@ pfm_mask_monitoring(struct task_struct *task)
DPRINT_ovfl(("masking monitoring for [%d]\n", task->pid)); DPRINT_ovfl(("masking monitoring for [%d]\n", task->pid));
ovfl_mask = pmu_conf.ovfl_val; ovfl_mask = pmu_conf->ovfl_val;
/* /*
* monitoring can only be masked as a result of a valid * monitoring can only be masked as a result of a valid
* counter overflow. In UP, it means that the PMU still * counter overflow. In UP, it means that the PMU still
...@@ -953,7 +963,7 @@ pfm_restore_monitoring(struct task_struct *task) ...@@ -953,7 +963,7 @@ pfm_restore_monitoring(struct task_struct *task)
int i, is_system; int i, is_system;
is_system = ctx->ctx_fl_system; is_system = ctx->ctx_fl_system;
ovfl_mask = pmu_conf.ovfl_val; ovfl_mask = pmu_conf->ovfl_val;
if (task != current) { if (task != current) {
printk(KERN_ERR "perfmon.%d: invalid task[%d] current[%d]\n", __LINE__, task->pid, current->pid); printk(KERN_ERR "perfmon.%d: invalid task[%d] current[%d]\n", __LINE__, task->pid, current->pid);
...@@ -1024,8 +1034,8 @@ pfm_restore_monitoring(struct task_struct *task) ...@@ -1024,8 +1034,8 @@ pfm_restore_monitoring(struct task_struct *task)
* XXX: need to optimize * XXX: need to optimize
*/ */
if (ctx->ctx_fl_using_dbreg) { if (ctx->ctx_fl_using_dbreg) {
pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf.num_ibrs); pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf.num_dbrs); pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
} }
/* /*
...@@ -1058,7 +1068,7 @@ static inline void ...@@ -1058,7 +1068,7 @@ static inline void
pfm_restore_pmds(unsigned long *pmds, unsigned long mask) pfm_restore_pmds(unsigned long *pmds, unsigned long mask)
{ {
int i; int i;
unsigned long val, ovfl_val = pmu_conf.ovfl_val; unsigned long val, ovfl_val = pmu_conf->ovfl_val;
for (i=0; mask; i++, mask>>=1) { for (i=0; mask; i++, mask>>=1) {
if ((mask & 0x1) == 0) continue; if ((mask & 0x1) == 0) continue;
...@@ -1075,7 +1085,7 @@ static inline void ...@@ -1075,7 +1085,7 @@ static inline void
pfm_copy_pmds(struct task_struct *task, pfm_context_t *ctx) pfm_copy_pmds(struct task_struct *task, pfm_context_t *ctx)
{ {
struct thread_struct *thread = &task->thread; struct thread_struct *thread = &task->thread;
unsigned long ovfl_val = pmu_conf.ovfl_val; unsigned long ovfl_val = pmu_conf->ovfl_val;
unsigned long mask = ctx->ctx_all_pmds[0]; unsigned long mask = ctx->ctx_all_pmds[0];
unsigned long val; unsigned long val;
int i; int i;
...@@ -2513,12 +2523,12 @@ pfm_reset_pmu_state(pfm_context_t *ctx) ...@@ -2513,12 +2523,12 @@ pfm_reset_pmu_state(pfm_context_t *ctx)
* *
* PMC0 is treated differently. * PMC0 is treated differently.
*/ */
ctx->ctx_all_pmcs[0] = pmu_conf.impl_pmcs[0] & ~0x1; ctx->ctx_all_pmcs[0] = pmu_conf->impl_pmcs[0] & ~0x1;
/* /*
* bitmask of all PMDs that are accesible to this context * bitmask of all PMDs that are accesible to this context
*/ */
ctx->ctx_all_pmds[0] = pmu_conf.impl_pmds[0]; ctx->ctx_all_pmds[0] = pmu_conf->impl_pmds[0];
DPRINT(("<%d> all_pmcs=0x%lx all_pmds=0x%lx\n", ctx->ctx_fd, ctx->ctx_all_pmcs[0],ctx->ctx_all_pmds[0])); DPRINT(("<%d> all_pmcs=0x%lx all_pmds=0x%lx\n", ctx->ctx_fd, ctx->ctx_all_pmcs[0],ctx->ctx_all_pmds[0]));
...@@ -2858,16 +2868,17 @@ pfm_write_pmcs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) ...@@ -2858,16 +2868,17 @@ pfm_write_pmcs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
unsigned long value, pmc_pm; unsigned long value, pmc_pm;
unsigned long smpl_pmds, reset_pmds, impl_pmds; unsigned long smpl_pmds, reset_pmds, impl_pmds;
unsigned int cnum, reg_flags, flags, pmc_type; unsigned int cnum, reg_flags, flags, pmc_type;
int i, can_access_pmu = 0, is_loaded, is_system; int i, can_access_pmu = 0, is_loaded, is_system, expert_mode;
int is_monitor, is_counting, state; int is_monitor, is_counting, state;
int ret = -EINVAL; int ret = -EINVAL;
pfm_reg_check_t wr_func;
#define PFM_CHECK_PMC_PM(x, y, z) ((x)->ctx_fl_system ^ PMC_PM(y, z)) #define PFM_CHECK_PMC_PM(x, y, z) ((x)->ctx_fl_system ^ PMC_PM(y, z))
state = ctx->ctx_state; state = ctx->ctx_state;
is_loaded = state == PFM_CTX_LOADED ? 1 : 0; is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
is_system = ctx->ctx_fl_system; is_system = ctx->ctx_fl_system;
task = ctx->ctx_task; task = ctx->ctx_task;
impl_pmds = pmu_conf.impl_pmds[0]; impl_pmds = pmu_conf->impl_pmds[0];
if (state == PFM_CTX_ZOMBIE) return -EINVAL; if (state == PFM_CTX_ZOMBIE) return -EINVAL;
...@@ -2884,6 +2895,7 @@ pfm_write_pmcs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) ...@@ -2884,6 +2895,7 @@ pfm_write_pmcs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
} }
can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0; can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
} }
expert_mode = pfm_sysctl.expert_mode;
for (i = 0; i < count; i++, req++) { for (i = 0; i < count; i++, req++) {
...@@ -2900,8 +2912,8 @@ pfm_write_pmcs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) ...@@ -2900,8 +2912,8 @@ pfm_write_pmcs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
goto error; goto error;
} }
pmc_type = pmu_conf.pmc_desc[cnum].type; pmc_type = pmu_conf->pmc_desc[cnum].type;
pmc_pm = (value >> pmu_conf.pmc_desc[cnum].pm_pos) & 0x1; pmc_pm = (value >> pmu_conf->pmc_desc[cnum].pm_pos) & 0x1;
is_counting = (pmc_type & PFM_REG_COUNTING) == PFM_REG_COUNTING ? 1 : 0; is_counting = (pmc_type & PFM_REG_COUNTING) == PFM_REG_COUNTING ? 1 : 0;
is_monitor = (pmc_type & PFM_REG_MONITOR) == PFM_REG_MONITOR ? 1 : 0; is_monitor = (pmc_type & PFM_REG_MONITOR) == PFM_REG_MONITOR ? 1 : 0;
...@@ -2914,6 +2926,7 @@ pfm_write_pmcs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) ...@@ -2914,6 +2926,7 @@ pfm_write_pmcs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
DPRINT(("pmc%u is unimplemented or no-access pmc_type=%x\n", cnum, pmc_type)); DPRINT(("pmc%u is unimplemented or no-access pmc_type=%x\n", cnum, pmc_type));
goto error; goto error;
} }
wr_func = pmu_conf->pmc_desc[cnum].write_check;
/* /*
* If the PMC is a monitor, then if the value is not the default: * If the PMC is a monitor, then if the value is not the default:
* - system-wide session: PMCx.pm=1 (privileged monitor) * - system-wide session: PMCx.pm=1 (privileged monitor)
...@@ -2962,8 +2975,8 @@ pfm_write_pmcs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) ...@@ -2962,8 +2975,8 @@ pfm_write_pmcs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
/* /*
* execute write checker, if any * execute write checker, if any
*/ */
if (pfm_sysctl.expert_mode == 0 && PMC_WR_FUNC(cnum)) { if (likely(expert_mode == 0 && wr_func)) {
ret = PMC_WR_FUNC(cnum)(task, ctx, cnum, &value, regs); ret = (*wr_func)(task, ctx, cnum, &value, regs);
if (ret) goto error; if (ret) goto error;
ret = -EINVAL; ret = -EINVAL;
} }
...@@ -3014,7 +3027,7 @@ pfm_write_pmcs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) ...@@ -3014,7 +3027,7 @@ pfm_write_pmcs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
* PMD. Clearing is done indirectly via pfm_reset_pmu_state() so there is no * PMD. Clearing is done indirectly via pfm_reset_pmu_state() so there is no
* possible leak here. * possible leak here.
*/ */
CTX_USED_PMD(ctx, pmu_conf.pmc_desc[cnum].dep_pmd[0]); CTX_USED_PMD(ctx, pmu_conf->pmc_desc[cnum].dep_pmd[0]);
/* /*
* keep track of the monitor PMC that we are using. * keep track of the monitor PMC that we are using.
...@@ -3096,14 +3109,15 @@ pfm_write_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) ...@@ -3096,14 +3109,15 @@ pfm_write_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
unsigned long value, hw_value, ovfl_mask; unsigned long value, hw_value, ovfl_mask;
unsigned int cnum; unsigned int cnum;
int i, can_access_pmu = 0, state; int i, can_access_pmu = 0, state;
int is_counting, is_loaded, is_system; int is_counting, is_loaded, is_system, expert_mode;
int ret = -EINVAL; int ret = -EINVAL;
pfm_reg_check_t wr_func;
state = ctx->ctx_state; state = ctx->ctx_state;
is_loaded = state == PFM_CTX_LOADED ? 1 : 0; is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
is_system = ctx->ctx_fl_system; is_system = ctx->ctx_fl_system;
ovfl_mask = pmu_conf.ovfl_val; ovfl_mask = pmu_conf->ovfl_val;
task = ctx->ctx_task; task = ctx->ctx_task;
if (unlikely(state == PFM_CTX_ZOMBIE)) return -EINVAL; if (unlikely(state == PFM_CTX_ZOMBIE)) return -EINVAL;
...@@ -3125,6 +3139,7 @@ pfm_write_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) ...@@ -3125,6 +3139,7 @@ pfm_write_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
} }
can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0; can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
} }
expert_mode = pfm_sysctl.expert_mode;
for (i = 0; i < count; i++, req++) { for (i = 0; i < count; i++, req++) {
...@@ -3136,14 +3151,15 @@ pfm_write_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) ...@@ -3136,14 +3151,15 @@ pfm_write_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
goto abort_mission; goto abort_mission;
} }
is_counting = PMD_IS_COUNTING(cnum); is_counting = PMD_IS_COUNTING(cnum);
wr_func = pmu_conf->pmd_desc[cnum].write_check;
/* /*
* execute write checker, if any * execute write checker, if any
*/ */
if (pfm_sysctl.expert_mode == 0 && PMD_WR_FUNC(cnum)) { if (unlikely(expert_mode == 0 && wr_func)) {
unsigned long v = value; unsigned long v = value;
ret = PMD_WR_FUNC(cnum)(task, ctx, cnum, &v, regs); ret = (*wr_func)(task, ctx, cnum, &v, regs);
if (ret) goto abort_mission; if (ret) goto abort_mission;
value = v; value = v;
...@@ -3289,8 +3305,9 @@ pfm_read_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) ...@@ -3289,8 +3305,9 @@ pfm_read_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
pfarg_reg_t *req = (pfarg_reg_t *)arg; pfarg_reg_t *req = (pfarg_reg_t *)arg;
unsigned int cnum, reg_flags = 0; unsigned int cnum, reg_flags = 0;
int i, can_access_pmu = 0, state; int i, can_access_pmu = 0, state;
int is_loaded, is_system, is_counting; int is_loaded, is_system, is_counting, expert_mode;
int ret = -EINVAL; int ret = -EINVAL;
pfm_reg_check_t rd_func;
/* /*
* access is possible when loaded only for * access is possible when loaded only for
...@@ -3300,7 +3317,7 @@ pfm_read_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) ...@@ -3300,7 +3317,7 @@ pfm_read_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
state = ctx->ctx_state; state = ctx->ctx_state;
is_loaded = state == PFM_CTX_LOADED ? 1 : 0; is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
is_system = ctx->ctx_fl_system; is_system = ctx->ctx_fl_system;
ovfl_mask = pmu_conf.ovfl_val; ovfl_mask = pmu_conf->ovfl_val;
task = ctx->ctx_task; task = ctx->ctx_task;
if (state == PFM_CTX_ZOMBIE) return -EINVAL; if (state == PFM_CTX_ZOMBIE) return -EINVAL;
...@@ -3323,6 +3340,7 @@ pfm_read_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) ...@@ -3323,6 +3340,7 @@ pfm_read_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
if (can_access_pmu) ia64_srlz_d(); if (can_access_pmu) ia64_srlz_d();
} }
expert_mode = pfm_sysctl.expert_mode;
DPRINT(("loaded=%d access_pmu=%d ctx_state=%d\n", DPRINT(("loaded=%d access_pmu=%d ctx_state=%d\n",
is_loaded, is_loaded,
...@@ -3369,6 +3387,7 @@ pfm_read_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) ...@@ -3369,6 +3387,7 @@ pfm_read_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
*/ */
val = is_loaded ? thread->pmds[cnum] : 0UL; val = is_loaded ? thread->pmds[cnum] : 0UL;
} }
rd_func = pmu_conf->pmd_desc[cnum].read_check;
if (is_counting) { if (is_counting) {
/* /*
...@@ -3381,9 +3400,9 @@ pfm_read_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) ...@@ -3381,9 +3400,9 @@ pfm_read_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
/* /*
* execute read checker, if any * execute read checker, if any
*/ */
if (unlikely(pfm_sysctl.expert_mode == 0 && PMD_RD_FUNC(cnum))) { if (unlikely(expert_mode == 0 && rd_func)) {
unsigned long v = val; unsigned long v = val;
ret = PMD_RD_FUNC(cnum)(ctx->ctx_task, ctx, cnum, &v, regs); ret = (*rd_func)(ctx->ctx_task, ctx, cnum, &v, regs);
if (ret) goto error; if (ret) goto error;
val = v; val = v;
ret = -EINVAL; ret = -EINVAL;
...@@ -3463,7 +3482,7 @@ pfm_use_debug_registers(struct task_struct *task) ...@@ -3463,7 +3482,7 @@ pfm_use_debug_registers(struct task_struct *task)
unsigned long flags; unsigned long flags;
int ret = 0; int ret = 0;
if (pmu_conf.use_rr_dbregs == 0) return 0; if (pmu_conf->use_rr_dbregs == 0) return 0;
DPRINT(("called for [%d]\n", task->pid)); DPRINT(("called for [%d]\n", task->pid));
...@@ -3517,7 +3536,7 @@ pfm_release_debug_registers(struct task_struct *task) ...@@ -3517,7 +3536,7 @@ pfm_release_debug_registers(struct task_struct *task)
unsigned long flags; unsigned long flags;
int ret; int ret;
if (pmu_conf.use_rr_dbregs == 0) return 0; if (pmu_conf->use_rr_dbregs == 0) return 0;
LOCK_PFS(flags); LOCK_PFS(flags);
if (pfm_sessions.pfs_ptrace_use_dbregs == 0) { if (pfm_sessions.pfs_ptrace_use_dbregs == 0) {
...@@ -3720,7 +3739,7 @@ pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_ ...@@ -3720,7 +3739,7 @@ pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_
int i, can_access_pmu = 0; int i, can_access_pmu = 0;
int is_system, is_loaded; int is_system, is_loaded;
if (pmu_conf.use_rr_dbregs == 0) return -EINVAL; if (pmu_conf->use_rr_dbregs == 0) return -EINVAL;
state = ctx->ctx_state; state = ctx->ctx_state;
is_loaded = state == PFM_CTX_LOADED ? 1 : 0; is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
...@@ -3802,12 +3821,12 @@ pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_ ...@@ -3802,12 +3821,12 @@ pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_
*/ */
if (first_time && can_access_pmu) { if (first_time && can_access_pmu) {
DPRINT(("[%d] clearing ibrs, dbrs\n", task->pid)); DPRINT(("[%d] clearing ibrs, dbrs\n", task->pid));
for (i=0; i < pmu_conf.num_ibrs; i++) { for (i=0; i < pmu_conf->num_ibrs; i++) {
ia64_set_ibr(i, 0UL); ia64_set_ibr(i, 0UL);
ia64_srlz_i(); ia64_srlz_i();
} }
ia64_srlz_i(); ia64_srlz_i();
for (i=0; i < pmu_conf.num_dbrs; i++) { for (i=0; i < pmu_conf->num_dbrs; i++) {
ia64_set_dbr(i, 0UL); ia64_set_dbr(i, 0UL);
ia64_srlz_d(); ia64_srlz_d();
} }
...@@ -3865,8 +3884,10 @@ pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_ ...@@ -3865,8 +3884,10 @@ pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_
} else { } else {
CTX_USED_DBR(ctx, rnum); CTX_USED_DBR(ctx, rnum);
if (can_access_pmu) ia64_set_dbr(rnum, dbreg.val); if (can_access_pmu) {
ia64_set_dbr(rnum, dbreg.val);
ia64_dv_serialize_data();
}
ctx->ctx_dbrs[rnum] = dbreg.val; ctx->ctx_dbrs[rnum] = dbreg.val;
DPRINT(("write dbr%u=0x%lx used_dbrs=0x%x is_loaded=%d access_pmu=%d\n", DPRINT(("write dbr%u=0x%lx used_dbrs=0x%x is_loaded=%d access_pmu=%d\n",
...@@ -4367,8 +4388,8 @@ pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) ...@@ -4367,8 +4388,8 @@ pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
* guaranteed safe by earlier check against DBG_VALID * guaranteed safe by earlier check against DBG_VALID
*/ */
if (ctx->ctx_fl_using_dbreg) { if (ctx->ctx_fl_using_dbreg) {
pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf.num_ibrs); pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf.num_dbrs); pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
} }
/* /*
* set new ownership * set new ownership
...@@ -4777,7 +4798,7 @@ sys_perfmonctl (int fd, int cmd, void *arg, int count, long arg5, long arg6, lon ...@@ -4777,7 +4798,7 @@ sys_perfmonctl (int fd, int cmd, void *arg, int count, long arg5, long arg6, lon
/* /*
* reject any call if perfmon was disabled at initialization * reject any call if perfmon was disabled at initialization
*/ */
if (unlikely(PFM_IS_DISABLED())) return -ENOSYS; if (unlikely(pmu_conf == NULL)) return -ENOSYS;
if (unlikely(cmd < 0 || cmd >= PFM_CMD_COUNT)) { if (unlikely(cmd < 0 || cmd >= PFM_CMD_COUNT)) {
DPRINT(("invalid cmd=%d\n", cmd)); DPRINT(("invalid cmd=%d\n", cmd));
...@@ -5178,7 +5199,7 @@ pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, str ...@@ -5178,7 +5199,7 @@ pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, str
tstamp = ia64_get_itc(); tstamp = ia64_get_itc();
mask = pmc0 >> PMU_FIRST_COUNTER; mask = pmc0 >> PMU_FIRST_COUNTER;
ovfl_val = pmu_conf.ovfl_val; ovfl_val = pmu_conf->ovfl_val;
has_smpl = CTX_HAS_SMPL(ctx); has_smpl = CTX_HAS_SMPL(ctx);
DPRINT_ovfl(("pmc0=0x%lx pid=%d iip=0x%lx, %s " DPRINT_ovfl(("pmc0=0x%lx pid=%d iip=0x%lx, %s "
...@@ -5549,10 +5570,11 @@ pfm_proc_info(char *page) ...@@ -5549,10 +5570,11 @@ pfm_proc_info(char *page)
int i; int i;
p += sprintf(p, "perfmon version : %u.%u\n", PFM_VERSION_MAJ, PFM_VERSION_MIN); p += sprintf(p, "perfmon version : %u.%u\n", PFM_VERSION_MAJ, PFM_VERSION_MIN);
p += sprintf(p, "model : %s\n", pmu_conf.pmu_name); p += sprintf(p, "model : %s\n", pmu_conf->pmu_name);
p += sprintf(p, "fastctxsw : %s\n", pfm_sysctl.fastctxsw > 0 ? "Yes": "No"); p += sprintf(p, "fastctxsw : %s\n", pfm_sysctl.fastctxsw > 0 ? "Yes": "No");
p += sprintf(p, "expert mode : %s\n", pfm_sysctl.expert_mode > 0 ? "Yes": "No"); p += sprintf(p, "expert mode : %s\n", pfm_sysctl.expert_mode > 0 ? "Yes": "No");
p += sprintf(p, "ovfl_mask : 0x%lx\n", pmu_conf.ovfl_val); p += sprintf(p, "ovfl_mask : 0x%lx\n", pmu_conf->ovfl_val);
p += sprintf(p, "flags : 0x%x\n", pmu_conf->flags);
for(i=0; i < NR_CPUS; i++) { for(i=0; i < NR_CPUS; i++) {
if (cpu_online(i) == 0) continue; if (cpu_online(i) == 0) continue;
...@@ -5899,6 +5921,7 @@ pfm_load_regs (struct task_struct *task) ...@@ -5899,6 +5921,7 @@ pfm_load_regs (struct task_struct *task)
unsigned long pmc_mask = 0UL, pmd_mask = 0UL; unsigned long pmc_mask = 0UL, pmd_mask = 0UL;
unsigned long flags; unsigned long flags;
u64 psr, psr_up; u64 psr, psr_up;
int need_irq_resend;
ctx = PFM_GET_CTX(task); ctx = PFM_GET_CTX(task);
if (unlikely(ctx == NULL)) return; if (unlikely(ctx == NULL)) return;
...@@ -5919,6 +5942,8 @@ pfm_load_regs (struct task_struct *task) ...@@ -5919,6 +5942,8 @@ pfm_load_regs (struct task_struct *task)
flags = pfm_protect_ctx_ctxsw(ctx); flags = pfm_protect_ctx_ctxsw(ctx);
psr = pfm_get_psr(); psr = pfm_get_psr();
need_irq_resend = pmu_conf->flags & PFM_PMU_IRQ_RESEND;
BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP)); BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
BUG_ON(psr & IA64_PSR_I); BUG_ON(psr & IA64_PSR_I);
...@@ -5944,8 +5969,8 @@ pfm_load_regs (struct task_struct *task) ...@@ -5944,8 +5969,8 @@ pfm_load_regs (struct task_struct *task)
* stale state. * stale state.
*/ */
if (ctx->ctx_fl_using_dbreg) { if (ctx->ctx_fl_using_dbreg) {
pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf.num_ibrs); pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf.num_dbrs); pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
} }
/* /*
* retrieve saved psr.up * retrieve saved psr.up
...@@ -6004,12 +6029,12 @@ pfm_load_regs (struct task_struct *task) ...@@ -6004,12 +6029,12 @@ pfm_load_regs (struct task_struct *task)
ia64_set_pmc(0, t->pmcs[0]); ia64_set_pmc(0, t->pmcs[0]);
ia64_srlz_d(); ia64_srlz_d();
t->pmcs[0] = 0UL; t->pmcs[0] = 0UL;
#ifndef CONFIG_MCKINLEY
/* /*
* will replay the PMU interrupt * will replay the PMU interrupt
*/ */
hw_resend_irq(NULL, IA64_PERFMON_VECTOR); if (need_irq_resend) hw_resend_irq(NULL, IA64_PERFMON_VECTOR);
#endif
pfm_stats[smp_processor_id()].pfm_replay_ovfl_intr_count++; pfm_stats[smp_processor_id()].pfm_replay_ovfl_intr_count++;
} }
...@@ -6061,6 +6086,7 @@ pfm_load_regs (struct task_struct *task) ...@@ -6061,6 +6086,7 @@ pfm_load_regs (struct task_struct *task)
struct task_struct *owner; struct task_struct *owner;
unsigned long pmd_mask, pmc_mask; unsigned long pmd_mask, pmc_mask;
u64 psr, psr_up; u64 psr, psr_up;
int need_irq_resend;
owner = GET_PMU_OWNER(); owner = GET_PMU_OWNER();
ctx = PFM_GET_CTX(task); ctx = PFM_GET_CTX(task);
...@@ -6079,14 +6105,15 @@ pfm_load_regs (struct task_struct *task) ...@@ -6079,14 +6105,15 @@ pfm_load_regs (struct task_struct *task)
* (not perfmon) by the previous task. * (not perfmon) by the previous task.
*/ */
if (ctx->ctx_fl_using_dbreg) { if (ctx->ctx_fl_using_dbreg) {
pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf.num_ibrs); pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf.num_dbrs); pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
} }
/* /*
* retrieved saved psr.up * retrieved saved psr.up
*/ */
psr_up = ctx->ctx_saved_psr_up; psr_up = ctx->ctx_saved_psr_up;
need_irq_resend = pmu_conf->flags & PFM_PMU_IRQ_RESEND;
/* /*
* short path, our state is still there, just * short path, our state is still there, just
...@@ -6143,12 +6170,11 @@ pfm_load_regs (struct task_struct *task) ...@@ -6143,12 +6170,11 @@ pfm_load_regs (struct task_struct *task)
t->pmcs[0] = 0UL; t->pmcs[0] = 0UL;
#ifndef CONFIG_MCKINLEY
/* /*
* will replay the PMU interrupt * will replay the PMU interrupt
*/ */
hw_resend_irq(NULL, IA64_PERFMON_VECTOR); if (need_irq_resend) hw_resend_irq(NULL, IA64_PERFMON_VECTOR);
#endif
pfm_stats[smp_processor_id()].pfm_replay_ovfl_intr_count++; pfm_stats[smp_processor_id()].pfm_replay_ovfl_intr_count++;
} }
...@@ -6222,7 +6248,7 @@ pfm_flush_pmds(struct task_struct *task, pfm_context_t *ctx) ...@@ -6222,7 +6248,7 @@ pfm_flush_pmds(struct task_struct *task, pfm_context_t *ctx)
*/ */
task->thread.pmcs[0] = 0; task->thread.pmcs[0] = 0;
} }
ovfl_val = pmu_conf.ovfl_val; ovfl_val = pmu_conf->ovfl_val;
/* /*
* we save all the used pmds * we save all the used pmds
* we take care of overflows for counting PMDs * we take care of overflows for counting PMDs
...@@ -6287,6 +6313,29 @@ static struct irqaction perfmon_irqaction = { ...@@ -6287,6 +6313,29 @@ static struct irqaction perfmon_irqaction = {
*/ */
static int init_pfm_fs(void); static int init_pfm_fs(void);
/*
 * Select the PMU description matching the boot CPU.
 *
 * Walks the NULL-terminated pmu_confs[] table: an entry with a probe()
 * hook is chosen when the hook returns 0; otherwise the entry matches
 * when its pmu_family equals the CPU family (0xff acts as a wildcard).
 *
 * On success the global pmu_conf pointer is set and 0 is returned;
 * -1 means no suitable PMU description was found (perfmon disabled).
 */
static int __init
pfm_probe_pmu(void)
{
	pmu_config_t **p, *c;
	int family = local_cpu_data->family;

	for (p = pmu_confs; (c = *p) != NULL; p++) {
		int matches;

		if (c->probe)
			matches = c->probe() == 0;
		else
			matches = c->pmu_family == family || c->pmu_family == 0xff;

		if (matches) {
			pmu_conf = c;
			return 0;
		}
	}
	return -1;
}
int __init int __init
pfm_init(void) pfm_init(void)
{ {
...@@ -6297,12 +6346,9 @@ pfm_init(void) ...@@ -6297,12 +6346,9 @@ pfm_init(void)
PFM_VERSION_MIN, PFM_VERSION_MIN,
IA64_PERFMON_VECTOR); IA64_PERFMON_VECTOR);
/* if (pfm_probe_pmu()) {
* PMU type sanity check printk(KERN_INFO "perfmon: disabled, there is no support for processor family %d\n",
* XXX: maybe better to implement autodetection (but then we have a larger kernel) local_cpu_data->family);
*/
if (local_cpu_data->family != pmu_conf.pmu_family) {
printk(KERN_INFO "perfmon: disabled, kernel only supports %s PMU family\n", pmu_conf.pmu_name);
return -ENODEV; return -ENODEV;
} }
...@@ -6313,45 +6359,48 @@ pfm_init(void) ...@@ -6313,45 +6359,48 @@ pfm_init(void)
n = 0; n = 0;
for (i=0; PMC_IS_LAST(i) == 0; i++) { for (i=0; PMC_IS_LAST(i) == 0; i++) {
if (PMC_IS_IMPL(i) == 0) continue; if (PMC_IS_IMPL(i) == 0) continue;
pmu_conf.impl_pmcs[i>>6] |= 1UL << (i&63); pmu_conf->impl_pmcs[i>>6] |= 1UL << (i&63);
n++; n++;
} }
pmu_conf.num_pmcs = n; pmu_conf->num_pmcs = n;
n = 0; n_counters = 0; n = 0; n_counters = 0;
for (i=0; PMD_IS_LAST(i) == 0; i++) { for (i=0; PMD_IS_LAST(i) == 0; i++) {
if (PMD_IS_IMPL(i) == 0) continue; if (PMD_IS_IMPL(i) == 0) continue;
pmu_conf.impl_pmds[i>>6] |= 1UL << (i&63); pmu_conf->impl_pmds[i>>6] |= 1UL << (i&63);
n++; n++;
if (PMD_IS_COUNTING(i)) n_counters++; if (PMD_IS_COUNTING(i)) n_counters++;
} }
pmu_conf.num_pmds = n; pmu_conf->num_pmds = n;
pmu_conf.num_counters = n_counters; pmu_conf->num_counters = n_counters;
/* /*
* sanity checks on the number of debug registers * sanity checks on the number of debug registers
*/ */
if (pmu_conf.use_rr_dbregs) { if (pmu_conf->use_rr_dbregs) {
if (pmu_conf.num_ibrs > IA64_NUM_DBG_REGS) { if (pmu_conf->num_ibrs > IA64_NUM_DBG_REGS) {
printk(KERN_INFO "perfmon: unsupported number of code debug registers (%u)\n", pmu_conf.num_ibrs); printk(KERN_INFO "perfmon: unsupported number of code debug registers (%u)\n", pmu_conf->num_ibrs);
pmu_conf = NULL;
return -1; return -1;
} }
if (pmu_conf.num_dbrs > IA64_NUM_DBG_REGS) { if (pmu_conf->num_dbrs > IA64_NUM_DBG_REGS) {
printk(KERN_INFO "perfmon: unsupported number of data debug registers (%u)\n", pmu_conf.num_ibrs); printk(KERN_INFO "perfmon: unsupported number of data debug registers (%u)\n", pmu_conf->num_ibrs);
pmu_conf = NULL;
return -1; return -1;
} }
} }
printk("perfmon: %s PMU detected, %u PMCs, %u PMDs, %u counters (%lu bits)\n", printk("perfmon: %s PMU detected, %u PMCs, %u PMDs, %u counters (%lu bits)\n",
pmu_conf.pmu_name, pmu_conf->pmu_name,
pmu_conf.num_pmcs, pmu_conf->num_pmcs,
pmu_conf.num_pmds, pmu_conf->num_pmds,
pmu_conf.num_counters, pmu_conf->num_counters,
ffz(pmu_conf.ovfl_val)); ffz(pmu_conf->ovfl_val));
/* sanity check */ /* sanity check */
if (pmu_conf.num_pmds >= IA64_NUM_PMD_REGS || pmu_conf.num_pmcs >= IA64_NUM_PMC_REGS) { if (pmu_conf->num_pmds >= IA64_NUM_PMD_REGS || pmu_conf->num_pmcs >= IA64_NUM_PMC_REGS) {
printk(KERN_ERR "perfmon: not enough pmc/pmd, perfmon disabled\n"); printk(KERN_ERR "perfmon: not enough pmc/pmd, perfmon disabled\n");
pmu_conf = NULL;
return -1; return -1;
} }
...@@ -6361,6 +6410,7 @@ pfm_init(void) ...@@ -6361,6 +6410,7 @@ pfm_init(void)
perfmon_dir = create_proc_read_entry ("perfmon", 0, 0, perfmon_read_entry, NULL); perfmon_dir = create_proc_read_entry ("perfmon", 0, 0, perfmon_read_entry, NULL);
if (perfmon_dir == NULL) { if (perfmon_dir == NULL) {
printk(KERN_ERR "perfmon: cannot create /proc entry, perfmon disabled\n"); printk(KERN_ERR "perfmon: cannot create /proc entry, perfmon disabled\n");
pmu_conf = NULL;
return -1; return -1;
} }
...@@ -6379,9 +6429,6 @@ pfm_init(void) ...@@ -6379,9 +6429,6 @@ pfm_init(void)
for(i=0; i < NR_CPUS; i++) pfm_stats[i].pfm_ovfl_intr_cycles_min = ~0UL; for(i=0; i < NR_CPUS; i++) pfm_stats[i].pfm_ovfl_intr_cycles_min = ~0UL;
/* we are all set */
pmu_conf.enabled = 1;
return 0; return 0;
} }
...@@ -6393,8 +6440,6 @@ __initcall(pfm_init); ...@@ -6393,8 +6440,6 @@ __initcall(pfm_init);
void void
pfm_init_percpu (void) pfm_init_percpu (void)
{ {
int i;
/* /*
* make sure no measurement is active * make sure no measurement is active
* (may inherit programmed PMCs from EFI). * (may inherit programmed PMCs from EFI).
...@@ -6412,28 +6457,6 @@ pfm_init_percpu (void) ...@@ -6412,28 +6457,6 @@ pfm_init_percpu (void)
ia64_setreg(_IA64_REG_CR_PMV, IA64_PERFMON_VECTOR); ia64_setreg(_IA64_REG_CR_PMV, IA64_PERFMON_VECTOR);
ia64_srlz_d(); ia64_srlz_d();
/*
* we first initialize the PMU to a stable state.
* the values may have been changed from their power-up
* values by software executed before the kernel took over.
*
* At this point, pmu_conf has not yet been initialized
*
* On McKinley, this code is ineffective until PMC4 is initialized
* but that's all right because we take care of pmc0 later.
*
* XXX: potential problems with pmc1.
*/
for (i=1; PMC_IS_LAST(i) == 0; i++) {
if (PMC_IS_IMPL(i) == 0) continue;
ia64_set_pmc(i, PMC_DFL_VAL(i));
}
for (i=0; PMD_IS_LAST(i) == 0; i++) {
if (PMD_IS_IMPL(i) == 0) continue;
ia64_set_pmd(i, 0UL);
}
} }
/* /*
......
...@@ -6,13 +6,6 @@ ...@@ -6,13 +6,6 @@
* Stephane Eranian <eranian@hpl.hp.com> * Stephane Eranian <eranian@hpl.hp.com>
*/ */
#define RDEP(x) (1UL<<(x))
#if defined(CONFIG_ITANIUM) || defined (CONFIG_MCKINLEY)
#error "This file should not be used when CONFIG_ITANIUM or CONFIG_MCKINLEY is defined"
#endif
static pfm_reg_desc_t pfm_gen_pmc_desc[PMU_MAX_PMCS]={ static pfm_reg_desc_t pfm_gen_pmc_desc[PMU_MAX_PMCS]={
/* pmc0 */ { PFM_REG_CONTROL , 0, 0x1UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}}, /* pmc0 */ { PFM_REG_CONTROL , 0, 0x1UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc1 */ { PFM_REG_CONTROL , 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}}, /* pmc1 */ { PFM_REG_CONTROL , 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
...@@ -40,10 +33,9 @@ static pfm_reg_desc_t pfm_gen_pmd_desc[PMU_MAX_PMDS]={ ...@@ -40,10 +33,9 @@ static pfm_reg_desc_t pfm_gen_pmd_desc[PMU_MAX_PMDS]={
/* /*
* impl_pmcs, impl_pmds are computed at runtime to minimize errors! * impl_pmcs, impl_pmds are computed at runtime to minimize errors!
*/ */
static pmu_config_t pmu_conf={ static pmu_config_t pmu_conf_gen={
.pmu_name = "Generic", .pmu_name = "Generic",
.pmu_family = 0xff, /* any */ .pmu_family = 0xff, /* any */
.enabled = 0,
.ovfl_val = (1UL << 32) - 1, .ovfl_val = (1UL << 32) - 1,
.num_ibrs = 0, /* does not use */ .num_ibrs = 0, /* does not use */
.num_dbrs = 0, /* does not use */ .num_dbrs = 0, /* does not use */
......
/*
* This file contains the HP SKI Simulator PMU register description tables
* and pmc checkers used by perfmon.c.
*
* Copyright (C) 2002-2003 Hewlett Packard Co
* Stephane Eranian <eranian@hpl.hp.com>
*
* File mostly contributed by Ian Wienand <ianw@gelato.unsw.edu.au>
*
* This file is included as a dummy template so the kernel does not
 * try to initialize registers the simulator can't handle.
*
* Note the simulator does not (currently) implement these registers, i.e.,
* they do not count anything. But you can read/write them.
*/
#define RDEP(x) (1UL<<(x))
#ifndef CONFIG_IA64_HP_SIM
#error "This file should only be included for the HP Simulator"
#endif
/*
 * PMC (performance monitor control) register descriptions for the HP SKI
 * simulator PMU.  pmc0-pmc3 are plain control registers; pmc4-pmc15 are
 * counter controls, each tied via RDEP() to the PMD of the same index.
 * Per the file header, the simulator only emulates register reads/writes;
 * nothing is actually counted.
 */
static pfm_reg_desc_t pfm_hpsim_pmc_desc[PMU_MAX_PMCS]={
/* pmc0 */ { PFM_REG_CONTROL , 0, 0x1UL, -1UL, NULL, NULL, {0UL, 0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc1 */ { PFM_REG_CONTROL , 0, 0x0UL, -1UL, NULL, NULL, {0UL, 0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc2 */ { PFM_REG_CONTROL , 0, 0x0UL, -1UL, NULL, NULL, {0UL, 0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc3 */ { PFM_REG_CONTROL , 0, 0x0UL, -1UL, NULL, NULL, {0UL, 0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc4 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {RDEP(4), 0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc5 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {RDEP(5), 0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc6 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {RDEP(6), 0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc7 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {RDEP(7), 0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc8 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {RDEP(8), 0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc9 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {RDEP(9), 0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc10 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {RDEP(10), 0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc11 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {RDEP(11), 0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc12 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {RDEP(12), 0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc13 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {RDEP(13), 0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc14 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {RDEP(14), 0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc15 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {RDEP(15), 0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
	    { PFM_REG_END , 0, 0x0UL, -1UL, NULL, NULL, {0,}, {0,}}, /* end marker */
};
/*
 * PMD (performance monitor data) register descriptions for the HP SKI
 * simulator PMU.  pmd0-pmd3 are buffer registers; pmd4-pmd15 are
 * counters, each tied via RDEP() to the controlling PMC of the same
 * index.  They can be read/written but do not count anything on the
 * simulator (see file header).
 */
static pfm_reg_desc_t pfm_hpsim_pmd_desc[PMU_MAX_PMDS]={
/* pmd0 */ { PFM_REG_BUFFER, 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmd1 */ { PFM_REG_BUFFER, 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmd2 */ { PFM_REG_BUFFER, 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmd3 */ { PFM_REG_BUFFER, 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmd4 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(4),0UL, 0UL, 0UL}},
/* pmd5 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(5),0UL, 0UL, 0UL}},
/* pmd6 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(6),0UL, 0UL, 0UL}},
/* pmd7 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(7),0UL, 0UL, 0UL}},
/* pmd8 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(8),0UL, 0UL, 0UL}},
/* pmd9 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(9),0UL, 0UL, 0UL}},
/* pmd10 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(10),0UL, 0UL, 0UL}},
/* pmd11 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(11),0UL, 0UL, 0UL}},
/* pmd12 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd13 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(13),0UL, 0UL, 0UL}},
/* pmd14 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(14),0UL, 0UL, 0UL}},
/* pmd15 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(15),0UL, 0UL, 0UL}},
	    { PFM_REG_END , 0, 0x0UL, -1UL, NULL, NULL, {0,}, {0,}}, /* end marker */
};
/*
 * PMU description for the HP SKI simulator.
 *
 * impl_pmcs, impl_pmds are computed at runtime to minimize errors!
 *
 * NOTE(review): renamed from "pmu_conf" — with this change pmu_conf is
 * the global pmu_config_t *pointer* selected at boot by pfm_probe_pmu(),
 * so a static struct of the same name would collide with it.  The
 * obsolete .enabled field, dropped from pmu_config_t along with the
 * other PMU descriptions, is removed as well.
 */
static pmu_config_t pmu_conf_hpsim={
	.pmu_name      = "hpsim",
	.pmu_family    = 0x7,	/* ski emulator reports as Itanium */
	.ovfl_val      = (1UL << 32) - 1,
	.num_ibrs      = 0,	/* does not use */
	.num_dbrs      = 0,	/* does not use */
	.pmd_desc      = pfm_hpsim_pmd_desc,
	.pmc_desc      = pfm_hpsim_pmc_desc
};
...@@ -5,15 +5,7 @@ ...@@ -5,15 +5,7 @@
* Copyright (C) 2002-2003 Hewlett Packard Co * Copyright (C) 2002-2003 Hewlett Packard Co
* Stephane Eranian <eranian@hpl.hp.com> * Stephane Eranian <eranian@hpl.hp.com>
*/ */
#define RDEP(x) (1UL<<(x))
#ifndef CONFIG_ITANIUM
#error "This file is only valid when CONFIG_ITANIUM is defined"
#endif
static int pfm_ita_pmc_check(struct task_struct *task, pfm_context_t *ctx, unsigned int cnum, unsigned long *val, struct pt_regs *regs); static int pfm_ita_pmc_check(struct task_struct *task, pfm_context_t *ctx, unsigned int cnum, unsigned long *val, struct pt_regs *regs);
static int pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
static pfm_reg_desc_t pfm_ita_pmc_desc[PMU_MAX_PMCS]={ static pfm_reg_desc_t pfm_ita_pmc_desc[PMU_MAX_PMCS]={
/* pmc0 */ { PFM_REG_CONTROL , 0, 0x1UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}}, /* pmc0 */ { PFM_REG_CONTROL , 0, 0x1UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
...@@ -55,31 +47,22 @@ static pfm_reg_desc_t pfm_ita_pmd_desc[PMU_MAX_PMDS]={ ...@@ -55,31 +47,22 @@ static pfm_reg_desc_t pfm_ita_pmd_desc[PMU_MAX_PMDS]={
{ PFM_REG_END , 0, 0UL, -1UL, NULL, NULL, {0,}, {0,}}, /* end marker */ { PFM_REG_END , 0, 0UL, -1UL, NULL, NULL, {0,}, {0,}}, /* end marker */
}; };
/*
* impl_pmcs, impl_pmds are computed at runtime to minimize errors!
*/
static pmu_config_t pmu_conf={
.pmu_name = "Itanium",
.pmu_family = 0x7,
.enabled = 0,
.ovfl_val = (1UL << 32) - 1,
.pmd_desc = pfm_ita_pmd_desc,
.pmc_desc = pfm_ita_pmc_desc,
.num_ibrs = 8,
.num_dbrs = 8,
.use_rr_dbregs = 1 /* debug register are use for range retrictions */
};
static int static int
pfm_ita_pmc_check(struct task_struct *task, pfm_context_t *ctx, unsigned int cnum, unsigned long *val, struct pt_regs *regs) pfm_ita_pmc_check(struct task_struct *task, pfm_context_t *ctx, unsigned int cnum, unsigned long *val, struct pt_regs *regs)
{ {
int ret; int ret;
int is_loaded;
/* sanity check */
if (ctx == NULL) return -EINVAL;
is_loaded = ctx->ctx_state == PFM_CTX_LOADED || ctx->ctx_state == PFM_CTX_MASKED;
/* /*
* we must clear the (instruction) debug registers if pmc13.ta bit is cleared * we must clear the (instruction) debug registers if pmc13.ta bit is cleared
* before they are written (fl_using_dbreg==0) to avoid picking up stale information. * before they are written (fl_using_dbreg==0) to avoid picking up stale information.
*/ */
if (cnum == 13 && ((*val & 0x1) == 0UL) && ctx->ctx_fl_using_dbreg == 0) { if (cnum == 13 && is_loaded && ((*val & 0x1) == 0UL) && ctx->ctx_fl_using_dbreg == 0) {
DPRINT(("pmc[%d]=0x%lx has active pmc13.ta cleared, clearing ibr\n", cnum, *val)); DPRINT(("pmc[%d]=0x%lx has active pmc13.ta cleared, clearing ibr\n", cnum, *val));
...@@ -98,7 +81,7 @@ pfm_ita_pmc_check(struct task_struct *task, pfm_context_t *ctx, unsigned int cnu ...@@ -98,7 +81,7 @@ pfm_ita_pmc_check(struct task_struct *task, pfm_context_t *ctx, unsigned int cnu
* we must clear the (data) debug registers if pmc11.pt bit is cleared * we must clear the (data) debug registers if pmc11.pt bit is cleared
* before they are written (fl_using_dbreg==0) to avoid picking up stale information. * before they are written (fl_using_dbreg==0) to avoid picking up stale information.
*/ */
if (cnum == 11 && ((*val >> 28)& 0x1) == 0 && ctx->ctx_fl_using_dbreg == 0) { if (cnum == 11 && is_loaded && ((*val >> 28)& 0x1) == 0 && ctx->ctx_fl_using_dbreg == 0) {
DPRINT(("pmc[%d]=0x%lx has active pmc11.pt cleared, clearing dbr\n", cnum, *val)); DPRINT(("pmc[%d]=0x%lx has active pmc11.pt cleared, clearing dbr\n", cnum, *val));
...@@ -115,3 +98,18 @@ pfm_ita_pmc_check(struct task_struct *task, pfm_context_t *ctx, unsigned int cnu ...@@ -115,3 +98,18 @@ pfm_ita_pmc_check(struct task_struct *task, pfm_context_t *ctx, unsigned int cnu
return 0; return 0;
} }
/*
 * PMU description for the Itanium PMU, selected at boot by
 * pfm_probe_pmu() when the CPU family is 0x7.
 *
 * impl_pmcs, impl_pmds are computed at runtime to minimize errors!
 */
static pmu_config_t pmu_conf_ita={
	.pmu_name      = "Itanium",
	.pmu_family    = 0x7,
	.ovfl_val      = (1UL << 32) - 1,	/* counters overflow at 32 bits */
	.pmd_desc      = pfm_ita_pmd_desc,
	.pmc_desc      = pfm_ita_pmc_desc,
	.num_ibrs      = 8,
	.num_dbrs      = 8,
	.use_rr_dbregs = 1, /* debug registers are used for range restrictions */
};
...@@ -5,15 +5,7 @@ ...@@ -5,15 +5,7 @@
* Copyright (C) 2002-2003 Hewlett Packard Co * Copyright (C) 2002-2003 Hewlett Packard Co
* Stephane Eranian <eranian@hpl.hp.com> * Stephane Eranian <eranian@hpl.hp.com>
*/ */
#define RDEP(x) (1UL<<(x))
#ifndef CONFIG_MCKINLEY
#error "This file is only valid when CONFIG_MCKINLEY is defined"
#endif
static int pfm_mck_pmc_check(struct task_struct *task, pfm_context_t *ctx, unsigned int cnum, unsigned long *val, struct pt_regs *regs); static int pfm_mck_pmc_check(struct task_struct *task, pfm_context_t *ctx, unsigned int cnum, unsigned long *val, struct pt_regs *regs);
static int pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
static pfm_reg_desc_t pfm_mck_pmc_desc[PMU_MAX_PMCS]={ static pfm_reg_desc_t pfm_mck_pmc_desc[PMU_MAX_PMCS]={
/* pmc0 */ { PFM_REG_CONTROL , 0, 0x1UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}}, /* pmc0 */ { PFM_REG_CONTROL , 0, 0x1UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
...@@ -57,21 +49,6 @@ static pfm_reg_desc_t pfm_mck_pmd_desc[PMU_MAX_PMDS]={ ...@@ -57,21 +49,6 @@ static pfm_reg_desc_t pfm_mck_pmd_desc[PMU_MAX_PMDS]={
{ PFM_REG_END , 0, 0x0UL, -1UL, NULL, NULL, {0,}, {0,}}, /* end marker */ { PFM_REG_END , 0, 0x0UL, -1UL, NULL, NULL, {0,}, {0,}}, /* end marker */
}; };
/*
* impl_pmcs, impl_pmds are computed at runtime to minimize errors!
*/
static pmu_config_t pmu_conf={
.pmu_name = "Itanium 2",
.pmu_family = 0x1f,
.enabled = 0,
.ovfl_val = (1UL << 47) - 1,
.pmd_desc = pfm_mck_pmd_desc,
.pmc_desc = pfm_mck_pmc_desc,
.num_ibrs = 8,
.num_dbrs = 8,
.use_rr_dbregs = 1 /* debug register are use for range retrictions */
};
/* /*
* PMC reserved fields must have their power-up values preserved * PMC reserved fields must have their power-up values preserved
*/ */
...@@ -120,12 +97,11 @@ pfm_mck_pmc_check(struct task_struct *task, pfm_context_t *ctx, unsigned int cnu ...@@ -120,12 +97,11 @@ pfm_mck_pmc_check(struct task_struct *task, pfm_context_t *ctx, unsigned int cnu
* one of the pmc13.cfg_dbrpXX field is different from 0x3 * one of the pmc13.cfg_dbrpXX field is different from 0x3
* AND * AND
* at the corresponding pmc13.ena_dbrpXX is set. * at the corresponding pmc13.ena_dbrpXX is set.
*
* For now, we just check on cfg_dbrXX != 0x3.
*/ */
DPRINT(("cnum=%u val=0x%lx, using_dbreg=%d loaded=%d\n", cnum, *val, ctx->ctx_fl_using_dbreg, is_loaded)); DPRINT(("cnum=%u val=0x%lx, using_dbreg=%d loaded=%d\n", cnum, *val, ctx->ctx_fl_using_dbreg, is_loaded));
if (cnum == 13 && is_loaded && ((*val & 0x18181818UL) != 0x18181818UL) && ctx->ctx_fl_using_dbreg == 0) { if (cnum == 13 && is_loaded
&& (*val & 0x1e00000000000UL) && (*val & 0x18181818UL) != 0x18181818UL && ctx->ctx_fl_using_dbreg == 0) {
DPRINT(("pmc[%d]=0x%lx has active pmc13 settings, clearing dbr\n", cnum, *val)); DPRINT(("pmc[%d]=0x%lx has active pmc13 settings, clearing dbr\n", cnum, *val));
...@@ -192,3 +168,20 @@ pfm_mck_pmc_check(struct task_struct *task, pfm_context_t *ctx, unsigned int cnu ...@@ -192,3 +168,20 @@ pfm_mck_pmc_check(struct task_struct *task, pfm_context_t *ctx, unsigned int cnu
return ret ? -EINVAL : 0; return ret ? -EINVAL : 0;
} }
/*
 * PMU description for the Itanium 2 (McKinley) PMU, selected at boot
 * by pfm_probe_pmu() when the CPU family is 0x1f.
 *
 * impl_pmcs, impl_pmds are computed at runtime to minimize errors!
 */
static pmu_config_t pmu_conf_mck={
	.pmu_name      = "Itanium 2",
	.pmu_family    = 0x1f,
	/* pfm_load_regs() checks this flag to explicitly replay pending PMU
	 * interrupts via hw_resend_irq() on context switch */
	.flags         = PFM_PMU_IRQ_RESEND,
	.ovfl_val      = (1UL << 47) - 1,	/* counters overflow at 47 bits */
	.pmd_desc      = pfm_mck_pmd_desc,
	.pmc_desc      = pfm_mck_pmc_desc,
	.num_ibrs      = 8,
	.num_dbrs      = 8,
	.use_rr_dbregs = 1 /* debug registers are used for range restrictions */
};
...@@ -28,7 +28,6 @@ ...@@ -28,7 +28,6 @@
#include <linux/mm.h> #include <linux/mm.h>
#include <linux/cache.h> #include <linux/cache.h>
#include <linux/delay.h> #include <linux/delay.h>
#include <linux/cache.h>
#include <linux/efi.h> #include <linux/efi.h>
#include <asm/atomic.h> #include <asm/atomic.h>
......
...@@ -293,11 +293,6 @@ smp_callin (void) ...@@ -293,11 +293,6 @@ smp_callin (void)
*/ */
ia64_init_itm(); ia64_init_itm();
/*
* Set I/O port base per CPU
*/
ia64_set_kr(IA64_KR_IO_BASE, __pa(ia64_iobase));
ia64_mca_cmc_vector_setup(); /* Setup vector on AP & enable */ ia64_mca_cmc_vector_setup(); /* Setup vector on AP & enable */
#ifdef CONFIG_PERFMON #ifdef CONFIG_PERFMON
...@@ -338,6 +333,9 @@ start_secondary (void *unused) ...@@ -338,6 +333,9 @@ start_secondary (void *unused)
{ {
extern int cpu_idle (void); extern int cpu_idle (void);
/* Early console may use I/O ports */
ia64_set_kr(IA64_KR_IO_BASE, __pa(ia64_iobase));
Dprintk("start_secondary: starting CPU 0x%x\n", hard_smp_processor_id()); Dprintk("start_secondary: starting CPU 0x%x\n", hard_smp_processor_id());
efi_map_pal_code(); efi_map_pal_code();
cpu_init(); cpu_init();
......
...@@ -343,6 +343,7 @@ ia64_mmu_init (void *my_cpu_data) ...@@ -343,6 +343,7 @@ ia64_mmu_init (void *my_cpu_data)
#ifdef CONFIG_HUGETLB_PAGE #ifdef CONFIG_HUGETLB_PAGE
ia64_set_rr(HPAGE_REGION_BASE, HPAGE_SHIFT << 2); ia64_set_rr(HPAGE_REGION_BASE, HPAGE_SHIFT << 2);
ia64_srlz_d();
#endif #endif
cpu = smp_processor_id(); cpu = smp_processor_id();
......
.serialize.data
.serialize.instruction
...@@ -40,4 +40,14 @@ then ...@@ -40,4 +40,14 @@ then
CPPFLAGS="$CPPFLAGS -DHAVE_MODEL_SMALL_ATTRIBUTE" CPPFLAGS="$CPPFLAGS -DHAVE_MODEL_SMALL_ATTRIBUTE"
fi fi
rm -f $out rm -f $out
# Check whether assembler supports .serialize.{data,instruction} directive.
if $CC -c $dir/check-serialize.S -o $out 2>/dev/null
then
	CPPFLAGS="$CPPFLAGS -DHAVE_SERIALIZE_DIRECTIVE"
fi
rm -f $out
echo $CPPFLAGS echo $CPPFLAGS
...@@ -36,6 +36,25 @@ extern irqpda_t *irqpdaindr; ...@@ -36,6 +36,25 @@ extern irqpda_t *irqpdaindr;
extern cnodeid_t master_node_get(vertex_hdl_t vhdl); extern cnodeid_t master_node_get(vertex_hdl_t vhdl);
extern nasid_t master_nasid; extern nasid_t master_nasid;
/*
 * Return the cpuid of an online cpu on node 'cnode', or CPU_NONE when
 * the node has no online cpu.  Slices are scanned from highest to
 * lowest, matching the original loop order.
 */
cpuid_t
sn_get_node_first_cpu(cnodeid_t cnode) {
	int slice;

	for (slice = CPUS_PER_NODE - 1; slice >= 0; slice--) {
		int cpuid = cnode_slice_to_cpuid(cnode, slice);

		/* skip slices with no valid cpuid or whose cpu is offline;
		 * NR_CPUS appears to be the "no cpu here" sentinel — confirm
		 * against cnode_slice_to_cpuid() */
		if (cpuid != NR_CPUS && cpu_online(cpuid))
			return cpuid;
	}
	return CPU_NONE;
}
/* Initialize some shub registers for interrupts, both IO and error. */ /* Initialize some shub registers for interrupts, both IO and error. */
void intr_init_vecblk(cnodeid_t node) void intr_init_vecblk(cnodeid_t node)
{ {
...@@ -43,7 +62,6 @@ void intr_init_vecblk(cnodeid_t node) ...@@ -43,7 +62,6 @@ void intr_init_vecblk(cnodeid_t node)
sh_ii_int0_config_u_t ii_int_config; sh_ii_int0_config_u_t ii_int_config;
cpuid_t cpu; cpuid_t cpu;
cpuid_t cpu0, cpu1; cpuid_t cpu0, cpu1;
nodepda_t *lnodepda;
sh_ii_int0_enable_u_t ii_int_enable; sh_ii_int0_enable_u_t ii_int_enable;
sh_int_node_id_config_u_t node_id_config; sh_int_node_id_config_u_t node_id_config;
sh_local_int5_config_u_t local5_config; sh_local_int5_config_u_t local5_config;
...@@ -60,15 +78,13 @@ void intr_init_vecblk(cnodeid_t node) ...@@ -60,15 +78,13 @@ void intr_init_vecblk(cnodeid_t node)
HUB_S((unsigned long *)GLOBAL_MMR_ADDR(nasid, SH_INT_NODE_ID_CONFIG), HUB_S((unsigned long *)GLOBAL_MMR_ADDR(nasid, SH_INT_NODE_ID_CONFIG),
node_id_config.sh_int_node_id_config_regval); node_id_config.sh_int_node_id_config_regval);
cnode = nasid_to_cnodeid(master_nasid); cnode = nasid_to_cnodeid(master_nasid);
lnodepda = NODEPDA(cnode); cpu = sn_get_node_first_cpu(cnode);
cpu = lnodepda->node_first_cpu;
cpu = cpu_physical_id(cpu); cpu = cpu_physical_id(cpu);
SAL_CALL(ret_stuff, SN_SAL_REGISTER_CE, nasid, cpu, master_nasid,0,0,0,0); SAL_CALL(ret_stuff, SN_SAL_REGISTER_CE, nasid, cpu, master_nasid,0,0,0,0);
if (ret_stuff.status < 0) if (ret_stuff.status < 0)
printk("%s: SN_SAL_REGISTER_CE SAL_CALL failed\n",__FUNCTION__); printk("%s: SN_SAL_REGISTER_CE SAL_CALL failed\n",__FUNCTION__);
} else { } else {
lnodepda = NODEPDA(node); cpu = sn_get_node_first_cpu(node);
cpu = lnodepda->node_first_cpu;
cpu = cpu_physical_id(cpu); cpu = cpu_physical_id(cpu);
} }
......
...@@ -451,10 +451,6 @@ sn_cpu_init(void) ...@@ -451,10 +451,6 @@ sn_cpu_init(void)
} }
pda->shub_1_1_found = shub_1_1_found; pda->shub_1_1_found = shub_1_1_found;
if (local_node_data->active_cpu_count == 1)
nodepda->node_first_cpu = cpuid;
/* /*
* We must use different memory allocators for first cpu (bootmem * We must use different memory allocators for first cpu (bootmem
...@@ -474,7 +470,7 @@ sn_cpu_init(void) ...@@ -474,7 +470,7 @@ sn_cpu_init(void)
pda->mem_write_status_addr = (volatile u64 *) pda->mem_write_status_addr = (volatile u64 *)
LOCAL_MMR_ADDR((slice < 2 ? SH_MEMORY_WRITE_STATUS_0 : SH_MEMORY_WRITE_STATUS_1 ) ); LOCAL_MMR_ADDR((slice < 2 ? SH_MEMORY_WRITE_STATUS_0 : SH_MEMORY_WRITE_STATUS_1 ) );
if (nodepda->node_first_cpu == cpuid) { if (local_node_data->active_cpu_count++ == 0) {
int buddy_nasid; int buddy_nasid;
buddy_nasid = cnodeid_to_nasid(numa_node_id() == numnodes-1 ? 0 : numa_node_id()+ 1); buddy_nasid = cnodeid_to_nasid(numa_node_id() == numnodes-1 ? 0 : numa_node_id()+ 1);
pda->pio_shub_war_cam_addr = (volatile unsigned long*)GLOBAL_MMR_ADDR(nasid, SH_PI_CAM_CONTROL); pda->pio_shub_war_cam_addr = (volatile unsigned long*)GLOBAL_MMR_ADDR(nasid, SH_PI_CAM_CONTROL);
......
...@@ -228,11 +228,9 @@ read_version_entry(char *page, char **start, off_t off, int count, int *eof, ...@@ -228,11 +228,9 @@ read_version_entry(char *page, char **start, off_t off, int count, int *eof,
{ {
int len = 0; int len = 0;
MOD_INC_USE_COUNT;
/* data holds the pointer to this node's FIT */ /* data holds the pointer to this node's FIT */
len = dump_version(page, (unsigned long *)data); len = dump_version(page, (unsigned long *)data);
len = proc_calc_metrics(page, start, off, count, eof, len); len = proc_calc_metrics(page, start, off, count, eof, len);
MOD_DEC_USE_COUNT;
return len; return len;
} }
...@@ -242,11 +240,9 @@ read_fit_entry(char *page, char **start, off_t off, int count, int *eof, ...@@ -242,11 +240,9 @@ read_fit_entry(char *page, char **start, off_t off, int count, int *eof,
{ {
int len = 0; int len = 0;
MOD_INC_USE_COUNT;
/* data holds the pointer to this node's FIT */ /* data holds the pointer to this node's FIT */
len = dump_fit(page, (unsigned long *)data); len = dump_fit(page, (unsigned long *)data);
len = proc_calc_metrics(page, start, off, count, eof, len); len = proc_calc_metrics(page, start, off, count, eof, len);
MOD_DEC_USE_COUNT;
return len; return len;
} }
...@@ -310,6 +306,7 @@ int __init ...@@ -310,6 +306,7 @@ int __init
prominfo_init(void) prominfo_init(void)
{ {
struct proc_dir_entry **entp; struct proc_dir_entry **entp;
struct proc_dir_entry *p;
cnodeid_t cnodeid; cnodeid_t cnodeid;
nasid_t nasid; nasid_t nasid;
char name[NODE_NAME_LEN]; char name[NODE_NAME_LEN];
...@@ -333,12 +330,16 @@ prominfo_init(void) ...@@ -333,12 +330,16 @@ prominfo_init(void)
sprintf(name, "node%d", cnodeid); sprintf(name, "node%d", cnodeid);
*entp = proc_mkdir(name, sgi_prominfo_entry); *entp = proc_mkdir(name, sgi_prominfo_entry);
nasid = cnodeid_to_nasid(cnodeid); nasid = cnodeid_to_nasid(cnodeid);
create_proc_read_entry( p = create_proc_read_entry(
"fit", 0, *entp, read_fit_entry, "fit", 0, *entp, read_fit_entry,
lookup_fit(nasid)); lookup_fit(nasid));
create_proc_read_entry( if (p)
p->owner = THIS_MODULE;
p = create_proc_read_entry(
"version", 0, *entp, read_version_entry, "version", 0, *entp, read_version_entry,
lookup_fit(nasid)); lookup_fit(nasid));
if (p)
p->owner = THIS_MODULE;
} }
return 0; return 0;
......
...@@ -500,9 +500,10 @@ sn_sal_connect_interrupt(void) ...@@ -500,9 +500,10 @@ sn_sal_connect_interrupt(void)
nasid_t console_nasid; nasid_t console_nasid;
unsigned int console_irq; unsigned int console_irq;
int result; int result;
extern cpuid_t sn_get_node_first_cpu(cnodeid_t cnode);
console_nasid = ia64_sn_get_console_nasid(); console_nasid = ia64_sn_get_console_nasid();
intr_cpuid = NODEPDA(NASID_TO_COMPACT_NODEID(console_nasid))->node_first_cpu; intr_cpuid = sn_get_node_first_cpu(NASID_TO_COMPACT_NODEID(console_nasid));
intr_cpuloc = cpu_physical_id(intr_cpuid); intr_cpuloc = cpu_physical_id(intr_cpuid);
console_irq = CPU_VECTOR_TO_IRQ(intr_cpuloc, SGI_UART_VECTOR); console_irq = CPU_VECTOR_TO_IRQ(intr_cpuloc, SGI_UART_VECTOR);
......
...@@ -100,4 +100,12 @@ ...@@ -100,4 +100,12 @@
# define TEXT_ALIGN(n) # define TEXT_ALIGN(n)
#endif #endif
#ifdef HAVE_SERIALIZE_DIRECTIVE
# define dv_serialize_data .serialize.data
# define dv_serialize_instruction .serialize.instruction
#else
# define dv_serialize_data
# define dv_serialize_instruction
#endif
#endif /* _ASM_IA64_ASMMACRO_H */ #endif /* _ASM_IA64_ASMMACRO_H */
...@@ -377,9 +377,16 @@ register unsigned long ia64_r13 asm ("r13"); ...@@ -377,9 +377,16 @@ register unsigned long ia64_r13 asm ("r13");
}) })
#define ia64_srlz_i() asm volatile (";; srlz.i ;;" ::: "memory") #define ia64_srlz_i() asm volatile (";; srlz.i ;;" ::: "memory")
#define ia64_srlz_d() asm volatile (";; srlz.d" ::: "memory"); #define ia64_srlz_d() asm volatile (";; srlz.d" ::: "memory");
#ifdef HAVE_SERIALIZE_DIRECTIVE
# define ia64_dv_serialize_data() asm volatile (".serialize.data");
# define ia64_dv_serialize_instruction() asm volatile (".serialize.instruction");
#else
# define ia64_dv_serialize_data()
# define ia64_dv_serialize_instruction()
#endif
#define ia64_nop(x) asm volatile ("nop %0"::"i"(x)); #define ia64_nop(x) asm volatile ("nop %0"::"i"(x));
#define ia64_itci(addr) asm volatile ("itc.i %0;;" :: "r"(addr) : "memory") #define ia64_itci(addr) asm volatile ("itc.i %0;;" :: "r"(addr) : "memory")
......
...@@ -204,6 +204,9 @@ __s64 _m64_popcnt(__s64 a); ...@@ -204,6 +204,9 @@ __s64 _m64_popcnt(__s64 a);
#define ia64_srlz_d __dsrlz #define ia64_srlz_d __dsrlz
#define ia64_srlz_i __isrlz #define ia64_srlz_i __isrlz
#define ia64_dv_serialize_data()
#define ia64_dv_serialize_instruction()
#define ia64_st1_rel __st1_rel #define ia64_st1_rel __st1_rel
#define ia64_st2_rel __st2_rel #define ia64_st2_rel __st2_rel
#define ia64_st4_rel __st4_rel #define ia64_st4_rel __st4_rel
......
...@@ -20,7 +20,7 @@ typedef u64 hubreg_t; ...@@ -20,7 +20,7 @@ typedef u64 hubreg_t;
typedef u64 mmr_t; typedef u64 mmr_t;
typedef u64 nic_t; typedef u64 nic_t;
#define CNODE_TO_CPU_BASE(_cnode) (NODEPDA(_cnode)->node_first_cpu) #define CNODE_TO_CPU_BASE(_cnode) (sn_get_node_first_cpu(_cnode))
#define NASID_TO_COMPACT_NODEID(nasid) (nasid_to_cnodeid(nasid)) #define NASID_TO_COMPACT_NODEID(nasid) (nasid_to_cnodeid(nasid))
#define COMPACT_TO_NASID_NODEID(cnode) (cnodeid_to_nasid(cnode)) #define COMPACT_TO_NASID_NODEID(cnode) (cnodeid_to_nasid(cnode))
......
...@@ -36,13 +36,6 @@ ...@@ -36,13 +36,6 @@
struct nodepda_s { struct nodepda_s {
cpuid_t node_first_cpu; /* Starting cpu number for node */
/* WARNING: no guarantee that */
/* the second cpu on a node is */
/* node_first_cpu+1. */
vertex_hdl_t xbow_vhdl; vertex_hdl_t xbow_vhdl;
nasid_t xbow_peer; /* NASID of our peer hub on xbow */ nasid_t xbow_peer; /* NASID of our peer hub on xbow */
struct semaphore xbow_sema; /* Sema for xbow synchronization */ struct semaphore xbow_sema; /* Sema for xbow synchronization */
......
...@@ -114,10 +114,16 @@ extern struct ia64_boot_param { ...@@ -114,10 +114,16 @@ extern struct ia64_boot_param {
*/ */
/* For spinlocks etc */ /* For spinlocks etc */
/* clearing psr.i is implicitly serialized (visible by next insn) */ /*
/* setting psr.i requires data serialization */ * - clearing psr.i is implicitly serialized (visible by next insn)
* - setting psr.i requires data serialization
* - we need a stop-bit before reading PSR because we sometimes
* write a floating-point register right before reading the PSR
* and that writes to PSR.mfl
*/
#define __local_irq_save(x) \ #define __local_irq_save(x) \
do { \ do { \
ia64_stop(); \
(x) = ia64_getreg(_IA64_REG_PSR); \ (x) = ia64_getreg(_IA64_REG_PSR); \
ia64_stop(); \ ia64_stop(); \
ia64_rsm(IA64_PSR_I); \ ia64_rsm(IA64_PSR_I); \
...@@ -166,7 +172,7 @@ do { \ ...@@ -166,7 +172,7 @@ do { \
#endif /* !CONFIG_IA64_DEBUG_IRQ */ #endif /* !CONFIG_IA64_DEBUG_IRQ */
#define local_irq_enable() ({ ia64_ssm(IA64_PSR_I); ia64_srlz_d(); }) #define local_irq_enable() ({ ia64_ssm(IA64_PSR_I); ia64_srlz_d(); })
#define local_save_flags(flags) ((flags) = ia64_getreg(_IA64_REG_PSR)) #define local_save_flags(flags) ({ ia64_stop(); (flags) = ia64_getreg(_IA64_REG_PSR); })
#define irqs_disabled() \ #define irqs_disabled() \
({ \ ({ \
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment