Commit f14ceff7 authored by Huacai Chen, committed by Ralf Baechle

MIPS: perf: Add hardware perf events support for Loongson-3

This patch enables hardware performance counter support for Loongson-3's
perf events.
Signed-off-by: Huacai Chen <chenhc@lemote.com>
Cc: Steven J. Hill <Steven.Hill@imgtec.com>
Cc: linux-mips@linux-mips.org
Cc: Fuxin Zhang <zhangfx@lemote.com>
Cc: Zhangjin Wu <wuzhangjin@gmail.com>
Patchwork: https://patchwork.linux-mips.org/patch/9618/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
parent a2e50f53
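Note (illustration only, not part of the patch): the generic events wired up below are consumed through the ordinary perf_event_open(2) interface, with nothing Loongson-specific needed in user space; the even/odd counter steering in loongson3_event_map happens entirely inside the kernel. A minimal user-space sketch counting PERF_COUNT_HW_CPU_CYCLES could look like this:

/*
 * Illustration only, not part of this patch: once CONFIG_HW_PERF_EVENTS is
 * enabled on Loongson-3, generic hardware events are requested through the
 * standard perf_event_open(2) interface.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;	/* event 0x00 on the even counter, per loongson3_event_map */
	attr.disabled = 1;
	attr.exclude_kernel = 1;

	fd = perf_event_open(&attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... workload to be measured ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("cycles: %llu\n", (unsigned long long)count);

	close(fd);
	return 0;
}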
arch/mips/Kconfig
@@ -2390,7 +2390,7 @@ config NODES_SHIFT
 
 config HW_PERF_EVENTS
 	bool "Enable hardware performance counter support for perf events"
-	depends on PERF_EVENTS && OPROFILE=n && (CPU_MIPS32 || CPU_MIPS64 || CPU_R10000 || CPU_SB1 || CPU_CAVIUM_OCTEON || CPU_XLP)
+	depends on PERF_EVENTS && OPROFILE=n && (CPU_MIPS32 || CPU_MIPS64 || CPU_R10000 || CPU_SB1 || CPU_CAVIUM_OCTEON || CPU_XLP || CPU_LOONGSON3)
 	default y
 	help
 	  Enable hardware performance counter support for perf events. If
arch/mips/kernel/perf_event_mipsxx.c
@@ -825,6 +825,13 @@ static const struct mips_perf_event mipsxxcore_event_map2
 	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x27, CNTR_ODD, T },
 };
 
+static const struct mips_perf_event loongson3_event_map[PERF_COUNT_HW_MAX] = {
+	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN },
+	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x00, CNTR_ODD },
+	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x01, CNTR_EVEN },
+	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x01, CNTR_ODD },
+};
+
 static const struct mips_perf_event octeon_event_map[PERF_COUNT_HW_MAX] = {
 	[PERF_COUNT_HW_CPU_CYCLES] = { 0x01, CNTR_ALL },
 	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x03, CNTR_ALL },
@@ -1008,6 +1015,61 @@ static const struct mips_perf_event mipsxxcore_cache_map2
 	},
 };
 
+static const struct mips_perf_event loongson3_cache_map
+				[PERF_COUNT_HW_CACHE_MAX]
+				[PERF_COUNT_HW_CACHE_OP_MAX]
+				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+[C(L1D)] = {
+	/*
+	 * Like some other architectures (e.g. ARM), the performance
+	 * counters don't differentiate between read and write
+	 * accesses/misses, so this isn't strictly correct, but it's the
+	 * best we can do. Writes and reads get combined.
+	 */
+	[C(OP_READ)] = {
+		[C(RESULT_MISS)] = { 0x04, CNTR_ODD },
+	},
+	[C(OP_WRITE)] = {
+		[C(RESULT_MISS)] = { 0x04, CNTR_ODD },
+	},
+},
+[C(L1I)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_MISS)] = { 0x04, CNTR_EVEN },
+	},
+	[C(OP_WRITE)] = {
+		[C(RESULT_MISS)] = { 0x04, CNTR_EVEN },
+	},
+},
+[C(DTLB)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_MISS)] = { 0x09, CNTR_ODD },
+	},
+	[C(OP_WRITE)] = {
+		[C(RESULT_MISS)] = { 0x09, CNTR_ODD },
+	},
+},
+[C(ITLB)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_MISS)] = { 0x0c, CNTR_ODD },
+	},
+	[C(OP_WRITE)] = {
+		[C(RESULT_MISS)] = { 0x0c, CNTR_ODD },
+	},
+},
+[C(BPU)] = {
+	/* Using the same code for *HW_BRANCH* */
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)] = { 0x02, CNTR_EVEN },
+		[C(RESULT_MISS)] = { 0x02, CNTR_ODD },
+	},
+	[C(OP_WRITE)] = {
+		[C(RESULT_ACCESS)] = { 0x02, CNTR_EVEN },
+		[C(RESULT_MISS)] = { 0x02, CNTR_ODD },
+	},
+},
+};
+
 /* BMIPS5000 */
 static const struct mips_perf_event bmips5000_cache_map
 				[PERF_COUNT_HW_CACHE_MAX]
@@ -1542,6 +1604,10 @@ static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
 		else
 			raw_event.cntr_mask =
 				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
+		break;
+	case CPU_LOONGSON3:
+		raw_event.cntr_mask = raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
+		break;
 	}
 
 	raw_event.event_id = base_id;
@@ -1671,6 +1737,11 @@ init_hw_perf_events(void)
 		mipspmu.general_event_map = &mipsxxcore_event_map;
 		mipspmu.cache_event_map = &mipsxxcore_cache_map;
 		break;
+	case CPU_LOONGSON3:
+		mipspmu.name = "mips/loongson3";
+		mipspmu.general_event_map = &loongson3_event_map;
+		mipspmu.cache_event_map = &loongson3_cache_map;
+		break;
 	case CPU_CAVIUM_OCTEON:
 	case CPU_CAVIUM_OCTEON_PLUS:
 	case CPU_CAVIUM_OCTEON2:
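For raw events (e.g. perf stat -e r82), the new CPU_LOONGSON3 case in mipsxx_pmu_map_raw_event() follows the existing MIPS convention: raw config values above 127 are steered to the odd counter, otherwise to the even one. A standalone sketch of that decode follows; the 0x7f event mask used for the event id is an assumption taken from the surrounding MIPS code, not something shown in this hunk.

/*
 * Standalone illustration of the raw-event decode added for CPU_LOONGSON3.
 * Assumption: the event id is the low 7 bits of the raw config (mask 0x7f),
 * matching how base_id is derived in the surrounding MIPS code.
 */
#include <stdio.h>

int main(void)
{
	unsigned int raw_id = 0x82;		/* e.g. from "perf stat -e r82" */
	unsigned int base_id = raw_id & 0x7f;	/* assumed event-id mask */

	printf("event id 0x%02x goes to the %s counter\n",
	       base_id, raw_id > 127 ? "odd" : "even");
	return 0;
}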