Commit be2d3ece authored by Linus Torvalds

Merge tag 'perf-tools-for-v5.18-2022-04-02' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux

Pull more perf tools updates from Arnaldo Carvalho de Melo:

 - Avoid SEGV if core.cpus isn't set in 'perf stat'.

 - Stop depending on .git files for building PERF-VERSION-FILE, used in
   'perf --version', fixing some perf tools build scenarios.

 - Convert tracepoint.py example to python3.

 - Update UAPI header copies from the kernel sources: socket,
   mman-common, msr-index, KVM, i915 and cpufeatures.

 - Update copy of libbpf's hashmap.c.

 - Directly return instead of using local ret variable in
   evlist__create_syswide_maps(), found by coccinelle.
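
   As an aside, that last cleanup is the kind of pattern a short
   Coccinelle semantic patch flags mechanically. A hedged sketch of such
   a rule (illustrative only, not necessarily the exact rule that found
   it):

   @@
   local idexpression ret;
   expression e;
   @@
   - ret = e;
   - return ret;
   + return e;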

* tag 'perf-tools-for-v5.18-2022-04-02' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux:
  perf python: Convert tracepoint.py example to python3
  perf evlist: Directly return instead of using local ret variable
  perf cpumap: More cpu map reuse by merge.
  perf cpumap: Add is_subset function
  perf evlist: Rename cpus to user_requested_cpus
  perf tools: Stop depending on .git files for building PERF-VERSION-FILE
  tools headers cpufeatures: Sync with the kernel sources
  tools headers UAPI: Sync drm/i915_drm.h with the kernel sources
  tools headers UAPI: Sync linux/kvm.h with the kernel sources
  tools kvm headers arm64: Update KVM headers from the kernel sources
  tools arch x86: Sync the msr-index.h copy with the kernel sources
  tools headers UAPI: Sync asm-generic/mman-common.h with the kernel
  perf beauty: Update copy of linux/socket.h with the kernel sources
  perf tools: Update copy of libbpf's hashmap.c
  perf stat: Avoid SEGV if core.cpus isn't set
parents d897b680 7e2022af
@@ -419,6 +419,16 @@ struct kvm_arm_copy_mte_tags {
 #define KVM_PSCI_RET_INVAL		PSCI_RET_INVALID_PARAMS
 #define KVM_PSCI_RET_DENIED		PSCI_RET_DENIED
 
+/* arm64-specific kvm_run::system_event flags */
+/*
+ * Reset caused by a PSCI v1.1 SYSTEM_RESET2 call.
+ * Valid only when the system event has a type of KVM_SYSTEM_EVENT_RESET.
+ */
+#define KVM_SYSTEM_EVENT_RESET_FLAG_PSCI_RESET2	(1ULL << 0)
+
+/* run->fail_entry.hardware_entry_failure_reason codes. */
+#define KVM_EXIT_FAIL_ENTRY_CPU_UNSUPPORTED	(1ULL << 0)
+
 #endif
 
 #endif /* __ARM_KVM_H__ */
@@ -388,6 +388,7 @@
 #define X86_FEATURE_TSXLDTRK		(18*32+16) /* TSX Suspend Load Address Tracking */
 #define X86_FEATURE_PCONFIG		(18*32+18) /* Intel PCONFIG */
 #define X86_FEATURE_ARCH_LBR		(18*32+19) /* Intel ARCH LBR */
+#define X86_FEATURE_IBT			(18*32+20) /* Indirect Branch Tracking */
 #define X86_FEATURE_AMX_BF16		(18*32+22) /* AMX bf16 Support */
 #define X86_FEATURE_AVX512_FP16		(18*32+23) /* AVX512 FP16 */
 #define X86_FEATURE_AMX_TILE		(18*32+24) /* AMX tile Support */
...
@@ -205,6 +205,8 @@
 #define RTIT_CTL_DISRETC		BIT(11)
 #define RTIT_CTL_PTW_EN			BIT(12)
 #define RTIT_CTL_BRANCH_EN		BIT(13)
+#define RTIT_CTL_EVENT_EN		BIT(31)
+#define RTIT_CTL_NOTNT			BIT_ULL(55)
 #define RTIT_CTL_MTC_RANGE_OFFSET	14
 #define RTIT_CTL_MTC_RANGE		(0x0full << RTIT_CTL_MTC_RANGE_OFFSET)
 #define RTIT_CTL_CYC_THRESH_OFFSET	19
@@ -360,11 +362,29 @@
 #define MSR_ATOM_CORE_TURBO_RATIOS	0x0000066c
 #define MSR_ATOM_CORE_TURBO_VIDS	0x0000066d
 #define MSR_CORE_PERF_LIMIT_REASONS	0x00000690
 #define MSR_GFX_PERF_LIMIT_REASONS	0x000006B0
 #define MSR_RING_PERF_LIMIT_REASONS	0x000006B1
 
+/* Control-flow Enforcement Technology MSRs */
+#define MSR_IA32_U_CET			0x000006a0 /* user mode cet */
+#define MSR_IA32_S_CET			0x000006a2 /* kernel mode cet */
+#define CET_SHSTK_EN			BIT_ULL(0)
+#define CET_WRSS_EN			BIT_ULL(1)
+#define CET_ENDBR_EN			BIT_ULL(2)
+#define CET_LEG_IW_EN			BIT_ULL(3)
+#define CET_NO_TRACK_EN			BIT_ULL(4)
+#define CET_SUPPRESS_DISABLE		BIT_ULL(5)
+#define CET_RESERVED			(BIT_ULL(6) | BIT_ULL(7) | BIT_ULL(8) | BIT_ULL(9))
+#define CET_SUPPRESS			BIT_ULL(10)
+#define CET_WAIT_ENDBR			BIT_ULL(11)
+
+#define MSR_IA32_PL0_SSP		0x000006a4 /* ring-0 shadow stack pointer */
+#define MSR_IA32_PL1_SSP		0x000006a5 /* ring-1 shadow stack pointer */
+#define MSR_IA32_PL2_SSP		0x000006a6 /* ring-2 shadow stack pointer */
+#define MSR_IA32_PL3_SSP		0x000006a7 /* ring-3 shadow stack pointer */
+#define MSR_IA32_INT_SSP_TAB		0x000006a8 /* exception shadow stack table */
+
 /* Hardware P state interface */
 #define MSR_PPERF			0x0000064e
 #define MSR_PERF_LIMIT_REASONS		0x0000064f
...
@@ -75,6 +75,8 @@
 #define MADV_POPULATE_READ	22	/* populate (prefault) page tables readable */
 #define MADV_POPULATE_WRITE	23	/* populate (prefault) page tables writable */
 
+#define MADV_DONTNEED_LOCKED	24	/* like DONTNEED, but drop locked pages too */
+
 /* compatibility flags */
 #define MAP_FILE	0
...
@@ -1118,10 +1118,16 @@ struct drm_i915_gem_exec_object2 {
 	/**
 	 * When the EXEC_OBJECT_PINNED flag is specified this is populated by
 	 * the user with the GTT offset at which this object will be pinned.
+	 *
 	 * When the I915_EXEC_NO_RELOC flag is specified this must contain the
 	 * presumed_offset of the object.
+	 *
 	 * During execbuffer2 the kernel populates it with the value of the
 	 * current GTT offset of the object, for future presumed_offset writes.
+	 *
+	 * See struct drm_i915_gem_create_ext for the rules when dealing with
+	 * alignment restrictions with I915_MEMORY_CLASS_DEVICE, on devices with
+	 * minimum page sizes, like DG2.
 	 */
 	__u64 offset;
@@ -3144,11 +3150,40 @@ struct drm_i915_gem_create_ext {
 	 *
 	 * The (page-aligned) allocated size for the object will be returned.
 	 *
-	 * Note that for some devices we have might have further minimum
-	 * page-size restrictions(larger than 4K), like for device local-memory.
-	 * However in general the final size here should always reflect any
-	 * rounding up, if for example using the I915_GEM_CREATE_EXT_MEMORY_REGIONS
-	 * extension to place the object in device local-memory.
+	 * DG2 64K min page size implications:
+	 *
+	 * On discrete platforms, starting from DG2, we have to contend with GTT
+	 * page size restrictions when dealing with I915_MEMORY_CLASS_DEVICE
+	 * objects. Specifically the hardware only supports 64K or larger GTT
+	 * page sizes for such memory. The kernel will already ensure that all
+	 * I915_MEMORY_CLASS_DEVICE memory is allocated using 64K or larger page
+	 * sizes underneath.
+	 *
+	 * Note that the returned size here will always reflect any required
+	 * rounding up done by the kernel, i.e 4K will now become 64K on devices
+	 * such as DG2.
+	 *
+	 * Special DG2 GTT address alignment requirement:
+	 *
+	 * The GTT alignment will also need to be at least 2M for such objects.
+	 *
+	 * Note that due to how the hardware implements 64K GTT page support, we
+	 * have some further complications:
+	 *
+	 * 1) The entire PDE (which covers a 2MB virtual address range), must
+	 * contain only 64K PTEs, i.e mixing 4K and 64K PTEs in the same
+	 * PDE is forbidden by the hardware.
+	 *
+	 * 2) We still need to support 4K PTEs for I915_MEMORY_CLASS_SYSTEM
+	 * objects.
+	 *
+	 * To keep things simple for userland, we mandate that any GTT mappings
+	 * must be aligned to and rounded up to 2MB. The kernel will internally
+	 * pad them out to the next 2MB boundary. As this only wastes virtual
+	 * address space and avoids userland having to copy any needlessly
+	 * complicated PDE sharing scheme (coloring) and only affects DG2, this
+	 * is deemed to be a good compromise.
 	 */
 	__u64 size;
 	/**
...
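
A short aside on the 2MB rule spelled out above: the alignment is a plain power-of-two round-up, which a userspace allocator would typically implement like the following sketch (DG2_GTT_ALIGN and the helper name are illustrative, not part of the uAPI):

#include <stdint.h>

#define DG2_GTT_ALIGN	(2ull << 20)	/* 2 MiB, per the comment above */

/* round a GTT offset or size up to the next 2 MiB boundary */
static inline uint64_t dg2_gtt_round_up(uint64_t v)
{
	return (v + DG2_GTT_ALIGN - 1) & ~(DG2_GTT_ALIGN - 1);
}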
@@ -562,9 +562,12 @@ struct kvm_s390_mem_op {
 	__u32 op;		/* type of operation */
 	__u64 buf;		/* buffer in userspace */
 	union {
-		__u8 ar;	/* the access register number */
+		struct {
+			__u8 ar;	/* the access register number */
+			__u8 key;	/* access key, ignored if flag unset */
+		};
 		__u32 sida_offset; /* offset into the sida */
-		__u8 reserved[32]; /* should be set to 0 */
+		__u8 reserved[32]; /* ignored */
 	};
 };
@@ -572,9 +575,12 @@ struct kvm_s390_mem_op {
 #define KVM_S390_MEMOP_LOGICAL_WRITE	1
 #define KVM_S390_MEMOP_SIDA_READ	2
 #define KVM_S390_MEMOP_SIDA_WRITE	3
+#define KVM_S390_MEMOP_ABSOLUTE_READ	4
+#define KVM_S390_MEMOP_ABSOLUTE_WRITE	5
 
 /* flags for kvm_s390_mem_op->flags */
 #define KVM_S390_MEMOP_F_CHECK_ONLY		(1ULL << 0)
 #define KVM_S390_MEMOP_F_INJECT_EXCEPTION	(1ULL << 1)
+#define KVM_S390_MEMOP_F_SKEY_PROTECTION	(1ULL << 2)
 
 /* for KVM_INTERRUPT */
 struct kvm_interrupt {
@@ -1137,6 +1143,7 @@ struct kvm_ppc_resize_hpt {
 #define KVM_CAP_PPC_AIL_MODE_3 210
 #define KVM_CAP_S390_MEM_OP_EXTENSION 211
 #define KVM_CAP_PMU_CAPABILITY 212
+#define KVM_CAP_DISABLE_QUIRKS2 213
 
 #ifdef KVM_CAP_IRQ_ROUTING
...
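
As a hedged usage sketch only: the new ABSOLUTE ops plus the SKEY flag let userspace ask KVM to honor s390 storage keys during the access. Assuming a suitable kvm fd and eliding error handling (the helper name and variables are hypothetical):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* hypothetical helper: read guest absolute memory with key checking */
static int memop_abs_read(int kvm_fd, __u64 gaddr, void *buf, __u32 len, __u8 key)
{
	struct kvm_s390_mem_op op;

	memset(&op, 0, sizeof(op));
	op.gaddr = gaddr;
	op.size  = len;
	op.op    = KVM_S390_MEMOP_ABSOLUTE_READ;
	op.buf   = (__u64)(unsigned long)buf;
	op.flags = KVM_S390_MEMOP_F_SKEY_PROTECTION;
	op.key   = key;

	return ioctl(kvm_fd, KVM_S390_MEM_OP, &op);
}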
@@ -319,6 +319,26 @@ struct perf_cpu perf_cpu_map__max(struct perf_cpu_map *map)
 	return map->nr > 0 ? map->map[map->nr - 1] : result;
 }
 
+/** Is 'b' a subset of 'a'. */
+bool perf_cpu_map__is_subset(const struct perf_cpu_map *a, const struct perf_cpu_map *b)
+{
+	if (a == b || !b)
+		return true;
+	if (!a || b->nr > a->nr)
+		return false;
+	for (int i = 0, j = 0; i < a->nr; i++) {
+		if (a->map[i].cpu > b->map[j].cpu)
+			return false;
+		if (a->map[i].cpu == b->map[j].cpu) {
+			j++;
+			if (j == b->nr)
+				return true;
+		}
+	}
+	return false;
+}
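
The contract of the new helper is easy to state: both cpu maps are kept sorted, so a single linear scan decides containment. A standalone sketch of the same idea over plain sorted int arrays:

#include <stdbool.h>
#include <stddef.h>

/* returns true when every element of sorted b[] occurs in sorted a[] */
static bool is_subset(const int *a, size_t na, const int *b, size_t nb)
{
	size_t j = 0;

	if (nb == 0)
		return true;	/* the empty set is a subset of anything */
	if (nb > na)
		return false;	/* b cannot fit inside a smaller a */
	for (size_t i = 0; i < na && j < nb; i++) {
		if (a[i] > b[j])
			return false;	/* walked past where b[j] had to be */
		if (a[i] == b[j])
			j++;
	}
	return j == nb;
}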
 /*
  * Merge two cpumaps
  *
@@ -335,17 +355,12 @@ struct perf_cpu_map *perf_cpu_map__merge(struct perf_cpu_map *orig,
 	int i, j, k;
 	struct perf_cpu_map *merged;
 
-	if (!orig && !other)
-		return NULL;
-	if (!orig) {
-		perf_cpu_map__get(other);
-		return other;
-	}
-	if (!other)
-		return orig;
-	if (orig->nr == other->nr &&
-	    !memcmp(orig->map, other->map, orig->nr * sizeof(struct perf_cpu)))
+	if (perf_cpu_map__is_subset(orig, other))
 		return orig;
+	if (perf_cpu_map__is_subset(other, orig)) {
+		perf_cpu_map__put(orig);
+		return perf_cpu_map__get(other);
+	}
 
 	tmp_len = orig->nr + other->nr;
 	tmp_cpus = malloc(tmp_len * sizeof(struct perf_cpu));
...
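
The net effect is that merge now reuses an existing map whenever one side already contains the other, instead of only when the two maps were bitwise identical. Illustrative outcomes (reference counting elided):

/*
 * orig = {0,1,2,3}, other = {1,2}     -> returns orig (other is a subset;
 *                                        this also covers other == NULL)
 * orig = {1,2},     other = {0,1,2,3} -> puts orig, returns get(other)
 * orig = {0,1},     other = {2,3}     -> allocates a new merged map {0,1,2,3}
 */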
@@ -41,10 +41,10 @@ static void __perf_evlist__propagate_maps(struct perf_evlist *evlist,
 	 */
 	if (!evsel->own_cpus || evlist->has_user_cpus) {
 		perf_cpu_map__put(evsel->cpus);
-		evsel->cpus = perf_cpu_map__get(evlist->cpus);
-	} else if (!evsel->system_wide && perf_cpu_map__empty(evlist->cpus)) {
+		evsel->cpus = perf_cpu_map__get(evlist->user_requested_cpus);
+	} else if (!evsel->system_wide && perf_cpu_map__empty(evlist->user_requested_cpus)) {
 		perf_cpu_map__put(evsel->cpus);
-		evsel->cpus = perf_cpu_map__get(evlist->cpus);
+		evsel->cpus = perf_cpu_map__get(evlist->user_requested_cpus);
 	} else if (evsel->cpus != evsel->own_cpus) {
 		perf_cpu_map__put(evsel->cpus);
 		evsel->cpus = perf_cpu_map__get(evsel->own_cpus);
@@ -123,10 +123,10 @@ static void perf_evlist__purge(struct perf_evlist *evlist)
 void perf_evlist__exit(struct perf_evlist *evlist)
 {
-	perf_cpu_map__put(evlist->cpus);
+	perf_cpu_map__put(evlist->user_requested_cpus);
 	perf_cpu_map__put(evlist->all_cpus);
 	perf_thread_map__put(evlist->threads);
-	evlist->cpus = NULL;
+	evlist->user_requested_cpus = NULL;
 	evlist->all_cpus = NULL;
 	evlist->threads = NULL;
 	fdarray__exit(&evlist->pollfd);
@@ -155,9 +155,9 @@ void perf_evlist__set_maps(struct perf_evlist *evlist,
 	 * original reference count of 1. If that is not the case it is up to
 	 * the caller to increase the reference count.
 	 */
-	if (cpus != evlist->cpus) {
-		perf_cpu_map__put(evlist->cpus);
-		evlist->cpus = perf_cpu_map__get(cpus);
+	if (cpus != evlist->user_requested_cpus) {
+		perf_cpu_map__put(evlist->user_requested_cpus);
+		evlist->user_requested_cpus = perf_cpu_map__get(cpus);
 	}
 
 	if (threads != evlist->threads) {
@@ -294,7 +294,7 @@ int perf_evlist__id_add_fd(struct perf_evlist *evlist,
 int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
 {
-	int nr_cpus = perf_cpu_map__nr(evlist->cpus);
+	int nr_cpus = perf_cpu_map__nr(evlist->user_requested_cpus);
 	int nr_threads = perf_thread_map__nr(evlist->threads);
 	int nfds = 0;
 	struct perf_evsel *evsel;
@@ -426,7 +426,7 @@ mmap_per_evsel(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
 	       int idx, struct perf_mmap_param *mp, int cpu_idx,
 	       int thread, int *_output, int *_output_overwrite)
 {
-	struct perf_cpu evlist_cpu = perf_cpu_map__cpu(evlist->cpus, cpu_idx);
+	struct perf_cpu evlist_cpu = perf_cpu_map__cpu(evlist->user_requested_cpus, cpu_idx);
 	struct perf_evsel *evsel;
 	int revent;
@@ -536,7 +536,7 @@ mmap_per_cpu(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
 	     struct perf_mmap_param *mp)
 {
 	int nr_threads = perf_thread_map__nr(evlist->threads);
-	int nr_cpus = perf_cpu_map__nr(evlist->cpus);
+	int nr_cpus = perf_cpu_map__nr(evlist->user_requested_cpus);
 	int cpu, thread;
 
 	for (cpu = 0; cpu < nr_cpus; cpu++) {
@@ -564,8 +564,8 @@ static int perf_evlist__nr_mmaps(struct perf_evlist *evlist)
 {
 	int nr_mmaps;
 
-	nr_mmaps = perf_cpu_map__nr(evlist->cpus);
-	if (perf_cpu_map__empty(evlist->cpus))
+	nr_mmaps = perf_cpu_map__nr(evlist->user_requested_cpus);
+	if (perf_cpu_map__empty(evlist->user_requested_cpus))
 		nr_mmaps = perf_thread_map__nr(evlist->threads);
 
 	return nr_mmaps;
@@ -576,7 +576,7 @@ int perf_evlist__mmap_ops(struct perf_evlist *evlist,
 			  struct perf_mmap_param *mp)
 {
 	struct perf_evsel *evsel;
-	const struct perf_cpu_map *cpus = evlist->cpus;
+	const struct perf_cpu_map *cpus = evlist->user_requested_cpus;
 	const struct perf_thread_map *threads = evlist->threads;
 
 	if (!ops || !ops->get || !ops->mmap)
...
@@ -25,5 +25,6 @@ struct perf_cpu_map {
 #endif
 
 int perf_cpu_map__idx(const struct perf_cpu_map *cpus, struct perf_cpu cpu);
+bool perf_cpu_map__is_subset(const struct perf_cpu_map *a, const struct perf_cpu_map *b);
 
 #endif /* __LIBPERF_INTERNAL_CPUMAP_H */
@@ -19,7 +19,12 @@ struct perf_evlist {
 	int			 nr_entries;
 	int			 nr_groups;
 	bool			 has_user_cpus;
-	struct perf_cpu_map	*cpus;
+	/**
+	 * The cpus passed from the command line or all online CPUs by
+	 * default.
+	 */
+	struct perf_cpu_map	*user_requested_cpus;
+	/** The union of all evsel cpu maps. */
 	struct perf_cpu_map	*all_cpus;
 	struct perf_thread_map	*threads;
 	int			 nr_mmaps;
...
@@ -691,9 +691,8 @@ $(OUTPUT)common-cmds.h: $(wildcard Documentation/perf-*.txt)
 $(SCRIPTS) : % : %.sh
 	$(QUIET_GEN)$(INSTALL) '$@.sh' '$(OUTPUT)$@'
 
-$(OUTPUT)PERF-VERSION-FILE: ../../.git/HEAD ../../.git/ORIG_HEAD
+$(OUTPUT)PERF-VERSION-FILE: .FORCE-PERF-VERSION-FILE
 	$(Q)$(SHELL_PATH) util/PERF-VERSION-GEN $(OUTPUT)
-	$(Q)touch $(OUTPUT)PERF-VERSION-FILE
 
 # These can record PERF_VERSION
 perf.spec $(SCRIPTS) \
@@ -1139,21 +1138,12 @@ else
 	@echo "FEATURE-DUMP file available in $(OUTPUT)FEATURE-DUMP"
 endif
 
-#
-# Trick: if ../../.git does not exist - we are building out of tree for example,
-# then force version regeneration:
-#
-ifeq ($(wildcard ../../.git/HEAD),)
-    GIT-HEAD-PHONY = ../../.git/HEAD ../../.git/ORIG_HEAD
-else
-    GIT-HEAD-PHONY =
-endif
-
 FORCE:
 
 .PHONY: all install clean config-clean strip install-gtk
 .PHONY: shell_compatibility_test please_set_SHELL_PATH_to_a_more_modern_shell
-.PHONY: $(GIT-HEAD-PHONY) TAGS tags cscope FORCE prepare
+.PHONY: .FORCE-PERF-VERSION-FILE TAGS tags cscope FORCE prepare
.PHONY: libtraceevent_plugins archheaders
 
 endif # force_fixdep
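
The replacement is the usual "always run the generator, only touch the output on change" make idiom: a phony prerequisite forces the recipe on every build, and the generator script (here PERF-VERSION-GEN) avoids rewriting PERF-VERSION-FILE when the version string is unchanged, so dependents are not rebuilt needlessly. A minimal standalone sketch of the idiom, with illustrative file and target names:

# version.h is regenerated every run, but only replaced when its
# contents change, so targets depending on it rebuild only then
version.h: .FORCE
	@echo '#define VERSION "'$$(git describe --always 2>/dev/null || echo unknown)'"' > $@.tmp
	@cmp -s $@.tmp $@ || mv $@.tmp $@; rm -f $@.tmp

.PHONY: .FORCE
.FORCE: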
@@ -199,7 +199,7 @@ static int cs_etm_set_option(struct auxtrace_record *itr,
 			     struct evsel *evsel, u32 option)
 {
 	int i, err = -EINVAL;
-	struct perf_cpu_map *event_cpus = evsel->evlist->core.cpus;
+	struct perf_cpu_map *event_cpus = evsel->evlist->core.user_requested_cpus;
 	struct perf_cpu_map *online_cpus = perf_cpu_map__new(NULL);
 
 	/* Set option of each CPU we have */
@@ -299,7 +299,7 @@ static int cs_etm_recording_options(struct auxtrace_record *itr,
 			container_of(itr, struct cs_etm_recording, itr);
 	struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
 	struct evsel *evsel, *cs_etm_evsel = NULL;
-	struct perf_cpu_map *cpus = evlist->core.cpus;
+	struct perf_cpu_map *cpus = evlist->core.user_requested_cpus;
 	bool privileged = perf_event_paranoid_check(-1);
 	int err = 0;
@@ -522,7 +522,7 @@ cs_etm_info_priv_size(struct auxtrace_record *itr __maybe_unused,
 {
 	int i;
 	int etmv3 = 0, etmv4 = 0, ete = 0;
-	struct perf_cpu_map *event_cpus = evlist->core.cpus;
+	struct perf_cpu_map *event_cpus = evlist->core.user_requested_cpus;
 	struct perf_cpu_map *online_cpus = perf_cpu_map__new(NULL);
 
 	/* cpu map is not empty, we have specific CPUs to work with */
@@ -713,7 +713,7 @@ static int cs_etm_info_fill(struct auxtrace_record *itr,
 	u32 offset;
 	u64 nr_cpu, type;
 	struct perf_cpu_map *cpu_map;
-	struct perf_cpu_map *event_cpus = session->evlist->core.cpus;
+	struct perf_cpu_map *event_cpus = session->evlist->core.user_requested_cpus;
 	struct perf_cpu_map *online_cpus = perf_cpu_map__new(NULL);
 	struct cs_etm_recording *ptr =
 			container_of(itr, struct cs_etm_recording, itr);
...
@@ -144,7 +144,7 @@ static int arm_spe_recording_options(struct auxtrace_record *itr,
 			container_of(itr, struct arm_spe_recording, itr);
 	struct perf_pmu *arm_spe_pmu = sper->arm_spe_pmu;
 	struct evsel *evsel, *arm_spe_evsel = NULL;
-	struct perf_cpu_map *cpus = evlist->core.cpus;
+	struct perf_cpu_map *cpus = evlist->core.user_requested_cpus;
 	bool privileged = perf_event_paranoid_check(-1);
 	struct evsel *tracking_evsel;
 	int err;
...
@@ -110,7 +110,7 @@ static int intel_bts_recording_options(struct auxtrace_record *itr,
 			container_of(itr, struct intel_bts_recording, itr);
 	struct perf_pmu *intel_bts_pmu = btsr->intel_bts_pmu;
 	struct evsel *evsel, *intel_bts_evsel = NULL;
-	const struct perf_cpu_map *cpus = evlist->core.cpus;
+	const struct perf_cpu_map *cpus = evlist->core.user_requested_cpus;
 	bool privileged = perf_event_paranoid_check(-1);
 
 	if (opts->auxtrace_sample_mode) {
...
@@ -382,7 +382,7 @@ static int intel_pt_info_fill(struct auxtrace_record *itr,
 			ui__warning("Intel Processor Trace: TSC not available\n");
 	}
 
-	per_cpu_mmaps = !perf_cpu_map__empty(session->evlist->core.cpus);
+	per_cpu_mmaps = !perf_cpu_map__empty(session->evlist->core.user_requested_cpus);
 
 	auxtrace_info->type = PERF_AUXTRACE_INTEL_PT;
 	auxtrace_info->priv[INTEL_PT_PMU_TYPE] = intel_pt_pmu->type;
@@ -632,7 +632,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
 	struct perf_pmu *intel_pt_pmu = ptr->intel_pt_pmu;
 	bool have_timing_info, need_immediate = false;
 	struct evsel *evsel, *intel_pt_evsel = NULL;
-	const struct perf_cpu_map *cpus = evlist->core.cpus;
+	const struct perf_cpu_map *cpus = evlist->core.user_requested_cpus;
 	bool privileged = perf_event_paranoid_check(-1);
 	u64 tsc_bit;
 	int err;
...
@@ -151,7 +151,7 @@ static int bench_evlist_open_close__run(char *evstr)
 	init_stats(&time_stats);
 
-	printf("  Number of cpus:\t%d\n", perf_cpu_map__nr(evlist->core.cpus));
+	printf("  Number of cpus:\t%d\n", perf_cpu_map__nr(evlist->core.user_requested_cpus));
 	printf("  Number of threads:\t%d\n", evlist->core.threads->nr);
 	printf("  Number of events:\t%d (%d fds)\n",
 	       evlist->core.nr_entries, evlist__count_evsel_fds(evlist));
...
@@ -301,7 +301,7 @@ static int set_tracing_cpumask(struct perf_cpu_map *cpumap)
 static int set_tracing_cpu(struct perf_ftrace *ftrace)
 {
-	struct perf_cpu_map *cpumap = ftrace->evlist->core.cpus;
+	struct perf_cpu_map *cpumap = ftrace->evlist->core.user_requested_cpus;
 
 	if (!target__has_cpu(&ftrace->target))
 		return 0;
...
@@ -987,7 +987,7 @@ static int record__thread_data_init_maps(struct record_thread *thread_data, stru
 	int m, tm, nr_mmaps = evlist->core.nr_mmaps;
 	struct mmap *mmap = evlist->mmap;
 	struct mmap *overwrite_mmap = evlist->overwrite_mmap;
-	struct perf_cpu_map *cpus = evlist->core.cpus;
+	struct perf_cpu_map *cpus = evlist->core.user_requested_cpus;
 
 	thread_data->nr_mmaps = bitmap_weight(thread_data->mask->maps.bits,
 					      thread_data->mask->maps.nbits);
@@ -1881,7 +1881,7 @@ static int record__synthesize(struct record *rec, bool tail)
 		return err;
 	}
 
-	err = perf_event__synthesize_cpu_map(&rec->tool, rec->evlist->core.cpus,
+	err = perf_event__synthesize_cpu_map(&rec->tool, rec->evlist->core.user_requested_cpus,
 					     process_synthesized_event, NULL);
 	if (err < 0) {
 		pr_err("Couldn't synthesize cpu map.\n");
@@ -3675,7 +3675,7 @@ static int record__init_thread_default_masks(struct record *rec, struct perf_cpu
 static int record__init_thread_masks(struct record *rec)
 {
 	int ret = 0;
-	struct perf_cpu_map *cpus = rec->evlist->core.cpus;
+	struct perf_cpu_map *cpus = rec->evlist->core.user_requested_cpus;
 
 	if (!record__threads_enabled(rec))
 		return record__init_thread_default_masks(rec, cpus);
...
@@ -804,7 +804,7 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
 	if (group)
 		evlist__set_leader(evsel_list);
 
-	if (!cpu_map__is_dummy(evsel_list->core.cpus)) {
+	if (!cpu_map__is_dummy(evsel_list->core.user_requested_cpus)) {
 		if (affinity__setup(&saved_affinity) < 0)
 			return -1;
 		affinity = &saved_affinity;
@@ -1458,7 +1458,7 @@ static int perf_stat_init_aggr_mode(void)
 	aggr_cpu_id_get_t get_id = aggr_mode__get_aggr(stat_config.aggr_mode);
 
 	if (get_id) {
-		stat_config.aggr_map = cpu_aggr_map__new(evsel_list->core.cpus,
+		stat_config.aggr_map = cpu_aggr_map__new(evsel_list->core.user_requested_cpus,
 							 get_id, /*data=*/NULL);
 		if (!stat_config.aggr_map) {
 			pr_err("cannot build %s map", aggr_mode__string[stat_config.aggr_mode]);
@@ -1472,7 +1472,10 @@ static int perf_stat_init_aggr_mode(void)
 	 * taking the highest cpu number to be the size of
 	 * the aggregation translate cpumap.
 	 */
-	nr = perf_cpu_map__max(evsel_list->core.cpus).cpu;
+	if (evsel_list->core.user_requested_cpus)
+		nr = perf_cpu_map__max(evsel_list->core.user_requested_cpus).cpu;
+	else
+		nr = 0;
 	stat_config.cpus_aggr_map = cpu_aggr_map__empty_new(nr + 1);
 	return stat_config.cpus_aggr_map ? 0 : -ENOMEM;
 }
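
This guard is the SEGV fix called out in the summary: perf_cpu_map__max() (see the cpumap hunk earlier) dereferences its argument unconditionally, so a NULL user_requested_cpus, e.g. with a task-only target, crashed. A minimal standalone sketch of the failure mode and the call-site fix (names are illustrative):

#include <stddef.h>

struct cpu_map { int nr; int map[]; };

/* mirrors perf_cpu_map__max(): dereferences 'map', so NULL faults */
static int map_max(const struct cpu_map *map)
{
	return map->nr > 0 ? map->map[map->nr - 1] : -1;
}

/* the fix: check for a missing map at the call site, fall back to 0 */
static int max_cpu_or_zero(const struct cpu_map *map)
{
	return map ? map_max(map) : 0;
}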
@@ -1627,7 +1630,7 @@ static int perf_stat_init_aggr_mode_file(struct perf_stat *st)
 	if (!get_id)
 		return 0;
 
-	stat_config.aggr_map = cpu_aggr_map__new(evsel_list->core.cpus, get_id, env);
+	stat_config.aggr_map = cpu_aggr_map__new(evsel_list->core.user_requested_cpus, get_id, env);
 	if (!stat_config.aggr_map) {
 		pr_err("cannot build %s map", aggr_mode__string[stat_config.aggr_mode]);
 		return -1;
...
@@ -1021,7 +1021,7 @@ static int perf_top__start_counters(struct perf_top *top)
 	evlist__for_each_entry(evlist, counter) {
 try_again:
-		if (evsel__open(counter, top->evlist->core.cpus,
+		if (evsel__open(counter, top->evlist->core.user_requested_cpus,
 				top->evlist->core.threads) < 0) {
 
 			/*
...
@@ -34,7 +34,7 @@ def main():
 		if not isinstance(event, perf.sample_event):
 			continue
 
-		print "time %u prev_comm=%s prev_pid=%d prev_prio=%d prev_state=0x%x ==> next_comm=%s next_pid=%d next_prio=%d" % (
+		print("time %u prev_comm=%s prev_pid=%d prev_prio=%d prev_state=0x%x ==> next_comm=%s next_pid=%d next_prio=%d" % (
 		   event.sample_time,
 		   event.prev_comm,
 		   event.prev_pid,
@@ -42,7 +42,7 @@ def main():
 		   event.prev_state,
 		   event.next_comm,
 		   event.next_pid,
-		   event.next_prio)
+		   event.next_prio))
 
 if __name__ == '__main__':
     main()
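
The change above is the mechanical core of the python3 move: print became a function, so the old statement form is a SyntaxError under python3. A two-line illustration:

ts = 123456
# Python 2 only: print "time %u" % ts   (statement form; SyntaxError on 3)
print("time %u" % ts)  # function form, accepted by both 2.7 and 3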
@@ -366,6 +366,7 @@ struct ucred {
 #define SOL_XDP		283
 #define SOL_MPTCP	284
 #define SOL_MCTP	285
+#define SOL_SMC		286
 
 /* IPX options */
 #define IPX_TYPE	1
...
@@ -174,7 +174,7 @@ void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
 	mp->idx = idx;
 
 	if (per_cpu) {
-		mp->cpu = perf_cpu_map__cpu(evlist->core.cpus, idx);
+		mp->cpu = perf_cpu_map__cpu(evlist->core.user_requested_cpus, idx);
 		if (evlist->core.threads)
 			mp->tid = perf_thread_map__pid(evlist->core.threads, 0);
 		else
...
@@ -38,7 +38,7 @@ int perf_ftrace__latency_prepare_bpf(struct perf_ftrace *ftrace)
 	/* don't need to set cpu filter for system-wide mode */
 	if (ftrace->target.cpu_list) {
-		ncpus = perf_cpu_map__nr(ftrace->evlist->core.cpus);
+		ncpus = perf_cpu_map__nr(ftrace->evlist->core.user_requested_cpus);
 		bpf_map__set_max_entries(skel->maps.cpu_filter, ncpus);
 	}
@@ -63,7 +63,7 @@ int perf_ftrace__latency_prepare_bpf(struct perf_ftrace *ftrace)
 		fd = bpf_map__fd(skel->maps.cpu_filter);
 
 		for (i = 0; i < ncpus; i++) {
-			cpu = perf_cpu_map__cpu(ftrace->evlist->core.cpus, i).cpu;
+			cpu = perf_cpu_map__cpu(ftrace->evlist->core.user_requested_cpus, i).cpu;
 			bpf_map_update_elem(fd, &cpu, &val, BPF_ANY);
 		}
 	}
...
@@ -440,7 +440,7 @@ static void __evlist__disable(struct evlist *evlist, char *evsel_name)
 	bool has_imm = false;
 
 	// See explanation in evlist__close()
-	if (!cpu_map__is_dummy(evlist->core.cpus)) {
+	if (!cpu_map__is_dummy(evlist->core.user_requested_cpus)) {
 		if (affinity__setup(&saved_affinity) < 0)
 			return;
 		affinity = &saved_affinity;
@@ -500,7 +500,7 @@ static void __evlist__enable(struct evlist *evlist, char *evsel_name)
 	struct affinity saved_affinity, *affinity = NULL;
 
 	// See explanation in evlist__close()
-	if (!cpu_map__is_dummy(evlist->core.cpus)) {
+	if (!cpu_map__is_dummy(evlist->core.user_requested_cpus)) {
 		if (affinity__setup(&saved_affinity) < 0)
 			return;
 		affinity = &saved_affinity;
@@ -565,7 +565,7 @@ static int evlist__enable_event_cpu(struct evlist *evlist, struct evsel *evsel,
 static int evlist__enable_event_thread(struct evlist *evlist, struct evsel *evsel, int thread)
 {
 	int cpu;
-	int nr_cpus = perf_cpu_map__nr(evlist->core.cpus);
+	int nr_cpus = perf_cpu_map__nr(evlist->core.user_requested_cpus);
 
 	if (!evsel->core.fd)
 		return -EINVAL;
@@ -580,7 +580,7 @@ static int evlist__enable_event_thread(struct evlist *evlist, struct evsel *evse
 int evlist__enable_event_idx(struct evlist *evlist, struct evsel *evsel, int idx)
 {
-	bool per_cpu_mmaps = !perf_cpu_map__empty(evlist->core.cpus);
+	bool per_cpu_mmaps = !perf_cpu_map__empty(evlist->core.user_requested_cpus);
 
 	if (per_cpu_mmaps)
 		return evlist__enable_event_cpu(evlist, evsel, idx);
@@ -1301,10 +1301,11 @@ void evlist__close(struct evlist *evlist)
 	struct affinity affinity;
 
 	/*
-	 * With perf record core.cpus is usually NULL.
+	 * With perf record core.user_requested_cpus is usually NULL.
 	 * Use the old method to handle this for now.
 	 */
-	if (!evlist->core.cpus || cpu_map__is_dummy(evlist->core.cpus)) {
+	if (!evlist->core.user_requested_cpus ||
+	    cpu_map__is_dummy(evlist->core.user_requested_cpus)) {
 		evlist__for_each_entry_reverse(evlist, evsel)
 			evsel__close(evsel);
 		return;
@@ -1330,7 +1331,6 @@ static int evlist__create_syswide_maps(struct evlist *evlist)
 {
 	struct perf_cpu_map *cpus;
 	struct perf_thread_map *threads;
-	int err = -ENOMEM;
 
 	/*
 	 * Try reading /sys/devices/system/cpu/online to get
@@ -1355,7 +1355,7 @@ static int evlist__create_syswide_maps(struct evlist *evlist)
 out_put:
 	perf_cpu_map__put(cpus);
 out:
-	return err;
+	return -ENOMEM;
 }
 
 int evlist__open(struct evlist *evlist)
@@ -1367,7 +1367,7 @@ int evlist__open(struct evlist *evlist)
 	 * Default: one fd per CPU, all threads, aka systemwide
 	 * as sys_perf_event_open(cpu = -1, thread = -1) is EINVAL
 	 */
-	if (evlist->core.threads == NULL && evlist->core.cpus == NULL) {
+	if (evlist->core.threads == NULL && evlist->core.user_requested_cpus == NULL) {
 		err = evlist__create_syswide_maps(evlist);
 		if (err < 0)
 			goto out_err;
...
@@ -75,7 +75,7 @@ void hashmap__clear(struct hashmap *map)
 
 void hashmap__free(struct hashmap *map)
 {
-	if (!map)
+	if (IS_ERR_OR_NULL(map))
 		return;
 
 	hashmap__clear(map);
@@ -238,4 +238,3 @@ bool hashmap__delete(struct hashmap *map, const void *key,
 
 	return true;
 }
-
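
Why the stronger check: hashmap pointers in this codebase may hold ERR_PTR-encoded errno values rather than a valid map or NULL, and freeing such a pointer must be a no-op. For reference, the kernel-style helpers this relies on behave roughly like the following simplified sketch:

#include <stdbool.h>

#define MAX_ERRNO	4095

/* error-pointer convention: the top 4095 addresses encode -errno values */
static inline bool IS_ERR_VALUE(unsigned long x)
{
	return x >= (unsigned long)-MAX_ERRNO;
}

static inline bool IS_ERR_OR_NULL(const void *ptr)
{
	return !ptr || IS_ERR_VALUE((unsigned long)ptr);
}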
@@ -106,7 +106,7 @@ void evlist__config(struct evlist *evlist, struct record_opts *opts, struct call
 	if (opts->group)
 		evlist__set_leader(evlist);
 
-	if (perf_cpu_map__cpu(evlist->core.cpus, 0).cpu < 0)
+	if (perf_cpu_map__cpu(evlist->core.user_requested_cpus, 0).cpu < 0)
 		opts->no_inherit = true;
 
 	use_comm_exec = perf_can_comm_exec();
@@ -244,7 +244,7 @@ bool evlist__can_select_event(struct evlist *evlist, const char *str)
 
 	evsel = evlist__last(temp_evlist);
 
-	if (!evlist || perf_cpu_map__empty(evlist->core.cpus)) {
+	if (!evlist || perf_cpu_map__empty(evlist->core.user_requested_cpus)) {
 		struct perf_cpu_map *cpus = perf_cpu_map__new(NULL);
 
 		if (cpus)
@@ -252,7 +252,7 @@ bool evlist__can_select_event(struct evlist *evlist, const char *str)
 		perf_cpu_map__put(cpus);
 	} else {
-		cpu = perf_cpu_map__cpu(evlist->core.cpus, 0);
+		cpu = perf_cpu_map__cpu(evlist->core.user_requested_cpus, 0);
 	}
 
 	while (1) {
...
@@ -114,7 +114,8 @@ int evlist__start_sb_thread(struct evlist *evlist, struct target *target)
 	}
 
 	evlist__for_each_entry(evlist, counter) {
-		if (evsel__open(counter, evlist->core.cpus, evlist->core.threads) < 0)
+		if (evsel__open(counter, evlist->core.user_requested_cpus,
+				evlist->core.threads) < 0)
 			goto out_delete_evlist;
 	}
...
@@ -929,7 +929,7 @@ static void print_no_aggr_metric(struct perf_stat_config *config,
 	int all_idx;
 	struct perf_cpu cpu;
 
-	perf_cpu_map__for_each_cpu(cpu, all_idx, evlist->core.cpus) {
+	perf_cpu_map__for_each_cpu(cpu, all_idx, evlist->core.user_requested_cpus) {
 		struct evsel *counter;
 		bool first = true;
...
@@ -2127,7 +2127,7 @@ int perf_event__synthesize_stat_events(struct perf_stat_config *config, struct p
 		return err;
 	}
 
-	err = perf_event__synthesize_cpu_map(tool, evlist->core.cpus, process, NULL);
+	err = perf_event__synthesize_cpu_map(tool, evlist->core.user_requested_cpus, process, NULL);
 	if (err < 0) {
 		pr_err("Couldn't synthesize thread map.\n");
 		return err;
...
@@ -95,15 +95,17 @@ size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size)
 
 	if (target->cpu_list)
 		ret += SNPRINTF(bf + ret, size - ret, ", CPU%s: %s)",
-				perf_cpu_map__nr(top->evlist->core.cpus) > 1 ? "s" : "",
+				perf_cpu_map__nr(top->evlist->core.user_requested_cpus) > 1
+				? "s" : "",
 				target->cpu_list);
 	else {
 		if (target->tid)
 			ret += SNPRINTF(bf + ret, size - ret, ")");
 		else
 			ret += SNPRINTF(bf + ret, size - ret, ", %d CPU%s)",
-					perf_cpu_map__nr(top->evlist->core.cpus),
-					perf_cpu_map__nr(top->evlist->core.cpus) > 1 ? "s" : "");
+					perf_cpu_map__nr(top->evlist->core.user_requested_cpus),
+					perf_cpu_map__nr(top->evlist->core.user_requested_cpus) > 1
+					? "s" : "");
 	}
 
 	perf_top__reset_sample_counters(top);
...