nexedi / linux

Commit e4f57147, authored Nov 29, 2017 by Ingo Molnar
Merge branch 'perf/urgent' into perf/core, to pick up fixes

Signed-off-by: Ingo Molnar <mingo@kernel.org>

Parents: 754fe00f 6e948c67
Showing 16 changed files with 467 additions and 287 deletions
arch/x86/events/intel/core.c                        +23  -12
kernel/events/core.c                                 +1   -0
tools/arch/arm/include/uapi/asm/kvm.h                +7   -0
tools/arch/arm64/include/uapi/asm/kvm.h              +7   -0
tools/arch/x86/include/asm/cpufeatures.h           +274 -263
tools/arch/x86/include/asm/disabled-features.h       +7   -1
tools/include/uapi/asm-generic/mman.h                +1   -0
tools/include/uapi/drm/drm.h                        +41   -0
tools/include/uapi/drm/i915_drm.h                   +27   -6
tools/include/uapi/linux/kvm.h                       +1   -0
tools/include/uapi/linux/perf_event.h                +1   -0
tools/include/uapi/linux/prctl.h                     +9   -0
tools/perf/bench/numa.c                             +51   -5
tools/perf/tests/task-exit.c                         +4   -0
tools/perf/trace/beauty/mmap.c                       +3   -0
tools/perf/util/intel-pt-decoder/inat.h             +10   -0
arch/x86/events/intel/core.c
@@ -3734,6 +3734,19 @@ EVENT_ATTR_STR(cycles-t, cycles_t, "event=0x3c,in_tx=1");
 EVENT_ATTR_STR(cycles-ct, cycles_ct, "event=0x3c,in_tx=1,in_tx_cp=1");
 
 static struct attribute *hsw_events_attrs[] = {
+	EVENT_PTR(mem_ld_hsw),
+	EVENT_PTR(mem_st_hsw),
+	EVENT_PTR(td_slots_issued),
+	EVENT_PTR(td_slots_retired),
+	EVENT_PTR(td_fetch_bubbles),
+	EVENT_PTR(td_total_slots),
+	EVENT_PTR(td_total_slots_scale),
+	EVENT_PTR(td_recovery_bubbles),
+	EVENT_PTR(td_recovery_bubbles_scale),
+	NULL
+};
+
+static struct attribute *hsw_tsx_events_attrs[] = {
 	EVENT_PTR(tx_start),
 	EVENT_PTR(tx_commit),
 	EVENT_PTR(tx_abort),
@@ -3746,18 +3759,16 @@ static struct attribute *hsw_events_attrs[] = {
 	EVENT_PTR(el_conflict),
 	EVENT_PTR(cycles_t),
 	EVENT_PTR(cycles_ct),
-	EVENT_PTR(mem_ld_hsw),
-	EVENT_PTR(mem_st_hsw),
-	EVENT_PTR(td_slots_issued),
-	EVENT_PTR(td_slots_retired),
-	EVENT_PTR(td_fetch_bubbles),
-	EVENT_PTR(td_total_slots),
-	EVENT_PTR(td_total_slots_scale),
-	EVENT_PTR(td_recovery_bubbles),
-	EVENT_PTR(td_recovery_bubbles_scale),
 	NULL
 };
 
+static __init struct attribute **get_hsw_events_attrs(void)
+{
+	return boot_cpu_has(X86_FEATURE_RTM) ?
+		merge_attr(hsw_events_attrs, hsw_tsx_events_attrs) :
+		hsw_events_attrs;
+}
+
 static ssize_t freeze_on_smi_show(struct device *cdev,
 				  struct device_attribute *attr,
 				  char *buf)
@@ -4186,7 +4197,7 @@ __init int intel_pmu_init(void)
 		x86_pmu.hw_config = hsw_hw_config;
 		x86_pmu.get_event_constraints = hsw_get_event_constraints;
-		x86_pmu.cpu_events = hsw_events_attrs;
+		x86_pmu.cpu_events = get_hsw_events_attrs();
 		x86_pmu.lbr_double_abort = true;
 		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
 			hsw_format_attr : nhm_format_attr;
@@ -4225,7 +4236,7 @@ __init int intel_pmu_init(void)
 		x86_pmu.hw_config = hsw_hw_config;
 		x86_pmu.get_event_constraints = hsw_get_event_constraints;
-		x86_pmu.cpu_events = hsw_events_attrs;
+		x86_pmu.cpu_events = get_hsw_events_attrs();
 		x86_pmu.limit_period = bdw_limit_period;
 		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
 			hsw_format_attr : nhm_format_attr;
@@ -4283,7 +4294,7 @@ __init int intel_pmu_init(void)
 		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
 			hsw_format_attr : nhm_format_attr;
 		extra_attr = merge_attr(extra_attr, skl_format_attr);
-		x86_pmu.cpu_events = hsw_events_attrs;
+		x86_pmu.cpu_events = get_hsw_events_attrs();
 		intel_pmu_pebs_data_source_skl(boot_cpu_data.x86_model ==
 					       INTEL_FAM6_SKYLAKE_X);
 		pr_cont("Skylake events, ");
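The change above splits the Haswell event list so the TSX events are only exposed when RTM is present: get_hsw_events_attrs() returns the base list as-is, or concatenates the two NULL-terminated arrays with the kernel's existing merge_attr() helper. As a rough illustration of that concatenation pattern only, here is a standalone userspace sketch with made-up string data; it is not the kernel implementation.

/* Illustrative only: merge two NULL-terminated pointer arrays, the way
 * merge_attr() combines two attribute lists. Names and data are made up. */
#include <stdio.h>
#include <stdlib.h>

static const char **merge_strings(const char **a, const char **b)
{
	size_t i, j = 0, na = 0, nb = 0;
	const char **out;

	while (a[na]) na++;
	while (b[nb]) nb++;

	out = malloc((na + nb + 1) * sizeof(*out));
	if (!out)
		return NULL;
	for (i = 0; i < na; i++) out[j++] = a[i];
	for (i = 0; i < nb; i++) out[j++] = b[i];
	out[j] = NULL;	/* keep the NULL terminator, like the attribute arrays */
	return out;
}

int main(void)
{
	const char *base[] = { "mem_ld_hsw", "mem_st_hsw", NULL };
	const char *tsx[]  = { "tx_start", "tx_commit", NULL };
	const char **all = merge_strings(base, tsx);

	for (size_t i = 0; all && all[i]; i++)
		puts(all[i]);
	free(all);
	return 0;
}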
kernel/events/core.c
@@ -6680,6 +6680,7 @@ static void perf_fill_ns_link_info(struct perf_ns_link_info *ns_link_info,
 		ns_inode = ns_path.dentry->d_inode;
 		ns_link_info->dev = new_encode_dev(ns_inode->i_sb->s_dev);
 		ns_link_info->ino = ns_inode->i_ino;
+		path_put(&ns_path);
 	}
 }
tools/arch/arm/include/uapi/asm/kvm.h
@@ -152,6 +152,12 @@ struct kvm_arch_memory_slot {
 	(__ARM_CP15_REG(op1, 0, crm, 0) | KVM_REG_SIZE_U64)
 #define ARM_CP15_REG64(...) __ARM_CP15_REG64(__VA_ARGS__)
 
+/* PL1 Physical Timer Registers */
+#define KVM_REG_ARM_PTIMER_CTL		ARM_CP15_REG32(0, 14, 2, 1)
+#define KVM_REG_ARM_PTIMER_CNT		ARM_CP15_REG64(0, 14)
+#define KVM_REG_ARM_PTIMER_CVAL		ARM_CP15_REG64(2, 14)
+
+/* Virtual Timer Registers */
 #define KVM_REG_ARM_TIMER_CTL		ARM_CP15_REG32(0, 14, 3, 1)
 #define KVM_REG_ARM_TIMER_CNT		ARM_CP15_REG64(1, 14)
 #define KVM_REG_ARM_TIMER_CVAL		ARM_CP15_REG64(3, 14)
@@ -216,6 +222,7 @@ struct kvm_arch_memory_slot {
 #define   KVM_DEV_ARM_ITS_SAVE_TABLES		1
 #define   KVM_DEV_ARM_ITS_RESTORE_TABLES	2
 #define   KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES	3
+#define   KVM_DEV_ARM_ITS_CTRL_RESET		4
 
 /* KVM_IRQ_LINE irq field index values */
 #define KVM_ARM_IRQ_TYPE_SHIFT		24
tools/arch/arm64/include/uapi/asm/kvm.h
@@ -196,6 +196,12 @@ struct kvm_arch_memory_slot {
 #define ARM64_SYS_REG(...) (__ARM64_SYS_REG(__VA_ARGS__) | KVM_REG_SIZE_U64)
 
+/* Physical Timer EL0 Registers */
+#define KVM_REG_ARM_PTIMER_CTL		ARM64_SYS_REG(3, 3, 14, 2, 1)
+#define KVM_REG_ARM_PTIMER_CVAL		ARM64_SYS_REG(3, 3, 14, 2, 2)
+#define KVM_REG_ARM_PTIMER_CNT		ARM64_SYS_REG(3, 3, 14, 0, 1)
+
+/* EL0 Virtual Timer Registers */
 #define KVM_REG_ARM_TIMER_CTL		ARM64_SYS_REG(3, 3, 14, 3, 1)
 #define KVM_REG_ARM_TIMER_CNT		ARM64_SYS_REG(3, 3, 14, 3, 2)
 #define KVM_REG_ARM_TIMER_CVAL		ARM64_SYS_REG(3, 3, 14, 0, 2)
@@ -228,6 +234,7 @@ struct kvm_arch_memory_slot {
 #define   KVM_DEV_ARM_ITS_SAVE_TABLES		1
 #define   KVM_DEV_ARM_ITS_RESTORE_TABLES	2
 #define   KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES	3
+#define   KVM_DEV_ARM_ITS_CTRL_RESET		4
 
 /* Device Control API on vcpu fd */
 #define KVM_ARM_VCPU_PMU_V3_CTRL	0
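Reading one of the newly named registers from userspace goes through the existing KVM_GET_ONE_REG ioctl on a vcpu fd. A hedged sketch, assuming an arm64 host whose headers already carry KVM_REG_ARM_PTIMER_CNT and a vcpu fd obtained through the usual KVM_CREATE_VM/KVM_CREATE_VCPU sequence:

/* Sketch: read the physical counter from a KVM vcpu fd (arm64 host assumed). */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>	/* pulls in asm/kvm.h with KVM_REG_ARM_PTIMER_CNT on arm64 */

int read_ptimer_cnt(int vcpu_fd, uint64_t *cnt)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_ARM_PTIMER_CNT,
		.addr = (uint64_t)(uintptr_t)cnt,	/* userspace buffer for the value */
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}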
tools/arch/x86/include/asm/cpufeatures.h
(collapsed diff: +274 / -263, not expanded)
tools/arch/x86/include/asm/disabled-features.h
@@ -16,6 +16,12 @@
 # define DISABLE_MPX	(1<<(X86_FEATURE_MPX & 31))
 #endif
 
+#ifdef CONFIG_X86_INTEL_UMIP
+# define DISABLE_UMIP	0
+#else
+# define DISABLE_UMIP	(1<<(X86_FEATURE_UMIP & 31))
+#endif
+
 #ifdef CONFIG_X86_64
 # define DISABLE_VME		(1<<(X86_FEATURE_VME & 31))
 # define DISABLE_K6_MTRR	(1<<(X86_FEATURE_K6_MTRR & 31))
@@ -63,7 +69,7 @@
 #define DISABLED_MASK13	0
 #define DISABLED_MASK14	0
 #define DISABLED_MASK15	0
-#define DISABLED_MASK16	(DISABLE_PKU|DISABLE_OSPKE|DISABLE_LA57)
+#define DISABLED_MASK16	(DISABLE_PKU|DISABLE_OSPKE|DISABLE_LA57|DISABLE_UMIP)
 #define DISABLED_MASK17	0
 #define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18)
tools/include/uapi/asm-generic/mman.h
@@ -13,6 +13,7 @@
 #define MAP_NONBLOCK	0x10000		/* do not block on IO */
 #define MAP_STACK	0x20000		/* give out an address that is best suited for process/thread stacks */
 #define MAP_HUGETLB	0x40000		/* create a huge page mapping */
+#define MAP_SYNC	0x80000		/* perform synchronous page faults for the mapping */
 
 /* Bits [26:31] are reserved, see mman-common.h for MAP_HUGETLB usage */
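A minimal userspace sketch of the new flag, assuming a DAX-capable filesystem and that MAP_SYNC is paired with MAP_SHARED_VALIDATE so unsupported flags fail loudly; the path, size, and fallback defines are illustrative only:

/* Sketch: map a file with MAP_SYNC (hypothetical DAX-backed path). */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>

#ifndef MAP_SYNC
#define MAP_SYNC		0x80000	/* value from the asm-generic header above */
#endif
#ifndef MAP_SHARED_VALIDATE
#define MAP_SHARED_VALIDATE	0x03
#endif

int main(void)
{
	int fd = open("/mnt/pmem/data", O_RDWR);	/* hypothetical DAX file */
	if (fd < 0) { perror("open"); return 1; }

	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		       MAP_SHARED_VALIDATE | MAP_SYNC, fd, 0);
	if (p == MAP_FAILED) { perror("mmap"); close(fd); return 1; }

	((char *)p)[0] = 1;	/* store is made durable via synchronous page faults */
	munmap(p, 4096);
	close(fd);
	return 0;
}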
tools/include/uapi/drm/drm.h
@@ -737,6 +737,28 @@ struct drm_syncobj_array {
 	__u32 pad;
 };
 
+/* Query current scanout sequence number */
+struct drm_crtc_get_sequence {
+	__u32 crtc_id;		/* requested crtc_id */
+	__u32 active;		/* return: crtc output is active */
+	__u64 sequence;		/* return: most recent vblank sequence */
+	__s64 sequence_ns;	/* return: most recent time of first pixel out */
+};
+
+/* Queue event to be delivered at specified sequence. Time stamp marks
+ * when the first pixel of the refresh cycle leaves the display engine
+ * for the display
+ */
+#define DRM_CRTC_SEQUENCE_RELATIVE		0x00000001	/* sequence is relative to current */
+#define DRM_CRTC_SEQUENCE_NEXT_ON_MISS		0x00000002	/* Use next sequence if we've missed */
+
+struct drm_crtc_queue_sequence {
+	__u32 crtc_id;
+	__u32 flags;
+	__u64 sequence;		/* on input, target sequence. on output, actual sequence */
+	__u64 user_data;	/* user data passed to event */
+};
+
 #if defined(__cplusplus)
 }
 #endif
@@ -819,6 +841,9 @@ extern "C" {
 #define DRM_IOCTL_WAIT_VBLANK		DRM_IOWR(0x3a, union drm_wait_vblank)
 
+#define DRM_IOCTL_CRTC_GET_SEQUENCE	DRM_IOWR(0x3b, struct drm_crtc_get_sequence)
+#define DRM_IOCTL_CRTC_QUEUE_SEQUENCE	DRM_IOWR(0x3c, struct drm_crtc_queue_sequence)
+
 #define DRM_IOCTL_UPDATE_DRAW		DRM_IOW(0x3f, struct drm_update_draw)
 
 #define DRM_IOCTL_MODE_GETRESOURCES	DRM_IOWR(0xA0, struct drm_mode_card_res)
@@ -863,6 +888,11 @@ extern "C" {
 #define DRM_IOCTL_SYNCOBJ_RESET		DRM_IOWR(0xC4, struct drm_syncobj_array)
 #define DRM_IOCTL_SYNCOBJ_SIGNAL	DRM_IOWR(0xC5, struct drm_syncobj_array)
 
+#define DRM_IOCTL_MODE_CREATE_LEASE	DRM_IOWR(0xC6, struct drm_mode_create_lease)
+#define DRM_IOCTL_MODE_LIST_LESSEES	DRM_IOWR(0xC7, struct drm_mode_list_lessees)
+#define DRM_IOCTL_MODE_GET_LEASE	DRM_IOWR(0xC8, struct drm_mode_get_lease)
+#define DRM_IOCTL_MODE_REVOKE_LEASE	DRM_IOWR(0xC9, struct drm_mode_revoke_lease)
+
 /**
  * Device specific ioctls should only be in their respective headers
  * The device specific ioctl range is from 0x40 to 0x9f.
@@ -893,6 +923,7 @@ struct drm_event {
 #define DRM_EVENT_VBLANK 0x01
 #define DRM_EVENT_FLIP_COMPLETE 0x02
+#define DRM_EVENT_CRTC_SEQUENCE	0x03
 
 struct drm_event_vblank {
 	struct drm_event base;
@@ -903,6 +934,16 @@ struct drm_event_vblank {
 	__u32 crtc_id; /* 0 on older kernels that do not support this */
 };
 
+/* Event delivered at sequence. Time stamp marks when the first pixel
+ * of the refresh cycle leaves the display engine for the display
+ */
+struct drm_event_crtc_sequence {
+	struct drm_event	base;
+	__u64			user_data;
+	__s64			time_ns;
+	__u64			sequence;
+};
+
 /* typedef area */
 #ifndef __KERNEL__
 typedef struct drm_clip_rect drm_clip_rect_t;
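A hedged userspace sketch of the new query ioctl; the device node and crtc_id are placeholders, and the header include path may differ depending on how the UAPI headers are installed on a given system:

/* Sketch: query the current scanout sequence for one CRTC. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <drm/drm.h>	/* assumes installed headers provide drm_crtc_get_sequence */

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR);
	if (fd < 0) { perror("open"); return 1; }

	struct drm_crtc_get_sequence seq = { .crtc_id = 42 /* hypothetical CRTC */ };
	if (ioctl(fd, DRM_IOCTL_CRTC_GET_SEQUENCE, &seq) == 0)
		printf("active=%u sequence=%llu time_ns=%lld\n",
		       seq.active,
		       (unsigned long long)seq.sequence,
		       (long long)seq.sequence_ns);
	else
		perror("DRM_IOCTL_CRTC_GET_SEQUENCE");

	close(fd);
	return 0;
}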
tools/include/uapi/drm/i915_drm.h
@@ -397,10 +397,20 @@ typedef struct drm_i915_irq_wait {
 #define I915_PARAM_MIN_EU_IN_POOL	 39
 #define I915_PARAM_MMAP_GTT_VERSION	 40
 
-/* Query whether DRM_I915_GEM_EXECBUFFER2 supports user defined execution
+/*
+ * Query whether DRM_I915_GEM_EXECBUFFER2 supports user defined execution
  * priorities and the driver will attempt to execute batches in priority order.
+ * The param returns a capability bitmask, nonzero implies that the scheduler
+ * is enabled, with different features present according to the mask.
+ *
+ * The initial priority for each batch is supplied by the context and is
+ * controlled via I915_CONTEXT_PARAM_PRIORITY.
 */
 #define I915_PARAM_HAS_SCHEDULER	 41
+#define   I915_SCHEDULER_CAP_ENABLED	(1ul << 0)
+#define   I915_SCHEDULER_CAP_PRIORITY	(1ul << 1)
+#define   I915_SCHEDULER_CAP_PREEMPTION	(1ul << 2)
+
 #define I915_PARAM_HUC_STATUS		 42
 
 /* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to opt-out of
@@ -1309,14 +1319,16 @@ struct drm_i915_reg_read {
 	 * be specified
 	 */
 	__u64 offset;
+#define I915_REG_READ_8B_WA (1ul << 0)
 
 	__u64 val; /* Return value */
 };
+
 /* Known registers:
  *
  * Render engine timestamp - 0x2358 + 64bit - gen7+
  * - Note this register returns an invalid value if using the default
- *   single instruction 8byte read, in order to workaround that use
- *   offset (0x2538 | 1) instead.
+ *   single instruction 8byte read, in order to workaround that pass
+ *   flag I915_REG_READ_8B_WA in offset field.
  *
  */
@@ -1359,6 +1371,10 @@ struct drm_i915_gem_context_param {
 #define I915_CONTEXT_PARAM_GTT_SIZE	0x3
 #define I915_CONTEXT_PARAM_NO_ERROR_CAPTURE	0x4
 #define I915_CONTEXT_PARAM_BANNABLE	0x5
+#define I915_CONTEXT_PARAM_PRIORITY	0x6
+#define   I915_CONTEXT_MAX_USER_PRIORITY	1023 /* inclusive */
+#define   I915_CONTEXT_DEFAULT_PRIORITY		0
+#define   I915_CONTEXT_MIN_USER_PRIORITY	-1023 /* inclusive */
 	__u64 value;
 };
@@ -1510,9 +1526,14 @@ struct drm_i915_perf_oa_config {
 	__u32 n_boolean_regs;
 	__u32 n_flex_regs;
 
-	__u64 __user mux_regs_ptr;
-	__u64 __user boolean_regs_ptr;
-	__u64 __user flex_regs_ptr;
+	/*
+	 * These fields are pointers to tuples of u32 values (register
+	 * address, value). For example the expected length of the buffer
+	 * pointed by mux_regs_ptr is (2 * sizeof(u32) * n_mux_regs).
+	 */
+	__u64 mux_regs_ptr;
+	__u64 boolean_regs_ptr;
+	__u64 flex_regs_ptr;
 };
 
 #if defined(__cplusplus)
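The scheduler capability bitmask can be probed with the driver's existing getparam mechanism; a sketch, with the device path as a placeholder:

/* Sketch: query I915_PARAM_HAS_SCHEDULER and decode the capability bits. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>	/* assumes installed headers with the new caps */

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR);
	if (fd < 0) { perror("open"); return 1; }

	int caps = 0;
	struct drm_i915_getparam gp = {
		.param = I915_PARAM_HAS_SCHEDULER,
		.value = &caps,
	};

	if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0)
		printf("scheduler enabled: %d, priority: %d, preemption: %d\n",
		       !!(caps & I915_SCHEDULER_CAP_ENABLED),
		       !!(caps & I915_SCHEDULER_CAP_PRIORITY),
		       !!(caps & I915_SCHEDULER_CAP_PREEMPTION));
	else
		perror("DRM_IOCTL_I915_GETPARAM");

	close(fd);
	return 0;
}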
tools/include/uapi/linux/kvm.h
@@ -931,6 +931,7 @@ struct kvm_ppc_resize_hpt {
 #define KVM_CAP_PPC_SMT_POSSIBLE 147
 #define KVM_CAP_HYPERV_SYNIC2 148
 #define KVM_CAP_HYPERV_VP_INDEX 149
+#define KVM_CAP_S390_AIS_MIGRATION 150
 
 #ifdef KVM_CAP_IRQ_ROUTING
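Userspace can probe the new capability with the long-standing KVM_CHECK_EXTENSION ioctl on /dev/kvm; a small sketch, where the fallback define mirrors the value added above:

/* Sketch: check whether the host KVM advertises the s390 AIS migration cap. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

#ifndef KVM_CAP_S390_AIS_MIGRATION
#define KVM_CAP_S390_AIS_MIGRATION 150	/* value from the header update above */
#endif

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);
	if (kvm < 0) { perror("open /dev/kvm"); return 1; }

	int has = ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_S390_AIS_MIGRATION);
	printf("KVM_CAP_S390_AIS_MIGRATION: %s\n",
	       has > 0 ? "supported" : "not supported");

	close(kvm);
	return 0;
}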
tools/include/uapi/linux/perf_event.h
@@ -942,6 +942,7 @@ enum perf_callchain_context {
 #define PERF_AUX_FLAG_TRUNCATED		0x01	/* record was truncated to fit */
 #define PERF_AUX_FLAG_OVERWRITE		0x02	/* snapshot from overwrite mode */
 #define PERF_AUX_FLAG_PARTIAL		0x04	/* record contains gaps */
+#define PERF_AUX_FLAG_COLLISION		0x08	/* sample collided with another */
 
 #define PERF_FLAG_FD_NO_GROUP		(1UL << 0)
 #define PERF_FLAG_FD_OUTPUT		(1UL << 1)
tools/include/uapi/linux/prctl.h
@@ -198,4 +198,13 @@ struct prctl_mm_map {
 # define PR_CAP_AMBIENT_LOWER		3
 # define PR_CAP_AMBIENT_CLEAR_ALL	4
 
+/* arm64 Scalable Vector Extension controls */
+/* Flag values must be kept in sync with ptrace NT_ARM_SVE interface */
+#define PR_SVE_SET_VL			50	/* set task vector length */
+# define PR_SVE_SET_VL_ONEXEC		(1 << 18) /* defer effect until exec */
+#define PR_SVE_GET_VL			51	/* get task vector length */
+/* Bits common to PR_SVE_SET_VL and PR_SVE_GET_VL */
+# define PR_SVE_VL_LEN_MASK		0xffff
+# define PR_SVE_VL_INHERIT		(1 << 17) /* inherit across exec */
+
 #endif /* _LINUX_PRCTL_H */
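A hedged sketch of how these controls are used from userspace on an arm64 kernel with SVE; the requested vector length is illustrative and the fallback defines mirror the values added above:

/* Sketch: set and read back the SVE vector length via prctl(). */
#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SVE_SET_VL
#define PR_SVE_SET_VL		50
#define PR_SVE_SET_VL_ONEXEC	(1 << 18)
#define PR_SVE_GET_VL		51
#define PR_SVE_VL_LEN_MASK	0xffff
#define PR_SVE_VL_INHERIT	(1 << 17)
#endif

int main(void)
{
	/* Request a 32-byte vector length, inherited across exec (illustrative). */
	if (prctl(PR_SVE_SET_VL, 32 | PR_SVE_VL_INHERIT) < 0)
		perror("PR_SVE_SET_VL");

	int vl = prctl(PR_SVE_GET_VL);
	if (vl >= 0)
		printf("vector length: %d bytes\n", vl & PR_SVE_VL_LEN_MASK);
	else
		perror("PR_SVE_GET_VL");
	return 0;
}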
tools/perf/bench/numa.c
@@ -216,6 +216,47 @@ static const char * const numa_usage[] = {
 	NULL
 };
 
+/*
+ * To get number of numa nodes present.
+ */
+static int nr_numa_nodes(void)
+{
+	int i, nr_nodes = 0;
+
+	for (i = 0; i < g->p.nr_nodes; i++) {
+		if (numa_bitmask_isbitset(numa_nodes_ptr, i))
+			nr_nodes++;
+	}
+
+	return nr_nodes;
+}
+
+/*
+ * To check if given numa node is present.
+ */
+static int is_node_present(int node)
+{
+	return numa_bitmask_isbitset(numa_nodes_ptr, node);
+}
+
+/*
+ * To check given numa node has cpus.
+ */
+static bool node_has_cpus(int node)
+{
+	struct bitmask *cpu = numa_allocate_cpumask();
+	unsigned int i;
+
+	if (cpu && !numa_node_to_cpus(node, cpu)) {
+		for (i = 0; i < cpu->size; i++) {
+			if (numa_bitmask_isbitset(cpu, i))
+				return true;
+		}
+	}
+
+	return false; /* lets fall back to nocpus safely */
+}
+
 static cpu_set_t bind_to_cpu(int target_cpu)
 {
 	cpu_set_t orig_mask, mask;
@@ -244,12 +285,12 @@ static cpu_set_t bind_to_cpu(int target_cpu)
 static cpu_set_t bind_to_node(int target_node)
 {
-	int cpus_per_node = g->p.nr_cpus/g->p.nr_nodes;
+	int cpus_per_node = g->p.nr_cpus / nr_numa_nodes();
 	cpu_set_t orig_mask, mask;
 	int cpu;
 	int ret;
 
-	BUG_ON(cpus_per_node*g->p.nr_nodes != g->p.nr_cpus);
+	BUG_ON(cpus_per_node * nr_numa_nodes() != g->p.nr_cpus);
 	BUG_ON(!cpus_per_node);
 
 	ret = sched_getaffinity(0, sizeof(orig_mask), &orig_mask);
@@ -649,7 +690,7 @@ static int parse_setup_node_list(void)
 			int i;
 
 			for (i = 0; i < mul; i++) {
-				if (t >= g->p.nr_tasks) {
+				if (t >= g->p.nr_tasks || !node_has_cpus(bind_node)) {
 					printf("\n# NOTE: ignoring bind NODEs starting at NODE#%d\n", bind_node);
 					goto out;
 				}
@@ -964,6 +1005,8 @@ static void calc_convergence(double runtime_ns_max, double *convergence)
 	sum = 0;
 
 	for (node = 0; node < g->p.nr_nodes; node++) {
+		if (!is_node_present(node))
+			continue;
 		nr = nodes[node];
 		nr_min = min(nr, nr_min);
 		nr_max = max(nr, nr_max);
@@ -984,8 +1027,11 @@ static void calc_convergence(double runtime_ns_max, double *convergence)
 	process_groups = 0;
 
 	for (node = 0; node < g->p.nr_nodes; node++) {
-		int processes = count_node_processes(node);
+		int processes;
+		if (!is_node_present(node))
+			continue;
+		processes = count_node_processes(node);
 		nr = nodes[node];
 		tprintf(" %2d/%-2d", nr, processes);
@@ -1291,7 +1337,7 @@ static void print_summary(void)
 	printf("\n ###\n");
 	printf(" # %d %s will execute (on %d nodes, %d CPUs):\n",
-		g->p.nr_tasks, g->p.nr_tasks == 1 ? "task" : "tasks", g->p.nr_nodes, g->p.nr_cpus);
+		g->p.nr_tasks, g->p.nr_tasks == 1 ? "task" : "tasks", nr_numa_nodes(), g->p.nr_cpus);
 	printf(" # %5dx %5ldMB global  shared mem operations\n",
 			g->p.nr_loops, g->p.bytes_global/1024/1024);
 	printf(" # %5dx %5ldMB process shared mem operations\n",
tools/perf/tests/task-exit.c
@@ -84,7 +84,11 @@ int test__task_exit(struct test *test __maybe_unused, int subtest __maybe_unused)
 	evsel = perf_evlist__first(evlist);
 	evsel->attr.task = 1;
+#ifdef __s390x__
+	evsel->attr.sample_freq = 1000000;
+#else
 	evsel->attr.sample_freq = 1;
+#endif
 	evsel->attr.inherit = 0;
 	evsel->attr.watermark = 0;
 	evsel->attr.wakeup_events = 1;
tools/perf/trace/beauty/mmap.c
@@ -62,6 +62,9 @@ static size_t syscall_arg__scnprintf_mmap_flags(char *bf, size_t size,
 	P_MMAP_FLAG(POPULATE);
 	P_MMAP_FLAG(STACK);
 	P_MMAP_FLAG(UNINITIALIZED);
+#ifdef MAP_SYNC
+	P_MMAP_FLAG(SYNC);
+#endif
 #undef P_MMAP_FLAG
 
 	if (flags)
tools/perf/util/intel-pt-decoder/inat.h
@@ -97,6 +97,16 @@
 #define INAT_MAKE_GROUP(grp)	((grp << INAT_GRP_OFFS) | INAT_MODRM)
 #define INAT_MAKE_IMM(imm)	(imm << INAT_IMM_OFFS)
 
+/* Identifiers for segment registers */
+#define INAT_SEG_REG_IGNORE	0
+#define INAT_SEG_REG_DEFAULT	1
+#define INAT_SEG_REG_CS		2
+#define INAT_SEG_REG_SS		3
+#define INAT_SEG_REG_DS		4
+#define INAT_SEG_REG_ES		5
+#define INAT_SEG_REG_FS		6
+#define INAT_SEG_REG_GS		7
+
 /* Attribute search APIs */
 extern insn_attr_t inat_get_opcode_attribute(insn_byte_t opcode);
 extern int inat_get_last_prefix_id(insn_byte_t last_pfx);