Commit fac3fcae authored by Linus Torvalds

Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull perf tooling fixes from Thomas Gleixner:

 - Synchronization of tools and kernel headers

 - A series of fixes for perf report addressing various failures:
    * Handle invalid maps properly
    * Plug a memory leak
    * Handle frames and callchain order correctly

 - Fixes for handling inlined frames and --children mode

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  tools/include: Sync kernel ABI headers with tooling headers
  perf tools: Put caller above callee in --children mode
  perf report: Do not drop last inlined frame
  perf report: Always honor callchain order for inlined nodes
  perf script: Add --inline option for debugging
  perf report: Fix off-by-one for non-activation frames
  perf report: Fix memory leak in addr2line when called by addr2inlines
  perf report: Don't crash on invalid maps in `-g srcline` mode
parents 805f2869 6e30437b
@@ -27,6 +27,8 @@
#define __KVM_HAVE_IRQ_LINE
#define __KVM_HAVE_READONLY_MEM
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
#define KVM_REG_SIZE(id) \
(1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
@@ -114,6 +116,8 @@ struct kvm_debug_exit_arch {
};
struct kvm_sync_regs {
/* Used with KVM_CAP_ARM_USER_IRQ */
__u64 device_irq_level;
};
struct kvm_arch_memory_slot {
@@ -192,6 +196,7 @@ struct kvm_arch_memory_slot {
#define KVM_DEV_ARM_VGIC_GRP_REDIST_REGS 5
#define KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS 6
#define KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO 7
#define KVM_DEV_ARM_VGIC_GRP_ITS_REGS 8
#define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT 10
#define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_MASK \
(0x3fffffULL << KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT)
@@ -199,6 +204,9 @@ struct kvm_arch_memory_slot {
#define VGIC_LEVEL_INFO_LINE_LEVEL 0
#define KVM_DEV_ARM_VGIC_CTRL_INIT 0
#define KVM_DEV_ARM_ITS_SAVE_TABLES 1
#define KVM_DEV_ARM_ITS_RESTORE_TABLES 2
#define KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES 3
/* KVM_IRQ_LINE irq field index values */
#define KVM_ARM_IRQ_TYPE_SHIFT 24
@@ -39,6 +39,8 @@
#define __KVM_HAVE_IRQ_LINE
#define __KVM_HAVE_READONLY_MEM
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
#define KVM_REG_SIZE(id) \
(1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
@@ -143,6 +145,8 @@ struct kvm_debug_exit_arch {
#define KVM_GUESTDBG_USE_HW (1 << 17)
struct kvm_sync_regs {
/* Used with KVM_CAP_ARM_USER_IRQ */
__u64 device_irq_level;
};
struct kvm_arch_memory_slot {
@@ -212,6 +216,7 @@ struct kvm_arch_memory_slot {
#define KVM_DEV_ARM_VGIC_GRP_REDIST_REGS 5
#define KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS 6
#define KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO 7
#define KVM_DEV_ARM_VGIC_GRP_ITS_REGS 8
#define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT 10
#define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_MASK \
(0x3fffffULL << KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT)
@@ -219,6 +224,9 @@ struct kvm_arch_memory_slot {
#define VGIC_LEVEL_INFO_LINE_LEVEL 0
#define KVM_DEV_ARM_VGIC_CTRL_INIT 0
#define KVM_DEV_ARM_ITS_SAVE_TABLES 1
#define KVM_DEV_ARM_ITS_RESTORE_TABLES 2
#define KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES 3
/* Device Control API on vcpu fd */
#define KVM_ARM_VCPU_PMU_V3_CTRL 0
@@ -29,6 +29,9 @@
#define __KVM_HAVE_IRQ_LINE
#define __KVM_HAVE_GUEST_DEBUG
/* Not always available, but if it is, this is the correct offset. */
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
struct kvm_regs {
__u64 pc;
__u64 cr;
@@ -26,6 +26,8 @@
#define KVM_DEV_FLIC_ADAPTER_REGISTER 6
#define KVM_DEV_FLIC_ADAPTER_MODIFY 7
#define KVM_DEV_FLIC_CLEAR_IO_IRQ 8
#define KVM_DEV_FLIC_AISM 9
#define KVM_DEV_FLIC_AIRQ_INJECT 10
/*
* We can have up to 4*64k pending subchannels + 8 adapter interrupts,
* as well as up to ASYNC_PF_PER_VCPU*KVM_MAX_VCPUS pfault done interrupts.
@@ -41,7 +43,14 @@ struct kvm_s390_io_adapter {
__u8 isc;
__u8 maskable;
__u8 swap;
__u8 pad;
__u8 flags;
};
#define KVM_S390_ADAPTER_SUPPRESSIBLE 0x01
struct kvm_s390_ais_req {
__u8 isc;
__u16 mode;
};
#define KVM_S390_IO_ADAPTER_MASK 1
@@ -110,6 +119,7 @@ struct kvm_s390_vm_cpu_machine {
#define KVM_S390_VM_CPU_FEAT_CMMA 10
#define KVM_S390_VM_CPU_FEAT_PFMFI 11
#define KVM_S390_VM_CPU_FEAT_SIGPIF 12
#define KVM_S390_VM_CPU_FEAT_KSS 13
struct kvm_s390_vm_cpu_feat {
__u64 feat[16];
};
@@ -198,6 +208,10 @@ struct kvm_guest_debug_arch {
#define KVM_SYNC_VRS (1UL << 6)
#define KVM_SYNC_RICCB (1UL << 7)
#define KVM_SYNC_FPRS (1UL << 8)
#define KVM_SYNC_GSCB (1UL << 9)
/* length and alignment of the sdnx as a power of two */
#define SDNXC 8
#define SDNXL (1UL << SDNXC)
/* definition of registers in kvm_run */
struct kvm_sync_regs {
__u64 prefix; /* prefix register */
@@ -218,8 +232,16 @@ struct kvm_sync_regs {
};
__u8 reserved[512]; /* for future vector expansion */
__u32 fpc; /* valid on KVM_SYNC_VRS or KVM_SYNC_FPRS */
__u8 padding[52]; /* riccb needs to be 64byte aligned */
__u8 padding1[52]; /* riccb needs to be 64byte aligned */
__u8 riccb[64]; /* runtime instrumentation controls block */
__u8 padding2[192]; /* sdnx needs to be 256byte aligned */
union {
__u8 sdnx[SDNXL]; /* state description annex */
struct {
__u64 reserved1[2];
__u64 gscb[4];
};
};
};
#define KVM_REG_S390_TODPR (KVM_REG_S390 | KVM_REG_SIZE_U32 | 0x1)
@@ -202,6 +202,8 @@
#define X86_FEATURE_AVX512_4VNNIW (7*32+16) /* AVX-512 Neural Network Instructions */
#define X86_FEATURE_AVX512_4FMAPS (7*32+17) /* AVX-512 Multiply Accumulation Single precision */
#define X86_FEATURE_MBA ( 7*32+18) /* Memory Bandwidth Allocation */
/* Virtualization flags: Linux defined, word 8 */
#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
#define X86_FEATURE_VNMI ( 8*32+ 1) /* Intel Virtual NMI */
@@ -36,6 +36,12 @@
# define DISABLE_OSPKE (1<<(X86_FEATURE_OSPKE & 31))
#endif /* CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS */
#ifdef CONFIG_X86_5LEVEL
# define DISABLE_LA57 0
#else
# define DISABLE_LA57 (1<<(X86_FEATURE_LA57 & 31))
#endif
/*
* Make sure to add features to the correct mask
*/
@@ -55,7 +61,7 @@
#define DISABLED_MASK13 0
#define DISABLED_MASK14 0
#define DISABLED_MASK15 0
#define DISABLED_MASK16 (DISABLE_PKU|DISABLE_OSPKE)
#define DISABLED_MASK16 (DISABLE_PKU|DISABLE_OSPKE|DISABLE_LA57)
#define DISABLED_MASK17 0
#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18)
@@ -53,6 +53,12 @@
# define NEED_MOVBE 0
#endif
#ifdef CONFIG_X86_5LEVEL
# define NEED_LA57 (1<<(X86_FEATURE_LA57 & 31))
#else
# define NEED_LA57 0
#endif
#ifdef CONFIG_X86_64
#ifdef CONFIG_PARAVIRT
/* Paravirtualized systems may not have PSE or PGE available */
@@ -98,7 +104,7 @@
#define REQUIRED_MASK13 0
#define REQUIRED_MASK14 0
#define REQUIRED_MASK15 0
#define REQUIRED_MASK16 0
#define REQUIRED_MASK16 (NEED_LA57)
#define REQUIRED_MASK17 0
#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18)
@@ -9,6 +9,9 @@
#include <linux/types.h>
#include <linux/ioctl.h>
#define KVM_PIO_PAGE_OFFSET 1
#define KVM_COALESCED_MMIO_PAGE_OFFSET 2
#define DE_VECTOR 0
#define DB_VECTOR 1
#define BP_VECTOR 3
@@ -76,7 +76,11 @@
#define EXIT_REASON_WBINVD 54
#define EXIT_REASON_XSETBV 55
#define EXIT_REASON_APIC_WRITE 56
#define EXIT_REASON_RDRAND 57
#define EXIT_REASON_INVPCID 58
#define EXIT_REASON_VMFUNC 59
#define EXIT_REASON_ENCLS 60
#define EXIT_REASON_RDSEED 61
#define EXIT_REASON_PML_FULL 62
#define EXIT_REASON_XSAVES 63
#define EXIT_REASON_XRSTORS 64
@@ -90,6 +94,7 @@
{ EXIT_REASON_TASK_SWITCH, "TASK_SWITCH" }, \
{ EXIT_REASON_CPUID, "CPUID" }, \
{ EXIT_REASON_HLT, "HLT" }, \
{ EXIT_REASON_INVD, "INVD" }, \
{ EXIT_REASON_INVLPG, "INVLPG" }, \
{ EXIT_REASON_RDPMC, "RDPMC" }, \
{ EXIT_REASON_RDTSC, "RDTSC" }, \
@@ -108,6 +113,8 @@
{ EXIT_REASON_IO_INSTRUCTION, "IO_INSTRUCTION" }, \
{ EXIT_REASON_MSR_READ, "MSR_READ" }, \
{ EXIT_REASON_MSR_WRITE, "MSR_WRITE" }, \
{ EXIT_REASON_INVALID_STATE, "INVALID_STATE" }, \
{ EXIT_REASON_MSR_LOAD_FAIL, "MSR_LOAD_FAIL" }, \
{ EXIT_REASON_MWAIT_INSTRUCTION, "MWAIT_INSTRUCTION" }, \
{ EXIT_REASON_MONITOR_TRAP_FLAG, "MONITOR_TRAP_FLAG" }, \
{ EXIT_REASON_MONITOR_INSTRUCTION, "MONITOR_INSTRUCTION" }, \
@@ -115,20 +122,24 @@
{ EXIT_REASON_MCE_DURING_VMENTRY, "MCE_DURING_VMENTRY" }, \
{ EXIT_REASON_TPR_BELOW_THRESHOLD, "TPR_BELOW_THRESHOLD" }, \
{ EXIT_REASON_APIC_ACCESS, "APIC_ACCESS" }, \
{ EXIT_REASON_EOI_INDUCED, "EOI_INDUCED" }, \
{ EXIT_REASON_GDTR_IDTR, "GDTR_IDTR" }, \
{ EXIT_REASON_LDTR_TR, "LDTR_TR" }, \
{ EXIT_REASON_EPT_VIOLATION, "EPT_VIOLATION" }, \
{ EXIT_REASON_EPT_MISCONFIG, "EPT_MISCONFIG" }, \
{ EXIT_REASON_INVEPT, "INVEPT" }, \
{ EXIT_REASON_RDTSCP, "RDTSCP" }, \
{ EXIT_REASON_PREEMPTION_TIMER, "PREEMPTION_TIMER" }, \
{ EXIT_REASON_INVVPID, "INVVPID" }, \
{ EXIT_REASON_WBINVD, "WBINVD" }, \
{ EXIT_REASON_XSETBV, "XSETBV" }, \
{ EXIT_REASON_APIC_WRITE, "APIC_WRITE" }, \
{ EXIT_REASON_EOI_INDUCED, "EOI_INDUCED" }, \
{ EXIT_REASON_INVALID_STATE, "INVALID_STATE" }, \
{ EXIT_REASON_MSR_LOAD_FAIL, "MSR_LOAD_FAIL" }, \
{ EXIT_REASON_INVD, "INVD" }, \
{ EXIT_REASON_INVVPID, "INVVPID" }, \
{ EXIT_REASON_RDRAND, "RDRAND" }, \
{ EXIT_REASON_INVPCID, "INVPCID" }, \
{ EXIT_REASON_VMFUNC, "VMFUNC" }, \
{ EXIT_REASON_ENCLS, "ENCLS" }, \
{ EXIT_REASON_RDSEED, "RDSEED" }, \
{ EXIT_REASON_PML_FULL, "PML_FULL" }, \
{ EXIT_REASON_XSAVES, "XSAVES" }, \
{ EXIT_REASON_XRSTORS, "XRSTORS" }
@@ -48,17 +48,13 @@
* tv_sec holds the number of seconds before (negative) or after (positive)
* 00:00:00 1st January 1970 UTC.
*
* tv_nsec holds a number of nanoseconds before (0..-999,999,999 if tv_sec is
* negative) or after (0..999,999,999 if tv_sec is positive) the tv_sec time.
*
* Note that if both tv_sec and tv_nsec are non-zero, then the two values must
* either be both positive or both negative.
* tv_nsec holds a number of nanoseconds (0..999,999,999) after the tv_sec time.
*
* __reserved is held in case we need a yet finer resolution.
*/
struct statx_timestamp {
__s64 tv_sec;
__s32 tv_nsec;
__u32 tv_nsec;
__s32 __reserved;
};
@@ -311,6 +311,10 @@ include::itrace.txt[]
Set the maximum number of program blocks to print with brstackasm for
each sample.
--inline::
If a callgraph address belongs to an inlined function, the inline stack
will be printed. Each entry has function name and file/line.
SEE ALSO
--------
linkperf:perf-record[1], linkperf:perf-script-perl[1],
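
The --inline option documented in the hunk above only has something to print when the profiled binary carries debug info and the compiler actually inlined the sampled code. As a rough illustration (my own sketch, not part of the commit; the file name and build flags are arbitrary), a program like the following is the intended use case:

/* inline-demo.c -- hypothetical example, not part of this commit.
 * Build with:  gcc -O2 -g inline-demo.c -o inline-demo
 * At -O2 the compiler normally inlines hot_sum() into main(), so samples
 * taken inside the loop belong to an inlined frame.  After
 * "perf record ./inline-demo", running "perf script --inline" would print
 * the inline stack for such samples: the inlined function's name plus its
 * file/line, in addition to the caller's entry.
 */
#include <stdio.h>

static inline unsigned long hot_sum(unsigned long n)
{
	unsigned long i, sum = 0;

	for (i = 0; i < n; i++)
		sum += i * i;		/* most samples land on this line */
	return sum;
}

int main(void)
{
	printf("%lu\n", hot_sum(1UL << 28));
	return 0;
}
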
@@ -2494,6 +2494,8 @@ int cmd_script(int argc, const char **argv)
"Enable kernel symbol demangling"),
OPT_STRING(0, "time", &script.time_str, "str",
"Time span of interest (start,stop)"),
OPT_BOOLEAN(0, "inline", &symbol_conf.inline_name,
"Show inline function"),
OPT_END()
};
const char * const script_subcommands[] = { "record", "report", NULL };
@@ -210,6 +210,8 @@ static int __hpp__sort_acc(struct hist_entry *a, struct hist_entry *b,
return 0;
ret = b->callchain->max_depth - a->callchain->max_depth;
if (callchain_param.order == ORDER_CALLER)
ret = -ret;
}
return ret;
}
@@ -621,14 +621,19 @@ enum match_result {
static enum match_result match_chain_srcline(struct callchain_cursor_node *node,
struct callchain_list *cnode)
{
char *left = get_srcline(cnode->ms.map->dso,
char *left = NULL;
char *right = NULL;
enum match_result ret = MATCH_EQ;
int cmp;
if (cnode->ms.map)
left = get_srcline(cnode->ms.map->dso,
map__rip_2objdump(cnode->ms.map, cnode->ip),
cnode->ms.sym, true, false);
char *right = get_srcline(node->map->dso,
if (node->map)
right = get_srcline(node->map->dso,
map__rip_2objdump(node->map, node->ip),
node->sym, true, false);
enum match_result ret = MATCH_EQ;
int cmp;
if (left && right)
cmp = strcmp(left, right);
@@ -7,6 +7,7 @@
#include "map.h"
#include "strlist.h"
#include "symbol.h"
#include "srcline.h"
static int comma_fprintf(FILE *fp, bool *first, const char *fmt, ...)
{
@@ -168,6 +169,38 @@ int sample__fprintf_callchain(struct perf_sample *sample, int left_alignment,
if (!print_oneline)
printed += fprintf(fp, "\n");
if (symbol_conf.inline_name && node->map) {
struct inline_node *inode;
addr = map__rip_2objdump(node->map, node->ip),
inode = dso__parse_addr_inlines(node->map->dso, addr);
if (inode) {
struct inline_list *ilist;
list_for_each_entry(ilist, &inode->val, list) {
if (print_arrow)
printed += fprintf(fp, " <-");
/* IP is same, just skip it */
if (print_ip)
printed += fprintf(fp, "%c%16s",
s, "");
if (print_sym)
printed += fprintf(fp, " %s",
ilist->funcname);
if (print_srcline)
printed += fprintf(fp, "\n %s:%d",
ilist->filename,
ilist->line_nr);
if (!print_oneline)
printed += fprintf(fp, "\n");
}
inline_node__delete(inode);
}
}
if (symbol_conf.bt_stop_list &&
node->sym &&
strlist__has_entry(symbol_conf.bt_stop_list,
@@ -56,7 +56,10 @@ static int inline_list__append(char *filename, char *funcname, int line_nr,
}
}
if (callchain_param.order == ORDER_CALLEE)
list_add_tail(&ilist->list, &node->val);
else
list_add(&ilist->list, &node->val);
return 0;
}
@@ -200,12 +203,14 @@ static void addr2line_cleanup(struct a2l_data *a2l)
#define MAX_INLINE_NEST 1024
static void inline_list__reverse(struct inline_node *node)
static int inline_list__append_dso_a2l(struct dso *dso,
struct inline_node *node)
{
struct inline_list *ilist, *n;
struct a2l_data *a2l = dso->a2l;
char *funcname = a2l->funcname ? strdup(a2l->funcname) : NULL;
char *filename = a2l->filename ? strdup(a2l->filename) : NULL;
list_for_each_entry_safe_reverse(ilist, n, &node->val, list)
list_move_tail(&ilist->list, &node->val);
return inline_list__append(filename, funcname, a2l->line, node, dso);
}
static int addr2line(const char *dso_name, u64 addr,
@@ -230,36 +235,36 @@
bfd_map_over_sections(a2l->abfd, find_address_in_section, a2l);
if (a2l->found && unwind_inlines) {
if (!a2l->found)
return 0;
if (unwind_inlines) {
int cnt = 0;
if (node && inline_list__append_dso_a2l(dso, node))
return 0;
while (bfd_find_inliner_info(a2l->abfd, &a2l->filename,
&a2l->funcname, &a2l->line) &&
cnt++ < MAX_INLINE_NEST) {
if (node != NULL) {
if (inline_list__append(strdup(a2l->filename),
strdup(a2l->funcname),
a2l->line, node,
dso) != 0)
if (inline_list__append_dso_a2l(dso, node))
return 0;
// found at least one inline frame
ret = 1;
}
}
if ((node != NULL) &&
(callchain_param.order != ORDER_CALLEE)) {
inline_list__reverse(node);
}
if (file) {
*file = a2l->filename ? strdup(a2l->filename) : NULL;
ret = *file ? 1 : 0;
}
if (a2l->found && a2l->filename) {
*file = strdup(a2l->filename);
if (line)
*line = a2l->line;
if (*file)
ret = 1;
}
return ret;
}
@@ -278,8 +283,6 @@ void dso__free_a2l(struct dso *dso)
static struct inline_node *addr2inlines(const char *dso_name, u64 addr,
struct dso *dso)
{
char *file = NULL;
unsigned int line = 0;
struct inline_node *node;
node = zalloc(sizeof(*node));
@@ -291,7 +294,7 @@ static struct inline_node *addr2inlines(const char *dso_name, u64 addr,
INIT_LIST_HEAD(&node->val);
node->addr = addr;
if (!addr2line(dso_name, addr, &file, &line, dso, TRUE, node))
if (!addr2line(dso_name, addr, NULL, NULL, dso, TRUE, node))
goto out_free_inline_node;
if (list_empty(&node->val))
@@ -168,12 +168,16 @@ frame_callback(Dwfl_Frame *state, void *arg)
{
struct unwind_info *ui = arg;
Dwarf_Addr pc;
bool isactivation;
if (!dwfl_frame_pc(state, &pc, NULL)) {
if (!dwfl_frame_pc(state, &pc, &isactivation)) {
pr_err("%s", dwfl_errmsg(-1));
return DWARF_CB_ABORT;
}
if (!isactivation)
--pc;
return entry(pc, ui) || !(--ui->max_stack) ?
DWARF_CB_ABORT : DWARF_CB_OK;
}
@@ -692,6 +692,17 @@ static int get_entries(struct unwind_info *ui, unwind_entry_cb_t cb,
while (!ret && (unw_step(&c) > 0) && i < max_stack) {
unw_get_reg(&c, UNW_REG_IP, &ips[i]);
/*
* Decrement the IP for any non-activation frames.
* this is required to properly find the srcline
* for caller frames.
* See also the documentation for dwfl_frame_pc(),
* which this code tries to replicate.
*/
if (unw_is_signal_frame(&c) <= 0)
--ips[i];
++i;
}
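
To make the comment in the hunk above concrete, here is a rough sketch with made-up instruction offsets (an illustration of mine, not code from the commit):

#include <stdio.h>

void bar(void)				/* the sampled (innermost) function */
{
}

void foo(void)
{
	bar();				/* call instruction at, say, foo+0x10;
					   return address pushed: foo+0x15 */
	puts("after the call");		/* next source line starts at foo+0x15 */
}

int main(void)
{
	foo();
	return 0;
}

/*
 * For a sample taken inside bar(), the unwinder reports foo()'s frame via
 * the return address foo+0x15.  Resolved as-is, that address maps to the
 * puts() line rather than to the line of the bar() call.  Subtracting one
 * byte moves it back inside the call instruction, so srcline resolution
 * points at the call itself.  Activation frames (the innermost frame or a
 * signal frame) already point at the sampled instruction and are left
 * untouched -- which is what the unw_is_signal_frame() check and the
 * isactivation flag from dwfl_frame_pc() in the hunks above distinguish.
 */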