Commit dcde237b authored by Linus Torvalds

Merge tag 'perf-tools-fixes-2020-07-07' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux

Pull perf tooling fixes from Arnaldo Carvalho de Melo:

 - Intel PT fixes for PEBS-via-PT with registers

 - Fixes for the Intel PT Python-based GUI

 - Avoid duplicated sideband events with Intel PT in system-wide tracing

 - Remove needless 'dummy' event from the TUI menu, used when synthesizing
   metadata events for pre-existing processes

 - Fix a corner case segfault when pressing Enter in a screen without
   entries in the TUI for report/top

 - Fixes for time stamp handling in libtraceevent

 - Explicitly set utf-8 encoding in perf flamegraph

 - Update arch/x86/lib/memcpy_64.S copy used in 'perf bench mem memcpy',
   silencing perf build warning

* tag 'perf-tools-fixes-2020-07-07' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux:
  perf report TUI: Remove needless 'dummy' event from menu
  perf intel-pt: Fix PEBS sample for XMM registers
  perf intel-pt: Fix displaying PEBS-via-PT with registers
  perf intel-pt: Fix recording PEBS-via-PT with registers
  perf report TUI: Fix segmentation fault in perf_evsel__hists_browse()
  tools lib traceevent: Add proper KBUFFER_TYPE_TIME_STAMP handling
  tools lib traceevent: Add API to read time information from kbuffer
  perf scripts python: exported-sql-viewer.py: Fix time chart call tree
  perf scripts python: exported-sql-viewer.py: Fix zero id in call tree 'Find' result
  perf scripts python: exported-sql-viewer.py: Fix zero id in call graph 'Find' result
  perf scripts python: exported-sql-viewer.py: Fix unexpanded 'Find' result
  perf record: Fix duplicated sideband events with Intel PT system wide tracing
  perf scripts python: export-to-postgresql.py: Fix struct.pack() int argument
  tools arch: Update arch/x86/lib/memcpy_64.S copy used in 'perf bench mem memcpy'
  perf flamegraph: Explicitly set utf-8 encoding
parents 6d12075d bee9ca1c
tools/arch/x86/lib/memcpy_64.S
@@ -8,6 +8,8 @@
 #include <asm/alternative-asm.h>
 #include <asm/export.h>

+.pushsection .noinstr.text, "ax"
+
 /*
  * We build a jump to memcpy_orig by default which gets NOPped out on
  * the majority of x86 CPUs which set REP_GOOD. In addition, CPUs which
@@ -184,6 +186,8 @@ SYM_FUNC_START(memcpy_orig)
     retq
 SYM_FUNC_END(memcpy_orig)

+.popsection
+
 #ifndef CONFIG_UML

 MCSAFE_TEST_CTL
tools/lib/traceevent/kbuffer-parse.c
@@ -361,6 +361,7 @@ translate_data(struct kbuffer *kbuf, void *data, void **rptr,
         break;

     case KBUFFER_TYPE_TIME_EXTEND:
+    case KBUFFER_TYPE_TIME_STAMP:
         extend = read_4(kbuf, data);
         data += 4;
         extend <<= TS_SHIFT;
@@ -369,10 +370,6 @@ translate_data(struct kbuffer *kbuf, void *data, void **rptr,
         *length = 0;
         break;

-    case KBUFFER_TYPE_TIME_STAMP:
-        data += 12;
-        *length = 0;
-        break;
     case 0:
         *length = read_4(kbuf, data) - 4;
         *length = (*length + 3) & ~3;
@@ -397,7 +394,11 @@ static unsigned int update_pointers(struct kbuffer *kbuf)
     type_len = translate_data(kbuf, ptr, &ptr, &delta, &length);

-    kbuf->timestamp += delta;
+    if (type_len == KBUFFER_TYPE_TIME_STAMP)
+        kbuf->timestamp = delta;
+    else
+        kbuf->timestamp += delta;

     kbuf->index = calc_index(kbuf, ptr);
     kbuf->next = kbuf->index + length;
@@ -454,7 +455,9 @@ static int __next_event(struct kbuffer *kbuf)
         if (kbuf->next >= kbuf->size)
             return -1;
         type = update_pointers(kbuf);
-    } while (type == KBUFFER_TYPE_TIME_EXTEND || type == KBUFFER_TYPE_PADDING);
+    } while (type == KBUFFER_TYPE_TIME_EXTEND ||
+             type == KBUFFER_TYPE_TIME_STAMP ||
+             type == KBUFFER_TYPE_PADDING);

     return 0;
 }
@@ -546,6 +549,34 @@ int kbuffer_load_subbuffer(struct kbuffer *kbuf, void *subbuffer)
     return 0;
 }

+/**
+ * kbuffer_subbuf_timestamp - read the timestamp from a sub buffer
+ * @kbuf:    The kbuffer to load
+ * @subbuf:  The subbuffer to read from.
+ *
+ * Return the timestamp from a subbuffer.
+ */
+unsigned long long kbuffer_subbuf_timestamp(struct kbuffer *kbuf, void *subbuf)
+{
+    return kbuf->read_8(subbuf);
+}
+
+/**
+ * kbuffer_ptr_delta - read the delta field from a record
+ * @kbuf:    The kbuffer to load
+ * @ptr:     The record in the buffe.
+ *
+ * Return the timestamp delta from a record
+ */
+unsigned int kbuffer_ptr_delta(struct kbuffer *kbuf, void *ptr)
+{
+    unsigned int type_len_ts;
+
+    type_len_ts = read_4(kbuf, ptr);
+    return ts4host(kbuf, type_len_ts);
+}
+
 /**
  * kbuffer_read_event - read the next event in the kbuffer subbuffer
  * @kbuf:    The kbuffer to read from
tools/lib/traceevent/kbuffer.h
@@ -49,6 +49,8 @@ int kbuffer_load_subbuffer(struct kbuffer *kbuf, void *subbuffer);
 void *kbuffer_read_event(struct kbuffer *kbuf, unsigned long long *ts);
 void *kbuffer_next_event(struct kbuffer *kbuf, unsigned long long *ts);
 unsigned long long kbuffer_timestamp(struct kbuffer *kbuf);
+unsigned long long kbuffer_subbuf_timestamp(struct kbuffer *kbuf, void *subbuf);
+unsigned int kbuffer_ptr_delta(struct kbuffer *kbuf, void *ptr);

 void *kbuffer_translate_data(int swap, void *data, unsigned int *size);
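For orientation, a minimal, hypothetical sketch of how a libtraceevent consumer might use the two new kbuffer time helpers alongside the existing sub-buffer API. The function name dump_subbuf_times and the page argument are invented for illustration; the kbuffer_* calls are the existing library API, and the 16-byte header offset assumes the 64-bit sub-buffer layout (8-byte timestamp plus 8-byte commit field).

#include <stdio.h>
#include "kbuffer.h"   /* libtraceevent */

/* Illustrative only: print the base timestamp of one raw ftrace sub-buffer
 * (page), the raw delta of its first record, and each record's timestamp. */
static void dump_subbuf_times(void *page)
{
        struct kbuffer *kbuf;
        unsigned long long ts;
        void *event;

        kbuf = kbuffer_alloc(KBUFFER_LSIZE_8, KBUFFER_ENDIAN_LITTLE);
        if (!kbuf)
                return;

        if (kbuffer_load_subbuffer(kbuf, page) < 0)
                goto out;

        /* New helper: the sub-buffer header timestamp. */
        printf("subbuf base ts: %llu\n",
               kbuffer_subbuf_timestamp(kbuf, page));

        /* New helper: raw delta field of the first record, whose header
         * sits right after the 8-byte timestamp and 8-byte commit field. */
        printf("first record delta: %u\n",
               kbuffer_ptr_delta(kbuf, (char *)page + 16));

        for (event = kbuffer_read_event(kbuf, &ts); event;
             event = kbuffer_next_event(kbuf, &ts))
                printf("  event at ts %llu\n", ts);
out:
        kbuffer_free(kbuf);
}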
tools/perf/arch/x86/util/intel-pt.c
@@ -641,6 +641,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
         }
         evsel->core.attr.freq = 0;
         evsel->core.attr.sample_period = 1;
+        evsel->no_aux_samples = true;
         intel_pt_evsel = evsel;
         opts->full_auxtrace = true;
     }
tools/perf/builtin-record.c
@@ -852,20 +852,20 @@ static int record__open(struct record *rec)
      * event synthesis.
      */
     if (opts->initial_delay || target__has_cpu(&opts->target)) {
-        if (perf_evlist__add_dummy(evlist))
-            return -ENOMEM;
+        pos = perf_evlist__get_tracking_event(evlist);
+        if (!evsel__is_dummy_event(pos)) {
+            /* Set up dummy event. */
+            if (perf_evlist__add_dummy(evlist))
+                return -ENOMEM;
+            pos = evlist__last(evlist);
+            perf_evlist__set_tracking_event(evlist, pos);
+        }

-        /* Disable tracking of mmaps on lead event. */
-        pos = evlist__first(evlist);
-        pos->tracking = 0;
-        /* Set up dummy event. */
-        pos = evlist__last(evlist);
-        pos->tracking = 1;
         /*
          * Enable the dummy event when the process is forked for
          * initial_delay, immediately for system wide.
          */
-        if (opts->initial_delay)
+        if (opts->initial_delay && !pos->immediate)
             pos->core.attr.enable_on_exec = 1;
         else
             pos->immediate = 1;
tools/perf/builtin-script.c
@@ -462,7 +462,7 @@ static int perf_evsel__check_attr(struct evsel *evsel, struct perf_session *sess
         return -EINVAL;

     if (PRINT_FIELD(IREGS) &&
-        evsel__check_stype(evsel, PERF_SAMPLE_REGS_INTR, "IREGS", PERF_OUTPUT_IREGS))
+        evsel__do_check_stype(evsel, PERF_SAMPLE_REGS_INTR, "IREGS", PERF_OUTPUT_IREGS, allow_user_set))
         return -EINVAL;

     if (PRINT_FIELD(UREGS) &&
tools/perf/scripts/python/export-to-postgresql.py
@@ -1055,7 +1055,7 @@ def cbr(id, raw_buf):
     cbr = data[0]
     MHz = (data[4] + 500) / 1000
     percent = ((cbr * 1000 / data[2]) + 5) / 10
-    value = struct.pack("!hiqiiiiii", 4, 8, id, 4, cbr, 4, MHz, 4, percent)
+    value = struct.pack("!hiqiiiiii", 4, 8, id, 4, cbr, 4, int(MHz), 4, int(percent))
     cbr_file.write(value)

 def mwait(id, raw_buf):
tools/perf/scripts/python/exported-sql-viewer.py
@@ -768,7 +768,8 @@ class CallGraphModel(CallGraphModelBase):
             " FROM calls"
             " INNER JOIN call_paths ON calls.call_path_id = call_paths.id"
             " INNER JOIN symbols ON call_paths.symbol_id = symbols.id"
-            " WHERE symbols.name" + match +
+            " WHERE calls.id <> 0"
+            " AND symbols.name" + match +
             " GROUP BY comm_id, thread_id, call_path_id"
             " ORDER BY comm_id, thread_id, call_path_id")
@@ -963,7 +964,8 @@ class CallTreeModel(CallGraphModelBase):
             " FROM calls"
             " INNER JOIN call_paths ON calls.call_path_id = call_paths.id"
             " INNER JOIN symbols ON call_paths.symbol_id = symbols.id"
-            " WHERE symbols.name" + match +
+            " WHERE calls.id <> 0"
+            " AND symbols.name" + match +
             " ORDER BY comm_id, thread_id, call_time, calls.id")
@@ -1050,6 +1052,7 @@ class TreeWindowBase(QMdiSubWindow):
                 child = self.model.index(row, 0, parent)
                 if child.internalPointer().dbid == dbid:
                     found = True
+                    self.view.setExpanded(parent, True)
                     self.view.setCurrentIndex(child)
                     parent = child
                     break
@@ -1127,6 +1130,7 @@ class CallTreeWindow(TreeWindowBase):
                 child = self.model.index(row, 0, parent)
                 if child.internalPointer().dbid == dbid:
                     found = True
+                    self.view.setExpanded(parent, True)
                     self.view.setCurrentIndex(child)
                     parent = child
                     break
@@ -1139,6 +1143,7 @@ class CallTreeWindow(TreeWindowBase):
             return
         last_child = None
         for row in xrange(n):
+            self.view.setExpanded(parent, True)
             child = self.model.index(row, 0, parent)
             child_call_time = child.internalPointer().call_time
             if child_call_time < time:
@@ -1151,9 +1156,11 @@ class CallTreeWindow(TreeWindowBase):
         if not last_child:
             if not found:
                 child = self.model.index(0, 0, parent)
+                self.view.setExpanded(parent, True)
                 self.view.setCurrentIndex(child)
             return
         found = True
+        self.view.setExpanded(parent, True)
         self.view.setCurrentIndex(last_child)
         parent = last_child
tools/perf/scripts/python/flamegraph.py
@@ -17,6 +17,7 @@
 from __future__ import print_function
 import sys
 import os
+import io
 import argparse
 import json
@@ -81,7 +82,7 @@ class FlameGraphCLI:
         if self.args.format == "html":
             try:
-                with open(self.args.template) as f:
+                with io.open(self.args.template, encoding="utf-8") as f:
                     output_str = f.read().replace("/** @flamegraph_json **/",
                                                   json_str)
             except IOError as e:
@@ -93,11 +94,12 @@ class FlameGraphCLI:
             output_fn = self.args.output or "stacks.json"

         if output_fn == "-":
-            sys.stdout.write(output_str)
+            with io.open(sys.stdout.fileno(), "w", encoding="utf-8", closefd=False) as out:
+                out.write(output_str)
         else:
             print("dumping data to {}".format(output_fn))
             try:
-                with open(output_fn, "w") as out:
+                with io.open(output_fn, "w", encoding="utf-8") as out:
                     out.write(output_str)
             except IOError as e:
                 print("Error writing output file: {}".format(e), file=sys.stderr)
tools/perf/ui/browsers/hists.c
@@ -2288,6 +2288,11 @@ static struct thread *hist_browser__selected_thread(struct hist_browser *browser
     return browser->he_selection->thread;
 }

+static struct res_sample *hist_browser__selected_res_sample(struct hist_browser *browser)
+{
+    return browser->he_selection ? browser->he_selection->res_samples : NULL;
+}
+
 /* Check whether the browser is for 'top' or 'report' */
 static inline bool is_report_browser(void *timer)
 {
@@ -3357,16 +3362,16 @@ static int perf_evsel__hists_browse(struct evsel *evsel, int nr_events,
                     &options[nr_options], NULL, NULL, evsel);
         nr_options += add_res_sample_opt(browser, &actions[nr_options],
                          &options[nr_options],
-                         hist_browser__selected_entry(browser)->res_samples,
-                         evsel, A_NORMAL);
+                         hist_browser__selected_res_sample(browser),
+                         evsel, A_NORMAL);
         nr_options += add_res_sample_opt(browser, &actions[nr_options],
                          &options[nr_options],
-                         hist_browser__selected_entry(browser)->res_samples,
-                         evsel, A_ASM);
+                         hist_browser__selected_res_sample(browser),
+                         evsel, A_ASM);
         nr_options += add_res_sample_opt(browser, &actions[nr_options],
                          &options[nr_options],
-                         hist_browser__selected_entry(browser)->res_samples,
-                         evsel, A_SOURCE);
+                         hist_browser__selected_res_sample(browser),
+                         evsel, A_SOURCE);
         nr_options += add_switch_opt(browser, &actions[nr_options],
                          &options[nr_options]);
 skip_scripting:
@@ -3598,6 +3603,23 @@ static int __perf_evlist__tui_browse_hists(struct evlist *evlist,
                     hbt, warn_lost_event);
 }

+static bool perf_evlist__single_entry(struct evlist *evlist)
+{
+    int nr_entries = evlist->core.nr_entries;
+
+    if (nr_entries == 1)
+        return true;
+
+    if (nr_entries == 2) {
+        struct evsel *last = evlist__last(evlist);
+
+        if (evsel__is_dummy_event(last))
+            return true;
+    }
+
+    return false;
+}
+
 int perf_evlist__tui_browse_hists(struct evlist *evlist, const char *help,
                   struct hist_browser_timer *hbt,
                   float min_pcnt,
@@ -3608,7 +3630,7 @@ int perf_evlist__tui_browse_hists(struct evlist *evlist, const char *help,
     int nr_entries = evlist->core.nr_entries;

 single_entry:
-    if (nr_entries == 1) {
+    if (perf_evlist__single_entry(evlist)) {
         struct evsel *first = evlist__first(evlist);

         return perf_evsel__hists_browse(first, nr_entries, help,
tools/perf/util/evlist.c
@@ -1566,6 +1566,18 @@ void perf_evlist__to_front(struct evlist *evlist,
     list_splice(&move, &evlist->core.entries);
 }

+struct evsel *perf_evlist__get_tracking_event(struct evlist *evlist)
+{
+    struct evsel *evsel;
+
+    evlist__for_each_entry(evlist, evsel) {
+        if (evsel->tracking)
+            return evsel;
+    }
+
+    return evlist__first(evlist);
+}
+
 void perf_evlist__set_tracking_event(struct evlist *evlist,
                      struct evsel *tracking_evsel)
 {
tools/perf/util/evlist.h
@@ -335,6 +335,7 @@ void perf_evlist__to_front(struct evlist *evlist,
     evlist__cpu_iter_start(evlist);            \
     perf_cpu_map__for_each_cpu (cpu, index, (evlist)->core.all_cpus)

+struct evsel *perf_evlist__get_tracking_event(struct evlist *evlist);
 void perf_evlist__set_tracking_event(struct evlist *evlist,
                      struct evsel *tracking_evsel);
tools/perf/util/evsel.c
@@ -898,12 +898,6 @@ static void evsel__apply_config_terms(struct evsel *evsel,
     }
 }

-static bool is_dummy_event(struct evsel *evsel)
-{
-    return (evsel->core.attr.type == PERF_TYPE_SOFTWARE) &&
-           (evsel->core.attr.config == PERF_COUNT_SW_DUMMY);
-}
-
 struct evsel_config_term *__evsel__get_config_term(struct evsel *evsel, enum evsel_term_type type)
 {
     struct evsel_config_term *term, *found_term = NULL;
@@ -1020,12 +1014,12 @@ void evsel__config(struct evsel *evsel, struct record_opts *opts,
     if (callchain && callchain->enabled && !evsel->no_aux_samples)
         evsel__config_callchain(evsel, opts, callchain);

-    if (opts->sample_intr_regs) {
+    if (opts->sample_intr_regs && !evsel->no_aux_samples) {
         attr->sample_regs_intr = opts->sample_intr_regs;
         evsel__set_sample_bit(evsel, REGS_INTR);
     }

-    if (opts->sample_user_regs) {
+    if (opts->sample_user_regs && !evsel->no_aux_samples) {
         attr->sample_regs_user |= opts->sample_user_regs;
         evsel__set_sample_bit(evsel, REGS_USER);
     }
@@ -1161,7 +1155,7 @@ void evsel__config(struct evsel *evsel, struct record_opts *opts,
      * The software event will trigger -EOPNOTSUPP error out,
      * if BRANCH_STACK bit is set.
      */
-    if (is_dummy_event(evsel))
+    if (evsel__is_dummy_event(evsel))
         evsel__reset_sample_bit(evsel, BRANCH_STACK);
 }
tools/perf/util/evsel.h
@@ -399,6 +399,12 @@ static inline bool evsel__has_br_stack(const struct evsel *evsel)
            evsel->synth_sample_type & PERF_SAMPLE_BRANCH_STACK;
 }

+static inline bool evsel__is_dummy_event(struct evsel *evsel)
+{
+    return (evsel->core.attr.type == PERF_TYPE_SOFTWARE) &&
+           (evsel->core.attr.config == PERF_COUNT_SW_DUMMY);
+}
+
 struct perf_env *evsel__env(struct evsel *evsel);

 int evsel__store_ids(struct evsel *evsel, struct evlist *evlist);
tools/perf/util/intel-pt.c
@@ -1735,6 +1735,7 @@ static int intel_pt_synth_pebs_sample(struct intel_pt_queue *ptq)
     u64 sample_type = evsel->core.attr.sample_type;
     u64 id = evsel->core.id[0];
     u8 cpumode;
+    u64 regs[8 * sizeof(sample.intr_regs.mask)];

     if (intel_pt_skip_event(pt))
         return 0;
@@ -1784,8 +1785,8 @@ static int intel_pt_synth_pebs_sample(struct intel_pt_queue *ptq)
     }

     if (sample_type & PERF_SAMPLE_REGS_INTR &&
-        items->mask[INTEL_PT_GP_REGS_POS]) {
-        u64 regs[sizeof(sample.intr_regs.mask)];
+        (items->mask[INTEL_PT_GP_REGS_POS] ||
+         items->mask[INTEL_PT_XMM_POS])) {
         u64 regs_mask = evsel->core.attr.sample_regs_intr;
         u64 *pos;
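A note on the array dimension above, as I read it (not text from the commits): sample.intr_regs.mask is a u64 bitmap with one bit per register that can be sampled, so up to 64 u64 slots may be filled, and with XMM registers now copied as well the old size of sizeof(mask) == 8 slots is too small; 8 * sizeof(mask) == 64 gives one slot per mask bit. A tiny standalone check of that arithmetic:

#include <assert.h>
#include <stdint.h>

/* Hypothetical illustration: one u64 slot per bit of the u64 register
 * mask, i.e. 64 slots, not one slot per byte of the mask (8). */
static_assert(sizeof(uint64_t) == 8, "mask is a u64 bitmap");
static_assert(8 * sizeof(uint64_t) == 64, "one slot per requestable register");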