Commit ca21b9b3 authored by Linus Torvalds

Merge tag 'perf-urgent-2020-02-09' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull perf fixes from Thomas Gleixner:
 "A set of fixes and improvements for the perf subsystem:

  Kernel fixes:

   - Install cgroup events to the correct CPU context to prevent a
     potential list double add

   - Prevent an integer underflow in the perf mlock accounting

   - Add a missing prototype for arch_perf_update_userpage()

  Tooling:

   - Add a missing unlock in the error path of maps__insert() in perf
     maps

   - Fix the build with the latest libbfd

   - Fix the perf parser so that it does not delete parse event terms,
     which caused a regression when using perf with ARM CoreSight, as
     the sink configuration was missing due to the deletion

   - Fix the double free in the perf CPU map merging test case

   - Add the missing ustring support for the perf probe command"

* tag 'perf-urgent-2020-02-09' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf maps: Add missing unlock to maps__insert() error case
  perf probe: Add ustring support for perf probe command
  perf: Make perf able to build with latest libbfd
  perf test: Fix test case Merge cpu map
  perf parse: Copy string to perf_evsel_config_term
  perf parse: Refactor 'struct perf_evsel_config_term'
  kernel/events: Add a missing prototype for arch_perf_update_userpage()
  perf/cgroups: Install cgroup events to correct cpuctx
  perf/core: Fix mlock accounting in perf_mmap()
parents 2fbc23c7 45f03574
@@ -1544,4 +1544,8 @@ int perf_event_exit_cpu(unsigned int cpu);
 #define perf_event_exit_cpu	NULL
 #endif
 
+extern void __weak arch_perf_update_userpage(struct perf_event *event,
+					     struct perf_event_mmap_page *userpg,
+					     u64 now);
+
 #endif /* _LINUX_PERF_EVENT_H */
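Note: the declaration above covers the __weak default of arch_perf_update_userpage() in the generic perf code as well as any architecture override. A minimal userspace sketch of that weak/strong override pattern, assuming GCC/Clang weak symbols (arch_hook is a made-up name, not the kernel function):

/* weak_demo.c - illustrative only */
#include <stdio.h>

/* Generic code ships a do-nothing weak default... */
void __attribute__((weak)) arch_hook(void)
{
}

/* ...and an architecture can link in a strong definition instead:
 *     void arch_hook(void) { printf("arch override\n"); }
 */

int main(void)
{
	arch_hook();	/* resolves to whichever definition the linker kept */
	printf("done\n");
	return 0;
}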
@@ -951,9 +951,9 @@ list_update_cgroup_event(struct perf_event *event,
 
 	/*
 	 * Because cgroup events are always per-cpu events,
-	 * this will always be called from the right CPU.
+	 * @ctx == &cpuctx->ctx.
 	 */
-	cpuctx = __get_cpu_context(ctx);
+	cpuctx = container_of(ctx, struct perf_cpu_context, ctx);
 
 	/*
 	 * Since setting cpuctx->cgrp is conditional on the current @cgrp
@@ -979,7 +979,8 @@ list_update_cgroup_event(struct perf_event *event,
 
 		cpuctx_entry = &cpuctx->cgrp_cpuctx_entry;
 		if (add)
-			list_add(cpuctx_entry, this_cpu_ptr(&cgrp_cpuctx_list));
+			list_add(cpuctx_entry,
+				 per_cpu_ptr(&cgrp_cpuctx_list, event->cpu));
 		else
 			list_del(cpuctx_entry);
 	}
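For reference, the fix derives the perf_cpu_context from @ctx itself (via container_of()) instead of from whichever CPU happens to be running, and then uses event->cpu to pick the per-CPU list. A minimal userspace re-implementation of the container_of() idea, with stand-in struct names rather than the kernel types:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct cpu_context {			/* stand-in for struct perf_cpu_context */
	int cpu;
	struct { int nr_events; } ctx;	/* embedded member, like ->ctx */
};

int main(void)
{
	struct cpu_context cc = { .cpu = 3 };
	void *ctx = &cc.ctx;		/* callers only hold the embedded member */
	struct cpu_context *back = container_of(ctx, struct cpu_context, ctx);

	printf("cpu = %d\n", back->cpu);	/* prints: cpu = 3 */
	return 0;
}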
@@ -5916,7 +5917,15 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
 	 */
 	user_lock_limit *= num_online_cpus();
 
-	user_locked = atomic_long_read(&user->locked_vm) + user_extra;
+	user_locked = atomic_long_read(&user->locked_vm);
+
+	/*
+	 * sysctl_perf_event_mlock may have changed, so that
+	 *     user->locked_vm > user_lock_limit
+	 */
+	if (user_locked > user_lock_limit)
+		user_locked = user_lock_limit;
+	user_locked += user_extra;
 
 	if (user_locked > user_lock_limit) {
 		/*
...
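The clamp above matters because sysctl_perf_event_mlock can be lowered after pages have already been charged, leaving user->locked_vm above the new limit; without the clamp, the accounting that follows is skewed by that stale excess. A standalone sketch of the arithmetic with illustrative numbers (names are not the kernel's):

#include <stdio.h>

int main(void)
{
	unsigned long limit = 512;		/* lock limit after the sysctl was lowered */
	unsigned long already_locked = 777;	/* charged while the old, larger limit applied */
	unsigned long extra = 16;		/* pages requested by the new mmap */
	unsigned long locked = already_locked;

	if (locked > limit)
		locked = limit;			/* clamp the stale charge first */
	locked += extra;

	/* Overage now reflects only the new request (16), not 777 + 16 - 512 = 281. */
	printf("over the limit by %lu pages\n", locked - limit);
	return 0;
}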
@@ -226,7 +226,7 @@ static int cs_etm_set_sink_attr(struct perf_pmu *pmu,
 		if (term->type != PERF_EVSEL__CONFIG_TERM_DRV_CFG)
 			continue;
 
-		sink = term->val.drv_cfg;
+		sink = term->val.str;
 		snprintf(path, PATH_MAX, "sinks/%s", sink);
 		ret = perf_pmu__scan_file(pmu, path, "%x", &hash);
...
@@ -131,7 +131,6 @@ int test__cpu_map_merge(struct test *test __maybe_unused, int subtest __maybe_un
 	TEST_ASSERT_VAL("failed to merge map: bad nr", c->nr == 5);
 	cpu_map__snprint(c, buf, sizeof(buf));
 	TEST_ASSERT_VAL("failed to merge map: bad result", !strcmp(buf, "1-2,4-5,7"));
-	perf_cpu_map__put(a);
 	perf_cpu_map__put(b);
 	perf_cpu_map__put(c);
 	return 0;
...
@@ -808,12 +808,12 @@ static void apply_config_terms(struct evsel *evsel,
 			perf_evsel__reset_sample_bit(evsel, TIME);
 			break;
 		case PERF_EVSEL__CONFIG_TERM_CALLGRAPH:
-			callgraph_buf = term->val.callgraph;
+			callgraph_buf = term->val.str;
 			break;
 		case PERF_EVSEL__CONFIG_TERM_BRANCH:
-			if (term->val.branch && strcmp(term->val.branch, "no")) {
+			if (term->val.str && strcmp(term->val.str, "no")) {
 				perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
-				parse_branch_str(term->val.branch,
+				parse_branch_str(term->val.str,
 						 &attr->branch_sample_type);
 			} else
 				perf_evsel__reset_sample_bit(evsel, BRANCH_STACK);
@@ -1265,6 +1265,8 @@ static void perf_evsel__free_config_terms(struct evsel *evsel)
 
 	list_for_each_entry_safe(term, h, &evsel->config_terms, list) {
 		list_del_init(&term->list);
+		if (term->free_str)
+			zfree(&term->val.str);
 		free(term);
 	}
 }
...
@@ -32,22 +32,21 @@ enum evsel_term_type {
 struct perf_evsel_config_term {
 	struct list_head	list;
 	enum evsel_term_type	type;
+	bool			free_str;
 	union {
 		u64		period;
 		u64		freq;
 		bool		time;
-		char		*callgraph;
-		char		*drv_cfg;
 		u64		stack_user;
 		int		max_stack;
 		bool		inherit;
 		bool		overwrite;
-		char		*branch;
 		unsigned long	max_events;
 		bool		percore;
 		bool		aux_output;
 		u32		aux_sample_size;
 		u64		cfg_chg;
+		char		*str;
 	} val;
 	bool weak;
 };
...
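The refactor above replaces the per-term char * fields (callgraph, drv_cfg, branch) with a single val.str plus a free_str flag, so string-valued terms own a strdup()'d copy and are freed uniformly. A compact illustration of that ownership scheme with simplified stand-in types (not the perf structures):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct term {			/* stand-in for perf_evsel_config_term */
	bool free_str;
	union {
		unsigned long num;
		char *str;
	} val;
};

static int term_set_str(struct term *t, const char *s)
{
	t->val.str = strdup(s);	/* the term owns its own copy */
	if (!t->val.str)
		return -1;
	t->free_str = true;
	return 0;
}

static void term_free(struct term *t)
{
	if (t->free_str)
		free(t->val.str);	/* only freed when it was strdup()'d */
}

int main(void)
{
	struct term t = { 0 };

	if (term_set_str(&t, "etr") == 0)
		printf("%s\n", t.val.str);
	term_free(&t);
	return 0;
}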
@@ -549,6 +549,7 @@ void maps__insert(struct maps *maps, struct map *map)
 
 		if (maps_by_name == NULL) {
 			__maps__free_maps_by_name(maps);
+			up_write(&maps->lock);
 			return;
 		}
 
...
@@ -1219,8 +1219,7 @@ static int config_attr(struct perf_event_attr *attr,
 static int get_config_terms(struct list_head *head_config,
 			    struct list_head *head_terms __maybe_unused)
 {
-#define ADD_CONFIG_TERM(__type, __name, __val)			\
-do {								\
+#define ADD_CONFIG_TERM(__type)					\
 	struct perf_evsel_config_term *__t;			\
 								\
 	__t = zalloc(sizeof(*__t));				\
@@ -1229,9 +1228,24 @@ do {								\
 								\
 	INIT_LIST_HEAD(&__t->list);				\
 	__t->type = PERF_EVSEL__CONFIG_TERM_ ## __type;		\
-	__t->val.__name = __val;				\
 	__t->weak = term->weak;					\
-	list_add_tail(&__t->list, head_terms);			\
+	list_add_tail(&__t->list, head_terms)
+
+#define ADD_CONFIG_TERM_VAL(__type, __name, __val)		\
+do {								\
+	ADD_CONFIG_TERM(__type);				\
+	__t->val.__name = __val;				\
+} while (0)
+
+#define ADD_CONFIG_TERM_STR(__type, __val)			\
+do {								\
+	ADD_CONFIG_TERM(__type);				\
+	__t->val.str = strdup(__val);				\
+	if (!__t->val.str) {					\
+		zfree(&__t);					\
+		return -ENOMEM;					\
+	}							\
+	__t->free_str = true;					\
 } while (0)
 
 	struct parse_events_term *term;
@@ -1239,53 +1253,62 @@ do {								\
 	list_for_each_entry(term, head_config, list) {
 		switch (term->type_term) {
 		case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
-			ADD_CONFIG_TERM(PERIOD, period, term->val.num);
+			ADD_CONFIG_TERM_VAL(PERIOD, period, term->val.num);
 			break;
 		case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
-			ADD_CONFIG_TERM(FREQ, freq, term->val.num);
+			ADD_CONFIG_TERM_VAL(FREQ, freq, term->val.num);
 			break;
 		case PARSE_EVENTS__TERM_TYPE_TIME:
-			ADD_CONFIG_TERM(TIME, time, term->val.num);
+			ADD_CONFIG_TERM_VAL(TIME, time, term->val.num);
 			break;
 		case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
-			ADD_CONFIG_TERM(CALLGRAPH, callgraph, term->val.str);
+			ADD_CONFIG_TERM_STR(CALLGRAPH, term->val.str);
 			break;
 		case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
-			ADD_CONFIG_TERM(BRANCH, branch, term->val.str);
+			ADD_CONFIG_TERM_STR(BRANCH, term->val.str);
 			break;
 		case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
-			ADD_CONFIG_TERM(STACK_USER, stack_user, term->val.num);
+			ADD_CONFIG_TERM_VAL(STACK_USER, stack_user,
+					    term->val.num);
 			break;
 		case PARSE_EVENTS__TERM_TYPE_INHERIT:
-			ADD_CONFIG_TERM(INHERIT, inherit, term->val.num ? 1 : 0);
+			ADD_CONFIG_TERM_VAL(INHERIT, inherit,
+					    term->val.num ? 1 : 0);
 			break;
 		case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
-			ADD_CONFIG_TERM(INHERIT, inherit, term->val.num ? 0 : 1);
+			ADD_CONFIG_TERM_VAL(INHERIT, inherit,
+					    term->val.num ? 0 : 1);
 			break;
 		case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
-			ADD_CONFIG_TERM(MAX_STACK, max_stack, term->val.num);
+			ADD_CONFIG_TERM_VAL(MAX_STACK, max_stack,
+					    term->val.num);
 			break;
 		case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
-			ADD_CONFIG_TERM(MAX_EVENTS, max_events, term->val.num);
+			ADD_CONFIG_TERM_VAL(MAX_EVENTS, max_events,
+					    term->val.num);
 			break;
 		case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
-			ADD_CONFIG_TERM(OVERWRITE, overwrite, term->val.num ? 1 : 0);
+			ADD_CONFIG_TERM_VAL(OVERWRITE, overwrite,
+					    term->val.num ? 1 : 0);
 			break;
 		case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
-			ADD_CONFIG_TERM(OVERWRITE, overwrite, term->val.num ? 0 : 1);
+			ADD_CONFIG_TERM_VAL(OVERWRITE, overwrite,
+					    term->val.num ? 0 : 1);
 			break;
 		case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
-			ADD_CONFIG_TERM(DRV_CFG, drv_cfg, term->val.str);
+			ADD_CONFIG_TERM_STR(DRV_CFG, term->val.str);
 			break;
 		case PARSE_EVENTS__TERM_TYPE_PERCORE:
-			ADD_CONFIG_TERM(PERCORE, percore,
-					term->val.num ? true : false);
+			ADD_CONFIG_TERM_VAL(PERCORE, percore,
+					    term->val.num ? true : false);
 			break;
 		case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
-			ADD_CONFIG_TERM(AUX_OUTPUT, aux_output, term->val.num ? 1 : 0);
+			ADD_CONFIG_TERM_VAL(AUX_OUTPUT, aux_output,
+					    term->val.num ? 1 : 0);
 			break;
 		case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
-			ADD_CONFIG_TERM(AUX_SAMPLE_SIZE, aux_sample_size, term->val.num);
+			ADD_CONFIG_TERM_VAL(AUX_SAMPLE_SIZE, aux_sample_size,
+					    term->val.num);
 			break;
 		default:
 			break;
@@ -1322,7 +1345,7 @@ static int get_config_chgs(struct perf_pmu *pmu, struct list_head *head_config,
 	}
 
 	if (bits)
-		ADD_CONFIG_TERM(CFG_CHG, cfg_chg, bits);
+		ADD_CONFIG_TERM_VAL(CFG_CHG, cfg_chg, bits);
 
 #undef ADD_CONFIG_TERM
 	return 0;
...
@@ -303,7 +303,8 @@ static int convert_variable_type(Dwarf_Die *vr_die,
 	char prefix;
 
 	/* TODO: check all types */
-	if (cast && strcmp(cast, "string") != 0 && strcmp(cast, "x") != 0 &&
+	if (cast && strcmp(cast, "string") != 0 && strcmp(cast, "ustring") &&
+	    strcmp(cast, "x") != 0 &&
 	    strcmp(cast, "s") != 0 && strcmp(cast, "u") != 0) {
 		/* Non string type is OK */
 		/* and respect signedness/hexadecimal cast */
...
@@ -193,16 +193,30 @@ static void find_address_in_section(bfd *abfd, asection *section, void *data)
 	bfd_vma pc, vma;
 	bfd_size_type size;
 	struct a2l_data *a2l = data;
+	flagword flags;
 
 	if (a2l->found)
 		return;
 
-	if ((bfd_get_section_flags(abfd, section) & SEC_ALLOC) == 0)
+#ifdef bfd_get_section_flags
+	flags = bfd_get_section_flags(abfd, section);
+#else
+	flags = bfd_section_flags(section);
+#endif
+	if ((flags & SEC_ALLOC) == 0)
 		return;
 
 	pc = a2l->addr;
+#ifdef bfd_get_section_vma
 	vma = bfd_get_section_vma(abfd, section);
+#else
+	vma = bfd_section_vma(section);
+#endif
+#ifdef bfd_get_section_size
 	size = bfd_get_section_size(section);
+#else
+	size = bfd_section_size(section);
+#endif
 
 	if (pc < vma || pc >= vma + size)
 		return;
...
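The #ifdef probes above keep the build working across the libbfd API rename: newer binutils dropped the bfd_get_section_* macros in favor of bfd_section_*, which take only the section. A hypothetical helper showing how the same probe could be wrapped once, assuming bfd.h is available; perf_bfd_section_flags is a made-up name, not part of the patch:

/* Hypothetical compat wrapper - illustrative only. */
#define PACKAGE "example"	/* some bfd.h versions insist PACKAGE (or config.h) is defined first */
#include <bfd.h>

static inline flagword perf_bfd_section_flags(bfd *abfd, asection *sec)
{
#ifdef bfd_get_section_flags		/* older libbfd still provides the macro */
	return bfd_get_section_flags(abfd, sec);
#else					/* newer libbfd: section-only accessor */
	(void)abfd;
	return bfd_section_flags(sec);
#endif
}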