trace_event_perf.c

/*
 * trace event based perf event profiling/tracing
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra
 * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
 */

#include <linux/module.h>
#include <linux/kprobes.h>
#include "trace.h"

static char __percpu *perf_trace_buf[PERF_NR_CONTEXTS];

/*
 * Force it to be aligned to unsigned long to avoid misaligned access
 * surprises
 */
typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
	perf_trace_t;

/* Count the events in use (per event id, not per instance) */
static int	total_ref_count;

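/*
 * Permission check for attaching a perf event to this trace event:
 * defer to a per-event ->perf_perm hook if set, and restrict the ftrace
 * function event and raw tracepoint sampling to privileged users.
 */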
static int perf_trace_event_perm(struct trace_event_call *tp_event,
				 struct perf_event *p_event)
{
	if (tp_event->perf_perm) {
		int ret = tp_event->perf_perm(tp_event, p_event);
		if (ret)
			return ret;
	}

	/*
	 * We already checked and allowed creating the parent,
	 * so allow children without checking.
	 */
	if (p_event->parent)
		return 0;

	/*
	 * It's ok to check current process (owner) permissions in here,
	 * because code below is called only via perf_event_open syscall.
	 */

	/* The ftrace function trace is allowed only for root. */
	if (ftrace_event_is_function(tp_event)) {
		if (perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
			return -EPERM;

		/*
		 * We don't allow user space callchains for the function trace
		 * event, due to issues with page faults while tracing the page
		 * fault handler, and the overall trickiness of doing so.
		 */
		if (!p_event->attr.exclude_callchain_user)
			return -EINVAL;

		/*
		 * Same reason to disable user stack dump as for user space
		 * callchains above.
		 */
		if (p_event->attr.sample_type & PERF_SAMPLE_STACK_USER)
			return -EINVAL;
	}

	/* No tracing, just counting, so no obvious leak */
	if (!(p_event->attr.sample_type & PERF_SAMPLE_RAW))
		return 0;

	/* Some events are ok to be traced by non-root users... */
	if (p_event->attach_state == PERF_ATTACH_TASK) {
		if (tp_event->flags & TRACE_EVENT_FL_CAP_ANY)
			return 0;
	}

	/*
	 * ...otherwise raw tracepoint data can be a severe data leak,
	 * so only allow root to have these.
	 */
	if (perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	return 0;
}

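/*
 * Take a perf reference on @tp_event.  On the first reference, allocate the
 * per-cpu hlist of attached perf events, allocate the per-context scratch
 * buffers if no other trace event is using them yet, and register the event
 * with the tracing core.
 */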
static int perf_trace_event_reg(struct trace_event_call *tp_event,
				struct perf_event *p_event)
{
	struct hlist_head __percpu *list;
	int ret = -ENOMEM;
	int cpu;

	p_event->tp_event = tp_event;
	if (tp_event->perf_refcount++ > 0)
		return 0;

	list = alloc_percpu(struct hlist_head);
	if (!list)
		goto fail;

	for_each_possible_cpu(cpu)
		INIT_HLIST_HEAD(per_cpu_ptr(list, cpu));

	tp_event->perf_events = list;

	if (!total_ref_count) {
		char __percpu *buf;
		int i;

		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			buf = (char __percpu *)alloc_percpu(perf_trace_t);
			if (!buf)
				goto fail;

			perf_trace_buf[i] = buf;
		}
	}

	ret = tp_event->class->reg(tp_event, TRACE_REG_PERF_REGISTER, NULL);
	if (ret)
		goto fail;

	total_ref_count++;
	return 0;

fail:
	if (!total_ref_count) {
		int i;

		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}

	if (!--tp_event->perf_refcount) {
		free_percpu(tp_event->perf_events);
		tp_event->perf_events = NULL;
	}

	return ret;
}

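/*
 * Drop the perf reference taken in perf_trace_event_reg().  On the last
 * reference, unregister from the tracing core, wait for in-flight
 * tracepoint callbacks to finish, then free the per-cpu hlist and, if no
 * trace event uses them anymore, the scratch buffers.
 */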
static void perf_trace_event_unreg(struct perf_event *p_event)
{
	struct trace_event_call *tp_event = p_event->tp_event;
	int i;

	if (--tp_event->perf_refcount > 0)
		goto out;

	tp_event->class->reg(tp_event, TRACE_REG_PERF_UNREGISTER, NULL);

	/*
	 * Ensure our callback won't be called anymore. The buffers
	 * will be freed after that.
	 */
	tracepoint_synchronize_unregister();

	free_percpu(tp_event->perf_events);
	tp_event->perf_events = NULL;

	if (!--total_ref_count) {
		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}
out:
	module_put(tp_event->mod);
}

static int perf_trace_event_open(struct perf_event *p_event)
{
	struct trace_event_call *tp_event = p_event->tp_event;
	return tp_event->class->reg(tp_event, TRACE_REG_PERF_OPEN, p_event);
}

static void perf_trace_event_close(struct perf_event *p_event)
{
	struct trace_event_call *tp_event = p_event->tp_event;
	tp_event->class->reg(tp_event, TRACE_REG_PERF_CLOSE, p_event);
}

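/*
 * Full perf-side setup of a trace event: permission check, registration,
 * then the per-event TRACE_REG_PERF_OPEN callback, undoing the
 * registration if the open fails.
 */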
static int perf_trace_event_init(struct trace_event_call *tp_event,
				 struct perf_event *p_event)
{
	int ret;

	ret = perf_trace_event_perm(tp_event, p_event);
	if (ret)
		return ret;

	ret = perf_trace_event_reg(tp_event, p_event);
	if (ret)
		return ret;

	ret = perf_trace_event_open(p_event);
	if (ret) {
		perf_trace_event_unreg(p_event);
		return ret;
	}

	return 0;
}

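/*
 * Entry point from the perf core: find the trace event whose id matches
 * attr.config, pin its module and set it up for perf use.
 */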
int perf_trace_init(struct perf_event *p_event)
{
	struct trace_event_call *tp_event;
	u64 event_id = p_event->attr.config;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(tp_event, &ftrace_events, list) {
		if (tp_event->event.type == event_id &&
		    tp_event->class && tp_event->class->reg &&
		    try_module_get(tp_event->mod)) {
			ret = perf_trace_event_init(tp_event, p_event);
			if (ret)
				module_put(tp_event->mod);
			break;
		}
	}
	mutex_unlock(&event_mutex);

	return ret;
}

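/* Counterpart of perf_trace_init(): close and unregister the event. */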
void perf_trace_destroy(struct perf_event *p_event)
{
	mutex_lock(&event_mutex);
	perf_trace_event_close(p_event);
	perf_trace_event_unreg(p_event);
	mutex_unlock(&event_mutex);
}

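/*
 * pmu ->add callback: hang the event off this CPU's hlist so the
 * tracepoint callback will see it, honouring PERF_EF_START.
 */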
int perf_trace_add(struct perf_event *p_event, int flags)
{
	struct trace_event_call *tp_event = p_event->tp_event;
	struct hlist_head __percpu *pcpu_list;
	struct hlist_head *list;

	pcpu_list = tp_event->perf_events;
	if (WARN_ON_ONCE(!pcpu_list))
		return -EINVAL;

	if (!(flags & PERF_EF_START))
		p_event->hw.state = PERF_HES_STOPPED;

	list = this_cpu_ptr(pcpu_list);
	hlist_add_head_rcu(&p_event->hlist_entry, list);

	return tp_event->class->reg(tp_event, TRACE_REG_PERF_ADD, p_event);
}

void perf_trace_del(struct perf_event *p_event, int flags)
{
	struct trace_event_call *tp_event = p_event->tp_event;
	hlist_del_rcu(&p_event->hlist_entry);
	tp_event->class->reg(tp_event, TRACE_REG_PERF_DEL, p_event);
}

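/*
 * Hand out a per-cpu scratch buffer (one per recursion context) for
 * building a raw trace entry bound for the perf ring buffer, with the
 * common trace_entry header already filled in.
 */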
void *perf_trace_buf_prepare(int size, unsigned short type,
			     struct pt_regs **regs, int *rctxp)
{
	struct trace_entry *entry;
	unsigned long flags;
	char *raw_data;
	int pc;

	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));

	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
			"perf buffer not large enough"))
		return NULL;

	pc = preempt_count();

	*rctxp = perf_swevent_get_recursion_context();
	if (*rctxp < 0)
		return NULL;

	if (regs)
		*regs = this_cpu_ptr(&__perf_regs[*rctxp]);
	raw_data = this_cpu_ptr(perf_trace_buf[*rctxp]);

	/* zero the dead bytes from alignment to avoid leaking stack to userspace */
	memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));

	entry = (struct trace_entry *)raw_data;
	local_save_flags(flags);
	tracing_generic_entry_update(entry, flags, pc);
	entry->type = type;

	return raw_data;
}
EXPORT_SYMBOL_GPL(perf_trace_buf_prepare);
NOKPROBE_SYMBOL(perf_trace_buf_prepare);

#ifdef CONFIG_FUNCTION_TRACER
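/*
 * ftrace callback used by the perf function-trace event: build a
 * struct ftrace_entry in the perf scratch buffer and submit it to the
 * events on this CPU's hlist.
 */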
static void
perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *ops, struct pt_regs *pt_regs)
{
	struct ftrace_entry *entry;
	struct hlist_head *head;
	struct pt_regs regs;
	int rctx;

	head = this_cpu_ptr(event_function.perf_events);
	if (hlist_empty(head))
		return;

#define ENTRY_SIZE (ALIGN(sizeof(struct ftrace_entry) + sizeof(u32), \
		    sizeof(u64)) - sizeof(u32))

	BUILD_BUG_ON(ENTRY_SIZE > PERF_MAX_TRACE_SIZE);

	perf_fetch_caller_regs(&regs);

	entry = perf_trace_buf_prepare(ENTRY_SIZE, TRACE_FN, NULL, &rctx);
	if (!entry)
		return;

	entry->ip = ip;
	entry->parent_ip = parent_ip;
	perf_trace_buf_submit(entry, ENTRY_SIZE, rctx, 0,
			      1, &regs, head, NULL);

#undef ENTRY_SIZE
}

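/*
 * Hook this event's private ftrace_ops into the function tracer; the
 * PER_CPU flag lets the pmu add/del callbacks enable and disable it per
 * cpu without re-registering.
 */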
static int perf_ftrace_function_register(struct perf_event *event)
{
	struct ftrace_ops *ops = &event->ftrace_ops;

	ops->flags |= FTRACE_OPS_FL_PER_CPU | FTRACE_OPS_FL_RCU;
	ops->func = perf_ftrace_function_call;
	return register_ftrace_function(ops);
}

static int perf_ftrace_function_unregister(struct perf_event *event)
{
	struct ftrace_ops *ops = &event->ftrace_ops;
	int ret = unregister_ftrace_function(ops);
	ftrace_free_filter(ops);
	return ret;
}

static void perf_ftrace_function_enable(struct perf_event *event)
{
	ftrace_function_local_enable(&event->ftrace_ops);
}

static void perf_ftrace_function_disable(struct perf_event *event)
{
	ftrace_function_local_disable(&event->ftrace_ops);
}

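/*
 * reg() callback of the ftrace function event when driven by perf: map the
 * TRACE_REG_PERF_* requests onto registration and per-cpu enable/disable
 * of the event's ftrace_ops.
 */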
int perf_ftrace_event_register(struct trace_event_call *call,
			       enum trace_reg type, void *data)
{
	switch (type) {
	case TRACE_REG_REGISTER:
	case TRACE_REG_UNREGISTER:
		break;
	case TRACE_REG_PERF_REGISTER:
	case TRACE_REG_PERF_UNREGISTER:
		return 0;
	case TRACE_REG_PERF_OPEN:
		return perf_ftrace_function_register(data);
	case TRACE_REG_PERF_CLOSE:
		return perf_ftrace_function_unregister(data);
	case TRACE_REG_PERF_ADD:
		perf_ftrace_function_enable(data);
		return 0;
	case TRACE_REG_PERF_DEL:
		perf_ftrace_function_disable(data);
		return 0;
	}

	return -EINVAL;
}
#endif /* CONFIG_FUNCTION_TRACER */