Commit 32b0ed3a authored by Ingo Molnar

Merge tag 'perf-core-for-mingo' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/core

Pull perf/core improvements and fixes from Arnaldo Carvalho de Melo:

User visible changes:

  - 'perf probe' improvements: (Masami Hiramatsu)

    - Support glob wildcards for function name
    - Support $params special probe argument: Collect all function arguments
    - Make --line checks validate C-style function names
    - Add --no-inlines option to avoid searching inline functions

  - Introduce a new 'perf bench futex' benchmark, 'wake-parallel', to
    measure parallel waker threads generating contention for kernel
    locks (hb->lock). (Davidlohr Bueso)

Bug fixes:

  - Improve 'perf top' so that it survives much longer on high core count
    machines; more work is needed to refcount more data structures besides
    'struct thread' and to fix more races. (Arnaldo Carvalho de Melo)

Infrastructure changes:

  - Move the barrier.h mb()/rmb()/wmb() API from tools/perf/ to a kernel-like
    tools/arch/ hierarchy. (Arnaldo Carvalho de Melo)

  - Borrow atomic.h from the kernel, initially the x86 implementations,
    with a fallback to gcc intrinsics for the other arches; all the
    kernel-like framework is in place for doing arch specific
    implementations, preferably cloning what is in the kernel to the
    greatest extent possible. (Arnaldo Carvalho de Melo)

  - Protect the 'struct thread' lifetime with a reference counter,
    and protect data structures that contain its instances with
    a mutex. (Arnaldo Carvalho de Melo)

  - Disable libdw DWARF unwind when built with NO_DWARF (Naveen N. Rao)
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parents cb307113 76d40849
#ifndef __TOOLS_LINUX_ASM_ALPHA_BARRIER_H
#define __TOOLS_LINUX_ASM_ALPHA_BARRIER_H
#define mb() __asm__ __volatile__("mb": : :"memory")
#define rmb() __asm__ __volatile__("mb": : :"memory")
#define wmb() __asm__ __volatile__("wmb": : :"memory")
#endif /* __TOOLS_LINUX_ASM_ALPHA_BARRIER_H */
#ifndef _TOOLS_LINUX_ASM_ARM_BARRIER_H
#define _TOOLS_LINUX_ASM_ARM_BARRIER_H
/*
* Use the __kuser_memory_barrier helper in the CPU helper page. See
* arch/arm/kernel/entry-armv.S in the kernel source for details.
*/
#define mb() ((void(*)(void))0xffff0fa0)()
#define wmb() ((void(*)(void))0xffff0fa0)()
#define rmb() ((void(*)(void))0xffff0fa0)()
#endif /* _TOOLS_LINUX_ASM_ARM_BARRIER_H */
#ifndef _TOOLS_LINUX_ASM_AARCH64_BARRIER_H
#define _TOOLS_LINUX_ASM_AARCH64_BARRIER_H
/*
* From tools/perf/perf-sys.h, last modified in:
* f428ebd184c82a7914b2aa7e9f868918aaf7ea78 perf tools: Fix AAAAARGH64 memory barriers
*
* XXX: arch/arm64/include/asm/barrier.h in the kernel sources use dsb, is this
* a case like for arm32 where we do things differently in userspace?
*/
#define mb() asm volatile("dmb ish" ::: "memory")
#define wmb() asm volatile("dmb ishst" ::: "memory")
#define rmb() asm volatile("dmb ishld" ::: "memory")
#endif /* _TOOLS_LINUX_ASM_AARCH64_BARRIER_H */
/*
* Copied from the kernel sources to tools/:
*
* Memory barrier definitions. This is based on information published
* in the Processor Abstraction Layer and the System Abstraction Layer
* manual.
*
* Copyright (C) 1998-2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
* Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
* Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
*/
#ifndef _TOOLS_LINUX_ASM_IA64_BARRIER_H
#define _TOOLS_LINUX_ASM_IA64_BARRIER_H
#include <linux/compiler.h>
/*
* Macros to force memory ordering. In these descriptions, "previous"
* and "subsequent" refer to program order; "visible" means that all
* architecturally visible effects of a memory access have occurred
* (at a minimum, this means the memory has been read or written).
*
* wmb(): Guarantees that all preceding stores to memory-
* like regions are visible before any subsequent
* stores and that all following stores will be
* visible only after all previous stores.
* rmb(): Like wmb(), but for reads.
* mb(): wmb()/rmb() combo, i.e., all previous memory
* accesses are visible before all subsequent
* accesses and vice versa. This is also known as
* a "fence."
*
* Note: "mb()" and its variants cannot be used as a fence to order
* accesses to memory mapped I/O registers. For that, mf.a needs to
* be used. However, we don't want to always use mf.a because (a)
* it's (presumably) much slower than mf and (b) mf.a is supported for
* sequential memory pages only.
*/
/* XXX From arch/ia64/include/uapi/asm/gcc_intrin.h */
#define ia64_mf() asm volatile ("mf" ::: "memory")
#define mb() ia64_mf()
#define rmb() mb()
#define wmb() mb()
#endif /* _TOOLS_LINUX_ASM_IA64_BARRIER_H */
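The classic pairing these macros exist for, as an illustrative sketch (not
part of this patch; 'data' and 'ready' are hypothetical shared variables,
and ACCESS_ONCE() comes from the tools' linux/compiler.h):

#include <asm/barrier.h>
#include <linux/compiler.h>

static int data, ready;

static void producer(void)
{
	data = 42;			/* payload store */
	wmb();				/* payload must be visible before the flag */
	ready = 1;			/* publish */
}

static int consumer(void)
{
	while (!ACCESS_ONCE(ready))	/* spin until the flag is published */
		;
	rmb();				/* order the flag load before the payload load */
	return data;
}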
#ifndef _TOOLS_LINUX_ASM_MIPS_BARRIER_H
#define _TOOLS_LINUX_ASM_MIPS_BARRIER_H
/*
* FIXME: This came from tools/perf/perf-sys.h, where it was first introduced
* in c1e028ef40b8d6943b767028ba17d4f2ba020edb, more work needed to make it
* more closely follow the Linux kernel arch/mips/include/asm/barrier.h file.
* Probably when we continue work on tools/ Kconfig support to have all the
* CONFIG_ needed for properly doing that.
*/
#define mb() asm volatile( \
".set mips2\n\t" \
"sync\n\t" \
".set mips0" \
: /* no output */ \
: /* no input */ \
: "memory")
#define wmb() mb()
#define rmb() mb()
#endif /* _TOOLS_LINUX_ASM_MIPS_BARRIER_H */
/*
* Copied from the kernel sources:
*
* Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
*/
#ifndef _TOOLS_LINUX_ASM_POWERPC_BARRIER_H
#define _TOOLS_LINUX_ASM_POWERPC_BARRIER_H
/*
* Memory barrier.
* The sync instruction guarantees that all memory accesses initiated
* by this processor have been performed (with respect to all other
* mechanisms that access memory). The eieio instruction is a barrier
* providing an ordering (separately) for (a) cacheable stores and (b)
* loads and stores to non-cacheable memory (e.g. I/O devices).
*
* mb() prevents loads and stores being reordered across this point.
* rmb() prevents loads being reordered across this point.
* wmb() prevents stores being reordered across this point.
*
* *mb() variants without smp_ prefix must order all types of memory
* operations with one another. sync is the only instruction sufficient
* to do this.
*/
#define mb() __asm__ __volatile__ ("sync" : : : "memory")
#define rmb() __asm__ __volatile__ ("sync" : : : "memory")
#define wmb() __asm__ __volatile__ ("sync" : : : "memory")
#endif /* _TOOLS_LINUX_ASM_POWERPC_BARRIER_H */
/*
* Copied from the kernel sources:
*
* Copyright IBM Corp. 1999, 2009
*
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#ifndef __TOOLS_LINUX_ASM_BARRIER_H
#define __TOOLS_LINUX_ASM_BARRIER_H
/*
* Force strict CPU ordering.
* And yes, this is required on UP too when we're talking
* to devices.
*/
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
/* Fast-BCR without checkpoint synchronization */
#define __ASM_BARRIER "bcr 14,0\n"
#else
#define __ASM_BARRIER "bcr 15,0\n"
#endif
#define mb() do { asm volatile(__ASM_BARRIER : : : "memory"); } while (0)
#define rmb() mb()
#define wmb() mb()
#endif /* __TOOLS_LINUX_ASM_BARRIER_H */
/*
* Copied from the kernel sources:
*
* Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima
* Copyright (C) 2002 Paul Mundt
*/
#ifndef __TOOLS_LINUX_ASM_SH_BARRIER_H
#define __TOOLS_LINUX_ASM_SH_BARRIER_H
/*
* A brief note on ctrl_barrier(), the control register write barrier.
*
* Legacy SH cores typically require a sequence of 8 nops after
* modification of a control register in order for the changes to take
* effect. On newer cores (like the sh4a and sh5) this is accomplished
* with icbi.
*
* Also note that on sh4a in the icbi case we can forego a synco for the
* write barrier, as it's not necessary for control registers.
*
* Historically we have only done this type of barrier for the MMUCR, but
* it's also necessary for the CCR, so we make it generic here instead.
*/
#if defined(__SH4A__) || defined(__SH5__)
#define mb() __asm__ __volatile__ ("synco": : :"memory")
#define rmb() mb()
#define wmb() mb()
#endif
#include <asm-generic/barrier.h>
#endif /* __TOOLS_LINUX_ASM_SH_BARRIER_H */
#ifndef ___TOOLS_LINUX_ASM_SPARC_BARRIER_H
#define ___TOOLS_LINUX_ASM_SPARC_BARRIER_H
#if defined(__sparc__) && defined(__arch64__)
#include "barrier_64.h"
#else
#include "barrier_32.h"
#endif
#endif
#ifndef __TOOLS_PERF_SPARC_BARRIER_H
#define __TOOLS_PERF_SPARC_BARRIER_H
#include <asm-generic/barrier.h>
#endif /* !(__TOOLS_PERF_SPARC_BARRIER_H) */
#ifndef __TOOLS_LINUX_SPARC64_BARRIER_H
#define __TOOLS_LINUX_SPARC64_BARRIER_H
/* Copied from the kernel sources to tools/:
*
* These are here in an effort to more fully work around Spitfire Errata
* #51. Essentially, if a memory barrier occurs soon after a mispredicted
* branch, the chip can stop executing instructions until a trap occurs.
* Therefore, if interrupts are disabled, the chip can hang forever.
*
* It used to be believed that the memory barrier had to be right in the
* delay slot, but a case has been traced recently wherein the memory barrier
* was one instruction after the branch delay slot and the chip still hung.
* The offending sequence was the following in sym_wakeup_done() of the
* sym53c8xx_2 driver:
*
* call sym_ccb_from_dsa, 0
* movge %icc, 0, %l0
* brz,pn %o0, .LL1303
* mov %o0, %l2
* membar #LoadLoad
*
* The branch has to be mispredicted for the bug to occur. Therefore, we put
* the memory barrier explicitly into a "branch always, predicted taken"
* delay slot to avoid the problem case.
*/
#define membar_safe(type) \
do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
" membar " type "\n" \
"1:\n" \
: : : "memory"); \
} while (0)
/* The kernel always executes in TSO memory model these days,
* and furthermore most sparc64 chips implement more stringent
* memory ordering than required by the specifications.
*/
#define mb() membar_safe("#StoreLoad")
#define rmb() __asm__ __volatile__("":::"memory")
#define wmb() __asm__ __volatile__("":::"memory")
#endif /* !(__TOOLS_LINUX_SPARC64_BARRIER_H) */
#ifndef _TOOLS_LINUX_ASM_TILE_BARRIER_H
#define _TOOLS_LINUX_ASM_TILE_BARRIER_H
/*
* FIXME: This came from tools/perf/perf-sys.h, where it was first introduced
* in 620830b6954913647b7c7f68920cf48eddf6ad92, more work needed to make it
* more closely follow the Linux kernel arch/tile/include/asm/barrier.h file.
* Probably when we continue work on tools/ Kconfig support to have all the
* CONFIG_ needed for properly doing that.
*/
#define mb() asm volatile ("mf" ::: "memory")
#define wmb() mb()
#define rmb() mb()
#endif /* _TOOLS_LINUX_ASM_TILE_BARRIER_H */
#ifndef _TOOLS_LINUX_ASM_X86_ATOMIC_H
#define _TOOLS_LINUX_ASM_X86_ATOMIC_H
#include <linux/compiler.h>
#include <linux/types.h>
#include "rmwcc.h"
#define LOCK_PREFIX "\n\tlock; "
/*
* Atomic operations that C can't guarantee us. Useful for
* resource counting etc..
*/
#define ATOMIC_INIT(i) { (i) }
/**
* atomic_read - read atomic variable
* @v: pointer of type atomic_t
*
* Atomically reads the value of @v.
*/
static inline int atomic_read(const atomic_t *v)
{
return ACCESS_ONCE((v)->counter);
}
/**
* atomic_set - set atomic variable
* @v: pointer of type atomic_t
* @i: required value
*
* Atomically sets the value of @v to @i.
*/
static inline void atomic_set(atomic_t *v, int i)
{
v->counter = i;
}
/**
* atomic_inc - increment atomic variable
* @v: pointer of type atomic_t
*
* Atomically increments @v by 1.
*/
static inline void atomic_inc(atomic_t *v)
{
asm volatile(LOCK_PREFIX "incl %0"
: "+m" (v->counter));
}
/**
* atomic_dec_and_test - decrement and test
* @v: pointer of type atomic_t
*
* Atomically decrements @v by 1 and
* returns true if the result is 0, or false for all other
* cases.
*/
static inline int atomic_dec_and_test(atomic_t *v)
{
GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", "e");
}
#endif /* _TOOLS_LINUX_ASM_X86_ATOMIC_H */
#ifndef _TOOLS_LINUX_ASM_X86_BARRIER_H
#define _TOOLS_LINUX_ASM_X86_BARRIER_H
/*
* Copied from the Linux kernel sources, and also moving code
* out from tools/perf/perf-sys.h so as to make it be located
* in a place similar as in the kernel sources.
*
* Force strict CPU ordering.
* And yes, this is required on UP too when we're talking
* to devices.
*/
#if defined(__i386__)
/*
* Some non-Intel clones support out of order store. wmb() ceases to be a
* nop for these.
*/
#define mb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
#define rmb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
#define wmb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
#elif defined(__x86_64__)
#define mb() asm volatile("mfence":::"memory")
#define rmb() asm volatile("lfence":::"memory")
#define wmb() asm volatile("sfence" ::: "memory")
#endif
#endif /* _TOOLS_LINUX_ASM_X86_BARRIER_H */
#ifndef _TOOLS_LINUX_ASM_X86_RMWcc
#define _TOOLS_LINUX_ASM_X86_RMWcc
#ifdef CC_HAVE_ASM_GOTO
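/*
 * With asm goto the flags set by the rmw instruction can be branched on
 * directly to a C label, avoiding the set<cc> into a register and the
 * extra test of that result that the fallback below needs.
 */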
#define __GEN_RMWcc(fullop, var, cc, ...) \
do { \
asm_volatile_goto (fullop "; j" cc " %l[cc_label]" \
: : "m" (var), ## __VA_ARGS__ \
: "memory" : cc_label); \
return 0; \
cc_label: \
return 1; \
} while (0)
#define GEN_UNARY_RMWcc(op, var, arg0, cc) \
__GEN_RMWcc(op " " arg0, var, cc)
#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
__GEN_RMWcc(op " %1, " arg0, var, cc, vcon (val))
#else /* !CC_HAVE_ASM_GOTO */
#define __GEN_RMWcc(fullop, var, cc, ...) \
do { \
char c; \
asm volatile (fullop "; set" cc " %1" \
: "+m" (var), "=qm" (c) \
: __VA_ARGS__ : "memory"); \
return c != 0; \
} while (0)
#define GEN_UNARY_RMWcc(op, var, arg0, cc) \
__GEN_RMWcc(op " " arg0, var, cc)
#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
__GEN_RMWcc(op " %2, " arg0, var, cc, vcon (val))
#endif /* CC_HAVE_ASM_GOTO */
#endif /* _TOOLS_LINUX_ASM_X86_RMWcc */
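For illustration, manually expanding the fallback (!CC_HAVE_ASM_GOTO) path
for the atomic_dec_and_test() seen earlier, GEN_UNARY_RMWcc(LOCK_PREFIX
"decl", v->counter, "%0", "e") becomes roughly:

do {
	char c;
	asm volatile ("\n\tlock; decl %0; sete %1"
		      : "+m" (v->counter), "=qm" (c)
		      : : "memory");
	return c != 0;
} while (0)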
/*
* Copied from the kernel sources to tools/:
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2012 Tensilica Inc.
*/
#ifndef _TOOLS_LINUX_XTENSA_SYSTEM_H
#define _TOOLS_LINUX_XTENSA_SYSTEM_H
#define mb() ({ __asm__ __volatile__("memw" : : : "memory"); })
#define rmb() barrier()
#define wmb() mb()
#endif /* _TOOLS_LINUX_XTENSA_SYSTEM_H */
#ifndef __TOOLS_ASM_GENERIC_ATOMIC_H
#define __TOOLS_ASM_GENERIC_ATOMIC_H
#include <linux/compiler.h>
#include <linux/types.h>
/*
* Atomic operations that C can't guarantee us. Useful for
* resource counting etc..
*
* Excerpts obtained from the Linux kernel sources.
*/
#define ATOMIC_INIT(i) { (i) }
/**
* atomic_read - read atomic variable
* @v: pointer of type atomic_t
*
* Atomically reads the value of @v.
*/
static inline int atomic_read(const atomic_t *v)
{
return ACCESS_ONCE((v)->counter);
}
/**
* atomic_set - set atomic variable
* @v: pointer of type atomic_t
* @i: required value
*
* Atomically sets the value of @v to @i.
*/
static inline void atomic_set(atomic_t *v, int i)
{
v->counter = i;
}
/**
* atomic_inc - increment atomic variable
* @v: pointer of type atomic_t
*
* Atomically increments @v by 1.
*/
static inline void atomic_inc(atomic_t *v)
{
__sync_add_and_fetch(&v->counter, 1);
}
/**
* atomic_dec_and_test - decrement and test
* @v: pointer of type atomic_t
*
* Atomically decrements @v by 1 and
* returns true if the result is 0, or false for all other
* cases.
*/
static inline int atomic_dec_and_test(atomic_t *v)
{
return __sync_sub_and_fetch(&v->counter, 1) == 0;
}
#endif /* __TOOLS_ASM_GENERIC_ATOMIC_H */
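A usage sketch under stated assumptions ('struct obj' and the obj_*()
helpers are hypothetical; the pattern mirrors the 'struct thread'
refcounting this series introduces, but this is not the actual perf code):

#include <stdlib.h>
#include <linux/atomic.h>

struct obj {
	atomic_t refcnt;
};

static struct obj *obj_new(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	if (o)
		atomic_set(&o->refcnt, 1);	/* creator holds the first reference */
	return o;
}

static struct obj *obj_get(struct obj *o)
{
	atomic_inc(&o->refcnt);			/* grab an extra reference */
	return o;
}

static void obj_put(struct obj *o)
{
	if (o && atomic_dec_and_test(&o->refcnt))
		free(o);			/* last reference gone: destroy */
}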
/*
* Copied from the kernel sources to tools/perf/:
*
* Generic barrier definitions, originally based on MN10300 definitions.
*
* It should be possible to use these on really simple architectures,
* but it serves more as a starting point for new ports.
*
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#ifndef __TOOLS_LINUX_ASM_GENERIC_BARRIER_H
#define __TOOLS_LINUX_ASM_GENERIC_BARRIER_H
#ifndef __ASSEMBLY__
#include <linux/compiler.h>
/*
* Force strict CPU ordering. And yes, this is required on UP too when we're
* talking to devices.
*
* Fall back to compiler barriers if nothing better is provided.
*/
#ifndef mb
#define mb() barrier()
#endif
#ifndef rmb
#define rmb() mb()
#endif
#ifndef wmb
#define wmb() mb()
#endif
#endif /* !__ASSEMBLY__ */
#endif /* __TOOLS_LINUX_ASM_GENERIC_BARRIER_H */
#ifndef __TOOLS_LINUX_ASM_ATOMIC_H
#define __TOOLS_LINUX_ASM_ATOMIC_H
#if defined(__i386__) || defined(__x86_64__)
#include "../../arch/x86/include/asm/atomic.h"
#else
#include <asm-generic/atomic-gcc.h>
#endif
#endif /* __TOOLS_LINUX_ASM_ATOMIC_H */
#if defined(__i386__) || defined(__x86_64__)
#include "../../arch/x86/include/asm/barrier.h"
#elif defined(__arm__)
#include "../../arch/arm/include/asm/barrier.h"
#elif defined(__aarch64__)
#include "../../arch/arm64/include/asm/barrier.h"
#elif defined(__powerpc__)
#include "../../arch/powerpc/include/asm/barrier.h"
#elif defined(__s390__)
#include "../../arch/s390/include/asm/barrier.h"
#elif defined(__sh__)
#include "../../arch/sh/include/asm/barrier.h"
#elif defined(__sparc__)
#include "../../arch/sparc/include/asm/barrier.h"
#elif defined(__tile__)
#include "../../arch/tile/include/asm/barrier.h"
#elif defined(__alpha__)
#include "../../arch/alpha/include/asm/barrier.h"
#elif defined(__mips__)
#include "../../arch/mips/include/asm/barrier.h"
#elif defined(__ia64__)
#include "../../arch/ia64/include/asm/barrier.h"
#elif defined(__xtensa__)
#include "../../arch/xtensa/include/asm/barrier.h"
#else
#include <asm-generic/barrier.h>
#endif
#ifndef __TOOLS_LINUX_ATOMIC_H
#define __TOOLS_LINUX_ATOMIC_H
#include <asm/atomic.h>
#endif /* __TOOLS_LINUX_ATOMIC_H */
#ifndef _TOOLS_LINUX_COMPILER_H_
#define _TOOLS_LINUX_COMPILER_H_
/* Optimization barrier */
/* The "volatile" is due to gcc bugs */
#define barrier() __asm__ __volatile__("": : :"memory")
#ifndef __always_inline
# define __always_inline inline __attribute__((always_inline))
#endif
......
......@@ -60,6 +60,10 @@ typedef __u32 __bitwise __be32;
typedef __u64 __bitwise __le64;
typedef __u64 __bitwise __be64;
typedef struct {
int counter;
} atomic_t;
struct list_head {
struct list_head *next, *prev;
};
......
......@@ -210,6 +210,9 @@ Suite for evaluating hash tables.
*wake*::
Suite for evaluating wake calls.
*wake-parallel*::
Suite for evaluating parallel wake calls.
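For example (the -t/--threads and -w/--nwakers options come from this
benchmark's implementation; the values are illustrative):
'perf bench futex wake-parallel -t 512 -w 8' blocks 512 threads and wakes
them with 8 parallel waker threads, each waking 64 blocked threads per
futex_wake() call.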
*requeue*::
Suite for evaluating requeue calls.
......
......@@ -83,6 +83,10 @@ OPTIONS
(Only for --vars) Show external defined variables in addition to local
variables.
--no-inlines::
(Only for --add) Search only for non-inlined functions. The functions
which do not have instances are ignored.
-F::
--funcs[=FILTER]::
Show available functions in given module or kernel. With -x/--exec,
......@@ -151,7 +155,7 @@ Each probe argument follows below syntax.
[NAME=]LOCALVAR|$retval|%REG|@SYMBOL[:TYPE]
'NAME' specifies the name of this argument (optional). You can use the name of local variable, local data structure member (e.g. var->field, var.field2), local array with fixed index (e.g. array[1], var->array[0], var->pointer[2]), or kprobe-tracer argument format (e.g. $retval, %ax, etc). Note that the name of this argument will be set as the last member name if you specify a local data structure member (e.g. field2 for 'var->field1.field2'.)
'$vars' special argument is also available for NAME, it is expanded to the local variables which can access at given probe point.
'$vars' and '$params' special arguments are also available for NAME: '$vars' is expanded to the local variables (including function parameters) which can be accessed at the given probe point, while '$params' is expanded to only the function parameters.
'TYPE' casts the type of this argument (optional). If omitted, perf probe automatically set the type based on debuginfo. You can specify 'string' type only for the local variable or structure member which is an array of or a pointer to 'char' or 'unsigned char' type.
On x86 systems %REG is always the short form of the register: for example %AX. %RAX or %EAX is not valid.
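For example (the probed function is chosen purely for illustration):
"perf probe --add 'vfs_read $params'" records every parameter of vfs_read,
whereas 'vfs_read $vars' at the same probe point would additionally record
the other local variables visible there.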
......
tools/perf
tools/arch/alpha/include/asm/barrier.h
tools/arch/arm/include/asm/barrier.h
tools/arch/ia64/include/asm/barrier.h
tools/arch/mips/include/asm/barrier.h
tools/arch/powerpc/include/asm/barrier.h
tools/arch/s390/include/asm/barrier.h
tools/arch/sh/include/asm/barrier.h
tools/arch/sparc/include/asm/barrier.h
tools/arch/sparc/include/asm/barrier_32.h
tools/arch/sparc/include/asm/barrier_64.h
tools/arch/tile/include/asm/barrier.h
tools/arch/x86/include/asm/barrier.h
tools/arch/xtensa/include/asm/barrier.h
tools/scripts
tools/build
tools/arch/x86/include/asm/atomic.h
tools/arch/x86/include/asm/rmwcc.h
tools/lib/traceevent
tools/lib/api
tools/lib/symbol/kallsyms.c
tools/lib/symbol/kallsyms.h
tools/lib/util/find_next_bit.c
tools/include/asm/atomic.h
tools/include/asm/barrier.h
tools/include/asm/bug.h
tools/include/asm-generic/barrier.h
tools/include/asm-generic/bitops/arch_hweight.h
tools/include/asm-generic/bitops/atomic.h
tools/include/asm-generic/bitops/const_hweight.h
......@@ -17,6 +35,7 @@ tools/include/asm-generic/bitops/fls64.h
tools/include/asm-generic/bitops/fls.h
tools/include/asm-generic/bitops/hweight.h
tools/include/asm-generic/bitops.h
tools/include/linux/atomic.h
tools/include/linux/bitops.h
tools/include/linux/compiler.h
tools/include/linux/export.h
......
......@@ -3,6 +3,7 @@ perf-y += sched-pipe.o
perf-y += mem-memcpy.o
perf-y += futex-hash.o
perf-y += futex-wake.o
perf-y += futex-wake-parallel.o
perf-y += futex-requeue.o
perf-$(CONFIG_X86_64) += mem-memcpy-x86-64-asm.o
......
......@@ -33,6 +33,8 @@ extern int bench_mem_memcpy(int argc, const char **argv,
extern int bench_mem_memset(int argc, const char **argv, const char *prefix);
extern int bench_futex_hash(int argc, const char **argv, const char *prefix);
extern int bench_futex_wake(int argc, const char **argv, const char *prefix);
extern int bench_futex_wake_parallel(int argc, const char **argv,
const char *prefix);
extern int bench_futex_requeue(int argc, const char **argv, const char *prefix);
#define BENCH_FORMAT_DEFAULT_STR "default"
......
/*
* Copyright (C) 2015 Davidlohr Bueso.
*
* Block a bunch of threads and let parallel waker threads wakeup an
* equal amount of them. The program output reflects the avg latency
* for each individual thread to service its share of work. Ultimately
* it can be used to measure futex_wake() changes.
*/
#include "../perf.h"
#include "../util/util.h"
#include "../util/stat.h"
#include "../util/parse-options.h"
#include "../util/header.h"
#include "bench.h"
#include "futex.h"
#include <err.h>
#include <stdlib.h>
#include <sys/time.h>
#include <pthread.h>
struct thread_data {
pthread_t worker;
unsigned int nwoken;
struct timeval runtime;
};
static unsigned int nwakes = 1;
/* all threads will block on the same futex -- hash bucket chaos ;) */
static u_int32_t futex = 0;
static pthread_t *blocked_worker;
static bool done = false, silent = false, fshared = false;
static unsigned int nblocked_threads = 0, nwaking_threads = 0;
static pthread_mutex_t thread_lock;
static pthread_cond_t thread_parent, thread_worker;
static struct stats waketime_stats, wakeup_stats;
static unsigned int ncpus, threads_starting;
static int futex_flag = 0;
static const struct option options[] = {
OPT_UINTEGER('t', "threads", &nblocked_threads, "Specify amount of threads"),
OPT_UINTEGER('w', "nwakers", &nwaking_threads, "Specify amount of waking threads"),
OPT_BOOLEAN( 's', "silent", &silent, "Silent mode: do not display data/details"),
OPT_BOOLEAN( 'S', "shared", &fshared, "Use shared futexes instead of private ones"),
OPT_END()
};
static const char * const bench_futex_wake_parallel_usage[] = {
"perf bench futex wake-parallel <options>",
NULL
};
static void *waking_workerfn(void *arg)
{
struct thread_data *waker = (struct thread_data *) arg;
struct timeval start, end;
gettimeofday(&start, NULL);
waker->nwoken = futex_wake(&futex, nwakes, futex_flag);
if (waker->nwoken != nwakes)
warnx("couldn't wakeup all tasks (%d/%d)",
waker->nwoken, nwakes);
gettimeofday(&end, NULL);
timersub(&end, &start, &waker->runtime);
pthread_exit(NULL);
return NULL;
}
static void wakeup_threads(struct thread_data *td, pthread_attr_t thread_attr)
{
unsigned int i;
pthread_attr_setdetachstate(&thread_attr, PTHREAD_CREATE_JOINABLE);
/* create the waker threads */
for (i = 0; i < nwaking_threads; i++) {
/*
* Thread creation order will impact per-thread latency,
* as it affects the order in which the hb->lock spinlock is acquired.
* For now let the scheduler decide.
*/
if (pthread_create(&td[i].worker, &thread_attr,
waking_workerfn, (void *)&td[i]))
err(EXIT_FAILURE, "pthread_create");
}
for (i = 0; i < nwaking_threads; i++)
if (pthread_join(td[i].worker, NULL))
err(EXIT_FAILURE, "pthread_join");
}
static void *blocked_workerfn(void *arg __maybe_unused)
{
pthread_mutex_lock(&thread_lock);
threads_starting--;
if (!threads_starting)
pthread_cond_signal(&thread_parent);
pthread_cond_wait(&thread_worker, &thread_lock);
pthread_mutex_unlock(&thread_lock);
while (1) { /* handle spurious wakeups */
if (futex_wait(&futex, 0, NULL, futex_flag) != EINTR)
break;
}
pthread_exit(NULL);
return NULL;
}
static void block_threads(pthread_t *w, pthread_attr_t thread_attr)
{
cpu_set_t cpu;
unsigned int i;
threads_starting = nblocked_threads;
/* create and block all threads */
for (i = 0; i < nblocked_threads; i++) {
CPU_ZERO(&cpu);
CPU_SET(i % ncpus, &cpu);
if (pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpu))
err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
if (pthread_create(&w[i], &thread_attr, blocked_workerfn, NULL))
err(EXIT_FAILURE, "pthread_create");
}
}
static void print_run(struct thread_data *waking_worker, unsigned int run_num)
{
unsigned int i, wakeup_avg;
double waketime_avg, waketime_stddev;
struct stats __waketime_stats, __wakeup_stats;
init_stats(&__wakeup_stats);
init_stats(&__waketime_stats);
for (i = 0; i < nwaking_threads; i++) {
update_stats(&__waketime_stats, waking_worker[i].runtime.tv_usec);
update_stats(&__wakeup_stats, waking_worker[i].nwoken);
}
waketime_avg = avg_stats(&__waketime_stats);
waketime_stddev = stddev_stats(&__waketime_stats);
wakeup_avg = avg_stats(&__wakeup_stats);
printf("[Run %d]: Avg per-thread latency (waking %d/%d threads) "
"in %.4f ms (+-%.2f%%)\n", run_num + 1, wakeup_avg,
nblocked_threads, waketime_avg/1e3,
rel_stddev_stats(waketime_stddev, waketime_avg));
}
static void print_summary(void)
{
unsigned int wakeup_avg;
double waketime_avg, waketime_stddev;
waketime_avg = avg_stats(&waketime_stats);
waketime_stddev = stddev_stats(&waketime_stats);
wakeup_avg = avg_stats(&wakeup_stats);
printf("Avg per-thread latency (waking %d/%d threads) in %.4f ms (+-%.2f%%)\n",
wakeup_avg,
nblocked_threads,
waketime_avg/1e3,
rel_stddev_stats(waketime_stddev, waketime_avg));
}
static void do_run_stats(struct thread_data *waking_worker)
{
unsigned int i;
for (i = 0; i < nwaking_threads; i++) {
update_stats(&waketime_stats, waking_worker[i].runtime.tv_usec);
update_stats(&wakeup_stats, waking_worker[i].nwoken);
}
}
static void toggle_done(int sig __maybe_unused,
siginfo_t *info __maybe_unused,
void *uc __maybe_unused)
{
done = true;
}
int bench_futex_wake_parallel(int argc, const char **argv,
const char *prefix __maybe_unused)
{
int ret = 0;
unsigned int i, j;
struct sigaction act;
pthread_attr_t thread_attr;
struct thread_data *waking_worker;
argc = parse_options(argc, argv, options,
bench_futex_wake_parallel_usage, 0);
if (argc) {
usage_with_options(bench_futex_wake_parallel_usage, options);
exit(EXIT_FAILURE);
}
sigfillset(&act.sa_mask);
act.sa_sigaction = toggle_done;
sigaction(SIGINT, &act, NULL);
ncpus = sysconf(_SC_NPROCESSORS_ONLN);
if (!nblocked_threads)
nblocked_threads = ncpus;
/* some sanity checks */
if (nwaking_threads > nblocked_threads || !nwaking_threads)
nwaking_threads = nblocked_threads;
if (nblocked_threads % nwaking_threads)
errx(EXIT_FAILURE, "Must be perfectly divisible");
/*
* Each thread will wakeup nwakes tasks in
* a single futex_wake call.
*/
nwakes = nblocked_threads/nwaking_threads;
blocked_worker = calloc(nblocked_threads, sizeof(*blocked_worker));
if (!blocked_worker)
err(EXIT_FAILURE, "calloc");
if (!fshared)
futex_flag = FUTEX_PRIVATE_FLAG;
printf("Run summary [PID %d]: blocking on %d threads (at [%s] "
"futex %p), %d threads waking up %d at a time.\n\n",
getpid(), nblocked_threads, fshared ? "shared":"private",
&futex, nwaking_threads, nwakes);
init_stats(&wakeup_stats);
init_stats(&waketime_stats);
pthread_attr_init(&thread_attr);
pthread_mutex_init(&thread_lock, NULL);
pthread_cond_init(&thread_parent, NULL);
pthread_cond_init(&thread_worker, NULL);
for (j = 0; j < bench_repeat && !done; j++) {
waking_worker = calloc(nwaking_threads, sizeof(*waking_worker));
if (!waking_worker)
err(EXIT_FAILURE, "calloc");
/* create, launch & block all threads */
block_threads(blocked_worker, thread_attr);
/* make sure all threads are already blocked */
pthread_mutex_lock(&thread_lock);
while (threads_starting)
pthread_cond_wait(&thread_parent, &thread_lock);
pthread_cond_broadcast(&thread_worker);
pthread_mutex_unlock(&thread_lock);
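/*
 * The condvar handshake above only guarantees that every worker ran
 * past the broadcast; the sleep below gives them time to actually
 * enter futex_wait() before the wakers start.
 */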
usleep(100000);
/* Ok, all threads are patiently blocked, start waking folks up */
wakeup_threads(waking_worker, thread_attr);
for (i = 0; i < nblocked_threads; i++) {
ret = pthread_join(blocked_worker[i], NULL);
if (ret)
err(EXIT_FAILURE, "pthread_join");
}
do_run_stats(waking_worker);
if (!silent)
print_run(waking_worker, j);
free(waking_worker);
}
/* cleanup & report results */
pthread_cond_destroy(&thread_parent);
pthread_cond_destroy(&thread_worker);
pthread_mutex_destroy(&thread_lock);
pthread_attr_destroy(&thread_attr);
print_summary();
free(blocked_worker);
return ret;
}
......@@ -60,7 +60,12 @@ static void *workerfn(void *arg __maybe_unused)
pthread_cond_wait(&thread_worker, &thread_lock);
pthread_mutex_unlock(&thread_lock);
futex_wait(&futex1, 0, NULL, futex_flag);
while (1) {
if (futex_wait(&futex1, 0, NULL, futex_flag) != EINTR)
break;
}
pthread_exit(NULL);
return NULL;
}
......
......@@ -84,6 +84,7 @@ static int process_sample_event(struct perf_tool *tool,
{
struct perf_annotate *ann = container_of(tool, struct perf_annotate, tool);
struct addr_location al;
int ret = 0;
if (perf_event__preprocess_sample(event, machine, &al, sample) < 0) {
pr_warning("problem processing %d event, skipping it.\n",
......@@ -92,15 +93,16 @@ static int process_sample_event(struct perf_tool *tool,
}
if (ann->cpu_list && !test_bit(sample->cpu, ann->cpu_bitmap))
return 0;
goto out_put;
if (!al.filtered && perf_evsel__add_sample(evsel, sample, &al, ann)) {
pr_warning("problem incrementing symbol count, "
"skipping event\n");
return -1;
ret = -1;
}
return 0;
out_put:
addr_location__put(&al);
return ret;
}
static int hist_entry__tty_annotate(struct hist_entry *he,
......
......@@ -58,6 +58,7 @@ static struct bench mem_benchmarks[] = {
static struct bench futex_benchmarks[] = {
{ "hash", "Benchmark for futex hash table", bench_futex_hash },
{ "wake", "Benchmark for futex wake calls", bench_futex_wake },
{ "wake-parallel", "Benchmark for parallel futex wake calls", bench_futex_wake_parallel },
{ "requeue", "Benchmark for futex requeue calls", bench_futex_requeue },
{ "all", "Test all futex benchmarks", NULL },
{ NULL, NULL, NULL }
......
......@@ -328,6 +328,7 @@ static int diff__process_sample_event(struct perf_tool *tool __maybe_unused,
{
struct addr_location al;
struct hists *hists = evsel__hists(evsel);
int ret = -1;
if (perf_event__preprocess_sample(event, machine, &al, sample) < 0) {
pr_warning("problem processing %d event, skipping it.\n",
......@@ -338,7 +339,7 @@ static int diff__process_sample_event(struct perf_tool *tool __maybe_unused,
if (hists__add_entry(hists, &al, sample->period,
sample->weight, sample->transaction)) {
pr_warning("problem incrementing symbol period, skipping event\n");
return -1;
goto out_put;
}
/*
......@@ -350,8 +351,10 @@ static int diff__process_sample_event(struct perf_tool *tool __maybe_unused,
hists->stats.total_period += sample->period;
if (!al.filtered)
hists->stats.total_non_filtered_period += sample->period;
return 0;
ret = 0;
out_put:
addr_location__put(&al);
return ret;
}
static struct perf_tool tool = {
......
......@@ -365,6 +365,7 @@ static int perf_event__inject_buildid(struct perf_tool *tool,
}
}
thread__put(thread);
repipe:
perf_event__repipe(tool, event, sample, machine);
return 0;
......
......@@ -906,6 +906,7 @@ static int process_sample_event(struct perf_tool *tool __maybe_unused,
struct perf_evsel *evsel,
struct machine *machine)
{
int err = 0;
struct thread *thread = machine__findnew_thread(machine, sample->pid,
sample->tid);
......@@ -919,10 +920,12 @@ static int process_sample_event(struct perf_tool *tool __maybe_unused,
if (evsel->handler != NULL) {
tracepoint_handler f = evsel->handler;
return f(evsel, sample);
err = f(evsel, sample);
}
return 0;
thread__put(thread);
return err;
}
static struct perf_tool perf_kmem = {
......
......@@ -651,6 +651,7 @@ static int process_sample_event(struct perf_tool *tool,
struct perf_evsel *evsel,
struct machine *machine)
{
int err = 0;
struct thread *thread;
struct perf_kvm_stat *kvm = container_of(tool, struct perf_kvm_stat,
tool);
......@@ -666,9 +667,10 @@ static int process_sample_event(struct perf_tool *tool,
}
if (!handle_kvm_event(kvm, thread, evsel, sample))
return -1;
err = -1;
return 0;
thread__put(thread);
return err;
}
static int cpu_isa_config(struct perf_kvm_stat *kvm)
......
......@@ -769,6 +769,7 @@ static void dump_threads(void)
t = perf_session__findnew(session, st->tid);
pr_info("%10d: %s\n", st->tid, thread__comm_str(t));
node = rb_next(node);
thread__put(t);
};
}
......@@ -810,6 +811,7 @@ static int process_sample_event(struct perf_tool *tool __maybe_unused,
struct perf_evsel *evsel,
struct machine *machine)
{
int err = 0;
struct thread *thread = machine__findnew_thread(machine, sample->pid,
sample->tid);
......@@ -821,10 +823,12 @@ static int process_sample_event(struct perf_tool *tool __maybe_unused,
if (evsel->handler != NULL) {
tracepoint_handler f = evsel->handler;
return f(evsel, sample);
err = f(evsel, sample);
}
return 0;
thread__put(thread);
return err;
}
static void sort_result(void)
......
......@@ -74,7 +74,7 @@ dump_raw_samples(struct perf_tool *tool,
}
if (al.filtered || (mem->hide_unresolved && al.sym == NULL))
return 0;
goto out_put;
if (al.map != NULL)
al.map->dso->hit = 1;
......@@ -103,7 +103,8 @@ dump_raw_samples(struct perf_tool *tool,
symbol_conf.field_sep,
al.map ? (al.map->dso ? al.map->dso->long_name : "???") : "???",
al.sym ? al.sym->name : "???");
out_put:
addr_location__put(&al);
return 0;
}
......
......@@ -50,8 +50,6 @@
static struct {
int command; /* Command short_name */
bool list_events;
bool force_add;
bool show_ext_vars;
bool uprobes;
bool quiet;
bool target_used;
......@@ -59,7 +57,6 @@ static struct {
struct perf_probe_event events[MAX_PROBES];
struct line_range line_range;
char *target;
int max_probe_points;
struct strfilter *filter;
} params;
......@@ -364,7 +361,7 @@ __cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
"\t\tARG:\tProbe argument (kprobe-tracer argument format.)\n",
#endif
opt_add_probe_event),
OPT_BOOLEAN('f', "force", &params.force_add, "forcibly add events"
OPT_BOOLEAN('f', "force", &probe_conf.force_add, "forcibly add events"
" with existing name"),
#ifdef HAVE_DWARF_SUPPORT
OPT_CALLBACK('L', "line", NULL,
......@@ -373,7 +370,7 @@ __cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
OPT_CALLBACK('V', "vars", NULL,
"FUNC[@SRC][+OFF|%return|:RL|;PT]|SRC:AL|SRC;PT",
"Show accessible variables on PROBEDEF", opt_show_vars),
OPT_BOOLEAN('\0', "externs", &params.show_ext_vars,
OPT_BOOLEAN('\0', "externs", &probe_conf.show_ext_vars,
"Show external variables too (with --vars only)"),
OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
"file", "vmlinux pathname"),
......@@ -382,9 +379,11 @@ __cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
OPT_CALLBACK('m', "module", NULL, "modname|path",
"target module name (for online) or path (for offline)",
opt_set_target),
OPT_BOOLEAN('\0', "no-inlines", &probe_conf.no_inlines,
"Don't search inlined functions"),
#endif
OPT__DRY_RUN(&probe_event_dry_run),
OPT_INTEGER('\0', "max-probes", &params.max_probe_points,
OPT_INTEGER('\0', "max-probes", &probe_conf.max_probes,
"Set how many probe points can be found for a probe."),
OPT_CALLBACK_DEFAULT('F', "funcs", NULL, "[FILTER]",
"Show potential probe-able functions.",
......@@ -440,8 +439,8 @@ __cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
verbose = -1;
}
if (params.max_probe_points == 0)
params.max_probe_points = MAX_PROBES;
if (probe_conf.max_probes == 0)
probe_conf.max_probes = MAX_PROBES;
/*
* Only consider the user's kernel image path if given.
......@@ -477,10 +476,7 @@ __cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
NULL);
ret = show_available_vars(params.events, params.nevents,
params.max_probe_points,
params.target,
params.filter,
params.show_ext_vars);
params.filter);
if (ret < 0)
pr_err_with_code(" Error: Failed to show vars.", ret);
return ret;
......@@ -499,9 +495,7 @@ __cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
usage_with_options(probe_usage, options);
}
ret = add_perf_probe_events(params.events, params.nevents,
params.max_probe_points,
params.force_add);
ret = add_perf_probe_events(params.events, params.nevents);
if (ret < 0) {
pr_err_with_code(" Error: Failed to add events.", ret);
return ret;
......@@ -523,5 +517,5 @@ int cmd_probe(int argc, const char **argv, const char *prefix)
cleanup_params();
}
return ret;
return ret < 0 ? ret : 0;
}
......@@ -142,7 +142,7 @@ static int process_sample_event(struct perf_tool *tool,
.hide_unresolved = rep->hide_unresolved,
.add_entry_cb = hist_iter__report_callback,
};
int ret;
int ret = 0;
if (perf_event__preprocess_sample(event, machine, &al, sample) < 0) {
pr_debug("problem processing %d event, skipping it.\n",
......@@ -151,10 +151,10 @@ static int process_sample_event(struct perf_tool *tool,
}
if (rep->hide_unresolved && al.sym == NULL)
return 0;
goto out_put;
if (rep->cpu_list && !test_bit(sample->cpu, rep->cpu_bitmap))
return 0;
goto out_put;
if (sort__mode == SORT_MODE__BRANCH)
iter.ops = &hist_iter_branch;
......@@ -172,7 +172,8 @@ static int process_sample_event(struct perf_tool *tool,
rep);
if (ret < 0)
pr_debug("problem adding hist entry, skipping event\n");
out_put:
addr_location__put(&al);
return ret;
}
......
......@@ -770,7 +770,7 @@ static int replay_fork_event(struct perf_sched *sched,
if (child == NULL || parent == NULL) {
pr_debug("thread does not exist on fork event: child %p, parent %p\n",
child, parent);
return 0;
goto out_put;
}
if (verbose) {
......@@ -781,6 +781,9 @@ static int replay_fork_event(struct perf_sched *sched,
register_pid(sched, parent->tid, thread__comm_str(parent));
register_pid(sched, child->tid, thread__comm_str(child));
out_put:
thread__put(child);
thread__put(parent);
return 0;
}
......@@ -957,7 +960,7 @@ static int latency_switch_event(struct perf_sched *sched,
struct work_atoms *out_events, *in_events;
struct thread *sched_out, *sched_in;
u64 timestamp0, timestamp = sample->time;
int cpu = sample->cpu;
int cpu = sample->cpu, err = -1;
s64 delta;
BUG_ON(cpu >= MAX_CPUS || cpu < 0);
......@@ -976,15 +979,17 @@ static int latency_switch_event(struct perf_sched *sched,
sched_out = machine__findnew_thread(machine, -1, prev_pid);
sched_in = machine__findnew_thread(machine, -1, next_pid);
if (sched_out == NULL || sched_in == NULL)
goto out_put;
out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid);
if (!out_events) {
if (thread_atoms_insert(sched, sched_out))
return -1;
goto out_put;
out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid);
if (!out_events) {
pr_err("out-event: Internal tree error");
return -1;
goto out_put;
}
}
if (add_sched_out_event(out_events, sched_out_state(prev_state), timestamp))
......@@ -993,22 +998,25 @@ static int latency_switch_event(struct perf_sched *sched,
in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid);
if (!in_events) {
if (thread_atoms_insert(sched, sched_in))
return -1;
goto out_put;
in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid);
if (!in_events) {
pr_err("in-event: Internal tree error");
return -1;
goto out_put;
}
/*
* A task came in that we have not heard about yet;
* add an initial atom in runnable state:
*/
if (add_sched_out_event(in_events, 'R', timestamp))
return -1;
goto out_put;
}
add_sched_in_event(in_events, timestamp);
return 0;
err = 0;
out_put:
thread__put(sched_out);
thread__put(sched_in);
return err;
}
static int latency_runtime_event(struct perf_sched *sched,
......@@ -1021,23 +1029,29 @@ static int latency_runtime_event(struct perf_sched *sched,
struct thread *thread = machine__findnew_thread(machine, -1, pid);
struct work_atoms *atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
u64 timestamp = sample->time;
int cpu = sample->cpu;
int cpu = sample->cpu, err = -1;
if (thread == NULL)
return -1;
BUG_ON(cpu >= MAX_CPUS || cpu < 0);
if (!atoms) {
if (thread_atoms_insert(sched, thread))
return -1;
goto out_put;
atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
if (!atoms) {
pr_err("in-event: Internal tree error");
return -1;
goto out_put;
}
if (add_sched_out_event(atoms, 'R', timestamp))
return -1;
goto out_put;
}
add_runtime_event(atoms, runtime, timestamp);
return 0;
err = 0;
out_put:
thread__put(thread);
return err;
}
static int latency_wakeup_event(struct perf_sched *sched,
......@@ -1050,19 +1064,22 @@ static int latency_wakeup_event(struct perf_sched *sched,
struct work_atom *atom;
struct thread *wakee;
u64 timestamp = sample->time;
int err = -1;
wakee = machine__findnew_thread(machine, -1, pid);
if (wakee == NULL)
return -1;
atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid);
if (!atoms) {
if (thread_atoms_insert(sched, wakee))
return -1;
goto out_put;
atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid);
if (!atoms) {
pr_err("wakeup-event: Internal tree error");
return -1;
goto out_put;
}
if (add_sched_out_event(atoms, 'S', timestamp))
return -1;
goto out_put;
}
BUG_ON(list_empty(&atoms->work_list));
......@@ -1081,17 +1098,21 @@ static int latency_wakeup_event(struct perf_sched *sched,
* skip in this case.
*/
if (sched->profile_cpu == -1 && atom->state != THREAD_SLEEPING)
return 0;
goto out_ok;
sched->nr_timestamps++;
if (atom->sched_out_time > timestamp) {
sched->nr_unordered_timestamps++;
return 0;
goto out_ok;
}
atom->state = THREAD_WAIT_CPU;
atom->wake_up_time = timestamp;
return 0;
out_ok:
err = 0;
out_put:
thread__put(wakee);
return err;
}
static int latency_migrate_task_event(struct perf_sched *sched,
......@@ -1104,6 +1125,7 @@ static int latency_migrate_task_event(struct perf_sched *sched,
struct work_atoms *atoms;
struct work_atom *atom;
struct thread *migrant;
int err = -1;
/*
* Only need to worry about migration when profiling one CPU.
......@@ -1112,18 +1134,20 @@ static int latency_migrate_task_event(struct perf_sched *sched,
return 0;
migrant = machine__findnew_thread(machine, -1, pid);
if (migrant == NULL)
return -1;
atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid);
if (!atoms) {
if (thread_atoms_insert(sched, migrant))
return -1;
goto out_put;
register_pid(sched, migrant->tid, thread__comm_str(migrant));
atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid);
if (!atoms) {
pr_err("migration-event: Internal tree error");
return -1;
goto out_put;
}
if (add_sched_out_event(atoms, 'R', timestamp))
return -1;
goto out_put;
}
BUG_ON(list_empty(&atoms->work_list));
......@@ -1135,8 +1159,10 @@ static int latency_migrate_task_event(struct perf_sched *sched,
if (atom->sched_out_time > timestamp)
sched->nr_unordered_timestamps++;
return 0;
err = 0;
out_put:
thread__put(migrant);
return err;
}
static void output_lat_thread(struct perf_sched *sched, struct work_atoms *work_list)
......@@ -1330,8 +1356,10 @@ static int map_switch_event(struct perf_sched *sched, struct perf_evsel *evsel,
}
sched_in = machine__findnew_thread(machine, -1, next_pid);
if (sched_in == NULL)
return -1;
sched->curr_thread[this_cpu] = sched_in;
sched->curr_thread[this_cpu] = thread__get(sched_in);
printf(" ");
......@@ -1381,6 +1409,8 @@ static int map_switch_event(struct perf_sched *sched, struct perf_evsel *evsel,
printf("\n");
}
thread__put(sched_in);
return 0;
}
......
......@@ -607,13 +607,14 @@ static int process_sample_event(struct perf_tool *tool __maybe_unused,
}
if (al.filtered)
return 0;
goto out_put;
if (cpu_list && !test_bit(sample->cpu, cpu_bitmap))
return 0;
goto out_put;
scripting_ops->process_event(event, sample, evsel, &al);
out_put:
addr_location__put(&al);
return 0;
}
......@@ -681,8 +682,8 @@ static int process_comm_event(struct perf_tool *tool,
print_sample_start(sample, thread, evsel);
perf_event__fprintf(event, stdout);
ret = 0;
out:
thread__put(thread);
return ret;
}
......@@ -713,6 +714,7 @@ static int process_fork_event(struct perf_tool *tool,
}
print_sample_start(sample, thread, evsel);
perf_event__fprintf(event, stdout);
thread__put(thread);
return 0;
}
......@@ -721,6 +723,7 @@ static int process_exit_event(struct perf_tool *tool,
struct perf_sample *sample,
struct machine *machine)
{
int err = 0;
struct thread *thread;
struct perf_script *script = container_of(tool, struct perf_script, tool);
struct perf_session *session = script->session;
......@@ -742,9 +745,10 @@ static int process_exit_event(struct perf_tool *tool,
perf_event__fprintf(event, stdout);
if (perf_event__process_exit(tool, event, sample, machine) < 0)
return -1;
err = -1;
return 0;
thread__put(thread);
return err;
}
static int process_mmap_event(struct perf_tool *tool,
......@@ -774,7 +778,7 @@ static int process_mmap_event(struct perf_tool *tool,
}
print_sample_start(sample, thread, evsel);
perf_event__fprintf(event, stdout);
thread__put(thread);
return 0;
}
......@@ -805,7 +809,7 @@ static int process_mmap2_event(struct perf_tool *tool,
}
print_sample_start(sample, thread, evsel);
perf_event__fprintf(event, stdout);
thread__put(thread);
return 0;
}
......
......@@ -523,7 +523,7 @@ static const char *cat_backtrace(union perf_event *event,
* Discard all.
*/
zfree(&p);
goto exit;
goto exit_put;
}
continue;
}
......@@ -538,7 +538,8 @@ static const char *cat_backtrace(union perf_event *event,
else
fprintf(f, "..... %016" PRIx64 "\n", ip);
}
exit_put:
addr_location__put(&al);
exit:
fclose(f);
......
......@@ -793,7 +793,7 @@ static void perf_event__process_sample(struct perf_tool *tool,
pthread_mutex_unlock(&hists->lock);
}
return;
addr_location__put(&al);
}
static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
......
......@@ -1712,7 +1712,7 @@ static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
void *args;
size_t printed = 0;
struct thread *thread;
int id = perf_evsel__sc_tp_uint(evsel, id, sample);
int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1;
struct syscall *sc = trace__syscall_info(trace, evsel, id);
struct thread_trace *ttrace;
......@@ -1725,14 +1725,14 @@ static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
ttrace = thread__trace(thread, trace->output);
if (ttrace == NULL)
return -1;
goto out_put;
args = perf_evsel__sc_tp_ptr(evsel, args, sample);
if (ttrace->entry_str == NULL) {
ttrace->entry_str = malloc(1024);
if (!ttrace->entry_str)
return -1;
goto out_put;
}
if (!trace->summary_only)
......@@ -1757,8 +1757,10 @@ static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
thread__put(trace->current);
trace->current = thread__get(thread);
}
return 0;
err = 0;
out_put:
thread__put(thread);
return err;
}
static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel,
......@@ -1768,7 +1770,7 @@ static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel,
long ret;
u64 duration = 0;
struct thread *thread;
int id = perf_evsel__sc_tp_uint(evsel, id, sample);
int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1;
struct syscall *sc = trace__syscall_info(trace, evsel, id);
struct thread_trace *ttrace;
......@@ -1781,7 +1783,7 @@ static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel,
thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
ttrace = thread__trace(thread, trace->output);
if (ttrace == NULL)
return -1;
goto out_put;
if (trace->summary)
thread__update_stats(ttrace, id, sample);
......@@ -1835,8 +1837,10 @@ static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel,
fputc('\n', trace->output);
out:
ttrace->entry_pending = false;
return 0;
err = 0;
out_put:
thread__put(thread);
return err;
}
static int trace__vfs_getname(struct trace *trace, struct perf_evsel *evsel,
......@@ -1863,6 +1867,7 @@ static int trace__sched_stat_runtime(struct trace *trace, struct perf_evsel *evs
ttrace->runtime_ms += runtime_ms;
trace->runtime_ms += runtime_ms;
thread__put(thread);
return 0;
out_dump:
......@@ -1872,6 +1877,7 @@ static int trace__sched_stat_runtime(struct trace *trace, struct perf_evsel *evs
(pid_t)perf_evsel__intval(evsel, sample, "pid"),
runtime,
perf_evsel__intval(evsel, sample, "vruntime"));
thread__put(thread);
return 0;
}
......@@ -1924,11 +1930,12 @@ static int trace__pgfault(struct trace *trace,
struct addr_location al;
char map_type = 'd';
struct thread_trace *ttrace;
int err = -1;
thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
ttrace = thread__trace(thread, trace->output);
if (ttrace == NULL)
return -1;
goto out_put;
if (evsel->attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ)
ttrace->pfmaj++;
......@@ -1936,7 +1943,7 @@ static int trace__pgfault(struct trace *trace,
ttrace->pfmin++;
if (trace->summary_only)
return 0;
goto out;
thread__find_addr_location(thread, cpumode, MAP__FUNCTION,
sample->ip, &al);
......@@ -1967,8 +1974,11 @@ static int trace__pgfault(struct trace *trace,
print_location(trace->output, sample, &al, true, false);
fprintf(trace->output, " (%c%c)\n", map_type, al.level);
return 0;
out:
err = 0;
out_put:
thread__put(thread);
return err;
}
static bool skip_sample(struct trace *trace, struct perf_sample *sample)
......
......@@ -268,6 +268,10 @@ else
endif # libelf support
endif # NO_LIBELF
ifdef NO_DWARF
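# libdw implements DWARF-based unwinding, so disable it as well when NO_DWARF is set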
NO_LIBDW_DWARF_UNWIND := 1
endif
ifndef NO_LIBELF
CFLAGS += -DHAVE_LIBELF_SUPPORT
EXTLIBS += -lelf
......
......@@ -6,11 +6,9 @@
#include <sys/syscall.h>
#include <linux/types.h>
#include <linux/perf_event.h>
#include <asm/barrier.h>
#if defined(__i386__)
#define mb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
#define wmb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
#define rmb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
#define cpu_relax() asm volatile("rep; nop" ::: "memory");
#define CPUINFO_PROC {"model name"}
#ifndef __NR_perf_event_open
......@@ -25,9 +23,6 @@
#endif
#if defined(__x86_64__)
#define mb() asm volatile("mfence" ::: "memory")
#define wmb() asm volatile("sfence" ::: "memory")
#define rmb() asm volatile("lfence" ::: "memory")
#define cpu_relax() asm volatile("rep; nop" ::: "memory");
#define CPUINFO_PROC {"model name"}
#ifndef __NR_perf_event_open
......@@ -43,129 +38,63 @@
#ifdef __powerpc__
#include "../../arch/powerpc/include/uapi/asm/unistd.h"
#define mb() asm volatile ("sync" ::: "memory")
#define wmb() asm volatile ("sync" ::: "memory")
#define rmb() asm volatile ("sync" ::: "memory")
#define CPUINFO_PROC {"cpu"}
#endif
#ifdef __s390__
#define mb() asm volatile("bcr 15,0" ::: "memory")
#define wmb() asm volatile("bcr 15,0" ::: "memory")
#define rmb() asm volatile("bcr 15,0" ::: "memory")
#define CPUINFO_PROC {"vendor_id"}
#endif
#ifdef __sh__
#if defined(__SH4A__) || defined(__SH5__)
# define mb() asm volatile("synco" ::: "memory")
# define wmb() asm volatile("synco" ::: "memory")
# define rmb() asm volatile("synco" ::: "memory")
#else
# define mb() asm volatile("" ::: "memory")
# define wmb() asm volatile("" ::: "memory")
# define rmb() asm volatile("" ::: "memory")
#endif
#define CPUINFO_PROC {"cpu type"}
#endif
#ifdef __hppa__
#define mb() asm volatile("" ::: "memory")
#define wmb() asm volatile("" ::: "memory")
#define rmb() asm volatile("" ::: "memory")
#define CPUINFO_PROC {"cpu"}
#endif
#ifdef __sparc__
#ifdef __LP64__
#define mb() asm volatile("ba,pt %%xcc, 1f\n" \
"membar #StoreLoad\n" \
"1:\n":::"memory")
#else
#define mb() asm volatile("":::"memory")
#endif
#define wmb() asm volatile("":::"memory")
#define rmb() asm volatile("":::"memory")
#define CPUINFO_PROC {"cpu"}
#endif
#ifdef __alpha__
#define mb() asm volatile("mb" ::: "memory")
#define wmb() asm volatile("wmb" ::: "memory")
#define rmb() asm volatile("mb" ::: "memory")
#define CPUINFO_PROC {"cpu model"}
#endif
#ifdef __ia64__
#define mb() asm volatile ("mf" ::: "memory")
#define wmb() asm volatile ("mf" ::: "memory")
#define rmb() asm volatile ("mf" ::: "memory")
#define cpu_relax() asm volatile ("hint @pause" ::: "memory")
#define CPUINFO_PROC {"model name"}
#endif
#ifdef __arm__
/*
* Use the __kuser_memory_barrier helper in the CPU helper page. See
* arch/arm/kernel/entry-armv.S in the kernel source for details.
*/
#define mb() ((void(*)(void))0xffff0fa0)()
#define wmb() ((void(*)(void))0xffff0fa0)()
#define rmb() ((void(*)(void))0xffff0fa0)()
#define CPUINFO_PROC {"model name", "Processor"}
#endif
#ifdef __aarch64__
#define mb() asm volatile("dmb ish" ::: "memory")
#define wmb() asm volatile("dmb ishst" ::: "memory")
#define rmb() asm volatile("dmb ishld" ::: "memory")
#define cpu_relax() asm volatile("yield" ::: "memory")
#endif
#ifdef __mips__
#define mb() asm volatile( \
".set mips2\n\t" \
"sync\n\t" \
".set mips0" \
: /* no output */ \
: /* no input */ \
: "memory")
#define wmb() mb()
#define rmb() mb()
#define CPUINFO_PROC {"cpu model"}
#endif
#ifdef __arc__
#define mb() asm volatile("" ::: "memory")
#define wmb() asm volatile("" ::: "memory")
#define rmb() asm volatile("" ::: "memory")
#define CPUINFO_PROC {"Processor"}
#endif
#ifdef __metag__
#define mb() asm volatile("" ::: "memory")
#define wmb() asm volatile("" ::: "memory")
#define rmb() asm volatile("" ::: "memory")
#define CPUINFO_PROC {"CPU"}
#endif
#ifdef __xtensa__
#define mb() asm volatile("memw" ::: "memory")
#define wmb() asm volatile("memw" ::: "memory")
#define rmb() asm volatile("" ::: "memory")
#define CPUINFO_PROC {"core ID"}
#endif
#ifdef __tile__
#define mb() asm volatile ("mf" ::: "memory")
#define wmb() asm volatile ("mf" ::: "memory")
#define rmb() asm volatile ("mf" ::: "memory")
#define cpu_relax() asm volatile ("mfspr zero, PASS" ::: "memory")
#define CPUINFO_PROC {"model name"}
#endif
#define barrier() asm volatile ("" ::: "memory")
#ifndef cpu_relax
#define cpu_relax() barrier()
#endif
......
......@@ -248,6 +248,7 @@ static int process_sample_event(struct machine *machine,
struct perf_sample sample;
struct thread *thread;
u8 cpumode;
int ret;
if (perf_evlist__parse_sample(evlist, event, &sample)) {
pr_debug("perf_evlist__parse_sample failed\n");
......@@ -262,7 +263,9 @@ static int process_sample_event(struct machine *machine,
cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
return read_object_code(sample.ip, READLEN, cpumode, thread, state);
ret = read_object_code(sample.ip, READLEN, cpumode, thread, state);
thread__put(thread);
return ret;
}
static int process_event(struct machine *machine, struct perf_evlist *evlist,
......@@ -457,13 +460,13 @@ static int do_test_code_reading(bool try_kcore)
thread = machine__findnew_thread(machine, pid, pid);
if (!thread) {
pr_debug("machine__findnew_thread failed\n");
goto out_err;
goto out_put;
}
cpus = cpu_map__new(NULL);
if (!cpus) {
pr_debug("cpu_map__new failed\n");
goto out_err;
goto out_put;
}
while (1) {
......@@ -472,7 +475,7 @@ static int do_test_code_reading(bool try_kcore)
evlist = perf_evlist__new();
if (!evlist) {
pr_debug("perf_evlist__new failed\n");
goto out_err;
goto out_put;
}
perf_evlist__set_maps(evlist, cpus, threads);
......@@ -485,7 +488,7 @@ static int do_test_code_reading(bool try_kcore)
ret = parse_events(evlist, str, NULL);
if (ret < 0) {
pr_debug("parse_events failed\n");
goto out_err;
goto out_put;
}
perf_evlist__config(evlist, &opts);
......@@ -506,7 +509,7 @@ static int do_test_code_reading(bool try_kcore)
continue;
}
pr_debug("perf_evlist__open failed\n");
goto out_err;
goto out_put;
}
break;
}
......@@ -514,7 +517,7 @@ static int do_test_code_reading(bool try_kcore)
ret = perf_evlist__mmap(evlist, UINT_MAX, false);
if (ret < 0) {
pr_debug("perf_evlist__mmap failed\n");
goto out_err;
goto out_put;
}
perf_evlist__enable(evlist);
......@@ -525,7 +528,7 @@ static int do_test_code_reading(bool try_kcore)
ret = process_events(machine, evlist, &state);
if (ret < 0)
goto out_err;
goto out_put;
if (!have_vmlinux && !have_kcore && !try_kcore)
err = TEST_CODE_READING_NO_KERNEL_OBJ;
......@@ -535,7 +538,10 @@ static int do_test_code_reading(bool try_kcore)
err = TEST_CODE_READING_NO_ACCESS;
else
err = TEST_CODE_READING_OK;
out_put:
thread__put(thread);
out_err:
if (evlist) {
perf_evlist__delete(evlist);
} else {
......
......@@ -170,6 +170,7 @@ int test__dwarf_unwind(void)
}
err = krava_1(thread);
thread__put(thread);
out:
machine__delete_threads(machine);
......
......@@ -96,6 +96,7 @@ struct machine *setup_fake_machine(struct machines *machines)
goto out;
thread__set_comm(thread, fake_threads[i].comm, 0);
thread__put(thread);
}
for (i = 0; i < ARRAY_SIZE(fake_mmap_info); i++) {
......
......@@ -105,8 +105,10 @@ static int add_hist_entries(struct hists *hists, struct machine *machine)
goto out;
if (hist_entry_iter__add(&iter, &al, evsel, &sample,
PERF_MAX_STACK_DEPTH, NULL) < 0)
PERF_MAX_STACK_DEPTH, NULL) < 0) {
addr_location__put(&al);
goto out;
}
fake_samples[i].thread = al.thread;
fake_samples[i].map = al.map;
......
......@@ -82,8 +82,10 @@ static int add_hist_entries(struct perf_evlist *evlist,
goto out;
if (hist_entry_iter__add(&iter, &al, evsel, &sample,
PERF_MAX_STACK_DEPTH, NULL) < 0)
PERF_MAX_STACK_DEPTH, NULL) < 0) {
addr_location__put(&al);
goto out;
}
fake_samples[i].thread = al.thread;
fake_samples[i].map = al.map;
......
......@@ -91,8 +91,10 @@ static int add_hist_entries(struct perf_evlist *evlist, struct machine *machine)
he = __hists__add_entry(hists, &al, NULL,
NULL, NULL, 1, 1, 0, true);
if (he == NULL)
if (he == NULL) {
addr_location__put(&al);
goto out;
}
fake_common_samples[k].thread = al.thread;
fake_common_samples[k].map = al.map;
......@@ -115,8 +117,10 @@ static int add_hist_entries(struct perf_evlist *evlist, struct machine *machine)
he = __hists__add_entry(hists, &al, NULL,
NULL, NULL, 1, 1, 0, true);
if (he == NULL)
if (he == NULL) {
addr_location__put(&al);
goto out;
}
fake_samples[i][k].thread = al.thread;
fake_samples[i][k].map = al.map;
......
......@@ -71,8 +71,10 @@ static int add_hist_entries(struct hists *hists, struct machine *machine)
goto out;
if (hist_entry_iter__add(&iter, &al, evsel, &sample,
PERF_MAX_STACK_DEPTH, NULL) < 0)
PERF_MAX_STACK_DEPTH, NULL) < 0) {
addr_location__put(&al);
goto out;
}
fake_samples[i].thread = al.thread;
fake_samples[i].map = al.map;
......
......@@ -191,6 +191,8 @@ static int mmap_events(synth_cb synth)
PERF_RECORD_MISC_USER, MAP__FUNCTION,
(unsigned long) (td->map + 1), &al);
thread__put(thread);
if (!al.map) {
pr_debug("failed, couldn't find map\n");
err = -1;
......
......@@ -64,22 +64,22 @@ int test__thread_mg_share(void)
TEST_ASSERT_VAL("map groups don't match", other_mg == other_leader->mg);
/* release thread group */
thread__delete(leader);
thread__put(leader);
TEST_ASSERT_VAL("wrong refcnt", mg->refcnt == 3);
thread__delete(t1);
thread__put(t1);
TEST_ASSERT_VAL("wrong refcnt", mg->refcnt == 2);
thread__delete(t2);
thread__put(t2);
TEST_ASSERT_VAL("wrong refcnt", mg->refcnt == 1);
thread__delete(t3);
thread__put(t3);
/* release other group */
thread__delete(other_leader);
thread__put(other_leader);
TEST_ASSERT_VAL("wrong refcnt", other_mg->refcnt == 1);
thread__delete(other);
thread__put(other);
/*
* Cannot call machine__delete_threads(machine) now,
......
......@@ -43,6 +43,7 @@ int build_id__mark_dso_hit(struct perf_tool *tool __maybe_unused,
if (al.map != NULL)
al.map->dso->hit = 1;
thread__put(thread);
return 0;
}
......@@ -59,8 +60,10 @@ static int perf_event__exit_del_thread(struct perf_tool *tool __maybe_unused,
dump_printf("(%d:%d):(%d:%d)\n", event->fork.pid, event->fork.tid,
event->fork.ppid, event->fork.ptid);
if (thread)
if (thread) {
machine__remove_thread(machine, thread);
thread__put(thread);
}
return 0;
}
......
......@@ -122,6 +122,7 @@ int db_export__machine(struct db_export *dbe, struct machine *machine)
int db_export__thread(struct db_export *dbe, struct thread *thread,
struct machine *machine, struct comm *comm)
{
struct thread *main_thread;
u64 main_thread_db_id = 0;
int err;
......@@ -131,8 +132,6 @@ int db_export__thread(struct db_export *dbe, struct thread *thread,
thread->db_id = ++dbe->thread_last_db_id;
if (thread->pid_ != -1) {
struct thread *main_thread;
if (thread->pid_ == thread->tid) {
main_thread = thread;
} else {
......@@ -144,14 +143,16 @@ int db_export__thread(struct db_export *dbe, struct thread *thread,
err = db_export__thread(dbe, main_thread, machine,
comm);
if (err)
return err;
goto out_put;
if (comm) {
err = db_export__comm_thread(dbe, comm, thread);
if (err)
return err;
goto out_put;
}
}
main_thread_db_id = main_thread->db_id;
if (main_thread != thread)
thread__put(main_thread);
}
if (dbe->export_thread)
......@@ -159,6 +160,10 @@ int db_export__thread(struct db_export *dbe, struct thread *thread,
machine);
return 0;
out_put:
thread__put(main_thread);
return err;
}
int db_export__comm(struct db_export *dbe, struct comm *comm,
......@@ -303,6 +308,7 @@ int db_export__sample(struct db_export *dbe, union perf_event *event,
if (err)
return err;
/* FIXME: check refcounting for get_main_thread, that calls machine__find_thread... */
main_thread = get_main_thread(al->machine, thread);
if (main_thread)
comm = machine__thread_exec_comm(al->machine, main_thread);
......
......@@ -139,10 +139,26 @@ int cu_walk_functions_at(Dwarf_Die *cu_die, Dwarf_Addr addr,
bool die_compare_name(Dwarf_Die *dw_die, const char *tname)
{
const char *name;
name = dwarf_diename(dw_die);
return name ? (strcmp(tname, name) == 0) : false;
}
/**
* die_match_name - Match diename and glob
* @dw_die: a DIE
* @glob: a string of target glob pattern
*
* Glob-match the name of @dw_die against @glob. Return false if matching fails.
*/
bool die_match_name(Dwarf_Die *dw_die, const char *glob)
{
const char *name;
name = dwarf_diename(dw_die);
return name ? strglobmatch(name, glob) : false;
}
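A hedged usage sketch contrasting the two matchers, assuming a DIE whose name is "vfs_read":

static bool is_vfs_helper(Dwarf_Die *dw_die)
{
	/* exact string match, as before */
	if (die_compare_name(dw_die, "vfs_read"))
		return true;
	/* new: shell-style glob, handled by strglobmatch() */
	return die_match_name(dw_die, "vfs_*");
}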
/**
* die_get_call_lineno - Get callsite line number of inline-function instance
* @in_die: a DIE of an inlined function instance
......
......@@ -47,6 +47,9 @@ extern bool die_is_func_instance(Dwarf_Die *dw_die);
/* Compare diename and tname */
extern bool die_compare_name(Dwarf_Die *dw_die, const char *tname);
/* Matching diename with glob pattern */
extern bool die_match_name(Dwarf_Die *dw_die, const char *glob);
/* Get callsite line number of inline-function instance */
extern int die_get_call_lineno(Dwarf_Die *in_die);
......
......@@ -919,6 +919,10 @@ void thread__find_addr_location(struct thread *thread,
al->sym = NULL;
}
/*
* Callers need to drop the reference to al->thread, obtained in
* machine__findnew_thread()
*/
int perf_event__preprocess_sample(const union perf_event *event,
struct machine *machine,
struct addr_location *al,
......@@ -979,6 +983,17 @@ int perf_event__preprocess_sample(const union perf_event *event,
return 0;
}
/*
 * The preprocess_sample method will return with reference counts for the
 * entries in it; when done using them (and perhaps after taking extra
 * references, if a pointer to one of those entries must be kept), the
 * call must be paired with addr_location__put(), so that the refcounts
 * can be decremented.
 */
void addr_location__put(struct addr_location *al)
{
thread__zput(al->thread);
}
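Putting the two comments above together, a minimal sketch of the expected pairing, assuming the caller has already parsed the sample:

static int handle_sample(const union perf_event *event,
			 struct machine *machine, struct perf_sample *sample)
{
	struct addr_location al;

	if (perf_event__preprocess_sample(event, machine, &al, sample) < 0)
		return -1;

	/* ... use al.thread, al.map, al.sym ... */

	addr_location__put(&al);	/* drops the al.thread reference */
	return 0;
}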
bool is_bts_event(struct perf_event_attr *attr)
{
return attr->type == PERF_TYPE_HARDWARE &&
......
......@@ -426,6 +426,8 @@ int perf_event__preprocess_sample(const union perf_event *event,
struct addr_location *al,
struct perf_sample *sample);
void addr_location__put(struct addr_location *al);
struct thread;
bool is_bts_event(struct perf_event_attr *attr);
......
......@@ -14,6 +14,8 @@
#include "unwind.h"
#include "linux/hash.h"
static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock);
static void dsos__init(struct dsos *dsos)
{
INIT_LIST_HEAD(&dsos->head);
......@@ -28,6 +30,7 @@ int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
dsos__init(&machine->kernel_dsos);
machine->threads = RB_ROOT;
pthread_rwlock_init(&machine->threads_lock, NULL);
INIT_LIST_HEAD(&machine->dead_threads);
machine->last_match = NULL;
......@@ -54,6 +57,7 @@ int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
snprintf(comm, sizeof(comm), "[guest/%d]", pid);
thread__set_comm(thread, comm, 0);
thread__put(thread);
}
machine->current_tid = NULL;
......@@ -91,14 +95,17 @@ static void dsos__delete(struct dsos *dsos)
void machine__delete_threads(struct machine *machine)
{
struct rb_node *nd = rb_first(&machine->threads);
struct rb_node *nd;
pthread_rwlock_wrlock(&machine->threads_lock);
nd = rb_first(&machine->threads);
while (nd) {
struct thread *t = rb_entry(nd, struct thread, rb_node);
nd = rb_next(nd);
machine__remove_thread(machine, t);
__machine__remove_thread(machine, t, false);
}
pthread_rwlock_unlock(&machine->threads_lock);
}
void machine__exit(struct machine *machine)
......@@ -109,6 +116,7 @@ void machine__exit(struct machine *machine)
vdso__exit(machine);
zfree(&machine->root_dir);
zfree(&machine->current_tid);
pthread_rwlock_destroy(&machine->threads_lock);
}
void machine__delete(struct machine *machine)
......@@ -303,7 +311,7 @@ static void machine__update_thread_pid(struct machine *machine,
if (th->pid_ == th->tid)
return;
leader = machine__findnew_thread(machine, th->pid_, th->pid_);
leader = __machine__findnew_thread(machine, th->pid_, th->pid_);
if (!leader)
goto out_err;
......@@ -336,9 +344,9 @@ static void machine__update_thread_pid(struct machine *machine,
pr_err("Failed to join map groups for %d:%d\n", th->pid_, th->tid);
}
static struct thread *__machine__findnew_thread(struct machine *machine,
pid_t pid, pid_t tid,
bool create)
static struct thread *____machine__findnew_thread(struct machine *machine,
pid_t pid, pid_t tid,
bool create)
{
struct rb_node **p = &machine->threads.rb_node;
struct rb_node *parent = NULL;
......@@ -393,6 +401,7 @@ static struct thread *__machine__findnew_thread(struct machine *machine,
*/
if (thread__init_map_groups(th, machine)) {
rb_erase(&th->rb_node, &machine->threads);
RB_CLEAR_NODE(&th->rb_node);
thread__delete(th);
return NULL;
}
......@@ -406,16 +415,30 @@ static struct thread *__machine__findnew_thread(struct machine *machine,
return th;
}
struct thread *__machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid)
{
return ____machine__findnew_thread(machine, pid, tid, true);
}
struct thread *machine__findnew_thread(struct machine *machine, pid_t pid,
pid_t tid)
{
return __machine__findnew_thread(machine, pid, tid, true);
struct thread *th;
pthread_rwlock_wrlock(&machine->threads_lock);
th = thread__get(__machine__findnew_thread(machine, pid, tid));
pthread_rwlock_unlock(&machine->threads_lock);
return th;
}
struct thread *machine__find_thread(struct machine *machine, pid_t pid,
pid_t tid)
{
return __machine__findnew_thread(machine, pid, tid, false);
struct thread *th;
pthread_rwlock_rdlock(&machine->threads_lock);
th = thread__get(____machine__findnew_thread(machine, pid, tid, false));
pthread_rwlock_unlock(&machine->threads_lock);
return th;
}
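A sketch of the race that taking the reference inside the rwlock closes; were thread__get() called after dropping the lock, a concurrent remover could free the thread in between:

/*
 *   reader (machine__find_thread)   writer (machine__remove_thread)
 *   -----------------------------   -------------------------------
 *   rdlock(threads_lock)
 *   th = rb-tree lookup             wrlock(threads_lock)  <- blocks
 *   thread__get(th)
 *   unlock(threads_lock)
 *                                   rb_erase() + thread__put()
 *                                   (th survives: the reader holds a ref)
 */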
struct comm *machine__thread_exec_comm(struct machine *machine,
......@@ -434,6 +457,7 @@ int machine__process_comm_event(struct machine *machine, union perf_event *event
event->comm.pid,
event->comm.tid);
bool exec = event->header.misc & PERF_RECORD_MISC_COMM_EXEC;
int err = 0;
if (exec)
machine->comm_exec = true;
......@@ -444,10 +468,12 @@ int machine__process_comm_event(struct machine *machine, union perf_event *event
if (thread == NULL ||
__thread__set_comm(thread, event->comm.comm, sample->time, exec)) {
dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
return -1;
err = -1;
}
return 0;
thread__put(thread);
return err;
}
int machine__process_lost_event(struct machine *machine __maybe_unused,
......@@ -591,12 +617,16 @@ size_t machine__fprintf(struct machine *machine, FILE *fp)
size_t ret = 0;
struct rb_node *nd;
pthread_rwlock_rdlock(&machine->threads_lock);
for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {
struct thread *pos = rb_entry(nd, struct thread, rb_node);
ret += thread__fprintf(pos, fp);
}
pthread_rwlock_unlock(&machine->threads_lock);
return ret;
}
......@@ -1213,11 +1243,14 @@ int machine__process_mmap2_event(struct machine *machine,
event->mmap2.filename, type, thread);
if (map == NULL)
goto out_problem;
goto out_problem_map;
thread__insert_map(thread, map);
thread__put(thread);
return 0;
out_problem_map:
thread__put(thread);
out_problem:
dump_printf("problem processing PERF_RECORD_MMAP2, skipping event.\n");
return 0;
......@@ -1260,31 +1293,45 @@ int machine__process_mmap_event(struct machine *machine, union perf_event *event
type, thread);
if (map == NULL)
goto out_problem;
goto out_problem_map;
thread__insert_map(thread, map);
thread__put(thread);
return 0;
out_problem_map:
thread__put(thread);
out_problem:
dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
return 0;
}
void machine__remove_thread(struct machine *machine, struct thread *th)
static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock)
{
if (machine->last_match == th)
thread__zput(machine->last_match);
BUG_ON(atomic_read(&th->refcnt) == 0);
if (lock)
pthread_rwlock_wrlock(&machine->threads_lock);
rb_erase(&th->rb_node, &machine->threads);
RB_CLEAR_NODE(&th->rb_node);
/*
* Move it first to the dead_threads list, then drop the reference;
* if this is the last reference, the thread__delete destructor will
* be called and we will remove it from the dead_threads list.
*/
list_add_tail(&th->node, &machine->dead_threads);
if (lock)
pthread_rwlock_unlock(&machine->threads_lock);
thread__put(th);
}
void machine__remove_thread(struct machine *machine, struct thread *th)
{
return __machine__remove_thread(machine, th, true);
}
int machine__process_fork_event(struct machine *machine, union perf_event *event,
struct perf_sample *sample)
{
......@@ -1294,10 +1341,13 @@ int machine__process_fork_event(struct machine *machine, union perf_event *event
struct thread *parent = machine__findnew_thread(machine,
event->fork.ppid,
event->fork.ptid);
int err = 0;
/* if a thread currently exists for the thread id remove it */
if (thread != NULL)
if (thread != NULL) {
machine__remove_thread(machine, thread);
thread__put(thread);
}
thread = machine__findnew_thread(machine, event->fork.pid,
event->fork.tid);
......@@ -1307,10 +1357,12 @@ int machine__process_fork_event(struct machine *machine, union perf_event *event
if (thread == NULL || parent == NULL ||
thread__fork(thread, parent, sample->time) < 0) {
dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
return -1;
err = -1;
}
thread__put(thread);
thread__put(parent);
return 0;
return err;
}
int machine__process_exit_event(struct machine *machine, union perf_event *event,
......@@ -1323,8 +1375,10 @@ int machine__process_exit_event(struct machine *machine, union perf_event *event
if (dump_trace)
perf_event__fprintf_task(event, stdout);
if (thread != NULL)
if (thread != NULL) {
thread__exited(thread);
thread__put(thread);
}
return 0;
}
......@@ -1841,6 +1895,7 @@ int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
return -ENOMEM;
thread->cpu = cpu;
thread__put(thread);
return 0;
}
......
......@@ -30,6 +30,7 @@ struct machine {
bool comm_exec;
char *root_dir;
struct rb_root threads;
pthread_rwlock_t threads_lock;
struct list_head dead_threads;
struct thread *last_match;
struct vdso_info *vdso_info;
......@@ -151,8 +152,8 @@ static inline bool machine__is_host(struct machine *machine)
return machine ? machine->pid == HOST_KERNEL_ID : false;
}
struct thread *machine__findnew_thread(struct machine *machine, pid_t pid,
pid_t tid);
struct thread *__machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid);
struct thread *machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid);
size_t machine__fprintf(struct machine *machine, FILE *fp);
......
......@@ -6,10 +6,19 @@
#include "strlist.h"
#include "strfilter.h"
/* Probe related configurations */
struct probe_conf {
bool show_ext_vars;
bool force_add;
bool no_inlines;
int max_probes;
};
extern struct probe_conf probe_conf;
extern bool probe_event_dry_run;
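A hedged sketch of how a builtin command would now configure probing, replacing the max_probe_points/force_add/externs parameters that the old prototypes below carried (the option-parsing 'opts' struct is a stand-in, not part of this patch):

probe_conf.max_probes = MAX_PROBES;		/* was the max_probe_points arg */
probe_conf.force_add = opts.force_add;		/* was the force_add arg */
probe_conf.no_inlines = opts.no_inlines;	/* set by --no-inlines */

ret = add_perf_probe_events(pevs, npevs);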
/* kprobe-tracer and uprobe-tracer tracing point */
struct probe_trace_point {
char *realname; /* function real name (if needed) */
char *symbol; /* Base symbol */
char *module; /* Module name */
unsigned long offset; /* Offset from symbol */
......@@ -124,15 +133,13 @@ extern int line_range__init(struct line_range *lr);
/* Internal use: Return kernel/module path */
extern const char *kernel_get_module_path(const char *module);
extern int add_perf_probe_events(struct perf_probe_event *pevs, int npevs,
int max_probe_points, bool force_add);
extern int add_perf_probe_events(struct perf_probe_event *pevs, int npevs);
extern int del_perf_probe_events(struct strfilter *filter);
extern int show_perf_probe_events(struct strfilter *filter);
extern int show_line_range(struct line_range *lr, const char *module,
bool user);
extern int show_available_vars(struct perf_probe_event *pevs, int npevs,
int max_probe_points, const char *module,
struct strfilter *filter, bool externs);
struct strfilter *filter);
extern int show_available_funcs(const char *module, struct strfilter *filter,
bool user);
bool arch__prefers_symtab(void);
......
......@@ -717,7 +717,7 @@ static int find_best_scope_cb(Dwarf_Die *fn_die, void *data)
}
/* If the function name is given, that's what user expects */
if (fsp->function) {
if (die_compare_name(fn_die, fsp->function)) {
if (die_match_name(fn_die, fsp->function)) {
memcpy(fsp->die_mem, fn_die, sizeof(Dwarf_Die));
fsp->found = true;
return 1;
......@@ -920,13 +920,14 @@ static int probe_point_search_cb(Dwarf_Die *sp_die, void *data)
/* Check tag and diename */
if (!die_is_func_def(sp_die) ||
!die_compare_name(sp_die, pp->function))
!die_match_name(sp_die, pp->function))
return DWARF_CB_OK;
/* Check declared file */
if (pp->file && strtailcmp(pp->file, dwarf_decl_file(sp_die)))
return DWARF_CB_OK;
pr_debug("Matched function: %s\n", dwarf_diename(sp_die));
pf->fname = dwarf_decl_file(sp_die);
if (pp->line) { /* Function relative line */
dwarf_decl_line(sp_die, &pf->lno);
......@@ -943,10 +944,20 @@ static int probe_point_search_cb(Dwarf_Die *sp_die, void *data)
/* TODO: Check the address in this function */
param->retval = call_probe_finder(sp_die, pf);
}
} else
} else if (!probe_conf.no_inlines) {
/* Inlined function: search instances */
param->retval = die_walk_instances(sp_die,
probe_point_inline_cb, (void *)pf);
/* This could be a non-existent inline definition */
if (param->retval == -ENOENT && strisglob(pp->function))
param->retval = 0;
}
/* We need to find other candidates */
if (strisglob(pp->function) && param->retval >= 0) {
param->retval = 0; /* We have to clear the result */
return DWARF_CB_OK;
}
return DWARF_CB_ABORT; /* Exit; no same symbol in this CU. */
}
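A note on the callback protocol this hunk leans on, sketched from the dwarf_getfuncs() iteration semantics:

/*
 * dwarf_getfuncs() keeps invoking probe_point_search_cb() while it
 * returns DWARF_CB_OK and stops on DWARF_CB_ABORT.  A plain function
 * name is unique within a CU, so the first hit may abort the walk;
 * a glob like "vfs_*" has to keep walking, hence the retval reset
 * and the DWARF_CB_OK return when strisglob() is true.
 */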
......@@ -975,7 +986,7 @@ static int pubname_search_cb(Dwarf *dbg, Dwarf_Global *gl, void *data)
if (dwarf_tag(param->sp_die) != DW_TAG_subprogram)
return DWARF_CB_OK;
if (die_compare_name(param->sp_die, param->function)) {
if (die_match_name(param->sp_die, param->function)) {
if (!dwarf_offdie(dbg, gl->cu_offset, param->cu_die))
return DWARF_CB_OK;
......@@ -1028,7 +1039,7 @@ static int debuginfo__find_probes(struct debuginfo *dbg,
return -ENOMEM;
/* Fastpath: lookup by function name from .debug_pubnames section */
if (pp->function) {
if (pp->function && !strisglob(pp->function)) {
struct pubname_callback_param pubname_param = {
.function = pp->function,
.file = pp->file,
......@@ -1087,6 +1098,7 @@ static int debuginfo__find_probes(struct debuginfo *dbg,
struct local_vars_finder {
struct probe_finder *pf;
struct perf_probe_arg *args;
bool vars;
int max_args;
int nargs;
int ret;
......@@ -1101,7 +1113,7 @@ static int copy_variables_cb(Dwarf_Die *die_mem, void *data)
tag = dwarf_tag(die_mem);
if (tag == DW_TAG_formal_parameter ||
tag == DW_TAG_variable) {
(tag == DW_TAG_variable && vf->vars)) {
if (convert_variable_location(die_mem, vf->pf->addr,
vf->pf->fb_ops, &pf->sp_die,
NULL) == 0) {
......@@ -1127,26 +1139,28 @@ static int expand_probe_args(Dwarf_Die *sc_die, struct probe_finder *pf,
Dwarf_Die die_mem;
int i;
int n = 0;
struct local_vars_finder vf = {.pf = pf, .args = args,
struct local_vars_finder vf = {.pf = pf, .args = args, .vars = false,
.max_args = MAX_PROBE_ARGS, .ret = 0};
for (i = 0; i < pf->pev->nargs; i++) {
/* var is never NULL */
if (strcmp(pf->pev->args[i].var, "$vars") == 0) {
pr_debug("Expanding $vars into:");
vf.nargs = n;
/* Special local variables */
die_find_child(sc_die, copy_variables_cb, (void *)&vf,
&die_mem);
pr_debug(" (%d)\n", vf.nargs - n);
if (vf.ret < 0)
return vf.ret;
n = vf.nargs;
} else {
if (strcmp(pf->pev->args[i].var, PROBE_ARG_VARS) == 0)
vf.vars = true;
else if (strcmp(pf->pev->args[i].var, PROBE_ARG_PARAMS) != 0) {
/* Copy normal argument */
args[n] = pf->pev->args[i];
n++;
continue;
}
pr_debug("Expanding %s into:", pf->pev->args[i].var);
vf.nargs = n;
/* Special local variables */
die_find_child(sc_die, copy_variables_cb, (void *)&vf,
&die_mem);
pr_debug(" (%d)\n", vf.nargs - n);
if (vf.ret < 0)
return vf.ret;
n = vf.nargs;
}
return n;
}
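A hedged illustration of what the unified expansion above produces; the behavior is inferred from the vf.vars flag consumed by copy_variables_cb():

/*
 * perf probe 'vfs_read $params'
 *     -> formal parameters only (DW_TAG_formal_parameter),
 *        since vf.vars stays false
 *
 * perf probe 'vfs_read $vars'
 *     -> formal parameters plus the local variables visible at
 *        the probe point (DW_TAG_variable too, vf.vars == true)
 */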
......@@ -1174,6 +1188,10 @@ static int add_probe_trace_event(Dwarf_Die *sc_die, struct probe_finder *pf)
if (ret < 0)
return ret;
tev->point.realname = strdup(dwarf_diename(sc_die));
if (!tev->point.realname)
return -ENOMEM;
pr_debug("Probe point found: %s+%lu\n", tev->point.symbol,
tev->point.offset);
......@@ -1211,15 +1229,15 @@ static int add_probe_trace_event(Dwarf_Die *sc_die, struct probe_finder *pf)
/* Find probe_trace_events specified by perf_probe_event from debuginfo */
int debuginfo__find_trace_events(struct debuginfo *dbg,
struct perf_probe_event *pev,
struct probe_trace_event **tevs, int max_tevs)
struct probe_trace_event **tevs)
{
struct trace_event_finder tf = {
.pf = {.pev = pev, .callback = add_probe_trace_event},
.mod = dbg->mod, .max_tevs = max_tevs};
.max_tevs = probe_conf.max_probes, .mod = dbg->mod};
int ret;
/* Allocate result tevs array */
*tevs = zalloc(sizeof(struct probe_trace_event) * max_tevs);
*tevs = zalloc(sizeof(struct probe_trace_event) * tf.max_tevs);
if (*tevs == NULL)
return -ENOMEM;
......@@ -1300,9 +1318,9 @@ static int add_available_vars(Dwarf_Die *sc_die, struct probe_finder *pf)
die_find_child(sc_die, collect_variables_cb, (void *)af, &die_mem);
/* Find external variables */
if (!af->externs)
if (!probe_conf.show_ext_vars)
goto out;
/* Don't need to search child DIE for externs. */
/* Don't need to search child DIE for external vars. */
af->child = false;
die_find_child(&pf->cu_die, collect_variables_cb, (void *)af, &die_mem);
......@@ -1322,17 +1340,16 @@ static int add_available_vars(Dwarf_Die *sc_die, struct probe_finder *pf)
*/
int debuginfo__find_available_vars_at(struct debuginfo *dbg,
struct perf_probe_event *pev,
struct variable_list **vls,
int max_vls, bool externs)
struct variable_list **vls)
{
struct available_var_finder af = {
.pf = {.pev = pev, .callback = add_available_vars},
.mod = dbg->mod,
.max_vls = max_vls, .externs = externs};
.max_vls = probe_conf.max_probes};
int ret;
/* Allocate result vls array */
*vls = zalloc(sizeof(struct variable_list) * max_vls);
*vls = zalloc(sizeof(struct variable_list) * af.max_vls);
if (*vls == NULL)
return -ENOMEM;
......@@ -1533,7 +1550,7 @@ static int line_range_search_cb(Dwarf_Die *sp_die, void *data)
return DWARF_CB_OK;
if (die_is_func_def(sp_die) &&
die_compare_name(sp_die, lr->function)) {
die_match_name(sp_die, lr->function)) {
lf->fname = dwarf_decl_file(sp_die);
dwarf_decl_line(sp_die, &lr->offset);
pr_debug("fname: %s, lineno:%d\n", lf->fname, lr->offset);
......
......@@ -10,6 +10,9 @@
#define MAX_PROBES 128
#define MAX_PROBE_ARGS 128
#define PROBE_ARG_VARS "$vars"
#define PROBE_ARG_PARAMS "$params"
static inline int is_c_varname(const char *name)
{
/* TODO */
......@@ -37,8 +40,7 @@ extern void debuginfo__delete(struct debuginfo *dbg);
/* Find probe_trace_events specified by perf_probe_event from debuginfo */
extern int debuginfo__find_trace_events(struct debuginfo *dbg,
struct perf_probe_event *pev,
struct probe_trace_event **tevs,
int max_tevs);
struct probe_trace_event **tevs);
/* Find a perf_probe_point from debuginfo */
extern int debuginfo__find_probe_point(struct debuginfo *dbg,
......@@ -52,8 +54,7 @@ extern int debuginfo__find_line_range(struct debuginfo *dbg,
/* Find available variables */
extern int debuginfo__find_available_vars_at(struct debuginfo *dbg,
struct perf_probe_event *pev,
struct variable_list **vls,
int max_points, bool externs);
struct variable_list **vls);
/* Find a src file from a DWARF tag path */
int get_real_path(const char *raw_path, const char *comp_dir,
......@@ -96,7 +97,6 @@ struct available_var_finder {
struct variable_list *vls; /* Found variable lists */
int nvls; /* Number of variable lists */
int max_vls; /* Max no. of variable lists */
bool externs; /* Find external vars too */
bool child; /* Search child scopes */
};
......
......@@ -18,7 +18,7 @@ int thread__init_map_groups(struct thread *thread, struct machine *machine)
if (pid == thread->tid || pid == -1) {
thread->mg = map_groups__new(machine);
} else {
leader = machine__findnew_thread(machine, pid, pid);
leader = __machine__findnew_thread(machine, pid, pid);
if (leader)
thread->mg = map_groups__get(leader->mg);
}
......@@ -53,7 +53,9 @@ struct thread *thread__new(pid_t pid, pid_t tid)
goto err_thread;
list_add(&comm->list, &thread->comm_list);
atomic_set(&thread->refcnt, 0);
INIT_LIST_HEAD(&thread->node);
RB_CLEAR_NODE(&thread->rb_node);
}
return thread;
......@@ -67,6 +69,9 @@ void thread__delete(struct thread *thread)
{
struct comm *comm, *tmp;
BUG_ON(!RB_EMPTY_NODE(&thread->rb_node));
BUG_ON(!list_empty(&thread->node));
thread_stack__free(thread);
if (thread->mg) {
......@@ -84,13 +89,14 @@ void thread__delete(struct thread *thread)
struct thread *thread__get(struct thread *thread)
{
++thread->refcnt;
if (thread)
atomic_inc(&thread->refcnt);
return thread;
}
void thread__put(struct thread *thread)
{
if (thread && --thread->refcnt == 0) {
if (thread && atomic_dec_and_test(&thread->refcnt)) {
list_del_init(&thread->node);
thread__delete(thread);
}
......
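A hedged sketch of the lifecycle under the new atomic counter: thread__new() starts at zero and the first owner, e.g. the machine rb-tree, takes the initial reference:

struct thread *t = thread__new(pid, tid);	/* refcnt == 0 */

thread__get(t);		/* refcnt == 1: first owner */
thread__get(t);		/* refcnt == 2: a second user */
thread__put(t);		/* refcnt == 1 */
thread__put(t);		/* refcnt == 0: list_del_init() + thread__delete() */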
#ifndef __PERF_THREAD_H
#define __PERF_THREAD_H
#include <linux/atomic.h>
#include <linux/rbtree.h>
#include <linux/list.h>
#include <unistd.h>
......@@ -21,7 +22,7 @@ struct thread {
pid_t tid;
pid_t ppid;
int cpu;
int refcnt;
atomic_t refcnt;
char shortname[3];
bool comm_set;
bool dead; /* if set thread has exited */
......
......@@ -257,6 +257,10 @@ char **argv_split(const char *str, int *argcp);
void argv_free(char **argv);
bool strglobmatch(const char *str, const char *pat);
bool strlazymatch(const char *str, const char *pat);
static inline bool strisglob(const char *str)
{
return strpbrk(str, "*?[") != NULL;
}
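A hedged usage sketch of strisglob(), which only scans for the glob metacharacters strglobmatch() understands:

#include <assert.h>

static void strisglob_examples(void)
{
	assert(!strisglob("vfs_read"));		/* plain name: no metachars */
	assert(strisglob("vfs_*"));		/* '*' makes it a glob */
	assert(strisglob("pte[sc]et"));		/* so do '[' and '?' */
}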
int strtailcmp(const char *s1, const char *s2);
char *strxfrchar(char *s, char from, char to);
unsigned long convert_unit(unsigned long value, char *unit);
......