Commit b555d191 authored by Linus Torvalds

Merge tag 'perf-tools-fixes-for-v6.8-1-2024-02-01' of git://git.kernel.org/pub/scm/linux/kernel/git/perf/perf-tools

Pull perf tools fixes from Arnaldo Carvalho de Melo:
 "Vendor events:

   - Intel Alderlake/Sapphire Rapids metric fixes: the CPU type
     ("cpu_atom", "cpu_core") needs to be used as a prefix for an event
     to be taken into account in a metric formula; detected via one of
     the 'perf test' entries.

  'perf test' fixes:

   - Fix the creation of event selector lists on 'perf test' entries by
     initializing the sample ID flag, as 'perf record' already does; this
     fix affects only the tests, the common case isn't affected

   - Make 'perf list' respect debug settings (-v) to fix its 'perf test'
     entry

   - Fix 'perf script' test when python support isn't enabled

   - Special case 'perf script' tests on s390, where only DWARF call
     graphs are supported and only on software events

   - Make 'perf daemon' signal test less racy

  Compiler warnings/errors:

   - Remove needless malloc(0) call in 'perf top' that triggers
     -Walloc-size

   - Fix calloc() argument order to address error introduced in gcc-14

  Build:

   - Make v0.6.0 the minimal shellcheck version, so the build doesn't
     fail with older versions

  Sync kernel header copies:

   - stat.h to pick STATX_MNT_ID_UNIQUE

   - msr-index.h to pick IA32_MKTME_KEYID_PARTITIONING

   - drm.h to pick DRM_IOCTL_MODE_CLOSEFB

   - unistd.h to pick {list,stat}mount,
     lsm_{[gs]et_self_attr,list_modules} syscall numbers

   - x86 cpufeatures to pick TDX, Zen, APIC MSR fence changes

   - x86's mem{cpy,set}_64.S used in 'perf bench'

   - Also, without tooling effects: asm-generic/unaligned.h, mount.h,
     fcntl.h, kvm headers"

* tag 'perf-tools-fixes-for-v6.8-1-2024-02-01' of git://git.kernel.org/pub/scm/linux/kernel/git/perf/perf-tools: (21 commits)
  perf tools headers: update the asm-generic/unaligned.h copy with the kernel sources
  tools include UAPI: Sync linux/mount.h copy with the kernel sources
  perf evlist: Fix evlist__new_default() for > 1 core PMU
  tools headers: Update the copy of x86's mem{cpy,set}_64.S used in 'perf bench'
  tools headers x86 cpufeatures: Sync with the kernel sources to pick TDX, Zen, APIC MSR fence changes
  tools headers UAPI: Sync unistd.h to pick {list,stat}mount, lsm_{[gs]et_self_attr,list_modules} syscall numbers
  perf vendor events intel: Alderlake/sapphirerapids metric fixes
  tools headers UAPI: Sync kvm headers with the kernel sources
  perf tools: Fix calloc() arguments to address error introduced in gcc-14
  perf top: Remove needless malloc(0) call that triggers -Walloc-size
  perf build: Make minimal shellcheck version to v0.6.0
  tools headers UAPI: Update tools's copy of drm.h headers to pick DRM_IOCTL_MODE_CLOSEFB
  perf test shell daemon: Make signal test less racy
  perf test shell script: Fix test for python being disabled
  perf test: Workaround debug output in list test
  perf list: Add output file option
  perf list: Switch error message to pr_err() to respect debug settings (-v)
  perf test: Fix 'perf script' tests on s390
  tools headers UAPI: Sync linux/fcntl.h with the kernel sources
  tools arch x86: Sync the msr-index.h copy with the kernel sources to pick IA32_MKTME_KEYID_PARTITIONING
  ...
parents 56897d51 fdd0ae72
@@ -198,6 +198,7 @@
 #define X86_FEATURE_CAT_L3 ( 7*32+ 4) /* Cache Allocation Technology L3 */
 #define X86_FEATURE_CAT_L2 ( 7*32+ 5) /* Cache Allocation Technology L2 */
 #define X86_FEATURE_CDP_L3 ( 7*32+ 6) /* Code and Data Prioritization L3 */
+#define X86_FEATURE_TDX_HOST_PLATFORM ( 7*32+ 7) /* Platform supports being a TDX host */
 #define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */
 #define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
 #define X86_FEATURE_XCOMPACTED ( 7*32+10) /* "" Use compacted XSTATE (XSAVES or XSAVEC) */
@@ -308,10 +309,14 @@
 #define X86_FEATURE_SMBA (11*32+21) /* "" Slow Memory Bandwidth Allocation */
 #define X86_FEATURE_BMEC (11*32+22) /* "" Bandwidth Monitoring Event Configuration */
 #define X86_FEATURE_USER_SHSTK (11*32+23) /* Shadow stack support for user mode applications */
 #define X86_FEATURE_SRSO (11*32+24) /* "" AMD BTB untrain RETs */
 #define X86_FEATURE_SRSO_ALIAS (11*32+25) /* "" AMD BTB untrain RETs through aliasing */
 #define X86_FEATURE_IBPB_ON_VMEXIT (11*32+26) /* "" Issue an IBPB only on VMEXIT */
+#define X86_FEATURE_APIC_MSRS_FENCE (11*32+27) /* "" IA32_TSC_DEADLINE and X2APIC MSRs need fencing */
+#define X86_FEATURE_ZEN2 (11*32+28) /* "" CPU based on Zen2 microarchitecture */
+#define X86_FEATURE_ZEN3 (11*32+29) /* "" CPU based on Zen3 microarchitecture */
+#define X86_FEATURE_ZEN4 (11*32+30) /* "" CPU based on Zen4 microarchitecture */
+#define X86_FEATURE_ZEN1 (11*32+31) /* "" CPU based on Zen1 microarchitecture */

 /* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */
 #define X86_FEATURE_AVX_VNNI (12*32+ 4) /* AVX VNNI instructions */
@@ -495,6 +500,7 @@
 #define X86_BUG_EIBRS_PBRSB X86_BUG(28) /* EIBRS is vulnerable to Post Barrier RSB Predictions */
 #define X86_BUG_SMT_RSB X86_BUG(29) /* CPU is vulnerable to Cross-Thread Return Address Predictions */
 #define X86_BUG_GDS X86_BUG(30) /* CPU is affected by Gather Data Sampling */
+#define X86_BUG_TDX_PW_MCE X86_BUG(31) /* CPU may incur #MC if non-TD software does partial write to TDX private memory */

 /* BUG word 2 */
 #define X86_BUG_SRSO X86_BUG(1*32 + 0) /* AMD SRSO bug */
...
@@ -237,6 +237,11 @@
 #define LBR_INFO_CYCLES 0xffff
 #define LBR_INFO_BR_TYPE_OFFSET 56
 #define LBR_INFO_BR_TYPE (0xfull << LBR_INFO_BR_TYPE_OFFSET)
+#define LBR_INFO_BR_CNTR_OFFSET 32
+#define LBR_INFO_BR_CNTR_NUM 4
+#define LBR_INFO_BR_CNTR_BITS 2
+#define LBR_INFO_BR_CNTR_MASK GENMASK_ULL(LBR_INFO_BR_CNTR_BITS - 1, 0)
+#define LBR_INFO_BR_CNTR_FULL_MASK GENMASK_ULL(LBR_INFO_BR_CNTR_NUM * LBR_INFO_BR_CNTR_BITS - 1, 0)

 #define MSR_ARCH_LBR_CTL 0x000014ce
 #define ARCH_LBR_CTL_LBREN BIT(0)
@@ -536,6 +541,9 @@
 #define MSR_RELOAD_PMC0 0x000014c1
 #define MSR_RELOAD_FIXED_CTR0 0x00001309

+/* KeyID partitioning between MKTME and TDX */
+#define MSR_IA32_MKTME_KEYID_PARTITIONING 0x00000087
+
 /*
  * AMD64 MSRs. Not complete. See the architecture manual for a more
  * complete list.
...
@@ -562,4 +562,7 @@ struct kvm_pmu_event_filter {
 /* x86-specific KVM_EXIT_HYPERCALL flags. */
 #define KVM_EXIT_HYPERCALL_LONG_MODE BIT(0)

+#define KVM_X86_DEFAULT_VM 0
+#define KVM_X86_SW_PROTECTED_VM 1
+
 #endif /* _ASM_X86_KVM_H */
 /* SPDX-License-Identifier: GPL-2.0-only */
 /* Copyright 2002 Andi Kleen */

+#include <linux/export.h>
 #include <linux/linkage.h>
 #include <asm/errno.h>
 #include <asm/cpufeatures.h>
 #include <asm/alternative.h>
-#include <asm/export.h>

 .section .noinstr.text, "ax"
@@ -39,7 +39,7 @@ SYM_TYPED_FUNC_START(__memcpy)
 SYM_FUNC_END(__memcpy)
 EXPORT_SYMBOL(__memcpy)

-SYM_FUNC_ALIAS(memcpy, __memcpy)
+SYM_FUNC_ALIAS_MEMFUNC(memcpy, __memcpy)
 EXPORT_SYMBOL(memcpy)

 SYM_FUNC_START_LOCAL(memcpy_orig)
...
 /* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright 2002 Andi Kleen, SuSE Labs */

+#include <linux/export.h>
 #include <linux/linkage.h>
 #include <asm/cpufeatures.h>
 #include <asm/alternative.h>
-#include <asm/export.h>

 .section .noinstr.text, "ax"
@@ -40,7 +40,7 @@ SYM_FUNC_START(__memset)
 SYM_FUNC_END(__memset)
 EXPORT_SYMBOL(__memset)

-SYM_FUNC_ALIAS(memset, __memset)
+SYM_FUNC_ALIAS_MEMFUNC(memset, __memset)
 EXPORT_SYMBOL(memset)

 SYM_FUNC_START_LOCAL(memset_orig)
...
@@ -105,9 +105,9 @@ static inline u32 get_unaligned_le24(const void *p)
 static inline void __put_unaligned_be24(const u32 val, u8 *p)
 {
-	*p++ = val >> 16;
-	*p++ = val >> 8;
-	*p++ = val;
+	*p++ = (val >> 16) & 0xff;
+	*p++ = (val >> 8) & 0xff;
+	*p++ = val & 0xff;
 }

 static inline void put_unaligned_be24(const u32 val, void *p)
@@ -117,9 +117,9 @@ static inline void put_unaligned_be24(const u32 val, void *p)
 static inline void __put_unaligned_le24(const u32 val, u8 *p)
 {
-	*p++ = val;
-	*p++ = val >> 8;
-	*p++ = val >> 16;
+	*p++ = val & 0xff;
+	*p++ = (val >> 8) & 0xff;
+	*p++ = (val >> 16) & 0xff;
 }

 static inline void put_unaligned_le24(const u32 val, void *p)
@@ -129,12 +129,12 @@ static inline void put_unaligned_le24(const u32 val, void *p)
 static inline void __put_unaligned_be48(const u64 val, u8 *p)
 {
-	*p++ = val >> 40;
-	*p++ = val >> 32;
-	*p++ = val >> 24;
-	*p++ = val >> 16;
-	*p++ = val >> 8;
-	*p++ = val;
+	*p++ = (val >> 40) & 0xff;
+	*p++ = (val >> 32) & 0xff;
+	*p++ = (val >> 24) & 0xff;
+	*p++ = (val >> 16) & 0xff;
+	*p++ = (val >> 8) & 0xff;
+	*p++ = val & 0xff;
 }

 static inline void put_unaligned_be48(const u64 val, void *p)
...
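The masking change above doesn't alter the generated code (stores through a u8 pointer truncate to the low byte anyway); it just makes the byte-narrowing explicit. A minimal userspace sketch of the little-endian 24-bit store, using standard C types in place of the kernel's u8/u32:

    #include <stdint.h>
    #include <stdio.h>

    /* mirrors __put_unaligned_le24() from the header above */
    static void put_le24(uint32_t val, uint8_t *p)
    {
        *p++ = val & 0xff;
        *p++ = (val >> 8) & 0xff;
        *p++ = (val >> 16) & 0xff;
    }

    int main(void)
    {
        uint8_t buf[3];

        put_le24(0x112233, buf);
        /* least significant byte first: prints "33 22 11" */
        printf("%02x %02x %02x\n", buf[0], buf[1], buf[2]);
        return 0;
    }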
@@ -829,8 +829,21 @@ __SYSCALL(__NR_futex_wait, sys_futex_wait)
 #define __NR_futex_requeue 456
 __SYSCALL(__NR_futex_requeue, sys_futex_requeue)

+#define __NR_statmount 457
+__SYSCALL(__NR_statmount, sys_statmount)
+
+#define __NR_listmount 458
+__SYSCALL(__NR_listmount, sys_listmount)
+
+#define __NR_lsm_get_self_attr 459
+__SYSCALL(__NR_lsm_get_self_attr, sys_lsm_get_self_attr)
+#define __NR_lsm_set_self_attr 460
+__SYSCALL(__NR_lsm_set_self_attr, sys_lsm_set_self_attr)
+#define __NR_lsm_list_modules 461
+__SYSCALL(__NR_lsm_list_modules, sys_lsm_list_modules)
+
 #undef __NR_syscalls
-#define __NR_syscalls 457
+#define __NR_syscalls 462

 /*
  * 32 bit systems traditionally used different
...
@@ -713,7 +713,8 @@ struct drm_gem_open {
 /**
  * DRM_CAP_ASYNC_PAGE_FLIP
  *
- * If set to 1, the driver supports &DRM_MODE_PAGE_FLIP_ASYNC.
+ * If set to 1, the driver supports &DRM_MODE_PAGE_FLIP_ASYNC for legacy
+ * page-flips.
  */
 #define DRM_CAP_ASYNC_PAGE_FLIP 0x7
 /**
@@ -773,6 +774,13 @@ struct drm_gem_open {
  * :ref:`drm_sync_objects`.
  */
 #define DRM_CAP_SYNCOBJ_TIMELINE 0x14
+/**
+ * DRM_CAP_ATOMIC_ASYNC_PAGE_FLIP
+ *
+ * If set to 1, the driver supports &DRM_MODE_PAGE_FLIP_ASYNC for atomic
+ * commits.
+ */
+#define DRM_CAP_ATOMIC_ASYNC_PAGE_FLIP 0x15

 /* DRM_IOCTL_GET_CAP ioctl argument type */
 struct drm_get_cap {
@@ -842,6 +850,31 @@ struct drm_get_cap {
  */
 #define DRM_CLIENT_CAP_WRITEBACK_CONNECTORS 5

+/**
+ * DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT
+ *
+ * Drivers for para-virtualized hardware (e.g. vmwgfx, qxl, virtio and
+ * virtualbox) have additional restrictions for cursor planes (thus
+ * making cursor planes on those drivers not truly universal,) e.g.
+ * they need cursor planes to act like one would expect from a mouse
+ * cursor and have correctly set hotspot properties.
+ * If this client cap is not set the DRM core will hide cursor plane on
+ * those virtualized drivers because not setting it implies that the
+ * client is not capable of dealing with those extra restictions.
+ * Clients which do set cursor hotspot and treat the cursor plane
+ * like a mouse cursor should set this property.
+ * The client must enable &DRM_CLIENT_CAP_ATOMIC first.
+ *
+ * Setting this property on drivers which do not special case
+ * cursor planes (i.e. non-virtualized drivers) will return
+ * EOPNOTSUPP, which can be used by userspace to gauge
+ * requirements of the hardware/drivers they're running on.
+ *
+ * This capability is always supported for atomic-capable virtualized
+ * drivers starting from kernel version 6.6.
+ */
+#define DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT 6
+
 /* DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
 struct drm_set_client_cap {
	__u64 capability;
@@ -893,6 +926,7 @@ struct drm_syncobj_transfer {
 #define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL (1 << 0)
 #define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT (1 << 1)
 #define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE (1 << 2) /* wait for time point to become available */
+#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE (1 << 3) /* set fence deadline to deadline_nsec */
 struct drm_syncobj_wait {
	__u64 handles;
	/* absolute timeout */
@@ -901,6 +935,14 @@ struct drm_syncobj_wait {
	__u32 flags;
	__u32 first_signaled; /* only valid when not waiting all */
	__u32 pad;
+	/**
+	 * @deadline_nsec - fence deadline hint
+	 *
+	 * Deadline hint, in absolute CLOCK_MONOTONIC, to set on backing
+	 * fence(s) if the DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE flag is
+	 * set.
+	 */
+	__u64 deadline_nsec;
 };

 struct drm_syncobj_timeline_wait {
@@ -913,6 +955,14 @@ struct drm_syncobj_timeline_wait {
	__u32 flags;
	__u32 first_signaled; /* only valid when not waiting all */
	__u32 pad;
+	/**
+	 * @deadline_nsec - fence deadline hint
+	 *
+	 * Deadline hint, in absolute CLOCK_MONOTONIC, to set on backing
+	 * fence(s) if the DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE flag is
+	 * set.
+	 */
+	__u64 deadline_nsec;
 };

 /**
@@ -1218,6 +1268,26 @@ extern "C" {
 #define DRM_IOCTL_SYNCOBJ_EVENTFD DRM_IOWR(0xCF, struct drm_syncobj_eventfd)

+/**
+ * DRM_IOCTL_MODE_CLOSEFB - Close a framebuffer.
+ *
+ * This closes a framebuffer previously added via ADDFB/ADDFB2. The IOCTL
+ * argument is a framebuffer object ID.
+ *
+ * This IOCTL is similar to &DRM_IOCTL_MODE_RMFB, except it doesn't disable
+ * planes and CRTCs. As long as the framebuffer is used by a plane, it's kept
+ * alive. When the plane no longer uses the framebuffer (because the
+ * framebuffer is replaced with another one, or the plane is disabled), the
+ * framebuffer is cleaned up.
+ *
+ * This is useful to implement flicker-free transitions between two processes.
+ *
+ * Depending on the threat model, user-space may want to ensure that the
+ * framebuffer doesn't expose any sensitive user information: closed
+ * framebuffers attached to a plane can be read back by the next DRM master.
+ */
+#define DRM_IOCTL_MODE_CLOSEFB DRM_IOWR(0xD0, struct drm_mode_closefb)

 /*
  * Device specific ioctls should only be in their respective headers
  * The device specific ioctl range is from 0x40 to 0x9f.
...
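As context for the DRM_IOCTL_MODE_CLOSEFB addition, a hedged sketch of how a compositor might use it for a flicker-free handover; close_fb_keep_scanout() is a hypothetical helper, drm_fd and fb_id are assumed to come from the usual open()/ADDFB2 flow, and struct drm_mode_closefb is assumed to carry the framebuffer ID as defined in drm_mode.h:

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <drm/drm.h>   /* pulls in drm_mode.h on new enough headers */

    /* Close the FB without disabling the plane scanning it out; the
     * kernel keeps the buffer alive until the plane moves off it. */
    static int close_fb_keep_scanout(int drm_fd, uint32_t fb_id)
    {
        struct drm_mode_closefb closefb = { .fb_id = fb_id };

        return ioctl(drm_fd, DRM_IOCTL_MODE_CLOSEFB, &closefb);
    }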
@@ -693,7 +693,7 @@ typedef struct drm_i915_irq_wait {
 #define I915_PARAM_HAS_EXEC_FENCE 44

 /* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to capture
- * user specified bufffers for post-mortem debugging of GPU hangs. See
+ * user-specified buffers for post-mortem debugging of GPU hangs. See
  * EXEC_OBJECT_CAPTURE.
  */
 #define I915_PARAM_HAS_EXEC_CAPTURE 45
@@ -1606,7 +1606,7 @@ struct drm_i915_gem_busy {
  * is accurate.
  *
  * The returned dword is split into two fields to indicate both
- * the engine classess on which the object is being read, and the
+ * the engine classes on which the object is being read, and the
  * engine class on which it is currently being written (if any).
  *
  * The low word (bits 0:15) indicate if the object is being written
@@ -1815,7 +1815,7 @@ struct drm_i915_gem_madvise {
	__u32 handle;

	/* Advice: either the buffer will be needed again in the near future,
-	 * or wont be and could be discarded under memory pressure.
+	 * or won't be and could be discarded under memory pressure.
	 */
	__u32 madv;
@@ -3246,7 +3246,7 @@ struct drm_i915_query_topology_info {
 *	// enough to hold our array of engines. The kernel will fill out the
 *	// item.length for us, which is the number of bytes we need.
 *	//
- *	// Alternatively a large buffer can be allocated straight away enabling
+ *	// Alternatively a large buffer can be allocated straightaway enabling
 *	// querying in one pass, in which case item.length should contain the
 *	// length of the provided buffer.
 *	err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
@@ -3256,7 +3256,7 @@ struct drm_i915_query_topology_info {
 *	// Now that we allocated the required number of bytes, we call the ioctl
 *	// again, this time with the data_ptr pointing to our newly allocated
 *	// blob, which the kernel can then populate with info on all engines.
- *	item.data_ptr = (uintptr_t)&info,
+ *	item.data_ptr = (uintptr_t)&info;
 *
 *	err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
 *	if (err) ...
@@ -3286,7 +3286,7 @@ struct drm_i915_query_topology_info {
 /**
  * struct drm_i915_engine_info
  *
- * Describes one engine and it's capabilities as known to the driver.
+ * Describes one engine and its capabilities as known to the driver.
  */
 struct drm_i915_engine_info {
	/** @engine: Engine class and instance. */
...
@@ -116,5 +116,8 @@
 #define AT_HANDLE_FID AT_REMOVEDIR /* file handle is needed to
					compare object identity and may not
					be usable to open_by_handle_at(2) */
+#if defined(__KERNEL__)
+#define AT_GETATTR_NOSEC 0x80000000
+#endif

 #endif /* _UAPI_LINUX_FCNTL_H */
@@ -16,76 +16,6 @@
 #define KVM_API_VERSION 12

-/* *** Deprecated interfaces *** */
-
-#define KVM_TRC_SHIFT 16
-#define KVM_TRC_ENTRYEXIT (1 << KVM_TRC_SHIFT)
-#define KVM_TRC_HANDLER (1 << (KVM_TRC_SHIFT + 1))
-#define KVM_TRC_VMENTRY (KVM_TRC_ENTRYEXIT + 0x01)
-#define KVM_TRC_VMEXIT (KVM_TRC_ENTRYEXIT + 0x02)
-#define KVM_TRC_PAGE_FAULT (KVM_TRC_HANDLER + 0x01)
-#define KVM_TRC_HEAD_SIZE 12
-#define KVM_TRC_CYCLE_SIZE 8
-#define KVM_TRC_EXTRA_MAX 7
-#define KVM_TRC_INJ_VIRQ (KVM_TRC_HANDLER + 0x02)
-#define KVM_TRC_REDELIVER_EVT (KVM_TRC_HANDLER + 0x03)
-#define KVM_TRC_PEND_INTR (KVM_TRC_HANDLER + 0x04)
-#define KVM_TRC_IO_READ (KVM_TRC_HANDLER + 0x05)
-#define KVM_TRC_IO_WRITE (KVM_TRC_HANDLER + 0x06)
-#define KVM_TRC_CR_READ (KVM_TRC_HANDLER + 0x07)
-#define KVM_TRC_CR_WRITE (KVM_TRC_HANDLER + 0x08)
-#define KVM_TRC_DR_READ (KVM_TRC_HANDLER + 0x09)
-#define KVM_TRC_DR_WRITE (KVM_TRC_HANDLER + 0x0A)
-#define KVM_TRC_MSR_READ (KVM_TRC_HANDLER + 0x0B)
-#define KVM_TRC_MSR_WRITE (KVM_TRC_HANDLER + 0x0C)
-#define KVM_TRC_CPUID (KVM_TRC_HANDLER + 0x0D)
-#define KVM_TRC_INTR (KVM_TRC_HANDLER + 0x0E)
-#define KVM_TRC_NMI (KVM_TRC_HANDLER + 0x0F)
-#define KVM_TRC_VMMCALL (KVM_TRC_HANDLER + 0x10)
-#define KVM_TRC_HLT (KVM_TRC_HANDLER + 0x11)
-#define KVM_TRC_CLTS (KVM_TRC_HANDLER + 0x12)
-#define KVM_TRC_LMSW (KVM_TRC_HANDLER + 0x13)
-#define KVM_TRC_APIC_ACCESS (KVM_TRC_HANDLER + 0x14)
-#define KVM_TRC_TDP_FAULT (KVM_TRC_HANDLER + 0x15)
-#define KVM_TRC_GTLB_WRITE (KVM_TRC_HANDLER + 0x16)
-#define KVM_TRC_STLB_WRITE (KVM_TRC_HANDLER + 0x17)
-#define KVM_TRC_STLB_INVAL (KVM_TRC_HANDLER + 0x18)
-#define KVM_TRC_PPC_INSTR (KVM_TRC_HANDLER + 0x19)
-
-struct kvm_user_trace_setup {
-	__u32 buf_size;
-	__u32 buf_nr;
-};
-
-#define __KVM_DEPRECATED_MAIN_W_0x06 \
-	_IOW(KVMIO, 0x06, struct kvm_user_trace_setup)
-#define __KVM_DEPRECATED_MAIN_0x07 _IO(KVMIO, 0x07)
-#define __KVM_DEPRECATED_MAIN_0x08 _IO(KVMIO, 0x08)
-
-#define __KVM_DEPRECATED_VM_R_0x70 _IOR(KVMIO, 0x70, struct kvm_assigned_irq)
-
-struct kvm_breakpoint {
-	__u32 enabled;
-	__u32 padding;
-	__u64 address;
-};
-
-struct kvm_debug_guest {
-	__u32 enabled;
-	__u32 pad;
-	struct kvm_breakpoint breakpoints[4];
-	__u32 singlestep;
-};
-
-#define __KVM_DEPRECATED_VCPU_W_0x87 _IOW(KVMIO, 0x87, struct kvm_debug_guest)
-
-/* *** End of deprecated interfaces *** */
-
 /* for KVM_SET_USER_MEMORY_REGION */
 struct kvm_userspace_memory_region {
	__u32 slot;
@@ -95,6 +25,19 @@ struct kvm_userspace_memory_region {
	__u64 userspace_addr; /* start of the userspace allocated memory */
 };

+/* for KVM_SET_USER_MEMORY_REGION2 */
+struct kvm_userspace_memory_region2 {
+	__u32 slot;
+	__u32 flags;
+	__u64 guest_phys_addr;
+	__u64 memory_size;
+	__u64 userspace_addr;
+	__u64 guest_memfd_offset;
+	__u32 guest_memfd;
+	__u32 pad1;
+	__u64 pad2[14];
+};
+
 /*
  * The bit 0 ~ bit 15 of kvm_userspace_memory_region::flags are visible for
  * userspace, other bits are reserved for kvm internal use which are defined
@@ -102,6 +45,7 @@ struct kvm_userspace_memory_region {
  */
 #define KVM_MEM_LOG_DIRTY_PAGES (1UL << 0)
 #define KVM_MEM_READONLY (1UL << 1)
+#define KVM_MEM_GUEST_MEMFD (1UL << 2)

 /* for KVM_IRQ_LINE */
 struct kvm_irq_level {
@@ -265,6 +209,7 @@ struct kvm_xen_exit {
 #define KVM_EXIT_RISCV_CSR 36
 #define KVM_EXIT_NOTIFY 37
 #define KVM_EXIT_LOONGARCH_IOCSR 38
+#define KVM_EXIT_MEMORY_FAULT 39

 /* For KVM_EXIT_INTERNAL_ERROR */
 /* Emulate instruction failed. */
@@ -518,6 +463,13 @@ struct kvm_run {
 #define KVM_NOTIFY_CONTEXT_INVALID (1 << 0)
			__u32 flags;
		} notify;
+		/* KVM_EXIT_MEMORY_FAULT */
+		struct {
+#define KVM_MEMORY_EXIT_FLAG_PRIVATE (1ULL << 3)
+			__u64 flags;
+			__u64 gpa;
+			__u64 size;
+		} memory_fault;
		/* Fix the size of the union. */
		char padding[256];
	};
@@ -945,9 +897,6 @@ struct kvm_ppc_resize_hpt {
 */
 #define KVM_GET_VCPU_MMAP_SIZE _IO(KVMIO, 0x04) /* in bytes */
 #define KVM_GET_SUPPORTED_CPUID _IOWR(KVMIO, 0x05, struct kvm_cpuid2)
-#define KVM_TRACE_ENABLE __KVM_DEPRECATED_MAIN_W_0x06
-#define KVM_TRACE_PAUSE __KVM_DEPRECATED_MAIN_0x07
-#define KVM_TRACE_DISABLE __KVM_DEPRECATED_MAIN_0x08
 #define KVM_GET_EMULATED_CPUID _IOWR(KVMIO, 0x09, struct kvm_cpuid2)
 #define KVM_GET_MSR_FEATURE_INDEX_LIST _IOWR(KVMIO, 0x0a, struct kvm_msr_list)
@@ -1201,6 +1150,11 @@ struct kvm_ppc_resize_hpt {
 #define KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE 228
 #define KVM_CAP_ARM_SUPPORTED_BLOCK_SIZES 229
 #define KVM_CAP_ARM_SUPPORTED_REG_MASK_RANGES 230
+#define KVM_CAP_USER_MEMORY2 231
+#define KVM_CAP_MEMORY_FAULT_INFO 232
+#define KVM_CAP_MEMORY_ATTRIBUTES 233
+#define KVM_CAP_GUEST_MEMFD 234
+#define KVM_CAP_VM_TYPES 235

 #ifdef KVM_CAP_IRQ_ROUTING
@@ -1291,6 +1245,7 @@ struct kvm_x86_mce {
 #define KVM_XEN_HVM_CONFIG_EVTCHN_2LEVEL (1 << 4)
 #define KVM_XEN_HVM_CONFIG_EVTCHN_SEND (1 << 5)
 #define KVM_XEN_HVM_CONFIG_RUNSTATE_UPDATE_FLAG (1 << 6)
+#define KVM_XEN_HVM_CONFIG_PVCLOCK_TSC_UNSTABLE (1 << 7)

 struct kvm_xen_hvm_config {
	__u32 flags;
@@ -1483,6 +1438,8 @@ struct kvm_vfio_spapr_tce {
					struct kvm_userspace_memory_region)
 #define KVM_SET_TSS_ADDR _IO(KVMIO, 0x47)
 #define KVM_SET_IDENTITY_MAP_ADDR _IOW(KVMIO, 0x48, __u64)
+#define KVM_SET_USER_MEMORY_REGION2 _IOW(KVMIO, 0x49, \
+					 struct kvm_userspace_memory_region2)

 /* enable ucontrol for s390 */
 struct kvm_s390_ucas_mapping {
@@ -1507,20 +1464,8 @@ struct kvm_s390_ucas_mapping {
	_IOW(KVMIO, 0x67, struct kvm_coalesced_mmio_zone)
 #define KVM_UNREGISTER_COALESCED_MMIO \
	_IOW(KVMIO, 0x68, struct kvm_coalesced_mmio_zone)
-#define KVM_ASSIGN_PCI_DEVICE _IOR(KVMIO, 0x69, \
-				       struct kvm_assigned_pci_dev)
 #define KVM_SET_GSI_ROUTING _IOW(KVMIO, 0x6a, struct kvm_irq_routing)
-/* deprecated, replaced by KVM_ASSIGN_DEV_IRQ */
-#define KVM_ASSIGN_IRQ __KVM_DEPRECATED_VM_R_0x70
-#define KVM_ASSIGN_DEV_IRQ _IOW(KVMIO, 0x70, struct kvm_assigned_irq)
 #define KVM_REINJECT_CONTROL _IO(KVMIO, 0x71)
-#define KVM_DEASSIGN_PCI_DEVICE _IOW(KVMIO, 0x72, \
-				       struct kvm_assigned_pci_dev)
-#define KVM_ASSIGN_SET_MSIX_NR _IOW(KVMIO, 0x73, \
-				       struct kvm_assigned_msix_nr)
-#define KVM_ASSIGN_SET_MSIX_ENTRY _IOW(KVMIO, 0x74, \
-				       struct kvm_assigned_msix_entry)
-#define KVM_DEASSIGN_DEV_IRQ _IOW(KVMIO, 0x75, struct kvm_assigned_irq)
 #define KVM_IRQFD _IOW(KVMIO, 0x76, struct kvm_irqfd)
 #define KVM_CREATE_PIT2 _IOW(KVMIO, 0x77, struct kvm_pit_config)
 #define KVM_SET_BOOT_CPU_ID _IO(KVMIO, 0x78)
@@ -1537,9 +1482,6 @@ struct kvm_s390_ucas_mapping {
 * KVM_CAP_VM_TSC_CONTROL to set defaults for a VM */
 #define KVM_SET_TSC_KHZ _IO(KVMIO, 0xa2)
 #define KVM_GET_TSC_KHZ _IO(KVMIO, 0xa3)
-/* Available with KVM_CAP_PCI_2_3 */
-#define KVM_ASSIGN_SET_INTX_MASK _IOW(KVMIO, 0xa4, \
-				       struct kvm_assigned_pci_dev)
 /* Available with KVM_CAP_SIGNAL_MSI */
 #define KVM_SIGNAL_MSI _IOW(KVMIO, 0xa5, struct kvm_msi)
 /* Available with KVM_CAP_PPC_GET_SMMU_INFO */
@@ -1592,8 +1534,6 @@ struct kvm_s390_ucas_mapping {
 #define KVM_SET_SREGS _IOW(KVMIO, 0x84, struct kvm_sregs)
 #define KVM_TRANSLATE _IOWR(KVMIO, 0x85, struct kvm_translation)
 #define KVM_INTERRUPT _IOW(KVMIO, 0x86, struct kvm_interrupt)
-/* KVM_DEBUG_GUEST is no longer supported, use KVM_SET_GUEST_DEBUG instead */
-#define KVM_DEBUG_GUEST __KVM_DEPRECATED_VCPU_W_0x87
 #define KVM_GET_MSRS _IOWR(KVMIO, 0x88, struct kvm_msrs)
 #define KVM_SET_MSRS _IOW(KVMIO, 0x89, struct kvm_msrs)
 #define KVM_SET_CPUID _IOW(KVMIO, 0x8a, struct kvm_cpuid)
@@ -2267,4 +2207,24 @@ struct kvm_s390_zpci_op {
 /* flags for kvm_s390_zpci_op->u.reg_aen.flags */
 #define KVM_S390_ZPCIOP_REGAEN_HOST (1 << 0)

+/* Available with KVM_CAP_MEMORY_ATTRIBUTES */
+#define KVM_SET_MEMORY_ATTRIBUTES _IOW(KVMIO, 0xd2, struct kvm_memory_attributes)
+
+struct kvm_memory_attributes {
+	__u64 address;
+	__u64 size;
+	__u64 attributes;
+	__u64 flags;
+};
+
+#define KVM_MEMORY_ATTRIBUTE_PRIVATE (1ULL << 3)
+
+#define KVM_CREATE_GUEST_MEMFD _IOWR(KVMIO, 0xd4, struct kvm_create_guest_memfd)
+
+struct kvm_create_guest_memfd {
+	__u64 size;
+	__u64 flags;
+	__u64 reserved[6];
+};
+
 #endif /* __LINUX_KVM_H */
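Taken together, the KVM additions above form the guest_memfd ABI. A hedged sketch of how a VMM might bind a private-memory slot; add_private_slot() is a hypothetical helper, vm_fd is assumed to be an existing KVM VM file descriptor, shared_mem a normal mapping for the non-private case, and the headers new enough to carry the definitions above:

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Back a 2 MiB slot at GPA 0 with a guest_memfd; userspace_addr
     * still supplies the shared (non-private) mapping for the range. */
    static int add_private_slot(int vm_fd, void *shared_mem)
    {
        struct kvm_create_guest_memfd gmem = { .size = 2 * 1024 * 1024 };
        struct kvm_userspace_memory_region2 r2;
        int gfd = ioctl(vm_fd, KVM_CREATE_GUEST_MEMFD, &gmem);

        if (gfd < 0)
            return gfd;

        memset(&r2, 0, sizeof(r2));
        r2.slot = 0;
        r2.flags = KVM_MEM_GUEST_MEMFD;
        r2.guest_phys_addr = 0;
        r2.memory_size = gmem.size;
        r2.userspace_addr = (unsigned long)shared_mem;
        r2.guest_memfd = gfd;

        return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION2, &r2);
    }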
@@ -138,4 +138,74 @@ struct mount_attr {
 /* List of all mount_attr versions. */
 #define MOUNT_ATTR_SIZE_VER0 32 /* sizeof first published struct */

+/*
+ * Structure for getting mount/superblock/filesystem info with statmount(2).
+ *
+ * The interface is similar to statx(2): individual fields or groups can be
+ * selected with the @mask argument of statmount(). Kernel will set the @mask
+ * field according to the supported fields.
+ *
+ * If string fields are selected, then the caller needs to pass a buffer that
+ * has space after the fixed part of the structure. Nul terminated strings are
+ * copied there and offsets relative to @str are stored in the relevant fields.
+ * If the buffer is too small, then EOVERFLOW is returned. The actually used
+ * size is returned in @size.
+ */
+struct statmount {
+	__u32 size;		/* Total size, including strings */
+	__u32 __spare1;
+	__u64 mask;		/* What results were written */
+	__u32 sb_dev_major;	/* Device ID */
+	__u32 sb_dev_minor;
+	__u64 sb_magic;		/* ..._SUPER_MAGIC */
+	__u32 sb_flags;		/* SB_{RDONLY,SYNCHRONOUS,DIRSYNC,LAZYTIME} */
+	__u32 fs_type;		/* [str] Filesystem type */
+	__u64 mnt_id;		/* Unique ID of mount */
+	__u64 mnt_parent_id;	/* Unique ID of parent (for root == mnt_id) */
+	__u32 mnt_id_old;	/* Reused IDs used in proc/.../mountinfo */
+	__u32 mnt_parent_id_old;
+	__u64 mnt_attr;		/* MOUNT_ATTR_... */
+	__u64 mnt_propagation;	/* MS_{SHARED,SLAVE,PRIVATE,UNBINDABLE} */
+	__u64 mnt_peer_group;	/* ID of shared peer group */
+	__u64 mnt_master;	/* Mount receives propagation from this ID */
+	__u64 propagate_from;	/* Propagation from in current namespace */
+	__u32 mnt_root;		/* [str] Root of mount relative to root of fs */
+	__u32 mnt_point;	/* [str] Mountpoint relative to current root */
+	__u64 __spare2[50];
+	char str[];		/* Variable size part containing strings */
+};
+
+/*
+ * Structure for passing mount ID and miscellaneous parameters to statmount(2)
+ * and listmount(2).
+ *
+ * For statmount(2) @param represents the request mask.
+ * For listmount(2) @param represents the last listed mount id (or zero).
+ */
+struct mnt_id_req {
+	__u32 size;
+	__u32 spare;
+	__u64 mnt_id;
+	__u64 param;
+};
+
+/* List of all mnt_id_req versions. */
+#define MNT_ID_REQ_SIZE_VER0	24 /* sizeof first published struct */
+
+/*
+ * @mask bits for statmount(2)
+ */
+#define STATMOUNT_SB_BASIC		0x00000001U	/* Want/got sb_... */
+#define STATMOUNT_MNT_BASIC		0x00000002U	/* Want/got mnt_... */
+#define STATMOUNT_PROPAGATE_FROM	0x00000004U	/* Want/got propagate_from */
+#define STATMOUNT_MNT_ROOT		0x00000008U	/* Want/got mnt_root */
+#define STATMOUNT_MNT_POINT		0x00000010U	/* Want/got mnt_point */
+#define STATMOUNT_FS_TYPE		0x00000020U	/* Want/got fs_type */
+
+/*
+ * Special @mnt_id values that can be passed to listmount
+ */
+#define LSMT_ROOT		0xffffffffffffffff	/* root mount */
+
 #endif /* _UAPI_LINUX_MOUNT_H */
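A hedged sketch of driving the new interface from userspace, going through syscall(2) since libc wrappers may not exist yet; show_mount_point() is a hypothetical helper, and mnt_id would typically come from listmount() or from statx() with STATX_MNT_ID_UNIQUE (see the stat.h hunk just below):

    #define _GNU_SOURCE
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/mount.h>

    /* Print where the mount identified by mnt_id is mounted. */
    static int show_mount_point(uint64_t mnt_id)
    {
        char buf[4096];
        struct statmount *sm = (struct statmount *)buf;
        struct mnt_id_req req = {
            .size = MNT_ID_REQ_SIZE_VER0,
            .mnt_id = mnt_id,
            .param = STATMOUNT_MNT_POINT,  /* request mask */
        };

        if (syscall(__NR_statmount, &req, sm, sizeof(buf), 0) < 0)
            return -1;

        /* string fields are offsets into the trailing str[] area */
        printf("mounted on %s\n", sm->str + sm->mnt_point);
        return 0;
    }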
@@ -154,6 +154,7 @@ struct statx {
 #define STATX_BTIME		0x00000800U	/* Want/got stx_btime */
 #define STATX_MNT_ID		0x00001000U	/* Got stx_mnt_id */
 #define STATX_DIOALIGN		0x00002000U	/* Want/got direct I/O alignment info */
+#define STATX_MNT_ID_UNIQUE	0x00004000U	/* Want/got extended stx_mount_id */

 #define STATX__RESERVED		0x80000000U	/* Reserved for future struct statx expansion */
...
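And the consumer side of STATX_MNT_ID_UNIQUE, a minimal sketch assuming glibc's statx() wrapper and headers that already carry the new bit:

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/stat.h>

    int main(void)
    {
        struct statx sx;

        /* ask for the 64-bit unique mount ID rather than the old,
         * reusable stx_mnt_id */
        if (statx(AT_FDCWD, "/", 0, STATX_MNT_ID_UNIQUE, &sx) != 0)
            return 1;
        if (sx.stx_mask & STATX_MNT_ID_UNIQUE)
            printf("unique mount id: %llu\n",
                   (unsigned long long)sx.stx_mnt_id);
        return 0;
    }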
@@ -47,6 +47,10 @@ Print PMU events and metrics limited to the specific PMU name.
 --json::
 Output in JSON format.

+-o::
+--output=::
+	Output file name. By default output is written to stdout.
+
 [[EVENT_MODIFIERS]]
 EVENT MODIFIERS
 ---------------
...
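Usage is what one would expect, e.g.:

    # write the JSON event listing to a file instead of stdout
    perf list -j -o /tmp/events.json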
@@ -236,6 +236,16 @@ else
   SHELLCHECK := $(shell which shellcheck 2> /dev/null)
 endif

+# shellcheck is using in tools/perf/tests/Build with option -a/--check-sourced (
+# introduced in v0.4.7) and -S/--severity (introduced in v0.6.0). So make the
+# minimal shellcheck version as v0.6.0.
+ifneq ($(SHELLCHECK),)
+  ifeq ($(shell expr $(shell $(SHELLCHECK) --version | grep version: | \
+        sed -e 's/.\+ \([0-9]\+\).\([0-9]\+\).\([0-9]\+\)/\1\2\3/g') \< 060), 1)
+    SHELLCHECK :=
+  endif
+endif
+
 export srctree OUTPUT RM CC CXX LD AR CFLAGS CXXFLAGS V BISON FLEX AWK
 export HOSTCC HOSTLD HOSTAR HOSTCFLAGS SHELLCHECK
...
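The sed expression collapses the reported version triplet into a digit string ("version: 0.9.0" becomes "090"), so expr can compare it numerically against 060. A standalone sketch of the same check, assuming shellcheck is on PATH:

    ver=$(shellcheck --version | grep version: | \
          sed -e 's/.\+ \([0-9]\+\).\([0-9]\+\).\([0-9]\+\)/\1\2\3/g')
    if [ "$(expr "$ver" \< 060)" = "1" ]; then
        echo "shellcheck older than v0.6.0, disabling shell linting"
    fi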
@@ -4080,8 +4080,8 @@ int cmd_record(int argc, const char **argv)
	}

	if (rec->switch_output.num_files) {
-		rec->switch_output.filenames = calloc(sizeof(char *),
-						      rec->switch_output.num_files);
+		rec->switch_output.filenames = calloc(rec->switch_output.num_files,
+						      sizeof(char *));
		if (!rec->switch_output.filenames) {
			err = -EINVAL;
			goto out_opts;
...
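This and the calloc() changes in the hist, metricgroup and synthetic-events hunks further down are all the same pattern: gcc-14's new -Wcalloc-transposed-args fires when a sizeof expression is passed as calloc()'s first argument. A minimal sketch of the warned-about and the preferred form, with a hypothetical alloc_names() helper:

    #include <stdlib.h>

    char **alloc_names(size_t n)
    {
        /* gcc-14 warns on: calloc(sizeof(char *), n);
         * the element count belongs first, the element size second */
        return calloc(n, sizeof(char *));
    }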
@@ -357,7 +357,7 @@ static void perf_top__print_sym_table(struct perf_top *top)
 static void prompt_integer(int *target, const char *msg)
 {
-	char *buf = malloc(0), *p;
+	char *buf = NULL, *p;
	size_t dummy = 0;
	int tmp;
...
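The dropped malloc(0) fed getline(), which is fine starting from a NULL buffer as long as the size is 0; it then allocates (and reallocates) as needed. A minimal sketch of that idiomatic pattern:

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        char *buf = NULL;  /* getline() allocates on demand */
        size_t cap = 0;

        if (getline(&buf, &cap, stdin) > 0)
            printf("read: %s", buf);
        free(buf);
        return 0;
    }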
@@ -195,7 +195,6 @@
	},
	{
		"BriefDescription": "Counts the number of cycles the core is stalled due to a demand load miss which hit in DRAM or MMIO (Non-DRAM).",
-		"MetricConstraint": "NO_GROUP_EVENTS",
		"MetricExpr": "MEM_BOUND_STALLS.LOAD_DRAM_HIT / tma_info_core_clks - max((MEM_BOUND_STALLS.LOAD - LD_HEAD.L1_MISS_AT_RET) / tma_info_core_clks, 0) * MEM_BOUND_STALLS.LOAD_DRAM_HIT / MEM_BOUND_STALLS.LOAD",
		"MetricGroup": "TopdownL3;tma_L3_group;tma_memory_bound_group",
		"MetricName": "tma_dram_bound",
@@ -457,7 +456,6 @@
	},
	{
		"BriefDescription": "Counts the number of cycles a core is stalled due to a demand load which hit in the L2 Cache.",
-		"MetricConstraint": "NO_GROUP_EVENTS",
		"MetricExpr": "MEM_BOUND_STALLS.LOAD_L2_HIT / tma_info_core_clks - max((MEM_BOUND_STALLS.LOAD - LD_HEAD.L1_MISS_AT_RET) / tma_info_core_clks, 0) * MEM_BOUND_STALLS.LOAD_L2_HIT / MEM_BOUND_STALLS.LOAD",
		"MetricGroup": "TopdownL3;tma_L3_group;tma_memory_bound_group",
		"MetricName": "tma_l2_bound",
@@ -466,7 +464,6 @@
	},
	{
		"BriefDescription": "Counts the number of cycles a core is stalled due to a demand load which hit in the Last Level Cache (LLC) or other core with HITE/F/M.",
-		"MetricConstraint": "NO_GROUP_EVENTS_NMI",
		"MetricExpr": "MEM_BOUND_STALLS.LOAD_LLC_HIT / tma_info_core_clks - max((MEM_BOUND_STALLS.LOAD - LD_HEAD.L1_MISS_AT_RET) / tma_info_core_clks, 0) * MEM_BOUND_STALLS.LOAD_LLC_HIT / MEM_BOUND_STALLS.LOAD",
		"MetricGroup": "TopdownL3;tma_L3_group;tma_memory_bound_group",
		"MetricName": "tma_l3_bound",
@@ -683,7 +680,6 @@
	},
	{
		"BriefDescription": "Counts the number of cycles that the oldest load of the load buffer is stalled at retirement due to a store forward block.",
-		"MetricConstraint": "NO_GROUP_EVENTS_NMI",
		"MetricExpr": "LD_HEAD.ST_ADDR_AT_RET / tma_info_core_clks",
		"MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
		"MetricName": "tma_store_fwd_blk",
...
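These MetricConstraint removals ride along with the main fix described in the pull message: on hybrid parts, events in a metric formula must be qualified with the owning PMU to be taken into account. Illustratively, a hypothetical entry (not one of the changed ones) qualifying its events with the cpu_core PMU would look like:

    {
        "BriefDescription": "Instructions per cycle on the performance cores",
        "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@CPU_CLK_UNHALTED.THREAD@",
        "MetricName": "tma_info_ipc_core",
        "Unit": "cpu_core"
    }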
@@ -414,16 +414,30 @@ EOF
	# start daemon
	daemon_start ${config} test

-	# send 2 signals
-	perf daemon signal --config ${config} --session test
-	perf daemon signal --config ${config}
-
-	# stop daemon
-	daemon_exit ${config}
-
-	# count is 2 perf.data for signals and 1 for perf record finished
-	count=`ls ${base}/session-test/*perf.data* | wc -l`
-	if [ ${count} -ne 3 ]; then
+	# send 2 signals then exit. Do this in a loop watching the number of
+	# files to avoid races. If the loop retries more than 600 times then
+	# give up.
+	local retries=0
+	local signals=0
+	local success=0
+	while [ ${retries} -lt 600 ] && [ ${success} -eq 0 ]; do
+		local files
+		files=`ls ${base}/session-test/*perf.data* 2> /dev/null | wc -l`
+		if [ ${signals} -eq 0 ]; then
+			perf daemon signal --config ${config} --session test
+			signals=1
+		elif [ ${signals} -eq 1 ] && [ $files -ge 1 ]; then
+			perf daemon signal --config ${config}
+			signals=2
+		elif [ ${signals} -eq 2 ] && [ $files -ge 2 ]; then
+			daemon_exit ${config}
+			signals=3
+		elif [ ${signals} -eq 3 ] && [ $files -ge 3 ]; then
+			success=1
+		fi
+		retries=$((${retries} +1))
+	done
+	if [ ${success} -eq 0 ]; then
		error=1
		echo "FAILED: perf data no generated"
	fi
...
@@ -3,17 +3,32 @@
 # SPDX-License-Identifier: GPL-2.0

 set -e
-err=0

 shelldir=$(dirname "$0")
 # shellcheck source=lib/setup_python.sh
 . "${shelldir}"/lib/setup_python.sh

+list_output=$(mktemp /tmp/__perf_test.list_output.json.XXXXX)
+
+cleanup() {
+  rm -f "${list_output}"
+  trap - EXIT TERM INT
+}
+
+trap_cleanup() {
+  cleanup
+  exit 1
+}
+trap trap_cleanup EXIT TERM INT
+
 test_list_json() {
   echo "Json output test"
-  perf list -j | $PYTHON -m json.tool
+  perf list -j -o "${list_output}"
+  $PYTHON -m json.tool "${list_output}"
   echo "Json output test [Success]"
 }

 test_list_json
-exit $err
+cleanup
+exit 0
@@ -36,8 +36,7 @@ test_db()
	echo "DB test"

	# Check if python script is supported
-	libpython=$(perf version --build-options | grep python | grep -cv OFF)
-	if [ "${libpython}" != "1" ] ; then
+	if perf version --build-options | grep python | grep -q OFF ; then
		echo "SKIP: python scripting is not supported"
		err=2
		return
@@ -54,7 +53,14 @@ def sample_table(*args):
 def call_path_table(*args):
	print(f'call_path_table({args}')
 _end_of_file_
-	perf record -g -o "${perfdatafile}" true
+	case $(uname -m)
+	in s390x)
+		cmd_flags="--call-graph dwarf -e cpu-clock";;
+	*)
+		cmd_flags="-g";;
+	esac
+
+	perf record $cmd_flags -o "${perfdatafile}" true
	perf script -i "${perfdatafile}" -s "${db_test}"
	echo "DB test [Success]"
 }
...
@@ -67,6 +67,7 @@ size_t syscall_arg__scnprintf_statx_mask(char *bf, size_t size, struct syscall_a
	P_FLAG(BTIME);
	P_FLAG(MNT_ID);
	P_FLAG(DIOALIGN);
+	P_FLAG(MNT_ID_UNIQUE);

 #undef P_FLAG
...
@@ -103,7 +103,14 @@ struct evlist *evlist__new_default(void)
	err = parse_event(evlist, can_profile_kernel ? "cycles:P" : "cycles:Pu");
	if (err) {
		evlist__delete(evlist);
-		evlist = NULL;
+		return NULL;
+	}
+
+	if (evlist->core.nr_entries > 1) {
+		struct evsel *evsel;
+
+		evlist__for_each_entry(evlist, evsel)
+			evsel__set_sample_id(evsel, /*can_sample_identifier=*/false);
	}

	return evlist;
...
@@ -491,8 +491,8 @@ static int hist_entry__init(struct hist_entry *he,
	}

	if (symbol_conf.res_sample) {
-		he->res_samples = calloc(sizeof(struct res_sample),
-					 symbol_conf.res_sample);
+		he->res_samples = calloc(symbol_conf.res_sample,
+					 sizeof(struct res_sample));
		if (!he->res_samples)
			goto err_srcline;
	}
...
@@ -115,6 +115,10 @@
	SYM_ALIAS(alias, name, SYM_T_FUNC, SYM_L_WEAK)
 #endif

+#ifndef SYM_FUNC_ALIAS_MEMFUNC
+#define SYM_FUNC_ALIAS_MEMFUNC SYM_FUNC_ALIAS
+#endif
+
 // In the kernel sources (include/linux/cfi_types.h), this has a different
 // definition when CONFIG_CFI_CLANG is used, for tools/ just use the !clang
 // definition:
...
@@ -286,7 +286,7 @@ static int setup_metric_events(const char *pmu, struct hashmap *ids,
	*out_metric_events = NULL;
	ids_size = hashmap__size(ids);

-	metric_events = calloc(sizeof(void *), ids_size + 1);
+	metric_events = calloc(ids_size + 1, sizeof(void *));
	if (!metric_events)
		return -ENOMEM;
...
@@ -66,7 +66,7 @@ void print_tracepoint_events(const struct print_callbacks *print_cb __maybe_unus
	put_tracing_file(events_path);
	if (events_fd < 0) {
-		printf("Error: failed to open tracing events directory\n");
+		pr_err("Error: failed to open tracing events directory\n");
		return;
	}
...
@@ -1055,11 +1055,11 @@ int perf_event__synthesize_threads(struct perf_tool *tool,
	if (thread_nr > n)
		thread_nr = n;

-	synthesize_threads = calloc(sizeof(pthread_t), thread_nr);
+	synthesize_threads = calloc(thread_nr, sizeof(pthread_t));
	if (synthesize_threads == NULL)
		goto free_dirent;

-	args = calloc(sizeof(*args), thread_nr);
+	args = calloc(thread_nr, sizeof(*args));
	if (args == NULL)
		goto free_threads;
...