Commit 7a7a933e authored by Martin Krastev, committed by Zack Rusin

drm/vmwgfx: Introduce VMware mks-guest-stats

The VMware mks-guest-stats mechanism allows the collection of performance stats from
guest userland GL contexts, as well as from the vmwgfx kernelspace, via a set of
software-defined performance counters. The userspace performance counters are
(de)registered with the vmware-vmx-stats hypervisor via new ioctls. The vmwgfx
kernelspace counters are controlled at build time via a new config option,
DRM_VMWGFX_MKSSTATS.

* Add vmw_mksstat_{add|remove|reset}_ioctl controlling the tracking of
  mks-guest-stats in guest winsys contexts
* Add DRM_VMWGFX_MKSSTATS config to drivers/gpu/drm/vmwgfx/Kconfig controlling
  the instrumentation of vmwgfx for kernelspace mks-guest-stats counters
* Instrument vmwgfx vmw_execbuf_ioctl to collect mks-guest-stats according to
  DRM_VMWGFX_MKSSTATS
Signed-off-by: Martin Krastev <krastevm@vmware.com>
Reviewed-by: Zack Rusin <zackr@vmware.com>
Signed-off-by: Zack Rusin <zackr@vmware.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210609172307.131929-3-zackr@vmware.com
parent d92223ea
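A minimal userspace sketch of the registration path this commit adds (not part of the commit itself; the helper name is illustrative, and it assumes the updated vmwgfx_drm.h UAPI header and <drm/drm.h> are on the include path, with fd being an open vmwgfx render node):

/* Hypothetical example: register an mks-guest-stats instance with the host
 * through the new DRM_VMW_MKSSTAT_ADD ioctl. stat/info/strs must be
 * page-aligned user buffers laid out as described by the UAPI additions below. */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm.h>
#include "vmwgfx_drm.h"

#define DRM_IOCTL_VMW_MKSSTAT_ADD \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_MKSSTAT_ADD, struct drm_vmw_mksstat_add_arg)

static int64_t register_mksstats(int fd, void *stat, uint64_t stat_len,
				 void *info, uint64_t info_len,
				 void *strs, uint64_t strs_len,
				 const char *description)
{
	struct drm_vmw_mksstat_add_arg arg;

	memset(&arg, 0, sizeof(arg));
	arg.stat = (uintptr_t)stat;        /* MKSGuestStatCounter(Time) array */
	arg.info = (uintptr_t)info;        /* MKSGuestStatInfoEntry array */
	arg.strs = (uintptr_t)strs;        /* name/description strings blob */
	arg.stat_len = stat_len;
	arg.info_len = info_len;
	arg.strs_len = strs_len;
	arg.description = (uintptr_t)description;

	if (ioctl(fd, DRM_IOCTL_VMW_MKSSTAT_ADD, &arg))
		return -1;

	return (int64_t)arg.id;            /* slot id, used later with DRM_VMW_MKSSTAT_REMOVE */
}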
@@ -22,3 +22,10 @@ config DRM_VMWGFX_FBCON
Choose this option if you are shipping a new vmwgfx
userspace driver that supports using the kernel driver.
config DRM_VMWGFX_MKSSTATS
bool "Enable mksGuestStats instrumentation of vmwgfx by default"
depends on DRM_VMWGFX
default n
help
Choose this option to instrument the kernel driver for mksGuestStats.
@@ -23,9 +23,11 @@
* SOFTWARE.
*
**********************************************************/
-#ifndef _VM_BASIC_TYPES_H_
-#define _VM_BASIC_TYPES_H_
+#ifndef _SVGA_TYPES_H_
+#define _SVGA_TYPES_H_
#include <linux/kernel.h>
#include <linux/mm.h>
#include <asm/page.h>
typedef u32 uint32;
typedef s32 int32;
@@ -48,4 +50,90 @@ typedef bool Bool;
#define CONST64U(x) x##ULL
/*
* MKS Guest Stats types
*/
typedef struct MKSGuestStatCounter {
atomic64_t count;
} MKSGuestStatCounter;
typedef struct MKSGuestStatCounterTime {
MKSGuestStatCounter counter;
atomic64_t selfCycles;
atomic64_t totalCycles;
} MKSGuestStatCounterTime;
/*
* Flags for MKSGuestStatInfoEntry::flags below
*/
#define MKS_GUEST_STAT_FLAG_NONE 0
#define MKS_GUEST_STAT_FLAG_TIME (1U << 0)
typedef __attribute__((aligned(32))) struct MKSGuestStatInfoEntry {
union {
const char *s;
uint64 u;
} name;
union {
const char *s;
uint64 u;
} description;
uint64 flags;
union {
MKSGuestStatCounter *counter;
MKSGuestStatCounterTime *counterTime;
uint64 u;
} stat;
} MKSGuestStatInfoEntry;
#define INVALID_PPN64 ((PPN64)0x000fffffffffffffULL)
#define vmw_num_pages(size) (PAGE_ALIGN(size) >> PAGE_SHIFT)
#define MKS_GUEST_STAT_INSTANCE_DESC_LENGTH 1024
#define MKS_GUEST_STAT_INSTANCE_MAX_STATS 4096
#define MKS_GUEST_STAT_INSTANCE_MAX_STAT_PPNS \
(vmw_num_pages(MKS_GUEST_STAT_INSTANCE_MAX_STATS * \
sizeof(MKSGuestStatCounterTime)))
#define MKS_GUEST_STAT_INSTANCE_MAX_INFO_PPNS \
(vmw_num_pages(MKS_GUEST_STAT_INSTANCE_MAX_STATS * \
sizeof(MKSGuestStatInfoEntry)))
#define MKS_GUEST_STAT_AVERAGE_NAME_LENGTH 40
#define MKS_GUEST_STAT_INSTANCE_MAX_STRS_PPNS \
(vmw_num_pages(MKS_GUEST_STAT_INSTANCE_MAX_STATS * \
MKS_GUEST_STAT_AVERAGE_NAME_LENGTH))
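/*
 * For reference (assuming 4 KiB pages and typical 64-bit layouts, i.e.
 * sizeof(MKSGuestStatCounterTime) == 24 and sizeof(MKSGuestStatInfoEntry) == 32
 * because of its aligned(32) attribute), the 4096-stat limit above works out
 * to 24 stat pages, 32 info pages and 40 string pages per instance.
 */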
/*
* The MKSGuestStatInstanceDescriptor is used as the main interface to
* communicate guest stats back to the host code. The guest must
* allocate an instance of this structure at the start of a page and
* provide the physical address to the host. From there the host code
* can walk this structure to find other (pinned) pages containing the
* stats data.
*
* Since the MKSGuestStatInfoEntry structures contain userlevel
* pointers, the InstanceDescriptor also contains pointers to the
* beginning of these sections allowing the host side code to correctly
* interpret the pointers.
*
* Because the host side code never acknowledges anything back to the
* guest there is no strict requirement to maintain compatibility
* across releases. If the interface changes the host might not be
* able to log stats, but the guest will continue to run normally.
*/
typedef struct MKSGuestStatInstanceDescriptor {
uint64 reservedMBZ; /* must be zero for now. */
uint64 statStartVA; /* VA of the start of the stats section. */
uint64 strsStartVA; /* VA of the start of the strings section. */
uint64 statLength; /* length of the stats section in bytes. */
uint64 infoLength; /* length of the info entry section in bytes. */
uint64 strsLength; /* length of the strings section in bytes. */
PPN64 statPPNs[MKS_GUEST_STAT_INSTANCE_MAX_STAT_PPNS]; /* stat counters */
PPN64 infoPPNs[MKS_GUEST_STAT_INSTANCE_MAX_INFO_PPNS]; /* stat info */
PPN64 strsPPNs[MKS_GUEST_STAT_INSTANCE_MAX_STRS_PPNS]; /* strings */
char description[MKS_GUEST_STAT_INSTANCE_DESC_LENGTH];
} MKSGuestStatInstanceDescriptor;
#endif
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _VM_BASIC_TYPES_H_
#define _VM_BASIC_TYPES_H_
#include <linux/kernel.h>
typedef u32 uint32;
typedef s32 int32;
typedef u64 uint64;
typedef u16 uint16;
typedef s16 int16;
typedef u8 uint8;
typedef s8 int8;
typedef uint64 PA;
typedef uint32 PPN;
typedef uint64 PPN64;
typedef bool Bool;
#define MAX_UINT32 U32_MAX
#endif
@@ -43,6 +43,7 @@
#include "vmwgfx_binding.h"
#include "vmwgfx_devcaps.h"
#include "vmwgfx_drv.h"
#include "vmwgfx_mksstat.h"
#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
@@ -148,6 +149,14 @@
#define DRM_IOCTL_VMW_MSG \
DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_MSG, \
struct drm_vmw_msg_arg)
#define DRM_IOCTL_VMW_MKSSTAT_RESET \
DRM_IO(DRM_COMMAND_BASE + DRM_VMW_MKSSTAT_RESET)
#define DRM_IOCTL_VMW_MKSSTAT_ADD \
DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_MKSSTAT_ADD, \
struct drm_vmw_mksstat_add_arg)
#define DRM_IOCTL_VMW_MKSSTAT_REMOVE \
DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_MKSSTAT_REMOVE, \
struct drm_vmw_mksstat_remove_arg)
/*
* The core DRM version of this macro doesn't account for
@@ -244,6 +253,15 @@ static const struct drm_ioctl_desc vmw_ioctls[] = {
VMW_IOCTL_DEF(VMW_MSG,
vmw_msg_ioctl,
DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_MKSSTAT_RESET,
vmw_mksstat_reset_ioctl,
DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_MKSSTAT_ADD,
vmw_mksstat_add_ioctl,
DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_MKSSTAT_REMOVE,
vmw_mksstat_remove_ioctl,
DRM_RENDER_ALLOW),
};
static const struct pci_device_id vmw_pci_id_list[] = {
@@ -1137,6 +1155,8 @@ static void vmw_driver_unload(struct drm_device *dev)
for (i = vmw_res_context; i < vmw_res_max; ++i)
idr_destroy(&dev_priv->res_idr[i]);
vmw_mksstat_remove_all(dev_priv);
pci_release_regions(pdev);
}
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
*
- * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
+ * Copyright 2009-2021 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -91,6 +91,9 @@
#define VMW_RES_FENCE ttm_driver_type3
#define VMW_RES_SHADER ttm_driver_type4
#define MKSSTAT_CAPACITY_LOG2 5U
#define MKSSTAT_CAPACITY (1U << MKSSTAT_CAPACITY_LOG2)
struct vmw_fpriv {
struct ttm_object_file *tfile;
bool gb_aware; /* user-space is guest-backed aware */
@@ -630,6 +633,18 @@ struct vmw_private {
struct vmw_validation_mem vvm;
uint32 *devcaps;
/*
* mksGuestStat instance-descriptor and pid arrays
*/
struct page *mksstat_user_pages[MKSSTAT_CAPACITY];
atomic_t mksstat_user_pids[MKSSTAT_CAPACITY];
#if IS_ENABLED(CONFIG_DRM_VMWGFX_MKSSTATS)
struct page *mksstat_kern_pages[MKSSTAT_CAPACITY];
u8 mksstat_kern_top_timer[MKSSTAT_CAPACITY];
atomic_t mksstat_kern_pids[MKSSTAT_CAPACITY];
#endif
};
static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
@@ -1503,6 +1518,17 @@ __printf(1, 2) int vmw_host_printf(const char *fmt, ...);
int vmw_msg_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
/* Host mksGuestStats - vmwgfx_msg.c: */
int vmw_mksstat_get_kern_slot(pid_t pid, struct vmw_private *dev_priv);
int vmw_mksstat_reset_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int vmw_mksstat_add_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int vmw_mksstat_remove_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int vmw_mksstat_remove_all(struct vmw_private *dev_priv);
/* VMW logging */
/**
@@ -32,6 +32,7 @@
#include <drm/ttm/ttm_placement.h>
#include "vmwgfx_so.h"
#include "vmwgfx_binding.h"
#include "vmwgfx_mksstat.h"
#define VMW_RES_HT_ORDER 12
@@ -4406,6 +4407,9 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
int ret;
struct dma_fence *in_fence = NULL;
MKS_STAT_TIME_DECL(MKSSTAT_KERN_EXECBUF);
MKS_STAT_TIME_PUSH(MKSSTAT_KERN_EXECBUF);
/*
* Extend the ioctl argument while maintaining backwards compatibility:
* We take different code paths depending on the value of arg->version.
@@ -4415,7 +4419,8 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
if (unlikely(arg->version > DRM_VMW_EXECBUF_VERSION ||
arg->version == 0)) {
VMW_DEBUG_USER("Incorrect execbuf version.\n");
- return -EINVAL;
+ ret = -EINVAL;
goto mksstats_out;
}
switch (arg->version) {
@@ -4435,7 +4440,8 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
if (!in_fence) {
VMW_DEBUG_USER("Cannot get imported fence\n");
- return -EINVAL;
+ ret = -EINVAL;
goto mksstats_out;
}
ret = vmw_wait_dma_fence(dev_priv->fman, in_fence);
@@ -4458,5 +4464,8 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
out:
if (in_fence)
dma_fence_put(in_fence);
mksstats_out:
MKS_STAT_TIME_POP(MKSSTAT_KERN_EXECBUF);
return ret;
}
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
*
* Copyright 2021 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#ifndef _VMWGFX_MKSSTAT_H_
#define _VMWGFX_MKSSTAT_H_
#include <asm/page.h>
/* Reservation marker for mksstat pid's */
#define MKSSTAT_PID_RESERVED -1
#if IS_ENABLED(CONFIG_DRM_VMWGFX_MKSSTATS)
/*
* Kernel-internal mksGuestStat counters. The order of this enum dictates the
* order of instantiation of these counters in the mksGuestStat pages.
*/
typedef enum {
MKSSTAT_KERN_EXECBUF, /* vmw_execbuf_ioctl */
MKSSTAT_KERN_COUNT /* Reserved entry; always last */
} mksstat_kern_stats_t;
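/*
 * Note: a counter added to this enum also needs a matching name/description
 * row in mksstat_kern_name_desc[] and an mksstat_init_record*() call in
 * mksstat_init_kern_id() (both in vmwgfx_msg.c, below).
 */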
/**
* vmw_mksstat_get_kern_pstat: Computes the address of the MKSGuestStatCounterTime
* array from the address of the base page.
*
* @page_addr: Pointer to the base page.
* Return: Pointer to the MKSGuestStatCounterTime array.
*/
static inline void *vmw_mksstat_get_kern_pstat(void *page_addr)
{
return page_addr + PAGE_SIZE * 1;
}
/**
* vmw_mksstat_get_kern_pinfo: Computes the address of the MKSGuestStatInfoEntry
* array from the address of the base page.
*
* @page_addr: Pointer to the base page.
* Return: Pointer to the MKSGuestStatInfoEntry array.
*/
static inline void *vmw_mksstat_get_kern_pinfo(void *page_addr)
{
return page_addr + PAGE_SIZE * 2;
}
/**
* vmw_mksstat_get_kern_pstrs: Computes the address of the mksGuestStat strings
* sequence from the address of the base page.
*
* @page_addr: Pointer to the base page.
* Return: Pointer to the mksGuestStat strings sequence.
*/
static inline void *vmw_mksstat_get_kern_pstrs(void *page_addr)
{
return page_addr + PAGE_SIZE * 3;
}
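/*
 * Layout note: the kernel-internal instance is one order-2 (four page)
 * allocation; page 0 holds the MKSGuestStatInstanceDescriptor, and the
 * helpers above return the stats, info and strings areas at page offsets
 * 1, 2 and 3 respectively.
 */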
/*
* MKS_STAT_TIME_DECL/PUSH/POP macros to be used in timer-counted routines.
*/
struct mksstat_timer_t {
/* mutable */ mksstat_kern_stats_t old_top;
const u64 t0;
const int slot;
};
#define MKS_STAT_TIME_DECL(kern_cntr) \
struct mksstat_timer_t _##kern_cntr = { \
.t0 = rdtsc(), \
.slot = vmw_mksstat_get_kern_slot(current->pid, dev_priv) \
}
#define MKS_STAT_TIME_PUSH(kern_cntr) \
do { \
if (_##kern_cntr.slot >= 0) { \
_##kern_cntr.old_top = dev_priv->mksstat_kern_top_timer[_##kern_cntr.slot]; \
dev_priv->mksstat_kern_top_timer[_##kern_cntr.slot] = kern_cntr; \
} \
} while (0)
#define MKS_STAT_TIME_POP(kern_cntr) \
do { \
if (_##kern_cntr.slot >= 0) { \
const pid_t pid = atomic_cmpxchg(&dev_priv->mksstat_kern_pids[_##kern_cntr.slot], current->pid, MKSSTAT_PID_RESERVED); \
dev_priv->mksstat_kern_top_timer[_##kern_cntr.slot] = _##kern_cntr.old_top; \
\
if (pid == current->pid) { \
const u64 dt = rdtsc() - _##kern_cntr.t0; \
MKSGuestStatCounterTime *pstat; \
\
BUG_ON(!dev_priv->mksstat_kern_pages[_##kern_cntr.slot]); \
\
pstat = vmw_mksstat_get_kern_pstat(page_address(dev_priv->mksstat_kern_pages[_##kern_cntr.slot])); \
\
atomic64_inc(&pstat[kern_cntr].counter.count); \
atomic64_add(dt, &pstat[kern_cntr].selfCycles); \
atomic64_add(dt, &pstat[kern_cntr].totalCycles); \
\
if (_##kern_cntr.old_top != MKSSTAT_KERN_COUNT) \
atomic64_sub(dt, &pstat[_##kern_cntr.old_top].selfCycles); \
\
atomic_set(&dev_priv->mksstat_kern_pids[_##kern_cntr.slot], current->pid); \
} \
} \
} while (0)
#else
#define MKS_STAT_TIME_DECL(kern_cntr)
#define MKS_STAT_TIME_PUSH(kern_cntr)
#define MKS_STAT_TIME_POP(kern_cntr)
#endif /* IS_ENABLED(CONFIG_DRM_VMWGFX_MKSSTATS) */
#endif
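For reference, a minimal sketch of the instrumentation pattern these macros support, modeled on the vmw_execbuf_ioctl changes above (the routine is hypothetical; dev_priv must be in scope because the macros expand to accesses of the dev_priv->mksstat_kern_* arrays):

/* Sketch only: a hypothetical timed path using the mksstat timer macros. */
static int vmw_example_timed_ioctl(struct vmw_private *dev_priv)
{
	int ret = 0;

	MKS_STAT_TIME_DECL(MKSSTAT_KERN_EXECBUF);
	MKS_STAT_TIME_PUSH(MKSSTAT_KERN_EXECBUF);

	/* ... the work attributed to this counter; error paths must still
	 * reach the POP below so the timer nesting stays balanced ... */

	MKS_STAT_TIME_POP(MKSSTAT_KERN_EXECBUF);
	return ret;
}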
@@ -31,10 +31,12 @@
#include <linux/mem_encrypt.h>
#include <asm/hypervisor.h>
#include <drm/drm_ioctl.h>
#include "vmwgfx_drv.h" #include "vmwgfx_drv.h"
#include "vmwgfx_msg_x86.h" #include "vmwgfx_msg_x86.h"
#include "vmwgfx_msg_arm64.h" #include "vmwgfx_msg_arm64.h"
#include "vmwgfx_mksstat.h"
#define MESSAGE_STATUS_SUCCESS 0x0001
#define MESSAGE_STATUS_DORECV 0x0002
@@ -56,6 +58,11 @@
#define VMW_PORT_CMD_RECVSIZE (MSG_TYPE_RECVSIZE << 16 | VMW_PORT_CMD_MSG)
#define VMW_PORT_CMD_RECVSTATUS (MSG_TYPE_RECVSTATUS << 16 | VMW_PORT_CMD_MSG)
#define VMW_PORT_CMD_MKS_GUEST_STATS 85
#define VMW_PORT_CMD_MKSGS_RESET (0 << 16 | VMW_PORT_CMD_MKS_GUEST_STATS)
#define VMW_PORT_CMD_MKSGS_ADD_PPN (1 << 16 | VMW_PORT_CMD_MKS_GUEST_STATS)
#define VMW_PORT_CMD_MKSGS_REMOVE_PPN (2 << 16 | VMW_PORT_CMD_MKS_GUEST_STATS)
#define HIGH_WORD(X) ((X & 0xFFFF0000) >> 16)
#define MAX_USER_MSG_LENGTH PAGE_SIZE
@@ -612,3 +619,575 @@ int vmw_msg_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}
/**
* reset_ppn_array: Resets a PPN64 array to INVALID_PPN64 content
*
* @arr: Array to reset.
* @size: Array length.
*/
static inline void reset_ppn_array(PPN64 *arr, size_t size)
{
size_t i;
BUG_ON(!arr || size == 0);
for (i = 0; i < size; ++i)
arr[i] = INVALID_PPN64;
}
/**
* hypervisor_ppn_reset_all: Removes all mksGuestStat instance descriptors from
* the hypervisor. All related pages should be subsequently unpinned or freed.
*
*/
static inline void hypervisor_ppn_reset_all(void)
{
unsigned long eax, ebx, ecx, edx, si = 0, di = 0;
VMW_PORT(VMW_PORT_CMD_MKSGS_RESET,
0, si, di,
0,
VMW_HYPERVISOR_MAGIC,
eax, ebx, ecx, edx, si, di);
}
/**
* hypervisor_ppn_add: Adds a single mksGuestStat instance descriptor to the
* hypervisor. Any related userspace pages should be pinned in advance.
*
* @pfn: Physical page number of the instance descriptor
*/
static inline void hypervisor_ppn_add(PPN64 pfn)
{
unsigned long eax, ebx, ecx, edx, si = 0, di = 0;
VMW_PORT(VMW_PORT_CMD_MKSGS_ADD_PPN,
pfn, si, di,
0,
VMW_HYPERVISOR_MAGIC,
eax, ebx, ecx, edx, si, di);
}
/**
* hypervisor_ppn_remove: Removes a single mksGuestStat instance descriptor from
* the hypervisor. All related pages should be subsequently unpinned or freed.
*
* @pfn: Physical page number of the instance descriptor
*/
static inline void hypervisor_ppn_remove(PPN64 pfn)
{
unsigned long eax, ebx, ecx, edx, si = 0, di = 0;
VMW_PORT(VMW_PORT_CMD_MKSGS_REMOVE_PPN,
pfn, si, di,
0,
VMW_HYPERVISOR_MAGIC,
eax, ebx, ecx, edx, si, di);
}
#if IS_ENABLED(CONFIG_DRM_VMWGFX_MKSSTATS)
/* Order of the total number of pages used for kernel-internal mksGuestStat; at least 2 */
#define MKSSTAT_KERNEL_PAGES_ORDER 2
/* Header to the text description of mksGuestStat instance descriptor */
#define MKSSTAT_KERNEL_DESCRIPTION "vmwgfx"
/* Kernel mksGuestStats counter names and descriptions; same order as enum mksstat_kern_stats_t */
static const char* const mksstat_kern_name_desc[MKSSTAT_KERN_COUNT][2] =
{
{ "vmw_execbuf_ioctl", "vmw_execbuf_ioctl" },
};
/**
* mksstat_init_record: Initializes an MKSGuestStatCounter-based record
* for the respective mksGuestStat index.
*
* @stat_idx: Index of the MKSGuestStatCounter-based mksGuestStat record.
* @pstat: Pointer to array of MKSGuestStatCounterTime.
* @pinfo: Pointer to array of MKSGuestStatInfoEntry.
* @pstrs: Pointer to current end of the name/description sequence.
* Return: Pointer to the new end of the names/description sequence.
*/
static inline char *mksstat_init_record(mksstat_kern_stats_t stat_idx,
MKSGuestStatCounterTime *pstat, MKSGuestStatInfoEntry *pinfo, char *pstrs)
{
char *const pstrd = pstrs + strlen(mksstat_kern_name_desc[stat_idx][0]) + 1;
strcpy(pstrs, mksstat_kern_name_desc[stat_idx][0]);
strcpy(pstrd, mksstat_kern_name_desc[stat_idx][1]);
pinfo[stat_idx].name.s = pstrs;
pinfo[stat_idx].description.s = pstrd;
pinfo[stat_idx].flags = MKS_GUEST_STAT_FLAG_NONE;
pinfo[stat_idx].stat.counter = (MKSGuestStatCounter *)&pstat[stat_idx];
return pstrd + strlen(mksstat_kern_name_desc[stat_idx][1]) + 1;
}
/**
* mksstat_init_record_time: Initializes an MKSGuestStatCounterTime-based record
* for the respective mksGuestStat index.
*
* @stat_idx: Index of the MKSGuestStatCounterTime-based mksGuestStat record.
* @pstat: Pointer to array of MKSGuestStatCounterTime.
* @pinfo: Pointer to array of MKSGuestStatInfoEntry.
* @pstrs: Pointer to current end of the name/description sequence.
* Return: Pointer to the new end of the names/description sequence.
*/
static inline char *mksstat_init_record_time(mksstat_kern_stats_t stat_idx,
MKSGuestStatCounterTime *pstat, MKSGuestStatInfoEntry *pinfo, char *pstrs)
{
char *const pstrd = pstrs + strlen(mksstat_kern_name_desc[stat_idx][0]) + 1;
strcpy(pstrs, mksstat_kern_name_desc[stat_idx][0]);
strcpy(pstrd, mksstat_kern_name_desc[stat_idx][1]);
pinfo[stat_idx].name.s = pstrs;
pinfo[stat_idx].description.s = pstrd;
pinfo[stat_idx].flags = MKS_GUEST_STAT_FLAG_TIME;
pinfo[stat_idx].stat.counterTime = &pstat[stat_idx];
return pstrd + strlen(mksstat_kern_name_desc[stat_idx][1]) + 1;
}
/**
* mksstat_init_kern_id: Creates a single mksGuestStat instance descriptor and
* kernel-internal counters. Adds PFN mapping to the hypervisor.
*
* Create a single mksGuestStat instance descriptor and corresponding structures
* for all kernel-internal counters. The corresponding PFNs are mapped with the
* hypervisor.
*
* @ppage: Output pointer to page containing the instance descriptor.
* Return: Zero on success, negative error code on error.
*/
static int mksstat_init_kern_id(struct page **ppage)
{
MKSGuestStatInstanceDescriptor *pdesc;
MKSGuestStatCounterTime *pstat;
MKSGuestStatInfoEntry *pinfo;
char *pstrs, *pstrs_acc;
/* Allocate pages for the kernel-internal instance descriptor */
struct page *page = alloc_pages(GFP_KERNEL | __GFP_ZERO, MKSSTAT_KERNEL_PAGES_ORDER);
if (!page)
return -ENOMEM;
pdesc = page_address(page);
pstat = vmw_mksstat_get_kern_pstat(pdesc);
pinfo = vmw_mksstat_get_kern_pinfo(pdesc);
pstrs = vmw_mksstat_get_kern_pstrs(pdesc);
/* Set up all kernel-internal counters and corresponding structures */
pstrs_acc = pstrs;
pstrs_acc = mksstat_init_record_time(MKSSTAT_KERN_EXECBUF, pstat, pinfo, pstrs_acc);
/* Add new counters above, in their order of appearance in mksstat_kern_stats_t */
BUG_ON(pstrs_acc - pstrs > PAGE_SIZE);
/* Set up the kernel-internal instance descriptor */
pdesc->reservedMBZ = 0;
pdesc->statStartVA = (uintptr_t)pstat;
pdesc->strsStartVA = (uintptr_t)pstrs;
pdesc->statLength = sizeof(*pstat) * MKSSTAT_KERN_COUNT;
pdesc->infoLength = sizeof(*pinfo) * MKSSTAT_KERN_COUNT;
pdesc->strsLength = pstrs_acc - pstrs;
snprintf(pdesc->description, ARRAY_SIZE(pdesc->description) - 1, "%s pid=%d",
MKSSTAT_KERNEL_DESCRIPTION, current->pid);
pdesc->statPPNs[0] = page_to_pfn(virt_to_page(pstat));
reset_ppn_array(pdesc->statPPNs + 1, ARRAY_SIZE(pdesc->statPPNs) - 1);
pdesc->infoPPNs[0] = page_to_pfn(virt_to_page(pinfo));
reset_ppn_array(pdesc->infoPPNs + 1, ARRAY_SIZE(pdesc->infoPPNs) - 1);
pdesc->strsPPNs[0] = page_to_pfn(virt_to_page(pstrs));
reset_ppn_array(pdesc->strsPPNs + 1, ARRAY_SIZE(pdesc->strsPPNs) - 1);
*ppage = page;
hypervisor_ppn_add((PPN64)page_to_pfn(page));
return 0;
}
/**
* vmw_mksstat_get_kern_slot: Acquires a slot for a single kernel-internal
* mksGuestStat instance descriptor.
*
* Find a slot for a single kernel-internal mksGuestStat instance descriptor.
* In case no such was already present, allocate a new one and set up a kernel-
* internal mksGuestStat instance descriptor for the former.
*
* @pid: Process for which a slot is sought.
* @dev_priv: Identifies the drm private device.
* Return: Non-negative slot on success, negative error code on error.
*/
int vmw_mksstat_get_kern_slot(pid_t pid, struct vmw_private *dev_priv)
{
const size_t base = (u32)hash_32(pid, MKSSTAT_CAPACITY_LOG2);
size_t i;
for (i = 0; i < ARRAY_SIZE(dev_priv->mksstat_kern_pids); ++i) {
const size_t slot = (i + base) % ARRAY_SIZE(dev_priv->mksstat_kern_pids);
/* Check if an instance descriptor for this pid is already present */
if (pid == (pid_t)atomic_read(&dev_priv->mksstat_kern_pids[slot]))
return (int)slot;
/* Set up a new instance descriptor for this pid */
if (!atomic_cmpxchg(&dev_priv->mksstat_kern_pids[slot], 0, MKSSTAT_PID_RESERVED)) {
const int ret = mksstat_init_kern_id(&dev_priv->mksstat_kern_pages[slot]);
if (!ret) {
/* Reset top-timer tracking for this slot */
dev_priv->mksstat_kern_top_timer[slot] = MKSSTAT_KERN_COUNT;
atomic_set(&dev_priv->mksstat_kern_pids[slot], pid);
return (int)slot;
}
atomic_set(&dev_priv->mksstat_kern_pids[slot], 0);
return ret;
}
}
return -ENOSPC;
}
#endif
/**
* vmw_mksstat_cleanup_descriptor: Frees a single userspace-originating
* mksGuestStat instance-descriptor page and unpins all related user pages.
*
* Unpin all user pages related to this instance descriptor and free
* the instance-descriptor page itself.
*
* @page: Page of the instance descriptor.
*/
static void vmw_mksstat_cleanup_descriptor(struct page *page)
{
MKSGuestStatInstanceDescriptor *pdesc = page_address(page);
size_t i;
for (i = 0; i < ARRAY_SIZE(pdesc->statPPNs) && pdesc->statPPNs[i] != INVALID_PPN64; ++i)
unpin_user_page(pfn_to_page(pdesc->statPPNs[i]));
for (i = 0; i < ARRAY_SIZE(pdesc->infoPPNs) && pdesc->infoPPNs[i] != INVALID_PPN64; ++i)
unpin_user_page(pfn_to_page(pdesc->infoPPNs[i]));
for (i = 0; i < ARRAY_SIZE(pdesc->strsPPNs) && pdesc->strsPPNs[i] != INVALID_PPN64; ++i)
unpin_user_page(pfn_to_page(pdesc->strsPPNs[i]));
__free_page(page);
}
/**
* vmw_mksstat_remove_all: Resets all mksGuestStat instance descriptors
* from the hypervisor.
*
* Discard all hypervisor PFN mappings, containing active mksGuestStat instance
* descriptors, unpin the related userspace pages and free the related kernel pages.
*
* @dev_priv: Identifies the drm private device.
* Return: Zero on success, negative error code on error.
*/
int vmw_mksstat_remove_all(struct vmw_private *dev_priv)
{
int ret = 0;
size_t i;
/* Discard all PFN mappings with the hypervisor */
hypervisor_ppn_reset_all();
/* Discard all userspace-originating instance descriptors and unpin all related pages */
for (i = 0; i < ARRAY_SIZE(dev_priv->mksstat_user_pids); ++i) {
const pid_t pid0 = (pid_t)atomic_read(&dev_priv->mksstat_user_pids[i]);
if (!pid0)
continue;
if (pid0 != MKSSTAT_PID_RESERVED) {
const pid_t pid1 = atomic_cmpxchg(&dev_priv->mksstat_user_pids[i], pid0, MKSSTAT_PID_RESERVED);
if (!pid1)
continue;
if (pid1 == pid0) {
struct page *const page = dev_priv->mksstat_user_pages[i];
BUG_ON(!page);
dev_priv->mksstat_user_pages[i] = NULL;
atomic_set(&dev_priv->mksstat_user_pids[i], 0);
vmw_mksstat_cleanup_descriptor(page);
continue;
}
}
ret = -EAGAIN;
}
#if IS_ENABLED(CONFIG_DRM_VMWGFX_MKSSTATS)
/* Discard all kernel-internal instance descriptors and free all related pages */
for (i = 0; i < ARRAY_SIZE(dev_priv->mksstat_kern_pids); ++i) {
const pid_t pid0 = (pid_t)atomic_read(&dev_priv->mksstat_kern_pids[i]);
if (!pid0)
continue;
if (pid0 != MKSSTAT_PID_RESERVED) {
const pid_t pid1 = atomic_cmpxchg(&dev_priv->mksstat_kern_pids[i], pid0, MKSSTAT_PID_RESERVED);
if (!pid1)
continue;
if (pid1 == pid0) {
struct page *const page = dev_priv->mksstat_kern_pages[i];
BUG_ON(!page);
dev_priv->mksstat_kern_pages[i] = NULL;
atomic_set(&dev_priv->mksstat_kern_pids[i], 0);
__free_pages(page, MKSSTAT_KERNEL_PAGES_ORDER);
continue;
}
}
ret = -EAGAIN;
}
#endif
return ret;
}
/**
* vmw_mksstat_reset_ioctl: Resets all mksGuestStat instance descriptors
* from the hypervisor.
*
* Discard all hypervisor PFN mappings, containing active mksGuestStat instance
* descriptors, unpin the related userspace pages and free the related kernel pages.
*
* @dev: Identifies the drm device.
* @data: Pointer to the ioctl argument.
* @file_priv: Identifies the caller; unused.
* Return: Zero on success, negative error code on error.
*/
int vmw_mksstat_reset_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct vmw_private *const dev_priv = vmw_priv(dev);
return vmw_mksstat_remove_all(dev_priv);
}
/**
* vmw_mksstat_add_ioctl: Creates a single userspace-originating mksGuestStat
* instance descriptor and registers that with the hypervisor.
*
* Create a hypervisor PFN mapping, containing a single mksGuestStat instance
* descriptor and pin the corresponding userspace pages.
*
* @dev: Identifies the drm device.
* @data: Pointer to the ioctl argument.
* @file_priv: Identifies the caller; unused.
* Return: Zero on success, negative error code on error.
*/
int vmw_mksstat_add_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_vmw_mksstat_add_arg *arg =
(struct drm_vmw_mksstat_add_arg *) data;
struct vmw_private *const dev_priv = vmw_priv(dev);
struct page *page;
MKSGuestStatInstanceDescriptor *pdesc;
const size_t num_pages_stat = vmw_num_pages(arg->stat_len);
const size_t num_pages_info = vmw_num_pages(arg->info_len);
const size_t num_pages_strs = vmw_num_pages(arg->strs_len);
long desc_len;
long nr_pinned_stat;
long nr_pinned_info;
long nr_pinned_strs;
struct page *pages_stat[ARRAY_SIZE(pdesc->statPPNs)];
struct page *pages_info[ARRAY_SIZE(pdesc->infoPPNs)];
struct page *pages_strs[ARRAY_SIZE(pdesc->strsPPNs)];
size_t i, slot;
arg->id = -1;
if (!arg->stat || !arg->info || !arg->strs)
return -EINVAL;
if (!arg->stat_len || !arg->info_len || !arg->strs_len)
return -EINVAL;
if (!arg->description)
return -EINVAL;
if (num_pages_stat > ARRAY_SIZE(pdesc->statPPNs) ||
num_pages_info > ARRAY_SIZE(pdesc->infoPPNs) ||
num_pages_strs > ARRAY_SIZE(pdesc->strsPPNs))
return -EINVAL;
/* Find an available slot in the mksGuestStats user array and reserve it */
for (slot = 0; slot < ARRAY_SIZE(dev_priv->mksstat_user_pids); ++slot)
if (!atomic_cmpxchg(&dev_priv->mksstat_user_pids[slot], 0, MKSSTAT_PID_RESERVED))
break;
if (slot == ARRAY_SIZE(dev_priv->mksstat_user_pids))
return -ENOSPC;
BUG_ON(dev_priv->mksstat_user_pages[slot]);
/* Allocate a page for the instance descriptor */
page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!page) {
atomic_set(&dev_priv->mksstat_user_pids[slot], 0);
return -ENOMEM;
}
/* Set up the instance descriptor */
pdesc = page_address(page);
pdesc->reservedMBZ = 0;
pdesc->statStartVA = arg->stat;
pdesc->strsStartVA = arg->strs;
pdesc->statLength = arg->stat_len;
pdesc->infoLength = arg->info_len;
pdesc->strsLength = arg->strs_len;
desc_len = strncpy_from_user(pdesc->description, u64_to_user_ptr(arg->description),
ARRAY_SIZE(pdesc->description) - 1);
if (desc_len < 0) {
atomic_set(&dev_priv->mksstat_user_pids[slot], 0);
return -EFAULT;
}
reset_ppn_array(pdesc->statPPNs, ARRAY_SIZE(pdesc->statPPNs));
reset_ppn_array(pdesc->infoPPNs, ARRAY_SIZE(pdesc->infoPPNs));
reset_ppn_array(pdesc->strsPPNs, ARRAY_SIZE(pdesc->strsPPNs));
/* Pin mksGuestStat user pages and store those in the instance descriptor */
nr_pinned_stat = pin_user_pages(arg->stat, num_pages_stat, FOLL_LONGTERM, pages_stat, NULL);
if (num_pages_stat != nr_pinned_stat)
goto err_pin_stat;
for (i = 0; i < num_pages_stat; ++i)
pdesc->statPPNs[i] = page_to_pfn(pages_stat[i]);
nr_pinned_info = pin_user_pages(arg->info, num_pages_info, FOLL_LONGTERM, pages_info, NULL);
if (num_pages_info != nr_pinned_info)
goto err_pin_info;
for (i = 0; i < num_pages_info; ++i)
pdesc->infoPPNs[i] = page_to_pfn(pages_info[i]);
nr_pinned_strs = pin_user_pages(arg->strs, num_pages_strs, FOLL_LONGTERM, pages_strs, NULL);
if (num_pages_strs != nr_pinned_strs)
goto err_pin_strs;
for (i = 0; i < num_pages_strs; ++i)
pdesc->strsPPNs[i] = page_to_pfn(pages_strs[i]);
/* Send the descriptor to the host via a hypervisor call. The mksGuestStat
pages will remain in use until the user requests a matching remove stats
or a stats reset occurs. */
hypervisor_ppn_add((PPN64)page_to_pfn(page));
dev_priv->mksstat_user_pages[slot] = page;
atomic_set(&dev_priv->mksstat_user_pids[slot], current->pid);
arg->id = slot;
DRM_DEV_INFO(dev->dev, "pid=%d arg.description='%.*s' id=%lu\n", current->pid, (int)desc_len, pdesc->description, slot);
return 0;
err_pin_strs:
if (nr_pinned_strs > 0)
unpin_user_pages(pages_strs, nr_pinned_strs);
err_pin_info:
if (nr_pinned_info > 0)
unpin_user_pages(pages_info, nr_pinned_info);
err_pin_stat:
if (nr_pinned_stat > 0)
unpin_user_pages(pages_stat, nr_pinned_stat);
atomic_set(&dev_priv->mksstat_user_pids[slot], 0);
__free_page(page);
return -ENOMEM;
}
/**
* vmw_mksstat_remove_ioctl: Removes a single userspace-originating mksGuestStat
* instance descriptor from the hypervisor.
*
* Discard a hypervisor PFN mapping, containing a single mksGuestStat instance
* descriptor and unpin the corresponding userspace pages.
*
* @dev: Identifies the drm device.
* @data: Pointer to the ioctl argument.
* @file_priv: Identifies the caller; unused.
* Return: Zero on success, negative error code on error.
*/
int vmw_mksstat_remove_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_vmw_mksstat_remove_arg *arg =
(struct drm_vmw_mksstat_remove_arg *) data;
struct vmw_private *const dev_priv = vmw_priv(dev);
const size_t slot = arg->id;
pid_t pid0;
if (slot >= ARRAY_SIZE(dev_priv->mksstat_user_pids))
return -EINVAL;
DRM_DEV_INFO(dev->dev, "pid=%d arg.id=%lu\n", current->pid, slot);
pid0 = atomic_read(&dev_priv->mksstat_user_pids[slot]);
if (!pid0)
return 0;
if (pid0 != MKSSTAT_PID_RESERVED) {
const pid_t pid1 = atomic_cmpxchg(&dev_priv->mksstat_user_pids[slot], pid0, MKSSTAT_PID_RESERVED);
if (!pid1)
return 0;
if (pid1 == pid0) {
struct page *const page = dev_priv->mksstat_user_pages[slot];
BUG_ON(!page);
dev_priv->mksstat_user_pages[slot] = NULL;
atomic_set(&dev_priv->mksstat_user_pids[slot], 0);
hypervisor_ppn_remove((PPN64)page_to_pfn(page));
vmw_mksstat_cleanup_descriptor(page);
return 0;
}
}
return -EAGAIN;
}
@@ -72,6 +72,9 @@ extern "C" {
#define DRM_VMW_GB_SURFACE_CREATE_EXT 27
#define DRM_VMW_GB_SURFACE_REF_EXT 28
#define DRM_VMW_MSG 29
#define DRM_VMW_MKSSTAT_RESET 30
#define DRM_VMW_MKSSTAT_ADD 31
#define DRM_VMW_MKSSTAT_REMOVE 32
/*************************************************************************/
/**
@@ -1236,6 +1239,44 @@ struct drm_vmw_msg_arg {
__u32 receive_len;
};
/**
* struct drm_vmw_mksstat_add_arg
*
* @stat: Pointer to user-space stat-counters array, page-aligned.
* @info: Pointer to user-space counter-infos array, page-aligned.
* @strs: Pointer to user-space stat strings, page-aligned.
* @stat_len: Length in bytes of stat-counters array.
* @info_len: Length in bytes of counter-infos array.
* @strs_len: Length in bytes of the stat strings, terminators included.
* @description: Pointer to instance descriptor string; will be truncated
* to MKS_GUEST_STAT_INSTANCE_DESC_LENGTH chars.
* @id: Output identifier of the produced record; -1 if error.
*
* Argument to the DRM_VMW_MKSSTAT_ADD ioctl.
*/
struct drm_vmw_mksstat_add_arg {
__u64 stat;
__u64 info;
__u64 strs;
__u64 stat_len;
__u64 info_len;
__u64 strs_len;
__u64 description;
__u64 id;
};
/**
* struct drm_vmw_mksstat_remove_arg
*
* @id: Identifier of the record being disposed, originally obtained through
* DRM_VMW_MKSSTAT_ADD ioctl.
*
* Argument to the DRM_VMW_MKSSTAT_REMOVE ioctl.
*/
struct drm_vmw_mksstat_remove_arg {
__u64 id;
};
#if defined(__cplusplus)
}
#endif
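And the matching teardown, again a hypothetical userspace sketch complementing the registration helper shown after the commit message above:

/* Hypothetical example: unregister a previously added instance by its slot id. */
#define DRM_IOCTL_VMW_MKSSTAT_REMOVE \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_MKSSTAT_REMOVE, struct drm_vmw_mksstat_remove_arg)

static int unregister_mksstats(int fd, uint64_t id)
{
	struct drm_vmw_mksstat_remove_arg arg = { .id = id };

	return ioctl(fd, DRM_IOCTL_VMW_MKSSTAT_REMOVE, &arg);
}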