Commit 204226de authored by Rob Clark

drm/msm: Add GPU memory traces

Perfetto can use these traces to track global and per-process GPU memory
usage.
Signed-off-by: Rob Clark <robdclark@chromium.org>
Patchwork: https://patchwork.freedesktop.org/patch/580854/
parent 593f1dd4
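The TRACE_GPU_MEM option selected below provides the gpu_mem_total tracepoint that the new code calls. For reference, its declaration in include/trace/events/gpu_mem.h looks roughly like this (paraphrased and abridged for context, with the header-guard and TRACE_SYSTEM boilerplate omitted; it is not part of this patch):

TRACE_EVENT(gpu_mem_total,

	/* gpu_id: which GPU the counter belongs to (msm passes 0).
	 * pid:    0 for the device-global total, otherwise the process
	 *         whose per-context total changed.
	 * size:   the new total, in bytes.
	 */
	TP_PROTO(uint32_t gpu_id, uint32_t pid, uint64_t size),

	TP_ARGS(gpu_id, pid, size),

	TP_STRUCT__entry(
		__field(uint32_t, gpu_id)
		__field(uint32_t, pid)
		__field(uint64_t, size)
	),

	TP_fast_assign(
		__entry->gpu_id = gpu_id;
		__entry->pid = pid;
		__entry->size = size;
	),

	TP_printk("gpu_id=%u pid=%u size=%llu",
		__entry->gpu_id, __entry->pid, __entry->size)
);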
@@ -33,6 +33,7 @@ config DRM_MSM
select PM_OPP
select NVMEM
select PM_GENERIC_DOMAINS
select TRACE_GPU_MEM
help
DRM/KMS driver for MSM/snapdragon.
@@ -127,6 +127,11 @@ struct msm_drm_private {
struct msm_rd_state *hangrd; /* debugfs to dump hanging submits */
struct msm_perf_state *perf;
/**
* total_mem: Total/global amount of memory backing GEM objects.
*/
atomic64_t total_mem;
/**
* List of all GEM objects (mainly for debugfs, protected by obj_lock
* (acquire before per GEM object lock)
@@ -12,6 +12,9 @@
#include <linux/pfn_t.h>
#include <drm/drm_prime.h>
#include <drm/drm_file.h>
#include <trace/events/gpu_mem.h>
#include "msm_drv.h"
#include "msm_fence.h"
@@ -33,6 +36,34 @@ static bool use_pages(struct drm_gem_object *obj)
return !msm_obj->vram_node;
}
static void update_device_mem(struct msm_drm_private *priv, ssize_t size)
{
uint64_t total_mem = atomic64_add_return(size, &priv->total_mem);
trace_gpu_mem_total(0, 0, total_mem);
}
static void update_ctx_mem(struct drm_file *file, ssize_t size)
{
struct msm_file_private *ctx = file->driver_priv;
uint64_t ctx_mem = atomic64_add_return(size, &ctx->ctx_mem);
rcu_read_lock(); /* Locks file->pid! */
trace_gpu_mem_total(0, pid_nr(file->pid), ctx_mem);
rcu_read_unlock();
}
static int msm_gem_open(struct drm_gem_object *obj, struct drm_file *file)
{
update_ctx_mem(file, obj->size);
return 0;
}
static void msm_gem_close(struct drm_gem_object *obj, struct drm_file *file)
{
update_ctx_mem(file, -obj->size);
}
/*
* Cache sync.. this is a bit over-complicated, to fit dma-mapping
* API. Really GPU cache is out of scope here (handled on cmdstream)
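The update_device_mem() and update_ctx_mem() helpers added above report the same tracepoint with different pid arguments: pid 0 carries the device-global total, a non-zero pid carries the owning process's per-context total. Beyond Perfetto, the counters can be watched directly through ftrace; a minimal userspace sketch along the following lines works, assuming tracefs is mounted at /sys/kernel/tracing and the kernel was built with CONFIG_TRACE_GPU_MEM (illustration only, not part of the patch):

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Enable just the gpu_mem_total event (requires root). */
	FILE *f = fopen("/sys/kernel/tracing/events/gpu_mem/gpu_mem_total/enable", "w");

	if (f) {
		fputs("1\n", f);
		fclose(f);
	}

	/* Stream the trace; pid=0 lines carry the global total (total_mem),
	 * non-zero pids carry the per-process total (ctx_mem). */
	f = fopen("/sys/kernel/tracing/trace_pipe", "r");
	if (!f) {
		perror("trace_pipe");
		return 1;
	}

	char line[512];
	while (fgets(line, sizeof(line), f))
		if (strstr(line, "gpu_mem_total"))
			fputs(line, stdout);

	fclose(f);
	return 0;
}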
@@ -156,6 +187,8 @@ static struct page **get_pages(struct drm_gem_object *obj)
return p;
}
update_device_mem(dev->dev_private, obj->size);
msm_obj->pages = p;
msm_obj->sgt = drm_prime_pages_to_sg(obj->dev, p, npages);
@@ -209,6 +242,8 @@ static void put_pages(struct drm_gem_object *obj)
msm_obj->sgt = NULL;
}
update_device_mem(obj->dev->dev_private, -obj->size);
if (use_pages(obj))
drm_gem_put_pages(obj, msm_obj->pages, true, false);
else
@@ -1118,6 +1153,8 @@ static const struct vm_operations_struct vm_ops = {
static const struct drm_gem_object_funcs msm_gem_object_funcs = {
.free = msm_gem_free_object,
.open = msm_gem_open,
.close = msm_gem_close,
.pin = msm_gem_prime_pin,
.unpin = msm_gem_prime_unpin,
.get_sg_table = msm_gem_prime_get_sg_table,
@@ -428,6 +428,14 @@ struct msm_file_private {
* level.
*/
struct drm_sched_entity *entities[NR_SCHED_PRIORITIES * MSM_GPU_MAX_RINGS];
/**
* ctx_mem:
*
* Total amount of memory of GEM buffers with handles attached for
* this context.
*/
atomic64_t ctx_mem;
};
/**