Commit b9f19259 authored by Boris Brezillon, committed by Eric Anholt

drm/vc4: Add the DRM_IOCTL_VC4_GEM_MADVISE ioctl

This ioctl will allow us to purge inactive userspace buffers when the
system is running out of contiguous memory.

For now, the purge logic is rather simplistic: rather than releasing only
as many BOs as needed to satisfy the CMA allocation that just failed, it
purges every object in the purgeable pool as soon as a CMA allocation
failure occurs.

Note that the in-kernel BO cache is always purged before the purgeable
cache because those objects are known to be unused while objects marked
as purgeable by a userspace application/library might have to be
restored when they are marked back as unpurgeable, which can be
expensive.
Signed-off-by: Boris Brezillon <boris.brezillon@free-electrons.com>
Signed-off-by: Eric Anholt <eric@anholt.net>
Reviewed-by: Eric Anholt <eric@anholt.net>
Link: https://patchwork.freedesktop.org/patch/msgid/20171019125748.3152-1-boris.brezillon@free-electrons.com
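
To make the retry policy concrete, here is a minimal sketch of the allocation path the message above describes. The real code lives in vc4_bo.c, whose diff is collapsed below; vc4_bo_cache_purge() and vc4_bo_userspace_cache_purge() are assumed helper names for illustration, not verified signatures.

/* Sketch only: purge-on-CMA-failure, per the commit message above. */
static struct drm_gem_cma_object *
vc4_alloc_backing_mem(struct drm_device *dev, size_t size)
{
	struct drm_gem_cma_object *cma_obj;

	cma_obj = drm_gem_cma_create(dev, size);
	if (!IS_ERR(cma_obj))
		return cma_obj;

	/* First drop the in-kernel BO cache: those BOs are known to be
	 * unused, so purging them costs nothing.
	 */
	vc4_bo_cache_purge(dev);
	cma_obj = drm_gem_cma_create(dev, size);
	if (!IS_ERR(cma_obj))
		return cma_obj;

	/* Still failing: purge *every* BO userspace marked DONTNEED,
	 * not just enough of them to satisfy this one request.
	 */
	vc4_bo_userspace_cache_purge(dev);
	return drm_gem_cma_create(dev, size);
}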
parent 06e733e4
This diff is collapsed. (The collapsed file is drivers/gpu/drm/vc4/vc4_bo.c, which implements the purgeable pool, the usecnt helpers and vc4_fault() referenced by the hunks below.)
drivers/gpu/drm/vc4/vc4_drv.c
@@ -100,6 +100,7 @@ static int vc4_get_param_ioctl(struct drm_device *dev, void *data,
case DRM_VC4_PARAM_SUPPORTS_ETC1:
case DRM_VC4_PARAM_SUPPORTS_THREADED_FS:
case DRM_VC4_PARAM_SUPPORTS_FIXED_RCL_ORDER:
case DRM_VC4_PARAM_SUPPORTS_MADVISE:
args->value = true;
break;
default:
@@ -117,6 +118,12 @@ static void vc4_lastclose(struct drm_device *dev)
drm_fbdev_cma_restore_mode(vc4->fbdev);
}
static const struct vm_operations_struct vc4_vm_ops = {
.fault = vc4_fault,
.open = drm_gem_vm_open,
.close = drm_gem_vm_close,
};
static const struct file_operations vc4_drm_fops = {
.owner = THIS_MODULE,
.open = drm_open,
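
vc4_vm_ops above routes CPU faults through vc4_fault(), whose body is in the collapsed vc4_bo.c diff. A plausible minimal version, assuming the only way to fault on a vc4 mmap is to touch a BO whose pages were purged:

/* Sketch: a purged BO's backing memory is gone, so a fault on its
 * mapping cannot be serviced; the offending process gets SIGBUS.
 */
int vc4_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct vc4_bo *bo = to_vc4_bo(obj);

	mutex_lock(&bo->madv_lock);
	WARN_ON(bo->madv != __VC4_MADV_PURGED);
	mutex_unlock(&bo->madv_lock);

	return VM_FAULT_SIGBUS;
}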
@@ -142,6 +149,7 @@ static const struct drm_ioctl_desc vc4_drm_ioctls[] = {
DRM_IOCTL_DEF_DRV(VC4_SET_TILING, vc4_set_tiling_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VC4_GET_TILING, vc4_get_tiling_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VC4_LABEL_BO, vc4_label_bo_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VC4_GEM_MADVISE, vc4_gem_madvise_ioctl, DRM_RENDER_ALLOW),
};
static struct drm_driver vc4_drm_driver = {
@@ -166,7 +174,7 @@ static struct drm_driver vc4_drm_driver = {
.gem_create_object = vc4_create_object,
.gem_free_object_unlocked = vc4_free_object,
-	.gem_vm_ops = &drm_gem_cma_vm_ops,
+	.gem_vm_ops = &vc4_vm_ops,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
......
drivers/gpu/drm/vc4/vc4_drv.h
@@ -74,6 +74,19 @@ struct vc4_dev {
/* Protects bo_cache and bo_labels. */
struct mutex bo_lock;
/* Purgeable BO pool. All BOs in this pool can have their memory
* reclaimed if the driver is unable to allocate new BOs. We also
* keep stats related to the purge mechanism here.
*/
struct {
struct list_head list;
unsigned int num;
size_t size;
unsigned int purged_num;
size_t purged_size;
struct mutex lock;
} purgeable;
uint64_t dma_fence_context;
/* Sequence number for the last job queued in bin_job_list.
@@ -192,6 +205,16 @@ struct vc4_bo {
* for user-allocated labels.
*/
int label;
/* Count the number of active users. This is needed to determine
* whether we can move the BO to the purgeable list or not (when the BO
* is used by the GPU or the display engine we can't purge it).
*/
refcount_t usecnt;
/* Store purgeable/purged state here */
u32 madv;
struct mutex madv_lock;
};
static inline struct vc4_bo *
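
The usecnt field above is managed by vc4_bo_inc_usecnt()/vc4_bo_dec_usecnt(), declared further down but implemented in the collapsed vc4_bo.c diff. A sketch of the retain path, assuming only a VC4_MADV_WILLNEED BO may gain its first user:

/* Sketch of the retain path; the details live in the collapsed diff. */
int vc4_bo_inc_usecnt(struct vc4_bo *bo)
{
	int ret;

	/* Fast path: an already-retained BO cannot be purgeable or
	 * purged, so just take another reference.
	 */
	if (refcount_inc_not_zero(&bo->usecnt))
		return 0;

	mutex_lock(&bo->madv_lock);
	if (bo->madv == VC4_MADV_WILLNEED) {
		refcount_inc(&bo->usecnt);
		ret = 0;
	} else {
		/* Purgeable, already purged, or madvise-unsupported:
		 * refuse to hand out a first reference.
		 */
		ret = -EINVAL;
	}
	mutex_unlock(&bo->madv_lock);

	return ret;
}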
@@ -503,6 +526,7 @@ int vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int vc4_label_bo_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int vc4_fault(struct vm_fault *vmf);
int vc4_mmap(struct file *filp, struct vm_area_struct *vma);
struct reservation_object *vc4_prime_res_obj(struct drm_gem_object *obj);
int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
@@ -513,6 +537,10 @@ void *vc4_prime_vmap(struct drm_gem_object *obj);
int vc4_bo_cache_init(struct drm_device *dev);
void vc4_bo_cache_destroy(struct drm_device *dev);
int vc4_bo_stats_debugfs(struct seq_file *m, void *arg);
int vc4_bo_inc_usecnt(struct vc4_bo *bo);
void vc4_bo_dec_usecnt(struct vc4_bo *bo);
void vc4_bo_add_to_purgeable_pool(struct vc4_bo *bo);
void vc4_bo_remove_from_purgeable_pool(struct vc4_bo *bo);
/* vc4_crtc.c */
extern struct platform_driver vc4_crtc_driver;
@@ -557,6 +585,8 @@ void vc4_job_handle_completed(struct vc4_dev *vc4);
int vc4_queue_seqno_cb(struct drm_device *dev,
struct vc4_seqno_cb *cb, uint64_t seqno,
void (*func)(struct vc4_seqno_cb *cb));
int vc4_gem_madvise_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
/* vc4_hdmi.c */
extern struct platform_driver vc4_hdmi_driver;
......
drivers/gpu/drm/vc4/vc4_gem.c
@@ -188,11 +188,22 @@ vc4_save_hang_state(struct drm_device *dev)
continue;
for (j = 0; j < exec[i]->bo_count; j++) {
bo = to_vc4_bo(&exec[i]->bo[j]->base);
/* Retain BOs just in case they were marked purgeable.
* This prevents the BO from being purged before
* someone had a chance to dump the hang state.
*/
WARN_ON(!refcount_read(&bo->usecnt));
refcount_inc(&bo->usecnt);
drm_gem_object_get(&exec[i]->bo[j]->base);
kernel_state->bo[j + prev_idx] = &exec[i]->bo[j]->base;
}
list_for_each_entry(bo, &exec[i]->unref_list, unref_head) {
/* No need to retain BOs coming from the ->unref_list
* because they are naturally unpurgeable.
*/
drm_gem_object_get(&bo->base.base);
kernel_state->bo[j + prev_idx] = &bo->base.base;
j++;
@@ -233,6 +244,26 @@ vc4_save_hang_state(struct drm_device *dev)
state->fdbgs = V3D_READ(V3D_FDBGS);
state->errstat = V3D_READ(V3D_ERRSTAT);
/* We need to turn purgeable BOs into unpurgeable ones so that
* userspace has a chance to dump the hang state before the kernel
* decides to purge those BOs.
* Note that BO consistency at dump time cannot be guaranteed. For
* example, if the owner of these BOs decides to re-use them or mark
* them purgeable again there's nothing we can do to prevent it.
*/
for (i = 0; i < kernel_state->user_state.bo_count; i++) {
struct vc4_bo *bo = to_vc4_bo(kernel_state->bo[i]);
if (bo->madv == __VC4_MADV_NOTSUPP)
continue;
mutex_lock(&bo->madv_lock);
if (!WARN_ON(bo->madv == __VC4_MADV_PURGED))
bo->madv = VC4_MADV_WILLNEED;
refcount_dec(&bo->usecnt);
mutex_unlock(&bo->madv_lock);
}
spin_lock_irqsave(&vc4->job_lock, irqflags);
if (vc4->hang_state) {
spin_unlock_irqrestore(&vc4->job_lock, irqflags);
@@ -639,9 +670,6 @@ vc4_queue_submit(struct drm_device *dev, struct vc4_exec_info *exec,
* The command validator needs to reference BOs by their index within
* the submitted job's BO list. This does the validation of the job's
* BO list and reference counting for the lifetime of the job.
- *
- * Note that this function doesn't need to unreference the BOs on
- * failure, because that will happen at vc4_complete_exec() time.
*/
static int
vc4_cl_lookup_bos(struct drm_device *dev,
@@ -693,16 +721,47 @@ vc4_cl_lookup_bos(struct drm_device *dev,
DRM_DEBUG("Failed to look up GEM BO %d: %d\n",
i, handles[i]);
ret = -EINVAL;
-			spin_unlock(&file_priv->table_lock);
-			goto fail;
+			break;
}
drm_gem_object_get(bo);
exec->bo[i] = (struct drm_gem_cma_object *)bo;
}
spin_unlock(&file_priv->table_lock);
if (ret)
goto fail_put_bo;
for (i = 0; i < exec->bo_count; i++) {
ret = vc4_bo_inc_usecnt(to_vc4_bo(&exec->bo[i]->base));
if (ret)
goto fail_dec_usecnt;
}
kvfree(handles);
return 0;
fail_dec_usecnt:
/* Decrease usecnt on acquired objects.
* We cannot rely on vc4_complete_exec() to release resources here,
* because vc4_complete_exec() has no information about which BO has
* had its ->usecnt incremented.
* To make things easier we just free everything explicitly and set
* exec->bo to NULL so that vc4_complete_exec() skips the 'BO release'
* step.
*/
for (i-- ; i >= 0; i--)
vc4_bo_dec_usecnt(to_vc4_bo(&exec->bo[i]->base));
fail_put_bo:
/* Release any reference to acquired objects. */
for (i = 0; i < exec->bo_count && exec->bo[i]; i++)
drm_gem_object_put_unlocked(&exec->bo[i]->base);
fail:
kvfree(handles);
kvfree(exec->bo);
exec->bo = NULL;
return ret;
}
@@ -833,8 +892,12 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
dma_fence_signal(exec->fence);
if (exec->bo) {
-		for (i = 0; i < exec->bo_count; i++)
+		for (i = 0; i < exec->bo_count; i++) {
struct vc4_bo *bo = to_vc4_bo(&exec->bo[i]->base);
vc4_bo_dec_usecnt(bo);
drm_gem_object_put_unlocked(&exec->bo[i]->base);
}
kvfree(exec->bo);
}
@@ -1098,6 +1161,9 @@ vc4_gem_init(struct drm_device *dev)
INIT_WORK(&vc4->job_done_work, vc4_job_done_work);
mutex_init(&vc4->power_lock);
INIT_LIST_HEAD(&vc4->purgeable.list);
mutex_init(&vc4->purgeable.lock);
}
void
@@ -1121,3 +1187,81 @@ vc4_gem_destroy(struct drm_device *dev)
if (vc4->hang_state)
vc4_free_hang_state(dev, vc4->hang_state);
}
int vc4_gem_madvise_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_vc4_gem_madvise *args = data;
struct drm_gem_object *gem_obj;
struct vc4_bo *bo;
int ret;
switch (args->madv) {
case VC4_MADV_DONTNEED:
case VC4_MADV_WILLNEED:
break;
default:
return -EINVAL;
}
if (args->pad != 0)
return -EINVAL;
gem_obj = drm_gem_object_lookup(file_priv, args->handle);
if (!gem_obj) {
DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
return -ENOENT;
}
bo = to_vc4_bo(gem_obj);
/* Only BOs exposed to userspace can be purged. */
if (bo->madv == __VC4_MADV_NOTSUPP) {
DRM_DEBUG("madvise not supported on this BO\n");
ret = -EINVAL;
goto out_put_gem;
}
/* Not sure it's safe to purge imported BOs. Let's just assume it's
* not until proven otherwise.
*/
if (gem_obj->import_attach) {
DRM_DEBUG("madvise not supported on imported BOs\n");
ret = -EINVAL;
goto out_put_gem;
}
mutex_lock(&bo->madv_lock);
if (args->madv == VC4_MADV_DONTNEED && bo->madv == VC4_MADV_WILLNEED &&
!refcount_read(&bo->usecnt)) {
/* If the BO is about to be marked as purgeable, is not used
* and is not already purgeable or purged, add it to the
* purgeable list.
*/
vc4_bo_add_to_purgeable_pool(bo);
} else if (args->madv == VC4_MADV_WILLNEED &&
bo->madv == VC4_MADV_DONTNEED &&
!refcount_read(&bo->usecnt)) {
/* The BO has not been purged yet, just remove it from
* the purgeable list.
*/
vc4_bo_remove_from_purgeable_pool(bo);
}
/* Save the purged state. */
args->retained = bo->madv != __VC4_MADV_PURGED;
/* Update internal madv state only if the bo was not purged. */
if (bo->madv != __VC4_MADV_PURGED)
bo->madv = args->madv;
mutex_unlock(&bo->madv_lock);
ret = 0;
out_put_gem:
drm_gem_object_put_unlocked(gem_obj);
return ret;
}
drivers/gpu/drm/vc4/vc4_plane.c
@@ -23,6 +23,7 @@
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_plane_helper.h>
#include "uapi/drm/vc4_drm.h"
#include "vc4_drv.h"
#include "vc4_regs.h"
@@ -774,21 +775,40 @@ static int vc4_prepare_fb(struct drm_plane *plane,
{
struct vc4_bo *bo;
struct dma_fence *fence;
int ret;
if ((plane->state->fb == state->fb) || !state->fb)
return 0;
bo = to_vc4_bo(&drm_fb_cma_get_gem_obj(state->fb, 0)->base);
ret = vc4_bo_inc_usecnt(bo);
if (ret)
return ret;
fence = reservation_object_get_excl_rcu(bo->resv);
drm_atomic_set_fence_for_plane(state, fence);
return 0;
}
static void vc4_cleanup_fb(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct vc4_bo *bo;
if (plane->state->fb == state->fb || !state->fb)
return;
bo = to_vc4_bo(&drm_fb_cma_get_gem_obj(state->fb, 0)->base);
vc4_bo_dec_usecnt(bo);
}
static const struct drm_plane_helper_funcs vc4_plane_helper_funcs = {
.atomic_check = vc4_plane_atomic_check,
.atomic_update = vc4_plane_atomic_update,
.prepare_fb = vc4_prepare_fb,
.cleanup_fb = vc4_cleanup_fb,
};
static void vc4_plane_destroy(struct drm_plane *plane)
......
include/uapi/drm/vc4_drm.h
@@ -41,6 +41,7 @@ extern "C" {
#define DRM_VC4_SET_TILING 0x08
#define DRM_VC4_GET_TILING 0x09
#define DRM_VC4_LABEL_BO 0x0a
#define DRM_VC4_GEM_MADVISE 0x0b
#define DRM_IOCTL_VC4_SUBMIT_CL DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_SUBMIT_CL, struct drm_vc4_submit_cl)
#define DRM_IOCTL_VC4_WAIT_SEQNO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_WAIT_SEQNO, struct drm_vc4_wait_seqno)
@@ -53,6 +54,7 @@ extern "C" {
#define DRM_IOCTL_VC4_SET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_SET_TILING, struct drm_vc4_set_tiling)
#define DRM_IOCTL_VC4_GET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GET_TILING, struct drm_vc4_get_tiling)
#define DRM_IOCTL_VC4_LABEL_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_LABEL_BO, struct drm_vc4_label_bo)
#define DRM_IOCTL_VC4_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GEM_MADVISE, struct drm_vc4_gem_madvise)
struct drm_vc4_submit_rcl_surface {
__u32 hindex; /* Handle index, or ~0 if not present. */
@@ -305,6 +307,7 @@ struct drm_vc4_get_hang_state {
#define DRM_VC4_PARAM_SUPPORTS_ETC1 4
#define DRM_VC4_PARAM_SUPPORTS_THREADED_FS 5
#define DRM_VC4_PARAM_SUPPORTS_FIXED_RCL_ORDER 6
#define DRM_VC4_PARAM_SUPPORTS_MADVISE 7
struct drm_vc4_get_param {
__u32 param;
@@ -333,6 +336,22 @@ struct drm_vc4_label_bo {
__u64 name;
};
/*
* States prefixed with '__' are internal states and cannot be passed to the
* DRM_IOCTL_VC4_GEM_MADVISE ioctl.
*/
#define VC4_MADV_WILLNEED 0
#define VC4_MADV_DONTNEED 1
#define __VC4_MADV_PURGED 2
#define __VC4_MADV_NOTSUPP 3
struct drm_vc4_gem_madvise {
__u32 handle;
__u32 madv;
__u32 retained;
__u32 pad;
};
#if defined(__cplusplus)
}
#endif
......
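
From userspace, a BO cache built on this uapi would first probe DRM_VC4_PARAM_SUPPORTS_MADVISE, then flip idle BOs to DONTNEED and check retained when flipping them back. A hedged sketch using libdrm's drmIoctl(); the header include path and the two wrapper names are this example's assumptions:

#include <stdint.h>
#include <xf86drm.h>
#include "vc4_drm.h"	/* the uapi header patched above */

/* Probe for madvise support before relying on it. */
static int vc4_supports_madvise(int fd)
{
	struct drm_vc4_get_param p = {
		.param = DRM_VC4_PARAM_SUPPORTS_MADVISE,
	};

	return drmIoctl(fd, DRM_IOCTL_VC4_GET_PARAM, &p) == 0 && p.value;
}

/* Mark a BO purgeable (idle) or reclaim it (about to be reused).
 * Returns -1 on ioctl failure; on success, *retained tells whether
 * the old contents survived (0 means the kernel purged the BO and
 * it must be reinitialized before use).
 */
static int vc4_bo_madvise(int fd, uint32_t handle, uint32_t madv,
			  uint32_t *retained)
{
	struct drm_vc4_gem_madvise args = {
		.handle = handle,
		.madv = madv,	/* VC4_MADV_WILLNEED or VC4_MADV_DONTNEED */
	};

	if (drmIoctl(fd, DRM_IOCTL_VC4_GEM_MADVISE, &args))
		return -1;

	*retained = args.retained;
	return 0;
}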