Commit 4eb39974 authored by Dave Airlie

Merge tag 'drm-misc-fixes-2024-08-01' of https://gitlab.freedesktop.org/drm/misc/kernel into drm-fixes

A couple of drm_panic fixes, several v3d fixes to increase the safety of
the new timestamp API, several vmwgfx fixes for various modesetting
issues, PM fixes for ast, async-flip improvements, and two nouveau fixes
for resource refcounting and buffer placement.
Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Maxime Ripard <mripard@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240801-interesting-antique-bat-2fe4c0@houat
parents 7b9b7651 9c685f61
@@ -268,6 +268,7 @@ config DRM_EXEC
 config DRM_GPUVM
         tristate
         depends on DRM
+        select DRM_EXEC
         help
           GPU-VM representation providing helpers to manage a GPUs virtual
           address space
......
@@ -158,7 +158,14 @@ void ast_dp_launch(struct drm_device *dev)
                                ASTDP_HOST_EDID_READ_DONE);
 }
 
+bool ast_dp_power_is_on(struct ast_device *ast)
+{
+        u8 vgacre3;
+
+        vgacre3 = ast_get_index_reg(ast, AST_IO_VGACRI, 0xe3);
+
+        return !(vgacre3 & AST_DP_PHY_SLEEP);
+}
+
 void ast_dp_power_on_off(struct drm_device *dev, bool on)
 {
......
@@ -391,6 +391,11 @@ static int ast_drm_freeze(struct drm_device *dev)
 
 static int ast_drm_thaw(struct drm_device *dev)
 {
+        struct ast_device *ast = to_ast_device(dev);
+
+        ast_enable_vga(ast->ioregs);
+        ast_open_key(ast->ioregs);
+        ast_enable_mmio(dev->dev, ast->ioregs);
         ast_post_gpu(dev);
 
         return drm_mode_config_helper_resume(dev);
......
@@ -472,6 +472,7 @@ void ast_init_3rdtx(struct drm_device *dev);
 bool ast_astdp_is_connected(struct ast_device *ast);
 int ast_astdp_read_edid(struct drm_device *dev, u8 *ediddata);
 void ast_dp_launch(struct drm_device *dev);
+bool ast_dp_power_is_on(struct ast_device *ast);
 void ast_dp_power_on_off(struct drm_device *dev, bool no);
 void ast_dp_set_on_off(struct drm_device *dev, bool no);
 void ast_dp_set_mode(struct drm_crtc *crtc, struct ast_vbios_mode_info *vbios_mode);
......
@@ -28,6 +28,7 @@
  * Authors: Dave Airlie <airlied@redhat.com>
  */
 
+#include <linux/delay.h>
 #include <linux/export.h>
 #include <linux/pci.h>
 
@@ -1687,11 +1688,35 @@ static int ast_astdp_connector_helper_detect_ctx(struct drm_connector *connector,
                                                  struct drm_modeset_acquire_ctx *ctx,
                                                  bool force)
 {
+        struct drm_device *dev = connector->dev;
         struct ast_device *ast = to_ast_device(connector->dev);
+        enum drm_connector_status status = connector_status_disconnected;
+        struct drm_connector_state *connector_state = connector->state;
+        bool is_active = false;
+
+        mutex_lock(&ast->modeset_lock);
+
+        if (connector_state && connector_state->crtc) {
+                struct drm_crtc_state *crtc_state = connector_state->crtc->state;
+
+                if (crtc_state && crtc_state->active)
+                        is_active = true;
+        }
+
+        if (!is_active && !ast_dp_power_is_on(ast)) {
+                ast_dp_power_on_off(dev, true);
+                msleep(50);
+        }
 
         if (ast_astdp_is_connected(ast))
-                return connector_status_connected;
-        return connector_status_disconnected;
+                status = connector_status_connected;
+
+        if (!is_active && status == connector_status_disconnected)
+                ast_dp_power_on_off(dev, false);
+
+        mutex_unlock(&ast->modeset_lock);
+
+        return status;
 }
 
 static const struct drm_connector_helper_funcs ast_astdp_connector_helper_funcs = {
......
@@ -1070,7 +1070,10 @@ int drm_atomic_set_property(struct drm_atomic_state *state,
                         break;
                 }
 
-                if (async_flip && prop != config->prop_fb_id) {
+                if (async_flip &&
+                    prop != config->prop_fb_id &&
+                    prop != config->prop_in_fence_fd &&
+                    prop != config->prop_fb_damage_clips) {
                         ret = drm_atomic_plane_get_property(plane, plane_state,
                                                             prop, &old_val);
                         ret = drm_atomic_check_prop_changes(ret, old_val, prop_value, prop);
......
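The widened whitelist above means an async atomic flip may now carry the
IN_FENCE_FD and FB_DAMAGE_CLIPS plane properties alongside FB_ID. A minimal
userspace sketch of such a commit via libdrm follows; it assumes the property
IDs (fb_id_prop, in_fence_prop) were already resolved elsewhere, e.g. with
drmModeObjectGetProperties(), and that plane_id, fb_id and fence_fd come from
the caller. Whether the driver accepts DRM_MODE_PAGE_FLIP_ASYNC at all is
hardware-dependent.

#include <errno.h>
#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

/* Illustrative only: property IDs and handles are assumed resolved. */
static int async_flip(int fd, uint32_t plane_id,
                      uint32_t fb_id_prop, uint32_t in_fence_prop,
                      uint32_t fb_id, int fence_fd)
{
        drmModeAtomicReqPtr req = drmModeAtomicAlloc();
        int ret;

        if (!req)
                return -ENOMEM;

        /* FB_ID was always allowed in async flips ... */
        drmModeAtomicAddProperty(req, plane_id, fb_id_prop, fb_id);
        /* ... and IN_FENCE_FD is newly permitted by the change above. */
        if (fence_fd >= 0)
                drmModeAtomicAddProperty(req, plane_id, in_fence_prop,
                                         (uint64_t)fence_fd);

        ret = drmModeAtomicCommit(fd, req,
                                  DRM_MODE_ATOMIC_NONBLOCK |
                                  DRM_MODE_PAGE_FLIP_ASYNC, NULL);
        drmModeAtomicFree(req);
        return ret;
}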
@@ -355,7 +355,7 @@ int drm_client_buffer_vmap_local(struct drm_client_buffer *buffer,
 
 err_drm_gem_vmap_unlocked:
         drm_gem_unlock(gem);
-        return 0;
+        return ret;
 }
 EXPORT_SYMBOL(drm_client_buffer_vmap_local);
......
@@ -624,6 +624,17 @@ static void drm_fb_helper_add_damage_clip(struct drm_fb_helper *helper, u32 x, u
 static void drm_fb_helper_damage(struct drm_fb_helper *helper, u32 x, u32 y,
                                  u32 width, u32 height)
 {
+        /*
+         * This function may be invoked by panic() to flush the frame
+         * buffer, where all CPUs except the panic CPU are stopped.
+         * During the following schedule_work(), the panic CPU needs
+         * the worker_pool lock, which might be held by a stopped CPU,
+         * causing schedule_work() and panic() to block. Return early on
+         * oops_in_progress to prevent this blocking.
+         */
+        if (oops_in_progress)
+                return;
+
         drm_fb_helper_add_damage_clip(helper, x, y, width, height);
 
         schedule_work(&helper->damage_work);
......
@@ -414,6 +414,12 @@ static const struct dmi_system_id orientation_data[] = {
                   DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "ONE XPLAYER"),
                 },
                 .driver_data = (void *)&lcd1600x2560_leftside_up,
+        }, {    /* OrangePi Neo */
+                .matches = {
+                  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "OrangePi"),
+                  DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "NEO-01"),
+                },
+                .driver_data = (void *)&lcd1200x1920_rightside_up,
         }, {    /* Samsung GalaxyBook 10.6 */
                 .matches = {
                   DMI_EXACT_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
......
@@ -64,7 +64,8 @@ struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev,
          * to the caller, instead of a normal nouveau_bo ttm reference. */
         ret = drm_gem_object_init(dev, &nvbo->bo.base, size);
         if (ret) {
-                nouveau_bo_ref(NULL, &nvbo);
+                drm_gem_object_release(&nvbo->bo.base);
+                kfree(nvbo);
                 obj = ERR_PTR(-ENOMEM);
                 goto unlock;
         }
......
@@ -1803,6 +1803,7 @@ nouveau_uvmm_bo_validate(struct drm_gpuvm_bo *vm_bo, struct drm_exec *exec)
 {
         struct nouveau_bo *nvbo = nouveau_gem_object(vm_bo->obj);
 
+        nouveau_bo_placement_set(nvbo, nvbo->valid_domains, 0);
         return nouveau_bo_validate(nvbo, true, false);
 }
......
@@ -565,6 +565,10 @@ void v3d_mmu_insert_ptes(struct v3d_bo *bo);
 void v3d_mmu_remove_ptes(struct v3d_bo *bo);
 
 /* v3d_sched.c */
+void v3d_timestamp_query_info_free(struct v3d_timestamp_query_info *query_info,
+                                   unsigned int count);
+void v3d_performance_query_info_free(struct v3d_performance_query_info *query_info,
+                                     unsigned int count);
 void v3d_job_update_stats(struct v3d_job *job, enum v3d_queue queue);
 int v3d_sched_init(struct v3d_dev *v3d);
 void v3d_sched_fini(struct v3d_dev *v3d);
......
@@ -73,24 +73,44 @@ v3d_sched_job_free(struct drm_sched_job *sched_job)
         v3d_job_cleanup(job);
 }
 
+void
+v3d_timestamp_query_info_free(struct v3d_timestamp_query_info *query_info,
+                              unsigned int count)
+{
+        if (query_info->queries) {
+                unsigned int i;
+
+                for (i = 0; i < count; i++)
+                        drm_syncobj_put(query_info->queries[i].syncobj);
+
+                kvfree(query_info->queries);
+        }
+}
+
+void
+v3d_performance_query_info_free(struct v3d_performance_query_info *query_info,
+                                unsigned int count)
+{
+        if (query_info->queries) {
+                unsigned int i;
+
+                for (i = 0; i < count; i++)
+                        drm_syncobj_put(query_info->queries[i].syncobj);
+
+                kvfree(query_info->queries);
+        }
+}
+
 static void
 v3d_cpu_job_free(struct drm_sched_job *sched_job)
 {
         struct v3d_cpu_job *job = to_cpu_job(sched_job);
-        struct v3d_timestamp_query_info *timestamp_query = &job->timestamp_query;
-        struct v3d_performance_query_info *performance_query = &job->performance_query;
 
-        if (timestamp_query->queries) {
-                for (int i = 0; i < timestamp_query->count; i++)
-                        drm_syncobj_put(timestamp_query->queries[i].syncobj);
-                kvfree(timestamp_query->queries);
-        }
+        v3d_timestamp_query_info_free(&job->timestamp_query,
+                                      job->timestamp_query.count);
 
-        if (performance_query->queries) {
-                for (int i = 0; i < performance_query->count; i++)
-                        drm_syncobj_put(performance_query->queries[i].syncobj);
-                kvfree(performance_query->queries);
-        }
+        v3d_performance_query_info_free(&job->performance_query,
+                                        job->performance_query.count);
 
         v3d_job_cleanup(&job->base);
 }
......
@@ -452,6 +452,8 @@ v3d_get_cpu_timestamp_query_params(struct drm_file *file_priv,
 {
         u32 __user *offsets, *syncs;
         struct drm_v3d_timestamp_query timestamp;
+        unsigned int i;
+        int err;
 
         if (!job) {
                 DRM_DEBUG("CPU job extension was attached to a GPU job.\n");
@@ -480,26 +482,34 @@ v3d_get_cpu_timestamp_query_params(struct drm_file *file_priv,
         offsets = u64_to_user_ptr(timestamp.offsets);
         syncs = u64_to_user_ptr(timestamp.syncs);
 
-        for (int i = 0; i < timestamp.count; i++) {
+        for (i = 0; i < timestamp.count; i++) {
                 u32 offset, sync;
 
                 if (copy_from_user(&offset, offsets++, sizeof(offset))) {
-                        kvfree(job->timestamp_query.queries);
-                        return -EFAULT;
+                        err = -EFAULT;
+                        goto error;
                 }
 
                 job->timestamp_query.queries[i].offset = offset;
 
                 if (copy_from_user(&sync, syncs++, sizeof(sync))) {
-                        kvfree(job->timestamp_query.queries);
-                        return -EFAULT;
+                        err = -EFAULT;
+                        goto error;
                 }
 
                 job->timestamp_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync);
+                if (!job->timestamp_query.queries[i].syncobj) {
+                        err = -ENOENT;
+                        goto error;
+                }
         }
         job->timestamp_query.count = timestamp.count;
 
         return 0;
+
+error:
+        v3d_timestamp_query_info_free(&job->timestamp_query, i);
+        return err;
 }
 
 static int
@@ -509,6 +519,8 @@ v3d_get_cpu_reset_timestamp_params(struct drm_file *file_priv,
 {
         u32 __user *syncs;
         struct drm_v3d_reset_timestamp_query reset;
+        unsigned int i;
+        int err;
 
         if (!job) {
                 DRM_DEBUG("CPU job extension was attached to a GPU job.\n");
@@ -533,21 +545,29 @@ v3d_get_cpu_reset_timestamp_params(struct drm_file *file_priv,
 
         syncs = u64_to_user_ptr(reset.syncs);
 
-        for (int i = 0; i < reset.count; i++) {
+        for (i = 0; i < reset.count; i++) {
                 u32 sync;
 
                 job->timestamp_query.queries[i].offset = reset.offset + 8 * i;
 
                 if (copy_from_user(&sync, syncs++, sizeof(sync))) {
-                        kvfree(job->timestamp_query.queries);
-                        return -EFAULT;
+                        err = -EFAULT;
+                        goto error;
                 }
 
                 job->timestamp_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync);
+                if (!job->timestamp_query.queries[i].syncobj) {
+                        err = -ENOENT;
+                        goto error;
+                }
         }
         job->timestamp_query.count = reset.count;
 
         return 0;
+
+error:
+        v3d_timestamp_query_info_free(&job->timestamp_query, i);
+        return err;
 }
 
 /* Get data for the copy timestamp query results job submission. */
@@ -558,7 +578,8 @@ v3d_get_cpu_copy_query_results_params(struct drm_file *file_priv,
 {
         u32 __user *offsets, *syncs;
         struct drm_v3d_copy_timestamp_query copy;
-        int i;
+        unsigned int i;
+        int err;
 
         if (!job) {
                 DRM_DEBUG("CPU job extension was attached to a GPU job.\n");
@@ -591,18 +612,22 @@ v3d_get_cpu_copy_query_results_params(struct drm_file *file_priv,
                 u32 offset, sync;
 
                 if (copy_from_user(&offset, offsets++, sizeof(offset))) {
-                        kvfree(job->timestamp_query.queries);
-                        return -EFAULT;
+                        err = -EFAULT;
+                        goto error;
                 }
 
                 job->timestamp_query.queries[i].offset = offset;
 
                 if (copy_from_user(&sync, syncs++, sizeof(sync))) {
-                        kvfree(job->timestamp_query.queries);
-                        return -EFAULT;
+                        err = -EFAULT;
+                        goto error;
                 }
 
                 job->timestamp_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync);
+                if (!job->timestamp_query.queries[i].syncobj) {
+                        err = -ENOENT;
+                        goto error;
+                }
         }
         job->timestamp_query.count = copy.count;
 
@@ -613,6 +638,10 @@ v3d_get_cpu_copy_query_results_params(struct drm_file *file_priv,
         job->copy.stride = copy.stride;
 
         return 0;
+
+error:
+        v3d_timestamp_query_info_free(&job->timestamp_query, i);
+        return err;
 }
 
 static int
@@ -623,6 +652,8 @@ v3d_get_cpu_reset_performance_params(struct drm_file *file_priv,
         u32 __user *syncs;
         u64 __user *kperfmon_ids;
         struct drm_v3d_reset_performance_query reset;
+        unsigned int i, j;
+        int err;
 
         if (!job) {
                 DRM_DEBUG("CPU job extension was attached to a GPU job.\n");
@@ -637,6 +668,9 @@ v3d_get_cpu_reset_performance_params(struct drm_file *file_priv,
         if (copy_from_user(&reset, ext, sizeof(reset)))
                 return -EFAULT;
 
+        if (reset.nperfmons > V3D_MAX_PERFMONS)
+                return -EINVAL;
+
         job->job_type = V3D_CPU_JOB_TYPE_RESET_PERFORMANCE_QUERY;
 
         job->performance_query.queries = kvmalloc_array(reset.count,
@@ -648,39 +682,47 @@ v3d_get_cpu_reset_performance_params(struct drm_file *file_priv,
         syncs = u64_to_user_ptr(reset.syncs);
         kperfmon_ids = u64_to_user_ptr(reset.kperfmon_ids);
 
-        for (int i = 0; i < reset.count; i++) {
+        for (i = 0; i < reset.count; i++) {
                 u32 sync;
                 u64 ids;
                 u32 __user *ids_pointer;
                 u32 id;
 
                 if (copy_from_user(&sync, syncs++, sizeof(sync))) {
-                        kvfree(job->performance_query.queries);
-                        return -EFAULT;
+                        err = -EFAULT;
+                        goto error;
                 }
 
-                job->performance_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync);
-
                 if (copy_from_user(&ids, kperfmon_ids++, sizeof(ids))) {
-                        kvfree(job->performance_query.queries);
-                        return -EFAULT;
+                        err = -EFAULT;
+                        goto error;
                 }
 
                 ids_pointer = u64_to_user_ptr(ids);
 
-                for (int j = 0; j < reset.nperfmons; j++) {
+                for (j = 0; j < reset.nperfmons; j++) {
                         if (copy_from_user(&id, ids_pointer++, sizeof(id))) {
-                                kvfree(job->performance_query.queries);
-                                return -EFAULT;
+                                err = -EFAULT;
+                                goto error;
                         }
 
                         job->performance_query.queries[i].kperfmon_ids[j] = id;
                 }
+
+                job->performance_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync);
+                if (!job->performance_query.queries[i].syncobj) {
+                        err = -ENOENT;
+                        goto error;
+                }
         }
         job->performance_query.count = reset.count;
         job->performance_query.nperfmons = reset.nperfmons;
 
         return 0;
+
+error:
+        v3d_performance_query_info_free(&job->performance_query, i);
+        return err;
 }
 
 static int
@@ -691,6 +733,8 @@ v3d_get_cpu_copy_performance_query_params(struct drm_file *file_priv,
         u32 __user *syncs;
         u64 __user *kperfmon_ids;
         struct drm_v3d_copy_performance_query copy;
+        unsigned int i, j;
+        int err;
 
         if (!job) {
                 DRM_DEBUG("CPU job extension was attached to a GPU job.\n");
@@ -708,6 +752,9 @@ v3d_get_cpu_copy_performance_query_params(struct drm_file *file_priv,
         if (copy.pad)
                 return -EINVAL;
 
+        if (copy.nperfmons > V3D_MAX_PERFMONS)
+                return -EINVAL;
+
         job->job_type = V3D_CPU_JOB_TYPE_COPY_PERFORMANCE_QUERY;
 
         job->performance_query.queries = kvmalloc_array(copy.count,
@@ -719,34 +766,38 @@ v3d_get_cpu_copy_performance_query_params(struct drm_file *file_priv,
         syncs = u64_to_user_ptr(copy.syncs);
         kperfmon_ids = u64_to_user_ptr(copy.kperfmon_ids);
 
-        for (int i = 0; i < copy.count; i++) {
+        for (i = 0; i < copy.count; i++) {
                 u32 sync;
                 u64 ids;
                 u32 __user *ids_pointer;
                 u32 id;
 
                 if (copy_from_user(&sync, syncs++, sizeof(sync))) {
-                        kvfree(job->performance_query.queries);
-                        return -EFAULT;
+                        err = -EFAULT;
+                        goto error;
                 }
 
-                job->performance_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync);
-
                 if (copy_from_user(&ids, kperfmon_ids++, sizeof(ids))) {
-                        kvfree(job->performance_query.queries);
-                        return -EFAULT;
+                        err = -EFAULT;
+                        goto error;
                 }
 
                 ids_pointer = u64_to_user_ptr(ids);
 
-                for (int j = 0; j < copy.nperfmons; j++) {
+                for (j = 0; j < copy.nperfmons; j++) {
                         if (copy_from_user(&id, ids_pointer++, sizeof(id))) {
-                                kvfree(job->performance_query.queries);
-                                return -EFAULT;
+                                err = -EFAULT;
+                                goto error;
                         }
 
                         job->performance_query.queries[i].kperfmon_ids[j] = id;
                 }
+
+                job->performance_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync);
+                if (!job->performance_query.queries[i].syncobj) {
+                        err = -ENOENT;
+                        goto error;
+                }
         }
         job->performance_query.count = copy.count;
         job->performance_query.nperfmons = copy.nperfmons;
 
@@ -759,6 +810,10 @@ v3d_get_cpu_copy_performance_query_params(struct drm_file *file_priv,
         job->copy.stride = copy.stride;
 
         return 0;
+
+error:
+        v3d_performance_query_info_free(&job->performance_query, i);
+        return err;
 }
 
 /* Whenever userspace sets ioctl extensions, v3d_get_extensions parses data
/* Whenever userspace sets ioctl extensions, v3d_get_extensions parses data /* Whenever userspace sets ioctl extensions, v3d_get_extensions parses data
......
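All of the v3d error paths above share one partial-cleanup idiom: the loop
index i counts how many array slots hold a successfully acquired syncobj
reference, so on failure the free helper is passed i rather than the full
requested count and only drops the references actually taken. A standalone
sketch of the pattern follows; acquire() and release() are hypothetical
stand-ins, not the v3d API.

#include <stdlib.h>

/* Hypothetical resource standing in for a syncobj reference. */
struct handle { int dummy; };

static struct handle *acquire(unsigned int idx)
{
        /* Pretend every third acquisition fails. */
        return (idx % 3 == 2) ? NULL : malloc(sizeof(struct handle));
}

static void release(struct handle *h)
{
        free(h);
}

static int setup_all(struct handle **slots, unsigned int count)
{
        unsigned int i;

        for (i = 0; i < count; i++) {
                slots[i] = acquire(i);
                if (!slots[i])
                        goto error;
        }
        return 0;

error:
        /* Free only the i slots that were successfully filled. */
        while (i--)
                release(slots[i]);
        return -1;
}

int main(void)
{
        struct handle *slots[8] = { 0 };

        return setup_all(slots, 8) ? 1 : 0;
}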
@@ -48,7 +48,7 @@ struct virtio_gpu_submit {
 static int virtio_gpu_do_fence_wait(struct virtio_gpu_submit *submit,
                                     struct dma_fence *in_fence)
 {
-        u32 context = submit->fence_ctx + submit->ring_idx;
+        u64 context = submit->fence_ctx + submit->ring_idx;
 
         if (dma_fence_match_context(in_fence, context))
                 return 0;
......
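The virtio-gpu fix above is a plain integer-width bug: dma_fence contexts are
64-bit, and storing the sum in a u32 truncates it, so dma_fence_match_context()
can compare against the wrong context once allocated contexts pass 2^32. A
userspace illustration of the truncation (the values are made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t fence_ctx = 0x100000000ull;    /* made-up 64-bit context base */
        uint64_t ring_idx = 1;

        uint32_t truncated = (uint32_t)(fence_ctx + ring_idx);  /* old: u32 */
        uint64_t full = fence_ctx + ring_idx;                   /* fixed: u64 */

        /* Prints 1 vs 4294967297: the truncated value would match a
         * completely unrelated fence context. */
        printf("%u vs %llu\n", truncated, (unsigned long long)full);
        return 0;
}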
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
 /**********************************************************
- * Copyright 2021 VMware, Inc.
- * SPDX-License-Identifier: GPL-2.0 OR MIT
+ * Copyright (c) 2021-2024 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
  *
  * Permission is hereby granted, free of charge, to any person
  * obtaining a copy of this software and associated documentation
@@ -31,6 +33,10 @@
 
 #include <drm/vmwgfx_drm.h>
 
+#define SVGA3D_FLAGS_UPPER_32(svga3d_flags) ((svga3d_flags) >> 32)
+#define SVGA3D_FLAGS_LOWER_32(svga3d_flags) \
+        ((svga3d_flags) & ((uint64_t)U32_MAX))
+
 static inline u32 clamped_umul32(u32 a, u32 b)
 {
         uint64_t tmp = (uint64_t) a*b;
......
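The two macros added above split a 64-bit SVGA3D flags word into the two
32-bit halves the device expects. The round trip below checks the obvious
recombination property; it is a standalone sketch using fixed-width C types
rather than the kernel's, with U32_MAX defined locally.

#include <assert.h>
#include <stdint.h>

#define U32_MAX 0xffffffffu
#define SVGA3D_FLAGS_UPPER_32(svga3d_flags) ((svga3d_flags) >> 32)
#define SVGA3D_FLAGS_LOWER_32(svga3d_flags) \
        ((svga3d_flags) & ((uint64_t)U32_MAX))

int main(void)
{
        uint64_t flags = 0xdeadbeefcafef00dull;  /* arbitrary test pattern */
        uint64_t rejoined =
                (SVGA3D_FLAGS_UPPER_32(flags) << 32) |
                SVGA3D_FLAGS_LOWER_32(flags);

        assert(rejoined == flags);
        return 0;
}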
 // SPDX-License-Identifier: GPL-2.0 OR MIT
 /**************************************************************************
  *
- * Copyright © 2011-2023 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright (c) 2011-2024 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the
@@ -28,15 +28,39 @@
 #include "vmwgfx_bo.h"
 #include "vmwgfx_drv.h"
+#include "vmwgfx_resource_priv.h"
 
 #include <drm/ttm/ttm_placement.h>
 
 static void vmw_bo_release(struct vmw_bo *vbo)
 {
+        struct vmw_resource *res;
+
         WARN_ON(vbo->tbo.base.funcs &&
                 kref_read(&vbo->tbo.base.refcount) != 0);
         vmw_bo_unmap(vbo);
+
+        xa_destroy(&vbo->detached_resources);
+        WARN_ON(vbo->is_dumb && !vbo->dumb_surface);
+        if (vbo->is_dumb && vbo->dumb_surface) {
+                res = &vbo->dumb_surface->res;
+                WARN_ON(vbo != res->guest_memory_bo);
+                WARN_ON(!res->guest_memory_bo);
+                if (res->guest_memory_bo) {
+                        /* Reserve and switch the backing mob. */
+                        mutex_lock(&res->dev_priv->cmdbuf_mutex);
+                        (void)vmw_resource_reserve(res, false, true);
+                        vmw_resource_mob_detach(res);
+                        if (res->coherent)
+                                vmw_bo_dirty_release(res->guest_memory_bo);
+                        res->guest_memory_bo = NULL;
+                        res->guest_memory_offset = 0;
+                        vmw_resource_unreserve(res, false, false, false, NULL,
+                                               0);
+                        mutex_unlock(&res->dev_priv->cmdbuf_mutex);
+                }
+                vmw_surface_unreference(&vbo->dumb_surface);
+        }
         drm_gem_object_release(&vbo->tbo.base);
 }
 
@@ -325,6 +349,11 @@ void vmw_bo_pin_reserved(struct vmw_bo *vbo, bool pin)
  *
  */
 void *vmw_bo_map_and_cache(struct vmw_bo *vbo)
+{
+        return vmw_bo_map_and_cache_size(vbo, vbo->tbo.base.size);
+}
+
+void *vmw_bo_map_and_cache_size(struct vmw_bo *vbo, size_t size)
 {
         struct ttm_buffer_object *bo = &vbo->tbo;
         bool not_used;
@@ -335,9 +364,10 @@ void *vmw_bo_map_and_cache(struct vmw_bo *vbo)
         if (virtual)
                 return virtual;
 
-        ret = ttm_bo_kmap(bo, 0, PFN_UP(bo->base.size), &vbo->map);
+        ret = ttm_bo_kmap(bo, 0, PFN_UP(size), &vbo->map);
         if (ret)
-                DRM_ERROR("Buffer object map failed: %d.\n", ret);
+                DRM_ERROR("Buffer object map failed: %d (size: bo = %zu, map = %zu).\n",
+                          ret, bo->base.size, size);
 
         return ttm_kmap_obj_virtual(&vbo->map, &not_used);
 }
@@ -390,6 +420,7 @@ static int vmw_bo_init(struct vmw_private *dev_priv,
         BUILD_BUG_ON(TTM_MAX_BO_PRIORITY <= 3);
         vmw_bo->tbo.priority = 3;
         vmw_bo->res_tree = RB_ROOT;
+        xa_init(&vmw_bo->detached_resources);
 
         params->size = ALIGN(params->size, PAGE_SIZE);
         drm_gem_private_object_init(vdev, &vmw_bo->tbo.base, params->size);
@@ -654,52 +685,6 @@ void vmw_bo_fence_single(struct ttm_buffer_object *bo,
         dma_fence_put(&fence->base);
 }
 
-/**
- * vmw_dumb_create - Create a dumb kms buffer
- *
- * @file_priv: Pointer to a struct drm_file identifying the caller.
- * @dev: Pointer to the drm device.
- * @args: Pointer to a struct drm_mode_create_dumb structure
- * Return: Zero on success, negative error code on failure.
- *
- * This is a driver callback for the core drm create_dumb functionality.
- * Note that this is very similar to the vmw_bo_alloc ioctl, except
- * that the arguments have a different format.
- */
-int vmw_dumb_create(struct drm_file *file_priv,
-                    struct drm_device *dev,
-                    struct drm_mode_create_dumb *args)
-{
-        struct vmw_private *dev_priv = vmw_priv(dev);
-        struct vmw_bo *vbo;
-        int cpp = DIV_ROUND_UP(args->bpp, 8);
-        int ret;
-
-        switch (cpp) {
-        case 1: /* DRM_FORMAT_C8 */
-        case 2: /* DRM_FORMAT_RGB565 */
-        case 4: /* DRM_FORMAT_XRGB8888 */
-                break;
-        default:
-                /*
-                 * Dumb buffers don't allow anything else.
-                 * This is tested via IGT's dumb_buffers
-                 */
-                return -EINVAL;
-        }
-
-        args->pitch = args->width * cpp;
-        args->size = ALIGN(args->pitch * args->height, PAGE_SIZE);
-
-        ret = vmw_gem_object_create_with_handle(dev_priv, file_priv,
-                                                args->size, &args->handle,
-                                                &vbo);
-        /* drop reference from allocate - handle holds it now */
-        drm_gem_object_put(&vbo->tbo.base);
-        return ret;
-}
-
 /**
  * vmw_bo_swap_notify - swapout notify callback.
  *
@@ -853,3 +838,43 @@ void vmw_bo_placement_set_default_accelerated(struct vmw_bo *bo)
         vmw_bo_placement_set(bo, domain, domain);
 }
 
+void vmw_bo_add_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res)
+{
+        xa_store(&vbo->detached_resources, (unsigned long)res, res, GFP_KERNEL);
+}
+
+void vmw_bo_del_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res)
+{
+        xa_erase(&vbo->detached_resources, (unsigned long)res);
+}
+
+struct vmw_surface *vmw_bo_surface(struct vmw_bo *vbo)
+{
+        unsigned long index;
+        struct vmw_resource *res = NULL;
+        struct vmw_surface *surf = NULL;
+        struct rb_node *rb_itr = vbo->res_tree.rb_node;
+
+        if (vbo->is_dumb && vbo->dumb_surface) {
+                res = &vbo->dumb_surface->res;
+                goto out;
+        }
+
+        xa_for_each(&vbo->detached_resources, index, res) {
+                if (res->func->res_type == vmw_res_surface)
+                        goto out;
+        }
+
+        for (rb_itr = rb_first(&vbo->res_tree); rb_itr;
+             rb_itr = rb_next(rb_itr)) {
+                res = rb_entry(rb_itr, struct vmw_resource, mob_node);
+                if (res->func->res_type == vmw_res_surface)
+                        goto out;
+        }
+
+out:
+        if (res)
+                surf = vmw_res_to_srf(res);
+        return surf;
+}
 /* SPDX-License-Identifier: GPL-2.0 OR MIT */
 /**************************************************************************
  *
- * Copyright 2023 VMware, Inc., Palo Alto, CA., USA
+ * Copyright (c) 2023-2024 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the
@@ -35,11 +36,13 @@
 #include <linux/rbtree_types.h>
 #include <linux/types.h>
+#include <linux/xarray.h>
 
 struct vmw_bo_dirty;
 struct vmw_fence_obj;
 struct vmw_private;
 struct vmw_resource;
+struct vmw_surface;
 
 enum vmw_bo_domain {
         VMW_BO_DOMAIN_SYS           = BIT(0),
@@ -85,11 +88,15 @@ struct vmw_bo {
         struct rb_root res_tree;
         u32 res_prios[TTM_MAX_BO_PRIORITY];
+        struct xarray detached_resources;
 
         atomic_t cpu_writers;
         /* Not ref-counted.  Protected by binding_mutex */
         struct vmw_resource *dx_query_ctx;
         struct vmw_bo_dirty *dirty;
+
+        bool is_dumb;
+        struct vmw_surface *dumb_surface;
 };
 
 void vmw_bo_placement_set(struct vmw_bo *bo, u32 domain, u32 busy_domain);
@@ -124,15 +131,21 @@ void vmw_bo_fence_single(struct ttm_buffer_object *bo,
                          struct vmw_fence_obj *fence);
 
 void *vmw_bo_map_and_cache(struct vmw_bo *vbo);
+void *vmw_bo_map_and_cache_size(struct vmw_bo *vbo, size_t size);
 void vmw_bo_unmap(struct vmw_bo *vbo);
 
 void vmw_bo_move_notify(struct ttm_buffer_object *bo,
                         struct ttm_resource *mem);
 void vmw_bo_swap_notify(struct ttm_buffer_object *bo);
 
+void vmw_bo_add_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res);
+void vmw_bo_del_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res);
+struct vmw_surface *vmw_bo_surface(struct vmw_bo *vbo);
+
 int vmw_user_bo_lookup(struct drm_file *filp,
                        u32 handle,
                        struct vmw_bo **out);
+
 /**
  * vmw_bo_adjust_prio - Adjust the buffer object eviction priority
  * according to attached resources
......
 /* SPDX-License-Identifier: GPL-2.0 OR MIT */
 /**************************************************************************
  *
- * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
+ * Copyright (c) 2009-2024 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the
@@ -762,6 +763,26 @@ extern int vmw_gmr_bind(struct vmw_private *dev_priv,
                         int gmr_id);
 extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);
 
+/**
+ * User handles
+ */
+struct vmw_user_object {
+        struct vmw_surface *surface;
+        struct vmw_bo *buffer;
+};
+
+int vmw_user_object_lookup(struct vmw_private *dev_priv, struct drm_file *filp,
+                           u32 handle, struct vmw_user_object *uo);
+struct vmw_user_object *vmw_user_object_ref(struct vmw_user_object *uo);
+void vmw_user_object_unref(struct vmw_user_object *uo);
+bool vmw_user_object_is_null(struct vmw_user_object *uo);
+struct vmw_surface *vmw_user_object_surface(struct vmw_user_object *uo);
+struct vmw_bo *vmw_user_object_buffer(struct vmw_user_object *uo);
+void *vmw_user_object_map(struct vmw_user_object *uo);
+void *vmw_user_object_map_size(struct vmw_user_object *uo, size_t size);
+void vmw_user_object_unmap(struct vmw_user_object *uo);
+bool vmw_user_object_is_mapped(struct vmw_user_object *uo);
+
 /**
  * Resource utilities - vmwgfx_resource.c
  */
@@ -776,11 +797,6 @@ extern int vmw_resource_validate(struct vmw_resource *res, bool intr,
 extern int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
                                 bool no_backup);
 extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
-extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
-                                  struct drm_file *filp,
-                                  uint32_t handle,
-                                  struct vmw_surface **out_surf,
-                                  struct vmw_bo **out_buf);
 extern int vmw_user_resource_lookup_handle(
         struct vmw_private *dev_priv,
         struct ttm_object_file *tfile,
@@ -1057,9 +1073,6 @@ int vmw_kms_suspend(struct drm_device *dev);
 int vmw_kms_resume(struct drm_device *dev);
 void vmw_kms_lost_device(struct drm_device *dev);
 
-int vmw_dumb_create(struct drm_file *file_priv,
-                    struct drm_device *dev,
-                    struct drm_mode_create_dumb *args);
 extern int vmw_resource_pin(struct vmw_resource *res, bool interruptible);
 extern void vmw_resource_unpin(struct vmw_resource *res);
 extern enum vmw_res_type vmw_res_type(const struct vmw_resource *res);
@@ -1176,6 +1189,15 @@ extern int vmw_gb_surface_reference_ext_ioctl(struct drm_device *dev,
 int vmw_gb_surface_define(struct vmw_private *dev_priv,
                           const struct vmw_surface_metadata *req,
                           struct vmw_surface **srf_out);
+struct vmw_surface *vmw_lookup_surface_for_buffer(struct vmw_private *vmw,
+                                                  struct vmw_bo *bo,
+                                                  u32 handle);
+u32 vmw_lookup_surface_handle_for_buffer(struct vmw_private *vmw,
+                                         struct vmw_bo *bo,
+                                         u32 handle);
+int vmw_dumb_create(struct drm_file *file_priv,
+                    struct drm_device *dev,
+                    struct drm_mode_create_dumb *args);
 
 /*
  * Shader management - vmwgfx_shader.c
@@ -32,7 +32,6 @@
 #define VMW_FENCE_WRAP (1 << 31)
 
 struct vmw_fence_manager {
-        int num_fence_objects;
         struct vmw_private *dev_priv;
         spinlock_t lock;
         struct list_head fence_list;
@@ -124,13 +123,13 @@ static void vmw_fence_obj_destroy(struct dma_fence *f)
 {
         struct vmw_fence_obj *fence =
                 container_of(f, struct vmw_fence_obj, base);
         struct vmw_fence_manager *fman = fman_from_fence(fence);
 
-        if (!list_empty(&fence->head)) {
-                spin_lock(&fman->lock);
-                list_del_init(&fence->head);
-                --fman->num_fence_objects;
-                spin_unlock(&fman->lock);
-        }
+        spin_lock(&fman->lock);
+        list_del_init(&fence->head);
+        spin_unlock(&fman->lock);
         fence->destroy(fence);
 }
 
@@ -257,7 +256,6 @@ static const struct dma_fence_ops vmw_fence_ops = {
         .release = vmw_fence_obj_destroy,
 };
 
-
 /*
  * Execute signal actions on fences recently signaled.
  * This is done from a workqueue so we don't have to execute
@@ -355,7 +353,6 @@ static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
                 goto out_unlock;
         }
         list_add_tail(&fence->head, &fman->fence_list);
-        ++fman->num_fence_objects;
 
 out_unlock:
         spin_unlock(&fman->lock);
@@ -403,7 +400,7 @@ static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
                                       u32 passed_seqno)
 {
         u32 goal_seqno;
-        struct vmw_fence_obj *fence;
+        struct vmw_fence_obj *fence, *next_fence;
 
         if (likely(!fman->seqno_valid))
                 return false;
@@ -413,7 +410,7 @@ static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
                 return false;
 
         fman->seqno_valid = false;
-        list_for_each_entry(fence, &fman->fence_list, head) {
+        list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
                 if (!list_empty(&fence->seq_passed_actions)) {
                         fman->seqno_valid = true;
                         vmw_fence_goal_write(fman->dev_priv,
......
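The fence-destroy change above drops the unlocked !list_empty(&fence->head)
check and takes the manager lock unconditionally. That is safe because
list_del_init() leaves the node pointing at itself, so deleting an
already-removed node is a harmless self-unlink, while the old unlocked check
raced against concurrent list manipulation. A minimal userspace model of that
property, re-implementing the kernel's circular-list idiom rather than using
it:

#include <assert.h>

struct list_head {
        struct list_head *next, *prev;
};

static void INIT_LIST_HEAD(struct list_head *h)
{
        h->next = h;
        h->prev = h;
}

static void list_add_tail(struct list_head *new, struct list_head *head)
{
        new->prev = head->prev;
        new->next = head;
        head->prev->next = new;
        head->prev = new;
}

/* Unlink and re-point the node at itself, making repeat calls no-ops. */
static void list_del_init(struct list_head *entry)
{
        entry->prev->next = entry->next;
        entry->next->prev = entry->prev;
        INIT_LIST_HEAD(entry);
}

int main(void)
{
        struct list_head list, node;

        INIT_LIST_HEAD(&list);
        list_add_tail(&node, &list);

        list_del_init(&node);   /* real removal */
        list_del_init(&node);   /* self-unlink: still well-defined */

        assert(list.next == &list && node.next == &node);
        return 0;
}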
 /* SPDX-License-Identifier: GPL-2.0 OR MIT */
 /*
- * Copyright 2021-2023 VMware, Inc.
+ * Copyright (c) 2021-2024 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
  *
  * Permission is hereby granted, free of charge, to any person
  * obtaining a copy of this software and associated documentation
@@ -78,6 +79,59 @@ static struct sg_table *vmw_gem_object_get_sg_table(struct drm_gem_object *obj)
         return drm_prime_pages_to_sg(obj->dev, vmw_tt->dma_ttm.pages, vmw_tt->dma_ttm.num_pages);
 }
 
+static int vmw_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
+{
+        struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(obj);
+        int ret;
+
+        if (obj->import_attach) {
+                ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
+                if (!ret) {
+                        if (drm_WARN_ON(obj->dev, map->is_iomem)) {
+                                dma_buf_vunmap(obj->import_attach->dmabuf, map);
+                                return -EIO;
+                        }
+                }
+        } else {
+                ret = ttm_bo_vmap(bo, map);
+        }
+
+        return ret;
+}
+
+static void vmw_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
+{
+        if (obj->import_attach)
+                dma_buf_vunmap(obj->import_attach->dmabuf, map);
+        else
+                drm_gem_ttm_vunmap(obj, map);
+}
+
+static int vmw_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+{
+        int ret;
+
+        if (obj->import_attach) {
+                /*
+                 * Reset both vm_ops and vm_private_data, so we don't end up with
+                 * vm_ops pointing to our implementation if the dma-buf backend
+                 * doesn't set those fields.
+                 */
+                vma->vm_private_data = NULL;
+                vma->vm_ops = NULL;
+
+                ret = dma_buf_mmap(obj->dma_buf, vma, 0);
+
+                /* Drop the reference drm_gem_mmap_obj() acquired.*/
+                if (!ret)
+                        drm_gem_object_put(obj);
+
+                return ret;
+        }
+
+        return drm_gem_ttm_mmap(obj, vma);
+}
+
 static const struct vm_operations_struct vmw_vm_ops = {
         .pfn_mkwrite = vmw_bo_vm_mkwrite,
         .page_mkwrite = vmw_bo_vm_mkwrite,
@@ -94,9 +148,9 @@ static const struct drm_gem_object_funcs vmw_gem_object_funcs = {
         .pin = vmw_gem_object_pin,
         .unpin = vmw_gem_object_unpin,
         .get_sg_table = vmw_gem_object_get_sg_table,
-        .vmap = drm_gem_ttm_vmap,
-        .vunmap = drm_gem_ttm_vunmap,
-        .mmap = drm_gem_ttm_mmap,
+        .vmap = vmw_gem_vmap,
+        .vunmap = vmw_gem_vunmap,
+        .mmap = vmw_gem_mmap,
         .vm_ops = &vmw_vm_ops,
 };
......
This diff is collapsed.
 /* SPDX-License-Identifier: GPL-2.0 OR MIT */
 /**************************************************************************
  *
- * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
+ * Copyright (c) 2009-2024 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the
@@ -221,11 +222,9 @@ struct vmw_framebuffer {
 
 struct vmw_framebuffer_surface {
         struct vmw_framebuffer base;
-        struct vmw_surface *surface;
-        bool is_bo_proxy;  /* true if this is proxy surface for DMA buf */
+        struct vmw_user_object uo;
 };
 
-
 struct vmw_framebuffer_bo {
         struct vmw_framebuffer base;
         struct vmw_bo *buffer;
@@ -277,8 +276,7 @@ struct vmw_cursor_plane_state {
  */
 struct vmw_plane_state {
         struct drm_plane_state base;
-        struct vmw_surface *surf;
-        struct vmw_bo *bo;
+        struct vmw_user_object uo;
 
         int content_fb_type;
         unsigned long bo_size;
@@ -457,9 +455,7 @@ int vmw_kms_readback(struct vmw_private *dev_priv,
                      uint32_t num_clips);
 struct vmw_framebuffer *
 vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
-                        struct vmw_bo *bo,
-                        struct vmw_surface *surface,
-                        bool only_2d,
+                        struct vmw_user_object *uo,
                         const struct drm_mode_fb_cmd2 *mode_cmd);
 void vmw_guess_mode_timing(struct drm_display_mode *mode);
 void vmw_kms_update_implicit_fb(struct vmw_private *dev_priv);
@@ -486,8 +482,7 @@ void vmw_du_plane_reset(struct drm_plane *plane);
 struct drm_plane_state *vmw_du_plane_duplicate_state(struct drm_plane *plane);
 void vmw_du_plane_destroy_state(struct drm_plane *plane,
                                 struct drm_plane_state *state);
-void vmw_du_plane_unpin_surf(struct vmw_plane_state *vps,
-                             bool unreference);
+void vmw_du_plane_unpin_surf(struct vmw_plane_state *vps);
 int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
                              struct drm_atomic_state *state);
......
 // SPDX-License-Identifier: GPL-2.0 OR MIT
 /**************************************************************************
  *
- * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
+ * Copyright (c) 2009-2024 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the
@@ -147,8 +148,9 @@ static int vmw_ldu_fb_pin(struct vmw_framebuffer *vfb)
         struct vmw_bo *buf;
         int ret;
 
-        buf = vfb->bo ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
-                vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.guest_memory_bo;
+        buf = vfb->bo ?
+                vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
+                vmw_user_object_buffer(&vmw_framebuffer_to_vfbs(&vfb->base)->uo);
 
         if (!buf)
                 return 0;
@@ -169,8 +171,10 @@ static int vmw_ldu_fb_unpin(struct vmw_framebuffer *vfb)
         struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
         struct vmw_bo *buf;
 
-        buf = vfb->bo ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
-                vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.guest_memory_bo;
+        buf = vfb->bo ?
+                vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
+                vmw_user_object_buffer(&vmw_framebuffer_to_vfbs(&vfb->base)->uo);
 
         if (WARN_ON(!buf))
                 return 0;
......
@@ -92,7 +92,7 @@ static int vmw_overlay_send_put(struct vmw_private *dev_priv,
 {
         struct vmw_escape_video_flush *flush;
         size_t fifo_size;
-        bool have_so = (dev_priv->active_display_unit == vmw_du_screen_object);
+        bool have_so = (dev_priv->active_display_unit != vmw_du_legacy);
         int i, num_items;
         SVGAGuestPtr ptr;
......
 // SPDX-License-Identifier: GPL-2.0 OR MIT
 /**************************************************************************
  *
- * Copyright 2013 VMware, Inc., Palo Alto, CA., USA
+ * Copyright (c) 2013-2024 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the
@@ -31,6 +32,7 @@
  */
 
 #include "vmwgfx_drv.h"
+#include "vmwgfx_bo.h"
 #include "ttm_object.h"
 
 #include <linux/dma-buf.h>
 
@@ -88,13 +90,35 @@ int vmw_prime_handle_to_fd(struct drm_device *dev,
                            uint32_t handle, uint32_t flags,
                            int *prime_fd)
 {
+        struct vmw_private *vmw = vmw_priv(dev);
         struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+        struct vmw_bo *vbo;
         int ret;
+        int surf_handle;
 
-        if (handle > VMWGFX_NUM_MOB)
+        if (handle > VMWGFX_NUM_MOB) {
                 ret = ttm_prime_handle_to_fd(tfile, handle, flags, prime_fd);
-        else
-                ret = drm_gem_prime_handle_to_fd(dev, file_priv, handle, flags, prime_fd);
+        } else {
+                ret = vmw_user_bo_lookup(file_priv, handle, &vbo);
+                if (ret)
+                        return ret;
+                if (vbo && vbo->is_dumb) {
+                        ret = drm_gem_prime_handle_to_fd(dev, file_priv, handle,
+                                                         flags, prime_fd);
+                } else {
+                        surf_handle = vmw_lookup_surface_handle_for_buffer(vmw,
+                                                                           vbo,
+                                                                           handle);
+                        if (surf_handle > 0)
+                                ret = ttm_prime_handle_to_fd(tfile, surf_handle,
                                                              flags, prime_fd);
+                        else
+                                ret = drm_gem_prime_handle_to_fd(dev, file_priv,
                                                                  handle, flags,
                                                                  prime_fd);
+                }
+                vmw_user_bo_unref(&vbo);
+        }
 
         return ret;
 }
 // SPDX-License-Identifier: GPL-2.0 OR MIT
 /**************************************************************************
  *
- * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
+ * Copyright (c) 2009-2024 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the
@@ -58,6 +59,7 @@ void vmw_resource_mob_attach(struct vmw_resource *res)
 
         rb_link_node(&res->mob_node, parent, new);
         rb_insert_color(&res->mob_node, &gbo->res_tree);
+        vmw_bo_del_detached_resource(gbo, res);
         vmw_bo_prio_add(gbo, res->used_prio);
 }
 
@@ -287,28 +289,35 @@ int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
  *
  * The pointer this pointed at by out_surf and out_buf needs to be null.
  */
-int vmw_user_lookup_handle(struct vmw_private *dev_priv,
-                           struct drm_file *filp,
-                           uint32_t handle,
-                           struct vmw_surface **out_surf,
-                           struct vmw_bo **out_buf)
+int vmw_user_object_lookup(struct vmw_private *dev_priv,
+                           struct drm_file *filp,
+                           u32 handle,
+                           struct vmw_user_object *uo)
 {
         struct ttm_object_file *tfile = vmw_fpriv(filp)->tfile;
         struct vmw_resource *res;
         int ret;
 
-        BUG_ON(*out_surf || *out_buf);
+        WARN_ON(uo->surface || uo->buffer);
 
         ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
                                               user_surface_converter,
                                               &res);
         if (!ret) {
-                *out_surf = vmw_res_to_srf(res);
+                uo->surface = vmw_res_to_srf(res);
                 return 0;
         }
 
-        *out_surf = NULL;
-        ret = vmw_user_bo_lookup(filp, handle, out_buf);
+        uo->surface = NULL;
+        ret = vmw_user_bo_lookup(filp, handle, &uo->buffer);
+        if (!ret && !uo->buffer->is_dumb) {
+                uo->surface = vmw_lookup_surface_for_buffer(dev_priv,
                                                             uo->buffer,
                                                             handle);
+                if (uo->surface)
+                        vmw_user_bo_unref(&uo->buffer);
+        }
+
         return ret;
 }
......
// SPDX-License-Identifier: GPL-2.0 OR MIT // SPDX-License-Identifier: GPL-2.0 OR MIT
/************************************************************************** /**************************************************************************
* *
* Copyright 2011-2023 VMware, Inc., Palo Alto, CA., USA * Copyright (c) 2011-2024 Broadcom. All Rights Reserved. The term
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the * copy of this software and associated documentation files (the
...@@ -240,7 +241,7 @@ static void vmw_sou_crtc_mode_set_nofb(struct drm_crtc *crtc) ...@@ -240,7 +241,7 @@ static void vmw_sou_crtc_mode_set_nofb(struct drm_crtc *crtc)
struct vmw_connector_state *vmw_conn_state; struct vmw_connector_state *vmw_conn_state;
int x, y; int x, y;
sou->buffer = vps->bo; sou->buffer = vmw_user_object_buffer(&vps->uo);
conn_state = sou->base.connector.state; conn_state = sou->base.connector.state;
vmw_conn_state = vmw_connector_state_to_vcs(conn_state); vmw_conn_state = vmw_connector_state_to_vcs(conn_state);
...@@ -376,10 +377,11 @@ vmw_sou_primary_plane_cleanup_fb(struct drm_plane *plane, ...@@ -376,10 +377,11 @@ vmw_sou_primary_plane_cleanup_fb(struct drm_plane *plane,
struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state); struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
struct drm_crtc *crtc = plane->state->crtc ? struct drm_crtc *crtc = plane->state->crtc ?
plane->state->crtc : old_state->crtc; plane->state->crtc : old_state->crtc;
struct vmw_bo *bo = vmw_user_object_buffer(&vps->uo);
if (vps->bo) if (bo)
vmw_bo_unpin(vmw_priv(crtc->dev), vps->bo, false); vmw_bo_unpin(vmw_priv(crtc->dev), bo, false);
vmw_bo_unreference(&vps->bo); vmw_user_object_unref(&vps->uo);
vps->bo_size = 0; vps->bo_size = 0;
vmw_du_plane_cleanup_fb(plane, old_state); vmw_du_plane_cleanup_fb(plane, old_state);
...@@ -411,9 +413,10 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane, ...@@ -411,9 +413,10 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
.bo_type = ttm_bo_type_device, .bo_type = ttm_bo_type_device,
.pin = true .pin = true
}; };
struct vmw_bo *bo = NULL;
if (!new_fb) { if (!new_fb) {
vmw_bo_unreference(&vps->bo); vmw_user_object_unref(&vps->uo);
vps->bo_size = 0; vps->bo_size = 0;
return 0; return 0;
...@@ -422,17 +425,17 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane, ...@@ -422,17 +425,17 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
bo_params.size = new_state->crtc_w * new_state->crtc_h * 4; bo_params.size = new_state->crtc_w * new_state->crtc_h * 4;
dev_priv = vmw_priv(crtc->dev); dev_priv = vmw_priv(crtc->dev);
if (vps->bo) { bo = vmw_user_object_buffer(&vps->uo);
if (bo) {
if (vps->bo_size == bo_params.size) { if (vps->bo_size == bo_params.size) {
/* /*
* Note that this might temporarily up the pin-count * Note that this might temporarily up the pin-count
* to 2, until cleanup_fb() is called. * to 2, until cleanup_fb() is called.
*/ */
return vmw_bo_pin_in_vram(dev_priv, vps->bo, return vmw_bo_pin_in_vram(dev_priv, bo, true);
true);
} }
vmw_bo_unreference(&vps->bo); vmw_user_object_unref(&vps->uo);
vps->bo_size = 0; vps->bo_size = 0;
} }
...@@ -442,7 +445,7 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane, ...@@ -442,7 +445,7 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
* resume the overlays, this is preferred to failing to alloc. * resume the overlays, this is preferred to failing to alloc.
*/ */
vmw_overlay_pause_all(dev_priv); vmw_overlay_pause_all(dev_priv);
ret = vmw_bo_create(dev_priv, &bo_params, &vps->bo); ret = vmw_gem_object_create(dev_priv, &bo_params, &vps->uo.buffer);
vmw_overlay_resume_all(dev_priv); vmw_overlay_resume_all(dev_priv);
if (ret) if (ret)
return ret; return ret;
...@@ -453,7 +456,7 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane, ...@@ -453,7 +456,7 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
* TTM already thinks the buffer is pinned, but make sure the * TTM already thinks the buffer is pinned, but make sure the
* pin_count is upped. * pin_count is upped.
*/ */
return vmw_bo_pin_in_vram(dev_priv, vps->bo, true); return vmw_bo_pin_in_vram(dev_priv, vps->uo.buffer, true);
} }
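
The prepare_fb() change keeps the reuse-or-reallocate shape of the old code, only switching the held object from a raw vmw_bo to a vmw_user_object. A compile-ready sketch of that shape, with simplified stand-ins for the buffer helpers (none of this is the kernel API):

#include <stdio.h>
#include <stdlib.h>

struct bo { size_t size; int pin_count; };

static int bo_pin(struct bo *bo) { bo->pin_count++; return 0; }
static void bo_unref(struct bo **bo) { free(*bo); *bo = NULL; }

static int prepare_fb(struct bo **held, size_t *held_size, size_t need)
{
	if (*held) {
		if (*held_size == need) {
			/* Same size: re-pin only. The pin count may sit
			 * at 2 until the cleanup path unpins once. */
			return bo_pin(*held);
		}
		/* Wrong size: drop the old object and fall through. */
		bo_unref(held);
		*held_size = 0;
	}

	*held = calloc(1, sizeof(**held));
	if (!*held)
		return -1;
	(*held)->size = need;
	*held_size = need;
	return bo_pin(*held);
}

int main(void)
{
	struct bo *held = NULL;
	size_t held_size = 0;

	prepare_fb(&held, &held_size, 640 * 480 * 4);	/* allocates + pins */
	prepare_fb(&held, &held_size, 640 * 480 * 4);	/* re-pins only */
	printf("pin_count=%d\n", held->pin_count);	/* 2, until cleanup unpins */
	bo_unref(&held);
	return 0;
}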
static uint32_t vmw_sou_bo_fifo_size(struct vmw_du_update_plane *update, static uint32_t vmw_sou_bo_fifo_size(struct vmw_du_update_plane *update,
...@@ -580,6 +583,7 @@ static uint32_t vmw_sou_surface_pre_clip(struct vmw_du_update_plane *update, ...@@ -580,6 +583,7 @@ static uint32_t vmw_sou_surface_pre_clip(struct vmw_du_update_plane *update,
{ {
struct vmw_kms_sou_dirty_cmd *blit = cmd; struct vmw_kms_sou_dirty_cmd *blit = cmd;
struct vmw_framebuffer_surface *vfbs; struct vmw_framebuffer_surface *vfbs;
struct vmw_surface *surf = NULL;
vfbs = container_of(update->vfb, typeof(*vfbs), base); vfbs = container_of(update->vfb, typeof(*vfbs), base);
...@@ -587,7 +591,8 @@ static uint32_t vmw_sou_surface_pre_clip(struct vmw_du_update_plane *update, ...@@ -587,7 +591,8 @@ static uint32_t vmw_sou_surface_pre_clip(struct vmw_du_update_plane *update,
blit->header.size = sizeof(blit->body) + sizeof(SVGASignedRect) * blit->header.size = sizeof(blit->body) + sizeof(SVGASignedRect) *
num_hits; num_hits;
blit->body.srcImage.sid = vfbs->surface->res.id; surf = vmw_user_object_surface(&vfbs->uo);
blit->body.srcImage.sid = surf->res.id;
blit->body.destScreenId = update->du->unit; blit->body.destScreenId = update->du->unit;
/* Update the source and destination bounding box later in post_clip */ /* Update the source and destination bounding box later in post_clip */
...@@ -1104,7 +1109,7 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv, ...@@ -1104,7 +1109,7 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
int ret; int ret;
if (!srf) if (!srf)
srf = &vfbs->surface->res; srf = &vmw_user_object_surface(&vfbs->uo)->res;
ret = vmw_validation_add_resource(&val_ctx, srf, 0, VMW_RES_DIRTY_NONE, ret = vmw_validation_add_resource(&val_ctx, srf, 0, VMW_RES_DIRTY_NONE,
NULL, NULL); NULL, NULL);
......
...@@ -75,7 +75,7 @@ vmw_surface_sync(struct vmw_private *vmw, ...@@ -75,7 +75,7 @@ vmw_surface_sync(struct vmw_private *vmw,
return ret; return ret;
} }
static int static void
compute_crc(struct drm_crtc *crtc, compute_crc(struct drm_crtc *crtc,
struct vmw_surface *surf, struct vmw_surface *surf,
u32 *crc) u32 *crc)
...@@ -101,8 +101,6 @@ compute_crc(struct drm_crtc *crtc, ...@@ -101,8 +101,6 @@ compute_crc(struct drm_crtc *crtc,
} }
vmw_bo_unmap(bo); vmw_bo_unmap(bo);
return 0;
} }
static void static void
...@@ -116,7 +114,6 @@ crc_generate_worker(struct work_struct *work) ...@@ -116,7 +114,6 @@ crc_generate_worker(struct work_struct *work)
u64 frame_start, frame_end; u64 frame_start, frame_end;
u32 crc32 = 0; u32 crc32 = 0;
struct vmw_surface *surf = 0; struct vmw_surface *surf = 0;
int ret;
spin_lock_irq(&du->vkms.crc_state_lock); spin_lock_irq(&du->vkms.crc_state_lock);
crc_pending = du->vkms.crc_pending; crc_pending = du->vkms.crc_pending;
...@@ -130,22 +127,24 @@ crc_generate_worker(struct work_struct *work) ...@@ -130,22 +127,24 @@ crc_generate_worker(struct work_struct *work)
return; return;
spin_lock_irq(&du->vkms.crc_state_lock); spin_lock_irq(&du->vkms.crc_state_lock);
surf = du->vkms.surface; surf = vmw_surface_reference(du->vkms.surface);
spin_unlock_irq(&du->vkms.crc_state_lock); spin_unlock_irq(&du->vkms.crc_state_lock);
if (surf) {
if (vmw_surface_sync(vmw, surf)) { if (vmw_surface_sync(vmw, surf)) {
drm_warn(crtc->dev, "CRC worker wasn't able to sync the crc surface!\n"); drm_warn(
crtc->dev,
"CRC worker wasn't able to sync the crc surface!\n");
return; return;
} }
ret = compute_crc(crtc, surf, &crc32); compute_crc(crtc, surf, &crc32);
if (ret) vmw_surface_unreference(&surf);
return; }
spin_lock_irq(&du->vkms.crc_state_lock); spin_lock_irq(&du->vkms.crc_state_lock);
frame_start = du->vkms.frame_start; frame_start = du->vkms.frame_start;
frame_end = du->vkms.frame_end; frame_end = du->vkms.frame_end;
crc_pending = du->vkms.crc_pending;
du->vkms.frame_start = 0; du->vkms.frame_start = 0;
du->vkms.frame_end = 0; du->vkms.frame_end = 0;
du->vkms.crc_pending = false; du->vkms.crc_pending = false;
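
The worker fix follows a common pattern: a pointer shared under a spinlock may be swapped at any time, so the worker takes its own reference while the lock is held and only operates on that private reference after dropping the lock; the vblank handler, which merely needs to know whether a surface exists, is demoted to a boolean for the same reason. A standalone sketch of the pattern, using a pthread mutex and a plain refcount as stand-ins for the kernel spinlock and vmw_surface_reference():

#include <pthread.h>
#include <stdio.h>

struct surface { int refs; };

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static struct surface *current_surface;	/* protected by state_lock */

/* Plain-int refcounting for the sketch; the kernel side uses proper
 * reference counting via vmw_surface_reference()/unreference(). */
static struct surface *surface_ref(struct surface *s) { if (s) s->refs++; return s; }
static void surface_unref(struct surface **s) { if (*s) (*s)->refs--; *s = NULL; }

static void crc_worker(void)
{
	struct surface *surf;

	pthread_mutex_lock(&state_lock);
	surf = surface_ref(current_surface);	/* pin before dropping the lock */
	pthread_mutex_unlock(&state_lock);

	if (!surf)
		return;

	/* surf stays valid here even if another thread swaps
	 * current_surface: our reference keeps it alive. */
	printf("computing CRC, refs=%d\n", surf->refs);
	surface_unref(&surf);
}

int main(void)
{
	struct surface s = { .refs = 1 };

	current_surface = &s;
	crc_worker();
	return 0;
}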
...@@ -164,7 +163,7 @@ vmw_vkms_vblank_simulate(struct hrtimer *timer) ...@@ -164,7 +163,7 @@ vmw_vkms_vblank_simulate(struct hrtimer *timer)
struct vmw_display_unit *du = container_of(timer, struct vmw_display_unit, vkms.timer); struct vmw_display_unit *du = container_of(timer, struct vmw_display_unit, vkms.timer);
struct drm_crtc *crtc = &du->crtc; struct drm_crtc *crtc = &du->crtc;
struct vmw_private *vmw = vmw_priv(crtc->dev); struct vmw_private *vmw = vmw_priv(crtc->dev);
struct vmw_surface *surf = NULL; bool has_surface = false;
u64 ret_overrun; u64 ret_overrun;
bool locked, ret; bool locked, ret;
...@@ -179,10 +178,10 @@ vmw_vkms_vblank_simulate(struct hrtimer *timer) ...@@ -179,10 +178,10 @@ vmw_vkms_vblank_simulate(struct hrtimer *timer)
WARN_ON(!ret); WARN_ON(!ret);
if (!locked) if (!locked)
return HRTIMER_RESTART; return HRTIMER_RESTART;
surf = du->vkms.surface; has_surface = du->vkms.surface != NULL;
vmw_vkms_unlock(crtc); vmw_vkms_unlock(crtc);
if (du->vkms.crc_enabled && surf) { if (du->vkms.crc_enabled && has_surface) {
u64 frame = drm_crtc_accurate_vblank_count(crtc); u64 frame = drm_crtc_accurate_vblank_count(crtc);
spin_lock(&du->vkms.crc_state_lock); spin_lock(&du->vkms.crc_state_lock);
...@@ -336,6 +335,8 @@ vmw_vkms_crtc_cleanup(struct drm_crtc *crtc) ...@@ -336,6 +335,8 @@ vmw_vkms_crtc_cleanup(struct drm_crtc *crtc)
{ {
struct vmw_display_unit *du = vmw_crtc_to_du(crtc); struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
if (du->vkms.surface)
vmw_surface_unreference(&du->vkms.surface);
WARN_ON(work_pending(&du->vkms.crc_generator_work)); WARN_ON(work_pending(&du->vkms.crc_generator_work));
hrtimer_cancel(&du->vkms.timer); hrtimer_cancel(&du->vkms.timer);
} }
...@@ -497,9 +498,12 @@ vmw_vkms_set_crc_surface(struct drm_crtc *crtc, ...@@ -497,9 +498,12 @@ vmw_vkms_set_crc_surface(struct drm_crtc *crtc,
struct vmw_display_unit *du = vmw_crtc_to_du(crtc); struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
struct vmw_private *vmw = vmw_priv(crtc->dev); struct vmw_private *vmw = vmw_priv(crtc->dev);
if (vmw->vkms_enabled) { if (vmw->vkms_enabled && du->vkms.surface != surf) {
WARN_ON(atomic_read(&du->vkms.atomic_lock) != VMW_VKMS_LOCK_MODESET); WARN_ON(atomic_read(&du->vkms.atomic_lock) != VMW_VKMS_LOCK_MODESET);
du->vkms.surface = surf; if (du->vkms.surface)
vmw_surface_unreference(&du->vkms.surface);
if (surf)
du->vkms.surface = vmw_surface_reference(surf);
} }
} }
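
vmw_vkms_set_crc_surface() now uses the usual reference-swap idiom, matched by the unreference added to vmw_vkms_crtc_cleanup(): the slot always owns exactly one reference to whatever it points at. A sketch with simplified stand-ins, not the kernel API:

#include <stddef.h>
#include <stdio.h>

struct surface { int refs; };

static struct surface *surface_ref(struct surface *s) { if (s) s->refs++; return s; }
static void surface_unref(struct surface **s) { if (*s) (*s)->refs--; *s = NULL; }

/* The slot owns exactly one reference to whatever it points at. */
static void set_crc_surface(struct surface **slot, struct surface *surf)
{
	if (*slot == surf)
		return;		/* avoid a pointless unref/ref cycle */
	if (*slot)
		surface_unref(slot);
	if (surf)
		*slot = surface_ref(surf);
}

int main(void)
{
	struct surface a = { .refs = 1 }, b = { .refs = 1 };
	struct surface *slot = NULL;

	set_crc_surface(&slot, &a);	/* a.refs == 2 */
	set_crc_surface(&slot, &b);	/* a.refs == 1, b.refs == 2 */
	set_crc_surface(&slot, NULL);	/* b.refs == 1, slot == NULL */
	printf("a=%d b=%d\n", a.refs, b.refs);
	return 0;
}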
......
...@@ -29,9 +29,11 @@ static int check_vgem(int fd) ...@@ -29,9 +29,11 @@ static int check_vgem(int fd)
version.name = name; version.name = name;
ret = ioctl(fd, DRM_IOCTL_VERSION, &version); ret = ioctl(fd, DRM_IOCTL_VERSION, &version);
if (ret) if (ret || version.name_len != 4)
return 0; return 0;
name[4] = '\0';
return !strcmp(name, "vgem"); return !strcmp(name, "vgem");
} }
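
The selftest fix hardens the driver-name check: DRM_IOCTL_VERSION copies at most name_len bytes without a terminating NUL, and reports the driver's full name length back in name_len, so the caller must both validate the length and terminate the buffer before strcmp(). A self-contained version of the fixed check, assuming the uapi <drm/drm.h> header is available:

#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm.h>	/* struct drm_version, DRM_IOCTL_VERSION */

/* Returns 1 iff fd belongs to vgem. Reject any driver whose name is
 * not exactly 4 bytes, and NUL-terminate ourselves since the ioctl
 * does not terminate the string it copies. */
static int is_vgem(int fd)
{
	char name[5] = "";
	struct drm_version version = {
		.name_len = 4,
		.name = name,
	};

	if (ioctl(fd, DRM_IOCTL_VERSION, &version) || version.name_len != 4)
		return 0;
	name[4] = '\0';
	return strcmp(name, "vgem") == 0;
}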
......