Commit 13289241 authored by Thomas Hellstrom

drm/vmwgfx: Remove the resource avail field

This field was previously used to prevent a resource from being looked up
before its constructor had finished. It was mainly intended for a
now-removed interface that allowed looking up a resource by its device id.

Currently all affected resources are added to the lookup mechanism (their
TTM prime object is initialized) late in the constructor, at a point where
it's OK to look up the resource.

This means we can change the device resource_lock to an ordinary spinlock
instead of an rwlock and remove a locking sequence during lookup.
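
For illustration, a minimal sketch of that ordering (the example_* names
are hypothetical, not actual driver functions; the idr usage and locking
mirror vmw_resource_alloc_id() in the diff below):

    /*
     * Minimal sketch, assuming hypothetical example_* names. Publishing
     * to the idr is the last step of construction, so a racing lookup
     * can only ever find a fully constructed resource and no avail
     * flag is needed; the short insertion is all the spinlock covers.
     */
    static int example_resource_init(struct vmw_private *dev_priv,
                                     struct vmw_resource *res)
    {
        int ret;

        /* Set up everything while the resource is still private. */
        res->hw_destroy = example_hw_destroy;

        idr_preload(GFP_KERNEL);
        spin_lock(&dev_priv->resource_lock);
        ret = idr_alloc(&dev_priv->res_idr[res->func->res_type], res, 1, 0,
                        GFP_NOWAIT);
        if (ret >= 0)
                res->id = ret;  /* now visible to lookup */
        spin_unlock(&dev_priv->resource_lock);
        idr_preload_end();

        return ret < 0 ? ret : 0;
    }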
Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Sinclair Yeh <syeh@vmware.com>
Reviewed-by: Deepak Rawat <drawat@vmware.com>
parent d76ce03e
@@ -217,9 +217,7 @@ static int vmw_gb_context_init(struct vmw_private *dev_priv,
}
}
- vmw_resource_activate(res, vmw_hw_context_destroy);
+ res->hw_destroy = vmw_hw_context_destroy;
return 0;
out_cotables:
@@ -274,7 +272,7 @@ static int vmw_context_init(struct vmw_private *dev_priv,
vmw_fifo_commit(dev_priv, sizeof(*cmd));
vmw_fifo_resource_inc(dev_priv);
- vmw_resource_activate(res, vmw_hw_context_destroy);
+ res->hw_destroy = vmw_hw_context_destroy;
return 0;
out_early:
@@ -615,7 +615,7 @@ struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
vcotbl->type = type;
vcotbl->ctx = ctx;
- vmw_resource_activate(&vcotbl->res, vmw_hw_cotable_destroy);
+ vcotbl->res.hw_destroy = vmw_hw_cotable_destroy;
return &vcotbl->res;
@@ -667,8 +667,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
mutex_init(&dev_priv->binding_mutex);
mutex_init(&dev_priv->requested_layout_mutex);
mutex_init(&dev_priv->global_kms_state_mutex);
- rwlock_init(&dev_priv->resource_lock);
ttm_lock_init(&dev_priv->reservation_sem);
+ spin_lock_init(&dev_priv->resource_lock);
spin_lock_init(&dev_priv->hw_lock);
spin_lock_init(&dev_priv->waiter_lock);
spin_lock_init(&dev_priv->cap_lock);
@@ -113,21 +113,49 @@ struct vmw_validate_buffer {
};
struct vmw_res_func;
+ /**
+ * struct vmw_resource - base class for hardware resources
+ *
+ * @kref: For refcounting.
+ * @dev_priv: Pointer to the device private for this resource. Immutable.
+ * @id: Device id. Protected by @dev_priv::resource_lock.
+ * @backup_size: Backup buffer size. Immutable.
+ * @res_dirty: Resource contains data not yet in the backup buffer. Protected
+ * by resource reserved.
+ * @backup_dirty: Backup buffer contains data not yet in the HW resource.
+ * Protected by resource reserved.
+ * @backup: The backup buffer if any. Protected by resource reserved.
+ * @backup_offset: Offset into the backup buffer if any. Protected by resource
+ * reserved. Note that only a few resource types can have a @backup_offset
+ * different from zero.
+ * @pin_count: The pin count for this resource. A pinned resource has a
+ * pin-count greater than zero. It is not on the resource LRU lists and its
+ * backup buffer is pinned. Hence it can't be evicted.
+ * @func: Method vtable for this resource. Immutable.
+ * @lru_head: List head for the LRU list. Protected by @dev_priv::resource_lock.
+ * @mob_head: List head for the MOB backup list. Protected by @backup reserved.
+ * @binding_head: List head for the context binding list. Protected by
+ * the @dev_priv::binding_mutex.
+ * @res_free: The resource destructor.
+ * @hw_destroy: Callback to destroy the resource on the device, as part of
+ * resource destruction.
+ */
struct vmw_resource {
struct kref kref;
struct vmw_private *dev_priv;
int id;
- bool avail;
unsigned long backup_size;
- bool res_dirty; /* Protected by backup buffer reserved */
- bool backup_dirty; /* Protected by backup buffer reserved */
+ bool res_dirty;
+ bool backup_dirty;
struct vmw_buffer_object *backup;
unsigned long backup_offset;
- unsigned long pin_count; /* Protected by resource reserved */
+ unsigned long pin_count;
const struct vmw_res_func *func;
- struct list_head lru_head; /* Protected by the resource lock */
- struct list_head mob_head; /* Protected by @backup reserved */
- struct list_head binding_head; /* Protected by binding_mutex */
+ struct list_head lru_head;
+ struct list_head mob_head;
+ struct list_head binding_head;
void (*res_free) (struct vmw_resource *res);
void (*hw_destroy) (struct vmw_resource *res);
};
@@ -471,7 +499,7 @@ struct vmw_private {
* Context and surface management.
*/
- rwlock_t resource_lock;
+ spinlock_t resource_lock;
struct idr res_idr[vmw_res_max];
/*
* Block lastclose from racing with firstopen.
@@ -58,11 +58,11 @@ void vmw_resource_release_id(struct vmw_resource *res)
struct vmw_private *dev_priv = res->dev_priv;
struct idr *idr = &dev_priv->res_idr[res->func->res_type];
- write_lock(&dev_priv->resource_lock);
+ spin_lock(&dev_priv->resource_lock);
if (res->id != -1)
idr_remove(idr, res->id);
res->id = -1;
- write_unlock(&dev_priv->resource_lock);
+ spin_unlock(&dev_priv->resource_lock);
}
static void vmw_resource_release(struct kref *kref)
@@ -73,10 +73,9 @@ static void vmw_resource_release(struct kref *kref)
int id;
struct idr *idr = &dev_priv->res_idr[res->func->res_type];
- write_lock(&dev_priv->resource_lock);
- res->avail = false;
+ spin_lock(&dev_priv->resource_lock);
list_del_init(&res->lru_head);
- write_unlock(&dev_priv->resource_lock);
+ spin_unlock(&dev_priv->resource_lock);
if (res->backup) {
struct ttm_buffer_object *bo = &res->backup->base;
@@ -108,10 +107,10 @@ static void vmw_resource_release(struct kref *kref)
else
kfree(res);
- write_lock(&dev_priv->resource_lock);
+ spin_lock(&dev_priv->resource_lock);
if (id != -1)
idr_remove(idr, id);
- write_unlock(&dev_priv->resource_lock);
+ spin_unlock(&dev_priv->resource_lock);
}
void vmw_resource_unreference(struct vmw_resource **p_res)
@@ -140,13 +139,13 @@ int vmw_resource_alloc_id(struct vmw_resource *res)
BUG_ON(res->id != -1);
idr_preload(GFP_KERNEL);
- write_lock(&dev_priv->resource_lock);
+ spin_lock(&dev_priv->resource_lock);
ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
if (ret >= 0)
res->id = ret;
- write_unlock(&dev_priv->resource_lock);
+ spin_unlock(&dev_priv->resource_lock);
idr_preload_end();
return ret < 0 ? ret : 0;
}
@@ -170,7 +169,6 @@ int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
kref_init(&res->kref);
res->hw_destroy = NULL;
res->res_free = res_free;
- res->avail = false;
res->dev_priv = dev_priv;
res->func = func;
INIT_LIST_HEAD(&res->lru_head);
@@ -187,28 +185,6 @@ int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
return vmw_resource_alloc_id(res);
}
- /**
- * vmw_resource_activate
- *
- * @res: Pointer to the newly created resource
- * @hw_destroy: Destroy function. NULL if none.
- *
- * Activate a resource after the hardware has been made aware of it.
- * Set tye destroy function to @destroy. Typically this frees the
- * resource and destroys the hardware resources associated with it.
- * Activate basically means that the function vmw_resource_lookup will
- * find it.
- */
- void vmw_resource_activate(struct vmw_resource *res,
- void (*hw_destroy) (struct vmw_resource *))
- {
- struct vmw_private *dev_priv = res->dev_priv;
- write_lock(&dev_priv->resource_lock);
- res->avail = true;
- res->hw_destroy = hw_destroy;
- write_unlock(&dev_priv->resource_lock);
- }
/**
* vmw_user_resource_lookup_handle - lookup a struct resource from a
@@ -243,15 +219,10 @@ int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
goto out_bad_resource;
res = converter->base_obj_to_res(base);
- read_lock(&dev_priv->resource_lock);
- if (!res->avail || res->res_free != converter->res_free) {
- read_unlock(&dev_priv->resource_lock);
+ if (res->res_free != converter->res_free)
goto out_bad_resource;
- }
kref_get(&res->kref);
- read_unlock(&dev_priv->resource_lock);
*p_res = res;
ret = 0;
@@ -422,10 +393,10 @@ void vmw_resource_unreserve(struct vmw_resource *res,
if (!res->func->may_evict || res->id == -1 || res->pin_count)
return;
- write_lock(&dev_priv->resource_lock);
+ spin_lock(&dev_priv->resource_lock);
list_add_tail(&res->lru_head,
&res->dev_priv->res_lru[res->func->res_type]);
- write_unlock(&dev_priv->resource_lock);
+ spin_unlock(&dev_priv->resource_lock);
}
/**
@@ -504,9 +475,9 @@ int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
struct vmw_private *dev_priv = res->dev_priv;
int ret;
- write_lock(&dev_priv->resource_lock);
+ spin_lock(&dev_priv->resource_lock);
list_del_init(&res->lru_head);
- write_unlock(&dev_priv->resource_lock);
+ spin_unlock(&dev_priv->resource_lock);
if (res->func->needs_backup && res->backup == NULL &&
!no_backup) {
@@ -619,12 +590,12 @@ int vmw_resource_validate(struct vmw_resource *res, bool intr)
if (likely(ret != -EBUSY))
break;
- write_lock(&dev_priv->resource_lock);
+ spin_lock(&dev_priv->resource_lock);
if (list_empty(lru_list) || !res->func->may_evict) {
DRM_ERROR("Out of device device resources "
"for %s.\n", res->func->type_name);
ret = -EBUSY;
- write_unlock(&dev_priv->resource_lock);
+ spin_unlock(&dev_priv->resource_lock);
break;
}
@@ -633,14 +604,14 @@ int vmw_resource_validate(struct vmw_resource *res, bool intr)
lru_head));
list_del_init(&evict_res->lru_head);
- write_unlock(&dev_priv->resource_lock);
+ spin_unlock(&dev_priv->resource_lock);
/* Trylock backup buffers with a NULL ticket. */
ret = vmw_resource_do_evict(NULL, evict_res, intr);
if (unlikely(ret != 0)) {
- write_lock(&dev_priv->resource_lock);
+ spin_lock(&dev_priv->resource_lock);
list_add_tail(&evict_res->lru_head, lru_list);
- write_unlock(&dev_priv->resource_lock);
+ spin_unlock(&dev_priv->resource_lock);
if (ret == -ERESTARTSYS ||
++err_count > VMW_RES_EVICT_ERR_COUNT) {
vmw_resource_unreference(&evict_res);
@@ -822,7 +793,7 @@ static void vmw_resource_evict_type(struct vmw_private *dev_priv,
struct ww_acquire_ctx ticket;
do {
- write_lock(&dev_priv->resource_lock);
+ spin_lock(&dev_priv->resource_lock);
if (list_empty(lru_list))
goto out_unlock;
@@ -831,14 +802,14 @@ static void vmw_resource_evict_type(struct vmw_private *dev_priv,
list_first_entry(lru_list, struct vmw_resource,
lru_head));
list_del_init(&evict_res->lru_head);
- write_unlock(&dev_priv->resource_lock);
+ spin_unlock(&dev_priv->resource_lock);
/* Wait lock backup buffers with a ticket. */
ret = vmw_resource_do_evict(&ticket, evict_res, false);
if (unlikely(ret != 0)) {
- write_lock(&dev_priv->resource_lock);
+ spin_lock(&dev_priv->resource_lock);
list_add_tail(&evict_res->lru_head, lru_list);
- write_unlock(&dev_priv->resource_lock);
+ spin_unlock(&dev_priv->resource_lock);
if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
vmw_resource_unreference(&evict_res);
return;
@@ -849,7 +820,7 @@ static void vmw_resource_evict_type(struct vmw_private *dev_priv,
} while (1);
out_unlock:
- write_unlock(&dev_priv->resource_lock);
+ spin_unlock(&dev_priv->resource_lock);
}
/**
@@ -120,8 +120,6 @@ int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
bool delay_id,
void (*res_free) (struct vmw_resource *res),
const struct vmw_res_func *func);
- void vmw_resource_activate(struct vmw_resource *res,
- void (*hw_destroy) (struct vmw_resource *));
int
vmw_simple_resource_create_ioctl(struct drm_device *dev,
void *data,
@@ -186,7 +186,7 @@ static int vmw_gb_shader_init(struct vmw_private *dev_priv,
shader->num_input_sig = num_input_sig;
shader->num_output_sig = num_output_sig;
- vmw_resource_activate(res, vmw_hw_shader_destroy);
+ res->hw_destroy = vmw_hw_shader_destroy;
return 0;
}
@@ -656,7 +656,7 @@ int vmw_dx_shader_add(struct vmw_cmdbuf_res_manager *man,
goto out_resource_init;
res->id = shader->id;
- vmw_resource_activate(res, vmw_hw_shader_destroy);
+ res->hw_destroy = vmw_hw_shader_destroy;
out_resource_init:
vmw_resource_unreference(&res);
@@ -81,7 +81,7 @@ static int vmw_simple_resource_init(struct vmw_private *dev_priv,
return ret;
}
- vmw_resource_activate(&simple->res, simple->func->hw_destroy);
+ simple->res.hw_destroy = simple->func->hw_destroy;
return 0;
}
@@ -386,7 +386,7 @@ int vmw_view_add(struct vmw_cmdbuf_res_manager *man,
goto out_resource_init;
res->id = view->view_id;
- vmw_resource_activate(res, vmw_hw_view_destroy);
+ res->hw_destroy = vmw_hw_view_destroy;
out_resource_init:
vmw_resource_unreference(&res);
@@ -614,7 +614,7 @@ static int vmw_surface_init(struct vmw_private *dev_priv,
*/
INIT_LIST_HEAD(&srf->view_list);
- vmw_resource_activate(res, vmw_hw_surface_destroy);
+ res->hw_destroy = vmw_hw_surface_destroy;
return ret;
}