Commit 9b590783 authored by Thomas Hellstrom

drm/vmwgfx: Avoid cmdbuf alloc sleeping if !TASK_RUNNING

If the command buffer pool is out of space, the code waits until space becomes
available. However, since the wait_event() condition code tries to allocate a
range-manager node while the task is !TASK_RUNNING, we get a kernel warning.

Avoid this by pre-allocating the mm node before waiting. This is probably also
more efficient. A minimal sketch of the underlying pattern follows below.
Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Sinclair Yeh <syeh@vmware.com>
parent 6bf6bf03
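The sketch below is not part of the patch; it only illustrates, under made-up
names (demo_pool, demo_node, demo_try_claim and demo_alloc are all hypothetical),
why allocating inside a wait_event() condition is a problem and what the
pre-allocation pattern looks like. Inside the wait loop, wait_event() sets the
task state to TASK_UNINTERRUPTIBLE before re-evaluating its condition, so a
condition helper that can sleep (e.g. a GFP_KERNEL kzalloc()) runs while the
task is not in TASK_RUNNING and trips the "do not call blocking ops when
!TASK_RUNNING" warning.

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

/* All demo_* names are hypothetical; they do not exist in the driver. */
struct demo_node {
	struct list_head link;
};

struct demo_pool {
	spinlock_t lock;
	unsigned int free_slots;	/* slots currently available */
	struct list_head used;		/* nodes that have claimed a slot */
	wait_queue_head_t alloc_queue;	/* woken whenever a slot is freed */
};

/*
 * Non-sleeping condition helper: safe to use as a wait_event() condition,
 * analogous to how the patched vmw_cmdbuf_try_alloc() only does a memset()
 * and a range-manager insert under a spinlock.
 */
static bool demo_try_claim(struct demo_pool *pool, struct demo_node *node)
{
	bool claimed;

	spin_lock_bh(&pool->lock);
	claimed = pool->free_slots > 0;
	if (claimed) {
		pool->free_slots--;
		list_add_tail(&node->link, &pool->used);
	}
	spin_unlock_bh(&pool->lock);

	return claimed;
}

/*
 * The caller performs the (possibly sleeping) GFP_KERNEL allocation *before*
 * waiting.  Doing the kzalloc() inside demo_try_claim() instead would run it
 * with the task state already set and trigger the warning.
 */
static struct demo_node *demo_alloc(struct demo_pool *pool)
{
	struct demo_node *node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (!node)
		return NULL;

	wait_event(pool->alloc_queue, demo_try_claim(pool, node));
	return node;
}

The patch applies the same split: vmw_cmdbuf_alloc_space() now receives a
pre-allocated (in fact embedded) drm_mm_node, and vmw_cmdbuf_try_alloc() only
clears it and inserts it into the range manager under man->lock.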
@@ -33,7 +33,8 @@
  * multiple of the DMA pool allocation size.
  */
 #define VMW_CMDBUF_INLINE_ALIGN 64
-#define VMW_CMDBUF_INLINE_SIZE (1024 - VMW_CMDBUF_INLINE_ALIGN)
+#define VMW_CMDBUF_INLINE_SIZE \
+	(1024 - ALIGN(sizeof(SVGACBHeader), VMW_CMDBUF_INLINE_ALIGN))
 
 /**
  * struct vmw_cmdbuf_context - Command buffer context queues
@@ -145,7 +146,7 @@ struct vmw_cmdbuf_header {
 	SVGACBHeader *cb_header;
 	SVGACBContext cb_context;
 	struct list_head list;
-	struct drm_mm_node *node;
+	struct drm_mm_node node;
 	dma_addr_t handle;
 	u8 *cmd;
 	size_t size;
@@ -169,13 +170,13 @@ struct vmw_cmdbuf_dheader {
  * struct vmw_cmdbuf_alloc_info - Command buffer space allocation metadata
  *
  * @page_size: Size of requested command buffer space in pages.
- * @node: The range manager node if allocation succeeded.
- * @ret: Error code if failure. Otherwise 0.
+ * @node: Pointer to the range manager node.
+ * @done: True if this allocation has succeeded.
  */
 struct vmw_cmdbuf_alloc_info {
 	size_t page_size;
 	struct drm_mm_node *node;
-	int ret;
+	bool done;
 };
 
 /* Loop over each context in the command buffer manager. */
@@ -253,9 +254,7 @@ static void __vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
 		return;
 	}
 
-	drm_mm_remove_node(header->node);
-	kfree(header->node);
-	header->node = NULL;
+	drm_mm_remove_node(&header->node);
 	wake_up_all(&man->alloc_queue);
 	if (header->cb_header)
 		dma_pool_free(man->headers, header->cb_header,
@@ -669,32 +668,26 @@ static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man,
 {
 	int ret;
 
-	if (info->node)
+	if (info->done)
 		return true;
 
-	info->node = kzalloc(sizeof(*info->node), GFP_KERNEL);
-	if (!info->node) {
-		info->ret = -ENOMEM;
-		return true;
-	}
-
+	memset(info->node, 0, sizeof(*info->node));
 	spin_lock_bh(&man->lock);
-	ret = drm_mm_insert_node_generic(&man->mm, info->node, info->page_size, 0, 0,
+	ret = drm_mm_insert_node_generic(&man->mm, info->node, info->page_size,
+					 0, 0,
 					 DRM_MM_SEARCH_DEFAULT,
 					 DRM_MM_CREATE_DEFAULT);
 	spin_unlock_bh(&man->lock);
-	if (ret) {
-		kfree(info->node);
-		info->node = NULL;
-	}
+	info->done = !ret;
 
-	return !!info->node;
+	return info->done;
 }
 
 /**
  * vmw_cmdbuf_alloc_space - Allocate buffer space from the main pool.
  *
  * @man: The command buffer manager.
+ * @node: Pointer to pre-allocated range-manager node.
  * @size: The size of the allocation.
  * @interruptible: Whether to sleep interruptible while waiting for space.
  *
@@ -702,15 +695,16 @@ static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man,
  * no space available ATM, it turns on IRQ handling and sleeps waiting for it to
  * become available.
  */
-static struct drm_mm_node *vmw_cmdbuf_alloc_space(struct vmw_cmdbuf_man *man,
-						  size_t size,
-						  bool interruptible)
+static int vmw_cmdbuf_alloc_space(struct vmw_cmdbuf_man *man,
+				  struct drm_mm_node *node,
+				  size_t size,
+				  bool interruptible)
 {
 	struct vmw_cmdbuf_alloc_info info;
 
 	info.page_size = PAGE_ALIGN(size) >> PAGE_SHIFT;
-	info.node = NULL;
-	info.ret = 0;
+	info.node = node;
+	info.done = false;
 
 	/*
 	 * To prevent starvation of large requests, only one allocating call
@@ -718,22 +712,14 @@ static struct drm_mm_node *vmw_cmdbuf_alloc_space(struct vmw_cmdbuf_man *man,
 	 */
 	if (interruptible) {
 		if (mutex_lock_interruptible(&man->space_mutex))
-			return ERR_PTR(-ERESTARTSYS);
+			return -ERESTARTSYS;
 	} else {
 		mutex_lock(&man->space_mutex);
 	}
 
 	/* Try to allocate space without waiting. */
-	(void) vmw_cmdbuf_try_alloc(man, &info);
-	if (info.ret && !info.node) {
-		mutex_unlock(&man->space_mutex);
-		return ERR_PTR(info.ret);
-	}
-
-	if (info.node) {
-		mutex_unlock(&man->space_mutex);
-		return info.node;
-	}
+	if (vmw_cmdbuf_try_alloc(man, &info))
+		goto out_unlock;
 
 	vmw_generic_waiter_add(man->dev_priv,
 			       SVGA_IRQFLAG_COMMAND_BUFFER,
@@ -749,7 +735,7 @@ static struct drm_mm_node *vmw_cmdbuf_alloc_space(struct vmw_cmdbuf_man *man,
 				(man->dev_priv, SVGA_IRQFLAG_COMMAND_BUFFER,
 				 &man->dev_priv->cmdbuf_waiters);
 			mutex_unlock(&man->space_mutex);
-			return ERR_PTR(ret);
+			return ret;
 		}
 	} else {
 		wait_event(man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
@@ -757,11 +743,11 @@ static struct drm_mm_node *vmw_cmdbuf_alloc_space(struct vmw_cmdbuf_man *man,
 	vmw_generic_waiter_remove(man->dev_priv,
 				  SVGA_IRQFLAG_COMMAND_BUFFER,
 				  &man->dev_priv->cmdbuf_waiters);
+out_unlock:
 	mutex_unlock(&man->space_mutex);
-	if (info.ret && !info.node)
-		return ERR_PTR(info.ret);
 
-	return info.node;
+	return 0;
 }
 
 /**
@@ -785,10 +771,10 @@ static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man,
 	if (!man->has_pool)
 		return -ENOMEM;
 
-	header->node = vmw_cmdbuf_alloc_space(man, size, interruptible);
-	if (IS_ERR(header->node))
-		return PTR_ERR(header->node);
+	ret = vmw_cmdbuf_alloc_space(man, &header->node, size, interruptible);
+	if (ret)
+		return ret;
 
 	header->cb_header = dma_pool_alloc(man->headers, GFP_KERNEL,
 					   &header->handle);
@@ -797,9 +783,9 @@ static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man,
 		goto out_no_cb_header;
 	}
 
-	header->size = header->node->size << PAGE_SHIFT;
+	header->size = header->node.size << PAGE_SHIFT;
 	cb_hdr = header->cb_header;
-	offset = header->node->start << PAGE_SHIFT;
+	offset = header->node.start << PAGE_SHIFT;
 	header->cmd = man->map + offset;
 	memset(cb_hdr, 0, sizeof(*cb_hdr));
 	if (man->using_mob) {
@@ -814,9 +800,8 @@ static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man,
 
 out_no_cb_header:
 	spin_lock_bh(&man->lock);
-	drm_mm_remove_node(header->node);
+	drm_mm_remove_node(&header->node);
 	spin_unlock_bh(&man->lock);
-	kfree(header->node);
 
 	return ret;
 }