Commit 7846b12f authored by Dave Airlie

Merge branch 'drm-vmwgfx-next' of git://people.freedesktop.org/~syeh/repos_linux into drm-next

vmwgfx add fence fd support.

* 'drm-vmwgfx-next' of git://people.freedesktop.org/~syeh/repos_linux:
  drm/vmwgfx: Bump the version for fence FD support
  drm/vmwgfx: Add export fence to file descriptor support
  drm/vmwgfx: Add support for imported Fence File Descriptor
  drm/vmwgfx: Prepare to support fence fd
  drm/vmwgfx: Fix incorrect command header offset at restart
  drm/vmwgfx: Support the NOP_ERROR command
  drm/vmwgfx: Restart command buffers after errors
  drm/vmwgfx: Move irq bottom half processing to threads
  drm/vmwgfx: Don't use drm_irq_[un]install
parents 7ebdb0dd d78acfe9
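
The series wires fence FD export and import into the DRM_VMW_EXECBUF ioctl (see the uapi hunks at the end of the diff: two new flag bits, the imported_fence_fd argument, and the fd field in struct drm_vmw_fence_rep). Below is a rough, hypothetical user-space sketch of the export path. It assumes libdrm's drmCommandWrite() and the DRM_VMW_EXECBUF command index from the installed vmwgfx uapi header; the command-buffer contents are elided and the helper name is made up for illustration.

```c
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <drm/vmwgfx_drm.h>

/*
 * Hypothetical helper: submit a command buffer and ask the kernel to
 * export the completion fence as a sync_file FD.
 * Returns the FD (>= 0) on success, or a negative errno from the ioctl.
 */
static int vmw_submit_export_fence(int drm_fd, void *cmds, uint32_t cmd_size)
{
        struct drm_vmw_fence_rep fence_rep;
        struct drm_vmw_execbuf_arg arg;
        int ret;

        memset(&arg, 0, sizeof(arg));
        memset(&fence_rep, 0, sizeof(fence_rep));

        arg.commands = (uintptr_t)cmds;
        arg.command_size = cmd_size;
        arg.version = DRM_VMW_EXECBUF_VERSION;
        arg.flags = DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD;
        arg.context_handle = (uint32_t)-1;      /* no DX context */
        arg.fence_rep = (uintptr_t)&fence_rep;

        ret = drmCommandWrite(drm_fd, DRM_VMW_EXECBUF, &arg, sizeof(arg));
        if (ret)
                return ret;

        /* The kernel filled in the exported sync_file FD (-1 if it failed). */
        return fence_rep.fd;
}
```

To wait on a fence produced by another driver, the same ioctl would instead set DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD and pass the foreign FD in imported_fence_fd; the kernel then waits on it before processing the command buffer.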
(The diff for one file in this merge is collapsed and not shown below.)
@@ -36,7 +36,6 @@
 #include <drm/ttm/ttm_module.h>
 #include <linux/dma_remapping.h>
 
-#define VMWGFX_DRIVER_NAME "vmwgfx"
 #define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
 #define VMWGFX_CHIP_SVGAII 0
 #define VMW_FB_RESERVATION 0
@@ -825,7 +824,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
         }
 
         if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
-                ret = drm_irq_install(dev, dev->pdev->irq);
+                ret = vmw_irq_install(dev, dev->pdev->irq);
                 if (ret != 0) {
                         DRM_ERROR("Failed installing irq: %d\n", ret);
                         goto out_no_irq;
@@ -937,7 +936,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
         vmw_fence_manager_takedown(dev_priv->fman);
 out_no_fman:
         if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
-                drm_irq_uninstall(dev_priv->dev);
+                vmw_irq_uninstall(dev_priv->dev);
 out_no_irq:
         if (dev_priv->stealth)
                 pci_release_region(dev->pdev, 2);
@@ -990,7 +989,7 @@ static void vmw_driver_unload(struct drm_device *dev)
         vmw_release_device_late(dev_priv);
         vmw_fence_manager_takedown(dev_priv->fman);
         if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
-                drm_irq_uninstall(dev_priv->dev);
+                vmw_irq_uninstall(dev_priv->dev);
         if (dev_priv->stealth)
                 pci_release_region(dev->pdev, 2);
         else
@@ -1516,10 +1515,6 @@ static struct drm_driver driver = {
         .load = vmw_driver_load,
         .unload = vmw_driver_unload,
         .lastclose = vmw_lastclose,
-        .irq_preinstall = vmw_irq_preinstall,
-        .irq_postinstall = vmw_irq_postinstall,
-        .irq_uninstall = vmw_irq_uninstall,
-        .irq_handler = vmw_irq_handler,
         .get_vblank_counter = vmw_get_vblank_counter,
         .enable_vblank = vmw_enable_vblank,
         .disable_vblank = vmw_disable_vblank,
...
@@ -40,10 +40,12 @@
 #include <drm/ttm/ttm_execbuf_util.h>
 #include <drm/ttm/ttm_module.h>
 #include "vmwgfx_fence.h"
+#include <linux/sync_file.h>
 
-#define VMWGFX_DRIVER_DATE "20170607"
+#define VMWGFX_DRIVER_NAME "vmwgfx"
+#define VMWGFX_DRIVER_DATE "20170612"
 #define VMWGFX_DRIVER_MAJOR 2
-#define VMWGFX_DRIVER_MINOR 13
+#define VMWGFX_DRIVER_MINOR 14
 #define VMWGFX_DRIVER_PATCHLEVEL 0
 #define VMWGFX_FILE_PAGE_OFFSET 0x00100000
 #define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
@@ -351,6 +353,12 @@ struct vmw_otable_batch {
         struct ttm_buffer_object *otable_bo;
 };
 
+enum {
+        VMW_IRQTHREAD_FENCE,
+        VMW_IRQTHREAD_CMDBUF,
+        VMW_IRQTHREAD_MAX
+};
+
 struct vmw_private {
         struct ttm_bo_device bdev;
         struct ttm_bo_global_ref bo_global_ref;
@@ -529,6 +537,7 @@ struct vmw_private {
         struct vmw_otable_batch otable_batch;
 
         struct vmw_cmdbuf_man *cman;
+        DECLARE_BITMAP(irqthread_pending, VMW_IRQTHREAD_MAX);
 };
 
 static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
@@ -561,24 +570,21 @@ static inline struct vmw_master *vmw_master(struct drm_master *master)
 static inline void vmw_write(struct vmw_private *dev_priv,
                              unsigned int offset, uint32_t value)
 {
-        unsigned long irq_flags;
-
-        spin_lock_irqsave(&dev_priv->hw_lock, irq_flags);
+        spin_lock(&dev_priv->hw_lock);
         outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
         outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT);
-        spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags);
+        spin_unlock(&dev_priv->hw_lock);
 }
 
 static inline uint32_t vmw_read(struct vmw_private *dev_priv,
                                 unsigned int offset)
 {
-        unsigned long irq_flags;
         u32 val;
 
-        spin_lock_irqsave(&dev_priv->hw_lock, irq_flags);
+        spin_lock(&dev_priv->hw_lock);
         outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
         val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT);
-        spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags);
+        spin_unlock(&dev_priv->hw_lock);
 
         return val;
 }
@@ -821,7 +827,8 @@ extern int vmw_execbuf_process(struct drm_file *file_priv,
                                uint32_t dx_context_handle,
                                struct drm_vmw_fence_rep __user
                                *user_fence_rep,
-                               struct vmw_fence_obj **out_fence);
+                               struct vmw_fence_obj **out_fence,
+                               uint32_t flags);
 extern void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
                                             struct vmw_fence_obj *fence);
 extern void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv);
@@ -836,23 +843,23 @@ extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
                                         struct drm_vmw_fence_rep __user
                                         *user_fence_rep,
                                         struct vmw_fence_obj *fence,
-                                        uint32_t fence_handle);
+                                        uint32_t fence_handle,
+                                        int32_t out_fence_fd,
+                                        struct sync_file *sync_file);
 extern int vmw_validate_single_buffer(struct vmw_private *dev_priv,
                                       struct ttm_buffer_object *bo,
                                       bool interruptible,
                                       bool validate_as_mob);
+bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd);
 
 /**
  * IRQs and wating - vmwgfx_irq.c
  */
 
-extern irqreturn_t vmw_irq_handler(int irq, void *arg);
 extern int vmw_wait_seqno(struct vmw_private *dev_priv, bool lazy,
                           uint32_t seqno, bool interruptible,
                           unsigned long timeout);
-extern void vmw_irq_preinstall(struct drm_device *dev);
-extern int vmw_irq_postinstall(struct drm_device *dev);
+extern int vmw_irq_install(struct drm_device *dev, int irq);
 extern void vmw_irq_uninstall(struct drm_device *dev);
 extern bool vmw_seqno_passed(struct vmw_private *dev_priv,
                              uint32_t seqno);
@@ -1150,13 +1157,13 @@ extern void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
 extern void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
                               struct vmw_cmdbuf_header *header,
                               bool flush);
-extern void vmw_cmdbuf_tasklet_schedule(struct vmw_cmdbuf_man *man);
 extern void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
                               size_t size, bool interruptible,
                               struct vmw_cmdbuf_header **p_header);
 extern void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header);
 extern int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
                                 bool interruptible);
+extern void vmw_cmdbuf_irqthread(struct vmw_cmdbuf_man *man);
 
 /**
...
@@ -24,6 +24,7 @@
  * USE OR OTHER DEALINGS IN THE SOFTWARE.
  *
  **************************************************************************/
+#include <linux/sync_file.h>
 
 #include "vmwgfx_drv.h"
 #include "vmwgfx_reg.h"
@@ -112,11 +113,12 @@ struct vmw_cmd_entry {
         bool user_allow;
         bool gb_disable;
         bool gb_enable;
+        const char *cmd_name;
 };
 
 #define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable) \
         [(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
-                                       (_gb_disable), (_gb_enable)}
+                                       (_gb_disable), (_gb_enable), #_cmd}
 
 static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
                                         struct vmw_sw_context *sw_context,
@@ -3302,6 +3304,8 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
                     true, false, true),
         VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
                     true, false, true),
+        VMW_CMD_DEF(SVGA_3D_CMD_NOP_ERROR, &vmw_cmd_ok,
+                    true, false, true),
         VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
                     false, false, true),
         VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
@@ -3469,6 +3473,51 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
                     true, false, true),
 };
 
+bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd)
+{
+        u32 cmd_id = ((u32 *) buf)[0];
+
+        if (cmd_id >= SVGA_CMD_MAX) {
+                SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
+                const struct vmw_cmd_entry *entry;
+
+                *size = header->size + sizeof(SVGA3dCmdHeader);
+                cmd_id = header->id;
+                if (cmd_id >= SVGA_3D_CMD_MAX)
+                        return false;
+
+                cmd_id -= SVGA_3D_CMD_BASE;
+
+                entry = &vmw_cmd_entries[cmd_id];
+                *cmd = entry->cmd_name;
+                return true;
+        }
+
+        switch (cmd_id) {
+        case SVGA_CMD_UPDATE:
+                *cmd = "SVGA_CMD_UPDATE";
+                *size = sizeof(u32) + sizeof(SVGAFifoCmdUpdate);
+                break;
+        case SVGA_CMD_DEFINE_GMRFB:
+                *cmd = "SVGA_CMD_DEFINE_GMRFB";
+                *size = sizeof(u32) + sizeof(SVGAFifoCmdDefineGMRFB);
+                break;
+        case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
+                *cmd = "SVGA_CMD_BLIT_GMRFB_TO_SCREEN";
+                *size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
+                break;
+        case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
+                *cmd = "SVGA_CMD_BLIT_SCREEN_TO_GMRFB";
+                *size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
+                break;
+        default:
+                *cmd = "UNKNOWN";
+                *size = 0;
+                return false;
+        }
+
+        return true;
+}
+
 static int vmw_cmd_check(struct vmw_private *dev_priv,
                          struct vmw_sw_context *sw_context,
                          void *buf, uint32_t *size)
@@ -3781,6 +3830,8 @@ int vmw_execbuf_fence_commands(struct drm_file *file_priv,
  * which the information should be copied.
  * @fence: Pointer to the fenc object.
  * @fence_handle: User-space fence handle.
+ * @out_fence_fd: exported file descriptor for the fence. -1 if not used
+ * @sync_file: Only used to clean up in case of an error in this function.
  *
  * This function copies fence information to user-space. If copying fails,
  * The user-space struct drm_vmw_fence_rep::error member is hopefully
@@ -3796,7 +3847,9 @@ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
                             int ret,
                             struct drm_vmw_fence_rep __user *user_fence_rep,
                             struct vmw_fence_obj *fence,
-                            uint32_t fence_handle)
+                            uint32_t fence_handle,
+                            int32_t out_fence_fd,
+                            struct sync_file *sync_file)
 {
         struct drm_vmw_fence_rep fence_rep;
 
@@ -3806,6 +3859,7 @@ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
         memset(&fence_rep, 0, sizeof(fence_rep));
 
         fence_rep.error = ret;
+        fence_rep.fd = out_fence_fd;
         if (ret == 0) {
                 BUG_ON(fence == NULL);
 
@@ -3828,6 +3882,14 @@ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
          * and unreference the handle.
          */
         if (unlikely(ret != 0) && (fence_rep.error == 0)) {
+                if (sync_file)
+                        fput(sync_file->file);
+
+                if (fence_rep.fd != -1) {
+                        put_unused_fd(fence_rep.fd);
+                        fence_rep.fd = -1;
+                }
+
                 ttm_ref_object_base_unref(vmw_fp->tfile,
                                           fence_handle, TTM_REF_USAGE);
                 DRM_ERROR("Fence copy error. Syncing.\n");
@@ -4003,7 +4065,8 @@ int vmw_execbuf_process(struct drm_file *file_priv,
                         uint64_t throttle_us,
                         uint32_t dx_context_handle,
                         struct drm_vmw_fence_rep __user *user_fence_rep,
-                        struct vmw_fence_obj **out_fence)
+                        struct vmw_fence_obj **out_fence,
+                        uint32_t flags)
 {
         struct vmw_sw_context *sw_context = &dev_priv->ctx;
         struct vmw_fence_obj *fence = NULL;
@@ -4013,20 +4076,33 @@ int vmw_execbuf_process(struct drm_file *file_priv,
         struct ww_acquire_ctx ticket;
         uint32_t handle;
         int ret;
+        int32_t out_fence_fd = -1;
+        struct sync_file *sync_file = NULL;
+
+        if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
+                out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
+                if (out_fence_fd < 0) {
+                        DRM_ERROR("Failed to get a fence file descriptor.\n");
+                        return out_fence_fd;
+                }
+        }
 
         if (throttle_us) {
                 ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
                                    throttle_us);
 
                 if (ret)
-                        return ret;
+                        goto out_free_fence_fd;
         }
 
         kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
                                              kernel_commands, command_size,
                                              &header);
-        if (IS_ERR(kernel_commands))
-                return PTR_ERR(kernel_commands);
+        if (IS_ERR(kernel_commands)) {
+                ret = PTR_ERR(kernel_commands);
+                goto out_free_fence_fd;
+        }
 
         ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
         if (ret) {
@@ -4162,8 +4238,32 @@ int vmw_execbuf_process(struct drm_file *file_priv,
         __vmw_execbuf_release_pinned_bo(dev_priv, fence);
 
         vmw_clear_validations(sw_context);
+
+        /*
+         * If anything fails here, give up trying to export the fence
+         * and do a sync since the user mode will not be able to sync
+         * the fence itself. This ensures we are still functionally
+         * correct.
+         */
+        if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
+                sync_file = sync_file_create(&fence->base);
+                if (!sync_file) {
+                        DRM_ERROR("Unable to create sync file for fence\n");
+                        put_unused_fd(out_fence_fd);
+                        out_fence_fd = -1;
+
+                        (void) vmw_fence_obj_wait(fence, false, false,
+                                                  VMW_FENCE_WAIT_TIMEOUT);
+                } else {
+                        /* Link the fence with the FD created earlier */
+                        fd_install(out_fence_fd, sync_file->file);
+                }
+        }
+
         vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
-                                    user_fence_rep, fence, handle);
+                                    user_fence_rep, fence, handle,
+                                    out_fence_fd, sync_file);
 
         /* Don't unreference when handing fence out */
         if (unlikely(out_fence != NULL)) {
@@ -4214,6 +4314,9 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 out_free_header:
         if (header)
                 vmw_cmdbuf_header_free(header);
+out_free_fence_fd:
+        if (out_fence_fd >= 0)
+                put_unused_fd(out_fence_fd);
 
         return ret;
 }
@@ -4366,6 +4469,7 @@ int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
         static const size_t copy_offset[] = {
                 offsetof(struct drm_vmw_execbuf_arg, context_handle),
                 sizeof(struct drm_vmw_execbuf_arg)};
+        struct dma_fence *in_fence = NULL;
 
         if (unlikely(size < copy_offset[0])) {
                 DRM_ERROR("Invalid command size, ioctl %d\n",
@@ -4401,15 +4505,25 @@ int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
                 arg.context_handle = (uint32_t) -1;
                 break;
         case 2:
-                if (arg.pad64 != 0) {
-                        DRM_ERROR("Unused IOCTL data not set to zero.\n");
-                        return -EINVAL;
-                }
-                break;
         default:
                 break;
         }
 
+        /* If imported a fence FD from elsewhere, then wait on it */
+        if (arg.flags & DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD) {
+                in_fence = sync_file_get_fence(arg.imported_fence_fd);
+
+                if (!in_fence) {
+                        DRM_ERROR("Cannot get imported fence\n");
+                        return -EINVAL;
+                }
+
+                ret = vmw_wait_dma_fence(dev_priv->fman, in_fence);
+                if (ret)
+                        goto out;
+        }
+
         ret = ttm_read_lock(&dev_priv->reservation_sem, true);
         if (unlikely(ret != 0))
                 return ret;
@@ -4419,12 +4533,16 @@ int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
                           NULL, arg.command_size, arg.throttle_us,
                           arg.context_handle,
                           (void __user *)(unsigned long)arg.fence_rep,
-                          NULL);
+                          NULL,
+                          arg.flags);
 
         ttm_read_unlock(&dev_priv->reservation_sem);
         if (unlikely(ret != 0))
-                return ret;
+                goto out;
 
         vmw_kms_cursor_post_execbuf(dev_priv);
 
-        return 0;
+out:
+        if (in_fence)
+                dma_fence_put(in_fence);
+        return ret;
 }
@@ -114,12 +114,11 @@ static void vmw_fence_obj_destroy(struct dma_fence *f)
                 container_of(f, struct vmw_fence_obj, base);
         struct vmw_fence_manager *fman = fman_from_fence(fence);
-        unsigned long irq_flags;
 
-        spin_lock_irqsave(&fman->lock, irq_flags);
+        spin_lock(&fman->lock);
         list_del_init(&fence->head);
         --fman->num_fence_objects;
-        spin_unlock_irqrestore(&fman->lock, irq_flags);
+        spin_unlock(&fman->lock);
         fence->destroy(fence);
 }
 
@@ -252,10 +251,10 @@ static void vmw_fence_work_func(struct work_struct *work)
         INIT_LIST_HEAD(&list);
         mutex_lock(&fman->goal_irq_mutex);
 
-        spin_lock_irq(&fman->lock);
+        spin_lock(&fman->lock);
         list_splice_init(&fman->cleanup_list, &list);
         seqno_valid = fman->seqno_valid;
-        spin_unlock_irq(&fman->lock);
+        spin_unlock(&fman->lock);
 
         if (!seqno_valid && fman->goal_irq_on) {
                 fman->goal_irq_on = false;
@@ -305,15 +304,14 @@ struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
 
 void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
 {
-        unsigned long irq_flags;
         bool lists_empty;
 
         (void) cancel_work_sync(&fman->work);
 
-        spin_lock_irqsave(&fman->lock, irq_flags);
+        spin_lock(&fman->lock);
         lists_empty = list_empty(&fman->fence_list) &&
                       list_empty(&fman->cleanup_list);
-        spin_unlock_irqrestore(&fman->lock, irq_flags);
+        spin_unlock(&fman->lock);
 
         BUG_ON(!lists_empty);
         kfree(fman);
@@ -323,7 +321,6 @@ static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
                               struct vmw_fence_obj *fence, u32 seqno,
                               void (*destroy) (struct vmw_fence_obj *fence))
 {
-        unsigned long irq_flags;
         int ret = 0;
 
         dma_fence_init(&fence->base, &vmw_fence_ops, &fman->lock,
@@ -331,7 +328,7 @@ static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
         INIT_LIST_HEAD(&fence->seq_passed_actions);
         fence->destroy = destroy;
 
-        spin_lock_irqsave(&fman->lock, irq_flags);
+        spin_lock(&fman->lock);
         if (unlikely(fman->fifo_down)) {
                 ret = -EBUSY;
                 goto out_unlock;
@@ -340,7 +337,7 @@ static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
         ++fman->num_fence_objects;
 
 out_unlock:
-        spin_unlock_irqrestore(&fman->lock, irq_flags);
+        spin_unlock(&fman->lock);
         return ret;
 }
 
@@ -489,11 +486,9 @@ static void __vmw_fences_update(struct vmw_fence_manager *fman)
 
 void vmw_fences_update(struct vmw_fence_manager *fman)
 {
-        unsigned long irq_flags;
-
-        spin_lock_irqsave(&fman->lock, irq_flags);
+        spin_lock(&fman->lock);
         __vmw_fences_update(fman);
-        spin_unlock_irqrestore(&fman->lock, irq_flags);
+        spin_unlock(&fman->lock);
 }
 
 bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence)
@@ -649,6 +644,51 @@ int vmw_user_fence_create(struct drm_file *file_priv,
 }
 
+/**
+ * vmw_wait_dma_fence - Wait for a dma fence
+ *
+ * @fman: pointer to a fence manager
+ * @fence: DMA fence to wait on
+ *
+ * This function handles the case when the fence is actually a fence
+ * array. If that's the case, it'll wait on each of the child fence
+ */
+int vmw_wait_dma_fence(struct vmw_fence_manager *fman,
+                       struct dma_fence *fence)
+{
+        struct dma_fence_array *fence_array;
+        int ret = 0;
+        int i;
+
+        if (dma_fence_is_signaled(fence))
+                return 0;
+
+        if (!dma_fence_is_array(fence))
+                return dma_fence_wait(fence, true);
+
+        /* From i915: Note that if the fence-array was created in
+         * signal-on-any mode, we should *not* decompose it into its individual
+         * fences. However, we don't currently store which mode the fence-array
+         * is operating in. Fortunately, the only user of signal-on-any is
+         * private to amdgpu and we should not see any incoming fence-array
+         * from sync-file being in signal-on-any mode.
+         */
+
+        fence_array = to_dma_fence_array(fence);
+        for (i = 0; i < fence_array->num_fences; i++) {
+                struct dma_fence *child = fence_array->fences[i];
+
+                ret = dma_fence_wait(child, true);
+
+                if (ret < 0)
+                        return ret;
+        }
+
+        return 0;
+}
+
 /**
  * vmw_fence_fifo_down - signal all unsignaled fence objects.
  */
@@ -663,14 +703,14 @@ void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
          * restart when we've released the fman->lock.
          */
 
-        spin_lock_irq(&fman->lock);
+        spin_lock(&fman->lock);
         fman->fifo_down = true;
         while (!list_empty(&fman->fence_list)) {
                 struct vmw_fence_obj *fence =
                         list_entry(fman->fence_list.prev, struct vmw_fence_obj,
                                    head);
                 dma_fence_get(&fence->base);
-                spin_unlock_irq(&fman->lock);
+                spin_unlock(&fman->lock);
 
                 ret = vmw_fence_obj_wait(fence, false, false,
                                          VMW_FENCE_WAIT_TIMEOUT);
@@ -686,18 +726,16 @@ void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
                 BUG_ON(!list_empty(&fence->head));
                 dma_fence_put(&fence->base);
-                spin_lock_irq(&fman->lock);
+                spin_lock(&fman->lock);
         }
-        spin_unlock_irq(&fman->lock);
+        spin_unlock(&fman->lock);
 }
 
 void vmw_fence_fifo_up(struct vmw_fence_manager *fman)
 {
-        unsigned long irq_flags;
-
-        spin_lock_irqsave(&fman->lock, irq_flags);
+        spin_lock(&fman->lock);
         fman->fifo_down = false;
-        spin_unlock_irqrestore(&fman->lock, irq_flags);
+        spin_unlock(&fman->lock);
 }
 
@@ -812,9 +850,9 @@ int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
         arg->signaled = vmw_fence_obj_signaled(fence);
 
         arg->signaled_flags = arg->flags;
-        spin_lock_irq(&fman->lock);
+        spin_lock(&fman->lock);
         arg->passed_seqno = dev_priv->last_read_seqno;
-        spin_unlock_irq(&fman->lock);
+        spin_unlock(&fman->lock);
 
         ttm_base_object_unref(&base);
 
@@ -841,8 +879,7 @@ int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
  *
  * This function is called when the seqno of the fence where @action is
  * attached has passed. It queues the event on the submitter's event list.
- * This function is always called from atomic context, and may be called
- * from irq context.
+ * This function is always called from atomic context.
  */
 static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
 {
@@ -851,13 +888,13 @@ static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
         struct drm_device *dev = eaction->dev;
         struct drm_pending_event *event = eaction->event;
         struct drm_file *file_priv;
-        unsigned long irq_flags;
 
         if (unlikely(event == NULL))
                 return;
 
         file_priv = event->file_priv;
-        spin_lock_irqsave(&dev->event_lock, irq_flags);
+        spin_lock_irq(&dev->event_lock);
 
         if (likely(eaction->tv_sec != NULL)) {
                 struct timeval tv;
@@ -869,7 +906,7 @@ static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
         drm_send_event_locked(dev, eaction->event);
         eaction->event = NULL;
-        spin_unlock_irqrestore(&dev->event_lock, irq_flags);
+        spin_unlock_irq(&dev->event_lock);
 }
 
 /**
@@ -904,11 +941,10 @@ static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
                               struct vmw_fence_action *action)
 {
         struct vmw_fence_manager *fman = fman_from_fence(fence);
-        unsigned long irq_flags;
         bool run_update = false;
 
         mutex_lock(&fman->goal_irq_mutex);
-        spin_lock_irqsave(&fman->lock, irq_flags);
+        spin_lock(&fman->lock);
 
         fman->pending_actions[action->type]++;
         if (dma_fence_is_signaled_locked(&fence->base)) {
@@ -927,7 +963,7 @@ static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
                 run_update = vmw_fence_goal_check_locked(fence);
         }
 
-        spin_unlock_irqrestore(&fman->lock, irq_flags);
+        spin_unlock(&fman->lock);
 
         if (run_update) {
                 if (!fman->goal_irq_on) {
@@ -1114,7 +1150,7 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
         }
 
         vmw_execbuf_copy_fence_user(dev_priv, vmw_fp, 0, user_fence_rep, fence,
-                                    handle);
+                                    handle, -1, NULL);
         vmw_fence_obj_unreference(&fence);
         return 0;
 out_no_create:
...
@@ -28,6 +28,7 @@
 #ifndef _VMWGFX_FENCE_H_
 
 #include <linux/dma-fence.h>
+#include <linux/dma-fence-array.h>
 
 #define VMW_FENCE_WAIT_TIMEOUT (5*HZ)
@@ -102,6 +103,9 @@ extern int vmw_user_fence_create(struct drm_file *file_priv,
                                  struct vmw_fence_obj **p_fence,
                                  uint32_t *p_handle);
 
+extern int vmw_wait_dma_fence(struct vmw_fence_manager *fman,
+                              struct dma_fence *fence);
+
 extern void vmw_fence_fifo_up(struct vmw_fence_manager *fman);
 extern void vmw_fence_fifo_down(struct vmw_fence_manager *fman);
...
@@ -30,11 +30,56 @@
 
 #define VMW_FENCE_WRAP (1 << 24)
 
-irqreturn_t vmw_irq_handler(int irq, void *arg)
+/**
+ * vmw_thread_fn - Deferred (process context) irq handler
+ *
+ * @irq: irq number
+ * @arg: Closure argument. Pointer to a struct drm_device cast to void *
+ *
+ * This function implements the deferred part of irq processing.
+ * The function is guaranteed to run at least once after the
+ * vmw_irq_handler has returned with IRQ_WAKE_THREAD.
+ *
+ */
+static irqreturn_t vmw_thread_fn(int irq, void *arg)
+{
+        struct drm_device *dev = (struct drm_device *)arg;
+        struct vmw_private *dev_priv = vmw_priv(dev);
+        irqreturn_t ret = IRQ_NONE;
+
+        if (test_and_clear_bit(VMW_IRQTHREAD_FENCE,
+                               dev_priv->irqthread_pending)) {
+                vmw_fences_update(dev_priv->fman);
+                wake_up_all(&dev_priv->fence_queue);
+                ret = IRQ_HANDLED;
+        }
+
+        if (test_and_clear_bit(VMW_IRQTHREAD_CMDBUF,
+                               dev_priv->irqthread_pending)) {
+                vmw_cmdbuf_irqthread(dev_priv->cman);
+                ret = IRQ_HANDLED;
+        }
+
+        return ret;
+}
+
+/**
+ * vmw_irq_handler irq handler
+ *
+ * @irq: irq number
+ * @arg: Closure argument. Pointer to a struct drm_device cast to void *
+ *
+ * This function implements the quick part of irq processing.
+ * The function performs fast actions like clearing the device interrupt
+ * flags and also reasonably quick actions like waking processes waiting for
+ * FIFO space. Other IRQ actions are deferred to the IRQ thread.
+ */
+static irqreturn_t vmw_irq_handler(int irq, void *arg)
 {
         struct drm_device *dev = (struct drm_device *)arg;
         struct vmw_private *dev_priv = vmw_priv(dev);
         uint32_t status, masked_status;
+        irqreturn_t ret = IRQ_HANDLED;
 
         status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
         masked_status = status & READ_ONCE(dev_priv->irq_mask);
@@ -45,20 +90,21 @@ irqreturn_t vmw_irq_handler(int irq, void *arg)
         if (!status)
                 return IRQ_NONE;
 
-        if (masked_status & (SVGA_IRQFLAG_ANY_FENCE |
-                             SVGA_IRQFLAG_FENCE_GOAL)) {
-                vmw_fences_update(dev_priv->fman);
-                wake_up_all(&dev_priv->fence_queue);
-        }
-
         if (masked_status & SVGA_IRQFLAG_FIFO_PROGRESS)
                 wake_up_all(&dev_priv->fifo_queue);
 
-        if (masked_status & (SVGA_IRQFLAG_COMMAND_BUFFER |
-                             SVGA_IRQFLAG_ERROR))
-                vmw_cmdbuf_tasklet_schedule(dev_priv->cman);
+        if ((masked_status & (SVGA_IRQFLAG_ANY_FENCE |
+                              SVGA_IRQFLAG_FENCE_GOAL)) &&
+            !test_and_set_bit(VMW_IRQTHREAD_FENCE, dev_priv->irqthread_pending))
+                ret = IRQ_WAKE_THREAD;
 
-        return IRQ_HANDLED;
+        if ((masked_status & (SVGA_IRQFLAG_COMMAND_BUFFER |
+                              SVGA_IRQFLAG_ERROR)) &&
+            !test_and_set_bit(VMW_IRQTHREAD_CMDBUF,
+                              dev_priv->irqthread_pending))
+                ret = IRQ_WAKE_THREAD;
+
+        return ret;
 }
 
 static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno)
@@ -281,23 +327,15 @@ int vmw_wait_seqno(struct vmw_private *dev_priv,
         return ret;
 }
 
-void vmw_irq_preinstall(struct drm_device *dev)
+static void vmw_irq_preinstall(struct drm_device *dev)
 {
         struct vmw_private *dev_priv = vmw_priv(dev);
         uint32_t status;
 
-        if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
-                return;
-
         status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
         outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
 }
 
-int vmw_irq_postinstall(struct drm_device *dev)
-{
-        return 0;
-}
-
 void vmw_irq_uninstall(struct drm_device *dev)
 {
         struct vmw_private *dev_priv = vmw_priv(dev);
@@ -306,8 +344,41 @@ void vmw_irq_uninstall(struct drm_device *dev)
         if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
                 return;
 
+        if (!dev->irq_enabled)
+                return;
+
         vmw_write(dev_priv, SVGA_REG_IRQMASK, 0);
 
         status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
         outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
+
+        dev->irq_enabled = false;
+        free_irq(dev->irq, dev);
+}
+
+/**
+ * vmw_irq_install - Install the irq handlers
+ *
+ * @dev:  Pointer to the drm device.
+ * @irq:  The irq number.
+ * Return:  Zero if successful. Negative number otherwise.
+ */
+int vmw_irq_install(struct drm_device *dev, int irq)
+{
+        int ret;
+
+        if (dev->irq_enabled)
+                return -EBUSY;
+
+        vmw_irq_preinstall(dev);
+
+        ret = request_threaded_irq(irq, vmw_irq_handler, vmw_thread_fn,
+                                   IRQF_SHARED, VMWGFX_DRIVER_NAME, dev);
+        if (ret < 0)
+                return ret;
+
+        dev->irq_enabled = true;
+        dev->irq = irq;
+
+        return ret;
 }
@@ -2494,7 +2494,7 @@ void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
         if (file_priv)
                 vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
                                             ret, user_fence_rep, fence,
-                                            handle);
+                                            handle, -1, NULL);
         if (out_fence)
                 *out_fence = fence;
         else
...
@@ -297,13 +297,17 @@ union drm_vmw_surface_reference_arg {
  * @version: Allows expanding the execbuf ioctl parameters without breaking
  * backwards compatibility, since user-space will always tell the kernel
  * which version it uses.
- * @flags: Execbuf flags. None currently.
+ * @flags: Execbuf flags.
+ * @imported_fence_fd:  FD for a fence imported from another device
  *
  * Argument to the DRM_VMW_EXECBUF Ioctl.
  */
 
 #define DRM_VMW_EXECBUF_VERSION 2
 
+#define DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD (1 << 0)
+#define DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD (1 << 1)
+
 struct drm_vmw_execbuf_arg {
         __u64 commands;
         __u32 command_size;
@@ -312,7 +316,7 @@ struct drm_vmw_execbuf_arg {
         __u32 version;
         __u32 flags;
         __u32 context_handle;
-        __u32 pad64;
+        __s32 imported_fence_fd;
 };
 
 /**
@@ -328,6 +332,7 @@ struct drm_vmw_execbuf_arg {
  * @passed_seqno: The highest seqno number processed by the hardware
  * so far. This can be used to mark user-space fence objects as signaled, and
 * to determine whether a fence seqno might be stale.
+ * @fd: FD associated with the fence, -1 if not exported
  * @error: This member should've been set to -EFAULT on submission.
  * The following actions should be take on completion:
 * error == -EFAULT: Fence communication failed. The host is synchronized.
@@ -345,7 +350,7 @@ struct drm_vmw_fence_rep {
         __u32 mask;
         __u32 seqno;
         __u32 passed_seqno;
-        __u32 pad64;
+        __s32 fd;
         __s32 error;
 };
...