Commit 4468d048 authored by Matthew Brost, committed by Rodrigo Vivi

drm/xe: Drop EXEC_QUEUE_FLAG_BANNED

Clean up layering violation of setting q->flags EXEC_QUEUE_FLAG_BANNED bit
in GuC backend. Move banned to GuC owned bit and report banned status to
upper layers via reset_status vfunc. This is a slight change in behavior
as reset_status returns true if wedged or killed bits set too, but in
all of these cases submission to queue is no longer allowed.
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Jonathan Cavitt <jonathan.cavitt@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240604184700.1946918-1-matthew.brost@intel.com
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
parent 7ecea18e
...@@ -141,7 +141,7 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file) ...@@ -141,7 +141,7 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
q->width != args->num_batch_buffer)) q->width != args->num_batch_buffer))
return -EINVAL; return -EINVAL;
if (XE_IOCTL_DBG(xe, q->flags & EXEC_QUEUE_FLAG_BANNED)) { if (XE_IOCTL_DBG(xe, q->ops->reset_status(q))) {
err = -ECANCELED; err = -ECANCELED;
goto err_exec_queue; goto err_exec_queue;
} }
......
...@@ -677,7 +677,7 @@ int xe_exec_queue_get_property_ioctl(struct drm_device *dev, void *data, ...@@ -677,7 +677,7 @@ int xe_exec_queue_get_property_ioctl(struct drm_device *dev, void *data,
switch (args->property) { switch (args->property) {
case DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN: case DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN:
args->value = !!(q->flags & EXEC_QUEUE_FLAG_BANNED); args->value = q->ops->reset_status(q);
ret = 0; ret = 0;
break; break;
default: default:
......
...@@ -70,18 +70,16 @@ struct xe_exec_queue { ...@@ -70,18 +70,16 @@ struct xe_exec_queue {
*/ */
struct dma_fence *last_fence; struct dma_fence *last_fence;
/* queue no longer allowed to submit */
#define EXEC_QUEUE_FLAG_BANNED BIT(0)
/* queue used for kernel submission only */ /* queue used for kernel submission only */
#define EXEC_QUEUE_FLAG_KERNEL BIT(1) #define EXEC_QUEUE_FLAG_KERNEL BIT(0)
/* kernel engine only destroyed at driver unload */ /* kernel engine only destroyed at driver unload */
#define EXEC_QUEUE_FLAG_PERMANENT BIT(2) #define EXEC_QUEUE_FLAG_PERMANENT BIT(1)
/* for VM jobs. Caller needs to hold rpm ref when creating queue with this flag */ /* for VM jobs. Caller needs to hold rpm ref when creating queue with this flag */
#define EXEC_QUEUE_FLAG_VM BIT(3) #define EXEC_QUEUE_FLAG_VM BIT(2)
/* child of VM queue for multi-tile VM jobs */ /* child of VM queue for multi-tile VM jobs */
#define EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD BIT(4) #define EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD BIT(3)
/* kernel exec_queue only, set priority to highest level */ /* kernel exec_queue only, set priority to highest level */
#define EXEC_QUEUE_FLAG_HIGH_PRIORITY BIT(5) #define EXEC_QUEUE_FLAG_HIGH_PRIORITY BIT(4)
/** /**
* @flags: flags for this exec queue, should statically setup aside from ban * @flags: flags for this exec queue, should statically setup aside from ban
......
...@@ -61,6 +61,7 @@ exec_queue_to_guc(struct xe_exec_queue *q) ...@@ -61,6 +61,7 @@ exec_queue_to_guc(struct xe_exec_queue *q)
#define EXEC_QUEUE_STATE_RESET (1 << 6) #define EXEC_QUEUE_STATE_RESET (1 << 6)
#define EXEC_QUEUE_STATE_KILLED (1 << 7) #define EXEC_QUEUE_STATE_KILLED (1 << 7)
#define EXEC_QUEUE_STATE_WEDGED (1 << 8) #define EXEC_QUEUE_STATE_WEDGED (1 << 8)
#define EXEC_QUEUE_STATE_BANNED (1 << 9)
static bool exec_queue_registered(struct xe_exec_queue *q) static bool exec_queue_registered(struct xe_exec_queue *q)
{ {
...@@ -134,12 +135,12 @@ static void set_exec_queue_destroyed(struct xe_exec_queue *q) ...@@ -134,12 +135,12 @@ static void set_exec_queue_destroyed(struct xe_exec_queue *q)
static bool exec_queue_banned(struct xe_exec_queue *q) static bool exec_queue_banned(struct xe_exec_queue *q)
{ {
return (q->flags & EXEC_QUEUE_FLAG_BANNED); return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_BANNED;
} }
static void set_exec_queue_banned(struct xe_exec_queue *q) static void set_exec_queue_banned(struct xe_exec_queue *q)
{ {
q->flags |= EXEC_QUEUE_FLAG_BANNED; atomic_or(EXEC_QUEUE_STATE_BANNED, &q->guc->state);
} }
static bool exec_queue_suspended(struct xe_exec_queue *q) static bool exec_queue_suspended(struct xe_exec_queue *q)
...@@ -189,8 +190,9 @@ static void set_exec_queue_wedged(struct xe_exec_queue *q) ...@@ -189,8 +190,9 @@ static void set_exec_queue_wedged(struct xe_exec_queue *q)
static bool exec_queue_killed_or_banned_or_wedged(struct xe_exec_queue *q) static bool exec_queue_killed_or_banned_or_wedged(struct xe_exec_queue *q)
{ {
return exec_queue_banned(q) || (atomic_read(&q->guc->state) & return (atomic_read(&q->guc->state) &
(EXEC_QUEUE_STATE_WEDGED | EXEC_QUEUE_STATE_KILLED)); (EXEC_QUEUE_STATE_WEDGED | EXEC_QUEUE_STATE_KILLED |
EXEC_QUEUE_STATE_BANNED));
} }
#ifdef CONFIG_PROVE_LOCKING #ifdef CONFIG_PROVE_LOCKING
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment