drm/i915/guc/ct: Group request-related variables in a sub-structure

For better isolation of the request tracking from the rest of the
CT-related data.

v2: split to separate patch, move next_fence to substructure (Michal)
Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
Cc: John Harrison <John.C.Harrison@Intel.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191217012316.13271-4-daniele.ceraolospurio@intel.com
parent 9ab28cd2
...@@ -37,10 +37,10 @@ static void ct_incoming_request_worker_func(struct work_struct *w); ...@@ -37,10 +37,10 @@ static void ct_incoming_request_worker_func(struct work_struct *w);
*/ */
void intel_guc_ct_init_early(struct intel_guc_ct *ct) void intel_guc_ct_init_early(struct intel_guc_ct *ct)
{ {
spin_lock_init(&ct->lock); spin_lock_init(&ct->requests.lock);
INIT_LIST_HEAD(&ct->pending_requests); INIT_LIST_HEAD(&ct->requests.pending);
INIT_LIST_HEAD(&ct->incoming_requests); INIT_LIST_HEAD(&ct->requests.incoming);
INIT_WORK(&ct->worker, ct_incoming_request_worker_func); INIT_WORK(&ct->requests.worker, ct_incoming_request_worker_func);
} }
static inline struct intel_guc *ct_to_guc(struct intel_guc_ct *ct) static inline struct intel_guc *ct_to_guc(struct intel_guc_ct *ct)
...@@ -267,7 +267,7 @@ void intel_guc_ct_disable(struct intel_guc_ct *ct) ...@@ -267,7 +267,7 @@ void intel_guc_ct_disable(struct intel_guc_ct *ct)
static u32 ct_get_next_fence(struct intel_guc_ct *ct) static u32 ct_get_next_fence(struct intel_guc_ct *ct)
{ {
/* For now it's trivial */ /* For now it's trivial */
return ++ct->next_fence; return ++ct->requests.next_fence;
} }
/** /**
...@@ -465,9 +465,9 @@ static int ct_send(struct intel_guc_ct *ct, ...@@ -465,9 +465,9 @@ static int ct_send(struct intel_guc_ct *ct,
request.response_len = response_buf_size; request.response_len = response_buf_size;
request.response_buf = response_buf; request.response_buf = response_buf;
spin_lock_irqsave(&ct->lock, flags); spin_lock_irqsave(&ct->requests.lock, flags);
list_add_tail(&request.link, &ct->pending_requests); list_add_tail(&request.link, &ct->requests.pending);
spin_unlock_irqrestore(&ct->lock, flags); spin_unlock_irqrestore(&ct->requests.lock, flags);
err = ctb_write(ctb, action, len, fence, !!response_buf); err = ctb_write(ctb, action, len, fence, !!response_buf);
if (unlikely(err)) if (unlikely(err))
...@@ -500,9 +500,9 @@ static int ct_send(struct intel_guc_ct *ct, ...@@ -500,9 +500,9 @@ static int ct_send(struct intel_guc_ct *ct,
} }
unlink: unlink:
spin_lock_irqsave(&ct->lock, flags); spin_lock_irqsave(&ct->requests.lock, flags);
list_del(&request.link); list_del(&request.link);
spin_unlock_irqrestore(&ct->lock, flags); spin_unlock_irqrestore(&ct->requests.lock, flags);
return err; return err;
} }
...@@ -650,8 +650,8 @@ static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg) ...@@ -650,8 +650,8 @@ static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg)
CT_DEBUG_DRIVER("CT: response fence %u status %#x\n", fence, status); CT_DEBUG_DRIVER("CT: response fence %u status %#x\n", fence, status);
spin_lock(&ct->lock); spin_lock(&ct->requests.lock);
list_for_each_entry(req, &ct->pending_requests, link) { list_for_each_entry(req, &ct->requests.pending, link) {
if (unlikely(fence != req->fence)) { if (unlikely(fence != req->fence)) {
CT_DEBUG_DRIVER("CT: request %u awaits response\n", CT_DEBUG_DRIVER("CT: request %u awaits response\n",
req->fence); req->fence);
...@@ -669,7 +669,7 @@ static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg) ...@@ -669,7 +669,7 @@ static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg)
found = true; found = true;
break; break;
} }
spin_unlock(&ct->lock); spin_unlock(&ct->requests.lock);
if (!found) if (!found)
DRM_ERROR("CT: unsolicited response %*ph\n", 4 * msglen, msg); DRM_ERROR("CT: unsolicited response %*ph\n", 4 * msglen, msg);
...@@ -707,13 +707,13 @@ static bool ct_process_incoming_requests(struct intel_guc_ct *ct) ...@@ -707,13 +707,13 @@ static bool ct_process_incoming_requests(struct intel_guc_ct *ct)
u32 *payload; u32 *payload;
bool done; bool done;
spin_lock_irqsave(&ct->lock, flags); spin_lock_irqsave(&ct->requests.lock, flags);
request = list_first_entry_or_null(&ct->incoming_requests, request = list_first_entry_or_null(&ct->requests.incoming,
struct ct_incoming_request, link); struct ct_incoming_request, link);
if (request) if (request)
list_del(&request->link); list_del(&request->link);
done = !!list_empty(&ct->incoming_requests); done = !!list_empty(&ct->requests.incoming);
spin_unlock_irqrestore(&ct->lock, flags); spin_unlock_irqrestore(&ct->requests.lock, flags);
if (!request) if (!request)
return true; return true;
...@@ -731,12 +731,13 @@ static bool ct_process_incoming_requests(struct intel_guc_ct *ct) ...@@ -731,12 +731,13 @@ static bool ct_process_incoming_requests(struct intel_guc_ct *ct)
static void ct_incoming_request_worker_func(struct work_struct *w) static void ct_incoming_request_worker_func(struct work_struct *w)
{ {
struct intel_guc_ct *ct = container_of(w, struct intel_guc_ct, worker); struct intel_guc_ct *ct =
container_of(w, struct intel_guc_ct, requests.worker);
bool done; bool done;
done = ct_process_incoming_requests(ct); done = ct_process_incoming_requests(ct);
if (!done) if (!done)
queue_work(system_unbound_wq, &ct->worker); queue_work(system_unbound_wq, &ct->requests.worker);
} }
/** /**
...@@ -774,11 +775,11 @@ static int ct_handle_request(struct intel_guc_ct *ct, const u32 *msg) ...@@ -774,11 +775,11 @@ static int ct_handle_request(struct intel_guc_ct *ct, const u32 *msg)
} }
memcpy(request->msg, msg, 4 * msglen); memcpy(request->msg, msg, 4 * msglen);
spin_lock_irqsave(&ct->lock, flags); spin_lock_irqsave(&ct->requests.lock, flags);
list_add_tail(&request->link, &ct->incoming_requests); list_add_tail(&request->link, &ct->requests.incoming);
spin_unlock_irqrestore(&ct->lock, flags); spin_unlock_irqrestore(&ct->requests.lock, flags);
queue_work(system_unbound_wq, &ct->worker); queue_work(system_unbound_wq, &ct->requests.worker);
return 0; return 0;
} }
......
...@@ -48,12 +48,15 @@ struct intel_guc_ct { ...@@ -48,12 +48,15 @@ struct intel_guc_ct {
/* buffers for sending(0) and receiving(1) commands */ /* buffers for sending(0) and receiving(1) commands */
struct intel_guc_ct_buffer ctbs[2]; struct intel_guc_ct_buffer ctbs[2];
u32 next_fence; /* fence to be used with next send command */ struct {
u32 next_fence; /* fence to be used with next request to send */
spinlock_t lock; /* protects pending requests list */ spinlock_t lock; /* protects pending requests list */
struct list_head pending_requests; /* requests waiting for response */ struct list_head pending; /* requests waiting for response */
struct list_head incoming_requests; /* incoming requests */
struct work_struct worker; /* handler for incoming requests */ struct list_head incoming; /* incoming requests */
struct work_struct worker; /* handler for incoming requests */
} requests;
}; };
void intel_guc_ct_init_early(struct intel_guc_ct *ct); void intel_guc_ct_init_early(struct intel_guc_ct *ct);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment