Commit d2cb0b21 authored by Jonathan Kim, committed by Alex Deucher

drm/amdkfd: remove unneeded unmap single queue option

The KFD only unmaps all queues, all dynamics queues or all process queues
since RUN_LIST is mapped with all KFD queues.

There's no need to provide a single type unmap so remove this option.
Signed-off-by: Jonathan Kim <jonathan.kim@amd.com>
Reviewed-by: Felix Kuehling <felix.kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 7258fa31
...@@ -1555,8 +1555,7 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm, ...@@ -1555,8 +1555,7 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
if (!dqm->active_runlist) if (!dqm->active_runlist)
return retval; return retval;
retval = pm_send_unmap_queue(&dqm->packet_mgr, KFD_QUEUE_TYPE_COMPUTE, retval = pm_send_unmap_queue(&dqm->packet_mgr, filter, filter_param, reset);
filter, filter_param, reset, 0);
if (retval) if (retval)
return retval; return retval;
......
...@@ -369,10 +369,9 @@ int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address, ...@@ -369,10 +369,9 @@ int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
return retval; return retval;
} }
int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type, int pm_send_unmap_queue(struct packet_manager *pm,
enum kfd_unmap_queues_filter filter, enum kfd_unmap_queues_filter filter,
uint32_t filter_param, bool reset, uint32_t filter_param, bool reset)
unsigned int sdma_engine)
{ {
uint32_t *buffer, size; uint32_t *buffer, size;
int retval = 0; int retval = 0;
...@@ -387,8 +386,7 @@ int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type, ...@@ -387,8 +386,7 @@ int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
goto out; goto out;
} }
retval = pm->pmf->unmap_queues(pm, buffer, type, filter, filter_param, retval = pm->pmf->unmap_queues(pm, buffer, filter, filter_param, reset);
reset, sdma_engine);
if (!retval) if (!retval)
kq_submit_packet(pm->priv_queue); kq_submit_packet(pm->priv_queue);
else else
......
...@@ -247,10 +247,8 @@ static int pm_map_queues_v9(struct packet_manager *pm, uint32_t *buffer, ...@@ -247,10 +247,8 @@ static int pm_map_queues_v9(struct packet_manager *pm, uint32_t *buffer,
} }
static int pm_unmap_queues_v9(struct packet_manager *pm, uint32_t *buffer, static int pm_unmap_queues_v9(struct packet_manager *pm, uint32_t *buffer,
enum kfd_queue_type type,
enum kfd_unmap_queues_filter filter, enum kfd_unmap_queues_filter filter,
uint32_t filter_param, bool reset, uint32_t filter_param, bool reset)
unsigned int sdma_engine)
{ {
struct pm4_mes_unmap_queues *packet; struct pm4_mes_unmap_queues *packet;
...@@ -259,31 +257,11 @@ static int pm_unmap_queues_v9(struct packet_manager *pm, uint32_t *buffer, ...@@ -259,31 +257,11 @@ static int pm_unmap_queues_v9(struct packet_manager *pm, uint32_t *buffer,
packet->header.u32All = pm_build_pm4_header(IT_UNMAP_QUEUES, packet->header.u32All = pm_build_pm4_header(IT_UNMAP_QUEUES,
sizeof(struct pm4_mes_unmap_queues)); sizeof(struct pm4_mes_unmap_queues));
switch (type) {
case KFD_QUEUE_TYPE_COMPUTE: packet->bitfields2.extended_engine_sel =
case KFD_QUEUE_TYPE_DIQ: extended_engine_sel__mes_unmap_queues__legacy_engine_sel;
packet->bitfields2.extended_engine_sel = packet->bitfields2.engine_sel =
extended_engine_sel__mes_unmap_queues__legacy_engine_sel; engine_sel__mes_unmap_queues__compute;
packet->bitfields2.engine_sel =
engine_sel__mes_unmap_queues__compute;
break;
case KFD_QUEUE_TYPE_SDMA:
case KFD_QUEUE_TYPE_SDMA_XGMI:
if (sdma_engine < 2) {
packet->bitfields2.extended_engine_sel =
extended_engine_sel__mes_unmap_queues__legacy_engine_sel;
packet->bitfields2.engine_sel =
engine_sel__mes_unmap_queues__sdma0 + sdma_engine;
} else {
packet->bitfields2.extended_engine_sel =
extended_engine_sel__mes_unmap_queues__sdma0_to_7_sel;
packet->bitfields2.engine_sel = sdma_engine;
}
break;
default:
WARN(1, "queue type %d", type);
return -EINVAL;
}
if (reset) if (reset)
packet->bitfields2.action = packet->bitfields2.action =
...@@ -293,12 +271,6 @@ static int pm_unmap_queues_v9(struct packet_manager *pm, uint32_t *buffer, ...@@ -293,12 +271,6 @@ static int pm_unmap_queues_v9(struct packet_manager *pm, uint32_t *buffer,
action__mes_unmap_queues__preempt_queues; action__mes_unmap_queues__preempt_queues;
switch (filter) { switch (filter) {
case KFD_UNMAP_QUEUES_FILTER_SINGLE_QUEUE:
packet->bitfields2.queue_sel =
queue_sel__mes_unmap_queues__perform_request_on_specified_queues;
packet->bitfields2.num_queues = 1;
packet->bitfields3b.doorbell_offset0 = filter_param;
break;
case KFD_UNMAP_QUEUES_FILTER_BY_PASID: case KFD_UNMAP_QUEUES_FILTER_BY_PASID:
packet->bitfields2.queue_sel = packet->bitfields2.queue_sel =
queue_sel__mes_unmap_queues__perform_request_on_pasid_queues; queue_sel__mes_unmap_queues__perform_request_on_pasid_queues;
......
...@@ -198,10 +198,8 @@ static int pm_map_queues_vi(struct packet_manager *pm, uint32_t *buffer, ...@@ -198,10 +198,8 @@ static int pm_map_queues_vi(struct packet_manager *pm, uint32_t *buffer,
} }
static int pm_unmap_queues_vi(struct packet_manager *pm, uint32_t *buffer, static int pm_unmap_queues_vi(struct packet_manager *pm, uint32_t *buffer,
enum kfd_queue_type type,
enum kfd_unmap_queues_filter filter, enum kfd_unmap_queues_filter filter,
uint32_t filter_param, bool reset, uint32_t filter_param, bool reset)
unsigned int sdma_engine)
{ {
struct pm4_mes_unmap_queues *packet; struct pm4_mes_unmap_queues *packet;
...@@ -210,21 +208,9 @@ static int pm_unmap_queues_vi(struct packet_manager *pm, uint32_t *buffer, ...@@ -210,21 +208,9 @@ static int pm_unmap_queues_vi(struct packet_manager *pm, uint32_t *buffer,
packet->header.u32All = pm_build_pm4_header(IT_UNMAP_QUEUES, packet->header.u32All = pm_build_pm4_header(IT_UNMAP_QUEUES,
sizeof(struct pm4_mes_unmap_queues)); sizeof(struct pm4_mes_unmap_queues));
switch (type) {
case KFD_QUEUE_TYPE_COMPUTE: packet->bitfields2.engine_sel =
case KFD_QUEUE_TYPE_DIQ:
packet->bitfields2.engine_sel =
engine_sel__mes_unmap_queues__compute; engine_sel__mes_unmap_queues__compute;
break;
case KFD_QUEUE_TYPE_SDMA:
case KFD_QUEUE_TYPE_SDMA_XGMI:
packet->bitfields2.engine_sel =
engine_sel__mes_unmap_queues__sdma0 + sdma_engine;
break;
default:
WARN(1, "queue type %d", type);
return -EINVAL;
}
if (reset) if (reset)
packet->bitfields2.action = packet->bitfields2.action =
...@@ -234,12 +220,6 @@ static int pm_unmap_queues_vi(struct packet_manager *pm, uint32_t *buffer, ...@@ -234,12 +220,6 @@ static int pm_unmap_queues_vi(struct packet_manager *pm, uint32_t *buffer,
action__mes_unmap_queues__preempt_queues; action__mes_unmap_queues__preempt_queues;
switch (filter) { switch (filter) {
case KFD_UNMAP_QUEUES_FILTER_SINGLE_QUEUE:
packet->bitfields2.queue_sel =
queue_sel__mes_unmap_queues__perform_request_on_specified_queues;
packet->bitfields2.num_queues = 1;
packet->bitfields3b.doorbell_offset0 = filter_param;
break;
case KFD_UNMAP_QUEUES_FILTER_BY_PASID: case KFD_UNMAP_QUEUES_FILTER_BY_PASID:
packet->bitfields2.queue_sel = packet->bitfields2.queue_sel =
queue_sel__mes_unmap_queues__perform_request_on_pasid_queues; queue_sel__mes_unmap_queues__perform_request_on_pasid_queues;
......
...@@ -360,20 +360,20 @@ struct device *kfd_chardev(void); ...@@ -360,20 +360,20 @@ struct device *kfd_chardev(void);
/** /**
* enum kfd_unmap_queues_filter - Enum for queue filters. * enum kfd_unmap_queues_filter - Enum for queue filters.
* *
* @KFD_UNMAP_QUEUES_FILTER_SINGLE_QUEUE: Preempts single queue.
*
* @KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES: Preempts all queues in the * @KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES: Preempts all queues in the
* running queues list. * running queues list.
* *
* @KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES: Preempts all non-static queues
* in the run list.
*
* @KFD_UNMAP_QUEUES_FILTER_BY_PASID: Preempts queues that belongs to * @KFD_UNMAP_QUEUES_FILTER_BY_PASID: Preempts queues that belongs to
* specific process. * specific process.
* *
*/ */
enum kfd_unmap_queues_filter { enum kfd_unmap_queues_filter {
KFD_UNMAP_QUEUES_FILTER_SINGLE_QUEUE, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES = 1,
KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES = 2,
KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, KFD_UNMAP_QUEUES_FILTER_BY_PASID = 3
KFD_UNMAP_QUEUES_FILTER_BY_PASID
}; };
/** /**
...@@ -1247,10 +1247,8 @@ struct packet_manager_funcs { ...@@ -1247,10 +1247,8 @@ struct packet_manager_funcs {
int (*map_queues)(struct packet_manager *pm, uint32_t *buffer, int (*map_queues)(struct packet_manager *pm, uint32_t *buffer,
struct queue *q, bool is_static); struct queue *q, bool is_static);
int (*unmap_queues)(struct packet_manager *pm, uint32_t *buffer, int (*unmap_queues)(struct packet_manager *pm, uint32_t *buffer,
enum kfd_queue_type type,
enum kfd_unmap_queues_filter mode, enum kfd_unmap_queues_filter mode,
uint32_t filter_param, bool reset, uint32_t filter_param, bool reset);
unsigned int sdma_engine);
int (*query_status)(struct packet_manager *pm, uint32_t *buffer, int (*query_status)(struct packet_manager *pm, uint32_t *buffer,
uint64_t fence_address, uint64_t fence_value); uint64_t fence_address, uint64_t fence_value);
int (*release_mem)(uint64_t gpu_addr, uint32_t *buffer); int (*release_mem)(uint64_t gpu_addr, uint32_t *buffer);
...@@ -1277,10 +1275,9 @@ int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues); ...@@ -1277,10 +1275,9 @@ int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues);
int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address, int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
uint64_t fence_value); uint64_t fence_value);
int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type, int pm_send_unmap_queue(struct packet_manager *pm,
enum kfd_unmap_queues_filter mode, enum kfd_unmap_queues_filter mode,
uint32_t filter_param, bool reset, uint32_t filter_param, bool reset);
unsigned int sdma_engine);
void pm_release_ib(struct packet_manager *pm); void pm_release_ib(struct packet_manager *pm);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment