Commit 7c9631af authored by Jay Cornwall, committed by Alex Deucher

drm/amdkfd: Move set_trap_handler out of dqm->ops

Trap handler is set per-process per-device and is unrelated
to queue management.

Move implementation closer to TMA setup code.
Signed-off-by: Jay Cornwall <jay.cornwall@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 47c45c39
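
For context, a minimal userspace sketch of how this path is reached. This is not part of the commit: it assumes the uapi in <linux/kfd_ioctl.h> (struct kfd_ioctl_set_trap_handler_args, AMDKFD_IOC_SET_TRAP_HANDLER); the gpu_id and both GPU virtual addresses are placeholders, and a real caller maps the trap handler code object at tba_addr first.

/* Hedged userspace sketch, not part of this commit: reaching
 * kfd_ioctl_set_trap_handler() through the KFD character device.
 * gpu_id and the two GPU virtual addresses are placeholders.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/kfd_ioctl.h>

int main(void)
{
        int fd = open("/dev/kfd", O_RDWR);

        if (fd < 0) {
                perror("open /dev/kfd");
                return 1;
        }

        struct kfd_ioctl_set_trap_handler_args args = {
                .tba_addr = 0x7f0000000000ull,  /* placeholder: trap handler base (GPU VA) */
                .tma_addr = 0x7f0000001000ull,  /* placeholder: trap memory (GPU VA) */
                .gpu_id   = 0,                  /* placeholder: real id comes from KFD topology */
        };

        if (ioctl(fd, AMDKFD_IOC_SET_TRAP_HANDLER, &args) < 0)
                perror("AMDKFD_IOC_SET_TRAP_HANDLER");

        close(fd);
        return 0;
}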
drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -556,11 +556,7 @@ static int kfd_ioctl_set_trap_handler(struct file *filep,
                 goto out;
         }
 
-        if (dev->dqm->ops.set_trap_handler(dev->dqm,
-                                        &pdd->qpd,
-                                        args->tba_addr,
-                                        args->tma_addr))
-                err = -EINVAL;
+        kfd_process_set_trap_handler(&pdd->qpd, args->tba_addr, args->tma_addr);
 
 out:
         mutex_unlock(&p->mutex);
...
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -1596,26 +1596,6 @@ static bool set_cache_memory_policy(struct device_queue_manager *dqm,
         return retval;
 }
 
-static int set_trap_handler(struct device_queue_manager *dqm,
-                                struct qcm_process_device *qpd,
-                                uint64_t tba_addr,
-                                uint64_t tma_addr)
-{
-        uint64_t *tma;
-
-        if (dqm->dev->cwsr_enabled) {
-                /* Jump from CWSR trap handler to user trap */
-                tma = (uint64_t *)(qpd->cwsr_kaddr + KFD_CWSR_TMA_OFFSET);
-                tma[0] = tba_addr;
-                tma[1] = tma_addr;
-        } else {
-                qpd->tba_addr = tba_addr;
-                qpd->tma_addr = tma_addr;
-        }
-
-        return 0;
-}
-
 static int process_termination_nocpsch(struct device_queue_manager *dqm,
                 struct qcm_process_device *qpd)
 {
@@ -1859,7 +1839,6 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
                 dqm->ops.create_kernel_queue = create_kernel_queue_cpsch;
                 dqm->ops.destroy_kernel_queue = destroy_kernel_queue_cpsch;
                 dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
-                dqm->ops.set_trap_handler = set_trap_handler;
                 dqm->ops.process_termination = process_termination_cpsch;
                 dqm->ops.evict_process_queues = evict_process_queues_cpsch;
                 dqm->ops.restore_process_queues = restore_process_queues_cpsch;
@@ -1878,7 +1857,6 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
                 dqm->ops.initialize = initialize_nocpsch;
                 dqm->ops.uninitialize = uninitialize;
                 dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
-                dqm->ops.set_trap_handler = set_trap_handler;
                 dqm->ops.process_termination = process_termination_nocpsch;
                 dqm->ops.evict_process_queues = evict_process_queues_nocpsch;
                 dqm->ops.restore_process_queues =
...
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -121,11 +121,6 @@ struct device_queue_manager_ops {
                                    void __user *alternate_aperture_base,
                                    uint64_t alternate_aperture_size);
 
-        int     (*set_trap_handler)(struct device_queue_manager *dqm,
-                                    struct qcm_process_device *qpd,
-                                    uint64_t tba_addr,
-                                    uint64_t tma_addr);
-
         int (*process_termination)(struct device_queue_manager *dqm,
                         struct qcm_process_device *qpd);
...
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -944,6 +944,10 @@ bool interrupt_is_wanted(struct kfd_dev *dev,
 /* amdkfd Apertures */
 int kfd_init_apertures(struct kfd_process *process);
 
+void kfd_process_set_trap_handler(struct qcm_process_device *qpd,
+                                  uint64_t tba_addr,
+                                  uint64_t tma_addr);
+
 /* Queue Context Management */
 int init_queue(struct queue **q, const struct queue_properties *properties);
 void uninit_queue(struct queue *q);
...
drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -1162,6 +1162,25 @@ static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd)
         return 0;
 }
 
+void kfd_process_set_trap_handler(struct qcm_process_device *qpd,
+                                  uint64_t tba_addr,
+                                  uint64_t tma_addr)
+{
+        if (qpd->cwsr_kaddr) {
+                /* KFD trap handler is bound, record as second-level TBA/TMA
+                 * in first-level TMA. First-level trap will jump to second.
+                 */
+                uint64_t *tma =
+                        (uint64_t *)(qpd->cwsr_kaddr + KFD_CWSR_TMA_OFFSET);
+                tma[0] = tba_addr;
+                tma[1] = tma_addr;
+        } else {
+                /* No trap handler bound, bind as first-level TBA/TMA. */
+                qpd->tba_addr = tba_addr;
+                qpd->tma_addr = tma_addr;
+        }
+}
+
 /*
  * On return the kfd_process is fully operational and will be freed when the
  * mm is released
...
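
To see the new helper's two cases end to end, here is a standalone model (hedged: a userspace mock with stand-in types, not the kernel code; the KFD_CWSR_TMA_OFFSET value is illustrative, as the real offset depends on the kernel's CWSR layout):

/* Hedged userspace model of kfd_process_set_trap_handler(); stand-in types,
 * illustrative KFD_CWSR_TMA_OFFSET.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define KFD_CWSR_TMA_OFFSET 2048        /* illustrative only */

struct qcm_process_device {
        void *cwsr_kaddr;       /* non-NULL iff the CWSR trap handler is bound */
        uint64_t tba_addr;      /* first-level trap base address */
        uint64_t tma_addr;      /* first-level trap memory address */
};

static void set_trap_handler(struct qcm_process_device *qpd,
                             uint64_t tba_addr, uint64_t tma_addr)
{
        if (qpd->cwsr_kaddr) {
                /* CWSR handler owns the first level: record the user handler
                 * as second-level TBA/TMA inside the first-level TMA, where
                 * the first-level trap code finds it and jumps to it.
                 */
                uint64_t *tma = (uint64_t *)((char *)qpd->cwsr_kaddr +
                                             KFD_CWSR_TMA_OFFSET);
                tma[0] = tba_addr;
                tma[1] = tma_addr;
        } else {
                /* No CWSR handler: the user handler is the first level. */
                qpd->tba_addr = tba_addr;
                qpd->tma_addr = tma_addr;
        }
}

int main(void)
{
        struct qcm_process_device with_cwsr = { .cwsr_kaddr = calloc(1, 4096) };
        struct qcm_process_device without_cwsr = { 0 };

        set_trap_handler(&with_cwsr, 0x1000, 0x2000);
        set_trap_handler(&without_cwsr, 0x1000, 0x2000);

        uint64_t *tma = (uint64_t *)((char *)with_cwsr.cwsr_kaddr +
                                     KFD_CWSR_TMA_OFFSET);
        printf("cwsr:    second-level tba=%#llx tma=%#llx\n",
               (unsigned long long)tma[0], (unsigned long long)tma[1]);
        printf("no cwsr: first-level  tba=%#llx tma=%#llx\n",
               (unsigned long long)without_cwsr.tba_addr,
               (unsigned long long)without_cwsr.tma_addr);
        free(with_cwsr.cwsr_kaddr);
        return 0;
}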