Commit ab7c1648 authored by Kent Russell, committed by Oded Gabbay

drm/amdkfd: Fix goto usage v2

Remove gotos that do not feature any common cleanup, and use gotos
instead of repeating cleanup commands.

According to kernel.org: "The goto statement comes in handy when a
function exits from multiple locations and some common work such as
cleanup has to be done. If there is no cleanup needed then just return
directly."

v2: Applied review suggestions in create_queue_nocpsch
Signed-off-by: Kent Russell <kent.russell@amd.com>
Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Oded Gabbay <oded.gabbay@gmail.com>
Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
parent 4eacc26b
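
The rule the patch applies boils down to the sketch below. This is a minimal illustrative example, not code from this patch; setup_widget and its helpers are hypothetical names. Exit paths with nothing to unwind return directly; exit paths that share cleanup jump to labels that unwind in reverse order of acquisition:

static int setup_widget(struct widget *w)
{
	int ret;

	ret = alloc_buffer(w);		/* hypothetical helper */
	if (ret)
		return ret;		/* nothing to clean up yet: return directly */

	ret = map_buffer(w);		/* hypothetical helper */
	if (ret)
		goto out_free;		/* shared cleanup lives under one label */

	ret = register_widget(w);	/* hypothetical helper */
	if (ret)
		goto out_unmap;		/* a deeper failure unwinds more */

	return 0;

out_unmap:
	unmap_buffer(w);
out_free:
	free_buffer(w);
	return ret;
}

Both directions show up in the diff below: create_cp_queue() and kfd_module_init() drop gotos that jumped straight to a return, while create_compute_queue_nocpsch() and create_sdma_queue_nocpsch() gain layered out_uninit_mqd/out_deallocate_* labels.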
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -460,9 +460,8 @@ static int kfd_ioctl_dbg_register(struct file *filep,
 	 */
 	pdd = kfd_bind_process_to_device(dev, p);
 	if (IS_ERR(pdd)) {
-		mutex_unlock(&p->mutex);
-		mutex_unlock(kfd_get_dbgmgr_mutex());
-		return PTR_ERR(pdd);
+		status = PTR_ERR(pdd);
+		goto out;
 	}
 
 	if (!dev->dbgmgr) {
@@ -480,6 +479,7 @@ static int kfd_ioctl_dbg_register(struct file *filep,
 		status = -EINVAL;
 	}
 
+out:
 	mutex_unlock(&p->mutex);
 	mutex_unlock(kfd_get_dbgmgr_mutex());
 
@@ -580,8 +580,8 @@ static int kfd_ioctl_dbg_address_watch(struct file *filep,
 	args_idx += sizeof(aw_info.watch_address) * aw_info.num_watch_points;
 
 	if (args_idx >= args->buf_size_in_bytes - sizeof(*args)) {
-		kfree(args_buff);
-		return -EINVAL;
+		status = -EINVAL;
+		goto out;
 	}
 
 	watch_mask_value = (uint64_t) args_buff[args_idx];
@@ -604,8 +604,8 @@ static int kfd_ioctl_dbg_address_watch(struct file *filep,
 	}
 
 	if (args_idx >= args->buf_size_in_bytes - sizeof(args)) {
-		kfree(args_buff);
-		return -EINVAL;
+		status = -EINVAL;
+		goto out;
 	}
 
 	/* Currently HSA Event is not supported for DBG */
@@ -617,6 +617,7 @@ static int kfd_ioctl_dbg_address_watch(struct file *filep,
 
 	mutex_unlock(kfd_get_dbgmgr_mutex());
 
+out:
 	kfree(args_buff);
 
 	return status;
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -161,32 +161,31 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
 	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
 		pr_warn("Can't create new usermode queue because %d queues were already created\n",
 				dqm->total_queue_count);
-		mutex_unlock(&dqm->lock);
-		return -EPERM;
+		retval = -EPERM;
+		goto out_unlock;
 	}
 
 	if (list_empty(&qpd->queues_list)) {
 		retval = allocate_vmid(dqm, qpd, q);
-		if (retval) {
-			mutex_unlock(&dqm->lock);
-			return retval;
-		}
+		if (retval)
+			goto out_unlock;
 	}
 	*allocated_vmid = qpd->vmid;
 	q->properties.vmid = qpd->vmid;
 
 	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
 		retval = create_compute_queue_nocpsch(dqm, q, qpd);
-	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
+	else if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
 		retval = create_sdma_queue_nocpsch(dqm, q, qpd);
+	else
+		retval = -EINVAL;
 
 	if (retval) {
 		if (list_empty(&qpd->queues_list)) {
 			deallocate_vmid(dqm, qpd, q);
 			*allocated_vmid = 0;
 		}
-		mutex_unlock(&dqm->lock);
-		return retval;
+		goto out_unlock;
 	}
 
 	list_add(&q->list, &qpd->queues_list);
@@ -204,8 +203,9 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
 	pr_debug("Total of %d queues are accountable so far\n",
 			dqm->total_queue_count);
 
+out_unlock:
 	mutex_unlock(&dqm->lock);
-	return 0;
+	return retval;
 }
 
 static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q)
@@ -271,23 +271,25 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
 	retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
 				&q->gart_mqd_addr, &q->properties);
-	if (retval) {
-		deallocate_hqd(dqm, q);
-		return retval;
-	}
+	if (retval)
+		goto out_deallocate_hqd;
 
 	pr_debug("Loading mqd to hqd on pipe %d, queue %d\n",
 			q->pipe, q->queue);
 
 	retval = mqd->load_mqd(mqd, q->mqd, q->pipe,
 			q->queue, (uint32_t __user *) q->properties.write_ptr);
-	if (retval) {
-		deallocate_hqd(dqm, q);
-		mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
-		return retval;
-	}
+	if (retval)
+		goto out_uninit_mqd;
 
 	return 0;
+
+out_uninit_mqd:
+	mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
+out_deallocate_hqd:
+	deallocate_hqd(dqm, q);
+	return retval;
 }
 
 static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
@@ -366,8 +368,8 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
 	mqd = dqm->ops.get_mqd_manager(dqm,
 			get_mqd_type_from_queue_type(q->properties.type));
 	if (!mqd) {
-		mutex_unlock(&dqm->lock);
-		return -ENOMEM;
+		retval = -ENOMEM;
+		goto out_unlock;
 	}
 
 	if (q->properties.is_active)
@@ -387,6 +389,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
 	if (sched_policy != KFD_SCHED_POLICY_NO_HWS)
 		retval = execute_queues_cpsch(dqm, false);
 
+out_unlock:
 	mutex_unlock(&dqm->lock);
 	return retval;
 }
@@ -500,16 +503,15 @@ static int initialize_nocpsch(struct device_queue_manager *dqm)
 	pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));
 
+	dqm->allocated_queues = kcalloc(get_pipes_per_mec(dqm),
+					sizeof(unsigned int), GFP_KERNEL);
+	if (!dqm->allocated_queues)
+		return -ENOMEM;
+
 	mutex_init(&dqm->lock);
 	INIT_LIST_HEAD(&dqm->queues);
 	dqm->queue_count = dqm->next_pipe_to_allocate = 0;
 	dqm->sdma_queue_count = 0;
-	dqm->allocated_queues = kcalloc(get_pipes_per_mec(dqm),
-					sizeof(unsigned int), GFP_KERNEL);
-	if (!dqm->allocated_queues) {
-		mutex_destroy(&dqm->lock);
-		return -ENOMEM;
-	}
 
 	for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
 		int pipe_offset = pipe * get_queues_per_pipe(dqm);
@@ -602,20 +604,22 @@ static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
 	dqm->ops_asic_specific.init_sdma_vm(dqm, q, qpd);
 
 	retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
 				&q->gart_mqd_addr, &q->properties);
-	if (retval) {
-		deallocate_sdma_queue(dqm, q->sdma_id);
-		return retval;
-	}
+	if (retval)
+		goto out_deallocate_sdma_queue;
 
 	retval = mqd->load_mqd(mqd, q->mqd, 0,
 				0, NULL);
-	if (retval) {
-		deallocate_sdma_queue(dqm, q->sdma_id);
-		mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
-		return retval;
-	}
+	if (retval)
+		goto out_uninit_mqd;
 
 	return 0;
+
+out_uninit_mqd:
+	mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
+out_deallocate_sdma_queue:
+	deallocate_sdma_queue(dqm, q->sdma_id);
+	return retval;
 }
 
 /*
@@ -681,12 +685,8 @@ static int initialize_cpsch(struct device_queue_manager *dqm)
 	dqm->active_runlist = false;
 	retval = dqm->ops_asic_specific.initialize(dqm);
 	if (retval)
-		goto fail_init_pipelines;
-
-	return 0;
-
-fail_init_pipelines:
-	mutex_destroy(&dqm->lock);
+		mutex_destroy(&dqm->lock);
+
 	return retval;
 }
@@ -846,8 +846,8 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
 			get_mqd_type_from_queue_type(q->properties.type));
 	if (!mqd) {
-		mutex_unlock(&dqm->lock);
-		return -ENOMEM;
+		retval = -ENOMEM;
+		goto out;
 	}
 
 	dqm->ops_asic_specific.init_sdma_vm(dqm, q, qpd);
@@ -1097,14 +1097,11 @@ static bool set_cache_memory_policy(struct device_queue_manager *dqm,
 		uint64_t base = (uintptr_t)alternate_aperture_base;
 		uint64_t limit = base + alternate_aperture_size - 1;
 
-		if (limit <= base)
-			goto out;
-
-		if ((base & APE1_FIXED_BITS_MASK) != 0)
-			goto out;
-
-		if ((limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT)
+		if (limit <= base || (base & APE1_FIXED_BITS_MASK) != 0 ||
+		   (limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT) {
+			retval = false;
 			goto out;
+		}
 
 		qpd->sh_mem_ape1_base = base >> 16;
 		qpd->sh_mem_ape1_limit = limit >> 16;
@@ -1125,12 +1122,9 @@ static bool set_cache_memory_policy(struct device_queue_manager *dqm,
 		qpd->sh_mem_config, qpd->sh_mem_ape1_base,
 		qpd->sh_mem_ape1_limit);
 
-	mutex_unlock(&dqm->lock);
-	return retval;
-
 out:
 	mutex_unlock(&dqm->lock);
-	return false;
+	return retval;
 }
 
 struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
--- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
@@ -105,7 +105,7 @@ static int __init kfd_module_init(void)
 	err = kfd_pasid_init();
 	if (err < 0)
-		goto err_pasid;
+		return err;
 
 	err = kfd_chardev_init();
 	if (err < 0)
@@ -127,7 +127,6 @@ static int __init kfd_module_init(void)
 	kfd_chardev_exit();
 err_ioctl:
 	kfd_pasid_exit();
-err_pasid:
 	return err;
 }
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
@@ -442,6 +442,7 @@ int pm_send_set_resources(struct packet_manager *pm,
 			struct scheduling_resources *res)
 {
 	struct pm4_set_resources *packet;
+	int retval = 0;
 
 	BUG_ON(!pm || !res);
 
@@ -450,9 +451,9 @@ int pm_send_set_resources(struct packet_manager *pm,
 			sizeof(*packet) / sizeof(uint32_t),
 			(unsigned int **)&packet);
 	if (!packet) {
-		mutex_unlock(&pm->lock);
 		pr_err("Failed to allocate buffer on kernel queue\n");
-		return -ENOMEM;
+		retval = -ENOMEM;
+		goto out;
 	}
 
 	memset(packet, 0, sizeof(struct pm4_set_resources));
@@ -475,9 +476,10 @@ int pm_send_set_resources(struct packet_manager *pm,
 	pm->priv_queue->ops.submit_packet(pm->priv_queue);
 
+out:
 	mutex_unlock(&pm->lock);
 
-	return 0;
+	return retval;
 }
 
 int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues)
@@ -555,9 +557,6 @@ int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
 	packet->data_lo = lower_32_bits((uint64_t)fence_value);
 
 	pm->priv_queue->ops.submit_packet(pm->priv_queue);
-	mutex_unlock(&pm->lock);
-
-	return 0;
 
 fail_acquire_packet_buffer:
 	mutex_unlock(&pm->lock);
@@ -639,9 +638,6 @@ int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
 	pm->priv_queue->ops.submit_packet(pm->priv_queue);
-	mutex_unlock(&pm->lock);
-
-	return 0;
 
 err_acquire_packet_buffer:
 	mutex_unlock(&pm->lock);
 	return retval;
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -35,9 +35,8 @@ static inline struct process_queue_node *get_queue_by_qid(
 	BUG_ON(!pqm);
 
 	list_for_each_entry(pqn, &pqm->queues, process_queue_list) {
-		if (pqn->q && pqn->q->properties.queue_id == qid)
-			return pqn;
-		if (pqn->kq && pqn->kq->queue->properties.queue_id == qid)
+		if ((pqn->q && pqn->q->properties.queue_id == qid) ||
+		    (pqn->kq && pqn->kq->queue->properties.queue_id == qid))
 			return pqn;
 	}
 
@@ -113,8 +112,6 @@ static int create_cp_queue(struct process_queue_manager *pqm,
 {
 	int retval;
 
-	retval = 0;
-
 	/* Doorbell initialized in user space*/
 	q_properties->doorbell_ptr = NULL;
 
@@ -127,7 +124,7 @@ static int create_cp_queue(struct process_queue_manager *pqm,
 	retval = init_queue(q, q_properties);
 	if (retval != 0)
-		goto err_init_queue;
+		return retval;
 
 	(*q)->device = dev;
 	(*q)->process = pqm->process;
@@ -135,9 +132,6 @@ static int create_cp_queue(struct process_queue_manager *pqm,
 	pr_debug("PQM After init queue");
 
 	return retval;
-
-err_init_queue:
-	return retval;
 }
 
 int pqm_create_queue(struct process_queue_manager *pqm,
@@ -181,7 +175,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
 		list_for_each_entry(cur, &pdd->qpd.queues_list, list)
 			num_queues++;
 		if (num_queues >= dev->device_info->max_no_of_hqd/2)
-			return (-ENOSPC);
+			return -ENOSPC;
 	}
 
 	retval = find_available_queue_slot(pqm, qid);