Commit 33f86ff6 authored by Dave Airlie

Merge branch 'amdkfd-next-3.19' of git://people.freedesktop.org/~gabbayo/linux into drm-next

- Fixes for sparse warnings
- Memory leak fixes
- Fix for deadlock between amdkfd and iommu

* 'amdkfd-next-3.19' of git://people.freedesktop.org/~gabbayo/linux:
  amdkfd: delete some dead code
  amdkfd: Fix memory leak of mqds on dqm fini
  amdkfd: fix an error handling bug in pqm_create_queue()
  amdkfd: fix some error handling in ioctl
  amdkfd: Remove DRM_AMDGPU dependency from Kconfig
  amdkfd: explicitely include io.h in kfd_doorbell.c
  amdkfd: Clear ctx cb before suspend
  amdkfd: Instead of using get function, use container_of
  amdkfd: use schedule() in sync_with_hw
  amdkfd: Fix memory leak on process deregistration
  amdkfd: add __iomem attribute to doorbell_ptr
  amdkfd: fence_wait_timeout() can be static
  amdkfd: is_occupied() can be static
  amdkfd: Fix sparse warnings in kfd_flat_memory.c
  amdkfd: pqm_get_kernel_queue() can be static
  amdkfd: test_kq() can be static
  amdkfd: Fix sparse warnings in kfd_topology.c
  amdkfd: Fix sparse warnings in kfd_chardev.c
parents e38648f9 9cf4a281
@@ -4,6 +4,6 @@
 config HSA_AMD
        tristate "HSA kernel driver for AMD GPU devices"
-       depends on (DRM_RADEON || DRM_AMDGPU) && AMD_IOMMU_V2 && X86_64
+       depends on DRM_RADEON && AMD_IOMMU_V2 && X86_64
        help
          Enable this if you want to use HSA features on AMD GPU devices.
@@ -149,7 +149,9 @@ static int set_queue_properties_from_user(struct queue_properties *q_properties,
        }

        if ((args->ring_base_address) &&
-               (!access_ok(VERIFY_WRITE, args->ring_base_address, sizeof(uint64_t)))) {
+               (!access_ok(VERIFY_WRITE,
+                       (const void __user *) args->ring_base_address,
+                       sizeof(uint64_t)))) {
                pr_err("kfd: can't access ring base address\n");
                return -EFAULT;
        }
@@ -159,12 +161,16 @@ static int set_queue_properties_from_user(struct queue_properties *q_properties,
                return -EINVAL;
        }

-       if (!access_ok(VERIFY_WRITE, args->read_pointer_address, sizeof(uint32_t))) {
+       if (!access_ok(VERIFY_WRITE,
+                       (const void __user *) args->read_pointer_address,
+                       sizeof(uint32_t))) {
                pr_err("kfd: can't access read pointer\n");
                return -EFAULT;
        }

-       if (!access_ok(VERIFY_WRITE, args->write_pointer_address, sizeof(uint32_t))) {
+       if (!access_ok(VERIFY_WRITE,
+                       (const void __user *) args->write_pointer_address,
+                       sizeof(uint32_t))) {
                pr_err("kfd: can't access write pointer\n");
                return -EFAULT;
        }
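
The substantive part of these hunks is the (const void __user *) cast: the ioctl arguments carry user pointers as plain 64-bit integers, while access_ok() expects an annotated user-space pointer, which is what sparse was flagging. A minimal sketch of the pattern, with a hypothetical field name (ring_base) rather than the real args struct; VERIFY_WRITE matches the access_ok() signature of this kernel generation:

        /* Validate a user pointer smuggled through an ioctl as a u64. */
        static int check_ring_base(uint64_t ring_base)
        {
                if (ring_base &&
                    !access_ok(VERIFY_WRITE,
                                (const void __user *) ring_base, /* cast keeps sparse quiet */
                                sizeof(uint64_t)))
                        return -EFAULT; /* region not writable by this process */
                return 0;
        }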
@@ -236,7 +242,7 @@ static long kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
        mutex_lock(&p->mutex);

        pdd = kfd_bind_process_to_device(dev, p);
-       if (IS_ERR(pdd) < 0) {
+       if (IS_ERR(pdd)) {
                err = PTR_ERR(pdd);
                goto err_bind_process;
        }
@@ -325,7 +331,9 @@ static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p,
        }

        if ((args.ring_base_address) &&
-               (!access_ok(VERIFY_WRITE, args.ring_base_address, sizeof(uint64_t)))) {
+               (!access_ok(VERIFY_WRITE,
+                       (const void __user *) args.ring_base_address,
+                       sizeof(uint64_t)))) {
                pr_err("kfd: can't access ring base address\n");
                return -EFAULT;
        }
@@ -381,7 +389,7 @@ static long kfd_ioctl_set_memory_policy(struct file *filep,
        mutex_lock(&p->mutex);

        pdd = kfd_bind_process_to_device(dev, p);
-       if (IS_ERR(pdd) < 0) {
+       if (IS_ERR(pdd)) {
                err = PTR_ERR(pdd);
                goto out;
        }
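
Both IS_ERR hunks fix a genuine bug rather than style: IS_ERR() returns a boolean (0 or 1), so the old test IS_ERR(pdd) < 0 was always false and the error path could never run. A minimal sketch of the ERR_PTR idiom these call sites rely on, with get_pdd() standing in for kfd_bind_process_to_device():

        struct kfd_process_device *pdd = get_pdd(dev, p);

        if (IS_ERR(pdd))                /* true when pdd encodes a negative errno */
                return PTR_ERR(pdd);    /* decode it back to, e.g., -ENOMEM */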
...
@@ -267,6 +267,7 @@ void kgd2kfd_suspend(struct kfd_dev *kfd)
        if (kfd->init_complete) {
                kfd->dqm->stop(kfd->dqm);
+               amd_iommu_set_invalidate_ctx_cb(kfd->pdev, NULL);
                amd_iommu_free_device(kfd->pdev);
        }
 }
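
This one-liner is the amdkfd/iommu deadlock fix from the summary above: the PASID invalidation callback is detached before the device is handed back, so the IOMMU driver can no longer call into a half-suspended amdkfd. A sketch of the callback lifecycle, using a hypothetical handler name (the signature follows the amd_iommu_v2 API of this era):

        /* Registered at init/resume time: */
        static int my_pasid_shutdown_cb(struct pci_dev *pdev, int pasid)
        {
                /* ... tear down the process context bound to this PASID ... */
                return 0;
        }

        amd_iommu_set_invalidate_ctx_cb(pdev, my_pasid_shutdown_cb);

        /* At suspend, clear it *before* releasing the device: */
        amd_iommu_set_invalidate_ctx_cb(pdev, NULL);
        amd_iommu_free_device(pdev);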
...
@@ -67,26 +67,21 @@ static inline unsigned int get_pipes_num_cpsch(void)
        return PIPE_PER_ME_CP_SCHEDULING;
 }

-static unsigned int get_sh_mem_bases_nybble_64(struct kfd_process *process,
-                                               struct kfd_dev *dev)
+static inline unsigned int
+get_sh_mem_bases_nybble_64(struct kfd_process_device *pdd)
 {
-       struct kfd_process_device *pdd;
        uint32_t nybble;

-       pdd = kfd_get_process_device_data(dev, process, 1);
        nybble = (pdd->lds_base >> 60) & 0x0E;

        return nybble;
 }

-static unsigned int get_sh_mem_bases_32(struct kfd_process *process,
-                                       struct kfd_dev *dev)
+static inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd)
 {
-       struct kfd_process_device *pdd;
        unsigned int shared_base;

-       pdd = kfd_get_process_device_data(dev, process, 1);
        shared_base = (pdd->lds_base >> 16) & 0xFF;

        return shared_base;
@@ -96,10 +91,13 @@ static uint32_t compute_sh_mem_bases_64bit(unsigned int top_address_nybble);
 static void init_process_memory(struct device_queue_manager *dqm,
                                struct qcm_process_device *qpd)
 {
+       struct kfd_process_device *pdd;
        unsigned int temp;

        BUG_ON(!dqm || !qpd);

+       pdd = qpd_to_pdd(qpd);
+
        /* check if sh_mem_config register already configured */
        if (qpd->sh_mem_config == 0) {
                qpd->sh_mem_config =
@@ -111,11 +109,11 @@ static void init_process_memory(struct device_queue_manager *dqm,
        }

        if (qpd->pqm->process->is_32bit_user_mode) {
-               temp = get_sh_mem_bases_32(qpd->pqm->process, dqm->dev);
+               temp = get_sh_mem_bases_32(pdd);
                qpd->sh_mem_bases = SHARED_BASE(temp);
                qpd->sh_mem_config |= PTR32;
        } else {
-               temp = get_sh_mem_bases_nybble_64(qpd->pqm->process, dqm->dev);
+               temp = get_sh_mem_bases_nybble_64(pdd);
                qpd->sh_mem_bases = compute_sh_mem_bases_64bit(temp);
        }
@@ -409,6 +407,7 @@ static int unregister_process_nocpsch(struct device_queue_manager *dqm,
        list_for_each_entry_safe(cur, next, &dqm->queues, list) {
                if (qpd == cur->qpd) {
                        list_del(&cur->list);
+                       kfree(cur);
                        dqm->processes_count--;
                        goto out;
                }
@@ -576,11 +575,15 @@ static int initialize_nocpsch(struct device_queue_manager *dqm)

 static void uninitialize_nocpsch(struct device_queue_manager *dqm)
 {
+       int i;
+
        BUG_ON(!dqm);

        BUG_ON(dqm->queue_count > 0 || dqm->processes_count > 0);

        kfree(dqm->allocated_queues);
+       for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++)
+               kfree(dqm->mqds[i]);
        mutex_destroy(&dqm->lock);
        kfd2kgd->free_mem(dqm->dev->kgd,
                        (struct kgd_mem *) dqm->pipeline_mem);
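
The new loop is the "memory leak of mqds on dqm fini" fix from the shortlog: mqd managers are allocated lazily, one per queue type, so teardown has to sweep every slot. No per-slot check is needed because kfree(NULL) is a defined no-op:

        for (i = 0; i < KFD_MQD_TYPE_MAX; i++)
                kfree(dqm->mqds[i]);    /* safe even for types never instantiated */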
@@ -706,8 +709,7 @@ static int stop_cpsch(struct device_queue_manager *dqm)
        destroy_queues_cpsch(dqm, true);

        list_for_each_entry(node, &dqm->queues, list) {
-               pdd = kfd_get_process_device_data(dqm->dev,
-                               node->qpd->pqm->process, 1);
+               pdd = qpd_to_pdd(node->qpd);
                pdd->bound = false;
        }
        kfd2kgd->free_mem(dqm->dev->kgd,
@@ -789,8 +791,9 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
        return retval;
 }

-int fence_wait_timeout(unsigned int *fence_addr, unsigned int fence_value,
-                       unsigned long timeout)
+static int fence_wait_timeout(unsigned int *fence_addr,
+                               unsigned int fence_value,
+                               unsigned long timeout)
 {
        BUG_ON(!fence_addr);
        timeout += jiffies;
...
@@ -23,6 +23,7 @@
 #include <linux/mm.h>
 #include <linux/mman.h>
 #include <linux/slab.h>
+#include <linux/io.h>

 /*
  * This extension supports a kernel level doorbells management for
...
@@ -276,21 +276,22 @@
  */

 #define MAKE_GPUVM_APP_BASE(gpu_num) \
-       (((uint64_t)(gpu_num) << 61) + 0x1000000000000)
+       (((uint64_t)(gpu_num) << 61) + 0x1000000000000L)

 #define MAKE_GPUVM_APP_LIMIT(base) \
-       (((uint64_t)(base) & 0xFFFFFF0000000000) | 0xFFFFFFFFFF)
+       (((uint64_t)(base) & \
+               0xFFFFFF0000000000UL) | 0xFFFFFFFFFFL)

 #define MAKE_SCRATCH_APP_BASE(gpu_num) \
-       (((uint64_t)(gpu_num) << 61) + 0x100000000)
+       (((uint64_t)(gpu_num) << 61) + 0x100000000L)

 #define MAKE_SCRATCH_APP_LIMIT(base) \
-       (((uint64_t)base & 0xFFFFFFFF00000000) | 0xFFFFFFFF)
+       (((uint64_t)base & 0xFFFFFFFF00000000UL) | 0xFFFFFFFF)

 #define MAKE_LDS_APP_BASE(gpu_num) \
        (((uint64_t)(gpu_num) << 61) + 0x0)

 #define MAKE_LDS_APP_LIMIT(base) \
-       (((uint64_t)(base) & 0xFFFFFFFF00000000) | 0xFFFFFFFF)
+       (((uint64_t)(base) & 0xFFFFFFFF00000000UL) | 0xFFFFFFFF)

 int kfd_init_apertures(struct kfd_process *process)
 {
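
The only change to these macros is the L/UL suffixes: an unsuffixed hexadecimal constant wider than 32 bits makes sparse warn that the constant "is so big it is long", so the type is now stated explicitly. In miniature (hypothetical macro name):

        #define APERTURE_BASE(n)  (((uint64_t)(n) << 61) + 0x1000000000000L)
        /* without the trailing L, sparse flags the 49-bit constant */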
...
@@ -25,6 +25,7 @@
 #include <linux/mutex.h>
 #include <linux/slab.h>
 #include <linux/printk.h>
+#include <linux/sched.h>
 #include "kfd_kernel_queue.h"
 #include "kfd_priv.h"
 #include "kfd_device_queue_manager.h"
@@ -66,8 +67,7 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
        if (kq->mqd == NULL)
                return false;

-       prop.doorbell_ptr =
-               (uint32_t *)kfd_get_kernel_doorbell(dev, &prop.doorbell_off);
+       prop.doorbell_ptr = kfd_get_kernel_doorbell(dev, &prop.doorbell_off);

        if (prop.doorbell_ptr == NULL)
                goto err_get_kernel_doorbell;
@@ -172,7 +172,7 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
        kfd2kgd->free_mem(dev->kgd, (struct kgd_mem *) kq->pq);
 err_pq_allocate_vidmem:
        pr_err("kfd: error init pq\n");
-       kfd_release_kernel_doorbell(dev, (u32 *)prop.doorbell_ptr);
+       kfd_release_kernel_doorbell(dev, prop.doorbell_ptr);
 err_get_kernel_doorbell:
        pr_err("kfd: error init doorbell");
        return false;
@@ -195,7 +195,7 @@ static void uninitialize(struct kernel_queue *kq)
        kfd2kgd->free_mem(kq->dev->kgd, (struct kgd_mem *) kq->wptr_mem);
        kfd2kgd->free_mem(kq->dev->kgd, (struct kgd_mem *) kq->pq);
        kfd_release_kernel_doorbell(kq->dev,
-                       (u32 *)kq->queue->properties.doorbell_ptr);
+                       kq->queue->properties.doorbell_ptr);
        uninit_queue(kq->queue);
 }
@@ -255,7 +255,7 @@ static void submit_packet(struct kernel_queue *kq)
 #endif

        *kq->wptr_kernel = kq->pending_wptr;
-       write_kernel_doorbell((u32 *)kq->queue->properties.doorbell_ptr,
+       write_kernel_doorbell(kq->queue->properties.doorbell_ptr,
                        kq->pending_wptr);
 }
@@ -275,7 +275,7 @@ static int sync_with_hw(struct kernel_queue *kq, unsigned long timeout_ms)
                                *kq->wptr_kernel, *kq->rptr_kernel);
                        return -ETIME;
                }
-               cpu_relax();
+               schedule();
        }

        return 0;
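
Replacing cpu_relax() with schedule() turns a busy-wait into a polite poll: cpu_relax() pins the CPU for the whole timeout, while schedule() lets other runnable tasks in between polls. It is also why <linux/sched.h> is now included above. A sketch of the loop shape with simplified fields (the real sync_with_hw() compares the queue's rptr against the pending wptr):

        static int wait_for_hw(volatile uint32_t *rptr, uint32_t wptr,
                               unsigned long timeout_ms)
        {
                unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

                while (*rptr != wptr) {
                        if (time_after(jiffies, timeout))
                                return -ETIME;
                        schedule();     /* yield; requires a sleepable context */
                }
                return 0;
        }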
@@ -321,7 +321,7 @@ void kernel_queue_uninit(struct kernel_queue *kq)
        kfree(kq);
 }

-void test_kq(struct kfd_dev *dev)
+static __attribute__((unused)) void test_kq(struct kfd_dev *dev)
 {
        struct kernel_queue *kq;
        uint32_t *buffer, i;
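
Making test_kq() static cures the sparse "symbol was not declared" warning, and __attribute__((unused)) stops gcc complaining that the now-static debug helper has no callers. Kernel style spells the same attribute __maybe_unused (from <linux/compiler.h>):

        /* Kept compiled for ad-hoc debugging even though nothing calls it: */
        static __maybe_unused void debug_dump_queue(struct kernel_queue *kq)
        {
                /* ... */
        }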
...
@@ -179,9 +179,9 @@ static int destroy_mqd(struct mqd_manager *mm, void *mqd,
                                        pipe_id, queue_id);
 }

-bool is_occupied(struct mqd_manager *mm, void *mqd,
-                       uint64_t queue_address, uint32_t pipe_id,
-                       uint32_t queue_id)
+static bool is_occupied(struct mqd_manager *mm, void *mqd,
+                       uint64_t queue_address, uint32_t pipe_id,
+                       uint32_t queue_id)
 {
        return kfd2kgd->hqd_is_occupies(mm->dev->kgd, queue_address,
...
@@ -279,7 +279,7 @@ struct queue_properties {
        uint32_t queue_percent;
        uint32_t *read_ptr;
        uint32_t *write_ptr;
-       uint32_t *doorbell_ptr;
+       uint32_t __iomem *doorbell_ptr;
        uint32_t doorbell_off;
        bool is_interop;
        bool is_active;
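
The __iomem annotation tells sparse that doorbell_ptr points into device MMIO space rather than ordinary memory, so direct dereferences or implicit casts to plain pointers get flagged; accesses must go through the I/O accessors, which is why the (u32 *) casts in kfd_kernel_queue.c above could be dropped. A minimal sketch:

        #include <linux/io.h>

        static void ring_doorbell(u32 __iomem *doorbell, u32 wptr)
        {
                writel(wptr, doorbell); /* correct: MMIO accessor */
                /* *doorbell = wptr;       would draw a sparse warning */
        }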
@@ -414,6 +414,8 @@ struct kfd_process_device {
        bool bound;
 };

+#define qpd_to_pdd(x) container_of(x, struct kfd_process_device, qpd)
+
 /* Process data */
 struct kfd_process {
        /*
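
qpd_to_pdd() is the "use container_of" change from the shortlog: since qpd is embedded by value in struct kfd_process_device, the parent structure is recoverable with pure pointer arithmetic, replacing the kfd_get_process_device_data() list walk at the call sites patched above. The relationship, with fields abbreviated:

        struct kfd_process_device {
                /* ... */
                struct qcm_process_device qpd;  /* embedded, not a pointer */
                bool bound;
        };

        #define qpd_to_pdd(x) container_of(x, struct kfd_process_device, qpd)

        /* usage, as in stop_cpsch() and init_process_memory():
         *      struct kfd_process_device *pdd = qpd_to_pdd(qpd);
         * O(1), no lookup, no failure path.
         */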
...
@@ -348,11 +348,6 @@ struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
        if (err < 0)
                return ERR_PTR(err);

-       if (err < 0) {
-               amd_iommu_unbind_pasid(dev->pdev, p->pasid);
-               return ERR_PTR(err);
-       }
-
        pdd->bound = true;

        return pdd;
...
@@ -208,7 +208,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
        case KFD_QUEUE_TYPE_DIQ:
                kq = kernel_queue_init(dev, KFD_QUEUE_TYPE_DIQ);
                if (kq == NULL) {
-                       kernel_queue_uninit(kq);
+                       retval = -ENOMEM;
                        goto err_create_queue;
                }
                kq->queue->properties.queue_id = *qid;
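
The old error path had two bugs in one line: it passed the NULL it had just failed to obtain into kernel_queue_uninit(), and it jumped to err_create_queue with retval still holding whatever value it had before. The fixed shape of the pattern:

        kq = kernel_queue_init(dev, KFD_QUEUE_TYPE_DIQ);
        if (kq == NULL) {
                retval = -ENOMEM;       /* report the failure... */
                goto err_create_queue;  /* ...and never tear down a NULL object */
        }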
@@ -325,7 +325,8 @@ int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid,
        return 0;
 }

-struct kernel_queue *pqm_get_kernel_queue(struct process_queue_manager *pqm,
-                                       unsigned int qid)
+static __attribute__((unused)) struct kernel_queue *pqm_get_kernel_queue(
+                                       struct process_queue_manager *pqm,
+                                       unsigned int qid)
 {
        struct process_queue_node *pqn;
...
@@ -96,7 +96,7 @@ static int kfd_topology_get_crat_acpi(void *crat_image, size_t *size)
                return -EINVAL;
        }

-       if (*size >= crat_table->length && crat_image != 0)
+       if (*size >= crat_table->length && crat_image != NULL)
                memcpy(crat_image, crat_table, crat_table->length);

        *size = crat_table->length;
@@ -183,7 +183,7 @@ static int kfd_parse_subtype_mem(struct crat_subtype_memory *mem)
        list_for_each_entry(dev, &topology_device_list, list) {
                if (mem->promixity_domain == i) {
                        props = kfd_alloc_struct(props);
-                       if (props == 0)
+                       if (props == NULL)
                                return -ENOMEM;

                        if (dev->node_props.cpu_cores_count == 0)
@@ -231,7 +231,7 @@ static int kfd_parse_subtype_cache(struct crat_subtype_cache *cache)
                if (id == dev->node_props.cpu_core_id_base ||
                        id == dev->node_props.simd_id_base) {
                        props = kfd_alloc_struct(props);
-                       if (props == 0)
+                       if (props == NULL)
                                return -ENOMEM;

                        props->processor_id_low = id;
@@ -282,7 +282,7 @@ static int kfd_parse_subtype_iolink(struct crat_subtype_iolink *iolink)
        list_for_each_entry(dev, &topology_device_list, list) {
                if (id_from == i) {
                        props = kfd_alloc_struct(props);
-                       if (props == 0)
+                       if (props == NULL)
                                return -ENOMEM;

                        props->node_from = id_from;
@@ -415,9 +415,9 @@ static struct kfd_topology_device *kfd_create_topology_device(void)
        struct kfd_topology_device *dev;

        dev = kfd_alloc_struct(dev);
-       if (dev == 0) {
+       if (dev == NULL) {
                pr_err("No memory to allocate a topology device");
-               return 0;
+               return NULL;
        }

        INIT_LIST_HEAD(&dev->mem_props);
@@ -428,7 +428,7 @@ static struct kfd_topology_device *kfd_create_topology_device(void)
        sys_props.num_devices++;

        return dev;
 }

 static int kfd_parse_crat_table(void *crat_image)
 {
@@ -752,11 +752,11 @@ static void kfd_remove_sysfs_node_entry(struct kfd_topology_device *dev)
                        if (iolink->kobj) {
                                kfd_remove_sysfs_file(iolink->kobj,
                                                        &iolink->attr);
-                               iolink->kobj = 0;
+                               iolink->kobj = NULL;
                        }
                kobject_del(dev->kobj_iolink);
                kobject_put(dev->kobj_iolink);
-               dev->kobj_iolink = 0;
+               dev->kobj_iolink = NULL;
        }

        if (dev->kobj_cache) {
@@ -764,22 +764,22 @@ static void kfd_remove_sysfs_node_entry(struct kfd_topology_device *dev)
                        if (cache->kobj) {
                                kfd_remove_sysfs_file(cache->kobj,
                                                        &cache->attr);
-                               cache->kobj = 0;
+                               cache->kobj = NULL;
                        }
                kobject_del(dev->kobj_cache);
                kobject_put(dev->kobj_cache);
-               dev->kobj_cache = 0;
+               dev->kobj_cache = NULL;
        }

        if (dev->kobj_mem) {
                list_for_each_entry(mem, &dev->mem_props, list)
                        if (mem->kobj) {
                                kfd_remove_sysfs_file(mem->kobj, &mem->attr);
-                               mem->kobj = 0;
+                               mem->kobj = NULL;
                        }
                kobject_del(dev->kobj_mem);
                kobject_put(dev->kobj_mem);
-               dev->kobj_mem = 0;
+               dev->kobj_mem = NULL;
        }

        if (dev->kobj_node) {
@@ -788,7 +788,7 @@ static void kfd_remove_sysfs_node_entry(struct kfd_topology_device *dev)
                sysfs_remove_file(dev->kobj_node, &dev->attr_props);
                kobject_del(dev->kobj_node);
                kobject_put(dev->kobj_node);
-               dev->kobj_node = 0;
+               dev->kobj_node = NULL;
        }
 }
@@ -939,7 +939,7 @@ static int kfd_topology_update_sysfs(void)
        int ret;

        pr_info("Creating topology SYSFS entries\n");
-       if (sys_props.kobj_topology == 0) {
+       if (sys_props.kobj_topology == NULL) {
                sys_props.kobj_topology =
                        kfd_alloc_struct(sys_props.kobj_topology);
                if (!sys_props.kobj_topology)
@@ -989,17 +989,17 @@ static void kfd_topology_release_sysfs(void)
                if (sys_props.kobj_nodes) {
                        kobject_del(sys_props.kobj_nodes);
                        kobject_put(sys_props.kobj_nodes);
-                       sys_props.kobj_nodes = 0;
+                       sys_props.kobj_nodes = NULL;
                }
                kobject_del(sys_props.kobj_topology);
                kobject_put(sys_props.kobj_topology);
-               sys_props.kobj_topology = 0;
+               sys_props.kobj_topology = NULL;
        }
 }

 int kfd_topology_init(void)
 {
-       void *crat_image = 0;
+       void *crat_image = NULL;
        size_t image_size = 0;
        int ret;
@@ -1094,12 +1094,12 @@ static uint32_t kfd_generate_gpu_id(struct kfd_dev *gpu)
 static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu)
 {
        struct kfd_topology_device *dev;
-       struct kfd_topology_device *out_dev = 0;
+       struct kfd_topology_device *out_dev = NULL;

        BUG_ON(!gpu);

        list_for_each_entry(dev, &topology_device_list, list)
-               if (dev->gpu == 0 && dev->node_props.simd_count > 0) {
+               if (dev->gpu == NULL && dev->node_props.simd_count > 0) {
                        dev->gpu = gpu;
                        out_dev = dev;
                        break;
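
Every kfd_topology.c hunk above silences the same sparse diagnostic, "Using plain integer as NULL pointer": 0 is a valid null pointer constant in C, but NULL states the intent and lets sparse type-check pointer contexts. In short:

        struct kobject *kobj = NULL;    /* not: = 0; */

        if (kobj == NULL)               /* or simply: if (!kobj) */
                return NULL;            /* not: return 0; from a pointer-returning function */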
...