Commit df954b69 authored by Xiaogang Chen, committed by Alex Deucher

drm/amdkfd: Separate dma unmap and free of dma address array operations

We do not need to free the dma address array of an svm_range each time we dma-unmap
its pages, as the same array can be reused. Only free it when the svm_range itself
is freed. Separate these two operations and use them accordingly.
Signed-off-by: Xiaogang Chen <xiaogang.chen@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 8ab7fab6
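
At a high level, the patch splits the old combined helper into two operations: svm_range_dma_unmap_dev() unmaps one device's DMA addresses, and the new svm_range_dma_unmap() applies it across all GPUs while leaving the per-GPU dma_addr arrays allocated for reuse; the arrays themselves are now freed only in svm_range_free(). Below is a minimal user-space sketch of that ownership pattern, using simplified stand-in names and types (struct range, range_dma_unmap, etc.) rather than the kernel code itself:

/*
 * Minimal user-space model of the split (illustrative only, not the
 * kernel code): "unmap" invalidates a range's per-GPU DMA address
 * arrays so they can be reused by a later remap, while "free" is the
 * single place that releases the arrays themselves.
 */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define MAX_GPU_INSTANCE 8

struct range {					/* stand-in for svm_range */
	unsigned long npages;
	uint64_t *dma_addr[MAX_GPU_INSTANCE];	/* one array per GPU */
};

/* Analogue of svm_range_dma_unmap_dev(): invalidate one GPU's mappings. */
static void range_dma_unmap_dev(uint64_t *dma_addr, unsigned long npages)
{
	memset(dma_addr, 0, npages * sizeof(*dma_addr));
}

/* Analogue of svm_range_dma_unmap(): unmap every GPU, keep the arrays. */
static void range_dma_unmap(struct range *r)
{
	for (int i = 0; i < MAX_GPU_INSTANCE; i++)
		if (r->dma_addr[i])
			range_dma_unmap_dev(r->dma_addr[i], r->npages);
}

/* Analogue of the new code in svm_range_free(): free the arrays once. */
static void range_free(struct range *r, int do_unmap)
{
	if (do_unmap)
		range_dma_unmap(r);
	for (int i = 0; i < MAX_GPU_INSTANCE; i++) {
		free(r->dma_addr[i]);		/* free(NULL) is a no-op */
		r->dma_addr[i] = NULL;
	}
}

int main(void)
{
	struct range r = { .npages = 4 };

	r.dma_addr[0] = calloc(r.npages, sizeof(uint64_t));
	if (!r.dma_addr[0])
		return 1;

	range_dma_unmap(&r);	/* e.g. during migration: array survives */
	range_dma_unmap(&r);	/* callable again without reallocating */
	range_free(&r, 1);	/* teardown: unmap and free in one place */
	return 0;
}

The benefit is that migrations, which can unmap and remap the same range many times, no longer free and reallocate the dma_addr arrays on every pass; the arrays now live exactly as long as the range.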
@@ -460,7 +460,7 @@ svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange,
 				    start >> PAGE_SHIFT, end >> PAGE_SHIFT,
 				    0, node->id, trigger);
 
-	svm_range_dma_unmap(adev->dev, scratch, 0, npages);
+	svm_range_dma_unmap_dev(adev->dev, scratch, 0, npages);
 
 out_free:
 	kvfree(buf);
@@ -544,7 +544,7 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
 	if (cpages) {
 		prange->actual_loc = best_loc;
-		svm_range_free_dma_mappings(prange, true);
+		svm_range_dma_unmap(prange);
 	} else {
 		svm_range_vram_node_free(prange);
 	}
@@ -745,7 +745,7 @@ svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange,
 				    start >> PAGE_SHIFT, end >> PAGE_SHIFT,
 				    node->id, 0, trigger);
 
-	svm_range_dma_unmap(adev->dev, scratch, 0, npages);
+	svm_range_dma_unmap_dev(adev->dev, scratch, 0, npages);
 
 out_free:
 	kvfree(buf);
@@ -231,7 +231,7 @@ svm_range_dma_map(struct svm_range *prange, unsigned long *bitmap,
 	return r;
 }
 
-void svm_range_dma_unmap(struct device *dev, dma_addr_t *dma_addr,
+void svm_range_dma_unmap_dev(struct device *dev, dma_addr_t *dma_addr,
			 unsigned long offset, unsigned long npages)
 {
 	enum dma_data_direction dir = DMA_BIDIRECTIONAL;
@@ -249,7 +249,7 @@ void svm_range_dma_unmap(struct device *dev, dma_addr_t *dma_addr,
 	}
 }
 
-void svm_range_free_dma_mappings(struct svm_range *prange, bool unmap_dma)
+void svm_range_dma_unmap(struct svm_range *prange)
 {
 	struct kfd_process_device *pdd;
 	dma_addr_t *dma_addr;
@@ -270,10 +270,8 @@ void svm_range_free_dma_mappings(struct svm_range *prange, bool unmap_dma)
 			continue;
 		}
 
 		dev = &pdd->dev->adev->pdev->dev;
-		if (unmap_dma)
-			svm_range_dma_unmap(dev, dma_addr, 0, prange->npages);
-		kvfree(dma_addr);
-		prange->dma_addr[gpuidx] = NULL;
+		svm_range_dma_unmap_dev(dev, dma_addr, 0, prange->npages);
 	}
 }
@@ -281,18 +279,29 @@ static void svm_range_free(struct svm_range *prange, bool do_unmap)
 {
 	uint64_t size = (prange->last - prange->start + 1) << PAGE_SHIFT;
 	struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
+	uint32_t gpuidx;
 
 	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms, prange,
 		 prange->start, prange->last);
 
 	svm_range_vram_node_free(prange);
-	svm_range_free_dma_mappings(prange, do_unmap);
+	if (do_unmap)
+		svm_range_dma_unmap(prange);
 
 	if (do_unmap && !p->xnack_enabled) {
 		pr_debug("unreserve prange 0x%p size: 0x%llx\n", prange, size);
 		amdgpu_amdkfd_unreserve_mem_limit(NULL, size,
					KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
 	}
+
+	/* free dma_addr array for each gpu */
+	for (gpuidx = 0; gpuidx < MAX_GPU_INSTANCE; gpuidx++) {
+		if (prange->dma_addr[gpuidx]) {
+			kvfree(prange->dma_addr[gpuidx]);
+			prange->dma_addr[gpuidx] = NULL;
+		}
+	}
+
 	mutex_destroy(&prange->lock);
 	mutex_destroy(&prange->migrate_mutex);
 	kfree(prange);
@@ -181,9 +181,9 @@ void svm_range_add_list_work(struct svm_range_list *svms,
			    struct svm_range *prange, struct mm_struct *mm,
			    enum svm_work_list_ops op);
 void schedule_deferred_list_work(struct svm_range_list *svms);
-void svm_range_dma_unmap(struct device *dev, dma_addr_t *dma_addr,
+void svm_range_dma_unmap_dev(struct device *dev, dma_addr_t *dma_addr,
		    unsigned long offset, unsigned long npages);
-void svm_range_free_dma_mappings(struct svm_range *prange, bool unmap_dma);
+void svm_range_dma_unmap(struct svm_range *prange);
 int svm_range_get_info(struct kfd_process *p, uint32_t *num_svm_ranges,
		       uint64_t *svm_priv_data_size);
 int kfd_criu_checkpoint_svm(struct kfd_process *p,