Commit 1d5dbfe6 authored by Alex Sierra's avatar Alex Sierra Committed by Alex Deucher

drm/amdkfd: classify and map mixed svm range pages in GPU

[Why]
svm ranges can have mixed pages from device or system memory.
A good example is, after a prange has been allocated in VRAM and a
copy-on-write is triggered by a fork. This invalidates some pages
inside the prange, ending up with mixed pages.

[How]
By classifying each page inside a prange based on its type (device or
system memory) during the DMA mapping call. If a page corresponds
to the VRAM domain, a flag is set on its dma_addr entry for each GPU.
Then, at GPU page table mapping time, each group of contiguous pages of
the same type is mapped with its proper PTE flags.

v2:
Instead of using ttm_res to calculate VRAM pfns in the svm_range, this is now
done by setting the real VRAM physical address into the dma_addr array.
This makes VRAM management more flexible and removes the need to keep
a BO reference in the svm_range.

v3:
Remove mapping member from svm_range
Signed-off-by: default avatarAlex Sierra <alex.sierra@amd.com>
Reviewed-by: default avatarFelix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: default avatarAlex Deucher <alexander.deucher@amd.com>
parent 278a7087
...@@ -119,28 +119,40 @@ static void svm_range_remove_notifier(struct svm_range *prange) ...@@ -119,28 +119,40 @@ static void svm_range_remove_notifier(struct svm_range *prange)
} }
static int static int
svm_range_dma_map_dev(struct device *dev, dma_addr_t **dma_addr, svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
unsigned long *hmm_pfns, uint64_t npages) unsigned long *hmm_pfns, uint32_t gpuidx)
{ {
enum dma_data_direction dir = DMA_BIDIRECTIONAL; enum dma_data_direction dir = DMA_BIDIRECTIONAL;
dma_addr_t *addr = *dma_addr; dma_addr_t *addr = prange->dma_addr[gpuidx];
struct device *dev = adev->dev;
struct page *page; struct page *page;
int i, r; int i, r;
if (!addr) { if (!addr) {
addr = kvmalloc_array(npages, sizeof(*addr), addr = kvmalloc_array(prange->npages, sizeof(*addr),
GFP_KERNEL | __GFP_ZERO); GFP_KERNEL | __GFP_ZERO);
if (!addr) if (!addr)
return -ENOMEM; return -ENOMEM;
*dma_addr = addr; prange->dma_addr[gpuidx] = addr;
} }
for (i = 0; i < npages; i++) { for (i = 0; i < prange->npages; i++) {
if (WARN_ONCE(addr[i] && !dma_mapping_error(dev, addr[i]), if (WARN_ONCE(addr[i] && !dma_mapping_error(dev, addr[i]),
"leaking dma mapping\n")) "leaking dma mapping\n"))
dma_unmap_page(dev, addr[i], PAGE_SIZE, dir); dma_unmap_page(dev, addr[i], PAGE_SIZE, dir);
page = hmm_pfn_to_page(hmm_pfns[i]); page = hmm_pfn_to_page(hmm_pfns[i]);
if (is_zone_device_page(page)) {
struct amdgpu_device *bo_adev =
amdgpu_ttm_adev(prange->svm_bo->bo->tbo.bdev);
addr[i] = (hmm_pfns[i] << PAGE_SHIFT) +
bo_adev->vm_manager.vram_base_offset -
bo_adev->kfd.dev->pgmap.range.start;
addr[i] |= SVM_RANGE_VRAM_DOMAIN;
pr_debug("vram address detected: 0x%llx\n", addr[i]);
continue;
}
addr[i] = dma_map_page(dev, page, 0, PAGE_SIZE, dir); addr[i] = dma_map_page(dev, page, 0, PAGE_SIZE, dir);
r = dma_mapping_error(dev, addr[i]); r = dma_mapping_error(dev, addr[i]);
if (r) { if (r) {
...@@ -175,8 +187,7 @@ svm_range_dma_map(struct svm_range *prange, unsigned long *bitmap, ...@@ -175,8 +187,7 @@ svm_range_dma_map(struct svm_range *prange, unsigned long *bitmap,
} }
adev = (struct amdgpu_device *)pdd->dev->kgd; adev = (struct amdgpu_device *)pdd->dev->kgd;
r = svm_range_dma_map_dev(adev->dev, &prange->dma_addr[gpuidx], r = svm_range_dma_map_dev(adev, prange, hmm_pfns, gpuidx);
hmm_pfns, prange->npages);
if (r) if (r)
break; break;
} }
...@@ -1020,21 +1031,22 @@ svm_range_split_by_granularity(struct kfd_process *p, struct mm_struct *mm, ...@@ -1020,21 +1031,22 @@ svm_range_split_by_granularity(struct kfd_process *p, struct mm_struct *mm,
} }
static uint64_t static uint64_t
svm_range_get_pte_flags(struct amdgpu_device *adev, struct svm_range *prange) svm_range_get_pte_flags(struct amdgpu_device *adev, struct svm_range *prange,
int domain)
{ {
struct amdgpu_device *bo_adev; struct amdgpu_device *bo_adev;
uint32_t flags = prange->flags; uint32_t flags = prange->flags;
uint32_t mapping_flags = 0; uint32_t mapping_flags = 0;
uint64_t pte_flags; uint64_t pte_flags;
bool snoop = !prange->ttm_res; bool snoop = (domain != SVM_RANGE_VRAM_DOMAIN);
bool coherent = flags & KFD_IOCTL_SVM_FLAG_COHERENT; bool coherent = flags & KFD_IOCTL_SVM_FLAG_COHERENT;
if (prange->svm_bo && prange->ttm_res) if (domain == SVM_RANGE_VRAM_DOMAIN)
bo_adev = amdgpu_ttm_adev(prange->svm_bo->bo->tbo.bdev); bo_adev = amdgpu_ttm_adev(prange->svm_bo->bo->tbo.bdev);
switch (adev->asic_type) { switch (adev->asic_type) {
case CHIP_ARCTURUS: case CHIP_ARCTURUS:
if (prange->svm_bo && prange->ttm_res) { if (domain == SVM_RANGE_VRAM_DOMAIN) {
if (bo_adev == adev) { if (bo_adev == adev) {
mapping_flags |= coherent ? mapping_flags |= coherent ?
AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW; AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
...@@ -1050,7 +1062,7 @@ svm_range_get_pte_flags(struct amdgpu_device *adev, struct svm_range *prange) ...@@ -1050,7 +1062,7 @@ svm_range_get_pte_flags(struct amdgpu_device *adev, struct svm_range *prange)
} }
break; break;
case CHIP_ALDEBARAN: case CHIP_ALDEBARAN:
if (prange->svm_bo && prange->ttm_res) { if (domain == SVM_RANGE_VRAM_DOMAIN) {
if (bo_adev == adev) { if (bo_adev == adev) {
mapping_flags |= coherent ? mapping_flags |= coherent ?
AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW; AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
...@@ -1080,14 +1092,14 @@ svm_range_get_pte_flags(struct amdgpu_device *adev, struct svm_range *prange) ...@@ -1080,14 +1092,14 @@ svm_range_get_pte_flags(struct amdgpu_device *adev, struct svm_range *prange)
mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE; mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
pte_flags = AMDGPU_PTE_VALID; pte_flags = AMDGPU_PTE_VALID;
pte_flags |= prange->ttm_res ? 0 : AMDGPU_PTE_SYSTEM; pte_flags |= (domain == SVM_RANGE_VRAM_DOMAIN) ? 0 : AMDGPU_PTE_SYSTEM;
pte_flags |= snoop ? AMDGPU_PTE_SNOOPED : 0; pte_flags |= snoop ? AMDGPU_PTE_SNOOPED : 0;
pte_flags |= amdgpu_gem_va_map_flags(adev, mapping_flags); pte_flags |= amdgpu_gem_va_map_flags(adev, mapping_flags);
pr_debug("svms 0x%p [0x%lx 0x%lx] vram %d PTE 0x%llx mapping 0x%x\n", pr_debug("svms 0x%p [0x%lx 0x%lx] vram %d PTE 0x%llx mapping 0x%x\n",
prange->svms, prange->start, prange->last, prange->svms, prange->start, prange->last,
prange->ttm_res ? 1:0, pte_flags, mapping_flags); (domain == SVM_RANGE_VRAM_DOMAIN) ? 1:0, pte_flags, mapping_flags);
return pte_flags; return pte_flags;
} }
...@@ -1158,31 +1170,41 @@ svm_range_map_to_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm, ...@@ -1158,31 +1170,41 @@ svm_range_map_to_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct amdgpu_bo_va bo_va; struct amdgpu_bo_va bo_va;
bool table_freed = false; bool table_freed = false;
uint64_t pte_flags; uint64_t pte_flags;
unsigned long last_start;
int last_domain;
int r = 0; int r = 0;
int64_t i;
pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms, prange->start, pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms, prange->start,
prange->last); prange->last);
if (prange->svm_bo && prange->ttm_res) { if (prange->svm_bo && prange->ttm_res)
bo_va.is_xgmi = amdgpu_xgmi_same_hive(adev, bo_adev); bo_va.is_xgmi = amdgpu_xgmi_same_hive(adev, bo_adev);
prange->mapping.bo_va = &bo_va;
}
prange->mapping.start = prange->start; last_start = prange->start;
prange->mapping.last = prange->last; for (i = 0; i < prange->npages; i++) {
prange->mapping.offset = prange->ttm_res ? prange->offset : 0; last_domain = dma_addr[i] & SVM_RANGE_VRAM_DOMAIN;
pte_flags = svm_range_get_pte_flags(adev, prange); dma_addr[i] &= ~SVM_RANGE_VRAM_DOMAIN;
if ((prange->start + i) < prange->last &&
last_domain == (dma_addr[i + 1] & SVM_RANGE_VRAM_DOMAIN))
continue;
r = amdgpu_vm_bo_update_mapping(adev, bo_adev, vm, false, false, NULL, pr_debug("Mapping range [0x%lx 0x%llx] on domain: %s\n",
prange->mapping.start, last_start, prange->start + i, last_domain ? "GPU" : "CPU");
prange->mapping.last, pte_flags, pte_flags = svm_range_get_pte_flags(adev, prange, last_domain);
prange->mapping.offset, r = amdgpu_vm_bo_update_mapping(adev, bo_adev, vm, false, false, NULL,
prange->ttm_res, last_start,
dma_addr, &vm->last_update, prange->start + i, pte_flags,
&table_freed); last_start - prange->start,
if (r) { NULL,
pr_debug("failed %d to map to gpu 0x%lx\n", r, prange->start); dma_addr,
goto out; &vm->last_update,
&table_freed);
if (r) {
pr_debug("failed %d to map to gpu 0x%lx\n", r, prange->start);
goto out;
}
last_start = prange->start + i + 1;
} }
r = amdgpu_vm_update_pdes(adev, vm, false); r = amdgpu_vm_update_pdes(adev, vm, false);
...@@ -1203,7 +1225,6 @@ svm_range_map_to_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm, ...@@ -1203,7 +1225,6 @@ svm_range_map_to_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
p->pasid, TLB_FLUSH_LEGACY); p->pasid, TLB_FLUSH_LEGACY);
} }
out: out:
prange->mapping.bo_va = NULL;
return r; return r;
} }
......
...@@ -35,6 +35,7 @@ ...@@ -35,6 +35,7 @@
#include "amdgpu.h" #include "amdgpu.h"
#include "kfd_priv.h" #include "kfd_priv.h"
#define SVM_RANGE_VRAM_DOMAIN (1UL << 0)
#define SVM_ADEV_PGMAP_OWNER(adev)\ #define SVM_ADEV_PGMAP_OWNER(adev)\
((adev)->hive ? (void *)(adev)->hive : (void *)(adev)) ((adev)->hive ? (void *)(adev)->hive : (void *)(adev))
...@@ -113,7 +114,6 @@ struct svm_range { ...@@ -113,7 +114,6 @@ struct svm_range {
struct list_head update_list; struct list_head update_list;
struct list_head remove_list; struct list_head remove_list;
struct list_head insert_list; struct list_head insert_list;
struct amdgpu_bo_va_mapping mapping;
uint64_t npages; uint64_t npages;
dma_addr_t *dma_addr[MAX_GPU_INSTANCE]; dma_addr_t *dma_addr[MAX_GPU_INSTANCE];
struct ttm_resource *ttm_res; struct ttm_resource *ttm_res;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment