Commit a9a78b32 authored by Christian König, committed by Alex Deucher

drm/amdgpu: use a global LRU list for VMIDs

With the scheduler enabled, managing per-ring LRUs doesn't
make much sense any more.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 94dd0a4a
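
Before diving into the diff: the core of the change is swapping a per-ring free-ID search for one global, LRU-ordered list of VMIDs. Allocation degenerates to "take the entry at the head of the list"; every use, including reuse of a still-owned ID, moves the entry back to the tail, so the head is always the ID that has been idle longest across all rings. Below is a minimal userspace sketch of that discipline; the hand-rolled intrusive list stands in for the kernel's <linux/list.h>, and every name in it is illustrative rather than amdgpu API.

/* lru_vmid_sketch.c - standalone illustration of a global LRU ID
 * allocator; compile with "cc lru_vmid_sketch.c".  All names are
 * invented for the example and are not amdgpu API. */
#include <stdio.h>
#include <stddef.h>

#define NUM_IDS 8			/* stand-in for AMDGPU_NUM_VM */

struct list_head {
	struct list_head *prev, *next;
};

static void list_init(struct list_head *head)
{
	head->prev = head->next = head;
}

static void list_add_tail(struct list_head *entry, struct list_head *head)
{
	entry->prev = head->prev;
	entry->next = head;
	head->prev->next = entry;
	head->prev = entry;
}

/* unlink and re-queue at the tail, like the kernel's list_move_tail() */
static void list_move_tail(struct list_head *entry, struct list_head *head)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
	list_add_tail(entry, head);
}

struct id_entry {
	struct list_head list;		/* position in the global LRU */
	long owner;			/* last VM that used this ID */
};

static struct id_entry ids[NUM_IDS];
static struct list_head ids_lru;

static unsigned grab_id(long vm)
{
	/* the head of the list is the least recently used entry;
	 * recover the containing struct via offsetof (container_of) */
	struct id_entry *id = (struct id_entry *)
		((char *)ids_lru.next - offsetof(struct id_entry, list));

	list_move_tail(&id->list, &ids_lru);
	id->owner = vm;

	/* recover the VMID by pointer arithmetic, as the patch does */
	return (unsigned)(id - ids);
}

int main(void)
{
	unsigned i;

	list_init(&ids_lru);
	/* skip over ID 0, mirroring "VMID 0 is the system VM" */
	for (i = 1; i < NUM_IDS; ++i)
		list_add_tail(&ids[i].list, &ids_lru);

	printf("vm A -> id %u\n", grab_id(0xA));	/* id 1 */
	printf("vm B -> id %u\n", grab_id(0xB));	/* id 2 */
	printf("vm A -> id %u\n", grab_id(0xA));	/* id 3 */
	return 0;
}

Note the last grab returns ID 3, not 1: the sketch always evicts the head, while the real amdgpu_vm_grab_id() below first checks whether the VM still owns its previous ID and reuses it when the owner tag matches.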
drivers/gpu/drm/amd/amdgpu/amdgpu.h

@@ -925,18 +925,20 @@ struct amdgpu_vm {
 	spinlock_t		freed_lock;
 };
 
-struct amdgpu_vm_manager {
-	/* protecting IDs */
-	struct mutex		lock;
-	struct {
-		struct fence	*active;
-		atomic_long_t	owner;
-	} ids[AMDGPU_NUM_VM];
+struct amdgpu_vm_manager_id {
+	struct list_head	list;
+	struct fence		*active;
+	atomic_long_t		owner;
+};
+
+struct amdgpu_vm_manager {
+	/* Handling of VMIDs */
+	struct mutex		lock;
+	unsigned		num_ids;
+	struct list_head	ids_lru;
+	struct amdgpu_vm_manager_id	ids[AMDGPU_NUM_VM];
 
 	uint32_t		max_pfn;
-	/* number of VMIDs */
-	unsigned		nvm;
 	/* vram base address for page table entry */
 	u64			vram_base_offset;
 	/* is vm enabled? */
@@ -946,6 +948,7 @@ struct amdgpu_vm_manager {
 	struct amdgpu_ring	*vm_pte_funcs_ring;
 };
 
+void amdgpu_vm_manager_init(struct amdgpu_device *adev);
 void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm);
 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
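
One detail of the new layout that pays off in the next file: struct amdgpu_vm_manager_id stores no index. Because the entries sit in a fixed array, the VMID can be recovered from an LRU node by plain pointer subtraction, which is exactly what the "vm_id->id = id - adev->vm_manager.ids" line below relies on. A tiny standalone illustration (the names are hypothetical, made up for the example):

/* C guarantees that subtracting two pointers into the same array
 * yields the element index, so intrusive-list entries need no stored
 * index field.  "table" and "entry" are example names only. */
#include <assert.h>

struct entry { int payload; };
static struct entry table[16];

int main(void)
{
	struct entry *e = &table[5];

	assert(e - table == 5);		/* cf. id - adev->vm_manager.ids */
	return 0;
}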
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c

@@ -161,79 +161,52 @@ void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
 int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 		      struct amdgpu_sync *sync, struct fence *fence)
 {
-	struct fence *best[AMDGPU_MAX_RINGS] = {};
 	struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx];
 	struct amdgpu_device *adev = ring->adev;
-
-	unsigned choices[2] = {};
-	unsigned i;
+	struct amdgpu_vm_manager_id *id;
+	int r;
 
 	mutex_lock(&adev->vm_manager.lock);
 
 	/* check if the id is still valid */
 	if (vm_id->id) {
-		unsigned id = vm_id->id;
 		long owner;
 
-		owner = atomic_long_read(&adev->vm_manager.ids[id].owner);
+		id = &adev->vm_manager.ids[vm_id->id];
+		owner = atomic_long_read(&id->owner);
 		if (owner == (long)vm) {
+			list_move_tail(&id->list, &adev->vm_manager.ids_lru);
 			trace_amdgpu_vm_grab_id(vm, vm_id->id, ring->idx);
-			fence_put(adev->vm_manager.ids[id].active);
-			adev->vm_manager.ids[id].active = fence_get(fence);
-			mutex_unlock(&adev->vm_manager.lock);
-			return 0;
-		}
-	}
-
-	/* we definately need to flush */
-	vm_id->pd_gpu_addr = ~0ll;
 
-	/* skip over VMID 0, since it is the system VM */
-	for (i = 1; i < adev->vm_manager.nvm; ++i) {
-		struct fence *fence = adev->vm_manager.ids[i].active;
-		struct amdgpu_ring *fring;
-
-		if (fence == NULL) {
-			/* found a free one */
-			vm_id->id = i;
-			trace_amdgpu_vm_grab_id(vm, i, ring->idx);
+			fence_put(id->active);
+			id->active = fence_get(fence);
 			mutex_unlock(&adev->vm_manager.lock);
 			return 0;
 		}
-
-		fring = amdgpu_ring_from_fence(fence);
-		if (best[fring->idx] == NULL ||
-		    fence_is_later(best[fring->idx], fence)) {
-			best[fring->idx] = fence;
-			choices[fring == ring ? 0 : 1] = i;
-		}
 	}
 
-	for (i = 0; i < 2; ++i) {
-		struct fence *active;
-		int r;
-
-		if (!choices[i])
-			continue;
-
-		vm_id->id = choices[i];
-		active = adev->vm_manager.ids[vm_id->id].active;
-		r = amdgpu_sync_fence(ring->adev, sync, active);
+	/* we definately need to flush */
+	vm_id->pd_gpu_addr = ~0ll;
 
-		trace_amdgpu_vm_grab_id(vm, choices[i], ring->idx);
-		atomic_long_set(&adev->vm_manager.ids[vm_id->id].owner, (long)vm);
+	id = list_first_entry(&adev->vm_manager.ids_lru,
+			      struct amdgpu_vm_manager_id,
+			      list);
+	list_move_tail(&id->list, &adev->vm_manager.ids_lru);
+	atomic_long_set(&id->owner, (long)vm);
 
-		fence_put(adev->vm_manager.ids[vm_id->id].active);
-		adev->vm_manager.ids[vm_id->id].active = fence_get(fence);
+	vm_id->id = id - adev->vm_manager.ids;
+	trace_amdgpu_vm_grab_id(vm, vm_id->id, ring->idx);
 
-		mutex_unlock(&adev->vm_manager.lock);
-		return r;
-	}
+	r = amdgpu_sync_fence(ring->adev, sync, id->active);
+
+	if (!r) {
+		fence_put(id->active);
+		id->active = fence_get(fence);
+	}
 
-	/* should never happen */
-	BUG();
 	mutex_unlock(&adev->vm_manager.lock);
-	return -EINVAL;
+	return r;
 }
 
 /**
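
To summarize the rewritten amdgpu_vm_grab_id(): there are now only two paths. On the fast path the VM still owns the ID it used last time (the owner tag matches), so the ID is reused and merely moved to the tail of ids_lru to refresh its age. On the slow path the head of ids_lru is by construction the least recently used ID across all rings; it is moved to the tail, claimed via the owner tag, and the caller is synced against its last active fence before reuse. This also retires the old BUG()/-EINVAL fallback: every ID is always somewhere on the list, so list_first_entry() cannot come up empty, and the only remaining error is whatever amdgpu_sync_fence() returns. Note that the fence swap now happens only when that sync succeeds, where the old loop performed it unconditionally.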
@@ -1358,6 +1331,25 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 	}
 }
 
+/**
+ * amdgpu_vm_manager_init - init the VM manager
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Initialize the VM manager structures
+ */
+void amdgpu_vm_manager_init(struct amdgpu_device *adev)
+{
+	unsigned i;
+
+	INIT_LIST_HEAD(&adev->vm_manager.ids_lru);
+
+	/* skip over VMID 0, since it is the system VM */
+	for (i = 1; i < adev->vm_manager.num_ids; ++i)
+		list_add_tail(&adev->vm_manager.ids[i].list,
+			      &adev->vm_manager.ids_lru);
+}
+
 /**
  * amdgpu_vm_manager_fini - cleanup VM manager
  *
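
Worth noting about the init path: amdgpu_vm_manager_init() links only IDs 1 through num_ids - 1 into the LRU, so VMID 0, the system VM, is never on the list and can never be returned by the eviction path in amdgpu_vm_grab_id(). The old code had to re-state that rule with its "i = 1" loop start on every allocation; now it is encoded once at init time.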
drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c

@@ -694,7 +694,8 @@ static int gmc_v7_0_vm_init(struct amdgpu_device *adev)
 	 * amdgpu graphics/compute will use VMIDs 1-7
 	 * amdkfd will use VMIDs 8-15
 	 */
-	adev->vm_manager.nvm = AMDGPU_NUM_OF_VMIDS;
+	adev->vm_manager.num_ids = AMDGPU_NUM_OF_VMIDS;
+	amdgpu_vm_manager_init(adev);
 
 	/* base offset of vram pages */
 	if (adev->flags & AMD_IS_APU) {
drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c

@@ -774,7 +774,8 @@ static int gmc_v8_0_vm_init(struct amdgpu_device *adev)
 	 * amdgpu graphics/compute will use VMIDs 1-7
 	 * amdkfd will use VMIDs 8-15
 	 */
-	adev->vm_manager.nvm = AMDGPU_NUM_OF_VMIDS;
+	adev->vm_manager.num_ids = AMDGPU_NUM_OF_VMIDS;
+	amdgpu_vm_manager_init(adev);
 
 	/* base offset of vram pages */
 	if (adev->flags & AMD_IS_APU) {
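
Both GMC generations get the same two-line change: num_ids replaces nvm, and amdgpu_vm_manager_init() is called right after it is set, so the LRU is fully populated before the first amdgpu_vm_grab_id() can run. Presumably any future GMC backend would need the same init call after setting num_ids.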