Commit 03373e2b authored by Kevin Wang's avatar Kevin Wang Committed by Alex Deucher

drm/amdgpu/ttm: optimize vram access in amdgpu_ttm_access_memory()

1. use the vram aperture to access vram where possible
2. avoid the MM_INDEX/MM_DATA path, which does not work when the mmio
protect feature is enabled.
Signed-off-by: Kevin Wang <kevin1.wang@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 5fb95aa7
...@@ -1393,6 +1393,41 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo, ...@@ -1393,6 +1393,41 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
return ttm_bo_eviction_valuable(bo, place); return ttm_bo_eviction_valuable(bo, place);
} }
/**
 * amdgpu_ttm_vram_mm_access - access vram through the MM_INDEX/MM_DATA window
 * @adev: amdgpu device
 * @pos: byte offset into vram
 * @buf: caller buffer to copy from (write) or into (read)
 * @size: number of bytes to transfer
 * @write: true to copy buf into vram, false to copy vram into buf
 *
 * Splits the transfer into dword-sized accesses via amdgpu_device_mm_access().
 * Unaligned head/tail dwords are handled with a read-modify-write so the
 * neighbouring vram bytes outside [pos, pos + size) are preserved.
 *
 * NOTE(review): the byte-lane math assumes a little-endian CPU, matching the
 * original implementation.
 */
static void amdgpu_ttm_vram_mm_access(struct amdgpu_device *adev, loff_t pos,
				      void *buf, size_t size, bool write)
{
	while (size) {
		uint64_t aligned_pos = ALIGN_DOWN(pos, 4);
		uint64_t bytes = 4 - (pos & 0x3);
		uint32_t shift = (pos & 0x3) * 8;
		uint32_t mask = 0xffffffff << shift;
		uint32_t value = 0;

		/* Tail shorter than the remainder of the dword: shrink the
		 * byte-lane mask so only 'size' bytes are touched.
		 */
		if (size < bytes) {
			mask &= 0xffffffff >> (bytes - size) * 8;
			bytes = size;
		}

		if (mask != 0xffffffff) {
			/* Partial dword: fetch the current contents first. */
			amdgpu_device_mm_access(adev, aligned_pos, &value, 4, false);
			if (write) {
				uint32_t tmp = 0;

				/* Only 'bytes' bytes of buf are valid in this
				 * iteration; a direct *(uint32_t *)buf load
				 * would read past the end of the buffer and
				 * may be misaligned. Copy just the valid
				 * bytes into a zeroed dword instead — the
				 * mask discards the rest either way.
				 */
				memcpy(&tmp, buf, bytes);
				value &= ~mask;
				value |= (tmp << shift) & mask;
				amdgpu_device_mm_access(adev, aligned_pos, &value, 4, true);
			} else {
				value = (value & mask) >> shift;
				memcpy(buf, &value, bytes);
			}
		} else {
			/* Fully aligned dword: pass buf straight through. */
			amdgpu_device_mm_access(adev, aligned_pos, buf, 4, write);
		}

		pos += bytes;
		buf += bytes;
		size -= bytes;
	}
}
/** /**
* amdgpu_ttm_access_memory - Read or Write memory that backs a buffer object. * amdgpu_ttm_access_memory - Read or Write memory that backs a buffer object.
* *
...@@ -1412,8 +1447,6 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo, ...@@ -1412,8 +1447,6 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo); struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev); struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
struct amdgpu_res_cursor cursor; struct amdgpu_res_cursor cursor;
unsigned long flags;
uint32_t value = 0;
int ret = 0; int ret = 0;
if (bo->resource->mem_type != TTM_PL_VRAM) if (bo->resource->mem_type != TTM_PL_VRAM)
...@@ -1421,41 +1454,21 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo, ...@@ -1421,41 +1454,21 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
amdgpu_res_first(bo->resource, offset, len, &cursor); amdgpu_res_first(bo->resource, offset, len, &cursor);
while (cursor.remaining) { while (cursor.remaining) {
uint64_t aligned_pos = cursor.start & ~(uint64_t)3; size_t count, size = cursor.size;
uint64_t bytes = 4 - (cursor.start & 3); loff_t pos = cursor.start;
uint32_t shift = (cursor.start & 3) * 8;
uint32_t mask = 0xffffffff << shift; count = amdgpu_device_aper_access(adev, pos, buf, size, write);
size -= count;
if (cursor.size < bytes) { if (size) {
mask &= 0xffffffff >> (bytes - cursor.size) * 8; /* using MM to access rest vram and handle un-aligned address */
bytes = cursor.size; pos += count;
buf += count;
amdgpu_ttm_vram_mm_access(adev, pos, buf, size, write);
} }
if (mask != 0xffffffff) { ret += cursor.size;
spin_lock_irqsave(&adev->mmio_idx_lock, flags); buf += cursor.size;
WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)aligned_pos) | 0x80000000); amdgpu_res_next(&cursor, cursor.size);
WREG32_NO_KIQ(mmMM_INDEX_HI, aligned_pos >> 31);
value = RREG32_NO_KIQ(mmMM_DATA);
if (write) {
value &= ~mask;
value |= (*(uint32_t *)buf << shift) & mask;
WREG32_NO_KIQ(mmMM_DATA, value);
}
spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
if (!write) {
value = (value & mask) >> shift;
memcpy(buf, &value, bytes);
}
} else {
bytes = cursor.size & ~0x3ULL;
amdgpu_device_vram_access(adev, cursor.start,
(uint32_t *)buf, bytes,
write);
}
ret += bytes;
buf = (uint8_t *)buf + bytes;
amdgpu_res_next(&cursor, bytes);
} }
return ret; return ret;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment