Commit b9683c21 authored by Alex Deucher

drm/amdgpu/gfx: consolidate mqd buffer setup code

The MQD buffer setup code was duplicated across multiple GFX generations; move it into common amdgpu_gfx helpers.
Reviewed-by: Alex Xie <AlexBin.Xie@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 4853bbb6
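The consolidation replaces each generation's private gfx_vN_0_compute_mqd_sw_init/fini pair with shared helpers that take the per-ASIC MQD size. A minimal sketch of how a generation-specific block now calls them, taken from the gfx_v8_0 hunks below (indentation and surrounding error paths abbreviated):

/* sw_init: allocate MQD BOs and backup buffers for the KIQ ring and every
 * compute ring, passing this ASIC's MQD size to the shared helper. */
r = amdgpu_gfx_compute_mqd_sw_init(adev, sizeof(struct vi_mqd));
if (r)
	return r;

/* sw_fini: free the MQD BOs and backup buffers again. */
amdgpu_gfx_compute_mqd_sw_fini(adev);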
@@ -248,3 +248,69 @@ int amdgpu_gfx_kiq_init(struct amdgpu_device *adev,
return 0;
}
/* create MQD for each compute queue */
int amdgpu_gfx_compute_mqd_sw_init(struct amdgpu_device *adev,
unsigned mqd_size)
{
struct amdgpu_ring *ring = NULL;
int r, i;
/* create MQD for KIQ */
ring = &adev->gfx.kiq.ring;
if (!ring->mqd_obj) {
r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
&ring->mqd_gpu_addr, &ring->mqd_ptr);
if (r) {
dev_warn(adev->dev, "failed to create ring mqd ob (%d)", r);
return r;
}
/* prepare MQD backup */
adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS] = kmalloc(mqd_size, GFP_KERNEL);
if (!adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS])
dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
}
/* create MQD for each KCQ */
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
ring = &adev->gfx.compute_ring[i];
if (!ring->mqd_obj) {
r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
&ring->mqd_gpu_addr, &ring->mqd_ptr);
if (r) {
dev_warn(adev->dev, "failed to create ring mqd ob (%d)", r);
return r;
}
/* prepare MQD backup */
adev->gfx.mec.mqd_backup[i] = kmalloc(mqd_size, GFP_KERNEL);
if (!adev->gfx.mec.mqd_backup[i])
dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
}
}
return 0;
}
void amdgpu_gfx_compute_mqd_sw_fini(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring = NULL;
int i;
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
ring = &adev->gfx.compute_ring[i];
kfree(adev->gfx.mec.mqd_backup[i]);
amdgpu_bo_free_kernel(&ring->mqd_obj,
&ring->mqd_gpu_addr,
&ring->mqd_ptr);
}
ring = &adev->gfx.kiq.ring;
kfree(adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS]);
amdgpu_bo_free_kernel(&ring->mqd_obj,
&ring->mqd_gpu_addr,
&ring->mqd_ptr);
}
@@ -43,6 +43,10 @@ void amdgpu_gfx_kiq_fini(struct amdgpu_device *adev);
int amdgpu_gfx_kiq_init(struct amdgpu_device *adev,
unsigned hpd_size);
int amdgpu_gfx_compute_mqd_sw_init(struct amdgpu_device *adev,
unsigned mqd_size);
void amdgpu_gfx_compute_mqd_sw_fini(struct amdgpu_device *adev);
/**
 * amdgpu_gfx_create_bitmask - create a bitmask
 *
...
@@ -659,8 +659,6 @@ static u32 gfx_v8_0_get_csb_size(struct amdgpu_device *adev);
static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev);
static void gfx_v8_0_ring_emit_ce_meta(struct amdgpu_ring *ring);
static void gfx_v8_0_ring_emit_de_meta(struct amdgpu_ring *ring);
static int gfx_v8_0_compute_mqd_sw_init(struct amdgpu_device *adev);
static void gfx_v8_0_compute_mqd_sw_fini(struct amdgpu_device *adev);
static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev)
{
@@ -2102,7 +2100,7 @@ static int gfx_v8_0_sw_init(void *handle)
return r;
/* create MQD for all compute queues as well as KIQ for SRIOV case */
- r = gfx_v8_0_compute_mqd_sw_init(adev);
+ r = amdgpu_gfx_compute_mqd_sw_init(adev, sizeof(struct vi_mqd));
if (r)
return r;
@@ -2148,7 +2146,7 @@ static int gfx_v8_0_sw_fini(void *handle)
for (i = 0; i < adev->gfx.num_compute_rings; i++)
amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
- gfx_v8_0_compute_mqd_sw_fini(adev);
+ amdgpu_gfx_compute_mqd_sw_fini(adev);
amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring, &adev->gfx.kiq.irq);
amdgpu_gfx_kiq_fini(adev);
@@ -7146,68 +7144,3 @@ static void gfx_v8_0_ring_emit_de_meta(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, upper_32_bits(de_payload_addr));
amdgpu_ring_write_multiple(ring, (void *)&de_payload, cnt_de - 2);
}
/* create MQD for each compute queue */
static int gfx_v8_0_compute_mqd_sw_init(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring = NULL;
int r, i;
/* create MQD for KIQ */
ring = &adev->gfx.kiq.ring;
if (!ring->mqd_obj) {
r = amdgpu_bo_create_kernel(adev, sizeof(struct vi_mqd), PAGE_SIZE,
AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
&ring->mqd_gpu_addr, &ring->mqd_ptr);
if (r) {
dev_warn(adev->dev, "failed to create ring mqd ob (%d)", r);
return r;
}
/* prepare MQD backup */
adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS] = kmalloc(sizeof(struct vi_mqd), GFP_KERNEL);
if (!adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS])
dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
}
/* create MQD for each KCQ */
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
ring = &adev->gfx.compute_ring[i];
if (!ring->mqd_obj) {
r = amdgpu_bo_create_kernel(adev, sizeof(struct vi_mqd), PAGE_SIZE,
AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
&ring->mqd_gpu_addr, &ring->mqd_ptr);
if (r) {
dev_warn(adev->dev, "failed to create ring mqd ob (%d)", r);
return r;
}
/* prepare MQD backup */
adev->gfx.mec.mqd_backup[i] = kmalloc(sizeof(struct vi_mqd), GFP_KERNEL);
if (!adev->gfx.mec.mqd_backup[i])
dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
}
}
return 0;
}
static void gfx_v8_0_compute_mqd_sw_fini(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring = NULL;
int i;
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
ring = &adev->gfx.compute_ring[i];
kfree(adev->gfx.mec.mqd_backup[i]);
amdgpu_bo_free_kernel(&ring->mqd_obj,
&ring->mqd_gpu_addr,
&ring->mqd_ptr);
}
ring = &adev->gfx.kiq.ring;
kfree(adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS]);
amdgpu_bo_free_kernel(&ring->mqd_obj,
&ring->mqd_gpu_addr,
&ring->mqd_ptr);
}
@@ -956,67 +956,6 @@ static int gfx_v9_0_mec_init(struct amdgpu_device *adev)
return 0;
}
/* create MQD for each compute queue */
static int gfx_v9_0_compute_mqd_sw_init(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring = NULL;
int r, i;
/* create MQD for KIQ */
ring = &adev->gfx.kiq.ring;
if (!ring->mqd_obj) {
r = amdgpu_bo_create_kernel(adev, sizeof(struct v9_mqd), PAGE_SIZE,
AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
&ring->mqd_gpu_addr, (void **)&ring->mqd_ptr);
if (r) {
dev_warn(adev->dev, "failed to create ring mqd ob (%d)", r);
return r;
}
/* prepare MQD backup */
adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS] = kmalloc(sizeof(struct v9_mqd), GFP_KERNEL);
if (!adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS])
dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
}
/* create MQD for each KCQ */
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
ring = &adev->gfx.compute_ring[i];
if (!ring->mqd_obj) {
r = amdgpu_bo_create_kernel(adev, sizeof(struct v9_mqd), PAGE_SIZE,
AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
&ring->mqd_gpu_addr, (void **)&ring->mqd_ptr);
if (r) {
dev_warn(adev->dev, "failed to create ring mqd ob (%d)", r);
return r;
}
/* prepare MQD backup */
adev->gfx.mec.mqd_backup[i] = kmalloc(sizeof(struct v9_mqd), GFP_KERNEL);
if (!adev->gfx.mec.mqd_backup[i])
dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
}
}
return 0;
}
static void gfx_v9_0_compute_mqd_sw_fini(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring = NULL;
int i;
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
ring = &adev->gfx.compute_ring[i];
kfree(adev->gfx.mec.mqd_backup[i]);
amdgpu_bo_free_kernel(&ring->mqd_obj, &ring->mqd_gpu_addr, (void **)&ring->mqd_ptr);
}
ring = &adev->gfx.kiq.ring;
kfree(adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS]);
amdgpu_bo_free_kernel(&ring->mqd_obj, &ring->mqd_gpu_addr, (void **)&ring->mqd_ptr);
}
static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
{
WREG32_SOC15(GC, 0, mmSQ_IND_INDEX,
@@ -1481,7 +1420,7 @@ static int gfx_v9_0_sw_init(void *handle)
return r;
/* create MQD for all compute queues as wel as KIQ for SRIOV case */
- r = gfx_v9_0_compute_mqd_sw_init(adev);
+ r = amdgpu_gfx_compute_mqd_sw_init(adev, sizeof(struct v9_mqd));
if (r)
return r;
@@ -1530,7 +1469,7 @@ static int gfx_v9_0_sw_fini(void *handle)
for (i = 0; i < adev->gfx.num_compute_rings; i++)
amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
- gfx_v9_0_compute_mqd_sw_fini(adev);
+ amdgpu_gfx_compute_mqd_sw_fini(adev);
amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring, &adev->gfx.kiq.irq);
amdgpu_gfx_kiq_fini(adev);
...