Commit 63a93023 authored by Bhaskar Chowdhury, committed by Alex Deucher

drm/amd/amdgpu/gfx_v7_0: Trivial typo fixes

s/acccess/access/
s/inferface/interface/
s/sequnce/sequence/ (in two different places)
s/retrive/retrieve/
s/sheduling/scheduling/
s/independant/independent/
s/wether/whether/ (in two different places)
s/emmit/emit/
s/synce/sync/

Reviewed-by: Nirmoy Das <nirmoy.das@amd.com>
Signed-off-by: Bhaskar Chowdhury <unixbhaskar@gmail.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent f19a2067
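For reference, the substitutions listed in the commit message map one-to-one onto sed expressions. A minimal sketch of how comment-only fixes like these could be applied and sanity-checked, assuming GNU sed and assuming the file path implied by the subject line (the full path does not appear verbatim on this page):

  # Apply every typo fix from the commit message in place; /g also
  # covers the words flagged as appearing in two different places.
  sed -i \
      -e 's/acccess/access/g' \
      -e 's/inferface/interface/g' \
      -e 's/sequnce/sequence/g' \
      -e 's/retrive/retrieve/g' \
      -e 's/sheduling/scheduling/g' \
      -e 's/independant/independent/g' \
      -e 's/wether/whether/g' \
      -e 's/emmit/emit/g' \
      -e 's/synce/sync/g' \
      drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c

  # Comment-only edits should leave the diff limited to this one file.
  git diff --stat

Note that blunt global replaces can over-match (e.g. s/synce/sync/ would also mangle a hypothetical "synced"), so the resulting diff should be reviewed before committing.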
@@ -1877,7 +1877,7 @@ static void gfx_v7_0_init_compute_vmid(struct amdgpu_device *adev)
 	mutex_unlock(&adev->srbm_mutex);
 
 	/* Initialize all compute VMIDs to have no GDS, GWS, or OA
-	   acccess. These should be enabled by FW for target VMIDs. */
+	   access. These should be enabled by FW for target VMIDs. */
 	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
 		WREG32(amdgpu_gds_reg_offset[i].mem_base, 0);
 		WREG32(amdgpu_gds_reg_offset[i].mem_size, 0);
@@ -2058,7 +2058,7 @@ static void gfx_v7_0_constants_init(struct amdgpu_device *adev)
  * @adev: amdgpu_device pointer
  *
  * Set up the number and offset of the CP scratch registers.
- * NOTE: use of CP scratch registers is a legacy inferface and
+ * NOTE: use of CP scratch registers is a legacy interface and
  * is not used by default on newer asics (r6xx+). On newer asics,
  * memory buffers are used for fences rather than scratch regs.
  */
@@ -2172,7 +2172,7 @@ static void gfx_v7_0_ring_emit_vgt_flush(struct amdgpu_ring *ring)
  * @seq: sequence number
  * @flags: fence related flags
  *
- * Emits a fence sequnce number on the gfx ring and flushes
+ * Emits a fence sequence number on the gfx ring and flushes
  * GPU caches.
  */
 static void gfx_v7_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
@@ -2215,7 +2215,7 @@ static void gfx_v7_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
  * @seq: sequence number
  * @flags: fence related flags
  *
- * Emits a fence sequnce number on the compute ring and flushes
+ * Emits a fence sequence number on the compute ring and flushes
  * GPU caches.
  */
 static void gfx_v7_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
@@ -2245,14 +2245,14 @@ static void gfx_v7_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
  * gfx_v7_0_ring_emit_ib - emit an IB (Indirect Buffer) on the ring
  *
  * @ring: amdgpu_ring structure holding ring information
- * @job: job to retrive vmid from
+ * @job: job to retrieve vmid from
  * @ib: amdgpu indirect buffer object
  * @flags: options (AMDGPU_HAVE_CTX_SWITCH)
  *
  * Emits an DE (drawing engine) or CE (constant engine) IB
  * on the gfx ring. IBs are usually generated by userspace
  * acceleration drivers and submitted to the kernel for
- * sheduling on the ring. This function schedules the IB
+ * scheduling on the ring. This function schedules the IB
  * on the gfx ring for execution by the GPU.
  */
 static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
@@ -2402,7 +2402,7 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 
 /*
  * CP.
- * On CIK, gfx and compute now have independant command processors.
+ * On CIK, gfx and compute now have independent command processors.
  *
  * GFX
  * Gfx consists of a single ring and can process both gfx jobs and
@@ -2630,7 +2630,7 @@ static int gfx_v7_0_cp_gfx_resume(struct amdgpu_device *adev)
 	ring->wptr = 0;
 	WREG32(mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
 
-	/* set the wb address wether it's enabled or not */
+	/* set the wb address whether it's enabled or not */
 	rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
 	WREG32(mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
 	WREG32(mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF);
@@ -2985,7 +2985,7 @@ static void gfx_v7_0_mqd_init(struct amdgpu_device *adev,
 	mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
 	mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
 
-	/* set the wb address wether it's enabled or not */
+	/* set the wb address whether it's enabled or not */
 	wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
 	mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
 	mqd->cp_hqd_pq_rptr_report_addr_hi =
@@ -3198,7 +3198,7 @@ static int gfx_v7_0_cp_resume(struct amdgpu_device *adev)
 /**
  * gfx_v7_0_ring_emit_vm_flush - cik vm flush using the CP
  *
- * @ring: the ring to emmit the commands to
+ * @ring: the ring to emit the commands to
  *
  * Sync the command pipeline with the PFP. E.g. wait for everything
  * to be completed.
@@ -3220,7 +3220,7 @@ static void gfx_v7_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
 	amdgpu_ring_write(ring, 4); /* poll interval */
 
 	if (usepfp) {
-		/* synce CE with ME to prevent CE fetch CEIB before context switch done */
+		/* sync CE with ME to prevent CE fetch CEIB before context switch done */
 		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
 		amdgpu_ring_write(ring, 0);
 		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));