Commit 9e690184 authored by Srinivasan Shanmugam's avatar Srinivasan Shanmugam Committed by Alex Deucher

drm/amd/amdgpu: Fix errors & warnings in amdgpu _bios, _cs, _dma_buf, _fence.c

The following checkpatch errors & warnings are removed.

ERROR: else should follow close brace '}'
ERROR: trailing statements should be on next line
WARNING: Prefer 'unsigned int' to bare use of 'unsigned'
WARNING: Possible repeated word: 'Fences'
WARNING: Missing a blank line after declarations
WARNING: braces {} are not necessary for single statement blocks
WARNING: Comparisons should place the constant on the right side of the test
WARNING: printk() should include KERN_<LEVEL> facility level

Cc: Christian König <christian.koenig@amd.com>
Cc: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Srinivasan Shanmugam <srinivasan.shanmugam@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 0e768043
...@@ -104,9 +104,8 @@ static bool igp_read_bios_from_vram(struct amdgpu_device *adev) ...@@ -104,9 +104,8 @@ static bool igp_read_bios_from_vram(struct amdgpu_device *adev)
adev->bios = NULL; adev->bios = NULL;
vram_base = pci_resource_start(adev->pdev, 0); vram_base = pci_resource_start(adev->pdev, 0);
bios = ioremap_wc(vram_base, size); bios = ioremap_wc(vram_base, size);
if (!bios) { if (!bios)
return false; return false;
}
adev->bios = kmalloc(size, GFP_KERNEL); adev->bios = kmalloc(size, GFP_KERNEL);
if (!adev->bios) { if (!adev->bios) {
...@@ -133,9 +132,8 @@ bool amdgpu_read_bios(struct amdgpu_device *adev) ...@@ -133,9 +132,8 @@ bool amdgpu_read_bios(struct amdgpu_device *adev)
adev->bios = NULL; adev->bios = NULL;
/* XXX: some cards may return 0 for rom size? ddx has a workaround */ /* XXX: some cards may return 0 for rom size? ddx has a workaround */
bios = pci_map_rom(adev->pdev, &size); bios = pci_map_rom(adev->pdev, &size);
if (!bios) { if (!bios)
return false; return false;
}
adev->bios = kzalloc(size, GFP_KERNEL); adev->bios = kzalloc(size, GFP_KERNEL);
if (adev->bios == NULL) { if (adev->bios == NULL) {
...@@ -168,9 +166,9 @@ static bool amdgpu_read_bios_from_rom(struct amdgpu_device *adev) ...@@ -168,9 +166,9 @@ static bool amdgpu_read_bios_from_rom(struct amdgpu_device *adev)
header[AMD_VBIOS_SIGNATURE_END] = 0; header[AMD_VBIOS_SIGNATURE_END] = 0;
if ((!AMD_IS_VALID_VBIOS(header)) || if ((!AMD_IS_VALID_VBIOS(header)) ||
0 != memcmp((char *)&header[AMD_VBIOS_SIGNATURE_OFFSET], memcmp((char *)&header[AMD_VBIOS_SIGNATURE_OFFSET],
AMD_VBIOS_SIGNATURE, AMD_VBIOS_SIGNATURE,
strlen(AMD_VBIOS_SIGNATURE))) strlen(AMD_VBIOS_SIGNATURE)) != 0)
return false; return false;
/* valid vbios, go on */ /* valid vbios, go on */
...@@ -264,7 +262,7 @@ static int amdgpu_atrm_call(acpi_handle atrm_handle, uint8_t *bios, ...@@ -264,7 +262,7 @@ static int amdgpu_atrm_call(acpi_handle atrm_handle, uint8_t *bios,
status = acpi_evaluate_object(atrm_handle, NULL, &atrm_arg, &buffer); status = acpi_evaluate_object(atrm_handle, NULL, &atrm_arg, &buffer);
if (ACPI_FAILURE(status)) { if (ACPI_FAILURE(status)) {
printk("failed to evaluate ATRM got %s\n", acpi_format_exception(status)); DRM_ERROR("failed to evaluate ATRM got %s\n", acpi_format_exception(status));
return -ENODEV; return -ENODEV;
} }
...@@ -363,7 +361,7 @@ static bool amdgpu_acpi_vfct_bios(struct amdgpu_device *adev) ...@@ -363,7 +361,7 @@ static bool amdgpu_acpi_vfct_bios(struct amdgpu_device *adev)
struct acpi_table_header *hdr; struct acpi_table_header *hdr;
acpi_size tbl_size; acpi_size tbl_size;
UEFI_ACPI_VFCT *vfct; UEFI_ACPI_VFCT *vfct;
unsigned offset; unsigned int offset;
if (!ACPI_SUCCESS(acpi_get_table("VFCT", 1, &hdr))) if (!ACPI_SUCCESS(acpi_get_table("VFCT", 1, &hdr)))
return false; return false;
......
...@@ -397,7 +397,7 @@ static int amdgpu_cs_p2_dependencies(struct amdgpu_cs_parser *p, ...@@ -397,7 +397,7 @@ static int amdgpu_cs_p2_dependencies(struct amdgpu_cs_parser *p,
{ {
struct drm_amdgpu_cs_chunk_dep *deps = chunk->kdata; struct drm_amdgpu_cs_chunk_dep *deps = chunk->kdata;
struct amdgpu_fpriv *fpriv = p->filp->driver_priv; struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
unsigned num_deps; unsigned int num_deps;
int i, r; int i, r;
num_deps = chunk->length_dw * 4 / num_deps = chunk->length_dw * 4 /
...@@ -468,7 +468,7 @@ static int amdgpu_cs_p2_syncobj_in(struct amdgpu_cs_parser *p, ...@@ -468,7 +468,7 @@ static int amdgpu_cs_p2_syncobj_in(struct amdgpu_cs_parser *p,
struct amdgpu_cs_chunk *chunk) struct amdgpu_cs_chunk *chunk)
{ {
struct drm_amdgpu_cs_chunk_sem *deps = chunk->kdata; struct drm_amdgpu_cs_chunk_sem *deps = chunk->kdata;
unsigned num_deps; unsigned int num_deps;
int i, r; int i, r;
num_deps = chunk->length_dw * 4 / num_deps = chunk->length_dw * 4 /
...@@ -486,7 +486,7 @@ static int amdgpu_cs_p2_syncobj_timeline_wait(struct amdgpu_cs_parser *p, ...@@ -486,7 +486,7 @@ static int amdgpu_cs_p2_syncobj_timeline_wait(struct amdgpu_cs_parser *p,
struct amdgpu_cs_chunk *chunk) struct amdgpu_cs_chunk *chunk)
{ {
struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps = chunk->kdata; struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps = chunk->kdata;
unsigned num_deps; unsigned int num_deps;
int i, r; int i, r;
num_deps = chunk->length_dw * 4 / num_deps = chunk->length_dw * 4 /
...@@ -506,7 +506,7 @@ static int amdgpu_cs_p2_syncobj_out(struct amdgpu_cs_parser *p, ...@@ -506,7 +506,7 @@ static int amdgpu_cs_p2_syncobj_out(struct amdgpu_cs_parser *p,
struct amdgpu_cs_chunk *chunk) struct amdgpu_cs_chunk *chunk)
{ {
struct drm_amdgpu_cs_chunk_sem *deps = chunk->kdata; struct drm_amdgpu_cs_chunk_sem *deps = chunk->kdata;
unsigned num_deps; unsigned int num_deps;
int i; int i;
num_deps = chunk->length_dw * 4 / num_deps = chunk->length_dw * 4 /
...@@ -540,7 +540,7 @@ static int amdgpu_cs_p2_syncobj_timeline_signal(struct amdgpu_cs_parser *p, ...@@ -540,7 +540,7 @@ static int amdgpu_cs_p2_syncobj_timeline_signal(struct amdgpu_cs_parser *p,
struct amdgpu_cs_chunk *chunk) struct amdgpu_cs_chunk *chunk)
{ {
struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps = chunk->kdata; struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps = chunk->kdata;
unsigned num_deps; unsigned int num_deps;
int i; int i;
num_deps = chunk->length_dw * 4 / num_deps = chunk->length_dw * 4 /
...@@ -758,6 +758,7 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev, ...@@ -758,6 +758,7 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
if (used_vis_vram < total_vis_vram) { if (used_vis_vram < total_vis_vram) {
u64 free_vis_vram = total_vis_vram - used_vis_vram; u64 free_vis_vram = total_vis_vram - used_vis_vram;
adev->mm_stats.accum_us_vis = min(adev->mm_stats.accum_us_vis + adev->mm_stats.accum_us_vis = min(adev->mm_stats.accum_us_vis +
increment_us, us_upper_bound); increment_us, us_upper_bound);
...@@ -1076,9 +1077,8 @@ static int amdgpu_cs_patch_ibs(struct amdgpu_cs_parser *p, ...@@ -1076,9 +1077,8 @@ static int amdgpu_cs_patch_ibs(struct amdgpu_cs_parser *p,
/* the IB should be reserved at this point */ /* the IB should be reserved at this point */
r = amdgpu_bo_kmap(aobj, (void **)&kptr); r = amdgpu_bo_kmap(aobj, (void **)&kptr);
if (r) { if (r)
return r; return r;
}
kptr += va_start - (m->start * AMDGPU_GPU_PAGE_SIZE); kptr += va_start - (m->start * AMDGPU_GPU_PAGE_SIZE);
...@@ -1392,7 +1392,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, ...@@ -1392,7 +1392,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
/* Cleanup the parser structure */ /* Cleanup the parser structure */
static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser) static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser)
{ {
unsigned i; unsigned int i;
amdgpu_sync_free(&parser->sync); amdgpu_sync_free(&parser->sync);
for (i = 0; i < parser->num_post_deps; i++) { for (i = 0; i < parser->num_post_deps; i++) {
......
...@@ -149,7 +149,7 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach, ...@@ -149,7 +149,7 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
if (!bo->tbo.pin_count) { if (!bo->tbo.pin_count) {
/* move buffer into GTT or VRAM */ /* move buffer into GTT or VRAM */
struct ttm_operation_ctx ctx = { false, false }; struct ttm_operation_ctx ctx = { false, false };
unsigned domains = AMDGPU_GEM_DOMAIN_GTT; unsigned int domains = AMDGPU_GEM_DOMAIN_GTT;
if (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM && if (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM &&
attach->peer2peer) { attach->peer2peer) {
......
...@@ -42,7 +42,6 @@ ...@@ -42,7 +42,6 @@
#include "amdgpu_reset.h" #include "amdgpu_reset.h"
/* /*
* Fences
* Fences mark an event in the GPUs pipeline and are used * Fences mark an event in the GPUs pipeline and are used
* for GPU/CPU synchronization. When the fence is written, * for GPU/CPU synchronization. When the fence is written,
* it is expected that all buffers associated with that fence * it is expected that all buffers associated with that fence
...@@ -140,7 +139,7 @@ static u32 amdgpu_fence_read(struct amdgpu_ring *ring) ...@@ -140,7 +139,7 @@ static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
* Returns 0 on success, -ENOMEM on failure. * Returns 0 on success, -ENOMEM on failure.
*/ */
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amdgpu_job *job, int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amdgpu_job *job,
unsigned flags) unsigned int flags)
{ {
struct amdgpu_device *adev = ring->adev; struct amdgpu_device *adev = ring->adev;
struct dma_fence *fence; struct dma_fence *fence;
...@@ -174,11 +173,11 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amd ...@@ -174,11 +173,11 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amd
adev->fence_context + ring->idx, seq); adev->fence_context + ring->idx, seq);
/* Against remove in amdgpu_job_{free, free_cb} */ /* Against remove in amdgpu_job_{free, free_cb} */
dma_fence_get(fence); dma_fence_get(fence);
} } else {
else
dma_fence_init(fence, &amdgpu_fence_ops, dma_fence_init(fence, &amdgpu_fence_ops,
&ring->fence_drv.lock, &ring->fence_drv.lock,
adev->fence_context + ring->idx, seq); adev->fence_context + ring->idx, seq);
}
} }
amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr, amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
...@@ -396,7 +395,7 @@ signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring, ...@@ -396,7 +395,7 @@ signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring,
* Returns the number of emitted fences on the ring. Used by the * Returns the number of emitted fences on the ring. Used by the
* dynpm code to ring track activity. * dynpm code to ring track activity.
*/ */
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring) unsigned int amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
{ {
uint64_t emitted; uint64_t emitted;
...@@ -475,7 +474,7 @@ void amdgpu_fence_update_start_timestamp(struct amdgpu_ring *ring, uint32_t seq, ...@@ -475,7 +474,7 @@ void amdgpu_fence_update_start_timestamp(struct amdgpu_ring *ring, uint32_t seq,
*/ */
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring, int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
struct amdgpu_irq_src *irq_src, struct amdgpu_irq_src *irq_src,
unsigned irq_type) unsigned int irq_type)
{ {
struct amdgpu_device *adev = ring->adev; struct amdgpu_device *adev = ring->adev;
uint64_t index; uint64_t index;
...@@ -653,6 +652,7 @@ void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev) ...@@ -653,6 +652,7 @@ void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev)
for (i = 0; i < AMDGPU_MAX_RINGS; i++) { for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
struct amdgpu_ring *ring = adev->rings[i]; struct amdgpu_ring *ring = adev->rings[i];
if (!ring || !ring->fence_drv.initialized) if (!ring || !ring->fence_drv.initialized)
continue; continue;
...@@ -840,6 +840,7 @@ static int amdgpu_debugfs_fence_info_show(struct seq_file *m, void *unused) ...@@ -840,6 +840,7 @@ static int amdgpu_debugfs_fence_info_show(struct seq_file *m, void *unused)
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i]; struct amdgpu_ring *ring = adev->rings[i];
if (!ring || !ring->fence_drv.initialized) if (!ring || !ring->fence_drv.initialized)
continue; continue;
...@@ -913,6 +914,7 @@ static void amdgpu_debugfs_reset_work(struct work_struct *work) ...@@ -913,6 +914,7 @@ static void amdgpu_debugfs_reset_work(struct work_struct *work)
reset_work); reset_work);
struct amdgpu_reset_context reset_context; struct amdgpu_reset_context reset_context;
memset(&reset_context, 0, sizeof(reset_context)); memset(&reset_context, 0, sizeof(reset_context));
reset_context.method = AMD_RESET_METHOD_NONE; reset_context.method = AMD_RESET_METHOD_NONE;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment