Commit 3e8e7ee6 authored by Francois Dugast, committed by Rodrigo Vivi

drm/xe: Cleanup style warnings

Reduce the number of warnings reported by checkpatch.pl from 118 to 48 by
addressing these warning types:

  LEADING_SPACE
  LINE_SPACING
  BRACES
  TRAILING_SEMICOLON
  CONSTANT_COMPARISON
  BLOCK_COMMENT_STYLE
  RETURN_VOID
  ONE_SEMICOLON
  SUSPECT_CODE_INDENT
  LINE_CONTINUATIONS
  UNNECESSARY_ELSE
  UNSPECIFIED_INT
  UNNECESSARY_INT
  MISORDERED_TYPE
Signed-off-by: Francois Dugast <francois.dugast@intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
parent b8c1ba83
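The tags above are checkpatch.pl message types; a re-check can be limited to
exactly these with the script's --types option (for example
scripts/checkpatch.pl -f --show-types --types UNSPECIFIED_INT <file>). As a
quick illustration (a sketch, not code from this commit; clamp_to_byte is a
hypothetical function), the fragment below shows the kind of construct behind
three of the tags, with the pre-cleanup form named in each comment:

/* Illustrative sketch only, not from this commit. */
static unsigned int clamp_to_byte(unsigned int v)	/* was "unsigned v": UNSPECIFIED_INT */
{
	if (v > 255)		/* was "if (v > 255) {": BRACES */
		return 255;

	return v;		/* was "else { return v; }": UNNECESSARY_ELSE */
}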
@@ -1720,7 +1720,7 @@ int xe_gem_create_ioctl(struct drm_device *dev, void *data,
 	struct ww_acquire_ctx ww;
 	struct xe_vm *vm = NULL;
 	struct xe_bo *bo;
-	unsigned bo_flags = XE_BO_CREATE_USER_BIT;
+	unsigned int bo_flags = XE_BO_CREATE_USER_BIT;
 	u32 handle;
 	int err;
...
@@ -243,6 +243,7 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 	    vm->flags & XE_VM_FLAG_ASYNC_BIND_OPS) {
 		for (i = 0; i < args->num_syncs; i++) {
 			struct dma_fence *fence = syncs[i].fence;
+
 			if (fence) {
 				err = xe_vm_async_fence_wait_start(fence);
 				if (err)
...
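This hunk is a LINE_SPACING fix: checkpatch wants one blank line between a
block's local declarations and its first statement. A minimal hypothetical
sketch of the rule (sum is an invented function):

static int sum(const int *vals, int n)
{
	int i, total = 0;	/* declarations first ... */

	/* ... then the blank line LINE_SPACING asks for, then statements */
	for (i = 0; i < n; i++)
		total += vals[i];

	return total;
}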
@@ -11,7 +11,7 @@
 struct xe_device;
 struct xe_gt;
 
-#define xe_execlist_port_assert_held(port) lockdep_assert_held(&(port)->lock);
+#define xe_execlist_port_assert_held(port) lockdep_assert_held(&(port)->lock)
 
 int xe_execlist_init(struct xe_gt *gt);
 struct xe_execlist_port *xe_execlist_port_create(struct xe_device *xe,
...
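The TRAILING_SEMICOLON fix above matters beyond style: a semicolon inside a
function-like macro body makes every use site expand to two statements, which
silently breaks unbraced if/else. A self-contained sketch of the failure mode
(log_bad/log_good are hypothetical macros, not the driver's):

#include <stdio.h>

#define log_bad(x)  printf("%d\n", (x));	/* trailing ';' in the body */
#define log_good(x) printf("%d\n", (x))		/* caller supplies the ';' */

int main(void)
{
	int v = 1;

	if (v)
		log_good(v);	/* expands to one statement: fine */
	else
		printf("zero\n");

	/*
	 * Swapping in log_bad(v); above would expand to "printf(...);;",
	 * an extra empty statement that orphans the "else" and breaks
	 * the build, which is exactly why the macro's semicolon was dropped.
	 */
	return 0;
}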
@@ -186,7 +186,7 @@ static void guc_init_params(struct xe_guc *guc)
 	int i;
 
 	BUILD_BUG_ON(sizeof(guc->params) != GUC_CTL_MAX_DWORDS * sizeof(u32));
-	BUILD_BUG_ON(SOFT_SCRATCH_COUNT != GUC_CTL_MAX_DWORDS + 2);
+	BUILD_BUG_ON(GUC_CTL_MAX_DWORDS + 2 != SOFT_SCRATCH_COUNT);
 
 	params[GUC_CTL_LOG_PARAMS] = guc_ctl_log_params_flags(guc);
 	params[GUC_CTL_FEATURE] = guc_ctl_feature_flags(guc);
...
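CONSTANT_COMPARISON asks for the constant on the right-hand side of a
comparison. Both operands of this BUILD_BUG_ON are compile-time constants, so
flipping them only satisfies the heuristic. A tiny hypothetical sketch of the
general rule (is_ready is invented):

static int is_ready(int status)
{
	/* The Yoda form "4 == status" would trip CONSTANT_COMPARISON. */
	return status == 4;
}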
@@ -444,7 +444,7 @@ static unsigned int guc_mmio_regset_write(struct xe_guc_ads *ads,
 		xe_gt_any_hw_engine_by_reset_domain(hwe->gt, XE_ENGINE_CLASS_RENDER);
 	struct xe_reg_sr_entry *entry;
 	unsigned long idx;
-	unsigned count = 0;
+	unsigned int count = 0;
 	const struct {
 		struct xe_reg reg;
 		bool skip;
...
@@ -716,9 +716,8 @@ static int guc_ct_send_recv(struct xe_guc_ct *ct, const u32 *action, u32 len,
 		ptr = xa_store(&ct->fence_lookup,
 			       g2h_fence.seqno,
 			       &g2h_fence, GFP_KERNEL);
-		if (IS_ERR(ptr)) {
+		if (IS_ERR(ptr))
 			return PTR_ERR(ptr);
-		}
 
 		goto retry_same_fence;
 	} else if (unlikely(ret)) {
...
@@ -140,16 +140,20 @@ struct guc_update_engine_policy {
 struct guc_policies {
 	u32 submission_queue_depth[GUC_MAX_ENGINE_CLASSES];
 
-	/* In micro seconds. How much time to allow before DPC processing is
+	/*
+	 * In micro seconds. How much time to allow before DPC processing is
 	 * called back via interrupt (to prevent DPC queue drain starving).
-	 * Typically 1000s of micro seconds (example only, not granularity). */
+	 * Typically 1000s of micro seconds (example only, not granularity).
+	 */
 	u32 dpc_promote_time;
 
 	/* Must be set to take these new values. */
 	u32 is_valid;
 
-	/* Max number of WIs to process per call. A large value may keep CS
-	 * idle. */
+	/*
+	 * Max number of WIs to process per call. A large value may keep CS
+	 * idle.
+	 */
 	u32 max_num_work_items;
 
 	u32 global_flags;
...
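Both rewrites above follow the kernel's BLOCK_COMMENT_STYLE: multi-line
comments open and close on lines of their own, with an aligned asterisk on
each continuation line. A hypothetical sketch of the two accepted forms (the
variables are invented stand-ins):

/* A short comment may stay on a single line. */
static const unsigned int example_is_valid = 1;

/*
 * A longer comment opens the block on its own line, prefixes every
 * continuation line with an aligned asterisk, and closes on its own
 * line, the shape both struct comments take after this hunk.
 */
static const unsigned int example_max_work_items = 8;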
@@ -330,7 +330,7 @@ static void __guc_engine_policy_add_##func(struct engine_policy *policy, \
 					   u32 data) \
 { \
 	XE_BUG_ON(policy->count >= GUC_CONTEXT_POLICIES_KLV_NUM_IDS); \
-	 \
+	\
 	policy->h2g.klv[policy->count].kl = \
 		FIELD_PREP(GUC_KLV_0_KEY, \
 			   GUC_CONTEXT_POLICIES_KLV_ID_##id) | \
...
@@ -68,6 +68,7 @@ int xe_huc_auth(struct xe_huc *huc)
 	struct xe_gt *gt = huc_to_gt(huc);
 	struct xe_guc *guc = huc_to_guc(huc);
 	int ret;
+
 	if (xe_uc_fw_is_disabled(&huc->fw))
 		return 0;
...
@@ -250,7 +250,7 @@ static struct xe_gt *pick_engine_gt(struct xe_tile *tile,
 }
 
 static void gt_irq_handler(struct xe_tile *tile,
-			   u32 master_ctl, long unsigned int *intr_dw,
+			   u32 master_ctl, unsigned long *intr_dw,
 			   u32 *identity)
 {
 	struct xe_device *xe = tile_to_xe(tile);
@@ -305,7 +305,7 @@ static irqreturn_t xelp_irq_handler(int irq, void *arg)
 	struct xe_device *xe = arg;
 	struct xe_tile *tile = xe_device_get_root_tile(xe);
 	u32 master_ctl, gu_misc_iir;
-	long unsigned int intr_dw[2];
+	unsigned long intr_dw[2];
 	u32 identity[32];
 
 	master_ctl = xelp_intr_disable(xe);
@@ -360,7 +360,7 @@ static irqreturn_t dg1_irq_handler(int irq, void *arg)
 	struct xe_device *xe = arg;
 	struct xe_tile *tile;
 	u32 master_tile_ctl, master_ctl = 0, gu_misc_iir = 0;
-	long unsigned int intr_dw[2];
+	unsigned long intr_dw[2];
 	u32 identity[32];
 	u8 id;
@@ -502,11 +502,10 @@ static void xe_irq_postinstall(struct xe_device *xe)
 
 static irq_handler_t xe_irq_handler(struct xe_device *xe)
 {
-	if (GRAPHICS_VERx100(xe) >= 1210) {
+	if (GRAPHICS_VERx100(xe) >= 1210)
 		return dg1_irq_handler;
-	} else {
+	else
 		return xelp_irq_handler;
-	}
 }
 
 static void irq_uninstall(struct drm_device *drm, void *arg)
...
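The first three xe_irq.c hunks are MISORDERED_TYPE fixes: checkpatch wants
type words in canonical order, signedness before width, so "long unsigned
int" becomes "unsigned long". The last hunk drops the BRACES around
single-statement branches. A hypothetical sketch of the spelling rule
(count_bits is invented):

/* Declaring "long unsigned int word" would trip MISORDERED_TYPE. */
static unsigned long count_bits(unsigned long word)
{
	unsigned long count = 0;

	while (word) {
		word &= word - 1;	/* clear the lowest set bit */
		count++;
	}

	return count;
}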
@@ -511,7 +511,7 @@ static void emit_copy_ccs(struct xe_gt *gt, struct xe_bb *bb,
 #define EMIT_COPY_DW 10
 static void emit_copy(struct xe_gt *gt, struct xe_bb *bb,
 		      u64 src_ofs, u64 dst_ofs, unsigned int size,
-		      unsigned pitch)
+		      unsigned int pitch)
 {
 	XE_BUG_ON(size / pitch > S16_MAX);
 	XE_BUG_ON(pitch / 4 > S16_MAX);
@@ -1012,6 +1012,7 @@ static void write_pgtable(struct xe_tile *tile, struct xe_bb *bb, u64 ppgtt_ofs,
 	do {
 		u64 addr = ppgtt_ofs + ofs * 8;
+
 		chunk = min(update->qwords, 0x1ffU);
 
 		/* Ensure populatefn can do memset64 by aligning bb->cs */
...
@@ -58,6 +58,7 @@ static int pcode_mailbox_rw(struct xe_gt *gt, u32 mbox, u32 *data0, u32 *data1,
 			    bool atomic)
 {
 	int err;
+
 	lockdep_assert_held(&gt->pcode.lock);
 
 	if ((xe_mmio_read32(gt, PCODE_MAILBOX) & PCODE_READY) != 0)
...
@@ -82,7 +82,7 @@ void xe_reg_whitelist_print_entry(struct drm_printer *p, unsigned int indent,
 {
 	u32 val = entry->set_bits;
 	const char *access_str = "(invalid)";
-	unsigned range_bit = 2;
+	unsigned int range_bit = 2;
 	u32 range_start, range_end;
 	bool deny;
...
@@ -130,7 +130,6 @@ static inline void xe_res_first(struct ttm_resource *res,
 	cur->node = NULL;
 	cur->mem_type = XE_PL_TT;
 	XE_WARN_ON(res && start + size > res->size);
-	return;
 }
 
 static inline void __xe_res_sg_next(struct xe_res_cursor *cur)
...
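RETURN_VOID: a bare "return;" as the final statement of a void function adds
nothing, so it is simply deleted. A minimal hypothetical sketch (reset_cursor
is invented):

static void reset_cursor(int *pos)
{
	*pos = 0;
	/* no trailing "return;" needed; falling off the end is the return */
}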
@@ -81,7 +81,7 @@ struct xe_sa_manager *xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u32
 }
 
 struct drm_suballoc *xe_sa_bo_new(struct xe_sa_manager *sa_manager,
-				  unsigned size)
+				  unsigned int size)
 {
 	return drm_suballoc_new(&sa_manager->base, size, GFP_KERNEL, true, 0);
 }
...
@@ -147,9 +147,9 @@ struct fw_blobs_by_type {
 		entry__, \
 	},
 
-XE_GUC_FIRMWARE_DEFS(XE_UC_MODULE_FIRMWARE, \
+XE_GUC_FIRMWARE_DEFS(XE_UC_MODULE_FIRMWARE,
 		     fw_filename_mmp_ver, fw_filename_major_ver)
-XE_HUC_FIRMWARE_DEFS(XE_UC_MODULE_FIRMWARE, \
+XE_HUC_FIRMWARE_DEFS(XE_UC_MODULE_FIRMWARE,
 		     fw_filename_mmp_ver, fw_filename_no_ver)
 
 static struct xe_gt *
...
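LINE_CONTINUATIONS: the backslashes dropped here were never needed, because
only preprocessor definitions require explicit continuations; ordinary code,
including macro invocations, continues across lines on its own. A hypothetical
sketch of the distinction (SQUARE and area_plus_border are invented):

#define SQUARE(x) \
	((x) * (x))	/* '\' is required inside a #define body */

static int area_plus_border(int w)
{
	return SQUARE(w) +
	       4 * w + 4;	/* no '\' needed in ordinary code */
}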
@@ -2163,16 +2163,16 @@ static int vm_bind_ioctl_lookup_vma(struct xe_vm *vm, struct xe_bo *bo,
 	case XE_VM_BIND_OP_PREFETCH:
 		vma = xe_vm_find_overlapping_vma(vm, addr, range);
 		if (XE_IOCTL_DBG(xe, !vma))
-			return -ENODATA; /* Not an actual error, IOCTL
-					    cleans up returns and 0 */
+			/* Not an actual error, IOCTL cleans up returns and 0 */
+			return -ENODATA;
 		if (XE_IOCTL_DBG(xe, (xe_vma_start(vma) != addr ||
 				      xe_vma_end(vma) != addr + range) && !async))
 			return -EINVAL;
 		break;
 	case XE_VM_BIND_OP_UNMAP_ALL:
 		if (XE_IOCTL_DBG(xe, list_empty(&bo->ttm.base.gpuva.list)))
-			return -ENODATA; /* Not an actual error, IOCTL
-					    cleans up returns and 0 */
+			/* Not an actual error, IOCTL cleans up returns and 0 */
+			return -ENODATA;
 		break;
 	default:
 		XE_BUG_ON("NOT POSSIBLE");
...
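Moving each comment onto its own line above the return keeps the statement on
a single line and the comment well formed; a trailing comment wrapping past
the statement trips checkpatch's block-comment checks. A hypothetical sketch
of the preferred placement (lookup is invented; ENODATA comes from errno.h):

#include <errno.h>

static int lookup(const void *vma)
{
	if (!vma)
		/* Not an actual error; the caller cleans up and returns 0. */
		return -ENODATA;

	return 0;
}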
@@ -428,8 +428,8 @@
  * the list of userptrs mapped in the VM, the list of engines using this VM, and
  * the array of external BOs mapped in the VM. When adding or removing any of the
  * aforemented state from the VM should acquire this lock in write mode. The VM
- * bind path also acquires this lock in write while while the exec / compute
- * mode rebind worker acquire this lock in read mode.
+ * bind path also acquires this lock in write while the exec / compute mode
+ * rebind worker acquire this lock in read mode.
  *
  * VM dma-resv lock (vm->ttm.base.resv->lock) - WW lock. Protects VM dma-resv
  * slots which is shared with any private BO in the VM. Expected to be acquired
...