Commit 3e8e7ee6 authored by Francois Dugast, committed by Rodrigo Vivi

drm/xe: Cleanup style warnings

Reduce the number of warnings reported by checkpatch.pl from 118 to 48 by
addressing these warning types:

  LEADING_SPACE
  LINE_SPACING
  BRACES
  TRAILING_SEMICOLON
  CONSTANT_COMPARISON
  BLOCK_COMMENT_STYLE
  RETURN_VOID
  ONE_SEMICOLON
  SUSPECT_CODE_INDENT
  LINE_CONTINUATIONS
  UNNECESSARY_ELSE
  UNSPECIFIED_INT
  UNNECESSARY_INT
  MISORDERED_TYPE
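
For readers unfamiliar with these checkpatch classes, a minimal before/after
sketch of three of them in plain C (illustrative only, not code taken from
this commit):

/* UNSPECIFIED_INT: bare "unsigned" is flagged; spell out "unsigned int". */
static unsigned int total;	/* was: static unsigned total; */

/* UNNECESSARY_ELSE: an else branch after a return is flagged as redundant. */
static int pick(int cond, int a, int b)
{
	if (cond)
		return a;
	return b;	/* was: else { return b; } */
}

/*
 * BLOCK_COMMENT_STYLE: multi-line comments are flagged unless the opening
 * and closing markers each sit on their own line, as they do here.
 */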
Signed-off-by: Francois Dugast <francois.dugast@intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
parent b8c1ba83
@@ -1720,7 +1720,7 @@ int xe_gem_create_ioctl(struct drm_device *dev, void *data,
 	struct ww_acquire_ctx ww;
 	struct xe_vm *vm = NULL;
 	struct xe_bo *bo;
-	unsigned bo_flags = XE_BO_CREATE_USER_BIT;
+	unsigned int bo_flags = XE_BO_CREATE_USER_BIT;
 	u32 handle;
 	int err;
......
@@ -243,6 +243,7 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 	    vm->flags & XE_VM_FLAG_ASYNC_BIND_OPS) {
 		for (i = 0; i < args->num_syncs; i++) {
 			struct dma_fence *fence = syncs[i].fence;
+
 			if (fence) {
 				err = xe_vm_async_fence_wait_start(fence);
 				if (err)
......
@@ -11,7 +11,7 @@
 struct xe_device;
 struct xe_gt;
 
-#define xe_execlist_port_assert_held(port) lockdep_assert_held(&(port)->lock);
+#define xe_execlist_port_assert_held(port) lockdep_assert_held(&(port)->lock)
 
 int xe_execlist_init(struct xe_gt *gt);
 struct xe_execlist_port *xe_execlist_port_create(struct xe_device *xe,
......
@@ -13,7 +13,7 @@
 #define for_each_hw_engine(hwe__, gt__, id__) \
 	for ((id__) = 0; (id__) < ARRAY_SIZE((gt__)->hw_engines); (id__)++) \
-		for_each_if(((hwe__) = (gt__)->hw_engines + (id__)) && \
+		for_each_if(((hwe__) = (gt__)->hw_engines + (id__)) && \
 			    xe_hw_engine_is_valid((hwe__)))
 
 struct xe_gt *xe_gt_alloc(struct xe_tile *tile);
......
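
As an aside, a sketch of how this iterator macro is typically used
(illustrative, not part of the diff; it assumes a valid gt pointer and the
driver's enum xe_hw_engine_id for the index, and handle_engine() is a
hypothetical helper):

	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;

	for_each_hw_engine(hwe, gt, id)
		handle_engine(hwe);	/* runs only for valid engine slots */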
@@ -186,7 +186,7 @@ static void guc_init_params(struct xe_guc *guc)
 	int i;
 
 	BUILD_BUG_ON(sizeof(guc->params) != GUC_CTL_MAX_DWORDS * sizeof(u32));
-	BUILD_BUG_ON(SOFT_SCRATCH_COUNT != GUC_CTL_MAX_DWORDS + 2);
+	BUILD_BUG_ON(GUC_CTL_MAX_DWORDS + 2 != SOFT_SCRATCH_COUNT);
 
 	params[GUC_CTL_LOG_PARAMS] = guc_ctl_log_params_flags(guc);
 	params[GUC_CTL_FEATURE] = guc_ctl_feature_flags(guc);
......
@@ -444,7 +444,7 @@ static unsigned int guc_mmio_regset_write(struct xe_guc_ads *ads,
 		xe_gt_any_hw_engine_by_reset_domain(hwe->gt, XE_ENGINE_CLASS_RENDER);
 	struct xe_reg_sr_entry *entry;
 	unsigned long idx;
-	unsigned count = 0;
+	unsigned int count = 0;
 	const struct {
 		struct xe_reg reg;
 		bool skip;
......
@@ -716,9 +716,8 @@ static int guc_ct_send_recv(struct xe_guc_ct *ct, const u32 *action, u32 len,
 			ptr = xa_store(&ct->fence_lookup,
 				       g2h_fence.seqno,
 				       &g2h_fence, GFP_KERNEL);
-			if (IS_ERR(ptr)) {
+			if (IS_ERR(ptr))
 				return PTR_ERR(ptr);
-			}
 
 			goto retry_same_fence;
 		} else if (unlikely(ret)) {
......
@@ -140,16 +140,20 @@ struct guc_update_engine_policy {
 struct guc_policies {
 	u32 submission_queue_depth[GUC_MAX_ENGINE_CLASSES];
 
-	/* In micro seconds. How much time to allow before DPC processing is
+	/*
+	 * In micro seconds. How much time to allow before DPC processing is
 	 * called back via interrupt (to prevent DPC queue drain starving).
-	 * Typically 1000s of micro seconds (example only, not granularity). */
+	 * Typically 1000s of micro seconds (example only, not granularity).
+	 */
 	u32 dpc_promote_time;
 
 	/* Must be set to take these new values. */
 	u32 is_valid;
 
-	/* Max number of WIs to process per call. A large value may keep CS
-	 * idle. */
+	/*
+	 * Max number of WIs to process per call. A large value may keep CS
+	 * idle.
+	 */
 	u32 max_num_work_items;
 
 	u32 global_flags;
......
@@ -330,7 +330,7 @@ static void __guc_engine_policy_add_##func(struct engine_policy *policy, \
 					   u32 data) \
 { \
 	XE_BUG_ON(policy->count >= GUC_CONTEXT_POLICIES_KLV_NUM_IDS); \
-\
+\
 	policy->h2g.klv[policy->count].kl = \
 		FIELD_PREP(GUC_KLV_0_KEY, \
 			   GUC_CONTEXT_POLICIES_KLV_ID_##id) | \
......
@@ -68,6 +68,7 @@ int xe_huc_auth(struct xe_huc *huc)
 	struct xe_gt *gt = huc_to_gt(huc);
 	struct xe_guc *guc = huc_to_guc(huc);
 	int ret;
+
 	if (xe_uc_fw_is_disabled(&huc->fw))
 		return 0;
......
@@ -250,7 +250,7 @@ static struct xe_gt *pick_engine_gt(struct xe_tile *tile,
 }
 
 static void gt_irq_handler(struct xe_tile *tile,
-			   u32 master_ctl, long unsigned int *intr_dw,
+			   u32 master_ctl, unsigned long *intr_dw,
 			   u32 *identity)
 {
 	struct xe_device *xe = tile_to_xe(tile);
@@ -305,7 +305,7 @@ static irqreturn_t xelp_irq_handler(int irq, void *arg)
 	struct xe_device *xe = arg;
 	struct xe_tile *tile = xe_device_get_root_tile(xe);
 	u32 master_ctl, gu_misc_iir;
-	long unsigned int intr_dw[2];
+	unsigned long intr_dw[2];
 	u32 identity[32];
 
 	master_ctl = xelp_intr_disable(xe);
@@ -360,7 +360,7 @@ static irqreturn_t dg1_irq_handler(int irq, void *arg)
 	struct xe_device *xe = arg;
 	struct xe_tile *tile;
 	u32 master_tile_ctl, master_ctl = 0, gu_misc_iir = 0;
-	long unsigned int intr_dw[2];
+	unsigned long intr_dw[2];
 	u32 identity[32];
 	u8 id;
@@ -502,11 +502,10 @@ static void xe_irq_postinstall(struct xe_device *xe)
 static irq_handler_t xe_irq_handler(struct xe_device *xe)
 {
-	if (GRAPHICS_VERx100(xe) >= 1210) {
+	if (GRAPHICS_VERx100(xe) >= 1210)
 		return dg1_irq_handler;
-	} else {
+	else
 		return xelp_irq_handler;
-	}
 }
 
 static void irq_uninstall(struct drm_device *drm, void *arg)
......
@@ -374,46 +374,46 @@ static const u8 dg2_rcs_offsets[] = {
 };
 
 static const u8 mtl_rcs_offsets[] = {
-        NOP(1),
-        LRI(15, POSTED),
-        REG16(0x244),
-        REG(0x034),
-        REG(0x030),
-        REG(0x038),
-        REG(0x03c),
-        REG(0x168),
-        REG(0x140),
-        REG(0x110),
-        REG(0x1c0),
-        REG(0x1c4),
-        REG(0x1c8),
-        REG(0x180),
-        REG16(0x2b4),
-        REG(0x120),
-        REG(0x124),
-        NOP(1),
-        LRI(9, POSTED),
-        REG16(0x3a8),
-        REG16(0x28c),
-        REG16(0x288),
-        REG16(0x284),
-        REG16(0x280),
-        REG16(0x27c),
-        REG16(0x278),
-        REG16(0x274),
-        REG16(0x270),
-        NOP(2),
-        LRI(2, POSTED),
-        REG16(0x5a8),
-        REG16(0x5ac),
-        NOP(6),
-        LRI(1, 0),
-        REG(0x0c8),
-        END
+	NOP(1),
+	LRI(15, POSTED),
+	REG16(0x244),
+	REG(0x034),
+	REG(0x030),
+	REG(0x038),
+	REG(0x03c),
+	REG(0x168),
+	REG(0x140),
+	REG(0x110),
+	REG(0x1c0),
+	REG(0x1c4),
+	REG(0x1c8),
+	REG(0x180),
+	REG16(0x2b4),
+	REG(0x120),
+	REG(0x124),
+	NOP(1),
+	LRI(9, POSTED),
+	REG16(0x3a8),
+	REG16(0x28c),
+	REG16(0x288),
+	REG16(0x284),
+	REG16(0x280),
+	REG16(0x27c),
+	REG16(0x278),
+	REG16(0x274),
+	REG16(0x270),
+	NOP(2),
+	LRI(2, POSTED),
+	REG16(0x5a8),
+	REG16(0x5ac),
+	NOP(6),
+	LRI(1, 0),
+	REG(0x0c8),
+	END
 };
 
 #undef END
......
@@ -511,7 +511,7 @@ static void emit_copy_ccs(struct xe_gt *gt, struct xe_bb *bb,
 #define EMIT_COPY_DW 10
 static void emit_copy(struct xe_gt *gt, struct xe_bb *bb,
 		      u64 src_ofs, u64 dst_ofs, unsigned int size,
-		      unsigned pitch)
+		      unsigned int pitch)
 {
 	XE_BUG_ON(size / pitch > S16_MAX);
 	XE_BUG_ON(pitch / 4 > S16_MAX);
@@ -1012,6 +1012,7 @@ static void write_pgtable(struct xe_tile *tile, struct xe_bb *bb, u64 ppgtt_ofs,
 	do {
 		u64 addr = ppgtt_ofs + ofs * 8;
+
 		chunk = min(update->qwords, 0x1ffU);
 
 		/* Ensure populatefn can do memset64 by aligning bb->cs */
......
@@ -58,6 +58,7 @@ static int pcode_mailbox_rw(struct xe_gt *gt, u32 mbox, u32 *data0, u32 *data1,
 			    bool atomic)
 {
 	int err;
+
 	lockdep_assert_held(&gt->pcode.lock);
 
 	if ((xe_mmio_read32(gt, PCODE_MAILBOX) & PCODE_READY) != 0)
......
@@ -82,7 +82,7 @@ void xe_reg_whitelist_print_entry(struct drm_printer *p, unsigned int indent,
 {
 	u32 val = entry->set_bits;
 	const char *access_str = "(invalid)";
-	unsigned range_bit = 2;
+	unsigned int range_bit = 2;
 	u32 range_start, range_end;
 	bool deny;
......
@@ -130,7 +130,6 @@ static inline void xe_res_first(struct ttm_resource *res,
 	cur->node = NULL;
 	cur->mem_type = XE_PL_TT;
 	XE_WARN_ON(res && start + size > res->size);
-	return;
 }
 
 static inline void __xe_res_sg_next(struct xe_res_cursor *cur)
......
@@ -81,7 +81,7 @@ struct xe_sa_manager *xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u32
 }
 
 struct drm_suballoc *xe_sa_bo_new(struct xe_sa_manager *sa_manager,
-				  unsigned size)
+				  unsigned int size)
 {
 	return drm_suballoc_new(&sa_manager->base, size, GFP_KERNEL, true, 0);
 }
......
@@ -147,9 +147,9 @@ struct fw_blobs_by_type {
 		entry__, \
 	},
 
-XE_GUC_FIRMWARE_DEFS(XE_UC_MODULE_FIRMWARE, \
+XE_GUC_FIRMWARE_DEFS(XE_UC_MODULE_FIRMWARE,
 		     fw_filename_mmp_ver, fw_filename_major_ver)
-XE_HUC_FIRMWARE_DEFS(XE_UC_MODULE_FIRMWARE, \
+XE_HUC_FIRMWARE_DEFS(XE_UC_MODULE_FIRMWARE,
 		     fw_filename_mmp_ver, fw_filename_no_ver)
 
 static struct xe_gt *
......
@@ -2163,16 +2163,16 @@ static int vm_bind_ioctl_lookup_vma(struct xe_vm *vm, struct xe_bo *bo,
 	case XE_VM_BIND_OP_PREFETCH:
 		vma = xe_vm_find_overlapping_vma(vm, addr, range);
 		if (XE_IOCTL_DBG(xe, !vma))
-			return -ENODATA; /* Not an actual error, IOCTL
-					    cleans up returns and 0 */
+			/* Not an actual error, IOCTL cleans up returns and 0 */
+			return -ENODATA;
 		if (XE_IOCTL_DBG(xe, (xe_vma_start(vma) != addr ||
 				      xe_vma_end(vma) != addr + range) && !async))
 			return -EINVAL;
 		break;
 	case XE_VM_BIND_OP_UNMAP_ALL:
 		if (XE_IOCTL_DBG(xe, list_empty(&bo->ttm.base.gpuva.list)))
-			return -ENODATA; /* Not an actual error, IOCTL
-					    cleans up returns and 0 */
+			/* Not an actual error, IOCTL cleans up returns and 0 */
+			return -ENODATA;
 		break;
 	default:
 		XE_BUG_ON("NOT POSSIBLE");
......
@@ -428,8 +428,8 @@
  * the list of userptrs mapped in the VM, the list of engines using this VM, and
  * the array of external BOs mapped in the VM. When adding or removing any of the
  * aforemented state from the VM should acquire this lock in write mode. The VM
- * bind path also acquires this lock in write while while the exec / compute
- * mode rebind worker acquire this lock in read mode.
+ * bind path also acquires this lock in write while the exec / compute mode
+ * rebind worker acquire this lock in read mode.
  *
  * VM dma-resv lock (vm->ttm.base.resv->lock) - WW lock. Protects VM dma-resv
  * slots which is shared with any private BO in the VM. Expected to be acquired
......
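
The read/write discipline described in that comment can be sketched as follows
(schematic only; the field and list names are placeholders rather than the
driver's actual API, though down_read()/down_write() and their up_* pairs are
the standard kernel rwsem calls):

	/* Writer side, e.g. the VM bind path mutating VM state: */
	down_write(&vm->lock);
	list_add(&vma->userptr_link, &vm->userptr_list);	/* placeholder names */
	up_write(&vm->lock);

	/* Reader side, e.g. the exec / compute mode rebind worker: */
	down_read(&vm->lock);
	/* walk userptrs, engines and external BOs without them changing */
	up_read(&vm->lock);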