Commit dfe9cfcc authored by Laurent Pinchart, committed by Tomi Valkeinen

drm: omapdrm: Use kernel integer types

The standard kernel integer types are [us]{8,16,32}. Use them instead of
the u?int{8,16,32}_t types.
Signed-off-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Reviewed-by: Sebastian Reichel <sebastian.reichel@collabora.co.uk>
Signed-off-by: Tomi Valkeinen <tomi.valkeinen@ti.com>
parent f073d78e
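For reference, a minimal before/after sketch of the substitution this patch applies throughout the omapdrm driver (the struct and field names below are hypothetical, not taken from the diff). The kernel-native spellings come from <linux/types.h>, and checkpatch.pl normally warns when the C99 uint*_t/int*_t spellings are used in kernel code:

#include <linux/types.h>	/* u8/u16/u32/u64 and s8/s16/s32/s64 */

/* Before: C99 fixed-width types, as the driver used previously. */
struct example_regs_c99 {
	uint32_t status;	/* -> u32 */
	uint16_t width, height;	/* -> u16 */
	int16_t offset;		/* -> s16 */
};

/* After: the same layout written with the kernel integer types. */
struct example_regs {
	u32 status;
	u16 width, height;
	s16 offset;
};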
@@ -272,7 +272,7 @@ static const struct dss_mgr_ops mgr_ops = {
* Setup, Flush and Page Flip
*/
-void omap_crtc_error_irq(struct drm_crtc *crtc, uint32_t irqstatus)
+void omap_crtc_error_irq(struct drm_crtc *crtc, u32 irqstatus)
{
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
@@ -492,7 +492,7 @@ static int omap_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_plane_state *pri_state;
if (state->color_mgmt_changed && state->gamma_lut) {
-uint length = state->gamma_lut->length /
+unsigned int length = state->gamma_lut->length /
sizeof(struct drm_color_lut);
if (length < 2)
@@ -526,7 +526,7 @@ static void omap_crtc_atomic_flush(struct drm_crtc *crtc,
if (crtc->state->color_mgmt_changed) {
struct drm_color_lut *lut = NULL;
-uint length = 0;
+unsigned int length = 0;
if (crtc->state->gamma_lut) {
lut = (struct drm_color_lut *)
@@ -557,7 +557,7 @@ static void omap_crtc_atomic_flush(struct drm_crtc *crtc,
static int omap_crtc_atomic_set_property(struct drm_crtc *crtc,
struct drm_crtc_state *state,
struct drm_property *property,
-uint64_t val)
+u64 val)
{
struct omap_drm_private *priv = crtc->dev->dev_private;
struct drm_plane_state *plane_state;
@@ -585,7 +585,7 @@ static int omap_crtc_atomic_set_property(struct drm_crtc *crtc,
static int omap_crtc_atomic_get_property(struct drm_crtc *crtc,
const struct drm_crtc_state *state,
struct drm_property *property,
-uint64_t *val)
+u64 *val)
{
struct omap_drm_private *priv = crtc->dev->dev_private;
struct omap_crtc_state *omap_state = to_omap_crtc_state(state);
@@ -732,7 +732,7 @@ struct drm_crtc *omap_crtc_init(struct drm_device *dev,
* gamma table is not supprted.
*/
if (priv->dispc_ops->mgr_gamma_size(channel)) {
-uint gamma_lut_size = 256;
+unsigned int gamma_lut_size = 256;
drm_crtc_enable_color_mgmt(crtc, 0, false, gamma_lut_size);
drm_mode_crtc_set_gamma_size(crtc, gamma_lut_size);
@@ -37,7 +37,7 @@ void omap_crtc_pre_uninit(void);
struct drm_crtc *omap_crtc_init(struct drm_device *dev,
struct drm_plane *plane, struct omap_dss_device *dssdev);
int omap_crtc_wait_pending(struct drm_crtc *crtc);
-void omap_crtc_error_irq(struct drm_crtc *crtc, uint32_t irqstatus);
+void omap_crtc_error_irq(struct drm_crtc *crtc, u32 irqstatus);
void omap_crtc_vblank_irq(struct drm_crtc *crtc);
#endif /* __OMAPDRM_CRTC_H__ */
@@ -102,10 +102,10 @@ struct pat_ctrl {
};
struct pat {
-uint32_t next_pa;
+u32 next_pa;
struct pat_area area;
struct pat_ctrl ctrl;
-uint32_t data_pa;
+u32 data_pa;
};
#define DMM_FIXED_RETRY_COUNT 1000
@@ -129,7 +129,7 @@ struct dmm_txn {
void *engine_handle;
struct tcm *tcm;
-uint8_t *current_va;
+u8 *current_va;
dma_addr_t current_pa;
struct pat *last_pat;
@@ -140,7 +140,7 @@ struct refill_engine {
struct dmm *dmm;
struct tcm *tcm;
-uint8_t *refill_va;
+u8 *refill_va;
dma_addr_t refill_pa;
/* only one trans per engine for now */
@@ -154,7 +154,7 @@ struct refill_engine {
};
struct dmm_platform_data {
-uint32_t cpu_cache_flags;
+u32 cpu_cache_flags;
};
struct dmm {
@@ -58,11 +58,11 @@ static DEFINE_SPINLOCK(list_lock);
}
static const struct {
-uint32_t x_shft; /* unused X-bits (as part of bpp) */
-uint32_t y_shft; /* unused Y-bits (as part of bpp) */
-uint32_t cpp; /* bytes/chars per pixel */
-uint32_t slot_w; /* width of each slot (in pixels) */
-uint32_t slot_h; /* height of each slot (in pixels) */
+u32 x_shft; /* unused X-bits (as part of bpp) */
+u32 y_shft; /* unused Y-bits (as part of bpp) */
+u32 cpp; /* bytes/chars per pixel */
+u32 slot_w; /* width of each slot (in pixels) */
+u32 slot_h; /* height of each slot (in pixels) */
} geom[TILFMT_NFORMATS] = {
[TILFMT_8BIT] = GEOM(0, 0, 1),
[TILFMT_16BIT] = GEOM(0, 1, 2),
@@ -72,7 +72,7 @@ static const struct {
/* lookup table for registers w/ per-engine instances */
-static const uint32_t reg[][4] = {
+static const u32 reg[][4] = {
[PAT_STATUS] = {DMM_PAT_STATUS__0, DMM_PAT_STATUS__1,
DMM_PAT_STATUS__2, DMM_PAT_STATUS__3},
[PAT_DESCR] = {DMM_PAT_DESCR__0, DMM_PAT_DESCR__1,
@@ -111,10 +111,10 @@ static void *alloc_dma(struct dmm_txn *txn, size_t sz, dma_addr_t *pa)
}
/* check status and spin until wait_mask comes true */
-static int wait_status(struct refill_engine *engine, uint32_t wait_mask)
+static int wait_status(struct refill_engine *engine, u32 wait_mask)
{
struct dmm *dmm = engine->dmm;
-uint32_t r = 0, err, i;
+u32 r = 0, err, i;
i = DMM_FIXED_RETRY_COUNT;
while (true) {
@@ -158,7 +158,7 @@ static void release_engine(struct refill_engine *engine)
static irqreturn_t omap_dmm_irq_handler(int irq, void *arg)
{
struct dmm *dmm = arg;
-uint32_t status = dmm_read(dmm, DMM_PAT_IRQSTATUS);
+u32 status = dmm_read(dmm, DMM_PAT_IRQSTATUS);
int i;
/* ack IRQ */
@@ -226,10 +226,10 @@ static struct dmm_txn *dmm_txn_init(struct dmm *dmm, struct tcm *tcm)
* corresponding slot is cleared (ie. dummy_pa is programmed)
*/
static void dmm_txn_append(struct dmm_txn *txn, struct pat_area *area,
-struct page **pages, uint32_t npages, uint32_t roll)
+struct page **pages, u32 npages, u32 roll)
{
dma_addr_t pat_pa = 0, data_pa = 0;
-uint32_t *data;
+u32 *data;
struct pat *pat;
struct refill_engine *engine = txn->engine_handle;
int columns = (1 + area->x1 - area->x0);
@@ -239,7 +239,7 @@ static void dmm_txn_append(struct dmm_txn *txn, struct pat_area *area,
pat = alloc_dma(txn, sizeof(*pat), &pat_pa);
if (txn->last_pat)
-txn->last_pat->next_pa = (uint32_t)pat_pa;
+txn->last_pat->next_pa = (u32)pat_pa;
pat->area = *area;
@@ -330,7 +330,7 @@ static int dmm_txn_commit(struct dmm_txn *txn, bool wait)
* DMM programming
*/
static int fill(struct tcm_area *area, struct page **pages,
-uint32_t npages, uint32_t roll, bool wait)
+u32 npages, u32 roll, bool wait)
{
int ret = 0;
struct tcm_area slice, area_s;
@@ -378,7 +378,7 @@ static int fill(struct tcm_area *area, struct page **pages,
/* note: slots for which pages[i] == NULL are filled w/ dummy page
*/
int tiler_pin(struct tiler_block *block, struct page **pages,
-uint32_t npages, uint32_t roll, bool wait)
+u32 npages, u32 roll, bool wait)
{
int ret;
@@ -398,8 +398,8 @@ int tiler_unpin(struct tiler_block *block)
/*
* Reserve/release
*/
-struct tiler_block *tiler_reserve_2d(enum tiler_fmt fmt, uint16_t w,
-uint16_t h, uint16_t align)
+struct tiler_block *tiler_reserve_2d(enum tiler_fmt fmt, u16 w,
+u16 h, u16 align)
{
struct tiler_block *block = kzalloc(sizeof(*block), GFP_KERNEL);
u32 min_align = 128;
@@ -542,8 +542,8 @@ dma_addr_t tiler_ssptr(struct tiler_block *block)
block->area.p0.y * geom[block->fmt].slot_h);
}
-dma_addr_t tiler_tsptr(struct tiler_block *block, uint32_t orient,
-uint32_t x, uint32_t y)
+dma_addr_t tiler_tsptr(struct tiler_block *block, u32 orient,
+u32 x, u32 y)
{
struct tcm_pt *p = &block->area.p0;
BUG_ON(!validfmt(block->fmt));
@@ -553,14 +553,14 @@ dma_addr_t tiler_tsptr(struct tiler_block *block, uint32_t orient,
(p->y * geom[block->fmt].slot_h) + y);
}
-void tiler_align(enum tiler_fmt fmt, uint16_t *w, uint16_t *h)
+void tiler_align(enum tiler_fmt fmt, u16 *w, u16 *h)
{
BUG_ON(!validfmt(fmt));
*w = round_up(*w, geom[fmt].slot_w);
*h = round_up(*h, geom[fmt].slot_h);
}
-uint32_t tiler_stride(enum tiler_fmt fmt, uint32_t orient)
+u32 tiler_stride(enum tiler_fmt fmt, u32 orient)
{
BUG_ON(!validfmt(fmt));
@@ -570,19 +570,19 @@ uint32_t tiler_stride(enum tiler_fmt fmt, uint32_t orient)
return 1 << (CONT_WIDTH_BITS + geom[fmt].y_shft);
}
-size_t tiler_size(enum tiler_fmt fmt, uint16_t w, uint16_t h)
+size_t tiler_size(enum tiler_fmt fmt, u16 w, u16 h)
{
tiler_align(fmt, &w, &h);
return geom[fmt].cpp * w * h;
}
-size_t tiler_vsize(enum tiler_fmt fmt, uint16_t w, uint16_t h)
+size_t tiler_vsize(enum tiler_fmt fmt, u16 w, u16 h)
{
BUG_ON(!validfmt(fmt));
return round_up(geom[fmt].cpp * w, PAGE_SIZE) * h;
}
-uint32_t tiler_get_cpu_cache_flags(void)
+u32 tiler_get_cpu_cache_flags(void)
{
return omap_dmm->plat_data->cpu_cache_flags;
}
@@ -88,30 +88,30 @@ int tiler_map_show(struct seq_file *s, void *arg);
/* pin/unpin */
int tiler_pin(struct tiler_block *block, struct page **pages,
-uint32_t npages, uint32_t roll, bool wait);
+u32 npages, u32 roll, bool wait);
int tiler_unpin(struct tiler_block *block);
/* reserve/release */
-struct tiler_block *tiler_reserve_2d(enum tiler_fmt fmt, uint16_t w, uint16_t h,
-uint16_t align);
+struct tiler_block *tiler_reserve_2d(enum tiler_fmt fmt, u16 w, u16 h,
+u16 align);
struct tiler_block *tiler_reserve_1d(size_t size);
int tiler_release(struct tiler_block *block);
/* utilities */
dma_addr_t tiler_ssptr(struct tiler_block *block);
-dma_addr_t tiler_tsptr(struct tiler_block *block, uint32_t orient,
-uint32_t x, uint32_t y);
-uint32_t tiler_stride(enum tiler_fmt fmt, uint32_t orient);
-size_t tiler_size(enum tiler_fmt fmt, uint16_t w, uint16_t h);
-size_t tiler_vsize(enum tiler_fmt fmt, uint16_t w, uint16_t h);
-void tiler_align(enum tiler_fmt fmt, uint16_t *w, uint16_t *h);
-uint32_t tiler_get_cpu_cache_flags(void);
+dma_addr_t tiler_tsptr(struct tiler_block *block, u32 orient,
+u32 x, u32 y);
+u32 tiler_stride(enum tiler_fmt fmt, u32 orient);
+size_t tiler_size(enum tiler_fmt fmt, u16 w, u16 h);
+size_t tiler_vsize(enum tiler_fmt fmt, u16 w, u16 h);
+void tiler_align(enum tiler_fmt fmt, u16 *w, u16 *h);
+u32 tiler_get_cpu_cache_flags(void);
bool dmm_is_available(void);
extern struct platform_driver omap_dmm_driver;
/* GEM bo flags -> tiler fmt */
-static inline enum tiler_fmt gem2fmt(uint32_t flags)
+static inline enum tiler_fmt gem2fmt(u32 flags)
{
switch (flags & OMAP_BO_TILED) {
case OMAP_BO_TILED_8:
@@ -46,7 +46,7 @@
struct omap_drm_usergart;
struct omap_drm_private {
-uint32_t omaprev;
+u32 omaprev;
const struct dispc_ops *dispc_ops;
@@ -81,7 +81,7 @@ struct omap_drm_private {
/* irq handling: */
spinlock_t wait_lock; /* protects the wait_list */
struct list_head wait_list; /* list of omap_irq_wait */
-uint32_t irq_mask; /* enabled irqs in addition to wait_list */
+u32 irq_mask; /* enabled irqs in addition to wait_list */
/* memory bandwidth limit if it is needed on the platform */
unsigned int max_bandwidth;
@@ -52,8 +52,8 @@ static const u32 formats[] = {
/* per-plane info for the fb: */
struct plane {
struct drm_gem_object *bo;
-uint32_t pitch;
-uint32_t offset;
+u32 pitch;
+u32 offset;
dma_addr_t dma_addr;
};
@@ -100,10 +100,10 @@ static const struct drm_framebuffer_funcs omap_framebuffer_funcs = {
.destroy = omap_framebuffer_destroy,
};
-static uint32_t get_linear_addr(struct plane *plane,
+static u32 get_linear_addr(struct plane *plane,
const struct drm_format_info *format, int n, int x, int y)
{
-uint32_t offset;
+u32 offset;
offset = plane->offset
+ (x * format->cpp[n] / (n == 0 ? 1 : format->hsub))
@@ -121,9 +121,9 @@ bool omap_framebuffer_supports_rotation(struct drm_framebuffer *fb)
}
/* Note: DRM rotates counter-clockwise, TILER & DSS rotates clockwise */
-static uint32_t drm_rotation_to_tiler(unsigned int drm_rot)
+static u32 drm_rotation_to_tiler(unsigned int drm_rot)
{
-uint32_t orient;
+u32 orient;
switch (drm_rot & DRM_MODE_ROTATE_MASK) {
default:
@@ -158,7 +158,7 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
const struct drm_format_info *format = omap_fb->format;
struct plane *plane = &omap_fb->planes[0];
-uint32_t x, y, orient = 0;
+u32 x, y, orient = 0;
info->fourcc = fb->format->format;
@@ -177,8 +177,8 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
y = state->src_y >> 16;
if (omap_gem_flags(plane->bo) & OMAP_BO_TILED) {
-uint32_t w = state->src_w >> 16;
-uint32_t h = state->src_h >> 16;
+u32 w = state->src_w >> 16;
+u32 h = state->src_h >> 16;
orient = drm_rotation_to_tiler(state->rotation);
@@ -39,13 +39,13 @@ struct omap_gem_object {
struct list_head mm_list;
-uint32_t flags;
+u32 flags;
/** width/height for tiled formats (rounded up to slot boundaries) */
-uint16_t width, height;
+u16 width, height;
/** roll applied when mapping to DMM */
-uint32_t roll;
+u32 roll;
/**
* dma_addr contains the buffer DMA address. It is valid for
@@ -73,7 +73,7 @@ struct omap_gem_object {
/**
* # of users of dma_addr
*/
-uint32_t dma_addr_cnt;
+u32 dma_addr_cnt;
/**
* If the buffer has been imported from a dmabuf the OMAP_DB_DMABUF flag
@@ -137,7 +137,7 @@ struct omap_drm_usergart {
*/
/** get mmap offset */
-static uint64_t mmap_offset(struct drm_gem_object *obj)
+static u64 mmap_offset(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
int ret;
@@ -331,14 +331,15 @@ static void omap_gem_detach_pages(struct drm_gem_object *obj)
}
/* get buffer flags */
-uint32_t omap_gem_flags(struct drm_gem_object *obj)
+u32 omap_gem_flags(struct drm_gem_object *obj)
{
return to_omap_bo(obj)->flags;
}
-uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj)
+u64 omap_gem_mmap_offset(struct drm_gem_object *obj)
{
-uint64_t offset;
+u64 offset;
mutex_lock(&obj->dev->struct_mutex);
offset = mmap_offset(obj);
mutex_unlock(&obj->dev->struct_mutex);
@@ -649,7 +650,7 @@ int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
* into user memory. We don't have to do much here at the moment.
*/
int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
-uint32_t handle, uint64_t *offset)
+u32 handle, u64 *offset)
{
struct drm_gem_object *obj;
int ret = 0;
@@ -675,10 +676,10 @@ int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
*
* Call only from non-atomic contexts.
*/
-int omap_gem_roll(struct drm_gem_object *obj, uint32_t roll)
+int omap_gem_roll(struct drm_gem_object *obj, u32 roll)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
-uint32_t npages = obj->size >> PAGE_SHIFT;
+u32 npages = obj->size >> PAGE_SHIFT;
int ret = 0;
if (roll > npages) {
@@ -808,7 +809,7 @@ int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr)
if (!is_contiguous(omap_obj) && priv->has_dmm) {
if (omap_obj->dma_addr_cnt == 0) {
struct page **pages;
-uint32_t npages = obj->size >> PAGE_SHIFT;
+u32 npages = obj->size >> PAGE_SHIFT;
enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
struct tiler_block *block;
@@ -904,7 +905,7 @@ void omap_gem_unpin(struct drm_gem_object *obj)
* specified orientation and x,y offset from top-left corner of buffer
* (only valid for tiled 2d buffers)
*/
-int omap_gem_rotated_dma_addr(struct drm_gem_object *obj, uint32_t orient,
+int omap_gem_rotated_dma_addr(struct drm_gem_object *obj, u32 orient,
int x, int y, dma_addr_t *dma_addr)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
@@ -921,7 +922,7 @@ int omap_gem_rotated_dma_addr(struct drm_gem_object *obj, uint32_t orient,
}
/* Get tiler stride for the buffer (only valid for 2d tiled buffers) */
-int omap_gem_tiled_stride(struct drm_gem_object *obj, uint32_t orient)
+int omap_gem_tiled_stride(struct drm_gem_object *obj, u32 orient)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
int ret = -EINVAL;
@@ -1003,7 +1004,8 @@ int omap_gem_resume(struct drm_device *dev)
list_for_each_entry(omap_obj, &priv->obj_list, mm_list) {
if (omap_obj->block) {
struct drm_gem_object *obj = &omap_obj->base;
-uint32_t npages = obj->size >> PAGE_SHIFT;
+u32 npages = obj->size >> PAGE_SHIFT;
WARN_ON(!omap_obj->pages); /* this can't happen */
ret = tiler_pin(omap_obj->block,
omap_obj->pages, npages,
@@ -1027,7 +1029,7 @@ int omap_gem_resume(struct drm_device *dev)
void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
-uint64_t off;
+u64 off;
off = drm_vma_node_start(&obj->vma_node);
@@ -1115,7 +1117,7 @@ void omap_gem_free_object(struct drm_gem_object *obj)
/* GEM buffer object constructor */
struct drm_gem_object *omap_gem_new(struct drm_device *dev,
-union omap_gem_size gsize, uint32_t flags)
+union omap_gem_size gsize, u32 flags)
{
struct omap_drm_private *priv = dev->dev_private;
struct omap_gem_object *omap_obj;
@@ -1280,7 +1282,7 @@ struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
/* convenience method to construct a GEM buffer object, and userspace handle */
int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
-union omap_gem_size gsize, uint32_t flags, uint32_t *handle)
+union omap_gem_size gsize, u32 flags, u32 *handle)
{
struct drm_gem_object *obj;
int ret;
@@ -1327,7 +1329,8 @@ void omap_gem_init(struct drm_device *dev)
/* reserve 4k aligned/wide regions for userspace mappings: */
for (i = 0; i < ARRAY_SIZE(fmts); i++) {
-uint16_t h = 1, w = PAGE_SIZE >> i;
+u16 h = 1, w = PAGE_SIZE >> i;
tiler_align(fmts[i], &w, &h);
/* note: since each region is 1 4kb page wide, and minimum
* number of rows, the height ends up being the same as the
@@ -53,17 +53,17 @@ void omap_gem_describe_objects(struct list_head *list, struct seq_file *m);
/* GEM Object Creation and Deletion */
struct drm_gem_object *omap_gem_new(struct drm_device *dev,
-union omap_gem_size gsize, uint32_t flags);
+union omap_gem_size gsize, u32 flags);
struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
struct sg_table *sgt);
int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
-union omap_gem_size gsize, uint32_t flags, uint32_t *handle);
+union omap_gem_size gsize, u32 flags, u32 *handle);
void omap_gem_free_object(struct drm_gem_object *obj);
void *omap_gem_vaddr(struct drm_gem_object *obj);
/* Dumb Buffers Interface */
int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
-uint32_t handle, uint64_t *offset);
+u32 handle, u64 *offset);
int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args);
@@ -71,7 +71,7 @@ int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma);
int omap_gem_mmap_obj(struct drm_gem_object *obj,
struct vm_area_struct *vma);
-uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj);
+u64 omap_gem_mmap_offset(struct drm_gem_object *obj);
size_t omap_gem_mmap_size(struct drm_gem_object *obj);
/* PRIME Interface */
@@ -81,7 +81,7 @@ struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
struct dma_buf *buffer);
int omap_gem_fault(struct vm_fault *vmf);
-int omap_gem_roll(struct drm_gem_object *obj, uint32_t roll);
+int omap_gem_roll(struct drm_gem_object *obj, u32 roll);
void omap_gem_cpu_sync_page(struct drm_gem_object *obj, int pgoff);
void omap_gem_dma_sync_buffer(struct drm_gem_object *obj,
enum dma_data_direction dir);
@@ -91,9 +91,9 @@ int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
bool remap);
int omap_gem_put_pages(struct drm_gem_object *obj);
-uint32_t omap_gem_flags(struct drm_gem_object *obj);
-int omap_gem_rotated_dma_addr(struct drm_gem_object *obj, uint32_t orient,
+u32 omap_gem_flags(struct drm_gem_object *obj);
+int omap_gem_rotated_dma_addr(struct drm_gem_object *obj, u32 orient,
int x, int y, dma_addr_t *dma_addr);
-int omap_gem_tiled_stride(struct drm_gem_object *obj, uint32_t orient);
+int omap_gem_tiled_stride(struct drm_gem_object *obj, u32 orient);
#endif /* __OMAPDRM_GEM_H__ */
@@ -20,7 +20,7 @@
struct omap_irq_wait {
struct list_head node;
wait_queue_head_t wq;
-uint32_t irqmask;
+u32 irqmask;
int count;
};
@@ -29,7 +29,7 @@ static void omap_irq_update(struct drm_device *dev)
{
struct omap_drm_private *priv = dev->dev_private;
struct omap_irq_wait *wait;
-uint32_t irqmask = priv->irq_mask;
+u32 irqmask = priv->irq_mask;
assert_spin_locked(&priv->wait_lock);
@@ -48,7 +48,7 @@ static void omap_irq_wait_handler(struct omap_irq_wait *wait)
}
struct omap_irq_wait * omap_irq_wait_init(struct drm_device *dev,
-uint32_t irqmask, int count)
+u32 irqmask, int count)
{
struct omap_drm_private *priv = dev->dev_private;
struct omap_irq_wait *wait = kzalloc(sizeof(*wait), GFP_KERNEL);
@@ -32,7 +32,7 @@ void omap_drm_irq_uninstall(struct drm_device *dev);
int omap_drm_irq_install(struct drm_device *dev);
struct omap_irq_wait *omap_irq_wait_init(struct drm_device *dev,
-uint32_t irqmask, int count);
+u32 irqmask, int count);
int omap_irq_wait(struct drm_device *dev, struct omap_irq_wait *wait,
unsigned long timeout);
@@ -201,7 +201,7 @@ static void omap_plane_reset(struct drm_plane *plane)
static int omap_plane_atomic_set_property(struct drm_plane *plane,
struct drm_plane_state *state,
struct drm_property *property,
-uint64_t val)
+u64 val)
{
struct omap_drm_private *priv = plane->dev->dev_private;
@@ -216,7 +216,7 @@ static int omap_plane_atomic_set_property(struct drm_plane *plane,
static int omap_plane_atomic_get_property(struct drm_plane *plane,
const struct drm_plane_state *state,
struct drm_property *property,
-uint64_t *val)
+u64 *val)
{
struct omap_drm_private *priv = plane->dev->dev_private;
@@ -33,8 +33,8 @@ static unsigned long mask[8];
* map ptr to bitmap
* stride slots in a row
*/
-static void free_slots(unsigned long pos, uint16_t w, uint16_t h,
-unsigned long *map, uint16_t stride)
+static void free_slots(unsigned long pos, u16 w, u16 h,
+unsigned long *map, u16 stride)
{
int i;
@@ -48,7 +48,7 @@ static void free_slots(unsigned long pos, uint16_t w, uint16_t h,
* map ptr to bitmap
* num_bits number of bits in bitmap
*/
-static int r2l_b2t_1d(uint16_t w, unsigned long *pos, unsigned long *map,
+static int r2l_b2t_1d(u16 w, unsigned long *pos, unsigned long *map,
size_t num_bits)
{
unsigned long search_count = 0;
@@ -84,7 +84,7 @@ static int r2l_b2t_1d(uint16_t w, unsigned long *pos, unsigned long *map,
* num_bits = size of bitmap
* stride = bits in one row of container
*/
-static int l2r_t2b(uint16_t w, uint16_t h, uint16_t a, int16_t offset,
+static int l2r_t2b(u16 w, u16 h, u16 a, s16 offset,
unsigned long *pos, unsigned long slot_bytes,
unsigned long *map, size_t num_bits, size_t slot_stride)
{
@@ -179,7 +179,7 @@ static s32 sita_reserve_1d(struct tcm *tcm, u32 num_slots,
}
static s32 sita_reserve_2d(struct tcm *tcm, u16 h, u16 w, u16 align,
-int16_t offset, uint16_t slot_bytes,
+s16 offset, u16 slot_bytes,
struct tcm_area *area)
{
unsigned long pos;
@@ -208,7 +208,7 @@ static void sita_deinit(struct tcm *tcm)
static s32 sita_free(struct tcm *tcm, struct tcm_area *area)
{
unsigned long pos;
-uint16_t w, h;
+u16 w, h;
pos = area->p0.x + area->p0.y * tcm->width;
if (area->is2d) {
@@ -65,7 +65,7 @@ struct tcm {
/* function table */
s32 (*reserve_2d)(struct tcm *tcm, u16 height, u16 width, u16 align,
-int16_t offset, uint16_t slot_bytes,
+s16 offset, u16 slot_bytes,
struct tcm_area *area);
s32 (*reserve_1d)(struct tcm *tcm, u32 slots, struct tcm_area *area);
s32 (*free)(struct tcm *tcm, struct tcm_area *area);
@@ -129,7 +129,7 @@ static inline void tcm_deinit(struct tcm *tcm)
* allocation.
*/
static inline s32 tcm_reserve_2d(struct tcm *tcm, u16 width, u16 height,
-u16 align, int16_t offset, uint16_t slot_bytes,
+u16 align, s16 offset, u16 slot_bytes,
struct tcm_area *area)
{
/* perform rudimentary error checking */