Commit 620063e1 authored by Laurent Pinchart's avatar Laurent Pinchart Committed by Tomi Valkeinen

drm/omap: gem: Rename GEM function with omap_gem_* prefix

get_pages() as a local function name is too generic and easily confused
for a generic MM kernel function. Rename it to __omap_gem_get_pages().

Rename the is_contiguous(), is_cached_coherent(), evict(), evict_entry(),
fault_1d() and fault_2d() functions for the same reason.
Signed-off-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Reviewed-by: Tomi Valkeinen <tomi.valkeinen@ti.com>
Signed-off-by: Tomi Valkeinen <tomi.valkeinen@ti.com>
parent 6505d75c
...@@ -156,7 +156,7 @@ static u64 mmap_offset(struct drm_gem_object *obj) ...@@ -156,7 +156,7 @@ static u64 mmap_offset(struct drm_gem_object *obj)
return drm_vma_node_offset_addr(&obj->vma_node); return drm_vma_node_offset_addr(&obj->vma_node);
} }
static bool is_contiguous(struct omap_gem_object *omap_obj) static bool omap_gem_is_contiguous(struct omap_gem_object *omap_obj)
{ {
if (omap_obj->flags & OMAP_BO_MEM_DMA_API) if (omap_obj->flags & OMAP_BO_MEM_DMA_API)
return true; return true;
...@@ -171,7 +171,7 @@ static bool is_contiguous(struct omap_gem_object *omap_obj) ...@@ -171,7 +171,7 @@ static bool is_contiguous(struct omap_gem_object *omap_obj)
* Eviction * Eviction
*/ */
static void evict_entry(struct drm_gem_object *obj, static void omap_gem_evict_entry(struct drm_gem_object *obj,
enum tiler_fmt fmt, struct omap_drm_usergart_entry *entry) enum tiler_fmt fmt, struct omap_drm_usergart_entry *entry)
{ {
struct omap_gem_object *omap_obj = to_omap_bo(obj); struct omap_gem_object *omap_obj = to_omap_bo(obj);
...@@ -199,7 +199,7 @@ static void evict_entry(struct drm_gem_object *obj, ...@@ -199,7 +199,7 @@ static void evict_entry(struct drm_gem_object *obj,
} }
/* Evict a buffer from usergart, if it is mapped there */ /* Evict a buffer from usergart, if it is mapped there */
static void evict(struct drm_gem_object *obj) static void omap_gem_evict(struct drm_gem_object *obj)
{ {
struct omap_gem_object *omap_obj = to_omap_bo(obj); struct omap_gem_object *omap_obj = to_omap_bo(obj);
struct omap_drm_private *priv = obj->dev->dev_private; struct omap_drm_private *priv = obj->dev->dev_private;
...@@ -213,7 +213,7 @@ static void evict(struct drm_gem_object *obj) ...@@ -213,7 +213,7 @@ static void evict(struct drm_gem_object *obj)
&priv->usergart[fmt].entry[i]; &priv->usergart[fmt].entry[i];
if (entry->obj == obj) if (entry->obj == obj)
evict_entry(obj, fmt, entry); omap_gem_evict_entry(obj, fmt, entry);
} }
} }
} }
...@@ -291,7 +291,8 @@ static int omap_gem_attach_pages(struct drm_gem_object *obj) ...@@ -291,7 +291,8 @@ static int omap_gem_attach_pages(struct drm_gem_object *obj)
/* acquire pages when needed (for example, for DMA where physically /* acquire pages when needed (for example, for DMA where physically
* contiguous buffer is not required * contiguous buffer is not required
*/ */
static int get_pages(struct drm_gem_object *obj, struct page ***pages) static int __omap_gem_get_pages(struct drm_gem_object *obj,
struct page ***pages)
{ {
struct omap_gem_object *omap_obj = to_omap_bo(obj); struct omap_gem_object *omap_obj = to_omap_bo(obj);
int ret = 0; int ret = 0;
...@@ -371,7 +372,7 @@ size_t omap_gem_mmap_size(struct drm_gem_object *obj) ...@@ -371,7 +372,7 @@ size_t omap_gem_mmap_size(struct drm_gem_object *obj)
*/ */
/* Normal handling for the case of faulting in non-tiled buffers */ /* Normal handling for the case of faulting in non-tiled buffers */
static vm_fault_t fault_1d(struct drm_gem_object *obj, static vm_fault_t omap_gem_fault_1d(struct drm_gem_object *obj,
struct vm_area_struct *vma, struct vm_fault *vmf) struct vm_area_struct *vma, struct vm_fault *vmf)
{ {
struct omap_gem_object *omap_obj = to_omap_bo(obj); struct omap_gem_object *omap_obj = to_omap_bo(obj);
...@@ -385,7 +386,7 @@ static vm_fault_t fault_1d(struct drm_gem_object *obj, ...@@ -385,7 +386,7 @@ static vm_fault_t fault_1d(struct drm_gem_object *obj,
omap_gem_cpu_sync_page(obj, pgoff); omap_gem_cpu_sync_page(obj, pgoff);
pfn = page_to_pfn(omap_obj->pages[pgoff]); pfn = page_to_pfn(omap_obj->pages[pgoff]);
} else { } else {
BUG_ON(!is_contiguous(omap_obj)); BUG_ON(!omap_gem_is_contiguous(omap_obj));
pfn = (omap_obj->dma_addr >> PAGE_SHIFT) + pgoff; pfn = (omap_obj->dma_addr >> PAGE_SHIFT) + pgoff;
} }
...@@ -397,7 +398,7 @@ static vm_fault_t fault_1d(struct drm_gem_object *obj, ...@@ -397,7 +398,7 @@ static vm_fault_t fault_1d(struct drm_gem_object *obj,
} }
/* Special handling for the case of faulting in 2d tiled buffers */ /* Special handling for the case of faulting in 2d tiled buffers */
static vm_fault_t fault_2d(struct drm_gem_object *obj, static vm_fault_t omap_gem_fault_2d(struct drm_gem_object *obj,
struct vm_area_struct *vma, struct vm_fault *vmf) struct vm_area_struct *vma, struct vm_fault *vmf)
{ {
struct omap_gem_object *omap_obj = to_omap_bo(obj); struct omap_gem_object *omap_obj = to_omap_bo(obj);
...@@ -445,7 +446,7 @@ static vm_fault_t fault_2d(struct drm_gem_object *obj, ...@@ -445,7 +446,7 @@ static vm_fault_t fault_2d(struct drm_gem_object *obj,
/* evict previous buffer using this usergart entry, if any: */ /* evict previous buffer using this usergart entry, if any: */
if (entry->obj) if (entry->obj)
evict_entry(entry->obj, fmt, entry); omap_gem_evict_entry(entry->obj, fmt, entry);
entry->obj = obj; entry->obj = obj;
entry->obj_pgoff = base_pgoff; entry->obj_pgoff = base_pgoff;
...@@ -531,7 +532,7 @@ vm_fault_t omap_gem_fault(struct vm_fault *vmf) ...@@ -531,7 +532,7 @@ vm_fault_t omap_gem_fault(struct vm_fault *vmf)
mutex_lock(&dev->struct_mutex); mutex_lock(&dev->struct_mutex);
/* if a shmem backed object, make sure we have pages attached now */ /* if a shmem backed object, make sure we have pages attached now */
err = get_pages(obj, &pages); err = __omap_gem_get_pages(obj, &pages);
if (err) { if (err) {
ret = vmf_error(err); ret = vmf_error(err);
goto fail; goto fail;
...@@ -544,9 +545,9 @@ vm_fault_t omap_gem_fault(struct vm_fault *vmf) ...@@ -544,9 +545,9 @@ vm_fault_t omap_gem_fault(struct vm_fault *vmf)
*/ */
if (omap_obj->flags & OMAP_BO_TILED) if (omap_obj->flags & OMAP_BO_TILED)
ret = fault_2d(obj, vma, vmf); ret = omap_gem_fault_2d(obj, vma, vmf);
else else
ret = fault_1d(obj, vma, vmf); ret = omap_gem_fault_1d(obj, vma, vmf);
fail: fail:
...@@ -689,7 +690,8 @@ int omap_gem_roll(struct drm_gem_object *obj, u32 roll) ...@@ -689,7 +690,8 @@ int omap_gem_roll(struct drm_gem_object *obj, u32 roll)
/* if we aren't mapped yet, we don't need to do anything */ /* if we aren't mapped yet, we don't need to do anything */
if (omap_obj->block) { if (omap_obj->block) {
struct page **pages; struct page **pages;
ret = get_pages(obj, &pages);
ret = __omap_gem_get_pages(obj, &pages);
if (ret) if (ret)
goto fail; goto fail;
ret = tiler_pin(omap_obj->block, pages, npages, roll, true); ret = tiler_pin(omap_obj->block, pages, npages, roll, true);
...@@ -717,7 +719,7 @@ int omap_gem_roll(struct drm_gem_object *obj, u32 roll) ...@@ -717,7 +719,7 @@ int omap_gem_roll(struct drm_gem_object *obj, u32 roll)
* the omap_obj->dma_addrs[i] is set to the DMA address, and the page is * the omap_obj->dma_addrs[i] is set to the DMA address, and the page is
* unmapped from the CPU. * unmapped from the CPU.
*/ */
static inline bool is_cached_coherent(struct drm_gem_object *obj) static inline bool omap_gem_is_cached_coherent(struct drm_gem_object *obj)
{ {
struct omap_gem_object *omap_obj = to_omap_bo(obj); struct omap_gem_object *omap_obj = to_omap_bo(obj);
...@@ -733,7 +735,7 @@ void omap_gem_cpu_sync_page(struct drm_gem_object *obj, int pgoff) ...@@ -733,7 +735,7 @@ void omap_gem_cpu_sync_page(struct drm_gem_object *obj, int pgoff)
struct drm_device *dev = obj->dev; struct drm_device *dev = obj->dev;
struct omap_gem_object *omap_obj = to_omap_bo(obj); struct omap_gem_object *omap_obj = to_omap_bo(obj);
if (is_cached_coherent(obj)) if (omap_gem_is_cached_coherent(obj))
return; return;
if (omap_obj->dma_addrs[pgoff]) { if (omap_obj->dma_addrs[pgoff]) {
...@@ -753,7 +755,7 @@ void omap_gem_dma_sync_buffer(struct drm_gem_object *obj, ...@@ -753,7 +755,7 @@ void omap_gem_dma_sync_buffer(struct drm_gem_object *obj,
struct page **pages = omap_obj->pages; struct page **pages = omap_obj->pages;
bool dirty = false; bool dirty = false;
if (is_cached_coherent(obj)) if (omap_gem_is_cached_coherent(obj))
return; return;
for (i = 0; i < npages; i++) { for (i = 0; i < npages; i++) {
...@@ -801,7 +803,7 @@ int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr) ...@@ -801,7 +803,7 @@ int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr)
mutex_lock(&obj->dev->struct_mutex); mutex_lock(&obj->dev->struct_mutex);
if (!is_contiguous(omap_obj) && priv->has_dmm) { if (!omap_gem_is_contiguous(omap_obj) && priv->has_dmm) {
if (omap_obj->dma_addr_cnt == 0) { if (omap_obj->dma_addr_cnt == 0) {
struct page **pages; struct page **pages;
u32 npages = obj->size >> PAGE_SHIFT; u32 npages = obj->size >> PAGE_SHIFT;
...@@ -810,7 +812,7 @@ int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr) ...@@ -810,7 +812,7 @@ int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr)
BUG_ON(omap_obj->block); BUG_ON(omap_obj->block);
ret = get_pages(obj, &pages); ret = __omap_gem_get_pages(obj, &pages);
if (ret) if (ret)
goto fail; goto fail;
...@@ -848,7 +850,7 @@ int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr) ...@@ -848,7 +850,7 @@ int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr)
omap_obj->dma_addr_cnt++; omap_obj->dma_addr_cnt++;
*dma_addr = omap_obj->dma_addr; *dma_addr = omap_obj->dma_addr;
} else if (is_contiguous(omap_obj)) { } else if (omap_gem_is_contiguous(omap_obj)) {
*dma_addr = omap_obj->dma_addr; *dma_addr = omap_obj->dma_addr;
} else { } else {
ret = -EINVAL; ret = -EINVAL;
...@@ -948,7 +950,7 @@ int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages, ...@@ -948,7 +950,7 @@ int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
return 0; return 0;
} }
mutex_lock(&obj->dev->struct_mutex); mutex_lock(&obj->dev->struct_mutex);
ret = get_pages(obj, pages); ret = __omap_gem_get_pages(obj, pages);
mutex_unlock(&obj->dev->struct_mutex); mutex_unlock(&obj->dev->struct_mutex);
return ret; return ret;
} }
...@@ -974,7 +976,9 @@ void *omap_gem_vaddr(struct drm_gem_object *obj) ...@@ -974,7 +976,9 @@ void *omap_gem_vaddr(struct drm_gem_object *obj)
WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex)); WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
if (!omap_obj->vaddr) { if (!omap_obj->vaddr) {
struct page **pages; struct page **pages;
int ret = get_pages(obj, &pages); int ret;
ret = __omap_gem_get_pages(obj, &pages);
if (ret) if (ret)
return ERR_PTR(ret); return ERR_PTR(ret);
omap_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT, omap_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
...@@ -1076,7 +1080,7 @@ void omap_gem_free_object(struct drm_gem_object *obj) ...@@ -1076,7 +1080,7 @@ void omap_gem_free_object(struct drm_gem_object *obj)
struct omap_drm_private *priv = dev->dev_private; struct omap_drm_private *priv = dev->dev_private;
struct omap_gem_object *omap_obj = to_omap_bo(obj); struct omap_gem_object *omap_obj = to_omap_bo(obj);
evict(obj); omap_gem_evict(obj);
WARN_ON(!mutex_is_locked(&dev->struct_mutex)); WARN_ON(!mutex_is_locked(&dev->struct_mutex));
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment