Commit 09a58da0 authored by Haixia Shi, committed by Dave Airlie

drm/udl: add cache flags definitions for udl_gem_object

By default, set a udl_gem_object as cacheable, but set the WC flag when attaching
a dmabuf. In udl_drm_gem_mmap(), update the cache attributes based on these flags,
similar to exynos_drm_gem_mmap().
Signed-off-by: Haixia Shi <hshi@chromium.org>
Reviewed-by: Sonny Rao <sonnyrao@chromium.org>
Reviewed-by: Olof Johansson <olofj@chromium.org>
Reviewed-by: Stéphane Marchesin <marcheu@chromium.org>
Signed-off-by: Dave Airlie <airlied@redhat.com>
parent a7ca52e1
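
For context (not part of this commit): a minimal userspace sketch of the path that
picks up the new default-cacheable mapping. A dumb buffer created and mmapped this
way is faulted in through udl_drm_gem_mmap(), which after this patch applies the GEM
object's cache flags to vma->vm_page_prot. This assumes the standard DRM dumb-buffer
UAPI and libdrm; the node path and the helper name map_dumb_bo are illustrative.

/*
 * Illustrative only -- not part of this commit.
 * Build against libdrm: pkg-config --cflags --libs libdrm
 */
#include <fcntl.h>
#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>
#include <xf86drm.h>	/* drmIoctl(); pulls in the <drm.h> UAPI types */

static void *map_dumb_bo(uint32_t width, uint32_t height, uint64_t *size)
{
	struct drm_mode_create_dumb creq = {
		.width = width, .height = height, .bpp = 32,
	};
	struct drm_mode_map_dumb mreq = { 0 };
	int fd = open("/dev/dri/card0", O_RDWR);	/* assumed UDL node */

	if (fd < 0)
		return MAP_FAILED;

	/* Allocate a dumb GEM object; udl_gem_alloc_object() now tags it
	 * UDL_BO_CACHEABLE by default. (Error/fd cleanup elided.) */
	if (drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &creq))
		return MAP_FAILED;

	mreq.handle = creq.handle;
	if (drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &mreq))
		return MAP_FAILED;

	*size = creq.size;
	/* With this patch, the resulting user mapping is cacheable. */
	return mmap(NULL, creq.size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    fd, mreq.offset);
}
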
--- a/drivers/gpu/drm/udl/udl_drv.h
+++ b/drivers/gpu/drm/udl/udl_drv.h
@@ -25,6 +25,9 @@
 #define DRIVER_MINOR 0
 #define DRIVER_PATCHLEVEL 1
 
+#define UDL_BO_CACHEABLE (1 << 0)
+#define UDL_BO_WC (1 << 1)
+
 struct udl_device;
 
 struct urb_node {
@@ -69,6 +72,7 @@ struct udl_gem_object {
 	struct page **pages;
 	void *vmapping;
 	struct sg_table *sg;
+	unsigned int flags;
 };
 
 #define to_udl_bo(x) container_of(x, struct udl_gem_object, base)
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -25,6 +25,7 @@ struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev,
 		return NULL;
 	}
 
+	obj->flags = UDL_BO_CACHEABLE;
 	return obj;
 }
 
@@ -56,6 +57,23 @@ udl_gem_create(struct drm_file *file,
 	return 0;
 }
 
+static void update_vm_cache_attr(struct udl_gem_object *obj,
+				 struct vm_area_struct *vma)
+{
+	DRM_DEBUG_KMS("flags = 0x%x\n", obj->flags);
+
+	/* non-cacheable as default. */
+	if (obj->flags & UDL_BO_CACHEABLE) {
+		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+	} else if (obj->flags & UDL_BO_WC) {
+		vma->vm_page_prot =
+			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+	} else {
+		vma->vm_page_prot =
+			pgprot_noncached(vm_get_page_prot(vma->vm_flags));
+	}
+}
+
 int udl_dumb_create(struct drm_file *file,
 		    struct drm_device *dev,
 		    struct drm_mode_create_dumb *args)
@@ -77,6 +95,8 @@ int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 	vma->vm_flags &= ~VM_PFNMAP;
 	vma->vm_flags |= VM_MIXEDMAP;
 
+	update_vm_cache_attr(to_udl_bo(vma->vm_private_data), vma);
+
 	return ret;
 }
 
@@ -279,6 +299,7 @@ struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
 	}
 
 	uobj->base.import_attach = attach;
+	uobj->flags = UDL_BO_WC;
 
 	return &uobj->base;
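
On the import side, a dmabuf attached through PRIME now gets a write-combined
mapping: udl_gem_prime_import() sets uobj->flags = UDL_BO_WC, so a subsequent mmap
of that handle goes through the pgprot_writecombine() branch above. A minimal
sketch, assuming libdrm's drmPrimeFDToHandle() wrapper; udl_fd, dmabuf_fd and
import_dmabuf are illustrative names:

/* Illustrative only -- not part of this commit. */
#include <stdint.h>
#include <xf86drm.h>

int import_dmabuf(int udl_fd, int dmabuf_fd, uint32_t *handle)
{
	/* Wraps DRM_IOCTL_PRIME_FD_TO_HANDLE; the resulting GEM object
	 * is flagged UDL_BO_WC, so its mmap will be write-combined. */
	return drmPrimeFDToHandle(udl_fd, dmabuf_fd, handle);
}
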