Commit bb112b14 authored by Haneen Mohammed, committed by Sean Paul

drm/vkms: Add functions to map/unmap GEM backing storage

This patch adds the necessary functions to map/unmap GEM
backing memory to the kernel's virtual address space.
Signed-off-by: Haneen Mohammed <hamohammed.sa@gmail.com>
Signed-off-by: Sean Paul <seanpaul@chromium.org>
Link: https://patchwork.freedesktop.org/patch/msgid/4b6563ae4f4337a5fd51f872424addf64e8d59a6.1532446182.git.hamohammed.sa@gmail.com
parent 344d0079
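
For orientation before the diff: an in-driver consumer of the new helpers would map the object, use the kernel virtual address stored in vaddr, and then unmap it again. A rough sketch (illustrative only, not part of this patch; the function name is hypothetical):

/* Illustrative sketch only -- not part of this patch.  A hypothetical
 * consumer maps the GEM object's backing memory, reads through the
 * kernel virtual mapping, then drops the mapping again.
 */
static int example_peek_first_byte(struct drm_gem_object *obj, u8 *out)
{
        struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(obj);
        int ret;

        ret = vkms_gem_vmap(obj);       /* get the pages and map them */
        if (ret)
                return ret;

        *out = *(u8 *)vkms_obj->vaddr;  /* access the kernel mapping */

        vkms_gem_vunmap(obj);           /* drop the mapping when done */
        return 0;
}
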
@@ -39,6 +39,8 @@ struct vkms_gem_object {
         struct drm_gem_object gem;
         struct mutex pages_lock; /* Page lock used in page fault handler */
         struct page **pages;
+        unsigned int vmap_count;
+        void *vaddr;
 };
 
 #define drm_crtc_to_vkms_output(target) \
@@ -47,6 +49,9 @@ struct vkms_gem_object {
 #define drm_device_to_vkms_device(target) \
         container_of(target, struct vkms_device, drm)
 
+#define drm_gem_to_vkms_gem(target)\
+        container_of(target, struct vkms_gem_object, gem)
+
 /* CRTC */
 int vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
                    struct drm_plane *primary, struct drm_plane *cursor);
@@ -75,4 +80,8 @@ int vkms_dumb_map(struct drm_file *file, struct drm_device *dev,
 void vkms_gem_free_object(struct drm_gem_object *obj);
 
+int vkms_gem_vmap(struct drm_gem_object *obj);
+
+void vkms_gem_vunmap(struct drm_gem_object *obj);
+
 #endif /* _VKMS_DRV_H_ */
@@ -37,7 +37,9 @@ void vkms_gem_free_object(struct drm_gem_object *obj)
         struct vkms_gem_object *gem = container_of(obj, struct vkms_gem_object,
                                                     gem);
 
-        kvfree(gem->pages);
+        WARN_ON(gem->pages);
+        WARN_ON(gem->vaddr);
+
         mutex_destroy(&gem->pages_lock);
         drm_gem_object_release(obj);
         kfree(gem);
@@ -177,3 +179,78 @@ int vkms_dumb_map(struct drm_file *file, struct drm_device *dev,
         return ret;
 }
+
+static struct page **_get_pages(struct vkms_gem_object *vkms_obj)
+{
+        struct drm_gem_object *gem_obj = &vkms_obj->gem;
+
+        if (!vkms_obj->pages) {
+                struct page **pages = drm_gem_get_pages(gem_obj);
+
+                if (IS_ERR(pages))
+                        return pages;
+
+                if (cmpxchg(&vkms_obj->pages, NULL, pages))
+                        drm_gem_put_pages(gem_obj, pages, false, true);
+        }
+
+        return vkms_obj->pages;
+}
+
+void vkms_gem_vunmap(struct drm_gem_object *obj)
+{
+        struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(obj);
+
+        mutex_lock(&vkms_obj->pages_lock);
+        if (vkms_obj->vmap_count < 1) {
+                WARN_ON(vkms_obj->vaddr);
+                WARN_ON(vkms_obj->pages);
+                mutex_unlock(&vkms_obj->pages_lock);
+                return;
+        }
+
+        vkms_obj->vmap_count--;
+
+        if (vkms_obj->vmap_count == 0) {
+                vunmap(vkms_obj->vaddr);
+                vkms_obj->vaddr = NULL;
+                drm_gem_put_pages(obj, vkms_obj->pages, false, true);
+                vkms_obj->pages = NULL;
+        }
+
+        mutex_unlock(&vkms_obj->pages_lock);
+}
+
+int vkms_gem_vmap(struct drm_gem_object *obj)
+{
+        struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(obj);
+        int ret = 0;
+
+        mutex_lock(&vkms_obj->pages_lock);
+
+        if (!vkms_obj->vaddr) {
+                unsigned int n_pages = obj->size >> PAGE_SHIFT;
+                struct page **pages = _get_pages(vkms_obj);
+
+                if (IS_ERR(pages)) {
+                        ret = PTR_ERR(pages);
+                        goto out;
+                }
+
+                vkms_obj->vaddr = vmap(pages, n_pages, VM_MAP, PAGE_KERNEL);
+                if (!vkms_obj->vaddr)
+                        goto err_vmap;
+
+                vkms_obj->vmap_count++;
+        }
+
+        goto out;
+
+err_vmap:
+        ret = -ENOMEM;
+        drm_gem_put_pages(obj, vkms_obj->pages, false, true);
+        vkms_obj->pages = NULL;
+out:
+        mutex_unlock(&vkms_obj->pages_lock);
+        return ret;
+}
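
A note on _get_pages(): the freshly allocated page array is published with cmpxchg(), so if another path has already installed one, the local copy is released with drm_gem_put_pages() and the existing array is reused. A minimal, self-contained userspace analogue of that publish-or-release idiom (generic names; not vkms or kernel code):

/* Sketch of the publish-or-release idiom used by _get_pages():
 * allocate a resource, try to install it atomically, and free our
 * copy if another thread already installed one.  All names here are
 * hypothetical.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
        _Atomic(int *) pages;           /* stands in for vkms_obj->pages */
};

static int *get_pages(struct obj *o)
{
        int *cur = atomic_load(&o->pages);

        if (!cur) {
                int *mine = calloc(16, sizeof(*mine));  /* our candidate array */
                int *expected = NULL;

                if (!mine)
                        return NULL;
                /* Try to publish; on failure 'expected' holds the winner. */
                if (!atomic_compare_exchange_strong(&o->pages, &expected, mine)) {
                        free(mine);     /* lost the race, drop our copy */
                        return expected;
                }
                return mine;
        }
        return cur;
}

int main(void)
{
        struct obj o = { .pages = NULL };
        int *first = get_pages(&o);
        int *second = get_pages(&o);

        printf("same array both times: %s\n", first == second ? "yes" : "no");
        free(first);
        return 0;
}

In the patch itself, vkms_gem_vmap() and vkms_gem_vunmap() additionally guard vaddr, pages and vmap_count with pages_lock, so the vmap() mapping is built at most once and torn down again only when vmap_count falls back to zero.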