Commit 94065bf5 authored by Thomas Zimmermann, committed by Gerd Hoffmann

drm/vboxvideo: Convert vboxvideo driver to VRAM MM

The data structure |struct drm_vram_mm| and its helpers replace vboxvideo's
TTM-based memory manager. It's the same implementation; except for the type
names.

v4:
	* don't select DRM_TTM or DRM_VRAM_MM_HELPER
v3:
	* use drm_gem_vram_mm_funcs
	* convert driver to drm_device-based instance
v2:
	* implement vbox_mmap() with drm_vram_mm_mmap()
Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
Reviewed-by: Hans de Goede <hdegoede@redhat.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20190508082630.15116-19-tzimmermann@suse.de
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
parent 5e6b9255
...@@ -2,7 +2,6 @@ config DRM_VBOXVIDEO ...@@ -2,7 +2,6 @@ config DRM_VBOXVIDEO
tristate "Virtual Box Graphics Card" tristate "Virtual Box Graphics Card"
depends on DRM && X86 && PCI depends on DRM && X86 && PCI
select DRM_KMS_HELPER select DRM_KMS_HELPER
select DRM_TTM
select DRM_VRAM_HELPER select DRM_VRAM_HELPER
select GENERIC_ALLOCATOR select GENERIC_ALLOCATOR
help help
......
...@@ -191,13 +191,7 @@ static struct pci_driver vbox_pci_driver = { ...@@ -191,13 +191,7 @@ static struct pci_driver vbox_pci_driver = {
static const struct file_operations vbox_fops = { static const struct file_operations vbox_fops = {
.owner = THIS_MODULE, .owner = THIS_MODULE,
.open = drm_open, DRM_VRAM_MM_FILE_OPERATIONS
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
.compat_ioctl = drm_compat_ioctl,
.mmap = vbox_mmap,
.poll = drm_poll,
.read = drm_read,
}; };
static struct drm_driver driver = { static struct drm_driver driver = {
...@@ -215,10 +209,7 @@ static struct drm_driver driver = { ...@@ -215,10 +209,7 @@ static struct drm_driver driver = {
.minor = DRIVER_MINOR, .minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL, .patchlevel = DRIVER_PATCHLEVEL,
.gem_free_object_unlocked = DRM_GEM_VRAM_DRIVER,
drm_gem_vram_driver_gem_free_object_unlocked,
.dumb_create = vbox_dumb_create,
.dumb_map_offset = drm_gem_vram_driver_dumb_mmap_offset,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle, .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = drm_gem_prime_export, .gem_prime_export = drm_gem_prime_export,
......
...@@ -20,11 +20,7 @@ ...@@ -20,11 +20,7 @@
#include <drm/drm_gem.h> #include <drm/drm_gem.h>
#include <drm/drm_gem_vram_helper.h> #include <drm/drm_gem_vram_helper.h>
#include <drm/ttm/ttm_bo_api.h> #include <drm/drm_vram_mm_helper.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_memory.h>
#include <drm/ttm/ttm_module.h>
#include "vboxvideo_guest.h" #include "vboxvideo_guest.h"
#include "vboxvideo_vbe.h" #include "vboxvideo_vbe.h"
...@@ -78,10 +74,6 @@ struct vbox_private { ...@@ -78,10 +74,6 @@ struct vbox_private {
int fb_mtrr; int fb_mtrr;
struct {
struct ttm_bo_device bdev;
} ttm;
struct mutex hw_mutex; /* protects modeset and accel/vbva accesses */ struct mutex hw_mutex; /* protects modeset and accel/vbva accesses */
struct work_struct hotplug_work; struct work_struct hotplug_work;
u32 input_mapping_width; u32 input_mapping_width;
...@@ -169,16 +161,11 @@ int vboxfb_create(struct drm_fb_helper *helper, ...@@ -169,16 +161,11 @@ int vboxfb_create(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes); struct drm_fb_helper_surface_size *sizes);
void vbox_fbdev_fini(struct vbox_private *vbox); void vbox_fbdev_fini(struct vbox_private *vbox);
int vbox_dumb_create(struct drm_file *file,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
int vbox_mm_init(struct vbox_private *vbox); int vbox_mm_init(struct vbox_private *vbox);
void vbox_mm_fini(struct vbox_private *vbox); void vbox_mm_fini(struct vbox_private *vbox);
int vbox_gem_create(struct vbox_private *vbox, int vbox_gem_create(struct vbox_private *vbox,
u32 size, bool iskernel, struct drm_gem_object **obj); u32 size, bool iskernel, struct drm_gem_object **obj);
int vbox_mmap(struct file *filp, struct vm_area_struct *vma);
/* vbox_prime.c */ /* vbox_prime.c */
int vbox_gem_prime_pin(struct drm_gem_object *obj); int vbox_gem_prime_pin(struct drm_gem_object *obj);
......
...@@ -283,7 +283,8 @@ int vbox_gem_create(struct vbox_private *vbox, ...@@ -283,7 +283,8 @@ int vbox_gem_create(struct vbox_private *vbox,
if (size == 0) if (size == 0)
return -EINVAL; return -EINVAL;
gbo = drm_gem_vram_create(&vbox->ddev, &vbox->ttm.bdev, size, 0, false); gbo = drm_gem_vram_create(&vbox->ddev, &vbox->ddev.vram_mm->bdev,
size, 0, false);
if (IS_ERR(gbo)) { if (IS_ERR(gbo)) {
ret = PTR_ERR(gbo); ret = PTR_ERR(gbo);
if (ret != -ERESTARTSYS) if (ret != -ERESTARTSYS)
...@@ -295,13 +296,3 @@ int vbox_gem_create(struct vbox_private *vbox, ...@@ -295,13 +296,3 @@ int vbox_gem_create(struct vbox_private *vbox,
return 0; return 0;
} }
int vbox_dumb_create(struct drm_file *file,
struct drm_device *dev, struct drm_mode_create_dumb *args)
{
struct vbox_private *vbox = dev->dev_private;
return drm_gem_vram_fill_create_dumb(file, dev, &vbox->ttm.bdev, 0,
false, args);
}
...@@ -8,131 +8,23 @@ ...@@ -8,131 +8,23 @@
*/ */
#include <linux/pci.h> #include <linux/pci.h>
#include <drm/drm_file.h> #include <drm/drm_file.h>
#include <drm/ttm/ttm_page_alloc.h>
#include "vbox_drv.h" #include "vbox_drv.h"
static inline struct vbox_private *vbox_bdev(struct ttm_bo_device *bd)
{
return container_of(bd, struct vbox_private, ttm.bdev);
}
static int
vbox_bo_init_mem_type(struct ttm_bo_device *bdev, u32 type,
struct ttm_mem_type_manager *man)
{
switch (type) {
case TTM_PL_SYSTEM:
man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
man->available_caching = TTM_PL_MASK_CACHING;
man->default_caching = TTM_PL_FLAG_CACHED;
break;
case TTM_PL_VRAM:
man->func = &ttm_bo_manager_func;
man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
man->default_caching = TTM_PL_FLAG_WC;
break;
default:
DRM_ERROR("Unsupported memory type %u\n", (unsigned int)type);
return -EINVAL;
}
return 0;
}
static int vbox_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
struct ttm_mem_reg *mem)
{
struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
struct vbox_private *vbox = vbox_bdev(bdev);
mem->bus.addr = NULL;
mem->bus.offset = 0;
mem->bus.size = mem->num_pages << PAGE_SHIFT;
mem->bus.base = 0;
mem->bus.is_iomem = false;
if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
return -EINVAL;
switch (mem->mem_type) {
case TTM_PL_SYSTEM:
/* system memory */
return 0;
case TTM_PL_VRAM:
mem->bus.offset = mem->start << PAGE_SHIFT;
mem->bus.base = pci_resource_start(vbox->ddev.pdev, 0);
mem->bus.is_iomem = true;
break;
default:
return -EINVAL;
}
return 0;
}
static void vbox_ttm_io_mem_free(struct ttm_bo_device *bdev,
struct ttm_mem_reg *mem)
{
}
static void vbox_ttm_backend_destroy(struct ttm_tt *tt)
{
ttm_tt_fini(tt);
kfree(tt);
}
static struct ttm_backend_func vbox_tt_backend_func = {
.destroy = &vbox_ttm_backend_destroy,
};
static struct ttm_tt *vbox_ttm_tt_create(struct ttm_buffer_object *bo,
u32 page_flags)
{
struct ttm_tt *tt;
tt = kzalloc(sizeof(*tt), GFP_KERNEL);
if (!tt)
return NULL;
tt->func = &vbox_tt_backend_func;
if (ttm_tt_init(tt, bo, page_flags)) {
kfree(tt);
return NULL;
}
return tt;
}
static struct ttm_bo_driver vbox_bo_driver = {
.ttm_tt_create = vbox_ttm_tt_create,
.init_mem_type = vbox_bo_init_mem_type,
.eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = drm_gem_vram_bo_driver_evict_flags,
.verify_access = drm_gem_vram_bo_driver_verify_access,
.io_mem_reserve = &vbox_ttm_io_mem_reserve,
.io_mem_free = &vbox_ttm_io_mem_free,
};
int vbox_mm_init(struct vbox_private *vbox) int vbox_mm_init(struct vbox_private *vbox)
{ {
struct drm_vram_mm *vmm;
int ret; int ret;
struct drm_device *dev = &vbox->ddev; struct drm_device *dev = &vbox->ddev;
struct ttm_bo_device *bdev = &vbox->ttm.bdev;
ret = ttm_bo_device_init(&vbox->ttm.bdev, vmm = drm_vram_helper_alloc_mm(dev, pci_resource_start(dev->pdev, 0),
&vbox_bo_driver, vbox->available_vram_size,
dev->anon_inode->i_mapping, &drm_gem_vram_mm_funcs);
true); if (IS_ERR(vmm)) {
if (ret) { ret = PTR_ERR(vmm);
DRM_ERROR("Error initialising bo driver; %d\n", ret); DRM_ERROR("Error initializing VRAM MM; %d\n", ret);
return ret; return ret;
} }
ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
vbox->available_vram_size >> PAGE_SHIFT);
if (ret) {
DRM_ERROR("Failed ttm VRAM init: %d\n", ret);
goto err_device_release;
}
#ifdef DRM_MTRR_WC #ifdef DRM_MTRR_WC
vbox->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 0), vbox->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 0),
pci_resource_len(dev->pdev, 0), pci_resource_len(dev->pdev, 0),
...@@ -142,10 +34,6 @@ int vbox_mm_init(struct vbox_private *vbox) ...@@ -142,10 +34,6 @@ int vbox_mm_init(struct vbox_private *vbox)
pci_resource_len(dev->pdev, 0)); pci_resource_len(dev->pdev, 0));
#endif #endif
return 0; return 0;
err_device_release:
ttm_bo_device_release(&vbox->ttm.bdev);
return ret;
} }
void vbox_mm_fini(struct vbox_private *vbox) void vbox_mm_fini(struct vbox_private *vbox)
...@@ -157,13 +45,5 @@ void vbox_mm_fini(struct vbox_private *vbox) ...@@ -157,13 +45,5 @@ void vbox_mm_fini(struct vbox_private *vbox)
#else #else
arch_phys_wc_del(vbox->fb_mtrr); arch_phys_wc_del(vbox->fb_mtrr);
#endif #endif
ttm_bo_device_release(&vbox->ttm.bdev); drm_vram_helper_release_mm(&vbox->ddev);
}
int vbox_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct drm_file *file_priv = filp->private_data;
struct vbox_private *vbox = file_priv->minor->dev->dev_private;
return ttm_bo_mmap(filp, vma, &vbox->ttm.bdev);
} }
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment