// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2018 Noralf Trønnes
 */

#include <linux/dma-buf.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_prime.h>
#include <drm/drm_print.h>

/**
 * DOC: overview
 *
 * This library provides helpers for GEM objects backed by shmem buffers
 * allocated using anonymous pageable memory.
 *
 * Functions that operate on the GEM object receive struct &drm_gem_shmem_object.
 * For the GEM callbacks in struct &drm_gem_object_funcs, use the similarly
 * named functions with an _object_ infix (e.g., drm_gem_shmem_object_vmap()
 * wraps drm_gem_shmem_vmap()); these helpers perform the necessary type
 * conversion.
 */
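
/*
 * Example: a minimal sketch of how a hypothetical "foo" driver would wire up
 * these helpers (driver name and fields are illustrative, not taken from this
 * file), assuming the DRM_GEM_SHMEM_DRIVER_OPS convenience macro from
 * <drm/drm_gem_shmem_helper.h>:
 *
 *	static const struct drm_driver foo_driver = {
 *		.driver_features = DRIVER_GEM | DRIVER_MODESET,
 *		DRM_GEM_SHMEM_DRIVER_OPS,
 *	};
 *
 * The macro fills in &drm_driver.dumb_create and the PRIME import callbacks
 * with the functions exported below.
 */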

static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
	.free = drm_gem_shmem_object_free,
	.print_info = drm_gem_shmem_object_print_info,
	.pin = drm_gem_shmem_object_pin,
	.unpin = drm_gem_shmem_object_unpin,
	.get_sg_table = drm_gem_shmem_object_get_sg_table,
	.vmap = drm_gem_shmem_object_vmap,
	.vunmap = drm_gem_shmem_object_vunmap,
	.mmap = drm_gem_shmem_object_mmap,
};

static struct drm_gem_shmem_object *
__drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private)
{
	struct drm_gem_shmem_object *shmem;
	struct drm_gem_object *obj;
	int ret = 0;

	size = PAGE_ALIGN(size);

	if (dev->driver->gem_create_object) {
		obj = dev->driver->gem_create_object(dev, size);
		if (IS_ERR(obj))
			return ERR_CAST(obj);
		shmem = to_drm_gem_shmem_obj(obj);
	} else {
		shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
		if (!shmem)
			return ERR_PTR(-ENOMEM);
		obj = &shmem->base;
	}

	if (!obj->funcs)
		obj->funcs = &drm_gem_shmem_funcs;

	if (private) {
		drm_gem_private_object_init(dev, obj, size);
		shmem->map_wc = false; /* dma-buf mappings always use writecombine */
	} else {
		ret = drm_gem_object_init(dev, obj, size);
	}
	if (ret)
		goto err_free;

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto err_release;

	mutex_init(&shmem->pages_lock);
	mutex_init(&shmem->vmap_lock);
	INIT_LIST_HEAD(&shmem->madv_list);

	if (!private) {
		/*
		 * Our buffers are kept pinned, so allocating them
		 * from the MOVABLE zone is a really bad idea, and
		 * conflicts with CMA. See comments above new_inode()
		 * why this is required _and_ expected if you're
		 * going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER |
				     __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	}

	return shmem;

err_release:
	drm_gem_object_release(obj);
err_free:
	kfree(obj);

	return ERR_PTR(ret);
}
/**
 * drm_gem_shmem_create - Allocate an object with the given size
 * @dev: DRM device
 * @size: Size of the object to allocate
 *
 * This function creates a shmem GEM object.
 *
 * Returns:
 * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size)
{
	return __drm_gem_shmem_create(dev, size, false);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_create);
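
/*
 * Example: a minimal sketch of allocating a buffer from a hypothetical driver
 * (names are illustrative):
 *
 *	struct drm_gem_shmem_object *shmem;
 *
 *	shmem = drm_gem_shmem_create(dev, size);
 *	if (IS_ERR(shmem))
 *		return PTR_ERR(shmem);
 *
 * The object is destroyed via the &drm_gem_object_funcs.free callback, i.e.
 * drm_gem_shmem_object_free(), once the last reference is dropped with
 * drm_gem_object_put().
 */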

/**
 * drm_gem_shmem_free - Free resources associated with a shmem GEM object
 * @shmem: shmem GEM object to free
 *
 * This function cleans up the GEM object state and frees the memory used to
 * store the object itself.
 */
void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	WARN_ON(shmem->vmap_use_count);

	if (obj->import_attach) {
		drm_prime_gem_destroy(obj, shmem->sgt);
	} else {
		if (shmem->sgt) {
			dma_unmap_sgtable(obj->dev->dev, shmem->sgt,
					  DMA_BIDIRECTIONAL, 0);
			sg_free_table(shmem->sgt);
			kfree(shmem->sgt);
		}
		if (shmem->pages)
			drm_gem_shmem_put_pages(shmem);
	}

	WARN_ON(shmem->pages_use_count);

	drm_gem_object_release(obj);
	mutex_destroy(&shmem->pages_lock);
	mutex_destroy(&shmem->vmap_lock);
	kfree(shmem);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_free);

static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	struct page **pages;

	if (shmem->pages_use_count++ > 0)
		return 0;

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		DRM_DEBUG_KMS("Failed to get pages (%ld)\n", PTR_ERR(pages));
		shmem->pages_use_count = 0;
		return PTR_ERR(pages);
	}

	/*
	 * TODO: Allocating WC pages which are correctly flushed is only
	 * supported on x86. Ideal solution would be a GFP_WC flag, which also
	 * ttm_pool.c could use.
	 */
#ifdef CONFIG_X86
	if (shmem->map_wc)
		set_pages_array_wc(pages, obj->size >> PAGE_SHIFT);
#endif

	shmem->pages = pages;

	return 0;
}

/*
 * drm_gem_shmem_get_pages - Allocate backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function makes sure that backing pages exist for the shmem GEM object
 * and increases the use count.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
{
	int ret;

	WARN_ON(shmem->base.import_attach);

	ret = mutex_lock_interruptible(&shmem->pages_lock);
	if (ret)
		return ret;
	ret = drm_gem_shmem_get_pages_locked(shmem);
	mutex_unlock(&shmem->pages_lock);

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_get_pages);

static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	if (WARN_ON_ONCE(!shmem->pages_use_count))
		return;

	if (--shmem->pages_use_count > 0)
		return;

#ifdef CONFIG_X86
	if (shmem->map_wc)
		set_pages_array_wb(shmem->pages, obj->size >> PAGE_SHIFT);
#endif

	drm_gem_put_pages(obj, shmem->pages,
			  shmem->pages_mark_dirty_on_put,
			  shmem->pages_mark_accessed_on_put);
	shmem->pages = NULL;
}

/*
 * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function decreases the use count and puts the backing pages when the
 * use count drops to zero.
 */
void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
{
	mutex_lock(&shmem->pages_lock);
	drm_gem_shmem_put_pages_locked(shmem);
	mutex_unlock(&shmem->pages_lock);
}
EXPORT_SYMBOL(drm_gem_shmem_put_pages);
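
/*
 * Example: get and put calls must be balanced. A hypothetical helper that
 * needs direct access to the page array might look like this (sketch only):
 *
 *	static int foo_access_pages(struct drm_gem_shmem_object *shmem)
 *	{
 *		int ret = drm_gem_shmem_get_pages(shmem);
 *
 *		if (ret)
 *			return ret;
 *		... shmem->pages[] is now valid ...
 *		drm_gem_shmem_put_pages(shmem);
 *		return 0;
 *	}
 */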

/**
 * drm_gem_shmem_pin - Pin backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function makes sure the backing pages are pinned in memory while the
 * buffer is exported.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem)
{
	WARN_ON(shmem->base.import_attach);

	return drm_gem_shmem_get_pages(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_pin);

/**
 * drm_gem_shmem_unpin - Unpin backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function removes the requirement that the backing pages are pinned in
 * memory.
 */
void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem)
{
	WARN_ON(shmem->base.import_attach);

	drm_gem_shmem_put_pages(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_unpin);

static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem, struct dma_buf_map *map)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret = 0;

	if (shmem->vmap_use_count++ > 0) {
		dma_buf_map_set_vaddr(map, shmem->vaddr);
		return 0;
	}

	if (obj->import_attach) {
		ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
		if (!ret) {
			if (WARN_ON(map->is_iomem)) {
				ret = -EIO;
				goto err_put_pages;
			}
			shmem->vaddr = map->vaddr;
		}
	} else {
		pgprot_t prot = PAGE_KERNEL;

		ret = drm_gem_shmem_get_pages(shmem);
		if (ret)
			goto err_zero_use;

		if (shmem->map_wc)
			prot = pgprot_writecombine(prot);
		shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
				    VM_MAP, prot);
		if (!shmem->vaddr)
			ret = -ENOMEM;
		else
			dma_buf_map_set_vaddr(map, shmem->vaddr);
	}

	if (ret) {
		DRM_DEBUG_KMS("Failed to vmap pages, error %d\n", ret);
		goto err_put_pages;
	}

	return 0;

err_put_pages:
	if (!obj->import_attach)
		drm_gem_shmem_put_pages(shmem);
err_zero_use:
	shmem->vmap_use_count = 0;

	return ret;
}

/*
 * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
 * @shmem: shmem GEM object
 * @map: Returns the kernel virtual address of the SHMEM GEM object's backing
 *       store.
 *
 * This function makes sure that a contiguous kernel virtual address mapping
 * exists for the buffer backing the shmem GEM object. It hides the differences
 * between dma-buf imported and natively allocated objects.
 *
 * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap().
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem, struct dma_buf_map *map)
{
	int ret;

	ret = mutex_lock_interruptible(&shmem->vmap_lock);
	if (ret)
		return ret;
	ret = drm_gem_shmem_vmap_locked(shmem, map);
	mutex_unlock(&shmem->vmap_lock);

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_vmap);
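
/*
 * Example: a minimal sketch of mapping a buffer, clearing it and unmapping it
 * again (hypothetical caller, using the struct dma_buf_map interface that
 * this version of the helper takes):
 *
 *	struct dma_buf_map map;
 *	int ret;
 *
 *	ret = drm_gem_shmem_vmap(shmem, &map);
 *	if (ret)
 *		return ret;
 *	memset(map.vaddr, 0, shmem->base.size);
 *	drm_gem_shmem_vunmap(shmem, &map);
 *
 * map.vaddr is always a system-memory address here; I/O-memory mappings from
 * imported buffers are rejected with -EIO.
 */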

static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
					struct dma_buf_map *map)
{
	struct drm_gem_object *obj = &shmem->base;

	if (WARN_ON_ONCE(!shmem->vmap_use_count))
		return;

	if (--shmem->vmap_use_count > 0)
		return;

	if (obj->import_attach) {
		dma_buf_vunmap(obj->import_attach->dmabuf, map);
	} else {
		vunmap(shmem->vaddr);
		drm_gem_shmem_put_pages(shmem);
	}

	shmem->vaddr = NULL;
}

/*
 * drm_gem_shmem_vunmap - Unmap a virtual mapping for a shmem GEM object
 * @shmem: shmem GEM object
 * @map: Kernel virtual address where the SHMEM GEM object was mapped
 *
 * This function cleans up a kernel virtual address mapping acquired by
 * drm_gem_shmem_vmap(). The mapping is only removed when the use count drops to
 * zero.
 *
 * This function hides the differences between dma-buf imported and natively
 * allocated objects.
 */
void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem, struct dma_buf_map *map)
{
	mutex_lock(&shmem->vmap_lock);
	drm_gem_shmem_vunmap_locked(shmem, map);
	mutex_unlock(&shmem->vmap_lock);
}
EXPORT_SYMBOL(drm_gem_shmem_vunmap);

static struct drm_gem_shmem_object *
drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
				 struct drm_device *dev, size_t size,
				 uint32_t *handle)
{
	struct drm_gem_shmem_object *shmem;
	int ret;

	shmem = drm_gem_shmem_create(dev, size);
	if (IS_ERR(shmem))
		return shmem;

	/*
	 * Allocate an id in the idr table where the object is registered;
	 * the handle is the id that userspace can see.
	 */
	ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put(&shmem->base);
	if (ret)
		return ERR_PTR(ret);

	return shmem;
}

/* Update the madvise status; returns true if the object has not been purged,
 * false otherwise.
 */
int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv)
{
	mutex_lock(&shmem->pages_lock);

	if (shmem->madv >= 0)
		shmem->madv = madv;

	madv = shmem->madv;

	mutex_unlock(&shmem->pages_lock);

	return (madv >= 0);
}
EXPORT_SYMBOL(drm_gem_shmem_madvise);

void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	struct drm_device *dev = obj->dev;

	WARN_ON(!drm_gem_shmem_is_purgeable(shmem));

	dma_unmap_sgtable(dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0);
	sg_free_table(shmem->sgt);
	kfree(shmem->sgt);
	shmem->sgt = NULL;

	drm_gem_shmem_put_pages_locked(shmem);

	shmem->madv = -1;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * possible back to the system, as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping, 0, (loff_t)-1);
}
EXPORT_SYMBOL(drm_gem_shmem_purge_locked);

bool drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem)
{
	if (!mutex_trylock(&shmem->pages_lock))
		return false;
	drm_gem_shmem_purge_locked(shmem);
	mutex_unlock(&shmem->pages_lock);

	return true;
}
EXPORT_SYMBOL(drm_gem_shmem_purge);
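
/*
 * Example: a rough sketch of how a driver shrinker can use these helpers
 * (hypothetical; drivers such as panfrost keep their own list of madvised
 * objects and take their own locks):
 *
 *	list_for_each_entry(shmem, &foo->shrinker_list, madv_list) {
 *		if (drm_gem_shmem_is_purgeable(shmem) &&
 *		    drm_gem_shmem_purge(shmem))
 *			freed += shmem->base.size >> PAGE_SHIFT;
 *	}
 *
 * Userspace typically marks buffers purgeable beforehand through a
 * driver-specific madvise ioctl that calls drm_gem_shmem_madvise().
 */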

/**
 * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
 * @file: DRM file structure to create the dumb buffer for
 * @dev: DRM device
 * @args: IOCTL data
 *
 * This function computes the pitch of the dumb buffer and rounds it up to a
 * whole number of bytes. Drivers for hardware that doesn't have
 * any additional restrictions on the pitch can directly use this function as
 * their &drm_driver.dumb_create callback.
 *
 * For hardware with additional restrictions, drivers can adjust the fields
 * set up by userspace before calling into this function.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
			      struct drm_mode_create_dumb *args)
{
	u32 min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct drm_gem_shmem_object *shmem;

	if (!args->pitch || !args->size) {
		args->pitch = min_pitch;
		args->size = PAGE_ALIGN(args->pitch * args->height);
	} else {
		/* ensure sane minimum values */
		if (args->pitch < min_pitch)
			args->pitch = min_pitch;
		if (args->size < args->pitch * args->height)
			args->size = PAGE_ALIGN(args->pitch * args->height);
	}

	shmem = drm_gem_shmem_create_with_handle(file, dev, args->size, &args->handle);

	return PTR_ERR_OR_ZERO(shmem);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);
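
/*
 * Example: wiring the dumb-buffer support into a hypothetical driver
 * (sketch; DRM_GEM_SHMEM_DRIVER_OPS sets this callback up as well):
 *
 *	static const struct drm_driver foo_driver = {
 *		...
 *		.dumb_create = drm_gem_shmem_dumb_create,
 *	};
 */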

static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	loff_t num_pages = obj->size >> PAGE_SHIFT;
	vm_fault_t ret;
	struct page *page;
	pgoff_t page_offset;

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	mutex_lock(&shmem->pages_lock);

	if (page_offset >= num_pages ||
	    WARN_ON_ONCE(!shmem->pages) ||
	    shmem->madv < 0) {
		ret = VM_FAULT_SIGBUS;
	} else {
		page = shmem->pages[page_offset];

		ret = vmf_insert_pfn(vma, vmf->address, page_to_pfn(page));
	}

	mutex_unlock(&shmem->pages_lock);

	return ret;
}

static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	int ret;

	WARN_ON(shmem->base.import_attach);

	ret = drm_gem_shmem_get_pages(shmem);
	WARN_ON_ONCE(ret != 0);

	drm_gem_vm_open(vma);
}

static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	drm_gem_shmem_put_pages(shmem);
	drm_gem_vm_close(vma);
}

static const struct vm_operations_struct drm_gem_shmem_vm_ops = {
	.fault = drm_gem_shmem_fault,
	.open = drm_gem_shmem_vm_open,
	.close = drm_gem_shmem_vm_close,
};

/**
 * drm_gem_shmem_mmap - Memory-map a shmem GEM object
 * @shmem: shmem GEM object
 * @vma: VMA for the area to be mapped
 *
 * This function implements an augmented version of the GEM DRM file mmap
 * operation for shmem objects.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;

	if (obj->import_attach) {
		/* Drop the reference drm_gem_mmap_obj() acquired. */
		drm_gem_object_put(obj);
		vma->vm_private_data = NULL;

		return dma_buf_mmap(obj->dma_buf, vma, 0);
	}

	ret = drm_gem_shmem_get_pages(shmem);
	if (ret) {
		drm_gem_vm_close(vma);
		return ret;
	}

	vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND;
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	if (shmem->map_wc)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	vma->vm_ops = &drm_gem_shmem_vm_ops;

	return 0;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap);
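
/*
 * Note: this helper is normally reached through the &drm_gem_object_funcs.mmap
 * callback installed in drm_gem_shmem_funcs above, not called directly. A
 * driver only needs the generic GEM file operations (sketch):
 *
 *	DEFINE_DRM_GEM_FOPS(foo_fops);
 *
 * drm_gem_mmap() then resolves the fake offset to the GEM object and invokes
 * drm_gem_shmem_object_mmap(), which wraps this function.
 */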

/**
 * drm_gem_shmem_print_info() - Print &drm_gem_shmem_object info for debugfs
 * @shmem: shmem GEM object
 * @p: DRM printer
 * @indent: Tab indentation level
 */
void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem,
			      struct drm_printer *p, unsigned int indent)
{
	drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count);
	drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
	drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
}
EXPORT_SYMBOL(drm_gem_shmem_print_info);

/**
 * drm_gem_shmem_get_sg_table - Provide a scatter/gather table of pinned
 *                              pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function exports a scatter/gather table suitable for PRIME usage by
 * calling the standard DMA mapping API.
 *
 * Drivers that need to acquire a scatter/gather table for objects should call
 * drm_gem_shmem_get_pages_sgt() instead.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or an ERR_PTR()-encoded
 * negative error code on failure.
 */
struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	WARN_ON(shmem->base.import_attach);

	return drm_prime_pages_to_sg(obj->dev, shmem->pages, obj->size >> PAGE_SHIFT);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);

/**
 * drm_gem_shmem_get_pages_sgt - Pin pages, dma map them, and return a
 *				 scatter/gather table for a shmem GEM object.
 * @shmem: shmem GEM object
 *
 * This function returns a scatter/gather table suitable for driver usage. If
 * the sg table doesn't exist, the pages are pinned, dma-mapped, and a sg
 * table created.
 *
 * This is the main function for drivers to get at backing storage, and it hides
 * any difference between dma-buf imported and natively allocated objects.
 * drm_gem_shmem_get_sg_table() should not be directly called by drivers.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or an ERR_PTR()-encoded
 * negative error code on failure.
 */
struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;
	struct sg_table *sgt;

	if (shmem->sgt)
		return shmem->sgt;

	WARN_ON(obj->import_attach);

	ret = drm_gem_shmem_get_pages(shmem);
	if (ret)
		return ERR_PTR(ret);

	sgt = drm_gem_shmem_get_sg_table(shmem);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto err_put_pages;
	}
	/* Map the pages for use by the h/w. */
	ret = dma_map_sgtable(obj->dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
	if (ret)
		goto err_free_sgt;

	shmem->sgt = sgt;

	return sgt;

err_free_sgt:
	sg_free_table(sgt);
	kfree(sgt);
err_put_pages:
	drm_gem_shmem_put_pages(shmem);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_pages_sgt);
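
/*
 * Example: a minimal sketch of handing the DMA addresses to hardware
 * (hypothetical device and foo_hw_map_chunk() helper, using the standard
 * scatterlist iterators):
 *
 *	struct sg_table *sgt;
 *	struct scatterlist *sg;
 *	unsigned int i;
 *
 *	sgt = drm_gem_shmem_get_pages_sgt(shmem);
 *	if (IS_ERR(sgt))
 *		return PTR_ERR(sgt);
 *
 *	for_each_sgtable_dma_sg(sgt, sg, i)
 *		foo_hw_map_chunk(foo, sg_dma_address(sg), sg_dma_len(sg));
 *
 * The table stays owned by the object; it is unmapped and freed again in
 * drm_gem_shmem_free() or on purge.
 */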

/**
 * drm_gem_shmem_prime_import_sg_table - Produce a shmem GEM object from
 *                 another driver's scatter/gather table of pinned pages
 * @dev: Device to import into
 * @attach: DMA-BUF attachment
 * @sgt: Scatter/gather table of pinned pages
 *
 * This function imports a scatter/gather table exported via DMA-BUF by
 * another driver. Drivers that use the shmem helpers should set this as their
 * &drm_driver.gem_prime_import_sg_table callback.
 *
 * Returns:
 * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
 * error code on failure.
 */
struct drm_gem_object *
drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
				    struct dma_buf_attachment *attach,
				    struct sg_table *sgt)
{
	size_t size = PAGE_ALIGN(attach->dmabuf->size);
	struct drm_gem_shmem_object *shmem;

	shmem = __drm_gem_shmem_create(dev, size, true);
	if (IS_ERR(shmem))
		return ERR_CAST(shmem);

	shmem->sgt = sgt;

	DRM_DEBUG_PRIME("size = %zu\n", size);

	return &shmem->base;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table);
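
/*
 * Example: a driver opts into this import path through its &drm_driver
 * (sketch; DRM_GEM_SHMEM_DRIVER_OPS includes this callback too):
 *
 *	static const struct drm_driver foo_driver = {
 *		...
 *		.gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table,
 *	};
 */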

MODULE_DESCRIPTION("DRM SHMEM memory-management helpers");
MODULE_IMPORT_NS(DMA_BUF);
MODULE_LICENSE("GPL v2");