// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Etnaviv Project
 */

#include <drm/drm_file.h>
#include <linux/dma-fence-array.h>
#include <linux/file.h>
#include <linux/pm_runtime.h>
#include <linux/dma-resv.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>

#include "etnaviv_cmdbuf.h"
#include "etnaviv_drv.h"
#include "etnaviv_gpu.h"
#include "etnaviv_gem.h"
#include "etnaviv_perfmon.h"
#include "etnaviv_sched.h"

/*
 * Cmdstream submission:
 */

#define BO_INVALID_FLAGS ~(ETNA_SUBMIT_BO_READ | ETNA_SUBMIT_BO_WRITE)
/* make sure these don't conflict w/ ETNA_SUBMIT_BO_* */
#define BO_LOCKED   0x4000
#define BO_PINNED   0x2000

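/*
 * Allocate the submit object: the bo array is a trailing variable-size
 * member (size_vstruct()), the perfmon request array is a separate
 * allocation. Returns NULL on allocation failure.
 */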
static struct etnaviv_gem_submit *submit_create(struct drm_device *dev,
		struct etnaviv_gpu *gpu, size_t nr_bos, size_t nr_pmrs)
{
	struct etnaviv_gem_submit *submit;
	size_t sz = size_vstruct(nr_bos, sizeof(submit->bos[0]), sizeof(*submit));

	submit = kzalloc(sz, GFP_KERNEL);
	if (!submit)
		return NULL;

	submit->pmrs = kcalloc(nr_pmrs, sizeof(struct etnaviv_perfmon_request),
			       GFP_KERNEL);
	if (!submit->pmrs) {
		kfree(submit);
		return NULL;
	}
	submit->nr_pmrs = nr_pmrs;

	submit->gpu = gpu;
	kref_init(&submit->refcount);

	return submit;
}

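/*
 * Resolve the userspace BO handles to GEM objects. A single pass under
 * file->table_lock looks up each handle in the object_idr directly and
 * takes a reference, and also validates the per-BO flags and any softpin
 * address against ETNAVIV_SOFTPIN_START_ADDRESS.
 */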
static int submit_lookup_objects(struct etnaviv_gem_submit *submit,
	struct drm_file *file, struct drm_etnaviv_gem_submit_bo *submit_bos,
	unsigned nr_bos)
{
	struct drm_etnaviv_gem_submit_bo *bo;
	unsigned i;
	int ret = 0;

	spin_lock(&file->table_lock);

	for (i = 0, bo = submit_bos; i < nr_bos; i++, bo++) {
		struct drm_gem_object *obj;

		if (bo->flags & BO_INVALID_FLAGS) {
			DRM_ERROR("invalid flags: %x\n", bo->flags);
			ret = -EINVAL;
			goto out_unlock;
		}

		submit->bos[i].flags = bo->flags;
		if (submit->flags & ETNA_SUBMIT_SOFTPIN) {
			if (bo->presumed < ETNAVIV_SOFTPIN_START_ADDRESS) {
				DRM_ERROR("invalid softpin address\n");
				ret = -EINVAL;
				goto out_unlock;
			}
			submit->bos[i].va = bo->presumed;
		}

		/* normally use drm_gem_object_lookup(), but for bulk lookup
		 * all under single table_lock just hit object_idr directly:
		 */
		obj = idr_find(&file->object_idr, bo->handle);
		if (!obj) {
			DRM_ERROR("invalid handle %u at index %u\n",
				  bo->handle, i);
			ret = -EINVAL;
			goto out_unlock;
		}

		/*
		 * Take a refcount on the object. The file table lock
		 * prevents the object_idr's refcount on this being dropped.
		 */
		drm_gem_object_get(obj);

		submit->bos[i].obj = to_etnaviv_bo(obj);
	}

out_unlock:
	submit->nr_bos = i;
	spin_unlock(&file->table_lock);

	return ret;
}

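/* Drop the reservation lock of a single BO, if this submit holds it. */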
static void submit_unlock_object(struct etnaviv_gem_submit *submit, int i)
{
	if (submit->bos[i].flags & BO_LOCKED) {
		struct drm_gem_object *obj = &submit->bos[i].obj->base;

		dma_resv_unlock(obj->resv);
		submit->bos[i].flags &= ~BO_LOCKED;
	}
}

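/*
 * Lock the reservation objects of all BOs under one ww_acquire ticket.
 * On -EDEADLK all locks are dropped, the contended BO is slow-locked and
 * the loop is retried, following the usual ww_mutex locking dance.
 */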
static int submit_lock_objects(struct etnaviv_gem_submit *submit,
		struct ww_acquire_ctx *ticket)
{
	int contended, slow_locked = -1, i, ret = 0;

retry:
	for (i = 0; i < submit->nr_bos; i++) {
		struct drm_gem_object *obj = &submit->bos[i].obj->base;

		if (slow_locked == i)
			slow_locked = -1;

		contended = i;

		if (!(submit->bos[i].flags & BO_LOCKED)) {
			ret = dma_resv_lock_interruptible(obj->resv, ticket);
			if (ret == -EALREADY)
				DRM_ERROR("BO at index %u already on submit list\n",
					  i);
			if (ret)
				goto fail;
			submit->bos[i].flags |= BO_LOCKED;
		}
	}

	ww_acquire_done(ticket);

	return 0;

fail:
	for (; i >= 0; i--)
		submit_unlock_object(submit, i);

	if (slow_locked > 0)
		submit_unlock_object(submit, slow_locked);

	if (ret == -EDEADLK) {
		struct drm_gem_object *obj;

		obj = &submit->bos[contended].obj->base;

		/* we lost out in a seqno race, lock and retry.. */
		ret = dma_resv_lock_slow_interruptible(obj->resv, ticket);
		if (!ret) {
			submit->bos[contended].flags |= BO_LOCKED;
			slow_locked = contended;
			goto retry;
		}
	}

	return ret;
}

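/*
 * Reserve a fence slot on each BO and, unless the submit opted out via
 * ETNA_SUBMIT_NO_IMPLICIT, pull the implicit (dma_resv) fences into the
 * scheduler job as dependencies.
 */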
static int submit_fence_sync(struct etnaviv_gem_submit *submit)
{
	int i, ret = 0;

	for (i = 0; i < submit->nr_bos; i++) {
		struct etnaviv_gem_submit_bo *bo = &submit->bos[i];
		struct dma_resv *robj = bo->obj->base.resv;

		ret = dma_resv_reserve_fences(robj, 1);
		if (ret)
			return ret;

		if (submit->flags & ETNA_SUBMIT_NO_IMPLICIT)
			continue;

		ret = drm_sched_job_add_implicit_dependencies(&submit->sched_job,
							      &bo->obj->base,
							      bo->flags & ETNA_SUBMIT_BO_WRITE);
		if (ret)
			return ret;
	}

	return ret;
}

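/*
 * Attach the submit's out_fence to each BO with read or write usage as
 * requested, then drop the reservation locks taken in submit_lock_objects().
 */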
static void submit_attach_object_fences(struct etnaviv_gem_submit *submit)
{
	int i;

	for (i = 0; i < submit->nr_bos; i++) {
		struct drm_gem_object *obj = &submit->bos[i].obj->base;
		bool write = submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE;

		dma_resv_add_fence(obj->resv, submit->out_fence, write ?
				   DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_READ);
		submit_unlock_object(submit, i);
	}
}

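/*
 * Pin each BO into the submit's MMU context. For softpin submits the
 * resulting IOVA must match the address userspace asked for.
 */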
static int submit_pin_objects(struct etnaviv_gem_submit *submit)
{
	int i, ret = 0;

	for (i = 0; i < submit->nr_bos; i++) {
		struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
		struct etnaviv_vram_mapping *mapping;

		mapping = etnaviv_gem_mapping_get(&etnaviv_obj->base,
						  submit->mmu_context,
						  submit->bos[i].va);
		if (IS_ERR(mapping)) {
			ret = PTR_ERR(mapping);
			break;
		}

		if ((submit->flags & ETNA_SUBMIT_SOFTPIN) &&
		     submit->bos[i].va != mapping->iova) {
			etnaviv_gem_mapping_unreference(mapping);
			return -EINVAL;
		}

		atomic_inc(&etnaviv_obj->gpu_active);

		submit->bos[i].flags |= BO_PINNED;
		submit->bos[i].mapping = mapping;
	}

	return ret;
}

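/* Bounds-checked lookup of a BO entry by its index in the submit. */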
static int submit_bo(struct etnaviv_gem_submit *submit, u32 idx,
	struct etnaviv_gem_submit_bo **bo)
{
	if (idx >= submit->nr_bos) {
		DRM_ERROR("invalid buffer index: %u (out of %u)\n",
				idx, submit->nr_bos);
		return -EINVAL;
	}

	*bo = &submit->bos[idx];

	return 0;
}

/* process the relocs and patch up the cmdstream as needed: */
static int submit_reloc(struct etnaviv_gem_submit *submit, void *stream,
		u32 size, const struct drm_etnaviv_gem_submit_reloc *relocs,
		u32 nr_relocs)
{
	u32 i, last_offset = 0;
	u32 *ptr = stream;
	int ret;

	/* Submits using softpin don't blend with relocs */
	if ((submit->flags & ETNA_SUBMIT_SOFTPIN) && nr_relocs != 0)
		return -EINVAL;

	for (i = 0; i < nr_relocs; i++) {
		const struct drm_etnaviv_gem_submit_reloc *r = relocs + i;
		struct etnaviv_gem_submit_bo *bo;
		u32 off;

		if (unlikely(r->flags)) {
			DRM_ERROR("invalid reloc flags\n");
			return -EINVAL;
		}

		if (r->submit_offset % 4) {
			DRM_ERROR("non-aligned reloc offset: %u\n",
				  r->submit_offset);
			return -EINVAL;
		}

		/* offset in dwords: */
		off = r->submit_offset / 4;

		if ((off >= size) || (off < last_offset)) {
			DRM_ERROR("invalid offset %u at reloc %u\n", off, i);
			return -EINVAL;
		}

		ret = submit_bo(submit, r->reloc_idx, &bo);
		if (ret)
			return ret;

		if (r->reloc_offset > bo->obj->base.size - sizeof(*ptr)) {
			DRM_ERROR("relocation %u outside object\n", i);
			return -EINVAL;
		}

		ptr[off] = bo->mapping->iova + r->reloc_offset;

		last_offset = off;
	}

	return 0;
}

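/*
 * Validate the perfmon requests (offset, flags, domain/signal) and copy
 * them into the submit, including a kernel mapping of the read-back BO.
 */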
static int submit_perfmon_validate(struct etnaviv_gem_submit *submit,
		u32 exec_state, const struct drm_etnaviv_gem_submit_pmr *pmrs)
{
	u32 i;

	for (i = 0; i < submit->nr_pmrs; i++) {
		const struct drm_etnaviv_gem_submit_pmr *r = pmrs + i;
		struct etnaviv_gem_submit_bo *bo;
		int ret;

		ret = submit_bo(submit, r->read_idx, &bo);
		if (ret)
			return ret;

		/* offset 0 is reserved for a sequence number used for userspace sync */
		if (r->read_offset == 0) {
			DRM_ERROR("perfmon request: offset is 0");
			return -EINVAL;
		}

		if (r->read_offset >= bo->obj->base.size - sizeof(u32)) {
			DRM_ERROR("perfmon request: offset %u outside object", i);
			return -EINVAL;
		}

		if (r->flags & ~(ETNA_PM_PROCESS_PRE | ETNA_PM_PROCESS_POST)) {
			DRM_ERROR("perfmon request: flags are not valid");
			return -EINVAL;
		}

		if (etnaviv_pm_req_validate(r, exec_state)) {
			DRM_ERROR("perfmon request: domain or signal not valid");
			return -EINVAL;
		}

		submit->pmrs[i].flags = r->flags;
		submit->pmrs[i].domain = r->domain;
		submit->pmrs[i].signal = r->signal;
		submit->pmrs[i].sequence = r->sequence;
		submit->pmrs[i].offset = r->read_offset;
		submit->pmrs[i].bo_vma = etnaviv_gem_vmap(&bo->obj->base);
	}

	return 0;
}

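/*
 * kref release function: undo everything the submit accumulated, i.e.
 * the cmdbuf suballocation, MMU context references, BO pins/locks/refs,
 * and the out_fence together with its user_fences xarray entry.
 */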
static void submit_cleanup(struct kref *kref)
{
	struct etnaviv_gem_submit *submit =
			container_of(kref, struct etnaviv_gem_submit, refcount);
	unsigned i;

	if (submit->cmdbuf.suballoc)
		etnaviv_cmdbuf_free(&submit->cmdbuf);

	if (submit->mmu_context)
		etnaviv_iommu_context_put(submit->mmu_context);

	if (submit->prev_mmu_context)
		etnaviv_iommu_context_put(submit->prev_mmu_context);

	for (i = 0; i < submit->nr_bos; i++) {
		struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;

		/* unpin all objects */
		if (submit->bos[i].flags & BO_PINNED) {
			etnaviv_gem_mapping_unreference(submit->bos[i].mapping);
			atomic_dec(&etnaviv_obj->gpu_active);
			submit->bos[i].mapping = NULL;
			submit->bos[i].flags &= ~BO_PINNED;
		}

		/* if the GPU submit failed, objects might still be locked */
		submit_unlock_object(submit, i);
		drm_gem_object_put(&etnaviv_obj->base);
	}

	wake_up_all(&submit->gpu->fence_event);

	if (submit->out_fence) {
		/*
		 * Remove from user fence array before dropping the reference,
		 * so fence can not be found in lookup anymore.
		 */
		xa_erase(&submit->gpu->user_fences, submit->out_fence_id);
		dma_fence_put(submit->out_fence);
	}

	put_pid(submit->pid);

	kfree(submit->pmrs);
	kfree(submit);
}

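/* Drop a submit reference; the last put frees it via submit_cleanup(). */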
void etnaviv_submit_put(struct etnaviv_gem_submit *submit)
{
	kref_put(&submit->refcount, submit_cleanup);
}

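/*
 * Main submit ioctl: validates the arguments, copies the bo/reloc/pmr
 * arrays and the cmdstream from userspace, builds a scheduler job,
 * pins and locks the BOs, patches relocations into a kernel-owned copy
 * of the cmdstream and finally hands the job to the scheduler. Note the
 * ordering: the job is pushed before the object fences are attached
 * (while the reservation locks are still held), and a late sync_file
 * allocation failure must not clean up the sched_job anymore.
 */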
int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct etnaviv_file_private *ctx = file->driver_priv;
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct drm_etnaviv_gem_submit *args = data;
	struct drm_etnaviv_gem_submit_reloc *relocs;
	struct drm_etnaviv_gem_submit_pmr *pmrs;
	struct drm_etnaviv_gem_submit_bo *bos;
	struct etnaviv_gem_submit *submit;
	struct etnaviv_gpu *gpu;
	struct sync_file *sync_file = NULL;
	struct ww_acquire_ctx ticket;
	int out_fence_fd = -1;
	struct pid *pid = get_pid(task_pid(current));
	void *stream;
	int ret;

	if (args->pipe >= ETNA_MAX_PIPES)
		return -EINVAL;

	gpu = priv->gpu[args->pipe];
	if (!gpu)
		return -ENXIO;

	if (args->stream_size % 4) {
		DRM_ERROR("non-aligned cmdstream buffer size: %u\n",
			  args->stream_size);
		return -EINVAL;
	}

	if (args->exec_state != ETNA_PIPE_3D &&
	    args->exec_state != ETNA_PIPE_2D &&
	    args->exec_state != ETNA_PIPE_VG) {
		DRM_ERROR("invalid exec_state: 0x%x\n", args->exec_state);
		return -EINVAL;
	}

	if (args->flags & ~ETNA_SUBMIT_FLAGS) {
		DRM_ERROR("invalid flags: 0x%x\n", args->flags);
		return -EINVAL;
	}

	if ((args->flags & ETNA_SUBMIT_SOFTPIN) &&
	    priv->mmu_global->version != ETNAVIV_IOMMU_V2) {
		DRM_ERROR("softpin requested on incompatible MMU\n");
		return -EINVAL;
	}

	if (args->stream_size > SZ_128K || args->nr_relocs > SZ_128K ||
	    args->nr_bos > SZ_128K || args->nr_pmrs > 128) {
		DRM_ERROR("submit arguments out of size limits\n");
		return -EINVAL;
	}

	/*
	 * Copy the command submission and bo array to kernel space in
	 * one go, and do this outside of any locks.
	 */
	bos = kvmalloc_array(args->nr_bos, sizeof(*bos), GFP_KERNEL);
	relocs = kvmalloc_array(args->nr_relocs, sizeof(*relocs), GFP_KERNEL);
	pmrs = kvmalloc_array(args->nr_pmrs, sizeof(*pmrs), GFP_KERNEL);
	stream = kvmalloc_array(1, args->stream_size, GFP_KERNEL);
	if (!bos || !relocs || !pmrs || !stream) {
		ret = -ENOMEM;
		goto err_submit_cmds;
	}

	ret = copy_from_user(bos, u64_to_user_ptr(args->bos),
			     args->nr_bos * sizeof(*bos));
	if (ret) {
		ret = -EFAULT;
		goto err_submit_cmds;
	}

	ret = copy_from_user(relocs, u64_to_user_ptr(args->relocs),
			     args->nr_relocs * sizeof(*relocs));
	if (ret) {
		ret = -EFAULT;
		goto err_submit_cmds;
	}

	ret = copy_from_user(pmrs, u64_to_user_ptr(args->pmrs),
			     args->nr_pmrs * sizeof(*pmrs));
	if (ret) {
		ret = -EFAULT;
		goto err_submit_cmds;
	}

	ret = copy_from_user(stream, u64_to_user_ptr(args->stream),
			     args->stream_size);
	if (ret) {
		ret = -EFAULT;
		goto err_submit_cmds;
	}

	if (args->flags & ETNA_SUBMIT_FENCE_FD_OUT) {
		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
		if (out_fence_fd < 0) {
			ret = out_fence_fd;
			goto err_submit_cmds;
		}
	}

	ww_acquire_init(&ticket, &reservation_ww_class);

	submit = submit_create(dev, gpu, args->nr_bos, args->nr_pmrs);
	if (!submit) {
		ret = -ENOMEM;
		goto err_submit_ww_acquire;
	}

	submit->pid = pid;

	ret = etnaviv_cmdbuf_init(priv->cmdbuf_suballoc, &submit->cmdbuf,
				  ALIGN(args->stream_size, 8) + 8);
	if (ret)
		goto err_submit_put;

	submit->ctx = file->driver_priv;
	submit->mmu_context = etnaviv_iommu_context_get(submit->ctx->mmu);
	submit->exec_state = args->exec_state;
	submit->flags = args->flags;

	ret = drm_sched_job_init(&submit->sched_job,
				 &ctx->sched_entity[args->pipe],
				 submit->ctx);
	if (ret)
		goto err_submit_put;

	ret = submit_lookup_objects(submit, file, bos, args->nr_bos);
	if (ret)
		goto err_submit_job;

	if ((priv->mmu_global->version != ETNAVIV_IOMMU_V2) &&
	    !etnaviv_cmd_validate_one(gpu, stream, args->stream_size / 4,
				      relocs, args->nr_relocs)) {
		ret = -EINVAL;
		goto err_submit_job;
	}

	if (args->flags & ETNA_SUBMIT_FENCE_FD_IN) {
		struct dma_fence *in_fence = sync_file_get_fence(args->fence_fd);
		if (!in_fence) {
			ret = -EINVAL;
			goto err_submit_job;
		}

		ret = drm_sched_job_add_dependency(&submit->sched_job,
						   in_fence);
		if (ret)
			goto err_submit_job;
	}

	ret = submit_pin_objects(submit);
	if (ret)
		goto err_submit_job;

	ret = submit_reloc(submit, stream, args->stream_size / 4,
			   relocs, args->nr_relocs);
	if (ret)
		goto err_submit_job;

	ret = submit_perfmon_validate(submit, args->exec_state, pmrs);
	if (ret)
		goto err_submit_job;

	memcpy(submit->cmdbuf.vaddr, stream, args->stream_size);

	ret = submit_lock_objects(submit, &ticket);
	if (ret)
		goto err_submit_job;

	ret = submit_fence_sync(submit);
	if (ret)
		goto err_submit_job;

	ret = etnaviv_sched_push_job(submit);
	if (ret)
		goto err_submit_job;

	submit_attach_object_fences(submit);

	if (args->flags & ETNA_SUBMIT_FENCE_FD_OUT) {
		/*
		 * This can be improved: ideally we want to allocate the sync
		 * file before kicking off the GPU job and just attach the
		 * fence to the sync file here, eliminating the ENOMEM
		 * possibility at this stage.
		 */
		sync_file = sync_file_create(submit->out_fence);
		if (!sync_file) {
			ret = -ENOMEM;
			/*
			 * When this late error is hit, the submit has already
			 * been handed over to the scheduler. At this point
			 * the sched_job must not be cleaned up.
			 */
			goto err_submit_put;
		}
		fd_install(out_fence_fd, sync_file->file);
	}

	args->fence_fd = out_fence_fd;
	args->fence = submit->out_fence_id;

err_submit_job:
	if (ret)
		drm_sched_job_cleanup(&submit->sched_job);
err_submit_put:
	etnaviv_submit_put(submit);

err_submit_ww_acquire:
	ww_acquire_fini(&ticket);

err_submit_cmds:
	if (ret && (out_fence_fd >= 0))
		put_unused_fd(out_fence_fd);
	kvfree(stream);
	kvfree(bos);
	kvfree(relocs);
	kvfree(pmrs);

	return ret;
}