/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "priv.h"

#include <core/gpuobj.h>
#include <subdev/fb.h>

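/* Map the memory regions described by 'node' into the VA range backing
 * 'vma', starting 'delta' bytes into the mapping.  The offset is split
 * into a page-directory index (pde) and a page-table index (pte), and
 * each run of PTEs is clipped at page-table boundaries.  For example,
 * if pgt_bits were 15 (an illustrative value, not a specific chipset's),
 * a table of 4KiB pages would hold 1 << 15 PTEs covering 128MiB of VA.
 */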
void
nvkm_vm_map_at(struct nvkm_vma *vma, u64 delta, struct nvkm_mem *node)
{
	struct nvkm_vm *vm = vma->vm;
	struct nvkm_mmu *mmu = vm->mmu;
	struct nvkm_mm_node *r;
	int big = vma->node->type != mmu->func->spg_shift;
	u32 offset = vma->node->offset + (delta >> 12);
	u32 bits = vma->node->type - 12;
	u32 pde  = (offset >> mmu->func->pgt_bits) - vm->fpde;
	u32 pte  = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits;
	u32 max  = 1 << (mmu->func->pgt_bits - bits);
	u32 end, len;

	delta = 0;
	list_for_each_entry(r, &node->regions, rl_entry) {
		u64 phys = (u64)r->offset << 12;
		u32 num  = r->length >> bits;

		while (num) {
			struct nvkm_memory *pgt = vm->pgt[pde].mem[big];

			end = (pte + num);
			if (unlikely(end >= max))
				end = max;
			len = end - pte;

			mmu->func->map(vma, pgt, node, pte, len, phys, delta);

			num -= len;
			pte += len;
			if (unlikely(end >= max)) {
				phys += len << (bits + 12);
				pde++;
				pte = 0;
			}

			delta += (u64)len << vma->node->type;
		}
	}

	mmu->func->flush(vm);
}

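/* As nvkm_vm_map_at(), but for memory described by a scatter-gather
 * table: each DMA segment is mapped page by page via ->map_sg(). */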
static void
nvkm_vm_map_sg_table(struct nvkm_vma *vma, u64 delta, u64 length,
		     struct nvkm_mem *mem)
{
	struct nvkm_vm *vm = vma->vm;
	struct nvkm_mmu *mmu = vm->mmu;
	int big = vma->node->type != mmu->func->spg_shift;
	u32 offset = vma->node->offset + (delta >> 12);
	u32 bits = vma->node->type - 12;
	u32 num  = length >> vma->node->type;
	u32 pde  = (offset >> mmu->func->pgt_bits) - vm->fpde;
	u32 pte  = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits;
	u32 max  = 1 << (mmu->func->pgt_bits - bits);
	unsigned m, sglen;
	u32 end, len;
	int i;
	struct scatterlist *sg;

	for_each_sg(mem->sg->sgl, sg, mem->sg->nents, i) {
		struct nvkm_memory *pgt = vm->pgt[pde].mem[big];
		sglen = sg_dma_len(sg) >> PAGE_SHIFT;

		end = pte + sglen;
		if (unlikely(end >= max))
			end = max;
		len = end - pte;

		for (m = 0; m < len; m++) {
			dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);

			mmu->func->map_sg(vma, pgt, mem, pte, 1, &addr);
			num--;
			pte++;

			if (num == 0)
				goto finish;
		}
		if (unlikely(end >= max)) {
			pde++;
			pte = 0;
		}
		if (m < sglen) {
			for (; m < sglen; m++) {
				dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);

				mmu->func->map_sg(vma, pgt, mem, pte, 1, &addr);
				num--;
				pte++;
				if (num == 0)
					goto finish;
			}
		}

	}
finish:
	mmu->func->flush(vm);
}

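/* As above, but for a flat array of DMA page addresses (mem->pages);
 * runs that stay within one page table are handed to ->map_sg() in a
 * single call. */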
static void
nvkm_vm_map_sg(struct nvkm_vma *vma, u64 delta, u64 length,
	       struct nvkm_mem *mem)
{
	struct nvkm_vm *vm = vma->vm;
	struct nvkm_mmu *mmu = vm->mmu;
	dma_addr_t *list = mem->pages;
	int big = vma->node->type != mmu->func->spg_shift;
	u32 offset = vma->node->offset + (delta >> 12);
	u32 bits = vma->node->type - 12;
	u32 num  = length >> vma->node->type;
	u32 pde  = (offset >> mmu->func->pgt_bits) - vm->fpde;
	u32 pte  = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits;
	u32 max  = 1 << (mmu->func->pgt_bits - bits);
	u32 end, len;

	while (num) {
		struct nvkm_memory *pgt = vm->pgt[pde].mem[big];

		end = (pte + num);
		if (unlikely(end >= max))
			end = max;
		len = end - pte;

		mmu->func->map_sg(vma, pgt, mem, pte, len, list);

		num  -= len;
		pte  += len;
		list += len;
		if (unlikely(end >= max)) {
			pde++;
			pte = 0;
		}
	}

	mmu->func->flush(vm);
}

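/* Map an entire allocation, picking the strategy from how the memory is
 * described: sg table, page array, or list of VRAM regions. */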
void
nvkm_vm_map(struct nvkm_vma *vma, struct nvkm_mem *node)
{
	if (node->sg)
		nvkm_vm_map_sg_table(vma, 0, node->size << 12, node);
	else
	if (node->pages)
		nvkm_vm_map_sg(vma, 0, node->size << 12, node);
	else
		nvkm_vm_map_at(vma, 0, node);
}

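/* Invalidate the PTEs covering 'length' bytes starting 'delta' bytes
 * into the mapping, then flush the MMU. */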
void
nvkm_vm_unmap_at(struct nvkm_vma *vma, u64 delta, u64 length)
{
	struct nvkm_vm *vm = vma->vm;
	struct nvkm_mmu *mmu = vm->mmu;
	int big = vma->node->type != mmu->func->spg_shift;
	u32 offset = vma->node->offset + (delta >> 12);
	u32 bits = vma->node->type - 12;
	u32 num  = length >> vma->node->type;
	u32 pde  = (offset >> mmu->func->pgt_bits) - vm->fpde;
	u32 pte  = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits;
	u32 max  = 1 << (mmu->func->pgt_bits - bits);
	u32 end, len;

	while (num) {
		struct nvkm_memory *pgt = vm->pgt[pde].mem[big];

		end = (pte + num);
		if (unlikely(end >= max))
			end = max;
		len = end - pte;

		mmu->func->unmap(vma, pgt, pte, len);

		num -= len;
		pte += len;
		if (unlikely(end >= max)) {
			pde++;
			pte = 0;
		}
	}

	mmu->func->flush(vm);
}

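/* Unmap the whole VA range covered by 'vma'. */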
void
nvkm_vm_unmap(struct nvkm_vma *vma)
{
	nvkm_vm_unmap_at(vma, 0, (u64)vma->node->length << 12);
}

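/* Drop a reference on every page table in [fpde, lpde]; tables that
 * reach zero are cleared from each attached page directory, flushed,
 * and freed. */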
static void
nvkm_vm_unmap_pgt(struct nvkm_vm *vm, int big, u32 fpde, u32 lpde)
{
	struct nvkm_mmu *mmu = vm->mmu;
	struct nvkm_vm_pgd *vpgd;
	struct nvkm_vm_pgt *vpgt;
	struct nvkm_memory *pgt;
	u32 pde;

	for (pde = fpde; pde <= lpde; pde++) {
		vpgt = &vm->pgt[pde - vm->fpde];
		if (--vpgt->refcount[big])
			continue;

		pgt = vpgt->mem[big];
		vpgt->mem[big] = NULL;

		list_for_each_entry(vpgd, &vm->pgd_list, head) {
			mmu->func->map_pgt(vpgd->obj, pde, vpgt->mem);
		}

		mmu->func->flush(vm);

		nvkm_memory_del(&pgt);
	}
}

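/* Allocate the page table backing 'pde' (8 bytes per entry), publish it
 * in every page directory attached to the VM, and take the initial
 * reference. */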
static int
nvkm_vm_map_pgt(struct nvkm_vm *vm, u32 pde, u32 type)
{
	struct nvkm_mmu *mmu = vm->mmu;
	struct nvkm_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
	struct nvkm_vm_pgd *vpgd;
	int big = (type != mmu->func->spg_shift);
	u32 pgt_size;
	int ret;

	pgt_size  = (1 << (mmu->func->pgt_bits + 12)) >> type;
	pgt_size *= 8;

	ret = nvkm_memory_new(mmu->subdev.device, NVKM_MEM_TARGET_INST,
			      pgt_size, 0x1000, true, &vpgt->mem[big]);
	if (unlikely(ret))
		return ret;

	list_for_each_entry(vpgd, &vm->pgd_list, head) {
		mmu->func->map_pgt(vpgd->obj, pde, vpgt->mem);
	}

	vpgt->refcount[big]++;
	return 0;
}

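/* Allocate a VA range of 'size' bytes with the page size given by
 * 'page_shift', taking (or creating) references on the page tables that
 * back it.  On success the returned vma holds a reference on the vm.
 *
 * Typical usage (a sketch only; 'node' and the access flag are whatever
 * the caller, e.g. buffer-object code, already has):
 *
 *	ret = nvkm_vm_get(vm, size, 12, NV_MEM_ACCESS_RW, &vma);
 *	if (ret == 0) {
 *		nvkm_vm_map(&vma, node);
 *		...
 *		nvkm_vm_unmap(&vma);
 *		nvkm_vm_put(&vma);
 *	}
 */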
int
nvkm_vm_get(struct nvkm_vm *vm, u64 size, u32 page_shift, u32 access,
	    struct nvkm_vma *vma)
{
	struct nvkm_mmu *mmu = vm->mmu;
	u32 align = (1 << page_shift) >> 12;
	u32 msize = size >> 12;
	u32 fpde, lpde, pde;
	int ret;

	mutex_lock(&vm->mutex);
	ret = nvkm_mm_head(&vm->mm, 0, page_shift, msize, msize, align,
			   &vma->node);
	if (unlikely(ret != 0)) {
		mutex_unlock(&vm->mutex);
		return ret;
	}

	fpde = (vma->node->offset >> mmu->func->pgt_bits);
	lpde = (vma->node->offset + vma->node->length - 1) >> mmu->func->pgt_bits;

	for (pde = fpde; pde <= lpde; pde++) {
		struct nvkm_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
		int big = (vma->node->type != mmu->func->spg_shift);

		if (likely(vpgt->refcount[big])) {
			vpgt->refcount[big]++;
			continue;
		}

		ret = nvkm_vm_map_pgt(vm, pde, vma->node->type);
		if (ret) {
			if (pde != fpde)
				nvkm_vm_unmap_pgt(vm, big, fpde, pde - 1);
			nvkm_mm_free(&vm->mm, &vma->node);
			mutex_unlock(&vm->mutex);
			return ret;
		}
	}
	mutex_unlock(&vm->mutex);

	vma->vm = NULL;
	nvkm_vm_ref(vm, &vma->vm, NULL);
	vma->offset = (u64)vma->node->offset << 12;
	vma->access = access;
	return 0;
}

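/* Release the VA range held by 'vma': drop the page-table references,
 * free the allocation node, and drop the vma's reference on the vm. */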
void
nvkm_vm_put(struct nvkm_vma *vma)
{
	struct nvkm_mmu *mmu;
	struct nvkm_vm *vm;
	u32 fpde, lpde;

	if (unlikely(vma->node == NULL))
		return;
	vm = vma->vm;
	mmu = vm->mmu;

	fpde = (vma->node->offset >> mmu->func->pgt_bits);
	lpde = (vma->node->offset + vma->node->length - 1) >> mmu->func->pgt_bits;

	mutex_lock(&vm->mutex);
	nvkm_vm_unmap_pgt(vm, vma->node->type != mmu->func->spg_shift, fpde, lpde);
	nvkm_mm_free(&vm->mm, &vma->node);
	mutex_unlock(&vm->mutex);

	nvkm_vm_ref(NULL, &vma->vm, NULL);
}

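/* Bootstrap the VM: allocate a small-page page table covering 'size'
 * bytes at the start of the address space and hand it to
 * nvkm_memory_boot() for early setup. */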
int
nvkm_vm_boot(struct nvkm_vm *vm, u64 size)
{
	struct nvkm_mmu *mmu = vm->mmu;
	struct nvkm_memory *pgt;
	int ret;

	ret = nvkm_memory_new(mmu->subdev.device, NVKM_MEM_TARGET_INST,
			      (size >> mmu->func->spg_shift) * 8, 0x1000, true, &pgt);
	if (ret == 0) {
		vm->pgt[0].refcount[0] = 1;
		vm->pgt[0].mem[0] = pgt;
		nvkm_memory_boot(pgt, vm);
	}

	return ret;
}

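/* Create a VM spanning [offset, offset + length), with allocations
 * served from 'mm_offset' upward in units of 'block' bytes. */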
int
nvkm_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mm_offset,
	       u32 block, struct lock_class_key *key, struct nvkm_vm **pvm)
{
	static struct lock_class_key _key;
	struct nvkm_vm *vm;
	u64 mm_length = (offset + length) - mm_offset;
	int ret;

	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (!vm)
		return -ENOMEM;

	__mutex_init(&vm->mutex, "&vm->mutex", key ? key : &_key);
	INIT_LIST_HEAD(&vm->pgd_list);
	vm->mmu = mmu;
	kref_init(&vm->refcount);
	vm->fpde = offset >> (mmu->func->pgt_bits + 12);
	vm->lpde = (offset + length - 1) >> (mmu->func->pgt_bits + 12);

	vm->pgt  = vzalloc((vm->lpde - vm->fpde + 1) * sizeof(*vm->pgt));
	if (!vm->pgt) {
		kfree(vm);
		return -ENOMEM;
	}

	ret = nvkm_mm_init(&vm->mm, mm_offset >> 12, mm_length >> 12,
			   block >> 12);
	if (ret) {
		vfree(vm->pgt);
		kfree(vm);
		return ret;
	}

	*pvm = vm;

	return 0;
}

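/* Public constructor: defers to the chipset-specific ->create() hook. */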
int
nvkm_vm_new(struct nvkm_device *device, u64 offset, u64 length, u64 mm_offset,
	    struct lock_class_key *key, struct nvkm_vm **pvm)
{
	struct nvkm_mmu *mmu = device->mmu;
	if (!mmu->func->create)
		return -EINVAL;
	return mmu->func->create(mmu, offset, length, mm_offset, key, pvm);
}

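/* Attach a page-directory object to the VM, writing the pointers for
 * all existing page tables into it. */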
static int
nvkm_vm_link(struct nvkm_vm *vm, struct nvkm_gpuobj *pgd)
{
	struct nvkm_mmu *mmu = vm->mmu;
	struct nvkm_vm_pgd *vpgd;
	int i;

	if (!pgd)
		return 0;

	vpgd = kzalloc(sizeof(*vpgd), GFP_KERNEL);
	if (!vpgd)
		return -ENOMEM;

	vpgd->obj = pgd;

	mutex_lock(&vm->mutex);
	for (i = vm->fpde; i <= vm->lpde; i++)
		mmu->func->map_pgt(pgd, i, vm->pgt[i - vm->fpde].mem);
	list_add(&vpgd->head, &vm->pgd_list);
	mutex_unlock(&vm->mutex);
	return 0;
}

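/* Detach a previously linked page directory from the VM. */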
static void
nvkm_vm_unlink(struct nvkm_vm *vm, struct nvkm_gpuobj *mpgd)
{
	struct nvkm_vm_pgd *vpgd, *tmp;

	if (!mpgd)
		return;

	mutex_lock(&vm->mutex);
	list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
		if (vpgd->obj == mpgd) {
			list_del(&vpgd->head);
			kfree(vpgd);
			break;
		}
	}
	mutex_unlock(&vm->mutex);
}

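/* kref release callback: unlink any remaining page directories and free
 * the VM's bookkeeping. */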
static void
nvkm_vm_del(struct kref *kref)
{
	struct nvkm_vm *vm = container_of(kref, typeof(*vm), refcount);
	struct nvkm_vm_pgd *vpgd, *tmp;

	list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
		nvkm_vm_unlink(vm, vpgd->obj);
	}

	nvkm_mm_fini(&vm->mm);
	vfree(vm->pgt);
	kfree(vm);
}

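/* Reference-counted assignment: take a reference on 'ref' (linking
 * 'pgd' into it if given), release whatever '*ptr' pointed at, then
 * store 'ref' there.  Passing ref == NULL simply drops the old
 * reference. */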
int
nvkm_vm_ref(struct nvkm_vm *ref, struct nvkm_vm **ptr, struct nvkm_gpuobj *pgd)
{
	if (ref) {
		int ret = nvkm_vm_link(ref, pgd);
		if (ret)
			return ret;

		kref_get(&ref->refcount);
	}

	if (*ptr) {
		nvkm_vm_unlink(*ptr, pgd);
		kref_put(&(*ptr)->refcount, nvkm_vm_del);
	}

	*ptr = ref;
	return 0;
}

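/* Subdev plumbing: the hooks below forward to the chipset-specific
 * nvkm_mmu_func implementation where one exists. */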
static int
nvkm_mmu_oneinit(struct nvkm_subdev *subdev)
{
	struct nvkm_mmu *mmu = nvkm_mmu(subdev);
	if (mmu->func->oneinit)
		return mmu->func->oneinit(mmu);
	return 0;
}

static int
nvkm_mmu_init(struct nvkm_subdev *subdev)
{
	struct nvkm_mmu *mmu = nvkm_mmu(subdev);
	if (mmu->func->init)
		mmu->func->init(mmu);
	return 0;
}

static void *
nvkm_mmu_dtor(struct nvkm_subdev *subdev)
{
	struct nvkm_mmu *mmu = nvkm_mmu(subdev);
	if (mmu->func->dtor)
		return mmu->func->dtor(mmu);
	return mmu;
}

static const struct nvkm_subdev_func
nvkm_mmu = {
	.dtor = nvkm_mmu_dtor,
	.oneinit = nvkm_mmu_oneinit,
	.init = nvkm_mmu_init,
};

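/* Initialise the common MMU state from the chipset function table. */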
void
nvkm_mmu_ctor(const struct nvkm_mmu_func *func, struct nvkm_device *device,
	      int index, struct nvkm_mmu *mmu)
{
	nvkm_subdev_ctor(&nvkm_mmu, device, index, 0, &mmu->subdev);
	mmu->func = func;
	mmu->limit = func->limit;
	mmu->dma_bits = func->dma_bits;
	mmu->lpg_shift = func->lpg_shift;
}

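/* Allocate and construct a new MMU subdev instance. */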
int
nvkm_mmu_new_(const struct nvkm_mmu_func *func, struct nvkm_device *device,
	      int index, struct nvkm_mmu **pmmu)
{
	if (!(*pmmu = kzalloc(sizeof(**pmmu), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_mmu_ctor(func, device, index, *pmmu);
	return 0;
}