Commit 9ce523cc authored by Ben Skeggs

drm/nouveau: separate buffer object backing memory from nvkm structures

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
parent cb7e88e7
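
In brief: TTM's ttm_mem_reg::mm_node used to point directly at an nvkm_mem owned by the NVKM layer, and the DRM code reached into it freely. This commit introduces a driver-level wrapper, struct nouveau_mem (new nouveau_mem.c/.h below), which carries the client, the kind/comp tiling attributes, the page size and the VMA state, and keeps the nvkm_mem behind a _mem pointer. A condensed sketch of the resulting VRAM allocation path, restated from nouveau_vram_manager_new() and nouveau_mem_vram() in this diff (illustrative only; names like example_vram_alloc are not part of the commit):

    int example_vram_alloc(struct nouveau_bo *nvbo, struct ttm_mem_reg *reg)
    {
            struct nouveau_drm *drm = nvbo->cli->drm;
            int ret;

            /* allocate the nouveau_mem wrapper; it becomes reg->mm_node */
            ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
            if (ret)
                    return ret;

            /* back it with VRAM: nvkm_ram fills mem->_mem, reg->start is set */
            ret = nouveau_mem_vram(reg, nvbo->contig, nvbo->page);
            if (ret)
                    nouveau_mem_del(reg); /* unmaps/puts the vmas, frees the wrapper */
            return ret;
    }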
@@ -30,6 +30,7 @@ nouveau-y += nouveau_vga.o
 # DRM - memory management
 nouveau-y += nouveau_bo.o
 nouveau-y += nouveau_gem.o
+nouveau-y += nouveau_mem.o
 nouveau-y += nouveau_prime.o
 nouveau-y += nouveau_sgdma.o
 nouveau-y += nouveau_ttm.o
...
@@ -22,12 +22,6 @@
 #define NV_MEM_COMP_VM 0x03

 struct nvkm_mem {
-        struct drm_device *dev;
-
-        struct nvkm_vma bar_vma;
-        struct nvkm_vma vma[2];
-        u8 page_shift;
-
         struct nvkm_mm_node *tag;
         struct nvkm_mm_node *mem;
         dma_addr_t *pages;
...
@@ -20,7 +20,10 @@ struct nvkm_vma {
         int refcount;
         struct nvkm_vm *vm;
         struct nvkm_mm_node *node;
+        union {
         u64 offset;
+                u64 addr;
+        };
         u32 access;
 };
...
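
The anonymous union added above is a transitional rename: both names alias the same 64-bit storage, so unconverted users of nvkm_vma::offset keep working while the nouveau_bo.c hunks below move to ->addr. A standalone illustration of the aliasing pattern (simplified stand-in type, not the kernel code):

    #include <assert.h>
    #include <stdint.h>

    /* simplified stand-in for nvkm_vma during the offset -> addr rename */
    struct vma {
            union {
                    uint64_t offset;    /* legacy name */
                    uint64_t addr;      /* new name */
            };
    };

    int main(void)
    {
            struct vma vma = { .offset = 0x1000 };
            assert(vma.addr == vma.offset); /* both names read the same storage */
            return 0;
    }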
@@ -37,6 +37,7 @@
 #include "nouveau_bo.h"
 #include "nouveau_ttm.h"
 #include "nouveau_gem.h"
+#include "nouveau_mem.h"

 /*
  * NV10-NV40 tiling helpers
@@ -670,14 +671,14 @@ static int
 nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                   struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
 {
-        struct nvkm_mem *mem = old_reg->mm_node;
+        struct nouveau_mem *mem = nouveau_mem(old_reg);
         int ret = RING_SPACE(chan, 10);
         if (ret == 0) {
                 BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8);
-                OUT_RING (chan, upper_32_bits(mem->vma[0].offset));
-                OUT_RING (chan, lower_32_bits(mem->vma[0].offset));
-                OUT_RING (chan, upper_32_bits(mem->vma[1].offset));
-                OUT_RING (chan, lower_32_bits(mem->vma[1].offset));
+                OUT_RING (chan, upper_32_bits(mem->vma[0].addr));
+                OUT_RING (chan, lower_32_bits(mem->vma[0].addr));
+                OUT_RING (chan, upper_32_bits(mem->vma[1].addr));
+                OUT_RING (chan, lower_32_bits(mem->vma[1].addr));
                 OUT_RING (chan, PAGE_SIZE);
                 OUT_RING (chan, PAGE_SIZE);
                 OUT_RING (chan, PAGE_SIZE);
@@ -702,9 +703,9 @@ static int
 nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                   struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
 {
-        struct nvkm_mem *mem = old_reg->mm_node;
-        u64 src_offset = mem->vma[0].offset;
-        u64 dst_offset = mem->vma[1].offset;
+        struct nouveau_mem *mem = nouveau_mem(old_reg);
+        u64 src_offset = mem->vma[0].addr;
+        u64 dst_offset = mem->vma[1].addr;
         u32 page_count = new_reg->num_pages;
         int ret;
@@ -740,9 +741,9 @@ static int
 nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                   struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
 {
-        struct nvkm_mem *mem = old_reg->mm_node;
-        u64 src_offset = mem->vma[0].offset;
-        u64 dst_offset = mem->vma[1].offset;
+        struct nouveau_mem *mem = nouveau_mem(old_reg);
+        u64 src_offset = mem->vma[0].addr;
+        u64 dst_offset = mem->vma[1].addr;
         u32 page_count = new_reg->num_pages;
         int ret;
@@ -779,9 +780,9 @@ static int
 nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                   struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
 {
-        struct nvkm_mem *mem = old_reg->mm_node;
-        u64 src_offset = mem->vma[0].offset;
-        u64 dst_offset = mem->vma[1].offset;
+        struct nouveau_mem *mem = nouveau_mem(old_reg);
+        u64 src_offset = mem->vma[0].addr;
+        u64 dst_offset = mem->vma[1].addr;
         u32 page_count = new_reg->num_pages;
         int ret;
@@ -817,14 +818,14 @@ static int
 nv98_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                   struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
 {
-        struct nvkm_mem *mem = old_reg->mm_node;
+        struct nouveau_mem *mem = nouveau_mem(old_reg);
         int ret = RING_SPACE(chan, 7);
         if (ret == 0) {
                 BEGIN_NV04(chan, NvSubCopy, 0x0320, 6);
-                OUT_RING (chan, upper_32_bits(mem->vma[0].offset));
-                OUT_RING (chan, lower_32_bits(mem->vma[0].offset));
-                OUT_RING (chan, upper_32_bits(mem->vma[1].offset));
-                OUT_RING (chan, lower_32_bits(mem->vma[1].offset));
+                OUT_RING (chan, upper_32_bits(mem->vma[0].addr));
+                OUT_RING (chan, lower_32_bits(mem->vma[0].addr));
+                OUT_RING (chan, upper_32_bits(mem->vma[1].addr));
+                OUT_RING (chan, lower_32_bits(mem->vma[1].addr));
                 OUT_RING (chan, 0x00000000 /* COPY */);
                 OUT_RING (chan, new_reg->num_pages << PAGE_SHIFT);
         }
@@ -835,15 +836,15 @@ static int
 nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                   struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
 {
-        struct nvkm_mem *mem = old_reg->mm_node;
+        struct nouveau_mem *mem = nouveau_mem(old_reg);
         int ret = RING_SPACE(chan, 7);
         if (ret == 0) {
                 BEGIN_NV04(chan, NvSubCopy, 0x0304, 6);
                 OUT_RING (chan, new_reg->num_pages << PAGE_SHIFT);
-                OUT_RING (chan, upper_32_bits(mem->vma[0].offset));
-                OUT_RING (chan, lower_32_bits(mem->vma[0].offset));
-                OUT_RING (chan, upper_32_bits(mem->vma[1].offset));
-                OUT_RING (chan, lower_32_bits(mem->vma[1].offset));
+                OUT_RING (chan, upper_32_bits(mem->vma[0].addr));
+                OUT_RING (chan, lower_32_bits(mem->vma[0].addr));
+                OUT_RING (chan, upper_32_bits(mem->vma[1].addr));
+                OUT_RING (chan, lower_32_bits(mem->vma[1].addr));
                 OUT_RING (chan, 0x00000000 /* MODE_COPY, QUERY_NONE */);
         }
         return ret;
@@ -869,12 +870,12 @@ static int
 nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                   struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
 {
-        struct nvkm_mem *mem = old_reg->mm_node;
+        struct nouveau_mem *mem = nouveau_mem(old_reg);
         u64 length = (new_reg->num_pages << PAGE_SHIFT);
-        u64 src_offset = mem->vma[0].offset;
-        u64 dst_offset = mem->vma[1].offset;
-        int src_tiled = !!mem->memtype;
-        int dst_tiled = !!((struct nvkm_mem *)new_reg->mm_node)->memtype;
+        u64 src_offset = mem->vma[0].addr;
+        u64 dst_offset = mem->vma[1].addr;
+        int src_tiled = !!mem->kind;
+        int dst_tiled = !!nouveau_mem(new_reg)->kind;
         int ret;

         while (length) {
@@ -1011,25 +1012,34 @@ static int
 nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo,
                      struct ttm_mem_reg *reg)
 {
-        struct nvkm_mem *old_mem = bo->mem.mm_node;
-        struct nvkm_mem *new_mem = reg->mm_node;
+        struct nouveau_mem *old_mem = nouveau_mem(&bo->mem);
+        struct nouveau_mem *new_mem = nouveau_mem(reg);
+        struct nvkm_vm *vmm = drm->client.vm;
         u64 size = (u64)reg->num_pages << PAGE_SHIFT;
         int ret;

-        ret = nvkm_vm_get(drm->client.vm, size, old_mem->page_shift,
-                          NV_MEM_ACCESS_RW, &old_mem->vma[0]);
+        ret = nvkm_vm_get(vmm, size, old_mem->mem.page, NV_MEM_ACCESS_RW,
+                          &old_mem->vma[0]);
         if (ret)
                 return ret;

-        ret = nvkm_vm_get(drm->client.vm, size, new_mem->page_shift,
-                          NV_MEM_ACCESS_RW, &old_mem->vma[1]);
+        ret = nvkm_vm_get(vmm, size, new_mem->mem.page, NV_MEM_ACCESS_RW,
+                          &old_mem->vma[1]);
         if (ret) {
                 nvkm_vm_put(&old_mem->vma[0]);
                 return ret;
         }

-        nvkm_vm_map(&old_mem->vma[0], old_mem);
-        nvkm_vm_map(&old_mem->vma[1], new_mem);
+        ret = nouveau_mem_map(old_mem, vmm, &old_mem->vma[0]);
+        if (ret)
+                goto done;
+
+        ret = nouveau_mem_map(new_mem, vmm, &old_mem->vma[1]);
+done:
+        if (ret) {
+                nvkm_vm_put(&old_mem->vma[1]);
+                nvkm_vm_put(&old_mem->vma[0]);
+        }
         return 0;
 }
@@ -1211,8 +1221,8 @@ static void
 nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, bool evict,
                      struct ttm_mem_reg *new_reg)
 {
+        struct nouveau_mem *mem = new_reg ? nouveau_mem(new_reg) : NULL;
         struct nouveau_bo *nvbo = nouveau_bo(bo);
-        struct nvkm_mem *mem = new_reg ? new_reg->mm_node : NULL;
         struct nvkm_vma *vma;

         /* ttm can now (stupidly) pass the driver bos it didn't create... */
@@ -1220,9 +1230,9 @@ nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, bool evict,
                 return;

         if (mem && new_reg->mem_type != TTM_PL_SYSTEM &&
-            mem->page_shift == nvbo->page) {
+            mem->mem.page == nvbo->page) {
                 list_for_each_entry(vma, &nvbo->vma_list, head) {
-                        nvkm_vm_map(vma, mem);
+                        nvkm_vm_map(vma, mem->_mem);
                 }
         } else {
                 list_for_each_entry(vma, &nvbo->vma_list, head) {
@@ -1343,7 +1353,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
         struct ttm_mem_type_manager *man = &bdev->man[reg->mem_type];
         struct nouveau_drm *drm = nouveau_bdev(bdev);
         struct nvkm_device *device = nvxx_device(&drm->client.device);
-        struct nvkm_mem *mem = reg->mm_node;
+        struct nouveau_mem *mem = nouveau_mem(reg);
         int ret;

         reg->bus.addr = NULL;
@@ -1365,7 +1375,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
                 reg->bus.is_iomem = !drm->agp.cma;
         }
 #endif
-        if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA || !mem->memtype)
+        if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA || !mem->kind)
                 /* untiled */
                 break;
         /* fallthrough, tiled memory */
@@ -1377,14 +1387,15 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
                 struct nvkm_vmm *bar = nvkm_bar_bar1_vmm(device);
                 int page_shift = 12;
                 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_FERMI)
-                        page_shift = mem->page_shift;
+                        page_shift = mem->mem.page;

-                ret = nvkm_vm_get(bar, mem->size << 12, page_shift,
-                                  NV_MEM_ACCESS_RW, &mem->bar_vma);
+                ret = nvkm_vm_get(bar, mem->_mem->size << 12,
+                                  page_shift, NV_MEM_ACCESS_RW,
+                                  &mem->bar_vma);
                 if (ret)
                         return ret;

-                nvkm_vm_map(&mem->bar_vma, mem);
+                nvkm_vm_map(&mem->bar_vma, mem->_mem);
                 reg->bus.offset = mem->bar_vma.offset;
         }
         break;
@@ -1397,7 +1408,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
 static void
 nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
 {
-        struct nvkm_mem *mem = reg->mm_node;
+        struct nouveau_mem *mem = nouveau_mem(reg);

         if (!mem->bar_vma.node)
                 return;
@@ -1606,7 +1617,7 @@ nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nvkm_vm *vm,
                    struct nvkm_vma *vma)
 {
         const u32 size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
-        struct nvkm_mem *mem = nvbo->bo.mem.mm_node;
+        struct nouveau_mem *mem = nouveau_mem(&nvbo->bo.mem);
         int ret;

         ret = nvkm_vm_get(vm, size, nvbo->page, NV_MEM_ACCESS_RW, vma);
@@ -1614,8 +1625,8 @@ nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nvkm_vm *vm,
                 return ret;

         if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM &&
-            mem->page_shift == nvbo->page)
-                nvkm_vm_map(vma, nvbo->bo.mem.mm_node);
+            mem->mem.page == nvbo->page)
+                nvkm_vm_map(vma, mem->_mem);

         list_add_tail(&vma->head, &nvbo->vma_list);
         vma->refcount = 1;
...
/*
* Copyright 2017 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "nouveau_mem.h"
#include "nouveau_drv.h"
#include "nouveau_bo.h"
#include <drm/ttm/ttm_bo_driver.h>
int
nouveau_mem_map(struct nouveau_mem *mem,
struct nvkm_vmm *vmm, struct nvkm_vma *vma)
{
nvkm_vm_map(vma, mem->_mem);
return 0;
}
void
nouveau_mem_fini(struct nouveau_mem *mem)
{
        if (mem->vma[1].node) {
                nvkm_vm_unmap(&mem->vma[1]);
                nvkm_vm_put(&mem->vma[1]);
        }
        if (mem->vma[0].node) {
                nvkm_vm_unmap(&mem->vma[0]);
                nvkm_vm_put(&mem->vma[0]);
        }
}
int
nouveau_mem_host(struct ttm_mem_reg *reg, struct ttm_dma_tt *tt)
{
        struct nouveau_mem *mem = nouveau_mem(reg);
        struct nouveau_cli *cli = mem->cli;

        if (mem->kind && cli->device.info.chipset == 0x50)
                mem->comp = mem->kind = 0;
        if (mem->comp) {
                if (cli->device.info.chipset >= 0xc0)
                        mem->kind = gf100_pte_storage_type_map[mem->kind];
                mem->comp = 0;
        }

        mem->__mem.size = (reg->num_pages << PAGE_SHIFT) >> 12;
        mem->__mem.memtype = (mem->comp << 7) | mem->kind;
        if (tt->ttm.sg) mem->__mem.sg    = tt->ttm.sg;
        else            mem->__mem.pages = tt->dma_address;
        mem->_mem = &mem->__mem;
        mem->mem.page = 12;
        return 0;
}
int
nouveau_mem_vram(struct ttm_mem_reg *reg, bool contig, u8 page)
{
        struct nouveau_mem *mem = nouveau_mem(reg);
        struct nvkm_ram *ram = nvxx_fb(&mem->cli->device)->ram;
        u64 size = ALIGN(reg->num_pages << PAGE_SHIFT, 1 << page);
        int ret;

        mem->mem.page = page;

        ret = ram->func->get(ram, size, 1 << page, contig ? 0 : 1 << page,
                             (mem->comp << 8) | mem->kind, &mem->_mem);
        if (ret)
                return ret;

        reg->start = mem->_mem->offset >> PAGE_SHIFT;
        return ret;
}
void
nouveau_mem_del(struct ttm_mem_reg *reg)
{
        struct nouveau_mem *mem = nouveau_mem(reg);
        nouveau_mem_fini(mem);
        kfree(reg->mm_node);
        reg->mm_node = NULL;
}
int
nouveau_mem_new(struct nouveau_cli *cli, u8 kind, u8 comp,
                struct ttm_mem_reg *reg)
{
        struct nouveau_mem *mem;

        if (!(mem = kzalloc(sizeof(*mem), GFP_KERNEL)))
                return -ENOMEM;
        mem->cli = cli;
        mem->kind = kind;
        mem->comp = comp;

        reg->mm_node = mem;
        return 0;
}
#ifndef __NOUVEAU_MEM_H__
#define __NOUVEAU_MEM_H__
#include <subdev/fb.h>

#include <drm/ttm/ttm_bo_api.h>
struct ttm_dma_tt;

static inline struct nouveau_mem *
nouveau_mem(struct ttm_mem_reg *reg)
{
        return reg->mm_node;
}

struct nouveau_mem {
        struct nouveau_cli *cli;
        u8 kind;
        u8 comp;
        struct {
                u8 page;
        } mem;
        struct nvkm_vma vma[2];

        struct nvkm_mem __mem;
        struct nvkm_mem *_mem;
        struct nvkm_vma bar_vma;
};

int nouveau_mem_new(struct nouveau_cli *, u8 kind, u8 comp,
                    struct ttm_mem_reg *);
void nouveau_mem_del(struct ttm_mem_reg *);
int nouveau_mem_vram(struct ttm_mem_reg *, bool contig, u8 page);
int nouveau_mem_host(struct ttm_mem_reg *, struct ttm_dma_tt *);
void nouveau_mem_fini(struct nouveau_mem *);
int nouveau_mem_map(struct nouveau_mem *, struct nvkm_vmm *, struct nvkm_vma *);
#endif
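
A note on the two nvkm_mem members above: _mem is what ultimately gets handed to nvkm_vm_map(). For VRAM, nouveau_mem_vram() has nvkm_ram allocate a real nvkm_mem and stores that pointer in _mem; for system/GART memory, nouveau_mem_host() and the GART managers describe the backing pages in the embedded __mem and point _mem at it. A condensed sketch of the host path, restated from nv04_sgdma_bind() in the diff (illustrative only; example_host_bind is not part of the commit):

    int example_host_bind(struct nouveau_sgdma_be *nvbe, struct ttm_mem_reg *reg)
    {
            struct nouveau_mem *mem = nouveau_mem(reg);
            int ret;

            /* fill mem->__mem from the ttm_dma_tt; sets mem->_mem = &mem->__mem */
            ret = nouveau_mem_host(reg, &nvbe->ttm);
            if (ret)
                    return ret;

            /* pre-nv50 GART: map the pages into the client vm right away */
            ret = nouveau_mem_map(mem, mem->cli->vm, &mem->vma[0]);
            if (ret)
                    nouveau_mem_fini(mem);
            return ret;
    }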
@@ -2,6 +2,7 @@
 #include <linux/slab.h>

 #include "nouveau_drv.h"
+#include "nouveau_mem.h"
 #include "nouveau_ttm.h"

 struct nouveau_sgdma_be {
@@ -9,7 +10,7 @@ struct nouveau_sgdma_be {
          * nouve_bo.c works properly, otherwise have to move them here
          */
         struct ttm_dma_tt ttm;
-        struct nvkm_mem *node;
+        struct nouveau_mem *mem;
 };

 static void
@@ -27,19 +28,20 @@ static int
 nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *reg)
 {
         struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
-        struct nvkm_mem *node = reg->mm_node;
+        struct nouveau_mem *mem = nouveau_mem(reg);
+        int ret;

-        if (ttm->sg) {
-                node->sg = ttm->sg;
-                node->pages = NULL;
-        } else {
-                node->sg = NULL;
-                node->pages = nvbe->ttm.dma_address;
-        }
-        node->size = (reg->num_pages << PAGE_SHIFT) >> 12;
+        ret = nouveau_mem_host(reg, &nvbe->ttm);
+        if (ret)
+                return ret;

-        nvkm_vm_map(&node->vma[0], node);
-        nvbe->node = node;
+        ret = nouveau_mem_map(mem, mem->cli->vm, &mem->vma[0]);
+        if (ret) {
+                nouveau_mem_fini(mem);
+                return ret;
+        }
+
+        nvbe->mem = mem;
         return 0;
 }
@@ -47,7 +49,7 @@ static int
 nv04_sgdma_unbind(struct ttm_tt *ttm)
 {
         struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
-        nvkm_vm_unmap(&nvbe->node->vma[0]);
+        nouveau_mem_fini(nvbe->mem);
         return 0;
 }
@@ -61,30 +63,20 @@ static int
 nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *reg)
 {
         struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
-        struct nvkm_mem *node = reg->mm_node;
-
-        /* noop: bound in move_notify() */
-        if (ttm->sg) {
-                node->sg = ttm->sg;
-                node->pages = NULL;
-        } else {
-                node->sg = NULL;
-                node->pages = nvbe->ttm.dma_address;
-        }
-        node->size = (reg->num_pages << PAGE_SHIFT) >> 12;
-        return 0;
-}
+        struct nouveau_mem *mem = nouveau_mem(reg);
+        int ret;

-static int
-nv50_sgdma_unbind(struct ttm_tt *ttm)
-{
-        /* noop: unbound in move_notify() */
+        ret = nouveau_mem_host(reg, &nvbe->ttm);
+        if (ret)
+                return ret;
+
+        nvbe->mem = mem;
         return 0;
 }

 static struct ttm_backend_func nv50_sgdma_backend = {
         .bind = nv50_sgdma_bind,
-        .unbind = nv50_sgdma_unbind,
+        .unbind = nv04_sgdma_unbind,
         .destroy = nouveau_sgdma_destroy
 };
...
@@ -23,10 +23,10 @@
  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  * USE OR OTHER DEALINGS IN THE SOFTWARE.
  */
-
 #include "nouveau_drv.h"
-#include "nouveau_ttm.h"
 #include "nouveau_gem.h"
+#include "nouveau_mem.h"
+#include "nouveau_ttm.h"

 #include <drm/drm_legacy.h>
@@ -45,23 +45,15 @@ nouveau_manager_fini(struct ttm_mem_type_manager *man)
 }

 static void
-nouveau_manager_debug(struct ttm_mem_type_manager *man,
-                      struct drm_printer *printer)
+nouveau_manager_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *reg)
 {
+        nouveau_mem_del(reg);
 }

-static inline void
-nvkm_mem_node_cleanup(struct nvkm_mem *node)
+static void
+nouveau_manager_debug(struct ttm_mem_type_manager *man,
+                      struct drm_printer *printer)
 {
-        if (node->vma[0].node) {
-                nvkm_vm_unmap(&node->vma[0]);
-                nvkm_vm_put(&node->vma[0]);
-        }
-
-        if (node->vma[1].node) {
-                nvkm_vm_unmap(&node->vma[1]);
-                nvkm_vm_put(&node->vma[1]);
-        }
 }

 static void
@@ -70,8 +62,9 @@ nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
 {
         struct nouveau_drm *drm = nouveau_bdev(man->bdev);
         struct nvkm_ram *ram = nvxx_fb(&drm->client.device)->ram;
-        nvkm_mem_node_cleanup(reg->mm_node);
-        ram->func->put(ram, (struct nvkm_mem **)&reg->mm_node);
+        struct nvkm_mem *mem = nouveau_mem(reg)->_mem;
+        nouveau_mem_del(reg);
+        ram->func->put(ram, &mem);
 }

 static int
@@ -80,31 +73,29 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
                          const struct ttm_place *place,
                          struct ttm_mem_reg *reg)
 {
-        struct nouveau_drm *drm = nouveau_bdev(man->bdev);
-        struct nvkm_ram *ram = nvxx_fb(&drm->client.device)->ram;
         struct nouveau_bo *nvbo = nouveau_bo(bo);
-        struct nvkm_mem *node;
-        u32 size_nc = 0;
+        struct nouveau_drm *drm = nvbo->cli->drm;
+        struct nouveau_mem *mem;
         int ret;

         if (drm->client.device.info.ram_size == 0)
                 return -ENOMEM;

-        if (!nvbo->contig)
-                size_nc = 1 << nvbo->page;
+        ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
+        mem = nouveau_mem(reg);
+        if (ret)
+                return ret;

-        ret = ram->func->get(ram, reg->num_pages << PAGE_SHIFT,
-                             reg->page_alignment << PAGE_SHIFT, size_nc,
-                             nvbo->comp << 8 | nvbo->kind, &node);
+        ret = nouveau_mem_vram(reg, nvbo->contig, nvbo->page);
         if (ret) {
-                reg->mm_node = NULL;
-                return (ret == -ENOSPC) ? 0 : ret;
+                nouveau_mem_del(reg);
+                if (ret == -ENOSPC) {
+                        reg->mm_node = NULL;
+                        return 0;
+                }
+                return ret;
         }

-        node->page_shift = nvbo->page;
-        reg->mm_node = node;
-        reg->start = node->offset >> PAGE_SHIFT;
         return 0;
 }
@@ -116,53 +107,23 @@ const struct ttm_mem_type_manager_func nouveau_vram_manager = {
         .debug = nouveau_manager_debug,
 };

-static void
-nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
-                         struct ttm_mem_reg *reg)
-{
-        nvkm_mem_node_cleanup(reg->mm_node);
-        kfree(reg->mm_node);
-        reg->mm_node = NULL;
-}
-
 static int
 nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
                          struct ttm_buffer_object *bo,
                          const struct ttm_place *place,
                          struct ttm_mem_reg *reg)
 {
-        struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
         struct nouveau_bo *nvbo = nouveau_bo(bo);
-        struct nvkm_mem *node;
+        struct nouveau_drm *drm = nvbo->cli->drm;
+        struct nouveau_mem *mem;
+        int ret;

-        node = kzalloc(sizeof(*node), GFP_KERNEL);
-        if (!node)
-                return -ENOMEM;
-
-        node->page_shift = 12;
-
-        switch (drm->client.device.info.family) {
-        case NV_DEVICE_INFO_V0_TNT:
-        case NV_DEVICE_INFO_V0_CELSIUS:
-        case NV_DEVICE_INFO_V0_KELVIN:
-        case NV_DEVICE_INFO_V0_RANKINE:
-        case NV_DEVICE_INFO_V0_CURIE:
-                break;
-        case NV_DEVICE_INFO_V0_TESLA:
-        case NV_DEVICE_INFO_V0_FERMI:
-        case NV_DEVICE_INFO_V0_KEPLER:
-        case NV_DEVICE_INFO_V0_MAXWELL:
-        case NV_DEVICE_INFO_V0_PASCAL:
-                if (drm->client.device.info.chipset != 0x50)
-                        node->memtype = nvbo->kind;
-                break;
-        default:
-                NV_WARN(drm, "%s: unhandled family type %x\n", __func__,
-                        drm->client.device.info.family);
-                break;
-        }
+        ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
+        mem = nouveau_mem(reg);
+        if (ret)
+                return ret;

-        reg->mm_node = node;
+        mem->_mem = &mem->__mem;
         reg->start = 0;
         return 0;
 }
@@ -171,50 +132,40 @@ const struct ttm_mem_type_manager_func nouveau_gart_manager = {
         .init = nouveau_manager_init,
         .takedown = nouveau_manager_fini,
         .get_node = nouveau_gart_manager_new,
-        .put_node = nouveau_gart_manager_del,
+        .put_node = nouveau_manager_del,
         .debug = nouveau_manager_debug
 };

-static void
-nv04_gart_manager_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *reg)
-{
-        struct nvkm_mem *node = reg->mm_node;
-        if (node->vma[0].node)
-                nvkm_vm_put(&node->vma[0]);
-        kfree(reg->mm_node);
-        reg->mm_node = NULL;
-}
-
 static int
 nv04_gart_manager_new(struct ttm_mem_type_manager *man,
                       struct ttm_buffer_object *bo,
                       const struct ttm_place *place,
                       struct ttm_mem_reg *reg)
 {
-        struct nouveau_drm *drm = nouveau_bdev(man->bdev);
+        struct nouveau_bo *nvbo = nouveau_bo(bo);
+        struct nouveau_drm *drm = nvbo->cli->drm;
+        struct nouveau_mem *mem;
         struct nvkm_mmu *mmu = nvxx_mmu(&drm->client.device);
-        struct nvkm_mem *node;
         int ret;

-        node = kzalloc(sizeof(*node), GFP_KERNEL);
-        if (!node)
-                return -ENOMEM;
-
-        node->page_shift = 12;
-
-        ret = nvkm_vm_get(mmu->vmm, reg->num_pages << 12, node->page_shift,
-                          NV_MEM_ACCESS_RW, &node->vma[0]);
+        ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
+        mem = nouveau_mem(reg);
+        if (ret)
+                return ret;
+
+        ret = nvkm_vm_get(mmu->vmm, reg->num_pages << 12, 12,
+                          NV_MEM_ACCESS_RW, &mem->vma[0]);
         if (ret) {
+                nouveau_mem_del(reg);
                 if (ret == -ENOSPC) {
                         reg->mm_node = NULL;
-                        ret = 0;
+                        return 0;
                 }
-                kfree(node);
                 return ret;
         }

-        reg->mm_node = node;
-        reg->start = node->vma[0].offset >> PAGE_SHIFT;
+        mem->_mem = &mem->__mem;
+        reg->start = mem->vma[0].addr >> PAGE_SHIFT;
         return 0;
 }
@@ -222,7 +173,7 @@ const struct ttm_mem_type_manager_func nv04_gart_manager = {
         .init = nouveau_manager_init,
         .takedown = nouveau_manager_fini,
         .get_node = nv04_gart_manager_new,
-        .put_node = nv04_gart_manager_del,
+        .put_node = nouveau_manager_del,
         .debug = nouveau_manager_debug
 };
...
@@ -530,7 +530,6 @@ gk20a_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
         /* present memory for being mapped using small pages */
         node->mem.size = size >> 12;
         node->mem.memtype = 0;
-        node->mem.page_shift = 12;

         nvkm_debug(subdev, "alloc size: 0x%x, align: 0x%x, gaddr: 0x%llx\n",
                    size, align, node->mem.offset);
...
@@ -356,7 +356,6 @@ nv50_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
         if (ret)
                 return ret;

-        iobj->mem->page_shift = 12;
         return 0;
 }
...