Commit 07bbc1c5 authored by Ben Skeggs

drm/nouveau/core/memory: split info pointers from accessor pointers

The accessor functions can change as a result of acquire()/release() calls,
and are protected by any refcounting done there.

Other functions must remain constant, as they can be called at any time.
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
parent dde59b9c
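In effect, this splits each object's vtable in two: a constant "info" table (target/size/addr, acquire/release, boot, map) and a swappable accessor table (rd32/wr32). A minimal sketch of the idea, with simplified types and hypothetical helper names rather than the exact nouveau definitions:

    struct nvkm_memory {
    	const struct nvkm_memory_func *func; /* constant: safe to call any time */
    	const struct nvkm_memory_ptrs *ptrs; /* accessors: may be swapped */
    };

    /* On acquire, an implementation may upgrade to direct-map accessors. */
    static void __iomem *
    example_acquire(struct nvkm_memory *memory)
    {
    	void __iomem *map = example_slow_acquire(memory); /* hypothetical */
    	if (map)
    		memory->ptrs = &example_fast_ptrs;        /* hypothetical */
    	return map;
    }

The diff below applies this split to struct nvkm_memory and to each instmem backend.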
@@ -9,7 +9,10 @@ struct nvkm_vm;
 #define NVOBJ_FLAG_HEAP 0x00000004
 
 struct nvkm_gpuobj {
+	union {
 	const struct nvkm_gpuobj_func *func;
+	const struct nvkm_gpuobj_func *ptrs;
+	};
 	struct nvkm_gpuobj *parent;
 	struct nvkm_memory *memory;
 	struct nvkm_mm_node *node;
...
@@ -14,6 +14,7 @@ enum nvkm_memory_target {
 
 struct nvkm_memory {
 	const struct nvkm_memory_func *func;
+	const struct nvkm_memory_ptrs *ptrs;
 };
 
 struct nvkm_memory_func {
@@ -24,9 +25,12 @@ struct nvkm_memory_func {
 	void (*boot)(struct nvkm_memory *, struct nvkm_vm *);
 	void __iomem *(*acquire)(struct nvkm_memory *);
 	void (*release)(struct nvkm_memory *);
+	void (*map)(struct nvkm_memory *, struct nvkm_vma *, u64 offset);
+};
+
+struct nvkm_memory_ptrs {
 	u32 (*rd32)(struct nvkm_memory *, u64 offset);
 	void (*wr32)(struct nvkm_memory *, u64 offset, u32 data);
-	void (*map)(struct nvkm_memory *, struct nvkm_vma *, u64 offset);
 };
 
 void nvkm_memory_ctor(const struct nvkm_memory_func *, struct nvkm_memory *);
@@ -43,8 +47,8 @@ void nvkm_memory_del(struct nvkm_memory **);
  * macros to guarantee correct behaviour across all chipsets
  */
 #define nvkm_kmap(o) (o)->func->acquire(o)
-#define nvkm_ro32(o,a) (o)->func->rd32((o), (a))
-#define nvkm_wo32(o,a,d) (o)->func->wr32((o), (a), (d))
+#define nvkm_ro32(o,a) (o)->ptrs->rd32((o), (a))
+#define nvkm_wo32(o,a,d) (o)->ptrs->wr32((o), (a), (d))
 #define nvkm_mo32(o,a,m,d) ({ \
 	u32 _addr = (a), _data = nvkm_ro32((o), _addr); \
 	nvkm_wo32((o), _addr, (_data & ~(m)) | (d)); \
...
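Note that call sites are unchanged by the split: a run of accesses is still bracketed by nvkm_kmap()/nvkm_done() (routed through the constant func table), while nvkm_ro32()/nvkm_wo32() now dereference the swappable ptrs table. A usage sketch, assuming an existing nvkm_memory object and the companion nvkm_done() macro defined alongside the macros above:

    void __iomem *map = nvkm_kmap(memory); /* func->acquire; may swap ptrs */
    u32 tmp = nvkm_ro32(memory, 0x10);     /* ptrs->rd32 */
    nvkm_wo32(memory, 0x10, tmp | 1);      /* ptrs->wr32 */
    nvkm_done(memory);                     /* func->release */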
@@ -112,9 +112,13 @@ nvkm_instobj_func = {
 	.size = nvkm_instobj_size,
 	.acquire = nvkm_instobj_acquire,
 	.release = nvkm_instobj_release,
+	.map = nvkm_instobj_map,
+};
+
+static const struct nvkm_memory_ptrs
+nvkm_instobj_ptrs = {
 	.rd32 = nvkm_instobj_rd32,
 	.wr32 = nvkm_instobj_wr32,
-	.map = nvkm_instobj_map,
 };
 
 static void
@@ -137,8 +141,10 @@ nvkm_instobj_acquire_slow(struct nvkm_memory *memory)
 {
 	struct nvkm_instobj *iobj = nvkm_instobj(memory);
 	iobj->map = nvkm_kmap(iobj->parent);
-	if (iobj->map)
+	if (iobj->map) {
 		memory->func = &nvkm_instobj_func;
+		memory->ptrs = &nvkm_instobj_ptrs;
+	}
 	return iobj->map;
 }
@@ -165,9 +171,13 @@ nvkm_instobj_func_slow = {
 	.boot = nvkm_instobj_boot,
 	.acquire = nvkm_instobj_acquire_slow,
 	.release = nvkm_instobj_release_slow,
+	.map = nvkm_instobj_map,
+};
+
+static const struct nvkm_memory_ptrs
+nvkm_instobj_ptrs_slow = {
 	.rd32 = nvkm_instobj_rd32_slow,
 	.wr32 = nvkm_instobj_wr32_slow,
-	.map = nvkm_instobj_map,
 };
 
 int
@@ -196,6 +206,7 @@ nvkm_instobj_new(struct nvkm_instmem *imem, u32 size, u32 align, bool zero,
 	}
 
 	nvkm_memory_ctor(&nvkm_instobj_func_slow, &iobj->memory);
+	iobj->memory.ptrs = &nvkm_instobj_ptrs_slow;
 	iobj->parent = memory;
 	iobj->imem = imem;
 	spin_lock(&iobj->imem->lock);
...
@@ -346,8 +346,6 @@ gk20a_instobj_func_dma = {
 	.size = gk20a_instobj_size,
 	.acquire = gk20a_instobj_acquire_dma,
 	.release = gk20a_instobj_release_dma,
-	.rd32 = gk20a_instobj_rd32,
-	.wr32 = gk20a_instobj_wr32,
 	.map = gk20a_instobj_map,
 };
 
@@ -359,9 +357,13 @@ gk20a_instobj_func_iommu = {
 	.size = gk20a_instobj_size,
 	.acquire = gk20a_instobj_acquire_iommu,
 	.release = gk20a_instobj_release_iommu,
+	.map = gk20a_instobj_map,
+};
+
+static const struct nvkm_memory_ptrs
+gk20a_instobj_ptrs = {
 	.rd32 = gk20a_instobj_rd32,
 	.wr32 = gk20a_instobj_wr32,
-	.map = gk20a_instobj_map,
 };
 
 static int
@@ -377,6 +379,7 @@ gk20a_instobj_ctor_dma(struct gk20a_instmem *imem, u32 npages, u32 align,
 	*_node = &node->base;
 
 	nvkm_memory_ctor(&gk20a_instobj_func_dma, &node->base.memory);
+	node->base.memory.ptrs = &gk20a_instobj_ptrs;
 
 	node->base.vaddr = dma_alloc_attrs(dev, npages << PAGE_SHIFT,
 					   &node->handle, GFP_KERNEL,
@@ -424,6 +427,7 @@ gk20a_instobj_ctor_iommu(struct gk20a_instmem *imem, u32 npages, u32 align,
 	node->dma_addrs = (void *)(node->pages + npages);
 
 	nvkm_memory_ctor(&gk20a_instobj_func_iommu, &node->base.memory);
+	node->base.memory.ptrs = &gk20a_instobj_ptrs;
 
 	/* Allocate backing memory */
 	for (i = 0; i < npages; i++) {
...
@@ -43,22 +43,31 @@ struct nv04_instobj {
 	struct nvkm_mm_node *node;
 };
 
-static enum nvkm_memory_target
-nv04_instobj_target(struct nvkm_memory *memory)
+static void
+nv04_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
 {
-	return NVKM_MEM_TARGET_INST;
+	struct nv04_instobj *iobj = nv04_instobj(memory);
+	struct nvkm_device *device = iobj->imem->base.subdev.device;
+	nvkm_wr32(device, 0x700000 + iobj->node->offset + offset, data);
 }
 
-static u64
-nv04_instobj_addr(struct nvkm_memory *memory)
+static u32
+nv04_instobj_rd32(struct nvkm_memory *memory, u64 offset)
 {
-	return nv04_instobj(memory)->node->offset;
+	struct nv04_instobj *iobj = nv04_instobj(memory);
+	struct nvkm_device *device = iobj->imem->base.subdev.device;
+	return nvkm_rd32(device, 0x700000 + iobj->node->offset + offset);
 }
 
-static u64
-nv04_instobj_size(struct nvkm_memory *memory)
+static const struct nvkm_memory_ptrs
+nv04_instobj_ptrs = {
+	.rd32 = nv04_instobj_rd32,
+	.wr32 = nv04_instobj_wr32,
+};
+
+static void
+nv04_instobj_release(struct nvkm_memory *memory)
 {
-	return nv04_instobj(memory)->node->length;
 }
 
 static void __iomem *
@@ -69,25 +78,22 @@ nv04_instobj_acquire(struct nvkm_memory *memory)
 	return device->pri + 0x700000 + iobj->node->offset;
 }
 
-static void
-nv04_instobj_release(struct nvkm_memory *memory)
+static u64
+nv04_instobj_size(struct nvkm_memory *memory)
 {
+	return nv04_instobj(memory)->node->length;
 }
 
-static u32
-nv04_instobj_rd32(struct nvkm_memory *memory, u64 offset)
+static u64
+nv04_instobj_addr(struct nvkm_memory *memory)
 {
-	struct nv04_instobj *iobj = nv04_instobj(memory);
-	struct nvkm_device *device = iobj->imem->base.subdev.device;
-	return nvkm_rd32(device, 0x700000 + iobj->node->offset + offset);
+	return nv04_instobj(memory)->node->offset;
 }
 
-static void
-nv04_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
+static enum nvkm_memory_target
+nv04_instobj_target(struct nvkm_memory *memory)
 {
-	struct nv04_instobj *iobj = nv04_instobj(memory);
-	struct nvkm_device *device = iobj->imem->base.subdev.device;
-	nvkm_wr32(device, 0x700000 + iobj->node->offset + offset, data);
+	return NVKM_MEM_TARGET_INST;
 }
 
 static void *
@@ -108,8 +114,6 @@ nv04_instobj_func = {
 	.addr = nv04_instobj_addr,
 	.acquire = nv04_instobj_acquire,
 	.release = nv04_instobj_release,
-	.rd32 = nv04_instobj_rd32,
-	.wr32 = nv04_instobj_wr32,
 };
 
 static int
@@ -125,6 +129,7 @@ nv04_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
 	*pmemory = &iobj->memory;
 
 	nvkm_memory_ctor(&nv04_instobj_func, &iobj->memory);
+	iobj->memory.ptrs = &nv04_instobj_ptrs;
 	iobj->imem = imem;
 
 	mutex_lock(&imem->base.subdev.mutex);
...
@@ -45,22 +45,29 @@ struct nv40_instobj {
 	struct nvkm_mm_node *node;
 };
 
-static enum nvkm_memory_target
-nv40_instobj_target(struct nvkm_memory *memory)
+static void
+nv40_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
 {
-	return NVKM_MEM_TARGET_INST;
+	struct nv40_instobj *iobj = nv40_instobj(memory);
+	iowrite32_native(data, iobj->imem->iomem + iobj->node->offset + offset);
 }
 
-static u64
-nv40_instobj_addr(struct nvkm_memory *memory)
+static u32
+nv40_instobj_rd32(struct nvkm_memory *memory, u64 offset)
 {
-	return nv40_instobj(memory)->node->offset;
+	struct nv40_instobj *iobj = nv40_instobj(memory);
+	return ioread32_native(iobj->imem->iomem + iobj->node->offset + offset);
 }
 
-static u64
-nv40_instobj_size(struct nvkm_memory *memory)
+static const struct nvkm_memory_ptrs
+nv40_instobj_ptrs = {
+	.rd32 = nv40_instobj_rd32,
+	.wr32 = nv40_instobj_wr32,
+};
+
+static void
+nv40_instobj_release(struct nvkm_memory *memory)
 {
-	return nv40_instobj(memory)->node->length;
 }
 
 static void __iomem *
@@ -70,23 +77,22 @@ nv40_instobj_acquire(struct nvkm_memory *memory)
 	return iobj->imem->iomem + iobj->node->offset;
 }
 
-static void
-nv40_instobj_release(struct nvkm_memory *memory)
+static u64
+nv40_instobj_size(struct nvkm_memory *memory)
 {
+	return nv40_instobj(memory)->node->length;
 }
 
-static u32
-nv40_instobj_rd32(struct nvkm_memory *memory, u64 offset)
+static u64
+nv40_instobj_addr(struct nvkm_memory *memory)
 {
-	struct nv40_instobj *iobj = nv40_instobj(memory);
-	return ioread32_native(iobj->imem->iomem + iobj->node->offset + offset);
+	return nv40_instobj(memory)->node->offset;
 }
 
-static void
-nv40_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
+static enum nvkm_memory_target
+nv40_instobj_target(struct nvkm_memory *memory)
 {
-	struct nv40_instobj *iobj = nv40_instobj(memory);
-	iowrite32_native(data, iobj->imem->iomem + iobj->node->offset + offset);
+	return NVKM_MEM_TARGET_INST;
 }
 
 static void *
@@ -107,8 +113,6 @@ nv40_instobj_func = {
 	.addr = nv40_instobj_addr,
 	.acquire = nv40_instobj_acquire,
 	.release = nv40_instobj_release,
-	.rd32 = nv40_instobj_rd32,
-	.wr32 = nv40_instobj_wr32,
 };
 
 static int
@@ -124,6 +128,7 @@ nv40_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
 	*pmemory = &iobj->memory;
 
 	nvkm_memory_ctor(&nv40_instobj_func, &iobj->memory);
+	iobj->memory.ptrs = &nv40_instobj_ptrs;
 	iobj->imem = imem;
 
 	mutex_lock(&imem->base.subdev.mutex);
...
@@ -49,50 +49,51 @@ struct nv50_instobj {
 	void *map;
 };
 
-static enum nvkm_memory_target
-nv50_instobj_target(struct nvkm_memory *memory)
+static void
+nv50_instobj_wr32_slow(struct nvkm_memory *memory, u64 offset, u32 data)
 {
-	return NVKM_MEM_TARGET_VRAM;
+	struct nv50_instobj *iobj = nv50_instobj(memory);
+	struct nv50_instmem *imem = iobj->imem;
+	struct nvkm_device *device = imem->base.subdev.device;
+	u64 base = (iobj->mem->offset + offset) & 0xffffff00000ULL;
+	u64 addr = (iobj->mem->offset + offset) & 0x000000fffffULL;
+
+	if (unlikely(imem->addr != base)) {
+		nvkm_wr32(device, 0x001700, base >> 16);
+		imem->addr = base;
+	}
+	nvkm_wr32(device, 0x700000 + addr, data);
 }
 
-static u64
-nv50_instobj_addr(struct nvkm_memory *memory)
+static u32
+nv50_instobj_rd32_slow(struct nvkm_memory *memory, u64 offset)
 {
-	return nv50_instobj(memory)->mem->offset;
+	struct nv50_instobj *iobj = nv50_instobj(memory);
+	struct nv50_instmem *imem = iobj->imem;
+	struct nvkm_device *device = imem->base.subdev.device;
+	u64 base = (iobj->mem->offset + offset) & 0xffffff00000ULL;
+	u64 addr = (iobj->mem->offset + offset) & 0x000000fffffULL;
+	u32 data;
+
+	if (unlikely(imem->addr != base)) {
+		nvkm_wr32(device, 0x001700, base >> 16);
+		imem->addr = base;
+	}
+	data = nvkm_rd32(device, 0x700000 + addr);
+	return data;
 }
 
-static u64
-nv50_instobj_size(struct nvkm_memory *memory)
-{
-	return (u64)nv50_instobj(memory)->mem->size << NVKM_RAM_MM_SHIFT;
-}
+static const struct nvkm_memory_ptrs
+nv50_instobj_slow = {
+	.rd32 = nv50_instobj_rd32_slow,
+	.wr32 = nv50_instobj_wr32_slow,
+};
 
 static void
-nv50_instobj_boot(struct nvkm_memory *memory, struct nvkm_vm *vm)
+nv50_instobj_map(struct nvkm_memory *memory, struct nvkm_vma *vma, u64 offset)
 {
 	struct nv50_instobj *iobj = nv50_instobj(memory);
-	struct nvkm_subdev *subdev = &iobj->imem->base.subdev;
-	struct nvkm_device *device = subdev->device;
-	u64 size = nvkm_memory_size(memory);
-	void __iomem *map;
-	int ret;
-
-	iobj->map = ERR_PTR(-ENOMEM);
-
-	ret = nvkm_vm_get(vm, size, 12, NV_MEM_ACCESS_RW, &iobj->bar);
-	if (ret == 0) {
-		map = ioremap(device->func->resource_addr(device, 3) +
-			      (u32)iobj->bar.offset, size);
-		if (map) {
-			nvkm_memory_map(memory, &iobj->bar, 0);
-			iobj->map = map;
-		} else {
-			nvkm_warn(subdev, "PRAMIN ioremap failed\n");
-			nvkm_vm_put(&iobj->bar);
-		}
-	} else {
-		nvkm_warn(subdev, "PRAMIN exhausted\n");
-	}
+	nvkm_vm_map_at(vma, offset, iobj->mem);
 }
 
 static void
@@ -120,45 +121,50 @@ nv50_instobj_acquire(struct nvkm_memory *memory)
 	return NULL;
 }
 
-static u32
-nv50_instobj_rd32(struct nvkm_memory *memory, u64 offset)
+static void
+nv50_instobj_boot(struct nvkm_memory *memory, struct nvkm_vm *vm)
 {
 	struct nv50_instobj *iobj = nv50_instobj(memory);
-	struct nv50_instmem *imem = iobj->imem;
-	struct nvkm_device *device = imem->base.subdev.device;
-	u64 base = (iobj->mem->offset + offset) & 0xffffff00000ULL;
-	u64 addr = (iobj->mem->offset + offset) & 0x000000fffffULL;
-	u32 data;
-
-	if (unlikely(imem->addr != base)) {
-		nvkm_wr32(device, 0x001700, base >> 16);
-		imem->addr = base;
+	struct nvkm_subdev *subdev = &iobj->imem->base.subdev;
+	struct nvkm_device *device = subdev->device;
+	u64 size = nvkm_memory_size(memory);
+	void __iomem *map;
+	int ret;
+
+	iobj->map = ERR_PTR(-ENOMEM);
+
+	ret = nvkm_vm_get(vm, size, 12, NV_MEM_ACCESS_RW, &iobj->bar);
+	if (ret == 0) {
+		map = ioremap(device->func->resource_addr(device, 3) +
			      (u32)iobj->bar.offset, size);
+		if (map) {
+			nvkm_memory_map(memory, &iobj->bar, 0);
+			iobj->map = map;
+		} else {
+			nvkm_warn(subdev, "PRAMIN ioremap failed\n");
+			nvkm_vm_put(&iobj->bar);
+		}
+	} else {
+		nvkm_warn(subdev, "PRAMIN exhausted\n");
 	}
-	data = nvkm_rd32(device, 0x700000 + addr);
-	return data;
 }
 
-static void
-nv50_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
+static u64
+nv50_instobj_size(struct nvkm_memory *memory)
 {
-	struct nv50_instobj *iobj = nv50_instobj(memory);
-	struct nv50_instmem *imem = iobj->imem;
-	struct nvkm_device *device = imem->base.subdev.device;
-	u64 base = (iobj->mem->offset + offset) & 0xffffff00000ULL;
-	u64 addr = (iobj->mem->offset + offset) & 0x000000fffffULL;
-
-	if (unlikely(imem->addr != base)) {
-		nvkm_wr32(device, 0x001700, base >> 16);
-		imem->addr = base;
-	}
-	nvkm_wr32(device, 0x700000 + addr, data);
+	return (u64)nv50_instobj(memory)->mem->size << NVKM_RAM_MM_SHIFT;
}
 
-static void
-nv50_instobj_map(struct nvkm_memory *memory, struct nvkm_vma *vma, u64 offset)
+static u64
+nv50_instobj_addr(struct nvkm_memory *memory)
 {
-	struct nv50_instobj *iobj = nv50_instobj(memory);
-	nvkm_vm_map_at(vma, offset, iobj->mem);
+	return nv50_instobj(memory)->mem->offset;
+}
+
+static enum nvkm_memory_target
+nv50_instobj_target(struct nvkm_memory *memory)
+{
+	return NVKM_MEM_TARGET_VRAM;
 }
 
 static void *
@@ -183,8 +189,6 @@ nv50_instobj_func = {
 	.boot = nv50_instobj_boot,
 	.acquire = nv50_instobj_acquire,
 	.release = nv50_instobj_release,
-	.rd32 = nv50_instobj_rd32,
-	.wr32 = nv50_instobj_wr32,
 	.map = nv50_instobj_map,
 };
@@ -202,6 +206,7 @@ nv50_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
 	*pmemory = &iobj->memory;
 
 	nvkm_memory_ctor(&nv50_instobj_func, &iobj->memory);
+	iobj->memory.ptrs = &nv50_instobj_slow;
 	iobj->imem = imem;
 
 	size = max((size + 4095) & ~4095, (u32)4096);
...
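Aside: the nv50 slow-path accessors above use a banked-window pattern. Writing base >> 16 to register 0x001700 selects which 1MiB-aligned VRAM chunk is visible through the PRAMIN aperture at 0x700000, and caching imem->addr avoids reprogramming the window when consecutive accesses land in the same bank. A standalone sketch of the same pattern, with hypothetical register names and helpers:

    #include <stdint.h>

    struct window_ctx {
    	uint64_t cur_base;            /* last bank selected */
    	volatile uint32_t *select;    /* hypothetical window-select register */
    	volatile uint32_t *aperture;  /* hypothetical 1MiB aperture base */
    };

    static uint32_t
    window_rd32(struct window_ctx *c, uint64_t offset)
    {
    	uint64_t base = offset & ~0xfffffULL; /* 1MiB-aligned bank */
    	uint64_t addr = offset &  0xfffffULL; /* offset within the window */

    	if (c->cur_base != base) {            /* skip redundant reprogramming */
    		*c->select = (uint32_t)(base >> 16);
    		c->cur_base = base;
    	}
    	return c->aperture[addr / 4];
    }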