Commit 00c55507 authored by Ben Skeggs

drm/nouveau/imem: switch to subdev printk macros

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
parent 18019e95
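The conversion is mechanical: the old nv_error()/nv_warn()/nv_debug()/nv_info() helpers took an nvkm object pointer directly, while the subdev printk macros take an explicit struct nvkm_subdev *, so each constructor now resolves that pointer once up front. A minimal sketch of the pattern, using the imem pointer and message text from the first hunk below:

	/* before: object-based helper, passed the instmem object directly */
	nv_error(imem, "cannot allocate DMA memory\n");

	/* after: resolve the subdev once, then use the subdev macros */
	struct nvkm_subdev *subdev = &imem->base.subdev;
	nvkm_error(subdev, "cannot allocate DMA memory\n");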
@@ -207,6 +207,7 @@ gk20a_instobj_ctor_dma(struct nvkm_object *parent, struct nvkm_object *engine,
 {
 	struct gk20a_instobj_dma *node;
 	struct gk20a_instmem *imem = (void *)nvkm_instmem(parent);
+	struct nvkm_subdev *subdev = &imem->base.subdev;
 	struct device *dev = nv_device_base(nv_device(parent));
 	int ret;
 
@@ -220,14 +221,15 @@ gk20a_instobj_ctor_dma(struct nvkm_object *parent, struct nvkm_object *engine,
 				       &node->handle, GFP_KERNEL,
 				       &imem->attrs);
 	if (!node->cpuaddr) {
-		nv_error(imem, "cannot allocate DMA memory\n");
+		nvkm_error(subdev, "cannot allocate DMA memory\n");
 		return -ENOMEM;
 	}
 
 	/* alignment check */
 	if (unlikely(node->handle & (align - 1)))
-		nv_warn(imem, "memory not aligned as requested: %pad (0x%x)\n",
-			&node->handle, align);
+		nvkm_warn(subdev,
+			  "memory not aligned as requested: %pad (0x%x)\n",
+			  &node->handle, align);
 
 	/* present memory for being mapped using small pages */
 	node->r.type = 12;
@@ -249,6 +251,7 @@ gk20a_instobj_ctor_iommu(struct nvkm_object *parent, struct nvkm_object *engine,
 {
 	struct gk20a_instobj_iommu *node;
 	struct gk20a_instmem *imem = (void *)nvkm_instmem(parent);
+	struct nvkm_subdev *subdev = &imem->base.subdev;
 	struct nvkm_mm_node *r;
 	int ret;
 	int i;
@@ -277,7 +280,7 @@ gk20a_instobj_ctor_iommu(struct nvkm_object *parent, struct nvkm_object *engine,
 			   align >> imem->iommu_pgshift, &r);
 	mutex_unlock(imem->mm_mutex);
 	if (ret) {
-		nv_error(imem, "virtual space is full!\n");
+		nvkm_error(subdev, "virtual space is full!\n");
 		goto free_pages;
 	}
 
@@ -289,7 +292,7 @@ gk20a_instobj_ctor_iommu(struct nvkm_object *parent, struct nvkm_object *engine,
 		ret = iommu_map(imem->domain, offset, page_to_phys(p),
 				PAGE_SIZE, IOMMU_READ | IOMMU_WRITE);
 		if (ret < 0) {
-			nv_error(imem, "IOMMU mapping failure: %d\n", ret);
+			nvkm_error(subdev, "IOMMU mapping failure: %d\n", ret);
 
 			while (i-- > 0) {
 				offset -= PAGE_SIZE;
@@ -329,11 +332,12 @@ gk20a_instobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	struct nvkm_instobj_args *args = data;
 	struct gk20a_instmem *imem = (void *)nvkm_instmem(parent);
 	struct gk20a_instobj *node;
+	struct nvkm_subdev *subdev = &imem->base.subdev;
 	u32 size, align;
 	int ret;
 
-	nv_debug(parent, "%s (%s): size: %x align: %x\n", __func__,
-		 imem->domain ? "IOMMU" : "DMA", args->size, args->align);
+	nvkm_debug(subdev, "%s (%s): size: %x align: %x\n", __func__,
+		   imem->domain ? "IOMMU" : "DMA", args->size, args->align);
 
 	/* Round size and align to page bounds */
 	size = max(roundup(args->size, PAGE_SIZE), PAGE_SIZE);
@@ -359,8 +363,8 @@ gk20a_instobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	node->base.addr = node->mem->offset;
 	node->base.size = size;
 
-	nv_debug(parent, "alloc size: 0x%x, align: 0x%x, gaddr: 0x%llx\n",
-		 size, align, node->mem->offset);
+	nvkm_debug(subdev, "alloc size: 0x%x, align: 0x%x, gaddr: 0x%llx\n",
+		   size, align, node->mem->offset);
 
 	return 0;
 }
@@ -410,7 +414,7 @@ gk20a_instmem_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 		imem->iommu_pgshift = plat->gpu->iommu.pgshift;
 		imem->mm_mutex = &plat->gpu->iommu.mutex;
 
-		nv_info(imem, "using IOMMU\n");
+		nvkm_info(&imem->base.subdev, "using IOMMU\n");
 	} else {
 		init_dma_attrs(&imem->attrs);
 		/*
@@ -422,7 +426,7 @@ gk20a_instmem_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 		dma_set_attr(DMA_ATTR_WRITE_COMBINE, &imem->attrs);
 		dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &imem->attrs);
 
-		nv_info(imem, "using DMA API\n");
+		nvkm_info(&imem->base.subdev, "using DMA API\n");
 	}
 
 	return 0;
...
@@ -49,7 +49,7 @@ nv40_instmem_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 		  struct nvkm_oclass *oclass, void *data, u32 size,
 		  struct nvkm_object **pobject)
 {
-	struct nvkm_device *device = nv_device(parent);
+	struct nvkm_device *device = (void *)parent;
 	struct nv04_instmem *imem;
 	int ret, bar, vs;
 
@@ -67,7 +67,7 @@ nv40_instmem_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	imem->iomem = ioremap(nv_device_resource_start(device, bar),
 			      nv_device_resource_len(device, bar));
 	if (!imem->iomem) {
-		nv_error(imem, "unable to map PRAMIN BAR\n");
+		nvkm_error(&imem->base.subdev, "unable to map PRAMIN BAR\n");
 		return -EFAULT;
 	}
...