Commit b3beb167 authored by Ben Skeggs

drm/nouveau: modify object accessors, offset in bytes rather than dwords

Reviewed-by: Francisco Jerez <currojerez@riseup.net>
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
parent 479dcaea
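
The change in a nutshell: nv_ro32()/nv_wo32() used to be static inlines taking a struct drm_device * plus a dword index into the object; they become out-of-line functions taking only the gpuobj (which gains a dev backpointer) and a byte offset. A minimal before/after sketch of a typical call site, taken from the zeroing loops in this diff:

	/* before: dev passed explicitly, offset counted in dwords */
	for (i = 0; i < gpuobj->im_pramin->size; i += 4)
		nv_wo32(dev, gpuobj, i / 4, 0);

	/* after: dev comes from gpuobj->dev, offset counted in bytes */
	for (i = 0; i < gpuobj->im_pramin->size; i += 4)
		nv_wo32(gpuobj, i, 0);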
@@ -138,6 +138,7 @@ enum nouveau_flags {
 #define NVOBJ_FLAG_ZERO_FREE		(1 << 2)
 #define NVOBJ_FLAG_FAKE			(1 << 3)
 struct nouveau_gpuobj {
+	struct drm_device *dev;
 	struct list_head list;
 	struct nouveau_channel *im_channel;
@@ -1291,17 +1292,8 @@ static inline void nv_wi32(struct drm_device *dev, unsigned offset, u32 val)
 }
 
 /* object access */
-static inline u32 nv_ro32(struct drm_device *dev, struct nouveau_gpuobj *obj,
-			  unsigned index)
-{
-	return nv_ri32(dev, obj->im_pramin->start + index * 4);
-}
-
-static inline void nv_wo32(struct drm_device *dev, struct nouveau_gpuobj *obj,
-			   unsigned index, u32 val)
-{
-	nv_wi32(dev, obj->im_pramin->start + index * 4, val);
-}
+extern u32 nv_ro32(struct nouveau_gpuobj *, u32 offset);
+extern void nv_wo32(struct nouveau_gpuobj *, u32 offset, u32 val);
 
 /*
  * Logging
......
@@ -126,7 +126,7 @@ gr_def(struct nouveau_grctx *ctx, uint32_t reg, uint32_t val)
 	reg = (reg - 0x00400000) / 4;
 	reg = (reg - ctx->ctxprog_reg) + ctx->ctxvals_base;
 
-	nv_wo32(ctx->dev, ctx->data, reg, val);
+	nv_wo32(ctx->data, reg * 4, val);
 }
 #endif
......
@@ -169,8 +169,9 @@ nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size,
 			virt += (end - pte);
 
 			while (pte < end) {
-				nv_wo32(dev, pgt, pte++, offset_l);
-				nv_wo32(dev, pgt, pte++, offset_h);
+				nv_wo32(pgt, (pte * 4) + 0, offset_l);
+				nv_wo32(pgt, (pte * 4) + 4, offset_h);
+				pte += 2;
 			}
 		}
 	}
@@ -203,8 +204,10 @@ nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size)
 		pages -= (end - pte);
 		virt += (end - pte) << 15;
 
-		while (pte < end)
-			nv_wo32(dev, pgt, pte++, 0);
+		while (pte < end) {
+			nv_wo32(pgt, (pte * 4), 0);
+			pte++;
+		}
 	}
 
 	dev_priv->engine.instmem.flush(dev);
......
...@@ -88,6 +88,7 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan, ...@@ -88,6 +88,7 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
if (!gpuobj) if (!gpuobj)
return -ENOMEM; return -ENOMEM;
NV_DEBUG(dev, "gpuobj %p\n", gpuobj); NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
gpuobj->dev = dev;
gpuobj->flags = flags; gpuobj->flags = flags;
gpuobj->im_channel = chan; gpuobj->im_channel = chan;
@@ -134,7 +135,7 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
 		int i;
 
 		for (i = 0; i < gpuobj->im_pramin->size; i += 4)
-			nv_wo32(dev, gpuobj, i/4, 0);
+			nv_wo32(gpuobj, i, 0);
 		engine->instmem.flush(dev);
 	}
@@ -224,7 +225,7 @@ nouveau_gpuobj_del(struct drm_device *dev, struct nouveau_gpuobj **pgpuobj)
 	if (gpuobj->im_pramin && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) {
 		for (i = 0; i < gpuobj->im_pramin->size; i += 4)
-			nv_wo32(dev, gpuobj, i/4, 0);
+			nv_wo32(gpuobj, i, 0);
 		engine->instmem.flush(dev);
 	}
@@ -435,6 +436,7 @@ nouveau_gpuobj_new_fake(struct drm_device *dev, uint32_t p_offset,
 	if (!gpuobj)
 		return -ENOMEM;
 	NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
+	gpuobj->dev = dev;
 	gpuobj->im_channel = NULL;
 	gpuobj->flags = flags | NVOBJ_FLAG_FAKE;
@@ -458,7 +460,7 @@ nouveau_gpuobj_new_fake(struct drm_device *dev, uint32_t p_offset,
 	if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
 		for (i = 0; i < gpuobj->im_pramin->size; i += 4)
-			nv_wo32(dev, gpuobj, i/4, 0);
+			nv_wo32(gpuobj, i, 0);
 		dev_priv->engine.instmem.flush(dev);
 	}
@@ -555,14 +557,12 @@ nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class,
 		adjust = offset &  0x00000fff;
 		frame  = offset & ~0x00000fff;
 
-		nv_wo32(dev, *gpuobj, 0, ((1<<12) | (1<<13) |
-			(adjust << 20) |
-			(access << 14) |
-			(target << 16) |
-			class));
-		nv_wo32(dev, *gpuobj, 1, size - 1);
-		nv_wo32(dev, *gpuobj, 2, frame | pte_flags);
-		nv_wo32(dev, *gpuobj, 3, frame | pte_flags);
+		nv_wo32(*gpuobj, 0, ((1<<12) | (1<<13) | (adjust << 20) |
+				     (access << 14) | (target << 16) |
+				     class));
+		nv_wo32(*gpuobj, 4, size - 1);
+		nv_wo32(*gpuobj, 8, frame | pte_flags);
+		nv_wo32(*gpuobj, 12, frame | pte_flags);
 	} else {
 		uint64_t limit = offset + size - 1;
 		uint32_t flags0, flags5;
@@ -575,12 +575,12 @@ nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class,
 			flags5 = 0x00080000;
 		}
 
-		nv_wo32(dev, *gpuobj, 0, flags0 | class);
-		nv_wo32(dev, *gpuobj, 1, lower_32_bits(limit));
-		nv_wo32(dev, *gpuobj, 2, lower_32_bits(offset));
-		nv_wo32(dev, *gpuobj, 3, ((upper_32_bits(limit) & 0xff) << 24) |
-			(upper_32_bits(offset) & 0xff));
-		nv_wo32(dev, *gpuobj, 5, flags5);
+		nv_wo32(*gpuobj, 0, flags0 | class);
+		nv_wo32(*gpuobj, 4, lower_32_bits(limit));
+		nv_wo32(*gpuobj, 8, lower_32_bits(offset));
+		nv_wo32(*gpuobj, 12, ((upper_32_bits(limit) & 0xff) << 24) |
+				     (upper_32_bits(offset) & 0xff));
+		nv_wo32(*gpuobj, 20, flags5);
 	}
 
 	instmem->flush(dev);
@@ -699,25 +699,25 @@ nouveau_gpuobj_gr_new(struct nouveau_channel *chan, int class,
 	}
 
 	if (dev_priv->card_type >= NV_50) {
-		nv_wo32(dev, *gpuobj, 0, class);
-		nv_wo32(dev, *gpuobj, 5, 0x00010000);
+		nv_wo32(*gpuobj,  0, class);
+		nv_wo32(*gpuobj, 20, 0x00010000);
 	} else {
 		switch (class) {
 		case NV_CLASS_NULL:
-			nv_wo32(dev, *gpuobj, 0, 0x00001030);
-			nv_wo32(dev, *gpuobj, 1, 0xFFFFFFFF);
+			nv_wo32(*gpuobj, 0, 0x00001030);
+			nv_wo32(*gpuobj, 4, 0xFFFFFFFF);
 			break;
 		default:
			if (dev_priv->card_type >= NV_40) {
-				nv_wo32(dev, *gpuobj, 0, class);
+				nv_wo32(*gpuobj, 0, class);
 #ifdef __BIG_ENDIAN
-				nv_wo32(dev, *gpuobj, 2, 0x01000000);
+				nv_wo32(*gpuobj, 8, 0x01000000);
 #endif
 			} else {
 #ifdef __BIG_ENDIAN
-				nv_wo32(dev, *gpuobj, 0, class | 0x00080000);
+				nv_wo32(*gpuobj, 0, class | 0x00080000);
 #else
-				nv_wo32(dev, *gpuobj, 0, class);
+				nv_wo32(*gpuobj, 0, class);
 #endif
 			}
 		}
@@ -836,21 +836,20 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
 		if (ret)
 			return ret;
 		for (i = 0; i < 0x4000; i += 8) {
-			nv_wo32(dev, chan->vm_pd, (i+0)/4, 0x00000000);
-			nv_wo32(dev, chan->vm_pd, (i+4)/4, 0xdeadcafe);
+			nv_wo32(chan->vm_pd, i + 0, 0x00000000);
+			nv_wo32(chan->vm_pd, i + 4, 0xdeadcafe);
 		}
 
-		pde = (dev_priv->vm_gart_base / (512*1024*1024)) * 2;
+		pde = (dev_priv->vm_gart_base / (512*1024*1024)) * 8;
 		ret = nouveau_gpuobj_ref_add(dev, NULL, 0,
 					     dev_priv->gart_info.sg_ctxdma,
 					     &chan->vm_gart_pt);
 		if (ret)
			return ret;
-		nv_wo32(dev, chan->vm_pd, pde++,
-			chan->vm_gart_pt->instance | 0x03);
-		nv_wo32(dev, chan->vm_pd, pde++, 0x00000000);
+		nv_wo32(chan->vm_pd, pde + 0, chan->vm_gart_pt->instance | 3);
+		nv_wo32(chan->vm_pd, pde + 4, 0x00000000);
 
-		pde = (dev_priv->vm_vram_base / (512*1024*1024)) * 2;
+		pde = (dev_priv->vm_vram_base / (512*1024*1024)) * 8;
 		for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) {
 			ret = nouveau_gpuobj_ref_add(dev, NULL, 0,
 						     dev_priv->vm_vram_pt[i],
@@ -858,9 +857,10 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
 			if (ret)
 				return ret;
 
-			nv_wo32(dev, chan->vm_pd, pde++,
-				chan->vm_vram_pt[i]->instance | 0x61);
-			nv_wo32(dev, chan->vm_pd, pde++, 0x00000000);
+			nv_wo32(chan->vm_pd, pde + 0,
+				chan->vm_vram_pt[i]->instance | 0x61);
+			nv_wo32(chan->vm_pd, pde + 4, 0x00000000);
+			pde += 8;
 		}
 
 		instmem->flush(dev);
@@ -996,8 +996,8 @@ nouveau_gpuobj_suspend(struct drm_device *dev)
 			return -ENOMEM;
 		}
 
-		for (i = 0; i < gpuobj->im_pramin->size / 4; i++)
-			gpuobj->im_backing_suspend[i] = nv_ro32(dev, gpuobj, i);
+		for (i = 0; i < gpuobj->im_pramin->size; i += 4)
+			gpuobj->im_backing_suspend[i/4] = nv_ro32(gpuobj, i);
 	}
 
 	return 0;
@@ -1042,8 +1042,8 @@ nouveau_gpuobj_resume(struct drm_device *dev)
 		if (!gpuobj->im_backing_suspend)
 			continue;
 
-		for (i = 0; i < gpuobj->im_pramin->size / 4; i++)
-			nv_wo32(dev, gpuobj, i, gpuobj->im_backing_suspend[i]);
+		for (i = 0; i < gpuobj->im_pramin->size; i += 4)
+			nv_wo32(gpuobj, i, gpuobj->im_backing_suspend[i/4]);
 		dev_priv->engine.instmem.flush(dev);
 	}
@@ -1120,3 +1120,17 @@ int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data,
 
 	return 0;
 }
+
+u32
+nv_ro32(struct nouveau_gpuobj *gpuobj, u32 offset)
+{
+	struct drm_device *dev = gpuobj->dev;
+	return nv_ri32(dev, gpuobj->im_pramin->start + offset);
+}
+
+void
+nv_wo32(struct nouveau_gpuobj *gpuobj, u32 offset, u32 val)
+{
+	struct drm_device *dev = gpuobj->dev;
+	nv_wi32(dev, gpuobj->im_pramin->start + offset, val);
+}
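
For reference, the two definitions above make the conversion rule explicit: a dword index n under the old API corresponds to byte offset n * 4 under the new one, since the accessors no longer scale by 4 internally. A quick equivalence sketch (0x14 is just an arbitrary example offset):

	/* both reads hit the same PRAMIN word, per the definitions above */
	u32 a = nv_ro32(gpuobj, 0x14);
	u32 b = nv_ri32(gpuobj->dev, gpuobj->im_pramin->start + 0x14);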
@@ -54,7 +54,7 @@ nouveau_ramht_entry_valid(struct drm_device *dev, struct nouveau_gpuobj *ramht,
 			  uint32_t offset)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	uint32_t ctx = nv_ro32(dev, ramht, (offset + 4)/4);
+	uint32_t ctx = nv_ro32(ramht, offset + 4);
 
 	if (dev_priv->card_type < NV_40)
 		return ((ctx & NV_RAMHT_CONTEXT_VALID) != 0);
@@ -100,15 +100,15 @@ nouveau_ramht_insert(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
 			NV_DEBUG(dev,
 				 "insert ch%d 0x%08x: h=0x%08x, c=0x%08x\n",
 				 chan->id, co, ref->handle, ctx);
-			nv_wo32(dev, ramht, (co + 0)/4, ref->handle);
-			nv_wo32(dev, ramht, (co + 4)/4, ctx);
+			nv_wo32(ramht, co + 0, ref->handle);
+			nv_wo32(ramht, co + 4, ctx);
 
 			list_add_tail(&ref->list, &chan->ramht_refs);
 			instmem->flush(dev);
 			return 0;
 		}
 		NV_DEBUG(dev, "collision ch%d 0x%08x: h=0x%08x\n",
-			 chan->id, co, nv_ro32(dev, ramht, co/4));
+			 chan->id, co, nv_ro32(ramht, co));
 
 		co += 8;
 		if (co >= dev_priv->ramht_size)
@@ -136,13 +136,13 @@ nouveau_ramht_remove(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
 	co = ho = nouveau_ramht_hash_handle(dev, chan->id, ref->handle);
 	do {
 		if (nouveau_ramht_entry_valid(dev, ramht, co) &&
-		    (ref->handle == nv_ro32(dev, ramht, (co/4)))) {
+		    (ref->handle == nv_ro32(ramht, co))) {
 			NV_DEBUG(dev,
 				 "remove ch%d 0x%08x: h=0x%08x, c=0x%08x\n",
 				 chan->id, co, ref->handle,
-				 nv_ro32(dev, ramht, (co + 4)));
-			nv_wo32(dev, ramht, (co + 0)/4, 0x00000000);
-			nv_wo32(dev, ramht, (co + 4)/4, 0x00000000);
+				 nv_ro32(ramht, co + 4));
+			nv_wo32(ramht, co + 0, 0x00000000);
+			nv_wo32(ramht, co + 4, 0x00000000);
 
 			list_del(&ref->list);
 			instmem->flush(dev);
......
@@ -105,11 +105,13 @@ nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
 		uint32_t offset_h = upper_32_bits(dma_offset);
 
 		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) {
-			if (dev_priv->card_type < NV_50)
-				nv_wo32(dev, gpuobj, pte++, offset_l | 3);
-			else {
-				nv_wo32(dev, gpuobj, pte++, offset_l | 0x21);
-				nv_wo32(dev, gpuobj, pte++, offset_h & 0xff);
+			if (dev_priv->card_type < NV_50) {
+				nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3);
+				pte += 1;
+			} else {
+				nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 0x21);
+				nv_wo32(gpuobj, (pte * 4) + 4, offset_h & 0xff);
+				pte += 2;
 			}
 
 			dma_offset += NV_CTXDMA_PAGE_SIZE;
@@ -145,11 +147,13 @@ nouveau_sgdma_unbind(struct ttm_backend *be)
 			dma_addr_t dma_offset = dev_priv->gart_info.sg_dummy_bus;
 
 			for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) {
-				if (dev_priv->card_type < NV_50)
-					nv_wo32(dev, gpuobj, pte++, dma_offset | 3);
-				else {
-					nv_wo32(dev, gpuobj, pte++, dma_offset | 0x21);
-					nv_wo32(dev, gpuobj, pte++, 0x00000000);
+				if (dev_priv->card_type < NV_50) {
+					nv_wo32(gpuobj, (pte * 4) + 0, dma_offset | 3);
+					pte += 1;
+				} else {
+					nv_wo32(gpuobj, (pte * 4), dma_offset | 0x21);
+					nv_wo32(gpuobj, (pte * 4) + 4, 0x00000000);
+					pte += 2;
 				}
 
 				dma_offset += NV_CTXDMA_PAGE_SIZE;
@@ -258,21 +262,21 @@ nouveau_sgdma_init(struct drm_device *dev)
 		/* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA do this, and
 		 * confirmed to work on c51.  Perhaps means NV_DMA_TARGET_PCIE
 		 * on those cards? */
-		nv_wo32(dev, gpuobj, 0, NV_CLASS_DMA_IN_MEMORY |
-			(1 << 12) /* PT present */ |
-			(0 << 13) /* PT *not* linear */ |
-			(NV_DMA_ACCESS_RW << 14) |
-			(NV_DMA_TARGET_PCI << 16));
-		nv_wo32(dev, gpuobj, 1, aper_size - 1);
+		nv_wo32(gpuobj, 0, NV_CLASS_DMA_IN_MEMORY |
+			(1 << 12) /* PT present */ |
+			(0 << 13) /* PT *not* linear */ |
+			(NV_DMA_ACCESS_RW << 14) |
+			(NV_DMA_TARGET_PCI << 16));
+		nv_wo32(gpuobj, 4, aper_size - 1);
 		for (i = 2; i < 2 + (aper_size >> 12); i++) {
-			nv_wo32(dev, gpuobj, i,
+			nv_wo32(gpuobj, i * 4,
 				dev_priv->gart_info.sg_dummy_bus | 3);
 		}
 	} else {
 		for (i = 0; i < obj_size; i += 8) {
-			nv_wo32(dev, gpuobj, (i+0)/4,
+			nv_wo32(gpuobj, i + 0,
 				dev_priv->gart_info.sg_dummy_bus | 0x21);
-			nv_wo32(dev, gpuobj, (i+4)/4, 0);
+			nv_wo32(gpuobj, i + 4, 0);
 		}
 	}
 	dev_priv->engine.instmem.flush(dev);
@@ -308,9 +312,9 @@ nouveau_sgdma_get_page(struct drm_device *dev, uint32_t offset, uint32_t *page)
 	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
 	int pte;
 
-	pte = (offset >> NV_CTXDMA_PAGE_SHIFT);
+	pte = (offset >> NV_CTXDMA_PAGE_SHIFT) << 2;
 	if (dev_priv->card_type < NV_50) {
-		*page = nv_ro32(dev, gpuobj, (pte + 2)) & ~NV_CTXDMA_PAGE_MASK;
+		*page = nv_ro32(gpuobj, (pte + 8)) & ~NV_CTXDMA_PAGE_MASK;
 		return 0;
 	}
......
@@ -38,10 +38,10 @@
 #define NV04_RAMFC_ENGINE			0x14
 #define NV04_RAMFC_PULL1_ENGINE			0x18
 
-#define RAMFC_WR(offset, val) nv_wo32(dev, chan->ramfc->gpuobj, \
-					 NV04_RAMFC_##offset/4, (val))
-#define RAMFC_RD(offset)      nv_ro32(dev, chan->ramfc->gpuobj, \
-					 NV04_RAMFC_##offset/4)
+#define RAMFC_WR(offset, val) nv_wo32(chan->ramfc->gpuobj, \
+					 NV04_RAMFC_##offset, (val))
+#define RAMFC_RD(offset)      nv_ro32(chan->ramfc->gpuobj, \
+					 NV04_RAMFC_##offset)
 
 void
 nv04_fifo_disable(struct drm_device *dev)
......
@@ -73,8 +73,8 @@ nv40_graph_create_context(struct nouveau_channel *chan)
 	ctx.data = chan->ramin_grctx->gpuobj;
 	nv40_grctx_init(&ctx);
 
-	nv_wo32(dev, chan->ramin_grctx->gpuobj, 0,
-		chan->ramin_grctx->gpuobj->im_pramin->start);
+	nv_wo32(chan->ramin_grctx->gpuobj, 0,
+		chan->ramin_grctx->gpuobj->im_pramin->start);
 	return 0;
 }
......
@@ -596,13 +596,13 @@ nv40_graph_construct_shader(struct nouveau_grctx *ctx)
 
 	offset += 0x0280/4;
 	for (i = 0; i < 16; i++, offset += 2)
-		nv_wo32(dev, obj, offset, 0x3f800000);
+		nv_wo32(obj, offset * 4, 0x3f800000);
 
 	for (vs = 0; vs < vs_nr; vs++, offset += vs_len) {
 		for (i = 0; i < vs_nr_b0 * 6; i += 6)
-			nv_wo32(dev, obj, offset + b0_offset + i, 0x00000001);
+			nv_wo32(obj, (offset + b0_offset + i) * 4, 0x00000001);
 		for (i = 0; i < vs_nr_b1 * 4; i += 4)
-			nv_wo32(dev, obj, offset + b1_offset + i, 0x3f800000);
+			nv_wo32(obj, (offset + b1_offset + i) * 4, 0x3f800000);
 	}
 }
......
@@ -72,15 +72,15 @@ nv50_evo_dmaobj_new(struct nouveau_channel *evo, uint32_t class, uint32_t name,
 		return ret;
 	}
 
-	nv_wo32(dev, obj, 0, (tile_flags << 22) | (magic_flags << 16) | class);
-	nv_wo32(dev, obj, 1, limit);
-	nv_wo32(dev, obj, 2, offset);
-	nv_wo32(dev, obj, 3, 0x00000000);
-	nv_wo32(dev, obj, 4, 0x00000000);
+	nv_wo32(obj,  0, (tile_flags << 22) | (magic_flags << 16) | class);
+	nv_wo32(obj,  4, limit);
+	nv_wo32(obj,  8, offset);
+	nv_wo32(obj, 12, 0x00000000);
+	nv_wo32(obj, 16, 0x00000000);
 	if (dev_priv->card_type < NV_C0)
-		nv_wo32(dev, obj, 5, 0x00010000);
+		nv_wo32(obj, 20, 0x00010000);
 	else
-		nv_wo32(dev, obj, 5, 0x00020000);
+		nv_wo32(obj, 20, 0x00020000);
 	dev_priv->engine.instmem.flush(dev);
 
 	return 0;
......
@@ -220,20 +220,20 @@ nv50_graph_create_context(struct nouveau_channel *chan)
 	obj = chan->ramin_grctx->gpuobj;
 
 	hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20;
-	nv_wo32(dev, ramin, (hdr + 0x00)/4, 0x00190002);
-	nv_wo32(dev, ramin, (hdr + 0x04)/4, chan->ramin_grctx->instance +
-				   pgraph->grctx_size - 1);
-	nv_wo32(dev, ramin, (hdr + 0x08)/4, chan->ramin_grctx->instance);
-	nv_wo32(dev, ramin, (hdr + 0x0c)/4, 0);
-	nv_wo32(dev, ramin, (hdr + 0x10)/4, 0);
-	nv_wo32(dev, ramin, (hdr + 0x14)/4, 0x00010000);
+	nv_wo32(ramin, hdr + 0x00, 0x00190002);
+	nv_wo32(ramin, hdr + 0x04, chan->ramin_grctx->instance +
+				   pgraph->grctx_size - 1);
+	nv_wo32(ramin, hdr + 0x08, chan->ramin_grctx->instance);
+	nv_wo32(ramin, hdr + 0x0c, 0);
+	nv_wo32(ramin, hdr + 0x10, 0);
+	nv_wo32(ramin, hdr + 0x14, 0x00010000);
 
 	ctx.dev = chan->dev;
 	ctx.mode = NOUVEAU_GRCTX_VALS;
 	ctx.data = obj;
 	nv50_grctx_init(&ctx);
 
-	nv_wo32(dev, obj, 0x00000/4, chan->ramin->instance >> 12);
+	nv_wo32(obj, 0x00000, chan->ramin->instance >> 12);
 
 	dev_priv->engine.instmem.flush(dev);
 	return 0;
@@ -252,7 +252,7 @@ nv50_graph_destroy_context(struct nouveau_channel *chan)
 		return;
 
 	for (i = hdr; i < hdr + 24; i += 4)
-		nv_wo32(dev, chan->ramin->gpuobj, i/4, 0);
+		nv_wo32(chan->ramin->gpuobj, i, 0);
 	dev_priv->engine.instmem.flush(dev);
 
 	nouveau_gpuobj_ref_del(dev, &chan->ramin_grctx);
......
@@ -995,7 +995,7 @@ xf_emit(struct nouveau_grctx *ctx, int num, uint32_t val) {
 	int i;
 	if (val && ctx->mode == NOUVEAU_GRCTX_VALS)
 		for (i = 0; i < num; i++)
-			nv_wo32(ctx->dev, ctx->data, ctx->ctxvals_pos + (i << 3), val);
+			nv_wo32(ctx->data, (ctx->ctxvals_pos + (i << 3)) * 4, val);
 	ctx->ctxvals_pos += num << 3;
 }
......
@@ -449,9 +449,10 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
 	}
 
 	while (pte < pte_end) {
-		nv_wo32(dev, pramin_pt, pte++, lower_32_bits(vram));
-		nv_wo32(dev, pramin_pt, pte++, upper_32_bits(vram));
+		nv_wo32(pramin_pt, (pte * 4) + 0, lower_32_bits(vram));
+		nv_wo32(pramin_pt, (pte * 4) + 4, upper_32_bits(vram));
 		vram += NV50_INSTMEM_PAGE_SIZE;
+		pte += 2;
 	}
 
 	dev_priv->engine.instmem.flush(dev);
@@ -476,8 +477,9 @@ nv50_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
 	pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte;
 
 	while (pte < pte_end) {
-		nv_wo32(dev, priv->pramin_pt->gpuobj, pte++, 0x00000000);
-		nv_wo32(dev, priv->pramin_pt->gpuobj, pte++, 0x00000000);
+		nv_wo32(priv->pramin_pt->gpuobj, (pte * 4) + 0, 0x00000000);
+		nv_wo32(priv->pramin_pt->gpuobj, (pte * 4) + 4, 0x00000000);
+		pte += 2;
 	}
 
 	dev_priv->engine.instmem.flush(dev);
......