Commit 134fdc1a authored by Ben Skeggs

drm/nouveau/core/mm: replace region list with next pointer

We never have any need for a doubly-linked list here, and as there's
generally a large number of these objects, replace it with a singly-
linked list in order to save some memory.
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
parent 04b88677
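For context on the saving: an embedded struct list_head carries both a next and a prev pointer, so dropping it for a bare next pointer trims one pointer (8 bytes on LP64) from every nvkm_mm_node. A standalone sketch of the arithmetic, with illustrative struct names rather than the kernel headers:

	#include <stdio.h>

	/* Minimal stand-ins: a list_head is two pointers, the new scheme one. */
	struct list_head { struct list_head *next, *prev; };

	struct node_old { struct list_head rl_entry; };  /* 16 bytes on LP64 */
	struct node_new { struct node_new *next; };      /*  8 bytes on LP64 */

	int main(void)
	{
		printf("%zu -> %zu bytes of linkage per node\n",
		       sizeof(struct node_old), sizeof(struct node_new));
		return 0;
	}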
@@ -5,7 +5,7 @@
 struct nvkm_mm_node {
 	struct list_head nl_entry;
 	struct list_head fl_entry;
-	struct list_head rl_entry;
+	struct nvkm_mm_node *next;
 
 #define NVKM_MM_HEAP_ANY 0x00
 	u8 heap;
@@ -38,4 +38,10 @@ int nvkm_mm_tail(struct nvkm_mm *, u8 heap, u8 type, u32 size_max,
 		 u32 size_min, u32 align, struct nvkm_mm_node **);
 void nvkm_mm_free(struct nvkm_mm *, struct nvkm_mm_node **);
 void nvkm_mm_dump(struct nvkm_mm *, const char *);
+
+static inline bool
+nvkm_mm_contiguous(struct nvkm_mm_node *node)
+{
+	return !node->next;
+}
 #endif
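The new nvkm_mm_contiguous() helper captures the invariant the rest of the patch relies on: an allocation is one physically contiguous range exactly when its region chain holds a single node, i.e. when the head's next is NULL. A reduced userspace sketch of that check (illustrative names, not the kernel types):

	#include <assert.h>
	#include <stddef.h>

	struct mm_node { struct mm_node *next; };  /* only the field the check reads */

	static int contiguous(const struct mm_node *node)
	{
		return !node->next;  /* a single node covers one physical range */
	}

	int main(void)
	{
		struct mm_node a = { .next = NULL }, b = { .next = NULL };

		assert(contiguous(&a));   /* one region: contiguous */
		a.next = &b;              /* chain on a second region */
		assert(!contiguous(&a));  /* two regions: fragmented */
		return 0;
	}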
@@ -29,7 +29,7 @@ struct nvkm_mem {
 	u8 page_shift;
 
 	struct nvkm_mm_node *tag;
-	struct list_head regions;
+	struct nvkm_mm_node *mem;
 	dma_addr_t *pages;
 	u32 memtype;
 	u64 offset;
...
@@ -321,7 +321,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype, bool contig)
 	if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG) {
 		if (bo->mem.mem_type == TTM_PL_VRAM) {
 			struct nvkm_mem *mem = bo->mem.mm_node;
-			if (!list_is_singular(&mem->regions))
+			if (!nvkm_mm_contiguous(mem->mem))
 				evict = true;
 		}
 		nvbo->tile_flags &= ~NOUVEAU_GEM_TILE_NONCONTIG;
...
@@ -147,6 +147,7 @@ nvkm_mm_head(struct nvkm_mm *mm, u8 heap, u8 type, u32 size_max, u32 size_min,
 		if (!this)
 			return -ENOMEM;
 
+		this->next = NULL;
 		this->type = type;
 		list_del(&this->fl_entry);
 		*pnode = this;
@@ -225,6 +226,7 @@ nvkm_mm_tail(struct nvkm_mm *mm, u8 heap, u8 type, u32 size_max, u32 size_min,
 		if (!this)
 			return -ENOMEM;
 
+		this->next = NULL;
 		this->type = type;
 		list_del(&this->fl_entry);
 		*pnode = this;
...
@@ -445,7 +445,7 @@ gf100_ram_get(struct nvkm_ram *ram, u64 size, u32 align, u32 ncmin,
 {
 	struct nvkm_ltc *ltc = ram->fb->subdev.device->ltc;
 	struct nvkm_mm *mm = &ram->vram;
-	struct nvkm_mm_node *r;
+	struct nvkm_mm_node **node, *r;
 	struct nvkm_mem *mem;
 	int type = (memtype & 0x0ff);
 	int back = (memtype & 0x800);
@@ -462,7 +462,6 @@ gf100_ram_get(struct nvkm_ram *ram, u64 size, u32 align, u32 ncmin,
 	if (!mem)
 		return -ENOMEM;
 
-	INIT_LIST_HEAD(&mem->regions);
 	mem->size = size;
 
 	mutex_lock(&ram->fb->subdev.mutex);
@@ -478,6 +477,7 @@ gf100_ram_get(struct nvkm_ram *ram, u64 size, u32 align, u32 ncmin,
 	}
 	mem->memtype = type;
 
+	node = &mem->mem;
 	do {
 		if (back)
 			ret = nvkm_mm_tail(mm, 0, 1, size, ncmin, align, &r);
@@ -489,13 +489,13 @@ gf100_ram_get(struct nvkm_ram *ram, u64 size, u32 align, u32 ncmin,
 			return ret;
 		}
 
-		list_add_tail(&r->rl_entry, &mem->regions);
+		*node = r;
+		node = &r->next;
 		size -= r->length;
 	} while (size);
 	mutex_unlock(&ram->fb->subdev.mutex);
 
-	r = list_first_entry(&mem->regions, struct nvkm_mm_node, rl_entry);
-	mem->offset = (u64)r->offset << NVKM_RAM_MM_SHIFT;
+	mem->offset = (u64)mem->mem->offset << NVKM_RAM_MM_SHIFT;
 	*pmem = mem;
 	return 0;
 }
...
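Both ram_get() paths now append with the classic pointer-to-pointer idiom: node always addresses the slot the next region must fill (first &mem->mem, then each new node's &r->next), so regions are linked in order, in O(1), with no list head or tail sentinel. A self-contained sketch of the idiom (illustrative names; the real code obtains its regions from nvkm_mm_head()/nvkm_mm_tail() rather than calloc()):

	#include <stdio.h>
	#include <stdlib.h>

	struct mm_node { struct mm_node *next; unsigned length; };

	int main(void)
	{
		struct mm_node *head = NULL;
		struct mm_node **node = &head;  /* slot the next region must fill */
		unsigned size = 3;              /* pretend we still need 3 units */

		do {
			struct mm_node *r = calloc(1, sizeof(*r));
			if (!r)
				return 1;
			r->length = 1;       /* pretend the allocator returned 1 unit */
			*node = r;           /* fill the slot */
			node = &r->next;     /* advance to the new tail slot */
			size -= r->length;
		} while (size);

		for (struct mm_node *r = head; r; r = r->next)
			printf("region, length %u\n", r->length);

		while (head) {               /* tidy up */
			struct mm_node *next = head->next;
			free(head);
			head = next;
		}
		return 0;
	}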
@@ -496,15 +496,12 @@ nv50_ram_tidy(struct nvkm_ram *base)
 
 void
 __nv50_ram_put(struct nvkm_ram *ram, struct nvkm_mem *mem)
 {
-	struct nvkm_mm_node *this;
-
-	while (!list_empty(&mem->regions)) {
-		this = list_first_entry(&mem->regions, typeof(*this), rl_entry);
-
-		list_del(&this->rl_entry);
-		nvkm_mm_free(&ram->vram, &this);
+	struct nvkm_mm_node *next = mem->mem;
+	struct nvkm_mm_node *node;
+	while ((node = next)) {
+		next = node->next;
+		nvkm_mm_free(&ram->vram, &node);
 	}
-
 	nvkm_mm_free(&ram->tags, &mem->tag);
 }
@@ -530,7 +527,7 @@ nv50_ram_get(struct nvkm_ram *ram, u64 size, u32 align, u32 ncmin,
 {
 	struct nvkm_mm *heap = &ram->vram;
 	struct nvkm_mm *tags = &ram->tags;
-	struct nvkm_mm_node *r;
+	struct nvkm_mm_node **node, *r;
 	struct nvkm_mem *mem;
 	int comp = (memtype & 0x300) >> 8;
 	int type = (memtype & 0x07f);
@@ -559,11 +556,11 @@ nv50_ram_get(struct nvkm_ram *ram, u64 size, u32 align, u32 ncmin,
 		comp = 0;
 	}
 
-	INIT_LIST_HEAD(&mem->regions);
 	mem->memtype = (comp << 7) | type;
 	mem->size = max;
 
 	type = nv50_fb_memtype[type];
+	node = &mem->mem;
 	do {
 		if (back)
 			ret = nvkm_mm_tail(heap, 0, type, max, min, align, &r);
@@ -575,13 +572,13 @@ nv50_ram_get(struct nvkm_ram *ram, u64 size, u32 align, u32 ncmin,
 			return ret;
 		}
 
-		list_add_tail(&r->rl_entry, &mem->regions);
+		*node = r;
+		node = &r->next;
 		max -= r->length;
 	} while (max);
 	mutex_unlock(&ram->fb->subdev.mutex);
 
-	r = list_first_entry(&mem->regions, struct nvkm_mm_node, rl_entry);
-	mem->offset = (u64)r->offset << NVKM_RAM_MM_SHIFT;
+	mem->offset = (u64)mem->mem->offset << NVKM_RAM_MM_SHIFT;
 	*pmem = mem;
 	return 0;
 }
...
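The reworked __nv50_ram_put() frees a singly linked list with the standard save-next-first loop: nvkm_mm_free() takes a struct nvkm_mm_node ** and clears the caller's pointer while releasing the node, so next must be captured before the call. A minimal sketch, with mm_free() as a hypothetical stand-in for that double-pointer behaviour:

	#include <stdlib.h>

	struct mm_node { struct mm_node *next; };

	/* Hypothetical stand-in for nvkm_mm_free(): releases the node and
	 * NULLs the caller's pointer, mirroring the double-pointer signature. */
	static void mm_free(struct mm_node **pnode)
	{
		free(*pnode);
		*pnode = NULL;
	}

	static void put_all(struct mm_node *head)
	{
		struct mm_node *next = head;
		struct mm_node *node;

		while ((node = next)) {
			next = node->next;  /* save before the node is torn down */
			mm_free(&node);
		}
	}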
@@ -305,11 +305,11 @@ gk20a_instobj_dtor_iommu(struct nvkm_memory *memory)
 	struct gk20a_instobj_iommu *node = gk20a_instobj_iommu(memory);
 	struct gk20a_instmem *imem = node->base.imem;
 	struct device *dev = imem->base.subdev.device->dev;
-	struct nvkm_mm_node *r;
+	struct nvkm_mm_node *r = node->base.mem.mem;
 	unsigned long flags;
 	int i;
 
-	if (unlikely(list_empty(&node->base.mem.regions)))
+	if (unlikely(!r))
 		goto out;
 
 	spin_lock_irqsave(&imem->lock, flags);
@@ -320,9 +320,6 @@ gk20a_instobj_dtor_iommu(struct nvkm_memory *memory)
 
 	spin_unlock_irqrestore(&imem->lock, flags);
 
-	r = list_first_entry(&node->base.mem.regions, struct nvkm_mm_node,
-			     rl_entry);
-
 	/* clear IOMMU bit to unmap pages */
 	r->offset &= ~BIT(imem->iommu_bit - imem->iommu_pgshift);
@@ -404,10 +401,7 @@ gk20a_instobj_ctor_dma(struct gk20a_instmem *imem, u32 npages, u32 align,
 	node->r.length = (npages << PAGE_SHIFT) >> 12;
 
 	node->base.mem.offset = node->handle;
-
-	INIT_LIST_HEAD(&node->base.mem.regions);
-	list_add_tail(&node->r.rl_entry, &node->base.mem.regions);
-
+	node->base.mem.mem = &node->r;
 	return 0;
 }
@@ -484,10 +478,7 @@ gk20a_instobj_ctor_iommu(struct gk20a_instmem *imem, u32 npages, u32 align,
 	r->offset |= BIT(imem->iommu_bit - imem->iommu_pgshift);
 
 	node->base.mem.offset = ((u64)r->offset) << imem->iommu_pgshift;
-
-	INIT_LIST_HEAD(&node->base.mem.regions);
-	list_add_tail(&r->rl_entry, &node->base.mem.regions);
-
+	node->base.mem.mem = r;
 	return 0;
 
 release_area:
...
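gk20a allocations always consist of exactly one region, so the old list setup collapses to a single assignment: point mem.mem at a node whose next is already NULL (the storage comes zeroed), and the one-element chain is complete. A sketch of that embedded-node pattern (illustrative types; calloc() standing in for the kernel's zeroing allocator):

	#include <stdlib.h>

	struct mm_node { struct mm_node *next; unsigned offset, length; };
	struct mem { struct mm_node *mem; };

	struct instobj {
		struct mem base;
		struct mm_node r;  /* the object's one and only region */
	};

	static struct instobj *instobj_new(unsigned pages)
	{
		struct instobj *node = calloc(1, sizeof(*node));
		if (!node)
			return NULL;
		node->r.length = pages;
		node->base.mem = &node->r;  /* one-element chain: r.next == NULL */
		return node;
	}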
@@ -31,7 +31,7 @@ nvkm_vm_map_at(struct nvkm_vma *vma, u64 delta, struct nvkm_mem *node)
 {
 	struct nvkm_vm *vm = vma->vm;
 	struct nvkm_mmu *mmu = vm->mmu;
-	struct nvkm_mm_node *r;
+	struct nvkm_mm_node *r = node->mem;
 	int big = vma->node->type != mmu->func->spg_shift;
 	u32 offset = vma->node->offset + (delta >> 12);
 	u32 bits = vma->node->type - 12;
@@ -41,7 +41,7 @@ nvkm_vm_map_at(struct nvkm_vma *vma, u64 delta, struct nvkm_mem *node)
 	u32 end, len;
 
 	delta = 0;
-	list_for_each_entry(r, &node->regions, rl_entry) {
+	while (r) {
 		u64 phys = (u64)r->offset << 12;
 		u32 num  = r->length >> bits;
@@ -65,7 +65,8 @@ nvkm_vm_map_at(struct nvkm_vma *vma, u64 delta, struct nvkm_mem *node)
 			delta += (u64)len << vma->node->type;
 		}
-	}
+		r = r->next;
+	};
 
 	mmu->func->flush(vm);
 }
...
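With the list iterator gone, nvkm_vm_map_at() walks the regions by plain pointer chasing. A trimmed sketch of the traversal shape (the body here just sums lengths; the real loop programs page tables for each physically contiguous run):

	struct mm_node { struct mm_node *next; unsigned offset, length; };

	static unsigned long total_length(const struct mm_node *r)
	{
		unsigned long total = 0;

		while (r) {  /* one contiguous run per node */
			total += r->length;
			r = r->next;
		}
		return total;
	}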