Commit 32faa34d authored by Dave Airlie

drm/Merge branch 'drm-ttm-glisse' of ../drm-radeon-next into drm-core-next

Merge the topic branch containing Jerome's TTM changes; it also contains one change
from Konrad to the swiotlb export.

* 'drm-ttm-glisse' of ../drm-radeon-next:
  drm/ttm: callback move_notify any time bo placement change v4
  drm/ttm: simplify memory accounting for ttm user v2
  drm/ttm: isolate dma data from ttm_tt V4
  drm/nouveau: enable the ttm dma pool when swiotlb is active V3
  drm/radeon/kms: enable the ttm dma pool if swiotlb is on V4
  drm/ttm: provide dma aware ttm page pool code V9
  drm/ttm: introduce callback for ttm_tt populate & unpopulate V4
  drm/ttm: merge ttm_backend and ttm_tt V5
  drm/ttm: page allocation use page array instead of list
  drm/ttm: test for dma_address array allocation failure
  drm/ttm: use ttm put pages function to properly restore cache attribute
  drm/ttm: remove unused backend flags field
  drm/ttm: remove split btw highmen and lowmem page
  drm/ttm: remove userspace backed ttm object support
  swiotlb: Expose swiotlb_nr_tlb function to modules
parents bcdd6b2f dc97b340
@@ -28,6 +28,7 @@
 */
 #include "drmP.h"
+#include "ttm/ttm_page_alloc.h"
 #include "nouveau_drm.h"
 #include "nouveau_drv.h"
@@ -92,6 +93,7 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
 {
 struct drm_nouveau_private *dev_priv = dev->dev_private;
 struct nouveau_bo *nvbo;
+size_t acc_size;
 int ret;
 nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
@@ -114,9 +116,12 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
 nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
 nouveau_bo_placement_set(nvbo, flags, 0);
+acc_size = ttm_bo_dma_acc_size(&dev_priv->ttm.bdev, size,
+sizeof(struct nouveau_bo));
 ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
 ttm_bo_type_device, &nvbo->placement,
-align >> PAGE_SHIFT, 0, false, NULL, size,
+align >> PAGE_SHIFT, 0, false, NULL, acc_size,
 nouveau_bo_del_ttm);
 if (ret) {
 /* ttm will call nouveau_bo_del_ttm if it fails.. */
@@ -343,8 +348,10 @@ nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
 *mem = val;
 }
-static struct ttm_backend *
-nouveau_bo_create_ttm_backend_entry(struct ttm_bo_device *bdev)
+static struct ttm_tt *
+nouveau_ttm_tt_create(struct ttm_bo_device *bdev,
+unsigned long size, uint32_t page_flags,
+struct page *dummy_read_page)
 {
 struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
 struct drm_device *dev = dev_priv->dev;
@@ -352,11 +359,13 @@ nouveau_bo_create_ttm_backend_entry(struct ttm_bo_device *bdev)
 switch (dev_priv->gart_info.type) {
 #if __OS_HAS_AGP
 case NOUVEAU_GART_AGP:
-return ttm_agp_backend_init(bdev, dev->agp->bridge);
+return ttm_agp_tt_create(bdev, dev->agp->bridge,
+size, page_flags, dummy_read_page);
 #endif
 case NOUVEAU_GART_PDMA:
 case NOUVEAU_GART_HW:
-return nouveau_sgdma_init_ttm(dev);
+return nouveau_sgdma_create_ttm(bdev, size, page_flags,
+dummy_read_page);
 default:
 NV_ERROR(dev, "Unknown GART type %d\n",
 dev_priv->gart_info.type);
@@ -806,10 +815,10 @@ nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
 struct nouveau_vma *vma;
 list_for_each_entry(vma, &nvbo->vma_list, head) {
-if (new_mem->mem_type == TTM_PL_VRAM) {
+if (new_mem && new_mem->mem_type == TTM_PL_VRAM) {
 nouveau_vm_map(vma, new_mem->mm_node);
 } else
-if (new_mem->mem_type == TTM_PL_TT &&
+if (new_mem && new_mem->mem_type == TTM_PL_TT &&
 nvbo->page_shift == vma->vm->spg_shift) {
 nouveau_vm_map_sg(vma, 0, new_mem->
 num_pages << PAGE_SHIFT,
@@ -1044,8 +1053,81 @@ nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
 nouveau_fence_unref(&old_fence);
 }
+static int
+nouveau_ttm_tt_populate(struct ttm_tt *ttm)
+{
+struct ttm_dma_tt *ttm_dma = (void *)ttm;
+struct drm_nouveau_private *dev_priv;
+struct drm_device *dev;
+unsigned i;
+int r;
+if (ttm->state != tt_unpopulated)
+return 0;
+dev_priv = nouveau_bdev(ttm->bdev);
+dev = dev_priv->dev;
+#ifdef CONFIG_SWIOTLB
+if (swiotlb_nr_tbl()) {
+return ttm_dma_populate((void *)ttm, dev->dev);
+}
+#endif
+r = ttm_pool_populate(ttm);
+if (r) {
+return r;
+}
+for (i = 0; i < ttm->num_pages; i++) {
+ttm_dma->dma_address[i] = pci_map_page(dev->pdev, ttm->pages[i],
+0, PAGE_SIZE,
+PCI_DMA_BIDIRECTIONAL);
+if (pci_dma_mapping_error(dev->pdev, ttm_dma->dma_address[i])) {
+while (--i) {
+pci_unmap_page(dev->pdev, ttm_dma->dma_address[i],
+PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ttm_dma->dma_address[i] = 0;
+}
+ttm_pool_unpopulate(ttm);
+return -EFAULT;
+}
+}
+return 0;
+}
+static void
+nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
+{
+struct ttm_dma_tt *ttm_dma = (void *)ttm;
+struct drm_nouveau_private *dev_priv;
+struct drm_device *dev;
+unsigned i;
+dev_priv = nouveau_bdev(ttm->bdev);
+dev = dev_priv->dev;
+#ifdef CONFIG_SWIOTLB
+if (swiotlb_nr_tbl()) {
+ttm_dma_unpopulate((void *)ttm, dev->dev);
+return;
+}
+#endif
+for (i = 0; i < ttm->num_pages; i++) {
+if (ttm_dma->dma_address[i]) {
+pci_unmap_page(dev->pdev, ttm_dma->dma_address[i],
+PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+}
+}
+ttm_pool_unpopulate(ttm);
+}
 struct ttm_bo_driver nouveau_bo_driver = {
-.create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry,
+.ttm_tt_create = &nouveau_ttm_tt_create,
+.ttm_tt_populate = &nouveau_ttm_tt_populate,
+.ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
 .invalidate_caches = nouveau_bo_invalidate_caches,
 .init_mem_type = nouveau_bo_init_mem_type,
 .evict_flags = nouveau_bo_evict_flags,
...
@@ -178,6 +178,7 @@ static struct drm_info_list nouveau_debugfs_list[] = {
 { "memory", nouveau_debugfs_memory_info, 0, NULL },
 { "vbios.rom", nouveau_debugfs_vbios_image, 0, NULL },
 { "ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL },
+{ "ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL },
 };
 #define NOUVEAU_DEBUGFS_ENTRIES ARRAY_SIZE(nouveau_debugfs_list)
...
@@ -1000,7 +1000,10 @@ extern int nouveau_sgdma_init(struct drm_device *);
 extern void nouveau_sgdma_takedown(struct drm_device *);
 extern uint32_t nouveau_sgdma_get_physical(struct drm_device *,
 uint32_t offset);
-extern struct ttm_backend *nouveau_sgdma_init_ttm(struct drm_device *);
+extern struct ttm_tt *nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
+unsigned long size,
+uint32_t page_flags,
+struct page *dummy_read_page);
 /* nouveau_debugfs.c */
 #if defined(CONFIG_DRM_NOUVEAU_DEBUG)
...
@@ -407,6 +407,12 @@ nouveau_mem_vram_init(struct drm_device *dev)
 ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
 if (ret)
 return ret;
+ret = pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
+if (ret) {
+/* Reset to default value. */
+pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(32));
+}
 ret = nouveau_ttm_global_init(dev_priv);
 if (ret)
...
@@ -8,88 +8,30 @@
 #define NV_CTXDMA_PAGE_MASK (NV_CTXDMA_PAGE_SIZE - 1)
 struct nouveau_sgdma_be {
-struct ttm_backend backend;
+/* this has to be the first field so populate/unpopulated in
+* nouve_bo.c works properly, otherwise have to move them here
+*/
+struct ttm_dma_tt ttm;
 struct drm_device *dev;
-dma_addr_t *pages;
-unsigned nr_pages;
-bool unmap_pages;
 u64 offset;
-bool bound;
 };
-static int
-nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
-struct page **pages, struct page *dummy_read_page,
-dma_addr_t *dma_addrs)
-{
-struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
-struct drm_device *dev = nvbe->dev;
-int i;
-NV_DEBUG(nvbe->dev, "num_pages = %ld\n", num_pages);
-nvbe->pages = dma_addrs;
-nvbe->nr_pages = num_pages;
-nvbe->unmap_pages = true;
-/* this code path isn't called and is incorrect anyways */
-if (0) { /* dma_addrs[0] != DMA_ERROR_CODE) { */
-nvbe->unmap_pages = false;
-return 0;
-}
-for (i = 0; i < num_pages; i++) {
-nvbe->pages[i] = pci_map_page(dev->pdev, pages[i], 0,
-PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-if (pci_dma_mapping_error(dev->pdev, nvbe->pages[i])) {
-nvbe->nr_pages = --i;
-be->func->clear(be);
-return -EFAULT;
-}
-}
-return 0;
-}
 static void
-nouveau_sgdma_clear(struct ttm_backend *be)
+nouveau_sgdma_destroy(struct ttm_tt *ttm)
 {
-struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
-struct drm_device *dev = nvbe->dev;
-if (nvbe->bound)
+if (ttm) {
-be->func->unbind(be);
-if (nvbe->unmap_pages) {
-while (nvbe->nr_pages--) {
-pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
-PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-}
-}
-}
-static void
-nouveau_sgdma_destroy(struct ttm_backend *be)
-{
-struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
-if (be) {
 NV_DEBUG(nvbe->dev, "\n");
+ttm_dma_tt_fini(&nvbe->ttm);
-if (nvbe) {
+kfree(nvbe);
-if (nvbe->pages)
-be->func->clear(be);
-kfree(nvbe);
-}
 }
 }
 static int
-nv04_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
+nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
 {
-struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
 struct drm_device *dev = nvbe->dev;
 struct drm_nouveau_private *dev_priv = dev->dev_private;
 struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
@@ -99,8 +41,8 @@ nv04_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
 nvbe->offset = mem->start << PAGE_SHIFT;
 pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
-for (i = 0; i < nvbe->nr_pages; i++) {
+for (i = 0; i < ttm->num_pages; i++) {
-dma_addr_t dma_offset = nvbe->pages[i];
+dma_addr_t dma_offset = nvbe->ttm.dma_address[i];
 uint32_t offset_l = lower_32_bits(dma_offset);
 for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) {
@@ -109,14 +51,13 @@ nv04_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
 }
 }
-nvbe->bound = true;
 return 0;
 }
 static int
-nv04_sgdma_unbind(struct ttm_backend *be)
+nv04_sgdma_unbind(struct ttm_tt *ttm)
 {
-struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
 struct drm_device *dev = nvbe->dev;
 struct drm_nouveau_private *dev_priv = dev->dev_private;
 struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
@@ -124,22 +65,19 @@ nv04_sgdma_unbind(struct ttm_backend *be)
 NV_DEBUG(dev, "\n");
-if (!nvbe->bound)
+if (ttm->state != tt_bound)
 return 0;
 pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
-for (i = 0; i < nvbe->nr_pages; i++) {
+for (i = 0; i < ttm->num_pages; i++) {
 for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++)
 nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
 }
-nvbe->bound = false;
 return 0;
 }
 static struct ttm_backend_func nv04_sgdma_backend = {
-.populate = nouveau_sgdma_populate,
-.clear = nouveau_sgdma_clear,
 .bind = nv04_sgdma_bind,
 .unbind = nv04_sgdma_unbind,
 .destroy = nouveau_sgdma_destroy
@@ -158,14 +96,14 @@ nv41_sgdma_flush(struct nouveau_sgdma_be *nvbe)
 }
 static int
-nv41_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
+nv41_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
 {
-struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
 struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
 struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
-dma_addr_t *list = nvbe->pages;
+dma_addr_t *list = nvbe->ttm.dma_address;
 u32 pte = mem->start << 2;
-u32 cnt = nvbe->nr_pages;
+u32 cnt = ttm->num_pages;
 nvbe->offset = mem->start << PAGE_SHIFT;
@@ -175,18 +113,17 @@ nv41_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
 }
 nv41_sgdma_flush(nvbe);
-nvbe->bound = true;
 return 0;
 }
 static int
-nv41_sgdma_unbind(struct ttm_backend *be)
+nv41_sgdma_unbind(struct ttm_tt *ttm)
 {
-struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
 struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
 struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
 u32 pte = (nvbe->offset >> 12) << 2;
-u32 cnt = nvbe->nr_pages;
+u32 cnt = ttm->num_pages;
 while (cnt--) {
 nv_wo32(pgt, pte, 0x00000000);
@@ -194,24 +131,22 @@ nv41_sgdma_unbind(struct ttm_backend *be)
 }
 nv41_sgdma_flush(nvbe);
-nvbe->bound = false;
 return 0;
 }
 static struct ttm_backend_func nv41_sgdma_backend = {
-.populate = nouveau_sgdma_populate,
-.clear = nouveau_sgdma_clear,
 .bind = nv41_sgdma_bind,
 .unbind = nv41_sgdma_unbind,
 .destroy = nouveau_sgdma_destroy
 };
 static void
-nv44_sgdma_flush(struct nouveau_sgdma_be *nvbe)
+nv44_sgdma_flush(struct ttm_tt *ttm)
 {
+struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
 struct drm_device *dev = nvbe->dev;
-nv_wr32(dev, 0x100814, (nvbe->nr_pages - 1) << 12);
+nv_wr32(dev, 0x100814, (ttm->num_pages - 1) << 12);
 nv_wr32(dev, 0x100808, nvbe->offset | 0x20);
 if (!nv_wait(dev, 0x100808, 0x00000001, 0x00000001))
 NV_ERROR(dev, "gart flush timeout: 0x%08x\n",
@@ -270,14 +205,14 @@ nv44_sgdma_fill(struct nouveau_gpuobj *pgt, dma_addr_t *list, u32 base, u32 cnt)
 }
 static int
-nv44_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
+nv44_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
 {
-struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
 struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
 struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
-dma_addr_t *list = nvbe->pages;
+dma_addr_t *list = nvbe->ttm.dma_address;
 u32 pte = mem->start << 2, tmp[4];
-u32 cnt = nvbe->nr_pages;
+u32 cnt = ttm->num_pages;
 int i;
 nvbe->offset = mem->start << PAGE_SHIFT;
@@ -305,19 +240,18 @@ nv44_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
 if (cnt)
 nv44_sgdma_fill(pgt, list, pte, cnt);
-nv44_sgdma_flush(nvbe);
+nv44_sgdma_flush(ttm);
-nvbe->bound = true;
 return 0;
 }
 static int
-nv44_sgdma_unbind(struct ttm_backend *be)
+nv44_sgdma_unbind(struct ttm_tt *ttm)
 {
-struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
 struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
 struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
 u32 pte = (nvbe->offset >> 12) << 2;
-u32 cnt = nvbe->nr_pages;
+u32 cnt = ttm->num_pages;
 if (pte & 0x0000000c) {
 u32 max = 4 - ((pte >> 2) & 0x3);
@@ -339,55 +273,47 @@ nv44_sgdma_unbind(struct ttm_backend *be)
 if (cnt)
 nv44_sgdma_fill(pgt, NULL, pte, cnt);
-nv44_sgdma_flush(nvbe);
+nv44_sgdma_flush(ttm);
-nvbe->bound = false;
 return 0;
 }
 static struct ttm_backend_func nv44_sgdma_backend = {
-.populate = nouveau_sgdma_populate,
-.clear = nouveau_sgdma_clear,
 .bind = nv44_sgdma_bind,
 .unbind = nv44_sgdma_unbind,
 .destroy = nouveau_sgdma_destroy
 };
 static int
-nv50_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
+nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
 {
-struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
 struct nouveau_mem *node = mem->mm_node;
 /* noop: bound in move_notify() */
-node->pages = nvbe->pages;
+node->pages = nvbe->ttm.dma_address;
-nvbe->pages = (dma_addr_t *)node;
-nvbe->bound = true;
 return 0;
 }
 static int
-nv50_sgdma_unbind(struct ttm_backend *be)
+nv50_sgdma_unbind(struct ttm_tt *ttm)
 {
-struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
-struct nouveau_mem *node = (struct nouveau_mem *)nvbe->pages;
 /* noop: unbound in move_notify() */
-nvbe->pages = node->pages;
-node->pages = NULL;
-nvbe->bound = false;
 return 0;
 }
 static struct ttm_backend_func nv50_sgdma_backend = {
-.populate = nouveau_sgdma_populate,
-.clear = nouveau_sgdma_clear,
 .bind = nv50_sgdma_bind,
 .unbind = nv50_sgdma_unbind,
 .destroy = nouveau_sgdma_destroy
 };
-struct ttm_backend *
-nouveau_sgdma_init_ttm(struct drm_device *dev)
+struct ttm_tt *
+nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
+unsigned long size, uint32_t page_flags,
+struct page *dummy_read_page)
 {
-struct drm_nouveau_private *dev_priv = dev->dev_private;
+struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
+struct drm_device *dev = dev_priv->dev;
 struct nouveau_sgdma_be *nvbe;
 nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
@@ -395,9 +321,13 @@ nouveau_sgdma_init_ttm(struct drm_device *dev)
 return NULL;
 nvbe->dev = dev;
+nvbe->ttm.ttm.func = dev_priv->gart_info.func;
-nvbe->backend.func = dev_priv->gart_info.func;
+if (ttm_dma_tt_init(&nvbe->ttm, bdev, size, page_flags, dummy_read_page)) {
-return &nvbe->backend;
+kfree(nvbe);
+return NULL;
+}
+return &nvbe->ttm.ttm;
 }
 int
...
@@ -320,7 +320,6 @@ struct radeon_gart {
 unsigned table_size;
 struct page **pages;
 dma_addr_t *pages_addr;
-bool *ttm_alloced;
 bool ready;
 };
...
@@ -765,8 +765,14 @@ int radeon_device_init(struct radeon_device *rdev,
 r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
 if (r) {
 rdev->need_dma32 = true;
+dma_bits = 32;
 printk(KERN_WARNING "radeon: No suitable DMA available.\n");
 }
+r = pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
+if (r) {
+pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(32));
+printk(KERN_WARNING "radeon: No coherent DMA available.\n");
+}
 /* Registers mapping */
 /* TODO: block userspace mapping of io register */
...
@@ -157,9 +157,6 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
 p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
 for (i = 0; i < pages; i++, p++) {
 if (rdev->gart.pages[p]) {
-if (!rdev->gart.ttm_alloced[p])
-pci_unmap_page(rdev->pdev, rdev->gart.pages_addr[p],
-PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
 rdev->gart.pages[p] = NULL;
 rdev->gart.pages_addr[p] = rdev->dummy_page.addr;
 page_base = rdev->gart.pages_addr[p];
@@ -191,23 +188,7 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
 p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
 for (i = 0; i < pages; i++, p++) {
-/* we reverted the patch using dma_addr in TTM for now but this
+rdev->gart.pages_addr[p] = dma_addr[i];
-* code stops building on alpha so just comment it out for now */
-if (0) { /*dma_addr[i] != DMA_ERROR_CODE) */
-rdev->gart.ttm_alloced[p] = true;
-rdev->gart.pages_addr[p] = dma_addr[i];
-} else {
-/* we need to support large memory configurations */
-/* assume that unbind have already been call on the range */
-rdev->gart.pages_addr[p] = pci_map_page(rdev->pdev, pagelist[i],
-0, PAGE_SIZE,
-PCI_DMA_BIDIRECTIONAL);
-if (pci_dma_mapping_error(rdev->pdev, rdev->gart.pages_addr[p])) {
-/* FIXME: failed to map page (return -ENOMEM?) */
-radeon_gart_unbind(rdev, offset, pages);
-return -ENOMEM;
-}
-}
 rdev->gart.pages[p] = pagelist[i];
 if (rdev->gart.ptr) {
 page_base = rdev->gart.pages_addr[p];
@@ -274,12 +255,6 @@ int radeon_gart_init(struct radeon_device *rdev)
 radeon_gart_fini(rdev);
 return -ENOMEM;
 }
-rdev->gart.ttm_alloced = kzalloc(sizeof(bool) *
-rdev->gart.num_cpu_pages, GFP_KERNEL);
-if (rdev->gart.ttm_alloced == NULL) {
-radeon_gart_fini(rdev);
-return -ENOMEM;
-}
 /* set GART entry to point to the dummy page by default */
 for (i = 0; i < rdev->gart.num_cpu_pages; i++) {
 rdev->gart.pages_addr[i] = rdev->dummy_page.addr;
@@ -296,10 +271,8 @@ void radeon_gart_fini(struct radeon_device *rdev)
 rdev->gart.ready = false;
 kfree(rdev->gart.pages);
 kfree(rdev->gart.pages_addr);
-kfree(rdev->gart.ttm_alloced);
 rdev->gart.pages = NULL;
 rdev->gart.pages_addr = NULL;
-rdev->gart.ttm_alloced = NULL;
 radeon_dummy_page_fini(rdev);
 }
@@ -95,6 +95,7 @@ int radeon_bo_create(struct radeon_device *rdev,
 enum ttm_bo_type type;
 unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
 unsigned long max_size = 0;
+size_t acc_size;
 int r;
 size = ALIGN(size, PAGE_SIZE);
@@ -117,6 +118,9 @@ int radeon_bo_create(struct radeon_device *rdev,
 return -ENOMEM;
 }
+acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size,
+sizeof(struct radeon_bo));
 retry:
 bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
 if (bo == NULL)
@@ -134,8 +138,8 @@ int radeon_bo_create(struct radeon_device *rdev,
 /* Kernel allocation are uninterruptible */
 mutex_lock(&rdev->vram_mutex);
 r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
-&bo->placement, page_align, 0, !kernel, NULL, size,
+&bo->placement, page_align, 0, !kernel, NULL,
-&radeon_ttm_bo_destroy);
+acc_size, &radeon_ttm_bo_destroy);
 mutex_unlock(&rdev->vram_mutex);
 if (unlikely(r != 0)) {
 if (r != -ERESTARTSYS) {
...
@@ -114,24 +114,6 @@ static void radeon_ttm_global_fini(struct radeon_device *rdev)
 }
 }
-struct ttm_backend *radeon_ttm_backend_create(struct radeon_device *rdev);
-static struct ttm_backend*
-radeon_create_ttm_backend_entry(struct ttm_bo_device *bdev)
-{
-struct radeon_device *rdev;
-rdev = radeon_get_rdev(bdev);
-#if __OS_HAS_AGP
-if (rdev->flags & RADEON_IS_AGP) {
-return ttm_agp_backend_init(bdev, rdev->ddev->agp->bridge);
-} else
-#endif
-{
-return radeon_ttm_backend_create(rdev);
-}
-}
 static int radeon_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
 {
 return 0;
@@ -515,8 +497,155 @@ static bool radeon_sync_obj_signaled(void *sync_obj, void *sync_arg)
 return radeon_fence_signaled((struct radeon_fence *)sync_obj);
 }
+/*
+* TTM backend functions.
+*/
+struct radeon_ttm_tt {
+struct ttm_dma_tt ttm;
+struct radeon_device *rdev;
+u64 offset;
+};
+static int radeon_ttm_backend_bind(struct ttm_tt *ttm,
+struct ttm_mem_reg *bo_mem)
+{
+struct radeon_ttm_tt *gtt = (void*)ttm;
+int r;
+gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
+if (!ttm->num_pages) {
+WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
+ttm->num_pages, bo_mem, ttm);
+}
+r = radeon_gart_bind(gtt->rdev, gtt->offset,
+ttm->num_pages, ttm->pages, gtt->ttm.dma_address);
+if (r) {
+DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
+ttm->num_pages, (unsigned)gtt->offset);
+return r;
+}
+return 0;
+}
+static int radeon_ttm_backend_unbind(struct ttm_tt *ttm)
+{
+struct radeon_ttm_tt *gtt = (void *)ttm;
+radeon_gart_unbind(gtt->rdev, gtt->offset, ttm->num_pages);
+return 0;
+}
+static void radeon_ttm_backend_destroy(struct ttm_tt *ttm)
+{
+struct radeon_ttm_tt *gtt = (void *)ttm;
+ttm_dma_tt_fini(&gtt->ttm);
+kfree(gtt);
+}
+static struct ttm_backend_func radeon_backend_func = {
+.bind = &radeon_ttm_backend_bind,
+.unbind = &radeon_ttm_backend_unbind,
+.destroy = &radeon_ttm_backend_destroy,
+};
+struct ttm_tt *radeon_ttm_tt_create(struct ttm_bo_device *bdev,
+unsigned long size, uint32_t page_flags,
+struct page *dummy_read_page)
+{
+struct radeon_device *rdev;
+struct radeon_ttm_tt *gtt;
+rdev = radeon_get_rdev(bdev);
+#if __OS_HAS_AGP
+if (rdev->flags & RADEON_IS_AGP) {
+return ttm_agp_tt_create(bdev, rdev->ddev->agp->bridge,
+size, page_flags, dummy_read_page);
+}
+#endif
+gtt = kzalloc(sizeof(struct radeon_ttm_tt), GFP_KERNEL);
+if (gtt == NULL) {
+return NULL;
+}
+gtt->ttm.ttm.func = &radeon_backend_func;
+gtt->rdev = rdev;
+if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags, dummy_read_page)) {
+kfree(gtt);
+return NULL;
+}
+return &gtt->ttm.ttm;
+}
+static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
+{
+struct radeon_device *rdev;
+struct radeon_ttm_tt *gtt = (void *)ttm;
+unsigned i;
+int r;
+if (ttm->state != tt_unpopulated)
+return 0;
+rdev = radeon_get_rdev(ttm->bdev);
+#ifdef CONFIG_SWIOTLB
+if (swiotlb_nr_tbl()) {
+return ttm_dma_populate(&gtt->ttm, rdev->dev);
+}
+#endif
+r = ttm_pool_populate(ttm);
+if (r) {
+return r;
+}
+for (i = 0; i < ttm->num_pages; i++) {
+gtt->ttm.dma_address[i] = pci_map_page(rdev->pdev, ttm->pages[i],
+0, PAGE_SIZE,
+PCI_DMA_BIDIRECTIONAL);
+if (pci_dma_mapping_error(rdev->pdev, gtt->ttm.dma_address[i])) {
+while (--i) {
+pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
+PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+gtt->ttm.dma_address[i] = 0;
+}
+ttm_pool_unpopulate(ttm);
+return -EFAULT;
+}
+}
+return 0;
+}
+static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
+{
+struct radeon_device *rdev;
+struct radeon_ttm_tt *gtt = (void *)ttm;
+unsigned i;
+rdev = radeon_get_rdev(ttm->bdev);
+#ifdef CONFIG_SWIOTLB
+if (swiotlb_nr_tbl()) {
+ttm_dma_unpopulate(&gtt->ttm, rdev->dev);
+return;
+}
+#endif
+for (i = 0; i < ttm->num_pages; i++) {
+if (gtt->ttm.dma_address[i]) {
+pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
+PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+}
+}
+ttm_pool_unpopulate(ttm);
+}
 static struct ttm_bo_driver radeon_bo_driver = {
-.create_ttm_backend_entry = &radeon_create_ttm_backend_entry,
+.ttm_tt_create = &radeon_ttm_tt_create,
+.ttm_tt_populate = &radeon_ttm_tt_populate,
+.ttm_tt_unpopulate = &radeon_ttm_tt_unpopulate,
 .invalidate_caches = &radeon_invalidate_caches,
 .init_mem_type = &radeon_init_mem_type,
 .evict_flags = &radeon_evict_flags,
@@ -680,124 +809,6 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
 }
-/*
-* TTM backend functions.
-*/
-struct radeon_ttm_backend {
-struct ttm_backend backend;
-struct radeon_device *rdev;
-unsigned long num_pages;
-struct page **pages;
-struct page *dummy_read_page;
-dma_addr_t *dma_addrs;
-bool populated;
-bool bound;
-unsigned offset;
-};
-static int radeon_ttm_backend_populate(struct ttm_backend *backend,
-unsigned long num_pages,
-struct page **pages,
-struct page *dummy_read_page,
-dma_addr_t *dma_addrs)
-{
-struct radeon_ttm_backend *gtt;
-gtt = container_of(backend, struct radeon_ttm_backend, backend);
-gtt->pages = pages;
-gtt->dma_addrs = dma_addrs;
-gtt->num_pages = num_pages;
-gtt->dummy_read_page = dummy_read_page;
-gtt->populated = true;
-return 0;
-}
-static void radeon_ttm_backend_clear(struct ttm_backend *backend)
-{
-struct radeon_ttm_backend *gtt;
-gtt = container_of(backend, struct radeon_ttm_backend, backend);
-gtt->pages = NULL;
-gtt->dma_addrs = NULL;
-gtt->num_pages = 0;
-gtt->dummy_read_page = NULL;
-gtt->populated = false;
-gtt->bound = false;
-}
-static int radeon_ttm_backend_bind(struct ttm_backend *backend,
-struct ttm_mem_reg *bo_mem)
-{
-struct radeon_ttm_backend *gtt;
-int r;
-gtt = container_of(backend, struct radeon_ttm_backend, backend);
-gtt->offset = bo_mem->start << PAGE_SHIFT;
-if (!gtt->num_pages) {
-WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
-gtt->num_pages, bo_mem, backend);
-}
-r = radeon_gart_bind(gtt->rdev, gtt->offset,
-gtt->num_pages, gtt->pages, gtt->dma_addrs);
-if (r) {
-DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
-gtt->num_pages, gtt->offset);
-return r;
-}
-gtt->bound = true;
-return 0;
-}
-static int radeon_ttm_backend_unbind(struct ttm_backend *backend)
-{
-struct radeon_ttm_backend *gtt;
-gtt = container_of(backend, struct radeon_ttm_backend, backend);
-radeon_gart_unbind(gtt->rdev, gtt->offset, gtt->num_pages);
-gtt->bound = false;
-return 0;
-}
-static void radeon_ttm_backend_destroy(struct ttm_backend *backend)
-{
-struct radeon_ttm_backend *gtt;
-gtt = container_of(backend, struct radeon_ttm_backend, backend);
-if (gtt->bound) {
-radeon_ttm_backend_unbind(backend);
-}
-kfree(gtt);
-}
-static struct ttm_backend_func radeon_backend_func = {
-.populate = &radeon_ttm_backend_populate,
-.clear = &radeon_ttm_backend_clear,
-.bind = &radeon_ttm_backend_bind,
-.unbind = &radeon_ttm_backend_unbind,
-.destroy = &radeon_ttm_backend_destroy,
-};
-struct ttm_backend *radeon_ttm_backend_create(struct radeon_device *rdev)
-{
-struct radeon_ttm_backend *gtt;
-gtt = kzalloc(sizeof(struct radeon_ttm_backend), GFP_KERNEL);
-if (gtt == NULL) {
-return NULL;
-}
-gtt->backend.bdev = &rdev->mman.bdev;
-gtt->backend.flags = 0;
-gtt->backend.func = &radeon_backend_func;
-gtt->rdev = rdev;
-gtt->pages = NULL;
-gtt->num_pages = 0;
-gtt->dummy_read_page = NULL;
-gtt->populated = false;
-gtt->bound = false;
-return &gtt->backend;
-}
 #define RADEON_DEBUGFS_MEM_TYPES 2
 #if defined(CONFIG_DEBUG_FS)
@@ -820,8 +831,8 @@ static int radeon_mm_dump_table(struct seq_file *m, void *data)
 static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
 {
 #if defined(CONFIG_DEBUG_FS)
-static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+1];
+static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+2];
-static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES+1][32];
+static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES+2][32];
 unsigned i;
 for (i = 0; i < RADEON_DEBUGFS_MEM_TYPES; i++) {
@@ -843,8 +854,17 @@ static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
 radeon_mem_types_list[i].name = radeon_mem_types_names[i];
 radeon_mem_types_list[i].show = &ttm_page_alloc_debugfs;
 radeon_mem_types_list[i].driver_features = 0;
-radeon_mem_types_list[i].data = NULL;
+radeon_mem_types_list[i++].data = NULL;
-return radeon_debugfs_add_files(rdev, radeon_mem_types_list, RADEON_DEBUGFS_MEM_TYPES+1);
+#ifdef CONFIG_SWIOTLB
+if (swiotlb_nr_tbl()) {
+sprintf(radeon_mem_types_names[i], "ttm_dma_page_pool");
+radeon_mem_types_list[i].name = radeon_mem_types_names[i];
+radeon_mem_types_list[i].show = &ttm_dma_page_alloc_debugfs;
+radeon_mem_types_list[i].driver_features = 0;
+radeon_mem_types_list[i++].data = NULL;
+}
+#endif
+return radeon_debugfs_add_files(rdev, radeon_mem_types_list, i);
 #endif
 return 0;
...
@@ -7,4 +7,8 @@ ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \
 ttm_object.o ttm_lock.o ttm_execbuf_util.o ttm_page_alloc.o \
 ttm_bo_manager.o
+ifeq ($(CONFIG_SWIOTLB),y)
+ttm-y += ttm_page_alloc_dma.o
+endif
 obj-$(CONFIG_DRM_TTM) += ttm.o
@@ -40,45 +40,33 @@
 #include <asm/agp.h>
 struct ttm_agp_backend {
-struct ttm_backend backend;
+struct ttm_tt ttm;
 struct agp_memory *mem;
 struct agp_bridge_data *bridge;
 };
-static int ttm_agp_populate(struct ttm_backend *backend,
+static int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
-unsigned long num_pages, struct page **pages,
-struct page *dummy_read_page,
-dma_addr_t *dma_addrs)
 {
-struct ttm_agp_backend *agp_be =
+struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
-container_of(backend, struct ttm_agp_backend, backend);
+struct drm_mm_node *node = bo_mem->mm_node;
-struct page **cur_page, **last_page = pages + num_pages;
 struct agp_memory *mem;
+int ret, cached = (bo_mem->placement & TTM_PL_FLAG_CACHED);
+unsigned i;
-mem = agp_allocate_memory(agp_be->bridge, num_pages, AGP_USER_MEMORY);
+mem = agp_allocate_memory(agp_be->bridge, ttm->num_pages, AGP_USER_MEMORY);
 if (unlikely(mem == NULL))
 return -ENOMEM;
 mem->page_count = 0;
-for (cur_page = pages; cur_page < last_page; ++cur_page) {
+for (i = 0; i < ttm->num_pages; i++) {
-struct page *page = *cur_page;
+struct page *page = ttm->pages[i];
 if (!page)
-page = dummy_read_page;
+page = ttm->dummy_read_page;
 mem->pages[mem->page_count++] = page;
 }
 agp_be->mem = mem;
-return 0;
-}
-static int ttm_agp_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
-{
-struct ttm_agp_backend *agp_be =
-container_of(backend, struct ttm_agp_backend, backend);
-struct drm_mm_node *node = bo_mem->mm_node;
-struct agp_memory *mem = agp_be->mem;
-int cached = (bo_mem->placement & TTM_PL_FLAG_CACHED);
-int ret;
 mem->is_flushed = 1;
 mem->type = (cached) ? AGP_USER_CACHED_MEMORY : AGP_USER_MEMORY;
@@ -90,50 +78,38 @@ static int ttm_agp_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
 return ret;
 }
-static int ttm_agp_unbind(struct ttm_backend *backend)
+static int ttm_agp_unbind(struct ttm_tt *ttm)
 {
-struct ttm_agp_backend *agp_be =
+struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
-container_of(backend, struct ttm_agp_backend, backend);
-if (agp_be->mem->is_bound)
-return agp_unbind_memory(agp_be->mem);
-else
-return 0;
-}
-static void ttm_agp_clear(struct ttm_backend *backend)
+if (agp_be->mem) {
-{
+if (agp_be->mem->is_bound)
-struct ttm_agp_backend *agp_be =
+return agp_unbind_memory(agp_be->mem);
-container_of(backend, struct ttm_agp_backend, backend);
+agp_free_memory(agp_be->mem);
-struct agp_memory *mem = agp_be->mem;
+agp_be->mem = NULL;
-if (mem) {
-ttm_agp_unbind(backend);
-agp_free_memory(mem);
 }
-agp_be->mem = NULL;
+return 0;
 }
-static void ttm_agp_destroy(struct ttm_backend *backend)
+static void ttm_agp_destroy(struct ttm_tt *ttm)
 {
-struct ttm_agp_backend *agp_be =
+struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
-container_of(backend, struct ttm_agp_backend, backend);
 if (agp_be->mem)
-ttm_agp_clear(backend);
+ttm_agp_unbind(ttm);
 kfree(agp_be);
 }
 static struct ttm_backend_func ttm_agp_func = {
-.populate = ttm_agp_populate,
-.clear = ttm_agp_clear,
 .bind = ttm_agp_bind,
 .unbind = ttm_agp_unbind,
 .destroy = ttm_agp_destroy,
 };
-struct ttm_backend *ttm_agp_backend_init(struct ttm_bo_device *bdev,
+struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev,
-struct agp_bridge_data *bridge)
+struct agp_bridge_data *bridge,
+unsigned long size, uint32_t page_flags,
+struct page *dummy_read_page)
 {
 struct ttm_agp_backend *agp_be;
@@ -143,10 +119,14 @@ struct ttm_backend *ttm_agp_backend_init(struct ttm_bo_device *bdev,
 agp_be->mem = NULL;
 agp_be->bridge = bridge;
-agp_be->backend.func = &ttm_agp_func;
+agp_be->ttm.func = &ttm_agp_func;
-agp_be->backend.bdev = bdev;
-return &agp_be->backend;
+if (ttm_tt_init(&agp_be->ttm, bdev, size, page_flags, dummy_read_page)) {
+return NULL;
+}
+return &agp_be->ttm;
 }
-EXPORT_SYMBOL(ttm_agp_backend_init);
+EXPORT_SYMBOL(ttm_agp_tt_create);
 #endif
@@ -137,6 +137,7 @@ static void ttm_bo_release_list(struct kref *list_kref)
 struct ttm_buffer_object *bo =
 container_of(list_kref, struct ttm_buffer_object, list_kref);
 struct ttm_bo_device *bdev = bo->bdev;
+size_t acc_size = bo->acc_size;
 BUG_ON(atomic_read(&bo->list_kref.refcount));
 BUG_ON(atomic_read(&bo->kref.refcount));
@@ -152,9 +153,9 @@ static void ttm_bo_release_list(struct kref *list_kref)
 if (bo->destroy)
 bo->destroy(bo);
 else {
-ttm_mem_global_free(bdev->glob->mem_glob, bo->acc_size);
 kfree(bo);
 }
+ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
 }
 int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
@@ -337,27 +338,11 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
 if (zero_alloc)
 page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
 case ttm_bo_type_kernel:
-bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
+bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
 page_flags, glob->dummy_read_page);
 if (unlikely(bo->ttm == NULL))
 ret = -ENOMEM;
 break;
-case ttm_bo_type_user:
-bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
-page_flags | TTM_PAGE_FLAG_USER,
-glob->dummy_read_page);
-if (unlikely(bo->ttm == NULL)) {
-ret = -ENOMEM;
-break;
-}
-ret = ttm_tt_set_user(bo->ttm, current,
-bo->buffer_start, bo->num_pages);
-if (unlikely(ret != 0)) {
-ttm_tt_destroy(bo->ttm);
-bo->ttm = NULL;
-}
-break;
 default:
 printk(KERN_ERR TTM_PFX "Illegal buffer object type\n");
 ret = -EINVAL;
@@ -419,9 +404,6 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 }
 }
-if (bdev->driver->move_notify)
-bdev->driver->move_notify(bo, mem);
 if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
 !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
 ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, mem);
@@ -434,6 +416,9 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 if (ret)
 goto out_err;
+if (bdev->driver->move_notify)
+bdev->driver->move_notify(bo, mem);
 moved:
 if (bo->evicted) {
 ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
@@ -472,6 +457,9 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
 {
+if (bo->bdev->driver->move_notify)
+bo->bdev->driver->move_notify(bo, NULL);
 if (bo->ttm) {
 ttm_tt_unbind(bo->ttm);
 ttm_tt_destroy(bo->ttm);
@@ -907,16 +895,12 @@ static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
 }
 static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
-bool disallow_fixed,
 uint32_t mem_type,
 uint32_t proposed_placement,
 uint32_t *masked_placement)
 {
 uint32_t cur_flags = ttm_bo_type_flags(mem_type);
-if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && disallow_fixed)
-return false;
 if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0)
 return false;
@@ -961,7 +945,6 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 man = &bdev->man[mem_type];
 type_ok = ttm_bo_mt_compatible(man,
-bo->type == ttm_bo_type_user,
 mem_type,
 placement->placement[i],
 &cur_flags);
@@ -1009,7 +992,6 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 if (!man->has_type)
 continue;
 if (!ttm_bo_mt_compatible(man,
-bo->type == ttm_bo_type_user,
 mem_type,
 placement->busy_placement[i],
 &cur_flags))
@@ -1179,6 +1161,17 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
 {
 int ret = 0;
 unsigned long num_pages;
+struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
+ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
+if (ret) {
+printk(KERN_ERR TTM_PFX "Out of kernel memory.\n");
+if (destroy)
+(*destroy)(bo);
+else
+kfree(bo);
+return -ENOMEM;
+}
 size += buffer_start & ~PAGE_MASK;
 num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
@@ -1249,14 +1242,34 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
 }
 EXPORT_SYMBOL(ttm_bo_init);
-static inline size_t ttm_bo_size(struct ttm_bo_global *glob,
+size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
-unsigned long num_pages)
+unsigned long bo_size,
+unsigned struct_size)
 {
-size_t page_array_size = (num_pages * sizeof(void *) + PAGE_SIZE - 1) &
+unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
-PAGE_MASK;
+size_t size = 0;
-return glob->ttm_bo_size + 2 * page_array_size;
+size += ttm_round_pot(struct_size);
+size += PAGE_ALIGN(npages * sizeof(void *));
+size += ttm_round_pot(sizeof(struct ttm_tt));
+return size;
 }
+EXPORT_SYMBOL(ttm_bo_acc_size);
+size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
+unsigned long bo_size,
+unsigned struct_size)
+{
+unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
+size_t size = 0;
+size += ttm_round_pot(struct_size);
+size += PAGE_ALIGN(npages * sizeof(void *));
+size += PAGE_ALIGN(npages * sizeof(dma_addr_t));
+size += ttm_round_pot(sizeof(struct ttm_dma_tt));
+return size;
+}
+EXPORT_SYMBOL(ttm_bo_dma_acc_size);
 int ttm_bo_create(struct ttm_bo_device *bdev,
 unsigned long size,
@@ -1270,10 +1283,10 @@ int ttm_bo_create(struct ttm_bo_device *bdev,
 {
 struct ttm_buffer_object *bo;
 struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
+size_t acc_size;
 int ret;
-size_t acc_size =
+acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
-ttm_bo_size(bdev->glob, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
 ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
 if (unlikely(ret != 0))
 return ret;
@@ -1459,13 +1472,6 @@ int ttm_bo_global_init(struct drm_global_reference *ref)
 goto out_no_shrink;
 }
-glob->ttm_bo_extra_size =
-ttm_round_pot(sizeof(struct ttm_tt)) +
-ttm_round_pot(sizeof(struct ttm_backend));
-glob->ttm_bo_size = glob->ttm_bo_extra_size +
-ttm_round_pot(sizeof(struct ttm_buffer_object));
 atomic_set(&glob->bo_count, 0);
 ret = kobject_init_and_add(
...
@@ -244,7 +244,7 @@ static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
 unsigned long page,
 pgprot_t prot)
 {
-struct page *d = ttm_tt_get_page(ttm, page);
+struct page *d = ttm->pages[page];
 void *dst;
 if (!d)
@@ -281,7 +281,7 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
 unsigned long page,
 pgprot_t prot)
 {
-struct page *s = ttm_tt_get_page(ttm, page);
+struct page *s = ttm->pages[page];
 void *src;
 if (!s)
@@ -342,6 +342,12 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
 if (old_iomap == NULL && ttm == NULL)
 goto out2;
+if (ttm->state == tt_unpopulated) {
+ret = ttm->bdev->driver->ttm_tt_populate(ttm);
+if (ret)
+goto out1;
+}
 add = 0;
 dir = 1;
@@ -439,6 +445,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
 kref_init(&fbo->list_kref);
 kref_init(&fbo->kref);
 fbo->destroy = &ttm_transfered_destroy;
+fbo->acc_size = 0;
 *new_obj = fbo;
 return 0;
@@ -502,10 +509,16 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
 {
 struct ttm_mem_reg *mem = &bo->mem; pgprot_t prot;
 struct ttm_tt *ttm = bo->ttm;
-struct page *d;
+int ret;
-int i;
 BUG_ON(!ttm);
+if (ttm->state == tt_unpopulated) {
+ret = ttm->bdev->driver->ttm_tt_populate(ttm);
+if (ret)
+return ret;
+}
 if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
 /*
 * We're mapping a single page, and the desired
@@ -513,18 +526,9 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
 */
 map->bo_kmap_type = ttm_bo_map_kmap;
-map->page = ttm_tt_get_page(ttm, start_page);
+map->page = ttm->pages[start_page];
 map->virtual = kmap(map->page);
 } else {
-/*
-* Populate the part we're mapping;
-*/
-for (i = start_page; i < start_page + num_pages; ++i) {
-d = ttm_tt_get_page(ttm, i);
-if (!d)
-return -ENOMEM;
-}
 /*
 * We need to use vmap to get the desired page protection
 * or to make the buffer object look contiguous.
...
@@ -174,18 +174,23 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 vma->vm_page_prot = (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
 vm_get_page_prot(vma->vm_flags) :
 ttm_io_prot(bo->mem.placement, vma->vm_page_prot);
+/* Allocate all page at once, most common usage */
+if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
+retval = VM_FAULT_OOM;
+goto out_io_unlock;
+}
 }
 /*
 * Speculatively prefault a number of pages. Only error on
 * first page.
 */
 for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
 if (bo->mem.bus.is_iomem)
 pfn = ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT) + page_offset;
 else {
-page = ttm_tt_get_page(ttm, page_offset);
+page = ttm->pages[page_offset];
 if (unlikely(!page && i == 0)) {
 retval = VM_FAULT_OOM;
 goto out_io_unlock;
...
@@ -395,6 +395,7 @@ int ttm_mem_global_init(struct ttm_mem_global *glob)
 zone->name, (unsigned long long) zone->max_mem >> 10);
 }
 ttm_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
+ttm_dma_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
 return 0;
 out_no_zone:
 ttm_mem_global_release(glob);
@@ -409,6 +410,7 @@ void ttm_mem_global_release(struct ttm_mem_global *glob)
 /* let the page allocator first stop the shrink work. */
 ttm_page_alloc_fini();
+ttm_dma_page_alloc_fini();
 flush_workqueue(glob->swap_queue);
 destroy_workqueue(glob->swap_queue);
...
...@@ -619,8 +619,10 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool, ...@@ -619,8 +619,10 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
* @return count of pages still required to fulfill the request. * @return count of pages still required to fulfill the request.
*/ */
static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool, static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
struct list_head *pages, int ttm_flags, struct list_head *pages,
enum ttm_caching_state cstate, unsigned count) int ttm_flags,
enum ttm_caching_state cstate,
unsigned count)
{ {
unsigned long irq_flags; unsigned long irq_flags;
struct list_head *p; struct list_head *p;
...@@ -660,17 +662,67 @@ static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool, ...@@ -660,17 +662,67 @@ static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
return count; return count;
} }
/* Put all pages in the pages array back into the correct pool to wait for reuse */
static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
enum ttm_caching_state cstate)
{
unsigned long irq_flags;
struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
unsigned i;
if (pool == NULL) {
/* No pool for this memory type so free the pages */
for (i = 0; i < npages; i++) {
if (pages[i]) {
if (page_count(pages[i]) != 1)
printk(KERN_ERR TTM_PFX
"Erroneous page count. "
"Leaking pages.\n");
__free_page(pages[i]);
pages[i] = NULL;
}
}
return;
}
spin_lock_irqsave(&pool->lock, irq_flags);
for (i = 0; i < npages; i++) {
if (pages[i]) {
if (page_count(pages[i]) != 1)
printk(KERN_ERR TTM_PFX
"Erroneous page count. "
"Leaking pages.\n");
list_add_tail(&pages[i]->lru, &pool->list);
pages[i] = NULL;
pool->npages++;
}
}
/* Check that we don't go over the pool limit */
npages = 0;
if (pool->npages > _manager->options.max_size) {
npages = pool->npages - _manager->options.max_size;
/* free at least NUM_PAGES_TO_ALLOC number of pages
* to reduce calls to set_memory_wb */
if (npages < NUM_PAGES_TO_ALLOC)
npages = NUM_PAGES_TO_ALLOC;
}
spin_unlock_irqrestore(&pool->lock, irq_flags);
if (npages)
ttm_page_pool_free(pool, npages);
}
/* /*
* On success pages list will hold count number of correctly * On success pages list will hold count number of correctly
* cached pages. * cached pages.
*/ */
int ttm_get_pages(struct list_head *pages, int flags, static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
enum ttm_caching_state cstate, unsigned count, enum ttm_caching_state cstate)
dma_addr_t *dma_address)
{ {
struct ttm_page_pool *pool = ttm_get_pool(flags, cstate); struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
struct list_head plist;
struct page *p = NULL; struct page *p = NULL;
gfp_t gfp_flags = GFP_USER; gfp_t gfp_flags = GFP_USER;
unsigned count;
int r; int r;
/* set zero flag for page allocation if required */ /* set zero flag for page allocation if required */
...@@ -684,7 +736,7 @@ int ttm_get_pages(struct list_head *pages, int flags, ...@@ -684,7 +736,7 @@ int ttm_get_pages(struct list_head *pages, int flags,
else else
gfp_flags |= GFP_HIGHUSER; gfp_flags |= GFP_HIGHUSER;
for (r = 0; r < count; ++r) { for (r = 0; r < npages; ++r) {
p = alloc_page(gfp_flags); p = alloc_page(gfp_flags);
if (!p) { if (!p) {
...@@ -693,87 +745,53 @@ int ttm_get_pages(struct list_head *pages, int flags, ...@@ -693,87 +745,53 @@ int ttm_get_pages(struct list_head *pages, int flags,
return -ENOMEM; return -ENOMEM;
} }
list_add(&p->lru, pages); pages[r] = p;
} }
return 0; return 0;
} }
/* combine zero flag to pool flags */ /* combine zero flag to pool flags */
gfp_flags |= pool->gfp_flags; gfp_flags |= pool->gfp_flags;
/* First we take pages from the pool */ /* First we take pages from the pool */
count = ttm_page_pool_get_pages(pool, pages, flags, cstate, count); INIT_LIST_HEAD(&plist);
npages = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages);
count = 0;
list_for_each_entry(p, &plist, lru) {
pages[count++] = p;
}
/* clear the pages coming from the pool if requested */ /* clear the pages coming from the pool if requested */
if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) { if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
list_for_each_entry(p, pages, lru) { list_for_each_entry(p, &plist, lru) {
clear_page(page_address(p)); clear_page(page_address(p));
} }
} }
/* If pool didn't have enough pages allocate new one. */ /* If pool didn't have enough pages allocate new one. */
if (count > 0) { if (npages > 0) {
/* ttm_alloc_new_pages doesn't reference pool so we can run /* ttm_alloc_new_pages doesn't reference pool so we can run
* multiple requests in parallel. * multiple requests in parallel.
**/ **/
r = ttm_alloc_new_pages(pages, gfp_flags, flags, cstate, count); INIT_LIST_HEAD(&plist);
r = ttm_alloc_new_pages(&plist, gfp_flags, flags, cstate, npages);
list_for_each_entry(p, &plist, lru) {
pages[count++] = p;
}
if (r) { if (r) {
/* If there is any pages in the list put them back to /* If there is any pages in the list put them back to
* the pool. */ * the pool. */
printk(KERN_ERR TTM_PFX printk(KERN_ERR TTM_PFX
"Failed to allocate extra pages " "Failed to allocate extra pages "
"for large request."); "for large request.");
ttm_put_pages(pages, 0, flags, cstate, NULL); ttm_put_pages(pages, count, flags, cstate);
return r; return r;
} }
} }
return 0; return 0;
} }
/* Put all pages in pages list to correct pool to wait for reuse */
void ttm_put_pages(struct list_head *pages, unsigned page_count, int flags,
enum ttm_caching_state cstate, dma_addr_t *dma_address)
{
unsigned long irq_flags;
struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
struct page *p, *tmp;
if (pool == NULL) {
/* No pool for this memory type so free the pages */
list_for_each_entry_safe(p, tmp, pages, lru) {
__free_page(p);
}
/* Make the pages list empty */
INIT_LIST_HEAD(pages);
return;
}
if (page_count == 0) {
list_for_each_entry_safe(p, tmp, pages, lru) {
++page_count;
}
}
spin_lock_irqsave(&pool->lock, irq_flags);
list_splice_init(pages, &pool->list);
pool->npages += page_count;
/* Check that we don't go over the pool limit */
page_count = 0;
if (pool->npages > _manager->options.max_size) {
page_count = pool->npages - _manager->options.max_size;
/* free at least NUM_PAGES_TO_ALLOC number of pages
* to reduce calls to set_memory_wb */
if (page_count < NUM_PAGES_TO_ALLOC)
page_count = NUM_PAGES_TO_ALLOC;
}
spin_unlock_irqrestore(&pool->lock, irq_flags);
if (page_count)
ttm_page_pool_free(pool, page_count);
}
static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags, static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags,
char *name) char *name)
{ {
...@@ -836,6 +854,62 @@ void ttm_page_alloc_fini(void) ...@@ -836,6 +854,62 @@ void ttm_page_alloc_fini(void)
_manager = NULL; _manager = NULL;
} }
int ttm_pool_populate(struct ttm_tt *ttm)
{
struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
unsigned i;
int ret;
if (ttm->state != tt_unpopulated)
return 0;
for (i = 0; i < ttm->num_pages; ++i) {
ret = ttm_get_pages(&ttm->pages[i], 1,
ttm->page_flags,
ttm->caching_state);
if (ret != 0) {
ttm_pool_unpopulate(ttm);
return -ENOMEM;
}
ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
false, false);
if (unlikely(ret != 0)) {
ttm_pool_unpopulate(ttm);
return -ENOMEM;
}
}
if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
ret = ttm_tt_swapin(ttm);
if (unlikely(ret != 0)) {
ttm_pool_unpopulate(ttm);
return ret;
}
}
ttm->state = tt_unbound;
return 0;
}
EXPORT_SYMBOL(ttm_pool_populate);
void ttm_pool_unpopulate(struct ttm_tt *ttm)
{
unsigned i;
for (i = 0; i < ttm->num_pages; ++i) {
if (ttm->pages[i]) {
ttm_mem_global_free_page(ttm->glob->mem_glob,
ttm->pages[i]);
ttm_put_pages(&ttm->pages[i], 1,
ttm->page_flags,
ttm->caching_state);
}
}
ttm->state = tt_unpopulated;
}
EXPORT_SYMBOL(ttm_pool_unpopulate);
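A driver that keeps using the plain page pool can point its ttm_bo_driver callbacks straight at these two helpers (the vmwgfx hunk further down does exactly that). A minimal sketch; the mydrv_* names are illustrative only:
static struct ttm_bo_driver mydrv_bo_driver = {
        .ttm_tt_create = &mydrv_ttm_tt_create,     /* driver-specific, hypothetical */
        .ttm_tt_populate = &ttm_pool_populate,
        .ttm_tt_unpopulate = &ttm_pool_unpopulate,
        /* ... remaining callbacks unchanged ... */
};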
int ttm_page_alloc_debugfs(struct seq_file *m, void *data) int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
{ {
struct ttm_page_pool *p; struct ttm_page_pool *p;
......
/*
* Copyright 2011 (c) Oracle Corp.
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sub license,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Author: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
*/
/*
* A simple DMA pool loosely based on dmapool.c. It has certain advantages
* over the DMA pools:
* - Pool collects recently freed pages for reuse (and hooks up to
* the shrinker).
* - Tracks currently in use pages.
* - Tracks whether the page is UC, WC or cached (and reverts to WB
* when freed).
*/
#include <linux/dma-mapping.h>
#include <linux/list.h>
#include <linux/seq_file.h> /* for seq_printf */
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/highmem.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/atomic.h>
#include <linux/device.h>
#include <linux/kthread.h>
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_page_alloc.h"
#ifdef TTM_HAS_AGP
#include <asm/agp.h>
#endif
#define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
#define SMALL_ALLOCATION 4
#define FREE_ALL_PAGES (~0U)
/* times are in msecs */
#define IS_UNDEFINED (0)
#define IS_WC (1<<1)
#define IS_UC (1<<2)
#define IS_CACHED (1<<3)
#define IS_DMA32 (1<<4)
enum pool_type {
POOL_IS_UNDEFINED,
POOL_IS_WC = IS_WC,
POOL_IS_UC = IS_UC,
POOL_IS_CACHED = IS_CACHED,
POOL_IS_WC_DMA32 = IS_WC | IS_DMA32,
POOL_IS_UC_DMA32 = IS_UC | IS_DMA32,
POOL_IS_CACHED_DMA32 = IS_CACHED | IS_DMA32,
};
/*
* The pool structure. There are usually six pools:
* - generic (not restricted to DMA32):
* - write combined, uncached, cached.
* - dma32 (up to 2^32 - so up to 4GB):
* - write combined, uncached, cached.
* for each 'struct device'. The 'cached' pool is for pages that are actively used.
* The other ones can be shrunk by the shrinker API if necessary.
* @pools: The 'struct device->dma_pools' link.
* @type: Type of the pool
* @lock: Protects the inuse_list and free_list from concurrent access. Must be
* used with irqsave/irqrestore variants because the pool allocator may be called
* from delayed work.
* @inuse_list: Pool of pages that are in use. The order is very important:
* it matches the order in which the TTM pages were put back.
* @free_list: Pool of pages that are free to be used. No order requirements.
* @dev: The device that is associated with these pools.
* @size: Size used during DMA allocation.
* @npages_free: Count of available pages for re-use.
* @npages_in_use: Count of pages that are in use.
* @nfrees: Stats when pool is shrinking.
* @nrefills: Stats when the pool is grown.
* @gfp_flags: Flags to pass for alloc_page.
* @name: Name of the pool.
* @dev_name: Name derived from dev - similar to how dev_info works.
* Used during shutdown as the dev_info during release is unavailable.
*/
struct dma_pool {
struct list_head pools; /* The 'struct device->dma_pools link */
enum pool_type type;
spinlock_t lock;
struct list_head inuse_list;
struct list_head free_list;
struct device *dev;
unsigned size;
unsigned npages_free;
unsigned npages_in_use;
unsigned long nfrees; /* Stats when shrunk. */
unsigned long nrefills; /* Stats when grown. */
gfp_t gfp_flags;
char name[13]; /* "cached dma32" */
char dev_name[64]; /* Constructed from dev */
};
/*
* The accounting structure that keeps track of an allocated page along with
* its DMA address.
* @page_list: The link to the 'page_list' in 'struct dma_pool'.
* @vaddr: The virtual address of the page
* @dma: The bus address of the page. If the page is not allocated
* via the DMA API, it will be -1.
*/
struct dma_page {
struct list_head page_list;
void *vaddr;
struct page *p;
dma_addr_t dma;
};
/*
* Limits for the pool. They are handled without locks because the only place
* where they may change is the sysfs store. They won't have an immediate effect anyway
* so forcing serialization to access them is pointless.
*/
struct ttm_pool_opts {
unsigned alloc_size;
unsigned max_size;
unsigned small;
};
/*
* Contains the list of all of the 'struct device' and their corresponding
* DMA pools. Guarded by _manager->lock.
* @pools: The link to 'struct ttm_pool_manager->pools'
* @dev: The 'struct device' associated with the 'pool'
* @pool: The 'struct dma_pool' associated with the 'dev'
*/
struct device_pools {
struct list_head pools;
struct device *dev;
struct dma_pool *pool;
};
/*
* struct ttm_pool_manager - Holds memory pools for fast allocation
*
* @lock: Lock used when adding/removing from pools
* @pools: List of 'struct device' and 'struct dma_pool' tuples.
* @options: Limits for the pool.
* @npools: Total number of pools in existence.
* @mm_shrink: The structure used by [un]register_shrinker().
*/
struct ttm_pool_manager {
struct mutex lock;
struct list_head pools;
struct ttm_pool_opts options;
unsigned npools;
struct shrinker mm_shrink;
struct kobject kobj;
};
static struct ttm_pool_manager *_manager;
static struct attribute ttm_page_pool_max = {
.name = "pool_max_size",
.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_small = {
.name = "pool_small_allocation",
.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_alloc_size = {
.name = "pool_allocation_size",
.mode = S_IRUGO | S_IWUSR
};
static struct attribute *ttm_pool_attrs[] = {
&ttm_page_pool_max,
&ttm_page_pool_small,
&ttm_page_pool_alloc_size,
NULL
};
static void ttm_pool_kobj_release(struct kobject *kobj)
{
struct ttm_pool_manager *m =
container_of(kobj, struct ttm_pool_manager, kobj);
kfree(m);
}
static ssize_t ttm_pool_store(struct kobject *kobj, struct attribute *attr,
const char *buffer, size_t size)
{
struct ttm_pool_manager *m =
container_of(kobj, struct ttm_pool_manager, kobj);
int chars;
unsigned val;
chars = sscanf(buffer, "%u", &val);
if (chars == 0)
return size;
/* Convert kb to number of pages */
val = val / (PAGE_SIZE >> 10);
if (attr == &ttm_page_pool_max)
m->options.max_size = val;
else if (attr == &ttm_page_pool_small)
m->options.small = val;
else if (attr == &ttm_page_pool_alloc_size) {
if (val > NUM_PAGES_TO_ALLOC*8) {
printk(KERN_ERR TTM_PFX
"Setting allocation size to %lu "
"is not allowed. Recommended size is "
"%lu\n",
NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
return size;
} else if (val > NUM_PAGES_TO_ALLOC) {
printk(KERN_WARNING TTM_PFX
"Setting allocation size to "
"larger than %lu is not recommended.\n",
NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
}
m->options.alloc_size = val;
}
return size;
}
static ssize_t ttm_pool_show(struct kobject *kobj, struct attribute *attr,
char *buffer)
{
struct ttm_pool_manager *m =
container_of(kobj, struct ttm_pool_manager, kobj);
unsigned val = 0;
if (attr == &ttm_page_pool_max)
val = m->options.max_size;
else if (attr == &ttm_page_pool_small)
val = m->options.small;
else if (attr == &ttm_page_pool_alloc_size)
val = m->options.alloc_size;
val = val * (PAGE_SIZE >> 10);
return snprintf(buffer, PAGE_SIZE, "%u\n", val);
}
static const struct sysfs_ops ttm_pool_sysfs_ops = {
.show = &ttm_pool_show,
.store = &ttm_pool_store,
};
static struct kobj_type ttm_pool_kobj_type = {
.release = &ttm_pool_kobj_release,
.sysfs_ops = &ttm_pool_sysfs_ops,
.default_attrs = ttm_pool_attrs,
};
#ifndef CONFIG_X86
static int set_pages_array_wb(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
int i;
for (i = 0; i < addrinarray; i++)
unmap_page_from_agp(pages[i]);
#endif
return 0;
}
static int set_pages_array_wc(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
int i;
for (i = 0; i < addrinarray; i++)
map_page_into_agp(pages[i]);
#endif
return 0;
}
static int set_pages_array_uc(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
int i;
for (i = 0; i < addrinarray; i++)
map_page_into_agp(pages[i]);
#endif
return 0;
}
#endif /* for !CONFIG_X86 */
static int ttm_set_pages_caching(struct dma_pool *pool,
struct page **pages, unsigned cpages)
{
int r = 0;
/* Set page caching */
if (pool->type & IS_UC) {
r = set_pages_array_uc(pages, cpages);
if (r)
pr_err(TTM_PFX
"%s: Failed to set %d pages to uc!\n",
pool->dev_name, cpages);
}
if (pool->type & IS_WC) {
r = set_pages_array_wc(pages, cpages);
if (r)
pr_err(TTM_PFX
"%s: Failed to set %d pages to wc!\n",
pool->dev_name, cpages);
}
return r;
}
static void __ttm_dma_free_page(struct dma_pool *pool, struct dma_page *d_page)
{
dma_addr_t dma = d_page->dma;
dma_free_coherent(pool->dev, pool->size, d_page->vaddr, dma);
kfree(d_page);
d_page = NULL;
}
static struct dma_page *__ttm_dma_alloc_page(struct dma_pool *pool)
{
struct dma_page *d_page;
d_page = kmalloc(sizeof(struct dma_page), GFP_KERNEL);
if (!d_page)
return NULL;
d_page->vaddr = dma_alloc_coherent(pool->dev, pool->size,
&d_page->dma,
pool->gfp_flags);
if (d_page->vaddr)
d_page->p = virt_to_page(d_page->vaddr);
else {
kfree(d_page);
d_page = NULL;
}
return d_page;
}
static enum pool_type ttm_to_type(int flags, enum ttm_caching_state cstate)
{
enum pool_type type = IS_UNDEFINED;
if (flags & TTM_PAGE_FLAG_DMA32)
type |= IS_DMA32;
if (cstate == tt_cached)
type |= IS_CACHED;
else if (cstate == tt_uncached)
type |= IS_UC;
else
type |= IS_WC;
return type;
}
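As a worked example (a sketch, not part of the patch): an uncached TTM allocated with TTM_PAGE_FLAG_DMA32 resolves to the "uc dma32" pool:
enum pool_type type;

type = ttm_to_type(TTM_PAGE_FLAG_DMA32, tt_uncached);
/* type == (IS_UC | IS_DMA32) == POOL_IS_UC_DMA32 */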
static void ttm_pool_update_free_locked(struct dma_pool *pool,
unsigned freed_pages)
{
pool->npages_free -= freed_pages;
pool->nfrees += freed_pages;
}
/* set memory back to wb and free the pages. */
static void ttm_dma_pages_put(struct dma_pool *pool, struct list_head *d_pages,
struct page *pages[], unsigned npages)
{
struct dma_page *d_page, *tmp;
if (npages && set_pages_array_wb(pages, npages))
pr_err(TTM_PFX "%s: Failed to set %d pages to wb!\n",
pool->dev_name, npages);
list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
list_del(&d_page->page_list);
__ttm_dma_free_page(pool, d_page);
}
}
static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
{
if (set_pages_array_wb(&d_page->p, 1))
pr_err(TTM_PFX "%s: Failed to set %d pages to wb!\n",
pool->dev_name, 1);
list_del(&d_page->page_list);
__ttm_dma_free_page(pool, d_page);
}
/*
* Free pages from pool.
*
* To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
* number of pages in one go.
*
* @pool: The pool to free the pages from.
* @nr_free: The number of pages to free; FREE_ALL_PAGES frees the whole pool.
**/
static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free)
{
unsigned long irq_flags;
struct dma_page *dma_p, *tmp;
struct page **pages_to_free;
struct list_head d_pages;
unsigned freed_pages = 0,
npages_to_free = nr_free;
if (NUM_PAGES_TO_ALLOC < nr_free)
npages_to_free = NUM_PAGES_TO_ALLOC;
#if 0
if (nr_free > 1) {
pr_debug("%s: (%s:%d) Attempting to free %d (%d) pages\n",
pool->dev_name, pool->name, current->pid,
npages_to_free, nr_free);
}
#endif
pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
GFP_KERNEL);
if (!pages_to_free) {
pr_err(TTM_PFX
"%s: Failed to allocate memory for pool free operation.\n",
pool->dev_name);
return 0;
}
INIT_LIST_HEAD(&d_pages);
restart:
spin_lock_irqsave(&pool->lock, irq_flags);
/* We're picking the oldest ones off the list */
list_for_each_entry_safe_reverse(dma_p, tmp, &pool->free_list,
page_list) {
if (freed_pages >= npages_to_free)
break;
/* Move the dma_page from one list to another. */
list_move(&dma_p->page_list, &d_pages);
pages_to_free[freed_pages++] = dma_p->p;
/* We can only remove NUM_PAGES_TO_ALLOC at a time. */
if (freed_pages >= NUM_PAGES_TO_ALLOC) {
ttm_pool_update_free_locked(pool, freed_pages);
/**
* Because changing page caching is costly
* we unlock the pool to prevent stalling.
*/
spin_unlock_irqrestore(&pool->lock, irq_flags);
ttm_dma_pages_put(pool, &d_pages, pages_to_free,
freed_pages);
INIT_LIST_HEAD(&d_pages);
if (likely(nr_free != FREE_ALL_PAGES))
nr_free -= freed_pages;
if (NUM_PAGES_TO_ALLOC >= nr_free)
npages_to_free = nr_free;
else
npages_to_free = NUM_PAGES_TO_ALLOC;
freed_pages = 0;
/* free all so restart the processing */
if (nr_free)
goto restart;
/* Not allowed to fall through or break because the
* code that follows expects the spinlock to be held,
* while we have already dropped it here.
*/
goto out;
}
}
/* remove range of pages from the pool */
if (freed_pages) {
ttm_pool_update_free_locked(pool, freed_pages);
nr_free -= freed_pages;
}
spin_unlock_irqrestore(&pool->lock, irq_flags);
if (freed_pages)
ttm_dma_pages_put(pool, &d_pages, pages_to_free, freed_pages);
out:
kfree(pages_to_free);
return nr_free;
}
static void ttm_dma_free_pool(struct device *dev, enum pool_type type)
{
struct device_pools *p;
struct dma_pool *pool;
if (!dev)
return;
mutex_lock(&_manager->lock);
list_for_each_entry_reverse(p, &_manager->pools, pools) {
if (p->dev != dev)
continue;
pool = p->pool;
if (pool->type != type)
continue;
list_del(&p->pools);
kfree(p);
_manager->npools--;
break;
}
list_for_each_entry_reverse(pool, &dev->dma_pools, pools) {
if (pool->type != type)
continue;
/* Takes a spinlock.. */
ttm_dma_page_pool_free(pool, FREE_ALL_PAGES);
WARN_ON(((pool->npages_in_use + pool->npages_free) != 0));
/* This code path is called after _all_ references to the
* struct device have been dropped - so nobody should be
* touching it. In case somebody is trying to _add_ one, we
* are guarded by the mutex. */
list_del(&pool->pools);
kfree(pool);
break;
}
mutex_unlock(&_manager->lock);
}
/*
* This destructor is run when the 'struct device' is freed,
* although the pool might have already been freed earlier.
*/
static void ttm_dma_pool_release(struct device *dev, void *res)
{
struct dma_pool *pool = *(struct dma_pool **)res;
if (pool)
ttm_dma_free_pool(dev, pool->type);
}
static int ttm_dma_pool_match(struct device *dev, void *res, void *match_data)
{
return *(struct dma_pool **)res == match_data;
}
static struct dma_pool *ttm_dma_pool_init(struct device *dev, gfp_t flags,
enum pool_type type)
{
char *n[] = {"wc", "uc", "cached", " dma32", "unknown",};
enum pool_type t[] = {IS_WC, IS_UC, IS_CACHED, IS_DMA32, IS_UNDEFINED};
struct device_pools *sec_pool = NULL;
struct dma_pool *pool = NULL, **ptr;
unsigned i;
int ret = -ENODEV;
char *p;
if (!dev)
return NULL;
ptr = devres_alloc(ttm_dma_pool_release, sizeof(*ptr), GFP_KERNEL);
if (!ptr)
return NULL;
ret = -ENOMEM;
pool = kmalloc_node(sizeof(struct dma_pool), GFP_KERNEL,
dev_to_node(dev));
if (!pool)
goto err_mem;
sec_pool = kmalloc_node(sizeof(struct device_pools), GFP_KERNEL,
dev_to_node(dev));
if (!sec_pool)
goto err_mem;
INIT_LIST_HEAD(&sec_pool->pools);
sec_pool->dev = dev;
sec_pool->pool = pool;
INIT_LIST_HEAD(&pool->free_list);
INIT_LIST_HEAD(&pool->inuse_list);
INIT_LIST_HEAD(&pool->pools);
spin_lock_init(&pool->lock);
pool->dev = dev;
pool->npages_free = pool->npages_in_use = 0;
pool->nfrees = 0;
pool->gfp_flags = flags;
pool->size = PAGE_SIZE;
pool->type = type;
pool->nrefills = 0;
p = pool->name;
for (i = 0; i < 5; i++) {
if (type & t[i]) {
p += snprintf(p, sizeof(pool->name) - (p - pool->name),
"%s", n[i]);
}
}
*p = 0;
/* We copy the name for pr_* calls because by the time the pool is
* torn down the kobj->name may already have been deallocated. */
snprintf(pool->dev_name, sizeof(pool->dev_name), "%s %s",
dev_driver_string(dev), dev_name(dev));
mutex_lock(&_manager->lock);
/* You can get the dma_pool from either the global: */
list_add(&sec_pool->pools, &_manager->pools);
_manager->npools++;
/* or from 'struct device': */
list_add(&pool->pools, &dev->dma_pools);
mutex_unlock(&_manager->lock);
*ptr = pool;
devres_add(dev, ptr);
return pool;
err_mem:
devres_free(ptr);
kfree(sec_pool);
kfree(pool);
return ERR_PTR(ret);
}
static struct dma_pool *ttm_dma_find_pool(struct device *dev,
enum pool_type type)
{
struct dma_pool *pool, *tmp, *found = NULL;
if (type == IS_UNDEFINED)
return found;
/* NB: We iterate on the 'struct device' which has no spinlock, but
* it does have a kref which we have taken. The kref is taken during
* graphics driver loading - in drm_pci_init it calls either
* pci_dev_get or pci_register_driver, which both end up taking a kref
* on 'struct device'.
*
* On teardown, the graphics drivers end up quiescing the TTM (put_pages)
* and call the devres destructors: ttm_dma_pool_release. The nice thing
* is that at that point in time there are no pages associated with the
* driver, so this function will not be called.
*/
list_for_each_entry_safe(pool, tmp, &dev->dma_pools, pools) {
if (pool->type != type)
continue;
found = pool;
break;
}
return found;
}
/*
* Free the pages that failed to change the caching state. If there
* are pages that have already changed their caching state, put them back
* into the pool.
*/
static void ttm_dma_handle_caching_state_failure(struct dma_pool *pool,
struct list_head *d_pages,
struct page **failed_pages,
unsigned cpages)
{
struct dma_page *d_page, *tmp;
struct page *p;
unsigned i = 0;
p = failed_pages[0];
if (!p)
return;
/* Find the failed page. */
list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
if (d_page->p != p)
continue;
/* .. and then progress over the full list. */
list_del(&d_page->page_list);
__ttm_dma_free_page(pool, d_page);
if (++i < cpages)
p = failed_pages[i];
else
break;
}
}
/*
* Allocate 'count' pages, wrap each of them in a struct dma_page, set the
* requested caching state and add them to the 'd_pages' list.
* We return zero for success, and negative numbers as errors.
*/
static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool,
struct list_head *d_pages,
unsigned count)
{
struct page **caching_array;
struct dma_page *dma_p;
struct page *p;
int r = 0;
unsigned i, cpages;
unsigned max_cpages = min(count,
(unsigned)(PAGE_SIZE/sizeof(struct page *)));
/* allocate array for page caching change */
caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
if (!caching_array) {
pr_err(TTM_PFX
"%s: Unable to allocate table for new pages.",
pool->dev_name);
return -ENOMEM;
}
if (count > 1) {
pr_debug("%s: (%s:%d) Getting %d pages\n",
pool->dev_name, pool->name, current->pid,
count);
}
for (i = 0, cpages = 0; i < count; ++i) {
dma_p = __ttm_dma_alloc_page(pool);
if (!dma_p) {
pr_err(TTM_PFX "%s: Unable to get page %u.\n",
pool->dev_name, i);
/* store already allocated pages in the pool after
* setting the caching state */
if (cpages) {
r = ttm_set_pages_caching(pool, caching_array,
cpages);
if (r)
ttm_dma_handle_caching_state_failure(
pool, d_pages, caching_array,
cpages);
}
r = -ENOMEM;
goto out;
}
p = dma_p->p;
#ifdef CONFIG_HIGHMEM
/* gfp flags of highmem pages should never include dma32, so
* we should be fine in that case
*/
if (!PageHighMem(p))
#endif
{
caching_array[cpages++] = p;
if (cpages == max_cpages) {
/* Note: Cannot hold the spinlock */
r = ttm_set_pages_caching(pool, caching_array,
cpages);
if (r) {
ttm_dma_handle_caching_state_failure(
pool, d_pages, caching_array,
cpages);
goto out;
}
cpages = 0;
}
}
list_add(&dma_p->page_list, d_pages);
}
if (cpages) {
r = ttm_set_pages_caching(pool, caching_array, cpages);
if (r)
ttm_dma_handle_caching_state_failure(pool, d_pages,
caching_array, cpages);
}
out:
kfree(caching_array);
return r;
}
/*
* @return the number of pages known to be available on the pool's free list;
* zero means no page could be provided.
*/
static int ttm_dma_page_pool_fill_locked(struct dma_pool *pool,
unsigned long *irq_flags)
{
unsigned count = _manager->options.small;
int r = pool->npages_free;
if (count > pool->npages_free) {
struct list_head d_pages;
INIT_LIST_HEAD(&d_pages);
spin_unlock_irqrestore(&pool->lock, *irq_flags);
/* Allocate the missing pages with the pool lock dropped;
* returns zero on success or a negative error. */
r = ttm_dma_pool_alloc_new_pages(pool, &d_pages, count);
spin_lock_irqsave(&pool->lock, *irq_flags);
if (!r) {
/* Add the fresh to the end.. */
list_splice(&d_pages, &pool->free_list);
++pool->nrefills;
pool->npages_free += count;
r = count;
} else {
struct dma_page *d_page;
unsigned cpages = 0;
pr_err(TTM_PFX "%s: Failed to fill %s pool (r:%d)!\n",
pool->dev_name, pool->name, r);
list_for_each_entry(d_page, &d_pages, page_list) {
cpages++;
}
list_splice_tail(&d_pages, &pool->free_list);
pool->npages_free += cpages;
r = cpages;
}
}
return r;
}
/*
* @return zero on success, or -ENOMEM if no page could be provided.
* The populate list is actually a stack (not that it matters, as TTM
* allocates one page at a time).
*/
static int ttm_dma_pool_get_pages(struct dma_pool *pool,
struct ttm_dma_tt *ttm_dma,
unsigned index)
{
struct dma_page *d_page;
struct ttm_tt *ttm = &ttm_dma->ttm;
unsigned long irq_flags;
int count, r = -ENOMEM;
spin_lock_irqsave(&pool->lock, irq_flags);
count = ttm_dma_page_pool_fill_locked(pool, &irq_flags);
if (count) {
d_page = list_first_entry(&pool->free_list, struct dma_page, page_list);
ttm->pages[index] = d_page->p;
ttm_dma->dma_address[index] = d_page->dma;
list_move_tail(&d_page->page_list, &ttm_dma->pages_list);
r = 0;
pool->npages_in_use += 1;
pool->npages_free -= 1;
}
spin_unlock_irqrestore(&pool->lock, irq_flags);
return r;
}
/*
* On success the TTM's pages array holds a correctly cached page for every
* entry. On failure a negative error code (-ENOMEM, etc) is returned.
*/
int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev)
{
struct ttm_tt *ttm = &ttm_dma->ttm;
struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
struct dma_pool *pool;
enum pool_type type;
unsigned i;
gfp_t gfp_flags;
int ret;
if (ttm->state != tt_unpopulated)
return 0;
type = ttm_to_type(ttm->page_flags, ttm->caching_state);
if (ttm->page_flags & TTM_PAGE_FLAG_DMA32)
gfp_flags = GFP_USER | GFP_DMA32;
else
gfp_flags = GFP_HIGHUSER;
if (ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
gfp_flags |= __GFP_ZERO;
pool = ttm_dma_find_pool(dev, type);
if (!pool) {
pool = ttm_dma_pool_init(dev, gfp_flags, type);
if (IS_ERR_OR_NULL(pool)) {
return -ENOMEM;
}
}
INIT_LIST_HEAD(&ttm_dma->pages_list);
for (i = 0; i < ttm->num_pages; ++i) {
ret = ttm_dma_pool_get_pages(pool, ttm_dma, i);
if (ret != 0) {
ttm_dma_unpopulate(ttm_dma, dev);
return -ENOMEM;
}
ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
false, false);
if (unlikely(ret != 0)) {
ttm_dma_unpopulate(ttm_dma, dev);
return -ENOMEM;
}
}
if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
ret = ttm_tt_swapin(ttm);
if (unlikely(ret != 0)) {
ttm_dma_unpopulate(ttm_dma, dev);
return ret;
}
}
ttm->state = tt_unbound;
return 0;
}
EXPORT_SYMBOL_GPL(ttm_dma_populate);
/* Get a good estimate of how many pages are free in the pools */
static int ttm_dma_pool_get_num_unused_pages(void)
{
struct device_pools *p;
unsigned total = 0;
mutex_lock(&_manager->lock);
list_for_each_entry(p, &_manager->pools, pools) {
if (p)
total += p->pool->npages_free;
}
mutex_unlock(&_manager->lock);
return total;
}
/* Put all pages from the pages list back into the correct pool to wait for reuse */
void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
{
struct ttm_tt *ttm = &ttm_dma->ttm;
struct dma_pool *pool;
struct dma_page *d_page, *next;
enum pool_type type;
bool is_cached = false;
unsigned count = 0, i;
unsigned long irq_flags;
type = ttm_to_type(ttm->page_flags, ttm->caching_state);
pool = ttm_dma_find_pool(dev, type);
if (!pool) {
WARN_ON(!pool);
return;
}
is_cached = (ttm_dma_find_pool(pool->dev,
ttm_to_type(ttm->page_flags, tt_cached)) == pool);
/* make sure the pages array matches the list and count the number of pages */
list_for_each_entry(d_page, &ttm_dma->pages_list, page_list) {
ttm->pages[count] = d_page->p;
count++;
}
spin_lock_irqsave(&pool->lock, irq_flags);
pool->npages_in_use -= count;
if (is_cached) {
pool->nfrees += count;
} else {
pool->npages_free += count;
list_splice(&ttm_dma->pages_list, &pool->free_list);
if (pool->npages_free > _manager->options.max_size) {
count = pool->npages_free - _manager->options.max_size;
}
}
spin_unlock_irqrestore(&pool->lock, irq_flags);
if (is_cached) {
list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list, page_list) {
ttm_mem_global_free_page(ttm->glob->mem_glob,
d_page->p);
ttm_dma_page_put(pool, d_page);
}
} else {
for (i = 0; i < count; i++) {
ttm_mem_global_free_page(ttm->glob->mem_glob,
ttm->pages[i]);
}
}
INIT_LIST_HEAD(&ttm_dma->pages_list);
for (i = 0; i < ttm->num_pages; i++) {
ttm->pages[i] = NULL;
ttm_dma->dma_address[i] = 0;
}
/* shrink pool if necessary */
if (count)
ttm_dma_page_pool_free(pool, count);
ttm->state = tt_unpopulated;
}
EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
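Together, ttm_dma_populate() and ttm_dma_unpopulate() are what a driver's ttm_tt_populate/ttm_tt_unpopulate callbacks dispatch to when coherent pages are wanted. A sketch under the assumption of a driver-private wrapper that embeds struct ttm_dma_tt and keeps a struct device pointer; the mydrv_* names and the dispatch policy are illustrative only:
struct mydrv_ttm_tt {
        struct ttm_dma_tt ttm_dma;
        struct device *dev;
};

static int mydrv_ttm_tt_populate(struct ttm_tt *ttm)
{
        struct mydrv_ttm_tt *gtt =
                container_of(ttm, struct mydrv_ttm_tt, ttm_dma.ttm);

        /* Illustrative policy: use the DMA pool whenever a device is known,
         * e.g. because bounce buffering would otherwise be needed. */
        if (gtt->dev)
                return ttm_dma_populate(&gtt->ttm_dma, gtt->dev);
        return ttm_pool_populate(ttm);
}

static void mydrv_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
        struct mydrv_ttm_tt *gtt =
                container_of(ttm, struct mydrv_ttm_tt, ttm_dma.ttm);

        if (gtt->dev)
                ttm_dma_unpopulate(&gtt->ttm_dma, gtt->dev);
        else
                ttm_pool_unpopulate(ttm);
}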
/**
* Callback for mm to request the pool to reduce the number of pages held.
*/
static int ttm_dma_pool_mm_shrink(struct shrinker *shrink,
struct shrink_control *sc)
{
static atomic_t start_pool = ATOMIC_INIT(0);
unsigned idx = 0;
unsigned pool_offset = atomic_add_return(1, &start_pool);
unsigned shrink_pages = sc->nr_to_scan;
struct device_pools *p;
if (list_empty(&_manager->pools))
return 0;
mutex_lock(&_manager->lock);
pool_offset = pool_offset % _manager->npools;
list_for_each_entry(p, &_manager->pools, pools) {
unsigned nr_free;
if (!p->dev)
continue;
if (shrink_pages == 0)
break;
/* Do it in round-robin fashion. */
if (++idx < pool_offset)
continue;
nr_free = shrink_pages;
shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free);
pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
p->pool->dev_name, p->pool->name, current->pid, nr_free,
shrink_pages);
}
mutex_unlock(&_manager->lock);
/* return estimated number of unused pages in pool */
return ttm_dma_pool_get_num_unused_pages();
}
static void ttm_dma_pool_mm_shrink_init(struct ttm_pool_manager *manager)
{
manager->mm_shrink.shrink = &ttm_dma_pool_mm_shrink;
manager->mm_shrink.seeks = 1;
register_shrinker(&manager->mm_shrink);
}
static void ttm_dma_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
{
unregister_shrinker(&manager->mm_shrink);
}
int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
{
int ret = -ENOMEM;
WARN_ON(_manager);
printk(KERN_INFO TTM_PFX "Initializing DMA pool allocator.\n");
_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
if (!_manager)
goto err_manager;
mutex_init(&_manager->lock);
INIT_LIST_HEAD(&_manager->pools);
_manager->options.max_size = max_pages;
_manager->options.small = SMALL_ALLOCATION;
_manager->options.alloc_size = NUM_PAGES_TO_ALLOC;
/* This takes care of auto-freeing the _manager */
ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
&glob->kobj, "dma_pool");
if (unlikely(ret != 0)) {
kobject_put(&_manager->kobj);
goto err;
}
ttm_dma_pool_mm_shrink_init(_manager);
return 0;
err_manager:
kfree(_manager);
_manager = NULL;
err:
return ret;
}
void ttm_dma_page_alloc_fini(void)
{
struct device_pools *p, *t;
printk(KERN_INFO TTM_PFX "Finalizing DMA pool allocator.\n");
ttm_dma_pool_mm_shrink_fini(_manager);
list_for_each_entry_safe_reverse(p, t, &_manager->pools, pools) {
dev_dbg(p->dev, "(%s:%d) Freeing.\n", p->pool->name,
current->pid);
WARN_ON(devres_destroy(p->dev, ttm_dma_pool_release,
ttm_dma_pool_match, p->pool));
ttm_dma_free_pool(p->dev, p->pool->type);
}
kobject_put(&_manager->kobj);
_manager = NULL;
}
int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
{
struct device_pools *p;
struct dma_pool *pool = NULL;
char *h[] = {"pool", "refills", "pages freed", "inuse", "available",
"name", "virt", "busaddr"};
if (!_manager) {
seq_printf(m, "No pool allocator running.\n");
return 0;
}
seq_printf(m, "%13s %12s %13s %8s %8s %8s\n",
h[0], h[1], h[2], h[3], h[4], h[5]);
mutex_lock(&_manager->lock);
list_for_each_entry(p, &_manager->pools, pools) {
struct device *dev = p->dev;
if (!dev)
continue;
pool = p->pool;
seq_printf(m, "%13s %12ld %13ld %8d %8d %8s\n",
pool->name, pool->nrefills,
pool->nfrees, pool->npages_in_use,
pool->npages_free,
pool->dev_name);
}
mutex_unlock(&_manager->lock);
return 0;
}
EXPORT_SYMBOL_GPL(ttm_dma_page_alloc_debugfs);
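ttm_dma_page_alloc_debugfs() has the signature of a seq_file show routine, so exposing it needs only the usual single_open() boilerplate. A sketch; the file name, parent dentry and mydrv_* names are illustrative, and <linux/debugfs.h> plus <linux/seq_file.h> are assumed:
static int mydrv_ttm_dma_pool_open(struct inode *inode, struct file *file)
{
        return single_open(file, ttm_dma_page_alloc_debugfs, NULL);
}

static const struct file_operations mydrv_ttm_dma_pool_fops = {
        .owner = THIS_MODULE,
        .open = mydrv_ttm_dma_pool_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

/* e.g. debugfs_create_file("ttm_dma_page_pool", S_IRUGO, parent_dentry,
 *                          NULL, &mydrv_ttm_dma_pool_fops); */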
...@@ -43,139 +43,20 @@ ...@@ -43,139 +43,20 @@
#include "ttm/ttm_placement.h" #include "ttm/ttm_placement.h"
#include "ttm/ttm_page_alloc.h" #include "ttm/ttm_page_alloc.h"
static int ttm_tt_swapin(struct ttm_tt *ttm);
/** /**
* Allocates storage for pointers to the pages that back the ttm. * Allocates storage for pointers to the pages that back the ttm.
*/ */
static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm) static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{ {
ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(*ttm->pages)); ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(void*));
ttm->dma_address = drm_calloc_large(ttm->num_pages,
sizeof(*ttm->dma_address));
}
static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
{
drm_free_large(ttm->pages);
ttm->pages = NULL;
drm_free_large(ttm->dma_address);
ttm->dma_address = NULL;
}
static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
{
int write;
int dirty;
struct page *page;
int i;
struct ttm_backend *be = ttm->be;
BUG_ON(!(ttm->page_flags & TTM_PAGE_FLAG_USER));
write = ((ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0);
dirty = ((ttm->page_flags & TTM_PAGE_FLAG_USER_DIRTY) != 0);
if (be)
be->func->clear(be);
for (i = 0; i < ttm->num_pages; ++i) {
page = ttm->pages[i];
if (page == NULL)
continue;
if (page == ttm->dummy_read_page) {
BUG_ON(write);
continue;
}
if (write && dirty && !PageReserved(page))
set_page_dirty_lock(page);
ttm->pages[i] = NULL;
ttm_mem_global_free(ttm->glob->mem_glob, PAGE_SIZE);
put_page(page);
}
ttm->state = tt_unpopulated;
ttm->first_himem_page = ttm->num_pages;
ttm->last_lomem_page = -1;
}
static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
{
struct page *p;
struct list_head h;
struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
int ret;
while (NULL == (p = ttm->pages[index])) {
INIT_LIST_HEAD(&h);
ret = ttm_get_pages(&h, ttm->page_flags, ttm->caching_state, 1,
&ttm->dma_address[index]);
if (ret != 0)
return NULL;
p = list_first_entry(&h, struct page, lru);
ret = ttm_mem_global_alloc_page(mem_glob, p, false, false);
if (unlikely(ret != 0))
goto out_err;
if (PageHighMem(p))
ttm->pages[--ttm->first_himem_page] = p;
else
ttm->pages[++ttm->last_lomem_page] = p;
}
return p;
out_err:
put_page(p);
return NULL;
}
struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index)
{
int ret;
if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
ret = ttm_tt_swapin(ttm);
if (unlikely(ret != 0))
return NULL;
}
return __ttm_tt_get_page(ttm, index);
} }
int ttm_tt_populate(struct ttm_tt *ttm) static void ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
{ {
struct page *page; ttm->ttm.pages = drm_calloc_large(ttm->ttm.num_pages, sizeof(void*));
unsigned long i; ttm->dma_address = drm_calloc_large(ttm->ttm.num_pages,
struct ttm_backend *be; sizeof(*ttm->dma_address));
int ret;
if (ttm->state != tt_unpopulated)
return 0;
if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
ret = ttm_tt_swapin(ttm);
if (unlikely(ret != 0))
return ret;
}
be = ttm->be;
for (i = 0; i < ttm->num_pages; ++i) {
page = __ttm_tt_get_page(ttm, i);
if (!page)
return -ENOMEM;
}
be->func->populate(be, ttm->num_pages, ttm->pages,
ttm->dummy_read_page, ttm->dma_address);
ttm->state = tt_unbound;
return 0;
} }
EXPORT_SYMBOL(ttm_tt_populate);
#ifdef CONFIG_X86 #ifdef CONFIG_X86
static inline int ttm_tt_set_page_caching(struct page *p, static inline int ttm_tt_set_page_caching(struct page *p,
...@@ -278,153 +159,98 @@ int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement) ...@@ -278,153 +159,98 @@ int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
} }
EXPORT_SYMBOL(ttm_tt_set_placement_caching); EXPORT_SYMBOL(ttm_tt_set_placement_caching);
static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
{
int i;
unsigned count = 0;
struct list_head h;
struct page *cur_page;
struct ttm_backend *be = ttm->be;
INIT_LIST_HEAD(&h);
if (be)
be->func->clear(be);
for (i = 0; i < ttm->num_pages; ++i) {
cur_page = ttm->pages[i];
ttm->pages[i] = NULL;
if (cur_page) {
if (page_count(cur_page) != 1)
printk(KERN_ERR TTM_PFX
"Erroneous page count. "
"Leaking pages.\n");
ttm_mem_global_free_page(ttm->glob->mem_glob,
cur_page);
list_add(&cur_page->lru, &h);
count++;
}
}
ttm_put_pages(&h, count, ttm->page_flags, ttm->caching_state,
ttm->dma_address);
ttm->state = tt_unpopulated;
ttm->first_himem_page = ttm->num_pages;
ttm->last_lomem_page = -1;
}
void ttm_tt_destroy(struct ttm_tt *ttm) void ttm_tt_destroy(struct ttm_tt *ttm)
{ {
struct ttm_backend *be;
if (unlikely(ttm == NULL)) if (unlikely(ttm == NULL))
return; return;
be = ttm->be; if (ttm->state == tt_bound) {
if (likely(be != NULL)) { ttm_tt_unbind(ttm);
be->func->destroy(be);
ttm->be = NULL;
} }
if (likely(ttm->pages != NULL)) { if (likely(ttm->pages != NULL)) {
if (ttm->page_flags & TTM_PAGE_FLAG_USER) ttm->bdev->driver->ttm_tt_unpopulate(ttm);
ttm_tt_free_user_pages(ttm);
else
ttm_tt_free_alloced_pages(ttm);
ttm_tt_free_page_directory(ttm);
} }
if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) && if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
ttm->swap_storage) ttm->swap_storage)
fput(ttm->swap_storage); fput(ttm->swap_storage);
kfree(ttm); ttm->swap_storage = NULL;
ttm->func->destroy(ttm);
} }
int ttm_tt_set_user(struct ttm_tt *ttm, int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
struct task_struct *tsk, unsigned long size, uint32_t page_flags,
unsigned long start, unsigned long num_pages) struct page *dummy_read_page)
{ {
struct mm_struct *mm = tsk->mm; ttm->bdev = bdev;
int ret; ttm->glob = bdev->glob;
int write = (ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0; ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
struct ttm_mem_global *mem_glob = ttm->glob->mem_glob; ttm->caching_state = tt_cached;
ttm->page_flags = page_flags;
BUG_ON(num_pages != ttm->num_pages); ttm->dummy_read_page = dummy_read_page;
BUG_ON((ttm->page_flags & TTM_PAGE_FLAG_USER) == 0); ttm->state = tt_unpopulated;
/**
* Account user pages as lowmem pages for now.
*/
ret = ttm_mem_global_alloc(mem_glob, num_pages * PAGE_SIZE,
false, false);
if (unlikely(ret != 0))
return ret;
down_read(&mm->mmap_sem);
ret = get_user_pages(tsk, mm, start, num_pages,
write, 0, ttm->pages, NULL);
up_read(&mm->mmap_sem);
if (ret != num_pages && write) { ttm_tt_alloc_page_directory(ttm);
ttm_tt_free_user_pages(ttm); if (!ttm->pages) {
ttm_mem_global_free(mem_glob, num_pages * PAGE_SIZE); ttm_tt_destroy(ttm);
printk(KERN_ERR TTM_PFX "Failed allocating page table\n");
return -ENOMEM; return -ENOMEM;
} }
ttm->tsk = tsk;
ttm->start = start;
ttm->state = tt_unbound;
return 0; return 0;
} }
EXPORT_SYMBOL(ttm_tt_init);
struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size, void ttm_tt_fini(struct ttm_tt *ttm)
uint32_t page_flags, struct page *dummy_read_page)
{ {
struct ttm_bo_driver *bo_driver = bdev->driver; drm_free_large(ttm->pages);
struct ttm_tt *ttm; ttm->pages = NULL;
}
if (!bo_driver) EXPORT_SYMBOL(ttm_tt_fini);
return NULL;
ttm = kzalloc(sizeof(*ttm), GFP_KERNEL); int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
if (!ttm) unsigned long size, uint32_t page_flags,
return NULL; struct page *dummy_read_page)
{
struct ttm_tt *ttm = &ttm_dma->ttm;
ttm->bdev = bdev;
ttm->glob = bdev->glob; ttm->glob = bdev->glob;
ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
ttm->first_himem_page = ttm->num_pages;
ttm->last_lomem_page = -1;
ttm->caching_state = tt_cached; ttm->caching_state = tt_cached;
ttm->page_flags = page_flags; ttm->page_flags = page_flags;
ttm->dummy_read_page = dummy_read_page; ttm->dummy_read_page = dummy_read_page;
ttm->state = tt_unpopulated;
ttm_tt_alloc_page_directory(ttm); INIT_LIST_HEAD(&ttm_dma->pages_list);
if (!ttm->pages) { ttm_dma_tt_alloc_page_directory(ttm_dma);
if (!ttm->pages || !ttm_dma->dma_address) {
ttm_tt_destroy(ttm); ttm_tt_destroy(ttm);
printk(KERN_ERR TTM_PFX "Failed allocating page table\n"); printk(KERN_ERR TTM_PFX "Failed allocating page table\n");
return NULL; return -ENOMEM;
}
ttm->be = bo_driver->create_ttm_backend_entry(bdev);
if (!ttm->be) {
ttm_tt_destroy(ttm);
printk(KERN_ERR TTM_PFX "Failed creating ttm backend entry\n");
return NULL;
} }
ttm->state = tt_unpopulated; return 0;
return ttm; }
EXPORT_SYMBOL(ttm_dma_tt_init);
void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
{
struct ttm_tt *ttm = &ttm_dma->ttm;
drm_free_large(ttm->pages);
ttm->pages = NULL;
drm_free_large(ttm_dma->dma_address);
ttm_dma->dma_address = NULL;
} }
EXPORT_SYMBOL(ttm_dma_tt_fini);
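A DMA-aware driver TT embeds struct ttm_dma_tt instead of a bare struct ttm_tt and pairs ttm_dma_tt_init() with ttm_dma_tt_fini() in its backend destroy callback, mirroring the vmwgfx pattern below. A minimal sketch with illustrative names:
struct mydrv_ttm_tt {
        struct ttm_dma_tt ttm_dma;
};

static void mydrv_ttm_tt_destroy(struct ttm_tt *ttm)
{
        struct mydrv_ttm_tt *gtt =
                container_of(ttm, struct mydrv_ttm_tt, ttm_dma.ttm);

        ttm_dma_tt_fini(&gtt->ttm_dma);
        kfree(gtt);
}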
void ttm_tt_unbind(struct ttm_tt *ttm) void ttm_tt_unbind(struct ttm_tt *ttm)
{ {
int ret; int ret;
struct ttm_backend *be = ttm->be;
if (ttm->state == tt_bound) { if (ttm->state == tt_bound) {
ret = be->func->unbind(be); ret = ttm->func->unbind(ttm);
BUG_ON(ret); BUG_ON(ret);
ttm->state = tt_unbound; ttm->state = tt_unbound;
} }
...@@ -433,7 +259,6 @@ void ttm_tt_unbind(struct ttm_tt *ttm) ...@@ -433,7 +259,6 @@ void ttm_tt_unbind(struct ttm_tt *ttm)
int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem) int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{ {
int ret = 0; int ret = 0;
struct ttm_backend *be;
if (!ttm) if (!ttm)
return -EINVAL; return -EINVAL;
...@@ -441,25 +266,21 @@ int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem) ...@@ -441,25 +266,21 @@ int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
if (ttm->state == tt_bound) if (ttm->state == tt_bound)
return 0; return 0;
be = ttm->be; ret = ttm->bdev->driver->ttm_tt_populate(ttm);
ret = ttm_tt_populate(ttm);
if (ret) if (ret)
return ret; return ret;
ret = be->func->bind(be, bo_mem); ret = ttm->func->bind(ttm, bo_mem);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
return ret; return ret;
ttm->state = tt_bound; ttm->state = tt_bound;
if (ttm->page_flags & TTM_PAGE_FLAG_USER)
ttm->page_flags |= TTM_PAGE_FLAG_USER_DIRTY;
return 0; return 0;
} }
EXPORT_SYMBOL(ttm_tt_bind); EXPORT_SYMBOL(ttm_tt_bind);
static int ttm_tt_swapin(struct ttm_tt *ttm) int ttm_tt_swapin(struct ttm_tt *ttm)
{ {
struct address_space *swap_space; struct address_space *swap_space;
struct file *swap_storage; struct file *swap_storage;
...@@ -470,16 +291,6 @@ static int ttm_tt_swapin(struct ttm_tt *ttm) ...@@ -470,16 +291,6 @@ static int ttm_tt_swapin(struct ttm_tt *ttm)
int i; int i;
int ret = -ENOMEM; int ret = -ENOMEM;
if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
ret = ttm_tt_set_user(ttm, ttm->tsk, ttm->start,
ttm->num_pages);
if (unlikely(ret != 0))
return ret;
ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
return 0;
}
swap_storage = ttm->swap_storage; swap_storage = ttm->swap_storage;
BUG_ON(swap_storage == NULL); BUG_ON(swap_storage == NULL);
...@@ -491,7 +302,7 @@ static int ttm_tt_swapin(struct ttm_tt *ttm) ...@@ -491,7 +302,7 @@ static int ttm_tt_swapin(struct ttm_tt *ttm)
ret = PTR_ERR(from_page); ret = PTR_ERR(from_page);
goto out_err; goto out_err;
} }
to_page = __ttm_tt_get_page(ttm, i); to_page = ttm->pages[i];
if (unlikely(to_page == NULL)) if (unlikely(to_page == NULL))
goto out_err; goto out_err;
...@@ -512,7 +323,6 @@ static int ttm_tt_swapin(struct ttm_tt *ttm) ...@@ -512,7 +323,6 @@ static int ttm_tt_swapin(struct ttm_tt *ttm)
return 0; return 0;
out_err: out_err:
ttm_tt_free_alloced_pages(ttm);
return ret; return ret;
} }
...@@ -530,18 +340,6 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage) ...@@ -530,18 +340,6 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated); BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
BUG_ON(ttm->caching_state != tt_cached); BUG_ON(ttm->caching_state != tt_cached);
/*
* For user buffers, just unpin the pages, as there should be
* vma references.
*/
if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
ttm_tt_free_user_pages(ttm);
ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
ttm->swap_storage = NULL;
return 0;
}
if (!persistent_swap_storage) { if (!persistent_swap_storage) {
swap_storage = shmem_file_setup("ttm swap", swap_storage = shmem_file_setup("ttm swap",
ttm->num_pages << PAGE_SHIFT, ttm->num_pages << PAGE_SHIFT,
...@@ -576,7 +374,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage) ...@@ -576,7 +374,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
page_cache_release(to_page); page_cache_release(to_page);
} }
ttm_tt_free_alloced_pages(ttm); ttm->bdev->driver->ttm_tt_unpopulate(ttm);
ttm->swap_storage = swap_storage; ttm->swap_storage = swap_storage;
ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED; ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
if (persistent_swap_storage) if (persistent_swap_storage)
......
...@@ -28,6 +28,7 @@ ...@@ -28,6 +28,7 @@
#include "vmwgfx_drv.h" #include "vmwgfx_drv.h"
#include "ttm/ttm_bo_driver.h" #include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h" #include "ttm/ttm_placement.h"
#include "ttm/ttm_page_alloc.h"
static uint32_t vram_placement_flags = TTM_PL_FLAG_VRAM | static uint32_t vram_placement_flags = TTM_PL_FLAG_VRAM |
TTM_PL_FLAG_CACHED; TTM_PL_FLAG_CACHED;
...@@ -139,85 +140,63 @@ struct ttm_placement vmw_srf_placement = { ...@@ -139,85 +140,63 @@ struct ttm_placement vmw_srf_placement = {
.busy_placement = gmr_vram_placement_flags .busy_placement = gmr_vram_placement_flags
}; };
struct vmw_ttm_backend { struct vmw_ttm_tt {
struct ttm_backend backend; struct ttm_tt ttm;
struct page **pages;
unsigned long num_pages;
struct vmw_private *dev_priv; struct vmw_private *dev_priv;
int gmr_id; int gmr_id;
}; };
static int vmw_ttm_populate(struct ttm_backend *backend, static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
unsigned long num_pages, struct page **pages,
struct page *dummy_read_page,
dma_addr_t *dma_addrs)
{ {
struct vmw_ttm_backend *vmw_be = struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, ttm);
container_of(backend, struct vmw_ttm_backend, backend);
vmw_be->pages = pages;
vmw_be->num_pages = num_pages;
return 0;
}
static int vmw_ttm_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
{
struct vmw_ttm_backend *vmw_be =
container_of(backend, struct vmw_ttm_backend, backend);
vmw_be->gmr_id = bo_mem->start; vmw_be->gmr_id = bo_mem->start;
return vmw_gmr_bind(vmw_be->dev_priv, vmw_be->pages, return vmw_gmr_bind(vmw_be->dev_priv, ttm->pages,
vmw_be->num_pages, vmw_be->gmr_id); ttm->num_pages, vmw_be->gmr_id);
} }
static int vmw_ttm_unbind(struct ttm_backend *backend) static int vmw_ttm_unbind(struct ttm_tt *ttm)
{ {
struct vmw_ttm_backend *vmw_be = struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, ttm);
container_of(backend, struct vmw_ttm_backend, backend);
vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id); vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
return 0; return 0;
} }
static void vmw_ttm_clear(struct ttm_backend *backend) static void vmw_ttm_destroy(struct ttm_tt *ttm)
{ {
struct vmw_ttm_backend *vmw_be = struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, ttm);
container_of(backend, struct vmw_ttm_backend, backend);
vmw_be->pages = NULL;
vmw_be->num_pages = 0;
}
static void vmw_ttm_destroy(struct ttm_backend *backend)
{
struct vmw_ttm_backend *vmw_be =
container_of(backend, struct vmw_ttm_backend, backend);
ttm_tt_fini(ttm);
kfree(vmw_be); kfree(vmw_be);
} }
static struct ttm_backend_func vmw_ttm_func = { static struct ttm_backend_func vmw_ttm_func = {
.populate = vmw_ttm_populate,
.clear = vmw_ttm_clear,
.bind = vmw_ttm_bind, .bind = vmw_ttm_bind,
.unbind = vmw_ttm_unbind, .unbind = vmw_ttm_unbind,
.destroy = vmw_ttm_destroy, .destroy = vmw_ttm_destroy,
}; };
struct ttm_backend *vmw_ttm_backend_init(struct ttm_bo_device *bdev) struct ttm_tt *vmw_ttm_tt_create(struct ttm_bo_device *bdev,
unsigned long size, uint32_t page_flags,
struct page *dummy_read_page)
{ {
struct vmw_ttm_backend *vmw_be; struct vmw_ttm_tt *vmw_be;
vmw_be = kmalloc(sizeof(*vmw_be), GFP_KERNEL); vmw_be = kmalloc(sizeof(*vmw_be), GFP_KERNEL);
if (!vmw_be) if (!vmw_be)
return NULL; return NULL;
vmw_be->backend.func = &vmw_ttm_func; vmw_be->ttm.func = &vmw_ttm_func;
vmw_be->dev_priv = container_of(bdev, struct vmw_private, bdev); vmw_be->dev_priv = container_of(bdev, struct vmw_private, bdev);
return &vmw_be->backend; if (ttm_tt_init(&vmw_be->ttm, bdev, size, page_flags, dummy_read_page)) {
kfree(vmw_be);
return NULL;
}
return &vmw_be->ttm;
} }
int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags) int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
...@@ -357,7 +336,9 @@ static int vmw_sync_obj_wait(void *sync_obj, void *sync_arg, ...@@ -357,7 +336,9 @@ static int vmw_sync_obj_wait(void *sync_obj, void *sync_arg,
} }
struct ttm_bo_driver vmw_bo_driver = { struct ttm_bo_driver vmw_bo_driver = {
.create_ttm_backend_entry = vmw_ttm_backend_init, .ttm_tt_create = &vmw_ttm_tt_create,
.ttm_tt_populate = &ttm_pool_populate,
.ttm_tt_unpopulate = &ttm_pool_unpopulate,
.invalidate_caches = vmw_invalidate_caches, .invalidate_caches = vmw_invalidate_caches,
.init_mem_type = vmw_init_mem_type, .init_mem_type = vmw_init_mem_type,
.evict_flags = vmw_evict_flags, .evict_flags = vmw_evict_flags,
......
...@@ -1517,29 +1517,10 @@ int vmw_surface_check(struct vmw_private *dev_priv, ...@@ -1517,29 +1517,10 @@ int vmw_surface_check(struct vmw_private *dev_priv,
/** /**
* Buffer management. * Buffer management.
*/ */
static size_t vmw_dmabuf_acc_size(struct ttm_bo_global *glob,
unsigned long num_pages)
{
static size_t bo_user_size = ~0;
size_t page_array_size =
(num_pages * sizeof(void *) + PAGE_SIZE - 1) & PAGE_MASK;
if (unlikely(bo_user_size == ~0)) {
bo_user_size = glob->ttm_bo_extra_size +
ttm_round_pot(sizeof(struct vmw_dma_buffer));
}
return bo_user_size + page_array_size;
}
void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo) void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
{ {
struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo); struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
struct ttm_bo_global *glob = bo->glob;
ttm_mem_global_free(glob->mem_glob, bo->acc_size);
kfree(vmw_bo); kfree(vmw_bo);
} }
...@@ -1550,24 +1531,12 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv, ...@@ -1550,24 +1531,12 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv,
void (*bo_free) (struct ttm_buffer_object *bo)) void (*bo_free) (struct ttm_buffer_object *bo))
{ {
struct ttm_bo_device *bdev = &dev_priv->bdev; struct ttm_bo_device *bdev = &dev_priv->bdev;
struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
size_t acc_size; size_t acc_size;
int ret; int ret;
BUG_ON(!bo_free); BUG_ON(!bo_free);
acc_size = acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct vmw_dma_buffer));
vmw_dmabuf_acc_size(bdev->glob,
(size + PAGE_SIZE - 1) >> PAGE_SHIFT);
ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
if (unlikely(ret != 0)) {
/* we must free the bo here as
* ttm_buffer_object_init does so as well */
bo_free(&vmw_bo->base);
return ret;
}
memset(vmw_bo, 0, sizeof(*vmw_bo)); memset(vmw_bo, 0, sizeof(*vmw_bo));
INIT_LIST_HEAD(&vmw_bo->validate_list); INIT_LIST_HEAD(&vmw_bo->validate_list);
...@@ -1582,9 +1551,7 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv, ...@@ -1582,9 +1551,7 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv,
static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo) static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
{ {
struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo); struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
struct ttm_bo_global *glob = bo->glob;
ttm_mem_global_free(glob->mem_glob, bo->acc_size);
kfree(vmw_user_bo); kfree(vmw_user_bo);
} }
......
...@@ -153,7 +153,7 @@ void __init xen_swiotlb_init(int verbose) ...@@ -153,7 +153,7 @@ void __init xen_swiotlb_init(int verbose)
char *m = NULL; char *m = NULL;
unsigned int repeat = 3; unsigned int repeat = 3;
nr_tbl = swioltb_nr_tbl(); nr_tbl = swiotlb_nr_tbl();
if (nr_tbl) if (nr_tbl)
xen_io_tlb_nslabs = nr_tbl; xen_io_tlb_nslabs = nr_tbl;
else { else {
......
...@@ -122,17 +122,12 @@ struct ttm_mem_reg { ...@@ -122,17 +122,12 @@ struct ttm_mem_reg {
* be mmapped by user space. Each of these bos occupy a slot in the * be mmapped by user space. Each of these bos occupy a slot in the
* device address space, that can be used for normal vm operations. * device address space, that can be used for normal vm operations.
* *
* @ttm_bo_type_user: These are user-space memory areas that are made
* available to the GPU by mapping the buffer pages into the GPU aperture
* space. These buffers cannot be mmaped from the device address space.
*
* @ttm_bo_type_kernel: These buffers are like ttm_bo_type_device buffers, * @ttm_bo_type_kernel: These buffers are like ttm_bo_type_device buffers,
* but they cannot be accessed from user-space. For kernel-only use. * but they cannot be accessed from user-space. For kernel-only use.
*/ */
enum ttm_bo_type { enum ttm_bo_type {
ttm_bo_type_device, ttm_bo_type_device,
ttm_bo_type_user,
ttm_bo_type_kernel ttm_bo_type_kernel
}; };
...@@ -434,9 +429,9 @@ extern void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, ...@@ -434,9 +429,9 @@ extern void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev,
* -EBUSY if the buffer is busy and no_wait is true. * -EBUSY if the buffer is busy and no_wait is true.
* -ERESTARTSYS if interrupted by a signal. * -ERESTARTSYS if interrupted by a signal.
*/ */
extern int extern int
ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait); ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait);
/** /**
* ttm_bo_synccpu_write_release: * ttm_bo_synccpu_write_release:
* *
...@@ -446,6 +441,22 @@ ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait); ...@@ -446,6 +441,22 @@ ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait);
*/ */
extern void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo); extern void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo);
/**
* ttm_bo_acc_size
*
* @bdev: Pointer to a ttm_bo_device struct.
* @bo_size: size of the buffer object in bytes.
* @struct_size: size of the structure holding the buffer object's data.
*
* Returns size to account for a buffer object
*/
size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
unsigned long bo_size,
unsigned struct_size);
size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
unsigned long bo_size,
unsigned struct_size);
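A hedged sketch of how a driver might pick between these two accounting helpers when creating a buffer object; the "foo" names (struct foo_bo, foo_bo_acc_size) are hypothetical and not part of this series, and the result is what would be passed as the acc_size argument of ttm_bo_init():

static size_t foo_bo_acc_size(struct ttm_bo_device *bdev, unsigned long size,
			      bool use_dma_pool)
{
	/* Account for the driver's bo struct plus TTM's per-bo overhead. */
	if (use_dma_pool)
		return ttm_bo_dma_acc_size(bdev, size, sizeof(struct foo_bo));
	return ttm_bo_acc_size(bdev, size, sizeof(struct foo_bo));
}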
/** /**
* ttm_bo_init * ttm_bo_init
* *
...@@ -493,6 +504,7 @@ extern int ttm_bo_init(struct ttm_bo_device *bdev, ...@@ -493,6 +504,7 @@ extern int ttm_bo_init(struct ttm_bo_device *bdev,
struct file *persistent_swap_storage, struct file *persistent_swap_storage,
size_t acc_size, size_t acc_size,
void (*destroy) (struct ttm_buffer_object *)); void (*destroy) (struct ttm_buffer_object *));
/** /**
* ttm_bo_synccpu_object_init * ttm_bo_synccpu_object_init
* *
......
...@@ -42,37 +42,10 @@ ...@@ -42,37 +42,10 @@
struct ttm_backend; struct ttm_backend;
struct ttm_backend_func { struct ttm_backend_func {
/**
* struct ttm_backend_func member populate
*
* @backend: Pointer to a struct ttm_backend.
* @num_pages: Number of pages to populate.
* @pages: Array of pointers to ttm pages.
* @dummy_read_page: Page to be used instead of NULL pages in the
* array @pages.
* @dma_addrs: Array of DMA (bus) address of the ttm pages.
*
* Populate the backend with ttm pages. Depending on the backend,
* it may or may not copy the @pages array.
*/
int (*populate) (struct ttm_backend *backend,
unsigned long num_pages, struct page **pages,
struct page *dummy_read_page,
dma_addr_t *dma_addrs);
/**
* struct ttm_backend_func member clear
*
* @backend: Pointer to a struct ttm_backend.
*
* This is an "unpopulate" function. Release all resources
* allocated with populate.
*/
void (*clear) (struct ttm_backend *backend);
/** /**
* struct ttm_backend_func member bind * struct ttm_backend_func member bind
* *
* @backend: Pointer to a struct ttm_backend. * @ttm: Pointer to a struct ttm_tt.
* @bo_mem: Pointer to a struct ttm_mem_reg describing the * @bo_mem: Pointer to a struct ttm_mem_reg describing the
* memory type and location for binding. * memory type and location for binding.
* *
...@@ -80,46 +53,29 @@ struct ttm_backend_func { ...@@ -80,46 +53,29 @@ struct ttm_backend_func {
* indicated by @bo_mem. This function should be able to handle * indicated by @bo_mem. This function should be able to handle
* differences between aperture and system page sizes. * differences between aperture and system page sizes.
*/ */
int (*bind) (struct ttm_backend *backend, struct ttm_mem_reg *bo_mem); int (*bind) (struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);
/** /**
* struct ttm_backend_func member unbind * struct ttm_backend_func member unbind
* *
* @backend: Pointer to a struct ttm_backend. * @ttm: Pointer to a struct ttm_tt.
* *
* Unbind previously bound backend pages. This function should be * Unbind previously bound backend pages. This function should be
* able to handle differences between aperture and system page sizes. * able to handle differences between aperture and system page sizes.
*/ */
int (*unbind) (struct ttm_backend *backend); int (*unbind) (struct ttm_tt *ttm);
/** /**
* struct ttm_backend_func member destroy * struct ttm_backend_func member destroy
* *
* @backend: Pointer to a struct ttm_backend. * @ttm: Pointer to a struct ttm_tt.
* *
* Destroy the backend. * Destroy the backend. This will be called back from ttm_tt_destroy, so
* don't call ttm_tt_destroy from the callback or an infinite loop will result.
*/ */
void (*destroy) (struct ttm_backend *backend); void (*destroy) (struct ttm_tt *ttm);
};
/**
* struct ttm_backend
*
* @bdev: Pointer to a struct ttm_bo_device.
* @flags: For driver use.
* @func: Pointer to a struct ttm_backend_func that describes
* the backend methods.
*
*/
struct ttm_backend {
struct ttm_bo_device *bdev;
uint32_t flags;
struct ttm_backend_func *func;
}; };
#define TTM_PAGE_FLAG_USER (1 << 1)
#define TTM_PAGE_FLAG_USER_DIRTY (1 << 2)
#define TTM_PAGE_FLAG_WRITE (1 << 3) #define TTM_PAGE_FLAG_WRITE (1 << 3)
#define TTM_PAGE_FLAG_SWAPPED (1 << 4) #define TTM_PAGE_FLAG_SWAPPED (1 << 4)
#define TTM_PAGE_FLAG_PERSISTENT_SWAP (1 << 5) #define TTM_PAGE_FLAG_PERSISTENT_SWAP (1 << 5)
...@@ -135,23 +91,18 @@ enum ttm_caching_state { ...@@ -135,23 +91,18 @@ enum ttm_caching_state {
/** /**
* struct ttm_tt * struct ttm_tt
* *
* @bdev: Pointer to a struct ttm_bo_device.
* @func: Pointer to a struct ttm_backend_func that describes
* the backend methods.
* @dummy_read_page: Page to map where the ttm_tt page array contains a NULL * @dummy_read_page: Page to map where the ttm_tt page array contains a NULL
* pointer. * pointer.
* @pages: Array of pages backing the data. * @pages: Array of pages backing the data.
* @first_himem_page: Himem pages are put last in the page array, which
* enables us to run caching attribute changes on only the first part
* of the page array containing lomem pages. This is the index of the
* first himem page.
* @last_lomem_page: Index of the last lomem page in the page array.
* @num_pages: Number of pages in the page array. * @num_pages: Number of pages in the page array.
* @bdev: Pointer to the current struct ttm_bo_device. * @bdev: Pointer to the current struct ttm_bo_device.
* @be: Pointer to the ttm backend. * @be: Pointer to the ttm backend.
* @tsk: The task for user ttm.
* @start: virtual address for user ttm.
* @swap_storage: Pointer to shmem struct file for swap storage. * @swap_storage: Pointer to shmem struct file for swap storage.
* @caching_state: The current caching state of the pages. * @caching_state: The current caching state of the pages.
* @state: The current binding state of the pages. * @state: The current binding state of the pages.
* @dma_address: The DMA (bus) addresses of the pages (if TTM_PAGE_FLAG_DMA32)
* *
* This is a structure holding the pages, caching- and aperture binding * This is a structure holding the pages, caching- and aperture binding
* status for a buffer object that isn't backed by fixed (VRAM / AGP) * status for a buffer object that isn't backed by fixed (VRAM / AGP)
...@@ -159,16 +110,14 @@ enum ttm_caching_state { ...@@ -159,16 +110,14 @@ enum ttm_caching_state {
*/ */
struct ttm_tt { struct ttm_tt {
struct ttm_bo_device *bdev;
struct ttm_backend_func *func;
struct page *dummy_read_page; struct page *dummy_read_page;
struct page **pages; struct page **pages;
long first_himem_page;
long last_lomem_page;
uint32_t page_flags; uint32_t page_flags;
unsigned long num_pages; unsigned long num_pages;
struct ttm_bo_global *glob; struct ttm_bo_global *glob;
struct ttm_backend *be; struct ttm_backend *be;
struct task_struct *tsk;
unsigned long start;
struct file *swap_storage; struct file *swap_storage;
enum ttm_caching_state caching_state; enum ttm_caching_state caching_state;
enum { enum {
...@@ -176,7 +125,23 @@ struct ttm_tt { ...@@ -176,7 +125,23 @@ struct ttm_tt {
tt_unbound, tt_unbound,
tt_unpopulated, tt_unpopulated,
} state; } state;
};
/**
* struct ttm_dma_tt
*
* @ttm: Base ttm_tt struct.
* @dma_address: The DMA (bus) addresses of the pages
* @pages_list: used by some page allocation backend
*
* This is a structure holding the pages, caching- and aperture binding
* status for a buffer object that isn't backed by fixed (VRAM / AGP)
* memory.
*/
struct ttm_dma_tt {
struct ttm_tt ttm;
dma_addr_t *dma_address; dma_addr_t *dma_address;
struct list_head pages_list;
}; };
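A hedged sketch of the embedding this struct implies: since the reworked driver callbacks receive a plain struct ttm_tt pointer, a driver that embeds struct ttm_dma_tt gets back to its own object with container_of(). The struct foo_dma_tt name is hypothetical.

struct foo_dma_tt {			/* hypothetical driver tt object */
	struct ttm_dma_tt ttm;		/* dma_address[] lives in ttm.dma_address */
	/* driver-private state ... */
};

static inline struct foo_dma_tt *to_foo_dma_tt(struct ttm_tt *ttm)
{
	/* The callbacks hand us &foo->ttm.ttm, so step back out through both. */
	return container_of(ttm, struct foo_dma_tt, ttm.ttm);
}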
#define TTM_MEMTYPE_FLAG_FIXED (1 << 0) /* Fixed (on-card) PCI memory */ #define TTM_MEMTYPE_FLAG_FIXED (1 << 0) /* Fixed (on-card) PCI memory */
...@@ -351,15 +316,42 @@ struct ttm_mem_type_manager { ...@@ -351,15 +316,42 @@ struct ttm_mem_type_manager {
struct ttm_bo_driver { struct ttm_bo_driver {
/** /**
* struct ttm_bo_driver member create_ttm_backend_entry * ttm_tt_create
* *
* @bdev: The buffer object device. * @bdev: pointer to a struct ttm_bo_device:
* @size: Size of the data needed backing.
* @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
* @dummy_read_page: See struct ttm_bo_device.
* *
* Create a driver specific struct ttm_backend. * Create a struct ttm_tt to back data with system memory pages.
* No pages are actually allocated.
* Returns:
* NULL: Out of memory.
*/ */
struct ttm_tt *(*ttm_tt_create)(struct ttm_bo_device *bdev,
unsigned long size,
uint32_t page_flags,
struct page *dummy_read_page);
struct ttm_backend *(*create_ttm_backend_entry) /**
(struct ttm_bo_device *bdev); * ttm_tt_populate
*
* @ttm: The struct ttm_tt to contain the backing pages.
*
* Allocate all backing pages
* Returns:
* -ENOMEM: Out of memory.
*/
int (*ttm_tt_populate)(struct ttm_tt *ttm);
/**
* ttm_tt_unpopulate
*
* @ttm: The struct ttm_tt to contain the backing pages.
*
* Free all backing pages
*/
void (*ttm_tt_unpopulate)(struct ttm_tt *ttm);
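Taken together, a driver that is content with the generic page pool could wire the three new callbacks up roughly as below. This is a sketch under assumptions: struct foo_tt, foo_backend_func (the driver's bind/unbind/destroy table) and the other foo_* names are hypothetical, and the file is assumed to include "ttm/ttm_bo_driver.h" and "ttm/ttm_page_alloc.h".

struct foo_tt {
	struct ttm_tt ttm;
	/* driver-private state ... */
};

static struct ttm_tt *foo_ttm_tt_create(struct ttm_bo_device *bdev,
					unsigned long size, uint32_t page_flags,
					struct page *dummy_read_page)
{
	struct foo_tt *tt = kzalloc(sizeof(*tt), GFP_KERNEL);

	if (!tt)
		return NULL;
	tt->ttm.func = &foo_backend_func;	/* assumed driver bind/unbind/destroy */
	if (ttm_tt_init(&tt->ttm, bdev, size, page_flags, dummy_read_page)) {
		kfree(tt);
		return NULL;
	}
	return &tt->ttm;
}

static struct ttm_bo_driver foo_bo_driver = {
	.ttm_tt_create		= &foo_ttm_tt_create,
	.ttm_tt_populate	= &ttm_pool_populate,
	.ttm_tt_unpopulate	= &ttm_pool_unpopulate,
	/* ... remaining members unchanged ... */
};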
/** /**
* struct ttm_bo_driver member invalidate_caches * struct ttm_bo_driver member invalidate_caches
...@@ -477,9 +469,6 @@ struct ttm_bo_global_ref { ...@@ -477,9 +469,6 @@ struct ttm_bo_global_ref {
* @dummy_read_page: Pointer to a dummy page used for mapping requests * @dummy_read_page: Pointer to a dummy page used for mapping requests
* of unpopulated pages. * of unpopulated pages.
* @shrink: A shrink callback object used for buffer object swap. * @shrink: A shrink callback object used for buffer object swap.
* @ttm_bo_extra_size: Extra size (sizeof(struct ttm_buffer_object) excluded)
* used by a buffer object. This is excluding page arrays and backing pages.
* @ttm_bo_size: This is @ttm_bo_extra_size + sizeof(struct ttm_buffer_object).
* @device_list_mutex: Mutex protecting the device list. * @device_list_mutex: Mutex protecting the device list.
* This mutex is held while traversing the device list for pm options. * This mutex is held while traversing the device list for pm options.
* @lru_lock: Spinlock protecting the bo subsystem lru lists. * @lru_lock: Spinlock protecting the bo subsystem lru lists.
...@@ -497,8 +486,6 @@ struct ttm_bo_global { ...@@ -497,8 +486,6 @@ struct ttm_bo_global {
struct ttm_mem_global *mem_glob; struct ttm_mem_global *mem_glob;
struct page *dummy_read_page; struct page *dummy_read_page;
struct ttm_mem_shrink shrink; struct ttm_mem_shrink shrink;
size_t ttm_bo_extra_size;
size_t ttm_bo_size;
struct mutex device_list_mutex; struct mutex device_list_mutex;
spinlock_t lru_lock; spinlock_t lru_lock;
...@@ -600,8 +587,9 @@ ttm_flag_masked(uint32_t *old, uint32_t new, uint32_t mask) ...@@ -600,8 +587,9 @@ ttm_flag_masked(uint32_t *old, uint32_t new, uint32_t mask)
} }
/** /**
* ttm_tt_create * ttm_tt_init
* *
* @ttm: The struct ttm_tt.
* @bdev: pointer to a struct ttm_bo_device: * @bdev: pointer to a struct ttm_bo_device:
* @size: Size of the data needed backing. * @size: Size of the data needed backing.
* @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags. * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
...@@ -612,28 +600,22 @@ ttm_flag_masked(uint32_t *old, uint32_t new, uint32_t mask) ...@@ -612,28 +600,22 @@ ttm_flag_masked(uint32_t *old, uint32_t new, uint32_t mask)
* Returns: * Returns:
* NULL: Out of memory. * NULL: Out of memory.
*/ */
extern struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev, extern int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
unsigned long size, unsigned long size, uint32_t page_flags,
uint32_t page_flags, struct page *dummy_read_page);
struct page *dummy_read_page); extern int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
unsigned long size, uint32_t page_flags,
struct page *dummy_read_page);
/** /**
* ttm_tt_set_user: * ttm_tt_fini
* *
* @ttm: The struct ttm_tt to populate. * @ttm: the ttm_tt structure.
* @tsk: A struct task_struct for which @start is a valid user-space address.
* @start: A valid user-space address.
* @num_pages: Size in pages of the user memory area.
* *
* Populate a struct ttm_tt with a user-space memory area after first pinning * Free the memory of a ttm_tt structure.
* the pages backing it.
* Returns:
* !0: Error.
*/ */
extern void ttm_tt_fini(struct ttm_tt *ttm);
extern int ttm_tt_set_user(struct ttm_tt *ttm, extern void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma);
struct task_struct *tsk,
unsigned long start, unsigned long num_pages);
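Continuing the hypothetical foo_dma_tt sketch above, the DMA-aware init/fini pair would be used from the driver's create callback and from its ttm_backend_func destroy hook roughly like this; foo_dma_backend_func is again an assumed driver table:

static struct ttm_tt *foo_dma_ttm_tt_create(struct ttm_bo_device *bdev,
					    unsigned long size,
					    uint32_t page_flags,
					    struct page *dummy_read_page)
{
	struct foo_dma_tt *tt = kzalloc(sizeof(*tt), GFP_KERNEL);

	if (!tt)
		return NULL;
	tt->ttm.ttm.func = &foo_dma_backend_func;	/* assumed bind/unbind/destroy */
	if (ttm_dma_tt_init(&tt->ttm, bdev, size, page_flags, dummy_read_page)) {
		kfree(tt);
		return NULL;
	}
	return &tt->ttm.ttm;
}

static void foo_dma_ttm_destroy(struct ttm_tt *ttm)	/* ttm_backend_func.destroy */
{
	struct foo_dma_tt *tt = to_foo_dma_tt(ttm);

	ttm_dma_tt_fini(&tt->ttm);	/* release what ttm_dma_tt_init set up */
	kfree(tt);
}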
/** /**
* ttm_ttm_bind: * ttm_ttm_bind:
...@@ -645,21 +627,12 @@ extern int ttm_tt_set_user(struct ttm_tt *ttm, ...@@ -645,21 +627,12 @@ extern int ttm_tt_set_user(struct ttm_tt *ttm,
*/ */
extern int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem); extern int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);
/**
* ttm_tt_populate:
*
* @ttm: The struct ttm_tt to contain the backing pages.
*
* Add backing pages to all of @ttm
*/
extern int ttm_tt_populate(struct ttm_tt *ttm);
/** /**
* ttm_ttm_destroy: * ttm_ttm_destroy:
* *
* @ttm: The struct ttm_tt. * @ttm: The struct ttm_tt.
* *
* Unbind, unpopulate and destroy a struct ttm_tt. * Unbind, unpopulate and destroy common struct ttm_tt.
*/ */
extern void ttm_tt_destroy(struct ttm_tt *ttm); extern void ttm_tt_destroy(struct ttm_tt *ttm);
...@@ -673,19 +646,13 @@ extern void ttm_tt_destroy(struct ttm_tt *ttm); ...@@ -673,19 +646,13 @@ extern void ttm_tt_destroy(struct ttm_tt *ttm);
extern void ttm_tt_unbind(struct ttm_tt *ttm); extern void ttm_tt_unbind(struct ttm_tt *ttm);
/** /**
* ttm_ttm_destroy: * ttm_tt_swapin:
* *
* @ttm: The struct ttm_tt. * @ttm: The struct ttm_tt.
* @index: Index of the desired page.
*
* Return a pointer to the struct page backing @ttm at page
* index @index. If the page is unpopulated, one will be allocated to
* populate that index.
* *
* Returns: * Swap in a previously swapped-out ttm_tt.
* NULL on OOM.
*/ */
extern struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index); extern int ttm_tt_swapin(struct ttm_tt *ttm);
/** /**
* ttm_tt_cache_flush: * ttm_tt_cache_flush:
...@@ -1046,17 +1013,23 @@ extern const struct ttm_mem_type_manager_func ttm_bo_manager_func; ...@@ -1046,17 +1013,23 @@ extern const struct ttm_mem_type_manager_func ttm_bo_manager_func;
#include <linux/agp_backend.h> #include <linux/agp_backend.h>
/** /**
* ttm_agp_backend_init * ttm_agp_tt_create
* *
* @bdev: Pointer to a struct ttm_bo_device. * @bdev: Pointer to a struct ttm_bo_device.
* @bridge: The agp bridge this device is sitting on. * @bridge: The agp bridge this device is sitting on.
* @size: Size of the data needed backing.
* @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
* @dummy_read_page: See struct ttm_bo_device.
*
* *
* Create a TTM backend that uses the indicated AGP bridge as an aperture * Create a TTM backend that uses the indicated AGP bridge as an aperture
* for TT memory. This function uses the linux agpgart interface to * for TT memory. This function uses the linux agpgart interface to
* bind and unbind memory backing a ttm_tt. * bind and unbind memory backing a ttm_tt.
*/ */
extern struct ttm_backend *ttm_agp_backend_init(struct ttm_bo_device *bdev, extern struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev,
struct agp_bridge_data *bridge); struct agp_bridge_data *bridge,
unsigned long size, uint32_t page_flags,
struct page *dummy_read_page);
#endif #endif
#endif #endif
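For a GART sitting behind an AGP bridge, the driver's ttm_tt_create callback can simply forward to this helper. A hedged sketch: foo_bdev_to_drm() is an assumed accessor, and dev->agp->bridge is the usual DRM AGP bridge handle.

static struct ttm_tt *foo_agp_ttm_tt_create(struct ttm_bo_device *bdev,
					    unsigned long size,
					    uint32_t page_flags,
					    struct page *dummy_read_page)
{
	struct drm_device *dev = foo_bdev_to_drm(bdev);	/* hypothetical lookup */

	return ttm_agp_tt_create(bdev, dev->agp->bridge, size,
				 page_flags, dummy_read_page);
}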
...@@ -30,45 +30,70 @@ ...@@ -30,45 +30,70 @@
#include "ttm_memory.h" #include "ttm_memory.h"
/** /**
* Get count number of pages from pool to pages list. * Initialize pool allocator.
*/
int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages);
/**
* Free pool allocator.
*/
void ttm_page_alloc_fini(void);
/**
* ttm_pool_populate:
*
* @ttm: The struct ttm_tt to contain the backing pages.
* *
* @pages: head of empty linked list where pages are filled. * Add backing pages to all of @ttm
* @flags: ttm flags for page allocation.
* @cstate: ttm caching state for the page.
* @count: number of pages to allocate.
* @dma_address: The DMA (bus) address of pages (if TTM_PAGE_FLAG_DMA32 set).
*/ */
int ttm_get_pages(struct list_head *pages, extern int ttm_pool_populate(struct ttm_tt *ttm);
int flags,
enum ttm_caching_state cstate,
unsigned count,
dma_addr_t *dma_address);
/** /**
* Put linked list of pages to pool. * ttm_pool_unpopulate:
*
* @ttm: The struct ttm_tt which to free backing pages.
* *
* @pages: list of pages to free. * Free all pages of @ttm
* @page_count: number of pages in the list. Zero can be passed for unknown
* count.
* @flags: ttm flags for page allocation.
* @cstate: ttm caching state.
* @dma_address: The DMA (bus) address of pages (if TTM_PAGE_FLAG_DMA32 set).
*/ */
void ttm_put_pages(struct list_head *pages, extern void ttm_pool_unpopulate(struct ttm_tt *ttm);
unsigned page_count,
int flags, /**
enum ttm_caching_state cstate, * Output the state of pools to debugfs file
dma_addr_t *dma_address); */
extern int ttm_page_alloc_debugfs(struct seq_file *m, void *data);
#ifdef CONFIG_SWIOTLB
/** /**
* Initialize pool allocator. * Initialize pool allocator.
*/ */
int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages); int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages);
/** /**
* Free pool allocator. * Free pool allocator.
*/ */
void ttm_page_alloc_fini(void); void ttm_dma_page_alloc_fini(void);
/** /**
* Output the state of pools to debugfs file * Output the state of pools to debugfs file
*/ */
extern int ttm_page_alloc_debugfs(struct seq_file *m, void *data); extern int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data);
extern int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev);
extern void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev);
#else
static inline int ttm_dma_page_alloc_init(struct ttm_mem_global *glob,
unsigned max_pages)
{
return -ENODEV;
}
static inline void ttm_dma_page_alloc_fini(void) { return; }
static inline int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
{
return 0;
}
#endif
#endif #endif
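With the stubs above compiling the DMA pool out when CONFIG_SWIOTLB is not set, a driver can also check at runtime whether a software IOTLB is actually in use (via swiotlb_nr_tbl(), exported in the swiotlb change below) and fall back to the generic pool otherwise. A sketch under assumptions: the foo_* names and the foo_ttm_to_device() helper are hypothetical, and to_foo_dma_tt() is the container_of helper sketched earlier.

static int foo_ttm_tt_populate(struct ttm_tt *ttm)
{
#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl())
		return ttm_dma_populate(&to_foo_dma_tt(ttm)->ttm,
					foo_ttm_to_device(ttm));
#endif
	return ttm_pool_populate(ttm);
}

static void foo_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		ttm_dma_unpopulate(&to_foo_dma_tt(ttm)->ttm,
				   foo_ttm_to_device(ttm));
		return;
	}
#endif
	ttm_pool_unpopulate(ttm);
}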
...@@ -24,7 +24,7 @@ extern int swiotlb_force; ...@@ -24,7 +24,7 @@ extern int swiotlb_force;
extern void swiotlb_init(int verbose); extern void swiotlb_init(int verbose);
extern void swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose); extern void swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose);
extern unsigned long swioltb_nr_tbl(void); extern unsigned long swiotlb_nr_tbl(void);
/* /*
* Enumeration for sync targets * Enumeration for sync targets
......
...@@ -110,11 +110,11 @@ setup_io_tlb_npages(char *str) ...@@ -110,11 +110,11 @@ setup_io_tlb_npages(char *str)
__setup("swiotlb=", setup_io_tlb_npages); __setup("swiotlb=", setup_io_tlb_npages);
/* make io_tlb_overflow tunable too? */ /* make io_tlb_overflow tunable too? */
unsigned long swioltb_nr_tbl(void) unsigned long swiotlb_nr_tbl(void)
{ {
return io_tlb_nslabs; return io_tlb_nslabs;
} }
EXPORT_SYMBOL_GPL(swiotlb_nr_tbl);
/* Note that this doesn't work with highmem page */ /* Note that this doesn't work with highmem page */
static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev, static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
volatile void *address) volatile void *address)
...@@ -321,6 +321,7 @@ void __init swiotlb_free(void) ...@@ -321,6 +321,7 @@ void __init swiotlb_free(void)
free_bootmem_late(__pa(io_tlb_start), free_bootmem_late(__pa(io_tlb_start),
PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT)); PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
} }
io_tlb_nslabs = 0;
} }
static int is_swiotlb_buffer(phys_addr_t paddr) static int is_swiotlb_buffer(phys_addr_t paddr)
......