Commit 0a667b50 authored by Dave Airlie

drm/ttm: remove bdev from ttm_tt

I want to split this structure up and use it differently;
step one: remove the bdev pointer from it and pass it explicitly.
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200826014428.828392-4-airlied@gmail.com
parent f437bc1e
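
In practice the change is mechanical: every ttm_tt backend callback and ttm_tt_* helper gains an explicit struct ttm_bo_device *bdev argument, and callers pass bo->bdev (or the bdev already in scope) instead of dereferencing the ttm->bdev field, which the patch deletes. A minimal sketch of the before/after pattern, using a hypothetical mydrv backend and stub types rather than the real TTM headers:

/* Sketch only: "mydrv" and these stub declarations are illustrative, not kernel code. */
struct page;
struct ttm_bo_device;                    /* device-wide state, opaque here */
struct ttm_resource;
struct ttm_tt { struct page **pages; };  /* note: no bdev member any more */

/*
 * Before the patch a backend hook looked like
 *     static int mydrv_backend_bind(struct ttm_tt *ttm, struct ttm_resource *mem)
 * and reached the device via ttm->bdev.  Afterwards the device is passed in:
 */
static int mydrv_backend_bind(struct ttm_bo_device *bdev, struct ttm_tt *ttm,
			      struct ttm_resource *mem)
{
	/* use the explicit bdev argument instead of the removed ttm->bdev */
	(void)bdev;
	(void)ttm;
	(void)mem;
	return 0;
}
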
@@ -565,7 +565,7 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict,
 	}
 	/* Bind the memory to the GTT space */
-	r = ttm_tt_bind(bo->ttm, &tmp_mem, ctx);
+	r = ttm_tt_bind(bo->bdev, bo->ttm, &tmp_mem, ctx);
 	if (unlikely(r)) {
 		goto out_cleanup;
 	}
@@ -991,9 +991,10 @@ void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages)
  *
  * Called by amdgpu_ttm_backend_bind()
  **/
-static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
+static int amdgpu_ttm_tt_pin_userptr(struct ttm_bo_device *bdev,
+				     struct ttm_tt *ttm)
 {
-	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
 	struct amdgpu_ttm_tt *gtt = (void *)ttm;
 	int r;
@@ -1027,9 +1028,10 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
 /**
  * amdgpu_ttm_tt_unpin_userptr - Unpin and unmap userptr pages
  */
-static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
+static void amdgpu_ttm_tt_unpin_userptr(struct ttm_bo_device *bdev,
+					struct ttm_tt *ttm)
 {
-	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
 	struct amdgpu_ttm_tt *gtt = (void *)ttm;
 	int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
@@ -1110,16 +1112,17 @@ static int amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
  * Called by ttm_tt_bind() on behalf of ttm_bo_handle_move_mem().
  * This handles binding GTT memory to the device address space.
  */
-static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
+static int amdgpu_ttm_backend_bind(struct ttm_bo_device *bdev,
+				   struct ttm_tt *ttm,
 				   struct ttm_resource *bo_mem)
 {
-	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
 	struct amdgpu_ttm_tt *gtt = (void*)ttm;
 	uint64_t flags;
 	int r = 0;
 	if (gtt->userptr) {
-		r = amdgpu_ttm_tt_pin_userptr(ttm);
+		r = amdgpu_ttm_tt_pin_userptr(bdev, ttm);
 		if (r) {
 			DRM_ERROR("failed to pin userptr\n");
 			return r;
@@ -1237,15 +1240,16 @@ int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
  * Called by ttm_tt_unbind() on behalf of ttm_bo_move_ttm() and
  * ttm_tt_destroy().
  */
-static void amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
+static void amdgpu_ttm_backend_unbind(struct ttm_bo_device *bdev,
+				      struct ttm_tt *ttm)
 {
-	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
 	struct amdgpu_ttm_tt *gtt = (void *)ttm;
 	int r;
 	/* if the pages have userptr pinning then clear that first */
 	if (gtt->userptr)
-		amdgpu_ttm_tt_unpin_userptr(ttm);
+		amdgpu_ttm_tt_unpin_userptr(bdev, ttm);
 	if (gtt->offset == AMDGPU_BO_INVALID_OFFSET)
 		return;
@@ -1257,7 +1261,8 @@ static void amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
 			  gtt->ttm.ttm.num_pages, gtt->offset);
 }
-static void amdgpu_ttm_backend_destroy(struct ttm_tt *ttm)
+static void amdgpu_ttm_backend_destroy(struct ttm_bo_device *bdev,
+				       struct ttm_tt *ttm)
 {
 	struct amdgpu_ttm_tt *gtt = (void *)ttm;
@@ -1307,10 +1312,11 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
  * Map the pages of a ttm_tt object to an address space visible
  * to the underlying device.
  */
-static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm,
-				  struct ttm_operation_ctx *ctx)
+static int amdgpu_ttm_tt_populate(struct ttm_bo_device *bdev,
+				  struct ttm_tt *ttm,
+				  struct ttm_operation_ctx *ctx)
 {
-	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
 	struct amdgpu_ttm_tt *gtt = (void *)ttm;
 	/* user pages are bound by amdgpu_ttm_tt_pin_userptr() */
@@ -1361,7 +1367,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm,
  * Unmaps pages of a ttm_tt object from the device address space and
  * unpopulates the page array backing it.
  */
-static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
+static void amdgpu_ttm_tt_unpopulate(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
 {
 	struct amdgpu_ttm_tt *gtt = (void *)ttm;
 	struct amdgpu_device *adev;
@@ -1385,7 +1391,7 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
 	if (ttm->page_flags & TTM_PAGE_FLAG_SG)
 		return;
-	adev = amdgpu_ttm_adev(ttm->bdev);
+	adev = amdgpu_ttm_adev(bdev);
 #ifdef CONFIG_SWIOTLB
 	if (adev->need_swiotlb && swiotlb_nr_tbl()) {
...
@@ -967,7 +967,7 @@ static const struct drm_gem_object_funcs drm_gem_vram_object_funcs = {
  * TTM TT
  */
-static void backend_func_destroy(struct ttm_tt *tt)
+static void backend_func_destroy(struct ttm_bo_device *bdev, struct ttm_tt *tt)
 {
 	ttm_tt_fini(tt);
 	kfree(tt);
...
@@ -848,7 +848,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
 	if (ret)
 		return ret;
-	ret = ttm_tt_bind(bo->ttm, &tmp_reg, &ctx);
+	ret = ttm_tt_bind(bo->bdev, bo->ttm, &tmp_reg, &ctx);
 	if (ret)
 		goto out;
@@ -1219,7 +1219,8 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
 }
 static int
-nouveau_ttm_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
+nouveau_ttm_tt_populate(struct ttm_bo_device *bdev,
+			struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
 {
 	struct ttm_dma_tt *ttm_dma = (void *)ttm;
 	struct nouveau_drm *drm;
@@ -1237,12 +1238,12 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
 		return 0;
 	}
-	drm = nouveau_bdev(ttm->bdev);
+	drm = nouveau_bdev(bdev);
 	dev = drm->dev->dev;
 #if IS_ENABLED(CONFIG_AGP)
 	if (drm->agp.bridge) {
-		return ttm_agp_tt_populate(ttm, ctx);
+		return ttm_agp_tt_populate(bdev, ttm, ctx);
 	}
 #endif
@@ -1255,7 +1256,8 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
 }
 static void
-nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
+nouveau_ttm_tt_unpopulate(struct ttm_bo_device *bdev,
+			  struct ttm_tt *ttm)
 {
 	struct ttm_dma_tt *ttm_dma = (void *)ttm;
 	struct nouveau_drm *drm;
@@ -1265,12 +1267,12 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
 	if (slave)
 		return;
-	drm = nouveau_bdev(ttm->bdev);
+	drm = nouveau_bdev(bdev);
 	dev = drm->dev->dev;
 #if IS_ENABLED(CONFIG_AGP)
 	if (drm->agp.bridge) {
-		ttm_agp_tt_unpopulate(ttm);
+		ttm_agp_tt_unpopulate(bdev, ttm);
 		return;
 	}
 #endif
...
@@ -15,7 +15,7 @@ struct nouveau_sgdma_be {
 };
 static void
-nouveau_sgdma_destroy(struct ttm_tt *ttm)
+nouveau_sgdma_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
 {
 	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
@@ -26,7 +26,7 @@ nouveau_sgdma_destroy(struct ttm_tt *ttm)
 }
 static int
-nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_resource *reg)
+nv04_sgdma_bind(struct ttm_bo_device *bdev, struct ttm_tt *ttm, struct ttm_resource *reg)
 {
 	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
 	struct nouveau_mem *mem = nouveau_mem(reg);
@@ -47,7 +47,7 @@ nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_resource *reg)
 }
 static void
-nv04_sgdma_unbind(struct ttm_tt *ttm)
+nv04_sgdma_unbind(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
 {
 	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
 	nouveau_mem_fini(nvbe->mem);
@@ -60,7 +60,7 @@ static struct ttm_backend_func nv04_sgdma_backend = {
 };
 static int
-nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_resource *reg)
+nv50_sgdma_bind(struct ttm_bo_device *bdev, struct ttm_tt *ttm, struct ttm_resource *reg)
 {
 	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
 	struct nouveau_mem *mem = nouveau_mem(reg);
...
@@ -104,7 +104,8 @@ struct qxl_ttm_tt {
 	u64 offset;
 };
-static int qxl_ttm_backend_bind(struct ttm_tt *ttm,
+static int qxl_ttm_backend_bind(struct ttm_bo_device *bdev,
+				struct ttm_tt *ttm,
 				struct ttm_resource *bo_mem)
 {
 	struct qxl_ttm_tt *gtt = (void *)ttm;
@@ -118,12 +119,14 @@ static int qxl_ttm_backend_bind(struct ttm_tt *ttm,
 	return -1;
 }
-static void qxl_ttm_backend_unbind(struct ttm_tt *ttm)
+static void qxl_ttm_backend_unbind(struct ttm_bo_device *bdev,
+				   struct ttm_tt *ttm)
 {
 	/* Not implemented */
 }
-static void qxl_ttm_backend_destroy(struct ttm_tt *ttm)
+static void qxl_ttm_backend_destroy(struct ttm_bo_device *bdev,
+				    struct ttm_tt *ttm)
 {
 	struct qxl_ttm_tt *gtt = (void *)ttm;
...
@@ -244,7 +244,7 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
 		goto out_cleanup;
 	}
-	r = ttm_tt_bind(bo->ttm, &tmp_mem, &ctx);
+	r = ttm_tt_bind(bo->bdev, bo->ttm, &tmp_mem, &ctx);
 	if (unlikely(r)) {
 		goto out_cleanup;
 	}
@@ -430,9 +430,9 @@ struct radeon_ttm_tt {
 };
 /* prepare the sg table with the user pages */
-static int radeon_ttm_tt_pin_userptr(struct ttm_tt *ttm)
+static int radeon_ttm_tt_pin_userptr(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
 {
-	struct radeon_device *rdev = radeon_get_rdev(ttm->bdev);
+	struct radeon_device *rdev = radeon_get_rdev(bdev);
 	struct radeon_ttm_tt *gtt = (void *)ttm;
 	unsigned pinned = 0;
 	int r;
@@ -491,9 +491,9 @@ static int radeon_ttm_tt_pin_userptr(struct ttm_tt *ttm)
 	return r;
 }
-static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
+static void radeon_ttm_tt_unpin_userptr(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
 {
-	struct radeon_device *rdev = radeon_get_rdev(ttm->bdev);
+	struct radeon_device *rdev = radeon_get_rdev(bdev);
 	struct radeon_ttm_tt *gtt = (void *)ttm;
 	struct sg_page_iter sg_iter;
@@ -520,17 +520,18 @@ static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
 	sg_free_table(ttm->sg);
 }
-static int radeon_ttm_backend_bind(struct ttm_tt *ttm,
+static int radeon_ttm_backend_bind(struct ttm_bo_device *bdev,
+				   struct ttm_tt *ttm,
 				   struct ttm_resource *bo_mem)
 {
 	struct radeon_ttm_tt *gtt = (void*)ttm;
-	struct radeon_device *rdev = radeon_get_rdev(ttm->bdev);
+	struct radeon_device *rdev = radeon_get_rdev(bdev);
 	uint32_t flags = RADEON_GART_PAGE_VALID | RADEON_GART_PAGE_READ |
 		RADEON_GART_PAGE_WRITE;
 	int r;
 	if (gtt->userptr) {
-		radeon_ttm_tt_pin_userptr(ttm);
+		radeon_ttm_tt_pin_userptr(bdev, ttm);
 		flags &= ~RADEON_GART_PAGE_WRITE;
 	}
@@ -551,18 +552,18 @@ static int radeon_ttm_backend_bind(struct ttm_tt *ttm,
 	return 0;
 }
-static void radeon_ttm_backend_unbind(struct ttm_tt *ttm)
+static void radeon_ttm_backend_unbind(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
 {
 	struct radeon_ttm_tt *gtt = (void *)ttm;
-	struct radeon_device *rdev = radeon_get_rdev(ttm->bdev);
+	struct radeon_device *rdev = radeon_get_rdev(bdev);
 	radeon_gart_unbind(rdev, gtt->offset, ttm->num_pages);
 	if (gtt->userptr)
-		radeon_ttm_tt_unpin_userptr(ttm);
+		radeon_ttm_tt_unpin_userptr(bdev, ttm);
 }
-static void radeon_ttm_backend_destroy(struct ttm_tt *ttm)
+static void radeon_ttm_backend_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
 {
 	struct radeon_ttm_tt *gtt = (void *)ttm;
@@ -609,8 +610,9 @@ static struct radeon_ttm_tt *radeon_ttm_tt_to_gtt(struct ttm_tt *ttm)
 	return (struct radeon_ttm_tt *)ttm;
 }
-static int radeon_ttm_tt_populate(struct ttm_tt *ttm,
-				  struct ttm_operation_ctx *ctx)
+static int radeon_ttm_tt_populate(struct ttm_bo_device *bdev,
+				  struct ttm_tt *ttm,
+				  struct ttm_operation_ctx *ctx)
 {
 	struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);
 	struct radeon_device *rdev;
@@ -633,10 +635,10 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm,
 		return 0;
 	}
-	rdev = radeon_get_rdev(ttm->bdev);
+	rdev = radeon_get_rdev(bdev);
 #if IS_ENABLED(CONFIG_AGP)
 	if (rdev->flags & RADEON_IS_AGP) {
-		return ttm_agp_tt_populate(ttm, ctx);
+		return ttm_agp_tt_populate(bdev, ttm, ctx);
 	}
 #endif
@@ -649,7 +651,7 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm,
 	return ttm_populate_and_map_pages(rdev->dev, &gtt->ttm, ctx);
 }
-static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
+static void radeon_ttm_tt_unpopulate(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
 {
 	struct radeon_device *rdev;
 	struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);
@@ -664,10 +666,10 @@ static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
 	if (slave)
 		return;
-	rdev = radeon_get_rdev(ttm->bdev);
+	rdev = radeon_get_rdev(bdev);
 #if IS_ENABLED(CONFIG_AGP)
 	if (rdev->flags & RADEON_IS_AGP) {
-		ttm_agp_tt_unpopulate(ttm);
+		ttm_agp_tt_unpopulate(bdev, ttm);
 		return;
 	}
 #endif
...
@@ -48,7 +48,8 @@ struct ttm_agp_backend {
 	struct agp_bridge_data *bridge;
 };
-static int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_resource *bo_mem)
+static int ttm_agp_bind(struct ttm_bo_device *bdev,
+			struct ttm_tt *ttm, struct ttm_resource *bo_mem)
 {
 	struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
 	struct page *dummy_read_page = ttm_bo_glob.dummy_read_page;
@@ -82,7 +83,8 @@ static int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_resource *bo_mem)
 	return ret;
 }
-static void ttm_agp_unbind(struct ttm_tt *ttm)
+static void ttm_agp_unbind(struct ttm_bo_device *bdev,
+			   struct ttm_tt *ttm)
 {
 	struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
@@ -96,12 +98,13 @@ static void ttm_agp_unbind(struct ttm_tt *ttm)
 	}
 }
-static void ttm_agp_destroy(struct ttm_tt *ttm)
+static void ttm_agp_destroy(struct ttm_bo_device *bdev,
+			    struct ttm_tt *ttm)
 {
 	struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
 	if (agp_be->mem)
-		ttm_agp_unbind(ttm);
+		ttm_agp_unbind(bdev, ttm);
 	ttm_tt_fini(ttm);
 	kfree(agp_be);
 }
@@ -135,7 +138,8 @@ struct ttm_tt *ttm_agp_tt_create(struct ttm_buffer_object *bo,
 }
 EXPORT_SYMBOL(ttm_agp_tt_create);
-int ttm_agp_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
+int ttm_agp_tt_populate(struct ttm_bo_device *bdev,
+			struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
 {
 	if (ttm->state != tt_unpopulated)
 		return 0;
@@ -144,7 +148,8 @@ int ttm_agp_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
 }
 EXPORT_SYMBOL(ttm_agp_tt_populate);
-void ttm_agp_tt_unpopulate(struct ttm_tt *ttm)
+void ttm_agp_tt_unpopulate(struct ttm_bo_device *bdev,
+			   struct ttm_tt *ttm)
 {
 	ttm_pool_unpopulate(ttm);
 }
...
@@ -282,7 +282,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 			goto out_err;
 		if (mem->mem_type != TTM_PL_SYSTEM) {
-			ret = ttm_tt_bind(bo->ttm, mem, ctx);
+			ret = ttm_tt_bind(bdev, bo->ttm, mem, ctx);
 			if (ret)
 				goto out_err;
 		}
@@ -324,7 +324,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 out_err:
 	new_man = ttm_manager_type(bdev, bo->mem.mem_type);
 	if (!new_man->use_tt) {
-		ttm_tt_destroy(bo->ttm);
+		ttm_tt_destroy(bdev, bo->ttm);
 		bo->ttm = NULL;
 	}
@@ -344,7 +344,7 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
 	if (bo->bdev->driver->move_notify)
 		bo->bdev->driver->move_notify(bo, false, NULL);
-	ttm_tt_destroy(bo->ttm);
+	ttm_tt_destroy(bo->bdev, bo->ttm);
 	bo->ttm = NULL;
 	ttm_resource_free(bo, &bo->mem);
 }
@@ -1656,7 +1656,7 @@ int ttm_bo_swapout(struct ttm_bo_global *glob, struct ttm_operation_ctx *ctx)
 	if (bo->bdev->driver->swap_notify)
 		bo->bdev->driver->swap_notify(bo);
-	ret = ttm_tt_swapout(bo->ttm, bo->persistent_swap_storage);
+	ret = ttm_tt_swapout(bo->bdev, bo->ttm, bo->persistent_swap_storage);
 out:
 	/**
...
@@ -67,7 +67,7 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
 			return ret;
 	}
-	ttm_tt_unbind(ttm);
+	ttm_tt_unbind(bo->bdev, ttm);
 	ttm_bo_free_old_node(bo);
 	ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
 			TTM_PL_MASK_MEM);
@@ -79,7 +79,7 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
 		return ret;
 	if (new_mem->mem_type != TTM_PL_SYSTEM) {
-		ret = ttm_tt_bind(ttm, new_mem, ctx);
+		ret = ttm_tt_bind(bo->bdev, ttm, new_mem, ctx);
 		if (unlikely(ret != 0))
 			return ret;
 	}
@@ -264,7 +264,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
 	 * TTM might be null for moves within the same region.
 	 */
 	if (ttm) {
-		ret = ttm_tt_populate(ttm, ctx);
+		ret = ttm_tt_populate(bdev, ttm, ctx);
 		if (ret)
 			goto out1;
 	}
@@ -303,7 +303,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
 	new_mem->mm_node = NULL;
 	if (!man->use_tt) {
-		ttm_tt_destroy(ttm);
+		ttm_tt_destroy(bdev, ttm);
 		bo->ttm = NULL;
 	}
@@ -454,7 +454,7 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
 	BUG_ON(!ttm);
-	ret = ttm_tt_populate(ttm, &ctx);
+	ret = ttm_tt_populate(bo->bdev, ttm, &ctx);
 	if (ret)
 		return ret;
@@ -550,7 +550,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 			return ret;
 		if (!man->use_tt) {
-			ttm_tt_destroy(bo->ttm);
+			ttm_tt_destroy(bdev, bo->ttm);
 			bo->ttm = NULL;
 		}
 		ttm_bo_free_old_node(bo);
@@ -673,7 +673,7 @@ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
 			return ret;
 		if (!to->use_tt) {
-			ttm_tt_destroy(bo->ttm);
+			ttm_tt_destroy(bdev, bo->ttm);
 			bo->ttm = NULL;
 		}
 		ttm_bo_free_old_node(bo);
...
@@ -341,7 +341,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
 		};
 		ttm = bo->ttm;
-		if (ttm_tt_populate(bo->ttm, &ctx))
+		if (ttm_tt_populate(bdev, bo->ttm, &ctx))
 			return VM_FAULT_OOM;
 	} else {
 		/* Iomem should not be marked encrypted */
...
@@ -207,29 +207,28 @@ int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
 }
 EXPORT_SYMBOL(ttm_tt_set_placement_caching);
-void ttm_tt_destroy(struct ttm_tt *ttm)
+void ttm_tt_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
 {
 	if (ttm == NULL)
 		return;
-	ttm_tt_unbind(ttm);
+	ttm_tt_unbind(bdev, ttm);
 	if (ttm->state == tt_unbound)
-		ttm_tt_unpopulate(ttm);
+		ttm_tt_unpopulate(bdev, ttm);
 	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
 	    ttm->swap_storage)
 		fput(ttm->swap_storage);
 	ttm->swap_storage = NULL;
-	ttm->func->destroy(ttm);
+	ttm->func->destroy(bdev, ttm);
 }
 static void ttm_tt_init_fields(struct ttm_tt *ttm,
 			       struct ttm_buffer_object *bo,
 			       uint32_t page_flags)
 {
-	ttm->bdev = bo->bdev;
 	ttm->num_pages = bo->num_pages;
 	ttm->caching_state = tt_cached;
 	ttm->page_flags = page_flags;
@@ -308,15 +307,16 @@ void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
 }
 EXPORT_SYMBOL(ttm_dma_tt_fini);
-void ttm_tt_unbind(struct ttm_tt *ttm)
+void ttm_tt_unbind(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
 {
 	if (ttm->state == tt_bound) {
-		ttm->func->unbind(ttm);
+		ttm->func->unbind(bdev, ttm);
 		ttm->state = tt_unbound;
 	}
 }
-int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_resource *bo_mem,
+int ttm_tt_bind(struct ttm_bo_device *bdev,
+		struct ttm_tt *ttm, struct ttm_resource *bo_mem,
 		struct ttm_operation_ctx *ctx)
 {
 	int ret = 0;
@@ -327,11 +327,11 @@ int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_resource *bo_mem,
 	if (ttm->state == tt_bound)
 		return 0;
-	ret = ttm_tt_populate(ttm, ctx);
+	ret = ttm_tt_populate(bdev, ttm, ctx);
 	if (ret)
 		return ret;
-	ret = ttm->func->bind(ttm, bo_mem);
+	ret = ttm->func->bind(bdev, ttm, bo_mem);
 	if (unlikely(ret != 0))
 		return ret;
@@ -383,7 +383,8 @@ int ttm_tt_swapin(struct ttm_tt *ttm)
 	return ret;
 }
-int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
+int ttm_tt_swapout(struct ttm_bo_device *bdev,
+		   struct ttm_tt *ttm, struct file *persistent_swap_storage)
 {
 	struct address_space *swap_space;
 	struct file *swap_storage;
@@ -429,7 +430,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
 		put_page(to_page);
 	}
-	ttm_tt_unpopulate(ttm);
+	ttm_tt_unpopulate(bdev, ttm);
 	ttm->swap_storage = swap_storage;
 	ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
 	if (persistent_swap_storage)
@@ -443,7 +444,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
 	return ret;
 }
-static void ttm_tt_add_mapping(struct ttm_tt *ttm)
+static void ttm_tt_add_mapping(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
 {
 	pgoff_t i;
@@ -451,22 +452,23 @@ static void ttm_tt_add_mapping(struct ttm_tt *ttm)
 		return;
 	for (i = 0; i < ttm->num_pages; ++i)
-		ttm->pages[i]->mapping = ttm->bdev->dev_mapping;
+		ttm->pages[i]->mapping = bdev->dev_mapping;
 }
-int ttm_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
+int ttm_tt_populate(struct ttm_bo_device *bdev,
+		    struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
 {
 	int ret;
 	if (ttm->state != tt_unpopulated)
 		return 0;
-	if (ttm->bdev->driver->ttm_tt_populate)
-		ret = ttm->bdev->driver->ttm_tt_populate(ttm, ctx);
+	if (bdev->driver->ttm_tt_populate)
+		ret = bdev->driver->ttm_tt_populate(bdev, ttm, ctx);
 	else
 		ret = ttm_pool_populate(ttm, ctx);
 	if (!ret)
-		ttm_tt_add_mapping(ttm);
+		ttm_tt_add_mapping(bdev, ttm);
 	return ret;
 }
@@ -484,14 +486,15 @@ static void ttm_tt_clear_mapping(struct ttm_tt *ttm)
 	}
 }
-void ttm_tt_unpopulate(struct ttm_tt *ttm)
+void ttm_tt_unpopulate(struct ttm_bo_device *bdev,
+		       struct ttm_tt *ttm)
 {
 	if (ttm->state == tt_unpopulated)
 		return;
 	ttm_tt_clear_mapping(ttm);
-	if (ttm->bdev->driver->ttm_tt_unpopulate)
-		ttm->bdev->driver->ttm_tt_unpopulate(ttm);
+	if (bdev->driver->ttm_tt_unpopulate)
+		bdev->driver->ttm_tt_unpopulate(bdev, ttm);
 	else
 		ttm_pool_unpopulate(ttm);
 }
@@ -465,13 +465,13 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
 	dma_resv_assert_held(src->base.resv);
 	if (dst->ttm->state == tt_unpopulated) {
-		ret = dst->ttm->bdev->driver->ttm_tt_populate(dst->ttm, &ctx);
+		ret = dst->bdev->driver->ttm_tt_populate(dst->bdev, dst->ttm, &ctx);
 		if (ret)
 			return ret;
 	}
 	if (src->ttm->state == tt_unpopulated) {
-		ret = src->ttm->bdev->driver->ttm_tt_populate(src->ttm, &ctx);
+		ret = src->bdev->driver->ttm_tt_populate(src->bdev, src->ttm, &ctx);
 		if (ret)
 			return ret;
 	}
...
@@ -539,7 +539,8 @@ const struct vmw_sg_table *vmw_bo_sg_table(struct ttm_buffer_object *bo)
 }
-static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_resource *bo_mem)
+static int vmw_ttm_bind(struct ttm_bo_device *bdev,
+			struct ttm_tt *ttm, struct ttm_resource *bo_mem)
 {
 	struct vmw_ttm_tt *vmw_be =
 		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
@@ -573,7 +574,8 @@ static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_resource *bo_mem)
 	return 0;
 }
-static void vmw_ttm_unbind(struct ttm_tt *ttm)
+static void vmw_ttm_unbind(struct ttm_bo_device *bdev,
+			   struct ttm_tt *ttm)
 {
 	struct vmw_ttm_tt *vmw_be =
 		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
@@ -594,7 +596,7 @@ static void vmw_ttm_unbind(struct ttm_tt *ttm)
 }
-static void vmw_ttm_destroy(struct ttm_tt *ttm)
+static void vmw_ttm_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
 {
 	struct vmw_ttm_tt *vmw_be =
 		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
@@ -612,7 +614,8 @@ static void vmw_ttm_destroy(struct ttm_tt *ttm)
 }
-static int vmw_ttm_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
+static int vmw_ttm_populate(struct ttm_bo_device *bdev,
+			    struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
 {
 	struct vmw_ttm_tt *vmw_tt =
 		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
@@ -640,7 +643,8 @@ static int vmw_ttm_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
 	return ret;
 }
-static void vmw_ttm_unpopulate(struct ttm_tt *ttm)
+static void vmw_ttm_unpopulate(struct ttm_bo_device *bdev,
+			       struct ttm_tt *ttm)
 {
 	struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
 						 dma_ttm.ttm);
@@ -796,7 +800,7 @@ int vmw_bo_create_and_populate(struct vmw_private *dev_priv,
 	ret = ttm_bo_reserve(bo, false, true, NULL);
 	BUG_ON(ret != 0);
-	ret = vmw_ttm_populate(bo->ttm, &ctx);
+	ret = vmw_ttm_populate(bo->bdev, bo->ttm, &ctx);
 	if (likely(ret == 0)) {
 		struct vmw_ttm_tt *vmw_tt =
 			container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);
...
@@ -77,8 +77,9 @@ struct ttm_bo_driver {
 	 * Returns:
 	 * -ENOMEM: Out of memory.
 	 */
-	int (*ttm_tt_populate)(struct ttm_tt *ttm,
-			       struct ttm_operation_ctx *ctx);
+	int (*ttm_tt_populate)(struct ttm_bo_device *bdev,
+			       struct ttm_tt *ttm,
+			       struct ttm_operation_ctx *ctx);
 	/**
 	 * ttm_tt_unpopulate
@@ -87,7 +88,7 @@ struct ttm_bo_driver {
 	 *
 	 * Free all backing page
 	 */
-	void (*ttm_tt_unpopulate)(struct ttm_tt *ttm);
+	void (*ttm_tt_unpopulate)(struct ttm_bo_device *bdev, struct ttm_tt *ttm);
 	/**
 	 * struct ttm_bo_driver member eviction_valuable
...
@@ -60,7 +60,7 @@ struct ttm_backend_func {
 	 * indicated by @bo_mem. This function should be able to handle
 	 * differences between aperture and system page sizes.
 	 */
-	int (*bind) (struct ttm_tt *ttm, struct ttm_resource *bo_mem);
+	int (*bind) (struct ttm_bo_device *bdev, struct ttm_tt *ttm, struct ttm_resource *bo_mem);
 	/**
 	 * struct ttm_backend_func member unbind
@@ -70,7 +70,7 @@ struct ttm_backend_func {
 	 * Unbind previously bound backend pages. This function should be
 	 * able to handle differences between aperture and system page sizes.
 	 */
-	void (*unbind) (struct ttm_tt *ttm);
+	void (*unbind) (struct ttm_bo_device *bdev, struct ttm_tt *ttm);
 	/**
 	 * struct ttm_backend_func member destroy
@@ -80,13 +80,12 @@ struct ttm_backend_func {
 	 * Destroy the backend. This will be call back from ttm_tt_destroy so
 	 * don't call ttm_tt_destroy from the callback or infinite loop.
 	 */
-	void (*destroy) (struct ttm_tt *ttm);
+	void (*destroy) (struct ttm_bo_device *bdev, struct ttm_tt *ttm);
 };
 /**
  * struct ttm_tt
  *
- * @bdev: Pointer to a struct ttm_bo_device.
  * @func: Pointer to a struct ttm_backend_func that describes
  * the backend methods.
  * pointer.
@@ -103,7 +102,6 @@ struct ttm_backend_func {
  * memory.
  */
 struct ttm_tt {
-	struct ttm_bo_device *bdev;
 	struct ttm_backend_func *func;
 	struct page **pages;
 	uint32_t page_flags;
@@ -183,7 +181,8 @@ void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma);
 *
 * Bind the pages of @ttm to an aperture location identified by @bo_mem
 */
-int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_resource *bo_mem,
+int ttm_tt_bind(struct ttm_bo_device *bdev,
+		struct ttm_tt *ttm, struct ttm_resource *bo_mem,
 		struct ttm_operation_ctx *ctx);
 /**
@@ -193,7 +192,7 @@ int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_resource *bo_mem,
 *
 * Unbind, unpopulate and destroy common struct ttm_tt.
 */
-void ttm_tt_destroy(struct ttm_tt *ttm);
+void ttm_tt_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm);
 /**
 * ttm_ttm_unbind:
@@ -202,7 +201,7 @@ void ttm_tt_destroy(struct ttm_tt *ttm);
 *
 * Unbind a struct ttm_tt.
 */
-void ttm_tt_unbind(struct ttm_tt *ttm);
+void ttm_tt_unbind(struct ttm_bo_device *bdev, struct ttm_tt *ttm);
 /**
 * ttm_tt_swapin:
@@ -227,7 +226,7 @@ int ttm_tt_swapin(struct ttm_tt *ttm);
 * and cache flushes and potential page splitting / combining.
 */
 int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement);
-int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage);
+int ttm_tt_swapout(struct ttm_bo_device *bdev, struct ttm_tt *ttm, struct file *persistent_swap_storage);
 /**
 * ttm_tt_populate - allocate pages for a ttm
@@ -236,7 +235,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage);
 *
 * Calls the driver method to allocate pages for a ttm
 */
-int ttm_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx);
+int ttm_tt_populate(struct ttm_bo_device *bdev, struct ttm_tt *ttm, struct ttm_operation_ctx *ctx);
 /**
 * ttm_tt_unpopulate - free pages from a ttm
@@ -245,7 +244,7 @@ int ttm_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx);
 *
 * Calls the driver method to free all pages from a ttm
 */
-void ttm_tt_unpopulate(struct ttm_tt *ttm);
+void ttm_tt_unpopulate(struct ttm_bo_device *bdev, struct ttm_tt *ttm);
 #if IS_ENABLED(CONFIG_AGP)
 #include <linux/agp_backend.h>
@@ -265,8 +264,8 @@ void ttm_tt_unpopulate(struct ttm_tt *ttm);
 struct ttm_tt *ttm_agp_tt_create(struct ttm_buffer_object *bo,
 				 struct agp_bridge_data *bridge,
 				 uint32_t page_flags);
-int ttm_agp_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx);
-void ttm_agp_tt_unpopulate(struct ttm_tt *ttm);
+int ttm_agp_tt_populate(struct ttm_bo_device *bdev, struct ttm_tt *ttm, struct ttm_operation_ctx *ctx);
+void ttm_agp_tt_unpopulate(struct ttm_bo_device *bdev, struct ttm_tt *ttm);
 #endif
 #endif