Commit 1b4ea4c5 authored by Christian König

drm/ttm: set the tt caching state at creation time

All drivers can determine the tt caching state at creation time;
there is no need to do this on the fly during every validation.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Michael J. Ruhl <michael.j.ruhl@intel.com>
Link: https://patchwork.freedesktop.org/patch/394253/
parent 070c7fa5
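
For readers skimming the patch, the net effect is that a driver backend's ttm_tt_create() hook now picks the caching mode once, from information it already has at creation time, and hands it to ttm_tt_init()/ttm_dma_tt_init()/ttm_sg_tt_init(); TTM no longer adjusts it later during validation. A minimal sketch of such a hook after this change follows; foo_ttm_tt_create() and foo_bo_wants_wc() are hypothetical placeholder names, not part of the patch:

#include <linux/slab.h>
#include <drm/ttm/ttm_tt.h>
#include <drm/ttm/ttm_caching.h>

/* Illustrative sketch only; the foo_* names are made up for this example. */
static struct ttm_tt *foo_ttm_tt_create(struct ttm_buffer_object *bo,
					uint32_t page_flags)
{
	enum ttm_caching caching;
	struct ttm_tt *tt;

	tt = kzalloc(sizeof(*tt), GFP_KERNEL);
	if (!tt)
		return NULL;

	/* Decide the caching mode once, at creation time. */
	caching = foo_bo_wants_wc(bo) ? ttm_write_combined : ttm_cached;

	/* ttm_tt_init() now takes the caching mode as an extra parameter. */
	if (ttm_tt_init(tt, bo, page_flags, caching)) {
		kfree(tt);
		return NULL;
	}

	return tt;
}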
@@ -124,7 +124,7 @@ uint64_t amdgpu_gmc_agp_addr(struct ttm_buffer_object *bo)
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
 	struct ttm_dma_tt *ttm;
 
-	if (bo->num_pages != 1 || bo->ttm->caching_state == tt_cached)
+	if (bo->num_pages != 1 || bo->ttm->caching == ttm_cached)
 		return AMDGPU_BO_INVALID_OFFSET;
 
 	ttm = container_of(bo->ttm, struct ttm_dma_tt, ttm);
...
@@ -1292,7 +1292,9 @@ static void amdgpu_ttm_backend_destroy(struct ttm_bo_device *bdev,
 static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
 					   uint32_t page_flags)
 {
+	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
 	struct amdgpu_ttm_tt *gtt;
+	enum ttm_caching caching;
 
 	gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL);
 	if (gtt == NULL) {
@@ -1300,8 +1302,13 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
 	}
 	gtt->gobj = &bo->base;
 
+	if (abo->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
+		caching = ttm_write_combined;
+	else
+		caching = ttm_cached;
+
 	/* allocate space for the uninitialized page entries */
-	if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags)) {
+	if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags, caching)) {
 		kfree(gtt);
 		return NULL;
 	}
@@ -1525,7 +1532,7 @@ uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem)
 	if (mem && mem->mem_type == TTM_PL_TT) {
 		flags |= AMDGPU_PTE_SYSTEM;
 
-		if (ttm->caching_state == tt_cached)
+		if (ttm->caching == ttm_cached)
 			flags |= AMDGPU_PTE_SNOOPED;
 	}
...
@@ -918,7 +918,7 @@ static struct ttm_tt *bo_driver_ttm_tt_create(struct ttm_buffer_object *bo,
 	if (!tt)
 		return NULL;
 
-	ret = ttm_tt_init(tt, bo, page_flags);
+	ret = ttm_tt_init(tt, bo, page_flags, ttm_cached);
 	if (ret < 0)
 		goto err_ttm_tt_init;
...
@@ -5,6 +5,7 @@
 #include "nouveau_drv.h"
 #include "nouveau_mem.h"
 #include "nouveau_ttm.h"
+#include "nouveau_bo.h"
 
 struct nouveau_sgdma_be {
 	/* this has to be the first field so populate/unpopulated in
@@ -67,13 +68,23 @@ nouveau_sgdma_unbind(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
 struct ttm_tt *
 nouveau_sgdma_create_ttm(struct ttm_buffer_object *bo, uint32_t page_flags)
 {
+	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
+	struct nouveau_bo *nvbo = nouveau_bo(bo);
 	struct nouveau_sgdma_be *nvbe;
+	enum ttm_caching caching;
+
+	if (nvbo->force_coherent)
+		caching = ttm_uncached;
+	else if (drm->agp.bridge)
+		caching = ttm_write_combined;
+	else
+		caching = ttm_cached;
 
 	nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
 	if (!nvbe)
 		return NULL;
 
-	if (ttm_dma_tt_init(&nvbe->ttm, bo, page_flags)) {
+	if (ttm_dma_tt_init(&nvbe->ttm, bo, page_flags, caching)) {
 		kfree(nvbe);
 		return NULL;
 	}
...
@@ -133,7 +133,7 @@ static struct ttm_tt *qxl_ttm_tt_create(struct ttm_buffer_object *bo,
 	ttm = kzalloc(sizeof(struct ttm_tt), GFP_KERNEL);
 	if (ttm == NULL)
 		return NULL;
-	if (ttm_tt_init(ttm, bo, page_flags)) {
+	if (ttm_tt_init(ttm, bo, page_flags, ttm_cached)) {
 		kfree(ttm);
 		return NULL;
 	}
...
@@ -546,7 +546,7 @@ static int radeon_ttm_backend_bind(struct ttm_bo_device *bdev,
 		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
 		     ttm->num_pages, bo_mem, ttm);
 	}
-	if (ttm->caching_state == tt_cached)
+	if (ttm->caching == ttm_cached)
 		flags |= RADEON_GART_PAGE_SNOOP;
 	r = radeon_gart_bind(rdev, gtt->offset, ttm->num_pages,
 			     ttm->pages, gtt->ttm.dma_address, flags);
@@ -590,6 +590,10 @@ static struct ttm_tt *radeon_ttm_tt_create(struct ttm_buffer_object *bo,
 {
 	struct radeon_device *rdev;
 	struct radeon_ttm_tt *gtt;
+	enum ttm_caching caching;
+	struct radeon_bo *rbo;
+
+	rbo = container_of(bo, struct radeon_bo, tbo);
 
 	rdev = radeon_get_rdev(bo->bdev);
 #if IS_ENABLED(CONFIG_AGP)
@@ -603,7 +607,15 @@ static struct ttm_tt *radeon_ttm_tt_create(struct ttm_buffer_object *bo,
 	if (gtt == NULL) {
 		return NULL;
 	}
-	if (ttm_dma_tt_init(&gtt->ttm, bo, page_flags)) {
+
+	if (rbo->flags & RADEON_GEM_GTT_UC)
+		caching = ttm_uncached;
+	else if (rbo->flags & RADEON_GEM_GTT_WC)
+		caching = ttm_write_combined;
+	else
+		caching = ttm_cached;
+
+	if (ttm_dma_tt_init(&gtt->ttm, bo, page_flags, caching)) {
 		kfree(gtt);
 		return NULL;
 	}
...
@@ -136,7 +136,7 @@ struct ttm_tt *ttm_agp_tt_create(struct ttm_buffer_object *bo,
 	agp_be->mem = NULL;
 	agp_be->bridge = bridge;
 
-	if (ttm_tt_init(&agp_be->ttm, bo, page_flags)) {
+	if (ttm_tt_init(&agp_be->ttm, bo, page_flags, ttm_write_combined)) {
 		kfree(agp_be);
 		return NULL;
 	}
...
@@ -220,14 +220,14 @@ static struct ttm_pool_manager *_manager;
 /**
  * Select the right pool or requested caching state and ttm flags. */
 static struct ttm_page_pool *ttm_get_pool(int flags, bool huge,
-					  enum ttm_caching_state cstate)
+					  enum ttm_caching cstate)
 {
 	int pool_index;
 
-	if (cstate == tt_cached)
+	if (cstate == ttm_cached)
 		return NULL;
 
-	if (cstate == tt_wc)
+	if (cstate == ttm_write_combined)
 		pool_index = 0x0;
 	else
 		pool_index = 0x1;
@@ -441,17 +441,17 @@ static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
 }
 
 static int ttm_set_pages_caching(struct page **pages,
-		enum ttm_caching_state cstate, unsigned cpages)
+		enum ttm_caching cstate, unsigned cpages)
 {
 	int r = 0;
 	/* Set page caching */
 	switch (cstate) {
-	case tt_uncached:
+	case ttm_uncached:
 		r = ttm_set_pages_array_uc(pages, cpages);
 		if (r)
 			pr_err("Failed to set %d pages to uc!\n", cpages);
 		break;
-	case tt_wc:
+	case ttm_write_combined:
 		r = ttm_set_pages_array_wc(pages, cpages);
 		if (r)
 			pr_err("Failed to set %d pages to wc!\n", cpages);
@@ -486,7 +486,7 @@ static void ttm_handle_caching_failure(struct page **failed_pages,
  * pages returned in pages array.
  */
 static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
-			       int ttm_flags, enum ttm_caching_state cstate,
+			       int ttm_flags, enum ttm_caching cstate,
 			       unsigned count, unsigned order)
 {
 	struct page **caching_array;
@@ -566,7 +566,7 @@ static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
  * pages is small.
  */
 static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool, int ttm_flags,
-				      enum ttm_caching_state cstate,
+				      enum ttm_caching cstate,
 				      unsigned count, unsigned long *irq_flags)
 {
 	struct page *p;
@@ -626,7 +626,7 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool, int ttm_flags,
 static int ttm_page_pool_get_pages(struct ttm_page_pool *pool,
 				   struct list_head *pages,
 				   int ttm_flags,
-				   enum ttm_caching_state cstate,
+				   enum ttm_caching cstate,
 				   unsigned count, unsigned order)
 {
 	unsigned long irq_flags;
@@ -703,7 +703,7 @@ static int ttm_page_pool_get_pages(struct ttm_page_pool *pool,
 
 /* Put all pages in pages list to correct pool to wait for reuse */
 static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
-			  enum ttm_caching_state cstate)
+			  enum ttm_caching cstate)
 {
 	struct ttm_page_pool *pool = ttm_get_pool(flags, false, cstate);
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -821,7 +821,7 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
  * cached pages.
  */
 static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
-			 enum ttm_caching_state cstate)
+			 enum ttm_caching cstate)
 {
 	struct ttm_page_pool *pool = ttm_get_pool(flags, false, cstate);
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -1040,7 +1040,7 @@ ttm_pool_unpopulate_helper(struct ttm_tt *ttm, unsigned mem_count_update)
 
 put_pages:
 	ttm_put_pages(ttm->pages, ttm->num_pages, ttm->page_flags,
-		      ttm->caching_state);
+		      ttm->caching);
 	ttm_tt_set_unpopulated(ttm);
 }
@@ -1057,7 +1057,7 @@ int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
 		return -ENOMEM;
 
 	ret = ttm_get_pages(ttm->pages, ttm->num_pages, ttm->page_flags,
-			    ttm->caching_state);
+			    ttm->caching);
 	if (unlikely(ret != 0)) {
 		ttm_pool_unpopulate_helper(ttm, 0);
 		return ret;
...
@@ -325,15 +325,15 @@ static struct dma_page *__ttm_dma_alloc_page(struct dma_pool *pool)
 	}
 	return d_page;
 }
 
-static enum pool_type ttm_to_type(int flags, enum ttm_caching_state cstate)
+static enum pool_type ttm_to_type(int flags, enum ttm_caching cstate)
 {
 	enum pool_type type = IS_UNDEFINED;
 
 	if (flags & TTM_PAGE_FLAG_DMA32)
 		type |= IS_DMA32;
-	if (cstate == tt_cached)
+	if (cstate == ttm_cached)
 		type |= IS_CACHED;
-	else if (cstate == tt_uncached)
+	else if (cstate == ttm_uncached)
 		type |= IS_UC;
 	else
 		type |= IS_WC;
@@ -663,7 +663,7 @@ static struct dma_pool *ttm_dma_find_pool(struct device *dev,
  * are pages that have changed their caching state already put them to the
  * pool.
  */
-static void ttm_dma_handle_caching_state_failure(struct dma_pool *pool,
+static void ttm_dma_handle_caching_failure(struct dma_pool *pool,
 						 struct list_head *d_pages,
 						 struct page **failed_pages,
 						 unsigned cpages)
@@ -734,7 +734,7 @@ static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool,
 			r = ttm_set_pages_caching(pool, caching_array,
 						  cpages);
 			if (r)
-				ttm_dma_handle_caching_state_failure(
+				ttm_dma_handle_caching_failure(
 					pool, d_pages, caching_array,
 					cpages);
 		}
@@ -760,7 +760,7 @@ static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool,
 				r = ttm_set_pages_caching(pool, caching_array,
 							  cpages);
 				if (r) {
-					ttm_dma_handle_caching_state_failure(
+					ttm_dma_handle_caching_failure(
 						pool, d_pages, caching_array,
 						cpages);
 					goto out;
@@ -773,7 +773,7 @@ static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool,
 	if (cpages) {
 		r = ttm_set_pages_caching(pool, caching_array, cpages);
 		if (r)
-			ttm_dma_handle_caching_state_failure(pool, d_pages,
+			ttm_dma_handle_caching_failure(pool, d_pages,
 					caching_array, cpages);
 	}
 out:
@@ -904,7 +904,7 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
 	INIT_LIST_HEAD(&ttm_dma->pages_list);
 	i = 0;
 
-	type = ttm_to_type(ttm->page_flags, ttm->caching_state);
+	type = ttm_to_type(ttm->page_flags, ttm->caching);
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 	if (ttm->page_flags & TTM_PAGE_FLAG_DMA32)
@@ -1000,7 +1000,7 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
 	unsigned count, i, npages = 0;
 	unsigned long irq_flags;
 
-	type = ttm_to_type(ttm->page_flags, ttm->caching_state);
+	type = ttm_to_type(ttm->page_flags, ttm->caching);
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 	pool = ttm_dma_find_pool(dev, type | IS_HUGE);
@@ -1032,7 +1032,7 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
 		return;
 
 	is_cached = (ttm_dma_find_pool(pool->dev,
-		     ttm_to_type(ttm->page_flags, tt_cached)) == pool);
+		     ttm_to_type(ttm->page_flags, ttm_cached)) == pool);
 
 	/* make sure pages array match list and count number of pages */
 	count = 0;
...
@@ -114,31 +114,30 @@ static int ttm_sg_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
 	return 0;
 }
 
-static int ttm_tt_set_caching(struct ttm_tt *ttm,
-			      enum ttm_caching_state c_state)
+static int ttm_tt_set_caching(struct ttm_tt *ttm, enum ttm_caching caching)
 {
-	if (ttm->caching_state == c_state)
+	if (ttm->caching == caching)
 		return 0;
 
 	/* Can't change the caching state after TT is populated */
 	if (WARN_ON_ONCE(ttm_tt_is_populated(ttm)))
 		return -EINVAL;
 
-	ttm->caching_state = c_state;
+	ttm->caching = caching;
 
 	return 0;
 }
 
 int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
 {
-	enum ttm_caching_state state;
+	enum ttm_caching state;
 
 	if (placement & TTM_PL_FLAG_WC)
-		state = tt_wc;
+		state = ttm_write_combined;
 	else if (placement & TTM_PL_FLAG_UNCACHED)
-		state = tt_uncached;
+		state = ttm_uncached;
 	else
-		state = tt_cached;
+		state = ttm_cached;
 
 	return ttm_tt_set_caching(ttm, state);
 }
@@ -162,20 +161,22 @@ void ttm_tt_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
 static void ttm_tt_init_fields(struct ttm_tt *ttm,
 			       struct ttm_buffer_object *bo,
-			       uint32_t page_flags)
+			       uint32_t page_flags,
+			       enum ttm_caching caching)
 {
 	ttm->num_pages = bo->num_pages;
-	ttm->caching_state = tt_cached;
+	ttm->caching = ttm_cached;
 	ttm->page_flags = page_flags;
 	ttm_tt_set_unpopulated(ttm);
 	ttm->swap_storage = NULL;
 	ttm->sg = bo->sg;
+	ttm->caching = caching;
 }
 
 int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
-		uint32_t page_flags)
+		uint32_t page_flags, enum ttm_caching caching)
 {
-	ttm_tt_init_fields(ttm, bo, page_flags);
+	ttm_tt_init_fields(ttm, bo, page_flags, caching);
 
 	if (ttm_tt_alloc_page_directory(ttm)) {
 		pr_err("Failed allocating page table\n");
@@ -193,11 +194,11 @@ void ttm_tt_fini(struct ttm_tt *ttm)
 EXPORT_SYMBOL(ttm_tt_fini);
 
 int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
-		    uint32_t page_flags)
+		    uint32_t page_flags, enum ttm_caching caching)
 {
 	struct ttm_tt *ttm = &ttm_dma->ttm;
 
-	ttm_tt_init_fields(ttm, bo, page_flags);
+	ttm_tt_init_fields(ttm, bo, page_flags, caching);
 
 	INIT_LIST_HEAD(&ttm_dma->pages_list);
 	if (ttm_dma_tt_alloc_page_directory(ttm_dma)) {
@@ -209,12 +210,12 @@ int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
 EXPORT_SYMBOL(ttm_dma_tt_init);
 
 int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
-		   uint32_t page_flags)
+		   uint32_t page_flags, enum ttm_caching caching)
 {
 	struct ttm_tt *ttm = &ttm_dma->ttm;
 	int ret;
 
-	ttm_tt_init_fields(ttm, bo, page_flags);
+	ttm_tt_init_fields(ttm, bo, page_flags, caching);
 
 	INIT_LIST_HEAD(&ttm_dma->pages_list);
 	if (page_flags & TTM_PAGE_FLAG_SG)
...
@@ -647,9 +647,11 @@ static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
 	vmw_be->mob = NULL;
 
 	if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
-		ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bo, page_flags);
+		ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bo, page_flags,
+				      ttm_cached);
 	else
-		ret = ttm_tt_init(&vmw_be->dma_ttm.ttm, bo, page_flags);
+		ret = ttm_tt_init(&vmw_be->dma_ttm.ttm, bo, page_flags,
+				  ttm_cached);
 	if (unlikely(ret != 0))
 		goto out_no_init;
...
New header file (pulled in below via #include <drm/ttm/ttm_caching.h>):

/*
* Copyright 2020 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Christian König
*/
#ifndef _TTM_CACHING_H_
#define _TTM_CACHING_H_

enum ttm_caching {
	ttm_uncached,
	ttm_write_combined,
	ttm_cached
};

#endif
@@ -28,6 +28,7 @@
 #define _TTM_TT_H_
 
 #include <linux/types.h>
+#include <drm/ttm/ttm_caching.h>
 
 struct ttm_tt;
 struct ttm_resource;
@@ -42,12 +43,6 @@ struct ttm_operation_ctx;
 #define TTM_PAGE_FLAG_PRIV_POPULATED	(1 << 31)
 
-enum ttm_caching_state {
-	tt_uncached,
-	tt_wc,
-	tt_cached
-};
-
 /**
  * struct ttm_tt
  *
@@ -69,7 +64,7 @@ struct ttm_tt {
 	unsigned long num_pages;
 	struct sg_table *sg; /* for SG objects via dma-buf */
 	struct file *swap_storage;
-	enum ttm_caching_state caching_state;
+	enum ttm_caching caching;
 };
 
 static inline bool ttm_tt_is_populated(struct ttm_tt *tt)
@@ -121,6 +116,7 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc);
  * @ttm: The struct ttm_tt.
  * @bo: The buffer object we create the ttm for.
  * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
+ * @caching: the desired caching state of the pages
  *
  * Create a struct ttm_tt to back data with system memory pages.
  * No pages are actually allocated.
@@ -128,11 +124,11 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc);
  * NULL: Out of memory.
  */
 int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
-		uint32_t page_flags);
+		uint32_t page_flags, enum ttm_caching caching);
 int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
-		    uint32_t page_flags);
+		    uint32_t page_flags, enum ttm_caching caching);
 int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
-		   uint32_t page_flags);
+		   uint32_t page_flags, enum ttm_caching caching);
 
 /**
  * ttm_tt_fini
...
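
For completeness, the consumer side after this change simply reads the new ttm->caching field where it used to read caching_state, for example when deciding whether GART entries need CPU snooping, as the amdgpu and radeon hunks above do. A hedged sketch of that pattern; foo_gart_bind(), struct foo_device and FOO_GART_PAGE_SNOOP are hypothetical placeholders, not part of the patch:

/* Illustrative sketch only; the foo_* names and flag are made up. */
static int foo_ttm_backend_bind(struct foo_device *fdev, struct ttm_tt *ttm,
				uint64_t gpu_offset)
{
	uint32_t flags = 0;

	/* Cached system memory must be snooped by the device. */
	if (ttm->caching == ttm_cached)
		flags |= FOO_GART_PAGE_SNOOP;

	return foo_gart_bind(fdev, gpu_offset, ttm->num_pages,
			     ttm->pages, flags);
}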