Commit 37205891 authored by Dave Airlie

drm/ttm: make ttm_range_man_init/takedown take type + args

This makes it easier to move these to a driver allocated system.
Reviewed-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Ben Skeggs <bskeggs@redhat.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200804025632.3868079-47-airlied@gmail.com
parent 0af135b8
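The change itself is mechanical: the caching flags and the use_tt setting move out of per-driver setup code and into ttm_range_man_init(), which now looks the manager up from the type slot itself. As a sketch only (not part of the patch), the driver-side calling convention before and after, using the VRAM flags that appear in the radeon hunk below; bdev, size, and ret stand in for whatever the driver has in scope:

```c
/* Before: fetch the manager, fill its fields, then initialise it. */
struct ttm_mem_type_manager *man = ttm_manager_type(bdev, TTM_PL_VRAM);

man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
man->default_caching = TTM_PL_FLAG_WC;
ret = ttm_range_man_init(bdev, man, size >> PAGE_SHIFT);

/* After: pass the type slot and the arguments in a single call. */
ret = ttm_range_man_init(bdev, TTM_PL_VRAM,
			 TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC,
			 TTM_PL_FLAG_WC, false /* use_tt */,
			 size >> PAGE_SHIFT);

/* Teardown likewise takes the type instead of a manager pointer. */
ttm_range_man_fini(bdev, TTM_PL_VRAM);
```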
@@ -66,12 +66,9 @@ static int amdgpu_ttm_init_on_chip(struct amdgpu_device *adev,
 				    unsigned int type,
 				    uint64_t size)
 {
-	struct ttm_mem_type_manager *man = ttm_manager_type(&adev->mman.bdev, type);
-
-	man->available_caching = TTM_PL_FLAG_UNCACHED;
-	man->default_caching = TTM_PL_FLAG_UNCACHED;
-
-	return ttm_range_man_init(&adev->mman.bdev, man, size >> PAGE_SHIFT);
+	return ttm_range_man_init(&adev->mman.bdev, type,
+				  TTM_PL_FLAG_UNCACHED, TTM_PL_FLAG_UNCACHED,
+				  false, size >> PAGE_SHIFT);
 }

 /**
@@ -1996,9 +1993,9 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
 	amdgpu_vram_mgr_fini(adev);
 	amdgpu_gtt_mgr_fini(adev);
-	ttm_range_man_fini(&adev->mman.bdev, ttm_manager_type(&adev->mman.bdev, AMDGPU_PL_GDS));
-	ttm_range_man_fini(&adev->mman.bdev, ttm_manager_type(&adev->mman.bdev, AMDGPU_PL_GWS));
-	ttm_range_man_fini(&adev->mman.bdev, ttm_manager_type(&adev->mman.bdev, AMDGPU_PL_OA));
+	ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GDS);
+	ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GWS);
+	ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_OA);
 	ttm_bo_device_release(&adev->mman.bdev);
 	adev->mman.initialized = false;
 	DRM_INFO("amdgpu: ttm finalized\n");
...
@@ -1103,7 +1103,6 @@ EXPORT_SYMBOL(drm_vram_mm_debugfs_init);
 static int drm_vram_mm_init(struct drm_vram_mm *vmm, struct drm_device *dev,
 			    uint64_t vram_base, size_t vram_size)
 {
-	struct ttm_mem_type_manager *man = ttm_manager_type(&vmm->bdev, TTM_PL_VRAM);
 	int ret;

 	vmm->vram_base = vram_base;
@@ -1116,9 +1115,10 @@ static int drm_vram_mm_init(struct drm_vram_mm *vmm, struct drm_device *dev,
 	if (ret)
 		return ret;

-	man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
-	man->default_caching = TTM_PL_FLAG_WC;
-	ret = ttm_range_man_init(&vmm->bdev, man, vram_size >> PAGE_SHIFT);
+	ret = ttm_range_man_init(&vmm->bdev, TTM_PL_VRAM,
+				 TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC,
+				 TTM_PL_FLAG_WC, false,
+				 vram_size >> PAGE_SHIFT);
 	if (ret)
 		return ret;
@@ -1127,7 +1127,7 @@ static int drm_vram_mm_init(struct drm_vram_mm *vmm, struct drm_device *dev,
 static void drm_vram_mm_cleanup(struct drm_vram_mm *vmm)
 {
-	ttm_range_man_fini(&vmm->bdev, ttm_manager_type(&vmm->bdev, TTM_PL_VRAM));
+	ttm_range_man_fini(&vmm->bdev, TTM_PL_VRAM);
 	ttm_bo_device_release(&vmm->bdev);
 }
...
@@ -156,16 +156,17 @@ nouveau_ttm_init_host(struct nouveau_drm *drm, u8 kind)
 static int
 nouveau_ttm_init_vram(struct nouveau_drm *drm)
 {
-	struct ttm_mem_type_manager *man = ttm_manager_type(&drm->ttm.bdev, TTM_PL_VRAM);
 	struct nvif_mmu *mmu = &drm->client.mmu;

-	man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
-	man->default_caching = TTM_PL_FLAG_WC;
-
 	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
+		struct ttm_mem_type_manager *man = ttm_manager_type(&drm->ttm.bdev, TTM_PL_VRAM);
 		/* Some BARs do not support being ioremapped WC */
 		const u8 type = mmu->type[drm->ttm.type_vram].type;
+
+		man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
+		man->default_caching = TTM_PL_FLAG_WC;
+
 		if (type & NVIF_MEM_UNCACHED) {
 			man->available_caching = TTM_PL_FLAG_UNCACHED;
 			man->default_caching = TTM_PL_FLAG_UNCACHED;
@@ -178,7 +179,9 @@ nouveau_ttm_init_vram(struct nouveau_drm *drm)
 		ttm_mem_type_manager_set_used(man, true);
 		return 0;
 	} else {
-		return ttm_range_man_init(&drm->ttm.bdev, man,
+		return ttm_range_man_init(&drm->ttm.bdev, TTM_PL_VRAM,
+					  TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC,
+					  TTM_PL_FLAG_WC, false,
 					  drm->gem.vram_available >> PAGE_SHIFT);
 	}
 }
@@ -193,7 +196,7 @@ nouveau_ttm_fini_vram(struct nouveau_drm *drm)
 		ttm_mem_type_manager_force_list_clean(&drm->ttm.bdev, man);
 		ttm_mem_type_manager_cleanup(man);
 	} else
-		ttm_range_man_fini(&drm->ttm.bdev, man);
+		ttm_range_man_fini(&drm->ttm.bdev, TTM_PL_VRAM);
 }

 static int
@@ -216,9 +219,10 @@ nouveau_ttm_init_gtt(struct nouveau_drm *drm)
 	else if (!drm->agp.bridge)
 		man->func = &nv04_gart_manager;
 	else
-		return ttm_range_man_init(&drm->ttm.bdev, man,
+		return ttm_range_man_init(&drm->ttm.bdev, TTM_PL_TT,
+					  TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC,
+					  TTM_PL_FLAG_WC, true,
 					  size_pages);

 	ttm_mem_type_manager_init(&drm->ttm.bdev, man,
 				  size_pages);
 	ttm_mem_type_manager_set_used(man, true);
@@ -232,7 +236,7 @@ nouveau_ttm_fini_gtt(struct nouveau_drm *drm)
 	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA &&
 	    drm->agp.bridge)
-		ttm_range_man_fini(&drm->ttm.bdev, man);
+		ttm_range_man_fini(&drm->ttm.bdev, TTM_PL_TT);
 	else {
 		ttm_mem_type_manager_disable(man);
 		ttm_mem_type_manager_force_list_clean(&drm->ttm.bdev, man);
...
@@ -220,12 +220,8 @@ static int qxl_ttm_init_mem_type(struct qxl_device *qdev,
 				 unsigned int type,
 				 uint64_t size)
 {
-	struct ttm_mem_type_manager *man = ttm_manager_type(&qdev->mman.bdev, type);
-
-	man->available_caching = TTM_PL_MASK_CACHING;
-	man->default_caching = TTM_PL_FLAG_CACHED;
-
-	return ttm_range_man_init(&qdev->mman.bdev, man, size);
+	return ttm_range_man_init(&qdev->mman.bdev, type, TTM_PL_MASK_CACHING,
+				  TTM_PL_FLAG_CACHED, false, size);
 }

 int qxl_ttm_init(struct qxl_device *qdev)
@@ -267,8 +263,8 @@ int qxl_ttm_init(struct qxl_device *qdev)
 void qxl_ttm_fini(struct qxl_device *qdev)
 {
-	ttm_range_man_fini(&qdev->mman.bdev, ttm_manager_type(&qdev->mman.bdev, TTM_PL_VRAM));
-	ttm_range_man_fini(&qdev->mman.bdev, ttm_manager_type(&qdev->mman.bdev, TTM_PL_PRIV));
+	ttm_range_man_fini(&qdev->mman.bdev, TTM_PL_VRAM);
+	ttm_range_man_fini(&qdev->mman.bdev, TTM_PL_PRIV);
 	ttm_bo_device_release(&qdev->mman.bdev);
 	DRM_INFO("qxl: ttm finalized\n");
 }
...
@@ -68,35 +68,34 @@ struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev)
 static int radeon_ttm_init_vram(struct radeon_device *rdev)
 {
-	struct ttm_mem_type_manager *man = ttm_manager_type(&rdev->mman.bdev, TTM_PL_VRAM);
-
-	man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
-	man->default_caching = TTM_PL_FLAG_WC;
-
-	return ttm_range_man_init(&rdev->mman.bdev, man,
+	return ttm_range_man_init(&rdev->mman.bdev, TTM_PL_VRAM,
+				  TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC,
+				  TTM_PL_FLAG_WC, false,
 				  rdev->mc.real_vram_size >> PAGE_SHIFT);
 }

 static int radeon_ttm_init_gtt(struct radeon_device *rdev)
 {
-	struct ttm_mem_type_manager *man = ttm_manager_type(&rdev->mman.bdev, TTM_PL_TT);
+	uint32_t available_caching, default_caching;

-	man->available_caching = TTM_PL_MASK_CACHING;
-	man->default_caching = TTM_PL_FLAG_CACHED;
-	man->use_tt = true;
+	available_caching = TTM_PL_MASK_CACHING;
+	default_caching = TTM_PL_FLAG_CACHED;

 #if IS_ENABLED(CONFIG_AGP)
 	if (rdev->flags & RADEON_IS_AGP) {
 		if (!rdev->ddev->agp) {
 			DRM_ERROR("AGP is not enabled\n");
 			return -EINVAL;
 		}
-		man->available_caching = TTM_PL_FLAG_UNCACHED |
-					 TTM_PL_FLAG_WC;
-		man->default_caching = TTM_PL_FLAG_WC;
+		available_caching = TTM_PL_FLAG_UNCACHED |
+				    TTM_PL_FLAG_WC;
+		default_caching = TTM_PL_FLAG_WC;
 	}
 #endif

-	return ttm_range_man_init(&rdev->mman.bdev, man,
+	return ttm_range_man_init(&rdev->mman.bdev, TTM_PL_TT,
+				  available_caching,
+				  default_caching, true,
 				  rdev->mc.gtt_size >> PAGE_SHIFT);
 }
@@ -827,8 +826,8 @@ void radeon_ttm_fini(struct radeon_device *rdev)
 		}
 		radeon_bo_unref(&rdev->stolen_vga_memory);
 	}
-	ttm_range_man_fini(&rdev->mman.bdev, ttm_manager_type(&rdev->mman.bdev, TTM_PL_VRAM));
-	ttm_range_man_fini(&rdev->mman.bdev, ttm_manager_type(&rdev->mman.bdev, TTM_PL_TT));
+	ttm_range_man_fini(&rdev->mman.bdev, TTM_PL_VRAM);
+	ttm_range_man_fini(&rdev->mman.bdev, TTM_PL_TT);
 	ttm_bo_device_release(&rdev->mman.bdev);
 	radeon_gart_fini(rdev);
 	rdev->mman.initialized = false;
...
@@ -107,19 +107,27 @@ static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
 static const struct ttm_mem_type_manager_func ttm_bo_manager_func;

 int ttm_range_man_init(struct ttm_bo_device *bdev,
-		       struct ttm_mem_type_manager *man,
+		       unsigned type,
+		       uint32_t available_caching,
+		       uint32_t default_caching,
+		       bool use_tt,
 		       unsigned long p_size)
 {
+	struct ttm_mem_type_manager *man = ttm_manager_type(bdev, type);
 	struct ttm_range_manager *rman;

-	man->func = &ttm_bo_manager_func;
-
-	ttm_mem_type_manager_init(bdev, man, p_size);
+	man->available_caching = available_caching;
+	man->default_caching = default_caching;
+	man->use_tt = use_tt;

 	rman = kzalloc(sizeof(*rman), GFP_KERNEL);
 	if (!rman)
 		return -ENOMEM;

+	man->func = &ttm_bo_manager_func;
+
+	ttm_mem_type_manager_init(bdev, man, p_size);
+
 	drm_mm_init(&rman->mm, 0, p_size);
 	spin_lock_init(&rman->lock);
 	man->priv = rman;
@@ -130,8 +138,9 @@ int ttm_range_man_init(struct ttm_bo_device *bdev,
 EXPORT_SYMBOL(ttm_range_man_init);

 int ttm_range_man_fini(struct ttm_bo_device *bdev,
-		       struct ttm_mem_type_manager *man)
+		       unsigned type)
 {
+	struct ttm_mem_type_manager *man = ttm_manager_type(bdev, type);
 	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
 	struct drm_mm *mm = &rman->mm;
 	int ret;
...
@@ -626,13 +626,9 @@ static int vmw_vram_manager_init(struct vmw_private *dev_priv)
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 	ret = vmw_thp_init(dev_priv);
 #else
-	struct ttm_mem_type_manager *man = &dev_priv->bdev.man[TTM_PL_VRAM];
-
-	man->available_caching = TTM_PL_FLAG_CACHED;
-	man->default_caching = TTM_PL_FLAG_CACHED;
-
-	ret = ttm_range_man_init(&dev_priv->bdev, man,
-				 dev_priv->vram_size >> PAGE_SHIFT);
+	ret = ttm_range_man_init(&dev_priv->bdev, TTM_PL_VRAM,
+				 TTM_PL_FLAG_CACHED, TTM_PL_FLAG_CACHED,
+				 false, dev_priv->vram_size >> PAGE_SHIFT);
 #endif
 	ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM)->use_type = false;
 	return ret;
@@ -643,8 +639,7 @@ static void vmw_vram_manager_fini(struct vmw_private *dev_priv)
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 	vmw_thp_fini(dev_priv);
 #else
-	ttm_bo_man_fini(&dev_priv->bdev,
-			ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM));
+	ttm_bo_man_fini(&dev_priv->bdev, TTM_PL_VRAM);
 #endif
 }
...
@@ -856,14 +856,20 @@ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp);
  * ttm_range_man_init
  *
  * @bdev: ttm device
- * @man: the manager to initialise with the range manager.
+ * @type: memory manager type
+ * @available_caching: TTM_PL_FLAG_* for allowed caching modes
+ * @default_caching: default caching mode
+ * @use_tt: if the memory manager uses tt
  * @p_size: size of area to be managed in pages.
  *
  * Initialise a generic range manager for the selected memory type.
  * The range manager is installed for this device in the type slot.
  */
 int ttm_range_man_init(struct ttm_bo_device *bdev,
-		       struct ttm_mem_type_manager *man,
+		       unsigned type,
+		       uint32_t available_caching,
+		       uint32_t default_caching,
+		       bool use_tt,
 		       unsigned long p_size);

 /**
@@ -875,7 +881,7 @@ int ttm_range_man_init(struct ttm_bo_device *bdev,
  * Remove the generic range manager from a slot and tear it down.
  */
 int ttm_range_man_fini(struct ttm_bo_device *bdev,
-		       struct ttm_mem_type_manager *man);
+		       unsigned type);

 /**
  * ttm_mem_type_manager_debug
...
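Taken together, the two declarations above give the whole driver-facing API. A minimal, hypothetical sketch of a driver wiring up and tearing down a VRAM and a GTT range manager with the new signatures; struct foo_device, its fields, and the sizes are placeholders for illustration, not part of this patch:

```c
/* Hypothetical driver glue; foo_device stands in for whatever
 * structure actually embeds the ttm_bo_device and size fields. */
static int foo_ttm_init(struct foo_device *fdev)
{
	int ret;

	/* VRAM: write-combined by default, no TT backing. */
	ret = ttm_range_man_init(&fdev->bdev, TTM_PL_VRAM,
				 TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC,
				 TTM_PL_FLAG_WC, false,
				 fdev->vram_size >> PAGE_SHIFT);
	if (ret)
		return ret;

	/* GTT: cacheable system pages, backed by a TT object. */
	return ttm_range_man_init(&fdev->bdev, TTM_PL_TT,
				  TTM_PL_MASK_CACHING,
				  TTM_PL_FLAG_CACHED, true,
				  fdev->gtt_size >> PAGE_SHIFT);
}

static void foo_ttm_fini(struct foo_device *fdev)
{
	/* Teardown by type slot, mirroring init. */
	ttm_range_man_fini(&fdev->bdev, TTM_PL_TT);
	ttm_range_man_fini(&fdev->bdev, TTM_PL_VRAM);
}
```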