Commit 961c75cf authored by Christian König, committed by Alex Deucher

drm/amdgpu: move amdgpu_device_(vram|gtt)_location

Move these functions into amdgpu_gmc.c since we are really dealing with the GMC
address space here.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent efa9a5ef
@@ -1166,10 +1166,6 @@ bool amdgpu_device_need_post(struct amdgpu_device *adev);
 void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
                                   u64 num_vis_bytes);
-void amdgpu_device_vram_location(struct amdgpu_device *adev,
-                                 struct amdgpu_gmc *mc, u64 base);
-void amdgpu_device_gart_location(struct amdgpu_device *adev,
-                                 struct amdgpu_gmc *mc);
 int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev);
 void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
                                              const u32 *registers,
@@ -651,71 +651,6 @@ void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
        __clear_bit(wb, adev->wb.used);
 }

-/**
- * amdgpu_device_vram_location - try to find VRAM location
- *
- * @adev: amdgpu device structure holding all necessary information
- * @mc: memory controller structure holding memory information
- * @base: base address at which to put VRAM
- *
- * Function will try to place VRAM at the base address provided
- * as parameter.
- */
-void amdgpu_device_vram_location(struct amdgpu_device *adev,
-                                 struct amdgpu_gmc *mc, u64 base)
-{
-       uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;
-
-       mc->vram_start = base;
-       mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
-       if (limit && limit < mc->real_vram_size)
-               mc->real_vram_size = limit;
-       dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
-                mc->mc_vram_size >> 20, mc->vram_start,
-                mc->vram_end, mc->real_vram_size >> 20);
-}
-
-/**
- * amdgpu_device_gart_location - try to find GART location
- *
- * @adev: amdgpu device structure holding all necessary information
- * @mc: memory controller structure holding memory information
- *
- * Function will try to place GART before or after VRAM.
- *
- * If GART size is bigger than the space left, the GART size is adjusted.
- * Thus the function will never fail.
- */
-void amdgpu_device_gart_location(struct amdgpu_device *adev,
-                                struct amdgpu_gmc *mc)
-{
-       u64 size_af, size_bf;
-
-       mc->gart_size += adev->pm.smu_prv_buffer_size;
-       size_af = adev->gmc.mc_mask - mc->vram_end;
-       size_bf = mc->vram_start;
-       if (size_bf > size_af) {
-               if (mc->gart_size > size_bf) {
-                       dev_warn(adev->dev, "limiting GART\n");
-                       mc->gart_size = size_bf;
-               }
-               mc->gart_start = 0;
-       } else {
-               if (mc->gart_size > size_af) {
-                       dev_warn(adev->dev, "limiting GART\n");
-                       mc->gart_size = size_af;
-               }
-               /* VCE doesn't like it when BOs cross a 4GB segment, so align
-                * the GART base on a 4GB boundary as well.
-                */
-               mc->gart_start = ALIGN(mc->vram_end + 1, 0x100000000ULL);
-       }
-       mc->gart_end = mc->gart_start + mc->gart_size - 1;
-       dev_info(adev->dev, "GART: %lluM 0x%016llX - 0x%016llX\n",
-                mc->gart_size >> 20, mc->gart_start, mc->gart_end);
-}
-
 /**
  * amdgpu_device_resize_fb_bar - try to resize FB BAR
  *
@@ -78,3 +78,67 @@ uint64_t amdgpu_gmc_pd_addr(struct amdgpu_bo *bo)
        }
        return pd_addr;
 }
+
+/**
+ * amdgpu_gmc_vram_location - try to find VRAM location
+ *
+ * @adev: amdgpu device structure holding all necessary information
+ * @mc: memory controller structure holding memory information
+ * @base: base address at which to put VRAM
+ *
+ * Function will try to place VRAM at the base address provided
+ * as parameter.
+ */
+void amdgpu_gmc_vram_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc,
+                              u64 base)
+{
+       uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;
+
+       mc->vram_start = base;
+       mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
+       if (limit && limit < mc->real_vram_size)
+               mc->real_vram_size = limit;
+       dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
+                mc->mc_vram_size >> 20, mc->vram_start,
+                mc->vram_end, mc->real_vram_size >> 20);
+}
+
+/**
+ * amdgpu_gmc_gart_location - try to find GART location
+ *
+ * @adev: amdgpu device structure holding all necessary information
+ * @mc: memory controller structure holding memory information
+ *
+ * Function will try to place GART before or after VRAM.
+ *
+ * If GART size is bigger than the space left, the GART size is adjusted.
+ * Thus the function will never fail.
+ */
+void amdgpu_gmc_gart_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc)
+{
+       u64 size_af, size_bf;
+
+       mc->gart_size += adev->pm.smu_prv_buffer_size;
+       size_af = adev->gmc.mc_mask - mc->vram_end;
+       size_bf = mc->vram_start;
+       if (size_bf > size_af) {
+               if (mc->gart_size > size_bf) {
+                       dev_warn(adev->dev, "limiting GART\n");
+                       mc->gart_size = size_bf;
+               }
+               mc->gart_start = 0;
+       } else {
+               if (mc->gart_size > size_af) {
+                       dev_warn(adev->dev, "limiting GART\n");
+                       mc->gart_size = size_af;
+               }
+               /* VCE doesn't like it when BOs cross a 4GB segment, so align
+                * the GART base on a 4GB boundary as well.
+                */
+               mc->gart_start = ALIGN(mc->vram_end + 1, 0x100000000ULL);
+       }
+       mc->gart_end = mc->gart_start + mc->gart_size - 1;
+       dev_info(adev->dev, "GART: %lluM 0x%016llX - 0x%016llX\n",
+                mc->gart_size >> 20, mc->gart_start, mc->gart_end);
+}
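A note on the placement policy implemented by amdgpu_gmc_gart_location() above: the GART is placed on whichever side of VRAM has more room left in the GMC address space, its size is clamped to that room, and a placement after VRAM is aligned up to a 4GB boundary. The user-space sketch below walks through that decision; it is an illustration only, not kernel code. The mask, VRAM range and GART size are invented sample values, and ALIGN_UP stands in for the kernel's ALIGN macro.

/* Illustration of the GART placement decision with made-up sample values. */
#include <stdio.h>
#include <stdint.h>

/* Round x up to the next multiple of a (a must be a power of two). */
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

int main(void)
{
        uint64_t mc_mask    = 0xffffffffffULL;   /* hypothetical 40-bit GMC address space */
        uint64_t vram_start = 0x0ULL;            /* hypothetical VRAM base */
        uint64_t vram_end   = 0x1ffffffffULL;    /* hypothetical 8GB of VRAM */
        uint64_t gart_size  = 256ULL << 20;      /* hypothetical 256MB GART request */
        uint64_t gart_start, size_af, size_bf;

        size_af = mc_mask - vram_end;            /* room after VRAM */
        size_bf = vram_start;                    /* room before VRAM */

        if (size_bf > size_af) {
                if (gart_size > size_bf)
                        gart_size = size_bf;     /* "limiting GART" */
                gart_start = 0;
        } else {
                if (gart_size > size_af)
                        gart_size = size_af;     /* "limiting GART" */
                /* keep buffers from straddling a 4GB segment */
                gart_start = ALIGN_UP(vram_end + 1, 0x100000000ULL);
        }

        printf("GART: %lluM 0x%016llX - 0x%016llX\n",
               (unsigned long long)(gart_size >> 20),
               (unsigned long long)gart_start,
               (unsigned long long)(gart_start + gart_size - 1));
        return 0;
}

With these sample values there is far more room after VRAM than before it, so the GART lands directly after VRAM at 0x0000000200000000 and keeps its full 256MB size.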
@@ -136,5 +136,9 @@ static inline bool amdgpu_gmc_vram_full_visible(struct amdgpu_gmc *gmc)
 void amdgpu_gmc_get_pde_for_bo(struct amdgpu_bo *bo, int level,
                                uint64_t *addr, uint64_t *flags);
 uint64_t amdgpu_gmc_pd_addr(struct amdgpu_bo *bo);
+void amdgpu_gmc_vram_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc,
+                              u64 base);
+void amdgpu_gmc_gart_location(struct amdgpu_device *adev,
+                              struct amdgpu_gmc *mc);

 #endif
@@ -224,8 +224,8 @@ static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev,
        u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;

        base <<= 24;
-       amdgpu_device_vram_location(adev, &adev->gmc, base);
-       amdgpu_device_gart_location(adev, mc);
+       amdgpu_gmc_vram_location(adev, &adev->gmc, base);
+       amdgpu_gmc_gart_location(adev, mc);
 }

 static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
@@ -242,8 +242,8 @@ static void gmc_v7_0_vram_gtt_location(struct amdgpu_device *adev,
        u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;

        base <<= 24;
-       amdgpu_device_vram_location(adev, &adev->gmc, base);
-       amdgpu_device_gart_location(adev, mc);
+       amdgpu_gmc_vram_location(adev, &adev->gmc, base);
+       amdgpu_gmc_gart_location(adev, mc);
 }

 /**
@@ -411,8 +411,8 @@ static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev,
        base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
        base <<= 24;

-       amdgpu_device_vram_location(adev, &adev->gmc, base);
-       amdgpu_device_gart_location(adev, mc);
+       amdgpu_gmc_vram_location(adev, &adev->gmc, base);
+       amdgpu_gmc_gart_location(adev, mc);
 }

 /**
@@ -749,8 +749,8 @@ static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
        u64 base = 0;

        if (!amdgpu_sriov_vf(adev))
                base = mmhub_v1_0_get_fb_location(adev);
-       amdgpu_device_vram_location(adev, &adev->gmc, base);
-       amdgpu_device_gart_location(adev, mc);
+       amdgpu_gmc_vram_location(adev, &adev->gmc, base);
+       amdgpu_gmc_gart_location(adev, mc);
        /* base offset of vram pages */
        adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev);
 }