Commit 8d40002f authored by Tianci.Yin, committed by Alex Deucher

drm/amdgpu: update the method to get fb_loc of memory training(V4)

The method of getting fb_loc is changed from parsing the VBIOS to
taking a certain offset from the top of VRAM.
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Tianci.Yin <tianci.yin@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 7eca4006
@@ -636,9 +636,8 @@ struct amdgpu_fw_vram_usage {
struct amdgpu_bo *reserved_bo;
void *va;
- /* Offset on the top of VRAM, used as c2p write buffer.
+ /* GDDR6 training support flag.
*/
- u64 mem_train_fb_loc;
bool mem_train_support;
};
......
@@ -2022,7 +2022,7 @@ int amdgpu_atombios_init(struct amdgpu_device *adev)
if (adev->is_atom_fw) {
amdgpu_atomfirmware_scratch_regs_init(adev);
amdgpu_atomfirmware_allocate_fb_scratch(adev);
- ret = amdgpu_atomfirmware_get_mem_train_fb_loc(adev);
+ ret = amdgpu_atomfirmware_get_mem_train_info(adev);
if (ret) {
DRM_ERROR("Failed to get mem train fb location.\n");
return ret;
......
@@ -525,16 +525,12 @@ static int gddr6_mem_train_support(struct amdgpu_device *adev)
return ret;
}
- int amdgpu_atomfirmware_get_mem_train_fb_loc(struct amdgpu_device *adev)
+ int amdgpu_atomfirmware_get_mem_train_info(struct amdgpu_device *adev)
{
struct atom_context *ctx = adev->mode_info.atom_context;
- unsigned char *bios = ctx->bios;
- struct vram_reserve_block *reserved_block;
- int index, block_number;
+ int index;
uint8_t frev, crev;
uint16_t data_offset, size;
- uint32_t start_address_in_kb;
- uint64_t offset;
int ret;
adev->fw_vram_usage.mem_train_support = false;
@@ -569,32 +565,6 @@ int amdgpu_atomfirmware_get_mem_train_fb_loc(struct amdgpu_device *adev)
return -EINVAL;
}
- reserved_block = (struct vram_reserve_block *)
- (bios + data_offset + sizeof(struct atom_common_table_header));
- block_number = ((unsigned int)size - sizeof(struct atom_common_table_header))
- / sizeof(struct vram_reserve_block);
- reserved_block += (block_number > 0) ? block_number-1 : 0;
- DRM_DEBUG("block_number:0x%04x, last block: 0x%08xkb sz, %dkb fw, %dkb drv.\n",
- block_number,
- le32_to_cpu(reserved_block->start_address_in_kb),
- le16_to_cpu(reserved_block->used_by_firmware_in_kb),
- le16_to_cpu(reserved_block->used_by_driver_in_kb));
- if (reserved_block->used_by_firmware_in_kb > 0) {
- start_address_in_kb = le32_to_cpu(reserved_block->start_address_in_kb);
- offset = (uint64_t)start_address_in_kb * ONE_KiB;
- if ((offset & (ONE_MiB - 1)) < (4 * ONE_KiB + 1) ) {
- offset -= ONE_MiB;
- }
- offset &= ~(ONE_MiB - 1);
- adev->fw_vram_usage.mem_train_fb_loc = offset;
- adev->fw_vram_usage.mem_train_support = true;
- DRM_DEBUG("mem_train_fb_loc:0x%09llx.\n", offset);
- ret = 0;
- } else {
- DRM_ERROR("used_by_firmware_in_kb is 0!\n");
- ret = -EINVAL;
- }
- return ret;
+ adev->fw_vram_usage.mem_train_support = true;
+ return 0;
}
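For reference, the block removed above derived fb_loc from the start_address_in_kb field of the VBIOS vram_usagebyfirmware table. The stand-alone sketch below (illustration only, not part of the patch; the helper name old_mem_train_fb_loc and the 0x7FFF00 KiB input are invented for the example) mirrors that deleted arithmetic:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define ONE_KiB 0x400ULL
#define ONE_MiB 0x100000ULL

/* Deleted VBIOS path: convert start_address_in_kb to a byte offset, back off
 * one MiB when the reserved region starts within 4 KiB of a MiB boundary,
 * then align down to a 1 MiB boundary. */
static uint64_t old_mem_train_fb_loc(uint32_t start_address_in_kb)
{
	uint64_t offset = (uint64_t)start_address_in_kb * ONE_KiB;

	if ((offset & (ONE_MiB - 1)) < (4 * ONE_KiB + 1))
		offset -= ONE_MiB;
	return offset & ~(ONE_MiB - 1);
}

int main(void)
{
	/* Example input: a firmware-reserved region starting 256 KiB below the
	 * top of an 8 GiB framebuffer, i.e. 0x7FFF00 KiB. */
	printf("fb_loc = 0x%09" PRIx64 "\n", old_mem_train_fb_loc(0x7FFF00));
	return 0;
}

With that input the old path reports 0x1fff00000, a 1 MiB aligned location just below the reserved region near the top of VRAM.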
@@ -31,7 +31,7 @@ void amdgpu_atomfirmware_scratch_regs_init(struct amdgpu_device *adev);
int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev);
int amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
int *vram_width, int *vram_type, int *vram_vendor);
- int amdgpu_atomfirmware_get_mem_train_fb_loc(struct amdgpu_device *adev);
+ int amdgpu_atomfirmware_get_mem_train_info(struct amdgpu_device *adev);
int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev);
int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev);
bool amdgpu_atomfirmware_mem_ecc_supported(struct amdgpu_device *adev);
......
@@ -1720,6 +1720,14 @@ static int amdgpu_ttm_training_reserve_vram_fini(struct amdgpu_device *adev)
return 0;
}
+ static u64 amdgpu_ttm_training_get_c2p_offset(u64 vram_size)
+ {
+ if ((vram_size & (ONE_MiB - 1)) < (4 * ONE_KiB + 1) )
+ vram_size -= ONE_MiB;
+ return ALIGN(vram_size, ONE_MiB);
+ }
/**
* amdgpu_ttm_training_reserve_vram_init - create bo vram reservation from memory training
*
@@ -1738,7 +1746,7 @@ static int amdgpu_ttm_training_reserve_vram_init(struct amdgpu_device *adev)
return 0;
}
- ctx->c2p_train_data_offset = adev->fw_vram_usage.mem_train_fb_loc;
+ ctx->c2p_train_data_offset = amdgpu_ttm_training_get_c2p_offset(adev->gmc.mc_vram_size);
ctx->p2c_train_data_offset = (adev->gmc.mc_vram_size - GDDR6_MEM_TRAINING_OFFSET);
ctx->train_data_size = GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES;
......
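The new helper above derives the same kind of placement from the VRAM size alone, with no VBIOS table involved. A stand-alone sketch of that arithmetic (illustration only; ALIGN_UP stands in for the kernel's round-up ALIGN() macro, and the 8 GiB size is just an example):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define ONE_KiB 0x400ULL
#define ONE_MiB 0x100000ULL
/* Round x up to a multiple of a (a power of two), like the kernel's ALIGN(). */
#define ALIGN_UP(x, a) (((x) + ((a) - 1)) & ~((a) - 1))

/* Same arithmetic as amdgpu_ttm_training_get_c2p_offset(): back off one MiB
 * when the size sits within 4 KiB above a MiB boundary, then round up. */
static uint64_t c2p_offset(uint64_t vram_size)
{
	if ((vram_size & (ONE_MiB - 1)) < (4 * ONE_KiB + 1))
		vram_size -= ONE_MiB;
	return ALIGN_UP(vram_size, ONE_MiB);
}

int main(void)
{
	uint64_t vram_size = 8ULL << 30;	/* example: 8 GiB of VRAM */

	/* A MiB-aligned size takes the first branch, so the c2p training data
	 * lands exactly 1 MiB below the top of VRAM: 0x1fff00000. */
	printf("c2p_train_data_offset = 0x%09" PRIx64 "\n", c2p_offset(vram_size));
	return 0;
}

For a MiB-aligned VRAM size this matches the 0x1fff00000 result of the earlier sketch, so a fixed offset from the top of VRAM can stand in for the value that used to be read out of the VBIOS.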
@@ -66,6 +66,13 @@ struct amdgpu_copy_mem {
unsigned long offset;
};
+ /* Definitions for constance */
+ enum amdgpu_internal_constants
+ {
+ ONE_KiB = 0x400,
+ ONE_MiB = 0x100000,
+ };
extern const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func;
extern const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func;
......
@@ -672,20 +672,6 @@ struct vram_usagebyfirmware_v2_1
uint16_t used_by_driver_in_kb;
};
- /* This is part of vram_usagebyfirmware_v2_1 */
- struct vram_reserve_block
- {
- uint32_t start_address_in_kb;
- uint16_t used_by_firmware_in_kb;
- uint16_t used_by_driver_in_kb;
- };
- /* Definitions for constance */
- enum atomfirmware_internal_constants
- {
- ONE_KiB = 0x400,
- ONE_MiB = 0x100000,
- };
/*
***************************************************************************
......