Commit a7843c03 authored by Jonathan Kim's avatar Jonathan Kim Committed by Alex Deucher

drm/amdgpu: attempt xgmi perfmon re-arm on failed arm

The DF routines that arm xGMI performance monitoring will now attempt to
re-arm on both performance-monitoring start and counter read if the
initial attempt to arm fails.
Signed-off-by: default avatarJonathan Kim <Jonathan.Kim@amd.com>
Reviewed-by: default avatarFelix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: default avatarAlex Deucher <alexander.deucher@amd.com>
parent 5e9eec07
...@@ -183,6 +183,61 @@ static void df_v3_6_perfmon_wreg(struct amdgpu_device *adev, uint32_t lo_addr, ...@@ -183,6 +183,61 @@ static void df_v3_6_perfmon_wreg(struct amdgpu_device *adev, uint32_t lo_addr,
spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
} }
/*
 * Same as perfmon_wreg but return status on write value check.
 *
 * Writes the lo/hi perfmon control registers through the indirect PCIe
 * index/data register pair, then reads both values back through the same
 * pair and compares against what was written.
 *
 * Returns 0 if both read-backs match, -EBUSY if either differs (the DF
 * hardware did not accept the arm request; caller may retry or defer).
 *
 * NOTE: the whole write/read-back sequence is done under pcie_idx_lock
 * because the index/data pair is shared state — an interleaved access
 * from another path would redirect the data register mid-sequence.
 */
static int df_v3_6_perfmon_arm_with_status(struct amdgpu_device *adev,
				  uint32_t lo_addr, uint32_t lo_val,
				  uint32_t hi_addr, uint32_t hi_val)
{
	unsigned long flags, address, data;
	uint32_t lo_val_rb, hi_val_rb;

	/* offsets of the indirect index/data registers for this NBIO rev */
	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	/* write lo then hi: select target register, then store the value */
	WREG32(address, lo_addr);
	WREG32(data, lo_val);
	WREG32(address, hi_addr);
	WREG32(data, hi_val);

	/* read both back to verify the hardware latched the values */
	WREG32(address, lo_addr);
	lo_val_rb = RREG32(data);
	WREG32(address, hi_addr);
	hi_val_rb = RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

	if (!(lo_val == lo_val_rb && hi_val == hi_val_rb))
		return -EBUSY;

	return 0;
}
/*
 * retry arming counters every 100 usecs within 1 millisecond interval.
 * if retry fails after time out, return error.
 */
#define ARM_RETRY_USEC_TIMEOUT 1000
#define ARM_RETRY_USEC_INTERVAL 100
static int df_v3_6_perfmon_arm_with_retry(struct amdgpu_device *adev,
				  uint32_t lo_addr, uint32_t lo_val,
				  uint32_t hi_addr, uint32_t hi_val)
{
	int budget;

	/* up to TIMEOUT/INTERVAL attempts, sleeping INTERVAL usecs between */
	for (budget = ARM_RETRY_USEC_TIMEOUT; budget > 0;
	     budget -= ARM_RETRY_USEC_INTERVAL) {
		if (!df_v3_6_perfmon_arm_with_status(adev, lo_addr, lo_val,
						     hi_addr, hi_val))
			return 0;

		udelay(ARM_RETRY_USEC_INTERVAL);
	}

	/* every attempt within the timeout window failed to arm */
	return -ETIME;
}
/* get the number of df counters available */ /* get the number of df counters available */
static ssize_t df_v3_6_get_df_cntr_avail(struct device *dev, static ssize_t df_v3_6_get_df_cntr_avail(struct device *dev,
struct device_attribute *attr, struct device_attribute *attr,
...@@ -334,20 +389,20 @@ static void df_v3_6_pmc_get_addr(struct amdgpu_device *adev, ...@@ -334,20 +389,20 @@ static void df_v3_6_pmc_get_addr(struct amdgpu_device *adev,
switch (target_cntr) { switch (target_cntr) {
case 0: case 0:
*lo_base_addr = is_ctrl ? smnPerfMonCtlLo0 : smnPerfMonCtrLo0; *lo_base_addr = is_ctrl ? smnPerfMonCtlLo4 : smnPerfMonCtrLo4;
*hi_base_addr = is_ctrl ? smnPerfMonCtlHi0 : smnPerfMonCtrHi0; *hi_base_addr = is_ctrl ? smnPerfMonCtlHi4 : smnPerfMonCtrHi4;
break; break;
case 1: case 1:
*lo_base_addr = is_ctrl ? smnPerfMonCtlLo1 : smnPerfMonCtrLo1; *lo_base_addr = is_ctrl ? smnPerfMonCtlLo5 : smnPerfMonCtrLo5;
*hi_base_addr = is_ctrl ? smnPerfMonCtlHi1 : smnPerfMonCtrHi1; *hi_base_addr = is_ctrl ? smnPerfMonCtlHi5 : smnPerfMonCtrHi5;
break; break;
case 2: case 2:
*lo_base_addr = is_ctrl ? smnPerfMonCtlLo2 : smnPerfMonCtrLo2; *lo_base_addr = is_ctrl ? smnPerfMonCtlLo6 : smnPerfMonCtrLo6;
*hi_base_addr = is_ctrl ? smnPerfMonCtlHi2 : smnPerfMonCtrHi2; *hi_base_addr = is_ctrl ? smnPerfMonCtlHi6 : smnPerfMonCtrHi6;
break; break;
case 3: case 3:
*lo_base_addr = is_ctrl ? smnPerfMonCtlLo3 : smnPerfMonCtrLo3; *lo_base_addr = is_ctrl ? smnPerfMonCtlLo7 : smnPerfMonCtrLo7;
*hi_base_addr = is_ctrl ? smnPerfMonCtlHi3 : smnPerfMonCtrHi3; *hi_base_addr = is_ctrl ? smnPerfMonCtlHi7 : smnPerfMonCtrHi7;
break; break;
} }
...@@ -422,6 +477,44 @@ static int df_v3_6_pmc_add_cntr(struct amdgpu_device *adev, ...@@ -422,6 +477,44 @@ static int df_v3_6_pmc_add_cntr(struct amdgpu_device *adev,
return -ENOSPC; return -ENOSPC;
} }
/*
 * Bit 31 of a counter's entry in df_perfmon_config_assign_mask flags a
 * counter whose arming failed at pmc_start and is deferred to read time.
 *
 * Use an unsigned 64-bit constant: a plain (1 << 31) shifts into the
 * sign bit of int (undefined behavior in standard C) and yields a
 * negative value that sign-extends when widened, so |= / &= ~ would
 * touch every bit >= 31 of a wider mask word instead of bit 31 only.
 */
#define DEFERRED_ARM_MASK (1ULL << 31)

/*
 * Set or clear the deferred-arm flag for the counter assigned to @config.
 *
 * Returns 0 on success, -EINVAL if @config is not assigned to any counter.
 */
static int df_v3_6_pmc_set_deferred(struct amdgpu_device *adev,
				    uint64_t config, bool is_deferred)
{
	int target_cntr;

	target_cntr = df_v3_6_pmc_config_2_cntr(adev, config);

	if (target_cntr < 0)
		return -EINVAL;

	if (is_deferred)
		adev->df_perfmon_config_assign_mask[target_cntr] |=
							DEFERRED_ARM_MASK;
	else
		adev->df_perfmon_config_assign_mask[target_cntr] &=
							~DEFERRED_ARM_MASK;

	return 0;
}
/*
 * Report whether the counter assigned to @config has its arming deferred
 * (pmc_start could not arm it and flagged it via DEFERRED_ARM_MASK).
 */
static bool df_v3_6_pmc_is_deferred(struct amdgpu_device *adev,
				    uint64_t config)
{
	int target_cntr = df_v3_6_pmc_config_2_cntr(adev, config);

	/*
	 * A negative lookup should not happen today since this function is
	 * only called from pmc_get_count, but guard against it anyway.
	 */
	if (target_cntr < 0)
		return false;

	return (adev->df_perfmon_config_assign_mask[target_cntr]
						& DEFERRED_ARM_MASK) != 0;
}
/* release performance counter */ /* release performance counter */
static void df_v3_6_pmc_release_cntr(struct amdgpu_device *adev, static void df_v3_6_pmc_release_cntr(struct amdgpu_device *adev,
uint64_t config) uint64_t config)
...@@ -451,29 +544,33 @@ static int df_v3_6_pmc_start(struct amdgpu_device *adev, uint64_t config, ...@@ -451,29 +544,33 @@ static int df_v3_6_pmc_start(struct amdgpu_device *adev, uint64_t config,
int is_enable) int is_enable)
{ {
uint32_t lo_base_addr, hi_base_addr, lo_val, hi_val; uint32_t lo_base_addr, hi_base_addr, lo_val, hi_val;
int ret = 0; int err = 0, ret = 0;
switch (adev->asic_type) { switch (adev->asic_type) {
case CHIP_VEGA20: case CHIP_VEGA20:
if (is_enable)
return df_v3_6_pmc_add_cntr(adev, config);
df_v3_6_reset_perfmon_cntr(adev, config); df_v3_6_reset_perfmon_cntr(adev, config);
if (is_enable) { ret = df_v3_6_pmc_get_ctrl_settings(adev,
ret = df_v3_6_pmc_add_cntr(adev, config);
} else {
ret = df_v3_6_pmc_get_ctrl_settings(adev,
config, config,
&lo_base_addr, &lo_base_addr,
&hi_base_addr, &hi_base_addr,
&lo_val, &lo_val,
&hi_val); &hi_val);
if (ret) if (ret)
return ret; return ret;
err = df_v3_6_perfmon_arm_with_retry(adev,
lo_base_addr,
lo_val,
hi_base_addr,
hi_val);
df_v3_6_perfmon_wreg(adev, lo_base_addr, lo_val, if (err)
hi_base_addr, hi_val); ret = df_v3_6_pmc_set_deferred(adev, config, true);
}
break; break;
default: default:
...@@ -501,7 +598,7 @@ static int df_v3_6_pmc_stop(struct amdgpu_device *adev, uint64_t config, ...@@ -501,7 +598,7 @@ static int df_v3_6_pmc_stop(struct amdgpu_device *adev, uint64_t config,
if (ret) if (ret)
return ret; return ret;
df_v3_6_perfmon_wreg(adev, lo_base_addr, 0, hi_base_addr, 0); df_v3_6_reset_perfmon_cntr(adev, config);
if (is_disable) if (is_disable)
df_v3_6_pmc_release_cntr(adev, config); df_v3_6_pmc_release_cntr(adev, config);
...@@ -518,18 +615,29 @@ static void df_v3_6_pmc_get_count(struct amdgpu_device *adev, ...@@ -518,18 +615,29 @@ static void df_v3_6_pmc_get_count(struct amdgpu_device *adev,
uint64_t config, uint64_t config,
uint64_t *count) uint64_t *count)
{ {
uint32_t lo_base_addr, hi_base_addr, lo_val, hi_val; uint32_t lo_base_addr, hi_base_addr, lo_val = 0, hi_val = 0;
*count = 0; *count = 0;
switch (adev->asic_type) { switch (adev->asic_type) {
case CHIP_VEGA20: case CHIP_VEGA20:
df_v3_6_pmc_get_read_settings(adev, config, &lo_base_addr, df_v3_6_pmc_get_read_settings(adev, config, &lo_base_addr,
&hi_base_addr); &hi_base_addr);
if ((lo_base_addr == 0) || (hi_base_addr == 0)) if ((lo_base_addr == 0) || (hi_base_addr == 0))
return; return;
/* rearm the counter or throw away count value on failure */
if (df_v3_6_pmc_is_deferred(adev, config)) {
int rearm_err = df_v3_6_perfmon_arm_with_status(adev,
lo_base_addr, lo_val,
hi_base_addr, hi_val);
if (rearm_err)
return;
df_v3_6_pmc_set_deferred(adev, config, false);
}
df_v3_6_perfmon_rreg(adev, lo_base_addr, &lo_val, df_v3_6_perfmon_rreg(adev, lo_base_addr, &lo_val,
hi_base_addr, &hi_val); hi_base_addr, &hi_val);
...@@ -542,7 +650,6 @@ static void df_v3_6_pmc_get_count(struct amdgpu_device *adev, ...@@ -542,7 +650,6 @@ static void df_v3_6_pmc_get_count(struct amdgpu_device *adev,
config, lo_base_addr, hi_base_addr, lo_val, hi_val); config, lo_base_addr, hi_base_addr, lo_val, hi_val);
break; break;
default: default:
break; break;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment