Commit e99a7467 authored by Linus Torvalds

Merge tag 'drm-fixes-2023-06-02' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "Quiet enough week, though the misc fixes tree didn't get to me when I
  was sending this, so maybe it'll be a bit bigger next week, just one
  i915 fix and some scattered amdgpu fixes:

  amdgpu:
   - Fix mclk and fclk output ordering on some APUs
   - Fix display regression with 5K VRR
   - VCN, JPEG spurious interrupt warning fixes
   - Fix SI DPM on some ARM64 platforms
   - Fix missing TMZ enablement on GC 11.0.1

  i915:
   - Fix for OA reporting to allow detecting non-power-of-two reports"

* tag 'drm-fixes-2023-06-02' of git://anongit.freedesktop.org/drm/drm:
  drm/i915/perf: Clear out entire reports after reading if not power of 2 size
  drm/amdgpu: enable tmz by default for GC 11.0.1
  drm/amd/pm: resolve reboot exception for si oland
  drm/amdgpu: add RAS POISON interrupt funcs for jpeg_v4_0
  drm/amdgpu: add RAS POISON interrupt funcs for jpeg_v2_6
  drm/amdgpu: separate ras irq from jpeg instance irq for UVD_POISON
  drm/amdgpu: add RAS POISON interrupt funcs for vcn_v4_0
  drm/amdgpu: add RAS POISON interrupt funcs for vcn_v2_6
  drm/amdgpu: separate ras irq from vcn instance irq for UVD_POISON
  Revert "drm/amd/display: Do not set drr on pipe commit"
  Revert "drm/amd/display: Block optimize on consecutive FAMS enables"
  drm/amd/pm: reverse mclk and fclk clocks levels for renoir
  drm/amd/pm: reverse mclk and fclk clocks levels for vangogh
  drm/amd/pm: reverse mclk and fclk clocks levels for yellow carp
  drm/amd/pm: reverse mclk clocks levels for SMU v13.0.5
  drm/amd/pm: reverse mclk and fclk clocks levels for SMU v13.0.4
parents 1419c3ba b6ccf213
@@ -593,6 +593,8 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
 	case IP_VERSION(9, 3, 0):
 	/* GC 10.3.7 */
 	case IP_VERSION(10, 3, 7):
+	/* GC 11.0.1 */
+	case IP_VERSION(11, 0, 1):
 		if (amdgpu_tmz == 0) {
 			adev->gmc.tmz_enabled = false;
 			dev_info(adev->dev,
@@ -616,7 +618,6 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
 	case IP_VERSION(10, 3, 1):
 	/* YELLOW_CARP*/
 	case IP_VERSION(10, 3, 3):
-	case IP_VERSION(11, 0, 1):
 	case IP_VERSION(11, 0, 4):
 		/* Don't enable it by default yet.
 		 */
...
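For context on the hunk above: amdgpu.tmz is the module parameter gating Trusted Memory Zone support (per the driver's parameter description, -1 = auto, 0 = off, 1 = on). Moving GC 11.0.1 from the second group into the first flips its default from off to on. A minimal sketch of the resulting decision, with an illustrative helper name that is not part of the driver:

```c
#include <stdbool.h>

/* Illustrative helper, not a real driver function: the decision the hunk
 * above implements for GC 11.0.1.  The amdgpu.tmz module parameter uses
 * -1 = auto (default), 0 = off, 1 = on. */
static bool gc_11_0_1_tmz_enabled(int amdgpu_tmz)
{
	/* Before the patch, GC 11.0.1 sat in the "keep disabled by default"
	 * group; after it, anything other than an explicit 0 enables TMZ. */
	return amdgpu_tmz != 0;
}
```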
@@ -241,6 +241,31 @@ int amdgpu_jpeg_process_poison_irq(struct amdgpu_device *adev,
 	return 0;
 }
 
+int amdgpu_jpeg_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
+{
+	int r, i;
+
+	r = amdgpu_ras_block_late_init(adev, ras_block);
+	if (r)
+		return r;
+
+	if (amdgpu_ras_is_supported(adev, ras_block->block)) {
+		for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+			if (adev->jpeg.harvest_config & (1 << i))
+				continue;
+
+			r = amdgpu_irq_get(adev, &adev->jpeg.inst[i].ras_poison_irq, 0);
+			if (r)
+				goto late_fini;
+		}
+	}
+	return 0;
+
+late_fini:
+	amdgpu_ras_block_late_fini(adev, ras_block);
+	return r;
+}
+
 int amdgpu_jpeg_ras_sw_init(struct amdgpu_device *adev)
 {
 	int err;
@@ -262,7 +287,7 @@ int amdgpu_jpeg_ras_sw_init(struct amdgpu_device *adev)
 	adev->jpeg.ras_if = &ras->ras_block.ras_comm;
 
 	if (!ras->ras_block.ras_late_init)
-		ras->ras_block.ras_late_init = amdgpu_ras_block_late_init;
+		ras->ras_block.ras_late_init = amdgpu_jpeg_ras_late_init;
 
 	return 0;
 }
...
@@ -38,6 +38,7 @@ struct amdgpu_jpeg_reg{
 struct amdgpu_jpeg_inst {
 	struct amdgpu_ring ring_dec;
 	struct amdgpu_irq_src irq;
+	struct amdgpu_irq_src ras_poison_irq;
 	struct amdgpu_jpeg_reg external;
 };
@@ -72,6 +73,8 @@ int amdgpu_jpeg_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout);
 int amdgpu_jpeg_process_poison_irq(struct amdgpu_device *adev,
 				struct amdgpu_irq_src *source,
 				struct amdgpu_iv_entry *entry);
+int amdgpu_jpeg_ras_late_init(struct amdgpu_device *adev,
+				struct ras_common_if *ras_block);
 int amdgpu_jpeg_ras_sw_init(struct amdgpu_device *adev);
 
 #endif /*__AMDGPU_JPEG_H__*/
...
@@ -1181,6 +1181,31 @@ int amdgpu_vcn_process_poison_irq(struct amdgpu_device *adev,
 	return 0;
 }
 
+int amdgpu_vcn_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
+{
+	int r, i;
+
+	r = amdgpu_ras_block_late_init(adev, ras_block);
+	if (r)
+		return r;
+
+	if (amdgpu_ras_is_supported(adev, ras_block->block)) {
+		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+			if (adev->vcn.harvest_config & (1 << i))
+				continue;
+
+			r = amdgpu_irq_get(adev, &adev->vcn.inst[i].ras_poison_irq, 0);
+			if (r)
+				goto late_fini;
+		}
+	}
+	return 0;
+
+late_fini:
+	amdgpu_ras_block_late_fini(adev, ras_block);
+	return r;
+}
+
 int amdgpu_vcn_ras_sw_init(struct amdgpu_device *adev)
 {
 	int err;
@@ -1202,7 +1227,7 @@ int amdgpu_vcn_ras_sw_init(struct amdgpu_device *adev)
 	adev->vcn.ras_if = &ras->ras_block.ras_comm;
 
 	if (!ras->ras_block.ras_late_init)
-		ras->ras_block.ras_late_init = amdgpu_ras_block_late_init;
+		ras->ras_block.ras_late_init = amdgpu_vcn_ras_late_init;
 
 	return 0;
 }
...
@@ -234,6 +234,7 @@ struct amdgpu_vcn_inst {
 	struct amdgpu_ring ring_enc[AMDGPU_VCN_MAX_ENC_RINGS];
 	atomic_t sched_score;
 	struct amdgpu_irq_src irq;
+	struct amdgpu_irq_src ras_poison_irq;
 	struct amdgpu_vcn_reg external;
 	struct amdgpu_bo *dpg_sram_bo;
 	struct dpg_pause_state pause_state;
@@ -400,6 +401,8 @@ void amdgpu_debugfs_vcn_fwlog_init(struct amdgpu_device *adev,
 int amdgpu_vcn_process_poison_irq(struct amdgpu_device *adev,
 			struct amdgpu_irq_src *source,
 			struct amdgpu_iv_entry *entry);
+int amdgpu_vcn_ras_late_init(struct amdgpu_device *adev,
+			struct ras_common_if *ras_block);
 int amdgpu_vcn_ras_sw_init(struct amdgpu_device *adev);
 
 #endif
...
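The JPEG and VCN changes above, and the per-IP hunks below, share one shape: the poison event gets a dedicated `amdgpu_irq_src` (`ras_poison_irq`) whose reference is taken in the new `*_ras_late_init()` helpers only when RAS is supported, and dropped in `hw_fini` under the same condition, so its enable count no longer rides on the instance irq used by the rings. A small userspace model of that refcounted get/put pairing, with illustrative names rather than driver API:

```c
#include <assert.h>
#include <stdio.h>

/* Toy model of refcounted irq sources: get() enables on 0->1, put()
 * disables on 1->0.  Keeping the RAS poison source separate means ring
 * teardown can no longer drop the poison interrupt's refcount by mistake,
 * and vice versa. */
struct irq_src { int refcount; };

static void irq_get(struct irq_src *src, const char *name)
{
	if (src->refcount++ == 0)
		printf("%s enabled\n", name);
}

static void irq_put(struct irq_src *src, const char *name)
{
	assert(src->refcount > 0);
	if (--src->refcount == 0)
		printf("%s disabled\n", name);
}

int main(void)
{
	struct irq_src inst_irq = {0}, ras_poison_irq = {0};
	int ras_supported = 1;	/* stand-in for amdgpu_ras_is_supported() */

	irq_get(&inst_irq, "inst.irq");			/* ring init */
	if (ras_supported)
		irq_get(&ras_poison_irq, "ras_poison_irq");	/* ras_late_init */

	irq_put(&inst_irq, "inst.irq");			/* ring fini */
	if (ras_supported)
		irq_put(&ras_poison_irq, "ras_poison_irq");	/* hw_fini */
	return 0;
}
```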
@@ -102,13 +102,13 @@ static int jpeg_v2_5_sw_init(void *handle)
 
 		/* JPEG DJPEG POISON EVENT */
 		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_jpeg[i],
-			VCN_2_6__SRCID_DJPEG0_POISON, &adev->jpeg.inst[i].irq);
+			VCN_2_6__SRCID_DJPEG0_POISON, &adev->jpeg.inst[i].ras_poison_irq);
 		if (r)
 			return r;
 
 		/* JPEG EJPEG POISON EVENT */
 		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_jpeg[i],
-			VCN_2_6__SRCID_EJPEG0_POISON, &adev->jpeg.inst[i].irq);
+			VCN_2_6__SRCID_EJPEG0_POISON, &adev->jpeg.inst[i].ras_poison_irq);
 		if (r)
 			return r;
 	}
@@ -221,6 +221,9 @@ static int jpeg_v2_5_hw_fini(void *handle)
 		if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
 		    RREG32_SOC15(JPEG, i, mmUVD_JRBC_STATUS))
 			jpeg_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE);
+
+		if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG))
+			amdgpu_irq_put(adev, &adev->jpeg.inst[i].ras_poison_irq, 0);
 	}
 
 	return 0;
@@ -569,6 +572,14 @@ static int jpeg_v2_5_set_interrupt_state(struct amdgpu_device *adev,
 	return 0;
 }
 
+static int jpeg_v2_6_set_ras_interrupt_state(struct amdgpu_device *adev,
+					struct amdgpu_irq_src *source,
+					unsigned int type,
+					enum amdgpu_interrupt_state state)
+{
+	return 0;
+}
+
 static int jpeg_v2_5_process_interrupt(struct amdgpu_device *adev,
 				      struct amdgpu_irq_src *source,
 				      struct amdgpu_iv_entry *entry)
@@ -593,10 +604,6 @@ static int jpeg_v2_5_process_interrupt(struct amdgpu_device *adev,
 	case VCN_2_0__SRCID__JPEG_DECODE:
 		amdgpu_fence_process(&adev->jpeg.inst[ip_instance].ring_dec);
 		break;
-	case VCN_2_6__SRCID_DJPEG0_POISON:
-	case VCN_2_6__SRCID_EJPEG0_POISON:
-		amdgpu_jpeg_process_poison_irq(adev, source, entry);
-		break;
 	default:
 		DRM_ERROR("Unhandled interrupt: %d %d\n",
 			  entry->src_id, entry->src_data[0]);
@@ -725,6 +732,11 @@ static const struct amdgpu_irq_src_funcs jpeg_v2_5_irq_funcs = {
 	.process = jpeg_v2_5_process_interrupt,
 };
 
+static const struct amdgpu_irq_src_funcs jpeg_v2_6_ras_irq_funcs = {
+	.set = jpeg_v2_6_set_ras_interrupt_state,
+	.process = amdgpu_jpeg_process_poison_irq,
+};
+
 static void jpeg_v2_5_set_irq_funcs(struct amdgpu_device *adev)
 {
 	int i;
@@ -735,6 +747,9 @@ static void jpeg_v2_5_set_irq_funcs(struct amdgpu_device *adev)
 		adev->jpeg.inst[i].irq.num_types = 1;
 		adev->jpeg.inst[i].irq.funcs = &jpeg_v2_5_irq_funcs;
+
+		adev->jpeg.inst[i].ras_poison_irq.num_types = 1;
+		adev->jpeg.inst[i].ras_poison_irq.funcs = &jpeg_v2_6_ras_irq_funcs;
 	}
 }
@@ -800,6 +815,7 @@ const struct amdgpu_ras_block_hw_ops jpeg_v2_6_ras_hw_ops = {
 static struct amdgpu_jpeg_ras jpeg_v2_6_ras = {
 	.ras_block = {
 		.hw_ops = &jpeg_v2_6_ras_hw_ops,
+		.ras_late_init = amdgpu_jpeg_ras_late_init,
 	},
 };
...
@@ -87,13 +87,13 @@ static int jpeg_v4_0_sw_init(void *handle)
 
 	/* JPEG DJPEG POISON EVENT */
 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
-		VCN_4_0__SRCID_DJPEG0_POISON, &adev->jpeg.inst->irq);
+		VCN_4_0__SRCID_DJPEG0_POISON, &adev->jpeg.inst->ras_poison_irq);
 	if (r)
 		return r;
 
 	/* JPEG EJPEG POISON EVENT */
 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
-		VCN_4_0__SRCID_EJPEG0_POISON, &adev->jpeg.inst->irq);
+		VCN_4_0__SRCID_EJPEG0_POISON, &adev->jpeg.inst->ras_poison_irq);
 	if (r)
 		return r;
 
@@ -202,7 +202,8 @@ static int jpeg_v4_0_hw_fini(void *handle)
 		    RREG32_SOC15(JPEG, 0, regUVD_JRBC_STATUS))
 			jpeg_v4_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
 	}
-	amdgpu_irq_put(adev, &adev->jpeg.inst->irq, 0);
+	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG))
+		amdgpu_irq_put(adev, &adev->jpeg.inst->ras_poison_irq, 0);
 
 	return 0;
 }
@@ -670,6 +671,14 @@ static int jpeg_v4_0_set_interrupt_state(struct amdgpu_device *adev,
 	return 0;
 }
 
+static int jpeg_v4_0_set_ras_interrupt_state(struct amdgpu_device *adev,
+					struct amdgpu_irq_src *source,
+					unsigned int type,
+					enum amdgpu_interrupt_state state)
+{
+	return 0;
+}
+
 static int jpeg_v4_0_process_interrupt(struct amdgpu_device *adev,
 				      struct amdgpu_irq_src *source,
 				      struct amdgpu_iv_entry *entry)
@@ -680,10 +689,6 @@ static int jpeg_v4_0_process_interrupt(struct amdgpu_device *adev,
 	case VCN_4_0__SRCID__JPEG_DECODE:
 		amdgpu_fence_process(&adev->jpeg.inst->ring_dec);
 		break;
-	case VCN_4_0__SRCID_DJPEG0_POISON:
-	case VCN_4_0__SRCID_EJPEG0_POISON:
-		amdgpu_jpeg_process_poison_irq(adev, source, entry);
-		break;
 	default:
 		DRM_DEV_ERROR(adev->dev, "Unhandled interrupt: %d %d\n",
 			      entry->src_id, entry->src_data[0]);
@@ -753,10 +758,18 @@ static const struct amdgpu_irq_src_funcs jpeg_v4_0_irq_funcs = {
 	.process = jpeg_v4_0_process_interrupt,
 };
 
+static const struct amdgpu_irq_src_funcs jpeg_v4_0_ras_irq_funcs = {
+	.set = jpeg_v4_0_set_ras_interrupt_state,
+	.process = amdgpu_jpeg_process_poison_irq,
+};
+
 static void jpeg_v4_0_set_irq_funcs(struct amdgpu_device *adev)
 {
 	adev->jpeg.inst->irq.num_types = 1;
 	adev->jpeg.inst->irq.funcs = &jpeg_v4_0_irq_funcs;
+
+	adev->jpeg.inst->ras_poison_irq.num_types = 1;
+	adev->jpeg.inst->ras_poison_irq.funcs = &jpeg_v4_0_ras_irq_funcs;
 }
 
 const struct amdgpu_ip_block_version jpeg_v4_0_ip_block = {
@@ -811,6 +824,7 @@ const struct amdgpu_ras_block_hw_ops jpeg_v4_0_ras_hw_ops = {
 static struct amdgpu_jpeg_ras jpeg_v4_0_ras = {
 	.ras_block = {
 		.hw_ops = &jpeg_v4_0_ras_hw_ops,
+		.ras_late_init = amdgpu_jpeg_ras_late_init,
 	},
 };
...
@@ -143,7 +143,7 @@ static int vcn_v2_5_sw_init(void *handle)
 
 		/* VCN POISON TRAP */
 		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j],
-			VCN_2_6__SRCID_UVD_POISON, &adev->vcn.inst[j].irq);
+			VCN_2_6__SRCID_UVD_POISON, &adev->vcn.inst[j].ras_poison_irq);
 		if (r)
 			return r;
 	}
@@ -354,6 +354,9 @@ static int vcn_v2_5_hw_fini(void *handle)
 		    (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
 		     RREG32_SOC15(VCN, i, mmUVD_STATUS)))
 			vcn_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE);
+
+		if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN))
+			amdgpu_irq_put(adev, &adev->vcn.inst[i].ras_poison_irq, 0);
 	}
 
 	return 0;
@@ -1807,6 +1810,14 @@ static int vcn_v2_5_set_interrupt_state(struct amdgpu_device *adev,
 	return 0;
 }
 
+static int vcn_v2_6_set_ras_interrupt_state(struct amdgpu_device *adev,
+					struct amdgpu_irq_src *source,
+					unsigned int type,
+					enum amdgpu_interrupt_state state)
+{
+	return 0;
+}
+
 static int vcn_v2_5_process_interrupt(struct amdgpu_device *adev,
 				      struct amdgpu_irq_src *source,
 				      struct amdgpu_iv_entry *entry)
@@ -1837,9 +1848,6 @@ static int vcn_v2_5_process_interrupt(struct amdgpu_device *adev,
 	case VCN_2_0__SRCID__UVD_ENC_LOW_LATENCY:
 		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[1]);
 		break;
-	case VCN_2_6__SRCID_UVD_POISON:
-		amdgpu_vcn_process_poison_irq(adev, source, entry);
-		break;
 	default:
 		DRM_ERROR("Unhandled interrupt: %d %d\n",
 			  entry->src_id, entry->src_data[0]);
@@ -1854,6 +1862,11 @@ static const struct amdgpu_irq_src_funcs vcn_v2_5_irq_funcs = {
 	.process = vcn_v2_5_process_interrupt,
 };
 
+static const struct amdgpu_irq_src_funcs vcn_v2_6_ras_irq_funcs = {
+	.set = vcn_v2_6_set_ras_interrupt_state,
+	.process = amdgpu_vcn_process_poison_irq,
+};
+
 static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev)
 {
 	int i;
@@ -1863,6 +1876,9 @@ static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev)
 			continue;
 		adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 1;
 		adev->vcn.inst[i].irq.funcs = &vcn_v2_5_irq_funcs;
+
+		adev->vcn.inst[i].ras_poison_irq.num_types = adev->vcn.num_enc_rings + 1;
+		adev->vcn.inst[i].ras_poison_irq.funcs = &vcn_v2_6_ras_irq_funcs;
 	}
 }
@@ -1965,6 +1981,7 @@ const struct amdgpu_ras_block_hw_ops vcn_v2_6_ras_hw_ops = {
 static struct amdgpu_vcn_ras vcn_v2_6_ras = {
 	.ras_block = {
 		.hw_ops = &vcn_v2_6_ras_hw_ops,
+		.ras_late_init = amdgpu_vcn_ras_late_init,
 	},
 };
...
@@ -139,7 +139,7 @@ static int vcn_v4_0_sw_init(void *handle)
 
 		/* VCN POISON TRAP */
 		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i],
-			VCN_4_0__SRCID_UVD_POISON, &adev->vcn.inst[i].irq);
+			VCN_4_0__SRCID_UVD_POISON, &adev->vcn.inst[i].ras_poison_irq);
 		if (r)
 			return r;
 
@@ -305,8 +305,8 @@ static int vcn_v4_0_hw_fini(void *handle)
 				vcn_v4_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
 			}
 		}
-
-		amdgpu_irq_put(adev, &adev->vcn.inst[i].irq, 0);
+		if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN))
+			amdgpu_irq_put(adev, &adev->vcn.inst[i].ras_poison_irq, 0);
 	}
 
 	return 0;
@@ -1975,6 +1975,24 @@ static int vcn_v4_0_set_interrupt_state(struct amdgpu_device *adev, struct amdgp
 	return 0;
 }
 
+/**
+ * vcn_v4_0_set_ras_interrupt_state - set VCN block RAS interrupt state
+ *
+ * @adev: amdgpu_device pointer
+ * @source: interrupt sources
+ * @type: interrupt types
+ * @state: interrupt states
+ *
+ * Set VCN block RAS interrupt state
+ */
+static int vcn_v4_0_set_ras_interrupt_state(struct amdgpu_device *adev,
+	struct amdgpu_irq_src *source,
+	unsigned int type,
+	enum amdgpu_interrupt_state state)
+{
+	return 0;
+}
+
 /**
  * vcn_v4_0_process_interrupt - process VCN block interrupt
  *
@@ -2007,9 +2025,6 @@ static int vcn_v4_0_process_interrupt(struct amdgpu_device *adev, struct amdgpu_
 	case VCN_4_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
 		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[0]);
 		break;
-	case VCN_4_0__SRCID_UVD_POISON:
-		amdgpu_vcn_process_poison_irq(adev, source, entry);
-		break;
 	default:
 		DRM_ERROR("Unhandled interrupt: %d %d\n",
 			  entry->src_id, entry->src_data[0]);
@@ -2024,6 +2039,11 @@ static const struct amdgpu_irq_src_funcs vcn_v4_0_irq_funcs = {
 	.process = vcn_v4_0_process_interrupt,
 };
 
+static const struct amdgpu_irq_src_funcs vcn_v4_0_ras_irq_funcs = {
+	.set = vcn_v4_0_set_ras_interrupt_state,
+	.process = amdgpu_vcn_process_poison_irq,
+};
+
 /**
  * vcn_v4_0_set_irq_funcs - set VCN block interrupt irq functions
  *
@@ -2041,6 +2061,9 @@ static void vcn_v4_0_set_irq_funcs(struct amdgpu_device *adev)
 		adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 1;
 		adev->vcn.inst[i].irq.funcs = &vcn_v4_0_irq_funcs;
+
+		adev->vcn.inst[i].ras_poison_irq.num_types = adev->vcn.num_enc_rings + 1;
+		adev->vcn.inst[i].ras_poison_irq.funcs = &vcn_v4_0_ras_irq_funcs;
 	}
 }
@@ -2114,6 +2137,7 @@ const struct amdgpu_ras_block_hw_ops vcn_v4_0_ras_hw_ops = {
 static struct amdgpu_vcn_ras vcn_v4_0_ras = {
 	.ras_block = {
 		.hw_ops = &vcn_v4_0_ras_hw_ops,
+		.ras_late_init = amdgpu_vcn_ras_late_init,
 	},
 };
...
@@ -2113,15 +2113,6 @@ void dcn20_optimize_bandwidth(
 	if (hubbub->funcs->program_compbuf_size)
 		hubbub->funcs->program_compbuf_size(hubbub, context->bw_ctx.bw.dcn.compbuf_size_kb, true);
 
-	if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) {
-		dc_dmub_srv_p_state_delegate(dc,
-			true, context);
-		context->bw_ctx.bw.dcn.clk.p_state_change_support = true;
-		dc->clk_mgr->clks.fw_based_mclk_switching = true;
-	} else {
-		dc->clk_mgr->clks.fw_based_mclk_switching = false;
-	}
-
 	dc->clk_mgr->funcs->update_clocks(
 			dc->clk_mgr,
 			context,
...
@@ -983,36 +983,13 @@ void dcn30_set_disp_pattern_generator(const struct dc *dc,
 }
 
 void dcn30_prepare_bandwidth(struct dc *dc,
 	struct dc_state *context)
 {
-	bool p_state_change_support = context->bw_ctx.bw.dcn.clk.p_state_change_support;
-	/* Any transition into an FPO config should disable MCLK switching first to avoid
-	 * driver and FW P-State synchronization issues.
-	 */
-	if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || dc->clk_mgr->clks.fw_based_mclk_switching) {
-		dc->optimized_required = true;
-		context->bw_ctx.bw.dcn.clk.p_state_change_support = false;
-	}
-
 	if (dc->clk_mgr->dc_mode_softmax_enabled)
 		if (dc->clk_mgr->clks.dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000 &&
 				context->bw_ctx.bw.dcn.clk.dramclk_khz > dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000)
 			dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz);
 
 	dcn20_prepare_bandwidth(dc, context);
-	/*
-	 * enabled -> enabled: do not disable
-	 * enabled -> disabled: disable
-	 * disabled -> enabled: don't care
-	 * disabled -> disabled: don't care
-	 */
-	if (!context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching)
-		dc_dmub_srv_p_state_delegate(dc, false, context);
-
-	if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || dc->clk_mgr->clks.fw_based_mclk_switching) {
-		/* After disabling P-State, restore the original value to ensure we get the correct P-State
-		 * on the next optimize. */
-		context->bw_ctx.bw.dcn.clk.p_state_change_support = p_state_change_support;
-	}
 }
...
@@ -6925,23 +6925,6 @@ static int si_dpm_enable(struct amdgpu_device *adev)
 	return 0;
 }
 
-static int si_set_temperature_range(struct amdgpu_device *adev)
-{
-	int ret;
-
-	ret = si_thermal_enable_alert(adev, false);
-	if (ret)
-		return ret;
-	ret = si_thermal_set_temperature_range(adev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
-	if (ret)
-		return ret;
-	ret = si_thermal_enable_alert(adev, true);
-	if (ret)
-		return ret;
-
-	return ret;
-}
-
 static void si_dpm_disable(struct amdgpu_device *adev)
 {
 	struct rv7xx_power_info *pi = rv770_get_pi(adev);
@@ -7626,18 +7609,6 @@ static int si_dpm_process_interrupt(struct amdgpu_device *adev,
 
 static int si_dpm_late_init(void *handle)
 {
-	int ret;
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-	if (!adev->pm.dpm_enabled)
-		return 0;
-
-	ret = si_set_temperature_range(adev);
-	if (ret)
-		return ret;
-#if 0 //TODO ?
-	si_dpm_powergate_uvd(adev, true);
-#endif
 	return 0;
 }
...
@@ -582,7 +582,7 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu,
 	DpmClocks_t *clk_table = smu->smu_table.clocks_table;
 	SmuMetrics_legacy_t metrics;
 	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
-	int i, size = 0, ret = 0;
+	int i, idx, size = 0, ret = 0;
 	uint32_t cur_value = 0, value = 0, count = 0;
 	bool cur_value_match_level = false;
@@ -656,7 +656,8 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu,
 	case SMU_MCLK:
 	case SMU_FCLK:
 		for (i = 0; i < count; i++) {
-			ret = vangogh_get_dpm_clk_limited(smu, clk_type, i, &value);
+			idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i;
+			ret = vangogh_get_dpm_clk_limited(smu, clk_type, idx, &value);
 			if (ret)
 				return ret;
 			if (!value)
@@ -683,7 +684,7 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
 	DpmClocks_t *clk_table = smu->smu_table.clocks_table;
 	SmuMetrics_t metrics;
 	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
-	int i, size = 0, ret = 0;
+	int i, idx, size = 0, ret = 0;
 	uint32_t cur_value = 0, value = 0, count = 0;
 	bool cur_value_match_level = false;
 	uint32_t min, max;
@@ -765,7 +766,8 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
 	case SMU_MCLK:
 	case SMU_FCLK:
 		for (i = 0; i < count; i++) {
-			ret = vangogh_get_dpm_clk_limited(smu, clk_type, i, &value);
+			idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i;
+			ret = vangogh_get_dpm_clk_limited(smu, clk_type, idx, &value);
 			if (ret)
 				return ret;
 			if (!value)
...
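The same one-line reversal recurs in the renoir, SMU v13.0.4, SMU v13.0.5, and yellow carp hunks that follow: the firmware-side DPM table reports MCLK/FCLK levels highest-first, while the sysfs pp_dpm_* files conventionally list levels in ascending order. A standalone sketch of the effect, using made-up clock values:

```c
#include <stdio.h>

int main(void)
{
	/* Illustrative values only: a DPM table as firmware reports it,
	 * highest frequency first. */
	const unsigned int fw_mclk[] = { 1600, 1200, 800, 400 }; /* MHz */
	const int count = sizeof(fw_mclk) / sizeof(fw_mclk[0]);

	/* Same trick as the patches: walk the table back to front so the
	 * printed levels come out in ascending order. */
	for (int i = 0; i < count; i++) {
		int idx = count - i - 1;
		printf("%d: %uMhz\n", i, fw_mclk[idx]);
	}
	return 0;
}
```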
@@ -494,7 +494,7 @@ static int renoir_set_fine_grain_gfx_freq_parameters(struct smu_context *smu)
 static int renoir_print_clk_levels(struct smu_context *smu,
 			enum smu_clk_type clk_type, char *buf)
 {
-	int i, size = 0, ret = 0;
+	int i, idx, size = 0, ret = 0;
 	uint32_t cur_value = 0, value = 0, count = 0, min = 0, max = 0;
 	SmuMetrics_t metrics;
 	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
@@ -594,7 +594,8 @@ static int renoir_print_clk_levels(struct smu_context *smu,
 	case SMU_VCLK:
 	case SMU_DCLK:
 		for (i = 0; i < count; i++) {
-			ret = renoir_get_dpm_clk_limited(smu, clk_type, i, &value);
+			idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i;
+			ret = renoir_get_dpm_clk_limited(smu, clk_type, idx, &value);
 			if (ret)
 				return ret;
 			if (!value)
...
@@ -478,7 +478,7 @@ static int smu_v13_0_4_get_dpm_level_count(struct smu_context *smu,
 static int smu_v13_0_4_print_clk_levels(struct smu_context *smu,
 					enum smu_clk_type clk_type, char *buf)
 {
-	int i, size = 0, ret = 0;
+	int i, idx, size = 0, ret = 0;
 	uint32_t cur_value = 0, value = 0, count = 0;
 	uint32_t min, max;
@@ -512,7 +512,8 @@ static int smu_v13_0_4_print_clk_levels(struct smu_context *smu,
 			break;
 
 		for (i = 0; i < count; i++) {
-			ret = smu_v13_0_4_get_dpm_freq_by_index(smu, clk_type, i, &value);
+			idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i;
+			ret = smu_v13_0_4_get_dpm_freq_by_index(smu, clk_type, idx, &value);
 			if (ret)
 				break;
...
@@ -866,7 +866,7 @@ static int smu_v13_0_5_set_soft_freq_limited_range(struct smu_context *smu,
 static int smu_v13_0_5_print_clk_levels(struct smu_context *smu,
 				enum smu_clk_type clk_type, char *buf)
 {
-	int i, size = 0, ret = 0;
+	int i, idx, size = 0, ret = 0;
 	uint32_t cur_value = 0, value = 0, count = 0;
 	uint32_t min = 0, max = 0;
@@ -898,7 +898,8 @@ static int smu_v13_0_5_print_clk_levels(struct smu_context *smu,
 			goto print_clk_out;
 
 		for (i = 0; i < count; i++) {
-			ret = smu_v13_0_5_get_dpm_freq_by_index(smu, clk_type, i, &value);
+			idx = (clk_type == SMU_MCLK) ? (count - i - 1) : i;
+			ret = smu_v13_0_5_get_dpm_freq_by_index(smu, clk_type, idx, &value);
 			if (ret)
 				goto print_clk_out;
...
@@ -1000,7 +1000,7 @@ static int yellow_carp_set_soft_freq_limited_range(struct smu_context *smu,
 static int yellow_carp_print_clk_levels(struct smu_context *smu,
 				enum smu_clk_type clk_type, char *buf)
 {
-	int i, size = 0, ret = 0;
+	int i, idx, size = 0, ret = 0;
 	uint32_t cur_value = 0, value = 0, count = 0;
 	uint32_t min, max;
@@ -1033,7 +1033,8 @@ static int yellow_carp_print_clk_levels(struct smu_context *smu,
 			goto print_clk_out;
 
 		for (i = 0; i < count; i++) {
-			ret = yellow_carp_get_dpm_freq_by_index(smu, clk_type, i, &value);
+			idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i;
+			ret = yellow_carp_get_dpm_freq_by_index(smu, clk_type, idx, &value);
 			if (ret)
 				goto print_clk_out;
...
@@ -877,12 +877,17 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
 			stream->oa_buffer.last_ctx_id = ctx_id;
 		}
 
-		/*
-		 * Clear out the report id and timestamp as a means to detect unlanded
-		 * reports.
-		 */
-		oa_report_id_clear(stream, report32);
-		oa_timestamp_clear(stream, report32);
+		if (is_power_of_2(report_size)) {
+			/*
+			 * Clear out the report id and timestamp as a means
+			 * to detect unlanded reports.
+			 */
+			oa_report_id_clear(stream, report32);
+			oa_timestamp_clear(stream, report32);
+		} else {
+			/* Zero out the entire report */
+			memset(report32, 0, report_size);
+		}
 	}
 
 	if (start_offset != *offset) {
...
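The reasoning the hunk encodes, in outline: the OA buffer size is a power of two, so a power-of-two report size tiles it identically on every wrap, and a cleared report id/timestamp reliably marks a slot the hardware has not rewritten. With a non-power-of-two report size the report boundaries drift on each lap, so stale bytes can line up where a header used to be, and the whole report must be zeroed instead. A standalone model of the boundary drift, with illustrative buffer and report sizes:

```c
#include <stdio.h>

int main(void)
{
	const unsigned int buf_size = 16;	/* stand-in for the power-of-2 OA buffer */
	const unsigned int sizes[] = { 4, 3 };	/* power-of-2 vs. not */

	for (unsigned int s = 0; s < 2; s++) {
		printf("report size %u, start offsets:", sizes[s]);
		unsigned int pos = 0;
		for (int n = 0; n < 10; n++) {
			printf(" %u", pos);
			pos = (pos + sizes[s]) % buf_size;
		}
		/* size 4 repeats 0 4 8 12 on every lap; size 3 lands on new
		 * offsets each lap, so stale bytes can alias a header. */
		printf("\n");
	}
	return 0;
}
```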