Commit 750b9588 authored by Linus Torvalds

Merge tag 'drm-fixes-2023-10-27' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "This is the final set of fixes for 6.6, just misc bits mainly in
  amdgpu and i915, nothing too noteworthy.

  amdgpu:
   - ignore duplicated BOs in CS parser
   - remove redundant call to amdgpu_ctx_priority_is_valid()
   - Extend VI ASPM quirks to more platforms

  amdkfd:
   - reserve fence slot while locking BO

  dp_mst:
   - Fix NULL deref in get_mst_branch_device_by_guid_helper()

  logicvc:
   - Kconfig: Select REGMAP and REGMAP_MMIO

  ivpu:
   - Fix missing VPUIP interrupts

  i915:
   - Determine context valid in OA reports
   - Hold GT forcewake during steering operations
   - Check if PMU is closed before stopping event"

* tag 'drm-fixes-2023-10-27' of git://anongit.freedesktop.org/drm/drm:
  accel/ivpu/37xx: Fix missing VPUIP interrupts
  drm/amd: Disable ASPM for VI w/ all Intel systems
  drm/i915/pmu: Check if pmu is closed before stopping event
  drm/i915/mcr: Hold GT forcewake during steering operations
  drm/logicvc: Kconfig: select REGMAP and REGMAP_MMIO
  drm/i915/perf: Determine context valid in OA reports
  drm/amdkfd: reserve a fence slot while locking the BO
  drm/amdgpu: Remove redundant call to priority_is_valid()
  drm/dp_mst: Fix NULL deref in get_mst_branch_device_by_guid_helper()
  drm/amdgpu: ignore duplicate BOs again
parents 3a568e3a 44117828
@@ -940,9 +940,6 @@ static u32 ivpu_hw_37xx_irqb_handler(struct ivpu_device *vdev, int irq)
 	if (status == 0)
 		return 0;
 
-	/* Disable global interrupt before handling local buttress interrupts */
-	REGB_WR32(VPU_37XX_BUTTRESS_GLOBAL_INT_MASK, 0x1);
-
 	if (REG_TEST_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE, status))
 		ivpu_dbg(vdev, IRQ, "FREQ_CHANGE irq: %08x",
 			 REGB_RD32(VPU_37XX_BUTTRESS_CURRENT_PLL));
@@ -974,9 +971,6 @@ static u32 ivpu_hw_37xx_irqb_handler(struct ivpu_device *vdev, int irq)
 	else
 		REGB_WR32(VPU_37XX_BUTTRESS_INTERRUPT_STAT, status);
 
-	/* Re-enable global interrupt */
-	REGB_WR32(VPU_37XX_BUTTRESS_GLOBAL_INT_MASK, 0x0);
-
 	if (schedule_recovery)
 		ivpu_pm_schedule_recovery(vdev);
 
@@ -988,9 +982,14 @@ static irqreturn_t ivpu_hw_37xx_irq_handler(int irq, void *ptr)
 	struct ivpu_device *vdev = ptr;
 	u32 ret_irqv, ret_irqb;
 
+	REGB_WR32(VPU_37XX_BUTTRESS_GLOBAL_INT_MASK, 0x1);
+
 	ret_irqv = ivpu_hw_37xx_irqv_handler(vdev, irq);
 	ret_irqb = ivpu_hw_37xx_irqb_handler(vdev, irq);
 
+	/* Re-enable global interrupts to re-trigger MSI for pending interrupts */
+	REGB_WR32(VPU_37XX_BUTTRESS_GLOBAL_INT_MASK, 0x0);
+
 	return IRQ_RETVAL(ret_irqb | ret_irqv);
 }
...
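The fix moves the global-interrupt masking out of the buttress sub-handler and into the top-level ISR, so both the VPUIP and buttress sources are handled with the global interrupt masked. Reassembled from the hunks above for readability (a sketch; the sub-handler bodies are elided):

    static irqreturn_t ivpu_hw_37xx_irq_handler(int irq, void *ptr)
    {
            struct ivpu_device *vdev = ptr;
            u32 ret_irqv, ret_irqb;

            /* Mask the global interrupt across both sub-handlers. */
            REGB_WR32(VPU_37XX_BUTTRESS_GLOBAL_INT_MASK, 0x1);

            ret_irqv = ivpu_hw_37xx_irqv_handler(vdev, irq);
            ret_irqb = ivpu_hw_37xx_irqb_handler(vdev, irq);

            /* Unmasking last lets an interrupt that became pending while
             * the handlers ran generate a fresh MSI edge instead of being
             * silently lost. */
            REGB_WR32(VPU_37XX_BUTTRESS_GLOBAL_INT_MASK, 0x0);

            return IRQ_RETVAL(ret_irqb | ret_irqv);
    }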
@@ -1103,7 +1103,7 @@ static int reserve_bo_and_vm(struct kgd_mem *mem,
 	if (unlikely(ret))
 		goto error;
 
-	ret = drm_exec_lock_obj(&ctx->exec, &bo->tbo.base);
+	ret = drm_exec_prepare_obj(&ctx->exec, &bo->tbo.base, 1);
 	drm_exec_retry_on_contention(&ctx->exec);
 	if (unlikely(ret))
 		goto error;
...
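Unlike drm_exec_lock_obj(), drm_exec_prepare_obj() also reserves dma-fence slots in the object's reservation object while taking the lock, making room for the fence amdkfd attaches afterwards. A minimal sketch of the drm_exec pattern, assuming a GEM object `obj` and one fence slot (names are illustrative, not amdkfd's actual variables):

    struct drm_exec exec;
    int ret;

    drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
    drm_exec_until_all_locked(&exec) {
            /* Lock obj and reserve room for one new fence in one step. */
            ret = drm_exec_prepare_obj(&exec, obj, 1);
            /* On contention everything is unlocked and the loop retries. */
            drm_exec_retry_on_contention(&exec);
            if (ret)
                    goto error;
    }
    /* ... add the new fence to obj's reservation object ... */
    error:
    drm_exec_fini(&exec);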
@@ -65,7 +65,8 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p,
 	}
 
 	amdgpu_sync_create(&p->sync);
-	drm_exec_init(&p->exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+	drm_exec_init(&p->exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
+		      DRM_EXEC_IGNORE_DUPLICATES);
 	return 0;
 }
...
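DRM_EXEC_IGNORE_DUPLICATES makes locking an object that this drm_exec context already holds return success instead of -EALREADY, restoring the old tolerance for the same BO being listed twice in one submission. A sketch of the effect (hypothetical `obj` appearing twice in a CS):

    drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
                         DRM_EXEC_IGNORE_DUPLICATES);
    drm_exec_until_all_locked(&exec) {
            ret = drm_exec_lock_obj(&exec, obj);    /* first: takes the lock */
            drm_exec_retry_on_contention(&exec);

            ret = drm_exec_lock_obj(&exec, obj);    /* duplicate: returns 0
                                                     * instead of -EALREADY */
            drm_exec_retry_on_contention(&exec);
    }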
@@ -55,6 +55,10 @@ bool amdgpu_ctx_priority_is_valid(int32_t ctx_prio)
 		return true;
 	default:
 	case AMDGPU_CTX_PRIORITY_UNSET:
+		/* UNSET priority is not valid and we don't carry that
+		 * around, but set it to NORMAL in the only place this
+		 * function is called, amdgpu_ctx_ioctl().
+		 */
 		return false;
 	}
 }
@@ -95,9 +99,6 @@ amdgpu_ctx_to_drm_sched_prio(int32_t ctx_prio)
 static int amdgpu_ctx_priority_permit(struct drm_file *filp,
 				      int32_t priority)
 {
-	if (!amdgpu_ctx_priority_is_valid(priority))
-		return -EINVAL;
-
 	/* NORMAL and below are accessible by everyone */
 	if (priority <= AMDGPU_CTX_PRIORITY_NORMAL)
 		return 0;
@@ -632,8 +633,6 @@ static int amdgpu_ctx_query2(struct amdgpu_device *adev,
 	return 0;
 }
 
 static int amdgpu_ctx_stable_pstate(struct amdgpu_device *adev,
 				    struct amdgpu_fpriv *fpriv, uint32_t id,
 				    bool set, u32 *stable_pstate)
@@ -676,8 +675,10 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
 	id = args->in.ctx_id;
 	priority = args->in.priority;
 
-	/* For backwards compatibility reasons, we need to accept
-	 * ioctls with garbage in the priority field */
+	/* For backwards compatibility, we need to accept ioctls with garbage
+	 * in the priority field. Garbage values in the priority field, result
+	 * in the priority being set to NORMAL.
+	 */
 	if (!amdgpu_ctx_priority_is_valid(priority))
 		priority = AMDGPU_CTX_PRIORITY_NORMAL;
...
@@ -1124,7 +1124,7 @@ static void vi_program_aspm(struct amdgpu_device *adev)
 	bool bL1SS = false;
 	bool bClkReqSupport = true;
 
-	if (!amdgpu_device_should_use_aspm(adev) || !amdgpu_device_aspm_support_quirk())
+	if (!amdgpu_device_should_use_aspm(adev) || !amdgpu_device_pcie_dynamic_switching_supported())
 		return;
 
 	if (adev->flags & AMD_IS_APU ||
...
@@ -2574,14 +2574,14 @@ static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
 	struct drm_dp_mst_branch *found_mstb;
 	struct drm_dp_mst_port *port;
 
+	if (!mstb)
+		return NULL;
+
 	if (memcmp(mstb->guid, guid, 16) == 0)
 		return mstb;
 
 	list_for_each_entry(port, &mstb->ports, next) {
-		if (!port->mstb)
-			continue;
-
 		found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid);
 
 		if (found_mstb)
...
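The NULL check moves from the recursive call sites to the function entry: previously only `port->mstb` was guarded before recursing, so a NULL branch pointer reaching the helper any other way was dereferenced by memcmp(). A sketch of the fixed control flow, reassembled from the hunk (the tail past `if (found_mstb)` is not shown in the diff and follows the obvious pattern):

    static struct drm_dp_mst_branch *
    get_mst_branch_device_by_guid_helper(struct drm_dp_mst_branch *mstb,
                                         const uint8_t *guid)
    {
            struct drm_dp_mst_branch *found_mstb;
            struct drm_dp_mst_port *port;

            /* Covers both a NULL top-level branch and recursion into
             * ports with no downstream branch (port->mstb == NULL). */
            if (!mstb)
                    return NULL;

            if (memcmp(mstb->guid, guid, 16) == 0)
                    return mstb;

            list_for_each_entry(port, &mstb->ports, next) {
                    found_mstb = get_mst_branch_device_by_guid_helper(port->mstb,
                                                                      guid);
                    if (found_mstb)
                            return found_mstb;
            }

            return NULL;
    }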
@@ -376,9 +376,26 @@ void intel_gt_mcr_lock(struct intel_gt *gt, unsigned long *flags)
 	 * driver threads, but also with hardware/firmware agents. A dedicated
 	 * locking register is used.
 	 */
-	if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 70))
+	if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 70)) {
+		/*
+		 * The steering control and semaphore registers are inside an
+		 * "always on" power domain with respect to RC6. However there
+		 * are some issues if higher-level platform sleep states are
+		 * entering/exiting at the same time these registers are
+		 * accessed. Grabbing GT forcewake and holding it over the
+		 * entire lock/steer/unlock cycle ensures that those sleep
+		 * states have been fully exited before we access these
+		 * registers. This wakeref will be released in the unlock
+		 * routine.
+		 *
+		 * This is expected to become a formally documented/numbered
+		 * workaround soon.
+		 */
+		intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_GT);
+
 		err = wait_for(intel_uncore_read_fw(gt->uncore,
 						    MTL_STEER_SEMAPHORE) == 0x1, 100);
+	}
 
 	/*
 	 * Even on platforms with a hardware lock, we'll continue to grab
@@ -415,8 +432,11 @@ void intel_gt_mcr_unlock(struct intel_gt *gt, unsigned long flags)
 {
 	spin_unlock_irqrestore(&gt->mcr_lock, flags);
 
-	if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 70))
+	if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 70)) {
 		intel_uncore_write_fw(gt->uncore, MTL_STEER_SEMAPHORE, 0x1);
+
+		intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_GT);
+	}
 }
 
 /**
...
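Callers are unaffected: the wakeref is taken inside intel_gt_mcr_lock() and dropped in intel_gt_mcr_unlock(), so any steered access bracketed by the pair now runs with the GT guaranteed awake. Illustrative usage (the register and steering target are placeholders, not from this patch):

    unsigned long flags;
    u32 val;

    intel_gt_mcr_lock(gt, &flags);          /* takes FORCEWAKE_GT on Xe_LPG+ */
    val = intel_gt_mcr_read(gt, SOME_MCR_REG, group, instance);
    intel_gt_mcr_unlock(gt, flags);         /* releases the wakeref */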
@@ -482,8 +482,7 @@ static void oa_report_id_clear(struct i915_perf_stream *stream, u32 *report)
 static bool oa_report_ctx_invalid(struct i915_perf_stream *stream, void *report)
 {
 	return !(oa_report_id(stream, report) &
-		 stream->perf->gen8_valid_ctx_bit) &&
-	       GRAPHICS_VER(stream->perf->i915) <= 11;
+		 stream->perf->gen8_valid_ctx_bit);
 }
@@ -5106,6 +5105,7 @@ static void i915_perf_init_info(struct drm_i915_private *i915)
 		perf->gen8_valid_ctx_bit = BIT(16);
 		break;
 	case 12:
+		perf->gen8_valid_ctx_bit = BIT(16);
 		/*
 		 * Calculate offset at runtime in oa_pin_context for gen12 and
 		 * cache the value in perf->ctx_oactxctrl_offset.
...
@@ -832,9 +832,18 @@ static void i915_pmu_event_start(struct perf_event *event, int flags)
 
 static void i915_pmu_event_stop(struct perf_event *event, int flags)
 {
+	struct drm_i915_private *i915 =
+		container_of(event->pmu, typeof(*i915), pmu.base);
+	struct i915_pmu *pmu = &i915->pmu;
+
+	if (pmu->closed)
+		goto out;
+
 	if (flags & PERF_EF_UPDATE)
 		i915_pmu_event_read(event);
 
 	i915_pmu_disable(event);
+
+out:
 	event->hw.state = PERF_HES_STOPPED;
 }
...
@@ -5,5 +5,7 @@ config DRM_LOGICVC
 	select DRM_KMS_HELPER
 	select DRM_KMS_DMA_HELPER
 	select DRM_GEM_DMA_HELPER
+	select REGMAP
+	select REGMAP_MMIO
 	help
 	  DRM display driver for the logiCVC programmable logic block from Xylon