Commit e8aaca57 authored by Dave Airlie

Merge tag 'drm-intel-next-fixes-2024-01-11' of git://anongit.freedesktop.org/drm/drm-intel into drm-next

- Fixes for kernel-doc warnings enforced in linux-next
- Another build warning fix for string formatting of intel_wakeref_t
- Display fixes for DP DSC BPC and C20 PLL state verification
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/ZZ_IOcLiDG9LJafO@jlahtine-mobl.ger.corp.intel.com
parents b76c01f1 d505a16e
...
@@ -3067,24 +3067,29 @@ static void intel_c20pll_state_verify(const struct intel_crtc_state *state,
 {
 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
 	const struct intel_c20pll_state *mpll_sw_state = &state->cx0pll_state.c20;
-	bool use_mplla;
+	bool sw_use_mpllb = mpll_sw_state->tx[0] & C20_PHY_USE_MPLLB;
+	bool hw_use_mpllb = mpll_hw_state->tx[0] & C20_PHY_USE_MPLLB;
 	int i;
 
-	use_mplla = intel_c20_use_mplla(mpll_hw_state->clock);
-	if (use_mplla) {
-		for (i = 0; i < ARRAY_SIZE(mpll_sw_state->mplla); i++) {
-			I915_STATE_WARN(i915, mpll_hw_state->mplla[i] != mpll_sw_state->mplla[i],
-					"[CRTC:%d:%s] mismatch in C20MPLLA: Register[%d] (expected 0x%04x, found 0x%04x)",
-					crtc->base.base.id, crtc->base.name, i,
-					mpll_sw_state->mplla[i], mpll_hw_state->mplla[i]);
-		}
-	} else {
+	I915_STATE_WARN(i915, sw_use_mpllb != hw_use_mpllb,
+			"[CRTC:%d:%s] mismatch in C20: Register MPLLB selection (expected %d, found %d)",
+			crtc->base.base.id, crtc->base.name,
+			sw_use_mpllb, hw_use_mpllb);
+
+	if (hw_use_mpllb) {
 		for (i = 0; i < ARRAY_SIZE(mpll_sw_state->mpllb); i++) {
 			I915_STATE_WARN(i915, mpll_hw_state->mpllb[i] != mpll_sw_state->mpllb[i],
 					"[CRTC:%d:%s] mismatch in C20MPLLB: Register[%d] (expected 0x%04x, found 0x%04x)",
 					crtc->base.base.id, crtc->base.name, i,
 					mpll_sw_state->mpllb[i], mpll_hw_state->mpllb[i]);
 		}
+	} else {
+		for (i = 0; i < ARRAY_SIZE(mpll_sw_state->mplla); i++) {
+			I915_STATE_WARN(i915, mpll_hw_state->mplla[i] != mpll_sw_state->mplla[i],
+					"[CRTC:%d:%s] mismatch in C20MPLLA: Register[%d] (expected 0x%04x, found 0x%04x)",
+					crtc->base.base.id, crtc->base.name, i,
+					mpll_sw_state->mplla[i], mpll_hw_state->mplla[i]);
+		}
 	}
 
 	for (i = 0; i < ARRAY_SIZE(mpll_sw_state->tx); i++) {
...
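Note on the hunk above: the verifier used to re-derive the MPLLA/MPLLB selection from the hardware clock via intel_c20_use_mplla(), so a disagreement between the software and hardware selection could be missed or the wrong register bank could be compared. The fix reads the MPLLB-select bit from tx[0] on both sides, warns on any mismatch, and only then compares the bank the hardware actually selected. A self-contained sketch of that compare-like-with-like idea follows; the types, the bit value, and the helper name are stand-ins for illustration, not the i915 code.

/* Illustrative sketch only: both SW and HW state expose the same
 * MPLLB-select bit, so the verifier compares selections directly
 * instead of re-deriving one of them from the clock rate.
 */
#include <stdbool.h>
#include <stdio.h>

#define C20_PHY_USE_MPLLB (1 << 0)		/* placeholder bit, not the real value */

struct c20pll_state { unsigned int tx[3]; };	/* stand-in for intel_c20pll_state */

static bool uses_mpllb(const struct c20pll_state *s)
{
	return s->tx[0] & C20_PHY_USE_MPLLB;
}

int main(void)
{
	struct c20pll_state sw = { .tx = { C20_PHY_USE_MPLLB } };
	struct c20pll_state hw = { .tx = { 0 } };

	if (uses_mpllb(&sw) != uses_mpllb(&hw))
		printf("mismatch in C20: MPLLB selection (expected %d, found %d)\n",
		       uses_mpllb(&sw), uses_mpllb(&hw));
	return 0;
}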
@@ -405,8 +405,8 @@ print_async_put_domains_state(struct i915_power_domains *power_domains)
 					     struct drm_i915_private,
 					     display.power.domains);
 
-	drm_dbg(&i915->drm, "async_put_wakeref %lu\n",
-		power_domains->async_put_wakeref);
+	drm_dbg(&i915->drm, "async_put_wakeref: %s\n",
+		str_yes_no(power_domains->async_put_wakeref));
 
 	print_power_domains(power_domains, "async_put_domains[0]",
 			    &power_domains->async_put_domains[0]);
...
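Note on the hunk above: this is the build-warning fix called out in the commit message. The underlying type of intel_wakeref_t is not guaranteed to match a %lu format across configurations, and the debug line only needs to say whether a wakeref is held, so it now prints str_yes_no() of the value instead. A minimal standalone sketch of the same idiom (userspace-style, names mirrored from <linux/string_helpers.h> purely for illustration):

/* str_yes_no() maps any truthy value to a stable string, which avoids
 * tying the format string to the variable's exact integer/handle type.
 */
#include <stdbool.h>
#include <stdio.h>

static const char *str_yes_no(bool v)
{
	return v ? "yes" : "no";	/* same behaviour as the kernel helper */
}

int main(void)
{
	void *wakeref = (void *)0x1;	/* stand-in for an opaque wakeref */

	printf("async_put_wakeref: %s\n", str_yes_no(wakeref != NULL));
	return 0;
}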
@@ -2101,7 +2101,7 @@ static int intel_dp_dsc_compute_pipe_bpp(struct intel_dp *intel_dp,
 		}
 	}
 
-	dsc_max_bpc = intel_dp_dsc_min_src_input_bpc(i915);
+	dsc_max_bpc = intel_dp_dsc_max_src_input_bpc(i915);
 	if (!dsc_max_bpc)
 		return -EINVAL;
 
...
@@ -3319,11 +3319,11 @@ void intel_psr_connector_debugfs_add(struct intel_connector *connector)
 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
 	struct dentry *root = connector->base.debugfs_entry;
 
-	if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP) {
-		if (!(HAS_DP20(i915) &&
-		      connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort))
-			return;
-	}
+	/* TODO: Add support for MST connectors as well. */
+	if ((connector->base.connector_type != DRM_MODE_CONNECTOR_eDP &&
+	     connector->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort) ||
+	    connector->mst_port)
+		return;
 
 	debugfs_create_file("i915_psr_sink_status", 0444, root,
 			    connector, &i915_psr_sink_status_fops);
...
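Note on the hunk above: the rewritten guard is equivalent to "create the PSR debugfs entries only for eDP or DisplayPort connectors, and skip MST ports for now" (the new TODO marks MST support as still to come). Compared with the old nested form, the separate HAS_DP20() requirement for DisplayPort connectors is dropped, so the early return now depends only on the connector type and on connector->mst_port.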
@@ -412,9 +412,9 @@ struct i915_gem_context {
 	/** @stale: tracks stale engines to be destroyed */
 	struct {
-		/** @lock: guards engines */
+		/** @stale.lock: guards engines */
 		spinlock_t lock;
 
-		/** @engines: list of stale engines */
+		/** @stale.engines: list of stale engines */
 		struct list_head engines;
 	} stale;
 };
...
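Note on the hunk above and the ones that follow: these are all the kernel-doc fixes mentioned in the commit message (warnings now enforced in linux-next). kernel-doc expects members of a nested struct to be documented with their parent member as a prefix, so @lock becomes @stale.lock here, and likewise @intf.gem_obj, @submission_state.lock, @timestamp.gt_stamp, and @oa_buffer.head in the later hunks. A minimal standalone example of the expected convention, using a made-up struct that is not from i915:

/**
 * struct widget - example container (illustrative only)
 * @pending: bookkeeping for queued work
 * @pending.count: number of queued items
 * @pending.head: index of the oldest queued item
 */
struct widget {
	struct {
		int count;
		int head;
	} pending;
};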
@@ -21,8 +21,11 @@ struct mei_aux_device;
 /**
  * struct intel_gsc - graphics security controller
  *
- * @gem_obj: scratch memory GSC operations
- * @intf : gsc interface
+ * @intf: gsc interface
+ * @intf.adev: MEI aux. device for this @intf
+ * @intf.gem_obj: scratch memory GSC operations
+ * @intf.irq: IRQ for this device (%-1 for no IRQ)
+ * @intf.id: this interface's id number/index
  */
 struct intel_gsc {
 	struct intel_gsc_intf {
...
@@ -105,61 +105,67 @@ struct intel_guc {
 	 */
 	struct {
 		/**
-		 * @lock: protects everything in submission_state,
-		 * ce->guc_id.id, and ce->guc_id.ref when transitioning in and
-		 * out of zero
+		 * @submission_state.lock: protects everything in
+		 * submission_state, ce->guc_id.id, and ce->guc_id.ref
+		 * when transitioning in and out of zero
		 */
 		spinlock_t lock;
 		/**
-		 * @guc_ids: used to allocate new guc_ids, single-lrc
+		 * @submission_state.guc_ids: used to allocate new
+		 * guc_ids, single-lrc
 		 */
 		struct ida guc_ids;
 		/**
-		 * @num_guc_ids: Number of guc_ids, selftest feature to be able
-		 * to reduce this number while testing.
+		 * @submission_state.num_guc_ids: Number of guc_ids, selftest
+		 * feature to be able to reduce this number while testing.
 		 */
 		int num_guc_ids;
 		/**
-		 * @guc_ids_bitmap: used to allocate new guc_ids, multi-lrc
+		 * @submission_state.guc_ids_bitmap: used to allocate
+		 * new guc_ids, multi-lrc
 		 */
 		unsigned long *guc_ids_bitmap;
 		/**
-		 * @guc_id_list: list of intel_context with valid guc_ids but no
-		 * refs
+		 * @submission_state.guc_id_list: list of intel_context
+		 * with valid guc_ids but no refs
 		 */
 		struct list_head guc_id_list;
 		/**
-		 * @guc_ids_in_use: Number single-lrc guc_ids in use
+		 * @submission_state.guc_ids_in_use: Number single-lrc
+		 * guc_ids in use
 		 */
 		unsigned int guc_ids_in_use;
 		/**
-		 * @destroyed_contexts: list of contexts waiting to be destroyed
-		 * (deregistered with the GuC)
+		 * @submission_state.destroyed_contexts: list of contexts
+		 * waiting to be destroyed (deregistered with the GuC)
 		 */
 		struct list_head destroyed_contexts;
 		/**
-		 * @destroyed_worker: worker to deregister contexts, need as we
-		 * need to take a GT PM reference and can't from destroy
-		 * function as it might be in an atomic context (no sleeping)
+		 * @submission_state.destroyed_worker: worker to deregister
+		 * contexts, need as we need to take a GT PM reference and
+		 * can't from destroy function as it might be in an atomic
+		 * context (no sleeping)
 		 */
 		struct work_struct destroyed_worker;
 		/**
-		 * @reset_fail_worker: worker to trigger a GT reset after an
-		 * engine reset fails
+		 * @submission_state.reset_fail_worker: worker to trigger
+		 * a GT reset after an engine reset fails
 		 */
 		struct work_struct reset_fail_worker;
 		/**
-		 * @reset_fail_mask: mask of engines that failed to reset
+		 * @submission_state.reset_fail_mask: mask of engines that
+		 * failed to reset
 		 */
 		intel_engine_mask_t reset_fail_mask;
 		/**
-		 * @sched_disable_delay_ms: schedule disable delay, in ms, for
-		 * contexts
+		 * @submission_state.sched_disable_delay_ms: schedule
+		 * disable delay, in ms, for contexts
 		 */
 		unsigned int sched_disable_delay_ms;
 		/**
-		 * @sched_disable_gucid_threshold: threshold of min remaining available
-		 * guc_ids before we start bypassing the schedule disable delay
+		 * @submission_state.sched_disable_gucid_threshold:
+		 * threshold of min remaining available guc_ids before
+		 * we start bypassing the schedule disable delay
 		 */
 		unsigned int sched_disable_gucid_threshold;
 	} submission_state;
...
@@ -243,37 +249,40 @@
 	 */
 	struct {
 		/**
-		 * @lock: Lock protecting the below fields and the engine stats.
+		 * @timestamp.lock: Lock protecting the below fields and
+		 * the engine stats.
 		 */
 		spinlock_t lock;
 
 		/**
-		 * @gt_stamp: 64 bit extended value of the GT timestamp.
+		 * @timestamp.gt_stamp: 64-bit extended value of the GT
+		 * timestamp.
 		 */
 		u64 gt_stamp;
 
 		/**
-		 * @ping_delay: Period for polling the GT timestamp for
-		 * overflow.
+		 * @timestamp.ping_delay: Period for polling the GT
+		 * timestamp for overflow.
 		 */
 		unsigned long ping_delay;
 
 		/**
-		 * @work: Periodic work to adjust GT timestamp, engine and
-		 * context usage for overflows.
+		 * @timestamp.work: Periodic work to adjust GT timestamp,
+		 * engine and context usage for overflows.
 		 */
 		struct delayed_work work;
 
 		/**
-		 * @shift: Right shift value for the gpm timestamp
+		 * @timestamp.shift: Right shift value for the gpm timestamp
 		 */
 		u32 shift;
 
 		/**
-		 * @last_stat_jiffies: jiffies at last actual stats collection time
-		 * We use this timestamp to ensure we don't oversample the
-		 * stats because runtime power management events can trigger
-		 * stats collection at much higher rates than required.
+		 * @timestamp.last_stat_jiffies: jiffies at last actual
+		 * stats collection time. We use this timestamp to ensure
+		 * we don't oversample the stats because runtime power
+		 * management events can trigger stats collection at much
+		 * higher rates than required.
 		 */
 		unsigned long last_stat_jiffies;
 	} timestamp;
...
@@ -291,7 +291,8 @@ struct i915_perf_stream {
 		int size_exponent;
 
 		/**
-		 * @ptr_lock: Locks reads and writes to all head/tail state
+		 * @oa_buffer.ptr_lock: Locks reads and writes to all
+		 * head/tail state
 		 *
 		 * Consider: the head and tail pointer state needs to be read
 		 * consistently from a hrtimer callback (atomic context) and
@@ -313,7 +314,8 @@ struct i915_perf_stream {
 		spinlock_t ptr_lock;
 
 		/**
-		 * @head: Although we can always read back the head pointer register,
+		 * @oa_buffer.head: Although we can always read back
+		 * the head pointer register,
 		 * we prefer to avoid trusting the HW state, just to avoid any
 		 * risk that some hardware condition could * somehow bump the
 		 * head pointer unpredictably and cause us to forward the wrong
@@ -322,7 +324,8 @@ struct i915_perf_stream {
 		u32 head;
 
 		/**
-		 * @tail: The last verified tail that can be read by userspace.
+		 * @oa_buffer.tail: The last verified tail that can be
+		 * read by userspace.
 		 */
 		u32 tail;
 	} oa_buffer;
...