Commit 6910b676 authored by Linus Torvalds

Merge tag 'drm-fixes-2020-11-27-1' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "Unfortunately this has a bit of thanksgiving stuffing in it, as it a
  bit larger (at least the vc4 patches) than I like at this point in
  time.

  The main thing is it has a bunch of regression fixes for reports from
  the last couple of weeks: ast, nouveau and the amdgpu ttm init fix,
  along with the usual selection of amdgpu and i915 fixes.

  The vc4 fixes are a few, but they are fixes. The nastiest one is for
  when you have 2.4GHz WiFi and an HDMI signal with a clock in that
  range: if there isn't enough shielding, the two interfere. The fix
  adjusts the mode clock to try and avoid the WiFi channels in that
  case (the numbers are worked through right after this message).

  Hopefully you can merge this between turkey slices, and next week
  should be quieter.

  ast:
   - LUT loading regression fix

  nouveau:
   - relocations regression fix

  amdgpu:
   - ttm init oops fix
   - Runtime pm fix
   - SI UVD suspend/resume fix
   - HDCP fix for headless cards
   - Sienna Cichlid golden register update

  i915:
   - Fix Perf/OA workaround register corruption (Lionel)
   - Correct a comment statement in GVT (Yan)
   - Fix GT enable/disable interrupts, including a race condition that
     prevented the GPU from going idle (Chris)
   - Free stale request on destroying the virtual engine (Chris)

  exynos:
   - config dependency fix

  mediatek:
   - unused var removal
   - horizontal front/back porch formula fix

  vc4:
   - wifi and hdmi interference fix
   - mode rejection fixes
   - use after free fix
   - cleanup some code"
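
For the curious, the arithmetic behind that vc4 clamp works out as follows (a
sketch only: the channel bounds and the 238560 kHz clamp come from the vc4
patch below, while the 241500 kHz figure is the usual CVT-RB pixel clock for
2560x1440@60, used here purely as an illustration):

    #include <stdio.h>

    int main(void)
    {
        /* WiFi channel 1 bounds in Hz, from the vc4 patch below. */
        const unsigned long long wifi_ch1_min = 2400000000ULL;
        const unsigned long long wifi_ch1_max = 2422000000ULL;

        /* 2560x1440@60 pixel clock in kHz (illustrative CVT-RB value). */
        unsigned long long clock = 241500;
        /* The HDMI TMDS bit rate is 10x the pixel rate. */
        unsigned long long tmds = clock * 1000ULL * 10;

        printf("1440p@60: %llu Hz, in channel 1? %d\n", tmds,
               tmds >= wifi_ch1_min && tmds <= wifi_ch1_max); /* 2.415 GHz -> 1 */

        clock = 238560; /* the clamp the patch applies */
        tmds = clock * 1000ULL * 10;
        printf("clamped:  %llu Hz, in channel 1? %d\n", tmds,
               tmds >= wifi_ch1_min && tmds <= wifi_ch1_max); /* 2.3856 GHz -> 0 */

        return 0;
    }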

* tag 'drm-fixes-2020-11-27-1' of git://anongit.freedesktop.org/drm/drm: (28 commits)
  drm/nouveau: fix relocations applying logic and a double-free
  drm/ast: Reload gamma LUT after changing primary plane's color format
  drm/amdgpu: Fix size calculation when init onchip memory
  drm/amdgpu: update golden setting for sienna_cichlid
  drm/amd/display: Avoid HDCP initialization in devices without output
  drm/i915/gt: Free stale request on destroying the virtual engine
  drm/i915/gt: Don't cancel the interrupt shadow too early
  drm/i915/gt: Track signaled breadcrumbs outside of the breadcrumb spinlock
  drm/amdgpu: fix a page fault
  drm/amdgpu: fix SI UVD firmware validate resume fail
  drm/amd/amdgpu: fix null pointer in runtime pm
  drm/i915/gt: Defer enabling the breadcrumb interrupt to after submission
  drm/i915/gvt: correct a false comment of flag F_UNALIGN
  drm/i915/perf: workaround register corruption in OATAILPTR
  drm/vc4: kms: Don't disable the muxing of an active CRTC
  drm/vc4: kms: Store the unassigned channel list in the state
  drm/exynos: depend on COMMON_CLK to fix compile tests
  drm/mediatek: dsi: Modify horizontal front/back porch byte formula
  drm/vc4: hdmi: Disable Wifi Frequencies
  dt-bindings: display: Add a property to deal with WiFi coexistence
  ...
parents 99c710c4 9595930d
@@ -76,6 +76,12 @@ properties:
   resets:
     maxItems: 1
 
+  wifi-2.4ghz-coexistence:
+    type: boolean
+    description: >
+      Should the pixel frequencies in the WiFi frequencies range be
+      avoided?
+
 required:
   - compatible
   - reg
...
@@ -4852,7 +4852,7 @@ int amdgpu_device_baco_enter(struct drm_device *dev)
 	if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
 		return -ENOTSUPP;
 
-	if (ras && ras->supported)
+	if (ras && ras->supported && adev->nbio.funcs->enable_doorbell_interrupt)
 		adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
 
 	return amdgpu_dpm_baco_enter(adev);
@@ -4871,7 +4871,7 @@ int amdgpu_device_baco_exit(struct drm_device *dev)
 	if (ret)
 		return ret;
 
-	if (ras && ras->supported)
+	if (ras && ras->supported && adev->nbio.funcs->enable_doorbell_interrupt)
 		adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
 
 	return 0;
...
@@ -69,10 +69,10 @@ static int amdgpu_ttm_backend_bind(struct ttm_bo_device *bdev,
 
 static int amdgpu_ttm_init_on_chip(struct amdgpu_device *adev,
 				   unsigned int type,
-				   uint64_t size)
+				   uint64_t size_in_page)
 {
 	return ttm_range_man_init(&adev->mman.bdev, type,
-				  false, size >> PAGE_SHIFT);
+				  false, size_in_page);
 }
 
 /**
...
@@ -67,6 +67,7 @@ struct amdgpu_uvd {
 	unsigned harvest_config;
 	/* store image width to adjust nb memory state */
 	unsigned decode_image_width;
+	uint32_t keyselect;
 };
 
 int amdgpu_uvd_sw_init(struct amdgpu_device *adev);
...
@@ -3105,6 +3105,8 @@ static const struct soc15_reg_golden golden_settings_gc_10_3[] =
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000280),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x00800000),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_EXCEPTION_CONTROL, 0x7fff0f1f, 0x00b80000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0 ,mmGCEA_SDP_TAG_RESERVE0, 0xffffffff, 0x10100100),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCEA_SDP_TAG_RESERVE1, 0xffffffff, 0x17000088),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL_Sienna_Cichlid, 0x1ff1ffff, 0x00000500),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGE_PC_CNTL, 0x003fffff, 0x00280400),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2A_ADDR_MATCH_MASK, 0xffffffff, 0xffffffcf),
...
@@ -277,15 +277,8 @@ static void uvd_v3_1_mc_resume(struct amdgpu_device *adev)
  */
 static int uvd_v3_1_fw_validate(struct amdgpu_device *adev)
 {
-	void *ptr;
-	uint32_t ucode_len, i;
-	uint32_t keysel;
-
-	ptr = adev->uvd.inst[0].cpu_addr;
-	ptr += 192 + 16;
-	memcpy(&ucode_len, ptr, 4);
-	ptr += ucode_len;
-	memcpy(&keysel, ptr, 4);
+	int i;
+	uint32_t keysel = adev->uvd.keyselect;
 
 	WREG32(mmUVD_FW_START, keysel);
@@ -550,6 +543,8 @@ static int uvd_v3_1_sw_init(void *handle)
 	struct amdgpu_ring *ring;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	int r;
+	void *ptr;
+	uint32_t ucode_len;
 
 	/* UVD TRAP */
 	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 124, &adev->uvd.inst->irq);
@@ -571,6 +566,13 @@ static int uvd_v3_1_sw_init(void *handle)
 	if (r)
 		return r;
 
+	/* Retrieval firmware validate key */
+	ptr = adev->uvd.inst[0].cpu_addr;
+	ptr += 192 + 16;
+	memcpy(&ucode_len, ptr, 4);
+	ptr += ucode_len;
+	memcpy(&adev->uvd.keyselect, ptr, 4);
+
 	r = amdgpu_uvd_entity_init(adev);
 
 	return r;
...
@@ -1041,7 +1041,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 	amdgpu_dm_init_color_mod();
 
 #ifdef CONFIG_DRM_AMD_DC_HDCP
-	if (adev->asic_type >= CHIP_RAVEN) {
+	if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
 		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
 
 		if (!adev->dm.hdcp_workqueue)
...
@@ -742,7 +742,6 @@ static void ast_crtc_dpms(struct drm_crtc *crtc, int mode)
 	case DRM_MODE_DPMS_SUSPEND:
 		if (ast->tx_chip_type == AST_TX_DP501)
 			ast_set_dp501_video_output(crtc->dev, 1);
-		ast_crtc_load_lut(ast, crtc);
 		break;
 	case DRM_MODE_DPMS_OFF:
 		if (ast->tx_chip_type == AST_TX_DP501)
@@ -777,6 +776,21 @@ static int ast_crtc_helper_atomic_check(struct drm_crtc *crtc,
 	return 0;
 }
 
+static void
+ast_crtc_helper_atomic_flush(struct drm_crtc *crtc, struct drm_crtc_state *old_crtc_state)
+{
+	struct ast_private *ast = to_ast_private(crtc->dev);
+	struct ast_crtc_state *ast_crtc_state = to_ast_crtc_state(crtc->state);
+	struct ast_crtc_state *old_ast_crtc_state = to_ast_crtc_state(old_crtc_state);
+
+	/*
+	 * The gamma LUT has to be reloaded after changing the primary
+	 * plane's color format.
+	 */
+	if (old_ast_crtc_state->format != ast_crtc_state->format)
+		ast_crtc_load_lut(ast, crtc);
+}
+
 static void
 ast_crtc_helper_atomic_enable(struct drm_crtc *crtc,
 			      struct drm_crtc_state *old_crtc_state)
@@ -830,6 +844,7 @@ ast_crtc_helper_atomic_disable(struct drm_crtc *crtc,
 
 static const struct drm_crtc_helper_funcs ast_crtc_helper_funcs = {
 	.atomic_check = ast_crtc_helper_atomic_check,
+	.atomic_flush = ast_crtc_helper_atomic_flush,
 	.atomic_enable = ast_crtc_helper_atomic_enable,
 	.atomic_disable = ast_crtc_helper_atomic_disable,
 };
...
 # SPDX-License-Identifier: GPL-2.0-only
 config DRM_EXYNOS
 	tristate "DRM Support for Samsung SoC Exynos Series"
-	depends on OF && DRM && (ARCH_S3C64XX || ARCH_S5PV210 || ARCH_EXYNOS || ARCH_MULTIPLATFORM || COMPILE_TEST)
+	depends on OF && DRM && COMMON_CLK
+	depends on ARCH_S3C64XX || ARCH_S5PV210 || ARCH_EXYNOS || ARCH_MULTIPLATFORM || COMPILE_TEST
 	depends on MMU
 	select DRM_KMS_HELPER
 	select VIDEOMODE_HELPERS
...
@@ -30,18 +30,21 @@
 #include "i915_trace.h"
 #include "intel_breadcrumbs.h"
 #include "intel_context.h"
+#include "intel_engine_pm.h"
 #include "intel_gt_pm.h"
 #include "intel_gt_requests.h"
 
-static void irq_enable(struct intel_engine_cs *engine)
+static bool irq_enable(struct intel_engine_cs *engine)
 {
 	if (!engine->irq_enable)
-		return;
+		return false;
 
 	/* Caller disables interrupts */
 	spin_lock(&engine->gt->irq_lock);
 	engine->irq_enable(engine);
 	spin_unlock(&engine->gt->irq_lock);
+
+	return true;
 }
 
 static void irq_disable(struct intel_engine_cs *engine)
@@ -57,12 +60,11 @@ static void irq_disable(struct intel_engine_cs *engine)
 static void __intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b)
 {
 	lockdep_assert_held(&b->irq_lock);
 
-	if (!b->irq_engine || b->irq_armed)
-		return;
-
-	if (!intel_gt_pm_get_if_awake(b->irq_engine->gt))
+	/*
+	 * Since we are waiting on a request, the GPU should be busy
+	 * and should have its own rpm reference.
+	 */
+	if (GEM_WARN_ON(!intel_gt_pm_get_if_awake(b->irq_engine->gt)))
 		return;
 
 	/*
@@ -73,25 +75,24 @@ static void __intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b)
 	 */
 	WRITE_ONCE(b->irq_armed, true);
 
-	/*
-	 * Since we are waiting on a request, the GPU should be busy
-	 * and should have its own rpm reference. This is tracked
-	 * by i915->gt.awake, we can forgo holding our own wakref
-	 * for the interrupt as before i915->gt.awake is released (when
-	 * the driver is idle) we disarm the breadcrumbs.
-	 */
-	if (!b->irq_enabled++)
-		irq_enable(b->irq_engine);
+	/* Requests may have completed before we could enable the interrupt. */
+	if (!b->irq_enabled++ && irq_enable(b->irq_engine))
+		irq_work_queue(&b->irq_work);
 }
 
-static void __intel_breadcrumbs_disarm_irq(struct intel_breadcrumbs *b)
+static void intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b)
 {
-	lockdep_assert_held(&b->irq_lock);
-
-	if (!b->irq_engine || !b->irq_armed)
+	if (!b->irq_engine)
 		return;
 
+	spin_lock(&b->irq_lock);
+	if (!b->irq_armed)
+		__intel_breadcrumbs_arm_irq(b);
+	spin_unlock(&b->irq_lock);
+}
+
+static void __intel_breadcrumbs_disarm_irq(struct intel_breadcrumbs *b)
+{
 	GEM_BUG_ON(!b->irq_enabled);
 	if (!--b->irq_enabled)
 		irq_disable(b->irq_engine);
@@ -105,8 +106,6 @@ static void add_signaling_context(struct intel_breadcrumbs *b,
 {
 	intel_context_get(ce);
 	list_add_tail(&ce->signal_link, &b->signalers);
-	if (list_is_first(&ce->signal_link, &b->signalers))
-		__intel_breadcrumbs_arm_irq(b);
 }
 
 static void remove_signaling_context(struct intel_breadcrumbs *b,
@@ -174,34 +173,65 @@ static void add_retire(struct intel_breadcrumbs *b, struct intel_timeline *tl)
 		intel_engine_add_retire(b->irq_engine, tl);
 }
 
-static bool __signal_request(struct i915_request *rq, struct list_head *signals)
+static bool __signal_request(struct i915_request *rq)
 {
-	clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
-
 	if (!__dma_fence_signal(&rq->fence)) {
 		i915_request_put(rq);
 		return false;
 	}
 
-	list_add_tail(&rq->signal_link, signals);
 	return true;
 }
 
+static struct llist_node *
+slist_add(struct llist_node *node, struct llist_node *head)
+{
+	node->next = head;
+	return node;
+}
+
 static void signal_irq_work(struct irq_work *work)
 {
 	struct intel_breadcrumbs *b = container_of(work, typeof(*b), irq_work);
 	const ktime_t timestamp = ktime_get();
+	struct llist_node *signal, *sn;
 	struct intel_context *ce, *cn;
 	struct list_head *pos, *next;
-	LIST_HEAD(signal);
+
+	signal = NULL;
+	if (unlikely(!llist_empty(&b->signaled_requests)))
+		signal = llist_del_all(&b->signaled_requests);
 
 	spin_lock(&b->irq_lock);
 
-	if (list_empty(&b->signalers))
+	/*
+	 * Keep the irq armed until the interrupt after all listeners are gone.
+	 *
+	 * Enabling/disabling the interrupt is rather costly, roughly a couple
+	 * of hundred microseconds. If we are proactive and enable/disable
+	 * the interrupt around every request that wants a breadcrumb, we
+	 * quickly drown in the extra orders of magnitude of latency imposed
+	 * on request submission.
+	 *
+	 * So we try to be lazy, and keep the interrupts enabled until no
+	 * more listeners appear within a breadcrumb interrupt interval (that
+	 * is until a request completes that no one cares about). The
+	 * observation is that listeners come in batches, and will often
+	 * listen to a bunch of requests in succession. Though note on icl+,
+	 * interrupts are always enabled due to concerns with rc6 being
+	 * dysfunctional with per-engine interrupt masking.
+	 *
+	 * We also try to avoid raising too many interrupts, as they may
+	 * be generated by userspace batches and it is unfortunately rather
+	 * too easy to drown the CPU under a flood of GPU interrupts. Thus
+	 * whenever no one appears to be listening, we turn off the interrupts.
+	 * Fewer interrupts should conserve power -- at the very least, fewer
+	 * interrupt draw less ire from other users of the system and tools
+	 * like powertop.
+	 */
+	if (!signal && b->irq_armed && list_empty(&b->signalers))
 		__intel_breadcrumbs_disarm_irq(b);
 
-	list_splice_init(&b->signaled_requests, &signal);
-
 	list_for_each_entry_safe(ce, cn, &b->signalers, signal_link) {
 		GEM_BUG_ON(list_empty(&ce->signals));
@@ -218,7 +248,10 @@ static void signal_irq_work(struct irq_work *work)
 			 * spinlock as the callback chain may end up adding
 			 * more signalers to the same context or engine.
 			 */
-			__signal_request(rq, &signal);
+			clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
+			if (__signal_request(rq))
+				/* We own signal_node now, xfer to local list */
+				signal = slist_add(&rq->signal_node, signal);
 		}
 
 		/*
@@ -238,9 +271,9 @@ static void signal_irq_work(struct irq_work *work)
 
 	spin_unlock(&b->irq_lock);
 
-	list_for_each_safe(pos, next, &signal) {
+	llist_for_each_safe(signal, sn, signal) {
 		struct i915_request *rq =
-			list_entry(pos, typeof(*rq), signal_link);
+			llist_entry(signal, typeof(*rq), signal_node);
 		struct list_head cb_list;
 
 		spin_lock(&rq->lock);
@@ -251,6 +284,9 @@ static void signal_irq_work(struct irq_work *work)
 
 		i915_request_put(rq);
 	}
+
+	if (!READ_ONCE(b->irq_armed) && !list_empty(&b->signalers))
+		intel_breadcrumbs_arm_irq(b);
 }
 
 struct intel_breadcrumbs *
@@ -264,7 +300,7 @@ intel_breadcrumbs_create(struct intel_engine_cs *irq_engine)
 
 	spin_lock_init(&b->irq_lock);
 	INIT_LIST_HEAD(&b->signalers);
-	INIT_LIST_HEAD(&b->signaled_requests);
+	init_llist_head(&b->signaled_requests);
 
 	init_irq_work(&b->irq_work, signal_irq_work);
@@ -292,21 +328,22 @@ void intel_breadcrumbs_reset(struct intel_breadcrumbs *b)
 
 void intel_breadcrumbs_park(struct intel_breadcrumbs *b)
 {
-	unsigned long flags;
-
-	if (!READ_ONCE(b->irq_armed))
-		return;
-
-	spin_lock_irqsave(&b->irq_lock, flags);
-	__intel_breadcrumbs_disarm_irq(b);
-	spin_unlock_irqrestore(&b->irq_lock, flags);
-
-	if (!list_empty(&b->signalers))
-		irq_work_queue(&b->irq_work);
+	/* Kick the work once more to drain the signalers */
+	irq_work_sync(&b->irq_work);
+	while (unlikely(READ_ONCE(b->irq_armed))) {
+		local_irq_disable();
+		signal_irq_work(&b->irq_work);
+		local_irq_enable();
+		cond_resched();
+	}
+	GEM_BUG_ON(!list_empty(&b->signalers));
 }
 
 void intel_breadcrumbs_free(struct intel_breadcrumbs *b)
 {
+	irq_work_sync(&b->irq_work);
+
+	GEM_BUG_ON(!list_empty(&b->signalers));
+	GEM_BUG_ON(b->irq_armed);
+
 	kfree(b);
 }
@@ -327,7 +364,8 @@ static void insert_breadcrumb(struct i915_request *rq,
 	 * its signal completion.
 	 */
 	if (__request_completed(rq)) {
-		if (__signal_request(rq, &b->signaled_requests))
+		if (__signal_request(rq) &&
+		    llist_add(&rq->signal_node, &b->signaled_requests))
 			irq_work_queue(&b->irq_work);
 		return;
 	}
@@ -362,9 +400,12 @@ static void insert_breadcrumb(struct i915_request *rq,
 	GEM_BUG_ON(!check_signal_order(ce, rq));
 	set_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
 
-	/* Check after attaching to irq, interrupt may have already fired. */
-	if (__request_completed(rq))
-		irq_work_queue(&b->irq_work);
+	/*
+	 * Defer enabling the interrupt to after HW submission and recheck
+	 * the request as it may have completed and raised the interrupt as
+	 * we were attaching it into the lists.
+	 */
+	irq_work_queue(&b->irq_work);
 }
 
 bool i915_request_enable_breadcrumb(struct i915_request *rq)
...
@@ -35,7 +35,7 @@ struct intel_breadcrumbs {
 	struct intel_engine_cs *irq_engine;
 
 	struct list_head signalers;
-	struct list_head signaled_requests;
+	struct llist_head signaled_requests;
 
 	struct irq_work irq_work; /* for use from inside irq_lock */
...
@@ -182,6 +182,7 @@
 struct virtual_engine {
 	struct intel_engine_cs base;
 	struct intel_context context;
+	struct rcu_work rcu;
 
 	/*
 	 * We allow only a single request through the virtual engine at a time
@@ -5425,33 +5426,57 @@ static struct list_head *virtual_queue(struct virtual_engine *ve)
 	return &ve->base.execlists.default_priolist.requests[0];
 }
 
-static void virtual_context_destroy(struct kref *kref)
+static void rcu_virtual_context_destroy(struct work_struct *wrk)
 {
 	struct virtual_engine *ve =
-		container_of(kref, typeof(*ve), context.ref);
+		container_of(wrk, typeof(*ve), rcu.work);
 	unsigned int n;
 
-	GEM_BUG_ON(!list_empty(virtual_queue(ve)));
-	GEM_BUG_ON(ve->request);
 	GEM_BUG_ON(ve->context.inflight);
 
+	/* Preempt-to-busy may leave a stale request behind. */
+	if (unlikely(ve->request)) {
+		struct i915_request *old;
+
+		spin_lock_irq(&ve->base.active.lock);
+
+		old = fetch_and_zero(&ve->request);
+		if (old) {
+			GEM_BUG_ON(!i915_request_completed(old));
+			__i915_request_submit(old);
+			i915_request_put(old);
+		}
+
+		spin_unlock_irq(&ve->base.active.lock);
+	}
+
+	/*
+	 * Flush the tasklet in case it is still running on another core.
+	 *
+	 * This needs to be done before we remove ourselves from the siblings'
+	 * rbtrees as in the case it is running in parallel, it may reinsert
+	 * the rb_node into a sibling.
+	 */
+	tasklet_kill(&ve->base.execlists.tasklet);
+
 	/* Decouple ourselves from the siblings, no more access allowed. */
 	for (n = 0; n < ve->num_siblings; n++) {
 		struct intel_engine_cs *sibling = ve->siblings[n];
 		struct rb_node *node = &ve->nodes[sibling->id].rb;
-		unsigned long flags;
 
 		if (RB_EMPTY_NODE(node))
 			continue;
 
-		spin_lock_irqsave(&sibling->active.lock, flags);
+		spin_lock_irq(&sibling->active.lock);
 
 		/* Detachment is lazily performed in the execlists tasklet */
 		if (!RB_EMPTY_NODE(node))
 			rb_erase_cached(node, &sibling->execlists.virtual);
 
-		spin_unlock_irqrestore(&sibling->active.lock, flags);
+		spin_unlock_irq(&sibling->active.lock);
 	}
 	GEM_BUG_ON(__tasklet_is_scheduled(&ve->base.execlists.tasklet));
+	GEM_BUG_ON(!list_empty(virtual_queue(ve)));
 
 	if (ve->context.state)
 		__execlists_context_fini(&ve->context);
@@ -5464,6 +5489,27 @@ static void virtual_context_destroy(struct kref *kref)
 	kfree(ve);
 }
 
+static void virtual_context_destroy(struct kref *kref)
+{
+	struct virtual_engine *ve =
+		container_of(kref, typeof(*ve), context.ref);
+
+	GEM_BUG_ON(!list_empty(&ve->context.signals));
+
+	/*
+	 * When destroying the virtual engine, we have to be aware that
+	 * it may still be in use from an hardirq/softirq context causing
+	 * the resubmission of a completed request (background completion
+	 * due to preempt-to-busy). Before we can free the engine, we need
+	 * to flush the submission code and tasklets that are still potentially
+	 * accessing the engine. Flushing the tasklets requires process context,
+	 * and since we can guard the resubmit onto the engine with an RCU read
+	 * lock, we can delegate the free of the engine to an RCU worker.
+	 */
+	INIT_RCU_WORK(&ve->rcu, rcu_virtual_context_destroy);
+	queue_rcu_work(system_wq, &ve->rcu);
+}
+
 static void virtual_engine_initial_hint(struct virtual_engine *ve)
 {
 	int swp;
...
@@ -255,7 +255,7 @@ struct intel_gvt_mmio {
 #define F_CMD_ACCESS	(1 << 3)
 /* This reg has been accessed by a VM */
 #define F_ACCESSED	(1 << 4)
-/* This reg has been accessed through GPU commands */
+/* This reg could be accessed by unaligned address */
 #define F_UNALIGN	(1 << 6)
 /* This reg is in GVT's mmio save-restor list and in hardware
  * logical context image
...
@@ -909,8 +909,13 @@ static int gen8_oa_read(struct i915_perf_stream *stream,
 					     DRM_I915_PERF_RECORD_OA_REPORT_LOST);
 		if (ret)
 			return ret;
-		intel_uncore_write(uncore, oastatus_reg,
-				   oastatus & ~GEN8_OASTATUS_REPORT_LOST);
+
+		intel_uncore_rmw(uncore, oastatus_reg,
+				 GEN8_OASTATUS_COUNTER_OVERFLOW |
+				 GEN8_OASTATUS_REPORT_LOST,
+				 IS_GEN_RANGE(uncore->i915, 8, 10) ?
+				 (GEN8_OASTATUS_HEAD_POINTER_WRAP |
+				  GEN8_OASTATUS_TAIL_POINTER_WRAP) : 0);
 	}
 
 	return gen8_append_oa_reports(stream, buf, count, offset);
...
@@ -676,6 +676,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define  GEN7_OASTATUS2_MEM_SELECT_GGTT     (1 << 0) /* 0: PPGTT, 1: GGTT */
 
 #define GEN8_OASTATUS _MMIO(0x2b08)
+#define  GEN8_OASTATUS_TAIL_POINTER_WRAP    (1 << 17)
+#define  GEN8_OASTATUS_HEAD_POINTER_WRAP    (1 << 16)
 #define  GEN8_OASTATUS_OVERRUN_STATUS	    (1 << 3)
 #define  GEN8_OASTATUS_COUNTER_OVERFLOW     (1 << 2)
 #define  GEN8_OASTATUS_OABUFFER_OVERFLOW    (1 << 1)
...
@@ -176,7 +176,11 @@ struct i915_request {
 	struct intel_context *context;
 	struct intel_ring *ring;
 	struct intel_timeline __rcu *timeline;
-	struct list_head signal_link;
+
+	union {
+		struct list_head signal_link;
+		struct llist_node signal_node;
+	};
 
 	/*
 	 * The rcu epoch of when this request was allocated. Used to judiciously
...
@@ -522,15 +522,6 @@ static int mtk_dpi_set_display_mode(struct mtk_dpi *dpi,
 	return 0;
 }
 
-static void mtk_dpi_encoder_destroy(struct drm_encoder *encoder)
-{
-	drm_encoder_cleanup(encoder);
-}
-
-static const struct drm_encoder_funcs mtk_dpi_encoder_funcs = {
-	.destroy = mtk_dpi_encoder_destroy,
-};
-
 static int mtk_dpi_bridge_attach(struct drm_bridge *bridge,
 				 enum drm_bridge_attach_flags flags)
 {
...
@@ -444,7 +444,10 @@ static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi)
 	u32 horizontal_sync_active_byte;
 	u32 horizontal_backporch_byte;
 	u32 horizontal_frontporch_byte;
+	u32 horizontal_front_back_byte;
+	u32 data_phy_cycles_byte;
 	u32 dsi_tmp_buf_bpp, data_phy_cycles;
+	u32 delta;
 	struct mtk_phy_timing *timing = &dsi->phy_timing;
 
 	struct videomode *vm = &dsi->vm;
@@ -466,50 +469,30 @@ static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi)
 	horizontal_sync_active_byte = (vm->hsync_len * dsi_tmp_buf_bpp - 10);
 
 	if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
-		horizontal_backporch_byte = vm->hback_porch * dsi_tmp_buf_bpp;
+		horizontal_backporch_byte = vm->hback_porch * dsi_tmp_buf_bpp - 10;
 	else
 		horizontal_backporch_byte = (vm->hback_porch + vm->hsync_len) *
-					    dsi_tmp_buf_bpp;
+					    dsi_tmp_buf_bpp - 10;
 
 	data_phy_cycles = timing->lpx + timing->da_hs_prepare +
-			  timing->da_hs_zero + timing->da_hs_exit;
-
-	if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) {
-		if ((vm->hfront_porch + vm->hback_porch) * dsi_tmp_buf_bpp >
-		    data_phy_cycles * dsi->lanes + 18) {
-			horizontal_frontporch_byte =
-				vm->hfront_porch * dsi_tmp_buf_bpp -
-				(data_phy_cycles * dsi->lanes + 18) *
-				vm->hfront_porch /
-				(vm->hfront_porch + vm->hback_porch);
-
-			horizontal_backporch_byte =
-				horizontal_backporch_byte -
-				(data_phy_cycles * dsi->lanes + 18) *
-				vm->hback_porch /
-				(vm->hfront_porch + vm->hback_porch);
-		} else {
-			DRM_WARN("HFP less than d-phy, FPS will under 60Hz\n");
-			horizontal_frontporch_byte = vm->hfront_porch *
-						     dsi_tmp_buf_bpp;
-		}
+			  timing->da_hs_zero + timing->da_hs_exit + 3;
+
+	delta = dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST ? 18 : 12;
+
+	horizontal_frontporch_byte = vm->hfront_porch * dsi_tmp_buf_bpp;
+	horizontal_front_back_byte = horizontal_frontporch_byte + horizontal_backporch_byte;
+	data_phy_cycles_byte = data_phy_cycles * dsi->lanes + delta;
+
+	if (horizontal_front_back_byte > data_phy_cycles_byte) {
+		horizontal_frontporch_byte -= data_phy_cycles_byte *
+					      horizontal_frontporch_byte /
+					      horizontal_front_back_byte;
+
+		horizontal_backporch_byte -= data_phy_cycles_byte *
+					     horizontal_backporch_byte /
+					     horizontal_front_back_byte;
 	} else {
-		if ((vm->hfront_porch + vm->hback_porch) * dsi_tmp_buf_bpp >
-		    data_phy_cycles * dsi->lanes + 12) {
-			horizontal_frontporch_byte =
-				vm->hfront_porch * dsi_tmp_buf_bpp -
-				(data_phy_cycles * dsi->lanes + 12) *
-				vm->hfront_porch /
-				(vm->hfront_porch + vm->hback_porch);
-			horizontal_backporch_byte = horizontal_backporch_byte -
-				(data_phy_cycles * dsi->lanes + 12) *
-				vm->hback_porch /
-				(vm->hfront_porch + vm->hback_porch);
-		} else {
-			DRM_WARN("HFP less than d-phy, FPS will under 60Hz\n");
-			horizontal_frontporch_byte = vm->hfront_porch *
-						     dsi_tmp_buf_bpp;
-		}
+		DRM_WARN("HFP + HBP less than d-phy, FPS will under 60Hz\n");
 	}
 
 	writel(horizontal_sync_active_byte, dsi->regs + DSI_HSA_WC);
...
@@ -558,8 +558,10 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
 			NV_PRINTK(err, cli, "validating bo list\n");
 		validate_fini(op, chan, NULL, NULL);
 		return ret;
+	} else if (ret > 0) {
+		*apply_relocs = true;
 	}
-	*apply_relocs = ret;
+
 	return 0;
 }
@@ -662,7 +664,6 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
 		nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
 	}
 
-	u_free(reloc);
 	return ret;
 }
@@ -872,9 +873,10 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 				break;
 			}
 		}
+		u_free(reloc);
 	}
 
 out_prevalid:
-	u_free(reloc);
+	if (!IS_ERR(reloc))
+		u_free(reloc);
 	u_free(bo);
 	u_free(push);
...
@@ -219,6 +219,7 @@ struct vc4_dev {
 	struct drm_modeset_lock ctm_state_lock;
 	struct drm_private_obj ctm_manager;
+	struct drm_private_obj hvs_channels;
 	struct drm_private_obj load_tracker;
 
 	/* List of vc4_debugfs_info_entry for adding to debugfs once
@@ -531,6 +532,9 @@ struct vc4_crtc_state {
 		unsigned int top;
 		unsigned int bottom;
 	} margins;
+
+	/* Transitional state below, only valid during atomic commits */
+	bool update_muxing;
 };
 
 #define VC4_HVS_CHANNEL_DISABLED ((unsigned int)-1)
...
@@ -760,12 +760,54 @@ static void vc4_hdmi_encoder_enable(struct drm_encoder *encoder)
 {
 }
 
+#define WIFI_2_4GHz_CH1_MIN_FREQ 2400000000ULL
+#define WIFI_2_4GHz_CH1_MAX_FREQ 2422000000ULL
+
+static int vc4_hdmi_encoder_atomic_check(struct drm_encoder *encoder,
+					 struct drm_crtc_state *crtc_state,
+					 struct drm_connector_state *conn_state)
+{
+	struct drm_display_mode *mode = &crtc_state->adjusted_mode;
+	struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
+	unsigned long long pixel_rate = mode->clock * 1000;
+	unsigned long long tmds_rate;
+
+	if (vc4_hdmi->variant->unsupported_odd_h_timings &&
+	    ((mode->hdisplay % 2) || (mode->hsync_start % 2) ||
+	     (mode->hsync_end % 2) || (mode->htotal % 2)))
+		return -EINVAL;
+
+	/*
+	 * The 1440p@60 pixel rate is in the same range than the first
+	 * WiFi channel (between 2.4GHz and 2.422GHz with 22MHz
+	 * bandwidth). Slightly lower the frequency to bring it out of
+	 * the WiFi range.
+	 */
+	tmds_rate = pixel_rate * 10;
+	if (vc4_hdmi->disable_wifi_frequencies &&
+	    (tmds_rate >= WIFI_2_4GHz_CH1_MIN_FREQ &&
+	     tmds_rate <= WIFI_2_4GHz_CH1_MAX_FREQ)) {
+		mode->clock = 238560;
+		pixel_rate = mode->clock * 1000;
+	}
+
+	if (pixel_rate > vc4_hdmi->variant->max_pixel_clock)
+		return -EINVAL;
+
+	return 0;
+}
+
 static enum drm_mode_status
 vc4_hdmi_encoder_mode_valid(struct drm_encoder *encoder,
 			    const struct drm_display_mode *mode)
 {
+	struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
+
+	if (vc4_hdmi->variant->unsupported_odd_h_timings &&
+	    ((mode->hdisplay % 2) || (mode->hsync_start % 2) ||
+	     (mode->hsync_end % 2) || (mode->htotal % 2)))
+		return MODE_H_ILLEGAL;
+
 	if ((mode->clock * 1000) > vc4_hdmi->variant->max_pixel_clock)
 		return MODE_CLOCK_HIGH;
@@ -773,6 +815,7 @@ vc4_hdmi_encoder_mode_valid(struct drm_encoder *encoder,
 }
 
 static const struct drm_encoder_helper_funcs vc4_hdmi_encoder_helper_funcs = {
+	.atomic_check = vc4_hdmi_encoder_atomic_check,
 	.mode_valid = vc4_hdmi_encoder_mode_valid,
 	.disable = vc4_hdmi_encoder_disable,
 	.enable = vc4_hdmi_encoder_enable,
@@ -1694,6 +1737,9 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
 		vc4_hdmi->hpd_active_low = hpd_gpio_flags & OF_GPIO_ACTIVE_LOW;
 	}
 
+	vc4_hdmi->disable_wifi_frequencies =
+		of_property_read_bool(dev->of_node, "wifi-2.4ghz-coexistence");
+
 	pm_runtime_enable(dev);
 
 	drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
@@ -1817,6 +1863,7 @@ static const struct vc4_hdmi_variant bcm2711_hdmi0_variant = {
 		PHY_LANE_2,
 		PHY_LANE_CK,
 	},
+	.unsupported_odd_h_timings = true,
 
 	.init_resources = vc5_hdmi_init_resources,
 	.csc_setup = vc5_hdmi_csc_setup,
@@ -1842,6 +1889,7 @@ static const struct vc4_hdmi_variant bcm2711_hdmi1_variant = {
 		PHY_LANE_CK,
 		PHY_LANE_2,
 	},
+	.unsupported_odd_h_timings = true,
 
 	.init_resources = vc5_hdmi_init_resources,
 	.csc_setup = vc5_hdmi_csc_setup,
...
@@ -62,6 +62,9 @@ struct vc4_hdmi_variant {
 	 */
 	enum vc4_hdmi_phy_channel phy_lane_mapping[4];
 
+	/* The BCM2711 cannot deal with odd horizontal pixel timings */
+	bool unsupported_odd_h_timings;
+
 	/* Callback to get the resources (memory region, interrupts,
 	 * clocks, etc) for that variant.
 	 */
@@ -139,6 +142,14 @@ struct vc4_hdmi {
 	int hpd_gpio;
 	bool hpd_active_low;
 
+	/*
+	 * On some systems (like the RPi4), some modes are in the same
+	 * frequency range than the WiFi channels (1440p@60Hz for
+	 * example). Should we take evasive actions because that system
+	 * has a wifi adapter?
+	 */
+	bool disable_wifi_frequencies;
+
 	struct cec_adapter *cec_adap;
 	struct cec_msg cec_rx_msg;
 	bool cec_tx_ok;
...