Commit ff5ebafd authored by Linus Torvalds

Merge tag 'drm-fixes-2023-01-13' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "There is a bit of a post-holiday build up here I expect, small fixes
  across the board, amdgpu and msm being the main leaders, with others
  having a few. One code removal patch for nouveau:

  buddy:
   - benchmark regression fix for top-down buddy allocation

  panel:
   - add Lenovo panel orientation quirk

  ttm:
   - fix kernel oops regression

  amdgpu:
   - fix missing fence references
   - fix missing pipeline sync fencing
   - SMU13 fan speed fix
   - SMU13 fix power cap handling
   - SMU13 BACO fix
   - Fix a possible segfault in bo validation error case
   - Delay removal of firmware framebuffer
   - Fix error when unloading

  amdkfd:
   - SVM fix when clearing vram
   - GC11 fix for multi-GPU

  i915:
   - Reserve enough fence slot for i915_vma_unbind_async
   - Fix potential use after free
   - Reset engines twice in case of reset failure
   - Use multi-cast registers for SVG Unit registers

  msm:
   - display:
      - doc warning fixes
      - dt attribs cleanups
      - memory leak fix
      - error handling in hdmi probe fix
      - dp_aux_isr incorrect signalling fix
      - shutdown path fix
   - accel:
      - a5xx: fix quirks to be a bitmask
      - a6xx: fix gx halt to avoid 1s hang
      - kexec shutdown fix
      - fix potential double free

  vmwgfx:
   - drop rcu usage to make code more robust

  virtio:
   - fix use-after-free in gem handle code

  nouveau:
   - drop unused nouveau_fbcon.c"

* tag 'drm-fixes-2023-01-13' of git://anongit.freedesktop.org/drm/drm: (35 commits)
  drm: Optimize drm buddy top-down allocation method
  drm/ttm: Fix a regression causing kernel oops'es
  drm/i915/gt: Cover rest of SVG unit MCR registers
  drm/nouveau: Remove file nouveau_fbcon.c
  drm/amdkfd: Fix NULL pointer error for GC 11.0.1 on mGPU
  drm/amd/pm/smu13: BACO is supported when it's in BACO state
  drm/amdkfd: Add sync after creating vram bo
  drm/i915/gt: Reset twice
  drm/amdgpu: fix pipeline sync v2
  drm/vmwgfx: Remove rcu locks from user resources
  drm/virtio: Fix GEM handle creation UAF
  drm/amdgpu: Fixed bug on error when unloading amdgpu
  drm/amd: Delay removal of the firmware framebuffer
  drm/amdgpu: Fix potential NULL dereference
  drm/i915: Fix potential context UAFs
  drm/i915: Reserve enough fence slot for i915_vma_unbind_async
  drm: Add orientation quirk for Lenovo ideapad D330-10IGL
  drm/msm/a6xx: Avoid gx gbit halt during rpm suspend
  drm/msm/adreno: Make adreno quirks not overwrite each other
  drm/msm: another fix for the headless Adreno GPU
  ...
parents d45b832d e695bc7e
......@@ -32,7 +32,7 @@ properties:
- description: Display byte clock
- description: Display byte interface clock
- description: Display pixel clock
- description: Display escape clock
- description: Display core clock
- description: Display AHB clock
- description: Display AXI clock
......@@ -137,8 +137,6 @@ required:
- phys
- assigned-clocks
- assigned-clock-parents
- power-domains
- operating-points-v2
- ports
additionalProperties: false
......
......@@ -69,7 +69,6 @@ required:
- compatible
- reg
- reg-names
- vdds-supply
unevaluatedProperties: false
......
......@@ -39,7 +39,6 @@ required:
- compatible
- reg
- reg-names
- vcca-supply
unevaluatedProperties: false
......
......@@ -34,6 +34,10 @@ properties:
vddio-supply:
description: Phandle to vdd-io regulator device node.
qcom,dsi-phy-regulator-ldo-mode:
type: boolean
description: Indicates if the LDO mode PHY regulator is wanted.
required:
- compatible
- reg
......
......@@ -72,7 +72,7 @@ examples:
#include <dt-bindings/interconnect/qcom,qcm2290.h>
#include <dt-bindings/power/qcom-rpmpd.h>
mdss@5e00000 {
display-subsystem@5e00000 {
#address-cells = <1>;
#size-cells = <1>;
compatible = "qcom,qcm2290-mdss";
......
......@@ -62,7 +62,7 @@ examples:
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/power/qcom-rpmpd.h>
mdss@5e00000 {
display-subsystem@5e00000 {
#address-cells = <1>;
#size-cells = <1>;
compatible = "qcom,sm6115-mdss";
......
......@@ -2099,7 +2099,7 @@ int amdgpu_amdkfd_map_gtt_bo_to_gart(struct amdgpu_device *adev, struct amdgpu_b
}
amdgpu_amdkfd_remove_eviction_fence(
bo, bo->kfd_bo->process_info->eviction_fence);
bo, bo->vm_bo->vm->process_info->eviction_fence);
amdgpu_bo_unreserve(bo);
......
......@@ -61,6 +61,8 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p,
amdgpu_ctx_put(p->ctx);
return -ECANCELED;
}
amdgpu_sync_create(&p->sync);
return 0;
}
......@@ -452,18 +454,6 @@ static int amdgpu_syncobj_lookup_and_add(struct amdgpu_cs_parser *p,
}
r = amdgpu_sync_fence(&p->sync, fence);
if (r)
goto error;
/*
* When we have an explicit dependency it might be necessary to insert a
* pipeline sync to make sure that all caches etc are flushed and the
* next job actually sees the results from the previous one.
*/
if (fence->context == p->gang_leader->base.entity->fence_context)
r = amdgpu_sync_fence(&p->gang_leader->explicit_sync, fence);
error:
dma_fence_put(fence);
return r;
}
......@@ -1188,10 +1178,19 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
{
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
struct drm_gpu_scheduler *sched;
struct amdgpu_bo_list_entry *e;
struct dma_fence *fence;
unsigned int i;
int r;
r = amdgpu_ctx_wait_prev_fence(p->ctx, p->entities[p->gang_leader_idx]);
if (r) {
if (r != -ERESTARTSYS)
DRM_ERROR("amdgpu_ctx_wait_prev_fence failed.\n");
return r;
}
list_for_each_entry(e, &p->validated, tv.head) {
struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
struct dma_resv *resv = bo->tbo.base.resv;
......@@ -1211,10 +1210,24 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
return r;
}
r = amdgpu_ctx_wait_prev_fence(p->ctx, p->entities[p->gang_leader_idx]);
if (r && r != -ERESTARTSYS)
DRM_ERROR("amdgpu_ctx_wait_prev_fence failed.\n");
return r;
sched = p->gang_leader->base.entity->rq->sched;
while ((fence = amdgpu_sync_get_fence(&p->sync))) {
struct drm_sched_fence *s_fence = to_drm_sched_fence(fence);
/*
* When we have a dependency it might be necessary to insert a
* pipeline sync to make sure that all caches etc are flushed and the
* next job actually sees the results from the previous one
* before we start executing on the same scheduler ring.
*/
if (!s_fence || s_fence->sched != sched)
continue;
r = amdgpu_sync_fence(&p->gang_leader->explicit_sync, fence);
if (r)
return r;
}
return 0;
}
static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
......@@ -1254,9 +1267,12 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
continue;
fence = &p->jobs[i]->base.s_fence->scheduled;
dma_fence_get(fence);
r = drm_sched_job_add_dependency(&leader->base, fence);
if (r)
if (r) {
dma_fence_put(fence);
goto error_cleanup;
}
}
if (p->gang_size > 1) {
......@@ -1344,6 +1360,7 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser)
{
unsigned i;
amdgpu_sync_free(&parser->sync);
for (i = 0; i < parser->num_post_deps; i++) {
drm_syncobj_put(parser->post_deps[i].syncobj);
kfree(parser->post_deps[i].chain);
......
......@@ -36,6 +36,7 @@
#include <generated/utsrelease.h>
#include <linux/pci-p2pdma.h>
#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_probe_helper.h>
......@@ -90,6 +91,8 @@ MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
#define AMDGPU_MAX_RETRY_LIMIT 2
#define AMDGPU_RETRY_SRIOV_RESET(r) ((r) == -EBUSY || (r) == -ETIMEDOUT || (r) == -EINVAL)
static const struct drm_driver amdgpu_kms_driver;
const char *amdgpu_asic_name[] = {
"TAHITI",
"PITCAIRN",
......@@ -3687,6 +3690,11 @@ int amdgpu_device_init(struct amdgpu_device *adev,
if (r)
return r;
/* Get rid of things like offb */
r = drm_aperture_remove_conflicting_pci_framebuffers(adev->pdev, &amdgpu_kms_driver);
if (r)
return r;
/* Enable TMZ based on IP_VERSION */
amdgpu_gmc_tmz_set(adev);
......
......@@ -23,7 +23,6 @@
*/
#include <drm/amdgpu_drm.h>
#include <drm/drm_aperture.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_generic.h>
#include <drm/drm_gem.h>
......@@ -2122,11 +2121,6 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
}
#endif
/* Get rid of things like offb */
ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &amdgpu_kms_driver);
if (ret)
return ret;
adev = devm_drm_dev_alloc(&pdev->dev, &amdgpu_kms_driver, typeof(*adev), ddev);
if (IS_ERR(adev))
return PTR_ERR(adev);
......
......@@ -470,8 +470,9 @@ static bool amdgpu_bo_validate_size(struct amdgpu_device *adev,
return true;
fail:
DRM_DEBUG("BO size %lu > total memory in domain: %llu\n", size,
man->size);
if (man)
DRM_DEBUG("BO size %lu > total memory in domain: %llu\n", size,
man->size);
return false;
}
......
......@@ -391,8 +391,10 @@ int amdgpu_sync_push_to_job(struct amdgpu_sync *sync, struct amdgpu_job *job)
dma_fence_get(f);
r = drm_sched_job_add_dependency(&job->base, f);
if (r)
if (r) {
dma_fence_put(f);
return r;
}
}
return 0;
}
......
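Both amdgpu hunks above (amdgpu_cs_submit() and amdgpu_sync_push_to_job()) fix the same leak: a reference taken with dma_fence_get() right before drm_sched_job_add_dependency() was never dropped when that call failed. A minimal userspace sketch of the get/put balancing rule, using a hypothetical refcounted type and an add_dependency() stand-in rather than the kernel API:

#include <stdlib.h>

struct obj { int refcount; };

static void obj_get(struct obj *o) { o->refcount++; }
static void obj_put(struct obj *o) { if (--o->refcount == 0) free(o); }

/* Hypothetical consumer: takes ownership of the reference only on success. */
static int add_dependency(struct obj *o, int should_fail)
{
	return should_fail ? -1 : 0;
}

static int push_dep(struct obj *o, int should_fail)
{
	int r;

	obj_get(o);
	r = add_dependency(o, should_fail);
	if (r) {
		obj_put(o);	/* the fix: undo our get, ownership never transferred */
		return r;
	}
	return 0;	/* on success the consumer puts the reference later */
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));

	o->refcount = 1;
	push_dep(o, 1);	/* failure path no longer leaks a reference */
	obj_put(o);	/* back to zero: object freed, no leak */
	return 0;
}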
......@@ -882,7 +882,7 @@ void amdgpu_vram_mgr_fini(struct amdgpu_device *adev)
kfree(rsv);
list_for_each_entry_safe(rsv, temp, &mgr->reserved_pages, blocks) {
drm_buddy_free_list(&mgr->mm, &rsv->blocks);
drm_buddy_free_list(&mgr->mm, &rsv->allocated);
kfree(rsv);
}
drm_buddy_fini(&mgr->mm);
......
......@@ -200,7 +200,7 @@ static int add_queue_mes(struct device_queue_manager *dqm, struct queue *q,
queue_input.wptr_addr = (uint64_t)q->properties.write_ptr;
if (q->wptr_bo) {
wptr_addr_off = (uint64_t)q->properties.write_ptr - (uint64_t)q->wptr_bo->kfd_bo->va;
wptr_addr_off = (uint64_t)q->properties.write_ptr & (PAGE_SIZE - 1);
queue_input.wptr_mc_addr = ((uint64_t)q->wptr_bo->tbo.resource->start << PAGE_SHIFT) + wptr_addr_off;
}
......
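The add_queue_mes() change computes the write pointer's offset inside its page by masking with PAGE_SIZE - 1 instead of subtracting a BO base address, so the result no longer depends on picking the right base. A small sketch of the arithmetic with hypothetical addresses, assuming 4 KiB pages:

#include <assert.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1ULL << PAGE_SHIFT)

int main(void)
{
	uint64_t write_ptr = 0x7f00ab123456ULL;	/* hypothetical address */

	/* Offset within the page: just the low PAGE_SHIFT bits. */
	uint64_t off = write_ptr & (PAGE_SIZE - 1);
	assert(off == 0x456);

	/* MC address = backing page frame (resource->start) + in-page offset. */
	uint64_t page_frame = 0x1000ULL;	/* hypothetical */
	uint64_t mc_addr = (page_frame << PAGE_SHIFT) + off;
	assert(mc_addr == 0x1000456ULL);
	return 0;
}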
......@@ -570,6 +570,15 @@ svm_range_vram_node_new(struct amdgpu_device *adev, struct svm_range *prange,
goto reserve_bo_failed;
}
if (clear) {
r = amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);
if (r) {
pr_debug("failed %d to sync bo\n", r);
amdgpu_bo_unreserve(bo);
goto reserve_bo_failed;
}
}
r = dma_resv_reserve_fences(bo->tbo.base.resv, 1);
if (r) {
pr_debug("failed %d to reserve bo\n", r);
......
......@@ -1261,7 +1261,8 @@ int smu_v13_0_set_fan_speed_rpm(struct smu_context *smu,
uint32_t speed)
{
struct amdgpu_device *adev = smu->adev;
uint32_t tach_period, crystal_clock_freq;
uint32_t crystal_clock_freq = 2500;
uint32_t tach_period;
int ret;
if (!speed)
......@@ -1271,7 +1272,6 @@ int smu_v13_0_set_fan_speed_rpm(struct smu_context *smu,
if (ret)
return ret;
crystal_clock_freq = amdgpu_asic_get_xclk(adev);
tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
WREG32_SOC15(THM, 0, regCG_TACH_CTRL,
REG_SET_FIELD(RREG32_SOC15(THM, 0, regCG_TACH_CTRL),
......@@ -2298,6 +2298,10 @@ bool smu_v13_0_baco_is_support(struct smu_context *smu)
!smu_baco->platform_support)
return false;
/* return true if ASIC is in BACO state already */
if (smu_v13_0_baco_get_state(smu) == SMU_BACO_STATE_ENTER)
return true;
if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) &&
!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT))
return false;
......
......@@ -213,6 +213,7 @@ static struct cmn2asic_mapping smu_v13_0_0_feature_mask_map[SMU_FEATURE_COUNT] =
FEA_MAP(SOC_PCC),
[SMU_FEATURE_DPM_VCLK_BIT] = {1, FEATURE_MM_DPM_BIT},
[SMU_FEATURE_DPM_DCLK_BIT] = {1, FEATURE_MM_DPM_BIT},
[SMU_FEATURE_PPT_BIT] = {1, FEATURE_THROTTLERS_BIT},
};
static struct cmn2asic_mapping smu_v13_0_0_table_map[SMU_TABLE_COUNT] = {
......
......@@ -192,6 +192,7 @@ static struct cmn2asic_mapping smu_v13_0_7_feature_mask_map[SMU_FEATURE_COUNT] =
FEA_MAP(SOC_PCC),
[SMU_FEATURE_DPM_VCLK_BIT] = {1, FEATURE_MM_DPM_BIT},
[SMU_FEATURE_DPM_DCLK_BIT] = {1, FEATURE_MM_DPM_BIT},
[SMU_FEATURE_PPT_BIT] = {1, FEATURE_THROTTLERS_BIT},
};
static struct cmn2asic_mapping smu_v13_0_7_table_map[SMU_TABLE_COUNT] = {
......
......@@ -38,6 +38,25 @@ static void drm_block_free(struct drm_buddy *mm,
kmem_cache_free(slab_blocks, block);
}
static void list_insert_sorted(struct drm_buddy *mm,
struct drm_buddy_block *block)
{
struct drm_buddy_block *node;
struct list_head *head;
head = &mm->free_list[drm_buddy_block_order(block)];
if (list_empty(head)) {
list_add(&block->link, head);
return;
}
list_for_each_entry(node, head, link)
if (drm_buddy_block_offset(block) < drm_buddy_block_offset(node))
break;
__list_add(&block->link, node->link.prev, &node->link);
}
static void mark_allocated(struct drm_buddy_block *block)
{
block->header &= ~DRM_BUDDY_HEADER_STATE;
......@@ -52,8 +71,7 @@ static void mark_free(struct drm_buddy *mm,
block->header &= ~DRM_BUDDY_HEADER_STATE;
block->header |= DRM_BUDDY_FREE;
list_add(&block->link,
&mm->free_list[drm_buddy_block_order(block)]);
list_insert_sorted(mm, block);
}
static void mark_split(struct drm_buddy_block *block)
......@@ -387,20 +405,26 @@ alloc_range_bias(struct drm_buddy *mm,
}
static struct drm_buddy_block *
get_maxblock(struct list_head *head)
get_maxblock(struct drm_buddy *mm, unsigned int order)
{
struct drm_buddy_block *max_block = NULL, *node;
unsigned int i;
max_block = list_first_entry_or_null(head,
struct drm_buddy_block,
link);
if (!max_block)
return NULL;
for (i = order; i <= mm->max_order; ++i) {
if (!list_empty(&mm->free_list[i])) {
node = list_last_entry(&mm->free_list[i],
struct drm_buddy_block,
link);
if (!max_block) {
max_block = node;
continue;
}
list_for_each_entry(node, head, link) {
if (drm_buddy_block_offset(node) >
drm_buddy_block_offset(max_block))
max_block = node;
if (drm_buddy_block_offset(node) >
drm_buddy_block_offset(max_block)) {
max_block = node;
}
}
}
return max_block;
......@@ -412,20 +436,23 @@ alloc_from_freelist(struct drm_buddy *mm,
unsigned long flags)
{
struct drm_buddy_block *block = NULL;
unsigned int i;
unsigned int tmp;
int err;
for (i = order; i <= mm->max_order; ++i) {
if (flags & DRM_BUDDY_TOPDOWN_ALLOCATION) {
block = get_maxblock(&mm->free_list[i]);
if (block)
break;
} else {
block = list_first_entry_or_null(&mm->free_list[i],
struct drm_buddy_block,
link);
if (block)
break;
if (flags & DRM_BUDDY_TOPDOWN_ALLOCATION) {
block = get_maxblock(mm, order);
if (block)
/* Store the obtained block order */
tmp = drm_buddy_block_order(block);
} else {
for (tmp = order; tmp <= mm->max_order; ++tmp) {
if (!list_empty(&mm->free_list[tmp])) {
block = list_last_entry(&mm->free_list[tmp],
struct drm_buddy_block,
link);
if (block)
break;
}
}
}
......@@ -434,18 +461,18 @@ alloc_from_freelist(struct drm_buddy *mm,
BUG_ON(!drm_buddy_block_is_free(block));
while (i != order) {
while (tmp != order) {
err = split_block(mm, block);
if (unlikely(err))
goto err_undo;
block = block->right;
i--;
tmp--;
}
return block;
err_undo:
if (i != order)
if (tmp != order)
__drm_buddy_free(mm, block);
return ERR_PTR(err);
}
......
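The heart of the benchmark fix: mark_free() now inserts blocks in offset order via list_insert_sorted(), so the rewritten get_maxblock() only compares the tail of each non-empty free list instead of walking every block on every top-down allocation. A standalone sketch of the invariant, with a minimal doubly linked list standing in for the kernel's list.h:

#include <stdio.h>

struct node { unsigned long offset; struct node *prev, *next; };

/* Keep the list ordered by ascending offset, as list_insert_sorted() does. */
static void insert_sorted(struct node *head, struct node *blk)
{
	struct node *pos = head->next;

	while (pos != head && pos->offset < blk->offset)
		pos = pos->next;
	blk->prev = pos->prev;
	blk->next = pos;
	pos->prev->next = blk;
	pos->prev = blk;
}

int main(void)
{
	struct node head = { 0, &head, &head };
	struct node a = { 0x3000 }, b = { 0x1000 }, c = { 0x2000 };

	insert_sorted(&head, &a);
	insert_sorted(&head, &b);
	insert_sorted(&head, &c);

	/* Top-down pick: the highest block is always the tail, O(1). */
	printf("max offset: 0x%lx\n", head.prev->offset);	/* 0x3000 */
	return 0;
}

The sort cost is paid once per free instead of a full list walk per allocation, which is what regressed the benchmark.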
......@@ -304,6 +304,12 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad D330-10IGM"),
},
.driver_data = (void *)&lcd1200x1920_rightside_up,
}, { /* Lenovo Ideapad D330-10IGL (HD) */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad D330-10IGL"),
},
.driver_data = (void *)&lcd800x1280_rightside_up,
}, { /* Lenovo Yoga Book X90F / X91F / X91L */
.matches = {
/* Non exact match to match all versions */
......
......@@ -1688,6 +1688,10 @@ void i915_gem_init__contexts(struct drm_i915_private *i915)
init_contexts(&i915->gem.contexts);
}
/*
* Note that this implicitly consumes the ctx reference, by placing
* the ctx in the context_xa.
*/
static void gem_context_register(struct i915_gem_context *ctx,
struct drm_i915_file_private *fpriv,
u32 id)
......@@ -1703,10 +1707,6 @@ static void gem_context_register(struct i915_gem_context *ctx,
snprintf(ctx->name, sizeof(ctx->name), "%s[%d]",
current->comm, pid_nr(ctx->pid));
/* And finally expose ourselves to userspace via the idr */
old = xa_store(&fpriv->context_xa, id, ctx, GFP_KERNEL);
WARN_ON(old);
spin_lock(&ctx->client->ctx_lock);
list_add_tail_rcu(&ctx->client_link, &ctx->client->ctx_list);
spin_unlock(&ctx->client->ctx_lock);
......@@ -1714,6 +1714,10 @@ static void gem_context_register(struct i915_gem_context *ctx,
spin_lock(&i915->gem.contexts.lock);
list_add_tail(&ctx->link, &i915->gem.contexts.list);
spin_unlock(&i915->gem.contexts.lock);
/* And finally expose ourselves to userspace via the idr */
old = xa_store(&fpriv->context_xa, id, ctx, GFP_KERNEL);
WARN_ON(old);
}
int i915_gem_context_open(struct drm_i915_private *i915,
......@@ -2199,14 +2203,22 @@ finalize_create_context_locked(struct drm_i915_file_private *file_priv,
if (IS_ERR(ctx))
return ctx;
/*
* One for the xarray and one for the caller. We need to grab
* the reference *prior* to making the ctx visble to userspace
* in gem_context_register(), as at any point after that
* userspace can try to race us with another thread destroying
* the context under our feet.
*/
i915_gem_context_get(ctx);
gem_context_register(ctx, file_priv, id);
old = xa_erase(&file_priv->proto_context_xa, id);
GEM_BUG_ON(old != pc);
proto_context_close(file_priv->dev_priv, pc);
/* One for the xarray and one for the caller */
return i915_gem_context_get(ctx);
return ctx;
}
struct i915_gem_context *
......
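Taken together, the i915 context hunks enforce one rule: finish initialization and take any extra reference before the xa_store() that publishes the context, because from that instant userspace can race a close against the creator. A generic sketch of the publish-last pattern, with hypothetical names and C11 atomics standing in for the xarray:

#include <stdatomic.h>
#include <string.h>

struct ctx {
	char name[32];
	atomic_int refcount;
};

static _Atomic(struct ctx *) table[64];	/* stand-in for fpriv->context_xa */

static struct ctx *publish(struct ctx *c, unsigned int id)
{
	/* 1. Finish all setup while the context is still private. */
	strcpy(c->name, "render[1234]");

	/* 2. Take the caller's reference *before* publication: once
	 *    visible, another thread may look the context up and close
	 *    it, dropping the table's reference at any moment. */
	atomic_fetch_add(&c->refcount, 1);

	/* 3. Publish last, with release ordering so a lookup that sees
	 *    the pointer also sees the initialized fields. */
	atomic_store_explicit(&table[id], c, memory_order_release);
	return c;
}

int main(void)
{
	static struct ctx c;

	publish(&c, 0);
	return 0;
}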
......@@ -406,10 +406,10 @@
#define GEN9_WM_CHICKEN3 _MMIO(0x5588)
#define GEN9_FACTOR_IN_CLR_VAL_HIZ (1 << 9)
#define CHICKEN_RASTER_1 _MMIO(0x6204)
#define CHICKEN_RASTER_1 MCR_REG(0x6204)
#define DIS_SF_ROUND_NEAREST_EVEN REG_BIT(8)
#define CHICKEN_RASTER_2 _MMIO(0x6208)
#define CHICKEN_RASTER_2 MCR_REG(0x6208)
#define TBIMR_FAST_CLIP REG_BIT(5)
#define VFLSKPD MCR_REG(0x62a8)
......
......@@ -278,6 +278,7 @@ static int ilk_do_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask,
static int gen6_hw_domain_reset(struct intel_gt *gt, u32 hw_domain_mask)
{
struct intel_uncore *uncore = gt->uncore;
int loops = 2;
int err;
/*
......@@ -285,18 +286,39 @@ static int gen6_hw_domain_reset(struct intel_gt *gt, u32 hw_domain_mask)
* for fifo space for the write or forcewake the chip for
* the read
*/
intel_uncore_write_fw(uncore, GEN6_GDRST, hw_domain_mask);
do {
intel_uncore_write_fw(uncore, GEN6_GDRST, hw_domain_mask);
/* Wait for the device to ack the reset requests */
err = __intel_wait_for_register_fw(uncore,
GEN6_GDRST, hw_domain_mask, 0,
500, 0,
NULL);
/*
* Wait for the device to ack the reset requests.
*
* On some platforms, e.g. Jasperlake, we see that the
* engine register state is not cleared until shortly after
* GDRST reports completion, causing a failure as we try
* to immediately resume while the internal state is still
* in flux. If we immediately repeat the reset, the second
* reset appears to serialise with the first, and since
* it is a no-op, the registers should retain their reset
* value. However, there is still a concern that upon
* leaving the second reset, the internal engine state
* is still in flux and not ready for resuming.
*/
err = __intel_wait_for_register_fw(uncore, GEN6_GDRST,
hw_domain_mask, 0,
2000, 0,
NULL);
} while (err == 0 && --loops);
if (err)
GT_TRACE(gt,
"Wait for 0x%08x engines reset failed\n",
hw_domain_mask);
/*
* As we have observed that the engine state is still volatile
* after GDRST is acked, impose a small delay to let everything settle.
*/
udelay(50);
return err;
}
......
......@@ -645,7 +645,7 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
static void dg2_ctx_gt_tuning_init(struct intel_engine_cs *engine,
struct i915_wa_list *wal)
{
wa_masked_en(wal, CHICKEN_RASTER_2, TBIMR_FAST_CLIP);
wa_mcr_masked_en(wal, CHICKEN_RASTER_2, TBIMR_FAST_CLIP);
wa_mcr_write_clr_set(wal, XEHP_L3SQCREG5, L3_PWM_TIMER_INIT_VAL_MASK,
REG_FIELD_PREP(L3_PWM_TIMER_INIT_VAL_MASK, 0x7f));
wa_mcr_add(wal,
......@@ -775,7 +775,7 @@ static void dg2_ctx_workarounds_init(struct intel_engine_cs *engine,
wa_masked_field_set(wal, VF_PREEMPTION, PREEMPTION_VERTEX_COUNT, 0x4000);
/* Wa_15010599737:dg2 */
wa_masked_en(wal, CHICKEN_RASTER_1, DIS_SF_ROUND_NEAREST_EVEN);
wa_mcr_masked_en(wal, CHICKEN_RASTER_1, DIS_SF_ROUND_NEAREST_EVEN);
}
static void fakewa_disable_nestedbb_mode(struct intel_engine_cs *engine,
......
......@@ -2116,7 +2116,7 @@ int i915_vma_unbind_async(struct i915_vma *vma, bool trylock_vm)
if (!obj->mm.rsgt)
return -EBUSY;
err = dma_resv_reserve_fences(obj->base.resv, 1);
err = dma_resv_reserve_fences(obj->base.resv, 2);
if (err)
return -EBUSY;
......
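dma_resv's contract is that a slot must be reserved for every fence later added; the async unbind path can add two fences, so reserving one under-allocated the array. A kernel-side sketch of the reserve-then-add discipline (an illustrative helper only, assuming the usual dma-resv API of recent kernels):

#include <linux/dma-fence.h>
#include <linux/dma-resv.h>

/* Attach two fences to a reservation object, reserving both slots first. */
static int attach_two_fences(struct dma_resv *resv,
			     struct dma_fence *f1, struct dma_fence *f2)
{
	int err;

	err = dma_resv_lock(resv, NULL);
	if (err)
		return err;

	/* Reserve as many slots as fences we may add; reserving 1 and
	 * adding 2 (the bug fixed above) breaks the API contract. */
	err = dma_resv_reserve_fences(resv, 2);
	if (!err) {
		dma_resv_add_fence(resv, f1, DMA_RESV_USAGE_KERNEL);
		dma_resv_add_fence(resv, f2, DMA_RESV_USAGE_KERNEL);
	}

	dma_resv_unlock(resv);
	return err;
}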
......@@ -876,7 +876,8 @@ static void a6xx_gmu_rpmh_off(struct a6xx_gmu *gmu)
#define GBIF_CLIENT_HALT_MASK BIT(0)
#define GBIF_ARB_HALT_MASK BIT(1)
static void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu)
static void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu,
bool gx_off)
{
struct msm_gpu *gpu = &adreno_gpu->base;
......@@ -889,9 +890,11 @@ static void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu)
return;
}
/* Halt the gx side of GBIF */
gpu_write(gpu, REG_A6XX_RBBM_GBIF_HALT, 1);
spin_until(gpu_read(gpu, REG_A6XX_RBBM_GBIF_HALT_ACK) & 1);
if (gx_off) {
/* Halt the gx side of GBIF */
gpu_write(gpu, REG_A6XX_RBBM_GBIF_HALT, 1);
spin_until(gpu_read(gpu, REG_A6XX_RBBM_GBIF_HALT_ACK) & 1);
}
/* Halt new client requests on GBIF */
gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_CLIENT_HALT_MASK);
......@@ -929,7 +932,7 @@ static void a6xx_gmu_force_off(struct a6xx_gmu *gmu)
/* Halt the gmu cm3 core */
gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1);
a6xx_bus_clear_pending_transactions(adreno_gpu);
a6xx_bus_clear_pending_transactions(adreno_gpu, true);
/* Reset GPU core blocks */
gpu_write(gpu, REG_A6XX_RBBM_SW_RESET_CMD, 1);
......@@ -1083,7 +1086,7 @@ static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu)
return;
}
a6xx_bus_clear_pending_transactions(adreno_gpu);
a6xx_bus_clear_pending_transactions(adreno_gpu, a6xx_gpu->hung);
/* tell the GMU we want to slumber */
ret = a6xx_gmu_notify_slumber(gmu);
......
......@@ -1270,6 +1270,12 @@ static void a6xx_recover(struct msm_gpu *gpu)
if (hang_debug)
a6xx_dump(gpu);
/*
* To handle recovery specific sequences during the rpm suspend we are
* about to trigger
*/
a6xx_gpu->hung = true;
/* Halt SQE first */
gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 3);
......@@ -1312,6 +1318,7 @@ static void a6xx_recover(struct msm_gpu *gpu)
mutex_unlock(&gpu->active_lock);
msm_gpu_hw_init(gpu);
a6xx_gpu->hung = false;
}
static const char *a6xx_uche_fault_block(struct msm_gpu *gpu, u32 mid)
......
......@@ -32,6 +32,7 @@ struct a6xx_gpu {
void *llc_slice;
void *htw_llc_slice;
bool have_mmu500;
bool hung;
};
#define to_a6xx_gpu(x) container_of(x, struct a6xx_gpu, base)
......
......@@ -29,11 +29,9 @@ enum {
ADRENO_FW_MAX,
};
enum adreno_quirks {
ADRENO_QUIRK_TWO_PASS_USE_WFI = 1,
ADRENO_QUIRK_FAULT_DETECT_MASK = 2,
ADRENO_QUIRK_LMLOADKILL_DISABLE = 3,
};
#define ADRENO_QUIRK_TWO_PASS_USE_WFI BIT(0)
#define ADRENO_QUIRK_FAULT_DETECT_MASK BIT(1)
#define ADRENO_QUIRK_LMLOADKILL_DISABLE BIT(2)
struct adreno_rev {
uint8_t core;
......@@ -65,7 +63,7 @@ struct adreno_info {
const char *name;
const char *fw[ADRENO_FW_MAX];
uint32_t gmem;
enum adreno_quirks quirks;
u64 quirks;
struct msm_gpu *(*init)(struct drm_device *dev);
const char *zapfw;
u32 inactive_period;
......
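With the old enum, ADRENO_QUIRK_LMLOADKILL_DISABLE was 3, i.e. numerically equal to TWO_PASS_USE_WFI | FAULT_DETECT_MASK, so OR-ing quirks together and testing with & conflated them. A compilable sketch of the aliasing the BIT() conversion removes:

#include <assert.h>
#include <stdio.h>

#define BIT(n) (1UL << (n))

/* Old scheme: plain enum values 1, 2, 3. */
enum old_quirks {
	OLD_QUIRK_TWO_PASS_USE_WFI   = 1,
	OLD_QUIRK_FAULT_DETECT_MASK  = 2,
	OLD_QUIRK_LMLOADKILL_DISABLE = 3,	/* == (1 | 2): aliases the other two */
};

/* New scheme: one bit per quirk. */
#define NEW_QUIRK_TWO_PASS_USE_WFI   BIT(0)
#define NEW_QUIRK_FAULT_DETECT_MASK  BIT(1)
#define NEW_QUIRK_LMLOADKILL_DISABLE BIT(2)

int main(void)
{
	unsigned long oldq = OLD_QUIRK_TWO_PASS_USE_WFI | OLD_QUIRK_FAULT_DETECT_MASK;
	unsigned long newq = NEW_QUIRK_TWO_PASS_USE_WFI | NEW_QUIRK_FAULT_DETECT_MASK;

	/* Old scheme: a GPU with the first two quirks "has" the third. */
	assert(oldq & OLD_QUIRK_LMLOADKILL_DISABLE);

	/* New scheme: each quirk occupies its own bit, no aliasing. */
	assert(!(newq & NEW_QUIRK_LMLOADKILL_DISABLE));

	printf("bitmask quirks cannot overwrite each other\n");
	return 0;
}

Making quirks single-bit flags (and widening adreno_info::quirks to u64) lets them be combined and tested independently.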
......@@ -132,7 +132,6 @@ static void dpu_encoder_phys_wb_set_qos(struct dpu_encoder_phys *phys_enc)
* dpu_encoder_phys_wb_setup_fb - setup output framebuffer
* @phys_enc: Pointer to physical encoder
* @fb: Pointer to output framebuffer
* @wb_roi: Pointer to output region of interest
*/
static void dpu_encoder_phys_wb_setup_fb(struct dpu_encoder_phys *phys_enc,
struct drm_framebuffer *fb)
......@@ -692,7 +691,7 @@ static void dpu_encoder_phys_wb_init_ops(struct dpu_encoder_phys_ops *ops)
/**
* dpu_encoder_phys_wb_init - initialize writeback encoder
* @init: Pointer to init info structure with initialization params
* @p: Pointer to init info structure with initialization params
*/
struct dpu_encoder_phys *dpu_encoder_phys_wb_init(
struct dpu_enc_phys_init_params *p)
......
......@@ -423,6 +423,10 @@ void dp_aux_isr(struct drm_dp_aux *dp_aux)
isr = dp_catalog_aux_get_irq(aux->catalog);
/* no interrupts pending, return immediately */
if (!isr)
return;
if (!aux->cmd_busy)
return;
......
......@@ -532,11 +532,19 @@ static int msm_hdmi_dev_probe(struct platform_device *pdev)
ret = devm_pm_runtime_enable(&pdev->dev);
if (ret)
return ret;
goto err_put_phy;
platform_set_drvdata(pdev, hdmi);
return component_add(&pdev->dev, &msm_hdmi_ops);
ret = component_add(&pdev->dev, &msm_hdmi_ops);
if (ret)
goto err_put_phy;
return 0;
err_put_phy:
msm_hdmi_put_phy(hdmi);
return ret;
}
static int msm_hdmi_dev_remove(struct platform_device *pdev)
......
......@@ -1278,7 +1278,7 @@ void msm_drv_shutdown(struct platform_device *pdev)
* msm_drm_init, drm_dev->registered is used as an indicator that the
* shutdown will be successful.
*/
if (drm && drm->registered)
if (drm && drm->registered && priv->kms)
drm_atomic_helper_shutdown(drm);
}
......
......@@ -47,15 +47,17 @@ struct msm_mdss {
static int msm_mdss_parse_data_bus_icc_path(struct device *dev,
struct msm_mdss *msm_mdss)
{
struct icc_path *path0 = of_icc_get(dev, "mdp0-mem");
struct icc_path *path1 = of_icc_get(dev, "mdp1-mem");
struct icc_path *path0;
struct icc_path *path1;
path0 = of_icc_get(dev, "mdp0-mem");
if (IS_ERR_OR_NULL(path0))
return PTR_ERR_OR_ZERO(path0);
msm_mdss->path[0] = path0;
msm_mdss->num_paths = 1;
path1 = of_icc_get(dev, "mdp1-mem");
if (!IS_ERR_OR_NULL(path1)) {
msm_mdss->path[1] = path1;
msm_mdss->num_paths++;
......
......@@ -173,7 +173,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
clear = src_iter->ops->maps_tt && (!ttm || !ttm_tt_is_populated(ttm));
if (!(clear && ttm && !(ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC)))
ttm_move_memcpy(clear, ttm->num_pages, dst_iter, src_iter);
ttm_move_memcpy(clear, PFN_UP(dst_mem->size), dst_iter, src_iter);
if (!src_iter->ops->maps_tt)
ttm_kmap_iter_linear_io_fini(&_src_iter.io, bdev, src_mem);
......
......@@ -358,10 +358,18 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
drm_gem_object_release(obj);
return ret;
}
drm_gem_object_put(obj);
rc->res_handle = qobj->hw_res_handle; /* similar to a VM address */
rc->bo_handle = handle;
/*
* The handle owns the reference now. But we must drop our
* remaining reference *after* we no longer need to dereference
* the obj. Otherwise userspace could guess the handle and
* race closing it from another thread.
*/
drm_gem_object_put(obj);
return 0;
}
......@@ -723,11 +731,18 @@ static int virtio_gpu_resource_create_blob_ioctl(struct drm_device *dev,
drm_gem_object_release(obj);
return ret;
}
drm_gem_object_put(obj);
rc_blob->res_handle = bo->hw_res_handle;
rc_blob->bo_handle = handle;
/*
* The handle owns the reference now. But we must drop our
* remaining reference *after* we no longer need to dereference
* the obj. Otherwise userspace could guess the handle and
* race closing it from another thread.
*/
drm_gem_object_put(obj);
return 0;
}
......
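The rule both virtio hunks enforce: drop the local reference only after the last dereference of obj, since the moment the handle exists another thread can guess it and close it, leaving the local pointer dangling if ours was put early. A condensed userspace sketch, with a hypothetical gem_obj type as a single-threaded stand-in for the race:

#include <stdio.h>
#include <stdlib.h>

struct gem_obj { int refcount; int hw_res_handle; };

static void obj_put(struct gem_obj *o)
{
	if (--o->refcount == 0)
		free(o);	/* final reference frees the object */
}

/* After this returns, another thread could close the handle at any
 * time, dropping the reference the handle table holds. */
static int handle_create(struct gem_obj *o)
{
	o->refcount++;
	return 42;	/* hypothetical handle value */
}

static void create_resource(struct gem_obj *o, int *out_handle, int *out_res)
{
	int handle = handle_create(o);

	/* Wrong order: obj_put(o) here would let a racing close free 'o'
	 * before the next line reads o->hw_res_handle. */
	*out_res = o->hw_res_handle;	/* last dereference... */
	*out_handle = handle;
	obj_put(o);			/* ...then drop our reference */
}

int main(void)
{
	struct gem_obj *o = calloc(1, sizeof(*o));
	int h, res;

	if (!o)
		return 1;
	o->refcount = 1;
	o->hw_res_handle = 7;
	create_resource(o, &h, &res);
	printf("handle=%d res=%d\n", h, res);
	obj_put(o);	/* stand-in for userspace closing the handle */
	return 0;
}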
......@@ -254,40 +254,6 @@ void ttm_base_object_unref(struct ttm_base_object **p_base)
kref_put(&base->refcount, ttm_release_base);
}
/**
* ttm_base_object_noref_lookup - look up a base object without reference
* @tfile: The struct ttm_object_file the object is registered with.
* @key: The object handle.
*
* This function looks up a ttm base object and returns a pointer to it
* without refcounting the pointer. The returned pointer is only valid
* until ttm_base_object_noref_release() is called, and the object
* pointed to by the returned pointer may be doomed. Any persistent usage
* of the object requires a refcount to be taken using kref_get_unless_zero().
* Iff this function returns successfully it needs to be paired with
* ttm_base_object_noref_release() and no sleeping- or scheduling functions
* may be called in between these function calls.
*
* Return: A pointer to the object if successful or NULL otherwise.
*/
struct ttm_base_object *
ttm_base_object_noref_lookup(struct ttm_object_file *tfile, uint64_t key)
{
struct vmwgfx_hash_item *hash;
int ret;
rcu_read_lock();
ret = ttm_tfile_find_ref_rcu(tfile, key, &hash);
if (ret) {
rcu_read_unlock();
return NULL;
}
__release(RCU);
return hlist_entry(hash, struct ttm_ref_object, hash)->obj;
}
EXPORT_SYMBOL(ttm_base_object_noref_lookup);
struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
uint64_t key)
{
......@@ -295,15 +261,16 @@ struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
struct vmwgfx_hash_item *hash;
int ret;
rcu_read_lock();
ret = ttm_tfile_find_ref_rcu(tfile, key, &hash);
spin_lock(&tfile->lock);
ret = ttm_tfile_find_ref(tfile, key, &hash);
if (likely(ret == 0)) {
base = hlist_entry(hash, struct ttm_ref_object, hash)->obj;
if (!kref_get_unless_zero(&base->refcount))
base = NULL;
}
rcu_read_unlock();
spin_unlock(&tfile->lock);
return base;
}
......
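The rewritten lookup takes tfile->lock and grabs the reference with kref_get_unless_zero() while the lock still prevents the final put from tearing the entry down, which is the standard way to make a table lookup robust without RCU tricks. A generic kernel-style sketch of the pattern, with a hypothetical entry type and table_find() helper:

#include <linux/kref.h>
#include <linux/spinlock.h>

struct entry {
	struct kref refcount;
	unsigned long key;
	/* ... hash-table linkage and payload ... */
};

static DEFINE_SPINLOCK(table_lock);

/* Hypothetical helper: find 'key' in a table protected by table_lock. */
struct entry *table_find(unsigned long key);

struct entry *entry_lookup(unsigned long key)
{
	struct entry *e;

	spin_lock(&table_lock);
	e = table_find(key);
	/* A zero refcount means a concurrent release already won; the
	 * lock guarantees the entry is still linked, but only
	 * kref_get_unless_zero() tells us the object is still live. */
	if (e && !kref_get_unless_zero(&e->refcount))
		e = NULL;
	spin_unlock(&table_lock);

	return e;
}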
......@@ -307,18 +307,4 @@ extern int ttm_prime_handle_to_fd(struct ttm_object_file *tfile,
#define ttm_prime_object_kfree(__obj, __prime) \
kfree_rcu(__obj, __prime.base.rhead)
struct ttm_base_object *
ttm_base_object_noref_lookup(struct ttm_object_file *tfile, uint64_t key);
/**
* ttm_base_object_noref_release - release a base object pointer looked up
* without reference
*
* Releases a base object pointer looked up with ttm_base_object_noref_lookup().
*/
static inline void ttm_base_object_noref_release(void)
{
__acquire(RCU);
rcu_read_unlock();
}
#endif
......@@ -715,44 +715,6 @@ int vmw_user_bo_lookup(struct drm_file *filp,
return 0;
}
/**
* vmw_user_bo_noref_lookup - Look up a vmw user buffer object without reference
* @filp: The TTM object file the handle is registered with.
* @handle: The user buffer object handle.
*
* This function looks up a struct vmw_bo and returns a pointer to the
* struct vmw_buffer_object it derives from without refcounting the pointer.
* The returned pointer is only valid until vmw_user_bo_noref_release() is
* called, and the object pointed to by the returned pointer may be doomed.
* Any persistent usage of the object requires a refcount to be taken using
* ttm_bo_reference_unless_doomed(). Iff this function returns successfully it
* needs to be paired with vmw_user_bo_noref_release() and no sleeping-
* or scheduling functions may be called in between these function calls.
*
* Return: A struct vmw_buffer_object pointer if successful or negative
* error pointer on failure.
*/
struct vmw_buffer_object *
vmw_user_bo_noref_lookup(struct drm_file *filp, u32 handle)
{
struct vmw_buffer_object *vmw_bo;
struct ttm_buffer_object *bo;
struct drm_gem_object *gobj = drm_gem_object_lookup(filp, handle);
if (!gobj) {
DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
(unsigned long)handle);
return ERR_PTR(-ESRCH);
}
vmw_bo = gem_to_vmw_bo(gobj);
bo = ttm_bo_get_unless_zero(&vmw_bo->base);
vmw_bo = vmw_buffer_object(bo);
drm_gem_object_put(gobj);
return vmw_bo;
}
/**
* vmw_bo_fence_single - Utility function to fence a single TTM buffer
* object without unreserving it.
......
......@@ -830,12 +830,7 @@ extern int vmw_user_resource_lookup_handle(
uint32_t handle,
const struct vmw_user_resource_conv *converter,
struct vmw_resource **p_res);
extern struct vmw_resource *
vmw_user_resource_noref_lookup_handle(struct vmw_private *dev_priv,
struct ttm_object_file *tfile,
uint32_t handle,
const struct vmw_user_resource_conv *
converter);
extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
......@@ -874,15 +869,6 @@ static inline bool vmw_resource_mob_attached(const struct vmw_resource *res)
return !RB_EMPTY_NODE(&res->mob_node);
}
/**
* vmw_user_resource_noref_release - release a user resource pointer looked up
* without reference
*/
static inline void vmw_user_resource_noref_release(void)
{
ttm_base_object_noref_release();
}
/**
* Buffer object helper functions - vmwgfx_bo.c
*/
......@@ -934,8 +920,6 @@ extern void vmw_bo_unmap(struct vmw_buffer_object *vbo);
extern void vmw_bo_move_notify(struct ttm_buffer_object *bo,
struct ttm_resource *mem);
extern void vmw_bo_swap_notify(struct ttm_buffer_object *bo);
extern struct vmw_buffer_object *
vmw_user_bo_noref_lookup(struct drm_file *filp, u32 handle);
/**
* vmw_bo_adjust_prio - Adjust the buffer object eviction priority
......
......@@ -281,39 +281,6 @@ int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
return ret;
}
/**
* vmw_user_resource_noref_lookup_handle - lookup a struct resource from a
* TTM user-space handle and perform basic type checks
*
* @dev_priv: Pointer to a device private struct
* @tfile: Pointer to a struct ttm_object_file identifying the caller
* @handle: The TTM user-space handle
* @converter: Pointer to an object describing the resource type
*
* If the handle can't be found or is associated with an incorrect resource
* type, -EINVAL will be returned.
*/
struct vmw_resource *
vmw_user_resource_noref_lookup_handle(struct vmw_private *dev_priv,
struct ttm_object_file *tfile,
uint32_t handle,
const struct vmw_user_resource_conv
*converter)
{
struct ttm_base_object *base;
base = ttm_base_object_noref_lookup(tfile, handle);
if (!base)
return ERR_PTR(-ESRCH);
if (unlikely(ttm_base_object_type(base) != converter->object_type)) {
ttm_base_object_noref_release();
return ERR_PTR(-EINVAL);
}
return converter->base_obj_to_res(base);
}
/*
* Helper function that looks either a surface or bo.
*
......