Commit 839e0f04 authored by Linus Torvalds

Merge tag 'drm-fixes-2019-10-18' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "This is this weeks fixes for drm.

  The dma-resv one is probably the most important, as a fair few people
  have reported it. Besides that, there are a couple of panfrost fixes,
  a few i915 fixes and a few amdgpu fixes.

  One radeon patch that fixed some ppc64-related issues caused an x86
  regression, so it is being reverted for now.

  Summary:

  dma-resv:
   - shared fences for lima/panfrost

  ttm:
   - prefault regression fix
   - lifetime fix

  panfrost:
   - stopped job timeout fix
   - missing register values

  amdgpu:
   - smu7 powerplay fix
   - bail earlier for cik/si detection
   - navi SDMA fix

  radeon:
   - revert a ppc64 shutdown fix that broke x86

  i915:
   - VBT information handling fix
   - Circular locking fix
   - preemption vs resubmission virtual requests fix"

* tag 'drm-fixes-2019-10-18' of git://anongit.freedesktop.org/drm/drm:
  drm/i915: Fixup preempt-to-busy vs resubmission of a virtual request
  drm/i915/userptr: Never allow userptr into the mappable GGTT
  drm/i915: Favor last VBT child device with conflicting AUX ch/DDC pin
  drm/i915/execlists: Refactor -EIO markup of hung requests
  drm/panfrost: Handle resetting on timeout better
  drm/panfrost: Add missing GPU feature registers
  drm/ttm: fix handling in ttm_bo_add_mem_to_lru
  drm/ttm: Restore ttm prefaulting
  drm/ttm: fix busy reference in ttm_mem_evict_first
  drm/amdgpu/sdma5: fix mask value of POLL_REGMEM packet for pipe sync
  drm/amdgpu: Bail earlier when amdgpu.cik_/si_support is not set to 1
  Revert "drm/radeon: Fix EEH during kexec"
  drm/msm/dsi: Implement reset correctly
  dma-buf/resv: fix exclusive fence get
  drm/edid: Add 6 bpc quirk for SDC panel in Lenovo G50
  drm/tiny: Kconfig: Remove always-y THERMAL dep. from TINYDRM_REPAPER
  drm/amdgpu/powerplay: fix typo in mvdd table setup
parents 84629d43 5c1e34b5
@@ -471,7 +471,7 @@ int dma_resv_get_fences_rcu(struct dma_resv *obj,
 	if (pfence_excl)
 		*pfence_excl = fence_excl;
 	else if (fence_excl)
-		shared[++shared_count] = fence_excl;
+		shared[shared_count++] = fence_excl;
 
 	if (!shared_count) {
 		kfree(shared);
......
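Note on the change above: the pre-increment form bumps the counter before storing, so the exclusive fence lands one slot past the last shared entry, the slot the new count claims to cover stays uninitialized, and the store can run past the end of the allocation. A standalone sketch of the two append idioms, purely illustrative and not kernel code:

/* "arr[n++] = x" appends at the old count; "arr[++n] = x" skips a slot. */
#include <stdio.h>

int main(void)
{
	int good[4] = { 0 }, bad[4] = { 0 };
	int ngood = 2, nbad = 2;	/* two entries already stored */

	good[ngood++] = 99;	/* writes index 2, count becomes 3 */
	bad[++nbad] = 99;	/* count becomes 3 first, then writes index 3 */

	printf("good: last counted entry = %d\n", good[ngood - 1]);	/* 99 */
	printf("bad:  last counted entry = %d\n", bad[nbad - 1]);	/* 0, stale slot */
	return 0;
}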
@@ -1048,6 +1048,41 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
 		return -ENODEV;
 	}
 
+#ifdef CONFIG_DRM_AMDGPU_SI
+	if (!amdgpu_si_support) {
+		switch (flags & AMD_ASIC_MASK) {
+		case CHIP_TAHITI:
+		case CHIP_PITCAIRN:
+		case CHIP_VERDE:
+		case CHIP_OLAND:
+		case CHIP_HAINAN:
+			dev_info(&pdev->dev,
+				 "SI support provided by radeon.\n");
+			dev_info(&pdev->dev,
+				 "Use radeon.si_support=0 amdgpu.si_support=1 to override.\n"
+				);
+			return -ENODEV;
+		}
+	}
+#endif
+#ifdef CONFIG_DRM_AMDGPU_CIK
+	if (!amdgpu_cik_support) {
+		switch (flags & AMD_ASIC_MASK) {
+		case CHIP_KAVERI:
+		case CHIP_BONAIRE:
+		case CHIP_HAWAII:
+		case CHIP_KABINI:
+		case CHIP_MULLINS:
+			dev_info(&pdev->dev,
+				 "CIK support provided by radeon.\n");
+			dev_info(&pdev->dev,
+				 "Use radeon.cik_support=0 amdgpu.cik_support=1 to override.\n"
+				);
+			return -ENODEV;
+		}
+	}
+#endif
+
 	/* Get rid of things like offb */
 	ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, "amdgpudrmfb");
 	if (ret)
......
@@ -144,41 +144,6 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
 	struct amdgpu_device *adev;
 	int r, acpi_status;
 
-#ifdef CONFIG_DRM_AMDGPU_SI
-	if (!amdgpu_si_support) {
-		switch (flags & AMD_ASIC_MASK) {
-		case CHIP_TAHITI:
-		case CHIP_PITCAIRN:
-		case CHIP_VERDE:
-		case CHIP_OLAND:
-		case CHIP_HAINAN:
-			dev_info(dev->dev,
-				 "SI support provided by radeon.\n");
-			dev_info(dev->dev,
-				 "Use radeon.si_support=0 amdgpu.si_support=1 to override.\n"
-				);
-			return -ENODEV;
-		}
-	}
-#endif
-#ifdef CONFIG_DRM_AMDGPU_CIK
-	if (!amdgpu_cik_support) {
-		switch (flags & AMD_ASIC_MASK) {
-		case CHIP_KAVERI:
-		case CHIP_BONAIRE:
-		case CHIP_HAWAII:
-		case CHIP_KABINI:
-		case CHIP_MULLINS:
-			dev_info(dev->dev,
-				 "CIK support provided by radeon.\n");
-			dev_info(dev->dev,
-				 "Use radeon.cik_support=0 amdgpu.cik_support=1 to override.\n"
-				);
-			return -ENODEV;
-		}
-	}
-#endif
-
 	adev = kzalloc(sizeof(struct amdgpu_device), GFP_KERNEL);
 	if (adev == NULL) {
 		return -ENOMEM;
......
@@ -1129,7 +1129,7 @@ static void sdma_v5_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
 	amdgpu_ring_write(ring, addr & 0xfffffffc);
 	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
 	amdgpu_ring_write(ring, seq); /* reference */
-	amdgpu_ring_write(ring, 0xfffffff); /* mask */
+	amdgpu_ring_write(ring, 0xffffffff); /* mask */
 	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
 }
......
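Note on the mask change above: widening the mask from 0xfffffff to 0xffffffff makes the poll compare the full 32-bit sequence number instead of only its low 28 bits. The sketch below assumes the usual poll semantics of (value & mask) == reference; that assumption is mine, not taken from the SDMA packet definition:

/* With a 28-bit mask, two sequence numbers that differ only in the top
 * nibble are wrongly treated as equal. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool poll_matches(uint32_t value, uint32_t reference, uint32_t mask)
{
	return (value & mask) == (reference & mask);
}

int main(void)
{
	uint32_t seq = 0x10000001, stale = 0x00000001;

	printf("28-bit mask: %d\n", poll_matches(stale, seq, 0x0fffffff));	/* 1: false match */
	printf("32-bit mask: %d\n", poll_matches(stale, seq, 0xffffffff));	/* 0: correct */
	return 0;
}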
@@ -655,7 +655,7 @@ static int polaris10_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
 		count = SMU_MAX_SMIO_LEVELS;
 		for (level = 0; level < count; level++) {
 			table->SmioTable2.Pattern[level].Voltage =
-				PP_HOST_TO_SMC_US(data->mvdd_voltage_table.entries[count].value * VOLTAGE_SCALE);
+				PP_HOST_TO_SMC_US(data->mvdd_voltage_table.entries[level].value * VOLTAGE_SCALE);
 			/* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level.*/
 			table->SmioTable2.Pattern[level].Smio =
 				(uint8_t) level;
......
@@ -456,7 +456,7 @@ static int vegam_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
 		count = SMU_MAX_SMIO_LEVELS;
 		for (level = 0; level < count; level++) {
 			table->SmioTable2.Pattern[level].Voltage = PP_HOST_TO_SMC_US(
-					data->mvdd_voltage_table.entries[count].value * VOLTAGE_SCALE);
+					data->mvdd_voltage_table.entries[level].value * VOLTAGE_SCALE);
 			/* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level.*/
 			table->SmioTable2.Pattern[level].Smio =
 				(uint8_t) level;
......
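Both mvdd hunks fix the same indexing typo: every voltage level was filled from entries[count], a fixed out-of-range element, instead of entries[level]. A minimal standalone illustration with made-up values (not the powerplay structures):

/* Using the loop bound as an index copies the same bogus element into
 * every slot; the extra array slot here stands in for the out-of-range
 * read so the sketch stays well-defined. */
#include <stdio.h>

#define LEVELS 4

int main(void)
{
	int entries[LEVELS + 1] = { 700, 750, 800, 850, -1 };	/* mV, invented */
	int pattern_bad[LEVELS], pattern_good[LEVELS];
	int count = LEVELS, level;

	for (level = 0; level < count; level++) {
		pattern_bad[level] = entries[count];	/* bug: loop bound as index */
		pattern_good[level] = entries[level];	/* fix: loop variable as index */
	}

	for (level = 0; level < count; level++)
		printf("level %d: bad=%d good=%d\n", level, pattern_bad[level], pattern_good[level]);
	return 0;
}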
@@ -159,6 +159,9 @@ static const struct edid_quirk {
 	/* Medion MD 30217 PG */
 	{ "MED", 0x7b8, EDID_QUIRK_PREFER_LARGE_75 },
 
+	/* Lenovo G50 */
+	{ "SDC", 18514, EDID_QUIRK_FORCE_6BPC },
+
 	/* Panel in Samsung NP700G7A-S01PL notebook reports 6bpc */
 	{ "SEC", 0xd033, EDID_QUIRK_FORCE_8BPC },
......
@@ -1270,7 +1270,7 @@ static void sanitize_ddc_pin(struct drm_i915_private *dev_priv,
 		DRM_DEBUG_KMS("port %c trying to use the same DDC pin (0x%x) as port %c, "
			      "disabling port %c DVI/HDMI support\n",
			      port_name(port), info->alternate_ddc_pin,
-			      port_name(p), port_name(port));
+			      port_name(p), port_name(p));
 
 		/*
 		 * If we have multiple ports supposedly sharing the
@@ -1278,9 +1278,14 @@ static void sanitize_ddc_pin(struct drm_i915_private *dev_priv,
 		 * port. Otherwise they share the same ddc bin and
 		 * system couldn't communicate with them separately.
 		 *
-		 * Give child device order the priority, first come first
-		 * served.
+		 * Give inverse child device order the priority,
+		 * last one wins. Yes, there are real machines
+		 * (eg. Asrock B250M-HDV) where VBT has both
+		 * port A and port E with the same AUX ch and
+		 * we must pick port E :(
 		 */
+		info = &dev_priv->vbt.ddi_port_info[p];
+
 		info->supports_dvi = false;
 		info->supports_hdmi = false;
 		info->alternate_ddc_pin = 0;
@@ -1316,7 +1321,7 @@ static void sanitize_aux_ch(struct drm_i915_private *dev_priv,
 		DRM_DEBUG_KMS("port %c trying to use the same AUX CH (0x%x) as port %c, "
			      "disabling port %c DP support\n",
			      port_name(port), info->alternate_aux_channel,
-			      port_name(p), port_name(port));
+			      port_name(p), port_name(p));
 
 		/*
 		 * If we have multiple ports supposedlt sharing the
@@ -1324,9 +1329,14 @@ static void sanitize_aux_ch(struct drm_i915_private *dev_priv,
 		 * port. Otherwise they share the same aux channel
 		 * and system couldn't communicate with them separately.
 		 *
-		 * Give child device order the priority, first come first
-		 * served.
+		 * Give inverse child device order the priority,
+		 * last one wins. Yes, there are real machines
+		 * (eg. Asrock B250M-HDV) where VBT has both
+		 * port A and port E with the same AUX ch and
+		 * we must pick port E :(
 		 */
+		info = &dev_priv->vbt.ddi_port_info[p];
+
 		info->supports_dp = false;
 		info->alternate_aux_channel = 0;
 	}
......
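The VBT hunks above switch the conflict policy to "last one wins": when a later child device claims an AUX channel or DDC pin that an earlier port already uses, the earlier port's info is cleared rather than the new one's. A rough sketch of that policy over a simplified, hypothetical port table (not the i915 data structures):

/* Simplified "last one wins" resolution: the earlier claimant of a
 * shared AUX channel is disabled, the latest one is kept. */
#include <stdio.h>

#define NUM_PORTS 5

struct port_info {
	int aux_ch;		/* 0 = unused */
	int supports_dp;
};

static void claim_aux_ch(struct port_info *ports, int port, int aux_ch)
{
	for (int p = 0; p < NUM_PORTS; p++) {
		if (p != port && ports[p].aux_ch == aux_ch) {
			/* disable the earlier claimant, keep the latest */
			ports[p].aux_ch = 0;
			ports[p].supports_dp = 0;
		}
	}
	ports[port].aux_ch = aux_ch;
	ports[port].supports_dp = 1;
}

int main(void)
{
	struct port_info ports[NUM_PORTS] = { 0 };

	claim_aux_ch(ports, 0, 0x40);	/* port A claims AUX ch 0x40 */
	claim_aux_ch(ports, 4, 0x40);	/* port E later claims the same AUX ch */

	for (int p = 0; p < NUM_PORTS; p++)
		printf("port %c: dp=%d\n", 'A' + p, ports[p].supports_dp);
	return 0;	/* only port E keeps DP enabled */
}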
@@ -364,6 +364,7 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
 		return VM_FAULT_OOM;
 	case -ENOSPC:
 	case -EFAULT:
+	case -ENODEV: /* bad object, how did you get here! */
 		return VM_FAULT_SIGBUS;
 	default:
 		WARN_ONCE(ret, "unhandled error in %s: %i\n", __func__, ret);
@@ -475,10 +476,16 @@ i915_gem_mmap_gtt(struct drm_file *file,
 	if (!obj)
 		return -ENOENT;
 
+	if (i915_gem_object_never_bind_ggtt(obj)) {
+		ret = -ENODEV;
+		goto out;
+	}
+
 	ret = create_mmap_offset(obj);
 	if (ret == 0)
 		*offset = drm_vma_node_offset_addr(&obj->base.vma_node);
 
+out:
 	i915_gem_object_put(obj);
 	return ret;
 }
......
@@ -152,6 +152,12 @@ i915_gem_object_is_proxy(const struct drm_i915_gem_object *obj)
 	return obj->ops->flags & I915_GEM_OBJECT_IS_PROXY;
 }
 
+static inline bool
+i915_gem_object_never_bind_ggtt(const struct drm_i915_gem_object *obj)
+{
+	return obj->ops->flags & I915_GEM_OBJECT_NO_GGTT;
+}
+
 static inline bool
 i915_gem_object_needs_async_cancel(const struct drm_i915_gem_object *obj)
 {
......
@@ -32,7 +32,8 @@ struct drm_i915_gem_object_ops {
 #define I915_GEM_OBJECT_HAS_STRUCT_PAGE BIT(0)
 #define I915_GEM_OBJECT_IS_SHRINKABLE BIT(1)
 #define I915_GEM_OBJECT_IS_PROXY BIT(2)
-#define I915_GEM_OBJECT_ASYNC_CANCEL BIT(3)
+#define I915_GEM_OBJECT_NO_GGTT BIT(3)
+#define I915_GEM_OBJECT_ASYNC_CANCEL BIT(4)
 
 /* Interface between the GEM object and its backing storage.
  * get_pages() is called once prior to the use of the associated set
......
@@ -702,6 +702,7 @@ i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
 static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
 	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
 		 I915_GEM_OBJECT_IS_SHRINKABLE |
+		 I915_GEM_OBJECT_NO_GGTT |
 		 I915_GEM_OBJECT_ASYNC_CANCEL,
 	.get_pages = i915_gem_userptr_get_pages,
 	.put_pages = i915_gem_userptr_put_pages,
......
@@ -234,6 +234,13 @@ static void execlists_init_reg_state(u32 *reg_state,
				      struct intel_engine_cs *engine,
				      struct intel_ring *ring);
 
+static void mark_eio(struct i915_request *rq)
+{
+	if (!i915_request_signaled(rq))
+		dma_fence_set_error(&rq->fence, -EIO);
+	i915_request_mark_complete(rq);
+}
+
 static inline u32 intel_hws_preempt_address(struct intel_engine_cs *engine)
 {
 	return (i915_ggtt_offset(engine->status_page.vma) +
@@ -1236,6 +1243,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 				submit = true;
 				last = rq;
 			}
+			i915_request_put(rq);
 
 			/*
 			 * Hmm, we have a bunch of virtual engine requests,
@@ -2574,12 +2582,8 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
 	__execlists_reset(engine, true);
 
 	/* Mark all executing requests as skipped. */
-	list_for_each_entry(rq, &engine->active.requests, sched.link) {
-		if (!i915_request_signaled(rq))
-			dma_fence_set_error(&rq->fence, -EIO);
-		i915_request_mark_complete(rq);
-	}
+	list_for_each_entry(rq, &engine->active.requests, sched.link)
+		mark_eio(rq);
 
 	/* Flush the queued requests to the timeline list (for retiring). */
 	while ((rb = rb_first_cached(&execlists->queue))) {
@@ -2587,9 +2591,8 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
 		int i;
 
 		priolist_for_each_request_consume(rq, rn, p, i) {
+			mark_eio(rq);
 			__i915_request_submit(rq);
-			dma_fence_set_error(&rq->fence, -EIO);
-			i915_request_mark_complete(rq);
 		}
 
 		rb_erase_cached(&p->node, &execlists->queue);
@@ -2605,13 +2608,15 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
 		RB_CLEAR_NODE(rb);
 
 		spin_lock(&ve->base.active.lock);
-		if (ve->request) {
-			ve->request->engine = engine;
-			__i915_request_submit(ve->request);
-			dma_fence_set_error(&ve->request->fence, -EIO);
-			i915_request_mark_complete(ve->request);
+		rq = fetch_and_zero(&ve->request);
+		if (rq) {
+			mark_eio(rq);
+
+			rq->engine = engine;
+			__i915_request_submit(rq);
+			i915_request_put(rq);
+
 			ve->base.execlists.queue_priority_hint = INT_MIN;
-			ve->request = NULL;
 		}
 		spin_unlock(&ve->base.active.lock);
 	}
@@ -3615,6 +3620,8 @@ static void virtual_submission_tasklet(unsigned long data)
 static void virtual_submit_request(struct i915_request *rq)
 {
 	struct virtual_engine *ve = to_virtual_engine(rq->engine);
+	struct i915_request *old;
+	unsigned long flags;
 
 	GEM_TRACE("%s: rq=%llx:%lld\n",
		  ve->base.name,
@@ -3623,15 +3630,31 @@ static void virtual_submit_request(struct i915_request *rq)
 	GEM_BUG_ON(ve->base.submit_request != virtual_submit_request);
 
-	GEM_BUG_ON(ve->request);
-	GEM_BUG_ON(!list_empty(virtual_queue(ve)));
+	spin_lock_irqsave(&ve->base.active.lock, flags);
+
+	old = ve->request;
+	if (old) { /* background completion event from preempt-to-busy */
+		GEM_BUG_ON(!i915_request_completed(old));
+		__i915_request_submit(old);
+		i915_request_put(old);
+	}
 
-	ve->base.execlists.queue_priority_hint = rq_prio(rq);
-	WRITE_ONCE(ve->request, rq);
+	if (i915_request_completed(rq)) {
+		__i915_request_submit(rq);
 
-	list_move_tail(&rq->sched.link, virtual_queue(ve));
+		ve->base.execlists.queue_priority_hint = INT_MIN;
+		ve->request = NULL;
+	} else {
+		ve->base.execlists.queue_priority_hint = rq_prio(rq);
+		ve->request = i915_request_get(rq);
+
+		GEM_BUG_ON(!list_empty(virtual_queue(ve)));
+		list_move_tail(&rq->sched.link, virtual_queue(ve));
+
+		tasklet_schedule(&ve->base.execlists.tasklet);
+	}
 
-	tasklet_schedule(&ve->base.execlists.tasklet);
+	spin_unlock_irqrestore(&ve->base.active.lock, flags);
 }
 
 static struct ve_bond *
......
@@ -969,6 +969,9 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
 	lockdep_assert_held(&obj->base.dev->struct_mutex);
 
+	if (i915_gem_object_never_bind_ggtt(obj))
+		return ERR_PTR(-ENODEV);
+
 	if (flags & PIN_MAPPABLE &&
	    (!view || view->type == I915_GGTT_VIEW_NORMAL)) {
 		/* If the required space is larger than the available
......
@@ -26,6 +26,8 @@
 #include "dsi_cfg.h"
 #include "msm_kms.h"
 
+#define DSI_RESET_TOGGLE_DELAY_MS 20
+
 static int dsi_get_version(const void __iomem *base, u32 *major, u32 *minor)
 {
 	u32 ver;
@@ -986,7 +988,7 @@ static void dsi_sw_reset(struct msm_dsi_host *msm_host)
 	wmb(); /* clocks need to be enabled before reset */
 
 	dsi_write(msm_host, REG_DSI_RESET, 1);
-	wmb(); /* make sure reset happen */
+	msleep(DSI_RESET_TOGGLE_DELAY_MS); /* make sure reset happen */
 	dsi_write(msm_host, REG_DSI_RESET, 0);
 }
@@ -1396,7 +1398,7 @@ static void dsi_sw_reset_restore(struct msm_dsi_host *msm_host)
 	/* dsi controller can only be reset while clocks are running */
 	dsi_write(msm_host, REG_DSI_RESET, 1);
-	wmb(); /* make sure reset happen */
+	msleep(DSI_RESET_TOGGLE_DELAY_MS); /* make sure reset happen */
 	dsi_write(msm_host, REG_DSI_RESET, 0);
 	wmb(); /* controller out of reset */
 	dsi_write(msm_host, REG_DSI_CTRL, data0);
......
@@ -208,6 +208,9 @@ static void panfrost_gpu_init_features(struct panfrost_device *pfdev)
 	pfdev->features.mem_features = gpu_read(pfdev, GPU_MEM_FEATURES);
 	pfdev->features.mmu_features = gpu_read(pfdev, GPU_MMU_FEATURES);
 	pfdev->features.thread_features = gpu_read(pfdev, GPU_THREAD_FEATURES);
+	pfdev->features.max_threads = gpu_read(pfdev, GPU_THREAD_MAX_THREADS);
+	pfdev->features.thread_max_workgroup_sz = gpu_read(pfdev, GPU_THREAD_MAX_WORKGROUP_SIZE);
+	pfdev->features.thread_max_barrier_sz = gpu_read(pfdev, GPU_THREAD_MAX_BARRIER_SIZE);
 	pfdev->features.coherency_features = gpu_read(pfdev, GPU_COHERENCY_FEATURES);
 	for (i = 0; i < 4; i++)
 		pfdev->features.texture_features[i] = gpu_read(pfdev, GPU_TEXTURE_FEATURES(i));
......
@@ -381,13 +381,19 @@ static void panfrost_job_timedout(struct drm_sched_job *sched_job)
		job_read(pfdev, JS_TAIL_LO(js)),
		sched_job);
 
-	mutex_lock(&pfdev->reset_lock);
+	if (!mutex_trylock(&pfdev->reset_lock))
+		return;
 
-	for (i = 0; i < NUM_JOB_SLOTS; i++)
-		drm_sched_stop(&pfdev->js->queue[i].sched, sched_job);
+	for (i = 0; i < NUM_JOB_SLOTS; i++) {
+		struct drm_gpu_scheduler *sched = &pfdev->js->queue[i].sched;
+
+		drm_sched_stop(sched, sched_job);
+		if (js != i)
+			/* Ensure any timeouts on other slots have finished */
+			cancel_delayed_work_sync(&sched->work_tdr);
+	}
 
-	if (sched_job)
-		drm_sched_increase_karma(sched_job);
+	drm_sched_increase_karma(sched_job);
 
 	spin_lock_irqsave(&pfdev->js->job_lock, flags);
 	for (i = 0; i < NUM_JOB_SLOTS; i++) {
......
@@ -379,19 +379,11 @@ radeon_pci_remove(struct pci_dev *pdev)
 static void
 radeon_pci_shutdown(struct pci_dev *pdev)
 {
-	struct drm_device *ddev = pci_get_drvdata(pdev);
-
 	/* if we are running in a VM, make sure the device
 	 * torn down properly on reboot/shutdown
 	 */
 	if (radeon_device_is_virtual())
 		radeon_pci_remove(pdev);
-
-	/* Some adapters need to be suspended before a
-	 * shutdown occurs in order to prevent an error
-	 * during kexec.
-	 */
-	radeon_suspend_kms(ddev, true, true, false);
 }
 
 static int radeon_pmops_suspend(struct device *dev)
......
@@ -63,7 +63,6 @@ config TINYDRM_REPAPER
 	depends on DRM && SPI
 	select DRM_KMS_HELPER
 	select DRM_KMS_CMA_HELPER
-	depends on THERMAL || !THERMAL
 	help
 	  DRM driver for the following Pervasive Displays panels:
 	  1.44" TFT EPD Panel (E1144CS021)
......
@@ -185,8 +185,9 @@ static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo,
 	list_add_tail(&bo->lru, &man->lru[bo->priority]);
 	kref_get(&bo->list_kref);
 
-	if (bo->ttm && !(bo->ttm->page_flags &
-			 (TTM_PAGE_FLAG_SG | TTM_PAGE_FLAG_SWAPPED))) {
+	if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm &&
+	    !(bo->ttm->page_flags & (TTM_PAGE_FLAG_SG |
+				     TTM_PAGE_FLAG_SWAPPED))) {
 		list_add_tail(&bo->swap, &bdev->glob->swap_lru[bo->priority]);
 		kref_get(&bo->list_kref);
 	}
@@ -878,11 +879,11 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
 	if (!bo) {
 		if (busy_bo)
-			ttm_bo_get(busy_bo);
+			kref_get(&busy_bo->list_kref);
 		spin_unlock(&glob->lru_lock);
 		ret = ttm_mem_evict_wait_busy(busy_bo, ctx, ticket);
 		if (busy_bo)
-			ttm_bo_put(busy_bo);
+			kref_put(&busy_bo->list_kref, ttm_bo_release_list);
 		return ret;
 	}
......
@@ -278,15 +278,13 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
 		else
 			ret = vmf_insert_pfn(&cvma, address, pfn);
 
-		/*
-		 * Somebody beat us to this PTE or prefaulting to
-		 * an already populated PTE, or prefaulting error.
-		 */
-
-		if (unlikely((ret == VM_FAULT_NOPAGE && i > 0)))
-			break;
-		else if (unlikely(ret & VM_FAULT_ERROR))
-			goto out_io_unlock;
+		/* Never error on prefaulted PTEs */
+		if (unlikely((ret & VM_FAULT_ERROR))) {
+			if (i == 0)
+				goto out_io_unlock;
+			else
+				break;
+		}
 
 		address += PAGE_SIZE;
 		if (unlikely(++page_offset >= page_last))
......
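The prefault hunk above restores the policy that only a failure on the page that actually faulted is reported; failures while speculatively prefaulting the following pages just end the loop. A hedged sketch of that pattern with hypothetical helpers (not the TTM code):

/* Only a failure at i == 0 (the real fault) is an error; a failure on
 * a prefault page silently stops the loop. */
#include <stdio.h>

#define PREFAULT_PAGES 4

static int insert_pte(int i) { return i == 2 ? -1 : 0; }	/* pretend page 2 fails */

static int handle_fault(void)
{
	for (int i = 0; i < PREFAULT_PAGES; i++) {
		if (insert_pte(i) < 0) {
			if (i == 0)
				return -1;	/* real fault failed: report it */
			break;			/* prefault failed: stop quietly */
		}
	}
	return 0;
}

int main(void)
{
	printf("fault handled: %d\n", handle_fault());	/* 0 */
	return 0;
}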