Commit 1660a76a authored by Linus Torvalds

Merge tag 'drm-fixes-for-v4.16-rc6' of git://people.freedesktop.org/~airlied/linux

Pull drm fixes from Dave Airlie:
 "i915, amd and nouveau fixes.

  i915:
   - backlight fix for some panels
   - pm fix
   - fencing fix
   - some GVT fixes

  amdgpu:
   - backlight fix across suspend/resume
   - object destruction ordering issue fix
   - displayport fix

  nouveau:
   - two backlight fixes
   - fix for some lockups

  Pretty quiet week, seems like everyone was fixing backlights"

* tag 'drm-fixes-for-v4.16-rc6' of git://people.freedesktop.org/~airlied/linux:
  drm/nouveau/bl: fix backlight regression
  drm/nouveau/bl: Fix oops on driver unbind
  drm/nouveau/mmu: ALIGN_DOWN correct variable
  drm/i915/gvt: fix user copy warning by whitelist workload rb_tail field
  drm/i915/gvt: Correct the privilege shadow batch buffer address
  drm/amdgpu/dce: Don't turn off DP sink when disconnected
  drm/amdgpu: save/restore backlight level in legacy dce code
  drm/radeon: fix prime teardown order
  drm/amdgpu: fix prime teardown order
  drm/i915: Kick the rps worker when changing the boost frequency
  drm/i915: Only prune fences after wait-for-all
  drm/i915: Enable VBT based BL control for DP
  drm/i915/gvt: keep oa config in shadow ctx
  drm/i915/gvt: Add runtime_pm_get/put into gvt_switch_mmio
parents df09348f 3a1b5de3
@@ -69,25 +69,18 @@ void amdgpu_connector_hotplug(struct drm_connector *connector)
 		/* don't do anything if sink is not display port, i.e.,
 		 * passive dp->(dvi|hdmi) adaptor
 		 */
-		if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
-			int saved_dpms = connector->dpms;
-			/* Only turn off the display if it's physically disconnected */
-			if (!amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd)) {
-				drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
-			} else if (amdgpu_atombios_dp_needs_link_train(amdgpu_connector)) {
-				/* Don't try to start link training before we
-				 * have the dpcd */
-				if (amdgpu_atombios_dp_get_dpcd(amdgpu_connector))
-					return;
-
-				/* set it to OFF so that drm_helper_connector_dpms()
-				 * won't return immediately since the current state
-				 * is ON at this point.
-				 */
-				connector->dpms = DRM_MODE_DPMS_OFF;
-				drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
-			}
-			connector->dpms = saved_dpms;
+		if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT &&
+		    amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd) &&
+		    amdgpu_atombios_dp_needs_link_train(amdgpu_connector)) {
+			/* Don't start link training before we have the DPCD */
+			if (amdgpu_atombios_dp_get_dpcd(amdgpu_connector))
+				return;
+
+			/* Turn the connector off and back on immediately, which
+			 * will trigger link training
+			 */
+			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
 		}
 	}
 }
@@ -36,8 +36,6 @@ void amdgpu_gem_object_free(struct drm_gem_object *gobj)
 	struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);
 
 	if (robj) {
-		if (robj->gem_base.import_attach)
-			drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
 		amdgpu_mn_unregister(robj);
 		amdgpu_bo_unref(&robj);
 	}
@@ -352,6 +352,7 @@ struct amdgpu_mode_info {
 	u16 firmware_flags;
 	/* pointer to backlight encoder */
 	struct amdgpu_encoder *bl_encoder;
+	u8 bl_level; /* saved backlight level */
 	struct amdgpu_audio audio; /* audio stuff */
 	int num_crtc; /* number of crtcs */
 	int num_hpd; /* number of hpd pins */
@@ -56,6 +56,8 @@ static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
 
 	amdgpu_bo_kunmap(bo);
 
+	if (bo->gem_base.import_attach)
+		drm_prime_gem_destroy(&bo->gem_base, bo->tbo.sg);
 	drm_gem_object_release(&bo->gem_base);
 	amdgpu_bo_unref(&bo->parent);
 	if (!list_empty(&bo->shadow_list)) {
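Together with the amdgpu_gem_object_free() hunk above, this moves drm_prime_gem_destroy() out of the GEM free path and into the TTM destroy callback, so the imported sg table is released only once the buffer object itself is torn down. A minimal sketch of the resulting teardown order, abridged to the calls visible in the two hunks (not the complete driver code):

/* Sketch: teardown order after the fix (abridged, based on the hunks above). */
void amdgpu_gem_object_free(struct drm_gem_object *gobj)
{
	struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);

	if (robj) {
		amdgpu_mn_unregister(robj);
		amdgpu_bo_unref(&robj);		/* may drop the last reference */
	}
}

static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);

	amdgpu_bo_kunmap(bo);
	/* the PRIME import is torn down only here, after TTM is done with the BO */
	if (bo->gem_base.import_attach)
		drm_prime_gem_destroy(&bo->gem_base, bo->tbo.sg);
	drm_gem_object_release(&bo->gem_base);
	/* ... remaining cleanup elided ... */
}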
@@ -34,7 +34,7 @@
 #include <linux/backlight.h>
 #include "bif/bif_4_1_d.h"
 
-static u8
+u8
 amdgpu_atombios_encoder_get_backlight_level_from_reg(struct amdgpu_device *adev)
 {
 	u8 backlight_level;

@@ -48,7 +48,7 @@ amdgpu_atombios_encoder_get_backlight_level_from_reg(struct amdgpu_device *adev)
 	return backlight_level;
 }
 
-static void
+void
 amdgpu_atombios_encoder_set_backlight_level_to_reg(struct amdgpu_device *adev,
 						    u8 backlight_level)
 {
@@ -24,6 +24,11 @@
 #ifndef __ATOMBIOS_ENCODER_H__
 #define __ATOMBIOS_ENCODER_H__
 
+u8
+amdgpu_atombios_encoder_get_backlight_level_from_reg(struct amdgpu_device *adev);
+void
+amdgpu_atombios_encoder_set_backlight_level_to_reg(struct amdgpu_device *adev,
+						    u8 backlight_level);
 u8
 amdgpu_atombios_encoder_get_backlight_level(struct amdgpu_encoder *amdgpu_encoder);
 void
@@ -2921,6 +2921,11 @@ static int dce_v10_0_hw_fini(void *handle)
 
 static int dce_v10_0_suspend(void *handle)
 {
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	adev->mode_info.bl_level =
+		amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
+
 	return dce_v10_0_hw_fini(handle);
 }
 
@@ -2929,6 +2934,9 @@ static int dce_v10_0_resume(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	int ret;
 
+	amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
+							   adev->mode_info.bl_level);
+
 	ret = dce_v10_0_hw_init(handle);
 
 	/* turn on the BL */
@@ -3047,6 +3047,11 @@ static int dce_v11_0_hw_fini(void *handle)
 
 static int dce_v11_0_suspend(void *handle)
 {
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	adev->mode_info.bl_level =
+		amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
+
 	return dce_v11_0_hw_fini(handle);
 }
 
@@ -3055,6 +3060,9 @@ static int dce_v11_0_resume(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	int ret;
 
+	amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
+							   adev->mode_info.bl_level);
+
 	ret = dce_v11_0_hw_init(handle);
 
 	/* turn on the BL */
@@ -2787,6 +2787,11 @@ static int dce_v6_0_hw_fini(void *handle)
 
 static int dce_v6_0_suspend(void *handle)
 {
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	adev->mode_info.bl_level =
+		amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
+
 	return dce_v6_0_hw_fini(handle);
 }
 
@@ -2795,6 +2800,9 @@ static int dce_v6_0_resume(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	int ret;
 
+	amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
+							   adev->mode_info.bl_level);
+
 	ret = dce_v6_0_hw_init(handle);
 
 	/* turn on the BL */
@@ -2819,6 +2819,11 @@ static int dce_v8_0_hw_fini(void *handle)
 
 static int dce_v8_0_suspend(void *handle)
 {
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	adev->mode_info.bl_level =
+		amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
+
 	return dce_v8_0_hw_fini(handle);
 }
 
@@ -2827,6 +2832,9 @@ static int dce_v8_0_resume(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	int ret;
 
+	amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
+							   adev->mode_info.bl_level);
+
 	ret = dce_v8_0_hw_init(handle);
 
 	/* turn on the BL */
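The same two-line pattern is applied to all four legacy DCE blocks (dce_v6_0, dce_v8_0, dce_v10_0, dce_v11_0): suspend snapshots the raw backlight register into adev->mode_info.bl_level, and resume writes it back before hw_init reprograms the display. A condensed sketch of the pattern, with dce_vN_0 standing in for any of the four versions (abridged; the real resume paths also re-enable the backlight afterwards):

/* Sketch only: dce_vN_0 is a placeholder for v6/v8/v10/v11. */
static int dce_vN_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* remember the hardware backlight level before the block is shut down */
	adev->mode_info.bl_level =
		amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);

	return dce_vN_0_hw_fini(handle);
}

static int dce_vN_0_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* restore it before hw_init so the panel comes back at the old level */
	amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
							   adev->mode_info.bl_level);

	return dce_vN_0_hw_init(handle);
}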
@@ -471,6 +471,7 @@ struct parser_exec_state {
 	 * used when ret from 2nd level batch buffer
 	 */
 	int saved_buf_addr_type;
+	bool is_ctx_wa;
 
 	struct cmd_info *info;
 

@@ -1715,6 +1716,11 @@ static int perform_bb_shadow(struct parser_exec_state *s)
 	bb->accessing = true;
 	bb->bb_start_cmd_va = s->ip_va;
 
+	if ((s->buf_type == BATCH_BUFFER_INSTRUCTION) && (!s->is_ctx_wa))
+		bb->bb_offset = s->ip_va - s->rb_va;
+	else
+		bb->bb_offset = 0;
+
 	/*
 	 * ip_va saves the virtual address of the shadow batch buffer, while
 	 * ip_gma saves the graphics address of the original batch buffer.

@@ -2571,6 +2577,7 @@ static int scan_workload(struct intel_vgpu_workload *workload)
 	s.ring_tail = gma_tail;
 	s.rb_va = workload->shadow_ring_buffer_va;
 	s.workload = workload;
+	s.is_ctx_wa = false;
 
 	if ((bypass_scan_mask & (1 << workload->ring_id)) ||
 			gma_head == gma_tail)

@@ -2624,6 +2631,7 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 	s.ring_tail = gma_tail;
 	s.rb_va = wa_ctx->indirect_ctx.shadow_va;
 	s.workload = workload;
+	s.is_ctx_wa = true;
 
 	if (!intel_gvt_ggtt_validate_range(s.vgpu, s.ring_start, s.ring_size)) {
 		ret = -EINVAL;
@@ -394,9 +394,11 @@ void intel_gvt_switch_mmio(struct intel_vgpu *pre,
 	 * performace for batch mmio read/write, so we need
 	 * handle forcewake mannually.
 	 */
+	intel_runtime_pm_get(dev_priv);
 	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 	switch_mmio(pre, next, ring_id);
 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+	intel_runtime_pm_put(dev_priv);
 }
 
 /**
@@ -52,6 +52,54 @@ static void set_context_pdp_root_pointer(
 		pdp_pair[i].val = pdp[7 - i];
 }
 
+/*
+ * when populating shadow ctx from guest, we should not overrride oa related
+ * registers, so that they will not be overlapped by guest oa configs. Thus
+ * made it possible to capture oa data from host for both host and guests.
+ */
+static void sr_oa_regs(struct intel_vgpu_workload *workload,
+		u32 *reg_state, bool save)
+{
+	struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
+	u32 ctx_oactxctrl = dev_priv->perf.oa.ctx_oactxctrl_offset;
+	u32 ctx_flexeu0 = dev_priv->perf.oa.ctx_flexeu0_offset;
+	int i = 0;
+	u32 flex_mmio[] = {
+		i915_mmio_reg_offset(EU_PERF_CNTL0),
+		i915_mmio_reg_offset(EU_PERF_CNTL1),
+		i915_mmio_reg_offset(EU_PERF_CNTL2),
+		i915_mmio_reg_offset(EU_PERF_CNTL3),
+		i915_mmio_reg_offset(EU_PERF_CNTL4),
+		i915_mmio_reg_offset(EU_PERF_CNTL5),
+		i915_mmio_reg_offset(EU_PERF_CNTL6),
+	};
+
+	if (!workload || !reg_state || workload->ring_id != RCS)
+		return;
+
+	if (save) {
+		workload->oactxctrl = reg_state[ctx_oactxctrl + 1];
+
+		for (i = 0; i < ARRAY_SIZE(workload->flex_mmio); i++) {
+			u32 state_offset = ctx_flexeu0 + i * 2;
+
+			workload->flex_mmio[i] = reg_state[state_offset + 1];
+		}
+	} else {
+		reg_state[ctx_oactxctrl] =
+			i915_mmio_reg_offset(GEN8_OACTXCONTROL);
+		reg_state[ctx_oactxctrl + 1] = workload->oactxctrl;
+
+		for (i = 0; i < ARRAY_SIZE(workload->flex_mmio); i++) {
+			u32 state_offset = ctx_flexeu0 + i * 2;
+			u32 mmio = flex_mmio[i];
+
+			reg_state[state_offset] = mmio;
+			reg_state[state_offset + 1] = workload->flex_mmio[i];
+		}
+	}
+}
+
 static int populate_shadow_context(struct intel_vgpu_workload *workload)
 {
 	struct intel_vgpu *vgpu = workload->vgpu;

@@ -98,6 +146,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
 	shadow_ring_context = kmap(page);
 
+	sr_oa_regs(workload, (u32 *)shadow_ring_context, true);
 #define COPY_REG(name) \
 	intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
 		+ RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)

@@ -122,6 +171,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 			sizeof(*shadow_ring_context),
 			I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
 
+	sr_oa_regs(workload, (u32 *)shadow_ring_context, false);
 	kunmap(page);
 	return 0;
 }

@@ -376,6 +426,17 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
 			goto err;
 		}
 
+		/* For privilge batch buffer and not wa_ctx, the bb_start_cmd_va
+		 * is only updated into ring_scan_buffer, not real ring address
+		 * allocated in later copy_workload_to_ring_buffer. pls be noted
+		 * shadow_ring_buffer_va is now pointed to real ring buffer va
+		 * in copy_workload_to_ring_buffer.
+		 */
+
+		if (bb->bb_offset)
+			bb->bb_start_cmd_va = workload->shadow_ring_buffer_va
+				+ bb->bb_offset;
+
 		/* relocate shadow batch buffer */
 		bb->bb_start_cmd_va[1] = i915_ggtt_offset(bb->vma);
 		if (gmadr_bytes == 8)

@@ -1044,10 +1105,12 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
 
 	bitmap_zero(s->shadow_ctx_desc_updated, I915_NUM_ENGINES);
 
-	s->workloads = kmem_cache_create("gvt-g_vgpu_workload",
-			sizeof(struct intel_vgpu_workload), 0,
-			SLAB_HWCACHE_ALIGN,
-			NULL);
+	s->workloads = kmem_cache_create_usercopy("gvt-g_vgpu_workload",
+			sizeof(struct intel_vgpu_workload), 0,
+			SLAB_HWCACHE_ALIGN,
+			offsetof(struct intel_vgpu_workload, rb_tail),
+			sizeof_field(struct intel_vgpu_workload, rb_tail),
+			NULL);
 
 	if (!s->workloads) {
 		ret = -ENOMEM;
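The last hunk above replaces kmem_cache_create() with kmem_cache_create_usercopy(), whose extra useroffset/usersize pair declares the only region of the object that hardened usercopy will allow to cross the user/kernel boundary; here that region is the rb_tail field the GVT workload exposes. A small self-contained sketch of the API using a hypothetical struct and cache (not GVT code):

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/types.h>

/* Hypothetical object: only 'payload' may be copied to/from user space. */
struct sample_obj {
	u64 internal_state;
	u32 payload[16];		/* the usercopy-whitelisted window */
	void *private;
};

static struct kmem_cache *sample_cache;

static int sample_cache_init(void)
{
	sample_cache = kmem_cache_create_usercopy("sample_obj",
			sizeof(struct sample_obj), 0,
			SLAB_HWCACHE_ALIGN,
			offsetof(struct sample_obj, payload),	  /* useroffset */
			sizeof_field(struct sample_obj, payload), /* usersize  */
			NULL);
	return sample_cache ? 0 : -ENOMEM;
}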
@@ -110,6 +110,10 @@ struct intel_vgpu_workload {
 	/* shadow batch buffer */
 	struct list_head shadow_bb;
 	struct intel_shadow_wa_ctx wa_ctx;
+
+	/* oa registers */
+	u32 oactxctrl;
+	u32 flex_mmio[7];
 };
 
 struct intel_vgpu_shadow_bb {

@@ -120,6 +124,7 @@ struct intel_vgpu_shadow_bb {
 	u32 *bb_start_cmd_va;
 	unsigned int clflush;
 	bool accessing;
+	unsigned long bb_offset;
 };
 
 #define workload_q_head(vgpu, ring_id) \
@@ -434,20 +434,28 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
 			dma_fence_put(shared[i]);
 		kfree(shared);
 
+		/*
+		 * If both shared fences and an exclusive fence exist,
+		 * then by construction the shared fences must be later
+		 * than the exclusive fence. If we successfully wait for
+		 * all the shared fences, we know that the exclusive fence
+		 * must all be signaled. If all the shared fences are
+		 * signaled, we can prune the array and recover the
+		 * floating references on the fences/requests.
+		 */
 		prune_fences = count && timeout >= 0;
 	} else {
 		excl = reservation_object_get_excl_rcu(resv);
 	}
 
-	if (excl && timeout >= 0) {
+	if (excl && timeout >= 0)
 		timeout = i915_gem_object_wait_fence(excl, flags, timeout,
 						     rps_client);
-		prune_fences = timeout >= 0;
-	}
 
 	dma_fence_put(excl);
 
-	/* Oportunistically prune the fences iff we know they have *all* been
+	/*
+	 * Opportunistically prune the fences iff we know they have *all* been
 	 * signaled and that the reservation object has not been changed (i.e.
 	 * no new fences have been added).
 	 */
@@ -304,8 +304,9 @@ static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
 {
 	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
-	u32 val;
+	bool boost = false;
 	ssize_t ret;
+	u32 val;
 
 	ret = kstrtou32(buf, 0, &val);
 	if (ret)

@@ -317,8 +318,13 @@ static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
 		return -EINVAL;
 
 	mutex_lock(&dev_priv->pcu_lock);
-	rps->boost_freq = val;
+	if (val != rps->boost_freq) {
+		rps->boost_freq = val;
+		boost = atomic_read(&rps->num_waiters);
+	}
 	mutex_unlock(&dev_priv->pcu_lock);
+	if (boost)
+		schedule_work(&rps->work);
 
 	return count;
 }
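The rewritten gt_boost_freq_mhz_store() only acts when the requested boost frequency actually changes, samples rps->num_waiters while still holding pcu_lock, and defers the kick to schedule_work() after the lock is released, so clients already waiting get moved to the new boost point without heavy work inside the critical section. A stripped-down sketch of that decide-under-the-lock, kick-after-unlock shape, using generic names rather than the i915 structures:

#include <linux/atomic.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/workqueue.h>

/* Hypothetical state guarded by a mutex and serviced by a worker. */
struct tunable {
	struct mutex lock;
	u32 value;
	atomic_t num_waiters;
	struct work_struct work;
};

static void tunable_set(struct tunable *t, u32 new_val)
{
	bool kick = false;

	mutex_lock(&t->lock);
	if (new_val != t->value) {
		t->value = new_val;
		/* only worth waking the worker if someone is waiting on it */
		kick = atomic_read(&t->num_waiters);
	}
	mutex_unlock(&t->lock);

	if (kick)
		schedule_work(&t->work);	/* done outside the lock */
}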
@@ -620,19 +620,15 @@ static int
 bxt_power_sequencer_idx(struct intel_dp *intel_dp)
 {
 	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+	int backlight_controller = dev_priv->vbt.backlight.controller;
 
 	lockdep_assert_held(&dev_priv->pps_mutex);
 
 	/* We should never land here with regular DP ports */
 	WARN_ON(!intel_dp_is_edp(intel_dp));
 
-	/*
-	 * TODO: BXT has 2 PPS instances. The correct port->PPS instance
-	 * mapping needs to be retrieved from VBT, for now just hard-code to
-	 * use instance #0 always.
-	 */
 	if (!intel_dp->pps_reset)
-		return 0;
+		return backlight_controller;
 
 	intel_dp->pps_reset = false;

@@ -642,7 +638,7 @@ bxt_power_sequencer_idx(struct intel_dp *intel_dp)
 	 */
 	intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
 
-	return 0;
+	return backlight_controller;
 }
 
 typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
@@ -134,7 +134,7 @@ nv50_get_intensity(struct backlight_device *bd)
 	struct nouveau_encoder *nv_encoder = bl_get_data(bd);
 	struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
 	struct nvif_object *device = &drm->client.device.object;
-	int or = nv_encoder->or;
+	int or = ffs(nv_encoder->dcb->or) - 1;
 	u32 div = 1025;
 	u32 val;
 

@@ -149,7 +149,7 @@ nv50_set_intensity(struct backlight_device *bd)
 	struct nouveau_encoder *nv_encoder = bl_get_data(bd);
 	struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
 	struct nvif_object *device = &drm->client.device.object;
-	int or = nv_encoder->or;
+	int or = ffs(nv_encoder->dcb->or) - 1;
 	u32 div = 1025;
 	u32 val = (bd->props.brightness * div) / 100;
 

@@ -170,7 +170,7 @@ nva3_get_intensity(struct backlight_device *bd)
 	struct nouveau_encoder *nv_encoder = bl_get_data(bd);
 	struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
 	struct nvif_object *device = &drm->client.device.object;
-	int or = nv_encoder->or;
+	int or = ffs(nv_encoder->dcb->or) - 1;
 	u32 div, val;
 
 	div = nvif_rd32(device, NV50_PDISP_SOR_PWM_DIV(or));

@@ -188,7 +188,7 @@ nva3_set_intensity(struct backlight_device *bd)
 	struct nouveau_encoder *nv_encoder = bl_get_data(bd);
 	struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
 	struct nvif_object *device = &drm->client.device.object;
-	int or = nv_encoder->or;
+	int or = ffs(nv_encoder->dcb->or) - 1;
 	u32 div, val;
 
 	div = nvif_rd32(device, NV50_PDISP_SOR_PWM_DIV(or));

@@ -228,7 +228,7 @@ nv50_backlight_init(struct drm_connector *connector)
 		return -ENODEV;
 	}
 
-	if (!nvif_rd32(device, NV50_PDISP_SOR_PWM_CTL(nv_encoder->or)))
+	if (!nvif_rd32(device, NV50_PDISP_SOR_PWM_CTL(ffs(nv_encoder->dcb->or) - 1)))
 		return 0;
 
 	if (drm->client.device.info.chipset <= 0xa0 ||

@@ -268,13 +268,13 @@ nouveau_backlight_init(struct drm_device *dev)
 	struct nvif_device *device = &drm->client.device;
 	struct drm_connector *connector;
 
+	INIT_LIST_HEAD(&drm->bl_connectors);
+
 	if (apple_gmux_present()) {
 		NV_INFO(drm, "Apple GMUX detected: not registering Nouveau backlight interface\n");
 		return 0;
 	}
 
-	INIT_LIST_HEAD(&drm->bl_connectors);
-
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS &&
 		    connector->connector_type != DRM_MODE_CONNECTOR_eDP)
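Each backlight callback now derives the output-resource index from the DCB entry's or field, which is a bitmask with one bit per OR, instead of using nv_encoder->or directly. ffs() returns the 1-based position of the lowest set bit, so ffs(mask) - 1 converts a single-bit mask back into the index expected by the NV50_PDISP_SOR_PWM_* register macros. A tiny stand-alone illustration of that conversion (ordinary userspace C, hypothetical mask values):

#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	unsigned int masks[] = { 0x1, 0x2, 0x4, 0x8 };	/* single-bit OR masks */

	for (int i = 0; i < 4; i++)
		printf("mask 0x%x -> or index %d\n", masks[i], ffs(masks[i]) - 1);
	/* prints indices 0, 1, 2, 3: ffs() is 1-based, hence the "- 1" */
	return 0;
}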
@@ -1354,7 +1354,7 @@ nvkm_vmm_get_locked(struct nvkm_vmm *vmm, bool getref, bool mapref, bool sparse,
 
 		tail = this->addr + this->size;
 		if (vmm->func->page_block && next && next->page != p)
-			tail = ALIGN_DOWN(addr, vmm->func->page_block);
+			tail = ALIGN_DOWN(tail, vmm->func->page_block);
 
 		if (addr <= tail && tail - addr >= size) {
 			rb_erase(&this->tree, &vmm->free);
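The one-character fix: the block-boundary clamp was computed from addr instead of from the tail it is meant to limit. ALIGN_DOWN(x, a) rounds x down to the previous multiple of the power-of-two a, so the corrected line trims tail back to the page_block boundary. A quick illustration of the macro's behaviour; the definition below is a restatement for illustration only, not a copy of the kernel header:

/* For power-of-two 'a', ALIGN_DOWN(x, a) behaves like this: */
#define ALIGN_DOWN(x, a)	((x) & ~((typeof(x))(a) - 1))

/*
 * Examples:
 *   ALIGN_DOWN(0x1234, 0x1000) == 0x1000
 *   ALIGN_DOWN(0x1fff, 0x0800) == 0x1800
 * Clamping from 'addr' instead of 'tail' produced a bogus end-of-range and
 * could make the allocator mis-size the region, hence the reported lockups.
 */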
@@ -34,8 +34,6 @@ void radeon_gem_object_free(struct drm_gem_object *gobj)
 	struct radeon_bo *robj = gem_to_radeon_bo(gobj);
 
 	if (robj) {
-		if (robj->gem_base.import_attach)
-			drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
 		radeon_mn_unregister(robj);
 		radeon_bo_unref(&robj);
 	}
@@ -82,6 +82,8 @@ static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
 	mutex_unlock(&bo->rdev->gem.mutex);
 	radeon_bo_clear_surface_reg(bo);
 	WARN_ON_ONCE(!list_empty(&bo->va));
+	if (bo->gem_base.import_attach)
+		drm_prime_gem_destroy(&bo->gem_base, bo->tbo.sg);
 	drm_gem_object_release(&bo->gem_base);
 	kfree(bo);
 }