Commit 91383014 authored by Dave Airlie

Merge branch 'drm-fixes-4.5' of git://people.freedesktop.org/~agd5f/linux into drm-fixes

radeon and amdgpu fixes for 4.5. Three regression fixes and some fixups
to the error handling in the earlier vblank regression fixes.

* 'drm-fixes-4.5' of git://people.freedesktop.org/~agd5f/linux:
  Revert "drm/radeon/pm: adjust display configuration after powerstate"
  drm/amdgpu/dp: add back special handling for NUTMEG
  drm/radeon/dp: add back special handling for NUTMEG
  drm/radeon: Fix error handling in radeon_flip_work_func.
  drm/amdgpu: Fix error handling in amdgpu_flip_work_func.
parents dad82ea3 d74e766e
@@ -96,7 +96,7 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
 	 * In practice this won't execute very often unless on very fast
 	 * machines because the time window for this to happen is very small.
 	 */
-	while (amdgpuCrtc->enabled && repcnt--) {
+	while (amdgpuCrtc->enabled && --repcnt) {
 		/* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank
 		 * start in hpos, and to the "fudged earlier" vblank start in
 		 * vpos.
@@ -112,13 +112,13 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
 			break;
 
 		/* Sleep at least until estimated real start of hw vblank */
-		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
 		min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5);
 		if (min_udelay > vblank->framedur_ns / 2000) {
 			/* Don't wait ridiculously long - something is wrong */
 			repcnt = 0;
 			break;
 		}
+		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
 		usleep_range(min_udelay, 2 * min_udelay);
 		spin_lock_irqsave(&crtc->dev->event_lock, flags);
 	};
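Two things change in this hunk: the unsigned retry counter repcnt is now pre-decremented instead of post-decremented, and the spin_unlock_irqrestore() moves below the "Don't wait ridiculously long" bail-out. With the old placement, that early break dropped event_lock even though every other exit from the loop leaves it held, so the code after the loop (outside this hunk), which still expects to hold and eventually release the lock, could end up unlocking twice. The decrement change matters because repcnt is unsigned: a post-decrement wraps the counter to UINT_MAX when the retries are exhausted, which defeats any "retries used up" check on its final value. A minimal standalone sketch of that counter behaviour (illustration only, not driver code):

	/* Final value of an unsigned counter after the two decrement styles,
	 * when the loop runs until the counter is exhausted. */
	#include <stdio.h>

	int main(void)
	{
		unsigned int post = 3, pre = 3;

		while (post--)	/* exits with post wrapped to UINT_MAX */
			;
		while (--pre)	/* exits with pre == 0 */
			;

		printf("post-decrement leaves %u, pre-decrement leaves %u\n",
		       post, pre);
		return 0;
	}

The same two fixes are applied to the radeon version of the loop further down.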
@@ -265,6 +265,17 @@ static int amdgpu_atombios_dp_get_dp_link_config(struct drm_connector *connector
 	unsigned max_lane_num = drm_dp_max_lane_count(dpcd);
 	unsigned lane_num, i, max_pix_clock;
 
+	if (amdgpu_connector_encoder_get_dp_bridge_encoder_id(connector) ==
+	    ENCODER_OBJECT_ID_NUTMEG) {
+		for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
+			max_pix_clock = (lane_num * 270000 * 8) / bpp;
+			if (max_pix_clock >= pix_clock) {
+				*dp_lanes = lane_num;
+				*dp_rate = 270000;
+				return 0;
+			}
+		}
+	} else {
 	for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
 		for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
 			max_pix_clock = (lane_num * link_rates[i] * 8) / bpp;
@@ -275,6 +286,7 @@ static int amdgpu_atombios_dp_get_dp_link_config(struct drm_connector *connector
 			}
 		}
 	}
+	}
 
 	return -EINVAL;
 }
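The restored branch handles connectors behind a NUTMEG DP bridge encoder separately: instead of walking the link_rates[] table, it pins the link rate at 270000 (the 2.70 Gbps HBR rate) and only scales the lane count upward until the bandwidth covers the requested pixel clock, using max_pix_clock = (lane_num * 270000 * 8) / bpp. A standalone sketch of that selection with made-up mode values (24 bpp, 154000 kHz pixel clock, roughly a 1920x1200@60 mode, and an assumed 4-lane sink):

	/* Illustration only: lane selection as done by the NUTMEG branch. */
	#include <stdio.h>

	int main(void)
	{
		unsigned bpp = 24, pix_clock = 154000;	/* kHz, hypothetical mode */
		unsigned max_lane_num = 4;		/* assumed sink lane count */
		unsigned lane_num, max_pix_clock;

		for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
			max_pix_clock = (lane_num * 270000 * 8) / bpp;
			printf("%u lane(s) @ 2.70 Gbps -> up to %u kHz\n",
			       lane_num, max_pix_clock);
			if (max_pix_clock >= pix_clock) {
				printf("selected: %u lane(s), link rate 270000\n",
				       lane_num);
				return 0;
			}
		}
		return 1;	/* mode does not fit even at the maximum lane count */
	}

Here one lane tops out at 90000 kHz, so the loop settles on two lanes (180000 kHz). The identical hunk is added back to the radeon version of the function below.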
@@ -315,6 +315,17 @@ int radeon_dp_get_dp_link_config(struct drm_connector *connector,
 	unsigned max_lane_num = drm_dp_max_lane_count(dpcd);
 	unsigned lane_num, i, max_pix_clock;
 
+	if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) ==
+	    ENCODER_OBJECT_ID_NUTMEG) {
+		for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
+			max_pix_clock = (lane_num * 270000 * 8) / bpp;
+			if (max_pix_clock >= pix_clock) {
+				*dp_lanes = lane_num;
+				*dp_rate = 270000;
+				return 0;
+			}
+		}
+	} else {
 	for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
 		for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
 			max_pix_clock = (lane_num * link_rates[i] * 8) / bpp;
@@ -325,6 +336,7 @@ int radeon_dp_get_dp_link_config(struct drm_connector *connector,
 			}
 		}
 	}
+	}
 
 	return -EINVAL;
 }
@@ -455,7 +455,7 @@ static void radeon_flip_work_func(struct work_struct *__work)
 	 * In practice this won't execute very often unless on very fast
 	 * machines because the time window for this to happen is very small.
 	 */
-	while (radeon_crtc->enabled && repcnt--) {
+	while (radeon_crtc->enabled && --repcnt) {
 		/* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank
 		 * start in hpos, and to the "fudged earlier" vblank start in
 		 * vpos.
@@ -471,13 +471,13 @@ static void radeon_flip_work_func(struct work_struct *__work)
 			break;
 
 		/* Sleep at least until estimated real start of hw vblank */
-		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
 		min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5);
 		if (min_udelay > vblank->framedur_ns / 2000) {
 			/* Don't wait ridiculously long - something is wrong */
 			repcnt = 0;
 			break;
 		}
+		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
 		usleep_range(min_udelay, 2 * min_udelay);
 		spin_lock_irqsave(&crtc->dev->event_lock, flags);
 	};
@@ -1079,6 +1079,8 @@ static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
 
 	/* update display watermarks based on new power state */
 	radeon_bandwidth_update(rdev);
+	/* update displays */
+	radeon_dpm_display_configuration_changed(rdev);
 
 	/* wait for the rings to drain */
 	for (i = 0; i < RADEON_NUM_RINGS; i++) {
@@ -1095,9 +1097,6 @@ static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
 
 	radeon_dpm_post_set_power_state(rdev);
 
-	/* update displays */
-	radeon_dpm_display_configuration_changed(rdev);
-
 	rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
 	rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
 	rdev->pm.dpm.single_display = single_display;