Commit 821b4db3 authored by Dave Airlie

Merge tag 'drm-intel-fixes-2014-01-28' of git://people.freedesktop.org/~danvet/drm-intel into drm-next

Pile of -fixes all over the place. Lots of cc: stable.

Only big thing is that we've dropped the preliminary hw support tag for
bdw - it seems to work. Which also means that I'll shovel a few more bdw
patches through -fixes; there are 5 w/a patches from Ken already on
intel-gfx.

* tag 'drm-intel-fixes-2014-01-28' of git://people.freedesktop.org/~danvet/drm-intel:
  drm/i915: Fix the offset issue for the stolen GEM objects
  drm/i915: Decouple GPU error reporting from ring initialisation
  i915: remove pm_qos request on error
  Revert "drm/i915: Mask reserved bits in display/sprite address registers"
  drm/i915: VLV2 - Fix hotplug detect bits
  drm/i915: Allow reading the TIMESTAMP register on Gen8.
  drm/i915: Repeat evictions whilst pageflip completions are outstanding
  drm/i915: Wait for completion of pending flips when starved of fences
  drm/i915: don't disable DP port after a failed link training
  drm/i915: don't disable the DP port if the link is lost
  drm/i915: Eliminate lots of WARNs when there's no backlight present
  drm/i915: g4x/vlv: fix dp aux interrupt mask
  drm/i915/ppgtt: Defer request freeing on reset
  i915: send D1 opregion notification
  drm/i915/bdw: remove preliminary_hw_support flag from BDW
  drm/i915: Tune down reset_stat output from ERROR to debug
  drm/i915: Make semaphore modparam RO
  drm/i915: Fix disabled semaphores
  drm/i915: Clarify relocation errnos
  drm/i915: Spelling s/auxilliary/auxiliary/
parents a5bd4f8a ec14ba47
......@@ -1685,6 +1685,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
intel_teardown_gmbus(dev);
intel_teardown_mchbar(dev);
pm_qos_remove_request(&dev_priv->pm_qos);
destroy_workqueue(dev_priv->wq);
out_mtrrfree:
arch_phys_wc_del(dev_priv->gtt.mtrr);
......
......@@ -59,7 +59,7 @@ MODULE_PARM_DESC(powersave,
"Enable powersavings, fbc, downclocking, etc. (default: true)");
int i915_semaphores __read_mostly = -1;
module_param_named(semaphores, i915_semaphores, int, 0600);
module_param_named(semaphores, i915_semaphores, int, 0400);
MODULE_PARM_DESC(semaphores,
"Use semaphores for inter-ring sync (default: -1 (use per-chip defaults))");
......@@ -341,7 +341,6 @@ static const struct intel_device_info intel_haswell_m_info = {
};
static const struct intel_device_info intel_broadwell_d_info = {
.is_preliminary = 1,
.gen = 8, .num_pipes = 3,
.need_gfx_hws = 1, .has_hotplug = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
......@@ -350,7 +349,6 @@ static const struct intel_device_info intel_broadwell_d_info = {
};
static const struct intel_device_info intel_broadwell_m_info = {
.is_preliminary = 1,
.gen = 8, .is_mobile = 1, .num_pipes = 3,
.need_gfx_hws = 1, .has_hotplug = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
......@@ -924,7 +922,15 @@ static int i915_runtime_suspend(struct device *device)
del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
dev_priv->pm.suspended = true;
intel_opregion_notify_adapter(dev, PCI_D3cold);
/*
* current versions of firmware which depend on this opregion
* notification have repurposed the D1 definition to mean
* "runtime suspended" vs. what you would normally expect (D3)
* to distinguish it from notifications that might be sent
* via the suspend path.
*/
intel_opregion_notify_adapter(dev, PCI_D1);
return 0;
}
......
......@@ -330,6 +330,7 @@ struct drm_i915_error_state {
u64 fence[I915_MAX_NUM_FENCES];
struct timeval time;
struct drm_i915_error_ring {
bool valid;
struct drm_i915_error_object {
int page_count;
u32 gtt_offset;
......
......@@ -2330,7 +2330,7 @@ static void i915_set_reset_status(struct intel_ring_buffer *ring,
if (ring->hangcheck.action != HANGCHECK_WAIT &&
i915_request_guilty(request, acthd, &inside)) {
DRM_ERROR("%s hung %s bo (0x%lx ctx %d) at 0x%x\n",
DRM_DEBUG("%s hung %s bo (0x%lx ctx %d) at 0x%x\n",
ring->name,
inside ? "inside" : "flushing",
offset,
......@@ -2388,16 +2388,6 @@ static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
struct intel_ring_buffer *ring)
{
while (!list_empty(&ring->request_list)) {
struct drm_i915_gem_request *request;
request = list_first_entry(&ring->request_list,
struct drm_i915_gem_request,
list);
i915_gem_free_request(request);
}
while (!list_empty(&ring->active_list)) {
struct drm_i915_gem_object *obj;
......@@ -2407,6 +2397,23 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
i915_gem_object_move_to_inactive(obj);
}
/*
* We must free the requests after all the corresponding objects have
* been moved off active lists, which is the same order the normal
* retire_requests function uses. This is important if objects hold
* implicit references on things like e.g. ppgtt address spaces through
* the request.
*/
while (!list_empty(&ring->request_list)) {
struct drm_i915_gem_request *request;
request = list_first_entry(&ring->request_list,
struct drm_i915_gem_request,
list);
i915_gem_free_request(request);
}
}
void i915_gem_restore_fences(struct drm_device *dev)
......@@ -3099,7 +3106,7 @@ i915_find_fence_reg(struct drm_device *dev)
}
if (avail == NULL)
return NULL;
goto deadlock;
/* None available, try to steal one or wait for a user to finish */
list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
......@@ -3109,7 +3116,12 @@ i915_find_fence_reg(struct drm_device *dev)
return reg;
}
return NULL;
deadlock:
/* Wait for completion of pending flips which consume fences */
if (intel_has_pending_fb_unpin(dev))
return ERR_PTR(-EAGAIN);
return ERR_PTR(-EDEADLK);
}
/**
......@@ -3154,8 +3166,8 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
}
} else if (enable) {
reg = i915_find_fence_reg(dev);
if (reg == NULL)
return -EDEADLK;
if (IS_ERR(reg))
return PTR_ERR(reg);
if (reg->obj) {
struct drm_i915_gem_object *old = reg->obj;
......
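The two hunks above switch i915_find_fence_reg() from returning NULL to returning an ERR_PTR-encoded errno, so the caller can tell "pageflips still pending, retry" (-EAGAIN) apart from a genuine fence deadlock (-EDEADLK). For readers unfamiliar with the idiom, a minimal userspace sketch of ERR_PTR/IS_ERR/PTR_ERR follows; the find_fence() helper and struct fence are illustrative only, not the driver's code.

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

struct fence { int id; };

static struct fence *find_fence(int flips_pending, int have_idle_fence)
{
        static struct fence f;

        if (have_idle_fence)
                return &f;                 /* success: a real pointer */
        if (flips_pending)
                return ERR_PTR(-EAGAIN);   /* transient: caller should retry */
        return ERR_PTR(-EDEADLK);          /* nothing can be stolen */
}

int main(void)
{
        struct fence *reg = find_fence(1, 0);

        if (IS_ERR(reg))
                printf("no fence: errno %ld\n", -PTR_ERR(reg));
        else
                printf("got fence %d\n", reg->id);
        return 0;
}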
......@@ -27,8 +27,10 @@
*/
#include <drm/drmP.h>
#include "i915_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "i915_trace.h"
static bool
......@@ -53,6 +55,7 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
struct list_head eviction_list, unwind_list;
struct i915_vma *vma;
int ret = 0;
int pass = 0;
trace_i915_gem_evict(dev, min_size, alignment, mappable);
......@@ -119,14 +122,24 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
/* Can we unpin some objects such as idle hw contents,
* or pending flips?
*/
ret = nonblocking ? -ENOSPC : i915_gpu_idle(dev);
if (ret)
return ret;
if (nonblocking)
return -ENOSPC;
/* Only idle the GPU and repeat the search once */
i915_gem_retire_requests(dev);
nonblocking = true;
goto search_again;
if (pass++ == 0) {
ret = i915_gpu_idle(dev);
if (ret)
return ret;
i915_gem_retire_requests(dev);
goto search_again;
}
/* If we still have pending pageflip completions, drop
* back to userspace to give our workqueues time to
* acquire our locks and unpin the old scanouts.
*/
return intel_has_pending_fb_unpin(dev) ? -EAGAIN : -ENOSPC;
found:
/* drm_mm doesn't allow any other operations while
......
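To make the control flow of the eviction change above easier to follow, here is a compact standalone sketch of the same pattern: search once, idle/retire and search exactly one more time, then report -EAGAIN (flips still in flight, let userspace retry) or -ENOSPC. The helper names (try_find_space, pending_unpins) are made up for illustration.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static bool try_find_space(int attempt) { return attempt > 0; } /* pretend the retry succeeds */
static bool pending_unpins(void)        { return false; }

static int evict_something(void)
{
        int pass = 0;

search_again:
        if (try_find_space(pass))
                return 0;                  /* found a hole */

        if (pass++ == 0)
                goto search_again;         /* idle/retire, then repeat once */

        /* still nothing: hand back to userspace if unpins are pending */
        return pending_unpins() ? -EAGAIN : -ENOSPC;
}

int main(void)
{
        printf("evict_something() = %d\n", evict_something());
        return 0;
}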
......@@ -252,7 +252,7 @@ relocate_entry_cpu(struct drm_i915_gem_object *obj,
struct drm_device *dev = obj->base.dev;
uint32_t page_offset = offset_in_page(reloc->offset);
char *vaddr;
int ret = -EINVAL;
int ret;
ret = i915_gem_object_set_to_cpu_domain(obj, true);
if (ret)
......@@ -287,7 +287,7 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t __iomem *reloc_entry;
void __iomem *reloc_page;
int ret = -EINVAL;
int ret;
ret = i915_gem_object_set_to_gtt_domain(obj, true);
if (ret)
......@@ -335,7 +335,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
struct drm_i915_gem_object *target_i915_obj;
struct i915_vma *target_vma;
uint32_t target_offset;
int ret = -EINVAL;
int ret;
/* we already hold a reference to all valid objects */
target_vma = eb_get_vma(eb, reloc->target_handle);
......@@ -365,7 +365,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
(int) reloc->offset,
reloc->read_domains,
reloc->write_domain);
return ret;
return -EINVAL;
}
if (unlikely((reloc->write_domain | reloc->read_domains)
& ~I915_GEM_GPU_DOMAINS)) {
......@@ -376,7 +376,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
(int) reloc->offset,
reloc->read_domains,
reloc->write_domain);
return ret;
return -EINVAL;
}
target_obj->pending_read_domains |= reloc->read_domains;
......@@ -396,14 +396,14 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
obj, reloc->target_handle,
(int) reloc->offset,
(int) obj->base.size);
return ret;
return -EINVAL;
}
if (unlikely(reloc->offset & 3)) {
DRM_DEBUG("Relocation not 4-byte aligned: "
"obj %p target %d offset %d.\n",
obj, reloc->target_handle,
(int) reloc->offset);
return ret;
return -EINVAL;
}
/* We can't wait for rendering with pagefaults disabled */
......
......@@ -250,7 +250,7 @@ i915_pages_create_for_stolen(struct drm_device *dev,
}
sg = st->sgl;
sg->offset = offset;
sg->offset = 0;
sg->length = size;
sg_dma_address(sg) = (dma_addr_t)dev_priv->mm.stolen_base + offset;
......
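For the stolen-object fix above: the object's offset into stolen memory is already folded into sg_dma_address, so the per-entry sg->offset is now cleared. A back-of-the-envelope illustration, under the assumption (mine, not stated in the diff) that a consumer would add sg->offset on top of the DMA address and thereby count the offset twice:

#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint64_t stolen_base = 0x100000, offset = 0x2000;   /* made-up values */
        uint64_t dma_addr = stolen_base + offset;           /* as set up in the diff */

        /* old code: sg->offset = offset; new code: sg->offset = 0 */
        assert(dma_addr + 0      == stolen_base + offset);  /* correct */
        assert(dma_addr + offset != stolen_base + offset);  /* offset applied twice */
        return 0;
}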
......@@ -239,6 +239,9 @@ static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
unsigned ring)
{
BUG_ON(ring >= I915_NUM_RINGS); /* shut up confused gcc */
if (!error->ring[ring].valid)
return;
err_printf(m, "%s command stream:\n", ring_str(ring));
err_printf(m, " HEAD: 0x%08x\n", error->head[ring]);
err_printf(m, " TAIL: 0x%08x\n", error->tail[ring]);
......@@ -293,7 +296,6 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
struct drm_device *dev = error_priv->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_error_state *error = error_priv->error;
struct intel_ring_buffer *ring;
int i, j, page, offset, elt;
if (!error) {
......@@ -328,7 +330,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
if (INTEL_INFO(dev)->gen == 7)
err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
for_each_ring(ring, dev_priv, i)
for (i = 0; i < ARRAY_SIZE(error->ring); i++)
i915_ring_error_state(m, dev, error, i);
if (error->active_bo)
......@@ -385,8 +387,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
}
}
obj = error->ring[i].ctx;
if (obj) {
if ((obj = error->ring[i].ctx)) {
err_printf(m, "%s --- HW Context = 0x%08x\n",
dev_priv->ring[i].name,
obj->gtt_offset);
......@@ -667,7 +668,8 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
return NULL;
obj = ring->scratch.obj;
if (acthd >= i915_gem_obj_ggtt_offset(obj) &&
if (obj != NULL &&
acthd >= i915_gem_obj_ggtt_offset(obj) &&
acthd < i915_gem_obj_ggtt_offset(obj) + obj->base.size)
return i915_error_object_create(dev_priv, obj);
}
......@@ -775,11 +777,17 @@ static void i915_gem_record_rings(struct drm_device *dev,
struct drm_i915_error_state *error)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
struct drm_i915_gem_request *request;
int i, count;
for_each_ring(ring, dev_priv, i) {
for (i = 0; i < I915_NUM_RINGS; i++) {
struct intel_ring_buffer *ring = &dev_priv->ring[i];
if (ring->dev == NULL)
continue;
error->ring[i].valid = true;
i915_record_ring_state(dev, error, ring);
error->ring[i].batchbuffer =
......
......@@ -2122,9 +2122,13 @@
* Please check the detailed lore in the commit message for experimental
* evidence.
*/
#define PORTD_HOTPLUG_LIVE_STATUS (1 << 29)
#define PORTC_HOTPLUG_LIVE_STATUS (1 << 28)
#define PORTB_HOTPLUG_LIVE_STATUS (1 << 27)
#define PORTD_HOTPLUG_LIVE_STATUS_G4X (1 << 29)
#define PORTC_HOTPLUG_LIVE_STATUS_G4X (1 << 28)
#define PORTB_HOTPLUG_LIVE_STATUS_G4X (1 << 27)
/* VLV DP/HDMI bits again match Bspec */
#define PORTD_HOTPLUG_LIVE_STATUS_VLV (1 << 27)
#define PORTC_HOTPLUG_LIVE_STATUS_VLV (1 << 28)
#define PORTB_HOTPLUG_LIVE_STATUS_VLV (1 << 29)
#define PORTD_HOTPLUG_INT_STATUS (3 << 21)
#define PORTC_HOTPLUG_INT_STATUS (3 << 19)
#define PORTB_HOTPLUG_INT_STATUS (3 << 17)
......@@ -2138,7 +2142,8 @@
#define DP_AUX_CHANNEL_D_INT_STATUS_G4X (1 << 6)
#define DP_AUX_CHANNEL_C_INT_STATUS_G4X (1 << 5)
#define DP_AUX_CHANNEL_B_INT_STATUS_G4X (1 << 4)
#define DP_AUX_CHANNEL_MASK_INT_STATUS_G4X (1 << 4)
#define DP_AUX_CHANNEL_MASK_INT_STATUS_G4X (7 << 4)
/* SDVO is different across gen3/4 */
#define SDVOC_HOTPLUG_INT_STATUS_G4X (1 << 3)
#define SDVOB_HOTPLUG_INT_STATUS_G4X (1 << 2)
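A quick sanity check of the widened AUX mask above: the per-channel status bits sit at bits 4 (B), 5 (C) and 6 (D), so a mask covering all three channels is (7 << 4), not (1 << 4). A minimal check, values taken from the definitions shown:

#include <assert.h>

int main(void)
{
        unsigned int b = 1u << 4;   /* DP_AUX_CHANNEL_B_INT_STATUS_G4X */
        unsigned int c = 1u << 5;   /* DP_AUX_CHANNEL_C_INT_STATUS_G4X */
        unsigned int d = 1u << 6;   /* DP_AUX_CHANNEL_D_INT_STATUS_G4X */

        assert((b | c | d) == (7u << 4));   /* new mask covers all channels */
        assert((b | c | d) != (1u << 4));   /* old mask only covered channel B */
        return 0;
}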
......@@ -3573,8 +3578,6 @@
#define DISP_BASEADDR_MASK (0xfffff000)
#define I915_LO_DISPBASE(val) (val & ~DISP_BASEADDR_MASK)
#define I915_HI_DISPBASE(val) (val & DISP_BASEADDR_MASK)
#define I915_MODIFY_DISPBASE(reg, gfx_addr) \
(I915_WRITE((reg), (gfx_addr) | I915_LO_DISPBASE(I915_READ(reg))))
/* VBIOS flags */
#define SWF00 (dev_priv->info->display_mmio_offset + 0x71410)
......
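The revert above removes the I915_MODIFY_DISPBASE() read-modify-write macro, and the display/sprite hunks that follow go back to plain I915_WRITE() of the surface address. A small userspace sketch of the behavioural difference, using a fake register and the DISP_BASEADDR_MASK definition shown above; nothing here is driver code:

#include <stdint.h>
#include <stdio.h>

#define DISP_BASEADDR_MASK 0xfffff000u
#define LO_DISPBASE(val)   ((val) & ~DISP_BASEADDR_MASK)

static uint32_t fake_reg;                         /* stand-in for an MMIO register */
static uint32_t reg_read(void)    { return fake_reg; }
static void reg_write(uint32_t v) { fake_reg = v; }

int main(void)
{
        fake_reg = 0x00000abc;                    /* low (reserved) bits set */

        /* removed macro: preserve the low bits while updating the base */
        reg_write(0x12345000u | LO_DISPBASE(reg_read()));
        printf("modify: 0x%08x\n", fake_reg);     /* 0x12345abc */

        /* what the driver does now: overwrite the whole register */
        reg_write(0x12345000u);
        printf("write:  0x%08x\n", fake_reg);     /* 0x12345000 */
        return 0;
}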
......@@ -2114,8 +2114,8 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
fb->pitches[0]);
I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
if (INTEL_INFO(dev)->gen >= 4) {
I915_MODIFY_DISPBASE(DSPSURF(plane),
i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
I915_WRITE(DSPSURF(plane),
i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
I915_WRITE(DSPLINOFF(plane), linear_offset);
} else
......@@ -2205,8 +2205,8 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
fb->pitches[0]);
I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
I915_MODIFY_DISPBASE(DSPSURF(plane),
i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
I915_WRITE(DSPSURF(plane),
i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
} else {
......@@ -2982,6 +2982,30 @@ static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
return pending;
}
bool intel_has_pending_fb_unpin(struct drm_device *dev)
{
struct intel_crtc *crtc;
/* Note that we don't need to be called with mode_config.lock here
* as our list of CRTC objects is static for the lifetime of the
* device and so cannot disappear as we iterate. Similarly, we can
* happily treat the predicates as racy, atomic checks as userspace
* cannot claim and pin a new fb without at least acquiring the
* struct_mutex and so serialising with us.
*/
list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
if (atomic_read(&crtc->unpin_work_count) == 0)
continue;
if (crtc->unpin_work)
intel_wait_for_vblank(dev, crtc->pipe);
return true;
}
return false;
}
static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
......
......@@ -2638,7 +2638,6 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
if (cr_tries > 5) {
DRM_ERROR("failed to train DP, aborting\n");
intel_dp_link_down(intel_dp);
break;
}
......@@ -2891,13 +2890,11 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
/* Try to read receiver status if the link appears to be up */
if (!intel_dp_get_link_status(intel_dp, link_status)) {
intel_dp_link_down(intel_dp);
return;
}
/* Now read the DPCD to see if it's actually running */
if (!intel_dp_get_dpcd(intel_dp)) {
intel_dp_link_down(intel_dp);
return;
}
......@@ -3012,18 +3009,34 @@ g4x_dp_detect(struct intel_dp *intel_dp)
return status;
}
switch (intel_dig_port->port) {
case PORT_B:
bit = PORTB_HOTPLUG_LIVE_STATUS;
break;
case PORT_C:
bit = PORTC_HOTPLUG_LIVE_STATUS;
break;
case PORT_D:
bit = PORTD_HOTPLUG_LIVE_STATUS;
break;
default:
return connector_status_unknown;
if (IS_VALLEYVIEW(dev)) {
switch (intel_dig_port->port) {
case PORT_B:
bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
break;
case PORT_C:
bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
break;
case PORT_D:
bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
break;
default:
return connector_status_unknown;
}
} else {
switch (intel_dig_port->port) {
case PORT_B:
bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
break;
case PORT_C:
bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
break;
case PORT_D:
bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
break;
default:
return connector_status_unknown;
}
}
if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
......
......@@ -626,6 +626,7 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
/* intel_display.c */
const char *intel_output_name(int output);
bool intel_has_pending_fb_unpin(struct drm_device *dev);
int intel_pch_rawclk(struct drm_device *dev);
void intel_mark_busy(struct drm_device *dev);
void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
......
......@@ -396,9 +396,7 @@ int intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state)
static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_connector *connector;
struct intel_connector *intel_connector;
struct intel_panel *panel;
struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp);
......@@ -417,12 +415,8 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
* only one).
*/
DRM_DEBUG_KMS("updating opregion backlight %d/255\n", bclp);
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
intel_connector = to_intel_connector(connector);
panel = &intel_connector->panel;
if (panel->backlight.present)
intel_panel_set_backlight(intel_connector, bclp, 255);
}
list_for_each_entry(intel_connector, &dev->mode_config.connector_list, base.head)
intel_panel_set_backlight(intel_connector, bclp, 255);
iowrite32(DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID, &asle->cblv);
mutex_unlock(&dev->mode_config.mutex);
......
......@@ -502,7 +502,7 @@ void intel_panel_set_backlight(struct intel_connector *connector, u32 level,
u32 freq;
unsigned long flags;
if (pipe == INVALID_PIPE)
if (!panel->backlight.present || pipe == INVALID_PIPE)
return;
spin_lock_irqsave(&dev_priv->backlight_lock, flags);
......@@ -579,7 +579,7 @@ void intel_panel_disable_backlight(struct intel_connector *connector)
enum pipe pipe = intel_get_pipe_from_connector(connector);
unsigned long flags;
if (pipe == INVALID_PIPE)
if (!panel->backlight.present || pipe == INVALID_PIPE)
return;
/*
......@@ -782,7 +782,7 @@ void intel_panel_enable_backlight(struct intel_connector *connector)
enum pipe pipe = intel_get_pipe_from_connector(connector);
unsigned long flags;
if (pipe == INVALID_PIPE)
if (!panel->backlight.present || pipe == INVALID_PIPE)
return;
DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe));
......
......@@ -673,10 +673,12 @@ gen6_add_request(struct intel_ring_buffer *ring)
if (ret)
return ret;
for_each_ring(useless, dev_priv, i) {
u32 mbox_reg = ring->signal_mbox[i];
if (mbox_reg != GEN6_NOSYNC)
update_mboxes(ring, mbox_reg);
if (i915_semaphore_is_enabled(dev)) {
for_each_ring(useless, dev_priv, i) {
u32 mbox_reg = ring->signal_mbox[i];
if (mbox_reg != GEN6_NOSYNC)
update_mboxes(ring, mbox_reg);
}
}
intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
......
......@@ -141,8 +141,8 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
I915_WRITE(SPSIZE(pipe, plane), (crtc_h << 16) | crtc_w);
I915_WRITE(SPCNTR(pipe, plane), sprctl);
I915_MODIFY_DISPBASE(SPSURF(pipe, plane), i915_gem_obj_ggtt_offset(obj) +
sprsurf_offset);
I915_WRITE(SPSURF(pipe, plane), i915_gem_obj_ggtt_offset(obj) +
sprsurf_offset);
POSTING_READ(SPSURF(pipe, plane));
}
......@@ -158,7 +158,7 @@ vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
I915_WRITE(SPCNTR(pipe, plane), I915_READ(SPCNTR(pipe, plane)) &
~SP_ENABLE);
/* Activate double buffered register update */
I915_MODIFY_DISPBASE(SPSURF(pipe, plane), 0);
I915_WRITE(SPSURF(pipe, plane), 0);
POSTING_READ(SPSURF(pipe, plane));
intel_update_sprite_watermarks(dplane, crtc, 0, 0, false, false);
......@@ -315,8 +315,8 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
if (intel_plane->can_scale)
I915_WRITE(SPRSCALE(pipe), sprscale);
I915_WRITE(SPRCTL(pipe), sprctl);
I915_MODIFY_DISPBASE(SPRSURF(pipe),
i915_gem_obj_ggtt_offset(obj) + sprsurf_offset);
I915_WRITE(SPRSURF(pipe),
i915_gem_obj_ggtt_offset(obj) + sprsurf_offset);
POSTING_READ(SPRSURF(pipe));
}
......@@ -333,7 +333,7 @@ ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
if (intel_plane->can_scale)
I915_WRITE(SPRSCALE(pipe), 0);
/* Activate double buffered register update */
I915_MODIFY_DISPBASE(SPRSURF(pipe), 0);
I915_WRITE(SPRSURF(pipe), 0);
POSTING_READ(SPRSURF(pipe));
/*
......@@ -489,8 +489,8 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
I915_WRITE(DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
I915_WRITE(DVSSCALE(pipe), dvsscale);
I915_WRITE(DVSCNTR(pipe), dvscntr);
I915_MODIFY_DISPBASE(DVSSURF(pipe),
i915_gem_obj_ggtt_offset(obj) + dvssurf_offset);
I915_WRITE(DVSSURF(pipe),
i915_gem_obj_ggtt_offset(obj) + dvssurf_offset);
POSTING_READ(DVSSURF(pipe));
}
......@@ -506,7 +506,7 @@ ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
/* Disable the scaler */
I915_WRITE(DVSSCALE(pipe), 0);
/* Flush double buffered register updates */
I915_MODIFY_DISPBASE(DVSSURF(pipe), 0);
I915_WRITE(DVSSURF(pipe), 0);
POSTING_READ(DVSSURF(pipe));
/*
......
......@@ -805,7 +805,7 @@ static const struct register_whitelist {
uint32_t size;
uint32_t gen_bitmask; /* support gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
} whitelist[] = {
{ RING_TIMESTAMP(RENDER_RING_BASE), 8, 0xF0 },
{ RING_TIMESTAMP(RENDER_RING_BASE), 8, 0x1F0 },
};
int i915_reg_read_ioctl(struct drm_device *dev,
......
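The whitelist comment above describes gen_bitmask as one bit per supported gen (0x10 for gen 4, 0x30 for gens 4 and 5, and so on). Under that encoding, bumping RING_TIMESTAMP's mask from 0xF0 to 0x1F0 extends it from gens 4-7 to gens 4-8, which is what allows Gen8 to read the register. A tiny check of that arithmetic:

#include <assert.h>

static int gen_supported(unsigned int mask, unsigned int gen)
{
        return (mask >> gen) & 1;
}

int main(void)
{
        assert(gen_supported(0x010, 4));                              /* "0x10 for 4" */
        assert(gen_supported(0x0F0, 7) && !gen_supported(0x0F0, 8));  /* old mask: gens 4-7 */
        assert(gen_supported(0x1F0, 8));                              /* new mask adds gen 8 */
        return 0;
}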
......@@ -721,7 +721,7 @@ struct drm_i915_gem_execbuffer2 {
*/
#define I915_EXEC_IS_PINNED (1<<10)
/** Provide a hint to the kernel that the command stream and auxilliary
/** Provide a hint to the kernel that the command stream and auxiliary
* state buffers already hold the correct presumed addresses and so the
* relocation process may be skipped if no buffers need to be moved in
* preparation for the execbuffer.
......