Commit b81a6179 authored by Dave Airlie

Merge tag 'drm-intel-next-2016-09-19' of git://anongit.freedesktop.org/drm-intel into drm-next

- refactor the sseu code (Imre)
- refine guc dmesg output (Dave Gordon)
- more vgpu work
- more skl wm fixes (Lyude)
- refactor dpll code in prep for upfront link training (Jim Bride et al)
- consolidate all platform feature checks into intel_device_info (Carlos Santa)
- refactor elsp/execlist submission as prep for re-submission after hang
  recovery and eventually scheduling (Chris Wilson)
- allow synchronous gpu reset handling, to remove tricky/impossible/fragile
  error recovery code (Chris Wilson)
- prep work for nonblocking (execlist) submission, using fences to track
  dependencies and drive elsp submission (Chris Wilson)
- partial error recovery/resubmission of non-guilty batches after hangs (Chris Wilson)
- full dma-buf implicit fencing support (Chris Wilson)
- dp link training fixes (Jim, Dhinakaran, Navare, ...)
- obey dp branch device pixel rate/bpc/clock limits (Mika Kahola), needed for
  many vga dongles
- bunch of small cleanups and polish all over, as usual

[airlied: printing macros collided]

* tag 'drm-intel-next-2016-09-19' of git://anongit.freedesktop.org/drm-intel: (163 commits)
  drm/i915: Update DRIVER_DATE to 20160919
  drm: Fix DisplayPort branch device ID kernel-doc
  drm/i915: use NULL for NULL pointers
  drm/i915: do not use 'false' as a NULL pointer
  drm/i915: make intel_dp_compute_bpp static
  drm: Add DP branch device info on debugfs
  drm/i915: Update bits per component for display info
  drm/i915: Check pixel rate for DP to VGA dongle
  drm/i915: Read DP branch device SW revision
  drm/i915: Read DP branch device HW revision
  drm/i915: Cleanup DisplayPort AUX channel initialization
  drm: Read DP branch device id
  drm: Helper to read max bits per component
  drm: Helper to read max clock rate
  drm: Drop VGA from bpc definitions
  drm: Add missing DP downstream port types
  drm/i915: Add ddb size field to device info structure
  drm/i915/guc: general tidying up (submission)
  drm/i915/guc: general tidying up (loader)
  drm/i915: clarify PMINTRMSK/pm_intr_keep usage
  ...
parents bd4a68da 6e05f3d3
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -507,8 +507,7 @@ static const struct pci_device_id intel_early_ids[] __initconst = {
 	INTEL_I915GM_IDS(&gen3_early_ops),
 	INTEL_I945G_IDS(&gen3_early_ops),
 	INTEL_I945GM_IDS(&gen3_early_ops),
-	INTEL_VLV_M_IDS(&gen6_early_ops),
-	INTEL_VLV_D_IDS(&gen6_early_ops),
+	INTEL_VLV_IDS(&gen6_early_ops),
 	INTEL_PINEVIEW_IDS(&gen3_early_ops),
 	INTEL_I965G_IDS(&gen3_early_ops),
 	INTEL_G33_IDS(&gen3_early_ops),
@@ -521,10 +520,8 @@ static const struct pci_device_id intel_early_ids[] __initconst = {
 	INTEL_SNB_M_IDS(&gen6_early_ops),
 	INTEL_IVB_M_IDS(&gen6_early_ops),
 	INTEL_IVB_D_IDS(&gen6_early_ops),
-	INTEL_HSW_D_IDS(&gen6_early_ops),
-	INTEL_HSW_M_IDS(&gen6_early_ops),
-	INTEL_BDW_M_IDS(&gen8_early_ops),
-	INTEL_BDW_D_IDS(&gen8_early_ops),
+	INTEL_HSW_IDS(&gen6_early_ops),
+	INTEL_BDW_IDS(&gen8_early_ops),
 	INTEL_CHV_IDS(&chv_early_ops),
 	INTEL_SKL_IDS(&gen9_early_ops),
 	INTEL_BXT_IDS(&gen9_early_ops),
......
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -439,6 +439,179 @@ int drm_dp_link_configure(struct drm_dp_aux *aux, struct drm_dp_link *link)
 }
 EXPORT_SYMBOL(drm_dp_link_configure);
 
+/**
+ * drm_dp_downstream_max_clock() - extract branch device max pixel rate
+ *                                 for legacy VGA converter or max TMDS
+ *                                 clock rate for others
+ * @dpcd: DisplayPort configuration data
+ * @port_cap: port capabilities
+ *
+ * Returns max clock in kHz on success or 0 if max clock not defined
+ */
+int drm_dp_downstream_max_clock(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+				const u8 port_cap[4])
+{
+	int type = port_cap[0] & DP_DS_PORT_TYPE_MASK;
+	bool detailed_cap_info = dpcd[DP_DOWNSTREAMPORT_PRESENT] &
+		DP_DETAILED_CAP_INFO_AVAILABLE;
+
+	if (!detailed_cap_info)
+		return 0;
+
+	switch (type) {
+	case DP_DS_PORT_TYPE_VGA:
+		return port_cap[1] * 8 * 1000;
+	case DP_DS_PORT_TYPE_DVI:
+	case DP_DS_PORT_TYPE_HDMI:
+	case DP_DS_PORT_TYPE_DP_DUALMODE:
+		return port_cap[1] * 2500;
+	default:
+		return 0;
+	}
+}
+EXPORT_SYMBOL(drm_dp_downstream_max_clock);
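The units in this helper come straight from the DPCD detailed capability block: a DP-to-VGA converter reports its maximum pixel rate in units of 8 MHz in port_cap[1], while DVI/HDMI/DP++ ports report their maximum TMDS clock in units of 2.5 MHz. As a standalone illustration of the same decode (hypothetical dongle bytes; the two constants are inlined from drm_dp_helper.h):

#include <stdint.h>
#include <stdio.h>

#define DS_PORT_TYPE_MASK	0x07	/* DP_DS_PORT_TYPE_MASK */
#define DS_PORT_TYPE_VGA	0x01	/* DP_DS_PORT_TYPE_VGA */

int main(void)
{
	/* Hypothetical DP-to-VGA dongle: type VGA, max pixel rate 25 * 8 MHz */
	uint8_t port_cap[4] = { DS_PORT_TYPE_VGA, 25, 0, 0 };
	int type = port_cap[0] & DS_PORT_TYPE_MASK;
	int clock_khz;

	if (type == DS_PORT_TYPE_VGA)
		clock_khz = port_cap[1] * 8 * 1000;	/* 8 MHz units */
	else
		clock_khz = port_cap[1] * 2500;		/* 2.5 MHz units */

	printf("max clock: %d kHz\n", clock_khz);	/* prints 200000 */
	return 0;
}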
+/**
+ * drm_dp_downstream_max_bpc() - extract branch device max
+ *                               bits per component
+ * @dpcd: DisplayPort configuration data
+ * @port_cap: port capabilities
+ *
+ * Returns max bpc on success or 0 if max bpc not defined
+ */
+int drm_dp_downstream_max_bpc(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+			      const u8 port_cap[4])
+{
+	int type = port_cap[0] & DP_DS_PORT_TYPE_MASK;
+	bool detailed_cap_info = dpcd[DP_DOWNSTREAMPORT_PRESENT] &
+		DP_DETAILED_CAP_INFO_AVAILABLE;
+	int bpc;
+
+	if (!detailed_cap_info)
+		return 0;
+
+	switch (type) {
+	case DP_DS_PORT_TYPE_VGA:
+	case DP_DS_PORT_TYPE_DVI:
+	case DP_DS_PORT_TYPE_HDMI:
+	case DP_DS_PORT_TYPE_DP_DUALMODE:
+		bpc = port_cap[2] & DP_DS_MAX_BPC_MASK;
+
+		switch (bpc) {
+		case DP_DS_8BPC:
+			return 8;
+		case DP_DS_10BPC:
+			return 10;
+		case DP_DS_12BPC:
+			return 12;
+		case DP_DS_16BPC:
+			return 16;
+		}
+	default:
+		return 0;
+	}
+}
+EXPORT_SYMBOL(drm_dp_downstream_max_bpc);
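On the i915 side ("drm/i915: Update bits per component for display info" in the shortlog above), this limit feeds the pipe bpp computation: with three color components, the pipe bpp should not exceed three times the downstream max bpc. A hedged sketch of that clamp (function and variable names here are illustrative, not the i915 ones):

/* Clamp a proposed pipe bpp against a branch device limit; returns the
 * (possibly reduced) bpp. A max_bpc of 0 means "no limit reported". */
static int clamp_bpp_to_downstream(int pipe_bpp, int downstream_max_bpc)
{
	if (downstream_max_bpc > 0 && pipe_bpp > 3 * downstream_max_bpc)
		return 3 * downstream_max_bpc;
	return pipe_bpp;
}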
+/**
+ * drm_dp_downstream_id() - identify branch device
+ * @aux: DisplayPort AUX channel
+ * @id: DisplayPort branch device id
+ *
+ * Returns the number of bytes transferred on success, or a negative
+ * error code on failure
+ */
+int drm_dp_downstream_id(struct drm_dp_aux *aux, char id[6])
+{
+	return drm_dp_dpcd_read(aux, DP_BRANCH_ID, id, 6);
+}
+EXPORT_SYMBOL(drm_dp_downstream_id);
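One caller-side caveat: the six bytes of DP_BRANCH_ID are raw DPCD data with no terminating NUL, so a caller that logs the ID should bound the format width. A minimal sketch of a hypothetical caller (not part of this series):

/* Hypothetical caller: log the branch device ID safely. The id buffer
 * is six raw bytes with no terminating NUL, so bound the printf width. */
static void log_branch_id(struct drm_dp_aux *aux)
{
	char id[6];

	if (drm_dp_downstream_id(aux, id) == sizeof(id))
		DRM_DEBUG_KMS("DP branch ID: %.6s\n", id);
}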
+/**
+ * drm_dp_downstream_debug() - debug DP branch devices
+ * @m: pointer for debugfs file
+ * @dpcd: DisplayPort configuration data
+ * @port_cap: port capabilities
+ * @aux: DisplayPort AUX channel
+ */
+void drm_dp_downstream_debug(struct seq_file *m,
+			     const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+			     const u8 port_cap[4], struct drm_dp_aux *aux)
+{
+	bool detailed_cap_info = dpcd[DP_DOWNSTREAMPORT_PRESENT] &
+				 DP_DETAILED_CAP_INFO_AVAILABLE;
+	int clk;
+	int bpc;
+	char id[6];
+	int len;
+	uint8_t rev[2];
+	int type = port_cap[0] & DP_DS_PORT_TYPE_MASK;
+	bool branch_device = dpcd[DP_DOWNSTREAMPORT_PRESENT] &
+			     DP_DWN_STRM_PORT_PRESENT;
+
+	seq_printf(m, "\tDP branch device present: %s\n",
+		   branch_device ? "yes" : "no");
+
+	if (!branch_device)
+		return;
+
+	switch (type) {
+	case DP_DS_PORT_TYPE_DP:
+		seq_puts(m, "\t\tType: DisplayPort\n");
+		break;
+	case DP_DS_PORT_TYPE_VGA:
+		seq_puts(m, "\t\tType: VGA\n");
+		break;
+	case DP_DS_PORT_TYPE_DVI:
+		seq_puts(m, "\t\tType: DVI\n");
+		break;
+	case DP_DS_PORT_TYPE_HDMI:
+		seq_puts(m, "\t\tType: HDMI\n");
+		break;
+	case DP_DS_PORT_TYPE_NON_EDID:
+		seq_puts(m, "\t\tType: others without EDID support\n");
+		break;
+	case DP_DS_PORT_TYPE_DP_DUALMODE:
+		seq_puts(m, "\t\tType: DP++\n");
+		break;
+	case DP_DS_PORT_TYPE_WIRELESS:
+		seq_puts(m, "\t\tType: Wireless\n");
+		break;
+	default:
+		seq_puts(m, "\t\tType: N/A\n");
+	}
+
+	drm_dp_downstream_id(aux, id);
+	seq_printf(m, "\t\tID: %s\n", id);
+
+	len = drm_dp_dpcd_read(aux, DP_BRANCH_HW_REV, &rev[0], 1);
+	if (len > 0)
+		seq_printf(m, "\t\tHW: %d.%d\n",
+			   (rev[0] & 0xf0) >> 4, rev[0] & 0xf);
+
+	len = drm_dp_dpcd_read(aux, DP_BRANCH_SW_REV, &rev, 2);
+	if (len > 0)
+		seq_printf(m, "\t\tSW: %d.%d\n", rev[0], rev[1]);
+
+	if (detailed_cap_info) {
+		clk = drm_dp_downstream_max_clock(dpcd, port_cap);
+
+		if (clk > 0) {
+			if (type == DP_DS_PORT_TYPE_VGA)
+				seq_printf(m, "\t\tMax dot clock: %d kHz\n", clk);
+			else
+				seq_printf(m, "\t\tMax TMDS clock: %d kHz\n", clk);
+		}
+
+		bpc = drm_dp_downstream_max_bpc(dpcd, port_cap);
+
+		if (bpc > 0)
+			seq_printf(m, "\t\tMax bpc: %d\n", bpc);
+	}
+}
+EXPORT_SYMBOL(drm_dp_downstream_debug);
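As a usage sketch, a driver exposes this through an ordinary seq_file-based debugfs node. The struct and field names below (my_dp, dpcd, downstream_ports, aux) are assumptions modeled on what a DP encoder driver such as i915 caches from DPCD reads, not names defined in this diff:

static int dp_branch_show(struct seq_file *m, void *unused)
{
	struct my_dp *dp = m->private;	/* hypothetical encoder state */

	/* dpcd[] and downstream_ports[] hold the cached receiver caps */
	drm_dp_downstream_debug(m, dp->dpcd, dp->downstream_ports, &dp->aux);
	return 0;
}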
 
 /*
  * I2C-over-AUX implementation
  */
......
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -16,6 +16,7 @@ i915-y := i915_drv.o \
 	  i915_params.o \
 	  i915_pci.o \
 	  i915_suspend.o \
+	  i915_sw_fence.o \
 	  i915_sysfs.o \
 	  intel_csr.o \
 	  intel_device_info.o \
......
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -984,7 +984,7 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
 	src = ERR_PTR(-ENODEV);
 	if (src_needs_clflush &&
-	    i915_memcpy_from_wc((void *)(uintptr_t)batch_start_offset, 0, 0)) {
+	    i915_memcpy_from_wc((void *)(uintptr_t)batch_start_offset, NULL, 0)) {
 		src = i915_gem_object_pin_map(src_obj, I915_MAP_WC);
 		if (!IS_ERR(src)) {
 			i915_memcpy_from_wc(dst,
......
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -420,22 +420,6 @@ static void i915_gem_context_unpin(struct i915_gem_context *ctx,
 	}
 }
 
-void i915_gem_context_reset(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = to_i915(dev);
-
-	lockdep_assert_held(&dev->struct_mutex);
-
-	if (i915.enable_execlists) {
-		struct i915_gem_context *ctx;
-
-		list_for_each_entry(ctx, &dev_priv->context_list, link)
-			intel_lr_context_reset(dev_priv, ctx);
-	}
-
-	i915_gem_context_lost(dev_priv);
-}
-
 int i915_gem_context_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
......
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -170,7 +170,9 @@ i915_gem_evict_something(struct i915_address_space *vm,
 	if (ret)
 		return ret;
 
-	ret = i915_gem_wait_for_idle(dev_priv, true);
+	ret = i915_gem_wait_for_idle(dev_priv,
+				     I915_WAIT_INTERRUPTIBLE |
+				     I915_WAIT_LOCKED);
 	if (ret)
 		return ret;
@@ -275,7 +277,9 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
 			return ret;
 	}
 
-	ret = i915_gem_wait_for_idle(dev_priv, true);
+	ret = i915_gem_wait_for_idle(dev_priv,
+				     I915_WAIT_INTERRUPTIBLE |
+				     I915_WAIT_LOCKED);
 	if (ret)
 		return ret;
......
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1131,13 +1131,25 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
 	list_for_each_entry(vma, vmas, exec_list) {
 		struct drm_i915_gem_object *obj = vma->obj;
+		struct reservation_object *resv;
 
 		if (obj->flags & other_rings) {
-			ret = i915_gem_object_sync(obj, req);
+			ret = i915_gem_request_await_object
+				(req, obj, obj->base.pending_write_domain);
 			if (ret)
 				return ret;
 		}
 
+		resv = i915_gem_object_get_dmabuf_resv(obj);
+		if (resv) {
+			ret = i915_sw_fence_await_reservation
+				(&req->submit, resv, &i915_fence_ops,
+				 obj->base.pending_write_domain, 10*HZ,
+				 GFP_KERNEL | __GFP_NOWARN);
+			if (ret < 0)
+				return ret;
+		}
+
 		if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
 			i915_gem_clflush_object(obj, false);
 	}
@@ -1253,12 +1265,9 @@ static struct i915_gem_context *
 i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
 			  struct intel_engine_cs *engine, const u32 ctx_id)
 {
-	struct i915_gem_context *ctx = NULL;
+	struct i915_gem_context *ctx;
 	struct i915_ctx_hang_stats *hs;
 
-	if (engine->id != RCS && ctx_id != DEFAULT_CONTEXT_HANDLE)
-		return ERR_PTR(-EINVAL);
-
 	ctx = i915_gem_context_lookup(file->driver_priv, ctx_id);
 	if (IS_ERR(ctx))
 		return ctx;
@@ -1538,13 +1547,9 @@ gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv,
 	struct drm_i915_file_private *file_priv = file->driver_priv;
 
 	/* Check whether the file_priv has already selected one ring. */
-	if ((int)file_priv->bsd_engine < 0) {
-		/* If not, use the ping-pong mechanism to select one. */
-		mutex_lock(&dev_priv->drm.struct_mutex);
-		file_priv->bsd_engine = dev_priv->mm.bsd_engine_dispatch_index;
-		dev_priv->mm.bsd_engine_dispatch_index ^= 1;
-		mutex_unlock(&dev_priv->drm.struct_mutex);
-	}
+	if ((int)file_priv->bsd_engine < 0)
+		file_priv->bsd_engine = atomic_fetch_xor(1,
+			&dev_priv->mm.bsd_engine_dispatch_index);
 
 	return file_priv->bsd_engine;
 }
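The replacement drops the struct_mutex round trip entirely: atomic_fetch_xor() flips the dispatch index between 0 and 1 and returns the pre-flip value in a single atomic step, so two racing callers can never be handed the same engine. A standalone C11 sketch of the same ping-pong (note the argument order: C11 atomic_fetch_xor(&v, 1) corresponds to the kernel's atomic_fetch_xor(1, &v)):

#include <stdatomic.h>
#include <stdio.h>

/* Each call atomically flips the index and returns the value it saw,
 * so consecutive callers alternate between engine 0 and engine 1
 * without taking a lock. */
static atomic_uint bsd_dispatch_index;

static unsigned int pick_bsd_engine(void)
{
	return atomic_fetch_xor(&bsd_dispatch_index, 1);
}

int main(void)
{
	for (int i = 0; i < 4; i++)
		printf("request %d -> engine %u\n", i, pick_bsd_engine());
	return 0;	/* engines print as 0, 1, 0, 1 */
}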
......
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -312,10 +312,6 @@ struct i915_page_dma {
 #define px_page(px) (px_base(px)->page)
 #define px_dma(px) (px_base(px)->daddr)
 
-struct i915_page_scratch {
-	struct i915_page_dma base;
-};
-
 struct i915_page_table {
 	struct i915_page_dma base;
@@ -361,7 +357,7 @@ struct i915_address_space {
 	bool closed;
 
-	struct i915_page_scratch *scratch_page;
+	struct i915_page_dma scratch_page;
 	struct i915_page_table *scratch_pt;
 	struct i915_page_directory *scratch_pd;
 	struct i915_page_directory_pointer *scratch_pdp; /* GEN8+ & 48b PPGTT */
......
--- a/drivers/gpu/drm/i915/i915_gem_request.h
+++ b/drivers/gpu/drm/i915/i915_gem_request.h
@@ -28,6 +28,7 @@
 #include <linux/fence.h>
 
 #include "i915_gem.h"
+#include "i915_sw_fence.h"
 
 struct intel_wait {
 	struct rb_node node;
@@ -82,26 +83,32 @@ struct drm_i915_gem_request {
 	struct intel_ring *ring;
 	struct intel_signal_node signaling;
 
+	struct i915_sw_fence submit;
+	wait_queue_t submitq;
+
 	/** GEM sequence number associated with the previous request,
 	 * when the HWS breadcrumb is equal to this the GPU is processing
 	 * this request.
 	 */
 	u32 previous_seqno;
 
-	/** Position in the ringbuffer of the start of the request */
+	/** Position in the ring of the start of the request */
 	u32 head;
 
 	/**
-	 * Position in the ringbuffer of the start of the postfix.
-	 * This is required to calculate the maximum available ringbuffer
-	 * space without overwriting the postfix.
+	 * Position in the ring of the start of the postfix.
+	 * This is required to calculate the maximum available ring space
+	 * without overwriting the postfix.
 	 */
 	u32 postfix;
 
-	/** Position in the ringbuffer of the end of the whole request */
+	/** Position in the ring of the end of the whole request */
 	u32 tail;
 
-	/** Preallocate space in the ringbuffer for the emitting the request */
+	/** Position in the ring of the end of any workarounds after the tail */
+	u32 wa_tail;
+
+	/** Preallocate space in the ring for the emitting the request */
 	u32 reserved_space;
 
 	/**
@@ -134,27 +141,8 @@ struct drm_i915_gem_request {
 	/** file_priv list entry for this request */
 	struct list_head client_list;
 
-	/**
-	 * The ELSP only accepts two elements at a time, so we queue
-	 * context/tail pairs on a given queue (ring->execlist_queue) until the
-	 * hardware is available. The queue serves a double purpose: we also use
-	 * it to keep track of the up to 2 contexts currently in the hardware
-	 * (usually one in execution and the other queued up by the GPU): We
-	 * only remove elements from the head of the queue when the hardware
-	 * informs us that an element has been completed.
-	 *
-	 * All accesses to the queue are mediated by a spinlock
-	 * (ring->execlist_lock).
-	 */
-
-	/** Execlist link in the submission queue.*/
+	/** Link in the execlist submission queue, guarded by execlist_lock. */
 	struct list_head execlist_link;
-
-	/** Execlists no. of times this request has been sent to the ELSP */
-	int elsp_submitted;
-
-	/** Execlists context hardware id. */
-	unsigned int ctx_hw_id;
 };
 
 extern const struct fence_ops i915_fence_ops;
@@ -222,6 +210,11 @@ static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst,
 	*pdst = src;
 }
 
+int
+i915_gem_request_await_object(struct drm_i915_gem_request *to,
+			      struct drm_i915_gem_object *obj,
+			      bool write);
+
 void __i915_add_request(struct drm_i915_gem_request *req, bool flush_caches);
 #define i915_add_request(req) \
 	__i915_add_request(req, true)
@@ -234,10 +227,12 @@ struct intel_rps_client;
 #define IS_RPS_USER(p) (!IS_ERR_OR_NULL(p))
 
 int i915_wait_request(struct drm_i915_gem_request *req,
-		      bool interruptible,
+		      unsigned int flags,
 		      s64 *timeout,
 		      struct intel_rps_client *rps)
 	__attribute__((nonnull(1)));
+#define I915_WAIT_INTERRUPTIBLE BIT(0)
+#define I915_WAIT_LOCKED BIT(1) /* struct_mutex held, handle GPU reset */
 
 static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine);
@@ -472,6 +467,19 @@ __i915_gem_active_get_rcu(const struct i915_gem_active *active)
 		if (!request || i915_gem_request_completed(request))
 			return NULL;
 
+		/* An especially silly compiler could decide to recompute the
+		 * result of i915_gem_request_completed, more specifically
+		 * re-emit the load for request->fence.seqno. A race would catch
+		 * a later seqno value, which could flip the result from true to
+		 * false. Which means part of the instructions below might not
+		 * be executed, while later on instructions are executed. Due to
+		 * barriers within the refcounting the inconsistency can't reach
+		 * past the call to i915_gem_request_get_rcu, but not executing
+		 * that while still executing i915_gem_request_put() creates
+		 * havoc enough. Prevent this with a compiler barrier.
+		 */
+		barrier();
+
 		request = i915_gem_request_get_rcu(request);
 
 		/* What stops the following rcu_access_pointer() from occurring
@@ -578,13 +586,15 @@ i915_gem_active_wait(const struct i915_gem_active *active, struct mutex *mutex)
 	if (!request)
 		return 0;
 
-	return i915_wait_request(request, true, NULL, NULL);
+	return i915_wait_request(request,
+				 I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
+				 NULL, NULL);
 }
 
 /**
  * i915_gem_active_wait_unlocked - waits until the request is completed
  * @active - the active request on which to wait
- * @interruptible - whether the wait can be woken by a userspace signal
+ * @flags - how to wait
  * @timeout - how long to wait at most
  * @rps - userspace client to charge for a waitboost
  *
@@ -605,7 +615,7 @@ i915_gem_active_wait(const struct i915_gem_active *active, struct mutex *mutex)
  */
 static inline int
 i915_gem_active_wait_unlocked(const struct i915_gem_active *active,
-			      bool interruptible,
+			      unsigned int flags,
 			      s64 *timeout,
 			      struct intel_rps_client *rps)
 {
@@ -614,7 +624,7 @@ i915_gem_active_wait_unlocked(const struct i915_gem_active *active,
 
 	request = i915_gem_active_get_unlocked(active);
 	if (request) {
-		ret = i915_wait_request(request, interruptible, timeout, rps);
+		ret = i915_wait_request(request, flags, timeout, rps);
 		i915_gem_request_put(request);
 	}
@@ -641,7 +651,9 @@ i915_gem_active_retire(struct i915_gem_active *active,
 	if (!request)
 		return 0;
 
-	ret = i915_wait_request(request, true, NULL, NULL);
+	ret = i915_wait_request(request,
+				 I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
+				 NULL, NULL);
 	if (ret)
 		return ret;
......
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -323,7 +323,7 @@ i915_gem_shrinker_lock_uninterruptible(struct drm_i915_private *dev_priv,
 	unsigned long timeout = jiffies + msecs_to_jiffies_timeout(timeout_ms);
 
 	do {
-		if (i915_gem_wait_for_idle(dev_priv, false) == 0 &&
+		if (i915_gem_wait_for_idle(dev_priv, 0) == 0 &&
 		    i915_gem_shrinker_lock(&dev_priv->drm, &slu->unlock))
 			break;
@@ -414,7 +414,7 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
 		return NOTIFY_DONE;
 
 	/* Force everything onto the inactive lists */
-	ret = i915_gem_wait_for_idle(dev_priv, false);
+	ret = i915_gem_wait_for_idle(dev_priv, I915_WAIT_LOCKED);
 	if (ret)
 		goto out;
......
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -92,6 +92,7 @@ void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
 static unsigned long i915_stolen_to_physical(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct pci_dev *pdev = dev_priv->drm.pdev;
 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	struct resource *r;
 	u32 base;
@@ -111,7 +112,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
 	if (INTEL_INFO(dev)->gen >= 3) {
 		u32 bsm;
 
-		pci_read_config_dword(dev->pdev, INTEL_BSM, &bsm);
+		pci_read_config_dword(pdev, INTEL_BSM, &bsm);
 
 		base = bsm & INTEL_BSM_MASK;
 	} else if (IS_I865G(dev)) {
@@ -119,7 +120,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
 		u16 toud = 0;
 		u8 tmp;
 
-		pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0),
+		pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
 					 I845_ESMRAMC, &tmp);
 
 		if (tmp & TSEG_ENABLE) {
@@ -133,7 +134,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
 			}
 		}
 
-		pci_bus_read_config_word(dev->pdev->bus, PCI_DEVFN(0, 0),
+		pci_bus_read_config_word(pdev->bus, PCI_DEVFN(0, 0),
 					 I865_TOUD, &toud);
 
 		base = (toud << 16) + tseg_size;
@@ -142,13 +143,13 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
 		u32 tom;
 		u8 tmp;
 
-		pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0),
+		pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
 					 I85X_ESMRAMC, &tmp);
 
 		if (tmp & TSEG_ENABLE)
 			tseg_size = MB(1);
 
-		pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 1),
+		pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 1),
 					 I85X_DRB3, &tmp);
 		tom = tmp * MB(32);
@@ -158,7 +159,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
 		u32 tom;
 		u8 tmp;
 
-		pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0),
+		pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
 					 I845_ESMRAMC, &tmp);
 
 		if (tmp & TSEG_ENABLE) {
@@ -172,7 +173,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
 			}
 		}
 
-		pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0),
+		pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
 					 I830_DRB3, &tmp);
 		tom = tmp * MB(32);
@@ -182,7 +183,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
 		u32 tom;
 		u8 tmp;
 
-		pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0),
+		pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
 					 I830_ESMRAMC, &tmp);
 
 		if (tmp & TSEG_ENABLE) {
@@ -192,7 +193,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
 			tseg_size = KB(512);
 		}
 
-		pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0),
+		pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
 					 I830_DRB3, &tmp);
 		tom = tmp * MB(32);
......
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -68,7 +68,7 @@ static void wait_rendering(struct drm_i915_gem_object *obj)
 	for_each_active(active, idx)
 		i915_gem_active_wait_unlocked(&obj->last_read[idx],
-					      false, NULL, NULL);
+					      0, NULL, NULL);
 }
 
 static void cancel_userptr(struct work_struct *work)
......
--- a/drivers/gpu/drm/i915/i915_guc_reg.h
+++ b/drivers/gpu/drm/i915/i915_guc_reg.h
@@ -103,9 +103,6 @@
 #define HOST2GUC_INTERRUPT		_MMIO(0xc4c8)
 #define   HOST2GUC_TRIGGER		(1<<0)
 
-#define DRBMISC1			0x1984
-#define   DOORBELL_ENABLE		(1<<0)
-
 #define GEN8_DRBREGL(x)			_MMIO(0x1000 + (x) * 8)
 #define   GEN8_DRB_VALID		(1<<0)
 #define GEN8_DRBREGU(x)			_MMIO(0x1000 + (x) * 8 + 4)
......
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -7067,7 +7067,7 @@ enum {
 #define VLV_RCEDATA			_MMIO(0xA0BC)
 #define GEN6_RC6pp_THRESHOLD		_MMIO(0xA0C0)
 #define GEN6_PMINTRMSK			_MMIO(0xA168)
-#define GEN8_PMINTR_REDIRECT_TO_NON_DISP	(1<<31)
+#define GEN8_PMINTR_REDIRECT_TO_GUC	(1<<31)
 #define GEN8_MISC_CTRL0			_MMIO(0xA180)
 #define VLV_PWRDWNUPCTL			_MMIO(0xA294)
 #define GEN9_MEDIA_PG_IDLE_HYSTERESIS	_MMIO(0xA0C4)
@@ -7123,6 +7123,15 @@ enum {
 #define GEN6_PCODE_MAILBOX			_MMIO(0x138124)
 #define   GEN6_PCODE_READY			(1<<31)
+#define   GEN6_PCODE_ERROR_MASK			0xFF
+#define     GEN6_PCODE_SUCCESS			0x0
+#define     GEN6_PCODE_ILLEGAL_CMD		0x1
+#define     GEN6_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE	0x2
+#define     GEN6_PCODE_TIMEOUT			0x3
+#define     GEN6_PCODE_UNIMPLEMENTED_CMD	0xFF
+#define     GEN7_PCODE_TIMEOUT			0x2
+#define     GEN7_PCODE_ILLEGAL_DATA		0x3
+#define     GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE	0x10
 #define   GEN6_PCODE_WRITE_RC6VIDS		0x4
 #define   GEN6_PCODE_READ_RC6VIDS		0x5
 #define  GEN6_ENCODE_RC6_VID(mv)		(((mv) - 245) / 5)
@@ -7144,6 +7153,10 @@ enum {
 #define   HSW_PCODE_DE_WRITE_FREQ_REQ		0x17
 #define   DISPLAY_IPS_CONTROL			0x19
 #define   HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL	0x1A
+#define   GEN9_PCODE_SAGV_CONTROL		0x21
+#define     GEN9_SAGV_DISABLE			0x0
+#define     GEN9_SAGV_IS_DISABLED		0x1
+#define     GEN9_SAGV_ENABLE			0x3
 #define GEN6_PCODE_DATA				_MMIO(0x138128)
 #define   GEN6_PCODE_FREQ_IA_RATIO_SHIFT	8
 #define   GEN6_PCODE_FREQ_RING_RATIO_SHIFT	16
......
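For context on how these new mailbox definitions get used: the pcode protocol is a simple ready-bit handshake on GEN6_PCODE_MAILBOX with the payload in GEN6_PCODE_DATA. The sketch below is modeled on i915's existing sandybridge_pcode_write() and is deliberately simplified (no GEN6_PCODE_DATA1 write, coarse error mapping); I915_READ/I915_WRITE/wait_for are the driver's usual MMIO helpers:

static int pcode_write_sketch(struct drm_i915_private *dev_priv,
			      u32 mbox, u32 val)
{
	if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY)
		return -EAGAIN;		/* previous command still pending */

	I915_WRITE(GEN6_PCODE_DATA, val);
	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

	/* pcode clears the ready bit once it has consumed the command */
	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500))
		return -ETIMEDOUT;

	/* the error field added above reports command status */
	if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_ERROR_MASK)
		return -EINVAL;

	return 0;
}

With the SAGV defines above, enabling SAGV would then be a single call such as pcode_write_sketch(dev_priv, GEN9_PCODE_SAGV_CONTROL, GEN9_SAGV_ENABLE).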