Commit 5481e27f authored by Dave Airlie's avatar Dave Airlie

Merge tag 'drm-intel-next-2016-10-24' of git://anongit.freedesktop.org/drm-intel into drm-next

- first slice of the gvt device model (Zhenyu et al)
- compression support for gpu error states (Chris)
- sunset clause on gpu errors resulting in dmesg noise telling users
  how to report them
- .rodata diet from Tvrtko
- switch over lots of macros to only take dev_priv (Tvrtko)
- underrun suppression for dp link training (Ville)
- lspcon (hdmi 2.0 on skl/bxt) support from Shashank Sharma, polish
  from Jani
- gen9 wm fixes from Paulo & Lyude
- updated ddi programming for kbl (Rodrigo)
- respect alternate aux/ddc pins (from vbt) for all ddi ports (Ville)

* tag 'drm-intel-next-2016-10-24' of git://anongit.freedesktop.org/drm-intel: (227 commits)
  drm/i915: Update DRIVER_DATE to 20161024
  drm/i915: Stop setting SNB min-freq-table 0 on powersave setup
  drm/i915/dp: add lane_count check in intel_dp_check_link_status
  drm/i915: Fix whitespace issues
  drm/i915: Clean up DDI DDC/AUX CH sanitation
  drm/i915: Respect alternate_ddc_pin for all DDI ports
  drm/i915: Respect alternate_aux_channel for all DDI ports
  drm/i915/gen9: Remove WaEnableYV12BugFixInHalfSliceChicken7
  drm/i915: KBL - Recommended buffer translation programming for DisplayPort
  drm/i915: Move down skl/kbl ddi iboost and n_edp_entires fixup
  drm/i915: Add a sunset clause to GPU hang logging
  drm/i915: Stop reporting error details in dmesg as well as the error-state
  drm/i915/gvt: do not ignore return value of create_scratch_page
  drm/i915/gvt: fix spare warnings on odd constant _Bool cast
  drm/i915/gvt: mark symbols static where possible
  drm/i915/gvt: fix sparse warnings on different address spaces
  drm/i915/gvt: properly access enabled intel_engine_cs
  drm/i915/gvt: Remove defunct vmap_batch()
  drm/i915/gvt: Use common mapping routines for shadow_bb object
  drm/i915/gvt: Use common mapping routines for indirect_ctx object
  ...
parents 61d0a04d 9558e74c
@@ -49,6 +49,15 @@ Intel GVT-g Guest Support(vGPU)
.. kernel-doc:: drivers/gpu/drm/i915/i915_vgpu.c
:internal:
Intel GVT-g Host Support(vGPU device model)
-------------------------------------------
.. kernel-doc:: drivers/gpu/drm/i915/intel_gvt.c
:doc: Intel GVT-g host support
.. kernel-doc:: drivers/gpu/drm/i915/intel_gvt.c
:internal:
Display Hardware Handling
=========================
@@ -4064,6 +4064,16 @@ F: include/drm/i915*
F: include/uapi/drm/i915_drm.h
F: Documentation/gpu/i915.rst
INTEL GVT-g DRIVERS (Intel GPU Virtualization)
M: Zhenyu Wang <zhenyuw@linux.intel.com>
M: Zhi Wang <zhi.a.wang@intel.com>
L: igvt-g-dev@lists.01.org
L: intel-gfx@lists.freedesktop.org
W: https://01.org/igvt-g
T: git https://github.com/01org/gvt-linux.git
S: Supported
F: drivers/gpu/drm/i915/gvt/
DRM DRIVERS FOR ATMEL HLCDC
M: Boris Brezillon <boris.brezillon@free-electrons.com>
L: dri-devel@lists.freedesktop.org
@@ -148,6 +148,14 @@ static bool is_type2_adaptor(uint8_t adaptor_id)
DP_DUAL_MODE_REV_TYPE2);
}
static bool is_lspcon_adaptor(const char hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN],
const uint8_t adaptor_id)
{
return is_hdmi_adaptor(hdmi_id) &&
(adaptor_id == (DP_DUAL_MODE_TYPE_TYPE2 |
DP_DUAL_MODE_TYPE_HAS_DPCD));
}
/**
* drm_dp_dual_mode_detect - Identify the DP dual mode adaptor
* @adapter: I2C adapter for the DDC bus
@@ -203,6 +211,8 @@ enum drm_dp_dual_mode_type drm_dp_dual_mode_detect(struct i2c_adapter *adapter)
ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_ADAPTOR_ID,
&adaptor_id, sizeof(adaptor_id));
if (ret == 0) {
if (is_lspcon_adaptor(hdmi_id, adaptor_id))
return DRM_DP_DUAL_MODE_LSPCON;
if (is_type2_adaptor(adaptor_id)) {
if (is_hdmi_adaptor(hdmi_id))
return DRM_DP_DUAL_MODE_TYPE2_HDMI;
@@ -364,3 +374,96 @@ const char *drm_dp_get_dual_mode_type_name(enum drm_dp_dual_mode_type type)
}
}
EXPORT_SYMBOL(drm_dp_get_dual_mode_type_name);
/**
* drm_lspcon_get_mode: Get LSPCON's current mode of operation by
* reading offset (0x80, 0x41)
* @adapter: I2C-over-aux adapter
* @mode: current lspcon mode of operation output variable
*
* Returns:
* 0 on success, with *mode set to the current mode of operation
* -error on failure
*/
int drm_lspcon_get_mode(struct i2c_adapter *adapter,
enum drm_lspcon_mode *mode)
{
u8 data;
int ret = 0;
if (!mode) {
DRM_ERROR("NULL input\n");
return -EINVAL;
}
/* Read Status: i2c over aux */
ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_LSPCON_CURRENT_MODE,
&data, sizeof(data));
if (ret < 0) {
DRM_ERROR("LSPCON read(0x80, 0x41) failed\n");
return -EFAULT;
}
if (data & DP_DUAL_MODE_LSPCON_MODE_PCON)
*mode = DRM_LSPCON_MODE_PCON;
else
*mode = DRM_LSPCON_MODE_LS;
return 0;
}
EXPORT_SYMBOL(drm_lspcon_get_mode);
/**
* drm_lspcon_set_mode: Change LSPCON's mode of operation by
* writing offset (0x80, 0x40)
* @adapter: I2C-over-aux adapter
* @mode: required mode of operation
*
* Returns:
* 0 on success, -error on failure/timeout
*/
int drm_lspcon_set_mode(struct i2c_adapter *adapter,
enum drm_lspcon_mode mode)
{
u8 data = 0;
int ret;
int time_out = 200;
enum drm_lspcon_mode current_mode;
if (mode == DRM_LSPCON_MODE_PCON)
data = DP_DUAL_MODE_LSPCON_MODE_PCON;
/* Change mode */
ret = drm_dp_dual_mode_write(adapter, DP_DUAL_MODE_LSPCON_MODE_CHANGE,
&data, sizeof(data));
if (ret < 0) {
DRM_ERROR("LSPCON mode change failed\n");
return ret;
}
/*
* Confirm mode change by reading the status bit.
* Sometimes, it takes a while to change the mode,
* so wait and retry until time out or done.
*/
do {
ret = drm_lspcon_get_mode(adapter, &current_mode);
if (ret) {
DRM_ERROR("can't confirm LSPCON mode change\n");
return ret;
} else {
if (current_mode != mode) {
msleep(10);
time_out -= 10;
} else {
DRM_DEBUG_KMS("LSPCON mode changed to %s\n",
mode == DRM_LSPCON_MODE_LS ?
"LS" : "PCON");
return 0;
}
}
} while (time_out);
DRM_ERROR("LSPCON mode change timed out\n");
return -ETIMEDOUT;
}
EXPORT_SYMBOL(drm_lspcon_set_mode);
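For illustration, a minimal caller sketch (not part of this patch; the helper name is hypothetical) using the two exported functions above to force an LSPCON into PCON mode for HDMI 2.0 operation:

/* Hypothetical caller: read the current LSPCON mode and switch to
 * PCON if the adaptor is still in LS (level shifter) mode.
 */
static int lspcon_force_pcon(struct i2c_adapter *adapter)
{
	enum drm_lspcon_mode mode;
	int ret;

	ret = drm_lspcon_get_mode(adapter, &mode);
	if (ret)
		return ret;

	if (mode == DRM_LSPCON_MODE_PCON)
		return 0; /* already in PCON mode */

	/* drm_lspcon_set_mode() polls up to 200ms for the change */
	return drm_lspcon_set_mode(adapter, DRM_LSPCON_MODE_PCON);
}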
@@ -46,6 +46,31 @@ config DRM_I915_PRELIMINARY_HW_SUPPORT
If in doubt, say "N".
config DRM_I915_CAPTURE_ERROR
bool "Enable capturing GPU state following a hang"
depends on DRM_I915
default y
help
This option enables capturing the GPU state when a hang is detected.
This information is vital for triaging hangs and assists in debugging.
Please report any hang to
https://bugs.freedesktop.org/enter_bug.cgi?product=DRI
for triaging.
If in doubt, say "Y".
config DRM_I915_COMPRESS_ERROR
bool "Compress GPU error state"
depends on DRM_I915_CAPTURE_ERROR
select ZLIB_DEFLATE
default y
help
This option selects ZLIB_DEFLATE if it isn't already
selected and causes any error state captured upon a GPU hang
to be compressed using zlib.
If in doubt, say "Y".
config DRM_I915_USERPTR
bool "Always enable userptr support"
depends on DRM_I915
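As a hedged example, enabling both capture and compression in a kernel configuration would look like this .config fragment (ZLIB_DEFLATE is pulled in by the select above):

CONFIG_DRM_I915=m
CONFIG_DRM_I915_CAPTURE_ERROR=y
CONFIG_DRM_I915_COMPRESS_ERROR=y
CONFIG_ZLIB_DEFLATE=y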
@@ -42,7 +42,6 @@ i915-y += i915_cmd_parser.o \
i915_gem_stolen.o \
i915_gem_tiling.o \
i915_gem_userptr.o \
i915_gpu_error.o \
i915_trace_points.o \
intel_breadcrumbs.o \
intel_engine_cs.o \
@@ -102,11 +101,15 @@ i915-y += dvo_ch7017.o \
intel_dvo.o \
intel_hdmi.o \
intel_i2c.o \
intel_lspcon.o \
intel_lvds.o \
intel_panel.o \
intel_sdvo.o \
intel_tv.o
# Post-mortem debug and GPU hang state capture
i915-$(CONFIG_DRM_I915_CAPTURE_ERROR) += i915_gpu_error.o
# virtual gpu code
i915-y += i915_vgpu.o
GVT_DIR := gvt
GVT_SOURCE := gvt.o
GVT_SOURCE := gvt.o aperture_gm.o handlers.o vgpu.o trace_points.o firmware.o \
interrupt.o gtt.o cfg_space.o opregion.o mmio.o display.o edid.o \
execlist.o scheduler.o sched_policy.o render.o cmd_parser.o
ccflags-y += -I$(src) -I$(src)/$(GVT_DIR) -Wall
i915-y += $(addprefix $(GVT_DIR)/, $(GVT_SOURCE))
/*
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Kevin Tian <kevin.tian@intel.com>
* Dexuan Cui
*
* Contributors:
* Pei Zhang <pei.zhang@intel.com>
* Min He <min.he@intel.com>
* Niu Bing <bing.niu@intel.com>
* Yulei Zhang <yulei.zhang@intel.com>
* Zhenyu Wang <zhenyuw@linux.intel.com>
* Zhi Wang <zhi.a.wang@intel.com>
*
*/
#include "i915_drv.h"
#include "gvt.h"
#define MB_TO_BYTES(mb) ((mb) << 20ULL)
#define BYTES_TO_MB(b) ((b) >> 20ULL)
#define HOST_LOW_GM_SIZE MB_TO_BYTES(128)
#define HOST_HIGH_GM_SIZE MB_TO_BYTES(384)
#define HOST_FENCE 4
static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
{
struct intel_gvt *gvt = vgpu->gvt;
struct drm_i915_private *dev_priv = gvt->dev_priv;
u32 alloc_flag, search_flag;
u64 start, end, size;
struct drm_mm_node *node;
int retried = 0;
int ret;
if (high_gm) {
search_flag = DRM_MM_SEARCH_BELOW;
alloc_flag = DRM_MM_CREATE_TOP;
node = &vgpu->gm.high_gm_node;
size = vgpu_hidden_sz(vgpu);
start = gvt_hidden_gmadr_base(gvt);
end = gvt_hidden_gmadr_end(gvt);
} else {
search_flag = DRM_MM_SEARCH_DEFAULT;
alloc_flag = DRM_MM_CREATE_DEFAULT;
node = &vgpu->gm.low_gm_node;
size = vgpu_aperture_sz(vgpu);
start = gvt_aperture_gmadr_base(gvt);
end = gvt_aperture_gmadr_end(gvt);
}
mutex_lock(&dev_priv->drm.struct_mutex);
search_again:
ret = drm_mm_insert_node_in_range_generic(&dev_priv->ggtt.base.mm,
node, size, 4096, 0,
start, end, search_flag,
alloc_flag);
if (ret) {
ret = i915_gem_evict_something(&dev_priv->ggtt.base,
size, 4096, 0, start, end, 0);
if (ret == 0 && ++retried < 3)
goto search_again;
gvt_err("fail to alloc %s gm space from host, retried %d\n",
high_gm ? "high" : "low", retried);
}
mutex_unlock(&dev_priv->drm.struct_mutex);
return ret;
}
static int alloc_vgpu_gm(struct intel_vgpu *vgpu)
{
struct intel_gvt *gvt = vgpu->gvt;
struct drm_i915_private *dev_priv = gvt->dev_priv;
int ret;
ret = alloc_gm(vgpu, false);
if (ret)
return ret;
ret = alloc_gm(vgpu, true);
if (ret)
goto out_free_aperture;
gvt_dbg_core("vgpu%d: alloc low GM start %llx size %llx\n", vgpu->id,
vgpu_aperture_offset(vgpu), vgpu_aperture_sz(vgpu));
gvt_dbg_core("vgpu%d: alloc high GM start %llx size %llx\n", vgpu->id,
vgpu_hidden_offset(vgpu), vgpu_hidden_sz(vgpu));
return 0;
out_free_aperture:
mutex_lock(&dev_priv->drm.struct_mutex);
drm_mm_remove_node(&vgpu->gm.low_gm_node);
mutex_unlock(&dev_priv->drm.struct_mutex);
return ret;
}
static void free_vgpu_gm(struct intel_vgpu *vgpu)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
mutex_lock(&dev_priv->drm.struct_mutex);
drm_mm_remove_node(&vgpu->gm.low_gm_node);
drm_mm_remove_node(&vgpu->gm.high_gm_node);
mutex_unlock(&dev_priv->drm.struct_mutex);
}
/**
* intel_vgpu_write_fence - write fence registers owned by a vGPU
* @vgpu: vGPU instance
* @fence: vGPU fence register number
* @value: Fence register value to be written
*
* This function is used to write fence registers owned by a vGPU. The vGPU
* fence register number will be translated into HW fence register number.
*
*/
void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
u32 fence, u64 value)
{
struct intel_gvt *gvt = vgpu->gvt;
struct drm_i915_private *dev_priv = gvt->dev_priv;
struct drm_i915_fence_reg *reg;
i915_reg_t fence_reg_lo, fence_reg_hi;
assert_rpm_wakelock_held(dev_priv);
if (WARN_ON(fence >= vgpu_fence_sz(vgpu)))
return;
reg = vgpu->fence.regs[fence];
if (WARN_ON(!reg))
return;
fence_reg_lo = FENCE_REG_GEN6_LO(reg->id);
fence_reg_hi = FENCE_REG_GEN6_HI(reg->id);
I915_WRITE(fence_reg_lo, 0);
POSTING_READ(fence_reg_lo);
I915_WRITE(fence_reg_hi, upper_32_bits(value));
I915_WRITE(fence_reg_lo, lower_32_bits(value));
POSTING_READ(fence_reg_lo);
}
static void free_vgpu_fence(struct intel_vgpu *vgpu)
{
struct intel_gvt *gvt = vgpu->gvt;
struct drm_i915_private *dev_priv = gvt->dev_priv;
struct drm_i915_fence_reg *reg;
u32 i;
if (WARN_ON(!vgpu_fence_sz(vgpu)))
return;
intel_runtime_pm_get(dev_priv);
mutex_lock(&dev_priv->drm.struct_mutex);
for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
reg = vgpu->fence.regs[i];
intel_vgpu_write_fence(vgpu, i, 0);
list_add_tail(&reg->link,
&dev_priv->mm.fence_list);
}
mutex_unlock(&dev_priv->drm.struct_mutex);
intel_runtime_pm_put(dev_priv);
}
static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
{
struct intel_gvt *gvt = vgpu->gvt;
struct drm_i915_private *dev_priv = gvt->dev_priv;
struct drm_i915_fence_reg *reg;
int i;
struct list_head *pos, *q;
intel_runtime_pm_get(dev_priv);
/* Request fences from host */
mutex_lock(&dev_priv->drm.struct_mutex);
i = 0;
list_for_each_safe(pos, q, &dev_priv->mm.fence_list) {
reg = list_entry(pos, struct drm_i915_fence_reg, link);
if (reg->pin_count || reg->vma)
continue;
list_del(pos);
vgpu->fence.regs[i] = reg;
intel_vgpu_write_fence(vgpu, i, 0);
if (++i == vgpu_fence_sz(vgpu))
break;
}
if (i != vgpu_fence_sz(vgpu))
goto out_free_fence;
mutex_unlock(&dev_priv->drm.struct_mutex);
intel_runtime_pm_put(dev_priv);
return 0;
out_free_fence:
/* Return fences to host, if fail */
for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
reg = vgpu->fence.regs[i];
if (!reg)
continue;
list_add_tail(&reg->link,
&dev_priv->mm.fence_list);
}
mutex_unlock(&dev_priv->drm.struct_mutex);
intel_runtime_pm_put(dev_priv);
return -ENOSPC;
}
static void free_resource(struct intel_vgpu *vgpu)
{
struct intel_gvt *gvt = vgpu->gvt;
gvt->gm.vgpu_allocated_low_gm_size -= vgpu_aperture_sz(vgpu);
gvt->gm.vgpu_allocated_high_gm_size -= vgpu_hidden_sz(vgpu);
gvt->fence.vgpu_allocated_fence_num -= vgpu_fence_sz(vgpu);
}
static int alloc_resource(struct intel_vgpu *vgpu,
struct intel_vgpu_creation_params *param)
{
struct intel_gvt *gvt = vgpu->gvt;
unsigned long request, avail, max, taken;
const char *item;
if (!param->low_gm_sz || !param->high_gm_sz || !param->fence_sz) {
gvt_err("Invalid vGPU creation params\n");
return -EINVAL;
}
item = "low GM space";
max = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE;
taken = gvt->gm.vgpu_allocated_low_gm_size;
avail = max - taken;
request = MB_TO_BYTES(param->low_gm_sz);
if (request > avail)
goto no_enough_resource;
vgpu_aperture_sz(vgpu) = request;
item = "high GM space";
max = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE;
taken = gvt->gm.vgpu_allocated_high_gm_size;
avail = max - taken;
request = MB_TO_BYTES(param->high_gm_sz);
if (request > avail)
goto no_enough_resource;
vgpu_hidden_sz(vgpu) = request;
item = "fence";
max = gvt_fence_sz(gvt) - HOST_FENCE;
taken = gvt->fence.vgpu_allocated_fence_num;
avail = max - taken;
request = param->fence_sz;
if (request > avail)
goto no_enough_resource;
vgpu_fence_sz(vgpu) = request;
gvt->gm.vgpu_allocated_low_gm_size += MB_TO_BYTES(param->low_gm_sz);
gvt->gm.vgpu_allocated_high_gm_size += MB_TO_BYTES(param->high_gm_sz);
gvt->fence.vgpu_allocated_fence_num += param->fence_sz;
return 0;
no_enough_resource:
gvt_err("vgpu%d: fail to allocate resource %s\n", vgpu->id, item);
gvt_err("vgpu%d: request %luMB avail %luMB max %luMB taken %luMB\n",
vgpu->id, BYTES_TO_MB(request), BYTES_TO_MB(avail),
BYTES_TO_MB(max), BYTES_TO_MB(taken));
return -ENOSPC;
}
/**
* intel_vgpu_free_resource - free HW resource owned by a vGPU
* @vgpu: a vGPU
*
* This function is used to free the HW resource owned by a vGPU.
*
*/
void intel_vgpu_free_resource(struct intel_vgpu *vgpu)
{
free_vgpu_gm(vgpu);
free_vgpu_fence(vgpu);
free_resource(vgpu);
}
/**
* intel_alloc_vgpu_resource - allocate HW resource for a vGPU
* @vgpu: vGPU
* @param: vGPU creation params
*
* This function is used to allocate HW resource for a vGPU. User specifies
* the resource configuration through the creation params.
*
* Returns:
* zero on success, negative error code if failed.
*
*/
int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu,
struct intel_vgpu_creation_params *param)
{
int ret;
ret = alloc_resource(vgpu, param);
if (ret)
return ret;
ret = alloc_vgpu_gm(vgpu);
if (ret)
goto out_free_resource;
ret = alloc_vgpu_fence(vgpu);
if (ret)
goto out_free_vgpu_gm;
return 0;
out_free_vgpu_gm:
free_vgpu_gm(vgpu);
out_free_resource:
free_resource(vgpu);
return ret;
}
/*
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Eddie Dong <eddie.dong@intel.com>
* Jike Song <jike.song@intel.com>
*
* Contributors:
* Zhi Wang <zhi.a.wang@intel.com>
* Min He <min.he@intel.com>
* Bing Niu <bing.niu@intel.com>
*
*/
#include "i915_drv.h"
#include "gvt.h"
enum {
INTEL_GVT_PCI_BAR_GTTMMIO = 0,
INTEL_GVT_PCI_BAR_APERTURE,
INTEL_GVT_PCI_BAR_PIO,
INTEL_GVT_PCI_BAR_MAX,
};
/**
* intel_vgpu_emulate_cfg_read - emulate vGPU configuration space read
*
* Returns:
* Zero on success, negative error code if failed.
*/
int intel_vgpu_emulate_cfg_read(void *__vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
struct intel_vgpu *vgpu = __vgpu;
if (WARN_ON(bytes > 4))
return -EINVAL;
if (WARN_ON(offset + bytes > INTEL_GVT_MAX_CFG_SPACE_SZ))
return -EINVAL;
memcpy(p_data, vgpu_cfg_space(vgpu) + offset, bytes);
return 0;
}
static int map_aperture(struct intel_vgpu *vgpu, bool map)
{
u64 first_gfn, first_mfn;
u64 val;
int ret;
if (map == vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked)
return 0;
val = vgpu_cfg_space(vgpu)[PCI_BASE_ADDRESS_2];
if (val & PCI_BASE_ADDRESS_MEM_TYPE_64)
val = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_2);
else
val = *(u32 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_2);
first_gfn = (val + vgpu_aperture_offset(vgpu)) >> PAGE_SHIFT;
first_mfn = vgpu_aperture_pa_base(vgpu) >> PAGE_SHIFT;
ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu, first_gfn,
first_mfn,
vgpu_aperture_sz(vgpu)
>> PAGE_SHIFT, map,
GVT_MAP_APERTURE);
if (ret)
return ret;
vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked = map;
return 0;
}
static int trap_gttmmio(struct intel_vgpu *vgpu, bool trap)
{
u64 start, end;
u64 val;
int ret;
if (trap == vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].tracked)
return 0;
val = vgpu_cfg_space(vgpu)[PCI_BASE_ADDRESS_0];
if (val & PCI_BASE_ADDRESS_MEM_TYPE_64)
start = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0);
else
start = *(u32 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0);
start &= ~GENMASK(3, 0);
end = start + vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].size - 1;
ret = intel_gvt_hypervisor_set_trap_area(vgpu, start, end, trap);
if (ret)
return ret;
vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].tracked = trap;
return 0;
}
static int emulate_pci_command_write(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
u8 old = vgpu_cfg_space(vgpu)[offset];
u8 new = *(u8 *)p_data;
u8 changed = old ^ new;
int ret;
if (!(changed & PCI_COMMAND_MEMORY))
return 0;
if (old & PCI_COMMAND_MEMORY) {
ret = trap_gttmmio(vgpu, false);
if (ret)
return ret;
ret = map_aperture(vgpu, false);
if (ret)
return ret;
} else {
ret = trap_gttmmio(vgpu, true);
if (ret)
return ret;
ret = map_aperture(vgpu, true);
if (ret)
return ret;
}
memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes);
return 0;
}
static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
unsigned int bar_index =
(rounddown(offset, 8) % PCI_BASE_ADDRESS_0) / 8;
u32 new = *(u32 *)(p_data);
bool lo = IS_ALIGNED(offset, 8);
u64 size;
int ret = 0;
bool mmio_enabled =
vgpu_cfg_space(vgpu)[PCI_COMMAND] & PCI_COMMAND_MEMORY;
if (WARN_ON(bar_index >= INTEL_GVT_PCI_BAR_MAX))
return -EINVAL;
if (new == 0xffffffff) {
/*
* Power-up software can determine how much address
* space the device requires by writing a value of
* all 1's to the register and then reading the value
* back. The device will return 0's in all don't-care
* address bits.
*/
size = vgpu->cfg_space.bar[bar_index].size;
if (lo) {
new = rounddown(new, size);
} else {
u32 val = vgpu_cfg_space(vgpu)[rounddown(offset, 8)];
/* for 32bit mode bar it returns all-0 in upper 32
* bit, for 64bit mode bar it will calculate the
* size with lower 32bit and return the corresponding
* value
*/
if (val & PCI_BASE_ADDRESS_MEM_TYPE_64)
new &= (~(size-1)) >> 32;
else
new = 0;
}
/*
* Unmap & untrap the BAR, since the guest hasn't configured a
* valid GPA
*/
switch (bar_index) {
case INTEL_GVT_PCI_BAR_GTTMMIO:
ret = trap_gttmmio(vgpu, false);
break;
case INTEL_GVT_PCI_BAR_APERTURE:
ret = map_aperture(vgpu, false);
break;
}
intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
} else {
/*
* Unmap & untrap the old BAR first, since the guest has
* re-configured the BAR
*/
switch (bar_index) {
case INTEL_GVT_PCI_BAR_GTTMMIO:
ret = trap_gttmmio(vgpu, false);
break;
case INTEL_GVT_PCI_BAR_APERTURE:
ret = map_aperture(vgpu, false);
break;
}
intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
/* Track the new BAR */
if (mmio_enabled) {
switch (bar_index) {
case INTEL_GVT_PCI_BAR_GTTMMIO:
ret = trap_gttmmio(vgpu, true);
break;
case INTEL_GVT_PCI_BAR_APERTURE:
ret = map_aperture(vgpu, true);
break;
}
}
}
return ret;
}
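The sizing comment in emulate_pci_bar_write() follows the standard PCI BAR sizing protocol. A sketch of the guest-side handshake the emulation must satisfy, with cfg_read32()/cfg_write32() as hypothetical config-space accessors:

/* Write all 1's to the BAR, read back the mask, restore the BAR;
 * the size is the two's complement of the masked read-back value.
 */
static u32 probe_bar_size(unsigned int bar_offset)
{
	u32 orig, mask;

	orig = cfg_read32(bar_offset);
	cfg_write32(bar_offset, 0xffffffff);
	mask = cfg_read32(bar_offset);
	cfg_write32(bar_offset, orig);

	mask &= PCI_BASE_ADDRESS_MEM_MASK;	/* drop the low type bits */
	return ~mask + 1;			/* e.g. 0xfff00000 -> 1 MiB */
}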
/**
* intel_vgpu_emulate_cfg_write - emulate vGPU configuration space write
*
* Returns:
* Zero on success, negative error code if failed.
*/
int intel_vgpu_emulate_cfg_write(void *__vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
struct intel_vgpu *vgpu = __vgpu;
int ret;
if (WARN_ON(bytes > 4))
return -EINVAL;
if (WARN_ON(offset + bytes > INTEL_GVT_MAX_CFG_SPACE_SZ))
return -EINVAL;
/* First check if it's PCI_COMMAND */
if (IS_ALIGNED(offset, 2) && offset == PCI_COMMAND) {
if (WARN_ON(bytes > 2))
return -EINVAL;
return emulate_pci_command_write(vgpu, offset, p_data, bytes);
}
switch (rounddown(offset, 4)) {
case PCI_BASE_ADDRESS_0:
case PCI_BASE_ADDRESS_1:
case PCI_BASE_ADDRESS_2:
case PCI_BASE_ADDRESS_3:
if (WARN_ON(!IS_ALIGNED(offset, 4)))
return -EINVAL;
return emulate_pci_bar_write(vgpu, offset, p_data, bytes);
case INTEL_GVT_PCI_SWSCI:
if (WARN_ON(!IS_ALIGNED(offset, 4)))
return -EINVAL;
ret = intel_vgpu_emulate_opregion_request(vgpu, *(u32 *)p_data);
if (ret)
return ret;
break;
case INTEL_GVT_PCI_OPREGION:
if (WARN_ON(!IS_ALIGNED(offset, 4)))
return -EINVAL;
ret = intel_vgpu_init_opregion(vgpu, *(u32 *)p_data);
if (ret)
return ret;
memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes);
break;
default:
memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes);
break;
}
return 0;
}
/*
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Ke Yu
* Kevin Tian <kevin.tian@intel.com>
* Zhiyuan Lv <zhiyuan.lv@intel.com>
*
* Contributors:
* Min He <min.he@intel.com>
* Ping Gao <ping.a.gao@intel.com>
* Tina Zhang <tina.zhang@intel.com>
* Yulei Zhang <yulei.zhang@intel.com>
* Zhi Wang <zhi.a.wang@intel.com>
*
*/
#ifndef _GVT_CMD_PARSER_H_
#define _GVT_CMD_PARSER_H_
#define GVT_CMD_HASH_BITS 7
void intel_gvt_clean_cmd_parser(struct intel_gvt *gvt);
int intel_gvt_init_cmd_parser(struct intel_gvt *gvt);
int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload);
int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx);
#endif
@@ -24,11 +24,34 @@
#ifndef __GVT_DEBUG_H__
#define __GVT_DEBUG_H__
#define gvt_err(fmt, args...) \
DRM_ERROR("gvt: "fmt, ##args)
#define gvt_dbg_core(fmt, args...) \
DRM_DEBUG_DRIVER("gvt: core: "fmt, ##args)
/*
* Other GVT debug stuff will be introduced in the GVT device model patches.
*/
#define gvt_dbg_irq(fmt, args...) \
DRM_DEBUG_DRIVER("gvt: irq: "fmt, ##args)
#define gvt_dbg_mm(fmt, args...) \
DRM_DEBUG_DRIVER("gvt: mm: "fmt, ##args)
#define gvt_dbg_mmio(fmt, args...) \
DRM_DEBUG_DRIVER("gvt: mmio: "fmt, ##args)
#define gvt_dbg_dpy(fmt, args...) \
DRM_DEBUG_DRIVER("gvt: dpy: "fmt, ##args)
#define gvt_dbg_el(fmt, args...) \
DRM_DEBUG_DRIVER("gvt: el: "fmt, ##args)
#define gvt_dbg_sched(fmt, args...) \
DRM_DEBUG_DRIVER("gvt: sched: "fmt, ##args)
#define gvt_dbg_render(fmt, args...) \
DRM_DEBUG_DRIVER("gvt: render: "fmt, ##args)
#define gvt_dbg_cmd(fmt, args...) \
DRM_DEBUG_DRIVER("gvt: cmd: "fmt, ##args)
#endif
/*
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Ke Yu
* Zhiyuan Lv <zhiyuan.lv@intel.com>
*
* Contributors:
* Terrence Xu <terrence.xu@intel.com>
* Changbin Du <changbin.du@intel.com>
* Bing Niu <bing.niu@intel.com>
* Zhi Wang <zhi.a.wang@intel.com>
*
*/
#include "i915_drv.h"
#include "gvt.h"
static int get_edp_pipe(struct intel_vgpu *vgpu)
{
u32 data = vgpu_vreg(vgpu, _TRANS_DDI_FUNC_CTL_EDP);
int pipe = -1;
switch (data & TRANS_DDI_EDP_INPUT_MASK) {
case TRANS_DDI_EDP_INPUT_A_ON:
case TRANS_DDI_EDP_INPUT_A_ONOFF:
pipe = PIPE_A;
break;
case TRANS_DDI_EDP_INPUT_B_ONOFF:
pipe = PIPE_B;
break;
case TRANS_DDI_EDP_INPUT_C_ONOFF:
pipe = PIPE_C;
break;
}
return pipe;
}
static int edp_pipe_is_enabled(struct intel_vgpu *vgpu)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
if (!(vgpu_vreg(vgpu, PIPECONF(_PIPE_EDP)) & PIPECONF_ENABLE))
return 0;
if (!(vgpu_vreg(vgpu, _TRANS_DDI_FUNC_CTL_EDP) & TRANS_DDI_FUNC_ENABLE))
return 0;
return 1;
}
static int pipe_is_enabled(struct intel_vgpu *vgpu, int pipe)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
if (WARN_ON(pipe < PIPE_A || pipe >= I915_MAX_PIPES))
return -EINVAL;
if (vgpu_vreg(vgpu, PIPECONF(pipe)) & PIPECONF_ENABLE)
return 1;
if (edp_pipe_is_enabled(vgpu) &&
get_edp_pipe(vgpu) == pipe)
return 1;
return 0;
}
/* EDID with 1024x768 as its resolution */
static unsigned char virtual_dp_monitor_edid[] = {
/*Header*/
0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
/* Vendor & Product Identification */
0x22, 0xf0, 0x54, 0x29, 0x00, 0x00, 0x00, 0x00, 0x04, 0x17,
/* Version & Revision */
0x01, 0x04,
/* Basic Display Parameters & Features */
0xa5, 0x34, 0x20, 0x78, 0x23,
/* Color Characteristics */
0xfc, 0x81, 0xa4, 0x55, 0x4d, 0x9d, 0x25, 0x12, 0x50, 0x54,
/* Established Timings: maximum resolution is 1024x768 */
0x21, 0x08, 0x00,
/* Standard Timings. All invalid */
0x00, 0xc0, 0x00, 0xc0, 0x00, 0x40, 0x00, 0x80, 0x00, 0x00,
0x00, 0x40, 0x00, 0x00, 0x00, 0x01,
/* 18 Byte Data Blocks 1: invalid */
0x00, 0x00, 0x80, 0xa0, 0x70, 0xb0,
0x23, 0x40, 0x30, 0x20, 0x36, 0x00, 0x06, 0x44, 0x21, 0x00, 0x00, 0x1a,
/* 18 Byte Data Blocks 2: invalid */
0x00, 0x00, 0x00, 0xfd, 0x00, 0x18, 0x3c, 0x18, 0x50, 0x11, 0x00, 0x0a,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
/* 18 Byte Data Blocks 3: invalid */
0x00, 0x00, 0x00, 0xfc, 0x00, 0x48,
0x50, 0x20, 0x5a, 0x52, 0x32, 0x34, 0x34, 0x30, 0x77, 0x0a, 0x20, 0x20,
/* 18 Byte Data Blocks 4: invalid */
0x00, 0x00, 0x00, 0xff, 0x00, 0x43, 0x4e, 0x34, 0x33, 0x30, 0x34, 0x30,
0x44, 0x58, 0x51, 0x0a, 0x20, 0x20,
/* Extension Block Count */
0x00,
/* Checksum */
0xef,
};
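The trailing 0xef byte is the standard EDID checksum, chosen so the 128-byte base block sums to zero modulo 256. A quick sanity-check sketch (illustrative, not part of this patch):

static bool edid_block_sums_to_zero(const unsigned char *edid)
{
	unsigned char sum = 0;
	int i;

	for (i = 0; i < 128; i++)	/* EDID base block is 128 bytes */
		sum += edid[i];

	return sum == 0;
}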
#define DPCD_HEADER_SIZE 0xb
static u8 dpcd_fix_data[DPCD_HEADER_SIZE] = {
0x11, 0x0a, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};
static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
vgpu_vreg(vgpu, SDEISR) &= ~(SDE_PORTB_HOTPLUG_CPT |
SDE_PORTC_HOTPLUG_CPT |
SDE_PORTD_HOTPLUG_CPT);
if (IS_SKYLAKE(dev_priv))
vgpu_vreg(vgpu, SDEISR) &= ~(SDE_PORTA_HOTPLUG_SPT |
SDE_PORTE_HOTPLUG_SPT);
if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B))
vgpu_vreg(vgpu, SDEISR) |= SDE_PORTB_HOTPLUG_CPT;
if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C))
vgpu_vreg(vgpu, SDEISR) |= SDE_PORTC_HOTPLUG_CPT;
if (intel_vgpu_has_monitor_on_port(vgpu, PORT_D))
vgpu_vreg(vgpu, SDEISR) |= SDE_PORTD_HOTPLUG_CPT;
if (IS_SKYLAKE(dev_priv) &&
intel_vgpu_has_monitor_on_port(vgpu, PORT_E)) {
vgpu_vreg(vgpu, SDEISR) |= SDE_PORTE_HOTPLUG_SPT;
}
if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) {
if (IS_BROADWELL(dev_priv))
vgpu_vreg(vgpu, GEN8_DE_PORT_ISR) |=
GEN8_PORT_DP_A_HOTPLUG;
else
vgpu_vreg(vgpu, SDEISR) |= SDE_PORTA_HOTPLUG_SPT;
}
}
static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num)
{
struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num);
kfree(port->edid);
port->edid = NULL;
kfree(port->dpcd);
port->dpcd = NULL;
}
static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
int type)
{
struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num);
port->edid = kzalloc(sizeof(*(port->edid)), GFP_KERNEL);
if (!port->edid)
return -ENOMEM;
port->dpcd = kzalloc(sizeof(*(port->dpcd)), GFP_KERNEL);
if (!port->dpcd) {
kfree(port->edid);
return -ENOMEM;
}
memcpy(port->edid->edid_block, virtual_dp_monitor_edid,
EDID_SIZE);
port->edid->data_valid = true;
memcpy(port->dpcd->data, dpcd_fix_data, DPCD_HEADER_SIZE);
port->dpcd->data_valid = true;
port->dpcd->data[DPCD_SINK_COUNT] = 0x1;
port->type = type;
emulate_monitor_status_change(vgpu);
return 0;
}
/**
* intel_gvt_check_vblank_emulation - check if vblank emulation timer should
* be turned on/off when a virtual pipe is enabled/disabled.
* @gvt: a GVT device
*
* This function is used to turn on/off vblank timer according to currently
* enabled/disabled virtual pipes.
*
*/
void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt)
{
struct intel_gvt_irq *irq = &gvt->irq;
struct intel_vgpu *vgpu;
bool have_enabled_pipe = false;
int pipe, id;
if (WARN_ON(!mutex_is_locked(&gvt->lock)))
return;
hrtimer_cancel(&irq->vblank_timer.timer);
for_each_active_vgpu(gvt, vgpu, id) {
for (pipe = 0; pipe < I915_MAX_PIPES; pipe++) {
have_enabled_pipe =
pipe_is_enabled(vgpu, pipe);
if (have_enabled_pipe)
break;
}
}
if (have_enabled_pipe)
hrtimer_start(&irq->vblank_timer.timer,
ktime_add_ns(ktime_get(), irq->vblank_timer.period),
HRTIMER_MODE_ABS);
}
static void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
struct intel_vgpu_irq *irq = &vgpu->irq;
int vblank_event[] = {
[PIPE_A] = PIPE_A_VBLANK,
[PIPE_B] = PIPE_B_VBLANK,
[PIPE_C] = PIPE_C_VBLANK,
};
int event;
if (pipe < PIPE_A || pipe > PIPE_C)
return;
for_each_set_bit(event, irq->flip_done_event[pipe],
INTEL_GVT_EVENT_MAX) {
clear_bit(event, irq->flip_done_event[pipe]);
if (!pipe_is_enabled(vgpu, pipe))
continue;
vgpu_vreg(vgpu, PIPE_FLIPCOUNT_G4X(pipe))++;
intel_vgpu_trigger_virtual_event(vgpu, event);
}
if (pipe_is_enabled(vgpu, pipe)) {
vgpu_vreg(vgpu, PIPE_FRMCOUNT_G4X(pipe))++;
intel_vgpu_trigger_virtual_event(vgpu, vblank_event[pipe]);
}
}
static void emulate_vblank(struct intel_vgpu *vgpu)
{
int pipe;
for_each_pipe(vgpu->gvt->dev_priv, pipe)
emulate_vblank_on_pipe(vgpu, pipe);
}
/**
* intel_gvt_emulate_vblank - trigger vblank events for vGPUs on GVT device
* @gvt: a GVT device
*
* This function is used to trigger vblank interrupts for vGPUs on GVT device
*
*/
void intel_gvt_emulate_vblank(struct intel_gvt *gvt)
{
struct intel_vgpu *vgpu;
int id;
if (WARN_ON(!mutex_is_locked(&gvt->lock)))
return;
for_each_active_vgpu(gvt, vgpu, id)
emulate_vblank(vgpu);
}
/**
* intel_vgpu_clean_display - clean vGPU virtual display emulation
* @vgpu: a vGPU
*
* This function is used to clean vGPU virtual display emulation stuff
*
*/
void intel_vgpu_clean_display(struct intel_vgpu *vgpu)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
if (IS_SKYLAKE(dev_priv))
clean_virtual_dp_monitor(vgpu, PORT_D);
else
clean_virtual_dp_monitor(vgpu, PORT_B);
}
/**
* intel_vgpu_init_display- initialize vGPU virtual display emulation
* @vgpu: a vGPU
*
* This function is used to initialize vGPU virtual display emulation stuff
*
* Returns:
* Zero on success, negative error code if failed.
*
*/
int intel_vgpu_init_display(struct intel_vgpu *vgpu)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
intel_vgpu_init_i2c_edid(vgpu);
if (IS_SKYLAKE(dev_priv))
return setup_virtual_dp_monitor(vgpu, PORT_D, GVT_DP_D);
else
return setup_virtual_dp_monitor(vgpu, PORT_B, GVT_DP_B);
}
/*
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Ke Yu
* Zhiyuan Lv <zhiyuan.lv@intel.com>
*
* Contributors:
* Terrence Xu <terrence.xu@intel.com>
* Changbin Du <changbin.du@intel.com>
* Bing Niu <bing.niu@intel.com>
* Zhi Wang <zhi.a.wang@intel.com>
*
*/
#ifndef _GVT_DISPLAY_H_
#define _GVT_DISPLAY_H_
#define SBI_REG_MAX 20
#define DPCD_SIZE 0x700
#define intel_vgpu_port(vgpu, port) \
(&(vgpu->display.ports[port]))
#define intel_vgpu_has_monitor_on_port(vgpu, port) \
(intel_vgpu_port(vgpu, port)->edid && \
intel_vgpu_port(vgpu, port)->edid->data_valid)
#define intel_vgpu_port_is_dp(vgpu, port) \
((intel_vgpu_port(vgpu, port)->type == GVT_DP_A) || \
(intel_vgpu_port(vgpu, port)->type == GVT_DP_B) || \
(intel_vgpu_port(vgpu, port)->type == GVT_DP_C) || \
(intel_vgpu_port(vgpu, port)->type == GVT_DP_D))
#define INTEL_GVT_MAX_UEVENT_VARS 3
/* DPCD start */
#define DPCD_SIZE 0x700
/* DPCD */
#define DP_SET_POWER 0x600
#define DP_SET_POWER_D0 0x1
#define AUX_NATIVE_WRITE 0x8
#define AUX_NATIVE_READ 0x9
#define AUX_NATIVE_REPLY_MASK (0x3 << 4)
#define AUX_NATIVE_REPLY_ACK (0x0 << 4)
#define AUX_NATIVE_REPLY_NAK (0x1 << 4)
#define AUX_NATIVE_REPLY_DEFER (0x2 << 4)
#define AUX_BURST_SIZE 16
/* DPCD addresses */
#define DPCD_REV 0x000
#define DPCD_MAX_LINK_RATE 0x001
#define DPCD_MAX_LANE_COUNT 0x002
#define DPCD_TRAINING_PATTERN_SET 0x102
#define DPCD_SINK_COUNT 0x200
#define DPCD_LANE0_1_STATUS 0x202
#define DPCD_LANE2_3_STATUS 0x203
#define DPCD_LANE_ALIGN_STATUS_UPDATED 0x204
#define DPCD_SINK_STATUS 0x205
/* link training */
#define DPCD_TRAINING_PATTERN_SET_MASK 0x03
#define DPCD_LINK_TRAINING_DISABLED 0x00
#define DPCD_TRAINING_PATTERN_1 0x01
#define DPCD_TRAINING_PATTERN_2 0x02
#define DPCD_CP_READY_MASK (1 << 6)
/* lane status */
#define DPCD_LANES_CR_DONE 0x11
#define DPCD_LANES_EQ_DONE 0x22
#define DPCD_SYMBOL_LOCKED 0x44
#define DPCD_INTERLANE_ALIGN_DONE 0x01
#define DPCD_SINK_IN_SYNC 0x03
/* DPCD end */
#define SBI_RESPONSE_MASK 0x3
#define SBI_RESPONSE_SHIFT 0x1
#define SBI_STAT_MASK 0x1
#define SBI_STAT_SHIFT 0x0
#define SBI_OPCODE_SHIFT 8
#define SBI_OPCODE_MASK (0xff << SBI_OPCODE_SHIFT)
#define SBI_CMD_IORD 2
#define SBI_CMD_IOWR 3
#define SBI_CMD_CRRD 6
#define SBI_CMD_CRWR 7
#define SBI_ADDR_OFFSET_SHIFT 16
#define SBI_ADDR_OFFSET_MASK (0xffff << SBI_ADDR_OFFSET_SHIFT)
struct intel_vgpu_sbi_register {
unsigned int offset;
u32 value;
};
struct intel_vgpu_sbi {
int number;
struct intel_vgpu_sbi_register registers[SBI_REG_MAX];
};
enum intel_gvt_plane_type {
PRIMARY_PLANE = 0,
CURSOR_PLANE,
SPRITE_PLANE,
MAX_PLANE
};
struct intel_vgpu_dpcd_data {
bool data_valid;
u8 data[DPCD_SIZE];
};
enum intel_vgpu_port_type {
GVT_CRT = 0,
GVT_DP_A,
GVT_DP_B,
GVT_DP_C,
GVT_DP_D,
GVT_HDMI_B,
GVT_HDMI_C,
GVT_HDMI_D,
GVT_PORT_MAX
};
struct intel_vgpu_port {
/* per display EDID information */
struct intel_vgpu_edid_data *edid;
/* per display DPCD information */
struct intel_vgpu_dpcd_data *dpcd;
int type;
};
void intel_gvt_emulate_vblank(struct intel_gvt *gvt);
void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt);
int intel_vgpu_init_display(struct intel_vgpu *vgpu);
void intel_vgpu_clean_display(struct intel_vgpu *vgpu);
#endif
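The lane-status values above pack two lanes per DPCD byte, one nibble each, so DPCD_LANES_CR_DONE (0x11) means clock recovery is done on both lanes of a pair. An illustrative check (not part of this patch):

static bool lane_pair_cr_done(u8 lane_status)
{
	return (lane_status & DPCD_LANES_CR_DONE) == DPCD_LANES_CR_DONE;
}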
/*
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Ke Yu
* Zhiyuan Lv <zhiyuan.lv@intel.com>
*
* Contributors:
* Terrence Xu <terrence.xu@intel.com>
* Changbin Du <changbin.du@intel.com>
* Bing Niu <bing.niu@intel.com>
* Zhi Wang <zhi.a.wang@intel.com>
*
*/
#ifndef _GVT_EDID_H_
#define _GVT_EDID_H_
#define EDID_SIZE 128
#define EDID_ADDR 0x50 /* Linux hvm EDID addr */
#define GVT_AUX_NATIVE_WRITE 0x8
#define GVT_AUX_NATIVE_READ 0x9
#define GVT_AUX_I2C_WRITE 0x0
#define GVT_AUX_I2C_READ 0x1
#define GVT_AUX_I2C_STATUS 0x2
#define GVT_AUX_I2C_MOT 0x4
#define GVT_AUX_I2C_REPLY_ACK (0x0 << 6)
struct intel_vgpu_edid_data {
bool data_valid;
unsigned char edid_block[EDID_SIZE];
};
enum gmbus_cycle_type {
GMBUS_NOCYCLE = 0x0,
NIDX_NS_W = 0x1,
IDX_NS_W = 0x3,
GMBUS_STOP = 0x4,
NIDX_STOP = 0x5,
IDX_STOP = 0x7
};
/*
* States of GMBUS
*
* GMBUS0-3 are relevant to the EDID virtualization. The other two GMBUS
* registers, GMBUS4 (interrupt mask) and GMBUS5 (2-byte index register), are
* not considered here. Below describes the usage of the GMBUS registers
* that matter to the EDID virtualization.
*
* GMBUS0:
* R/W
* port selection. value of bit0 - bit2 corresponds to the GPIO registers.
*
* GMBUS1:
* R/W Protect
* Command and Status.
* bit0 is the direction bit: 1 is read; 0 is write.
* bit1 - bit7 is slave 7-bit address.
* bit16 - bit24 total byte count (ignore?)
*
* GMBUS2:
* Most of bits are read only except bit 15 (IN_USE)
* Status register
* bit0 - bit8 current byte count
* bit 11: hardware ready;
*
* GMBUS3:
* Read/Write
* Data for transfer
*/
/* Per the hw specs, other phases like START, ADDRESS and INDEX
* are invisible to the GMBUS MMIO interface, so there are no
* definitions for them in the enum types below.
*/
enum gvt_gmbus_phase {
GMBUS_IDLE_PHASE = 0,
GMBUS_DATA_PHASE,
GMBUS_WAIT_PHASE,
//GMBUS_STOP_PHASE,
GMBUS_MAX_PHASE
};
struct intel_vgpu_i2c_gmbus {
unsigned int total_byte_count; /* from GMBUS1 */
enum gmbus_cycle_type cycle_type;
enum gvt_gmbus_phase phase;
};
struct intel_vgpu_i2c_aux_ch {
bool i2c_over_aux_ch;
bool aux_ch_mot;
};
enum i2c_state {
I2C_NOT_SPECIFIED = 0,
I2C_GMBUS = 1,
I2C_AUX_CH = 2
};
/* I2C sequences cannot interleave.
* GMBUS and AUX_CH sequences cannot interleave.
*/
struct intel_vgpu_i2c_edid {
enum i2c_state state;
unsigned int port;
bool slave_selected;
bool edid_available;
unsigned int current_edid_read;
struct intel_vgpu_i2c_gmbus gmbus;
struct intel_vgpu_i2c_aux_ch aux_ch;
};
void intel_vgpu_init_i2c_edid(struct intel_vgpu *vgpu);
int intel_gvt_i2c_handle_gmbus_read(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes);
int intel_gvt_i2c_handle_gmbus_write(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes);
void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu,
int port_idx,
unsigned int offset,
void *p_data);
#endif /*_GVT_EDID_H_*/
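Per the GMBUS1 description above (bit0 is the read/write direction, bits 1-7 the 7-bit slave address), decoding a command word is straightforward. A hedged sketch (the EDID slave at EDID_ADDR, 0x50, appears here as 0x50 << 1):

static inline bool gmbus1_is_read(u32 gmbus1)
{
	return gmbus1 & 0x1;		/* 1 = read, 0 = write */
}

static inline u8 gmbus1_slave_addr(u32 gmbus1)
{
	return (gmbus1 >> 1) & 0x7f;	/* 7-bit slave address */
}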
@@ -19,17 +19,51 @@
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Eddie Dong <eddie.dong@intel.com>
* Dexuan Cui
* Jike Song <jike.song@intel.com>
*
* Contributors:
* Zhi Wang <zhi.a.wang@intel.com>
*
*/
#ifndef _GVT_HYPERCALL_H_
#define _GVT_HYPERCALL_H_
struct intel_gvt_io_emulation_ops {
int (*emulate_cfg_read)(void *, unsigned int, void *, unsigned int);
int (*emulate_cfg_write)(void *, unsigned int, void *, unsigned int);
int (*emulate_mmio_read)(void *, u64, void *, unsigned int);
int (*emulate_mmio_write)(void *, u64, void *, unsigned int);
};
extern struct intel_gvt_io_emulation_ops intel_gvt_io_emulation_ops;
/*
* Specific GVT-g MPT modules function collections. Currently GVT-g supports
* both Xen and KVM by providing dedicated hypervisor-related MPT modules.
*/
struct intel_gvt_mpt {
int (*detect_host)(void);
int (*attach_vgpu)(void *vgpu, unsigned long *handle);
void (*detach_vgpu)(unsigned long handle);
int (*inject_msi)(unsigned long handle, u32 addr, u16 data);
unsigned long (*from_virt_to_mfn)(void *p);
int (*set_wp_page)(unsigned long handle, u64 gfn);
int (*unset_wp_page)(unsigned long handle, u64 gfn);
int (*read_gpa)(unsigned long handle, unsigned long gpa, void *buf,
unsigned long len);
int (*write_gpa)(unsigned long handle, unsigned long gpa, void *buf,
unsigned long len);
unsigned long (*gfn_to_mfn)(unsigned long handle, unsigned long gfn);
int (*map_gfn_to_mfn)(unsigned long handle, unsigned long gfn,
unsigned long mfn, unsigned int nr, bool map,
int type);
int (*set_trap_area)(unsigned long handle, u64 start, u64 end,
bool map);
};
extern struct intel_gvt_mpt xengt_mpt;
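Only xengt_mpt is declared here. As a hedged illustration of how a hypervisor backend plugs into this ops table, a hypothetical stub (the kvmgt_* names are placeholders, not part of this patch):

static int kvmgt_detect_host(void)
{
	return 0;	/* pretend the host hypervisor was detected */
}

struct intel_gvt_mpt kvmgt_mpt = {
	.detect_host = kvmgt_detect_host,
	/* .attach_vgpu, .inject_msi, ... filled in by a real backend */
};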
@@ -59,6 +59,7 @@ struct i915_params {
bool load_detect_test;
bool force_reset_modeset_test;
bool reset;
bool error_capture;
bool disable_display;
bool verbose_state_checks;
bool nuclear_pageflip;