Commit 39a75ac4 authored by Jani Nikula

Merge tag 'gvt-next-2017-02-07' of https://github.com/01org/gvt-linux into drm-intel-next-fixes

From Zhenyu: "These are the GVT-g changes for the 4.11 merge window, mostly the
gvt init order fix that impacted resource handling for the device model; the
one i915 change has been reviewed and acked."
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
parents 13f62f54 2d6ceb8e
@@ -83,7 +83,7 @@ static int pipe_is_enabled(struct intel_vgpu *vgpu, int pipe)
 	return 0;
 }
 
-/* EDID with 1024x768 as its resolution */
+/* EDID with 1920x1200 as its resolution */
 static unsigned char virtual_dp_monitor_edid[] = {
 	/*Header*/
 	0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
@@ -97,11 +97,16 @@ static unsigned char virtual_dp_monitor_edid[] = {
 	0xfc, 0x81, 0xa4, 0x55, 0x4d, 0x9d, 0x25, 0x12, 0x50, 0x54,
 	/* Established Timings: maximum resolution is 1024x768 */
 	0x21, 0x08, 0x00,
-	/* Standard Timings. All invalid */
-	0x00, 0xc0, 0x00, 0xc0, 0x00, 0x40, 0x00, 0x80, 0x00, 0x00,
-	0x00, 0x40, 0x00, 0x00, 0x00, 0x01,
-	/* 18 Byte Data Blocks 1: invalid */
-	0x00, 0x00, 0x80, 0xa0, 0x70, 0xb0,
+	/*
+	 * Standard Timings.
+	 * below new resolutions can be supported:
+	 * 1920x1080, 1280x720, 1280x960, 1280x1024,
+	 * 1440x900, 1600x1200, 1680x1050
+	 */
+	0xd1, 0xc0, 0x81, 0xc0, 0x81, 0x40, 0x81, 0x80, 0x95, 0x00,
+	0xa9, 0x40, 0xb3, 0x00, 0x01, 0x01,
+	/* 18 Byte Data Blocks 1: max resolution is 1920x1200 */
+	0x28, 0x3c, 0x80, 0xa0, 0x70, 0xb0,
 	0x23, 0x40, 0x30, 0x20, 0x36, 0x00, 0x06, 0x44, 0x21, 0x00, 0x00, 0x1a,
 	/* 18 Byte Data Blocks 2: invalid */
 	0x00, 0x00, 0x00, 0xfd, 0x00, 0x18, 0x3c, 0x18, 0x50, 0x11, 0x00, 0x0a,
@@ -115,7 +120,7 @@ static unsigned char virtual_dp_monitor_edid[] = {
 	/* Extension Block Count */
 	0x00,
 	/* Checksum */
-	0xef,
+	0x45,
 };
 
 #define DPCD_HEADER_SIZE 0xb
...
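For readers decoding the new Standard Timings bytes above: each EDID standard timing is a two-byte descriptor in which byte 0 encodes the horizontal resolution as (value + 31) * 8 pixels, and byte 1 packs the aspect ratio into bits 7:6 (00b meaning 16:10, assuming EDID 1.3+) and the refresh rate minus 60 into bits 5:0; the pair 0x01 0x01 marks an unused slot. The standalone sketch below (illustration only, not part of the patch) decodes two of the pairs added above.

/*
 * Illustration only: decode an EDID 1.3+ "Standard Timing" descriptor,
 * e.g. the 0xd1, 0xc0 pair added above (expected: 1920x1080 @ 60Hz).
 */
#include <stdio.h>

static void decode_std_timing(unsigned char b0, unsigned char b1)
{
	unsigned int hactive, vactive, refresh;

	if (b0 == 0x01 && b1 == 0x01) {		/* 0x0101 marks an unused slot */
		printf("unused\n");
		return;
	}

	hactive = (b0 + 31) * 8;		/* horizontal active pixels */
	refresh = (b1 & 0x3f) + 60;		/* field refresh rate in Hz */

	switch (b1 >> 6) {			/* aspect ratio, EDID 1.3+ encoding */
	case 0:  vactive = hactive * 10 / 16; break;	/* 16:10 */
	case 1:  vactive = hactive * 3 / 4;   break;	/* 4:3   */
	case 2:  vactive = hactive * 4 / 5;   break;	/* 5:4   */
	default: vactive = hactive * 9 / 16;  break;	/* 16:9  */
	}

	printf("%ux%u @ %uHz\n", hactive, vactive, refresh);
}

int main(void)
{
	decode_std_timing(0xd1, 0xc0);	/* 1920x1080 @ 60Hz */
	decode_std_timing(0x95, 0x00);	/* 1440x900  @ 60Hz */
	return 0;
}

Applying the same decode to the remaining pairs (0x81 0xc0, 0x81 0x40, 0x81 0x80, 0xa9 0x40, 0xb3 0x00) yields exactly the modes listed in the new comment block.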
@@ -48,31 +48,6 @@ struct gvt_firmware_header {
 	unsigned char data[1];
 };
 
-#define RD(offset) (readl(mmio + offset.reg))
-#define WR(v, offset) (writel(v, mmio + offset.reg))
-
-static void bdw_forcewake_get(void __iomem *mmio)
-{
-	WR(_MASKED_BIT_DISABLE(0xffff), FORCEWAKE_MT);
-
-	RD(ECOBUS);
-
-	if (wait_for((RD(FORCEWAKE_ACK_HSW) & FORCEWAKE_KERNEL) == 0, 50))
-		gvt_err("fail to wait forcewake idle\n");
-
-	WR(_MASKED_BIT_ENABLE(FORCEWAKE_KERNEL), FORCEWAKE_MT);
-
-	if (wait_for((RD(FORCEWAKE_ACK_HSW) & FORCEWAKE_KERNEL), 50))
-		gvt_err("fail to wait forcewake ack\n");
-
-	if (wait_for((RD(GEN6_GT_THREAD_STATUS_REG) &
-		      GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 50))
-		gvt_err("fail to wait c0 wake up\n");
-}
-
-#undef RD
-#undef WR
-
 #define dev_to_drm_minor(d) dev_get_drvdata((d))
 
 static ssize_t
@@ -91,9 +66,9 @@ static struct bin_attribute firmware_attr = {
 	.mmap = NULL,
 };
 
-static int expose_firmware_sysfs(struct intel_gvt *gvt,
-				 void __iomem *mmio)
+static int expose_firmware_sysfs(struct intel_gvt *gvt)
 {
+	struct drm_i915_private *dev_priv = gvt->dev_priv;
 	struct intel_gvt_device_info *info = &gvt->device_info;
 	struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
 	struct intel_gvt_mmio_info *e;
@@ -132,7 +107,7 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt,
 		for (j = 0; j < e->length; j += 4)
 			*(u32 *)(p + e->offset + j) =
-				readl(mmio + e->offset + j);
+				I915_READ_NOTRACE(_MMIO(e->offset + j));
 	}
 
 	memcpy(gvt->firmware.mmio, p, info->mmio_size);
@@ -235,7 +210,6 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt)
 	struct gvt_firmware_header *h;
 	const struct firmware *fw;
 	char *path;
-	void __iomem *mmio;
 	void *mem;
 	int ret;
@@ -260,17 +234,6 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt)
 	firmware->mmio = mem;
 
-	mmio = pci_iomap(pdev, info->mmio_bar, info->mmio_size);
-	if (!mmio) {
-		kfree(path);
-		kfree(firmware->cfg_space);
-		kfree(firmware->mmio);
-		return -EINVAL;
-	}
-
-	if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv))
-		bdw_forcewake_get(mmio);
-
 	sprintf(path, "%s/vid_0x%04x_did_0x%04x_rid_0x%04x.golden_hw_state",
 		GVT_FIRMWARE_PATH, pdev->vendor, pdev->device,
 		pdev->revision);
@@ -300,13 +263,11 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt)
 	release_firmware(fw);
 	firmware->firmware_loaded = true;
-	pci_iounmap(pdev, mmio);
 	return 0;
 
 out_free_fw:
 	release_firmware(fw);
 expose_firmware:
-	expose_firmware_sysfs(gvt, mmio);
-	pci_iounmap(pdev, mmio);
+	expose_firmware_sysfs(gvt);
 	return 0;
 }
@@ -68,8 +68,6 @@ static const struct intel_gvt_ops intel_gvt_ops = {
  */
 int intel_gvt_init_host(void)
 {
-	int ret;
-
 	if (intel_gvt_host.initialized)
 		return 0;
@@ -77,6 +75,13 @@ int intel_gvt_init_host(void)
 	if (xen_domain() && !xen_initial_domain())
 		return -ENODEV;
 
+#ifdef CONFIG_INTEL_IOMMU
+	if (intel_iommu_gfx_mapped) {
+		gvt_err("Hardware IOMMU compatibility not yet supported, try to boot with intel_iommu=igfx_off\n");
+		return -ENODEV;
+	}
+#endif
+
 	/* Try to load MPT modules for hypervisors */
 	if (xen_initial_domain()) {
 		/* In Xen dom0 */
@@ -96,11 +101,6 @@ int intel_gvt_init_host(void)
 	if (!intel_gvt_host.mpt)
 		return -EINVAL;
 
-	/* Try to detect if we're running in host instead of VM. */
-	ret = intel_gvt_hypervisor_detect_host();
-	if (ret)
-		return -ENODEV;
-
 	gvt_dbg_core("Running with hypervisor %s in host mode\n",
 		     supported_hypervisors[intel_gvt_host.hypervisor_type]);
...
@@ -38,7 +38,6 @@
  * both Xen and KVM by providing dedicated hypervisor-related MPT modules.
  */
 struct intel_gvt_mpt {
-	int (*detect_host)(void);
 	int (*host_init)(struct device *dev, void *gvt, const void *ops);
 	void (*host_exit)(struct device *dev, void *gvt);
 	int (*attach_vgpu)(void *vgpu, unsigned long *handle);
...
@@ -1248,43 +1248,6 @@ static void kvmgt_page_track_flush_slot(struct kvm *kvm,
 	spin_unlock(&kvm->mmu_lock);
 }
 
-static bool kvmgt_check_guest(void)
-{
-	unsigned int eax, ebx, ecx, edx;
-	char s[12];
-	unsigned int *i;
-
-	eax = KVM_CPUID_SIGNATURE;
-	ebx = ecx = edx = 0;
-
-	asm volatile ("cpuid"
-		      : "+a"(eax), "=b"(ebx), "=c"(ecx), "=d"(edx)
-		      :
-		      : "cc", "memory");
-	i = (unsigned int *)s;
-	i[0] = ebx;
-	i[1] = ecx;
-	i[2] = edx;
-
-	return !strncmp(s, "KVMKVMKVM", strlen("KVMKVMKVM"));
-}
-
-/**
- * NOTE:
- * It's actually impossible to check if we are running in KVM host,
- * since the "KVM host" is simply native. So we only dectect guest here.
- */
-static int kvmgt_detect_host(void)
-{
-#ifdef CONFIG_INTEL_IOMMU
-	if (intel_iommu_gfx_mapped) {
-		gvt_err("Hardware IOMMU compatibility not yet supported, try to boot with intel_iommu=igfx_off\n");
-		return -ENODEV;
-	}
-#endif
-	return kvmgt_check_guest() ? -ENODEV : 0;
-}
-
 static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu, struct kvm *kvm)
 {
 	struct intel_vgpu *itr;
@@ -1459,7 +1422,6 @@ static unsigned long kvmgt_virt_to_pfn(void *addr)
 }
 
 struct intel_gvt_mpt kvmgt_mpt = {
-	.detect_host = kvmgt_detect_host,
 	.host_init = kvmgt_host_init,
 	.host_exit = kvmgt_host_exit,
 	.attach_vgpu = kvmgt_attach_vgpu,
...
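The kvmgt_check_guest() helper removed above is a plain CPUID hypervisor-signature probe: leaf 0x40000000 (KVM_CPUID_SIGNATURE) returns the hypervisor vendor string in EBX/ECX/EDX, which on KVM spells "KVMKVMKVM\0\0\0"; since a KVM host is simply bare metal, finding the signature means the code is running inside a guest and GVT-g must bail out. A minimal standalone userspace sketch of the same probe (illustration only, not part of the patch; assumes x86 and GCC-style inline asm) follows.

/*
 * Illustration only: detect a KVM guest via the CPUID hypervisor
 * signature leaf, the same check the removed kvmgt_check_guest() did.
 */
#include <stdio.h>
#include <string.h>

#define KVM_CPUID_SIGNATURE 0x40000000

static int running_on_kvm(void)
{
	unsigned int eax = KVM_CPUID_SIGNATURE, ebx = 0, ecx = 0, edx = 0;
	char sig[13] = { 0 };

	asm volatile("cpuid"
		     : "+a"(eax), "=b"(ebx), "=c"(ecx), "=d"(edx)
		     :
		     : "cc", "memory");

	/* The 12-byte signature is returned in EBX, ECX, EDX, in that order. */
	memcpy(sig + 0, &ebx, 4);
	memcpy(sig + 4, &ecx, 4);
	memcpy(sig + 8, &edx, 4);

	return strcmp(sig, "KVMKVMKVM") == 0;
}

int main(void)
{
	printf("KVM guest: %s\n", running_on_kvm() ? "yes" : "no");
	return 0;
}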
@@ -43,18 +43,6 @@
  * hypervisor.
  */
 
-/**
- * intel_gvt_hypervisor_detect_host - check if GVT-g is running within
- * hypervisor host/privilged domain
- *
- * Returns:
- * Zero on success, -ENODEV if current kernel is running inside a VM
- */
-static inline int intel_gvt_hypervisor_detect_host(void)
-{
-	return intel_gvt_host.mpt->detect_host();
-}
-
 /**
  * intel_gvt_hypervisor_host_init - init GVT-g host side
  *
...
@@ -74,7 +74,7 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu)
 int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
 {
 	unsigned int num_types;
-	unsigned int i, low_avail;
+	unsigned int i, low_avail, high_avail;
 	unsigned int min_low;
 
 	/* vGPU type name is defined as GVTg_Vx_y which contains
@@ -89,9 +89,9 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
 	 * to indicate how many vGPU instance can be created for this
 	 * type.
 	 *
-	 * Currently use static size here as we init type earlier..
 	 */
-	low_avail = MB_TO_BYTES(256) - HOST_LOW_GM_SIZE;
+	low_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE;
+	high_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE;
 	num_types = 4;
 	gvt->types = kzalloc(num_types * sizeof(struct intel_vgpu_type),
@@ -106,7 +106,8 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
 		gvt->types[i].low_gm_size = min_low;
 		gvt->types[i].high_gm_size = max((min_low<<3), MB_TO_BYTES(384U));
 		gvt->types[i].fence = 4;
-		gvt->types[i].max_instance = low_avail / min_low;
+		gvt->types[i].max_instance = min(low_avail / min_low,
+						 high_avail / gvt->types[i].high_gm_size);
 		gvt->types[i].avail_instance = gvt->types[i].max_instance;
 
 		if (IS_GEN8(gvt->dev_priv))
@@ -142,9 +143,9 @@ static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt)
 	/* Need to depend on maxium hw resource size but keep on
 	 * static config for now.
 	 */
-	low_gm_avail = MB_TO_BYTES(256) - HOST_LOW_GM_SIZE -
+	low_gm_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE -
 		gvt->gm.vgpu_allocated_low_gm_size;
-	high_gm_avail = MB_TO_BYTES(256) * 8UL - HOST_HIGH_GM_SIZE -
+	high_gm_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE -
 		gvt->gm.vgpu_allocated_high_gm_size;
 	fence_avail = gvt_fence_sz(gvt) - HOST_FENCE -
 		gvt->fence.vgpu_allocated_fence_num;
...
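The sizing change above stops assuming a fixed 256MB aperture and derives the per-type instance limit from both graphics memory pools, so whichever pool is scarcer now caps the count. A rough worked example (illustration only; the pool and reservation sizes below are hypothetical stand-ins for gvt_aperture_sz(), gvt_hidden_sz() and the HOST_*_GM_SIZE reservations):

/*
 * Illustration only: how max_instance follows from the new formula,
 * using hypothetical sizes (256MB aperture, 2GB hidden GM, 128MB/384MB
 * host reservations, a type that wants 128MB low + 512MB high GM).
 */
#include <stdio.h>

#define MB(x) ((unsigned long long)(x) << 20)

int main(void)
{
	unsigned long long low_avail    = MB(256)  - MB(128); /* aperture - host low reservation  */
	unsigned long long high_avail   = MB(2048) - MB(384); /* hidden GM - host high reservation */
	unsigned long long low_gm_size  = MB(128);            /* this type's low GM footprint      */
	unsigned long long high_gm_size = MB(512);            /* this type's high GM footprint     */

	unsigned long long by_low  = low_avail / low_gm_size;    /* 1 */
	unsigned long long by_high = high_avail / high_gm_size;  /* 3 */
	unsigned long long max_instance = by_low < by_high ? by_low : by_high;

	printf("max_instance = min(%llu, %llu) = %llu\n", by_low, by_high, max_instance);
	return 0;
}

With these numbers the low pool is the limiting factor, which the old low-only formula happened to get right; swap the proportions and the old code would have overcommitted the hidden pool, which is exactly what the min() now prevents.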
@@ -824,10 +824,6 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
 	if (ret < 0)
 		return ret;
 
-	ret = intel_gvt_init(dev_priv);
-	if (ret < 0)
-		goto err_workqueues;
-
 	/* This must be called before any calls to HAS_PCH_* */
 	intel_detect_pch(dev_priv);
@@ -841,7 +837,7 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
 	intel_init_audio_hooks(dev_priv);
 	ret = i915_gem_load_init(dev_priv);
 	if (ret < 0)
-		goto err_gvt;
+		goto err_workqueues;
 
 	intel_display_crc_init(dev_priv);
@@ -853,8 +849,6 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
 	return 0;
 
-err_gvt:
-	intel_gvt_cleanup(dev_priv);
 err_workqueues:
 	i915_workqueues_cleanup(dev_priv);
 	return ret;
@@ -1077,6 +1071,10 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
 			DRM_DEBUG_DRIVER("can't enable MSI");
 	}
 
+	ret = intel_gvt_init(dev_priv);
+	if (ret)
+		goto out_ggtt;
+
 	return 0;
 
 out_ggtt:
@@ -1290,6 +1288,8 @@ void i915_driver_unload(struct drm_device *dev)
 	intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
 
+	intel_gvt_cleanup(dev_priv);
+
 	i915_driver_unregister(dev_priv);
 
 	drm_vblank_cleanup(dev);
...
@@ -67,6 +67,11 @@ int intel_gvt_init(struct drm_i915_private *dev_priv)
 		return 0;
 	}
 
+	if (intel_vgpu_active(dev_priv)) {
+		DRM_DEBUG_DRIVER("GVT-g is disabled for guest\n");
+		goto bail;
+	}
+
 	if (!is_supported_device(dev_priv)) {
 		DRM_DEBUG_DRIVER("Unsupported device. GVT-g is disabled\n");
 		goto bail;
...