Commit 5acb6cd1 authored by Alex Williamson

Merge tag 'gvt-next-2022-04-29' into v5.19/vfio/next

Merge GVT-g dependencies for vfio.
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
parents 920df8d6 419f8299
......@@ -105,6 +105,7 @@ structure to represent a mediated device's driver::
struct mdev_driver {
int (*probe) (struct mdev_device *dev);
void (*remove) (struct mdev_device *dev);
struct attribute_group **supported_type_groups;
struct device_driver driver;
};
......@@ -119,33 +120,15 @@ to register and unregister itself with the core driver:
extern void mdev_unregister_driver(struct mdev_driver *drv);
The mediated bus driver is responsible for adding mediated devices to the VFIO
group when devices are bound to the driver, and for removing mediated devices
from the VFIO group when devices are unbound from the driver.
Physical Device Driver Interface
--------------------------------
The physical device driver interface provides the mdev_parent_ops[3] structure
to define the APIs to manage work in the mediated core driver that is related
to the physical device.
The members of the mdev_parent_ops structure are as follows:
* dev_attr_groups: attributes of the parent device
* mdev_attr_groups: attributes of the mediated device
* supported_config: attributes to define supported configurations
* device_driver: device driver to bind for mediated device instances
The mdev_parent_ops structure also still has various function pointers. These
exist for historical reasons only and shall not be used for new drivers.
The mediated bus driver's probe function should create a vfio_device on top of
the mdev_device and connect it to an appropriate implementation of
vfio_device_ops.
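As a minimal sketch of such a probe/remove pair (not taken from this patch; the
my_mdev_state structure, the my_vfio_dev_ops table and the use of
vfio_init_group_dev()/vfio_register_emulated_iommu_dev() are assumptions for
illustration only)::

	struct my_mdev_state {
		struct vfio_device vfio_dev;
		/* ... driver-private emulation state ... */
	};

	static int my_mdev_probe(struct mdev_device *mdev)
	{
		struct my_mdev_state *state;
		int ret;

		state = kzalloc(sizeof(*state), GFP_KERNEL);
		if (!state)
			return -ENOMEM;

		/* Build a vfio_device on top of the mdev_device. */
		vfio_init_group_dev(&state->vfio_dev, &mdev->dev, &my_vfio_dev_ops);

		ret = vfio_register_emulated_iommu_dev(&state->vfio_dev);
		if (ret) {
			vfio_uninit_group_dev(&state->vfio_dev);
			kfree(state);
			return ret;
		}
		dev_set_drvdata(&mdev->dev, state);
		return 0;
	}

	static void my_mdev_remove(struct mdev_device *mdev)
	{
		struct my_mdev_state *state = dev_get_drvdata(&mdev->dev);

		vfio_unregister_group_dev(&state->vfio_dev);
		vfio_uninit_group_dev(&state->vfio_dev);
		kfree(state);
	}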
When a driver wants to add the GUID creation sysfs to an existing device it has
probed, it should call::
extern int mdev_register_device(struct device *dev,
const struct mdev_parent_ops *ops);
struct mdev_driver *mdev_driver);
This will provide the 'mdev_supported_types/XX/create' files which can then be
used to trigger the creation of a mdev_device. The created mdev_device will be
......
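A hypothetical parent driver would typically make the mdev_register_device()
call shown above from its own probe path. Sketch only; my_pci_probe,
my_pci_remove and my_mdev_driver are illustrative names, not part of this
patch::

	static int my_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
	{
		int ret;

		/* ... set up the physical device first ... */

		/* Expose mdev_supported_types/<type>/create in sysfs. */
		ret = mdev_register_device(&pdev->dev, &my_mdev_driver);
		if (ret)
			return ret;

		return 0;
	}

	static void my_pci_remove(struct pci_dev *pdev)
	{
		mdev_unregister_device(&pdev->dev);
		/* ... tear down the physical device ... */
	}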
......@@ -102,40 +102,30 @@ config DRM_I915_USERPTR
If in doubt, say "Y".
config DRM_I915_GVT
bool "Enable Intel GVT-g graphics virtualization host support"
bool
config DRM_I915_GVT_KVMGT
tristate "Enable KVM host support Intel GVT-g graphics virtualization"
depends on DRM_I915
depends on X86
depends on 64BIT
default n
depends on KVM
depends on VFIO_MDEV
select DRM_I915_GVT
select KVM_EXTERNAL_WRITE_TRACKING
help
Choose this option if you want to enable Intel GVT-g graphics
virtualization technology host support with integrated graphics.
With GVT-g, it's possible to have one integrated graphics
device shared by multiple VMs under different hypervisors.
Note that at least one hypervisor like Xen or KVM is required for
this driver to work, and it only supports newer devices from
Broadwell+. For further information and setup guide, you can
visit: http://01.org/igvt-g.
device shared by multiple VMs under KVM.
Now it's just a stub to support the modifications of i915 for the GVT
device model. It requires at least one MPT module for Xen/KVM and the
other components of the GVT device model to work. Use it at your own
risk.
Note that this driver only supports newer devices from Broadwell on.
For further information and setup guide, you can visit:
http://01.org/igvt-g.
If in doubt, say "N".
config DRM_I915_GVT_KVMGT
tristate "Enable KVM/VFIO support for Intel GVT-g"
depends on DRM_I915_GVT
depends on KVM
depends on VFIO_MDEV
select KVM_EXTERNAL_WRITE_TRACKING
default n
help
Choose this option if you want to enable KVMGT support for
Intel GVT-g.
config DRM_I915_PXP
bool "Enable Intel PXP support"
depends on DRM_I915
......
......@@ -320,13 +320,13 @@ i915-$(CONFIG_DRM_I915_SELFTEST) += \
# virtual gpu code
i915-y += i915_vgpu.o
ifeq ($(CONFIG_DRM_I915_GVT),y)
i915-y += intel_gvt.o
i915-$(CONFIG_DRM_I915_GVT) += \
intel_gvt.o \
intel_gvt_mmio_table.o
include $(src)/gvt/Makefile
endif
obj-$(CONFIG_DRM_I915) += i915.o
obj-$(CONFIG_DRM_I915_GVT_KVMGT) += gvt/kvmgt.o
obj-$(CONFIG_DRM_I915_GVT_KVMGT) += kvmgt.o
# header test
......
# SPDX-License-Identifier: GPL-2.0
GVT_DIR := gvt
GVT_SOURCE := gvt.o aperture_gm.o handlers.o vgpu.o trace_points.o firmware.o \
interrupt.o gtt.o cfg_space.o opregion.o mmio.o display.o edid.o \
execlist.o scheduler.o sched_policy.o mmio_context.o cmd_parser.o debugfs.o \
fb_decoder.o dmabuf.o page_track.o
ccflags-y += -I $(srctree)/$(src) -I $(srctree)/$(src)/$(GVT_DIR)/
i915-y += $(addprefix $(GVT_DIR)/, $(GVT_SOURCE))
kvmgt-$(CONFIG_DRM_I915_GVT) += \
gvt/aperture_gm.o \
gvt/cfg_space.o \
gvt/cmd_parser.o \
gvt/debugfs.o \
gvt/display.o \
gvt/dmabuf.o \
gvt/edid.o \
gvt/execlist.o \
gvt/fb_decoder.o \
gvt/firmware.o \
gvt/gtt.o \
gvt/handlers.o \
gvt/interrupt.o \
gvt/kvmgt.o \
gvt/mmio.o \
gvt/mmio_context.o \
gvt/opregion.o \
gvt/page_track.o \
gvt/sched_policy.o \
gvt/scheduler.o \
gvt/trace_points.o \
gvt/vgpu.o
......@@ -129,60 +129,16 @@ int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
return 0;
}
static int map_aperture(struct intel_vgpu *vgpu, bool map)
static void map_aperture(struct intel_vgpu *vgpu, bool map)
{
phys_addr_t aperture_pa = vgpu_aperture_pa_base(vgpu);
unsigned long aperture_sz = vgpu_aperture_sz(vgpu);
u64 first_gfn;
u64 val;
int ret;
if (map == vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked)
return 0;
val = vgpu_cfg_space(vgpu)[PCI_BASE_ADDRESS_2];
if (val & PCI_BASE_ADDRESS_MEM_TYPE_64)
val = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_2);
else
val = *(u32 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_2);
first_gfn = (val + vgpu_aperture_offset(vgpu)) >> PAGE_SHIFT;
ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu, first_gfn,
aperture_pa >> PAGE_SHIFT,
aperture_sz >> PAGE_SHIFT,
map);
if (ret)
return ret;
vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked = map;
return 0;
if (map != vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked)
vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked = map;
}
static int trap_gttmmio(struct intel_vgpu *vgpu, bool trap)
static void trap_gttmmio(struct intel_vgpu *vgpu, bool trap)
{
u64 start, end;
u64 val;
int ret;
if (trap == vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].tracked)
return 0;
val = vgpu_cfg_space(vgpu)[PCI_BASE_ADDRESS_0];
if (val & PCI_BASE_ADDRESS_MEM_TYPE_64)
start = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0);
else
start = *(u32 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0);
start &= ~GENMASK(3, 0);
end = start + vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].size - 1;
ret = intel_gvt_hypervisor_set_trap_area(vgpu, start, end, trap);
if (ret)
return ret;
vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].tracked = trap;
return 0;
if (trap != vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].tracked)
vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].tracked = trap;
}
static int emulate_pci_command_write(struct intel_vgpu *vgpu,
......@@ -191,26 +147,17 @@ static int emulate_pci_command_write(struct intel_vgpu *vgpu,
u8 old = vgpu_cfg_space(vgpu)[offset];
u8 new = *(u8 *)p_data;
u8 changed = old ^ new;
int ret;
vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes);
if (!(changed & PCI_COMMAND_MEMORY))
return 0;
if (old & PCI_COMMAND_MEMORY) {
ret = trap_gttmmio(vgpu, false);
if (ret)
return ret;
ret = map_aperture(vgpu, false);
if (ret)
return ret;
trap_gttmmio(vgpu, false);
map_aperture(vgpu, false);
} else {
ret = trap_gttmmio(vgpu, true);
if (ret)
return ret;
ret = map_aperture(vgpu, true);
if (ret)
return ret;
trap_gttmmio(vgpu, true);
map_aperture(vgpu, true);
}
return 0;
......@@ -230,13 +177,12 @@ static int emulate_pci_rom_bar_write(struct intel_vgpu *vgpu,
return 0;
}
static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset,
static void emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
u32 new = *(u32 *)(p_data);
bool lo = IS_ALIGNED(offset, 8);
u64 size;
int ret = 0;
bool mmio_enabled =
vgpu_cfg_space(vgpu)[PCI_COMMAND] & PCI_COMMAND_MEMORY;
struct intel_vgpu_pci_bar *bars = vgpu->cfg_space.bar;
......@@ -259,14 +205,14 @@ static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset,
* Untrap the BAR, since guest hasn't configured a
* valid GPA
*/
ret = trap_gttmmio(vgpu, false);
trap_gttmmio(vgpu, false);
break;
case PCI_BASE_ADDRESS_2:
case PCI_BASE_ADDRESS_3:
size = ~(bars[INTEL_GVT_PCI_BAR_APERTURE].size -1);
intel_vgpu_write_pci_bar(vgpu, offset,
size >> (lo ? 0 : 32), lo);
ret = map_aperture(vgpu, false);
map_aperture(vgpu, false);
break;
default:
/* Unimplemented BARs */
......@@ -282,19 +228,18 @@ static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset,
*/
trap_gttmmio(vgpu, false);
intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
ret = trap_gttmmio(vgpu, mmio_enabled);
trap_gttmmio(vgpu, mmio_enabled);
break;
case PCI_BASE_ADDRESS_2:
case PCI_BASE_ADDRESS_3:
map_aperture(vgpu, false);
intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
ret = map_aperture(vgpu, mmio_enabled);
map_aperture(vgpu, mmio_enabled);
break;
default:
intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
}
}
return ret;
}
/**
......@@ -336,8 +281,8 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
case PCI_BASE_ADDRESS_0 ... PCI_BASE_ADDRESS_5:
if (drm_WARN_ON(&i915->drm, !IS_ALIGNED(offset, 4)))
return -EINVAL;
return emulate_pci_bar_write(vgpu, offset, p_data, bytes);
emulate_pci_bar_write(vgpu, offset, p_data, bytes);
break;
case INTEL_GVT_PCI_SWSCI:
if (drm_WARN_ON(&i915->drm, !IS_ALIGNED(offset, 4)))
return -EINVAL;
......
......@@ -1011,7 +1011,7 @@ static int cmd_reg_handler(struct parser_exec_state *s,
if (GRAPHICS_VER(s->engine->i915) == 9 &&
intel_gvt_mmio_is_sr_in_ctx(gvt, offset) &&
!strncmp(cmd, "lri", 3)) {
intel_gvt_hypervisor_read_gpa(s->vgpu,
intel_gvt_read_gpa(s->vgpu,
s->workload->ring_context_gpa + 12, &ctx_sr_ctl, 4);
/* check inhibit context */
if (ctx_sr_ctl & 1) {
......@@ -1775,7 +1775,7 @@ static int copy_gma_to_hva(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm,
copy_len = (end_gma - gma) >= (I915_GTT_PAGE_SIZE - offset) ?
I915_GTT_PAGE_SIZE - offset : end_gma - gma;
intel_gvt_hypervisor_read_gpa(vgpu, gpa, va + len, copy_len);
intel_gvt_read_gpa(vgpu, gpa, va + len, copy_len);
len += copy_len;
gma += copy_len;
......
......@@ -29,7 +29,7 @@
*/
#include <linux/dma-buf.h>
#include <linux/vfio.h>
#include <linux/mdev.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane.h>
......@@ -42,24 +42,6 @@
#define GEN8_DECODE_PTE(pte) (pte & GENMASK_ULL(63, 12))
static int vgpu_pin_dma_address(struct intel_vgpu *vgpu,
unsigned long size,
dma_addr_t dma_addr)
{
int ret = 0;
if (intel_gvt_hypervisor_dma_pin_guest_page(vgpu, dma_addr))
ret = -EINVAL;
return ret;
}
static void vgpu_unpin_dma_address(struct intel_vgpu *vgpu,
dma_addr_t dma_addr)
{
intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, dma_addr);
}
static int vgpu_gem_get_pages(
struct drm_i915_gem_object *obj)
{
......@@ -95,7 +77,7 @@ static int vgpu_gem_get_pages(
for_each_sg(st->sgl, sg, page_num, i) {
dma_addr_t dma_addr =
GEN8_DECODE_PTE(readq(&gtt_entries[i]));
if (vgpu_pin_dma_address(vgpu, PAGE_SIZE, dma_addr)) {
if (intel_gvt_dma_pin_guest_page(vgpu, dma_addr)) {
ret = -EINVAL;
goto out;
}
......@@ -114,7 +96,7 @@ static int vgpu_gem_get_pages(
for_each_sg(st->sgl, sg, i, j) {
dma_addr = sg_dma_address(sg);
if (dma_addr)
vgpu_unpin_dma_address(vgpu, dma_addr);
intel_gvt_dma_unmap_guest_page(vgpu, dma_addr);
}
sg_free_table(st);
kfree(st);
......@@ -136,7 +118,7 @@ static void vgpu_gem_put_pages(struct drm_i915_gem_object *obj,
int i;
for_each_sg(pages->sgl, sg, fb_info->size, i)
vgpu_unpin_dma_address(vgpu,
intel_gvt_dma_unmap_guest_page(vgpu,
sg_dma_address(sg));
}
......@@ -157,7 +139,6 @@ static void dmabuf_gem_object_free(struct kref *kref)
dmabuf_obj = list_entry(pos, struct intel_vgpu_dmabuf_obj, list);
if (dmabuf_obj == obj) {
list_del(pos);
intel_gvt_hypervisor_put_vfio_device(vgpu);
idr_remove(&vgpu->object_idr,
dmabuf_obj->dmabuf_id);
kfree(dmabuf_obj->info);
......@@ -491,14 +472,6 @@ int intel_vgpu_query_plane(struct intel_vgpu *vgpu, void *args)
kref_init(&dmabuf_obj->kref);
mutex_lock(&vgpu->dmabuf_lock);
if (intel_gvt_hypervisor_get_vfio_device(vgpu)) {
gvt_vgpu_err("get vfio device failed\n");
mutex_unlock(&vgpu->dmabuf_lock);
goto out_free_info;
}
mutex_unlock(&vgpu->dmabuf_lock);
update_fb_info(gfx_plane_info, &fb_info);
INIT_LIST_HEAD(&dmabuf_obj->list);
......@@ -603,7 +576,6 @@ void intel_vgpu_dmabuf_cleanup(struct intel_vgpu *vgpu)
dmabuf_obj->vgpu = NULL;
idr_remove(&vgpu->object_idr, dmabuf_obj->dmabuf_id);
intel_gvt_hypervisor_put_vfio_device(vgpu);
list_del(pos);
/* dmabuf_obj might be freed in dmabuf_obj_put */
......
......@@ -159,12 +159,12 @@ static void emulate_csb_update(struct intel_vgpu_execlist *execlist,
hwsp_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
vgpu->hws_pga[execlist->engine->id]);
if (hwsp_gpa != INTEL_GVT_INVALID_ADDR) {
intel_gvt_hypervisor_write_gpa(vgpu,
hwsp_gpa + I915_HWS_CSB_BUF0_INDEX * 4 + write_pointer * 8,
status, 8);
intel_gvt_hypervisor_write_gpa(vgpu,
hwsp_gpa + INTEL_HWS_CSB_WRITE_INDEX(execlist->engine->i915) * 4,
&write_pointer, 4);
intel_gvt_write_gpa(vgpu,
hwsp_gpa + I915_HWS_CSB_BUF0_INDEX * 4 + write_pointer * 8,
status, 8);
intel_gvt_write_gpa(vgpu,
hwsp_gpa + INTEL_HWS_CSB_WRITE_INDEX(execlist->engine->i915) * 4,
&write_pointer, 4);
}
gvt_dbg_el("vgpu%d: w pointer %u reg %x csb l %x csb h %x\n",
......
......@@ -66,22 +66,16 @@ static struct bin_attribute firmware_attr = {
.mmap = NULL,
};
static int mmio_snapshot_handler(struct intel_gvt *gvt, u32 offset, void *data)
{
*(u32 *)(data + offset) = intel_uncore_read_notrace(gvt->gt->uncore,
_MMIO(offset));
return 0;
}
static int expose_firmware_sysfs(struct intel_gvt *gvt)
{
struct intel_gvt_device_info *info = &gvt->device_info;
struct pci_dev *pdev = to_pci_dev(gvt->gt->i915->drm.dev);
struct drm_i915_private *i915 = gvt->gt->i915;
struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
struct gvt_firmware_header *h;
void *firmware;
void *p;
unsigned long size, crc32_start;
int i, ret;
int ret;
size = sizeof(*h) + info->mmio_size + info->cfg_space_size;
firmware = vzalloc(size);
......@@ -99,17 +93,16 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
p = firmware + h->cfg_space_offset;
for (i = 0; i < h->cfg_space_size; i += 4)
pci_read_config_dword(pdev, i, p + i);
memcpy(gvt->firmware.cfg_space, p, info->cfg_space_size);
memcpy(gvt->firmware.cfg_space, i915->vgpu.initial_cfg_space,
info->cfg_space_size);
memcpy(p, gvt->firmware.cfg_space, info->cfg_space_size);
p = firmware + h->mmio_offset;
/* Take a snapshot of hw mmio registers. */
intel_gvt_for_each_tracked_mmio(gvt, mmio_snapshot_handler, p);
memcpy(gvt->firmware.mmio, i915->vgpu.initial_mmio,
info->mmio_size);
memcpy(gvt->firmware.mmio, p, info->mmio_size);
memcpy(p, gvt->firmware.mmio, info->mmio_size);
crc32_start = offsetof(struct gvt_firmware_header, crc32) + 4;
h->crc32 = crc32_le(0, firmware + crc32_start, size - crc32_start);
......
......@@ -49,6 +49,22 @@
static bool enable_out_of_sync = false;
static int preallocated_oos_pages = 8192;
static bool intel_gvt_is_valid_gfn(struct intel_vgpu *vgpu, unsigned long gfn)
{
struct kvm *kvm = vgpu->kvm;
int idx;
bool ret;
if (!vgpu->attached)
return false;
idx = srcu_read_lock(&kvm->srcu);
ret = kvm_is_visible_gfn(kvm, gfn);
srcu_read_unlock(&kvm->srcu, idx);
return ret;
}
/*
* validate a gm address and related range size,
* translate it to host gm address
......@@ -314,7 +330,7 @@ static inline int gtt_get_entry64(void *pt,
return -EINVAL;
if (hypervisor_access) {
ret = intel_gvt_hypervisor_read_gpa(vgpu, gpa +
ret = intel_gvt_read_gpa(vgpu, gpa +
(index << info->gtt_entry_size_shift),
&e->val64, 8);
if (WARN_ON(ret))
......@@ -339,7 +355,7 @@ static inline int gtt_set_entry64(void *pt,
return -EINVAL;
if (hypervisor_access) {
ret = intel_gvt_hypervisor_write_gpa(vgpu, gpa +
ret = intel_gvt_write_gpa(vgpu, gpa +
(index << info->gtt_entry_size_shift),
&e->val64, 8);
if (WARN_ON(ret))
......@@ -997,7 +1013,7 @@ static inline void ppgtt_invalidate_pte(struct intel_vgpu_ppgtt_spt *spt,
if (!pfn || pfn == vgpu->gtt.scratch_pt[type].page_mfn)
return;
intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, pfn << PAGE_SHIFT);
intel_gvt_dma_unmap_guest_page(vgpu, pfn << PAGE_SHIFT);
}
static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt)
......@@ -1162,15 +1178,16 @@ static int is_2MB_gtt_possible(struct intel_vgpu *vgpu,
struct intel_gvt_gtt_entry *entry)
{
const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
unsigned long pfn;
kvm_pfn_t pfn;
if (!HAS_PAGE_SIZES(vgpu->gvt->gt->i915, I915_GTT_PAGE_SIZE_2M))
return 0;
pfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, ops->get_pfn(entry));
if (pfn == INTEL_GVT_INVALID_ADDR)
if (!vgpu->attached)
return -EINVAL;
pfn = gfn_to_pfn(vgpu->kvm, ops->get_pfn(entry));
if (is_error_noslot_pfn(pfn))
return -EINVAL;
return PageTransHuge(pfn_to_page(pfn));
}
......@@ -1195,8 +1212,8 @@ static int split_2MB_gtt_entry(struct intel_vgpu *vgpu,
return PTR_ERR(sub_spt);
for_each_shadow_entry(sub_spt, &sub_se, sub_index) {
ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu,
start_gfn + sub_index, PAGE_SIZE, &dma_addr);
ret = intel_gvt_dma_map_guest_page(vgpu, start_gfn + sub_index,
PAGE_SIZE, &dma_addr);
if (ret) {
ppgtt_invalidate_spt(spt);
return ret;
......@@ -1241,8 +1258,8 @@ static int split_64KB_gtt_entry(struct intel_vgpu *vgpu,
ops->set_64k_splited(&entry);
for (i = 0; i < GTT_64K_PTE_STRIDE; i++) {
ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu,
start_gfn + i, PAGE_SIZE, &dma_addr);
ret = intel_gvt_dma_map_guest_page(vgpu, start_gfn + i,
PAGE_SIZE, &dma_addr);
if (ret)
return ret;
......@@ -1296,8 +1313,7 @@ static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu,
}
/* direct shadow */
ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn, page_size,
&dma_addr);
ret = intel_gvt_dma_map_guest_page(vgpu, gfn, page_size, &dma_addr);
if (ret)
return -ENXIO;
......@@ -1331,7 +1347,7 @@ static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt)
ppgtt_set_shadow_entry(spt, &se, i);
} else {
gfn = ops->get_pfn(&ge);
if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn)) {
if (!intel_gvt_is_valid_gfn(vgpu, gfn)) {
ops->set_pfn(&se, gvt->gtt.scratch_mfn);
ppgtt_set_shadow_entry(spt, &se, i);
continue;
......@@ -1497,7 +1513,7 @@ static int attach_oos_page(struct intel_vgpu_oos_page *oos_page,
struct intel_gvt *gvt = spt->vgpu->gvt;
int ret;
ret = intel_gvt_hypervisor_read_gpa(spt->vgpu,
ret = intel_gvt_read_gpa(spt->vgpu,
spt->guest_page.gfn << I915_GTT_PAGE_SHIFT,
oos_page->mem, I915_GTT_PAGE_SIZE);
if (ret)
......@@ -2228,8 +2244,7 @@ static void ggtt_invalidate_pte(struct intel_vgpu *vgpu,
pfn = pte_ops->get_pfn(entry);
if (pfn != vgpu->gvt->gtt.scratch_mfn)
intel_gvt_hypervisor_dma_unmap_guest_page(vgpu,
pfn << PAGE_SHIFT);
intel_gvt_dma_unmap_guest_page(vgpu, pfn << PAGE_SHIFT);
}
static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
......@@ -2315,13 +2330,13 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
/* one PTE update may be issued in multiple writes and the
* first write may not construct a valid gfn
*/
if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn)) {
if (!intel_gvt_is_valid_gfn(vgpu, gfn)) {
ops->set_pfn(&m, gvt->gtt.scratch_mfn);
goto out;
}
ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn,
PAGE_SIZE, &dma_addr);
ret = intel_gvt_dma_map_guest_page(vgpu, gfn, PAGE_SIZE,
&dma_addr);
if (ret) {
gvt_vgpu_err("fail to populate guest ggtt entry\n");
/* guest driver may read/write the entry when partial
......
/*
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Kevin Tian <kevin.tian@intel.com>
* Eddie Dong <eddie.dong@intel.com>
*
* Contributors:
* Niu Bing <bing.niu@intel.com>
* Zhi Wang <zhi.a.wang@intel.com>
*
*/
#include <linux/types.h>
#include <linux/kthread.h>
#include "i915_drv.h"
#include "intel_gvt.h"
#include "gvt.h"
#include <linux/vfio.h>
#include <linux/mdev.h>
struct intel_gvt_host intel_gvt_host;
static const char * const supported_hypervisors[] = {
[INTEL_GVT_HYPERVISOR_XEN] = "XEN",
[INTEL_GVT_HYPERVISOR_KVM] = "KVM",
};
static const struct intel_gvt_ops intel_gvt_ops = {
.emulate_cfg_read = intel_vgpu_emulate_cfg_read,
.emulate_cfg_write = intel_vgpu_emulate_cfg_write,
.emulate_mmio_read = intel_vgpu_emulate_mmio_read,
.emulate_mmio_write = intel_vgpu_emulate_mmio_write,
.vgpu_create = intel_gvt_create_vgpu,
.vgpu_destroy = intel_gvt_destroy_vgpu,
.vgpu_release = intel_gvt_release_vgpu,
.vgpu_reset = intel_gvt_reset_vgpu,
.vgpu_activate = intel_gvt_activate_vgpu,
.vgpu_deactivate = intel_gvt_deactivate_vgpu,
.vgpu_query_plane = intel_vgpu_query_plane,
.vgpu_get_dmabuf = intel_vgpu_get_dmabuf,
.write_protect_handler = intel_vgpu_page_track_handler,
.emulate_hotplug = intel_vgpu_emulate_hotplug,
};
static void init_device_info(struct intel_gvt *gvt)
{
struct intel_gvt_device_info *info = &gvt->device_info;
struct pci_dev *pdev = to_pci_dev(gvt->gt->i915->drm.dev);
info->max_support_vgpus = 8;
info->cfg_space_size = PCI_CFG_SPACE_EXP_SIZE;
info->mmio_size = 2 * 1024 * 1024;
info->mmio_bar = 0;
info->gtt_start_offset = 8 * 1024 * 1024;
info->gtt_entry_size = 8;
info->gtt_entry_size_shift = 3;
info->gmadr_bytes_in_cmd = 8;
info->max_surface_size = 36 * 1024 * 1024;
info->msi_cap_offset = pdev->msi_cap;
}
static void intel_gvt_test_and_emulate_vblank(struct intel_gvt *gvt)
{
struct intel_vgpu *vgpu;
int id;
mutex_lock(&gvt->lock);
idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) {
if (test_and_clear_bit(INTEL_GVT_REQUEST_EMULATE_VBLANK + id,
(void *)&gvt->service_request)) {
if (vgpu->active)
intel_vgpu_emulate_vblank(vgpu);
}
}
mutex_unlock(&gvt->lock);
}
static int gvt_service_thread(void *data)
{
struct intel_gvt *gvt = (struct intel_gvt *)data;
int ret;
gvt_dbg_core("service thread start\n");
while (!kthread_should_stop()) {
ret = wait_event_interruptible(gvt->service_thread_wq,
kthread_should_stop() || gvt->service_request);
if (kthread_should_stop())
break;
if (WARN_ONCE(ret, "service thread is waken up by signal.\n"))
continue;
intel_gvt_test_and_emulate_vblank(gvt);
if (test_bit(INTEL_GVT_REQUEST_SCHED,
(void *)&gvt->service_request) ||
test_bit(INTEL_GVT_REQUEST_EVENT_SCHED,
(void *)&gvt->service_request)) {
intel_gvt_schedule(gvt);
}
}
return 0;
}
static void clean_service_thread(struct intel_gvt *gvt)
{
kthread_stop(gvt->service_thread);
}
static int init_service_thread(struct intel_gvt *gvt)
{
init_waitqueue_head(&gvt->service_thread_wq);
gvt->service_thread = kthread_run(gvt_service_thread,
gvt, "gvt_service_thread");
if (IS_ERR(gvt->service_thread)) {
gvt_err("fail to start service thread.\n");
return PTR_ERR(gvt->service_thread);
}
return 0;
}
/**
* intel_gvt_clean_device - clean a GVT device
* @i915: i915 private
*
* This function is called at the driver unloading stage, to free the
* resources owned by a GVT device.
*
*/
void intel_gvt_clean_device(struct drm_i915_private *i915)
{
struct intel_gvt *gvt = fetch_and_zero(&i915->gvt);
if (drm_WARN_ON(&i915->drm, !gvt))
return;
intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu);
intel_gvt_clean_vgpu_types(gvt);
intel_gvt_debugfs_clean(gvt);
clean_service_thread(gvt);
intel_gvt_clean_cmd_parser(gvt);
intel_gvt_clean_sched_policy(gvt);
intel_gvt_clean_workload_scheduler(gvt);
intel_gvt_clean_gtt(gvt);
intel_gvt_free_firmware(gvt);
intel_gvt_clean_mmio_info(gvt);
idr_destroy(&gvt->vgpu_idr);
kfree(i915->gvt);
}
/**
* intel_gvt_init_device - initialize a GVT device
* @i915: drm i915 private data
*
* This function is called at the initialization stage, to initialize
* necessary GVT components.
*
* Returns:
* Zero on success, negative error code if failed.
*
*/
int intel_gvt_init_device(struct drm_i915_private *i915)
{
struct intel_gvt *gvt;
struct intel_vgpu *vgpu;
int ret;
if (drm_WARN_ON(&i915->drm, i915->gvt))
return -EEXIST;
gvt = kzalloc(sizeof(struct intel_gvt), GFP_KERNEL);
if (!gvt)
return -ENOMEM;
gvt_dbg_core("init gvt device\n");
idr_init_base(&gvt->vgpu_idr, 1);
spin_lock_init(&gvt->scheduler.mmio_context_lock);
mutex_init(&gvt->lock);
mutex_init(&gvt->sched_lock);
gvt->gt = to_gt(i915);
i915->gvt = gvt;
init_device_info(gvt);
ret = intel_gvt_setup_mmio_info(gvt);
if (ret)
goto out_clean_idr;
intel_gvt_init_engine_mmio_context(gvt);
ret = intel_gvt_load_firmware(gvt);
if (ret)
goto out_clean_mmio_info;
ret = intel_gvt_init_irq(gvt);
if (ret)
goto out_free_firmware;
ret = intel_gvt_init_gtt(gvt);
if (ret)
goto out_free_firmware;
ret = intel_gvt_init_workload_scheduler(gvt);
if (ret)
goto out_clean_gtt;
ret = intel_gvt_init_sched_policy(gvt);
if (ret)
goto out_clean_workload_scheduler;
ret = intel_gvt_init_cmd_parser(gvt);
if (ret)
goto out_clean_sched_policy;
ret = init_service_thread(gvt);
if (ret)
goto out_clean_cmd_parser;
ret = intel_gvt_init_vgpu_types(gvt);
if (ret)
goto out_clean_thread;
vgpu = intel_gvt_create_idle_vgpu(gvt);
if (IS_ERR(vgpu)) {
ret = PTR_ERR(vgpu);
gvt_err("failed to create idle vgpu\n");
goto out_clean_types;
}
gvt->idle_vgpu = vgpu;
intel_gvt_debugfs_init(gvt);
gvt_dbg_core("gvt device initialization is done\n");
intel_gvt_host.dev = i915->drm.dev;
intel_gvt_host.initialized = true;
return 0;
out_clean_types:
intel_gvt_clean_vgpu_types(gvt);
out_clean_thread:
clean_service_thread(gvt);
out_clean_cmd_parser:
intel_gvt_clean_cmd_parser(gvt);
out_clean_sched_policy:
intel_gvt_clean_sched_policy(gvt);
out_clean_workload_scheduler:
intel_gvt_clean_workload_scheduler(gvt);
out_clean_gtt:
intel_gvt_clean_gtt(gvt);
out_free_firmware:
intel_gvt_free_firmware(gvt);
out_clean_mmio_info:
intel_gvt_clean_mmio_info(gvt);
out_clean_idr:
idr_destroy(&gvt->vgpu_idr);
kfree(gvt);
i915->gvt = NULL;
return ret;
}
int
intel_gvt_pm_resume(struct intel_gvt *gvt)
{
intel_gvt_restore_fence(gvt);
intel_gvt_restore_mmio(gvt);
intel_gvt_restore_ggtt(gvt);
return 0;
}
int
intel_gvt_register_hypervisor(const struct intel_gvt_mpt *m)
{
int ret;
void *gvt;
if (!intel_gvt_host.initialized)
return -ENODEV;
if (m->type != INTEL_GVT_HYPERVISOR_KVM &&
m->type != INTEL_GVT_HYPERVISOR_XEN)
return -EINVAL;
/* Get a reference for device model module */
if (!try_module_get(THIS_MODULE))
return -ENODEV;
intel_gvt_host.mpt = m;
intel_gvt_host.hypervisor_type = m->type;
gvt = (void *)kdev_to_i915(intel_gvt_host.dev)->gvt;
ret = intel_gvt_hypervisor_host_init(intel_gvt_host.dev, gvt,
&intel_gvt_ops);
if (ret < 0) {
gvt_err("Failed to init %s hypervisor module\n",
supported_hypervisors[intel_gvt_host.hypervisor_type]);
module_put(THIS_MODULE);
return -ENODEV;
}
gvt_dbg_core("Running with hypervisor %s in host mode\n",
supported_hypervisors[intel_gvt_host.hypervisor_type]);
return 0;
}
EXPORT_SYMBOL_GPL(intel_gvt_register_hypervisor);
void
intel_gvt_unregister_hypervisor(void)
{
void *gvt = (void *)kdev_to_i915(intel_gvt_host.dev)->gvt;
intel_gvt_hypervisor_host_exit(intel_gvt_host.dev, gvt);
module_put(THIS_MODULE);
}
EXPORT_SYMBOL_GPL(intel_gvt_unregister_hypervisor);
......@@ -34,11 +34,13 @@
#define _GVT_H_
#include <uapi/linux/pci_regs.h>
#include <linux/kvm_host.h>
#include <linux/vfio.h>
#include "i915_drv.h"
#include "intel_gvt.h"
#include "debug.h"
#include "hypercall.h"
#include "mmio.h"
#include "reg.h"
#include "interrupt.h"
......@@ -56,15 +58,6 @@
#define GVT_MAX_VGPU 8
struct intel_gvt_host {
struct device *dev;
bool initialized;
int hypervisor_type;
const struct intel_gvt_mpt *mpt;
};
extern struct intel_gvt_host intel_gvt_host;
/* Describe per-platform limitations. */
struct intel_gvt_device_info {
u32 max_support_vgpus;
......@@ -176,12 +169,14 @@ struct intel_vgpu_submission {
} last_ctx[I915_NUM_ENGINES];
};
#define KVMGT_DEBUGFS_FILENAME "kvmgt_nr_cache_entries"
struct intel_vgpu {
struct intel_gvt *gvt;
struct mutex vgpu_lock;
int id;
unsigned long handle; /* vGPU handle used by hypervisor MPT modules */
bool active;
bool attached;
bool pv_notified;
bool failsafe;
unsigned int resetting_eng;
......@@ -209,21 +204,40 @@ struct intel_vgpu {
struct dentry *debugfs;
/* Hypervisor-specific device state. */
void *vdev;
struct list_head dmabuf_obj_list_head;
struct mutex dmabuf_lock;
struct idr object_idr;
struct intel_vgpu_vblank_timer vblank_timer;
u32 scan_nonprivbb;
};
static inline void *intel_vgpu_vdev(struct intel_vgpu *vgpu)
{
return vgpu->vdev;
}
struct vfio_device vfio_device;
struct vfio_region *region;
int num_regions;
struct eventfd_ctx *intx_trigger;
struct eventfd_ctx *msi_trigger;
/*
* Two caches are used to avoid mapping duplicated pages (e.g.
* scratch pages). This helps to reduce DMA setup overhead.
*/
struct rb_root gfn_cache;
struct rb_root dma_addr_cache;
unsigned long nr_cache_entries;
struct mutex cache_lock;
struct notifier_block iommu_notifier;
struct notifier_block group_notifier;
struct kvm *kvm;
struct work_struct release_work;
atomic_t released;
struct vfio_group *vfio_group;
struct kvm_page_track_notifier_node track_node;
#define NR_BKT (1 << 18)
struct hlist_head ptable[NR_BKT];
#undef NR_BKT
};
/* validating GM healthy status*/
#define vgpu_is_vm_unhealthy(ret_val) \
......@@ -272,7 +286,7 @@ struct intel_gvt_mmio {
/* Value of command write of this reg needs to be patched */
#define F_CMD_WRITE_PATCH (1 << 8)
const struct gvt_mmio_block *mmio_block;
struct gvt_mmio_block *mmio_block;
unsigned int num_mmio_block;
DECLARE_HASHTABLE(mmio_info_table, INTEL_GVT_MMIO_HASH_BITS);
......@@ -428,7 +442,6 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt);
#define RING_CTX_SIZE 320
struct intel_vgpu_creation_params {
__u64 handle;
__u64 low_gm_sz; /* in MB */
__u64 high_gm_sz; /* in MB */
__u64 fence_sz;
......@@ -496,6 +509,9 @@ void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu);
int intel_gvt_set_opregion(struct intel_vgpu *vgpu);
int intel_gvt_set_edid(struct intel_vgpu *vgpu, int port_num);
/* validating GM functions */
#define vgpu_gmadr_is_aperture(vgpu, gmadr) \
((gmadr >= vgpu_aperture_gmadr_base(vgpu)) && \
......@@ -557,30 +573,7 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu);
int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload);
void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason);
struct intel_gvt_ops {
int (*emulate_cfg_read)(struct intel_vgpu *, unsigned int, void *,
unsigned int);
int (*emulate_cfg_write)(struct intel_vgpu *, unsigned int, void *,
unsigned int);
int (*emulate_mmio_read)(struct intel_vgpu *, u64, void *,
unsigned int);
int (*emulate_mmio_write)(struct intel_vgpu *, u64, void *,
unsigned int);
struct intel_vgpu *(*vgpu_create)(struct intel_gvt *,
struct intel_vgpu_type *);
void (*vgpu_destroy)(struct intel_vgpu *vgpu);
void (*vgpu_release)(struct intel_vgpu *vgpu);
void (*vgpu_reset)(struct intel_vgpu *);
void (*vgpu_activate)(struct intel_vgpu *);
void (*vgpu_deactivate)(struct intel_vgpu *);
int (*vgpu_query_plane)(struct intel_vgpu *vgpu, void *);
int (*vgpu_get_dmabuf)(struct intel_vgpu *vgpu, unsigned int);
int (*write_protect_handler)(struct intel_vgpu *, u64, void *,
unsigned int);
void (*emulate_hotplug)(struct intel_vgpu *vgpu, bool connected);
};
void intel_vgpu_detach_regions(struct intel_vgpu *vgpu);
enum {
GVT_FAILSAFE_UNSUPPORTED_GUEST,
......@@ -724,13 +717,54 @@ static inline bool intel_gvt_mmio_is_cmd_write_patch(
return gvt->mmio.mmio_attribute[offset >> 2] & F_CMD_WRITE_PATCH;
}
/**
* intel_gvt_read_gpa - copy data from GPA to host data buffer
* @vgpu: a vGPU
* @gpa: guest physical address
* @buf: host data buffer
* @len: data length
*
* Returns:
* Zero on success, negative error code if failed.
*/
static inline int intel_gvt_read_gpa(struct intel_vgpu *vgpu, unsigned long gpa,
void *buf, unsigned long len)
{
if (!vgpu->attached)
return -ESRCH;
return vfio_dma_rw(vgpu->vfio_group, gpa, buf, len, false);
}
/**
* intel_gvt_write_gpa - copy data from host data buffer to GPA
* @vgpu: a vGPU
* @gpa: guest physical address
* @buf: host data buffer
* @len: data length
*
* Returns:
* Zero on success, negative error code if failed.
*/
static inline int intel_gvt_write_gpa(struct intel_vgpu *vgpu,
unsigned long gpa, void *buf, unsigned long len)
{
if (!vgpu->attached)
return -ESRCH;
return vfio_dma_rw(vgpu->vfio_group, gpa, buf, len, true);
}
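A short usage sketch of the new helper (hypothetical wrapper, not part of this
patch, mirroring the cmd-parser hunk earlier in this commit that reads the
control dword at offset 12 of the guest ring context):

/*
 * Hypothetical example: fetch a control dword from a guest ring context
 * through the new vfio_dma_rw()-based GPA helper.
 */
static inline int example_read_ctx_sr_ctl(struct intel_vgpu *vgpu,
					  unsigned long ring_context_gpa,
					  u32 *ctx_sr_ctl)
{
	return intel_gvt_read_gpa(vgpu, ring_context_gpa + 12,
				  ctx_sr_ctl, sizeof(*ctx_sr_ctl));
}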
void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_debugfs_init(struct intel_gvt *gvt);
void intel_gvt_debugfs_clean(struct intel_gvt *gvt);
int intel_gvt_pm_resume(struct intel_gvt *gvt);
int intel_gvt_page_track_add(struct intel_vgpu *info, u64 gfn);
int intel_gvt_page_track_remove(struct intel_vgpu *info, u64 gfn);
int intel_gvt_dma_pin_guest_page(struct intel_vgpu *vgpu, dma_addr_t dma_addr);
int intel_gvt_dma_map_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
unsigned long size, dma_addr_t *dma_addr);
void intel_gvt_dma_unmap_guest_page(struct intel_vgpu *vgpu,
dma_addr_t dma_addr);
#include "trace.h"
#include "mpt.h"
#endif
/*
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Eddie Dong <eddie.dong@intel.com>
* Dexuan Cui
* Jike Song <jike.song@intel.com>
*
* Contributors:
* Zhi Wang <zhi.a.wang@intel.com>
*
*/
#ifndef _GVT_HYPERCALL_H_
#define _GVT_HYPERCALL_H_
#include <linux/types.h>
struct device;
enum hypervisor_type {
INTEL_GVT_HYPERVISOR_XEN = 0,
INTEL_GVT_HYPERVISOR_KVM,
};
/*
* Specific GVT-g MPT modules function collections. Currently GVT-g supports
* both Xen and KVM by providing dedicated hypervisor-related MPT modules.
*/
struct intel_gvt_mpt {
enum hypervisor_type type;
int (*host_init)(struct device *dev, void *gvt, const void *ops);
void (*host_exit)(struct device *dev, void *gvt);
int (*attach_vgpu)(void *vgpu, unsigned long *handle);
void (*detach_vgpu)(void *vgpu);
int (*inject_msi)(unsigned long handle, u32 addr, u16 data);
unsigned long (*from_virt_to_mfn)(void *p);
int (*enable_page_track)(unsigned long handle, u64 gfn);
int (*disable_page_track)(unsigned long handle, u64 gfn);
int (*read_gpa)(unsigned long handle, unsigned long gpa, void *buf,
unsigned long len);
int (*write_gpa)(unsigned long handle, unsigned long gpa, void *buf,
unsigned long len);
unsigned long (*gfn_to_mfn)(unsigned long handle, unsigned long gfn);
int (*dma_map_guest_page)(unsigned long handle, unsigned long gfn,
unsigned long size, dma_addr_t *dma_addr);
void (*dma_unmap_guest_page)(unsigned long handle, dma_addr_t dma_addr);
int (*dma_pin_guest_page)(unsigned long handle, dma_addr_t dma_addr);
int (*map_gfn_to_mfn)(unsigned long handle, unsigned long gfn,
unsigned long mfn, unsigned int nr, bool map);
int (*set_trap_area)(unsigned long handle, u64 start, u64 end,
bool map);
int (*set_opregion)(void *vgpu);
int (*set_edid)(void *vgpu, int port_num);
int (*get_vfio_device)(void *vgpu);
void (*put_vfio_device)(void *vgpu);
bool (*is_valid_gfn)(unsigned long handle, unsigned long gfn);
};
#endif /* _GVT_HYPERCALL_H_ */
......@@ -29,6 +29,8 @@
*
*/
#include <linux/eventfd.h>
#include "i915_drv.h"
#include "i915_reg.h"
#include "gvt.h"
......@@ -397,9 +399,45 @@ static void init_irq_map(struct intel_gvt_irq *irq)
}
/* =======================vEvent injection===================== */
#define MSI_CAP_CONTROL(offset) (offset + 2)
#define MSI_CAP_ADDRESS(offset) (offset + 4)
#define MSI_CAP_DATA(offset) (offset + 8)
#define MSI_CAP_EN 0x1
static int inject_virtual_interrupt(struct intel_vgpu *vgpu)
{
return intel_gvt_hypervisor_inject_msi(vgpu);
unsigned long offset = vgpu->gvt->device_info.msi_cap_offset;
u16 control, data;
u32 addr;
control = *(u16 *)(vgpu_cfg_space(vgpu) + MSI_CAP_CONTROL(offset));
addr = *(u32 *)(vgpu_cfg_space(vgpu) + MSI_CAP_ADDRESS(offset));
data = *(u16 *)(vgpu_cfg_space(vgpu) + MSI_CAP_DATA(offset));
/* Do not generate MSI if MSIEN is disabled */
if (!(control & MSI_CAP_EN))
return 0;
if (WARN(control & GENMASK(15, 1), "only support one MSI format\n"))
return -EINVAL;
trace_inject_msi(vgpu->id, addr, data);
/*
* When the guest is powered off, msi_trigger is set to NULL, but the
* vGPU's config space and MMIO registers are not restored to their
* defaults during guest poweroff. If this vGPU is reused by the next
* VM, its pipes may still be enabled, so once the vGPU becomes active
* it can receive vblank interrupt injection requests. msi_trigger,
* however, stays NULL until MSI is enabled by the guest, so if
* msi_trigger is NULL, return success without injecting an interrupt
* into the guest.
*/
if (!vgpu->attached)
return -ESRCH;
if (vgpu->msi_trigger && eventfd_signal(vgpu->msi_trigger, 1) != 1)
return -EFAULT;
return 0;
}
static void propagate_event(struct intel_gvt_irq *irq,
......
......@@ -139,7 +139,7 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa,
}
if (drm_WARN_ON_ONCE(&i915->drm, !reg_is_mmio(gvt, offset))) {
ret = intel_gvt_hypervisor_read_gpa(vgpu, pa, p_data, bytes);
ret = intel_gvt_read_gpa(vgpu, pa, p_data, bytes);
goto out;
}
......@@ -215,7 +215,7 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, u64 pa,
}
if (drm_WARN_ON_ONCE(&i915->drm, !reg_is_mmio(gvt, offset))) {
ret = intel_gvt_hypervisor_write_gpa(vgpu, pa, p_data, bytes);
ret = intel_gvt_write_gpa(vgpu, pa, p_data, bytes);
goto out;
}
......
......@@ -72,7 +72,6 @@ struct intel_gvt_mmio_info {
const struct intel_engine_cs *
intel_gvt_render_mmio_to_engine(struct intel_gvt *gvt, unsigned int reg);
unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt);
bool intel_gvt_match_device(struct intel_gvt *gvt, unsigned long device);
int intel_gvt_setup_mmio_info(struct intel_gvt *gvt);
void intel_gvt_clean_mmio_info(struct intel_gvt *gvt);
......
......@@ -87,7 +87,7 @@ void intel_vgpu_unregister_page_track(struct intel_vgpu *vgpu,
track = radix_tree_delete(&vgpu->page_track_tree, gfn);
if (track) {
if (track->tracked)
intel_gvt_hypervisor_disable_page_track(vgpu, gfn);
intel_gvt_page_track_remove(vgpu, gfn);
kfree(track);
}
}
......@@ -112,7 +112,7 @@ int intel_vgpu_enable_page_track(struct intel_vgpu *vgpu, unsigned long gfn)
if (track->tracked)
return 0;
ret = intel_gvt_hypervisor_enable_page_track(vgpu, gfn);
ret = intel_gvt_page_track_add(vgpu, gfn);
if (ret)
return ret;
track->tracked = true;
......@@ -139,7 +139,7 @@ int intel_vgpu_disable_page_track(struct intel_vgpu *vgpu, unsigned long gfn)
if (!track->tracked)
return 0;
ret = intel_gvt_hypervisor_disable_page_track(vgpu, gfn);
ret = intel_gvt_page_track_remove(vgpu, gfn);
if (ret)
return ret;
track->tracked = false;
......@@ -172,7 +172,7 @@ int intel_vgpu_page_track_handler(struct intel_vgpu *vgpu, u64 gpa,
if (unlikely(vgpu->failsafe)) {
/* Remove write protection to prevent future traps. */
intel_vgpu_disable_page_track(vgpu, gpa >> PAGE_SHIFT);
intel_gvt_page_track_remove(vgpu, gpa >> PAGE_SHIFT);
} else {
ret = page_track->handler(page_track, gpa, data, bytes);
if (ret)
......
......@@ -132,6 +132,13 @@
#define RING_GFX_MODE(base) _MMIO((base) + 0x29c)
#define VF_GUARDBAND _MMIO(0x83a4)
#define BCS_TILE_REGISTER_VAL_OFFSET (0x43*4)
/* XXX FIXME i915 has changed PP_XXX definition */
#define PCH_PP_STATUS _MMIO(0xc7200)
#define PCH_PP_CONTROL _MMIO(0xc7204)
#define PCH_PP_ON_DELAYS _MMIO(0xc7208)
#define PCH_PP_OFF_DELAYS _MMIO(0xc720c)
#define PCH_PP_DIVISOR _MMIO(0xc7210)
#endif
......@@ -377,7 +377,7 @@ TRACE_EVENT(render_mmio,
/* This part must be out of protection */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/i915/gvt
#define TRACE_INCLUDE_FILE trace
#include <trace/define_trace.h>
......@@ -293,7 +293,7 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
intel_vgpu_clean_opregion(vgpu);
intel_vgpu_reset_ggtt(vgpu, true);
intel_vgpu_clean_gtt(vgpu);
intel_gvt_hypervisor_detach_vgpu(vgpu);
intel_vgpu_detach_regions(vgpu);
intel_vgpu_free_resource(vgpu);
intel_vgpu_clean_mmio(vgpu);
intel_vgpu_dmabuf_cleanup(vgpu);
......@@ -370,8 +370,8 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
struct intel_vgpu *vgpu;
int ret;
gvt_dbg_core("handle %llu low %llu MB high %llu MB fence %llu\n",
param->handle, param->low_gm_sz, param->high_gm_sz,
gvt_dbg_core("low %llu MB high %llu MB fence %llu\n",
param->low_gm_sz, param->high_gm_sz,
param->fence_sz);
vgpu = vzalloc(sizeof(*vgpu));
......@@ -384,7 +384,6 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
goto out_free_vgpu;
vgpu->id = ret;
vgpu->handle = param->handle;
vgpu->gvt = gvt;
vgpu->sched_ctl.weight = param->weight;
mutex_init(&vgpu->vgpu_lock);
......@@ -405,13 +404,9 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
populate_pvinfo_page(vgpu);
ret = intel_gvt_hypervisor_attach_vgpu(vgpu);
if (ret)
goto out_clean_vgpu_resource;
ret = intel_vgpu_init_gtt(vgpu);
if (ret)
goto out_detach_hypervisor_vgpu;
goto out_clean_vgpu_resource;
ret = intel_vgpu_init_opregion(vgpu);
if (ret)
......@@ -431,14 +426,14 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
intel_gvt_debugfs_add_vgpu(vgpu);
ret = intel_gvt_hypervisor_set_opregion(vgpu);
ret = intel_gvt_set_opregion(vgpu);
if (ret)
goto out_clean_sched_policy;
if (IS_BROADWELL(dev_priv) || IS_BROXTON(dev_priv))
ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_B);
ret = intel_gvt_set_edid(vgpu, PORT_B);
else
ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_D);
ret = intel_gvt_set_edid(vgpu, PORT_D);
if (ret)
goto out_clean_sched_policy;
......@@ -454,8 +449,6 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
intel_vgpu_clean_opregion(vgpu);
out_clean_gtt:
intel_vgpu_clean_gtt(vgpu);
out_detach_hypervisor_vgpu:
intel_gvt_hypervisor_detach_vgpu(vgpu);
out_clean_vgpu_resource:
intel_vgpu_free_resource(vgpu);
out_clean_vgpu_mmio:
......@@ -483,7 +476,6 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
struct intel_vgpu_creation_params param;
struct intel_vgpu *vgpu;
param.handle = 0;
param.primary = 1;
param.low_gm_sz = type->low_gm_size;
param.high_gm_sz = type->high_gm_size;
......
......@@ -468,11 +468,6 @@ static void i915_driver_mmio_release(struct drm_i915_private *dev_priv)
pci_dev_put(dev_priv->bridge_dev);
}
static void intel_sanitize_options(struct drm_i915_private *dev_priv)
{
intel_gvt_sanitize_options(dev_priv);
}
/**
* i915_set_dma_info - set all relevant PCI dma info as configured for the
* platform
......@@ -566,8 +561,6 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
}
}
intel_sanitize_options(dev_priv);
/* needs to be done before ggtt probe */
intel_dram_edram_detect(dev_priv);
......