Commit 1e1d2e18 authored by Jani Nikula

Merge tag 'gvt-next-2022-04-21-for-christoph' of https://github.com/intel/gvt-linux into drm-intel-next

gvt-next-2022-04-21-for-christoph

- Separating the MMIO table from GVT-g. (Zhi)
- GVT-g re-factor. (Christoph)
- GVT-g mdev API cleanup. (Jason)
- GVT-g trace/makefile cleanup. (Jani)

[Jani: added #include to adapt to header refactoring in drm-intel-next]
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
From: "Wang, Zhi A" <zhi.a.wang@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/25a713cd-0b7d-4c09-7d91-4f4ef6c9eb11@intel.com
parents b4b15757 2917f531
......@@ -105,6 +105,7 @@ structure to represent a mediated device's driver::
struct mdev_driver {
int (*probe) (struct mdev_device *dev);
void (*remove) (struct mdev_device *dev);
struct attribute_group **supported_type_groups;
struct device_driver driver;
};
......@@ -119,33 +120,15 @@ to register and unregister itself with the core driver:
extern void mdev_unregister_driver(struct mdev_driver *drv);
The mediated bus driver is responsible for adding mediated devices to the VFIO
group when devices are bound to the driver and removing mediated devices from
the VFIO group when devices are unbound from the driver.
Physical Device Driver Interface
--------------------------------
The physical device driver interface provides the mdev_parent_ops[3] structure
to define the APIs to manage work in the mediated core driver that is related
to the physical device.
The structures in the mdev_parent_ops structure are as follows:
* dev_attr_groups: attributes of the parent device
* mdev_attr_groups: attributes of the mediated device
* supported_config: attributes to define supported configurations
* device_driver: device driver to bind for mediated device instances
The mdev_parent_ops also still has various function pointers. These exist
for historical reasons only and shall not be used for new drivers.
The mediated bus driver's probe function should create a vfio_device on top of
the mdev_device and connect it to an appropriate implementation of
vfio_device_ops.
When a driver wants to add the GUID creation sysfs to an existing device it has
probed, it should call::
extern int mdev_register_device(struct device *dev,
const struct mdev_parent_ops *ops);
struct mdev_driver *mdev_driver);
This will provide the 'mdev_supported_types/XX/create' files which can then be
used to trigger the creation of a mdev_device. The created mdev_device will be
......
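As an illustrative aside (not part of the diff above): a minimal mediated bus
driver built on this interface might look roughly like the sketch below. All
``sample_*`` names are placeholders, ``mdev_register_driver()`` is assumed to be
the counterpart of the ``mdev_unregister_driver()`` declaration quoted above, and
the vfio_device wiring inside probe is elided::

    #include <linux/mdev.h>
    #include <linux/module.h>

    static struct attribute_group *sample_mdev_type_groups[] = {
            /* per-type attribute groups shown under mdev_supported_types/ */
            NULL,
    };

    static int sample_mdev_probe(struct mdev_device *mdev)
    {
            /*
             * Create a vfio_device on top of @mdev and register it with a
             * suitable vfio_device_ops implementation (omitted here).
             */
            return 0;
    }

    static void sample_mdev_remove(struct mdev_device *mdev)
    {
            /* Unregister and free the vfio_device created in probe. */
    }

    static struct mdev_driver sample_mdev_driver = {
            .driver = {
                    .name  = "sample_mdev",
                    .owner = THIS_MODULE,
            },
            .probe  = sample_mdev_probe,
            .remove = sample_mdev_remove,
            .supported_type_groups = sample_mdev_type_groups,
    };

    static int sample_parent_probe(struct device *parent_dev)
    {
            int ret;

            ret = mdev_register_driver(&sample_mdev_driver);
            if (ret)
                    return ret;

            /* Exposes mdev_supported_types/XX/create for this parent device. */
            ret = mdev_register_device(parent_dev, &sample_mdev_driver);
            if (ret)
                    mdev_unregister_driver(&sample_mdev_driver);
            return ret;
    }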
......@@ -102,40 +102,30 @@ config DRM_I915_USERPTR
If in doubt, say "Y".
config DRM_I915_GVT
bool "Enable Intel GVT-g graphics virtualization host support"
bool
config DRM_I915_GVT_KVMGT
tristate "Enable KVM host support Intel GVT-g graphics virtualization"
depends on DRM_I915
depends on X86
depends on 64BIT
default n
depends on KVM
depends on VFIO_MDEV
select DRM_I915_GVT
select KVM_EXTERNAL_WRITE_TRACKING
help
Choose this option if you want to enable Intel GVT-g graphics
virtualization technology host support with integrated graphics.
With GVT-g, it's possible to have one integrated graphics
device shared by multiple VMs under different hypervisors.
Note that at least one hypervisor like Xen or KVM is required for
this driver to work, and it only supports newer device from
Broadwell+. For further information and setup guide, you can
visit: http://01.org/igvt-g.
device shared by multiple VMs under KVM.
Now it's just a stub to support the modifications of i915 for the
GVT device model. It requires at least one MPT module for Xen/KVM
and the other components of the GVT device model to work. Use it at
your own risk.
Note that this driver only supports newer devices from Broadwell on.
For further information and setup guide, you can visit:
http://01.org/igvt-g.
If in doubt, say "N".
config DRM_I915_GVT_KVMGT
tristate "Enable KVM/VFIO support for Intel GVT-g"
depends on DRM_I915_GVT
depends on KVM
depends on VFIO_MDEV
select KVM_EXTERNAL_WRITE_TRACKING
default n
help
Choose this option if you want to enable KVMGT support for
Intel GVT-g.
config DRM_I915_PXP
bool "Enable Intel PXP support"
depends on DRM_I915
......
......@@ -322,13 +322,13 @@ i915-$(CONFIG_DRM_I915_SELFTEST) += \
# virtual gpu code
i915-y += i915_vgpu.o
ifeq ($(CONFIG_DRM_I915_GVT),y)
i915-y += intel_gvt.o
i915-$(CONFIG_DRM_I915_GVT) += \
intel_gvt.o \
intel_gvt_mmio_table.o
include $(src)/gvt/Makefile
endif
obj-$(CONFIG_DRM_I915) += i915.o
obj-$(CONFIG_DRM_I915_GVT_KVMGT) += gvt/kvmgt.o
obj-$(CONFIG_DRM_I915_GVT_KVMGT) += kvmgt.o
# header test
......
# SPDX-License-Identifier: GPL-2.0
GVT_DIR := gvt
GVT_SOURCE := gvt.o aperture_gm.o handlers.o vgpu.o trace_points.o firmware.o \
interrupt.o gtt.o cfg_space.o opregion.o mmio.o display.o edid.o \
execlist.o scheduler.o sched_policy.o mmio_context.o cmd_parser.o debugfs.o \
fb_decoder.o dmabuf.o page_track.o
ccflags-y += -I $(srctree)/$(src) -I $(srctree)/$(src)/$(GVT_DIR)/
i915-y += $(addprefix $(GVT_DIR)/, $(GVT_SOURCE))
kvmgt-$(CONFIG_DRM_I915_GVT) += \
gvt/aperture_gm.o \
gvt/cfg_space.o \
gvt/cmd_parser.o \
gvt/debugfs.o \
gvt/display.o \
gvt/dmabuf.o \
gvt/edid.o \
gvt/execlist.o \
gvt/fb_decoder.o \
gvt/firmware.o \
gvt/gtt.o \
gvt/handlers.o \
gvt/interrupt.o \
gvt/kvmgt.o \
gvt/mmio.o \
gvt/mmio_context.o \
gvt/opregion.o \
gvt/page_track.o \
gvt/sched_policy.o \
gvt/scheduler.o \
gvt/trace_points.o \
gvt/vgpu.o
......@@ -129,60 +129,16 @@ int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
return 0;
}
static int map_aperture(struct intel_vgpu *vgpu, bool map)
static void map_aperture(struct intel_vgpu *vgpu, bool map)
{
phys_addr_t aperture_pa = vgpu_aperture_pa_base(vgpu);
unsigned long aperture_sz = vgpu_aperture_sz(vgpu);
u64 first_gfn;
u64 val;
int ret;
if (map == vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked)
return 0;
val = vgpu_cfg_space(vgpu)[PCI_BASE_ADDRESS_2];
if (val & PCI_BASE_ADDRESS_MEM_TYPE_64)
val = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_2);
else
val = *(u32 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_2);
first_gfn = (val + vgpu_aperture_offset(vgpu)) >> PAGE_SHIFT;
ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu, first_gfn,
aperture_pa >> PAGE_SHIFT,
aperture_sz >> PAGE_SHIFT,
map);
if (ret)
return ret;
vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked = map;
return 0;
if (map != vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked)
vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked = map;
}
static int trap_gttmmio(struct intel_vgpu *vgpu, bool trap)
static void trap_gttmmio(struct intel_vgpu *vgpu, bool trap)
{
u64 start, end;
u64 val;
int ret;
if (trap == vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].tracked)
return 0;
val = vgpu_cfg_space(vgpu)[PCI_BASE_ADDRESS_0];
if (val & PCI_BASE_ADDRESS_MEM_TYPE_64)
start = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0);
else
start = *(u32 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0);
start &= ~GENMASK(3, 0);
end = start + vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].size - 1;
ret = intel_gvt_hypervisor_set_trap_area(vgpu, start, end, trap);
if (ret)
return ret;
vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].tracked = trap;
return 0;
if (trap != vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].tracked)
vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].tracked = trap;
}
static int emulate_pci_command_write(struct intel_vgpu *vgpu,
......@@ -191,26 +147,17 @@ static int emulate_pci_command_write(struct intel_vgpu *vgpu,
u8 old = vgpu_cfg_space(vgpu)[offset];
u8 new = *(u8 *)p_data;
u8 changed = old ^ new;
int ret;
vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes);
if (!(changed & PCI_COMMAND_MEMORY))
return 0;
if (old & PCI_COMMAND_MEMORY) {
ret = trap_gttmmio(vgpu, false);
if (ret)
return ret;
ret = map_aperture(vgpu, false);
if (ret)
return ret;
trap_gttmmio(vgpu, false);
map_aperture(vgpu, false);
} else {
ret = trap_gttmmio(vgpu, true);
if (ret)
return ret;
ret = map_aperture(vgpu, true);
if (ret)
return ret;
trap_gttmmio(vgpu, true);
map_aperture(vgpu, true);
}
return 0;
......@@ -230,13 +177,12 @@ static int emulate_pci_rom_bar_write(struct intel_vgpu *vgpu,
return 0;
}
static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset,
static void emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
u32 new = *(u32 *)(p_data);
bool lo = IS_ALIGNED(offset, 8);
u64 size;
int ret = 0;
bool mmio_enabled =
vgpu_cfg_space(vgpu)[PCI_COMMAND] & PCI_COMMAND_MEMORY;
struct intel_vgpu_pci_bar *bars = vgpu->cfg_space.bar;
......@@ -259,14 +205,14 @@ static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset,
* Untrap the BAR, since guest hasn't configured a
* valid GPA
*/
ret = trap_gttmmio(vgpu, false);
trap_gttmmio(vgpu, false);
break;
case PCI_BASE_ADDRESS_2:
case PCI_BASE_ADDRESS_3:
size = ~(bars[INTEL_GVT_PCI_BAR_APERTURE].size -1);
intel_vgpu_write_pci_bar(vgpu, offset,
size >> (lo ? 0 : 32), lo);
ret = map_aperture(vgpu, false);
map_aperture(vgpu, false);
break;
default:
/* Unimplemented BARs */
......@@ -282,19 +228,18 @@ static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset,
*/
trap_gttmmio(vgpu, false);
intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
ret = trap_gttmmio(vgpu, mmio_enabled);
trap_gttmmio(vgpu, mmio_enabled);
break;
case PCI_BASE_ADDRESS_2:
case PCI_BASE_ADDRESS_3:
map_aperture(vgpu, false);
intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
ret = map_aperture(vgpu, mmio_enabled);
map_aperture(vgpu, mmio_enabled);
break;
default:
intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
}
}
return ret;
}
/**
......@@ -336,8 +281,8 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
case PCI_BASE_ADDRESS_0 ... PCI_BASE_ADDRESS_5:
if (drm_WARN_ON(&i915->drm, !IS_ALIGNED(offset, 4)))
return -EINVAL;
return emulate_pci_bar_write(vgpu, offset, p_data, bytes);
emulate_pci_bar_write(vgpu, offset, p_data, bytes);
break;
case INTEL_GVT_PCI_SWSCI:
if (drm_WARN_ON(&i915->drm, !IS_ALIGNED(offset, 4)))
return -EINVAL;
......
......@@ -1011,7 +1011,7 @@ static int cmd_reg_handler(struct parser_exec_state *s,
if (GRAPHICS_VER(s->engine->i915) == 9 &&
intel_gvt_mmio_is_sr_in_ctx(gvt, offset) &&
!strncmp(cmd, "lri", 3)) {
intel_gvt_hypervisor_read_gpa(s->vgpu,
intel_gvt_read_gpa(s->vgpu,
s->workload->ring_context_gpa + 12, &ctx_sr_ctl, 4);
/* check inhibit context */
if (ctx_sr_ctl & 1) {
......@@ -1775,7 +1775,7 @@ static int copy_gma_to_hva(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm,
copy_len = (end_gma - gma) >= (I915_GTT_PAGE_SIZE - offset) ?
I915_GTT_PAGE_SIZE - offset : end_gma - gma;
intel_gvt_hypervisor_read_gpa(vgpu, gpa, va + len, copy_len);
intel_gvt_read_gpa(vgpu, gpa, va + len, copy_len);
len += copy_len;
gma += copy_len;
......
......@@ -29,7 +29,7 @@
*/
#include <linux/dma-buf.h>
#include <linux/vfio.h>
#include <linux/mdev.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane.h>
......@@ -42,24 +42,6 @@
#define GEN8_DECODE_PTE(pte) (pte & GENMASK_ULL(63, 12))
static int vgpu_pin_dma_address(struct intel_vgpu *vgpu,
unsigned long size,
dma_addr_t dma_addr)
{
int ret = 0;
if (intel_gvt_hypervisor_dma_pin_guest_page(vgpu, dma_addr))
ret = -EINVAL;
return ret;
}
static void vgpu_unpin_dma_address(struct intel_vgpu *vgpu,
dma_addr_t dma_addr)
{
intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, dma_addr);
}
static int vgpu_gem_get_pages(
struct drm_i915_gem_object *obj)
{
......@@ -95,7 +77,7 @@ static int vgpu_gem_get_pages(
for_each_sg(st->sgl, sg, page_num, i) {
dma_addr_t dma_addr =
GEN8_DECODE_PTE(readq(&gtt_entries[i]));
if (vgpu_pin_dma_address(vgpu, PAGE_SIZE, dma_addr)) {
if (intel_gvt_dma_pin_guest_page(vgpu, dma_addr)) {
ret = -EINVAL;
goto out;
}
......@@ -114,7 +96,7 @@ static int vgpu_gem_get_pages(
for_each_sg(st->sgl, sg, i, j) {
dma_addr = sg_dma_address(sg);
if (dma_addr)
vgpu_unpin_dma_address(vgpu, dma_addr);
intel_gvt_dma_unmap_guest_page(vgpu, dma_addr);
}
sg_free_table(st);
kfree(st);
......@@ -136,7 +118,7 @@ static void vgpu_gem_put_pages(struct drm_i915_gem_object *obj,
int i;
for_each_sg(pages->sgl, sg, fb_info->size, i)
vgpu_unpin_dma_address(vgpu,
intel_gvt_dma_unmap_guest_page(vgpu,
sg_dma_address(sg));
}
......@@ -157,7 +139,6 @@ static void dmabuf_gem_object_free(struct kref *kref)
dmabuf_obj = list_entry(pos, struct intel_vgpu_dmabuf_obj, list);
if (dmabuf_obj == obj) {
list_del(pos);
intel_gvt_hypervisor_put_vfio_device(vgpu);
idr_remove(&vgpu->object_idr,
dmabuf_obj->dmabuf_id);
kfree(dmabuf_obj->info);
......@@ -491,14 +472,6 @@ int intel_vgpu_query_plane(struct intel_vgpu *vgpu, void *args)
kref_init(&dmabuf_obj->kref);
mutex_lock(&vgpu->dmabuf_lock);
if (intel_gvt_hypervisor_get_vfio_device(vgpu)) {
gvt_vgpu_err("get vfio device failed\n");
mutex_unlock(&vgpu->dmabuf_lock);
goto out_free_info;
}
mutex_unlock(&vgpu->dmabuf_lock);
update_fb_info(gfx_plane_info, &fb_info);
INIT_LIST_HEAD(&dmabuf_obj->list);
......@@ -603,7 +576,6 @@ void intel_vgpu_dmabuf_cleanup(struct intel_vgpu *vgpu)
dmabuf_obj->vgpu = NULL;
idr_remove(&vgpu->object_idr, dmabuf_obj->dmabuf_id);
intel_gvt_hypervisor_put_vfio_device(vgpu);
list_del(pos);
/* dmabuf_obj might be freed in dmabuf_obj_put */
......
......@@ -159,12 +159,12 @@ static void emulate_csb_update(struct intel_vgpu_execlist *execlist,
hwsp_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
vgpu->hws_pga[execlist->engine->id]);
if (hwsp_gpa != INTEL_GVT_INVALID_ADDR) {
intel_gvt_hypervisor_write_gpa(vgpu,
hwsp_gpa + I915_HWS_CSB_BUF0_INDEX * 4 + write_pointer * 8,
status, 8);
intel_gvt_hypervisor_write_gpa(vgpu,
hwsp_gpa + INTEL_HWS_CSB_WRITE_INDEX(execlist->engine->i915) * 4,
&write_pointer, 4);
intel_gvt_write_gpa(vgpu,
hwsp_gpa + I915_HWS_CSB_BUF0_INDEX * 4 + write_pointer * 8,
status, 8);
intel_gvt_write_gpa(vgpu,
hwsp_gpa + INTEL_HWS_CSB_WRITE_INDEX(execlist->engine->i915) * 4,
&write_pointer, 4);
}
gvt_dbg_el("vgpu%d: w pointer %u reg %x csb l %x csb h %x\n",
......
......@@ -66,22 +66,16 @@ static struct bin_attribute firmware_attr = {
.mmap = NULL,
};
static int mmio_snapshot_handler(struct intel_gvt *gvt, u32 offset, void *data)
{
*(u32 *)(data + offset) = intel_uncore_read_notrace(gvt->gt->uncore,
_MMIO(offset));
return 0;
}
static int expose_firmware_sysfs(struct intel_gvt *gvt)
{
struct intel_gvt_device_info *info = &gvt->device_info;
struct pci_dev *pdev = to_pci_dev(gvt->gt->i915->drm.dev);
struct drm_i915_private *i915 = gvt->gt->i915;
struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
struct gvt_firmware_header *h;
void *firmware;
void *p;
unsigned long size, crc32_start;
int i, ret;
int ret;
size = sizeof(*h) + info->mmio_size + info->cfg_space_size;
firmware = vzalloc(size);
......@@ -99,17 +93,16 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
p = firmware + h->cfg_space_offset;
for (i = 0; i < h->cfg_space_size; i += 4)
pci_read_config_dword(pdev, i, p + i);
memcpy(gvt->firmware.cfg_space, p, info->cfg_space_size);
memcpy(gvt->firmware.cfg_space, i915->vgpu.initial_cfg_space,
info->cfg_space_size);
memcpy(p, gvt->firmware.cfg_space, info->cfg_space_size);
p = firmware + h->mmio_offset;
/* Take a snapshot of hw mmio registers. */
intel_gvt_for_each_tracked_mmio(gvt, mmio_snapshot_handler, p);
memcpy(gvt->firmware.mmio, i915->vgpu.initial_mmio,
info->mmio_size);
memcpy(gvt->firmware.mmio, p, info->mmio_size);
memcpy(p, gvt->firmware.mmio, info->mmio_size);
crc32_start = offsetof(struct gvt_firmware_header, crc32) + 4;
h->crc32 = crc32_le(0, firmware + crc32_start, size - crc32_start);
......
......@@ -49,6 +49,22 @@
static bool enable_out_of_sync = false;
static int preallocated_oos_pages = 8192;
static bool intel_gvt_is_valid_gfn(struct intel_vgpu *vgpu, unsigned long gfn)
{
struct kvm *kvm = vgpu->kvm;
int idx;
bool ret;
if (!vgpu->attached)
return false;
idx = srcu_read_lock(&kvm->srcu);
ret = kvm_is_visible_gfn(kvm, gfn);
srcu_read_unlock(&kvm->srcu, idx);
return ret;
}
/*
* validate a gm address and related range size,
* translate it to host gm address
......@@ -314,7 +330,7 @@ static inline int gtt_get_entry64(void *pt,
return -EINVAL;
if (hypervisor_access) {
ret = intel_gvt_hypervisor_read_gpa(vgpu, gpa +
ret = intel_gvt_read_gpa(vgpu, gpa +
(index << info->gtt_entry_size_shift),
&e->val64, 8);
if (WARN_ON(ret))
......@@ -339,7 +355,7 @@ static inline int gtt_set_entry64(void *pt,
return -EINVAL;
if (hypervisor_access) {
ret = intel_gvt_hypervisor_write_gpa(vgpu, gpa +
ret = intel_gvt_write_gpa(vgpu, gpa +
(index << info->gtt_entry_size_shift),
&e->val64, 8);
if (WARN_ON(ret))
......@@ -997,7 +1013,7 @@ static inline void ppgtt_invalidate_pte(struct intel_vgpu_ppgtt_spt *spt,
if (!pfn || pfn == vgpu->gtt.scratch_pt[type].page_mfn)
return;
intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, pfn << PAGE_SHIFT);
intel_gvt_dma_unmap_guest_page(vgpu, pfn << PAGE_SHIFT);
}
static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt)
......@@ -1162,15 +1178,16 @@ static int is_2MB_gtt_possible(struct intel_vgpu *vgpu,
struct intel_gvt_gtt_entry *entry)
{
const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
unsigned long pfn;
kvm_pfn_t pfn;
if (!HAS_PAGE_SIZES(vgpu->gvt->gt->i915, I915_GTT_PAGE_SIZE_2M))
return 0;
pfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, ops->get_pfn(entry));
if (pfn == INTEL_GVT_INVALID_ADDR)
if (!vgpu->attached)
return -EINVAL;
pfn = gfn_to_pfn(vgpu->kvm, ops->get_pfn(entry));
if (is_error_noslot_pfn(pfn))
return -EINVAL;
return PageTransHuge(pfn_to_page(pfn));
}
......@@ -1195,8 +1212,8 @@ static int split_2MB_gtt_entry(struct intel_vgpu *vgpu,
return PTR_ERR(sub_spt);
for_each_shadow_entry(sub_spt, &sub_se, sub_index) {
ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu,
start_gfn + sub_index, PAGE_SIZE, &dma_addr);
ret = intel_gvt_dma_map_guest_page(vgpu, start_gfn + sub_index,
PAGE_SIZE, &dma_addr);
if (ret) {
ppgtt_invalidate_spt(spt);
return ret;
......@@ -1241,8 +1258,8 @@ static int split_64KB_gtt_entry(struct intel_vgpu *vgpu,
ops->set_64k_splited(&entry);
for (i = 0; i < GTT_64K_PTE_STRIDE; i++) {
ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu,
start_gfn + i, PAGE_SIZE, &dma_addr);
ret = intel_gvt_dma_map_guest_page(vgpu, start_gfn + i,
PAGE_SIZE, &dma_addr);
if (ret)
return ret;
......@@ -1296,8 +1313,7 @@ static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu,
}
/* direct shadow */
ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn, page_size,
&dma_addr);
ret = intel_gvt_dma_map_guest_page(vgpu, gfn, page_size, &dma_addr);
if (ret)
return -ENXIO;
......@@ -1331,7 +1347,7 @@ static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt)
ppgtt_set_shadow_entry(spt, &se, i);
} else {
gfn = ops->get_pfn(&ge);
if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn)) {
if (!intel_gvt_is_valid_gfn(vgpu, gfn)) {
ops->set_pfn(&se, gvt->gtt.scratch_mfn);
ppgtt_set_shadow_entry(spt, &se, i);
continue;
......@@ -1497,7 +1513,7 @@ static int attach_oos_page(struct intel_vgpu_oos_page *oos_page,
struct intel_gvt *gvt = spt->vgpu->gvt;
int ret;
ret = intel_gvt_hypervisor_read_gpa(spt->vgpu,
ret = intel_gvt_read_gpa(spt->vgpu,
spt->guest_page.gfn << I915_GTT_PAGE_SHIFT,
oos_page->mem, I915_GTT_PAGE_SIZE);
if (ret)
......@@ -2228,8 +2244,7 @@ static void ggtt_invalidate_pte(struct intel_vgpu *vgpu,
pfn = pte_ops->get_pfn(entry);
if (pfn != vgpu->gvt->gtt.scratch_mfn)
intel_gvt_hypervisor_dma_unmap_guest_page(vgpu,
pfn << PAGE_SHIFT);
intel_gvt_dma_unmap_guest_page(vgpu, pfn << PAGE_SHIFT);
}
static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
......@@ -2315,13 +2330,13 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
/* one PTE update may be issued in multiple writes and the
* first write may not construct a valid gfn
*/
if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn)) {
if (!intel_gvt_is_valid_gfn(vgpu, gfn)) {
ops->set_pfn(&m, gvt->gtt.scratch_mfn);
goto out;
}
ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn,
PAGE_SIZE, &dma_addr);
ret = intel_gvt_dma_map_guest_page(vgpu, gfn, PAGE_SIZE,
&dma_addr);
if (ret) {
gvt_vgpu_err("fail to populate guest ggtt entry\n");
/* guest driver may read/write the entry when partial
......
/*
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Kevin Tian <kevin.tian@intel.com>
* Eddie Dong <eddie.dong@intel.com>
*
* Contributors:
* Niu Bing <bing.niu@intel.com>
* Zhi Wang <zhi.a.wang@intel.com>
*
*/
#include <linux/types.h>
#include <linux/kthread.h>
#include "i915_drv.h"
#include "intel_gvt.h"
#include "gvt.h"
#include <linux/vfio.h>
#include <linux/mdev.h>
struct intel_gvt_host intel_gvt_host;
static const char * const supported_hypervisors[] = {
[INTEL_GVT_HYPERVISOR_XEN] = "XEN",
[INTEL_GVT_HYPERVISOR_KVM] = "KVM",
};
static const struct intel_gvt_ops intel_gvt_ops = {
.emulate_cfg_read = intel_vgpu_emulate_cfg_read,
.emulate_cfg_write = intel_vgpu_emulate_cfg_write,
.emulate_mmio_read = intel_vgpu_emulate_mmio_read,
.emulate_mmio_write = intel_vgpu_emulate_mmio_write,
.vgpu_create = intel_gvt_create_vgpu,
.vgpu_destroy = intel_gvt_destroy_vgpu,
.vgpu_release = intel_gvt_release_vgpu,
.vgpu_reset = intel_gvt_reset_vgpu,
.vgpu_activate = intel_gvt_activate_vgpu,
.vgpu_deactivate = intel_gvt_deactivate_vgpu,
.vgpu_query_plane = intel_vgpu_query_plane,
.vgpu_get_dmabuf = intel_vgpu_get_dmabuf,
.write_protect_handler = intel_vgpu_page_track_handler,
.emulate_hotplug = intel_vgpu_emulate_hotplug,
};
static void init_device_info(struct intel_gvt *gvt)
{
struct intel_gvt_device_info *info = &gvt->device_info;
struct pci_dev *pdev = to_pci_dev(gvt->gt->i915->drm.dev);
info->max_support_vgpus = 8;
info->cfg_space_size = PCI_CFG_SPACE_EXP_SIZE;
info->mmio_size = 2 * 1024 * 1024;
info->mmio_bar = 0;
info->gtt_start_offset = 8 * 1024 * 1024;
info->gtt_entry_size = 8;
info->gtt_entry_size_shift = 3;
info->gmadr_bytes_in_cmd = 8;
info->max_surface_size = 36 * 1024 * 1024;
info->msi_cap_offset = pdev->msi_cap;
}
static void intel_gvt_test_and_emulate_vblank(struct intel_gvt *gvt)
{
struct intel_vgpu *vgpu;
int id;
mutex_lock(&gvt->lock);
idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) {
if (test_and_clear_bit(INTEL_GVT_REQUEST_EMULATE_VBLANK + id,
(void *)&gvt->service_request)) {
if (vgpu->active)
intel_vgpu_emulate_vblank(vgpu);
}
}
mutex_unlock(&gvt->lock);
}
static int gvt_service_thread(void *data)
{
struct intel_gvt *gvt = (struct intel_gvt *)data;
int ret;
gvt_dbg_core("service thread start\n");
while (!kthread_should_stop()) {
ret = wait_event_interruptible(gvt->service_thread_wq,
kthread_should_stop() || gvt->service_request);
if (kthread_should_stop())
break;
if (WARN_ONCE(ret, "service thread is waken up by signal.\n"))
continue;
intel_gvt_test_and_emulate_vblank(gvt);
if (test_bit(INTEL_GVT_REQUEST_SCHED,
(void *)&gvt->service_request) ||
test_bit(INTEL_GVT_REQUEST_EVENT_SCHED,
(void *)&gvt->service_request)) {
intel_gvt_schedule(gvt);
}
}
return 0;
}
static void clean_service_thread(struct intel_gvt *gvt)
{
kthread_stop(gvt->service_thread);
}
static int init_service_thread(struct intel_gvt *gvt)
{
init_waitqueue_head(&gvt->service_thread_wq);
gvt->service_thread = kthread_run(gvt_service_thread,
gvt, "gvt_service_thread");
if (IS_ERR(gvt->service_thread)) {
gvt_err("fail to start service thread.\n");
return PTR_ERR(gvt->service_thread);
}
return 0;
}
/**
* intel_gvt_clean_device - clean a GVT device
* @i915: i915 private
*
* This function is called at the driver unloading stage, to free the
* resources owned by a GVT device.
*
*/
void intel_gvt_clean_device(struct drm_i915_private *i915)
{
struct intel_gvt *gvt = fetch_and_zero(&i915->gvt);
if (drm_WARN_ON(&i915->drm, !gvt))
return;
intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu);
intel_gvt_clean_vgpu_types(gvt);
intel_gvt_debugfs_clean(gvt);
clean_service_thread(gvt);
intel_gvt_clean_cmd_parser(gvt);
intel_gvt_clean_sched_policy(gvt);
intel_gvt_clean_workload_scheduler(gvt);
intel_gvt_clean_gtt(gvt);
intel_gvt_free_firmware(gvt);
intel_gvt_clean_mmio_info(gvt);
idr_destroy(&gvt->vgpu_idr);
kfree(i915->gvt);
}
/**
* intel_gvt_init_device - initialize a GVT device
* @i915: drm i915 private data
*
* This function is called at the initialization stage, to initialize
* necessary GVT components.
*
* Returns:
* Zero on success, negative error code if failed.
*
*/
int intel_gvt_init_device(struct drm_i915_private *i915)
{
struct intel_gvt *gvt;
struct intel_vgpu *vgpu;
int ret;
if (drm_WARN_ON(&i915->drm, i915->gvt))
return -EEXIST;
gvt = kzalloc(sizeof(struct intel_gvt), GFP_KERNEL);
if (!gvt)
return -ENOMEM;
gvt_dbg_core("init gvt device\n");
idr_init_base(&gvt->vgpu_idr, 1);
spin_lock_init(&gvt->scheduler.mmio_context_lock);
mutex_init(&gvt->lock);
mutex_init(&gvt->sched_lock);
gvt->gt = to_gt(i915);
i915->gvt = gvt;
init_device_info(gvt);
ret = intel_gvt_setup_mmio_info(gvt);
if (ret)
goto out_clean_idr;
intel_gvt_init_engine_mmio_context(gvt);
ret = intel_gvt_load_firmware(gvt);
if (ret)
goto out_clean_mmio_info;
ret = intel_gvt_init_irq(gvt);
if (ret)
goto out_free_firmware;
ret = intel_gvt_init_gtt(gvt);
if (ret)
goto out_free_firmware;
ret = intel_gvt_init_workload_scheduler(gvt);
if (ret)
goto out_clean_gtt;
ret = intel_gvt_init_sched_policy(gvt);
if (ret)
goto out_clean_workload_scheduler;
ret = intel_gvt_init_cmd_parser(gvt);
if (ret)
goto out_clean_sched_policy;
ret = init_service_thread(gvt);
if (ret)
goto out_clean_cmd_parser;
ret = intel_gvt_init_vgpu_types(gvt);
if (ret)
goto out_clean_thread;
vgpu = intel_gvt_create_idle_vgpu(gvt);
if (IS_ERR(vgpu)) {
ret = PTR_ERR(vgpu);
gvt_err("failed to create idle vgpu\n");
goto out_clean_types;
}
gvt->idle_vgpu = vgpu;
intel_gvt_debugfs_init(gvt);
gvt_dbg_core("gvt device initialization is done\n");
intel_gvt_host.dev = i915->drm.dev;
intel_gvt_host.initialized = true;
return 0;
out_clean_types:
intel_gvt_clean_vgpu_types(gvt);
out_clean_thread:
clean_service_thread(gvt);
out_clean_cmd_parser:
intel_gvt_clean_cmd_parser(gvt);
out_clean_sched_policy:
intel_gvt_clean_sched_policy(gvt);
out_clean_workload_scheduler:
intel_gvt_clean_workload_scheduler(gvt);
out_clean_gtt:
intel_gvt_clean_gtt(gvt);
out_free_firmware:
intel_gvt_free_firmware(gvt);
out_clean_mmio_info:
intel_gvt_clean_mmio_info(gvt);
out_clean_idr:
idr_destroy(&gvt->vgpu_idr);
kfree(gvt);
i915->gvt = NULL;
return ret;
}
int
intel_gvt_pm_resume(struct intel_gvt *gvt)
{
intel_gvt_restore_fence(gvt);
intel_gvt_restore_mmio(gvt);
intel_gvt_restore_ggtt(gvt);
return 0;
}
int
intel_gvt_register_hypervisor(const struct intel_gvt_mpt *m)
{
int ret;
void *gvt;
if (!intel_gvt_host.initialized)
return -ENODEV;
if (m->type != INTEL_GVT_HYPERVISOR_KVM &&
m->type != INTEL_GVT_HYPERVISOR_XEN)
return -EINVAL;
/* Get a reference for device model module */
if (!try_module_get(THIS_MODULE))
return -ENODEV;
intel_gvt_host.mpt = m;
intel_gvt_host.hypervisor_type = m->type;
gvt = (void *)kdev_to_i915(intel_gvt_host.dev)->gvt;
ret = intel_gvt_hypervisor_host_init(intel_gvt_host.dev, gvt,
&intel_gvt_ops);
if (ret < 0) {
gvt_err("Failed to init %s hypervisor module\n",
supported_hypervisors[intel_gvt_host.hypervisor_type]);
module_put(THIS_MODULE);
return -ENODEV;
}
gvt_dbg_core("Running with hypervisor %s in host mode\n",
supported_hypervisors[intel_gvt_host.hypervisor_type]);
return 0;
}
EXPORT_SYMBOL_GPL(intel_gvt_register_hypervisor);
void
intel_gvt_unregister_hypervisor(void)
{
void *gvt = (void *)kdev_to_i915(intel_gvt_host.dev)->gvt;
intel_gvt_hypervisor_host_exit(intel_gvt_host.dev, gvt);
module_put(THIS_MODULE);
}
EXPORT_SYMBOL_GPL(intel_gvt_unregister_hypervisor);
......@@ -34,11 +34,13 @@
#define _GVT_H_
#include <uapi/linux/pci_regs.h>
#include <linux/kvm_host.h>
#include <linux/vfio.h>
#include "i915_drv.h"
#include "intel_gvt.h"
#include "debug.h"
#include "hypercall.h"
#include "mmio.h"
#include "reg.h"
#include "interrupt.h"
......@@ -56,15 +58,6 @@
#define GVT_MAX_VGPU 8
struct intel_gvt_host {
struct device *dev;
bool initialized;
int hypervisor_type;
const struct intel_gvt_mpt *mpt;
};
extern struct intel_gvt_host intel_gvt_host;
/* Describe per-platform limitations. */
struct intel_gvt_device_info {
u32 max_support_vgpus;
......@@ -176,12 +169,14 @@ struct intel_vgpu_submission {
} last_ctx[I915_NUM_ENGINES];
};
#define KVMGT_DEBUGFS_FILENAME "kvmgt_nr_cache_entries"
struct intel_vgpu {
struct intel_gvt *gvt;
struct mutex vgpu_lock;
int id;
unsigned long handle; /* vGPU handle used by hypervisor MPT modules */
bool active;
bool attached;
bool pv_notified;
bool failsafe;
unsigned int resetting_eng;
......@@ -209,21 +204,40 @@ struct intel_vgpu {
struct dentry *debugfs;
/* Hypervisor-specific device state. */
void *vdev;
struct list_head dmabuf_obj_list_head;
struct mutex dmabuf_lock;
struct idr object_idr;
struct intel_vgpu_vblank_timer vblank_timer;
u32 scan_nonprivbb;
};
static inline void *intel_vgpu_vdev(struct intel_vgpu *vgpu)
{
return vgpu->vdev;
}
struct vfio_device vfio_device;
struct vfio_region *region;
int num_regions;
struct eventfd_ctx *intx_trigger;
struct eventfd_ctx *msi_trigger;
/*
* Two caches are used to avoid mapping duplicated pages (eg.
* scratch pages). This helps to reduce the DMA setup overhead.
*/
struct rb_root gfn_cache;
struct rb_root dma_addr_cache;
unsigned long nr_cache_entries;
struct mutex cache_lock;
struct notifier_block iommu_notifier;
struct notifier_block group_notifier;
struct kvm *kvm;
struct work_struct release_work;
atomic_t released;
struct vfio_group *vfio_group;
struct kvm_page_track_notifier_node track_node;
#define NR_BKT (1 << 18)
struct hlist_head ptable[NR_BKT];
#undef NR_BKT
};
/* validating GM healthy status*/
#define vgpu_is_vm_unhealthy(ret_val) \
......@@ -272,7 +286,7 @@ struct intel_gvt_mmio {
/* Value of command write of this reg needs to be patched */
#define F_CMD_WRITE_PATCH (1 << 8)
const struct gvt_mmio_block *mmio_block;
struct gvt_mmio_block *mmio_block;
unsigned int num_mmio_block;
DECLARE_HASHTABLE(mmio_info_table, INTEL_GVT_MMIO_HASH_BITS);
......@@ -428,7 +442,6 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt);
#define RING_CTX_SIZE 320
struct intel_vgpu_creation_params {
__u64 handle;
__u64 low_gm_sz; /* in MB */
__u64 high_gm_sz; /* in MB */
__u64 fence_sz;
......@@ -496,6 +509,9 @@ void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu);
int intel_gvt_set_opregion(struct intel_vgpu *vgpu);
int intel_gvt_set_edid(struct intel_vgpu *vgpu, int port_num);
/* validating GM functions */
#define vgpu_gmadr_is_aperture(vgpu, gmadr) \
((gmadr >= vgpu_aperture_gmadr_base(vgpu)) && \
......@@ -557,30 +573,7 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu);
int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload);
void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason);
struct intel_gvt_ops {
int (*emulate_cfg_read)(struct intel_vgpu *, unsigned int, void *,
unsigned int);
int (*emulate_cfg_write)(struct intel_vgpu *, unsigned int, void *,
unsigned int);
int (*emulate_mmio_read)(struct intel_vgpu *, u64, void *,
unsigned int);
int (*emulate_mmio_write)(struct intel_vgpu *, u64, void *,
unsigned int);
struct intel_vgpu *(*vgpu_create)(struct intel_gvt *,
struct intel_vgpu_type *);
void (*vgpu_destroy)(struct intel_vgpu *vgpu);
void (*vgpu_release)(struct intel_vgpu *vgpu);
void (*vgpu_reset)(struct intel_vgpu *);
void (*vgpu_activate)(struct intel_vgpu *);
void (*vgpu_deactivate)(struct intel_vgpu *);
int (*vgpu_query_plane)(struct intel_vgpu *vgpu, void *);
int (*vgpu_get_dmabuf)(struct intel_vgpu *vgpu, unsigned int);
int (*write_protect_handler)(struct intel_vgpu *, u64, void *,
unsigned int);
void (*emulate_hotplug)(struct intel_vgpu *vgpu, bool connected);
};
void intel_vgpu_detach_regions(struct intel_vgpu *vgpu);
enum {
GVT_FAILSAFE_UNSUPPORTED_GUEST,
......@@ -724,13 +717,54 @@ static inline bool intel_gvt_mmio_is_cmd_write_patch(
return gvt->mmio.mmio_attribute[offset >> 2] & F_CMD_WRITE_PATCH;
}
/**
* intel_gvt_read_gpa - copy data from GPA to host data buffer
* @vgpu: a vGPU
* @gpa: guest physical address
* @buf: host data buffer
* @len: data length
*
* Returns:
* Zero on success, negative error code if failed.
*/
static inline int intel_gvt_read_gpa(struct intel_vgpu *vgpu, unsigned long gpa,
void *buf, unsigned long len)
{
if (!vgpu->attached)
return -ESRCH;
return vfio_dma_rw(vgpu->vfio_group, gpa, buf, len, false);
}
/**
* intel_gvt_write_gpa - copy data from host data buffer to GPA
* @vgpu: a vGPU
* @gpa: guest physical address
* @buf: host data buffer
* @len: data length
*
* Returns:
* Zero on success, negative error code if failed.
*/
static inline int intel_gvt_write_gpa(struct intel_vgpu *vgpu,
unsigned long gpa, void *buf, unsigned long len)
{
if (!vgpu->attached)
return -ESRCH;
return vfio_dma_rw(vgpu->vfio_group, gpa, buf, len, true);
}
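[Illustrative aside, not part of the patch: a minimal usage sketch of the two
helpers above. The caller and the +12 ring-context offset are hypothetical,
loosely modelled on the cmd_parser hunk earlier in this diff; both helpers
return -ESRCH when the vGPU is not attached.]

	/* Hypothetical caller: read one dword of a guest ring context and write it back. */
	static int sample_touch_ring_ctx(struct intel_vgpu *vgpu,
					 unsigned long ring_context_gpa)
	{
		u32 ctx_sr_ctl;
		int ret;

		/* Copy 4 bytes of guest physical memory into a host buffer. */
		ret = intel_gvt_read_gpa(vgpu, ring_context_gpa + 12, &ctx_sr_ctl, 4);
		if (ret)
			return ret;

		/* ... inspect or adjust ctx_sr_ctl ... */

		/* Copy the value back to guest physical memory. */
		return intel_gvt_write_gpa(vgpu, ring_context_gpa + 12, &ctx_sr_ctl, 4);
	}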
void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_debugfs_init(struct intel_gvt *gvt);
void intel_gvt_debugfs_clean(struct intel_gvt *gvt);
int intel_gvt_pm_resume(struct intel_gvt *gvt);
int intel_gvt_page_track_add(struct intel_vgpu *info, u64 gfn);
int intel_gvt_page_track_remove(struct intel_vgpu *info, u64 gfn);
int intel_gvt_dma_pin_guest_page(struct intel_vgpu *vgpu, dma_addr_t dma_addr);
int intel_gvt_dma_map_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
unsigned long size, dma_addr_t *dma_addr);
void intel_gvt_dma_unmap_guest_page(struct intel_vgpu *vgpu,
dma_addr_t dma_addr);
#include "trace.h"
#include "mpt.h"
#endif
......@@ -102,12 +102,11 @@ struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt,
return NULL;
}
static int new_mmio_info(struct intel_gvt *gvt,
u32 offset, u16 flags, u32 size,
u32 addr_mask, u32 ro_mask, u32 device,
gvt_mmio_func read, gvt_mmio_func write)
static int setup_mmio_info(struct intel_gvt *gvt, u32 offset, u32 size,
u16 flags, u32 addr_mask, u32 ro_mask, u32 device,
gvt_mmio_func read, gvt_mmio_func write)
{
struct intel_gvt_mmio_info *info, *p;
struct intel_gvt_mmio_info *p;
u32 start, end, i;
if (!intel_gvt_match_device(gvt, device))
......@@ -120,32 +119,18 @@ static int new_mmio_info(struct intel_gvt *gvt,
end = offset + size;
for (i = start; i < end; i += 4) {
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
info->offset = i;
p = intel_gvt_find_mmio_info(gvt, info->offset);
if (p) {
WARN(1, "dup mmio definition offset %x\n",
info->offset);
kfree(info);
/* We return -EEXIST here to make GVT-g load fail.
* So duplicated MMIO can be found as soon as
* possible.
*/
return -EEXIST;
p = intel_gvt_find_mmio_info(gvt, i);
if (!p) {
WARN(1, "assign a handler to a non-tracked mmio %x\n",
i);
return -ENODEV;
}
info->ro_mask = ro_mask;
info->device = device;
info->read = read ? read : intel_vgpu_default_mmio_read;
info->write = write ? write : intel_vgpu_default_mmio_write;
gvt->mmio.mmio_attribute[info->offset / 4] = flags;
INIT_HLIST_NODE(&info->node);
hash_add(gvt->mmio.mmio_info_table, &info->node, info->offset);
gvt->mmio.num_tracked_mmio++;
p->ro_mask = ro_mask;
gvt->mmio.mmio_attribute[i / 4] = flags;
if (read)
p->read = read;
if (write)
p->write = write;
}
return 0;
}
......@@ -2143,15 +2128,12 @@ static int csfe_chicken1_mmio_write(struct intel_vgpu *vgpu,
}
#define MMIO_F(reg, s, f, am, rm, d, r, w) do { \
ret = new_mmio_info(gvt, i915_mmio_reg_offset(reg), \
f, s, am, rm, d, r, w); \
ret = setup_mmio_info(gvt, i915_mmio_reg_offset(reg), \
s, f, am, rm, d, r, w); \
if (ret) \
return ret; \
} while (0)
#define MMIO_D(reg, d) \
MMIO_F(reg, 4, 0, 0, 0, d, NULL, NULL)
#define MMIO_DH(reg, d, r, w) \
MMIO_F(reg, 4, 0, 0, 0, d, r, w)
......@@ -2176,9 +2158,6 @@ static int csfe_chicken1_mmio_write(struct intel_vgpu *vgpu,
MMIO_F(prefix(GEN8_BSD2_RING_BASE), s, f, am, rm, d, r, w); \
} while (0)
#define MMIO_RING_D(prefix, d) \
MMIO_RING_F(prefix, 4, 0, 0, 0, d, NULL, NULL)
#define MMIO_RING_DFH(prefix, d, f, r, w) \
MMIO_RING_F(prefix, 4, f, 0, 0, d, r, w)
......@@ -2202,7 +2181,6 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DFH(SDEIMR, D_ALL, 0, NULL, intel_vgpu_reg_imr_handler);
MMIO_DFH(SDEIER, D_ALL, 0, NULL, intel_vgpu_reg_ier_handler);
MMIO_DFH(SDEIIR, D_ALL, 0, NULL, intel_vgpu_reg_iir_handler);
MMIO_D(SDEISR, D_ALL);
MMIO_RING_DFH(RING_HWSTAM, D_ALL, 0, NULL, NULL);
......@@ -2230,7 +2208,6 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_GM_RDR(_MMIO(0x2148), D_ALL, NULL, NULL);
MMIO_GM_RDR(CCID(RENDER_RING_BASE), D_ALL, NULL, NULL);
MMIO_GM_RDR(_MMIO(0x12198), D_ALL, NULL, NULL);
MMIO_D(GEN7_CXT_SIZE, D_ALL);
MMIO_RING_DFH(RING_TAIL, D_ALL, 0, NULL, NULL);
MMIO_RING_DFH(RING_HEAD, D_ALL, 0, NULL, NULL);
......@@ -2284,257 +2261,32 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DFH(GEN7_HALF_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
/* display */
MMIO_F(_MMIO(0x60220), 0x20, 0, 0, 0, D_ALL, NULL, NULL);
MMIO_D(_MMIO(0x602a0), D_ALL);
MMIO_D(_MMIO(0x65050), D_ALL);
MMIO_D(_MMIO(0x650b4), D_ALL);
MMIO_D(_MMIO(0xc4040), D_ALL);
MMIO_D(DERRMR, D_ALL);
MMIO_D(PIPEDSL(PIPE_A), D_ALL);
MMIO_D(PIPEDSL(PIPE_B), D_ALL);
MMIO_D(PIPEDSL(PIPE_C), D_ALL);
MMIO_D(PIPEDSL(_PIPE_EDP), D_ALL);
MMIO_DH(PIPECONF(PIPE_A), D_ALL, NULL, pipeconf_mmio_write);
MMIO_DH(PIPECONF(PIPE_B), D_ALL, NULL, pipeconf_mmio_write);
MMIO_DH(PIPECONF(PIPE_C), D_ALL, NULL, pipeconf_mmio_write);
MMIO_DH(PIPECONF(_PIPE_EDP), D_ALL, NULL, pipeconf_mmio_write);
MMIO_D(PIPESTAT(PIPE_A), D_ALL);
MMIO_D(PIPESTAT(PIPE_B), D_ALL);
MMIO_D(PIPESTAT(PIPE_C), D_ALL);
MMIO_D(PIPESTAT(_PIPE_EDP), D_ALL);
MMIO_D(PIPE_FLIPCOUNT_G4X(PIPE_A), D_ALL);
MMIO_D(PIPE_FLIPCOUNT_G4X(PIPE_B), D_ALL);
MMIO_D(PIPE_FLIPCOUNT_G4X(PIPE_C), D_ALL);
MMIO_D(PIPE_FLIPCOUNT_G4X(_PIPE_EDP), D_ALL);
MMIO_D(PIPE_FRMCOUNT_G4X(PIPE_A), D_ALL);
MMIO_D(PIPE_FRMCOUNT_G4X(PIPE_B), D_ALL);
MMIO_D(PIPE_FRMCOUNT_G4X(PIPE_C), D_ALL);
MMIO_D(PIPE_FRMCOUNT_G4X(_PIPE_EDP), D_ALL);
MMIO_D(CURCNTR(PIPE_A), D_ALL);
MMIO_D(CURCNTR(PIPE_B), D_ALL);
MMIO_D(CURCNTR(PIPE_C), D_ALL);
MMIO_D(CURPOS(PIPE_A), D_ALL);
MMIO_D(CURPOS(PIPE_B), D_ALL);
MMIO_D(CURPOS(PIPE_C), D_ALL);
MMIO_D(CURBASE(PIPE_A), D_ALL);
MMIO_D(CURBASE(PIPE_B), D_ALL);
MMIO_D(CURBASE(PIPE_C), D_ALL);
MMIO_D(CUR_FBC_CTL(PIPE_A), D_ALL);
MMIO_D(CUR_FBC_CTL(PIPE_B), D_ALL);
MMIO_D(CUR_FBC_CTL(PIPE_C), D_ALL);
MMIO_D(_MMIO(0x700ac), D_ALL);
MMIO_D(_MMIO(0x710ac), D_ALL);
MMIO_D(_MMIO(0x720ac), D_ALL);
MMIO_D(_MMIO(0x70090), D_ALL);
MMIO_D(_MMIO(0x70094), D_ALL);
MMIO_D(_MMIO(0x70098), D_ALL);
MMIO_D(_MMIO(0x7009c), D_ALL);
MMIO_D(DSPCNTR(PIPE_A), D_ALL);
MMIO_D(DSPADDR(PIPE_A), D_ALL);
MMIO_D(DSPSTRIDE(PIPE_A), D_ALL);
MMIO_D(DSPPOS(PIPE_A), D_ALL);
MMIO_D(DSPSIZE(PIPE_A), D_ALL);
MMIO_DH(DSPSURF(PIPE_A), D_ALL, NULL, pri_surf_mmio_write);
MMIO_D(DSPOFFSET(PIPE_A), D_ALL);
MMIO_D(DSPSURFLIVE(PIPE_A), D_ALL);
MMIO_DH(REG_50080(PIPE_A, PLANE_PRIMARY), D_ALL, NULL,
reg50080_mmio_write);
MMIO_D(DSPCNTR(PIPE_B), D_ALL);
MMIO_D(DSPADDR(PIPE_B), D_ALL);
MMIO_D(DSPSTRIDE(PIPE_B), D_ALL);
MMIO_D(DSPPOS(PIPE_B), D_ALL);
MMIO_D(DSPSIZE(PIPE_B), D_ALL);
MMIO_DH(DSPSURF(PIPE_B), D_ALL, NULL, pri_surf_mmio_write);
MMIO_D(DSPOFFSET(PIPE_B), D_ALL);
MMIO_D(DSPSURFLIVE(PIPE_B), D_ALL);
MMIO_DH(REG_50080(PIPE_B, PLANE_PRIMARY), D_ALL, NULL,
reg50080_mmio_write);
MMIO_D(DSPCNTR(PIPE_C), D_ALL);
MMIO_D(DSPADDR(PIPE_C), D_ALL);
MMIO_D(DSPSTRIDE(PIPE_C), D_ALL);
MMIO_D(DSPPOS(PIPE_C), D_ALL);
MMIO_D(DSPSIZE(PIPE_C), D_ALL);
MMIO_DH(DSPSURF(PIPE_C), D_ALL, NULL, pri_surf_mmio_write);
MMIO_D(DSPOFFSET(PIPE_C), D_ALL);
MMIO_D(DSPSURFLIVE(PIPE_C), D_ALL);
MMIO_DH(REG_50080(PIPE_C, PLANE_PRIMARY), D_ALL, NULL,
reg50080_mmio_write);
MMIO_D(SPRCTL(PIPE_A), D_ALL);
MMIO_D(SPRLINOFF(PIPE_A), D_ALL);
MMIO_D(SPRSTRIDE(PIPE_A), D_ALL);
MMIO_D(SPRPOS(PIPE_A), D_ALL);
MMIO_D(SPRSIZE(PIPE_A), D_ALL);
MMIO_D(SPRKEYVAL(PIPE_A), D_ALL);
MMIO_D(SPRKEYMSK(PIPE_A), D_ALL);
MMIO_DH(SPRSURF(PIPE_A), D_ALL, NULL, spr_surf_mmio_write);
MMIO_D(SPRKEYMAX(PIPE_A), D_ALL);
MMIO_D(SPROFFSET(PIPE_A), D_ALL);
MMIO_D(SPRSCALE(PIPE_A), D_ALL);
MMIO_D(SPRSURFLIVE(PIPE_A), D_ALL);
MMIO_DH(REG_50080(PIPE_A, PLANE_SPRITE0), D_ALL, NULL,
reg50080_mmio_write);
MMIO_D(SPRCTL(PIPE_B), D_ALL);
MMIO_D(SPRLINOFF(PIPE_B), D_ALL);
MMIO_D(SPRSTRIDE(PIPE_B), D_ALL);
MMIO_D(SPRPOS(PIPE_B), D_ALL);
MMIO_D(SPRSIZE(PIPE_B), D_ALL);
MMIO_D(SPRKEYVAL(PIPE_B), D_ALL);
MMIO_D(SPRKEYMSK(PIPE_B), D_ALL);
MMIO_DH(SPRSURF(PIPE_B), D_ALL, NULL, spr_surf_mmio_write);
MMIO_D(SPRKEYMAX(PIPE_B), D_ALL);
MMIO_D(SPROFFSET(PIPE_B), D_ALL);
MMIO_D(SPRSCALE(PIPE_B), D_ALL);
MMIO_D(SPRSURFLIVE(PIPE_B), D_ALL);
MMIO_DH(REG_50080(PIPE_B, PLANE_SPRITE0), D_ALL, NULL,
reg50080_mmio_write);
MMIO_D(SPRCTL(PIPE_C), D_ALL);
MMIO_D(SPRLINOFF(PIPE_C), D_ALL);
MMIO_D(SPRSTRIDE(PIPE_C), D_ALL);
MMIO_D(SPRPOS(PIPE_C), D_ALL);
MMIO_D(SPRSIZE(PIPE_C), D_ALL);
MMIO_D(SPRKEYVAL(PIPE_C), D_ALL);
MMIO_D(SPRKEYMSK(PIPE_C), D_ALL);
MMIO_DH(SPRSURF(PIPE_C), D_ALL, NULL, spr_surf_mmio_write);
MMIO_D(SPRKEYMAX(PIPE_C), D_ALL);
MMIO_D(SPROFFSET(PIPE_C), D_ALL);
MMIO_D(SPRSCALE(PIPE_C), D_ALL);
MMIO_D(SPRSURFLIVE(PIPE_C), D_ALL);
MMIO_DH(REG_50080(PIPE_C, PLANE_SPRITE0), D_ALL, NULL,
reg50080_mmio_write);
MMIO_D(HTOTAL(TRANSCODER_A), D_ALL);
MMIO_D(HBLANK(TRANSCODER_A), D_ALL);
MMIO_D(HSYNC(TRANSCODER_A), D_ALL);
MMIO_D(VTOTAL(TRANSCODER_A), D_ALL);
MMIO_D(VBLANK(TRANSCODER_A), D_ALL);
MMIO_D(VSYNC(TRANSCODER_A), D_ALL);
MMIO_D(BCLRPAT(TRANSCODER_A), D_ALL);
MMIO_D(VSYNCSHIFT(TRANSCODER_A), D_ALL);
MMIO_D(PIPESRC(TRANSCODER_A), D_ALL);
MMIO_D(HTOTAL(TRANSCODER_B), D_ALL);
MMIO_D(HBLANK(TRANSCODER_B), D_ALL);
MMIO_D(HSYNC(TRANSCODER_B), D_ALL);
MMIO_D(VTOTAL(TRANSCODER_B), D_ALL);
MMIO_D(VBLANK(TRANSCODER_B), D_ALL);
MMIO_D(VSYNC(TRANSCODER_B), D_ALL);
MMIO_D(BCLRPAT(TRANSCODER_B), D_ALL);
MMIO_D(VSYNCSHIFT(TRANSCODER_B), D_ALL);
MMIO_D(PIPESRC(TRANSCODER_B), D_ALL);
MMIO_D(HTOTAL(TRANSCODER_C), D_ALL);
MMIO_D(HBLANK(TRANSCODER_C), D_ALL);
MMIO_D(HSYNC(TRANSCODER_C), D_ALL);
MMIO_D(VTOTAL(TRANSCODER_C), D_ALL);
MMIO_D(VBLANK(TRANSCODER_C), D_ALL);
MMIO_D(VSYNC(TRANSCODER_C), D_ALL);
MMIO_D(BCLRPAT(TRANSCODER_C), D_ALL);
MMIO_D(VSYNCSHIFT(TRANSCODER_C), D_ALL);
MMIO_D(PIPESRC(TRANSCODER_C), D_ALL);
MMIO_D(HTOTAL(TRANSCODER_EDP), D_ALL);
MMIO_D(HBLANK(TRANSCODER_EDP), D_ALL);
MMIO_D(HSYNC(TRANSCODER_EDP), D_ALL);
MMIO_D(VTOTAL(TRANSCODER_EDP), D_ALL);
MMIO_D(VBLANK(TRANSCODER_EDP), D_ALL);
MMIO_D(VSYNC(TRANSCODER_EDP), D_ALL);
MMIO_D(BCLRPAT(TRANSCODER_EDP), D_ALL);
MMIO_D(VSYNCSHIFT(TRANSCODER_EDP), D_ALL);
MMIO_D(PIPE_DATA_M1(TRANSCODER_A), D_ALL);
MMIO_D(PIPE_DATA_N1(TRANSCODER_A), D_ALL);
MMIO_D(PIPE_DATA_M2(TRANSCODER_A), D_ALL);
MMIO_D(PIPE_DATA_N2(TRANSCODER_A), D_ALL);
MMIO_D(PIPE_LINK_M1(TRANSCODER_A), D_ALL);
MMIO_D(PIPE_LINK_N1(TRANSCODER_A), D_ALL);
MMIO_D(PIPE_LINK_M2(TRANSCODER_A), D_ALL);
MMIO_D(PIPE_LINK_N2(TRANSCODER_A), D_ALL);
MMIO_D(PIPE_DATA_M1(TRANSCODER_B), D_ALL);
MMIO_D(PIPE_DATA_N1(TRANSCODER_B), D_ALL);
MMIO_D(PIPE_DATA_M2(TRANSCODER_B), D_ALL);
MMIO_D(PIPE_DATA_N2(TRANSCODER_B), D_ALL);
MMIO_D(PIPE_LINK_M1(TRANSCODER_B), D_ALL);
MMIO_D(PIPE_LINK_N1(TRANSCODER_B), D_ALL);
MMIO_D(PIPE_LINK_M2(TRANSCODER_B), D_ALL);
MMIO_D(PIPE_LINK_N2(TRANSCODER_B), D_ALL);
MMIO_D(PIPE_DATA_M1(TRANSCODER_C), D_ALL);
MMIO_D(PIPE_DATA_N1(TRANSCODER_C), D_ALL);
MMIO_D(PIPE_DATA_M2(TRANSCODER_C), D_ALL);
MMIO_D(PIPE_DATA_N2(TRANSCODER_C), D_ALL);
MMIO_D(PIPE_LINK_M1(TRANSCODER_C), D_ALL);
MMIO_D(PIPE_LINK_N1(TRANSCODER_C), D_ALL);
MMIO_D(PIPE_LINK_M2(TRANSCODER_C), D_ALL);
MMIO_D(PIPE_LINK_N2(TRANSCODER_C), D_ALL);
MMIO_D(PIPE_DATA_M1(TRANSCODER_EDP), D_ALL);
MMIO_D(PIPE_DATA_N1(TRANSCODER_EDP), D_ALL);
MMIO_D(PIPE_DATA_M2(TRANSCODER_EDP), D_ALL);
MMIO_D(PIPE_DATA_N2(TRANSCODER_EDP), D_ALL);
MMIO_D(PIPE_LINK_M1(TRANSCODER_EDP), D_ALL);
MMIO_D(PIPE_LINK_N1(TRANSCODER_EDP), D_ALL);
MMIO_D(PIPE_LINK_M2(TRANSCODER_EDP), D_ALL);
MMIO_D(PIPE_LINK_N2(TRANSCODER_EDP), D_ALL);
MMIO_D(PF_CTL(PIPE_A), D_ALL);
MMIO_D(PF_WIN_SZ(PIPE_A), D_ALL);
MMIO_D(PF_WIN_POS(PIPE_A), D_ALL);
MMIO_D(PF_VSCALE(PIPE_A), D_ALL);
MMIO_D(PF_HSCALE(PIPE_A), D_ALL);
MMIO_D(PF_CTL(PIPE_B), D_ALL);
MMIO_D(PF_WIN_SZ(PIPE_B), D_ALL);
MMIO_D(PF_WIN_POS(PIPE_B), D_ALL);
MMIO_D(PF_VSCALE(PIPE_B), D_ALL);
MMIO_D(PF_HSCALE(PIPE_B), D_ALL);
MMIO_D(PF_CTL(PIPE_C), D_ALL);
MMIO_D(PF_WIN_SZ(PIPE_C), D_ALL);
MMIO_D(PF_WIN_POS(PIPE_C), D_ALL);
MMIO_D(PF_VSCALE(PIPE_C), D_ALL);
MMIO_D(PF_HSCALE(PIPE_C), D_ALL);
MMIO_D(WM0_PIPE_ILK(PIPE_A), D_ALL);
MMIO_D(WM0_PIPE_ILK(PIPE_B), D_ALL);
MMIO_D(WM0_PIPE_ILK(PIPE_C), D_ALL);
MMIO_D(WM1_LP_ILK, D_ALL);
MMIO_D(WM2_LP_ILK, D_ALL);
MMIO_D(WM3_LP_ILK, D_ALL);
MMIO_D(WM1S_LP_ILK, D_ALL);
MMIO_D(WM2S_LP_IVB, D_ALL);
MMIO_D(WM3S_LP_IVB, D_ALL);
MMIO_D(BLC_PWM_CPU_CTL2, D_ALL);
MMIO_D(BLC_PWM_CPU_CTL, D_ALL);
MMIO_D(BLC_PWM_PCH_CTL1, D_ALL);
MMIO_D(BLC_PWM_PCH_CTL2, D_ALL);
MMIO_D(_MMIO(0x48268), D_ALL);
MMIO_F(PCH_GMBUS0, 4 * 4, 0, 0, 0, D_ALL, gmbus_mmio_read,
gmbus_mmio_write);
MMIO_F(PCH_GPIO_BASE, 6 * 4, F_UNALIGN, 0, 0, D_ALL, NULL, NULL);
MMIO_F(_MMIO(0xe4f00), 0x28, 0, 0, 0, D_ALL, NULL, NULL);
MMIO_F(_MMIO(_PCH_DPB_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_PRE_SKL, NULL,
dp_aux_ch_ctl_mmio_write);
......@@ -2557,74 +2309,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DH(FDI_RX_CTL(PIPE_A), D_ALL, NULL, update_fdi_rx_iir_status);
MMIO_DH(FDI_RX_CTL(PIPE_B), D_ALL, NULL, update_fdi_rx_iir_status);
MMIO_DH(FDI_RX_CTL(PIPE_C), D_ALL, NULL, update_fdi_rx_iir_status);
MMIO_D(_MMIO(_PCH_TRANS_HTOTAL_A), D_ALL);
MMIO_D(_MMIO(_PCH_TRANS_HBLANK_A), D_ALL);
MMIO_D(_MMIO(_PCH_TRANS_HSYNC_A), D_ALL);
MMIO_D(_MMIO(_PCH_TRANS_VTOTAL_A), D_ALL);
MMIO_D(_MMIO(_PCH_TRANS_VBLANK_A), D_ALL);
MMIO_D(_MMIO(_PCH_TRANS_VSYNC_A), D_ALL);
MMIO_D(_MMIO(_PCH_TRANS_VSYNCSHIFT_A), D_ALL);
MMIO_D(_MMIO(_PCH_TRANS_HTOTAL_B), D_ALL);
MMIO_D(_MMIO(_PCH_TRANS_HBLANK_B), D_ALL);
MMIO_D(_MMIO(_PCH_TRANS_HSYNC_B), D_ALL);
MMIO_D(_MMIO(_PCH_TRANS_VTOTAL_B), D_ALL);
MMIO_D(_MMIO(_PCH_TRANS_VBLANK_B), D_ALL);
MMIO_D(_MMIO(_PCH_TRANS_VSYNC_B), D_ALL);
MMIO_D(_MMIO(_PCH_TRANS_VSYNCSHIFT_B), D_ALL);
MMIO_D(_MMIO(_PCH_TRANSA_DATA_M1), D_ALL);
MMIO_D(_MMIO(_PCH_TRANSA_DATA_N1), D_ALL);
MMIO_D(_MMIO(_PCH_TRANSA_DATA_M2), D_ALL);
MMIO_D(_MMIO(_PCH_TRANSA_DATA_N2), D_ALL);
MMIO_D(_MMIO(_PCH_TRANSA_LINK_M1), D_ALL);
MMIO_D(_MMIO(_PCH_TRANSA_LINK_N1), D_ALL);
MMIO_D(_MMIO(_PCH_TRANSA_LINK_M2), D_ALL);
MMIO_D(_MMIO(_PCH_TRANSA_LINK_N2), D_ALL);
MMIO_D(TRANS_DP_CTL(PIPE_A), D_ALL);
MMIO_D(TRANS_DP_CTL(PIPE_B), D_ALL);
MMIO_D(TRANS_DP_CTL(PIPE_C), D_ALL);
MMIO_D(TVIDEO_DIP_CTL(PIPE_A), D_ALL);
MMIO_D(TVIDEO_DIP_DATA(PIPE_A), D_ALL);
MMIO_D(TVIDEO_DIP_GCP(PIPE_A), D_ALL);
MMIO_D(TVIDEO_DIP_CTL(PIPE_B), D_ALL);
MMIO_D(TVIDEO_DIP_DATA(PIPE_B), D_ALL);
MMIO_D(TVIDEO_DIP_GCP(PIPE_B), D_ALL);
MMIO_D(TVIDEO_DIP_CTL(PIPE_C), D_ALL);
MMIO_D(TVIDEO_DIP_DATA(PIPE_C), D_ALL);
MMIO_D(TVIDEO_DIP_GCP(PIPE_C), D_ALL);
MMIO_D(_MMIO(_FDI_RXA_MISC), D_ALL);
MMIO_D(_MMIO(_FDI_RXB_MISC), D_ALL);
MMIO_D(_MMIO(_FDI_RXA_TUSIZE1), D_ALL);
MMIO_D(_MMIO(_FDI_RXA_TUSIZE2), D_ALL);
MMIO_D(_MMIO(_FDI_RXB_TUSIZE1), D_ALL);
MMIO_D(_MMIO(_FDI_RXB_TUSIZE2), D_ALL);
MMIO_DH(PCH_PP_CONTROL, D_ALL, NULL, pch_pp_control_mmio_write);
MMIO_D(PCH_PP_DIVISOR, D_ALL);
MMIO_D(PCH_PP_STATUS, D_ALL);
MMIO_D(PCH_LVDS, D_ALL);
MMIO_D(_MMIO(_PCH_DPLL_A), D_ALL);
MMIO_D(_MMIO(_PCH_DPLL_B), D_ALL);
MMIO_D(_MMIO(_PCH_FPA0), D_ALL);
MMIO_D(_MMIO(_PCH_FPA1), D_ALL);
MMIO_D(_MMIO(_PCH_FPB0), D_ALL);
MMIO_D(_MMIO(_PCH_FPB1), D_ALL);
MMIO_D(PCH_DREF_CONTROL, D_ALL);
MMIO_D(PCH_RAWCLK_FREQ, D_ALL);
MMIO_D(PCH_DPLL_SEL, D_ALL);
MMIO_D(_MMIO(0x61208), D_ALL);
MMIO_D(_MMIO(0x6120c), D_ALL);
MMIO_D(PCH_PP_ON_DELAYS, D_ALL);
MMIO_D(PCH_PP_OFF_DELAYS, D_ALL);
MMIO_DH(_MMIO(0xe651c), D_ALL, dpy_reg_mmio_read, NULL);
MMIO_DH(_MMIO(0xe661c), D_ALL, dpy_reg_mmio_read, NULL);
MMIO_DH(_MMIO(0xe671c), D_ALL, dpy_reg_mmio_read, NULL);
......@@ -2640,143 +2325,10 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
NULL, NULL);
MMIO_DH(LCPLL_CTL, D_ALL, NULL, lcpll_ctl_mmio_write);
MMIO_D(FUSE_STRAP, D_ALL);
MMIO_D(DIGITAL_PORT_HOTPLUG_CNTRL, D_ALL);
MMIO_D(DISP_ARB_CTL, D_ALL);
MMIO_D(DISP_ARB_CTL2, D_ALL);
MMIO_D(ILK_DISPLAY_CHICKEN1, D_ALL);
MMIO_D(ILK_DISPLAY_CHICKEN2, D_ALL);
MMIO_D(ILK_DSPCLK_GATE_D, D_ALL);
MMIO_D(SOUTH_CHICKEN1, D_ALL);
MMIO_DH(SOUTH_CHICKEN2, D_ALL, NULL, south_chicken2_mmio_write);
MMIO_D(_MMIO(_TRANSA_CHICKEN1), D_ALL);
MMIO_D(_MMIO(_TRANSB_CHICKEN1), D_ALL);
MMIO_D(SOUTH_DSPCLK_GATE_D, D_ALL);
MMIO_D(_MMIO(_TRANSA_CHICKEN2), D_ALL);
MMIO_D(_MMIO(_TRANSB_CHICKEN2), D_ALL);
MMIO_D(ILK_DPFC_CB_BASE(INTEL_FBC_A), D_ALL);
MMIO_D(ILK_DPFC_CONTROL(INTEL_FBC_A), D_ALL);
MMIO_D(ILK_DPFC_RECOMP_CTL(INTEL_FBC_A), D_ALL);
MMIO_D(ILK_DPFC_STATUS(INTEL_FBC_A), D_ALL);
MMIO_D(ILK_DPFC_FENCE_YOFF(INTEL_FBC_A), D_ALL);
MMIO_D(ILK_DPFC_CHICKEN(INTEL_FBC_A), D_ALL);
MMIO_D(ILK_FBC_RT_BASE, D_ALL);
MMIO_D(IPS_CTL, D_ALL);
MMIO_D(PIPE_CSC_COEFF_RY_GY(PIPE_A), D_ALL);
MMIO_D(PIPE_CSC_COEFF_BY(PIPE_A), D_ALL);
MMIO_D(PIPE_CSC_COEFF_RU_GU(PIPE_A), D_ALL);
MMIO_D(PIPE_CSC_COEFF_BU(PIPE_A), D_ALL);
MMIO_D(PIPE_CSC_COEFF_RV_GV(PIPE_A), D_ALL);
MMIO_D(PIPE_CSC_COEFF_BV(PIPE_A), D_ALL);
MMIO_D(PIPE_CSC_MODE(PIPE_A), D_ALL);
MMIO_D(PIPE_CSC_PREOFF_HI(PIPE_A), D_ALL);
MMIO_D(PIPE_CSC_PREOFF_ME(PIPE_A), D_ALL);
MMIO_D(PIPE_CSC_PREOFF_LO(PIPE_A), D_ALL);
MMIO_D(PIPE_CSC_POSTOFF_HI(PIPE_A), D_ALL);
MMIO_D(PIPE_CSC_POSTOFF_ME(PIPE_A), D_ALL);
MMIO_D(PIPE_CSC_POSTOFF_LO(PIPE_A), D_ALL);
MMIO_D(PIPE_CSC_COEFF_RY_GY(PIPE_B), D_ALL);
MMIO_D(PIPE_CSC_COEFF_BY(PIPE_B), D_ALL);
MMIO_D(PIPE_CSC_COEFF_RU_GU(PIPE_B), D_ALL);
MMIO_D(PIPE_CSC_COEFF_BU(PIPE_B), D_ALL);
MMIO_D(PIPE_CSC_COEFF_RV_GV(PIPE_B), D_ALL);
MMIO_D(PIPE_CSC_COEFF_BV(PIPE_B), D_ALL);
MMIO_D(PIPE_CSC_MODE(PIPE_B), D_ALL);
MMIO_D(PIPE_CSC_PREOFF_HI(PIPE_B), D_ALL);
MMIO_D(PIPE_CSC_PREOFF_ME(PIPE_B), D_ALL);
MMIO_D(PIPE_CSC_PREOFF_LO(PIPE_B), D_ALL);
MMIO_D(PIPE_CSC_POSTOFF_HI(PIPE_B), D_ALL);
MMIO_D(PIPE_CSC_POSTOFF_ME(PIPE_B), D_ALL);
MMIO_D(PIPE_CSC_POSTOFF_LO(PIPE_B), D_ALL);
MMIO_D(PIPE_CSC_COEFF_RY_GY(PIPE_C), D_ALL);
MMIO_D(PIPE_CSC_COEFF_BY(PIPE_C), D_ALL);
MMIO_D(PIPE_CSC_COEFF_RU_GU(PIPE_C), D_ALL);
MMIO_D(PIPE_CSC_COEFF_BU(PIPE_C), D_ALL);
MMIO_D(PIPE_CSC_COEFF_RV_GV(PIPE_C), D_ALL);
MMIO_D(PIPE_CSC_COEFF_BV(PIPE_C), D_ALL);
MMIO_D(PIPE_CSC_MODE(PIPE_C), D_ALL);
MMIO_D(PIPE_CSC_PREOFF_HI(PIPE_C), D_ALL);
MMIO_D(PIPE_CSC_PREOFF_ME(PIPE_C), D_ALL);
MMIO_D(PIPE_CSC_PREOFF_LO(PIPE_C), D_ALL);
MMIO_D(PIPE_CSC_POSTOFF_HI(PIPE_C), D_ALL);
MMIO_D(PIPE_CSC_POSTOFF_ME(PIPE_C), D_ALL);
MMIO_D(PIPE_CSC_POSTOFF_LO(PIPE_C), D_ALL);
MMIO_D(PREC_PAL_INDEX(PIPE_A), D_ALL);
MMIO_D(PREC_PAL_DATA(PIPE_A), D_ALL);
MMIO_F(PREC_PAL_GC_MAX(PIPE_A, 0), 4 * 3, 0, 0, 0, D_ALL, NULL, NULL);
MMIO_D(PREC_PAL_INDEX(PIPE_B), D_ALL);
MMIO_D(PREC_PAL_DATA(PIPE_B), D_ALL);
MMIO_F(PREC_PAL_GC_MAX(PIPE_B, 0), 4 * 3, 0, 0, 0, D_ALL, NULL, NULL);
MMIO_D(PREC_PAL_INDEX(PIPE_C), D_ALL);
MMIO_D(PREC_PAL_DATA(PIPE_C), D_ALL);
MMIO_F(PREC_PAL_GC_MAX(PIPE_C, 0), 4 * 3, 0, 0, 0, D_ALL, NULL, NULL);
MMIO_D(_MMIO(0x60110), D_ALL);
MMIO_D(_MMIO(0x61110), D_ALL);
MMIO_F(_MMIO(0x70400), 0x40, 0, 0, 0, D_ALL, NULL, NULL);
MMIO_F(_MMIO(0x71400), 0x40, 0, 0, 0, D_ALL, NULL, NULL);
MMIO_F(_MMIO(0x72400), 0x40, 0, 0, 0, D_ALL, NULL, NULL);
MMIO_F(_MMIO(0x70440), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
MMIO_F(_MMIO(0x71440), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
MMIO_F(_MMIO(0x72440), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
MMIO_F(_MMIO(0x7044c), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
MMIO_F(_MMIO(0x7144c), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
MMIO_F(_MMIO(0x7244c), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
MMIO_D(WM_LINETIME(PIPE_A), D_ALL);
MMIO_D(WM_LINETIME(PIPE_B), D_ALL);
MMIO_D(WM_LINETIME(PIPE_C), D_ALL);
MMIO_D(SPLL_CTL, D_ALL);
MMIO_D(_MMIO(_WRPLL_CTL1), D_ALL);
MMIO_D(_MMIO(_WRPLL_CTL2), D_ALL);
MMIO_D(PORT_CLK_SEL(PORT_A), D_ALL);
MMIO_D(PORT_CLK_SEL(PORT_B), D_ALL);
MMIO_D(PORT_CLK_SEL(PORT_C), D_ALL);
MMIO_D(PORT_CLK_SEL(PORT_D), D_ALL);
MMIO_D(PORT_CLK_SEL(PORT_E), D_ALL);
MMIO_D(TRANS_CLK_SEL(TRANSCODER_A), D_ALL);
MMIO_D(TRANS_CLK_SEL(TRANSCODER_B), D_ALL);
MMIO_D(TRANS_CLK_SEL(TRANSCODER_C), D_ALL);
MMIO_D(HSW_NDE_RSTWRN_OPT, D_ALL);
MMIO_D(_MMIO(0x46508), D_ALL);
MMIO_D(_MMIO(0x49080), D_ALL);
MMIO_D(_MMIO(0x49180), D_ALL);
MMIO_D(_MMIO(0x49280), D_ALL);
MMIO_F(_MMIO(0x49090), 0x14, 0, 0, 0, D_ALL, NULL, NULL);
MMIO_F(_MMIO(0x49190), 0x14, 0, 0, 0, D_ALL, NULL, NULL);
MMIO_F(_MMIO(0x49290), 0x14, 0, 0, 0, D_ALL, NULL, NULL);
MMIO_D(GAMMA_MODE(PIPE_A), D_ALL);
MMIO_D(GAMMA_MODE(PIPE_B), D_ALL);
MMIO_D(GAMMA_MODE(PIPE_C), D_ALL);
MMIO_D(PIPE_MULT(PIPE_A), D_ALL);
MMIO_D(PIPE_MULT(PIPE_B), D_ALL);
MMIO_D(PIPE_MULT(PIPE_C), D_ALL);
MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_A), D_ALL);
MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_B), D_ALL);
MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_C), D_ALL);
MMIO_DH(SFUSE_STRAP, D_ALL, NULL, NULL);
MMIO_D(SBI_ADDR, D_ALL);
MMIO_DH(SBI_DATA, D_ALL, sbi_data_mmio_read, NULL);
MMIO_DH(SBI_CTL_STAT, D_ALL, NULL, sbi_ctl_mmio_write);
MMIO_D(PIXCLK_GATE, D_ALL);
MMIO_F(_MMIO(_DPA_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_ALL, NULL,
dp_aux_ch_ctl_mmio_write);
......@@ -2799,65 +2351,18 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DH(DP_TP_STATUS(PORT_D), D_ALL, NULL, dp_tp_status_mmio_write);
MMIO_DH(DP_TP_STATUS(PORT_E), D_ALL, NULL, NULL);
MMIO_F(_MMIO(_DDI_BUF_TRANS_A), 0x50, 0, 0, 0, D_ALL, NULL, NULL);
MMIO_F(_MMIO(0x64e60), 0x50, 0, 0, 0, D_ALL, NULL, NULL);
MMIO_F(_MMIO(0x64eC0), 0x50, 0, 0, 0, D_ALL, NULL, NULL);
MMIO_F(_MMIO(0x64f20), 0x50, 0, 0, 0, D_ALL, NULL, NULL);
MMIO_F(_MMIO(0x64f80), 0x50, 0, 0, 0, D_ALL, NULL, NULL);
MMIO_D(HSW_AUD_CFG(PIPE_A), D_ALL);
MMIO_D(HSW_AUD_PIN_ELD_CP_VLD, D_ALL);
MMIO_D(HSW_AUD_MISC_CTRL(PIPE_A), D_ALL);
MMIO_DH(_MMIO(_TRANS_DDI_FUNC_CTL_A), D_ALL, NULL, NULL);
MMIO_DH(_MMIO(_TRANS_DDI_FUNC_CTL_B), D_ALL, NULL, NULL);
MMIO_DH(_MMIO(_TRANS_DDI_FUNC_CTL_C), D_ALL, NULL, NULL);
MMIO_DH(_MMIO(_TRANS_DDI_FUNC_CTL_EDP), D_ALL, NULL, NULL);
MMIO_D(_MMIO(_TRANSA_MSA_MISC), D_ALL);
MMIO_D(_MMIO(_TRANSB_MSA_MISC), D_ALL);
MMIO_D(_MMIO(_TRANSC_MSA_MISC), D_ALL);
MMIO_D(_MMIO(_TRANS_EDP_MSA_MISC), D_ALL);
MMIO_DH(FORCEWAKE, D_ALL, NULL, NULL);
MMIO_D(FORCEWAKE_ACK, D_ALL);
MMIO_D(GEN6_GT_CORE_STATUS, D_ALL);
MMIO_D(GEN6_GT_THREAD_STATUS_REG, D_ALL);
MMIO_DFH(GTFIFODBG, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(GTFIFOCTL, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_DH(FORCEWAKE_MT, D_PRE_SKL, NULL, mul_force_wake_write);
MMIO_DH(FORCEWAKE_ACK_HSW, D_BDW, NULL, NULL);
MMIO_D(ECOBUS, D_ALL);
MMIO_DH(GEN6_RC_CONTROL, D_ALL, NULL, NULL);
MMIO_DH(GEN6_RC_STATE, D_ALL, NULL, NULL);
MMIO_D(GEN6_RPNSWREQ, D_ALL);
MMIO_D(GEN6_RC_VIDEO_FREQ, D_ALL);
MMIO_D(GEN6_RP_DOWN_TIMEOUT, D_ALL);
MMIO_D(GEN6_RP_INTERRUPT_LIMITS, D_ALL);
MMIO_D(GEN6_RPSTAT1, D_ALL);
MMIO_D(GEN6_RP_CONTROL, D_ALL);
MMIO_D(GEN6_RP_UP_THRESHOLD, D_ALL);
MMIO_D(GEN6_RP_DOWN_THRESHOLD, D_ALL);
MMIO_D(GEN6_RP_CUR_UP_EI, D_ALL);
MMIO_D(GEN6_RP_CUR_UP, D_ALL);
MMIO_D(GEN6_RP_PREV_UP, D_ALL);
MMIO_D(GEN6_RP_CUR_DOWN_EI, D_ALL);
MMIO_D(GEN6_RP_CUR_DOWN, D_ALL);
MMIO_D(GEN6_RP_PREV_DOWN, D_ALL);
MMIO_D(GEN6_RP_UP_EI, D_ALL);
MMIO_D(GEN6_RP_DOWN_EI, D_ALL);
MMIO_D(GEN6_RP_IDLE_HYSTERSIS, D_ALL);
MMIO_D(GEN6_RC1_WAKE_RATE_LIMIT, D_ALL);
MMIO_D(GEN6_RC6_WAKE_RATE_LIMIT, D_ALL);
MMIO_D(GEN6_RC6pp_WAKE_RATE_LIMIT, D_ALL);
MMIO_D(GEN6_RC_EVALUATION_INTERVAL, D_ALL);
MMIO_D(GEN6_RC_IDLE_HYSTERSIS, D_ALL);
MMIO_D(GEN6_RC_SLEEP, D_ALL);
MMIO_D(GEN6_RC1e_THRESHOLD, D_ALL);
MMIO_D(GEN6_RC6_THRESHOLD, D_ALL);
MMIO_D(GEN6_RC6p_THRESHOLD, D_ALL);
MMIO_D(GEN6_RC6pp_THRESHOLD, D_ALL);
MMIO_D(GEN6_PMINTRMSK, D_ALL);
MMIO_DH(HSW_PWR_WELL_CTL1, D_BDW, NULL, power_well_ctl_mmio_write);
MMIO_DH(HSW_PWR_WELL_CTL2, D_BDW, NULL, power_well_ctl_mmio_write);
MMIO_DH(HSW_PWR_WELL_CTL3, D_BDW, NULL, power_well_ctl_mmio_write);
......@@ -2865,97 +2370,17 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DH(HSW_PWR_WELL_CTL5, D_BDW, NULL, power_well_ctl_mmio_write);
MMIO_DH(HSW_PWR_WELL_CTL6, D_BDW, NULL, power_well_ctl_mmio_write);
MMIO_D(RSTDBYCTL, D_ALL);
MMIO_DH(GEN6_GDRST, D_ALL, NULL, gdrst_mmio_write);
MMIO_F(FENCE_REG_GEN6_LO(0), 0x80, 0, 0, 0, D_ALL, fence_mmio_read, fence_mmio_write);
MMIO_DH(CPU_VGACNTRL, D_ALL, NULL, vga_control_mmio_write);
MMIO_D(TILECTL, D_ALL);
MMIO_D(GEN6_UCGCTL1, D_ALL);
MMIO_D(GEN6_UCGCTL2, D_ALL);
MMIO_F(_MMIO(0x4f000), 0x90, 0, 0, 0, D_ALL, NULL, NULL);
MMIO_D(GEN6_PCODE_DATA, D_ALL);
MMIO_D(_MMIO(0x13812c), D_ALL);
MMIO_DH(GEN7_ERR_INT, D_ALL, NULL, NULL);
MMIO_D(HSW_EDRAM_CAP, D_ALL);
MMIO_D(HSW_IDICR, D_ALL);
MMIO_DH(GFX_FLSH_CNTL_GEN6, D_ALL, NULL, NULL);
MMIO_D(_MMIO(0x3c), D_ALL);
MMIO_D(_MMIO(0x860), D_ALL);
MMIO_D(ECOSKPD(RENDER_RING_BASE), D_ALL);
MMIO_D(_MMIO(0x121d0), D_ALL);
MMIO_D(ECOSKPD(BLT_RING_BASE), D_ALL);
MMIO_D(_MMIO(0x41d0), D_ALL);
MMIO_D(GAC_ECO_BITS, D_ALL);
MMIO_D(_MMIO(0x6200), D_ALL);
MMIO_D(_MMIO(0x6204), D_ALL);
MMIO_D(_MMIO(0x6208), D_ALL);
MMIO_D(_MMIO(0x7118), D_ALL);
MMIO_D(_MMIO(0x7180), D_ALL);
MMIO_D(_MMIO(0x7408), D_ALL);
MMIO_D(_MMIO(0x7c00), D_ALL);
MMIO_DH(GEN6_MBCTL, D_ALL, NULL, mbctl_write);
MMIO_D(_MMIO(0x911c), D_ALL);
MMIO_D(_MMIO(0x9120), D_ALL);
MMIO_DFH(GEN7_UCGCTL4, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_D(GAB_CTL, D_ALL);
MMIO_D(_MMIO(0x48800), D_ALL);
MMIO_D(_MMIO(0xce044), D_ALL);
MMIO_D(_MMIO(0xe6500), D_ALL);
MMIO_D(_MMIO(0xe6504), D_ALL);
MMIO_D(_MMIO(0xe6600), D_ALL);
MMIO_D(_MMIO(0xe6604), D_ALL);
MMIO_D(_MMIO(0xe6700), D_ALL);
MMIO_D(_MMIO(0xe6704), D_ALL);
MMIO_D(_MMIO(0xe6800), D_ALL);
MMIO_D(_MMIO(0xe6804), D_ALL);
MMIO_D(PCH_GMBUS4, D_ALL);
MMIO_D(PCH_GMBUS5, D_ALL);
MMIO_D(_MMIO(0x902c), D_ALL);
MMIO_D(_MMIO(0xec008), D_ALL);
MMIO_D(_MMIO(0xec00c), D_ALL);
MMIO_D(_MMIO(0xec008 + 0x18), D_ALL);
MMIO_D(_MMIO(0xec00c + 0x18), D_ALL);
MMIO_D(_MMIO(0xec008 + 0x18 * 2), D_ALL);
MMIO_D(_MMIO(0xec00c + 0x18 * 2), D_ALL);
MMIO_D(_MMIO(0xec008 + 0x18 * 3), D_ALL);
MMIO_D(_MMIO(0xec00c + 0x18 * 3), D_ALL);
MMIO_D(_MMIO(0xec408), D_ALL);
MMIO_D(_MMIO(0xec40c), D_ALL);
MMIO_D(_MMIO(0xec408 + 0x18), D_ALL);
MMIO_D(_MMIO(0xec40c + 0x18), D_ALL);
MMIO_D(_MMIO(0xec408 + 0x18 * 2), D_ALL);
MMIO_D(_MMIO(0xec40c + 0x18 * 2), D_ALL);
MMIO_D(_MMIO(0xec408 + 0x18 * 3), D_ALL);
MMIO_D(_MMIO(0xec40c + 0x18 * 3), D_ALL);
MMIO_D(_MMIO(0xfc810), D_ALL);
MMIO_D(_MMIO(0xfc81c), D_ALL);
MMIO_D(_MMIO(0xfc828), D_ALL);
MMIO_D(_MMIO(0xfc834), D_ALL);
MMIO_D(_MMIO(0xfcc00), D_ALL);
MMIO_D(_MMIO(0xfcc0c), D_ALL);
MMIO_D(_MMIO(0xfcc18), D_ALL);
MMIO_D(_MMIO(0xfcc24), D_ALL);
MMIO_D(_MMIO(0xfd000), D_ALL);
MMIO_D(_MMIO(0xfd00c), D_ALL);
MMIO_D(_MMIO(0xfd018), D_ALL);
MMIO_D(_MMIO(0xfd024), D_ALL);
MMIO_D(_MMIO(0xfd034), D_ALL);
MMIO_DH(FPGA_DBG, D_ALL, NULL, fpga_dbg_mmio_write);
MMIO_D(_MMIO(0x2054), D_ALL);
MMIO_D(_MMIO(0x12054), D_ALL);
MMIO_D(_MMIO(0x22054), D_ALL);
MMIO_D(_MMIO(0x1a054), D_ALL);
MMIO_D(_MMIO(0x44070), D_ALL);
MMIO_DFH(_MMIO(0x215c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(_MMIO(0x2178), D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(_MMIO(0x217c), D_ALL, F_CMD_ACCESS, NULL, NULL);
......@@ -2963,8 +2388,6 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DFH(_MMIO(0x1217c), D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_F(_MMIO(0x2290), 8, F_CMD_ACCESS, 0, 0, D_BDW_PLUS, NULL, NULL);
MMIO_D(_MMIO(0x2b00), D_BDW_PLUS);
MMIO_D(_MMIO(0x2360), D_BDW_PLUS);
MMIO_F(_MMIO(0x5200), 32, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
MMIO_F(_MMIO(0x5240), 32, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
MMIO_F(_MMIO(0x5280), 16, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
......@@ -3012,28 +2435,23 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
static int init_bdw_mmio_info(struct intel_gvt *gvt)
{
struct drm_i915_private *dev_priv = gvt->gt->i915;
int ret;
MMIO_DH(GEN8_GT_IMR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
MMIO_DH(GEN8_GT_IER(0), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
MMIO_DH(GEN8_GT_IIR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
MMIO_D(GEN8_GT_ISR(0), D_BDW_PLUS);
MMIO_DH(GEN8_GT_IMR(1), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
MMIO_DH(GEN8_GT_IER(1), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
MMIO_DH(GEN8_GT_IIR(1), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
MMIO_D(GEN8_GT_ISR(1), D_BDW_PLUS);
MMIO_DH(GEN8_GT_IMR(2), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
MMIO_DH(GEN8_GT_IER(2), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
MMIO_DH(GEN8_GT_IIR(2), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
MMIO_D(GEN8_GT_ISR(2), D_BDW_PLUS);
MMIO_DH(GEN8_GT_IMR(3), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
MMIO_DH(GEN8_GT_IER(3), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
MMIO_DH(GEN8_GT_IIR(3), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
MMIO_D(GEN8_GT_ISR(3), D_BDW_PLUS);
MMIO_DH(GEN8_DE_PIPE_IMR(PIPE_A), D_BDW_PLUS, NULL,
intel_vgpu_reg_imr_handler);
......@@ -3041,7 +2459,6 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt)
intel_vgpu_reg_ier_handler);
MMIO_DH(GEN8_DE_PIPE_IIR(PIPE_A), D_BDW_PLUS, NULL,
intel_vgpu_reg_iir_handler);
MMIO_D(GEN8_DE_PIPE_ISR(PIPE_A), D_BDW_PLUS);
MMIO_DH(GEN8_DE_PIPE_IMR(PIPE_B), D_BDW_PLUS, NULL,
intel_vgpu_reg_imr_handler);
......@@ -3049,7 +2466,6 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt)
intel_vgpu_reg_ier_handler);
MMIO_DH(GEN8_DE_PIPE_IIR(PIPE_B), D_BDW_PLUS, NULL,
intel_vgpu_reg_iir_handler);
MMIO_D(GEN8_DE_PIPE_ISR(PIPE_B), D_BDW_PLUS);
MMIO_DH(GEN8_DE_PIPE_IMR(PIPE_C), D_BDW_PLUS, NULL,
intel_vgpu_reg_imr_handler);
......@@ -3057,22 +2473,18 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt)
intel_vgpu_reg_ier_handler);
MMIO_DH(GEN8_DE_PIPE_IIR(PIPE_C), D_BDW_PLUS, NULL,
intel_vgpu_reg_iir_handler);
MMIO_D(GEN8_DE_PIPE_ISR(PIPE_C), D_BDW_PLUS);
MMIO_DH(GEN8_DE_PORT_IMR, D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
MMIO_DH(GEN8_DE_PORT_IER, D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
MMIO_DH(GEN8_DE_PORT_IIR, D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
MMIO_D(GEN8_DE_PORT_ISR, D_BDW_PLUS);
MMIO_DH(GEN8_DE_MISC_IMR, D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
MMIO_DH(GEN8_DE_MISC_IER, D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
MMIO_DH(GEN8_DE_MISC_IIR, D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
MMIO_D(GEN8_DE_MISC_ISR, D_BDW_PLUS);
MMIO_DH(GEN8_PCU_IMR, D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
MMIO_DH(GEN8_PCU_IER, D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
MMIO_DH(GEN8_PCU_IIR, D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
MMIO_D(GEN8_PCU_ISR, D_BDW_PLUS);
MMIO_DH(GEN8_MASTER_IRQ, D_BDW_PLUS, NULL,
intel_vgpu_reg_master_irq_handler);
......@@ -3107,21 +2519,8 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt)
MMIO_RING_DFH(RING_REG, D_BDW_PLUS, F_MODE_MASK, NULL, NULL);
#undef RING_REG
MMIO_D(PIPEMISC(PIPE_A), D_BDW_PLUS);
MMIO_D(PIPEMISC(PIPE_B), D_BDW_PLUS);
MMIO_D(PIPEMISC(PIPE_C), D_BDW_PLUS);
MMIO_D(_MMIO(0x1c1d0), D_BDW_PLUS);
MMIO_D(GEN6_MBCUNIT_SNPCR, D_BDW_PLUS);
MMIO_D(GEN7_MISCCPCTL, D_BDW_PLUS);
MMIO_D(_MMIO(0x1c054), D_BDW_PLUS);
MMIO_DH(GEN6_PCODE_MAILBOX, D_BDW_PLUS, NULL, mailbox_write);
MMIO_D(GEN8_PRIVATE_PAT_LO, D_BDW_PLUS & ~D_BXT);
MMIO_D(GEN8_PRIVATE_PAT_HI, D_BDW_PLUS);
MMIO_D(GAMTARBMODE, D_BDW_PLUS);
#define RING_REG(base) _MMIO((base) + 0x270)
MMIO_RING_F(RING_REG, 32, F_CMD_ACCESS, 0, 0, D_BDW_PLUS, NULL, NULL);
#undef RING_REG
......@@ -3130,24 +2529,6 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt)
MMIO_DFH(HDC_CHICKEN0, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_D(CHICKEN_PIPESL_1(PIPE_A), D_BDW_PLUS);
MMIO_D(CHICKEN_PIPESL_1(PIPE_B), D_BDW_PLUS);
MMIO_D(CHICKEN_PIPESL_1(PIPE_C), D_BDW_PLUS);
MMIO_D(WM_MISC, D_BDW);
MMIO_D(_MMIO(_SRD_CTL_EDP), D_BDW);
MMIO_D(_MMIO(0x6671c), D_BDW_PLUS);
MMIO_D(_MMIO(0x66c00), D_BDW_PLUS);
MMIO_D(_MMIO(0x66c04), D_BDW_PLUS);
MMIO_D(HSW_GTT_CACHE_EN, D_BDW_PLUS);
MMIO_D(GEN8_EU_DISABLE0, D_BDW_PLUS);
MMIO_D(GEN8_EU_DISABLE1, D_BDW_PLUS);
MMIO_D(GEN8_EU_DISABLE2, D_BDW_PLUS);
MMIO_D(_MMIO(0xfdc), D_BDW_PLUS);
MMIO_DFH(GEN8_ROW_CHICKEN, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS,
NULL, NULL);
MMIO_DFH(GEN7_ROW_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS,
......@@ -3159,27 +2540,14 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt)
MMIO_DFH(GEN8_L3SQCREG4, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(_MMIO(0xb100), D_BDW, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(_MMIO(0xb10c), D_BDW, F_CMD_ACCESS, NULL, NULL);
MMIO_D(_MMIO(0xb110), D_BDW);
MMIO_D(GEN9_SCRATCH_LNCF1, D_BDW_PLUS);
MMIO_F(_MMIO(0x24d0), 48, F_CMD_ACCESS | F_CMD_WRITE_PATCH, 0, 0,
D_BDW_PLUS, NULL, force_nonpriv_write);
MMIO_D(_MMIO(0x44484), D_BDW_PLUS);
MMIO_D(_MMIO(0x4448c), D_BDW_PLUS);
MMIO_DFH(_MMIO(0x83a4), D_BDW, F_CMD_ACCESS, NULL, NULL);
MMIO_D(GEN8_L3_LRA_1_GPGPU, D_BDW_PLUS);
MMIO_DFH(_MMIO(0x8430), D_BDW, F_CMD_ACCESS, NULL, NULL);
MMIO_D(_MMIO(0x110000), D_BDW_PLUS);
MMIO_D(_MMIO(0x48400), D_BDW_PLUS);
MMIO_D(_MMIO(0x6e570), D_BDW_PLUS);
MMIO_D(_MMIO(0x65f10), D_BDW_PLUS);
MMIO_DFH(_MMIO(0xe194), D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(_MMIO(0xe188), D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(HALF_SLICE_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
......@@ -3219,30 +2587,15 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_F(DP_AUX_CH_CTL(AUX_CH_D), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
dp_aux_ch_ctl_mmio_write);
MMIO_D(HSW_PWR_WELL_CTL1, D_SKL_PLUS);
MMIO_DH(HSW_PWR_WELL_CTL2, D_SKL_PLUS, NULL, skl_power_well_ctl_write);
MMIO_DH(DBUF_CTL_S(0), D_SKL_PLUS, NULL, gen9_dbuf_ctl_mmio_write);
MMIO_D(GEN9_PG_ENABLE, D_SKL_PLUS);
MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
MMIO_DFH(GEN9_GAMT_ECO_REG_RW_IA, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(MMCD_MISC_CTRL, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DH(CHICKEN_PAR1_1, D_SKL_PLUS, NULL, NULL);
MMIO_D(DC_STATE_EN, D_SKL_PLUS);
MMIO_D(DC_STATE_DEBUG, D_SKL_PLUS);
MMIO_D(CDCLK_CTL, D_SKL_PLUS);
MMIO_DH(LCPLL1_CTL, D_SKL_PLUS, NULL, skl_lcpll_write);
MMIO_DH(LCPLL2_CTL, D_SKL_PLUS, NULL, skl_lcpll_write);
MMIO_D(_MMIO(_DPLL1_CFGCR1), D_SKL_PLUS);
MMIO_D(_MMIO(_DPLL2_CFGCR1), D_SKL_PLUS);
MMIO_D(_MMIO(_DPLL3_CFGCR1), D_SKL_PLUS);
MMIO_D(_MMIO(_DPLL1_CFGCR2), D_SKL_PLUS);
MMIO_D(_MMIO(_DPLL2_CFGCR2), D_SKL_PLUS);
MMIO_D(_MMIO(_DPLL3_CFGCR2), D_SKL_PLUS);
MMIO_D(DPLL_CTRL1, D_SKL_PLUS);
MMIO_D(DPLL_CTRL2, D_SKL_PLUS);
MMIO_DH(DPLL_STATUS, D_SKL_PLUS, dpll_status_read, NULL);
MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write);
......@@ -3285,22 +2638,6 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_DH(CUR_BUF_CFG(PIPE_B), D_SKL_PLUS, NULL, NULL);
MMIO_DH(CUR_BUF_CFG(PIPE_C), D_SKL_PLUS, NULL, NULL);
MMIO_F(PLANE_WM(PIPE_A, 0, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
MMIO_F(PLANE_WM(PIPE_A, 1, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
MMIO_F(PLANE_WM(PIPE_A, 2, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
MMIO_F(PLANE_WM(PIPE_B, 0, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
MMIO_F(PLANE_WM(PIPE_B, 1, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
MMIO_F(PLANE_WM(PIPE_B, 2, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
MMIO_F(PLANE_WM(PIPE_C, 0, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
MMIO_F(PLANE_WM(PIPE_C, 1, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
MMIO_F(PLANE_WM(PIPE_C, 2, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
MMIO_F(CUR_WM(PIPE_A, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
MMIO_F(CUR_WM(PIPE_B, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
MMIO_F(CUR_WM(PIPE_C, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
MMIO_DH(PLANE_WM_TRANS(PIPE_A, 0), D_SKL_PLUS, NULL, NULL);
MMIO_DH(PLANE_WM_TRANS(PIPE_A, 1), D_SKL_PLUS, NULL, NULL);
MMIO_DH(PLANE_WM_TRANS(PIPE_A, 2), D_SKL_PLUS, NULL, NULL);
......@@ -3362,30 +2699,13 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_DH(_MMIO(_REG_701C4(PIPE_C, 3)), D_SKL_PLUS, NULL, NULL);
MMIO_DH(_MMIO(_REG_701C4(PIPE_C, 4)), D_SKL_PLUS, NULL, NULL);
MMIO_D(_MMIO(_PLANE_CTL_3_A), D_SKL_PLUS);
MMIO_D(_MMIO(_PLANE_CTL_3_B), D_SKL_PLUS);
MMIO_D(_MMIO(0x72380), D_SKL_PLUS);
MMIO_D(_MMIO(0x7239c), D_SKL_PLUS);
MMIO_D(_MMIO(_PLANE_SURF_3_A), D_SKL_PLUS);
MMIO_D(_MMIO(_PLANE_SURF_3_B), D_SKL_PLUS);
MMIO_D(DMC_SSP_BASE, D_SKL_PLUS);
MMIO_D(DMC_HTP_SKL, D_SKL_PLUS);
MMIO_D(DMC_LAST_WRITE, D_SKL_PLUS);
MMIO_DFH(BDW_SCRATCH1, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_D(SKL_DFSM, D_SKL_PLUS);
MMIO_D(DISPIO_CR_TX_BMU_CR0, D_SKL_PLUS);
MMIO_F(GEN9_GFX_MOCS(0), 0x7f8, F_CMD_ACCESS, 0, 0, D_SKL_PLUS,
NULL, NULL);
MMIO_F(GEN7_L3CNTLREG2, 0x80, F_CMD_ACCESS, 0, 0, D_SKL_PLUS,
NULL, NULL);
MMIO_D(RPM_CONFIG0, D_SKL_PLUS);
MMIO_D(_MMIO(0xd08), D_SKL_PLUS);
MMIO_D(RC6_LOCATION, D_SKL_PLUS);
MMIO_DFH(GEN7_FF_SLICE_CS_CHICKEN1, D_SKL_PLUS,
F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(GEN9_CS_DEBUG_MODE1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
......@@ -3402,40 +2722,9 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_DFH(_MMIO(0x4dfc), D_SKL_PLUS, F_PM_SAVE,
NULL, gen9_trtt_chicken_write);
MMIO_D(_MMIO(0x46430), D_SKL_PLUS);
MMIO_D(_MMIO(0x46520), D_SKL_PLUS);
MMIO_D(_MMIO(0xc403c), D_SKL_PLUS);
MMIO_DFH(GEN8_GARBCNTL, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DH(DMA_CTRL, D_SKL_PLUS, NULL, dma_ctrl_write);
MMIO_D(_MMIO(0x65900), D_SKL_PLUS);
MMIO_D(GEN6_STOLEN_RESERVED, D_SKL_PLUS);
MMIO_D(_MMIO(0x4068), D_SKL_PLUS);
MMIO_D(_MMIO(0x67054), D_SKL_PLUS);
MMIO_D(_MMIO(0x6e560), D_SKL_PLUS);
MMIO_D(_MMIO(0x6e554), D_SKL_PLUS);
MMIO_D(_MMIO(0x2b20), D_SKL_PLUS);
MMIO_D(_MMIO(0x65f00), D_SKL_PLUS);
MMIO_D(_MMIO(0x65f08), D_SKL_PLUS);
MMIO_D(_MMIO(0x320f0), D_SKL_PLUS);
MMIO_D(_MMIO(0x70034), D_SKL_PLUS);
MMIO_D(_MMIO(0x71034), D_SKL_PLUS);
MMIO_D(_MMIO(0x72034), D_SKL_PLUS);
MMIO_D(_MMIO(_PLANE_KEYVAL_1(PIPE_A)), D_SKL_PLUS);
MMIO_D(_MMIO(_PLANE_KEYVAL_1(PIPE_B)), D_SKL_PLUS);
MMIO_D(_MMIO(_PLANE_KEYVAL_1(PIPE_C)), D_SKL_PLUS);
MMIO_D(_MMIO(_PLANE_KEYMAX_1(PIPE_A)), D_SKL_PLUS);
MMIO_D(_MMIO(_PLANE_KEYMAX_1(PIPE_B)), D_SKL_PLUS);
MMIO_D(_MMIO(_PLANE_KEYMAX_1(PIPE_C)), D_SKL_PLUS);
MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_A)), D_SKL_PLUS);
MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_B)), D_SKL_PLUS);
MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_C)), D_SKL_PLUS);
MMIO_D(_MMIO(0x44500), D_SKL_PLUS);
#define CSFE_CHICKEN1_REG(base) _MMIO((base) + 0xD4)
MMIO_RING_DFH(CSFE_CHICKEN1_REG, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
NULL, csfe_chicken1_mmio_write);
......@@ -3446,7 +2735,6 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
NULL, NULL);
MMIO_DFH(GAMT_CHKN_BIT_REG, D_KBL | D_CFL, F_CMD_ACCESS, NULL, NULL);
MMIO_D(GEN9_CTX_PREEMPT_REG, D_SKL_PLUS & ~D_BXT);
MMIO_DFH(_MMIO(0xe4cc), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
return 0;
......@@ -3454,43 +2742,13 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
static int init_bxt_mmio_info(struct intel_gvt *gvt)
{
struct drm_i915_private *dev_priv = gvt->gt->i915;
int ret;
MMIO_F(_MMIO(0x80000), 0x3000, 0, 0, 0, D_BXT, NULL, NULL);
MMIO_D(GEN7_SAMPLER_INSTDONE, D_BXT);
MMIO_D(GEN7_ROW_INSTDONE, D_BXT);
MMIO_D(GEN8_FAULT_TLB_DATA0, D_BXT);
MMIO_D(GEN8_FAULT_TLB_DATA1, D_BXT);
MMIO_D(ERROR_GEN6, D_BXT);
MMIO_D(DONE_REG, D_BXT);
MMIO_D(EIR, D_BXT);
MMIO_D(PGTBL_ER, D_BXT);
MMIO_D(_MMIO(0x4194), D_BXT);
MMIO_D(_MMIO(0x4294), D_BXT);
MMIO_D(_MMIO(0x4494), D_BXT);
MMIO_RING_D(RING_PSMI_CTL, D_BXT);
MMIO_RING_D(RING_DMA_FADD, D_BXT);
MMIO_RING_D(RING_DMA_FADD_UDW, D_BXT);
MMIO_RING_D(RING_IPEHR, D_BXT);
MMIO_RING_D(RING_INSTPS, D_BXT);
MMIO_RING_D(RING_BBADDR_UDW, D_BXT);
MMIO_RING_D(RING_BBSTATE, D_BXT);
MMIO_RING_D(RING_IPEIR, D_BXT);
MMIO_F(SOFT_SCRATCH(0), 16 * 4, 0, 0, 0, D_BXT, NULL, NULL);
MMIO_DH(BXT_P_CR_GT_DISP_PWRON, D_BXT, NULL, bxt_gt_disp_pwron_write);
MMIO_D(BXT_RP_STATE_CAP, D_BXT);
MMIO_DH(BXT_PHY_CTL_FAMILY(DPIO_PHY0), D_BXT,
NULL, bxt_phy_ctl_family_write);
MMIO_DH(BXT_PHY_CTL_FAMILY(DPIO_PHY1), D_BXT,
NULL, bxt_phy_ctl_family_write);
MMIO_D(BXT_PHY_CTL(PORT_A), D_BXT);
MMIO_D(BXT_PHY_CTL(PORT_B), D_BXT);
MMIO_D(BXT_PHY_CTL(PORT_C), D_BXT);
MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_A), D_BXT,
NULL, bxt_port_pll_enable_write);
MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_B), D_BXT,
......@@ -3498,128 +2756,19 @@ static int init_bxt_mmio_info(struct intel_gvt *gvt)
MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_C), D_BXT, NULL,
bxt_port_pll_enable_write);
MMIO_D(BXT_PORT_CL1CM_DW0(DPIO_PHY0), D_BXT);
MMIO_D(BXT_PORT_CL1CM_DW9(DPIO_PHY0), D_BXT);
MMIO_D(BXT_PORT_CL1CM_DW10(DPIO_PHY0), D_BXT);
MMIO_D(BXT_PORT_CL1CM_DW28(DPIO_PHY0), D_BXT);
MMIO_D(BXT_PORT_CL1CM_DW30(DPIO_PHY0), D_BXT);
MMIO_D(BXT_PORT_CL2CM_DW6(DPIO_PHY0), D_BXT);
MMIO_D(BXT_PORT_REF_DW3(DPIO_PHY0), D_BXT);
MMIO_D(BXT_PORT_REF_DW6(DPIO_PHY0), D_BXT);
MMIO_D(BXT_PORT_REF_DW8(DPIO_PHY0), D_BXT);
MMIO_D(BXT_PORT_CL1CM_DW0(DPIO_PHY1), D_BXT);
MMIO_D(BXT_PORT_CL1CM_DW9(DPIO_PHY1), D_BXT);
MMIO_D(BXT_PORT_CL1CM_DW10(DPIO_PHY1), D_BXT);
MMIO_D(BXT_PORT_CL1CM_DW28(DPIO_PHY1), D_BXT);
MMIO_D(BXT_PORT_CL1CM_DW30(DPIO_PHY1), D_BXT);
MMIO_D(BXT_PORT_CL2CM_DW6(DPIO_PHY1), D_BXT);
MMIO_D(BXT_PORT_REF_DW3(DPIO_PHY1), D_BXT);
MMIO_D(BXT_PORT_REF_DW6(DPIO_PHY1), D_BXT);
MMIO_D(BXT_PORT_REF_DW8(DPIO_PHY1), D_BXT);
MMIO_D(BXT_PORT_PLL_EBB_0(DPIO_PHY0, DPIO_CH0), D_BXT);
MMIO_D(BXT_PORT_PLL_EBB_4(DPIO_PHY0, DPIO_CH0), D_BXT);
MMIO_D(BXT_PORT_PCS_DW10_LN01(DPIO_PHY0, DPIO_CH0), D_BXT);
MMIO_D(BXT_PORT_PCS_DW10_GRP(DPIO_PHY0, DPIO_CH0), D_BXT);
MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY0, DPIO_CH0), D_BXT);
MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY0, DPIO_CH0), D_BXT);
MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY0, DPIO_CH0), D_BXT,
NULL, bxt_pcs_dw12_grp_write);
MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY0, DPIO_CH0), D_BXT);
MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY0, DPIO_CH0), D_BXT);
MMIO_DH(BXT_PORT_TX_DW3_LN0(DPIO_PHY0, DPIO_CH0), D_BXT,
bxt_port_tx_dw3_read, NULL);
MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY0, DPIO_CH0), D_BXT);
MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY0, DPIO_CH0), D_BXT);
MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY0, DPIO_CH0), D_BXT);
MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 0), D_BXT);
MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 1), D_BXT);
MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 2), D_BXT);
MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 3), D_BXT);
MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 0), D_BXT);
MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 1), D_BXT);
MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 2), D_BXT);
MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 3), D_BXT);
MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 6), D_BXT);
MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 8), D_BXT);
MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 9), D_BXT);
MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 10), D_BXT);
MMIO_D(BXT_PORT_PLL_EBB_0(DPIO_PHY0, DPIO_CH1), D_BXT);
MMIO_D(BXT_PORT_PLL_EBB_4(DPIO_PHY0, DPIO_CH1), D_BXT);
MMIO_D(BXT_PORT_PCS_DW10_LN01(DPIO_PHY0, DPIO_CH1), D_BXT);
MMIO_D(BXT_PORT_PCS_DW10_GRP(DPIO_PHY0, DPIO_CH1), D_BXT);
MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY0, DPIO_CH1), D_BXT);
MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY0, DPIO_CH1), D_BXT);
MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY0, DPIO_CH1), D_BXT,
NULL, bxt_pcs_dw12_grp_write);
MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY0, DPIO_CH1), D_BXT);
MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY0, DPIO_CH1), D_BXT);
MMIO_DH(BXT_PORT_TX_DW3_LN0(DPIO_PHY0, DPIO_CH1), D_BXT,
bxt_port_tx_dw3_read, NULL);
MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY0, DPIO_CH1), D_BXT);
MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY0, DPIO_CH1), D_BXT);
MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY0, DPIO_CH1), D_BXT);
MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 0), D_BXT);
MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 1), D_BXT);
MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 2), D_BXT);
MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 3), D_BXT);
MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 0), D_BXT);
MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 1), D_BXT);
MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 2), D_BXT);
MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 3), D_BXT);
MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 6), D_BXT);
MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 8), D_BXT);
MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 9), D_BXT);
MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 10), D_BXT);
MMIO_D(BXT_PORT_PLL_EBB_0(DPIO_PHY1, DPIO_CH0), D_BXT);
MMIO_D(BXT_PORT_PLL_EBB_4(DPIO_PHY1, DPIO_CH0), D_BXT);
MMIO_D(BXT_PORT_PCS_DW10_LN01(DPIO_PHY1, DPIO_CH0), D_BXT);
MMIO_D(BXT_PORT_PCS_DW10_GRP(DPIO_PHY1, DPIO_CH0), D_BXT);
MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY1, DPIO_CH0), D_BXT);
MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY1, DPIO_CH0), D_BXT);
MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY1, DPIO_CH0), D_BXT,
NULL, bxt_pcs_dw12_grp_write);
MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY1, DPIO_CH0), D_BXT);
MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY1, DPIO_CH0), D_BXT);
MMIO_DH(BXT_PORT_TX_DW3_LN0(DPIO_PHY1, DPIO_CH0), D_BXT,
bxt_port_tx_dw3_read, NULL);
MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY1, DPIO_CH0), D_BXT);
MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY1, DPIO_CH0), D_BXT);
MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY1, DPIO_CH0), D_BXT);
MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 0), D_BXT);
MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 1), D_BXT);
MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 2), D_BXT);
MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 3), D_BXT);
MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 0), D_BXT);
MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 1), D_BXT);
MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 2), D_BXT);
MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 3), D_BXT);
MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 6), D_BXT);
MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 8), D_BXT);
MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 9), D_BXT);
MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 10), D_BXT);
MMIO_D(BXT_DE_PLL_CTL, D_BXT);
MMIO_DH(BXT_DE_PLL_ENABLE, D_BXT, NULL, bxt_de_pll_enable_write);
MMIO_D(BXT_DSI_PLL_CTL, D_BXT);
MMIO_D(BXT_DSI_PLL_ENABLE, D_BXT);
MMIO_D(GEN9_CLKGATE_DIS_0, D_BXT);
MMIO_D(GEN9_CLKGATE_DIS_4, D_BXT);
MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_A), D_BXT);
MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_B), D_BXT);
MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_C), D_BXT);
MMIO_D(RC6_CTX_BASE, D_BXT);
MMIO_D(GEN8_PUSHBUS_CONTROL, D_BXT);
MMIO_D(GEN8_PUSHBUS_ENABLE, D_BXT);
MMIO_D(GEN8_PUSHBUS_SHIFT, D_BXT);
MMIO_D(GEN6_GFXPAUSE, D_BXT);
MMIO_DFH(GEN8_L3SQCREG1, D_BXT, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(GEN8_L3CNTLREG, D_BXT, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(_MMIO(0x20D8), D_BXT, F_CMD_ACCESS, NULL, NULL);
......@@ -3639,17 +2788,14 @@ static int init_bxt_mmio_info(struct intel_gvt *gvt)
return 0;
}
static const struct gvt_mmio_block *find_mmio_block(struct intel_gvt *gvt,
unsigned int offset)
static struct gvt_mmio_block *find_mmio_block(struct intel_gvt *gvt,
unsigned int offset)
{
unsigned long device = intel_gvt_get_device_type(gvt);
const struct gvt_mmio_block *block = gvt->mmio.mmio_block;
struct gvt_mmio_block *block = gvt->mmio.mmio_block;
int num = gvt->mmio.num_mmio_block;
int i;
for (i = 0; i < num; i++, block++) {
if (!(device & block->device))
continue;
if (offset >= i915_mmio_reg_offset(block->offset) &&
offset < i915_mmio_reg_offset(block->offset) + block->size)
return block;
......@@ -3674,23 +2820,117 @@ void intel_gvt_clean_mmio_info(struct intel_gvt *gvt)
hash_for_each_safe(gvt->mmio.mmio_info_table, i, tmp, e, node)
kfree(e);
kfree(gvt->mmio.mmio_block);
gvt->mmio.mmio_block = NULL;
gvt->mmio.num_mmio_block = 0;
vfree(gvt->mmio.mmio_attribute);
gvt->mmio.mmio_attribute = NULL;
}
/* Special MMIO blocks. Registers in MMIO block ranges should not be command
 * accessible (i.e. should have no F_CMD_ACCESS flag); otherwise
 * cmd_reg_handler() in cmd_parser.c needs to be updated accordingly.
 */
static const struct gvt_mmio_block mmio_blocks[] = {
{D_SKL_PLUS, _MMIO(DMC_MMIO_START_RANGE), 0x3000, NULL, NULL},
{D_ALL, _MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000, NULL, NULL},
{D_ALL, _MMIO(VGT_PVINFO_PAGE), VGT_PVINFO_SIZE,
pvinfo_mmio_read, pvinfo_mmio_write},
{D_ALL, LGC_PALETTE(PIPE_A, 0), 1024, NULL, NULL},
{D_ALL, LGC_PALETTE(PIPE_B, 0), 1024, NULL, NULL},
{D_ALL, LGC_PALETTE(PIPE_C, 0), 1024, NULL, NULL},
};
static int handle_mmio(struct intel_gvt_mmio_table_iter *iter, u32 offset,
u32 size)
{
struct intel_gvt *gvt = iter->data;
struct intel_gvt_mmio_info *info, *p;
u32 start, end, i;
if (WARN_ON(!IS_ALIGNED(offset, 4)))
return -EINVAL;
start = offset;
end = offset + size;
for (i = start; i < end; i += 4) {
p = intel_gvt_find_mmio_info(gvt, i);
if (p) {
WARN(1, "dup mmio definition offset %x\n", i);
/* Return -EEXIST here to make the GVT-g load fail, so that
 * duplicated MMIO definitions are caught as early as possible.
 */
return -EEXIST;
}
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
info->offset = i;
info->read = intel_vgpu_default_mmio_read;
info->write = intel_vgpu_default_mmio_write;
INIT_HLIST_NODE(&info->node);
hash_add(gvt->mmio.mmio_info_table, &info->node, info->offset);
gvt->mmio.num_tracked_mmio++;
}
return 0;
}
static int handle_mmio_block(struct intel_gvt_mmio_table_iter *iter,
u32 offset, u32 size)
{
struct intel_gvt *gvt = iter->data;
struct gvt_mmio_block *block = gvt->mmio.mmio_block;
void *ret;
ret = krealloc(block,
(gvt->mmio.num_mmio_block + 1) * sizeof(*block),
GFP_KERNEL);
if (!ret)
return -ENOMEM;
gvt->mmio.mmio_block = block = ret;
block += gvt->mmio.num_mmio_block;
memset(block, 0, sizeof(*block));
block->offset = _MMIO(offset);
block->size = size;
gvt->mmio.num_mmio_block++;
return 0;
}
static int handle_mmio_cb(struct intel_gvt_mmio_table_iter *iter, u32 offset,
u32 size)
{
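	/*
	 * Register ranges of 1KB or more are tracked as whole MMIO blocks
	 * rather than as per-register mmio_info entries; the GEN9_GFX_MOCS
	 * range is the exception and still gets per-register entries.
	 */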
if (size < 1024 || offset == i915_mmio_reg_offset(GEN9_GFX_MOCS(0)))
return handle_mmio(iter, offset, size);
else
return handle_mmio_block(iter, offset, size);
}
static int init_mmio_info(struct intel_gvt *gvt)
{
struct intel_gvt_mmio_table_iter iter = {
.i915 = gvt->gt->i915,
.data = gvt,
.handle_mmio_cb = handle_mmio_cb,
};
return intel_gvt_iterate_mmio_table(&iter);
}
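For context, intel_gvt_iterate_mmio_table() is provided by i915 and is expected to invoke handle_mmio_cb once per register range it exposes. A minimal sketch of such an iterator, with a hypothetical two-entry table rather than the real i915 data, might look like:

static int example_iterate(struct intel_gvt_mmio_table_iter *iter)
{
	/* hypothetical (offset, size) pairs; the real table lives in i915 */
	static const struct { u32 offset, size; } table[] = {
		{ 0x2000, 4 },		/* a single register */
		{ 0x80000, 0x3000 },	/* a large range, becomes an mmio_block */
	};
	unsigned int i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(table); i++) {
		ret = iter->handle_mmio_cb(iter, table[i].offset, table[i].size);
		if (ret)
			return ret;
	}
	return 0;
}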
static int init_mmio_block_handlers(struct intel_gvt *gvt)
{
struct gvt_mmio_block *block;
block = find_mmio_block(gvt, VGT_PVINFO_PAGE);
if (!block) {
WARN(1, "fail to assign handlers to mmio block %x\n",
VGT_PVINFO_PAGE);
return -ENODEV;
}
block->read = pvinfo_mmio_read;
block->write = pvinfo_mmio_write;
return 0;
}
/**
* intel_gvt_setup_mmio_info - setup MMIO information table for GVT device
......@@ -3713,6 +2953,14 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
if (!gvt->mmio.mmio_attribute)
return -ENOMEM;
ret = init_mmio_info(gvt);
if (ret)
goto err;
ret = init_mmio_block_handlers(gvt);
if (ret)
goto err;
ret = init_generic_mmio_info(gvt);
if (ret)
goto err;
......@@ -3743,9 +2991,6 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
goto err;
}
gvt->mmio.mmio_block = mmio_blocks;
gvt->mmio.num_mmio_block = ARRAY_SIZE(mmio_blocks);
return 0;
err:
intel_gvt_clean_mmio_info(gvt);
......@@ -3765,7 +3010,7 @@ int intel_gvt_for_each_tracked_mmio(struct intel_gvt *gvt,
int (*handler)(struct intel_gvt *gvt, u32 offset, void *data),
void *data)
{
const struct gvt_mmio_block *block = gvt->mmio.mmio_block;
struct gvt_mmio_block *block = gvt->mmio.mmio_block;
struct intel_gvt_mmio_info *e;
int i, j, ret;
......@@ -3781,9 +3026,7 @@ int intel_gvt_for_each_tracked_mmio(struct intel_gvt *gvt,
continue;
for (j = 0; j < block->size; j += 4) {
ret = handler(gvt,
i915_mmio_reg_offset(block->offset) + j,
data);
ret = handler(gvt, i915_mmio_reg_offset(block->offset) + j, data);
if (ret)
return ret;
}
......@@ -3883,7 +3126,7 @@ int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
struct intel_gvt *gvt = vgpu->gvt;
struct intel_gvt_mmio_info *mmio_info;
const struct gvt_mmio_block *mmio_block;
struct gvt_mmio_block *mmio_block;
gvt_mmio_func func;
int ret;
......
/*
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Eddie Dong <eddie.dong@intel.com>
* Dexuan Cui
* Jike Song <jike.song@intel.com>
*
* Contributors:
* Zhi Wang <zhi.a.wang@intel.com>
*
*/
#ifndef _GVT_HYPERCALL_H_
#define _GVT_HYPERCALL_H_
#include <linux/types.h>
struct device;
enum hypervisor_type {
INTEL_GVT_HYPERVISOR_XEN = 0,
INTEL_GVT_HYPERVISOR_KVM,
};
/*
* Function collections for the hypervisor-specific GVT-g MPT modules.
* Currently GVT-g supports both Xen and KVM by providing dedicated
* hypervisor-related MPT modules.
*/
struct intel_gvt_mpt {
enum hypervisor_type type;
int (*host_init)(struct device *dev, void *gvt, const void *ops);
void (*host_exit)(struct device *dev, void *gvt);
int (*attach_vgpu)(void *vgpu, unsigned long *handle);
void (*detach_vgpu)(void *vgpu);
int (*inject_msi)(unsigned long handle, u32 addr, u16 data);
unsigned long (*from_virt_to_mfn)(void *p);
int (*enable_page_track)(unsigned long handle, u64 gfn);
int (*disable_page_track)(unsigned long handle, u64 gfn);
int (*read_gpa)(unsigned long handle, unsigned long gpa, void *buf,
unsigned long len);
int (*write_gpa)(unsigned long handle, unsigned long gpa, void *buf,
unsigned long len);
unsigned long (*gfn_to_mfn)(unsigned long handle, unsigned long gfn);
int (*dma_map_guest_page)(unsigned long handle, unsigned long gfn,
unsigned long size, dma_addr_t *dma_addr);
void (*dma_unmap_guest_page)(unsigned long handle, dma_addr_t dma_addr);
int (*dma_pin_guest_page)(unsigned long handle, dma_addr_t dma_addr);
int (*map_gfn_to_mfn)(unsigned long handle, unsigned long gfn,
unsigned long mfn, unsigned int nr, bool map);
int (*set_trap_area)(unsigned long handle, u64 start, u64 end,
bool map);
int (*set_opregion)(void *vgpu);
int (*set_edid)(void *vgpu, int port_num);
int (*get_vfio_device)(void *vgpu);
void (*put_vfio_device)(void *vgpu);
bool (*is_valid_gfn)(unsigned long handle, unsigned long gfn);
};
#endif /* _GVT_HYPERCALL_H_ */
......@@ -29,6 +29,8 @@
*
*/
#include <linux/eventfd.h>
#include "i915_drv.h"
#include "i915_reg.h"
#include "gvt.h"
......@@ -397,9 +399,45 @@ static void init_irq_map(struct intel_gvt_irq *irq)
}
/* =======================vEvent injection===================== */
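/*
 * Offsets within the standard PCI MSI capability in the vGPU's virtual
 * config space: message control at +2, message address at +4 and message
 * data at +8 (the 32-bit address, single-vector format), matching the
 * macros below.
 */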
#define MSI_CAP_CONTROL(offset) (offset + 2)
#define MSI_CAP_ADDRESS(offset) (offset + 4)
#define MSI_CAP_DATA(offset) (offset + 8)
#define MSI_CAP_EN 0x1
static int inject_virtual_interrupt(struct intel_vgpu *vgpu)
{
return intel_gvt_hypervisor_inject_msi(vgpu);
unsigned long offset = vgpu->gvt->device_info.msi_cap_offset;
u16 control, data;
u32 addr;
control = *(u16 *)(vgpu_cfg_space(vgpu) + MSI_CAP_CONTROL(offset));
addr = *(u32 *)(vgpu_cfg_space(vgpu) + MSI_CAP_ADDRESS(offset));
data = *(u16 *)(vgpu_cfg_space(vgpu) + MSI_CAP_DATA(offset));
/* Do not generate MSI if MSIEN is disabled */
if (!(control & MSI_CAP_EN))
return 0;
if (WARN(control & GENMASK(15, 1), "only support one MSI format\n"))
return -EINVAL;
trace_inject_msi(vgpu->id, addr, data);
/*
* When the guest is powered off, msi_trigger is set to NULL, but the
* vGPU's config space and MMIO registers are not restored to their
* defaults. If this vGPU is reused for the next VM, its pipes may still
* be enabled, so once the vGPU becomes active it will receive vblank
* interrupt injection requests. msi_trigger stays NULL until the guest
* enables MSI, so if msi_trigger is NULL, return success without
* injecting an interrupt into the guest.
*/
if (!vgpu->attached)
return -ESRCH;
if (vgpu->msi_trigger && eventfd_signal(vgpu->msi_trigger, 1) != 1)
return -EFAULT;
return 0;
}
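For completeness, the eventfd signalled above is the one userspace registers through VFIO_DEVICE_SET_IRQS. A rough userspace-side sketch (hypothetical helper; standard vfio uAPI, not part of this patch) of how a VMM such as QEMU would wire it up:

#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

/* Hypothetical userspace helper: register event_fd as the MSI trigger so
 * that eventfd_signal() in the kernel ends up as a guest interrupt. */
static int set_msi_eventfd(int device_fd, int event_fd)
{
	struct vfio_irq_set *set;
	int argsz = sizeof(*set) + sizeof(int32_t);
	int ret;

	set = calloc(1, argsz);
	if (!set)
		return -1;
	set->argsz = argsz;
	set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
	set->index = VFIO_PCI_MSI_IRQ_INDEX;
	set->start = 0;
	set->count = 1;
	*(int32_t *)set->data = event_fd;
	ret = ioctl(device_fd, VFIO_DEVICE_SET_IRQS, set);
	free(set);
	return ret;
}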
static void propagate_event(struct intel_gvt_irq *irq,
......
/*
* KVMGT - the implementation of Intel mediated pass-through framework for KVM
*
* Copyright(c) 2014-2016 Intel Corporation. All rights reserved.
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
......@@ -26,6 +26,11 @@
* Kevin Tian <kevin.tian@intel.com>
* Jike Song <jike.song@intel.com>
* Xiaoguang Chen <xiaoguang.chen@intel.com>
* Eddie Dong <eddie.dong@intel.com>
*
* Contributors:
* Niu Bing <bing.niu@intel.com>
* Zhi Wang <zhi.a.wang@intel.com>
*/
#include <linux/init.h>
......@@ -39,8 +44,6 @@
#include <linux/spinlock.h>
#include <linux/eventfd.h>
#include <linux/uuid.h>
#include <linux/kvm_host.h>
#include <linux/vfio.h>
#include <linux/mdev.h>
#include <linux/debugfs.h>
......@@ -49,9 +52,11 @@
#include <drm/drm_edid.h>
#include "i915_drv.h"
#include "intel_gvt.h"
#include "gvt.h"
static const struct intel_gvt_ops *intel_gvt_ops;
MODULE_IMPORT_NS(DMA_BUF);
MODULE_IMPORT_NS(I915_GVT);
/* helper macros copied from vfio-pci */
#define VFIO_PCI_OFFSET_SHIFT 40
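/*
 * As in vfio-pci, the file offset passed to read/write/mmap encodes the
 * target region: bits above VFIO_PCI_OFFSET_SHIFT hold the region index,
 * the low 40 bits the offset within that region.
 */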
......@@ -90,16 +95,6 @@ struct kvmgt_pgfn {
struct hlist_node hnode;
};
#define KVMGT_DEBUGFS_FILENAME "kvmgt_nr_cache_entries"
struct kvmgt_guest_info {
struct kvm *kvm;
struct intel_vgpu *vgpu;
struct kvm_page_track_notifier_node track_node;
#define NR_BKT (1 << 18)
struct hlist_head ptable[NR_BKT];
#undef NR_BKT
};
struct gvt_dma {
struct intel_vgpu *vgpu;
struct rb_node gfn_node;
......@@ -110,41 +105,15 @@ struct gvt_dma {
struct kref ref;
};
struct kvmgt_vdev {
struct intel_vgpu *vgpu;
struct mdev_device *mdev;
struct vfio_region *region;
int num_regions;
struct eventfd_ctx *intx_trigger;
struct eventfd_ctx *msi_trigger;
#define vfio_dev_to_vgpu(vfio_dev) \
container_of((vfio_dev), struct intel_vgpu, vfio_device)
/*
* Two caches are used to avoid mapping duplicated pages (e.g.
* scratch pages). This helps to reduce DMA setup overhead.
*/
struct rb_root gfn_cache;
struct rb_root dma_addr_cache;
unsigned long nr_cache_entries;
struct mutex cache_lock;
struct notifier_block iommu_notifier;
struct notifier_block group_notifier;
struct kvm *kvm;
struct work_struct release_work;
atomic_t released;
struct vfio_device *vfio_device;
struct vfio_group *vfio_group;
};
static inline struct kvmgt_vdev *kvmgt_vdev(struct intel_vgpu *vgpu)
{
return intel_vgpu_vdev(vgpu);
}
static inline bool handle_valid(unsigned long handle)
{
return !!(handle & ~0xff);
}
static void kvmgt_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa,
const u8 *val, int len,
struct kvm_page_track_notifier_node *node);
static void kvmgt_page_track_flush_slot(struct kvm *kvm,
struct kvm_memory_slot *slot,
struct kvm_page_track_notifier_node *node);
static ssize_t available_instances_show(struct mdev_type *mtype,
struct mdev_type_attribute *attr,
......@@ -259,15 +228,12 @@ static void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt)
}
}
static int kvmgt_guest_init(struct mdev_device *mdev);
static void intel_vgpu_release_work(struct work_struct *work);
static bool kvmgt_guest_exit(struct kvmgt_guest_info *info);
static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
unsigned long size)
{
struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
int total_pages;
int npage;
int ret;
......@@ -277,7 +243,7 @@ static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
for (npage = 0; npage < total_pages; npage++) {
unsigned long cur_gfn = gfn + npage;
ret = vfio_group_unpin_pages(vdev->vfio_group, &cur_gfn, 1);
ret = vfio_group_unpin_pages(vgpu->vfio_group, &cur_gfn, 1);
drm_WARN_ON(&i915->drm, ret != 1);
}
}
......@@ -286,7 +252,6 @@ static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
unsigned long size, struct page **page)
{
struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
unsigned long base_pfn = 0;
int total_pages;
int npage;
......@@ -301,7 +266,7 @@ static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
unsigned long cur_gfn = gfn + npage;
unsigned long pfn;
ret = vfio_group_pin_pages(vdev->vfio_group, &cur_gfn, 1,
ret = vfio_group_pin_pages(vgpu->vfio_group, &cur_gfn, 1,
IOMMU_READ | IOMMU_WRITE, &pfn);
if (ret != 1) {
gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx, ret %d\n",
......@@ -368,7 +333,7 @@ static void gvt_dma_unmap_page(struct intel_vgpu *vgpu, unsigned long gfn,
static struct gvt_dma *__gvt_cache_find_dma_addr(struct intel_vgpu *vgpu,
dma_addr_t dma_addr)
{
struct rb_node *node = kvmgt_vdev(vgpu)->dma_addr_cache.rb_node;
struct rb_node *node = vgpu->dma_addr_cache.rb_node;
struct gvt_dma *itr;
while (node) {
......@@ -386,7 +351,7 @@ static struct gvt_dma *__gvt_cache_find_dma_addr(struct intel_vgpu *vgpu,
static struct gvt_dma *__gvt_cache_find_gfn(struct intel_vgpu *vgpu, gfn_t gfn)
{
struct rb_node *node = kvmgt_vdev(vgpu)->gfn_cache.rb_node;
struct rb_node *node = vgpu->gfn_cache.rb_node;
struct gvt_dma *itr;
while (node) {
......@@ -407,7 +372,6 @@ static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
{
struct gvt_dma *new, *itr;
struct rb_node **link, *parent = NULL;
struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
new = kzalloc(sizeof(struct gvt_dma), GFP_KERNEL);
if (!new)
......@@ -420,7 +384,7 @@ static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
kref_init(&new->ref);
/* gfn_cache maps gfn to struct gvt_dma. */
link = &vdev->gfn_cache.rb_node;
link = &vgpu->gfn_cache.rb_node;
while (*link) {
parent = *link;
itr = rb_entry(parent, struct gvt_dma, gfn_node);
......@@ -431,11 +395,11 @@ static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
link = &parent->rb_right;
}
rb_link_node(&new->gfn_node, parent, link);
rb_insert_color(&new->gfn_node, &vdev->gfn_cache);
rb_insert_color(&new->gfn_node, &vgpu->gfn_cache);
/* dma_addr_cache maps dma addr to struct gvt_dma. */
parent = NULL;
link = &vdev->dma_addr_cache.rb_node;
link = &vgpu->dma_addr_cache.rb_node;
while (*link) {
parent = *link;
itr = rb_entry(parent, struct gvt_dma, dma_addr_node);
......@@ -446,59 +410,54 @@ static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
link = &parent->rb_right;
}
rb_link_node(&new->dma_addr_node, parent, link);
rb_insert_color(&new->dma_addr_node, &vdev->dma_addr_cache);
rb_insert_color(&new->dma_addr_node, &vgpu->dma_addr_cache);
vdev->nr_cache_entries++;
vgpu->nr_cache_entries++;
return 0;
}
static void __gvt_cache_remove_entry(struct intel_vgpu *vgpu,
struct gvt_dma *entry)
{
struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
rb_erase(&entry->gfn_node, &vdev->gfn_cache);
rb_erase(&entry->dma_addr_node, &vdev->dma_addr_cache);
rb_erase(&entry->gfn_node, &vgpu->gfn_cache);
rb_erase(&entry->dma_addr_node, &vgpu->dma_addr_cache);
kfree(entry);
vdev->nr_cache_entries--;
vgpu->nr_cache_entries--;
}
static void gvt_cache_destroy(struct intel_vgpu *vgpu)
{
struct gvt_dma *dma;
struct rb_node *node = NULL;
struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
for (;;) {
mutex_lock(&vdev->cache_lock);
node = rb_first(&vdev->gfn_cache);
mutex_lock(&vgpu->cache_lock);
node = rb_first(&vgpu->gfn_cache);
if (!node) {
mutex_unlock(&vdev->cache_lock);
mutex_unlock(&vgpu->cache_lock);
break;
}
dma = rb_entry(node, struct gvt_dma, gfn_node);
gvt_dma_unmap_page(vgpu, dma->gfn, dma->dma_addr, dma->size);
__gvt_cache_remove_entry(vgpu, dma);
mutex_unlock(&vdev->cache_lock);
mutex_unlock(&vgpu->cache_lock);
}
}
static void gvt_cache_init(struct intel_vgpu *vgpu)
{
struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
vdev->gfn_cache = RB_ROOT;
vdev->dma_addr_cache = RB_ROOT;
vdev->nr_cache_entries = 0;
mutex_init(&vdev->cache_lock);
vgpu->gfn_cache = RB_ROOT;
vgpu->dma_addr_cache = RB_ROOT;
vgpu->nr_cache_entries = 0;
mutex_init(&vgpu->cache_lock);
}
static void kvmgt_protect_table_init(struct kvmgt_guest_info *info)
static void kvmgt_protect_table_init(struct intel_vgpu *info)
{
hash_init(info->ptable);
}
static void kvmgt_protect_table_destroy(struct kvmgt_guest_info *info)
static void kvmgt_protect_table_destroy(struct intel_vgpu *info)
{
struct kvmgt_pgfn *p;
struct hlist_node *tmp;
......@@ -511,7 +470,7 @@ static void kvmgt_protect_table_destroy(struct kvmgt_guest_info *info)
}
static struct kvmgt_pgfn *
__kvmgt_protect_table_find(struct kvmgt_guest_info *info, gfn_t gfn)
__kvmgt_protect_table_find(struct intel_vgpu *info, gfn_t gfn)
{
struct kvmgt_pgfn *p, *res = NULL;
......@@ -525,8 +484,7 @@ __kvmgt_protect_table_find(struct kvmgt_guest_info *info, gfn_t gfn)
return res;
}
static bool kvmgt_gfn_is_write_protected(struct kvmgt_guest_info *info,
gfn_t gfn)
static bool kvmgt_gfn_is_write_protected(struct intel_vgpu *info, gfn_t gfn)
{
struct kvmgt_pgfn *p;
......@@ -534,7 +492,7 @@ static bool kvmgt_gfn_is_write_protected(struct kvmgt_guest_info *info,
return !!p;
}
static void kvmgt_protect_table_add(struct kvmgt_guest_info *info, gfn_t gfn)
static void kvmgt_protect_table_add(struct intel_vgpu *info, gfn_t gfn)
{
struct kvmgt_pgfn *p;
......@@ -549,8 +507,7 @@ static void kvmgt_protect_table_add(struct kvmgt_guest_info *info, gfn_t gfn)
hash_add(info->ptable, &p->hnode, gfn);
}
static void kvmgt_protect_table_del(struct kvmgt_guest_info *info,
gfn_t gfn)
static void kvmgt_protect_table_del(struct intel_vgpu *info, gfn_t gfn)
{
struct kvmgt_pgfn *p;
......@@ -564,18 +521,17 @@ static void kvmgt_protect_table_del(struct kvmgt_guest_info *info,
static size_t intel_vgpu_reg_rw_opregion(struct intel_vgpu *vgpu, char *buf,
size_t count, loff_t *ppos, bool iswrite)
{
struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) -
VFIO_PCI_NUM_REGIONS;
void *base = vdev->region[i].data;
void *base = vgpu->region[i].data;
loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
if (pos >= vdev->region[i].size || iswrite) {
if (pos >= vgpu->region[i].size || iswrite) {
gvt_vgpu_err("invalid op or offset for Intel vgpu OpRegion\n");
return -EINVAL;
}
count = min(count, (size_t)(vdev->region[i].size - pos));
count = min(count, (size_t)(vgpu->region[i].size - pos));
memcpy(buf, base + pos, count);
return count;
......@@ -617,9 +573,9 @@ static int handle_edid_regs(struct intel_vgpu *vgpu,
gvt_vgpu_err("invalid EDID blob\n");
return -EINVAL;
}
intel_gvt_ops->emulate_hotplug(vgpu, true);
intel_vgpu_emulate_hotplug(vgpu, true);
} else if (data == VFIO_DEVICE_GFX_LINK_STATE_DOWN)
intel_gvt_ops->emulate_hotplug(vgpu, false);
intel_vgpu_emulate_hotplug(vgpu, false);
else {
gvt_vgpu_err("invalid EDID link state %d\n",
regs->link_state);
......@@ -668,8 +624,7 @@ static size_t intel_vgpu_reg_rw_edid(struct intel_vgpu *vgpu, char *buf,
int ret;
unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) -
VFIO_PCI_NUM_REGIONS;
struct vfio_edid_region *region =
(struct vfio_edid_region *)kvmgt_vdev(vgpu)->region[i].data;
struct vfio_edid_region *region = vgpu->region[i].data;
loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
if (pos < region->vfio_edid_regs.edid_offset) {
......@@ -701,44 +656,27 @@ static int intel_vgpu_register_reg(struct intel_vgpu *vgpu,
const struct intel_vgpu_regops *ops,
size_t size, u32 flags, void *data)
{
struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
struct vfio_region *region;
region = krealloc(vdev->region,
(vdev->num_regions + 1) * sizeof(*region),
region = krealloc(vgpu->region,
(vgpu->num_regions + 1) * sizeof(*region),
GFP_KERNEL);
if (!region)
return -ENOMEM;
vdev->region = region;
vdev->region[vdev->num_regions].type = type;
vdev->region[vdev->num_regions].subtype = subtype;
vdev->region[vdev->num_regions].ops = ops;
vdev->region[vdev->num_regions].size = size;
vdev->region[vdev->num_regions].flags = flags;
vdev->region[vdev->num_regions].data = data;
vdev->num_regions++;
vgpu->region = region;
vgpu->region[vgpu->num_regions].type = type;
vgpu->region[vgpu->num_regions].subtype = subtype;
vgpu->region[vgpu->num_regions].ops = ops;
vgpu->region[vgpu->num_regions].size = size;
vgpu->region[vgpu->num_regions].flags = flags;
vgpu->region[vgpu->num_regions].data = data;
vgpu->num_regions++;
return 0;
}
static int kvmgt_get_vfio_device(void *p_vgpu)
int intel_gvt_set_opregion(struct intel_vgpu *vgpu)
{
struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
vdev->vfio_device = vfio_device_get_from_dev(
mdev_dev(vdev->mdev));
if (!vdev->vfio_device) {
gvt_vgpu_err("failed to get vfio device\n");
return -ENODEV;
}
return 0;
}
static int kvmgt_set_opregion(void *p_vgpu)
{
struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
void *base;
int ret;
......@@ -764,9 +702,8 @@ static int kvmgt_set_opregion(void *p_vgpu)
return ret;
}
static int kvmgt_set_edid(void *p_vgpu, int port_num)
int intel_gvt_set_edid(struct intel_vgpu *vgpu, int port_num)
{
struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num);
struct vfio_edid_region *base;
int ret;
......@@ -794,71 +731,11 @@ static int kvmgt_set_edid(void *p_vgpu, int port_num)
return ret;
}
static void kvmgt_put_vfio_device(void *vgpu)
{
struct kvmgt_vdev *vdev = kvmgt_vdev((struct intel_vgpu *)vgpu);
if (WARN_ON(!vdev->vfio_device))
return;
vfio_device_put(vdev->vfio_device);
}
static int intel_vgpu_create(struct mdev_device *mdev)
{
struct intel_vgpu *vgpu = NULL;
struct intel_vgpu_type *type;
struct device *pdev;
struct intel_gvt *gvt;
int ret;
pdev = mdev_parent_dev(mdev);
gvt = kdev_to_i915(pdev)->gvt;
type = &gvt->types[mdev_get_type_group_id(mdev)];
if (!type) {
ret = -EINVAL;
goto out;
}
vgpu = intel_gvt_ops->vgpu_create(gvt, type);
if (IS_ERR_OR_NULL(vgpu)) {
ret = vgpu == NULL ? -EFAULT : PTR_ERR(vgpu);
gvt_err("failed to create intel vgpu: %d\n", ret);
goto out;
}
INIT_WORK(&kvmgt_vdev(vgpu)->release_work, intel_vgpu_release_work);
kvmgt_vdev(vgpu)->mdev = mdev;
mdev_set_drvdata(mdev, vgpu);
gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n",
dev_name(mdev_dev(mdev)));
ret = 0;
out:
return ret;
}
static int intel_vgpu_remove(struct mdev_device *mdev)
{
struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
if (handle_valid(vgpu->handle))
return -EBUSY;
intel_gvt_ops->vgpu_destroy(vgpu);
return 0;
}
static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
struct kvmgt_vdev *vdev = container_of(nb,
struct kvmgt_vdev,
iommu_notifier);
struct intel_vgpu *vgpu = vdev->vgpu;
struct intel_vgpu *vgpu =
container_of(nb, struct intel_vgpu, iommu_notifier);
if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
struct vfio_iommu_type1_dma_unmap *unmap = data;
......@@ -868,7 +745,7 @@ static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
iov_pfn = unmap->iova >> PAGE_SHIFT;
end_iov_pfn = iov_pfn + unmap->size / PAGE_SIZE;
mutex_lock(&vdev->cache_lock);
mutex_lock(&vgpu->cache_lock);
for (; iov_pfn < end_iov_pfn; iov_pfn++) {
entry = __gvt_cache_find_gfn(vgpu, iov_pfn);
if (!entry)
......@@ -878,7 +755,7 @@ static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
entry->size);
__gvt_cache_remove_entry(vgpu, entry);
}
mutex_unlock(&vdev->cache_lock);
mutex_unlock(&vgpu->cache_lock);
}
return NOTIFY_OK;
......@@ -887,35 +764,54 @@ static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
static int intel_vgpu_group_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
struct kvmgt_vdev *vdev = container_of(nb,
struct kvmgt_vdev,
group_notifier);
struct intel_vgpu *vgpu =
container_of(nb, struct intel_vgpu, group_notifier);
/* the only action we care about */
if (action == VFIO_GROUP_NOTIFY_SET_KVM) {
vdev->kvm = data;
vgpu->kvm = data;
if (!data)
schedule_work(&vdev->release_work);
schedule_work(&vgpu->release_work);
}
return NOTIFY_OK;
}
static int intel_vgpu_open_device(struct mdev_device *mdev)
static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu)
{
struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
struct intel_vgpu *itr;
int id;
bool ret = false;
mutex_lock(&vgpu->gvt->lock);
for_each_active_vgpu(vgpu->gvt, itr, id) {
if (!itr->attached)
continue;
if (vgpu->kvm == itr->kvm) {
ret = true;
goto out;
}
}
out:
mutex_unlock(&vgpu->gvt->lock);
return ret;
}
static int intel_vgpu_open_device(struct vfio_device *vfio_dev)
{
struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
unsigned long events;
int ret;
struct vfio_group *vfio_group;
vdev->iommu_notifier.notifier_call = intel_vgpu_iommu_notifier;
vdev->group_notifier.notifier_call = intel_vgpu_group_notifier;
vgpu->iommu_notifier.notifier_call = intel_vgpu_iommu_notifier;
vgpu->group_notifier.notifier_call = intel_vgpu_group_notifier;
events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, &events,
&vdev->iommu_notifier);
ret = vfio_register_notifier(vfio_dev->dev, VFIO_IOMMU_NOTIFY, &events,
&vgpu->iommu_notifier);
if (ret != 0) {
gvt_vgpu_err("vfio_register_notifier for iommu failed: %d\n",
ret);
......@@ -923,117 +819,129 @@ static int intel_vgpu_open_device(struct mdev_device *mdev)
}
events = VFIO_GROUP_NOTIFY_SET_KVM;
ret = vfio_register_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, &events,
&vdev->group_notifier);
ret = vfio_register_notifier(vfio_dev->dev, VFIO_GROUP_NOTIFY, &events,
&vgpu->group_notifier);
if (ret != 0) {
gvt_vgpu_err("vfio_register_notifier for group failed: %d\n",
ret);
goto undo_iommu;
}
vfio_group = vfio_group_get_external_user_from_dev(mdev_dev(mdev));
vfio_group =
vfio_group_get_external_user_from_dev(vgpu->vfio_device.dev);
if (IS_ERR_OR_NULL(vfio_group)) {
ret = !vfio_group ? -EFAULT : PTR_ERR(vfio_group);
gvt_vgpu_err("vfio_group_get_external_user_from_dev failed\n");
goto undo_register;
}
vdev->vfio_group = vfio_group;
vgpu->vfio_group = vfio_group;
/* Take a module reference as mdev core doesn't take
* a reference for vendor driver.
*/
if (!try_module_get(THIS_MODULE)) {
ret = -ENODEV;
ret = -EEXIST;
if (vgpu->attached)
goto undo_group;
ret = -ESRCH;
if (!vgpu->kvm || vgpu->kvm->mm != current->mm) {
gvt_vgpu_err("KVM is required to use Intel vGPU\n");
goto undo_group;
}
ret = kvmgt_guest_init(mdev);
if (ret)
ret = -EEXIST;
if (__kvmgt_vgpu_exist(vgpu))
goto undo_group;
intel_gvt_ops->vgpu_activate(vgpu);
vgpu->attached = true;
kvm_get_kvm(vgpu->kvm);
atomic_set(&vdev->released, 0);
return ret;
kvmgt_protect_table_init(vgpu);
gvt_cache_init(vgpu);
vgpu->track_node.track_write = kvmgt_page_track_write;
vgpu->track_node.track_flush_slot = kvmgt_page_track_flush_slot;
kvm_page_track_register_notifier(vgpu->kvm, &vgpu->track_node);
debugfs_create_ulong(KVMGT_DEBUGFS_FILENAME, 0444, vgpu->debugfs,
&vgpu->nr_cache_entries);
intel_gvt_activate_vgpu(vgpu);
atomic_set(&vgpu->released, 0);
return 0;
undo_group:
vfio_group_put_external_user(vdev->vfio_group);
vdev->vfio_group = NULL;
vfio_group_put_external_user(vgpu->vfio_group);
vgpu->vfio_group = NULL;
undo_register:
vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
&vdev->group_notifier);
vfio_unregister_notifier(vfio_dev->dev, VFIO_GROUP_NOTIFY,
&vgpu->group_notifier);
undo_iommu:
vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
&vdev->iommu_notifier);
vfio_unregister_notifier(vfio_dev->dev, VFIO_IOMMU_NOTIFY,
&vgpu->iommu_notifier);
out:
return ret;
}
static void intel_vgpu_release_msi_eventfd_ctx(struct intel_vgpu *vgpu)
{
struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
struct eventfd_ctx *trigger;
trigger = vdev->msi_trigger;
trigger = vgpu->msi_trigger;
if (trigger) {
eventfd_ctx_put(trigger);
vdev->msi_trigger = NULL;
vgpu->msi_trigger = NULL;
}
}
static void __intel_vgpu_release(struct intel_vgpu *vgpu)
{
struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
struct kvmgt_guest_info *info;
int ret;
if (!handle_valid(vgpu->handle))
if (!vgpu->attached)
return;
if (atomic_cmpxchg(&vdev->released, 0, 1))
if (atomic_cmpxchg(&vgpu->released, 0, 1))
return;
intel_gvt_ops->vgpu_release(vgpu);
intel_gvt_release_vgpu(vgpu);
ret = vfio_unregister_notifier(mdev_dev(vdev->mdev), VFIO_IOMMU_NOTIFY,
&vdev->iommu_notifier);
ret = vfio_unregister_notifier(vgpu->vfio_device.dev, VFIO_IOMMU_NOTIFY,
&vgpu->iommu_notifier);
drm_WARN(&i915->drm, ret,
"vfio_unregister_notifier for iommu failed: %d\n", ret);
ret = vfio_unregister_notifier(mdev_dev(vdev->mdev), VFIO_GROUP_NOTIFY,
&vdev->group_notifier);
ret = vfio_unregister_notifier(vgpu->vfio_device.dev, VFIO_GROUP_NOTIFY,
&vgpu->group_notifier);
drm_WARN(&i915->drm, ret,
"vfio_unregister_notifier for group failed: %d\n", ret);
/* dereference module reference taken at open */
module_put(THIS_MODULE);
debugfs_remove(debugfs_lookup(KVMGT_DEBUGFS_FILENAME, vgpu->debugfs));
info = (struct kvmgt_guest_info *)vgpu->handle;
kvmgt_guest_exit(info);
kvm_page_track_unregister_notifier(vgpu->kvm, &vgpu->track_node);
kvm_put_kvm(vgpu->kvm);
kvmgt_protect_table_destroy(vgpu);
gvt_cache_destroy(vgpu);
intel_vgpu_release_msi_eventfd_ctx(vgpu);
vfio_group_put_external_user(vdev->vfio_group);
vfio_group_put_external_user(vgpu->vfio_group);
vdev->kvm = NULL;
vgpu->handle = 0;
vgpu->kvm = NULL;
vgpu->attached = false;
}
static void intel_vgpu_close_device(struct mdev_device *mdev)
static void intel_vgpu_close_device(struct vfio_device *vfio_dev)
{
struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
__intel_vgpu_release(vgpu);
__intel_vgpu_release(vfio_dev_to_vgpu(vfio_dev));
}
static void intel_vgpu_release_work(struct work_struct *work)
{
struct kvmgt_vdev *vdev = container_of(work, struct kvmgt_vdev,
release_work);
struct intel_vgpu *vgpu =
container_of(work, struct intel_vgpu, release_work);
__intel_vgpu_release(vdev->vgpu);
__intel_vgpu_release(vgpu);
}
static u64 intel_vgpu_get_bar_addr(struct intel_vgpu *vgpu, int bar)
......@@ -1070,10 +978,10 @@ static int intel_vgpu_bar_rw(struct intel_vgpu *vgpu, int bar, u64 off,
int ret;
if (is_write)
ret = intel_gvt_ops->emulate_mmio_write(vgpu,
ret = intel_vgpu_emulate_mmio_write(vgpu,
bar_start + off, buf, count);
else
ret = intel_gvt_ops->emulate_mmio_read(vgpu,
ret = intel_vgpu_emulate_mmio_read(vgpu,
bar_start + off, buf, count);
return ret;
}
......@@ -1111,17 +1019,15 @@ static int intel_vgpu_aperture_rw(struct intel_vgpu *vgpu, u64 off,
return 0;
}
static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
static ssize_t intel_vgpu_rw(struct intel_vgpu *vgpu, char *buf,
size_t count, loff_t *ppos, bool is_write)
{
struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
u64 pos = *ppos & VFIO_PCI_OFFSET_MASK;
int ret = -EINVAL;
if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions) {
if (index >= VFIO_PCI_NUM_REGIONS + vgpu->num_regions) {
gvt_vgpu_err("invalid index: %u\n", index);
return -EINVAL;
}
......@@ -1129,10 +1035,10 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
switch (index) {
case VFIO_PCI_CONFIG_REGION_INDEX:
if (is_write)
ret = intel_gvt_ops->emulate_cfg_write(vgpu, pos,
ret = intel_vgpu_emulate_cfg_write(vgpu, pos,
buf, count);
else
ret = intel_gvt_ops->emulate_cfg_read(vgpu, pos,
ret = intel_vgpu_emulate_cfg_read(vgpu, pos,
buf, count);
break;
case VFIO_PCI_BAR0_REGION_INDEX:
......@@ -1150,20 +1056,19 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
case VFIO_PCI_ROM_REGION_INDEX:
break;
default:
if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions)
if (index >= VFIO_PCI_NUM_REGIONS + vgpu->num_regions)
return -EINVAL;
index -= VFIO_PCI_NUM_REGIONS;
return vdev->region[index].ops->rw(vgpu, buf, count,
return vgpu->region[index].ops->rw(vgpu, buf, count,
ppos, is_write);
}
return ret == 0 ? count : ret;
}
static bool gtt_entry(struct mdev_device *mdev, loff_t *ppos)
static bool gtt_entry(struct intel_vgpu *vgpu, loff_t *ppos)
{
struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
struct intel_gvt *gvt = vgpu->gvt;
int offset;
......@@ -1180,9 +1085,10 @@ static bool gtt_entry(struct mdev_device *mdev, loff_t *ppos)
true : false;
}
static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf,
static ssize_t intel_vgpu_read(struct vfio_device *vfio_dev, char __user *buf,
size_t count, loff_t *ppos)
{
struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
unsigned int done = 0;
int ret;
......@@ -1191,10 +1097,10 @@ static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf,
/* Only support GGTT entry 8 bytes read */
if (count >= 8 && !(*ppos % 8) &&
gtt_entry(mdev, ppos)) {
gtt_entry(vgpu, ppos)) {
u64 val;
ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
ret = intel_vgpu_rw(vgpu, (char *)&val, sizeof(val),
ppos, false);
if (ret <= 0)
goto read_err;
......@@ -1206,7 +1112,7 @@ static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf,
} else if (count >= 4 && !(*ppos % 4)) {
u32 val;
ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
ret = intel_vgpu_rw(vgpu, (char *)&val, sizeof(val),
ppos, false);
if (ret <= 0)
goto read_err;
......@@ -1218,7 +1124,7 @@ static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf,
} else if (count >= 2 && !(*ppos % 2)) {
u16 val;
ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
ret = intel_vgpu_rw(vgpu, (char *)&val, sizeof(val),
ppos, false);
if (ret <= 0)
goto read_err;
......@@ -1230,7 +1136,7 @@ static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf,
} else {
u8 val;
ret = intel_vgpu_rw(mdev, &val, sizeof(val), ppos,
ret = intel_vgpu_rw(vgpu, &val, sizeof(val), ppos,
false);
if (ret <= 0)
goto read_err;
......@@ -1253,10 +1159,11 @@ static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf,
return -EFAULT;
}
static ssize_t intel_vgpu_write(struct mdev_device *mdev,
static ssize_t intel_vgpu_write(struct vfio_device *vfio_dev,
const char __user *buf,
size_t count, loff_t *ppos)
{
struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
unsigned int done = 0;
int ret;
......@@ -1265,13 +1172,13 @@ static ssize_t intel_vgpu_write(struct mdev_device *mdev,
/* Only support GGTT entry 8 bytes write */
if (count >= 8 && !(*ppos % 8) &&
gtt_entry(mdev, ppos)) {
gtt_entry(vgpu, ppos)) {
u64 val;
if (copy_from_user(&val, buf, sizeof(val)))
goto write_err;
ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
ret = intel_vgpu_rw(vgpu, (char *)&val, sizeof(val),
ppos, true);
if (ret <= 0)
goto write_err;
......@@ -1283,7 +1190,7 @@ static ssize_t intel_vgpu_write(struct mdev_device *mdev,
if (copy_from_user(&val, buf, sizeof(val)))
goto write_err;
ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
ret = intel_vgpu_rw(vgpu, (char *)&val, sizeof(val),
ppos, true);
if (ret <= 0)
goto write_err;
......@@ -1295,7 +1202,7 @@ static ssize_t intel_vgpu_write(struct mdev_device *mdev,
if (copy_from_user(&val, buf, sizeof(val)))
goto write_err;
ret = intel_vgpu_rw(mdev, (char *)&val,
ret = intel_vgpu_rw(vgpu, (char *)&val,
sizeof(val), ppos, true);
if (ret <= 0)
goto write_err;
......@@ -1307,7 +1214,7 @@ static ssize_t intel_vgpu_write(struct mdev_device *mdev,
if (copy_from_user(&val, buf, sizeof(val)))
goto write_err;
ret = intel_vgpu_rw(mdev, &val, sizeof(val),
ret = intel_vgpu_rw(vgpu, &val, sizeof(val),
ppos, true);
if (ret <= 0)
goto write_err;
......@@ -1326,13 +1233,14 @@ static ssize_t intel_vgpu_write(struct mdev_device *mdev,
return -EFAULT;
}
static int intel_vgpu_mmap(struct mdev_device *mdev, struct vm_area_struct *vma)
static int intel_vgpu_mmap(struct vfio_device *vfio_dev,
struct vm_area_struct *vma)
{
struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
unsigned int index;
u64 virtaddr;
unsigned long req_size, pgoff, req_start;
pgprot_t pg_prot;
struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);
if (index >= VFIO_PCI_ROM_REGION_INDEX)
......@@ -1407,7 +1315,7 @@ static int intel_vgpu_set_msi_trigger(struct intel_vgpu *vgpu,
gvt_vgpu_err("eventfd_ctx_fdget failed\n");
return PTR_ERR(trigger);
}
kvmgt_vdev(vgpu)->msi_trigger = trigger;
vgpu->msi_trigger = trigger;
} else if ((flags & VFIO_IRQ_SET_DATA_NONE) && !count)
intel_vgpu_release_msi_eventfd_ctx(vgpu);
......@@ -1455,11 +1363,10 @@ static int intel_vgpu_set_irqs(struct intel_vgpu *vgpu, u32 flags,
return func(vgpu, index, start, count, flags, data);
}
static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
static long intel_vgpu_ioctl(struct vfio_device *vfio_dev, unsigned int cmd,
unsigned long arg)
{
struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
unsigned long minsz;
gvt_dbg_core("vgpu%d ioctl, cmd: %d\n", vgpu->id, cmd);
......@@ -1478,7 +1385,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
info.flags = VFIO_DEVICE_FLAGS_PCI;
info.flags |= VFIO_DEVICE_FLAGS_RESET;
info.num_regions = VFIO_PCI_NUM_REGIONS +
vdev->num_regions;
vgpu->num_regions;
info.num_irqs = VFIO_PCI_NUM_IRQS;
return copy_to_user((void __user *)arg, &info, minsz) ?
......@@ -1569,22 +1476,22 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
.header.version = 1 };
if (info.index >= VFIO_PCI_NUM_REGIONS +
vdev->num_regions)
vgpu->num_regions)
return -EINVAL;
info.index =
array_index_nospec(info.index,
VFIO_PCI_NUM_REGIONS +
vdev->num_regions);
vgpu->num_regions);
i = info.index - VFIO_PCI_NUM_REGIONS;
info.offset =
VFIO_PCI_INDEX_TO_OFFSET(info.index);
info.size = vdev->region[i].size;
info.flags = vdev->region[i].flags;
info.size = vgpu->region[i].size;
info.flags = vgpu->region[i].flags;
cap_type.type = vdev->region[i].type;
cap_type.subtype = vdev->region[i].subtype;
cap_type.type = vgpu->region[i].type;
cap_type.subtype = vgpu->region[i].subtype;
ret = vfio_info_add_capability(&caps,
&cap_type.header,
......@@ -1700,7 +1607,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
return ret;
} else if (cmd == VFIO_DEVICE_RESET) {
intel_gvt_ops->vgpu_reset(vgpu);
intel_gvt_reset_vgpu(vgpu);
return 0;
} else if (cmd == VFIO_DEVICE_QUERY_GFX_PLANE) {
struct vfio_device_gfx_plane_info dmabuf;
......@@ -1713,7 +1620,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
if (dmabuf.argsz < minsz)
return -EINVAL;
ret = intel_gvt_ops->vgpu_query_plane(vgpu, &dmabuf);
ret = intel_vgpu_query_plane(vgpu, &dmabuf);
if (ret != 0)
return ret;
......@@ -1721,14 +1628,10 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
-EFAULT : 0;
} else if (cmd == VFIO_DEVICE_GET_GFX_DMABUF) {
__u32 dmabuf_id;
__s32 dmabuf_fd;
if (get_user(dmabuf_id, (__u32 __user *)arg))
return -EFAULT;
dmabuf_fd = intel_gvt_ops->vgpu_get_dmabuf(vgpu, dmabuf_id);
return dmabuf_fd;
return intel_vgpu_get_dmabuf(vgpu, dmabuf_id);
}
return -ENOTTY;
......@@ -1738,14 +1641,9 @@ static ssize_t
vgpu_id_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct mdev_device *mdev = mdev_from_dev(dev);
struct intel_vgpu *vgpu = dev_get_drvdata(dev);
if (mdev) {
struct intel_vgpu *vgpu = (struct intel_vgpu *)
mdev_get_drvdata(mdev);
return sprintf(buf, "%d\n", vgpu->id);
}
return sprintf(buf, "\n");
return sprintf(buf, "%d\n", vgpu->id);
}
static DEVICE_ATTR_RO(vgpu_id);
......@@ -1765,57 +1663,78 @@ static const struct attribute_group *intel_vgpu_groups[] = {
NULL,
};
static struct mdev_parent_ops intel_vgpu_ops = {
.mdev_attr_groups = intel_vgpu_groups,
.create = intel_vgpu_create,
.remove = intel_vgpu_remove,
.open_device = intel_vgpu_open_device,
.close_device = intel_vgpu_close_device,
.read = intel_vgpu_read,
.write = intel_vgpu_write,
.mmap = intel_vgpu_mmap,
.ioctl = intel_vgpu_ioctl,
static const struct vfio_device_ops intel_vgpu_dev_ops = {
.open_device = intel_vgpu_open_device,
.close_device = intel_vgpu_close_device,
.read = intel_vgpu_read,
.write = intel_vgpu_write,
.mmap = intel_vgpu_mmap,
.ioctl = intel_vgpu_ioctl,
};
static int kvmgt_host_init(struct device *dev, void *gvt, const void *ops)
static int intel_vgpu_probe(struct mdev_device *mdev)
{
struct device *pdev = mdev_parent_dev(mdev);
struct intel_gvt *gvt = kdev_to_i915(pdev)->gvt;
struct intel_vgpu_type *type;
struct intel_vgpu *vgpu;
int ret;
ret = intel_gvt_init_vgpu_type_groups((struct intel_gvt *)gvt);
if (ret)
return ret;
type = &gvt->types[mdev_get_type_group_id(mdev)];
if (!type)
return -EINVAL;
intel_gvt_ops = ops;
intel_vgpu_ops.supported_type_groups = gvt_vgpu_type_groups;
vgpu = intel_gvt_create_vgpu(gvt, type);
if (IS_ERR(vgpu)) {
gvt_err("failed to create intel vgpu: %ld\n", PTR_ERR(vgpu));
return PTR_ERR(vgpu);
}
ret = mdev_register_device(dev, &intel_vgpu_ops);
if (ret)
intel_gvt_cleanup_vgpu_type_groups((struct intel_gvt *)gvt);
INIT_WORK(&vgpu->release_work, intel_vgpu_release_work);
vfio_init_group_dev(&vgpu->vfio_device, &mdev->dev,
&intel_vgpu_dev_ops);
return ret;
dev_set_drvdata(&mdev->dev, vgpu);
ret = vfio_register_emulated_iommu_dev(&vgpu->vfio_device);
if (ret) {
intel_gvt_destroy_vgpu(vgpu);
return ret;
}
gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n",
dev_name(mdev_dev(mdev)));
return 0;
}
static void kvmgt_host_exit(struct device *dev, void *gvt)
static void intel_vgpu_remove(struct mdev_device *mdev)
{
mdev_unregister_device(dev);
intel_gvt_cleanup_vgpu_type_groups((struct intel_gvt *)gvt);
}
struct intel_vgpu *vgpu = dev_get_drvdata(&mdev->dev);
static int kvmgt_page_track_add(unsigned long handle, u64 gfn)
if (WARN_ON_ONCE(vgpu->attached))
return;
intel_gvt_destroy_vgpu(vgpu);
}
static struct mdev_driver intel_vgpu_mdev_driver = {
.driver = {
.name = "intel_vgpu_mdev",
.owner = THIS_MODULE,
.dev_groups = intel_vgpu_groups,
},
.probe = intel_vgpu_probe,
.remove = intel_vgpu_remove,
.supported_type_groups = gvt_vgpu_type_groups,
};
int intel_gvt_page_track_add(struct intel_vgpu *info, u64 gfn)
{
struct kvmgt_guest_info *info;
struct kvm *kvm;
struct kvm *kvm = info->kvm;
struct kvm_memory_slot *slot;
int idx;
if (!handle_valid(handle))
if (!info->attached)
return -ESRCH;
info = (struct kvmgt_guest_info *)handle;
kvm = info->kvm;
idx = srcu_read_lock(&kvm->srcu);
slot = gfn_to_memslot(kvm, gfn);
if (!slot) {
......@@ -1837,19 +1756,15 @@ static int kvmgt_page_track_add(unsigned long handle, u64 gfn)
return 0;
}
static int kvmgt_page_track_remove(unsigned long handle, u64 gfn)
int intel_gvt_page_track_remove(struct intel_vgpu *info, u64 gfn)
{
struct kvmgt_guest_info *info;
struct kvm *kvm;
struct kvm *kvm = info->kvm;
struct kvm_memory_slot *slot;
int idx;
if (!handle_valid(handle))
if (!info->attached)
return 0;
info = (struct kvmgt_guest_info *)handle;
kvm = info->kvm;
idx = srcu_read_lock(&kvm->srcu);
slot = gfn_to_memslot(kvm, gfn);
if (!slot) {
......@@ -1875,11 +1790,11 @@ static void kvmgt_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa,
const u8 *val, int len,
struct kvm_page_track_notifier_node *node)
{
struct kvmgt_guest_info *info = container_of(node,
struct kvmgt_guest_info, track_node);
struct intel_vgpu *info =
container_of(node, struct intel_vgpu, track_node);
if (kvmgt_gfn_is_write_protected(info, gpa_to_gfn(gpa)))
intel_gvt_ops->write_protect_handler(info->vgpu, gpa,
intel_vgpu_page_track_handler(info, gpa,
(void *)val, len);
}
......@@ -1889,8 +1804,8 @@ static void kvmgt_page_track_flush_slot(struct kvm *kvm,
{
int i;
gfn_t gfn;
struct kvmgt_guest_info *info = container_of(node,
struct kvmgt_guest_info, track_node);
struct intel_vgpu *info =
container_of(node, struct intel_vgpu, track_node);
write_lock(&kvm->mmu_lock);
for (i = 0; i < slot->npages; i++) {
......@@ -1904,182 +1819,32 @@ static void kvmgt_page_track_flush_slot(struct kvm *kvm,
write_unlock(&kvm->mmu_lock);
}
static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu, struct kvm *kvm)
{
struct intel_vgpu *itr;
struct kvmgt_guest_info *info;
int id;
bool ret = false;
mutex_lock(&vgpu->gvt->lock);
for_each_active_vgpu(vgpu->gvt, itr, id) {
if (!handle_valid(itr->handle))
continue;
info = (struct kvmgt_guest_info *)itr->handle;
if (kvm && kvm == info->kvm) {
ret = true;
goto out;
}
}
out:
mutex_unlock(&vgpu->gvt->lock);
return ret;
}
static int kvmgt_guest_init(struct mdev_device *mdev)
{
struct kvmgt_guest_info *info;
struct intel_vgpu *vgpu;
struct kvmgt_vdev *vdev;
struct kvm *kvm;
vgpu = mdev_get_drvdata(mdev);
if (handle_valid(vgpu->handle))
return -EEXIST;
vdev = kvmgt_vdev(vgpu);
kvm = vdev->kvm;
if (!kvm || kvm->mm != current->mm) {
gvt_vgpu_err("KVM is required to use Intel vGPU\n");
return -ESRCH;
}
if (__kvmgt_vgpu_exist(vgpu, kvm))
return -EEXIST;
info = vzalloc(sizeof(struct kvmgt_guest_info));
if (!info)
return -ENOMEM;
vgpu->handle = (unsigned long)info;
info->vgpu = vgpu;
info->kvm = kvm;
kvm_get_kvm(info->kvm);
kvmgt_protect_table_init(info);
gvt_cache_init(vgpu);
info->track_node.track_write = kvmgt_page_track_write;
info->track_node.track_flush_slot = kvmgt_page_track_flush_slot;
kvm_page_track_register_notifier(kvm, &info->track_node);
debugfs_create_ulong(KVMGT_DEBUGFS_FILENAME, 0444, vgpu->debugfs,
&vdev->nr_cache_entries);
return 0;
}
static bool kvmgt_guest_exit(struct kvmgt_guest_info *info)
{
debugfs_remove(debugfs_lookup(KVMGT_DEBUGFS_FILENAME,
info->vgpu->debugfs));
kvm_page_track_unregister_notifier(info->kvm, &info->track_node);
kvm_put_kvm(info->kvm);
kvmgt_protect_table_destroy(info);
gvt_cache_destroy(info->vgpu);
vfree(info);
return true;
}
static int kvmgt_attach_vgpu(void *p_vgpu, unsigned long *handle)
{
struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
vgpu->vdev = kzalloc(sizeof(struct kvmgt_vdev), GFP_KERNEL);
if (!vgpu->vdev)
return -ENOMEM;
kvmgt_vdev(vgpu)->vgpu = vgpu;
return 0;
}
static void kvmgt_detach_vgpu(void *p_vgpu)
void intel_vgpu_detach_regions(struct intel_vgpu *vgpu)
{
int i;
struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
if (!vdev->region)
if (!vgpu->region)
return;
for (i = 0; i < vdev->num_regions; i++)
if (vdev->region[i].ops->release)
vdev->region[i].ops->release(vgpu,
&vdev->region[i]);
vdev->num_regions = 0;
kfree(vdev->region);
vdev->region = NULL;
kfree(vdev);
for (i = 0; i < vgpu->num_regions; i++)
if (vgpu->region[i].ops->release)
vgpu->region[i].ops->release(vgpu,
&vgpu->region[i]);
vgpu->num_regions = 0;
kfree(vgpu->region);
vgpu->region = NULL;
}
static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data)
{
struct kvmgt_guest_info *info;
struct intel_vgpu *vgpu;
struct kvmgt_vdev *vdev;
if (!handle_valid(handle))
return -ESRCH;
info = (struct kvmgt_guest_info *)handle;
vgpu = info->vgpu;
vdev = kvmgt_vdev(vgpu);
/*
 * When the guest is powered off, msi_trigger is set to NULL, but the vgpu's
 * config space and MMIO registers are not restored to their defaults during
 * guest poweroff. If this vgpu is reused by the next VM, its pipes may still
 * be enabled, so once the vgpu becomes active it will receive vblank
 * interrupt injection requests. msi_trigger, however, stays NULL until the
 * guest enables MSI; so if msi_trigger is NULL, return success and do not
 * inject the interrupt into the guest.
 */
if (vdev->msi_trigger == NULL)
return 0;
if (eventfd_signal(vdev->msi_trigger, 1) == 1)
return 0;
return -EFAULT;
}
static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
{
struct kvmgt_guest_info *info;
kvm_pfn_t pfn;
if (!handle_valid(handle))
return INTEL_GVT_INVALID_ADDR;
info = (struct kvmgt_guest_info *)handle;
pfn = gfn_to_pfn(info->kvm, gfn);
if (is_error_noslot_pfn(pfn))
return INTEL_GVT_INVALID_ADDR;
return pfn;
}
static int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
int intel_gvt_dma_map_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
unsigned long size, dma_addr_t *dma_addr)
{
struct intel_vgpu *vgpu;
struct kvmgt_vdev *vdev;
struct gvt_dma *entry;
int ret;
if (!handle_valid(handle))
if (!vgpu->attached)
return -EINVAL;
vgpu = ((struct kvmgt_guest_info *)handle)->vgpu;
vdev = kvmgt_vdev(vgpu);
mutex_lock(&vdev->cache_lock);
mutex_lock(&vgpu->cache_lock);
entry = __gvt_cache_find_gfn(vgpu, gfn);
if (!entry) {
......@@ -2107,36 +1872,31 @@ static int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
*dma_addr = entry->dma_addr;
}
mutex_unlock(&vdev->cache_lock);
mutex_unlock(&vgpu->cache_lock);
return 0;
err_unmap:
gvt_dma_unmap_page(vgpu, gfn, *dma_addr, size);
err_unlock:
mutex_unlock(&vdev->cache_lock);
mutex_unlock(&vgpu->cache_lock);
return ret;
}
static int kvmgt_dma_pin_guest_page(unsigned long handle, dma_addr_t dma_addr)
int intel_gvt_dma_pin_guest_page(struct intel_vgpu *vgpu, dma_addr_t dma_addr)
{
struct kvmgt_guest_info *info;
struct kvmgt_vdev *vdev;
struct gvt_dma *entry;
int ret = 0;
if (!handle_valid(handle))
if (!vgpu->attached)
return -ENODEV;
info = (struct kvmgt_guest_info *)handle;
vdev = kvmgt_vdev(info->vgpu);
mutex_lock(&vdev->cache_lock);
entry = __gvt_cache_find_dma_addr(info->vgpu, dma_addr);
mutex_lock(&vgpu->cache_lock);
entry = __gvt_cache_find_dma_addr(vgpu, dma_addr);
if (entry)
kref_get(&entry->ref);
else
ret = -ENOMEM;
mutex_unlock(&vdev->cache_lock);
mutex_unlock(&vgpu->cache_lock);
return ret;
}
......@@ -2150,109 +1910,290 @@ static void __gvt_dma_release(struct kref *ref)
__gvt_cache_remove_entry(entry->vgpu, entry);
}
static void kvmgt_dma_unmap_guest_page(unsigned long handle, dma_addr_t dma_addr)
void intel_gvt_dma_unmap_guest_page(struct intel_vgpu *vgpu,
dma_addr_t dma_addr)
{
struct intel_vgpu *vgpu;
struct kvmgt_vdev *vdev;
struct gvt_dma *entry;
if (!handle_valid(handle))
if (!vgpu->attached)
return;
vgpu = ((struct kvmgt_guest_info *)handle)->vgpu;
vdev = kvmgt_vdev(vgpu);
mutex_lock(&vdev->cache_lock);
mutex_lock(&vgpu->cache_lock);
entry = __gvt_cache_find_dma_addr(vgpu, dma_addr);
if (entry)
kref_put(&entry->ref, __gvt_dma_release);
mutex_unlock(&vdev->cache_lock);
mutex_unlock(&vgpu->cache_lock);
}
static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
void *buf, unsigned long len, bool write)
static void init_device_info(struct intel_gvt *gvt)
{
struct kvmgt_guest_info *info;
struct intel_gvt_device_info *info = &gvt->device_info;
struct pci_dev *pdev = to_pci_dev(gvt->gt->i915->drm.dev);
if (!handle_valid(handle))
return -ESRCH;
info->max_support_vgpus = 8;
info->cfg_space_size = PCI_CFG_SPACE_EXP_SIZE;
info->mmio_size = 2 * 1024 * 1024;
info->mmio_bar = 0;
info->gtt_start_offset = 8 * 1024 * 1024;
info->gtt_entry_size = 8;
info->gtt_entry_size_shift = 3;
info->gmadr_bytes_in_cmd = 8;
info->max_surface_size = 36 * 1024 * 1024;
info->msi_cap_offset = pdev->msi_cap;
}
info = (struct kvmgt_guest_info *)handle;
static void intel_gvt_test_and_emulate_vblank(struct intel_gvt *gvt)
{
struct intel_vgpu *vgpu;
int id;
return vfio_dma_rw(kvmgt_vdev(info->vgpu)->vfio_group,
gpa, buf, len, write);
mutex_lock(&gvt->lock);
idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) {
if (test_and_clear_bit(INTEL_GVT_REQUEST_EMULATE_VBLANK + id,
(void *)&gvt->service_request)) {
if (vgpu->active)
intel_vgpu_emulate_vblank(vgpu);
}
}
mutex_unlock(&gvt->lock);
}
static int kvmgt_read_gpa(unsigned long handle, unsigned long gpa,
void *buf, unsigned long len)
static int gvt_service_thread(void *data)
{
return kvmgt_rw_gpa(handle, gpa, buf, len, false);
struct intel_gvt *gvt = (struct intel_gvt *)data;
int ret;
gvt_dbg_core("service thread start\n");
while (!kthread_should_stop()) {
ret = wait_event_interruptible(gvt->service_thread_wq,
kthread_should_stop() || gvt->service_request);
if (kthread_should_stop())
break;
if (WARN_ONCE(ret, "service thread was woken up by a signal.\n"))
continue;
intel_gvt_test_and_emulate_vblank(gvt);
if (test_bit(INTEL_GVT_REQUEST_SCHED,
(void *)&gvt->service_request) ||
test_bit(INTEL_GVT_REQUEST_EVENT_SCHED,
(void *)&gvt->service_request)) {
intel_gvt_schedule(gvt);
}
}
return 0;
}
static int kvmgt_write_gpa(unsigned long handle, unsigned long gpa,
void *buf, unsigned long len)
static void clean_service_thread(struct intel_gvt *gvt)
{
return kvmgt_rw_gpa(handle, gpa, buf, len, true);
kthread_stop(gvt->service_thread);
}
static unsigned long kvmgt_virt_to_pfn(void *addr)
static int init_service_thread(struct intel_gvt *gvt)
{
return PFN_DOWN(__pa(addr));
init_waitqueue_head(&gvt->service_thread_wq);
gvt->service_thread = kthread_run(gvt_service_thread,
gvt, "gvt_service_thread");
if (IS_ERR(gvt->service_thread)) {
gvt_err("fail to start service thread.\n");
return PTR_ERR(gvt->service_thread);
}
return 0;
}
static bool kvmgt_is_valid_gfn(unsigned long handle, unsigned long gfn)
/**
* intel_gvt_clean_device - clean a GVT device
* @i915: i915 private
*
* This function is called at the driver unloading stage, to free the
* resources owned by a GVT device.
*
*/
static void intel_gvt_clean_device(struct drm_i915_private *i915)
{
struct kvmgt_guest_info *info;
struct kvm *kvm;
int idx;
bool ret;
struct intel_gvt *gvt = fetch_and_zero(&i915->gvt);
if (!handle_valid(handle))
return false;
if (drm_WARN_ON(&i915->drm, !gvt))
return;
info = (struct kvmgt_guest_info *)handle;
kvm = info->kvm;
mdev_unregister_device(i915->drm.dev);
intel_gvt_cleanup_vgpu_type_groups(gvt);
intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu);
intel_gvt_clean_vgpu_types(gvt);
idx = srcu_read_lock(&kvm->srcu);
ret = kvm_is_visible_gfn(kvm, gfn);
srcu_read_unlock(&kvm->srcu, idx);
intel_gvt_debugfs_clean(gvt);
clean_service_thread(gvt);
intel_gvt_clean_cmd_parser(gvt);
intel_gvt_clean_sched_policy(gvt);
intel_gvt_clean_workload_scheduler(gvt);
intel_gvt_clean_gtt(gvt);
intel_gvt_free_firmware(gvt);
intel_gvt_clean_mmio_info(gvt);
idr_destroy(&gvt->vgpu_idr);
kfree(i915->gvt);
}
/**
* intel_gvt_init_device - initialize a GVT device
* @i915: drm i915 private data
*
* This function is called at the initialization stage, to initialize
* necessary GVT components.
*
* Returns:
* Zero on success, negative error code if failed.
*
*/
static int intel_gvt_init_device(struct drm_i915_private *i915)
{
struct intel_gvt *gvt;
struct intel_vgpu *vgpu;
int ret;
if (drm_WARN_ON(&i915->drm, i915->gvt))
return -EEXIST;
gvt = kzalloc(sizeof(struct intel_gvt), GFP_KERNEL);
if (!gvt)
return -ENOMEM;
gvt_dbg_core("init gvt device\n");
idr_init_base(&gvt->vgpu_idr, 1);
spin_lock_init(&gvt->scheduler.mmio_context_lock);
mutex_init(&gvt->lock);
mutex_init(&gvt->sched_lock);
gvt->gt = to_gt(i915);
i915->gvt = gvt;
init_device_info(gvt);
ret = intel_gvt_setup_mmio_info(gvt);
if (ret)
goto out_clean_idr;
intel_gvt_init_engine_mmio_context(gvt);
ret = intel_gvt_load_firmware(gvt);
if (ret)
goto out_clean_mmio_info;
ret = intel_gvt_init_irq(gvt);
if (ret)
goto out_free_firmware;
ret = intel_gvt_init_gtt(gvt);
if (ret)
goto out_free_firmware;
ret = intel_gvt_init_workload_scheduler(gvt);
if (ret)
goto out_clean_gtt;
ret = intel_gvt_init_sched_policy(gvt);
if (ret)
goto out_clean_workload_scheduler;
ret = intel_gvt_init_cmd_parser(gvt);
if (ret)
goto out_clean_sched_policy;
ret = init_service_thread(gvt);
if (ret)
goto out_clean_cmd_parser;
ret = intel_gvt_init_vgpu_types(gvt);
if (ret)
goto out_clean_thread;
vgpu = intel_gvt_create_idle_vgpu(gvt);
if (IS_ERR(vgpu)) {
ret = PTR_ERR(vgpu);
gvt_err("failed to create idle vgpu\n");
goto out_clean_types;
}
gvt->idle_vgpu = vgpu;
intel_gvt_debugfs_init(gvt);
ret = intel_gvt_init_vgpu_type_groups(gvt);
if (ret)
goto out_destroy_idle_vgpu;
ret = mdev_register_device(i915->drm.dev, &intel_vgpu_mdev_driver);
if (ret)
goto out_cleanup_vgpu_type_groups;
gvt_dbg_core("gvt device initialization is done\n");
return 0;
out_cleanup_vgpu_type_groups:
intel_gvt_cleanup_vgpu_type_groups(gvt);
out_destroy_idle_vgpu:
intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu);
intel_gvt_debugfs_clean(gvt);
out_clean_types:
intel_gvt_clean_vgpu_types(gvt);
out_clean_thread:
clean_service_thread(gvt);
out_clean_cmd_parser:
intel_gvt_clean_cmd_parser(gvt);
out_clean_sched_policy:
intel_gvt_clean_sched_policy(gvt);
out_clean_workload_scheduler:
intel_gvt_clean_workload_scheduler(gvt);
out_clean_gtt:
intel_gvt_clean_gtt(gvt);
out_free_firmware:
intel_gvt_free_firmware(gvt);
out_clean_mmio_info:
intel_gvt_clean_mmio_info(gvt);
out_clean_idr:
idr_destroy(&gvt->vgpu_idr);
kfree(gvt);
i915->gvt = NULL;
return ret;
}
static const struct intel_gvt_mpt kvmgt_mpt = {
.type = INTEL_GVT_HYPERVISOR_KVM,
.host_init = kvmgt_host_init,
.host_exit = kvmgt_host_exit,
.attach_vgpu = kvmgt_attach_vgpu,
.detach_vgpu = kvmgt_detach_vgpu,
.inject_msi = kvmgt_inject_msi,
.from_virt_to_mfn = kvmgt_virt_to_pfn,
.enable_page_track = kvmgt_page_track_add,
.disable_page_track = kvmgt_page_track_remove,
.read_gpa = kvmgt_read_gpa,
.write_gpa = kvmgt_write_gpa,
.gfn_to_mfn = kvmgt_gfn_to_pfn,
.dma_map_guest_page = kvmgt_dma_map_guest_page,
.dma_unmap_guest_page = kvmgt_dma_unmap_guest_page,
.dma_pin_guest_page = kvmgt_dma_pin_guest_page,
.set_opregion = kvmgt_set_opregion,
.set_edid = kvmgt_set_edid,
.get_vfio_device = kvmgt_get_vfio_device,
.put_vfio_device = kvmgt_put_vfio_device,
.is_valid_gfn = kvmgt_is_valid_gfn,
static void intel_gvt_pm_resume(struct drm_i915_private *i915)
{
struct intel_gvt *gvt = i915->gvt;
intel_gvt_restore_fence(gvt);
intel_gvt_restore_mmio(gvt);
intel_gvt_restore_ggtt(gvt);
}
static const struct intel_vgpu_ops intel_gvt_vgpu_ops = {
.init_device = intel_gvt_init_device,
.clean_device = intel_gvt_clean_device,
.pm_resume = intel_gvt_pm_resume,
};
static int __init kvmgt_init(void)
{
if (intel_gvt_register_hypervisor(&kvmgt_mpt) < 0)
return -ENODEV;
return 0;
int ret;
ret = intel_gvt_set_ops(&intel_gvt_vgpu_ops);
if (ret)
return ret;
ret = mdev_register_driver(&intel_vgpu_mdev_driver);
if (ret)
intel_gvt_clear_ops(&intel_gvt_vgpu_ops);
return ret;
}
static void __exit kvmgt_exit(void)
{
intel_gvt_unregister_hypervisor();
mdev_unregister_driver(&intel_vgpu_mdev_driver);
intel_gvt_clear_ops(&intel_gvt_vgpu_ops);
}
module_init(kvmgt_init);
......
......@@ -139,7 +139,7 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa,
}
if (drm_WARN_ON_ONCE(&i915->drm, !reg_is_mmio(gvt, offset))) {
ret = intel_gvt_hypervisor_read_gpa(vgpu, pa, p_data, bytes);
ret = intel_gvt_read_gpa(vgpu, pa, p_data, bytes);
goto out;
}
......@@ -215,7 +215,7 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, u64 pa,
}
if (drm_WARN_ON_ONCE(&i915->drm, !reg_is_mmio(gvt, offset))) {
ret = intel_gvt_hypervisor_write_gpa(vgpu, pa, p_data, bytes);
ret = intel_gvt_write_gpa(vgpu, pa, p_data, bytes);
goto out;
}
......
......@@ -72,7 +72,6 @@ struct intel_gvt_mmio_info {
const struct intel_engine_cs *
intel_gvt_render_mmio_to_engine(struct intel_gvt *gvt, unsigned int reg);
unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt);
bool intel_gvt_match_device(struct intel_gvt *gvt, unsigned long device);
int intel_gvt_setup_mmio_info(struct intel_gvt *gvt);
void intel_gvt_clean_mmio_info(struct intel_gvt *gvt);
......
/*
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Eddie Dong <eddie.dong@intel.com>
* Dexuan Cui
* Jike Song <jike.song@intel.com>
*
* Contributors:
* Zhi Wang <zhi.a.wang@intel.com>
*
*/
#ifndef _GVT_MPT_H_
#define _GVT_MPT_H_
#include "gvt.h"
/**
* DOC: Hypervisor Service APIs for GVT-g Core Logic
*
* This is the glue layer between specific hypervisor MPT modules and the
* GVT-g core logic. Each kind of hypervisor MPT module provides a collection
* of function callbacks that is attached to the GVT host when the driver
* loads. The GVT-g core logic calls these APIs to request specific services
* from the hypervisor.
*/
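/*
 * For orientation only (not part of this patch): a minimal sketch of the
 * attach pattern described above, using only hooks visible in this series.
 * The module name and callback bodies are hypothetical; the real KVM
 * implementation is the kvmgt_mpt table being removed elsewhere in this diff.
 *
 *	static int example_host_init(struct device *dev, void *gvt,
 *				     const void *ops)
 *	{
 *		return 0;	// set up hypervisor-side state here
 *	}
 *
 *	static void example_host_exit(struct device *dev, void *gvt)
 *	{
 *		// tear down hypervisor-side state
 *	}
 *
 *	static const struct intel_gvt_mpt example_mpt = {
 *		.type      = INTEL_GVT_HYPERVISOR_KVM,
 *		.host_init = example_host_init,
 *		.host_exit = example_host_exit,
 *	};
 *
 *	// registered at module load, unregistered at unload:
 *	//	intel_gvt_register_hypervisor(&example_mpt);
 *	//	intel_gvt_unregister_hypervisor();
 */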
/**
* intel_gvt_hypervisor_host_init - init GVT-g host side
*
* Returns:
* Zero on success, negative error code if failed
*/
static inline int intel_gvt_hypervisor_host_init(struct device *dev,
void *gvt, const void *ops)
{
if (!intel_gvt_host.mpt->host_init)
return -ENODEV;
return intel_gvt_host.mpt->host_init(dev, gvt, ops);
}
/**
* intel_gvt_hypervisor_host_exit - exit GVT-g host side
*/
static inline void intel_gvt_hypervisor_host_exit(struct device *dev, void *gvt)
{
/* optional to provide */
if (!intel_gvt_host.mpt->host_exit)
return;
intel_gvt_host.mpt->host_exit(dev, gvt);
}
/**
* intel_gvt_hypervisor_attach_vgpu - call the hypervisor to initialize
* vGPU-related state inside the hypervisor.
*
* Returns:
* Zero on success, negative error code if failed.
*/
static inline int intel_gvt_hypervisor_attach_vgpu(struct intel_vgpu *vgpu)
{
/* optional to provide */
if (!intel_gvt_host.mpt->attach_vgpu)
return 0;
return intel_gvt_host.mpt->attach_vgpu(vgpu, &vgpu->handle);
}
/**
* intel_gvt_hypervisor_detach_vgpu - call the hypervisor to release
* vGPU-related state inside the hypervisor.
*/
static inline void intel_gvt_hypervisor_detach_vgpu(struct intel_vgpu *vgpu)
{
/* optional to provide */
if (!intel_gvt_host.mpt->detach_vgpu)
return;
intel_gvt_host.mpt->detach_vgpu(vgpu);
}
#define MSI_CAP_CONTROL(offset) (offset + 2)
#define MSI_CAP_ADDRESS(offset) (offset + 4)
#define MSI_CAP_DATA(offset) (offset + 8)
#define MSI_CAP_EN 0x1
/**
* intel_gvt_hypervisor_inject_msi - inject a MSI interrupt into vGPU
*
* Returns:
* Zero on success, negative error code if failed.
*/
static inline int intel_gvt_hypervisor_inject_msi(struct intel_vgpu *vgpu)
{
unsigned long offset = vgpu->gvt->device_info.msi_cap_offset;
u16 control, data;
u32 addr;
int ret;
control = *(u16 *)(vgpu_cfg_space(vgpu) + MSI_CAP_CONTROL(offset));
addr = *(u32 *)(vgpu_cfg_space(vgpu) + MSI_CAP_ADDRESS(offset));
data = *(u16 *)(vgpu_cfg_space(vgpu) + MSI_CAP_DATA(offset));
/* Do not generate MSI if MSIEN is disabled */
if (!(control & MSI_CAP_EN))
return 0;
if (WARN(control & GENMASK(15, 1), "only support one MSI format\n"))
return -EINVAL;
trace_inject_msi(vgpu->id, addr, data);
ret = intel_gvt_host.mpt->inject_msi(vgpu->handle, addr, data);
if (ret)
return ret;
return 0;
}
/**
* intel_gvt_hypervisor_virt_to_mfn - translate a host VA into MFN
* @p: host kernel virtual address
*
* Returns:
* MFN on success, INTEL_GVT_INVALID_ADDR if failed.
*/
static inline unsigned long intel_gvt_hypervisor_virt_to_mfn(void *p)
{
return intel_gvt_host.mpt->from_virt_to_mfn(p);
}
/**
* intel_gvt_hypervisor_enable_page_track - track a guest page
* @vgpu: a vGPU
* @gfn: the gfn of guest
*
* Returns:
* Zero on success, negative error code if failed.
*/
static inline int intel_gvt_hypervisor_enable_page_track(
struct intel_vgpu *vgpu, unsigned long gfn)
{
return intel_gvt_host.mpt->enable_page_track(vgpu->handle, gfn);
}
/**
* intel_gvt_hypervisor_disable_page_track - untrack a guest page
* @vgpu: a vGPU
* @gfn: the gfn of guest
*
* Returns:
* Zero on success, negative error code if failed.
*/
static inline int intel_gvt_hypervisor_disable_page_track(
struct intel_vgpu *vgpu, unsigned long gfn)
{
return intel_gvt_host.mpt->disable_page_track(vgpu->handle, gfn);
}
/**
* intel_gvt_hypervisor_read_gpa - copy data from GPA to host data buffer
* @vgpu: a vGPU
* @gpa: guest physical address
* @buf: host data buffer
* @len: data length
*
* Returns:
* Zero on success, negative error code if failed.
*/
static inline int intel_gvt_hypervisor_read_gpa(struct intel_vgpu *vgpu,
unsigned long gpa, void *buf, unsigned long len)
{
return intel_gvt_host.mpt->read_gpa(vgpu->handle, gpa, buf, len);
}
/**
* intel_gvt_hypervisor_write_gpa - copy data from host data buffer to GPA
* @vgpu: a vGPU
* @gpa: guest physical address
* @buf: host data buffer
* @len: data length
*
* Returns:
* Zero on success, negative error code if failed.
*/
static inline int intel_gvt_hypervisor_write_gpa(struct intel_vgpu *vgpu,
unsigned long gpa, void *buf, unsigned long len)
{
return intel_gvt_host.mpt->write_gpa(vgpu->handle, gpa, buf, len);
}
/**
* intel_gvt_hypervisor_gfn_to_mfn - translate a GFN to MFN
* @vgpu: a vGPU
* @gfn: guest pfn
*
* Returns:
* MFN on success, INTEL_GVT_INVALID_ADDR if failed.
*/
static inline unsigned long intel_gvt_hypervisor_gfn_to_mfn(
struct intel_vgpu *vgpu, unsigned long gfn)
{
return intel_gvt_host.mpt->gfn_to_mfn(vgpu->handle, gfn);
}
/**
* intel_gvt_hypervisor_dma_map_guest_page - setup dma map for guest page
* @vgpu: a vGPU
* @gfn: guest pfn
* @size: page size
* @dma_addr: retrieve allocated dma addr
*
* Returns:
* 0 on success, negative error code if failed.
*/
static inline int intel_gvt_hypervisor_dma_map_guest_page(
struct intel_vgpu *vgpu, unsigned long gfn, unsigned long size,
dma_addr_t *dma_addr)
{
return intel_gvt_host.mpt->dma_map_guest_page(vgpu->handle, gfn, size,
dma_addr);
}
/**
* intel_gvt_hypervisor_dma_unmap_guest_page - cancel dma map for guest page
* @vgpu: a vGPU
* @dma_addr: the mapped dma addr
*/
static inline void intel_gvt_hypervisor_dma_unmap_guest_page(
struct intel_vgpu *vgpu, dma_addr_t dma_addr)
{
intel_gvt_host.mpt->dma_unmap_guest_page(vgpu->handle, dma_addr);
}
/**
* intel_gvt_hypervisor_dma_pin_guest_page - pin guest dma buf
* @vgpu: a vGPU
* @dma_addr: guest dma addr
*
* Returns:
* 0 on success, negative error code if failed.
*/
static inline int
intel_gvt_hypervisor_dma_pin_guest_page(struct intel_vgpu *vgpu,
dma_addr_t dma_addr)
{
return intel_gvt_host.mpt->dma_pin_guest_page(vgpu->handle, dma_addr);
}
/**
* intel_gvt_hypervisor_map_gfn_to_mfn - map a GFN region to MFN
* @vgpu: a vGPU
* @gfn: guest PFN
* @mfn: host PFN
* @nr: amount of PFNs
* @map: map or unmap
*
* Returns:
* Zero on success, negative error code if failed.
*/
static inline int intel_gvt_hypervisor_map_gfn_to_mfn(
struct intel_vgpu *vgpu, unsigned long gfn,
unsigned long mfn, unsigned int nr,
bool map)
{
/* an MPT implementation could have MMIO mapped elsewhere */
if (!intel_gvt_host.mpt->map_gfn_to_mfn)
return 0;
return intel_gvt_host.mpt->map_gfn_to_mfn(vgpu->handle, gfn, mfn, nr,
map);
}
/**
* intel_gvt_hypervisor_set_trap_area - Trap a guest PA region
* @vgpu: a vGPU
* @start: the beginning of the guest physical address region
* @end: the end of the guest physical address region
* @map: map or unmap
*
* Returns:
* Zero on success, negative error code if failed.
*/
static inline int intel_gvt_hypervisor_set_trap_area(
struct intel_vgpu *vgpu, u64 start, u64 end, bool map)
{
/* an MPT implementation could have MMIO trapped elsewhere */
if (!intel_gvt_host.mpt->set_trap_area)
return 0;
return intel_gvt_host.mpt->set_trap_area(vgpu->handle, start, end, map);
}
/**
* intel_gvt_hypervisor_set_opregion - Set opregion for guest
* @vgpu: a vGPU
*
* Returns:
* Zero on success, negative error code if failed.
*/
static inline int intel_gvt_hypervisor_set_opregion(struct intel_vgpu *vgpu)
{
if (!intel_gvt_host.mpt->set_opregion)
return 0;
return intel_gvt_host.mpt->set_opregion(vgpu);
}
/**
* intel_gvt_hypervisor_set_edid - Set EDID region for guest
* @vgpu: a vGPU
* @port_num: display port number
*
* Returns:
* Zero on success, negative error code if failed.
*/
static inline int intel_gvt_hypervisor_set_edid(struct intel_vgpu *vgpu,
int port_num)
{
if (!intel_gvt_host.mpt->set_edid)
return 0;
return intel_gvt_host.mpt->set_edid(vgpu, port_num);
}
/**
* intel_gvt_hypervisor_get_vfio_device - increase vfio device ref count
* @vgpu: a vGPU
*
* Returns:
* Zero on success, negative error code if failed.
*/
static inline int intel_gvt_hypervisor_get_vfio_device(struct intel_vgpu *vgpu)
{
if (!intel_gvt_host.mpt->get_vfio_device)
return 0;
return intel_gvt_host.mpt->get_vfio_device(vgpu);
}
/**
* intel_gvt_hypervisor_put_vfio_device - decrease vfio device ref count
* @vgpu: a vGPU
*
* Returns:
* Zero on success, negative error code if failed.
*/
static inline void intel_gvt_hypervisor_put_vfio_device(struct intel_vgpu *vgpu)
{
if (!intel_gvt_host.mpt->put_vfio_device)
return;
intel_gvt_host.mpt->put_vfio_device(vgpu);
}
/**
* intel_gvt_hypervisor_is_valid_gfn - check whether a gfn is visible to the guest
* @vgpu: a vGPU
* @gfn: guest PFN
*
* Returns:
* true if the gfn is valid and visible, false otherwise.
*/
static inline bool intel_gvt_hypervisor_is_valid_gfn(
struct intel_vgpu *vgpu, unsigned long gfn)
{
if (!intel_gvt_host.mpt->is_valid_gfn)
return true;
return intel_gvt_host.mpt->is_valid_gfn(vgpu->handle, gfn);
}
int intel_gvt_register_hypervisor(const struct intel_gvt_mpt *);
void intel_gvt_unregister_hypervisor(void);
#endif /* _GVT_MPT_H_ */
......@@ -255,33 +255,6 @@ int intel_vgpu_init_opregion(struct intel_vgpu *vgpu)
return 0;
}
static int map_vgpu_opregion(struct intel_vgpu *vgpu, bool map)
{
u64 mfn;
int i, ret;
for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++) {
mfn = intel_gvt_hypervisor_virt_to_mfn(vgpu_opregion(vgpu)->va
+ i * PAGE_SIZE);
if (mfn == INTEL_GVT_INVALID_ADDR) {
gvt_vgpu_err("fail to get MFN from VA\n");
return -EINVAL;
}
ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu,
vgpu_opregion(vgpu)->gfn[i],
mfn, 1, map);
if (ret) {
gvt_vgpu_err("fail to map GFN to MFN, errno: %d\n",
ret);
return ret;
}
}
vgpu_opregion(vgpu)->mapped = map;
return 0;
}
/**
* intel_vgpu_opregion_base_write_handler - Opregion base register write handler
*
......@@ -294,34 +267,13 @@ static int map_vgpu_opregion(struct intel_vgpu *vgpu, bool map)
int intel_vgpu_opregion_base_write_handler(struct intel_vgpu *vgpu, u32 gpa)
{
int i, ret = 0;
int i;
gvt_dbg_core("emulate opregion from kernel\n");
switch (intel_gvt_host.hypervisor_type) {
case INTEL_GVT_HYPERVISOR_KVM:
for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++)
vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i;
break;
case INTEL_GVT_HYPERVISOR_XEN:
/**
* Wins guest on Xengt will write this register twice: xen
* hvmloader and windows graphic driver.
*/
if (vgpu_opregion(vgpu)->mapped)
map_vgpu_opregion(vgpu, false);
for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++)
vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i;
ret = map_vgpu_opregion(vgpu, true);
break;
default:
ret = -EINVAL;
gvt_vgpu_err("not supported hypervisor\n");
}
return ret;
for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++)
vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i;
return 0;
}
/**
......@@ -336,12 +288,7 @@ void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu)
if (!vgpu_opregion(vgpu)->va)
return;
if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_XEN) {
if (vgpu_opregion(vgpu)->mapped)
map_vgpu_opregion(vgpu, false);
} else if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_KVM) {
/* Guest opregion is released by VFIO */
}
/* Guest opregion is released by VFIO */
free_pages((unsigned long)vgpu_opregion(vgpu)->va,
get_order(INTEL_GVT_OPREGION_SIZE));
......@@ -470,39 +417,22 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci)
u64 scic_pa = 0, parm_pa = 0;
int ret;
switch (intel_gvt_host.hypervisor_type) {
case INTEL_GVT_HYPERVISOR_XEN:
scic = *((u32 *)vgpu_opregion(vgpu)->va +
INTEL_GVT_OPREGION_SCIC);
parm = *((u32 *)vgpu_opregion(vgpu)->va +
INTEL_GVT_OPREGION_PARM);
break;
case INTEL_GVT_HYPERVISOR_KVM:
scic_pa = (vgpu_opregion(vgpu)->gfn[0] << PAGE_SHIFT) +
INTEL_GVT_OPREGION_SCIC;
parm_pa = (vgpu_opregion(vgpu)->gfn[0] << PAGE_SHIFT) +
INTEL_GVT_OPREGION_PARM;
ret = intel_gvt_hypervisor_read_gpa(vgpu, scic_pa,
&scic, sizeof(scic));
if (ret) {
gvt_vgpu_err("guest opregion read error %d, gpa 0x%llx, len %lu\n",
ret, scic_pa, sizeof(scic));
return ret;
}
ret = intel_gvt_hypervisor_read_gpa(vgpu, parm_pa,
&parm, sizeof(parm));
if (ret) {
gvt_vgpu_err("guest opregion read error %d, gpa 0x%llx, len %lu\n",
ret, scic_pa, sizeof(scic));
return ret;
}
scic_pa = (vgpu_opregion(vgpu)->gfn[0] << PAGE_SHIFT) +
INTEL_GVT_OPREGION_SCIC;
parm_pa = (vgpu_opregion(vgpu)->gfn[0] << PAGE_SHIFT) +
INTEL_GVT_OPREGION_PARM;
ret = intel_gvt_read_gpa(vgpu, scic_pa, &scic, sizeof(scic));
if (ret) {
gvt_vgpu_err("guest opregion read error %d, gpa 0x%llx, len %lu\n",
ret, scic_pa, sizeof(scic));
return ret;
}
break;
default:
gvt_vgpu_err("not supported hypervisor\n");
return -EINVAL;
ret = intel_gvt_read_gpa(vgpu, parm_pa, &parm, sizeof(parm));
if (ret) {
gvt_vgpu_err("guest opregion read error %d, gpa 0x%llx, len %lu\n",
ret, parm_pa, sizeof(parm));
return ret;
}
if (!(swsci & SWSCI_SCI_SELECT)) {
......@@ -535,34 +465,18 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci)
parm = 0;
out:
switch (intel_gvt_host.hypervisor_type) {
case INTEL_GVT_HYPERVISOR_XEN:
*((u32 *)vgpu_opregion(vgpu)->va +
INTEL_GVT_OPREGION_SCIC) = scic;
*((u32 *)vgpu_opregion(vgpu)->va +
INTEL_GVT_OPREGION_PARM) = parm;
break;
case INTEL_GVT_HYPERVISOR_KVM:
ret = intel_gvt_hypervisor_write_gpa(vgpu, scic_pa,
&scic, sizeof(scic));
if (ret) {
gvt_vgpu_err("guest opregion write error %d, gpa 0x%llx, len %lu\n",
ret, scic_pa, sizeof(scic));
return ret;
}
ret = intel_gvt_hypervisor_write_gpa(vgpu, parm_pa,
&parm, sizeof(parm));
if (ret) {
gvt_vgpu_err("guest opregion write error %d, gpa 0x%llx, len %lu\n",
ret, scic_pa, sizeof(scic));
return ret;
}
ret = intel_gvt_write_gpa(vgpu, scic_pa, &scic, sizeof(scic));
if (ret) {
gvt_vgpu_err("guest opregion write error %d, gpa 0x%llx, len %lu\n",
ret, scic_pa, sizeof(scic));
return ret;
}
break;
default:
gvt_vgpu_err("not supported hypervisor\n");
return -EINVAL;
ret = intel_gvt_write_gpa(vgpu, parm_pa, &parm, sizeof(parm));
if (ret) {
gvt_vgpu_err("guest opregion write error %d, gpa 0x%llx, len %lu\n",
ret, parm_pa, sizeof(parm));
return ret;
}
return 0;
......
......@@ -87,7 +87,7 @@ void intel_vgpu_unregister_page_track(struct intel_vgpu *vgpu,
track = radix_tree_delete(&vgpu->page_track_tree, gfn);
if (track) {
if (track->tracked)
intel_gvt_hypervisor_disable_page_track(vgpu, gfn);
intel_gvt_page_track_remove(vgpu, gfn);
kfree(track);
}
}
......@@ -112,7 +112,7 @@ int intel_vgpu_enable_page_track(struct intel_vgpu *vgpu, unsigned long gfn)
if (track->tracked)
return 0;
ret = intel_gvt_hypervisor_enable_page_track(vgpu, gfn);
ret = intel_gvt_page_track_add(vgpu, gfn);
if (ret)
return ret;
track->tracked = true;
......@@ -139,7 +139,7 @@ int intel_vgpu_disable_page_track(struct intel_vgpu *vgpu, unsigned long gfn)
if (!track->tracked)
return 0;
ret = intel_gvt_hypervisor_disable_page_track(vgpu, gfn);
ret = intel_gvt_page_track_remove(vgpu, gfn);
if (ret)
return ret;
track->tracked = false;
......@@ -172,7 +172,7 @@ int intel_vgpu_page_track_handler(struct intel_vgpu *vgpu, u64 gpa,
if (unlikely(vgpu->failsafe)) {
/* Remove write protection to prevent future traps. */
intel_vgpu_disable_page_track(vgpu, gpa >> PAGE_SHIFT);
intel_gvt_page_track_remove(vgpu, gpa >> PAGE_SHIFT);
} else {
ret = page_track->handler(page_track, gpa, data, bytes);
if (ret)
......
......@@ -132,6 +132,13 @@
#define RING_GFX_MODE(base) _MMIO((base) + 0x29c)
#define VF_GUARDBAND _MMIO(0x83a4)
#define BCS_TILE_REGISTER_VAL_OFFSET (0x43*4)
/* XXX FIXME i915 has changed PP_XXX definition */
#define PCH_PP_STATUS _MMIO(0xc7200)
#define PCH_PP_CONTROL _MMIO(0xc7204)
#define PCH_PP_ON_DELAYS _MMIO(0xc7208)
#define PCH_PP_OFF_DELAYS _MMIO(0xc720c)
#define PCH_PP_DIVISOR _MMIO(0xc7210)
#endif
......@@ -150,10 +150,10 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
sr_oa_regs(workload, (u32 *)shadow_ring_context, true);
#define COPY_REG(name) \
intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
intel_gvt_read_gpa(vgpu, workload->ring_context_gpa \
+ RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)
#define COPY_REG_MASKED(name) {\
intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
intel_gvt_read_gpa(vgpu, workload->ring_context_gpa \
+ RING_CTX_OFF(name.val),\
&shadow_ring_context->name.val, 4);\
shadow_ring_context->name.val |= 0xffff << 16;\
......@@ -167,7 +167,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
COPY_REG(rcs_indirect_ctx);
COPY_REG(rcs_indirect_ctx_offset);
} else if (workload->engine->id == BCS0)
intel_gvt_hypervisor_read_gpa(vgpu,
intel_gvt_read_gpa(vgpu,
workload->ring_context_gpa +
BCS_TILE_REGISTER_VAL_OFFSET,
(void *)shadow_ring_context +
......@@ -178,7 +178,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
/* don't copy Ring Context (the first 0x50 dwords),
* only copy the Engine Context part from guest
*/
intel_gvt_hypervisor_read_gpa(vgpu,
intel_gvt_read_gpa(vgpu,
workload->ring_context_gpa +
RING_CTX_SIZE,
(void *)shadow_ring_context +
......@@ -245,7 +245,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
continue;
read:
intel_gvt_hypervisor_read_gpa(vgpu, gpa_base, dst, gpa_size);
intel_gvt_read_gpa(vgpu, gpa_base, dst, gpa_size);
gpa_base = context_gpa;
gpa_size = I915_GTT_PAGE_SIZE;
dst = context_base + (i << I915_GTT_PAGE_SHIFT);
......@@ -911,8 +911,7 @@ static void update_guest_pdps(struct intel_vgpu *vgpu,
gpa = ring_context_gpa + RING_CTX_OFF(pdps[0].val);
for (i = 0; i < 8; i++)
intel_gvt_hypervisor_write_gpa(vgpu,
gpa + i * 8, &pdp[7 - i], 4);
intel_gvt_write_gpa(vgpu, gpa + i * 8, &pdp[7 - i], 4);
}
static __maybe_unused bool
......@@ -1007,13 +1006,13 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
continue;
write:
intel_gvt_hypervisor_write_gpa(vgpu, gpa_base, src, gpa_size);
intel_gvt_write_gpa(vgpu, gpa_base, src, gpa_size);
gpa_base = context_gpa;
gpa_size = I915_GTT_PAGE_SIZE;
src = context_base + (i << I915_GTT_PAGE_SHIFT);
}
intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa +
intel_gvt_write_gpa(vgpu, workload->ring_context_gpa +
RING_CTX_OFF(ring_header.val), &workload->rb_tail, 4);
shadow_ring_context = (void *) ctx->lrc_reg_state;
......@@ -1028,7 +1027,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
}
#define COPY_REG(name) \
intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + \
intel_gvt_write_gpa(vgpu, workload->ring_context_gpa + \
RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)
COPY_REG(ctx_ctrl);
......@@ -1036,7 +1035,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
#undef COPY_REG
intel_gvt_hypervisor_write_gpa(vgpu,
intel_gvt_write_gpa(vgpu,
workload->ring_context_gpa +
sizeof(*shadow_ring_context),
(void *)shadow_ring_context +
......@@ -1573,7 +1572,7 @@ static void read_guest_pdps(struct intel_vgpu *vgpu,
gpa = ring_context_gpa + RING_CTX_OFF(pdps[0].val);
for (i = 0; i < 8; i++)
intel_gvt_hypervisor_read_gpa(vgpu,
intel_gvt_read_gpa(vgpu,
gpa + i * 8, &pdp[7 - i], 4);
}
......@@ -1644,10 +1643,10 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu,
return ERR_PTR(-EINVAL);
}
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
intel_gvt_read_gpa(vgpu, ring_context_gpa +
RING_CTX_OFF(ring_header.val), &head, 4);
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
intel_gvt_read_gpa(vgpu, ring_context_gpa +
RING_CTX_OFF(ring_tail.val), &tail, 4);
guest_head = head;
......@@ -1674,11 +1673,11 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu,
gvt_dbg_el("ring %s begin a new workload\n", engine->name);
/* record some ring buffer register values for scan and shadow */
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
intel_gvt_read_gpa(vgpu, ring_context_gpa +
RING_CTX_OFF(rb_start.val), &start, 4);
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
intel_gvt_read_gpa(vgpu, ring_context_gpa +
RING_CTX_OFF(rb_ctrl.val), &ctl, 4);
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
intel_gvt_read_gpa(vgpu, ring_context_gpa +
RING_CTX_OFF(ctx_ctrl.val), &ctx_ctl, 4);
if (!intel_gvt_ggtt_validate_range(vgpu, start,
......@@ -1701,9 +1700,9 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu,
workload->rb_ctl = ctl;
if (engine->id == RCS0) {
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
intel_gvt_read_gpa(vgpu, ring_context_gpa +
RING_CTX_OFF(bb_per_ctx_ptr.val), &per_ctx, 4);
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
intel_gvt_read_gpa(vgpu, ring_context_gpa +
RING_CTX_OFF(rcs_indirect_ctx.val), &indirect_ctx, 4);
workload->wa_ctx.indirect_ctx.guest_gma =
......
......@@ -377,7 +377,7 @@ TRACE_EVENT(render_mmio,
/* This part must be out of protection */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/i915/gvt
#define TRACE_INCLUDE_FILE trace
#include <trace/define_trace.h>
......@@ -293,7 +293,7 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
intel_vgpu_clean_opregion(vgpu);
intel_vgpu_reset_ggtt(vgpu, true);
intel_vgpu_clean_gtt(vgpu);
intel_gvt_hypervisor_detach_vgpu(vgpu);
intel_vgpu_detach_regions(vgpu);
intel_vgpu_free_resource(vgpu);
intel_vgpu_clean_mmio(vgpu);
intel_vgpu_dmabuf_cleanup(vgpu);
......@@ -370,8 +370,8 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
struct intel_vgpu *vgpu;
int ret;
gvt_dbg_core("handle %llu low %llu MB high %llu MB fence %llu\n",
param->handle, param->low_gm_sz, param->high_gm_sz,
gvt_dbg_core("low %llu MB high %llu MB fence %llu\n",
param->low_gm_sz, param->high_gm_sz,
param->fence_sz);
vgpu = vzalloc(sizeof(*vgpu));
......@@ -384,7 +384,6 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
goto out_free_vgpu;
vgpu->id = ret;
vgpu->handle = param->handle;
vgpu->gvt = gvt;
vgpu->sched_ctl.weight = param->weight;
mutex_init(&vgpu->vgpu_lock);
......@@ -405,13 +404,9 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
populate_pvinfo_page(vgpu);
ret = intel_gvt_hypervisor_attach_vgpu(vgpu);
if (ret)
goto out_clean_vgpu_resource;
ret = intel_vgpu_init_gtt(vgpu);
if (ret)
goto out_detach_hypervisor_vgpu;
goto out_clean_vgpu_resource;
ret = intel_vgpu_init_opregion(vgpu);
if (ret)
......@@ -431,14 +426,14 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
intel_gvt_debugfs_add_vgpu(vgpu);
ret = intel_gvt_hypervisor_set_opregion(vgpu);
ret = intel_gvt_set_opregion(vgpu);
if (ret)
goto out_clean_sched_policy;
if (IS_BROADWELL(dev_priv) || IS_BROXTON(dev_priv))
ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_B);
ret = intel_gvt_set_edid(vgpu, PORT_B);
else
ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_D);
ret = intel_gvt_set_edid(vgpu, PORT_D);
if (ret)
goto out_clean_sched_policy;
......@@ -454,8 +449,6 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
intel_vgpu_clean_opregion(vgpu);
out_clean_gtt:
intel_vgpu_clean_gtt(vgpu);
out_detach_hypervisor_vgpu:
intel_gvt_hypervisor_detach_vgpu(vgpu);
out_clean_vgpu_resource:
intel_vgpu_free_resource(vgpu);
out_clean_vgpu_mmio:
......@@ -483,7 +476,6 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
struct intel_vgpu_creation_params param;
struct intel_vgpu *vgpu;
param.handle = 0;
param.primary = 1;
param.low_gm_sz = type->low_gm_size;
param.high_gm_sz = type->high_gm_size;
......
......@@ -470,11 +470,6 @@ static void i915_driver_mmio_release(struct drm_i915_private *dev_priv)
pci_dev_put(dev_priv->bridge_dev);
}
static void intel_sanitize_options(struct drm_i915_private *dev_priv)
{
intel_gvt_sanitize_options(dev_priv);
}
/**
* i915_set_dma_info - set all relevant PCI dma info as configured for the
* platform
......@@ -568,8 +563,6 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
}
}
intel_sanitize_options(dev_priv);
/* needs to be done before ggtt probe */
intel_dram_edram_detect(dev_priv);
......
......@@ -400,6 +400,9 @@ struct i915_virtual_gpu {
struct mutex lock; /* serialises sending of g2v_notify command pkts */
bool active;
u32 caps;
u32 *initial_mmio;
u8 *initial_cfg_space;
struct list_head entry;
};
struct i915_selftest_stash {
......
......@@ -24,7 +24,10 @@
#include "i915_drv.h"
#include "i915_vgpu.h"
#include "intel_gvt.h"
#include "gvt/gvt.h"
#include "gem/i915_gem_dmabuf.h"
#include "gt/intel_context.h"
#include "gt/intel_ring.h"
#include "gt/shmem_utils.h"
/**
* DOC: Intel GVT-g host support
......@@ -41,6 +44,10 @@
* doc is available on https://01.org/group/2230/documentation-list.
*/
static LIST_HEAD(intel_gvt_devices);
static const struct intel_vgpu_ops *intel_gvt_ops;
static DEFINE_MUTEX(intel_gvt_mutex);
static bool is_supported_device(struct drm_i915_private *dev_priv)
{
if (IS_BROADWELL(dev_priv))
......@@ -59,32 +66,162 @@ static bool is_supported_device(struct drm_i915_private *dev_priv)
return false;
}
/**
* intel_gvt_sanitize_options - sanitize GVT related options
* @dev_priv: drm i915 private data
*
* This function is called at the i915 options sanitize stage.
*/
void intel_gvt_sanitize_options(struct drm_i915_private *dev_priv)
static void free_initial_hw_state(struct drm_i915_private *dev_priv)
{
struct i915_virtual_gpu *vgpu = &dev_priv->vgpu;
vfree(vgpu->initial_mmio);
vgpu->initial_mmio = NULL;
kfree(vgpu->initial_cfg_space);
vgpu->initial_cfg_space = NULL;
}
static void save_mmio(struct intel_gvt_mmio_table_iter *iter, u32 offset,
u32 size)
{
struct drm_i915_private *dev_priv = iter->i915;
u32 *mmio, i;
for (i = offset; i < offset + size; i += 4) {
mmio = iter->data + i;
*mmio = intel_uncore_read_notrace(to_gt(dev_priv)->uncore,
_MMIO(i));
}
}
static int handle_mmio(struct intel_gvt_mmio_table_iter *iter,
u32 offset, u32 size)
{
if (WARN_ON(!IS_ALIGNED(offset, 4)))
return -EINVAL;
save_mmio(iter, offset, size);
return 0;
}
static int save_initial_hw_state(struct drm_i915_private *dev_priv)
{
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
struct i915_virtual_gpu *vgpu = &dev_priv->vgpu;
struct intel_gvt_mmio_table_iter iter;
void *mem;
int i, ret;
mem = kzalloc(PCI_CFG_SPACE_EXP_SIZE, GFP_KERNEL);
if (!mem)
return -ENOMEM;
vgpu->initial_cfg_space = mem;
for (i = 0; i < PCI_CFG_SPACE_EXP_SIZE; i += 4)
pci_read_config_dword(pdev, i, mem + i);
mem = vzalloc(2 * SZ_1M);
if (!mem) {
ret = -ENOMEM;
goto err_mmio;
}
vgpu->initial_mmio = mem;
iter.i915 = dev_priv;
iter.data = vgpu->initial_mmio;
iter.handle_mmio_cb = handle_mmio;
ret = intel_gvt_iterate_mmio_table(&iter);
if (ret)
goto err_iterate;
return 0;
err_iterate:
vfree(vgpu->initial_mmio);
vgpu->initial_mmio = NULL;
err_mmio:
kfree(vgpu->initial_cfg_space);
vgpu->initial_cfg_space = NULL;
return ret;
}
static void intel_gvt_init_device(struct drm_i915_private *dev_priv)
{
if (!dev_priv->params.enable_gvt)
if (!dev_priv->params.enable_gvt) {
drm_dbg(&dev_priv->drm,
"GVT-g is disabled by kernel params\n");
return;
}
if (intel_vgpu_active(dev_priv)) {
drm_info(&dev_priv->drm, "GVT-g is disabled for guest\n");
goto bail;
return;
}
if (!is_supported_device(dev_priv)) {
drm_info(&dev_priv->drm,
"Unsupported device. GVT-g is disabled\n");
goto bail;
return;
}
if (intel_uc_wants_guc_submission(&to_gt(dev_priv)->uc)) {
drm_err(&dev_priv->drm,
"Graphics virtualization is not yet supported with GuC submission\n");
return;
}
return;
bail:
dev_priv->params.enable_gvt = 0;
if (save_initial_hw_state(dev_priv)) {
drm_dbg(&dev_priv->drm, "Failed to save initial HW state\n");
return;
}
if (intel_gvt_ops->init_device(dev_priv))
drm_dbg(&dev_priv->drm, "Fail to init GVT device\n");
}
static void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
{
if (dev_priv->gvt)
intel_gvt_ops->clean_device(dev_priv);
free_initial_hw_state(dev_priv);
}
int intel_gvt_set_ops(const struct intel_vgpu_ops *ops)
{
struct drm_i915_private *dev_priv;
mutex_lock(&intel_gvt_mutex);
if (intel_gvt_ops) {
mutex_unlock(&intel_gvt_mutex);
return -EINVAL;
}
intel_gvt_ops = ops;
list_for_each_entry(dev_priv, &intel_gvt_devices, vgpu.entry)
intel_gvt_init_device(dev_priv);
mutex_unlock(&intel_gvt_mutex);
return 0;
}
EXPORT_SYMBOL_NS_GPL(intel_gvt_set_ops, I915_GVT);
void intel_gvt_clear_ops(const struct intel_vgpu_ops *ops)
{
struct drm_i915_private *dev_priv;
mutex_lock(&intel_gvt_mutex);
if (intel_gvt_ops != ops) {
mutex_unlock(&intel_gvt_mutex);
return;
}
list_for_each_entry(dev_priv, &intel_gvt_devices, vgpu.entry)
intel_gvt_clean_device(dev_priv);
intel_gvt_ops = NULL;
mutex_unlock(&intel_gvt_mutex);
}
EXPORT_SYMBOL_NS_GPL(intel_gvt_clear_ops, I915_GVT);
/**
* intel_gvt_init - initialize GVT components
......@@ -98,41 +235,18 @@ void intel_gvt_sanitize_options(struct drm_i915_private *dev_priv)
*/
int intel_gvt_init(struct drm_i915_private *dev_priv)
{
int ret;
if (i915_inject_probe_failure(dev_priv))
return -ENODEV;
if (!dev_priv->params.enable_gvt) {
drm_dbg(&dev_priv->drm,
"GVT-g is disabled by kernel params\n");
return 0;
}
if (intel_uc_wants_guc_submission(&to_gt(dev_priv)->uc)) {
drm_err(&dev_priv->drm,
"i915 GVT-g loading failed due to Graphics virtualization is not yet supported with GuC submission\n");
return -EIO;
}
ret = intel_gvt_init_device(dev_priv);
if (ret) {
drm_dbg(&dev_priv->drm, "Fail to init GVT device\n");
goto bail;
}
return 0;
mutex_lock(&intel_gvt_mutex);
list_add_tail(&dev_priv->vgpu.entry, &intel_gvt_devices);
if (intel_gvt_ops)
intel_gvt_init_device(dev_priv);
mutex_unlock(&intel_gvt_mutex);
bail:
dev_priv->params.enable_gvt = 0;
return 0;
}
static inline bool intel_gvt_active(struct drm_i915_private *dev_priv)
{
return dev_priv->gvt;
}
/**
* intel_gvt_driver_remove - cleanup GVT components when i915 driver is
* unbinding
......@@ -143,10 +257,10 @@ static inline bool intel_gvt_active(struct drm_i915_private *dev_priv)
*/
void intel_gvt_driver_remove(struct drm_i915_private *dev_priv)
{
if (!intel_gvt_active(dev_priv))
return;
mutex_lock(&intel_gvt_mutex);
intel_gvt_clean_device(dev_priv);
list_del(&dev_priv->vgpu.entry);
mutex_unlock(&intel_gvt_mutex);
}
/**
......@@ -159,6 +273,46 @@ void intel_gvt_driver_remove(struct drm_i915_private *dev_priv)
*/
void intel_gvt_resume(struct drm_i915_private *dev_priv)
{
if (intel_gvt_active(dev_priv))
intel_gvt_pm_resume(dev_priv->gvt);
mutex_lock(&intel_gvt_mutex);
if (dev_priv->gvt)
intel_gvt_ops->pm_resume(dev_priv);
mutex_unlock(&intel_gvt_mutex);
}
/*
* Exported here so that the exports only get created when GVT support is
* actually enabled.
*/
EXPORT_SYMBOL_NS_GPL(i915_gem_object_alloc, I915_GVT);
EXPORT_SYMBOL_NS_GPL(i915_gem_object_create_shmem, I915_GVT);
EXPORT_SYMBOL_NS_GPL(i915_gem_object_init, I915_GVT);
EXPORT_SYMBOL_NS_GPL(i915_gem_object_ggtt_pin_ww, I915_GVT);
EXPORT_SYMBOL_NS_GPL(i915_gem_object_pin_map, I915_GVT);
EXPORT_SYMBOL_NS_GPL(i915_gem_object_set_to_cpu_domain, I915_GVT);
EXPORT_SYMBOL_NS_GPL(__i915_gem_object_flush_map, I915_GVT);
EXPORT_SYMBOL_NS_GPL(__i915_gem_object_set_pages, I915_GVT);
EXPORT_SYMBOL_NS_GPL(i915_gem_gtt_insert, I915_GVT);
EXPORT_SYMBOL_NS_GPL(i915_gem_prime_export, I915_GVT);
EXPORT_SYMBOL_NS_GPL(i915_gem_ww_ctx_init, I915_GVT);
EXPORT_SYMBOL_NS_GPL(i915_gem_ww_ctx_backoff, I915_GVT);
EXPORT_SYMBOL_NS_GPL(i915_gem_ww_ctx_fini, I915_GVT);
EXPORT_SYMBOL_NS_GPL(i915_ppgtt_create, I915_GVT);
EXPORT_SYMBOL_NS_GPL(i915_request_add, I915_GVT);
EXPORT_SYMBOL_NS_GPL(i915_request_create, I915_GVT);
EXPORT_SYMBOL_NS_GPL(i915_request_wait, I915_GVT);
EXPORT_SYMBOL_NS_GPL(i915_reserve_fence, I915_GVT);
EXPORT_SYMBOL_NS_GPL(i915_unreserve_fence, I915_GVT);
EXPORT_SYMBOL_NS_GPL(i915_vm_release, I915_GVT);
EXPORT_SYMBOL_NS_GPL(_i915_vma_move_to_active, I915_GVT);
EXPORT_SYMBOL_NS_GPL(intel_context_create, I915_GVT);
EXPORT_SYMBOL_NS_GPL(__intel_context_do_pin, I915_GVT);
EXPORT_SYMBOL_NS_GPL(__intel_context_do_unpin, I915_GVT);
EXPORT_SYMBOL_NS_GPL(intel_ring_begin, I915_GVT);
EXPORT_SYMBOL_NS_GPL(intel_runtime_pm_get, I915_GVT);
EXPORT_SYMBOL_NS_GPL(intel_runtime_pm_put_unchecked, I915_GVT);
EXPORT_SYMBOL_NS_GPL(intel_uncore_forcewake_for_reg, I915_GVT);
EXPORT_SYMBOL_NS_GPL(intel_uncore_forcewake_get, I915_GVT);
EXPORT_SYMBOL_NS_GPL(intel_uncore_forcewake_put, I915_GVT);
EXPORT_SYMBOL_NS_GPL(shmem_pin_map, I915_GVT);
EXPORT_SYMBOL_NS_GPL(shmem_unpin_map, I915_GVT);
EXPORT_SYMBOL_NS_GPL(__px_dma, I915_GVT);
......@@ -24,16 +24,34 @@
#ifndef _INTEL_GVT_H_
#define _INTEL_GVT_H_
#include <linux/types.h>
struct drm_i915_private;
#ifdef CONFIG_DRM_I915_GVT
struct intel_gvt_mmio_table_iter {
struct drm_i915_private *i915;
void *data;
int (*handle_mmio_cb)(struct intel_gvt_mmio_table_iter *iter,
u32 offset, u32 size);
};
int intel_gvt_init(struct drm_i915_private *dev_priv);
void intel_gvt_driver_remove(struct drm_i915_private *dev_priv);
int intel_gvt_init_device(struct drm_i915_private *dev_priv);
void intel_gvt_clean_device(struct drm_i915_private *dev_priv);
int intel_gvt_init_host(void);
void intel_gvt_sanitize_options(struct drm_i915_private *dev_priv);
void intel_gvt_resume(struct drm_i915_private *dev_priv);
int intel_gvt_iterate_mmio_table(struct intel_gvt_mmio_table_iter *iter);
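/*
 * Hooks implemented by the (modular) GVT device model, typically the KVMGT
 * module. It registers them via intel_gvt_set_ops(), which then initializes
 * GVT on every listed i915 device that has enable_gvt set.
 */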
struct intel_vgpu_ops {
int (*init_device)(struct drm_i915_private *dev_priv);
void (*clean_device)(struct drm_i915_private *dev_priv);
void (*pm_resume)(struct drm_i915_private *i915);
};
int intel_gvt_set_ops(const struct intel_vgpu_ops *ops);
void intel_gvt_clear_ops(const struct intel_vgpu_ops *ops);
#else
static inline int intel_gvt_init(struct drm_i915_private *dev_priv)
{
......@@ -44,12 +62,16 @@ static inline void intel_gvt_driver_remove(struct drm_i915_private *dev_priv)
{
}
static inline void intel_gvt_sanitize_options(struct drm_i915_private *dev_priv)
static inline void intel_gvt_resume(struct drm_i915_private *dev_priv)
{
}
static inline void intel_gvt_resume(struct drm_i915_private *dev_priv)
struct intel_gvt_mmio_table_iter {
};
static inline int intel_gvt_iterate_mmio_table(struct intel_gvt_mmio_table_iter *iter)
{
return 0;
}
#endif
......
// SPDX-License-Identifier: MIT
/*
* Copyright © 2020 Intel Corporation
*/
#include "display/intel_dmc_regs.h"
#include "display/vlv_dsi_pll_regs.h"
#include "gt/intel_gt_regs.h"
#include "gvt/gvt.h"
#include "i915_drv.h"
#include "i915_pvinfo.h"
#include "i915_reg.h"
#include "intel_gvt.h"
#include "intel_mchbar_regs.h"
#define MMIO_F(reg, s) do { \
int ret; \
ret = iter->handle_mmio_cb(iter, i915_mmio_reg_offset(reg), s); \
if (ret) \
return ret; \
} while (0)
#define MMIO_D(reg) MMIO_F(reg, 4)
#define MMIO_RING_F(prefix, s) do { \
MMIO_F(prefix(RENDER_RING_BASE), s); \
MMIO_F(prefix(BLT_RING_BASE), s); \
MMIO_F(prefix(GEN6_BSD_RING_BASE), s); \
MMIO_F(prefix(VEBOX_RING_BASE), s); \
if (HAS_ENGINE(to_gt(iter->i915), VCS1)) \
MMIO_F(prefix(GEN8_BSD2_RING_BASE), s); \
} while (0)
#define MMIO_RING_D(prefix) \
MMIO_RING_F(prefix, 4)
static int iterate_generic_mmio(struct intel_gvt_mmio_table_iter *iter)
{
struct drm_i915_private *dev_priv = iter->i915;
MMIO_RING_D(RING_IMR);
MMIO_D(SDEIMR);
MMIO_D(SDEIER);
MMIO_D(SDEIIR);
MMIO_D(SDEISR);
MMIO_RING_D(RING_HWSTAM);
MMIO_D(BSD_HWS_PGA_GEN7);
MMIO_D(BLT_HWS_PGA_GEN7);
MMIO_D(VEBOX_HWS_PGA_GEN7);
#define RING_REG(base) _MMIO((base) + 0x28)
MMIO_RING_D(RING_REG);
#undef RING_REG
#define RING_REG(base) _MMIO((base) + 0x134)
MMIO_RING_D(RING_REG);
#undef RING_REG
#define RING_REG(base) _MMIO((base) + 0x6c)
MMIO_RING_D(RING_REG);
#undef RING_REG
MMIO_D(_MMIO(0x2148));
MMIO_D(CCID(RENDER_RING_BASE));
MMIO_D(_MMIO(0x12198));
MMIO_D(GEN7_CXT_SIZE);
MMIO_RING_D(RING_TAIL);
MMIO_RING_D(RING_HEAD);
MMIO_RING_D(RING_CTL);
MMIO_RING_D(RING_ACTHD);
MMIO_RING_D(RING_START);
/* RING MODE */
#define RING_REG(base) _MMIO((base) + 0x29c)
MMIO_RING_D(RING_REG);
#undef RING_REG
MMIO_RING_D(RING_MI_MODE);
MMIO_RING_D(RING_INSTPM);
MMIO_RING_D(RING_TIMESTAMP);
MMIO_RING_D(RING_TIMESTAMP_UDW);
MMIO_D(GEN7_GT_MODE);
MMIO_D(CACHE_MODE_0_GEN7);
MMIO_D(CACHE_MODE_1);
MMIO_D(CACHE_MODE_0);
MMIO_D(_MMIO(0x2124));
MMIO_D(_MMIO(0x20dc));
MMIO_D(_3D_CHICKEN3);
MMIO_D(_MMIO(0x2088));
MMIO_D(FF_SLICE_CS_CHICKEN2);
MMIO_D(_MMIO(0x2470));
MMIO_D(GAM_ECOCHK);
MMIO_D(GEN7_COMMON_SLICE_CHICKEN1);
MMIO_D(COMMON_SLICE_CHICKEN2);
MMIO_D(_MMIO(0x9030));
MMIO_D(_MMIO(0x20a0));
MMIO_D(_MMIO(0x2420));
MMIO_D(_MMIO(0x2430));
MMIO_D(_MMIO(0x2434));
MMIO_D(_MMIO(0x2438));
MMIO_D(_MMIO(0x243c));
MMIO_D(_MMIO(0x7018));
MMIO_D(HALF_SLICE_CHICKEN3);
MMIO_D(GEN7_HALF_SLICE_CHICKEN1);
/* display */
MMIO_F(_MMIO(0x60220), 0x20);
MMIO_D(_MMIO(0x602a0));
MMIO_D(_MMIO(0x65050));
MMIO_D(_MMIO(0x650b4));
MMIO_D(_MMIO(0xc4040));
MMIO_D(DERRMR);
MMIO_D(PIPEDSL(PIPE_A));
MMIO_D(PIPEDSL(PIPE_B));
MMIO_D(PIPEDSL(PIPE_C));
MMIO_D(PIPEDSL(_PIPE_EDP));
MMIO_D(PIPECONF(PIPE_A));
MMIO_D(PIPECONF(PIPE_B));
MMIO_D(PIPECONF(PIPE_C));
MMIO_D(PIPECONF(_PIPE_EDP));
MMIO_D(PIPESTAT(PIPE_A));
MMIO_D(PIPESTAT(PIPE_B));
MMIO_D(PIPESTAT(PIPE_C));
MMIO_D(PIPESTAT(_PIPE_EDP));
MMIO_D(PIPE_FLIPCOUNT_G4X(PIPE_A));
MMIO_D(PIPE_FLIPCOUNT_G4X(PIPE_B));
MMIO_D(PIPE_FLIPCOUNT_G4X(PIPE_C));
MMIO_D(PIPE_FLIPCOUNT_G4X(_PIPE_EDP));
MMIO_D(PIPE_FRMCOUNT_G4X(PIPE_A));
MMIO_D(PIPE_FRMCOUNT_G4X(PIPE_B));
MMIO_D(PIPE_FRMCOUNT_G4X(PIPE_C));
MMIO_D(PIPE_FRMCOUNT_G4X(_PIPE_EDP));
MMIO_D(CURCNTR(PIPE_A));
MMIO_D(CURCNTR(PIPE_B));
MMIO_D(CURCNTR(PIPE_C));
MMIO_D(CURPOS(PIPE_A));
MMIO_D(CURPOS(PIPE_B));
MMIO_D(CURPOS(PIPE_C));
MMIO_D(CURBASE(PIPE_A));
MMIO_D(CURBASE(PIPE_B));
MMIO_D(CURBASE(PIPE_C));
MMIO_D(CUR_FBC_CTL(PIPE_A));
MMIO_D(CUR_FBC_CTL(PIPE_B));
MMIO_D(CUR_FBC_CTL(PIPE_C));
MMIO_D(_MMIO(0x700ac));
MMIO_D(_MMIO(0x710ac));
MMIO_D(_MMIO(0x720ac));
MMIO_D(_MMIO(0x70090));
MMIO_D(_MMIO(0x70094));
MMIO_D(_MMIO(0x70098));
MMIO_D(_MMIO(0x7009c));
MMIO_D(DSPCNTR(PIPE_A));
MMIO_D(DSPADDR(PIPE_A));
MMIO_D(DSPSTRIDE(PIPE_A));
MMIO_D(DSPPOS(PIPE_A));
MMIO_D(DSPSIZE(PIPE_A));
MMIO_D(DSPSURF(PIPE_A));
MMIO_D(DSPOFFSET(PIPE_A));
MMIO_D(DSPSURFLIVE(PIPE_A));
MMIO_D(REG_50080(PIPE_A, PLANE_PRIMARY));
MMIO_D(DSPCNTR(PIPE_B));
MMIO_D(DSPADDR(PIPE_B));
MMIO_D(DSPSTRIDE(PIPE_B));
MMIO_D(DSPPOS(PIPE_B));
MMIO_D(DSPSIZE(PIPE_B));
MMIO_D(DSPSURF(PIPE_B));
MMIO_D(DSPOFFSET(PIPE_B));
MMIO_D(DSPSURFLIVE(PIPE_B));
MMIO_D(REG_50080(PIPE_B, PLANE_PRIMARY));
MMIO_D(DSPCNTR(PIPE_C));
MMIO_D(DSPADDR(PIPE_C));
MMIO_D(DSPSTRIDE(PIPE_C));
MMIO_D(DSPPOS(PIPE_C));
MMIO_D(DSPSIZE(PIPE_C));
MMIO_D(DSPSURF(PIPE_C));
MMIO_D(DSPOFFSET(PIPE_C));
MMIO_D(DSPSURFLIVE(PIPE_C));
MMIO_D(REG_50080(PIPE_C, PLANE_PRIMARY));
MMIO_D(SPRCTL(PIPE_A));
MMIO_D(SPRLINOFF(PIPE_A));
MMIO_D(SPRSTRIDE(PIPE_A));
MMIO_D(SPRPOS(PIPE_A));
MMIO_D(SPRSIZE(PIPE_A));
MMIO_D(SPRKEYVAL(PIPE_A));
MMIO_D(SPRKEYMSK(PIPE_A));
MMIO_D(SPRSURF(PIPE_A));
MMIO_D(SPRKEYMAX(PIPE_A));
MMIO_D(SPROFFSET(PIPE_A));
MMIO_D(SPRSCALE(PIPE_A));
MMIO_D(SPRSURFLIVE(PIPE_A));
MMIO_D(REG_50080(PIPE_A, PLANE_SPRITE0));
MMIO_D(SPRCTL(PIPE_B));
MMIO_D(SPRLINOFF(PIPE_B));
MMIO_D(SPRSTRIDE(PIPE_B));
MMIO_D(SPRPOS(PIPE_B));
MMIO_D(SPRSIZE(PIPE_B));
MMIO_D(SPRKEYVAL(PIPE_B));
MMIO_D(SPRKEYMSK(PIPE_B));
MMIO_D(SPRSURF(PIPE_B));
MMIO_D(SPRKEYMAX(PIPE_B));
MMIO_D(SPROFFSET(PIPE_B));
MMIO_D(SPRSCALE(PIPE_B));
MMIO_D(SPRSURFLIVE(PIPE_B));
MMIO_D(REG_50080(PIPE_B, PLANE_SPRITE0));
MMIO_D(SPRCTL(PIPE_C));
MMIO_D(SPRLINOFF(PIPE_C));
MMIO_D(SPRSTRIDE(PIPE_C));
MMIO_D(SPRPOS(PIPE_C));
MMIO_D(SPRSIZE(PIPE_C));
MMIO_D(SPRKEYVAL(PIPE_C));
MMIO_D(SPRKEYMSK(PIPE_C));
MMIO_D(SPRSURF(PIPE_C));
MMIO_D(SPRKEYMAX(PIPE_C));
MMIO_D(SPROFFSET(PIPE_C));
MMIO_D(SPRSCALE(PIPE_C));
MMIO_D(SPRSURFLIVE(PIPE_C));
MMIO_D(REG_50080(PIPE_C, PLANE_SPRITE0));
MMIO_D(HTOTAL(TRANSCODER_A));
MMIO_D(HBLANK(TRANSCODER_A));
MMIO_D(HSYNC(TRANSCODER_A));
MMIO_D(VTOTAL(TRANSCODER_A));
MMIO_D(VBLANK(TRANSCODER_A));
MMIO_D(VSYNC(TRANSCODER_A));
MMIO_D(BCLRPAT(TRANSCODER_A));
MMIO_D(VSYNCSHIFT(TRANSCODER_A));
MMIO_D(PIPESRC(TRANSCODER_A));
MMIO_D(HTOTAL(TRANSCODER_B));
MMIO_D(HBLANK(TRANSCODER_B));
MMIO_D(HSYNC(TRANSCODER_B));
MMIO_D(VTOTAL(TRANSCODER_B));
MMIO_D(VBLANK(TRANSCODER_B));
MMIO_D(VSYNC(TRANSCODER_B));
MMIO_D(BCLRPAT(TRANSCODER_B));
MMIO_D(VSYNCSHIFT(TRANSCODER_B));
MMIO_D(PIPESRC(TRANSCODER_B));
MMIO_D(HTOTAL(TRANSCODER_C));
MMIO_D(HBLANK(TRANSCODER_C));
MMIO_D(HSYNC(TRANSCODER_C));
MMIO_D(VTOTAL(TRANSCODER_C));
MMIO_D(VBLANK(TRANSCODER_C));
MMIO_D(VSYNC(TRANSCODER_C));
MMIO_D(BCLRPAT(TRANSCODER_C));
MMIO_D(VSYNCSHIFT(TRANSCODER_C));
MMIO_D(PIPESRC(TRANSCODER_C));
MMIO_D(HTOTAL(TRANSCODER_EDP));
MMIO_D(HBLANK(TRANSCODER_EDP));
MMIO_D(HSYNC(TRANSCODER_EDP));
MMIO_D(VTOTAL(TRANSCODER_EDP));
MMIO_D(VBLANK(TRANSCODER_EDP));
MMIO_D(VSYNC(TRANSCODER_EDP));
MMIO_D(BCLRPAT(TRANSCODER_EDP));
MMIO_D(VSYNCSHIFT(TRANSCODER_EDP));
MMIO_D(PIPE_DATA_M1(TRANSCODER_A));
MMIO_D(PIPE_DATA_N1(TRANSCODER_A));
MMIO_D(PIPE_DATA_M2(TRANSCODER_A));
MMIO_D(PIPE_DATA_N2(TRANSCODER_A));
MMIO_D(PIPE_LINK_M1(TRANSCODER_A));
MMIO_D(PIPE_LINK_N1(TRANSCODER_A));
MMIO_D(PIPE_LINK_M2(TRANSCODER_A));
MMIO_D(PIPE_LINK_N2(TRANSCODER_A));
MMIO_D(PIPE_DATA_M1(TRANSCODER_B));
MMIO_D(PIPE_DATA_N1(TRANSCODER_B));
MMIO_D(PIPE_DATA_M2(TRANSCODER_B));
MMIO_D(PIPE_DATA_N2(TRANSCODER_B));
MMIO_D(PIPE_LINK_M1(TRANSCODER_B));
MMIO_D(PIPE_LINK_N1(TRANSCODER_B));
MMIO_D(PIPE_LINK_M2(TRANSCODER_B));
MMIO_D(PIPE_LINK_N2(TRANSCODER_B));
MMIO_D(PIPE_DATA_M1(TRANSCODER_C));
MMIO_D(PIPE_DATA_N1(TRANSCODER_C));
MMIO_D(PIPE_DATA_M2(TRANSCODER_C));
MMIO_D(PIPE_DATA_N2(TRANSCODER_C));
MMIO_D(PIPE_LINK_M1(TRANSCODER_C));
MMIO_D(PIPE_LINK_N1(TRANSCODER_C));
MMIO_D(PIPE_LINK_M2(TRANSCODER_C));
MMIO_D(PIPE_LINK_N2(TRANSCODER_C));
MMIO_D(PIPE_DATA_M1(TRANSCODER_EDP));
MMIO_D(PIPE_DATA_N1(TRANSCODER_EDP));
MMIO_D(PIPE_DATA_M2(TRANSCODER_EDP));
MMIO_D(PIPE_DATA_N2(TRANSCODER_EDP));
MMIO_D(PIPE_LINK_M1(TRANSCODER_EDP));
MMIO_D(PIPE_LINK_N1(TRANSCODER_EDP));
MMIO_D(PIPE_LINK_M2(TRANSCODER_EDP));
MMIO_D(PIPE_LINK_N2(TRANSCODER_EDP));
MMIO_D(PF_CTL(PIPE_A));
MMIO_D(PF_WIN_SZ(PIPE_A));
MMIO_D(PF_WIN_POS(PIPE_A));
MMIO_D(PF_VSCALE(PIPE_A));
MMIO_D(PF_HSCALE(PIPE_A));
MMIO_D(PF_CTL(PIPE_B));
MMIO_D(PF_WIN_SZ(PIPE_B));
MMIO_D(PF_WIN_POS(PIPE_B));
MMIO_D(PF_VSCALE(PIPE_B));
MMIO_D(PF_HSCALE(PIPE_B));
MMIO_D(PF_CTL(PIPE_C));
MMIO_D(PF_WIN_SZ(PIPE_C));
MMIO_D(PF_WIN_POS(PIPE_C));
MMIO_D(PF_VSCALE(PIPE_C));
MMIO_D(PF_HSCALE(PIPE_C));
MMIO_D(WM0_PIPE_ILK(PIPE_A));
MMIO_D(WM0_PIPE_ILK(PIPE_B));
MMIO_D(WM0_PIPE_ILK(PIPE_C));
MMIO_D(WM1_LP_ILK);
MMIO_D(WM2_LP_ILK);
MMIO_D(WM3_LP_ILK);
MMIO_D(WM1S_LP_ILK);
MMIO_D(WM2S_LP_IVB);
MMIO_D(WM3S_LP_IVB);
MMIO_D(BLC_PWM_CPU_CTL2);
MMIO_D(BLC_PWM_CPU_CTL);
MMIO_D(BLC_PWM_PCH_CTL1);
MMIO_D(BLC_PWM_PCH_CTL2);
MMIO_D(_MMIO(0x48268));
MMIO_F(PCH_GMBUS0, 4 * 4);
MMIO_F(PCH_GPIO_BASE, 6 * 4);
MMIO_F(_MMIO(0xe4f00), 0x28);
MMIO_D(_MMIO(_PCH_TRANSACONF));
MMIO_D(_MMIO(_PCH_TRANSBCONF));
MMIO_D(FDI_RX_IIR(PIPE_A));
MMIO_D(FDI_RX_IIR(PIPE_B));
MMIO_D(FDI_RX_IIR(PIPE_C));
MMIO_D(FDI_RX_IMR(PIPE_A));
MMIO_D(FDI_RX_IMR(PIPE_B));
MMIO_D(FDI_RX_IMR(PIPE_C));
MMIO_D(FDI_RX_CTL(PIPE_A));
MMIO_D(FDI_RX_CTL(PIPE_B));
MMIO_D(FDI_RX_CTL(PIPE_C));
MMIO_D(_MMIO(_PCH_TRANS_HTOTAL_A));
MMIO_D(_MMIO(_PCH_TRANS_HBLANK_A));
MMIO_D(_MMIO(_PCH_TRANS_HSYNC_A));
MMIO_D(_MMIO(_PCH_TRANS_VTOTAL_A));
MMIO_D(_MMIO(_PCH_TRANS_VBLANK_A));
MMIO_D(_MMIO(_PCH_TRANS_VSYNC_A));
MMIO_D(_MMIO(_PCH_TRANS_VSYNCSHIFT_A));
MMIO_D(_MMIO(_PCH_TRANS_HTOTAL_B));
MMIO_D(_MMIO(_PCH_TRANS_HBLANK_B));
MMIO_D(_MMIO(_PCH_TRANS_HSYNC_B));
MMIO_D(_MMIO(_PCH_TRANS_VTOTAL_B));
MMIO_D(_MMIO(_PCH_TRANS_VBLANK_B));
MMIO_D(_MMIO(_PCH_TRANS_VSYNC_B));
MMIO_D(_MMIO(_PCH_TRANS_VSYNCSHIFT_B));
MMIO_D(_MMIO(_PCH_TRANSA_DATA_M1));
MMIO_D(_MMIO(_PCH_TRANSA_DATA_N1));
MMIO_D(_MMIO(_PCH_TRANSA_DATA_M2));
MMIO_D(_MMIO(_PCH_TRANSA_DATA_N2));
MMIO_D(_MMIO(_PCH_TRANSA_LINK_M1));
MMIO_D(_MMIO(_PCH_TRANSA_LINK_N1));
MMIO_D(_MMIO(_PCH_TRANSA_LINK_M2));
MMIO_D(_MMIO(_PCH_TRANSA_LINK_N2));
MMIO_D(TRANS_DP_CTL(PIPE_A));
MMIO_D(TRANS_DP_CTL(PIPE_B));
MMIO_D(TRANS_DP_CTL(PIPE_C));
MMIO_D(TVIDEO_DIP_CTL(PIPE_A));
MMIO_D(TVIDEO_DIP_DATA(PIPE_A));
MMIO_D(TVIDEO_DIP_GCP(PIPE_A));
MMIO_D(TVIDEO_DIP_CTL(PIPE_B));
MMIO_D(TVIDEO_DIP_DATA(PIPE_B));
MMIO_D(TVIDEO_DIP_GCP(PIPE_B));
MMIO_D(TVIDEO_DIP_CTL(PIPE_C));
MMIO_D(TVIDEO_DIP_DATA(PIPE_C));
MMIO_D(TVIDEO_DIP_GCP(PIPE_C));
MMIO_D(_MMIO(_FDI_RXA_MISC));
MMIO_D(_MMIO(_FDI_RXB_MISC));
MMIO_D(_MMIO(_FDI_RXA_TUSIZE1));
MMIO_D(_MMIO(_FDI_RXA_TUSIZE2));
MMIO_D(_MMIO(_FDI_RXB_TUSIZE1));
MMIO_D(_MMIO(_FDI_RXB_TUSIZE2));
MMIO_D(PCH_PP_CONTROL);
MMIO_D(PCH_PP_DIVISOR);
MMIO_D(PCH_PP_STATUS);
MMIO_D(PCH_LVDS);
MMIO_D(_MMIO(_PCH_DPLL_A));
MMIO_D(_MMIO(_PCH_DPLL_B));
MMIO_D(_MMIO(_PCH_FPA0));
MMIO_D(_MMIO(_PCH_FPA1));
MMIO_D(_MMIO(_PCH_FPB0));
MMIO_D(_MMIO(_PCH_FPB1));
MMIO_D(PCH_DREF_CONTROL);
MMIO_D(PCH_RAWCLK_FREQ);
MMIO_D(PCH_DPLL_SEL);
MMIO_D(_MMIO(0x61208));
MMIO_D(_MMIO(0x6120c));
MMIO_D(PCH_PP_ON_DELAYS);
MMIO_D(PCH_PP_OFF_DELAYS);
MMIO_D(_MMIO(0xe651c));
MMIO_D(_MMIO(0xe661c));
MMIO_D(_MMIO(0xe671c));
MMIO_D(_MMIO(0xe681c));
MMIO_D(_MMIO(0xe6c04));
MMIO_D(_MMIO(0xe6e1c));
MMIO_D(PCH_PORT_HOTPLUG);
MMIO_D(LCPLL_CTL);
MMIO_D(FUSE_STRAP);
MMIO_D(DIGITAL_PORT_HOTPLUG_CNTRL);
MMIO_D(DISP_ARB_CTL);
MMIO_D(DISP_ARB_CTL2);
MMIO_D(ILK_DISPLAY_CHICKEN1);
MMIO_D(ILK_DISPLAY_CHICKEN2);
MMIO_D(ILK_DSPCLK_GATE_D);
MMIO_D(SOUTH_CHICKEN1);
MMIO_D(SOUTH_CHICKEN2);
MMIO_D(_MMIO(_TRANSA_CHICKEN1));
MMIO_D(_MMIO(_TRANSB_CHICKEN1));
MMIO_D(SOUTH_DSPCLK_GATE_D);
MMIO_D(_MMIO(_TRANSA_CHICKEN2));
MMIO_D(_MMIO(_TRANSB_CHICKEN2));
MMIO_D(ILK_DPFC_CB_BASE(INTEL_FBC_A));
MMIO_D(ILK_DPFC_CONTROL(INTEL_FBC_A));
MMIO_D(ILK_DPFC_RECOMP_CTL(INTEL_FBC_A));
MMIO_D(ILK_DPFC_STATUS(INTEL_FBC_A));
MMIO_D(ILK_DPFC_FENCE_YOFF(INTEL_FBC_A));
MMIO_D(ILK_DPFC_CHICKEN(INTEL_FBC_A));
MMIO_D(ILK_FBC_RT_BASE);
MMIO_D(IPS_CTL);
MMIO_D(PIPE_CSC_COEFF_RY_GY(PIPE_A));
MMIO_D(PIPE_CSC_COEFF_BY(PIPE_A));
MMIO_D(PIPE_CSC_COEFF_RU_GU(PIPE_A));
MMIO_D(PIPE_CSC_COEFF_BU(PIPE_A));
MMIO_D(PIPE_CSC_COEFF_RV_GV(PIPE_A));
MMIO_D(PIPE_CSC_COEFF_BV(PIPE_A));
MMIO_D(PIPE_CSC_MODE(PIPE_A));
MMIO_D(PIPE_CSC_PREOFF_HI(PIPE_A));
MMIO_D(PIPE_CSC_PREOFF_ME(PIPE_A));
MMIO_D(PIPE_CSC_PREOFF_LO(PIPE_A));
MMIO_D(PIPE_CSC_POSTOFF_HI(PIPE_A));
MMIO_D(PIPE_CSC_POSTOFF_ME(PIPE_A));
MMIO_D(PIPE_CSC_POSTOFF_LO(PIPE_A));
MMIO_D(PIPE_CSC_COEFF_RY_GY(PIPE_B));
MMIO_D(PIPE_CSC_COEFF_BY(PIPE_B));
MMIO_D(PIPE_CSC_COEFF_RU_GU(PIPE_B));
MMIO_D(PIPE_CSC_COEFF_BU(PIPE_B));
MMIO_D(PIPE_CSC_COEFF_RV_GV(PIPE_B));
MMIO_D(PIPE_CSC_COEFF_BV(PIPE_B));
MMIO_D(PIPE_CSC_MODE(PIPE_B));
MMIO_D(PIPE_CSC_PREOFF_HI(PIPE_B));
MMIO_D(PIPE_CSC_PREOFF_ME(PIPE_B));
MMIO_D(PIPE_CSC_PREOFF_LO(PIPE_B));
MMIO_D(PIPE_CSC_POSTOFF_HI(PIPE_B));
MMIO_D(PIPE_CSC_POSTOFF_ME(PIPE_B));
MMIO_D(PIPE_CSC_POSTOFF_LO(PIPE_B));
MMIO_D(PIPE_CSC_COEFF_RY_GY(PIPE_C));
MMIO_D(PIPE_CSC_COEFF_BY(PIPE_C));
MMIO_D(PIPE_CSC_COEFF_RU_GU(PIPE_C));
MMIO_D(PIPE_CSC_COEFF_BU(PIPE_C));
MMIO_D(PIPE_CSC_COEFF_RV_GV(PIPE_C));
MMIO_D(PIPE_CSC_COEFF_BV(PIPE_C));
MMIO_D(PIPE_CSC_MODE(PIPE_C));
MMIO_D(PIPE_CSC_PREOFF_HI(PIPE_C));
MMIO_D(PIPE_CSC_PREOFF_ME(PIPE_C));
MMIO_D(PIPE_CSC_PREOFF_LO(PIPE_C));
MMIO_D(PIPE_CSC_POSTOFF_HI(PIPE_C));
MMIO_D(PIPE_CSC_POSTOFF_ME(PIPE_C));
MMIO_D(PIPE_CSC_POSTOFF_LO(PIPE_C));
MMIO_D(PREC_PAL_INDEX(PIPE_A));
MMIO_D(PREC_PAL_DATA(PIPE_A));
MMIO_F(PREC_PAL_GC_MAX(PIPE_A, 0), 4 * 3);
MMIO_D(PREC_PAL_INDEX(PIPE_B));
MMIO_D(PREC_PAL_DATA(PIPE_B));
MMIO_F(PREC_PAL_GC_MAX(PIPE_B, 0), 4 * 3);
MMIO_D(PREC_PAL_INDEX(PIPE_C));
MMIO_D(PREC_PAL_DATA(PIPE_C));
MMIO_F(PREC_PAL_GC_MAX(PIPE_C, 0), 4 * 3);
MMIO_D(_MMIO(0x60110));
MMIO_D(_MMIO(0x61110));
MMIO_F(_MMIO(0x70400), 0x40);
MMIO_F(_MMIO(0x71400), 0x40);
MMIO_F(_MMIO(0x72400), 0x40);
MMIO_D(WM_LINETIME(PIPE_A));
MMIO_D(WM_LINETIME(PIPE_B));
MMIO_D(WM_LINETIME(PIPE_C));
MMIO_D(SPLL_CTL);
MMIO_D(_MMIO(_WRPLL_CTL1));
MMIO_D(_MMIO(_WRPLL_CTL2));
MMIO_D(PORT_CLK_SEL(PORT_A));
MMIO_D(PORT_CLK_SEL(PORT_B));
MMIO_D(PORT_CLK_SEL(PORT_C));
MMIO_D(PORT_CLK_SEL(PORT_D));
MMIO_D(PORT_CLK_SEL(PORT_E));
MMIO_D(TRANS_CLK_SEL(TRANSCODER_A));
MMIO_D(TRANS_CLK_SEL(TRANSCODER_B));
MMIO_D(TRANS_CLK_SEL(TRANSCODER_C));
MMIO_D(HSW_NDE_RSTWRN_OPT);
MMIO_D(_MMIO(0x46508));
MMIO_D(_MMIO(0x49080));
MMIO_D(_MMIO(0x49180));
MMIO_D(_MMIO(0x49280));
MMIO_F(_MMIO(0x49090), 0x14);
MMIO_F(_MMIO(0x49190), 0x14);
MMIO_F(_MMIO(0x49290), 0x14);
MMIO_D(GAMMA_MODE(PIPE_A));
MMIO_D(GAMMA_MODE(PIPE_B));
MMIO_D(GAMMA_MODE(PIPE_C));
MMIO_D(PIPE_MULT(PIPE_A));
MMIO_D(PIPE_MULT(PIPE_B));
MMIO_D(PIPE_MULT(PIPE_C));
MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_A));
MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_B));
MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_C));
MMIO_D(SFUSE_STRAP);
MMIO_D(SBI_ADDR);
MMIO_D(SBI_DATA);
MMIO_D(SBI_CTL_STAT);
MMIO_D(PIXCLK_GATE);
MMIO_F(_MMIO(_DPA_AUX_CH_CTL), 6 * 4);
MMIO_D(DDI_BUF_CTL(PORT_A));
MMIO_D(DDI_BUF_CTL(PORT_B));
MMIO_D(DDI_BUF_CTL(PORT_C));
MMIO_D(DDI_BUF_CTL(PORT_D));
MMIO_D(DDI_BUF_CTL(PORT_E));
MMIO_D(DP_TP_CTL(PORT_A));
MMIO_D(DP_TP_CTL(PORT_B));
MMIO_D(DP_TP_CTL(PORT_C));
MMIO_D(DP_TP_CTL(PORT_D));
MMIO_D(DP_TP_CTL(PORT_E));
MMIO_D(DP_TP_STATUS(PORT_A));
MMIO_D(DP_TP_STATUS(PORT_B));
MMIO_D(DP_TP_STATUS(PORT_C));
MMIO_D(DP_TP_STATUS(PORT_D));
MMIO_D(DP_TP_STATUS(PORT_E));
MMIO_F(_MMIO(_DDI_BUF_TRANS_A), 0x50);
MMIO_F(_MMIO(0x64e60), 0x50);
MMIO_F(_MMIO(0x64eC0), 0x50);
MMIO_F(_MMIO(0x64f20), 0x50);
MMIO_F(_MMIO(0x64f80), 0x50);
MMIO_D(HSW_AUD_CFG(PIPE_A));
MMIO_D(HSW_AUD_PIN_ELD_CP_VLD);
MMIO_D(HSW_AUD_MISC_CTRL(PIPE_A));
MMIO_D(_MMIO(_TRANS_DDI_FUNC_CTL_A));
MMIO_D(_MMIO(_TRANS_DDI_FUNC_CTL_B));
MMIO_D(_MMIO(_TRANS_DDI_FUNC_CTL_C));
MMIO_D(_MMIO(_TRANS_DDI_FUNC_CTL_EDP));
MMIO_D(_MMIO(_TRANSA_MSA_MISC));
MMIO_D(_MMIO(_TRANSB_MSA_MISC));
MMIO_D(_MMIO(_TRANSC_MSA_MISC));
MMIO_D(_MMIO(_TRANS_EDP_MSA_MISC));
MMIO_D(FORCEWAKE);
MMIO_D(FORCEWAKE_ACK);
MMIO_D(GEN6_GT_CORE_STATUS);
MMIO_D(GEN6_GT_THREAD_STATUS_REG);
MMIO_D(GTFIFODBG);
MMIO_D(GTFIFOCTL);
MMIO_D(ECOBUS);
MMIO_D(GEN6_RC_CONTROL);
MMIO_D(GEN6_RC_STATE);
MMIO_D(GEN6_RPNSWREQ);
MMIO_D(GEN6_RC_VIDEO_FREQ);
MMIO_D(GEN6_RP_DOWN_TIMEOUT);
MMIO_D(GEN6_RP_INTERRUPT_LIMITS);
MMIO_D(GEN6_RPSTAT1);
MMIO_D(GEN6_RP_CONTROL);
MMIO_D(GEN6_RP_UP_THRESHOLD);
MMIO_D(GEN6_RP_DOWN_THRESHOLD);
MMIO_D(GEN6_RP_CUR_UP_EI);
MMIO_D(GEN6_RP_CUR_UP);
MMIO_D(GEN6_RP_PREV_UP);
MMIO_D(GEN6_RP_CUR_DOWN_EI);
MMIO_D(GEN6_RP_CUR_DOWN);
MMIO_D(GEN6_RP_PREV_DOWN);
MMIO_D(GEN6_RP_UP_EI);
MMIO_D(GEN6_RP_DOWN_EI);
MMIO_D(GEN6_RP_IDLE_HYSTERSIS);
MMIO_D(GEN6_RC1_WAKE_RATE_LIMIT);
MMIO_D(GEN6_RC6_WAKE_RATE_LIMIT);
MMIO_D(GEN6_RC6pp_WAKE_RATE_LIMIT);
MMIO_D(GEN6_RC_EVALUATION_INTERVAL);
MMIO_D(GEN6_RC_IDLE_HYSTERSIS);
MMIO_D(GEN6_RC_SLEEP);
MMIO_D(GEN6_RC1e_THRESHOLD);
MMIO_D(GEN6_RC6_THRESHOLD);
MMIO_D(GEN6_RC6p_THRESHOLD);
MMIO_D(GEN6_RC6pp_THRESHOLD);
MMIO_D(GEN6_PMINTRMSK);
MMIO_D(RSTDBYCTL);
MMIO_D(GEN6_GDRST);
MMIO_F(FENCE_REG_GEN6_LO(0), 0x80);
MMIO_D(CPU_VGACNTRL);
MMIO_D(TILECTL);
MMIO_D(GEN6_UCGCTL1);
MMIO_D(GEN6_UCGCTL2);
MMIO_F(_MMIO(0x4f000), 0x90);
MMIO_D(GEN6_PCODE_DATA);
MMIO_D(_MMIO(0x13812c));
MMIO_D(GEN7_ERR_INT);
MMIO_D(HSW_EDRAM_CAP);
MMIO_D(HSW_IDICR);
MMIO_D(GFX_FLSH_CNTL_GEN6);
MMIO_D(_MMIO(0x3c));
MMIO_D(_MMIO(0x860));
MMIO_D(ECOSKPD(RENDER_RING_BASE));
MMIO_D(_MMIO(0x121d0));
MMIO_D(ECOSKPD(BLT_RING_BASE));
MMIO_D(_MMIO(0x41d0));
MMIO_D(GAC_ECO_BITS);
MMIO_D(_MMIO(0x6200));
MMIO_D(_MMIO(0x6204));
MMIO_D(_MMIO(0x6208));
MMIO_D(_MMIO(0x7118));
MMIO_D(_MMIO(0x7180));
MMIO_D(_MMIO(0x7408));
MMIO_D(_MMIO(0x7c00));
MMIO_D(GEN6_MBCTL);
MMIO_D(_MMIO(0x911c));
MMIO_D(_MMIO(0x9120));
MMIO_D(GEN7_UCGCTL4);
MMIO_D(GAB_CTL);
MMIO_D(_MMIO(0x48800));
MMIO_D(_MMIO(0xce044));
MMIO_D(_MMIO(0xe6500));
MMIO_D(_MMIO(0xe6504));
MMIO_D(_MMIO(0xe6600));
MMIO_D(_MMIO(0xe6604));
MMIO_D(_MMIO(0xe6700));
MMIO_D(_MMIO(0xe6704));
MMIO_D(_MMIO(0xe6800));
MMIO_D(_MMIO(0xe6804));
MMIO_D(PCH_GMBUS4);
MMIO_D(PCH_GMBUS5);
MMIO_D(_MMIO(0x902c));
MMIO_D(_MMIO(0xec008));
MMIO_D(_MMIO(0xec00c));
MMIO_D(_MMIO(0xec008 + 0x18));
MMIO_D(_MMIO(0xec00c + 0x18));
MMIO_D(_MMIO(0xec008 + 0x18 * 2));
MMIO_D(_MMIO(0xec00c + 0x18 * 2));
MMIO_D(_MMIO(0xec008 + 0x18 * 3));
MMIO_D(_MMIO(0xec00c + 0x18 * 3));
MMIO_D(_MMIO(0xec408));
MMIO_D(_MMIO(0xec40c));
MMIO_D(_MMIO(0xec408 + 0x18));
MMIO_D(_MMIO(0xec40c + 0x18));
MMIO_D(_MMIO(0xec408 + 0x18 * 2));
MMIO_D(_MMIO(0xec40c + 0x18 * 2));
MMIO_D(_MMIO(0xec408 + 0x18 * 3));
MMIO_D(_MMIO(0xec40c + 0x18 * 3));
MMIO_D(_MMIO(0xfc810));
MMIO_D(_MMIO(0xfc81c));
MMIO_D(_MMIO(0xfc828));
MMIO_D(_MMIO(0xfc834));
MMIO_D(_MMIO(0xfcc00));
MMIO_D(_MMIO(0xfcc0c));
MMIO_D(_MMIO(0xfcc18));
MMIO_D(_MMIO(0xfcc24));
MMIO_D(_MMIO(0xfd000));
MMIO_D(_MMIO(0xfd00c));
MMIO_D(_MMIO(0xfd018));
MMIO_D(_MMIO(0xfd024));
MMIO_D(_MMIO(0xfd034));
MMIO_D(FPGA_DBG);
MMIO_D(_MMIO(0x2054));
MMIO_D(_MMIO(0x12054));
MMIO_D(_MMIO(0x22054));
MMIO_D(_MMIO(0x1a054));
MMIO_D(_MMIO(0x44070));
MMIO_D(_MMIO(0x2178));
MMIO_D(_MMIO(0x217c));
MMIO_D(_MMIO(0x12178));
MMIO_D(_MMIO(0x1217c));
MMIO_F(_MMIO(0x5200), 32);
MMIO_F(_MMIO(0x5240), 32);
MMIO_F(_MMIO(0x5280), 16);
MMIO_D(BCS_SWCTRL);
MMIO_F(HS_INVOCATION_COUNT, 8);
MMIO_F(DS_INVOCATION_COUNT, 8);
MMIO_F(IA_VERTICES_COUNT, 8);
MMIO_F(IA_PRIMITIVES_COUNT, 8);
MMIO_F(VS_INVOCATION_COUNT, 8);
MMIO_F(GS_INVOCATION_COUNT, 8);
MMIO_F(GS_PRIMITIVES_COUNT, 8);
MMIO_F(CL_INVOCATION_COUNT, 8);
MMIO_F(CL_PRIMITIVES_COUNT, 8);
MMIO_F(PS_INVOCATION_COUNT, 8);
MMIO_F(PS_DEPTH_COUNT, 8);
MMIO_D(ARB_MODE);
MMIO_RING_D(RING_BBADDR);
MMIO_D(_MMIO(0x2220));
MMIO_D(_MMIO(0x12220));
MMIO_D(_MMIO(0x22220));
MMIO_RING_D(RING_SYNC_1);
MMIO_RING_D(RING_SYNC_0);
MMIO_D(GUC_STATUS);
MMIO_F(_MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000);
MMIO_F(_MMIO(VGT_PVINFO_PAGE), VGT_PVINFO_SIZE);
MMIO_F(LGC_PALETTE(PIPE_A, 0), 1024);
MMIO_F(LGC_PALETTE(PIPE_B, 0), 1024);
MMIO_F(LGC_PALETTE(PIPE_C, 0), 1024);
return 0;
}
static int iterate_bdw_only_mmio(struct intel_gvt_mmio_table_iter *iter)
{
MMIO_D(HSW_PWR_WELL_CTL1);
MMIO_D(HSW_PWR_WELL_CTL2);
MMIO_D(HSW_PWR_WELL_CTL3);
MMIO_D(HSW_PWR_WELL_CTL4);
MMIO_D(HSW_PWR_WELL_CTL5);
MMIO_D(HSW_PWR_WELL_CTL6);
MMIO_D(WM_MISC);
MMIO_D(_MMIO(_SRD_CTL_EDP));
MMIO_D(_MMIO(0xb1f0));
MMIO_D(_MMIO(0xb1c0));
MMIO_D(_MMIO(0xb100));
MMIO_D(_MMIO(0xb10c));
MMIO_D(_MMIO(0xb110));
MMIO_D(_MMIO(0x83a4));
MMIO_D(_MMIO(0x8430));
MMIO_D(_MMIO(0x2248));
MMIO_D(FORCEWAKE_ACK_HSW);
return 0;
}
static int iterate_bdw_plus_mmio(struct intel_gvt_mmio_table_iter *iter)
{
struct drm_i915_private *dev_priv = iter->i915;
MMIO_D(GEN8_GT_IMR(0));
MMIO_D(GEN8_GT_IER(0));
MMIO_D(GEN8_GT_IIR(0));
MMIO_D(GEN8_GT_ISR(0));
MMIO_D(GEN8_GT_IMR(1));
MMIO_D(GEN8_GT_IER(1));
MMIO_D(GEN8_GT_IIR(1));
MMIO_D(GEN8_GT_ISR(1));
MMIO_D(GEN8_GT_IMR(2));
MMIO_D(GEN8_GT_IER(2));
MMIO_D(GEN8_GT_IIR(2));
MMIO_D(GEN8_GT_ISR(2));
MMIO_D(GEN8_GT_IMR(3));
MMIO_D(GEN8_GT_IER(3));
MMIO_D(GEN8_GT_IIR(3));
MMIO_D(GEN8_GT_ISR(3));
MMIO_D(GEN8_DE_PIPE_IMR(PIPE_A));
MMIO_D(GEN8_DE_PIPE_IER(PIPE_A));
MMIO_D(GEN8_DE_PIPE_IIR(PIPE_A));
MMIO_D(GEN8_DE_PIPE_ISR(PIPE_A));
MMIO_D(GEN8_DE_PIPE_IMR(PIPE_B));
MMIO_D(GEN8_DE_PIPE_IER(PIPE_B));
MMIO_D(GEN8_DE_PIPE_IIR(PIPE_B));
MMIO_D(GEN8_DE_PIPE_ISR(PIPE_B));
MMIO_D(GEN8_DE_PIPE_IMR(PIPE_C));
MMIO_D(GEN8_DE_PIPE_IER(PIPE_C));
MMIO_D(GEN8_DE_PIPE_IIR(PIPE_C));
MMIO_D(GEN8_DE_PIPE_ISR(PIPE_C));
MMIO_D(GEN8_DE_PORT_IMR);
MMIO_D(GEN8_DE_PORT_IER);
MMIO_D(GEN8_DE_PORT_IIR);
MMIO_D(GEN8_DE_PORT_ISR);
MMIO_D(GEN8_DE_MISC_IMR);
MMIO_D(GEN8_DE_MISC_IER);
MMIO_D(GEN8_DE_MISC_IIR);
MMIO_D(GEN8_DE_MISC_ISR);
MMIO_D(GEN8_PCU_IMR);
MMIO_D(GEN8_PCU_IER);
MMIO_D(GEN8_PCU_IIR);
MMIO_D(GEN8_PCU_ISR);
MMIO_D(GEN8_MASTER_IRQ);
MMIO_RING_D(RING_ACTHD_UDW);
#define RING_REG(base) _MMIO((base) + 0xd0)
MMIO_RING_D(RING_REG);
#undef RING_REG
#define RING_REG(base) _MMIO((base) + 0x230)
MMIO_RING_D(RING_REG);
#undef RING_REG
#define RING_REG(base) _MMIO((base) + 0x234)
MMIO_RING_F(RING_REG, 8);
#undef RING_REG
#define RING_REG(base) _MMIO((base) + 0x244)
MMIO_RING_D(RING_REG);
#undef RING_REG
#define RING_REG(base) _MMIO((base) + 0x370)
MMIO_RING_F(RING_REG, 48);
#undef RING_REG
#define RING_REG(base) _MMIO((base) + 0x3a0)
MMIO_RING_D(RING_REG);
#undef RING_REG
MMIO_D(PIPEMISC(PIPE_A));
MMIO_D(PIPEMISC(PIPE_B));
MMIO_D(PIPEMISC(PIPE_C));
MMIO_D(_MMIO(0x1c1d0));
MMIO_D(GEN6_MBCUNIT_SNPCR);
MMIO_D(GEN7_MISCCPCTL);
MMIO_D(_MMIO(0x1c054));
MMIO_D(GEN6_PCODE_MAILBOX);
if (!IS_BROXTON(dev_priv))
MMIO_D(GEN8_PRIVATE_PAT_LO);
MMIO_D(GEN8_PRIVATE_PAT_HI);
MMIO_D(GAMTARBMODE);
#define RING_REG(base) _MMIO((base) + 0x270)
MMIO_RING_F(RING_REG, 32);
#undef RING_REG
MMIO_RING_D(RING_HWS_PGA);
MMIO_D(HDC_CHICKEN0);
MMIO_D(CHICKEN_PIPESL_1(PIPE_A));
MMIO_D(CHICKEN_PIPESL_1(PIPE_B));
MMIO_D(CHICKEN_PIPESL_1(PIPE_C));
MMIO_D(_MMIO(0x6671c));
MMIO_D(_MMIO(0x66c00));
MMIO_D(_MMIO(0x66c04));
MMIO_D(HSW_GTT_CACHE_EN);
MMIO_D(GEN8_EU_DISABLE0);
MMIO_D(GEN8_EU_DISABLE1);
MMIO_D(GEN8_EU_DISABLE2);
MMIO_D(_MMIO(0xfdc));
MMIO_D(GEN8_ROW_CHICKEN);
MMIO_D(GEN7_ROW_CHICKEN2);
MMIO_D(GEN8_UCGCTL6);
MMIO_D(GEN8_L3SQCREG4);
MMIO_D(GEN9_SCRATCH_LNCF1);
MMIO_F(_MMIO(0x24d0), 48);
MMIO_D(_MMIO(0x44484));
MMIO_D(_MMIO(0x4448c));
MMIO_D(GEN8_L3_LRA_1_GPGPU);
MMIO_D(_MMIO(0x110000));
MMIO_D(_MMIO(0x48400));
MMIO_D(_MMIO(0x6e570));
MMIO_D(_MMIO(0x65f10));
MMIO_D(_MMIO(0xe194));
MMIO_D(_MMIO(0xe188));
MMIO_D(HALF_SLICE_CHICKEN2);
MMIO_D(_MMIO(0x2580));
MMIO_D(_MMIO(0xe220));
MMIO_D(_MMIO(0xe230));
MMIO_D(_MMIO(0xe240));
MMIO_D(_MMIO(0xe260));
MMIO_D(_MMIO(0xe270));
MMIO_D(_MMIO(0xe280));
MMIO_D(_MMIO(0xe2a0));
MMIO_D(_MMIO(0xe2b0));
MMIO_D(_MMIO(0xe2c0));
MMIO_D(_MMIO(0x21f0));
MMIO_D(GEN8_GAMW_ECO_DEV_RW_IA);
MMIO_D(_MMIO(0x215c));
MMIO_F(_MMIO(0x2290), 8);
MMIO_D(_MMIO(0x2b00));
MMIO_D(_MMIO(0x2360));
MMIO_D(_MMIO(0x1c17c));
MMIO_D(_MMIO(0x1c178));
MMIO_D(_MMIO(0x4260));
MMIO_D(_MMIO(0x4264));
MMIO_D(_MMIO(0x4268));
MMIO_D(_MMIO(0x426c));
MMIO_D(_MMIO(0x4270));
MMIO_D(_MMIO(0x4094));
MMIO_D(_MMIO(0x22178));
MMIO_D(_MMIO(0x1a178));
MMIO_D(_MMIO(0x1a17c));
MMIO_D(_MMIO(0x2217c));
MMIO_D(EDP_PSR_IMR);
MMIO_D(EDP_PSR_IIR);
MMIO_D(_MMIO(0xe4cc));
MMIO_D(GEN7_SC_INSTDONE);
return 0;
}
static int iterate_pre_skl_mmio(struct intel_gvt_mmio_table_iter *iter)
{
MMIO_D(FORCEWAKE_MT);
MMIO_D(PCH_ADPA);
MMIO_F(_MMIO(_PCH_DPB_AUX_CH_CTL), 6 * 4);
MMIO_F(_MMIO(_PCH_DPC_AUX_CH_CTL), 6 * 4);
MMIO_F(_MMIO(_PCH_DPD_AUX_CH_CTL), 6 * 4);
MMIO_F(_MMIO(0x70440), 0xc);
MMIO_F(_MMIO(0x71440), 0xc);
MMIO_F(_MMIO(0x72440), 0xc);
MMIO_F(_MMIO(0x7044c), 0xc);
MMIO_F(_MMIO(0x7144c), 0xc);
MMIO_F(_MMIO(0x7244c), 0xc);
return 0;
}
static int iterate_skl_plus_mmio(struct intel_gvt_mmio_table_iter *iter)
{
struct drm_i915_private *dev_priv = iter->i915;
MMIO_D(FORCEWAKE_RENDER_GEN9);
MMIO_D(FORCEWAKE_ACK_RENDER_GEN9);
MMIO_D(FORCEWAKE_GT_GEN9);
MMIO_D(FORCEWAKE_ACK_GT_GEN9);
MMIO_D(FORCEWAKE_MEDIA_GEN9);
MMIO_D(FORCEWAKE_ACK_MEDIA_GEN9);
MMIO_F(DP_AUX_CH_CTL(AUX_CH_B), 6 * 4);
MMIO_F(DP_AUX_CH_CTL(AUX_CH_C), 6 * 4);
MMIO_F(DP_AUX_CH_CTL(AUX_CH_D), 6 * 4);
MMIO_D(HSW_PWR_WELL_CTL1);
MMIO_D(HSW_PWR_WELL_CTL2);
MMIO_D(DBUF_CTL_S(0));
MMIO_D(GEN9_PG_ENABLE);
MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS);
MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS);
MMIO_D(GEN9_GAMT_ECO_REG_RW_IA);
MMIO_D(MMCD_MISC_CTRL);
MMIO_D(CHICKEN_PAR1_1);
MMIO_D(DC_STATE_EN);
MMIO_D(DC_STATE_DEBUG);
MMIO_D(CDCLK_CTL);
MMIO_D(LCPLL1_CTL);
MMIO_D(LCPLL2_CTL);
MMIO_D(_MMIO(_DPLL1_CFGCR1));
MMIO_D(_MMIO(_DPLL2_CFGCR1));
MMIO_D(_MMIO(_DPLL3_CFGCR1));
MMIO_D(_MMIO(_DPLL1_CFGCR2));
MMIO_D(_MMIO(_DPLL2_CFGCR2));
MMIO_D(_MMIO(_DPLL3_CFGCR2));
MMIO_D(DPLL_CTRL1);
MMIO_D(DPLL_CTRL2);
MMIO_D(DPLL_STATUS);
MMIO_D(SKL_PS_WIN_POS(PIPE_A, 0));
MMIO_D(SKL_PS_WIN_POS(PIPE_A, 1));
MMIO_D(SKL_PS_WIN_POS(PIPE_B, 0));
MMIO_D(SKL_PS_WIN_POS(PIPE_B, 1));
MMIO_D(SKL_PS_WIN_POS(PIPE_C, 0));
MMIO_D(SKL_PS_WIN_POS(PIPE_C, 1));
MMIO_D(SKL_PS_WIN_SZ(PIPE_A, 0));
MMIO_D(SKL_PS_WIN_SZ(PIPE_A, 1));
MMIO_D(SKL_PS_WIN_SZ(PIPE_B, 0));
MMIO_D(SKL_PS_WIN_SZ(PIPE_B, 1));
MMIO_D(SKL_PS_WIN_SZ(PIPE_C, 0));
MMIO_D(SKL_PS_WIN_SZ(PIPE_C, 1));
MMIO_D(SKL_PS_CTRL(PIPE_A, 0));
MMIO_D(SKL_PS_CTRL(PIPE_A, 1));
MMIO_D(SKL_PS_CTRL(PIPE_B, 0));
MMIO_D(SKL_PS_CTRL(PIPE_B, 1));
MMIO_D(SKL_PS_CTRL(PIPE_C, 0));
MMIO_D(SKL_PS_CTRL(PIPE_C, 1));
MMIO_D(PLANE_BUF_CFG(PIPE_A, 0));
MMIO_D(PLANE_BUF_CFG(PIPE_A, 1));
MMIO_D(PLANE_BUF_CFG(PIPE_A, 2));
MMIO_D(PLANE_BUF_CFG(PIPE_A, 3));
MMIO_D(PLANE_BUF_CFG(PIPE_B, 0));
MMIO_D(PLANE_BUF_CFG(PIPE_B, 1));
MMIO_D(PLANE_BUF_CFG(PIPE_B, 2));
MMIO_D(PLANE_BUF_CFG(PIPE_B, 3));
MMIO_D(PLANE_BUF_CFG(PIPE_C, 0));
MMIO_D(PLANE_BUF_CFG(PIPE_C, 1));
MMIO_D(PLANE_BUF_CFG(PIPE_C, 2));
MMIO_D(PLANE_BUF_CFG(PIPE_C, 3));
MMIO_D(CUR_BUF_CFG(PIPE_A));
MMIO_D(CUR_BUF_CFG(PIPE_B));
MMIO_D(CUR_BUF_CFG(PIPE_C));
MMIO_F(PLANE_WM(PIPE_A, 0, 0), 4 * 8);
MMIO_F(PLANE_WM(PIPE_A, 1, 0), 4 * 8);
MMIO_F(PLANE_WM(PIPE_A, 2, 0), 4 * 8);
MMIO_F(PLANE_WM(PIPE_B, 0, 0), 4 * 8);
MMIO_F(PLANE_WM(PIPE_B, 1, 0), 4 * 8);
MMIO_F(PLANE_WM(PIPE_B, 2, 0), 4 * 8);
MMIO_F(PLANE_WM(PIPE_C, 0, 0), 4 * 8);
MMIO_F(PLANE_WM(PIPE_C, 1, 0), 4 * 8);
MMIO_F(PLANE_WM(PIPE_C, 2, 0), 4 * 8);
MMIO_F(CUR_WM(PIPE_A, 0), 4 * 8);
MMIO_F(CUR_WM(PIPE_B, 0), 4 * 8);
MMIO_F(CUR_WM(PIPE_C, 0), 4 * 8);
MMIO_D(PLANE_WM_TRANS(PIPE_A, 0));
MMIO_D(PLANE_WM_TRANS(PIPE_A, 1));
MMIO_D(PLANE_WM_TRANS(PIPE_A, 2));
MMIO_D(PLANE_WM_TRANS(PIPE_B, 0));
MMIO_D(PLANE_WM_TRANS(PIPE_B, 1));
MMIO_D(PLANE_WM_TRANS(PIPE_B, 2));
MMIO_D(PLANE_WM_TRANS(PIPE_C, 0));
MMIO_D(PLANE_WM_TRANS(PIPE_C, 1));
MMIO_D(PLANE_WM_TRANS(PIPE_C, 2));
MMIO_D(CUR_WM_TRANS(PIPE_A));
MMIO_D(CUR_WM_TRANS(PIPE_B));
MMIO_D(CUR_WM_TRANS(PIPE_C));
MMIO_D(PLANE_NV12_BUF_CFG(PIPE_A, 0));
MMIO_D(PLANE_NV12_BUF_CFG(PIPE_A, 1));
MMIO_D(PLANE_NV12_BUF_CFG(PIPE_A, 2));
MMIO_D(PLANE_NV12_BUF_CFG(PIPE_A, 3));
MMIO_D(PLANE_NV12_BUF_CFG(PIPE_B, 0));
MMIO_D(PLANE_NV12_BUF_CFG(PIPE_B, 1));
MMIO_D(PLANE_NV12_BUF_CFG(PIPE_B, 2));
MMIO_D(PLANE_NV12_BUF_CFG(PIPE_B, 3));
MMIO_D(PLANE_NV12_BUF_CFG(PIPE_C, 0));
MMIO_D(PLANE_NV12_BUF_CFG(PIPE_C, 1));
MMIO_D(PLANE_NV12_BUF_CFG(PIPE_C, 2));
MMIO_D(PLANE_NV12_BUF_CFG(PIPE_C, 3));
MMIO_D(_MMIO(_REG_701C0(PIPE_A, 1)));
MMIO_D(_MMIO(_REG_701C0(PIPE_A, 2)));
MMIO_D(_MMIO(_REG_701C0(PIPE_A, 3)));
MMIO_D(_MMIO(_REG_701C0(PIPE_A, 4)));
MMIO_D(_MMIO(_REG_701C0(PIPE_B, 1)));
MMIO_D(_MMIO(_REG_701C0(PIPE_B, 2)));
MMIO_D(_MMIO(_REG_701C0(PIPE_B, 3)));
MMIO_D(_MMIO(_REG_701C0(PIPE_B, 4)));
MMIO_D(_MMIO(_REG_701C0(PIPE_C, 1)));
MMIO_D(_MMIO(_REG_701C0(PIPE_C, 2)));
MMIO_D(_MMIO(_REG_701C0(PIPE_C, 3)));
MMIO_D(_MMIO(_REG_701C0(PIPE_C, 4)));
MMIO_D(_MMIO(_REG_701C4(PIPE_A, 1)));
MMIO_D(_MMIO(_REG_701C4(PIPE_A, 2)));
MMIO_D(_MMIO(_REG_701C4(PIPE_A, 3)));
MMIO_D(_MMIO(_REG_701C4(PIPE_A, 4)));
MMIO_D(_MMIO(_REG_701C4(PIPE_B, 1)));
MMIO_D(_MMIO(_REG_701C4(PIPE_B, 2)));
MMIO_D(_MMIO(_REG_701C4(PIPE_B, 3)));
MMIO_D(_MMIO(_REG_701C4(PIPE_B, 4)));
MMIO_D(_MMIO(_REG_701C4(PIPE_C, 1)));
MMIO_D(_MMIO(_REG_701C4(PIPE_C, 2)));
MMIO_D(_MMIO(_REG_701C4(PIPE_C, 3)));
MMIO_D(_MMIO(_REG_701C4(PIPE_C, 4)));
MMIO_D(_MMIO(_PLANE_CTL_3_A));
MMIO_D(_MMIO(_PLANE_CTL_3_B));
MMIO_D(_MMIO(0x72380));
MMIO_D(_MMIO(0x7239c));
MMIO_D(_MMIO(_PLANE_SURF_3_A));
MMIO_D(_MMIO(_PLANE_SURF_3_B));
MMIO_D(DMC_SSP_BASE);
MMIO_D(DMC_HTP_SKL);
MMIO_D(DMC_LAST_WRITE);
MMIO_D(BDW_SCRATCH1);
MMIO_D(SKL_DFSM);
MMIO_D(DISPIO_CR_TX_BMU_CR0);
MMIO_F(GEN9_GFX_MOCS(0), 0x7f8);
MMIO_F(GEN7_L3CNTLREG2, 0x80);
MMIO_D(RPM_CONFIG0);
MMIO_D(_MMIO(0xd08));
MMIO_D(RC6_LOCATION);
MMIO_D(GEN7_FF_SLICE_CS_CHICKEN1);
MMIO_D(GEN9_CS_DEBUG_MODE1);
/* TRTT */
MMIO_D(TRVATTL3PTRDW(0));
MMIO_D(TRVATTL3PTRDW(1));
MMIO_D(TRVATTL3PTRDW(2));
MMIO_D(TRVATTL3PTRDW(3));
MMIO_D(TRVADR);
MMIO_D(TRTTE);
MMIO_D(_MMIO(0x4dfc));
MMIO_D(_MMIO(0x46430));
MMIO_D(_MMIO(0x46520));
MMIO_D(_MMIO(0xc403c));
MMIO_D(GEN8_GARBCNTL);
MMIO_D(DMA_CTRL);
MMIO_D(_MMIO(0x65900));
MMIO_D(GEN6_STOLEN_RESERVED);
MMIO_D(_MMIO(0x4068));
MMIO_D(_MMIO(0x67054));
MMIO_D(_MMIO(0x6e560));
MMIO_D(_MMIO(0x6e554));
MMIO_D(_MMIO(0x2b20));
MMIO_D(_MMIO(0x65f00));
MMIO_D(_MMIO(0x65f08));
MMIO_D(_MMIO(0x320f0));
MMIO_D(_MMIO(0x70034));
MMIO_D(_MMIO(0x71034));
MMIO_D(_MMIO(0x72034));
MMIO_D(_MMIO(_PLANE_KEYVAL_1(PIPE_A)));
MMIO_D(_MMIO(_PLANE_KEYVAL_1(PIPE_B)));
MMIO_D(_MMIO(_PLANE_KEYVAL_1(PIPE_C)));
MMIO_D(_MMIO(_PLANE_KEYMAX_1(PIPE_A)));
MMIO_D(_MMIO(_PLANE_KEYMAX_1(PIPE_B)));
MMIO_D(_MMIO(_PLANE_KEYMAX_1(PIPE_C)));
MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_A)));
MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_B)));
MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_C)));
MMIO_D(_MMIO(0x44500));
#define CSFE_CHICKEN1_REG(base) _MMIO((base) + 0xD4)
MMIO_RING_D(CSFE_CHICKEN1_REG);
#undef CSFE_CHICKEN1_REG
MMIO_D(GEN8_HDC_CHICKEN1);
MMIO_D(GEN9_WM_CHICKEN3);
if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv))
MMIO_D(GAMT_CHKN_BIT_REG);
if (!IS_BROXTON(dev_priv))
MMIO_D(GEN9_CTX_PREEMPT_REG);
MMIO_F(_MMIO(DMC_MMIO_START_RANGE), 0x3000);
return 0;
}
static int iterate_bxt_mmio(struct intel_gvt_mmio_table_iter *iter)
{
struct drm_i915_private *dev_priv = iter->i915;
MMIO_F(_MMIO(0x80000), 0x3000);
MMIO_D(GEN7_SAMPLER_INSTDONE);
MMIO_D(GEN7_ROW_INSTDONE);
MMIO_D(GEN8_FAULT_TLB_DATA0);
MMIO_D(GEN8_FAULT_TLB_DATA1);
MMIO_D(ERROR_GEN6);
MMIO_D(DONE_REG);
MMIO_D(EIR);
MMIO_D(PGTBL_ER);
MMIO_D(_MMIO(0x4194));
MMIO_D(_MMIO(0x4294));
MMIO_D(_MMIO(0x4494));
MMIO_RING_D(RING_PSMI_CTL);
MMIO_RING_D(RING_DMA_FADD);
MMIO_RING_D(RING_DMA_FADD_UDW);
MMIO_RING_D(RING_IPEHR);
MMIO_RING_D(RING_INSTPS);
MMIO_RING_D(RING_BBADDR_UDW);
MMIO_RING_D(RING_BBSTATE);
MMIO_RING_D(RING_IPEIR);
MMIO_F(SOFT_SCRATCH(0), 16 * 4);
MMIO_D(BXT_P_CR_GT_DISP_PWRON);
MMIO_D(BXT_RP_STATE_CAP);
MMIO_D(BXT_PHY_CTL_FAMILY(DPIO_PHY0));
MMIO_D(BXT_PHY_CTL_FAMILY(DPIO_PHY1));
MMIO_D(BXT_PHY_CTL(PORT_A));
MMIO_D(BXT_PHY_CTL(PORT_B));
MMIO_D(BXT_PHY_CTL(PORT_C));
MMIO_D(BXT_PORT_PLL_ENABLE(PORT_A));
MMIO_D(BXT_PORT_PLL_ENABLE(PORT_B));
MMIO_D(BXT_PORT_PLL_ENABLE(PORT_C));
MMIO_D(BXT_PORT_CL1CM_DW0(DPIO_PHY0));
MMIO_D(BXT_PORT_CL1CM_DW9(DPIO_PHY0));
MMIO_D(BXT_PORT_CL1CM_DW10(DPIO_PHY0));
MMIO_D(BXT_PORT_CL1CM_DW28(DPIO_PHY0));
MMIO_D(BXT_PORT_CL1CM_DW30(DPIO_PHY0));
MMIO_D(BXT_PORT_CL2CM_DW6(DPIO_PHY0));
MMIO_D(BXT_PORT_REF_DW3(DPIO_PHY0));
MMIO_D(BXT_PORT_REF_DW6(DPIO_PHY0));
MMIO_D(BXT_PORT_REF_DW8(DPIO_PHY0));
MMIO_D(BXT_PORT_CL1CM_DW0(DPIO_PHY1));
MMIO_D(BXT_PORT_CL1CM_DW9(DPIO_PHY1));
MMIO_D(BXT_PORT_CL1CM_DW10(DPIO_PHY1));
MMIO_D(BXT_PORT_CL1CM_DW28(DPIO_PHY1));
MMIO_D(BXT_PORT_CL1CM_DW30(DPIO_PHY1));
MMIO_D(BXT_PORT_CL2CM_DW6(DPIO_PHY1));
MMIO_D(BXT_PORT_REF_DW3(DPIO_PHY1));
MMIO_D(BXT_PORT_REF_DW6(DPIO_PHY1));
MMIO_D(BXT_PORT_REF_DW8(DPIO_PHY1));
MMIO_D(BXT_PORT_PLL_EBB_0(DPIO_PHY0, DPIO_CH0));
MMIO_D(BXT_PORT_PLL_EBB_4(DPIO_PHY0, DPIO_CH0));
MMIO_D(BXT_PORT_PCS_DW10_LN01(DPIO_PHY0, DPIO_CH0));
MMIO_D(BXT_PORT_PCS_DW10_GRP(DPIO_PHY0, DPIO_CH0));
MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY0, DPIO_CH0));
MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY0, DPIO_CH0));
MMIO_D(BXT_PORT_PCS_DW12_GRP(DPIO_PHY0, DPIO_CH0));
MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY0, DPIO_CH0));
MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY0, DPIO_CH0));
MMIO_D(BXT_PORT_TX_DW3_LN0(DPIO_PHY0, DPIO_CH0));
MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY0, DPIO_CH0));
MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY0, DPIO_CH0));
MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY0, DPIO_CH0));
MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 0));
MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 1));
MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 2));
MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 3));
MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 0));
MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 1));
MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 2));
MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 3));
MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 6));
MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 8));
MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 9));
MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 10));
MMIO_D(BXT_PORT_PLL_EBB_0(DPIO_PHY0, DPIO_CH1));
MMIO_D(BXT_PORT_PLL_EBB_4(DPIO_PHY0, DPIO_CH1));
MMIO_D(BXT_PORT_PCS_DW10_LN01(DPIO_PHY0, DPIO_CH1));
MMIO_D(BXT_PORT_PCS_DW10_GRP(DPIO_PHY0, DPIO_CH1));
MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY0, DPIO_CH1));
MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY0, DPIO_CH1));
MMIO_D(BXT_PORT_PCS_DW12_GRP(DPIO_PHY0, DPIO_CH1));
MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY0, DPIO_CH1));
MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY0, DPIO_CH1));
MMIO_D(BXT_PORT_TX_DW3_LN0(DPIO_PHY0, DPIO_CH1));
MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY0, DPIO_CH1));
MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY0, DPIO_CH1));
MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY0, DPIO_CH1));
MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 0));
MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 1));
MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 2));
MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 3));
MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 0));
MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 1));
MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 2));
MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 3));
MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 6));
MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 8));
MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 9));
MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 10));
MMIO_D(BXT_PORT_PLL_EBB_0(DPIO_PHY1, DPIO_CH0));
MMIO_D(BXT_PORT_PLL_EBB_4(DPIO_PHY1, DPIO_CH0));
MMIO_D(BXT_PORT_PCS_DW10_LN01(DPIO_PHY1, DPIO_CH0));
MMIO_D(BXT_PORT_PCS_DW10_GRP(DPIO_PHY1, DPIO_CH0));
MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY1, DPIO_CH0));
MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY1, DPIO_CH0));
MMIO_D(BXT_PORT_PCS_DW12_GRP(DPIO_PHY1, DPIO_CH0));
MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY1, DPIO_CH0));
MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY1, DPIO_CH0));
MMIO_D(BXT_PORT_TX_DW3_LN0(DPIO_PHY1, DPIO_CH0));
MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY1, DPIO_CH0));
MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY1, DPIO_CH0));
MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY1, DPIO_CH0));
MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 0));
MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 1));
MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 2));
MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 3));
MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 0));
MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 1));
MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 2));
MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 3));
MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 6));
MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 8));
MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 9));
MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 10));
MMIO_D(BXT_DE_PLL_CTL);
MMIO_D(BXT_DE_PLL_ENABLE);
MMIO_D(BXT_DSI_PLL_CTL);
MMIO_D(BXT_DSI_PLL_ENABLE);
MMIO_D(GEN9_CLKGATE_DIS_0);
MMIO_D(GEN9_CLKGATE_DIS_4);
MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_A));
MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_B));
MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_C));
MMIO_D(RC6_CTX_BASE);
MMIO_D(GEN8_PUSHBUS_CONTROL);
MMIO_D(GEN8_PUSHBUS_ENABLE);
MMIO_D(GEN8_PUSHBUS_SHIFT);
MMIO_D(GEN6_GFXPAUSE);
MMIO_D(GEN8_L3SQCREG1);
MMIO_D(GEN8_L3CNTLREG);
MMIO_D(_MMIO(0x20D8));
MMIO_F(GEN8_RING_CS_GPR(RENDER_RING_BASE, 0), 0x40);
MMIO_F(GEN8_RING_CS_GPR(GEN6_BSD_RING_BASE, 0), 0x40);
MMIO_F(GEN8_RING_CS_GPR(BLT_RING_BASE, 0), 0x40);
MMIO_F(GEN8_RING_CS_GPR(VEBOX_RING_BASE, 0), 0x40);
MMIO_D(GEN9_CTX_PREEMPT_REG);
MMIO_D(GEN8_PRIVATE_PAT_LO);
return 0;
}
/**
* intel_gvt_iterate_mmio_table - Iterate the GVT MMIO table
* @iter: the iterator
*
* This function is called to iterate the GVT MMIO table when i915 takes a
* snapshot of the HW state and when GVT builds its MMIO tracking table.
*/
int intel_gvt_iterate_mmio_table(struct intel_gvt_mmio_table_iter *iter)
{
struct drm_i915_private *i915 = iter->i915;
int ret;
ret = iterate_generic_mmio(iter);
if (ret)
goto err;
if (IS_BROADWELL(i915)) {
ret = iterate_bdw_only_mmio(iter);
if (ret)
goto err;
ret = iterate_bdw_plus_mmio(iter);
if (ret)
goto err;
ret = iterate_pre_skl_mmio(iter);
if (ret)
goto err;
} else if (IS_SKYLAKE(i915) ||
IS_KABYLAKE(i915) ||
IS_COFFEELAKE(i915) ||
IS_COMETLAKE(i915)) {
ret = iterate_bdw_plus_mmio(iter);
if (ret)
goto err;
ret = iterate_skl_plus_mmio(iter);
if (ret)
goto err;
} else if (IS_BROXTON(i915)) {
ret = iterate_bdw_plus_mmio(iter);
if (ret)
goto err;
ret = iterate_skl_plus_mmio(iter);
if (ret)
goto err;
ret = iterate_bxt_mmio(iter);
if (ret)
goto err;
}
return 0;
err:
return ret;
}
EXPORT_SYMBOL_NS_GPL(intel_gvt_iterate_mmio_table, I915_GVT);
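/*
 * Minimal usage sketch (hypothetical helper names, not part of this patch):
 * a caller provides handle_mmio_cb and receives one callback per tracked
 * register range. Here the callback merely counts the tracked dwords.
 */
static int count_mmio_cb(struct intel_gvt_mmio_table_iter *iter,
u32 offset, u32 size)
{
*(u32 *)iter->data += size / 4;
return 0;
}
static u32 count_tracked_mmio_dwords(struct drm_i915_private *i915)
{
u32 num = 0;
struct intel_gvt_mmio_table_iter iter = {
.i915 = i915,
.data = &num,
.handle_mmio_cb = count_mmio_cb,
};
return intel_gvt_iterate_mmio_table(&iter) ? 0 : num;
}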
......@@ -656,17 +656,12 @@ struct mdev_driver vfio_ccw_mdev_driver = {
},
.probe = vfio_ccw_mdev_probe,
.remove = vfio_ccw_mdev_remove,
};
static const struct mdev_parent_ops vfio_ccw_mdev_ops = {
.owner = THIS_MODULE,
.device_driver = &vfio_ccw_mdev_driver,
.supported_type_groups = mdev_type_groups,
};
int vfio_ccw_mdev_reg(struct subchannel *sch)
{
return mdev_register_device(&sch->dev, &vfio_ccw_mdev_ops);
return mdev_register_device(&sch->dev, &vfio_ccw_mdev_driver);
}
void vfio_ccw_mdev_unreg(struct subchannel *sch)
......
......@@ -1496,12 +1496,7 @@ static struct mdev_driver vfio_ap_matrix_driver = {
},
.probe = vfio_ap_mdev_probe,
.remove = vfio_ap_mdev_remove,
};
static const struct mdev_parent_ops vfio_ap_matrix_ops = {
.owner = THIS_MODULE,
.device_driver = &vfio_ap_matrix_driver,
.supported_type_groups = vfio_ap_mdev_type_groups,
.supported_type_groups = vfio_ap_mdev_type_groups,
};
int vfio_ap_mdev_register(void)
......@@ -1514,7 +1509,7 @@ int vfio_ap_mdev_register(void)
if (ret)
return ret;
ret = mdev_register_device(&matrix_dev->device, &vfio_ap_matrix_ops);
ret = mdev_register_device(&matrix_dev->device, &vfio_ap_matrix_driver);
if (ret)
goto err_driver;
return 0;
......
# SPDX-License-Identifier: GPL-2.0-only
mdev-y := mdev_core.o mdev_sysfs.o mdev_driver.o vfio_mdev.o
mdev-y := mdev_core.o mdev_sysfs.o mdev_driver.o
obj-$(CONFIG_VFIO_MDEV) += mdev.o
......@@ -89,17 +89,10 @@ void mdev_release_parent(struct kref *kref)
static void mdev_device_remove_common(struct mdev_device *mdev)
{
struct mdev_parent *parent = mdev->type->parent;
int ret;
mdev_remove_sysfs_files(mdev);
device_del(&mdev->dev);
lockdep_assert_held(&parent->unreg_sem);
if (parent->ops->remove) {
ret = parent->ops->remove(mdev);
if (ret)
dev_err(&mdev->dev, "Remove failed: err=%d\n", ret);
}
/* Balances with device_initialize() */
put_device(&mdev->dev);
}
......@@ -116,12 +109,12 @@ static int mdev_device_remove_cb(struct device *dev, void *data)
/*
* mdev_register_device : Register a device
* @dev: device structure representing parent device.
* @ops: Parent device operation structure to be registered.
* @mdev_driver: Device driver to bind to the newly created mdev
*
* Add device to list of registered parent devices.
* Returns a negative value on error, otherwise 0.
*/
int mdev_register_device(struct device *dev, const struct mdev_parent_ops *ops)
int mdev_register_device(struct device *dev, struct mdev_driver *mdev_driver)
{
int ret;
struct mdev_parent *parent;
......@@ -129,9 +122,7 @@ int mdev_register_device(struct device *dev, const struct mdev_parent_ops *ops)
char *envp[] = { env_string, NULL };
/* check for mandatory ops */
if (!ops || !ops->supported_type_groups)
return -EINVAL;
if (!ops->device_driver && (!ops->create || !ops->remove))
if (!mdev_driver->supported_type_groups)
return -EINVAL;
dev = get_device(dev);
......@@ -158,7 +149,7 @@ int mdev_register_device(struct device *dev, const struct mdev_parent_ops *ops)
init_rwsem(&parent->unreg_sem);
parent->dev = dev;
parent->ops = ops;
parent->mdev_driver = mdev_driver;
if (!mdev_bus_compat_class) {
mdev_bus_compat_class = class_compat_register("mdev_bus");
......@@ -256,7 +247,7 @@ int mdev_device_create(struct mdev_type *type, const guid_t *uuid)
int ret;
struct mdev_device *mdev, *tmp;
struct mdev_parent *parent = type->parent;
struct mdev_driver *drv = parent->ops->device_driver;
struct mdev_driver *drv = parent->mdev_driver;
mutex_lock(&mdev_list_lock);
......@@ -278,7 +269,7 @@ int mdev_device_create(struct mdev_type *type, const guid_t *uuid)
mdev->dev.parent = parent->dev;
mdev->dev.bus = &mdev_bus_type;
mdev->dev.release = mdev_device_release;
mdev->dev.groups = parent->ops->mdev_attr_groups;
mdev->dev.groups = mdev_device_groups;
mdev->type = type;
/* Pairs with the put in mdev_device_release() */
kobject_get(&type->kobj);
......@@ -297,18 +288,10 @@ int mdev_device_create(struct mdev_type *type, const guid_t *uuid)
goto out_put_device;
}
if (parent->ops->create) {
ret = parent->ops->create(mdev);
if (ret)
goto out_unlock;
}
ret = device_add(&mdev->dev);
if (ret)
goto out_remove;
goto out_unlock;
if (!drv)
drv = &vfio_mdev_driver;
ret = device_driver_attach(&drv->driver, &mdev->dev);
if (ret)
goto out_del;
......@@ -325,9 +308,6 @@ int mdev_device_create(struct mdev_type *type, const guid_t *uuid)
out_del:
device_del(&mdev->dev);
out_remove:
if (parent->ops->remove)
parent->ops->remove(mdev);
out_unlock:
up_read(&parent->unreg_sem);
out_put_device:
......@@ -370,28 +350,14 @@ int mdev_device_remove(struct mdev_device *mdev)
static int __init mdev_init(void)
{
int rc;
rc = mdev_bus_register();
if (rc)
return rc;
rc = mdev_register_driver(&vfio_mdev_driver);
if (rc)
goto err_bus;
return 0;
err_bus:
mdev_bus_unregister();
return rc;
return bus_register(&mdev_bus_type);
}
static void __exit mdev_exit(void)
{
mdev_unregister_driver(&vfio_mdev_driver);
if (mdev_bus_compat_class)
class_compat_unregister(mdev_bus_compat_class);
mdev_bus_unregister();
bus_unregister(&mdev_bus_type);
}
subsys_initcall(mdev_init)
......
......@@ -74,13 +74,3 @@ void mdev_unregister_driver(struct mdev_driver *drv)
driver_unregister(&drv->driver);
}
EXPORT_SYMBOL(mdev_unregister_driver);
int mdev_bus_register(void)
{
return bus_register(&mdev_bus_type);
}
void mdev_bus_unregister(void)
{
bus_unregister(&mdev_bus_type);
}
......@@ -15,7 +15,7 @@ void mdev_bus_unregister(void);
struct mdev_parent {
struct device *dev;
const struct mdev_parent_ops *ops;
struct mdev_driver *mdev_driver;
struct kref ref;
struct list_head next;
struct kset *mdev_types_kset;
......@@ -32,13 +32,13 @@ struct mdev_type {
unsigned int type_group_id;
};
extern const struct attribute_group *mdev_device_groups[];
#define to_mdev_type_attr(_attr) \
container_of(_attr, struct mdev_type_attribute, attr)
#define to_mdev_type(_kobj) \
container_of(_kobj, struct mdev_type, kobj)
extern struct mdev_driver vfio_mdev_driver;
int parent_create_sysfs_files(struct mdev_parent *parent);
void parent_remove_sysfs_files(struct mdev_parent *parent);
......
......@@ -97,7 +97,7 @@ static struct mdev_type *add_mdev_supported_type(struct mdev_parent *parent,
{
struct mdev_type *type;
struct attribute_group *group =
parent->ops->supported_type_groups[type_group_id];
parent->mdev_driver->supported_type_groups[type_group_id];
int ret;
if (!group->name) {
......@@ -154,7 +154,7 @@ static struct mdev_type *add_mdev_supported_type(struct mdev_parent *parent,
static void remove_mdev_supported_type(struct mdev_type *type)
{
struct attribute_group *group =
type->parent->ops->supported_type_groups[type->type_group_id];
type->parent->mdev_driver->supported_type_groups[type->type_group_id];
sysfs_remove_files(&type->kobj,
(const struct attribute **)group->attrs);
......@@ -168,7 +168,7 @@ static int add_mdev_supported_type_groups(struct mdev_parent *parent)
{
int i;
for (i = 0; parent->ops->supported_type_groups[i]; i++) {
for (i = 0; parent->mdev_driver->supported_type_groups[i]; i++) {
struct mdev_type *type;
type = add_mdev_supported_type(parent, i);
......@@ -197,7 +197,6 @@ void parent_remove_sysfs_files(struct mdev_parent *parent)
remove_mdev_supported_type(type);
}
sysfs_remove_groups(&parent->dev->kobj, parent->ops->dev_attr_groups);
kset_unregister(parent->mdev_types_kset);
}
......@@ -213,17 +212,10 @@ int parent_create_sysfs_files(struct mdev_parent *parent)
INIT_LIST_HEAD(&parent->type_list);
ret = sysfs_create_groups(&parent->dev->kobj,
parent->ops->dev_attr_groups);
if (ret)
goto create_err;
ret = add_mdev_supported_type_groups(parent);
if (ret)
sysfs_remove_groups(&parent->dev->kobj,
parent->ops->dev_attr_groups);
else
return ret;
goto create_err;
return 0;
create_err:
kset_unregister(parent->mdev_types_kset);
......@@ -252,11 +244,20 @@ static ssize_t remove_store(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR_WO(remove);
static const struct attribute *mdev_device_attrs[] = {
static struct attribute *mdev_device_attrs[] = {
&dev_attr_remove.attr,
NULL,
};
static const struct attribute_group mdev_device_group = {
.attrs = mdev_device_attrs,
};
const struct attribute_group *mdev_device_groups[] = {
&mdev_device_group,
NULL
};
int mdev_create_sysfs_files(struct mdev_device *mdev)
{
struct mdev_type *type = mdev->type;
......@@ -270,15 +271,8 @@ int mdev_create_sysfs_files(struct mdev_device *mdev)
ret = sysfs_create_link(kobj, &type->kobj, "mdev_type");
if (ret)
goto type_link_failed;
ret = sysfs_create_files(kobj, mdev_device_attrs);
if (ret)
goto create_files_failed;
return ret;
create_files_failed:
sysfs_remove_link(kobj, "mdev_type");
type_link_failed:
sysfs_remove_link(mdev->type->devices_kobj, dev_name(&mdev->dev));
return ret;
......@@ -288,7 +282,6 @@ void mdev_remove_sysfs_files(struct mdev_device *mdev)
{
struct kobject *kobj = &mdev->dev.kobj;
sysfs_remove_files(kobj, mdev_device_attrs);
sysfs_remove_link(kobj, "mdev_type");
sysfs_remove_link(mdev->type->devices_kobj, dev_name(&mdev->dev));
}
// SPDX-License-Identifier: GPL-2.0-only
/*
* VFIO based driver for Mediated device
*
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
* Author: Neo Jia <cjia@nvidia.com>
* Kirti Wankhede <kwankhede@nvidia.com>
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vfio.h>
#include <linux/mdev.h>
#include "mdev_private.h"
static int vfio_mdev_open_device(struct vfio_device *core_vdev)
{
struct mdev_device *mdev = to_mdev_device(core_vdev->dev);
struct mdev_parent *parent = mdev->type->parent;
if (unlikely(!parent->ops->open_device))
return 0;
return parent->ops->open_device(mdev);
}
static void vfio_mdev_close_device(struct vfio_device *core_vdev)
{
struct mdev_device *mdev = to_mdev_device(core_vdev->dev);
struct mdev_parent *parent = mdev->type->parent;
if (likely(parent->ops->close_device))
parent->ops->close_device(mdev);
}
static long vfio_mdev_unlocked_ioctl(struct vfio_device *core_vdev,
unsigned int cmd, unsigned long arg)
{
struct mdev_device *mdev = to_mdev_device(core_vdev->dev);
struct mdev_parent *parent = mdev->type->parent;
if (unlikely(!parent->ops->ioctl))
return 0;
return parent->ops->ioctl(mdev, cmd, arg);
}
static ssize_t vfio_mdev_read(struct vfio_device *core_vdev, char __user *buf,
size_t count, loff_t *ppos)
{
struct mdev_device *mdev = to_mdev_device(core_vdev->dev);
struct mdev_parent *parent = mdev->type->parent;
if (unlikely(!parent->ops->read))
return -EINVAL;
return parent->ops->read(mdev, buf, count, ppos);
}
static ssize_t vfio_mdev_write(struct vfio_device *core_vdev,
const char __user *buf, size_t count,
loff_t *ppos)
{
struct mdev_device *mdev = to_mdev_device(core_vdev->dev);
struct mdev_parent *parent = mdev->type->parent;
if (unlikely(!parent->ops->write))
return -EINVAL;
return parent->ops->write(mdev, buf, count, ppos);
}
static int vfio_mdev_mmap(struct vfio_device *core_vdev,
struct vm_area_struct *vma)
{
struct mdev_device *mdev = to_mdev_device(core_vdev->dev);
struct mdev_parent *parent = mdev->type->parent;
if (unlikely(!parent->ops->mmap))
return -EINVAL;
return parent->ops->mmap(mdev, vma);
}
static void vfio_mdev_request(struct vfio_device *core_vdev, unsigned int count)
{
struct mdev_device *mdev = to_mdev_device(core_vdev->dev);
struct mdev_parent *parent = mdev->type->parent;
if (parent->ops->request)
parent->ops->request(mdev, count);
else if (count == 0)
dev_notice(mdev_dev(mdev),
"No mdev vendor driver request callback support, blocked until released by user\n");
}
static const struct vfio_device_ops vfio_mdev_dev_ops = {
.name = "vfio-mdev",
.open_device = vfio_mdev_open_device,
.close_device = vfio_mdev_close_device,
.ioctl = vfio_mdev_unlocked_ioctl,
.read = vfio_mdev_read,
.write = vfio_mdev_write,
.mmap = vfio_mdev_mmap,
.request = vfio_mdev_request,
};
static int vfio_mdev_probe(struct mdev_device *mdev)
{
struct vfio_device *vdev;
int ret;
vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
if (!vdev)
return -ENOMEM;
vfio_init_group_dev(vdev, &mdev->dev, &vfio_mdev_dev_ops);
ret = vfio_register_emulated_iommu_dev(vdev);
if (ret)
goto out_uninit;
dev_set_drvdata(&mdev->dev, vdev);
return 0;
out_uninit:
vfio_uninit_group_dev(vdev);
kfree(vdev);
return ret;
}
static void vfio_mdev_remove(struct mdev_device *mdev)
{
struct vfio_device *vdev = dev_get_drvdata(&mdev->dev);
vfio_unregister_group_dev(vdev);
vfio_uninit_group_dev(vdev);
kfree(vdev);
}
struct mdev_driver vfio_mdev_driver = {
.driver = {
.name = "vfio_mdev",
.owner = THIS_MODULE,
.mod_name = KBUILD_MODNAME,
},
.probe = vfio_mdev_probe,
.remove = vfio_mdev_remove,
};
......@@ -15,7 +15,6 @@ struct mdev_type;
struct mdev_device {
struct device dev;
guid_t uuid;
void *driver_data;
struct list_head next;
struct mdev_type *type;
bool active;
......@@ -30,74 +29,6 @@ unsigned int mdev_get_type_group_id(struct mdev_device *mdev);
unsigned int mtype_get_type_group_id(struct mdev_type *mtype);
struct device *mtype_get_parent_dev(struct mdev_type *mtype);
/**
* struct mdev_parent_ops - Structure to be registered for each parent device to
* register the device with the mdev module.
*
* @owner: The module owner.
* @device_driver: Which device driver to probe() on newly created devices
* @dev_attr_groups: Attributes of the parent device.
* @mdev_attr_groups: Attributes of the mediated device.
* @supported_type_groups: Attributes to define supported types. It is mandatory
* to provide supported types.
* @create: Called to allocate basic resources in parent device's
* driver for a particular mediated device. It is
* mandatory to provide create ops.
* @mdev: mdev_device structure of the mediated device
* that is being created
* Returns integer: success (0) or error (< 0)
* @remove: Called to free resources in parent device's driver for
* a mediated device. It is mandatory to provide 'remove'
* ops.
* @mdev: mdev_device device structure which is being
* destroyed
* Returns integer: success (0) or error (< 0)
* @read: Read emulation callback
* @mdev: mediated device structure
* @buf: read buffer
* @count: number of bytes to read
* @ppos: address.
* Returns number of bytes read on success or an error code.
* @write: Write emulation callback
* @mdev: mediated device structure
* @buf: write buffer
* @count: number of bytes to be written
* @ppos: address.
* Returns number of bytes written on success or an error code.
* @ioctl: IOCTL callback
* @mdev: mediated device structure
* @cmd: ioctl command
* @arg: arguments to ioctl
* @mmap: mmap callback
* @mdev: mediated device structure
* @vma: vma structure
* @request: request callback to release device
* @mdev: mediated device structure
* @count: request sequence number
* Parent devices that support mediated devices should be registered with the
* mdev module using the mdev_parent_ops structure.
**/
struct mdev_parent_ops {
struct module *owner;
struct mdev_driver *device_driver;
const struct attribute_group **dev_attr_groups;
const struct attribute_group **mdev_attr_groups;
struct attribute_group **supported_type_groups;
int (*create)(struct mdev_device *mdev);
int (*remove)(struct mdev_device *mdev);
int (*open_device)(struct mdev_device *mdev);
void (*close_device)(struct mdev_device *mdev);
ssize_t (*read)(struct mdev_device *mdev, char __user *buf,
size_t count, loff_t *ppos);
ssize_t (*write)(struct mdev_device *mdev, const char __user *buf,
size_t count, loff_t *ppos);
long (*ioctl)(struct mdev_device *mdev, unsigned int cmd,
unsigned long arg);
int (*mmap)(struct mdev_device *mdev, struct vm_area_struct *vma);
void (*request)(struct mdev_device *mdev, unsigned int count);
};
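/*
 * A minimal sketch, not part of this patch: the legacy registration
 * pattern this series removes.  "my_driver", "my_type_groups" and
 * "parent_dev" are hypothetical names for a vendor driver's objects.
 */
static const struct mdev_parent_ops my_parent_ops = {
	.owner			= THIS_MODULE,
	.device_driver		= &my_driver,
	.supported_type_groups	= my_type_groups,
};
/* ... then, from the physical device driver's probe(): */
/*	err = mdev_register_device(parent_dev, &my_parent_ops);	*/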
/* interface for exporting mdev supported type attributes */
struct mdev_type_attribute {
struct attribute attr;
......@@ -122,23 +53,18 @@ struct mdev_type_attribute mdev_type_attr_##_name = \
* struct mdev_driver - Mediated device driver
* @probe: called when a new device is created
* @remove: called when a device is removed
* @supported_type_groups: Attributes to define supported types. It is mandatory
* to provide supported types.
* @driver: device driver structure
*
**/
struct mdev_driver {
int (*probe)(struct mdev_device *dev);
void (*remove)(struct mdev_device *dev);
struct attribute_group **supported_type_groups;
struct device_driver driver;
};
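/*
 * A minimal sketch, not part of this patch: with supported_type_groups
 * embedded in struct mdev_driver, a vendor driver registers the driver
 * and then the parent device against it, as the converted samples
 * below do.  "my_probe", "my_remove", "my_type_groups" and
 * "parent_dev" are hypothetical names.
 */
static struct mdev_driver my_driver = {
	.driver = {
		.name	= "my_mdev",
		.owner	= THIS_MODULE,
	},
	.probe			= my_probe,
	.remove			= my_remove,
	.supported_type_groups	= my_type_groups,
};
/* ... then, from module init / device probe: */
/*	err = mdev_register_driver(&my_driver);				*/
/*	if (!err)							*/
/*		err = mdev_register_device(parent_dev, &my_driver);	*/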
static inline void *mdev_get_drvdata(struct mdev_device *mdev)
{
return mdev->driver_data;
}
static inline void mdev_set_drvdata(struct mdev_device *mdev, void *data)
{
mdev->driver_data = data;
}
static inline const guid_t *mdev_uuid(struct mdev_device *mdev)
{
return &mdev->uuid;
......@@ -146,7 +72,7 @@ static inline const guid_t *mdev_uuid(struct mdev_device *mdev)
extern struct bus_type mdev_bus_type;
int mdev_register_device(struct device *dev, const struct mdev_parent_ops *ops);
int mdev_register_device(struct device *dev, struct mdev_driver *mdev_driver);
void mdev_unregister_device(struct device *dev);
int mdev_register_driver(struct mdev_driver *drv);
......
......@@ -1412,12 +1412,7 @@ static struct mdev_driver mbochs_driver = {
},
.probe = mbochs_probe,
.remove = mbochs_remove,
};
static const struct mdev_parent_ops mdev_fops = {
.owner = THIS_MODULE,
.device_driver = &mbochs_driver,
.supported_type_groups = mdev_type_groups,
.supported_type_groups = mdev_type_groups,
};
static const struct file_operations vd_fops = {
......@@ -1462,7 +1457,7 @@ static int __init mbochs_dev_init(void)
if (ret)
goto err_class;
ret = mdev_register_device(&mbochs_dev, &mdev_fops);
ret = mdev_register_device(&mbochs_dev, &mbochs_driver);
if (ret)
goto err_device;
......
......@@ -723,12 +723,7 @@ static struct mdev_driver mdpy_driver = {
},
.probe = mdpy_probe,
.remove = mdpy_remove,
};
static const struct mdev_parent_ops mdev_fops = {
.owner = THIS_MODULE,
.device_driver = &mdpy_driver,
.supported_type_groups = mdev_type_groups,
.supported_type_groups = mdev_type_groups,
};
static const struct file_operations vd_fops = {
......@@ -771,7 +766,7 @@ static int __init mdpy_dev_init(void)
if (ret)
goto err_class;
ret = mdev_register_device(&mdpy_dev, &mdev_fops);
ret = mdev_register_device(&mdpy_dev, &mdpy_driver);
if (ret)
goto err_device;
......
......@@ -1207,38 +1207,11 @@ static long mtty_ioctl(struct vfio_device *vdev, unsigned int cmd,
return -ENOTTY;
}
static ssize_t
sample_mtty_dev_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
return sprintf(buf, "This is phy device\n");
}
static DEVICE_ATTR_RO(sample_mtty_dev);
static struct attribute *mtty_dev_attrs[] = {
&dev_attr_sample_mtty_dev.attr,
NULL,
};
static const struct attribute_group mtty_dev_group = {
.name = "mtty_dev",
.attrs = mtty_dev_attrs,
};
static const struct attribute_group *mtty_dev_groups[] = {
&mtty_dev_group,
NULL,
};
static ssize_t
sample_mdev_dev_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
if (mdev_from_dev(dev))
return sprintf(buf, "This is MDEV %s\n", dev_name(dev));
return sprintf(buf, "\n");
return sprintf(buf, "This is MDEV %s\n", dev_name(dev));
}
static DEVICE_ATTR_RO(sample_mdev_dev);
......@@ -1328,13 +1301,7 @@ static struct mdev_driver mtty_driver = {
},
.probe = mtty_probe,
.remove = mtty_remove,
};
static const struct mdev_parent_ops mdev_fops = {
.owner = THIS_MODULE,
.device_driver = &mtty_driver,
.dev_attr_groups = mtty_dev_groups,
.supported_type_groups = mdev_type_groups,
.supported_type_groups = mdev_type_groups,
};
static void mtty_device_release(struct device *dev)
......@@ -1385,7 +1352,7 @@ static int __init mtty_dev_init(void)
if (ret)
goto err_class;
ret = mdev_register_device(&mtty_dev.dev, &mdev_fops);
ret = mdev_register_device(&mtty_dev.dev, &mtty_driver);
if (ret)
goto err_device;
return 0;
......