Commit 0d7e76be authored by Rodrigo Vivi

Merge tag 'gvt-next-2017-12-05' of https://github.com/intel/gvt-linux into drm-intel-next-queued

gvt-next-2017-12-05

- VFIO mdev display dmabuf interface and gvt support (Tina)
- VFIO mdev opregion support/fixes (Tina/Xiong/Chris)
- workload scheduling optimization (Changbin)
- preemption fix and temporary workaround (Zhenyu)
- and misc fixes after refactor (Chris)
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20171205032629.vylemph57toipeax@zhen-hp.sh.intel.com
parents 2abf3c0d 1603660b
GVT_DIR := gvt
GVT_SOURCE := gvt.o aperture_gm.o handlers.o vgpu.o trace_points.o firmware.o \
	interrupt.o gtt.o cfg_space.o opregion.o mmio.o display.o edid.o \
-	execlist.o scheduler.o sched_policy.o render.o cmd_parser.o debugfs.o
+	execlist.o scheduler.o sched_policy.o render.o cmd_parser.o debugfs.o \
+	fb_decoder.o dmabuf.o

ccflags-y += -I$(src) -I$(src)/$(GVT_DIR)
i915-y += $(addprefix $(GVT_DIR)/, $(GVT_SOURCE))
...
@@ -335,7 +335,8 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
	case INTEL_GVT_PCI_OPREGION:
		if (WARN_ON(!IS_ALIGNED(offset, 4)))
			return -EINVAL;
-		ret = intel_vgpu_init_opregion(vgpu, *(u32 *)p_data);
+		ret = intel_vgpu_opregion_base_write_handler(vgpu,
+						*(u32 *)p_data);
		if (ret)
			return ret;
...
@@ -67,7 +67,7 @@ static int edp_pipe_is_enabled(struct intel_vgpu *vgpu)
	return 1;
}

-static int pipe_is_enabled(struct intel_vgpu *vgpu, int pipe)
+int pipe_is_enabled(struct intel_vgpu *vgpu, int pipe)
{
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
...
@@ -179,4 +179,6 @@ int intel_vgpu_init_display(struct intel_vgpu *vgpu, u64 resolution);
void intel_vgpu_reset_display(struct intel_vgpu *vgpu);
void intel_vgpu_clean_display(struct intel_vgpu *vgpu);

+int pipe_is_enabled(struct intel_vgpu *vgpu, int pipe);
+
#endif
This diff is collapsed.
/*
* Copyright(c) 2017 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Zhiyuan Lv <zhiyuan.lv@intel.com>
*
* Contributors:
* Xiaoguang Chen
* Tina Zhang <tina.zhang@intel.com>
*/
#ifndef _GVT_DMABUF_H_
#define _GVT_DMABUF_H_
#include <linux/vfio.h>
struct intel_vgpu_fb_info {
__u64 start;
__u64 start_gpa;
__u64 drm_format_mod;
__u32 drm_format; /* drm format of plane */
__u32 width; /* width of plane */
__u32 height; /* height of plane */
__u32 stride; /* stride of plane */
__u32 size; /* size of plane in bytes, align on page */
__u32 x_pos; /* horizontal position of cursor plane */
__u32 y_pos; /* vertical position of cursor plane */
__u32 x_hot; /* horizontal position of cursor hotspot */
__u32 y_hot; /* vertical position of cursor hotspot */
struct intel_vgpu_dmabuf_obj *obj;
};
/**
* struct intel_vgpu_dmabuf_obj - Intel vGPU device buffer object
*/
struct intel_vgpu_dmabuf_obj {
struct intel_vgpu *vgpu;
struct intel_vgpu_fb_info *info;
__u32 dmabuf_id;
struct kref kref;
bool initref;
struct list_head list;
};
int intel_vgpu_query_plane(struct intel_vgpu *vgpu, void *args);
int intel_vgpu_get_dmabuf(struct intel_vgpu *vgpu, unsigned int dmabuf_id);
void intel_vgpu_dmabuf_cleanup(struct intel_vgpu *vgpu);
#endif
@@ -458,7 +458,7 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id,
	gvt_dbg_el("workload %p emulate schedule_in %d\n", workload,
			emulate_schedule_in);

-	queue_workload(workload);
+	intel_vgpu_queue_workload(workload);
	return 0;
}

@@ -528,7 +528,7 @@ static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
	vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
}

-void clean_execlist(struct intel_vgpu *vgpu)
+static void clean_execlist(struct intel_vgpu *vgpu)
{
	enum intel_engine_id i;
	struct intel_engine_cs *engine;

@@ -542,7 +542,7 @@ void clean_execlist(struct intel_vgpu *vgpu)
	}
}

-void reset_execlist(struct intel_vgpu *vgpu,
+static void reset_execlist(struct intel_vgpu *vgpu,
		unsigned long engine_mask)
{
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;

@@ -553,7 +553,7 @@ void reset_execlist(struct intel_vgpu *vgpu,
	init_vgpu_execlist(vgpu, engine->id);
}

-int init_execlist(struct intel_vgpu *vgpu)
+static int init_execlist(struct intel_vgpu *vgpu)
{
	reset_execlist(vgpu, ALL_ENGINES);
	return 0;
...
This diff is collapsed.
/*
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Kevin Tian <kevin.tian@intel.com>
*
* Contributors:
* Bing Niu <bing.niu@intel.com>
* Xu Han <xu.han@intel.com>
* Ping Gao <ping.a.gao@intel.com>
* Xiaoguang Chen <xiaoguang.chen@intel.com>
* Yang Liu <yang2.liu@intel.com>
* Tina Zhang <tina.zhang@intel.com>
*
*/
#ifndef _GVT_FB_DECODER_H_
#define _GVT_FB_DECODER_H_
#define _PLANE_CTL_FORMAT_SHIFT 24
#define _PLANE_CTL_TILED_SHIFT 10
#define _PIPE_V_SRCSZ_SHIFT 0
#define _PIPE_V_SRCSZ_MASK (0xfff << _PIPE_V_SRCSZ_SHIFT)
#define _PIPE_H_SRCSZ_SHIFT 16
#define _PIPE_H_SRCSZ_MASK (0x1fff << _PIPE_H_SRCSZ_SHIFT)
#define _PRI_PLANE_FMT_SHIFT 26
#define _PRI_PLANE_STRIDE_MASK (0x3ff << 6)
#define _PRI_PLANE_X_OFF_SHIFT 0
#define _PRI_PLANE_X_OFF_MASK (0x1fff << _PRI_PLANE_X_OFF_SHIFT)
#define _PRI_PLANE_Y_OFF_SHIFT 16
#define _PRI_PLANE_Y_OFF_MASK (0xfff << _PRI_PLANE_Y_OFF_SHIFT)
#define _CURSOR_MODE 0x3f
#define _CURSOR_ALPHA_FORCE_SHIFT 8
#define _CURSOR_ALPHA_FORCE_MASK (0x3 << _CURSOR_ALPHA_FORCE_SHIFT)
#define _CURSOR_ALPHA_PLANE_SHIFT 10
#define _CURSOR_ALPHA_PLANE_MASK (0x3 << _CURSOR_ALPHA_PLANE_SHIFT)
#define _CURSOR_POS_X_SHIFT 0
#define _CURSOR_POS_X_MASK (0x1fff << _CURSOR_POS_X_SHIFT)
#define _CURSOR_SIGN_X_SHIFT 15
#define _CURSOR_SIGN_X_MASK (1 << _CURSOR_SIGN_X_SHIFT)
#define _CURSOR_POS_Y_SHIFT 16
#define _CURSOR_POS_Y_MASK (0xfff << _CURSOR_POS_Y_SHIFT)
#define _CURSOR_SIGN_Y_SHIFT 31
#define _CURSOR_SIGN_Y_MASK (1 << _CURSOR_SIGN_Y_SHIFT)
#define _SPRITE_FMT_SHIFT 25
#define _SPRITE_COLOR_ORDER_SHIFT 20
#define _SPRITE_YUV_ORDER_SHIFT 16
#define _SPRITE_STRIDE_SHIFT 6
#define _SPRITE_STRIDE_MASK (0x1ff << _SPRITE_STRIDE_SHIFT)
#define _SPRITE_SIZE_WIDTH_SHIFT 0
#define _SPRITE_SIZE_HEIGHT_SHIFT 16
#define _SPRITE_SIZE_WIDTH_MASK (0x1fff << _SPRITE_SIZE_WIDTH_SHIFT)
#define _SPRITE_SIZE_HEIGHT_MASK (0xfff << _SPRITE_SIZE_HEIGHT_SHIFT)
#define _SPRITE_POS_X_SHIFT 0
#define _SPRITE_POS_Y_SHIFT 16
#define _SPRITE_POS_X_MASK (0x1fff << _SPRITE_POS_X_SHIFT)
#define _SPRITE_POS_Y_MASK (0xfff << _SPRITE_POS_Y_SHIFT)
#define _SPRITE_OFFSET_START_X_SHIFT 0
#define _SPRITE_OFFSET_START_Y_SHIFT 16
#define _SPRITE_OFFSET_START_X_MASK (0x1fff << _SPRITE_OFFSET_START_X_SHIFT)
#define _SPRITE_OFFSET_START_Y_MASK (0xfff << _SPRITE_OFFSET_START_Y_SHIFT)
enum GVT_FB_EVENT {
FB_MODE_SET_START = 1,
FB_MODE_SET_END,
FB_DISPLAY_FLIP,
};
enum DDI_PORT {
DDI_PORT_NONE = 0,
DDI_PORT_B = 1,
DDI_PORT_C = 2,
DDI_PORT_D = 3,
DDI_PORT_E = 4
};
struct intel_gvt;
/* color space conversion and gamma correction are not included */
struct intel_vgpu_primary_plane_format {
u8 enabled; /* plane is enabled */
u8 tiled; /* X-tiled */
u8 bpp; /* bits per pixel */
u32 hw_format; /* format field in the PRI_CTL register */
u32 drm_format; /* format in DRM definition */
u32 base; /* framebuffer base in graphics memory */
u64 base_gpa;
u32 x_offset; /* in pixels */
u32 y_offset; /* in lines */
u32 width; /* in pixels */
u32 height; /* in lines */
u32 stride; /* in bytes */
};
struct intel_vgpu_sprite_plane_format {
u8 enabled; /* plane is enabled */
u8 tiled; /* X-tiled */
u8 bpp; /* bits per pixel */
u32 hw_format; /* format field in the SPR_CTL register */
u32 drm_format; /* format in DRM definition */
u32 base; /* sprite base in graphics memory */
u64 base_gpa;
u32 x_pos; /* in pixels */
u32 y_pos; /* in lines */
u32 x_offset; /* in pixels */
u32 y_offset; /* in lines */
u32 width; /* in pixels */
u32 height; /* in lines */
u32 stride; /* in bytes */
};
struct intel_vgpu_cursor_plane_format {
u8 enabled;
u8 mode; /* cursor mode select */
u8 bpp; /* bits per pixel */
u32 drm_format; /* format in DRM definition */
u32 base; /* cursor base in graphics memory */
u64 base_gpa;
u32 x_pos; /* in pixels */
u32 y_pos; /* in lines */
u8 x_sign; /* X Position Sign */
u8 y_sign; /* Y Position Sign */
u32 width; /* in pixels */
u32 height; /* in lines */
u32 x_hot; /* in pixels */
u32 y_hot; /* in pixels */
};
struct intel_vgpu_pipe_format {
struct intel_vgpu_primary_plane_format primary;
struct intel_vgpu_sprite_plane_format sprite;
struct intel_vgpu_cursor_plane_format cursor;
enum DDI_PORT ddi_port; /* the DDI port that pipe is connected to */
};
struct intel_vgpu_fb_format {
struct intel_vgpu_pipe_format pipes[I915_MAX_PIPES];
};
int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
struct intel_vgpu_primary_plane_format *plane);
int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu,
struct intel_vgpu_cursor_plane_format *plane);
int intel_vgpu_decode_sprite_plane(struct intel_vgpu *vgpu,
struct intel_vgpu_sprite_plane_format *plane);
#endif
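The _CURSOR_POS_* and _CURSOR_SIGN_* macros above pack a position value and a sign bit for each axis into one register value. A rough, hypothetical illustration (not the decode path used by the actual fb_decoder code) of unpacking such a value with these macros:

static void decode_cursor_position(u32 val,
				   struct intel_vgpu_cursor_plane_format *c)
{
	/* low word: 13-bit X position, sign bit at bit 15 */
	c->x_pos  = (val & _CURSOR_POS_X_MASK) >> _CURSOR_POS_X_SHIFT;
	c->x_sign = (val & _CURSOR_SIGN_X_MASK) >> _CURSOR_SIGN_X_SHIFT;
	/* high word: 12-bit Y position, sign bit at bit 31 */
	c->y_pos  = (val & _CURSOR_POS_Y_MASK) >> _CURSOR_POS_Y_SHIFT;
	c->y_sign = (val & _CURSOR_SIGN_Y_MASK) >> _CURSOR_SIGN_Y_SHIFT;
}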
@@ -181,6 +181,8 @@ static const struct intel_gvt_ops intel_gvt_ops = {
	.vgpu_deactivate = intel_gvt_deactivate_vgpu,
	.gvt_find_vgpu_type = intel_gvt_find_vgpu_type,
	.get_gvt_attrs = intel_get_gvt_attrs,
+	.vgpu_query_plane = intel_vgpu_query_plane,
+	.vgpu_get_dmabuf = intel_vgpu_get_dmabuf,
};

/**
...
@@ -46,6 +46,8 @@
#include "sched_policy.h"
#include "render.h"
#include "cmd_parser.h"
+#include "fb_decoder.h"
+#include "dmabuf.h"

#define GVT_MAX_VGPU 8

@@ -123,7 +125,9 @@ struct intel_vgpu_irq {
};

struct intel_vgpu_opregion {
+	bool mapped;
	void *va;
+	void *va_gopregion;
	u32 gfn[INTEL_GVT_OPREGION_PAGES];
};

@@ -206,8 +210,16 @@ struct intel_vgpu {
		struct kvm *kvm;
		struct work_struct release_work;
		atomic_t released;
+		struct vfio_device *vfio_device;
	} vdev;
#endif
+	struct list_head dmabuf_obj_list_head;
+	struct mutex dmabuf_lock;
+	struct idr object_idr;
+	struct completion vblank_done;
};

/* validating GM healthy status*/

@@ -505,7 +517,8 @@ static inline u64 intel_vgpu_get_bar_gpa(struct intel_vgpu *vgpu, int bar)
}

void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu);
-int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa);
+int intel_vgpu_init_opregion(struct intel_vgpu *vgpu);
+int intel_vgpu_opregion_base_write_handler(struct intel_vgpu *vgpu, u32 gpa);
int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci);

void populate_pvinfo_page(struct intel_vgpu *vgpu);

@@ -532,6 +545,8 @@ struct intel_gvt_ops {
			const char *name);
	bool (*get_gvt_attrs)(struct attribute ***type_attrs,
			struct attribute_group ***intel_vgpu_type_groups);
+	int (*vgpu_query_plane)(struct intel_vgpu *vgpu, void *);
+	int (*vgpu_get_dmabuf)(struct intel_vgpu *vgpu, unsigned int);
};
...
@@ -55,6 +55,9 @@ struct intel_gvt_mpt {
			unsigned long mfn, unsigned int nr, bool map);
	int (*set_trap_area)(unsigned long handle, u64 start, u64 end,
			bool map);
+	int (*set_opregion)(void *vgpu);
+	int (*get_vfio_device)(void *vgpu);
+	void (*put_vfio_device)(void *vgpu);
};

extern struct intel_gvt_mpt xengt_mpt;
...
@@ -53,11 +53,23 @@ static const struct intel_gvt_ops *intel_gvt_ops;
#define VFIO_PCI_INDEX_TO_OFFSET(index) ((u64)(index) << VFIO_PCI_OFFSET_SHIFT)
#define VFIO_PCI_OFFSET_MASK    (((u64)(1) << VFIO_PCI_OFFSET_SHIFT) - 1)

+#define OPREGION_SIGNATURE "IntelGraphicsMem"
+
+struct vfio_region;
+struct intel_vgpu_regops {
+	size_t (*rw)(struct intel_vgpu *vgpu, char *buf,
+			size_t count, loff_t *ppos, bool iswrite);
+	void (*release)(struct intel_vgpu *vgpu,
+			struct vfio_region *region);
+};
+
struct vfio_region {
	u32 type;
	u32 subtype;
	size_t size;
	u32 flags;
+	const struct intel_vgpu_regops *ops;
+	void *data;
};

struct kvmgt_pgfn {

@@ -316,6 +328,108 @@ static void kvmgt_protect_table_del(struct kvmgt_guest_info *info,
	}
}
static size_t intel_vgpu_reg_rw_opregion(struct intel_vgpu *vgpu, char *buf,
size_t count, loff_t *ppos, bool iswrite)
{
unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) -
VFIO_PCI_NUM_REGIONS;
void *base = vgpu->vdev.region[i].data;
loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
if (pos >= vgpu->vdev.region[i].size || iswrite) {
gvt_vgpu_err("invalid op or offset for Intel vgpu OpRegion\n");
return -EINVAL;
}
count = min(count, (size_t)(vgpu->vdev.region[i].size - pos));
memcpy(buf, base + pos, count);
return count;
}
static void intel_vgpu_reg_release_opregion(struct intel_vgpu *vgpu,
struct vfio_region *region)
{
}
static const struct intel_vgpu_regops intel_vgpu_regops_opregion = {
.rw = intel_vgpu_reg_rw_opregion,
.release = intel_vgpu_reg_release_opregion,
};
static int intel_vgpu_register_reg(struct intel_vgpu *vgpu,
unsigned int type, unsigned int subtype,
const struct intel_vgpu_regops *ops,
size_t size, u32 flags, void *data)
{
struct vfio_region *region;
region = krealloc(vgpu->vdev.region,
(vgpu->vdev.num_regions + 1) * sizeof(*region),
GFP_KERNEL);
if (!region)
return -ENOMEM;
vgpu->vdev.region = region;
vgpu->vdev.region[vgpu->vdev.num_regions].type = type;
vgpu->vdev.region[vgpu->vdev.num_regions].subtype = subtype;
vgpu->vdev.region[vgpu->vdev.num_regions].ops = ops;
vgpu->vdev.region[vgpu->vdev.num_regions].size = size;
vgpu->vdev.region[vgpu->vdev.num_regions].flags = flags;
vgpu->vdev.region[vgpu->vdev.num_regions].data = data;
vgpu->vdev.num_regions++;
return 0;
}
static int kvmgt_get_vfio_device(void *p_vgpu)
{
struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
vgpu->vdev.vfio_device = vfio_device_get_from_dev(
mdev_dev(vgpu->vdev.mdev));
if (!vgpu->vdev.vfio_device) {
gvt_vgpu_err("failed to get vfio device\n");
return -ENODEV;
}
return 0;
}
static int kvmgt_set_opregion(void *p_vgpu)
{
struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
void *base;
int ret;
/* Each vgpu has its own opregion, although VFIO would create another
* one later. This one is used to expose opregion to VFIO. And the
* other one created by VFIO later, is used by guest actually.
*/
base = vgpu_opregion(vgpu)->va;
if (!base)
return -ENOMEM;
if (memcmp(base, OPREGION_SIGNATURE, 16)) {
memunmap(base);
return -EINVAL;
}
ret = intel_vgpu_register_reg(vgpu,
PCI_VENDOR_ID_INTEL | VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION,
&intel_vgpu_regops_opregion, OPREGION_SIZE,
VFIO_REGION_INFO_FLAG_READ, base);
return ret;
}
static void kvmgt_put_vfio_device(void *vgpu)
{
if (WARN_ON(!((struct intel_vgpu *)vgpu)->vdev.vfio_device))
return;
vfio_device_put(((struct intel_vgpu *)vgpu)->vdev.vfio_device);
}
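Since kvmgt_set_opregion() registers the emulated OpRegion above as a read-only device-specific VFIO region, userspace can fetch it through the normal region read path. A minimal userspace sketch, assuming the region index has already been discovered via VFIO_DEVICE_GET_REGION_INFO and that the index-to-offset encoding uses vfio-pci's 40-bit shift (both assumptions, mirroring VFIO_PCI_INDEX_TO_OFFSET() above):

#include <string.h>
#include <sys/types.h>
#include <unistd.h>

/* Hypothetical helper; 'index' is VFIO_PCI_NUM_REGIONS + n for the
 * OpRegion region. Writes are rejected by intel_vgpu_reg_rw_opregion(),
 * so only reads are attempted.
 */
static int read_vgpu_opregion(int device_fd, unsigned int index,
			      void *buf, size_t len)
{
	off_t offset = (off_t)index << 40;	/* assumed VFIO_PCI_OFFSET_SHIFT */

	if (pread(device_fd, buf, len, offset) != (ssize_t)len)
		return -1;
	/* the emulated OpRegion starts with the "IntelGraphicsMem" signature */
	return memcmp(buf, "IntelGraphicsMem", 16) ? -1 : 0;
}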
static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
{
	struct intel_vgpu *vgpu = NULL;

@@ -546,7 +660,7 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
	int ret = -EINVAL;

-	if (index >= VFIO_PCI_NUM_REGIONS) {
+	if (index >= VFIO_PCI_NUM_REGIONS + vgpu->vdev.num_regions) {
		gvt_vgpu_err("invalid index: %u\n", index);
		return -EINVAL;
	}

@@ -574,8 +688,14 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
	case VFIO_PCI_BAR5_REGION_INDEX:
	case VFIO_PCI_VGA_REGION_INDEX:
	case VFIO_PCI_ROM_REGION_INDEX:
+		break;
	default:
-		gvt_vgpu_err("unsupported region: %u\n", index);
+		if (index >= VFIO_PCI_NUM_REGIONS + vgpu->vdev.num_regions)
+			return -EINVAL;
+
+		index -= VFIO_PCI_NUM_REGIONS;
+		return vgpu->vdev.region[index].ops->rw(vgpu, buf, count,
+				ppos, is_write);
	}

	return ret == 0 ? count : ret;
@@ -838,7 +958,8 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
		info.flags = VFIO_DEVICE_FLAGS_PCI;
		info.flags |= VFIO_DEVICE_FLAGS_RESET;
-		info.num_regions = VFIO_PCI_NUM_REGIONS;
+		info.num_regions = VFIO_PCI_NUM_REGIONS +
+				vgpu->vdev.num_regions;
		info.num_irqs = VFIO_PCI_NUM_IRQS;

		return copy_to_user((void __user *)arg, &info, minsz) ?

@@ -959,6 +1080,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
		}

		if (caps.size) {
+			info.flags |= VFIO_REGION_INFO_FLAG_CAPS;
			if (info.argsz < sizeof(info) + caps.size) {
				info.argsz = sizeof(info) + caps.size;
				info.cap_offset = 0;
@@ -1045,6 +1167,33 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
	} else if (cmd == VFIO_DEVICE_RESET) {
		intel_gvt_ops->vgpu_reset(vgpu);
		return 0;
} else if (cmd == VFIO_DEVICE_QUERY_GFX_PLANE) {
struct vfio_device_gfx_plane_info dmabuf;
int ret = 0;
minsz = offsetofend(struct vfio_device_gfx_plane_info,
dmabuf_id);
if (copy_from_user(&dmabuf, (void __user *)arg, minsz))
return -EFAULT;
if (dmabuf.argsz < minsz)
return -EINVAL;
ret = intel_gvt_ops->vgpu_query_plane(vgpu, &dmabuf);
if (ret != 0)
return ret;
return copy_to_user((void __user *)arg, &dmabuf, minsz) ?
-EFAULT : 0;
} else if (cmd == VFIO_DEVICE_GET_GFX_DMABUF) {
__u32 dmabuf_id;
__s32 dmabuf_fd;
if (get_user(dmabuf_id, (__u32 __user *)arg))
return -EFAULT;
dmabuf_fd = intel_gvt_ops->vgpu_get_dmabuf(vgpu, dmabuf_id);
return dmabuf_fd;
	}

	return 0;
@@ -1286,6 +1435,9 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
	kvmgt_protect_table_init(info);
	gvt_cache_init(vgpu);

+	mutex_init(&vgpu->dmabuf_lock);
+	init_completion(&vgpu->vblank_done);
+
	info->track_node.track_write = kvmgt_page_track_write;
	info->track_node.track_flush_slot = kvmgt_page_track_flush_slot;
	kvm_page_track_register_notifier(kvm, &info->track_node);
@@ -1426,6 +1578,9 @@ struct intel_gvt_mpt kvmgt_mpt = {
	.read_gpa = kvmgt_read_gpa,
	.write_gpa = kvmgt_write_gpa,
	.gfn_to_mfn = kvmgt_gfn_to_pfn,
+	.set_opregion = kvmgt_set_opregion,
+	.get_vfio_device = kvmgt_get_vfio_device,
+	.put_vfio_device = kvmgt_put_vfio_device,
};
EXPORT_SYMBOL_GPL(kvmgt_mpt);
...
@@ -294,4 +294,49 @@ static inline int intel_gvt_hypervisor_set_trap_area(
	return intel_gvt_host.mpt->set_trap_area(vgpu->handle, start, end, map);
}
/**
* intel_gvt_hypervisor_set_opregion - Set opregion for guest
* @vgpu: a vGPU
*
* Returns:
* Zero on success, negative error code if failed.
*/
static inline int intel_gvt_hypervisor_set_opregion(struct intel_vgpu *vgpu)
{
if (!intel_gvt_host.mpt->set_opregion)
return 0;
return intel_gvt_host.mpt->set_opregion(vgpu);
}
/**
* intel_gvt_hypervisor_get_vfio_device - increase vfio device ref count
* @vgpu: a vGPU
*
* Returns:
* Zero on success, negative error code if failed.
*/
static inline int intel_gvt_hypervisor_get_vfio_device(struct intel_vgpu *vgpu)
{
if (!intel_gvt_host.mpt->get_vfio_device)
return 0;
return intel_gvt_host.mpt->get_vfio_device(vgpu);
}
/**
* intel_gvt_hypervisor_put_vfio_device - decrease vfio device ref count
* @vgpu: a vGPU
*
* Returns:
* Zero on success, negative error code if failed.
*/
static inline void intel_gvt_hypervisor_put_vfio_device(struct intel_vgpu *vgpu)
{
if (!intel_gvt_host.mpt->put_vfio_device)
return;
intel_gvt_host.mpt->put_vfio_device(vgpu);
}
#endif /* _GVT_MPT_H_ */
@@ -213,11 +213,20 @@ static void virt_vbt_generation(struct vbt *v)
	v->driver_features.lvds_config = BDB_DRIVER_FEATURE_NO_LVDS;
}

-static int alloc_and_init_virt_opregion(struct intel_vgpu *vgpu)
+/**
+ * intel_vgpu_init_opregion - initialize the stuff used to emulate opregion
+ * @vgpu: a vGPU
+ * @gpa: guest physical address of opregion
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_vgpu_init_opregion(struct intel_vgpu *vgpu)
{
	u8 *buf;
	struct opregion_header *header;
	struct vbt v;
+	const char opregion_signature[16] = OPREGION_SIGNATURE;

	gvt_dbg_core("init vgpu%d opregion\n", vgpu->id);
	vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_KERNEL |

@@ -231,8 +240,8 @@ static int alloc_and_init_virt_opregion(struct intel_vgpu *vgpu)
	/* emulated opregion with VBT mailbox only */
	buf = (u8 *)vgpu_opregion(vgpu)->va;
	header = (struct opregion_header *)buf;
-	memcpy(header->signature, OPREGION_SIGNATURE,
-			sizeof(OPREGION_SIGNATURE));
+	memcpy(header->signature, opregion_signature,
+			sizeof(opregion_signature));
	header->size = 0x8;
	header->opregion_ver = 0x02000000;
	header->mboxes = MBOX_VBT;

@@ -250,25 +259,6 @@ static int alloc_and_init_virt_opregion(struct intel_vgpu *vgpu)
	return 0;
}

-static int init_vgpu_opregion(struct intel_vgpu *vgpu, u32 gpa)
-{
-	int i, ret;
-	if (WARN((vgpu_opregion(vgpu)->va),
-			"vgpu%d: opregion has been initialized already.\n",
-			vgpu->id))
-		return -EINVAL;
-	ret = alloc_and_init_virt_opregion(vgpu);
-	if (ret < 0)
-		return ret;
-	for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++)
-		vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i;
-	return 0;
-}
-
static int map_vgpu_opregion(struct intel_vgpu *vgpu, bool map)
{
	u64 mfn;

@@ -290,59 +280,91 @@ static int map_vgpu_opregion(struct intel_vgpu *vgpu, bool map)
			return ret;
		}
	}
+	vgpu_opregion(vgpu)->mapped = map;

	return 0;
}
/**
- * intel_vgpu_clean_opregion - clean the stuff used to emulate opregion
+ * intel_vgpu_opregion_base_write_handler - Opregion base register write handler
+ *
 * @vgpu: a vGPU
+ * @gpa: guest physical address of opregion
 *
+ * Returns:
+ * Zero on success, negative error code if failed.
 */
-void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu)
+int intel_vgpu_opregion_base_write_handler(struct intel_vgpu *vgpu, u32 gpa)
{
-	gvt_dbg_core("vgpu%d: clean vgpu opregion\n", vgpu->id);
-	if (!vgpu_opregion(vgpu)->va)
-		return;
-	if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_XEN) {
+	int i, ret = 0;
+	unsigned long pfn;
+	gvt_dbg_core("emulate opregion from kernel\n");
+
+	switch (intel_gvt_host.hypervisor_type) {
+	case INTEL_GVT_HYPERVISOR_KVM:
+		pfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, gpa >> PAGE_SHIFT);
+		vgpu_opregion(vgpu)->va_gopregion = memremap(pfn << PAGE_SHIFT,
+						INTEL_GVT_OPREGION_SIZE,
+						MEMREMAP_WB);
+		if (!vgpu_opregion(vgpu)->va_gopregion) {
+			gvt_vgpu_err("failed to map guest opregion\n");
+			ret = -EFAULT;
+		}
+		vgpu_opregion(vgpu)->mapped = true;
+		break;
+	case INTEL_GVT_HYPERVISOR_XEN:
+		/**
+		 * Wins guest on Xengt will write this register twice: xen
+		 * hvmloader and windows graphic driver.
+		 */
+		if (vgpu_opregion(vgpu)->mapped)
			map_vgpu_opregion(vgpu, false);
-		free_pages((unsigned long)vgpu_opregion(vgpu)->va,
-				get_order(INTEL_GVT_OPREGION_SIZE));
-		vgpu_opregion(vgpu)->va = NULL;
+		for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++)
+			vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i;
+		ret = map_vgpu_opregion(vgpu, true);
+		break;
+	default:
+		ret = -EINVAL;
+		gvt_vgpu_err("not supported hypervisor\n");
	}
+	return ret;
}
/**
- * intel_vgpu_init_opregion - initialize the stuff used to emulate opregion
+ * intel_vgpu_clean_opregion - clean the stuff used to emulate opregion
 * @vgpu: a vGPU
- * @gpa: guest physical address of opregion
 *
- * Returns:
- * Zero on success, negative error code if failed.
 */
-int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa)
+void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu)
{
-	int ret;
-	gvt_dbg_core("vgpu%d: init vgpu opregion\n", vgpu->id);
+	gvt_dbg_core("vgpu%d: clean vgpu opregion\n", vgpu->id);
+	if (!vgpu_opregion(vgpu)->va)
+		return;

	if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_XEN) {
-		gvt_dbg_core("emulate opregion from kernel\n");
-		ret = init_vgpu_opregion(vgpu, gpa);
-		if (ret)
-			return ret;
-		ret = map_vgpu_opregion(vgpu, true);
-		if (ret)
-			return ret;
+		if (vgpu_opregion(vgpu)->mapped)
+			map_vgpu_opregion(vgpu, false);
+	} else if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_KVM) {
+		if (vgpu_opregion(vgpu)->mapped) {
+			memunmap(vgpu_opregion(vgpu)->va_gopregion);
+			vgpu_opregion(vgpu)->va_gopregion = NULL;
+		}
	}
+	free_pages((unsigned long)vgpu_opregion(vgpu)->va,
+			get_order(INTEL_GVT_OPREGION_SIZE));
+	vgpu_opregion(vgpu)->va = NULL;
-	return 0;
}
#define GVT_OPREGION_FUNC(scic) \
({ \
	u32 __ret; \

@@ -461,8 +483,21 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci)
	u32 *scic, *parm;
	u32 func, subfunc;

+	switch (intel_gvt_host.hypervisor_type) {
+	case INTEL_GVT_HYPERVISOR_XEN:
		scic = vgpu_opregion(vgpu)->va + INTEL_GVT_OPREGION_SCIC;
		parm = vgpu_opregion(vgpu)->va + INTEL_GVT_OPREGION_PARM;
+		break;
+	case INTEL_GVT_HYPERVISOR_KVM:
+		scic = vgpu_opregion(vgpu)->va_gopregion +
+						INTEL_GVT_OPREGION_SCIC;
+		parm = vgpu_opregion(vgpu)->va_gopregion +
+						INTEL_GVT_OPREGION_PARM;
+		break;
+	default:
+		gvt_vgpu_err("not supported hypervisor\n");
+		return -EINVAL;
+	}

	if (!(swsci & SWSCI_SCI_SELECT)) {
		gvt_vgpu_err("requesting SMI service\n");
...
@@ -372,6 +372,11 @@ void intel_vgpu_start_schedule(struct intel_vgpu *vgpu)
	vgpu->gvt->scheduler.sched_ops->start_schedule(vgpu);
}

+void intel_gvt_kick_schedule(struct intel_gvt *gvt)
+{
+	intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED);
+}
+
void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
{
	struct intel_gvt_workload_scheduler *scheduler =
...
@@ -57,4 +57,6 @@ void intel_vgpu_start_schedule(struct intel_vgpu *vgpu);
void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu);

+void intel_gvt_kick_schedule(struct intel_gvt *gvt);
+
#endif
@@ -188,10 +188,12 @@ static int shadow_context_status_change(struct notifier_block *nb,
		atomic_set(&workload->shadow_ctx_active, 1);
		break;
	case INTEL_CONTEXT_SCHEDULE_OUT:
-	case INTEL_CONTEXT_SCHEDULE_PREEMPTED:
		save_ring_hw_state(workload->vgpu, ring_id);
		atomic_set(&workload->shadow_ctx_active, 0);
		break;
+	case INTEL_CONTEXT_SCHEDULE_PREEMPTED:
+		save_ring_hw_state(workload->vgpu, ring_id);
+		break;
	default:
		WARN_ON(1);
		return NOTIFY_OK;

@@ -245,7 +247,7 @@ static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
	return 0;
}

-void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
+static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
	if (!wa_ctx->indirect_ctx.obj)
		return;

@@ -1036,6 +1038,9 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
	if (IS_ERR(s->shadow_ctx))
		return PTR_ERR(s->shadow_ctx);

+	if (HAS_LOGICAL_RING_PREEMPTION(vgpu->gvt->dev_priv))
+		s->shadow_ctx->priority = INT_MAX;
+
	bitmap_zero(s->shadow_ctx_desc_updated, I915_NUM_ENGINES);

	s->workloads = kmem_cache_create("gvt-g_vgpu_workload",

@@ -1328,3 +1333,15 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
	return workload;
}
/**
* intel_vgpu_queue_workload - Queue a vGPU workload
* @workload: the workload to queue in
*/
void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload)
{
list_add_tail(&workload->list,
workload_q_head(workload->vgpu, workload->ring_id));
intel_gvt_kick_schedule(workload->vgpu->gvt);
wake_up(&workload->vgpu->gvt->scheduler.waitq[workload->ring_id]);
}
@@ -125,12 +125,7 @@ struct intel_vgpu_shadow_bb {
#define workload_q_head(vgpu, ring_id) \
	(&(vgpu->submission.workload_q_head[ring_id]))

-#define queue_workload(workload) do { \
-	list_add_tail(&workload->list, \
-		workload_q_head(workload->vgpu, workload->ring_id)); \
-	wake_up(&workload->vgpu->gvt-> \
-		scheduler.waitq[workload->ring_id]); \
-} while (0)
+void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload);

int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt);
...
@@ -236,6 +236,7 @@ void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu)
	}

	intel_vgpu_stop_schedule(vgpu);
+	intel_vgpu_dmabuf_cleanup(vgpu);

	mutex_unlock(&gvt->lock);
}

@@ -265,6 +266,7 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
	intel_gvt_hypervisor_detach_vgpu(vgpu);
	intel_vgpu_free_resource(vgpu);
	intel_vgpu_clean_mmio(vgpu);
+	intel_vgpu_dmabuf_cleanup(vgpu);
	vfree(vgpu);

	intel_gvt_update_vgpu_types(gvt);

@@ -349,7 +351,8 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
	vgpu->handle = param->handle;
	vgpu->gvt = gvt;
	vgpu->sched_ctl.weight = param->weight;
+	INIT_LIST_HEAD(&vgpu->dmabuf_obj_list_head);
+	idr_init(&vgpu->object_idr);

	intel_vgpu_init_cfg_space(vgpu, param->primary);

	ret = intel_vgpu_init_mmio(vgpu);

@@ -370,10 +373,14 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
	if (ret)
		goto out_detach_hypervisor_vgpu;

-	ret = intel_vgpu_init_display(vgpu, param->resolution);
+	ret = intel_vgpu_init_opregion(vgpu);
	if (ret)
		goto out_clean_gtt;

+	ret = intel_vgpu_init_display(vgpu, param->resolution);
+	if (ret)
+		goto out_clean_opregion;
+
	ret = intel_vgpu_setup_submission(vgpu);
	if (ret)
		goto out_clean_display;

@@ -386,6 +393,10 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
	if (ret)
		goto out_clean_sched_policy;

+	ret = intel_gvt_hypervisor_set_opregion(vgpu);
+	if (ret)
+		goto out_clean_sched_policy;
+
	mutex_unlock(&gvt->lock);

	return vgpu;

@@ -396,6 +407,8 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
	intel_vgpu_clean_submission(vgpu);
out_clean_display:
	intel_vgpu_clean_display(vgpu);
+out_clean_opregion:
+	intel_vgpu_clean_opregion(vgpu);
out_clean_gtt:
	intel_vgpu_clean_gtt(vgpu);
out_detach_hypervisor_vgpu:
...
@@ -261,6 +261,8 @@ struct drm_i915_gem_object {
		} userptr;

		unsigned long scratch;

+		void *gvt_info;
	};

	/** for phys allocated objects */
...
@@ -502,6 +502,68 @@ struct vfio_pci_hot_reset {
#define VFIO_DEVICE_PCI_HOT_RESET	_IO(VFIO_TYPE, VFIO_BASE + 13)
/**
* VFIO_DEVICE_QUERY_GFX_PLANE - _IOW(VFIO_TYPE, VFIO_BASE + 14,
* struct vfio_device_query_gfx_plane)
*
* Set the drm_plane_type and flags, then retrieve the gfx plane info.
*
* flags supported:
* - VFIO_GFX_PLANE_TYPE_PROBE and VFIO_GFX_PLANE_TYPE_DMABUF are set
* to ask if the mdev supports dma-buf. 0 on support, -EINVAL on no
* support for dma-buf.
* - VFIO_GFX_PLANE_TYPE_PROBE and VFIO_GFX_PLANE_TYPE_REGION are set
* to ask if the mdev supports region. 0 on support, -EINVAL on no
* support for region.
* - VFIO_GFX_PLANE_TYPE_DMABUF or VFIO_GFX_PLANE_TYPE_REGION is set
* with each call to query the plane info.
* - Others are invalid and return -EINVAL.
*
* Note:
* 1. Plane could be disabled by guest. In that case, success will be
* returned with zero-initialized drm_format, size, width and height
* fields.
* 2. x_hot/y_hot is set to 0xFFFFFFFF if no hotspot information available
*
* Return: 0 on success, -errno on other failure.
*/
struct vfio_device_gfx_plane_info {
__u32 argsz;
__u32 flags;
#define VFIO_GFX_PLANE_TYPE_PROBE (1 << 0)
#define VFIO_GFX_PLANE_TYPE_DMABUF (1 << 1)
#define VFIO_GFX_PLANE_TYPE_REGION (1 << 2)
/* in */
__u32 drm_plane_type; /* type of plane: DRM_PLANE_TYPE_* */
/* out */
__u32 drm_format; /* drm format of plane */
__u64 drm_format_mod; /* tiled mode */
__u32 width; /* width of plane */
__u32 height; /* height of plane */
__u32 stride; /* stride of plane */
__u32 size; /* size of plane in bytes, align on page*/
__u32 x_pos; /* horizontal position of cursor plane */
__u32 y_pos; /* vertical position of cursor plane*/
__u32 x_hot; /* horizontal position of cursor hotspot */
__u32 y_hot; /* vertical position of cursor hotspot */
union {
__u32 region_index; /* region index */
__u32 dmabuf_id; /* dma-buf id */
};
};
#define VFIO_DEVICE_QUERY_GFX_PLANE _IO(VFIO_TYPE, VFIO_BASE + 14)
/**
* VFIO_DEVICE_GET_GFX_DMABUF - _IOW(VFIO_TYPE, VFIO_BASE + 15, __u32)
*
* Return a new dma-buf file descriptor for an exposed guest framebuffer
* described by the provided dmabuf_id. The dmabuf_id is returned from VFIO_
* DEVICE_QUERY_GFX_PLANE as a token of the exposed guest framebuffer.
*/
#define VFIO_DEVICE_GET_GFX_DMABUF _IO(VFIO_TYPE, VFIO_BASE + 15)
/* -------- API for Type1 VFIO IOMMU -------- */

/**
...
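Together, the two ioctls give a VMM a simple flow: query a plane, then exchange the returned dmabuf_id for a dma-buf fd it can import into its renderer. A minimal userspace sketch, assuming an open VFIO device fd for the vGPU; the numeric drm_plane_type value below is the standard DRM primary-plane type and is stated here as an assumption:

#include <sys/ioctl.h>
#include <linux/vfio.h>

/* Hypothetical usage: query the guest's primary plane, then get a dma-buf fd. */
static int vgpu_primary_plane_to_dmabuf(int device_fd)
{
	struct vfio_device_gfx_plane_info plane = {
		.argsz = sizeof(plane),
		.flags = VFIO_GFX_PLANE_TYPE_DMABUF,
		.drm_plane_type = 1,	/* DRM_PLANE_TYPE_PRIMARY (assumed value) */
	};

	if (ioctl(device_fd, VFIO_DEVICE_QUERY_GFX_PLANE, &plane))
		return -1;
	if (!plane.drm_format)		/* plane currently disabled by the guest */
		return -1;

	/* returns a new dma-buf file descriptor on success */
	return ioctl(device_fd, VFIO_DEVICE_GET_GFX_DMABUF, &plane.dmabuf_id);
}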