Commit 70647f91 authored by Jani Nikula

Merge tag 'gvt-fixes-2017-03-08' of https://github.com/01org/gvt-linux into drm-intel-fixes

gvt-fixes-2017-03-08

- MMIO cmd access flag cleanup
- Virtual display fixes from Weinan and Bing
- Config space reset fix from Changbin
- Better workload submission error path fix from Chuanxiao
- Other misc fixes
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
parents 77e14ae6 627c845c
@@ -41,6 +41,54 @@ enum {
INTEL_GVT_PCI_BAR_MAX,
};
+/* Bitmap of the writable bits (RW or RW1C bits, which never
+ * co-exist within one byte), byte by byte, in the standard
+ * PCI configuration space (not the full 256 bytes).
+ */
+static const u8 pci_cfg_space_rw_bmp[PCI_INTERRUPT_LINE + 4] = {
+[PCI_COMMAND] = 0xff, 0x07,
+[PCI_STATUS] = 0x00, 0xf9, /* the only RW1C byte */
+[PCI_CACHE_LINE_SIZE] = 0xff,
+[PCI_BASE_ADDRESS_0 ... PCI_CARDBUS_CIS - 1] = 0xff,
+[PCI_ROM_ADDRESS] = 0x01, 0xf8, 0xff, 0xff,
+[PCI_INTERRUPT_LINE] = 0xff,
+};
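A side note on the initializer syntax above, since it is easy to misread: in C, a designated initializer such as [PCI_COMMAND] = 0xff, 0x07 fills the designated index and then continues positionally, so the 0x07 lands at PCI_COMMAND + 1. A minimal, self-contained illustration (the names and array size are made up for the demo, not taken from the driver):

#include <stdio.h>

#define CMD 4 /* stand-in for PCI_COMMAND (offset 0x04) */

static const unsigned char bmp[8] = {
	[CMD] = 0xff, 0x07, /* bmp[4] = 0xff, bmp[5] = 0x07 */
};

int main(void)
{
	for (int i = 0; i < 8; i++)
		printf("%02x ", bmp[i]); /* prints: 00 00 00 00 ff 07 00 00 */
	printf("\n");
	return 0;
}

This is why the PCI_COMMAND entry covers two bytes and the PCI_ROM_ADDRESS entry covers four.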
+/**
+ * vgpu_pci_cfg_mem_write - write virtual cfg space memory
+ * @vgpu: target vGPU
+ * @off: offset into the virtual cfg space
+ * @src: data to be written
+ * @bytes: number of bytes to write
+ *
+ * Use this function to write virtual cfg space memory.
+ * For the standard cfg space, only RW bits can be changed,
+ * and we emulate the RW1C behavior of the PCI_STATUS register.
+ */
+static void vgpu_pci_cfg_mem_write(struct intel_vgpu *vgpu, unsigned int off,
+u8 *src, unsigned int bytes)
+{
+u8 *cfg_base = vgpu_cfg_space(vgpu);
+u8 mask, new, old;
+int i = 0;
+for (; i < bytes && (off + i < sizeof(pci_cfg_space_rw_bmp)); i++) {
+mask = pci_cfg_space_rw_bmp[off + i];
+old = cfg_base[off + i];
+new = src[i] & mask;
+/* The high byte of PCI_STATUS contains RW1C bits; emulate
+ * clearing them when 1 is written. Writing 0 to an RW1C bit
+ * has no effect.
+ */
+if (off + i == PCI_STATUS + 1)
+new = (~new & old) & mask;
+cfg_base[off + i] = (old & ~mask) | new;
+}
+/* For the rest of the configuration space, copy the data as-is. */
+if (i < bytes)
+memcpy(cfg_base + off + i, src + i, bytes - i);
+}
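To make the two write behaviors concrete, here is a small userspace sketch of the same masking arithmetic; the register values are chosen for illustration, and only the expressions mirror the function above:

#include <stdio.h>

int main(void)
{
	unsigned char mask, old, new;

	/* RW byte, e.g. the high byte of PCI_COMMAND (mask 0x07):
	 * a guest write of 0xff may only flip the writable bits. */
	mask = 0x07;
	old = 0x00;
	new = 0xff & mask;
	printf("RW:   %02x\n", (old & ~mask) | new); /* 07 */

	/* RW1C byte, the high byte of PCI_STATUS (mask 0xf9):
	 * writing 1 clears a set bit, writing 0 leaves it alone. */
	mask = 0xf9;
	old = 0x88;            /* bits 7 and 3 currently set */
	new = 0x80 & mask;     /* guest writes 1 to bit 7 only */
	new = (~new & old) & mask;
	printf("RW1C: %02x\n", (old & ~mask) | new); /* 08: bit 7 cleared */
	return 0;
}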
/**
* intel_vgpu_emulate_cfg_read - emulate vGPU configuration space read
*
@@ -123,7 +171,7 @@ static int emulate_pci_command_write(struct intel_vgpu *vgpu,
u8 changed = old ^ new;
int ret;
-memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes);
+vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes);
if (!(changed & PCI_COMMAND_MEMORY))
return 0;
@@ -277,10 +325,10 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
if (ret)
return ret;
-memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes);
+vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes);
break;
default:
-memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes);
+vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes);
break;
}
return 0;
@@ -668,7 +668,7 @@ static inline void print_opcode(u32 cmd, int ring_id)
if (d_info == NULL)
return;
-gvt_err("opcode=0x%x %s sub_ops:",
+gvt_dbg_cmd("opcode=0x%x %s sub_ops:",
cmd >> (32 - d_info->op_len), d_info->name);
for (i = 0; i < d_info->nr_sub_op; i++)
@@ -693,23 +693,23 @@ static void parser_exec_state_dump(struct parser_exec_state *s)
int cnt = 0;
int i;
-gvt_err(" vgpu%d RING%d: ring_start(%08lx) ring_end(%08lx)"
+gvt_dbg_cmd(" vgpu%d RING%d: ring_start(%08lx) ring_end(%08lx)"
" ring_head(%08lx) ring_tail(%08lx)\n", s->vgpu->id,
s->ring_id, s->ring_start, s->ring_start + s->ring_size,
s->ring_head, s->ring_tail);
-gvt_err(" %s %s ip_gma(%08lx) ",
+gvt_dbg_cmd(" %s %s ip_gma(%08lx) ",
s->buf_type == RING_BUFFER_INSTRUCTION ?
"RING_BUFFER" : "BATCH_BUFFER",
s->buf_addr_type == GTT_BUFFER ?
"GTT" : "PPGTT", s->ip_gma);
if (s->ip_va == NULL) {
-gvt_err(" ip_va(NULL)");
+gvt_dbg_cmd(" ip_va(NULL)");
return;
}
-gvt_err(" ip_va=%p: %08x %08x %08x %08x\n",
+gvt_dbg_cmd(" ip_va=%p: %08x %08x %08x %08x\n",
s->ip_va, cmd_val(s, 0), cmd_val(s, 1),
cmd_val(s, 2), cmd_val(s, 3));
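For context, the change above is purely a log-level demotion: these opcode and parser-state dumps are diagnostics, and emitting them through gvt_err put them in dmesg at error severity. From memory of the 4.11-era drivers/gpu/drm/i915/gvt/debug.h, the two macros are defined roughly as follows (treat this as a sketch, not a verbatim quote):

#define gvt_err(fmt, args...) \
	DRM_ERROR("gvt: "fmt, ##args)

#define gvt_dbg_cmd(fmt, args...) \
	DRM_DEBUG_DRIVER("gvt: cmd: "fmt, ##args)

With gvt_dbg_cmd, the dumps only appear when DRM driver debugging is enabled (e.g. via the drm.debug module parameter) instead of landing in every kernel log.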
@@ -176,14 +176,20 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
vgpu_vreg(vgpu, SDEISR) &= ~(SDE_PORTA_HOTPLUG_SPT |
SDE_PORTE_HOTPLUG_SPT);
-if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B))
+if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) {
vgpu_vreg(vgpu, SDEISR) |= SDE_PORTB_HOTPLUG_CPT;
+vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIB_DETECTED;
+}
-if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C))
+if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) {
vgpu_vreg(vgpu, SDEISR) |= SDE_PORTC_HOTPLUG_CPT;
+vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIC_DETECTED;
+}
-if (intel_vgpu_has_monitor_on_port(vgpu, PORT_D))
+if (intel_vgpu_has_monitor_on_port(vgpu, PORT_D)) {
vgpu_vreg(vgpu, SDEISR) |= SDE_PORTD_HOTPLUG_CPT;
+vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDID_DETECTED;
+}
if (IS_SKYLAKE(dev_priv) &&
intel_vgpu_has_monitor_on_port(vgpu, PORT_E)) {
@@ -196,6 +202,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
GEN8_PORT_DP_A_HOTPLUG;
else
vgpu_vreg(vgpu, SDEISR) |= SDE_PORTA_HOTPLUG_SPT;
+vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_A)) |= DDI_INIT_DISPLAY_DETECTED;
}
}
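The pattern in this hunk is the same for every port: a virtual hotplug status bit in SDEISR is only believable to the guest if the matching DDI "detected" strap is also set, because the guest's i915 driver consults SFUSE_STRAP to decide which DDI ports exist before it acts on hotplug state. A hedged sketch of how the pairing could be factored (the helper and struct below are illustrative, not code from the driver):

/* Illustrative only: pair each port's hotplug bit with its strap bit. */
struct port_detect_bits {
	u32 sdeisr_bit; /* hotplug status, e.g. SDE_PORTB_HOTPLUG_CPT */
	u32 strap_bit;  /* presence strap, e.g. SFUSE_STRAP_DDIB_DETECTED */
};

static void emulate_port_present(struct intel_vgpu *vgpu,
				 const struct port_detect_bits *bits)
{
	vgpu_vreg(vgpu, SDEISR) |= bits->sdeisr_bit;
	vgpu_vreg(vgpu, SFUSE_STRAP) |= bits->strap_bit;
}

Port A is the exception: it has no SFUSE_STRAP presence bit, which is why the hunk above sets DDI_INIT_DISPLAY_DETECTED in DDI_BUF_CTL(PORT_A) instead.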
@@ -96,10 +96,10 @@ static int gvt_dma_map_iova(struct intel_vgpu *vgpu, kvm_pfn_t pfn,
struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
dma_addr_t daddr;
-page = pfn_to_page(pfn);
-if (is_error_page(page))
+if (unlikely(!pfn_valid(pfn)))
return -EFAULT;
+page = pfn_to_page(pfn);
daddr = dma_map_page(dev, page, 0, PAGE_SIZE,
PCI_DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, daddr))
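The reorder above matters because pfn_to_page() on an invalid pfn produces a wild struct page pointer, and is_error_page() on such a pointer proves nothing; the pfn must be validated before it is ever translated. Assembled from the hunk, the fixed mapping path reads as below; the diff truncates the function, so the error value for a failed mapping and the iova out-parameter handling are assumptions:

static int gvt_dma_map_iova(struct intel_vgpu *vgpu, kvm_pfn_t pfn,
		dma_addr_t *iova)
{
	struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
	struct page *page;
	dma_addr_t daddr;

	/* Validate before translating: the result of pfn_to_page()
	 * on a bogus pfn must never be dereferenced. */
	if (unlikely(!pfn_valid(pfn)))
		return -EFAULT;

	page = pfn_to_page(pfn);
	daddr = dma_map_page(dev, page, 0, PAGE_SIZE,
			PCI_DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, daddr))
		return -ENOMEM; /* assumed error value */

	*iova = daddr; /* assumed, per the iova out-parameter */
	return 0;
}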
@@ -151,6 +151,15 @@ static int shadow_context_status_change(struct notifier_block *nb,
case INTEL_CONTEXT_SCHEDULE_OUT:
intel_gvt_restore_render_mmio(workload->vgpu,
workload->ring_id);
+/* A status of -EINPROGRESS means the workload was dispatched
+ * without any issue, so on SCHEDULE_OUT set the status to zero
+ * for good. Any other status means something went wrong during
+ * dispatch, and it must not be overwritten with zero.
+ */
+if (workload->status == -EINPROGRESS)
+workload->status = 0;
atomic_set(&workload->shadow_ctx_active, 0);
break;
default:
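Restating the comment above as a lifecycle: workload->status acts as a small state machine in which -EINPROGRESS means "dispatched, no error yet". The dispatch-side assignments below are inferred from the comment rather than shown in this diff, so treat them as assumptions:

/*
 * workload->status lifecycle (illustrative):
 *
 *   on dispatch:          status = -EINPROGRESS  (no error yet; assumed)
 *   dispatch fails:       status = ret           (ret < 0, a real error)
 *   SCHEDULE_OUT, clean:  -EINPROGRESS -> 0      (completed for good)
 *   SCHEDULE_OUT, error:  status preserved, reported later by
 *                         complete_current_workload()
 */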
@@ -362,16 +371,24 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
workload = scheduler->current_workload[ring_id];
vgpu = workload->vgpu;
-if (!workload->status && !vgpu->resetting) {
+/* For the workload w/ request, wait for the context switch to
+ * make sure the request is completed.
+ * For the workload w/o request, directly complete the workload.
+ */
+if (workload->req) {
wait_event(workload->shadow_ctx_status_wq,
!atomic_read(&workload->shadow_ctx_active));
+i915_gem_request_put(fetch_and_zero(&workload->req));
+if (!workload->status && !vgpu->resetting) {
update_guest_context(workload);
for_each_set_bit(event, workload->pending_events,
INTEL_GVT_EVENT_MAX)
intel_vgpu_trigger_virtual_event(vgpu, event);
}
+}
gvt_dbg_sched("ring id %d complete workload %p status %d\n",
ring_id, workload, workload->status);
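Because the interleaved -/+ lines above are hard to read flattened, here is the rewritten completion path assembled into one piece (indentation and blank lines reconstructed, so treat it as a close paraphrase of the hunk rather than a verbatim quote):

/* For the workload w/ request, wait for the context switch to make
 * sure the request is completed; for the workload w/o request,
 * complete the workload directly. */
if (workload->req) {
	wait_event(workload->shadow_ctx_status_wq,
		   !atomic_read(&workload->shadow_ctx_active));

	i915_gem_request_put(fetch_and_zero(&workload->req));

	if (!workload->status && !vgpu->resetting) {
		update_guest_context(workload);

		for_each_set_bit(event, workload->pending_events,
				 INTEL_GVT_EVENT_MAX)
			intel_vgpu_trigger_virtual_event(vgpu, event);
	}
}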
@@ -400,7 +417,6 @@ static int workload_thread(void *priv)
int ring_id = p->ring_id;
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
struct intel_vgpu_workload *workload = NULL;
-long lret;
int ret;
bool need_force_wake = IS_SKYLAKE(gvt->dev_priv);
DEFINE_WAIT_FUNC(wait, woken_wake_function);
@@ -449,23 +465,24 @@ static int workload_thread(void *priv)
gvt_dbg_sched("ring id %d wait workload %p\n",
workload->ring_id, workload);
-lret = i915_wait_request(workload->req,
+retry:
+i915_wait_request(workload->req,
0, MAX_SCHEDULE_TIMEOUT);
-if (lret < 0) {
-workload->status = lret;
-gvt_err("fail to wait workload, skip\n");
-} else {
-workload->status = 0;
+/* i915 has a replay mechanism: a request is replayed if there
+ * is an i915 reset, so the seqno will be updated anyway. If the
+ * seqno has not been updated yet after waiting, the replay may
+ * still be in progress, so simply wait again.
+ */
+if (!i915_gem_request_completed(workload->req)) {
+gvt_dbg_sched("workload %p not completed, wait again\n",
+workload);
+goto retry;
+}
complete:
gvt_dbg_sched("will complete workload %p, status: %d\n",
workload, workload->status);
-if (workload->req)
-i915_gem_request_put(fetch_and_zero(&workload->req));
complete_current_workload(gvt, ring_id);
if (need_force_wake)
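Assembled the same way, the new wait logic replaces the old "record lret and give up" branch with a loop that waits until the request has really completed (again reconstructed from the hunk; indentation, blank lines, and the surrounding control flow are assumptions):

retry:
	i915_wait_request(workload->req, 0, MAX_SCHEDULE_TIMEOUT);

	/* i915 can replay a request after a GPU reset; if the seqno has
	 * not advanced yet, the replay may still be in flight, so wait
	 * again rather than reporting a failure. */
	if (!i915_gem_request_completed(workload->req)) {
		gvt_dbg_sched("workload %p not completed, wait again\n",
			      workload);
		goto retry;
	}

complete:
	gvt_dbg_sched("will complete workload %p, status: %d\n",
		      workload, workload->status);

	complete_current_workload(gvt, ring_id);

The complete: label is reached directly (via a goto not shown in this hunk) for workloads that never had a request to wait on.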