nexedi / linux / Commits

Commit fca7567c, authored Apr 17, 2013 by Alexander Graf

    Merge commit 'origin/next' into kvm-ppc-next

Parents: fbfba342, 79558f11
Showing 34 changed files with 818 additions and 453 deletions.
arch/arm/kvm/arm.c                    +2    -1
arch/ia64/kvm/lapic.h                 +0    -6
arch/s390/kvm/intercept.c             +4    -8
arch/s390/kvm/kvm-s390.c              +18   -14
arch/s390/kvm/kvm-s390.h              +6    -6
arch/s390/kvm/priv.c                  +74   -129
arch/x86/include/asm/entry_arch.h     +4    -0
arch/x86/include/asm/hardirq.h        +3    -0
arch/x86/include/asm/hw_irq.h         +1    -0
arch/x86/include/asm/irq_vectors.h    +5    -0
arch/x86/include/asm/kvm_host.h       +5    -5
arch/x86/include/asm/vmx.h            +4    -0
arch/x86/kernel/entry_64.S            +5    -0
arch/x86/kernel/irq.c                 +22   -0
arch/x86/kernel/irqinit.c             +4    -0
arch/x86/kvm/emulate.c                +19   -8
arch/x86/kvm/i8254.c                  +2    -2
arch/x86/kvm/lapic.c                  +67   -74
arch/x86/kvm/lapic.h                  +7    -6
arch/x86/kvm/mmu.c                    +7    -4
arch/x86/kvm/paging_tmpl.h            +1    -0
arch/x86/kvm/pmu.c                    +11   -3
arch/x86/kvm/svm.c                    +12   -0
arch/x86/kvm/vmx.c                    +272  -80
arch/x86/kvm/x86.c                    +36   -13
drivers/s390/kvm/kvm_virtio.c         +6    -5
drivers/s390/kvm/virtio_ccw.c         +4    -1
include/linux/kvm_host.h              +14   -6
virt/kvm/assigned-dev.c               +7    -6
virt/kvm/eventfd.c                    +15   -8
virt/kvm/ioapic.c                     +132  -31
virt/kvm/ioapic.h                     +20   -7
virt/kvm/irq_comm.c                   +20   -15
virt/kvm/kvm_main.c                   +9    -15
arch/arm/kvm/arm.c
@@ -805,7 +805,8 @@ static int vcpu_interrupt_line(struct kvm_vcpu *vcpu, int number, bool level)
 	return 0;
 }
 
-int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level)
+int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
+			  bool line_status)
 {
 	u32 irq = irq_level->irq;
 	unsigned int irq_type, vcpu_idx, irq_num;
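Note: the new line_status argument arrives from the KVM_IRQ_LINE ioctl path. A minimal userspace sketch of that entry point, illustrative only: vmfd is assumed to come from KVM_CREATE_VM, and the .irq encoding is architecture-specific (on ARM it packs the irq_type/vcpu_idx/irq_num fields that the function above decodes).

#include <linux/kvm.h>
#include <sys/ioctl.h>

static int pulse_irq_line(int vmfd, unsigned int packed_irq)
{
        struct kvm_irq_level irq = { .irq = packed_irq, .level = 1 };

        if (ioctl(vmfd, KVM_IRQ_LINE, &irq) < 0)  /* raise the line */
                return -1;
        irq.level = 0;
        return ioctl(vmfd, KVM_IRQ_LINE, &irq);   /* drop it again */
}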
arch/ia64/kvm/lapic.h
@@ -27,10 +27,4 @@ int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq);
 #define kvm_apic_present(x) (true)
 #define kvm_lapic_enabled(x) (true)
 
-static inline bool kvm_apic_vid_enabled(void)
-{
-	/* IA64 has no apicv supporting, do nothing here */
-	return false;
-}
-
 #endif
arch/s390/kvm/intercept.c
@@ -45,10 +45,8 @@ static int handle_lctlg(struct kvm_vcpu *vcpu)
 	do {
 		rc = get_guest(vcpu, vcpu->arch.sie_block->gcr[reg],
 			       (u64 __user *) useraddr);
-		if (rc) {
-			kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
-			break;
-		}
+		if (rc)
+			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
 		useraddr += 8;
 		if (reg == reg3)
 			break;
@@ -79,10 +77,8 @@ static int handle_lctl(struct kvm_vcpu *vcpu)
 	reg = reg1;
 	do {
 		rc = get_guest(vcpu, val, (u32 __user *) useraddr);
-		if (rc) {
-			kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
-			break;
-		}
+		if (rc)
+			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
 		vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul;
 		vcpu->arch.sie_block->gcr[reg] |= val;
 		useraddr += 4;
arch/s390/kvm/kvm-s390.c
@@ -149,6 +149,9 @@ int kvm_dev_ioctl_check_extension(long ext)
 	case KVM_CAP_MAX_VCPUS:
 		r = KVM_MAX_VCPUS;
 		break;
+	case KVM_CAP_NR_MEMSLOTS:
+		r = KVM_USER_MEM_SLOTS;
+		break;
 	case KVM_CAP_S390_COW:
 		r = MACHINE_HAS_ESOP;
 		break;
@@ -633,8 +636,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
 	} else {
 		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
 		trace_kvm_s390_sie_fault(vcpu);
-		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
-		rc = 0;
+		rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
 	}
 }
 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
@@ -978,18 +980,10 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 				   struct kvm_userspace_memory_region *mem,
 				   enum kvm_mr_change change)
 {
-	/* A few sanity checks. We can have exactly one memory slot which has
-	   to start at guest virtual zero and which has to be located at a
-	   page boundary in userland and which has to end at a page boundary.
-	   The memory in userland is ok to be fragmented into various different
-	   vmas. It is okay to mmap() and munmap() stuff in this slot after
-	   doing this call at any time */
-
-	if (mem->slot)
-		return -EINVAL;
-
-	if (mem->guest_phys_addr)
-		return -EINVAL;
-
+	/* A few sanity checks. We can have memory slots which have to be
+	   located/ended at a segment boundary (1MB). The memory in userland is
+	   ok to be fragmented into various different vmas. It is okay to mmap()
+	   and munmap() stuff in this slot after doing this call at any time */
 	if (mem->userspace_addr & 0xffffful)
 		return -EINVAL;
@@ -1007,6 +1001,16 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 {
 	int rc;
 
+	/* If the basics of the memslot do not change, we do not want
+	 * to update the gmap. Every update causes several unnecessary
+	 * segment translation exceptions. This is usually handled just
+	 * fine by the normal fault handler + gmap, but it will also
+	 * cause faults on the prefix page of running guest CPUs.
+	 */
+	if (old->userspace_addr == mem->userspace_addr &&
+	    old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
+	    old->npages * PAGE_SIZE == mem->memory_size)
+		return;
 
 	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
 			      mem->guest_phys_addr, mem->memory_size);
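Note: the mask in the reworked sanity check encodes the 1 MB segment granularity mentioned in the new comment. 0xffffful is 2^20 - 1, so the test rejects any userspace address that is not a segment multiple. A standalone illustration (the addresses are made up):

#include <stdio.h>

int main(void)
{
        unsigned long a = 0x80100000ul;  /* 1 MB multiple: accepted */
        unsigned long b = 0x80180000ul;  /* mid-segment: -EINVAL    */

        printf("%#lx -> %s\n", a, (a & 0xffffful) ? "rejected" : "accepted");
        printf("%#lx -> %s\n", b, (b & 0xffffful) ? "rejected" : "accepted");
        return 0;
}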
arch/s390/kvm/kvm-s390.h
@@ -110,12 +110,12 @@ enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer);
 void kvm_s390_tasklet(unsigned long parm);
 void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu);
 void kvm_s390_deliver_pending_machine_checks(struct kvm_vcpu *vcpu);
-int kvm_s390_inject_vm(struct kvm *kvm,
-		       struct kvm_s390_interrupt *s390int);
-int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
-			 struct kvm_s390_interrupt *s390int);
-int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code);
-int kvm_s390_inject_sigp_stop(struct kvm_vcpu *vcpu, int action);
+int __must_check kvm_s390_inject_vm(struct kvm *kvm,
+				    struct kvm_s390_interrupt *s390int);
+int __must_check kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
+				      struct kvm_s390_interrupt *s390int);
+int __must_check kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code);
+int __must_check kvm_s390_inject_sigp_stop(struct kvm_vcpu *vcpu, int action);
 struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
 						    u64 cr6, u64 schid);
arch/s390/kvm/priv.c
(diff collapsed in this capture; +74, -129)
arch/x86/include/asm/entry_arch.h
@@ -19,6 +19,10 @@ BUILD_INTERRUPT(reboot_interrupt,REBOOT_VECTOR)
 BUILD_INTERRUPT(x86_platform_ipi, X86_PLATFORM_IPI_VECTOR)
 
+#ifdef CONFIG_HAVE_KVM
+BUILD_INTERRUPT(kvm_posted_intr_ipi, POSTED_INTR_VECTOR)
+#endif
+
 /*
  * every pentium local APIC has two 'local interrupts', with a
  * soft-definable vector attached to both interrupts, one of
arch/x86/include/asm/hardirq.h
@@ -11,6 +11,9 @@ typedef struct {
 	unsigned int apic_timer_irqs;	/* arch dependent */
 	unsigned int irq_spurious_count;
 	unsigned int icr_read_retry_count;
 #endif
+#ifdef CONFIG_HAVE_KVM
+	unsigned int kvm_posted_intr_ipis;
+#endif
 	unsigned int x86_platform_ipis;	/* arch dependent */
 	unsigned int apic_perf_irqs;
arch/x86/include/asm/hw_irq.h
@@ -28,6 +28,7 @@
 /* Interrupt handlers registered during init_IRQ */
 extern void apic_timer_interrupt(void);
 extern void x86_platform_ipi(void);
+extern void kvm_posted_intr_ipi(void);
 extern void error_interrupt(void);
 extern void irq_work_interrupt(void);
arch/x86/include/asm/irq_vectors.h
@@ -102,6 +102,11 @@
 */
 #define X86_PLATFORM_IPI_VECTOR		0xf7
 
+/* Vector for KVM to deliver posted interrupt IPI */
+#ifdef CONFIG_HAVE_KVM
+#define POSTED_INTR_VECTOR		0xf2
+#endif
+
 /*
  * IRQ work vector:
  */
arch/x86/include/asm/kvm_host.h
@@ -94,9 +94,6 @@
 #define ASYNC_PF_PER_VCPU 64
 
-extern raw_spinlock_t kvm_lock;
-extern struct list_head vm_list;
-
 struct kvm_vcpu;
 struct kvm;
 struct kvm_async_pf;
@@ -704,6 +701,8 @@ struct kvm_x86_ops {
 	void (*hwapic_isr_update)(struct kvm *kvm, int isr);
 	void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
 	void (*set_virtual_x2apic_mode)(struct kvm_vcpu *vcpu, bool set);
+	void (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector);
+	void (*sync_pir_to_irr)(struct kvm_vcpu *vcpu);
 	int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
 	int (*get_tdp_level)(void);
 	u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
@@ -730,6 +729,7 @@ struct kvm_x86_ops {
 	int (*check_intercept)(struct kvm_vcpu *vcpu,
 			       struct x86_instruction_info *info,
 			       enum x86_intercept_stage stage);
+	void (*handle_external_intr)(struct kvm_vcpu *vcpu);
 };
 
 struct kvm_arch_async_pf {
@@ -798,6 +798,7 @@ enum emulation_result {
 #define EMULTYPE_TRAP_UD	    (1 << 1)
 #define EMULTYPE_SKIP		    (1 << 2)
 #define EMULTYPE_RETRY		    (1 << 3)
+#define EMULTYPE_NO_REEXECUTE	    (1 << 4)
 int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2,
 			    int emulation_type, void *insn, int insn_len);
@@ -975,7 +976,6 @@ enum {
  * Trap the fault and ignore the instruction if that happens.
  */
 asmlinkage void kvm_spurious_fault(void);
-extern bool kvm_rebooting;
 
 #define ____kvm_handle_fault_on_reboot(insn, cleanup_insn)	\
 	"666: " insn "\n\t" \
@@ -1030,7 +1030,7 @@ void kvm_pmu_reset(struct kvm_vcpu *vcpu);
 void kvm_pmu_cpuid_update(struct kvm_vcpu *vcpu);
 bool kvm_pmu_msr(struct kvm_vcpu *vcpu, u32 msr);
 int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
-int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data);
+int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
 int kvm_pmu_read_pmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
 void kvm_handle_pmu_event(struct kvm_vcpu *vcpu);
 void kvm_deliver_pmi(struct kvm_vcpu *vcpu);
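Note: both new callbacks are optional from the common code's point of view; the APIC code tests whether the vendor module installed deliver_posted_interrupt and otherwise falls back to the classic IRR-plus-kick path (the real branch is in the arch/x86/kvm/lapic.c hunks further down). A condensed sketch of that dispatch convention:

        if (kvm_x86_ops->deliver_posted_interrupt) {
                /* hardware latches the vector and notifies the vCPU */
                kvm_x86_ops->deliver_posted_interrupt(vcpu, vector);
        } else {
                /* software fallback: set the bit in IRR, then wake the vCPU */
                result = !apic_test_and_set_irr(vector, apic);
                kvm_make_request(KVM_REQ_EVENT, vcpu);
                kvm_vcpu_kick(vcpu);
        }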
arch/x86/include/asm/vmx.h
@@ -71,6 +71,7 @@
 #define PIN_BASED_NMI_EXITING                   0x00000008
 #define PIN_BASED_VIRTUAL_NMIS                  0x00000020
 #define PIN_BASED_VMX_PREEMPTION_TIMER          0x00000040
+#define PIN_BASED_POSTED_INTR                   0x00000080
 
 #define PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR	0x00000016
@@ -102,6 +103,7 @@
 /* VMCS Encodings */
 enum vmcs_field {
 	VIRTUAL_PROCESSOR_ID            = 0x00000000,
+	POSTED_INTR_NV                  = 0x00000002,
 	GUEST_ES_SELECTOR               = 0x00000800,
 	GUEST_CS_SELECTOR               = 0x00000802,
 	GUEST_SS_SELECTOR               = 0x00000804,
@@ -136,6 +138,8 @@ enum vmcs_field {
 	VIRTUAL_APIC_PAGE_ADDR_HIGH     = 0x00002013,
 	APIC_ACCESS_ADDR                = 0x00002014,
 	APIC_ACCESS_ADDR_HIGH           = 0x00002015,
+	POSTED_INTR_DESC_ADDR           = 0x00002016,
+	POSTED_INTR_DESC_ADDR_HIGH      = 0x00002017,
 	EPT_POINTER                     = 0x0000201a,
 	EPT_POINTER_HIGH                = 0x0000201b,
 	EOI_EXIT_BITMAP0                = 0x0000201c,
arch/x86/kernel/entry_64.S
@@ -1166,6 +1166,11 @@ apicinterrupt LOCAL_TIMER_VECTOR \
 apicinterrupt X86_PLATFORM_IPI_VECTOR \
 	x86_platform_ipi smp_x86_platform_ipi
 
+#ifdef CONFIG_HAVE_KVM
+apicinterrupt POSTED_INTR_VECTOR \
+	kvm_posted_intr_ipi smp_kvm_posted_intr_ipi
+#endif
+
 apicinterrupt THRESHOLD_APIC_VECTOR \
 	threshold_interrupt smp_threshold_interrupt
 apicinterrupt THERMAL_APIC_VECTOR \
arch/x86/kernel/irq.c
@@ -228,6 +228,28 @@ void smp_x86_platform_ipi(struct pt_regs *regs)
 	set_irq_regs(old_regs);
 }
 
+#ifdef CONFIG_HAVE_KVM
+/*
+ * Handler for POSTED_INTERRUPT_VECTOR.
+ */
+void smp_kvm_posted_intr_ipi(struct pt_regs *regs)
+{
+	struct pt_regs *old_regs = set_irq_regs(regs);
+
+	ack_APIC_irq();
+
+	irq_enter();
+
+	exit_idle();
+
+	inc_irq_stat(kvm_posted_intr_ipis);
+
+	irq_exit();
+
+	set_irq_regs(old_regs);
+}
+#endif
+
 EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq);
 
 #ifdef CONFIG_HOTPLUG_CPU
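Note: the handler body is deliberately almost empty. With posted interrupts the CPU itself transfers pending vectors from the posted-interrupt descriptor into the virtual APIC, so the notification IPI only needs to be acknowledged and counted. A sketch of the sending side, illustrative only since the real sender lives in the collapsed arch/x86/kvm/vmx.c diff (the descriptor layout and helper below are assumptions, not this merge's code):

struct pi_desc_sketch {
        unsigned long pir[4];   /* 256-bit posted-interrupt request bitmap */
        unsigned long on;       /* outstanding-notification bit            */
};

static void post_interrupt(struct pi_desc_sketch *pi, int vector, int cpu)
{
        set_bit(vector, pi->pir);               /* 1. record the vector */
        if (!test_and_set_bit(0, &pi->on))      /* 2. notify only once  */
                apic->send_IPI_mask(cpumask_of(cpu), POSTED_INTR_VECTOR);
}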
arch/x86/kernel/irqinit.c
@@ -172,6 +172,10 @@ static void __init apic_intr_init(void)
 	/* IPI for X86 platform specific use */
 	alloc_intr_gate(X86_PLATFORM_IPI_VECTOR, x86_platform_ipi);
+#ifdef CONFIG_HAVE_KVM
+	/* IPI for KVM to deliver posted interrupt */
+	alloc_intr_gate(POSTED_INTR_VECTOR, kvm_posted_intr_ipi);
+#endif
 
 	/* IPI vectors for APIC spurious and error interrupts */
 	alloc_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt);
arch/x86/kvm/emulate.c
@@ -132,8 +132,9 @@
 #define Priv        (1<<27) /* instruction generates #GP if current CPL != 0 */
 #define No64	    (1<<28)
 #define PageTable   (1 << 29)   /* instruction used to write page table */
+#define NotImpl     (1 << 30)   /* instruction is not implemented */
 /* Source 2 operand type */
-#define Src2Shift   (30)
+#define Src2Shift   (31)
 #define Src2None    (OpNone << Src2Shift)
 #define Src2CL      (OpCL << Src2Shift)
 #define Src2ImmByte (OpImmByte << Src2Shift)
@@ -1578,12 +1579,21 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
 	memset(&seg_desc, 0, sizeof seg_desc);
 
-	if ((seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86)
-	    || ctxt->mode == X86EMUL_MODE_REAL) {
-		/* set real mode segment descriptor (keep limit etc. for
-		 * unreal mode) */
+	if (ctxt->mode == X86EMUL_MODE_REAL) {
+		/* set real mode segment descriptor */
 		ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
 		set_desc_base(&seg_desc, selector << 4);
 		goto load;
+	} else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
+		/* VM86 needs a clean new segment descriptor */
+		set_desc_base(&seg_desc, selector << 4);
+		set_desc_limit(&seg_desc, 0xffff);
+		seg_desc.type = 3;
+		seg_desc.p = 1;
+		seg_desc.s = 1;
+		seg_desc.dpl = 3;
+		goto load;
 	}
 
 	rpl = selector & 3;
@@ -3615,7 +3625,7 @@ static int check_perm_out(struct x86_emulate_ctxt *ctxt)
 #define DI(_y, _i) { .flags = (_y), .intercept = x86_intercept_##_i }
 #define DIP(_y, _i, _p) { .flags = (_y), .intercept = x86_intercept_##_i, \
 		      .check_perm = (_p) }
-#define N    D(0)
+#define N    D(NotImpl)
 #define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
 #define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
 #define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
@@ -3713,7 +3723,7 @@ static const struct opcode group5[] = {
 	I(SrcMemFAddr | ImplicitOps | Stack,	em_call_far),
 	I(SrcMem | Stack,			em_grp45),
 	I(SrcMemFAddr | ImplicitOps,		em_grp45),
-	I(SrcMem | Stack,			em_grp45), N,
+	I(SrcMem | Stack,			em_grp45), D(Undefined),
 };
 
 static const struct opcode group6[] = {
@@ -4373,7 +4383,7 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
 	ctxt->intercept = opcode.intercept;
 
 	/* Unrecognised? */
-	if (ctxt->d == 0 || (ctxt->d & Undefined))
+	if (ctxt->d == 0 || (ctxt->d & NotImpl))
 		return EMULATION_FAILED;
 
 	if (!(ctxt->d & VendorSpecific) && ctxt->only_vendor_specific_insn)
@@ -4511,7 +4521,8 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
 	ctxt->mem_read.pos = 0;
 
-	if (ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) {
+	if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
+	    (ctxt->d & Undefined)) {
 		rc = emulate_ud(ctxt);
 		goto done;
 	}
arch/x86/kvm/i8254.c
@@ -290,8 +290,8 @@ static void pit_do_work(struct kthread_work *work)
 	}
 	spin_unlock(&ps->inject_lock);
 	if (inject) {
-		kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 1);
-		kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 0);
+		kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 1, false);
+		kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 0, false);
 
 		/*
 		 * Provides NMI watchdog support via Virtual Wire mode.
arch/x86/kvm/lapic.c
@@ -94,6 +94,14 @@ static inline int apic_test_vector(int vec, void *bitmap)
 	return test_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
 }
 
+bool kvm_apic_pending_eoi(struct kvm_vcpu *vcpu, int vector)
+{
+	struct kvm_lapic *apic = vcpu->arch.apic;
+
+	return apic_test_vector(vector, apic->regs + APIC_ISR) ||
+		apic_test_vector(vector, apic->regs + APIC_IRR);
+}
+
 static inline void apic_set_vector(int vec, void *bitmap)
 {
 	set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
@@ -145,53 +153,6 @@ static inline int kvm_apic_id(struct kvm_lapic *apic)
 	return (kvm_apic_get_reg(apic, APIC_ID) >> 24) & 0xff;
 }
 
-void kvm_calculate_eoi_exitmap(struct kvm_vcpu *vcpu,
-				struct kvm_lapic_irq *irq,
-				u64 *eoi_exit_bitmap)
-{
-	struct kvm_lapic **dst;
-	struct kvm_apic_map *map;
-	unsigned long bitmap = 1;
-	int i;
-
-	rcu_read_lock();
-	map = rcu_dereference(vcpu->kvm->arch.apic_map);
-
-	if (unlikely(!map)) {
-		__set_bit(irq->vector, (unsigned long *)eoi_exit_bitmap);
-		goto out;
-	}
-
-	if (irq->dest_mode == 0) { /* physical mode */
-		if (irq->delivery_mode == APIC_DM_LOWEST ||
-				irq->dest_id == 0xff) {
-			__set_bit(irq->vector,
-				  (unsigned long *)eoi_exit_bitmap);
-			goto out;
-		}
-		dst = &map->phys_map[irq->dest_id & 0xff];
-	} else {
-		u32 mda = irq->dest_id << (32 - map->ldr_bits);
-
-		dst = map->logical_map[apic_cluster_id(map, mda)];
-
-		bitmap = apic_logical_id(map, mda);
-	}
-
-	for_each_set_bit(i, &bitmap, 16) {
-		if (!dst[i])
-			continue;
-		if (dst[i]->vcpu == vcpu) {
-			__set_bit(irq->vector,
-				  (unsigned long *)eoi_exit_bitmap);
-			break;
-		}
-	}
-
-out:
-	rcu_read_unlock();
-}
-
 static void recalculate_apic_map(struct kvm *kvm)
 {
 	struct kvm_apic_map *new, *old = NULL;
@@ -256,7 +217,7 @@ static void recalculate_apic_map(struct kvm *kvm)
 	if (old)
 		kfree_rcu(old, rcu);
 
-	kvm_ioapic_make_eoibitmap_request(kvm);
+	kvm_vcpu_request_scan_ioapic(kvm);
 }
 
 static inline void kvm_apic_set_id(struct kvm_lapic *apic, u8 id)
@@ -357,6 +318,19 @@ static u8 count_vectors(void *bitmap)
 	return count;
 }
 
+void kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir)
+{
+	u32 i, pir_val;
+	struct kvm_lapic *apic = vcpu->arch.apic;
+
+	for (i = 0; i <= 7; i++) {
+		pir_val = xchg(&pir[i], 0);
+		if (pir_val)
+			*((u32 *)(apic->regs + APIC_IRR + i * 0x10)) |= pir_val;
+	}
+}
+EXPORT_SYMBOL_GPL(kvm_apic_update_irr);
+
 static inline int apic_test_and_set_irr(int vec, struct kvm_lapic *apic)
 {
 	apic->irr_pending = true;
@@ -379,6 +353,7 @@ static inline int apic_find_highest_irr(struct kvm_lapic *apic)
 	if (!apic->irr_pending)
 		return -1;
 
+	kvm_x86_ops->sync_pir_to_irr(apic->vcpu);
 	result = apic_search_irr(apic);
 	ASSERT(result == -1 || result >= 16);
@@ -431,14 +406,16 @@ int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu)
 }
 
 static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
-			     int vector, int level, int trig_mode);
+			     int vector, int level, int trig_mode,
+			     unsigned long *dest_map);
 
-int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq)
+int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq,
+		     unsigned long *dest_map)
 {
 	struct kvm_lapic *apic = vcpu->arch.apic;
 
 	return __apic_accept_irq(apic, irq->delivery_mode, irq->vector,
-			irq->level, irq->trig_mode);
+			irq->level, irq->trig_mode, dest_map);
 }
 
 static int pv_eoi_put_user(struct kvm_vcpu *vcpu, u8 val)
@@ -505,6 +482,15 @@ static inline int apic_find_highest_isr(struct kvm_lapic *apic)
 	return result;
 }
 
+void kvm_apic_update_tmr(struct kvm_vcpu *vcpu, u32 *tmr)
+{
+	struct kvm_lapic *apic = vcpu->arch.apic;
+	int i;
+
+	for (i = 0; i < 8; i++)
+		apic_set_reg(apic, APIC_TMR + 0x10 * i, tmr[i]);
+}
+
 static void apic_update_ppr(struct kvm_lapic *apic)
 {
 	u32 tpr, isrv, ppr, old_ppr;
@@ -611,7 +597,7 @@ int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
 }
 
 bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
-		struct kvm_lapic_irq *irq, int *r)
+		struct kvm_lapic_irq *irq, int *r, unsigned long *dest_map)
 {
 	struct kvm_apic_map *map;
 	unsigned long bitmap = 1;
@@ -622,7 +608,7 @@ bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
 	*r = -1;
 
 	if (irq->shorthand == APIC_DEST_SELF) {
-		*r = kvm_apic_set_irq(src->vcpu, irq);
+		*r = kvm_apic_set_irq(src->vcpu, irq, dest_map);
 		return true;
 	}
@@ -667,7 +653,7 @@ bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
 			continue;
 		if (*r < 0)
 			*r = 0;
-		*r += kvm_apic_set_irq(dst[i]->vcpu, irq);
+		*r += kvm_apic_set_irq(dst[i]->vcpu, irq, dest_map);
 	}
 
 	ret = true;
@@ -681,7 +667,8 @@ bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
  * Return 1 if successfully added and 0 if discarded.
  */
 static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
-			     int vector, int level, int trig_mode)
+			     int vector, int level, int trig_mode,
+			     unsigned long *dest_map)
 {
 	int result = 0;
 	struct kvm_vcpu *vcpu = apic->vcpu;
@@ -694,24 +681,28 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
 		if (unlikely(!apic_enabled(apic)))
 			break;
 
-		if (trig_mode) {
-			apic_debug("level trig mode for vector %d", vector);
-			apic_set_vector(vector, apic->regs + APIC_TMR);
-		} else
-			apic_clear_vector(vector, apic->regs + APIC_TMR);
+		if (dest_map)
+			__set_bit(vcpu->vcpu_id, dest_map);
 
-		result = !apic_test_and_set_irr(vector, apic);
-		trace_kvm_apic_accept_irq(vcpu->vcpu_id, delivery_mode,
-					  trig_mode, vector, !result);
-		if (!result) {
-			if (trig_mode)
-				apic_debug("level trig mode repeatedly for "
-						"vector %d", vector);
-			break;
-		}
+		if (kvm_x86_ops->deliver_posted_interrupt) {
+			result = 1;
+			kvm_x86_ops->deliver_posted_interrupt(vcpu, vector);
+		} else {
+			result = !apic_test_and_set_irr(vector, apic);
+			if (!result) {
+				if (trig_mode)
+					apic_debug("level trig mode repeatedly "
+						"for vector %d", vector);
+				goto out;
+			}
 
-		kvm_make_request(KVM_REQ_EVENT, vcpu);
-		kvm_vcpu_kick(vcpu);
+			kvm_make_request(KVM_REQ_EVENT, vcpu);
+			kvm_vcpu_kick(vcpu);
+		}
+out:
+		trace_kvm_apic_accept_irq(vcpu->vcpu_id, delivery_mode,
+				trig_mode, vector, !result);
 		break;
 
 	case APIC_DM_REMRD:
@@ -786,7 +777,7 @@ static void kvm_ioapic_send_eoi(struct kvm_lapic *apic, int vector)
 			trigger_mode = IOAPIC_LEVEL_TRIG;
 		else
 			trigger_mode = IOAPIC_EDGE_TRIG;
-		kvm_ioapic_update_eoi(apic->vcpu->kvm, vector, trigger_mode);
+		kvm_ioapic_update_eoi(apic->vcpu, vector, trigger_mode);
 	}
 }
@@ -852,7 +843,7 @@ static void apic_send_ipi(struct kvm_lapic *apic)
 		   irq.trig_mode, irq.level, irq.dest_mode, irq.delivery_mode,
 		   irq.vector);
 
-	kvm_irq_delivery_to_apic(apic->vcpu->kvm, apic, &irq);
+	kvm_irq_delivery_to_apic(apic->vcpu->kvm, apic, &irq, NULL);
 }
 
 static u32 apic_get_tmcct(struct kvm_lapic *apic)
@@ -1488,7 +1479,8 @@ int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type)
 		vector = reg & APIC_VECTOR_MASK;
 		mode = reg & APIC_MODE_MASK;
 		trig_mode = reg & APIC_LVT_LEVEL_TRIGGER;
-		return __apic_accept_irq(apic, mode, vector, 1, trig_mode);
+		return __apic_accept_irq(apic, mode, vector, 1, trig_mode,
+					NULL);
 	}
 	return 0;
 }
@@ -1658,6 +1650,7 @@ void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu,
 	apic->highest_isr_cache = -1;
 	kvm_x86_ops->hwapic_isr_update(vcpu->kvm, apic_find_highest_isr(apic));
 	kvm_make_request(KVM_REQ_EVENT, vcpu);
+	kvm_rtc_eoi_tracking_restore_one(vcpu);
 }
 
 void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
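Note: the dest_map plumbing added throughout this file lets a caller record exactly which vCPUs latched a vector during one delivery (__apic_accept_irq sets vcpu_id bits into it). The real consumer is the RTC EOI tracking added to virt/kvm/ioapic.c in this merge; a condensed sketch of the calling pattern, with the surrounding ioapic state elided:

        DECLARE_BITMAP(dest_map, KVM_MAX_VCPUS);
        int delivered;

        bitmap_zero(dest_map, KVM_MAX_VCPUS);
        kvm_irq_delivery_to_apic_fast(kvm, NULL, &irq, &delivered, dest_map);
        /* dest_map now has one bit per vCPU that accepted irq.vector,
         * so a later EOI can be matched back to this injection. */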
arch/x86/kvm/lapic.h
@@ -53,13 +53,16 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value);
 u64 kvm_lapic_get_base(struct kvm_vcpu *vcpu);
 void kvm_apic_set_version(struct kvm_vcpu *vcpu);
 
+void kvm_apic_update_tmr(struct kvm_vcpu *vcpu, u32 *tmr);
+void kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir);
 int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest);
 int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda);
-int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq);
+int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq,
+		     unsigned long *dest_map);
 int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type);
 
 bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
-		struct kvm_lapic_irq *irq, int *r);
+		struct kvm_lapic_irq *irq, int *r, unsigned long *dest_map);
 
 u64 kvm_get_apic_base(struct kvm_vcpu *vcpu);
 void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data);
@@ -160,13 +163,11 @@ static inline u16 apic_logical_id(struct kvm_apic_map *map, u32 ldr)
 	return ldr & map->lid_mask;
 }
 
-void kvm_calculate_eoi_exitmap(struct kvm_vcpu *vcpu,
-				struct kvm_lapic_irq *irq,
-				u64 *eoi_bitmap);
-
 static inline bool kvm_apic_has_events(struct kvm_vcpu *vcpu)
 {
 	return vcpu->arch.apic->pending_events;
 }
 
+bool kvm_apic_pending_eoi(struct kvm_vcpu *vcpu, int vector);
+
 #endif
arch/x86/kvm/mmu.c
@@ -1501,15 +1501,11 @@ static void drop_parent_pte(struct kvm_mmu_page *sp,
 	mmu_spte_clear_no_track(parent_pte);
 }
 
-static void make_mmu_pages_available(struct kvm_vcpu *vcpu);
-
 static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
 					       u64 *parent_pte, int direct)
 {
 	struct kvm_mmu_page *sp;
 
-	make_mmu_pages_available(vcpu);
-
 	sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
 	sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache);
 	if (!direct)
@@ -2806,6 +2802,7 @@ static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level,
 static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
 			 gva_t gva, pfn_t *pfn, bool write, bool *writable);
+static void make_mmu_pages_available(struct kvm_vcpu *vcpu);
 
 static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code,
 			 gfn_t gfn, bool prefault)
@@ -2847,6 +2844,7 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code,
 	spin_lock(&vcpu->kvm->mmu_lock);
 	if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
 		goto out_unlock;
+	make_mmu_pages_available(vcpu);
 	if (likely(!force_pt_level))
 		transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level);
 	r = __direct_map(vcpu, v, write, map_writable, level, gfn, pfn,
@@ -2924,6 +2922,7 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
 	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
 		spin_lock(&vcpu->kvm->mmu_lock);
+		make_mmu_pages_available(vcpu);
 		sp = kvm_mmu_get_page(vcpu, 0, 0, PT64_ROOT_LEVEL,
 				      1, ACC_ALL, NULL);
 		++sp->root_count;
@@ -2935,6 +2934,7 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
 			ASSERT(!VALID_PAGE(root));
 			spin_lock(&vcpu->kvm->mmu_lock);
+			make_mmu_pages_available(vcpu);
 			sp = kvm_mmu_get_page(vcpu, i << (30 - PAGE_SHIFT),
 					      i << 30,
 					      PT32_ROOT_LEVEL, 1, ACC_ALL,
@@ -2973,6 +2973,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 		ASSERT(!VALID_PAGE(root));
 		spin_lock(&vcpu->kvm->mmu_lock);
+		make_mmu_pages_available(vcpu);
 		sp = kvm_mmu_get_page(vcpu, root_gfn, 0, PT64_ROOT_LEVEL,
 				      0, ACC_ALL, NULL);
 		root = __pa(sp->spt);
@@ -3006,6 +3007,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 			return 1;
 		}
 		spin_lock(&vcpu->kvm->mmu_lock);
+		make_mmu_pages_available(vcpu);
 		sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
 				      PT32_ROOT_LEVEL, 0,
 				      ACC_ALL, NULL);
@@ -3311,6 +3313,7 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
 	spin_lock(&vcpu->kvm->mmu_lock);
 	if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
 		goto out_unlock;
+	make_mmu_pages_available(vcpu);
 	if (likely(!force_pt_level))
 		transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level);
 	r = __direct_map(vcpu, gpa, write, map_writable,
arch/x86/kvm/paging_tmpl.h
@@ -627,6 +627,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
 		goto out_unlock;
 
 	kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT);
+	make_mmu_pages_available(vcpu);
 	if (!force_pt_level)
 		transparent_hugepage_adjust(vcpu, &walker.gfn, &pfn, &level);
 	r = FNAME(fetch)(vcpu, addr, &walker, write_fault,
arch/x86/kvm/pmu.c
@@ -360,10 +360,12 @@ int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data)
 	return 1;
 }
 
-int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data)
+int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 {
 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
 	struct kvm_pmc *pmc;
+	u32 index = msr_info->index;
+	u64 data = msr_info->data;
 
 	switch (index) {
 	case MSR_CORE_PERF_FIXED_CTR_CTRL:
@@ -375,6 +377,10 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data)
 		}
 		break;
 	case MSR_CORE_PERF_GLOBAL_STATUS:
+		if (msr_info->host_initiated) {
+			pmu->global_status = data;
+			return 0;
+		}
 		break; /* RO MSR */
 	case MSR_CORE_PERF_GLOBAL_CTRL:
 		if (pmu->global_ctrl == data)
@@ -386,7 +392,8 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data)
 		break;
 	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
 		if (!(data & (pmu->global_ctrl_mask & ~(3ull<<62)))) {
-			pmu->global_status &= ~data;
+			if (!msr_info->host_initiated)
+				pmu->global_status &= ~data;
 			pmu->global_ovf_ctrl = data;
 			return 0;
 		}
@@ -394,7 +401,8 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data)
 	default:
 		if ((pmc = get_gp_pmc(pmu, index, MSR_IA32_PERFCTR0)) ||
 				(pmc = get_fixed_pmc(pmu, index))) {
-			data = (s64)(s32)data;
+			if (!msr_info->host_initiated)
+				data = (s64)(s32)data;
 			pmc->counter += data - read_pmc(pmc);
 			return 0;
 		} else if ((pmc = get_gp_pmc(pmu, index, MSR_P6_EVNTSEL0))) {
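Note: passing the whole msr_data lets the PMU tell a guest WRMSR apart from a host-initiated write (KVM_SET_MSRS during save/restore): host writes may set otherwise read-only state such as MSR_CORE_PERF_GLOBAL_STATUS and skip the sign extension applied to guest counter writes. A sketch of the two flavors, with field names as in the diff and the surrounding plumbing elided:

        struct msr_data wr = {
                .index = MSR_CORE_PERF_GLOBAL_STATUS,
                .data = 0x1,
                .host_initiated = false,
        };

        kvm_pmu_set_msr(vcpu, &wr);     /* guest write: falls through, RO MSR */

        wr.host_initiated = true;       /* e.g. migration restore */
        kvm_pmu_set_msr(vcpu, &wr);     /* now allowed to set global_status   */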
arch/x86/kvm/svm.c
@@ -3577,6 +3577,11 @@ static void svm_hwapic_isr_update(struct kvm *kvm, int isr)
 	return;
 }
 
+static void svm_sync_pir_to_irr(struct kvm_vcpu *vcpu)
+{
+	return;
+}
+
 static int svm_nmi_allowed(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
@@ -4233,6 +4238,11 @@ static int svm_check_intercept(struct kvm_vcpu *vcpu,
 	return ret;
 }
 
+static void svm_handle_external_intr(struct kvm_vcpu *vcpu)
+{
+	local_irq_enable();
+}
+
 static struct kvm_x86_ops svm_x86_ops = {
 	.cpu_has_kvm_support = has_svm,
 	.disabled_by_bios = is_disabled,
@@ -4300,6 +4310,7 @@ static struct kvm_x86_ops svm_x86_ops = {
 	.vm_has_apicv = svm_vm_has_apicv,
 	.load_eoi_exitmap = svm_load_eoi_exitmap,
 	.hwapic_isr_update = svm_hwapic_isr_update,
+	.sync_pir_to_irr = svm_sync_pir_to_irr,
 
 	.set_tss_addr = svm_set_tss_addr,
 	.get_tdp_level = get_npt_level,
@@ -4328,6 +4339,7 @@ static struct kvm_x86_ops svm_x86_ops = {
 	.set_tdp_cr3 = set_tdp_cr3,
 
 	.check_intercept = svm_check_intercept,
+	.handle_external_intr = svm_handle_external_intr,
 };
 
 static int __init svm_init(void)
arch/x86/kvm/vmx.c
(diff collapsed in this capture; +272, -80)
arch/x86/kvm/x86.c
@@ -261,6 +261,13 @@ void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data)
 }
 EXPORT_SYMBOL_GPL(kvm_set_apic_base);
 
+asmlinkage void kvm_spurious_fault(void)
+{
+	/* Fault while not rebooting. We want the trace. */
+	BUG();
+}
+EXPORT_SYMBOL_GPL(kvm_spurious_fault);
+
 #define EXCPT_BENIGN		0
 #define EXCPT_CONTRIBUTORY	1
 #define EXCPT_PF		2
@@ -2040,7 +2047,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	case MSR_P6_EVNTSEL0:
 	case MSR_P6_EVNTSEL1:
 		if (kvm_pmu_msr(vcpu, msr))
-			return kvm_pmu_set_msr(vcpu, msr, data);
+			return kvm_pmu_set_msr(vcpu, msr_info);
 
 		if (pr || data != 0)
 			vcpu_unimpl(vcpu, "disabled perfctr wrmsr: "
@@ -2086,7 +2093,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr))
 			return xen_hvm_config(vcpu, data);
 		if (kvm_pmu_msr(vcpu, msr))
-			return kvm_pmu_set_msr(vcpu, msr, data);
+			return kvm_pmu_set_msr(vcpu, msr_info);
 		if (!ignore_msrs) {
 			vcpu_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n",
 				msr, data);
@@ -2685,6 +2692,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
 				    struct kvm_lapic_state *s)
 {
+	kvm_x86_ops->sync_pir_to_irr(vcpu);
 	memcpy(s->regs, vcpu->arch.apic->regs, sizeof *s);
 
 	return 0;
@@ -3484,13 +3492,15 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
 	return r;
 }
 
-int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event)
+int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
+			bool line_status)
 {
 	if (!irqchip_in_kernel(kvm))
 		return -ENXIO;
 
 	irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
-					irq_event->irq, irq_event->level);
+					irq_event->irq, irq_event->level,
+					line_status);
 	return 0;
 }
@@ -4758,11 +4768,15 @@ static int handle_emulation_failure(struct kvm_vcpu *vcpu)
 }
 
 static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2,
-				  bool write_fault_to_shadow_pgtable)
+				  bool write_fault_to_shadow_pgtable,
+				  int emulation_type)
 {
 	gpa_t gpa = cr2;
 	pfn_t pfn;
 
+	if (emulation_type & EMULTYPE_NO_REEXECUTE)
+		return false;
+
 	if (!vcpu->arch.mmu.direct_map) {
 		/*
 		 * Write permission should be allowed since only
@@ -4905,8 +4919,8 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
 		if (r != EMULATION_OK)  {
 			if (emulation_type & EMULTYPE_TRAP_UD)
 				return EMULATE_FAIL;
-			if (reexecute_instruction(vcpu, cr2,
-						  write_fault_to_spt))
+			if (reexecute_instruction(vcpu, cr2, write_fault_to_spt,
+						emulation_type))
 				return EMULATE_DONE;
 			if (emulation_type & EMULTYPE_SKIP)
 				return EMULATE_FAIL;
@@ -4936,7 +4950,8 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
 		return EMULATE_DONE;
 
 	if (r == EMULATION_FAILED) {
-		if (reexecute_instruction(vcpu, cr2, write_fault_to_spt))
+		if (reexecute_instruction(vcpu, cr2, write_fault_to_spt,
+					emulation_type))
 			return EMULATE_DONE;
 
 		return handle_emulation_failure(vcpu);
@@ -5647,14 +5662,20 @@ static void kvm_gen_update_masterclock(struct kvm *kvm)
 #endif
 }
 
-static void update_eoi_exitmap(struct kvm_vcpu *vcpu)
+static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
 {
 	u64 eoi_exit_bitmap[4];
+	u32 tmr[8];
 
-	memset(eoi_exit_bitmap, 0, 32);
+	if (!kvm_apic_hw_enabled(vcpu->arch.apic))
+		return;
 
-	kvm_ioapic_calculate_eoi_exitmap(vcpu, eoi_exit_bitmap);
+	memset(eoi_exit_bitmap, 0, 32);
+	memset(tmr, 0, 32);
+
+	kvm_ioapic_scan_entry(vcpu, eoi_exit_bitmap, tmr);
 	kvm_x86_ops->load_eoi_exitmap(vcpu, eoi_exit_bitmap);
+	kvm_apic_update_tmr(vcpu, tmr);
 }
 
 static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
@@ -5710,8 +5731,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 			kvm_handle_pmu_event(vcpu);
 		if (kvm_check_request(KVM_REQ_PMI, vcpu))
 			kvm_deliver_pmi(vcpu);
-		if (kvm_check_request(KVM_REQ_EOIBITMAP, vcpu))
-			update_eoi_exitmap(vcpu);
+		if (kvm_check_request(KVM_REQ_SCAN_IOAPIC, vcpu))
+			vcpu_scan_ioapic(vcpu);
 	}
 
 	if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) {
@@ -5806,7 +5827,9 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	vcpu->mode = OUTSIDE_GUEST_MODE;
 	smp_wmb();
-	local_irq_enable();
+
+	/* Interrupt is enabled by handle_external_intr() */
+	kvm_x86_ops->handle_external_intr(vcpu);
 
 	++vcpu->stat.exits;
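Note: EMULTYPE_NO_REEXECUTE gives callers a way to fail straight to handle_emulation_failure() instead of retrying the faulting access against the guest page tables; reexecute_instruction() now bails out first thing when the flag is set. A hypothetical call site (the real user is in the collapsed arch/x86/kvm/vmx.c diff):

        /* emulate at the current RIP, but never attempt re-execution */
        r = x86_emulate_instruction(vcpu, 0, EMULTYPE_NO_REEXECUTE, NULL, 0);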
drivers/s390/kvm/kvm_virtio.c
View file @ fca7567c
...
@@ -443,29 +443,30 @@ static int __init test_devices_support(unsigned long addr)
 }
 
 /*
  * Init function for virtio
- * devices are in a single page above top of "normal" mem
+ * devices are in a single page above top of "normal" + standby mem
  */
 static int __init kvm_devices_init(void)
 {
 	int rc;
+	unsigned long total_memory_size = sclp_get_rzm() * sclp_get_rnmax();
 
 	if (!MACHINE_IS_KVM)
 		return -ENODEV;
 
-	if (test_devices_support(real_memory_size) < 0)
+	if (test_devices_support(total_memory_size) < 0)
 		return -ENODEV;
 
-	rc = vmem_add_mapping(real_memory_size, PAGE_SIZE);
+	rc = vmem_add_mapping(total_memory_size, PAGE_SIZE);
 	if (rc)
 		return rc;
 
-	kvm_devices = (void *) real_memory_size;
+	kvm_devices = (void *) total_memory_size;
 
 	kvm_root = root_device_register("kvm_s390");
 	if (IS_ERR(kvm_root)) {
 		rc = PTR_ERR(kvm_root);
 		printk(KERN_ERR "Could not register kvm_s390 root device");
-		vmem_remove_mapping(real_memory_size, PAGE_SIZE);
+		vmem_remove_mapping(total_memory_size, PAGE_SIZE);
 		return rc;
 	}
...
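For orientation, the fix above sizes the mapping with total (online plus standby) memory instead of real_memory_size: on s390 that is the memory-increment size times the maximum increment count reported by SCLP. A hedged sketch of the arithmetic, with made-up values standing in for sclp_get_rzm()/sclp_get_rnmax():

#include <stdio.h>

int main(void)
{
	unsigned long rzm = 256UL << 20;	/* assumed: 256 MiB per increment */
	unsigned long rnmax = 16;		/* assumed: up to 16 increments */
	unsigned long total_memory_size = rzm * rnmax;

	/* The single virtio device page sits just above this boundary. */
	printf("device page at 0x%lx\n", total_memory_size);
	return 0;
}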
drivers/s390/kvm/virtio_ccw.c
View file @ fca7567c
...
@@ -133,8 +133,11 @@ static int ccw_io_helper(struct virtio_ccw_device *vcdev,
 	do {
 		spin_lock_irqsave(get_ccwdev_lock(vcdev->cdev), flags);
 		ret = ccw_device_start(vcdev->cdev, ccw, intparm, 0, 0);
-		if (!ret)
+		if (!ret) {
+			if (!vcdev->curr_io)
+				vcdev->err = 0;
 			vcdev->curr_io |= flag;
+		}
 		spin_unlock_irqrestore(get_ccwdev_lock(vcdev->cdev), flags);
 		cpu_relax();
 	} while (ret == -EBUSY);
...
include/linux/kvm_host.h
View file @ fca7567c
...
@@ -126,7 +126,7 @@ static inline bool is_error_page(struct page *page)
 #define KVM_REQ_MASTERCLOCK_UPDATE 19
 #define KVM_REQ_MCLOCK_INPROGRESS 20
 #define KVM_REQ_EPR_EXIT          21
-#define KVM_REQ_EOIBITMAP         22
+#define KVM_REQ_SCAN_IOAPIC       22
 
 #define KVM_USERSPACE_IRQ_SOURCE_ID		0
 #define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID	1
...
@@ -135,6 +135,9 @@ struct kvm;
 struct kvm_vcpu;
 extern struct kmem_cache *kvm_vcpu_cache;
 
+extern raw_spinlock_t kvm_lock;
+extern struct list_head vm_list;
+
 struct kvm_io_range {
 	gpa_t addr;
 	int len;
...
@@ -289,7 +292,8 @@ struct kvm_kernel_irq_routing_entry {
 	u32 gsi;
 	u32 type;
 	int (*set)(struct kvm_kernel_irq_routing_entry *e,
-		   struct kvm *kvm, int irq_source_id, int level);
+		   struct kvm *kvm, int irq_source_id, int level,
+		   bool line_status);
 	union {
 		struct {
 			unsigned irqchip;
...
@@ -571,7 +575,7 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
 void kvm_flush_remote_tlbs(struct kvm *kvm);
 void kvm_reload_remote_mmus(struct kvm *kvm);
 void kvm_make_mclock_inprogress_request(struct kvm *kvm);
-void kvm_make_update_eoibitmap_request(struct kvm *kvm);
+void kvm_make_scan_ioapic_request(struct kvm *kvm);
 
 long kvm_arch_dev_ioctl(struct file *filp,
 			unsigned int ioctl, unsigned long arg);
...
@@ -588,7 +592,8 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
 				   struct kvm_userspace_memory_region *mem);
-int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level);
+int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
+			  bool line_status);
 long kvm_arch_vm_ioctl(struct file *filp,
 		       unsigned int ioctl, unsigned long arg);
...
@@ -719,10 +724,11 @@ void kvm_get_intr_delivery_bitmask(struct kvm_ioapic *ioapic,
 		union kvm_ioapic_redirect_entry *entry,
 		unsigned long *deliver_bitmask);
 #endif
-int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level);
+int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
+		bool line_status);
 int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level);
 int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
-		int irq_source_id, int level);
+		int irq_source_id, int level, bool line_status);
 bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin);
 void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
 void kvm_register_irq_ack_notifier(struct kvm *kvm,
...
@@ -1058,6 +1064,8 @@ static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
 	}
 }
 
+extern bool kvm_rebooting;
+
 #ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
 static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
...
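Callers now have to state whether line-status tracking is wanted; everything that does not care passes false and keeps the old semantics. A compilable mock with the same shape as the new prototype (the body is invented for illustration):

#include <stdbool.h>
#include <stdio.h>

struct kvm;	/* opaque here */

static int kvm_set_irq(struct kvm *kvm, int irq_source_id, unsigned irq,
		       int level, bool line_status)
{
	(void)kvm; (void)irq_source_id;
	printf("irq %u level %d line_status %d\n", irq, level, line_status);
	return 1;	/* pretend: delivered to one CPU */
}

int main(void)
{
	/* Legacy-style caller: no status tracking, so pass false. */
	kvm_set_irq(NULL, 0 /* KVM_USERSPACE_IRQ_SOURCE_ID */, 8, 1, false);
	return 0;
}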
virt/kvm/assigned-dev.c
View file @ fca7567c
...
@@ -80,11 +80,12 @@ kvm_assigned_dev_raise_guest_irq(struct kvm_assigned_dev_kernel *assigned_dev,
 		spin_lock(&assigned_dev->intx_mask_lock);
 		if (!(assigned_dev->flags & KVM_DEV_ASSIGN_MASK_INTX))
 			kvm_set_irq(assigned_dev->kvm,
-				    assigned_dev->irq_source_id, vector, 1);
+				    assigned_dev->irq_source_id, vector, 1, false);
 		spin_unlock(&assigned_dev->intx_mask_lock);
 	} else
 		kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id,
-			    vector, 1);
+			    vector, 1, false);
 }
 
 static irqreturn_t kvm_assigned_dev_thread_intx(int irq, void *dev_id)
...
@@ -165,7 +166,7 @@ static void kvm_assigned_dev_ack_irq(struct kvm_irq_ack_notifier *kian)
 		container_of(kian, struct kvm_assigned_dev_kernel,
 			     ack_notifier);
 
-	kvm_set_irq(dev->kvm, dev->irq_source_id, dev->guest_irq, 0);
+	kvm_set_irq(dev->kvm, dev->irq_source_id, dev->guest_irq, 0, false);
 
 	spin_lock(&dev->intx_mask_lock);
...
@@ -188,7 +189,7 @@ static void kvm_assigned_dev_ack_irq(struct kvm_irq_ack_notifier *kian)
 		if (reassert)
 			kvm_set_irq(dev->kvm, dev->irq_source_id,
-				    dev->guest_irq, 1);
+				    dev->guest_irq, 1, false);
 	}
 
 	spin_unlock(&dev->intx_mask_lock);
...
@@ -202,7 +203,7 @@ static void deassign_guest_irq(struct kvm *kvm,
 				    &assigned_dev->ack_notifier);
 
 	kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id,
-		    assigned_dev->guest_irq, 0);
+		    assigned_dev->guest_irq, 0, false);
 
 	if (assigned_dev->irq_source_id != -1)
 		kvm_free_irq_source_id(kvm, assigned_dev->irq_source_id);
...
@@ -901,7 +902,7 @@ static int kvm_vm_ioctl_set_pci_irq_mask(struct kvm *kvm,
 	if (match->irq_requested_type & KVM_DEV_IRQ_GUEST_INTX) {
 		if (assigned_dev->flags & KVM_DEV_ASSIGN_MASK_INTX) {
 			kvm_set_irq(match->kvm, match->irq_source_id,
-				    match->guest_irq, 0);
+				    match->guest_irq, 0, false);
 			/*
 			 * Masking at hardware-level is performed on demand,
 			 * i.e. when an IRQ actually arrives at the host.
...
virt/kvm/eventfd.c
View file @ fca7567c
...
@@ -100,11 +100,13 @@ irqfd_inject(struct work_struct *work)
 	struct kvm *kvm = irqfd->kvm;
 
 	if (!irqfd->resampler) {
-		kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 1);
-		kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 0);
+		kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 1,
+				false);
+		kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 0,
+				false);
 	} else
 		kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
-			    irqfd->gsi, 1);
+			    irqfd->gsi, 1, false);
 }
 
 /*
...
@@ -121,7 +123,7 @@ irqfd_resampler_ack(struct kvm_irq_ack_notifier *kian)
 	resampler = container_of(kian, struct _irqfd_resampler, notifier);
 
 	kvm_set_irq(resampler->kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
-		    resampler->notifier.gsi, 0);
+		    resampler->notifier.gsi, 0, false);
 
 	rcu_read_lock();
...
@@ -146,7 +148,7 @@ irqfd_resampler_shutdown(struct _irqfd *irqfd)
 		list_del(&resampler->link);
 		kvm_unregister_irq_ack_notifier(kvm, &resampler->notifier);
 		kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
-			    resampler->notifier.gsi, 0);
+			    resampler->notifier.gsi, 0, false);
 		kfree(resampler);
 	}
...
@@ -225,7 +227,8 @@ irqfd_wakeup(wait_queue_t *wait, unsigned mode, int sync, void *key)
 		irq = rcu_dereference(irqfd->irq_entry);
 		/* An event has been signaled, inject an interrupt */
 		if (irq)
-			kvm_set_msi(irq, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1);
+			kvm_set_msi(irq, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1,
+					false);
 		else
 			schedule_work(&irqfd->inject);
 		rcu_read_unlock();
...
@@ -574,6 +577,7 @@ struct _ioeventfd {
 	struct eventfd_ctx  *eventfd;
 	u64                  datamatch;
 	struct kvm_io_device dev;
+	u8                   bus_idx;
 	bool                 wildcard;
 };
...
@@ -666,7 +670,8 @@ ioeventfd_check_collision(struct kvm *kvm, struct _ioeventfd *p)
 	struct _ioeventfd *_p;
 
 	list_for_each_entry(_p, &kvm->ioeventfds, list)
-		if (_p->addr == p->addr && _p->length == p->length &&
+		if (_p->bus_idx == p->bus_idx &&
+		    _p->addr == p->addr && _p->length == p->length &&
 		    (_p->wildcard || p->wildcard ||
 		     _p->datamatch == p->datamatch))
 			return true;
...
@@ -723,6 +728,7 @@ kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
 	INIT_LIST_HEAD(&p->list);
 	p->addr    = args->addr;
+	p->bus_idx = bus_idx;
 	p->length  = args->len;
 	p->eventfd = eventfd;
...
@@ -781,7 +787,8 @@ kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
 	list_for_each_entry_safe(p, tmp, &kvm->ioeventfds, list) {
 		bool wildcard = !(args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH);
 
-		if (p->eventfd != eventfd  ||
+		if (p->bus_idx != bus_idx ||
+		    p->eventfd != eventfd  ||
 		    p->addr != args->addr  ||
 		    p->length != args->len ||
 		    p->wildcard != wildcard)
...
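The bus_idx fields threaded through _ioeventfd above make registration and collision checks bus-aware, so the same address/length can coexist on different buses. A small predicate mirroring the new rule (struct simplified from the kernel's):

#include <stdbool.h>
#include <stdint.h>

struct ioeventfd {
	uint64_t addr;
	int      length;
	uint8_t  bus_idx;	/* new: which bus this eventfd lives on */
	bool     wildcard;
	uint64_t datamatch;
};

/* Two registrations collide only on the same bus with matching
 * address/length and (wildcard or equal) datamatch. */
static bool collides(const struct ioeventfd *a, const struct ioeventfd *b)
{
	return a->bus_idx == b->bus_idx &&
	       a->addr == b->addr && a->length == b->length &&
	       (a->wildcard || b->wildcard || a->datamatch == b->datamatch);
}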
virt/kvm/ioapic.c
View file @ fca7567c
...
@@ -50,7 +50,8 @@
 #else
 #define ioapic_debug(fmt, arg...)
 #endif
-static int ioapic_deliver(struct kvm_ioapic *vioapic, int irq);
+static int ioapic_deliver(struct kvm_ioapic *vioapic, int irq,
+		bool line_status);
 
 static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic,
 					  unsigned long addr,
...
@@ -90,7 +91,80 @@ static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic,
 	return result;
 }
 
-static int ioapic_service(struct kvm_ioapic *ioapic, unsigned int idx)
+static void rtc_irq_eoi_tracking_reset(struct kvm_ioapic *ioapic)
+{
+	ioapic->rtc_status.pending_eoi = 0;
+	bitmap_zero(ioapic->rtc_status.dest_map, KVM_MAX_VCPUS);
+}
+
+static void __rtc_irq_eoi_tracking_restore_one(struct kvm_vcpu *vcpu)
+{
+	bool new_val, old_val;
+	struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
+	union kvm_ioapic_redirect_entry *e;
+
+	e = &ioapic->redirtbl[RTC_GSI];
+	if (!kvm_apic_match_dest(vcpu, NULL, 0, e->fields.dest_id,
+				e->fields.dest_mode))
+		return;
+
+	new_val = kvm_apic_pending_eoi(vcpu, e->fields.vector);
+	old_val = test_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map);
+
+	if (new_val == old_val)
+		return;
+
+	if (new_val) {
+		__set_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map);
+		ioapic->rtc_status.pending_eoi++;
+	} else {
+		__clear_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map);
+		ioapic->rtc_status.pending_eoi--;
+	}
+
+	WARN_ON(ioapic->rtc_status.pending_eoi < 0);
+}
+
+void kvm_rtc_eoi_tracking_restore_one(struct kvm_vcpu *vcpu)
+{
+	struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
+
+	spin_lock(&ioapic->lock);
+	__rtc_irq_eoi_tracking_restore_one(vcpu);
+	spin_unlock(&ioapic->lock);
+}
+
+static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic)
+{
+	struct kvm_vcpu *vcpu;
+	int i;
+
+	if (RTC_GSI >= IOAPIC_NUM_PINS)
+		return;
+
+	rtc_irq_eoi_tracking_reset(ioapic);
+	kvm_for_each_vcpu(i, vcpu, ioapic->kvm)
+		__rtc_irq_eoi_tracking_restore_one(vcpu);
+}
+
+static void rtc_irq_eoi(struct kvm_ioapic *ioapic, struct kvm_vcpu *vcpu)
+{
+	if (test_and_clear_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map))
+		--ioapic->rtc_status.pending_eoi;
+
+	WARN_ON(ioapic->rtc_status.pending_eoi < 0);
+}
+
+static bool rtc_irq_check_coalesced(struct kvm_ioapic *ioapic)
+{
+	if (ioapic->rtc_status.pending_eoi > 0)
+		return true; /* coalesced */
+
+	return false;
+}
+
+static int ioapic_service(struct kvm_ioapic *ioapic, unsigned int idx,
+		bool line_status)
 {
 	union kvm_ioapic_redirect_entry *pent;
 	int injected = -1;
...
@@ -98,7 +172,7 @@ static int ioapic_service(struct kvm_ioapic *ioapic, unsigned int idx)
 	pent = &ioapic->redirtbl[idx];
 
 	if (!pent->fields.mask) {
-		injected = ioapic_deliver(ioapic, idx);
+		injected = ioapic_deliver(ioapic, idx, line_status);
 		if (injected && pent->fields.trig_mode == IOAPIC_LEVEL_TRIG)
 			pent->fields.remote_irr = 1;
 	}
...
@@ -119,41 +193,48 @@ static void update_handled_vectors(struct kvm_ioapic *ioapic)
 	smp_wmb();
 }
 
-void kvm_ioapic_calculate_eoi_exitmap(struct kvm_vcpu *vcpu,
-				      u64 *eoi_exit_bitmap)
+void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap,
+			u32 *tmr)
 {
 	struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
 	union kvm_ioapic_redirect_entry *e;
-	struct kvm_lapic_irq irqe;
 	int index;
 
 	spin_lock(&ioapic->lock);
-	/* traverse ioapic entry to set eoi exit bitmap*/
 	for (index = 0; index < IOAPIC_NUM_PINS; index++) {
 		e = &ioapic->redirtbl[index];
 		if (!e->fields.mask &&
 			(e->fields.trig_mode == IOAPIC_LEVEL_TRIG ||
 			 kvm_irq_has_notifier(ioapic->kvm, KVM_IRQCHIP_IOAPIC,
-				 index))) {
-			irqe.dest_id = e->fields.dest_id;
-			irqe.vector = e->fields.vector;
-			irqe.dest_mode = e->fields.dest_mode;
-			irqe.delivery_mode = e->fields.delivery_mode << 8;
-			kvm_calculate_eoi_exitmap(vcpu, &irqe, eoi_exit_bitmap);
+				 index) || index == RTC_GSI)) {
+			if (kvm_apic_match_dest(vcpu, NULL, 0,
+				e->fields.dest_id, e->fields.dest_mode)) {
+				__set_bit(e->fields.vector,
+					(unsigned long *)eoi_exit_bitmap);
+				if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG)
+					__set_bit(e->fields.vector,
+						(unsigned long *)tmr);
+			}
 		}
 	}
 	spin_unlock(&ioapic->lock);
 }
-EXPORT_SYMBOL_GPL(kvm_ioapic_calculate_eoi_exitmap);
 
-void kvm_ioapic_make_eoibitmap_request(struct kvm *kvm)
+#ifdef CONFIG_X86
+void kvm_vcpu_request_scan_ioapic(struct kvm *kvm)
 {
 	struct kvm_ioapic *ioapic = kvm->arch.vioapic;
 
-	if (!kvm_apic_vid_enabled(kvm) || !ioapic)
+	if (!ioapic)
 		return;
-	kvm_make_update_eoibitmap_request(kvm);
+	kvm_make_scan_ioapic_request(kvm);
 }
+#else
+void kvm_vcpu_request_scan_ioapic(struct kvm *kvm)
+{
+	return;
+}
+#endif
 
 static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
 {
...
@@ -195,16 +276,17 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
 		kvm_fire_mask_notifiers(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index, mask_after);
 		if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG
 		    && ioapic->irr & (1 << index))
-			ioapic_service(ioapic, index);
-		kvm_ioapic_make_eoibitmap_request(ioapic->kvm);
+			ioapic_service(ioapic, index, false);
+		kvm_vcpu_request_scan_ioapic(ioapic->kvm);
 		break;
 	}
 }
 
-static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
+static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq, bool line_status)
 {
 	union kvm_ioapic_redirect_entry *entry = &ioapic->redirtbl[irq];
 	struct kvm_lapic_irq irqe;
+	int ret;
 
 	ioapic_debug("dest=%x dest_mode=%x delivery_mode=%x "
 		     "vector=%x trig_mode=%x\n",
...
@@ -220,11 +302,19 @@ static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
 	irqe.level = 1;
 	irqe.shorthand = 0;
 
-	return kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe);
+	if (irq == RTC_GSI && line_status) {
+		BUG_ON(ioapic->rtc_status.pending_eoi != 0);
+		ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe,
+				ioapic->rtc_status.dest_map);
+		ioapic->rtc_status.pending_eoi = ret;
+	} else
+		ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe, NULL);
+
+	return ret;
 }
 
 int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id,
-		       int level)
+		       int level, bool line_status)
 {
 	u32 old_irr;
 	u32 mask = 1 << irq;
...
@@ -244,13 +334,20 @@ int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id,
 		ret = 1;
 	} else {
 		int edge = (entry.fields.trig_mode == IOAPIC_EDGE_TRIG);
+
+		if (irq == RTC_GSI && line_status &&
+			rtc_irq_check_coalesced(ioapic)) {
+			ret = 0; /* coalesced */
+			goto out;
+		}
 		ioapic->irr |= mask;
 		if ((edge && old_irr != ioapic->irr) ||
 		    (!edge && !entry.fields.remote_irr))
-			ret = ioapic_service(ioapic, irq);
+			ret = ioapic_service(ioapic, irq, line_status);
 		else
 			ret = 0; /* report coalesced interrupt */
 	}
+out:
 	trace_kvm_ioapic_set_irq(entry.bits, irq, ret == 0);
 	spin_unlock(&ioapic->lock);
...
@@ -267,8 +364,8 @@ void kvm_ioapic_clear_all(struct kvm_ioapic *ioapic, int irq_source_id)
 	spin_unlock(&ioapic->lock);
 }
 
-static void __kvm_ioapic_update_eoi(struct kvm_ioapic *ioapic, int vector,
-				    int trigger_mode)
+static void __kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu,
+			struct kvm_ioapic *ioapic, int vector, int trigger_mode)
 {
 	int i;
...
@@ -278,6 +375,8 @@ static void __kvm_ioapic_update_eoi(struct kvm_ioapic *ioapic, int vector,
 		if (ent->fields.vector != vector)
 			continue;
 
+		if (i == RTC_GSI)
+			rtc_irq_eoi(ioapic, vcpu);
 		/*
 		 * We are dropping lock while calling ack notifiers because ack
 		 * notifier callbacks for assigned devices call into IOAPIC
...
@@ -296,7 +395,7 @@ static void __kvm_ioapic_update_eoi(struct kvm_ioapic *ioapic, int vector,
 		ASSERT(ent->fields.trig_mode == IOAPIC_LEVEL_TRIG);
 		ent->fields.remote_irr = 0;
 		if (!ent->fields.mask && (ioapic->irr & (1 << i)))
-			ioapic_service(ioapic, i);
+			ioapic_service(ioapic, i, false);
 	}
 }
...
@@ -307,12 +406,12 @@ bool kvm_ioapic_handles_vector(struct kvm *kvm, int vector)
 	return test_bit(vector, ioapic->handled_vectors);
 }
 
-void kvm_ioapic_update_eoi(struct kvm *kvm, int vector, int trigger_mode)
+void kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu, int vector, int trigger_mode)
 {
-	struct kvm_ioapic *ioapic = kvm->arch.vioapic;
+	struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
 
 	spin_lock(&ioapic->lock);
-	__kvm_ioapic_update_eoi(ioapic, vector, trigger_mode);
+	__kvm_ioapic_update_eoi(vcpu, ioapic, vector, trigger_mode);
 	spin_unlock(&ioapic->lock);
 }
...
@@ -410,7 +509,7 @@ static int ioapic_mmio_write(struct kvm_io_device *this, gpa_t addr, int len,
 		break;
 #ifdef	CONFIG_IA64
 	case IOAPIC_REG_EOI:
-		__kvm_ioapic_update_eoi(ioapic, data, IOAPIC_LEVEL_TRIG);
+		__kvm_ioapic_update_eoi(NULL, ioapic, data, IOAPIC_LEVEL_TRIG);
 		break;
 #endif
...
@@ -431,6 +530,7 @@ void kvm_ioapic_reset(struct kvm_ioapic *ioapic)
 	ioapic->ioregsel = 0;
 	ioapic->irr = 0;
 	ioapic->id = 0;
+	rtc_irq_eoi_tracking_reset(ioapic);
 	update_handled_vectors(ioapic);
 }
...
@@ -496,7 +596,8 @@ int kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
 	spin_lock(&ioapic->lock);
 	memcpy(ioapic, state, sizeof(struct kvm_ioapic_state));
 	update_handled_vectors(ioapic);
-	kvm_ioapic_make_eoibitmap_request(kvm);
+	kvm_vcpu_request_scan_ioapic(kvm);
+	kvm_rtc_eoi_tracking_restore_all(ioapic);
 	spin_unlock(&ioapic->lock);
 	return 0;
 }
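The rtc_status machinery added above is essentially reference counting: delivery records the destination vcpus in dest_map and sets pending_eoi, each EOI clears one bit and decrements the count, and a new RTC interrupt is reported as coalesced while the count is still positive. A stripped-down userspace model of that accounting (single-word bitmap instead of DECLARE_BITMAP):

#include <stdbool.h>

struct rtc_status {
	int pending_eoi;
	unsigned long dest_map;		/* one bit per vcpu, simplified */
};

/* Delivery: remember every destination vcpu and count them. */
static void rtc_deliver(struct rtc_status *s, unsigned long dests, int n)
{
	s->dest_map = dests;
	s->pending_eoi = n;
}

/* EOI from one vcpu: drop its bit and the outstanding count. */
static void rtc_eoi(struct rtc_status *s, int vcpu_id)
{
	unsigned long bit = 1UL << vcpu_id;

	if (s->dest_map & bit) {
		s->dest_map &= ~bit;
		--s->pending_eoi;
	}
}

/* A new RTC irq is coalesced while any EOI is still outstanding. */
static bool rtc_coalesced(const struct rtc_status *s)
{
	return s->pending_eoi > 0;
}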
virt/kvm/ioapic.h
View file @ fca7567c
...
@@ -34,6 +34,17 @@ struct kvm_vcpu;
 #define	IOAPIC_INIT			0x5
 #define	IOAPIC_EXTINT			0x7
 
+#ifdef CONFIG_X86
+#define RTC_GSI 8
+#else
+#define RTC_GSI -1U
+#endif
+
+struct rtc_status {
+	int pending_eoi;
+	DECLARE_BITMAP(dest_map, KVM_MAX_VCPUS);
+};
+
 struct kvm_ioapic {
 	u64 base_address;
 	u32 ioregsel;
...
@@ -47,6 +58,7 @@ struct kvm_ioapic {
 	void (*ack_notifier)(void *opaque, int irq);
 	spinlock_t lock;
 	DECLARE_BITMAP(handled_vectors, 256);
+	struct rtc_status rtc_status;
 };
 
 #ifdef DEBUG
...
@@ -67,24 +79,25 @@ static inline struct kvm_ioapic *ioapic_irqchip(struct kvm *kvm)
 	return kvm->arch.vioapic;
 }
 
+void kvm_rtc_eoi_tracking_restore_one(struct kvm_vcpu *vcpu);
 int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
 		int short_hand, int dest, int dest_mode);
 int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2);
-void kvm_ioapic_update_eoi(struct kvm *kvm, int vector, int trigger_mode);
+void kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu, int vector, int trigger_mode);
 bool kvm_ioapic_handles_vector(struct kvm *kvm, int vector);
 int kvm_ioapic_init(struct kvm *kvm);
 void kvm_ioapic_destroy(struct kvm *kvm);
 int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id,
-		       int level);
+		       int level, bool line_status);
 void kvm_ioapic_clear_all(struct kvm_ioapic *ioapic, int irq_source_id);
 void kvm_ioapic_reset(struct kvm_ioapic *ioapic);
 int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
-		struct kvm_lapic_irq *irq);
+		struct kvm_lapic_irq *irq, unsigned long *dest_map);
 int kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state);
 int kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state);
-void kvm_ioapic_make_eoibitmap_request(struct kvm *kvm);
-void kvm_ioapic_calculate_eoi_exitmap(struct kvm_vcpu *vcpu,
-				      u64 *eoi_exit_bitmap);
+void kvm_vcpu_request_scan_ioapic(struct kvm *kvm);
+void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap,
+			u32 *tmr);
 
 #endif
virt/kvm/irq_comm.c
View file @ fca7567c
...
@@ -35,7 +35,8 @@
 #include "ioapic.h"
 
 static int kvm_set_pic_irq(struct kvm_kernel_irq_routing_entry *e,
-			   struct kvm *kvm, int irq_source_id, int level)
+			   struct kvm *kvm, int irq_source_id, int level,
+			   bool line_status)
 {
 #ifdef CONFIG_X86
 	struct kvm_pic *pic = pic_irqchip(kvm);
...
@@ -46,10 +47,12 @@ static int kvm_set_pic_irq(struct kvm_kernel_irq_routing_entry *e,
 }
 
 static int kvm_set_ioapic_irq(struct kvm_kernel_irq_routing_entry *e,
-			      struct kvm *kvm, int irq_source_id, int level)
+			      struct kvm *kvm, int irq_source_id, int level,
+			      bool line_status)
 {
 	struct kvm_ioapic *ioapic = kvm->arch.vioapic;
-	return kvm_ioapic_set_irq(ioapic, e->irqchip.pin, irq_source_id, level);
+	return kvm_ioapic_set_irq(ioapic, e->irqchip.pin, irq_source_id, level,
+				line_status);
 }
 
 inline static bool kvm_is_dm_lowest_prio(struct kvm_lapic_irq *irq)
...
@@ -63,7 +66,7 @@ inline static bool kvm_is_dm_lowest_prio(struct kvm_lapic_irq *irq)
 }
 
 int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
-		struct kvm_lapic_irq *irq)
+		struct kvm_lapic_irq *irq, unsigned long *dest_map)
 {
 	int i, r = -1;
 	struct kvm_vcpu *vcpu, *lowest = NULL;
...
@@ -74,7 +77,7 @@ int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
 		irq->delivery_mode = APIC_DM_FIXED;
 	}
 
-	if (kvm_irq_delivery_to_apic_fast(kvm, src, irq, &r))
+	if (kvm_irq_delivery_to_apic_fast(kvm, src, irq, &r, dest_map))
 		return r;
 
 	kvm_for_each_vcpu(i, vcpu, kvm) {
...
@@ -88,7 +91,7 @@ int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
 		if (!kvm_is_dm_lowest_prio(irq)) {
 			if (r < 0)
 				r = 0;
-			r += kvm_apic_set_irq(vcpu, irq);
+			r += kvm_apic_set_irq(vcpu, irq, dest_map);
 		} else if (kvm_lapic_enabled(vcpu)) {
 			if (!lowest)
 				lowest = vcpu;
...
@@ -98,7 +101,7 @@ int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
 	}
 
 	if (lowest)
-		r = kvm_apic_set_irq(lowest, irq);
+		r = kvm_apic_set_irq(lowest, irq, dest_map);
 
 	return r;
 }
...
@@ -121,7 +124,7 @@ static inline void kvm_set_msi_irq(struct kvm_kernel_irq_routing_entry *e,
 }
 
 int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
-		struct kvm *kvm, int irq_source_id, int level)
+		struct kvm *kvm, int irq_source_id, int level, bool line_status)
 {
 	struct kvm_lapic_irq irq;
...
@@ -130,7 +133,7 @@ int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
 	kvm_set_msi_irq(e, &irq);
 
-	return kvm_irq_delivery_to_apic(kvm, NULL, &irq);
+	return kvm_irq_delivery_to_apic(kvm, NULL, &irq, NULL);
 }
...
@@ -142,7 +145,7 @@ static int kvm_set_msi_inatomic(struct kvm_kernel_irq_routing_entry *e,
 	kvm_set_msi_irq(e, &irq);
 
-	if (kvm_irq_delivery_to_apic_fast(kvm, NULL, &irq, &r))
+	if (kvm_irq_delivery_to_apic_fast(kvm, NULL, &irq, &r, NULL))
 		return r;
 	else
 		return -EWOULDBLOCK;
...
@@ -159,7 +162,7 @@ int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi)
 	route.msi.address_hi = msi->address_hi;
 	route.msi.data = msi->data;
 
-	return kvm_set_msi(&route, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1);
+	return kvm_set_msi(&route, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1, false);
 }
 
 /*
...
@@ -168,7 +171,8 @@ int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi)
  *  = 0   Interrupt was coalesced (previous irq is still pending)
  *  > 0   Number of CPUs interrupt was delivered to
  */
-int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level)
+int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
+		bool line_status)
 {
 	struct kvm_kernel_irq_routing_entry *e, irq_set[KVM_NR_IRQCHIPS];
 	int ret = -1, i = 0;
...
@@ -189,7 +193,8 @@ int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level)
 	while (i--) {
 		int r;
-		r = irq_set[i].set(&irq_set[i], kvm, irq_source_id, level);
+		r = irq_set[i].set(&irq_set[i], kvm, irq_source_id, level,
+				line_status);
 		if (r < 0)
 			continue;
...
@@ -280,7 +285,7 @@ void kvm_register_irq_ack_notifier(struct kvm *kvm,
 	mutex_lock(&kvm->irq_lock);
 	hlist_add_head_rcu(&kian->link, &kvm->irq_ack_notifier_list);
 	mutex_unlock(&kvm->irq_lock);
-	kvm_ioapic_make_eoibitmap_request(kvm);
+	kvm_vcpu_request_scan_ioapic(kvm);
 }
 
 void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
...
@@ -290,7 +295,7 @@ void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
 	hlist_del_init_rcu(&kian->link);
 	mutex_unlock(&kvm->irq_lock);
 	synchronize_rcu();
-	kvm_ioapic_make_eoibitmap_request(kvm);
+	kvm_vcpu_request_scan_ioapic(kvm);
 }
 
 int kvm_request_irq_source_id(struct kvm *kvm)
...
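The comment block in the kvm_set_irq() hunk pins down its return convention: < 0 error, = 0 coalesced, > 0 number of CPUs reached. A self-contained sketch of how a caller would branch on that (the injector is a mock, not the in-tree function):

#include <errno.h>
#include <stdbool.h>

/* Mock obeying kvm_set_irq()'s documented return convention. */
static int set_irq_mock(unsigned irq, int level, bool line_status)
{
	(void)irq; (void)level; (void)line_status;
	return 1;	/* pretend: delivered to one CPU */
}

static int inject_and_report(unsigned gsi, bool line_status)
{
	int r = set_irq_mock(gsi, 1, line_status);

	if (r < 0)
		return r;	/* error: no route for this irq */
	if (r == 0)
		return -EBUSY;	/* coalesced: previous irq still pending */
	return 0;		/* delivered to r CPUs */
}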
virt/kvm/kvm_main.c
View file @ fca7567c
...
@@ -217,9 +217,9 @@ void kvm_make_mclock_inprogress_request(struct kvm *kvm)
 	make_all_cpus_request(kvm, KVM_REQ_MCLOCK_INPROGRESS);
 }
 
-void kvm_make_update_eoibitmap_request(struct kvm *kvm)
+void kvm_make_scan_ioapic_request(struct kvm *kvm)
 {
-	make_all_cpus_request(kvm, KVM_REQ_EOIBITMAP);
+	make_all_cpus_request(kvm, KVM_REQ_SCAN_IOAPIC);
 }
 
 int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
...
@@ -1078,7 +1078,7 @@ static int kvm_read_hva_atomic(void *data, void __user *hva, int len)
 	return __copy_from_user_inatomic(data, hva, len);
 }
 
-int get_user_page_nowait(struct task_struct *tsk, struct mm_struct *mm,
+static int get_user_page_nowait(struct task_struct *tsk, struct mm_struct *mm,
 	unsigned long start, int write, struct page **page)
 {
 	int flags = FOLL_TOUCH | FOLL_NOWAIT | FOLL_HWPOISON | FOLL_GET;
...
@@ -1671,6 +1671,7 @@ void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
 		smp_send_reschedule(cpu);
 	put_cpu();
 }
+EXPORT_SYMBOL_GPL(kvm_vcpu_kick);
 #endif /* !CONFIG_S390 */
 
 void kvm_resched(struct kvm_vcpu *vcpu)
...
@@ -2258,7 +2259,8 @@ static long kvm_vm_ioctl(struct file *filp,
 		if (copy_from_user(&irq_event, argp, sizeof irq_event))
 			goto out;
 
-		r = kvm_vm_ioctl_irq_line(kvm, &irq_event);
+		r = kvm_vm_ioctl_irq_line(kvm, &irq_event,
+					ioctl == KVM_IRQ_LINE_STATUS);
 		if (r)
 			goto out;
...
@@ -2572,14 +2574,6 @@ static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
 	return NOTIFY_OK;
 }
 
-asmlinkage void kvm_spurious_fault(void)
-{
-	/* Fault while not rebooting.  We want the trace. */
-	BUG();
-}
-EXPORT_SYMBOL_GPL(kvm_spurious_fault);
-
 static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
 		      void *v)
 {
...
@@ -2612,7 +2606,7 @@ static void kvm_io_bus_destroy(struct kvm_io_bus *bus)
 	kfree(bus);
 }
 
-int kvm_io_bus_sort_cmp(const void *p1, const void *p2)
+static int kvm_io_bus_sort_cmp(const void *p1, const void *p2)
 {
 	const struct kvm_io_range *r1 = p1;
 	const struct kvm_io_range *r2 = p2;
...
@@ -2624,7 +2618,7 @@ int kvm_io_bus_sort_cmp(const void *p1, const void *p2)
 	return 0;
 }
 
-int kvm_io_bus_insert_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev,
+static int kvm_io_bus_insert_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev,
 			  gpa_t addr, int len)
 {
 	bus->range[bus->dev_count++] = (struct kvm_io_range) {
...
@@ -2639,7 +2633,7 @@ int kvm_io_bus_insert_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev,
 	return 0;
 }
 
-int kvm_io_bus_get_first_dev(struct kvm_io_bus *bus,
+static int kvm_io_bus_get_first_dev(struct kvm_io_bus *bus,
 			     gpa_t addr, int len)
 {
 	struct kvm_io_range *range, key;
...
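From userspace, the ioctl == KVM_IRQ_LINE_STATUS test above means only the *_STATUS variant of the irq-line ioctl gets the new line-status semantics and can report coalescing back through struct kvm_irq_level, where irq and status share a union. A minimal caller, assuming vm_fd is an already-open VM file descriptor:

#include <linux/kvm.h>
#include <sys/ioctl.h>

static int assert_irq(int vm_fd, unsigned irq)
{
	struct kvm_irq_level irq_level = { .irq = irq, .level = 1 };

	if (ioctl(vm_fd, KVM_IRQ_LINE_STATUS, &irq_level) < 0)
		return -1;
	return irq_level.status;	/* 0 = coalesced, > 0 = delivered */
}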