Commit 6c6165f8, authored Jul 08, 2020 by Paolo Bonzini
    Merge branch 'kvm-master' into HEAD

    Merge 5.8-rc bugfixes.

Parents: 26d05b36 8038a922
Showing 16 changed files with 74 additions and 35 deletions (+74 / -35).
arch/arm64/include/asm/arch_gicv3.h   +1  -1
arch/arm64/include/asm/cpufeature.h   +1  -1
arch/arm64/kvm/hyp-init.S             +7  -4
arch/arm64/kvm/pmu.c                  +6  -1
arch/arm64/kvm/pvtime.c               +12 -3
arch/arm64/kvm/reset.c                +7  -3
arch/arm64/kvm/vgic/vgic-v4.c         +8  -0
arch/s390/include/asm/kvm_host.h      +4  -4
arch/x86/kvm/kvm_cache_regs.h         +1  -1
arch/x86/kvm/mmu/mmu.c                +1  -1
arch/x86/kvm/vmx/nested.c             +2  -2
arch/x86/kvm/vmx/vmx.c                +5  -8
arch/x86/kvm/vmx/vmx.h                +0  -2
arch/x86/kvm/x86.c                    +9  -3
drivers/irqchip/irq-gic-v3-its.c      +8  -0
virt/kvm/kvm_main.c                   +2  -1
arch/arm64/include/asm/arch_gicv3.h

@@ -109,7 +109,7 @@ static inline u32 gic_read_pmr(void)
 	return read_sysreg_s(SYS_ICC_PMR_EL1);
 }
 
-static inline void gic_write_pmr(u32 val)
+static __always_inline void gic_write_pmr(u32 val)
 {
 	write_sysreg_s(val, SYS_ICC_PMR_EL1);
 }
arch/arm64/include/asm/cpufeature.h

@@ -675,7 +675,7 @@ static inline bool system_supports_generic_auth(void)
 	       cpus_have_const_cap(ARM64_HAS_GENERIC_AUTH);
 }
 
-static inline bool system_uses_irq_prio_masking(void)
+static __always_inline bool system_uses_irq_prio_masking(void)
 {
 	return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) &&
 	       cpus_have_const_cap(ARM64_HAS_IRQ_PRIO_MASKING);
arch/arm64/kvm/hyp-init.S

@@ -136,11 +136,15 @@ SYM_CODE_START(__kvm_handle_stub_hvc)
 
 1:	cmp	x0, #HVC_RESET_VECTORS
 	b.ne	1f
-reset:
+
 	/*
-	 * Reset kvm back to the hyp stub. Do not clobber x0-x4 in
-	 * case we coming via HVC_SOFT_RESTART.
+	 * Set the HVC_RESET_VECTORS return code before entering the common
+	 * path so that we do not clobber x0-x2 in case we are coming via
+	 * HVC_SOFT_RESTART.
 	 */
+	mov	x0, xzr
+reset:
+	/* Reset kvm back to the hyp stub. */
 	mrs	x5, sctlr_el2
 	mov_q	x6, SCTLR_ELx_FLAGS
 	bic	x5, x5, x6		// Clear SCTL_M and etc
@@ -151,7 +155,6 @@ reset:
 	/* Install stub vectors */
 	adr_l	x5, __hyp_stub_vectors
 	msr	vbar_el2, x5
-	mov	x0, xzr
 	eret
 
 1:	/* Bad stub call */
arch/arm64/kvm/pmu.c

@@ -159,7 +159,10 @@ static void kvm_vcpu_pmu_disable_el0(unsigned long events)
 }
 
 /*
- * On VHE ensure that only guest events have EL0 counting enabled
+ * On VHE ensure that only guest events have EL0 counting enabled.
+ * This is called from both vcpu_{load,put} and the sysreg handling.
+ * Since the latter is preemptible, special care must be taken to
+ * disable preemption.
  */
 void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
 {
@@ -169,12 +172,14 @@ void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
 	if (!has_vhe())
 		return;
 
+	preempt_disable();
 	host = this_cpu_ptr(&kvm_host_data);
 	events_guest = host->pmu_events.events_guest;
 	events_host = host->pmu_events.events_host;
 
 	kvm_vcpu_pmu_enable_el0(events_guest);
 	kvm_vcpu_pmu_disable_el0(events_host);
+	preempt_enable();
 }
 
 /*
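The pmu.c hunk above applies the standard rule for touching per-CPU data from a preemptible path: pin the task to the current CPU before taking the this_cpu_ptr() and keep it pinned while the data is used. A minimal sketch of that pattern, with illustrative names (the struct and function below are not from the patch):

#include <linux/percpu.h>
#include <linux/preempt.h>

/* Illustrative per-CPU state, analogous to kvm_host_data. */
struct host_pmu_state {
	unsigned long events_guest;
	unsigned long events_host;
};
static DEFINE_PER_CPU(struct host_pmu_state, host_pmu_state);

static void restore_guest_events_sketch(void)
{
	struct host_pmu_state *host;

	preempt_disable();			/* pin the task to this CPU */
	host = this_cpu_ptr(&host_pmu_state);	/* pointer valid from here... */
	/* ... program EL0 counting from host->events_guest / events_host ... */
	preempt_enable();			/* ...only until here */
}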
arch/arm64/kvm/pvtime.c

@@ -3,6 +3,7 @@
 
 #include <linux/arm-smccc.h>
 #include <linux/kvm_host.h>
+#include <linux/sched/stat.h>
 
 #include <asm/kvm_mmu.h>
 #include <asm/pvclock-abi.h>
@@ -73,6 +74,11 @@ gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu)
 	return base;
 }
 
+static bool kvm_arm_pvtime_supported(void)
+{
+	return !!sched_info_on();
+}
+
 int kvm_arm_pvtime_set_attr(struct kvm_vcpu *vcpu,
 			    struct kvm_device_attr *attr)
 {
@@ -82,7 +88,8 @@ int kvm_arm_pvtime_set_attr(struct kvm_vcpu *vcpu,
 	int ret = 0;
 	int idx;
 
-	if (attr->attr != KVM_ARM_VCPU_PVTIME_IPA)
+	if (!kvm_arm_pvtime_supported() ||
+	    attr->attr != KVM_ARM_VCPU_PVTIME_IPA)
 		return -ENXIO;
 
 	if (get_user(ipa, user))
@@ -110,7 +117,8 @@ int kvm_arm_pvtime_get_attr(struct kvm_vcpu *vcpu,
 	u64 __user *user = (u64 __user *)attr->addr;
 	u64 ipa;
 
-	if (attr->attr != KVM_ARM_VCPU_PVTIME_IPA)
+	if (!kvm_arm_pvtime_supported() ||
+	    attr->attr != KVM_ARM_VCPU_PVTIME_IPA)
 		return -ENXIO;
 
 	ipa = vcpu->arch.steal.base;
@@ -125,7 +133,8 @@ int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu,
 {
 	switch (attr->attr) {
 	case KVM_ARM_VCPU_PVTIME_IPA:
-		return 0;
+		if (kvm_arm_pvtime_supported())
+			return 0;
 	}
 	return -ENXIO;
 }
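For context (not part of the diff): with kvm_arm_pvtime_supported() gating all three attribute handlers, the attribute only exists when the host kernel has scheduler run-time accounting (sched_info_on()), so a VMM can probe for stolen-time support before programming the IPA. A rough userspace-side sketch, assuming a vCPU fd from KVM_CREATE_VCPU and the arm64 uapi constants pulled in via <linux/kvm.h>:

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdbool.h>

/* Returns true when this vCPU exposes the PV stolen-time attribute. */
static bool pvtime_supported(int vcpu_fd)
{
	struct kvm_device_attr attr = {
		.group = KVM_ARM_VCPU_PVTIME_CTRL,
		.attr  = KVM_ARM_VCPU_PVTIME_IPA,
	};

	/* 0 means the attribute exists; -ENXIO means it does not. */
	return ioctl(vcpu_fd, KVM_HAS_DEVICE_ATTR, &attr) == 0;
}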
arch/arm64/kvm/reset.c

@@ -245,7 +245,7 @@ static int kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu)
  */
 int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
 {
-	int ret = -EINVAL;
+	int ret;
 	bool loaded;
 	u32 pstate;
 
@@ -269,15 +269,19 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
 
 	if (test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, vcpu->arch.features) ||
 	    test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, vcpu->arch.features)) {
-		if (kvm_vcpu_enable_ptrauth(vcpu))
+		if (kvm_vcpu_enable_ptrauth(vcpu)) {
+			ret = -EINVAL;
 			goto out;
+		}
 	}
 
 	switch (vcpu->arch.target) {
 	default:
 		if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) {
-			if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1))
+			if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1)) {
+				ret = -EINVAL;
 				goto out;
+			}
 			pstate = VCPU_RESET_PSTATE_SVC;
 		} else {
 			pstate = VCPU_RESET_PSTATE_EL1;
arch/arm64/kvm/vgic/vgic-v4.c

@@ -90,7 +90,15 @@ static irqreturn_t vgic_v4_doorbell_handler(int irq, void *info)
 	    !irqd_irq_disabled(&irq_to_desc(irq)->irq_data))
 		disable_irq_nosync(irq);
 
+	/*
+	 * The v4.1 doorbell can fire concurrently with the vPE being
+	 * made non-resident. Ensure we only update pending_last
+	 * *after* the non-residency sequence has completed.
+	 */
+	raw_spin_lock(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vpe_lock);
 	vcpu->arch.vgic_cpu.vgic_v3.its_vpe.pending_last = true;
+	raw_spin_unlock(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vpe_lock);
+
 	kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
 	kvm_vcpu_kick(vcpu);
 
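This hunk is one half of a race fix; the matching half, which takes the same vpe_lock around the read-back of PendingLast, is in the drivers/irqchip/irq-gic-v3-its.c change further down. A condensed sketch of the two sides, wrapped in illustrative functions (the wrappers are not the kernel's code, only the locking pattern is):

/* Doorbell side: runs in hard-IRQ context, so a plain raw lock suffices. */
static void doorbell_side(struct its_vpe *vpe)
{
	raw_spin_lock(&vpe->vpe_lock);
	vpe->pending_last = true;	/* only lands after deschedule is done */
	raw_spin_unlock(&vpe->vpe_lock);
}

/* Deschedule side: may run with IRQs enabled, so it must block the
 * doorbell with the irqsave variant while it reads back PendingLast. */
static void deschedule_side(struct its_vpe *vpe, void __iomem *vlpi_base)
{
	unsigned long flags;
	u64 val;

	raw_spin_lock_irqsave(&vpe->vpe_lock, flags);
	val = its_clear_vpend_valid(vlpi_base, GICR_VPENDBASER_PendingLast,
				    GICR_VPENDBASER_4_1_DB);
	vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast);
	raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags);
}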
arch/s390/include/asm/kvm_host.h

@@ -31,12 +31,12 @@
 #define KVM_USER_MEM_SLOTS 32
 
 /*
- * These seem to be used for allocating ->chip in the routing table,
- * which we don't use. 4096 is an out-of-thin-air value. If we need
- * to look at ->chip later on, we'll need to revisit this.
+ * These seem to be used for allocating ->chip in the routing table, which we
+ * don't use. 1 is as small as we can get to reduce the needed memory. If we
+ * need to look at ->chip later on, we'll need to revisit this.
  */
 #define KVM_NR_IRQCHIPS 1
-#define KVM_IRQCHIP_NUM_PINS 4096
+#define KVM_IRQCHIP_NUM_PINS 1
 #define KVM_HALT_POLL_NS_DEFAULT 50000
 
 /* s390-specific vcpu->requests bit members */
arch/x86/kvm/kvm_cache_regs.h

@@ -7,7 +7,7 @@
 #define KVM_POSSIBLE_CR0_GUEST_BITS X86_CR0_TS
 #define KVM_POSSIBLE_CR4_GUEST_BITS				  \
 	(X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \
-	 | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_PGE)
+	 | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_PGE | X86_CR4_TSD)
 
 #define BUILD_KVM_GPR_ACCESSORS(lname, uname)				      \
 static __always_inline unsigned long kvm_##lname##_read(struct kvm_vcpu *vcpu)\
arch/x86/kvm/mmu/mmu.c

@@ -4449,7 +4449,7 @@ __reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
 			nonleaf_bit8_rsvd | rsvd_bits(7, 7) |
 			rsvd_bits(maxphyaddr, 51);
 		rsvd_check->rsvd_bits_mask[0][2] = exb_bit_rsvd |
-			nonleaf_bit8_rsvd | gbpages_bit_rsvd |
+			gbpages_bit_rsvd |
 			rsvd_bits(maxphyaddr, 51);
 		rsvd_check->rsvd_bits_mask[0][1] = exb_bit_rsvd |
 			rsvd_bits(maxphyaddr, 51);
arch/x86/kvm/vmx/nested.c

@@ -4109,7 +4109,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
 	 * CR0_GUEST_HOST_MASK is already set in the original vmcs01
 	 * (KVM doesn't change it);
 	 */
-	vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS;
+	vcpu->arch.cr0_guest_owned_bits = KVM_POSSIBLE_CR0_GUEST_BITS;
 	vmx_set_cr0(vcpu, vmcs12->host_cr0);
 
 	/* Same as above - no reason to call set_cr4_guest_host_mask(). */
@@ -4259,7 +4259,7 @@ static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu)
 	 */
 	vmx_set_efer(vcpu, nested_vmx_get_vmcs01_guest_efer(vmx));
 
-	vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS;
+	vcpu->arch.cr0_guest_owned_bits = KVM_POSSIBLE_CR0_GUEST_BITS;
 	vmx_set_cr0(vcpu, vmcs_readl(CR0_READ_SHADOW));
 
 	vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
arch/x86/kvm/vmx/vmx.c

@@ -133,9 +133,6 @@ module_param_named(preemption_timer, enable_preemption_timer, bool, S_IRUGO);
 #define KVM_VM_CR0_ALWAYS_ON				\
 	(KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST |	\
 	 X86_CR0_WP | X86_CR0_PG | X86_CR0_PE)
-#define KVM_CR4_GUEST_OWNED_BITS				      \
-	(X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR      \
-	 | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_TSD)
 
 #define KVM_VM_CR4_ALWAYS_ON_UNRESTRICTED_GUEST X86_CR4_VMXE
 #define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
@@ -4034,9 +4031,9 @@ void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
 
 void set_cr4_guest_host_mask(struct vcpu_vmx *vmx)
 {
-	vmx->vcpu.arch.cr4_guest_owned_bits = KVM_CR4_GUEST_OWNED_BITS;
-	if (enable_ept)
-		vmx->vcpu.arch.cr4_guest_owned_bits |= X86_CR4_PGE;
+	vmx->vcpu.arch.cr4_guest_owned_bits = KVM_POSSIBLE_CR4_GUEST_BITS;
+	if (!enable_ept)
+		vmx->vcpu.arch.cr4_guest_owned_bits &= ~X86_CR4_PGE;
 	if (is_guest_mode(&vmx->vcpu))
 		vmx->vcpu.arch.cr4_guest_owned_bits &=
 			~get_vmcs12(&vmx->vcpu)->cr4_guest_host_mask;
@@ -4333,8 +4330,8 @@ static void init_vmcs(struct vcpu_vmx *vmx)
 	/* 22.2.1, 20.8.1 */
 	vm_entry_controls_set(vmx, vmx_vmentry_ctrl());
 
-	vmx->vcpu.arch.cr0_guest_owned_bits = X86_CR0_TS;
-	vmcs_writel(CR0_GUEST_HOST_MASK, ~X86_CR0_TS);
+	vmx->vcpu.arch.cr0_guest_owned_bits = KVM_POSSIBLE_CR0_GUEST_BITS;
+	vmcs_writel(CR0_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr0_guest_owned_bits);
 
 	set_cr4_guest_host_mask(vmx);
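Taken together, the kvm_cache_regs.h, nested.c and vmx.c hunks make every path derive the CR0/CR4 guest/host masks from the single KVM_POSSIBLE_CR*_GUEST_BITS definitions. The invariant they restore: a control-register bit is guest-owned exactly when it is clear in the VMCS GUEST_HOST_MASK, so the mask must always be written as the complement of the cached cr*_guest_owned_bits rather than a hard-coded ~X86_CR0_TS. A small sketch of that pairing for CR0 (the helper name below is invented for illustration):

/* Illustrative helper: keep the VMCS mask and the cached
 * guest-owned bits in lockstep, as the hunks above now do. */
static void sync_cr0_guest_host_mask(struct vcpu_vmx *vmx)
{
	vmx->vcpu.arch.cr0_guest_owned_bits = KVM_POSSIBLE_CR0_GUEST_BITS;
	vmcs_writel(CR0_GUEST_HOST_MASK,
		    ~vmx->vcpu.arch.cr0_guest_owned_bits);
}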
arch/x86/kvm/vmx/vmx.h

@@ -288,8 +288,6 @@ struct vcpu_vmx {
 
 	u64		      current_tsc_ratio;
 
-	u32                   host_pkru;
-
 	unsigned long host_debugctlmsr;
 
 	/*
arch/x86/kvm/x86.c

@@ -975,6 +975,8 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 	if (is_long_mode(vcpu)) {
 		if (!(cr4 & X86_CR4_PAE))
 			return 1;
+		if ((cr4 ^ old_cr4) & X86_CR4_LA57)
+			return 1;
 	} else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
 		   && ((cr4 ^ old_cr4) & pdptr_bits)
 		   && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
@@ -2693,6 +2695,9 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
 	if (data & 0x30)
 		return 1;
 
+	if (!lapic_in_kernel(vcpu))
+		return 1;
+
 	vcpu->arch.apf.msr_en_val = data;
 
 	if (!kvm_pv_async_pf_enabled(vcpu)) {
@@ -2856,7 +2861,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		return kvm_mtrr_set_msr(vcpu, msr, data);
 	case MSR_IA32_APICBASE:
 		return kvm_set_apic_base(vcpu, msr_info);
-	case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
+	case APIC_BASE_MSR ... APIC_BASE_MSR + 0xff:
 		return kvm_x2apic_msr_write(vcpu, msr, data);
 	case MSR_IA32_TSCDEADLINE:
 		kvm_set_lapic_tscdeadline_msr(vcpu, data);
@@ -3196,7 +3201,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	case MSR_IA32_APICBASE:
 		msr_info->data = kvm_get_apic_base(vcpu);
 		break;
-	case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
+	case APIC_BASE_MSR ... APIC_BASE_MSR + 0xff:
 		return kvm_x2apic_msr_read(vcpu, msr_info->index, &msr_info->data);
 	case MSR_IA32_TSCDEADLINE:
 		msr_info->data = kvm_get_lapic_tscdeadline_msr(vcpu);
@@ -4603,7 +4608,8 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 		r = -EINVAL;
 		user_tsc_khz = (u32)arg;
 
-		if (user_tsc_khz >= kvm_max_guest_tsc_khz)
+		if (kvm_has_tsc_control &&
+		    user_tsc_khz >= kvm_max_guest_tsc_khz)
 			goto out;
 
 		if (user_tsc_khz == 0)
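On the APIC_BASE_MSR hunks above: the architectural x2APIC MSR range is 0x800..0x8ff, i.e. 256 MSRs, each index mapping to a legacy xAPIC register offset, which is why the case range ends at APIC_BASE_MSR + 0xff rather than + 0x3ff (the old bound wrongly swallowed unrelated MSRs above 0x8ff). A quick sketch of the mapping; the constant value matches the kernel's APIC_BASE_MSR, the helper name is illustrative:

#define X2APIC_MSR_BASE 0x800	/* same value as the kernel's APIC_BASE_MSR */

/* x2APIC MSR index -> xAPIC MMIO register offset,
 * e.g. MSR 0x808 (TPR) -> offset 0x80, MSR 0x8ff -> offset 0xff0. */
static inline unsigned int x2apic_msr_to_reg(unsigned int msr)
{
	return (msr - X2APIC_MSR_BASE) << 4;
}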
drivers/irqchip/irq-gic-v3-its.c

@@ -4054,16 +4054,24 @@ static void its_vpe_4_1_deschedule(struct its_vpe *vpe,
 	u64 val;
 
 	if (info->req_db) {
+		unsigned long flags;
+
 		/*
 		 * vPE is going to block: make the vPE non-resident with
 		 * PendingLast clear and DB set. The GIC guarantees that if
 		 * we read-back PendingLast clear, then a doorbell will be
 		 * delivered when an interrupt comes.
+		 *
+		 * Note the locking to deal with the concurrent update of
+		 * pending_last from the doorbell interrupt handler that can
+		 * run concurrently.
 		 */
+		raw_spin_lock_irqsave(&vpe->vpe_lock, flags);
 		val = its_clear_vpend_valid(vlpi_base,
 					    GICR_VPENDBASER_PendingLast,
 					    GICR_VPENDBASER_4_1_DB);
 		vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast);
+		raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags);
 	} else {
 		/*
 		 * We're not blocking, so just make the vPE non-resident
virt/kvm/kvm_main.c

@@ -3350,7 +3350,8 @@ static long kvm_vcpu_compat_ioctl(struct file *filp,
 		if (kvm_sigmask.len != sizeof(compat_sigset_t))
 			goto out;
 		r = -EFAULT;
-		if (get_compat_sigset(&sigset, (void *)sigmask_arg->sigset))
+		if (get_compat_sigset(&sigset,
+				      (compat_sigset_t __user *)sigmask_arg->sigset))
 			goto out;
 		r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset);
 	} else