nexedi / linux · Commits

Commit 7529e767 authored May 27, 2020 by Paolo Bonzini
Merge branch 'kvm-master' into HEAD
Merge AMD fixes before doing more development work.
parents 4c7ccc3b e7581cac
Showing 5 changed files with 33 additions and 39 deletions:
arch/x86/kvm/mmu.h       +1  -1
arch/x86/kvm/mmu/mmu.c   +22 -34
arch/x86/kvm/svm/svm.c   +1  -1
arch/x86/kvm/vmx/vmx.c   +4  -2
arch/x86/kvm/x86.c       +5  -1

arch/x86/kvm/mmu.h

@@ -51,7 +51,7 @@ static inline u64 rsvd_bits(int s, int e)
 	return ((1ULL << (e - s + 1)) - 1) << s;
 }
 
-void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask, u64 mmio_value, u64 access_mask);
+void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 access_mask);
 void
 reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context);
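
As an aside (not part of the change itself): rsvd_bits(s, e) builds a mask with bits s through e set, so rsvd_bits(51, 51) is exactly bit 51, which the MMIO-mask code below uses as the reserved physical-address bit whenever the CPU supports fewer than 52 PA bits. A standalone userspace sketch of the same formula:

#include <stdint.h>
#include <stdio.h>

/* Same formula as the kernel's rsvd_bits(): set bits s..e, inclusive. */
static uint64_t rsvd_bits(int s, int e)
{
        return ((1ULL << (e - s + 1)) - 1) << s;
}

int main(void)
{
        printf("%#llx\n", (unsigned long long)rsvd_bits(51, 51)); /* 0x8000000000000 */
        printf("%#llx\n", (unsigned long long)rsvd_bits(46, 51)); /* bits 46..51 set */
        return 0;
}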

arch/x86/kvm/mmu/mmu.c

@@ -247,7 +247,6 @@ static u64 __read_mostly shadow_x_mask; /* mutual exclusive with nx_mask */
 static u64 __read_mostly shadow_user_mask;
 static u64 __read_mostly shadow_accessed_mask;
 static u64 __read_mostly shadow_dirty_mask;
-static u64 __read_mostly shadow_mmio_mask;
 static u64 __read_mostly shadow_mmio_value;
 static u64 __read_mostly shadow_mmio_access_mask;
 static u64 __read_mostly shadow_present_mask;
@@ -334,19 +333,19 @@ static void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
 	kvm_flush_remote_tlbs_with_range(kvm, &range);
 }
 
-void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask, u64 mmio_value, u64 access_mask)
+void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 access_mask)
 {
 	BUG_ON((u64)(unsigned)access_mask != access_mask);
-	BUG_ON((mmio_mask & mmio_value) != mmio_value);
+	WARN_ON(mmio_value & (shadow_nonpresent_or_rsvd_mask << shadow_nonpresent_or_rsvd_mask_len));
+	WARN_ON(mmio_value & shadow_nonpresent_or_rsvd_lower_gfn_mask);
 	shadow_mmio_value = mmio_value | SPTE_MMIO_MASK;
-	shadow_mmio_mask = mmio_mask | SPTE_SPECIAL_MASK;
 	shadow_mmio_access_mask = access_mask;
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_mask);
 
 static bool is_mmio_spte(u64 spte)
 {
-	return (spte & shadow_mmio_mask) == shadow_mmio_value;
+	return (spte & SPTE_SPECIAL_MASK) == SPTE_MMIO_MASK;
 }
 
 static inline bool sp_ad_disabled(struct kvm_mmu_page *sp)
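
An aside on the effect of this hunk (my illustration, not from the commit): MMIO SPTEs are now recognised solely by KVM's software-available "special" bits, so the per-vendor mmio_mask parameter is no longer needed. A standalone sketch of the idea; the SPTE_SPECIAL_MASK / SPTE_MMIO_MASK values are assumptions chosen to mirror the bit layout mmu.c used around this release (bits 52-53):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed bit layout: software-available SPTE bits 52-53. */
#define SPTE_SPECIAL_MASK       (3ULL << 52)
#define SPTE_MMIO_MASK          (3ULL << 52)    /* both special bits set => MMIO */

static uint64_t shadow_mmio_value;

/* Mirrors the new kvm_mmu_set_mmio_spte_mask()/is_mmio_spte() pair. */
static void set_mmio_spte_mask(uint64_t mmio_value)
{
        shadow_mmio_value = mmio_value | SPTE_MMIO_MASK;
}

static bool is_mmio_spte(uint64_t spte)
{
        /* No vendor-specific mask any more: only the special bits matter. */
        return (spte & SPTE_SPECIAL_MASK) == SPTE_MMIO_MASK;
}

int main(void)
{
        set_mmio_spte_mask((1ULL << 51) | 1);   /* reserved PA bit + present */

        printf("MMIO spte:   %d\n", is_mmio_spte(shadow_mmio_value));   /* 1 */
        printf("normal spte: %d\n", is_mmio_spte((1ULL << 12) | 1));    /* 0 */
        return 0;
}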
@@ -569,7 +568,6 @@ static void kvm_mmu_reset_all_pte_masks(void)
 	shadow_dirty_mask = 0;
 	shadow_nx_mask = 0;
 	shadow_x_mask = 0;
-	shadow_mmio_mask = 0;
 	shadow_present_mask = 0;
 	shadow_acc_track_mask = 0;
@@ -586,16 +584,15 @@ static void kvm_mmu_reset_all_pte_masks(void)
 	 * the most significant bits of legal physical address space.
 	 */
 	shadow_nonpresent_or_rsvd_mask = 0;
-	low_phys_bits = boot_cpu_data.x86_cache_bits;
-	if (boot_cpu_data.x86_cache_bits <
-	    52 - shadow_nonpresent_or_rsvd_mask_len) {
+	low_phys_bits = boot_cpu_data.x86_phys_bits;
+	if (boot_cpu_has_bug(X86_BUG_L1TF) &&
+	    !WARN_ON_ONCE(boot_cpu_data.x86_cache_bits >=
+			  52 - shadow_nonpresent_or_rsvd_mask_len)) {
+		low_phys_bits = boot_cpu_data.x86_cache_bits
+			- shadow_nonpresent_or_rsvd_mask_len;
 		shadow_nonpresent_or_rsvd_mask =
-			rsvd_bits(boot_cpu_data.x86_cache_bits -
-				  shadow_nonpresent_or_rsvd_mask_len,
-				  boot_cpu_data.x86_cache_bits - 1);
-		low_phys_bits -= shadow_nonpresent_or_rsvd_mask_len;
-	} else
-		WARN_ON_ONCE(boot_cpu_has_bug(X86_BUG_L1TF));
+			rsvd_bits(low_phys_bits, boot_cpu_data.x86_cache_bits - 1);
+	}
 
 	shadow_nonpresent_or_rsvd_lower_gfn_mask =
 		GENMASK_ULL(low_phys_bits - 1, PAGE_SHIFT);
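
A worked example of the new logic (the numbers are mine, not from the commit): on an L1TF-affected CPU whose L1 cache addresses 46 bits, with a 5-bit shadow_nonpresent_or_rsvd_mask_len, the reserved-bit mask covers bits 41..45 and low_phys_bits drops to 41; on an unaffected CPU the mask stays 0 and low_phys_bits is simply the reported physical-address width. A standalone sketch with those assumed values:

#include <stdint.h>
#include <stdio.h>

static uint64_t rsvd_bits(int s, int e)
{
        return ((1ULL << (e - s + 1)) - 1) << s;
}

int main(void)
{
        /* Hypothetical L1TF-affected CPU; values are assumptions, not read from hardware. */
        int x86_cache_bits = 46;
        int mask_len = 5;       /* stands in for shadow_nonpresent_or_rsvd_mask_len */
        int page_shift = 12;    /* PAGE_SHIFT */

        int low_phys_bits = x86_cache_bits - mask_len;                        /* 41 */
        uint64_t rsvd_mask = rsvd_bits(low_phys_bits, x86_cache_bits - 1);    /* bits 41..45 */
        /* Equivalent of GENMASK_ULL(low_phys_bits - 1, PAGE_SHIFT): bits 12..40. */
        uint64_t lower_gfn_mask = ((1ULL << (low_phys_bits - page_shift)) - 1) << page_shift;

        printf("rsvd mask      = %#llx\n", (unsigned long long)rsvd_mask);
        printf("lower gfn mask = %#llx\n", (unsigned long long)lower_gfn_mask);
        return 0;
}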
@@ -6134,27 +6131,18 @@ static void kvm_set_mmio_spte_mask(void)
 	u64 mask;
 
-	/*
-	 * Set the reserved bits and the present bit of an paging-structure
-	 * entry to generate page fault with PFER.RSV = 1.
-	 */
-
 	/*
-	 * Mask the uppermost physical address bit, which would be reserved as
-	 * long as the supported physical address width is less than 52.
+	 * Set a reserved PA bit in MMIO SPTEs to generate page faults with
+	 * PFEC.RSVD=1 on MMIO accesses.  64-bit PTEs (PAE, x86-64, and EPT
+	 * paging) support a maximum of 52 bits of PA, i.e. if the CPU supports
+	 * 52-bit physical addresses then there are no reserved PA bits in the
+	 * PTEs and so the reserved PA approach must be disabled.
 	 */
-	mask = 1ull << 51;
-
-	/* Set the present bit. */
-	mask |= 1ull;
-
-	/*
-	 * If reserved bit is not supported, clear the present bit to disable
-	 * mmio page fault.
-	 */
-	if (shadow_phys_bits == 52)
-		mask &= ~1ull;
+	if (shadow_phys_bits < 52)
+		mask = BIT_ULL(51) | PT_PRESENT_MASK;
+	else
+		mask = 0;
 
-	kvm_mmu_set_mmio_spte_mask(mask, mask, ACC_WRITE_MASK | ACC_USER_MASK);
+	kvm_mmu_set_mmio_spte_mask(mask, ACC_WRITE_MASK | ACC_USER_MASK);
 }
 
 static bool get_nx_auto_mode(void)
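
To spell out the simplified branch (a sketch, not the kernel code path itself): with fewer than 52 physical-address bits the generic MMIO value becomes bit 51, a guaranteed-reserved PA bit, plus the present bit, so guest MMIO accesses fault with PFEC.RSVD=1; with 52 PA bits the value is 0 and the reserved-bit trick is disabled, exactly as the new comment explains. PT_PRESENT_MASK is bit 0.

#include <stdint.h>
#include <stdio.h>

#define BIT_ULL(n)      (1ULL << (n))
#define PT_PRESENT_MASK BIT_ULL(0)

/* Mirrors the merged kvm_set_mmio_spte_mask() logic, for illustration only. */
static uint64_t mmio_spte_value(int shadow_phys_bits)
{
        if (shadow_phys_bits < 52)
                return BIT_ULL(51) | PT_PRESENT_MASK;   /* reserved PA bit + present */
        return 0;                                       /* no reserved PA bits exist */
}

int main(void)
{
        printf("%#llx\n", (unsigned long long)mmio_spte_value(46));     /* 0x8000000000001 */
        printf("%#llx\n", (unsigned long long)mmio_spte_value(52));     /* 0 */
        return 0;
}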

arch/x86/kvm/svm/svm.c

@@ -778,7 +778,7 @@ static __init void svm_adjust_mmio_mask(void)
 	 */
 	mask = (mask_bit < 52) ? rsvd_bits(mask_bit, 51) | PT_PRESENT_MASK : 0;
 
-	kvm_mmu_set_mmio_spte_mask(mask, mask, PT_WRITABLE_MASK | PT_USER_MASK);
+	kvm_mmu_set_mmio_spte_mask(mask, PT_WRITABLE_MASK | PT_USER_MASK);
 }
 
 static void svm_hardware_teardown(void)

arch/x86/kvm/vmx/vmx.c

@@ -4233,8 +4233,7 @@ static void ept_set_mmio_spte_mask(void)
 	 * EPT Misconfigurations can be generated if the value of bits 2:0
 	 * of an EPT paging-structure entry is 110b (write/execute).
 	 */
-	kvm_mmu_set_mmio_spte_mask(VMX_EPT_RWX_MASK,
-				   VMX_EPT_MISCONFIG_WX_VALUE, 0);
+	kvm_mmu_set_mmio_spte_mask(VMX_EPT_MISCONFIG_WX_VALUE, 0);
 }
 
 #define VMX_XSS_EXIT_BITMAP 0
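
For context, not part of the diff: on EPT the MMIO SPTE value is the deliberately inconsistent permission pattern write+execute without read (binary 110), which the CPU reports as an EPT misconfiguration rather than a normal EPT violation, so KVM can spot MMIO accesses cheaply. The bit positions below are assumptions matching the usual EPT permission layout (read=bit 0, write=bit 1, execute=bit 2):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed EPT permission bits: read=0, write=1, execute=2. */
#define EPT_READABLE    (1ULL << 0)
#define EPT_WRITABLE    (1ULL << 1)
#define EPT_EXECUTABLE  (1ULL << 2)

/* Write+execute without read (110b) is architecturally a misconfiguration. */
#define EPT_MISCONFIG_WX_VALUE  (EPT_WRITABLE | EPT_EXECUTABLE)

static bool is_ept_misconfig(uint64_t spte)
{
        uint64_t rwx = spte & (EPT_READABLE | EPT_WRITABLE | EPT_EXECUTABLE);

        return rwx == EPT_MISCONFIG_WX_VALUE;
}

int main(void)
{
        printf("%d\n", is_ept_misconfig(EPT_MISCONFIG_WX_VALUE));       /* 1 */
        printf("%d\n", is_ept_misconfig(EPT_READABLE | EPT_WRITABLE));  /* 0 */
        return 0;
}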
@@ -7300,6 +7299,9 @@ static __init void vmx_set_cpu_caps(void)
 	/* CPUID 0x80000001 */
 	if (!cpu_has_vmx_rdtscp())
 		kvm_cpu_cap_clear(X86_FEATURE_RDTSCP);
+
+	if (vmx_waitpkg_supported())
+		kvm_cpu_cap_check_and_set(X86_FEATURE_WAITPKG);
 }
 
 static void vmx_request_immediate_exit(struct kvm_vcpu *vcpu)

arch/x86/kvm/x86.c

@@ -3799,7 +3799,7 @@ static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu,
 	unsigned bank_num = mcg_cap & 0xff, bank;
 
 	r = -EINVAL;
-	if (!bank_num || bank_num >= KVM_MAX_MCE_BANKS)
+	if (!bank_num || bank_num > KVM_MAX_MCE_BANKS)
 		goto out;
 	if (mcg_cap & ~(kvm_mce_cap_supported | 0xff | 0xff0000))
 		goto out;
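
A brief note on the intent (mine, not from the commit message): the old ">=" check rejected a userspace request for exactly KVM_MAX_MCE_BANKS banks even though that count is valid, and relaxing it to ">" accepts the full bank count while still refusing anything larger. With KVM_MAX_MCE_BANKS assumed to be 32, the boundary behaves as in this sketch:

#include <stdio.h>

#define KVM_MAX_MCE_BANKS 32    /* assumption for illustration */

/* Old check: rejects bank_num == 32.  New check: accepts it. */
static int old_check_rejects(unsigned bank_num)
{
        return !bank_num || bank_num >= KVM_MAX_MCE_BANKS;
}

static int new_check_rejects(unsigned bank_num)
{
        return !bank_num || bank_num > KVM_MAX_MCE_BANKS;
}

int main(void)
{
        printf("bank_num=32: old=%d new=%d\n", old_check_rejects(32), new_check_rejects(32)); /* 1 0 */
        printf("bank_num=33: old=%d new=%d\n", old_check_rejects(33), new_check_rejects(33)); /* 1 1 */
        return 0;
}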
@@ -5282,6 +5282,10 @@ static void kvm_init_msr_list(void)
 		if (!kvm_cpu_cap_has(X86_FEATURE_RDTSCP))
 			continue;
 		break;
+	case MSR_IA32_UMWAIT_CONTROL:
+		if (!kvm_cpu_cap_has(X86_FEATURE_WAITPKG))
+			continue;
+		break;
 	case MSR_IA32_RTIT_CTL:
 	case MSR_IA32_RTIT_STATUS:
 		if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT))