Commit a280e358 authored Jun 07, 2022 by Paolo Bonzini
Merge branch 'kvm-5.19-early-fixes' into HEAD
Parents: f2906aa8, 11d39e8c
Showing 11 changed files with 78 additions and 33 deletions (+78, -33)
arch/x86/include/asm/uaccess.h                       +1   -1
arch/x86/kvm/mmu/mmu.c                               +1   -1
arch/x86/kvm/mmu/tdp_iter.c                          +9   -0
arch/x86/kvm/mmu/tdp_iter.h                          +1   -0
arch/x86/kvm/mmu/tdp_mmu.c                           +32  -6
arch/x86/kvm/svm/nested.c                            +2   -2
arch/x86/kvm/svm/svm.c                               +20  -12
arch/x86/kvm/svm/svm.h                               +1   -1
kernel/entry/kvm.c                                   +0   -6
tools/testing/selftests/kvm/x86_64/hyperv_clock.c    +7   -3
virt/kvm/kvm_main.c                                  +4   -1
arch/x86/include/asm/uaccess.h
@@ -439,7 +439,7 @@ do { \
 		     [ptr] "+m" (*_ptr), \
 		     [old] "+a" (__old) \
 		     : [new] ltype (__new) \
-		     : "memory", "cc"); \
+		     : "memory"); \
 	if (unlikely(__err)) \
 		goto label; \
 	if (unlikely(!success)) \
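For context, a minimal stand-alone sketch of the pattern this macro wraps: CMPXCHG reports success through ZF, which x86 inline asm can hand back to C as a flag-output operand. The function below is illustrative only and not part of this commit; it assumes a GCC/Clang toolchain on x86-64 with flag-output constraint support.

#include <stdbool.h>
#include <stdint.h>

/* Illustrative only: user-space analogue of the cmpxchg sequence that
 * __try_cmpxchg_user_asm() wraps.  "=@ccz" returns ZF (1 if the exchange
 * happened) directly to the C variable. */
static bool try_cmpxchg_u64(uint64_t *ptr, uint64_t *old, uint64_t new)
{
        bool success;

        asm volatile("lock cmpxchgq %[new], %[ptr]"
                     : "=@ccz" (success),
                       [ptr] "+m" (*ptr),
                       [old] "+a" (*old)
                     : [new] "r" (new)
                     : "memory");
        return success;
}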
arch/x86/kvm/mmu/mmu.c
@@ -5179,7 +5179,7 @@ static void __kvm_mmu_free_obsolete_roots(struct kvm *kvm, struct kvm_mmu *mmu)
 		roots_to_free |= KVM_MMU_ROOT_CURRENT;
 
 	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
-		if (is_obsolete_root(kvm, mmu->root.hpa))
+		if (is_obsolete_root(kvm, mmu->prev_roots[i].hpa))
 			roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
 	}
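The fix above is the classic loop bug of testing a loop-invariant value instead of the indexed element. A hedged, self-contained reduction (the struct and names below are invented for illustration and are not KVM code):

#include <stdbool.h>
#include <stdio.h>

/* Invented reduction: the loop must test prev_roots[i], the element picked
 * by the induction variable, not the unchanging root. */
struct root { unsigned long hpa; };

static bool is_obsolete(unsigned long hpa)
{
        return hpa == 0;        /* stand-in for is_obsolete_root() */
}

int main(void)
{
        struct root root = { .hpa = 1 };
        struct root prev_roots[3] = { { 1 }, { 0 }, { 1 } };
        unsigned int roots_to_free = 0;

        (void)root;     /* kept only to mirror the original context */

        for (int i = 0; i < 3; i++) {
                if (is_obsolete(prev_roots[i].hpa))     /* not root.hpa */
                        roots_to_free |= 1u << i;
        }
        printf("roots_to_free = %#x\n", roots_to_free); /* prints 0x2 */
        return 0;
}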
arch/x86/kvm/mmu/tdp_iter.c
@@ -145,6 +145,15 @@ static bool try_step_up(struct tdp_iter *iter)
 	return true;
 }
 
+/*
+ * Step the iterator back up a level in the paging structure. Should only be
+ * used when the iterator is below the root level.
+ */
+void tdp_iter_step_up(struct tdp_iter *iter)
+{
+	WARN_ON(!try_step_up(iter));
+}
+
 /*
  * Step to the next SPTE in a pre-order traversal of the paging structure.
  * To get to the next SPTE, the iterator either steps down towards the goal
arch/x86/kvm/mmu/tdp_iter.h
@@ -114,5 +114,6 @@ void tdp_iter_start(struct tdp_iter *iter, struct kvm_mmu_page *root,
 		    int min_level, gfn_t next_last_level_gfn);
 void tdp_iter_next(struct tdp_iter *iter);
 void tdp_iter_restart(struct tdp_iter *iter);
+void tdp_iter_step_up(struct tdp_iter *iter);
 
 #endif /* __KVM_X86_MMU_TDP_ITER_H */
arch/x86/kvm/mmu/tdp_mmu.c
@@ -1742,12 +1742,12 @@ static void zap_collapsible_spte_range(struct kvm *kvm,
 	gfn_t start = slot->base_gfn;
 	gfn_t end = start + slot->npages;
 	struct tdp_iter iter;
+	int max_mapping_level;
 	kvm_pfn_t pfn;
 
 	rcu_read_lock();
 
 	tdp_root_for_each_pte(iter, root, start, end) {
-retry:
 		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
 			continue;
 
@@ -1755,15 +1755,41 @@ static void zap_collapsible_spte_range(struct kvm *kvm,
 		    !is_last_spte(iter.old_spte, iter.level))
 			continue;
 
+		/*
+		 * This is a leaf SPTE. Check if the PFN it maps can
+		 * be mapped at a higher level.
+		 */
 		pfn = spte_to_pfn(iter.old_spte);
-		if (kvm_is_reserved_pfn(pfn) ||
-		    iter.level >= kvm_mmu_max_mapping_level(kvm, slot, iter.gfn,
-							    pfn, PG_LEVEL_NUM))
+
+		if (kvm_is_reserved_pfn(pfn))
 			continue;
 
+		max_mapping_level = kvm_mmu_max_mapping_level(kvm, slot,
+				iter.gfn, pfn, PG_LEVEL_NUM);
+
+		WARN_ON(max_mapping_level < iter.level);
+
+		/*
+		 * If this page is already mapped at the highest
+		 * viable level, there's nothing more to do.
+		 */
+		if (max_mapping_level == iter.level)
+			continue;
+
+		/*
+		 * The page can be remapped at a higher level, so step
+		 * up to zap the parent SPTE.
+		 */
+		while (max_mapping_level > iter.level)
+			tdp_iter_step_up(&iter);
+
 		/* Note, a successful atomic zap also does a remote TLB flush. */
-		if (tdp_mmu_zap_spte_atomic(kvm, &iter))
-			goto retry;
+		tdp_mmu_zap_spte_atomic(kvm, &iter);
+
+		/*
+		 * If the atomic zap fails, the iter will recurse back into
+		 * the same subtree to retry.
+		 */
 	}
 
 	rcu_read_unlock();
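As a hedged walk-through of the new loop: if a leaf is mapped at 4KiB but the slot could back it with a 2MiB page, the iterator steps up once and the zap removes the whole parent SPTE, letting the next fault install a single huge mapping. The model below is illustrative only, not kernel code; it assumes KVM's level numbering (PG_LEVEL_4K == 1, PG_LEVEL_2M == 2).

#include <assert.h>
#include <stdio.h>

/* Illustrative model of the step-up-and-zap logic; not KVM code. */
enum { PG_LEVEL_4K = 1, PG_LEVEL_2M = 2, PG_LEVEL_1G = 3 };

int main(void)
{
        int iter_level = PG_LEVEL_4K;           /* level of the current leaf SPTE */
        int max_mapping_level = PG_LEVEL_2M;    /* best level the slot allows */

        assert(max_mapping_level >= iter_level);        /* mirrors the WARN_ON() */

        while (max_mapping_level > iter_level)
                iter_level++;                   /* stand-in for tdp_iter_step_up() */

        printf("zap SPTE at level %d\n", iter_level);   /* level 2: the 2MiB parent */
        return 0;
}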
arch/x86/kvm/svm/nested.c
@@ -982,7 +982,7 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
 	if (svm->tsc_ratio_msr != kvm_default_tsc_scaling_ratio) {
 		WARN_ON(!svm->tsc_scaling_enabled);
 		vcpu->arch.tsc_scaling_ratio = vcpu->arch.l1_tsc_scaling_ratio;
-		svm_write_tsc_multiplier(vcpu, vcpu->arch.tsc_scaling_ratio);
+		__svm_write_tsc_multiplier(vcpu->arch.tsc_scaling_ratio);
 	}
 
 	svm->nested.ctl.nested_cr3 = 0;
@@ -1387,7 +1387,7 @@ void nested_svm_update_tsc_ratio_msr(struct kvm_vcpu *vcpu)
 	vcpu->arch.tsc_scaling_ratio =
 		kvm_calc_nested_tsc_multiplier(vcpu->arch.l1_tsc_scaling_ratio,
 					       svm->tsc_ratio_msr);
-	svm_write_tsc_multiplier(vcpu, vcpu->arch.tsc_scaling_ratio);
+	__svm_write_tsc_multiplier(vcpu->arch.tsc_scaling_ratio);
 }
 
 /* Inverse operation of nested_copy_vmcb_control_to_cache(). asid is copied too. */
arch/x86/kvm/svm/svm.c
@@ -465,11 +465,24 @@ static int has_svm(void)
 	return 1;
 }
 
+void __svm_write_tsc_multiplier(u64 multiplier)
+{
+	preempt_disable();
+
+	if (multiplier == __this_cpu_read(current_tsc_ratio))
+		goto out;
+
+	wrmsrl(MSR_AMD64_TSC_RATIO, multiplier);
+	__this_cpu_write(current_tsc_ratio, multiplier);
+out:
+	preempt_enable();
+}
+
 static void svm_hardware_disable(void)
 {
 	/* Make sure we clean up behind us */
 	if (tsc_scaling)
-		wrmsrl(MSR_AMD64_TSC_RATIO, SVM_TSC_RATIO_DEFAULT);
+		__svm_write_tsc_multiplier(SVM_TSC_RATIO_DEFAULT);
 
 	cpu_svm_disable();
@@ -515,8 +528,7 @@ static int svm_hardware_enable(void)
 		 * Set the default value, even if we don't use TSC scaling
 		 * to avoid having stale value in the msr
 		 */
-		wrmsrl(MSR_AMD64_TSC_RATIO, SVM_TSC_RATIO_DEFAULT);
-		__this_cpu_write(current_tsc_ratio, SVM_TSC_RATIO_DEFAULT);
+		__svm_write_tsc_multiplier(SVM_TSC_RATIO_DEFAULT);
 	}
@@ -999,11 +1011,12 @@ static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
 	vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
 }
 
-void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 multiplier)
+static void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 multiplier)
 {
-	wrmsrl(MSR_AMD64_TSC_RATIO, multiplier);
+	__svm_write_tsc_multiplier(multiplier);
 }
 
 /* Evaluate instruction intercepts that depend on guest CPUID features. */
 static void svm_recalc_instruction_intercepts(struct kvm_vcpu *vcpu,
 					      struct vcpu_svm *svm)
@@ -1363,13 +1376,8 @@ static void svm_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
 		sev_es_prepare_switch_to_guest(hostsa);
 	}
 
-	if (tsc_scaling) {
-		u64 tsc_ratio = vcpu->arch.tsc_scaling_ratio;
-		if (tsc_ratio != __this_cpu_read(current_tsc_ratio)) {
-			__this_cpu_write(current_tsc_ratio, tsc_ratio);
-			wrmsrl(MSR_AMD64_TSC_RATIO, tsc_ratio);
-		}
-	}
+	if (tsc_scaling)
+		__svm_write_tsc_multiplier(vcpu->arch.tsc_scaling_ratio);
 
 	if (likely(tsc_aux_uret_slot >= 0))
 		kvm_set_user_return_msr(tsc_aux_uret_slot, svm->tsc_aux, -1ull);
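A hedged user-space model of what the new __svm_write_tsc_multiplier() helper centralizes: cache the last value written on this CPU and skip the relatively expensive WRMSR when it has not changed. write_msr() and the plain global below are stand-ins for wrmsrl(MSR_AMD64_TSC_RATIO, ...) and the per-CPU current_tsc_ratio; they are not kernel APIs.

#include <stdint.h>
#include <stdio.h>

/* Illustrative model only; no preemption or per-CPU handling here. */
static uint64_t current_tsc_ratio;

static void write_msr(uint64_t val)
{
        printf("WRMSR TSC_RATIO <- %#llx\n", (unsigned long long)val);
}

static void write_tsc_multiplier(uint64_t multiplier)
{
        if (multiplier == current_tsc_ratio)
                return;                         /* cached value is current: skip */
        write_msr(multiplier);
        current_tsc_ratio = multiplier;
}

int main(void)
{
        uint64_t def = 1ull << 32;              /* 1.0 ratio in 8.32 fixed point */

        write_tsc_multiplier(def);              /* one WRMSR */
        write_tsc_multiplier(def);              /* no WRMSR: unchanged */
        write_tsc_multiplier(3ull << 31);       /* 1.5 ratio: one WRMSR */
        return 0;
}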
arch/x86/kvm/svm/svm.h
@@ -590,7 +590,7 @@ int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
 			       bool has_error_code, u32 error_code);
 int nested_svm_exit_special(struct vcpu_svm *svm);
 void nested_svm_update_tsc_ratio_msr(struct kvm_vcpu *vcpu);
-void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 multiplier);
+void __svm_write_tsc_multiplier(u64 multiplier);
 void nested_copy_vmcb_control_to_cache(struct vcpu_svm *svm,
 				       struct vmcb_control_area *control);
 void nested_copy_vmcb_save_to_cache(struct vcpu_svm *svm,
kernel/entry/kvm.c
@@ -9,12 +9,6 @@ static int xfer_to_guest_mode_work(struct kvm_vcpu *vcpu, unsigned long ti_work)
 		int ret;
 
-		if (ti_work & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) {
-			clear_notify_signal();
-			if (task_work_pending(current))
-				task_work_run();
-		}
-
 		if (ti_work & _TIF_SIGPENDING) {
 			kvm_handle_signal_exit(vcpu);
 			return -EINTR;
 		}
tools/testing/selftests/kvm/x86_64/hyperv_clock.c
@@ -44,7 +44,7 @@ static inline void nop_loop(void)
 {
 	int i;
 
-	for (i = 0; i < 1000000; i++)
+	for (i = 0; i < 100000000; i++)
 		asm volatile("nop");
 }
@@ -56,12 +56,14 @@ static inline void check_tsc_msr_rdtsc(void)
 	tsc_freq = rdmsr(HV_X64_MSR_TSC_FREQUENCY);
 	GUEST_ASSERT(tsc_freq > 0);
 
-	/* First, check MSR-based clocksource */
+	/* For increased accuracy, take mean rdtsc() before and afrer rdmsr() */
 	r1 = rdtsc();
 	t1 = rdmsr(HV_X64_MSR_TIME_REF_COUNT);
+	r1 = (r1 + rdtsc()) / 2;
 	nop_loop();
 	r2 = rdtsc();
 	t2 = rdmsr(HV_X64_MSR_TIME_REF_COUNT);
+	r2 = (r2 + rdtsc()) / 2;
 
 	GUEST_ASSERT(r2 > r1 && t2 > t1);
@@ -181,12 +183,14 @@ static void host_check_tsc_msr_rdtsc(struct kvm_vm *vm)
 	tsc_freq = vcpu_get_msr(vm, VCPU_ID, HV_X64_MSR_TSC_FREQUENCY);
 	TEST_ASSERT(tsc_freq > 0, "TSC frequency must be nonzero");
 
-	/* First, check MSR-based clocksource */
+	/* For increased accuracy, take mean rdtsc() before and afrer ioctl */
 	r1 = rdtsc();
 	t1 = vcpu_get_msr(vm, VCPU_ID, HV_X64_MSR_TIME_REF_COUNT);
+	r1 = (r1 + rdtsc()) / 2;
 	nop_loop();
 	r2 = rdtsc();
 	t2 = vcpu_get_msr(vm, VCPU_ID, HV_X64_MSR_TIME_REF_COUNT);
+	r2 = (r2 + rdtsc()) / 2;
 
 	TEST_ASSERT(t2 > t1, "Time reference MSR is not monotonic (%ld <= %ld)", t1, t2);
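The selftest change uses a common measurement trick: bracket a slow read (an MSR read in the guest, an ioctl on the host) with rdtsc() and take the midpoint as that read's TSC timestamp. A hedged stand-alone sketch follows; slow_read_ref_counter() is a placeholder, not a real API.

#include <stdint.h>
#include <stdio.h>
#include <x86intrin.h>

/* Illustrative only: slow_read_ref_counter() stands in for the MSR read
 * (guest) or vcpu_get_msr() ioctl (host) being timestamped. */
static uint64_t slow_read_ref_counter(void)
{
        return 0;
}

/* Return the reference counter; *tsc gets the midpoint of the bracketing
 * rdtsc() reads, a better estimate of when the slow read happened. */
static uint64_t read_ref_counter_timestamped(uint64_t *tsc)
{
        uint64_t r = __rdtsc();
        uint64_t t = slow_read_ref_counter();

        *tsc = (r + __rdtsc()) / 2;
        return t;
}

int main(void)
{
        uint64_t tsc;
        uint64_t ref = read_ref_counter_timestamped(&tsc);

        printf("ref=%llu at tsc=%llu\n",
               (unsigned long long)ref, (unsigned long long)tsc);
        return 0;
}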
virt/kvm/kvm_main.c
@@ -4300,7 +4300,10 @@ static int kvm_ioctl_create_device(struct kvm *kvm,
 		kvm_put_kvm_no_destroy(kvm);
 		mutex_lock(&kvm->lock);
 		list_del(&dev->vm_node);
+		if (ops->release)
+			ops->release(dev);
 		mutex_unlock(&kvm->lock);
-		ops->destroy(dev);
+		if (ops->destroy)
+			ops->destroy(dev);
 		return ret;
 	}
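A hedged reduction of the error-path pattern above: device ops may leave release() or destroy() unimplemented, so the cleanup path checks each pointer before calling it. The struct and names below are invented for illustration and do not mirror the kvm_device_ops definition.

#include <stddef.h>
#include <stdio.h>

/* Invented for illustration only. */
struct dev_ops {
        void (*release)(void *dev);
        void (*destroy)(void *dev);
};

static void create_failed_cleanup(const struct dev_ops *ops, void *dev)
{
        if (ops->release)
                ops->release(dev);
        if (ops->destroy)
                ops->destroy(dev);
}

static void say_destroy(void *dev)
{
        (void)dev;
        puts("destroy");
}

int main(void)
{
        struct dev_ops ops = { .release = NULL, .destroy = say_destroy };

        create_failed_cleanup(&ops, NULL);      /* no NULL deref: release is unset */
        return 0;
}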