Kirill Smelkov / linux / Commits

Commit 9511b5a0, authored Sep 30, 2022 by Michael Ellerman

Merge branch 'topic/ppc-kvm' into next

Merge some KVM commits we are keeping in our topic branch.

Parents: 335e1a91, 1a5486b3
Showing 2 changed files with 76 additions and 33 deletions:

    arch/powerpc/kvm/book3s_hv.c    +76  -32
    arch/powerpc/kvm/powerpc.c       +0   -1
arch/powerpc/kvm/book3s_hv.c

@@ -249,6 +249,7 @@ static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
 /*
  * We use the vcpu_load/put functions to measure stolen time.
+ *
  * Stolen time is counted as time when either the vcpu is able to
  * run as part of a virtual core, but the task running the vcore
  * is preempted or sleeping, or when the vcpu needs something done
@@ -278,6 +279,12 @@ static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
  * lock. The stolen times are measured in units of timebase ticks.
  * (Note that the != TB_NIL checks below are purely defensive;
  * they should never fail.)
+ *
+ * The POWER9 path is simpler, one vcpu per virtual core so the
+ * former case does not exist. If a vcpu is preempted when it is
+ * BUSY_IN_HOST and not ceded or otherwise blocked, then accumulate
+ * the stolen cycles in busy_stolen. RUNNING is not a preemptible
+ * state in the P9 path.
  */
 static void kvmppc_core_start_stolen(struct kvmppc_vcore *vc, u64 tb)
@@ -311,8 +318,14 @@ static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu)
 	unsigned long flags;
 	u64 now;
 
-	if (cpu_has_feature(CPU_FTR_ARCH_300))
+	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+		if (vcpu->arch.busy_preempt != TB_NIL) {
+			WARN_ON_ONCE(vcpu->arch.state != KVMPPC_VCPU_BUSY_IN_HOST);
+			vc->stolen_tb += mftb() - vcpu->arch.busy_preempt;
+			vcpu->arch.busy_preempt = TB_NIL;
+		}
 		return;
+	}
 
 	now = mftb();
@@ -340,8 +353,21 @@ static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu)
 	unsigned long flags;
 	u64 now;
 
-	if (cpu_has_feature(CPU_FTR_ARCH_300))
+	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+		/*
+		 * In the P9 path, RUNNABLE is not preemptible
+		 * (nor takes host interrupts)
+		 */
+		WARN_ON_ONCE(vcpu->arch.state == KVMPPC_VCPU_RUNNABLE);
+		/*
+		 * Account stolen time when preempted while the vcpu task is
+		 * running in the kernel (but not in qemu, which is INACTIVE).
+		 */
+		if (task_is_running(current) &&
+		    vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST)
+			vcpu->arch.busy_preempt = mftb();
 		return;
+	}
 
 	now = mftb();
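Aside: to make the stolen-time bookkeeping in the two hunks above easier to follow in isolation, here is a minimal standalone sketch of the same pattern. It is illustrative only, not part of the commit; the sketch_* names are hypothetical and SKETCH_TB_NIL stands in for the kernel's no-timestamp marker TB_NIL.

    #include <stdint.h>

    #define SKETCH_TB_NIL (~(uint64_t)0)

    struct sketch_vcore {
    	uint64_t stolen_tb;	/* accumulated stolen timebase ticks */
    };

    struct sketch_vcpu {
    	uint64_t busy_preempt;	/* TB value at preemption, or SKETCH_TB_NIL */
    };

    /* vcpu_put while busy in host: remember when the vcpu task was preempted. */
    static void sketch_vcpu_put(struct sketch_vcpu *v, uint64_t tb_now)
    {
    	v->busy_preempt = tb_now;
    }

    /* vcpu_load: bank the preempted interval as stolen time, then clear the mark. */
    static void sketch_vcpu_load(struct sketch_vcore *vc, struct sketch_vcpu *v,
    			     uint64_t tb_now)
    {
    	if (v->busy_preempt != SKETCH_TB_NIL) {
    		vc->stolen_tb += tb_now - v->busy_preempt;
    		v->busy_preempt = SKETCH_TB_NIL;
    	}
    }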
@@ -740,6 +766,18 @@ static void __kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
 	vcpu->arch.dtl.dirty = true;
 }
 
+static void kvmppc_create_dtl_entry_p9(struct kvm_vcpu *vcpu,
+				       struct kvmppc_vcore *vc,
+				       u64 now)
+{
+	unsigned long stolen;
+
+	stolen = vc->stolen_tb - vcpu->arch.stolen_logged;
+	vcpu->arch.stolen_logged = vc->stolen_tb;
+
+	__kvmppc_create_dtl_entry(vcpu, vc->pcpu, now, stolen);
+}
+
 static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
 				    struct kvmppc_vcore *vc)
 {
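Aside: kvmppc_create_dtl_entry_p9() above uses a watermark pattern: each dispatch trace log entry reports only the stolen ticks accumulated since the previous entry, then advances the per-vcpu watermark. A standalone sketch of the same idea, with hypothetical names:

    #include <stdint.h>

    struct sketch_stolen_log {
    	uint64_t total;		/* ever-growing total, like vc->stolen_tb */
    	uint64_t logged;	/* watermark: value of total at the last entry */
    };

    /* Return ticks stolen since the previous entry and advance the watermark. */
    static uint64_t sketch_stolen_delta(struct sketch_stolen_log *s)
    {
    	uint64_t delta = s->total - s->logged;

    	s->logged = s->total;
    	return delta;
    }

Because the counters are unsigned, the subtraction stays correct across wrap-around, which is why plain subtraction suffices in the kernel code too.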
@@ -2517,10 +2555,24 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
 		r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len);
 		break;
-	case KVM_REG_PPC_TB_OFFSET:
+	case KVM_REG_PPC_TB_OFFSET: {
 		/* round up to multiple of 2^24 */
-		vcpu->arch.vcore->tb_offset =
-			ALIGN(set_reg_val(id, *val), 1UL << 24);
+		u64 tb_offset = ALIGN(set_reg_val(id, *val), 1UL << 24);
+
+		/*
+		 * Now that we know the timebase offset, update the
+		 * decrementer expiry with a guest timebase value. If
+		 * the userspace does not set DEC_EXPIRY, this ensures
+		 * a migrated vcpu at least starts with an expired
+		 * decrementer, which is better than a large one that
+		 * causes a hang.
+		 */
+		if (!vcpu->arch.dec_expires && tb_offset)
+			vcpu->arch.dec_expires = get_tb() + tb_offset;
+
+		vcpu->arch.vcore->tb_offset = tb_offset;
 		break;
+	}
 	case KVM_REG_PPC_LPCR:
 		kvmppc_set_lpcr(vcpu, set_reg_val(id, *val), true);
 		break;
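Aside on the rounding: ALIGN(x, 1UL << 24) rounds x up to the next multiple of 2^24 timebase ticks, so an already-aligned offset is unchanged and zero stays zero. A small userspace re-implementation, assuming the usual power-of-two round-up trick the kernel macro is built on, shows the behaviour:

    #include <stdio.h>
    #include <stdint.h>

    /* Round x up to a multiple of a, where a is a power of two. */
    #define ALIGN_UP(x, a) (((x) + ((a) - 1)) & ~((a) - 1))

    int main(void)
    {
    	uint64_t a = UINT64_C(1) << 24;

    	printf("0x%llx\n", (unsigned long long)ALIGN_UP(UINT64_C(1), a));         /* 0x1000000 */
    	printf("0x%llx\n", (unsigned long long)ALIGN_UP(UINT64_C(0x1000000), a)); /* 0x1000000: already aligned */
    	printf("0x%llx\n", (unsigned long long)ALIGN_UP(UINT64_C(0x1234567), a)); /* 0x2000000 */
    	return 0;
    }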
@@ -3840,23 +3892,17 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
 	for (sub = 0; sub < core_info.n_subcores; ++sub)
 		spin_unlock(&core_info.vc[sub]->lock);
 
-	guest_enter_irqoff();
+	guest_timing_enter_irqoff();
 
 	srcu_idx = srcu_read_lock(&vc->kvm->srcu);
 
+	guest_state_enter_irqoff();
 	this_cpu_disable_ftrace();
 
-	/*
-	 * Interrupts will be enabled once we get into the guest,
-	 * so tell lockdep that we're about to enable interrupts.
-	 */
-	trace_hardirqs_on();
-
 	trap = __kvmppc_vcore_entry();
 
-	trace_hardirqs_off();
-
 	this_cpu_enable_ftrace();
+	guest_state_exit_irqoff();
 
 	srcu_read_unlock(&vc->kvm->srcu, srcu_idx);
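Aside: this hunk replaces the hand-rolled accounting (guest_enter_irqoff() plus manual trace_hardirqs_on()/trace_hardirqs_off()) with the generic KVM entry/exit helpers from include/linux/kvm_host.h. As I read those helpers, the intended nesting is timing outside, state inside, with guest_state_enter_irqoff() also performing the lockdep hardirqs-on bookkeeping that the removed trace_hardirqs_on() used to do by hand. An ordering outline, not runnable code:

    guest_timing_enter_irqoff();	/* start attributing CPU time to the guest */

    guest_state_enter_irqoff();		/* context tracking enters guest mode;
    					 * lockdep told irqs will be on in the guest */
    /* ... low-level entry: __kvmppc_vcore_entry() or kvmhv_p9_guest_entry() ... */
    guest_state_exit_irqoff();		/* back to host context */

    /* (optionally service pending tick IRQs here, as the later hunks do) */
    guest_timing_exit_irqoff();		/* stop attributing CPU time to the guest */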
@@ -3891,11 +3937,10 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
 	kvmppc_set_host_core(pcpu);
 
-	context_tracking_guest_exit();
 	if (!vtime_accounting_enabled_this_cpu()) {
 		local_irq_enable();
 		/*
-		 * Service IRQs here before vtime_account_guest_exit() so any
+		 * Service IRQs here before guest_timing_exit_irqoff() so any
 		 * ticks that occurred while running the guest are accounted to
 		 * the guest. If vtime accounting is enabled, accounting uses
 		 * TB rather than ticks, so it can be done without enabling
@@ -3904,7 +3949,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
 		 */
 		local_irq_disable();
 	}
-	vtime_account_guest_exit();
+	guest_timing_exit_irqoff();
 
 	local_irq_enable();
@@ -4520,7 +4565,6 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
 	vc = vcpu->arch.vcore;
 	vcpu->arch.ceded = 0;
 	vcpu->arch.run_task = current;
-	vcpu->arch.state = KVMPPC_VCPU_RUNNABLE;
 	vcpu->arch.last_inst = KVM_INST_FETCH_FAILED;
 
 	/* See if the MMU is ready to go */
@@ -4547,6 +4591,8 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
 	/* flags save not required, but irq_pmu has no disable/enable API */
 	powerpc_local_irq_pmu_save(flags);
 
+	vcpu->arch.state = KVMPPC_VCPU_RUNNABLE;
+
 	if (signal_pending(current))
 		goto sigpend;
 	if (need_resched() || !kvm->arch.mmu_ready)
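Aside: for orientation, the vcpu->arch.state transitions in this series, as I read them from the hunks (the state names come from arch/powerpc/include/asm/kvm_host.h):

    /*
     * KVMPPC_VCPU_RUNNABLE      -- about to enter / running the guest; on the
     *                              P9 path this is not preemptible and takes
     *                              no host interrupts, so it is now set only
     *                              after PMU IRQs are masked (hunk above).
     * KVMPPC_VCPU_BUSY_IN_HOST  -- host kernel working on the vcpu's behalf;
     *                              set again on every exit path (hunks below),
     *                              and preemption in this state is what
     *                              busy_preempt / stolen_tb count as stolen.
     * (Time spent in qemu, which the comments above call INACTIVE, is not
     *  counted as stolen.)
     */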
@@ -4591,47 +4637,44 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
tb
=
mftb
();
__kvmppc_create_dtl_entry
(
vcpu
,
pcpu
,
tb
+
vc
->
tb_offset
,
0
);
kvmppc_create_dtl_entry_p9
(
vcpu
,
vc
,
tb
+
vc
->
tb_offset
);
trace_kvm_guest_enter
(
vcpu
);
guest_enter_irqoff
();
guest_
timing_
enter_irqoff
();
srcu_idx
=
srcu_read_lock
(
&
kvm
->
srcu
);
guest_state_enter_irqoff
();
this_cpu_disable_ftrace
();
/* Tell lockdep that we're about to enable interrupts */
trace_hardirqs_on
();
trap
=
kvmhv_p9_guest_entry
(
vcpu
,
time_limit
,
lpcr
,
&
tb
);
vcpu
->
arch
.
trap
=
trap
;
trace_hardirqs_off
();
this_cpu_enable_ftrace
();
guest_state_exit_irqoff
();
srcu_read_unlock
(
&
kvm
->
srcu
,
srcu_idx
);
set_irq_happened
(
trap
);
context_tracking_guest_exit
();
vcpu
->
cpu
=
-
1
;
vcpu
->
arch
.
thread_cpu
=
-
1
;
vcpu
->
arch
.
state
=
KVMPPC_VCPU_BUSY_IN_HOST
;
if
(
!
vtime_accounting_enabled_this_cpu
())
{
local_irq_enable
(
);
powerpc_local_irq_pmu_restore
(
flags
);
/*
* Service IRQs here before
vtime_account_guest_exit
() so any
* Service IRQs here before
guest_timing_exit_irqoff
() so any
* ticks that occurred while running the guest are accounted to
* the guest. If vtime accounting is enabled, accounting uses
* TB rather than ticks, so it can be done without enabling
* interrupts here, which has the problem that it accounts
* interrupt processing overhead to the host.
*/
local_irq_disable
(
);
powerpc_local_irq_pmu_save
(
flags
);
}
vtime_account_guest_exit
();
vcpu
->
cpu
=
-
1
;
vcpu
->
arch
.
thread_cpu
=
-
1
;
guest_timing_exit_irqoff
();
powerpc_local_irq_pmu_restore
(
flags
);
...
...
@@ -4694,6 +4737,7 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
  out:
 	vcpu->cpu = -1;
 	vcpu->arch.thread_cpu = -1;
+	vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
 	powerpc_local_irq_pmu_restore(flags);
 	preempt_enable();
 	goto done;
arch/powerpc/kvm/powerpc.c

@@ -786,7 +786,6 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 	hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
 	vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
-	vcpu->arch.dec_expires = get_tb();
 
 #ifdef CONFIG_KVM_EXIT_TIMING
 	mutex_init(&vcpu->arch.exit_timing_lock);