Commit e121ee6b authored Feb 22, 2019 by Michael Ellerman
Merge branch 'topic/ppc-kvm' into next

Merge commits we're sharing with kvm-ppc tree.

parents d0055df0 c0577201
Showing 8 changed files with 54 additions and 91 deletions (+54 -91)
arch/powerpc/include/asm/kvm_ppc.h        +2  -1
arch/powerpc/include/asm/mce.h            +1  -1
arch/powerpc/kernel/mce.c                 +5  -3
arch/powerpc/kvm/book3s.c                 +7  -0
arch/powerpc/kvm/book3s_hv.c              +21 -4
arch/powerpc/kvm/book3s_hv_ras.c          +14 -44
arch/powerpc/kvm/book3s_hv_rmhandlers.S   +3  -37
arch/powerpc/platforms/powernv/opal.c     +1  -1
arch/powerpc/include/asm/kvm_ppc.h

@@ -141,6 +141,7 @@ extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);
 extern int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu);
 extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);
+extern void kvmppc_core_queue_machine_check(struct kvm_vcpu *vcpu, ulong flags);
 extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags);
 extern void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu);
 extern void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu);
@@ -632,7 +633,7 @@ long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
 			  unsigned int yield_count);
 long kvmppc_h_random(struct kvm_vcpu *vcpu);
 void kvmhv_commence_exit(int trap);
-long kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu);
+void kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu);
 void kvmppc_subcore_enter_guest(void);
 void kvmppc_subcore_exit_guest(void);
 long kvmppc_realmode_hmi_handler(void);
arch/powerpc/include/asm/mce.h

@@ -209,7 +209,7 @@ extern int get_mce_event(struct machine_check_event *mce, bool release);
 extern void release_mce_event(void);
 extern void machine_check_queue_event(void);
 extern void machine_check_print_event_info(struct machine_check_event *evt,
-				       bool user_mode);
+				       bool user_mode, bool in_guest);
 #ifdef CONFIG_PPC_BOOK3S_64
 void flush_and_reload_slb(void);
 #endif /* CONFIG_PPC_BOOK3S_64 */
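Note: because machine_check_print_event_info() now takes a third argument, every caller has to state whether the event was taken while a guest was running; the mce.c, book3s_hv.c and opal.c hunks below update the in-tree callers. Purely as an illustration of the new prototype (assuming a populated event and a struct pt_regs *regs in scope, as in opal_machine_check() further down), a host-side call looks like:

	struct machine_check_event evt;

	if (get_mce_event(&evt, MCE_EVENT_RELEASE))
		/* third argument false: the event was not taken while a guest was running */
		machine_check_print_event_info(&evt, user_mode(regs), false);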
arch/powerpc/kernel/mce.c

@@ -301,13 +301,13 @@ static void machine_check_process_queued_event(struct irq_work *work)
 	while (__this_cpu_read(mce_queue_count) > 0) {
 		index = __this_cpu_read(mce_queue_count) - 1;
 		evt = this_cpu_ptr(&mce_event_queue[index]);
-		machine_check_print_event_info(evt, false);
+		machine_check_print_event_info(evt, false, false);
 		__this_cpu_dec(mce_queue_count);
 	}
 }
 
 void machine_check_print_event_info(struct machine_check_event *evt,
-				    bool user_mode)
+				    bool user_mode, bool in_guest)
 {
 	const char *level, *sevstr, *subtype;
 	static const char *mc_ue_types[] = {
@@ -387,7 +387,9 @@ void machine_check_print_event_info(struct machine_check_event *evt,
 	       evt->disposition == MCE_DISPOSITION_RECOVERED ?
 		"Recovered" : "Not recovered");
 
-	if (user_mode) {
+	if (in_guest) {
+		printk("%s  Guest NIP: %016llx\n", level, evt->srr0);
+	} else if (user_mode) {
 		printk("%s  NIP: [%016llx] PID: %d Comm: %s\n", level,
 			evt->srr0, current->pid, current->comm);
 	} else {
arch/powerpc/kvm/book3s.c

@@ -192,6 +192,13 @@ void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
 }
 EXPORT_SYMBOL_GPL(kvmppc_book3s_queue_irqprio);
 
+void kvmppc_core_queue_machine_check(struct kvm_vcpu *vcpu, ulong flags)
+{
+	/* might as well deliver this straight away */
+	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_MACHINE_CHECK, flags);
+}
+EXPORT_SYMBOL_GPL(kvmppc_core_queue_machine_check);
+
 void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags)
 {
 	/* might as well deliver this straight away */
arch/powerpc/kvm/book3s_hv.c

@@ -1215,6 +1215,22 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		r = RESUME_GUEST;
 		break;
 	case BOOK3S_INTERRUPT_MACHINE_CHECK:
+		/* Print the MCE event to host console. */
+		machine_check_print_event_info(&vcpu->arch.mce_evt, false, true);
+
+		/*
+		 * If the guest can do FWNMI, exit to userspace so it can
+		 * deliver a FWNMI to the guest.
+		 * Otherwise we synthesize a machine check for the guest
+		 * so that it knows that the machine check occurred.
+		 */
+		if (!vcpu->kvm->arch.fwnmi_enabled) {
+			ulong flags = vcpu->arch.shregs.msr & 0x083c0000;
+			kvmppc_core_queue_machine_check(vcpu, flags);
+			r = RESUME_GUEST;
+			break;
+		}
+
 		/* Exit to guest with KVM_EXIT_NMI as exit reason */
 		run->exit_reason = KVM_EXIT_NMI;
 		run->hw.hardware_exit_reason = vcpu->arch.trap;
@@ -1227,8 +1243,6 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			run->flags |= KVM_RUN_PPC_NMI_DISP_NOT_RECOV;
 
 		r = RESUME_HOST;
-		/* Print the MCE event to host console. */
-		machine_check_print_event_info(&vcpu->arch.mce_evt, false);
 		break;
 	case BOOK3S_INTERRUPT_PROGRAM:
 	{
@@ -1392,7 +1406,7 @@ static int kvmppc_handle_nested_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		/* Pass the machine check to the L1 guest */
 		r = RESUME_HOST;
 		/* Print the MCE event to host console. */
-		machine_check_print_event_info(&vcpu->arch.mce_evt, false);
+		machine_check_print_event_info(&vcpu->arch.mce_evt, false, true);
 		break;
 	/*
 	 * We get these next two if the guest accesses a page which it thinks
@@ -3455,6 +3469,7 @@ int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
 	unsigned long host_dscr = mfspr(SPRN_DSCR);
 	unsigned long host_tidr = mfspr(SPRN_TIDR);
 	unsigned long host_iamr = mfspr(SPRN_IAMR);
+	unsigned long host_amr = mfspr(SPRN_AMR);
 	s64 dec;
 	u64 tb;
 	int trap, save_pmu;
@@ -3571,13 +3586,15 @@ int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
 	mtspr(SPRN_PSPB, 0);
 	mtspr(SPRN_WORT, 0);
-	mtspr(SPRN_AMR, 0);
 	mtspr(SPRN_UAMOR, 0);
 	mtspr(SPRN_DSCR, host_dscr);
 	mtspr(SPRN_TIDR, host_tidr);
 	mtspr(SPRN_IAMR, host_iamr);
 	mtspr(SPRN_PSPB, 0);
 
+	if (host_amr != vcpu->arch.amr)
+		mtspr(SPRN_AMR, host_amr);
+
 	msr_check_and_set(MSR_FP | MSR_VEC | MSR_VSX);
 	store_fp_state(&vcpu->arch.fp);
 #ifdef CONFIG_ALTIVEC
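For readability, the first two book3s_hv.c hunks above can be condensed into the following sketch of the new BOOK3S_INTERRUPT_MACHINE_CHECK handling. This is a restatement of the diff, not additional code; the KVM_RUN_PPC_NMI_DISP_* bookkeeping visible in the hunk context is elided here.

	case BOOK3S_INTERRUPT_MACHINE_CHECK:
		/* always log the event on the host console, flagged as guest mode */
		machine_check_print_event_info(&vcpu->arch.mce_evt, false, true);

		if (!vcpu->kvm->arch.fwnmi_enabled) {
			/* old QEMU: synthesize a machine check directly for the guest */
			kvmppc_core_queue_machine_check(vcpu,
					vcpu->arch.shregs.msr & 0x083c0000);
			r = RESUME_GUEST;
			break;
		}

		/* FWNMI-capable guest: exit to userspace, which delivers an FWNMI */
		run->exit_reason = KVM_EXIT_NMI;
		run->hw.hardware_exit_reason = vcpu->arch.trap;
		r = RESUME_HOST;
		break;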
arch/powerpc/kvm/book3s_hv_ras.c

@@ -66,10 +66,8 @@ static void reload_slb(struct kvm_vcpu *vcpu)
 /*
  * On POWER7, see if we can handle a machine check that occurred inside
  * the guest in real mode, without switching to the host partition.
- *
- * Returns: 0 => exit guest, 1 => deliver machine check to guest
  */
-static long kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu)
+static void kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu)
 {
 	unsigned long srr1 = vcpu->arch.shregs.msr;
 	struct machine_check_event mce_evt;
@@ -111,52 +109,24 @@ static void kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu)
 	}
 
 	/*
-	 * See if we have already handled the condition in the linux host.
-	 * We assume that if the condition is recovered then linux host
-	 * will have generated an error log event that we will pick
-	 * up and log later.
-	 * Don't release mce event now. We will queue up the event so that
-	 * we can log the MCE event info on host console.
+	 * Now get the event and stash it in the vcpu struct so it can
+	 * be handled by the primary thread in virtual mode. We can't
+	 * call machine_check_queue_event() here if we are running on
+	 * an offline secondary thread.
 	 */
-	if (!get_mce_event(&mce_evt, MCE_EVENT_DONTRELEASE))
-		goto out;
-
-	if (mce_evt.version == MCE_V1 &&
-	    (mce_evt.severity == MCE_SEV_NO_ERROR ||
-	     mce_evt.disposition == MCE_DISPOSITION_RECOVERED))
-		handled = 1;
-
-out:
-	/*
-	 * For guest that supports FWNMI capability, hook the MCE event into
-	 * vcpu structure. We are going to exit the guest with KVM_EXIT_NMI
-	 * exit reason. On our way to exit we will pull this event from vcpu
-	 * structure and print it from thread 0 of the core/subcore.
-	 *
-	 * For guest that does not support FWNMI capability (old QEMU):
-	 * We are now going enter guest either through machine check
-	 * interrupt (for unhandled errors) or will continue from
-	 * current HSRR0 (for handled errors) in guest. Hence
-	 * queue up the event so that we can log it from host console later.
-	 */
-	if (vcpu->kvm->arch.fwnmi_enabled) {
-		/*
-		 * Hook up the mce event on to vcpu structure.
-		 * First clear the old event.
-		 */
-		memset(&vcpu->arch.mce_evt, 0, sizeof(vcpu->arch.mce_evt));
-		if (get_mce_event(&mce_evt, MCE_EVENT_RELEASE)) {
-			vcpu->arch.mce_evt = mce_evt;
-		}
-	} else
-		machine_check_queue_event();
+	if (get_mce_event(&mce_evt, MCE_EVENT_RELEASE)) {
+		if (handled && mce_evt.version == MCE_V1)
+			mce_evt.disposition = MCE_DISPOSITION_RECOVERED;
+	} else {
+		memset(&mce_evt, 0, sizeof(mce_evt));
+	}
 
-	return handled;
+	vcpu->arch.mce_evt = mce_evt;
 }
 
-long kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu)
+void kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu)
 {
-	return kvmppc_realmode_mc_power7(vcpu);
+	kvmppc_realmode_mc_power7(vcpu);
 }
 
 /* Check if dynamic split is in force and return subcore size accordingly. */
arch/powerpc/kvm/book3s_hv_rmhandlers.S

@@ -2826,49 +2826,15 @@ kvm_cede_exit:
 #endif /* CONFIG_KVM_XICS */
 3:	b	guest_exit_cont
 
-	/* Try to handle a machine check in real mode */
+	/* Try to do machine check recovery in real mode */
 machine_check_realmode:
 	mr	r3, r9		/* get vcpu pointer */
 	bl	kvmppc_realmode_machine_check
 	nop
+	/* all machine checks go to virtual mode for further handling */
 	ld	r9, HSTATE_KVM_VCPU(r13)
 	li	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
-	/*
-	 * For the guest that is FWNMI capable, deliver all the MCE errors
-	 * (handled/unhandled) by exiting the guest with KVM_EXIT_NMI exit
-	 * reason. This new approach injects machine check errors in guest
-	 * address space to guest with additional information in the form
-	 * of RTAS event, thus enabling guest kernel to suitably handle
-	 * such errors.
-	 *
-	 * For the guest that is not FWNMI capable (old QEMU) fallback
-	 * to old behaviour for backward compatibility:
-	 * Deliver unhandled/fatal (e.g. UE) MCE errors to guest either
-	 * through machine check interrupt (set HSRR0 to 0x200).
-	 * For handled errors (no-fatal), just go back to guest execution
-	 * with current HSRR0.
-	 * if we receive machine check with MSR(RI=0) then deliver it to
-	 * guest as machine check causing guest to crash.
-	 */
-	ld	r11, VCPU_MSR(r9)
-	rldicl.	r0, r11, 64-MSR_HV_LG, 63	/* check if it happened in HV mode */
-	bne	guest_exit_cont			/* if so, exit to host */
-	/* Check if guest is capable of handling NMI exit */
-	ld	r10, VCPU_KVM(r9)
-	lbz	r10, KVM_FWNMI(r10)
-	cmpdi	r10, 1				/* FWNMI capable? */
-	beq	guest_exit_cont			/* if so, exit with KVM_EXIT_NMI. */
-
-	/* if not, fall through for backward compatibility. */
-	andi.	r10, r11, MSR_RI		/* check for unrecoverable exception */
-	beq	1f				/* Deliver a machine check to guest */
-	ld	r10, VCPU_PC(r9)
-	cmpdi	r3, 0				/* Did we handle MCE ? */
-	bne	2f				/* Continue guest execution. */
-	/* If not, deliver a machine check. SRR0/1 are already set */
-1:	li	r10, BOOK3S_INTERRUPT_MACHINE_CHECK
-	bl	kvmppc_msr_interrupt
-2:	b	fast_interrupt_c_return
+	b	guest_exit_cont
 
 /*
  * Call C code to handle a HMI in real mode.
arch/powerpc/platforms/powernv/opal.c

@@ -586,7 +586,7 @@ int opal_machine_check(struct pt_regs *regs)
 			evt.version);
 		return 0;
 	}
-	machine_check_print_event_info(&evt, user_mode(regs));
+	machine_check_print_event_info(&evt, user_mode(regs), false);
 
 	if (opal_recover_mce(regs, &evt))
 		return 1;