Commit a394cf6e authored Jul 28, 2020 by Marc Zyngier

Merge branch 'kvm-arm64/misc-5.9' into kvmarm-master/next-WIP

Signed-off-by: Marc Zyngier <maz@kernel.org>
Parents: c9dc9500 a59a2edb
Showing 16 changed files with 85 additions and 90 deletions.
arch/arm64/Kconfig                        +0  -16
arch/arm64/include/asm/kvm_asm.h          +0  -1
arch/arm64/include/asm/kvm_emulate.h      +17 -17
arch/arm64/kernel/cpu_errata.c            +2  -2
arch/arm64/kvm/Kconfig                    +1  -1
arch/arm64/kvm/arm.c                      +0  -1
arch/arm64/kvm/handle_exit.c              +16 -16
arch/arm64/kvm/hyp/aarch32.c              +1  -1
arch/arm64/kvm/hyp/include/hyp/switch.h   +7  -7
arch/arm64/kvm/hyp/vgic-v3-sr.c           +2  -2
arch/arm64/kvm/mmu.c                      +3  -3
arch/arm64/kvm/sys_regs.c                 +14 -14
arch/arm64/kvm/va_layout.c                +1  -1
arch/arm64/kvm/vgic/vgic-irqfd.c          +19 -5
arch/arm64/kvm/vgic/vgic-its.c            +1  -2
arch/arm64/kvm/vgic/vgic-mmio-v3.c        +1  -1
arch/arm64/Kconfig
@@ -1182,22 +1182,6 @@ config HARDEN_BRANCH_PREDICTOR
 
           If unsure, say Y.
 
-config HARDEN_EL2_VECTORS
-        bool "Harden EL2 vector mapping against system register leak" if EXPERT
-        default y
-        help
-          Speculation attacks against some high-performance processors can
-          be used to leak privileged information such as the vector base
-          register, resulting in a potential defeat of the EL2 layout
-          randomization.
-
-          This config option will map the vectors to a fixed location,
-          independent of the EL2 code mapping, so that revealing VBAR_EL2
-          to an attacker does not give away any extra information. This
-          only gets enabled on affected CPUs.
-
-          If unsure, say Y.
-
 config ARM64_SSBD
         bool "Speculative Store Bypass Disable" if EXPERT
         default y
arch/arm64/include/asm/kvm_asm.h
@@ -191,7 +191,6 @@ extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ];
 .macro get_vcpu_ptr vcpu, ctxt
         get_host_ctxt \ctxt, \vcpu
         ldr     \vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
-        kern_hyp_va     \vcpu
 .endm
 
 #endif
arch/arm64/include/asm/kvm_emulate.h
@@ -238,14 +238,14 @@ static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
         return mode != PSR_MODE_EL0t;
 }
 
-static __always_inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
+static __always_inline u32 kvm_vcpu_get_esr(const struct kvm_vcpu *vcpu)
 {
         return vcpu->arch.fault.esr_el2;
 }
 
 static __always_inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
 {
-        u32 esr = kvm_vcpu_get_hsr(vcpu);
+        u32 esr = kvm_vcpu_get_esr(vcpu);
 
         if (esr & ESR_ELx_CV)
                 return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;
@@ -270,64 +270,64 @@ static inline u64 kvm_vcpu_get_disr(const struct kvm_vcpu *vcpu)
 static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
 {
-        return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_xVC_IMM_MASK;
+        return kvm_vcpu_get_esr(vcpu) & ESR_ELx_xVC_IMM_MASK;
 }
 
 static __always_inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
 {
-        return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_ISV);
+        return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_ISV);
 }
 
 static inline unsigned long kvm_vcpu_dabt_iss_nisv_sanitized(const struct kvm_vcpu *vcpu)
 {
-        return kvm_vcpu_get_hsr(vcpu) & (ESR_ELx_CM | ESR_ELx_WNR | ESR_ELx_FSC);
+        return kvm_vcpu_get_esr(vcpu) & (ESR_ELx_CM | ESR_ELx_WNR | ESR_ELx_FSC);
 }
 
 static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
 {
-        return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SSE);
+        return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SSE);
 }
 
 static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
 {
-        return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SF);
+        return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SF);
 }
 
 static __always_inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
 {
-        return (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
+        return (kvm_vcpu_get_esr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
 }
 
 static __always_inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
 {
-        return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW);
+        return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_S1PTW);
 }
 
 static __always_inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
 {
-        return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR) ||
+        return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_WNR) ||
                 kvm_vcpu_dabt_iss1tw(vcpu); /* AF/DBM update */
 }
 
 static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
 {
-        return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_CM);
+        return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_CM);
 }
 
 static __always_inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
 {
-        return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
+        return 1 << ((kvm_vcpu_get_esr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
 }
 
 /* This one is not specific to Data Abort */
 static __always_inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
 {
-        return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_IL);
+        return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_IL);
 }
 
 static __always_inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
 {
-        return ESR_ELx_EC(kvm_vcpu_get_hsr(vcpu));
+        return ESR_ELx_EC(kvm_vcpu_get_esr(vcpu));
 }
 
 static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
@@ -337,12 +337,12 @@ static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
 static __always_inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
 {
-        return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC;
+        return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC;
 }
 
 static __always_inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
 {
-        return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC_TYPE;
+        return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC_TYPE;
 }
 
 static __always_inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
@@ -366,7 +366,7 @@ static __always_inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
 static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
 {
-        u32 esr = kvm_vcpu_get_hsr(vcpu);
+        u32 esr = kvm_vcpu_get_esr(vcpu);
 
         return ESR_ELx_SYS64_ISS_RT(esr);
 }
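
Note on the kvm_emulate.h hunks: the rename is purely mechanical. Every accessor still reads vcpu->arch.fault.esr_el2; only the helper name now matches the AArch64 register (ESR_EL2) instead of the AArch32-era HSR. A minimal, hypothetical caller sketch follows (dabt_is_8byte_write() is not part of this commit; it only shows how the renamed helpers compose):

/*
 * Illustrative only: classify a trapped data abort using the accessors
 * from <asm/kvm_emulate.h> shown in the hunk above.
 */
static bool dabt_is_8byte_write(const struct kvm_vcpu *vcpu)
{
        if (!kvm_vcpu_dabt_isvalid(vcpu))       /* ESR_ELx_ISV clear: no usable ISS */
                return false;

        return kvm_vcpu_dabt_iswrite(vcpu) &&           /* WNR set, or S1PTW walk */
               kvm_vcpu_dabt_get_as(vcpu) == 8;          /* access size in bytes */
}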
arch/arm64/kernel/cpu_errata.c
@@ -637,7 +637,7 @@ has_neoverse_n1_erratum_1542419(const struct arm64_cpu_capabilities *entry,
         return is_midr_in_range(midr, &range) && has_dic;
 }
 
-#if defined(CONFIG_HARDEN_EL2_VECTORS)
+#ifdef CONFIG_RANDOMIZE_BASE
 static const struct midr_range ca57_a72[] = {
         MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
@@ -882,7 +882,7 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
                 .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                 .matches = check_branch_predictor,
         },
-#ifdef CONFIG_HARDEN_EL2_VECTORS
+#ifdef CONFIG_RANDOMIZE_BASE
         {
                 .desc = "EL2 vector hardening",
                 .capability = ARM64_HARDEN_EL2_VECTORS,
arch/arm64/kvm/Kconfig
@@ -58,7 +58,7 @@ config KVM_ARM_PMU
           virtual machines.
 
 config KVM_INDIRECT_VECTORS
-        def_bool HARDEN_BRANCH_PREDICTOR || HARDEN_EL2_VECTORS
+        def_bool HARDEN_BRANCH_PREDICTOR || RANDOMIZE_BASE
 
 endif # KVM
arch/arm64/kvm/arm.c
@@ -456,7 +456,6 @@ static bool need_new_vmid_gen(struct kvm_vmid *vmid)
 
 /**
  * update_vmid - Update the vmid with a valid VMID for the current generation
- * @kvm: The guest that struct vmid belongs to
  * @vmid: The stage-2 VMID information struct
  */
 static void update_vmid(struct kvm_vmid *vmid)
arch/arm64/kvm/handle_exit.c
@@ -89,7 +89,7 @@ static int handle_no_fpsimd(struct kvm_vcpu *vcpu, struct kvm_run *run)
  */
 static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
-        if (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WFx_ISS_WFE) {
+        if (kvm_vcpu_get_esr(vcpu) & ESR_ELx_WFx_ISS_WFE) {
                 trace_kvm_wfx_arm64(*vcpu_pc(vcpu), true);
                 vcpu->stat.wfe_exit_stat++;
                 kvm_vcpu_on_spin(vcpu, vcpu_mode_priv(vcpu));
@@ -119,13 +119,13 @@ static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
  */
 static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
-        u32 hsr = kvm_vcpu_get_hsr(vcpu);
+        u32 esr = kvm_vcpu_get_esr(vcpu);
         int ret = 0;
 
         run->exit_reason = KVM_EXIT_DEBUG;
-        run->debug.arch.hsr = hsr;
+        run->debug.arch.hsr = esr;
 
-        switch (ESR_ELx_EC(hsr)) {
+        switch (ESR_ELx_EC(esr)) {
         case ESR_ELx_EC_WATCHPT_LOW:
                 run->debug.arch.far = vcpu->arch.fault.far_el2;
                 /* fall through */
@@ -135,8 +135,8 @@ static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu, struct kvm_run *run)
         case ESR_ELx_EC_BRK64:
                 break;
         default:
-                kvm_err("%s: un-handled case hsr: %#08x\n",
-                        __func__, (unsigned int) hsr);
+                kvm_err("%s: un-handled case esr: %#08x\n",
+                        __func__, (unsigned int) esr);
                 ret = -1;
                 break;
         }
@@ -146,10 +146,10 @@ static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu, struct kvm_run *run)
 static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
-        u32 hsr = kvm_vcpu_get_hsr(vcpu);
+        u32 esr = kvm_vcpu_get_esr(vcpu);
 
-        kvm_pr_unimpl("Unknown exception class: hsr: %#08x -- %s\n",
-                      hsr, esr_get_class_string(hsr));
+        kvm_pr_unimpl("Unknown exception class: esr: %#08x -- %s\n",
+                      esr, esr_get_class_string(esr));
 
         kvm_inject_undefined(vcpu);
         return 1;
@@ -200,10 +200,10 @@ static exit_handle_fn arm_exit_handlers[] = {
 static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
 {
-        u32 hsr = kvm_vcpu_get_hsr(vcpu);
-        u8 hsr_ec = ESR_ELx_EC(hsr);
+        u32 esr = kvm_vcpu_get_esr(vcpu);
+        u8 esr_ec = ESR_ELx_EC(esr);
 
-        return arm_exit_handlers[hsr_ec];
+        return arm_exit_handlers[esr_ec];
 }
 
 /*
@@ -241,15 +241,15 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
                        int exception_index)
 {
         if (ARM_SERROR_PENDING(exception_index)) {
-                u8 hsr_ec = ESR_ELx_EC(kvm_vcpu_get_hsr(vcpu));
+                u8 esr_ec = ESR_ELx_EC(kvm_vcpu_get_esr(vcpu));
 
                 /*
                  * HVC/SMC already have an adjusted PC, which we need
                  * to correct in order to return to after having
                  * injected the SError.
                  */
-                if (hsr_ec == ESR_ELx_EC_HVC32 || hsr_ec == ESR_ELx_EC_HVC64 ||
-                    hsr_ec == ESR_ELx_EC_SMC32 || hsr_ec == ESR_ELx_EC_SMC64) {
+                if (esr_ec == ESR_ELx_EC_HVC32 || esr_ec == ESR_ELx_EC_HVC64 ||
+                    esr_ec == ESR_ELx_EC_SMC32 || esr_ec == ESR_ELx_EC_SMC64) {
                         u32 adj = kvm_vcpu_trap_il_is32bit(vcpu) ? 4 : 2;
                         *vcpu_pc(vcpu) -= adj;
                 }
@@ -307,5 +307,5 @@ void handle_exit_early(struct kvm_vcpu *vcpu, struct kvm_run *run,
         exception_index = ARM_EXCEPTION_CODE(exception_index);
 
         if (exception_index == ARM_EXCEPTION_EL1_SERROR)
-                kvm_handle_guest_serror(vcpu, kvm_vcpu_get_hsr(vcpu));
+                kvm_handle_guest_serror(vcpu, kvm_vcpu_get_esr(vcpu));
 }
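
For context on the handle_exit.c hunks (not part of the diff itself): ESR_ELx_EC() pulls the exception-class field out of bits [31:26] of ESR_EL2, and that class value is what indexes arm_exit_handlers[]. A sketch of what the macro evaluates to, assuming the standard asm/esr.h encoding (the MY_ prefix marks these as local, illustrative definitions):

/* Mirrors ESR_ELx_EC() from asm/esr.h, for illustration only. */
#define MY_ESR_ELx_EC_SHIFT     26
#define MY_ESR_ELx_EC_MASK      (0x3FUL << MY_ESR_ELx_EC_SHIFT)
#define MY_ESR_ELx_EC(esr)      (((esr) & MY_ESR_ELx_EC_MASK) >> MY_ESR_ELx_EC_SHIFT)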
arch/arm64/kvm/hyp/aarch32.c
@@ -51,7 +51,7 @@ bool kvm_condition_valid32(const struct kvm_vcpu *vcpu)
         int cond;
 
         /* Top two bits non-zero? Unconditional. */
-        if (kvm_vcpu_get_hsr(vcpu) >> 30)
+        if (kvm_vcpu_get_esr(vcpu) >> 30)
                 return true;
 
         /* Is condition field valid? */
arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -199,7 +199,7 @@ static inline bool __populate_fault_info(struct kvm_vcpu *vcpu)
 static inline bool __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
 {
         bool vhe, sve_guest, sve_host;
-        u8 hsr_ec;
+        u8 esr_ec;
 
         if (!system_supports_fpsimd())
                 return false;
@@ -219,14 +219,14 @@ static inline bool __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
                 vhe = has_vhe();
         }
 
-        hsr_ec = kvm_vcpu_trap_get_class(vcpu);
-        if (hsr_ec != ESR_ELx_EC_FP_ASIMD &&
-            hsr_ec != ESR_ELx_EC_SVE)
+        esr_ec = kvm_vcpu_trap_get_class(vcpu);
+        if (esr_ec != ESR_ELx_EC_FP_ASIMD &&
+            esr_ec != ESR_ELx_EC_SVE)
                 return false;
 
         /* Don't handle SVE traps for non-SVE vcpus here: */
         if (!sve_guest)
-                if (hsr_ec != ESR_ELx_EC_FP_ASIMD)
+                if (esr_ec != ESR_ELx_EC_FP_ASIMD)
                         return false;
 
         /* Valid trap. Switch the context: */
@@ -284,7 +284,7 @@ static inline bool __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
 static inline bool handle_tx2_tvm(struct kvm_vcpu *vcpu)
 {
-        u32 sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_hsr(vcpu));
+        u32 sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_esr(vcpu));
         int rt = kvm_vcpu_sys_get_rt(vcpu);
         u64 val = vcpu_get_reg(vcpu, rt);
@@ -379,7 +379,7 @@ static inline bool __hyp_handle_ptrauth(struct kvm_vcpu *vcpu)
         u64 val;
 
         if (!vcpu_has_ptrauth(vcpu) ||
-            !esr_is_ptrauth_trap(kvm_vcpu_get_hsr(vcpu)))
+            !esr_is_ptrauth_trap(kvm_vcpu_get_esr(vcpu)))
                 return false;
 
         ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
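
For reference on the __hyp_handle_fpsimd() check above, the two accepted exception classes are fixed encodings from asm/esr.h (values stated as an assumption, not quoted from this tree):

/* ESR_ELx_EC_FP_ASIMD == 0x07: FP/Advanced SIMD access trapped (CPTR_EL2/CPACR_EL1). */
/* ESR_ELx_EC_SVE      == 0x19: SVE access trapped.                                   */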
arch/arm64/kvm/hyp/vgic-v3-sr.c
@@ -426,7 +426,7 @@ static int __vgic_v3_bpr_min(void)
 static int __vgic_v3_get_group(struct kvm_vcpu *vcpu)
 {
-        u32 esr = kvm_vcpu_get_hsr(vcpu);
+        u32 esr = kvm_vcpu_get_esr(vcpu);
         u8 crm = (esr & ESR_ELx_SYS64_ISS_CRM_MASK) >> ESR_ELx_SYS64_ISS_CRM_SHIFT;
 
         return crm != 8;
@@ -978,7 +978,7 @@ int __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu)
         bool is_read;
         u32 sysreg;
 
-        esr = kvm_vcpu_get_hsr(vcpu);
+        esr = kvm_vcpu_get_esr(vcpu);
         if (vcpu_mode_is_32bit(vcpu)) {
                 if (!kvm_condition_valid(vcpu)) {
                         __kvm_skip_instr(vcpu);
arch/arm64/kvm/mmu.c
@@ -2116,7 +2116,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
                  * For RAS the host kernel may handle this abort.
                  * There is no need to pass the error into the guest.
                  */
-                if (!kvm_handle_guest_sea(fault_ipa, kvm_vcpu_get_hsr(vcpu)))
+                if (!kvm_handle_guest_sea(fault_ipa, kvm_vcpu_get_esr(vcpu)))
                         return 1;
 
                 if (unlikely(!is_iabt)) {
@@ -2125,7 +2125,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
                 }
         }
 
-        trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_hsr(vcpu),
+        trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_esr(vcpu),
                               kvm_vcpu_get_hfar(vcpu), fault_ipa);
 
         /* Check the stage-2 fault is trans. fault or write fault */
@@ -2134,7 +2134,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
                 kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n",
                         kvm_vcpu_trap_get_class(vcpu),
                         (unsigned long)kvm_vcpu_trap_get_fault(vcpu),
-                        (unsigned long)kvm_vcpu_get_hsr(vcpu));
+                        (unsigned long)kvm_vcpu_get_esr(vcpu));
                 return -EFAULT;
         }
arch/arm64/kvm/sys_regs.c
@@ -2220,10 +2220,10 @@ static int emulate_cp(struct kvm_vcpu *vcpu,
 static void unhandled_cp_access(struct kvm_vcpu *vcpu,
                                 struct sys_reg_params *params)
 {
-        u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
+        u8 esr_ec = kvm_vcpu_trap_get_class(vcpu);
         int cp = -1;
 
-        switch (hsr_ec) {
+        switch (esr_ec) {
         case ESR_ELx_EC_CP15_32:
         case ESR_ELx_EC_CP15_64:
                 cp = 15;
@@ -2252,17 +2252,17 @@ static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
                             size_t nr_global)
 {
         struct sys_reg_params params;
-        u32 hsr = kvm_vcpu_get_hsr(vcpu);
+        u32 esr = kvm_vcpu_get_esr(vcpu);
         int Rt = kvm_vcpu_sys_get_rt(vcpu);
-        int Rt2 = (hsr >> 10) & 0x1f;
+        int Rt2 = (esr >> 10) & 0x1f;
 
         params.is_aarch32 = true;
         params.is_32bit = false;
-        params.CRm = (hsr >> 1) & 0xf;
-        params.is_write = ((hsr & 1) == 0);
+        params.CRm = (esr >> 1) & 0xf;
+        params.is_write = ((esr & 1) == 0);
 
         params.Op0 = 0;
-        params.Op1 = (hsr >> 16) & 0xf;
+        params.Op1 = (esr >> 16) & 0xf;
         params.Op2 = 0;
         params.CRn = 0;
@@ -2304,18 +2304,18 @@ static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
                             size_t nr_global)
 {
         struct sys_reg_params params;
-        u32 hsr = kvm_vcpu_get_hsr(vcpu);
+        u32 esr = kvm_vcpu_get_esr(vcpu);
         int Rt = kvm_vcpu_sys_get_rt(vcpu);
 
         params.is_aarch32 = true;
         params.is_32bit = true;
-        params.CRm = (hsr >> 1) & 0xf;
+        params.CRm = (esr >> 1) & 0xf;
         params.regval = vcpu_get_reg(vcpu, Rt);
-        params.is_write = ((hsr & 1) == 0);
-        params.CRn = (hsr >> 10) & 0xf;
+        params.is_write = ((esr & 1) == 0);
+        params.CRn = (esr >> 10) & 0xf;
         params.Op0 = 0;
-        params.Op1 = (hsr >> 14) & 0x7;
-        params.Op2 = (hsr >> 17) & 0x7;
+        params.Op1 = (esr >> 14) & 0x7;
+        params.Op2 = (esr >> 17) & 0x7;
 
         if (!emulate_cp(vcpu, &params, global, nr_global)) {
                 if (!params.is_write)
@@ -2397,7 +2397,7 @@ void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
 int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
         struct sys_reg_params params;
-        unsigned long esr = kvm_vcpu_get_hsr(vcpu);
+        unsigned long esr = kvm_vcpu_get_esr(vcpu);
         int Rt = kvm_vcpu_sys_get_rt(vcpu);
         int ret;
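
The shifts in the sys_regs.c hunks follow the ISS layout for trapped AArch32 coprocessor accesses. A condensed reading of the same bit positions the code itself uses (a reading aid, not a substitute for asm/esr.h):

/*
 * ISS fields as decoded by kvm_handle_cp_32()/kvm_handle_cp_64() above:
 *   bit  0        direction (0 == guest write)
 *   bits [4:1]    CRm
 *   bits [9:5]    Rt   (via kvm_vcpu_sys_get_rt())
 *   bits [13:10]  CRn  (32-bit access)  /  Rt2 in bits [14:10] (64-bit access)
 *   bits [16:14]  Op1  (32-bit access)  /  Op1 in bits [19:16] (64-bit access)
 *   bits [19:17]  Op2  (32-bit access only)
 */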
arch/arm64/kvm/va_layout.c
@@ -48,7 +48,7 @@ __init void kvm_compute_layout(void)
         va_mask = GENMASK_ULL(tag_lsb - 1, 0);
         tag_val = hyp_va_msb;
 
-        if (tag_lsb != (vabits_actual - 1)) {
+        if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && tag_lsb != (vabits_actual - 1)) {
                 /* We have some free bits to insert a random tag. */
                 tag_val |= get_random_long() & GENMASK_ULL(vabits_actual - 2, tag_lsb);
         }
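
On the va_layout.c hunk: GENMASK_ULL(hi, lo) produces a mask with bits hi..lo set, so the randomization only touches the bits strictly between tag_lsb and the top VA bit, and with this change it is skipped entirely unless CONFIG_RANDOMIZE_BASE is enabled. A tiny worked example, with made-up values:

/* GENMASK_ULL(6, 4) == 0x70 (bits 6, 5 and 4 set).                          */
/* If vabits_actual were 48 and tag_lsb were 44 (illustrative values only),  */
/* GENMASK_ULL(vabits_actual - 2, tag_lsb) == GENMASK_ULL(46, 44),           */
/* i.e. three random bits inserted below the fixed top bit of the hyp VA.    */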
arch/arm64/kvm/vgic/vgic-irqfd.c
@@ -100,19 +100,33 @@ int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
 /**
  * kvm_arch_set_irq_inatomic: fast-path for irqfd injection
- *
- * Currently only direct MSI injection is supported.
  */
 int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
                               struct kvm *kvm, int irq_source_id, int level,
                               bool line_status)
 {
-        if (e->type == KVM_IRQ_ROUTING_MSI && vgic_has_its(kvm) && level) {
+        if (!level)
+                return -EWOULDBLOCK;
+
+        switch (e->type) {
+        case KVM_IRQ_ROUTING_MSI: {
                 struct kvm_msi msi;
 
+                if (!vgic_has_its(kvm))
+                        break;
+
                 kvm_populate_msi(e, &msi);
-                if (!vgic_its_inject_cached_translation(kvm, &msi))
-                        return 0;
+                return vgic_its_inject_cached_translation(kvm, &msi);
+        }
+
+        case KVM_IRQ_ROUTING_IRQCHIP:
+                /*
+                 * Injecting SPIs is always possible in atomic context
+                 * as long as the damn vgic is initialized.
+                 */
+                if (unlikely(!vgic_initialized(kvm)))
+                        break;
+                return vgic_irqfd_set_irq(e, kvm, irq_source_id, 1, line_status);
         }
 
         return -EWOULDBLOCK;
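
On the vgic-irqfd.c rewrite: kvm_arch_set_irq_inatomic() is the atomic fast path invoked from the irqfd wakeup handler, and returning -EWOULDBLOCK asks the generic code to retry on the slow path that is allowed to sleep. A simplified sketch of that caller contract (not the actual virt/kvm/eventfd.c code):

/* Hypothetical caller shape: try the atomic path, defer on -EWOULDBLOCK. */
if (kvm_arch_set_irq_inatomic(&irq_entry, kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
                              1, false) == -EWOULDBLOCK)
        schedule_work(&irqfd->inject);  /* slow path, may sleep */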
arch/arm64/kvm/vgic/vgic-its.c
@@ -757,9 +757,8 @@ int vgic_its_inject_cached_translation(struct kvm *kvm, struct kvm_msi *msi)
 
         db = (u64)msi->address_hi << 32 | msi->address_lo;
         irq = vgic_its_check_cache(kvm, db, msi->devid, msi->data);
-
         if (!irq)
-                return -1;
+                return -EWOULDBLOCK;
 
         raw_spin_lock_irqsave(&irq->irq_lock, flags);
         irq->pending_latch = true;
arch/arm64/kvm/vgic/vgic-mmio-v3.c
@@ -389,7 +389,7 @@ u64 vgic_sanitise_outer_cacheability(u64 field)
         case GIC_BASER_CACHE_nC:
                 return field;
         default:
-                return GIC_BASER_CACHE_nC;
+                return GIC_BASER_CACHE_SameAsInner;
         }
 }