Commit d192f538 authored by Linus Torvalds

Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 fixes from Will Deacon:
 "I've picked up a handful of arm64 fixes while Catalin's been away, so
  here they are. Below is the usual summary, but we basically have
  two cleanups, a fix for an SME crash and a fix for hibernation:

   - Fix saving of SME state after SVE vector length is changed

   - Fix sparse warnings for missing vDSO function prototypes

   - Fix hibernation resume path when kfence is enabled

   - Fix field names for the HFGxTR_EL2 register"

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64/fpsimd: Ensure SME storage is allocated after SVE VL changes
  arm64: vdso: Clear common make C=2 warnings
  arm64: mm: Make hibernation aware of KFENCE
  arm64: Fix HFGxTR_EL2 field naming
parents 892d7c1b d4d5be94
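
For context on the first fix: vec_set_vector_length() in the hunk below is the common backend for the PR_SVE_SET_VL and PR_SME_SET_VL prctl() calls, and the crash was hit when SME state had to be saved after the SVE vector length changed. What follows is a minimal, illustrative userspace sketch of that kind of sequence, not part of the patch; it assumes prctl headers from Linux 5.19 or later (for PR_SME_SET_VL) and a CPU with both SVE and SME, and it leaves out the actual streaming-mode/ZA register use (which needs assembly or intrinsics) that exercises the state-saving path.

/*
 * Illustrative sketch only, not from the patch: change the SME and
 * then the SVE vector length via prctl(), the syscalls that funnel
 * into vec_set_vector_length(). Vector lengths are given in bytes;
 * the kernel rounds the request to a supported value and returns the
 * new configuration (or -1 on error). PR_SME_SET_VL needs Linux
 * 5.19+ headers and SME hardware; PR_SVE_SET_VL needs SVE hardware.
 */
#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
	int sme = prctl(PR_SME_SET_VL, 32);	/* request a 256-bit SME VL */
	if (sme < 0) {
		perror("PR_SME_SET_VL");
		return 1;
	}

	/*
	 * Shrink the SVE vector length while the task still has SME
	 * configured; this is the point where the kernel must make sure
	 * the buffers needed to save streaming-mode state are reallocated.
	 */
	int sve = prctl(PR_SVE_SET_VL, 16);	/* request a 128-bit SVE VL */
	if (sve < 0) {
		perror("PR_SVE_SET_VL");
		return 1;
	}

	printf("new SME config: %#x, new SVE config: %#x\n", sme, sve);
	return 0;
}
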
@@ -847,6 +847,8 @@ void sve_sync_from_fpsimd_zeropad(struct task_struct *task)
 int vec_set_vector_length(struct task_struct *task, enum vec_type type,
 			  unsigned long vl, unsigned long flags)
 {
+	bool free_sme = false;
+
 	if (flags & ~(unsigned long)(PR_SVE_VL_INHERIT |
 				     PR_SVE_SET_VL_ONEXEC))
 		return -EINVAL;
@@ -897,21 +899,36 @@ int vec_set_vector_length(struct task_struct *task, enum vec_type type,
 		task->thread.fp_type = FP_STATE_FPSIMD;
 	}
 
-	if (system_supports_sme() && type == ARM64_VEC_SME) {
-		task->thread.svcr &= ~(SVCR_SM_MASK |
-				       SVCR_ZA_MASK);
-		clear_thread_flag(TIF_SME);
+	if (system_supports_sme()) {
+		if (type == ARM64_VEC_SME ||
+		    !(task->thread.svcr & (SVCR_SM_MASK | SVCR_ZA_MASK))) {
+			/*
+			 * We are changing the SME VL or weren't using
+			 * SME anyway, discard the state and force a
+			 * reallocation.
+			 */
+			task->thread.svcr &= ~(SVCR_SM_MASK |
+					       SVCR_ZA_MASK);
+			clear_thread_flag(TIF_SME);
+			free_sme = true;
+		}
 	}
 
 	if (task == current)
 		put_cpu_fpsimd_context();
 
 	/*
-	 * Force reallocation of task SVE and SME state to the correct
-	 * size on next use:
+	 * Free the changed states if they are not in use, SME will be
+	 * reallocated to the correct size on next use and we just
+	 * allocate SVE now in case it is needed for use in streaming
+	 * mode.
 	 */
-	sve_free(task);
-	if (system_supports_sme() && type == ARM64_VEC_SME)
+	if (system_supports_sve()) {
+		sve_free(task);
+		sve_alloc(task, true);
+	}
+
+	if (free_sme)
 		sme_free(task);
 
 	task_set_vl(task, type, vl);
......
@@ -6,6 +6,10 @@
  *
  */
 
+int __kernel_clock_gettime(clockid_t clock, struct __kernel_timespec *ts);
+int __kernel_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz);
+int __kernel_clock_getres(clockid_t clock_id, struct __kernel_timespec *res);
+
 int __kernel_clock_gettime(clockid_t clock,
 			   struct __kernel_timespec *ts)
 {
......
@@ -24,6 +24,7 @@
 #include <linux/bug.h>
 #include <linux/mm.h>
 #include <linux/mmzone.h>
+#include <linux/kfence.h>
 
 static void *trans_alloc(struct trans_pgd_info *info)
 {
@@ -41,7 +42,8 @@ static void _copy_pte(pte_t *dst_ptep, pte_t *src_ptep, unsigned long addr)
 		 * the temporary mappings we use during restore.
 		 */
 		set_pte(dst_ptep, pte_mkwrite(pte));
-	} else if (debug_pagealloc_enabled() && !pte_none(pte)) {
+	} else if ((debug_pagealloc_enabled() ||
+		    is_kfence_address((void *)addr)) && !pte_none(pte)) {
 		/*
 		 * debug_pagealloc will removed the PTE_VALID bit if
 		 * the page isn't in use by the resume kernel. It may have
......
@@ -2017,7 +2017,7 @@ Field 0 SM
 EndSysreg
 
 SysregFields HFGxTR_EL2
-Field 63 nAMIAIR2_EL1
+Field 63 nAMAIR2_EL1
 Field 62 nMAIR2_EL1
 Field 61 nS2POR_EL1
 Field 60 nPOR_EL1
@@ -2032,9 +2032,9 @@ Field 52 nGCS_EL0
 Res0 51
 Field 50 nACCDATA_EL1
 Field 49 ERXADDR_EL1
-Field 48 EXRPFGCDN_EL1
-Field 47 EXPFGCTL_EL1
-Field 46 EXPFGF_EL1
+Field 48 ERXPFGCDN_EL1
+Field 47 ERXPFGCTL_EL1
+Field 46 ERXPFGF_EL1
 Field 45 ERXMISCn_EL1
 Field 44 ERXSTATUS_EL1
 Field 43 ERXCTLR_EL1
@@ -2049,8 +2049,8 @@ Field 35 TPIDR_EL0
 Field 34 TPIDRRO_EL0
 Field 33 TPIDR_EL1
 Field 32 TCR_EL1
-Field 31 SCTXNUM_EL0
-Field 30 SCTXNUM_EL1
+Field 31 SCXTNUM_EL0
+Field 30 SCXTNUM_EL1
 Field 29 SCTLR_EL1
 Field 28 REVIDR_EL1
 Field 27 PAR_EL1
......