Commit af5ce952 authored by Catalin Marinas

arm64: mte: Allow user control of the generated random tags via prctl()

The IRG, ADDG and SUBG instructions insert a random tag in the resulting
address. Certain tags can be excluded via the GCR_EL1.Exclude bitmap
when, for example, the user wants a certain colour for freed buffers.
Since the GCR_EL1 register is not accessible at EL0, extend the
prctl(PR_SET_TAGGED_ADDR_CTRL) interface to include a 16-bit field in
the first argument for controlling which tags can be generated by the
above instructions (an include rather than exclude mask). Note that by
default all non-zero tags are excluded. This setting is per-thread.
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will@kernel.org>
parent 1c101da8
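Not part of the commit: a minimal userspace sketch of the extended interface described above, showing how the 16-bit include mask is packed into the first prctl() argument. The helper name enable_mte() and the choice of synchronous tag checking are arbitrary; the fallback #defines mirror the uapi values added by this patch in case the installed headers predate it.

#include <sys/prctl.h>

#ifndef PR_SET_TAGGED_ADDR_CTRL
# define PR_SET_TAGGED_ADDR_CTRL	55
# define PR_TAGGED_ADDR_ENABLE		(1UL << 0)
#endif
#ifndef PR_MTE_TAG_SHIFT
# define PR_MTE_TCF_SHIFT	1
# define PR_MTE_TCF_SYNC	(1UL << PR_MTE_TCF_SHIFT)
# define PR_MTE_TAG_SHIFT	3
# define PR_MTE_TAG_MASK	(0xffffUL << PR_MTE_TAG_SHIFT)
#endif

/*
 * Hypothetical helper: enable tagged addresses, synchronous tag checking
 * and restrict IRG/ADDG/SUBG to the tags set in 'incl' (bit n set means
 * tag n may be generated).
 */
static int enable_mte(unsigned long incl)
{
	unsigned long ctrl = PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_SYNC |
			     ((incl << PR_MTE_TAG_SHIFT) & PR_MTE_TAG_MASK);

	return prctl(PR_SET_TAGGED_ADDR_CTRL, ctrl, 0, 0, 0);
}

For example, enable_mte(0x0f) would allow only tags 0-3 to be generated for this thread.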
arch/arm64/include/asm/processor.h
@@ -153,6 +153,7 @@ struct thread_struct {
 #endif
 #ifdef CONFIG_ARM64_MTE
 	u64			sctlr_tcf0;
+	u64			gcr_user_incl;
 #endif
 };
 
arch/arm64/include/asm/sysreg.h
@@ -1078,6 +1078,13 @@
 	write_sysreg(__scs_new, sysreg);			\
 } while (0)
 
+#define sysreg_clear_set_s(sysreg, clear, set) do {		\
+	u64 __scs_val = read_sysreg_s(sysreg);			\
+	u64 __scs_new = (__scs_val & ~(u64)(clear)) | (set);	\
+	if (__scs_new != __scs_val)				\
+		write_sysreg_s(__scs_new, sysreg);		\
+} while (0)
+
 #endif
 
 #endif	/* __ASM_SYSREG_H */
arch/arm64/kernel/mte.c
@@ -71,6 +71,25 @@ static void set_sctlr_el1_tcf0(u64 tcf0)
 	preempt_enable();
 }
 
+static void update_gcr_el1_excl(u64 incl)
+{
+	u64 excl = ~incl & SYS_GCR_EL1_EXCL_MASK;
+
+	/*
+	 * Note that 'incl' is an include mask (controlled by the user via
+	 * prctl()) while GCR_EL1 accepts an exclude mask.
+	 * No need for ISB since this only affects EL0 currently, implicit
+	 * with ERET.
+	 */
+	sysreg_clear_set_s(SYS_GCR_EL1, SYS_GCR_EL1_EXCL_MASK, excl);
+}
+
+static void set_gcr_el1_excl(u64 incl)
+{
+	current->thread.gcr_user_incl = incl;
+	update_gcr_el1_excl(incl);
+}
+
 void flush_mte_state(void)
 {
 	if (!system_supports_mte())
@@ -82,6 +101,8 @@ void flush_mte_state(void)
 	clear_thread_flag(TIF_MTE_ASYNC_FAULT);
 	/* disable tag checking */
 	set_sctlr_el1_tcf0(SCTLR_EL1_TCF0_NONE);
+	/* reset tag generation mask */
+	set_gcr_el1_excl(0);
 }
 
 void mte_thread_switch(struct task_struct *next)
@@ -92,6 +113,7 @@ void mte_thread_switch(struct task_struct *next)
 	/* avoid expensive SCTLR_EL1 accesses if no change */
 	if (current->thread.sctlr_tcf0 != next->thread.sctlr_tcf0)
 		update_sctlr_el1_tcf0(next->thread.sctlr_tcf0);
+	update_gcr_el1_excl(next->thread.gcr_user_incl);
 }
 
 long set_mte_ctrl(unsigned long arg)
@@ -116,23 +138,30 @@ long set_mte_ctrl(unsigned long arg)
 	}
 
 	set_sctlr_el1_tcf0(tcf0);
+	set_gcr_el1_excl((arg & PR_MTE_TAG_MASK) >> PR_MTE_TAG_SHIFT);
 
 	return 0;
 }
 
 long get_mte_ctrl(void)
 {
+	unsigned long ret;
+
 	if (!system_supports_mte())
 		return 0;
 
+	ret = current->thread.gcr_user_incl << PR_MTE_TAG_SHIFT;
+
 	switch (current->thread.sctlr_tcf0) {
 	case SCTLR_EL1_TCF0_NONE:
 		return PR_MTE_TCF_NONE;
 	case SCTLR_EL1_TCF0_SYNC:
-		return PR_MTE_TCF_SYNC;
+		ret |= PR_MTE_TCF_SYNC;
+		break;
 	case SCTLR_EL1_TCF0_ASYNC:
-		return PR_MTE_TCF_ASYNC;
+		ret |= PR_MTE_TCF_ASYNC;
+		break;
 	}
 
-	return 0;
+	return ret;
 }
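Illustrative only, not from the patch: a standalone sketch of the include-to-exclude inversion performed in update_gcr_el1_excl(), assuming the 16-bit GCR_EL1.Exclude field (i.e. SYS_GCR_EL1_EXCL_MASK == 0xffff).

#include <stdio.h>
#include <stdint.h>

#define GCR_EXCL_MASK	0xffffULL	/* assumed width of GCR_EL1.Exclude */

int main(void)
{
	uint64_t incl = 0x000f;			/* user include mask: tags 0-3 */
	uint64_t excl = ~incl & GCR_EXCL_MASK;	/* 0xfff0: exclude tags 4-15 */

	printf("incl=0x%04llx -> GCR_EL1.Exclude=0x%04llx\n",
	       (unsigned long long)incl, (unsigned long long)excl);
	return 0;
}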
arch/arm64/kernel/process.c
@@ -649,7 +649,7 @@ long set_tagged_addr_ctrl(unsigned long arg)
 		return -EINVAL;
 
 	if (system_supports_mte())
-		valid_mask |= PR_MTE_TCF_MASK;
+		valid_mask |= PR_MTE_TCF_MASK | PR_MTE_TAG_MASK;
 
 	if (arg & ~valid_mask)
 		return -EINVAL;
include/uapi/linux/prctl.h
@@ -239,6 +239,9 @@ struct prctl_mm_map {
 # define PR_MTE_TCF_SYNC		(1UL << PR_MTE_TCF_SHIFT)
 # define PR_MTE_TCF_ASYNC		(2UL << PR_MTE_TCF_SHIFT)
 # define PR_MTE_TCF_MASK		(3UL << PR_MTE_TCF_SHIFT)
+/* MTE tag inclusion mask */
+# define PR_MTE_TAG_SHIFT		3
+# define PR_MTE_TAG_MASK		(0xffffUL << PR_MTE_TAG_SHIFT)
 
 /* Control reclaim behavior when allocating memory */
 #define PR_SET_IO_FLUSHER		57
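Read-back sketch (illustrative, not part of the commit): with the layout above, the tag-check mode and the include mask returned by get_mte_ctrl() can be unpacked from prctl(PR_GET_TAGGED_ADDR_CTRL) as below; the fallback #defines cover headers older than this patch.

#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_GET_TAGGED_ADDR_CTRL
# define PR_GET_TAGGED_ADDR_CTRL	56
#endif
#ifndef PR_MTE_TAG_SHIFT
# define PR_MTE_TCF_SHIFT	1
# define PR_MTE_TCF_MASK	(3UL << PR_MTE_TCF_SHIFT)
# define PR_MTE_TAG_SHIFT	3
# define PR_MTE_TAG_MASK	(0xffffUL << PR_MTE_TAG_SHIFT)
#endif

int main(void)
{
	long ctrl = prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0);

	if (ctrl < 0) {
		perror("prctl(PR_GET_TAGGED_ADDR_CTRL)");
		return 1;
	}

	printf("tag check mode: %lu\n",
	       ((unsigned long)ctrl & PR_MTE_TCF_MASK) >> PR_MTE_TCF_SHIFT);
	printf("tag include mask: 0x%04lx\n",
	       ((unsigned long)ctrl & PR_MTE_TAG_MASK) >> PR_MTE_TAG_SHIFT);
	return 0;
}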