arm64: mte: Allow user control of the tag check mode via prctl()

By default, even if PROT_MTE is set on a memory range, there is no tag
check fault reporting (SIGSEGV). Introduce a set of options to the
existing prctl(PR_SET_TAGGED_ADDR_CTRL) to allow user control of the tag
check fault mode:

  PR_MTE_TCF_NONE  - no reporting (default)
  PR_MTE_TCF_SYNC  - synchronous tag check fault reporting
  PR_MTE_TCF_ASYNC - asynchronous tag check fault reporting

These options translate into the corresponding SCTLR_EL1.TCF0 bitfield,
context-switched by the kernel. Note that kernel accesses to the user
address space (e.g. the read() system call) are not checked if the user
thread's tag checking mode is PR_MTE_TCF_NONE or PR_MTE_TCF_ASYNC. If
the tag checking mode is PR_MTE_TCF_SYNC, the kernel makes a best effort
to check its user address accesses; however, it cannot always guarantee
this.
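
As an illustration (not part of the patch itself), a user thread could
opt in to synchronous tag check faults roughly as follows; this sketch
assumes uapi headers that already carry the PR_MTE_TCF_* flags added by
this series, alongside the existing PR_SET_TAGGED_ADDR_CTRL /
PR_TAGGED_ADDR_ENABLE controls:

  #include <stdio.h>
  #include <sys/prctl.h>

  int main(void)
  {
          /* enable the tagged address ABI with synchronous tag check faults */
          if (prctl(PR_SET_TAGGED_ADDR_CTRL,
                    PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_SYNC, 0, 0, 0)) {
                  perror("prctl(PR_SET_TAGGED_ADDR_CTRL)");
                  return 1;
          }

          return 0;
  }

With PR_MTE_TCF_SYNC selected, a tag check fault on a PROT_MTE mapping
is reported to the thread via SIGSEGV at the faulting instruction;
PR_MTE_TCF_ASYNC defers reporting until the asynchronous fault is
noticed by the kernel.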

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will@kernel.org>
commit 1c101da8b9
parent 51b0bff2f7
Author: Catalin Marinas
Date:   2019-11-27 10:30:15 +00:00

5 changed files with 123 additions and 3 deletions


--- a/arch/arm64/kernel/mte.c
+++ b/arch/arm64/kernel/mte.c
@@ -5,6 +5,8 @@
 
 #include <linux/bitops.h>
 #include <linux/mm.h>
+#include <linux/prctl.h>
+#include <linux/sched.h>
 #include <linux/string.h>
 #include <linux/thread_info.h>
@@ -49,6 +51,26 @@ int memcmp_pages(struct page *page1, struct page *page2)
 	return ret;
 }
 
+static void update_sctlr_el1_tcf0(u64 tcf0)
+{
+	/* ISB required for the kernel uaccess routines */
+	sysreg_clear_set(sctlr_el1, SCTLR_EL1_TCF0_MASK, tcf0);
+	isb();
+}
+
+static void set_sctlr_el1_tcf0(u64 tcf0)
+{
+	/*
+	 * mte_thread_switch() checks current->thread.sctlr_tcf0 as an
+	 * optimisation. Disable preemption so that it does not see
+	 * the variable update before the SCTLR_EL1.TCF0 one.
+	 */
+	preempt_disable();
+	current->thread.sctlr_tcf0 = tcf0;
+	update_sctlr_el1_tcf0(tcf0);
+	preempt_enable();
+}
+
 void flush_mte_state(void)
 {
 	if (!system_supports_mte())
@@ -58,4 +80,59 @@ void flush_mte_state(void)
 	dsb(ish);
 	write_sysreg_s(0, SYS_TFSRE0_EL1);
 	clear_thread_flag(TIF_MTE_ASYNC_FAULT);
+
+	/* disable tag checking */
+	set_sctlr_el1_tcf0(SCTLR_EL1_TCF0_NONE);
 }
+
+void mte_thread_switch(struct task_struct *next)
+{
+	if (!system_supports_mte())
+		return;
+
+	/* avoid expensive SCTLR_EL1 accesses if no change */
+	if (current->thread.sctlr_tcf0 != next->thread.sctlr_tcf0)
+		update_sctlr_el1_tcf0(next->thread.sctlr_tcf0);
+}
+
+long set_mte_ctrl(unsigned long arg)
+{
+	u64 tcf0;
+
+	if (!system_supports_mte())
+		return 0;
+
+	switch (arg & PR_MTE_TCF_MASK) {
+	case PR_MTE_TCF_NONE:
+		tcf0 = SCTLR_EL1_TCF0_NONE;
+		break;
+	case PR_MTE_TCF_SYNC:
+		tcf0 = SCTLR_EL1_TCF0_SYNC;
+		break;
+	case PR_MTE_TCF_ASYNC:
+		tcf0 = SCTLR_EL1_TCF0_ASYNC;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	set_sctlr_el1_tcf0(tcf0);
+
+	return 0;
+}
+
+long get_mte_ctrl(void)
+{
+	if (!system_supports_mte())
+		return 0;
+
+	switch (current->thread.sctlr_tcf0) {
+	case SCTLR_EL1_TCF0_NONE:
+		return PR_MTE_TCF_NONE;
+	case SCTLR_EL1_TCF0_SYNC:
+		return PR_MTE_TCF_SYNC;
+	case SCTLR_EL1_TCF0_ASYNC:
+		return PR_MTE_TCF_ASYNC;
+	}
+
+	return 0;
+}