By default all SME operations in userspace will trap. When this happens we allocate storage space for the SME register state, set up the SVE registers and disable traps. We do not need to initialize ZA since the architecture guarantees that it will be zeroed when enabled, and when we trap, ZA is disabled. On syscall we exit streaming mode if we were previously in it and ensure that all but the lower 128 bits of the registers are zeroed while preserving the state of ZA. This follows the aarch64 PCS for SME: ZA state is preserved over a function call and streaming mode is exited. Since the traps for SME do not distinguish between streaming-mode SVE and ZA usage, if ZA is in use then rather than reenabling traps we instead zero the parts of the SVE registers not shared with FPSIMD and leave SME enabled; this simplifies handling of SME traps. If ZA is not in use then we reenable SME traps and fall through to normal handling of SVE. Signed-off-by: Mark Brown <broonie@kernel.org> Reviewed-by: Catalin Marinas <catalin.marinas@arm.com> Link: https://lore.kernel.org/r/20220419112247.711548-17-broonie@kernel.org Signed-off-by: Catalin Marinas <catalin.marinas@arm.com> (cherry picked from commit 8bd7f91c03d886f41d35f6108078d20be5a4a1bd) Signed-off-by: Will Deacon <willdeacon@google.com> Bug: 233587962 Bug: 233588291 Change-Id: I98fc449f3310a91bb42e5c1b44f6ff5929d61e4a
81 lines
2.9 KiB
C
81 lines
2.9 KiB
C
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/exception.h
 *
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_EXCEPTION_H
#define __ASM_EXCEPTION_H

#include <asm/esr.h>
#include <asm/kprobes.h>
#include <asm/ptrace.h>

#include <linux/interrupt.h>

/*
 * With the function graph tracer the IRQ-entry marker must place these
 * functions in the __irq_entry section; otherwise mark them __kprobes
 * so they cannot be probed.
 */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#define __exception_irq_entry	__irq_entry
#else
#define __exception_irq_entry	__kprobes
#endif
/*
 * disr_to_esr() - convert a DISR_EL1 value to an ESR-format syndrome.
 *
 * Builds a syndrome word with EC = SError so that deferred SErrors
 * reported via DISR_EL1 can be decoded through the common ESR paths.
 */
static inline u32 disr_to_esr(u64 disr)
{
	unsigned int esr = ESR_ELx_EC_SERROR << ESR_ELx_EC_SHIFT;

	/*
	 * If DISR_EL1.IDS is clear, copy the full ESR-format syndrome
	 * field; otherwise only the ISS bits carry meaning.
	 */
	if ((disr & DISR_EL1_IDS) == 0)
		esr |= (disr & DISR_EL1_ESR_MASK);
	else
		esr |= (disr & ESR_ELx_ISS_MASK);

	return esr;
}
asmlinkage void handle_bad_stack(struct pt_regs *regs);

/*
 * Per-vector entry handlers; asmlinkage as they are invoked from the
 * assembly exception vectors. Naming: el<level><t/h>_<execution state>.
 */
asmlinkage void el1t_64_sync_handler(struct pt_regs *regs);
asmlinkage void el1t_64_irq_handler(struct pt_regs *regs);
asmlinkage void el1t_64_fiq_handler(struct pt_regs *regs);
asmlinkage void el1t_64_error_handler(struct pt_regs *regs);

asmlinkage void el1h_64_sync_handler(struct pt_regs *regs);
asmlinkage void el1h_64_irq_handler(struct pt_regs *regs);
asmlinkage void el1h_64_fiq_handler(struct pt_regs *regs);
asmlinkage void el1h_64_error_handler(struct pt_regs *regs);

asmlinkage void el0t_64_sync_handler(struct pt_regs *regs);
asmlinkage void el0t_64_irq_handler(struct pt_regs *regs);
asmlinkage void el0t_64_fiq_handler(struct pt_regs *regs);
asmlinkage void el0t_64_error_handler(struct pt_regs *regs);

asmlinkage void el0t_32_sync_handler(struct pt_regs *regs);
asmlinkage void el0t_32_irq_handler(struct pt_regs *regs);
asmlinkage void el0t_32_fiq_handler(struct pt_regs *regs);
asmlinkage void el0t_32_error_handler(struct pt_regs *regs);

asmlinkage void call_on_irq_stack(struct pt_regs *regs,
				  void (*func)(struct pt_regs *));
asmlinkage void asm_exit_to_user_mode(struct pt_regs *regs);

/* C-level handlers for individual exception classes. */
void do_mem_abort(unsigned long far, unsigned int esr, struct pt_regs *regs);
void do_undefinstr(struct pt_regs *regs);
void do_bti(struct pt_regs *regs);
void do_debug_exception(unsigned long addr_if_watchpoint, unsigned int esr,
			struct pt_regs *regs);
void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs);
void do_sve_acc(unsigned int esr, struct pt_regs *regs);
void do_sme_acc(unsigned int esr, struct pt_regs *regs);
void do_fpsimd_exc(unsigned int esr, struct pt_regs *regs);
void do_sysinstr(unsigned int esr, struct pt_regs *regs);
void do_sp_pc_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs);
void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr);
void do_cp15instr(unsigned int esr, struct pt_regs *regs);
void do_el0_svc(struct pt_regs *regs);
void do_el0_svc_compat(struct pt_regs *regs);
void do_ptrauth_fault(struct pt_regs *regs, unsigned int esr);
void do_serror(struct pt_regs *regs, unsigned int esr);
void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags);

void panic_bad_stack(struct pt_regs *regs, unsigned int esr, unsigned long far);
#endif	/* __ASM_EXCEPTION_H */