diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 80936aefb541..52bc8f6206b7 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1896,20 +1896,6 @@ config DMI
 
 endmenu
 
-config ASYMMETRIC_AARCH32
-	bool "Allow support for asymmetric AArch32 support"
-	depends on COMPAT && EXPERT && !KVM
-	help
-	  Enable this option to allow support for asymmetric AArch32 EL0
-	  CPU configurations. Once the AArch32 EL0 support is detected
-	  on a CPU, the feature is made available to user space to allow
-	  the execution of 32-bit (compat) applications. If the affinity
-	  of the 32-bit application contains a non-AArch32 capable CPU
-	  or the last AArch32 capable CPU is offlined, the application
-	  will be killed.
-
-	  If unsure say N.
-
 config SYSVIPC_COMPAT
 	def_bool y
 	depends on COMPAT && SYSVIPC
diff --git a/arch/arm64/include/asm/cpu.h b/arch/arm64/include/asm/cpu.h
index c920fa45e502..7faae6ff3ab4 100644
--- a/arch/arm64/include/asm/cpu.h
+++ b/arch/arm64/include/asm/cpu.h
@@ -15,7 +15,6 @@
 struct cpuinfo_arm64 {
 	struct cpu	cpu;
 	struct kobject	kobj;
-	bool		aarch32_valid;
 	u32		reg_ctr;
 	u32		reg_cntfrq;
 	u32		reg_dczid;
@@ -66,7 +65,6 @@
 void cpuinfo_store_cpu(void);
 void __init cpuinfo_store_boot_cpu(void);
 
 void __init init_cpu_features(struct cpuinfo_arm64 *info);
-void init_cpu_32bit_features(struct cpuinfo_arm64 *info);
 void update_cpu_features(int cpu, struct cpuinfo_arm64 *info,
 			 struct cpuinfo_arm64 *boot);
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 23ab07cb738e..9a555809b89c 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -17,7 +17,6 @@
 #ifndef __ASSEMBLY__
 
 #include <linux/bug.h>
-#include <linux/cpumask.h>
 #include <linux/jump_label.h>
 #include <linux/kernel.h>
 
@@ -395,8 +394,6 @@ static __always_inline bool is_hyp_code(void)
 	return is_vhe_hyp_code() || is_nvhe_hyp_code();
 }
 
-extern cpumask_t aarch32_el0_mask;
-
 extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
 extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS];
 extern struct static_key_false arm64_const_caps_ready;
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index a7554bfc42a3..9f4e3b266f21 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -65,7 +65,6 @@ void arch_release_task_struct(struct task_struct *tsk);
 #define TIF_UPROBE		4	/* uprobe breakpoint or singlestep */
 #define TIF_MTE_ASYNC_FAULT	5	/* MTE Asynchronous Tag Check Fault */
 #define TIF_NOTIFY_SIGNAL	6	/* signal notifications exist */
-#define TIF_CHECK_32BIT_AFFINITY 7	/* Check thread affinity for asymmetric AArch32 */
 #define TIF_SYSCALL_TRACE	8	/* syscall trace active */
 #define TIF_SYSCALL_AUDIT	9	/* syscall auditing */
 #define TIF_SYSCALL_TRACEPOINT	10	/* syscall tracepoint for ftrace */
@@ -93,7 +92,6 @@ void arch_release_task_struct(struct task_struct *tsk);
 #define _TIF_UPROBE		(1 << TIF_UPROBE)
 #define _TIF_SINGLESTEP		(1 << TIF_SINGLESTEP)
 #define _TIF_32BIT		(1 << TIF_32BIT)
-#define _TIF_CHECK_32BIT_AFFINITY (1 << TIF_CHECK_32BIT_AFFINITY)
 #define _TIF_SVE		(1 << TIF_SVE)
 #define _TIF_MTE_ASYNC_FAULT	(1 << TIF_MTE_ASYNC_FAULT)
 #define _TIF_NOTIFY_SIGNAL	(1 << TIF_NOTIFY_SIGNAL)
@@ -101,8 +99,7 @@ void arch_release_task_struct(struct task_struct *tsk);
 #define _TIF_WORK_MASK		(_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
 				 _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \
 				 _TIF_UPROBE | _TIF_MTE_ASYNC_FAULT | \
-				 _TIF_NOTIFY_SIGNAL | \
-				 _TIF_CHECK_32BIT_AFFINITY)
+				 _TIF_NOTIFY_SIGNAL)
 
 #define _TIF_SYSCALL_WORK	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
 				 _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index ffe025ded80f..3e6331b64932 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -63,6 +63,7 @@
 #define pr_fmt(fmt) "CPU features: " fmt
 
 #include <linux/bsearch.h>
+#include <linux/cpumask.h>
 #include <linux/crash_dump.h>
 #include <linux/sort.h>
 #include <linux/stop_machine.h>
@@ -218,11 +219,7 @@ static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL3_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL2_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SHIFT, 4, ID_AA64PFR0_EL1_64BIT_ONLY),
-#ifndef CONFIG_ASYMMETRIC_AARCH32
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_EL0_64BIT_ONLY),
-#else
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_HIGHER_SAFE, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_EL0_64BIT_ONLY),
-#endif
 	ARM64_FTR_END,
 };
 
@@ -757,7 +754,7 @@ static void __init sort_ftr_regs(void)
  * Any bits that are not covered by an arm64_ftr_bits entry are considered
  * RES0 for the system-wide value, and must strictly match.
  */
-static void init_cpu_ftr_reg(u32 sys_reg, u64 new)
+static void __init init_cpu_ftr_reg(u32 sys_reg, u64 new)
 {
 	u64 val = 0;
 	u64 strict_mask = ~0x0ULL;
@@ -839,6 +836,30 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info)
 	init_cpu_ftr_reg(SYS_ID_AA64PFR1_EL1, info->reg_id_aa64pfr1);
 	init_cpu_ftr_reg(SYS_ID_AA64ZFR0_EL1, info->reg_id_aa64zfr0);
 
+	if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {
+		init_cpu_ftr_reg(SYS_ID_DFR0_EL1, info->reg_id_dfr0);
+		init_cpu_ftr_reg(SYS_ID_DFR1_EL1, info->reg_id_dfr1);
+		init_cpu_ftr_reg(SYS_ID_ISAR0_EL1, info->reg_id_isar0);
+		init_cpu_ftr_reg(SYS_ID_ISAR1_EL1, info->reg_id_isar1);
+		init_cpu_ftr_reg(SYS_ID_ISAR2_EL1, info->reg_id_isar2);
+		init_cpu_ftr_reg(SYS_ID_ISAR3_EL1, info->reg_id_isar3);
+		init_cpu_ftr_reg(SYS_ID_ISAR4_EL1, info->reg_id_isar4);
+		init_cpu_ftr_reg(SYS_ID_ISAR5_EL1, info->reg_id_isar5);
+		init_cpu_ftr_reg(SYS_ID_ISAR6_EL1, info->reg_id_isar6);
+		init_cpu_ftr_reg(SYS_ID_MMFR0_EL1, info->reg_id_mmfr0);
+		init_cpu_ftr_reg(SYS_ID_MMFR1_EL1, info->reg_id_mmfr1);
+		init_cpu_ftr_reg(SYS_ID_MMFR2_EL1, info->reg_id_mmfr2);
+		init_cpu_ftr_reg(SYS_ID_MMFR3_EL1, info->reg_id_mmfr3);
+		init_cpu_ftr_reg(SYS_ID_MMFR4_EL1, info->reg_id_mmfr4);
+		init_cpu_ftr_reg(SYS_ID_MMFR5_EL1, info->reg_id_mmfr5);
+		init_cpu_ftr_reg(SYS_ID_PFR0_EL1, info->reg_id_pfr0);
+		init_cpu_ftr_reg(SYS_ID_PFR1_EL1, info->reg_id_pfr1);
+		init_cpu_ftr_reg(SYS_ID_PFR2_EL1, info->reg_id_pfr2);
+		init_cpu_ftr_reg(SYS_MVFR0_EL1, info->reg_mvfr0);
+		init_cpu_ftr_reg(SYS_MVFR1_EL1, info->reg_mvfr1);
+		init_cpu_ftr_reg(SYS_MVFR2_EL1, info->reg_mvfr2);
+	}
+
 	if (id_aa64pfr0_sve(info->reg_id_aa64pfr0)) {
 		init_cpu_ftr_reg(SYS_ZCR_EL1, info->reg_zcr);
 		sve_init_vq_map();
@@ -857,31 +878,6 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info)
 	setup_boot_cpu_capabilities();
 }
 
-void init_cpu_32bit_features(struct cpuinfo_arm64 *info)
-{
-	init_cpu_ftr_reg(SYS_ID_DFR0_EL1, info->reg_id_dfr0);
-	init_cpu_ftr_reg(SYS_ID_DFR1_EL1, info->reg_id_dfr1);
-	init_cpu_ftr_reg(SYS_ID_ISAR0_EL1, info->reg_id_isar0);
-	init_cpu_ftr_reg(SYS_ID_ISAR1_EL1, info->reg_id_isar1);
-	init_cpu_ftr_reg(SYS_ID_ISAR2_EL1, info->reg_id_isar2);
-	init_cpu_ftr_reg(SYS_ID_ISAR3_EL1, info->reg_id_isar3);
-	init_cpu_ftr_reg(SYS_ID_ISAR4_EL1, info->reg_id_isar4);
-	init_cpu_ftr_reg(SYS_ID_ISAR5_EL1, info->reg_id_isar5);
-	init_cpu_ftr_reg(SYS_ID_ISAR6_EL1, info->reg_id_isar6);
-	init_cpu_ftr_reg(SYS_ID_MMFR0_EL1, info->reg_id_mmfr0);
-	init_cpu_ftr_reg(SYS_ID_MMFR1_EL1, info->reg_id_mmfr1);
-	init_cpu_ftr_reg(SYS_ID_MMFR2_EL1, info->reg_id_mmfr2);
-	init_cpu_ftr_reg(SYS_ID_MMFR3_EL1, info->reg_id_mmfr3);
-	init_cpu_ftr_reg(SYS_ID_MMFR4_EL1, info->reg_id_mmfr4);
-	init_cpu_ftr_reg(SYS_ID_MMFR5_EL1, info->reg_id_mmfr5);
-	init_cpu_ftr_reg(SYS_ID_PFR0_EL1, info->reg_id_pfr0);
-	init_cpu_ftr_reg(SYS_ID_PFR1_EL1, info->reg_id_pfr1);
-	init_cpu_ftr_reg(SYS_ID_PFR2_EL1, info->reg_id_pfr2);
-	init_cpu_ftr_reg(SYS_MVFR0_EL1, info->reg_mvfr0);
-	init_cpu_ftr_reg(SYS_MVFR1_EL1, info->reg_mvfr1);
-	init_cpu_ftr_reg(SYS_MVFR2_EL1, info->reg_mvfr2);
-}
-
 static void update_cpu_ftr_reg(struct arm64_ftr_reg *reg, u64 new)
 {
 	const struct arm64_ftr_bits *ftrp;
@@ -1750,16 +1746,6 @@ cpucap_panic_on_conflict(const struct arm64_cpu_capabilities *cap)
 	return !!(cap->type & ARM64_CPUCAP_PANIC_ON_CONFLICT);
 }
 
-#ifdef CONFIG_ASYMMETRIC_AARCH32
-cpumask_t aarch32_el0_mask;
-
-static void cpu_enable_aarch32_el0(struct arm64_cpu_capabilities const *cap)
-{
-	if (has_cpuid_feature(cap, SCOPE_LOCAL_CPU))
-		cpumask_set_cpu(smp_processor_id(), &aarch32_el0_mask);
-}
-#endif
-
 static const struct arm64_cpu_capabilities arm64_features[] = {
 	{
 		.desc = "GIC system register CPU interface",
@@ -1814,12 +1800,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 	{
 		.desc = "32-bit EL0 Support",
 		.capability = ARM64_HAS_32BIT_EL0,
-#ifndef CONFIG_ASYMMETRIC_AARCH32
 		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
-#else
-		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
-		.cpu_enable = cpu_enable_aarch32_el0,
-#endif
 		.matches = has_cpuid_feature,
 		.sys_reg = SYS_ID_AA64PFR0_EL1,
 		.sign = FTR_UNSIGNED,
@@ -2627,8 +2608,7 @@ static void verify_local_cpu_capabilities(void)
 
 	verify_local_elf_hwcaps(arm64_elf_hwcaps);
 
-	if (system_supports_32bit_el0() &&
-	    this_cpu_has_cap(ARM64_HAS_32BIT_EL0))
+	if (system_supports_32bit_el0())
 		verify_local_elf_hwcaps(compat_elf_hwcaps);
 
 	if (system_supports_sve())
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index 5776d8c5d022..77605aec25fe 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -371,6 +371,32 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
 	info->reg_id_aa64pfr1 = read_cpuid(ID_AA64PFR1_EL1);
 	info->reg_id_aa64zfr0 = read_cpuid(ID_AA64ZFR0_EL1);
 
+	/* Update the 32bit ID registers only if AArch32 is implemented */
+	if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {
+		info->reg_id_dfr0 = read_cpuid(ID_DFR0_EL1);
+		info->reg_id_dfr1 = read_cpuid(ID_DFR1_EL1);
+		info->reg_id_isar0 = read_cpuid(ID_ISAR0_EL1);
+		info->reg_id_isar1 = read_cpuid(ID_ISAR1_EL1);
+		info->reg_id_isar2 = read_cpuid(ID_ISAR2_EL1);
+		info->reg_id_isar3 = read_cpuid(ID_ISAR3_EL1);
+		info->reg_id_isar4 = read_cpuid(ID_ISAR4_EL1);
+		info->reg_id_isar5 = read_cpuid(ID_ISAR5_EL1);
+		info->reg_id_isar6 = read_cpuid(ID_ISAR6_EL1);
+		info->reg_id_mmfr0 = read_cpuid(ID_MMFR0_EL1);
+		info->reg_id_mmfr1 = read_cpuid(ID_MMFR1_EL1);
+		info->reg_id_mmfr2 = read_cpuid(ID_MMFR2_EL1);
+		info->reg_id_mmfr3 = read_cpuid(ID_MMFR3_EL1);
+		info->reg_id_mmfr4 = read_cpuid(ID_MMFR4_EL1);
+		info->reg_id_mmfr5 = read_cpuid(ID_MMFR5_EL1);
+		info->reg_id_pfr0 = read_cpuid(ID_PFR0_EL1);
+		info->reg_id_pfr1 = read_cpuid(ID_PFR1_EL1);
+		info->reg_id_pfr2 = read_cpuid(ID_PFR2_EL1);
+
+		info->reg_mvfr0 = read_cpuid(MVFR0_EL1);
+		info->reg_mvfr1 = read_cpuid(MVFR1_EL1);
+		info->reg_mvfr2 = read_cpuid(MVFR2_EL1);
+	}
+
 	if (IS_ENABLED(CONFIG_ARM64_SVE) &&
 	    id_aa64pfr0_sve(info->reg_id_aa64pfr0))
 		info->reg_zcr = read_zcr_features();
@@ -378,51 +404,10 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
 	cpuinfo_detect_icache_policy(info);
 }
 
-static void __cpuinfo_store_cpu_32bit(struct cpuinfo_arm64 *info)
-{
-	info->aarch32_valid = true;
-
-	info->reg_id_dfr0 = read_cpuid(ID_DFR0_EL1);
-	info->reg_id_dfr1 = read_cpuid(ID_DFR1_EL1);
-	info->reg_id_isar0 = read_cpuid(ID_ISAR0_EL1);
-	info->reg_id_isar1 = read_cpuid(ID_ISAR1_EL1);
-	info->reg_id_isar2 = read_cpuid(ID_ISAR2_EL1);
-	info->reg_id_isar3 = read_cpuid(ID_ISAR3_EL1);
-	info->reg_id_isar4 = read_cpuid(ID_ISAR4_EL1);
-	info->reg_id_isar5 = read_cpuid(ID_ISAR5_EL1);
-	info->reg_id_isar6 = read_cpuid(ID_ISAR6_EL1);
-	info->reg_id_mmfr0 = read_cpuid(ID_MMFR0_EL1);
-	info->reg_id_mmfr1 = read_cpuid(ID_MMFR1_EL1);
-	info->reg_id_mmfr2 = read_cpuid(ID_MMFR2_EL1);
-	info->reg_id_mmfr3 = read_cpuid(ID_MMFR3_EL1);
-	info->reg_id_mmfr4 = read_cpuid(ID_MMFR4_EL1);
-	info->reg_id_mmfr5 = read_cpuid(ID_MMFR5_EL1);
-	info->reg_id_pfr0 = read_cpuid(ID_PFR0_EL1);
-	info->reg_id_pfr1 = read_cpuid(ID_PFR1_EL1);
-	info->reg_id_pfr2 = read_cpuid(ID_PFR2_EL1);
-
-	info->reg_mvfr0 = read_cpuid(MVFR0_EL1);
-	info->reg_mvfr1 = read_cpuid(MVFR1_EL1);
-	info->reg_mvfr2 = read_cpuid(MVFR2_EL1);
-}
-
 void cpuinfo_store_cpu(void)
 {
 	struct cpuinfo_arm64 *info = this_cpu_ptr(&cpu_data);
 	__cpuinfo_store_cpu(info);
-	if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0))
-		__cpuinfo_store_cpu_32bit(info);
-	/*
-	 * With asymmetric AArch32 support, populate the boot CPU information
-	 * on the first 32-bit capable secondary CPU if the primary one
-	 * skipped this step.
-	 */
-	if (IS_ENABLED(CONFIG_ASYMMETRIC_AARCH32) &&
-	    !boot_cpu_data.aarch32_valid &&
-	    id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {
-		__cpuinfo_store_cpu_32bit(&boot_cpu_data);
-		init_cpu_32bit_features(&boot_cpu_data);
-	}
 	update_cpu_features(smp_processor_id(), info, &boot_cpu_data);
 }
 
@@ -430,11 +415,7 @@ void __init cpuinfo_store_boot_cpu(void)
 {
 	struct cpuinfo_arm64 *info = &per_cpu(cpu_data, 0);
 	__cpuinfo_store_cpu(info);
-	if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0))
-		__cpuinfo_store_cpu_32bit(info);
 
 	boot_cpu_data = *info;
 	init_cpu_features(&boot_cpu_data);
-	if (id_aa64pfr0_32bit_el0(boot_cpu_data.reg_id_aa64pfr0))
-		init_cpu_32bit_features(&boot_cpu_data);
 }
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index af09170d770a..4243aaf9d092 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -495,15 +495,6 @@ static void entry_task_switch(struct task_struct *next)
 	__this_cpu_write(__entry_task, next);
 }
 
-static void aarch32_thread_switch(struct task_struct *next)
-{
-	struct thread_info *ti = task_thread_info(next);
-
-	if (IS_ENABLED(CONFIG_ASYMMETRIC_AARCH32) && is_compat_thread(ti) &&
-	    !cpumask_test_cpu(smp_processor_id(), &aarch32_el0_mask))
-		set_ti_thread_flag(ti, TIF_CHECK_32BIT_AFFINITY);
-}
-
 /*
  * ARM erratum 1418040 handling, affecting the 32bit view of CNTVCT.
  * Assuming the virtual counter is enabled at the beginning of times:
@@ -551,7 +542,6 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
 	entry_task_switch(next);
 	ssbs_thread_switch(next);
 	erratum_1418040_thread_switch(prev, next);
-	aarch32_thread_switch(next);
 	/*
 	 * vendor hook is needed before the dsb(),
 	 * because MPAM is related to cache maintenance.
@@ -629,13 +619,6 @@ void arch_setup_new_exec(void)
 		arch_prctl_spec_ctrl_set(current, PR_SPEC_STORE_BYPASS,
 					 PR_SPEC_ENABLE);
 	}
-
-	/*
-	 * If exec'ing a 32-bit task, force the asymmetric 32-bit feature
-	 * check as the task may not go through a switch_to() call.
-	 */
-	if (IS_ENABLED(CONFIG_ASYMMETRIC_AARCH32) && is_compat_task())
-		set_thread_flag(TIF_CHECK_32BIT_AFFINITY);
 }
 
 #ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
index 236b58a65e79..6237486ff6bb 100644
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -8,7 +8,6 @@
 
 #include <linux/cache.h>
 #include <linux/compat.h>
-#include <linux/cpumask.h>
 #include <linux/errno.h>
 #include <linux/kernel.h>
 #include <linux/signal.h>
@@ -912,18 +911,6 @@ static void do_signal(struct pt_regs *regs)
 	restore_saved_sigmask();
 }
 
-static void check_aarch32_cpumask(void)
-{
-	/*
-	 * The task must be a subset of aarch32_el0_mask or it could end up
-	 * migrating and running on the wrong CPU.
-	 */
-	if (!cpumask_subset(current->cpus_ptr, &aarch32_el0_mask)) {
-		pr_warn_once("CPU affinity contains CPUs that are not capable of running 32-bit tasks\n");
-		force_sig(SIGKILL);
-	}
-}
-
 asmlinkage void do_notify_resume(struct pt_regs *regs,
 				 unsigned long thread_flags)
 {
@@ -936,12 +923,6 @@ asmlinkage void do_notify_resume(struct pt_regs *regs,
 		} else {
 			local_daif_restore(DAIF_PROCCTX);
 
-			if (IS_ENABLED(CONFIG_ASYMMETRIC_AARCH32) &&
-			    thread_flags & _TIF_CHECK_32BIT_AFFINITY) {
-				clear_thread_flag(TIF_CHECK_32BIT_AFFINITY);
-				check_aarch32_cpumask();
-			}
-
 			if (thread_flags & _TIF_UPROBE)
 				uprobe_notify_resume(regs);
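
Note for review context (not part of the patch): every 32-bit path above, both the register snapshot in __cpuinfo_store_cpu() and the init_cpu_ftr_reg() calls folded into init_cpu_features(), is now gated on the same id_aa64pfr0_32bit_el0() check of the EL0 field in ID_AA64PFR0_EL1. As a reference sketch of that guard, assuming the v5.10-era definitions of the field constants in arch/arm64/include/asm/cpufeature.h:

static inline bool id_aa64pfr0_32bit_el0(u64 pfr0)
{
	/* Extract the unsigned EL0 field from the ID_AA64PFR0_EL1 value */
	u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL0_SHIFT);

	/* AArch32 is usable at EL0 only if the field reports AArch32+AArch64 */
	return val == ID_AA64PFR0_EL0_32BIT_64BIT;
}

With CONFIG_ASYMMETRIC_AARCH32 removed, ARM64_HAS_32BIT_EL0 reverts to an ARM64_CPUCAP_SYSTEM_FEATURE: compat hwcaps are verified on every CPU whenever system_supports_32bit_el0() holds, so a late-onlined CPU lacking AArch32 at EL0 fails capability verification rather than being tracked in a per-CPU mask.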