Revert "ANDROID: arm64: Add support for asymmetric AArch32 EL0 configurations"
This reverts commit 8a2a23717d.
Bug: 178507149
Signed-off-by: Will Deacon <willdeacon@google.com>
Change-Id: I281024956da915ead240be37d66268ce862d4031
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1896,20 +1896,6 @@ config DMI
 
 endmenu
 
-config ASYMMETRIC_AARCH32
-	bool "Allow support for asymmetric AArch32 support"
-	depends on COMPAT && EXPERT && !KVM
-	help
-	  Enable this option to allow support for asymmetric AArch32 EL0
-	  CPU configurations. Once the AArch32 EL0 support is detected
-	  on a CPU, the feature is made available to user space to allow
-	  the execution of 32-bit (compat) applications. If the affinity
-	  of the 32-bit application contains a non-AArch32 capable CPU
-	  or the last AArch32 capable CPU is offlined, the application
-	  will be killed.
-
-	  If unsure say N.
-
 config SYSVIPC_COMPAT
 	def_bool y
 	depends on COMPAT && SYSVIPC
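
The help text above is essentially the whole user-visible contract of the
reverted feature: AArch32 is advertised as long as at least one CPU
implements it, and a 32-bit task whose CPU affinity strays outside the
AArch32-capable set is killed. Under that contract a launcher would have
had to pin a compat task to a safe mask before exec. A minimal userspace
sketch of that; the CPU numbers and the ./app32 binary are assumptions
for illustration, not anything from the patch:

	/*
	 * Hypothetical launcher: restrict affinity to CPUs assumed to be
	 * AArch32-capable, then exec a compat binary, which inherits the
	 * affinity mask across the exec.
	 */
	#define _GNU_SOURCE
	#include <sched.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		cpu_set_t set;

		CPU_ZERO(&set);
		CPU_SET(0, &set);	/* assumed AArch32-capable */
		CPU_SET(1, &set);	/* assumed AArch32-capable */

		if (sched_setaffinity(0, sizeof(set), &set)) {
			perror("sched_setaffinity");
			return 1;
		}

		execl("./app32", "app32", (char *)NULL);
		perror("execl");	/* reached only if exec fails */
		return 1;
	}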
--- a/arch/arm64/include/asm/cpu.h
+++ b/arch/arm64/include/asm/cpu.h
@@ -15,7 +15,6 @@
 struct cpuinfo_arm64 {
 	struct cpu	cpu;
 	struct kobject	kobj;
-	bool		aarch32_valid;
 	u32		reg_ctr;
 	u32		reg_cntfrq;
 	u32		reg_dczid;
@@ -66,7 +65,6 @@ void cpuinfo_store_cpu(void);
 void __init cpuinfo_store_boot_cpu(void);
 
 void __init init_cpu_features(struct cpuinfo_arm64 *info);
-void init_cpu_32bit_features(struct cpuinfo_arm64 *info);
 void update_cpu_features(int cpu, struct cpuinfo_arm64 *info,
 			 struct cpuinfo_arm64 *boot);
 
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -17,7 +17,6 @@
 #ifndef __ASSEMBLY__
 
 #include <linux/bug.h>
-#include <linux/cpumask.h>
 #include <linux/jump_label.h>
 #include <linux/kernel.h>
 
@@ -395,8 +394,6 @@ static __always_inline bool is_hyp_code(void)
 	return is_vhe_hyp_code() || is_nvhe_hyp_code();
 }
 
-extern cpumask_t aarch32_el0_mask;
-
 extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
 extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS];
 extern struct static_key_false arm64_const_caps_ready;
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -65,7 +65,6 @@ void arch_release_task_struct(struct task_struct *tsk);
 #define TIF_UPROBE		4	/* uprobe breakpoint or singlestep */
 #define TIF_MTE_ASYNC_FAULT	5	/* MTE Asynchronous Tag Check Fault */
 #define TIF_NOTIFY_SIGNAL	6	/* signal notifications exist */
-#define TIF_CHECK_32BIT_AFFINITY 7	/* Check thread affinity for asymmetric AArch32 */
 #define TIF_SYSCALL_TRACE	8	/* syscall trace active */
 #define TIF_SYSCALL_AUDIT	9	/* syscall auditing */
 #define TIF_SYSCALL_TRACEPOINT	10	/* syscall tracepoint for ftrace */
@@ -93,7 +92,6 @@ void arch_release_task_struct(struct task_struct *tsk);
 #define _TIF_UPROBE		(1 << TIF_UPROBE)
 #define _TIF_SINGLESTEP		(1 << TIF_SINGLESTEP)
 #define _TIF_32BIT		(1 << TIF_32BIT)
-#define _TIF_CHECK_32BIT_AFFINITY (1 << TIF_CHECK_32BIT_AFFINITY)
 #define _TIF_SVE		(1 << TIF_SVE)
 #define _TIF_MTE_ASYNC_FAULT	(1 << TIF_MTE_ASYNC_FAULT)
 #define _TIF_NOTIFY_SIGNAL	(1 << TIF_NOTIFY_SIGNAL)
@@ -101,8 +99,7 @@ void arch_release_task_struct(struct task_struct *tsk);
 #define _TIF_WORK_MASK		(_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
 				 _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \
 				 _TIF_UPROBE | _TIF_MTE_ASYNC_FAULT | \
-				 _TIF_NOTIFY_SIGNAL | \
-				 _TIF_CHECK_32BIT_AFFINITY)
+				 _TIF_NOTIFY_SIGNAL)
 
 #define _TIF_SYSCALL_WORK	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
 				 _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
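
TIF_CHECK_32BIT_AFFINITY followed the usual thread_info flag pattern: a
bit number, a matching _TIF_ mask, and membership in _TIF_WORK_MASK so
that a set bit diverts the exit-to-user path into do_notify_resume(). A
standalone toy model of that pattern, not the kernel's actual headers:

	#include <stdio.h>

	#define TIF_NEED_RESCHED		1
	#define TIF_CHECK_32BIT_AFFINITY	7

	#define _TIF_NEED_RESCHED		(1 << TIF_NEED_RESCHED)
	#define _TIF_CHECK_32BIT_AFFINITY	(1 << TIF_CHECK_32BIT_AFFINITY)

	/* Any bit in this mask forces the slow exit-to-user path. */
	#define _TIF_WORK_MASK	(_TIF_NEED_RESCHED | _TIF_CHECK_32BIT_AFFINITY)

	int main(void)
	{
		unsigned long flags = 0;

		flags |= _TIF_CHECK_32BIT_AFFINITY;	/* set at switch/exec time */

		if (flags & _TIF_WORK_MASK)
			printf("slow path taken, flags=%#lx\n", flags);

		flags &= ~_TIF_CHECK_32BIT_AFFINITY;	/* cleared once handled */
		return 0;
	}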
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -63,6 +63,7 @@
 #define pr_fmt(fmt) "CPU features: " fmt
 
 #include <linux/bsearch.h>
+#include <linux/cpumask.h>
 #include <linux/crash_dump.h>
 #include <linux/sort.h>
 #include <linux/stop_machine.h>
@@ -218,11 +219,7 @@ static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL3_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL2_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SHIFT, 4, ID_AA64PFR0_EL1_64BIT_ONLY),
-#ifndef CONFIG_ASYMMETRIC_AARCH32
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_EL0_64BIT_ONLY),
-#else
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_HIGHER_SAFE, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_EL0_64BIT_ONLY),
-#endif
 	ARM64_FTR_END,
 };
 
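
The only functional change reverted in this table is the sanitisation
policy for the EL0 field of ID_AA64PFR0_EL1. FTR_LOWER_SAFE folds the
system-wide value towards the lowest value seen across CPUs, so a single
64-bit-only core hides AArch32 from the whole system; FTR_HIGHER_SAFE
folds upwards, so AArch32 stays advertised if any core implements it. A
standalone model of that folding, using the architectural encodings of
the field (1: EL0 is AArch64-only, 2: EL0 supports AArch32 as well):

	#include <stdio.h>

	#define EL0_64BIT_ONLY	1	/* ID_AA64PFR0_EL1.EL0 == 0b0001 */
	#define EL0_32BIT_64BIT	2	/* ID_AA64PFR0_EL1.EL0 == 0b0010 */

	int main(void)
	{
		int el0[] = { EL0_32BIT_64BIT, EL0_64BIT_ONLY, EL0_32BIT_64BIT };
		int lower = el0[0], higher = el0[0];

		for (int i = 1; i < 3; i++) {
			lower = el0[i] < lower ? el0[i] : lower;	/* FTR_LOWER_SAFE */
			higher = el0[i] > higher ? el0[i] : higher;	/* FTR_HIGHER_SAFE */
		}

		/* lower=1 hides AArch32; higher=2 keeps it advertised. */
		printf("lower_safe=%d higher_safe=%d\n", lower, higher);
		return 0;
	}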
@@ -757,7 +754,7 @@ static void __init sort_ftr_regs(void)
  * Any bits that are not covered by an arm64_ftr_bits entry are considered
  * RES0 for the system-wide value, and must strictly match.
  */
-static void init_cpu_ftr_reg(u32 sys_reg, u64 new)
+static void __init init_cpu_ftr_reg(u32 sys_reg, u64 new)
 {
 	u64 val = 0;
 	u64 strict_mask = ~0x0ULL;
@@ -839,26 +836,7 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info)
 	init_cpu_ftr_reg(SYS_ID_AA64PFR1_EL1, info->reg_id_aa64pfr1);
 	init_cpu_ftr_reg(SYS_ID_AA64ZFR0_EL1, info->reg_id_aa64zfr0);
 
-	if (id_aa64pfr0_sve(info->reg_id_aa64pfr0)) {
-		init_cpu_ftr_reg(SYS_ZCR_EL1, info->reg_zcr);
-		sve_init_vq_map();
-	}
-
-	/*
-	 * Initialize the indirect array of CPU hwcaps capabilities pointers
-	 * before we handle the boot CPU below.
-	 */
-	init_cpu_hwcaps_indirect_list();
-
-	/*
-	 * Detect and enable early CPU capabilities based on the boot CPU,
-	 * after we have initialised the CPU feature infrastructure.
-	 */
-	setup_boot_cpu_capabilities();
-}
-
-void init_cpu_32bit_features(struct cpuinfo_arm64 *info)
-{
+	if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {
 		init_cpu_ftr_reg(SYS_ID_DFR0_EL1, info->reg_id_dfr0);
 		init_cpu_ftr_reg(SYS_ID_DFR1_EL1, info->reg_id_dfr1);
 		init_cpu_ftr_reg(SYS_ID_ISAR0_EL1, info->reg_id_isar0);
@@ -880,6 +858,24 @@ void init_cpu_32bit_features(struct cpuinfo_arm64 *info)
 		init_cpu_ftr_reg(SYS_MVFR0_EL1, info->reg_mvfr0);
 		init_cpu_ftr_reg(SYS_MVFR1_EL1, info->reg_mvfr1);
 		init_cpu_ftr_reg(SYS_MVFR2_EL1, info->reg_mvfr2);
+	}
+
+	if (id_aa64pfr0_sve(info->reg_id_aa64pfr0)) {
+		init_cpu_ftr_reg(SYS_ZCR_EL1, info->reg_zcr);
+		sve_init_vq_map();
+	}
+
+	/*
+	 * Initialize the indirect array of CPU hwcaps capabilities pointers
+	 * before we handle the boot CPU below.
+	 */
+	init_cpu_hwcaps_indirect_list();
+
+	/*
+	 * Detect and enable early CPU capabilities based on the boot CPU,
+	 * after we have initialised the CPU feature infrastructure.
+	 */
+	setup_boot_cpu_capabilities();
 }
 
 static void update_cpu_ftr_reg(struct arm64_ftr_reg *reg, u64 new)
@@ -1750,16 +1746,6 @@ cpucap_panic_on_conflict(const struct arm64_cpu_capabilities *cap)
 	return !!(cap->type & ARM64_CPUCAP_PANIC_ON_CONFLICT);
 }
 
-#ifdef CONFIG_ASYMMETRIC_AARCH32
-cpumask_t aarch32_el0_mask;
-
-static void cpu_enable_aarch32_el0(struct arm64_cpu_capabilities const *cap)
-{
-	if (has_cpuid_feature(cap, SCOPE_LOCAL_CPU))
-		cpumask_set_cpu(smp_processor_id(), &aarch32_el0_mask);
-}
-#endif
-
 static const struct arm64_cpu_capabilities arm64_features[] = {
 	{
 		.desc = "GIC system register CPU interface",
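
The removed cpu_enable_aarch32_el0() hook ran on every CPU as it came up
(the next hunk wires it up as the cpu_enable callback of the
ARM64_HAS_32BIT_EL0 capability, downgraded to a weak local-CPU feature)
and recorded each 32-bit-capable CPU in aarch32_el0_mask. A toy model of
that per-CPU accumulation, with the SCOPE_LOCAL_CPU CPUID test replaced
by a static table:

	#include <stdbool.h>
	#include <stdio.h>

	#define NR_CPUS 4

	/* Stand-in for has_cpuid_feature(cap, SCOPE_LOCAL_CPU) per CPU. */
	static bool cpu_has_aarch32[NR_CPUS] = { true, false, true, true };

	static unsigned long aarch32_el0_mask;

	static void cpu_enable_aarch32_el0(int cpu)
	{
		if (cpu_has_aarch32[cpu])
			aarch32_el0_mask |= 1UL << cpu;
	}

	int main(void)
	{
		/* The kernel invokes the enable hook on each CPU it brings up. */
		for (int cpu = 0; cpu < NR_CPUS; cpu++)
			cpu_enable_aarch32_el0(cpu);

		printf("aarch32_el0_mask = %#lx\n", aarch32_el0_mask);	/* 0xd */
		return 0;
	}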
@@ -1814,12 +1800,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 	{
 		.desc = "32-bit EL0 Support",
 		.capability = ARM64_HAS_32BIT_EL0,
-#ifndef CONFIG_ASYMMETRIC_AARCH32
 		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
-#else
-		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
-		.cpu_enable = cpu_enable_aarch32_el0,
-#endif
 		.matches = has_cpuid_feature,
 		.sys_reg = SYS_ID_AA64PFR0_EL1,
 		.sign = FTR_UNSIGNED,
@@ -2627,8 +2608,7 @@ static void verify_local_cpu_capabilities(void)
 
 	verify_local_elf_hwcaps(arm64_elf_hwcaps);
 
-	if (system_supports_32bit_el0() &&
-	    this_cpu_has_cap(ARM64_HAS_32BIT_EL0))
+	if (system_supports_32bit_el0())
 		verify_local_elf_hwcaps(compat_elf_hwcaps);
 
 	if (system_supports_sve())
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -371,17 +371,8 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
 	info->reg_id_aa64pfr1 = read_cpuid(ID_AA64PFR1_EL1);
 	info->reg_id_aa64zfr0 = read_cpuid(ID_AA64ZFR0_EL1);
 
-	if (IS_ENABLED(CONFIG_ARM64_SVE) &&
-	    id_aa64pfr0_sve(info->reg_id_aa64pfr0))
-		info->reg_zcr = read_zcr_features();
-
-	cpuinfo_detect_icache_policy(info);
-}
-
-static void __cpuinfo_store_cpu_32bit(struct cpuinfo_arm64 *info)
-{
-	info->aarch32_valid = true;
-
+	/* Update the 32bit ID registers only if AArch32 is implemented */
+	if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {
 		info->reg_id_dfr0 = read_cpuid(ID_DFR0_EL1);
 		info->reg_id_dfr1 = read_cpuid(ID_DFR1_EL1);
 		info->reg_id_isar0 = read_cpuid(ID_ISAR0_EL1);
@@ -404,25 +395,19 @@ static void __cpuinfo_store_cpu_32bit(struct cpuinfo_arm64 *info)
 		info->reg_mvfr0 = read_cpuid(MVFR0_EL1);
 		info->reg_mvfr1 = read_cpuid(MVFR1_EL1);
 		info->reg_mvfr2 = read_cpuid(MVFR2_EL1);
+	}
+
+	if (IS_ENABLED(CONFIG_ARM64_SVE) &&
+	    id_aa64pfr0_sve(info->reg_id_aa64pfr0))
+		info->reg_zcr = read_zcr_features();
+
+	cpuinfo_detect_icache_policy(info);
 }
 
 void cpuinfo_store_cpu(void)
 {
 	struct cpuinfo_arm64 *info = this_cpu_ptr(&cpu_data);
 	__cpuinfo_store_cpu(info);
-	if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0))
-		__cpuinfo_store_cpu_32bit(info);
-
-	/*
-	 * With asymmetric AArch32 support, populate the boot CPU information
-	 * on the first 32-bit capable secondary CPU if the primary one
-	 * skipped this step.
-	 */
-	if (IS_ENABLED(CONFIG_ASYMMETRIC_AARCH32) &&
-	    !boot_cpu_data.aarch32_valid &&
-	    id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {
-		__cpuinfo_store_cpu_32bit(&boot_cpu_data);
-		init_cpu_32bit_features(&boot_cpu_data);
-	}
 	update_cpu_features(smp_processor_id(), info, &boot_cpu_data);
 }
 
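
The comment deleted above documents the corner case this block existed
for: if the boot CPU is 64-bit-only, boot_cpu_data carries no valid
32-bit register snapshot, so the first 32-bit-capable secondary seeds it,
with aarch32_valid guaranteeing this happens only once. A toy model of
that logic with a single stand-in ID register:

	#include <stdbool.h>
	#include <stdio.h>

	struct cpuinfo {
		bool aarch32_valid;
		unsigned int reg_id_dfr0;	/* stand-in 32-bit ID register */
	};

	static struct cpuinfo boot_cpu_data;	/* boot CPU was 64-bit-only */

	static void secondary_up(bool has_aarch32, unsigned int dfr0)
	{
		/* First 32-bit-capable CPU seeds the boot baseline, once. */
		if (has_aarch32 && !boot_cpu_data.aarch32_valid) {
			boot_cpu_data.aarch32_valid = true;
			boot_cpu_data.reg_id_dfr0 = dfr0;
		}
	}

	int main(void)
	{
		secondary_up(false, 0);		/* CPU1: 64-bit only, skipped */
		secondary_up(true, 0x10011);	/* CPU2: seeds the baseline */
		secondary_up(true, 0x10012);	/* CPU3: baseline already set */
		printf("valid=%d dfr0=%#x\n", boot_cpu_data.aarch32_valid,
		       boot_cpu_data.reg_id_dfr0);
		return 0;
	}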
@@ -430,11 +415,7 @@ void __init cpuinfo_store_boot_cpu(void)
 {
 	struct cpuinfo_arm64 *info = &per_cpu(cpu_data, 0);
 	__cpuinfo_store_cpu(info);
-	if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0))
-		__cpuinfo_store_cpu_32bit(info);
 
 	boot_cpu_data = *info;
 	init_cpu_features(&boot_cpu_data);
-	if (id_aa64pfr0_32bit_el0(boot_cpu_data.reg_id_aa64pfr0))
-		init_cpu_32bit_features(&boot_cpu_data);
 }
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -495,15 +495,6 @@ static void entry_task_switch(struct task_struct *next)
 	__this_cpu_write(__entry_task, next);
 }
 
-static void aarch32_thread_switch(struct task_struct *next)
-{
-	struct thread_info *ti = task_thread_info(next);
-
-	if (IS_ENABLED(CONFIG_ASYMMETRIC_AARCH32) && is_compat_thread(ti) &&
-	    !cpumask_test_cpu(smp_processor_id(), &aarch32_el0_mask))
-		set_ti_thread_flag(ti, TIF_CHECK_32BIT_AFFINITY);
-}
-
 /*
  * ARM erratum 1418040 handling, affecting the 32bit view of CNTVCT.
  * Assuming the virtual counter is enabled at the beginning of times:
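
The deleted aarch32_thread_switch() is one half of a deferred check: the
context-switch path must not deliver signals, so when a compat task lands
on a CPU outside aarch32_el0_mask it is only tagged with
TIF_CHECK_32BIT_AFFINITY, and the fatal affinity check runs later on the
exit-to-user path. A toy model of that two-phase protocol:

	#include <stdbool.h>
	#include <stdio.h>

	static bool compat_task = true;
	static bool cpu_is_aarch32_capable;	/* CPU the task landed on */
	static bool check_pending;

	static void thread_switch(void)
	{
		/* Cheap test only; signal delivery is not allowed here. */
		if (compat_task && !cpu_is_aarch32_capable)
			check_pending = true;
	}

	static void notify_resume(void)
	{
		/* Safe point: signals may be sent from here. */
		if (check_pending) {
			check_pending = false;
			printf("validate affinity, SIGKILL on violation\n");
		}
	}

	int main(void)
	{
		thread_switch();	/* runs at every context switch */
		notify_resume();	/* runs before returning to userspace */
		return 0;
	}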
@@ -551,7 +542,6 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
 	entry_task_switch(next);
 	ssbs_thread_switch(next);
 	erratum_1418040_thread_switch(prev, next);
-	aarch32_thread_switch(next);
 	/*
 	 * vendor hook is needed before the dsb(),
 	 * because MPAM is related to cache maintenance.
@@ -629,13 +619,6 @@ void arch_setup_new_exec(void)
 		arch_prctl_spec_ctrl_set(current, PR_SPEC_STORE_BYPASS,
 					 PR_SPEC_ENABLE);
 	}
-
-	/*
-	 * If exec'ing a 32-bit task, force the asymmetric 32-bit feature
-	 * check as the task may not go through a switch_to() call.
-	 */
-	if (IS_ENABLED(CONFIG_ASYMMETRIC_AARCH32) && is_compat_task())
-		set_thread_flag(TIF_CHECK_32BIT_AFFINITY);
 }
 
 #ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -8,7 +8,6 @@
 
 #include <linux/cache.h>
 #include <linux/compat.h>
-#include <linux/cpumask.h>
 #include <linux/errno.h>
 #include <linux/kernel.h>
 #include <linux/signal.h>
@@ -912,18 +911,6 @@ static void do_signal(struct pt_regs *regs)
 		restore_saved_sigmask();
 }
 
-static void check_aarch32_cpumask(void)
-{
-	/*
-	 * The task must be a subset of aarch32_el0_mask or it could end up
-	 * migrating and running on the wrong CPU.
-	 */
-	if (!cpumask_subset(current->cpus_ptr, &aarch32_el0_mask)) {
-		pr_warn_once("CPU affinity contains CPUs that are not capable of running 32-bit tasks\n");
-		force_sig(SIGKILL);
-	}
-}
-
 asmlinkage void do_notify_resume(struct pt_regs *regs,
 				 unsigned long thread_flags)
 {
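
check_aarch32_cpumask() enforced the policy promised by the Kconfig help
text: unless the task's affinity mask is a subset of aarch32_el0_mask,
the task gets SIGKILL. The subset test itself is easy to reproduce from
userspace; in this runnable sketch the "capable" mask is a hypothetical
stand-in for aarch32_el0_mask:

	#define _GNU_SOURCE
	#include <sched.h>
	#include <stdio.h>

	int main(void)
	{
		cpu_set_t affinity, capable, both;

		CPU_ZERO(&capable);
		CPU_SET(0, &capable);	/* assumed AArch32-capable CPUs */
		CPU_SET(1, &capable);

		if (sched_getaffinity(0, sizeof(affinity), &affinity)) {
			perror("sched_getaffinity");
			return 1;
		}

		/* affinity is a subset of capable iff (affinity & capable) == affinity. */
		CPU_AND(&both, &affinity, &capable);
		if (CPU_EQUAL(&both, &affinity))
			printf("affinity stays within the capable set\n");
		else
			printf("the reverted policy would SIGKILL this task\n");
		return 0;
	}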
@@ -936,12 +923,6 @@ asmlinkage void do_notify_resume(struct pt_regs *regs,
 		} else {
 			local_daif_restore(DAIF_PROCCTX);
 
-			if (IS_ENABLED(CONFIG_ASYMMETRIC_AARCH32) &&
-			    thread_flags & _TIF_CHECK_32BIT_AFFINITY) {
-				clear_thread_flag(TIF_CHECK_32BIT_AFFINITY);
-				check_aarch32_cpumask();
-			}
-
 			if (thread_flags & _TIF_UPROBE)
 				uprobe_notify_resume(regs);
 