Changes in 5.15.44
HID: amd_sfh: Add support for sensor discovery
KVM: x86/mmu: fix NULL pointer dereference on guest INVPCID
ice: fix crash at allocation failure
ACPI: sysfs: Fix BERT error region memory mapping
MAINTAINERS: co-maintain random.c
MAINTAINERS: add git tree for random.c
lib/crypto: blake2s: include as built-in
lib/crypto: blake2s: move hmac construction into wireguard
lib/crypto: sha1: re-roll loops to reduce code size
lib/crypto: blake2s: avoid indirect calls to compression function for Clang CFI
random: document add_hwgenerator_randomness() with other input functions
random: remove unused irq_flags argument from add_interrupt_randomness()
random: use BLAKE2s instead of SHA1 in extraction
random: do not sign extend bytes for rotation when mixing
random: do not re-init if crng_reseed completes before primary init
random: mix bootloader randomness into pool
random: harmonize "crng init done" messages
random: use IS_ENABLED(CONFIG_NUMA) instead of ifdefs
random: early initialization of ChaCha constants
random: avoid superfluous call to RDRAND in CRNG extraction
random: don't reset crng_init_cnt on urandom_read()
random: fix typo in comments
random: cleanup poolinfo abstraction
random: cleanup integer types
random: remove incomplete last_data logic
random: remove unused extract_entropy() reserved argument
random: rather than entropy_store abstraction, use global
random: remove unused OUTPUT_POOL constants
random: de-duplicate INPUT_POOL constants
random: prepend remaining pool constants with POOL_
random: cleanup fractional entropy shift constants
random: access input_pool_data directly rather than through pointer
random: selectively clang-format where it makes sense
random: simplify arithmetic function flow in account()
random: continually use hwgenerator randomness
random: access primary_pool directly rather than through pointer
random: only call crng_finalize_init() for primary_crng
random: use computational hash for entropy extraction
random: simplify entropy debiting
random: use linear min-entropy accumulation crediting
random: always wake up entropy writers after extraction
random: make credit_entropy_bits() always safe
random: remove use_input_pool parameter from crng_reseed()
random: remove batched entropy locking
random: fix locking in crng_fast_load()
random: use RDSEED instead of RDRAND in entropy extraction
random: get rid of secondary crngs
random: inline leaves of rand_initialize()
random: ensure early RDSEED goes through mixer on init
random: do not xor RDRAND when writing into /dev/random
random: absorb fast pool into input pool after fast load
random: use simpler fast key erasure flow on per-cpu keys
random: use hash function for crng_slow_load()
random: make more consistent use of integer types
random: remove outdated INT_MAX >> 6 check in urandom_read()
random: zero buffer after reading entropy from userspace
random: fix locking for crng_init in crng_reseed()
random: tie batched entropy generation to base_crng generation
random: remove ifdef'd out interrupt bench
random: remove unused tracepoints
random: add proper SPDX header
random: deobfuscate irq u32/u64 contributions
random: introduce drain_entropy() helper to declutter crng_reseed()
random: remove useless header comment
random: remove whitespace and reorder includes
random: group initialization wait functions
random: group crng functions
random: group entropy extraction functions
random: group entropy collection functions
random: group userspace read/write functions
random: group sysctl functions
random: rewrite header introductory comment
random: defer fast pool mixing to worker
random: do not take pool spinlock at boot
random: unify early init crng load accounting
random: check for crng_init == 0 in add_device_randomness()
random: pull add_hwgenerator_randomness() declaration into random.h
random: clear fast pool, crng, and batches in cpuhp bring up
random: round-robin registers as ulong, not u32
random: only wake up writers after zap if threshold was passed
random: cleanup UUID handling
random: unify cycles_t and jiffies usage and types
random: do crng pre-init loading in worker rather than irq
random: give sysctl_random_min_urandom_seed a more sensible value
random: don't let 644 read-only sysctls be written to
random: replace custom notifier chain with standard one
random: use SipHash as interrupt entropy accumulator
random: make consistent usage of crng_ready()
random: reseed more often immediately after booting
random: check for signal and try earlier when generating entropy
random: skip fast_init if hwrng provides large chunk of entropy
random: treat bootloader trust toggle the same way as cpu trust toggle
random: re-add removed comment about get_random_{u32,u64} reseeding
random: mix build-time latent entropy into pool at init
random: do not split fast init input in add_hwgenerator_randomness()
random: do not allow user to keep crng key around on stack
random: check for signal_pending() outside of need_resched() check
random: check for signals every PAGE_SIZE chunk of /dev/[u]random
random: allow partial reads if later user copies fail
random: make random_get_entropy() return an unsigned long
random: document crng_fast_key_erasure() destination possibility
random: fix sysctl documentation nits
init: call time_init() before rand_initialize()
ia64: define get_cycles macro for arch-override
s390: define get_cycles macro for arch-override
parisc: define get_cycles macro for arch-override
alpha: define get_cycles macro for arch-override
powerpc: define get_cycles macro for arch-override
timekeeping: Add raw clock fallback for random_get_entropy()
m68k: use fallback for random_get_entropy() instead of zero
riscv: use fallback for random_get_entropy() instead of zero
mips: use fallback for random_get_entropy() instead of just c0 random
arm: use fallback for random_get_entropy() instead of zero
nios2: use fallback for random_get_entropy() instead of zero
x86/tsc: Use fallback for random_get_entropy() instead of zero
um: use fallback for random_get_entropy() instead of zero
sparc: use fallback for random_get_entropy() instead of zero
xtensa: use fallback for random_get_entropy() instead of zero
random: insist on random_get_entropy() existing in order to simplify
random: do not use batches when !crng_ready()
random: use first 128 bits of input as fast init
random: do not pretend to handle premature next security model
random: order timer entropy functions below interrupt functions
random: do not use input pool from hard IRQs
random: help compiler out with fast_mix() by using simpler arguments
siphash: use one source of truth for siphash permutations
random: use symbolic constants for crng_init states
random: avoid initializing twice in credit race
random: move initialization out of reseeding hot path
random: remove ratelimiting for in-kernel unseeded randomness
random: use proper jiffies comparison macro
random: handle latent entropy and command line from random_init()
random: credit architectural init the exact amount
random: use static branch for crng_ready()
random: remove extern from functions in header
random: use proper return types on get_random_{int,long}_wait()
random: make consistent use of buf and len
random: move initialization functions out of hot pages
random: move randomize_page() into mm where it belongs
random: unify batched entropy implementations
random: convert to using fops->read_iter()
random: convert to using fops->write_iter()
random: wire up fops->splice_{read,write}_iter()
random: check for signals after page of pool writes
ALSA: ctxfi: Add SB046x PCI ID
Linux 5.15.44
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I2d874cba14f13379fc5f874e72634c9179b28742

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __CPUHOTPLUG_H
#define __CPUHOTPLUG_H

#include <linux/types.h>

/*
 * CPU-up                       CPU-down
 *
 * BP           AP              BP              AP
 *
 * OFFLINE                      OFFLINE
 *   |                            ^
 *   v                            |
 * BRINGUP_CPU->AP_OFFLINE      BRINGUP_CPU <- AP_IDLE_DEAD (idle thread/play_dead)
 *                |                             AP_OFFLINE
 *                v (IRQ-off)     ,---------------^
 *              AP_ONLINE          | (stop_machine)
 *                |             TEARDOWN_CPU <- AP_ONLINE_IDLE
 *                |                               ^
 *                v                               |
 *              AP_ACTIVE                       AP_ACTIVE
 */

/*
 * CPU hotplug states. The state machine invokes the installed state
 * startup callbacks sequentially from CPUHP_OFFLINE + 1 to CPUHP_ONLINE
 * during a CPU online operation. During a CPU offline operation the
 * installed teardown callbacks are invoked in the reverse order from
 * CPUHP_ONLINE - 1 down to CPUHP_OFFLINE.
 *
 * The state space has three sections: PREPARE, STARTING and ONLINE.
 *
 * PREPARE: The callbacks are invoked on a control CPU before the
 * hotplugged CPU is started up or after the hotplugged CPU has died.
 *
 * STARTING: The callbacks are invoked on the hotplugged CPU from the low level
 * hotplug startup/teardown code with interrupts disabled.
 *
 * ONLINE: The callbacks are invoked on the hotplugged CPU from the per CPU
 * hotplug thread with interrupts and preemption enabled.
 *
 * Adding explicit states to this enum is only necessary when:
 *
 * 1) The state is within the STARTING section
 *
 * 2) The state has ordering constraints vs. other states in the
 *    same section.
 *
 * If neither #1 nor #2 apply, please use the dynamic state space when
 * setting up a state by using CPUHP_BP_PREPARE_DYN or CPUHP_AP_ONLINE_DYN
 * for the @state argument of the setup function.
 *
 * See Documentation/core-api/cpu_hotplug.rst for further information and
 * examples.
 */
enum cpuhp_state {
        CPUHP_INVALID = -1,

        /* PREPARE section invoked on a control CPU */
        CPUHP_OFFLINE = 0,
        CPUHP_CREATE_THREADS,
        CPUHP_PERF_PREPARE,
        CPUHP_PERF_X86_PREPARE,
        CPUHP_PERF_X86_AMD_UNCORE_PREP,
        CPUHP_PERF_POWER,
        CPUHP_PERF_SUPERH,
        CPUHP_X86_HPET_DEAD,
        CPUHP_X86_APB_DEAD,
        CPUHP_X86_MCE_DEAD,
        CPUHP_VIRT_NET_DEAD,
        CPUHP_SLUB_DEAD,
        CPUHP_DEBUG_OBJ_DEAD,
        CPUHP_MM_WRITEBACK_DEAD,
        /* Must be after CPUHP_MM_VMSTAT_DEAD */
        CPUHP_MM_DEMOTION_DEAD,
        CPUHP_MM_VMSTAT_DEAD,
        CPUHP_SOFTIRQ_DEAD,
        CPUHP_NET_MVNETA_DEAD,
        CPUHP_CPUIDLE_DEAD,
        CPUHP_ARM64_FPSIMD_DEAD,
        CPUHP_ARM_OMAP_WAKE_DEAD,
        CPUHP_IRQ_POLL_DEAD,
        CPUHP_BLOCK_SOFTIRQ_DEAD,
        CPUHP_BIO_DEAD,
        CPUHP_ACPI_CPUDRV_DEAD,
        CPUHP_S390_PFAULT_DEAD,
        CPUHP_BLK_MQ_DEAD,
        CPUHP_FS_BUFF_DEAD,
        CPUHP_PRINTK_DEAD,
        CPUHP_MM_MEMCQ_DEAD,
        CPUHP_XFS_DEAD,
        CPUHP_PERCPU_CNT_DEAD,
        CPUHP_RADIX_DEAD,
        CPUHP_PAGE_ALLOC,
        CPUHP_NET_DEV_DEAD,
        CPUHP_PCI_XGENE_DEAD,
        CPUHP_IOMMU_IOVA_DEAD,
        CPUHP_LUSTRE_CFS_DEAD,
        CPUHP_AP_ARM_CACHE_B15_RAC_DEAD,
        CPUHP_PADATA_DEAD,
        CPUHP_RANDOM_PREPARE,
        CPUHP_WORKQUEUE_PREP,
        CPUHP_POWER_NUMA_PREPARE,
        CPUHP_HRTIMERS_PREPARE,
        CPUHP_PROFILE_PREPARE,
        CPUHP_X2APIC_PREPARE,
        CPUHP_SMPCFD_PREPARE,
        CPUHP_RELAY_PREPARE,
        CPUHP_SLAB_PREPARE,
        CPUHP_MD_RAID5_PREPARE,
        CPUHP_RCUTREE_PREP,
        CPUHP_CPUIDLE_COUPLED_PREPARE,
        CPUHP_POWERPC_PMAC_PREPARE,
        CPUHP_POWERPC_MMU_CTX_PREPARE,
        CPUHP_XEN_PREPARE,
        CPUHP_XEN_EVTCHN_PREPARE,
        CPUHP_ARM_SHMOBILE_SCU_PREPARE,
        CPUHP_SH_SH3X_PREPARE,
        CPUHP_NET_FLOW_PREPARE,
        CPUHP_TOPOLOGY_PREPARE,
        CPUHP_NET_IUCV_PREPARE,
        CPUHP_ARM_BL_PREPARE,
        CPUHP_TRACE_RB_PREPARE,
        CPUHP_MM_ZS_PREPARE,
        CPUHP_MM_ZSWP_MEM_PREPARE,
        CPUHP_MM_ZSWP_POOL_PREPARE,
        CPUHP_KVM_PPC_BOOK3S_PREPARE,
        CPUHP_ZCOMP_PREPARE,
        CPUHP_TIMERS_PREPARE,
        CPUHP_MIPS_SOC_PREPARE,
        CPUHP_BP_PREPARE_DYN,
        CPUHP_BP_PREPARE_DYN_END = CPUHP_BP_PREPARE_DYN + 20,
        CPUHP_BRINGUP_CPU,

        /*
         * STARTING section invoked on the hotplugged CPU in low level
         * bringup and teardown code.
         */
        CPUHP_AP_IDLE_DEAD,
        CPUHP_AP_OFFLINE,
        CPUHP_AP_SCHED_STARTING,
        CPUHP_AP_RCUTREE_DYING,
        CPUHP_AP_CPU_PM_STARTING,
        CPUHP_AP_IRQ_GIC_STARTING,
        CPUHP_AP_IRQ_HIP04_STARTING,
        CPUHP_AP_IRQ_APPLE_AIC_STARTING,
        CPUHP_AP_IRQ_ARMADA_XP_STARTING,
        CPUHP_AP_IRQ_BCM2836_STARTING,
        CPUHP_AP_IRQ_MIPS_GIC_STARTING,
        CPUHP_AP_IRQ_RISCV_STARTING,
        CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
        CPUHP_AP_ARM_MVEBU_COHERENCY,
        CPUHP_AP_MICROCODE_LOADER,
        CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
        CPUHP_AP_PERF_X86_STARTING,
        CPUHP_AP_PERF_X86_AMD_IBS_STARTING,
        CPUHP_AP_PERF_X86_CQM_STARTING,
        CPUHP_AP_PERF_X86_CSTATE_STARTING,
        CPUHP_AP_PERF_XTENSA_STARTING,
        CPUHP_AP_MIPS_OP_LOONGSON3_STARTING,
        CPUHP_AP_ARM_SDEI_STARTING,
        CPUHP_AP_ARM_VFP_STARTING,
        CPUHP_AP_ARM64_DEBUG_MONITORS_STARTING,
        CPUHP_AP_PERF_ARM_HW_BREAKPOINT_STARTING,
        CPUHP_AP_PERF_ARM_ACPI_STARTING,
        CPUHP_AP_PERF_ARM_STARTING,
        CPUHP_AP_ARM_L2X0_STARTING,
        CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING,
        CPUHP_AP_ARM_ARCH_TIMER_STARTING,
        CPUHP_AP_ARM_GLOBAL_TIMER_STARTING,
        CPUHP_AP_JCORE_TIMER_STARTING,
        CPUHP_AP_ARM_TWD_STARTING,
        CPUHP_AP_QCOM_TIMER_STARTING,
        CPUHP_AP_TEGRA_TIMER_STARTING,
        CPUHP_AP_ARMADA_TIMER_STARTING,
        CPUHP_AP_MARCO_TIMER_STARTING,
        CPUHP_AP_MIPS_GIC_TIMER_STARTING,
        CPUHP_AP_ARC_TIMER_STARTING,
        CPUHP_AP_RISCV_TIMER_STARTING,
        CPUHP_AP_CLINT_TIMER_STARTING,
        CPUHP_AP_CSKY_TIMER_STARTING,
        CPUHP_AP_TI_GP_TIMER_STARTING,
        CPUHP_AP_HYPERV_TIMER_STARTING,
        CPUHP_AP_KVM_STARTING,
        CPUHP_AP_KVM_ARM_VGIC_INIT_STARTING,
        CPUHP_AP_KVM_ARM_VGIC_STARTING,
        CPUHP_AP_KVM_ARM_TIMER_STARTING,
        /* Must be the last timer callback */
        CPUHP_AP_DUMMY_TIMER_STARTING,
        CPUHP_AP_ARM_XEN_STARTING,
        CPUHP_AP_ARM_CORESIGHT_STARTING,
        CPUHP_AP_ARM_CORESIGHT_CTI_STARTING,
        CPUHP_AP_ARM64_ISNDEP_STARTING,
        CPUHP_AP_SMPCFD_DYING,
        CPUHP_AP_X86_TBOOT_DYING,
        CPUHP_AP_ARM_CACHE_B15_RAC_DYING,
        CPUHP_AP_ONLINE,
        CPUHP_TEARDOWN_CPU,

        /* Online section invoked on the hotplugged CPU from the hotplug thread */
        CPUHP_AP_ONLINE_IDLE,
        CPUHP_AP_SCHED_WAIT_EMPTY,
        CPUHP_AP_SMPBOOT_THREADS,
        CPUHP_AP_X86_VDSO_VMA_ONLINE,
        CPUHP_AP_IRQ_AFFINITY_ONLINE,
        CPUHP_AP_BLK_MQ_ONLINE,
        CPUHP_AP_ARM_MVEBU_SYNC_CLOCKS,
        CPUHP_AP_X86_INTEL_EPB_ONLINE,
        CPUHP_AP_PERF_ONLINE,
        CPUHP_AP_PERF_X86_ONLINE,
        CPUHP_AP_PERF_X86_UNCORE_ONLINE,
        CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE,
        CPUHP_AP_PERF_X86_AMD_POWER_ONLINE,
        CPUHP_AP_PERF_X86_RAPL_ONLINE,
        CPUHP_AP_PERF_X86_CQM_ONLINE,
        CPUHP_AP_PERF_X86_CSTATE_ONLINE,
        CPUHP_AP_PERF_X86_IDXD_ONLINE,
        CPUHP_AP_PERF_S390_CF_ONLINE,
        CPUHP_AP_PERF_S390_SF_ONLINE,
        CPUHP_AP_PERF_ARM_CCI_ONLINE,
        CPUHP_AP_PERF_ARM_CCN_ONLINE,
        CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE,
        CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE,
        CPUHP_AP_PERF_ARM_HISI_L3_ONLINE,
        CPUHP_AP_PERF_ARM_HISI_PA_ONLINE,
        CPUHP_AP_PERF_ARM_HISI_SLLC_ONLINE,
        CPUHP_AP_PERF_ARM_L2X0_ONLINE,
        CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE,
        CPUHP_AP_PERF_ARM_QCOM_L3_ONLINE,
        CPUHP_AP_PERF_ARM_APM_XGENE_ONLINE,
        CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE,
        CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE,
        CPUHP_AP_PERF_POWERPC_CORE_IMC_ONLINE,
        CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE,
        CPUHP_AP_PERF_POWERPC_TRACE_IMC_ONLINE,
        CPUHP_AP_PERF_POWERPC_HV_24x7_ONLINE,
        CPUHP_AP_PERF_POWERPC_HV_GPCI_ONLINE,
        CPUHP_AP_PERF_CSKY_ONLINE,
        CPUHP_AP_WATCHDOG_ONLINE,
        CPUHP_AP_WORKQUEUE_ONLINE,
        CPUHP_AP_RANDOM_ONLINE,
        CPUHP_AP_RCUTREE_ONLINE,
        CPUHP_AP_BASE_CACHEINFO_ONLINE,
        CPUHP_AP_ONLINE_DYN,
        CPUHP_AP_ONLINE_DYN_END = CPUHP_AP_ONLINE_DYN + 30,
        /* Must be after CPUHP_AP_ONLINE_DYN for node_states[N_CPU] update */
        CPUHP_AP_MM_DEMOTION_ONLINE,
        CPUHP_AP_X86_HPET_ONLINE,
        CPUHP_AP_X86_KVM_CLK_ONLINE,
        CPUHP_AP_DTPM_CPU_ONLINE,
        CPUHP_AP_ACTIVE,
        CPUHP_ANDROID_RESERVED_1,
        CPUHP_ANDROID_RESERVED_2,
        CPUHP_ANDROID_RESERVED_3,
        CPUHP_ANDROID_RESERVED_4,
        CPUHP_ONLINE,
};

int __cpuhp_setup_state(enum cpuhp_state state, const char *name, bool invoke,
                        int (*startup)(unsigned int cpu),
                        int (*teardown)(unsigned int cpu), bool multi_instance);

int __cpuhp_setup_state_cpuslocked(enum cpuhp_state state, const char *name,
                                   bool invoke,
                                   int (*startup)(unsigned int cpu),
                                   int (*teardown)(unsigned int cpu),
                                   bool multi_instance);
/**
 * cpuhp_setup_state - Setup hotplug state callbacks with calling the @startup
 *                     callback
 * @state:    The state for which the calls are installed
 * @name:     Name of the callback (will be used in debug output)
 * @startup:  startup callback function or NULL if not required
 * @teardown: teardown callback function or NULL if not required
 *
 * Installs the callback functions and invokes the @startup callback on
 * the online cpus which have already reached the @state.
 */
static inline int cpuhp_setup_state(enum cpuhp_state state,
                                    const char *name,
                                    int (*startup)(unsigned int cpu),
                                    int (*teardown)(unsigned int cpu))
{
        return __cpuhp_setup_state(state, name, true, startup, teardown, false);
}
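
A minimal usage sketch (not part of the header; the mydrv_* names and callbacks are hypothetical): a driver that only needs ONLINE-section callbacks registers in the dynamic state space, as the comment above the enum recommends, and keeps the returned state number for later removal.

/* Assumes <linux/cpuhotplug.h> and <linux/init.h>. ONLINE-section callbacks
 * run on the hotplugged CPU from the per-CPU hotplug thread. */
static int mydrv_cpu_online(unsigned int cpu)
{
        /* set up per-CPU resources for @cpu; return 0 on success */
        return 0;
}

static int mydrv_cpu_prep_down(unsigned int cpu)
{
        /* quiesce and release per-CPU resources for @cpu */
        return 0;
}

static enum cpuhp_state mydrv_hp_state;

static int __init mydrv_init(void)
{
        int ret;

        /*
         * With CPUHP_AP_ONLINE_DYN the core picks a free dynamic state,
         * invokes mydrv_cpu_online() on every CPU that is already online
         * and returns the allocated state (>= 0) or a negative errno.
         */
        ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mydrv:online",
                                mydrv_cpu_online, mydrv_cpu_prep_down);
        if (ret < 0)
                return ret;
        mydrv_hp_state = ret;
        return 0;
}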

/**
 * cpuhp_setup_state_cpuslocked - Setup hotplug state callbacks with calling
 *                                @startup callback from a cpus_read_lock()
 *                                held region
 * @state:    The state for which the calls are installed
 * @name:     Name of the callback (will be used in debug output)
 * @startup:  startup callback function or NULL if not required
 * @teardown: teardown callback function or NULL if not required
 *
 * Same as cpuhp_setup_state() except that it must be invoked from within a
 * cpus_read_lock() held region.
 */
static inline int cpuhp_setup_state_cpuslocked(enum cpuhp_state state,
                                               const char *name,
                                               int (*startup)(unsigned int cpu),
                                               int (*teardown)(unsigned int cpu))
{
        return __cpuhp_setup_state_cpuslocked(state, name, true, startup,
                                              teardown, false);
}
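
A sketch of the cpuslocked variant (again using the hypothetical mydrv_* callbacks): when the caller already holds the CPU hotplug read lock, for instance because it needs a stable set of online CPUs around the registration, the _cpuslocked form avoids taking the lock again.

static int mydrv_register_locked(void)
{
        int ret;

        cpus_read_lock();       /* from <linux/cpu.h> */
        /* ... other work that relies on the online CPU set not changing ... */
        ret = cpuhp_setup_state_cpuslocked(CPUHP_AP_ONLINE_DYN, "mydrv:online",
                                           mydrv_cpu_online,
                                           mydrv_cpu_prep_down);
        cpus_read_unlock();

        return ret < 0 ? ret : 0;
}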

/**
 * cpuhp_setup_state_nocalls - Setup hotplug state callbacks without calling the
 *                             @startup callback
 * @state:    The state for which the calls are installed
 * @name:     Name of the callback.
 * @startup:  startup callback function or NULL if not required
 * @teardown: teardown callback function or NULL if not required
 *
 * Same as cpuhp_setup_state() except that the @startup callback is not
 * invoked during installation. NOP if SMP=n or HOTPLUG_CPU=n.
 */
static inline int cpuhp_setup_state_nocalls(enum cpuhp_state state,
                                            const char *name,
                                            int (*startup)(unsigned int cpu),
                                            int (*teardown)(unsigned int cpu))
{
        return __cpuhp_setup_state(state, name, false, startup, teardown,
                                   false);
}

/**
 * cpuhp_setup_state_nocalls_cpuslocked - Setup hotplug state callbacks without
 *                                        invoking the @startup callback from
 *                                        a cpus_read_lock() held region
 * @state:    The state for which the calls are installed
 * @name:     Name of the callback.
 * @startup:  startup callback function or NULL if not required
 * @teardown: teardown callback function or NULL if not required
 *
 * Same as cpuhp_setup_state_nocalls() except that it must be invoked from
 * within a cpus_read_lock() held region.
 */
static inline int cpuhp_setup_state_nocalls_cpuslocked(enum cpuhp_state state,
                                                       const char *name,
                                                       int (*startup)(unsigned int cpu),
                                                       int (*teardown)(unsigned int cpu))
{
        return __cpuhp_setup_state_cpuslocked(state, name, false, startup,
                                              teardown, false);
}

/**
 * cpuhp_setup_state_multi - Add callbacks for multi state
 * @state:    The state for which the calls are installed
 * @name:     Name of the callback.
 * @startup:  startup callback function or NULL if not required
 * @teardown: teardown callback function or NULL if not required
 *
 * Sets the internal multi_instance flag and prepares a state to work as a
 * multi instance callback. No callbacks are invoked at this point. The
 * callbacks are invoked once an instance for this state is registered via
 * cpuhp_state_add_instance() or cpuhp_state_add_instance_nocalls().
 */
static inline int cpuhp_setup_state_multi(enum cpuhp_state state,
                                          const char *name,
                                          int (*startup)(unsigned int cpu,
                                                         struct hlist_node *node),
                                          int (*teardown)(unsigned int cpu,
                                                          struct hlist_node *node))
{
        return __cpuhp_setup_state(state, name, false,
                                   (void *) startup,
                                   (void *) teardown, true);
}
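
A sketch of the multi-instance pattern (the struct mydev layout and names are hypothetical): the state is set up once with per-instance callbacks that recover the owning object from the embedded hlist_node; each device then registers its own node, as shown after cpuhp_state_add_instance() below.

struct mydev {
        struct hlist_node node; /* linked into the cpuhp instance list */
        /* per-device state ... */
};

static enum cpuhp_state mydev_multi_state;

static int mydev_cpu_online(unsigned int cpu, struct hlist_node *node)
{
        struct mydev *dev = container_of(node, struct mydev, node);

        pr_debug("mydev %p: cpu %u coming online\n", dev, cpu);
        return 0;
}

static int mydev_cpu_down(unsigned int cpu, struct hlist_node *node)
{
        struct mydev *dev = container_of(node, struct mydev, node);

        pr_debug("mydev %p: cpu %u going down\n", dev, cpu);
        return 0;
}

static int __init mydev_driver_init(void)
{
        int ret;

        /* No callbacks run here; instances are added per device later. */
        ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "mydev:online",
                                      mydev_cpu_online, mydev_cpu_down);
        if (ret < 0)
                return ret;
        mydev_multi_state = ret;
        return 0;
}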

int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
                               bool invoke);
int __cpuhp_state_add_instance_cpuslocked(enum cpuhp_state state,
                                          struct hlist_node *node, bool invoke);

/**
 * cpuhp_state_add_instance - Add an instance for a state and invoke startup
 *                            callback.
 * @state: The state for which the instance is installed
 * @node:  The node for this individual state.
 *
 * Installs the instance for the @state and invokes the registered startup
 * callback on the online cpus which have already reached the @state. The
 * @state must have been earlier marked as multi-instance by
 * cpuhp_setup_state_multi().
 */
static inline int cpuhp_state_add_instance(enum cpuhp_state state,
                                           struct hlist_node *node)
{
        return __cpuhp_state_add_instance(state, node, true);
}
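
Continuing the hypothetical mydev sketch above: each probed device adds its node to the multi-instance state, which runs the registered startup callback for that instance on every CPU that is already online.

static int mydev_probe_one(struct mydev *dev)
{
        /* Invokes mydev_cpu_online(cpu, &dev->node) on each online CPU. */
        return cpuhp_state_add_instance(mydev_multi_state, &dev->node);
}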

/**
 * cpuhp_state_add_instance_nocalls - Add an instance for a state without
 *                                    invoking the startup callback.
 * @state: The state for which the instance is installed
 * @node:  The node for this individual state.
 *
 * Installs the instance for the @state. The @state must have been earlier
 * marked as multi-instance by cpuhp_setup_state_multi(). NOP if SMP=n or
 * HOTPLUG_CPU=n.
 */
static inline int cpuhp_state_add_instance_nocalls(enum cpuhp_state state,
                                                   struct hlist_node *node)
{
        return __cpuhp_state_add_instance(state, node, false);
}

/**
 * cpuhp_state_add_instance_nocalls_cpuslocked - Add an instance for a state
 *                                               without invoking the startup
 *                                               callback from a cpus_read_lock()
 *                                               held region.
 * @state: The state for which the instance is installed
 * @node:  The node for this individual state.
 *
 * Same as cpuhp_state_add_instance_nocalls() except that it must be
 * invoked from within a cpus_read_lock() held region.
 */
static inline int
cpuhp_state_add_instance_nocalls_cpuslocked(enum cpuhp_state state,
                                            struct hlist_node *node)
{
        return __cpuhp_state_add_instance_cpuslocked(state, node, false);
}

void __cpuhp_remove_state(enum cpuhp_state state, bool invoke);
void __cpuhp_remove_state_cpuslocked(enum cpuhp_state state, bool invoke);

/**
 * cpuhp_remove_state - Remove hotplug state callbacks and invoke the teardown
 * @state: The state for which the calls are removed
 *
 * Removes the callback functions and invokes the teardown callback on
 * the online cpus which have already reached the @state.
 */
static inline void cpuhp_remove_state(enum cpuhp_state state)
{
        __cpuhp_remove_state(state, true);
}

/**
 * cpuhp_remove_state_nocalls - Remove hotplug state callbacks without invoking
 *                              the teardown callback
 * @state: The state for which the calls are removed
 */
static inline void cpuhp_remove_state_nocalls(enum cpuhp_state state)
{
        __cpuhp_remove_state(state, false);
}

/**
 * cpuhp_remove_state_nocalls_cpuslocked - Remove hotplug state callbacks without
 *                                         invoking the teardown callback from a
 *                                         cpus_read_lock() held region.
 * @state: The state for which the calls are removed
 *
 * Same as cpuhp_remove_state_nocalls() except that it must be invoked
 * from within a cpus_read_lock() held region.
 */
static inline void cpuhp_remove_state_nocalls_cpuslocked(enum cpuhp_state state)
{
        __cpuhp_remove_state_cpuslocked(state, false);
}

/**
 * cpuhp_remove_multi_state - Remove hotplug multi state callback
 * @state: The state for which the calls are removed
 *
 * Removes the callback functions from a multi state. This is the reverse of
 * cpuhp_setup_state_multi(). All instances should have been removed before
 * invoking this function.
 */
static inline void cpuhp_remove_multi_state(enum cpuhp_state state)
{
        __cpuhp_remove_state(state, false);
}

int __cpuhp_state_remove_instance(enum cpuhp_state state,
                                  struct hlist_node *node, bool invoke);

/**
 * cpuhp_state_remove_instance - Remove hotplug instance from state and invoke
 *                               the teardown callback
 * @state: The state from which the instance is removed
 * @node:  The node for this individual state.
 *
 * Removes the instance and invokes the teardown callback on the online cpus
 * which have already reached @state.
 */
static inline int cpuhp_state_remove_instance(enum cpuhp_state state,
                                              struct hlist_node *node)
{
        return __cpuhp_state_remove_instance(state, node, true);
}

/**
 * cpuhp_state_remove_instance_nocalls - Remove hotplug instance from state
 *                                       without invoking the teardown callback
 * @state: The state from which the instance is removed
 * @node:  The node for this individual state.
 *
 * Removes the instance without invoking the teardown callback.
 */
static inline int cpuhp_state_remove_instance_nocalls(enum cpuhp_state state,
                                                      struct hlist_node *node)
{
        return __cpuhp_state_remove_instance(state, node, false);
}
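
A teardown sketch for the hypothetical examples above: instances are removed first (optionally running the teardown callback on the online CPUs), then the multi state itself is removed; a plain dynamic state is simply handed back with cpuhp_remove_state().

static void mydev_remove_one(struct mydev *dev)
{
        /* Invokes mydev_cpu_down(cpu, &dev->node) on each online CPU. */
        cpuhp_state_remove_instance(mydev_multi_state, &dev->node);
}

static void mydev_driver_exit(void)
{
        /* All instances must already have been removed at this point. */
        cpuhp_remove_multi_state(mydev_multi_state);
}

static void mydrv_exit(void)
{
        /* Single-state case: runs mydrv_cpu_prep_down() on each online CPU. */
        cpuhp_remove_state(mydrv_hp_state);
}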

#ifdef CONFIG_SMP
void cpuhp_online_idle(enum cpuhp_state state);
#else
static inline void cpuhp_online_idle(enum cpuhp_state state) { }
#endif

#endif