kernel_arpi/kernel/sched/rt.c
Greg Kroah-Hartman 962d1816e1 Merge 5.15.86 into android14-5.15
Changes in 5.15.86
	drm/amd/display: Manually adjust strobe for DCN303
	usb: musb: remove extra check in musb_gadget_vbus_draw
	arm64: dts: qcom: ipq6018-cp01-c1: use BLSPI1 pins
	arm64: dts: qcom: sm8250-sony-xperia-edo: fix touchscreen bias-disable
	arm64: dts: qcom: msm8996: Add MSM8996 Pro support
	arm64: dts: qcom: msm8996: fix supported-hw in cpufreq OPP tables
	arm64: dts: qcom: msm8996: fix GPU OPP table
	ARM: dts: qcom: apq8064: fix coresight compatible
	arm64: dts: qcom: sdm630: fix UART1 pin bias
	arm64: dts: qcom: sdm845-cheza: fix AP suspend pin bias
	arm64: dts: qcom: msm8916: Drop MSS fallback compatible
	objtool, kcsan: Add volatile read/write instrumentation to whitelist
	ARM: dts: stm32: Drop stm32mp15xc.dtsi from Avenger96
	ARM: dts: stm32: Fix AV96 WLAN regulator gpio property
	drivers: soc: ti: knav_qmss_queue: Mark knav_acc_firmwares as static
	arm64: dts: qcom: pm660: Use unique ADC5_VCOIN address in node name
	arm64: dts: qcom: sm8250: correct LPASS pin pull down
	soc: qcom: llcc: make irq truly optional
	arm64: dts: qcom: Correct QMP PHY child node name
	arm64: dts: qcom: sm8150: fix UFS PHY registers
	arm64: dts: qcom: sm8250: fix UFS PHY registers
	arm64: dts: qcom: sm8350: fix UFS PHY registers
	arm64: dts: qcom: sm8250: drop bogus DP PHY clock
	soc: qcom: apr: make code more reuseable
	soc: qcom: apr: Add check for idr_alloc and of_property_read_string_index
	arm64: dts: qcom: sm6125: fix SDHCI CQE reg names
	arm: dts: spear600: Fix clcd interrupt
	soc: ti: knav_qmss_queue: Use pm_runtime_resume_and_get instead of pm_runtime_get_sync
	soc: ti: knav_qmss_queue: Fix PM disable depth imbalance in knav_queue_probe
	soc: ti: smartreflex: Fix PM disable depth imbalance in omap_sr_probe
	arm64: Treat ESR_ELx as a 64-bit register
	arm64: mm: kfence: only handle translation faults
	perf: arm_dsu: Fix hotplug callback leak in dsu_pmu_init()
	perf/arm_dmc620: Fix hotplug callback leak in dmc620_pmu_init()
	perf/smmuv3: Fix hotplug callback leak in arm_smmu_pmu_init()
	arm64: dts: ti: k3-am65-main: Drop dma-coherent in crypto node
	arm64: dts: ti: k3-j721e-main: Drop dma-coherent in crypto node
	ARM: dts: nuvoton: Remove bogus unit addresses from fixed-partition nodes
	arm64: dts: mt6779: Fix devicetree build warnings
	arm64: dts: mt2712e: Fix unit_address_vs_reg warning for oscillators
	arm64: dts: mt2712e: Fix unit address for pinctrl node
	arm64: dts: mt2712-evb: Fix vproc fixed regulators unit names
	arm64: dts: mt2712-evb: Fix usb vbus regulators unit names
	arm64: dts: mediatek: pumpkin-common: Fix devicetree warnings
	arm64: dts: mediatek: mt6797: Fix 26M oscillator unit name
	ARM: dts: dove: Fix assigned-addresses for every PCIe Root Port
	ARM: dts: armada-370: Fix assigned-addresses for every PCIe Root Port
	ARM: dts: armada-xp: Fix assigned-addresses for every PCIe Root Port
	ARM: dts: armada-375: Fix assigned-addresses for every PCIe Root Port
	ARM: dts: armada-38x: Fix assigned-addresses for every PCIe Root Port
	ARM: dts: armada-39x: Fix assigned-addresses for every PCIe Root Port
	ARM: dts: turris-omnia: Add ethernet aliases
	ARM: dts: turris-omnia: Add switch port 6 node
	arm64: dts: armada-3720-turris-mox: Add missing interrupt for RTC
	seccomp: Move copy_seccomp() to no failure path.
	pstore/ram: Fix error return code in ramoops_probe()
	ARM: mmp: fix timer_read delay
	pstore: Avoid kcore oops by vmap()ing with VM_IOREMAP
	tpm/tpm_ftpm_tee: Fix error handling in ftpm_mod_init()
	tpm/tpm_crb: Fix error message in __crb_relinquish_locality()
	ovl: store lower path in ovl_inode
	ovl: use ovl_copy_{real,upper}attr() wrappers
	ovl: remove privs in ovl_copyfile()
	ovl: remove privs in ovl_fallocate()
	sched/fair: Cleanup task_util and capacity type
	sched/uclamp: Fix relationship between uclamp and migration margin
	sched/uclamp: Make task_fits_capacity() use util_fits_cpu()
	sched/uclamp: Make select_idle_capacity() use util_fits_cpu()
	sched/fair: Removed useless update of p->recent_used_cpu
	sched/core: Introduce sched_asym_cpucap_active()
	sched/uclamp: Make asym_fits_capacity() use util_fits_cpu()
	cpuidle: dt: Return the correct numbers of parsed idle states
	alpha: fix TIF_NOTIFY_SIGNAL handling
	alpha: fix syscall entry in !AUDUT_SYSCALL case
	x86/sgx: Reduce delay and interference of enclave release
	PM: hibernate: Fix mistake in kerneldoc comment
	fs: don't audit the capability check in simple_xattr_list()
	cpufreq: qcom-hw: Fix memory leak in qcom_cpufreq_hw_read_lut()
	selftests/ftrace: event_triggers: wait longer for test_event_enable
	perf: Fix possible memleak in pmu_dev_alloc()
	lib/debugobjects: fix stat count and optimize debug_objects_mem_init
	platform/x86: huawei-wmi: fix return value calculation
	timerqueue: Use rb_entry_safe() in timerqueue_getnext()
	proc: fixup uptime selftest
	lib/fonts: fix undefined behavior in bit shift for get_default_font
	ocfs2: fix memory leak in ocfs2_stack_glue_init()
	MIPS: vpe-mt: fix possible memory leak while module exiting
	MIPS: vpe-cmp: fix possible memory leak while module exiting
	selftests/efivarfs: Add checking of the test return value
	PNP: fix name memory leak in pnp_alloc_dev()
	perf/x86/intel/uncore: Fix reference count leak in sad_cfg_iio_topology()
	perf/x86/intel/uncore: Fix reference count leak in hswep_has_limit_sbox()
	perf/x86/intel/uncore: Fix reference count leak in snr_uncore_mmio_map()
	perf/x86/intel/uncore: Fix reference count leak in __uncore_imc_init_box()
	platform/chrome: cros_usbpd_notify: Fix error handling in cros_usbpd_notify_init()
	thermal: core: fix some possible name leaks in error paths
	irqchip: gic-pm: Use pm_runtime_resume_and_get() in gic_probe()
	irqchip/wpcm450: Fix memory leak in wpcm450_aic_of_init()
	EDAC/i10nm: fix refcount leak in pci_get_dev_wrapper()
	SUNRPC: Return true/false (not 1/0) from bool functions
	NFSD: Finish converting the NFSv2 GETACL result encoder
	nfsd: don't call nfsd_file_put from client states seqfile display
	genirq/irqdesc: Don't try to remove non-existing sysfs files
	cpufreq: amd_freq_sensitivity: Add missing pci_dev_put()
	libfs: add DEFINE_SIMPLE_ATTRIBUTE_SIGNED for signed value
	lib/notifier-error-inject: fix error when writing -errno to debugfs file
	debugfs: fix error when writing negative value to atomic_t debugfs file
	rapidio: fix possible name leaks when rio_add_device() fails
	rapidio: rio: fix possible name leak in rio_register_mport()
	clocksource/drivers/sh_cmt: Access registers according to spec
	mips: ralink: mt7621: define MT7621_SYSC_BASE with __iomem
	mips: ralink: mt7621: soc queries and tests as functions
	mips: ralink: mt7621: do not use kzalloc too early
	futex: Move to kernel/futex/
	futex: Resend potentially swallowed owner death notification
	cpu/hotplug: Make target_store() a nop when target == state
	cpu/hotplug: Do not bail-out in DYING/STARTING sections
	clocksource/drivers/timer-ti-dm: Fix missing clk_disable_unprepare in dmtimer_systimer_init_clock()
	ACPICA: Fix use-after-free in acpi_ut_copy_ipackage_to_ipackage()
	uprobes/x86: Allow to probe a NOP instruction with 0x66 prefix
	x86/xen: Fix memory leak in xen_smp_intr_init{_pv}()
	x86/xen: Fix memory leak in xen_init_lock_cpu()
	xen/privcmd: Fix a possible warning in privcmd_ioctl_mmap_resource()
	PM: runtime: Do not call __rpm_callback() from rpm_idle()
	platform/chrome: cros_ec_typec: Cleanup switch handle return paths
	platform/chrome: cros_ec_typec: zero out stale pointers
	platform/x86: mxm-wmi: fix memleak in mxm_wmi_call_mx[ds|mx]()
	platform/x86: intel_scu_ipc: fix possible name leak in __intel_scu_ipc_register()
	MIPS: BCM63xx: Add check for NULL for clk in clk_enable
	MIPS: OCTEON: warn only once if deprecated link status is being used
	lockd: set other missing fields when unlocking files
	fs: sysv: Fix sysv_nblocks() returns wrong value
	rapidio: fix possible UAF when kfifo_alloc() fails
	eventfd: change int to __u64 in eventfd_signal() ifndef CONFIG_EVENTFD
	relay: fix type mismatch when allocating memory in relay_create_buf()
	hfs: Fix OOB Write in hfs_asc2mac
	rapidio: devices: fix missing put_device in mport_cdev_open
	platform/mellanox: mlxbf-pmc: Fix event typo
	wifi: ath9k: hif_usb: fix memory leak of urbs in ath9k_hif_usb_dealloc_tx_urbs()
	wifi: ath9k: hif_usb: Fix use-after-free in ath9k_hif_usb_reg_in_cb()
	wifi: rtl8xxxu: Fix reading the vendor of combo chips
	drm/bridge: adv7533: remove dynamic lane switching from adv7533 bridge
	libbpf: Fix use-after-free in btf_dump_name_dups
	libbpf: Fix null-pointer dereference in find_prog_by_sec_insn()
	ata: libata: move ata_{port,link,dev}_dbg to standard pr_XXX() macros
	ata: add/use ata_taskfile::{error|status} fields
	ata: libata: fix NCQ autosense logic
	ipmi: kcs: Poll OBF briefly to reduce OBE latency
	drm/amdgpu/powerplay/psm: Fix memory leak in power state init
	media: v4l2-ctrls: Fix off-by-one error in integer menu control check
	media: coda: jpeg: Add check for kmalloc
	media: adv748x: afe: Select input port when initializing AFE
	media: i2c: ad5820: Fix error path
	venus: pm_helpers: Fix error check in vcodec_domains_get()
	soreuseport: Fix socket selection for SO_INCOMING_CPU.
	media: exynos4-is: don't rely on the v4l2_async_subdev internals
	libbpf: Btf dedup identical struct test needs check for nested structs/arrays
	can: kvaser_usb: do not increase tx statistics when sending error message frames
	can: kvaser_usb: kvaser_usb_leaf: Get capabilities from device
	can: kvaser_usb: kvaser_usb_leaf: Rename {leaf,usbcan}_cmd_error_event to {leaf,usbcan}_cmd_can_error_event
	can: kvaser_usb: kvaser_usb_leaf: Handle CMD_ERROR_EVENT
	can: kvaser_usb_leaf: Set Warning state even without bus errors
	can: kvaser_usb: make use of units.h in assignment of frequency
	can: kvaser_usb_leaf: Fix improved state not being reported
	can: kvaser_usb_leaf: Fix wrong CAN state after stopping
	can: kvaser_usb_leaf: Fix bogus restart events
	can: kvaser_usb: Add struct kvaser_usb_busparams
	can: kvaser_usb: Compare requested bittiming parameters with actual parameters in do_set_{,data}_bittiming
	drm/rockchip: lvds: fix PM usage counter unbalance in poweron
	clk: renesas: r9a06g032: Repair grave increment error
	spi: Update reference to struct spi_controller
	drm/panel/panel-sitronix-st7701: Remove panel on DSI attach failure
	ima: Handle -ESTALE returned by ima_filter_rule_match()
	drm/msm/hdmi: drop unused GPIO support
	drm/msm/hdmi: use devres helper for runtime PM management
	bpf: Fix slot type check in check_stack_write_var_off
	media: vivid: fix compose size exceed boundary
	media: platform: exynos4-is: fix return value check in fimc_md_probe()
	bpf: propagate precision in ALU/ALU64 operations
	bpf: Check the other end of slot_type for STACK_SPILL
	bpf: propagate precision across all frames, not just the last one
	clk: qcom: gcc-sm8250: Use retention mode for USB GDSCs
	mtd: Fix device name leak when register device failed in add_mtd_device()
	Input: joystick - fix Kconfig warning for JOYSTICK_ADC
	wifi: rsi: Fix handling of 802.3 EAPOL frames sent via control port
	media: camss: Clean up received buffers on failed start of streaming
	net, proc: Provide PROC_FS=n fallback for proc_create_net_single_write()
	rxrpc: Fix ack.bufferSize to be 0 when generating an ack
	bfq: fix waker_bfqq inconsistency crash
	drm/radeon: Add the missed acpi_put_table() to fix memory leak
	drm/mediatek: Modify dpi power on/off sequence.
	ASoC: pxa: fix null-pointer dereference in filter()
	libbpf: Fix uninitialized warning in btf_dump_dump_type_data
	nvmet: only allocate a single slab for bvecs
	regulator: core: fix unbalanced of node refcount in regulator_dev_lookup()
	amdgpu/pm: prevent array underflow in vega20_odn_edit_dpm_table()
	nvme: return err on nvme_init_non_mdts_limits fail
	regulator: qcom-rpmh: Fix PMR735a S3 regulator spec
	drm/fourcc: Add packed 10bit YUV 4:2:0 format
	drm/fourcc: Fix vsub/hsub for Q410 and Q401
	integrity: Fix memory leakage in keyring allocation error path
	ima: Fix misuse of dereference of pointer in template_desc_init_fields()
	block: clear ->slave_dir when dropping the main slave_dir reference
	wifi: ath10k: Fix return value in ath10k_pci_init()
	drm/msm/a6xx: Fix speed-bin detection vs probe-defer
	mtd: lpddr2_nvm: Fix possible null-ptr-deref
	Input: elants_i2c - properly handle the reset GPIO when power is off
	media: vidtv: Fix use-after-free in vidtv_bridge_dvb_init()
	media: solo6x10: fix possible memory leak in solo_sysfs_init()
	media: platform: exynos4-is: Fix error handling in fimc_md_init()
	media: videobuf-dma-contig: use dma_mmap_coherent
	inet: add READ_ONCE(sk->sk_bound_dev_if) in inet_csk_bind_conflict()
	mtd: spi-nor: hide jedec_id sysfs attribute if not present
	mtd: spi-nor: Fix the number of bytes for the dummy cycles
	bpf: Move skb->len == 0 checks into __bpf_redirect
	HID: hid-sensor-custom: set fixed size for custom attributes
	pinctrl: k210: call of_node_put()
	ALSA: pcm: fix undefined behavior in bit shift for SNDRV_PCM_RATE_KNOT
	ALSA: seq: fix undefined behavior in bit shift for SNDRV_SEQ_FILTER_USE_EVENT
	regulator: core: use kfree_const() to free space conditionally
	clk: rockchip: Fix memory leak in rockchip_clk_register_pll()
	drm/amdgpu: fix pci device refcount leak
	bonding: fix link recovery in mode 2 when updelay is nonzero
	mtd: maps: pxa2xx-flash: fix memory leak in probe
	drbd: remove call to memset before free device/resource/connection
	drbd: destroy workqueue when drbd device was freed
	ASoC: qcom: Add checks for devm_kcalloc
	media: vimc: Fix wrong function called when vimc_init() fails
	media: imon: fix a race condition in send_packet()
	clk: imx8mn: rename vpu_pll to m7_alt_pll
	clk: imx: replace osc_hdmi with dummy
	clk: imx8mn: fix imx8mn_sai2_sels clocks list
	clk: imx8mn: fix imx8mn_enet_phy_sels clocks list
	pinctrl: pinconf-generic: add missing of_node_put()
	media: dvb-core: Fix ignored return value in dvb_register_frontend()
	media: dvb-usb: az6027: fix null-ptr-deref in az6027_i2c_xfer()
	media: s5p-mfc: Add variant data for MFC v7 hardware for Exynos 3250 SoC
	drm/tegra: Add missing clk_disable_unprepare() in tegra_dc_probe()
	ASoC: dt-bindings: wcd9335: fix reset line polarity in example
	ASoC: mediatek: mtk-btcvsd: Add checks for write and read of mtk_btcvsd_snd
	NFSv4.2: Clear FATTR4_WORD2_SECURITY_LABEL when done decoding
	NFSv4.2: Fix a memory stomp in decode_attr_security_label
	NFSv4.2: Fix initialisation of struct nfs4_label
	NFSv4: Fix a credential leak in _nfs4_discover_trunking()
	NFSv4: Fix a deadlock between nfs4_open_recover_helper() and delegreturn
	NFS: Fix an Oops in nfs_d_automount()
	ALSA: asihpi: fix missing pci_disable_device()
	wifi: iwlwifi: mvm: fix double free on tx path.
	ASoC: mediatek: mt8173: Fix debugfs registration for components
	ASoC: mediatek: mt8173: Enable IRQ when pdata is ready
	drm/amd/pm/smu11: BACO is supported when it's in BACO state
	drm/radeon: Fix PCI device refcount leak in radeon_atrm_get_bios()
	drm/amdgpu: Fix PCI device refcount leak in amdgpu_atrm_get_bios()
	drm/amdkfd: Fix memory leakage
	ASoC: pcm512x: Fix PM disable depth imbalance in pcm512x_probe
	netfilter: conntrack: set icmpv6 redirects as RELATED
	Input: wistron_btns - disable on UML
	bpf, sockmap: Fix repeated calls to sock_put() when msg has more_data
	bpf, sockmap: Fix missing BPF_F_INGRESS flag when using apply_bytes
	bpf, sockmap: Fix data loss caused by using apply_bytes on ingress redirect
	bonding: uninitialized variable in bond_miimon_inspect()
	spi: spidev: mask SPI_CS_HIGH in SPI_IOC_RD_MODE
	wifi: mac80211: fix memory leak in ieee80211_if_add()
	wifi: cfg80211: Fix not unregister reg_pdev when load_builtin_regdb_keys() fails
	mt76: stop the radar detector after leaving dfs channel
	wifi: mt76: mt7921: fix reporting of TX AGGR histogram
	wifi: mt76: fix coverity overrun-call in mt76_get_txpower()
	regulator: core: fix module refcount leak in set_supply()
	clk: qcom: lpass-sc7180: Fix pm_runtime usage
	clk: qcom: clk-krait: fix wrong div2 functions
	hsr: Add a rcu-read lock to hsr_forward_skb().
	hsr: Avoid double remove of a node.
	hsr: Disable netpoll.
	hsr: Synchronize sending frames to have always incremented outgoing seq nr.
	hsr: Synchronize sequence number updates.
	configfs: fix possible memory leak in configfs_create_dir()
	regulator: core: fix resource leak in regulator_register()
	hwmon: (jc42) Convert register access and caching to regmap/regcache
	hwmon: (jc42) Restore the min/max/critical temperatures on resume
	bpf, sockmap: fix race in sock_map_free()
	ALSA: pcm: Set missing stop_operating flag at undoing trigger start
	media: saa7164: fix missing pci_disable_device()
	ALSA: mts64: fix possible null-ptr-defer in snd_mts64_interrupt
	xprtrdma: Fix regbuf data not freed in rpcrdma_req_create()
	SUNRPC: Fix missing release socket in rpc_sockname()
	NFSv4.x: Fail client initialisation if state manager thread can't run
	riscv, bpf: Emit fixed-length instructions for BPF_PSEUDO_FUNC
	mmc: alcor: fix return value check of mmc_add_host()
	mmc: moxart: fix return value check of mmc_add_host()
	mmc: mxcmmc: fix return value check of mmc_add_host()
	mmc: pxamci: fix return value check of mmc_add_host()
	mmc: rtsx_pci: fix return value check of mmc_add_host()
	mmc: rtsx_usb_sdmmc: fix return value check of mmc_add_host()
	mmc: toshsd: fix return value check of mmc_add_host()
	mmc: vub300: fix return value check of mmc_add_host()
	mmc: wmt-sdmmc: fix return value check of mmc_add_host()
	mmc: atmel-mci: fix return value check of mmc_add_host()
	mmc: omap_hsmmc: fix return value check of mmc_add_host()
	mmc: meson-gx: fix return value check of mmc_add_host()
	mmc: via-sdmmc: fix return value check of mmc_add_host()
	mmc: wbsd: fix return value check of mmc_add_host()
	mmc: mmci: fix return value check of mmc_add_host()
	mmc: renesas_sdhi: alway populate SCC pointer
	memstick: ms_block: Add error handling support for add_disk()
	memstick/ms_block: Add check for alloc_ordered_workqueue
	mmc: core: Normalize the error handling branch in sd_read_ext_regs()
	regulator: qcom-labibb: Fix missing of_node_put() in qcom_labibb_regulator_probe()
	media: c8sectpfe: Add of_node_put() when breaking out of loop
	media: coda: Add check for dcoda_iram_alloc
	media: coda: Add check for kmalloc
	clk: samsung: Fix memory leak in _samsung_clk_register_pll()
	spi: spi-gpio: Don't set MOSI as an input if not 3WIRE mode
	wifi: rtl8xxxu: Add __packed to struct rtl8723bu_c2h
	wifi: rtl8xxxu: Fix the channel width reporting
	wifi: brcmfmac: Fix error return code in brcmf_sdio_download_firmware()
	blktrace: Fix output non-blktrace event when blk_classic option enabled
	bpf: Do not zero-extend kfunc return values
	clk: socfpga: Fix memory leak in socfpga_gate_init()
	net: vmw_vsock: vmci: Check memcpy_from_msg()
	net: defxx: Fix missing err handling in dfx_init()
	net: stmmac: selftests: fix potential memleak in stmmac_test_arpoffload()
	net: stmmac: fix possible memory leak in stmmac_dvr_probe()
	drivers: net: qlcnic: Fix potential memory leak in qlcnic_sriov_init()
	of: overlay: fix null pointer dereferencing in find_dup_cset_node_entry() and find_dup_cset_prop()
	ethernet: s2io: don't call dev_kfree_skb() under spin_lock_irqsave()
	net: farsync: Fix kmemleak when rmmods farsync
	net/tunnel: wait until all sk_user_data reader finish before releasing the sock
	net: apple: mace: don't call dev_kfree_skb() under spin_lock_irqsave()
	net: apple: bmac: don't call dev_kfree_skb() under spin_lock_irqsave()
	net: emaclite: don't call dev_kfree_skb() under spin_lock_irqsave()
	net: ethernet: dnet: don't call dev_kfree_skb() under spin_lock_irqsave()
	hamradio: don't call dev_kfree_skb() under spin_lock_irqsave()
	net: amd: lance: don't call dev_kfree_skb() under spin_lock_irqsave()
	af_unix: call proto_unregister() in the error path in af_unix_init()
	net: amd-xgbe: Fix logic around active and passive cables
	net: amd-xgbe: Check only the minimum speed for active/passive cables
	can: tcan4x5x: Remove invalid write in clear_interrupts
	can: m_can: Call the RAM init directly from m_can_chip_config
	can: tcan4x5x: Fix use of register error status mask
	net: lan9303: Fix read error execution path
	ntb_netdev: Use dev_kfree_skb_any() in interrupt context
	sctp: sysctl: make extra pointers netns aware
	Bluetooth: MGMT: Fix error report for ADD_EXT_ADV_PARAMS
	Bluetooth: btintel: Fix missing free skb in btintel_setup_combined()
	Bluetooth: btusb: don't call kfree_skb() under spin_lock_irqsave()
	Bluetooth: hci_qca: don't call kfree_skb() under spin_lock_irqsave()
	Bluetooth: hci_ll: don't call kfree_skb() under spin_lock_irqsave()
	Bluetooth: hci_h5: don't call kfree_skb() under spin_lock_irqsave()
	Bluetooth: hci_bcsp: don't call kfree_skb() under spin_lock_irqsave()
	Bluetooth: hci_core: don't call kfree_skb() under spin_lock_irqsave()
	Bluetooth: RFCOMM: don't call kfree_skb() under spin_lock_irqsave()
	stmmac: fix potential division by 0
	i40e: Fix the inability to attach XDP program on downed interface
	net: dsa: tag_8021q: avoid leaking ctx on dsa_tag_8021q_register() error path
	apparmor: fix a memleak in multi_transaction_new()
	apparmor: fix lockdep warning when removing a namespace
	apparmor: Fix abi check to include v8 abi
	crypto: hisilicon/qm - fix missing destroy qp_idr
	crypto: sun8i-ss - use dma_addr instead u32
	crypto: nitrox - avoid double free on error path in nitrox_sriov_init()
	scsi: core: Fix a race between scsi_done() and scsi_timeout()
	apparmor: Use pointer to struct aa_label for lbs_cred
	PCI: dwc: Fix n_fts[] array overrun
	RDMA/core: Fix order of nldev_exit call
	PCI: pci-epf-test: Register notifier if only core_init_notifier is enabled
	f2fs: Fix the race condition of resize flag between resizefs
	crypto: rockchip - do not do custom power management
	crypto: rockchip - do not store mode globally
	crypto: rockchip - add fallback for cipher
	crypto: rockchip - add fallback for ahash
	crypto: rockchip - better handle cipher key
	crypto: rockchip - remove non-aligned handling
	crypto: rockchip - rework by using crypto_engine
	apparmor: Fix memleak in alloc_ns()
	f2fs: fix to invalidate dcc->f2fs_issue_discard in error path
	f2fs: fix normal discard process
	f2fs: fix to destroy sbi->post_read_wq in error path of f2fs_fill_super()
	RDMA/irdma: Report the correct link speed
	scsi: qla2xxx: Fix set-but-not-used variable warnings
	RDMA/siw: Fix immediate work request flush to completion queue
	IB/mad: Don't call to function that might sleep while in atomic context
	PCI: vmd: Disable MSI remapping after suspend
	RDMA/restrack: Release MR restrack when delete
	RDMA/core: Make sure "ib_port" is valid when access sysfs node
	RDMA/nldev: Return "-EAGAIN" if the cm_id isn't from expected port
	RDMA/siw: Set defined status for work completion with undefined status
	scsi: scsi_debug: Fix a warning in resp_write_scat()
	crypto: ccree - Remove debugfs when platform_driver_register failed
	crypto: cryptd - Use request context instead of stack for sub-request
	crypto: hisilicon/qm - add missing pci_dev_put() in q_num_set()
	RDMA/hns: Repacing 'dseg_len' by macros in fill_ext_sge_inl_data()
	RDMA/hns: Fix ext_sge num error when post send
	PCI: Check for alloc failure in pci_request_irq()
	RDMA/hfi: Decrease PCI device reference count in error path
	crypto: ccree - Make cc_debugfs_global_fini() available for module init function
	RDMA/hns: fix memory leak in hns_roce_alloc_mr()
	RDMA/rxe: Fix NULL-ptr-deref in rxe_qp_do_cleanup() when socket create failed
	dt-bindings: imx6q-pcie: Fix clock names for imx6sx and imx8mq
	dt-bindings: visconti-pcie: Fix interrupts array max constraints
	scsi: hpsa: Fix possible memory leak in hpsa_init_one()
	crypto: tcrypt - Fix multibuffer skcipher speed test mem leak
	padata: Always leave BHs disabled when running ->parallel()
	padata: Fix list iterator in padata_do_serial()
	scsi: mpt3sas: Fix possible resource leaks in mpt3sas_transport_port_add()
	scsi: hpsa: Fix error handling in hpsa_add_sas_host()
	scsi: hpsa: Fix possible memory leak in hpsa_add_sas_device()
	scsi: efct: Fix possible memleak in efct_device_init()
	scsi: scsi_debug: Fix a warning in resp_verify()
	scsi: scsi_debug: Fix a warning in resp_report_zones()
	scsi: fcoe: Fix possible name leak when device_register() fails
	scsi: scsi_debug: Fix possible name leak in sdebug_add_host_helper()
	scsi: ipr: Fix WARNING in ipr_init()
	scsi: fcoe: Fix transport not deattached when fcoe_if_init() fails
	scsi: snic: Fix possible UAF in snic_tgt_create()
	RDMA/nldev: Add checks for nla_nest_start() in fill_stat_counter_qps()
	f2fs: avoid victim selection from previous victim section
	RDMA/nldev: Fix failure to send large messages
	crypto: amlogic - Remove kcalloc without check
	crypto: omap-sham - Use pm_runtime_resume_and_get() in omap_sham_probe()
	riscv/mm: add arch hook arch_clear_hugepage_flags
	RDMA/hfi1: Fix error return code in parse_platform_config()
	RDMA/srp: Fix error return code in srp_parse_options()
	PCI: mt7621: Rename mt7621_pci_ to mt7621_pcie_
	PCI: mt7621: Add sentinel to quirks table
	orangefs: Fix sysfs not cleanup when dev init failed
	RDMA/hns: Fix AH attr queried by query_qp
	RDMA/hns: Fix PBL page MTR find
	RDMA/hns: Fix page size cap from firmware
	RDMA/hns: Fix error code of CMD
	crypto: img-hash - Fix variable dereferenced before check 'hdev->req'
	hwrng: amd - Fix PCI device refcount leak
	hwrng: geode - Fix PCI device refcount leak
	IB/IPoIB: Fix queue count inconsistency for PKEY child interfaces
	RISC-V: Align the shadow stack
	drivers: dio: fix possible memory leak in dio_init()
	serial: tegra: Read DMA status before terminating
	serial: 8250_bcm7271: Fix error handling in brcmuart_init()
	class: fix possible memory leak in __class_register()
	vfio: platform: Do not pass return buffer to ACPI _RST method
	uio: uio_dmem_genirq: Fix missing unlock in irq configuration
	uio: uio_dmem_genirq: Fix deadlock between irq config and handling
	usb: fotg210-udc: Fix ages old endianness issues
	staging: vme_user: Fix possible UAF in tsi148_dma_list_add
	usb: typec: Check for ops->exit instead of ops->enter in altmode_exit
	usb: typec: tcpci: fix of node refcount leak in tcpci_register_port()
	usb: typec: tipd: Cleanup resources if devm_tps6598_psy_register fails
	usb: typec: tipd: Fix spurious fwnode_handle_put in error path
	extcon: usbc-tusb320: Add support for mode setting and reset
	extcon: usbc-tusb320: Add support for TUSB320L
	usb: typec: Factor out non-PD fwnode properties
	extcon: usbc-tusb320: Factor out extcon into dedicated functions
	extcon: usbc-tusb320: Add USB TYPE-C support
	extcon: usbc-tusb320: Update state on probe even if no IRQ pending
	serial: amba-pl011: avoid SBSA UART accessing DMACR register
	serial: pl011: Do not clear RX FIFO & RX interrupt in unthrottle.
	serial: stm32: move dma_request_chan() before clk_prepare_enable()
	serial: pch: Fix PCI device refcount leak in pch_request_dma()
	tty: serial: clean up stop-tx part in altera_uart_tx_chars()
	tty: serial: altera_uart_{r,t}x_chars() need only uart_port
	serial: altera_uart: fix locking in polling mode
	serial: sunsab: Fix error handling in sunsab_init()
	test_firmware: fix memory leak in test_firmware_init()
	misc: ocxl: fix possible name leak in ocxl_file_register_afu()
	ocxl: fix pci device refcount leak when calling get_function_0()
	misc: tifm: fix possible memory leak in tifm_7xx1_switch_media()
	misc: sgi-gru: fix use-after-free error in gru_set_context_option, gru_fault and gru_handle_user_call_os
	firmware: raspberrypi: fix possible memory leak in rpi_firmware_probe()
	cxl: fix possible null-ptr-deref in cxl_guest_init_afu|adapter()
	cxl: fix possible null-ptr-deref in cxl_pci_init_afu|adapter()
	iio: temperature: ltc2983: make bulk write buffer DMA-safe
	iio: adis: handle devices that cannot unmask the drdy pin
	iio: adis: stylistic changes
	iio:imu:adis: Move exports into IIO_ADISLIB namespace
	iio: adis: add '__adis_enable_irq()' implementation
	counter: stm32-lptimer-cnt: fix the check on arr and cmp registers update
	coresight: trbe: remove cpuhp instance node before remove cpuhp state
	usb: roles: fix of node refcount leak in usb_role_switch_is_parent()
	usb: gadget: f_hid: fix f_hidg lifetime vs cdev
	usb: gadget: f_hid: fix refcount leak on error path
	drivers: mcb: fix resource leak in mcb_probe()
	mcb: mcb-parse: fix error handing in chameleon_parse_gdd()
	chardev: fix error handling in cdev_device_add()
	i2c: pxa-pci: fix missing pci_disable_device() on error in ce4100_i2c_probe
	staging: rtl8192u: Fix use after free in ieee80211_rx()
	staging: rtl8192e: Fix potential use-after-free in rtllib_rx_Monitor()
	vme: Fix error not catched in fake_init()
	gpiolib: Get rid of redundant 'else'
	gpiolib: cdev: fix NULL-pointer dereferences
	gpiolib: make struct comments into real kernel docs
	gpiolib: protect the GPIO device against being dropped while in use by user-space
	i2c: mux: reg: check return value after calling platform_get_resource()
	i2c: ismt: Fix an out-of-bounds bug in ismt_access()
	usb: storage: Add check for kcalloc
	tracing/hist: Fix issue of losting command info in error_log
	ksmbd: Fix resource leak in ksmbd_session_rpc_open()
	samples: vfio-mdev: Fix missing pci_disable_device() in mdpy_fb_probe()
	thermal/drivers/imx8mm_thermal: Validate temperature range
	thermal/drivers/qcom/temp-alarm: Fix inaccurate warning for gen2
	thermal/drivers/qcom/lmh: Fix irq handler return value
	fbdev: ssd1307fb: Drop optional dependency
	fbdev: pm2fb: fix missing pci_disable_device()
	fbdev: via: Fix error in via_core_init()
	fbdev: vermilion: decrease reference count in error path
	fbdev: ep93xx-fb: Add missing clk_disable_unprepare in ep93xxfb_probe()
	fbdev: geode: don't build on UML
	fbdev: uvesafb: don't build on UML
	fbdev: uvesafb: Fixes an error handling path in uvesafb_probe()
	HSI: omap_ssi_core: fix unbalanced pm_runtime_disable()
	HSI: omap_ssi_core: fix possible memory leak in ssi_probe()
	power: supply: fix residue sysfs file in error handle route of __power_supply_register()
	perf trace: Return error if a system call doesn't exist
	perf trace: Use macro RAW_SYSCALL_ARGS_NUM to replace number
	perf trace: Handle failure when trace point folder is missed
	perf symbol: correction while adjusting symbol
	power: supply: z2_battery: Fix possible memleak in z2_batt_probe()
	HSI: omap_ssi_core: Fix error handling in ssi_init()
	power: supply: ab8500: Fix error handling in ab8500_charger_init()
	power: supply: fix null pointer dereferencing in power_supply_get_battery_info
	perf stat: Refactor __run_perf_stat() common code
	perf stat: Do not delay the workload with --delay
	RDMA/siw: Fix pointer cast warning
	fs/ntfs3: Avoid UBSAN error on true_sectors_per_clst()
	overflow: Implement size_t saturating arithmetic helpers
	fs/ntfs3: Harden against integer overflows
	iommu/sun50i: Fix reset release
	iommu/sun50i: Consider all fault sources for reset
	iommu/sun50i: Fix R/W permission check
	iommu/sun50i: Fix flush size
	iommu/rockchip: fix permission bits in page table entries v2
	phy: usb: s2 WoL wakeup_count not incremented for USB->Eth devices
	include/uapi/linux/swab: Fix potentially missing __always_inline
	pwm: tegra: Improve required rate calculation
	fs/ntfs3: Fix slab-out-of-bounds read in ntfs_trim_fs
	dmaengine: idxd: Fix crc_val field for completion record
	rtc: rtc-cmos: Do not check ACPI_FADT_LOW_POWER_S0
	rtc: cmos: Fix event handler registration ordering issue
	rtc: cmos: Fix wake alarm breakage
	rtc: cmos: fix build on non-ACPI platforms
	rtc: cmos: Call cmos_wake_setup() from cmos_do_probe()
	rtc: cmos: Call rtc_wake_setup() from cmos_do_probe()
	rtc: cmos: Eliminate forward declarations of some functions
	rtc: cmos: Rename ACPI-related functions
	rtc: cmos: Disable ACPI RTC event on removal
	rtc: snvs: Allow a time difference on clock register read
	rtc: pcf85063: Fix reading alarm
	iommu/amd: Fix pci device refcount leak in ppr_notifier()
	iommu/fsl_pamu: Fix resource leak in fsl_pamu_probe()
	macintosh: fix possible memory leak in macio_add_one_device()
	macintosh/macio-adb: check the return value of ioremap()
	powerpc/52xx: Fix a resource leak in an error handling path
	cxl: Fix refcount leak in cxl_calc_capp_routing
	powerpc/xmon: Fix -Wswitch-unreachable warning in bpt_cmds
	powerpc/xive: add missing iounmap() in error path in xive_spapr_populate_irq_data()
	powerpc/perf: callchain validate kernel stack pointer bounds
	powerpc/83xx/mpc832x_rdb: call platform_device_put() in error case in of_fsl_spi_probe()
	powerpc/hv-gpci: Fix hv_gpci event list
	selftests/powerpc: Fix resource leaks
	iommu/sun50i: Remove IOMMU_DOMAIN_IDENTITY
	pwm: sifive: Call pwm_sifive_update_clock() while mutex is held
	pwm: mtk-disp: Fix the parameters calculated by the enabled flag of disp_pwm
	pwm: mediatek: always use bus clock for PWM on MT7622
	remoteproc: sysmon: fix memory leak in qcom_add_sysmon_subdev()
	remoteproc: qcom: q6v5: Fix potential null-ptr-deref in q6v5_wcss_init_mmio()
	remoteproc: qcom_q6v5_pas: disable wakeup on probe fail or remove
	remoteproc: qcom_q6v5_pas: detach power domains on remove
	remoteproc: qcom_q6v5_pas: Fix missing of_node_put() in adsp_alloc_memory_region()
	remoteproc: qcom: q6v5: Fix missing clk_disable_unprepare() in q6v5_wcss_qcs404_power_on()
	powerpc/eeh: Drop redundant spinlock initialization
	powerpc/pseries/eeh: use correct API for error log size
	mfd: bd957x: Fix Kconfig dependency on REGMAP_IRQ
	mfd: qcom_rpm: Fix an error handling path in qcom_rpm_probe()
	mfd: pm8008: Remove driver data structure pm8008_data
	mfd: pm8008: Fix return value check in pm8008_probe()
	netfilter: flowtable: really fix NAT IPv6 offload
	rtc: st-lpc: Add missing clk_disable_unprepare in st_rtc_probe()
	rtc: pic32: Move devm_rtc_allocate_device earlier in pic32_rtc_probe()
	rtc: pcf85063: fix pcf85063_clkout_control
	nfsd: under NFSv4.1, fix double svc_xprt_put on rpc_create failure
	net: macsec: fix net device access prior to holding a lock
	mISDN: hfcsusb: don't call dev_kfree_skb/kfree_skb() under spin_lock_irqsave()
	mISDN: hfcpci: don't call dev_kfree_skb/kfree_skb() under spin_lock_irqsave()
	mISDN: hfcmulti: don't call dev_kfree_skb/kfree_skb() under spin_lock_irqsave()
	block, bfq: fix possible uaf for 'bfqq->bic'
	selftests/bpf: Add test for unstable CT lookup API
	net: enetc: avoid buffer leaks on xdp_do_redirect() failure
	nfc: pn533: Clear nfc_target before being used
	unix: Fix race in SOCK_SEQPACKET's unix_dgram_sendmsg()
	r6040: Fix kmemleak in probe and remove
	igc: Enhance Qbv scheduling by using first flag bit
	igc: Use strict cycles for Qbv scheduling
	igc: Add checking for basetime less than zero
	igc: allow BaseTime 0 enrollment for Qbv
	igc: recalculate Qbv end_time by considering cycle time
	igc: Lift TAPRIO schedule restriction
	igc: Set Qbv start_time and end_time to end_time if not being configured in GCL
	rtc: mxc_v2: Add missing clk_disable_unprepare()
	selftests: devlink: fix the fd redirect in dummy_reporter_test
	openvswitch: Fix flow lookup to use unmasked key
	soc: mediatek: pm-domains: Fix the power glitch issue
	arm64: dts: mt8183: Fix Mali GPU clock
	skbuff: Account for tail adjustment during pull operations
	mailbox: mpfs: read the system controller's status
	mailbox: arm_mhuv2: Fix return value check in mhuv2_probe()
	mailbox: zynq-ipi: fix error handling while device_register() fails
	net_sched: reject TCF_EM_SIMPLE case for complex ematch module
	rxrpc: Fix missing unlock in rxrpc_do_sendmsg()
	myri10ge: Fix an error handling path in myri10ge_probe()
	net: stream: purge sk_error_queue in sk_stream_kill_queues()
	HID: amd_sfh: Add missing check for dma_alloc_coherent
	rcu: Fix __this_cpu_read() lockdep warning in rcu_force_quiescent_state()
	arm64: make is_ttbrX_addr() noinstr-safe
	video: hyperv_fb: Avoid taking busy spinlock on panic path
	x86/hyperv: Remove unregister syscore call from Hyper-V cleanup
	binfmt_misc: fix shift-out-of-bounds in check_special_flags
	fs: jfs: fix shift-out-of-bounds in dbAllocAG
	udf: Avoid double brelse() in udf_rename()
	jfs: Fix fortify moan in symlink
	fs: jfs: fix shift-out-of-bounds in dbDiscardAG
	ACPICA: Fix error code path in acpi_ds_call_control_method()
	nilfs2: fix shift-out-of-bounds/overflow in nilfs_sb2_bad_offset()
	nilfs2: fix shift-out-of-bounds due to too large exponent of block size
	acct: fix potential integer overflow in encode_comp_t()
	hfs: fix OOB Read in __hfs_brec_find
	drm/etnaviv: add missing quirks for GC300
	media: imx-jpeg: Disable useless interrupt to avoid kernel panic
	brcmfmac: return error when getting invalid max_flowrings from dongle
	wifi: ath9k: verify the expected usb_endpoints are present
	wifi: ar5523: Fix use-after-free on ar5523_cmd() timed out
	ASoC: codecs: rt298: Add quirk for KBL-R RVP platform
	ipmi: fix memleak when unload ipmi driver
	drm/amd/display: prevent memory leak
	Revert "drm/amd/display: Limit max DSC target bpp for specific monitors"
	qed (gcc13): use u16 for fid to be big enough
	bpf: make sure skb->len != 0 when redirecting to a tunneling device
	net: ethernet: ti: Fix return type of netcp_ndo_start_xmit()
	hamradio: baycom_epp: Fix return type of baycom_send_packet()
	wifi: brcmfmac: Fix potential shift-out-of-bounds in brcmf_fw_alloc_request()
	igb: Do not free q_vector unless new one was allocated
	drm/amdgpu: Fix type of second parameter in trans_msg() callback
	drm/amdgpu: Fix type of second parameter in odn_edit_dpm_table() callback
	s390/ctcm: Fix return type of ctc{mp,}m_tx()
	s390/netiucv: Fix return type of netiucv_tx()
	s390/lcs: Fix return type of lcs_start_xmit()
	drm/msm: Use drm_mode_copy()
	drm/rockchip: Use drm_mode_copy()
	drm/sti: Use drm_mode_copy()
	drm/mediatek: Fix return type of mtk_hdmi_bridge_mode_valid()
	drivers/md/md-bitmap: check the return value of md_bitmap_get_counter()
	md/raid1: stop mdx_raid1 thread when raid1 array run failed
	drm/amd/display: fix array index out of bound error in bios parser
	net: add atomic_long_t to net_device_stats fields
	ipv6/sit: use DEV_STATS_INC() to avoid data-races
	mrp: introduce active flags to prevent UAF when applicant uninit
	ppp: associate skb with a device at tx
	bpf: Prevent decl_tag from being referenced in func_proto arg
	ethtool: avoiding integer overflow in ethtool_phys_id()
	media: dvb-frontends: fix leak of memory fw
	media: dvbdev: adopts refcnt to avoid UAF
	media: dvb-usb: fix memory leak in dvb_usb_adapter_init()
	blk-mq: fix possible memleak when register 'hctx' failed
	drm/amd/display: Use the largest vready_offset in pipe group
	libbpf: Avoid enum forward-declarations in public API in C++ mode
	regulator: core: fix use_count leakage when handling boot-on
	wifi: mt76: do not run mt76u_status_worker if the device is not running
	mmc: f-sdh30: Add quirks for broken timeout clock capability
	mmc: renesas_sdhi: better reset from HS400 mode
	media: si470x: Fix use-after-free in si470x_int_in_callback()
	clk: st: Fix memory leak in st_of_quadfs_setup()
	crypto: hisilicon/hpre - fix resource leak in remove process
	scsi: lpfc: Fix hard lockup when reading the rx_monitor from debugfs
	scsi: ufs: Reduce the START STOP UNIT timeout
	scsi: elx: libefc: Fix second parameter type in state callbacks
	hugetlbfs: fix null-ptr-deref in hugetlbfs_parse_param()
	drm/fsl-dcu: Fix return type of fsl_dcu_drm_connector_mode_valid()
	drm/sti: Fix return type of sti_{dvo,hda,hdmi}_connector_mode_valid()
	orangefs: Fix kmemleak in orangefs_prepare_debugfs_help_string()
	orangefs: Fix kmemleak in orangefs_{kernel,client}_debug_init()
	tools/include: Add _RET_IP_ and math definitions to kernel.h
	KVM: selftests: Fix build regression by using accessor function
	hwmon: (jc42) Fix missing unlock on error in jc42_write()
	ALSA/ASoC: hda: move/rename snd_hdac_ext_stop_streams to hdac_stream.c
	ALSA: hda: add snd_hdac_stop_streams() helper
	ASoC: Intel: Skylake: Fix driver hang during shutdown
	ASoC: mediatek: mt8173-rt5650-rt5514: fix refcount leak in mt8173_rt5650_rt5514_dev_probe()
	ASoC: audio-graph-card: fix refcount leak of cpu_ep in __graph_for_each_link()
	ASoC: rockchip: pdm: Add missing clk_disable_unprepare() in rockchip_pdm_runtime_resume()
	ASoC: mediatek: mt8183: fix refcount leak in mt8183_mt6358_ts3a227_max98357_dev_probe()
	ASoC: wm8994: Fix potential deadlock
	ASoC: rockchip: spdif: Add missing clk_disable_unprepare() in rk_spdif_runtime_resume()
	ASoC: rt5670: Remove unbalanced pm_runtime_put()
	drm/i915/display: Don't disable DDI/Transcoder when setting phy test pattern
	LoadPin: Ignore the "contents" argument of the LSM hooks
	pstore: Switch pmsg_lock to an rt_mutex to avoid priority inversion
	perf debug: Set debug_peo_args and redirect_to_stderr variable to correct values in perf_quiet_option()
	afs: Fix lost servers_outstanding count
	pstore: Make sure CONFIG_PSTORE_PMSG selects CONFIG_RT_MUTEXES
	ima: Simplify ima_lsm_copy_rule
	ALSA: usb-audio: add the quirk for KT0206 device
	ALSA: hda/realtek: Add quirk for Lenovo TianYi510Pro-14IOB
	ALSA: hda/hdmi: Add HP Device 0x8711 to force connect list
	usb: cdnsp: fix lack of ZLP for ep0
	usb: xhci-mtk: fix leakage of shared hcd when fail to set wakeup irq
	arm64: dts: qcom: sm8250: fix USB-DP PHY registers
	usb: dwc3: Fix race between dwc3_set_mode and __dwc3_set_mode
	usb: dwc3: core: defer probe on ulpi_read_id timeout
	xhci: Prevent infinite loop in transaction errors recovery for streams
	HID: wacom: Ensure bootloader PID is usable in hidraw mode
	HID: mcp2221: don't connect hidraw
	loop: Fix the max_loop commandline argument treatment when it is set to 0
	9p: set req refcount to zero to avoid uninitialized usage
	security: Restrict CONFIG_ZERO_CALL_USED_REGS to gcc or clang > 15.0.6
	reiserfs: Add missing calls to reiserfs_security_free()
	iio: fix memory leak in iio_device_register_eventset()
	iio: adc: ad_sigma_delta: do not use internal iio_dev lock
	iio: adc128s052: add proper .data members in adc128_of_match table
	regulator: core: fix deadlock on regulator enable
	floppy: Fix memory leak in do_floppy_init()
	gcov: add support for checksum field
	fbdev: fbcon: release buffer when fbcon_do_set_font() failed
	ovl: fix use inode directly in rcu-walk mode
	btrfs: do not BUG_ON() on ENOMEM when dropping extent items for a range
	scsi: qla2xxx: Fix crash when I/O abort times out
	net: stmmac: fix errno when create_singlethread_workqueue() fails
	media: dvbdev: fix build warning due to comments
	media: dvbdev: fix refcnt bug
	extcon: usbc-tusb320: Call the Type-C IRQ handler only if a port is registered
	mfd: qcom_rpm: Use devm_of_platform_populate() to simplify code
	pwm: tegra: Fix 32 bit build
	Linux 5.15.86

Change-Id: Ic157edd6a65abf4a3167b5d227edeb0564f1be4e
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
2023-01-18 12:52:16 +00:00

// SPDX-License-Identifier: GPL-2.0
/*
* Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
* policies)
*/
#include "sched.h"
#include "pelt.h"
#include <trace/hooks/sched.h>
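/*
 * Default RR timeslice. sched_rr_timeslice is kept in jiffies
 * (RR_TIMESLICE); sysctl_sched_rr_timeslice mirrors the same value in
 * milliseconds for the sched_rr_timeslice_ms sysctl.
 */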
int sched_rr_timeslice = RR_TIMESLICE;
int sysctl_sched_rr_timeslice = (MSEC_PER_SEC / HZ) * RR_TIMESLICE;
/* More than 4 hours if BW_SHIFT equals 20. */
static const u64 max_rt_runtime = MAX_BW;
static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
struct rt_bandwidth def_rt_bandwidth;
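/*
 * Period timer for RT bandwidth control: each expiry forwards the timer
 * by rt_period and lets do_sched_rt_period_timer() replenish runtime and
 * unthrottle runqueues. Once every rt_rq is idle the timer is not
 * restarted.
 */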
static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
{
struct rt_bandwidth *rt_b =
container_of(timer, struct rt_bandwidth, rt_period_timer);
int idle = 0;
int overrun;
raw_spin_lock(&rt_b->rt_runtime_lock);
for (;;) {
overrun = hrtimer_forward_now(timer, rt_b->rt_period);
if (!overrun)
break;
raw_spin_unlock(&rt_b->rt_runtime_lock);
idle = do_sched_rt_period_timer(rt_b, overrun);
raw_spin_lock(&rt_b->rt_runtime_lock);
}
if (idle)
rt_b->rt_period_active = 0;
raw_spin_unlock(&rt_b->rt_runtime_lock);
return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
}
void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
{
rt_b->rt_period = ns_to_ktime(period);
rt_b->rt_runtime = runtime;
raw_spin_lock_init(&rt_b->rt_runtime_lock);
hrtimer_init(&rt_b->rt_period_timer, CLOCK_MONOTONIC,
HRTIMER_MODE_REL_HARD);
rt_b->rt_period_timer.function = sched_rt_period_timer;
}
static inline void do_start_rt_bandwidth(struct rt_bandwidth *rt_b)
{
raw_spin_lock(&rt_b->rt_runtime_lock);
if (!rt_b->rt_period_active) {
rt_b->rt_period_active = 1;
/*
* SCHED_DEADLINE updates the bandwidth, as a runaway
* RT task with a DL task could hog a CPU. But DL does
* not reset the period. If a deadline task was running
* without an RT task running, it can cause RT tasks to
* throttle when they start up. Kick the timer right away
* to update the period.
*/
hrtimer_forward_now(&rt_b->rt_period_timer, ns_to_ktime(0));
hrtimer_start_expires(&rt_b->rt_period_timer,
HRTIMER_MODE_ABS_PINNED_HARD);
}
raw_spin_unlock(&rt_b->rt_runtime_lock);
}
static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
{
if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
return;
do_start_rt_bandwidth(rt_b);
}
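/*
 * Initialize a per-rq RT runqueue: empty priority array with MAX_RT_PRIO
 * acting as the bitsearch delimiter, plus the SMP overload/pushable and
 * throttling bookkeeping.
 */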
void init_rt_rq(struct rt_rq *rt_rq)
{
struct rt_prio_array *array;
int i;
array = &rt_rq->active;
for (i = 0; i < MAX_RT_PRIO; i++) {
INIT_LIST_HEAD(array->queue + i);
__clear_bit(i, array->bitmap);
}
/* delimiter for bitsearch: */
__set_bit(MAX_RT_PRIO, array->bitmap);
#if defined CONFIG_SMP
rt_rq->highest_prio.curr = MAX_RT_PRIO-1;
rt_rq->highest_prio.next = MAX_RT_PRIO-1;
rt_rq->rt_nr_migratory = 0;
rt_rq->overloaded = 0;
plist_head_init(&rt_rq->pushable_tasks);
#endif /* CONFIG_SMP */
/* We start in dequeued state, because no RT tasks are queued */
rt_rq->rt_queued = 0;
rt_rq->rt_time = 0;
rt_rq->rt_throttled = 0;
rt_rq->rt_runtime = 0;
raw_spin_lock_init(&rt_rq->rt_runtime_lock);
}
#ifdef CONFIG_RT_GROUP_SCHED
static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
{
hrtimer_cancel(&rt_b->rt_period_timer);
}
#define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_SCHED_DEBUG
WARN_ON_ONCE(!rt_entity_is_task(rt_se));
#endif
return container_of(rt_se, struct task_struct, rt);
}
static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
return rt_rq->rq;
}
static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
return rt_se->rt_rq;
}
static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
{
struct rt_rq *rt_rq = rt_se->rt_rq;
return rt_rq->rq;
}
void unregister_rt_sched_group(struct task_group *tg)
{
if (tg->rt_se)
destroy_rt_bandwidth(&tg->rt_bandwidth);
}
void free_rt_sched_group(struct task_group *tg)
{
int i;
for_each_possible_cpu(i) {
if (tg->rt_rq)
kfree(tg->rt_rq[i]);
if (tg->rt_se)
kfree(tg->rt_se[i]);
}
kfree(tg->rt_rq);
kfree(tg->rt_se);
}
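/*
 * Wire a group's per-CPU rt_rq and scheduling entity into the hierarchy:
 * the entity is queued on its parent's queue (the root rq->rt when there
 * is no parent) and owns @rt_rq as its child queue (my_q).
 */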
void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
struct sched_rt_entity *rt_se, int cpu,
struct sched_rt_entity *parent)
{
struct rq *rq = cpu_rq(cpu);
rt_rq->highest_prio.curr = MAX_RT_PRIO-1;
rt_rq->rt_nr_boosted = 0;
rt_rq->rq = rq;
rt_rq->tg = tg;
tg->rt_rq[cpu] = rt_rq;
tg->rt_se[cpu] = rt_se;
if (!rt_se)
return;
if (!parent)
rt_se->rt_rq = &rq->rt;
else
rt_se->rt_rq = parent->my_q;
rt_se->my_q = rt_rq;
rt_se->parent = parent;
INIT_LIST_HEAD(&rt_se->run_list);
}
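/*
 * Allocate the per-CPU rt_rq and sched_rt_entity arrays for a new task
 * group, give every rt_rq the group's runtime budget and hook it under
 * @parent. Returns 1 on success, 0 on allocation failure.
 */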
int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
{
struct rt_rq *rt_rq;
struct sched_rt_entity *rt_se;
int i;
tg->rt_rq = kcalloc(nr_cpu_ids, sizeof(rt_rq), GFP_KERNEL);
if (!tg->rt_rq)
goto err;
tg->rt_se = kcalloc(nr_cpu_ids, sizeof(rt_se), GFP_KERNEL);
if (!tg->rt_se)
goto err;
init_rt_bandwidth(&tg->rt_bandwidth,
ktime_to_ns(def_rt_bandwidth.rt_period), 0);
for_each_possible_cpu(i) {
rt_rq = kzalloc_node(sizeof(struct rt_rq),
GFP_KERNEL, cpu_to_node(i));
if (!rt_rq)
goto err;
rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
GFP_KERNEL, cpu_to_node(i));
if (!rt_se)
goto err_free_rq;
init_rt_rq(rt_rq);
rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
}
return 1;
err_free_rq:
kfree(rt_rq);
err:
return 0;
}
#else /* CONFIG_RT_GROUP_SCHED */
#define rt_entity_is_task(rt_se) (1)
static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
return container_of(rt_se, struct task_struct, rt);
}
static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
return container_of(rt_rq, struct rq, rt);
}
static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
{
struct task_struct *p = rt_task_of(rt_se);
return task_rq(p);
}
static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
struct rq *rq = rq_of_rt_se(rt_se);
return &rq->rt;
}
void unregister_rt_sched_group(struct task_group *tg) { }
void free_rt_sched_group(struct task_group *tg) { }
int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
{
return 1;
}
#endif /* CONFIG_RT_GROUP_SCHED */
#ifdef CONFIG_SMP
static void pull_rt_task(struct rq *this_rq);
static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
{
/* Try to pull RT tasks here if we lower this rq's prio */
return rq->online && rq->rt.highest_prio.curr > prev->prio;
}
static inline int rt_overloaded(struct rq *rq)
{
return atomic_read(&rq->rd->rto_count);
}
static inline void rt_set_overload(struct rq *rq)
{
if (!rq->online)
return;
cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
/*
* Make sure the mask is visible before we set
* the overload count. That is checked to determine
* if we should look at the mask. It would be a shame
* if we looked at the mask, but the mask was not
* updated yet.
*
* Matched by the barrier in pull_rt_task().
*/
smp_wmb();
atomic_inc(&rq->rd->rto_count);
}
static inline void rt_clear_overload(struct rq *rq)
{
if (!rq->online)
return;
/* the order here really doesn't matter */
atomic_dec(&rq->rd->rto_count);
cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
}
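/*
 * An rq counts as RT-overloaded when it has more than one RT task and at
 * least one of them may migrate; propagate that state to the root domain
 * so other CPUs know where to pull from.
 */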
static void update_rt_migration(struct rt_rq *rt_rq)
{
if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
if (!rt_rq->overloaded) {
rt_set_overload(rq_of_rt_rq(rt_rq));
rt_rq->overloaded = 1;
}
} else if (rt_rq->overloaded) {
rt_clear_overload(rq_of_rt_rq(rt_rq));
rt_rq->overloaded = 0;
}
}
static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
struct task_struct *p;
if (!rt_entity_is_task(rt_se))
return;
p = rt_task_of(rt_se);
rt_rq = &rq_of_rt_rq(rt_rq)->rt;
rt_rq->rt_nr_total++;
if (p->nr_cpus_allowed > 1)
rt_rq->rt_nr_migratory++;
update_rt_migration(rt_rq);
}
static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
struct task_struct *p;
if (!rt_entity_is_task(rt_se))
return;
p = rt_task_of(rt_se);
rt_rq = &rq_of_rt_rq(rt_rq)->rt;
rt_rq->rt_nr_total--;
if (p->nr_cpus_allowed > 1)
rt_rq->rt_nr_migratory--;
update_rt_migration(rt_rq);
}
static inline int has_pushable_tasks(struct rq *rq)
{
return !plist_head_empty(&rq->rt.pushable_tasks);
}
static DEFINE_PER_CPU(struct callback_head, rt_push_head);
static DEFINE_PER_CPU(struct callback_head, rt_pull_head);
static void push_rt_tasks(struct rq *);
static void pull_rt_task(struct rq *);
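/*
 * Push/pull work is deferred through per-CPU balance callbacks so it runs
 * at a point in the scheduler where it is safe to drop and re-acquire
 * runqueue locks.
 */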
static inline void rt_queue_push_tasks(struct rq *rq)
{
if (!has_pushable_tasks(rq))
return;
queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks);
}
static inline void rt_queue_pull_task(struct rq *rq)
{
queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu), pull_rt_task);
}
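/*
 * Tasks that may run on more than one CPU are kept on a priority-sorted
 * plist so push_rt_tasks() can hand the highest-priority one to another
 * CPU; highest_prio.next caches the best pushable priority.
 */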
static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
plist_node_init(&p->pushable_tasks, p->prio);
plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
/* Update the highest prio pushable task */
if (p->prio < rq->rt.highest_prio.next)
rq->rt.highest_prio.next = p->prio;
}
static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
/* Update the new highest prio pushable task */
if (has_pushable_tasks(rq)) {
p = plist_first_entry(&rq->rt.pushable_tasks,
struct task_struct, pushable_tasks);
rq->rt.highest_prio.next = p->prio;
} else {
rq->rt.highest_prio.next = MAX_RT_PRIO-1;
}
}
#else
static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
}
static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
}
static inline
void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
}
static inline
void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
}
static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
{
return false;
}
static inline void pull_rt_task(struct rq *this_rq)
{
}
static inline void rt_queue_push_tasks(struct rq *rq)
{
}
#endif /* CONFIG_SMP */
static void enqueue_top_rt_rq(struct rt_rq *rt_rq);
static void dequeue_top_rt_rq(struct rt_rq *rt_rq, unsigned int count);
static inline int on_rt_rq(struct sched_rt_entity *rt_se)
{
return rt_se->on_rq;
}
#ifdef CONFIG_UCLAMP_TASK
/*
* Verify the fitness of task @p to run on @cpu taking into account the uclamp
* settings.
*
* This check is only important for heterogeneous systems where the uclamp_min
* value is higher than the capacity of @cpu. For non-heterogeneous systems
* this function always returns true.
*
* The function will return true if the capacity of the @cpu is >= the
* uclamp_min and false otherwise.
*
* Note that uclamp_min will be clamped to uclamp_max if uclamp_min
* > uclamp_max.
*/
static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu)
{
unsigned int min_cap;
unsigned int max_cap;
unsigned int cpu_cap;
/* Only heterogeneous systems can benefit from this check */
if (!sched_asym_cpucap_active())
return true;
min_cap = uclamp_eff_value(p, UCLAMP_MIN);
max_cap = uclamp_eff_value(p, UCLAMP_MAX);
cpu_cap = capacity_orig_of(cpu);
return cpu_cap >= min(min_cap, max_cap);
}
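/*
 * Illustrative example (made-up numbers): with uclamp_min = 800 and
 * uclamp_max = 512 the request is clamped to min(800, 512) = 512, so a
 * little CPU with capacity_orig_of() == 512 is still considered a fit.
 */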
#else
static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu)
{
return true;
}
#endif
#ifdef CONFIG_RT_GROUP_SCHED
static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
if (!rt_rq->tg)
return RUNTIME_INF;
return rt_rq->rt_runtime;
}
static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
}
typedef struct task_group *rt_rq_iter_t;
static inline struct task_group *next_task_group(struct task_group *tg)
{
do {
tg = list_entry_rcu(tg->list.next,
typeof(struct task_group), list);
} while (&tg->list != &task_groups && task_group_is_autogroup(tg));
if (&tg->list == &task_groups)
tg = NULL;
return tg;
}
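/*
 * Walk every task group's rt_rq for this CPU by following the global
 * task_groups list, skipping autogroup entries. The !RT_GROUP_SCHED
 * variant further down degenerates to the single root rt_rq.
 */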
#define for_each_rt_rq(rt_rq, iter, rq) \
for (iter = container_of(&task_groups, typeof(*iter), list); \
(iter = next_task_group(iter)) && \
(rt_rq = iter->rt_rq[cpu_of(rq)]);)
#define for_each_sched_rt_entity(rt_se) \
for (; rt_se; rt_se = rt_se->parent)
static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
return rt_se->my_q;
}
static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);
static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);
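/*
 * Re-enqueue a group rt_rq that has become runnable again (e.g. after its
 * throttle was lifted): queue its representative entity (or the top-level
 * rt_rq itself for the root group) and preempt the current task if the
 * group now holds a higher priority.
 */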
static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
struct rq *rq = rq_of_rt_rq(rt_rq);
struct sched_rt_entity *rt_se;
int cpu = cpu_of(rq);
rt_se = rt_rq->tg->rt_se[cpu];
if (rt_rq->rt_nr_running) {
if (!rt_se)
enqueue_top_rt_rq(rt_rq);
else if (!on_rt_rq(rt_se))
enqueue_rt_entity(rt_se, 0);
if (rt_rq->highest_prio.curr < curr->prio)
resched_curr(rq);
}
}
static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
struct sched_rt_entity *rt_se;
int cpu = cpu_of(rq_of_rt_rq(rt_rq));
rt_se = rt_rq->tg->rt_se[cpu];
if (!rt_se) {
dequeue_top_rt_rq(rt_rq, rt_rq->rt_nr_running);
/* Kick cpufreq (see the comment in kernel/sched/sched.h). */
cpufreq_update_util(rq_of_rt_rq(rt_rq), 0);
}
else if (on_rt_rq(rt_se))
dequeue_rt_entity(rt_se, 0);
}
static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
}
static int rt_se_boosted(struct sched_rt_entity *rt_se)
{
struct rt_rq *rt_rq = group_rt_rq(rt_se);
struct task_struct *p;
if (rt_rq)
return !!rt_rq->rt_nr_boosted;
p = rt_task_of(rt_se);
return p->prio != p->normal_prio;
}
#ifdef CONFIG_SMP
static inline const struct cpumask *sched_rt_period_mask(void)
{
return this_rq()->rd->span;
}
#else
static inline const struct cpumask *sched_rt_period_mask(void)
{
return cpu_online_mask;
}
#endif
static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
}
static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
return &rt_rq->tg->rt_bandwidth;
}
#else /* !CONFIG_RT_GROUP_SCHED */
static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
return rt_rq->rt_runtime;
}
static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
return ktime_to_ns(def_rt_bandwidth.rt_period);
}
typedef struct rt_rq *rt_rq_iter_t;
#define for_each_rt_rq(rt_rq, iter, rq) \
for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
#define for_each_sched_rt_entity(rt_se) \
for (; rt_se; rt_se = NULL)
static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
return NULL;
}
static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
struct rq *rq = rq_of_rt_rq(rt_rq);
if (!rt_rq->rt_nr_running)
return;
enqueue_top_rt_rq(rt_rq);
resched_curr(rq);
}
static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
dequeue_top_rt_rq(rt_rq, rt_rq->rt_nr_running);
}
static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
return rt_rq->rt_throttled;
}
static inline const struct cpumask *sched_rt_period_mask(void)
{
return cpu_online_mask;
}
static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
return &cpu_rq(cpu)->rt;
}
static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
return &def_rt_bandwidth;
}
#endif /* CONFIG_RT_GROUP_SCHED */
bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
{
struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
return (hrtimer_active(&rt_b->rt_period_timer) ||
rt_rq->rt_time < rt_b->rt_runtime);
}
#ifdef CONFIG_SMP
/*
* We ran out of runtime, see if we can borrow some from our neighbours.
*/
static void do_balance_runtime(struct rt_rq *rt_rq)
{
struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
int i, weight;
u64 rt_period;
weight = cpumask_weight(rd->span);
raw_spin_lock(&rt_b->rt_runtime_lock);
rt_period = ktime_to_ns(rt_b->rt_period);
for_each_cpu(i, rd->span) {
struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
s64 diff;
if (iter == rt_rq)
continue;
raw_spin_lock(&iter->rt_runtime_lock);
/*
* Either all rqs have inf runtime and there's nothing to steal
* or __disable_runtime() below sets a specific rq to inf to
* indicate it's been disabled and disallow stealing.
*/
if (iter->rt_runtime == RUNTIME_INF)
goto next;
/*
* From runqueues with spare time, take 1/n part of their
* spare time, but no more than our period.
*/
diff = iter->rt_runtime - iter->rt_time;
if (diff > 0) {
diff = div_u64((u64)diff, weight);
if (rt_rq->rt_runtime + diff > rt_period)
diff = rt_period - rt_rq->rt_runtime;
iter->rt_runtime -= diff;
rt_rq->rt_runtime += diff;
if (rt_rq->rt_runtime == rt_period) {
raw_spin_unlock(&iter->rt_runtime_lock);
break;
}
}
next:
raw_spin_unlock(&iter->rt_runtime_lock);
}
raw_spin_unlock(&rt_b->rt_runtime_lock);
}
/*
* Ensure this RQ takes back all the runtime it lent to its neighbours.
*/
static void __disable_runtime(struct rq *rq)
{
struct root_domain *rd = rq->rd;
rt_rq_iter_t iter;
struct rt_rq *rt_rq;
if (unlikely(!scheduler_running))
return;
for_each_rt_rq(rt_rq, iter, rq) {
struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
s64 want;
int i;
raw_spin_lock(&rt_b->rt_runtime_lock);
raw_spin_lock(&rt_rq->rt_runtime_lock);
/*
* Either we're all inf and nobody needs to borrow, or we're
* already disabled and thus have nothing to do, or we have
* exactly the right amount of runtime to take out.
*/
if (rt_rq->rt_runtime == RUNTIME_INF ||
rt_rq->rt_runtime == rt_b->rt_runtime)
goto balanced;
raw_spin_unlock(&rt_rq->rt_runtime_lock);
/*
* Calculate the difference between what we started out with
* and what we currently have; that's the amount of runtime
* we lent out and now have to reclaim.
*/
want = rt_b->rt_runtime - rt_rq->rt_runtime;
/*
* Greedy reclaim, take back as much as we can.
*/
for_each_cpu(i, rd->span) {
struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
s64 diff;
/*
* Can't reclaim from ourselves or disabled runqueues.
*/
if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
continue;
raw_spin_lock(&iter->rt_runtime_lock);
if (want > 0) {
diff = min_t(s64, iter->rt_runtime, want);
iter->rt_runtime -= diff;
want -= diff;
} else {
iter->rt_runtime -= want;
want -= want;
}
raw_spin_unlock(&iter->rt_runtime_lock);
if (!want)
break;
}
raw_spin_lock(&rt_rq->rt_runtime_lock);
/*
* We cannot be left wanting - that would mean some runtime
* leaked out of the system.
*/
BUG_ON(want);
balanced:
/*
* Disable all the borrow logic by pretending we have inf
* runtime - in which case borrowing doesn't make sense.
*/
rt_rq->rt_runtime = RUNTIME_INF;
rt_rq->rt_throttled = 0;
raw_spin_unlock(&rt_rq->rt_runtime_lock);
raw_spin_unlock(&rt_b->rt_runtime_lock);
/* Make rt_rq available for pick_next_task() */
sched_rt_rq_enqueue(rt_rq);
}
}
static void __enable_runtime(struct rq *rq)
{
rt_rq_iter_t iter;
struct rt_rq *rt_rq;
if (unlikely(!scheduler_running))
return;
/*
* Reset each runqueue's bandwidth settings
*/
for_each_rt_rq(rt_rq, iter, rq) {
struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
raw_spin_lock(&rt_b->rt_runtime_lock);
raw_spin_lock(&rt_rq->rt_runtime_lock);
rt_rq->rt_runtime = rt_b->rt_runtime;
rt_rq->rt_time = 0;
rt_rq->rt_throttled = 0;
raw_spin_unlock(&rt_rq->rt_runtime_lock);
raw_spin_unlock(&rt_b->rt_runtime_lock);
}
}
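/*
* If this rt_rq has consumed more than its local runtime share, try to
* borrow runtime from other CPUs in the root domain (RT_RUNTIME_SHARE).
* Called with rt_rq->rt_runtime_lock held; the lock is dropped around the
* actual borrowing.
*/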
static void balance_runtime(struct rt_rq *rt_rq)
{
if (!sched_feat(RT_RUNTIME_SHARE))
return;
if (rt_rq->rt_time > rt_rq->rt_runtime) {
raw_spin_unlock(&rt_rq->rt_runtime_lock);
do_balance_runtime(rt_rq);
raw_spin_lock(&rt_rq->rt_runtime_lock);
}
}
#else /* !CONFIG_SMP */
static inline void balance_runtime(struct rt_rq *rt_rq) {}
#endif /* CONFIG_SMP */
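/*
* Handler for the periodic bandwidth timer: for every CPU in the period
* mask, charge the elapsed periods against rt_time, unthrottle and
* re-enqueue rt_rqs that fit within their runtime again, and return
* whether the timer can go idle.
*/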
static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
{
int i, idle = 1, throttled = 0;
const struct cpumask *span;
span = sched_rt_period_mask();
#ifdef CONFIG_RT_GROUP_SCHED
/*
* FIXME: isolated CPUs should really leave the root task group,
* whether they are isolcpus or were isolated via cpusets, lest
* the timer run on a CPU which does not service all runqueues,
* potentially leaving other CPUs indefinitely throttled. If
* isolation is really required, the user will turn the throttle
* off to kill the perturbations it causes anyway. Meanwhile,
* this maintains functionality for boot and/or troubleshooting.
*/
if (rt_b == &root_task_group.rt_bandwidth)
span = cpu_online_mask;
#endif
for_each_cpu(i, span) {
int enqueue = 0;
struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
struct rq *rq = rq_of_rt_rq(rt_rq);
struct rq_flags rf;
int skip;
/*
* When span == cpu_online_mask, taking each rq->lock
* can be time-consuming. Try to avoid it when possible.
*/
raw_spin_lock(&rt_rq->rt_runtime_lock);
if (!sched_feat(RT_RUNTIME_SHARE) && rt_rq->rt_runtime != RUNTIME_INF)
rt_rq->rt_runtime = rt_b->rt_runtime;
skip = !rt_rq->rt_time && !rt_rq->rt_nr_running;
raw_spin_unlock(&rt_rq->rt_runtime_lock);
if (skip)
continue;
rq_lock(rq, &rf);
update_rq_clock(rq);
if (rt_rq->rt_time) {
u64 runtime;
raw_spin_lock(&rt_rq->rt_runtime_lock);
if (rt_rq->rt_throttled)
balance_runtime(rt_rq);
runtime = rt_rq->rt_runtime;
rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
rt_rq->rt_throttled = 0;
enqueue = 1;
/*
* When we're idle and a woken (rt) task is
* throttled, check_preempt_curr() will set
* skip_update and the time between the wakeup
* and this unthrottle will get accounted as
* 'runtime'.
*/
if (rt_rq->rt_nr_running && rq->curr == rq->idle)
rq_clock_cancel_skipupdate(rq);
}
if (rt_rq->rt_time || rt_rq->rt_nr_running)
idle = 0;
raw_spin_unlock(&rt_rq->rt_runtime_lock);
} else if (rt_rq->rt_nr_running) {
idle = 0;
if (!rt_rq_throttled(rt_rq))
enqueue = 1;
}
if (rt_rq->rt_throttled)
throttled = 1;
if (enqueue)
sched_rt_rq_enqueue(rt_rq);
rq_unlock(rq, &rf);
}
if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
return 1;
return idle;
}
static inline int rt_se_prio(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_RT_GROUP_SCHED
struct rt_rq *rt_rq = group_rt_rq(rt_se);
if (rt_rq)
return rt_rq->highest_prio.curr;
#endif
return rt_task_of(rt_se)->prio;
}
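/*
* Check whether this rt_rq has run through its runtime for the current
* period. If so, mark it throttled (unless it has no runtime assigned and
* only accrued time through boosting) and dequeue it so it is skipped
* until the next replenishment. Returns 1 when the caller must resched.
*/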
static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
{
u64 runtime = sched_rt_runtime(rt_rq);
if (rt_rq->rt_throttled)
return rt_rq_throttled(rt_rq);
if (runtime >= sched_rt_period(rt_rq))
return 0;
balance_runtime(rt_rq);
runtime = sched_rt_runtime(rt_rq);
if (runtime == RUNTIME_INF)
return 0;
if (rt_rq->rt_time > runtime) {
struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
/*
* Don't actually throttle groups that have no runtime assigned
* but accrue some time due to boosting.
*/
if (likely(rt_b->rt_runtime)) {
rt_rq->rt_throttled = 1;
printk_deferred_once("sched: RT throttling activated\n");
trace_android_vh_dump_throttled_rt_tasks(
raw_smp_processor_id(),
rq_clock(rq_of_rt_rq(rt_rq)),
sched_rt_period(rt_rq),
runtime,
hrtimer_get_expires_ns(&rt_b->rt_period_timer));
} else {
/*
* In case we did anyway, make it go away;
* replenishment would be pointless, since it would replenish us
* with exactly 0 ns.
*/
rt_rq->rt_time = 0;
}
if (rt_rq_throttled(rt_rq)) {
sched_rt_rq_dequeue(rt_rq);
return 1;
}
}
return 0;
}
/*
* Update the current task's runtime statistics. Skip current tasks that
* are not in our scheduling class.
*/
static void update_curr_rt(struct rq *rq)
{
struct task_struct *curr = rq->curr;
struct sched_rt_entity *rt_se = &curr->rt;
u64 delta_exec;
u64 now;
if (curr->sched_class != &rt_sched_class)
return;
now = rq_clock_task(rq);
delta_exec = now - curr->se.exec_start;
if (unlikely((s64)delta_exec <= 0))
return;
schedstat_set(curr->se.statistics.exec_max,
max(curr->se.statistics.exec_max, delta_exec));
curr->se.sum_exec_runtime += delta_exec;
account_group_exec_runtime(curr, delta_exec);
curr->se.exec_start = now;
cgroup_account_cputime(curr, delta_exec);
if (!rt_bandwidth_enabled())
return;
for_each_sched_rt_entity(rt_se) {
struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
int exceeded;
if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
raw_spin_lock(&rt_rq->rt_runtime_lock);
rt_rq->rt_time += delta_exec;
exceeded = sched_rt_runtime_exceeded(rt_rq);
if (exceeded)
resched_curr(rq);
raw_spin_unlock(&rt_rq->rt_runtime_lock);
if (exceeded)
do_start_rt_bandwidth(sched_rt_bandwidth(rt_rq));
}
}
}
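/*
* The root rt_rq contributes its rt_nr_running to rq->nr_running only while
* it is queued; these helpers attach and detach that contribution when the
* top-level RT queue becomes runnable, empty, or throttled.
*/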
static void
dequeue_top_rt_rq(struct rt_rq *rt_rq, unsigned int count)
{
struct rq *rq = rq_of_rt_rq(rt_rq);
BUG_ON(&rq->rt != rt_rq);
if (!rt_rq->rt_queued)
return;
BUG_ON(!rq->nr_running);
sub_nr_running(rq, count);
rt_rq->rt_queued = 0;
}
static void
enqueue_top_rt_rq(struct rt_rq *rt_rq)
{
struct rq *rq = rq_of_rt_rq(rt_rq);
BUG_ON(&rq->rt != rt_rq);
if (rt_rq->rt_queued)
return;
if (rt_rq_throttled(rt_rq))
return;
if (rt_rq->rt_nr_running) {
add_nr_running(rq, rt_rq->rt_nr_running);
rt_rq->rt_queued = 1;
}
/* Kick cpufreq (see the comment in kernel/sched/sched.h). */
cpufreq_update_util(rq, 0);
}
#if defined CONFIG_SMP
static void
inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
{
struct rq *rq = rq_of_rt_rq(rt_rq);
#ifdef CONFIG_RT_GROUP_SCHED
/*
* Change rq's cpupri only if rt_rq is the top queue.
*/
if (&rq->rt != rt_rq)
return;
#endif
if (rq->online && prio < prev_prio)
cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
}
static void
dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
{
struct rq *rq = rq_of_rt_rq(rt_rq);
#ifdef CONFIG_RT_GROUP_SCHED
/*
* Change rq's cpupri only if rt_rq is the top queue.
*/
if (&rq->rt != rt_rq)
return;
#endif
if (rq->online && rt_rq->highest_prio.curr != prev_prio)
cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
}
#else /* CONFIG_SMP */
static inline
void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
static inline
void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
#endif /* CONFIG_SMP */
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
static void
inc_rt_prio(struct rt_rq *rt_rq, int prio)
{
int prev_prio = rt_rq->highest_prio.curr;
if (prio < prev_prio)
rt_rq->highest_prio.curr = prio;
inc_rt_prio_smp(rt_rq, prio, prev_prio);
}
static void
dec_rt_prio(struct rt_rq *rt_rq, int prio)
{
int prev_prio = rt_rq->highest_prio.curr;
if (rt_rq->rt_nr_running) {
WARN_ON(prio < prev_prio);
/*
* This may have been our highest task, and therefore
* we may have some recomputation to do
*/
if (prio == prev_prio) {
struct rt_prio_array *array = &rt_rq->active;
rt_rq->highest_prio.curr =
sched_find_first_bit(array->bitmap);
}
} else {
rt_rq->highest_prio.curr = MAX_RT_PRIO-1;
}
dec_rt_prio_smp(rt_rq, prio, prev_prio);
}
#else
static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
#endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */
#ifdef CONFIG_RT_GROUP_SCHED
static void
inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
if (rt_se_boosted(rt_se))
rt_rq->rt_nr_boosted++;
if (rt_rq->tg)
start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
}
static void
dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
if (rt_se_boosted(rt_se))
rt_rq->rt_nr_boosted--;
WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
}
#else /* CONFIG_RT_GROUP_SCHED */
static void
inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
start_rt_bandwidth(&def_rt_bandwidth);
}
static inline
void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
#endif /* CONFIG_RT_GROUP_SCHED */
static inline
unsigned int rt_se_nr_running(struct sched_rt_entity *rt_se)
{
struct rt_rq *group_rq = group_rt_rq(rt_se);
if (group_rq)
return group_rq->rt_nr_running;
else
return 1;
}
static inline
unsigned int rt_se_rr_nr_running(struct sched_rt_entity *rt_se)
{
struct rt_rq *group_rq = group_rt_rq(rt_se);
struct task_struct *tsk;
if (group_rq)
return group_rq->rr_nr_running;
tsk = rt_task_of(rt_se);
return (tsk->policy == SCHED_RR) ? 1 : 0;
}
static inline
void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
int prio = rt_se_prio(rt_se);
WARN_ON(!rt_prio(prio));
rt_rq->rt_nr_running += rt_se_nr_running(rt_se);
rt_rq->rr_nr_running += rt_se_rr_nr_running(rt_se);
inc_rt_prio(rt_rq, prio);
inc_rt_migration(rt_se, rt_rq);
inc_rt_group(rt_se, rt_rq);
}
static inline
void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
WARN_ON(!rt_prio(rt_se_prio(rt_se)));
WARN_ON(!rt_rq->rt_nr_running);
rt_rq->rt_nr_running -= rt_se_nr_running(rt_se);
rt_rq->rr_nr_running -= rt_se_rr_nr_running(rt_se);
dec_rt_prio(rt_rq, rt_se_prio(rt_se));
dec_rt_migration(rt_se, rt_rq);
dec_rt_group(rt_se, rt_rq);
}
/*
* Change rt_se->run_list location unless SAVE && !MOVE
*
* assumes ENQUEUE/DEQUEUE flags match
*/
static inline bool move_entity(unsigned int flags)
{
if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE)
return false;
return true;
}
static void __delist_rt_entity(struct sched_rt_entity *rt_se, struct rt_prio_array *array)
{
list_del_init(&rt_se->run_list);
if (list_empty(array->queue + rt_se_prio(rt_se)))
__clear_bit(rt_se_prio(rt_se), array->bitmap);
rt_se->on_list = 0;
}
static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
{
struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
struct rt_prio_array *array = &rt_rq->active;
struct rt_rq *group_rq = group_rt_rq(rt_se);
struct list_head *queue = array->queue + rt_se_prio(rt_se);
/*
* Don't enqueue the group if it's throttled, or when empty.
* The latter is a consequence of the former when a child group
* gets throttled and the current group doesn't have any other
* active members.
*/
if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running)) {
if (rt_se->on_list)
__delist_rt_entity(rt_se, array);
return;
}
if (move_entity(flags)) {
WARN_ON_ONCE(rt_se->on_list);
if (flags & ENQUEUE_HEAD)
list_add(&rt_se->run_list, queue);
else
list_add_tail(&rt_se->run_list, queue);
__set_bit(rt_se_prio(rt_se), array->bitmap);
rt_se->on_list = 1;
}
rt_se->on_rq = 1;
inc_rt_tasks(rt_se, rt_rq);
}
static void __dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
{
struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
struct rt_prio_array *array = &rt_rq->active;
if (move_entity(flags)) {
WARN_ON_ONCE(!rt_se->on_list);
__delist_rt_entity(rt_se, array);
}
rt_se->on_rq = 0;
dec_rt_tasks(rt_se, rt_rq);
}
/*
* Because the prio of an upper entry depends on the lower
* entries, we must remove entries top-down.
*/
static void dequeue_rt_stack(struct sched_rt_entity *rt_se, unsigned int flags)
{
struct sched_rt_entity *back = NULL;
unsigned int rt_nr_running;
for_each_sched_rt_entity(rt_se) {
rt_se->back = back;
back = rt_se;
}
rt_nr_running = rt_rq_of_se(back)->rt_nr_running;
for (rt_se = back; rt_se; rt_se = rt_se->back) {
if (on_rt_rq(rt_se))
__dequeue_rt_entity(rt_se, flags);
}
dequeue_top_rt_rq(rt_rq_of_se(back), rt_nr_running);
}
static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
{
struct rq *rq = rq_of_rt_se(rt_se);
dequeue_rt_stack(rt_se, flags);
for_each_sched_rt_entity(rt_se)
__enqueue_rt_entity(rt_se, flags);
enqueue_top_rt_rq(&rq->rt);
}
static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
{
struct rq *rq = rq_of_rt_se(rt_se);
dequeue_rt_stack(rt_se, flags);
for_each_sched_rt_entity(rt_se) {
struct rt_rq *rt_rq = group_rt_rq(rt_se);
if (rt_rq && rt_rq->rt_nr_running)
__enqueue_rt_entity(rt_se, flags);
}
enqueue_top_rt_rq(&rq->rt);
}
#ifdef CONFIG_SMP
static inline bool should_honor_rt_sync(struct rq *rq, struct task_struct *p,
bool sync)
{
/*
* If the waker is CFS, then an RT sync wakeup would preempt the waker
* and force it to run for a likely small time after the RT wakee is
* done. So, only honor RT sync wakeups from RT wakers.
*/
return sync && task_has_rt_policy(rq->curr) &&
p->prio <= rq->rt.highest_prio.next &&
rq->rt.rt_nr_running <= 2;
}
#else
static inline bool should_honor_rt_sync(struct rq *rq, struct task_struct *p,
bool sync)
{
return 0;
}
#endif
/*
* Adding/removing a task to/from a priority array:
*/
static void
enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
{
struct sched_rt_entity *rt_se = &p->rt;
bool sync = !!(flags & ENQUEUE_WAKEUP_SYNC);
if (flags & ENQUEUE_WAKEUP)
rt_se->timeout = 0;
enqueue_rt_entity(rt_se, flags);
if (!task_current(rq, p) && p->nr_cpus_allowed > 1 &&
!should_honor_rt_sync(rq, p, sync))
enqueue_pushable_task(rq, p);
}
static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
{
struct sched_rt_entity *rt_se = &p->rt;
update_curr_rt(rq);
dequeue_rt_entity(rt_se, flags);
dequeue_pushable_task(rq, p);
}
/*
* Put the task at the head or the end of the run list without the overhead of
* dequeue followed by enqueue.
*/
static void
requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
{
if (on_rt_rq(rt_se)) {
struct rt_prio_array *array = &rt_rq->active;
struct list_head *queue = array->queue + rt_se_prio(rt_se);
if (head)
list_move(&rt_se->run_list, queue);
else
list_move_tail(&rt_se->run_list, queue);
}
}
static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
{
struct sched_rt_entity *rt_se = &p->rt;
struct rt_rq *rt_rq;
for_each_sched_rt_entity(rt_se) {
rt_rq = rt_rq_of_se(rt_se);
requeue_rt_entity(rt_rq, rt_se, head);
}
}
static void yield_task_rt(struct rq *rq)
{
requeue_task_rt(rq, rq->curr, 0);
}
#ifdef CONFIG_SMP
static int find_lowest_rq(struct task_struct *task);
#ifdef CONFIG_RT_SOFTINT_OPTIMIZATION
/*
* Return whether the task on the given cpu is currently non-preemptible
* while handling a potentially long softint, or if the task is likely
* to block preemption soon because it is a ksoftirqd thread that is
* handling slow softints.
*/
bool
task_may_not_preempt(struct task_struct *task, int cpu)
{
__u32 softirqs = per_cpu(active_softirqs, cpu) |
local_softirq_pending();
struct task_struct *cpu_ksoftirqd = per_cpu(ksoftirqd, cpu);
return ((softirqs & LONG_SOFTIRQ_MASK) &&
(task == cpu_ksoftirqd ||
task_thread_info(task)->preempt_count & SOFTIRQ_MASK));
}
EXPORT_SYMBOL_GPL(task_may_not_preempt);
#endif /* CONFIG_RT_SOFTINT_OPTIMIZATION */
static int
select_task_rq_rt(struct task_struct *p, int cpu, int flags)
{
struct task_struct *curr;
struct rq *rq;
struct rq *this_cpu_rq;
bool test;
int target_cpu = -1;
bool may_not_preempt;
bool sync = !!(flags & WF_SYNC);
int this_cpu;
trace_android_rvh_select_task_rq_rt(p, cpu, flags & 0xF,
flags, &target_cpu);
if (target_cpu >= 0)
return target_cpu;
/* For anything but wake ups, just return the task_cpu */
if (!(flags & (WF_TTWU | WF_FORK)))
goto out;
rq = cpu_rq(cpu);
rcu_read_lock();
curr = READ_ONCE(rq->curr); /* unlocked access */
this_cpu = smp_processor_id();
this_cpu_rq = cpu_rq(this_cpu);
/*
* If the current task on @p's runqueue is a softirq task,
* it may run without preemption for a time that is
* ill-suited for a waiting RT task. Therefore, try to
* wake this RT task on another runqueue.
*
* Also, if the current task on @p's runqueue is an RT task, then
* try to see if we can wake this RT task up on another
* runqueue. Otherwise simply start this RT task
* on its current runqueue.
*
* We want to avoid overloading runqueues. If the woken
* task is a higher priority, then it will stay on this CPU
* and the lower prio task should be moved to another CPU.
* Even though this will probably make the lower prio task
* lose its cache, we do not want to bounce a higher task
* around just because it gave up its CPU, perhaps for a
* lock?
*
* For equal prio tasks, we just let the scheduler sort it out.
*
* Otherwise, just let it ride on the affined RQ and the
* post-schedule router will push the preempted task away
*
* This test is optimistic, if we get it wrong the load-balancer
* will have to sort it out.
*
* We take into account the capacity of the CPU to ensure it fits the
* requirement of the task - which is only important on heterogeneous
* systems like big.LITTLE.
*/
may_not_preempt = task_may_not_preempt(curr, cpu);
test = (curr && (may_not_preempt ||
(unlikely(rt_task(curr)) &&
(curr->nr_cpus_allowed < 2 || curr->prio <= p->prio))));
/*
* Respect the sync flag as long as the task can run on this CPU.
*/
if (should_honor_rt_sync(this_cpu_rq, p, sync) &&
cpumask_test_cpu(this_cpu, p->cpus_ptr)) {
cpu = this_cpu;
goto out_unlock;
}
if (test || !rt_task_fits_capacity(p, cpu)) {
int target = find_lowest_rq(p);
/*
* Bail out if we were forcing a migration to find a better
* fitting CPU but our search failed.
*/
if (!test && target != -1 && !rt_task_fits_capacity(p, target))
goto out_unlock;
/*
* If cpu is non-preemptible, prefer remote cpu
* even if it's running a higher-prio task.
* Otherwise: Don't bother moving it if the destination CPU is
* not running a lower priority task.
*/
if (target != -1 &&
(may_not_preempt ||
p->prio < cpu_rq(target)->rt.highest_prio.curr))
cpu = target;
}
out_unlock:
rcu_read_unlock();
out:
return cpu;
}
static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
{
/*
* Current can't be migrated, useless to reschedule,
* let's hope p can move out.
*/
if (rq->curr->nr_cpus_allowed == 1 ||
!cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
return;
/*
* p is migratable, so let's not schedule it and
* see if it is pushed or pulled somewhere else.
*/
if (p->nr_cpus_allowed != 1 &&
cpupri_find(&rq->rd->cpupri, p, NULL))
return;
/*
* There appear to be other CPUs that can accept
* the current task but none can run 'p', so lets reschedule
* to try and push the current task away:
*/
requeue_task_rt(rq, p, 1);
resched_curr(rq);
}
static int balance_rt(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
{
if (!on_rt_rq(&p->rt) && need_pull_rt_task(rq, p)) {
int done = 0;
/*
* This is OK, because current is on_cpu, which avoids it being
* picked for load-balance and preemption/IRQs are still
* disabled avoiding further scheduler activity on it and we've
* not yet started the picking loop.
*/
rq_unpin_lock(rq, rf);
trace_android_rvh_sched_balance_rt(rq, p, &done);
if (!done)
pull_rt_task(rq);
rq_repin_lock(rq, rf);
}
return sched_stop_runnable(rq) || sched_dl_runnable(rq) || sched_rt_runnable(rq);
}
#endif /* CONFIG_SMP */
/*
* Preempt the current task with a newly woken task if needed:
*/
static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
{
if (p->prio < rq->curr->prio) {
resched_curr(rq);
return;
}
#ifdef CONFIG_SMP
/*
* If:
*
* - the newly woken task is of equal priority to the current task
* - the newly woken task is non-migratable while current is migratable
* - current will be preempted on the next reschedule
*
* we should check to see if current can readily move to a different
* cpu. If so, we will reschedule to allow the push logic to try
* to move current somewhere else, making room for our non-migratable
* task.
*/
if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
check_preempt_equal_prio(rq, p);
#endif
}
static inline void set_next_task_rt(struct rq *rq, struct task_struct *p, bool first)
{
p->se.exec_start = rq_clock_task(rq);
/* The running task is never eligible for pushing */
dequeue_pushable_task(rq, p);
if (!first)
return;
/*
* If prev task was rt, put_prev_task() has already updated the
* utilization. We only care about the case where we start to schedule an
* rt task
*/
if (rq->curr->sched_class != &rt_sched_class)
update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0);
trace_android_rvh_update_rt_rq_load_avg(rq_clock_pelt(rq), rq, p, 0);
rt_queue_push_tasks(rq);
}
static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
struct rt_rq *rt_rq)
{
struct rt_prio_array *array = &rt_rq->active;
struct sched_rt_entity *next = NULL;
struct list_head *queue;
int idx;
idx = sched_find_first_bit(array->bitmap);
BUG_ON(idx >= MAX_RT_PRIO);
queue = array->queue + idx;
next = list_entry(queue->next, struct sched_rt_entity, run_list);
return next;
}
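/*
* Walk down the group hierarchy, taking the highest-priority entity at
* each level, until we arrive at a task.
*/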
static struct task_struct *_pick_next_task_rt(struct rq *rq)
{
struct sched_rt_entity *rt_se;
struct rt_rq *rt_rq = &rq->rt;
do {
rt_se = pick_next_rt_entity(rq, rt_rq);
BUG_ON(!rt_se);
rt_rq = group_rt_rq(rt_se);
} while (rt_rq);
return rt_task_of(rt_se);
}
static struct task_struct *pick_task_rt(struct rq *rq)
{
struct task_struct *p;
if (!sched_rt_runnable(rq))
return NULL;
p = _pick_next_task_rt(rq);
return p;
}
static struct task_struct *pick_next_task_rt(struct rq *rq)
{
struct task_struct *p = pick_task_rt(rq);
if (p)
set_next_task_rt(rq, p, true);
return p;
}
static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
{
update_curr_rt(rq);
update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1);
trace_android_rvh_update_rt_rq_load_avg(rq_clock_pelt(rq), rq, p, 1);
/*
* The previous task needs to be made eligible for pushing
* if it is still active
*/
if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
enqueue_pushable_task(rq, p);
}
#ifdef CONFIG_SMP
/* Only try algorithms three times */
#define RT_MAX_TRIES 3
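/* @p is a push/pull candidate for @cpu: not currently running and allowed there */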
static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
{
if (!task_running(rq, p) &&
cpumask_test_cpu(cpu, &p->cpus_mask))
return 1;
return 0;
}
/*
* Return the highest-priority pushable task on @rq which is suitable to be
* executed on @cpu, or NULL if there is none.
*/
struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
{
struct plist_head *head = &rq->rt.pushable_tasks;
struct task_struct *p;
if (!has_pushable_tasks(rq))
return NULL;
plist_for_each_entry(p, head, pushable_tasks) {
if (pick_rt_task(rq, p, cpu))
return p;
}
return NULL;
}
EXPORT_SYMBOL_GPL(pick_highest_pushable_task);
static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
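/*
* Find the CPU running the lowest-priority work that @task could preempt,
* preferring the task's previous CPU and topologically close CPUs; CPU
* capacity is taken into account on asymmetric systems. Returns -1 if no
* suitable CPU is found.
*/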
static int find_lowest_rq(struct task_struct *task)
{
struct sched_domain *sd;
struct cpumask *lowest_mask = this_cpu_cpumask_var_ptr(local_cpu_mask);
int this_cpu = smp_processor_id();
int cpu = -1;
int ret;
/* Make sure the mask is initialized first */
if (unlikely(!lowest_mask))
return -1;
if (task->nr_cpus_allowed == 1)
return -1; /* No other targets possible */
/*
* If we're on an asym system, ensure we consider the different capacities
* of the CPUs when searching for the lowest_mask.
*/
if (sched_asym_cpucap_active()) {
ret = cpupri_find_fitness(&task_rq(task)->rd->cpupri,
task, lowest_mask,
rt_task_fits_capacity);
} else {
ret = cpupri_find(&task_rq(task)->rd->cpupri,
task, lowest_mask);
}
trace_android_rvh_find_lowest_rq(task, lowest_mask, ret, &cpu);
if (cpu >= 0)
return cpu;
if (!ret)
return -1; /* No targets found */
cpu = task_cpu(task);
/*
* At this point we have built a mask of CPUs representing the
* lowest priority tasks in the system. Now we want to elect
* the best one based on our affinity and topology.
*
* We prioritize the last CPU that the task executed on since
* it is most likely cache-hot in that location.
*/
if (cpumask_test_cpu(cpu, lowest_mask))
return cpu;
/*
* Otherwise, we consult the sched_domains span maps to figure
* out which CPU is logically closest to our hot cache data.
*/
if (!cpumask_test_cpu(this_cpu, lowest_mask))
this_cpu = -1; /* Skip this_cpu opt if not among lowest */
rcu_read_lock();
for_each_domain(cpu, sd) {
if (sd->flags & SD_WAKE_AFFINE) {
int best_cpu;
/*
* "this_cpu" is cheaper to preempt than a
* remote processor.
*/
if (this_cpu != -1 &&
cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
rcu_read_unlock();
return this_cpu;
}
best_cpu = cpumask_any_and_distribute(lowest_mask,
sched_domain_span(sd));
if (best_cpu < nr_cpu_ids) {
rcu_read_unlock();
return best_cpu;
}
}
}
rcu_read_unlock();
/*
* And finally, if there were no matches within the domains
* just give the caller *something* to work with from the compatible
* locations.
*/
if (this_cpu != -1)
return this_cpu;
cpu = cpumask_any_distribute(lowest_mask);
if (cpu < nr_cpu_ids)
return cpu;
return -1;
}
/* Will lock the rq it finds */
static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
{
struct rq *lowest_rq = NULL;
int tries;
int cpu;
for (tries = 0; tries < RT_MAX_TRIES; tries++) {
cpu = find_lowest_rq(task);
if ((cpu == -1) || (cpu == rq->cpu))
break;
lowest_rq = cpu_rq(cpu);
if (lowest_rq->rt.highest_prio.curr <= task->prio) {
/*
* Target rq has tasks of equal or higher priority,
* retrying does not release any lock and is unlikely
* to yield a different result.
*/
lowest_rq = NULL;
break;
}
/* if the prio of this runqueue changed, try again */
if (double_lock_balance(rq, lowest_rq)) {
/*
* We had to unlock the run queue. In
* the meantime, the task could have
* migrated already or had its affinity changed.
* Also make sure that it wasn't scheduled on its rq.
*/
if (unlikely(task_rq(task) != rq ||
!cpumask_test_cpu(lowest_rq->cpu, &task->cpus_mask) ||
task_running(rq, task) ||
!rt_task(task) ||
!task_on_rq_queued(task))) {
double_unlock_balance(rq, lowest_rq);
lowest_rq = NULL;
break;
}
}
/* If this rq is still suitable use it. */
if (lowest_rq->rt.highest_prio.curr > task->prio)
break;
/* try again */
double_unlock_balance(rq, lowest_rq);
lowest_rq = NULL;
}
return lowest_rq;
}
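/*
* Return the highest-priority task on this runqueue's pushable list, i.e.
* the best candidate to push to another CPU, or NULL if there is none.
*/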
static struct task_struct *pick_next_pushable_task(struct rq *rq)
{
struct task_struct *p;
if (!has_pushable_tasks(rq))
return NULL;
p = plist_first_entry(&rq->rt.pushable_tasks,
struct task_struct, pushable_tasks);
BUG_ON(rq->cpu != task_cpu(p));
BUG_ON(task_current(rq, p));
BUG_ON(p->nr_cpus_allowed <= 1);
BUG_ON(!task_on_rq_queued(p));
BUG_ON(!rt_task(p));
return p;
}
/*
* If the current CPU has more than one RT task, see if the non
* running task can migrate over to a CPU that is running a task
* of lesser priority.
*/
static int push_rt_task(struct rq *rq, bool pull)
{
struct task_struct *next_task;
struct rq *lowest_rq;
int ret = 0;
if (!rq->rt.overloaded)
return 0;
next_task = pick_next_pushable_task(rq);
if (!next_task)
return 0;
retry:
/*
* It's possible that next_task slipped in with a
* higher priority than current. If that's the case,
* just reschedule current.
*/
if (unlikely(next_task->prio < rq->curr->prio)) {
resched_curr(rq);
return 0;
}
if (is_migration_disabled(next_task)) {
struct task_struct *push_task = NULL;
int cpu;
if (!pull || rq->push_busy)
return 0;
/*
* Invoking find_lowest_rq() on anything but an RT task doesn't
* make sense. Per the above priority check, curr has to
* be of higher priority than next_task, so no need to
* reschedule when bailing out.
*
* Note that the stoppers are masqueraded as SCHED_FIFO
* (cf. sched_set_stop_task()), so we can't rely on rt_task().
*/
if (rq->curr->sched_class != &rt_sched_class)
return 0;
cpu = find_lowest_rq(rq->curr);
if (cpu == -1 || cpu == rq->cpu)
return 0;
/*
* Given that we found a CPU running at lower priority than @next_task,
* @next_task should be running there. However we cannot migrate it
* to this other CPU; instead, attempt to push the current
* running task on this CPU away.
*/
push_task = get_push_task(rq);
if (push_task) {
raw_spin_rq_unlock(rq);
stop_one_cpu_nowait(rq->cpu, push_cpu_stop,
push_task, &rq->push_work);
raw_spin_rq_lock(rq);
}
return 0;
}
if (WARN_ON(next_task == rq->curr))
return 0;
/* We might release rq lock */
get_task_struct(next_task);
/* find_lock_lowest_rq locks the rq if found */
lowest_rq = find_lock_lowest_rq(next_task, rq);
if (!lowest_rq) {
struct task_struct *task;
/*
* find_lock_lowest_rq releases rq->lock
* so it is possible that next_task has migrated.
*
* We need to make sure that the task is still on the same
* run-queue and is also still the next task eligible for
* pushing.
*/
task = pick_next_pushable_task(rq);
if (task == next_task) {
/*
* The task hasn't migrated, and is still the next
* eligible task, but we failed to find a run-queue
* to push it to. Do not retry in this case, since
* other CPUs will pull from us when ready.
*/
goto out;
}
if (!task)
/* No more tasks, just exit */
goto out;
/*
* Something has shifted, try again.
*/
put_task_struct(next_task);
next_task = task;
goto retry;
}
deactivate_task(rq, next_task, 0);
set_task_cpu(next_task, lowest_rq->cpu);
activate_task(lowest_rq, next_task, 0);
resched_curr(lowest_rq);
ret = 1;
double_unlock_balance(rq, lowest_rq);
out:
put_task_struct(next_task);
return ret;
}
static void push_rt_tasks(struct rq *rq)
{
/* push_rt_task will return true if it moved an RT */
while (push_rt_task(rq, false))
;
}
#ifdef HAVE_RT_PUSH_IPI
/*
* When a high priority task schedules out from a CPU and a lower priority
* task is scheduled in, a check is made to see if there's any RT tasks
* on other CPUs that are waiting to run because a higher priority RT task
* is currently running on its CPU. In this case, the CPU with multiple RT
* tasks queued on it (overloaded) needs to be notified that a CPU has opened
* up that may be able to run one of its non-running queued RT tasks.
*
* All CPUs with overloaded RT tasks need to be notified as there is currently
* no way to know which of these CPUs have the highest priority task waiting
* to run. Instead of trying to take a spinlock on each of these CPUs,
* which has been shown to cause large latency when done on machines with
* many CPUs, an IPI is sent to the CPUs to have them push off their
* overloaded RT tasks waiting to run.
*
* Just sending an IPI to each of the CPUs is also an issue, as on large
* count CPU machines, this can cause an IPI storm on a CPU, especially
* if it's the only CPU with multiple RT tasks queued, and a large number
* of CPUs scheduling a lower priority task at the same time.
*
* Each root domain has its own irq work function that can iterate over
* all CPUs with RT overloaded tasks. Since all CPUs with overloaded RT
* tasks must be checked regardless of whether one or many CPUs are lowering
* their priority, there's a single irq work iterator that will try to
* push off RT tasks that are waiting to run.
*
* When a CPU schedules a lower priority task, it will kick off the
* irq work iterator that will jump to each CPU with overloaded RT tasks.
* As it only takes the first CPU that schedules a lower priority task
* to start the process, the rto_start variable is incremented and if
* the atomic result is one, then that CPU will try to take the rto_lock.
* This prevents high contention on the lock as the process handles all
* CPUs scheduling lower priority tasks.
*
* All CPUs that are scheduling a lower priority task will increment the
* rto_loop_next variable. This will make sure that the irq work iterator
* checks all RT overloaded CPUs whenever a CPU schedules a new lower
* priority task, even if the iterator is in the middle of a scan. Incrementing
* the rto_loop_next will cause the iterator to perform another scan.
*
*/
static int rto_next_cpu(struct root_domain *rd)
{
int next;
int cpu;
/*
* When starting the IPI RT pushing, the rto_cpu is set to -1,
* rto_next_cpu() will simply return the first CPU found in
* the rto_mask.
*
* If rto_next_cpu() is called while rto_cpu is a valid CPU, it
* will return the next CPU found in the rto_mask.
*
* If there are no more CPUs left in the rto_mask, then a check is made
* against rto_loop and rto_loop_next. rto_loop is only updated with
* the rto_lock held, but any CPU may increment the rto_loop_next
* without any locking.
*/
for (;;) {
/* When rto_cpu is -1 this acts like cpumask_first() */
cpu = cpumask_next(rd->rto_cpu, rd->rto_mask);
/* This can be any CPU in rd->rto_mask, possibly a halted one; the hook may update it */
trace_android_rvh_rto_next_cpu(rd->rto_cpu, rd->rto_mask, &cpu);
rd->rto_cpu = cpu;
if (cpu < nr_cpu_ids)
return cpu;
rd->rto_cpu = -1;
/*
* ACQUIRE ensures we see the @rto_mask changes
* made prior to the @next value observed.
*
* Matches WMB in rt_set_overload().
*/
next = atomic_read_acquire(&rd->rto_loop_next);
if (rd->rto_loop == next)
break;
rd->rto_loop = next;
}
return -1;
}
static inline bool rto_start_trylock(atomic_t *v)
{
return !atomic_cmpxchg_acquire(v, 0, 1);
}
static inline void rto_start_unlock(atomic_t *v)
{
atomic_set_release(v, 0);
}
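/*
* Start or extend the IPI push chain: bump rto_loop_next so an in-flight
* iteration rescans all overloaded CPUs, and if no iteration is running,
* queue the irq work on the first CPU in the rto_mask.
*/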
static void tell_cpu_to_push(struct rq *rq)
{
int cpu = -1;
/* Keep the loop going if the IPI is currently active */
atomic_inc(&rq->rd->rto_loop_next);
/* Only one CPU can initiate a loop at a time */
if (!rto_start_trylock(&rq->rd->rto_loop_start))
return;
raw_spin_lock(&rq->rd->rto_lock);
/*
* The rto_cpu is updated under the lock, if it has a valid CPU
* then the IPI is still running and will continue due to the
* update to loop_next, and nothing needs to be done here.
* Otherwise it is finishing up and an ipi needs to be sent.
*/
if (rq->rd->rto_cpu < 0)
cpu = rto_next_cpu(rq->rd);
raw_spin_unlock(&rq->rd->rto_lock);
rto_start_unlock(&rq->rd->rto_loop_start);
if (cpu >= 0) {
/* Make sure the rd does not get freed while pushing */
sched_get_rd(rq->rd);
irq_work_queue_on(&rq->rd->rto_push_work, cpu);
}
}
/* Called from hardirq context */
void rto_push_irq_work_func(struct irq_work *work)
{
struct root_domain *rd =
container_of(work, struct root_domain, rto_push_work);
struct rq *rq;
int cpu;
rq = this_rq();
/*
* We do not need to grab the lock to check for has_pushable_tasks.
* When it gets updated, a check is made if a push is possible.
*/
if (has_pushable_tasks(rq)) {
raw_spin_rq_lock(rq);
while (push_rt_task(rq, true))
;
raw_spin_rq_unlock(rq);
}
raw_spin_lock(&rd->rto_lock);
/* Pass the IPI to the next rt overloaded queue */
cpu = rto_next_cpu(rd);
raw_spin_unlock(&rd->rto_lock);
if (cpu < 0) {
sched_put_rd(rd);
return;
}
/* Try the next RT overloaded CPU */
irq_work_queue_on(&rd->rto_push_work, cpu);
}
#endif /* HAVE_RT_PUSH_IPI */
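/*
* Try to pull a queued RT task that would preempt our next pick from an
* RT-overloaded CPU; with RT_PUSH_IPI enabled, defer to the IPI push
* machinery instead of scanning remote runqueues ourselves.
*/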
static void pull_rt_task(struct rq *this_rq)
{
int this_cpu = this_rq->cpu, cpu;
bool resched = false;
struct task_struct *p, *push_task;
struct rq *src_rq;
int rt_overload_count = rt_overloaded(this_rq);
if (likely(!rt_overload_count))
return;
/*
* Match the barrier from rt_set_overload(); this guarantees that if we
* see overloaded we must also see the rto_mask bit.
*/
smp_rmb();
/* If we are the only overloaded CPU do nothing */
if (rt_overload_count == 1 &&
cpumask_test_cpu(this_rq->cpu, this_rq->rd->rto_mask))
return;
#ifdef HAVE_RT_PUSH_IPI
if (sched_feat(RT_PUSH_IPI)) {
tell_cpu_to_push(this_rq);
return;
}
#endif
for_each_cpu(cpu, this_rq->rd->rto_mask) {
if (this_cpu == cpu)
continue;
src_rq = cpu_rq(cpu);
/*
* Don't bother taking the src_rq->lock if the next highest
* task is known to be lower-priority than our current task.
* This may look racy, but if this value is about to go
* logically higher, the src_rq will push this task away.
* And if it's going logically lower, we do not care.
*/
if (src_rq->rt.highest_prio.next >=
this_rq->rt.highest_prio.curr)
continue;
/*
* We can potentially drop this_rq's lock in
* double_lock_balance, and another CPU could
* alter this_rq
*/
push_task = NULL;
double_lock_balance(this_rq, src_rq);
/*
* We can only pull a task that is pushable
* on its rq; nothing else.
*/
p = pick_highest_pushable_task(src_rq, this_cpu);
/*
* Do we have an RT task that preempts
* the to-be-scheduled task?
*/
if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
WARN_ON(p == src_rq->curr);
WARN_ON(!task_on_rq_queued(p));
/*
* There's a chance that p is higher in priority
* than what's currently running on its CPU.
* This is just that p is waking up and hasn't
* had a chance to schedule. We only pull
* p if it is lower in priority than the
* current task on the run queue
*/
if (p->prio < src_rq->curr->prio)
goto skip;
if (is_migration_disabled(p)) {
push_task = get_push_task(src_rq);
} else {
deactivate_task(src_rq, p, 0);
set_task_cpu(p, this_cpu);
activate_task(this_rq, p, 0);
resched = true;
}
/*
* We continue with the search, just in
* case there's an even higher prio task
* in another runqueue. (low likelihood
* but possible)
*/
}
skip:
double_unlock_balance(this_rq, src_rq);
if (push_task) {
raw_spin_rq_unlock(this_rq);
stop_one_cpu_nowait(src_rq->cpu, push_cpu_stop,
push_task, &src_rq->push_work);
raw_spin_rq_lock(this_rq);
}
}
if (resched)
resched_curr(this_rq);
}
/*
* If we are not running and we are not going to reschedule soon, we should
* try to push tasks away now
*/
static void task_woken_rt(struct rq *rq, struct task_struct *p)
{
bool need_to_push = !task_running(rq, p) &&
!test_tsk_need_resched(rq->curr) &&
p->nr_cpus_allowed > 1 &&
(dl_task(rq->curr) || rt_task(rq->curr)) &&
(rq->curr->nr_cpus_allowed < 2 ||
rq->curr->prio <= p->prio);
if (need_to_push)
push_rt_tasks(rq);
}
/* Assumes rq->lock is held */
static void rq_online_rt(struct rq *rq)
{
if (rq->rt.overloaded)
rt_set_overload(rq);
__enable_runtime(rq);
cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
}
/* Assumes rq->lock is held */
static void rq_offline_rt(struct rq *rq)
{
if (rq->rt.overloaded)
rt_clear_overload(rq);
__disable_runtime(rq);
cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
}
/*
* When switching from the rt queue, we bring ourselves to a position
* where we might want to pull RT tasks from other runqueues.
*/
static void switched_from_rt(struct rq *rq, struct task_struct *p)
{
/*
* If there are other RT tasks then we will reschedule
* and the scheduling of the other RT tasks will handle
* the balancing. But if we are the last RT task
* we may need to handle the pulling of RT tasks
* now.
*/
if (!task_on_rq_queued(p) || rq->rt.rt_nr_running)
return;
rt_queue_pull_task(rq);
}
void __init init_sched_rt_class(void)
{
unsigned int i;
for_each_possible_cpu(i) {
zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
GFP_KERNEL, cpu_to_node(i));
}
}
#endif /* CONFIG_SMP */
/*
* When switching a task to RT, we may overload the runqueue
* with RT tasks. In this case we try to push them off to
* other runqueues.
*/
static void switched_to_rt(struct rq *rq, struct task_struct *p)
{
/*
* If we are running, update the avg_rt tracking, as the running time
* will from now on be accounted there.
*/
if (task_current(rq, p)) {
update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0);
return;
}
/*
* If we are not running we may need to preempt the current
* running task. If that current running task is also an RT task
* then see if we can move to another run queue.
*/
if (task_on_rq_queued(p)) {
#ifdef CONFIG_SMP
if (p->nr_cpus_allowed > 1 && rq->rt.overloaded)
rt_queue_push_tasks(rq);
#endif /* CONFIG_SMP */
if (p->prio < rq->curr->prio && cpu_online(cpu_of(rq)))
resched_curr(rq);
}
}
/*
* Priority of the task has changed. This may cause
* us to initiate a push or pull.
*/
static void
prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
{
if (!task_on_rq_queued(p))
return;
if (task_current(rq, p)) {
#ifdef CONFIG_SMP
/*
* If our priority decreases while running, we
* may need to pull tasks to this runqueue.
*/
if (oldprio < p->prio)
rt_queue_pull_task(rq);
/*
* If there's a higher priority task waiting to run
* then reschedule.
*/
if (p->prio > rq->rt.highest_prio.curr)
resched_curr(rq);
#else
/* For UP simply resched on drop of prio */
if (oldprio < p->prio)
resched_curr(rq);
#endif /* CONFIG_SMP */
} else {
/*
* This task is not running, but if its priority is
* higher than that of the currently running task,
* then reschedule.
*/
if (p->prio < rq->curr->prio)
resched_curr(rq);
}
}
#ifdef CONFIG_POSIX_TIMERS
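/*
* RLIMIT_RTTIME enforcement: count the ticks this RT task has run without
* sleeping and kick the posix-cputimers watchdog once the limit is reached.
*/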
static void watchdog(struct rq *rq, struct task_struct *p)
{
unsigned long soft, hard;
/* max may change after cur was read; this will be fixed next tick */
soft = task_rlimit(p, RLIMIT_RTTIME);
hard = task_rlimit_max(p, RLIMIT_RTTIME);
if (soft != RLIM_INFINITY) {
unsigned long next;
if (p->rt.watchdog_stamp != jiffies) {
p->rt.timeout++;
p->rt.watchdog_stamp = jiffies;
}
next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
if (p->rt.timeout > next) {
posix_cputimers_rt_watchdog(&p->posix_cputimers,
p->se.sum_exec_runtime);
}
}
}
#else
static inline void watchdog(struct rq *rq, struct task_struct *p) { }
#endif
/*
* scheduler tick hitting a task of our scheduling class.
*
* NOTE: This function can be called remotely by the tick offload that
* goes along full dynticks. Therefore no local assumption can be made
* and everything must be accessed through the @rq and @curr passed in
* parameters.
*/
static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
{
struct sched_rt_entity *rt_se = &p->rt;
update_curr_rt(rq);
update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1);
trace_android_rvh_update_rt_rq_load_avg(rq_clock_pelt(rq), rq, p, 1);
watchdog(rq, p);
/*
* RR tasks need a special form of timeslice management.
* FIFO tasks have no timeslices.
*/
if (p->policy != SCHED_RR)
return;
if (--p->rt.time_slice)
return;
p->rt.time_slice = sched_rr_timeslice;
/*
* Requeue to the end of queue if we (and all of our ancestors) are not
* the only element on the queue
*/
for_each_sched_rt_entity(rt_se) {
if (rt_se->run_list.prev != rt_se->run_list.next) {
requeue_task_rt(rq, p, 0);
resched_curr(rq);
return;
}
}
}
static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
{
/*
* Time slice is 0 for SCHED_FIFO tasks
*/
if (task->policy == SCHED_RR)
return sched_rr_timeslice;
else
return 0;
}
DEFINE_SCHED_CLASS(rt) = {
.enqueue_task = enqueue_task_rt,
.dequeue_task = dequeue_task_rt,
.yield_task = yield_task_rt,
.check_preempt_curr = check_preempt_curr_rt,
.pick_next_task = pick_next_task_rt,
.put_prev_task = put_prev_task_rt,
.set_next_task = set_next_task_rt,
#ifdef CONFIG_SMP
.balance = balance_rt,
.pick_task = pick_task_rt,
.select_task_rq = select_task_rq_rt,
.set_cpus_allowed = set_cpus_allowed_common,
.rq_online = rq_online_rt,
.rq_offline = rq_offline_rt,
.task_woken = task_woken_rt,
.switched_from = switched_from_rt,
.find_lock_rq = find_lock_lowest_rq,
#endif
.task_tick = task_tick_rt,
.get_rr_interval = get_rr_interval_rt,
.prio_changed = prio_changed_rt,
.switched_to = switched_to_rt,
.update_curr = update_curr_rt,
#ifdef CONFIG_UCLAMP_TASK
.uclamp_enabled = 1,
#endif
};
#ifdef CONFIG_RT_GROUP_SCHED
/*
* Ensure that the real time constraints are schedulable.
*/
static DEFINE_MUTEX(rt_constraints_mutex);
static inline int tg_has_rt_tasks(struct task_group *tg)
{
struct task_struct *task;
struct css_task_iter it;
int ret = 0;
/*
* Autogroups do not have RT tasks; see autogroup_create().
*/
if (task_group_is_autogroup(tg))
return 0;
css_task_iter_start(&tg->css, 0, &it);
while (!ret && (task = css_task_iter_next(&it)))
ret |= rt_task(task);
css_task_iter_end(&it);
return ret;
}
struct rt_schedulable_data {
struct task_group *tg;
u64 rt_period;
u64 rt_runtime;
};
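/*
* walk_tg_tree() callback: check that the group's (possibly updated)
* bandwidth does not exceed its period or the global limit, and that the
* sum of its children's bandwidth still fits within its own.
*/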
static int tg_rt_schedulable(struct task_group *tg, void *data)
{
struct rt_schedulable_data *d = data;
struct task_group *child;
unsigned long total, sum = 0;
u64 period, runtime;
period = ktime_to_ns(tg->rt_bandwidth.rt_period);
runtime = tg->rt_bandwidth.rt_runtime;
if (tg == d->tg) {
period = d->rt_period;
runtime = d->rt_runtime;
}
/*
* Cannot have more runtime than the period.
*/
if (runtime > period && runtime != RUNTIME_INF)
return -EINVAL;
/*
* Ensure we don't starve existing RT tasks if runtime turns zero.
*/
if (rt_bandwidth_enabled() && !runtime &&
tg->rt_bandwidth.rt_runtime && tg_has_rt_tasks(tg))
return -EBUSY;
total = to_ratio(period, runtime);
/*
* Nobody can have more than the global setting allows.
*/
if (total > to_ratio(global_rt_period(), global_rt_runtime()))
return -EINVAL;
/*
* The sum of our children's runtime should not exceed our own.
*/
list_for_each_entry_rcu(child, &tg->children, siblings) {
period = ktime_to_ns(child->rt_bandwidth.rt_period);
runtime = child->rt_bandwidth.rt_runtime;
if (child == d->tg) {
period = d->rt_period;
runtime = d->rt_runtime;
}
sum += to_ratio(period, runtime);
}
if (sum > total)
return -EINVAL;
return 0;
}
static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
{
int ret;
struct rt_schedulable_data data = {
.tg = tg,
.rt_period = period,
.rt_runtime = runtime,
};
rcu_read_lock();
ret = walk_tg_tree(tg_rt_schedulable, tg_nop, &data);
rcu_read_unlock();
return ret;
}
static int tg_set_rt_bandwidth(struct task_group *tg,
u64 rt_period, u64 rt_runtime)
{
int i, err = 0;
/*
* Disallowing the root group RT runtime is BAD; it would prevent the
* kernel from creating (and/or operating) RT threads.
*/
if (tg == &root_task_group && rt_runtime == 0)
return -EINVAL;
/* No period doesn't make any sense. */
if (rt_period == 0)
return -EINVAL;
/*
* Bound the quota to defend against overflow during bandwidth shift.
*/
if (rt_runtime != RUNTIME_INF && rt_runtime > max_rt_runtime)
return -EINVAL;
mutex_lock(&rt_constraints_mutex);
err = __rt_schedulable(tg, rt_period, rt_runtime);
if (err)
goto unlock;
raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
tg->rt_bandwidth.rt_runtime = rt_runtime;
for_each_possible_cpu(i) {
struct rt_rq *rt_rq = tg->rt_rq[i];
raw_spin_lock(&rt_rq->rt_runtime_lock);
rt_rq->rt_runtime = rt_runtime;
raw_spin_unlock(&rt_rq->rt_runtime_lock);
}
raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
unlock:
mutex_unlock(&rt_constraints_mutex);
return err;
}
int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
{
u64 rt_runtime, rt_period;
rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
if (rt_runtime_us < 0)
rt_runtime = RUNTIME_INF;
else if ((u64)rt_runtime_us > U64_MAX / NSEC_PER_USEC)
return -EINVAL;
return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
}
long sched_group_rt_runtime(struct task_group *tg)
{
u64 rt_runtime_us;
if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
return -1;
rt_runtime_us = tg->rt_bandwidth.rt_runtime;
do_div(rt_runtime_us, NSEC_PER_USEC);
return rt_runtime_us;
}
int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us)
{
u64 rt_runtime, rt_period;
if (rt_period_us > U64_MAX / NSEC_PER_USEC)
return -EINVAL;
rt_period = rt_period_us * NSEC_PER_USEC;
rt_runtime = tg->rt_bandwidth.rt_runtime;
return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
}
long sched_group_rt_period(struct task_group *tg)
{
u64 rt_period_us;
rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
do_div(rt_period_us, NSEC_PER_USEC);
return rt_period_us;
}
static int sched_rt_global_constraints(void)
{
int ret = 0;
mutex_lock(&rt_constraints_mutex);
ret = __rt_schedulable(NULL, 0, 0);
mutex_unlock(&rt_constraints_mutex);
return ret;
}
int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
{
/* Don't accept realtime tasks when there is no way for them to run */
if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
return 0;
return 1;
}
#else /* !CONFIG_RT_GROUP_SCHED */
static int sched_rt_global_constraints(void)
{
unsigned long flags;
int i;
raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
for_each_possible_cpu(i) {
struct rt_rq *rt_rq = &cpu_rq(i)->rt;
raw_spin_lock(&rt_rq->rt_runtime_lock);
rt_rq->rt_runtime = global_rt_runtime();
raw_spin_unlock(&rt_rq->rt_runtime_lock);
}
raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
return 0;
}
#endif /* CONFIG_RT_GROUP_SCHED */
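/*
* Validate sysctl_sched_rt_period and sysctl_sched_rt_runtime: the period
* must be positive, and a finite runtime must neither exceed the period
* nor overflow max_rt_runtime once converted to nanoseconds.
*/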
static int sched_rt_global_validate(void)
{
if (sysctl_sched_rt_period <= 0)
return -EINVAL;
if ((sysctl_sched_rt_runtime != RUNTIME_INF) &&
((sysctl_sched_rt_runtime > sysctl_sched_rt_period) ||
((u64)sysctl_sched_rt_runtime *
NSEC_PER_USEC > max_rt_runtime)))
return -EINVAL;
return 0;
}
static void sched_rt_do_global(void)
{
unsigned long flags;
raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
def_rt_bandwidth.rt_runtime = global_rt_runtime();
def_rt_bandwidth.rt_period = ns_to_ktime(global_rt_period());
raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
}
int sched_rt_handler(struct ctl_table *table, int write, void *buffer,
size_t *lenp, loff_t *ppos)
{
int old_period, old_runtime;
static DEFINE_MUTEX(mutex);
int ret;
mutex_lock(&mutex);
old_period = sysctl_sched_rt_period;
old_runtime = sysctl_sched_rt_runtime;
ret = proc_dointvec(table, write, buffer, lenp, ppos);
if (!ret && write) {
ret = sched_rt_global_validate();
if (ret)
goto undo;
ret = sched_dl_global_validate();
if (ret)
goto undo;
ret = sched_rt_global_constraints();
if (ret)
goto undo;
sched_rt_do_global();
sched_dl_do_global();
}
if (0) {
undo:
sysctl_sched_rt_period = old_period;
sysctl_sched_rt_runtime = old_runtime;
}
mutex_unlock(&mutex);
return ret;
}
int sched_rr_handler(struct ctl_table *table, int write, void *buffer,
size_t *lenp, loff_t *ppos)
{
int ret;
static DEFINE_MUTEX(mutex);
mutex_lock(&mutex);
ret = proc_dointvec(table, write, buffer, lenp, ppos);
/*
* Make sure that internally we keep jiffies.
* Also, writing zero resets the timeslice to default:
*/
if (!ret && write) {
sched_rr_timeslice =
sysctl_sched_rr_timeslice <= 0 ? RR_TIMESLICE :
msecs_to_jiffies(sysctl_sched_rr_timeslice);
}
mutex_unlock(&mutex);
return ret;
}
#ifdef CONFIG_SCHED_DEBUG
void print_rt_stats(struct seq_file *m, int cpu)
{
rt_rq_iter_t iter;
struct rt_rq *rt_rq;
rcu_read_lock();
for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
print_rt_rq(m, cpu, rt_rq);
rcu_read_unlock();
}
#endif /* CONFIG_SCHED_DEBUG */