Changes in 5.15.61
Makefile: link with -z noexecstack --no-warn-rwx-segments
x86: link vdso and boot with -z noexecstack --no-warn-rwx-segments
Revert "pNFS: nfs3_set_ds_client should set NFS_CS_NOPING"
scsi: Revert "scsi: qla2xxx: Fix disk failure to rediscover"
pNFS/flexfiles: Report RDMA connection errors to the server
NFSD: Clean up the show_nf_flags() macro
nfsd: eliminate the NFSD_FILE_BREAK_* flags
ALSA: usb-audio: Add quirk for Behringer UMC202HD
ALSA: bcd2000: Fix a UAF bug on the error path of probing
ALSA: hda/realtek: Add quirk for Clevo NV45PZ
ALSA: hda/realtek: Add quirk for HP Spectre x360 15-eb0xxx
wifi: mac80211_hwsim: fix race condition in pending packet
wifi: mac80211_hwsim: add back erroneously removed cast
wifi: mac80211_hwsim: use 32-bit skb cookie
add barriers to buffer_uptodate and set_buffer_uptodate
lockd: detect and reject lock arguments that overflow
HID: hid-input: add Surface Go battery quirk
HID: wacom: Only report rotation for art pen
HID: wacom: Don't register pad_input for touch switch
KVM: nVMX: Snapshot pre-VM-Enter BNDCFGS for !nested_run_pending case
KVM: nVMX: Snapshot pre-VM-Enter DEBUGCTL for !nested_run_pending case
KVM: SVM: Don't BUG if userspace injects an interrupt with GIF=0
KVM: s390: pv: don't present the ecall interrupt twice
KVM: x86: Split kvm_is_valid_cr4() and export only the non-vendor bits
KVM: nVMX: Let userspace set nVMX MSR to any _host_ supported value
KVM: nVMX: Account for KVM reserved CR4 bits in consistency checks
KVM: nVMX: Inject #UD if VMXON is attempted with incompatible CR0/CR4
KVM: x86: Mark TSS busy during LTR emulation _after_ all fault checks
KVM: x86: Set error code to segment selector on LLDT/LTR non-canonical #GP
KVM: nVMX: Always enable TSC scaling for L2 when it was enabled for L1
KVM: x86: Tag kvm_mmu_x86_module_init() with __init
KVM: x86: do not report preemption if the steal time cache is stale
KVM: x86: revalidate steal time cache if MSR value changes
riscv: set default pm_power_off to NULL
ALSA: hda/conexant: Add quirk for LENOVO 20149 Notebook model
ALSA: hda/cirrus - support for iMac 12,1 model
ALSA: hda/realtek: Add quirk for another Asus K42JZ model
ALSA: hda/realtek: Add a quirk for HP OMEN 15 (8786) mute LED
tty: vt: initialize unicode screen buffer
vfs: Check the truncate maximum size in inode_newsize_ok()
fs: Add missing umask strip in vfs_tmpfile
thermal: sysfs: Fix cooling_device_stats_setup() error code path
fbcon: Fix boundary checks for fbcon=vc:n1-n2 parameters
fbcon: Fix accelerated fbdev scrolling while logo is still shown
usbnet: Fix linkwatch use-after-free on disconnect
fix short copy handling in copy_mc_pipe_to_iter()
crypto: ccp - Use kzalloc for sev ioctl interfaces to prevent kernel memory leak
ovl: drop WARN_ON() dentry is NULL in ovl_encode_fh()
parisc: Fix device names in /proc/iomem
parisc: Drop pa_swapper_pg_lock spinlock
parisc: Check the return value of ioremap() in lba_driver_probe()
parisc: io_pgetevents_time64() needs compat syscall in 32-bit compat mode
riscv:uprobe fix SR_SPIE set/clear handling
dt-bindings: riscv: fix SiFive l2-cache's cache-sets
RISC-V: kexec: Fixup use of smp_processor_id() in preemptible context
RISC-V: Fixup get incorrect user mode PC for kernel mode regs
RISC-V: Fixup schedule out issue in machine_crash_shutdown()
RISC-V: Add modules to virtual kernel memory layout dump
rtc: rx8025: fix 12/24 hour mode detection on RX-8035
drm/gem: Properly annotate WW context on drm_gem_lock_reservations() error
drm/shmem-helper: Add missing vunmap on error
drm/vc4: hdmi: Disable audio if dmas property is present but empty
drm/hyperv-drm: Include framebuffer and EDID headers
drm/nouveau: fix another off-by-one in nvbios_addr
drm/nouveau: Don't pm_runtime_put_sync(), only pm_runtime_put_autosuspend()
drm/nouveau/acpi: Don't print error when we get -EINPROGRESS from pm_runtime
drm/nouveau/kms: Fix failure path for creating DP connectors
drm/amdgpu: Check BO's requested pinning domains against its preferred_domains
drm/amdgpu: fix check in fbdev init
bpf: Fix KASAN use-after-free Read in compute_effective_progs
btrfs: reject log replay if there is unsupported RO compat flag
mtd: rawnand: arasan: Fix clock rate in NV-DDR
mtd: rawnand: arasan: Update NAND bus clock instead of system clock
um: Remove straying parenthesis
um: seed rng using host OS rng
iio: fix iio_format_avail_range() printing for none IIO_VAL_INT
iio: light: isl29028: Fix the warning in isl29028_remove()
scsi: sg: Allow waiting for commands to complete on removed device
scsi: qla2xxx: Fix incorrect display of max frame size
scsi: qla2xxx: Zero undefined mailbox IN registers
soundwire: qcom: Check device status before reading devid
ksmbd: fix memory leak in smb2_handle_negotiate
ksmbd: prevent out of bound read for SMB2_TREE_CONNNECT
ksmbd: fix use-after-free bug in smb2_tree_disconect
fuse: limit nsec
fuse: ioctl: translate ENOSYS
serial: mvebu-uart: uart2 error bits clearing
md-raid: destroy the bitmap after destroying the thread
md-raid10: fix KASAN warning
mbcache: don't reclaim used entries
mbcache: add functions to delete entry if unused
media: [PATCH] pci: atomisp_cmd: fix three missing checks on list iterator
ia64, processor: fix -Wincompatible-pointer-types in ia64_get_irr()
PCI: Add defines for normal and subtractive PCI bridges
powerpc/fsl-pci: Fix Class Code of PCIe Root Port
powerpc/ptdump: Fix display of RW pages on FSL_BOOK3E
powerpc/powernv: Avoid crashing if rng is NULL
MIPS: cpuinfo: Fix a warning for CONFIG_CPUMASK_OFFSTACK
coresight: Clear the connection field properly
usb: typec: ucsi: Acknowledge the GET_ERROR_STATUS command completion
USB: HCD: Fix URB giveback issue in tasklet function
ARM: dts: uniphier: Fix USB interrupts for PXs2 SoC
arm64: dts: uniphier: Fix USB interrupts for PXs3 SoC
usb: dwc3: gadget: refactor dwc3_repare_one_trb
usb: dwc3: gadget: fix high speed multiplier setting
netfilter: nf_tables: do not allow SET_ID to refer to another table
netfilter: nf_tables: do not allow CHAIN_ID to refer to another table
netfilter: nf_tables: do not allow RULE_ID to refer to another chain
netfilter: nf_tables: fix null deref due to zeroed list head
epoll: autoremove wakers even more aggressively
x86: Handle idle=nomwait cmdline properly for x86_idle
arch: make TRACE_IRQFLAGS_NMI_SUPPORT generic
arm64: Do not forget syscall when starting a new thread.
arm64: fix oops in concurrently setting insn_emulation sysctls
arm64: kasan: Revert "arm64: mte: reset the page tag in page->flags"
ext2: Add more validity checks for inode counts
sched/fair: Introduce SIS_UTIL to search idle CPU based on sum of util_avg
genirq: Don't return error on missing optional irq_request_resources()
irqchip/mips-gic: Only register IPI domain when SMP is enabled
genirq: GENERIC_IRQ_IPI depends on SMP
sched/core: Always flush pending blk_plug
irqchip/mips-gic: Check the return value of ioremap() in gic_of_init()
wait: Fix __wait_event_hrtimeout for RT/DL tasks
ARM: dts: imx6ul: add missing properties for sram
ARM: dts: imx6ul: change operating-points to uint32-matrix
ARM: dts: imx6ul: fix keypad compatible
ARM: dts: imx6ul: fix csi node compatible
ARM: dts: imx6ul: fix lcdif node compatible
ARM: dts: imx6ul: fix qspi node compatible
ARM: dts: BCM5301X: Add DT for Meraki MR26
ARM: dts: ux500: Fix Codina accelerometer mounting matrix
ARM: dts: ux500: Fix Gavini accelerometer mounting matrix
spi: synquacer: Add missing clk_disable_unprepare()
ARM: OMAP2+: display: Fix refcount leak bug
ARM: OMAP2+: pdata-quirks: Fix refcount leak bug
ACPI: EC: Remove duplicate ThinkPad X1 Carbon 6th entry from DMI quirks
ACPI: EC: Drop the EC_FLAGS_IGNORE_DSDT_GPE quirk
ACPI: PM: save NVS memory for Lenovo G40-45
ACPI: LPSS: Fix missing check in register_device_clock()
ARM: dts: qcom: sdx55: Fix the IRQ trigger type for UART
arm64: dts: qcom: ipq8074: fix NAND node name
arm64: dts: allwinner: a64: orangepi-win: Fix LED node name
ARM: shmobile: rcar-gen2: Increase refcount for new reference
firmware: tegra: Fix error check return value of debugfs_create_file()
hwmon: (dell-smm) Add Dell XPS 13 7390 to fan control whitelist
hwmon: (sht15) Fix wrong assumptions in device remove callback
PM: hibernate: defer device probing when resuming from hibernation
selinux: fix memleak in security_read_state_kernel()
selinux: Add boundary check in put_entry()
kasan: test: Silence GCC 12 warnings
drm/amdgpu: Remove one duplicated ef removal
powerpc/64s: Disable stack variable initialisation for prom_init
spi: spi-rspi: Fix PIO fallback on RZ platforms
ARM: findbit: fix overflowing offset
meson-mx-socinfo: Fix refcount leak in meson_mx_socinfo_init
arm64: dts: renesas: beacon: Fix regulator node names
spi: spi-altera-dfl: Fix an error handling path
ARM: bcm: Fix refcount leak in bcm_kona_smc_init
ACPI: processor/idle: Annotate more functions to live in cpuidle section
ARM: dts: imx7d-colibri-emmc: add cpu1 supply
soc: renesas: r8a779a0-sysc: Fix A2DP1 and A2CV[2357] PDR values
scsi: hisi_sas: Use managed PCI functions
dt-bindings: iio: accel: Add DT binding doc for ADXL355
soc: amlogic: Fix refcount leak in meson-secure-pwrc.c
arm64: dts: renesas: Fix thermal-sensors on single-zone sensors
x86/pmem: Fix platform-device leak in error path
ARM: dts: ast2500-evb: fix board compatible
ARM: dts: ast2600-evb: fix board compatible
ARM: dts: ast2600-evb-a1: fix board compatible
arm64: dts: mt8192: Fix idle-states nodes naming scheme
arm64: dts: mt8192: Fix idle-states entry-method
arm64: select TRACE_IRQFLAGS_NMI_SUPPORT
arm64: cpufeature: Allow different PMU versions in ID_DFR0_EL1
locking/lockdep: Fix lockdep_init_map_*() confusion
arm64: dts: qcom: sc7180: Remove ipa_fw_mem node on trogdor
soc: fsl: guts: machine variable might be unset
block: fix infinite loop for invalid zone append
ARM: dts: qcom: mdm9615: add missing PMIC GPIO reg
ARM: OMAP2+: Fix refcount leak in omapdss_init_of
ARM: OMAP2+: Fix refcount leak in omap3xxx_prm_late_init
arm64: dts: qcom: sdm630: disable GPU by default
arm64: dts: qcom: sdm630: fix the qusb2phy ref clock
arm64: dts: qcom: sdm630: fix gpu's interconnect path
arm64: dts: qcom: sdm636-sony-xperia-ganges-mermaid: correct sdc2 pinconf
cpufreq: zynq: Fix refcount leak in zynq_get_revision
regulator: qcom_smd: Fix pm8916_pldo range
ACPI: APEI: Fix _EINJ vs EFI_MEMORY_SP
ARM: dts: qcom-msm8974: fix irq type on blsp2_uart1
soc: qcom: ocmem: Fix refcount leak in of_get_ocmem
soc: qcom: aoss: Fix refcount leak in qmp_cooling_devices_register
ARM: dts: qcom: pm8841: add required thermal-sensor-cells
bus: hisi_lpc: fix missing platform_device_put() in hisi_lpc_acpi_probe()
stack: Declare {randomize_,}kstack_offset to fix Sparse warnings
arm64: dts: qcom: msm8916: Fix typo in pronto remoteproc node
ACPI: APEI: explicit init of HEST and GHES in apci_init()
drivers/iio: Remove all strcpy() uses
ACPI: VIOT: Fix ACS setup
arm64: dts: qcom: sm6125: Move sdc2 pinctrl from seine-pdx201 to sm6125
arm64: dts: qcom: sm6125: Append -state suffix to pinctrl nodes
arm64: dts: qcom: sm8250: add missing PCIe PHY clock-cells
arm64: dts: mt7622: fix BPI-R64 WPS button
arm64: tegra: Fixup SYSRAM references
arm64: tegra: Update Tegra234 BPMP channel addresses
arm64: tegra: Mark BPMP channels as no-memory-wc
arm64: tegra: Fix SDMMC1 CD on P2888
erofs: avoid consecutive detection for Highmem memory
blk-mq: don't create hctx debugfs dir until q->debugfs_dir is created
spi: Fix simplification of devm_spi_register_controller
spi: tegra20-slink: fix UAF in tegra_slink_remove()
hwmon: (drivetemp) Add module alias
blktrace: Trace remapped requests correctly
PM: domains: Ensure genpd_debugfs_dir exists before remove
dm writecache: return void from functions
dm writecache: count number of blocks read, not number of read bios
dm writecache: count number of blocks written, not number of write bios
dm writecache: count number of blocks discarded, not number of discard bios
regulator: of: Fix refcount leak bug in of_get_regulation_constraints()
soc: qcom: Make QCOM_RPMPD depend on PM
arm64: dts: qcom: qcs404: Fix incorrect USB2 PHYs assignment
irqdomain: Report irq number for NOMAP domains
drivers/perf: arm_spe: Fix consistency of SYS_PMSCR_EL1.CX
nohz/full, sched/rt: Fix missed tick-reenabling bug in dequeue_task_rt()
x86/extable: Fix ex_handler_msr() print condition
selftests/seccomp: Fix compile warning when CC=clang
thermal/tools/tmon: Include pthread and time headers in tmon.h
dm: return early from dm_pr_call() if DM device is suspended
pwm: sifive: Simplify offset calculation for PWMCMP registers
pwm: sifive: Ensure the clk is enabled exactly once per running PWM
pwm: sifive: Shut down hardware only after pwmchip_remove() completed
pwm: lpc18xx-sct: Reduce number of devm memory allocations
pwm: lpc18xx-sct: Simplify driver by not using pwm_[gs]et_chip_data()
pwm: lpc18xx: Fix period handling
drm/dp: Export symbol / kerneldoc fixes for DP AUX bus
drm/bridge: tc358767: Move (e)DP bridge endpoint parsing into dedicated function
ath10k: do not enforce interrupt trigger type
drm/st7735r: Fix module autoloading for Okaya RH128128T
drm/panel: Fix build error when CONFIG_DRM_PANEL_SAMSUNG_ATNA33XC20=y && CONFIG_DRM_DISPLAY_HELPER=m
wifi: rtlwifi: fix error codes in rtl_debugfs_set_write_h2c()
ath11k: fix netdev open race
drm/mipi-dbi: align max_chunk to 2 in spi_transfer
ath11k: Fix incorrect debug_mask mappings
drm/radeon: fix potential buffer overflow in ni_set_mc_special_registers()
drm/mediatek: Modify dsi funcs to atomic operations
drm/mediatek: Separate poweron/poweroff from enable/disable and define new funcs
drm/mediatek: Add pull-down MIPI operation in mtk_dsi_poweroff function
drm/meson: encoder_hdmi: switch to bridge DRM_BRIDGE_ATTACH_NO_CONNECTOR
drm/meson: encoder_hdmi: Fix refcount leak in meson_encoder_hdmi_init
drm/bridge: lt9611uxc: Cancel only driver's work
i2c: npcm: Remove own slave addresses 2:10
i2c: npcm: Correct slave role behavior
i2c: mxs: Silence a clang warning
virtio-gpu: fix a missing check to avoid NULL dereference
drm/shmem-helper: Unexport drm_gem_shmem_create_with_handle()
drm/shmem-helper: Export dedicated wrappers for GEM object functions
drm/shmem-helper: Pass GEM shmem object in public interfaces
drm/virtio: Fix NULL vs IS_ERR checking in virtio_gpu_object_shmem_init
drm: adv7511: override i2c address of cec before accessing it
crypto: sun8i-ss - do not allocate memory when handling hash requests
crypto: sun8i-ss - fix error codes in allocate_flows()
net: fix sk_wmem_schedule() and sk_rmem_schedule() errors
can: netlink: allow configuring of fixed bit rates without need for do_set_bittiming callback
can: netlink: allow configuring of fixed data bit rates without need for do_set_data_bittiming callback
i2c: Fix a potential use after free
crypto: sun8i-ss - fix infinite loop in sun8i_ss_setup_ivs()
media: atmel: atmel-sama7g5-isc: fix warning in configs without OF
media: tw686x: Register the irq at the end of probe
media: imx-jpeg: Correct some definition according specification
media: imx-jpeg: Leave a blank space before the configuration data
media: imx-jpeg: Add pm-runtime support for imx-jpeg
media: imx-jpeg: use NV12M to represent non contiguous NV12
media: imx-jpeg: Set V4L2_BUF_FLAG_LAST at eos
media: imx-jpeg: Refactor function mxc_jpeg_parse
media: imx-jpeg: Identify and handle precision correctly
media: imx-jpeg: Handle source change in a function
media: imx-jpeg: Support dynamic resolution change
media: imx-jpeg: Align upwards buffer size
media: imx-jpeg: Implement drain using v4l2-mem2mem helpers
ath9k: fix use-after-free in ath9k_hif_usb_rx_cb
wifi: iwlegacy: 4965: fix potential off-by-one overflow in il4965_rs_fill_link_cmd()
drm/radeon: fix incorrrect SPDX-License-Identifiers
rcutorture: Warn on individual rcu_torture_init() error conditions
rcutorture: Don't cpuhp_remove_state() if cpuhp_setup_state() failed
rcutorture: Fix ksoftirqd boosting timing and iteration
test_bpf: fix incorrect netdev features
crypto: ccp - During shutdown, check SEV data pointer before using
drm: bridge: adv7511: Add check for mipi_dsi_driver_register
media: imx-jpeg: Disable slot interrupt when frame done
drm/mcde: Fix refcount leak in mcde_dsi_bind
media: hdpvr: fix error value returns in hdpvr_read
media: v4l2-mem2mem: prevent pollerr when last_buffer_dequeued is set
media: driver/nxp/imx-jpeg: fix a unexpected return value problem
media: tw686x: Fix memory leak in tw686x_video_init
drm/vc4: plane: Remove subpixel positioning check
drm/vc4: plane: Fix margin calculations for the right/bottom edges
drm/bridge: Add a function to abstract away panels
drm/vc4: dsi: Switch to devm_drm_of_get_bridge
drm/vc4: Use of_device_get_match_data()
drm/vc4: dsi: Release workaround buffer and DMA
drm/vc4: dsi: Correct DSI divider calculations
drm/vc4: dsi: Correct pixel order for DSI0
drm/vc4: dsi: Register dsi0 as the correct vc4 encoder type
drm/vc4: dsi: Fix dsi0 interrupt support
drm/vc4: dsi: Add correct stop condition to vc4_dsi_encoder_disable iteration
drm/vc4: hdmi: Fix HPD GPIO detection
drm/vc4: hdmi: Avoid full hdmi audio fifo writes
drm/vc4: hdmi: Reset HDMI MISC_CONTROL register
drm/vc4: hdmi: Fix timings for interlaced modes
drm/vc4: hdmi: Correct HDMI timing registers for interlaced modes
crypto: arm64/gcm - Select AEAD for GHASH_ARM64_CE
selftests/xsk: Destroy BPF resources only when ctx refcount drops to 0
drm/rockchip: vop: Don't crash for invalid duplicate_state()
drm/rockchip: Fix an error handling path rockchip_dp_probe()
drm/mediatek: dpi: Remove output format of YUV
drm/mediatek: dpi: Only enable dpi after the bridge is enabled
drm: bridge: sii8620: fix possible off-by-one
hinic: Use the bitmap API when applicable
net: hinic: fix bug that ethtool get wrong stats
net: hinic: avoid kernel hung in hinic_get_stats64()
drm/msm/mdp5: Fix global state lock backoff
crypto: hisilicon/sec - don't sleep when in softirq
crypto: hisilicon - Kunpeng916 crypto driver don't sleep when in softirq
media: platform: mtk-mdp: Fix mdp_ipi_comm structure alignment
drm/msm: Avoid dirtyfb stalls on video mode displays (v2)
drm/msm/dpu: Fix for non-visible planes
mt76: mt76x02u: fix possible memory leak in __mt76x02u_mcu_send_msg
mt76: mt7615: do not update pm stats in case of error
ieee80211: add EHT 1K aggregation definitions
mt76: mt7921: fix aggregation subframes setting to HE max
mt76: mt7921: enlarge maximum VHT MPDU length to 11454
mediatek: mt76: mac80211: Fix missing of_node_put() in mt76_led_init()
mediatek: mt76: eeprom: fix missing of_node_put() in mt76_find_power_limits_node()
skmsg: Fix invalid last sg check in sk_msg_recvmsg()
drm/exynos/exynos7_drm_decon: free resources when clk_set_parent() failed.
tcp: make retransmitted SKB fit into the send window
libbpf: Fix the name of a reused map
selftests: timers: valid-adjtimex: build fix for newer toolchains
selftests: timers: clocksource-switch: fix passing errors from child
bpf: Fix subprog names in stack traces.
fs: check FMODE_LSEEK to control internal pipe splicing
media: cedrus: h265: Fix flag name
media: hantro: postproc: Fix motion vector space size
media: hantro: Simplify postprocessor
media: hevc: Embedded indexes in RPS
media: staging: media: hantro: Fix typos
wifi: wil6210: debugfs: fix info leak in wil_write_file_wmi()
wifi: p54: Fix an error handling path in p54spi_probe()
wifi: p54: add missing parentheses in p54_flush()
selftests/bpf: fix a test for snprintf() overflow
libbpf: fix an snprintf() overflow check
can: pch_can: do not report txerr and rxerr during bus-off
can: rcar_can: do not report txerr and rxerr during bus-off
can: sja1000: do not report txerr and rxerr during bus-off
can: hi311x: do not report txerr and rxerr during bus-off
can: sun4i_can: do not report txerr and rxerr during bus-off
can: kvaser_usb_hydra: do not report txerr and rxerr during bus-off
can: kvaser_usb_leaf: do not report txerr and rxerr during bus-off
can: usb_8dev: do not report txerr and rxerr during bus-off
can: error: specify the values of data[5..7] of CAN error frames
can: pch_can: pch_can_error(): initialize errc before using it
Bluetooth: hci_intel: Add check for platform_driver_register
i2c: cadence: Support PEC for SMBus block read
i2c: mux-gpmux: Add of_node_put() when breaking out of loop
wifi: wil6210: debugfs: fix uninitialized variable use in `wil_write_file_wmi()`
wifi: iwlwifi: mvm: fix double list_add at iwl_mvm_mac_wake_tx_queue
wifi: libertas: Fix possible refcount leak in if_usb_probe()
media: cedrus: hevc: Add check for invalid timestamp
net/mlx5e: Remove WARN_ON when trying to offload an unsupported TLS cipher/version
net/mlx5e: Fix the value of MLX5E_MAX_RQ_NUM_MTTS
net/mlx5: Adjust log_max_qp to be 18 at most
crypto: hisilicon/hpre - don't use GFP_KERNEL to alloc mem during softirq
crypto: inside-secure - Add missing MODULE_DEVICE_TABLE for of
crypto: hisilicon/sec - fix auth key size error
inet: add READ_ONCE(sk->sk_bound_dev_if) in INET_MATCH()
ipv6: add READ_ONCE(sk->sk_bound_dev_if) in INET6_MATCH()
net: allow unbound socket for packets in VRF when tcp_l3mdev_accept set
netdevsim: fib: Fix reference count leak on route deletion failure
wifi: rtw88: check the return value of alloc_workqueue()
iavf: Fix max_rate limiting
iavf: Fix 'tc qdisc show' listing too many queues
netdevsim: Avoid allocation warnings triggered from user space
net: rose: fix netdev reference changes
net: ionic: fix error check for vlan flags in ionic_set_nic_features()
dccp: put dccp_qpolicy_full() and dccp_qpolicy_push() in the same lock
net: usb: make USB_RTL8153_ECM non user configurable
wireguard: ratelimiter: use hrtimer in selftest
wireguard: allowedips: don't corrupt stack when detecting overflow
HID: amd_sfh: Don't show client init failed as error when discovery fails
clk: renesas: r9a06g032: Fix UART clkgrp bitsel
mtd: maps: Fix refcount leak in of_flash_probe_versatile
mtd: maps: Fix refcount leak in ap_flash_init
mtd: rawnand: meson: Fix a potential double free issue
of: check previous kernel's ima-kexec-buffer against memory bounds
scsi: qla2xxx: edif: Reduce Initiator-Initiator thrashing
scsi: qla2xxx: edif: Fix potential stuck session in sa update
scsi: qla2xxx: edif: Reduce connection thrash
scsi: qla2xxx: edif: Fix inconsistent check of db_flags
scsi: qla2xxx: edif: Synchronize NPIV deletion with authentication application
scsi: qla2xxx: edif: Add retry for ELS passthrough
scsi: qla2xxx: edif: Fix n2n discovery issue with secure target
scsi: qla2xxx: edif: Fix n2n login retry for secure device
KVM: SVM: Unwind "speculative" RIP advancement if INTn injection "fails"
KVM: SVM: Stuff next_rip on emulated INT3 injection if NRIPS is supported
phy: samsung: exynosautov9-ufs: correct TSRV register configurations
PCI: microchip: Fix refcount leak in mc_pcie_init_irq_domains()
PCI: tegra194: Fix PM error handling in tegra_pcie_config_ep()
HID: cp2112: prevent a buffer overflow in cp2112_xfer()
mtd: sm_ftl: Fix deadlock caused by cancel_work_sync in sm_release
mtd: partitions: Fix refcount leak in parse_redboot_of
mtd: parsers: ofpart: Fix refcount leak in bcm4908_partitions_fw_offset
mtd: st_spi_fsm: Add a clk_disable_unprepare() in .probe()'s error path
PCI: mediatek-gen3: Fix refcount leak in mtk_pcie_init_irq_domains()
fpga: altera-pr-ip: fix unsigned comparison with less than zero
usb: host: Fix refcount leak in ehci_hcd_ppc_of_probe
usb: ohci-nxp: Fix refcount leak in ohci_hcd_nxp_probe
usb: gadget: tegra-xudc: Fix error check in tegra_xudc_powerdomain_init()
usb: xhci: tegra: Fix error check
netfilter: xtables: Bring SPDX identifier back
scsi: qla2xxx: edif: Send LOGO for unexpected IKE message
scsi: qla2xxx: edif: Reduce disruption due to multiple app start
scsi: qla2xxx: edif: Fix no login after app start
scsi: qla2xxx: edif: Tear down session if keys have been removed
scsi: qla2xxx: edif: Fix session thrash
scsi: qla2xxx: edif: Fix no logout on delete for N2N
iio: accel: bma400: Fix the scale min and max macro values
platform/chrome: cros_ec: Always expose last resume result
iio: accel: bma400: Reordering of header files
clk: mediatek: reset: Fix written reset bit offset
lib/test_hmm: avoid accessing uninitialized pages
memremap: remove support for external pgmap refcounts
mm/memremap: fix memunmap_pages() race with get_dev_pagemap()
KVM: Don't set Accessed/Dirty bits for ZERO_PAGE
mwifiex: Ignore BTCOEX events from the 88W8897 firmware
mwifiex: fix sleep in atomic context bugs caused by dev_coredumpv
scsi: iscsi: Allow iscsi_if_stop_conn() to be called from kernel
scsi: iscsi: Add helper to remove a session from the kernel
scsi: iscsi: Fix session removal on shutdown
dmaengine: dw-edma: Fix eDMA Rd/Wr-channels and DMA-direction semantics
mtd: dataflash: Add SPI ID table
clk: qcom: camcc-sm8250: Fix halt on boot by reducing driver's init level
misc: rtsx: Fix an error handling path in rtsx_pci_probe()
driver core: fix potential deadlock in __driver_attach
clk: qcom: clk-krait: unlock spin after mux completion
clk: qcom: gcc-msm8939: Add missing SYSTEM_MM_NOC_BFDCD_CLK_SRC
clk: qcom: gcc-msm8939: Fix bimc_ddr_clk_src rcgr base address
clk: qcom: gcc-msm8939: Add missing system_mm_noc_bfdcd_clk_src
clk: qcom: gcc-msm8939: Point MM peripherals to system_mm_noc clock
usb: host: xhci: use snprintf() in xhci_decode_trb()
RDMA/rxe: Fix deadlock in rxe_do_local_ops()
clk: qcom: ipq8074: fix NSS core PLL-s
clk: qcom: ipq8074: SW workaround for UBI32 PLL lock
clk: qcom: ipq8074: fix NSS port frequency tables
clk: qcom: ipq8074: set BRANCH_HALT_DELAY flag for UBI clocks
clk: qcom: camcc-sdm845: Fix topology around titan_top power domain
clk: qcom: camcc-sm8250: Fix topology around titan_top power domain
clk: qcom: clk-rcg2: Fail Duty-Cycle configuration if MND divider is not enabled.
clk: qcom: clk-rcg2: Make sure to not write d=0 to the NMD register
mm/mempolicy: fix get_nodes out of bound access
PCI: dwc: Stop link on host_init errors and de-initialization
PCI: dwc: Add unroll iATU space support to dw_pcie_disable_atu()
PCI: dwc: Disable outbound windows only for controllers using iATU
PCI: dwc: Set INCREASE_REGION_SIZE flag based on limit address
PCI: dwc: Deallocate EPC memory on dw_pcie_ep_init() errors
PCI: dwc: Always enable CDM check if "snps,enable-cdm-check" exists
soundwire: bus_type: fix remove and shutdown support
soundwire: revisit driver bind/unbind and callbacks
KVM: arm64: Don't return from void function
dmaengine: sf-pdma: Add multithread support for a DMA channel
PCI: endpoint: Don't stop controller when unbinding endpoint function
scsi: qla2xxx: Check correct variable in qla24xx_async_gffid()
intel_th: Fix a resource leak in an error handling path
intel_th: msu-sink: Potential dereference of null pointer
intel_th: msu: Fix vmalloced buffers
binder: fix redefinition of seq_file attributes
staging: rtl8192u: Fix sleep in atomic context bug in dm_fsync_timer_callback
mmc: sdhci-of-esdhc: Fix refcount leak in esdhc_signal_voltage_switch
mmc: mxcmmc: Silence a clang warning
mmc: renesas_sdhi: Get the reset handle early in the probe
memstick/ms_block: Fix some incorrect memory allocation
memstick/ms_block: Fix a memory leak
mmc: sdhci-of-at91: fix set_uhs_signaling rewriting of MC1R
of: device: Fix missing of_node_put() in of_dma_set_restricted_buffer
mmc: block: Add single read for 4k sector cards
KVM: s390: pv: leak the topmost page table when destroy fails
PCI/portdrv: Don't disable AER reporting in get_port_device_capability()
PCI: qcom: Set up rev 2.1.0 PARF_PHY before enabling clocks
scsi: smartpqi: Fix DMA direction for RAID requests
xtensa: iss/network: provide release() callback
xtensa: iss: fix handling error cases in iss_net_configure()
usb: gadget: udc: amd5536 depends on HAS_DMA
usb: aspeed-vhub: Fix refcount leak bug in ast_vhub_init_desc()
usb: dwc3: core: Deprecate GCTL.CORESOFTRESET
usb: dwc3: core: Do not perform GCTL_CORE_SOFTRESET during bootup
usb: dwc3: qcom: fix missing optional irq warnings
eeprom: idt_89hpesx: uninitialized data in idt_dbgfs_csr_write()
phy: stm32: fix error return in stm32_usbphyc_phy_init
interconnect: imx: fix max_node_id
um: random: Don't initialise hwrng struct with zero
RDMA/irdma: Fix a window for use-after-free
RDMA/irdma: Fix VLAN connection with wildcard address
RDMA/irdma: Fix setting of QP context err_rq_idx_valid field
RDMA/rtrs-srv: Fix modinfo output for stringify
RDMA/rtrs: Fix warning when use poll mode on client side.
RDMA/rtrs: Replace duplicate check with is_pollqueue helper
RDMA/rtrs: Introduce destroy_cq helper
RDMA/rtrs: Do not allow sessname to contain special symbols / and .
RDMA/rtrs: Rename rtrs_sess to rtrs_path
RDMA/rtrs-srv: Rename rtrs_srv_sess to rtrs_srv_path
RDMA/rtrs-clt: Rename rtrs_clt_sess to rtrs_clt_path
RDMA/rtrs-clt: Replace list_next_or_null_rr_rcu with an inline function
RDMA/qedr: Fix potential memory leak in __qedr_alloc_mr()
RDMA/hns: Fix incorrect clearing of interrupt status register
RDMA/siw: Fix duplicated reported IW_CM_EVENT_CONNECT_REPLY event
iio: cros: Register FIFO callback after sensor is registered
clk: qcom: gcc-msm8939: Fix weird field spacing in ftbl_gcc_camss_cci_clk
RDMA/hfi1: fix potential memory leak in setup_base_ctxt()
gpio: gpiolib-of: Fix refcount bugs in of_mm_gpiochip_add_data()
HID: mcp2221: prevent a buffer overflow in mcp_smbus_write()
HID: amd_sfh: Add NULL check for hid device
dmaengine: imx-dma: Cast of_device_get_match_data() with (uintptr_t)
scripts/gdb: lx-dmesg: read records individually
scripts/gdb: fix 'lx-dmesg' on 32 bits arch
RDMA/rxe: Fix mw bind to allow any consumer key portion
mmc: cavium-octeon: Add of_node_put() when breaking out of loop
mmc: cavium-thunderx: Add of_node_put() when breaking out of loop
HID: alps: Declare U1_UNICORN_LEGACY support
RDMA/rxe: For invalidate compare according to set keys in mr
PCI: tegra194: Fix Root Port interrupt handling
PCI: tegra194: Fix link up retry sequence
HID: amd_sfh: Handle condition of "no sensors"
USB: serial: fix tty-port initialized comments
usb: cdns3: change place of 'priv_ep' assignment in cdns3_gadget_ep_dequeue(), cdns3_gadget_ep_enable()
mtd: spi-nor: fix spi_nor_spimem_setup_op() call in spi_nor_erase_{sector,chip}()
KVM: nVMX: Set UMIP bit CR4_FIXED1 MSR when emulating UMIP
platform/olpc: Fix uninitialized data in debugfs write
RDMA/srpt: Duplicate port name members
RDMA/srpt: Introduce a reference count in struct srpt_device
RDMA/srpt: Fix a use-after-free
android: binder: stop saving a pointer to the VMA
mm/mmap.c: fix missing call to vm_unacct_memory in mmap_region
selftests: kvm: set rax before vmcall
of/fdt: declared return type does not match actual return type
RDMA/mlx5: Add missing check for return value in get namespace flow
RDMA/rxe: Add memory barriers to kernel queues
RDMA/rxe: Remove the is_user members of struct rxe_sq/rxe_rq/rxe_srq
RDMA/rxe: Fix error unwind in rxe_create_qp()
block/rnbd-srv: Set keep_id to true after mutex_trylock
null_blk: fix ida error handling in null_add_dev()
nvme: use command_id instead of req->tag in trace_nvme_complete_rq()
nvme: define compat_ioctl again to unbreak 32-bit userspace.
nvme: disable namespace access for unsupported metadata
nvme: don't return an error from nvme_configure_metadata
nvme: catch -ENODEV from nvme_revalidate_zones again
block/bio: remove duplicate append pages code
block: ensure iov_iter advances for added pages
jbd2: fix outstanding credits assert in jbd2_journal_commit_transaction()
ext4: recover csum seed of tmp_inode after migrating to extents
jbd2: fix assertion 'jh->b_frozen_data == NULL' failure when journal aborted
usb: cdns3: Don't use priv_dev uninitialized in cdns3_gadget_ep_enable()
opp: Fix error check in dev_pm_opp_attach_genpd()
ASoC: cros_ec_codec: Fix refcount leak in cros_ec_codec_platform_probe
ASoC: samsung: Fix error handling in aries_audio_probe
ASoC: imx-audmux: Silence a clang warning
ASoC: mediatek: mt8173: Fix refcount leak in mt8173_rt5650_rt5676_dev_probe
ASoC: mt6797-mt6351: Fix refcount leak in mt6797_mt6351_dev_probe
ASoC: codecs: da7210: add check for i2c_add_driver
ASoC: mediatek: mt8173-rt5650: Fix refcount leak in mt8173_rt5650_dev_probe
serial: 8250: Export ICR access helpers for internal use
serial: 8250: dma: Allow driver operations before starting DMA transfers
serial: 8250_dw: Store LSR into lsr_saved_flags in dw8250_tx_wait_empty()
ASoC: codecs: msm8916-wcd-digital: move gains from SX_TLV to S8_TLV
ASoC: codecs: wcd9335: move gains from SX_TLV to S8_TLV
rpmsg: char: Add mutex protection for rpmsg_eptdev_open()
rpmsg: mtk_rpmsg: Fix circular locking dependency
remoteproc: k3-r5: Fix refcount leak in k3_r5_cluster_of_init
selftests/livepatch: better synchronize test_klp_callbacks_busy
profiling: fix shift too large makes kernel panic
remoteproc: imx_rproc: Fix refcount leak in imx_rproc_addr_init
ASoC: samsung: h1940_uda1380: include proepr GPIO consumer header
powerpc/perf: Optimize clearing the pending PMI and remove WARN_ON for PMI check in power_pmu_disable
ASoC: samsung: change gpiod_speaker_power and rx1950_audio from global to static variables
tty: n_gsm: Delete gsmtty open SABM frame when config requester
tty: n_gsm: fix user open not possible at responder until initiator open
tty: n_gsm: fix tty registration before control channel open
tty: n_gsm: fix wrong queuing behavior in gsm_dlci_data_output()
tty: n_gsm: fix missing timer to handle stalled links
tty: n_gsm: fix non flow control frames during mux flow off
tty: n_gsm: fix packet re-transmission without open control channel
tty: n_gsm: fix race condition in gsmld_write()
tty: n_gsm: fix resource allocation order in gsm_activate_mux()
ASoC: qcom: Fix missing of_node_put() in asoc_qcom_lpass_cpu_platform_probe()
ASoC: imx-card: Fix DSD/PDM mclk frequency
remoteproc: qcom: wcnss: Fix handling of IRQs
vfio/ccw: Do not change FSM state in subchannel event
serial: 8250_fsl: Don't report FE, PE and OE twice
tty: n_gsm: fix wrong T1 retry count handling
tty: n_gsm: fix DM command
tty: n_gsm: fix missing corner cases in gsmld_poll()
MIPS: vdso: Utilize __pa() for gic_pfn
swiotlb: fail map correctly with failed io_tlb_default_mem
ASoC: mt6359: Fix refcount leak bug
serial: 8250_bcm7271: Save/restore RTS in suspend/resume
iommu/exynos: Handle failed IOMMU device registration properly
9p: fix a bunch of checkpatch warnings
9p: Drop kref usage
9p: Add client parameter to p9_req_put()
net: 9p: fix refcount leak in p9_read_work() error handling
MIPS: Fixed __debug_virt_addr_valid()
rpmsg: qcom_smd: Fix refcount leak in qcom_smd_parse_edge
kfifo: fix kfifo_to_user() return type
lib/smp_processor_id: fix imbalanced instrumentation_end() call
proc: fix a dentry lock race between release_task and lookup
remoteproc: qcom: pas: Check if coredump is enabled
remoteproc: sysmon: Wait for SSCTL service to come up
mfd: t7l66xb: Drop platform disable callback
mfd: max77620: Fix refcount leak in max77620_initialise_fps
iommu/arm-smmu: qcom_iommu: Add of_node_put() when breaking out of loop
perf tools: Fix dso_id inode generation comparison
s390/dump: fix old lowcore virtual vs physical address confusion
s390/maccess: fix semantics of memcpy_real() and its callers
s390/crash: fix incorrect number of bytes to copy to user space
s390/zcore: fix race when reading from hardware system area
ASoC: fsl_asrc: force cast the asrc_format type
ASoC: fsl-asoc-card: force cast the asrc_format type
ASoC: fsl_easrc: use snd_pcm_format_t type for sample_format
ASoC: imx-card: use snd_pcm_format_t type for asrc_format
ASoC: qcom: q6dsp: Fix an off-by-one in q6adm_alloc_copp()
fuse: Remove the control interface for virtio-fs
ASoC: audio-graph-card: Add of_node_put() in fail path
watchdog: sp5100_tco: Fix a memory leak of EFCH MMIO resource
watchdog: armada_37xx_wdt: check the return value of devm_ioremap() in armada_37xx_wdt_probe()
video: fbdev: amba-clcd: Fix refcount leak bugs
video: fbdev: sis: fix typos in SiS_GetModeID()
ASoC: mchp-spdifrx: disable end of block interrupt on failures
powerpc/32: Call mmu_mark_initmem_nx() regardless of data block mapping.
powerpc/32: Do not allow selection of e5500 or e6500 CPUs on PPC32
powerpc/iommu: Fix iommu_table_in_use for a small default DMA window case
powerpc/pci: Prefer PCI domain assignment via DT 'linux,pci-domain' and alias
tty: serial: fsl_lpuart: correct the count of break characters
s390/dump: fix os_info virtual vs physical address confusion
s390/smp: cleanup target CPU callback starting
s390/smp: cleanup control register update routines
s390/maccess: rework absolute lowcore accessors
s390/smp: enforce lowcore protection on CPU restart
f2fs: fix to remove F2FS_COMPR_FL and tag F2FS_NOCOMP_FL at the same time
powerpc/spufs: Fix refcount leak in spufs_init_isolated_loader
powerpc/xive: Fix refcount leak in xive_get_max_prio
powerpc/cell/axon_msi: Fix refcount leak in setup_msi_msg_address
perf symbol: Fail to read phdr workaround
kprobes: Forbid probing on trampoline and BPF code areas
x86/bus_lock: Don't assume the init value of DEBUGCTLMSR.BUS_LOCK_DETECT to be zero
powerpc/pci: Fix PHB numbering when using opal-phbid
genelf: Use HAVE_LIBCRYPTO_SUPPORT, not the never defined HAVE_LIBCRYPTO
scripts/faddr2line: Fix vmlinux detection on arm64
sched/deadline: Merge dl_task_can_attach() and dl_cpu_busy()
sched, cpuset: Fix dl_cpu_busy() panic due to empty cs->cpus_allowed
x86/numa: Use cpumask_available instead of hardcoded NULL check
video: fbdev: arkfb: Fix a divide-by-zero bug in ark_set_pixclock()
tools/thermal: Fix possible path truncations
sched: Fix the check of nr_running at queue wakelist
sched: Remove the limitation of WF_ON_CPU on wakelist if wakee cpu is idle
sched/core: Do not requeue task on CPU excluded from cpus_mask
x86/entry: Build thunk_$(BITS) only if CONFIG_PREEMPTION=y
f2fs: allow compression for mmap files in compress_mode=user
f2fs: do not allow to decompress files have FI_COMPRESS_RELEASED
video: fbdev: vt8623fb: Check the size of screen before memset_io()
video: fbdev: arkfb: Check the size of screen before memset_io()
video: fbdev: s3fb: Check the size of screen before memset_io()
scsi: ufs: core: Correct ufshcd_shutdown() flow
scsi: zfcp: Fix missing auto port scan and thus missing target ports
scsi: qla2xxx: Fix imbalance vha->vref_count
scsi: qla2xxx: Fix discovery issues in FC-AL topology
scsi: qla2xxx: Turn off multi-queue for 8G adapters
scsi: qla2xxx: Fix crash due to stale SRB access around I/O timeouts
scsi: qla2xxx: Fix excessive I/O error messages by default
scsi: qla2xxx: Fix erroneous mailbox timeout after PCI error injection
scsi: qla2xxx: Wind down adapter after PCIe error
scsi: qla2xxx: Fix losing FCP-2 targets on long port disable with I/Os
scsi: qla2xxx: Fix losing target when it reappears during delete
scsi: qla2xxx: Fix losing FCP-2 targets during port perturbation tests
x86/bugs: Enable STIBP for IBPB mitigated RETBleed
ftrace/x86: Add back ftrace_expected assignment
x86/kprobes: Update kcb status flag after singlestepping
x86/olpc: fix 'logical not is only applied to the left hand side'
SMB3: fix lease break timeout when multiple deferred close handles for the same file.
posix-cpu-timers: Cleanup CPU timers before freeing them during exec
Input: gscps2 - check return value of ioremap() in gscps2_probe()
__follow_mount_rcu(): verify that mount_lock remains unchanged
spmi: trace: fix stack-out-of-bound access in SPMI tracing functions
drm/mediatek: Allow commands to be sent during video mode
drm/mediatek: Keep dsi as LP00 before dcs cmds transfer
crypto: blake2s - remove shash module
drm/dp/mst: Read the extended DPCD capabilities during system resume
drm/vc4: drv: Adopt the dma configuration from the HVS or V3D component
usbnet: smsc95xx: Don't clear read-only PHY interrupt
usbnet: smsc95xx: Avoid link settings race on interrupt reception
usbnet: smsc95xx: Forward PHY interrupts to PHY driver to avoid polling
usbnet: smsc95xx: Fix deadlock on runtime resume
firmware: arm_scpi: Ensure scpi_info is not assigned if the probe fails
scsi: lpfc: Fix EEH support for NVMe I/O
scsi: lpfc: SLI path split: Refactor lpfc_iocbq
scsi: lpfc: SLI path split: Refactor fast and slow paths to native SLI4
scsi: lpfc: SLI path split: Refactor SCSI paths
scsi: lpfc: Remove extra atomic_inc on cmd_pending in queuecommand after VMID
intel_th: pci: Add Meteor Lake-P support
intel_th: pci: Add Raptor Lake-S PCH support
intel_th: pci: Add Raptor Lake-S CPU support
KVM: set_msr_mce: Permit guests to ignore single-bit ECC errors
KVM: x86: Signal #GP, not -EPERM, on bad WRMSR(MCi_CTL/STATUS)
iommu/vt-d: avoid invalid memory access via node_online(NUMA_NO_NODE)
PCI/AER: Iterate over error counters instead of error strings
PCI: qcom: Power on PHY before IPQ8074 DBI register accesses
serial: 8250_pci: Refactor the loop in pci_ite887x_init()
serial: 8250_pci: Replace dev_*() by pci_*() macros
serial: 8250: Fold EndRun device support into OxSemi Tornado code
serial: 8250: Add proper clock handling for OxSemi PCIe devices
tty: 8250: Add support for Brainboxes PX cards.
dm writecache: set a default MAX_WRITEBACK_JOBS
kexec, KEYS, s390: Make use of built-in and secondary keyring for signature verification
dm thin: fix use-after-free crash in dm_sm_register_threshold_callback
net/9p: Initialize the iounit field during fid creation
ARM: remove some dead code
timekeeping: contribute wall clock to rng on time change
locking/csd_lock: Change csdlock_debug from early_param to __setup
block: remove the struct blk_queue_ctx forward declaration
block: don't allow the same type rq_qos add more than once
btrfs: ensure pages are unlocked on cow_file_range() failure
btrfs: reset block group chunk force if we have to wait
btrfs: properly flag filesystem with BTRFS_FEATURE_INCOMPAT_BIG_METADATA
ACPI: CPPC: Do not prevent CPPC from working in the future
powerpc/powernv/kvm: Use darn for H_RANDOM on Power9
KVM: x86/pmu: Introduce the ctrl_mask value for fixed counter
KVM: VMX: Mark all PERF_GLOBAL_(OVF)_CTRL bits reserved if there's no vPMU
KVM: x86/pmu: Ignore pmu->global_ctrl check if vPMU doesn't support global_ctrl
KVM: VMX: Add helper to check if the guest PMU has PERF_GLOBAL_CTRL
KVM: nVMX: Attempt to load PERF_GLOBAL_CTRL on nVMX xfer iff it exists
dm raid: fix address sanitizer warning in raid_status
dm raid: fix address sanitizer warning in raid_resume
tracing: Add '__rel_loc' using trace event macros
tracing: Avoid -Warray-bounds warning for __rel_loc macro
ext4: update s_overhead_clusters in the superblock during an on-line resize
ext4: fix extent status tree race in writeback error recovery path
ext4: add EXT4_INODE_HAS_XATTR_SPACE macro in xattr.h
ext4: fix use-after-free in ext4_xattr_set_entry
ext4: correct max_inline_xattr_value_size computing
ext4: correct the misjudgment in ext4_iget_extra_inode
ext4: fix warning in ext4_iomap_begin as race between bmap and write
ext4: check if directory block is within i_size
ext4: make sure ext4_append() always allocates new block
ext4: remove EA inode entry from mbcache on inode eviction
ext4: use kmemdup() to replace kmalloc + memcpy
ext4: unindent codeblock in ext4_xattr_block_set()
ext4: fix race when reusing xattr blocks
KEYS: asymmetric: enforce SM2 signature use pkey algo
tpm: eventlog: Fix section mismatch for DEBUG_SECTION_MISMATCH
xen-blkback: fix persistent grants negotiation
xen-blkback: Apply 'feature_persistent' parameter when connect
xen-blkfront: Apply 'feature_persistent' parameter when connect
powerpc: Fix eh field when calling lwarx on PPC32
tracing: Use a struct alignof to determine trace event field alignment
net_sched: cls_route: remove from list when handle is 0
mac80211: fix a memory leak where sta_info is not freed
tcp: fix over estimation in sk_forced_mem_schedule()
crypto: lib/blake2s - reduce stack frame usage in self test
Revert "mwifiex: fix sleep in atomic context bugs caused by dev_coredumpv"
Revert "s390/smp: enforce lowcore protection on CPU restart"
drm/bridge: tc358767: Fix (e)DP bridge endpoint parsing in dedicated function
net: phy: smsc: Disable Energy Detect Power-Down in interrupt mode
drm/vc4: change vc4_dma_range_matches from a global to static
tracing/perf: Avoid -Warray-bounds warning for __rel_loc macro
drm/msm: Fix dirtyfb refcounting
drm/meson: Fix refcount leak in meson_encoder_hdmi_init
io_uring: mem-account pbuf buckets
Revert "net: usb: ax88179_178a needs FLAG_SEND_ZLP"
Bluetooth: L2CAP: Fix l2cap_global_chan_by_psm regression
drm/bridge: Move devm_drm_of_get_bridge to bridge/panel.c
scsi: lpfc: Fix locking for lpfc_sli_iocbq_lookup()
scsi: lpfc: Fix element offset in __lpfc_sli_release_iocbq_s4()
scsi: lpfc: Resolve some cleanup issues following SLI path refactoring
Linux 5.15.61
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: Iec359ed301bcbcd6e19b67ee8534418fab26850b
// SPDX-License-Identifier: GPL-2.0
/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
 * policies)
 */
#include "sched.h"

#include "pelt.h"

#include <trace/hooks/sched.h>

int sched_rr_timeslice = RR_TIMESLICE;
int sysctl_sched_rr_timeslice = (MSEC_PER_SEC / HZ) * RR_TIMESLICE;
/* More than 4 hours if BW_SHIFT equals 20. */
static const u64 max_rt_runtime = MAX_BW;

static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);

struct rt_bandwidth def_rt_bandwidth;

static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
{
        struct rt_bandwidth *rt_b =
                container_of(timer, struct rt_bandwidth, rt_period_timer);
        int idle = 0;
        int overrun;

        raw_spin_lock(&rt_b->rt_runtime_lock);
        for (;;) {
                overrun = hrtimer_forward_now(timer, rt_b->rt_period);
                if (!overrun)
                        break;

                raw_spin_unlock(&rt_b->rt_runtime_lock);
                idle = do_sched_rt_period_timer(rt_b, overrun);
                raw_spin_lock(&rt_b->rt_runtime_lock);
        }
        if (idle)
                rt_b->rt_period_active = 0;
        raw_spin_unlock(&rt_b->rt_runtime_lock);

        return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
}

void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
{
        rt_b->rt_period = ns_to_ktime(period);
        rt_b->rt_runtime = runtime;

        raw_spin_lock_init(&rt_b->rt_runtime_lock);

        hrtimer_init(&rt_b->rt_period_timer, CLOCK_MONOTONIC,
                     HRTIMER_MODE_REL_HARD);
        rt_b->rt_period_timer.function = sched_rt_period_timer;
}

static inline void do_start_rt_bandwidth(struct rt_bandwidth *rt_b)
{
        raw_spin_lock(&rt_b->rt_runtime_lock);
        if (!rt_b->rt_period_active) {
                rt_b->rt_period_active = 1;
                /*
                 * SCHED_DEADLINE updates the bandwidth, as a run away
                 * RT task with a DL task could hog a CPU. But DL does
                 * not reset the period. If a deadline task was running
                 * without an RT task running, it can cause RT tasks to
                 * throttle when they start up. Kick the timer right away
                 * to update the period.
                 */
                hrtimer_forward_now(&rt_b->rt_period_timer, ns_to_ktime(0));
                hrtimer_start_expires(&rt_b->rt_period_timer,
                                      HRTIMER_MODE_ABS_PINNED_HARD);
        }
        raw_spin_unlock(&rt_b->rt_runtime_lock);
}

static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
{
        if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
                return;

        do_start_rt_bandwidth(rt_b);
}

void init_rt_rq(struct rt_rq *rt_rq)
{
        struct rt_prio_array *array;
        int i;

        array = &rt_rq->active;
        for (i = 0; i < MAX_RT_PRIO; i++) {
                INIT_LIST_HEAD(array->queue + i);
                __clear_bit(i, array->bitmap);
        }
        /* delimiter for bitsearch: */
        __set_bit(MAX_RT_PRIO, array->bitmap);

#if defined CONFIG_SMP
        rt_rq->highest_prio.curr = MAX_RT_PRIO-1;
        rt_rq->highest_prio.next = MAX_RT_PRIO-1;
        rt_rq->rt_nr_migratory = 0;
        rt_rq->overloaded = 0;
        plist_head_init(&rt_rq->pushable_tasks);
#endif /* CONFIG_SMP */
        /* We start is dequeued state, because no RT tasks are queued */
        rt_rq->rt_queued = 0;

        rt_rq->rt_time = 0;
        rt_rq->rt_throttled = 0;
        rt_rq->rt_runtime = 0;
        raw_spin_lock_init(&rt_rq->rt_runtime_lock);
}

#ifdef CONFIG_RT_GROUP_SCHED
static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
{
        hrtimer_cancel(&rt_b->rt_period_timer);
}

#define rt_entity_is_task(rt_se) (!(rt_se)->my_q)

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_SCHED_DEBUG
        WARN_ON_ONCE(!rt_entity_is_task(rt_se));
#endif
        return container_of(rt_se, struct task_struct, rt);
}

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
        return rt_rq->rq;
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
        return rt_se->rt_rq;
}

static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
{
        struct rt_rq *rt_rq = rt_se->rt_rq;

        return rt_rq->rq;
}

void unregister_rt_sched_group(struct task_group *tg)
{
        if (tg->rt_se)
                destroy_rt_bandwidth(&tg->rt_bandwidth);

}

void free_rt_sched_group(struct task_group *tg)
{
        int i;

        for_each_possible_cpu(i) {
                if (tg->rt_rq)
                        kfree(tg->rt_rq[i]);
                if (tg->rt_se)
                        kfree(tg->rt_se[i]);
        }

        kfree(tg->rt_rq);
        kfree(tg->rt_se);
}

void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
                struct sched_rt_entity *rt_se, int cpu,
                struct sched_rt_entity *parent)
{
        struct rq *rq = cpu_rq(cpu);

        rt_rq->highest_prio.curr = MAX_RT_PRIO-1;
        rt_rq->rt_nr_boosted = 0;
        rt_rq->rq = rq;
        rt_rq->tg = tg;

        tg->rt_rq[cpu] = rt_rq;
        tg->rt_se[cpu] = rt_se;

        if (!rt_se)
                return;

        if (!parent)
                rt_se->rt_rq = &rq->rt;
        else
                rt_se->rt_rq = parent->my_q;

        rt_se->my_q = rt_rq;
        rt_se->parent = parent;
        INIT_LIST_HEAD(&rt_se->run_list);
}

int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
{
        struct rt_rq *rt_rq;
        struct sched_rt_entity *rt_se;
        int i;

        tg->rt_rq = kcalloc(nr_cpu_ids, sizeof(rt_rq), GFP_KERNEL);
        if (!tg->rt_rq)
                goto err;
        tg->rt_se = kcalloc(nr_cpu_ids, sizeof(rt_se), GFP_KERNEL);
        if (!tg->rt_se)
                goto err;

        init_rt_bandwidth(&tg->rt_bandwidth,
                        ktime_to_ns(def_rt_bandwidth.rt_period), 0);

        for_each_possible_cpu(i) {
                rt_rq = kzalloc_node(sizeof(struct rt_rq),
                                     GFP_KERNEL, cpu_to_node(i));
                if (!rt_rq)
                        goto err;

                rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
                                     GFP_KERNEL, cpu_to_node(i));
                if (!rt_se)
                        goto err_free_rq;

                init_rt_rq(rt_rq);
                rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
                init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
        }

        return 1;

err_free_rq:
        kfree(rt_rq);
err:
        return 0;
}

#else /* CONFIG_RT_GROUP_SCHED */

#define rt_entity_is_task(rt_se) (1)

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
        return container_of(rt_se, struct task_struct, rt);
}

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
        return container_of(rt_rq, struct rq, rt);
}

static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
{
        struct task_struct *p = rt_task_of(rt_se);

        return task_rq(p);
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
        struct rq *rq = rq_of_rt_se(rt_se);

        return &rq->rt;
}

void unregister_rt_sched_group(struct task_group *tg) { }

void free_rt_sched_group(struct task_group *tg) { }

int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
{
        return 1;
}
#endif /* CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_SMP

static void pull_rt_task(struct rq *this_rq);

static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
{
        /* Try to pull RT tasks here if we lower this rq's prio */
        return rq->online && rq->rt.highest_prio.curr > prev->prio;
}

static inline int rt_overloaded(struct rq *rq)
{
        return atomic_read(&rq->rd->rto_count);
}

static inline void rt_set_overload(struct rq *rq)
{
        if (!rq->online)
                return;

        cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
        /*
         * Make sure the mask is visible before we set
         * the overload count. That is checked to determine
         * if we should look at the mask. It would be a shame
         * if we looked at the mask, but the mask was not
         * updated yet.
         *
         * Matched by the barrier in pull_rt_task().
         */
        smp_wmb();
        atomic_inc(&rq->rd->rto_count);
}

static inline void rt_clear_overload(struct rq *rq)
{
        if (!rq->online)
                return;

        /* the order here really doesn't matter */
        atomic_dec(&rq->rd->rto_count);
        cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
}

static void update_rt_migration(struct rt_rq *rt_rq)
{
        if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
                if (!rt_rq->overloaded) {
                        rt_set_overload(rq_of_rt_rq(rt_rq));
                        rt_rq->overloaded = 1;
                }
        } else if (rt_rq->overloaded) {
                rt_clear_overload(rq_of_rt_rq(rt_rq));
                rt_rq->overloaded = 0;
        }
}

static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
        struct task_struct *p;

        if (!rt_entity_is_task(rt_se))
                return;

        p = rt_task_of(rt_se);
        rt_rq = &rq_of_rt_rq(rt_rq)->rt;

        rt_rq->rt_nr_total++;
        if (p->nr_cpus_allowed > 1)
                rt_rq->rt_nr_migratory++;

        update_rt_migration(rt_rq);
}

static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
        struct task_struct *p;

        if (!rt_entity_is_task(rt_se))
                return;

        p = rt_task_of(rt_se);
        rt_rq = &rq_of_rt_rq(rt_rq)->rt;

        rt_rq->rt_nr_total--;
        if (p->nr_cpus_allowed > 1)
                rt_rq->rt_nr_migratory--;

        update_rt_migration(rt_rq);
}

static inline int has_pushable_tasks(struct rq *rq)
{
        return !plist_head_empty(&rq->rt.pushable_tasks);
}

static DEFINE_PER_CPU(struct callback_head, rt_push_head);
static DEFINE_PER_CPU(struct callback_head, rt_pull_head);

static void push_rt_tasks(struct rq *);
static void pull_rt_task(struct rq *);

static inline void rt_queue_push_tasks(struct rq *rq)
{
        if (!has_pushable_tasks(rq))
                return;

        queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks);
}

static inline void rt_queue_pull_task(struct rq *rq)
{
        queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu), pull_rt_task);
}

static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
        plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
        plist_node_init(&p->pushable_tasks, p->prio);
        plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);

        /* Update the highest prio pushable task */
        if (p->prio < rq->rt.highest_prio.next)
                rq->rt.highest_prio.next = p->prio;
}

static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
        plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);

        /* Update the new highest prio pushable task */
        if (has_pushable_tasks(rq)) {
                p = plist_first_entry(&rq->rt.pushable_tasks,
                                      struct task_struct, pushable_tasks);
                rq->rt.highest_prio.next = p->prio;
        } else {
                rq->rt.highest_prio.next = MAX_RT_PRIO-1;
        }
}

#else

static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
}

static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
}

static inline
void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
}

static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
{
        return false;
}

static inline void pull_rt_task(struct rq *this_rq)
{
}

static inline void rt_queue_push_tasks(struct rq *rq)
{
}
#endif /* CONFIG_SMP */

static void enqueue_top_rt_rq(struct rt_rq *rt_rq);
static void dequeue_top_rt_rq(struct rt_rq *rt_rq, unsigned int count);

static inline int on_rt_rq(struct sched_rt_entity *rt_se)
{
        return rt_se->on_rq;
}

#ifdef CONFIG_UCLAMP_TASK
/*
 * Verify the fitness of task @p to run on @cpu taking into account the uclamp
 * settings.
 *
 * This check is only important for heterogeneous systems where uclamp_min value
 * is higher than the capacity of a @cpu. For non-heterogeneous system this
 * function will always return true.
 *
 * The function will return true if the capacity of the @cpu is >= the
 * uclamp_min and false otherwise.
 *
 * Note that uclamp_min will be clamped to uclamp_max if uclamp_min
 * > uclamp_max.
 */
static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu)
{
        unsigned int min_cap;
        unsigned int max_cap;
        unsigned int cpu_cap;

        /* Only heterogeneous systems can benefit from this check */
        if (!static_branch_unlikely(&sched_asym_cpucapacity))
                return true;

        min_cap = uclamp_eff_value(p, UCLAMP_MIN);
        max_cap = uclamp_eff_value(p, UCLAMP_MAX);

        cpu_cap = capacity_orig_of(cpu);

        return cpu_cap >= min(min_cap, max_cap);
}
#else
static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu)
{
        return true;
}
#endif

#ifdef CONFIG_RT_GROUP_SCHED

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
	if (!rt_rq->tg)
		return RUNTIME_INF;

	return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
	return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
}

typedef struct task_group *rt_rq_iter_t;

static inline struct task_group *next_task_group(struct task_group *tg)
{
	do {
		tg = list_entry_rcu(tg->list.next,
			typeof(struct task_group), list);
	} while (&tg->list != &task_groups && task_group_is_autogroup(tg));

	if (&tg->list == &task_groups)
		tg = NULL;

	return tg;
}

#define for_each_rt_rq(rt_rq, iter, rq)					\
	for (iter = container_of(&task_groups, typeof(*iter), list);	\
		(iter = next_task_group(iter)) &&			\
		(rt_rq = iter->rt_rq[cpu_of(rq)]);)

#define for_each_sched_rt_entity(rt_se) \
	for (; rt_se; rt_se = rt_se->parent)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
	return rt_se->my_q;
}

static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);
static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);

static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
	struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
	struct rq *rq = rq_of_rt_rq(rt_rq);
	struct sched_rt_entity *rt_se;

	int cpu = cpu_of(rq);

	rt_se = rt_rq->tg->rt_se[cpu];

	if (rt_rq->rt_nr_running) {
		if (!rt_se)
			enqueue_top_rt_rq(rt_rq);
		else if (!on_rt_rq(rt_se))
			enqueue_rt_entity(rt_se, 0);

		if (rt_rq->highest_prio.curr < curr->prio)
			resched_curr(rq);
	}
}

static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
	struct sched_rt_entity *rt_se;
	int cpu = cpu_of(rq_of_rt_rq(rt_rq));

	rt_se = rt_rq->tg->rt_se[cpu];

	if (!rt_se) {
		dequeue_top_rt_rq(rt_rq, rt_rq->rt_nr_running);
		/* Kick cpufreq (see the comment in kernel/sched/sched.h). */
		cpufreq_update_util(rq_of_rt_rq(rt_rq), 0);
	}
	else if (on_rt_rq(rt_se))
		dequeue_rt_entity(rt_se, 0);
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
	return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
}

static int rt_se_boosted(struct sched_rt_entity *rt_se)
{
	struct rt_rq *rt_rq = group_rt_rq(rt_se);
	struct task_struct *p;

	if (rt_rq)
		return !!rt_rq->rt_nr_boosted;

	p = rt_task_of(rt_se);
	return p->prio != p->normal_prio;
}

#ifdef CONFIG_SMP
static inline const struct cpumask *sched_rt_period_mask(void)
{
	return this_rq()->rd->span;
}
#else
static inline const struct cpumask *sched_rt_period_mask(void)
{
	return cpu_online_mask;
}
#endif

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
	return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
	return &rt_rq->tg->rt_bandwidth;
}

#else /* !CONFIG_RT_GROUP_SCHED */

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
	return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
	return ktime_to_ns(def_rt_bandwidth.rt_period);
}

typedef struct rt_rq *rt_rq_iter_t;

#define for_each_rt_rq(rt_rq, iter, rq) \
	for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)

#define for_each_sched_rt_entity(rt_se) \
	for (; rt_se; rt_se = NULL)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
	return NULL;
}

static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

	if (!rt_rq->rt_nr_running)
		return;

	enqueue_top_rt_rq(rt_rq);
	resched_curr(rq);
}

static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
	dequeue_top_rt_rq(rt_rq, rt_rq->rt_nr_running);
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
	return rt_rq->rt_throttled;
}

static inline const struct cpumask *sched_rt_period_mask(void)
{
	return cpu_online_mask;
}

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
	return &cpu_rq(cpu)->rt;
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
	return &def_rt_bandwidth;
}

#endif /* CONFIG_RT_GROUP_SCHED */

bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
{
	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

	return (hrtimer_active(&rt_b->rt_period_timer) ||
		rt_rq->rt_time < rt_b->rt_runtime);
}

#ifdef CONFIG_SMP
/*
 * We ran out of runtime, see if we can borrow some from our neighbours.
 */
static void do_balance_runtime(struct rt_rq *rt_rq)
{
	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
	struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
	int i, weight;
	u64 rt_period;

	weight = cpumask_weight(rd->span);

	raw_spin_lock(&rt_b->rt_runtime_lock);
	rt_period = ktime_to_ns(rt_b->rt_period);
	for_each_cpu(i, rd->span) {
		struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
		s64 diff;

		if (iter == rt_rq)
			continue;

		raw_spin_lock(&iter->rt_runtime_lock);
		/*
		 * Either all rqs have inf runtime and there's nothing to steal
		 * or __disable_runtime() below sets a specific rq to inf to
		 * indicate its been disabled and disallow stealing.
		 */
		if (iter->rt_runtime == RUNTIME_INF)
			goto next;

		/*
		 * From runqueues with spare time, take 1/n part of their
		 * spare time, but no more than our period.
		 */
		diff = iter->rt_runtime - iter->rt_time;
		if (diff > 0) {
			diff = div_u64((u64)diff, weight);
			if (rt_rq->rt_runtime + diff > rt_period)
				diff = rt_period - rt_rq->rt_runtime;
			iter->rt_runtime -= diff;
			rt_rq->rt_runtime += diff;
			if (rt_rq->rt_runtime == rt_period) {
				raw_spin_unlock(&iter->rt_runtime_lock);
				break;
			}
		}
next:
		raw_spin_unlock(&iter->rt_runtime_lock);
	}
	raw_spin_unlock(&rt_b->rt_runtime_lock);
}

/*
 * Ensure this RQ takes back all the runtime it lend to its neighbours.
 */
static void __disable_runtime(struct rq *rq)
{
	struct root_domain *rd = rq->rd;
	rt_rq_iter_t iter;
	struct rt_rq *rt_rq;

	if (unlikely(!scheduler_running))
		return;

	for_each_rt_rq(rt_rq, iter, rq) {
		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
		s64 want;
		int i;

		raw_spin_lock(&rt_b->rt_runtime_lock);
		raw_spin_lock(&rt_rq->rt_runtime_lock);
		/*
		 * Either we're all inf and nobody needs to borrow, or we're
		 * already disabled and thus have nothing to do, or we have
		 * exactly the right amount of runtime to take out.
		 */
		if (rt_rq->rt_runtime == RUNTIME_INF ||
		    rt_rq->rt_runtime == rt_b->rt_runtime)
			goto balanced;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);

		/*
		 * Calculate the difference between what we started out with
		 * and what we current have, that's the amount of runtime
		 * we lend and now have to reclaim.
		 */
		want = rt_b->rt_runtime - rt_rq->rt_runtime;

		/*
		 * Greedy reclaim, take back as much as we can.
		 */
		for_each_cpu(i, rd->span) {
			struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
			s64 diff;

			/*
			 * Can't reclaim from ourselves or disabled runqueues.
			 */
			if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
				continue;

			raw_spin_lock(&iter->rt_runtime_lock);
			if (want > 0) {
				diff = min_t(s64, iter->rt_runtime, want);
				iter->rt_runtime -= diff;
				want -= diff;
			} else {
				iter->rt_runtime -= want;
				want -= want;
			}
			raw_spin_unlock(&iter->rt_runtime_lock);

			if (!want)
				break;
		}

		raw_spin_lock(&rt_rq->rt_runtime_lock);
		/*
		 * We cannot be left wanting - that would mean some runtime
		 * leaked out of the system.
		 */
		BUG_ON(want);
balanced:
		/*
		 * Disable all the borrow logic by pretending we have inf
		 * runtime - in which case borrowing doesn't make sense.
		 */
		rt_rq->rt_runtime = RUNTIME_INF;
		rt_rq->rt_throttled = 0;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		raw_spin_unlock(&rt_b->rt_runtime_lock);

		/* Make rt_rq available for pick_next_task() */
		sched_rt_rq_enqueue(rt_rq);
	}
}

static void __enable_runtime(struct rq *rq)
{
	rt_rq_iter_t iter;
	struct rt_rq *rt_rq;

	if (unlikely(!scheduler_running))
		return;

	/*
	 * Reset each runqueue's bandwidth settings
	 */
	for_each_rt_rq(rt_rq, iter, rq) {
		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

		raw_spin_lock(&rt_b->rt_runtime_lock);
		raw_spin_lock(&rt_rq->rt_runtime_lock);
		rt_rq->rt_runtime = rt_b->rt_runtime;
		rt_rq->rt_time = 0;
		rt_rq->rt_throttled = 0;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		raw_spin_unlock(&rt_b->rt_runtime_lock);
	}
}

static void balance_runtime(struct rt_rq *rt_rq)
{
	if (!sched_feat(RT_RUNTIME_SHARE))
		return;

	if (rt_rq->rt_time > rt_rq->rt_runtime) {
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		do_balance_runtime(rt_rq);
		raw_spin_lock(&rt_rq->rt_runtime_lock);
	}
}
#else /* !CONFIG_SMP */
static inline void balance_runtime(struct rt_rq *rt_rq) {}
#endif /* CONFIG_SMP */
|
|
|
|
static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
|
|
{
|
|
int i, idle = 1, throttled = 0;
|
|
const struct cpumask *span;
|
|
|
|
span = sched_rt_period_mask();
|
|
#ifdef CONFIG_RT_GROUP_SCHED
|
|
/*
|
|
* FIXME: isolated CPUs should really leave the root task group,
|
|
* whether they are isolcpus or were isolated via cpusets, lest
|
|
* the timer run on a CPU which does not service all runqueues,
|
|
* potentially leaving other CPUs indefinitely throttled. If
|
|
* isolation is really required, the user will turn the throttle
|
|
* off to kill the perturbations it causes anyway. Meanwhile,
|
|
* this maintains functionality for boot and/or troubleshooting.
|
|
*/
|
|
if (rt_b == &root_task_group.rt_bandwidth)
|
|
span = cpu_online_mask;
|
|
#endif
|
|
for_each_cpu(i, span) {
|
|
int enqueue = 0;
|
|
struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
|
|
struct rq *rq = rq_of_rt_rq(rt_rq);
|
|
struct rq_flags rf;
|
|
int skip;
|
|
|
|
/*
|
|
* When span == cpu_online_mask, taking each rq->lock
|
|
* can be time-consuming. Try to avoid it when possible.
|
|
*/
|
|
raw_spin_lock(&rt_rq->rt_runtime_lock);
|
|
if (!sched_feat(RT_RUNTIME_SHARE) && rt_rq->rt_runtime != RUNTIME_INF)
|
|
rt_rq->rt_runtime = rt_b->rt_runtime;
|
|
skip = !rt_rq->rt_time && !rt_rq->rt_nr_running;
|
|
raw_spin_unlock(&rt_rq->rt_runtime_lock);
|
|
if (skip)
|
|
continue;
|
|
|
|
rq_lock(rq, &rf);
|
|
update_rq_clock(rq);
|
|
|
|
if (rt_rq->rt_time) {
|
|
u64 runtime;
|
|
|
|
raw_spin_lock(&rt_rq->rt_runtime_lock);
|
|
if (rt_rq->rt_throttled)
|
|
balance_runtime(rt_rq);
|
|
runtime = rt_rq->rt_runtime;
|
|
rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
|
|
if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
|
|
rt_rq->rt_throttled = 0;
|
|
enqueue = 1;
|
|
|
|
/*
|
|
* When we're idle and a woken (rt) task is
|
|
* throttled check_preempt_curr() will set
|
|
* skip_update and the time between the wakeup
|
|
* and this unthrottle will get accounted as
|
|
* 'runtime'.
|
|
*/
|
|
if (rt_rq->rt_nr_running && rq->curr == rq->idle)
|
|
rq_clock_cancel_skipupdate(rq);
|
|
}
|
|
if (rt_rq->rt_time || rt_rq->rt_nr_running)
|
|
idle = 0;
|
|
raw_spin_unlock(&rt_rq->rt_runtime_lock);
|
|
} else if (rt_rq->rt_nr_running) {
|
|
idle = 0;
|
|
if (!rt_rq_throttled(rt_rq))
|
|
enqueue = 1;
|
|
}
|
|
if (rt_rq->rt_throttled)
|
|
throttled = 1;
|
|
|
|
if (enqueue)
|
|
sched_rt_rq_enqueue(rt_rq);
|
|
rq_unlock(rq, &rf);
|
|
}
|
|
|
|
if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
|
|
return 1;
|
|
|
|
return idle;
|
|
}
|
|
|
|
static inline int rt_se_prio(struct sched_rt_entity *rt_se)
|
|
{
|
|
#ifdef CONFIG_RT_GROUP_SCHED
|
|
struct rt_rq *rt_rq = group_rt_rq(rt_se);
|
|
|
|
if (rt_rq)
|
|
return rt_rq->highest_prio.curr;
|
|
#endif
|
|
|
|
return rt_task_of(rt_se)->prio;
|
|
}
|
|
|
|
static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
|
|
{
|
|
u64 runtime = sched_rt_runtime(rt_rq);
|
|
|
|
if (rt_rq->rt_throttled)
|
|
return rt_rq_throttled(rt_rq);
|
|
|
|
if (runtime >= sched_rt_period(rt_rq))
|
|
return 0;
|
|
|
|
balance_runtime(rt_rq);
|
|
runtime = sched_rt_runtime(rt_rq);
|
|
if (runtime == RUNTIME_INF)
|
|
return 0;
|
|
|
|
if (rt_rq->rt_time > runtime) {
|
|
struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
|
|
|
|
/*
|
|
* Don't actually throttle groups that have no runtime assigned
|
|
* but accrue some time due to boosting.
|
|
*/
|
|
if (likely(rt_b->rt_runtime)) {
|
|
rt_rq->rt_throttled = 1;
|
|
printk_deferred_once("sched: RT throttling activated\n");
|
|
|
|
trace_android_vh_dump_throttled_rt_tasks(
|
|
raw_smp_processor_id(),
|
|
rq_clock(rq_of_rt_rq(rt_rq)),
|
|
sched_rt_period(rt_rq),
|
|
runtime,
|
|
hrtimer_get_expires_ns(&rt_b->rt_period_timer));
|
|
} else {
|
|
/*
|
|
* In case we did anyway, make it go away,
|
|
* replenishment is a joke, since it will replenish us
|
|
* with exactly 0 ns.
|
|
*/
|
|
rt_rq->rt_time = 0;
|
|
}
|
|
|
|
if (rt_rq_throttled(rt_rq)) {
|
|
sched_rt_rq_dequeue(rt_rq);
|
|
return 1;
|
|
}
|
|
}
|
|
|
|
return 0;
|
|
}
|
|
|
|
/*
|
|
* Update the current task's runtime statistics. Skip current tasks that
|
|
* are not in our scheduling class.
|
|
*/
|
|
static void update_curr_rt(struct rq *rq)
|
|
{
|
|
struct task_struct *curr = rq->curr;
|
|
struct sched_rt_entity *rt_se = &curr->rt;
|
|
u64 delta_exec;
|
|
u64 now;
|
|
|
|
if (curr->sched_class != &rt_sched_class)
|
|
return;
|
|
|
|
now = rq_clock_task(rq);
|
|
delta_exec = now - curr->se.exec_start;
|
|
if (unlikely((s64)delta_exec <= 0))
|
|
return;
|
|
|
|
schedstat_set(curr->se.statistics.exec_max,
|
|
max(curr->se.statistics.exec_max, delta_exec));
|
|
|
|
curr->se.sum_exec_runtime += delta_exec;
|
|
account_group_exec_runtime(curr, delta_exec);
|
|
|
|
curr->se.exec_start = now;
|
|
cgroup_account_cputime(curr, delta_exec);
|
|
|
|
if (!rt_bandwidth_enabled())
|
|
return;
|
|
|
|
for_each_sched_rt_entity(rt_se) {
|
|
struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
|
|
int exceeded;
|
|
|
|
if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
|
|
raw_spin_lock(&rt_rq->rt_runtime_lock);
|
|
rt_rq->rt_time += delta_exec;
|
|
exceeded = sched_rt_runtime_exceeded(rt_rq);
|
|
if (exceeded)
|
|
resched_curr(rq);
|
|
raw_spin_unlock(&rt_rq->rt_runtime_lock);
|
|
if (exceeded)
|
|
do_start_rt_bandwidth(sched_rt_bandwidth(rt_rq));
|
|
}
|
|
}
|
|
}
|
|
|
|
static void
|
|
dequeue_top_rt_rq(struct rt_rq *rt_rq, unsigned int count)
|
|
{
|
|
struct rq *rq = rq_of_rt_rq(rt_rq);
|
|
|
|
BUG_ON(&rq->rt != rt_rq);
|
|
|
|
if (!rt_rq->rt_queued)
|
|
return;
|
|
|
|
BUG_ON(!rq->nr_running);
|
|
|
|
sub_nr_running(rq, count);
|
|
rt_rq->rt_queued = 0;
|
|
|
|
}
|
|
|
|
static void
|
|
enqueue_top_rt_rq(struct rt_rq *rt_rq)
|
|
{
|
|
struct rq *rq = rq_of_rt_rq(rt_rq);
|
|
|
|
BUG_ON(&rq->rt != rt_rq);
|
|
|
|
if (rt_rq->rt_queued)
|
|
return;
|
|
|
|
if (rt_rq_throttled(rt_rq))
|
|
return;
|
|
|
|
if (rt_rq->rt_nr_running) {
|
|
add_nr_running(rq, rt_rq->rt_nr_running);
|
|
rt_rq->rt_queued = 1;
|
|
}
|
|
|
|
/* Kick cpufreq (see the comment in kernel/sched/sched.h). */
|
|
cpufreq_update_util(rq, 0);
|
|
}
|
|
|
|
#if defined CONFIG_SMP
|
|
|
|
static void
|
|
inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
|
|
{
|
|
struct rq *rq = rq_of_rt_rq(rt_rq);
|
|
|
|
#ifdef CONFIG_RT_GROUP_SCHED
|
|
/*
|
|
* Change rq's cpupri only if rt_rq is the top queue.
|
|
*/
|
|
if (&rq->rt != rt_rq)
|
|
return;
|
|
#endif
|
|
if (rq->online && prio < prev_prio)
|
|
cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
|
|
}
|
|
|
|
static void
|
|
dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
|
|
{
|
|
struct rq *rq = rq_of_rt_rq(rt_rq);
|
|
|
|
#ifdef CONFIG_RT_GROUP_SCHED
|
|
/*
|
|
* Change rq's cpupri only if rt_rq is the top queue.
|
|
*/
|
|
if (&rq->rt != rt_rq)
|
|
return;
|
|
#endif
|
|
if (rq->online && rt_rq->highest_prio.curr != prev_prio)
|
|
cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
|
|
}
|
|
|
|
#else /* CONFIG_SMP */
|
|
|
|
static inline
|
|
void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
|
|
static inline
|
|
void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
|
|
|
|
#endif /* CONFIG_SMP */
|
|
|
|
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
|
|
static void
|
|
inc_rt_prio(struct rt_rq *rt_rq, int prio)
|
|
{
|
|
int prev_prio = rt_rq->highest_prio.curr;
|
|
|
|
if (prio < prev_prio)
|
|
rt_rq->highest_prio.curr = prio;
|
|
|
|
inc_rt_prio_smp(rt_rq, prio, prev_prio);
|
|
}
|
|
|
|
static void
|
|
dec_rt_prio(struct rt_rq *rt_rq, int prio)
|
|
{
|
|
int prev_prio = rt_rq->highest_prio.curr;
|
|
|
|
if (rt_rq->rt_nr_running) {
|
|
|
|
WARN_ON(prio < prev_prio);
|
|
|
|
/*
|
|
* This may have been our highest task, and therefore
|
|
* we may have some recomputation to do
|
|
*/
|
|
if (prio == prev_prio) {
|
|
struct rt_prio_array *array = &rt_rq->active;
|
|
|
|
rt_rq->highest_prio.curr =
|
|
sched_find_first_bit(array->bitmap);
|
|
}
|
|
|
|
} else {
|
|
rt_rq->highest_prio.curr = MAX_RT_PRIO-1;
|
|
}
|
|
|
|
dec_rt_prio_smp(rt_rq, prio, prev_prio);
|
|
}
|
|
|
|
#else
|
|
|
|
static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
|
|
static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
|
|
|
|
#endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */
|
|
|
|
#ifdef CONFIG_RT_GROUP_SCHED
|
|
|
|
static void
|
|
inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
|
|
{
|
|
if (rt_se_boosted(rt_se))
|
|
rt_rq->rt_nr_boosted++;
|
|
|
|
if (rt_rq->tg)
|
|
start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
|
|
}
|
|
|
|
static void
|
|
dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
|
|
{
|
|
if (rt_se_boosted(rt_se))
|
|
rt_rq->rt_nr_boosted--;
|
|
|
|
WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
|
|
}
|
|
|
|
#else /* CONFIG_RT_GROUP_SCHED */
|
|
|
|
static void
|
|
inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
|
|
{
|
|
start_rt_bandwidth(&def_rt_bandwidth);
|
|
}
|
|
|
|
static inline
|
|
void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
|
|
|
|
#endif /* CONFIG_RT_GROUP_SCHED */
|
|
|
|
static inline
|
|
unsigned int rt_se_nr_running(struct sched_rt_entity *rt_se)
|
|
{
|
|
struct rt_rq *group_rq = group_rt_rq(rt_se);
|
|
|
|
if (group_rq)
|
|
return group_rq->rt_nr_running;
|
|
else
|
|
return 1;
|
|
}
|
|
|
|
static inline
|
|
unsigned int rt_se_rr_nr_running(struct sched_rt_entity *rt_se)
|
|
{
|
|
struct rt_rq *group_rq = group_rt_rq(rt_se);
|
|
struct task_struct *tsk;
|
|
|
|
if (group_rq)
|
|
return group_rq->rr_nr_running;
|
|
|
|
tsk = rt_task_of(rt_se);
|
|
|
|
return (tsk->policy == SCHED_RR) ? 1 : 0;
|
|
}
|
|
|
|
static inline
|
|
void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
|
|
{
|
|
int prio = rt_se_prio(rt_se);
|
|
|
|
WARN_ON(!rt_prio(prio));
|
|
rt_rq->rt_nr_running += rt_se_nr_running(rt_se);
|
|
rt_rq->rr_nr_running += rt_se_rr_nr_running(rt_se);
|
|
|
|
inc_rt_prio(rt_rq, prio);
|
|
inc_rt_migration(rt_se, rt_rq);
|
|
inc_rt_group(rt_se, rt_rq);
|
|
}
|
|
|
|
static inline
|
|
void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
|
|
{
|
|
WARN_ON(!rt_prio(rt_se_prio(rt_se)));
|
|
WARN_ON(!rt_rq->rt_nr_running);
|
|
rt_rq->rt_nr_running -= rt_se_nr_running(rt_se);
|
|
rt_rq->rr_nr_running -= rt_se_rr_nr_running(rt_se);
|
|
|
|
dec_rt_prio(rt_rq, rt_se_prio(rt_se));
|
|
dec_rt_migration(rt_se, rt_rq);
|
|
dec_rt_group(rt_se, rt_rq);
|
|
}
|
|
|
|
/*
|
|
* Change rt_se->run_list location unless SAVE && !MOVE
|
|
*
|
|
* assumes ENQUEUE/DEQUEUE flags match
|
|
*/
|
|
static inline bool move_entity(unsigned int flags)
|
|
{
|
|
if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE)
|
|
return false;
|
|
|
|
return true;
|
|
}
|
|
|
|
static void __delist_rt_entity(struct sched_rt_entity *rt_se, struct rt_prio_array *array)
|
|
{
|
|
list_del_init(&rt_se->run_list);
|
|
|
|
if (list_empty(array->queue + rt_se_prio(rt_se)))
|
|
__clear_bit(rt_se_prio(rt_se), array->bitmap);
|
|
|
|
rt_se->on_list = 0;
|
|
}
|
|
|
|
static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
|
|
{
|
|
struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
|
|
struct rt_prio_array *array = &rt_rq->active;
|
|
struct rt_rq *group_rq = group_rt_rq(rt_se);
|
|
struct list_head *queue = array->queue + rt_se_prio(rt_se);
|
|
|
|
/*
|
|
* Don't enqueue the group if its throttled, or when empty.
|
|
* The latter is a consequence of the former when a child group
|
|
* get throttled and the current group doesn't have any other
|
|
* active members.
|
|
*/
|
|
if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running)) {
|
|
if (rt_se->on_list)
|
|
__delist_rt_entity(rt_se, array);
|
|
return;
|
|
}
|
|
|
|
if (move_entity(flags)) {
|
|
WARN_ON_ONCE(rt_se->on_list);
|
|
if (flags & ENQUEUE_HEAD)
|
|
list_add(&rt_se->run_list, queue);
|
|
else
|
|
list_add_tail(&rt_se->run_list, queue);
|
|
|
|
__set_bit(rt_se_prio(rt_se), array->bitmap);
|
|
rt_se->on_list = 1;
|
|
}
|
|
rt_se->on_rq = 1;
|
|
|
|
inc_rt_tasks(rt_se, rt_rq);
|
|
}
|
|
|
|
static void __dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
|
|
{
|
|
struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
|
|
struct rt_prio_array *array = &rt_rq->active;
|
|
|
|
if (move_entity(flags)) {
|
|
WARN_ON_ONCE(!rt_se->on_list);
|
|
__delist_rt_entity(rt_se, array);
|
|
}
|
|
rt_se->on_rq = 0;
|
|
|
|
dec_rt_tasks(rt_se, rt_rq);
|
|
}
|
|
|
|
/*
|
|
* Because the prio of an upper entry depends on the lower
|
|
* entries, we must remove entries top - down.
|
|
*/
|
|
static void dequeue_rt_stack(struct sched_rt_entity *rt_se, unsigned int flags)
|
|
{
|
|
struct sched_rt_entity *back = NULL;
|
|
unsigned int rt_nr_running;
|
|
|
|
for_each_sched_rt_entity(rt_se) {
|
|
rt_se->back = back;
|
|
back = rt_se;
|
|
}
|
|
|
|
rt_nr_running = rt_rq_of_se(back)->rt_nr_running;
|
|
|
|
for (rt_se = back; rt_se; rt_se = rt_se->back) {
|
|
if (on_rt_rq(rt_se))
|
|
__dequeue_rt_entity(rt_se, flags);
|
|
}
|
|
|
|
dequeue_top_rt_rq(rt_rq_of_se(back), rt_nr_running);
|
|
}
|
|
|
|
static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
|
|
{
|
|
struct rq *rq = rq_of_rt_se(rt_se);
|
|
|
|
dequeue_rt_stack(rt_se, flags);
|
|
for_each_sched_rt_entity(rt_se)
|
|
__enqueue_rt_entity(rt_se, flags);
|
|
enqueue_top_rt_rq(&rq->rt);
|
|
}
|
|
|
|
static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
|
|
{
|
|
struct rq *rq = rq_of_rt_se(rt_se);
|
|
|
|
dequeue_rt_stack(rt_se, flags);
|
|
|
|
for_each_sched_rt_entity(rt_se) {
|
|
struct rt_rq *rt_rq = group_rt_rq(rt_se);
|
|
|
|
if (rt_rq && rt_rq->rt_nr_running)
|
|
__enqueue_rt_entity(rt_se, flags);
|
|
}
|
|
enqueue_top_rt_rq(&rq->rt);
|
|
}
|
|
|
|
#ifdef CONFIG_SMP
|
|
static inline bool should_honor_rt_sync(struct rq *rq, struct task_struct *p,
|
|
bool sync)
|
|
{
|
|
/*
|
|
* If the waker is CFS, then an RT sync wakeup would preempt the waker
|
|
* and force it to run for a likely small time after the RT wakee is
|
|
* done. So, only honor RT sync wakeups from RT wakers.
|
|
*/
|
|
return sync && task_has_rt_policy(rq->curr) &&
|
|
p->prio <= rq->rt.highest_prio.next &&
|
|
rq->rt.rt_nr_running <= 2;
|
|
}
|
|
#else
|
|
static inline bool should_honor_rt_sync(struct rq *rq, struct task_struct *p,
|
|
bool sync)
|
|
{
|
|
return 0;
|
|
}
|
|
#endif
|
|
|
|
/*
|
|
* Adding/removing a task to/from a priority array:
|
|
*/
|
|
static void
|
|
enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
|
|
{
|
|
struct sched_rt_entity *rt_se = &p->rt;
|
|
bool sync = !!(flags & ENQUEUE_WAKEUP_SYNC);
|
|
|
|
if (flags & ENQUEUE_WAKEUP)
|
|
rt_se->timeout = 0;
|
|
|
|
enqueue_rt_entity(rt_se, flags);
|
|
|
|
if (!task_current(rq, p) && p->nr_cpus_allowed > 1 &&
|
|
!should_honor_rt_sync(rq, p, sync))
|
|
enqueue_pushable_task(rq, p);
|
|
}
|
|
|
|
static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
|
|
{
|
|
struct sched_rt_entity *rt_se = &p->rt;
|
|
|
|
update_curr_rt(rq);
|
|
dequeue_rt_entity(rt_se, flags);
|
|
|
|
dequeue_pushable_task(rq, p);
|
|
}
|
|
|
|
/*
|
|
* Put task to the head or the end of the run list without the overhead of
|
|
* dequeue followed by enqueue.
|
|
*/
|
|
static void
|
|
requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
|
|
{
|
|
if (on_rt_rq(rt_se)) {
|
|
struct rt_prio_array *array = &rt_rq->active;
|
|
struct list_head *queue = array->queue + rt_se_prio(rt_se);
|
|
|
|
if (head)
|
|
list_move(&rt_se->run_list, queue);
|
|
else
|
|
list_move_tail(&rt_se->run_list, queue);
|
|
}
|
|
}
|
|
|
|
static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
|
|
{
|
|
struct sched_rt_entity *rt_se = &p->rt;
|
|
struct rt_rq *rt_rq;
|
|
|
|
for_each_sched_rt_entity(rt_se) {
|
|
rt_rq = rt_rq_of_se(rt_se);
|
|
requeue_rt_entity(rt_rq, rt_se, head);
|
|
}
|
|
}
|
|
|
|
static void yield_task_rt(struct rq *rq)
|
|
{
|
|
requeue_task_rt(rq, rq->curr, 0);
|
|
}
|
|
|
|
#ifdef CONFIG_SMP
|
|
static int find_lowest_rq(struct task_struct *task);
|
|
|
|
#ifdef CONFIG_RT_SOFTINT_OPTIMIZATION
|
|
/*
|
|
* Return whether the task on the given cpu is currently non-preemptible
|
|
* while handling a potentially long softint, or if the task is likely
|
|
* to block preemptions soon because it is a ksoftirq thread that is
|
|
* handling slow softints.
|
|
*/
|
|
bool
|
|
task_may_not_preempt(struct task_struct *task, int cpu)
|
|
{
|
|
__u32 softirqs = per_cpu(active_softirqs, cpu) |
|
|
local_softirq_pending();
|
|
|
|
struct task_struct *cpu_ksoftirqd = per_cpu(ksoftirqd, cpu);
|
|
return ((softirqs & LONG_SOFTIRQ_MASK) &&
|
|
(task == cpu_ksoftirqd ||
|
|
task_thread_info(task)->preempt_count & SOFTIRQ_MASK));
|
|
}
|
|
EXPORT_SYMBOL_GPL(task_may_not_preempt);
|
|
#endif /* CONFIG_RT_SOFTINT_OPTIMIZATION */
|
|
|
|
static int
|
|
select_task_rq_rt(struct task_struct *p, int cpu, int flags)
|
|
{
|
|
struct task_struct *curr;
|
|
struct rq *rq;
|
|
struct rq *this_cpu_rq;
|
|
bool test;
|
|
int target_cpu = -1;
|
|
bool may_not_preempt;
|
|
bool sync = !!(flags & WF_SYNC);
|
|
int this_cpu;
|
|
|
|
trace_android_rvh_select_task_rq_rt(p, cpu, flags & 0xF,
|
|
flags, &target_cpu);
|
|
if (target_cpu >= 0)
|
|
return target_cpu;
|
|
|
|
/* For anything but wake ups, just return the task_cpu */
|
|
if (!(flags & (WF_TTWU | WF_FORK)))
|
|
goto out;
|
|
|
|
rq = cpu_rq(cpu);
|
|
|
|
rcu_read_lock();
|
|
curr = READ_ONCE(rq->curr); /* unlocked access */
|
|
this_cpu = smp_processor_id();
|
|
this_cpu_rq = cpu_rq(this_cpu);
|
|
|
|
/*
|
|
* If the current task on @p's runqueue is a softirq task,
|
|
* it may run without preemption for a time that is
|
|
* ill-suited for a waiting RT task. Therefore, try to
|
|
* wake this RT task on another runqueue.
|
|
*
|
|
* Also, if the current task on @p's runqueue is an RT task, then
|
|
* try to see if we can wake this RT task up on another
|
|
* runqueue. Otherwise simply start this RT task
|
|
* on its current runqueue.
|
|
*
|
|
* We want to avoid overloading runqueues. If the woken
|
|
* task is a higher priority, then it will stay on this CPU
|
|
* and the lower prio task should be moved to another CPU.
|
|
* Even though this will probably make the lower prio task
|
|
* lose its cache, we do not want to bounce a higher task
|
|
* around just because it gave up its CPU, perhaps for a
|
|
* lock?
|
|
*
|
|
* For equal prio tasks, we just let the scheduler sort it out.
|
|
*
|
|
* Otherwise, just let it ride on the affined RQ and the
|
|
* post-schedule router will push the preempted task away
|
|
*
|
|
* This test is optimistic, if we get it wrong the load-balancer
|
|
* will have to sort it out.
|
|
*
|
|
* We take into account the capacity of the CPU to ensure it fits the
|
|
* requirement of the task - which is only important on heterogeneous
|
|
* systems like big.LITTLE.
|
|
*/
|
|
may_not_preempt = task_may_not_preempt(curr, cpu);
|
|
test = (curr && (may_not_preempt ||
|
|
(unlikely(rt_task(curr)) &&
|
|
(curr->nr_cpus_allowed < 2 || curr->prio <= p->prio))));
|
|
|
|
/*
|
|
* Respect the sync flag as long as the task can run on this CPU.
|
|
*/
|
|
if (should_honor_rt_sync(this_cpu_rq, p, sync) &&
|
|
cpumask_test_cpu(this_cpu, p->cpus_ptr)) {
|
|
cpu = this_cpu;
|
|
goto out_unlock;
|
|
}
|
|
|
|
if (test || !rt_task_fits_capacity(p, cpu)) {
|
|
int target = find_lowest_rq(p);
|
|
|
|
/*
|
|
* Bail out if we were forcing a migration to find a better
|
|
* fitting CPU but our search failed.
|
|
*/
|
|
if (!test && target != -1 && !rt_task_fits_capacity(p, target))
|
|
goto out_unlock;
|
|
|
|
/*
|
|
* If cpu is non-preemptible, prefer remote cpu
|
|
* even if it's running a higher-prio task.
|
|
* Otherwise: Don't bother moving it if the destination CPU is
|
|
* not running a lower priority task.
|
|
*/
|
|
if (target != -1 &&
|
|
(may_not_preempt ||
|
|
p->prio < cpu_rq(target)->rt.highest_prio.curr))
|
|
cpu = target;
|
|
}
|
|
|
|
out_unlock:
|
|
rcu_read_unlock();
|
|
|
|
out:
|
|
return cpu;
|
|
}
|
|
|
|
static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
|
|
{
|
|
/*
|
|
* Current can't be migrated, useless to reschedule,
|
|
* let's hope p can move out.
|
|
*/
|
|
if (rq->curr->nr_cpus_allowed == 1 ||
|
|
!cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
|
|
return;
|
|
|
|
/*
|
|
* p is migratable, so let's not schedule it and
|
|
* see if it is pushed or pulled somewhere else.
|
|
*/
|
|
if (p->nr_cpus_allowed != 1 &&
|
|
cpupri_find(&rq->rd->cpupri, p, NULL))
|
|
return;
|
|
|
|
/*
|
|
* There appear to be other CPUs that can accept
|
|
* the current task but none can run 'p', so lets reschedule
|
|
* to try and push the current task away:
|
|
*/
|
|
requeue_task_rt(rq, p, 1);
|
|
resched_curr(rq);
|
|
}
|
|
|
|
static int balance_rt(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
|
|
{
|
|
if (!on_rt_rq(&p->rt) && need_pull_rt_task(rq, p)) {
|
|
int done = 0;
|
|
|
|
/*
|
|
* This is OK, because current is on_cpu, which avoids it being
|
|
* picked for load-balance and preemption/IRQs are still
|
|
* disabled avoiding further scheduler activity on it and we've
|
|
* not yet started the picking loop.
|
|
*/
|
|
rq_unpin_lock(rq, rf);
|
|
trace_android_rvh_sched_balance_rt(rq, p, &done);
|
|
if (!done)
|
|
pull_rt_task(rq);
|
|
rq_repin_lock(rq, rf);
|
|
}
|
|
|
|
return sched_stop_runnable(rq) || sched_dl_runnable(rq) || sched_rt_runnable(rq);
|
|
}
|
|
#endif /* CONFIG_SMP */
|
|
|
|
/*
|
|
* Preempt the current task with a newly woken task if needed:
|
|
*/
|
|
static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
|
|
{
|
|
if (p->prio < rq->curr->prio) {
|
|
resched_curr(rq);
|
|
return;
|
|
}
|
|
|
|
#ifdef CONFIG_SMP
|
|
/*
|
|
* If:
|
|
*
|
|
* - the newly woken task is of equal priority to the current task
|
|
* - the newly woken task is non-migratable while current is migratable
|
|
* - current will be preempted on the next reschedule
|
|
*
|
|
* we should check to see if current can readily move to a different
|
|
* cpu. If so, we will reschedule to allow the push logic to try
|
|
* to move current somewhere else, making room for our non-migratable
|
|
* task.
|
|
*/
|
|
if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
|
|
check_preempt_equal_prio(rq, p);
|
|
#endif
|
|
}
|
|
|
|
static inline void set_next_task_rt(struct rq *rq, struct task_struct *p, bool first)
|
|
{
|
|
p->se.exec_start = rq_clock_task(rq);
|
|
|
|
/* The running task is never eligible for pushing */
|
|
dequeue_pushable_task(rq, p);
|
|
|
|
if (!first)
|
|
return;
|
|
|
|
/*
|
|
* If prev task was rt, put_prev_task() has already updated the
|
|
* utilization. We only care of the case where we start to schedule a
|
|
* rt task
|
|
*/
|
|
if (rq->curr->sched_class != &rt_sched_class)
|
|
update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0);
|
|
trace_android_rvh_update_rt_rq_load_avg(rq_clock_pelt(rq), rq, p, 0);
|
|
|
|
rt_queue_push_tasks(rq);
|
|
}
|
|
|
|
static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
|
|
struct rt_rq *rt_rq)
|
|
{
|
|
struct rt_prio_array *array = &rt_rq->active;
|
|
struct sched_rt_entity *next = NULL;
|
|
struct list_head *queue;
|
|
int idx;
|
|
|
|
idx = sched_find_first_bit(array->bitmap);
|
|
BUG_ON(idx >= MAX_RT_PRIO);
|
|
|
|
queue = array->queue + idx;
|
|
next = list_entry(queue->next, struct sched_rt_entity, run_list);
|
|
|
|
return next;
|
|
}
|
|
|
|
static struct task_struct *_pick_next_task_rt(struct rq *rq)
|
|
{
|
|
struct sched_rt_entity *rt_se;
|
|
struct rt_rq *rt_rq = &rq->rt;
|
|
|
|
do {
|
|
rt_se = pick_next_rt_entity(rq, rt_rq);
|
|
BUG_ON(!rt_se);
|
|
rt_rq = group_rt_rq(rt_se);
|
|
} while (rt_rq);
|
|
|
|
return rt_task_of(rt_se);
|
|
}
|
|
|
|
static struct task_struct *pick_task_rt(struct rq *rq)
|
|
{
|
|
struct task_struct *p;
|
|
|
|
if (!sched_rt_runnable(rq))
|
|
return NULL;
|
|
|
|
p = _pick_next_task_rt(rq);
|
|
|
|
return p;
|
|
}
|
|
|
|
static struct task_struct *pick_next_task_rt(struct rq *rq)
|
|
{
|
|
struct task_struct *p = pick_task_rt(rq);
|
|
|
|
if (p)
|
|
set_next_task_rt(rq, p, true);
|
|
|
|
return p;
|
|
}
|
|
|
|
static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
|
|
{
|
|
update_curr_rt(rq);
|
|
|
|
update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1);
|
|
trace_android_rvh_update_rt_rq_load_avg(rq_clock_pelt(rq), rq, p, 1);
|
|
|
|
/*
|
|
* The previous task needs to be made eligible for pushing
|
|
* if it is still active
|
|
*/
|
|
if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
|
|
enqueue_pushable_task(rq, p);
|
|
}
|
|
|
|
#ifdef CONFIG_SMP
|
|
|
|
/* Only try algorithms three times */
|
|
#define RT_MAX_TRIES 3
|
|
|
|
static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
|
|
{
|
|
if (!task_running(rq, p) &&
|
|
cpumask_test_cpu(cpu, &p->cpus_mask))
|
|
return 1;
|
|
|
|
return 0;
|
|
}
|
|
|
|
/*
|
|
* Return the highest pushable rq's task, which is suitable to be executed
|
|
* on the CPU, NULL otherwise
|
|
*/
|
|
struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
|
|
{
|
|
struct plist_head *head = &rq->rt.pushable_tasks;
|
|
struct task_struct *p;
|
|
|
|
if (!has_pushable_tasks(rq))
|
|
return NULL;
|
|
|
|
plist_for_each_entry(p, head, pushable_tasks) {
|
|
if (pick_rt_task(rq, p, cpu))
|
|
return p;
|
|
}
|
|
|
|
return NULL;
|
|
}
|
|
EXPORT_SYMBOL_GPL(pick_highest_pushable_task);
|
|
|
|
static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
|
|
|
|
static int find_lowest_rq(struct task_struct *task)
|
|
{
|
|
struct sched_domain *sd;
|
|
struct cpumask *lowest_mask = this_cpu_cpumask_var_ptr(local_cpu_mask);
|
|
int this_cpu = smp_processor_id();
|
|
int cpu = -1;
|
|
int ret;
|
|
|
|
/* Make sure the mask is initialized first */
|
|
if (unlikely(!lowest_mask))
|
|
return -1;
|
|
|
|
if (task->nr_cpus_allowed == 1)
|
|
return -1; /* No other targets possible */
|
|
|
|
/*
|
|
* If we're on asym system ensure we consider the different capacities
|
|
* of the CPUs when searching for the lowest_mask.
|
|
*/
|
|
if (static_branch_unlikely(&sched_asym_cpucapacity)) {
|
|
|
|
ret = cpupri_find_fitness(&task_rq(task)->rd->cpupri,
|
|
task, lowest_mask,
|
|
rt_task_fits_capacity);
|
|
} else {
|
|
|
|
ret = cpupri_find(&task_rq(task)->rd->cpupri,
|
|
task, lowest_mask);
|
|
}
|
|
|
|
trace_android_rvh_find_lowest_rq(task, lowest_mask, ret, &cpu);
|
|
if (cpu >= 0)
|
|
return cpu;
|
|
|
|
if (!ret)
|
|
return -1; /* No targets found */
|
|
|
|
cpu = task_cpu(task);
|
|
|
|
/*
|
|
* At this point we have built a mask of CPUs representing the
|
|
* lowest priority tasks in the system. Now we want to elect
|
|
* the best one based on our affinity and topology.
|
|
*
|
|
* We prioritize the last CPU that the task executed on since
|
|
* it is most likely cache-hot in that location.
|
|
*/
|
|
if (cpumask_test_cpu(cpu, lowest_mask))
|
|
return cpu;
|
|
|
|
/*
|
|
* Otherwise, we consult the sched_domains span maps to figure
|
|
* out which CPU is logically closest to our hot cache data.
|
|
*/
|
|
if (!cpumask_test_cpu(this_cpu, lowest_mask))
|
|
this_cpu = -1; /* Skip this_cpu opt if not among lowest */
|
|
|
|
rcu_read_lock();
|
|
for_each_domain(cpu, sd) {
|
|
if (sd->flags & SD_WAKE_AFFINE) {
|
|
int best_cpu;
|
|
|
|
/*
|
|
* "this_cpu" is cheaper to preempt than a
|
|
* remote processor.
|
|
*/
|
|
if (this_cpu != -1 &&
|
|
cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
|
|
rcu_read_unlock();
|
|
return this_cpu;
|
|
}
|
|
|
|
best_cpu = cpumask_any_and_distribute(lowest_mask,
|
|
sched_domain_span(sd));
|
|
if (best_cpu < nr_cpu_ids) {
|
|
rcu_read_unlock();
|
|
return best_cpu;
|
|
}
|
|
}
|
|
}
|
|
rcu_read_unlock();
|
|
|
|
/*
|
|
* And finally, if there were no matches within the domains
|
|
* just give the caller *something* to work with from the compatible
|
|
* locations.
|
|
*/
|
|
if (this_cpu != -1)
|
|
return this_cpu;
|
|
|
|
cpu = cpumask_any_distribute(lowest_mask);
|
|
if (cpu < nr_cpu_ids)
|
|
return cpu;
|
|
|
|
return -1;
|
|
}
|
|
|
|
/* Will lock the rq it finds */
|
|
static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
|
|
{
|
|
struct rq *lowest_rq = NULL;
|
|
int tries;
|
|
int cpu;
|
|
|
|
for (tries = 0; tries < RT_MAX_TRIES; tries++) {
|
|
cpu = find_lowest_rq(task);
|
|
|
|
if ((cpu == -1) || (cpu == rq->cpu))
|
|
break;
|
|
|
|
lowest_rq = cpu_rq(cpu);
|
|
|
|
if (lowest_rq->rt.highest_prio.curr <= task->prio) {
|
|
/*
|
|
* Target rq has tasks of equal or higher priority,
|
|
* retrying does not release any lock and is unlikely
|
|
* to yield a different result.
|
|
*/
|
|
lowest_rq = NULL;
|
|
break;
|
|
}
|
|
|
|
/* if the prio of this runqueue changed, try again */
|
|
if (double_lock_balance(rq, lowest_rq)) {
|
|
/*
|
|
* We had to unlock the run queue. In
|
|
* the mean time, task could have
|
|
* migrated already or had its affinity changed.
|
|
* Also make sure that it wasn't scheduled on its rq.
|
|
*/
|
|
if (unlikely(task_rq(task) != rq ||
|
|
!cpumask_test_cpu(lowest_rq->cpu, &task->cpus_mask) ||
|
|
task_running(rq, task) ||
|
|
!rt_task(task) ||
|
|
!task_on_rq_queued(task))) {
|
|
|
|
double_unlock_balance(rq, lowest_rq);
|
|
lowest_rq = NULL;
|
|
break;
|
|
}
|
|
}
|
|
|
|
/* If this rq is still suitable use it. */
|
|
if (lowest_rq->rt.highest_prio.curr > task->prio)
|
|
break;
|
|
|
|
/* try again */
|
|
double_unlock_balance(rq, lowest_rq);
|
|
lowest_rq = NULL;
|
|
}
|
|
|
|
return lowest_rq;
|
|
}
|
|
|
|
static struct task_struct *pick_next_pushable_task(struct rq *rq)
|
|
{
|
|
struct task_struct *p;
|
|
|
|
if (!has_pushable_tasks(rq))
|
|
return NULL;
|
|
|
|
p = plist_first_entry(&rq->rt.pushable_tasks,
|
|
struct task_struct, pushable_tasks);
|
|
|
|
BUG_ON(rq->cpu != task_cpu(p));
|
|
BUG_ON(task_current(rq, p));
|
|
BUG_ON(p->nr_cpus_allowed <= 1);
|
|
|
|
BUG_ON(!task_on_rq_queued(p));
|
|
BUG_ON(!rt_task(p));
|
|
|
|
return p;
|
|
}
|
|
|
|
/*
|
|
* If the current CPU has more than one RT task, see if the non
|
|
* running task can migrate over to a CPU that is running a task
|
|
* of lesser priority.
|
|
*/
|
|
static int push_rt_task(struct rq *rq, bool pull)
|
|
{
|
|
struct task_struct *next_task;
|
|
struct rq *lowest_rq;
|
|
int ret = 0;
|
|
|
|
if (!rq->rt.overloaded)
|
|
return 0;
|
|
|
|
next_task = pick_next_pushable_task(rq);
|
|
if (!next_task)
|
|
return 0;
|
|
|
|
retry:
|
|
/*
|
|
* It's possible that the next_task slipped in of
|
|
* higher priority than current. If that's the case
|
|
* just reschedule current.
|
|
*/
|
|
if (unlikely(next_task->prio < rq->curr->prio)) {
|
|
resched_curr(rq);
|
|
return 0;
|
|
}
|
|
|
|
if (is_migration_disabled(next_task)) {
|
|
struct task_struct *push_task = NULL;
|
|
int cpu;
|
|
|
|
if (!pull || rq->push_busy)
|
|
return 0;
|
|
|
|
/*
|
|
* Invoking find_lowest_rq() on anything but an RT task doesn't
|
|
* make sense. Per the above priority check, curr has to
|
|
* be of higher priority than next_task, so no need to
|
|
* reschedule when bailing out.
|
|
*
|
|
* Note that the stoppers are masqueraded as SCHED_FIFO
|
|
* (cf. sched_set_stop_task()), so we can't rely on rt_task().
|
|
*/
|
|
if (rq->curr->sched_class != &rt_sched_class)
|
|
return 0;
|
|
|
|
cpu = find_lowest_rq(rq->curr);
|
|
if (cpu == -1 || cpu == rq->cpu)
|
|
return 0;
|
|
|
|
/*
|
|
* Given we found a CPU with lower priority than @next_task,
|
|
* therefore it should be running. However we cannot migrate it
|
|
* to this other CPU, instead attempt to push the current
|
|
* running task on this CPU away.
|
|
*/
|
|
push_task = get_push_task(rq);
|
|
if (push_task) {
|
|
raw_spin_rq_unlock(rq);
|
|
stop_one_cpu_nowait(rq->cpu, push_cpu_stop,
|
|
push_task, &rq->push_work);
|
|
raw_spin_rq_lock(rq);
|
|
}
|
|
|
|
return 0;
|
|
}
|
|
|
|
if (WARN_ON(next_task == rq->curr))
|
|
return 0;
|
|
|
|
/* We might release rq lock */
|
|
get_task_struct(next_task);
|
|
|
|
/* find_lock_lowest_rq locks the rq if found */
|
|
lowest_rq = find_lock_lowest_rq(next_task, rq);
|
|
if (!lowest_rq) {
|
|
struct task_struct *task;
|
|
/*
|
|
* find_lock_lowest_rq releases rq->lock
|
|
* so it is possible that next_task has migrated.
|
|
*
|
|
* We need to make sure that the task is still on the same
|
|
* run-queue and is also still the next task eligible for
|
|
* pushing.
|
|
*/
|
|
task = pick_next_pushable_task(rq);
|
|
if (task == next_task) {
|
|
/*
|
|
* The task hasn't migrated, and is still the next
|
|
* eligible task, but we failed to find a run-queue
|
|
* to push it to. Do not retry in this case, since
|
|
* other CPUs will pull from us when ready.
|
|
*/
|
|
goto out;
|
|
}
|
|
|
|
if (!task)
|
|
/* No more tasks, just exit */
|
|
goto out;
|
|
|
|
/*
|
|
* Something has shifted, try again.
|
|
*/
|
|
put_task_struct(next_task);
|
|
next_task = task;
|
|
goto retry;
|
|
}
|
|
|
|
deactivate_task(rq, next_task, 0);
|
|
set_task_cpu(next_task, lowest_rq->cpu);
|
|
activate_task(lowest_rq, next_task, 0);
|
|
resched_curr(lowest_rq);
|
|
ret = 1;
|
|
|
|
double_unlock_balance(rq, lowest_rq);
|
|
out:
|
|
put_task_struct(next_task);
|
|
|
|
return ret;
|
|
}
|
|
|
|
static void push_rt_tasks(struct rq *rq)
|
|
{
|
|
/* push_rt_task will return true if it moved an RT */
|
|
while (push_rt_task(rq, false))
|
|
;
|
|
}
|
|
|
|
#ifdef HAVE_RT_PUSH_IPI
|
|
|
|
/*
|
|
* When a high priority task schedules out from a CPU and a lower priority
|
|
* task is scheduled in, a check is made to see if there's any RT tasks
|
|
* on other CPUs that are waiting to run because a higher priority RT task
|
|
* is currently running on its CPU. In this case, the CPU with multiple RT
|
|
* tasks queued on it (overloaded) needs to be notified that a CPU has opened
|
|
* up that may be able to run one of its non-running queued RT tasks.
|
|
*
|
|
* All CPUs with overloaded RT tasks need to be notified as there is currently
|
|
* no way to know which of these CPUs have the highest priority task waiting
|
|
* to run. Instead of trying to take a spinlock on each of these CPUs,
|
|
* which has shown to cause large latency when done on machines with many
|
|
* CPUs, sending an IPI to the CPUs to have them push off the overloaded
|
|
* RT tasks waiting to run.
|
|
*
|
|
* Just sending an IPI to each of the CPUs is also an issue, as on large
|
|
* count CPU machines, this can cause an IPI storm on a CPU, especially
|
|
* if its the only CPU with multiple RT tasks queued, and a large number
|
|
* of CPUs scheduling a lower priority task at the same time.
|
|
*
|
|
* Each root domain has its own irq work function that can iterate over
|
|
* all CPUs with RT overloaded tasks. Since all CPUs with overloaded RT
|
|
* task must be checked if there's one or many CPUs that are lowering
|
|
* their priority, there's a single irq work iterator that will try to
|
|
* push off RT tasks that are waiting to run.
|
|
*
|
|
* When a CPU schedules a lower priority task, it will kick off the
|
|
* irq work iterator that will jump to each CPU with overloaded RT tasks.
|
|
* As it only takes the first CPU that schedules a lower priority task
|
|
* to start the process, the rto_start variable is incremented and if
|
|
* the atomic result is one, then that CPU will try to take the rto_lock.
|
|
* This prevents high contention on the lock as the process handles all
|
|
* CPUs scheduling lower priority tasks.
|
|
*
|
|
* All CPUs that are scheduling a lower priority task will increment the
|
|
* rt_loop_next variable. This will make sure that the irq work iterator
|
|
* checks all RT overloaded CPUs whenever a CPU schedules a new lower
|
|
* priority task, even if the iterator is in the middle of a scan. Incrementing
|
|
* the rt_loop_next will cause the iterator to perform another scan.
|
|
*
|
|
*/
|
|
static int rto_next_cpu(struct root_domain *rd)
|
|
{
|
|
int next;
|
|
int cpu;
|
|
|
|
/*
|
|
* When starting the IPI RT pushing, the rto_cpu is set to -1,
|
|
* rt_next_cpu() will simply return the first CPU found in
|
|
* the rto_mask.
|
|
*
|
|
* If rto_next_cpu() is called with rto_cpu is a valid CPU, it
|
|
* will return the next CPU found in the rto_mask.
|
|
*
|
|
* If there are no more CPUs left in the rto_mask, then a check is made
|
|
* against rto_loop and rto_loop_next. rto_loop is only updated with
|
|
* the rto_lock held, but any CPU may increment the rto_loop_next
|
|
* without any locking.
|
|
*/
|
|
for (;;) {
|
|
|
|
/* When rto_cpu is -1 this acts like cpumask_first() */
|
|
cpu = cpumask_next(rd->rto_cpu, rd->rto_mask);
|
|
|
|
/* this will be any CPU in the rd->rto_mask, and can be a halted cpu update it */
|
|
trace_android_rvh_rto_next_cpu(rd->rto_cpu, rd->rto_mask, &cpu);
|
|
|
|
rd->rto_cpu = cpu;
|
|
|
|
if (cpu < nr_cpu_ids)
|
|
return cpu;
|
|
|
|
rd->rto_cpu = -1;
|
|
|
|
/*
|
|
* ACQUIRE ensures we see the @rto_mask changes
|
|
* made prior to the @next value observed.
|
|
*
|
|
* Matches WMB in rt_set_overload().
|
|
*/
|
|
next = atomic_read_acquire(&rd->rto_loop_next);
|
|
|
|
if (rd->rto_loop == next)
|
|
break;
|
|
|
|
rd->rto_loop = next;
|
|
}
|
|
|
|
return -1;
|
|
}
|
|
|
|
static inline bool rto_start_trylock(atomic_t *v)
|
|
{
|
|
return !atomic_cmpxchg_acquire(v, 0, 1);
|
|
}
|
|
|
|
static inline void rto_start_unlock(atomic_t *v)
|
|
{
|
|
atomic_set_release(v, 0);
|
|
}
|
|
|
|
static void tell_cpu_to_push(struct rq *rq)
|
|
{
|
|
int cpu = -1;
|
|
|
|
/* Keep the loop going if the IPI is currently active */
|
|
atomic_inc(&rq->rd->rto_loop_next);
|
|
|
|
/* Only one CPU can initiate a loop at a time */
|
|
if (!rto_start_trylock(&rq->rd->rto_loop_start))
|
|
return;
|
|
|
|
raw_spin_lock(&rq->rd->rto_lock);
|
|
|
|
/*
|
|
* The rto_cpu is updated under the lock, if it has a valid CPU
|
|
* then the IPI is still running and will continue due to the
|
|
* update to loop_next, and nothing needs to be done here.
|
|
* Otherwise it is finishing up and an ipi needs to be sent.
|
|
*/
|
|
if (rq->rd->rto_cpu < 0)
|
|
cpu = rto_next_cpu(rq->rd);
|
|
|
|
raw_spin_unlock(&rq->rd->rto_lock);
|
|
|
|
rto_start_unlock(&rq->rd->rto_loop_start);
|
|
|
|
if (cpu >= 0) {
|
|
/* Make sure the rd does not get freed while pushing */
|
|
sched_get_rd(rq->rd);
|
|
irq_work_queue_on(&rq->rd->rto_push_work, cpu);
|
|
}
|
|
}
|
|
|
|
/* Called from hardirq context */
|
|
void rto_push_irq_work_func(struct irq_work *work)
|
|
{
|
|
struct root_domain *rd =
|
|
container_of(work, struct root_domain, rto_push_work);
|
|
struct rq *rq;
|
|
int cpu;
|
|
|
|
rq = this_rq();
|
|
|
|
/*
|
|
* We do not need to grab the lock to check for has_pushable_tasks.
|
|
* When it gets updated, a check is made if a push is possible.
|
|
*/
|
|
if (has_pushable_tasks(rq)) {
|
|
raw_spin_rq_lock(rq);
|
|
while (push_rt_task(rq, true))
|
|
;
|
|
raw_spin_rq_unlock(rq);
|
|
}
|
|
|
|
raw_spin_lock(&rd->rto_lock);
|
|
|
|
/* Pass the IPI to the next rt overloaded queue */
|
|
cpu = rto_next_cpu(rd);
|
|
|
|
raw_spin_unlock(&rd->rto_lock);
|
|
|
|
if (cpu < 0) {
|
|
sched_put_rd(rd);
|
|
return;
|
|
}
|
|
|
|
/* Try the next RT overloaded CPU */
|
|
irq_work_queue_on(&rd->rto_push_work, cpu);
|
|
}
|
|
#endif /* HAVE_RT_PUSH_IPI */
|
|
|
|
static void pull_rt_task(struct rq *this_rq)
|
|
{
|
|
int this_cpu = this_rq->cpu, cpu;
|
|
bool resched = false;
|
|
struct task_struct *p, *push_task;
|
|
struct rq *src_rq;
|
|
int rt_overload_count = rt_overloaded(this_rq);
|
|
|
|
if (likely(!rt_overload_count))
|
|
return;
|
|
|
|
/*
|
|
* Match the barrier from rt_set_overloaded; this guarantees that if we
|
|
* see overloaded we must also see the rto_mask bit.
|
|
*/
|
|
smp_rmb();
|
|
|
|
/* If we are the only overloaded CPU do nothing */
|
|
if (rt_overload_count == 1 &&
|
|
cpumask_test_cpu(this_rq->cpu, this_rq->rd->rto_mask))
|
|
return;
|
|
|
|
#ifdef HAVE_RT_PUSH_IPI
|
|
if (sched_feat(RT_PUSH_IPI)) {
|
|
tell_cpu_to_push(this_rq);
|
|
return;
|
|
}
|
|
#endif
|
|
|
|
for_each_cpu(cpu, this_rq->rd->rto_mask) {
|
|
if (this_cpu == cpu)
|
|
continue;
|
|
|
|
src_rq = cpu_rq(cpu);
|
|
|
|
/*
|
|
* Don't bother taking the src_rq->lock if the next highest
|
|
* task is known to be lower-priority than our current task.
|
|
* This may look racy, but if this value is about to go
|
|
* logically higher, the src_rq will push this task away.
|
|
* And if its going logically lower, we do not care
|
|
*/
|
|
if (src_rq->rt.highest_prio.next >=
|
|
this_rq->rt.highest_prio.curr)
|
|
continue;
|
|
|
|
/*
|
|
* We can potentially drop this_rq's lock in
|
|
* double_lock_balance, and another CPU could
|
|
* alter this_rq
|
|
*/
|
|
push_task = NULL;
|
|
double_lock_balance(this_rq, src_rq);
|
|
|
|
/*
|
|
* We can pull only a task, which is pushable
|
|
* on its rq, and no others.
|
|
*/
|
|
p = pick_highest_pushable_task(src_rq, this_cpu);
|
|
|
|
/*
|
|
* Do we have an RT task that preempts
|
|
* the to-be-scheduled task?
|
|
*/
|
|
if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
|
|
WARN_ON(p == src_rq->curr);
|
|
WARN_ON(!task_on_rq_queued(p));
|
|
|
|
/*
|
|
* There's a chance that p is higher in priority
|
|
* than what's currently running on its CPU.
|
|
* This is just that p is waking up and hasn't
|
|
* had a chance to schedule. We only pull
|
|
* p if it is lower in priority than the
|
|
* current task on the run queue
|
|
*/
|
|
if (p->prio < src_rq->curr->prio)
|
|
goto skip;
|
|
|
|
if (is_migration_disabled(p)) {
|
|
push_task = get_push_task(src_rq);
|
|
} else {
|
|
deactivate_task(src_rq, p, 0);
|
|
set_task_cpu(p, this_cpu);
|
|
activate_task(this_rq, p, 0);
|
|
resched = true;
|
|
}
|
|
/*
|
|
* We continue with the search, just in
|
|
* case there's an even higher prio task
|
|
* in another runqueue. (low likelihood
|
|
* but possible)
|
|
*/
|
|
}
|
|
skip:
|
|
double_unlock_balance(this_rq, src_rq);
|
|
|
|
if (push_task) {
|
|
raw_spin_rq_unlock(this_rq);
|
|
stop_one_cpu_nowait(src_rq->cpu, push_cpu_stop,
|
|
push_task, &src_rq->push_work);
|
|
raw_spin_rq_lock(this_rq);
|
|
}
|
|
}
|
|
|
|
if (resched)
|
|
resched_curr(this_rq);
|
|
}
|
|
|
|
/*
|
|
* If we are not running and we are not going to reschedule soon, we should
|
|
* try to push tasks away now
|
|
*/
|
|
static void task_woken_rt(struct rq *rq, struct task_struct *p)
|
|
{
|
|
bool need_to_push = !task_running(rq, p) &&
|
|
!test_tsk_need_resched(rq->curr) &&
|
|
p->nr_cpus_allowed > 1 &&
|
|
(dl_task(rq->curr) || rt_task(rq->curr)) &&
|
|
(rq->curr->nr_cpus_allowed < 2 ||
|
|
rq->curr->prio <= p->prio);
|
|
|
|
if (need_to_push)
|
|
push_rt_tasks(rq);
|
|
}
|
|
|
|
/* Assumes rq->lock is held */
|
|
static void rq_online_rt(struct rq *rq)
|
|
{
|
|
if (rq->rt.overloaded)
|
|
rt_set_overload(rq);
|
|
|
|
__enable_runtime(rq);
|
|
|
|
cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
|
|
}
|
|
|
|
/* Assumes rq->lock is held */
|
|
static void rq_offline_rt(struct rq *rq)
|
|
{
|
|
if (rq->rt.overloaded)
|
|
rt_clear_overload(rq);
|
|
|
|
__disable_runtime(rq);
|
|
|
|
cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
|
|
}
|
|
|
|
/*
|
|
* When switch from the rt queue, we bring ourselves to a position
|
|
* that we might want to pull RT tasks from other runqueues.
|
|
*/
|
|
static void switched_from_rt(struct rq *rq, struct task_struct *p)
|
|
{
|
|
/*
|
|
* If there are other RT tasks then we will reschedule
|
|
* and the scheduling of the other RT tasks will handle
|
|
* the balancing. But if we are the last RT task
|
|
* we may need to handle the pulling of RT tasks
|
|
* now.
|
|
*/
|
|
if (!task_on_rq_queued(p) || rq->rt.rt_nr_running)
|
|
return;
|
|
|
|
rt_queue_pull_task(rq);
|
|
}
|
|
|
|
void __init init_sched_rt_class(void)
|
|
{
|
|
unsigned int i;
|
|
|
|
for_each_possible_cpu(i) {
|
|
zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
|
|
GFP_KERNEL, cpu_to_node(i));
|
|
}
|
|
}
|
|
#endif /* CONFIG_SMP */
|
|
|
|
/*
|
|
* When switching a task to RT, we may overload the runqueue
|
|
* with RT tasks. In this case we try to push them off to
|
|
* other runqueues.
|
|
*/
|
|
static void switched_to_rt(struct rq *rq, struct task_struct *p)
|
|
{
|
|
/*
|
|
* If we are running, update the avg_rt tracking, as the running time
|
|
* will now on be accounted into the latter.
|
|
*/
|
|
if (task_current(rq, p)) {
|
|
update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0);
|
|
return;
|
|
}
|
|
|
|
/*
|
|
* If we are not running we may need to preempt the current
|
|
* running task. If that current running task is also an RT task
|
|
* then see if we can move to another run queue.
|
|
*/
|
|
if (task_on_rq_queued(p)) {
|
|
#ifdef CONFIG_SMP
|
|
if (p->nr_cpus_allowed > 1 && rq->rt.overloaded)
|
|
rt_queue_push_tasks(rq);
|
|
#endif /* CONFIG_SMP */
|
|
if (p->prio < rq->curr->prio && cpu_online(cpu_of(rq)))
|
|
resched_curr(rq);
|
|
}
|
|
}
|
|
|
|
/*
|
|
* Priority of the task has changed. This may cause
|
|
* us to initiate a push or pull.
|
|
*/
|
|
static void
|
|
prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
|
|
{
|
|
if (!task_on_rq_queued(p))
|
|
return;
|
|
|
|
if (task_current(rq, p)) {
|
|
#ifdef CONFIG_SMP
|
|
/*
|
|
* If our priority decreases while running, we
|
|
* may need to pull tasks to this runqueue.
|
|
*/
|
|
if (oldprio < p->prio)
|
|
rt_queue_pull_task(rq);
|
|
|
|
/*
|
|
* If there's a higher priority task waiting to run
|
|
* then reschedule.
|
|
*/
|
|
if (p->prio > rq->rt.highest_prio.curr)
|
|
resched_curr(rq);
|
|
#else
|
|
/* For UP simply resched on drop of prio */
|
|
if (oldprio < p->prio)
|
|
resched_curr(rq);
|
|
#endif /* CONFIG_SMP */
|
|
} else {
|
|
/*
|
|
* This task is not running, but if it is
|
|
* greater than the current running task
|
|
* then reschedule.
|
|
*/
|
|
if (p->prio < rq->curr->prio)
|
|
resched_curr(rq);
|
|
}
|
|
}
|
|
|
|
#ifdef CONFIG_POSIX_TIMERS
static void watchdog(struct rq *rq, struct task_struct *p)
{
	unsigned long soft, hard;

	/* max may change after cur was read, this will be fixed next tick */
	soft = task_rlimit(p, RLIMIT_RTTIME);
	hard = task_rlimit_max(p, RLIMIT_RTTIME);

	if (soft != RLIM_INFINITY) {
		unsigned long next;

		if (p->rt.watchdog_stamp != jiffies) {
			p->rt.timeout++;
			p->rt.watchdog_stamp = jiffies;
		}

		next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
		if (p->rt.timeout > next) {
			posix_cputimers_rt_watchdog(&p->posix_cputimers,
						    p->se.sum_exec_runtime);
		}
	}
}
#else
static inline void watchdog(struct rq *rq, struct task_struct *p) { }
#endif

/*
 * scheduler tick hitting a task of our scheduling class.
 *
 * NOTE: This function can be called remotely by the tick offload that
 * goes along full dynticks. Therefore no local assumption can be made
 * and everything must be accessed through the @rq and @curr passed in
 * parameters.
 */
static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
{
	struct sched_rt_entity *rt_se = &p->rt;

	update_curr_rt(rq);
	update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1);
	trace_android_rvh_update_rt_rq_load_avg(rq_clock_pelt(rq), rq, p, 1);

	watchdog(rq, p);

	/*
	 * RR tasks need a special form of timeslice management.
	 * FIFO tasks have no timeslices.
	 */
	if (p->policy != SCHED_RR)
		return;

	if (--p->rt.time_slice)
		return;

	p->rt.time_slice = sched_rr_timeslice;

	/*
	 * Requeue to the end of queue if we (and all of our ancestors) are not
	 * the only element on the queue
	 */
	for_each_sched_rt_entity(rt_se) {
		if (rt_se->run_list.prev != rt_se->run_list.next) {
			requeue_task_rt(rq, p, 0);
			resched_curr(rq);
			return;
		}
	}
}
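
/*
 * Report the SCHED_RR timeslice (in jiffies); this is what the
 * sched_rr_get_interval() syscall ultimately returns, after conversion to
 * a timespec. SCHED_FIFO tasks run without timeslices, hence 0.
 */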
static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
{
	/*
	 * Time slice is 0 for SCHED_FIFO tasks
	 */
	if (task->policy == SCHED_RR)
		return sched_rr_timeslice;
	else
		return 0;
}
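
/*
 * Method table hooking the RT policies (SCHED_FIFO/SCHED_RR) into the
 * core scheduler.
 */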
DEFINE_SCHED_CLASS(rt) = {

	.enqueue_task		= enqueue_task_rt,
	.dequeue_task		= dequeue_task_rt,
	.yield_task		= yield_task_rt,

	.check_preempt_curr	= check_preempt_curr_rt,

	.pick_next_task		= pick_next_task_rt,
	.put_prev_task		= put_prev_task_rt,
	.set_next_task		= set_next_task_rt,

#ifdef CONFIG_SMP
	.balance		= balance_rt,
	.pick_task		= pick_task_rt,
	.select_task_rq		= select_task_rq_rt,
	.set_cpus_allowed	= set_cpus_allowed_common,
	.rq_online		= rq_online_rt,
	.rq_offline		= rq_offline_rt,
	.task_woken		= task_woken_rt,
	.switched_from		= switched_from_rt,
	.find_lock_rq		= find_lock_lowest_rq,
#endif

	.task_tick		= task_tick_rt,

	.get_rr_interval	= get_rr_interval_rt,

	.prio_changed		= prio_changed_rt,
	.switched_to		= switched_to_rt,

	.update_curr		= update_curr_rt,

#ifdef CONFIG_UCLAMP_TASK
	.uclamp_enabled		= 1,
#endif
};

#ifdef CONFIG_RT_GROUP_SCHED
/*
 * Ensure that the real time constraints are schedulable.
 */
static DEFINE_MUTEX(rt_constraints_mutex);
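
/*
 * Return true if @tg currently contains any RT tasks.  Used to refuse
 * dropping a group's runtime to zero underneath running RT tasks.
 */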
static inline int tg_has_rt_tasks(struct task_group *tg)
{
	struct task_struct *task;
	struct css_task_iter it;
	int ret = 0;

	/*
	 * Autogroups do not have RT tasks; see autogroup_create().
	 */
	if (task_group_is_autogroup(tg))
		return 0;

	css_task_iter_start(&tg->css, 0, &it);
	while (!ret && (task = css_task_iter_next(&it)))
		ret |= rt_task(task);
	css_task_iter_end(&it);

	return ret;
}
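
/*
 * Data and walk_tg_tree() callback used to check whether a proposed
 * rt_period/rt_runtime for @tg fits under the global limit and leaves the
 * bandwidth of its children within its own.
 */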
struct rt_schedulable_data {
	struct task_group *tg;
	u64 rt_period;
	u64 rt_runtime;
};

static int tg_rt_schedulable(struct task_group *tg, void *data)
{
	struct rt_schedulable_data *d = data;
	struct task_group *child;
	unsigned long total, sum = 0;
	u64 period, runtime;

	period = ktime_to_ns(tg->rt_bandwidth.rt_period);
	runtime = tg->rt_bandwidth.rt_runtime;

	if (tg == d->tg) {
		period = d->rt_period;
		runtime = d->rt_runtime;
	}

	/*
	 * Cannot have more runtime than the period.
	 */
	if (runtime > period && runtime != RUNTIME_INF)
		return -EINVAL;

	/*
	 * Ensure we don't starve existing RT tasks if runtime turns zero.
	 */
	if (rt_bandwidth_enabled() && !runtime &&
	    tg->rt_bandwidth.rt_runtime && tg_has_rt_tasks(tg))
		return -EBUSY;

	total = to_ratio(period, runtime);

	/*
	 * Nobody can have more than the global setting allows.
	 */
	if (total > to_ratio(global_rt_period(), global_rt_runtime()))
		return -EINVAL;

	/*
	 * The sum of our children's runtime should not exceed our own.
	 */
	list_for_each_entry_rcu(child, &tg->children, siblings) {
		period = ktime_to_ns(child->rt_bandwidth.rt_period);
		runtime = child->rt_bandwidth.rt_runtime;

		if (child == d->tg) {
			period = d->rt_period;
			runtime = d->rt_runtime;
		}

		sum += to_ratio(period, runtime);
	}

	if (sum > total)
		return -EINVAL;

	return 0;
}
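
/*
 * Validate a proposed bandwidth change against the entire task_group tree.
 */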
static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
{
	int ret;

	struct rt_schedulable_data data = {
		.tg = tg,
		.rt_period = period,
		.rt_runtime = runtime,
	};

	rcu_read_lock();
	ret = walk_tg_tree(tg_rt_schedulable, tg_nop, &data);
	rcu_read_unlock();

	return ret;
}
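
/*
 * Apply a new period/runtime pair to @tg after validating it against the
 * hierarchy, and propagate the runtime to each of the group's per-CPU
 * rt_rqs.
 */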
static int tg_set_rt_bandwidth(struct task_group *tg,
		u64 rt_period, u64 rt_runtime)
{
	int i, err = 0;

	/*
	 * Disallowing the root group RT runtime is BAD, it would prevent the
	 * kernel from creating (and operating) RT threads.
	 */
	if (tg == &root_task_group && rt_runtime == 0)
		return -EINVAL;

	/* No period doesn't make any sense. */
	if (rt_period == 0)
		return -EINVAL;

	/*
	 * Bound quota to defend quota against overflow during bandwidth shift.
	 */
	if (rt_runtime != RUNTIME_INF && rt_runtime > max_rt_runtime)
		return -EINVAL;

	mutex_lock(&rt_constraints_mutex);
	err = __rt_schedulable(tg, rt_period, rt_runtime);
	if (err)
		goto unlock;

	raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
	tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
	tg->rt_bandwidth.rt_runtime = rt_runtime;

	for_each_possible_cpu(i) {
		struct rt_rq *rt_rq = tg->rt_rq[i];

		raw_spin_lock(&rt_rq->rt_runtime_lock);
		rt_rq->rt_runtime = rt_runtime;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
	}
	raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
unlock:
	mutex_unlock(&rt_constraints_mutex);

	return err;
}
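
/*
 * Per-group bandwidth interface, exposed by the (v1) cpu cgroup controller
 * as cpu.rt_runtime_us and cpu.rt_period_us.  Values are in microseconds;
 * writing a negative runtime maps to RUNTIME_INF (no group-local limit),
 * which is reported back as -1.
 */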
int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
{
	u64 rt_runtime, rt_period;

	rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
	rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
	if (rt_runtime_us < 0)
		rt_runtime = RUNTIME_INF;
	else if ((u64)rt_runtime_us > U64_MAX / NSEC_PER_USEC)
		return -EINVAL;

	return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
}

long sched_group_rt_runtime(struct task_group *tg)
{
	u64 rt_runtime_us;

	if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
		return -1;

	rt_runtime_us = tg->rt_bandwidth.rt_runtime;
	do_div(rt_runtime_us, NSEC_PER_USEC);
	return rt_runtime_us;
}

int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us)
{
	u64 rt_runtime, rt_period;

	if (rt_period_us > U64_MAX / NSEC_PER_USEC)
		return -EINVAL;

	rt_period = rt_period_us * NSEC_PER_USEC;
	rt_runtime = tg->rt_bandwidth.rt_runtime;

	return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
}

long sched_group_rt_period(struct task_group *tg)
{
	u64 rt_period_us;

	rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
	do_div(rt_period_us, NSEC_PER_USEC);
	return rt_period_us;
}
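
/*
 * Re-validate the whole group hierarchy after the global sched_rt_* limits
 * changed.
 */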
static int sched_rt_global_constraints(void)
{
	int ret = 0;

	mutex_lock(&rt_constraints_mutex);
	ret = __rt_schedulable(NULL, 0, 0);
	mutex_unlock(&rt_constraints_mutex);

	return ret;
}
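
/*
 * cgroup attach check: refuse to move an RT task into a group that has no
 * RT runtime, since it could never run there.
 */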
int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
{
	/* Don't accept realtime tasks when there is no way for them to run */
	if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
		return 0;

	return 1;
}

#else /* !CONFIG_RT_GROUP_SCHED */
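/*
 * Without RT group scheduling there is no hierarchy to validate; simply
 * propagate the new global runtime to every CPU's rt_rq.
 */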
static int sched_rt_global_constraints(void)
{
	unsigned long flags;
	int i;

	raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
	for_each_possible_cpu(i) {
		struct rt_rq *rt_rq = &cpu_rq(i)->rt;

		raw_spin_lock(&rt_rq->rt_runtime_lock);
		rt_rq->rt_runtime = global_rt_runtime();
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
	}
	raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);

	return 0;
}
#endif /* CONFIG_RT_GROUP_SCHED */
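
/*
 * Sanity-check the sched_rt_* sysctl values: the period must be positive
 * and the runtime, unless infinite, must fit within both the period and
 * max_rt_runtime (guarding against overflow in the bandwidth arithmetic).
 */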
static int sched_rt_global_validate(void)
{
	if (sysctl_sched_rt_period <= 0)
		return -EINVAL;

	if ((sysctl_sched_rt_runtime != RUNTIME_INF) &&
		((sysctl_sched_rt_runtime > sysctl_sched_rt_period) ||
		 ((u64)sysctl_sched_rt_runtime *
			NSEC_PER_USEC > max_rt_runtime)))
		return -EINVAL;

	return 0;
}
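
/*
 * Publish the validated global period/runtime into def_rt_bandwidth.
 */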
static void sched_rt_do_global(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
	def_rt_bandwidth.rt_runtime = global_rt_runtime();
	def_rt_bandwidth.rt_period = ns_to_ktime(global_rt_period());
	raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
}
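
/*
 * sysctl handler for /proc/sys/kernel/sched_rt_period_us and
 * /proc/sys/kernel/sched_rt_runtime_us.  Both values are validated together
 * before being pushed into the RT and deadline bandwidth code; on failure
 * the old values are restored.  For example, with the defaults of
 * period=1000000 and runtime=950000, RT tasks may consume at most 95% of
 * every second, leaving 5% for non-RT work.
 */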
int sched_rt_handler(struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	int old_period, old_runtime;
	static DEFINE_MUTEX(mutex);
	int ret;

	mutex_lock(&mutex);
	old_period = sysctl_sched_rt_period;
	old_runtime = sysctl_sched_rt_runtime;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (!ret && write) {
		ret = sched_rt_global_validate();
		if (ret)
			goto undo;

		ret = sched_dl_global_validate();
		if (ret)
			goto undo;

		ret = sched_rt_global_constraints();
		if (ret)
			goto undo;

		sched_rt_do_global();
		sched_dl_do_global();
	}
	if (0) {
undo:
		sysctl_sched_rt_period = old_period;
		sysctl_sched_rt_runtime = old_runtime;
	}
	mutex_unlock(&mutex);

	return ret;
}
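
/*
 * sysctl handler for /proc/sys/kernel/sched_rr_timeslice_ms.  Userspace
 * writes milliseconds; internally the timeslice is kept in jiffies, and a
 * value <= 0 restores the RR_TIMESLICE default.
 */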
int sched_rr_handler(struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	int ret;
	static DEFINE_MUTEX(mutex);

	mutex_lock(&mutex);
	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	/*
	 * Make sure that internally we keep jiffies.
	 * Also, writing zero resets the timeslice to default:
	 */
	if (!ret && write) {
		sched_rr_timeslice =
			sysctl_sched_rr_timeslice <= 0 ? RR_TIMESLICE :
			msecs_to_jiffies(sysctl_sched_rr_timeslice);
	}
	mutex_unlock(&mutex);

	return ret;
}

#ifdef CONFIG_SCHED_DEBUG
void print_rt_stats(struct seq_file *m, int cpu)
{
	rt_rq_iter_t iter;
	struct rt_rq *rt_rq;

	rcu_read_lock();
	for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
		print_rt_rq(m, cpu, rt_rq);
	rcu_read_unlock();
}
#endif /* CONFIG_SCHED_DEBUG */