kernel_arpi/fs/fs-writeback.c
Greg Kroah-Hartman 3d8ac88867 Merge 5.15.46 into android14-5.15
Changes in 5.15.46
	binfmt_flat: do not stop relocating GOT entries prematurely on riscv
	parisc/stifb: Implement fb_is_primary_device()
	parisc/stifb: Keep track of hardware path of graphics card
	RISC-V: Mark IORESOURCE_EXCLUSIVE for reserved mem instead of IORESOURCE_BUSY
	riscv: Initialize thread pointer before calling C functions
	riscv: Fix irq_work when SMP is disabled
	riscv: Wire up memfd_secret in UAPI header
	riscv: Move alternative length validation into subsection
	ALSA: hda/realtek - Add new type for ALC245
	ALSA: hda/realtek: Enable 4-speaker output for Dell XPS 15 9520 laptop
	ALSA: hda/realtek - Fix microphone noise on ASUS TUF B550M-PLUS
	ALSA: usb-audio: Cancel pending work at closing a MIDI substream
	USB: serial: pl2303: fix type detection for odd device
	USB: serial: option: add Quectel BG95 modem
	USB: new quirk for Dell Gen 2 devices
	usb: isp1760: Fix out-of-bounds array access
	usb: dwc3: gadget: Move null pinter check to proper place
	usb: core: hcd: Add support for deferring roothub registration
	fs/ntfs3: Update valid size if -EIOCBQUEUED
	fs/ntfs3: Fix fiemap + fix shrink file size (to remove preallocated space)
	fs/ntfs3: Keep preallocated only if option prealloc enabled
	fs/ntfs3: Check new size for limits
	fs/ntfs3: In function ntfs_set_acl_ex do not change inode->i_mode if called from function ntfs_init_acl
	fs/ntfs3: Fix some memory leaks in an error handling path of 'log_replay()'
	fs/ntfs3: Update i_ctime when xattr is added
	fs/ntfs3: Restore ntfs_xattr_get_acl and ntfs_xattr_set_acl functions
	cifs: fix potential double free during failed mount
	cifs: when extending a file with falloc we should make files not-sparse
	xhci: Allow host runtime PM as default for Intel Alder Lake N xHCI
	platform/x86: intel-hid: fix _DSM function index handling
	x86/MCE/AMD: Fix memory leak when threshold_create_bank() fails
	perf/x86/intel: Fix event constraints for ICL
	x86/kexec: fix memory leak of elf header buffer
	x86/sgx: Set active memcg prior to shmem allocation
	ptrace/um: Replace PT_DTRACE with TIF_SINGLESTEP
	ptrace/xtensa: Replace PT_SINGLESTEP with TIF_SINGLESTEP
	ptrace: Reimplement PTRACE_KILL by always sending SIGKILL
	btrfs: add "0x" prefix for unsupported optional features
	btrfs: return correct error number for __extent_writepage_io()
	btrfs: repair super block num_devices automatically
	btrfs: fix the error handling for submit_extent_page() for btrfs_do_readpage()
	iommu/vt-d: Add RPLS to quirk list to skip TE disabling
	drm/vmwgfx: validate the screen formats
	drm/virtio: fix NULL pointer dereference in virtio_gpu_conn_get_modes
	selftests/bpf: Fix vfs_link kprobe definition
	selftests/bpf: Fix parsing of prog types in UAPI hdr for bpftool sync
	mwifiex: add mutex lock for call in mwifiex_dfs_chan_sw_work_queue
	b43legacy: Fix assigning negative value to unsigned variable
	b43: Fix assigning negative value to unsigned variable
	ipw2x00: Fix potential NULL dereference in libipw_xmit()
	ipv6: fix locking issues with loops over idev->addr_list
	fbcon: Consistently protect deferred_takeover with console_lock()
	x86/platform/uv: Update TSC sync state for UV5
	ACPICA: Avoid cache flush inside virtual machines
	mac80211: minstrel_ht: fix where rate stats are stored (fixes debugfs output)
	drm/komeda: return early if drm_universal_plane_init() fails.
	drm/amd/display: Disabling Z10 on DCN31
	rcu-tasks: Fix race in schedule and flush work
	rcu: Make TASKS_RUDE_RCU select IRQ_WORK
	sfc: ef10: Fix assigning negative value to unsigned variable
	ALSA: jack: Access input_dev under mutex
	rtw88: 8821c: fix debugfs rssi value
	spi: spi-rspi: Remove setting {src,dst}_{addr,addr_width} based on DMA direction
	tools/power turbostat: fix ICX DRAM power numbers
	scsi: lpfc: Move cfg_log_verbose check before calling lpfc_dmp_dbg()
	scsi: lpfc: Fix SCSI I/O completion and abort handler deadlock
	scsi: lpfc: Fix call trace observed during I/O with CMF enabled
	cpuidle: PSCI: Improve support for suspend-to-RAM for PSCI OSI mode
	drm/amd/pm: fix double free in si_parse_power_table()
	ASoC: rsnd: care default case on rsnd_ssiu_busif_err_status_clear()
	ASoC: rsnd: care return value from rsnd_node_fixed_index()
	ath9k: fix QCA9561 PA bias level
	media: venus: hfi: avoid null dereference in deinit
	media: pci: cx23885: Fix the error handling in cx23885_initdev()
	media: cx25821: Fix the warning when removing the module
	md/bitmap: don't set sb values if can't pass sanity check
	mmc: jz4740: Apply DMA engine limits to maximum segment size
	drivers: mmc: sdhci_am654: Add the quirk to set TESTCD bit
	scsi: megaraid: Fix error check return value of register_chrdev()
	drm/amdgpu/sdma: Fix incorrect calculations of the wptr of the doorbells
	scsi: ufs: Use pm_runtime_resume_and_get() instead of pm_runtime_get_sync()
	scsi: lpfc: Fix resource leak in lpfc_sli4_send_seq_to_ulp()
	ath11k: disable spectral scan during spectral deinit
	ASoC: Intel: bytcr_rt5640: Add quirk for the HP Pro Tablet 408
	drm/plane: Move range check for format_count earlier
	drm/amd/pm: fix the compile warning
	ath10k: skip ath10k_halt during suspend for driver state RESTARTING
	arm64: compat: Do not treat syscall number as ESR_ELx for a bad syscall
	drm: msm: fix error check return value of irq_of_parse_and_map()
	scsi: target: tcmu: Fix possible data corruption
	ipv6: Don't send rs packets to the interface of ARPHRD_TUNNEL
	net/mlx5: fs, delete the FTE when there are no rules attached to it
	ASoC: dapm: Don't fold register value changes into notifications
	mlxsw: spectrum_dcb: Do not warn about priority changes
	mlxsw: Treat LLDP packets as control
	drm/amdgpu/psp: move PSP memory alloc from hw_init to sw_init
	drm/amdgpu/ucode: Remove firmware load type check in amdgpu_ucode_free_bo
	regulator: mt6315: Enforce regulator-compatible, not name
	HID: bigben: fix slab-out-of-bounds Write in bigben_probe
	of: Support more than one crash kernel regions for kexec -s
	ASoC: tscs454: Add endianness flag in snd_soc_component_driver
	scsi: lpfc: Alter FPIN stat accounting logic
	net: remove two BUG() from skb_checksum_help()
	s390/preempt: disable __preempt_count_add() optimization for PROFILE_ALL_BRANCHES
	perf/amd/ibs: Cascade pmu init functions' return value
	sched/core: Avoid obvious double update_rq_clock warning
	spi: stm32-qspi: Fix wait_cmd timeout in APM mode
	dma-debug: change allocation mode from GFP_NOWAIT to GFP_ATIOMIC
	ACPI: PM: Block ASUS B1400CEAE from suspend to idle by default
	ipmi:ssif: Check for NULL msg when handling events and messages
	ipmi: Fix pr_fmt to avoid compilation issues
	rtlwifi: Use pr_warn instead of WARN_ONCE
	mt76: mt7921: accept rx frames with non-standard VHT MCS10-11
	mt76: fix encap offload ethernet type check
	media: rga: fix possible memory leak in rga_probe
	media: coda: limit frame interval enumeration to supported encoder frame sizes
	media: hantro: HEVC: unconditionnaly set pps_{cb/cr}_qp_offset values
	media: ccs-core.c: fix failure to call clk_disable_unprepare
	media: imon: reorganize serialization
	media: cec-adap.c: fix is_configuring state
	usbnet: Run unregister_netdev() before unbind() again
	openrisc: start CPU timer early in boot
	nvme-pci: fix a NULL pointer dereference in nvme_alloc_admin_tags
	ASoC: rt5645: Fix errorenous cleanup order
	nbd: Fix hung on disconnect request if socket is closed before
	drm/amd/pm: update smartshift powerboost calc for smu12
	drm/amd/pm: update smartshift powerboost calc for smu13
	net: phy: micrel: Allow probing without .driver_data
	media: exynos4-is: Fix compile warning
	media: hantro: Stop using H.264 parameter pic_num
	ASoC: max98357a: remove dependency on GPIOLIB
	ASoC: rt1015p: remove dependency on GPIOLIB
	ACPI: CPPC: Assume no transition latency if no PCCT
	nvme: set non-mdts limits in nvme_scan_work
	can: mcp251xfd: silence clang's -Wunaligned-access warning
	x86/microcode: Add explicit CPU vendor dependency
	net: ipa: ignore endianness if there is no header
	m68k: atari: Make Atari ROM port I/O write macros return void
	rxrpc: Return an error to sendmsg if call failed
	rxrpc, afs: Fix selection of abort codes
	afs: Adjust ACK interpretation to try and cope with NAT
	eth: tg3: silence the GCC 12 array-bounds warning
	char: tpm: cr50_i2c: Suppress duplicated error message in .remove()
	selftests/bpf: fix btf_dump/btf_dump due to recent clang change
	gfs2: use i_lock spin_lock for inode qadata
	scsi: target: tcmu: Avoid holding XArray lock when calling lock_page
	IB/rdmavt: add missing locks in rvt_ruc_loopback
	ARM: dts: ox820: align interrupt controller node name with dtschema
	ARM: dts: socfpga: align interrupt controller node name with dtschema
	ARM: dts: s5pv210: align DMA channels with dtschema
	arm64: dts: qcom: msm8994: Fix the cont_splash_mem address
	arm64: dts: qcom: msm8994: Fix BLSP[12]_DMA channels count
	PM / devfreq: rk3399_dmc: Disable edev on remove()
	crypto: ccree - use fine grained DMA mapping dir
	soc: ti: ti_sci_pm_domains: Check for null return of devm_kcalloc
	fs: jfs: fix possible NULL pointer dereference in dbFree()
	arm64: dts: qcom: sdm845-xiaomi-beryllium: fix typo in panel's vddio-supply property
	ALSA: usb-audio: Add quirk bits for enabling/disabling generic implicit fb
	ALSA: usb-audio: Move generic implicit fb quirk entries into quirks.c
	ARM: OMAP1: clock: Fix UART rate reporting algorithm
	powerpc/fadump: Fix fadump to work with a different endian capture kernel
	fat: add ratelimit to fat*_ent_bread()
	pinctrl: renesas: rzn1: Fix possible null-ptr-deref in sh_pfc_map_resources()
	ARM: versatile: Add missing of_node_put in dcscb_init
	ARM: dts: exynos: add atmel,24c128 fallback to Samsung EEPROM
	ARM: hisi: Add missing of_node_put after of_find_compatible_node
	cpufreq: Avoid unnecessary frequency updates due to mismatch
	powerpc/rtas: Keep MSR[RI] set when calling RTAS
	PCI: Avoid pci_dev_lock() AB/BA deadlock with sriov_numvfs_store()
	KVM: PPC: Book3S HV Nested: L2 LPCR should inherit L1 LPES setting
	alpha: fix alloc_zeroed_user_highpage_movable()
	tracing: incorrect isolate_mote_t cast in mm_vmscan_lru_isolate
	powerpc/powernv/vas: Assign real address to rx_fifo in vas_rx_win_attr
	powerpc/xics: fix refcount leak in icp_opal_init()
	powerpc/powernv: fix missing of_node_put in uv_init()
	macintosh/via-pmu: Fix build failure when CONFIG_INPUT is disabled
	powerpc/iommu: Add missing of_node_put in iommu_init_early_dart
	smb3: check for null tcon
	RDMA/hfi1: Prevent panic when SDMA is disabled
	Input: gpio-keys - cancel delayed work only in case of GPIO
	drm: fix EDID struct for old ARM OABI format
	drm/bridge_connector: enable HPD by default if supported
	dt-bindings: display: sitronix, st7735r: Fix backlight in example
	drm/vmwgfx: Fix an invalid read
	ath11k: acquire ab->base_lock in unassign when finding the peer by addr
	drm: bridge: it66121: Fix the register page length
	ath9k: fix ar9003_get_eepmisc
	drm/edid: fix invalid EDID extension block filtering
	drm/bridge: adv7511: clean up CEC adapter when probe fails
	drm: bridge: icn6211: Fix register layout
	drm: bridge: icn6211: Fix HFP_HSW_HBP_HI and HFP_MIN handling
	mtd: spinand: gigadevice: fix Quad IO for GD5F1GQ5UExxG
	spi: qcom-qspi: Add minItems to interconnect-names
	ASoC: mediatek: Fix error handling in mt8173_max98090_dev_probe
	ASoC: mediatek: Fix missing of_node_put in mt2701_wm8960_machine_probe
	x86/delay: Fix the wrong asm constraint in delay_loop()
	drm/vc4: hvs: Fix frame count register readout
	drm/mediatek: Fix mtk_cec_mask()
	drm/vc4: hvs: Reset muxes at probe time
	drm/vc4: txp: Don't set TXP_VSTART_AT_EOF
	drm/vc4: txp: Force alpha to be 0xff if it's disabled
	libbpf: Don't error out on CO-RE relos for overriden weak subprogs
	x86/PCI: Fix ALi M1487 (IBC) PIRQ router link value interpretation
	mptcp: reset the packet scheduler on PRIO change
	nl80211: show SSID for P2P_GO interfaces
	drm/komeda: Fix an undefined behavior bug in komeda_plane_add()
	drm: mali-dp: potential dereference of null pointer
	spi: spi-ti-qspi: Fix return value handling of wait_for_completion_timeout
	scftorture: Fix distribution of short handler delays
	net: dsa: mt7530: 1G can also support 1000BASE-X link mode
	ixp4xx_eth: fix error check return value of platform_get_irq()
	NFC: NULL out the dev->rfkill to prevent UAF
	efi: Add missing prototype for efi_capsule_setup_info
	device property: Check fwnode->secondary when finding properties
	device property: Allow error pointer to be passed to fwnode APIs
	target: remove an incorrect unmap zeroes data deduction
	drbd: fix duplicate array initializer
	EDAC/dmc520: Don't print an error for each unconfigured interrupt line
	mtd: rawnand: denali: Use managed device resources
	HID: hid-led: fix maximum brightness for Dream Cheeky
	HID: elan: Fix potential double free in elan_input_configured
	drm/bridge: Fix error handling in analogix_dp_probe
	regulator: da9121: Fix uninit-value in da9121_assign_chip_model()
	drm/mediatek: dpi: Use mt8183 output formats for mt8192
	signal: Deliver SIGTRAP on perf event asynchronously if blocked
	sched/fair: Fix cfs_rq_clock_pelt() for throttled cfs_rq
	sched/psi: report zeroes for CPU full at the system level
	spi: img-spfi: Fix pm_runtime_get_sync() error checking
	cpufreq: Fix possible race in cpufreq online error path
	printk: use atomic updates for klogd work
	printk: add missing memory barrier to wake_up_klogd()
	printk: wake waiters for safe and NMI contexts
	ath9k_htc: fix potential out of bounds access with invalid rxstatus->rs_keyix
	media: i2c: max9286: Use dev_err_probe() helper
	media: i2c: max9286: Use "maxim,gpio-poc" property
	media: i2c: max9286: fix kernel oops when removing module
	media: hantro: Empty encoder capture buffers by default
	drm/panel: simple: Add missing bus flags for Innolux G070Y2-L01
	ALSA: pcm: Check for null pointer of pointer substream before dereferencing it
	mtdblock: warn if opened on NAND
	inotify: show inotify mask flags in proc fdinfo
	fsnotify: fix wrong lockdep annotations
	spi: rockchip: Stop spi slave dma receiver when cs inactive
	spi: rockchip: Preset cs-high and clk polarity in setup progress
	spi: rockchip: fix missing error on unsupported SPI_CS_HIGH
	of: overlay: do not break notify on NOTIFY_{OK|STOP}
	selftests/damon: add damon to selftests root Makefile
	drm/msm/dp: Modify prototype of encoder based API
	drm/msm/hdmi: switch to drm_bridge_connector
	drm/msm/dpu: adjust display_v_end for eDP and DP
	scsi: iscsi: Fix harmless double shift bug
	scsi: ufs: qcom: Fix ufs_qcom_resume()
	scsi: ufs: core: Exclude UECxx from SFR dump list
	drm/v3d: Fix null pointer dereference of pointer perfmon
	selftests/resctrl: Fix null pointer dereference on open failed
	libbpf: Fix logic for finding matching program for CO-RE relocation
	mtd: spi-nor: core: Check written SR value in spi_nor_write_16bit_sr_and_check()
	x86/pm: Fix false positive kmemleak report in msr_build_context()
	mtd: rawnand: cadence: fix possible null-ptr-deref in cadence_nand_dt_probe()
	mtd: rawnand: intel: fix possible null-ptr-deref in ebu_nand_probe()
	x86/speculation: Add missing prototype for unpriv_ebpf_notify()
	ASoC: rk3328: fix disabling mclk on pclk probe failure
	perf tools: Add missing headers needed by util/data.h
	drm/msm/disp/dpu1: set vbif hw config to NULL to avoid use after memory free during pm runtime resume
	drm/msm/dp: stop event kernel thread when DP unbind
	drm/msm/dp: fix error check return value of irq_of_parse_and_map()
	drm/msm/dp: reset DP controller before transmit phy test pattern
	drm/msm/dp: do not stop transmitting phy test pattern during DP phy compliance test
	drm/msm/dsi: fix error checks and return values for DSI xmit functions
	drm/msm/hdmi: check return value after calling platform_get_resource_byname()
	drm/msm/hdmi: fix error check return value of irq_of_parse_and_map()
	drm/msm: add missing include to msm_drv.c
	drm/panel: panel-simple: Fix proper bpc for AM-1280800N3TZQW-T00H
	kunit: fix debugfs code to use enum kunit_status, not bool
	drm/rockchip: vop: fix possible null-ptr-deref in vop_bind()
	spi: cadence-quadspi: fix Direct Access Mode disable for SoCFPGA
	perf tools: Use Python devtools for version autodetection rather than runtime
	virtio_blk: fix the discard_granularity and discard_alignment queue limits
	nl80211: don't hold RTNL in color change request
	x86: Fix return value of __setup handlers
	irqchip/exiu: Fix acknowledgment of edge triggered interrupts
	irqchip/aspeed-i2c-ic: Fix irq_of_parse_and_map() return value
	irqchip/aspeed-scu-ic: Fix irq_of_parse_and_map() return value
	x86/mm: Cleanup the control_va_addr_alignment() __setup handler
	arm64: fix types in copy_highpage()
	regulator: core: Fix enable_count imbalance with EXCLUSIVE_GET
	drm/msm/dsi: fix address for second DSI PHY on SDM660
	drm/msm/dp: fix event thread stuck in wait_event after kthread_stop()
	drm/msm/mdp5: Return error code in mdp5_pipe_release when deadlock is detected
	drm/msm/mdp5: Return error code in mdp5_mixer_release when deadlock is detected
	drm/msm: return an error pointer in msm_gem_prime_get_sg_table()
	media: uvcvideo: Fix missing check to determine if element is found in list
	arm64: stackleak: fix current_top_of_stack()
	iomap: iomap_write_failed fix
	spi: spi-fsl-qspi: check return value after calling platform_get_resource_byname()
	Revert "cpufreq: Fix possible race in cpufreq online error path"
	regulator: qcom_smd: Fix up PM8950 regulator configuration
	samples: bpf: Don't fail for a missing VMLINUX_BTF when VMLINUX_H is provided
	perf/amd/ibs: Use interrupt regs ip for stack unwinding
	ath11k: Don't check arvif->is_started before sending management frames
	wilc1000: fix crash observed in AP mode with cfg80211_register_netdevice()
	HID: amd_sfh: Modify the bus name
	HID: amd_sfh: Modify the hid name
	ASoC: fsl: Use dev_err_probe() helper
	ASoC: fsl: Fix refcount leak in imx_sgtl5000_probe
	ASoC: imx-hdmi: Fix refcount leak in imx_hdmi_probe
	ASoC: mxs-saif: Fix refcount leak in mxs_saif_probe
	regulator: pfuze100: Fix refcount leak in pfuze_parse_regulators_dt
	dma-direct: factor out a helper for DMA_ATTR_NO_KERNEL_MAPPING allocations
	dma-direct: don't fail on highmem CMA pages in dma_direct_alloc_pages
	ASoC: samsung: Use dev_err_probe() helper
	ASoC: samsung: Fix refcount leak in aries_audio_probe
	block: Fix the bio.bi_opf comment
	kselftest/cgroup: fix test_stress.sh to use OUTPUT dir
	scripts/faddr2line: Fix overlapping text section failures
	media: aspeed: Fix an error handling path in aspeed_video_probe()
	media: exynos4-is: Fix PM disable depth imbalance in fimc_is_probe
	mt76: mt7921: Fix the error handling path of mt7921_pci_probe()
	mt76: do not attempt to reorder received 802.3 packets without agg session
	media: st-delta: Fix PM disable depth imbalance in delta_probe
	media: atmel: atmel-isc: Fix PM disable depth imbalance in atmel_isc_probe
	media: i2c: rdacm2x: properly set subdev entity function
	media: exynos4-is: Change clk_disable to clk_disable_unprepare
	media: pvrusb2: fix array-index-out-of-bounds in pvr2_i2c_core_init
	media: vsp1: Fix offset calculation for plane cropping
	media: atmel: atmel-sama5d2-isc: fix wrong mask in YUYV format check
	media: hantro: HEVC: Fix tile info buffer value computation
	Bluetooth: fix dangling sco_conn and use-after-free in sco_sock_timeout
	Bluetooth: use hdev lock in activate_scan for hci_is_adv_monitoring
	Bluetooth: use hdev lock for accept_list and reject_list in conn req
	nvme: set dma alignment to dword
	m68k: math-emu: Fix dependencies of math emulation support
	sctp: read sk->sk_bound_dev_if once in sctp_rcv()
	net: hinic: add missing destroy_workqueue in hinic_pf_to_mgmt_init
	ASoC: ti: j721e-evm: Fix refcount leak in j721e_soc_probe_*
	kselftest/arm64: bti: force static linking
	media: ov7670: remove ov7670_power_off from ov7670_remove
	media: i2c: ov5648: fix wrong pointer passed to IS_ERR() and PTR_ERR()
	media: staging: media: rkvdec: Make use of the helper function devm_platform_ioremap_resource()
	media: rkvdec: h264: Fix dpb_valid implementation
	media: rkvdec: h264: Fix bit depth wrap in pps packet
	regulator: scmi: Fix refcount leak in scmi_regulator_probe
	ext4: reject the 'commit' option on ext2 filesystems
	drm/msm/a6xx: Fix refcount leak in a6xx_gpu_init
	drm: msm: fix possible memory leak in mdp5_crtc_cursor_set()
	x86/sev: Annotate stack change in the #VC handler
	drm/msm: don't free the IRQ if it was not requested
	selftests/bpf: Add missed ima_setup.sh in Makefile
	drm/msm/dpu: handle pm_runtime_get_sync() errors in bind path
	drm/i915: Fix CFI violation with show_dynamic_id()
	thermal/drivers/bcm2711: Don't clamp temperature at zero
	thermal/drivers/broadcom: Fix potential NULL dereference in sr_thermal_probe
	thermal/core: Fix memory leak in __thermal_cooling_device_register()
	thermal/drivers/imx_sc_thermal: Fix refcount leak in imx_sc_thermal_probe
	bfq: Relax waker detection for shared queues
	bfq: Allow current waker to defend against a tentative one
	ASoC: wm2000: fix missing clk_disable_unprepare() on error in wm2000_anc_transition()
	PM: domains: Fix initialization of genpd's next_wakeup
	net: macb: Fix PTP one step sync support
	NFC: hci: fix sleep in atomic context bugs in nfc_hci_hcp_message_tx
	ASoC: max98090: Move check for invalid values before casting in max98090_put_enab_tlv()
	net: stmmac: selftests: Use kcalloc() instead of kzalloc()
	net: stmmac: fix out-of-bounds access in a selftest
	hv_netvsc: Fix potential dereference of NULL pointer
	hwmon: (pmbus) Check PEC support before reading other registers
	rxrpc: Fix listen() setting the bar too high for the prealloc rings
	rxrpc: Don't try to resend the request if we're receiving the reply
	rxrpc: Fix overlapping ACK accounting
	rxrpc: Don't let ack.previousPacket regress
	rxrpc: Fix decision on when to generate an IDLE ACK
	net: huawei: hinic: Use devm_kcalloc() instead of devm_kzalloc()
	hinic: Avoid some over memory allocation
	net: dsa: restrict SMSC_LAN9303_I2C kconfig
	net/smc: postpone sk_refcnt increment in connect()
	dma-direct: factor out dma_set_{de,en}crypted helpers
	dma-direct: don't call dma_set_decrypted for remapped allocations
	dma-direct: always leak memory that can't be re-encrypted
	dma-direct: don't over-decrypt memory
	arm64: dts: rockchip: Move drive-impedance-ohm to emmc phy on rk3399
	arm64: dts: mt8192: Fix nor_flash status disable typo
	PCI/ACPI: Allow D3 only if Root Port can signal and wake from D3
	memory: samsung: exynos5422-dmc: Avoid some over memory allocation
	ARM: dts: BCM5301X: update CRU block description
	ARM: dts: BCM5301X: Update pin controller node name
	ARM: dts: suniv: F1C100: fix watchdog compatible
	soc: qcom: smp2p: Fix missing of_node_put() in smp2p_parse_ipc
	soc: qcom: smsm: Fix missing of_node_put() in smsm_parse_ipc
	PCI: cadence: Fix find_first_zero_bit() limit
	PCI: rockchip: Fix find_first_zero_bit() limit
	PCI: mediatek: Fix refcount leak in mtk_pcie_subsys_powerup()
	PCI: dwc: Fix setting error return on MSI DMA mapping failure
	ARM: dts: ci4x10: Adapt to changes in imx6qdl.dtsi regarding fec clocks
	soc: qcom: llcc: Add MODULE_DEVICE_TABLE()
	KVM: nVMX: Leave most VM-Exit info fields unmodified on failed VM-Entry
	KVM: nVMX: Clear IDT vectoring on nested VM-Exit for double/triple fault
	crypto: qat - set CIPHER capability for QAT GEN2
	crypto: qat - set COMPRESSION capability for QAT GEN2
	crypto: qat - set CIPHER capability for DH895XCC
	crypto: qat - set COMPRESSION capability for DH895XCC
	platform/chrome: cros_ec: fix error handling in cros_ec_register()
	ARM: dts: imx6dl-colibri: Fix I2C pinmuxing
	platform/chrome: Re-introduce cros_ec_cmd_xfer and use it for ioctls
	can: xilinx_can: mark bit timing constants as const
	ARM: dts: stm32: Fix PHY post-reset delay on Avenger96
	ARM: dts: bcm2835-rpi-zero-w: Fix GPIO line name for Wifi/BT
	ARM: dts: bcm2837-rpi-cm3-io3: Fix GPIO line names for SMPS I2C
	ARM: dts: bcm2837-rpi-3-b-plus: Fix GPIO line name of power LED
	ARM: dts: bcm2835-rpi-b: Fix GPIO line names
	misc: ocxl: fix possible double free in ocxl_file_register_afu
	crypto: marvell/cesa - ECB does not IV
	gpiolib: of: Introduce hook for missing gpio-ranges
	pinctrl: bcm2835: implement hook for missing gpio-ranges
	arm: mediatek: select arch timer for mt7629
	pinctrl/rockchip: support deferring other gpio params
	pinctrl: mediatek: mt8195: enable driver on mtk platforms
	arm64: dts: qcom: qrb5165-rb5: Fix can-clock node name
	Drivers: hv: vmbus: Fix handling of messages with transaction ID of zero
	powerpc/fadump: fix PT_LOAD segment for boot memory area
	mfd: ipaq-micro: Fix error check return value of platform_get_irq()
	scsi: fcoe: Fix Wstringop-overflow warnings in fcoe_wwn_from_mac()
	soc: bcm: Check for NULL return of devm_kzalloc()
	arm64: dts: ti: k3-am64-mcu: remove incorrect UART base clock rates
	ASoC: sh: rz-ssi: Check return value of pm_runtime_resume_and_get()
	ASoC: sh: rz-ssi: Propagate error codes returned from platform_get_irq_byname()
	ASoC: sh: rz-ssi: Release the DMA channels in rz_ssi_probe() error path
	firmware: arm_scmi: Fix list protocols enumeration in the base protocol
	nvdimm: Fix firmware activation deadlock scenarios
	nvdimm: Allow overwrite in the presence of disabled dimms
	pinctrl: mvebu: Fix irq_of_parse_and_map() return value
	drivers/base/node.c: fix compaction sysfs file leak
	dax: fix cache flush on PMD-mapped pages
	drivers/base/memory: fix an unlikely reference counting issue in __add_memory_block()
	firmware: arm_ffa: Fix uuid parameter to ffa_partition_probe
	firmware: arm_ffa: Remove incorrect assignment of driver_data
	list: introduce list_is_head() helper and re-use it in list.h
	list: fix a data-race around ep->rdllist
	drm/msm/dpu: fix error check return value of irq_of_parse_and_map()
	powerpc/8xx: export 'cpm_setbrg' for modules
	pinctrl: renesas: r8a779a0: Fix GPIO function on I2C-capable pins
	pinctrl: renesas: core: Fix possible null-ptr-deref in sh_pfc_map_resources()
	powerpc/idle: Fix return value of __setup() handler
	powerpc/4xx/cpm: Fix return value of __setup() handler
	RDMA/hns: Add the detection for CMDQ status in the device initialization process
	arm64: dts: marvell: espressobin-ultra: fix SPI-NOR config
	arm64: dts: marvell: espressobin-ultra: enable front USB3 port
	ASoC: atmel-pdmic: Remove endianness flag on pdmic component
	ASoC: atmel-classd: Remove endianness flag on class d component
	proc: fix dentry/inode overinstantiating under /proc/${pid}/net
	ipc/mqueue: use get_tree_nodev() in mqueue_get_tree()
	PCI: imx6: Fix PERST# start-up sequence
	tty: fix deadlock caused by calling printk() under tty_port->lock
	crypto: sun8i-ss - rework handling of IV
	crypto: sun8i-ss - handle zero sized sg
	crypto: cryptd - Protect per-CPU resource by disabling BH.
	ARM: dts: at91: sama7g5: remove interrupt-parent from gic node
	hugetlbfs: fix hugetlbfs_statfs() locking
	Input: sparcspkr - fix refcount leak in bbc_beep_probe
	PCI/AER: Clear MULTI_ERR_COR/UNCOR_RCV bits
	PCI: microchip: Fix potential race in interrupt handling
	hwrng: omap3-rom - fix using wrong clk_disable() in omap_rom_rng_runtime_resume()
	powerpc/64: Only WARN if __pa()/__va() called with bad addresses
	powerpc/perf: Fix the threshold compare group constraint for power10
	powerpc/perf: Fix the threshold compare group constraint for power9
	macintosh: via-pmu and via-cuda need RTC_LIB
	powerpc/xive: Add some error handling code to 'xive_spapr_init()'
	powerpc/xive: Fix refcount leak in xive_spapr_init
	powerpc/fsl_rio: Fix refcount leak in fsl_rio_setup
	mfd: davinci_voicecodec: Fix possible null-ptr-deref davinci_vc_probe()
	nfsd: destroy percpu stats counters after reply cache shutdown
	mailbox: forward the hrtimer if not queued and under a lock
	RDMA/hfi1: Prevent use of lock before it is initialized
	KVM: LAPIC: Drop pending LAPIC timer injection when canceling the timer
	Input: stmfts - do not leave device disabled in stmfts_input_open
	OPP: call of_node_put() on error path in _bandwidth_supported()
	f2fs: support fault injection for dquot_initialize()
	f2fs: fix to do sanity check on inline_dots inode
	f2fs: fix dereference of stale list iterator after loop body
	iommu/amd: Enable swiotlb in all cases
	iommu/mediatek: Fix 2 HW sharing pgtable issue
	iommu/mediatek: Add list_del in mtk_iommu_remove
	iommu/mediatek: Remove clk_disable in mtk_iommu_remove
	iommu/mediatek: Add mutex for m4u_group and m4u_dom in data
	i2c: at91: use dma safe buffers
	cpufreq: mediatek: Use module_init and add module_exit
	cpufreq: mediatek: Unregister platform device on exit
	iommu/arm-smmu-v3-sva: Fix mm use-after-free
	MIPS: Loongson: Use hwmon_device_register_with_groups() to register hwmon
	iommu/mediatek: Fix NULL pointer dereference when printing dev_name
	i2c: at91: Initialize dma_buf in at91_twi_xfer()
	dmaengine: idxd: Fix the error handling path in idxd_cdev_register()
	NFS: Do not report EINTR/ERESTARTSYS as mapping errors
	NFS: fsync() should report filesystem errors over EINTR/ERESTARTSYS
	NFS: Don't report ENOSPC write errors twice
	NFS: Do not report flush errors in nfs_write_end()
	NFS: Don't report errors from nfs_pageio_complete() more than once
	NFSv4/pNFS: Do not fail I/O when we fail to allocate the pNFS layout
	NFS: Further fixes to the writeback error handling
	video: fbdev: clcdfb: Fix refcount leak in clcdfb_of_vram_setup
	dmaengine: stm32-mdma: remove GISR1 register
	dmaengine: stm32-mdma: fix chan initialization in stm32_mdma_irq_handler()
	iommu/amd: Increase timeout waiting for GA log enablement
	i2c: npcm: Fix timeout calculation
	i2c: npcm: Correct register access width
	i2c: npcm: Handle spurious interrupts
	i2c: rcar: fix PM ref counts in probe error paths
	perf build: Fix btf__load_from_kernel_by_id() feature check
	perf c2c: Use stdio interface if slang is not supported
	perf jevents: Fix event syntax error caused by ExtSel
	video: fbdev: vesafb: Fix a use-after-free due early fb_info cleanup
	NFS: Always initialise fattr->label in nfs_fattr_alloc()
	NFS: Create a new nfs_alloc_fattr_with_label() function
	NFS: Convert GFP_NOFS to GFP_KERNEL
	NFSv4.1 mark qualified async operations as MOVEABLE tasks
	f2fs: fix to avoid f2fs_bug_on() in dec_valid_node_count()
	f2fs: fix to do sanity check on block address in f2fs_do_zero_range()
	f2fs: fix to clear dirty inode in f2fs_evict_inode()
	f2fs: fix deadloop in foreground GC
	f2fs: don't need inode lock for system hidden quota
	f2fs: fix to do sanity check on total_data_blocks
	f2fs: don't use casefolded comparison for "." and ".."
	f2fs: fix fallocate to use file_modified to update permissions consistently
	f2fs: fix to do sanity check for inline inode
	objtool: Fix objtool regression on x32 systems
	objtool: Fix symbol creation
	wifi: mac80211: fix use-after-free in chanctx code
	iwlwifi: mvm: fix assert 1F04 upon reconfig
	fs-writeback: writeback_sb_inodes:Recalculate 'wrote' according skipped pages
	efi: Do not import certificates from UEFI Secure Boot for T2 Macs
	bfq: Avoid false marking of bic as stably merged
	bfq: Avoid merging queues with different parents
	bfq: Split shared queues on move between cgroups
	bfq: Update cgroup information before merging bio
	bfq: Drop pointless unlock-lock pair
	bfq: Remove pointless bfq_init_rq() calls
	bfq: Track whether bfq_group is still online
	bfq: Get rid of __bio_blkcg() usage
	bfq: Make sure bfqg for which we are queueing requests is online
	ext4: mark group as trimmed only if it was fully scanned
	ext4: fix use-after-free in ext4_rename_dir_prepare
	ext4: fix race condition between ext4_write and ext4_convert_inline_data
	ext4: fix warning in ext4_handle_inode_extension
	ext4: fix bug_on in ext4_writepages
	ext4: filter out EXT4_FC_REPLAY from on-disk superblock field s_state
	ext4: fix bug_on in __es_tree_search
	ext4: verify dir block before splitting it
	ext4: avoid cycles in directory h-tree
	ACPI: property: Release subnode properties with data nodes
	tty: goldfish: Introduce gf_ioread32()/gf_iowrite32()
	tracing: Fix potential double free in create_var_ref()
	tracing: Initialize integer variable to prevent garbage return value
	drm/amdgpu: add beige goby PCI ID
	PCI/PM: Fix bridge_d3_blacklist[] Elo i2 overwrite of Gigabyte X299
	PCI: qcom: Fix runtime PM imbalance on probe errors
	PCI: qcom: Fix unbalanced PHY init on probe errors
	staging: r8188eu: prevent ->Ssid overflow in rtw_wx_set_scan()
	mm, compaction: fast_find_migrateblock() should return pfn in the target zone
	s390/perf: obtain sie_block from the right address
	s390/stp: clock_delta should be signed
	dlm: fix plock invalid read
	dlm: uninitialized variable on error in dlm_listen_for_all()
	dlm: fix missing lkb refcount handling
	ocfs2: dlmfs: fix error handling of user_dlm_destroy_lock
	scsi: dc395x: Fix a missing check on list iterator
	scsi: ufs: qcom: Add a readl() to make sure ref_clk gets enabled
	landlock: Add clang-format exceptions
	landlock: Format with clang-format
	selftests/landlock: Add clang-format exceptions
	selftests/landlock: Normalize array assignment
	selftests/landlock: Format with clang-format
	samples/landlock: Add clang-format exceptions
	samples/landlock: Format with clang-format
	landlock: Fix landlock_add_rule(2) documentation
	selftests/landlock: Make tests build with old libc
	selftests/landlock: Extend tests for minimal valid attribute size
	selftests/landlock: Add tests for unknown access rights
	selftests/landlock: Extend access right tests to directories
	selftests/landlock: Fully test file rename with "remove" access
	selftests/landlock: Add tests for O_PATH
	landlock: Change landlock_add_rule(2) argument check ordering
	landlock: Change landlock_restrict_self(2) check ordering
	selftests/landlock: Test landlock_create_ruleset(2) argument check ordering
	landlock: Define access_mask_t to enforce a consistent access mask size
	landlock: Reduce the maximum number of layers to 16
	landlock: Create find_rule() from unmask_layers()
	landlock: Fix same-layer rule unions
	drm/amdgpu/cs: make commands with 0 chunks illegal behaviour.
	drm/nouveau/subdev/bus: Ratelimit logging for fault errors
	drm/etnaviv: check for reaped mapping in etnaviv_iommu_unmap_gem
	drm/nouveau/clk: Fix an incorrect NULL check on list iterator
	drm/nouveau/kms/nv50-: atom: fix an incorrect NULL check on list iterator
	drm/bridge: analogix_dp: Grab runtime PM reference for DP-AUX
	drm/i915/dsi: fix VBT send packet port selection for ICL+
	md: fix an incorrect NULL check in does_sb_need_changing
	md: fix an incorrect NULL check in md_reload_sb
	mtd: cfi_cmdset_0002: Move and rename chip_check/chip_ready/chip_good_for_write
	mtd: cfi_cmdset_0002: Use chip_ready() for write on S29GL064N
	media: coda: Fix reported H264 profile
	media: coda: Add more H264 levels for CODA960
	ima: remove the IMA_TEMPLATE Kconfig option
	Kconfig: Add option for asm goto w/ tied outputs to workaround clang-13 bug
	RDMA/hfi1: Fix potential integer multiplication overflow errors
	mmc: core: Allows to override the timeout value for ioctl() path
	csky: patch_text: Fixup last cpu should be master
	irqchip/armada-370-xp: Do not touch Performance Counter Overflow on A375, A38x, A39x
	irqchip: irq-xtensa-mx: fix initial IRQ affinity
	thermal: devfreq_cooling: use local ops instead of global ops
	cfg80211: declare MODULE_FIRMWARE for regulatory.db
	mac80211: upgrade passive scan to active scan on DFS channels after beacon rx
	um: Use asm-generic/dma-mapping.h
	um: chan_user: Fix winch_tramp() return value
	um: Fix out-of-bounds read in LDT setup
	kexec_file: drop weak attribute from arch_kexec_apply_relocations[_add]
	ftrace: Clean up hash direct_functions on register failures
	ksmbd: fix outstanding credits related bugs
	iommu/msm: Fix an incorrect NULL check on list iterator
	iommu/dma: Fix iova map result check bug
	Revert "mm/cma.c: remove redundant cma_mutex lock"
	mm/page_alloc: always attempt to allocate at least one page during bulk allocation
	nodemask.h: fix compilation error with GCC12
	hugetlb: fix huge_pmd_unshare address update
	mm/memremap: fix missing call to untrack_pfn() in pagemap_range()
	xtensa/simdisk: fix proc_read_simdisk()
	rtl818x: Prevent using not initialized queues
	ASoC: rt5514: Fix event generation for "DSP Voice Wake Up" control
	carl9170: tx: fix an incorrect use of list iterator
	stm: ltdc: fix two incorrect NULL checks on list iterator
	bcache: improve multithreaded bch_btree_check()
	bcache: improve multithreaded bch_sectors_dirty_init()
	bcache: remove incremental dirty sector counting for bch_sectors_dirty_init()
	bcache: avoid journal no-space deadlock by reserving 1 journal bucket
	serial: pch: don't overwrite xmit->buf[0] by x_char
	tilcdc: tilcdc_external: fix an incorrect NULL check on list iterator
	gma500: fix an incorrect NULL check on list iterator
	arm64: dts: qcom: ipq8074: fix the sleep clock frequency
	arm64: tegra: Add missing DFLL reset on Tegra210
	clk: tegra: Add missing reset deassertion
	phy: qcom-qmp: fix struct clk leak on probe errors
	ARM: dts: s5pv210: Remove spi-cs-high on panel in Aries
	ARM: pxa: maybe fix gpio lookup tables
	SMB3: EBADF/EIO errors in rename/open caused by race condition in smb2_compound_op
	docs/conf.py: Cope with removal of language=None in Sphinx 5.0.0
	dt-bindings: gpio: altera: correct interrupt-cells
	vdpasim: allow to enable a vq repeatedly
	blk-iolatency: Fix inflight count imbalances and IO hangs on offline
	coresight: core: Fix coresight device probe failure issue
	phy: qcom-qmp: fix reset-controller leak on probe errors
	net: ipa: fix page free in ipa_endpoint_trans_release()
	net: ipa: fix page free in ipa_endpoint_replenish_one()
	kseltest/cgroup: Make test_stress.sh work if run interactively
	list: test: Add a test for list_is_head()
	Revert "random: use static branch for crng_ready()"
	staging: r8188eu: delete rtw_wx_read/write32()
	RDMA/hns: Remove the num_cqc_timer variable
	RDMA/rxe: Generate a completion for unsupported/invalid opcode
	MIPS: IP27: Remove incorrect `cpu_has_fpu' override
	MIPS: IP30: Remove incorrect `cpu_has_fpu' override
	ext4: only allow test_dummy_encryption when supported
	interconnect: qcom: sc7180: Drop IP0 interconnects
	interconnect: qcom: icc-rpmh: Add BCMs to commit list in pre_aggregate
	fs: add two trivial lookup helpers
	exportfs: support idmapped mounts
	fs/ntfs3: Fix invalid free in log_replay
	md: Don't set mddev private to NULL in raid0 pers->free
	md: fix double free of io_acct_set bioset
	md: bcache: check the return value of kzalloc() in detached_dev_do_request()
	pinctrl/rockchip: support setting input-enable param
	block: fix bio_clone_blkg_association() to associate with proper blkcg_gq
	Linux 5.15.46

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I7b65df29c22a01b81a94cd844867a18e73098a15
2022-07-13 11:40:42 +02:00

// SPDX-License-Identifier: GPL-2.0-only
/*
* fs/fs-writeback.c
*
* Copyright (C) 2002, Linus Torvalds.
*
* Contains all the functions related to writing back and waiting
* upon dirty inodes against superblocks, and writing back dirty
* pages against inodes, i.e. data writeback. Writeout of the
* inode itself is not handled here.
*
* 10Apr2002 Andrew Morton
* Split out of fs/inode.c
* Additions for address_space-based writeback
*/
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/kthread.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/tracepoint.h>
#include <linux/device.h>
#include <linux/memcontrol.h>
#include "internal.h"
/*
* 4MB minimal write chunk size
*/
#define MIN_WRITEBACK_PAGES (4096UL >> (PAGE_SHIFT - 10))
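/*
* Illustrative arithmetic, assuming 4 KiB pages (PAGE_SHIFT == 12):
* 4096UL >> (12 - 10) == 1024 pages, and 1024 pages * 4 KiB == 4 MiB,
* i.e. the "4MB minimal write chunk size" stated above.
*/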
/*
* Passed into wb_writeback(), essentially a subset of writeback_control
*/
struct wb_writeback_work {
long nr_pages;
struct super_block *sb;
enum writeback_sync_modes sync_mode;
unsigned int tagged_writepages:1;
unsigned int for_kupdate:1;
unsigned int range_cyclic:1;
unsigned int for_background:1;
unsigned int for_sync:1; /* sync(2) WB_SYNC_ALL writeback */
unsigned int auto_free:1; /* free on completion */
enum wb_reason reason; /* why was writeback initiated? */
struct list_head list; /* pending work list */
struct wb_completion *done; /* set if the caller waits */
};
/*
* If an inode is constantly having its pages dirtied, but then the
* updates stop dirtytime_expire_interval seconds in the past, it's
* possible for the worst case time between when an inode has its
* timestamps updated and when they finally get written out to be two
* dirtytime_expire_intervals. We set the default to 12 hours (in
* seconds), which means most of the time inodes will have their
* timestamps written to disk after 12 hours, but in the worst case a
* few inodes might not have their timestamps written for 24 hours.
*/
unsigned int dirtytime_expire_interval = 12 * 60 * 60;
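/*
* Worked example of the worst case above: with the default 12 h interval, a
* timestamp update that lands just after one periodic dirtytime sweep is not
* yet old enough to be expired by the next sweep ~12 h later, so it is only
* written out by the sweep after that, roughly 24 h (two intervals) later.
*/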
static inline struct inode *wb_inode(struct list_head *head)
{
return list_entry(head, struct inode, i_io_list);
}
/*
* Include the creation of the trace points after defining the
* wb_writeback_work structure and inline functions so that the definition
* remains local to this file.
*/
#define CREATE_TRACE_POINTS
#include <trace/events/writeback.h>
EXPORT_TRACEPOINT_SYMBOL_GPL(wbc_writepage);
static bool wb_io_lists_populated(struct bdi_writeback *wb)
{
if (wb_has_dirty_io(wb)) {
return false;
} else {
set_bit(WB_has_dirty_io, &wb->state);
WARN_ON_ONCE(!wb->avg_write_bandwidth);
atomic_long_add(wb->avg_write_bandwidth,
&wb->bdi->tot_write_bandwidth);
return true;
}
}
static void wb_io_lists_depopulated(struct bdi_writeback *wb)
{
if (wb_has_dirty_io(wb) && list_empty(&wb->b_dirty) &&
list_empty(&wb->b_io) && list_empty(&wb->b_more_io)) {
clear_bit(WB_has_dirty_io, &wb->state);
WARN_ON_ONCE(atomic_long_sub_return(wb->avg_write_bandwidth,
&wb->bdi->tot_write_bandwidth) < 0);
}
}
/**
* inode_io_list_move_locked - move an inode onto a bdi_writeback IO list
* @inode: inode to be moved
* @wb: target bdi_writeback
* @head: one of @wb->b_{dirty|io|more_io|dirty_time}
*
* Move @inode->i_io_list to @list of @wb and set %WB_has_dirty_io.
* Returns %true if @inode is the first occupant of the !dirty_time IO
* lists; otherwise, %false.
*/
static bool inode_io_list_move_locked(struct inode *inode,
struct bdi_writeback *wb,
struct list_head *head)
{
assert_spin_locked(&wb->list_lock);
list_move(&inode->i_io_list, head);
/* dirty_time doesn't count as dirty_io until expiration */
if (head != &wb->b_dirty_time)
return wb_io_lists_populated(wb);
wb_io_lists_depopulated(wb);
return false;
}
static void wb_wakeup(struct bdi_writeback *wb)
{
spin_lock_bh(&wb->work_lock);
if (test_bit(WB_registered, &wb->state))
mod_delayed_work(bdi_wq, &wb->dwork, 0);
spin_unlock_bh(&wb->work_lock);
}
static void finish_writeback_work(struct bdi_writeback *wb,
struct wb_writeback_work *work)
{
struct wb_completion *done = work->done;
if (work->auto_free)
kfree(work);
if (done) {
wait_queue_head_t *waitq = done->waitq;
/* @done can't be accessed after the following dec */
if (atomic_dec_and_test(&done->cnt))
wake_up_all(waitq);
}
}
static void wb_queue_work(struct bdi_writeback *wb,
struct wb_writeback_work *work)
{
trace_writeback_queue(wb, work);
if (work->done)
atomic_inc(&work->done->cnt);
spin_lock_bh(&wb->work_lock);
if (test_bit(WB_registered, &wb->state)) {
list_add_tail(&work->list, &wb->work_list);
mod_delayed_work(bdi_wq, &wb->dwork, 0);
} else
finish_writeback_work(wb, work);
spin_unlock_bh(&wb->work_lock);
}
/**
* wb_wait_for_completion - wait for completion of bdi_writeback_works
* @done: target wb_completion
*
* Wait for one or more work items issued to @bdi with their ->done field
* set to @done, which should have been initialized with
* DEFINE_WB_COMPLETION(). This function returns after all such work items
* are completed. Work items which are waited upon aren't freed
* automatically on completion.
*/
void wb_wait_for_completion(struct wb_completion *done)
{
atomic_dec(&done->cnt); /* put down the initial count */
wait_event(*done->waitq, !atomic_read(&done->cnt));
}
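/*
* Usage sketch (illustrative, not a verbatim caller):
*
*	DEFINE_WB_COMPLETION(done, bdi);
*	struct wb_writeback_work work = { .done = &done, ... };
*
*	wb_queue_work(wb, &work);
*	wb_wait_for_completion(&done);
*
* wb_queue_work() takes a reference on @done for each queued item and
* finish_writeback_work() drops it, so the waiter blocks until the initial
* reference plus every per-work reference has been put.
*/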
#ifdef CONFIG_CGROUP_WRITEBACK
/*
* Parameters for foreign inode detection, see wbc_detach_inode() to see
* how they're used.
*
* These parameters are inherently heuristic as the detection target
* itself is fuzzy. All we want to do is detach an inode from the
* current owner if it's being written to by some other cgroups too much.
*
* The current cgroup writeback is built on the assumption that multiple
* cgroups writing to the same inode concurrently is very rare and a mode
* of operation which isn't well supported. As such, the goal is not
* taking too long when a different cgroup takes over an inode while
* avoiding too aggressive flip-flops from occasional foreign writes.
*
* We record, very roughly, 2s worth of IO time history and if more than
* half of that is foreign, trigger the switch. The recording is quantized
* to 16 slots. To avoid tiny writes from swinging the decision too much,
* writes smaller than 1/8 of avg size are ignored.
*/
#define WB_FRN_TIME_SHIFT 13 /* 1s = 2^13, up to 8 secs w/ 16bit */
#define WB_FRN_TIME_AVG_SHIFT 3 /* avg = avg * 7/8 + new * 1/8 */
#define WB_FRN_TIME_CUT_DIV 8 /* ignore rounds < avg / 8 */
#define WB_FRN_TIME_PERIOD (2 * (1 << WB_FRN_TIME_SHIFT)) /* 2s */
#define WB_FRN_HIST_SLOTS 16 /* inode->i_wb_frn_history is 16bit */
#define WB_FRN_HIST_UNIT (WB_FRN_TIME_PERIOD / WB_FRN_HIST_SLOTS)
/* each slot's duration is 2s / 16 */
#define WB_FRN_HIST_THR_SLOTS (WB_FRN_HIST_SLOTS / 2)
/* if foreign slots >= 8, switch */
#define WB_FRN_HIST_MAX_SLOTS (WB_FRN_HIST_THR_SLOTS / 2 + 1)
/* one round can affect up to 5 slots */
#define WB_FRN_MAX_IN_FLIGHT 1024 /* don't queue too many concurrently */
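/*
* Worked example of the numbers above: with WB_FRN_TIME_SHIFT == 13 one time
* unit is 1/8192 s, so WB_FRN_TIME_PERIOD == 2 * 8192 == 16384 units (2 s)
* and each of the 16 history slots covers WB_FRN_HIST_UNIT == 16384 / 16 ==
* 1024 units, i.e. roughly 125 ms. A switch is only considered once at least
* WB_FRN_HIST_THR_SLOTS == 8 slots, about one second of the recorded
* history, are attributed to a foreign wb.
*/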
/*
* Maximum inodes per isw. A specific value has been chosen to make
* struct inode_switch_wbs_context fit into a 1024-byte kmalloc.
*/
#define WB_MAX_INODES_PER_ISW ((1024UL - sizeof(struct inode_switch_wbs_context)) \
/ sizeof(struct inode *))
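/*
* Illustrative sizing, assuming 64-bit pointers and, purely for illustration,
* a 64-byte struct inode_switch_wbs_context: (1024 - 64) / 8 == 120 inode
* pointers per batch. cleanup_offline_cgwb() stops filling one short of this
* limit so the zero-initialized final slot acts as the NULL terminator that
* inode_switch_wbs_work_fn() iterates up to.
*/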
static atomic_t isw_nr_in_flight = ATOMIC_INIT(0);
static struct workqueue_struct *isw_wq;
void __inode_attach_wb(struct inode *inode, struct page *page)
{
struct backing_dev_info *bdi = inode_to_bdi(inode);
struct bdi_writeback *wb = NULL;
if (inode_cgwb_enabled(inode)) {
struct cgroup_subsys_state *memcg_css;
if (page) {
memcg_css = mem_cgroup_css_from_page(page);
wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
} else {
/* must pin memcg_css, see wb_get_create() */
memcg_css = task_get_css(current, memory_cgrp_id);
wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
css_put(memcg_css);
}
}
if (!wb)
wb = &bdi->wb;
/*
* There may be multiple instances of this function racing to
* update the same inode. Use cmpxchg() to tell the winner.
*/
if (unlikely(cmpxchg(&inode->i_wb, NULL, wb)))
wb_put(wb);
}
EXPORT_SYMBOL_GPL(__inode_attach_wb);
/**
* inode_cgwb_move_to_attached - put the inode onto wb->b_attached list
* @inode: inode of interest with i_lock held
* @wb: target bdi_writeback
*
* Remove the inode from wb's io lists and, if necessary, put it onto the b_attached
* list. Only inodes attached to cgwb's are kept on this list.
*/
static void inode_cgwb_move_to_attached(struct inode *inode,
struct bdi_writeback *wb)
{
assert_spin_locked(&wb->list_lock);
assert_spin_locked(&inode->i_lock);
inode->i_state &= ~I_SYNC_QUEUED;
if (wb != &wb->bdi->wb)
list_move(&inode->i_io_list, &wb->b_attached);
else
list_del_init(&inode->i_io_list);
wb_io_lists_depopulated(wb);
}
/**
* locked_inode_to_wb_and_lock_list - determine a locked inode's wb and lock it
* @inode: inode of interest with i_lock held
*
* Returns @inode's wb with its list_lock held. @inode->i_lock must be
* held on entry and is released on return. The returned wb is guaranteed
* to stay @inode's associated wb until its list_lock is released.
*/
static struct bdi_writeback *
locked_inode_to_wb_and_lock_list(struct inode *inode)
__releases(&inode->i_lock)
__acquires(&wb->list_lock)
{
while (true) {
struct bdi_writeback *wb = inode_to_wb(inode);
/*
* inode_to_wb() association is protected by both
* @inode->i_lock and @wb->list_lock but list_lock nests
* outside i_lock. Drop i_lock and verify that the
* association hasn't changed after acquiring list_lock.
*/
wb_get(wb);
spin_unlock(&inode->i_lock);
spin_lock(&wb->list_lock);
/* i_wb may have changed in between, can't use inode_to_wb() */
if (likely(wb == inode->i_wb)) {
wb_put(wb); /* @inode already has ref */
return wb;
}
spin_unlock(&wb->list_lock);
wb_put(wb);
cpu_relax();
spin_lock(&inode->i_lock);
}
}
/**
* inode_to_wb_and_lock_list - determine an inode's wb and lock it
* @inode: inode of interest
*
* Same as locked_inode_to_wb_and_lock_list() but @inode->i_lock isn't held
* on entry.
*/
static struct bdi_writeback *inode_to_wb_and_lock_list(struct inode *inode)
__acquires(&wb->list_lock)
{
spin_lock(&inode->i_lock);
return locked_inode_to_wb_and_lock_list(inode);
}
struct inode_switch_wbs_context {
struct rcu_work work;
/*
* Multiple inodes can be switched at once. The switching procedure
* consists of two parts, separated by a RCU grace period. To make
* sure that the second part is executed for each inode gone through
* the first part, all inode pointers are placed into a NULL-terminated
* array embedded into struct inode_switch_wbs_context. Otherwise
* an inode could be left in an inconsistent state.
*/
struct bdi_writeback *new_wb;
struct inode *inodes[];
};
static void bdi_down_write_wb_switch_rwsem(struct backing_dev_info *bdi)
{
down_write(&bdi->wb_switch_rwsem);
}
static void bdi_up_write_wb_switch_rwsem(struct backing_dev_info *bdi)
{
up_write(&bdi->wb_switch_rwsem);
}
static bool inode_do_switch_wbs(struct inode *inode,
struct bdi_writeback *old_wb,
struct bdi_writeback *new_wb)
{
struct address_space *mapping = inode->i_mapping;
XA_STATE(xas, &mapping->i_pages, 0);
struct page *page;
bool switched = false;
spin_lock(&inode->i_lock);
xa_lock_irq(&mapping->i_pages);
/*
* Once I_FREEING or I_WILL_FREE are visible under i_lock, the eviction
* path owns the inode and we shouldn't modify ->i_io_list.
*/
if (unlikely(inode->i_state & (I_FREEING | I_WILL_FREE)))
goto skip_switch;
trace_inode_switch_wbs(inode, old_wb, new_wb);
/*
* Count and transfer stats. Note that PAGECACHE_TAG_DIRTY points
* to possibly dirty pages while PAGECACHE_TAG_WRITEBACK points to
* pages actually under writeback.
*/
xas_for_each_marked(&xas, page, ULONG_MAX, PAGECACHE_TAG_DIRTY) {
if (PageDirty(page)) {
dec_wb_stat(old_wb, WB_RECLAIMABLE);
inc_wb_stat(new_wb, WB_RECLAIMABLE);
}
}
xas_set(&xas, 0);
xas_for_each_marked(&xas, page, ULONG_MAX, PAGECACHE_TAG_WRITEBACK) {
WARN_ON_ONCE(!PageWriteback(page));
dec_wb_stat(old_wb, WB_WRITEBACK);
inc_wb_stat(new_wb, WB_WRITEBACK);
}
if (mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK)) {
atomic_dec(&old_wb->writeback_inodes);
atomic_inc(&new_wb->writeback_inodes);
}
wb_get(new_wb);
/*
* Transfer to @new_wb's IO list if necessary. If the @inode is dirty,
* the specific list @inode was on is ignored and the @inode is put on
* ->b_dirty which is always correct including from ->b_dirty_time.
* The transfer preserves @inode->dirtied_when ordering. If the @inode
* was clean, it means it was on the b_attached list, so move it onto
* the b_attached list of @new_wb.
*/
if (!list_empty(&inode->i_io_list)) {
inode->i_wb = new_wb;
if (inode->i_state & I_DIRTY_ALL) {
struct inode *pos;
list_for_each_entry(pos, &new_wb->b_dirty, i_io_list)
if (time_after_eq(inode->dirtied_when,
pos->dirtied_when))
break;
inode_io_list_move_locked(inode, new_wb,
pos->i_io_list.prev);
} else {
inode_cgwb_move_to_attached(inode, new_wb);
}
} else {
inode->i_wb = new_wb;
}
/* ->i_wb_frn updates may race wbc_detach_inode() but doesn't matter */
inode->i_wb_frn_winner = 0;
inode->i_wb_frn_avg_time = 0;
inode->i_wb_frn_history = 0;
switched = true;
skip_switch:
/*
* Paired with load_acquire in unlocked_inode_to_wb_begin() and
* ensures that the new wb is visible if they see !I_WB_SWITCH.
*/
smp_store_release(&inode->i_state, inode->i_state & ~I_WB_SWITCH);
xa_unlock_irq(&mapping->i_pages);
spin_unlock(&inode->i_lock);
return switched;
}
static void inode_switch_wbs_work_fn(struct work_struct *work)
{
struct inode_switch_wbs_context *isw =
container_of(to_rcu_work(work), struct inode_switch_wbs_context, work);
struct backing_dev_info *bdi = inode_to_bdi(isw->inodes[0]);
struct bdi_writeback *old_wb = isw->inodes[0]->i_wb;
struct bdi_writeback *new_wb = isw->new_wb;
unsigned long nr_switched = 0;
struct inode **inodep;
/*
* If @inode switches cgwb membership while sync_inodes_sb() is
* being issued, sync_inodes_sb() might miss it. Synchronize.
*/
down_read(&bdi->wb_switch_rwsem);
/*
* By the time control reaches here, RCU grace period has passed
* since I_WB_SWITCH assertion and all wb stat update transactions
* between unlocked_inode_to_wb_begin/end() are guaranteed to be
* synchronizing against the i_pages lock.
*
* Grabbing old_wb->list_lock, inode->i_lock and the i_pages lock
* gives us exclusion against all wb related operations on @inode
* including IO list manipulations and stat updates.
*/
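/*
* Take the two list_locks in a fixed (address) order so that concurrent
* switches touching the same pair of wbs cannot deadlock on each other.
*/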
if (old_wb < new_wb) {
spin_lock(&old_wb->list_lock);
spin_lock_nested(&new_wb->list_lock, SINGLE_DEPTH_NESTING);
} else {
spin_lock(&new_wb->list_lock);
spin_lock_nested(&old_wb->list_lock, SINGLE_DEPTH_NESTING);
}
for (inodep = isw->inodes; *inodep; inodep++) {
WARN_ON_ONCE((*inodep)->i_wb != old_wb);
if (inode_do_switch_wbs(*inodep, old_wb, new_wb))
nr_switched++;
}
spin_unlock(&new_wb->list_lock);
spin_unlock(&old_wb->list_lock);
up_read(&bdi->wb_switch_rwsem);
if (nr_switched) {
wb_wakeup(new_wb);
wb_put_many(old_wb, nr_switched);
}
for (inodep = isw->inodes; *inodep; inodep++)
iput(*inodep);
wb_put(new_wb);
kfree(isw);
atomic_dec(&isw_nr_in_flight);
}
static bool inode_prepare_wbs_switch(struct inode *inode,
struct bdi_writeback *new_wb)
{
/*
* Paired with smp_mb() in cgroup_writeback_umount().
* isw_nr_in_flight must be increased before checking SB_ACTIVE and
* grabbing an inode, otherwise isw_nr_in_flight can be observed as 0
* in cgroup_writeback_umount() and the isw_wq will not be flushed.
*/
smp_mb();
if (IS_DAX(inode))
return false;
/* while holding I_WB_SWITCH, no one else can update the association */
spin_lock(&inode->i_lock);
if (!(inode->i_sb->s_flags & SB_ACTIVE) ||
inode->i_state & (I_WB_SWITCH | I_FREEING | I_WILL_FREE) ||
inode_to_wb(inode) == new_wb) {
spin_unlock(&inode->i_lock);
return false;
}
inode->i_state |= I_WB_SWITCH;
__iget(inode);
spin_unlock(&inode->i_lock);
return true;
}
/**
* inode_switch_wbs - change the wb association of an inode
* @inode: target inode
* @new_wb_id: ID of the new wb
*
* Switch @inode's wb association to the wb identified by @new_wb_id. The
* switching is performed asynchronously and may fail silently.
*/
static void inode_switch_wbs(struct inode *inode, int new_wb_id)
{
struct backing_dev_info *bdi = inode_to_bdi(inode);
struct cgroup_subsys_state *memcg_css;
struct inode_switch_wbs_context *isw;
/* noop if seems to be already in progress */
if (inode->i_state & I_WB_SWITCH)
return;
/* avoid queueing a new switch if too many are already in flight */
if (atomic_read(&isw_nr_in_flight) > WB_FRN_MAX_IN_FLIGHT)
return;
isw = kzalloc(sizeof(*isw) + 2 * sizeof(struct inode *), GFP_ATOMIC);
if (!isw)
return;
atomic_inc(&isw_nr_in_flight);
/* find and pin the new wb */
rcu_read_lock();
memcg_css = css_from_id(new_wb_id, &memory_cgrp_subsys);
if (memcg_css && !css_tryget(memcg_css))
memcg_css = NULL;
rcu_read_unlock();
if (!memcg_css)
goto out_free;
isw->new_wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
css_put(memcg_css);
if (!isw->new_wb)
goto out_free;
if (!inode_prepare_wbs_switch(inode, isw->new_wb))
goto out_free;
isw->inodes[0] = inode;
/*
* In addition to synchronizing among switchers, I_WB_SWITCH tells
* the RCU protected stat update paths to grab the i_page
* lock so that stat transfer can synchronize against them.
* Let's continue after I_WB_SWITCH is guaranteed to be visible.
*/
INIT_RCU_WORK(&isw->work, inode_switch_wbs_work_fn);
queue_rcu_work(isw_wq, &isw->work);
return;
out_free:
atomic_dec(&isw_nr_in_flight);
if (isw->new_wb)
wb_put(isw->new_wb);
kfree(isw);
}
/**
* cleanup_offline_cgwb - detach associated inodes
* @wb: target wb
*
* Switch all inodes attached to @wb to a nearest living ancestor's wb in order
* to eventually release the dying @wb. Returns %true if not all inodes were
* switched and the function has to be restarted.
*/
bool cleanup_offline_cgwb(struct bdi_writeback *wb)
{
struct cgroup_subsys_state *memcg_css;
struct inode_switch_wbs_context *isw;
struct inode *inode;
int nr;
bool restart = false;
isw = kzalloc(sizeof(*isw) + WB_MAX_INODES_PER_ISW *
sizeof(struct inode *), GFP_KERNEL);
if (!isw)
return restart;
atomic_inc(&isw_nr_in_flight);
for (memcg_css = wb->memcg_css->parent; memcg_css;
memcg_css = memcg_css->parent) {
isw->new_wb = wb_get_create(wb->bdi, memcg_css, GFP_KERNEL);
if (isw->new_wb)
break;
}
if (unlikely(!isw->new_wb))
isw->new_wb = &wb->bdi->wb; /* wb_get() is noop for bdi's wb */
nr = 0;
spin_lock(&wb->list_lock);
list_for_each_entry(inode, &wb->b_attached, i_io_list) {
if (!inode_prepare_wbs_switch(inode, isw->new_wb))
continue;
isw->inodes[nr++] = inode;
if (nr >= WB_MAX_INODES_PER_ISW - 1) {
restart = true;
break;
}
}
spin_unlock(&wb->list_lock);
/* no attached inodes? bail out */
if (nr == 0) {
atomic_dec(&isw_nr_in_flight);
wb_put(isw->new_wb);
kfree(isw);
return restart;
}
/*
* In addition to synchronizing among switchers, I_WB_SWITCH tells
* the RCU protected stat update paths to grab the i_pages
* lock so that stat transfer can synchronize against them.
* Let's continue after I_WB_SWITCH is guaranteed to be visible.
*/
INIT_RCU_WORK(&isw->work, inode_switch_wbs_work_fn);
queue_rcu_work(isw_wq, &isw->work);
return restart;
}
/**
* wbc_attach_and_unlock_inode - associate wbc with target inode and unlock it
* @wbc: writeback_control of interest
* @inode: target inode
*
* @inode is locked and about to be written back under the control of @wbc.
* Record @inode's writeback context into @wbc and unlock the i_lock. On
* writeback completion, wbc_detach_inode() should be called. This is used
* to track the cgroup writeback context.
*/
void wbc_attach_and_unlock_inode(struct writeback_control *wbc,
struct inode *inode)
{
if (!inode_cgwb_enabled(inode)) {
spin_unlock(&inode->i_lock);
return;
}
wbc->wb = inode_to_wb(inode);
wbc->inode = inode;
wbc->wb_id = wbc->wb->memcg_css->id;
wbc->wb_lcand_id = inode->i_wb_frn_winner;
wbc->wb_tcand_id = 0;
wbc->wb_bytes = 0;
wbc->wb_lcand_bytes = 0;
wbc->wb_tcand_bytes = 0;
wb_get(wbc->wb);
spin_unlock(&inode->i_lock);
/*
* A dying wb indicates that either the blkcg associated with the
* memcg changed or the associated memcg is dying. In the first
* case, a replacement wb should already be available and we should
* refresh the wb immediately. In the second case, trying to
* refresh will keep failing.
*/
if (unlikely(wb_dying(wbc->wb) && !css_is_dying(wbc->wb->memcg_css)))
inode_switch_wbs(inode, wbc->wb_id);
}
EXPORT_SYMBOL_GPL(wbc_attach_and_unlock_inode);
/**
* wbc_detach_inode - disassociate wbc from inode and perform foreign detection
* @wbc: writeback_control of the just finished writeback
*
* To be called after a writeback attempt of an inode finishes and undoes
* wbc_attach_and_unlock_inode(). Can be called from any context.
*
* As concurrent write sharing of an inode is expected to be very rare and
* memcg only tracks page ownership on a first-use basis, severely confining
* the usefulness of such sharing, cgroup writeback tracks ownership
* per-inode. While support for concurrent write sharing of an inode
* is deemed unnecessary, an inode being written to by different cgroups at
* different points in time is a lot more common, and, more importantly,
* charging only by first use can too readily lead to grossly incorrect
* behaviors (a single foreign page can lead to gigabytes of writeback being
* incorrectly attributed).
*
* To resolve this issue, cgroup writeback detects the majority dirtier of
* an inode and transfers the ownership to it. To avoid unnecessary
* oscillation, the detection mechanism keeps track of history and gives
* out the switch verdict only if the foreign usage pattern is stable over
* a certain amount of time and/or writeback attempts.
*
* On each writeback attempt, @wbc tries to detect the majority writer
* using Boyer-Moore majority vote algorithm. In addition to the byte
* count from the majority voting, it also counts the bytes written for the
* current wb and the last round's winner wb (max of last round's current
* wb, the winner from two rounds ago, and the last round's majority
* candidate). Keeping track of the historical winner helps the algorithm
* to semi-reliably detect the most active writer even when it's not the
* absolute majority.
*
* Once the winner of the round is determined, whether the winner is
* foreign or not and how much IO time the round consumed is recorded in
* inode->i_wb_frn_history. If the amount of recorded foreign IO time is
* over a certain threshold, the switch verdict is given.
*/
void wbc_detach_inode(struct writeback_control *wbc)
{
struct bdi_writeback *wb = wbc->wb;
struct inode *inode = wbc->inode;
unsigned long avg_time, max_bytes, max_time;
u16 history;
int max_id;
if (!wb)
return;
history = inode->i_wb_frn_history;
avg_time = inode->i_wb_frn_avg_time;
/* pick the winner of this round */
if (wbc->wb_bytes >= wbc->wb_lcand_bytes &&
wbc->wb_bytes >= wbc->wb_tcand_bytes) {
max_id = wbc->wb_id;
max_bytes = wbc->wb_bytes;
} else if (wbc->wb_lcand_bytes >= wbc->wb_tcand_bytes) {
max_id = wbc->wb_lcand_id;
max_bytes = wbc->wb_lcand_bytes;
} else {
max_id = wbc->wb_tcand_id;
max_bytes = wbc->wb_tcand_bytes;
}
/*
* Calculate the amount of IO time the winner consumed and fold it
* into the running average kept per inode. If the consumed IO
* time is lower than avg_time / WB_FRN_TIME_CUT_DIV, ignore it for
* deciding whether to switch or not. This is to prevent one-off
* small dirtiers from skewing the verdict.
*/
max_time = DIV_ROUND_UP((max_bytes >> PAGE_SHIFT) << WB_FRN_TIME_SHIFT,
wb->avg_write_bandwidth);
if (avg_time)
avg_time += (max_time >> WB_FRN_TIME_AVG_SHIFT) -
(avg_time >> WB_FRN_TIME_AVG_SHIFT);
else
avg_time = max_time; /* immediate catch up on first run */
if (max_time >= avg_time / WB_FRN_TIME_CUT_DIV) {
int slots;
/*
* The switch verdict is reached if foreign wb's consume
* more than a certain proportion of IO time in a
* WB_FRN_TIME_PERIOD. This is loosely tracked by a 16-slot
* history mask where each bit represents one sixteenth of
* the period. Determine the number of slots to shift into
* history from @max_time.
*/
slots = min(DIV_ROUND_UP(max_time, WB_FRN_HIST_UNIT),
(unsigned long)WB_FRN_HIST_MAX_SLOTS);
history <<= slots;
if (wbc->wb_id != max_id)
history |= (1U << slots) - 1;
if (history)
trace_inode_foreign_history(inode, wbc, history);
/*
* Switch if the current wb isn't the consistent winner.
* If there are multiple closely competing dirtiers, the
* inode may switch across them repeatedly over time, which
* is okay. The main goal is avoiding keeping an inode on
* the wrong wb for an extended period of time.
*/
if (hweight32(history) > WB_FRN_HIST_THR_SLOTS)
inode_switch_wbs(inode, max_id);
}
/*
* Multiple instances of this function may race to update the
* following fields but we don't mind occasional inaccuracies.
*/
inode->i_wb_frn_winner = max_id;
inode->i_wb_frn_avg_time = min(avg_time, (unsigned long)U16_MAX);
inode->i_wb_frn_history = history;
wb_put(wbc->wb);
wbc->wb = NULL;
}
EXPORT_SYMBOL_GPL(wbc_detach_inode);
/**
* wbc_account_cgroup_owner - account writeback to update inode cgroup ownership
* @wbc: writeback_control of the writeback in progress
* @page: page being written out
* @bytes: number of bytes being written out
*
* @bytes from @page are about to be written out during the writeback
* controlled by @wbc. Keep the book for foreign inode detection. See
* wbc_detach_inode().
*/
void wbc_account_cgroup_owner(struct writeback_control *wbc, struct page *page,
size_t bytes)
{
struct cgroup_subsys_state *css;
int id;
/*
* pageout() path doesn't attach @wbc to the inode being written
* out. This is intentional as we don't want the function to block
* behind a slow cgroup. Ultimately, we want pageout() to kick off
* regular writeback instead of writing things out itself.
*/
if (!wbc->wb || wbc->no_cgroup_owner)
return;
css = mem_cgroup_css_from_page(page);
/* dead cgroups shouldn't contribute to inode ownership arbitration */
if (!(css->flags & CSS_ONLINE))
return;
id = css->id;
if (id == wbc->wb_id) {
wbc->wb_bytes += bytes;
return;
}
if (id == wbc->wb_lcand_id)
wbc->wb_lcand_bytes += bytes;
/* Boyer-Moore majority vote algorithm */
if (!wbc->wb_tcand_bytes)
wbc->wb_tcand_id = id;
if (id == wbc->wb_tcand_id)
wbc->wb_tcand_bytes += bytes;
else
wbc->wb_tcand_bytes -= min(bytes, wbc->wb_tcand_bytes);
}
EXPORT_SYMBOL_GPL(wbc_account_cgroup_owner);
/**
* inode_congested - test whether an inode is congested
* @inode: inode to test for congestion (may be NULL)
* @cong_bits: mask of WB_[a]sync_congested bits to test
*
* Tests whether @inode is congested. @cong_bits is the mask of congestion
* bits to test and the return value is the mask of set bits.
*
* If cgroup writeback is enabled for @inode, the congestion state is
* determined by whether the cgwb (cgroup bdi_writeback) for the blkcg
* associated with @inode is congested; otherwise, the root wb's congestion
* state is used.
*
* @inode is allowed to be NULL as this function is often called on
* mapping->host which is NULL for the swapper space.
*/
int inode_congested(struct inode *inode, int cong_bits)
{
/*
* Once set, ->i_wb never becomes NULL while the inode is alive.
* Start transaction iff ->i_wb is visible.
*/
if (inode && inode_to_wb_is_valid(inode)) {
struct bdi_writeback *wb;
struct wb_lock_cookie lock_cookie = {};
bool congested;
wb = unlocked_inode_to_wb_begin(inode, &lock_cookie);
congested = wb_congested(wb, cong_bits);
unlocked_inode_to_wb_end(inode, &lock_cookie);
return congested;
}
return wb_congested(&inode_to_bdi(inode)->wb, cong_bits);
}
EXPORT_SYMBOL_GPL(inode_congested);
/**
* wb_split_bdi_pages - split nr_pages to write according to bandwidth
* @wb: target bdi_writeback to split @nr_pages to
* @nr_pages: number of pages to write for the whole bdi
*
* Split @wb's portion of @nr_pages according to @wb's write bandwidth in
* relation to the total write bandwidth of all wb's w/ dirty inodes on
* @wb->bdi.
*/
static long wb_split_bdi_pages(struct bdi_writeback *wb, long nr_pages)
{
unsigned long this_bw = wb->avg_write_bandwidth;
unsigned long tot_bw = atomic_long_read(&wb->bdi->tot_write_bandwidth);
if (nr_pages == LONG_MAX)
return LONG_MAX;
/*
* This may be called on clean wb's and proportional distribution
* may not make sense, just use the original @nr_pages in those
* cases. In general, we want to err on the side of writing more.
*/
if (!tot_bw || this_bw >= tot_bw)
return nr_pages;
else
return DIV_ROUND_UP_ULL((u64)nr_pages * this_bw, tot_bw);
}
/**
* bdi_split_work_to_wbs - split a wb_writeback_work to all wb's of a bdi
* @bdi: target backing_dev_info
* @base_work: wb_writeback_work to issue
* @skip_if_busy: skip wb's which already have writeback in progress
*
* Split and issue @base_work to all wb's (bdi_writeback's) of @bdi which
* have dirty inodes. If @base_work->nr_pages isn't %LONG_MAX, it's
* distributed to the busy wbs according to each wb's proportion in the
* total active write bandwidth of @bdi.
*/
static void bdi_split_work_to_wbs(struct backing_dev_info *bdi,
struct wb_writeback_work *base_work,
bool skip_if_busy)
{
struct bdi_writeback *last_wb = NULL;
struct bdi_writeback *wb = list_entry(&bdi->wb_list,
struct bdi_writeback, bdi_node);
might_sleep();
restart:
rcu_read_lock();
list_for_each_entry_continue_rcu(wb, &bdi->wb_list, bdi_node) {
DEFINE_WB_COMPLETION(fallback_work_done, bdi);
struct wb_writeback_work fallback_work;
struct wb_writeback_work *work;
long nr_pages;
if (last_wb) {
wb_put(last_wb);
last_wb = NULL;
}
/* SYNC_ALL writes out I_DIRTY_TIME too */
if (!wb_has_dirty_io(wb) &&
(base_work->sync_mode == WB_SYNC_NONE ||
list_empty(&wb->b_dirty_time)))
continue;
if (skip_if_busy && writeback_in_progress(wb))
continue;
nr_pages = wb_split_bdi_pages(wb, base_work->nr_pages);
work = kmalloc(sizeof(*work), GFP_ATOMIC);
if (work) {
*work = *base_work;
work->nr_pages = nr_pages;
work->auto_free = 1;
wb_queue_work(wb, work);
continue;
}
/* alloc failed, execute synchronously using on-stack fallback */
work = &fallback_work;
*work = *base_work;
work->nr_pages = nr_pages;
work->auto_free = 0;
work->done = &fallback_work_done;
wb_queue_work(wb, work);
/*
* Pin @wb so that it stays on @bdi->wb_list. This allows
* continuing iteration from @wb after dropping and
* regrabbing rcu read lock.
*/
wb_get(wb);
last_wb = wb;
rcu_read_unlock();
wb_wait_for_completion(&fallback_work_done);
goto restart;
}
rcu_read_unlock();
if (last_wb)
wb_put(last_wb);
}
/**
* cgroup_writeback_by_id - initiate cgroup writeback from bdi and memcg IDs
* @bdi_id: target bdi id
* @memcg_id: target memcg css id
* @reason: reason why some writeback work was initiated
* @done: target wb_completion
*
* Initiate flush of the bdi_writeback identified by @bdi_id and @memcg_id
* with the specified parameters.
*/
int cgroup_writeback_by_id(u64 bdi_id, int memcg_id,
enum wb_reason reason, struct wb_completion *done)
{
struct backing_dev_info *bdi;
struct cgroup_subsys_state *memcg_css;
struct bdi_writeback *wb;
struct wb_writeback_work *work;
unsigned long dirty;
int ret;
/* lookup bdi and memcg */
bdi = bdi_get_by_id(bdi_id);
if (!bdi)
return -ENOENT;
rcu_read_lock();
memcg_css = css_from_id(memcg_id, &memory_cgrp_subsys);
if (memcg_css && !css_tryget(memcg_css))
memcg_css = NULL;
rcu_read_unlock();
if (!memcg_css) {
ret = -ENOENT;
goto out_bdi_put;
}
/*
* And find the associated wb. If the wb isn't there already,
* there's nothing to flush; don't create one.
*/
wb = wb_get_lookup(bdi, memcg_css);
if (!wb) {
ret = -ENOENT;
goto out_css_put;
}
/*
* The caller is attempting to write out most of
* the currently dirty pages. Let's take the current dirty page
* count and inflate it by 25% which should be large enough to
* flush out most dirty pages while avoiding getting livelocked by
* concurrent dirtiers.
*
* BTW the memcg stats are flushed periodically and this is best-effort
* estimation, so some potential error is ok.
*/
dirty = memcg_page_state(mem_cgroup_from_css(memcg_css), NR_FILE_DIRTY);
dirty = dirty * 10 / 8;
/* issue the writeback work */
work = kzalloc(sizeof(*work), GFP_NOWAIT | __GFP_NOWARN);
if (work) {
work->nr_pages = dirty;
work->sync_mode = WB_SYNC_NONE;
work->range_cyclic = 1;
work->reason = reason;
work->done = done;
work->auto_free = 1;
wb_queue_work(wb, work);
ret = 0;
} else {
ret = -ENOMEM;
}
wb_put(wb);
out_css_put:
css_put(memcg_css);
out_bdi_put:
bdi_put(bdi);
return ret;
}
/**
* cgroup_writeback_umount - flush inode wb switches for umount
*
* This function is called when a super_block is about to be destroyed and
* flushes in-flight inode wb switches. An inode wb switch goes through
* RCU and then workqueue, so the two need to be flushed in order to ensure
* that all previously scheduled switches are finished. As wb switches are
* rare occurrences and synchronize_rcu() can take a while, perform
* flushing iff wb switches are in flight.
*/
void cgroup_writeback_umount(void)
{
/*
* SB_ACTIVE should be reliably cleared before checking
* isw_nr_in_flight, see generic_shutdown_super().
*/
smp_mb();
if (atomic_read(&isw_nr_in_flight)) {
/*
* Use rcu_barrier() to wait for all pending callbacks to
* ensure that all in-flight wb switches are in the workqueue.
*/
rcu_barrier();
flush_workqueue(isw_wq);
}
}
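/* Create the workqueue used to process inode wb switches. */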
static int __init cgroup_writeback_init(void)
{
isw_wq = alloc_workqueue("inode_switch_wbs", 0, 0);
if (!isw_wq)
return -ENOMEM;
return 0;
}
fs_initcall(cgroup_writeback_init);
#else /* CONFIG_CGROUP_WRITEBACK */
static void bdi_down_write_wb_switch_rwsem(struct backing_dev_info *bdi) { }
static void bdi_up_write_wb_switch_rwsem(struct backing_dev_info *bdi) { }
static void inode_cgwb_move_to_attached(struct inode *inode,
struct bdi_writeback *wb)
{
assert_spin_locked(&wb->list_lock);
assert_spin_locked(&inode->i_lock);
inode->i_state &= ~I_SYNC_QUEUED;
list_del_init(&inode->i_io_list);
wb_io_lists_depopulated(wb);
}
static struct bdi_writeback *
locked_inode_to_wb_and_lock_list(struct inode *inode)
__releases(&inode->i_lock)
__acquires(&wb->list_lock)
{
struct bdi_writeback *wb = inode_to_wb(inode);
spin_unlock(&inode->i_lock);
spin_lock(&wb->list_lock);
return wb;
}
static struct bdi_writeback *inode_to_wb_and_lock_list(struct inode *inode)
__acquires(&wb->list_lock)
{
struct bdi_writeback *wb = inode_to_wb(inode);
spin_lock(&wb->list_lock);
return wb;
}
static long wb_split_bdi_pages(struct bdi_writeback *wb, long nr_pages)
{
return nr_pages;
}
static void bdi_split_work_to_wbs(struct backing_dev_info *bdi,
struct wb_writeback_work *base_work,
bool skip_if_busy)
{
might_sleep();
if (!skip_if_busy || !writeback_in_progress(&bdi->wb)) {
base_work->auto_free = 0;
wb_queue_work(&bdi->wb, base_work);
}
}
#endif /* CONFIG_CGROUP_WRITEBACK */
/*
* Add in the number of potentially dirty inodes, because each inode
* write can dirty pagecache in the underlying blockdev.
*/
static unsigned long get_nr_dirty_pages(void)
{
return global_node_page_state(NR_FILE_DIRTY) +
get_nr_dirty_inodes();
}
static void wb_start_writeback(struct bdi_writeback *wb, enum wb_reason reason)
{
if (!wb_has_dirty_io(wb))
return;
/*
* All callers of this function want to start writeback of all
* dirty pages. Places like vmscan can call this at a very
* high frequency, causing pointless allocations of tons of
* work items and keeping the flusher threads busy retrieving
* that work. Ensure that we only allow one of them pending and
* in flight at a time.
*/
if (test_bit(WB_start_all, &wb->state) ||
test_and_set_bit(WB_start_all, &wb->state))
return;
wb->start_all_reason = reason;
wb_wakeup(wb);
}
/**
* wb_start_background_writeback - start background writeback
* @wb: bdi_writeback to write from
*
* Description:
* This makes sure WB_SYNC_NONE background writeback happens. When
* this function returns, it is only guaranteed that for the given wb
* some IO is happening if we are over the background dirty threshold.
* The caller need not hold the sb's s_umount semaphore.
*/
void wb_start_background_writeback(struct bdi_writeback *wb)
{
/*
* We just wake up the flusher thread. It will perform background
* writeback as soon as there is no other work to do.
*/
trace_writeback_wake_background(wb);
wb_wakeup(wb);
}
/*
* Remove the inode from the writeback list it is on.
*/
void inode_io_list_del(struct inode *inode)
{
struct bdi_writeback *wb;
wb = inode_to_wb_and_lock_list(inode);
spin_lock(&inode->i_lock);
inode->i_state &= ~I_SYNC_QUEUED;
list_del_init(&inode->i_io_list);
wb_io_lists_depopulated(wb);
spin_unlock(&inode->i_lock);
spin_unlock(&wb->list_lock);
}
EXPORT_SYMBOL(inode_io_list_del);
/*
* mark an inode as under writeback on the sb
*/
void sb_mark_inode_writeback(struct inode *inode)
{
struct super_block *sb = inode->i_sb;
unsigned long flags;
if (list_empty(&inode->i_wb_list)) {
spin_lock_irqsave(&sb->s_inode_wblist_lock, flags);
if (list_empty(&inode->i_wb_list)) {
list_add_tail(&inode->i_wb_list, &sb->s_inodes_wb);
trace_sb_mark_inode_writeback(inode);
}
spin_unlock_irqrestore(&sb->s_inode_wblist_lock, flags);
}
}
/*
* clear an inode's under-writeback state on the sb
*/
void sb_clear_inode_writeback(struct inode *inode)
{
struct super_block *sb = inode->i_sb;
unsigned long flags;
if (!list_empty(&inode->i_wb_list)) {
spin_lock_irqsave(&sb->s_inode_wblist_lock, flags);
if (!list_empty(&inode->i_wb_list)) {
list_del_init(&inode->i_wb_list);
trace_sb_clear_inode_writeback(inode);
}
spin_unlock_irqrestore(&sb->s_inode_wblist_lock, flags);
}
}
/*
* Redirty an inode: set its when-it-was dirtied timestamp and move it to the
* furthest end of its superblock's dirty-inode list.
*
* Before stamping the inode's ->dirtied_when, we check to see whether it is
* already the most-recently-dirtied inode on the b_dirty list. If that is
* the case then the inode must have been redirtied while it was being written
* out and we don't reset its dirtied_when.
*/
static void redirty_tail_locked(struct inode *inode, struct bdi_writeback *wb)
{
assert_spin_locked(&inode->i_lock);
if (!list_empty(&wb->b_dirty)) {
struct inode *tail;
tail = wb_inode(wb->b_dirty.next);
if (time_before(inode->dirtied_when, tail->dirtied_when))
inode->dirtied_when = jiffies;
}
inode_io_list_move_locked(inode, wb, &wb->b_dirty);
inode->i_state &= ~I_SYNC_QUEUED;
}
static void redirty_tail(struct inode *inode, struct bdi_writeback *wb)
{
spin_lock(&inode->i_lock);
redirty_tail_locked(inode, wb);
spin_unlock(&inode->i_lock);
}
/*
* requeue inode for re-scanning after bdi->b_io list is exhausted.
*/
static void requeue_io(struct inode *inode, struct bdi_writeback *wb)
{
inode_io_list_move_locked(inode, wb, &wb->b_more_io);
}
static void inode_sync_complete(struct inode *inode)
{
inode->i_state &= ~I_SYNC;
/* If inode is clean and unused, put it into LRU now... */
inode_add_lru(inode);
/* Waiters must see I_SYNC cleared before being woken up */
smp_mb();
wake_up_bit(&inode->i_state, __I_SYNC);
}
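/*
 * Was @inode dirtied after time @t (in jiffies)? Used when expiring
 * inodes from the dirty lists.
 */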
static bool inode_dirtied_after(struct inode *inode, unsigned long t)
{
bool ret = time_after(inode->dirtied_when, t);
#ifndef CONFIG_64BIT
/*
* For inodes being constantly redirtied, dirtied_when can get stuck.
* It _appears_ to be in the future, but is actually in distant past.
* This test is necessary to prevent such wrapped-around relative times
* from permanently stopping the whole bdi writeback.
*/
ret = ret && time_before_eq(inode->dirtied_when, jiffies);
#endif
return ret;
}
#define EXPIRE_DIRTY_ATIME 0x0001
/*
* Move expired (dirtied before dirtied_before) dirty inodes from
* @delaying_queue to @dispatch_queue.
*/
static int move_expired_inodes(struct list_head *delaying_queue,
struct list_head *dispatch_queue,
unsigned long dirtied_before)
{
LIST_HEAD(tmp);
struct list_head *pos, *node;
struct super_block *sb = NULL;
struct inode *inode;
int do_sb_sort = 0;
int moved = 0;
while (!list_empty(delaying_queue)) {
inode = wb_inode(delaying_queue->prev);
if (inode_dirtied_after(inode, dirtied_before))
break;
list_move(&inode->i_io_list, &tmp);
moved++;
spin_lock(&inode->i_lock);
inode->i_state |= I_SYNC_QUEUED;
spin_unlock(&inode->i_lock);
if (sb_is_blkdev_sb(inode->i_sb))
continue;
if (sb && sb != inode->i_sb)
do_sb_sort = 1;
sb = inode->i_sb;
}
/* just one sb in list, splice to dispatch_queue and we're done */
if (!do_sb_sort) {
list_splice(&tmp, dispatch_queue);
goto out;
}
/* Move inodes from one superblock together */
while (!list_empty(&tmp)) {
sb = wb_inode(tmp.prev)->i_sb;
list_for_each_prev_safe(pos, node, &tmp) {
inode = wb_inode(pos);
if (inode->i_sb == sb)
list_move(&inode->i_io_list, dispatch_queue);
}
}
out:
return moved;
}
/*
* Queue all expired dirty inodes for io, eldest first.
* Before
*         newly dirtied     b_dirty    b_io    b_more_io
*         =============>    gf         edc     BA
* After
*         newly dirtied     b_dirty    b_io    b_more_io
*         =============>    g          fBAedc
*                                      |
*                                      +--> dequeue for IO
*/
static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work,
unsigned long dirtied_before)
{
int moved;
unsigned long time_expire_jif = dirtied_before;
assert_spin_locked(&wb->list_lock);
list_splice_init(&wb->b_more_io, &wb->b_io);
moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, dirtied_before);
if (!work->for_sync)
time_expire_jif = jiffies - dirtytime_expire_interval * HZ;
moved += move_expired_inodes(&wb->b_dirty_time, &wb->b_io,
time_expire_jif);
if (moved)
wb_io_lists_populated(wb);
trace_writeback_queue_io(wb, work, dirtied_before, moved);
}
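/*
 * Write the inode's metadata via the filesystem's ->write_inode()
 * callback, if one is provided and the inode isn't bad.
 */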
static int write_inode(struct inode *inode, struct writeback_control *wbc)
{
int ret;
if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode)) {
trace_writeback_write_inode_start(inode, wbc);
ret = inode->i_sb->s_op->write_inode(inode, wbc);
trace_writeback_write_inode(inode, wbc);
return ret;
}
return 0;
}
/*
* Wait for writeback on an inode to complete. Called with i_lock held.
* Caller must make sure inode cannot go away when we drop i_lock.
*/
static void __inode_wait_for_writeback(struct inode *inode)
__releases(inode->i_lock)
__acquires(inode->i_lock)
{
DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
wait_queue_head_t *wqh;
wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
while (inode->i_state & I_SYNC) {
spin_unlock(&inode->i_lock);
__wait_on_bit(wqh, &wq, bit_wait,
TASK_UNINTERRUPTIBLE);
spin_lock(&inode->i_lock);
}
}
/*
* Wait for writeback on an inode to complete. Caller must have inode pinned.
*/
void inode_wait_for_writeback(struct inode *inode)
{
spin_lock(&inode->i_lock);
__inode_wait_for_writeback(inode);
spin_unlock(&inode->i_lock);
}
/*
* Sleep until I_SYNC is cleared. This function must be called with i_lock
* held and drops it. It is aimed at callers not holding any inode reference,
* so once i_lock is dropped, the inode can go away.
*/
static void inode_sleep_on_writeback(struct inode *inode)
__releases(inode->i_lock)
{
DEFINE_WAIT(wait);
wait_queue_head_t *wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
int sleep;
prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
sleep = inode->i_state & I_SYNC;
spin_unlock(&inode->i_lock);
if (sleep)
schedule();
finish_wait(wqh, &wait);
}
/*
* Find proper writeback list for the inode depending on its current state and
* possibly also change of its state while we were doing writeback. Here we
* handle things such as livelock prevention or fairness of writeback among
* inodes. This function can only be called by the flusher thread - no one else
* processes all inodes in the writeback lists, and requeueing inodes behind the
* flusher thread's back can have unexpected consequences.
*/
static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
struct writeback_control *wbc)
{
if (inode->i_state & I_FREEING)
return;
/*
* Sync livelock prevention. Each inode is tagged and synced in one
* shot. If still dirty, it will be redirty_tail()'ed below. Update
* the dirty time to prevent it from being enqueued and synced again.
*/
if ((inode->i_state & I_DIRTY) &&
(wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages))
inode->dirtied_when = jiffies;
if (wbc->pages_skipped) {
/*
* writeback is not making progress due to locked
* buffers. Skip this inode for now.
*/
redirty_tail_locked(inode, wb);
return;
}
if (mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) {
/*
* We didn't write back all the pages. nfs_writepages()
* sometimes bails out without doing anything.
*/
if (wbc->nr_to_write <= 0) {
/* Slice used up. Queue for next turn. */
requeue_io(inode, wb);
} else {
/*
* Writeback blocked by something other than
* congestion. Delay the inode for some time to
* avoid spinning on the CPU (100% iowait)
* retrying writeback of the dirty page/inode
* that cannot be performed immediately.
*/
redirty_tail_locked(inode, wb);
}
} else if (inode->i_state & I_DIRTY) {
/*
* Filesystems can dirty the inode during writeback operations,
* such as delayed allocation during submission or metadata
* updates after data IO completion.
*/
redirty_tail_locked(inode, wb);
} else if (inode->i_state & I_DIRTY_TIME) {
inode->dirtied_when = jiffies;
inode_io_list_move_locked(inode, wb, &wb->b_dirty_time);
inode->i_state &= ~I_SYNC_QUEUED;
} else {
/* The inode is clean. Remove from writeback lists. */
inode_cgwb_move_to_attached(inode, wb);
}
}
/*
* Write out an inode and its dirty pages (or some of its dirty pages, depending
* on @wbc->nr_to_write), and clear the relevant dirty flags from i_state.
*
* This doesn't remove the inode from the writeback list it is on, except
* potentially to move it from b_dirty_time to b_dirty due to timestamp
* expiration. The caller is otherwise responsible for writeback list handling.
*
* The caller is also responsible for setting the I_SYNC flag beforehand and
* calling inode_sync_complete() to clear it afterwards.
*/
static int
__writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
{
struct address_space *mapping = inode->i_mapping;
long nr_to_write = wbc->nr_to_write;
unsigned dirty;
int ret;
WARN_ON(!(inode->i_state & I_SYNC));
trace_writeback_single_inode_start(inode, wbc, nr_to_write);
ret = do_writepages(mapping, wbc);
/*
* Make sure to wait on the data before writing out the metadata.
* This is important for filesystems that modify metadata on data
* I/O completion. We don't do it for sync(2) writeback because it has a
* separate, external IO completion path and ->sync_fs for guaranteeing
* inode metadata is written back correctly.
*/
if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync) {
int err = filemap_fdatawait(mapping);
if (ret == 0)
ret = err;
}
/*
* If the inode has dirty timestamps and we need to write them, call
* mark_inode_dirty_sync() to notify the filesystem about it and to
* change I_DIRTY_TIME into I_DIRTY_SYNC.
*/
if ((inode->i_state & I_DIRTY_TIME) &&
(wbc->sync_mode == WB_SYNC_ALL ||
time_after(jiffies, inode->dirtied_time_when +
dirtytime_expire_interval * HZ))) {
trace_writeback_lazytime(inode);
mark_inode_dirty_sync(inode);
}
/*
* Get and clear the dirty flags from i_state. This needs to be done
* after calling writepages because some filesystems may redirty the
* inode during writepages due to delalloc. It also needs to be done
* after handling timestamp expiration, as that may dirty the inode too.
*/
spin_lock(&inode->i_lock);
dirty = inode->i_state & I_DIRTY;
inode->i_state &= ~dirty;
/*
* Paired with smp_mb() in __mark_inode_dirty(). This allows
* __mark_inode_dirty() to test i_state without grabbing i_lock -
* either they see the I_DIRTY bits cleared or we see the dirtied
* inode.
*
* I_DIRTY_PAGES is always cleared together above even if @mapping
* still has dirty pages. The flag is reinstated after smp_mb() if
* necessary. This guarantees that either __mark_inode_dirty()
* sees clear I_DIRTY_PAGES or we see PAGECACHE_TAG_DIRTY.
*/
smp_mb();
if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
inode->i_state |= I_DIRTY_PAGES;
spin_unlock(&inode->i_lock);
/* Don't write the inode if only I_DIRTY_PAGES was set */
if (dirty & ~I_DIRTY_PAGES) {
int err = write_inode(inode, wbc);
if (ret == 0)
ret = err;
}
trace_writeback_single_inode(inode, wbc, nr_to_write);
return ret;
}
/*
* Write out an inode's dirty data and metadata on-demand, i.e. separately from
* the regular batched writeback done by the flusher threads in
* writeback_sb_inodes(). @wbc controls various aspects of the write, such as
* whether it is a data-integrity sync (%WB_SYNC_ALL) or not (%WB_SYNC_NONE).
*
* To prevent the inode from going away, either the caller must have a reference
* to the inode, or the inode must have I_WILL_FREE or I_FREEING set.
*/
static int writeback_single_inode(struct inode *inode,
struct writeback_control *wbc)
{
struct bdi_writeback *wb;
int ret = 0;
spin_lock(&inode->i_lock);
if (!atomic_read(&inode->i_count))
WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
else
WARN_ON(inode->i_state & I_WILL_FREE);
if (inode->i_state & I_SYNC) {
/*
* Writeback is already running on the inode. For WB_SYNC_NONE,
* that's enough and we can just return. For WB_SYNC_ALL, we
* must wait for the existing writeback to complete, then do
* writeback again if there's anything left.
*/
if (wbc->sync_mode != WB_SYNC_ALL)
goto out;
__inode_wait_for_writeback(inode);
}
WARN_ON(inode->i_state & I_SYNC);
/*
* If the inode is already fully clean, then there's nothing to do.
*
* For data-integrity syncs we also need to check whether any pages are
* still under writeback, e.g. due to prior WB_SYNC_NONE writeback. If
* there are any such pages, we'll need to wait for them.
*/
if (!(inode->i_state & I_DIRTY_ALL) &&
(wbc->sync_mode != WB_SYNC_ALL ||
!mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK)))
goto out;
inode->i_state |= I_SYNC;
wbc_attach_and_unlock_inode(wbc, inode);
ret = __writeback_single_inode(inode, wbc);
wbc_detach_inode(wbc);
wb = inode_to_wb_and_lock_list(inode);
spin_lock(&inode->i_lock);
/*
* If the inode is now fully clean, then it can be safely removed from
* its writeback list (if any). Otherwise the flusher threads are
* responsible for the writeback lists.
*/
if (!(inode->i_state & I_DIRTY_ALL))
inode_cgwb_move_to_attached(inode, wb);
else if (!(inode->i_state & I_SYNC_QUEUED) &&
(inode->i_state & I_DIRTY))
redirty_tail_locked(inode, wb);
spin_unlock(&wb->list_lock);
inode_sync_complete(inode);
out:
spin_unlock(&inode->i_lock);
return ret;
}
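/*
 * Determine how many pages a single writeback pass may write for one
 * inode. Integrity (WB_SYNC_ALL) and tagged writeback get LONG_MAX;
 * otherwise the chunk is derived from the wb's average write bandwidth,
 * capped by the global dirty scope and the remaining work, and rounded
 * to a multiple of MIN_WRITEBACK_PAGES.
 */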
static long writeback_chunk_size(struct bdi_writeback *wb,
struct wb_writeback_work *work)
{
long pages;
/*
* WB_SYNC_ALL mode does livelock avoidance by syncing dirty
* inodes/pages in one big loop. Setting wbc.nr_to_write=LONG_MAX
* here avoids calling into writeback_inodes_wb() more than once.
*
* The intended call sequence for WB_SYNC_ALL writeback is:
*
* wb_writeback()
* writeback_sb_inodes() <== called only once
* write_cache_pages() <== called once for each inode
* (quickly) tag currently dirty pages
* (maybe slowly) sync all tagged pages
*/
if (work->sync_mode == WB_SYNC_ALL || work->tagged_writepages)
pages = LONG_MAX;
else {
pages = min(wb->avg_write_bandwidth / 2,
global_wb_domain.dirty_limit / DIRTY_SCOPE);
pages = min(pages, work->nr_pages);
pages = round_down(pages + MIN_WRITEBACK_PAGES,
MIN_WRITEBACK_PAGES);
}
return pages;
}
/*
* Write a portion of b_io inodes which belong to @sb.
*
* Return the number of pages and/or inodes written.
*
* NOTE! This is called with wb->list_lock held, and will
* unlock and relock that for each inode it ends up doing
* IO for.
*/
static long writeback_sb_inodes(struct super_block *sb,
struct bdi_writeback *wb,
struct wb_writeback_work *work)
{
struct writeback_control wbc = {
.sync_mode = work->sync_mode,
.tagged_writepages = work->tagged_writepages,
.for_kupdate = work->for_kupdate,
.for_background = work->for_background,
.for_sync = work->for_sync,
.range_cyclic = work->range_cyclic,
.range_start = 0,
.range_end = LLONG_MAX,
};
unsigned long start_time = jiffies;
long write_chunk;
long total_wrote = 0; /* count both pages and inodes */
while (!list_empty(&wb->b_io)) {
struct inode *inode = wb_inode(wb->b_io.prev);
struct bdi_writeback *tmp_wb;
long wrote;
if (inode->i_sb != sb) {
if (work->sb) {
/*
* We only want to write back data for this
* superblock, move all inodes not belonging
* to it back onto the dirty list.
*/
redirty_tail(inode, wb);
continue;
}
/*
* The inode belongs to a different superblock.
* Bounce back to the caller to unpin this and
* pin the next superblock.
*/
break;
}
/*
* Don't bother with new inodes or inodes being freed, first
* kind does not need periodic writeout yet, and for the latter
* kind writeout is handled by the freer.
*/
spin_lock(&inode->i_lock);
if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
redirty_tail_locked(inode, wb);
spin_unlock(&inode->i_lock);
continue;
}
if ((inode->i_state & I_SYNC) && wbc.sync_mode != WB_SYNC_ALL) {
/*
* If this inode is locked for writeback and we are not
* doing writeback-for-data-integrity, move it to
* b_more_io so that writeback can proceed with the
* other inodes on s_io.
*
* We'll have another go at writing back this inode
* when we completed a full scan of b_io.
*/
spin_unlock(&inode->i_lock);
requeue_io(inode, wb);
trace_writeback_sb_inodes_requeue(inode);
continue;
}
spin_unlock(&wb->list_lock);
/*
* We already requeued the inode if it had I_SYNC set and we
* are doing WB_SYNC_NONE writeback. So this catches only the
* WB_SYNC_ALL case.
*/
if (inode->i_state & I_SYNC) {
/* Wait for I_SYNC. This function drops i_lock... */
inode_sleep_on_writeback(inode);
/* Inode may be gone, start again */
spin_lock(&wb->list_lock);
continue;
}
inode->i_state |= I_SYNC;
wbc_attach_and_unlock_inode(&wbc, inode);
write_chunk = writeback_chunk_size(wb, work);
wbc.nr_to_write = write_chunk;
wbc.pages_skipped = 0;
/*
* We use I_SYNC to pin the inode in memory. While it is set
* evict_inode() will wait so the inode cannot be freed.
*/
__writeback_single_inode(inode, &wbc);
wbc_detach_inode(&wbc);
work->nr_pages -= write_chunk - wbc.nr_to_write;
wrote = write_chunk - wbc.nr_to_write - wbc.pages_skipped;
wrote = wrote < 0 ? 0 : wrote;
total_wrote += wrote;
if (need_resched()) {
/*
* We're trying to balance between building up a nice
* long list of IOs to improve our merge rate, and
* getting those IOs out quickly for anyone throttling
* in balance_dirty_pages(). cond_resched() doesn't
* unplug, so get our IOs out the door before we
* give up the CPU.
*/
blk_flush_plug(current);
cond_resched();
}
/*
* Requeue @inode if still dirty. Be careful as @inode may
* have been switched to another wb in the meantime.
*/
tmp_wb = inode_to_wb_and_lock_list(inode);
spin_lock(&inode->i_lock);
if (!(inode->i_state & I_DIRTY_ALL))
total_wrote++;
requeue_inode(inode, tmp_wb, &wbc);
inode_sync_complete(inode);
spin_unlock(&inode->i_lock);
if (unlikely(tmp_wb != wb)) {
spin_unlock(&tmp_wb->list_lock);
spin_lock(&wb->list_lock);
}
/*
* bail out to wb_writeback() often enough to check
* background threshold and other termination conditions.
*/
if (total_wrote) {
if (time_is_before_jiffies(start_time + HZ / 10UL))
break;
if (work->nr_pages <= 0)
break;
}
}
return total_wrote;
}
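/*
 * Write back a portion of the b_io inodes regardless of which superblock
 * they belong to. Superblocks that cannot be pinned with trylock_super()
 * have their inodes redirtied and are skipped.
 */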
static long __writeback_inodes_wb(struct bdi_writeback *wb,
struct wb_writeback_work *work)
{
unsigned long start_time = jiffies;
long wrote = 0;
while (!list_empty(&wb->b_io)) {
struct inode *inode = wb_inode(wb->b_io.prev);
struct super_block *sb = inode->i_sb;
if (!trylock_super(sb)) {
/*
* trylock_super() may fail consistently due to
* s_umount being grabbed by someone else. Don't use
* requeue_io() to avoid busy retrying the inode/sb.
*/
redirty_tail(inode, wb);
continue;
}
wrote += writeback_sb_inodes(sb, wb, work);
up_read(&sb->s_umount);
/* refer to the same tests at the end of writeback_sb_inodes */
if (wrote) {
if (time_is_before_jiffies(start_time + HZ / 10UL))
break;
if (work->nr_pages <= 0)
break;
}
}
/* Leave any unwritten inodes on b_io */
return wrote;
}
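/*
 * Run a best-effort (WB_SYNC_NONE) writeback pass of up to @nr_pages
 * pages on @wb and return the number of pages written.
 */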
static long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages,
enum wb_reason reason)
{
struct wb_writeback_work work = {
.nr_pages = nr_pages,
.sync_mode = WB_SYNC_NONE,
.range_cyclic = 1,
.reason = reason,
};
struct blk_plug plug;
blk_start_plug(&plug);
spin_lock(&wb->list_lock);
if (list_empty(&wb->b_io))
queue_io(wb, &work, jiffies);
__writeback_inodes_wb(wb, &work);
spin_unlock(&wb->list_lock);
blk_finish_plug(&plug);
return nr_pages - work.nr_pages;
}
/*
* Explicit flushing or periodic writeback of "old" data.
*
* Define "old": the first time one of an inode's pages is dirtied, we mark the
* dirtying-time in the inode's address_space. So this periodic writeback code
* just walks the superblock inode list, writing back any inodes which are
* older than a specific point in time.
*
* Try to run once per dirty_writeback_interval. But if a writeback event
* takes longer than a dirty_writeback_interval interval, then leave a
* one-second gap.
*
* dirtied_before takes precedence over nr_to_write. So we'll only write back
* all dirty pages if they are all attached to "old" mappings.
*/
static long wb_writeback(struct bdi_writeback *wb,
struct wb_writeback_work *work)
{
long nr_pages = work->nr_pages;
unsigned long dirtied_before = jiffies;
struct inode *inode;
long progress;
struct blk_plug plug;
blk_start_plug(&plug);
spin_lock(&wb->list_lock);
for (;;) {
/*
* Stop writeback when nr_pages has been consumed
*/
if (work->nr_pages <= 0)
break;
/*
* Background writeout and kupdate-style writeback may
* run forever. Stop them if there is other work to do
* so that e.g. sync can proceed. They'll be restarted
* after the other works are all done.
*/
if ((work->for_background || work->for_kupdate) &&
!list_empty(&wb->work_list))
break;
/*
* For background writeout, stop when we are below the
* background dirty threshold
*/
if (work->for_background && !wb_over_bg_thresh(wb))
break;
/*
* Kupdate and background works are special and we want to
* include all inodes that need writing. Livelock avoidance is
* handled by these works yielding to any other work so we are
* safe.
*/
if (work->for_kupdate) {
dirtied_before = jiffies -
msecs_to_jiffies(dirty_expire_interval * 10);
} else if (work->for_background)
dirtied_before = jiffies;
trace_writeback_start(wb, work);
if (list_empty(&wb->b_io))
queue_io(wb, work, dirtied_before);
if (work->sb)
progress = writeback_sb_inodes(work->sb, wb, work);
else
progress = __writeback_inodes_wb(wb, work);
trace_writeback_written(wb, work);
/*
* Did we write something? Try for more
*
* Dirty inodes are moved to b_io for writeback in batches.
* The completion of the current batch does not necessarily
* mean the overall work is done. So we keep looping as long
* as we make some progress on cleaning pages or inodes.
*/
if (progress)
continue;
/*
* No more inodes for IO, bail
*/
if (list_empty(&wb->b_more_io))
break;
/*
* Nothing written. Wait for some inode to
* become available for writeback. Otherwise
* we'll just busyloop.
*/
trace_writeback_wait(wb, work);
inode = wb_inode(wb->b_more_io.prev);
spin_lock(&inode->i_lock);
spin_unlock(&wb->list_lock);
/* This function drops i_lock... */
inode_sleep_on_writeback(inode);
spin_lock(&wb->list_lock);
}
spin_unlock(&wb->list_lock);
blk_finish_plug(&plug);
return nr_pages - work->nr_pages;
}
/*
* Return the next wb_writeback_work struct that hasn't been processed yet.
*/
static struct wb_writeback_work *get_next_work_item(struct bdi_writeback *wb)
{
struct wb_writeback_work *work = NULL;
spin_lock_bh(&wb->work_lock);
if (!list_empty(&wb->work_list)) {
work = list_entry(wb->work_list.next,
struct wb_writeback_work, list);
list_del_init(&work->list);
}
spin_unlock_bh(&wb->work_lock);
return work;
}
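/*
 * If @wb is over its background dirty threshold, run background writeback
 * until it drops below the threshold or other work is queued.
 */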
static long wb_check_background_flush(struct bdi_writeback *wb)
{
if (wb_over_bg_thresh(wb)) {
struct wb_writeback_work work = {
.nr_pages = LONG_MAX,
.sync_mode = WB_SYNC_NONE,
.for_background = 1,
.range_cyclic = 1,
.reason = WB_REASON_BACKGROUND,
};
return wb_writeback(wb, &work);
}
return 0;
}
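/*
 * Periodic kupdate-style writeback: at most once per
 * dirty_writeback_interval, flush inodes that have been dirty for longer
 * than dirty_expire_interval.
 */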
static long wb_check_old_data_flush(struct bdi_writeback *wb)
{
unsigned long expired;
long nr_pages;
/*
* When set to zero, disable periodic writeback
*/
if (!dirty_writeback_interval)
return 0;
expired = wb->last_old_flush +
msecs_to_jiffies(dirty_writeback_interval * 10);
if (time_before(jiffies, expired))
return 0;
wb->last_old_flush = jiffies;
nr_pages = get_nr_dirty_pages();
if (nr_pages) {
struct wb_writeback_work work = {
.nr_pages = nr_pages,
.sync_mode = WB_SYNC_NONE,
.for_kupdate = 1,
.range_cyclic = 1,
.reason = WB_REASON_PERIODIC,
};
return wb_writeback(wb, &work);
}
return 0;
}
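/*
 * Handle a WB_start_all request set by wb_start_writeback(): write out
 * this wb's share of the currently dirty pages and clear the flag.
 */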
static long wb_check_start_all(struct bdi_writeback *wb)
{
long nr_pages;
if (!test_bit(WB_start_all, &wb->state))
return 0;
nr_pages = get_nr_dirty_pages();
if (nr_pages) {
struct wb_writeback_work work = {
.nr_pages = wb_split_bdi_pages(wb, nr_pages),
.sync_mode = WB_SYNC_NONE,
.range_cyclic = 1,
.reason = wb->start_all_reason,
};
nr_pages = wb_writeback(wb, &work);
}
clear_bit(WB_start_all, &wb->state);
return nr_pages;
}
/*
* Retrieve work items and do the writeback they describe
*/
static long wb_do_writeback(struct bdi_writeback *wb)
{
struct wb_writeback_work *work;
long wrote = 0;
set_bit(WB_writeback_running, &wb->state);
while ((work = get_next_work_item(wb)) != NULL) {
trace_writeback_exec(wb, work);
wrote += wb_writeback(wb, work);
finish_writeback_work(wb, work);
}
/*
* Check for a flush-everything request
*/
wrote += wb_check_start_all(wb);
/*
* Check for periodic writeback, kupdated() style
*/
wrote += wb_check_old_data_flush(wb);
wrote += wb_check_background_flush(wb);
clear_bit(WB_writeback_running, &wb->state);
return wrote;
}
/*
* Handle writeback of dirty data for the device backed by this bdi. Also
* reschedules periodically and does kupdated style flushing.
*/
void wb_workfn(struct work_struct *work)
{
struct bdi_writeback *wb = container_of(to_delayed_work(work),
struct bdi_writeback, dwork);
long pages_written;
set_worker_desc("flush-%s", bdi_dev_name(wb->bdi));
current->flags |= PF_SWAPWRITE;
if (likely(!current_is_workqueue_rescuer() ||
!test_bit(WB_registered, &wb->state))) {
/*
* The normal path. Keep writing back @wb until its
* work_list is empty. Note that this path is also taken
* if @wb is shutting down even when we're running off the
* rescuer as work_list needs to be drained.
*/
do {
pages_written = wb_do_writeback(wb);
trace_writeback_pages_written(pages_written);
} while (!list_empty(&wb->work_list));
} else {
/*
* bdi_wq can't get enough workers and we're running off
* the emergency worker. Don't hog it. Hopefully, 1024 is
* enough for efficient IO.
*/
pages_written = writeback_inodes_wb(wb, 1024,
WB_REASON_FORKER_THREAD);
trace_writeback_pages_written(pages_written);
}
if (!list_empty(&wb->work_list))
wb_wakeup(wb);
else if (wb_has_dirty_io(wb) && dirty_writeback_interval)
wb_wakeup_delayed(wb);
current->flags &= ~PF_SWAPWRITE;
}
/*
* Start writeback of `nr_pages' pages on this bdi. If `nr_pages' is zero,
* write back the whole world.
*/
static void __wakeup_flusher_threads_bdi(struct backing_dev_info *bdi,
enum wb_reason reason)
{
struct bdi_writeback *wb;
if (!bdi_has_dirty_io(bdi))
return;
list_for_each_entry_rcu(wb, &bdi->wb_list, bdi_node)
wb_start_writeback(wb, reason);
}
void wakeup_flusher_threads_bdi(struct backing_dev_info *bdi,
enum wb_reason reason)
{
rcu_read_lock();
__wakeup_flusher_threads_bdi(bdi, reason);
rcu_read_unlock();
}
/*
* Wakeup the flusher threads to start writeback of all currently dirty pages
*/
void wakeup_flusher_threads(enum wb_reason reason)
{
struct backing_dev_info *bdi;
/*
* If we are expecting writeback progress we must submit plugged IO.
*/
if (blk_needs_flush_plug(current))
blk_schedule_flush_plug(current);
rcu_read_lock();
list_for_each_entry_rcu(bdi, &bdi_list, bdi_list)
__wakeup_flusher_threads_bdi(bdi, reason);
rcu_read_unlock();
}
/*
* Wake up bdi's periodically to make sure dirtytime inodes get
* written back periodically. We deliberately do *not* check the
* b_dirtytime list in wb_has_dirty_io(), since this would cause the
* kernel to be constantly waking up once there are any dirtytime
* inodes on the system. So instead we define a separate delayed work
* function which gets called much more rarely. (By default, only
* once every 12 hours.)
*
* If there is any other write activity going on in the file system,
* this function won't be necessary. But if the only thing that has
* happened on the file system is a dirtytime inode caused by an atime
* update, we need this infrastructure below to make sure that inode
* eventually gets pushed out to disk.
*/
static void wakeup_dirtytime_writeback(struct work_struct *w);
static DECLARE_DELAYED_WORK(dirtytime_work, wakeup_dirtytime_writeback);
static void wakeup_dirtytime_writeback(struct work_struct *w)
{
struct backing_dev_info *bdi;
rcu_read_lock();
list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
struct bdi_writeback *wb;
list_for_each_entry_rcu(wb, &bdi->wb_list, bdi_node)
if (!list_empty(&wb->b_dirty_time))
wb_wakeup(wb);
}
rcu_read_unlock();
schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ);
}
static int __init start_dirtytime_writeback(void)
{
schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ);
return 0;
}
__initcall(start_dirtytime_writeback);
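/*
 * sysctl handler: after a successful write, kick dirtytime_work
 * immediately so that a new interval takes effect without waiting for
 * the previously scheduled timeout.
 */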
int dirtytime_interval_handler(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int ret;
ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
if (ret == 0 && write)
mod_delayed_work(system_wq, &dirtytime_work, 0);
return ret;
}
/**
* __mark_inode_dirty - internal function to mark an inode dirty
*
* @inode: inode to mark
* @flags: what kind of dirty, e.g. I_DIRTY_SYNC. This can be a combination of
* multiple I_DIRTY_* flags, except that I_DIRTY_TIME can't be combined
* with I_DIRTY_PAGES.
*
* Mark an inode as dirty. We notify the filesystem, then update the inode's
* dirty flags. Then, if needed we add the inode to the appropriate dirty list.
*
* Most callers should use mark_inode_dirty() or mark_inode_dirty_sync()
* instead of calling this directly.
*
* CAREFUL! We only add the inode to the dirty list if it is hashed or if it
* refers to a blockdev. Unhashed inodes will never be added to the dirty list
* even if they are later hashed, as they will have been marked dirty already.
*
* In short, ensure you hash any inodes _before_ you start marking them dirty.
*
* Note that for blockdevs, inode->dirtied_when represents the dirtying time of
* the block-special inode (/dev/hda1) itself. And the ->dirtied_when field of
* the kernel-internal blockdev inode represents the dirtying time of the
* blockdev's pages. This is why for I_DIRTY_PAGES we always use
* page->mapping->host, so the page-dirtying time is recorded in the internal
* blockdev inode.
*/
void __mark_inode_dirty(struct inode *inode, int flags)
{
struct super_block *sb = inode->i_sb;
int dirtytime = 0;
trace_writeback_mark_inode_dirty(inode, flags);
if (flags & I_DIRTY_INODE) {
/*
* Notify the filesystem about the inode being dirtied, so that
* (if needed) it can update on-disk fields and journal the
* inode. This is only needed when the inode itself is being
* dirtied now. I.e. it's only needed for I_DIRTY_INODE, not
* for just I_DIRTY_PAGES or I_DIRTY_TIME.
*/
trace_writeback_dirty_inode_start(inode, flags);
if (sb->s_op->dirty_inode)
sb->s_op->dirty_inode(inode, flags & I_DIRTY_INODE);
trace_writeback_dirty_inode(inode, flags);
/* I_DIRTY_INODE supersedes I_DIRTY_TIME. */
flags &= ~I_DIRTY_TIME;
} else {
/*
* Else it's either I_DIRTY_PAGES, I_DIRTY_TIME, or nothing.
* (We don't support setting both I_DIRTY_PAGES and I_DIRTY_TIME
* in one call to __mark_inode_dirty().)
*/
dirtytime = flags & I_DIRTY_TIME;
WARN_ON_ONCE(dirtytime && flags != I_DIRTY_TIME);
}
/*
* Paired with smp_mb() in __writeback_single_inode() for the
* following lockless i_state test. See there for details.
*/
smp_mb();
if (((inode->i_state & flags) == flags) ||
(dirtytime && (inode->i_state & I_DIRTY_INODE)))
return;
spin_lock(&inode->i_lock);
if (dirtytime && (inode->i_state & I_DIRTY_INODE))
goto out_unlock_inode;
if ((inode->i_state & flags) != flags) {
const int was_dirty = inode->i_state & I_DIRTY;
inode_attach_wb(inode, NULL);
/* I_DIRTY_INODE supersedes I_DIRTY_TIME. */
if (flags & I_DIRTY_INODE)
inode->i_state &= ~I_DIRTY_TIME;
inode->i_state |= flags;
/*
* If the inode is queued for writeback by flush worker, just
* update its dirty state. Once the flush worker is done with
* the inode it will place it on the appropriate superblock
* list, based upon its state.
*/
if (inode->i_state & I_SYNC_QUEUED)
goto out_unlock_inode;
/*
* Only add valid (hashed) inodes to the superblock's
* dirty list. Add blockdev inodes as well.
*/
if (!S_ISBLK(inode->i_mode)) {
if (inode_unhashed(inode))
goto out_unlock_inode;
}
if (inode->i_state & I_FREEING)
goto out_unlock_inode;
/*
* If the inode was already on b_dirty/b_io/b_more_io, don't
* reposition it (that would break b_dirty time-ordering).
*/
if (!was_dirty) {
struct bdi_writeback *wb;
struct list_head *dirty_list;
bool wakeup_bdi = false;
wb = locked_inode_to_wb_and_lock_list(inode);
inode->dirtied_when = jiffies;
if (dirtytime)
inode->dirtied_time_when = jiffies;
if (inode->i_state & I_DIRTY)
dirty_list = &wb->b_dirty;
else
dirty_list = &wb->b_dirty_time;
wakeup_bdi = inode_io_list_move_locked(inode, wb,
dirty_list);
spin_unlock(&wb->list_lock);
trace_writeback_dirty_inode_enqueue(inode);
/*
* If this is the first dirty inode for this bdi,
* we have to wake-up the corresponding bdi thread
* to make sure background write-back happens
* later.
*/
if (wakeup_bdi &&
(wb->bdi->capabilities & BDI_CAP_WRITEBACK))
wb_wakeup_delayed(wb);
return;
}
}
out_unlock_inode:
spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL_NS(__mark_inode_dirty, ANDROID_GKI_VFS_EXPORT_ONLY);
/*
* The @s_sync_lock is used to serialise concurrent sync operations
* to avoid lock contention problems with concurrent wait_sb_inodes() calls.
* Concurrent callers will block on the s_sync_lock rather than doing contending
* walks. The queueing maintains sync(2) required behaviour as all the IO that
* has been issued up to the time this function is entered is guaranteed to be
* completed by the time we have gained the lock and waited for all IO that is
* in progress regardless of the order callers are granted the lock.
*/
static void wait_sb_inodes(struct super_block *sb)
{
LIST_HEAD(sync_list);
/*
* We need to be protected against the filesystem going from
* r/o to r/w or vice versa.
*/
WARN_ON(!rwsem_is_locked(&sb->s_umount));
mutex_lock(&sb->s_sync_lock);
/*
* Splice the writeback list onto a temporary list to avoid waiting on
* inodes that have started writeback after this point.
*
* Use rcu_read_lock() to keep the inodes around until we have a
* reference. s_inode_wblist_lock protects sb->s_inodes_wb as well as
* the local list because inodes can be dropped from either list by
* writeback completion.
*/
rcu_read_lock();
spin_lock_irq(&sb->s_inode_wblist_lock);
list_splice_init(&sb->s_inodes_wb, &sync_list);
/*
* Data integrity sync. Must wait for all pages under writeback, because
* there may have been pages dirtied before our sync call whose writeout
* had already started before we got here. In that case, the inode
* may not be on the dirty list, but we still have to wait for that
* writeout.
*/
while (!list_empty(&sync_list)) {
struct inode *inode = list_first_entry(&sync_list, struct inode,
i_wb_list);
struct address_space *mapping = inode->i_mapping;
/*
* Move each inode back to the wb list before we drop the lock
* to preserve consistency between i_wb_list and the mapping
* writeback tag. Writeback completion is responsible to remove
* the inode from either list once the writeback tag is cleared.
*/
list_move_tail(&inode->i_wb_list, &sb->s_inodes_wb);
/*
* The mapping can appear untagged while still on-list since we
* do not have the mapping lock. Skip it here, wb completion
* will remove it.
*/
if (!mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK))
continue;
spin_unlock_irq(&sb->s_inode_wblist_lock);
spin_lock(&inode->i_lock);
if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) {
spin_unlock(&inode->i_lock);
spin_lock_irq(&sb->s_inode_wblist_lock);
continue;
}
__iget(inode);
spin_unlock(&inode->i_lock);
rcu_read_unlock();
/*
* We keep the error status of individual mapping so that
* applications can catch the writeback error using fsync(2).
* See filemap_fdatawait_keep_errors() for details.
*/
filemap_fdatawait_keep_errors(mapping);
cond_resched();
iput(inode);
rcu_read_lock();
spin_lock_irq(&sb->s_inode_wblist_lock);
}
spin_unlock_irq(&sb->s_inode_wblist_lock);
rcu_read_unlock();
mutex_unlock(&sb->s_sync_lock);
}
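/*
 * Common helper for writeback_inodes_sb_nr() and
 * try_to_writeback_inodes_sb(): queue a tagged WB_SYNC_NONE work item to
 * all wbs of @sb's bdi and wait for completion.
 */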
static void __writeback_inodes_sb_nr(struct super_block *sb, unsigned long nr,
enum wb_reason reason, bool skip_if_busy)
{
struct backing_dev_info *bdi = sb->s_bdi;
DEFINE_WB_COMPLETION(done, bdi);
struct wb_writeback_work work = {
.sb = sb,
.sync_mode = WB_SYNC_NONE,
.tagged_writepages = 1,
.done = &done,
.nr_pages = nr,
.reason = reason,
};
if (!bdi_has_dirty_io(bdi) || bdi == &noop_backing_dev_info)
return;
WARN_ON(!rwsem_is_locked(&sb->s_umount));
bdi_split_work_to_wbs(sb->s_bdi, &work, skip_if_busy);
wb_wait_for_completion(&done);
}
/**
* writeback_inodes_sb_nr - writeback dirty inodes from given super_block
* @sb: the superblock
* @nr: the number of pages to write
* @reason: reason why some writeback work was initiated
*
* Start writeback on some inodes on this super_block. No guarantees are made
* on how many (if any) will be written, and this function does not wait
* for IO completion of submitted IO.
*/
void writeback_inodes_sb_nr(struct super_block *sb,
unsigned long nr,
enum wb_reason reason)
{
__writeback_inodes_sb_nr(sb, nr, reason, false);
}
EXPORT_SYMBOL(writeback_inodes_sb_nr);
/**
* writeback_inodes_sb - writeback dirty inodes from given super_block
* @sb: the superblock
* @reason: reason why some writeback work was initiated
*
* Start writeback on some inodes on this super_block. No guarantees are made
* on how many (if any) will be written, and this function does not wait
* for IO completion of submitted IO.
*/
void writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
{
return writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason);
}
EXPORT_SYMBOL(writeback_inodes_sb);
/**
* try_to_writeback_inodes_sb - try to start writeback if none underway
* @sb: the superblock
* @reason: reason why some writeback work was initiated
*
* Invoke __writeback_inodes_sb_nr if no writeback is currently underway.
*/
void try_to_writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
{
if (!down_read_trylock(&sb->s_umount))
return;
__writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason, true);
up_read(&sb->s_umount);
}
EXPORT_SYMBOL_NS(try_to_writeback_inodes_sb, ANDROID_GKI_VFS_EXPORT_ONLY);
/**
* sync_inodes_sb - sync sb inode pages
* @sb: the superblock
*
* This function writes and waits on any dirty inode belonging to this
* super_block.
*/
void sync_inodes_sb(struct super_block *sb)
{
struct backing_dev_info *bdi = sb->s_bdi;
DEFINE_WB_COMPLETION(done, bdi);
struct wb_writeback_work work = {
.sb = sb,
.sync_mode = WB_SYNC_ALL,
.nr_pages = LONG_MAX,
.range_cyclic = 0,
.done = &done,
.reason = WB_REASON_SYNC,
.for_sync = 1,
};
/*
* Can't skip on !bdi_has_dirty() because we should wait for !dirty
* inodes under writeback, and because I_DIRTY_TIME inodes ignored by
* bdi_has_dirty() need to be written out too.
*/
if (bdi == &noop_backing_dev_info)
return;
WARN_ON(!rwsem_is_locked(&sb->s_umount));
/* protect against inode wb switch, see inode_switch_wbs_work_fn() */
bdi_down_write_wb_switch_rwsem(bdi);
bdi_split_work_to_wbs(bdi, &work, false);
wb_wait_for_completion(&done);
bdi_up_write_wb_switch_rwsem(bdi);
wait_sb_inodes(sb);
}
EXPORT_SYMBOL(sync_inodes_sb);
/**
* write_inode_now - write an inode to disk
* @inode: inode to write to disk
* @sync: whether the write should be synchronous or not
*
* This function commits an inode to disk immediately if it is dirty. This is
* primarily needed by knfsd.
*
* The caller must either have a ref on the inode or must have set I_WILL_FREE.
*/
int write_inode_now(struct inode *inode, int sync)
{
struct writeback_control wbc = {
.nr_to_write = LONG_MAX,
.sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
.range_start = 0,
.range_end = LLONG_MAX,
};
if (!mapping_can_writeback(inode->i_mapping))
wbc.nr_to_write = 0;
might_sleep();
return writeback_single_inode(inode, &wbc);
}
EXPORT_SYMBOL_NS(write_inode_now, ANDROID_GKI_VFS_EXPORT_ONLY);
/**
* sync_inode_metadata - write an inode to disk
* @inode: the inode to sync
* @wait: wait for I/O to complete.
*
* Write an inode to disk and adjust its dirty state after completion.
*
* Note: only writes the actual inode, no associated data or other metadata.
*/
int sync_inode_metadata(struct inode *inode, int wait)
{
struct writeback_control wbc = {
.sync_mode = wait ? WB_SYNC_ALL : WB_SYNC_NONE,
.nr_to_write = 0, /* metadata-only */
};
return writeback_single_inode(inode, &wbc);
}
EXPORT_SYMBOL_NS(sync_inode_metadata, ANDROID_GKI_VFS_EXPORT_ONLY);