Merge 5.15.47 into android14-5.15

Changes in 5.15.47
	pcmcia: db1xxx_ss: restrict to MIPS_DB1XXX boards
	staging: greybus: codecs: fix type confusion of list iterator variable
	iio: adc: ad7124: Remove shift from scan_type
	lkdtm/bugs: Check for the NULL pointer after calling kmalloc
	lkdtm/bugs: Don't expect thread termination without CONFIG_UBSAN_TRAP
	tty: goldfish: Use tty_port_destroy() to destroy port
	tty: serial: owl: Fix missing clk_disable_unprepare() in owl_uart_probe
	tty: n_tty: Restore EOF push handling behavior
	serial: 8250_aspeed_vuart: Fix potential NULL dereference in aspeed_vuart_probe
	tty: serial: fsl_lpuart: fix potential bug when using both of_alias_get_id and ida_simple_get
	remoteproc: imx_rproc: Ignore create mem entry for resource table
	usb: usbip: fix a refcount leak in stub_probe()
	usb: usbip: add missing device lock on tweak configuration cmd
	USB: storage: karma: fix rio_karma_init return
	usb: musb: Fix missing of_node_put() in omap2430_probe
	staging: fieldbus: Fix the error handling path in anybuss_host_common_probe()
	pwm: lp3943: Fix duty calculation in case period was clamped
	pwm: raspberrypi-poe: Fix endianness in firmware struct
	rpmsg: qcom_smd: Fix irq_of_parse_and_map() return value
	usb: dwc3: gadget: Replace list_for_each_entry_safe() if using giveback
	usb: dwc3: pci: Fix pm_runtime_get_sync() error checking
	misc: fastrpc: fix an incorrect NULL check on list iterator
	firmware: stratix10-svc: fix a missing check on list iterator
	usb: typec: mux: Check dev_set_name() return value
	rpmsg: virtio: Fix possible double free in rpmsg_probe()
	rpmsg: virtio: Fix possible double free in rpmsg_virtio_add_ctrl_dev()
	rpmsg: virtio: Fix the unregistration of the device rpmsg_ctrl
	iio: adc: stmpe-adc: Fix wait_for_completion_timeout return value check
	iio: proximity: vl53l0x: Fix return value check of wait_for_completion_timeout
	iio: adc: sc27xx: fix read big scale voltage not right
	iio: adc: sc27xx: Fine tune the scale calibration values
	rpmsg: qcom_smd: Fix returning 0 if irq_of_parse_and_map() fails
	pvpanic: Fix typos in the comments
	misc/pvpanic: Convert regular spinlock into trylock on panic path
	phy: qcom-qmp: fix pipe-clock imbalance on power-on failure
	power: supply: axp288_fuel_gauge: Drop BIOS version check from "T3 MRD" DMI quirk
	serial: sifive: Report actual baud base rather than fixed 115200
	export: fix string handling of namespace in EXPORT_SYMBOL_NS
	soundwire: intel: prevent pm_runtime resume prior to system suspend
	coresight: cpu-debug: Replace mutex with mutex_trylock on panic notifier
	ksmbd: fix reference count leak in smb_check_perm_dacl()
	extcon: ptn5150: Add queue work sync before driver release
	soc: rockchip: Fix refcount leak in rockchip_grf_init
	clocksource/drivers/riscv: Events are stopped during CPU suspend
	ARM: dts: aspeed: ast2600-evb: Enable RX delay for MAC0/MAC1
	rtc: mt6397: check return value after calling platform_get_resource()
	rtc: ftrtc010: Use platform_get_irq() to get the interrupt
	rtc: ftrtc010: Fix error handling in ftrtc010_rtc_probe
	staging: r8188eu: add check for kzalloc
	tty: n_gsm: Don't ignore write return value in gsmld_output()
	tty: n_gsm: Fix packet data hex dump output
	serial: meson: acquire port->lock in startup()
	serial: 8250_fintek: Check SER_RS485_RTS_* only with RS485
	serial: cpm_uart: Fix build error without CONFIG_SERIAL_CPM_CONSOLE
	serial: digicolor-usart: Don't allow CS5-6
	serial: rda-uart: Don't allow CS5-6
	serial: txx9: Don't allow CS5-6
	serial: sh-sci: Don't allow CS5-6
	serial: sifive: Sanitize CSIZE and c_iflag
	serial: st-asc: Sanitize CSIZE and correct PARENB for CS7
	serial: stm32-usart: Correct CSIZE, bits, and parity
	firmware: dmi-sysfs: Fix memory leak in dmi_sysfs_register_handle
	bus: ti-sysc: Fix warnings for unbind for serial
	driver: base: fix UAF when driver_attach failed
	driver core: fix deadlock in __device_attach
	watchdog: rti-wdt: Fix pm_runtime_get_sync() error checking
	watchdog: ts4800_wdt: Fix refcount leak in ts4800_wdt_probe
	blk-mq: don't touch ->tagset in blk_mq_get_sq_hctx
	ASoC: fsl_sai: Fix FSL_SAI_xDR/xFR definition
	clocksource/drivers/oxnas-rps: Fix irq_of_parse_and_map() return value
	s390/crypto: fix scatterwalk_unmap() callers in AES-GCM
	net: sched: fixed barrier to prevent skbuff sticking in qdisc backlog
	net: ethernet: mtk_eth_soc: out of bounds read in mtk_hwlro_get_fdir_entry()
	net: ethernet: ti: am65-cpsw-nuss: Fix some refcount leaks
	net: dsa: mv88e6xxx: Fix refcount leak in mv88e6xxx_mdios_register
	modpost: fix removing numeric suffixes
	jffs2: fix memory leak in jffs2_do_fill_super
	ubi: fastmap: Fix high cpu usage of ubi_bgt by making sure wl_pool not empty
	ubi: ubi_create_volume: Fix use-after-free when volume creation failed
	selftests/bpf: fix selftest after random: Urandom_read tracepoint removal
	selftests/bpf: fix stacktrace_build_id with missing kprobe/urandom_read
	bpf: Fix probe read error in ___bpf_prog_run()
	block: take destination bvec offsets into account in bio_copy_data_iter
	riscv: read-only pages should not be writable
	net/smc: fixes for converting from "struct smc_cdc_tx_pend **" to "struct smc_wr_tx_pend_priv *"
	tcp: add accessors to read/set tp->snd_cwnd
	nfp: only report pause frame configuration for physical device
	sfc: fix considering that all channels have TX queues
	sfc: fix wrong tx channel offset with efx_separate_tx_channels
	block: make bioset_exit() fully resilient against being called twice
	vdpa: Fix error logic in vdpa_nl_cmd_dev_get_doit
	virtio: pci: Fix an error handling path in vp_modern_probe()
	net/mlx5: Don't use already freed action pointer
	net/mlx5e: TC NIC mode, fix tc chains miss table
	net/mlx5: CT: Fix header-rewrite re-use for tupels
	net/mlx5: correct ECE offset in query qp output
	net/mlx5e: Update netdev features after changing XDP state
	net: sched: add barrier to fix packet stuck problem for lockless qdisc
	tcp: tcp_rtx_synack() can be called from process context
	vdpa: ifcvf: set pci driver data in probe
	octeontx2-af: fix error code in is_valid_offset()
	s390/mcck: isolate SIE instruction when setting CIF_MCCK_GUEST flag
	regulator: mt6315-regulator: fix invalid allowed mode
	gpio: pca953x: use the correct register address to do regcache sync
	afs: Fix infinite loop found by xfstest generic/676
	scsi: sd: Fix potential NULL pointer dereference
	tipc: check attribute length for bearer name
	driver core: Fix wait_for_device_probe() & deferred_probe_timeout interaction
	perf c2c: Fix sorting in percent_rmt_hitm_cmp()
	dmaengine: idxd: set DMA_INTERRUPT cap bit
	mips: cpc: Fix refcount leak in mips_cpc_default_phys_base
	bootconfig: Make the bootconfig.o as a normal object file
	tracing: Make tp_printk work on syscall tracepoints
	tracing: Fix sleeping function called from invalid context on RT kernel
	tracing: Avoid adding tracer option before update_tracer_options
	iommu/arm-smmu: fix possible null-ptr-deref in arm_smmu_device_probe()
	iommu/arm-smmu-v3: check return value after calling platform_get_resource()
	f2fs: remove WARN_ON in f2fs_is_valid_blkaddr
	i2c: cadence: Increase timeout per message if necessary
	m68knommu: set ZERO_PAGE() to the allocated zeroed page
	m68knommu: fix undefined reference to `_init_sp'
	dmaengine: zynqmp_dma: In struct zynqmp_dma_chan fix desc_size data type
	NFSv4: Don't hold the layoutget locks across multiple RPC calls
	video: fbdev: hyperv_fb: Allow resolutions with size > 64 MB for Gen1
	video: fbdev: pxa3xx-gcu: release the resources correctly in pxa3xx_gcu_probe/remove()
	RISC-V: use memcpy for kexec_file mode
	m68knommu: fix undefined reference to `mach_get_rtc_pll'
	f2fs: fix to tag gcing flag on page during file defragment
	xprtrdma: treat all calls not a bcall when bc_serv is NULL
	drm/bridge: sn65dsi83: Fix an error handling path in sn65dsi83_probe()
	drm/bridge: ti-sn65dsi83: Handle dsi_lanes == 0 as invalid
	netfilter: nat: really support inet nat without l3 address
	netfilter: nf_tables: use kfree_rcu(ptr, rcu) to release hooks in clean_net path
	netfilter: nf_tables: delete flowtable hooks via transaction list
	powerpc/kasan: Force thread size increase with KASAN
	SUNRPC: Trap RDMA segment overflows
	netfilter: nf_tables: always initialize flowtable hook list in transaction
	ata: pata_octeon_cf: Fix refcount leak in octeon_cf_probe
	netfilter: nf_tables: release new hooks on unsupported flowtable flags
	netfilter: nf_tables: memleak flow rule from commit path
	netfilter: nf_tables: bail out early if hardware offload is not supported
	xen: unexport __init-annotated xen_xlate_map_ballooned_pages()
	stmmac: intel: Fix an error handling path in intel_eth_pci_probe()
	af_unix: Fix a data-race in unix_dgram_peer_wake_me().
	bpf, arm64: Clear prog->jited_len along prog->jited
	net: dsa: lantiq_gswip: Fix refcount leak in gswip_gphy_fw_list
	net/mlx4_en: Fix wrong return value on ioctl EEPROM query failure
	i40e: xsk: Move tmp desc array from driver to pool
	xsk: Fix handling of invalid descriptors in XSK TX batching API
	SUNRPC: Fix the calculation of xdr->end in xdr_get_next_encode_buffer()
	net: mdio: unexport __init-annotated mdio_bus_init()
	net: xfrm: unexport __init-annotated xfrm4_protocol_init()
	net: ipv6: unexport __init-annotated seg6_hmac_init()
	net/mlx5: Lag, filter non compatible devices
	net/mlx5: Fix mlx5_get_next_dev() peer device matching
	net/mlx5: Rearm the FW tracer after each tracer event
	net/mlx5: fs, fail conflicting actions
	ip_gre: test csum_start instead of transport header
	net: altera: Fix refcount leak in altera_tse_mdio_create
	net: dsa: mv88e6xxx: use BMSR_ANEGCOMPLETE bit for filling an_complete
	tcp: use alloc_large_system_hash() to allocate table_perturb
	drm: imx: fix compiler warning with gcc-12
	nfp: flower: restructure flow-key for gre+vlan combination
	iov_iter: Fix iter_xarray_get_pages{,_alloc}()
	iio: dummy: iio_simple_dummy: check the return value of kstrdup()
	staging: rtl8712: fix a potential memory leak in r871xu_drv_init()
	iio: st_sensors: Add a local lock for protecting odr
	lkdtm/usercopy: Expand size of "out of frame" object
	drivers: staging: rtl8723bs: Fix deadlock in rtw_surveydone_event_callback()
	drivers: staging: rtl8192bs: Fix deadlock in rtw_joinbss_event_prehandle()
	tty: synclink_gt: Fix null-pointer-dereference in slgt_clean()
	tty: Fix a possible resource leak in icom_probe
	thunderbolt: Use different lane for second DisplayPort tunnel
	drivers: staging: rtl8192u: Fix deadlock in ieee80211_beacons_stop()
	drivers: staging: rtl8192e: Fix deadlock in rtllib_beacons_stop()
	USB: host: isp116x: check return value after calling platform_get_resource()
	drivers: tty: serial: Fix deadlock in sa1100_set_termios()
	drivers: usb: host: Fix deadlock in oxu_bus_suspend()
	USB: hcd-pci: Fully suspend across freeze/thaw cycle
	char: xillybus: fix a refcount leak in cleanup_dev()
	sysrq: do not omit current cpu when showing backtrace of all active CPUs
	usb: dwc2: gadget: don't reset gadget's driver->bus
	soundwire: qcom: adjust autoenumeration timeout
	misc: rtsx: set NULL intfdata when probe fails
	extcon: Fix extcon_get_extcon_dev() error handling
	extcon: Modify extcon device to be created after driver data is set
	clocksource/drivers/sp804: Avoid error on multiple instances
	staging: rtl8712: fix uninit-value in usb_read8() and friends
	staging: rtl8712: fix uninit-value in r871xu_drv_init()
	serial: msm_serial: disable interrupts in __msm_console_write()
	kernfs: Separate kernfs_pr_cont_buf and rename_lock.
	watchdog: wdat_wdt: Stop watchdog when rebooting the system
	md: protect md_unregister_thread from reentrancy
	scsi: myrb: Fix up null pointer access on myrb_cleanup()
	Revert "net: af_key: add check for pfkey_broadcast in function pfkey_process"
	ceph: allow ceph.dir.rctime xattr to be updatable
	ceph: flush the mdlog for filesystem sync
	drm/amd/display: Check if modulo is 0 before dividing.
	drm/radeon: fix a possible null pointer dereference
	drm/amd/pm: Fix missing thermal throttler status
	um: line: Use separate IRQs per line
	modpost: fix undefined behavior of is_arm_mapping_symbol()
	x86/cpu: Elide KCSAN for cpu_has() and friends
	jump_label,noinstr: Avoid instrumentation for JUMP_LABEL=n builds
	nbd: call genl_unregister_family() first in nbd_cleanup()
	nbd: fix race between nbd_alloc_config() and module removal
	nbd: fix io hung while disconnecting device
	s390/gmap: voluntarily schedule during key setting
	cifs: version operations for smb20 unneeded when legacy support disabled
	drm/amd/pm: use bitmap_{from,to}_arr32 where appropriate
	nodemask: Fix return values to be unsigned
	vringh: Fix loop descriptors check in the indirect cases
	scripts/gdb: change kernel config dumping method
	ALSA: usb-audio: Skip generic sync EP parse for secondary EP
	ALSA: usb-audio: Set up (implicit) sync for Saffire 6
	ALSA: hda/conexant - Fix loopback issue with CX20632
	ALSA: hda/realtek: Fix for quirk to enable speaker output on the Lenovo Yoga DuetITL 2021
	ALSA: hda/realtek: Add quirk for HP Dev One
	cifs: return errors during session setup during reconnects
	cifs: fix reconnect on smb3 mount types
	KEYS: trusted: tpm2: Fix migratable logic
	ata: libata-transport: fix {dma|pio|xfer}_mode sysfs files
	mmc: block: Fix CQE recovery reset success
	net: phy: dp83867: retrigger SGMII AN when link change
	net: openvswitch: fix misuse of the cached connection on tuple changes
	writeback: Fix inode->i_io_list not be protected by inode->i_lock error
	nfc: st21nfca: fix incorrect validating logic in EVT_TRANSACTION
	nfc: st21nfca: fix memory leaks in EVT_TRANSACTION handling
	nfc: st21nfca: fix incorrect sizing calculations in EVT_TRANSACTION
	ixgbe: fix bcast packets Rx on VF after promisc removal
	ixgbe: fix unexpected VLAN Rx in promisc mode on VF
	Input: bcm5974 - set missing URB_NO_TRANSFER_DMA_MAP urb flag
	vduse: Fix NULL pointer dereference on sysfs access
	powerpc: Don't select HAVE_IRQ_EXIT_ON_IRQ_STACK
	drm/bridge: analogix_dp: Support PSR-exit to disable transition
	drm/atomic: Force bridge self-refresh-exit on CRTC switch
	drm/amdgpu: update VCN codec support for Yellow Carp
	powerpc/32: Fix overread/overwrite of thread_struct via ptrace
	powerpc/mm: Switch obsolete dssall to .long
	drm/ast: Create threshold values for AST2600
	random: avoid checking crng_ready() twice in random_init()
	random: mark bootloader randomness code as __init
	random: account for arch randomness in bits
	md/raid0: Ignore RAID0 layout if the second zone has only one device
	net/sched: act_police: more accurate MTU policing
	PCI: qcom: Fix pipe clock imbalance
	zonefs: fix handling of explicit_open option on mount
	iov_iter: fix build issue due to possible type mis-match
	dmaengine: idxd: add missing callback function to support DMA_INTERRUPT
	tcp: fix tcp_mtup_probe_success vs wrong snd_cwnd
	xsk: Fix possible crash when multiple sockets are created
	Linux 5.15.47

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I40e7672edf2bd713fee775919d062e2934d9103d

Greg Kroah-Hartman committed on 2022-07-13 11:55:41 +02:00
297 changed files with 1908 additions and 1118 deletions

@@ -107,13 +107,14 @@ Description:
described in ATA8 7.16 and 7.17. Only valid if
the device is not a PM.
pio_mode: (RO) Transfer modes supported by the device when
in PIO mode. Mostly used by PATA device.
pio_mode: (RO) PIO transfer mode used by the device.
Mostly used by PATA devices.
xfer_mode: (RO) Current transfer mode
xfer_mode: (RO) Current transfer mode. Mostly used by
PATA devices.
dma_mode: (RO) Transfer modes supported by the device when
in DMA mode. Mostly used by PATA device.
dma_mode: (RO) DMA transfer mode used by the device.
Mostly used by PATA devices.
class: (RO) Device class. Can be "ata" for disk,
"atapi" for packet device, "pmp" for PM, or


@@ -55,7 +55,7 @@ examples:
regulator-min-microvolt = <300000>;
regulator-max-microvolt = <1193750>;
regulator-enable-ramp-delay = <256>;
regulator-allowed-modes = <0 1 2 4>;
regulator-allowed-modes = <0 1 2>;
};
vbuck3 {
@@ -63,7 +63,7 @@ examples:
regulator-min-microvolt = <300000>;
regulator-max-microvolt = <1193750>;
regulator-enable-ramp-delay = <256>;
regulator-allowed-modes = <0 1 2 4>;
regulator-allowed-modes = <0 1 2>;
};
};
};


@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 15
SUBLEVEL = 46
SUBLEVEL = 47
EXTRAVERSION =
NAME = Trick or Treat


@@ -103,7 +103,7 @@
&mac0 {
status = "okay";
phy-mode = "rgmii";
phy-mode = "rgmii-rxid";
phy-handle = <&ethphy0>;
pinctrl-names = "default";
@@ -114,7 +114,7 @@
&mac1 {
status = "okay";
phy-mode = "rgmii";
phy-mode = "rgmii-rxid";
phy-handle = <&ethphy1>;
pinctrl-names = "default";


@@ -1113,6 +1113,7 @@ skip_init_ctx:
bpf_jit_binary_free(header);
prog->bpf_func = NULL;
prog->jited = 0;
prog->jited_len = 0;
goto out_off;
}
bpf_jit_binary_lock_ro(header);


@@ -335,6 +335,7 @@ comment "Machine Options"
config UBOOT
bool "Support for U-Boot command line parameters"
depends on COLDFIRE
help
If you say Y here kernel will try to collect command
line parameters from the initial u-boot stack.


@@ -42,7 +42,8 @@ extern void paging_init(void);
* ZERO_PAGE is a global shared page that is always zero: used
* for zero-mapped memory areas etc..
*/
#define ZERO_PAGE(vaddr) (virt_to_page(0))
extern void *empty_zero_page;
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
/*
* All 32bit addresses are effectively valid for vmalloc...


@@ -87,15 +87,8 @@ void (*mach_sched_init) (void) __initdata = NULL;
void (*mach_init_IRQ) (void) __initdata = NULL;
void (*mach_get_model) (char *model);
void (*mach_get_hardware_list) (struct seq_file *m);
/* machine dependent timer functions */
int (*mach_hwclk) (int, struct rtc_time*);
EXPORT_SYMBOL(mach_hwclk);
unsigned int (*mach_get_ss)(void);
int (*mach_get_rtc_pll)(struct rtc_pll_info *);
int (*mach_set_rtc_pll)(struct rtc_pll_info *);
EXPORT_SYMBOL(mach_get_ss);
EXPORT_SYMBOL(mach_get_rtc_pll);
EXPORT_SYMBOL(mach_set_rtc_pll);
void (*mach_reset)( void );
void (*mach_halt)( void );
void (*mach_power_off)( void );


@@ -50,7 +50,6 @@ char __initdata command_line[COMMAND_LINE_SIZE];
/* machine dependent timer functions */
void (*mach_sched_init)(void) __initdata = NULL;
int (*mach_hwclk) (int, struct rtc_time*);
/* machine dependent reboot functions */
void (*mach_reset)(void);


@@ -63,6 +63,15 @@ void timer_heartbeat(void)
#endif /* CONFIG_HEARTBEAT */
#ifdef CONFIG_M68KCLASSIC
/* machine dependent timer functions */
int (*mach_hwclk) (int, struct rtc_time*);
EXPORT_SYMBOL(mach_hwclk);
int (*mach_get_rtc_pll)(struct rtc_pll_info *);
int (*mach_set_rtc_pll)(struct rtc_pll_info *);
EXPORT_SYMBOL(mach_get_rtc_pll);
EXPORT_SYMBOL(mach_set_rtc_pll);
#if !IS_BUILTIN(CONFIG_RTC_DRV_GENERIC)
void read_persistent_clock64(struct timespec64 *ts)
{


@@ -27,6 +27,7 @@ phys_addr_t __weak mips_cpc_default_phys_base(void)
cpc_node = of_find_compatible_node(of_root, NULL, "mti,mips-cpc");
if (cpc_node) {
err = of_address_to_resource(cpc_node, 0, &res);
of_node_put(cpc_node);
if (!err)
return res.start;
}


@@ -218,7 +218,6 @@ config PPC
select HAVE_HARDLOCKUP_DETECTOR_PERF if PERF_EVENTS && HAVE_PERF_EVENTS_NMI && !HAVE_HARDLOCKUP_DETECTOR_ARCH
select HAVE_HW_BREAKPOINT if PERF_EVENTS && (PPC_BOOK3S || PPC_8xx)
select HAVE_IOREMAP_PROT
select HAVE_IRQ_EXIT_ON_IRQ_STACK
select HAVE_IRQ_TIME_ACCOUNTING
select HAVE_KERNEL_GZIP
select HAVE_KERNEL_LZMA if DEFAULT_UIMAGE
@@ -769,7 +768,6 @@ config THREAD_SHIFT
range 13 15
default "15" if PPC_256K_PAGES
default "14" if PPC64
default "14" if KASAN
default "13"
help
Used to define the stack size. The default is almost always what you


@@ -249,6 +249,7 @@
#define PPC_INST_COPY 0x7c20060c
#define PPC_INST_DCBA 0x7c0005ec
#define PPC_INST_DCBA_MASK 0xfc0007fe
#define PPC_INST_DSSALL 0x7e00066c
#define PPC_INST_ISEL 0x7c00001e
#define PPC_INST_ISEL_MASK 0xfc00003e
#define PPC_INST_LSWI 0x7c0004aa
@@ -576,6 +577,7 @@
#define PPC_DCBZL(a, b) stringify_in_c(.long PPC_RAW_DCBZL(a, b))
#define PPC_DIVDE(t, a, b) stringify_in_c(.long PPC_RAW_DIVDE(t, a, b))
#define PPC_DIVDEU(t, a, b) stringify_in_c(.long PPC_RAW_DIVDEU(t, a, b))
#define PPC_DSSALL stringify_in_c(.long PPC_INST_DSSALL)
#define PPC_LQARX(t, a, b, eh) stringify_in_c(.long PPC_RAW_LQARX(t, a, b, eh))
#define PPC_STQCX(t, a, b) stringify_in_c(.long PPC_RAW_STQCX(t, a, b))
#define PPC_MADDHD(t, a, b, c) stringify_in_c(.long PPC_RAW_MADDHD(t, a, b, c))


@@ -14,10 +14,16 @@
#ifdef __KERNEL__
#if defined(CONFIG_VMAP_STACK) && CONFIG_THREAD_SHIFT < PAGE_SHIFT
#ifdef CONFIG_KASAN
#define MIN_THREAD_SHIFT (CONFIG_THREAD_SHIFT + 1)
#else
#define MIN_THREAD_SHIFT CONFIG_THREAD_SHIFT
#endif
#if defined(CONFIG_VMAP_STACK) && MIN_THREAD_SHIFT < PAGE_SHIFT
#define THREAD_SHIFT PAGE_SHIFT
#else
#define THREAD_SHIFT CONFIG_THREAD_SHIFT
#define THREAD_SHIFT MIN_THREAD_SHIFT
#endif
#define THREAD_SIZE (1 << THREAD_SHIFT)


@@ -82,7 +82,7 @@ void power4_idle(void)
return;
if (cpu_has_feature(CPU_FTR_ALTIVEC))
asm volatile("DSSALL ; sync" ::: "memory");
asm volatile(PPC_DSSALL " ; sync" ::: "memory");
power4_idle_nap();


@@ -129,7 +129,7 @@ BEGIN_FTR_SECTION
END_FTR_SECTION_IFCLR(CPU_FTR_NO_DPM)
mtspr SPRN_HID0,r4
BEGIN_FTR_SECTION
DSSALL
PPC_DSSALL
sync
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
lwz r8,TI_LOCAL_FLAGS(r2) /* set napping bit */


@@ -96,7 +96,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_L2CR)
/* Stop DST streams */
BEGIN_FTR_SECTION
DSSALL
PPC_DSSALL
sync
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
@@ -292,7 +292,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_L3CR)
isync
/* Stop DST streams */
DSSALL
PPC_DSSALL
sync
/* Get the current enable bit of the L3CR into r4 */
@@ -401,7 +401,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_L3CR)
_GLOBAL(__flush_disable_L1)
/* Stop pending alitvec streams and memory accesses */
BEGIN_FTR_SECTION
DSSALL
PPC_DSSALL
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
sync


@@ -17,9 +17,13 @@ int ptrace_get_fpr(struct task_struct *child, int index, unsigned long *data)
#ifdef CONFIG_PPC_FPU_REGS
flush_fp_to_thread(child);
if (fpidx < (PT_FPSCR - PT_FPR0))
memcpy(data, &child->thread.TS_FPR(fpidx), sizeof(long));
if (fpidx < (PT_FPSCR - PT_FPR0)) {
if (IS_ENABLED(CONFIG_PPC32))
// On 32-bit the index we are passed refers to 32-bit words
*data = ((u32 *)child->thread.fp_state.fpr)[fpidx];
else
memcpy(data, &child->thread.TS_FPR(fpidx), sizeof(long));
} else
*data = child->thread.fp_state.fpscr;
#else
*data = 0;
@@ -39,9 +43,13 @@ int ptrace_put_fpr(struct task_struct *child, int index, unsigned long data)
#ifdef CONFIG_PPC_FPU_REGS
flush_fp_to_thread(child);
if (fpidx < (PT_FPSCR - PT_FPR0))
memcpy(&child->thread.TS_FPR(fpidx), &data, sizeof(long));
if (fpidx < (PT_FPSCR - PT_FPR0)) {
if (IS_ENABLED(CONFIG_PPC32))
// On 32-bit the index we are passed refers to 32-bit words
((u32 *)child->thread.fp_state.fpr)[fpidx] = data;
else
memcpy(&child->thread.TS_FPR(fpidx), &data, sizeof(long));
} else
child->thread.fp_state.fpscr = data;
#endif


@@ -446,4 +446,7 @@ void __init pt_regs_check(void)
* real registers.
*/
BUILD_BUG_ON(PT_DSCR < sizeof(struct user_pt_regs) / sizeof(unsigned long));
// ptrace_get/put_fpr() rely on PPC32 and VSX being incompatible
BUILD_BUG_ON(IS_ENABLED(CONFIG_PPC32) && IS_ENABLED(CONFIG_VSX));
}


@@ -181,7 +181,7 @@ _GLOBAL(swsusp_arch_resume)
#ifdef CONFIG_ALTIVEC
/* Stop pending alitvec streams and memory accesses */
BEGIN_FTR_SECTION
DSSALL
PPC_DSSALL
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
sync


@@ -142,7 +142,7 @@ END_FW_FTR_SECTION_IFCLR(FW_FEATURE_LPAR)
_GLOBAL(swsusp_arch_resume)
/* Stop pending alitvec streams and memory accesses */
BEGIN_FTR_SECTION
DSSALL
PPC_DSSALL
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
sync


@@ -81,7 +81,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
* context
*/
if (cpu_has_feature(CPU_FTR_ALTIVEC))
asm volatile ("dssall");
asm volatile (PPC_DSSALL);
if (!new_on_cpu)
membarrier_arch_switch_mm(prev, next, tsk);


@@ -48,7 +48,7 @@ flush_disable_75x:
/* Stop DST streams */
BEGIN_FTR_SECTION
DSSALL
PPC_DSSALL
sync
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
@@ -197,7 +197,7 @@ flush_disable_745x:
isync
/* Stop prefetch streams */
DSSALL
PPC_DSSALL
sync
/* Disable L2 prefetching */


@@ -65,7 +65,7 @@ static int __init set_permissions(pte_t *ptep, unsigned long addr, void *data)
if (md->attribute & EFI_MEMORY_RO) {
val = pte_val(pte) & ~_PAGE_WRITE;
val = pte_val(pte) | _PAGE_READ;
val |= _PAGE_READ;
pte = __pte(val);
}
if (md->attribute & EFI_MEMORY_XP) {


@@ -65,7 +65,9 @@ machine_kexec_prepare(struct kimage *image)
if (image->segment[i].memsz <= sizeof(fdt))
continue;
if (copy_from_user(&fdt, image->segment[i].buf, sizeof(fdt)))
if (image->file_mode)
memcpy(&fdt, image->segment[i].buf, sizeof(fdt));
else if (copy_from_user(&fdt, image->segment[i].buf, sizeof(fdt)))
continue;
if (fdt_check_header(&fdt))


@@ -701,7 +701,7 @@ static inline void _gcm_sg_unmap_and_advance(struct gcm_sg_walk *gw,
unsigned int nbytes)
{
gw->walk_bytes_remain -= nbytes;
scatterwalk_unmap(&gw->walk);
scatterwalk_unmap(gw->walk_ptr);
scatterwalk_advance(&gw->walk, nbytes);
scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain);
gw->walk_ptr = NULL;
@@ -776,7 +776,7 @@ static int gcm_out_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
goto out;
}
scatterwalk_unmap(&gw->walk);
scatterwalk_unmap(gw->walk_ptr);
gw->walk_ptr = NULL;
gw->ptr = gw->buf;


@@ -248,6 +248,10 @@ ENTRY(sie64a)
BPEXIT __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
.Lsie_entry:
sie 0(%r14)
# Let the next instruction be NOP to avoid triggering a machine check
# and handling it in a guest as result of the instruction execution.
nopr 7
.Lsie_leave:
BPOFF
BPENTER __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
.Lsie_skip:
@@ -536,7 +540,7 @@ ENTRY(mcck_int_handler)
jno .Lmcck_panic
#if IS_ENABLED(CONFIG_KVM)
OUTSIDE %r9,.Lsie_gmap,.Lsie_done,6f
OUTSIDE %r9,.Lsie_entry,.Lsie_skip,4f
OUTSIDE %r9,.Lsie_entry,.Lsie_leave,4f
oi __LC_CPU_FLAGS+7, _CIF_MCCK_GUEST
j 5f
4: CHKSTG .Lmcck_panic


@@ -2601,6 +2601,18 @@ static int __s390_enable_skey_pte(pte_t *pte, unsigned long addr,
return 0;
}
/*
* Give a chance to schedule after setting a key to 256 pages.
* We only hold the mm lock, which is a rwsem and the kvm srcu.
* Both can sleep.
*/
static int __s390_enable_skey_pmd(pmd_t *pmd, unsigned long addr,
unsigned long next, struct mm_walk *walk)
{
cond_resched();
return 0;
}
static int __s390_enable_skey_hugetlb(pte_t *pte, unsigned long addr,
unsigned long hmask, unsigned long next,
struct mm_walk *walk)
@@ -2623,12 +2635,14 @@ static int __s390_enable_skey_hugetlb(pte_t *pte, unsigned long addr,
end = start + HPAGE_SIZE - 1;
__storage_key_init_range(start, end);
set_bit(PG_arch_1, &page->flags);
cond_resched();
return 0;
}
static const struct mm_walk_ops enable_skey_walk_ops = {
.hugetlb_entry = __s390_enable_skey_hugetlb,
.pte_entry = __s390_enable_skey_pte,
.pmd_entry = __s390_enable_skey_pmd,
};
int s390_enable_skey(void)


@@ -133,7 +133,7 @@ static void line_timer_cb(struct work_struct *work)
struct line *line = container_of(work, struct line, task.work);
if (!line->throttled)
chan_interrupt(line, line->driver->read_irq);
chan_interrupt(line, line->read_irq);
}
int enable_chan(struct line *line)
@@ -195,9 +195,9 @@ void free_irqs(void)
chan = list_entry(ele, struct chan, free_list);
if (chan->input && chan->enabled)
um_free_irq(chan->line->driver->read_irq, chan);
um_free_irq(chan->line->read_irq, chan);
if (chan->output && chan->enabled)
um_free_irq(chan->line->driver->write_irq, chan);
um_free_irq(chan->line->write_irq, chan);
chan->enabled = 0;
}
}
@@ -215,9 +215,9 @@ static void close_one_chan(struct chan *chan, int delay_free_irq)
spin_unlock_irqrestore(&irqs_to_free_lock, flags);
} else {
if (chan->input && chan->enabled)
um_free_irq(chan->line->driver->read_irq, chan);
um_free_irq(chan->line->read_irq, chan);
if (chan->output && chan->enabled)
um_free_irq(chan->line->driver->write_irq, chan);
um_free_irq(chan->line->write_irq, chan);
chan->enabled = 0;
}
if (chan->ops->close != NULL)


@@ -139,7 +139,7 @@ static int flush_buffer(struct line *line)
count = line->buffer + LINE_BUFSIZE - line->head;
n = write_chan(line->chan_out, line->head, count,
line->driver->write_irq);
line->write_irq);
if (n < 0)
return n;
if (n == count) {
@@ -156,7 +156,7 @@ static int flush_buffer(struct line *line)
count = line->tail - line->head;
n = write_chan(line->chan_out, line->head, count,
line->driver->write_irq);
line->write_irq);
if (n < 0)
return n;
@@ -195,7 +195,7 @@ int line_write(struct tty_struct *tty, const unsigned char *buf, int len)
ret = buffer_data(line, buf, len);
else {
n = write_chan(line->chan_out, buf, len,
line->driver->write_irq);
line->write_irq);
if (n < 0) {
ret = n;
goto out_up;
@@ -215,7 +215,7 @@ void line_throttle(struct tty_struct *tty)
{
struct line *line = tty->driver_data;
deactivate_chan(line->chan_in, line->driver->read_irq);
deactivate_chan(line->chan_in, line->read_irq);
line->throttled = 1;
}
@@ -224,7 +224,7 @@ void line_unthrottle(struct tty_struct *tty)
struct line *line = tty->driver_data;
line->throttled = 0;
chan_interrupt(line, line->driver->read_irq);
chan_interrupt(line, line->read_irq);
}
static irqreturn_t line_write_interrupt(int irq, void *data)
@@ -260,19 +260,23 @@ int line_setup_irq(int fd, int input, int output, struct line *line, void *data)
int err;
if (input) {
err = um_request_irq(driver->read_irq, fd, IRQ_READ,
line_interrupt, IRQF_SHARED,
err = um_request_irq(UM_IRQ_ALLOC, fd, IRQ_READ,
line_interrupt, 0,
driver->read_irq_name, data);
if (err < 0)
return err;
line->read_irq = err;
}
if (output) {
err = um_request_irq(driver->write_irq, fd, IRQ_WRITE,
line_write_interrupt, IRQF_SHARED,
err = um_request_irq(UM_IRQ_ALLOC, fd, IRQ_WRITE,
line_write_interrupt, 0,
driver->write_irq_name, data);
if (err < 0)
return err;
line->write_irq = err;
}
return 0;


@@ -23,9 +23,7 @@ struct line_driver {
const short minor_start;
const short type;
const short subtype;
const int read_irq;
const char *read_irq_name;
const int write_irq;
const char *write_irq_name;
struct mc_device mc;
struct tty_driver *driver;
@@ -35,6 +33,8 @@ struct line {
struct tty_port port;
int valid;
int read_irq, write_irq;
char *init_str;
struct list_head chan_list;
struct chan *chan_in, *chan_out;


@@ -47,9 +47,7 @@ static struct line_driver driver = {
.minor_start = 64,
.type = TTY_DRIVER_TYPE_SERIAL,
.subtype = 0,
.read_irq = SSL_IRQ,
.read_irq_name = "ssl",
.write_irq = SSL_WRITE_IRQ,
.write_irq_name = "ssl-write",
.mc = {
.list = LIST_HEAD_INIT(driver.mc.list),


@@ -53,9 +53,7 @@ static struct line_driver driver = {
.minor_start = 0,
.type = TTY_DRIVER_TYPE_CONSOLE,
.subtype = SYSTEM_TYPE_CONSOLE,
.read_irq = CONSOLE_IRQ,
.read_irq_name = "console",
.write_irq = CONSOLE_WRITE_IRQ,
.write_irq_name = "console-write",
.mc = {
.list = LIST_HEAD_INIT(driver.mc.list),


@@ -4,19 +4,15 @@
#define TIMER_IRQ 0
#define UMN_IRQ 1
#define CONSOLE_IRQ 2
#define CONSOLE_WRITE_IRQ 3
#define UBD_IRQ 4
#define UM_ETH_IRQ 5
#define SSL_IRQ 6
#define SSL_WRITE_IRQ 7
#define ACCEPT_IRQ 8
#define MCONSOLE_IRQ 9
#define WINCH_IRQ 10
#define SIGIO_WRITE_IRQ 11
#define TELNETD_IRQ 12
#define XTERM_IRQ 13
#define RANDOM_IRQ 14
#define UBD_IRQ 2
#define UM_ETH_IRQ 3
#define ACCEPT_IRQ 4
#define MCONSOLE_IRQ 5
#define WINCH_IRQ 6
#define SIGIO_WRITE_IRQ 7
#define TELNETD_IRQ 8
#define XTERM_IRQ 9
#define RANDOM_IRQ 10
#ifdef CONFIG_UML_NET_VECTOR


@@ -51,7 +51,7 @@ extern const char * const x86_power_flags[32];
extern const char * const x86_bug_flags[NBUGINTS*32];
#define test_cpu_cap(c, bit) \
test_bit(bit, (unsigned long *)((c)->x86_capability))
arch_test_bit(bit, (unsigned long *)((c)->x86_capability))
/*
* There are 32 bits/features in each mask word. The high bits


@@ -668,6 +668,7 @@ static void bio_alloc_cache_destroy(struct bio_set *bs)
bio_alloc_cache_prune(cache, -1U);
}
free_percpu(bs->cache);
bs->cache = NULL;
}
/**
@@ -1292,10 +1293,12 @@ void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
struct bio_vec src_bv = bio_iter_iovec(src, *src_iter);
struct bio_vec dst_bv = bio_iter_iovec(dst, *dst_iter);
unsigned int bytes = min(src_bv.bv_len, dst_bv.bv_len);
void *src_buf;
void *src_buf = bvec_kmap_local(&src_bv);
void *dst_buf = bvec_kmap_local(&dst_bv);
src_buf = bvec_kmap_local(&src_bv);
memcpy_to_bvec(&dst_bv, src_buf);
memcpy(dst_buf, src_buf, bytes);
kunmap_local(dst_buf);
kunmap_local(src_buf);
bio_advance_iter_single(src, src_iter, bytes);


@@ -1642,8 +1642,7 @@ static bool blk_mq_has_sqsched(struct request_queue *q)
*/
static struct blk_mq_hw_ctx *blk_mq_get_sq_hctx(struct request_queue *q)
{
struct blk_mq_hw_ctx *hctx;
struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
/*
* If the IO scheduler does not respect hardware queues when
* dispatching, we just don't bother with multiple HW queues and
@@ -1651,8 +1650,8 @@ static struct blk_mq_hw_ctx *blk_mq_get_sq_hctx(struct request_queue *q)
* just causes lock contention inside the scheduler and pointless cache
* bouncing.
*/
hctx = blk_mq_map_queue_type(q, HCTX_TYPE_DEFAULT,
raw_smp_processor_id());
struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, 0, ctx);
if (!blk_mq_hctx_stopped(hctx))
return hctx;
return NULL;


@@ -196,7 +196,7 @@ static struct {
{ XFER_PIO_0, "XFER_PIO_0" },
{ XFER_PIO_SLOW, "XFER_PIO_SLOW" }
};
ata_bitfield_name_match(xfer,ata_xfer_names)
ata_bitfield_name_search(xfer, ata_xfer_names)
/*
* ATA Port attributes


@@ -888,12 +888,14 @@ static int octeon_cf_probe(struct platform_device *pdev)
int i;
res_dma = platform_get_resource(dma_dev, IORESOURCE_MEM, 0);
if (!res_dma) {
put_device(&dma_dev->dev);
of_node_put(dma_node);
return -EINVAL;
}
cf_port->dma_base = (u64)devm_ioremap(&pdev->dev, res_dma->start,
resource_size(res_dma));
if (!cf_port->dma_base) {
put_device(&dma_dev->dev);
of_node_put(dma_node);
return -EINVAL;
}
@@ -903,6 +905,7 @@ static int octeon_cf_probe(struct platform_device *pdev)
irq = i;
irq_handler = octeon_cf_interrupt;
}
put_device(&dma_dev->dev);
}
of_node_put(dma_node);
}


@@ -617,7 +617,7 @@ int bus_add_driver(struct device_driver *drv)
if (drv->bus->p->drivers_autoprobe) {
error = driver_attach(drv);
if (error)
goto out_unregister;
goto out_del_list;
}
module_add_driver(drv->owner, drv);
@@ -644,6 +644,8 @@ int bus_add_driver(struct device_driver *drv)
return 0;
out_del_list:
klist_del(&priv->knode_bus);
out_unregister:
kobject_put(&priv->kobj);
/* drv->p is freed in driver_release() */


@@ -257,7 +257,6 @@ DEFINE_SHOW_ATTRIBUTE(deferred_devs);
int driver_deferred_probe_timeout;
EXPORT_SYMBOL_GPL(driver_deferred_probe_timeout);
static DECLARE_WAIT_QUEUE_HEAD(probe_timeout_waitqueue);
static int __init deferred_probe_timeout_setup(char *str)
{
@@ -312,7 +311,6 @@ static void deferred_probe_timeout_work_func(struct work_struct *work)
list_for_each_entry(p, &deferred_probe_pending_list, deferred_probe)
dev_info(p->device, "deferred probe pending\n");
mutex_unlock(&deferred_probe_mutex);
wake_up_all(&probe_timeout_waitqueue);
}
static DECLARE_DELAYED_WORK(deferred_probe_timeout_work, deferred_probe_timeout_work_func);
@@ -719,9 +717,6 @@ int driver_probe_done(void)
*/
void wait_for_device_probe(void)
{
/* wait for probe timeout */
wait_event(probe_timeout_waitqueue, !driver_deferred_probe_timeout);
/* wait for the deferred probe workqueue to finish */
flush_work(&deferred_probe_work);
@@ -944,6 +939,7 @@ out_unlock:
static int __device_attach(struct device *dev, bool allow_async)
{
int ret = 0;
bool async = false;
device_lock(dev);
if (dev->p->dead) {
@@ -982,7 +978,7 @@ static int __device_attach(struct device *dev, bool allow_async)
*/
dev_dbg(dev, "scheduling asynchronous probe\n");
get_device(dev);
async_schedule_dev(__device_attach_async_helper, dev);
async = true;
} else {
pm_request_idle(dev);
}
@@ -992,6 +988,8 @@ static int __device_attach(struct device *dev, bool allow_async)
}
out_unlock:
device_unlock(dev);
if (async)
async_schedule_dev(__device_attach_async_helper, dev);
return ret;
}


@@ -1368,7 +1368,7 @@ static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *b
static void nbd_clear_sock_ioctl(struct nbd_device *nbd,
struct block_device *bdev)
{
sock_shutdown(nbd);
nbd_clear_sock(nbd);
__invalidate_device(bdev, true);
nbd_bdev_reset(bdev);
if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
@@ -1467,15 +1467,20 @@ static struct nbd_config *nbd_alloc_config(void)
{
struct nbd_config *config;
if (!try_module_get(THIS_MODULE))
return ERR_PTR(-ENODEV);
config = kzalloc(sizeof(struct nbd_config), GFP_NOFS);
if (!config)
return NULL;
if (!config) {
module_put(THIS_MODULE);
return ERR_PTR(-ENOMEM);
}
atomic_set(&config->recv_threads, 0);
init_waitqueue_head(&config->recv_wq);
init_waitqueue_head(&config->conn_wait);
config->blksize_bits = NBD_DEF_BLKSIZE_BITS;
atomic_set(&config->live_connections, 0);
try_module_get(THIS_MODULE);
return config;
}
@@ -1502,12 +1507,13 @@ static int nbd_open(struct block_device *bdev, fmode_t mode)
mutex_unlock(&nbd->config_lock);
goto out;
}
config = nbd->config = nbd_alloc_config();
if (!config) {
ret = -ENOMEM;
config = nbd_alloc_config();
if (IS_ERR(config)) {
ret = PTR_ERR(config);
mutex_unlock(&nbd->config_lock);
goto out;
}
nbd->config = config;
refcount_set(&nbd->config_refs, 1);
refcount_inc(&nbd->refs);
mutex_unlock(&nbd->config_lock);
@@ -1914,13 +1920,14 @@ again:
nbd_put(nbd);
return -EINVAL;
}
config = nbd->config = nbd_alloc_config();
if (!nbd->config) {
config = nbd_alloc_config();
if (IS_ERR(config)) {
mutex_unlock(&nbd->config_lock);
nbd_put(nbd);
printk(KERN_ERR "nbd: couldn't allocate config\n");
return -ENOMEM;
return PTR_ERR(config);
}
nbd->config = config;
refcount_set(&nbd->config_refs, 1);
set_bit(NBD_RT_BOUND, &config->runtime_flags);
@@ -2478,6 +2485,12 @@ static void __exit nbd_cleanup(void)
struct nbd_device *nbd;
LIST_HEAD(del_list);
/*
* Unregister netlink interface prior to waiting
* for the completion of netlink commands.
*/
genl_unregister_family(&nbd_genl_family);
nbd_dbg_close();
mutex_lock(&nbd_index_mutex);
@@ -2487,6 +2500,9 @@ static void __exit nbd_cleanup(void)
while (!list_empty(&del_list)) {
nbd = list_first_entry(&del_list, struct nbd_device, list);
list_del_init(&nbd->list);
if (refcount_read(&nbd->config_refs))
printk(KERN_ERR "nbd: possibly leaking nbd_config (ref %d)\n",
refcount_read(&nbd->config_refs));
if (refcount_read(&nbd->refs) != 1)
printk(KERN_ERR "nbd: possibly leaking a device\n");
nbd_put(nbd);
@@ -2496,7 +2512,6 @@ static void __exit nbd_cleanup(void)
destroy_workqueue(nbd_del_wq);
idr_destroy(&nbd_index_idr);
genl_unregister_family(&nbd_genl_family);
unregister_blkdev(NBD_MAJOR, "nbd");
}


@@ -3325,7 +3325,9 @@ static int sysc_remove(struct platform_device *pdev)
struct sysc *ddata = platform_get_drvdata(pdev);
int error;
cancel_delayed_work_sync(&ddata->idle_work);
/* Device can still be enabled, see deferred idle quirk in probe */
if (cancel_delayed_work_sync(&ddata->idle_work))
ti_sysc_idle(&ddata->idle_work.work);
error = pm_runtime_resume_and_get(ddata->dev);
if (error < 0) {


@@ -789,8 +789,8 @@ static void __cold _credit_init_bits(size_t bits)
*
**********************************************************************/
static bool trust_cpu __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU);
static bool trust_bootloader __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_BOOTLOADER);
static bool trust_cpu __initdata = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU);
static bool trust_bootloader __initdata = IS_ENABLED(CONFIG_RANDOM_TRUST_BOOTLOADER);
static int __init parse_trust_cpu(char *arg)
{
return kstrtobool(arg, &trust_cpu);
@@ -813,7 +813,7 @@ early_param("random.trust_bootloader", parse_trust_bootloader);
int __init random_init(const char *command_line)
{
ktime_t now = ktime_get_real();
unsigned int i, arch_bytes;
unsigned int i, arch_bits;
unsigned long entropy;
#if defined(LATENT_ENTROPY_PLUGIN)
@@ -821,12 +821,12 @@ int __init random_init(const char *command_line)
_mix_pool_bytes(compiletime_seed, sizeof(compiletime_seed));
#endif
for (i = 0, arch_bytes = BLAKE2S_BLOCK_SIZE;
for (i = 0, arch_bits = BLAKE2S_BLOCK_SIZE * 8;
i < BLAKE2S_BLOCK_SIZE; i += sizeof(entropy)) {
if (!arch_get_random_seed_long_early(&entropy) &&
!arch_get_random_long_early(&entropy)) {
entropy = random_get_entropy();
arch_bytes -= sizeof(entropy);
arch_bits -= sizeof(entropy) * 8;
}
_mix_pool_bytes(&entropy, sizeof(entropy));
}
@@ -838,7 +838,7 @@ int __init random_init(const char *command_line)
if (crng_ready())
crng_reseed();
else if (trust_cpu)
credit_init_bits(arch_bytes * 8);
_credit_init_bits(arch_bits);
return 0;
}
@@ -886,13 +886,12 @@ EXPORT_SYMBOL_GPL(add_hwgenerator_randomness);
* Handle random seed passed by bootloader, and credit it if
* CONFIG_RANDOM_TRUST_BOOTLOADER is set.
*/
void __cold add_bootloader_randomness(const void *buf, size_t len)
void __init add_bootloader_randomness(const void *buf, size_t len)
{
mix_pool_bytes(buf, len);
if (trust_bootloader)
credit_init_bits(len * 8);
}
EXPORT_SYMBOL_GPL(add_bootloader_randomness);
struct fast_pool {
struct work_struct mix;


@@ -549,6 +549,7 @@ static void cleanup_dev(struct kref *kref)
if (xdev->workq)
destroy_workqueue(xdev->workq);
usb_put_dev(xdev->udev);
kfree(xdev->channels); /* Argument may be NULL, and that's fine */
kfree(xdev);
}


@@ -236,7 +236,7 @@ static int __init oxnas_rps_timer_init(struct device_node *np)
}
rps->irq = irq_of_parse_and_map(np, 0);
if (rps->irq < 0) {
if (!rps->irq) {
ret = -EINVAL;
goto err_iomap;
}


@@ -32,7 +32,7 @@ static int riscv_clock_next_event(unsigned long delta,
static unsigned int riscv_clock_event_irq;
static DEFINE_PER_CPU(struct clock_event_device, riscv_clock_event) = {
.name = "riscv_timer_clockevent",
.features = CLOCK_EVT_FEAT_ONESHOT,
.features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP,
.rating = 100,
.set_next_event = riscv_clock_next_event,
};


@@ -259,6 +259,11 @@ static int __init sp804_of_init(struct device_node *np, struct sp804_timer *time
struct clk *clk1, *clk2;
const char *name = of_get_property(np, "compatible", NULL);
if (initialized) {
pr_debug("%pOF: skipping further SP804 timer device\n", np);
return 0;
}
base = of_iomap(np, 0);
if (!base)
return -ENXIO;
@@ -270,11 +275,6 @@ static int __init sp804_of_init(struct device_node *np, struct sp804_timer *time
writel(0, timer1_base + timer->ctrl);
writel(0, timer2_base + timer->ctrl);
if (initialized || !of_device_is_available(np)) {
ret = -EINVAL;
goto err;
}
clk1 = of_clk_get(np, 0);
if (IS_ERR(clk1))
clk1 = NULL;


@@ -77,6 +77,27 @@ static inline void idxd_prep_desc_common(struct idxd_wq *wq,
hw->completion_addr = compl;
}
static struct dma_async_tx_descriptor *
idxd_dma_prep_interrupt(struct dma_chan *c, unsigned long flags)
{
struct idxd_wq *wq = to_idxd_wq(c);
u32 desc_flags;
struct idxd_desc *desc;
if (wq->state != IDXD_WQ_ENABLED)
return NULL;
op_flag_setup(flags, &desc_flags);
desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);
if (IS_ERR(desc))
return NULL;
idxd_prep_desc_common(wq, desc->hw, DSA_OPCODE_NOOP,
0, 0, 0, desc->compl_dma, desc_flags);
desc->txd.flags = flags;
return &desc->txd;
}
static struct dma_async_tx_descriptor *
idxd_dma_submit_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
dma_addr_t dma_src, size_t len, unsigned long flags)
@@ -181,10 +202,12 @@ int idxd_register_dma_device(struct idxd_device *idxd)
INIT_LIST_HEAD(&dma->channels);
dma->dev = dev;
dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
dma_cap_set(DMA_PRIVATE, dma->cap_mask);
dma_cap_set(DMA_COMPLETION_NO_ORDER, dma->cap_mask);
dma->device_release = idxd_dma_release;
dma->device_prep_dma_interrupt = idxd_dma_prep_interrupt;
if (idxd->hw.opcap.bits[0] & IDXD_OPCAP_MEMMOVE) {
dma_cap_set(DMA_MEMCPY, dma->cap_mask);
dma->device_prep_dma_memcpy = idxd_dma_submit_memcpy;


@@ -232,7 +232,7 @@ struct zynqmp_dma_chan {
bool is_dmacoherent;
struct tasklet_struct tasklet;
bool idle;
u32 desc_size;
size_t desc_size;
bool err;
u32 bus_width;
u32 src_burst_len;
@@ -489,7 +489,8 @@ static int zynqmp_dma_alloc_chan_resources(struct dma_chan *dchan)
}
chan->desc_pool_v = dma_alloc_coherent(chan->dev,
(2 * chan->desc_size * ZYNQMP_DMA_NUM_DESCS),
(2 * ZYNQMP_DMA_DESC_SIZE(chan) *
ZYNQMP_DMA_NUM_DESCS),
&chan->desc_pool_p, GFP_KERNEL);
if (!chan->desc_pool_v)
return -ENOMEM;


@@ -375,8 +375,8 @@ static int axp288_extcon_probe(struct platform_device *pdev)
if (adev) {
info->id_extcon = extcon_get_extcon_dev(acpi_dev_name(adev));
put_device(&adev->dev);
if (!info->id_extcon)
return -EPROBE_DEFER;
if (IS_ERR(info->id_extcon))
return PTR_ERR(info->id_extcon);
dev_info(dev, "controlling USB role\n");
} else {


@@ -194,6 +194,13 @@ static int ptn5150_init_dev_type(struct ptn5150_info *info)
return 0;
}
static void ptn5150_work_sync_and_put(void *data)
{
struct ptn5150_info *info = data;
cancel_work_sync(&info->irq_work);
}
static int ptn5150_i2c_probe(struct i2c_client *i2c)
{
struct device *dev = &i2c->dev;
@@ -284,6 +291,10 @@ static int ptn5150_i2c_probe(struct i2c_client *i2c)
if (ret)
return -EINVAL;
ret = devm_add_action_or_reset(dev, ptn5150_work_sync_and_put, info);
if (ret)
return ret;
/*
* Update current extcon state if for example OTG connection was there
* before the probe


@@ -863,6 +863,8 @@ EXPORT_SYMBOL_GPL(extcon_set_property_capability);
* @extcon_name: the extcon name provided with extcon_dev_register()
*
* Return the pointer of extcon device if success or ERR_PTR(err) if fail.
* NOTE: This function returns -EPROBE_DEFER so it may only be called from
* probe() functions.
*/
struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name)
{
@@ -876,7 +878,7 @@ struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name)
if (!strcmp(sd->name, extcon_name))
goto out;
}
sd = NULL;
sd = ERR_PTR(-EPROBE_DEFER);
out:
mutex_unlock(&extcon_dev_list_lock);
return sd;
@@ -1230,19 +1232,14 @@ int extcon_dev_register(struct extcon_dev *edev)
edev->dev.type = &edev->extcon_dev_type;
}
ret = device_register(&edev->dev);
if (ret) {
put_device(&edev->dev);
goto err_dev;
}
spin_lock_init(&edev->lock);
edev->nh = devm_kcalloc(&edev->dev, edev->max_supported,
sizeof(*edev->nh), GFP_KERNEL);
if (edev->max_supported) {
edev->nh = kcalloc(edev->max_supported, sizeof(*edev->nh),
GFP_KERNEL);
if (!edev->nh) {
ret = -ENOMEM;
device_unregister(&edev->dev);
goto err_dev;
goto err_alloc_nh;
}
}
for (index = 0; index < edev->max_supported; index++)
@@ -1253,6 +1250,12 @@ int extcon_dev_register(struct extcon_dev *edev)
dev_set_drvdata(&edev->dev, edev);
edev->state = 0;
ret = device_register(&edev->dev);
if (ret) {
put_device(&edev->dev);
goto err_dev;
}
mutex_lock(&extcon_dev_list_lock);
list_add(&edev->entry, &extcon_dev_list);
mutex_unlock(&extcon_dev_list_lock);
@@ -1260,6 +1263,9 @@ int extcon_dev_register(struct extcon_dev *edev)
return 0;
err_dev:
if (edev->max_supported)
kfree(edev->nh);
err_alloc_nh:
if (edev->max_supported)
kfree(edev->extcon_dev_type.groups);
err_alloc_groups:
@@ -1320,6 +1326,7 @@ void extcon_dev_unregister(struct extcon_dev *edev)
if (edev->max_supported) {
kfree(edev->extcon_dev_type.groups);
kfree(edev->cables);
kfree(edev->nh);
}
put_device(&edev->dev);


@@ -603,7 +603,7 @@ static void __init dmi_sysfs_register_handle(const struct dmi_header *dh,
"%d-%d", dh->type, entry->instance);
if (*ret) {
kfree(entry);
kobject_put(&entry->kobj);
return;
}


@@ -941,17 +941,17 @@ EXPORT_SYMBOL_GPL(stratix10_svc_allocate_memory);
void stratix10_svc_free_memory(struct stratix10_svc_chan *chan, void *kaddr)
{
struct stratix10_svc_data_mem *pmem;
size_t size = 0;
list_for_each_entry(pmem, &svc_data_mem, node)
if (pmem->vaddr == kaddr) {
size = pmem->size;
break;
}
gen_pool_free(chan->ctrl->genpool, (unsigned long)kaddr, size);
gen_pool_free(chan->ctrl->genpool,
(unsigned long)kaddr, pmem->size);
pmem->vaddr = NULL;
list_del(&pmem->node);
return;
}
list_del(&svc_data_mem);
}
EXPORT_SYMBOL_GPL(stratix10_svc_free_memory);


@@ -1108,20 +1108,21 @@ static int pca953x_regcache_sync(struct device *dev)
{
struct pca953x_chip *chip = dev_get_drvdata(dev);
int ret;
u8 regaddr;
/*
* The ordering between direction and output is important,
* sync these registers first and only then sync the rest.
*/
ret = regcache_sync_region(chip->regmap, chip->regs->direction,
chip->regs->direction + NBANK(chip));
regaddr = pca953x_recalc_addr(chip, chip->regs->direction, 0);
ret = regcache_sync_region(chip->regmap, regaddr, regaddr + NBANK(chip));
if (ret) {
dev_err(dev, "Failed to sync GPIO dir registers: %d\n", ret);
return ret;
}
ret = regcache_sync_region(chip->regmap, chip->regs->output,
chip->regs->output + NBANK(chip));
regaddr = pca953x_recalc_addr(chip, chip->regs->output, 0);
ret = regcache_sync_region(chip->regmap, regaddr, regaddr + NBANK(chip));
if (ret) {
dev_err(dev, "Failed to sync GPIO out registers: %d\n", ret);
return ret;
@@ -1129,16 +1130,18 @@ static int pca953x_regcache_sync(struct device *dev)
#ifdef CONFIG_GPIO_PCA953X_IRQ
if (chip->driver_data & PCA_PCAL) {
ret = regcache_sync_region(chip->regmap, PCAL953X_IN_LATCH,
PCAL953X_IN_LATCH + NBANK(chip));
regaddr = pca953x_recalc_addr(chip, PCAL953X_IN_LATCH, 0);
ret = regcache_sync_region(chip->regmap, regaddr,
regaddr + NBANK(chip));
if (ret) {
dev_err(dev, "Failed to sync INT latch registers: %d\n",
ret);
return ret;
}
ret = regcache_sync_region(chip->regmap, PCAL953X_INT_MASK,
PCAL953X_INT_MASK + NBANK(chip));
regaddr = pca953x_recalc_addr(chip, PCAL953X_INT_MASK, 0);
ret = regcache_sync_region(chip->regmap, regaddr,
regaddr + NBANK(chip));
if (ret) {
dev_err(dev, "Failed to sync INT mask registers: %d\n",
ret);


@@ -170,6 +170,7 @@ static const struct amdgpu_video_codec_info yc_video_codecs_decode_array[] = {
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
};
static const struct amdgpu_video_codecs yc_video_codecs_decode = {


@@ -1013,9 +1013,12 @@ static bool get_pixel_clk_frequency_100hz(
* not be programmed equal to DPREFCLK
*/
modulo_hz = REG_READ(MODULO[inst]);
if (modulo_hz)
*pixel_clk_khz = div_u64((uint64_t)clock_hz*
clock_source->ctx->dc->clk_mgr->dprefclk_khz*10,
modulo_hz);
else
*pixel_clk_khz = 0;
} else {
/* NOTE: There is agreement with VBIOS here that MODULO is
* programmed equal to DPREFCLK, in which case PHASE will be


@@ -772,7 +772,7 @@ int smu_v11_0_set_allowed_mask(struct smu_context *smu)
goto failed;
}
bitmap_copy((unsigned long *)feature_mask, feature->allowed, 64);
bitmap_to_arr32(feature_mask, feature->allowed, 64);
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh,
feature_mask[1], NULL);


@@ -1627,6 +1627,7 @@ static const struct throttling_logging_label {
uint32_t feature_mask;
const char *label;
} logging_label[] = {
{(1U << THROTTLER_TEMP_GPU_BIT), "GPU"},
{(1U << THROTTLER_TEMP_MEM_BIT), "HBM"},
{(1U << THROTTLER_TEMP_VR_GFX_BIT), "VR of GFX rail"},
{(1U << THROTTLER_TEMP_VR_MEM_BIT), "VR of HBM rail"},


@@ -721,7 +721,7 @@ int smu_v13_0_set_allowed_mask(struct smu_context *smu)
if (bitmap_empty(feature->allowed, SMU_FEATURE_MAX) || feature->feature_num < 64)
goto failed;
bitmap_copy((unsigned long *)feature_mask, feature->allowed, 64);
bitmap_to_arr32(feature_mask, feature->allowed, 64);
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh,
feature_mask[1], NULL);


@@ -474,7 +474,10 @@ static void ast_set_color_reg(struct ast_private *ast,
static void ast_set_crtthd_reg(struct ast_private *ast)
{
/* Set Threshold */
if (ast->chip == AST2300 || ast->chip == AST2400 ||
if (ast->chip == AST2600) {
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa7, 0xe0);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa6, 0xa0);
} else if (ast->chip == AST2300 || ast->chip == AST2400 ||
ast->chip == AST2500) {
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa7, 0x78);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa6, 0x60);


@@ -1268,6 +1268,25 @@ static int analogix_dp_bridge_attach(struct drm_bridge *bridge,
return 0;
}
static
struct drm_crtc *analogix_dp_get_old_crtc(struct analogix_dp_device *dp,
struct drm_atomic_state *state)
{
struct drm_encoder *encoder = dp->encoder;
struct drm_connector *connector;
struct drm_connector_state *conn_state;
connector = drm_atomic_get_old_connector_for_encoder(state, encoder);
if (!connector)
return NULL;
conn_state = drm_atomic_get_old_connector_state(state, connector);
if (!conn_state)
return NULL;
return conn_state->crtc;
}
static
struct drm_crtc *analogix_dp_get_new_crtc(struct analogix_dp_device *dp,
struct drm_atomic_state *state)
@@ -1448,14 +1467,16 @@ analogix_dp_bridge_atomic_disable(struct drm_bridge *bridge,
{
struct drm_atomic_state *old_state = old_bridge_state->base.state;
struct analogix_dp_device *dp = bridge->driver_private;
struct drm_crtc *crtc;
struct drm_crtc *old_crtc, *new_crtc;
struct drm_crtc_state *old_crtc_state = NULL;
struct drm_crtc_state *new_crtc_state = NULL;
int ret;
crtc = analogix_dp_get_new_crtc(dp, old_state);
if (!crtc)
new_crtc = analogix_dp_get_new_crtc(dp, old_state);
if (!new_crtc)
goto out;
new_crtc_state = drm_atomic_get_new_crtc_state(old_state, crtc);
new_crtc_state = drm_atomic_get_new_crtc_state(old_state, new_crtc);
if (!new_crtc_state)
goto out;
@@ -1464,6 +1485,19 @@ analogix_dp_bridge_atomic_disable(struct drm_bridge *bridge,
return;
out:
old_crtc = analogix_dp_get_old_crtc(dp, old_state);
if (old_crtc) {
old_crtc_state = drm_atomic_get_old_crtc_state(old_state,
old_crtc);
/* When moving from PSR to fully disabled, exit PSR first. */
if (old_crtc_state && old_crtc_state->self_refresh_active) {
ret = analogix_dp_disable_psr(dp);
if (ret)
DRM_ERROR("Failed to disable psr (%d)\n", ret);
}
}
analogix_dp_bridge_disable(bridge);
}


@@ -608,10 +608,14 @@ static int sn65dsi83_parse_dt(struct sn65dsi83 *ctx, enum sn65dsi83_model model)
ctx->host_node = of_graph_get_remote_port_parent(endpoint);
of_node_put(endpoint);
if (ctx->dsi_lanes < 0 || ctx->dsi_lanes > 4)
return -EINVAL;
if (!ctx->host_node)
return -ENODEV;
if (ctx->dsi_lanes <= 0 || ctx->dsi_lanes > 4) {
ret = -EINVAL;
goto err_put_node;
}
if (!ctx->host_node) {
ret = -ENODEV;
goto err_put_node;
}
ctx->lvds_dual_link = false;
ctx->lvds_dual_link_even_odd_swap = false;
@@ -638,16 +642,22 @@ static int sn65dsi83_parse_dt(struct sn65dsi83 *ctx, enum sn65dsi83_model model)
ret = drm_of_find_panel_or_bridge(dev->of_node, 2, 0, &panel, &panel_bridge);
if (ret < 0)
return ret;
goto err_put_node;
if (panel) {
panel_bridge = devm_drm_panel_bridge_add(dev, panel);
if (IS_ERR(panel_bridge))
return PTR_ERR(panel_bridge);
if (IS_ERR(panel_bridge)) {
ret = PTR_ERR(panel_bridge);
goto err_put_node;
}
}
ctx->panel_bridge = panel_bridge;
return 0;
err_put_node:
of_node_put(ctx->host_node);
return ret;
}
static int sn65dsi83_probe(struct i2c_client *client,
@@ -680,8 +690,10 @@ static int sn65dsi83_probe(struct i2c_client *client,
return ret;
ctx->regmap = devm_regmap_init_i2c(client, &sn65dsi83_regmap_config);
if (IS_ERR(ctx->regmap))
return PTR_ERR(ctx->regmap);
if (IS_ERR(ctx->regmap)) {
ret = PTR_ERR(ctx->regmap);
goto err_put_node;
}
dev_set_drvdata(dev, ctx);
i2c_set_clientdata(client, ctx);
@@ -691,6 +703,10 @@ static int sn65dsi83_probe(struct i2c_client *client,
drm_bridge_add(&ctx->bridge);
return 0;
err_put_node:
of_node_put(ctx->host_node);
return ret;
}
static int sn65dsi83_remove(struct i2c_client *client)


@@ -1003,9 +1003,19 @@ crtc_needs_disable(struct drm_crtc_state *old_state,
return drm_atomic_crtc_effectively_active(old_state);
/*
* We need to run through the crtc_funcs->disable() function if the CRTC
* is currently on, if it's transitioning to self refresh mode, or if
* it's in self refresh mode and needs to be fully disabled.
* We need to disable bridge(s) and CRTC if we're transitioning out of
* self-refresh and changing CRTCs at the same time, because the
* bridge tracks self-refresh status via CRTC state.
*/
if (old_state->self_refresh_active &&
old_state->crtc != new_state->crtc)
return true;
/*
* We also need to run through the crtc_funcs->disable() function if
* the CRTC is currently on, if it's transitioning to self refresh
* mode, or if it's in self refresh mode and needs to be fully
* disabled.
*/
return old_state->active ||
(old_state->self_refresh_active && !new_state->active) ||
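The hunk above is cut off in this view before the end of the return expression. Reconstructed from the surrounding context, the resulting helper reads roughly as follows; treat this as a sketch, not a verbatim copy of the upstream function:

static bool crtc_needs_disable(struct drm_crtc_state *old_state,
                               struct drm_crtc_state *new_state)
{
        /* No new_state means the CRTC is being switched off entirely. */
        if (!new_state)
                return drm_atomic_crtc_effectively_active(old_state);

        /* Leaving self-refresh while moving to a different CRTC. */
        if (old_state->self_refresh_active &&
            old_state->crtc != new_state->crtc)
                return true;

        /* Otherwise: currently on, entering self-refresh, or leaving it. */
        return old_state->active ||
               (old_state->self_refresh_active && !new_state->active) ||
               new_state->self_refresh_active;
}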


@@ -69,7 +69,7 @@ static void ipu_crtc_disable_planes(struct ipu_crtc *ipu_crtc,
drm_atomic_crtc_state_for_each_plane(plane, old_crtc_state) {
if (plane == &ipu_crtc->plane[0]->base)
disable_full = true;
if (&ipu_crtc->plane[1] && plane == &ipu_crtc->plane[1]->base)
if (ipu_crtc->plane[1] && plane == &ipu_crtc->plane[1]->base)
disable_partial = true;
}


@@ -473,6 +473,8 @@ static struct drm_display_mode *radeon_fp_native_mode(struct drm_encoder *encode
native_mode->vdisplay != 0 &&
native_mode->clock != 0) {
mode = drm_mode_duplicate(dev, native_mode);
if (!mode)
return NULL;
mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER;
drm_mode_set_name(mode);
@@ -487,6 +489,8 @@ static struct drm_display_mode *radeon_fp_native_mode(struct drm_encoder *encode
* simpler.
*/
mode = drm_cvt_mode(dev, native_mode->hdisplay, native_mode->vdisplay, 60, true, false, false);
if (!mode)
return NULL;
mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER;
DRM_DEBUG_KMS("Adding cvt approximation of native panel mode %s\n", mode->name);
}


@@ -380,9 +380,10 @@ static int debug_notifier_call(struct notifier_block *self,
int cpu;
struct debug_drvdata *drvdata;
mutex_lock(&debug_lock);
/* Bail out if we can't acquire the mutex or the functionality is off */
if (!mutex_trylock(&debug_lock))
return NOTIFY_DONE;
/* Bail out if the functionality is disabled */
if (!debug_enable)
goto skip_dump;
@@ -401,7 +402,7 @@ static int debug_notifier_call(struct notifier_block *self,
skip_dump:
mutex_unlock(&debug_lock);
return 0;
return NOTIFY_DONE;
}
static struct notifier_block debug_notifier = {
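The point of this change is that a panic/die notifier can run in a context where sleeping on a mutex may deadlock the very machine that is trying to dump state. A minimal sketch of the rule with hypothetical names (the same idea reappears later in this series for pvpanic, using spin_trylock()):

static DEFINE_MUTEX(dump_lock);

static int example_die_notifier(struct notifier_block *self,
                                unsigned long val, void *data)
{
        /* Never block here: give up if someone else holds the lock. */
        if (!mutex_trylock(&dump_lock))
                return NOTIFY_DONE;

        /* ... dump hardware state ... */

        mutex_unlock(&dump_lock);
        return NOTIFY_DONE;
}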


@@ -760,7 +760,7 @@ static void cdns_i2c_master_reset(struct i2c_adapter *adap)
static int cdns_i2c_process_msg(struct cdns_i2c *id, struct i2c_msg *msg,
struct i2c_adapter *adap)
{
unsigned long time_left;
unsigned long time_left, msg_timeout;
u32 reg;
id->p_msg = msg;
@@ -785,8 +785,16 @@ static int cdns_i2c_process_msg(struct cdns_i2c *id, struct i2c_msg *msg,
else
cdns_i2c_msend(id);
/* Minimal time to execute this message */
msg_timeout = msecs_to_jiffies((1000 * msg->len * BITS_PER_BYTE) / id->i2c_clk);
/* Plus some wiggle room */
msg_timeout += msecs_to_jiffies(500);
if (msg_timeout < adap->timeout)
msg_timeout = adap->timeout;
/* Wait for the signal of completion */
time_left = wait_for_completion_timeout(&id->xfer_done, adap->timeout);
time_left = wait_for_completion_timeout(&id->xfer_done, msg_timeout);
if (time_left == 0) {
cdns_i2c_master_reset(adap);
dev_err(id->adap.dev.parent,
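The new timeout is derived from the message length rather than taken as a fixed adapter value. A hypothetical helper showing the same arithmetic as the hunk above (illustration only, not part of the driver):

static unsigned long i2c_msg_timeout(unsigned int len_bytes,
                                     unsigned long bus_hz,
                                     unsigned long adap_timeout)
{
        /* Time to shift len_bytes * 8 bits at bus_hz, converted to jiffies... */
        unsigned long t = msecs_to_jiffies((1000 * len_bytes * BITS_PER_BYTE) / bus_hz);

        /* ...plus 500 ms of slack, but never below the adapter default. */
        t += msecs_to_jiffies(500);
        return max(t, adap_timeout);
}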


@@ -188,7 +188,6 @@ static const struct iio_chan_spec ad7124_channel_template = {
.sign = 'u',
.realbits = 24,
.storagebits = 32,
.shift = 8,
.endianness = IIO_BE,
},
};


@@ -36,8 +36,8 @@
/* Bits and mask definition for SC27XX_ADC_CH_CFG register */
#define SC27XX_ADC_CHN_ID_MASK GENMASK(4, 0)
#define SC27XX_ADC_SCALE_MASK GENMASK(10, 8)
#define SC27XX_ADC_SCALE_SHIFT 8
#define SC27XX_ADC_SCALE_MASK GENMASK(10, 9)
#define SC27XX_ADC_SCALE_SHIFT 9
/* Bits definitions for SC27XX_ADC_INT_EN registers */
#define SC27XX_ADC_IRQ_EN BIT(0)
@@ -103,14 +103,14 @@ static struct sc27xx_adc_linear_graph small_scale_graph = {
100, 341,
};
static const struct sc27xx_adc_linear_graph big_scale_graph_calib = {
4200, 856,
3600, 733,
static const struct sc27xx_adc_linear_graph sc2731_big_scale_graph_calib = {
4200, 850,
3600, 728,
};
static const struct sc27xx_adc_linear_graph small_scale_graph_calib = {
1000, 833,
100, 80,
static const struct sc27xx_adc_linear_graph sc2731_small_scale_graph_calib = {
1000, 838,
100, 84,
};
static int sc27xx_adc_get_calib_data(u32 calib_data, int calib_adc)
@@ -130,11 +130,11 @@ static int sc27xx_adc_scale_calibration(struct sc27xx_adc_data *data,
size_t len;
if (big_scale) {
calib_graph = &big_scale_graph_calib;
calib_graph = &sc2731_big_scale_graph_calib;
graph = &big_scale_graph;
cell_name = "big_scale_calib";
} else {
calib_graph = &small_scale_graph_calib;
calib_graph = &sc2731_small_scale_graph_calib;
graph = &small_scale_graph;
cell_name = "small_scale_calib";
}


@@ -61,7 +61,7 @@ struct stmpe_adc {
static int stmpe_read_voltage(struct stmpe_adc *info,
struct iio_chan_spec const *chan, int *val)
{
long ret;
unsigned long ret;
mutex_lock(&info->lock);
@@ -79,7 +79,7 @@ static int stmpe_read_voltage(struct stmpe_adc *info,
ret = wait_for_completion_timeout(&info->completion, STMPE_ADC_TIMEOUT);
if (ret <= 0) {
if (ret == 0) {
stmpe_reg_write(info->stmpe, STMPE_REG_ADC_INT_STA,
STMPE_ADC_CH(info->channel));
mutex_unlock(&info->lock);
@@ -96,7 +96,7 @@ static int stmpe_read_voltage(struct stmpe_adc *info,
static int stmpe_read_temp(struct stmpe_adc *info,
struct iio_chan_spec const *chan, int *val)
{
long ret;
unsigned long ret;
mutex_lock(&info->lock);
@@ -114,7 +114,7 @@ static int stmpe_read_temp(struct stmpe_adc *info,
ret = wait_for_completion_timeout(&info->completion, STMPE_ADC_TIMEOUT);
if (ret <= 0) {
if (ret == 0) {
mutex_unlock(&info->lock);
return -ETIMEDOUT;
}
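Both hunks in this file, and the vl53l0x change further down, are the same fix: wait_for_completion_timeout() returns the remaining time in jiffies as an unsigned long and 0 on timeout; it never returns a negative value, so the result belongs in an unsigned variable and only the == 0 case needs handling. A minimal sketch of the convention (completion and timeout value are hypothetical):

static int example_wait(struct completion *done)
{
        unsigned long time_left;

        time_left = wait_for_completion_timeout(done, msecs_to_jiffies(100));
        if (!time_left)
                return -ETIMEDOUT;      /* 0 means the timeout elapsed */

        return 0;                       /* non-zero: jiffies left over */
}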


@@ -71,16 +71,18 @@ st_sensors_match_odr_error:
int st_sensors_set_odr(struct iio_dev *indio_dev, unsigned int odr)
{
int err;
int err = 0;
struct st_sensor_odr_avl odr_out = {0, 0};
struct st_sensor_data *sdata = iio_priv(indio_dev);
mutex_lock(&sdata->odr_lock);
if (!sdata->sensor_settings->odr.mask)
return 0;
goto unlock_mutex;
err = st_sensors_match_odr(sdata->sensor_settings, odr, &odr_out);
if (err < 0)
goto st_sensors_match_odr_error;
goto unlock_mutex;
if ((sdata->sensor_settings->odr.addr ==
sdata->sensor_settings->pw.addr) &&
@@ -103,7 +105,9 @@ int st_sensors_set_odr(struct iio_dev *indio_dev, unsigned int odr)
if (err >= 0)
sdata->odr = odr_out.hz;
st_sensors_match_odr_error:
unlock_mutex:
mutex_unlock(&sdata->odr_lock);
return err;
}
EXPORT_SYMBOL(st_sensors_set_odr);
@@ -365,6 +369,8 @@ int st_sensors_init_sensor(struct iio_dev *indio_dev,
struct st_sensors_platform_data *of_pdata;
int err = 0;
mutex_init(&sdata->odr_lock);
/* If OF/DT pdata exists, it will take precedence of anything else */
of_pdata = st_sensors_dev_probe(indio_dev->dev.parent, pdata);
if (IS_ERR(of_pdata))
@@ -558,18 +564,24 @@ int st_sensors_read_info_raw(struct iio_dev *indio_dev,
err = -EBUSY;
goto out;
} else {
mutex_lock(&sdata->odr_lock);
err = st_sensors_set_enable(indio_dev, true);
if (err < 0)
if (err < 0) {
mutex_unlock(&sdata->odr_lock);
goto out;
}
msleep((sdata->sensor_settings->bootime * 1000) / sdata->odr);
err = st_sensors_read_axis_data(indio_dev, ch, val);
if (err < 0)
if (err < 0) {
mutex_unlock(&sdata->odr_lock);
goto out;
}
*val = *val >> ch->scan_type.shift;
err = st_sensors_set_enable(indio_dev, false);
mutex_unlock(&sdata->odr_lock);
}
out:
mutex_unlock(&indio_dev->mlock);
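Two rules are enforced at once here: the ODR mutex must be dropped on every exit path of st_sensors_set_odr() (hence the early returns becoming goto unlock_mutex), and the enable/read/disable sequence in st_sensors_read_info_raw() happens under the same lock so the rate cannot change mid-read. A minimal sketch of the first rule, with hypothetical struct and register names:

static int sensor_set_rate(struct sensor *s, unsigned int hz)
{
        int err = 0;

        mutex_lock(&s->lock);

        if (!s->has_rate_reg)           /* nothing to do, but still unlock */
                goto unlock;

        err = regmap_write(s->regmap, SENSOR_RATE_REG, hz);

unlock:
        mutex_unlock(&s->lock);
        return err;
}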


@@ -575,10 +575,9 @@ static struct iio_sw_device *iio_dummy_probe(const char *name)
*/
swd = kzalloc(sizeof(*swd), GFP_KERNEL);
if (!swd) {
ret = -ENOMEM;
goto error_kzalloc;
}
if (!swd)
return ERR_PTR(-ENOMEM);
/*
* Allocate an IIO device.
*
@@ -590,7 +589,7 @@ static struct iio_sw_device *iio_dummy_probe(const char *name)
indio_dev = iio_device_alloc(parent, sizeof(*st));
if (!indio_dev) {
ret = -ENOMEM;
goto error_ret;
goto error_free_swd;
}
st = iio_priv(indio_dev);
@@ -616,6 +615,10 @@ static struct iio_sw_device *iio_dummy_probe(const char *name)
* indio_dev->name = spi_get_device_id(spi)->name;
*/
indio_dev->name = kstrdup(name, GFP_KERNEL);
if (!indio_dev->name) {
ret = -ENOMEM;
goto error_free_device;
}
/* Provide description of available channels */
indio_dev->channels = iio_dummy_channels;
@@ -632,7 +635,7 @@ static struct iio_sw_device *iio_dummy_probe(const char *name)
ret = iio_simple_dummy_events_register(indio_dev);
if (ret < 0)
goto error_free_device;
goto error_free_name;
ret = iio_simple_dummy_configure_buffer(indio_dev);
if (ret < 0)
@@ -649,11 +652,12 @@ error_unconfigure_buffer:
iio_simple_dummy_unconfigure_buffer(indio_dev);
error_unregister_events:
iio_simple_dummy_events_unregister(indio_dev);
error_free_name:
kfree(indio_dev->name);
error_free_device:
iio_device_free(indio_dev);
error_ret:
error_free_swd:
kfree(swd);
error_kzalloc:
return ERR_PTR(ret);
}
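The probe error path above follows the usual reverse-order unwind: every new allocation (here the kstrdup()'d name) gets its own label, and a failure jumps to the label that releases exactly what has been set up so far. A generic sketch of that shape, with made-up names:

static struct widget *widget_create(const char *name)
{
        struct widget *w;
        int ret;

        w = kzalloc(sizeof(*w), GFP_KERNEL);
        if (!w)
                return ERR_PTR(-ENOMEM);

        w->name = kstrdup(name, GFP_KERNEL);
        if (!w->name) {
                ret = -ENOMEM;
                goto err_free_widget;
        }

        ret = widget_register(w);       /* hypothetical registration step */
        if (ret)
                goto err_free_name;

        return w;

err_free_name:
        kfree(w->name);
err_free_widget:
        kfree(w);
        return ERR_PTR(ret);
}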


@@ -104,6 +104,7 @@ static int vl53l0x_read_proximity(struct vl53l0x_data *data,
u16 tries = 20;
u8 buffer[12];
int ret;
unsigned long time_left;
ret = i2c_smbus_write_byte_data(client, VL_REG_SYSRANGE_START, 1);
if (ret < 0)
@@ -112,10 +113,8 @@ static int vl53l0x_read_proximity(struct vl53l0x_data *data,
if (data->client->irq) {
reinit_completion(&data->completion);
ret = wait_for_completion_timeout(&data->completion, HZ/10);
if (ret < 0)
return ret;
else if (ret == 0)
time_left = wait_for_completion_timeout(&data->completion, HZ/10);
if (time_left == 0)
return -ETIMEDOUT;
vl53l0x_clear_irq(data);


@@ -942,17 +942,22 @@ static int bcm5974_probe(struct usb_interface *iface,
if (!dev->tp_data)
goto err_free_bt_buffer;
if (dev->bt_urb)
if (dev->bt_urb) {
usb_fill_int_urb(dev->bt_urb, udev,
usb_rcvintpipe(udev, cfg->bt_ep),
dev->bt_data, dev->cfg.bt_datalen,
bcm5974_irq_button, dev, 1);
dev->bt_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
}
usb_fill_int_urb(dev->tp_urb, udev,
usb_rcvintpipe(udev, cfg->tp_ep),
dev->tp_data, dev->cfg.tp_datalen,
bcm5974_irq_trackpad, dev, 1);
dev->tp_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
/* create bcm5974 device */
usb_make_path(udev, dev->phys, sizeof(dev->phys));
strlcat(dev->phys, "/input0", sizeof(dev->phys));


@@ -3786,6 +3786,8 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
/* Base address */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -EINVAL;
if (resource_size(res) < arm_smmu_resource_size(smmu)) {
dev_err(dev, "MMIO region too small (%pr)\n", res);
return -EINVAL;


@@ -2090,11 +2090,10 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
if (err)
return err;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
ioaddr = res->start;
smmu->base = devm_ioremap_resource(dev, res);
smmu->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(smmu->base))
return PTR_ERR(smmu->base);
ioaddr = res->start;
/*
* The resource size should effectively match the value of SMMU_TOP;
* stash that temporarily until we know PAGESIZE to validate it with.


@@ -7942,17 +7942,22 @@ EXPORT_SYMBOL(md_register_thread);
void md_unregister_thread(struct md_thread **threadp)
{
struct md_thread *thread = *threadp;
if (!thread)
return;
pr_debug("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));
/* Locking ensures that mddev_unlock does not wake_up a
struct md_thread *thread;
/*
* Locking ensures that mddev_unlock does not wake_up a
* non-existent thread
*/
spin_lock(&pers_lock);
thread = *threadp;
if (!thread) {
spin_unlock(&pers_lock);
return;
}
*threadp = NULL;
spin_unlock(&pers_lock);
pr_debug("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));
kthread_stop(thread->tsk);
kfree(thread);
}
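The race being closed: two concurrent callers of md_unregister_thread() could both read *threadp before either cleared it, and one would then stop and free an already-freed thread. The general shape of the fix is to claim and clear the shared pointer inside the lock and do the sleeping teardown only after dropping it; a sketch with hypothetical names:

static DEFINE_SPINLOCK(worker_lock);

static void worker_unregister(struct worker **slot)
{
        struct worker *w;

        spin_lock(&worker_lock);
        w = *slot;                      /* read and... */
        *slot = NULL;                   /* ...clear while holding the lock */
        spin_unlock(&worker_lock);

        if (!w)
                return;                 /* another caller already tore it down */

        kthread_stop(w->task);          /* may sleep, so done after unlocking */
        kfree(w);
}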


@@ -128,21 +128,6 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
pr_debug("md/raid0:%s: FINAL %d zones\n",
mdname(mddev), conf->nr_strip_zones);
if (conf->nr_strip_zones == 1) {
conf->layout = RAID0_ORIG_LAYOUT;
} else if (mddev->layout == RAID0_ORIG_LAYOUT ||
mddev->layout == RAID0_ALT_MULTIZONE_LAYOUT) {
conf->layout = mddev->layout;
} else if (default_layout == RAID0_ORIG_LAYOUT ||
default_layout == RAID0_ALT_MULTIZONE_LAYOUT) {
conf->layout = default_layout;
} else {
pr_err("md/raid0:%s: cannot assemble multi-zone RAID0 with default_layout setting\n",
mdname(mddev));
pr_err("md/raid0: please set raid0.default_layout to 1 or 2\n");
err = -ENOTSUPP;
goto abort;
}
/*
* now since we have the hard sector sizes, we can make sure
* chunk size is a multiple of that sector size
@@ -273,6 +258,22 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
(unsigned long long)smallest->sectors);
}
if (conf->nr_strip_zones == 1 || conf->strip_zone[1].nb_dev == 1) {
conf->layout = RAID0_ORIG_LAYOUT;
} else if (mddev->layout == RAID0_ORIG_LAYOUT ||
mddev->layout == RAID0_ALT_MULTIZONE_LAYOUT) {
conf->layout = mddev->layout;
} else if (default_layout == RAID0_ORIG_LAYOUT ||
default_layout == RAID0_ALT_MULTIZONE_LAYOUT) {
conf->layout = default_layout;
} else {
pr_err("md/raid0:%s: cannot assemble multi-zone RAID0 with default_layout setting\n",
mdname(mddev));
pr_err("md/raid0: please set raid0.default_layout to 1 or 2\n");
err = -EOPNOTSUPP;
goto abort;
}
pr_debug("md/raid0:%s: done.\n", mdname(mddev));
*private_conf = conf;


@@ -667,6 +667,7 @@ static int rtsx_usb_probe(struct usb_interface *intf,
return 0;
out_init_fail:
usb_set_intfdata(ucr->pusb_intf, NULL);
usb_free_coherent(ucr->pusb_dev, IOBUF_SIZE, ucr->iobuf,
ucr->iobuf_dma);
return ret;


@@ -1351,17 +1351,18 @@ static int fastrpc_req_munmap_impl(struct fastrpc_user *fl,
struct fastrpc_req_munmap *req)
{
struct fastrpc_invoke_args args[1] = { [0] = { 0 } };
struct fastrpc_buf *buf, *b;
struct fastrpc_buf *buf = NULL, *iter, *b;
struct fastrpc_munmap_req_msg req_msg;
struct device *dev = fl->sctx->dev;
int err;
u32 sc;
spin_lock(&fl->lock);
list_for_each_entry_safe(buf, b, &fl->mmaps, node) {
if ((buf->raddr == req->vaddrout) && (buf->size == req->size))
list_for_each_entry_safe(iter, b, &fl->mmaps, node) {
if ((iter->raddr == req->vaddrout) && (iter->size == req->size)) {
buf = iter;
break;
buf = NULL;
}
}
spin_unlock(&fl->lock);
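Several fixes in this series address the same list-iterator misuse: after list_for_each_entry_safe() finishes without a break, the iterator no longer points at a valid entry and must not be dereferenced. The corrected shape inside fastrpc_req_munmap_impl() keeps a dedicated iterator and copies a match into a separate pointer that stays NULL when nothing was found, roughly:

        struct fastrpc_buf *buf = NULL, *iter, *tmp;

        spin_lock(&fl->lock);
        list_for_each_entry_safe(iter, tmp, &fl->mmaps, node) {
                if (iter->raddr == req->vaddrout && iter->size == req->size) {
                        buf = iter;     /* remember the match */
                        break;
                }
        }
        spin_unlock(&fl->lock);

        if (!buf)
                return -EINVAL;         /* illustrative error code */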


@@ -248,6 +248,11 @@ void lkdtm_ARRAY_BOUNDS(void)
not_checked = kmalloc(sizeof(*not_checked) * 2, GFP_KERNEL);
checked = kmalloc(sizeof(*checked) * 2, GFP_KERNEL);
if (!not_checked || !checked) {
kfree(not_checked);
kfree(checked);
return;
}
pr_info("Array access within bounds ...\n");
/* For both, touch all bytes in the actual member size. */
@@ -267,6 +272,9 @@ void lkdtm_ARRAY_BOUNDS(void)
kfree(not_checked);
kfree(checked);
pr_err("FAIL: survived array bounds overflow!\n");
if (IS_ENABLED(CONFIG_UBSAN_BOUNDS))
pr_expected_config(CONFIG_UBSAN_TRAP);
else
pr_expected_config(CONFIG_UBSAN_BOUNDS);
}
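The allocation check added at the top of lkdtm_ARRAY_BOUNDS() relies on kfree(NULL) being a no-op, so both buffers can be freed unconditionally without tracking which of the two kmalloc() calls failed. The same idiom in isolation:

        a = kmalloc(sizeof(*a), GFP_KERNEL);
        b = kmalloc(sizeof(*b), GFP_KERNEL);
        if (!a || !b) {
                kfree(a);               /* safe even when a is NULL */
                kfree(b);
                return;
        }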


@@ -9,19 +9,19 @@
extern char *lkdtm_kernel_info;
#define pr_expected_config(kconfig) \
{ \
do { \
if (IS_ENABLED(kconfig)) \
pr_err("Unexpected! This %s was built with " #kconfig "=y\n", \
lkdtm_kernel_info); \
else \
pr_warn("This is probably expected, since this %s was built *without* " #kconfig "=y\n", \
lkdtm_kernel_info); \
}
} while (0)
#ifndef MODULE
int lkdtm_check_bool_cmdline(const char *param);
#define pr_expected_config_param(kconfig, param) \
{ \
do { \
if (IS_ENABLED(kconfig)) { \
switch (lkdtm_check_bool_cmdline(param)) { \
case 0: \
@@ -52,7 +52,7 @@ int lkdtm_check_bool_cmdline(const char *param);
break; \
} \
} \
}
} while (0)
#else
#define pr_expected_config_param(kconfig, param) pr_expected_config(kconfig)
#endif
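Why the do { ... } while (0) wrapper: a macro whose body is a bare { ... } block breaks when used as the sole statement of an if/else, because the caller's trailing semicolon terminates the if before the else. Wrapping the body in do/while (0) makes it parse as a single statement everywhere. A minimal illustration (failed stands in for whatever condition applies):

#define report_twice(msg)                       \
        do {                                    \
                pr_err("first:  %s\n", msg);    \
                pr_err("second: %s\n", msg);    \
        } while (0)

        /* Compiles and behaves as expected even without braces: */
        if (failed)
                report_twice("something broke");
        else
                pr_info("all good\n");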


@@ -30,12 +30,12 @@ static const unsigned char test_text[] = "This is a test.\n";
*/
static noinline unsigned char *trick_compiler(unsigned char *stack)
{
return stack + 0;
return stack + unconst;
}
static noinline unsigned char *do_usercopy_stack_callee(int value)
{
unsigned char buf[32];
unsigned char buf[128];
int i;
/* Exercise stack to avoid everything living in registers. */
@@ -43,7 +43,12 @@ static noinline unsigned char *do_usercopy_stack_callee(int value)
buf[i] = value & 0xff;
}
return trick_compiler(buf);
/*
* Put the target buffer in the middle of stack allocation
* so that we don't step on future stack users regardless
* of stack growth direction.
*/
return trick_compiler(&buf[(128/2)-32]);
}
static noinline void do_usercopy_stack(bool to_user, bool bad_frame)
@@ -66,6 +71,12 @@ static noinline void do_usercopy_stack(bool to_user, bool bad_frame)
bad_stack -= sizeof(unsigned long);
}
#ifdef ARCH_HAS_CURRENT_STACK_POINTER
pr_info("stack : %px\n", (void *)current_stack_pointer);
#endif
pr_info("good_stack: %px-%px\n", good_stack, good_stack + sizeof(good_stack));
pr_info("bad_stack : %px-%px\n", bad_stack, bad_stack + sizeof(good_stack));
user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
PROT_READ | PROT_WRITE | PROT_EXEC,
MAP_ANONYMOUS | MAP_PRIVATE, 0);


@@ -100,7 +100,7 @@ static int pvpanic_mmio_probe(struct platform_device *pdev)
pi->base = base;
pi->capability = PVPANIC_PANICKED | PVPANIC_CRASH_LOADED;
/* initlize capability by RDPT */
/* initialize capability by RDPT */
pi->capability &= ioread8(base);
pi->events = pi->capability;


@@ -34,7 +34,9 @@ pvpanic_send_event(unsigned int event)
{
struct pvpanic_instance *pi_cur;
spin_lock(&pvpanic_lock);
if (!spin_trylock(&pvpanic_lock))
return;
list_for_each_entry(pi_cur, &pvpanic_list, list) {
if (event & pi_cur->capability & pi_cur->events)
iowrite8(event, pi_cur->base);
@@ -56,9 +58,13 @@ pvpanic_panic_notify(struct notifier_block *nb, unsigned long code,
return NOTIFY_DONE;
}
/*
* Call our notifier very early on panic, deferring the
* action taken to the hypervisor.
*/
static struct notifier_block pvpanic_panic_nb = {
.notifier_call = pvpanic_panic_notify,
.priority = 1, /* let this called before broken drm_fb_helper */
.priority = INT_MAX,
};
static void pvpanic_remove(void *param)


@@ -1482,7 +1482,6 @@ void mmc_blk_cqe_recovery(struct mmc_queue *mq)
err = mmc_cqe_recovery(host);
if (err)
mmc_blk_reset(mq->blkdata, host, MMC_BLK_CQE_RECOVERY);
else
mmc_blk_reset_success(mq->blkdata, MMC_BLK_CQE_RECOVERY);
pr_debug("%s: CQE recovery done\n", mmc_hostname(host));


@@ -97,6 +97,33 @@ out:
return e;
}
/*
* has_enough_free_count - whether ubi has enough free pebs to fill fm pools
* @ubi: UBI device description object
* @is_wl_pool: whether UBI is filling wear leveling pool
*
* This helper function checks whether there are enough free pebs (deducted
* by fastmap pebs) to fill fm_pool and fm_wl_pool, above rule works after
* there is at least one of free pebs is filled into fm_wl_pool.
* For wear leveling pool, UBI should also reserve free pebs for bad pebs
* handling, because there maybe no enough free pebs for user volumes after
* producing new bad pebs.
*/
static bool has_enough_free_count(struct ubi_device *ubi, bool is_wl_pool)
{
int fm_used = 0; // fastmap non anchor pebs.
int beb_rsvd_pebs;
if (!ubi->free.rb_node)
return false;
beb_rsvd_pebs = is_wl_pool ? ubi->beb_rsvd_pebs : 0;
if (ubi->fm_wl_pool.size > 0 && !(ubi->ro_mode || ubi->fm_disabled))
fm_used = ubi->fm_size / ubi->leb_size - 1;
return ubi->free_count - beb_rsvd_pebs > fm_used;
}
/**
* ubi_refill_pools - refills all fastmap PEB pools.
* @ubi: UBI device description object
@@ -120,21 +147,17 @@ void ubi_refill_pools(struct ubi_device *ubi)
wl_tree_add(ubi->fm_anchor, &ubi->free);
ubi->free_count++;
}
if (ubi->fm_next_anchor) {
wl_tree_add(ubi->fm_next_anchor, &ubi->free);
ubi->free_count++;
}
/* All available PEBs are in ubi->free, now is the time to get
/*
* All available PEBs are in ubi->free, now is the time to get
* the best anchor PEBs.
*/
ubi->fm_anchor = ubi_wl_get_fm_peb(ubi, 1);
ubi->fm_next_anchor = ubi_wl_get_fm_peb(ubi, 1);
for (;;) {
enough = 0;
if (pool->size < pool->max_size) {
if (!ubi->free.rb_node)
if (!has_enough_free_count(ubi, false))
break;
e = wl_get_wle(ubi);
@@ -147,8 +170,7 @@ void ubi_refill_pools(struct ubi_device *ubi)
enough++;
if (wl_pool->size < wl_pool->max_size) {
if (!ubi->free.rb_node ||
(ubi->free_count - ubi->beb_rsvd_pebs < 5))
if (!has_enough_free_count(ubi, true))
break;
e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
@@ -286,20 +308,26 @@ static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
{
struct ubi_work *wrk;
struct ubi_wl_entry *anchor;
spin_lock(&ubi->wl_lock);
/* Do we have a next anchor? */
if (!ubi->fm_next_anchor) {
ubi->fm_next_anchor = ubi_wl_get_fm_peb(ubi, 1);
if (!ubi->fm_next_anchor)
/* Tell wear leveling to produce a new anchor PEB */
ubi->fm_do_produce_anchor = 1;
/* Do we already have an anchor? */
if (ubi->fm_anchor) {
spin_unlock(&ubi->wl_lock);
return 0;
}
/* Do wear leveling to get a new anchor PEB or check the
* existing next anchor candidate.
*/
/* See if we can find an anchor PEB on the list of free PEBs */
anchor = ubi_wl_get_fm_peb(ubi, 1);
if (anchor) {
ubi->fm_anchor = anchor;
spin_unlock(&ubi->wl_lock);
return 0;
}
ubi->fm_do_produce_anchor = 1;
/* No luck, trigger wear leveling to produce a new anchor PEB. */
if (ubi->wl_scheduled) {
spin_unlock(&ubi->wl_lock);
return 0;
@@ -381,11 +409,6 @@ static void ubi_fastmap_close(struct ubi_device *ubi)
ubi->fm_anchor = NULL;
}
if (ubi->fm_next_anchor) {
return_unused_peb(ubi, ubi->fm_next_anchor);
ubi->fm_next_anchor = NULL;
}
if (ubi->fm) {
for (i = 0; i < ubi->fm->used_blocks; i++)
kfree(ubi->fm->e[i]);


@@ -1230,17 +1230,6 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
fm_pos += sizeof(*fec);
ubi_assert(fm_pos <= ubi->fm_size);
}
if (ubi->fm_next_anchor) {
fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
fec->pnum = cpu_to_be32(ubi->fm_next_anchor->pnum);
set_seen(ubi, ubi->fm_next_anchor->pnum, seen_pebs);
fec->ec = cpu_to_be32(ubi->fm_next_anchor->ec);
free_peb_count++;
fm_pos += sizeof(*fec);
ubi_assert(fm_pos <= ubi->fm_size);
}
fmh->free_peb_count = cpu_to_be32(free_peb_count);
ubi_for_each_used_peb(ubi, wl_e, tmp_rb) {


@@ -489,8 +489,7 @@ struct ubi_debug_info {
* @fm_work: fastmap work queue
* @fm_work_scheduled: non-zero if fastmap work was scheduled
* @fast_attach: non-zero if UBI was attached by fastmap
* @fm_anchor: The new anchor PEB used during fastmap update
* @fm_next_anchor: An anchor PEB candidate for the next time fastmap is updated
* @fm_anchor: The next anchor PEB to use for fastmap
* @fm_do_produce_anchor: If true produce an anchor PEB in wl
*
* @used: RB-tree of used physical eraseblocks
@@ -601,7 +600,6 @@ struct ubi_device {
int fm_work_scheduled;
int fast_attach;
struct ubi_wl_entry *fm_anchor;
struct ubi_wl_entry *fm_next_anchor;
int fm_do_produce_anchor;
/* Wear-leveling sub-system's stuff */


@@ -309,7 +309,6 @@ out_mapping:
ubi->volumes[vol_id] = NULL;
ubi->vol_count -= 1;
spin_unlock(&ubi->volumes_lock);
ubi_eba_destroy_table(eba_tbl);
out_acc:
spin_lock(&ubi->volumes_lock);
ubi->rsvd_pebs -= vol->reserved_pebs;


@@ -689,16 +689,16 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
#ifdef CONFIG_MTD_UBI_FASTMAP
e1 = find_anchor_wl_entry(&ubi->used);
if (e1 && ubi->fm_next_anchor &&
(ubi->fm_next_anchor->ec - e1->ec >= UBI_WL_THRESHOLD)) {
if (e1 && ubi->fm_anchor &&
(ubi->fm_anchor->ec - e1->ec >= UBI_WL_THRESHOLD)) {
ubi->fm_do_produce_anchor = 1;
/* fm_next_anchor is no longer considered a good anchor
* candidate.
/*
* fm_anchor is no longer considered a good anchor.
* NULL assignment also prevents multiple wear level checks
* of this PEB.
*/
wl_tree_add(ubi->fm_next_anchor, &ubi->free);
ubi->fm_next_anchor = NULL;
wl_tree_add(ubi->fm_anchor, &ubi->free);
ubi->fm_anchor = NULL;
ubi->free_count++;
}
@@ -1085,12 +1085,13 @@ static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk)
if (!err) {
spin_lock(&ubi->wl_lock);
if (!ubi->fm_disabled && !ubi->fm_next_anchor &&
if (!ubi->fm_disabled && !ubi->fm_anchor &&
e->pnum < UBI_FM_MAX_START) {
/* Abort anchor production, if needed it will be
/*
* Abort anchor production, if needed it will be
* enabled again in the wear leveling started below.
*/
ubi->fm_next_anchor = e;
ubi->fm_anchor = e;
ubi->fm_do_produce_anchor = 0;
} else {
wl_tree_add(e, &ubi->free);


@@ -2047,8 +2047,10 @@ static int gswip_gphy_fw_list(struct gswip_priv *priv,
for_each_available_child_of_node(gphy_fw_list_np, gphy_fw_np) {
err = gswip_gphy_fw_probe(priv, &priv->gphy_fw[i],
gphy_fw_np, i);
if (err)
if (err) {
of_node_put(gphy_fw_np);
goto remove_gphy;
}
i++;
}


@@ -3492,6 +3492,7 @@ static int mv88e6xxx_mdios_register(struct mv88e6xxx_chip *chip,
*/
child = of_get_child_by_name(np, "mdio");
err = mv88e6xxx_mdio_register(chip, child, false);
of_node_put(child);
if (err)
return err;


@@ -50,22 +50,17 @@ static int mv88e6390_serdes_write(struct mv88e6xxx_chip *chip,
}
static int mv88e6xxx_serdes_pcs_get_state(struct mv88e6xxx_chip *chip,
u16 ctrl, u16 status, u16 lpa,
u16 bmsr, u16 lpa, u16 status,
struct phylink_link_state *state)
{
state->link = !!(status & MV88E6390_SGMII_PHY_STATUS_LINK);
state->an_complete = !!(bmsr & BMSR_ANEGCOMPLETE);
if (status & MV88E6390_SGMII_PHY_STATUS_SPD_DPL_VALID) {
/* The Spped and Duplex Resolved register is 1 if AN is enabled
* and complete, or if AN is disabled. So with disabled AN we
* still get here on link up. But we want to set an_complete
* only if AN was enabled, thus we look at BMCR_ANENABLE.
* (According to 802.3-2008 section 22.2.4.2.10, we should be
* able to get this same value from BMSR_ANEGCAPABLE, but tests
* show that these Marvell PHYs don't conform to this part of
* the specificaion - BMSR_ANEGCAPABLE is simply always 1.)
* still get here on link up.
*/
state->an_complete = !!(ctrl & BMCR_ANENABLE);
state->duplex = status &
MV88E6390_SGMII_PHY_STATUS_DUPLEX_FULL ?
DUPLEX_FULL : DUPLEX_HALF;
@@ -191,12 +186,12 @@ int mv88e6352_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port,
int mv88e6352_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port,
int lane, struct phylink_link_state *state)
{
u16 lpa, status, ctrl;
u16 bmsr, lpa, status;
int err;
err = mv88e6352_serdes_read(chip, MII_BMCR, &ctrl);
err = mv88e6352_serdes_read(chip, MII_BMSR, &bmsr);
if (err) {
dev_err(chip->dev, "can't read Serdes PHY control: %d\n", err);
dev_err(chip->dev, "can't read Serdes BMSR: %d\n", err);
return err;
}
@@ -212,7 +207,7 @@ int mv88e6352_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port,
return err;
}
return mv88e6xxx_serdes_pcs_get_state(chip, ctrl, status, lpa, state);
return mv88e6xxx_serdes_pcs_get_state(chip, bmsr, lpa, status, state);
}
int mv88e6352_serdes_pcs_an_restart(struct mv88e6xxx_chip *chip, int port,
@@ -915,13 +910,13 @@ int mv88e6390_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port,
static int mv88e6390_serdes_pcs_get_state_sgmii(struct mv88e6xxx_chip *chip,
int port, int lane, struct phylink_link_state *state)
{
u16 lpa, status, ctrl;
u16 bmsr, lpa, status;
int err;
err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
MV88E6390_SGMII_BMCR, &ctrl);
MV88E6390_SGMII_BMSR, &bmsr);
if (err) {
dev_err(chip->dev, "can't read Serdes PHY control: %d\n", err);
dev_err(chip->dev, "can't read Serdes PHY BMSR: %d\n", err);
return err;
}
@@ -939,7 +934,7 @@ static int mv88e6390_serdes_pcs_get_state_sgmii(struct mv88e6xxx_chip *chip,
return err;
}
return mv88e6xxx_serdes_pcs_get_state(chip, ctrl, status, lpa, state);
return mv88e6xxx_serdes_pcs_get_state(chip, bmsr, lpa, status, state);
}
static int mv88e6390_serdes_pcs_get_state_10g(struct mv88e6xxx_chip *chip,


@@ -163,7 +163,8 @@ static int altera_tse_mdio_create(struct net_device *dev, unsigned int id)
mdio = mdiobus_alloc();
if (mdio == NULL) {
netdev_err(dev, "Error allocating MDIO bus\n");
return -ENOMEM;
ret = -ENOMEM;
goto put_node;
}
mdio->name = ALTERA_TSE_RESOURCE_NAME;
@@ -180,6 +181,7 @@ static int altera_tse_mdio_create(struct net_device *dev, unsigned int id)
mdio->id);
goto out_free_mdio;
}
of_node_put(mdio_node);
if (netif_msg_drv(priv))
netdev_info(dev, "MDIO bus %s: created\n", mdio->id);
@@ -189,6 +191,8 @@ static int altera_tse_mdio_create(struct net_device *dev, unsigned int id)
out_free_mdio:
mdiobus_free(mdio);
mdio = NULL;
put_node:
of_node_put(mdio_node);
return ret;
}
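The three device-tree fixes above (gswip, mv88e6xxx, altera_tse) all enforce the same refcount rule: of_get_child_by_name() returns a node with its refcount raised, and for_each_available_child_of_node() holds a reference on the current child, so every path that stops using the node, whether success, failure, or an early break out of the loop, must call of_node_put(). A condensed sketch, where np is the parent node and setup_child() is a hypothetical helper assumed to tolerate a NULL child:

        struct device_node *child;
        int err;

        /* Single child: put it whether or not the setup succeeded. */
        child = of_get_child_by_name(np, "mdio");
        err = setup_child(child);
        of_node_put(child);                     /* of_node_put(NULL) is safe */
        if (err)
                return err;

        /* Iteration: the loop holds a reference, so drop it on early exit. */
        for_each_available_child_of_node(np, child) {
                err = setup_child(child);
                if (err) {
                        of_node_put(child);
                        return err;
                }
        }
        return 0;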


@@ -830,8 +830,6 @@ void i40e_free_tx_resources(struct i40e_ring *tx_ring)
i40e_clean_tx_ring(tx_ring);
kfree(tx_ring->tx_bi);
tx_ring->tx_bi = NULL;
kfree(tx_ring->xsk_descs);
tx_ring->xsk_descs = NULL;
if (tx_ring->desc) {
dma_free_coherent(tx_ring->dev, tx_ring->size,
@@ -1433,13 +1431,6 @@ int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
if (!tx_ring->tx_bi)
goto err;
if (ring_is_xdp(tx_ring)) {
tx_ring->xsk_descs = kcalloc(I40E_MAX_NUM_DESCRIPTORS, sizeof(*tx_ring->xsk_descs),
GFP_KERNEL);
if (!tx_ring->xsk_descs)
goto err;
}
u64_stats_init(&tx_ring->syncp);
/* round up to nearest 4K */
@@ -1463,8 +1454,6 @@ int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
return 0;
err:
kfree(tx_ring->xsk_descs);
tx_ring->xsk_descs = NULL;
kfree(tx_ring->tx_bi);
tx_ring->tx_bi = NULL;
return -ENOMEM;


@@ -390,7 +390,6 @@ struct i40e_ring {
u16 rx_offset;
struct xdp_rxq_info xdp_rxq;
struct xsk_buff_pool *xsk_pool;
struct xdp_desc *xsk_descs; /* For storing descriptors in the AF_XDP ZC path */
} ____cacheline_internodealigned_in_smp;
static inline bool ring_uses_build_skb(struct i40e_ring *ring)


@@ -473,11 +473,11 @@ static void i40e_set_rs_bit(struct i40e_ring *xdp_ring)
**/
static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
{
struct xdp_desc *descs = xdp_ring->xsk_descs;
struct xdp_desc *descs = xdp_ring->xsk_pool->tx_descs;
u32 nb_pkts, nb_processed = 0;
unsigned int total_bytes = 0;
nb_pkts = xsk_tx_peek_release_desc_batch(xdp_ring->xsk_pool, descs, budget);
nb_pkts = xsk_tx_peek_release_desc_batch(xdp_ring->xsk_pool, budget);
if (!nb_pkts)
return true;
