diff --git a/Documentation/ABI/testing/sysfs-bus-spi-devices-spi-nor b/Documentation/ABI/testing/sysfs-bus-spi-devices-spi-nor index d76cd3946434..e9ef69aef20b 100644 --- a/Documentation/ABI/testing/sysfs-bus-spi-devices-spi-nor +++ b/Documentation/ABI/testing/sysfs-bus-spi-devices-spi-nor @@ -5,6 +5,9 @@ Contact: linux-mtd@lists.infradead.org Description: (RO) The JEDEC ID of the SPI NOR flash as reported by the flash device. + The attribute is not present if the flash doesn't support + the "Read JEDEC ID" command (9Fh). This is the case for + non-JEDEC compliant flashes. What: /sys/bus/spi/devices/.../spi-nor/manufacturer Date: April 2021 diff --git a/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.yaml b/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.yaml index acea1cd444fd..9b0548264a39 100644 --- a/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.yaml +++ b/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.yaml @@ -14,9 +14,6 @@ description: |+ This PCIe host controller is based on the Synopsys DesignWare PCIe IP and thus inherits all the common properties defined in snps,dw-pcie.yaml. -allOf: - - $ref: /schemas/pci/snps,dw-pcie.yaml# - properties: compatible: enum: @@ -59,7 +56,7 @@ properties: - const: pcie - const: pcie_bus - const: pcie_phy - - const: pcie_inbound_axi for imx6sx-pcie, pcie_aux for imx8mq-pcie + - enum: [ pcie_inbound_axi, pcie_aux ] num-lanes: const: 1 @@ -166,6 +163,47 @@ required: - clocks - clock-names +allOf: + - $ref: /schemas/pci/snps,dw-pcie.yaml# + - if: + properties: + compatible: + contains: + const: fsl,imx6sx-pcie + then: + properties: + clock-names: + items: + - {} + - {} + - {} + - const: pcie_inbound_axi + - if: + properties: + compatible: + contains: + const: fsl,imx8mq-pcie + then: + properties: + clock-names: + items: + - {} + - {} + - {} + - const: pcie_aux + - if: + properties: + compatible: + not: + contains: + enum: + - fsl,imx6sx-pcie + - fsl,imx8mq-pcie + then: + properties: + clock-names: + maxItems: 3 + unevaluatedProperties: false examples: diff --git a/Documentation/devicetree/bindings/pci/toshiba,visconti-pcie.yaml b/Documentation/devicetree/bindings/pci/toshiba,visconti-pcie.yaml index 30b6396d83c8..aea0e2bcdd77 100644 --- a/Documentation/devicetree/bindings/pci/toshiba,visconti-pcie.yaml +++ b/Documentation/devicetree/bindings/pci/toshiba,visconti-pcie.yaml @@ -36,7 +36,7 @@ properties: - const: mpu interrupts: - maxItems: 1 + maxItems: 2 clocks: items: @@ -94,8 +94,9 @@ examples: #interrupt-cells = <1>; ranges = <0x81000000 0 0x40000000 0 0x40000000 0 0x00010000>, <0x82000000 0 0x50000000 0 0x50000000 0 0x20000000>; - interrupts = ; - interrupt-names = "intr"; + interrupts = , + ; + interrupt-names = "msi", "intr"; interrupt-map-mask = <0 0 0 7>; interrupt-map = <0 0 0 1 &gic GIC_SPI 215 IRQ_TYPE_LEVEL_HIGH diff --git a/Documentation/devicetree/bindings/sound/qcom,wcd9335.txt b/Documentation/devicetree/bindings/sound/qcom,wcd9335.txt index 5d6ea66a863f..1f75feec3dec 100644 --- a/Documentation/devicetree/bindings/sound/qcom,wcd9335.txt +++ b/Documentation/devicetree/bindings/sound/qcom,wcd9335.txt @@ -109,7 +109,7 @@ audio-codec@1{ reg = <1 0>; interrupts = <&msmgpio 54 IRQ_TYPE_LEVEL_HIGH>; interrupt-names = "intr2" - reset-gpios = <&msmgpio 64 0>; + reset-gpios = <&msmgpio 64 GPIO_ACTIVE_LOW>; slim-ifc-dev = <&wc9335_ifd>; clock-names = "mclk", "native"; clocks = <&rpmcc RPM_SMD_DIV_CLK1>, diff --git a/Documentation/driver-api/spi.rst b/Documentation/driver-api/spi.rst index f64cb666498a..f28887045049 100644 --- 
a/Documentation/driver-api/spi.rst +++ b/Documentation/driver-api/spi.rst @@ -25,8 +25,8 @@ hardware, which may be as simple as a set of GPIO pins or as complex as a pair of FIFOs connected to dual DMA engines on the other side of the SPI shift register (maximizing throughput). Such drivers bridge between whatever bus they sit on (often the platform bus) and SPI, and expose -the SPI side of their device as a :c:type:`struct spi_master -`. SPI devices are children of that master, +the SPI side of their device as a :c:type:`struct spi_controller +`. SPI devices are children of that master, represented as a :c:type:`struct spi_device ` and manufactured from :c:type:`struct spi_board_info ` descriptors which are usually provided by diff --git a/Documentation/fault-injection/fault-injection.rst b/Documentation/fault-injection/fault-injection.rst index 4a25c5eb6f07..8c47847755a6 100644 --- a/Documentation/fault-injection/fault-injection.rst +++ b/Documentation/fault-injection/fault-injection.rst @@ -83,9 +83,7 @@ configuration of fault-injection capabilities. - /sys/kernel/debug/fail*/times: specifies how many times failures may happen at most. A value of -1 - means "no limit". Note, though, that this file only accepts unsigned - values. So, if you want to specify -1, you better use 'printf' instead - of 'echo', e.g.: $ printf %#x -1 > times + means "no limit". - /sys/kernel/debug/fail*/space: @@ -277,7 +275,7 @@ Application Examples echo Y > /sys/kernel/debug/$FAILTYPE/task-filter echo 10 > /sys/kernel/debug/$FAILTYPE/probability echo 100 > /sys/kernel/debug/$FAILTYPE/interval - printf %#x -1 > /sys/kernel/debug/$FAILTYPE/times + echo -1 > /sys/kernel/debug/$FAILTYPE/times echo 0 > /sys/kernel/debug/$FAILTYPE/space echo 2 > /sys/kernel/debug/$FAILTYPE/verbose echo 1 > /sys/kernel/debug/$FAILTYPE/ignore-gfp-wait @@ -331,7 +329,7 @@ Application Examples echo N > /sys/kernel/debug/$FAILTYPE/task-filter echo 10 > /sys/kernel/debug/$FAILTYPE/probability echo 100 > /sys/kernel/debug/$FAILTYPE/interval - printf %#x -1 > /sys/kernel/debug/$FAILTYPE/times + echo -1 > /sys/kernel/debug/$FAILTYPE/times echo 0 > /sys/kernel/debug/$FAILTYPE/space echo 2 > /sys/kernel/debug/$FAILTYPE/verbose echo 1 > /sys/kernel/debug/$FAILTYPE/ignore-gfp-wait @@ -362,7 +360,7 @@ Application Examples echo N > /sys/kernel/debug/$FAILTYPE/task-filter echo 100 > /sys/kernel/debug/$FAILTYPE/probability echo 0 > /sys/kernel/debug/$FAILTYPE/interval - printf %#x -1 > /sys/kernel/debug/$FAILTYPE/times + echo -1 > /sys/kernel/debug/$FAILTYPE/times echo 0 > /sys/kernel/debug/$FAILTYPE/space echo 1 > /sys/kernel/debug/$FAILTYPE/verbose diff --git a/Documentation/process/deprecated.rst b/Documentation/process/deprecated.rst index 8ced754a5a0f..f3484f60eae5 100644 --- a/Documentation/process/deprecated.rst +++ b/Documentation/process/deprecated.rst @@ -70,6 +70,9 @@ Instead, the 2-factor form of the allocator should be used:: foo = kmalloc_array(count, size, GFP_KERNEL); +Specifically, kmalloc() can be replaced with kmalloc_array(), and +kzalloc() can be replaced with kcalloc(). + If no 2-factor form is available, the saturate-on-overflow helpers should be used:: @@ -90,9 +93,20 @@ Instead, use the helper:: array usage and switch to a `flexible array member <#zero-length-and-one-element-arrays>`_ instead. -See array_size(), array3_size(), and struct_size(), -for more details as well as the related check_add_overflow() and -check_mul_overflow() family of functions. 
+For other calculations, please compose the use of the size_mul(), +size_add(), and size_sub() helpers. For example, in the case of:: + + foo = krealloc(current_size + chunk_size * (count - 3), GFP_KERNEL); + +Instead, use the helpers:: + + foo = krealloc(size_add(current_size, + size_mul(chunk_size, + size_sub(count, 3))), GFP_KERNEL); + +For more details, also see array3_size() and flex_array_size(), +as well as the related check_mul_overflow(), check_add_overflow(), +check_sub_overflow(), and check_shl_overflow() family of functions. simple_strtol(), simple_strtoll(), simple_strtoul(), simple_strtoull() ---------------------------------------------------------------------- diff --git a/MAINTAINERS b/MAINTAINERS index 1757129cb9c3..4b62a7a454c1 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -7793,7 +7793,7 @@ F: Documentation/locking/*futex* F: include/asm-generic/futex.h F: include/linux/futex.h F: include/uapi/linux/futex.h -F: kernel/futex.c +F: kernel/futex/* F: tools/perf/bench/futex* F: tools/testing/selftests/futex/ diff --git a/Makefile b/Makefile index 66cd053b3e7a..6a9589c7b1bc 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 5 PATCHLEVEL = 15 -SUBLEVEL = 84 +SUBLEVEL = 87 EXTRAVERSION = NAME = Trick or Treat diff --git a/arch/alpha/include/asm/thread_info.h b/arch/alpha/include/asm/thread_info.h index 2592356e3215..0ce1eee0924b 100644 --- a/arch/alpha/include/asm/thread_info.h +++ b/arch/alpha/include/asm/thread_info.h @@ -77,7 +77,7 @@ register struct thread_info *__current_thread_info __asm__("$8"); /* Work to do on interrupt/exception return. */ #define _TIF_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \ - _TIF_NOTIFY_RESUME) + _TIF_NOTIFY_RESUME | _TIF_NOTIFY_SIGNAL) /* Work to do on any return to userspace. 
*/ #define _TIF_ALLWORK_MASK (_TIF_WORK_MASK \ diff --git a/arch/alpha/kernel/entry.S b/arch/alpha/kernel/entry.S index e227f3a29a43..c41a5a9c3b9f 100644 --- a/arch/alpha/kernel/entry.S +++ b/arch/alpha/kernel/entry.S @@ -469,8 +469,10 @@ entSys: #ifdef CONFIG_AUDITSYSCALL lda $6, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT and $3, $6, $3 -#endif bne $3, strace +#else + blbs $3, strace /* check for SYSCALL_TRACE in disguise */ +#endif beq $4, 1f ldq $27, 0($5) 1: jsr $26, ($27), sys_ni_syscall diff --git a/arch/arm/boot/dts/armada-370.dtsi b/arch/arm/boot/dts/armada-370.dtsi index 46e6d3ed8f35..c042c416a94a 100644 --- a/arch/arm/boot/dts/armada-370.dtsi +++ b/arch/arm/boot/dts/armada-370.dtsi @@ -74,7 +74,7 @@ pcie2: pcie@2,0 { device_type = "pci"; - assigned-addresses = <0x82002800 0 0x80000 0 0x2000>; + assigned-addresses = <0x82001000 0 0x80000 0 0x2000>; reg = <0x1000 0 0 0 0>; #address-cells = <3>; #size-cells = <2>; diff --git a/arch/arm/boot/dts/armada-375.dtsi b/arch/arm/boot/dts/armada-375.dtsi index 7f2f24a29e6c..352a2f7ba311 100644 --- a/arch/arm/boot/dts/armada-375.dtsi +++ b/arch/arm/boot/dts/armada-375.dtsi @@ -582,7 +582,7 @@ pcie1: pcie@2,0 { device_type = "pci"; - assigned-addresses = <0x82000800 0 0x44000 0 0x2000>; + assigned-addresses = <0x82001000 0 0x44000 0 0x2000>; reg = <0x1000 0 0 0 0>; #address-cells = <3>; #size-cells = <2>; diff --git a/arch/arm/boot/dts/armada-380.dtsi b/arch/arm/boot/dts/armada-380.dtsi index cff1269f3fbf..7146cc8f082a 100644 --- a/arch/arm/boot/dts/armada-380.dtsi +++ b/arch/arm/boot/dts/armada-380.dtsi @@ -79,7 +79,7 @@ /* x1 port */ pcie@2,0 { device_type = "pci"; - assigned-addresses = <0x82000800 0 0x40000 0 0x2000>; + assigned-addresses = <0x82001000 0 0x40000 0 0x2000>; reg = <0x1000 0 0 0 0>; #address-cells = <3>; #size-cells = <2>; @@ -98,7 +98,7 @@ /* x1 port */ pcie@3,0 { device_type = "pci"; - assigned-addresses = <0x82000800 0 0x44000 0 0x2000>; + assigned-addresses = <0x82001800 0 0x44000 0 0x2000>; reg = <0x1800 0 0 0 0>; #address-cells = <3>; #size-cells = <2>; diff --git a/arch/arm/boot/dts/armada-385-turris-omnia.dts b/arch/arm/boot/dts/armada-385-turris-omnia.dts index 01b0dfd55d70..e7649c795699 100644 --- a/arch/arm/boot/dts/armada-385-turris-omnia.dts +++ b/arch/arm/boot/dts/armada-385-turris-omnia.dts @@ -23,6 +23,12 @@ stdout-path = &uart0; }; + aliases { + ethernet0 = ð0; + ethernet1 = ð1; + ethernet2 = ð2; + }; + memory { device_type = "memory"; reg = <0x00000000 0x40000000>; /* 1024 MB */ @@ -450,7 +456,17 @@ }; }; - /* port 6 is connected to eth0 */ + ports@6 { + reg = <6>; + label = "cpu"; + ethernet = <ð0>; + phy-mode = "rgmii-id"; + + fixed-link { + speed = <1000>; + full-duplex; + }; + }; }; }; }; diff --git a/arch/arm/boot/dts/armada-385.dtsi b/arch/arm/boot/dts/armada-385.dtsi index f0022d10c715..f081f7cb66e5 100644 --- a/arch/arm/boot/dts/armada-385.dtsi +++ b/arch/arm/boot/dts/armada-385.dtsi @@ -84,7 +84,7 @@ /* x1 port */ pcie2: pcie@2,0 { device_type = "pci"; - assigned-addresses = <0x82000800 0 0x40000 0 0x2000>; + assigned-addresses = <0x82001000 0 0x40000 0 0x2000>; reg = <0x1000 0 0 0 0>; #address-cells = <3>; #size-cells = <2>; @@ -103,7 +103,7 @@ /* x1 port */ pcie3: pcie@3,0 { device_type = "pci"; - assigned-addresses = <0x82000800 0 0x44000 0 0x2000>; + assigned-addresses = <0x82001800 0 0x44000 0 0x2000>; reg = <0x1800 0 0 0 0>; #address-cells = <3>; #size-cells = <2>; @@ -125,7 +125,7 @@ */ pcie4: pcie@4,0 { device_type = "pci"; - assigned-addresses = <0x82000800 0 0x48000 0 0x2000>; + 
assigned-addresses = <0x82002000 0 0x48000 0 0x2000>; reg = <0x2000 0 0 0 0>; #address-cells = <3>; #size-cells = <2>; diff --git a/arch/arm/boot/dts/armada-39x.dtsi b/arch/arm/boot/dts/armada-39x.dtsi index e0b7c2099831..9525e7b7f436 100644 --- a/arch/arm/boot/dts/armada-39x.dtsi +++ b/arch/arm/boot/dts/armada-39x.dtsi @@ -453,7 +453,7 @@ /* x1 port */ pcie@2,0 { device_type = "pci"; - assigned-addresses = <0x82000800 0 0x40000 0 0x2000>; + assigned-addresses = <0x82001000 0 0x40000 0 0x2000>; reg = <0x1000 0 0 0 0>; #address-cells = <3>; #size-cells = <2>; @@ -472,7 +472,7 @@ /* x1 port */ pcie@3,0 { device_type = "pci"; - assigned-addresses = <0x82000800 0 0x44000 0 0x2000>; + assigned-addresses = <0x82001800 0 0x44000 0 0x2000>; reg = <0x1800 0 0 0 0>; #address-cells = <3>; #size-cells = <2>; @@ -494,7 +494,7 @@ */ pcie@4,0 { device_type = "pci"; - assigned-addresses = <0x82000800 0 0x48000 0 0x2000>; + assigned-addresses = <0x82002000 0 0x48000 0 0x2000>; reg = <0x2000 0 0 0 0>; #address-cells = <3>; #size-cells = <2>; diff --git a/arch/arm/boot/dts/armada-xp-mv78230.dtsi b/arch/arm/boot/dts/armada-xp-mv78230.dtsi index 8558bf6bb54c..d55fe162fc7f 100644 --- a/arch/arm/boot/dts/armada-xp-mv78230.dtsi +++ b/arch/arm/boot/dts/armada-xp-mv78230.dtsi @@ -97,7 +97,7 @@ pcie2: pcie@2,0 { device_type = "pci"; - assigned-addresses = <0x82000800 0 0x44000 0 0x2000>; + assigned-addresses = <0x82001000 0 0x44000 0 0x2000>; reg = <0x1000 0 0 0 0>; #address-cells = <3>; #size-cells = <2>; @@ -115,7 +115,7 @@ pcie3: pcie@3,0 { device_type = "pci"; - assigned-addresses = <0x82000800 0 0x48000 0 0x2000>; + assigned-addresses = <0x82001800 0 0x48000 0 0x2000>; reg = <0x1800 0 0 0 0>; #address-cells = <3>; #size-cells = <2>; @@ -133,7 +133,7 @@ pcie4: pcie@4,0 { device_type = "pci"; - assigned-addresses = <0x82000800 0 0x4c000 0 0x2000>; + assigned-addresses = <0x82002000 0 0x4c000 0 0x2000>; reg = <0x2000 0 0 0 0>; #address-cells = <3>; #size-cells = <2>; @@ -151,7 +151,7 @@ pcie5: pcie@5,0 { device_type = "pci"; - assigned-addresses = <0x82000800 0 0x80000 0 0x2000>; + assigned-addresses = <0x82002800 0 0x80000 0 0x2000>; reg = <0x2800 0 0 0 0>; #address-cells = <3>; #size-cells = <2>; diff --git a/arch/arm/boot/dts/armada-xp-mv78260.dtsi b/arch/arm/boot/dts/armada-xp-mv78260.dtsi index 2d85fe8ac327..fdcc81819940 100644 --- a/arch/arm/boot/dts/armada-xp-mv78260.dtsi +++ b/arch/arm/boot/dts/armada-xp-mv78260.dtsi @@ -112,7 +112,7 @@ pcie2: pcie@2,0 { device_type = "pci"; - assigned-addresses = <0x82000800 0 0x44000 0 0x2000>; + assigned-addresses = <0x82001000 0 0x44000 0 0x2000>; reg = <0x1000 0 0 0 0>; #address-cells = <3>; #size-cells = <2>; @@ -130,7 +130,7 @@ pcie3: pcie@3,0 { device_type = "pci"; - assigned-addresses = <0x82000800 0 0x48000 0 0x2000>; + assigned-addresses = <0x82001800 0 0x48000 0 0x2000>; reg = <0x1800 0 0 0 0>; #address-cells = <3>; #size-cells = <2>; @@ -148,7 +148,7 @@ pcie4: pcie@4,0 { device_type = "pci"; - assigned-addresses = <0x82000800 0 0x4c000 0 0x2000>; + assigned-addresses = <0x82002000 0 0x4c000 0 0x2000>; reg = <0x2000 0 0 0 0>; #address-cells = <3>; #size-cells = <2>; @@ -166,7 +166,7 @@ pcie5: pcie@5,0 { device_type = "pci"; - assigned-addresses = <0x82000800 0 0x80000 0 0x2000>; + assigned-addresses = <0x82002800 0 0x80000 0 0x2000>; reg = <0x2800 0 0 0 0>; #address-cells = <3>; #size-cells = <2>; @@ -184,7 +184,7 @@ pcie6: pcie@6,0 { device_type = "pci"; - assigned-addresses = <0x82000800 0 0x84000 0 0x2000>; + assigned-addresses = <0x82003000 0 0x84000 0 
0x2000>; reg = <0x3000 0 0 0 0>; #address-cells = <3>; #size-cells = <2>; @@ -202,7 +202,7 @@ pcie7: pcie@7,0 { device_type = "pci"; - assigned-addresses = <0x82000800 0 0x88000 0 0x2000>; + assigned-addresses = <0x82003800 0 0x88000 0 0x2000>; reg = <0x3800 0 0 0 0>; #address-cells = <3>; #size-cells = <2>; @@ -220,7 +220,7 @@ pcie8: pcie@8,0 { device_type = "pci"; - assigned-addresses = <0x82000800 0 0x8c000 0 0x2000>; + assigned-addresses = <0x82004000 0 0x8c000 0 0x2000>; reg = <0x4000 0 0 0 0>; #address-cells = <3>; #size-cells = <2>; @@ -238,7 +238,7 @@ pcie9: pcie@9,0 { device_type = "pci"; - assigned-addresses = <0x82000800 0 0x42000 0 0x2000>; + assigned-addresses = <0x82004800 0 0x42000 0 0x2000>; reg = <0x4800 0 0 0 0>; #address-cells = <3>; #size-cells = <2>; diff --git a/arch/arm/boot/dts/dove.dtsi b/arch/arm/boot/dts/dove.dtsi index 89e0bdaf3a85..726d353eda68 100644 --- a/arch/arm/boot/dts/dove.dtsi +++ b/arch/arm/boot/dts/dove.dtsi @@ -129,7 +129,7 @@ pcie1: pcie@2 { device_type = "pci"; status = "disabled"; - assigned-addresses = <0x82002800 0 0x80000 0 0x2000>; + assigned-addresses = <0x82001000 0 0x80000 0 0x2000>; reg = <0x1000 0 0 0 0>; clocks = <&gate_clk 5>; marvell,pcie-port = <1>; diff --git a/arch/arm/boot/dts/nuvoton-npcm730-gbs.dts b/arch/arm/boot/dts/nuvoton-npcm730-gbs.dts index eb6eb21cb2a4..33c8d5b3d679 100644 --- a/arch/arm/boot/dts/nuvoton-npcm730-gbs.dts +++ b/arch/arm/boot/dts/nuvoton-npcm730-gbs.dts @@ -366,7 +366,7 @@ spi-max-frequency = <20000000>; spi-rx-bus-width = <2>; label = "bmc"; - partitions@80000000 { + partitions { compatible = "fixed-partitions"; #address-cells = <1>; #size-cells = <1>; diff --git a/arch/arm/boot/dts/nuvoton-npcm730-gsj.dts b/arch/arm/boot/dts/nuvoton-npcm730-gsj.dts index d4ff49939a3d..bbe18618f5c5 100644 --- a/arch/arm/boot/dts/nuvoton-npcm730-gsj.dts +++ b/arch/arm/boot/dts/nuvoton-npcm730-gsj.dts @@ -142,7 +142,7 @@ reg = <0>; spi-rx-bus-width = <2>; - partitions@80000000 { + partitions { compatible = "fixed-partitions"; #address-cells = <1>; #size-cells = <1>; diff --git a/arch/arm/boot/dts/nuvoton-npcm730-kudo.dts b/arch/arm/boot/dts/nuvoton-npcm730-kudo.dts index 82a104b2a65f..8e3425cb8e8b 100644 --- a/arch/arm/boot/dts/nuvoton-npcm730-kudo.dts +++ b/arch/arm/boot/dts/nuvoton-npcm730-kudo.dts @@ -388,7 +388,7 @@ spi-max-frequency = <5000000>; spi-rx-bus-width = <2>; label = "bmc"; - partitions@80000000 { + partitions { compatible = "fixed-partitions"; #address-cells = <1>; #size-cells = <1>; @@ -422,7 +422,7 @@ reg = <1>; spi-max-frequency = <5000000>; spi-rx-bus-width = <2>; - partitions@88000000 { + partitions { compatible = "fixed-partitions"; #address-cells = <1>; #size-cells = <1>; @@ -447,7 +447,7 @@ reg = <0>; spi-max-frequency = <5000000>; spi-rx-bus-width = <2>; - partitions@A0000000 { + partitions { compatible = "fixed-partitions"; #address-cells = <1>; #size-cells = <1>; diff --git a/arch/arm/boot/dts/nuvoton-npcm750-evb.dts b/arch/arm/boot/dts/nuvoton-npcm750-evb.dts index 0334641f8829..cf274c926711 100644 --- a/arch/arm/boot/dts/nuvoton-npcm750-evb.dts +++ b/arch/arm/boot/dts/nuvoton-npcm750-evb.dts @@ -74,7 +74,7 @@ spi-rx-bus-width = <2>; reg = <0>; spi-max-frequency = <5000000>; - partitions@80000000 { + partitions { compatible = "fixed-partitions"; #address-cells = <1>; #size-cells = <1>; @@ -135,7 +135,7 @@ spi-rx-bus-width = <2>; reg = <0>; spi-max-frequency = <5000000>; - partitions@A0000000 { + partitions { compatible = "fixed-partitions"; #address-cells = <1>; #size-cells = <1>; diff --git 
a/arch/arm/boot/dts/nuvoton-npcm750-runbmc-olympus.dts b/arch/arm/boot/dts/nuvoton-npcm750-runbmc-olympus.dts index 767e0ac0df7c..7fe7efee28ac 100644 --- a/arch/arm/boot/dts/nuvoton-npcm750-runbmc-olympus.dts +++ b/arch/arm/boot/dts/nuvoton-npcm750-runbmc-olympus.dts @@ -107,7 +107,7 @@ reg = <0>; spi-rx-bus-width = <2>; - partitions@80000000 { + partitions { compatible = "fixed-partitions"; #address-cells = <1>; #size-cells = <1>; @@ -146,7 +146,7 @@ reg = <1>; npcm,fiu-rx-bus-width = <2>; - partitions@88000000 { + partitions { compatible = "fixed-partitions"; #address-cells = <1>; #size-cells = <1>; @@ -173,7 +173,7 @@ reg = <0>; spi-rx-bus-width = <2>; - partitions@A0000000 { + partitions { compatible = "fixed-partitions"; #address-cells = <1>; #size-cells = <1>; diff --git a/arch/arm/boot/dts/qcom-apq8064.dtsi b/arch/arm/boot/dts/qcom-apq8064.dtsi index d1c1c6aab2b8..0e830476fefd 100644 --- a/arch/arm/boot/dts/qcom-apq8064.dtsi +++ b/arch/arm/boot/dts/qcom-apq8064.dtsi @@ -1571,7 +1571,7 @@ }; etb@1a01000 { - compatible = "coresight-etb10", "arm,primecell"; + compatible = "arm,coresight-etb10", "arm,primecell"; reg = <0x1a01000 0x1000>; clocks = <&rpmcc RPM_QDSS_CLK>; diff --git a/arch/arm/boot/dts/spear600.dtsi b/arch/arm/boot/dts/spear600.dtsi index fd41243a0b2c..9d5a04a46b14 100644 --- a/arch/arm/boot/dts/spear600.dtsi +++ b/arch/arm/boot/dts/spear600.dtsi @@ -47,7 +47,7 @@ compatible = "arm,pl110", "arm,primecell"; reg = <0xfc200000 0x1000>; interrupt-parent = <&vic1>; - interrupts = <12>; + interrupts = <13>; status = "disabled"; }; diff --git a/arch/arm/boot/dts/stm32mp157a-dhcor-avenger96.dts b/arch/arm/boot/dts/stm32mp157a-dhcor-avenger96.dts index 2e3c9fbb4eb3..275167f26fd9 100644 --- a/arch/arm/boot/dts/stm32mp157a-dhcor-avenger96.dts +++ b/arch/arm/boot/dts/stm32mp157a-dhcor-avenger96.dts @@ -13,7 +13,6 @@ /dts-v1/; #include "stm32mp157.dtsi" -#include "stm32mp15xc.dtsi" #include "stm32mp15xx-dhcor-som.dtsi" #include "stm32mp15xx-dhcor-avenger96.dtsi" diff --git a/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi b/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi index 8eb51d84b698..d3375ad8c91f 100644 --- a/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi +++ b/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi @@ -100,7 +100,7 @@ regulator-min-microvolt = <3300000>; regulator-max-microvolt = <3300000>; - gpios = <&gpioz 3 GPIO_ACTIVE_HIGH>; + gpio = <&gpioz 3 GPIO_ACTIVE_HIGH>; enable-active-high; }; }; diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h index 9a18da3e10cc..b682189a2b5d 100644 --- a/arch/arm/include/asm/thread_info.h +++ b/arch/arm/include/asm/thread_info.h @@ -129,15 +129,16 @@ extern int vfp_restore_user_hwstate(struct user_vfp *, #define TIF_NEED_RESCHED 1 /* rescheduling necessary */ #define TIF_NOTIFY_RESUME 2 /* callback before returning to user */ #define TIF_UPROBE 3 /* breakpointed or singlestepping */ -#define TIF_SYSCALL_TRACE 4 /* syscall trace active */ -#define TIF_SYSCALL_AUDIT 5 /* syscall auditing active */ -#define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */ -#define TIF_SECCOMP 7 /* seccomp syscall filtering active */ -#define TIF_NOTIFY_SIGNAL 8 /* signal notifications exist */ +#define TIF_NOTIFY_SIGNAL 4 /* signal notifications exist */ #define TIF_USING_IWMMXT 17 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */ -#define TIF_RESTORE_SIGMASK 20 +#define TIF_RESTORE_SIGMASK 19 +#define TIF_SYSCALL_TRACE 20 /* syscall trace active */ +#define TIF_SYSCALL_AUDIT 21 /* 
syscall auditing active */ +#define TIF_SYSCALL_TRACEPOINT 22 /* syscall tracepoint instrumentation */ +#define TIF_SECCOMP 23 /* seccomp syscall filtering active */ + #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) diff --git a/arch/arm/mach-mmp/time.c b/arch/arm/mach-mmp/time.c index 41b2e8abc9e6..708816caf859 100644 --- a/arch/arm/mach-mmp/time.c +++ b/arch/arm/mach-mmp/time.c @@ -43,18 +43,21 @@ static void __iomem *mmp_timer_base = TIMERS_VIRT_BASE; /* - * FIXME: the timer needs some delay to stablize the counter capture + * Read the timer through the CVWR register. Delay is required after requesting + * a read. The CR register cannot be directly read due to metastability issues + * documented in the PXA168 software manual. */ static inline uint32_t timer_read(void) { - int delay = 100; + uint32_t val; + int delay = 3; __raw_writel(1, mmp_timer_base + TMR_CVWR(1)); while (delay--) - cpu_relax(); + val = __raw_readl(mmp_timer_base + TMR_CVWR(1)); - return __raw_readl(mmp_timer_base + TMR_CVWR(1)); + return val; } static u64 notrace mmp_read_sched_clock(void) diff --git a/arch/arm/nwfpe/Makefile b/arch/arm/nwfpe/Makefile index 303400fa2cdf..2aec85ab1e8b 100644 --- a/arch/arm/nwfpe/Makefile +++ b/arch/arm/nwfpe/Makefile @@ -11,3 +11,9 @@ nwfpe-y += fpa11.o fpa11_cpdo.o fpa11_cpdt.o \ entry.o nwfpe-$(CONFIG_FPE_NWFPE_XP) += extended_cpdo.o + +# Try really hard to avoid generating calls to __aeabi_uldivmod() from +# float64_rem() due to loop elision. +ifdef CONFIG_CC_IS_CLANG +CFLAGS_softfloat.o += -mllvm -replexitval=never +endif diff --git a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts index 1cee26479bfe..b276dd77df83 100644 --- a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts +++ b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts @@ -125,9 +125,12 @@ /delete-property/ mrvl,i2c-fast-mode; status = "okay"; + /* MCP7940MT-I/MNY RTC */ rtc@6f { compatible = "microchip,mcp7940x"; reg = <0x6f>; + interrupt-parent = <&gpiosb>; + interrupts = <5 0>; /* GPIO2_5 */ }; }; diff --git a/arch/arm64/boot/dts/mediatek/mt2712-evb.dts b/arch/arm64/boot/dts/mediatek/mt2712-evb.dts index 7d369fdd3117..9d20cabf4f69 100644 --- a/arch/arm64/boot/dts/mediatek/mt2712-evb.dts +++ b/arch/arm64/boot/dts/mediatek/mt2712-evb.dts @@ -26,14 +26,14 @@ stdout-path = "serial0:921600n8"; }; - cpus_fixed_vproc0: fixedregulator@0 { + cpus_fixed_vproc0: regulator-vproc-buck0 { compatible = "regulator-fixed"; regulator-name = "vproc_buck0"; regulator-min-microvolt = <1000000>; regulator-max-microvolt = <1000000>; }; - cpus_fixed_vproc1: fixedregulator@1 { + cpus_fixed_vproc1: regulator-vproc-buck1 { compatible = "regulator-fixed"; regulator-name = "vproc_buck1"; regulator-min-microvolt = <1000000>; @@ -50,7 +50,7 @@ id-gpio = <&pio 14 GPIO_ACTIVE_HIGH>; }; - usb_p0_vbus: regulator@2 { + usb_p0_vbus: regulator-usb-p0-vbus { compatible = "regulator-fixed"; regulator-name = "p0_vbus"; regulator-min-microvolt = <5000000>; @@ -59,7 +59,7 @@ enable-active-high; }; - usb_p1_vbus: regulator@3 { + usb_p1_vbus: regulator-usb-p1-vbus { compatible = "regulator-fixed"; regulator-name = "p1_vbus"; regulator-min-microvolt = <5000000>; @@ -68,7 +68,7 @@ enable-active-high; }; - usb_p2_vbus: regulator@4 { + usb_p2_vbus: regulator-usb-p2-vbus { compatible = "regulator-fixed"; regulator-name = "p2_vbus"; regulator-min-microvolt = <5000000>; @@ -77,7 +77,7 @@ enable-active-high; }; - usb_p3_vbus: regulator@5 { + 
usb_p3_vbus: regulator-usb-p3-vbus { compatible = "regulator-fixed"; regulator-name = "p3_vbus"; regulator-min-microvolt = <5000000>; diff --git a/arch/arm64/boot/dts/mediatek/mt2712e.dtsi b/arch/arm64/boot/dts/mediatek/mt2712e.dtsi index a9cca9c146fd..993a03d7fff1 100644 --- a/arch/arm64/boot/dts/mediatek/mt2712e.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt2712e.dtsi @@ -160,70 +160,70 @@ #clock-cells = <0>; }; - clk26m: oscillator@0 { + clk26m: oscillator-26m { compatible = "fixed-clock"; #clock-cells = <0>; clock-frequency = <26000000>; clock-output-names = "clk26m"; }; - clk32k: oscillator@1 { + clk32k: oscillator-32k { compatible = "fixed-clock"; #clock-cells = <0>; clock-frequency = <32768>; clock-output-names = "clk32k"; }; - clkfpc: oscillator@2 { + clkfpc: oscillator-50m { compatible = "fixed-clock"; #clock-cells = <0>; clock-frequency = <50000000>; clock-output-names = "clkfpc"; }; - clkaud_ext_i_0: oscillator@3 { + clkaud_ext_i_0: oscillator-aud0 { compatible = "fixed-clock"; #clock-cells = <0>; clock-frequency = <6500000>; clock-output-names = "clkaud_ext_i_0"; }; - clkaud_ext_i_1: oscillator@4 { + clkaud_ext_i_1: oscillator-aud1 { compatible = "fixed-clock"; #clock-cells = <0>; clock-frequency = <196608000>; clock-output-names = "clkaud_ext_i_1"; }; - clkaud_ext_i_2: oscillator@5 { + clkaud_ext_i_2: oscillator-aud2 { compatible = "fixed-clock"; #clock-cells = <0>; clock-frequency = <180633600>; clock-output-names = "clkaud_ext_i_2"; }; - clki2si0_mck_i: oscillator@6 { + clki2si0_mck_i: oscillator-i2s0 { compatible = "fixed-clock"; #clock-cells = <0>; clock-frequency = <30000000>; clock-output-names = "clki2si0_mck_i"; }; - clki2si1_mck_i: oscillator@7 { + clki2si1_mck_i: oscillator-i2s1 { compatible = "fixed-clock"; #clock-cells = <0>; clock-frequency = <30000000>; clock-output-names = "clki2si1_mck_i"; }; - clki2si2_mck_i: oscillator@8 { + clki2si2_mck_i: oscillator-i2s2 { compatible = "fixed-clock"; #clock-cells = <0>; clock-frequency = <30000000>; clock-output-names = "clki2si2_mck_i"; }; - clktdmin_mclk_i: oscillator@9 { + clktdmin_mclk_i: oscillator-mclk { compatible = "fixed-clock"; #clock-cells = <0>; clock-frequency = <30000000>; @@ -266,7 +266,7 @@ reg = <0 0x10005000 0 0x1000>; }; - pio: pinctrl@10005000 { + pio: pinctrl@1000b000 { compatible = "mediatek,mt2712-pinctrl"; reg = <0 0x1000b000 0 0x1000>; mediatek,pctl-regmap = <&syscfg_pctl_a>; diff --git a/arch/arm64/boot/dts/mediatek/mt6779.dtsi b/arch/arm64/boot/dts/mediatek/mt6779.dtsi index 9bdf5145966c..dde9ce137b4f 100644 --- a/arch/arm64/boot/dts/mediatek/mt6779.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt6779.dtsi @@ -88,14 +88,14 @@ interrupts = ; }; - clk26m: oscillator@0 { + clk26m: oscillator-26m { compatible = "fixed-clock"; #clock-cells = <0>; clock-frequency = <26000000>; clock-output-names = "clk26m"; }; - clk32k: oscillator@1 { + clk32k: oscillator-32k { compatible = "fixed-clock"; #clock-cells = <0>; clock-frequency = <32768>; @@ -117,7 +117,7 @@ compatible = "simple-bus"; ranges; - gic: interrupt-controller@0c000000 { + gic: interrupt-controller@c000000 { compatible = "arm,gic-v3"; #interrupt-cells = <4>; interrupt-parent = <&gic>; @@ -138,7 +138,7 @@ }; - sysirq: intpol-controller@0c53a650 { + sysirq: intpol-controller@c53a650 { compatible = "mediatek,mt6779-sysirq", "mediatek,mt6577-sysirq"; interrupt-controller; diff --git a/arch/arm64/boot/dts/mediatek/mt6797.dtsi b/arch/arm64/boot/dts/mediatek/mt6797.dtsi index 15616231022a..c3677d77e0a4 100644 --- a/arch/arm64/boot/dts/mediatek/mt6797.dtsi +++ 
b/arch/arm64/boot/dts/mediatek/mt6797.dtsi @@ -95,7 +95,7 @@ }; }; - clk26m: oscillator@0 { + clk26m: oscillator-26m { compatible = "fixed-clock"; #clock-cells = <0>; clock-frequency = <26000000>; diff --git a/arch/arm64/boot/dts/mediatek/mt8183.dtsi b/arch/arm64/boot/dts/mediatek/mt8183.dtsi index 409cf827970c..f4e0bea8ddcb 100644 --- a/arch/arm64/boot/dts/mediatek/mt8183.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt8183.dtsi @@ -1212,7 +1212,7 @@ ; interrupt-names = "job", "mmu", "gpu"; - clocks = <&topckgen CLK_TOP_MFGPLL_CK>; + clocks = <&mfgcfg CLK_MFG_BG3D>; power-domains = <&spm MT8183_POWER_DOMAIN_MFG_CORE0>, diff --git a/arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi b/arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi index fcddec14738d..54514d62398f 100644 --- a/arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi +++ b/arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi @@ -17,7 +17,7 @@ }; firmware { - optee: optee@4fd00000 { + optee: optee { compatible = "linaro,optee-tz"; method = "smc"; }; @@ -210,7 +210,7 @@ }; }; - i2c0_pins_a: i2c0@0 { + i2c0_pins_a: i2c0 { pins1 { pinmux = , ; @@ -218,7 +218,7 @@ }; }; - i2c2_pins_a: i2c2@0 { + i2c2_pins_a: i2c2 { pins1 { pinmux = , ; diff --git a/arch/arm64/boot/dts/qcom/ipq6018-cp01-c1.dts b/arch/arm64/boot/dts/qcom/ipq6018-cp01-c1.dts index 5aec18308712..5310259d03dc 100644 --- a/arch/arm64/boot/dts/qcom/ipq6018-cp01-c1.dts +++ b/arch/arm64/boot/dts/qcom/ipq6018-cp01-c1.dts @@ -37,6 +37,8 @@ &spi_0 { cs-select = <0>; + pinctrl-0 = <&spi_0_pins>; + pinctrl-names = "default"; status = "okay"; m25p80@0 { diff --git a/arch/arm64/boot/dts/qcom/ipq6018.dtsi b/arch/arm64/boot/dts/qcom/ipq6018.dtsi index ce4c2b4a5fc0..30ac0b2e8c89 100644 --- a/arch/arm64/boot/dts/qcom/ipq6018.dtsi +++ b/arch/arm64/boot/dts/qcom/ipq6018.dtsi @@ -401,7 +401,7 @@ reset-names = "phy", "common"; - pcie_phy0: lane@84200 { + pcie_phy0: phy@84200 { reg = <0x0 0x84200 0x0 0x16c>, /* Serdes Tx */ <0x0 0x84400 0x0 0x200>, /* Serdes Rx */ <0x0 0x84800 0x0 0x4f4>; /* PCS: Lane0, COM, PCIE */ diff --git a/arch/arm64/boot/dts/qcom/ipq8074.dtsi b/arch/arm64/boot/dts/qcom/ipq8074.dtsi index 6b9ac0550490..9d4019e0949a 100644 --- a/arch/arm64/boot/dts/qcom/ipq8074.dtsi +++ b/arch/arm64/boot/dts/qcom/ipq8074.dtsi @@ -106,7 +106,7 @@ reset-names = "phy","common"; status = "disabled"; - usb1_ssphy: lane@58200 { + usb1_ssphy: phy@58200 { reg = <0x00058200 0x130>, /* Tx */ <0x00058400 0x200>, /* Rx */ <0x00058800 0x1f8>, /* PCS */ @@ -149,7 +149,7 @@ reset-names = "phy","common"; status = "disabled"; - usb0_ssphy: lane@78200 { + usb0_ssphy: phy@78200 { reg = <0x00078200 0x130>, /* Tx */ <0x00078400 0x200>, /* Rx */ <0x00078800 0x1f8>, /* PCS */ diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi index 19e201f52b16..b967dbfba3b8 100644 --- a/arch/arm64/boot/dts/qcom/msm8916.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi @@ -1307,7 +1307,7 @@ }; mpss: remoteproc@4080000 { - compatible = "qcom,msm8916-mss-pil", "qcom,q6v5-pil"; + compatible = "qcom,msm8916-mss-pil"; reg = <0x04080000 0x100>, <0x04020000 0x040>; diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi index 6077c3601951..40174220e8e2 100644 --- a/arch/arm64/boot/dts/qcom/msm8996.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi @@ -142,82 +142,92 @@ /* Nominal fmax for now */ opp-307200000 { opp-hz = /bits/ 64 <307200000>; - opp-supported-hw = <0x77>; + opp-supported-hw = <0x7>; clock-latency-ns = <200000>; }; opp-422400000 { opp-hz = /bits/ 64 
<422400000>; - opp-supported-hw = <0x77>; + opp-supported-hw = <0x7>; clock-latency-ns = <200000>; }; opp-480000000 { opp-hz = /bits/ 64 <480000000>; - opp-supported-hw = <0x77>; + opp-supported-hw = <0x7>; clock-latency-ns = <200000>; }; opp-556800000 { opp-hz = /bits/ 64 <556800000>; - opp-supported-hw = <0x77>; + opp-supported-hw = <0x7>; clock-latency-ns = <200000>; }; opp-652800000 { opp-hz = /bits/ 64 <652800000>; - opp-supported-hw = <0x77>; + opp-supported-hw = <0x7>; clock-latency-ns = <200000>; }; opp-729600000 { opp-hz = /bits/ 64 <729600000>; - opp-supported-hw = <0x77>; + opp-supported-hw = <0x7>; clock-latency-ns = <200000>; }; opp-844800000 { opp-hz = /bits/ 64 <844800000>; - opp-supported-hw = <0x77>; + opp-supported-hw = <0x7>; clock-latency-ns = <200000>; }; opp-960000000 { opp-hz = /bits/ 64 <960000000>; - opp-supported-hw = <0x77>; + opp-supported-hw = <0x7>; clock-latency-ns = <200000>; }; opp-1036800000 { opp-hz = /bits/ 64 <1036800000>; - opp-supported-hw = <0x77>; + opp-supported-hw = <0x7>; clock-latency-ns = <200000>; }; opp-1113600000 { opp-hz = /bits/ 64 <1113600000>; - opp-supported-hw = <0x77>; + opp-supported-hw = <0x7>; clock-latency-ns = <200000>; }; opp-1190400000 { opp-hz = /bits/ 64 <1190400000>; - opp-supported-hw = <0x77>; + opp-supported-hw = <0x7>; clock-latency-ns = <200000>; }; opp-1228800000 { opp-hz = /bits/ 64 <1228800000>; - opp-supported-hw = <0x77>; + opp-supported-hw = <0x7>; clock-latency-ns = <200000>; }; opp-1324800000 { opp-hz = /bits/ 64 <1324800000>; - opp-supported-hw = <0x77>; + opp-supported-hw = <0x5>; + clock-latency-ns = <200000>; + }; + opp-1363200000 { + opp-hz = /bits/ 64 <1363200000>; + opp-supported-hw = <0x2>; clock-latency-ns = <200000>; }; opp-1401600000 { opp-hz = /bits/ 64 <1401600000>; - opp-supported-hw = <0x77>; + opp-supported-hw = <0x5>; clock-latency-ns = <200000>; }; opp-1478400000 { opp-hz = /bits/ 64 <1478400000>; - opp-supported-hw = <0x77>; + opp-supported-hw = <0x1>; + clock-latency-ns = <200000>; + }; + opp-1497600000 { + opp-hz = /bits/ 64 <1497600000>; + opp-supported-hw = <0x04>; clock-latency-ns = <200000>; }; opp-1593600000 { opp-hz = /bits/ 64 <1593600000>; - opp-supported-hw = <0x77>; + opp-supported-hw = <0x1>; clock-latency-ns = <200000>; }; }; @@ -230,127 +240,137 @@ /* Nominal fmax for now */ opp-307200000 { opp-hz = /bits/ 64 <307200000>; - opp-supported-hw = <0x77>; + opp-supported-hw = <0x7>; clock-latency-ns = <200000>; }; opp-403200000 { opp-hz = /bits/ 64 <403200000>; - opp-supported-hw = <0x77>; + opp-supported-hw = <0x7>; clock-latency-ns = <200000>; }; opp-480000000 { opp-hz = /bits/ 64 <480000000>; - opp-supported-hw = <0x77>; + opp-supported-hw = <0x7>; clock-latency-ns = <200000>; }; opp-556800000 { opp-hz = /bits/ 64 <556800000>; - opp-supported-hw = <0x77>; + opp-supported-hw = <0x7>; clock-latency-ns = <200000>; }; opp-652800000 { opp-hz = /bits/ 64 <652800000>; - opp-supported-hw = <0x77>; + opp-supported-hw = <0x7>; clock-latency-ns = <200000>; }; opp-729600000 { opp-hz = /bits/ 64 <729600000>; - opp-supported-hw = <0x77>; + opp-supported-hw = <0x7>; clock-latency-ns = <200000>; }; opp-806400000 { opp-hz = /bits/ 64 <806400000>; - opp-supported-hw = <0x77>; + opp-supported-hw = <0x7>; clock-latency-ns = <200000>; }; opp-883200000 { opp-hz = /bits/ 64 <883200000>; - opp-supported-hw = <0x77>; + opp-supported-hw = <0x7>; clock-latency-ns = <200000>; }; opp-940800000 { opp-hz = /bits/ 64 <940800000>; - opp-supported-hw = <0x77>; + opp-supported-hw = <0x7>; clock-latency-ns = 
<200000>; }; opp-1036800000 { opp-hz = /bits/ 64 <1036800000>; - opp-supported-hw = <0x77>; + opp-supported-hw = <0x7>; clock-latency-ns = <200000>; }; opp-1113600000 { opp-hz = /bits/ 64 <1113600000>; - opp-supported-hw = <0x77>; + opp-supported-hw = <0x7>; clock-latency-ns = <200000>; }; opp-1190400000 { opp-hz = /bits/ 64 <1190400000>; - opp-supported-hw = <0x77>; + opp-supported-hw = <0x7>; clock-latency-ns = <200000>; }; opp-1248000000 { opp-hz = /bits/ 64 <1248000000>; - opp-supported-hw = <0x77>; + opp-supported-hw = <0x7>; clock-latency-ns = <200000>; }; opp-1324800000 { opp-hz = /bits/ 64 <1324800000>; - opp-supported-hw = <0x77>; + opp-supported-hw = <0x7>; clock-latency-ns = <200000>; }; opp-1401600000 { opp-hz = /bits/ 64 <1401600000>; - opp-supported-hw = <0x77>; + opp-supported-hw = <0x7>; clock-latency-ns = <200000>; }; opp-1478400000 { opp-hz = /bits/ 64 <1478400000>; - opp-supported-hw = <0x77>; + opp-supported-hw = <0x7>; clock-latency-ns = <200000>; }; opp-1555200000 { opp-hz = /bits/ 64 <1555200000>; - opp-supported-hw = <0x77>; + opp-supported-hw = <0x7>; clock-latency-ns = <200000>; }; opp-1632000000 { opp-hz = /bits/ 64 <1632000000>; - opp-supported-hw = <0x77>; + opp-supported-hw = <0x7>; clock-latency-ns = <200000>; }; opp-1708800000 { opp-hz = /bits/ 64 <1708800000>; - opp-supported-hw = <0x77>; + opp-supported-hw = <0x7>; clock-latency-ns = <200000>; }; opp-1785600000 { opp-hz = /bits/ 64 <1785600000>; - opp-supported-hw = <0x77>; + opp-supported-hw = <0x7>; + clock-latency-ns = <200000>; + }; + opp-1804800000 { + opp-hz = /bits/ 64 <1804800000>; + opp-supported-hw = <0x6>; clock-latency-ns = <200000>; }; opp-1824000000 { opp-hz = /bits/ 64 <1824000000>; - opp-supported-hw = <0x77>; + opp-supported-hw = <0x1>; + clock-latency-ns = <200000>; + }; + opp-1900800000 { + opp-hz = /bits/ 64 <1900800000>; + opp-supported-hw = <0x4>; clock-latency-ns = <200000>; }; opp-1920000000 { opp-hz = /bits/ 64 <1920000000>; - opp-supported-hw = <0x77>; + opp-supported-hw = <0x1>; clock-latency-ns = <200000>; }; opp-1996800000 { opp-hz = /bits/ 64 <1996800000>; - opp-supported-hw = <0x77>; + opp-supported-hw = <0x1>; clock-latency-ns = <200000>; }; opp-2073600000 { opp-hz = /bits/ 64 <2073600000>; - opp-supported-hw = <0x77>; + opp-supported-hw = <0x1>; clock-latency-ns = <200000>; }; opp-2150400000 { opp-hz = /bits/ 64 <2150400000>; - opp-supported-hw = <0x77>; + opp-supported-hw = <0x1>; clock-latency-ns = <200000>; }; }; @@ -598,7 +618,7 @@ reset-names = "phy", "common", "cfg"; status = "disabled"; - pciephy_0: lane@35000 { + pciephy_0: phy@35000 { reg = <0x00035000 0x130>, <0x00035200 0x200>, <0x00035400 0x1dc>; @@ -611,7 +631,7 @@ reset-names = "lane0"; }; - pciephy_1: lane@36000 { + pciephy_1: phy@36000 { reg = <0x00036000 0x130>, <0x00036200 0x200>, <0x00036400 0x1dc>; @@ -624,7 +644,7 @@ reset-names = "lane1"; }; - pciephy_2: lane@37000 { + pciephy_2: phy@37000 { reg = <0x00037000 0x130>, <0x00037200 0x200>, <0x00037400 0x1dc>; @@ -975,17 +995,17 @@ compatible ="operating-points-v2"; /* - * 624Mhz and 560Mhz are only available on speed - * bin (1 << 0). All the rest are available on - * all bins of the hardware + * 624Mhz is only available on speed bins 0 and 3. + * 560Mhz is only available on speed bins 0, 2 and 3. + * All the rest are available on all bins of the hardware. 
*/ opp-624000000 { opp-hz = /bits/ 64 <624000000>; - opp-supported-hw = <0x01>; + opp-supported-hw = <0x09>; }; opp-560000000 { opp-hz = /bits/ 64 <560000000>; - opp-supported-hw = <0x01>; + opp-supported-hw = <0x0d>; }; opp-510000000 { opp-hz = /bits/ 64 <510000000>; @@ -1743,7 +1763,7 @@ reset-names = "ufsphy"; status = "disabled"; - ufsphy_lane: lanes@627400 { + ufsphy_lane: phy@627400 { reg = <0x627400 0x12c>, <0x627600 0x200>, <0x627c00 0x1b4>; @@ -2598,7 +2618,7 @@ reset-names = "phy", "common"; status = "disabled"; - ssusb_phy_0: lane@7410200 { + ssusb_phy_0: phy@7410200 { reg = <0x07410200 0x200>, <0x07410400 0x130>, <0x07410600 0x1a8>; diff --git a/arch/arm64/boot/dts/qcom/msm8996pro.dtsi b/arch/arm64/boot/dts/qcom/msm8996pro.dtsi new file mode 100644 index 000000000000..63e1b4ec7a36 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/msm8996pro.dtsi @@ -0,0 +1,266 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* + * Copyright (c) 2022, Linaro Limited + */ + +#include "msm8996.dtsi" + +/ { + /delete-node/ opp-table-cluster0; + /delete-node/ opp-table-cluster1; + + /* + * On MSM8996 Pro the cpufreq driver shifts speed bins into the high + * nibble of supported hw, so speed bin 0 becomes 0x10, speed bin 1 + * becomes 0x20, speed 2 becomes 0x40. + */ + + cluster0_opp: opp-table-cluster0 { + compatible = "operating-points-v2-kryo-cpu"; + nvmem-cells = <&speedbin_efuse>; + opp-shared; + + opp-307200000 { + opp-hz = /bits/ 64 <307200000>; + opp-supported-hw = <0x70>; + clock-latency-ns = <200000>; + }; + opp-384000000 { + opp-hz = /bits/ 64 <384000000>; + opp-supported-hw = <0x70>; + clock-latency-ns = <200000>; + }; + opp-460800000 { + opp-hz = /bits/ 64 <460800000>; + opp-supported-hw = <0x70>; + clock-latency-ns = <200000>; + }; + opp-537600000 { + opp-hz = /bits/ 64 <537600000>; + opp-supported-hw = <0x70>; + clock-latency-ns = <200000>; + }; + opp-614400000 { + opp-hz = /bits/ 64 <614400000>; + opp-supported-hw = <0x70>; + clock-latency-ns = <200000>; + }; + opp-691200000 { + opp-hz = /bits/ 64 <691200000>; + opp-supported-hw = <0x70>; + clock-latency-ns = <200000>; + }; + opp-768000000 { + opp-hz = /bits/ 64 <768000000>; + opp-supported-hw = <0x70>; + clock-latency-ns = <200000>; + }; + opp-844800000 { + opp-hz = /bits/ 64 <844800000>; + opp-supported-hw = <0x70>; + clock-latency-ns = <200000>; + }; + opp-902400000 { + opp-hz = /bits/ 64 <902400000>; + opp-supported-hw = <0x70>; + clock-latency-ns = <200000>; + }; + opp-979200000 { + opp-hz = /bits/ 64 <979200000>; + opp-supported-hw = <0x70>; + clock-latency-ns = <200000>; + }; + opp-1056000000 { + opp-hz = /bits/ 64 <1056000000>; + opp-supported-hw = <0x70>; + clock-latency-ns = <200000>; + }; + opp-1132800000 { + opp-hz = /bits/ 64 <1132800000>; + opp-supported-hw = <0x70>; + clock-latency-ns = <200000>; + }; + opp-1209600000 { + opp-hz = /bits/ 64 <1209600000>; + opp-supported-hw = <0x70>; + clock-latency-ns = <200000>; + }; + opp-1286400000 { + opp-hz = /bits/ 64 <1286400000>; + opp-supported-hw = <0x70>; + clock-latency-ns = <200000>; + }; + opp-1363200000 { + opp-hz = /bits/ 64 <1363200000>; + opp-supported-hw = <0x70>; + clock-latency-ns = <200000>; + }; + opp-1440000000 { + opp-hz = /bits/ 64 <1440000000>; + opp-supported-hw = <0x70>; + clock-latency-ns = <200000>; + }; + opp-1516800000 { + opp-hz = /bits/ 64 <1516800000>; + opp-supported-hw = <0x70>; + clock-latency-ns = <200000>; + }; + opp-1593600000 { + opp-hz = /bits/ 64 <1593600000>; + opp-supported-hw = <0x70>; + clock-latency-ns = <200000>; + }; + opp-1996800000 { + 
opp-hz = /bits/ 64 <1996800000>; + opp-supported-hw = <0x20>; + clock-latency-ns = <200000>; + }; + opp-2188800000 { + opp-hz = /bits/ 64 <2188800000>; + opp-supported-hw = <0x10>; + clock-latency-ns = <200000>; + }; + }; + + cluster1_opp: opp-table-cluster1 { + compatible = "operating-points-v2-kryo-cpu"; + nvmem-cells = <&speedbin_efuse>; + opp-shared; + + opp-307200000 { + opp-hz = /bits/ 64 <307200000>; + opp-supported-hw = <0x70>; + clock-latency-ns = <200000>; + }; + opp-384000000 { + opp-hz = /bits/ 64 <384000000>; + opp-supported-hw = <0x70>; + clock-latency-ns = <200000>; + }; + opp-460800000 { + opp-hz = /bits/ 64 <460800000>; + opp-supported-hw = <0x70>; + clock-latency-ns = <200000>; + }; + opp-537600000 { + opp-hz = /bits/ 64 <537600000>; + opp-supported-hw = <0x70>; + clock-latency-ns = <200000>; + }; + opp-614400000 { + opp-hz = /bits/ 64 <614400000>; + opp-supported-hw = <0x70>; + clock-latency-ns = <200000>; + }; + opp-691200000 { + opp-hz = /bits/ 64 <691200000>; + opp-supported-hw = <0x70>; + clock-latency-ns = <200000>; + }; + opp-748800000 { + opp-hz = /bits/ 64 <748800000>; + opp-supported-hw = <0x70>; + clock-latency-ns = <200000>; + }; + opp-825600000 { + opp-hz = /bits/ 64 <825600000>; + opp-supported-hw = <0x70>; + clock-latency-ns = <200000>; + }; + opp-902400000 { + opp-hz = /bits/ 64 <902400000>; + opp-supported-hw = <0x70>; + clock-latency-ns = <200000>; + }; + opp-979200000 { + opp-hz = /bits/ 64 <979200000>; + opp-supported-hw = <0x70>; + clock-latency-ns = <200000>; + }; + opp-1056000000 { + opp-hz = /bits/ 64 <1056000000>; + opp-supported-hw = <0x70>; + clock-latency-ns = <200000>; + }; + opp-1132800000 { + opp-hz = /bits/ 64 <1132800000>; + opp-supported-hw = <0x70>; + clock-latency-ns = <200000>; + }; + opp-1209600000 { + opp-hz = /bits/ 64 <1209600000>; + opp-supported-hw = <0x70>; + clock-latency-ns = <200000>; + }; + opp-1286400000 { + opp-hz = /bits/ 64 <1286400000>; + opp-supported-hw = <0x70>; + clock-latency-ns = <200000>; + }; + opp-1363200000 { + opp-hz = /bits/ 64 <1363200000>; + opp-supported-hw = <0x70>; + clock-latency-ns = <200000>; + }; + opp-1440000000 { + opp-hz = /bits/ 64 <1440000000>; + opp-supported-hw = <0x70>; + clock-latency-ns = <200000>; + }; + opp-1516800000 { + opp-hz = /bits/ 64 <1516800000>; + opp-supported-hw = <0x70>; + clock-latency-ns = <200000>; + }; + opp-1593600000 { + opp-hz = /bits/ 64 <1593600000>; + opp-supported-hw = <0x70>; + clock-latency-ns = <200000>; + }; + opp-1670400000 { + opp-hz = /bits/ 64 <1670400000>; + opp-supported-hw = <0x70>; + clock-latency-ns = <200000>; + }; + opp-1747200000 { + opp-hz = /bits/ 64 <1747200000>; + opp-supported-hw = <0x70>; + clock-latency-ns = <200000>; + }; + opp-1824000000 { + opp-hz = /bits/ 64 <1824000000>; + opp-supported-hw = <0x70>; + clock-latency-ns = <200000>; + }; + opp-1900800000 { + opp-hz = /bits/ 64 <1900800000>; + opp-supported-hw = <0x70>; + clock-latency-ns = <200000>; + }; + opp-1977600000 { + opp-hz = /bits/ 64 <1977600000>; + opp-supported-hw = <0x30>; + clock-latency-ns = <200000>; + }; + opp-2054400000 { + opp-hz = /bits/ 64 <2054400000>; + opp-supported-hw = <0x30>; + clock-latency-ns = <200000>; + }; + opp-2150400000 { + opp-hz = /bits/ 64 <2150400000>; + opp-supported-hw = <0x30>; + clock-latency-ns = <200000>; + }; + opp-2246400000 { + opp-hz = /bits/ 64 <2246400000>; + opp-supported-hw = <0x10>; + clock-latency-ns = <200000>; + }; + opp-2342400000 { + opp-hz = /bits/ 64 <2342400000>; + opp-supported-hw = <0x10>; + clock-latency-ns = <200000>; + }; + 
}; +}; diff --git a/arch/arm64/boot/dts/qcom/msm8998.dtsi b/arch/arm64/boot/dts/qcom/msm8998.dtsi index 228339f81c32..5350b911f4f6 100644 --- a/arch/arm64/boot/dts/qcom/msm8998.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8998.dtsi @@ -994,7 +994,7 @@ vdda-phy-supply = <&vreg_l1a_0p875>; vdda-pll-supply = <&vreg_l2a_1p2>; - pciephy: lane@1c06800 { + pciephy: phy@1c06800 { reg = <0x01c06200 0x128>, <0x01c06400 0x1fc>, <0x01c06800 0x20c>; #phy-cells = <0>; @@ -1066,7 +1066,7 @@ reset-names = "ufsphy"; resets = <&ufshc 0>; - ufsphy_lanes: lanes@1da7400 { + ufsphy_lanes: phy@1da7400 { reg = <0x01da7400 0x128>, <0x01da7600 0x1fc>, <0x01da7c00 0x1dc>, @@ -1999,7 +1999,7 @@ <&gcc GCC_USB3PHY_PHY_BCR>; reset-names = "phy", "common"; - usb1_ssphy: lane@c010200 { + usb1_ssphy: phy@c010200 { reg = <0xc010200 0x128>, <0xc010400 0x200>, <0xc010c00 0x20c>, diff --git a/arch/arm64/boot/dts/qcom/pm660.dtsi b/arch/arm64/boot/dts/qcom/pm660.dtsi index e847d7209afc..affc736d154a 100644 --- a/arch/arm64/boot/dts/qcom/pm660.dtsi +++ b/arch/arm64/boot/dts/qcom/pm660.dtsi @@ -152,7 +152,7 @@ qcom,pre-scaling = <1 3>; }; - vcoin: vcoin@83 { + vcoin: vcoin@85 { reg = ; qcom,decimation = <1024>; qcom,pre-scaling = <1 3>; diff --git a/arch/arm64/boot/dts/qcom/sdm630.dtsi b/arch/arm64/boot/dts/qcom/sdm630.dtsi index 952bb133914f..c2e1a0d9a272 100644 --- a/arch/arm64/boot/dts/qcom/sdm630.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm630.dtsi @@ -768,7 +768,7 @@ pins = "gpio17", "gpio18", "gpio19"; function = "gpio"; drive-strength = <2>; - bias-no-pull; + bias-disable; }; }; diff --git a/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi b/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi index dfd1b42c07fd..3566db1d7357 100644 --- a/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi @@ -1299,7 +1299,7 @@ ap_ts_i2c: &i2c14 { config { pins = "gpio126"; function = "gpio"; - bias-no-pull; + bias-disable; drive-strength = <2>; output-low; }; @@ -1309,7 +1309,7 @@ ap_ts_i2c: &i2c14 { config { pins = "gpio126"; function = "gpio"; - bias-no-pull; + bias-disable; drive-strength = <2>; output-high; }; diff --git a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts index 2d5533dd4ec2..146d3cd3f1b3 100644 --- a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts +++ b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts @@ -1045,7 +1045,10 @@ /* PINCTRL - additions to nodes defined in sdm845.dtsi */ &qup_spi2_default { - drive-strength = <16>; + pinconf { + pins = "gpio27", "gpio28", "gpio29", "gpio30"; + drive-strength = <16>; + }; }; &qup_uart3_default{ diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi index ea7a272d267a..ed293f635f14 100644 --- a/arch/arm64/boot/dts/qcom/sdm845.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi @@ -2064,7 +2064,7 @@ status = "disabled"; - pcie0_lane: lanes@1c06200 { + pcie0_lane: phy@1c06200 { reg = <0 0x01c06200 0 0x128>, <0 0x01c06400 0 0x1fc>, <0 0x01c06800 0 0x218>, @@ -2174,7 +2174,7 @@ status = "disabled"; - pcie1_lane: lanes@1c06200 { + pcie1_lane: phy@1c06200 { reg = <0 0x01c0a800 0 0x800>, <0 0x01c0a800 0 0x800>, <0 0x01c0b800 0 0x400>; @@ -2302,7 +2302,7 @@ reset-names = "ufsphy"; status = "disabled"; - ufs_mem_phy_lanes: lanes@1d87400 { + ufs_mem_phy_lanes: phy@1d87400 { reg = <0 0x01d87400 0 0x108>, <0 0x01d87600 0 0x1e0>, <0 0x01d87c00 0 0x1dc>, @@ -3699,7 +3699,7 @@ <&gcc GCC_USB3_PHY_PRIM_BCR>; reset-names = "phy", "common"; - usb_1_ssphy: lanes@88e9200 { + usb_1_ssphy: phy@88e9200 { reg = <0 0x088e9200 0 
0x128>, <0 0x088e9400 0 0x200>, <0 0x088e9c00 0 0x218>, @@ -3732,7 +3732,7 @@ <&gcc GCC_USB3_PHY_SEC_BCR>; reset-names = "phy", "common"; - usb_2_ssphy: lane@88eb200 { + usb_2_ssphy: phy@88eb200 { reg = <0 0x088eb200 0 0x128>, <0 0x088eb400 0 0x1fc>, <0 0x088eb800 0 0x218>, diff --git a/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts b/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts index 617a634ac905..834fb463f99e 100644 --- a/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts +++ b/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts @@ -475,8 +475,10 @@ }; &qup_i2c12_default { - drive-strength = <2>; - bias-disable; + pinmux { + drive-strength = <2>; + bias-disable; + }; }; &qup_uart6_default { diff --git a/arch/arm64/boot/dts/qcom/sm6125.dtsi b/arch/arm64/boot/dts/qcom/sm6125.dtsi index f89af5e35112..dc3bddc54eb6 100644 --- a/arch/arm64/boot/dts/qcom/sm6125.dtsi +++ b/arch/arm64/boot/dts/qcom/sm6125.dtsi @@ -408,7 +408,7 @@ sdhc_1: sdhci@4744000 { compatible = "qcom,sm6125-sdhci", "qcom,sdhci-msm-v5"; reg = <0x04744000 0x1000>, <0x04745000 0x1000>; - reg-names = "hc", "core"; + reg-names = "hc", "cqhci"; interrupts = , ; diff --git a/arch/arm64/boot/dts/qcom/sm8150.dtsi b/arch/arm64/boot/dts/qcom/sm8150.dtsi index f347f752d536..292e40d6162d 100644 --- a/arch/arm64/boot/dts/qcom/sm8150.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150.dtsi @@ -1692,12 +1692,12 @@ reset-names = "ufsphy"; status = "disabled"; - ufs_mem_phy_lanes: lanes@1d87400 { - reg = <0 0x01d87400 0 0x108>, - <0 0x01d87600 0 0x1e0>, - <0 0x01d87c00 0 0x1dc>, - <0 0x01d87800 0 0x108>, - <0 0x01d87a00 0 0x1e0>; + ufs_mem_phy_lanes: phy@1d87400 { + reg = <0 0x01d87400 0 0x16c>, + <0 0x01d87600 0 0x200>, + <0 0x01d87c00 0 0x200>, + <0 0x01d87800 0 0x16c>, + <0 0x01d87a00 0 0x200>; #phy-cells = <0>; }; }; @@ -3010,7 +3010,7 @@ <&gcc GCC_USB3_PHY_PRIM_BCR>; reset-names = "phy", "common"; - usb_1_ssphy: lanes@88e9200 { + usb_1_ssphy: phy@88e9200 { reg = <0 0x088e9200 0 0x200>, <0 0x088e9400 0 0x200>, <0 0x088e9c00 0 0x218>, @@ -3043,7 +3043,7 @@ <&gcc GCC_USB3_PHY_SEC_BCR>; reset-names = "phy", "common"; - usb_2_ssphy: lane@88eb200 { + usb_2_ssphy: phy@88eb200 { reg = <0 0x088eb200 0 0x200>, <0 0x088eb400 0 0x200>, <0 0x088eb800 0 0x800>, diff --git a/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo.dtsi b/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo.dtsi index b15d085db05a..effbd6a9c989 100644 --- a/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo.dtsi @@ -591,7 +591,7 @@ pins = "gpio39"; function = "gpio"; drive-strength = <2>; - bias-disabled; + bias-disable; input-enable; }; diff --git a/arch/arm64/boot/dts/qcom/sm8250.dtsi b/arch/arm64/boot/dts/qcom/sm8250.dtsi index b710bca45648..4e3b772a8bde 100644 --- a/arch/arm64/boot/dts/qcom/sm8250.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8250.dtsi @@ -1463,7 +1463,7 @@ status = "disabled"; - pcie0_lane: lanes@1c06200 { + pcie0_lane: phy@1c06200 { reg = <0 0x1c06200 0 0x170>, /* tx */ <0 0x1c06400 0 0x200>, /* rx */ <0 0x1c06800 0 0x1f0>, /* pcs */ @@ -1569,7 +1569,7 @@ status = "disabled"; - pcie1_lane: lanes@1c0e200 { + pcie1_lane: phy@1c0e200 { reg = <0 0x1c0e200 0 0x170>, /* tx0 */ <0 0x1c0e400 0 0x200>, /* rx0 */ <0 0x1c0ea00 0 0x1f0>, /* pcs */ @@ -1677,7 +1677,7 @@ status = "disabled"; - pcie2_lane: lanes@1c16200 { + pcie2_lane: phy@1c16200 { reg = <0 0x1c16200 0 0x170>, /* tx0 */ <0 0x1c16400 0 0x200>, /* rx0 */ <0 0x1c16a00 0 0x1f0>, /* pcs */ @@ -1756,12 +1756,12 @@ reset-names = "ufsphy"; status = "disabled"; 
- ufs_mem_phy_lanes: lanes@1d87400 { - reg = <0 0x01d87400 0 0x108>, - <0 0x01d87600 0 0x1e0>, - <0 0x01d87c00 0 0x1dc>, - <0 0x01d87800 0 0x108>, - <0 0x01d87a00 0 0x1e0>; + ufs_mem_phy_lanes: phy@1d87400 { + reg = <0 0x01d87400 0 0x16c>, + <0 0x01d87600 0 0x200>, + <0 0x01d87c00 0 0x200>, + <0 0x01d87800 0 0x16c>, + <0 0x01d87a00 0 0x200>; #phy-cells = <0>; }; }; @@ -1933,7 +1933,7 @@ pins = "gpio7"; function = "dmic1_data"; drive-strength = <2>; - pull-down; + bias-pull-down; input-enable; }; }; @@ -2306,15 +2306,11 @@ dp_phy: dp-phy@88ea200 { reg = <0 0x088ea200 0 0x200>, <0 0x088ea400 0 0x200>, - <0 0x088eac00 0 0x400>, + <0 0x088eaa00 0 0x200>, <0 0x088ea600 0 0x200>, - <0 0x088ea800 0 0x200>, - <0 0x088eaa00 0 0x100>; + <0 0x088ea800 0 0x200>; #phy-cells = <0>; #clock-cells = <1>; - clocks = <&gcc GCC_USB3_PRIM_PHY_PIPE_CLK>; - clock-names = "pipe0"; - clock-output-names = "usb3_phy_pipe_clk_src"; }; }; @@ -2336,7 +2332,7 @@ <&gcc GCC_USB3_PHY_SEC_BCR>; reset-names = "phy", "common"; - usb_2_ssphy: lanes@88eb200 { + usb_2_ssphy: phy@88eb200 { reg = <0 0x088eb200 0 0x200>, <0 0x088eb400 0 0x200>, <0 0x088eb800 0 0x800>; diff --git a/arch/arm64/boot/dts/qcom/sm8350.dtsi b/arch/arm64/boot/dts/qcom/sm8350.dtsi index c0a3ea47302f..1ef16975d13a 100644 --- a/arch/arm64/boot/dts/qcom/sm8350.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8350.dtsi @@ -1123,12 +1123,12 @@ reset-names = "ufsphy"; status = "disabled"; - ufs_mem_phy_lanes: lanes@1d87400 { - reg = <0 0x01d87400 0 0x108>, - <0 0x01d87600 0 0x1e0>, - <0 0x01d87c00 0 0x1dc>, - <0 0x01d87800 0 0x108>, - <0 0x01d87a00 0 0x1e0>; + ufs_mem_phy_lanes: phy@1d87400 { + reg = <0 0x01d87400 0 0x188>, + <0 0x01d87600 0 0x200>, + <0 0x01d87c00 0 0x200>, + <0 0x01d87800 0 0x188>, + <0 0x01d87a00 0 0x200>; #phy-cells = <0>; #clock-cells = <0>; }; diff --git a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi index 82be00069bcd..4f232f575ab2 100644 --- a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi +++ b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi @@ -120,7 +120,6 @@ dmas = <&main_udmap 0xc000>, <&main_udmap 0x4000>, <&main_udmap 0x4001>; dma-names = "tx", "rx1", "rx2"; - dma-coherent; rng: rng@4e10000 { compatible = "inside-secure,safexcel-eip76"; diff --git a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi index 6c81997ee28a..ad21bb1417aa 100644 --- a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi +++ b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi @@ -336,7 +336,6 @@ dmas = <&main_udmap 0xc000>, <&main_udmap 0x4000>, <&main_udmap 0x4001>; dma-names = "tx", "rx1", "rx2"; - dma-coherent; rng: rng@4e10000 { compatible = "inside-secure,safexcel-eip76"; diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h index 657c921fd784..e1e10a24519b 100644 --- a/arch/arm64/include/asm/debug-monitors.h +++ b/arch/arm64/include/asm/debug-monitors.h @@ -76,7 +76,7 @@ struct task_struct; struct step_hook { struct list_head node; - int (*fn)(struct pt_regs *regs, unsigned int esr); + int (*fn)(struct pt_regs *regs, unsigned long esr); }; void register_user_step_hook(struct step_hook *hook); @@ -87,7 +87,7 @@ void unregister_kernel_step_hook(struct step_hook *hook); struct break_hook { struct list_head node; - int (*fn)(struct pt_regs *regs, unsigned int esr); + int (*fn)(struct pt_regs *regs, unsigned long esr); u16 imm; u16 mask; /* These bits are ignored when comparing with imm */ }; diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h index 
8f59bbeba7a7..9f91c8906edd 100644 --- a/arch/arm64/include/asm/esr.h +++ b/arch/arm64/include/asm/esr.h @@ -324,14 +324,14 @@ #ifndef __ASSEMBLY__ #include -static inline bool esr_is_data_abort(u32 esr) +static inline bool esr_is_data_abort(unsigned long esr) { - const u32 ec = ESR_ELx_EC(esr); + const unsigned long ec = ESR_ELx_EC(esr); return ec == ESR_ELx_EC_DABT_LOW || ec == ESR_ELx_EC_DABT_CUR; } -const char *esr_get_class_string(u32 esr); +const char *esr_get_class_string(unsigned long esr); #endif /* __ASSEMBLY */ #endif /* __ASM_ESR_H */ diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h index 339477dca551..0e6535aa78c2 100644 --- a/arch/arm64/include/asm/exception.h +++ b/arch/arm64/include/asm/exception.h @@ -19,9 +19,9 @@ #define __exception_irq_entry __kprobes #endif -static inline u32 disr_to_esr(u64 disr) +static inline unsigned long disr_to_esr(u64 disr) { - unsigned int esr = ESR_ELx_EC_SERROR << ESR_ELx_EC_SHIFT; + unsigned long esr = ESR_ELx_EC_SERROR << ESR_ELx_EC_SHIFT; if ((disr & DISR_EL1_IDS) == 0) esr |= (disr & DISR_EL1_ESR_MASK); @@ -57,23 +57,23 @@ asmlinkage void call_on_irq_stack(struct pt_regs *regs, void (*func)(struct pt_regs *)); asmlinkage void asm_exit_to_user_mode(struct pt_regs *regs); -void do_mem_abort(unsigned long far, unsigned int esr, struct pt_regs *regs); +void do_mem_abort(unsigned long far, unsigned long esr, struct pt_regs *regs); void do_undefinstr(struct pt_regs *regs); void do_bti(struct pt_regs *regs); -void do_debug_exception(unsigned long addr_if_watchpoint, unsigned int esr, +void do_debug_exception(unsigned long addr_if_watchpoint, unsigned long esr, struct pt_regs *regs); -void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs); -void do_sve_acc(unsigned int esr, struct pt_regs *regs); -void do_fpsimd_exc(unsigned int esr, struct pt_regs *regs); -void do_sysinstr(unsigned int esr, struct pt_regs *regs); -void do_sp_pc_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs); -void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr); -void do_cp15instr(unsigned int esr, struct pt_regs *regs); +void do_fpsimd_acc(unsigned long esr, struct pt_regs *regs); +void do_sve_acc(unsigned long esr, struct pt_regs *regs); +void do_fpsimd_exc(unsigned long esr, struct pt_regs *regs); +void do_sysinstr(unsigned long esr, struct pt_regs *regs); +void do_sp_pc_abort(unsigned long addr, unsigned long esr, struct pt_regs *regs); +void bad_el0_sync(struct pt_regs *regs, int reason, unsigned long esr); +void do_cp15instr(unsigned long esr, struct pt_regs *regs); void do_el0_svc(struct pt_regs *regs); void do_el0_svc_compat(struct pt_regs *regs); -void do_ptrauth_fault(struct pt_regs *regs, unsigned int esr); -void do_serror(struct pt_regs *regs, unsigned int esr); +void do_ptrauth_fault(struct pt_regs *regs, unsigned long esr); +void do_serror(struct pt_regs *regs, unsigned long esr); void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags); -void panic_bad_stack(struct pt_regs *regs, unsigned int esr, unsigned long far); +void panic_bad_stack(struct pt_regs *regs, unsigned long esr, unsigned long far); #endif /* __ASM_EXCEPTION_H */ diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index d9bf3d12a2b8..7364530de0a7 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h @@ -240,13 +240,13 @@ static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc, } #endif -static inline bool 
is_ttbr0_addr(unsigned long addr) +static __always_inline bool is_ttbr0_addr(unsigned long addr) { /* entry assembly clears tags for TTBR0 addrs */ return addr < TASK_SIZE; } -static inline bool is_ttbr1_addr(unsigned long addr) +static __always_inline bool is_ttbr1_addr(unsigned long addr) { /* TTBR1 addresses may have a tag if KASAN_SW_TAGS is in use */ return arch_kasan_reset_tag(addr) >= PAGE_OFFSET; diff --git a/arch/arm64/include/asm/system_misc.h b/arch/arm64/include/asm/system_misc.h index 305a7157c6a6..0eb7709422e2 100644 --- a/arch/arm64/include/asm/system_misc.h +++ b/arch/arm64/include/asm/system_misc.h @@ -23,9 +23,9 @@ void die(const char *msg, struct pt_regs *regs, int err); struct siginfo; void arm64_notify_die(const char *str, struct pt_regs *regs, int signo, int sicode, unsigned long far, - int err); + unsigned long err); -void hook_debug_fault_code(int nr, int (*fn)(unsigned long, unsigned int, +void hook_debug_fault_code(int nr, int (*fn)(unsigned long, unsigned long, struct pt_regs *), int sig, int code, const char *name); diff --git a/arch/arm64/include/asm/traps.h b/arch/arm64/include/asm/traps.h index 54f32a0675df..6e5826470bea 100644 --- a/arch/arm64/include/asm/traps.h +++ b/arch/arm64/include/asm/traps.h @@ -24,7 +24,7 @@ struct undef_hook { void register_undef_hook(struct undef_hook *hook); void unregister_undef_hook(struct undef_hook *hook); -void force_signal_inject(int signal, int code, unsigned long address, unsigned int err); +void force_signal_inject(int signal, int code, unsigned long address, unsigned long err); void arm64_notify_segfault(unsigned long addr); void arm64_force_sig_fault(int signo, int code, unsigned long far, const char *str); void arm64_force_sig_mceerr(int code, unsigned long far, short lsb, const char *str); @@ -57,7 +57,7 @@ static inline int in_entry_text(unsigned long ptr) * errors share the same encoding as an all-zeros encoding from a CPU that * doesn't support RAS. */ -static inline bool arm64_is_ras_serror(u32 esr) +static inline bool arm64_is_ras_serror(unsigned long esr) { WARN_ON(preemptible()); @@ -77,9 +77,9 @@ static inline bool arm64_is_ras_serror(u32 esr) * We treat them as Uncontainable. * Non-RAS SError's are reported as Uncontained/Uncategorized. */ -static inline u32 arm64_ras_serror_get_severity(u32 esr) +static inline unsigned long arm64_ras_serror_get_severity(unsigned long esr) { - u32 aet = esr & ESR_ELx_AET; + unsigned long aet = esr & ESR_ELx_AET; if (!arm64_is_ras_serror(esr)) { /* Not a RAS error, we can't interpret the ESR. */ @@ -98,6 +98,6 @@ static inline u32 arm64_ras_serror_get_severity(u32 esr) return aet; } -bool arm64_is_fatal_ras_serror(struct pt_regs *regs, unsigned int esr); -void __noreturn arm64_serror_panic(struct pt_regs *regs, u32 esr); +bool arm64_is_fatal_ras_serror(struct pt_regs *regs, unsigned long esr); +void __noreturn arm64_serror_panic(struct pt_regs *regs, unsigned long esr); #endif diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c index 4f3661eeb7ec..bf9fe71589bc 100644 --- a/arch/arm64/kernel/debug-monitors.c +++ b/arch/arm64/kernel/debug-monitors.c @@ -202,7 +202,7 @@ void unregister_kernel_step_hook(struct step_hook *hook) * So we call all the registered handlers, until the right handler is * found which returns zero. 
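 *
 * As a minimal illustrative sketch (not part of this change), a client built
 * against the widened prototype would register a hook roughly as follows;
 * example_step_fn, example_owns_step and example_step_hook are hypothetical
 * names, only the unsigned long esr signature is dictated here:
 *
 *	static int example_step_fn(struct pt_regs *regs, unsigned long esr)
 *	{
 *		if (!example_owns_step(regs))
 *			return DBG_HOOK_ERROR;
 *		return DBG_HOOK_HANDLED;
 *	}
 *
 *	static struct step_hook example_step_hook = { .fn = example_step_fn };
 *	register_kernel_step_hook(&example_step_hook);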
*/ -static int call_step_hook(struct pt_regs *regs, unsigned int esr) +static int call_step_hook(struct pt_regs *regs, unsigned long esr) { struct step_hook *hook; struct list_head *list; @@ -238,7 +238,7 @@ static void send_user_sigtrap(int si_code) "User debug trap"); } -static int single_step_handler(unsigned long unused, unsigned int esr, +static int single_step_handler(unsigned long unused, unsigned long esr, struct pt_regs *regs) { bool handler_found = false; @@ -299,11 +299,11 @@ void unregister_kernel_break_hook(struct break_hook *hook) unregister_debug_hook(&hook->node); } -static int call_break_hook(struct pt_regs *regs, unsigned int esr) +static int call_break_hook(struct pt_regs *regs, unsigned long esr) { struct break_hook *hook; struct list_head *list; - int (*fn)(struct pt_regs *regs, unsigned int esr) = NULL; + int (*fn)(struct pt_regs *regs, unsigned long esr) = NULL; list = user_mode(regs) ? &user_break_hook : &kernel_break_hook; @@ -312,7 +312,7 @@ static int call_break_hook(struct pt_regs *regs, unsigned int esr) * entirely not preemptible, and we can use rcu list safely here. */ list_for_each_entry_rcu(hook, list, node) { - unsigned int comment = esr & ESR_ELx_BRK64_ISS_COMMENT_MASK; + unsigned long comment = esr & ESR_ELx_BRK64_ISS_COMMENT_MASK; if ((comment & ~hook->mask) == hook->imm) fn = hook->fn; @@ -322,7 +322,7 @@ static int call_break_hook(struct pt_regs *regs, unsigned int esr) } NOKPROBE_SYMBOL(call_break_hook); -static int brk_handler(unsigned long unused, unsigned int esr, +static int brk_handler(unsigned long unused, unsigned long esr, struct pt_regs *regs) { if (call_break_hook(regs, esr) == DBG_HOOK_HANDLED) diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c index 8ecca795aca0..fc91dad1579a 100644 --- a/arch/arm64/kernel/entry-common.c +++ b/arch/arm64/kernel/entry-common.c @@ -273,13 +273,13 @@ extern void (*handle_arch_irq)(struct pt_regs *); extern void (*handle_arch_fiq)(struct pt_regs *); static void noinstr __panic_unhandled(struct pt_regs *regs, const char *vector, - unsigned int esr) + unsigned long esr) { arm64_enter_nmi(regs); console_verbose(); - pr_crit("Unhandled %s exception on CPU%d, ESR 0x%08x -- %s\n", + pr_crit("Unhandled %s exception on CPU%d, ESR 0x%016lx -- %s\n", vector, smp_processor_id(), esr, esr_get_class_string(esr)); @@ -796,7 +796,7 @@ UNHANDLED(el0t, 32, error) #ifdef CONFIG_VMAP_STACK asmlinkage void noinstr handle_bad_stack(struct pt_regs *regs) { - unsigned int esr = read_sysreg(esr_el1); + unsigned long esr = read_sysreg(esr_el1); unsigned long far = read_sysreg(far_el1); arm64_enter_nmi(regs); diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index ff4962750b3d..7a3fcf21b18a 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -930,7 +930,7 @@ void fpsimd_release_task(struct task_struct *dead_task) * would have disabled the SVE access trap for userspace during * ret_to_user, making an SVE access trap impossible in that case. */ -void do_sve_acc(unsigned int esr, struct pt_regs *regs) +void do_sve_acc(unsigned long esr, struct pt_regs *regs) { /* Even if we chose not to use SVE, the hardware could still trap: */ if (unlikely(!system_supports_sve()) || WARN_ON(is_compat_task())) { @@ -972,7 +972,7 @@ void do_sve_acc(unsigned int esr, struct pt_regs *regs) /* * Trapped FP/ASIMD access. 
*/ -void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs) +void do_fpsimd_acc(unsigned long esr, struct pt_regs *regs) { /* TODO: implement lazy context saving/restoring */ WARN_ON(1); @@ -981,7 +981,7 @@ void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs) /* * Raise a SIGFPE for the current process. */ -void do_fpsimd_exc(unsigned int esr, struct pt_regs *regs) +void do_fpsimd_exc(unsigned long esr, struct pt_regs *regs) { unsigned int si_code = FPE_FLTUNK; diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c index 712e97c03e54..2a7f21314cde 100644 --- a/arch/arm64/kernel/hw_breakpoint.c +++ b/arch/arm64/kernel/hw_breakpoint.c @@ -617,7 +617,7 @@ NOKPROBE_SYMBOL(toggle_bp_registers); /* * Debug exception handlers. */ -static int breakpoint_handler(unsigned long unused, unsigned int esr, +static int breakpoint_handler(unsigned long unused, unsigned long esr, struct pt_regs *regs) { int i, step = 0, *kernel_step; @@ -751,7 +751,7 @@ static int watchpoint_report(struct perf_event *wp, unsigned long addr, return step; } -static int watchpoint_handler(unsigned long addr, unsigned int esr, +static int watchpoint_handler(unsigned long addr, unsigned long esr, struct pt_regs *regs) { int i, step = 0, *kernel_step, access, closest_match = 0; diff --git a/arch/arm64/kernel/kgdb.c b/arch/arm64/kernel/kgdb.c index 2aede780fb80..cda9c1e9864f 100644 --- a/arch/arm64/kernel/kgdb.c +++ b/arch/arm64/kernel/kgdb.c @@ -232,14 +232,14 @@ int kgdb_arch_handle_exception(int exception_vector, int signo, return err; } -static int kgdb_brk_fn(struct pt_regs *regs, unsigned int esr) +static int kgdb_brk_fn(struct pt_regs *regs, unsigned long esr) { kgdb_handle_exception(1, SIGTRAP, 0, regs); return DBG_HOOK_HANDLED; } NOKPROBE_SYMBOL(kgdb_brk_fn) -static int kgdb_compiled_brk_fn(struct pt_regs *regs, unsigned int esr) +static int kgdb_compiled_brk_fn(struct pt_regs *regs, unsigned long esr) { compiled_break = 1; kgdb_handle_exception(1, SIGTRAP, 0, regs); @@ -248,7 +248,7 @@ static int kgdb_compiled_brk_fn(struct pt_regs *regs, unsigned int esr) } NOKPROBE_SYMBOL(kgdb_compiled_brk_fn); -static int kgdb_step_brk_fn(struct pt_regs *regs, unsigned int esr) +static int kgdb_step_brk_fn(struct pt_regs *regs, unsigned long esr) { if (!kgdb_single_step) return DBG_HOOK_ERROR; diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c index 6dbcc89f6662..b7404dba0d62 100644 --- a/arch/arm64/kernel/probes/kprobes.c +++ b/arch/arm64/kernel/probes/kprobes.c @@ -332,7 +332,7 @@ static void __kprobes kprobe_handler(struct pt_regs *regs) } static int __kprobes -kprobe_breakpoint_ss_handler(struct pt_regs *regs, unsigned int esr) +kprobe_breakpoint_ss_handler(struct pt_regs *regs, unsigned long esr) { struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); unsigned long addr = instruction_pointer(regs); @@ -356,7 +356,7 @@ static struct break_hook kprobes_break_ss_hook = { }; static int __kprobes -kprobe_breakpoint_handler(struct pt_regs *regs, unsigned int esr) +kprobe_breakpoint_handler(struct pt_regs *regs, unsigned long esr) { kprobe_handler(regs); return DBG_HOOK_HANDLED; diff --git a/arch/arm64/kernel/probes/uprobes.c b/arch/arm64/kernel/probes/uprobes.c index 9be668f3f034..d49aef2657cd 100644 --- a/arch/arm64/kernel/probes/uprobes.c +++ b/arch/arm64/kernel/probes/uprobes.c @@ -166,7 +166,7 @@ int arch_uprobe_exception_notify(struct notifier_block *self, } static int uprobe_breakpoint_handler(struct pt_regs *regs, - unsigned int esr) + unsigned long esr) { if 
(uprobe_pre_sstep_notifier(regs)) return DBG_HOOK_HANDLED; @@ -175,7 +175,7 @@ static int uprobe_breakpoint_handler(struct pt_regs *regs, } static int uprobe_single_step_handler(struct pt_regs *regs, - unsigned int esr) + unsigned long esr) { struct uprobe_task *utask = current->utask; diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index fe0cd0568813..f859cc870d5b 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -243,7 +243,7 @@ static void arm64_show_signal(int signo, const char *str) static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST); struct task_struct *tsk = current; - unsigned int esr = tsk->thread.fault_code; + unsigned long esr = tsk->thread.fault_code; struct pt_regs *regs = task_pt_regs(tsk); /* Leave if the signal won't be shown */ @@ -254,7 +254,7 @@ static void arm64_show_signal(int signo, const char *str) pr_info("%s[%d]: unhandled exception: ", tsk->comm, task_pid_nr(tsk)); if (esr) - pr_cont("%s, ESR 0x%08x, ", esr_get_class_string(esr), esr); + pr_cont("%s, ESR 0x%016lx, ", esr_get_class_string(esr), esr); pr_cont("%s", str); print_vma_addr(KERN_CONT " in ", regs->pc); @@ -288,7 +288,7 @@ void arm64_force_sig_ptrace_errno_trap(int errno, unsigned long far, void arm64_notify_die(const char *str, struct pt_regs *regs, int signo, int sicode, unsigned long far, - int err) + unsigned long err) { if (user_mode(regs)) { WARN_ON(regs != current_pt_regs()); @@ -440,7 +440,7 @@ exit: return fn ? fn(regs, instr) : 1; } -void force_signal_inject(int signal, int code, unsigned long address, unsigned int err) +void force_signal_inject(int signal, int code, unsigned long address, unsigned long err) { const char *desc; struct pt_regs *regs = current_pt_regs(); @@ -507,7 +507,7 @@ void do_bti(struct pt_regs *regs) } NOKPROBE_SYMBOL(do_bti); -void do_ptrauth_fault(struct pt_regs *regs, unsigned int esr) +void do_ptrauth_fault(struct pt_regs *regs, unsigned long esr) { /* * Unexpected FPAC exception or pointer authentication failure in @@ -538,7 +538,7 @@ NOKPROBE_SYMBOL(do_ptrauth_fault); uaccess_ttbr0_disable(); \ } -static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs) +static void user_cache_maint_handler(unsigned long esr, struct pt_regs *regs) { unsigned long tagged_address, address; int rt = ESR_ELx_SYS64_ISS_RT(esr); @@ -578,7 +578,7 @@ static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs) arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE); } -static void ctr_read_handler(unsigned int esr, struct pt_regs *regs) +static void ctr_read_handler(unsigned long esr, struct pt_regs *regs) { int rt = ESR_ELx_SYS64_ISS_RT(esr); unsigned long val = arm64_ftr_reg_user_value(&arm64_ftr_reg_ctrel0); @@ -597,7 +597,7 @@ static void ctr_read_handler(unsigned int esr, struct pt_regs *regs) arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE); } -static void cntvct_read_handler(unsigned int esr, struct pt_regs *regs) +static void cntvct_read_handler(unsigned long esr, struct pt_regs *regs) { int rt = ESR_ELx_SYS64_ISS_RT(esr); @@ -605,7 +605,7 @@ static void cntvct_read_handler(unsigned int esr, struct pt_regs *regs) arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE); } -static void cntfrq_read_handler(unsigned int esr, struct pt_regs *regs) +static void cntfrq_read_handler(unsigned long esr, struct pt_regs *regs) { int rt = ESR_ELx_SYS64_ISS_RT(esr); @@ -613,7 +613,7 @@ static void cntfrq_read_handler(unsigned int esr, struct pt_regs *regs) 
arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE); } -static void mrs_handler(unsigned int esr, struct pt_regs *regs) +static void mrs_handler(unsigned long esr, struct pt_regs *regs) { u32 sysreg, rt; @@ -624,15 +624,15 @@ static void mrs_handler(unsigned int esr, struct pt_regs *regs) force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0); } -static void wfi_handler(unsigned int esr, struct pt_regs *regs) +static void wfi_handler(unsigned long esr, struct pt_regs *regs) { arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE); } struct sys64_hook { - unsigned int esr_mask; - unsigned int esr_val; - void (*handler)(unsigned int esr, struct pt_regs *regs); + unsigned long esr_mask; + unsigned long esr_val; + void (*handler)(unsigned long esr, struct pt_regs *regs); }; static const struct sys64_hook sys64_hooks[] = { @@ -675,7 +675,7 @@ static const struct sys64_hook sys64_hooks[] = { }; #ifdef CONFIG_COMPAT -static bool cp15_cond_valid(unsigned int esr, struct pt_regs *regs) +static bool cp15_cond_valid(unsigned long esr, struct pt_regs *regs) { int cond; @@ -695,7 +695,7 @@ static bool cp15_cond_valid(unsigned int esr, struct pt_regs *regs) return aarch32_opcode_cond_checks[cond](regs->pstate); } -static void compat_cntfrq_read_handler(unsigned int esr, struct pt_regs *regs) +static void compat_cntfrq_read_handler(unsigned long esr, struct pt_regs *regs) { int reg = (esr & ESR_ELx_CP15_32_ISS_RT_MASK) >> ESR_ELx_CP15_32_ISS_RT_SHIFT; @@ -712,7 +712,7 @@ static const struct sys64_hook cp15_32_hooks[] = { {}, }; -static void compat_cntvct_read_handler(unsigned int esr, struct pt_regs *regs) +static void compat_cntvct_read_handler(unsigned long esr, struct pt_regs *regs) { int rt = (esr & ESR_ELx_CP15_64_ISS_RT_MASK) >> ESR_ELx_CP15_64_ISS_RT_SHIFT; int rt2 = (esr & ESR_ELx_CP15_64_ISS_RT2_MASK) >> ESR_ELx_CP15_64_ISS_RT2_SHIFT; @@ -732,7 +732,7 @@ static const struct sys64_hook cp15_64_hooks[] = { {}, }; -void do_cp15instr(unsigned int esr, struct pt_regs *regs) +void do_cp15instr(unsigned long esr, struct pt_regs *regs) { const struct sys64_hook *hook, *hook_base; @@ -773,7 +773,7 @@ void do_cp15instr(unsigned int esr, struct pt_regs *regs) NOKPROBE_SYMBOL(do_cp15instr); #endif -void do_sysinstr(unsigned int esr, struct pt_regs *regs) +void do_sysinstr(unsigned long esr, struct pt_regs *regs) { const struct sys64_hook *hook; @@ -837,7 +837,7 @@ static const char *esr_class_str[] = { [ESR_ELx_EC_BRK64] = "BRK (AArch64)", }; -const char *esr_get_class_string(u32 esr) +const char *esr_get_class_string(unsigned long esr) { return esr_class_str[ESR_ELx_EC(esr)]; } @@ -846,7 +846,7 @@ const char *esr_get_class_string(u32 esr) * bad_el0_sync handles unexpected, but potentially recoverable synchronous * exceptions taken from EL0. 
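 * Rather than bringing the kernel down, it records the ESR in the task's
 * thread struct and delivers SIGILL (ILL_ILLOPC) to the offending process.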
*/ -void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr) +void bad_el0_sync(struct pt_regs *regs, int reason, unsigned long esr) { unsigned long pc = instruction_pointer(regs); @@ -862,7 +862,7 @@ void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr) DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack) __aligned(16); -void panic_bad_stack(struct pt_regs *regs, unsigned int esr, unsigned long far) +void panic_bad_stack(struct pt_regs *regs, unsigned long esr, unsigned long far) { unsigned long tsk_stk = (unsigned long)current->stack; unsigned long irq_stk = (unsigned long)this_cpu_read(irq_stack_ptr); @@ -871,7 +871,7 @@ void panic_bad_stack(struct pt_regs *regs, unsigned int esr, unsigned long far) console_verbose(); pr_emerg("Insufficient stack space to handle exception!"); - pr_emerg("ESR: 0x%08x -- %s\n", esr, esr_get_class_string(esr)); + pr_emerg("ESR: 0x%016lx -- %s\n", esr, esr_get_class_string(esr)); pr_emerg("FAR: 0x%016lx\n", far); pr_emerg("Task stack: [0x%016lx..0x%016lx]\n", @@ -892,11 +892,11 @@ void panic_bad_stack(struct pt_regs *regs, unsigned int esr, unsigned long far) } #endif -void __noreturn arm64_serror_panic(struct pt_regs *regs, u32 esr) +void __noreturn arm64_serror_panic(struct pt_regs *regs, unsigned long esr) { console_verbose(); - pr_crit("SError Interrupt on CPU%d, code 0x%08x -- %s\n", + pr_crit("SError Interrupt on CPU%d, code 0x%016lx -- %s\n", smp_processor_id(), esr, esr_get_class_string(esr)); if (regs) __show_regs(regs); @@ -907,9 +907,9 @@ void __noreturn arm64_serror_panic(struct pt_regs *regs, u32 esr) unreachable(); } -bool arm64_is_fatal_ras_serror(struct pt_regs *regs, unsigned int esr) +bool arm64_is_fatal_ras_serror(struct pt_regs *regs, unsigned long esr) { - u32 aet = arm64_ras_serror_get_severity(esr); + unsigned long aet = arm64_ras_serror_get_severity(esr); switch (aet) { case ESR_ELx_AET_CE: /* corrected error */ @@ -939,7 +939,7 @@ bool arm64_is_fatal_ras_serror(struct pt_regs *regs, unsigned int esr) } } -void do_serror(struct pt_regs *regs, unsigned int esr) +void do_serror(struct pt_regs *regs, unsigned long esr) { /* non-RAS errors are not containable */ if (!arm64_is_ras_serror(esr) || arm64_is_fatal_ras_serror(regs, esr)) @@ -960,7 +960,7 @@ int is_valid_bugaddr(unsigned long addr) return 1; } -static int bug_handler(struct pt_regs *regs, unsigned int esr) +static int bug_handler(struct pt_regs *regs, unsigned long esr) { switch (report_bug(regs->pc, regs)) { case BUG_TRAP_TYPE_BUG: @@ -985,7 +985,7 @@ static struct break_hook bug_break_hook = { .imm = BUG_BRK_IMM, }; -static int reserved_fault_handler(struct pt_regs *regs, unsigned int esr) +static int reserved_fault_handler(struct pt_regs *regs, unsigned long esr) { pr_err("%s generated an invalid instruction at %pS!\n", "Kernel text patching", @@ -1007,7 +1007,7 @@ static struct break_hook fault_break_hook = { #define KASAN_ESR_SIZE_MASK 0x0f #define KASAN_ESR_SIZE(esr) (1 << ((esr) & KASAN_ESR_SIZE_MASK)) -static int kasan_handler(struct pt_regs *regs, unsigned int esr) +static int kasan_handler(struct pt_regs *regs, unsigned long esr) { bool recover = esr & KASAN_ESR_RECOVER; bool write = esr & KASAN_ESR_WRITE; @@ -1050,11 +1050,11 @@ static struct break_hook kasan_break_hook = { * Initial handler for AArch64 BRK exceptions * This handler only used until debug_traps_init(). 
*/ -int __init early_brk64(unsigned long addr, unsigned int esr, +int __init early_brk64(unsigned long addr, unsigned long esr, struct pt_regs *regs) { #ifdef CONFIG_KASAN_SW_TAGS - unsigned int comment = esr & ESR_ELx_BRK64_ISS_COMMENT_MASK; + unsigned long comment = esr & ESR_ELx_BRK64_ISS_COMMENT_MASK; if ((comment & ~KASAN_BRK_MASK) == KASAN_BRK_IMM) return kasan_handler(regs, esr) != DBG_HOOK_HANDLED; diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 9ae24e3b72be..d09b21faa0b2 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -43,7 +43,7 @@ #include struct fault_info { - int (*fn)(unsigned long far, unsigned int esr, + int (*fn)(unsigned long far, unsigned long esr, struct pt_regs *regs); int sig; int code; @@ -53,17 +53,17 @@ struct fault_info { static const struct fault_info fault_info[]; static struct fault_info debug_fault_info[]; -static inline const struct fault_info *esr_to_fault_info(unsigned int esr) +static inline const struct fault_info *esr_to_fault_info(unsigned long esr) { return fault_info + (esr & ESR_ELx_FSC); } -static inline const struct fault_info *esr_to_debug_fault_info(unsigned int esr) +static inline const struct fault_info *esr_to_debug_fault_info(unsigned long esr) { return debug_fault_info + DBG_ESR_EVT(esr); } -static void data_abort_decode(unsigned int esr) +static void data_abort_decode(unsigned long esr) { pr_alert("Data abort info:\n"); @@ -85,11 +85,11 @@ static void data_abort_decode(unsigned int esr) (esr & ESR_ELx_WNR) >> ESR_ELx_WNR_SHIFT); } -static void mem_abort_decode(unsigned int esr) +static void mem_abort_decode(unsigned long esr) { pr_alert("Mem abort info:\n"); - pr_alert(" ESR = 0x%08x\n", esr); + pr_alert(" ESR = 0x%016lx\n", esr); pr_alert(" EC = 0x%02lx: %s, IL = %u bits\n", ESR_ELx_EC(esr), esr_get_class_string(esr), (esr & ESR_ELx_IL) ? 
32 : 16); @@ -99,7 +99,7 @@ static void mem_abort_decode(unsigned int esr) pr_alert(" EA = %lu, S1PTW = %lu\n", (esr & ESR_ELx_EA) >> ESR_ELx_EA_SHIFT, (esr & ESR_ELx_S1PTW) >> ESR_ELx_S1PTW_SHIFT); - pr_alert(" FSC = 0x%02x: %s\n", (esr & ESR_ELx_FSC), + pr_alert(" FSC = 0x%02lx: %s\n", (esr & ESR_ELx_FSC), esr_to_fault_info(esr)->name); if (esr_is_data_abort(esr)) @@ -229,20 +229,20 @@ int ptep_set_access_flags(struct vm_area_struct *vma, return 1; } -static bool is_el1_instruction_abort(unsigned int esr) +static bool is_el1_instruction_abort(unsigned long esr) { return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_CUR; } -static bool is_el1_data_abort(unsigned int esr) +static bool is_el1_data_abort(unsigned long esr) { return ESR_ELx_EC(esr) == ESR_ELx_EC_DABT_CUR; } -static inline bool is_el1_permission_fault(unsigned long addr, unsigned int esr, +static inline bool is_el1_permission_fault(unsigned long addr, unsigned long esr, struct pt_regs *regs) { - unsigned int fsc_type = esr & ESR_ELx_FSC_TYPE; + unsigned long fsc_type = esr & ESR_ELx_FSC_TYPE; if (!is_el1_data_abort(esr) && !is_el1_instruction_abort(esr)) return false; @@ -258,7 +258,7 @@ static inline bool is_el1_permission_fault(unsigned long addr, unsigned int esr, } static bool __kprobes is_spurious_el1_translation_fault(unsigned long addr, - unsigned int esr, + unsigned long esr, struct pt_regs *regs) { unsigned long flags; @@ -290,7 +290,7 @@ static bool __kprobes is_spurious_el1_translation_fault(unsigned long addr, } static void die_kernel_fault(const char *msg, unsigned long addr, - unsigned int esr, struct pt_regs *regs) + unsigned long esr, struct pt_regs *regs) { bust_spinlocks(1); @@ -306,7 +306,7 @@ static void die_kernel_fault(const char *msg, unsigned long addr, } #ifdef CONFIG_KASAN_HW_TAGS -static void report_tag_fault(unsigned long addr, unsigned int esr, +static void report_tag_fault(unsigned long addr, unsigned long esr, struct pt_regs *regs) { /* @@ -318,11 +318,11 @@ static void report_tag_fault(unsigned long addr, unsigned int esr, } #else /* Tag faults aren't enabled without CONFIG_KASAN_HW_TAGS. 
*/ -static inline void report_tag_fault(unsigned long addr, unsigned int esr, +static inline void report_tag_fault(unsigned long addr, unsigned long esr, struct pt_regs *regs) { } #endif -static void do_tag_recovery(unsigned long addr, unsigned int esr, +static void do_tag_recovery(unsigned long addr, unsigned long esr, struct pt_regs *regs) { @@ -337,9 +337,9 @@ static void do_tag_recovery(unsigned long addr, unsigned int esr, isb(); } -static bool is_el1_mte_sync_tag_check_fault(unsigned int esr) +static bool is_el1_mte_sync_tag_check_fault(unsigned long esr) { - unsigned int fsc = esr & ESR_ELx_FSC; + unsigned long fsc = esr & ESR_ELx_FSC; if (!is_el1_data_abort(esr)) return false; @@ -350,7 +350,12 @@ static bool is_el1_mte_sync_tag_check_fault(unsigned int esr) return false; } -static void __do_kernel_fault(unsigned long addr, unsigned int esr, +static bool is_translation_fault(unsigned long esr) +{ + return (esr & ESR_ELx_FSC_TYPE) == ESR_ELx_FSC_FAULT; +} + +static void __do_kernel_fault(unsigned long addr, unsigned long esr, struct pt_regs *regs) { const char *msg; @@ -382,7 +387,8 @@ static void __do_kernel_fault(unsigned long addr, unsigned int esr, } else if (addr < PAGE_SIZE) { msg = "NULL pointer dereference"; } else { - if (kfence_handle_page_fault(addr, esr & ESR_ELx_WNR, regs)) + if (is_translation_fault(esr) && + kfence_handle_page_fault(addr, esr & ESR_ELx_WNR, regs)) return; msg = "paging request"; @@ -391,7 +397,7 @@ static void __do_kernel_fault(unsigned long addr, unsigned int esr, die_kernel_fault(msg, addr, esr, regs); } -static void set_thread_esr(unsigned long address, unsigned int esr) +static void set_thread_esr(unsigned long address, unsigned long esr) { current->thread.fault_address = address; @@ -439,7 +445,7 @@ static void set_thread_esr(unsigned long address, unsigned int esr) * exception level). Fail safe by not providing an ESR * context record at all. */ - WARN(1, "ESR 0x%x is not DABT or IABT from EL0\n", esr); + WARN(1, "ESR 0x%lx is not DABT or IABT from EL0\n", esr); esr = 0; break; } @@ -448,7 +454,7 @@ static void set_thread_esr(unsigned long address, unsigned int esr) current->thread.fault_code = esr; } -static void do_bad_area(unsigned long far, unsigned int esr, +static void do_bad_area(unsigned long far, unsigned long esr, struct pt_regs *regs) { unsigned long addr = untagged_addr(far); @@ -499,7 +505,7 @@ static vm_fault_t __do_page_fault(struct mm_struct *mm, unsigned long addr, return handle_mm_fault(vma, addr, mm_flags, regs); } -static bool is_el0_instruction_abort(unsigned int esr) +static bool is_el0_instruction_abort(unsigned long esr) { return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_LOW; } @@ -508,12 +514,12 @@ static bool is_el0_instruction_abort(unsigned int esr) * Note: not valid for EL1 DC IVAC, but we never use that such that it * should fault. EL0 cannot issue DC IVAC (undef). 
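 * The ESR_ELx_CM check below matters because WnR also reads as 1 for
 * faulting cache maintenance operations: a plain store that faults reports
 * WnR=1/CM=0 and is treated as a write, while e.g. a faulting "dc civac"
 * reports WnR=1/CM=1 and is not.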
*/ -static bool is_write_abort(unsigned int esr) +static bool is_write_abort(unsigned long esr) { return (esr & ESR_ELx_WNR) && !(esr & ESR_ELx_CM); } -static int __kprobes do_page_fault(unsigned long far, unsigned int esr, +static int __kprobes do_page_fault(unsigned long far, unsigned long esr, struct pt_regs *regs) { const struct fault_info *inf; @@ -671,7 +677,7 @@ no_context: } static int __kprobes do_translation_fault(unsigned long far, - unsigned int esr, + unsigned long esr, struct pt_regs *regs) { unsigned long addr = untagged_addr(far); @@ -683,19 +689,19 @@ static int __kprobes do_translation_fault(unsigned long far, return 0; } -static int do_alignment_fault(unsigned long far, unsigned int esr, +static int do_alignment_fault(unsigned long far, unsigned long esr, struct pt_regs *regs) { do_bad_area(far, esr, regs); return 0; } -static int do_bad(unsigned long far, unsigned int esr, struct pt_regs *regs) +static int do_bad(unsigned long far, unsigned long esr, struct pt_regs *regs) { return 1; /* "fault" */ } -static int do_sea(unsigned long far, unsigned int esr, struct pt_regs *regs) +static int do_sea(unsigned long far, unsigned long esr, struct pt_regs *regs) { const struct fault_info *inf; unsigned long siaddr; @@ -725,7 +731,7 @@ static int do_sea(unsigned long far, unsigned int esr, struct pt_regs *regs) return 0; } -static int do_tag_check_fault(unsigned long far, unsigned int esr, +static int do_tag_check_fault(unsigned long far, unsigned long esr, struct pt_regs *regs) { /* @@ -805,7 +811,7 @@ static const struct fault_info fault_info[] = { { do_bad, SIGKILL, SI_KERNEL, "unknown 63" }, }; -void do_mem_abort(unsigned long far, unsigned int esr, struct pt_regs *regs) +void do_mem_abort(unsigned long far, unsigned long esr, struct pt_regs *regs) { const struct fault_info *inf = esr_to_fault_info(esr); unsigned long addr = untagged_addr(far); @@ -828,14 +834,14 @@ void do_mem_abort(unsigned long far, unsigned int esr, struct pt_regs *regs) } NOKPROBE_SYMBOL(do_mem_abort); -void do_sp_pc_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs) +void do_sp_pc_abort(unsigned long addr, unsigned long esr, struct pt_regs *regs) { arm64_notify_die("SP/PC alignment exception", regs, SIGBUS, BUS_ADRALN, addr, esr); } NOKPROBE_SYMBOL(do_sp_pc_abort); -int __init early_brk64(unsigned long addr, unsigned int esr, +int __init early_brk64(unsigned long addr, unsigned long esr, struct pt_regs *regs); /* @@ -855,7 +861,7 @@ static struct fault_info __refdata debug_fault_info[] = { }; void __init hook_debug_fault_code(int nr, - int (*fn)(unsigned long, unsigned int, struct pt_regs *), + int (*fn)(unsigned long, unsigned long, struct pt_regs *), int sig, int code, const char *name) { BUG_ON(nr < 0 || nr >= ARRAY_SIZE(debug_fault_info)); @@ -888,7 +894,7 @@ static void debug_exception_exit(struct pt_regs *regs) } NOKPROBE_SYMBOL(debug_exception_exit); -void do_debug_exception(unsigned long addr_if_watchpoint, unsigned int esr, +void do_debug_exception(unsigned long addr_if_watchpoint, unsigned long esr, struct pt_regs *regs) { const struct fault_info *inf = esr_to_debug_fault_info(esr); diff --git a/arch/mips/bcm63xx/clk.c b/arch/mips/bcm63xx/clk.c index 6e6756e8fa0a..86a6e2590866 100644 --- a/arch/mips/bcm63xx/clk.c +++ b/arch/mips/bcm63xx/clk.c @@ -361,6 +361,8 @@ static struct clk clk_periph = { */ int clk_enable(struct clk *clk) { + if (!clk) + return 0; mutex_lock(&clocks_mutex); clk_enable_unlocked(clk); mutex_unlock(&clocks_mutex); diff --git 
a/arch/mips/cavium-octeon/executive/cvmx-helper-board.c b/arch/mips/cavium-octeon/executive/cvmx-helper-board.c index 1daa0c6b6f4e..572a053e30ed 100644 --- a/arch/mips/cavium-octeon/executive/cvmx-helper-board.c +++ b/arch/mips/cavium-octeon/executive/cvmx-helper-board.c @@ -211,7 +211,7 @@ union cvmx_helper_link_info __cvmx_helper_board_link_get(int ipd_port) { union cvmx_helper_link_info result; - WARN(!octeon_is_simulation(), + WARN_ONCE(!octeon_is_simulation(), "Using deprecated link status - please update your DT"); /* Unless we fix it later, all links are defaulted to down */ diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper.c b/arch/mips/cavium-octeon/executive/cvmx-helper.c index 6044ff471002..a18ad2daf005 100644 --- a/arch/mips/cavium-octeon/executive/cvmx-helper.c +++ b/arch/mips/cavium-octeon/executive/cvmx-helper.c @@ -1100,7 +1100,7 @@ union cvmx_helper_link_info cvmx_helper_link_get(int ipd_port) if (index == 0) result = __cvmx_helper_rgmii_link_get(ipd_port); else { - WARN(1, "Using deprecated link status - please update your DT"); + WARN_ONCE(1, "Using deprecated link status - please update your DT"); result.s.full_duplex = 1; result.s.link_up = 1; result.s.speed = 1000; diff --git a/arch/mips/include/asm/mach-ralink/mt7621.h b/arch/mips/include/asm/mach-ralink/mt7621.h index 6bbf082dd149..79d5bb0e06d6 100644 --- a/arch/mips/include/asm/mach-ralink/mt7621.h +++ b/arch/mips/include/asm/mach-ralink/mt7621.h @@ -7,10 +7,12 @@ #ifndef _MT7621_REGS_H_ #define _MT7621_REGS_H_ +#define IOMEM(x) ((void __iomem *)(KSEG1ADDR(x))) + #define MT7621_PALMBUS_BASE 0x1C000000 #define MT7621_PALMBUS_SIZE 0x03FFFFFF -#define MT7621_SYSC_BASE 0x1E000000 +#define MT7621_SYSC_BASE IOMEM(0x1E000000) #define SYSC_REG_CHIP_NAME0 0x00 #define SYSC_REG_CHIP_NAME1 0x04 diff --git a/arch/mips/kernel/vpe-cmp.c b/arch/mips/kernel/vpe-cmp.c index e673603e11e5..92140edb3ce3 100644 --- a/arch/mips/kernel/vpe-cmp.c +++ b/arch/mips/kernel/vpe-cmp.c @@ -75,7 +75,6 @@ ATTRIBUTE_GROUPS(vpe); static void vpe_device_release(struct device *cd) { - kfree(cd); } static struct class vpe_class = { @@ -157,6 +156,7 @@ out_dev: device_del(&vpe_device); out_class: + put_device(&vpe_device); class_unregister(&vpe_class); out_chrdev: @@ -169,7 +169,7 @@ void __exit vpe_module_exit(void) { struct vpe *v, *n; - device_del(&vpe_device); + device_unregister(&vpe_device); class_unregister(&vpe_class); unregister_chrdev(major, VPE_MODULE_NAME); diff --git a/arch/mips/kernel/vpe-mt.c b/arch/mips/kernel/vpe-mt.c index bad6b0891b2b..84a82b551ec3 100644 --- a/arch/mips/kernel/vpe-mt.c +++ b/arch/mips/kernel/vpe-mt.c @@ -313,7 +313,6 @@ ATTRIBUTE_GROUPS(vpe); static void vpe_device_release(struct device *cd) { - kfree(cd); } static struct class vpe_class = { @@ -497,6 +496,7 @@ out_dev: device_del(&vpe_device); out_class: + put_device(&vpe_device); class_unregister(&vpe_class); out_chrdev: @@ -509,7 +509,7 @@ void __exit vpe_module_exit(void) { struct vpe *v, *n; - device_del(&vpe_device); + device_unregister(&vpe_device); class_unregister(&vpe_class); unregister_chrdev(major, VPE_MODULE_NAME); diff --git a/arch/mips/ralink/mt7621.c b/arch/mips/ralink/mt7621.c index 4c8378661219..0db23bcf2a97 100644 --- a/arch/mips/ralink/mt7621.c +++ b/arch/mips/ralink/mt7621.c @@ -23,6 +23,7 @@ #define MT7621_MEM_TEST_PATTERN 0xaa5555aa static u32 detect_magic __initdata; +static struct ralink_soc_info *soc_info_ptr; phys_addr_t mips_cpc_default_phys_base(void) { @@ -66,41 +67,83 @@ void __init ralink_of_remap(void) panic("Failed to remap 
core resources"); } -static void soc_dev_init(struct ralink_soc_info *soc_info, u32 rev) +static unsigned int __init mt7621_get_soc_name0(void) +{ + return __raw_readl(MT7621_SYSC_BASE + SYSC_REG_CHIP_NAME0); +} + +static unsigned int __init mt7621_get_soc_name1(void) +{ + return __raw_readl(MT7621_SYSC_BASE + SYSC_REG_CHIP_NAME1); +} + +static bool __init mt7621_soc_valid(void) +{ + if (mt7621_get_soc_name0() == MT7621_CHIP_NAME0 && + mt7621_get_soc_name1() == MT7621_CHIP_NAME1) + return true; + else + return false; +} + +static const char __init *mt7621_get_soc_id(void) +{ + if (mt7621_soc_valid()) + return "MT7621"; + else + return "invalid"; +} + +static unsigned int __init mt7621_get_soc_rev(void) +{ + return __raw_readl(MT7621_SYSC_BASE + SYSC_REG_CHIP_REV); +} + +static unsigned int __init mt7621_get_soc_ver(void) +{ + return (mt7621_get_soc_rev() >> CHIP_REV_VER_SHIFT) & CHIP_REV_VER_MASK; +} + +static unsigned int __init mt7621_get_soc_eco(void) +{ + return (mt7621_get_soc_rev() & CHIP_REV_ECO_MASK); +} + +static const char __init *mt7621_get_soc_revision(void) +{ + if (mt7621_get_soc_rev() == 1 && mt7621_get_soc_eco() == 1) + return "E2"; + else + return "E1"; +} + +static int __init mt7621_soc_dev_init(void) { struct soc_device *soc_dev; struct soc_device_attribute *soc_dev_attr; soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL); if (!soc_dev_attr) - return; + return -ENOMEM; soc_dev_attr->soc_id = "mt7621"; soc_dev_attr->family = "Ralink"; + soc_dev_attr->revision = mt7621_get_soc_revision(); - if (((rev >> CHIP_REV_VER_SHIFT) & CHIP_REV_VER_MASK) == 1 && - (rev & CHIP_REV_ECO_MASK) == 1) - soc_dev_attr->revision = "E2"; - else - soc_dev_attr->revision = "E1"; - - soc_dev_attr->data = soc_info; + soc_dev_attr->data = soc_info_ptr; soc_dev = soc_device_register(soc_dev_attr); if (IS_ERR(soc_dev)) { kfree(soc_dev_attr); - return; + return PTR_ERR(soc_dev); } + + return 0; } +device_initcall(mt7621_soc_dev_init); void __init prom_soc_init(struct ralink_soc_info *soc_info) { - void __iomem *sysc = (void __iomem *) KSEG1ADDR(MT7621_SYSC_BASE); - unsigned char *name = NULL; - u32 n0; - u32 n1; - u32 rev; - /* Early detection of CMP support */ mips_cm_probe(); mips_cpc_probe(); @@ -123,27 +166,23 @@ void __init prom_soc_init(struct ralink_soc_info *soc_info) __sync(); } - n0 = __raw_readl(sysc + SYSC_REG_CHIP_NAME0); - n1 = __raw_readl(sysc + SYSC_REG_CHIP_NAME1); - - if (n0 == MT7621_CHIP_NAME0 && n1 == MT7621_CHIP_NAME1) { - name = "MT7621"; + if (mt7621_soc_valid()) soc_info->compatible = "mediatek,mt7621-soc"; - } else { - panic("mt7621: unknown SoC, n0:%08x n1:%08x\n", n0, n1); - } + else + panic("mt7621: unknown SoC, n0:%08x n1:%08x\n", + mt7621_get_soc_name0(), + mt7621_get_soc_name1()); ralink_soc = MT762X_SOC_MT7621AT; - rev = __raw_readl(sysc + SYSC_REG_CHIP_REV); snprintf(soc_info->sys_type, RAMIPS_SYS_TYPE_LEN, "MediaTek %s ver:%u eco:%u", - name, - (rev >> CHIP_REV_VER_SHIFT) & CHIP_REV_VER_MASK, - (rev & CHIP_REV_ECO_MASK)); + mt7621_get_soc_id(), + mt7621_get_soc_ver(), + mt7621_get_soc_eco()); soc_info->mem_detect = mt7621_memory_detect; - soc_dev_init(soc_info, rev); + soc_info_ptr = soc_info; if (!register_cps_smp_ops()) return; diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c index 7834ce3aa7f1..2dae702e7a5a 100644 --- a/arch/powerpc/kernel/rtas.c +++ b/arch/powerpc/kernel/rtas.c @@ -788,6 +788,7 @@ void __noreturn rtas_halt(void) /* Must be in the RMO region, so we place it here */ static char rtas_os_term_buf[2048]; +static s32 
ibm_os_term_token = RTAS_UNKNOWN_SERVICE; void rtas_os_term(char *str) { @@ -799,16 +800,20 @@ void rtas_os_term(char *str) * this property may terminate the partition which we want to avoid * since it interferes with panic_timeout. */ - if (RTAS_UNKNOWN_SERVICE == rtas_token("ibm,os-term") || - RTAS_UNKNOWN_SERVICE == rtas_token("ibm,extended-os-term")) + if (ibm_os_term_token == RTAS_UNKNOWN_SERVICE) return; snprintf(rtas_os_term_buf, 2048, "OS panic: %s", str); + /* + * Keep calling as long as RTAS returns a "try again" status, + * but don't use rtas_busy_delay(), which potentially + * schedules. + */ do { - status = rtas_call(rtas_token("ibm,os-term"), 1, 1, NULL, + status = rtas_call(ibm_os_term_token, 1, 1, NULL, __pa(rtas_os_term_buf)); - } while (rtas_busy_delay(status)); + } while (rtas_busy_delay_time(status)); if (status != 0) printk(KERN_EMERG "ibm,os-term call failed %d\n", status); @@ -1167,6 +1172,13 @@ void __init rtas_initialize(void) no_entry = of_property_read_u32(rtas.dev, "linux,rtas-entry", &entry); rtas.entry = no_entry ? rtas.base : entry; + /* + * Discover these now to avoid device tree lookups in the + * panic path. + */ + if (of_property_read_bool(rtas.dev, "ibm,extended-os-term")) + ibm_os_term_token = rtas_token("ibm,os-term"); + /* If RTAS was found, allocate the RMO buffer for it and look for * the stop-self token if any */ diff --git a/arch/powerpc/perf/callchain.c b/arch/powerpc/perf/callchain.c index 082f6d0308a4..8718289c051d 100644 --- a/arch/powerpc/perf/callchain.c +++ b/arch/powerpc/perf/callchain.c @@ -61,6 +61,7 @@ perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *re next_sp = fp[0]; if (next_sp == sp + STACK_INT_FRAME_SIZE && + validate_sp(sp, current, STACK_INT_FRAME_SIZE) && fp[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) { /* * This looks like an interrupt frame for an diff --git a/arch/powerpc/perf/hv-gpci-requests.h b/arch/powerpc/perf/hv-gpci-requests.h index 8965b4463d43..5e86371a20c7 100644 --- a/arch/powerpc/perf/hv-gpci-requests.h +++ b/arch/powerpc/perf/hv-gpci-requests.h @@ -79,6 +79,7 @@ REQUEST(__field(0, 8, partition_id) ) #include I(REQUEST_END) +#ifdef ENABLE_EVENTS_COUNTERINFO_V6 /* * Not available for counter_info_version >= 0x8, use * run_instruction_cycles_by_partition(0x100) instead. @@ -92,6 +93,7 @@ REQUEST(__field(0, 8, partition_id) __count(0x10, 8, cycles) ) #include I(REQUEST_END) +#endif #define REQUEST_NAME system_performance_capabilities #define REQUEST_NUM 0x40 @@ -103,6 +105,7 @@ REQUEST(__field(0, 1, perf_collect_privileged) ) #include I(REQUEST_END) +#ifdef ENABLE_EVENTS_COUNTERINFO_V6 #define REQUEST_NAME processor_bus_utilization_abc_links #define REQUEST_NUM 0x50 #define REQUEST_IDX_KIND "hw_chip_id=?" 
@@ -194,6 +197,7 @@ REQUEST(__field(0, 4, phys_processor_idx) __count(0x28, 8, instructions_completed) ) #include I(REQUEST_END) +#endif /* Processor_core_power_mode (0x95) skipped, no counters */ /* Affinity_domain_information_by_virtual_processor (0xA0) skipped, diff --git a/arch/powerpc/perf/hv-gpci.c b/arch/powerpc/perf/hv-gpci.c index c756228a081f..28b770bbc10b 100644 --- a/arch/powerpc/perf/hv-gpci.c +++ b/arch/powerpc/perf/hv-gpci.c @@ -72,7 +72,7 @@ static struct attribute_group format_group = { static struct attribute_group event_group = { .name = "events", - .attrs = hv_gpci_event_attrs, + /* .attrs is set in init */ }; #define HV_CAPS_ATTR(_name, _format) \ @@ -330,6 +330,7 @@ static int hv_gpci_init(void) int r; unsigned long hret; struct hv_perf_caps caps; + struct hv_gpci_request_buffer *arg; hv_gpci_assert_offsets_correct(); @@ -353,6 +354,36 @@ static int hv_gpci_init(void) /* sampling not supported */ h_gpci_pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT; + arg = (void *)get_cpu_var(hv_gpci_reqb); + memset(arg, 0, HGPCI_REQ_BUFFER_SIZE); + + /* + * hcall H_GET_PERF_COUNTER_INFO populates the output + * counter_info_version value based on the system hypervisor. + * Pass the counter request 0x10 corresponds to request type + * 'Dispatch_timebase_by_processor', to get the supported + * counter_info_version. + */ + arg->params.counter_request = cpu_to_be32(0x10); + + r = plpar_hcall_norets(H_GET_PERF_COUNTER_INFO, + virt_to_phys(arg), HGPCI_REQ_BUFFER_SIZE); + if (r) { + pr_devel("hcall failed, can't get supported counter_info_version: 0x%x\n", r); + arg->params.counter_info_version_out = 0x8; + } + + /* + * Use counter_info_version_out value to assign + * required hv-gpci event list. + */ + if (arg->params.counter_info_version_out >= 0x8) + event_group.attrs = hv_gpci_event_attrs; + else + event_group.attrs = hv_gpci_event_attrs_v6; + + put_cpu_var(hv_gpci_reqb); + r = perf_pmu_register(&h_gpci_pmu, h_gpci_pmu.name, -1); if (r) return r; diff --git a/arch/powerpc/perf/hv-gpci.h b/arch/powerpc/perf/hv-gpci.h index 4d108262bed7..c72020912dea 100644 --- a/arch/powerpc/perf/hv-gpci.h +++ b/arch/powerpc/perf/hv-gpci.h @@ -26,6 +26,7 @@ enum { #define REQUEST_FILE "../hv-gpci-requests.h" #define NAME_LOWER hv_gpci #define NAME_UPPER HV_GPCI +#define ENABLE_EVENTS_COUNTERINFO_V6 #include "req-gen/perf.h" #undef REQUEST_FILE #undef NAME_LOWER diff --git a/arch/powerpc/perf/req-gen/perf.h b/arch/powerpc/perf/req-gen/perf.h index fa9bc804e67a..6b2a59fefffa 100644 --- a/arch/powerpc/perf/req-gen/perf.h +++ b/arch/powerpc/perf/req-gen/perf.h @@ -139,6 +139,26 @@ PMU_EVENT_ATTR_STRING( \ #define REQUEST_(r_name, r_value, r_idx_1, r_fields) \ r_fields +/* Generate event list for platforms with counter_info_version 0x6 or below */ +static __maybe_unused struct attribute *hv_gpci_event_attrs_v6[] = { +#include REQUEST_FILE + NULL +}; + +/* + * Based on getPerfCountInfo v1.018 documentation, some of the hv-gpci + * events were deprecated for platform firmware that supports + * counter_info_version 0x8 or above. + * Those deprecated events are still part of platform firmware that + * support counter_info_version 0x6 and below. As per the getPerfCountInfo + * v1.018 documentation there is no counter_info_version 0x7. + * Undefining macro ENABLE_EVENTS_COUNTERINFO_V6, to disable the addition of + * deprecated events in "hv_gpci_event_attrs" attribute group, for platforms + * that supports counter_info_version 0x8 or above. 
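+ *
+ * The PMU init code in hv-gpci.c then selects the matching list at runtime
+ * from the counter_info_version reported by the hypervisor:
+ *
+ *	if (arg->params.counter_info_version_out >= 0x8)
+ *		event_group.attrs = hv_gpci_event_attrs;
+ *	else
+ *		event_group.attrs = hv_gpci_event_attrs_v6;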
+ */ +#undef ENABLE_EVENTS_COUNTERINFO_V6 + +/* Generate event list for platforms with counter_info_version 0x8 or above*/ static __maybe_unused struct attribute *hv_gpci_event_attrs[] = { #include REQUEST_FILE NULL diff --git a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c index b91ebebd9ff2..e0049b7df212 100644 --- a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c +++ b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c @@ -530,6 +530,7 @@ static int mpc52xx_lpbfifo_probe(struct platform_device *op) err_bcom_rx_irq: bcom_gen_bd_rx_release(lpbfifo.bcom_rx_task); err_bcom_rx: + free_irq(lpbfifo.irq, &lpbfifo); err_irq: iounmap(lpbfifo.regs); lpbfifo.regs = NULL; diff --git a/arch/powerpc/platforms/83xx/mpc832x_rdb.c b/arch/powerpc/platforms/83xx/mpc832x_rdb.c index b6133a237a70..6e18d0703568 100644 --- a/arch/powerpc/platforms/83xx/mpc832x_rdb.c +++ b/arch/powerpc/platforms/83xx/mpc832x_rdb.c @@ -106,7 +106,7 @@ static int __init of_fsl_spi_probe(char *type, char *compatible, u32 sysclk, goto next; unreg: - platform_device_del(pdev); + platform_device_put(pdev); err: pr_err("%pOF: registration failed\n", np); next: diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c index 09fafcf2d3a0..f51fd4ac3f0b 100644 --- a/arch/powerpc/platforms/pseries/eeh_pseries.c +++ b/arch/powerpc/platforms/pseries/eeh_pseries.c @@ -845,18 +845,8 @@ static int __init eeh_pseries_init(void) return -EINVAL; } - /* Initialize error log lock and size */ - spin_lock_init(&slot_errbuf_lock); - eeh_error_buf_size = rtas_token("rtas-error-log-max"); - if (eeh_error_buf_size == RTAS_UNKNOWN_SERVICE) { - pr_info("%s: unknown EEH error log size\n", - __func__); - eeh_error_buf_size = 1024; - } else if (eeh_error_buf_size > RTAS_ERROR_LOG_MAX) { - pr_info("%s: EEH error log size %d exceeds the maximal %d\n", - __func__, eeh_error_buf_size, RTAS_ERROR_LOG_MAX); - eeh_error_buf_size = RTAS_ERROR_LOG_MAX; - } + /* Initialize error log size */ + eeh_error_buf_size = rtas_get_error_log_max(); /* Set EEH probe mode */ eeh_add_flag(EEH_PROBE_MODE_DEVTREE | EEH_ENABLE_IO_FOR_LOG); diff --git a/arch/powerpc/sysdev/xive/spapr.c b/arch/powerpc/sysdev/xive/spapr.c index 2bf78a30238b..43bd2579d942 100644 --- a/arch/powerpc/sysdev/xive/spapr.c +++ b/arch/powerpc/sysdev/xive/spapr.c @@ -437,6 +437,7 @@ static int xive_spapr_populate_irq_data(u32 hw_irq, struct xive_irq_data *data) data->trig_mmio = ioremap(data->trig_page, 1u << data->esb_shift); if (!data->trig_mmio) { + iounmap(data->eoi_mmio); pr_err("Failed to map trigger page for irq 0x%x\n", hw_irq); return -ENOMEM; } diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index 8b28ff9d98d1..3c085e1e5232 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -1528,9 +1528,9 @@ bpt_cmds(void) cmd = inchar(); switch (cmd) { - static const char badaddr[] = "Only kernel addresses are permitted for breakpoints\n"; - int mode; - case 'd': /* bd - hardware data breakpoint */ + case 'd': { /* bd - hardware data breakpoint */ + static const char badaddr[] = "Only kernel addresses are permitted for breakpoints\n"; + int mode; if (xmon_is_ro) { printf(xmon_ro_msg); break; @@ -1563,6 +1563,7 @@ bpt_cmds(void) force_enable_xmon(); break; + } case 'i': /* bi - hardware instr breakpoint */ if (xmon_is_ro) { diff --git a/arch/riscv/include/asm/hugetlb.h b/arch/riscv/include/asm/hugetlb.h index a5c2ca1d1cd8..ec19d6afc896 100644 --- a/arch/riscv/include/asm/hugetlb.h +++ 
b/arch/riscv/include/asm/hugetlb.h @@ -5,4 +5,10 @@ #include #include +static inline void arch_clear_hugepage_flags(struct page *page) +{ + clear_bit(PG_dcache_clean, &page->flags); +} +#define arch_clear_hugepage_flags arch_clear_hugepage_flags + #endif /* _ASM_RISCV_HUGETLB_H */ diff --git a/arch/riscv/include/asm/mmu.h b/arch/riscv/include/asm/mmu.h index 0099dc116168..5ff1f19fd45c 100644 --- a/arch/riscv/include/asm/mmu.h +++ b/arch/riscv/include/asm/mmu.h @@ -19,6 +19,8 @@ typedef struct { #ifdef CONFIG_SMP /* A local icache flush is needed before user execution can resume. */ cpumask_t icache_stale_mask; + /* A local tlb flush is needed before user execution can resume. */ + cpumask_t tlb_stale_mask; #endif } mm_context_t; diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h index 39b550310ec6..799c16e06525 100644 --- a/arch/riscv/include/asm/pgtable.h +++ b/arch/riscv/include/asm/pgtable.h @@ -386,7 +386,7 @@ static inline void update_mmu_cache(struct vm_area_struct *vma, * Relying on flush_tlb_fix_spurious_fault would suffice, but * the extra traps reduce performance. So, eagerly SFENCE.VMA. */ - local_flush_tlb_page(address); + flush_tlb_page(vma, address); } static inline void update_mmu_cache_pmd(struct vm_area_struct *vma, diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h index 801019381dea..907b9efd39a8 100644 --- a/arch/riscv/include/asm/tlbflush.h +++ b/arch/riscv/include/asm/tlbflush.h @@ -22,6 +22,24 @@ static inline void local_flush_tlb_page(unsigned long addr) { ALT_FLUSH_TLB_PAGE(__asm__ __volatile__ ("sfence.vma %0" : : "r" (addr) : "memory")); } + +static inline void local_flush_tlb_all_asid(unsigned long asid) +{ + __asm__ __volatile__ ("sfence.vma x0, %0" + : + : "r" (asid) + : "memory"); +} + +static inline void local_flush_tlb_page_asid(unsigned long addr, + unsigned long asid) +{ + __asm__ __volatile__ ("sfence.vma %0, %1" + : + : "r" (addr), "r" (asid) + : "memory"); +} + #else /* CONFIG_MMU */ #define local_flush_tlb_all() do { } while (0) #define local_flush_tlb_page(addr) do { } while (0) diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h index f314ff44c48d..d4d628af21a4 100644 --- a/arch/riscv/include/asm/uaccess.h +++ b/arch/riscv/include/asm/uaccess.h @@ -216,7 +216,7 @@ do { \ might_fault(); \ access_ok(__p, sizeof(*__p)) ? 
\ __get_user((x), __p) : \ - ((x) = 0, -EFAULT); \ + ((x) = (__force __typeof__(x))0, -EFAULT); \ }) #define __put_user_asm(insn, x, ptr, err) \ diff --git a/arch/riscv/kernel/probes/simulate-insn.h b/arch/riscv/kernel/probes/simulate-insn.h index cb6ff7dccb92..de8474146a9b 100644 --- a/arch/riscv/kernel/probes/simulate-insn.h +++ b/arch/riscv/kernel/probes/simulate-insn.h @@ -31,9 +31,9 @@ __RISCV_INSN_FUNCS(fence, 0x7f, 0x0f); } while (0) __RISCV_INSN_FUNCS(c_j, 0xe003, 0xa001); -__RISCV_INSN_FUNCS(c_jr, 0xf007, 0x8002); +__RISCV_INSN_FUNCS(c_jr, 0xf07f, 0x8002); __RISCV_INSN_FUNCS(c_jal, 0xe003, 0x2001); -__RISCV_INSN_FUNCS(c_jalr, 0xf007, 0x9002); +__RISCV_INSN_FUNCS(c_jalr, 0xf07f, 0x9002); __RISCV_INSN_FUNCS(c_beqz, 0xe003, 0xc001); __RISCV_INSN_FUNCS(c_bnez, 0xe003, 0xe001); __RISCV_INSN_FUNCS(c_ebreak, 0xffff, 0x9002); diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c index c2601150b91c..811e837a8c4e 100644 --- a/arch/riscv/kernel/stacktrace.c +++ b/arch/riscv/kernel/stacktrace.c @@ -60,7 +60,7 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs, } else { fp = frame->fp; pc = ftrace_graph_ret_addr(current, NULL, frame->ra, - (unsigned long *)(fp - 8)); + &frame->ra); } } diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c index 2f4cd85fb651..4102c97309cc 100644 --- a/arch/riscv/kernel/traps.c +++ b/arch/riscv/kernel/traps.c @@ -211,7 +211,7 @@ static DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], * shadow stack, handled_ kernel_ stack_ overflow(in kernel/entry.S) is used * to get per-cpu overflow stack(get_overflow_stack). */ -long shadow_stack[SHADOW_OVERFLOW_STACK_SIZE/sizeof(long)]; +long shadow_stack[SHADOW_OVERFLOW_STACK_SIZE/sizeof(long)] __aligned(16); asmlinkage unsigned long get_overflow_stack(void) { return (unsigned long)this_cpu_ptr(overflow_stack) + diff --git a/arch/riscv/mm/context.c b/arch/riscv/mm/context.c index ee3459cb6750..cc4a47bda82a 100644 --- a/arch/riscv/mm/context.c +++ b/arch/riscv/mm/context.c @@ -196,6 +196,16 @@ switch_mm_fast: if (need_flush_tlb) local_flush_tlb_all(); +#ifdef CONFIG_SMP + else { + cpumask_t *mask = &mm->context.tlb_stale_mask; + + if (cpumask_test_cpu(cpu, mask)) { + cpumask_clear_cpu(cpu, mask); + local_flush_tlb_all_asid(cntx & asid_mask); + } + } +#endif } static void set_mm_noasid(struct mm_struct *mm) diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c index 64f8201237c2..efefc3986c48 100644 --- a/arch/riscv/mm/tlbflush.c +++ b/arch/riscv/mm/tlbflush.c @@ -5,23 +5,7 @@ #include #include #include - -static inline void local_flush_tlb_all_asid(unsigned long asid) -{ - __asm__ __volatile__ ("sfence.vma x0, %0" - : - : "r" (asid) - : "memory"); -} - -static inline void local_flush_tlb_page_asid(unsigned long addr, - unsigned long asid) -{ - __asm__ __volatile__ ("sfence.vma %0, %1" - : - : "r" (addr), "r" (asid) - : "memory"); -} +#include void flush_tlb_all(void) { @@ -31,6 +15,7 @@ void flush_tlb_all(void) static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start, unsigned long size, unsigned long stride) { + struct cpumask *pmask = &mm->context.tlb_stale_mask; struct cpumask *cmask = mm_cpumask(mm); struct cpumask hmask; unsigned int cpuid; @@ -45,6 +30,15 @@ static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start, if (static_branch_unlikely(&use_asid_allocator)) { unsigned long asid = atomic_long_read(&mm->context.id); + /* + * TLB will be immediately flushed on harts concurrently + * 
executing this MM context. TLB flush on other harts + * is deferred until this MM context migrates there. + */ + cpumask_setall(pmask); + cpumask_clear_cpu(cpuid, pmask); + cpumask_andnot(pmask, pmask, cmask); + if (broadcast) { riscv_cpuid_to_hartid_mask(cmask, &hmask); sbi_remote_sfence_vma_asid(cpumask_bits(&hmask), diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c index 3af4131c22c7..2e3f1a626a3a 100644 --- a/arch/riscv/net/bpf_jit_comp64.c +++ b/arch/riscv/net/bpf_jit_comp64.c @@ -120,6 +120,25 @@ static bool in_auipc_jalr_range(s64 val) val < ((1L << 31) - (1L << 11)); } +/* Emit fixed-length instructions for address */ +static int emit_addr(u8 rd, u64 addr, bool extra_pass, struct rv_jit_context *ctx) +{ + u64 ip = (u64)(ctx->insns + ctx->ninsns); + s64 off = addr - ip; + s64 upper = (off + (1 << 11)) >> 12; + s64 lower = off & 0xfff; + + if (extra_pass && !in_auipc_jalr_range(off)) { + pr_err("bpf-jit: target offset 0x%llx is out of range\n", off); + return -ERANGE; + } + + emit(rv_auipc(rd, upper), ctx); + emit(rv_addi(rd, rd, lower), ctx); + return 0; +} + +/* Emit variable-length instructions for 32-bit and 64-bit imm */ static void emit_imm(u8 rd, s64 val, struct rv_jit_context *ctx) { /* Note that the immediate from the add is sign-extended, @@ -887,7 +906,15 @@ out_be: u64 imm64; imm64 = (u64)insn1.imm << 32 | (u32)imm; - emit_imm(rd, imm64, ctx); + if (bpf_pseudo_func(insn)) { + /* fixed-length insns for extra jit pass */ + ret = emit_addr(rd, imm64, extra_pass, ctx); + if (ret) + return ret; + } else { + emit_imm(rd, imm64, ctx); + } + return 1; } diff --git a/arch/x86/events/intel/uncore.h b/arch/x86/events/intel/uncore.h index b9687980aab6..d6f7c6c1a930 100644 --- a/arch/x86/events/intel/uncore.h +++ b/arch/x86/events/intel/uncore.h @@ -2,6 +2,7 @@ #include #include #include +#include #include #include diff --git a/arch/x86/events/intel/uncore_snb.c b/arch/x86/events/intel/uncore_snb.c index dc3ae55f79e0..912fb3821a6b 100644 --- a/arch/x86/events/intel/uncore_snb.c +++ b/arch/x86/events/intel/uncore_snb.c @@ -1423,6 +1423,7 @@ static void tgl_uncore_imc_freerunning_init_box(struct intel_uncore_box *box) /* MCHBAR is disabled */ if (!(mch_bar & BIT(0))) { pr_warn("perf uncore: MCHBAR is disabled. 
Failed to map IMC free-running counters.\n"); + pci_dev_put(pdev); return; } mch_bar &= ~BIT(0); @@ -1436,6 +1437,8 @@ static void tgl_uncore_imc_freerunning_init_box(struct intel_uncore_box *box) box->io_addr = ioremap(addr, type->mmio_map_size); if (!box->io_addr) pr_warn("perf uncore: Failed to ioremap for %s.\n", type->name); + + pci_dev_put(pdev); } static struct intel_uncore_ops tgl_uncore_imc_freerunning_ops = { diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c index ed869443efb2..8f371f3cbbd2 100644 --- a/arch/x86/events/intel/uncore_snbep.c +++ b/arch/x86/events/intel/uncore_snbep.c @@ -2891,6 +2891,7 @@ static bool hswep_has_limit_sbox(unsigned int device) return false; pci_read_config_dword(dev, HSWEP_PCU_CAPID4_OFFET, &capid4); + pci_dev_put(dev); if (!hswep_get_chop(capid4)) return true; @@ -3803,6 +3804,21 @@ static const struct attribute_group *skx_iio_attr_update[] = { NULL, }; +static void pmu_clear_mapping_attr(const struct attribute_group **groups, + struct attribute_group *ag) +{ + int i; + + for (i = 0; groups[i]; i++) { + if (groups[i] == ag) { + for (i++; groups[i]; i++) + groups[i - 1] = groups[i]; + groups[i - 1] = NULL; + break; + } + } +} + static int pmu_iio_set_mapping(struct intel_uncore_type *type, struct attribute_group *ag) { @@ -3851,7 +3867,7 @@ clear_attrs: clear_topology: kfree(type->topology); clear_attr_update: - type->attr_update = NULL; + pmu_clear_mapping_attr(type->attr_update, ag); return ret; } @@ -4492,6 +4508,8 @@ static int sad_cfg_iio_topology(struct intel_uncore_type *type, u8 *sad_pmon_map type->topology = NULL; } + pci_dev_put(dev); + return ret; } @@ -4857,6 +4875,8 @@ static int snr_uncore_mmio_map(struct intel_uncore_box *box, addr += box_ctl; + pci_dev_put(pdev); + box->io_addr = ioremap(addr, type->mmio_map_size); if (!box->io_addr) { pr_warn("perf uncore: Failed to ioremap for %s.\n", type->name); @@ -5139,6 +5159,11 @@ static int icx_iio_get_topology(struct intel_uncore_type *type) static int icx_iio_set_mapping(struct intel_uncore_type *type) { + /* Detect ICX-D system. 
This case is not supported */ + if (boot_cpu_data.x86_model == INTEL_FAM6_ICELAKE_D) { + pmu_clear_mapping_attr(type->attr_update, &icx_iio_mapping_group); + return -EPERM; + } return pmu_iio_set_mapping(type, &icx_iio_mapping_group); } diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c index 762f10cdfb7a..95f98af74fdc 100644 --- a/arch/x86/hyperv/hv_init.c +++ b/arch/x86/hyperv/hv_init.c @@ -469,8 +469,6 @@ void hyperv_cleanup(void) { union hv_x64_msr_hypercall_contents hypercall_msr; - unregister_syscore_ops(&hv_syscore_ops); - /* Reset our OS id */ wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0); diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index 8961f1031180..544e6c61e17d 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -1951,6 +1951,8 @@ static int ib_prctl_set(struct task_struct *task, unsigned long ctrl) if (ctrl == PR_SPEC_FORCE_DISABLE) task_set_spec_ib_force_disable(task); task_update_spec_tif(task); + if (task == current) + indirect_branch_prediction_barrier(); break; default: return -ERANGE; diff --git a/arch/x86/kernel/cpu/mce/amd.c b/arch/x86/kernel/cpu/mce/amd.c index a873577e49dc..6469d3135d26 100644 --- a/arch/x86/kernel/cpu/mce/amd.c +++ b/arch/x86/kernel/cpu/mce/amd.c @@ -526,7 +526,7 @@ static u32 get_block_address(u32 current_addr, u32 low, u32 high, /* Fall back to method we used for older processors: */ switch (block) { case 0: - addr = msr_ops.misc(bank); + addr = mca_msr_reg(bank, MCA_MISC); break; case 1: offset = ((low & MASK_BLKPTR_LO) >> 21); @@ -965,6 +965,24 @@ _log_error_bank(unsigned int bank, u32 msr_stat, u32 msr_addr, u64 misc) return status & MCI_STATUS_DEFERRED; } +static bool _log_error_deferred(unsigned int bank, u32 misc) +{ + if (!_log_error_bank(bank, mca_msr_reg(bank, MCA_STATUS), + mca_msr_reg(bank, MCA_ADDR), misc)) + return false; + + /* + * Non-SMCA systems don't have MCA_DESTAT/MCA_DEADDR registers. + * Return true here to avoid accessing these registers. + */ + if (!mce_flags.smca) + return true; + + /* Clear MCA_DESTAT if the deferred error was logged from MCA_STATUS. */ + wrmsrl(MSR_AMD64_SMCA_MCx_DESTAT(bank), 0); + return true; +} + /* * We have three scenarios for checking for Deferred errors: * @@ -976,20 +994,9 @@ _log_error_bank(unsigned int bank, u32 msr_stat, u32 msr_addr, u64 misc) */ static void log_error_deferred(unsigned int bank) { - bool defrd; - - defrd = _log_error_bank(bank, msr_ops.status(bank), - msr_ops.addr(bank), 0); - - if (!mce_flags.smca) + if (_log_error_deferred(bank, 0)) return; - /* Clear MCA_DESTAT if we logged the deferred error from MCA_STATUS. */ - if (defrd) { - wrmsrl(MSR_AMD64_SMCA_MCx_DESTAT(bank), 0); - return; - } - /* * Only deferred errors are logged in MCA_DE{STAT,ADDR} so just check * for a valid error. 
@@ -1009,7 +1016,7 @@ static void amd_deferred_error_interrupt(void) static void log_error_thresholding(unsigned int bank, u64 misc) { - _log_error_bank(bank, msr_ops.status(bank), msr_ops.addr(bank), misc); + _log_error_deferred(bank, misc); } static void log_and_reset_block(struct threshold_block *block) @@ -1397,7 +1404,7 @@ static int threshold_create_bank(struct threshold_bank **bp, unsigned int cpu, } } - err = allocate_threshold_blocks(cpu, b, bank, 0, msr_ops.misc(bank)); + err = allocate_threshold_blocks(cpu, b, bank, 0, mca_msr_reg(bank, MCA_MISC)); if (err) goto out_kobj; diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c index 773037e5fd76..5ee82fd386dd 100644 --- a/arch/x86/kernel/cpu/mce/core.c +++ b/arch/x86/kernel/cpu/mce/core.c @@ -176,53 +176,27 @@ void mce_unregister_decode_chain(struct notifier_block *nb) } EXPORT_SYMBOL_GPL(mce_unregister_decode_chain); -static inline u32 ctl_reg(int bank) +u32 mca_msr_reg(int bank, enum mca_msr reg) { - return MSR_IA32_MCx_CTL(bank); -} + if (mce_flags.smca) { + switch (reg) { + case MCA_CTL: return MSR_AMD64_SMCA_MCx_CTL(bank); + case MCA_ADDR: return MSR_AMD64_SMCA_MCx_ADDR(bank); + case MCA_MISC: return MSR_AMD64_SMCA_MCx_MISC(bank); + case MCA_STATUS: return MSR_AMD64_SMCA_MCx_STATUS(bank); + } + } -static inline u32 status_reg(int bank) -{ - return MSR_IA32_MCx_STATUS(bank); -} + switch (reg) { + case MCA_CTL: return MSR_IA32_MCx_CTL(bank); + case MCA_ADDR: return MSR_IA32_MCx_ADDR(bank); + case MCA_MISC: return MSR_IA32_MCx_MISC(bank); + case MCA_STATUS: return MSR_IA32_MCx_STATUS(bank); + } -static inline u32 addr_reg(int bank) -{ - return MSR_IA32_MCx_ADDR(bank); + return 0; } -static inline u32 misc_reg(int bank) -{ - return MSR_IA32_MCx_MISC(bank); -} - -static inline u32 smca_ctl_reg(int bank) -{ - return MSR_AMD64_SMCA_MCx_CTL(bank); -} - -static inline u32 smca_status_reg(int bank) -{ - return MSR_AMD64_SMCA_MCx_STATUS(bank); -} - -static inline u32 smca_addr_reg(int bank) -{ - return MSR_AMD64_SMCA_MCx_ADDR(bank); -} - -static inline u32 smca_misc_reg(int bank) -{ - return MSR_AMD64_SMCA_MCx_MISC(bank); -} - -struct mca_msr_regs msr_ops = { - .ctl = ctl_reg, - .status = status_reg, - .addr = addr_reg, - .misc = misc_reg -}; - static void __print_mce(struct mce *m) { pr_emerg(HW_ERR "CPU %d: Machine Check%s: %Lx Bank %d: %016Lx\n", @@ -371,11 +345,11 @@ static int msr_to_offset(u32 msr) if (msr == mca_cfg.rip_msr) return offsetof(struct mce, ip); - if (msr == msr_ops.status(bank)) + if (msr == mca_msr_reg(bank, MCA_STATUS)) return offsetof(struct mce, status); - if (msr == msr_ops.addr(bank)) + if (msr == mca_msr_reg(bank, MCA_ADDR)) return offsetof(struct mce, addr); - if (msr == msr_ops.misc(bank)) + if (msr == mca_msr_reg(bank, MCA_MISC)) return offsetof(struct mce, misc); if (msr == MSR_IA32_MCG_STATUS) return offsetof(struct mce, mcgstatus); @@ -676,10 +650,10 @@ static struct notifier_block mce_default_nb = { static noinstr void mce_read_aux(struct mce *m, int i) { if (m->status & MCI_STATUS_MISCV) - m->misc = mce_rdmsrl(msr_ops.misc(i)); + m->misc = mce_rdmsrl(mca_msr_reg(i, MCA_MISC)); if (m->status & MCI_STATUS_ADDRV) { - m->addr = mce_rdmsrl(msr_ops.addr(i)); + m->addr = mce_rdmsrl(mca_msr_reg(i, MCA_ADDR)); /* * Mask the reported address by the reported granularity. 
@@ -749,7 +723,7 @@ bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b) m.bank = i; barrier(); - m.status = mce_rdmsrl(msr_ops.status(i)); + m.status = mce_rdmsrl(mca_msr_reg(i, MCA_STATUS)); /* If this entry is not valid, ignore it */ if (!(m.status & MCI_STATUS_VAL)) @@ -817,7 +791,7 @@ clear_it: /* * Clear state for this bank. */ - mce_wrmsrl(msr_ops.status(i), 0); + mce_wrmsrl(mca_msr_reg(i, MCA_STATUS), 0); } /* @@ -842,7 +816,7 @@ static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp, int i; for (i = 0; i < this_cpu_read(mce_num_banks); i++) { - m->status = mce_rdmsrl(msr_ops.status(i)); + m->status = mce_rdmsrl(mca_msr_reg(i, MCA_STATUS)); if (!(m->status & MCI_STATUS_VAL)) continue; @@ -1143,7 +1117,7 @@ static void mce_clear_state(unsigned long *toclear) for (i = 0; i < this_cpu_read(mce_num_banks); i++) { if (test_bit(i, toclear)) - mce_wrmsrl(msr_ops.status(i), 0); + mce_wrmsrl(mca_msr_reg(i, MCA_STATUS), 0); } } @@ -1202,7 +1176,7 @@ static void __mc_scan_banks(struct mce *m, struct pt_regs *regs, struct mce *fin m->addr = 0; m->bank = i; - m->status = mce_rdmsrl(msr_ops.status(i)); + m->status = mce_rdmsrl(mca_msr_reg(i, MCA_STATUS)); if (!(m->status & MCI_STATUS_VAL)) continue; @@ -1699,8 +1673,8 @@ static void __mcheck_cpu_init_clear_banks(void) if (!b->init) continue; - wrmsrl(msr_ops.ctl(i), b->ctl); - wrmsrl(msr_ops.status(i), 0); + wrmsrl(mca_msr_reg(i, MCA_CTL), b->ctl); + wrmsrl(mca_msr_reg(i, MCA_STATUS), 0); } } @@ -1726,7 +1700,7 @@ static void __mcheck_cpu_check_banks(void) if (!b->init) continue; - rdmsrl(msr_ops.ctl(i), msrval); + rdmsrl(mca_msr_reg(i, MCA_CTL), msrval); b->init = !!msrval; } } @@ -1883,13 +1857,6 @@ static void __mcheck_cpu_init_early(struct cpuinfo_x86 *c) mce_flags.succor = !!cpu_has(c, X86_FEATURE_SUCCOR); mce_flags.smca = !!cpu_has(c, X86_FEATURE_SMCA); mce_flags.amd_threshold = 1; - - if (mce_flags.smca) { - msr_ops.ctl = smca_ctl_reg; - msr_ops.status = smca_status_reg; - msr_ops.addr = smca_addr_reg; - msr_ops.misc = smca_misc_reg; - } } } @@ -2265,7 +2232,7 @@ static void mce_disable_error_reporting(void) struct mce_bank *b = &mce_banks[i]; if (b->init) - wrmsrl(msr_ops.ctl(i), 0); + wrmsrl(mca_msr_reg(i, MCA_CTL), 0); } return; } @@ -2617,7 +2584,7 @@ static void mce_reenable_cpu(void) struct mce_bank *b = &mce_banks[i]; if (b->init) - wrmsrl(msr_ops.ctl(i), b->ctl); + wrmsrl(mca_msr_reg(i, MCA_CTL), b->ctl); } } diff --git a/arch/x86/kernel/cpu/mce/internal.h b/arch/x86/kernel/cpu/mce/internal.h index 80dc94313bcf..760b57814760 100644 --- a/arch/x86/kernel/cpu/mce/internal.h +++ b/arch/x86/kernel/cpu/mce/internal.h @@ -168,14 +168,14 @@ struct mce_vendor_flags { extern struct mce_vendor_flags mce_flags; -struct mca_msr_regs { - u32 (*ctl) (int bank); - u32 (*status) (int bank); - u32 (*addr) (int bank); - u32 (*misc) (int bank); +enum mca_msr { + MCA_CTL, + MCA_STATUS, + MCA_ADDR, + MCA_MISC, }; -extern struct mca_msr_regs msr_ops; +u32 mca_msr_reg(int bank, enum mca_msr reg); /* Decide whether to add MCE record to MCE event pool or filter it out. 
*/ extern bool filter_mce(struct mce *m); diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c index 7e8e07bddd5f..1ba590e6ef7b 100644 --- a/arch/x86/kernel/cpu/microcode/intel.c +++ b/arch/x86/kernel/cpu/microcode/intel.c @@ -659,7 +659,6 @@ void load_ucode_intel_ap(void) else iup = &intel_ucode_patch; -reget: if (!*iup) { patch = __load_ucode_intel(&uci); if (!patch) @@ -670,12 +669,7 @@ reget: uci.mc = *iup; - if (apply_microcode_early(&uci, true)) { - /* Mixed-silicon system? Try to refetch the proper patch: */ - *iup = NULL; - - goto reget; - } + apply_microcode_early(&uci, true); } static struct microcode_intel *find_patch(struct ucode_cpu_info *uci) diff --git a/arch/x86/kernel/cpu/sgx/encl.c b/arch/x86/kernel/cpu/sgx/encl.c index 19876ebfb504..fa5777af8da1 100644 --- a/arch/x86/kernel/cpu/sgx/encl.c +++ b/arch/x86/kernel/cpu/sgx/encl.c @@ -533,11 +533,15 @@ const struct vm_operations_struct sgx_vm_ops = { void sgx_encl_release(struct kref *ref) { struct sgx_encl *encl = container_of(ref, struct sgx_encl, refcount); + unsigned long max_page_index = PFN_DOWN(encl->base + encl->size - 1); struct sgx_va_page *va_page; struct sgx_encl_page *entry; - unsigned long index; + unsigned long count = 0; - xa_for_each(&encl->page_array, index, entry) { + XA_STATE(xas, &encl->page_array, PFN_DOWN(encl->base)); + + xas_lock(&xas); + xas_for_each(&xas, entry, max_page_index) { if (entry->epc_page) { /* * The page and its radix tree entry cannot be freed @@ -552,9 +556,20 @@ void sgx_encl_release(struct kref *ref) } kfree(entry); - /* Invoke scheduler to prevent soft lockups. */ - cond_resched(); + /* + * Invoke scheduler on every XA_CHECK_SCHED iteration + * to prevent soft lockups. + */ + if (!(++count % XA_CHECK_SCHED)) { + xas_pause(&xas); + xas_unlock(&xas); + + cond_resched(); + + xas_lock(&xas); + } } + xas_unlock(&xas); xa_destroy(&encl->page_array); diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c index e8326a8d1c5d..03a454d427c3 100644 --- a/arch/x86/kernel/crash.c +++ b/arch/x86/kernel/crash.c @@ -401,10 +401,8 @@ int crash_load_segments(struct kimage *image) kbuf.buf_align = ELF_CORE_HEADER_ALIGN; kbuf.mem = KEXEC_BUF_MEM_UNKNOWN; ret = kexec_add_buffer(&kbuf); - if (ret) { - vfree((void *)image->elf_headers); + if (ret) return ret; - } image->elf_load_addr = kbuf.mem; pr_debug("Loaded ELF headers at 0x%lx bufsz=0x%lx memsz=0x%lx\n", image->elf_load_addr, kbuf.bufsz, kbuf.bufsz); diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c index b3c9ef01d6c0..4017da3a4c70 100644 --- a/arch/x86/kernel/ftrace.c +++ b/arch/x86/kernel/ftrace.c @@ -219,7 +219,9 @@ void ftrace_replace_code(int enable) ret = ftrace_verify_code(rec->ip, old); if (ret) { + ftrace_expected = old; ftrace_bug(ret, rec); + ftrace_expected = NULL; return; } } diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c index 6872f3834668..c4b618d0b16a 100644 --- a/arch/x86/kernel/kprobes/core.c +++ b/arch/x86/kernel/kprobes/core.c @@ -37,6 +37,7 @@ #include #include #include +#include #include #include #include @@ -289,12 +290,15 @@ static int can_probe(unsigned long paddr) if (ret < 0) return 0; +#ifdef CONFIG_KGDB /* - * Another debugging subsystem might insert this breakpoint. - * In that case, we can't recover it. + * If there is a dynamically installed kgdb sw breakpoint, + * this function should not be probed. 
*/ - if (insn.opcode.bytes[0] == INT3_INSN_OPCODE) + if (insn.opcode.bytes[0] == INT3_INSN_OPCODE && + kgdb_has_hit_break(addr)) return 0; +#endif addr += insn.length; } diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c index 71425ebba98a..a9121073d951 100644 --- a/arch/x86/kernel/kprobes/opt.c +++ b/arch/x86/kernel/kprobes/opt.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include @@ -272,19 +273,6 @@ static int insn_is_indirect_jump(struct insn *insn) return ret; } -static bool is_padding_int3(unsigned long addr, unsigned long eaddr) -{ - unsigned char ops; - - for (; addr < eaddr; addr++) { - if (get_kernel_nofault(ops, (void *)addr) < 0 || - ops != INT3_INSN_OPCODE) - return false; - } - - return true; -} - /* Decode whole function to ensure any instructions don't jump into target */ static int can_optimize(unsigned long paddr) { @@ -327,15 +315,15 @@ static int can_optimize(unsigned long paddr) ret = insn_decode_kernel(&insn, (void *)recovered_insn); if (ret < 0) return 0; - +#ifdef CONFIG_KGDB /* - * In the case of detecting unknown breakpoint, this could be - * a padding INT3 between functions. Let's check that all the - * rest of the bytes are also INT3. + * If there is a dynamically installed kgdb sw breakpoint, + * this function should not be probed. */ - if (insn.opcode.bytes[0] == INT3_INSN_OPCODE) - return is_padding_int3(addr, paddr - offset + size) ? 1 : 0; - + if (insn.opcode.bytes[0] == INT3_INSN_OPCODE && + kgdb_has_hit_break(addr)) + return 0; +#endif /* Recover address */ insn.kaddr = (void *)addr; insn.next_byte = (void *)(addr + insn.length); diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c index b63cf8f7745e..6c07f6daaa22 100644 --- a/arch/x86/kernel/uprobes.c +++ b/arch/x86/kernel/uprobes.c @@ -722,8 +722,9 @@ static int branch_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn) switch (opc1) { case 0xeb: /* jmp 8 */ case 0xe9: /* jmp 32 */ - case 0x90: /* prefix* + nop; same as jmp with .offs = 0 */ break; + case 0x90: /* prefix* + nop; same as jmp with .offs = 0 */ + goto setup; case 0xe8: /* call relative */ branch_clear_offset(auprobe, insn); @@ -753,6 +754,7 @@ static int branch_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn) return -ENOTSUPP; } +setup: auprobe->branch.opc1 = opc1; auprobe->branch.ilen = insn->length; auprobe->branch.offs = insn->immediate.value; diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index 817632d5a118..cdebeceedbd0 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -4970,24 +4970,35 @@ static int handle_vmon(struct kvm_vcpu *vcpu) | FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX; /* - * Note, KVM cannot rely on hardware to perform the CR0/CR4 #UD checks - * that have higher priority than VM-Exit (see Intel SDM's pseudocode - * for VMXON), as KVM must load valid CR0/CR4 values into hardware while - * running the guest, i.e. KVM needs to check the _guest_ values. + * Manually check CR4.VMXE checks, KVM must force CR4.VMXE=1 to enter + * the guest and so cannot rely on hardware to perform the check, + * which has higher priority than VM-Exit (see Intel SDM's pseudocode + * for VMXON). * - * Rely on hardware for the other two pre-VM-Exit checks, !VM86 and - * !COMPATIBILITY modes. KVM may run the guest in VM86 to emulate Real - * Mode, but KVM will never take the guest out of those modes. + * Rely on hardware for the other pre-VM-Exit checks, CR0.PE=1, !VM86 + * and !COMPATIBILITY modes. 
For an unrestricted guest, KVM doesn't + * force any of the relevant guest state. For a restricted guest, KVM + * does force CR0.PE=1, but only to also force VM86 in order to emulate + * Real Mode, and so there's no need to check CR0.PE manually. */ - if (!nested_host_cr0_valid(vcpu, kvm_read_cr0(vcpu)) || - !nested_host_cr4_valid(vcpu, kvm_read_cr4(vcpu))) { + if (!kvm_read_cr4_bits(vcpu, X86_CR4_VMXE)) { kvm_queue_exception(vcpu, UD_VECTOR); return 1; } /* - * CPL=0 and all other checks that are lower priority than VM-Exit must - * be checked manually. + * The CPL is checked for "not in VMX operation" and for "in VMX root", + * and has higher priority than the VM-Fail due to being post-VMXON, + * i.e. VMXON #GPs outside of VMX non-root if CPL!=0. In VMX non-root, + * VMXON causes VM-Exit and KVM unconditionally forwards VMXON VM-Exits + * from L2 to L1, i.e. there's no need to check for the vCPU being in + * VMX non-root. + * + * Forwarding the VM-Exit unconditionally, i.e. without performing the + * #UD checks (see above), is functionally ok because KVM doesn't allow + * L1 to run L2 without CR4.VMXE=0, and because KVM never modifies L2's + * CR0 or CR4, i.e. it's L2's responsibility to emulate #UDs that are + * missed by hardware due to shadowing CR0 and/or CR4. */ if (vmx_get_cpl(vcpu)) { kvm_inject_gp(vcpu, 0); @@ -4997,6 +5008,17 @@ static int handle_vmon(struct kvm_vcpu *vcpu) if (vmx->nested.vmxon) return nested_vmx_fail(vcpu, VMXERR_VMXON_IN_VMX_ROOT_OPERATION); + /* + * Invalid CR0/CR4 generates #GP. These checks are performed if and + * only if the vCPU isn't already in VMX operation, i.e. effectively + * have lower priority than the VM-Fail above. + */ + if (!nested_host_cr0_valid(vcpu, kvm_read_cr0(vcpu)) || + !nested_host_cr4_valid(vcpu, kvm_read_cr4(vcpu))) { + kvm_inject_gp(vcpu, 0); + return 1; + } + if ((vmx->msr_ia32_feature_control & VMXON_NEEDED_FEATURES) != VMXON_NEEDED_FEATURES) { kvm_inject_gp(vcpu, 0); @@ -6644,7 +6666,8 @@ void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps) SECONDARY_EXEC_ENABLE_INVPCID | SECONDARY_EXEC_RDSEED_EXITING | SECONDARY_EXEC_XSAVES | - SECONDARY_EXEC_TSC_SCALING; + SECONDARY_EXEC_TSC_SCALING | + SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE; /* * We can emulate "VMCS shadowing," even if the hardware diff --git a/arch/x86/kvm/vmx/sgx.c b/arch/x86/kvm/vmx/sgx.c index 6693ebdc0770..b8cf9a59c145 100644 --- a/arch/x86/kvm/vmx/sgx.c +++ b/arch/x86/kvm/vmx/sgx.c @@ -188,8 +188,10 @@ static int __handle_encls_ecreate(struct kvm_vcpu *vcpu, /* Enforce CPUID restriction on max enclave size. */ max_size_log2 = (attributes & SGX_ATTR_MODE64BIT) ? 
sgx_12_0->edx >> 8 : sgx_12_0->edx; - if (size >= BIT_ULL(max_size_log2)) + if (size >= BIT_ULL(max_size_log2)) { kvm_inject_gp(vcpu, 0); + return 1; + } /* * sgx_virt_ecreate() returns: diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c index c1b2f764b29a..cdec892b28e2 100644 --- a/arch/x86/xen/smp.c +++ b/arch/x86/xen/smp.c @@ -32,30 +32,30 @@ static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id) void xen_smp_intr_free(unsigned int cpu) { + kfree(per_cpu(xen_resched_irq, cpu).name); + per_cpu(xen_resched_irq, cpu).name = NULL; if (per_cpu(xen_resched_irq, cpu).irq >= 0) { unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu).irq, NULL); per_cpu(xen_resched_irq, cpu).irq = -1; - kfree(per_cpu(xen_resched_irq, cpu).name); - per_cpu(xen_resched_irq, cpu).name = NULL; } + kfree(per_cpu(xen_callfunc_irq, cpu).name); + per_cpu(xen_callfunc_irq, cpu).name = NULL; if (per_cpu(xen_callfunc_irq, cpu).irq >= 0) { unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu).irq, NULL); per_cpu(xen_callfunc_irq, cpu).irq = -1; - kfree(per_cpu(xen_callfunc_irq, cpu).name); - per_cpu(xen_callfunc_irq, cpu).name = NULL; } + kfree(per_cpu(xen_debug_irq, cpu).name); + per_cpu(xen_debug_irq, cpu).name = NULL; if (per_cpu(xen_debug_irq, cpu).irq >= 0) { unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu).irq, NULL); per_cpu(xen_debug_irq, cpu).irq = -1; - kfree(per_cpu(xen_debug_irq, cpu).name); - per_cpu(xen_debug_irq, cpu).name = NULL; } + kfree(per_cpu(xen_callfuncsingle_irq, cpu).name); + per_cpu(xen_callfuncsingle_irq, cpu).name = NULL; if (per_cpu(xen_callfuncsingle_irq, cpu).irq >= 0) { unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu).irq, NULL); per_cpu(xen_callfuncsingle_irq, cpu).irq = -1; - kfree(per_cpu(xen_callfuncsingle_irq, cpu).name); - per_cpu(xen_callfuncsingle_irq, cpu).name = NULL; } } @@ -65,6 +65,7 @@ int xen_smp_intr_init(unsigned int cpu) char *resched_name, *callfunc_name, *debug_name; resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu); + per_cpu(xen_resched_irq, cpu).name = resched_name; rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR, cpu, xen_reschedule_interrupt, @@ -74,9 +75,9 @@ int xen_smp_intr_init(unsigned int cpu) if (rc < 0) goto fail; per_cpu(xen_resched_irq, cpu).irq = rc; - per_cpu(xen_resched_irq, cpu).name = resched_name; callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu); + per_cpu(xen_callfunc_irq, cpu).name = callfunc_name; rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR, cpu, xen_call_function_interrupt, @@ -86,10 +87,10 @@ int xen_smp_intr_init(unsigned int cpu) if (rc < 0) goto fail; per_cpu(xen_callfunc_irq, cpu).irq = rc; - per_cpu(xen_callfunc_irq, cpu).name = callfunc_name; if (!xen_fifo_events) { debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu); + per_cpu(xen_debug_irq, cpu).name = debug_name; rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt, IRQF_PERCPU | IRQF_NOBALANCING, @@ -97,10 +98,10 @@ int xen_smp_intr_init(unsigned int cpu) if (rc < 0) goto fail; per_cpu(xen_debug_irq, cpu).irq = rc; - per_cpu(xen_debug_irq, cpu).name = debug_name; } callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu); + per_cpu(xen_callfuncsingle_irq, cpu).name = callfunc_name; rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR, cpu, xen_call_function_single_interrupt, @@ -110,7 +111,6 @@ int xen_smp_intr_init(unsigned int cpu) if (rc < 0) goto fail; per_cpu(xen_callfuncsingle_irq, cpu).irq = rc; - per_cpu(xen_callfuncsingle_irq, cpu).name = callfunc_name; return 0; diff --git a/arch/x86/xen/smp_pv.c 
b/arch/x86/xen/smp_pv.c index cd5539fc5eb4..b47b5111397a 100644 --- a/arch/x86/xen/smp_pv.c +++ b/arch/x86/xen/smp_pv.c @@ -97,18 +97,18 @@ asmlinkage __visible void cpu_bringup_and_idle(void) void xen_smp_intr_free_pv(unsigned int cpu) { + kfree(per_cpu(xen_irq_work, cpu).name); + per_cpu(xen_irq_work, cpu).name = NULL; if (per_cpu(xen_irq_work, cpu).irq >= 0) { unbind_from_irqhandler(per_cpu(xen_irq_work, cpu).irq, NULL); per_cpu(xen_irq_work, cpu).irq = -1; - kfree(per_cpu(xen_irq_work, cpu).name); - per_cpu(xen_irq_work, cpu).name = NULL; } + kfree(per_cpu(xen_pmu_irq, cpu).name); + per_cpu(xen_pmu_irq, cpu).name = NULL; if (per_cpu(xen_pmu_irq, cpu).irq >= 0) { unbind_from_irqhandler(per_cpu(xen_pmu_irq, cpu).irq, NULL); per_cpu(xen_pmu_irq, cpu).irq = -1; - kfree(per_cpu(xen_pmu_irq, cpu).name); - per_cpu(xen_pmu_irq, cpu).name = NULL; } } @@ -118,6 +118,7 @@ int xen_smp_intr_init_pv(unsigned int cpu) char *callfunc_name, *pmu_name; callfunc_name = kasprintf(GFP_KERNEL, "irqwork%d", cpu); + per_cpu(xen_irq_work, cpu).name = callfunc_name; rc = bind_ipi_to_irqhandler(XEN_IRQ_WORK_VECTOR, cpu, xen_irq_work_interrupt, @@ -127,10 +128,10 @@ int xen_smp_intr_init_pv(unsigned int cpu) if (rc < 0) goto fail; per_cpu(xen_irq_work, cpu).irq = rc; - per_cpu(xen_irq_work, cpu).name = callfunc_name; if (is_xen_pmu) { pmu_name = kasprintf(GFP_KERNEL, "pmu%d", cpu); + per_cpu(xen_pmu_irq, cpu).name = pmu_name; rc = bind_virq_to_irqhandler(VIRQ_XENPMU, cpu, xen_pmu_irq_handler, IRQF_PERCPU|IRQF_NOBALANCING, @@ -138,7 +139,6 @@ int xen_smp_intr_init_pv(unsigned int cpu) if (rc < 0) goto fail; per_cpu(xen_pmu_irq, cpu).irq = rc; - per_cpu(xen_pmu_irq, cpu).name = pmu_name; } return 0; diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c index 043c73dfd2c9..5c6fc16e4b92 100644 --- a/arch/x86/xen/spinlock.c +++ b/arch/x86/xen/spinlock.c @@ -75,6 +75,7 @@ void xen_init_lock_cpu(int cpu) cpu, per_cpu(lock_kicker_irq, cpu)); name = kasprintf(GFP_KERNEL, "spinlock%d", cpu); + per_cpu(irq_name, cpu) = name; irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR, cpu, dummy_handler, @@ -85,7 +86,6 @@ void xen_init_lock_cpu(int cpu) if (irq >= 0) { disable_irq(irq); /* make sure it's never delivered */ per_cpu(lock_kicker_irq, cpu) = irq; - per_cpu(irq_name, cpu) = name; } printk("cpu %d spinlock event irq %d\n", cpu, irq); @@ -98,6 +98,8 @@ void xen_uninit_lock_cpu(int cpu) if (!xen_pvspin) return; + kfree(per_cpu(irq_name, cpu)); + per_cpu(irq_name, cpu) = NULL; /* * When booting the kernel with 'mitigations=auto,nosmt', the secondary * CPUs are not activated, and lock_kicker_irq is not initialized. @@ -108,8 +110,6 @@ void xen_uninit_lock_cpu(int cpu) unbind_from_irqhandler(irq, NULL); per_cpu(lock_kicker_irq, cpu) = -1; - kfree(per_cpu(irq_name, cpu)); - per_cpu(irq_name, cpu) = NULL; } PV_CALLEE_SAVE_REGS_THUNK(xen_vcpu_stolen); diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c index 7d8fe13573f6..85120d7b5cf0 100644 --- a/block/bfq-iosched.c +++ b/block/bfq-iosched.c @@ -386,6 +386,12 @@ static void bfq_put_stable_ref(struct bfq_queue *bfqq); void bic_set_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq, bool is_sync) { + struct bfq_queue *old_bfqq = bic->bfqq[is_sync]; + + /* Clear bic pointer if bfqq is detached from this bic */ + if (old_bfqq && old_bfqq->bic == bic) + old_bfqq->bic = NULL; + /* * If bfqq != NULL, then a non-stable queue merge between * bic->bfqq and bfqq is happening here. 
This causes troubles @@ -5245,9 +5251,8 @@ static void bfq_exit_icq_bfqq(struct bfq_io_cq *bic, bool is_sync) unsigned long flags; spin_lock_irqsave(&bfqd->lock, flags); - bfqq->bic = NULL; - bfq_exit_bfqq(bfqd, bfqq); bic_set_bfqq(bic, NULL, is_sync); + bfq_exit_bfqq(bfqd, bfqq); spin_unlock_irqrestore(&bfqd->lock, flags); } } @@ -6630,6 +6635,12 @@ static struct bfq_queue *bfq_init_rq(struct request *rq) bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio, true, is_sync, NULL); + if (unlikely(bfqq == &bfqd->oom_bfqq)) + bfqq_already_existing = true; + } else + bfqq_already_existing = true; + + if (!bfqq_already_existing) { bfqq->waker_bfqq = old_bfqq->waker_bfqq; bfqq->tentative_waker_bfqq = NULL; @@ -6643,8 +6654,7 @@ static struct bfq_queue *bfq_init_rq(struct request *rq) if (bfqq->waker_bfqq) hlist_add_head(&bfqq->woken_list_node, &bfqq->waker_bfqq->woken_list); - } else - bfqq_already_existing = true; + } } } diff --git a/block/blk-merge.c b/block/blk-merge.c index bbe66a9010bf..bb26db93ad1d 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -279,6 +279,16 @@ static struct bio *blk_bio_segment_split(struct request_queue *q, *segs = nsegs; return NULL; split: + /* + * We can't sanely support splitting for a REQ_NOWAIT bio. End it + * with EAGAIN if splitting is required and return an error pointer. + */ + if (bio->bi_opf & REQ_NOWAIT) { + bio->bi_status = BLK_STS_AGAIN; + bio_endio(bio); + return ERR_PTR(-EAGAIN); + } + *segs = nsegs; /* diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c index 253c857cba47..7074ce8d2d03 100644 --- a/block/blk-mq-sysfs.c +++ b/block/blk-mq-sysfs.c @@ -187,7 +187,7 @@ static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx) { struct request_queue *q = hctx->queue; struct blk_mq_ctx *ctx; - int i, ret; + int i, j, ret; if (!hctx->nr_ctx) return 0; @@ -199,9 +199,16 @@ static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx) hctx_for_each_ctx(hctx, ctx, i) { ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu); if (ret) - break; + goto out; } + return 0; +out: + hctx_for_each_ctx(hctx, ctx, j) { + if (j < i) + kobject_del(&ctx->kobj); + } + kobject_del(&hctx->kobj); return ret; } diff --git a/block/genhd.c b/block/genhd.c index 68065189ca17..a1d9e785dcf7 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -530,6 +530,7 @@ out_unregister_queue: rq_qos_exit(disk->queue); out_put_slave_dir: kobject_put(disk->slave_dir); + disk->slave_dir = NULL; out_put_holder_dir: kobject_put(disk->part0->bd_holder_dir); out_del_integrity: @@ -624,6 +625,7 @@ void del_gendisk(struct gendisk *disk) kobject_put(disk->part0->bd_holder_dir); kobject_put(disk->slave_dir); + disk->slave_dir = NULL; part_stat_set_all(disk->part0, 0); disk->part0->bd_stamp = 0; diff --git a/block/mq-deadline.c b/block/mq-deadline.c index cd2342d29704..950c46332a76 100644 --- a/block/mq-deadline.c +++ b/block/mq-deadline.c @@ -153,6 +153,20 @@ static u8 dd_rq_ioclass(struct request *rq) return IOPRIO_PRIO_CLASS(req_get_ioprio(rq)); } +/* + * get the request before `rq' in sector-sorted order + */ +static inline struct request * +deadline_earlier_request(struct request *rq) +{ + struct rb_node *node = rb_prev(&rq->rb_node); + + if (node) + return rb_entry_rq(node); + + return NULL; +} + /* * get the request after `rq' in sector-sorted order */ @@ -288,6 +302,39 @@ static inline int deadline_check_fifo(struct dd_per_prio *per_prio, return 0; } +/* + * Check if rq has a sequential request preceding it. 
+ */ +static bool deadline_is_seq_writes(struct deadline_data *dd, struct request *rq) +{ + struct request *prev = deadline_earlier_request(rq); + + if (!prev) + return false; + + return blk_rq_pos(prev) + blk_rq_sectors(prev) == blk_rq_pos(rq); +} + +/* + * Skip all write requests that are sequential from @rq, even if we cross + * a zone boundary. + */ +static struct request *deadline_skip_seq_writes(struct deadline_data *dd, + struct request *rq) +{ + sector_t pos = blk_rq_pos(rq); + sector_t skipped_sectors = 0; + + while (rq) { + if (blk_rq_pos(rq) != pos + skipped_sectors) + break; + skipped_sectors += blk_rq_sectors(rq); + rq = deadline_latter_request(rq); + } + + return rq; +} + /* * For the specified data direction, return the next request to * dispatch using arrival ordered lists. @@ -308,11 +355,16 @@ deadline_fifo_request(struct deadline_data *dd, struct dd_per_prio *per_prio, /* * Look for a write request that can be dispatched, that is one with - * an unlocked target zone. + * an unlocked target zone. For some HDDs, breaking a sequential + * write stream can lead to lower throughput, so make sure to preserve + * sequential write streams, even if that stream crosses into the next + * zones and these zones are unlocked. */ spin_lock_irqsave(&dd->zone_lock, flags); list_for_each_entry(rq, &per_prio->fifo_list[DD_WRITE], queuelist) { - if (blk_req_can_dispatch_to_zone(rq)) + if (blk_req_can_dispatch_to_zone(rq) && + (blk_queue_nonrot(rq->q) || + !deadline_is_seq_writes(dd, rq))) goto out; } rq = NULL; @@ -342,13 +394,19 @@ deadline_next_request(struct deadline_data *dd, struct dd_per_prio *per_prio, /* * Look for a write request that can be dispatched, that is one with - * an unlocked target zone. + * an unlocked target zone. For some HDDs, breaking a sequential + * write stream can lead to lower throughput, so make sure to preserve + * sequential write streams, even if that stream crosses into the next + * zones and these zones are unlocked. */ spin_lock_irqsave(&dd->zone_lock, flags); while (rq) { if (blk_req_can_dispatch_to_zone(rq)) break; - rq = deadline_latter_request(rq); + if (blk_queue_nonrot(rq->q)) + rq = deadline_latter_request(rq); + else + rq = deadline_skip_seq_writes(dd, rq); } spin_unlock_irqrestore(&dd->zone_lock, flags); @@ -733,6 +791,18 @@ static void dd_prepare_request(struct request *rq) rq->elv.priv[0] = NULL; } +static bool dd_has_write_work(struct blk_mq_hw_ctx *hctx) +{ + struct deadline_data *dd = hctx->queue->elevator->elevator_data; + enum dd_prio p; + + for (p = 0; p <= DD_PRIO_MAX; p++) + if (!list_empty_careful(&dd->per_prio[p].fifo_list[DD_WRITE])) + return true; + + return false; +} + /* * Callback from inside blk_mq_free_request(). 
* @@ -755,7 +825,6 @@ static void dd_finish_request(struct request *rq) struct deadline_data *dd = q->elevator->elevator_data; const u8 ioprio_class = dd_rq_ioclass(rq); const enum dd_prio prio = ioprio_class_to_prio[ioprio_class]; - struct dd_per_prio *per_prio = &dd->per_prio[prio]; /* * The block layer core may call dd_finish_request() without having @@ -771,9 +840,10 @@ static void dd_finish_request(struct request *rq) spin_lock_irqsave(&dd->zone_lock, flags); blk_req_zone_write_unlock(rq); - if (!list_empty(&per_prio->fifo_list[DD_WRITE])) - blk_mq_sched_mark_restart_hctx(rq->mq_hctx); spin_unlock_irqrestore(&dd->zone_lock, flags); + + if (dd_has_write_work(rq->mq_hctx)) + blk_mq_sched_mark_restart_hctx(rq->mq_hctx); } } diff --git a/crypto/cryptd.c b/crypto/cryptd.c index 668095eca0fa..ca3a40fc7da9 100644 --- a/crypto/cryptd.c +++ b/crypto/cryptd.c @@ -68,11 +68,12 @@ struct aead_instance_ctx { struct cryptd_skcipher_ctx { refcount_t refcnt; - struct crypto_sync_skcipher *child; + struct crypto_skcipher *child; }; struct cryptd_skcipher_request_ctx { crypto_completion_t complete; + struct skcipher_request req; }; struct cryptd_hash_ctx { @@ -227,13 +228,13 @@ static int cryptd_skcipher_setkey(struct crypto_skcipher *parent, const u8 *key, unsigned int keylen) { struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(parent); - struct crypto_sync_skcipher *child = ctx->child; + struct crypto_skcipher *child = ctx->child; - crypto_sync_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); - crypto_sync_skcipher_set_flags(child, - crypto_skcipher_get_flags(parent) & - CRYPTO_TFM_REQ_MASK); - return crypto_sync_skcipher_setkey(child, key, keylen); + crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); + crypto_skcipher_set_flags(child, + crypto_skcipher_get_flags(parent) & + CRYPTO_TFM_REQ_MASK); + return crypto_skcipher_setkey(child, key, keylen); } static void cryptd_skcipher_complete(struct skcipher_request *req, int err) @@ -258,13 +259,13 @@ static void cryptd_skcipher_encrypt(struct crypto_async_request *base, struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req); struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); - struct crypto_sync_skcipher *child = ctx->child; - SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, child); + struct skcipher_request *subreq = &rctx->req; + struct crypto_skcipher *child = ctx->child; if (unlikely(err == -EINPROGRESS)) goto out; - skcipher_request_set_sync_tfm(subreq, child); + skcipher_request_set_tfm(subreq, child); skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL); skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, @@ -286,13 +287,13 @@ static void cryptd_skcipher_decrypt(struct crypto_async_request *base, struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req); struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); - struct crypto_sync_skcipher *child = ctx->child; - SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, child); + struct skcipher_request *subreq = &rctx->req; + struct crypto_skcipher *child = ctx->child; if (unlikely(err == -EINPROGRESS)) goto out; - skcipher_request_set_sync_tfm(subreq, child); + skcipher_request_set_tfm(subreq, child); skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL); skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, @@ -343,9 +344,10 @@ static int cryptd_skcipher_init_tfm(struct 
crypto_skcipher *tfm) if (IS_ERR(cipher)) return PTR_ERR(cipher); - ctx->child = (struct crypto_sync_skcipher *)cipher; + ctx->child = cipher; crypto_skcipher_set_reqsize( - tfm, sizeof(struct cryptd_skcipher_request_ctx)); + tfm, sizeof(struct cryptd_skcipher_request_ctx) + + crypto_skcipher_reqsize(cipher)); return 0; } @@ -353,7 +355,7 @@ static void cryptd_skcipher_exit_tfm(struct crypto_skcipher *tfm) { struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); - crypto_free_sync_skcipher(ctx->child); + crypto_free_skcipher(ctx->child); } static void cryptd_skcipher_free(struct skcipher_instance *inst) @@ -931,7 +933,7 @@ struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm) { struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base); - return &ctx->child->base; + return ctx->child; } EXPORT_SYMBOL_GPL(cryptd_skcipher_child); diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index 3362897bf61b..4ada7e749390 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -1295,15 +1295,6 @@ static void test_mb_skcipher_speed(const char *algo, int enc, int secs, goto out_free_tfm; } - - for (i = 0; i < num_mb; ++i) - if (testmgr_alloc_buf(data[i].xbuf)) { - while (i--) - testmgr_free_buf(data[i].xbuf); - goto out_free_tfm; - } - - for (i = 0; i < num_mb; ++i) { data[i].req = skcipher_request_alloc(tfm, GFP_KERNEL); if (!data[i].req) { diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c index 8e011e59b9b4..ee1832ba39a2 100644 --- a/drivers/acpi/acpica/dsmethod.c +++ b/drivers/acpi/acpica/dsmethod.c @@ -517,7 +517,7 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread, info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info)); if (!info) { status = AE_NO_MEMORY; - goto cleanup; + goto pop_walk_state; } info->parameters = &this_walk_state->operands[0]; @@ -529,7 +529,7 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread, ACPI_FREE(info); if (ACPI_FAILURE(status)) { - goto cleanup; + goto pop_walk_state; } next_walk_state->method_nesting_depth = @@ -575,6 +575,12 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread, return_ACPI_STATUS(status); +pop_walk_state: + + /* On error, pop the walk state to be deleted from thread */ + + acpi_ds_pop_walk_state(thread); + cleanup: /* On error, we must terminate the method properly */ diff --git a/drivers/acpi/acpica/utcopy.c b/drivers/acpi/acpica/utcopy.c index d9877153f400..fdd503bb69c4 100644 --- a/drivers/acpi/acpica/utcopy.c +++ b/drivers/acpi/acpica/utcopy.c @@ -916,13 +916,6 @@ acpi_ut_copy_ipackage_to_ipackage(union acpi_operand_object *source_obj, status = acpi_ut_walk_package_tree(source_obj, dest_obj, acpi_ut_copy_ielement_to_ielement, walk_state); - if (ACPI_FAILURE(status)) { - - /* On failure, delete the destination package object */ - - acpi_ut_remove_reference(dest_obj); - } - return_ACPI_STATUS(status); } diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c index 19358a641610..33921949bd8f 100644 --- a/drivers/acpi/resource.c +++ b/drivers/acpi/resource.c @@ -399,16 +399,68 @@ static const struct dmi_system_id medion_laptop[] = { { } }; +static const struct dmi_system_id asus_laptop[] = { + { + .ident = "Asus Vivobook K3402ZA", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_BOARD_NAME, "K3402ZA"), + }, + }, + { + .ident = "Asus Vivobook K3502ZA", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_BOARD_NAME, "K3502ZA"), + }, + }, + { } +}; + +static const struct dmi_system_id 
lenovo_laptop[] = { + { + .ident = "LENOVO IdeaPad Flex 5 14ALC7", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "82R9"), + }, + }, + { + .ident = "LENOVO IdeaPad Flex 5 16ALC7", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "82RA"), + }, + }, + { } +}; + +static const struct dmi_system_id schenker_gm_rg[] = { + { + .ident = "XMG CORE 15 (M22)", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "SchenkerTechnologiesGmbH"), + DMI_MATCH(DMI_BOARD_NAME, "GMxRGxx"), + }, + }, + { } +}; + struct irq_override_cmp { const struct dmi_system_id *system; unsigned char irq; unsigned char triggering; unsigned char polarity; unsigned char shareable; + bool override; }; -static const struct irq_override_cmp skip_override_table[] = { - { medion_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0 }, +static const struct irq_override_cmp override_table[] = { + { medion_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, false }, + { asus_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, false }, + { lenovo_laptop, 6, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, true }, + { lenovo_laptop, 10, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, true }, + { schenker_gm_rg, 1, ACPI_EDGE_SENSITIVE, ACPI_ACTIVE_LOW, 1, true }, }; static bool acpi_dev_irq_override(u32 gsi, u8 triggering, u8 polarity, @@ -416,6 +468,17 @@ static bool acpi_dev_irq_override(u32 gsi, u8 triggering, u8 polarity, { int i; + for (i = 0; i < ARRAY_SIZE(override_table); i++) { + const struct irq_override_cmp *entry = &override_table[i]; + + if (dmi_check_system(entry->system) && + entry->irq == gsi && + entry->triggering == triggering && + entry->polarity == polarity && + entry->shareable == shareable) + return entry->override; + } + #ifdef CONFIG_X86 /* * IRQ override isn't needed on modern AMD Zen systems and @@ -426,17 +489,6 @@ static bool acpi_dev_irq_override(u32 gsi, u8 triggering, u8 polarity, return false; #endif - for (i = 0; i < ARRAY_SIZE(skip_override_table); i++) { - const struct irq_override_cmp *entry = &skip_override_table[i]; - - if (dmi_check_system(entry->system) && - entry->irq == gsi && - entry->triggering == triggering && - entry->polarity == polarity && - entry->shareable == shareable) - return false; - } - return true; } diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c index e0185e841b2a..2af1ae172102 100644 --- a/drivers/acpi/x86/s2idle.c +++ b/drivers/acpi/x86/s2idle.c @@ -378,16 +378,13 @@ static int lps0_device_attach(struct acpi_device *adev, * AMDI0006: * - should use rev_id 0x0 * - function mask = 0x3: Should use Microsoft method - * AMDI0007: - * - Should use rev_id 0x2 - * - Should only use AMD method */ const char *hid = acpi_device_hid(adev); - rev_id = strcmp(hid, "AMDI0007") ? 
0 : 2; + rev_id = 0; lps0_dsm_func_mask = validate_dsm(adev->handle, ACPI_LPS0_DSM_UUID_AMD, rev_id, &lps0_dsm_guid); lps0_dsm_func_mask_microsoft = validate_dsm(adev->handle, - ACPI_LPS0_DSM_UUID_MICROSOFT, 0, + ACPI_LPS0_DSM_UUID_MICROSOFT, rev_id, &lps0_dsm_guid_microsoft); if (lps0_dsm_func_mask > 0x3 && (!strcmp(hid, "AMD0004") || !strcmp(hid, "AMD0005") || @@ -395,9 +392,6 @@ static int lps0_device_attach(struct acpi_device *adev, lps0_dsm_func_mask = (lps0_dsm_func_mask << 1) | 0x1; acpi_handle_debug(adev->handle, "_DSM UUID %s: Adjusted function mask: 0x%x\n", ACPI_LPS0_DSM_UUID_AMD, lps0_dsm_func_mask); - } else if (lps0_dsm_func_mask_microsoft > 0 && !strcmp(hid, "AMDI0007")) { - lps0_dsm_func_mask_microsoft = -EINVAL; - acpi_handle_debug(adev->handle, "_DSM Using AMD method\n"); } } else { rev_id = 1; diff --git a/drivers/ata/acard-ahci.c b/drivers/ata/acard-ahci.c index 2a04e8abd397..26e0eb537b4f 100644 --- a/drivers/ata/acard-ahci.c +++ b/drivers/ata/acard-ahci.c @@ -267,7 +267,7 @@ static bool acard_ahci_qc_fill_rtf(struct ata_queued_cmd *qc) if (qc->tf.protocol == ATA_PROT_PIO && qc->dma_dir == DMA_FROM_DEVICE && !(qc->flags & ATA_QCFLAG_FAILED)) { ata_tf_from_fis(rx_fis + RX_FIS_PIO_SETUP, &qc->result_tf); - qc->result_tf.command = (rx_fis + RX_FIS_PIO_SETUP)[15]; + qc->result_tf.status = (rx_fis + RX_FIS_PIO_SETUP)[15]; } else ata_tf_from_fis(rx_fis + RX_FIS_D2H_REG, &qc->result_tf); diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 812731e80f8e..149ee16fd022 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -83,6 +83,7 @@ enum board_ids { static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); static void ahci_remove_one(struct pci_dev *dev); static void ahci_shutdown_one(struct pci_dev *dev); +static void ahci_intel_pcs_quirk(struct pci_dev *pdev, struct ahci_host_priv *hpriv); static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class, unsigned long deadline); static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class, @@ -668,6 +669,25 @@ static void ahci_pci_save_initial_config(struct pci_dev *pdev, ahci_save_initial_config(&pdev->dev, hpriv); } +static int ahci_pci_reset_controller(struct ata_host *host) +{ + struct pci_dev *pdev = to_pci_dev(host->dev); + struct ahci_host_priv *hpriv = host->private_data; + int rc; + + rc = ahci_reset_controller(host); + if (rc) + return rc; + + /* + * If platform firmware failed to enable ports, try to enable + * them here. 
+ */ + ahci_intel_pcs_quirk(pdev, hpriv); + + return 0; +} + static void ahci_pci_init_controller(struct ata_host *host) { struct ahci_host_priv *hpriv = host->private_data; @@ -735,7 +755,7 @@ static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class, /* clear D2H reception area to properly wait for D2H FIS */ ata_tf_init(link->device, &tf); - tf.command = ATA_BUSY; + tf.status = ATA_BUSY; ata_tf_to_fis(&tf, 0, 0, d2h_fis); rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context), @@ -806,7 +826,7 @@ static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class, /* clear D2H reception area to properly wait for D2H FIS */ ata_tf_init(link->device, &tf); - tf.command = ATA_BUSY; + tf.status = ATA_BUSY; ata_tf_to_fis(&tf, 0, 0, d2h_fis); rc = sata_link_hardreset(link, timing, deadline, &online, @@ -869,7 +889,7 @@ static int ahci_pci_device_runtime_resume(struct device *dev) struct ata_host *host = pci_get_drvdata(pdev); int rc; - rc = ahci_reset_controller(host); + rc = ahci_pci_reset_controller(host); if (rc) return rc; ahci_pci_init_controller(host); @@ -904,7 +924,7 @@ static int ahci_pci_device_resume(struct device *dev) ahci_mcp89_apple_enable(pdev); if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) { - rc = ahci_reset_controller(host); + rc = ahci_pci_reset_controller(host); if (rc) return rc; @@ -1789,12 +1809,6 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) /* save initial config */ ahci_pci_save_initial_config(pdev, hpriv); - /* - * If platform firmware failed to enable ports, try to enable - * them here. - */ - ahci_intel_pcs_quirk(pdev, hpriv); - /* prepare host */ if (hpriv->cap & HOST_CAP_NCQ) { pi.flags |= ATA_FLAG_NCQ; @@ -1904,7 +1918,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (rc) return rc; - rc = ahci_reset_controller(host); + rc = ahci_pci_reset_controller(host); if (rc) return rc; diff --git a/drivers/ata/ahci_qoriq.c b/drivers/ata/ahci_qoriq.c index 5b46fc9aeb4a..e5ac3d1c214c 100644 --- a/drivers/ata/ahci_qoriq.c +++ b/drivers/ata/ahci_qoriq.c @@ -125,7 +125,7 @@ static int ahci_qoriq_hardreset(struct ata_link *link, unsigned int *class, /* clear D2H reception area to properly wait for D2H FIS */ ata_tf_init(link->device, &tf); - tf.command = ATA_BUSY; + tf.status = ATA_BUSY; ata_tf_to_fis(&tf, 0, 0, d2h_fis); rc = sata_link_hardreset(link, timing, deadline, &online, diff --git a/drivers/ata/ahci_xgene.c b/drivers/ata/ahci_xgene.c index dffc432b9d54..292099410cf6 100644 --- a/drivers/ata/ahci_xgene.c +++ b/drivers/ata/ahci_xgene.c @@ -365,7 +365,7 @@ static int xgene_ahci_do_hardreset(struct ata_link *link, do { /* clear D2H reception area to properly wait for D2H FIS */ ata_tf_init(link->device, &tf); - tf.command = ATA_BUSY; + tf.status = ATA_BUSY; ata_tf_to_fis(&tf, 0, 0, d2h_fis); rc = sata_link_hardreset(link, timing, deadline, online, ahci_check_ready); diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c index 395772fa3943..192115a45dd7 100644 --- a/drivers/ata/libahci.c +++ b/drivers/ata/libahci.c @@ -1552,7 +1552,7 @@ int ahci_do_hardreset(struct ata_link *link, unsigned int *class, /* clear D2H reception area to properly wait for D2H FIS */ ata_tf_init(link->device, &tf); - tf.command = ATA_BUSY; + tf.status = ATA_BUSY; ata_tf_to_fis(&tf, 0, 0, d2h_fis); rc = sata_link_hardreset(link, timing, deadline, online, @@ -2038,7 +2038,7 @@ static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc) if (qc->tf.protocol == ATA_PROT_PIO && 
qc->dma_dir == DMA_FROM_DEVICE && !(qc->flags & ATA_QCFLAG_FAILED)) { ata_tf_from_fis(rx_fis + RX_FIS_PIO_SETUP, &qc->result_tf); - qc->result_tf.command = (rx_fis + RX_FIS_PIO_SETUP)[15]; + qc->result_tf.status = (rx_fis + RX_FIS_PIO_SETUP)[15]; } else ata_tf_from_fis(rx_fis + RX_FIS_D2H_REG, &qc->result_tf); diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c index 7a7d6642edcc..d15f3e908ea4 100644 --- a/drivers/ata/libata-acpi.c +++ b/drivers/ata/libata-acpi.c @@ -554,13 +554,13 @@ static void ata_acpi_gtf_to_tf(struct ata_device *dev, tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; tf->protocol = ATA_PROT_NODATA; - tf->feature = gtf->tf[0]; /* 0x1f1 */ + tf->error = gtf->tf[0]; /* 0x1f1 */ tf->nsect = gtf->tf[1]; /* 0x1f2 */ tf->lbal = gtf->tf[2]; /* 0x1f3 */ tf->lbam = gtf->tf[3]; /* 0x1f4 */ tf->lbah = gtf->tf[4]; /* 0x1f5 */ tf->device = gtf->tf[5]; /* 0x1f6 */ - tf->command = gtf->tf[6]; /* 0x1f7 */ + tf->status = gtf->tf[6]; /* 0x1f7 */ } static int ata_acpi_filter_tf(struct ata_device *dev, @@ -650,9 +650,7 @@ static int ata_acpi_run_tf(struct ata_device *dev, struct ata_taskfile *pptf = NULL; struct ata_taskfile tf, ptf, rtf; unsigned int err_mask; - const char *level; const char *descr; - char msg[60]; int rc; if ((gtf->tf[0] == 0) && (gtf->tf[1] == 0) && (gtf->tf[2] == 0) @@ -666,6 +664,10 @@ static int ata_acpi_run_tf(struct ata_device *dev, pptf = &ptf; } + descr = ata_get_cmd_descript(tf.command); + if (!descr) + descr = "unknown"; + if (!ata_acpi_filter_tf(dev, &tf, pptf)) { rtf = tf; err_mask = ata_exec_internal(dev, &rtf, NULL, @@ -673,40 +675,42 @@ static int ata_acpi_run_tf(struct ata_device *dev, switch (err_mask) { case 0: - level = KERN_DEBUG; - snprintf(msg, sizeof(msg), "succeeded"); + ata_dev_dbg(dev, + "ACPI cmd %02x/%02x:%02x:%02x:%02x:%02x:%02x" + "(%s) succeeded\n", + tf.command, tf.feature, tf.nsect, tf.lbal, + tf.lbam, tf.lbah, tf.device, descr); rc = 1; break; case AC_ERR_DEV: - level = KERN_INFO; - snprintf(msg, sizeof(msg), - "rejected by device (Stat=0x%02x Err=0x%02x)", - rtf.command, rtf.feature); + ata_dev_info(dev, + "ACPI cmd %02x/%02x:%02x:%02x:%02x:%02x:%02x" + "(%s) rejected by device (Stat=0x%02x Err=0x%02x)", + tf.command, tf.feature, tf.nsect, tf.lbal, + tf.lbam, tf.lbah, tf.device, descr, + rtf.status, rtf.error); rc = 0; break; default: - level = KERN_ERR; - snprintf(msg, sizeof(msg), - "failed (Emask=0x%x Stat=0x%02x Err=0x%02x)", - err_mask, rtf.command, rtf.feature); + ata_dev_err(dev, + "ACPI cmd %02x/%02x:%02x:%02x:%02x:%02x:%02x" + "(%s) failed (Emask=0x%x Stat=0x%02x Err=0x%02x)", + tf.command, tf.feature, tf.nsect, tf.lbal, + tf.lbam, tf.lbah, tf.device, descr, + err_mask, rtf.status, rtf.error); rc = -EIO; break; } } else { - level = KERN_INFO; - snprintf(msg, sizeof(msg), "filtered out"); + ata_dev_info(dev, + "ACPI cmd %02x/%02x:%02x:%02x:%02x:%02x:%02x" + "(%s) filtered out\n", + tf.command, tf.feature, tf.nsect, tf.lbal, + tf.lbam, tf.lbah, tf.device, descr); rc = 0; } - descr = ata_get_cmd_descript(tf.command); - - ata_dev_printk(dev, level, - "ACPI cmd %02x/%02x:%02x:%02x:%02x:%02x:%02x (%s) %s\n", - tf.command, tf.feature, tf.nsect, tf.lbal, - tf.lbam, tf.lbah, tf.device, - (descr ? 
descr : "unknown"), msg); - return rc; } diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 4d308e3163c3..c430cd3cfa17 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -1185,7 +1185,7 @@ static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors) ata_dev_warn(dev, "failed to read native max address (err_mask=0x%x)\n", err_mask); - if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED)) + if (err_mask == AC_ERR_DEV && (tf.error & ATA_ABORTED)) return -EACCES; return -EIO; } @@ -1249,7 +1249,7 @@ static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors) "failed to set max address (err_mask=0x%x)\n", err_mask); if (err_mask == AC_ERR_DEV && - (tf.feature & (ATA_ABORTED | ATA_IDNF))) + (tf.error & (ATA_ABORTED | ATA_IDNF))) return -EACCES; return -EIO; } @@ -1616,7 +1616,7 @@ unsigned ata_exec_internal_sg(struct ata_device *dev, /* perform minimal error analysis */ if (qc->flags & ATA_QCFLAG_FAILED) { - if (qc->result_tf.command & (ATA_ERR | ATA_DF)) + if (qc->result_tf.status & (ATA_ERR | ATA_DF)) qc->err_mask |= AC_ERR_DEV; if (!qc->err_mask) @@ -1625,7 +1625,7 @@ unsigned ata_exec_internal_sg(struct ata_device *dev, if (qc->err_mask & ~AC_ERR_OTHER) qc->err_mask &= ~AC_ERR_OTHER; } else if (qc->tf.command == ATA_CMD_REQ_SENSE_DATA) { - qc->result_tf.command |= ATA_SENSE; + qc->result_tf.status |= ATA_SENSE; } /* finish up */ @@ -1848,7 +1848,7 @@ retry: return 0; } - if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) { + if ((err_mask == AC_ERR_DEV) && (tf.error & ATA_ABORTED)) { /* Device or controller might have reported * the wrong device class. Give a shot at the * other IDENTIFY if the current one is @@ -4371,7 +4371,7 @@ static unsigned int ata_dev_init_params(struct ata_device *dev, /* A clean abort indicates an original or just out of spec drive and we should continue as we issue the setup based on the drive reported working geometry */ - if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED)) + if (err_mask == AC_ERR_DEV && (tf.error & ATA_ABORTED)) err_mask = 0; DPRINTK("EXIT, err_mask=%x\n", err_mask); @@ -6497,67 +6497,6 @@ const struct ata_port_info ata_dummy_port_info = { }; EXPORT_SYMBOL_GPL(ata_dummy_port_info); -/* - * Utility print functions - */ -void ata_port_printk(const struct ata_port *ap, const char *level, - const char *fmt, ...) -{ - struct va_format vaf; - va_list args; - - va_start(args, fmt); - - vaf.fmt = fmt; - vaf.va = &args; - - printk("%sata%u: %pV", level, ap->print_id, &vaf); - - va_end(args); -} -EXPORT_SYMBOL(ata_port_printk); - -void ata_link_printk(const struct ata_link *link, const char *level, - const char *fmt, ...) -{ - struct va_format vaf; - va_list args; - - va_start(args, fmt); - - vaf.fmt = fmt; - vaf.va = &args; - - if (sata_pmp_attached(link->ap) || link->ap->slave_link) - printk("%sata%u.%02u: %pV", - level, link->ap->print_id, link->pmp, &vaf); - else - printk("%sata%u: %pV", - level, link->ap->print_id, &vaf); - - va_end(args); -} -EXPORT_SYMBOL(ata_link_printk); - -void ata_dev_printk(const struct ata_device *dev, const char *level, - const char *fmt, ...) 
-{ - struct va_format vaf; - va_list args; - - va_start(args, fmt); - - vaf.fmt = fmt; - vaf.va = &args; - - printk("%sata%u.%02u: %pV", - level, dev->link->ap->print_id, dev->link->pmp + dev->devno, - &vaf); - - va_end(args); -} -EXPORT_SYMBOL(ata_dev_printk); - void ata_print_version(const struct device *dev, const char *version) { dev_printk(KERN_DEBUG, dev, "version %s\n", version); diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index 7aea631edb27..8350abc17290 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -1386,7 +1386,7 @@ unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key) err_mask = ata_exec_internal(dev, &tf, cdb, DMA_NONE, NULL, 0, 0); if (err_mask == AC_ERR_DEV) - *r_sense_key = tf.feature >> 4; + *r_sense_key = tf.error >> 4; return err_mask; } @@ -1431,12 +1431,12 @@ static void ata_eh_request_sense(struct ata_queued_cmd *qc, err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); /* Ignore err_mask; ATA_ERR might be set */ - if (tf.command & ATA_SENSE) { + if (tf.status & ATA_SENSE) { ata_scsi_set_sense(dev, cmd, tf.lbah, tf.lbam, tf.lbal); qc->flags |= ATA_QCFLAG_SENSE_VALID; } else { ata_dev_warn(dev, "request sense failed stat %02x emask %x\n", - tf.command, err_mask); + tf.status, err_mask); } } @@ -1561,7 +1561,7 @@ static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc, const struct ata_taskfile *tf) { unsigned int tmp, action = 0; - u8 stat = tf->command, err = tf->feature; + u8 stat = tf->status, err = tf->error; if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) { qc->err_mask |= AC_ERR_HSM; @@ -1598,7 +1598,7 @@ static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc, if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) { tmp = atapi_eh_request_sense(qc->dev, qc->scsicmd->sense_buffer, - qc->result_tf.feature >> 4); + qc->result_tf.error >> 4); if (!tmp) qc->flags |= ATA_QCFLAG_SENSE_VALID; else @@ -2372,7 +2372,7 @@ static void ata_eh_link_report(struct ata_link *link) cmd->hob_feature, cmd->hob_nsect, cmd->hob_lbal, cmd->hob_lbam, cmd->hob_lbah, cmd->device, qc->tag, data_buf, cdb_buf, - res->command, res->feature, res->nsect, + res->status, res->error, res->nsect, res->lbal, res->lbam, res->lbah, res->hob_feature, res->hob_nsect, res->hob_lbal, res->hob_lbam, res->hob_lbah, @@ -2380,28 +2380,28 @@ static void ata_eh_link_report(struct ata_link *link) qc->err_mask & AC_ERR_NCQ ? " " : ""); #ifdef CONFIG_ATA_VERBOSE_ERROR - if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ | - ATA_SENSE | ATA_ERR)) { - if (res->command & ATA_BUSY) + if (res->status & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ | + ATA_SENSE | ATA_ERR)) { + if (res->status & ATA_BUSY) ata_dev_err(qc->dev, "status: { Busy }\n"); else ata_dev_err(qc->dev, "status: { %s%s%s%s%s}\n", - res->command & ATA_DRDY ? "DRDY " : "", - res->command & ATA_DF ? "DF " : "", - res->command & ATA_DRQ ? "DRQ " : "", - res->command & ATA_SENSE ? "SENSE " : "", - res->command & ATA_ERR ? "ERR " : ""); + res->status & ATA_DRDY ? "DRDY " : "", + res->status & ATA_DF ? "DF " : "", + res->status & ATA_DRQ ? "DRQ " : "", + res->status & ATA_SENSE ? "SENSE " : "", + res->status & ATA_ERR ? "ERR " : ""); } if (cmd->command != ATA_CMD_PACKET && - (res->feature & (ATA_ICRC | ATA_UNC | ATA_AMNF | - ATA_IDNF | ATA_ABORTED))) + (res->error & (ATA_ICRC | ATA_UNC | ATA_AMNF | ATA_IDNF | + ATA_ABORTED))) ata_dev_err(qc->dev, "error: { %s%s%s%s%s}\n", - res->feature & ATA_ICRC ? "ICRC " : "", - res->feature & ATA_UNC ? 
"UNC " : "", - res->feature & ATA_AMNF ? "AMNF " : "", - res->feature & ATA_IDNF ? "IDNF " : "", - res->feature & ATA_ABORTED ? "ABRT " : ""); + res->error & ATA_ICRC ? "ICRC " : "", + res->error & ATA_UNC ? "UNC " : "", + res->error & ATA_AMNF ? "AMNF " : "", + res->error & ATA_IDNF ? "IDNF " : "", + res->error & ATA_ABORTED ? "ABRT " : ""); #endif } } diff --git a/drivers/ata/libata-sata.c b/drivers/ata/libata-sata.c index 8f3ff830ab0c..b5aa525d8760 100644 --- a/drivers/ata/libata-sata.c +++ b/drivers/ata/libata-sata.c @@ -191,8 +191,8 @@ EXPORT_SYMBOL_GPL(ata_tf_to_fis); void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf) { - tf->command = fis[2]; /* status */ - tf->feature = fis[3]; /* error */ + tf->status = fis[2]; + tf->error = fis[3]; tf->lbal = fis[4]; tf->lbam = fis[5]; @@ -1402,8 +1402,8 @@ static int ata_eh_read_log_10h(struct ata_device *dev, *tag = buf[0] & 0x1f; - tf->command = buf[2]; - tf->feature = buf[3]; + tf->status = buf[2]; + tf->error = buf[3]; tf->lbal = buf[4]; tf->lbam = buf[5]; tf->lbah = buf[6]; @@ -1413,7 +1413,8 @@ static int ata_eh_read_log_10h(struct ata_device *dev, tf->hob_lbah = buf[10]; tf->nsect = buf[12]; tf->hob_nsect = buf[13]; - if (dev->class == ATA_DEV_ZAC && ata_id_has_ncq_autosense(dev->id)) + if (dev->class == ATA_DEV_ZAC && ata_id_has_ncq_autosense(dev->id) && + (tf->status & ATA_SENSE)) tf->auxiliary = buf[14] << 16 | buf[15] << 8 | buf[16]; return 0; @@ -1477,8 +1478,12 @@ void ata_eh_analyze_ncq_error(struct ata_link *link) memcpy(&qc->result_tf, &tf, sizeof(tf)); qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48; qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ; - if (dev->class == ATA_DEV_ZAC && - ((qc->result_tf.command & ATA_SENSE) || qc->result_tf.auxiliary)) { + + /* + * If the device supports NCQ autosense, ata_eh_read_log_10h() will have + * stored the sense data in qc->result_tf.auxiliary. + */ + if (qc->result_tf.auxiliary) { char sense_key, asc, ascq; sense_key = (qc->result_tf.auxiliary >> 16) & 0xff; diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 061d2f8feeb5..4d8129640d60 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -671,7 +671,7 @@ static void ata_qc_set_pc_nbytes(struct ata_queued_cmd *qc) */ static void ata_dump_status(unsigned id, struct ata_taskfile *tf) { - u8 stat = tf->command, err = tf->feature; + u8 stat = tf->status, err = tf->error; pr_warn("ata%u: status=0x%02x { ", id, stat); if (stat & ATA_BUSY) { @@ -867,8 +867,8 @@ static void ata_gen_passthru_sense(struct ata_queued_cmd *qc) * onto sense key, asc & ascq. */ if (qc->err_mask || - tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) { - ata_to_sense_error(qc->ap->print_id, tf->command, tf->feature, + tf->status & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) { + ata_to_sense_error(qc->ap->print_id, tf->status, tf->error, &sense_key, &asc, &ascq, verbose); ata_scsi_set_sense(qc->dev, cmd, sense_key, asc, ascq); } else { @@ -897,13 +897,13 @@ static void ata_gen_passthru_sense(struct ata_queued_cmd *qc) * Copy registers into sense buffer. 
*/ desc[2] = 0x00; - desc[3] = tf->feature; /* == error reg */ + desc[3] = tf->error; desc[5] = tf->nsect; desc[7] = tf->lbal; desc[9] = tf->lbam; desc[11] = tf->lbah; desc[12] = tf->device; - desc[13] = tf->command; /* == status reg */ + desc[13] = tf->status; /* * Fill in Extend bit, and the high order bytes @@ -918,8 +918,8 @@ static void ata_gen_passthru_sense(struct ata_queued_cmd *qc) } } else { /* Fixed sense format */ - desc[0] = tf->feature; - desc[1] = tf->command; /* status */ + desc[0] = tf->error; + desc[1] = tf->status; desc[2] = tf->device; desc[3] = tf->nsect; desc[7] = 0; @@ -968,14 +968,14 @@ static void ata_gen_ata_sense(struct ata_queued_cmd *qc) * onto sense key, asc & ascq. */ if (qc->err_mask || - tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) { - ata_to_sense_error(qc->ap->print_id, tf->command, tf->feature, + tf->status & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) { + ata_to_sense_error(qc->ap->print_id, tf->status, tf->error, &sense_key, &asc, &ascq, verbose); ata_scsi_set_sense(dev, cmd, sense_key, asc, ascq); } else { /* Could not decode error */ ata_dev_warn(dev, "could not decode error status 0x%x err_mask 0x%x\n", - tf->command, qc->err_mask); + tf->status, qc->err_mask); ata_scsi_set_sense(dev, cmd, ABORTED_COMMAND, 0, 0); return; } @@ -2490,7 +2490,7 @@ static void atapi_request_sense(struct ata_queued_cmd *qc) /* fill these in, for the case where they are -not- overwritten */ cmd->sense_buffer[0] = 0x70; - cmd->sense_buffer[2] = qc->tf.feature >> 4; + cmd->sense_buffer[2] = qc->tf.error >> 4; ata_qc_reinit(qc); diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c index b71ea4a680b0..8409e53b7b7a 100644 --- a/drivers/ata/libata-sff.c +++ b/drivers/ata/libata-sff.c @@ -457,8 +457,8 @@ void ata_sff_tf_read(struct ata_port *ap, struct ata_taskfile *tf) { struct ata_ioports *ioaddr = &ap->ioaddr; - tf->command = ata_sff_check_status(ap); - tf->feature = ioread8(ioaddr->error_addr); + tf->status = ata_sff_check_status(ap); + tf->error = ioread8(ioaddr->error_addr); tf->nsect = ioread8(ioaddr->nsect_addr); tf->lbal = ioread8(ioaddr->lbal_addr); tf->lbam = ioread8(ioaddr->lbam_addr); @@ -1837,7 +1837,7 @@ unsigned int ata_sff_dev_classify(struct ata_device *dev, int present, memset(&tf, 0, sizeof(tf)); ap->ops->sff_tf_read(ap, &tf); - err = tf.feature; + err = tf.error; if (r_err) *r_err = err; diff --git a/drivers/ata/pata_ep93xx.c b/drivers/ata/pata_ep93xx.c index 46208ececbb6..3fc26026014e 100644 --- a/drivers/ata/pata_ep93xx.c +++ b/drivers/ata/pata_ep93xx.c @@ -416,8 +416,8 @@ static void ep93xx_pata_tf_read(struct ata_port *ap, struct ata_taskfile *tf) { struct ep93xx_pata_data *drv_data = ap->host->private_data; - tf->command = ep93xx_pata_check_status(ap); - tf->feature = ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_FEATURE); + tf->status = ep93xx_pata_check_status(ap); + tf->error = ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_FEATURE); tf->nsect = ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_NSECT); tf->lbal = ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_LBAL); tf->lbam = ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_LBAM); diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c index 99c63087c8ae..17b557c91e1c 100644 --- a/drivers/ata/pata_ixp4xx_cf.c +++ b/drivers/ata/pata_ixp4xx_cf.c @@ -114,7 +114,7 @@ static void ixp4xx_set_piomode(struct ata_port *ap, struct ata_device *adev) { struct ixp4xx_pata *ixpp = ap->host->private_data; - ata_dev_printk(adev, KERN_INFO, "configured for PIO%d 8bit\n", + ata_dev_info(adev, 
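The libata hunks above move the device's Status and Error register values out of the misnamed taskfile fields ("command" and "feature") into new "status" and "error" fields. A minimal sketch of the consumer side, using an invented helper name but existing libata/ATA bit definitions (AC_ERR_DEV, ATA_ABORTED, ATA_ERR, ATA_DF):

#include <linux/libata.h>

/* Sketch only, not part of the patch: after the rename the Status
 * register is read from tf->status and the Error register from
 * tf->error. */
static bool example_cmd_was_aborted(const struct ata_taskfile *tf,
                                    unsigned int err_mask)
{
        /* device-reported failure with the ABORTED bit set in Error */
        return err_mask == AC_ERR_DEV && (tf->error & ATA_ABORTED);
}

static bool example_cmd_failed(const struct ata_taskfile *tf)
{
        /* ERR or Device Fault set in Status */
        return tf->status & (ATA_ERR | ATA_DF);
}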
"configured for PIO%d 8bit\n", adev->pio_mode - XFER_PIO_0); ixp4xx_set_8bit_timing(ixpp, adev->pio_mode); } @@ -132,8 +132,8 @@ static unsigned int ixp4xx_mmio_data_xfer(struct ata_queued_cmd *qc, struct ixp4xx_pata *ixpp = ap->host->private_data; unsigned long flags; - ata_dev_printk(adev, KERN_DEBUG, "%s %d bytes\n", (rw == READ) ? "READ" : "WRITE", - buflen); + ata_dev_dbg(adev, "%s %d bytes\n", (rw == READ) ? "READ" : "WRITE", + buflen); spin_lock_irqsave(ap->lock, flags); /* set the expansion bus in 16bit mode and restore diff --git a/drivers/ata/pata_ns87415.c b/drivers/ata/pata_ns87415.c index f4949e704356..9dd6bffefb48 100644 --- a/drivers/ata/pata_ns87415.c +++ b/drivers/ata/pata_ns87415.c @@ -264,8 +264,8 @@ void ns87560_tf_read(struct ata_port *ap, struct ata_taskfile *tf) { struct ata_ioports *ioaddr = &ap->ioaddr; - tf->command = ns87560_check_status(ap); - tf->feature = ioread8(ioaddr->error_addr); + tf->status = ns87560_check_status(ap); + tf->error = ioread8(ioaddr->error_addr); tf->nsect = ioread8(ioaddr->nsect_addr); tf->lbal = ioread8(ioaddr->lbal_addr); tf->lbam = ioread8(ioaddr->lbam_addr); diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c index 4cc8a1027888..6c9f2efcedc1 100644 --- a/drivers/ata/pata_octeon_cf.c +++ b/drivers/ata/pata_octeon_cf.c @@ -386,7 +386,7 @@ static void octeon_cf_tf_read16(struct ata_port *ap, struct ata_taskfile *tf) void __iomem *base = ap->ioaddr.data_addr; blob = __raw_readw(base + 0xc); - tf->feature = blob >> 8; + tf->error = blob >> 8; blob = __raw_readw(base + 2); tf->nsect = blob & 0xff; @@ -398,7 +398,7 @@ static void octeon_cf_tf_read16(struct ata_port *ap, struct ata_taskfile *tf) blob = __raw_readw(base + 6); tf->device = blob & 0xff; - tf->command = blob >> 8; + tf->status = blob >> 8; if (tf->flags & ATA_TFLAG_LBA48) { if (likely(ap->ioaddr.ctl_addr)) { diff --git a/drivers/ata/pata_samsung_cf.c b/drivers/ata/pata_samsung_cf.c index 3da0e8e30286..149d771c61d6 100644 --- a/drivers/ata/pata_samsung_cf.c +++ b/drivers/ata/pata_samsung_cf.c @@ -213,7 +213,7 @@ static void pata_s3c_tf_read(struct ata_port *ap, struct ata_taskfile *tf) { struct ata_ioports *ioaddr = &ap->ioaddr; - tf->feature = ata_inb(ap->host, ioaddr->error_addr); + tf->error = ata_inb(ap->host, ioaddr->error_addr); tf->nsect = ata_inb(ap->host, ioaddr->nsect_addr); tf->lbal = ata_inb(ap->host, ioaddr->lbal_addr); tf->lbam = ata_inb(ap->host, ioaddr->lbam_addr); diff --git a/drivers/ata/sata_highbank.c b/drivers/ata/sata_highbank.c index 8440203e835e..f9bb3be4b939 100644 --- a/drivers/ata/sata_highbank.c +++ b/drivers/ata/sata_highbank.c @@ -400,7 +400,7 @@ static int ahci_highbank_hardreset(struct ata_link *link, unsigned int *class, /* clear D2H reception area to properly wait for D2H FIS */ ata_tf_init(link->device, &tf); - tf.command = ATA_BUSY; + tf.status = ATA_BUSY; ata_tf_to_fis(&tf, 0, 0, d2h_fis); do { diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c index e517bd8822a5..659f1a903298 100644 --- a/drivers/ata/sata_inic162x.c +++ b/drivers/ata/sata_inic162x.c @@ -559,13 +559,13 @@ static void inic_tf_read(struct ata_port *ap, struct ata_taskfile *tf) { void __iomem *port_base = inic_port_base(ap); - tf->feature = readb(port_base + PORT_TF_FEATURE); + tf->error = readb(port_base + PORT_TF_FEATURE); tf->nsect = readb(port_base + PORT_TF_NSECT); tf->lbal = readb(port_base + PORT_TF_LBAL); tf->lbam = readb(port_base + PORT_TF_LBAM); tf->lbah = readb(port_base + PORT_TF_LBAH); tf->device = readb(port_base + 
PORT_TF_DEVICE); - tf->command = readb(port_base + PORT_TF_COMMAND); + tf->status = readb(port_base + PORT_TF_COMMAND); } static bool inic_qc_fill_rtf(struct ata_queued_cmd *qc) @@ -582,11 +582,11 @@ static bool inic_qc_fill_rtf(struct ata_queued_cmd *qc) */ inic_tf_read(qc->ap, &tf); - if (!(tf.command & ATA_ERR)) + if (!(tf.status & ATA_ERR)) return false; - rtf->command = tf.command; - rtf->feature = tf.feature; + rtf->status = tf.status; + rtf->error = tf.error; return true; } diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c index 44b0ed8f6bb8..9759e24f718f 100644 --- a/drivers/ata/sata_rcar.c +++ b/drivers/ata/sata_rcar.c @@ -417,8 +417,8 @@ static void sata_rcar_tf_read(struct ata_port *ap, struct ata_taskfile *tf) { struct ata_ioports *ioaddr = &ap->ioaddr; - tf->command = sata_rcar_check_status(ap); - tf->feature = ioread32(ioaddr->error_addr); + tf->status = sata_rcar_check_status(ap); + tf->error = ioread32(ioaddr->error_addr); tf->nsect = ioread32(ioaddr->nsect_addr); tf->lbal = ioread32(ioaddr->lbal_addr); tf->lbam = ioread32(ioaddr->lbam_addr); diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c index f8552559db7f..2e3418a82b44 100644 --- a/drivers/ata/sata_svw.c +++ b/drivers/ata/sata_svw.c @@ -194,24 +194,24 @@ static void k2_sata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) static void k2_sata_tf_read(struct ata_port *ap, struct ata_taskfile *tf) { struct ata_ioports *ioaddr = &ap->ioaddr; - u16 nsect, lbal, lbam, lbah, feature; + u16 nsect, lbal, lbam, lbah, error; - tf->command = k2_stat_check_status(ap); + tf->status = k2_stat_check_status(ap); tf->device = readw(ioaddr->device_addr); - feature = readw(ioaddr->error_addr); + error = readw(ioaddr->error_addr); nsect = readw(ioaddr->nsect_addr); lbal = readw(ioaddr->lbal_addr); lbam = readw(ioaddr->lbam_addr); lbah = readw(ioaddr->lbah_addr); - tf->feature = feature; + tf->error = error; tf->nsect = nsect; tf->lbal = lbal; tf->lbam = lbam; tf->lbah = lbah; if (tf->flags & ATA_TFLAG_LBA48) { - tf->hob_feature = feature >> 8; + tf->hob_feature = error >> 8; tf->hob_nsect = nsect >> 8; tf->hob_lbal = lbal >> 8; tf->hob_lbam = lbam >> 8; diff --git a/drivers/ata/sata_vsc.c b/drivers/ata/sata_vsc.c index 8fa952cb9f7f..87e4ed66b306 100644 --- a/drivers/ata/sata_vsc.c +++ b/drivers/ata/sata_vsc.c @@ -183,24 +183,24 @@ static void vsc_sata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) static void vsc_sata_tf_read(struct ata_port *ap, struct ata_taskfile *tf) { struct ata_ioports *ioaddr = &ap->ioaddr; - u16 nsect, lbal, lbam, lbah, feature; + u16 nsect, lbal, lbam, lbah, error; - tf->command = ata_sff_check_status(ap); + tf->status = ata_sff_check_status(ap); tf->device = readw(ioaddr->device_addr); - feature = readw(ioaddr->error_addr); + error = readw(ioaddr->error_addr); nsect = readw(ioaddr->nsect_addr); lbal = readw(ioaddr->lbal_addr); lbam = readw(ioaddr->lbam_addr); lbah = readw(ioaddr->lbah_addr); - tf->feature = feature; + tf->error = error; tf->nsect = nsect; tf->lbal = lbal; tf->lbam = lbam; tf->lbah = lbah; if (tf->flags & ATA_TFLAG_LBA48) { - tf->hob_feature = feature >> 8; + tf->hob_feature = error >> 8; tf->hob_nsect = nsect >> 8; tf->hob_lbal = lbal >> 8; tf->hob_lbam = lbam >> 8; diff --git a/drivers/base/class.c b/drivers/base/class.c index 7476f393df97..0e44a68e90a0 100644 --- a/drivers/base/class.c +++ b/drivers/base/class.c @@ -192,6 +192,11 @@ int __class_register(struct class *cls, struct lock_class_key *key) } error = class_add_groups(class_get(cls), 
cls->class_groups); class_put(cls); + if (error) { + kobject_del(&cp->subsys.kobj); + kfree_const(cp->subsys.kobj.name); + kfree(cp); + } return error; } EXPORT_SYMBOL_GPL(__class_register); diff --git a/drivers/base/dd.c b/drivers/base/dd.c index 63cc01118810..060348125635 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c @@ -1127,7 +1127,11 @@ static int __driver_attach(struct device *dev, void *data) return 0; } else if (ret < 0) { dev_dbg(dev, "Bus failed to match device: %d\n", ret); - return ret; + /* + * Driver could not match with device, but may match with + * another device on the bus. + */ + return 0; } /* ret > 0 means positive match */ if (driver_allows_async_probing(drv)) { diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index 3179c9265471..c1142a7a4fe6 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -484,7 +484,17 @@ static int rpm_idle(struct device *dev, int rpmflags) dev->power.idle_notification = true; - retval = __rpm_callback(callback, dev); + if (dev->power.irq_safe) + spin_unlock(&dev->power.lock); + else + spin_unlock_irq(&dev->power.lock); + + retval = callback(dev); + + if (dev->power.irq_safe) + spin_lock(&dev->power.lock); + else + spin_lock_irq(&dev->power.lock); dev->power.idle_notification = false; wake_up_all(&dev->power.wait_queue); diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index f4e38c208b9f..eaf20a332401 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -2244,7 +2244,8 @@ void drbd_destroy_device(struct kref *kref) kref_put(&peer_device->connection->kref, drbd_destroy_connection); kfree(peer_device); } - memset(device, 0xfd, sizeof(*device)); + if (device->submit.wq) + destroy_workqueue(device->submit.wq); kfree(device); kref_put(&resource->kref, drbd_destroy_resource); } @@ -2336,7 +2337,6 @@ void drbd_destroy_resource(struct kref *kref) idr_destroy(&resource->devices); free_cpumask_var(resource->cpu_mask); kfree(resource->name); - memset(resource, 0xf2, sizeof(*resource)); kfree(resource); } @@ -2677,7 +2677,6 @@ void drbd_destroy_connection(struct kref *kref) drbd_free_socket(&connection->data); kfree(connection->int_dig_in); kfree(connection->int_dig_vv); - memset(connection, 0xfc, sizeof(*connection)); kfree(connection); kref_put(&resource->kref, drbd_destroy_resource); } @@ -2800,7 +2799,7 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig err = add_disk(disk); if (err) - goto out_idr_remove_from_resource; + goto out_destroy_workqueue; /* inherit the connection state */ device->state.conn = first_connection(resource)->cstate; @@ -2814,6 +2813,8 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig drbd_debugfs_device_add(device); return NO_ERROR; +out_destroy_workqueue: + destroy_workqueue(device->submit.wq); out_idr_remove_from_resource: for_each_connection_safe(connection, n, resource) { peer_device = idr_remove(&connection->peer_devices, vnr); diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index db0b3e8982fe..4dc25a123d94 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -4587,8 +4587,10 @@ static int __init do_floppy_init(void) goto out_put_disk; err = floppy_alloc_disk(drive, 0); - if (err) + if (err) { + blk_mq_free_tag_set(&tag_sets[drive]); goto out_put_disk; + } timer_setup(&motor_off_timer[drive], motor_off_callback, 0); } diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 79e485949b60..68a0c0fe64dd 100644 
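A recurring shape in the fixes above (__class_register, drbd_create_device, do_floppy_init): everything acquired before a failing step has to be released again, in reverse order, on the error path. A generic sketch of that unwind pattern; all function names here are invented stand-ins:

int register_a(void);           /* placeholders for real setup calls */
int register_b(void);
int register_c(void);
void unregister_a(void);
void unregister_b(void);

static int example_init(void)
{
        int err;

        err = register_a();
        if (err)
                return err;

        err = register_b();
        if (err)
                goto err_unregister_a;

        err = register_c();
        if (err)
                goto err_unregister_b;

        return 0;

err_unregister_b:
        unregister_b();         /* undo in reverse order of setup */
err_unregister_a:
        unregister_a();
        return err;
}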
--- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -2091,7 +2091,16 @@ static const struct block_device_operations lo_fops = { /* * And now the modules code and kernel interface. */ -static int max_loop; + +/* + * If max_loop is specified, create that many devices upfront. + * This also becomes a hard limit. If max_loop is not specified, + * create CONFIG_BLK_DEV_LOOP_MIN_COUNT loop devices at module + * init time. Loop devices can be requested on-demand with the + * /dev/loop-control interface, or be instantiated by accessing + * a 'dead' device node. + */ +static int max_loop = CONFIG_BLK_DEV_LOOP_MIN_COUNT; module_param(max_loop, int, 0444); MODULE_PARM_DESC(max_loop, "Maximum number of loop devices"); module_param(max_part, int, 0444); @@ -2536,7 +2545,7 @@ MODULE_ALIAS("devname:loop-control"); static int __init loop_init(void) { - int i, nr; + int i; int err; part_shift = 0; @@ -2564,19 +2573,6 @@ static int __init loop_init(void) goto err_out; } - /* - * If max_loop is specified, create that many devices upfront. - * This also becomes a hard limit. If max_loop is not specified, - * create CONFIG_BLK_DEV_LOOP_MIN_COUNT loop devices at module - * init time. Loop devices can be requested on-demand with the - * /dev/loop-control interface, or be instantiated by accessing - * a 'dead' device node. - */ - if (max_loop) - nr = max_loop; - else - nr = CONFIG_BLK_DEV_LOOP_MIN_COUNT; - err = misc_register(&loop_misc); if (err < 0) goto err_out; @@ -2588,7 +2584,7 @@ static int __init loop_init(void) } /* pre-create number of devices given by config or max_loop */ - for (i = 0; i < nr; i++) + for (i = 0; i < max_loop; i++) loop_add(i); printk(KERN_INFO "loop: module loaded\n"); diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c index de3d851d85e7..d707aa63e944 100644 --- a/drivers/bluetooth/btintel.c +++ b/drivers/bluetooth/btintel.c @@ -2353,7 +2353,7 @@ static int btintel_setup_combined(struct hci_dev *hdev) */ err = btintel_read_version(hdev, &ver); if (err) - return err; + break; /* Apply the device specific HCI quirks * @@ -2394,7 +2394,8 @@ static int btintel_setup_combined(struct hci_dev *hdev) default: bt_dev_err(hdev, "Unsupported Intel hw variant (%u)", INTEL_HW_VARIANT(ver_tlv.cnvi_bt)); - return -EINVAL; + err = -EINVAL; + break; } exit_error: diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 69380cb03dd3..9c32263f872b 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -735,13 +735,13 @@ static inline void btusb_free_frags(struct btusb_data *data) spin_lock_irqsave(&data->rxlock, flags); - kfree_skb(data->evt_skb); + dev_kfree_skb_irq(data->evt_skb); data->evt_skb = NULL; - kfree_skb(data->acl_skb); + dev_kfree_skb_irq(data->acl_skb); data->acl_skb = NULL; - kfree_skb(data->sco_skb); + dev_kfree_skb_irq(data->sco_skb); data->sco_skb = NULL; spin_unlock_irqrestore(&data->rxlock, flags); diff --git a/drivers/bluetooth/hci_bcsp.c b/drivers/bluetooth/hci_bcsp.c index cf4a56095817..8055f63603f4 100644 --- a/drivers/bluetooth/hci_bcsp.c +++ b/drivers/bluetooth/hci_bcsp.c @@ -378,7 +378,7 @@ static void bcsp_pkt_cull(struct bcsp_struct *bcsp) i++; __skb_unlink(skb, &bcsp->unack); - kfree_skb(skb); + dev_kfree_skb_irq(skb); } if (skb_queue_empty(&bcsp->unack)) diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c index c43911488880..a77f70464db6 100644 --- a/drivers/bluetooth/hci_h5.c +++ b/drivers/bluetooth/hci_h5.c @@ -313,7 +313,7 @@ static void h5_pkt_cull(struct h5 *h5) break; __skb_unlink(skb, 
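The btusb and hci_bcsp hunks above (and the hci_h5/hci_ll/hci_qca ones that follow) replace kfree_skb() with dev_kfree_skb_irq() because the buffers are dropped while a spinlock is held with interrupts disabled, or from hard-IRQ context, where kfree_skb() is not allowed; dev_kfree_skb_irq() defers the actual free to softirq context. A sketch with an invented device structure:

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>

/* struct example_dev is made up for illustration only */
struct example_dev {
        spinlock_t lock;
        struct sk_buff *pending_skb;
};

static void example_drop_pending(struct example_dev *edev)
{
        unsigned long flags;

        spin_lock_irqsave(&edev->lock, flags);
        if (edev->pending_skb) {
                /* IRQs are off here: defer the free instead of kfree_skb() */
                dev_kfree_skb_irq(edev->pending_skb);
                edev->pending_skb = NULL;
        }
        spin_unlock_irqrestore(&edev->lock, flags);
}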
&h5->unack); - kfree_skb(skb); + dev_kfree_skb_irq(skb); } if (skb_queue_empty(&h5->unack)) diff --git a/drivers/bluetooth/hci_ll.c b/drivers/bluetooth/hci_ll.c index eb1e736efeeb..e4e5b26e2c33 100644 --- a/drivers/bluetooth/hci_ll.c +++ b/drivers/bluetooth/hci_ll.c @@ -345,7 +345,7 @@ static int ll_enqueue(struct hci_uart *hu, struct sk_buff *skb) default: BT_ERR("illegal hcill state: %ld (losing packet)", ll->hcill_state); - kfree_skb(skb); + dev_kfree_skb_irq(skb); break; } diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c index ed91af4319b5..5d0428fc854f 100644 --- a/drivers/bluetooth/hci_qca.c +++ b/drivers/bluetooth/hci_qca.c @@ -912,7 +912,7 @@ static int qca_enqueue(struct hci_uart *hu, struct sk_buff *skb) default: BT_ERR("Illegal tx state: %d (losing packet)", qca->tx_ibs_state); - kfree_skb(skb); + dev_kfree_skb_irq(skb); break; } diff --git a/drivers/char/hw_random/amd-rng.c b/drivers/char/hw_random/amd-rng.c index c22d4184bb61..0555e3838bce 100644 --- a/drivers/char/hw_random/amd-rng.c +++ b/drivers/char/hw_random/amd-rng.c @@ -143,15 +143,19 @@ static int __init amd_rng_mod_init(void) found: err = pci_read_config_dword(pdev, 0x58, &pmbase); if (err) - return err; + goto put_dev; pmbase &= 0x0000FF00; - if (pmbase == 0) - return -EIO; + if (pmbase == 0) { + err = -EIO; + goto put_dev; + } priv = kzalloc(sizeof(*priv), GFP_KERNEL); - if (!priv) - return -ENOMEM; + if (!priv) { + err = -ENOMEM; + goto put_dev; + } if (!request_region(pmbase + PMBASE_OFFSET, PMBASE_SIZE, DRV_NAME)) { dev_err(&pdev->dev, DRV_NAME " region 0x%x already in use!\n", @@ -185,6 +189,8 @@ err_iomap: release_region(pmbase + PMBASE_OFFSET, PMBASE_SIZE); out: kfree(priv); +put_dev: + pci_dev_put(pdev); return err; } @@ -200,6 +206,8 @@ static void __exit amd_rng_mod_exit(void) release_region(priv->pmbase + PMBASE_OFFSET, PMBASE_SIZE); + pci_dev_put(priv->pcidev); + kfree(priv); } diff --git a/drivers/char/hw_random/geode-rng.c b/drivers/char/hw_random/geode-rng.c index 138ce434f86b..12fbe8091831 100644 --- a/drivers/char/hw_random/geode-rng.c +++ b/drivers/char/hw_random/geode-rng.c @@ -51,6 +51,10 @@ static const struct pci_device_id pci_tbl[] = { }; MODULE_DEVICE_TABLE(pci, pci_tbl); +struct amd_geode_priv { + struct pci_dev *pcidev; + void __iomem *membase; +}; static int geode_rng_data_read(struct hwrng *rng, u32 *data) { @@ -90,6 +94,7 @@ static int __init geode_rng_init(void) const struct pci_device_id *ent; void __iomem *mem; unsigned long rng_base; + struct amd_geode_priv *priv; for_each_pci_dev(pdev) { ent = pci_match_id(pci_tbl, pdev); @@ -97,17 +102,26 @@ static int __init geode_rng_init(void) goto found; } /* Device not found. 
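The amd-rng fix above (and the geode-rng one alongside it) is about the reference that pci_get_device()/for_each_pci_dev() take on the returned struct pci_dev: it must be dropped with pci_dev_put() on every exit path, including errors and module exit. A sketch with a hypothetical device ID and a placeholder setup function:

#include <linux/pci.h>

#define EXAMPLE_PCI_DEVICE_ID 0x1234            /* hypothetical */

int example_setup(struct pci_dev *pdev);        /* placeholder */

static int example_find_and_init(void)
{
        struct pci_dev *pdev;
        int err;

        /* takes a reference on the returned device */
        pdev = pci_get_device(PCI_VENDOR_ID_AMD, EXAMPLE_PCI_DEVICE_ID, NULL);
        if (!pdev)
                return -ENODEV;

        err = example_setup(pdev);
        if (err) {
                pci_dev_put(pdev);      /* balance the reference on failure */
                return err;
        }

        /* keep the reference; drop it with pci_dev_put() on module exit */
        return 0;
}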
*/ - goto out; + return err; found: + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) { + err = -ENOMEM; + goto put_dev; + } + rng_base = pci_resource_start(pdev, 0); if (rng_base == 0) - goto out; + goto free_priv; err = -ENOMEM; mem = ioremap(rng_base, 0x58); if (!mem) - goto out; - geode_rng.priv = (unsigned long)mem; + goto free_priv; + + geode_rng.priv = (unsigned long)priv; + priv->membase = mem; + priv->pcidev = pdev; pr_info("AMD Geode RNG detected\n"); err = hwrng_register(&geode_rng); @@ -116,20 +130,26 @@ found: err); goto err_unmap; } -out: return err; err_unmap: iounmap(mem); - goto out; +free_priv: + kfree(priv); +put_dev: + pci_dev_put(pdev); + return err; } static void __exit geode_rng_exit(void) { - void __iomem *mem = (void __iomem *)geode_rng.priv; + struct amd_geode_priv *priv; + priv = (struct amd_geode_priv *)geode_rng.priv; hwrng_unregister(&geode_rng); - iounmap(mem); + iounmap(priv->membase); + pci_dev_put(priv->pcidev); + kfree(priv); } module_init(geode_rng_init); diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c index 2badf36d4816..15c211c5d6f4 100644 --- a/drivers/char/ipmi/ipmi_msghandler.c +++ b/drivers/char/ipmi/ipmi_msghandler.c @@ -1273,6 +1273,7 @@ static void _ipmi_destroy_user(struct ipmi_user *user) unsigned long flags; struct cmd_rcvr *rcvr; struct cmd_rcvr *rcvrs = NULL; + struct module *owner; if (!acquire_ipmi_user(user, &i)) { /* @@ -1334,8 +1335,9 @@ static void _ipmi_destroy_user(struct ipmi_user *user) kfree(rcvr); } + owner = intf->owner; kref_put(&intf->refcount, intf_free); - module_put(intf->owner); + module_put(owner); } int ipmi_destroy_user(struct ipmi_user *user) @@ -3527,12 +3529,16 @@ static void deliver_smi_err_response(struct ipmi_smi *intf, struct ipmi_smi_msg *msg, unsigned char err) { + int rv; msg->rsp[0] = msg->data[0] | 4; msg->rsp[1] = msg->data[1]; msg->rsp[2] = err; msg->rsp_size = 3; - /* It's an error, so it will never requeue, no need to check return. */ - handle_one_recv_msg(intf, msg); + + /* This will never requeue, but it may ask us to free the message. */ + rv = handle_one_recv_msg(intf, msg); + if (rv == 0) + ipmi_free_smi_msg(msg); } static void cleanup_smi_msgs(struct ipmi_smi *intf) diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c index 6f3272b58ced..17255e705cb0 100644 --- a/drivers/char/ipmi/ipmi_si_intf.c +++ b/drivers/char/ipmi/ipmi_si_intf.c @@ -2152,6 +2152,20 @@ skip_fallback_noirq: } module_init(init_ipmi_si); +static void wait_msg_processed(struct smi_info *smi_info) +{ + unsigned long jiffies_now; + long time_diff; + + while (smi_info->curr_msg || (smi_info->si_state != SI_NORMAL)) { + jiffies_now = jiffies; + time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies) + * SI_USEC_PER_JIFFY); + smi_event_handler(smi_info, time_diff); + schedule_timeout_uninterruptible(1); + } +} + static void shutdown_smi(void *send_info) { struct smi_info *smi_info = send_info; @@ -2186,16 +2200,13 @@ static void shutdown_smi(void *send_info) * in the BMC. Note that timers and CPU interrupts are off, * so no need for locks. 
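The ipmi_msghandler change above reads intf->owner into a local variable before kref_put(), because the put may drop the last reference and free intf, after which dereferencing intf->owner would be a use-after-free. The same ordering rule in a sketch with invented structure and release-callback names:

#include <linux/kref.h>
#include <linux/module.h>

struct example_intf {
        struct kref refcount;
        struct module *owner;
};

void example_intf_free(struct kref *kref);      /* release callback, placeholder */

static void example_put_intf(struct example_intf *intf)
{
        struct module *owner = intf->owner;     /* copy out before the put */

        kref_put(&intf->refcount, example_intf_free);   /* may free intf */
        module_put(owner);                              /* safe: uses the copy */
}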
*/ - while (smi_info->curr_msg || (smi_info->si_state != SI_NORMAL)) { - poll(smi_info); - schedule_timeout_uninterruptible(1); - } + wait_msg_processed(smi_info); + if (smi_info->handlers) disable_si_irq(smi_info); - while (smi_info->curr_msg || (smi_info->si_state != SI_NORMAL)) { - poll(smi_info); - schedule_timeout_uninterruptible(1); - } + + wait_msg_processed(smi_info); + if (smi_info->handlers) smi_info->handlers->cleanup(smi_info->si_sm); diff --git a/drivers/char/ipmi/kcs_bmc_aspeed.c b/drivers/char/ipmi/kcs_bmc_aspeed.c index 92a37b33494c..f23c146bb740 100644 --- a/drivers/char/ipmi/kcs_bmc_aspeed.c +++ b/drivers/char/ipmi/kcs_bmc_aspeed.c @@ -404,13 +404,31 @@ static void aspeed_kcs_check_obe(struct timer_list *timer) static void aspeed_kcs_irq_mask_update(struct kcs_bmc_device *kcs_bmc, u8 mask, u8 state) { struct aspeed_kcs_bmc *priv = to_aspeed_kcs_bmc(kcs_bmc); + int rc; + u8 str; /* We don't have an OBE IRQ, emulate it */ if (mask & KCS_BMC_EVENT_TYPE_OBE) { - if (KCS_BMC_EVENT_TYPE_OBE & state) - mod_timer(&priv->obe.timer, jiffies + OBE_POLL_PERIOD); - else + if (KCS_BMC_EVENT_TYPE_OBE & state) { + /* + * Given we don't have an OBE IRQ, delay by polling briefly to see if we can + * observe such an event before returning to the caller. This is not + * incorrect because OBF may have already become clear before enabling the + * IRQ if we had one, under which circumstance no event will be propagated + * anyway. + * + * The onus is on the client to perform a race-free check that it hasn't + * missed the event. + */ + rc = read_poll_timeout_atomic(aspeed_kcs_inb, str, + !(str & KCS_BMC_STR_OBF), 1, 100, false, + &priv->kcs_bmc, priv->kcs_bmc.ioreg.str); + /* Time for the slow path? */ + if (rc == -ETIMEDOUT) + mod_timer(&priv->obe.timer, jiffies + OBE_POLL_PERIOD); + } else { del_timer(&priv->obe.timer); + } } if (mask & KCS_BMC_EVENT_TYPE_IBF) { diff --git a/drivers/char/tpm/eventlog/acpi.c b/drivers/char/tpm/eventlog/acpi.c index 1b18ce5ebab1..0913d3eb8d51 100644 --- a/drivers/char/tpm/eventlog/acpi.c +++ b/drivers/char/tpm/eventlog/acpi.c @@ -90,16 +90,21 @@ int tpm_read_log_acpi(struct tpm_chip *chip) return -ENODEV; if (tbl->header.length < - sizeof(*tbl) + sizeof(struct acpi_tpm2_phy)) + sizeof(*tbl) + sizeof(struct acpi_tpm2_phy)) { + acpi_put_table((struct acpi_table_header *)tbl); return -ENODEV; + } tpm2_phy = (void *)tbl + sizeof(*tbl); len = tpm2_phy->log_area_minimum_length; start = tpm2_phy->log_area_start_address; - if (!start || !len) + if (!start || !len) { + acpi_put_table((struct acpi_table_header *)tbl); return -ENODEV; + } + acpi_put_table((struct acpi_table_header *)tbl); format = EFI_TCG2_EVENT_LOG_FORMAT_TCG_2; } else { /* Find TCPA entry in RSDT (ACPI_LOGICAL_ADDRESSING) */ @@ -120,8 +125,10 @@ int tpm_read_log_acpi(struct tpm_chip *chip) break; } + acpi_put_table((struct acpi_table_header *)buff); format = EFI_TCG2_EVENT_LOG_FORMAT_TCG_1_2; } + if (!len) { dev_warn(&chip->dev, "%s: TCPA log area empty\n", __func__); return -EIO; @@ -156,5 +163,4 @@ err: kfree(log->bios_event_log); log->bios_event_log = NULL; return ret; - } diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c index 18606651d1aa..16fc481d6095 100644 --- a/drivers/char/tpm/tpm_crb.c +++ b/drivers/char/tpm/tpm_crb.c @@ -252,7 +252,7 @@ static int __crb_relinquish_locality(struct device *dev, iowrite32(CRB_LOC_CTRL_RELINQUISH, &priv->regs_h->loc_ctrl); if (!crb_wait_for_reg_32(&priv->regs_h->loc_state, mask, value, TPM2_TIMEOUT_C)) { - dev_warn(dev, 
"TPM_LOC_STATE_x.requestAccess timed out\n"); + dev_warn(dev, "TPM_LOC_STATE_x.Relinquish timed out\n"); return -ETIME; } @@ -676,12 +676,16 @@ static int crb_acpi_add(struct acpi_device *device) /* Should the FIFO driver handle this? */ sm = buf->start_method; - if (sm == ACPI_TPM2_MEMORY_MAPPED) - return -ENODEV; + if (sm == ACPI_TPM2_MEMORY_MAPPED) { + rc = -ENODEV; + goto out; + } priv = devm_kzalloc(dev, sizeof(struct crb_priv), GFP_KERNEL); - if (!priv) - return -ENOMEM; + if (!priv) { + rc = -ENOMEM; + goto out; + } if (sm == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC) { if (buf->header.length < (sizeof(*buf) + sizeof(*crb_smc))) { @@ -689,7 +693,8 @@ static int crb_acpi_add(struct acpi_device *device) FW_BUG "TPM2 ACPI table has wrong size %u for start method type %d\n", buf->header.length, ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC); - return -EINVAL; + rc = -EINVAL; + goto out; } crb_smc = ACPI_ADD_PTR(struct tpm2_crb_smc, buf, sizeof(*buf)); priv->smc_func_id = crb_smc->smc_func_id; @@ -700,17 +705,23 @@ static int crb_acpi_add(struct acpi_device *device) rc = crb_map_io(device, priv, buf); if (rc) - return rc; + goto out; chip = tpmm_chip_alloc(dev, &tpm_crb); - if (IS_ERR(chip)) - return PTR_ERR(chip); + if (IS_ERR(chip)) { + rc = PTR_ERR(chip); + goto out; + } dev_set_drvdata(&chip->dev, priv); chip->acpi_dev_handle = device->handle; chip->flags = TPM_CHIP_FLAG_TPM2; - return tpm_chip_register(chip); + rc = tpm_chip_register(chip); + +out: + acpi_put_table((struct acpi_table_header *)buf); + return rc; } static int crb_acpi_remove(struct acpi_device *device) diff --git a/drivers/char/tpm/tpm_ftpm_tee.c b/drivers/char/tpm/tpm_ftpm_tee.c index 6e3235565a4d..d9daaafdd295 100644 --- a/drivers/char/tpm/tpm_ftpm_tee.c +++ b/drivers/char/tpm/tpm_ftpm_tee.c @@ -397,7 +397,13 @@ static int __init ftpm_mod_init(void) if (rc) return rc; - return driver_register(&ftpm_tee_driver.driver); + rc = driver_register(&ftpm_tee_driver.driver); + if (rc) { + platform_driver_unregister(&ftpm_tee_plat_driver); + return rc; + } + + return 0; } static void __exit ftpm_mod_exit(void) diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c index d3f2e5364c27..e53164c82808 100644 --- a/drivers/char/tpm/tpm_tis.c +++ b/drivers/char/tpm/tpm_tis.c @@ -125,6 +125,7 @@ static int check_acpi_tpm2(struct device *dev) const struct acpi_device_id *aid = acpi_match_device(tpm_acpi_tbl, dev); struct acpi_table_tpm2 *tbl; acpi_status st; + int ret = 0; if (!aid || aid->driver_data != DEVICE_IS_TPM2) return 0; @@ -132,8 +133,7 @@ static int check_acpi_tpm2(struct device *dev) /* If the ACPI TPM2 signature is matched then a global ACPI_SIG_TPM2 * table is mandatory */ - st = - acpi_get_table(ACPI_SIG_TPM2, 1, (struct acpi_table_header **)&tbl); + st = acpi_get_table(ACPI_SIG_TPM2, 1, (struct acpi_table_header **)&tbl); if (ACPI_FAILURE(st) || tbl->header.length < sizeof(*tbl)) { dev_err(dev, FW_BUG "failed to get TPM2 ACPI table\n"); return -EINVAL; @@ -141,9 +141,10 @@ static int check_acpi_tpm2(struct device *dev) /* The tpm2_crb driver handles this device */ if (tbl->start_method != ACPI_TPM2_MEMORY_MAPPED) - return -ENODEV; + ret = -ENODEV; - return 0; + acpi_put_table((struct acpi_table_header *)tbl); + return ret; } #else static int check_acpi_tpm2(struct device *dev) diff --git a/drivers/clk/imx/clk-imx8mn.c b/drivers/clk/imx/clk-imx8mn.c index 021355a24708..52903146fdba 100644 --- a/drivers/clk/imx/clk-imx8mn.c +++ b/drivers/clk/imx/clk-imx8mn.c @@ -30,7 +30,7 @@ static const char * const 
audio_pll2_bypass_sels[] = {"audio_pll2", "audio_pll2_ static const char * const video_pll1_bypass_sels[] = {"video_pll1", "video_pll1_ref_sel", }; static const char * const dram_pll_bypass_sels[] = {"dram_pll", "dram_pll_ref_sel", }; static const char * const gpu_pll_bypass_sels[] = {"gpu_pll", "gpu_pll_ref_sel", }; -static const char * const vpu_pll_bypass_sels[] = {"vpu_pll", "vpu_pll_ref_sel", }; +static const char * const m7_alt_pll_bypass_sels[] = {"m7_alt_pll", "m7_alt_pll_ref_sel", }; static const char * const arm_pll_bypass_sels[] = {"arm_pll", "arm_pll_ref_sel", }; static const char * const sys_pll3_bypass_sels[] = {"sys_pll3", "sys_pll3_ref_sel", }; @@ -40,7 +40,7 @@ static const char * const imx8mn_a53_sels[] = {"osc_24m", "arm_pll_out", "sys_pl static const char * const imx8mn_a53_core_sels[] = {"arm_a53_div", "arm_pll_out", }; -static const char * const imx8mn_m7_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll2_250m", "vpu_pll_out", +static const char * const imx8mn_m7_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll2_250m", "m7_alt_pll_out", "sys_pll1_800m", "audio_pll1_out", "video_pll1_out", "sys_pll3_out", }; static const char * const imx8mn_gpu_core_sels[] = {"osc_24m", "gpu_pll_out", "sys_pll1_800m", @@ -108,27 +108,27 @@ static const char * const imx8mn_disp_pixel_sels[] = {"osc_24m", "video_pll1_out "sys_pll3_out", "clk_ext4", }; static const char * const imx8mn_sai2_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out", - "video_pll1_out", "sys_pll1_133m", "osc_hdmi", - "clk_ext3", "clk_ext4", }; + "video_pll1_out", "sys_pll1_133m", "dummy", + "clk_ext2", "clk_ext3", }; static const char * const imx8mn_sai3_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out", - "video_pll1_out", "sys_pll1_133m", "osc_hdmi", + "video_pll1_out", "sys_pll1_133m", "dummy", "clk_ext3", "clk_ext4", }; static const char * const imx8mn_sai5_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out", - "video_pll1_out", "sys_pll1_133m", "osc_hdmi", + "video_pll1_out", "sys_pll1_133m", "dummy", "clk_ext2", "clk_ext3", }; static const char * const imx8mn_sai6_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out", - "video_pll1_out", "sys_pll1_133m", "osc_hdmi", + "video_pll1_out", "sys_pll1_133m", "dummy", "clk_ext3", "clk_ext4", }; static const char * const imx8mn_sai7_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out", - "video_pll1_out", "sys_pll1_133m", "osc_hdmi", + "video_pll1_out", "sys_pll1_133m", "dummy", "clk_ext3", "clk_ext4", }; static const char * const imx8mn_spdif1_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out", - "video_pll1_out", "sys_pll1_133m", "osc_hdmi", + "video_pll1_out", "sys_pll1_133m", "dummy", "clk_ext2", "clk_ext3", }; static const char * const imx8mn_enet_ref_sels[] = {"osc_24m", "sys_pll2_125m", "sys_pll2_50m", @@ -140,8 +140,8 @@ static const char * const imx8mn_enet_timer_sels[] = {"osc_24m", "sys_pll2_100m" "clk_ext4", "video_pll1_out", }; static const char * const imx8mn_enet_phy_sels[] = {"osc_24m", "sys_pll2_50m", "sys_pll2_125m", - "sys_pll2_200m", "sys_pll2_500m", "video_pll1_out", - "audio_pll2_out", }; + "sys_pll2_200m", "sys_pll2_500m", "audio_pll1_out", + "video_pll_out", "audio_pll2_out", }; static const char * const imx8mn_nand_sels[] = {"osc_24m", "sys_pll2_500m", "audio_pll1_out", "sys_pll1_400m", "audio_pll2_out", "sys_pll3_out", @@ -228,10 +228,10 @@ static const char * const imx8mn_pwm4_sels[] = {"osc_24m", "sys_pll2_100m", "sys "sys_pll1_80m", "video_pll1_out", }; static const char * const imx8mn_wdog_sels[] = {"osc_24m", 
"sys_pll1_133m", "sys_pll1_160m", - "vpu_pll_out", "sys_pll2_125m", "sys_pll3_out", + "m7_alt_pll_out", "sys_pll2_125m", "sys_pll3_out", "sys_pll1_80m", "sys_pll2_166m", }; -static const char * const imx8mn_wrclk_sels[] = {"osc_24m", "sys_pll1_40m", "vpu_pll_out", +static const char * const imx8mn_wrclk_sels[] = {"osc_24m", "sys_pll1_40m", "m7_alt_pll_out", "sys_pll3_out", "sys_pll2_200m", "sys_pll1_266m", "sys_pll2_500m", "sys_pll1_100m", }; @@ -328,7 +328,7 @@ static int imx8mn_clocks_probe(struct platform_device *pdev) hws[IMX8MN_VIDEO_PLL1_REF_SEL] = imx_clk_hw_mux("video_pll1_ref_sel", base + 0x28, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels)); hws[IMX8MN_DRAM_PLL_REF_SEL] = imx_clk_hw_mux("dram_pll_ref_sel", base + 0x50, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels)); hws[IMX8MN_GPU_PLL_REF_SEL] = imx_clk_hw_mux("gpu_pll_ref_sel", base + 0x64, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels)); - hws[IMX8MN_VPU_PLL_REF_SEL] = imx_clk_hw_mux("vpu_pll_ref_sel", base + 0x74, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels)); + hws[IMX8MN_M7_ALT_PLL_REF_SEL] = imx_clk_hw_mux("m7_alt_pll_ref_sel", base + 0x74, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels)); hws[IMX8MN_ARM_PLL_REF_SEL] = imx_clk_hw_mux("arm_pll_ref_sel", base + 0x84, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels)); hws[IMX8MN_SYS_PLL3_REF_SEL] = imx_clk_hw_mux("sys_pll3_ref_sel", base + 0x114, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels)); @@ -337,7 +337,7 @@ static int imx8mn_clocks_probe(struct platform_device *pdev) hws[IMX8MN_VIDEO_PLL1] = imx_clk_hw_pll14xx("video_pll1", "video_pll1_ref_sel", base + 0x28, &imx_1443x_pll); hws[IMX8MN_DRAM_PLL] = imx_clk_hw_pll14xx("dram_pll", "dram_pll_ref_sel", base + 0x50, &imx_1443x_dram_pll); hws[IMX8MN_GPU_PLL] = imx_clk_hw_pll14xx("gpu_pll", "gpu_pll_ref_sel", base + 0x64, &imx_1416x_pll); - hws[IMX8MN_VPU_PLL] = imx_clk_hw_pll14xx("vpu_pll", "vpu_pll_ref_sel", base + 0x74, &imx_1416x_pll); + hws[IMX8MN_M7_ALT_PLL] = imx_clk_hw_pll14xx("m7_alt_pll", "m7_alt_pll_ref_sel", base + 0x74, &imx_1416x_pll); hws[IMX8MN_ARM_PLL] = imx_clk_hw_pll14xx("arm_pll", "arm_pll_ref_sel", base + 0x84, &imx_1416x_pll); hws[IMX8MN_SYS_PLL1] = imx_clk_hw_fixed("sys_pll1", 800000000); hws[IMX8MN_SYS_PLL2] = imx_clk_hw_fixed("sys_pll2", 1000000000); @@ -349,7 +349,7 @@ static int imx8mn_clocks_probe(struct platform_device *pdev) hws[IMX8MN_VIDEO_PLL1_BYPASS] = imx_clk_hw_mux_flags("video_pll1_bypass", base + 0x28, 16, 1, video_pll1_bypass_sels, ARRAY_SIZE(video_pll1_bypass_sels), CLK_SET_RATE_PARENT); hws[IMX8MN_DRAM_PLL_BYPASS] = imx_clk_hw_mux_flags("dram_pll_bypass", base + 0x50, 16, 1, dram_pll_bypass_sels, ARRAY_SIZE(dram_pll_bypass_sels), CLK_SET_RATE_PARENT); hws[IMX8MN_GPU_PLL_BYPASS] = imx_clk_hw_mux_flags("gpu_pll_bypass", base + 0x64, 28, 1, gpu_pll_bypass_sels, ARRAY_SIZE(gpu_pll_bypass_sels), CLK_SET_RATE_PARENT); - hws[IMX8MN_VPU_PLL_BYPASS] = imx_clk_hw_mux_flags("vpu_pll_bypass", base + 0x74, 28, 1, vpu_pll_bypass_sels, ARRAY_SIZE(vpu_pll_bypass_sels), CLK_SET_RATE_PARENT); + hws[IMX8MN_M7_ALT_PLL_BYPASS] = imx_clk_hw_mux_flags("m7_alt_pll_bypass", base + 0x74, 28, 1, m7_alt_pll_bypass_sels, ARRAY_SIZE(m7_alt_pll_bypass_sels), CLK_SET_RATE_PARENT); hws[IMX8MN_ARM_PLL_BYPASS] = imx_clk_hw_mux_flags("arm_pll_bypass", base + 0x84, 28, 1, arm_pll_bypass_sels, ARRAY_SIZE(arm_pll_bypass_sels), CLK_SET_RATE_PARENT); hws[IMX8MN_SYS_PLL3_BYPASS] = imx_clk_hw_mux_flags("sys_pll3_bypass", base + 0x114, 28, 1, sys_pll3_bypass_sels, ARRAY_SIZE(sys_pll3_bypass_sels), CLK_SET_RATE_PARENT); @@ -359,7 +359,7 @@ 
static int imx8mn_clocks_probe(struct platform_device *pdev) hws[IMX8MN_VIDEO_PLL1_OUT] = imx_clk_hw_gate("video_pll1_out", "video_pll1_bypass", base + 0x28, 13); hws[IMX8MN_DRAM_PLL_OUT] = imx_clk_hw_gate("dram_pll_out", "dram_pll_bypass", base + 0x50, 13); hws[IMX8MN_GPU_PLL_OUT] = imx_clk_hw_gate("gpu_pll_out", "gpu_pll_bypass", base + 0x64, 11); - hws[IMX8MN_VPU_PLL_OUT] = imx_clk_hw_gate("vpu_pll_out", "vpu_pll_bypass", base + 0x74, 11); + hws[IMX8MN_M7_ALT_PLL_OUT] = imx_clk_hw_gate("m7_alt_pll_out", "m7_alt_pll_bypass", base + 0x74, 11); hws[IMX8MN_ARM_PLL_OUT] = imx_clk_hw_gate("arm_pll_out", "arm_pll_bypass", base + 0x84, 11); hws[IMX8MN_SYS_PLL3_OUT] = imx_clk_hw_gate("sys_pll3_out", "sys_pll3_bypass", base + 0x114, 11); diff --git a/drivers/clk/qcom/clk-krait.c b/drivers/clk/qcom/clk-krait.c index 90046428693c..e74fc81a14d0 100644 --- a/drivers/clk/qcom/clk-krait.c +++ b/drivers/clk/qcom/clk-krait.c @@ -98,6 +98,8 @@ static int krait_div2_set_rate(struct clk_hw *hw, unsigned long rate, if (d->lpl) mask = mask << (d->shift + LPL_SHIFT) | mask << d->shift; + else + mask <<= d->shift; spin_lock_irqsave(&krait_clock_reg_lock, flags); val = krait_get_l2_indirect_reg(d->offset); diff --git a/drivers/clk/qcom/gcc-sm8250.c b/drivers/clk/qcom/gcc-sm8250.c index 9755ef4888c1..a0ba37656b07 100644 --- a/drivers/clk/qcom/gcc-sm8250.c +++ b/drivers/clk/qcom/gcc-sm8250.c @@ -3267,7 +3267,7 @@ static struct gdsc usb30_prim_gdsc = { .pd = { .name = "usb30_prim_gdsc", }, - .pwrsts = PWRSTS_OFF_ON, + .pwrsts = PWRSTS_RET_ON, }; static struct gdsc usb30_sec_gdsc = { @@ -3275,7 +3275,7 @@ static struct gdsc usb30_sec_gdsc = { .pd = { .name = "usb30_sec_gdsc", }, - .pwrsts = PWRSTS_OFF_ON, + .pwrsts = PWRSTS_RET_ON, }; static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc = { diff --git a/drivers/clk/qcom/lpasscorecc-sc7180.c b/drivers/clk/qcom/lpasscorecc-sc7180.c index ac09b7b840ab..a5731994cbed 100644 --- a/drivers/clk/qcom/lpasscorecc-sc7180.c +++ b/drivers/clk/qcom/lpasscorecc-sc7180.c @@ -356,7 +356,7 @@ static const struct qcom_cc_desc lpass_audio_hm_sc7180_desc = { .num_gdscs = ARRAY_SIZE(lpass_audio_hm_sc7180_gdscs), }; -static int lpass_create_pm_clks(struct platform_device *pdev) +static int lpass_setup_runtime_pm(struct platform_device *pdev) { int ret; @@ -375,7 +375,7 @@ static int lpass_create_pm_clks(struct platform_device *pdev) if (ret < 0) dev_err(&pdev->dev, "failed to acquire iface clock\n"); - return ret; + return pm_runtime_resume_and_get(&pdev->dev); } static int lpass_core_cc_sc7180_probe(struct platform_device *pdev) @@ -384,7 +384,7 @@ static int lpass_core_cc_sc7180_probe(struct platform_device *pdev) struct regmap *regmap; int ret; - ret = lpass_create_pm_clks(pdev); + ret = lpass_setup_runtime_pm(pdev); if (ret) return ret; @@ -392,12 +392,14 @@ static int lpass_core_cc_sc7180_probe(struct platform_device *pdev) desc = &lpass_audio_hm_sc7180_desc; ret = qcom_cc_probe_by_index(pdev, 1, desc); if (ret) - return ret; + goto exit; lpass_core_cc_sc7180_regmap_config.name = "lpass_core_cc"; regmap = qcom_cc_map(pdev, &lpass_core_cc_sc7180_desc); - if (IS_ERR(regmap)) - return PTR_ERR(regmap); + if (IS_ERR(regmap)) { + ret = PTR_ERR(regmap); + goto exit; + } /* * Keep the CLK always-ON @@ -415,6 +417,7 @@ static int lpass_core_cc_sc7180_probe(struct platform_device *pdev) ret = qcom_cc_really_probe(pdev, &lpass_core_cc_sc7180_desc, regmap); pm_runtime_mark_last_busy(&pdev->dev); +exit: pm_runtime_put_autosuspend(&pdev->dev); return ret; @@ -425,14 +428,19 @@ static int 
lpass_hm_core_probe(struct platform_device *pdev) const struct qcom_cc_desc *desc; int ret; - ret = lpass_create_pm_clks(pdev); + ret = lpass_setup_runtime_pm(pdev); if (ret) return ret; lpass_core_cc_sc7180_regmap_config.name = "lpass_hm_core"; desc = &lpass_core_hm_sc7180_desc; - return qcom_cc_probe_by_index(pdev, 0, desc); + ret = qcom_cc_probe_by_index(pdev, 0, desc); + + pm_runtime_mark_last_busy(&pdev->dev); + pm_runtime_put_autosuspend(&pdev->dev); + + return ret; } static const struct of_device_id lpass_hm_sc7180_match_table[] = { diff --git a/drivers/clk/renesas/r9a06g032-clocks.c b/drivers/clk/renesas/r9a06g032-clocks.c index abc0891fd96d..3e43ae8480dd 100644 --- a/drivers/clk/renesas/r9a06g032-clocks.c +++ b/drivers/clk/renesas/r9a06g032-clocks.c @@ -386,7 +386,7 @@ static int r9a06g032_attach_dev(struct generic_pm_domain *pd, int error; int index; - while (!of_parse_phandle_with_args(np, "clocks", "#clock-cells", i, + while (!of_parse_phandle_with_args(np, "clocks", "#clock-cells", i++, &clkspec)) { if (clkspec.np != pd->dev.of_node) continue; @@ -399,7 +399,6 @@ static int r9a06g032_attach_dev(struct generic_pm_domain *pd, if (error) return error; } - i++; } return 0; diff --git a/drivers/clk/rockchip/clk-pll.c b/drivers/clk/rockchip/clk-pll.c index f7827b3b7fc1..6e5e502be44a 100644 --- a/drivers/clk/rockchip/clk-pll.c +++ b/drivers/clk/rockchip/clk-pll.c @@ -981,6 +981,7 @@ struct clk *rockchip_clk_register_pll(struct rockchip_clk_provider *ctx, return mux_clk; err_pll: + kfree(pll->rate_table); clk_unregister(mux_clk); mux_clk = pll_clk; err_mux: diff --git a/drivers/clk/samsung/clk-pll.c b/drivers/clk/samsung/clk-pll.c index 5873a9354b50..4909e940f0ab 100644 --- a/drivers/clk/samsung/clk-pll.c +++ b/drivers/clk/samsung/clk-pll.c @@ -1385,6 +1385,7 @@ static void __init _samsung_clk_register_pll(struct samsung_clk_provider *ctx, if (ret) { pr_err("%s: failed to register pll clock %s : %d\n", __func__, pll_clk->name, ret); + kfree(pll->rate_table); kfree(pll); return; } diff --git a/drivers/clk/socfpga/clk-gate.c b/drivers/clk/socfpga/clk-gate.c index 1ec9678d8cd3..ee2a2d284113 100644 --- a/drivers/clk/socfpga/clk-gate.c +++ b/drivers/clk/socfpga/clk-gate.c @@ -188,8 +188,10 @@ void __init socfpga_gate_init(struct device_node *node) return; ops = kmemdup(&gateclk_ops, sizeof(gateclk_ops), GFP_KERNEL); - if (WARN_ON(!ops)) + if (WARN_ON(!ops)) { + kfree(socfpga_clk); return; + } rc = of_property_read_u32_array(node, "clk-gate", clk_gate, 2); if (rc) @@ -243,6 +245,7 @@ void __init socfpga_gate_init(struct device_node *node) err = clk_hw_register(NULL, hw_clk); if (err) { + kfree(ops); kfree(socfpga_clk); return; } diff --git a/drivers/clk/st/clkgen-fsyn.c b/drivers/clk/st/clkgen-fsyn.c index 164285d6be97..ba18e58f0aae 100644 --- a/drivers/clk/st/clkgen-fsyn.c +++ b/drivers/clk/st/clkgen-fsyn.c @@ -1008,9 +1008,10 @@ static void __init st_of_quadfs_setup(struct device_node *np, clk = st_clk_register_quadfs_pll(pll_name, clk_parent_name, datac->data, reg, lock); - if (IS_ERR(clk)) + if (IS_ERR(clk)) { + kfree(lock); goto err_exit; - else + } else pr_debug("%s: parent %s rate %u\n", __clk_get_name(clk), __clk_get_name(clk_get_parent(clk)), diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c index dd0956ad969c..d35548aa026f 100644 --- a/drivers/clocksource/sh_cmt.c +++ b/drivers/clocksource/sh_cmt.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include @@ -116,6 +117,7 @@ struct sh_cmt_device { void __iomem *mapbase; struct clk 
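In the lpasscorecc-sc7180 changes above, lpass_setup_runtime_pm() now ends with pm_runtime_resume_and_get(), so every probe path that follows must drop that usage count again, on success or failure, which is why both probe functions are routed through pm_runtime_put_autosuspend(). A sketch of the balanced form, with a placeholder for the device-specific work:

#include <linux/device.h>
#include <linux/pm_runtime.h>

int example_do_work(struct device *dev);        /* placeholder */

static int example_probe_tail(struct device *dev)
{
        int ret;

        ret = pm_runtime_resume_and_get(dev);   /* drops the count itself on failure */
        if (ret < 0)
                return ret;

        ret = example_do_work(dev);

        pm_runtime_mark_last_busy(dev);
        pm_runtime_put_autosuspend(dev);        /* always balance the get */
        return ret;
}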
*clk; unsigned long rate; + unsigned int reg_delay; raw_spinlock_t lock; /* Protect the shared start/stop register */ @@ -247,10 +249,17 @@ static inline u32 sh_cmt_read_cmstr(struct sh_cmt_channel *ch) static inline void sh_cmt_write_cmstr(struct sh_cmt_channel *ch, u32 value) { - if (ch->iostart) - ch->cmt->info->write_control(ch->iostart, 0, value); - else - ch->cmt->info->write_control(ch->cmt->mapbase, 0, value); + u32 old_value = sh_cmt_read_cmstr(ch); + + if (value != old_value) { + if (ch->iostart) { + ch->cmt->info->write_control(ch->iostart, 0, value); + udelay(ch->cmt->reg_delay); + } else { + ch->cmt->info->write_control(ch->cmt->mapbase, 0, value); + udelay(ch->cmt->reg_delay); + } + } } static inline u32 sh_cmt_read_cmcsr(struct sh_cmt_channel *ch) @@ -260,7 +269,12 @@ static inline u32 sh_cmt_read_cmcsr(struct sh_cmt_channel *ch) static inline void sh_cmt_write_cmcsr(struct sh_cmt_channel *ch, u32 value) { - ch->cmt->info->write_control(ch->ioctrl, CMCSR, value); + u32 old_value = sh_cmt_read_cmcsr(ch); + + if (value != old_value) { + ch->cmt->info->write_control(ch->ioctrl, CMCSR, value); + udelay(ch->cmt->reg_delay); + } } static inline u32 sh_cmt_read_cmcnt(struct sh_cmt_channel *ch) @@ -268,14 +282,33 @@ static inline u32 sh_cmt_read_cmcnt(struct sh_cmt_channel *ch) return ch->cmt->info->read_count(ch->ioctrl, CMCNT); } -static inline void sh_cmt_write_cmcnt(struct sh_cmt_channel *ch, u32 value) +static inline int sh_cmt_write_cmcnt(struct sh_cmt_channel *ch, u32 value) { + /* Tests showed that we need to wait 3 clocks here */ + unsigned int cmcnt_delay = DIV_ROUND_UP(3 * ch->cmt->reg_delay, 2); + u32 reg; + + if (ch->cmt->info->model > SH_CMT_16BIT) { + int ret = read_poll_timeout_atomic(sh_cmt_read_cmcsr, reg, + !(reg & SH_CMT32_CMCSR_WRFLG), + 1, cmcnt_delay, false, ch); + if (ret < 0) + return ret; + } + ch->cmt->info->write_count(ch->ioctrl, CMCNT, value); + udelay(cmcnt_delay); + return 0; } static inline void sh_cmt_write_cmcor(struct sh_cmt_channel *ch, u32 value) { - ch->cmt->info->write_count(ch->ioctrl, CMCOR, value); + u32 old_value = ch->cmt->info->read_count(ch->ioctrl, CMCOR); + + if (value != old_value) { + ch->cmt->info->write_count(ch->ioctrl, CMCOR, value); + udelay(ch->cmt->reg_delay); + } } static u32 sh_cmt_get_counter(struct sh_cmt_channel *ch, u32 *has_wrapped) @@ -319,7 +352,7 @@ static void sh_cmt_start_stop_ch(struct sh_cmt_channel *ch, int start) static int sh_cmt_enable(struct sh_cmt_channel *ch) { - int k, ret; + int ret; dev_pm_syscore_device(&ch->cmt->pdev->dev, true); @@ -347,26 +380,9 @@ static int sh_cmt_enable(struct sh_cmt_channel *ch) } sh_cmt_write_cmcor(ch, 0xffffffff); - sh_cmt_write_cmcnt(ch, 0); + ret = sh_cmt_write_cmcnt(ch, 0); - /* - * According to the sh73a0 user's manual, as CMCNT can be operated - * only by the RCLK (Pseudo 32 kHz), there's one restriction on - * modifying CMCNT register; two RCLK cycles are necessary before - * this register is either read or any modification of the value - * it holds is reflected in the LSI's actual operation. - * - * While at it, we're supposed to clear out the CMCNT as of this - * moment, so make sure it's processed properly here. This will - * take RCLKx2 at maximum. 
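The sh_cmt rework above, like the earlier kcs_bmc_aspeed change, busy-waits on a register with read_poll_timeout_atomic() from <linux/iopoll.h>, which polls without sleeping and returns -ETIMEDOUT if the condition never becomes true within the given microsecond budget. A sketch with an invented register layout:

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/iopoll.h>

#define EXAMPLE_BUSY    BIT(0)          /* hypothetical status bit */

static int example_wait_idle(void __iomem *status_reg)
{
        u32 reg;

        /* op, val, cond, delay_us, timeout_us, delay_before_read, op args */
        return read_poll_timeout_atomic(readl, reg, !(reg & EXAMPLE_BUSY),
                                        1, 100, false, status_reg);
}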
- */ - for (k = 0; k < 100; k++) { - if (!sh_cmt_read_cmcnt(ch)) - break; - udelay(1); - } - - if (sh_cmt_read_cmcnt(ch)) { + if (ret || sh_cmt_read_cmcnt(ch)) { dev_err(&ch->cmt->pdev->dev, "ch%u: cannot clear CMCNT\n", ch->index); ret = -ETIMEDOUT; @@ -987,8 +1003,8 @@ MODULE_DEVICE_TABLE(of, sh_cmt_of_table); static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev) { - unsigned int mask; - unsigned int i; + unsigned int mask, i; + unsigned long rate; int ret; cmt->pdev = pdev; @@ -1024,10 +1040,16 @@ static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev) if (ret < 0) goto err_clk_unprepare; - if (cmt->info->width == 16) - cmt->rate = clk_get_rate(cmt->clk) / 512; - else - cmt->rate = clk_get_rate(cmt->clk) / 8; + rate = clk_get_rate(cmt->clk); + if (!rate) { + ret = -EINVAL; + goto err_clk_disable; + } + + /* We shall wait 2 input clks after register writes */ + if (cmt->info->model >= SH_CMT_48BIT) + cmt->reg_delay = DIV_ROUND_UP(2UL * USEC_PER_SEC, rate); + cmt->rate = rate / (cmt->info->width == 16 ? 512 : 8); /* Map the memory resource(s). */ ret = sh_cmt_map_memory(cmt); diff --git a/drivers/clocksource/timer-ti-dm-systimer.c b/drivers/clocksource/timer-ti-dm-systimer.c index 2737407ff069..632523c1232f 100644 --- a/drivers/clocksource/timer-ti-dm-systimer.c +++ b/drivers/clocksource/timer-ti-dm-systimer.c @@ -345,8 +345,10 @@ static int __init dmtimer_systimer_init_clock(struct dmtimer_systimer *t, return error; r = clk_get_rate(clock); - if (!r) + if (!r) { + clk_disable_unprepare(clock); return -ENODEV; + } if (is_ick) t->ick = clock; diff --git a/drivers/counter/stm32-lptimer-cnt.c b/drivers/counter/stm32-lptimer-cnt.c index 13656957c45f..fa7f86cf0ea3 100644 --- a/drivers/counter/stm32-lptimer-cnt.c +++ b/drivers/counter/stm32-lptimer-cnt.c @@ -69,7 +69,7 @@ static int stm32_lptim_set_enable_state(struct stm32_lptim_cnt *priv, /* ensure CMP & ARR registers are properly written */ ret = regmap_read_poll_timeout(priv->regmap, STM32_LPTIM_ISR, val, - (val & STM32_LPTIM_CMPOK_ARROK), + (val & STM32_LPTIM_CMPOK_ARROK) == STM32_LPTIM_CMPOK_ARROK, 100, 1000); if (ret) return ret; diff --git a/drivers/cpufreq/amd_freq_sensitivity.c b/drivers/cpufreq/amd_freq_sensitivity.c index d0b10baf039a..151771129c7b 100644 --- a/drivers/cpufreq/amd_freq_sensitivity.c +++ b/drivers/cpufreq/amd_freq_sensitivity.c @@ -124,6 +124,8 @@ static int __init amd_freq_sensitivity_init(void) if (!pcidev) { if (!boot_cpu_has(X86_FEATURE_PROC_FEEDBACK)) return -ENODEV; + } else { + pci_dev_put(pcidev); } if (rdmsrl_safe(MSR_AMD64_FREQ_SENSITIVITY_ACTUAL, &val)) diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 799431d287ee..b998b5083953 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -1212,6 +1212,7 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu) if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL)) goto err_free_rcpumask; + init_completion(&policy->kobj_unregister); ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, cpufreq_global_kobject, "policy%u", cpu); if (ret) { @@ -1250,7 +1251,6 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu) init_rwsem(&policy->rwsem); spin_lock_init(&policy->transition_lock); init_waitqueue_head(&policy->transition_wait); - init_completion(&policy->kobj_unregister); INIT_WORK(&policy->update, handle_update); policy->cpu = cpu; diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c index 
bb2f59fd0de4..bbcba2c38e85 100644 --- a/drivers/cpufreq/qcom-cpufreq-hw.c +++ b/drivers/cpufreq/qcom-cpufreq-hw.c @@ -177,6 +177,7 @@ static int qcom_cpufreq_hw_read_lut(struct device *cpu_dev, } } else if (ret != -ENODEV) { dev_err(cpu_dev, "Invalid opp table in device tree\n"); + kfree(table); return ret; } else { policy->fast_switch_possible = true; diff --git a/drivers/cpuidle/dt_idle_states.c b/drivers/cpuidle/dt_idle_states.c index 252f2a9686a6..448bc796b0b4 100644 --- a/drivers/cpuidle/dt_idle_states.c +++ b/drivers/cpuidle/dt_idle_states.c @@ -223,6 +223,6 @@ int dt_init_idle_driver(struct cpuidle_driver *drv, * also be 0 on platforms with missing DT idle states or legacy DT * configuration predating the DT idle states bindings. */ - return i; + return state_idx - start_idx; } EXPORT_SYMBOL_GPL(dt_init_idle_driver); diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 51690e73153a..b46343b5c26b 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -772,7 +772,12 @@ config CRYPTO_DEV_IMGTEC_HASH config CRYPTO_DEV_ROCKCHIP tristate "Rockchip's Cryptographic Engine driver" depends on OF && ARCH_ROCKCHIP + depends on PM + select CRYPTO_ECB + select CRYPTO_CBC + select CRYPTO_DES select CRYPTO_AES + select CRYPTO_ENGINE select CRYPTO_LIB_DES select CRYPTO_MD5 select CRYPTO_SHA1 diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c index 3c46ad8c3a1c..005eefecfdf5 100644 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c @@ -105,7 +105,7 @@ static int sun8i_ss_setup_ivs(struct skcipher_request *areq) unsigned int ivsize = crypto_skcipher_ivsize(tfm); struct sun8i_ss_flow *sf = &ss->flows[rctx->flow]; int i = 0; - u32 a; + dma_addr_t a; int err; rctx->ivlen = ivsize; diff --git a/drivers/crypto/amlogic/amlogic-gxl-core.c b/drivers/crypto/amlogic/amlogic-gxl-core.c index 6e7ae896717c..937187027ad5 100644 --- a/drivers/crypto/amlogic/amlogic-gxl-core.c +++ b/drivers/crypto/amlogic/amlogic-gxl-core.c @@ -237,7 +237,6 @@ static int meson_crypto_probe(struct platform_device *pdev) return err; } - mc->irqs = devm_kcalloc(mc->dev, MAXFLOW, sizeof(int), GFP_KERNEL); for (i = 0; i < MAXFLOW; i++) { mc->irqs[i] = platform_get_irq(pdev, i); if (mc->irqs[i] < 0) diff --git a/drivers/crypto/amlogic/amlogic-gxl.h b/drivers/crypto/amlogic/amlogic-gxl.h index dc0f142324a3..8c0746a1d6d4 100644 --- a/drivers/crypto/amlogic/amlogic-gxl.h +++ b/drivers/crypto/amlogic/amlogic-gxl.h @@ -95,7 +95,7 @@ struct meson_dev { struct device *dev; struct meson_flow *chanlist; atomic_t flow; - int *irqs; + int irqs[MAXFLOW]; #ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG struct dentry *dbgfs_dir; #endif diff --git a/drivers/crypto/cavium/nitrox/nitrox_mbx.c b/drivers/crypto/cavium/nitrox/nitrox_mbx.c index 2e9c0d214363..199fcec9b8d0 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_mbx.c +++ b/drivers/crypto/cavium/nitrox/nitrox_mbx.c @@ -191,6 +191,7 @@ int nitrox_mbox_init(struct nitrox_device *ndev) ndev->iov.pf2vf_wq = alloc_workqueue("nitrox_pf2vf", 0, 0); if (!ndev->iov.pf2vf_wq) { kfree(ndev->iov.vfdev); + ndev->iov.vfdev = NULL; return -ENOMEM; } /* enable pf2vf mailbox interrupts */ diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c index 88c672ad27e4..9470a9a19f29 100644 --- a/drivers/crypto/ccp/sp-pci.c +++ b/drivers/crypto/ccp/sp-pci.c @@ -320,6 +320,15 @@ static const struct psp_vdata pspv3 = { .inten_reg = 0x10690, .intsts_reg = 
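The sun8i-ss-cipher fix above changes a local from u32 to dma_addr_t: DMA handles can be wider than 32 bits (for example on 32-bit systems with LPAE, or on 64-bit systems), so storing one in a u32 silently truncates the address. A sketch of the correct typing around dma_map_single(), with invented parameter names:

#include <linux/dma-mapping.h>

static int example_map_buffer(struct device *dev, void *buf, size_t len,
                              dma_addr_t *handle)
{
        /* dma_addr_t, never u32/unsigned long, for the returned handle */
        *handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, *handle))
                return -ENOMEM;

        return 0;
}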
0x10694, }; + +static const struct psp_vdata pspv4 = { + .sev = &sevv2, + .tee = &teev1, + .feature_reg = 0x109fc, + .inten_reg = 0x10690, + .intsts_reg = 0x10694, +}; + #endif static const struct sp_dev_vdata dev_vdata[] = { @@ -365,7 +374,7 @@ static const struct sp_dev_vdata dev_vdata[] = { { /* 5 */ .bar = 2, #ifdef CONFIG_CRYPTO_DEV_SP_PSP - .psp_vdata = &pspv2, + .psp_vdata = &pspv4, #endif }, }; diff --git a/drivers/crypto/ccree/cc_debugfs.c b/drivers/crypto/ccree/cc_debugfs.c index 7083767602fc..8f008f024f8f 100644 --- a/drivers/crypto/ccree/cc_debugfs.c +++ b/drivers/crypto/ccree/cc_debugfs.c @@ -55,7 +55,7 @@ void __init cc_debugfs_global_init(void) cc_debugfs_dir = debugfs_create_dir("ccree", NULL); } -void __exit cc_debugfs_global_fini(void) +void cc_debugfs_global_fini(void) { debugfs_remove(cc_debugfs_dir); } diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c index 790fa9058a36..41f0a404bdf9 100644 --- a/drivers/crypto/ccree/cc_driver.c +++ b/drivers/crypto/ccree/cc_driver.c @@ -656,9 +656,17 @@ static struct platform_driver ccree_driver = { static int __init ccree_init(void) { + int rc; + cc_debugfs_global_init(); - return platform_driver_register(&ccree_driver); + rc = platform_driver_register(&ccree_driver); + if (rc) { + cc_debugfs_global_fini(); + return rc; + } + + return 0; } module_init(ccree_init); diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c index 65a641396c07..edc61e4105f3 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_main.c +++ b/drivers/crypto/hisilicon/hpre/hpre_main.c @@ -1143,18 +1143,12 @@ err_with_qm_init: static void hpre_remove(struct pci_dev *pdev) { struct hisi_qm *qm = pci_get_drvdata(pdev); - int ret; hisi_qm_pm_uninit(qm); hisi_qm_wait_task_finish(qm, &hpre_devices); hisi_qm_alg_unregister(qm, &hpre_devices); - if (qm->fun_type == QM_HW_PF && qm->vfs_num) { - ret = hisi_qm_sriov_disable(pdev, true); - if (ret) { - pci_err(pdev, "Disable SRIOV fail!\n"); - return; - } - } + if (qm->fun_type == QM_HW_PF && qm->vfs_num) + hisi_qm_sriov_disable(pdev, true); hpre_debugfs_exit(qm); hisi_qm_stop(qm, QM_NORMAL); diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index b8900a5dbf6e..fd89918abd19 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -5727,8 +5727,8 @@ static int hisi_qm_memory_init(struct hisi_qm *qm) GFP_ATOMIC); dev_dbg(dev, "allocate qm dma buf size=%zx)\n", qm->qdma.size); if (!qm->qdma.va) { - ret = -ENOMEM; - goto err_alloc_qdma; + ret = -ENOMEM; + goto err_destroy_idr; } QM_INIT_BUF(qm, eqe, QM_EQ_DEPTH); @@ -5744,7 +5744,8 @@ static int hisi_qm_memory_init(struct hisi_qm *qm) err_alloc_qp_array: dma_free_coherent(dev, qm->qdma.size, qm->qdma.va, qm->qdma.dma); -err_alloc_qdma: +err_destroy_idr: + idr_destroy(&qm->qp_idr); kfree(qm->factor); return ret; diff --git a/drivers/crypto/hisilicon/qm.h b/drivers/crypto/hisilicon/qm.h index 3068093229a5..bbb35de994eb 100644 --- a/drivers/crypto/hisilicon/qm.h +++ b/drivers/crypto/hisilicon/qm.h @@ -318,14 +318,14 @@ struct hisi_qp { static inline int q_num_set(const char *val, const struct kernel_param *kp, unsigned int device) { - struct pci_dev *pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI, - device, NULL); + struct pci_dev *pdev; u32 n, q_num; int ret; if (!val) return -EINVAL; + pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI, device, NULL); if (!pdev) { q_num = min_t(u32, QM_QNUM_V1, QM_QNUM_V2); pr_info("No device found currently, suppose queue number is %u\n", 
@@ -335,6 +335,8 @@ static inline int q_num_set(const char *val, const struct kernel_param *kp, q_num = QM_QNUM_V1; else q_num = QM_QNUM_V2; + + pci_dev_put(pdev); } ret = kstrtou32(val, 10, &n); diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c index aa4c7b2af3e2..34b41cbcfa8d 100644 --- a/drivers/crypto/img-hash.c +++ b/drivers/crypto/img-hash.c @@ -358,12 +358,16 @@ static int img_hash_dma_init(struct img_hash_dev *hdev) static void img_hash_dma_task(unsigned long d) { struct img_hash_dev *hdev = (struct img_hash_dev *)d; - struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req); + struct img_hash_request_ctx *ctx; u8 *addr; size_t nbytes, bleft, wsend, len, tbc; struct scatterlist tsg; - if (!hdev->req || !ctx->sg) + if (!hdev->req) + return; + + ctx = ahash_request_ctx(hdev->req); + if (!ctx->sg) return; addr = sg_virt(ctx->sg); diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c index 3b0bf6fea491..b4db560105a9 100644 --- a/drivers/crypto/n2_core.c +++ b/drivers/crypto/n2_core.c @@ -1229,6 +1229,7 @@ struct n2_hash_tmpl { const u8 *hash_init; u8 hw_op_hashsz; u8 digest_size; + u8 statesize; u8 block_size; u8 auth_type; u8 hmac_type; @@ -1260,6 +1261,7 @@ static const struct n2_hash_tmpl hash_tmpls[] = { .hmac_type = AUTH_TYPE_HMAC_MD5, .hw_op_hashsz = MD5_DIGEST_SIZE, .digest_size = MD5_DIGEST_SIZE, + .statesize = sizeof(struct md5_state), .block_size = MD5_HMAC_BLOCK_SIZE }, { .name = "sha1", .hash_zero = sha1_zero_message_hash, @@ -1268,6 +1270,7 @@ static const struct n2_hash_tmpl hash_tmpls[] = { .hmac_type = AUTH_TYPE_HMAC_SHA1, .hw_op_hashsz = SHA1_DIGEST_SIZE, .digest_size = SHA1_DIGEST_SIZE, + .statesize = sizeof(struct sha1_state), .block_size = SHA1_BLOCK_SIZE }, { .name = "sha256", .hash_zero = sha256_zero_message_hash, @@ -1276,6 +1279,7 @@ static const struct n2_hash_tmpl hash_tmpls[] = { .hmac_type = AUTH_TYPE_HMAC_SHA256, .hw_op_hashsz = SHA256_DIGEST_SIZE, .digest_size = SHA256_DIGEST_SIZE, + .statesize = sizeof(struct sha256_state), .block_size = SHA256_BLOCK_SIZE }, { .name = "sha224", .hash_zero = sha224_zero_message_hash, @@ -1284,6 +1288,7 @@ static const struct n2_hash_tmpl hash_tmpls[] = { .hmac_type = AUTH_TYPE_RESERVED, .hw_op_hashsz = SHA256_DIGEST_SIZE, .digest_size = SHA224_DIGEST_SIZE, + .statesize = sizeof(struct sha256_state), .block_size = SHA224_BLOCK_SIZE }, }; #define NUM_HASH_TMPLS ARRAY_SIZE(hash_tmpls) @@ -1424,6 +1429,7 @@ static int __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl) halg = &ahash->halg; halg->digestsize = tmpl->digest_size; + halg->statesize = tmpl->statesize; base = &halg->base; snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name); diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c index f6bf53c00b61..4ec6949a7ca9 100644 --- a/drivers/crypto/omap-sham.c +++ b/drivers/crypto/omap-sham.c @@ -2114,7 +2114,7 @@ static int omap_sham_probe(struct platform_device *pdev) pm_runtime_enable(dev); - err = pm_runtime_get_sync(dev); + err = pm_runtime_resume_and_get(dev); if (err < 0) { dev_err(dev, "failed to get sync: %d\n", err); goto err_pm; diff --git a/drivers/crypto/rockchip/rk3288_crypto.c b/drivers/crypto/rockchip/rk3288_crypto.c index 35d73061d156..14a0aef18ab1 100644 --- a/drivers/crypto/rockchip/rk3288_crypto.c +++ b/drivers/crypto/rockchip/rk3288_crypto.c @@ -65,186 +65,24 @@ static void rk_crypto_disable_clk(struct rk_crypto_info *dev) clk_disable_unprepare(dev->sclk); } -static int check_alignment(struct scatterlist *sg_src, - struct scatterlist 
*sg_dst, - int align_mask) -{ - int in, out, align; - - in = IS_ALIGNED((uint32_t)sg_src->offset, 4) && - IS_ALIGNED((uint32_t)sg_src->length, align_mask); - if (!sg_dst) - return in; - out = IS_ALIGNED((uint32_t)sg_dst->offset, 4) && - IS_ALIGNED((uint32_t)sg_dst->length, align_mask); - align = in && out; - - return (align && (sg_src->length == sg_dst->length)); -} - -static int rk_load_data(struct rk_crypto_info *dev, - struct scatterlist *sg_src, - struct scatterlist *sg_dst) -{ - unsigned int count; - - dev->aligned = dev->aligned ? - check_alignment(sg_src, sg_dst, dev->align_size) : - dev->aligned; - if (dev->aligned) { - count = min(dev->left_bytes, sg_src->length); - dev->left_bytes -= count; - - if (!dma_map_sg(dev->dev, sg_src, 1, DMA_TO_DEVICE)) { - dev_err(dev->dev, "[%s:%d] dma_map_sg(src) error\n", - __func__, __LINE__); - return -EINVAL; - } - dev->addr_in = sg_dma_address(sg_src); - - if (sg_dst) { - if (!dma_map_sg(dev->dev, sg_dst, 1, DMA_FROM_DEVICE)) { - dev_err(dev->dev, - "[%s:%d] dma_map_sg(dst) error\n", - __func__, __LINE__); - dma_unmap_sg(dev->dev, sg_src, 1, - DMA_TO_DEVICE); - return -EINVAL; - } - dev->addr_out = sg_dma_address(sg_dst); - } - } else { - count = (dev->left_bytes > PAGE_SIZE) ? - PAGE_SIZE : dev->left_bytes; - - if (!sg_pcopy_to_buffer(dev->first, dev->src_nents, - dev->addr_vir, count, - dev->total - dev->left_bytes)) { - dev_err(dev->dev, "[%s:%d] pcopy err\n", - __func__, __LINE__); - return -EINVAL; - } - dev->left_bytes -= count; - sg_init_one(&dev->sg_tmp, dev->addr_vir, count); - if (!dma_map_sg(dev->dev, &dev->sg_tmp, 1, DMA_TO_DEVICE)) { - dev_err(dev->dev, "[%s:%d] dma_map_sg(sg_tmp) error\n", - __func__, __LINE__); - return -ENOMEM; - } - dev->addr_in = sg_dma_address(&dev->sg_tmp); - - if (sg_dst) { - if (!dma_map_sg(dev->dev, &dev->sg_tmp, 1, - DMA_FROM_DEVICE)) { - dev_err(dev->dev, - "[%s:%d] dma_map_sg(sg_tmp) error\n", - __func__, __LINE__); - dma_unmap_sg(dev->dev, &dev->sg_tmp, 1, - DMA_TO_DEVICE); - return -ENOMEM; - } - dev->addr_out = sg_dma_address(&dev->sg_tmp); - } - } - dev->count = count; - return 0; -} - -static void rk_unload_data(struct rk_crypto_info *dev) -{ - struct scatterlist *sg_in, *sg_out; - - sg_in = dev->aligned ? dev->sg_src : &dev->sg_tmp; - dma_unmap_sg(dev->dev, sg_in, 1, DMA_TO_DEVICE); - - if (dev->sg_dst) { - sg_out = dev->aligned ? 
dev->sg_dst : &dev->sg_tmp; - dma_unmap_sg(dev->dev, sg_out, 1, DMA_FROM_DEVICE); - } -} - static irqreturn_t rk_crypto_irq_handle(int irq, void *dev_id) { struct rk_crypto_info *dev = platform_get_drvdata(dev_id); u32 interrupt_status; - spin_lock(&dev->lock); interrupt_status = CRYPTO_READ(dev, RK_CRYPTO_INTSTS); CRYPTO_WRITE(dev, RK_CRYPTO_INTSTS, interrupt_status); + dev->status = 1; if (interrupt_status & 0x0a) { dev_warn(dev->dev, "DMA Error\n"); - dev->err = -EFAULT; + dev->status = 0; } - tasklet_schedule(&dev->done_task); + complete(&dev->complete); - spin_unlock(&dev->lock); return IRQ_HANDLED; } -static int rk_crypto_enqueue(struct rk_crypto_info *dev, - struct crypto_async_request *async_req) -{ - unsigned long flags; - int ret; - - spin_lock_irqsave(&dev->lock, flags); - ret = crypto_enqueue_request(&dev->queue, async_req); - if (dev->busy) { - spin_unlock_irqrestore(&dev->lock, flags); - return ret; - } - dev->busy = true; - spin_unlock_irqrestore(&dev->lock, flags); - tasklet_schedule(&dev->queue_task); - - return ret; -} - -static void rk_crypto_queue_task_cb(unsigned long data) -{ - struct rk_crypto_info *dev = (struct rk_crypto_info *)data; - struct crypto_async_request *async_req, *backlog; - unsigned long flags; - int err = 0; - - dev->err = 0; - spin_lock_irqsave(&dev->lock, flags); - backlog = crypto_get_backlog(&dev->queue); - async_req = crypto_dequeue_request(&dev->queue); - - if (!async_req) { - dev->busy = false; - spin_unlock_irqrestore(&dev->lock, flags); - return; - } - spin_unlock_irqrestore(&dev->lock, flags); - - if (backlog) { - backlog->complete(backlog, -EINPROGRESS); - backlog = NULL; - } - - dev->async_req = async_req; - err = dev->start(dev); - if (err) - dev->complete(dev->async_req, err); -} - -static void rk_crypto_done_task_cb(unsigned long data) -{ - struct rk_crypto_info *dev = (struct rk_crypto_info *)data; - - if (dev->err) { - dev->complete(dev->async_req, dev->err); - return; - } - - dev->err = dev->update(dev); - if (dev->err) - dev->complete(dev->async_req, dev->err); -} - static struct rk_crypto_tmp *rk_cipher_algs[] = { &rk_ecb_aes_alg, &rk_cbc_aes_alg, @@ -337,8 +175,6 @@ static int rk_crypto_probe(struct platform_device *pdev) if (err) goto err_crypto; - spin_lock_init(&crypto_info->lock); - crypto_info->reg = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(crypto_info->reg)) { err = PTR_ERR(crypto_info->reg); @@ -389,18 +225,11 @@ static int rk_crypto_probe(struct platform_device *pdev) crypto_info->dev = &pdev->dev; platform_set_drvdata(pdev, crypto_info); - tasklet_init(&crypto_info->queue_task, - rk_crypto_queue_task_cb, (unsigned long)crypto_info); - tasklet_init(&crypto_info->done_task, - rk_crypto_done_task_cb, (unsigned long)crypto_info); - crypto_init_queue(&crypto_info->queue, 50); + crypto_info->engine = crypto_engine_alloc_init(&pdev->dev, true); + crypto_engine_start(crypto_info->engine); + init_completion(&crypto_info->complete); - crypto_info->enable_clk = rk_crypto_enable_clk; - crypto_info->disable_clk = rk_crypto_disable_clk; - crypto_info->load_data = rk_load_data; - crypto_info->unload_data = rk_unload_data; - crypto_info->enqueue = rk_crypto_enqueue; - crypto_info->busy = false; + rk_crypto_enable_clk(crypto_info); err = rk_crypto_register(crypto_info); if (err) { @@ -412,9 +241,9 @@ static int rk_crypto_probe(struct platform_device *pdev) return 0; err_register_alg: - tasklet_kill(&crypto_info->queue_task); - tasklet_kill(&crypto_info->done_task); + crypto_engine_exit(crypto_info->engine); err_crypto: + 
dev_err(dev, "Crypto Accelerator not successfully registered\n"); return err; } @@ -423,8 +252,8 @@ static int rk_crypto_remove(struct platform_device *pdev) struct rk_crypto_info *crypto_tmp = platform_get_drvdata(pdev); rk_crypto_unregister(); - tasklet_kill(&crypto_tmp->done_task); - tasklet_kill(&crypto_tmp->queue_task); + rk_crypto_disable_clk(crypto_tmp); + crypto_engine_exit(crypto_tmp->engine); return 0; } diff --git a/drivers/crypto/rockchip/rk3288_crypto.h b/drivers/crypto/rockchip/rk3288_crypto.h index 97278c2574ff..045e811b4af8 100644 --- a/drivers/crypto/rockchip/rk3288_crypto.h +++ b/drivers/crypto/rockchip/rk3288_crypto.h @@ -5,9 +5,11 @@ #include #include #include +#include #include #include #include +#include #include #include @@ -193,45 +195,15 @@ struct rk_crypto_info { struct reset_control *rst; void __iomem *reg; int irq; - struct crypto_queue queue; - struct tasklet_struct queue_task; - struct tasklet_struct done_task; - struct crypto_async_request *async_req; - int err; - /* device lock */ - spinlock_t lock; - /* the public variable */ - struct scatterlist *sg_src; - struct scatterlist *sg_dst; - struct scatterlist sg_tmp; - struct scatterlist *first; - unsigned int left_bytes; - void *addr_vir; - int aligned; - int align_size; - size_t src_nents; - size_t dst_nents; - unsigned int total; - unsigned int count; - dma_addr_t addr_in; - dma_addr_t addr_out; - bool busy; - int (*start)(struct rk_crypto_info *dev); - int (*update)(struct rk_crypto_info *dev); - void (*complete)(struct crypto_async_request *base, int err); - int (*enable_clk)(struct rk_crypto_info *dev); - void (*disable_clk)(struct rk_crypto_info *dev); - int (*load_data)(struct rk_crypto_info *dev, - struct scatterlist *sg_src, - struct scatterlist *sg_dst); - void (*unload_data)(struct rk_crypto_info *dev); - int (*enqueue)(struct rk_crypto_info *dev, - struct crypto_async_request *async_req); + struct crypto_engine *engine; + struct completion complete; + int status; }; /* the private variable of hash */ struct rk_ahash_ctx { + struct crypto_engine_ctx enginectx; struct rk_crypto_info *dev; /* for fallback */ struct crypto_ahash *fallback_tfm; @@ -241,14 +213,23 @@ struct rk_ahash_ctx { struct rk_ahash_rctx { struct ahash_request fallback_req; u32 mode; + int nrsg; }; /* the private variable of cipher */ struct rk_cipher_ctx { + struct crypto_engine_ctx enginectx; struct rk_crypto_info *dev; unsigned int keylen; - u32 mode; + u8 key[AES_MAX_KEY_SIZE]; u8 iv[AES_BLOCK_SIZE]; + struct crypto_skcipher *fallback_tfm; +}; + +struct rk_cipher_rctx { + u8 backup_iv[AES_BLOCK_SIZE]; + u32 mode; + struct skcipher_request fallback_req; // keep at the end }; enum alg_type { diff --git a/drivers/crypto/rockchip/rk3288_crypto_ahash.c b/drivers/crypto/rockchip/rk3288_crypto_ahash.c index ed03058497bc..edd40e16a3f0 100644 --- a/drivers/crypto/rockchip/rk3288_crypto_ahash.c +++ b/drivers/crypto/rockchip/rk3288_crypto_ahash.c @@ -9,6 +9,7 @@ * Some ideas are from marvell/cesa.c and s5p-sss.c driver. */ #include +#include #include "rk3288_crypto.h" /* @@ -16,6 +17,40 @@ * so we put the fixed hash out when met zero message. 
*/ +static bool rk_ahash_need_fallback(struct ahash_request *req) +{ + struct scatterlist *sg; + + sg = req->src; + while (sg) { + if (!IS_ALIGNED(sg->offset, sizeof(u32))) { + return true; + } + if (sg->length % 4) { + return true; + } + sg = sg_next(sg); + } + return false; +} + +static int rk_ahash_digest_fb(struct ahash_request *areq) +{ + struct rk_ahash_rctx *rctx = ahash_request_ctx(areq); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); + struct rk_ahash_ctx *tfmctx = crypto_ahash_ctx(tfm); + + ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm); + rctx->fallback_req.base.flags = areq->base.flags & + CRYPTO_TFM_REQ_MAY_SLEEP; + + rctx->fallback_req.nbytes = areq->nbytes; + rctx->fallback_req.src = areq->src; + rctx->fallback_req.result = areq->result; + + return crypto_ahash_digest(&rctx->fallback_req); +} + static int zero_message_process(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); @@ -38,16 +73,12 @@ static int zero_message_process(struct ahash_request *req) return 0; } -static void rk_ahash_crypto_complete(struct crypto_async_request *base, int err) +static void rk_ahash_reg_init(struct ahash_request *req) { - if (base->complete) - base->complete(base, err); -} - -static void rk_ahash_reg_init(struct rk_crypto_info *dev) -{ - struct ahash_request *req = ahash_request_cast(dev->async_req); struct rk_ahash_rctx *rctx = ahash_request_ctx(req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct rk_ahash_ctx *tctx = crypto_ahash_ctx(tfm); + struct rk_crypto_info *dev = tctx->dev; int reg_status; reg_status = CRYPTO_READ(dev, RK_CRYPTO_CTRL) | @@ -74,7 +105,7 @@ static void rk_ahash_reg_init(struct rk_crypto_info *dev) RK_CRYPTO_BYTESWAP_BRFIFO | RK_CRYPTO_BYTESWAP_BTFIFO); - CRYPTO_WRITE(dev, RK_CRYPTO_HASH_MSG_LEN, dev->total); + CRYPTO_WRITE(dev, RK_CRYPTO_HASH_MSG_LEN, req->nbytes); } static int rk_ahash_init(struct ahash_request *req) @@ -167,48 +198,64 @@ static int rk_ahash_digest(struct ahash_request *req) struct rk_ahash_ctx *tctx = crypto_tfm_ctx(req->base.tfm); struct rk_crypto_info *dev = tctx->dev; + if (rk_ahash_need_fallback(req)) + return rk_ahash_digest_fb(req); + if (!req->nbytes) return zero_message_process(req); - else - return dev->enqueue(dev, &req->base); + + return crypto_transfer_hash_request_to_engine(dev->engine, req); } -static void crypto_ahash_dma_start(struct rk_crypto_info *dev) +static void crypto_ahash_dma_start(struct rk_crypto_info *dev, struct scatterlist *sg) { - CRYPTO_WRITE(dev, RK_CRYPTO_HRDMAS, dev->addr_in); - CRYPTO_WRITE(dev, RK_CRYPTO_HRDMAL, (dev->count + 3) / 4); + CRYPTO_WRITE(dev, RK_CRYPTO_HRDMAS, sg_dma_address(sg)); + CRYPTO_WRITE(dev, RK_CRYPTO_HRDMAL, sg_dma_len(sg) / 4); CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, RK_CRYPTO_HASH_START | (RK_CRYPTO_HASH_START << 16)); } -static int rk_ahash_set_data_start(struct rk_crypto_info *dev) +static int rk_hash_prepare(struct crypto_engine *engine, void *breq) { - int err; + struct ahash_request *areq = container_of(breq, struct ahash_request, base); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); + struct rk_ahash_rctx *rctx = ahash_request_ctx(areq); + struct rk_ahash_ctx *tctx = crypto_ahash_ctx(tfm); + int ret; - err = dev->load_data(dev, dev->sg_src, NULL); - if (!err) - crypto_ahash_dma_start(dev); - return err; + ret = dma_map_sg(tctx->dev->dev, areq->src, sg_nents(areq->src), DMA_TO_DEVICE); + if (ret <= 0) + return -EINVAL; + + rctx->nrsg = ret; + + return 0; } -static int rk_ahash_start(struct rk_crypto_info 
*dev) +static int rk_hash_unprepare(struct crypto_engine *engine, void *breq) { - struct ahash_request *req = ahash_request_cast(dev->async_req); - struct crypto_ahash *tfm; - struct rk_ahash_rctx *rctx; + struct ahash_request *areq = container_of(breq, struct ahash_request, base); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); + struct rk_ahash_rctx *rctx = ahash_request_ctx(areq); + struct rk_ahash_ctx *tctx = crypto_ahash_ctx(tfm); + + dma_unmap_sg(tctx->dev->dev, areq->src, rctx->nrsg, DMA_TO_DEVICE); + return 0; +} + +static int rk_hash_run(struct crypto_engine *engine, void *breq) +{ + struct ahash_request *areq = container_of(breq, struct ahash_request, base); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); + struct rk_ahash_rctx *rctx = ahash_request_ctx(areq); + struct rk_ahash_ctx *tctx = crypto_ahash_ctx(tfm); + struct scatterlist *sg = areq->src; + int err = 0; + int i; + u32 v; - dev->total = req->nbytes; - dev->left_bytes = req->nbytes; - dev->aligned = 0; - dev->align_size = 4; - dev->sg_dst = NULL; - dev->sg_src = req->src; - dev->first = req->src; - dev->src_nents = sg_nents(req->src); - rctx = ahash_request_ctx(req); rctx->mode = 0; - tfm = crypto_ahash_reqtfm(req); switch (crypto_ahash_digestsize(tfm)) { case SHA1_DIGEST_SIZE: rctx->mode = RK_CRYPTO_HASH_SHA1; @@ -220,32 +267,26 @@ static int rk_ahash_start(struct rk_crypto_info *dev) rctx->mode = RK_CRYPTO_HASH_MD5; break; default: - return -EINVAL; + err = -EINVAL; + goto theend; } - rk_ahash_reg_init(dev); - return rk_ahash_set_data_start(dev); -} + rk_ahash_reg_init(areq); -static int rk_ahash_crypto_rx(struct rk_crypto_info *dev) -{ - int err = 0; - struct ahash_request *req = ahash_request_cast(dev->async_req); - struct crypto_ahash *tfm; - - dev->unload_data(dev); - if (dev->left_bytes) { - if (dev->aligned) { - if (sg_is_last(dev->sg_src)) { - dev_warn(dev->dev, "[%s:%d], Lack of data\n", - __func__, __LINE__); - err = -ENOMEM; - goto out_rx; - } - dev->sg_src = sg_next(dev->sg_src); + while (sg) { + reinit_completion(&tctx->dev->complete); + tctx->dev->status = 0; + crypto_ahash_dma_start(tctx->dev, sg); + wait_for_completion_interruptible_timeout(&tctx->dev->complete, + msecs_to_jiffies(2000)); + if (!tctx->dev->status) { + dev_err(tctx->dev->dev, "DMA timeout\n"); + err = -EFAULT; + goto theend; } - err = rk_ahash_set_data_start(dev); - } else { + sg = sg_next(sg); + } + /* * it will take some time to process date after last dma * transmission. @@ -256,18 +297,20 @@ static int rk_ahash_crypto_rx(struct rk_crypto_info *dev) * efficiency, and make it response quickly when dma * complete. 
*/ - while (!CRYPTO_READ(dev, RK_CRYPTO_HASH_STS)) - udelay(10); + while (!CRYPTO_READ(tctx->dev, RK_CRYPTO_HASH_STS)) + udelay(10); - tfm = crypto_ahash_reqtfm(req); - memcpy_fromio(req->result, dev->reg + RK_CRYPTO_HASH_DOUT_0, - crypto_ahash_digestsize(tfm)); - dev->complete(dev->async_req, 0); - tasklet_schedule(&dev->queue_task); + for (i = 0; i < crypto_ahash_digestsize(tfm) / 4; i++) { + v = readl(tctx->dev->reg + RK_CRYPTO_HASH_DOUT_0 + i * 4); + put_unaligned_le32(v, areq->result + i * 4); } -out_rx: - return err; +theend: + local_bh_disable(); + crypto_finalize_hash_request(engine, breq, err); + local_bh_enable(); + + return 0; } static int rk_cra_hash_init(struct crypto_tfm *tfm) @@ -281,14 +324,6 @@ static int rk_cra_hash_init(struct crypto_tfm *tfm) algt = container_of(alg, struct rk_crypto_tmp, alg.hash); tctx->dev = algt->dev; - tctx->dev->addr_vir = (void *)__get_free_page(GFP_KERNEL); - if (!tctx->dev->addr_vir) { - dev_err(tctx->dev->dev, "failed to kmalloc for addr_vir\n"); - return -ENOMEM; - } - tctx->dev->start = rk_ahash_start; - tctx->dev->update = rk_ahash_crypto_rx; - tctx->dev->complete = rk_ahash_crypto_complete; /* for fallback */ tctx->fallback_tfm = crypto_alloc_ahash(alg_name, 0, @@ -297,19 +332,23 @@ static int rk_cra_hash_init(struct crypto_tfm *tfm) dev_err(tctx->dev->dev, "Could not load fallback driver.\n"); return PTR_ERR(tctx->fallback_tfm); } + crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), sizeof(struct rk_ahash_rctx) + crypto_ahash_reqsize(tctx->fallback_tfm)); - return tctx->dev->enable_clk(tctx->dev); + tctx->enginectx.op.do_one_request = rk_hash_run; + tctx->enginectx.op.prepare_request = rk_hash_prepare; + tctx->enginectx.op.unprepare_request = rk_hash_unprepare; + + return 0; } static void rk_cra_hash_exit(struct crypto_tfm *tfm) { struct rk_ahash_ctx *tctx = crypto_tfm_ctx(tfm); - free_page((unsigned long)tctx->dev->addr_vir); - return tctx->dev->disable_clk(tctx->dev); + crypto_free_ahash(tctx->fallback_tfm); } struct rk_crypto_tmp rk_ahash_sha1 = { diff --git a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c index 5bbf0d2722e1..67a7e05d5ae3 100644 --- a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c +++ b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c @@ -9,23 +9,77 @@ * Some ideas are from marvell-cesa.c and s5p-sss.c driver. 
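(Illustrative aside: the hash entry points above now hand requests to the generic crypto_engine instead of a home-grown queue. A minimal sketch of the wiring, using hypothetical my_*() callbacks in place of the real rk_hash_prepare/rk_hash_unprepare/rk_hash_run hooks.)

    #include <crypto/engine.h>
    #include <linux/crypto.h>

    struct my_tfm_ctx {
            struct crypto_engine_ctx enginectx;     /* must stay the first member */
            /* driver-private state follows */
    };

    static int my_prepare(struct crypto_engine *engine, void *areq);
    static int my_unprepare(struct crypto_engine *engine, void *areq);
    static int my_run(struct crypto_engine *engine, void *areq);

    static int my_init_tfm(struct crypto_tfm *tfm)
    {
            struct my_tfm_ctx *ctx = crypto_tfm_ctx(tfm);

            ctx->enginectx.op.prepare_request = my_prepare;     /* optional */
            ctx->enginectx.op.unprepare_request = my_unprepare; /* optional */
            ctx->enginectx.op.do_one_request = my_run;
            return 0;
    }

Requests are queued with crypto_transfer_hash_request_to_engine() and the do_one_request callback reports the outcome with crypto_finalize_hash_request(), which is why rk_ahash_digest() and rk_hash_run() above no longer touch a private queue or tasklet.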
*/ #include +#include #include "rk3288_crypto.h" #define RK_CRYPTO_DEC BIT(0) -static void rk_crypto_complete(struct crypto_async_request *base, int err) +static int rk_cipher_need_fallback(struct skcipher_request *req) { - if (base->complete) - base->complete(base, err); + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + unsigned int bs = crypto_skcipher_blocksize(tfm); + struct scatterlist *sgs, *sgd; + unsigned int stodo, dtodo, len; + + if (!req->cryptlen) + return true; + + len = req->cryptlen; + sgs = req->src; + sgd = req->dst; + while (sgs && sgd) { + if (!IS_ALIGNED(sgs->offset, sizeof(u32))) { + return true; + } + if (!IS_ALIGNED(sgd->offset, sizeof(u32))) { + return true; + } + stodo = min(len, sgs->length); + if (stodo % bs) { + return true; + } + dtodo = min(len, sgd->length); + if (dtodo % bs) { + return true; + } + if (stodo != dtodo) { + return true; + } + len -= stodo; + sgs = sg_next(sgs); + sgd = sg_next(sgd); + } + return false; +} + +static int rk_cipher_fallback(struct skcipher_request *areq) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); + struct rk_cipher_ctx *op = crypto_skcipher_ctx(tfm); + struct rk_cipher_rctx *rctx = skcipher_request_ctx(areq); + int err; + + skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm); + skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags, + areq->base.complete, areq->base.data); + skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst, + areq->cryptlen, areq->iv); + if (rctx->mode & RK_CRYPTO_DEC) + err = crypto_skcipher_decrypt(&rctx->fallback_req); + else + err = crypto_skcipher_encrypt(&rctx->fallback_req); + return err; } static int rk_handle_req(struct rk_crypto_info *dev, struct skcipher_request *req) { - if (!IS_ALIGNED(req->cryptlen, dev->align_size)) - return -EINVAL; - else - return dev->enqueue(dev, &req->base); + struct crypto_engine *engine = dev->engine; + + if (rk_cipher_need_fallback(req)) + return rk_cipher_fallback(req); + + return crypto_transfer_skcipher_request_to_engine(engine, req); } static int rk_aes_setkey(struct crypto_skcipher *cipher, @@ -38,8 +92,9 @@ static int rk_aes_setkey(struct crypto_skcipher *cipher, keylen != AES_KEYSIZE_256) return -EINVAL; ctx->keylen = keylen; - memcpy_toio(ctx->dev->reg + RK_CRYPTO_AES_KEY_0, key, keylen); - return 0; + memcpy(ctx->key, key, keylen); + + return crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen); } static int rk_des_setkey(struct crypto_skcipher *cipher, @@ -53,8 +108,9 @@ static int rk_des_setkey(struct crypto_skcipher *cipher, return err; ctx->keylen = keylen; - memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_KEY1_0, key, keylen); - return 0; + memcpy(ctx->key, key, keylen); + + return crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen); } static int rk_tdes_setkey(struct crypto_skcipher *cipher, @@ -68,17 +124,19 @@ static int rk_tdes_setkey(struct crypto_skcipher *cipher, return err; ctx->keylen = keylen; - memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_KEY1_0, key, keylen); - return 0; + memcpy(ctx->key, key, keylen); + + return crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen); } static int rk_aes_ecb_encrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); struct rk_crypto_info *dev = ctx->dev; - ctx->mode = RK_CRYPTO_AES_ECB_MODE; + rctx->mode = RK_CRYPTO_AES_ECB_MODE; return rk_handle_req(dev, req); } @@ -86,9 +144,10 @@ 
static int rk_aes_ecb_decrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); struct rk_crypto_info *dev = ctx->dev; - ctx->mode = RK_CRYPTO_AES_ECB_MODE | RK_CRYPTO_DEC; + rctx->mode = RK_CRYPTO_AES_ECB_MODE | RK_CRYPTO_DEC; return rk_handle_req(dev, req); } @@ -96,9 +155,10 @@ static int rk_aes_cbc_encrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); struct rk_crypto_info *dev = ctx->dev; - ctx->mode = RK_CRYPTO_AES_CBC_MODE; + rctx->mode = RK_CRYPTO_AES_CBC_MODE; return rk_handle_req(dev, req); } @@ -106,9 +166,10 @@ static int rk_aes_cbc_decrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); struct rk_crypto_info *dev = ctx->dev; - ctx->mode = RK_CRYPTO_AES_CBC_MODE | RK_CRYPTO_DEC; + rctx->mode = RK_CRYPTO_AES_CBC_MODE | RK_CRYPTO_DEC; return rk_handle_req(dev, req); } @@ -116,9 +177,10 @@ static int rk_des_ecb_encrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); struct rk_crypto_info *dev = ctx->dev; - ctx->mode = 0; + rctx->mode = 0; return rk_handle_req(dev, req); } @@ -126,9 +188,10 @@ static int rk_des_ecb_decrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); struct rk_crypto_info *dev = ctx->dev; - ctx->mode = RK_CRYPTO_DEC; + rctx->mode = RK_CRYPTO_DEC; return rk_handle_req(dev, req); } @@ -136,9 +199,10 @@ static int rk_des_cbc_encrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); struct rk_crypto_info *dev = ctx->dev; - ctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC; + rctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC; return rk_handle_req(dev, req); } @@ -146,9 +210,10 @@ static int rk_des_cbc_decrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); struct rk_crypto_info *dev = ctx->dev; - ctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC | RK_CRYPTO_DEC; + rctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC | RK_CRYPTO_DEC; return rk_handle_req(dev, req); } @@ -156,9 +221,10 @@ static int rk_des3_ede_ecb_encrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); struct rk_crypto_info *dev = ctx->dev; - ctx->mode = RK_CRYPTO_TDES_SELECT; + rctx->mode = RK_CRYPTO_TDES_SELECT; return rk_handle_req(dev, req); } @@ -166,9 +232,10 @@ static int rk_des3_ede_ecb_decrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); struct 
rk_crypto_info *dev = ctx->dev; - ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_DEC; + rctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_DEC; return rk_handle_req(dev, req); } @@ -176,9 +243,10 @@ static int rk_des3_ede_cbc_encrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); struct rk_crypto_info *dev = ctx->dev; - ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC; + rctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC; return rk_handle_req(dev, req); } @@ -186,43 +254,42 @@ static int rk_des3_ede_cbc_decrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); struct rk_crypto_info *dev = ctx->dev; - ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC | + rctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC | RK_CRYPTO_DEC; return rk_handle_req(dev, req); } -static void rk_ablk_hw_init(struct rk_crypto_info *dev) +static void rk_ablk_hw_init(struct rk_crypto_info *dev, struct skcipher_request *req) { - struct skcipher_request *req = - skcipher_request_cast(dev->async_req); struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req); struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher); + struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(cipher); - u32 ivsize, block, conf_reg = 0; + u32 block, conf_reg = 0; block = crypto_tfm_alg_blocksize(tfm); - ivsize = crypto_skcipher_ivsize(cipher); if (block == DES_BLOCK_SIZE) { - ctx->mode |= RK_CRYPTO_TDES_FIFO_MODE | + rctx->mode |= RK_CRYPTO_TDES_FIFO_MODE | RK_CRYPTO_TDES_BYTESWAP_KEY | RK_CRYPTO_TDES_BYTESWAP_IV; - CRYPTO_WRITE(dev, RK_CRYPTO_TDES_CTRL, ctx->mode); - memcpy_toio(dev->reg + RK_CRYPTO_TDES_IV_0, req->iv, ivsize); + CRYPTO_WRITE(dev, RK_CRYPTO_TDES_CTRL, rctx->mode); + memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_KEY1_0, ctx->key, ctx->keylen); conf_reg = RK_CRYPTO_DESSEL; } else { - ctx->mode |= RK_CRYPTO_AES_FIFO_MODE | + rctx->mode |= RK_CRYPTO_AES_FIFO_MODE | RK_CRYPTO_AES_KEY_CHANGE | RK_CRYPTO_AES_BYTESWAP_KEY | RK_CRYPTO_AES_BYTESWAP_IV; if (ctx->keylen == AES_KEYSIZE_192) - ctx->mode |= RK_CRYPTO_AES_192BIT_key; + rctx->mode |= RK_CRYPTO_AES_192BIT_key; else if (ctx->keylen == AES_KEYSIZE_256) - ctx->mode |= RK_CRYPTO_AES_256BIT_key; - CRYPTO_WRITE(dev, RK_CRYPTO_AES_CTRL, ctx->mode); - memcpy_toio(dev->reg + RK_CRYPTO_AES_IV_0, req->iv, ivsize); + rctx->mode |= RK_CRYPTO_AES_256BIT_key; + CRYPTO_WRITE(dev, RK_CRYPTO_AES_CTRL, rctx->mode); + memcpy_toio(ctx->dev->reg + RK_CRYPTO_AES_KEY_0, ctx->key, ctx->keylen); } conf_reg |= RK_CRYPTO_BYTESWAP_BTFIFO | RK_CRYPTO_BYTESWAP_BRFIFO; @@ -231,146 +298,138 @@ static void rk_ablk_hw_init(struct rk_crypto_info *dev) RK_CRYPTO_BCDMA_ERR_ENA | RK_CRYPTO_BCDMA_DONE_ENA); } -static void crypto_dma_start(struct rk_crypto_info *dev) +static void crypto_dma_start(struct rk_crypto_info *dev, + struct scatterlist *sgs, + struct scatterlist *sgd, unsigned int todo) { - CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAS, dev->addr_in); - CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAL, dev->count / 4); - CRYPTO_WRITE(dev, RK_CRYPTO_BTDMAS, dev->addr_out); + CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAS, sg_dma_address(sgs)); + CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAL, todo); + CRYPTO_WRITE(dev, RK_CRYPTO_BTDMAS, 
sg_dma_address(sgd)); CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, RK_CRYPTO_BLOCK_START | _SBF(RK_CRYPTO_BLOCK_START, 16)); } -static int rk_set_data_start(struct rk_crypto_info *dev) +static int rk_cipher_run(struct crypto_engine *engine, void *async_req) { - int err; - struct skcipher_request *req = - skcipher_request_cast(dev->async_req); - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct skcipher_request *areq = container_of(async_req, struct skcipher_request, base); + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); - u32 ivsize = crypto_skcipher_ivsize(tfm); - u8 *src_last_blk = page_address(sg_page(dev->sg_src)) + - dev->sg_src->offset + dev->sg_src->length - ivsize; - - /* Store the iv that need to be updated in chain mode. - * And update the IV buffer to contain the next IV for decryption mode. - */ - if (ctx->mode & RK_CRYPTO_DEC) { - memcpy(ctx->iv, src_last_blk, ivsize); - sg_pcopy_to_buffer(dev->first, dev->src_nents, req->iv, - ivsize, dev->total - ivsize); - } - - err = dev->load_data(dev, dev->sg_src, dev->sg_dst); - if (!err) - crypto_dma_start(dev); - return err; -} - -static int rk_ablk_start(struct rk_crypto_info *dev) -{ - struct skcipher_request *req = - skcipher_request_cast(dev->async_req); - unsigned long flags; + struct rk_cipher_rctx *rctx = skcipher_request_ctx(areq); + struct scatterlist *sgs, *sgd; int err = 0; + int ivsize = crypto_skcipher_ivsize(tfm); + int offset; + u8 iv[AES_BLOCK_SIZE]; + u8 biv[AES_BLOCK_SIZE]; + u8 *ivtouse = areq->iv; + unsigned int len = areq->cryptlen; + unsigned int todo; - dev->left_bytes = req->cryptlen; - dev->total = req->cryptlen; - dev->sg_src = req->src; - dev->first = req->src; - dev->src_nents = sg_nents(req->src); - dev->sg_dst = req->dst; - dev->dst_nents = sg_nents(req->dst); - dev->aligned = 1; - - spin_lock_irqsave(&dev->lock, flags); - rk_ablk_hw_init(dev); - err = rk_set_data_start(dev); - spin_unlock_irqrestore(&dev->lock, flags); - return err; -} - -static void rk_iv_copyback(struct rk_crypto_info *dev) -{ - struct skcipher_request *req = - skcipher_request_cast(dev->async_req); - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); - u32 ivsize = crypto_skcipher_ivsize(tfm); - - /* Update the IV buffer to contain the next IV for encryption mode. 
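(Illustrative aside: for CBC the IV of each chunk is the last ciphertext block of the previous one, which is why the loop below copies that block aside. A compact sketch of the helper involved, with illustrative names; the driver open-codes this with scatterwalk_map_and_copy().)

    #include <crypto/scatterwalk.h>

    /*
     * Copy the trailing ivsize bytes of this scatterlist chunk into next_iv.
     * When decrypting (possibly in place) this must run *before* the DMA
     * overwrites the source; when encrypting it runs on the destination
     * after the DMA so the freshly produced ciphertext block is captured.
     */
    static void cbc_save_next_iv(struct scatterlist *sg, unsigned int len,
                                 unsigned int ivsize, u8 *next_iv)
    {
            scatterwalk_map_and_copy(next_iv, sg, len - ivsize, ivsize, 0);
    }

The saved block is then programmed into RK_CRYPTO_AES_IV_0 or RK_CRYPTO_TDES_IV_0 before the next chunk is started.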
*/ - if (!(ctx->mode & RK_CRYPTO_DEC)) { - if (dev->aligned) { - memcpy(req->iv, sg_virt(dev->sg_dst) + - dev->sg_dst->length - ivsize, ivsize); - } else { - memcpy(req->iv, dev->addr_vir + - dev->count - ivsize, ivsize); + ivsize = crypto_skcipher_ivsize(tfm); + if (areq->iv && crypto_skcipher_ivsize(tfm) > 0) { + if (rctx->mode & RK_CRYPTO_DEC) { + offset = areq->cryptlen - ivsize; + scatterwalk_map_and_copy(rctx->backup_iv, areq->src, + offset, ivsize, 0); } } -} -static void rk_update_iv(struct rk_crypto_info *dev) -{ - struct skcipher_request *req = - skcipher_request_cast(dev->async_req); - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); - u32 ivsize = crypto_skcipher_ivsize(tfm); - u8 *new_iv = NULL; + sgs = areq->src; + sgd = areq->dst; - if (ctx->mode & RK_CRYPTO_DEC) { - new_iv = ctx->iv; - } else { - new_iv = page_address(sg_page(dev->sg_dst)) + - dev->sg_dst->offset + dev->sg_dst->length - ivsize; - } - - if (ivsize == DES_BLOCK_SIZE) - memcpy_toio(dev->reg + RK_CRYPTO_TDES_IV_0, new_iv, ivsize); - else if (ivsize == AES_BLOCK_SIZE) - memcpy_toio(dev->reg + RK_CRYPTO_AES_IV_0, new_iv, ivsize); -} - -/* return: - * true some err was occurred - * fault no err, continue - */ -static int rk_ablk_rx(struct rk_crypto_info *dev) -{ - int err = 0; - struct skcipher_request *req = - skcipher_request_cast(dev->async_req); - - dev->unload_data(dev); - if (!dev->aligned) { - if (!sg_pcopy_from_buffer(req->dst, dev->dst_nents, - dev->addr_vir, dev->count, - dev->total - dev->left_bytes - - dev->count)) { - err = -EINVAL; - goto out_rx; + while (sgs && sgd && len) { + if (!sgs->length) { + sgs = sg_next(sgs); + sgd = sg_next(sgd); + continue; } - } - if (dev->left_bytes) { - rk_update_iv(dev); - if (dev->aligned) { - if (sg_is_last(dev->sg_src)) { - dev_err(dev->dev, "[%s:%d] Lack of data\n", - __func__, __LINE__); - err = -ENOMEM; - goto out_rx; + if (rctx->mode & RK_CRYPTO_DEC) { + /* we backup last block of source to be used as IV at next step */ + offset = sgs->length - ivsize; + scatterwalk_map_and_copy(biv, sgs, offset, ivsize, 0); + } + if (sgs == sgd) { + err = dma_map_sg(ctx->dev->dev, sgs, 1, DMA_BIDIRECTIONAL); + if (err <= 0) { + err = -EINVAL; + goto theend_iv; + } + } else { + err = dma_map_sg(ctx->dev->dev, sgs, 1, DMA_TO_DEVICE); + if (err <= 0) { + err = -EINVAL; + goto theend_iv; + } + err = dma_map_sg(ctx->dev->dev, sgd, 1, DMA_FROM_DEVICE); + if (err <= 0) { + err = -EINVAL; + goto theend_sgs; } - dev->sg_src = sg_next(dev->sg_src); - dev->sg_dst = sg_next(dev->sg_dst); } - err = rk_set_data_start(dev); - } else { - rk_iv_copyback(dev); - /* here show the calculation is over without any err */ - dev->complete(dev->async_req, 0); - tasklet_schedule(&dev->queue_task); + err = 0; + rk_ablk_hw_init(ctx->dev, areq); + if (ivsize) { + if (ivsize == DES_BLOCK_SIZE) + memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_IV_0, ivtouse, ivsize); + else + memcpy_toio(ctx->dev->reg + RK_CRYPTO_AES_IV_0, ivtouse, ivsize); + } + reinit_completion(&ctx->dev->complete); + ctx->dev->status = 0; + + todo = min(sg_dma_len(sgs), len); + len -= todo; + crypto_dma_start(ctx->dev, sgs, sgd, todo / 4); + wait_for_completion_interruptible_timeout(&ctx->dev->complete, + msecs_to_jiffies(2000)); + if (!ctx->dev->status) { + dev_err(ctx->dev->dev, "DMA timeout\n"); + err = -EFAULT; + goto theend; + } + if (sgs == sgd) { + dma_unmap_sg(ctx->dev->dev, sgs, 1, DMA_BIDIRECTIONAL); + } else { + dma_unmap_sg(ctx->dev->dev, sgs, 1, DMA_TO_DEVICE); + 
dma_unmap_sg(ctx->dev->dev, sgd, 1, DMA_FROM_DEVICE); + } + if (rctx->mode & RK_CRYPTO_DEC) { + memcpy(iv, biv, ivsize); + ivtouse = iv; + } else { + offset = sgd->length - ivsize; + scatterwalk_map_and_copy(iv, sgd, offset, ivsize, 0); + ivtouse = iv; + } + sgs = sg_next(sgs); + sgd = sg_next(sgd); } -out_rx: + + if (areq->iv && ivsize > 0) { + offset = areq->cryptlen - ivsize; + if (rctx->mode & RK_CRYPTO_DEC) { + memcpy(areq->iv, rctx->backup_iv, ivsize); + memzero_explicit(rctx->backup_iv, ivsize); + } else { + scatterwalk_map_and_copy(areq->iv, areq->dst, offset, + ivsize, 0); + } + } + +theend: + local_bh_disable(); + crypto_finalize_skcipher_request(engine, areq, err); + local_bh_enable(); + return 0; + +theend_sgs: + if (sgs == sgd) { + dma_unmap_sg(ctx->dev->dev, sgs, 1, DMA_BIDIRECTIONAL); + } else { + dma_unmap_sg(ctx->dev->dev, sgs, 1, DMA_TO_DEVICE); + dma_unmap_sg(ctx->dev->dev, sgd, 1, DMA_FROM_DEVICE); + } +theend_iv: return err; } @@ -378,26 +437,34 @@ static int rk_ablk_init_tfm(struct crypto_skcipher *tfm) { struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); struct skcipher_alg *alg = crypto_skcipher_alg(tfm); + const char *name = crypto_tfm_alg_name(&tfm->base); struct rk_crypto_tmp *algt; algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher); ctx->dev = algt->dev; - ctx->dev->align_size = crypto_tfm_alg_alignmask(crypto_skcipher_tfm(tfm)) + 1; - ctx->dev->start = rk_ablk_start; - ctx->dev->update = rk_ablk_rx; - ctx->dev->complete = rk_crypto_complete; - ctx->dev->addr_vir = (char *)__get_free_page(GFP_KERNEL); - return ctx->dev->addr_vir ? ctx->dev->enable_clk(ctx->dev) : -ENOMEM; + ctx->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(ctx->fallback_tfm)) { + dev_err(ctx->dev->dev, "ERROR: Cannot allocate fallback for %s %ld\n", + name, PTR_ERR(ctx->fallback_tfm)); + return PTR_ERR(ctx->fallback_tfm); + } + + tfm->reqsize = sizeof(struct rk_cipher_rctx) + + crypto_skcipher_reqsize(ctx->fallback_tfm); + + ctx->enginectx.op.do_one_request = rk_cipher_run; + + return 0; } static void rk_ablk_exit_tfm(struct crypto_skcipher *tfm) { struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); - free_page((unsigned long)ctx->dev->addr_vir); - ctx->dev->disable_clk(ctx->dev); + memzero_explicit(ctx->key, ctx->keylen); + crypto_free_skcipher(ctx->fallback_tfm); } struct rk_crypto_tmp rk_ecb_aes_alg = { @@ -406,7 +473,7 @@ struct rk_crypto_tmp rk_ecb_aes_alg = { .base.cra_name = "ecb(aes)", .base.cra_driver_name = "ecb-aes-rk", .base.cra_priority = 300, - .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct rk_cipher_ctx), .base.cra_alignmask = 0x0f, @@ -428,7 +495,7 @@ struct rk_crypto_tmp rk_cbc_aes_alg = { .base.cra_name = "cbc(aes)", .base.cra_driver_name = "cbc-aes-rk", .base.cra_priority = 300, - .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct rk_cipher_ctx), .base.cra_alignmask = 0x0f, @@ -451,7 +518,7 @@ struct rk_crypto_tmp rk_ecb_des_alg = { .base.cra_name = "ecb(des)", .base.cra_driver_name = "ecb-des-rk", .base.cra_priority = 300, - .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .base.cra_blocksize = DES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct rk_cipher_ctx), .base.cra_alignmask = 0x07, @@ -473,7 +540,7 @@ struct rk_crypto_tmp 
rk_cbc_des_alg = { .base.cra_name = "cbc(des)", .base.cra_driver_name = "cbc-des-rk", .base.cra_priority = 300, - .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .base.cra_blocksize = DES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct rk_cipher_ctx), .base.cra_alignmask = 0x07, @@ -496,7 +563,7 @@ struct rk_crypto_tmp rk_ecb_des3_ede_alg = { .base.cra_name = "ecb(des3_ede)", .base.cra_driver_name = "ecb-des3-ede-rk", .base.cra_priority = 300, - .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .base.cra_blocksize = DES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct rk_cipher_ctx), .base.cra_alignmask = 0x07, @@ -518,7 +585,7 @@ struct rk_crypto_tmp rk_cbc_des3_ede_alg = { .base.cra_name = "cbc(des3_ede)", .base.cra_driver_name = "cbc-des3-ede-rk", .base.cra_priority = 300, - .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .base.cra_blocksize = DES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct rk_cipher_ctx), .base.cra_alignmask = 0x07, diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c index 85faa7a5c7d1..a473b640c40a 100644 --- a/drivers/devfreq/devfreq.c +++ b/drivers/devfreq/devfreq.c @@ -775,8 +775,7 @@ static void remove_sysfs_files(struct devfreq *devfreq, * @dev: the device to add devfreq feature. * @profile: device-specific profile to run devfreq. * @governor_name: name of the policy to choose frequency. - * @data: private data for the governor. The devfreq framework does not - * touch this value. + * @data: devfreq driver pass to governors, governor should not change it. */ struct devfreq *devfreq_add_device(struct device *dev, struct devfreq_dev_profile *profile, @@ -1003,8 +1002,7 @@ static void devm_devfreq_dev_release(struct device *dev, void *res) * @dev: the device to add devfreq feature. * @profile: device-specific profile to run devfreq. * @governor_name: name of the policy to choose frequency. - * @data: private data for the governor. The devfreq framework does not - * touch this value. + * @data: devfreq driver pass to governors, governor should not change it. 
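(Illustrative aside: the Rockchip skcipher definitions above now carry CRYPTO_ALG_NEED_FALLBACK because requests the engine cannot handle, such as unaligned or unevenly split scatterlists, are pushed to a software implementation. A rough sketch of that pattern with placeholder my_* names, mirroring the calls visible in the hunks.)

    /* tfm init: allocate the software fallback and grow the request size */
    ctx->fallback_tfm = crypto_alloc_skcipher(crypto_tfm_alg_name(&tfm->base),
                                              0, CRYPTO_ALG_NEED_FALLBACK);
    if (IS_ERR(ctx->fallback_tfm))
            return PTR_ERR(ctx->fallback_tfm);
    tfm->reqsize = sizeof(struct my_rctx) +
                   crypto_skcipher_reqsize(ctx->fallback_tfm);

    /* request path: forward to the fallback when the hardware cannot help */
    skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
    skcipher_request_set_callback(&rctx->fallback_req, req->base.flags,
                                  req->base.complete, req->base.data);
    skcipher_request_set_crypt(&rctx->fallback_req, req->src, req->dst,
                               req->cryptlen, req->iv);
    return decrypt ? crypto_skcipher_decrypt(&rctx->fallback_req)
                   : crypto_skcipher_encrypt(&rctx->fallback_req);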
* * This function manages automatically the memory of devfreq device using device * resource management and simplify the free operation for memory of devfreq diff --git a/drivers/devfreq/governor_userspace.c b/drivers/devfreq/governor_userspace.c index ab9db7adb3ad..d69672ccacc4 100644 --- a/drivers/devfreq/governor_userspace.c +++ b/drivers/devfreq/governor_userspace.c @@ -21,7 +21,7 @@ struct userspace_data { static int devfreq_userspace_func(struct devfreq *df, unsigned long *freq) { - struct userspace_data *data = df->data; + struct userspace_data *data = df->governor_data; if (data->valid) *freq = data->user_frequency; @@ -40,7 +40,7 @@ static ssize_t set_freq_store(struct device *dev, struct device_attribute *attr, int err = 0; mutex_lock(&devfreq->lock); - data = devfreq->data; + data = devfreq->governor_data; sscanf(buf, "%lu", &wanted); data->user_frequency = wanted; @@ -60,7 +60,7 @@ static ssize_t set_freq_show(struct device *dev, int err = 0; mutex_lock(&devfreq->lock); - data = devfreq->data; + data = devfreq->governor_data; if (data->valid) err = sprintf(buf, "%lu\n", data->user_frequency); @@ -91,7 +91,7 @@ static int userspace_init(struct devfreq *devfreq) goto out; } data->valid = false; - devfreq->data = data; + devfreq->governor_data = data; err = sysfs_create_group(&devfreq->dev.kobj, &dev_attr_group); out: @@ -107,8 +107,8 @@ static void userspace_exit(struct devfreq *devfreq) if (devfreq->dev.kobj.sd) sysfs_remove_group(&devfreq->dev.kobj, &dev_attr_group); - kfree(devfreq->data); - devfreq->data = NULL; + kfree(devfreq->governor_data); + devfreq->governor_data = NULL; } static int devfreq_userspace_handler(struct devfreq *devfreq, diff --git a/drivers/dio/dio.c b/drivers/dio/dio.c index 4c06c93c93d3..c7f7134adc21 100644 --- a/drivers/dio/dio.c +++ b/drivers/dio/dio.c @@ -110,6 +110,12 @@ static char dio_no_name[] = { 0 }; #endif /* CONFIG_DIO_CONSTANTS */ +static void dio_dev_release(struct device *dev) +{ + struct dio_dev *ddev = container_of(dev, typeof(struct dio_dev), dev); + kfree(ddev); +} + int __init dio_find(int deviceid) { /* Called to find a DIO device before the full bus scan has run. 
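(Illustrative aside: the DIO hunk above adds a release() callback for each bus device, and the following hunk switches the device_register() failure path over to put_device(). The underlying rule, sketched with placeholder names: once registration has been attempted the structure is refcounted and must not be freed directly.)

    #include <linux/device.h>
    #include <linux/slab.h>

    struct foo_dev {
            struct device dev;
            /* bus-specific fields */
    };

    static void foo_dev_release(struct device *dev)
    {
            kfree(container_of(dev, struct foo_dev, dev));
    }

    static int foo_register(struct foo_dev *fdev)
    {
            int error;

            fdev->dev.release = foo_dev_release;
            error = device_register(&fdev->dev);
            if (error) {
                    /* drop the reference; release() frees the memory */
                    put_device(&fdev->dev);
                    return error;
            }
            return 0;
    }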
@@ -224,6 +230,7 @@ static int __init dio_init(void) dev->bus = &dio_bus; dev->dev.parent = &dio_bus.dev; dev->dev.bus = &dio_bus_type; + dev->dev.release = dio_dev_release; dev->scode = scode; dev->resource.start = pa; dev->resource.end = pa + DIO_SIZE(scode, va); @@ -251,6 +258,7 @@ static int __init dio_init(void) if (error) { pr_err("DIO: Error registering device %s\n", dev->name); + put_device(&dev->dev); continue; } error = dio_create_sysfs_dev_files(dev); diff --git a/drivers/edac/i10nm_base.c b/drivers/edac/i10nm_base.c index 6cf50ee0b77c..e0af60833d28 100644 --- a/drivers/edac/i10nm_base.c +++ b/drivers/edac/i10nm_base.c @@ -198,11 +198,10 @@ static struct pci_dev *pci_get_dev_wrapper(int dom, unsigned int bus, if (unlikely(pci_enable_device(pdev) < 0)) { edac_dbg(2, "Failed to enable device %02x:%02x.%x\n", bus, dev, fun); + pci_dev_put(pdev); return NULL; } - pci_dev_get(pdev); - return pdev; } diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig index c69d40ae5619..7684b3afa630 100644 --- a/drivers/extcon/Kconfig +++ b/drivers/extcon/Kconfig @@ -180,7 +180,7 @@ config EXTCON_USBC_CROS_EC config EXTCON_USBC_TUSB320 tristate "TI TUSB320 USB-C extcon support" - depends on I2C + depends on I2C && TYPEC select REGMAP_I2C help Say Y here to enable support for USB Type C cable detection extcon diff --git a/drivers/extcon/extcon-usbc-tusb320.c b/drivers/extcon/extcon-usbc-tusb320.c index 805af73b4152..7223c4b9dc70 100644 --- a/drivers/extcon/extcon-usbc-tusb320.c +++ b/drivers/extcon/extcon-usbc-tusb320.c @@ -6,6 +6,7 @@ * Author: Michael Auchter */ +#include #include #include #include @@ -13,21 +14,70 @@ #include #include #include +#include + +#define TUSB320_REG8 0x8 +#define TUSB320_REG8_CURRENT_MODE_ADVERTISE GENMASK(7, 6) +#define TUSB320_REG8_CURRENT_MODE_ADVERTISE_USB 0x0 +#define TUSB320_REG8_CURRENT_MODE_ADVERTISE_15A 0x1 +#define TUSB320_REG8_CURRENT_MODE_ADVERTISE_30A 0x2 +#define TUSB320_REG8_CURRENT_MODE_DETECT GENMASK(5, 4) +#define TUSB320_REG8_CURRENT_MODE_DETECT_DEF 0x0 +#define TUSB320_REG8_CURRENT_MODE_DETECT_MED 0x1 +#define TUSB320_REG8_CURRENT_MODE_DETECT_ACC 0x2 +#define TUSB320_REG8_CURRENT_MODE_DETECT_HI 0x3 +#define TUSB320_REG8_ACCESSORY_CONNECTED GENMASK(3, 2) +#define TUSB320_REG8_ACCESSORY_CONNECTED_NONE 0x0 +#define TUSB320_REG8_ACCESSORY_CONNECTED_AUDIO 0x4 +#define TUSB320_REG8_ACCESSORY_CONNECTED_ACC 0x5 +#define TUSB320_REG8_ACCESSORY_CONNECTED_DEBUG 0x6 +#define TUSB320_REG8_ACTIVE_CABLE_DETECTION BIT(0) #define TUSB320_REG9 0x9 #define TUSB320_REG9_ATTACHED_STATE_SHIFT 6 #define TUSB320_REG9_ATTACHED_STATE_MASK 0x3 #define TUSB320_REG9_CABLE_DIRECTION BIT(5) #define TUSB320_REG9_INTERRUPT_STATUS BIT(4) -#define TUSB320_ATTACHED_STATE_NONE 0x0 -#define TUSB320_ATTACHED_STATE_DFP 0x1 -#define TUSB320_ATTACHED_STATE_UFP 0x2 -#define TUSB320_ATTACHED_STATE_ACC 0x3 + +#define TUSB320_REGA 0xa +#define TUSB320L_REGA_DISABLE_TERM BIT(0) +#define TUSB320_REGA_I2C_SOFT_RESET BIT(3) +#define TUSB320_REGA_MODE_SELECT_SHIFT 4 +#define TUSB320_REGA_MODE_SELECT_MASK 0x3 + +#define TUSB320L_REGA0_REVISION 0xa0 + +enum tusb320_attached_state { + TUSB320_ATTACHED_STATE_NONE, + TUSB320_ATTACHED_STATE_DFP, + TUSB320_ATTACHED_STATE_UFP, + TUSB320_ATTACHED_STATE_ACC, +}; + +enum tusb320_mode { + TUSB320_MODE_PORT, + TUSB320_MODE_UFP, + TUSB320_MODE_DFP, + TUSB320_MODE_DRP, +}; + +struct tusb320_priv; + +struct tusb320_ops { + int (*set_mode)(struct tusb320_priv *priv, enum tusb320_mode mode); + int (*get_revision)(struct tusb320_priv *priv, unsigned int 
*revision); +}; struct tusb320_priv { struct device *dev; struct regmap *regmap; struct extcon_dev *edev; + struct tusb320_ops *ops; + enum tusb320_attached_state state; + struct typec_port *port; + struct typec_capability cap; + enum typec_port_type port_type; + enum typec_pwr_opmode pwr_opmode; }; static const char * const tusb_attached_states[] = { @@ -62,19 +112,142 @@ static int tusb320_check_signature(struct tusb320_priv *priv) return 0; } -static irqreturn_t tusb320_irq_handler(int irq, void *dev_id) +static int tusb320_set_mode(struct tusb320_priv *priv, enum tusb320_mode mode) { - struct tusb320_priv *priv = dev_id; - int state, polarity; - unsigned reg; + int ret; - if (regmap_read(priv->regmap, TUSB320_REG9, ®)) { - dev_err(priv->dev, "error during i2c read!\n"); - return IRQ_NONE; + /* Mode cannot be changed while cable is attached */ + if (priv->state != TUSB320_ATTACHED_STATE_NONE) + return -EBUSY; + + /* Write mode */ + ret = regmap_write_bits(priv->regmap, TUSB320_REGA, + TUSB320_REGA_MODE_SELECT_MASK << TUSB320_REGA_MODE_SELECT_SHIFT, + mode << TUSB320_REGA_MODE_SELECT_SHIFT); + if (ret) { + dev_err(priv->dev, "failed to write mode: %d\n", ret); + return ret; } - if (!(reg & TUSB320_REG9_INTERRUPT_STATUS)) - return IRQ_NONE; + return 0; +} + +static int tusb320l_set_mode(struct tusb320_priv *priv, enum tusb320_mode mode) +{ + int ret; + + /* Disable CC state machine */ + ret = regmap_write_bits(priv->regmap, TUSB320_REGA, + TUSB320L_REGA_DISABLE_TERM, 1); + if (ret) { + dev_err(priv->dev, + "failed to disable CC state machine: %d\n", ret); + return ret; + } + + /* Write mode */ + ret = regmap_write_bits(priv->regmap, TUSB320_REGA, + TUSB320_REGA_MODE_SELECT_MASK << TUSB320_REGA_MODE_SELECT_SHIFT, + mode << TUSB320_REGA_MODE_SELECT_SHIFT); + if (ret) { + dev_err(priv->dev, "failed to write mode: %d\n", ret); + goto err; + } + + msleep(5); +err: + /* Re-enable CC state machine */ + ret = regmap_write_bits(priv->regmap, TUSB320_REGA, + TUSB320L_REGA_DISABLE_TERM, 0); + if (ret) + dev_err(priv->dev, + "failed to re-enable CC state machine: %d\n", ret); + + return ret; +} + +static int tusb320_reset(struct tusb320_priv *priv) +{ + int ret; + + /* Set mode to default (follow PORT pin) */ + ret = priv->ops->set_mode(priv, TUSB320_MODE_PORT); + if (ret && ret != -EBUSY) { + dev_err(priv->dev, + "failed to set mode to PORT: %d\n", ret); + return ret; + } + + /* Perform soft reset */ + ret = regmap_write_bits(priv->regmap, TUSB320_REGA, + TUSB320_REGA_I2C_SOFT_RESET, 1); + if (ret) { + dev_err(priv->dev, + "failed to write soft reset bit: %d\n", ret); + return ret; + } + + /* Wait for chip to go through reset */ + msleep(95); + + return 0; +} + +static int tusb320l_get_revision(struct tusb320_priv *priv, unsigned int *revision) +{ + return regmap_read(priv->regmap, TUSB320L_REGA0_REVISION, revision); +} + +static struct tusb320_ops tusb320_ops = { + .set_mode = tusb320_set_mode, +}; + +static struct tusb320_ops tusb320l_ops = { + .set_mode = tusb320l_set_mode, + .get_revision = tusb320l_get_revision, +}; + +static int tusb320_set_adv_pwr_mode(struct tusb320_priv *priv) +{ + u8 mode; + + if (priv->pwr_opmode == TYPEC_PWR_MODE_USB) + mode = TUSB320_REG8_CURRENT_MODE_ADVERTISE_USB; + else if (priv->pwr_opmode == TYPEC_PWR_MODE_1_5A) + mode = TUSB320_REG8_CURRENT_MODE_ADVERTISE_15A; + else if (priv->pwr_opmode == TYPEC_PWR_MODE_3_0A) + mode = TUSB320_REG8_CURRENT_MODE_ADVERTISE_30A; + else /* No other mode is supported. 
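(Illustrative aside: the TUSB320 accessors in this series lean on the bitfield helpers. A small self-contained sketch of the GENMASK()/FIELD_PREP()/FIELD_GET() pairing with regmap; the register offset and field position here are illustrative, not the TUSB320 layout.)

    #include <linux/bitfield.h>
    #include <linux/bits.h>
    #include <linux/regmap.h>

    #define MY_REG                  0x08
    #define MY_REG_CURRENT_MODE     GENMASK(7, 6)   /* two-bit field */

    /* update only the field, leaving the rest of the register untouched */
    static int my_set_mode(struct regmap *map, unsigned int mode)
    {
            return regmap_write_bits(map, MY_REG, MY_REG_CURRENT_MODE,
                                     FIELD_PREP(MY_REG_CURRENT_MODE, mode));
    }

    static int my_get_mode(struct regmap *map, unsigned int *mode)
    {
            unsigned int val;
            int ret = regmap_read(map, MY_REG, &val);

            if (ret)
                    return ret;
            *mode = FIELD_GET(MY_REG_CURRENT_MODE, val);
            return 0;
    }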
*/ + return -EINVAL; + + return regmap_write_bits(priv->regmap, TUSB320_REG8, + TUSB320_REG8_CURRENT_MODE_ADVERTISE, + FIELD_PREP(TUSB320_REG8_CURRENT_MODE_ADVERTISE, + mode)); +} + +static int tusb320_port_type_set(struct typec_port *port, + enum typec_port_type type) +{ + struct tusb320_priv *priv = typec_get_drvdata(port); + + if (type == TYPEC_PORT_SRC) + return priv->ops->set_mode(priv, TUSB320_MODE_DFP); + else if (type == TYPEC_PORT_SNK) + return priv->ops->set_mode(priv, TUSB320_MODE_UFP); + else if (type == TYPEC_PORT_DRP) + return priv->ops->set_mode(priv, TUSB320_MODE_DRP); + else + return priv->ops->set_mode(priv, TUSB320_MODE_PORT); +} + +static const struct typec_operations tusb320_typec_ops = { + .port_type_set = tusb320_port_type_set, +}; + +static void tusb320_extcon_irq_handler(struct tusb320_priv *priv, u8 reg) +{ + int state, polarity; state = (reg >> TUSB320_REG9_ATTACHED_STATE_SHIFT) & TUSB320_REG9_ATTACHED_STATE_MASK; @@ -96,35 +269,93 @@ static irqreturn_t tusb320_irq_handler(int irq, void *dev_id) extcon_sync(priv->edev, EXTCON_USB); extcon_sync(priv->edev, EXTCON_USB_HOST); + priv->state = state; +} + +static void tusb320_typec_irq_handler(struct tusb320_priv *priv, u8 reg9) +{ + struct typec_port *port = priv->port; + struct device *dev = priv->dev; + u8 mode, role, state; + int ret, reg8; + bool ori; + + ori = reg9 & TUSB320_REG9_CABLE_DIRECTION; + typec_set_orientation(port, ori ? TYPEC_ORIENTATION_REVERSE : + TYPEC_ORIENTATION_NORMAL); + + state = (reg9 >> TUSB320_REG9_ATTACHED_STATE_SHIFT) & + TUSB320_REG9_ATTACHED_STATE_MASK; + if (state == TUSB320_ATTACHED_STATE_DFP) + role = TYPEC_SOURCE; + else + role = TYPEC_SINK; + + typec_set_vconn_role(port, role); + typec_set_pwr_role(port, role); + typec_set_data_role(port, role == TYPEC_SOURCE ? + TYPEC_HOST : TYPEC_DEVICE); + + ret = regmap_read(priv->regmap, TUSB320_REG8, ®8); + if (ret) { + dev_err(dev, "error during reg8 i2c read, ret=%d!\n", ret); + return; + } + + mode = FIELD_GET(TUSB320_REG8_CURRENT_MODE_DETECT, reg8); + if (mode == TUSB320_REG8_CURRENT_MODE_DETECT_DEF) + typec_set_pwr_opmode(port, TYPEC_PWR_MODE_USB); + else if (mode == TUSB320_REG8_CURRENT_MODE_DETECT_MED) + typec_set_pwr_opmode(port, TYPEC_PWR_MODE_1_5A); + else if (mode == TUSB320_REG8_CURRENT_MODE_DETECT_HI) + typec_set_pwr_opmode(port, TYPEC_PWR_MODE_3_0A); + else /* Charge through accessory */ + typec_set_pwr_opmode(port, TYPEC_PWR_MODE_USB); +} + +static irqreturn_t tusb320_state_update_handler(struct tusb320_priv *priv, + bool force_update) +{ + unsigned int reg; + + if (regmap_read(priv->regmap, TUSB320_REG9, ®)) { + dev_err(priv->dev, "error during i2c read!\n"); + return IRQ_NONE; + } + + if (!force_update && !(reg & TUSB320_REG9_INTERRUPT_STATUS)) + return IRQ_NONE; + + tusb320_extcon_irq_handler(priv, reg); + + /* + * Type-C support is optional. Only call the Type-C handler if a + * port had been registered previously. 
+ */ + if (priv->port) + tusb320_typec_irq_handler(priv, reg); + regmap_write(priv->regmap, TUSB320_REG9, reg); return IRQ_HANDLED; } +static irqreturn_t tusb320_irq_handler(int irq, void *dev_id) +{ + struct tusb320_priv *priv = dev_id; + + return tusb320_state_update_handler(priv, false); +} + static const struct regmap_config tusb320_regmap_config = { .reg_bits = 8, .val_bits = 8, }; -static int tusb320_extcon_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int tusb320_extcon_probe(struct tusb320_priv *priv) { - struct tusb320_priv *priv; int ret; - priv = devm_kzalloc(&client->dev, sizeof(*priv), GFP_KERNEL); - if (!priv) - return -ENOMEM; - priv->dev = &client->dev; - - priv->regmap = devm_regmap_init_i2c(client, &tusb320_regmap_config); - if (IS_ERR(priv->regmap)) - return PTR_ERR(priv->regmap); - - ret = tusb320_check_signature(priv); - if (ret) - return ret; - priv->edev = devm_extcon_dev_allocate(priv->dev, tusb320_extcon_cable); if (IS_ERR(priv->edev)) { dev_err(priv->dev, "failed to allocate extcon device\n"); @@ -142,8 +373,118 @@ static int tusb320_extcon_probe(struct i2c_client *client, extcon_set_property_capability(priv->edev, EXTCON_USB_HOST, EXTCON_PROP_USB_TYPEC_POLARITY); + return 0; +} + +static int tusb320_typec_probe(struct i2c_client *client, + struct tusb320_priv *priv) +{ + struct fwnode_handle *connector; + const char *cap_str; + int ret; + + /* The Type-C connector is optional, for backward compatibility. */ + connector = device_get_named_child_node(&client->dev, "connector"); + if (!connector) + return 0; + + /* Type-C connector found. */ + ret = typec_get_fw_cap(&priv->cap, connector); + if (ret) + return ret; + + priv->port_type = priv->cap.type; + + /* This goes into register 0x8 field CURRENT_MODE_ADVERTISE */ + ret = fwnode_property_read_string(connector, "typec-power-opmode", &cap_str); + if (ret) + return ret; + + ret = typec_find_pwr_opmode(cap_str); + if (ret < 0) + return ret; + if (ret == TYPEC_PWR_MODE_PD) + return -EINVAL; + + priv->pwr_opmode = ret; + + /* Initialize the hardware with the devicetree settings. 
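The Type-C probe path above maps the connector's "typec-power-opmode" string onto an enum and rejects power delivery, which this chip cannot advertise. A small sketch of that lookup follows; the string table mirrors the usual Type-C connector binding values, but treat the exact set and the enum ordering as assumptions of the sketch.

#include <stdio.h>
#include <string.h>

enum pwr_opmode { PWR_MODE_USB, PWR_MODE_1_5A, PWR_MODE_3_0A, PWR_MODE_PD };

static const char *const opmode_str[] = { "default", "1.5A", "3.0A", "usb_power_delivery" };

static int find_pwr_opmode(const char *s)
{
	for (unsigned int i = 0; i < sizeof(opmode_str) / sizeof(opmode_str[0]); i++)
		if (!strcmp(s, opmode_str[i]))
			return (int)i;
	return -1;
}

int main(void)
{
	int mode = find_pwr_opmode("3.0A");

	if (mode < 0 || mode == PWR_MODE_PD)    /* PD cannot be advertised by this chip */
		printf("unsupported power opmode\n");
	else
		printf("using opmode %d\n", mode);
	return 0;
}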
*/ + ret = tusb320_set_adv_pwr_mode(priv); + if (ret) + return ret; + + priv->cap.revision = USB_TYPEC_REV_1_1; + priv->cap.accessory[0] = TYPEC_ACCESSORY_AUDIO; + priv->cap.accessory[1] = TYPEC_ACCESSORY_DEBUG; + priv->cap.orientation_aware = true; + priv->cap.driver_data = priv; + priv->cap.ops = &tusb320_typec_ops; + priv->cap.fwnode = connector; + + priv->port = typec_register_port(&client->dev, &priv->cap); + if (IS_ERR(priv->port)) + return PTR_ERR(priv->port); + + return 0; +} + +static int tusb320_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct tusb320_priv *priv; + const void *match_data; + unsigned int revision; + int ret; + + priv = devm_kzalloc(&client->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + priv->dev = &client->dev; + + priv->regmap = devm_regmap_init_i2c(client, &tusb320_regmap_config); + if (IS_ERR(priv->regmap)) + return PTR_ERR(priv->regmap); + + ret = tusb320_check_signature(priv); + if (ret) + return ret; + + match_data = device_get_match_data(&client->dev); + if (!match_data) + return -EINVAL; + + priv->ops = (struct tusb320_ops*)match_data; + + if (priv->ops->get_revision) { + ret = priv->ops->get_revision(priv, &revision); + if (ret) + dev_warn(priv->dev, + "failed to read revision register: %d\n", ret); + else + dev_info(priv->dev, "chip revision %d\n", revision); + } + + ret = tusb320_extcon_probe(priv); + if (ret) + return ret; + + ret = tusb320_typec_probe(client, priv); + if (ret) + return ret; + /* update initial state */ - tusb320_irq_handler(client->irq, priv); + tusb320_state_update_handler(priv, true); + + /* Reset chip to its default state */ + ret = tusb320_reset(priv); + if (ret) + dev_warn(priv->dev, "failed to reset chip: %d\n", ret); + else + /* + * State and polarity might change after a reset, so update + * them again and make sure the interrupt status bit is cleared. 
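The probe function above selects a per-variant ops table from the OF match data and only calls the revision hook when the variant provides one. The same dispatch shape, reduced to a standalone sketch with invented names and stubbed hardware access:

#include <stdio.h>

struct chip_ops {
	int (*set_mode)(int mode);
	int (*get_revision)(unsigned int *rev);
};

static int base_set_mode(int mode)           { printf("set mode %d\n", mode); return 0; }
static int l_get_revision(unsigned int *rev) { *rev = 1; return 0; }

static const struct chip_ops base_ops = { .set_mode = base_set_mode };
static const struct chip_ops l_ops    = { .set_mode = base_set_mode, .get_revision = l_get_revision };

static void probe(const struct chip_ops *ops)
{
	unsigned int rev;

	/* the optional hook is only called when the variant provides it */
	if (ops->get_revision && !ops->get_revision(&rev))
		printf("chip revision %u\n", rev);
	ops->set_mode(0);
}

int main(void)
{
	probe(&base_ops);   /* plain variant: no revision printed */
	probe(&l_ops);      /* "L" variant: revision printed */
	return 0;
}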
+ */ + tusb320_state_update_handler(priv, true); ret = devm_request_threaded_irq(priv->dev, client->irq, NULL, tusb320_irq_handler, @@ -154,13 +495,14 @@ static int tusb320_extcon_probe(struct i2c_client *client, } static const struct of_device_id tusb320_extcon_dt_match[] = { - { .compatible = "ti,tusb320", }, + { .compatible = "ti,tusb320", .data = &tusb320_ops, }, + { .compatible = "ti,tusb320l", .data = &tusb320l_ops, }, { } }; MODULE_DEVICE_TABLE(of, tusb320_extcon_dt_match); static struct i2c_driver tusb320_extcon_driver = { - .probe = tusb320_extcon_probe, + .probe = tusb320_probe, .driver = { .name = "extcon-tusb320", .of_match_table = tusb320_extcon_dt_match, diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index 70be9c87fb67..ba03f5a4b30c 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -590,7 +590,7 @@ int __init efi_config_parse_tables(const efi_config_table_t *config_tables, seed = early_memremap(efi_rng_seed, sizeof(*seed)); if (seed != NULL) { - size = min(seed->size, EFI_RANDOM_SEED_SIZE); + size = min_t(u32, seed->size, SZ_1K); // sanity check early_memunmap(seed, sizeof(*seed)); } else { pr_err("Could not map UEFI random seed!\n"); @@ -599,8 +599,8 @@ int __init efi_config_parse_tables(const efi_config_table_t *config_tables, seed = early_memremap(efi_rng_seed, sizeof(*seed) + size); if (seed != NULL) { - pr_notice("seeding entropy pool\n"); add_bootloader_randomness(seed->bits, size); + memzero_explicit(seed->bits, size); early_memunmap(seed, sizeof(*seed) + size); } else { pr_err("Could not map UEFI random seed!\n"); diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h index cde0a2ef507d..fbffdd7290a3 100644 --- a/drivers/firmware/efi/libstub/efistub.h +++ b/drivers/firmware/efi/libstub/efistub.h @@ -766,6 +766,8 @@ efi_status_t efi_get_random_bytes(unsigned long size, u8 *out); efi_status_t efi_random_alloc(unsigned long size, unsigned long align, unsigned long *addr, unsigned long random_seed); +efi_status_t efi_random_get_seed(void); + efi_status_t check_platform_features(void); void *get_efi_config_table(efi_guid_t guid); diff --git a/drivers/firmware/efi/libstub/random.c b/drivers/firmware/efi/libstub/random.c index 33ab56769595..f85d2c066877 100644 --- a/drivers/firmware/efi/libstub/random.c +++ b/drivers/firmware/efi/libstub/random.c @@ -67,27 +67,43 @@ efi_status_t efi_random_get_seed(void) efi_guid_t rng_proto = EFI_RNG_PROTOCOL_GUID; efi_guid_t rng_algo_raw = EFI_RNG_ALGORITHM_RAW; efi_guid_t rng_table_guid = LINUX_EFI_RANDOM_SEED_TABLE_GUID; + struct linux_efi_random_seed *prev_seed, *seed = NULL; + int prev_seed_size = 0, seed_size = EFI_RANDOM_SEED_SIZE; efi_rng_protocol_t *rng = NULL; - struct linux_efi_random_seed *seed = NULL; efi_status_t status; status = efi_bs_call(locate_protocol, &rng_proto, NULL, (void **)&rng); if (status != EFI_SUCCESS) return status; + /* + * Check whether a seed was provided by a prior boot stage. In that + * case, instead of overwriting it, let's create a new buffer that can + * hold both, and concatenate the existing and the new seeds. + * Note that we should read the seed size with caution, in case the + * table got corrupted in memory somehow. 
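Both EFI changes above share one idea: a length field that earlier boot stages control is clamped to a sane bound before it is mapped or copied, and the seed is wiped once it has been consumed. A simplified, non-EFI sketch of that pattern; the 1 KiB bound mirrors the patch, while the struct layout and values are assumptions of the example.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

struct random_seed {
	uint32_t size;
	uint8_t  bits[];
};

#define SEED_SIZE_MAX 1024u	/* upper bound used as the sanity check */

static uint32_t sane_seed_size(const struct random_seed *seed)
{
	/* never trust seed->size blindly: it may be corrupted or hostile */
	return seed->size < SEED_SIZE_MAX ? seed->size : SEED_SIZE_MAX;
}

int main(void)
{
	struct random_seed *seed = malloc(sizeof(*seed) + SEED_SIZE_MAX);

	if (!seed)
		return 1;

	seed->size = 0xffffffffu;		/* corrupted length field */
	printf("using %u bytes of seed\n", sane_seed_size(seed));

	/* after consuming the seed, wipe it so it cannot be recovered */
	memset(seed->bits, 0, sane_seed_size(seed));
	free(seed);
	return 0;
}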
+ */ + prev_seed = get_efi_config_table(LINUX_EFI_RANDOM_SEED_TABLE_GUID); + if (prev_seed && prev_seed->size <= 512U) { + prev_seed_size = prev_seed->size; + seed_size += prev_seed_size; + } + /* * Use EFI_ACPI_RECLAIM_MEMORY here so that it is guaranteed that the * allocation will survive a kexec reboot (although we refresh the seed * beforehand) */ status = efi_bs_call(allocate_pool, EFI_ACPI_RECLAIM_MEMORY, - sizeof(*seed) + EFI_RANDOM_SEED_SIZE, + struct_size(seed, bits, seed_size), (void **)&seed); - if (status != EFI_SUCCESS) - return status; + if (status != EFI_SUCCESS) { + efi_warn("Failed to allocate memory for RNG seed.\n"); + goto err_warn; + } status = efi_call_proto(rng, get_rng, &rng_algo_raw, - EFI_RANDOM_SEED_SIZE, seed->bits); + EFI_RANDOM_SEED_SIZE, seed->bits); if (status == EFI_UNSUPPORTED) /* @@ -100,14 +116,28 @@ efi_status_t efi_random_get_seed(void) if (status != EFI_SUCCESS) goto err_freepool; - seed->size = EFI_RANDOM_SEED_SIZE; + seed->size = seed_size; + if (prev_seed_size) + memcpy(seed->bits + EFI_RANDOM_SEED_SIZE, prev_seed->bits, + prev_seed_size); + status = efi_bs_call(install_configuration_table, &rng_table_guid, seed); if (status != EFI_SUCCESS) goto err_freepool; + if (prev_seed_size) { + /* wipe and free the old seed if we managed to install the new one */ + memzero_explicit(prev_seed->bits, prev_seed_size); + efi_bs_call(free_pool, prev_seed); + } return EFI_SUCCESS; err_freepool: + memzero_explicit(seed, struct_size(seed, bits, seed_size)); efi_bs_call(free_pool, seed); + efi_warn("Failed to obtain seed from EFI_RNG_PROTOCOL\n"); +err_warn: + if (prev_seed) + efi_warn("Retaining bootloader-supplied seed only"); return status; } diff --git a/drivers/firmware/raspberrypi.c b/drivers/firmware/raspberrypi.c index 93364e97e879..773ab66a506a 100644 --- a/drivers/firmware/raspberrypi.c +++ b/drivers/firmware/raspberrypi.c @@ -406,6 +406,7 @@ static int rpi_firmware_probe(struct platform_device *pdev) int ret = PTR_ERR(fw->chan); if (ret != -EPROBE_DEFER) dev_err(dev, "Failed to get mbox channel: %d\n", ret); + kfree(fw); return ret; } diff --git a/drivers/gpio/gpio-sifive.c b/drivers/gpio/gpio-sifive.c index 7d82388b4ab7..f50236e68e88 100644 --- a/drivers/gpio/gpio-sifive.c +++ b/drivers/gpio/gpio-sifive.c @@ -209,6 +209,7 @@ static int sifive_gpio_probe(struct platform_device *pdev) return -ENODEV; } parent = irq_find_host(irq_parent); + of_node_put(irq_parent); if (!parent) { dev_err(dev, "no IRQ parent domain\n"); return -ENODEV; diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c index 937e7a8dd8a9..2a2e0691462b 100644 --- a/drivers/gpio/gpiolib-cdev.c +++ b/drivers/gpio/gpiolib-cdev.c @@ -54,6 +54,50 @@ static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_values), 8)); * interface to gpiolib GPIOs via ioctl()s. 
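The stub code above sizes the new buffer so that fresh RNG output and a seed installed by a previous boot stage can be concatenated, then wipes the old copy once the new table is in place. The same logic reduced to plain C; allocation, sizes and error handling are simplified, and this is not the EFI stub code itself.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

struct seed {
	uint32_t size;
	uint8_t  bits[];
};

static struct seed *combine_seeds(const uint8_t *fresh, uint32_t fresh_len, struct seed *prev)
{
	uint32_t prev_len = prev ? prev->size : 0;
	struct seed *out = malloc(sizeof(*out) + fresh_len + prev_len);

	if (!out)
		return NULL;

	out->size = fresh_len + prev_len;
	memcpy(out->bits, fresh, fresh_len);
	if (prev_len) {
		memcpy(out->bits + fresh_len, prev->bits, prev_len);
		memset(prev->bits, 0, prev_len);	/* wipe the old copy */
	}
	return out;
}

int main(void)
{
	uint8_t fresh[32] = { 0x42 };
	struct seed *combined = combine_seeds(fresh, sizeof(fresh), NULL);

	if (combined)
		printf("combined seed: %u bytes\n", combined->size);
	free(combined);
	return 0;
}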
*/ +typedef __poll_t (*poll_fn)(struct file *, struct poll_table_struct *); +typedef long (*ioctl_fn)(struct file *, unsigned int, unsigned long); +typedef ssize_t (*read_fn)(struct file *, char __user *, + size_t count, loff_t *); + +static __poll_t call_poll_locked(struct file *file, + struct poll_table_struct *wait, + struct gpio_device *gdev, poll_fn func) +{ + __poll_t ret; + + down_read(&gdev->sem); + ret = func(file, wait); + up_read(&gdev->sem); + + return ret; +} + +static long call_ioctl_locked(struct file *file, unsigned int cmd, + unsigned long arg, struct gpio_device *gdev, + ioctl_fn func) +{ + long ret; + + down_read(&gdev->sem); + ret = func(file, cmd, arg); + up_read(&gdev->sem); + + return ret; +} + +static ssize_t call_read_locked(struct file *file, char __user *buf, + size_t count, loff_t *f_ps, + struct gpio_device *gdev, read_fn func) +{ + ssize_t ret; + + down_read(&gdev->sem); + ret = func(file, buf, count, f_ps); + up_read(&gdev->sem); + + return ret; +} + /* * GPIO line handle management */ @@ -190,23 +234,25 @@ static long linehandle_set_config(struct linehandle_state *lh, return 0; } -static long linehandle_ioctl(struct file *file, unsigned int cmd, - unsigned long arg) +static long linehandle_ioctl_unlocked(struct file *file, unsigned int cmd, + unsigned long arg) { struct linehandle_state *lh = file->private_data; void __user *ip = (void __user *)arg; struct gpiohandle_data ghd; DECLARE_BITMAP(vals, GPIOHANDLES_MAX); - int i; + unsigned int i; + int ret; - if (cmd == GPIOHANDLE_GET_LINE_VALUES_IOCTL) { - /* NOTE: It's ok to read values of output lines. */ - int ret = gpiod_get_array_value_complex(false, - true, - lh->num_descs, - lh->descs, - NULL, - vals); + if (!lh->gdev->chip) + return -ENODEV; + + switch (cmd) { + case GPIOHANDLE_GET_LINE_VALUES_IOCTL: + /* NOTE: It's okay to read values of output lines */ + ret = gpiod_get_array_value_complex(false, true, + lh->num_descs, lh->descs, + NULL, vals); if (ret) return ret; @@ -218,7 +264,7 @@ static long linehandle_ioctl(struct file *file, unsigned int cmd, return -EFAULT; return 0; - } else if (cmd == GPIOHANDLE_SET_LINE_VALUES_IOCTL) { + case GPIOHANDLE_SET_LINE_VALUES_IOCTL: /* * All line descriptors were created at once with the same * flags so just check if the first one is really output. 
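The call_*_locked() helpers above run every file operation under the read side of gdev->sem, while chip removal (later in the series) takes the write side, so an ioctl can never race with the backing gpiochip being torn down. A user-space analogue with a pthread rwlock standing in for the kernel rw_semaphore; the types and the example ioctl are invented for the sketch.

#include <stdio.h>
#include <pthread.h>

struct gpio_dev {
	pthread_rwlock_t sem;
	int chip_present;
};

typedef long (*ioctl_fn)(struct gpio_dev *dev, unsigned int cmd);

static long do_ioctl(struct gpio_dev *dev, unsigned int cmd)
{
	if (!dev->chip_present)
		return -19;                     /* -ENODEV: chip already gone */
	printf("ioctl %u handled\n", cmd);
	return 0;
}

static long call_ioctl_locked(struct gpio_dev *dev, unsigned int cmd, ioctl_fn fn)
{
	long ret;

	pthread_rwlock_rdlock(&dev->sem);       /* down_read(&gdev->sem) */
	ret = fn(dev, cmd);
	pthread_rwlock_unlock(&dev->sem);       /* up_read(&gdev->sem) */
	return ret;
}

int main(void)
{
	struct gpio_dev dev = { .chip_present = 1 };

	pthread_rwlock_init(&dev.sem, NULL);
	printf("ret=%ld\n", call_ioctl_locked(&dev, 1, do_ioctl));
	pthread_rwlock_destroy(&dev.sem);
	return 0;
}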
@@ -240,10 +286,20 @@ static long linehandle_ioctl(struct file *file, unsigned int cmd, lh->descs, NULL, vals); - } else if (cmd == GPIOHANDLE_SET_CONFIG_IOCTL) { + case GPIOHANDLE_SET_CONFIG_IOCTL: return linehandle_set_config(lh, ip); + default: + return -EINVAL; } - return -EINVAL; +} + +static long linehandle_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + struct linehandle_state *lh = file->private_data; + + return call_ioctl_locked(file, cmd, arg, lh->gdev, + linehandle_ioctl_unlocked); } #ifdef CONFIG_COMPAT @@ -1182,20 +1238,34 @@ static long linereq_set_config(struct linereq *lr, void __user *ip) return ret; } -static long linereq_ioctl(struct file *file, unsigned int cmd, - unsigned long arg) +static long linereq_ioctl_unlocked(struct file *file, unsigned int cmd, + unsigned long arg) { struct linereq *lr = file->private_data; void __user *ip = (void __user *)arg; - if (cmd == GPIO_V2_LINE_GET_VALUES_IOCTL) - return linereq_get_values(lr, ip); - else if (cmd == GPIO_V2_LINE_SET_VALUES_IOCTL) - return linereq_set_values(lr, ip); - else if (cmd == GPIO_V2_LINE_SET_CONFIG_IOCTL) - return linereq_set_config(lr, ip); + if (!lr->gdev->chip) + return -ENODEV; - return -EINVAL; + switch (cmd) { + case GPIO_V2_LINE_GET_VALUES_IOCTL: + return linereq_get_values(lr, ip); + case GPIO_V2_LINE_SET_VALUES_IOCTL: + return linereq_set_values(lr, ip); + case GPIO_V2_LINE_SET_CONFIG_IOCTL: + return linereq_set_config(lr, ip); + default: + return -EINVAL; + } +} + +static long linereq_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + struct linereq *lr = file->private_data; + + return call_ioctl_locked(file, cmd, arg, lr->gdev, + linereq_ioctl_unlocked); } #ifdef CONFIG_COMPAT @@ -1206,12 +1276,15 @@ static long linereq_ioctl_compat(struct file *file, unsigned int cmd, } #endif -static __poll_t linereq_poll(struct file *file, - struct poll_table_struct *wait) +static __poll_t linereq_poll_unlocked(struct file *file, + struct poll_table_struct *wait) { struct linereq *lr = file->private_data; __poll_t events = 0; + if (!lr->gdev->chip) + return EPOLLHUP | EPOLLERR; + poll_wait(file, &lr->wait, wait); if (!kfifo_is_empty_spinlocked_noirqsave(&lr->events, @@ -1221,16 +1294,25 @@ static __poll_t linereq_poll(struct file *file, return events; } -static ssize_t linereq_read(struct file *file, - char __user *buf, - size_t count, - loff_t *f_ps) +static __poll_t linereq_poll(struct file *file, + struct poll_table_struct *wait) +{ + struct linereq *lr = file->private_data; + + return call_poll_locked(file, wait, lr->gdev, linereq_poll_unlocked); +} + +static ssize_t linereq_read_unlocked(struct file *file, char __user *buf, + size_t count, loff_t *f_ps) { struct linereq *lr = file->private_data; struct gpio_v2_line_event le; ssize_t bytes_read = 0; int ret; + if (!lr->gdev->chip) + return -ENODEV; + if (count < sizeof(le)) return -EINVAL; @@ -1275,6 +1357,15 @@ static ssize_t linereq_read(struct file *file, return bytes_read; } +static ssize_t linereq_read(struct file *file, char __user *buf, + size_t count, loff_t *f_ps) +{ + struct linereq *lr = file->private_data; + + return call_read_locked(file, buf, count, f_ps, lr->gdev, + linereq_read_unlocked); +} + static void linereq_free(struct linereq *lr) { unsigned int i; @@ -1490,12 +1581,15 @@ struct lineevent_state { (GPIOEVENT_REQUEST_RISING_EDGE | \ GPIOEVENT_REQUEST_FALLING_EDGE) -static __poll_t lineevent_poll(struct file *file, - struct poll_table_struct *wait) +static __poll_t lineevent_poll_unlocked(struct file 
*file, + struct poll_table_struct *wait) { struct lineevent_state *le = file->private_data; __poll_t events = 0; + if (!le->gdev->chip) + return EPOLLHUP | EPOLLERR; + poll_wait(file, &le->wait, wait); if (!kfifo_is_empty_spinlocked_noirqsave(&le->events, &le->wait.lock)) @@ -1504,15 +1598,21 @@ static __poll_t lineevent_poll(struct file *file, return events; } +static __poll_t lineevent_poll(struct file *file, + struct poll_table_struct *wait) +{ + struct lineevent_state *le = file->private_data; + + return call_poll_locked(file, wait, le->gdev, lineevent_poll_unlocked); +} + struct compat_gpioeevent_data { compat_u64 timestamp; u32 id; }; -static ssize_t lineevent_read(struct file *file, - char __user *buf, - size_t count, - loff_t *f_ps) +static ssize_t lineevent_read_unlocked(struct file *file, char __user *buf, + size_t count, loff_t *f_ps) { struct lineevent_state *le = file->private_data; struct gpioevent_data ge; @@ -1520,6 +1620,9 @@ static ssize_t lineevent_read(struct file *file, ssize_t ge_size; int ret; + if (!le->gdev->chip) + return -ENODEV; + /* * When compatible system call is being used the struct gpioevent_data, * in case of at least ia32, has different size due to the alignment @@ -1577,6 +1680,15 @@ static ssize_t lineevent_read(struct file *file, return bytes_read; } +static ssize_t lineevent_read(struct file *file, char __user *buf, + size_t count, loff_t *f_ps) +{ + struct lineevent_state *le = file->private_data; + + return call_read_locked(file, buf, count, f_ps, le->gdev, + lineevent_read_unlocked); +} + static void lineevent_free(struct lineevent_state *le) { if (le->irq) @@ -1594,13 +1706,16 @@ static int lineevent_release(struct inode *inode, struct file *file) return 0; } -static long lineevent_ioctl(struct file *file, unsigned int cmd, - unsigned long arg) +static long lineevent_ioctl_unlocked(struct file *file, unsigned int cmd, + unsigned long arg) { struct lineevent_state *le = file->private_data; void __user *ip = (void __user *)arg; struct gpiohandle_data ghd; + if (!le->gdev->chip) + return -ENODEV; + /* * We can get the value for an event line but not set it, * because it is input by definition. 
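From user space, the EPOLLHUP | EPOLLERR result that the *_poll_unlocked() helpers now return when the chip is gone simply shows up in revents; a poller should treat it as "device removed" and drop the file descriptor. A minimal illustration, where the device path and timeout are only examples:

#include <stdio.h>
#include <poll.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	struct pollfd pfd;

	pfd.fd = open("/dev/gpiochip0", O_RDONLY);
	if (pfd.fd < 0) {
		perror("open");
		return 1;
	}
	pfd.events = POLLIN;

	if (poll(&pfd, 1, 1000) > 0 && (pfd.revents & (POLLHUP | POLLERR)))
		fprintf(stderr, "gpiochip was removed, dropping fd\n");

	close(pfd.fd);
	return 0;
}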
@@ -1623,6 +1738,15 @@ static long lineevent_ioctl(struct file *file, unsigned int cmd, return -EINVAL; } +static long lineevent_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + struct lineevent_state *le = file->private_data; + + return call_ioctl_locked(file, cmd, arg, le->gdev, + lineevent_ioctl_unlocked); +} + #ifdef CONFIG_COMPAT static long lineevent_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg) @@ -2114,28 +2238,30 @@ static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg) return -ENODEV; /* Fill in the struct and pass to userspace */ - if (cmd == GPIO_GET_CHIPINFO_IOCTL) { + switch (cmd) { + case GPIO_GET_CHIPINFO_IOCTL: return chipinfo_get(cdev, ip); #ifdef CONFIG_GPIO_CDEV_V1 - } else if (cmd == GPIO_GET_LINEHANDLE_IOCTL) { + case GPIO_GET_LINEHANDLE_IOCTL: return linehandle_create(gdev, ip); - } else if (cmd == GPIO_GET_LINEEVENT_IOCTL) { + case GPIO_GET_LINEEVENT_IOCTL: return lineevent_create(gdev, ip); - } else if (cmd == GPIO_GET_LINEINFO_IOCTL || - cmd == GPIO_GET_LINEINFO_WATCH_IOCTL) { - return lineinfo_get_v1(cdev, ip, - cmd == GPIO_GET_LINEINFO_WATCH_IOCTL); + case GPIO_GET_LINEINFO_IOCTL: + return lineinfo_get_v1(cdev, ip, false); + case GPIO_GET_LINEINFO_WATCH_IOCTL: + return lineinfo_get_v1(cdev, ip, true); #endif /* CONFIG_GPIO_CDEV_V1 */ - } else if (cmd == GPIO_V2_GET_LINEINFO_IOCTL || - cmd == GPIO_V2_GET_LINEINFO_WATCH_IOCTL) { - return lineinfo_get(cdev, ip, - cmd == GPIO_V2_GET_LINEINFO_WATCH_IOCTL); - } else if (cmd == GPIO_V2_GET_LINE_IOCTL) { + case GPIO_V2_GET_LINEINFO_IOCTL: + return lineinfo_get(cdev, ip, false); + case GPIO_V2_GET_LINEINFO_WATCH_IOCTL: + return lineinfo_get(cdev, ip, true); + case GPIO_V2_GET_LINE_IOCTL: return linereq_create(gdev, ip); - } else if (cmd == GPIO_GET_LINEINFO_UNWATCH_IOCTL) { + case GPIO_GET_LINEINFO_UNWATCH_IOCTL: return lineinfo_unwatch(cdev, ip); + default: + return -EINVAL; } - return -EINVAL; } #ifdef CONFIG_COMPAT @@ -2177,12 +2303,15 @@ static int lineinfo_changed_notify(struct notifier_block *nb, return NOTIFY_OK; } -static __poll_t lineinfo_watch_poll(struct file *file, - struct poll_table_struct *pollt) +static __poll_t lineinfo_watch_poll_unlocked(struct file *file, + struct poll_table_struct *pollt) { struct gpio_chardev_data *cdev = file->private_data; __poll_t events = 0; + if (!cdev->gdev->chip) + return EPOLLHUP | EPOLLERR; + poll_wait(file, &cdev->wait, pollt); if (!kfifo_is_empty_spinlocked_noirqsave(&cdev->events, @@ -2192,8 +2321,17 @@ static __poll_t lineinfo_watch_poll(struct file *file, return events; } -static ssize_t lineinfo_watch_read(struct file *file, char __user *buf, - size_t count, loff_t *off) +static __poll_t lineinfo_watch_poll(struct file *file, + struct poll_table_struct *pollt) +{ + struct gpio_chardev_data *cdev = file->private_data; + + return call_poll_locked(file, pollt, cdev->gdev, + lineinfo_watch_poll_unlocked); +} + +static ssize_t lineinfo_watch_read_unlocked(struct file *file, char __user *buf, + size_t count, loff_t *off) { struct gpio_chardev_data *cdev = file->private_data; struct gpio_v2_line_info_changed event; @@ -2201,6 +2339,9 @@ static ssize_t lineinfo_watch_read(struct file *file, char __user *buf, int ret; size_t event_size; + if (!cdev->gdev->chip) + return -ENODEV; + #ifndef CONFIG_GPIO_CDEV_V1 event_size = sizeof(struct gpio_v2_line_info_changed); if (count < event_size) @@ -2268,6 +2409,15 @@ static ssize_t lineinfo_watch_read(struct file *file, char __user *buf, return bytes_read; } +static 
ssize_t lineinfo_watch_read(struct file *file, char __user *buf, + size_t count, loff_t *off) +{ + struct gpio_chardev_data *cdev = file->private_data; + + return call_read_locked(file, buf, count, off, cdev->gdev, + lineinfo_watch_read_unlocked); +} + /** * gpio_chrdev_open() - open the chardev for ioctl operations * @inode: inode for this chardev @@ -2281,13 +2431,17 @@ static int gpio_chrdev_open(struct inode *inode, struct file *file) struct gpio_chardev_data *cdev; int ret = -ENOMEM; + down_read(&gdev->sem); + /* Fail on open if the backing gpiochip is gone */ - if (!gdev->chip) - return -ENODEV; + if (!gdev->chip) { + ret = -ENODEV; + goto out_unlock; + } cdev = kzalloc(sizeof(*cdev), GFP_KERNEL); if (!cdev) - return -ENOMEM; + goto out_unlock; cdev->watched_lines = bitmap_zalloc(gdev->chip->ngpio, GFP_KERNEL); if (!cdev->watched_lines) @@ -2310,6 +2464,8 @@ static int gpio_chrdev_open(struct inode *inode, struct file *file) if (ret) goto out_unregister_notifier; + up_read(&gdev->sem); + return ret; out_unregister_notifier: @@ -2319,6 +2475,8 @@ out_free_bitmap: bitmap_free(cdev->watched_lines); out_free_cdev: kfree(cdev); +out_unlock: + up_read(&gdev->sem); return ret; } diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 5bcd37b36a96..aa95df6745b7 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -191,9 +191,8 @@ static int gpiochip_find_base(int ngpio) /* found a free space? */ if (gdev->base + gdev->ngpio <= base) break; - else - /* nope, check the space right before the chip */ - base = gdev->base - ngpio; + /* nope, check the space right before the chip */ + base = gdev->base - ngpio; } if (gpio_is_valid(base)) { @@ -730,6 +729,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data, spin_unlock_irqrestore(&gpio_lock, flags); BLOCKING_INIT_NOTIFIER_HEAD(&gdev->notifier); + init_rwsem(&gdev->sem); #ifdef CONFIG_PINCTRL INIT_LIST_HEAD(&gdev->pin_ranges); @@ -868,6 +868,8 @@ void gpiochip_remove(struct gpio_chip *gc) unsigned long flags; unsigned int i; + down_write(&gdev->sem); + /* FIXME: should the legacy sysfs handling be moved to gpio_device? */ gpiochip_sysfs_unregister(gdev); gpiochip_free_hogs(gc); @@ -902,6 +904,7 @@ void gpiochip_remove(struct gpio_chip *gc) * gone. */ gcdev_unregister(gdev); + up_write(&gdev->sem); put_device(&gdev->dev); } EXPORT_SYMBOL_GPL(gpiochip_remove); @@ -2412,8 +2415,7 @@ int gpiod_direction_output(struct gpio_desc *desc, int value) ret = gpiod_direction_input(desc); goto set_output_flag; } - } - else if (test_bit(FLAG_OPEN_SOURCE, &desc->flags)) { + } else if (test_bit(FLAG_OPEN_SOURCE, &desc->flags)) { ret = gpio_set_config(desc, PIN_CONFIG_DRIVE_OPEN_SOURCE); if (!ret) goto set_output_value; @@ -2570,9 +2572,9 @@ static int gpiod_get_raw_value_commit(const struct gpio_desc *desc) static int gpio_chip_get_multiple(struct gpio_chip *gc, unsigned long *mask, unsigned long *bits) { - if (gc->get_multiple) { + if (gc->get_multiple) return gc->get_multiple(gc, mask, bits); - } else if (gc->get) { + if (gc->get) { int i, value; for_each_set_bit(i, mask, gc->ngpio) { diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h index c31f4626915d..73b732a1d9c9 100644 --- a/drivers/gpio/gpiolib.h +++ b/drivers/gpio/gpiolib.h @@ -15,6 +15,7 @@ #include #include #include +#include #define GPIOCHIP_NAME "gpiochip" @@ -37,6 +38,12 @@ * or name of the IP component in a System on Chip. 
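The reworked open path above holds the reader lock across the liveness check and the allocations, with a single unlock label shared by the failure and success paths. The same control flow, reduced to a standalone sketch; the stand-in types, error numbers and buffer size are simplifications, not the gpiolib code itself.

#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

struct gdev {
	pthread_rwlock_t sem;
	int chip_present;
};

static int chrdev_open(struct gdev *gdev)
{
	void *cdev = NULL;
	int ret = -12;                          /* -ENOMEM by default */

	pthread_rwlock_rdlock(&gdev->sem);

	if (!gdev->chip_present) {
		ret = -19;                      /* -ENODEV: chip already unregistered */
		goto out_unlock;
	}

	cdev = malloc(64);
	if (!cdev)
		goto out_unlock;

	ret = 0;        /* success: in the real driver cdev is kept in file->private_data */
out_unlock:
	pthread_rwlock_unlock(&gdev->sem);
	if (ret)
		free(cdev);
	return ret;
}

int main(void)
{
	struct gdev gdev = { .chip_present = 1 };

	pthread_rwlock_init(&gdev.sem, NULL);
	printf("open -> %d\n", chrdev_open(&gdev));
	pthread_rwlock_destroy(&gdev.sem);
	return 0;
}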
* @data: per-instance data assigned by the driver * @list: links gpio_device:s together for traversal + * @notifier: used to notify subscribers about lines being requested, released + * or reconfigured + * @sem: protects the structure from a NULL-pointer dereference of @chip by + * user-space operations when the device gets unregistered during + * a hot-unplug event + * @pin_ranges: range of pins served by the GPIO driver * * This state container holds most of the runtime variable data * for a GPIO device and can hold references and live on after the @@ -57,6 +64,7 @@ struct gpio_device { void *data; struct list_head list; struct blocking_notifier_head notifier; + struct rw_semaphore sem; #ifdef CONFIG_PINCTRL /* @@ -72,6 +80,20 @@ struct gpio_device { /* gpio suffixes used for ACPI and device tree lookup */ static __maybe_unused const char * const gpio_suffixes[] = { "gpios", "gpio" }; +/** + * struct gpio_array - Opaque descriptor for a structure of GPIO array attributes + * + * @desc: Array of pointers to the GPIO descriptors + * @size: Number of elements in desc + * @chip: Parent GPIO chip + * @get_mask: Get mask used in fastpath + * @set_mask: Set mask used in fastpath + * @invert_mask: Invert mask used in fastpath + * + * This structure is attached to struct gpiod_descs obtained from + * gpiod_get_array() and can be passed back to get/set array functions in order + * to activate fast processing path if applicable. + */ struct gpio_array { struct gpio_desc **desc; unsigned int size; @@ -96,6 +118,23 @@ int gpiod_set_array_value_complex(bool raw, bool can_sleep, extern spinlock_t gpio_lock; extern struct list_head gpio_devices; + +/** + * struct gpio_desc - Opaque descriptor for a GPIO + * + * @gdev: Pointer to the parent GPIO device + * @flags: Binary descriptor flags + * @label: Name of the consumer + * @name: Line name + * @hog: Pointer to the device node that hogs this line (if any) + * @debounce_period_us: Debounce period in microseconds + * + * These are obtained using gpiod_get() and are preferable to the old + * integer-based handles. + * + * Contrary to integers, a pointer to a &struct gpio_desc is guaranteed to be + * valid until the GPIO is released. 
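For context on the descriptor documentation above, this is roughly what the consumer side looks like. A hedged kernel-style sketch only: the "enable" con_id and the whole function are invented for illustration and do not come from the patch.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>

static int example_probe_gpio(struct device *dev)
{
	struct gpio_desc *enable;

	/* opaque descriptor, no global GPIO numbers involved */
	enable = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW);
	if (IS_ERR(enable))
		return PTR_ERR(enable);

	gpiod_set_value(enable, 1);	/* drive the line active */
	return 0;
}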
+ */ struct gpio_desc { struct gpio_device *gdev; unsigned long flags; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 477ab3551177..34303dd3ada9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -1910,7 +1910,7 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd, ret = drm_vma_node_allow(&obj->vma_node, drm_priv); if (ret) { - kfree(mem); + kfree(*mem); return ret; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c index 27b19503773b..71354f505b84 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c @@ -317,6 +317,7 @@ static bool amdgpu_atrm_get_bios(struct amdgpu_device *adev) if (!found) return false; + pci_dev_put(pdev); adev->bios = kmalloc(size, GFP_KERNEL); if (!adev->bios) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 36cc89f56cea..0d998bc830c2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -4902,6 +4902,8 @@ static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev) pm_runtime_enable(&(p->dev)); pm_runtime_resume(&(p->dev)); } + + pci_dev_put(p); } static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev) @@ -4940,6 +4942,7 @@ static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev) if (expires < ktime_get_mono_fast_ns()) { dev_warn(adev->dev, "failed to suspend display audio\n"); + pci_dev_put(p); /* TODO: abort the succeeding gpu reset? */ return -ETIMEDOUT; } @@ -4947,6 +4950,7 @@ static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev) pm_runtime_disable(&(p->dev)); + pci_dev_put(p); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 28dea2eb61c7..cabbf02eb054 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -2008,6 +2008,15 @@ static int amdgpu_pci_probe(struct pci_dev *pdev, "See modparam exp_hw_support\n"); return -ENODEV; } + /* differentiate between P10 and P11 asics with the same DID */ + if (pdev->device == 0x67FF && + (pdev->revision == 0xE3 || + pdev->revision == 0xE7 || + pdev->revision == 0xF3 || + pdev->revision == 0xF7)) { + flags &= ~AMD_ASIC_MASK; + flags |= CHIP_POLARIS10; + } /* Due to hardware bugs, S/G Display on raven requires a 1:1 IOMMU mapping, * however, SME requires an indirect IOMMU mapping because the encryption @@ -2081,12 +2090,12 @@ static int amdgpu_pci_probe(struct pci_dev *pdev, pci_set_drvdata(pdev, ddev); - ret = amdgpu_driver_load_kms(adev, ent->driver_data); + ret = amdgpu_driver_load_kms(adev, flags); if (ret) goto err_pci; retry_init: - ret = drm_dev_register(ddev, ent->driver_data); + ret = drm_dev_register(ddev, flags); if (ret == -EAGAIN && ++retry <= 3) { DRM_INFO("retry init %d\n", retry); /* Don't request EX mode too frequently which is attacking */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index a0b1bf17cb74..b1d0cad00b2e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -1510,7 +1510,8 @@ u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo) uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev, uint32_t domain) { - if (domain == (AMDGPU_GEM_DOMAIN_VRAM | 
AMDGPU_GEM_DOMAIN_GTT)) { + if ((domain == (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) && + ((adev->asic_type == CHIP_CARRIZO) || (adev->asic_type == CHIP_STONEY))) { domain = AMDGPU_GEM_DOMAIN_VRAM; if (adev->gmc.real_vram_size <= AMDGPU_SG_THRESHOLD) domain = AMDGPU_GEM_DOMAIN_GTT; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h index ce31d4fdee93..4af3610f4a82 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h @@ -62,6 +62,8 @@ struct amdgpu_vf_error_buffer { uint64_t data[AMDGPU_VF_ERROR_ENTRY_SIZE]; }; +enum idh_request; + /** * struct amdgpu_virt_ops - amdgpu device virt operations */ @@ -71,7 +73,8 @@ struct amdgpu_virt_ops { int (*req_init_data)(struct amdgpu_device *adev); int (*reset_gpu)(struct amdgpu_device *adev); int (*wait_reset)(struct amdgpu_device *adev); - void (*trans_msg)(struct amdgpu_device *adev, u32 req, u32 data1, u32 data2, u32 data3); + void (*trans_msg)(struct amdgpu_device *adev, enum idh_request req, + u32 data1, u32 data2, u32 data3); }; /* diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index d793eec69d61..6fee12c91ef5 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -40,39 +40,6 @@ #include "dm_helpers.h" -struct monitor_patch_info { - unsigned int manufacturer_id; - unsigned int product_id; - void (*patch_func)(struct dc_edid_caps *edid_caps, unsigned int param); - unsigned int patch_param; -}; -static void set_max_dsc_bpp_limit(struct dc_edid_caps *edid_caps, unsigned int param); - -static const struct monitor_patch_info monitor_patch_table[] = { -{0x6D1E, 0x5BBF, set_max_dsc_bpp_limit, 15}, -{0x6D1E, 0x5B9A, set_max_dsc_bpp_limit, 15}, -}; - -static void set_max_dsc_bpp_limit(struct dc_edid_caps *edid_caps, unsigned int param) -{ - if (edid_caps) - edid_caps->panel_patch.max_dsc_target_bpp_limit = param; -} - -static int amdgpu_dm_patch_edid_caps(struct dc_edid_caps *edid_caps) -{ - int i, ret = 0; - - for (i = 0; i < ARRAY_SIZE(monitor_patch_table); i++) - if ((edid_caps->manufacturer_id == monitor_patch_table[i].manufacturer_id) - && (edid_caps->product_id == monitor_patch_table[i].product_id)) { - monitor_patch_table[i].patch_func(edid_caps, monitor_patch_table[i].patch_param); - ret++; - } - - return ret; -} - /* dm_helpers_parse_edid_caps * * Parse edid caps @@ -158,8 +125,6 @@ enum dc_edid_status dm_helpers_parse_edid_caps( kfree(sads); kfree(sadb); - amdgpu_dm_patch_edid_caps(edid_caps); - return result; } diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c index 6dbde74c1e06..1d86fd5610c0 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c @@ -352,6 +352,7 @@ static enum bp_result get_gpio_i2c_info( uint32_t count = 0; unsigned int table_index = 0; bool find_valid = false; + struct atom_gpio_pin_assignment *pin; if (!info) return BP_RESULT_BADINPUT; @@ -379,20 +380,17 @@ static enum bp_result get_gpio_i2c_info( - sizeof(struct atom_common_table_header)) / sizeof(struct atom_gpio_pin_assignment); + pin = (struct atom_gpio_pin_assignment *) header->gpio_pin; + for (table_index = 0; table_index < count; table_index++) { - if (((record->i2c_id & I2C_HW_CAP) == ( - header->gpio_pin[table_index].gpio_id & - I2C_HW_CAP)) && - ((record->i2c_id & 
I2C_HW_ENGINE_ID_MASK) == - (header->gpio_pin[table_index].gpio_id & - I2C_HW_ENGINE_ID_MASK)) && - ((record->i2c_id & I2C_HW_LANE_MUX) == - (header->gpio_pin[table_index].gpio_id & - I2C_HW_LANE_MUX))) { + if (((record->i2c_id & I2C_HW_CAP) == (pin->gpio_id & I2C_HW_CAP)) && + ((record->i2c_id & I2C_HW_ENGINE_ID_MASK) == (pin->gpio_id & I2C_HW_ENGINE_ID_MASK)) && + ((record->i2c_id & I2C_HW_LANE_MUX) == (pin->gpio_id & I2C_HW_LANE_MUX))) { /* still valid */ find_valid = true; break; } + pin = (struct atom_gpio_pin_assignment *)((uint8_t *)pin + sizeof(struct atom_gpio_pin_assignment)); } /* If we don't find the entry that we are looking for then diff --git a/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c b/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c index dcfa0a3efa00..bf72d3f60d7f 100644 --- a/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c @@ -1127,6 +1127,7 @@ struct resource_pool *dce60_create_resource_pool( if (dce60_construct(num_virtual_links, dc, pool)) return &pool->base; + kfree(pool); BREAK_TO_DEBUGGER(); return NULL; } @@ -1324,6 +1325,7 @@ struct resource_pool *dce61_create_resource_pool( if (dce61_construct(num_virtual_links, dc, pool)) return &pool->base; + kfree(pool); BREAK_TO_DEBUGGER(); return NULL; } @@ -1517,6 +1519,7 @@ struct resource_pool *dce64_create_resource_pool( if (dce64_construct(num_virtual_links, dc, pool)) return &pool->base; + kfree(pool); BREAK_TO_DEBUGGER(); return NULL; } diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c index 725d92e40cd3..52d1f9746e8c 100644 --- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c @@ -1138,6 +1138,7 @@ struct resource_pool *dce80_create_resource_pool( if (dce80_construct(num_virtual_links, dc, pool)) return &pool->base; + kfree(pool); BREAK_TO_DEBUGGER(); return NULL; } @@ -1337,6 +1338,7 @@ struct resource_pool *dce81_create_resource_pool( if (dce81_construct(num_virtual_links, dc, pool)) return &pool->base; + kfree(pool); BREAK_TO_DEBUGGER(); return NULL; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 91ab4dbbe1a6..c655d03ef754 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -804,6 +804,32 @@ static void false_optc_underflow_wa( tg->funcs->clear_optc_underflow(tg); } +static int calculate_vready_offset_for_group(struct pipe_ctx *pipe) +{ + struct pipe_ctx *other_pipe; + int vready_offset = pipe->pipe_dlg_param.vready_offset; + + /* Always use the largest vready_offset of all connected pipes */ + for (other_pipe = pipe->bottom_pipe; other_pipe != NULL; other_pipe = other_pipe->bottom_pipe) { + if (other_pipe->pipe_dlg_param.vready_offset > vready_offset) + vready_offset = other_pipe->pipe_dlg_param.vready_offset; + } + for (other_pipe = pipe->top_pipe; other_pipe != NULL; other_pipe = other_pipe->top_pipe) { + if (other_pipe->pipe_dlg_param.vready_offset > vready_offset) + vready_offset = other_pipe->pipe_dlg_param.vready_offset; + } + for (other_pipe = pipe->next_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->next_odm_pipe) { + if (other_pipe->pipe_dlg_param.vready_offset > vready_offset) + vready_offset = other_pipe->pipe_dlg_param.vready_offset; + } + for (other_pipe = pipe->prev_odm_pipe; other_pipe != NULL; 
other_pipe = other_pipe->prev_odm_pipe) { + if (other_pipe->pipe_dlg_param.vready_offset > vready_offset) + vready_offset = other_pipe->pipe_dlg_param.vready_offset; + } + + return vready_offset; +} + enum dc_status dcn10_enable_stream_timing( struct pipe_ctx *pipe_ctx, struct dc_state *context, @@ -838,7 +864,7 @@ enum dc_status dcn10_enable_stream_timing( pipe_ctx->stream_res.tg->funcs->program_timing( pipe_ctx->stream_res.tg, &stream->timing, - pipe_ctx->pipe_dlg_param.vready_offset, + calculate_vready_offset_for_group(pipe_ctx), pipe_ctx->pipe_dlg_param.vstartup_start, pipe_ctx->pipe_dlg_param.vupdate_offset, pipe_ctx->pipe_dlg_param.vupdate_width, @@ -2776,7 +2802,7 @@ void dcn10_program_pipe( pipe_ctx->stream_res.tg->funcs->program_global_sync( pipe_ctx->stream_res.tg, - pipe_ctx->pipe_dlg_param.vready_offset, + calculate_vready_offset_for_group(pipe_ctx), pipe_ctx->pipe_dlg_param.vstartup_start, pipe_ctx->pipe_dlg_param.vupdate_offset, pipe_ctx->pipe_dlg_param.vupdate_width); diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index 58eea3aa3bfc..bf2a8f53694b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -1564,6 +1564,31 @@ static void dcn20_update_dchubp_dpp( hubp->funcs->set_blank(hubp, false); } +static int calculate_vready_offset_for_group(struct pipe_ctx *pipe) +{ + struct pipe_ctx *other_pipe; + int vready_offset = pipe->pipe_dlg_param.vready_offset; + + /* Always use the largest vready_offset of all connected pipes */ + for (other_pipe = pipe->bottom_pipe; other_pipe != NULL; other_pipe = other_pipe->bottom_pipe) { + if (other_pipe->pipe_dlg_param.vready_offset > vready_offset) + vready_offset = other_pipe->pipe_dlg_param.vready_offset; + } + for (other_pipe = pipe->top_pipe; other_pipe != NULL; other_pipe = other_pipe->top_pipe) { + if (other_pipe->pipe_dlg_param.vready_offset > vready_offset) + vready_offset = other_pipe->pipe_dlg_param.vready_offset; + } + for (other_pipe = pipe->next_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->next_odm_pipe) { + if (other_pipe->pipe_dlg_param.vready_offset > vready_offset) + vready_offset = other_pipe->pipe_dlg_param.vready_offset; + } + for (other_pipe = pipe->prev_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->prev_odm_pipe) { + if (other_pipe->pipe_dlg_param.vready_offset > vready_offset) + vready_offset = other_pipe->pipe_dlg_param.vready_offset; + } + + return vready_offset; +} static void dcn20_program_pipe( struct dc *dc, @@ -1582,7 +1607,7 @@ static void dcn20_program_pipe( pipe_ctx->stream_res.tg->funcs->program_global_sync( pipe_ctx->stream_res.tg, - pipe_ctx->pipe_dlg_param.vready_offset, + calculate_vready_offset_for_group(pipe_ctx), pipe_ctx->pipe_dlg_param.vstartup_start, pipe_ctx->pipe_dlg_param.vupdate_offset, pipe_ctx->pipe_dlg_param.vupdate_width); @@ -1875,7 +1900,7 @@ bool dcn20_update_bandwidth( pipe_ctx->stream_res.tg->funcs->program_global_sync( pipe_ctx->stream_res.tg, - pipe_ctx->pipe_dlg_param.vready_offset, + calculate_vready_offset_for_group(pipe_ctx), pipe_ctx->pipe_dlg_param.vstartup_start, pipe_ctx->pipe_dlg_param.vupdate_offset, pipe_ctx->pipe_dlg_param.vupdate_width); diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c index 7aad0340f794..67d83417ec33 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c +++ 
b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c @@ -1344,6 +1344,20 @@ void dcn303_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param dcn3_03_soc.clock_limits[i].phyclk_d18_mhz = dcn3_03_soc.clock_limits[0].phyclk_d18_mhz; dcn3_03_soc.clock_limits[i].dscclk_mhz = dcn3_03_soc.clock_limits[0].dscclk_mhz; } + + // WA: patch strobe modes to compensate for DCN303 BW issue + if (dcn3_03_soc.num_chans <= 4) { + for (i = 0; i < dcn3_03_soc.num_states; i++) { + if (dcn3_03_soc.clock_limits[i].dram_speed_mts > 1700) + break; + + if (dcn3_03_soc.clock_limits[i].dram_speed_mts >= 1500) { + dcn3_03_soc.clock_limits[i].dcfclk_mhz = 100; + dcn3_03_soc.clock_limits[i].fabricclk_mhz = 100; + } + } + } + /* re-init DML with updated bb */ dml_init_instance(&dc->dml, &dcn3_03_soc, &dcn3_03_ip, DML_PROJECT_DCN30); if (dc->current_state) diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h index bac15c466733..6e27c8b16391 100644 --- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h @@ -341,7 +341,8 @@ struct amd_pm_funcs { int (*get_power_profile_mode)(void *handle, char *buf); int (*set_power_profile_mode)(void *handle, long *input, uint32_t size); int (*set_fine_grain_clk_vol)(void *handle, uint32_t type, long *input, uint32_t size); - int (*odn_edit_dpm_table)(void *handle, uint32_t type, long *input, uint32_t size); + int (*odn_edit_dpm_table)(void *handle, enum PP_OD_DPM_TABLE_COMMAND type, + long *input, uint32_t size); int (*set_mp1_state)(void *handle, enum pp_mp1_state mp1_state); int (*smu_i2c_bus_access)(void *handle, bool acquire); int (*gfx_state_change_set)(void *handle, uint32_t state); diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c index 321215003643..0f5930e797bd 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c @@ -924,7 +924,8 @@ static int pp_set_fine_grain_clk_vol(void *handle, uint32_t type, long *input, u return hwmgr->hwmgr_func->set_fine_grain_clk_vol(hwmgr, type, input, size); } -static int pp_odn_edit_dpm_table(void *handle, uint32_t type, long *input, uint32_t size) +static int pp_odn_edit_dpm_table(void *handle, enum PP_OD_DPM_TABLE_COMMAND type, + long *input, uint32_t size) { struct pp_hwmgr *hwmgr = handle; diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.c index 67d7da0b6fed..1d829402cd2e 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.c @@ -75,8 +75,10 @@ int psm_init_power_state_table(struct pp_hwmgr *hwmgr) for (i = 0; i < table_entries; i++) { result = hwmgr->hwmgr_func->get_pp_table_entry(hwmgr, i, state); if (result) { + kfree(hwmgr->current_ps); kfree(hwmgr->request_ps); kfree(hwmgr->ps); + hwmgr->current_ps = NULL; hwmgr->request_ps = NULL; hwmgr->ps = NULL; return -EINVAL; diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c index 85d55ab4e369..299b5c838bf7 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c @@ -2961,7 +2961,8 @@ static int vega20_odn_edit_dpm_table(struct pp_hwmgr *hwmgr, data->od8_settings.od8_settings_array; OverDriveTable_t *od_table = &(data->smc_state_table.overdrive_table); - int32_t input_index, input_clk, input_vol, 
i; + int32_t input_clk, input_vol, i; + uint32_t input_index; int od8_id; int ret; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c index 614c3d049514..83fa3d20a1d5 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c @@ -1595,6 +1595,10 @@ bool smu_v11_0_baco_is_support(struct smu_context *smu) if (amdgpu_sriov_vf(smu->adev) || !smu_baco->platform_support) return false; + /* return true if ASIC is in BACO state already */ + if (smu_v11_0_baco_get_state(smu) == SMU_BACO_STATE_ENTER) + return true; + /* Arcturus does not support this bit mask */ if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) && !smu_cmn_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511.h b/drivers/gpu/drm/bridge/adv7511/adv7511.h index aeeb09a27202..fdd8e3d3232e 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7511.h +++ b/drivers/gpu/drm/bridge/adv7511/adv7511.h @@ -395,7 +395,8 @@ static inline int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511) void adv7533_dsi_power_on(struct adv7511 *adv); void adv7533_dsi_power_off(struct adv7511 *adv); -void adv7533_mode_set(struct adv7511 *adv, const struct drm_display_mode *mode); +enum drm_mode_status adv7533_mode_valid(struct adv7511 *adv, + const struct drm_display_mode *mode); int adv7533_patch_registers(struct adv7511 *adv); int adv7533_patch_cec_registers(struct adv7511 *adv); int adv7533_attach_dsi(struct adv7511 *adv); diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c index 3dc551d223d6..44762116aac9 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c +++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c @@ -697,7 +697,7 @@ adv7511_detect(struct adv7511 *adv7511, struct drm_connector *connector) } static enum drm_mode_status adv7511_mode_valid(struct adv7511 *adv7511, - struct drm_display_mode *mode) + const struct drm_display_mode *mode) { if (mode->clock > 165000) return MODE_CLOCK_HIGH; @@ -791,9 +791,6 @@ static void adv7511_mode_set(struct adv7511 *adv7511, regmap_update_bits(adv7511->regmap, 0x17, 0x60, (vsync_polarity << 6) | (hsync_polarity << 5)); - if (adv7511->type == ADV7533 || adv7511->type == ADV7535) - adv7533_mode_set(adv7511, adj_mode); - drm_mode_copy(&adv7511->curr_mode, adj_mode); /* @@ -913,6 +910,18 @@ static void adv7511_bridge_mode_set(struct drm_bridge *bridge, adv7511_mode_set(adv, mode, adj_mode); } +static enum drm_mode_status adv7511_bridge_mode_valid(struct drm_bridge *bridge, + const struct drm_display_info *info, + const struct drm_display_mode *mode) +{ + struct adv7511 *adv = bridge_to_adv7511(bridge); + + if (adv->type == ADV7533 || adv->type == ADV7535) + return adv7533_mode_valid(adv, mode); + else + return adv7511_mode_valid(adv, mode); +} + static int adv7511_bridge_attach(struct drm_bridge *bridge, enum drm_bridge_attach_flags flags) { @@ -963,6 +972,7 @@ static const struct drm_bridge_funcs adv7511_bridge_funcs = { .enable = adv7511_bridge_enable, .disable = adv7511_bridge_disable, .mode_set = adv7511_bridge_mode_set, + .mode_valid = adv7511_bridge_mode_valid, .attach = adv7511_bridge_attach, .detect = adv7511_bridge_detect, .get_edid = adv7511_bridge_get_edid, diff --git a/drivers/gpu/drm/bridge/adv7511/adv7533.c b/drivers/gpu/drm/bridge/adv7511/adv7533.c index 59d718bde8c4..7eda12f338a1 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7533.c +++ 
b/drivers/gpu/drm/bridge/adv7511/adv7533.c @@ -100,26 +100,27 @@ void adv7533_dsi_power_off(struct adv7511 *adv) regmap_write(adv->regmap_cec, 0x27, 0x0b); } -void adv7533_mode_set(struct adv7511 *adv, const struct drm_display_mode *mode) +enum drm_mode_status adv7533_mode_valid(struct adv7511 *adv, + const struct drm_display_mode *mode) { + int lanes; struct mipi_dsi_device *dsi = adv->dsi; - int lanes, ret; - - if (adv->num_dsi_lanes != 4) - return; if (mode->clock > 80000) lanes = 4; else lanes = 3; - if (lanes != dsi->lanes) { - mipi_dsi_detach(dsi); - dsi->lanes = lanes; - ret = mipi_dsi_attach(dsi); - if (ret) - dev_err(&dsi->dev, "failed to change host lanes\n"); - } + /* + * TODO: add support for dynamic switching of lanes + * by using the bridge pre_enable() op . Till then filter + * out the modes which shall need different number of lanes + * than what was configured in the device tree. + */ + if (lanes != dsi->lanes) + return MODE_BAD; + + return MODE_OK; } int adv7533_patch_registers(struct adv7511 *adv) diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c index 3d9974de09b9..38174461d48c 100644 --- a/drivers/gpu/drm/drm_connector.c +++ b/drivers/gpu/drm/drm_connector.c @@ -563,6 +563,9 @@ void drm_connector_cleanup(struct drm_connector *connector) mutex_destroy(&connector->mutex); memset(connector, 0, sizeof(*connector)); + + if (dev->registered) + drm_sysfs_hotplug_event(dev); } EXPORT_SYMBOL(drm_connector_cleanup); diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c index eda832f9200d..32ee023aed26 100644 --- a/drivers/gpu/drm/drm_fourcc.c +++ b/drivers/gpu/drm/drm_fourcc.c @@ -260,12 +260,15 @@ const struct drm_format_info *__drm_format_info(u32 format) .vsub = 2, .is_yuv = true }, { .format = DRM_FORMAT_Q410, .depth = 0, .num_planes = 3, .char_per_block = { 2, 2, 2 }, - .block_w = { 1, 1, 1 }, .block_h = { 1, 1, 1 }, .hsub = 0, - .vsub = 0, .is_yuv = true }, + .block_w = { 1, 1, 1 }, .block_h = { 1, 1, 1 }, .hsub = 1, + .vsub = 1, .is_yuv = true }, { .format = DRM_FORMAT_Q401, .depth = 0, .num_planes = 3, .char_per_block = { 2, 2, 2 }, - .block_w = { 1, 1, 1 }, .block_h = { 1, 1, 1 }, .hsub = 0, - .vsub = 0, .is_yuv = true }, + .block_w = { 1, 1, 1 }, .block_h = { 1, 1, 1 }, .hsub = 1, + .vsub = 1, .is_yuv = true }, + { .format = DRM_FORMAT_P030, .depth = 0, .num_planes = 2, + .char_per_block = { 4, 8, 0 }, .block_w = { 3, 3, 0 }, .block_h = { 1, 1, 0 }, + .hsub = 2, .vsub = 2, .is_yuv = true}, }; unsigned int i; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c index cc5b07f86346..e8ff70be449a 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c @@ -416,6 +416,12 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu) if (gpu->identity.model == chipModel_GC700) gpu->identity.features &= ~chipFeatures_FAST_CLEAR; + /* These models/revisions don't have the 2D pipe bit */ + if ((gpu->identity.model == chipModel_GC500 && + gpu->identity.revision <= 2) || + gpu->identity.model == chipModel_GC300) + gpu->identity.features |= chipFeatures_PIPE_2D; + if ((gpu->identity.model == chipModel_GC500 && gpu->identity.revision < 2) || (gpu->identity.model == chipModel_GC300 && @@ -449,8 +455,9 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu) gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_5); } - /* GC600 idle register reports zero bits where modules aren't present */ - if (gpu->identity.model == chipModel_GC600) + /* GC600/300 idle register reports 
zero bits where modules aren't present */ + if (gpu->identity.model == chipModel_GC600 || + gpu->identity.model == chipModel_GC300) gpu->idle_mask = VIVS_HI_IDLE_STATE_TX | VIVS_HI_IDLE_STATE_RA | VIVS_HI_IDLE_STATE_SE | diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c index 4d4a715b429d..2c2b92324a2e 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c @@ -60,8 +60,9 @@ static int fsl_dcu_drm_connector_get_modes(struct drm_connector *connector) return drm_panel_get_modes(fsl_connector->panel, connector); } -static int fsl_dcu_drm_connector_mode_valid(struct drm_connector *connector, - struct drm_display_mode *mode) +static enum drm_mode_status +fsl_dcu_drm_connector_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) { if (mode->hdisplay & 0xf) return MODE_ERROR; diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 1ccdf2da042b..64a15b636e8d 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -3245,61 +3245,6 @@ static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp, } } -static void -intel_dp_autotest_phy_ddi_disable(struct intel_dp *intel_dp, - const struct intel_crtc_state *crtc_state) -{ - struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); - struct drm_device *dev = dig_port->base.base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc); - enum pipe pipe = crtc->pipe; - u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value; - - trans_ddi_func_ctl_value = intel_de_read(dev_priv, - TRANS_DDI_FUNC_CTL(pipe)); - trans_conf_value = intel_de_read(dev_priv, PIPECONF(pipe)); - dp_tp_ctl_value = intel_de_read(dev_priv, TGL_DP_TP_CTL(pipe)); - - trans_ddi_func_ctl_value &= ~(TRANS_DDI_FUNC_ENABLE | - TGL_TRANS_DDI_PORT_MASK); - trans_conf_value &= ~PIPECONF_ENABLE; - dp_tp_ctl_value &= ~DP_TP_CTL_ENABLE; - - intel_de_write(dev_priv, PIPECONF(pipe), trans_conf_value); - intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe), - trans_ddi_func_ctl_value); - intel_de_write(dev_priv, TGL_DP_TP_CTL(pipe), dp_tp_ctl_value); -} - -static void -intel_dp_autotest_phy_ddi_enable(struct intel_dp *intel_dp, - const struct intel_crtc_state *crtc_state) -{ - struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); - struct drm_device *dev = dig_port->base.base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); - enum port port = dig_port->base.port; - struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc); - enum pipe pipe = crtc->pipe; - u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value; - - trans_ddi_func_ctl_value = intel_de_read(dev_priv, - TRANS_DDI_FUNC_CTL(pipe)); - trans_conf_value = intel_de_read(dev_priv, PIPECONF(pipe)); - dp_tp_ctl_value = intel_de_read(dev_priv, TGL_DP_TP_CTL(pipe)); - - trans_ddi_func_ctl_value |= TRANS_DDI_FUNC_ENABLE | - TGL_TRANS_DDI_SELECT_PORT(port); - trans_conf_value |= PIPECONF_ENABLE; - dp_tp_ctl_value |= DP_TP_CTL_ENABLE; - - intel_de_write(dev_priv, PIPECONF(pipe), trans_conf_value); - intel_de_write(dev_priv, TGL_DP_TP_CTL(pipe), dp_tp_ctl_value); - intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe), - trans_ddi_func_ctl_value); -} - static void intel_dp_process_phy_request(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) { @@ -3317,14 +3262,10 @@ static void 
intel_dp_process_phy_request(struct intel_dp *intel_dp, intel_dp_get_adjust_train(intel_dp, crtc_state, DP_PHY_DPRX, link_status); - intel_dp_autotest_phy_ddi_disable(intel_dp, crtc_state); - intel_dp_set_signal_levels(intel_dp, crtc_state, DP_PHY_DPRX); intel_dp_phy_pattern_update(intel_dp, crtc_state); - intel_dp_autotest_phy_ddi_enable(intel_dp, crtc_state); - drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET, intel_dp->train_set, crtc_state->lane_count); diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c index 0a88088a11e8..55dd02a01f1a 100644 --- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c +++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c @@ -133,9 +133,9 @@ static enum port intel_dsi_seq_port_to_port(struct intel_dsi *intel_dsi, return ffs(intel_dsi->ports) - 1; if (seq_port) { - if (intel_dsi->ports & PORT_B) + if (intel_dsi->ports & BIT(PORT_B)) return PORT_B; - else if (intel_dsi->ports & PORT_C) + else if (intel_dsi->ports & BIT(PORT_C)) return PORT_C; } diff --git a/drivers/gpu/drm/i915/gt/intel_migrate.c b/drivers/gpu/drm/i915/gt/intel_migrate.c index 1dac21aa7e5c..5b59a6effc20 100644 --- a/drivers/gpu/drm/i915/gt/intel_migrate.c +++ b/drivers/gpu/drm/i915/gt/intel_migrate.c @@ -13,7 +13,6 @@ struct insert_pte_data { u64 offset; - bool is_lmem; }; #define CHUNK_SZ SZ_8M /* ~1ms at 8GiB/s preemption delay */ @@ -40,7 +39,7 @@ static void insert_pte(struct i915_address_space *vm, struct insert_pte_data *d = data; vm->insert_page(vm, px_dma(pt), d->offset, I915_CACHE_NONE, - d->is_lmem ? PTE_LM : 0); + i915_gem_object_is_lmem(pt->base) ? PTE_LM : 0); d->offset += PAGE_SIZE; } @@ -134,8 +133,7 @@ static struct i915_address_space *migrate_vm(struct intel_gt *gt) goto err_vm; /* Now allow the GPU to rewrite the PTE via its own ppGTT */ - d.is_lmem = i915_gem_object_is_lmem(vm->vm.scratch[0]); - vm->vm.foreach(&vm->vm, base, base + sz, insert_pte, &d); + vm->vm.foreach(&vm->vm, base, d.offset - base, insert_pte, &d); } return &vm->vm; @@ -281,10 +279,10 @@ static int emit_pte(struct i915_request *rq, GEM_BUG_ON(GRAPHICS_VER(rq->engine->i915) < 8); /* Compute the page directory offset for the target address range */ - offset += (u64)rq->engine->instance << 32; offset >>= 12; offset *= sizeof(u64); offset += 2 * CHUNK_SZ; + offset += (u64)rq->engine->instance << 32; cs = intel_ring_begin(rq, 6); if (IS_ERR(cs)) diff --git a/drivers/gpu/drm/i915/gvt/debugfs.c b/drivers/gpu/drm/i915/gvt/debugfs.c index 9f1c209d9251..e08ed0e9f165 100644 --- a/drivers/gpu/drm/i915/gvt/debugfs.c +++ b/drivers/gpu/drm/i915/gvt/debugfs.c @@ -175,8 +175,13 @@ void intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu) */ void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu) { - debugfs_remove_recursive(vgpu->debugfs); - vgpu->debugfs = NULL; + struct intel_gvt *gvt = vgpu->gvt; + struct drm_minor *minor = gvt->gt->i915->drm.primary; + + if (minor->debugfs_root && gvt->debugfs_root) { + debugfs_remove_recursive(vgpu->debugfs); + vgpu->debugfs = NULL; + } } /** @@ -199,6 +204,10 @@ void intel_gvt_debugfs_init(struct intel_gvt *gvt) */ void intel_gvt_debugfs_clean(struct intel_gvt *gvt) { - debugfs_remove_recursive(gvt->debugfs_root); - gvt->debugfs_root = NULL; + struct drm_minor *minor = gvt->gt->i915->drm.primary; + + if (minor->debugfs_root) { + debugfs_remove_recursive(gvt->debugfs_root); + gvt->debugfs_root = NULL; + } } diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 1bb1be5c48c8..0291d42cfba8 
100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -694,6 +694,7 @@ intel_vgpu_shadow_mm_pin(struct intel_vgpu_workload *workload) if (workload->shadow_mm->type != INTEL_GVT_MM_PPGTT || !workload->shadow_mm->ppgtt_mm.shadowed) { + intel_vgpu_unpin_mm(workload->shadow_mm); gvt_vgpu_err("workload shadow ppgtt isn't ready\n"); return -EINVAL; } diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c index 846c1aae69c8..924a66f53951 100644 --- a/drivers/gpu/drm/imx/ipuv3-plane.c +++ b/drivers/gpu/drm/imx/ipuv3-plane.c @@ -619,6 +619,11 @@ static void ipu_plane_atomic_update(struct drm_plane *plane, break; } + if (ipu_plane->dp_flow == IPU_DP_FLOW_SYNC_BG) + width = ipu_src_rect_width(new_state); + else + width = drm_rect_width(&new_state->src) >> 16; + eba = drm_plane_state_to_eba(new_state, 0); /* @@ -627,8 +632,7 @@ static void ipu_plane_atomic_update(struct drm_plane *plane, */ if (ipu_state->use_pre) { axi_id = ipu_chan_assign_axi_id(ipu_plane->dma); - ipu_prg_channel_configure(ipu_plane->ipu_ch, axi_id, - ipu_src_rect_width(new_state), + ipu_prg_channel_configure(ipu_plane->ipu_ch, axi_id, width, drm_rect_height(&new_state->src) >> 16, fb->pitches[0], fb->format->format, fb->modifier, &eba); @@ -683,9 +687,8 @@ static void ipu_plane_atomic_update(struct drm_plane *plane, break; } - ipu_dmfc_config_wait4eot(ipu_plane->dmfc, ALIGN(drm_rect_width(dst), 8)); + ipu_dmfc_config_wait4eot(ipu_plane->dmfc, width); - width = ipu_src_rect_width(new_state); height = drm_rect_height(&new_state->src) >> 16; info = drm_format_info(fb->format->format); ipu_calculate_bursts(width, info->cpp[0], fb->pitches[0], @@ -749,8 +752,7 @@ static void ipu_plane_atomic_update(struct drm_plane *plane, ipu_cpmem_set_burstsize(ipu_plane->ipu_ch, 16); ipu_cpmem_zero(ipu_plane->alpha_ch); - ipu_cpmem_set_resolution(ipu_plane->alpha_ch, - ipu_src_rect_width(new_state), + ipu_cpmem_set_resolution(ipu_plane->alpha_ch, width, drm_rect_height(&new_state->src) >> 16); ipu_cpmem_set_format_passthrough(ipu_plane->alpha_ch, 8); ipu_cpmem_set_high_priority(ipu_plane->alpha_ch); diff --git a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c index a5df1c8d34cd..d9231b89d73e 100644 --- a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c +++ b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c @@ -1326,7 +1326,11 @@ static int ingenic_drm_init(void) return err; } - return platform_driver_register(&ingenic_drm_driver); + err = platform_driver_register(&ingenic_drm_driver); + if (IS_ENABLED(CONFIG_DRM_INGENIC_IPU) && err) + platform_driver_unregister(ingenic_ipu_driver_ptr); + + return err; } module_init(ingenic_drm_init); diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c index 41c783349321..94c6bd3b0082 100644 --- a/drivers/gpu/drm/mediatek/mtk_dpi.c +++ b/drivers/gpu/drm/mediatek/mtk_dpi.c @@ -387,9 +387,6 @@ static void mtk_dpi_power_off(struct mtk_dpi *dpi) if (--dpi->refcount != 0) return; - if (dpi->pinctrl && dpi->pins_gpio) - pinctrl_select_state(dpi->pinctrl, dpi->pins_gpio); - mtk_dpi_disable(dpi); clk_disable_unprepare(dpi->pixel_clk); clk_disable_unprepare(dpi->engine_clk); @@ -414,9 +411,6 @@ static int mtk_dpi_power_on(struct mtk_dpi *dpi) goto err_pixel; } - if (dpi->pinctrl && dpi->pins_dpi) - pinctrl_select_state(dpi->pinctrl, dpi->pins_dpi); - return 0; err_pixel: @@ -630,12 +624,18 @@ static void mtk_dpi_bridge_disable(struct drm_bridge *bridge) struct mtk_dpi *dpi = bridge_to_dpi(bridge); 
mtk_dpi_power_off(dpi); + + if (dpi->pinctrl && dpi->pins_gpio) + pinctrl_select_state(dpi->pinctrl, dpi->pins_gpio); } static void mtk_dpi_bridge_enable(struct drm_bridge *bridge) { struct mtk_dpi *dpi = bridge_to_dpi(bridge); + if (dpi->pinctrl && dpi->pins_dpi) + pinctrl_select_state(dpi->pinctrl, dpi->pins_dpi); + mtk_dpi_power_on(dpi); mtk_dpi_set_display_mode(dpi, &dpi->mode); mtk_dpi_enable(dpi); diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c index 3196189429bc..7613b0fa2be6 100644 --- a/drivers/gpu/drm/mediatek/mtk_hdmi.c +++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c @@ -1203,9 +1203,10 @@ static enum drm_connector_status mtk_hdmi_detect(struct mtk_hdmi *hdmi) return mtk_hdmi_update_plugged_status(hdmi); } -static int mtk_hdmi_bridge_mode_valid(struct drm_bridge *bridge, - const struct drm_display_info *info, - const struct drm_display_mode *mode) +static enum drm_mode_status +mtk_hdmi_bridge_mode_valid(struct drm_bridge *bridge, + const struct drm_display_info *info, + const struct drm_display_mode *mode) { struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge); struct drm_bridge *next_bridge; diff --git a/drivers/gpu/drm/meson/meson_viu.c b/drivers/gpu/drm/meson/meson_viu.c index d4b907889a21..cd399b0b7181 100644 --- a/drivers/gpu/drm/meson/meson_viu.c +++ b/drivers/gpu/drm/meson/meson_viu.c @@ -436,15 +436,14 @@ void meson_viu_init(struct meson_drm *priv) /* Initialize OSD1 fifo control register */ reg = VIU_OSD_DDR_PRIORITY_URGENT | - VIU_OSD_HOLD_FIFO_LINES(31) | VIU_OSD_FIFO_DEPTH_VAL(32) | /* fifo_depth_val: 32*8=256 */ VIU_OSD_WORDS_PER_BURST(4) | /* 4 words in 1 burst */ VIU_OSD_FIFO_LIMITS(2); /* fifo_lim: 2*16=32 */ if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) - reg |= VIU_OSD_BURST_LENGTH_32; + reg |= (VIU_OSD_BURST_LENGTH_32 | VIU_OSD_HOLD_FIFO_LINES(31)); else - reg |= VIU_OSD_BURST_LENGTH_64; + reg |= (VIU_OSD_BURST_LENGTH_64 | VIU_OSD_HOLD_FIFO_LINES(4)); writel_relaxed(reg, priv->io_base + _REG(VIU_OSD1_FIFO_CTRL_STAT)); writel_relaxed(reg, priv->io_base + _REG(VIU_OSD2_FIFO_CTRL_STAT)); diff --git a/drivers/gpu/drm/mgag200/mgag200_pll.c b/drivers/gpu/drm/mgag200/mgag200_pll.c index 52be08b744ad..87f9846b9b4f 100644 --- a/drivers/gpu/drm/mgag200/mgag200_pll.c +++ b/drivers/gpu/drm/mgag200/mgag200_pll.c @@ -268,7 +268,8 @@ static void mgag200_pixpll_update_g200se_04(struct mgag200_pll *pixpll, pixpllcp = pixpllc->p - 1; pixpllcs = pixpllc->s; - xpixpllcm = pixpllcm | ((pixpllcn & BIT(8)) >> 1); + // For G200SE A, BIT(7) should be set unconditionally. + xpixpllcm = BIT(7) | pixpllcm; xpixpllcn = pixpllcn; xpixpllcp = (pixpllcs << 3) | pixpllcp; diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c index c0dec5b919d4..2d07c02c59f1 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c @@ -1746,7 +1746,7 @@ static u32 fuse_to_supp_hw(struct device *dev, struct adreno_rev rev, u32 fuse) if (val == UINT_MAX) { DRM_DEV_ERROR(dev, - "missing support for speed-bin: %u. Some OPPs may not be supported by hardware", + "missing support for speed-bin: %u. 
Some OPPs may not be supported by hardware\n", fuse); return UINT_MAX; } @@ -1756,7 +1756,7 @@ static u32 fuse_to_supp_hw(struct device *dev, struct adreno_rev rev, u32 fuse) static int a6xx_set_supported_hw(struct device *dev, struct adreno_rev rev) { - u32 supp_hw = UINT_MAX; + u32 supp_hw; u32 speedbin; int ret; @@ -1768,15 +1768,13 @@ static int a6xx_set_supported_hw(struct device *dev, struct adreno_rev rev) if (ret == -ENOENT) { return 0; } else if (ret) { - DRM_DEV_ERROR(dev, - "failed to read speed-bin (%d). Some OPPs may not be supported by hardware", - ret); - goto done; + dev_err_probe(dev, ret, + "failed to read speed-bin. Some OPPs may not be supported by hardware\n"); + return ret; } supp_hw = fuse_to_supp_hw(dev, rev, speedbin); -done: ret = devm_pm_opp_set_supported_hw(dev, &supp_hw, 1); if (ret) return ret; diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c index d13fd39f05de..15e38ad7aefb 100644 --- a/drivers/gpu/drm/msm/dp/dp_display.c +++ b/drivers/gpu/drm/msm/dp/dp_display.c @@ -840,7 +840,7 @@ static int dp_display_set_mode(struct msm_dp *dp_display, dp = container_of(dp_display, struct dp_display_private, dp_display); - dp->panel->dp_mode.drm_mode = mode->drm_mode; + drm_mode_copy(&dp->panel->dp_mode.drm_mode, &mode->drm_mode); dp->panel->dp_mode.bpp = mode->bpp; dp->panel->dp_mode.capabilities = mode->capabilities; dp_panel_init_panel_info(dp->panel); diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c index f6b09e8eca67..e1a9b52d0a29 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi.c @@ -247,7 +247,21 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev) hdmi->pwr_clks[i] = clk; } - pm_runtime_enable(&pdev->dev); + hdmi->hpd_gpiod = devm_gpiod_get_optional(&pdev->dev, "hpd", GPIOD_IN); + /* This will catch e.g. 
-EPROBE_DEFER */ + if (IS_ERR(hdmi->hpd_gpiod)) { + ret = PTR_ERR(hdmi->hpd_gpiod); + DRM_DEV_ERROR(&pdev->dev, "failed to get hpd gpio: (%d)\n", ret); + goto fail; + } + + if (!hdmi->hpd_gpiod) + DBG("failed to get HPD gpio"); + + if (hdmi->hpd_gpiod) + gpiod_set_consumer_name(hdmi->hpd_gpiod, "HDMI_HPD"); + + devm_pm_runtime_enable(&pdev->dev); hdmi->workq = alloc_ordered_workqueue("msm_hdmi", 0); @@ -429,20 +443,6 @@ static struct hdmi_platform_config hdmi_tx_8996_config = { .hpd_freq = hpd_clk_freq_8x74, }; -static const struct { - const char *name; - const bool output; - const int value; - const char *label; -} msm_hdmi_gpio_pdata[] = { - { "qcom,hdmi-tx-ddc-clk", true, 1, "HDMI_DDC_CLK" }, - { "qcom,hdmi-tx-ddc-data", true, 1, "HDMI_DDC_DATA" }, - { "qcom,hdmi-tx-hpd", false, 1, "HDMI_HPD" }, - { "qcom,hdmi-tx-mux-en", true, 1, "HDMI_MUX_EN" }, - { "qcom,hdmi-tx-mux-sel", true, 0, "HDMI_MUX_SEL" }, - { "qcom,hdmi-tx-mux-lpm", true, 1, "HDMI_MUX_LPM" }, -}; - /* * HDMI audio codec callbacks */ @@ -555,7 +555,7 @@ static int msm_hdmi_bind(struct device *dev, struct device *master, void *data) struct hdmi_platform_config *hdmi_cfg; struct hdmi *hdmi; struct device_node *of_node = dev->of_node; - int i, err; + int err; hdmi_cfg = (struct hdmi_platform_config *) of_device_get_match_data(dev); @@ -567,42 +567,6 @@ static int msm_hdmi_bind(struct device *dev, struct device *master, void *data) hdmi_cfg->mmio_name = "core_physical"; hdmi_cfg->qfprom_mmio_name = "qfprom_physical"; - for (i = 0; i < HDMI_MAX_NUM_GPIO; i++) { - const char *name = msm_hdmi_gpio_pdata[i].name; - struct gpio_desc *gpiod; - - /* - * We are fetching the GPIO lines "as is" since the connector - * code is enabling and disabling the lines. Until that point - * the power-on default value will be kept. - */ - gpiod = devm_gpiod_get_optional(dev, name, GPIOD_ASIS); - /* This will catch e.g. -PROBE_DEFER */ - if (IS_ERR(gpiod)) - return PTR_ERR(gpiod); - if (!gpiod) { - /* Try a second time, stripping down the name */ - char name3[32]; - - /* - * Try again after stripping out the "qcom,hdmi-tx" - * prefix. This is mainly to match "hpd-gpios" used - * in the upstream bindings. 
- */ - if (sscanf(name, "qcom,hdmi-tx-%s", name3)) - gpiod = devm_gpiod_get_optional(dev, name3, GPIOD_ASIS); - if (IS_ERR(gpiod)) - return PTR_ERR(gpiod); - if (!gpiod) - DBG("failed to get gpio: %s", name); - } - hdmi_cfg->gpios[i].gpiod = gpiod; - if (gpiod) - gpiod_set_consumer_name(gpiod, msm_hdmi_gpio_pdata[i].label); - hdmi_cfg->gpios[i].output = msm_hdmi_gpio_pdata[i].output; - hdmi_cfg->gpios[i].value = msm_hdmi_gpio_pdata[i].value; - } - dev->platform_data = hdmi_cfg; hdmi = msm_hdmi_init(to_platform_device(dev)); diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h index 8d2706bec3b9..20f554312b17 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi.h +++ b/drivers/gpu/drm/msm/hdmi/hdmi.h @@ -19,17 +19,9 @@ #include "msm_drv.h" #include "hdmi.xml.h" -#define HDMI_MAX_NUM_GPIO 6 - struct hdmi_phy; struct hdmi_platform_config; -struct hdmi_gpio_data { - struct gpio_desc *gpiod; - bool output; - int value; -}; - struct hdmi_audio { bool enabled; struct hdmi_audio_infoframe infoframe; @@ -61,6 +53,8 @@ struct hdmi { struct clk **hpd_clks; struct clk **pwr_clks; + struct gpio_desc *hpd_gpiod; + struct hdmi_phy *phy; struct device *phy_dev; @@ -109,9 +103,6 @@ struct hdmi_platform_config { /* clks that need to be on for screen pwr (ie pixel clk): */ const char **pwr_clk_names; int pwr_clk_cnt; - - /* gpio's: */ - struct hdmi_gpio_data gpios[HDMI_MAX_NUM_GPIO]; }; struct hdmi_bridge { diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_hpd.c b/drivers/gpu/drm/msm/hdmi/hdmi_hpd.c index c3a236bb952c..52ebe562ca9b 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_hpd.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_hpd.c @@ -60,48 +60,6 @@ static void msm_hdmi_phy_reset(struct hdmi *hdmi) } } -static int gpio_config(struct hdmi *hdmi, bool on) -{ - const struct hdmi_platform_config *config = hdmi->config; - int i; - - if (on) { - for (i = 0; i < HDMI_MAX_NUM_GPIO; i++) { - struct hdmi_gpio_data gpio = config->gpios[i]; - - if (gpio.gpiod) { - if (gpio.output) { - gpiod_direction_output(gpio.gpiod, - gpio.value); - } else { - gpiod_direction_input(gpio.gpiod); - gpiod_set_value_cansleep(gpio.gpiod, - gpio.value); - } - } - } - - DBG("gpio on"); - } else { - for (i = 0; i < HDMI_MAX_NUM_GPIO; i++) { - struct hdmi_gpio_data gpio = config->gpios[i]; - - if (!gpio.gpiod) - continue; - - if (gpio.output) { - int value = gpio.value ? 
0 : 1; - - gpiod_set_value_cansleep(gpio.gpiod, value); - } - } - - DBG("gpio off"); - } - - return 0; -} - static void enable_hpd_clocks(struct hdmi *hdmi, bool enable) { const struct hdmi_platform_config *config = hdmi->config; @@ -157,11 +115,8 @@ int msm_hdmi_hpd_enable(struct drm_bridge *bridge) goto fail; } - ret = gpio_config(hdmi, true); - if (ret) { - DRM_DEV_ERROR(dev, "failed to configure GPIOs: %d\n", ret); - goto fail; - } + if (hdmi->hpd_gpiod) + gpiod_set_value_cansleep(hdmi->hpd_gpiod, 1); pm_runtime_get_sync(dev); enable_hpd_clocks(hdmi, true); @@ -210,10 +165,6 @@ void msm_hdmi_hpd_disable(struct hdmi_bridge *hdmi_bridge) enable_hpd_clocks(hdmi, false); pm_runtime_put_autosuspend(dev); - ret = gpio_config(hdmi, false); - if (ret) - dev_warn(dev, "failed to unconfigure GPIOs: %d\n", ret); - ret = pinctrl_pm_select_sleep_state(dev); if (ret) dev_warn(dev, "pinctrl state chg failed: %d\n", ret); @@ -275,10 +226,7 @@ static enum drm_connector_status detect_reg(struct hdmi *hdmi) #define HPD_GPIO_INDEX 2 static enum drm_connector_status detect_gpio(struct hdmi *hdmi) { - const struct hdmi_platform_config *config = hdmi->config; - struct hdmi_gpio_data hpd_gpio = config->gpios[HPD_GPIO_INDEX]; - - return gpiod_get_value(hpd_gpio.gpiod) ? + return gpiod_get_value(hdmi->hpd_gpiod) ? connector_status_connected : connector_status_disconnected; } @@ -288,8 +236,6 @@ enum drm_connector_status msm_hdmi_bridge_detect( { struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge); struct hdmi *hdmi = hdmi_bridge->hdmi; - const struct hdmi_platform_config *config = hdmi->config; - struct hdmi_gpio_data hpd_gpio = config->gpios[HPD_GPIO_INDEX]; enum drm_connector_status stat_gpio, stat_reg; int retry = 20; @@ -297,7 +243,7 @@ enum drm_connector_status msm_hdmi_bridge_detect( * some platforms may not have hpd gpio. Rely only on the status * provided by REG_HDMI_HPD_INT_STATUS in this case. 
*/ - if (!hpd_gpio.gpiod) + if (!hdmi->hpd_gpiod) return detect_reg(hdmi); do { diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7701.c b/drivers/gpu/drm/panel/panel-sitronix-st7701.c index 0744f6810706..2b1abc95fe13 100644 --- a/drivers/gpu/drm/panel/panel-sitronix-st7701.c +++ b/drivers/gpu/drm/panel/panel-sitronix-st7701.c @@ -626,7 +626,15 @@ static int st7701_dsi_probe(struct mipi_dsi_device *dsi) mipi_dsi_set_drvdata(dsi, st7701); st7701->dsi = dsi; - return mipi_dsi_attach(dsi); + ret = mipi_dsi_attach(dsi); + if (ret) + goto err_attach; + + return 0; + +err_attach: + drm_panel_remove(&st7701->panel); + return ret; } static int st7701_dsi_remove(struct mipi_dsi_device *dsi) diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c index e48e357ea4f1..4c271244092b 100644 --- a/drivers/gpu/drm/panfrost/panfrost_drv.c +++ b/drivers/gpu/drm/panfrost/panfrost_drv.c @@ -82,6 +82,7 @@ static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data, struct panfrost_gem_object *bo; struct drm_panfrost_create_bo *args = data; struct panfrost_gem_mapping *mapping; + int ret; if (!args->size || args->pad || (args->flags & ~(PANFROST_BO_NOEXEC | PANFROST_BO_HEAP))) @@ -92,21 +93,29 @@ static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data, !(args->flags & PANFROST_BO_NOEXEC)) return -EINVAL; - bo = panfrost_gem_create_with_handle(file, dev, args->size, args->flags, - &args->handle); + bo = panfrost_gem_create(dev, args->size, args->flags); if (IS_ERR(bo)) return PTR_ERR(bo); + ret = drm_gem_handle_create(file, &bo->base.base, &args->handle); + if (ret) + goto out; + mapping = panfrost_gem_mapping_get(bo, priv); - if (!mapping) { - drm_gem_object_put(&bo->base.base); - return -EINVAL; + if (mapping) { + args->offset = mapping->mmnode.start << PAGE_SHIFT; + panfrost_gem_mapping_put(mapping); + } else { + /* This can only happen if the handle from + * drm_gem_handle_create() has already been guessed and freed + * by user space + */ + ret = -EINVAL; } - args->offset = mapping->mmnode.start << PAGE_SHIFT; - panfrost_gem_mapping_put(mapping); - - return 0; +out: + drm_gem_object_put(&bo->base.base); + return ret; } /** diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c index 6d9bdb9180cb..55e3a68ed28a 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gem.c +++ b/drivers/gpu/drm/panfrost/panfrost_gem.c @@ -234,12 +234,8 @@ struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t } struct panfrost_gem_object * -panfrost_gem_create_with_handle(struct drm_file *file_priv, - struct drm_device *dev, size_t size, - u32 flags, - uint32_t *handle) +panfrost_gem_create(struct drm_device *dev, size_t size, u32 flags) { - int ret; struct drm_gem_shmem_object *shmem; struct panfrost_gem_object *bo; @@ -255,16 +251,6 @@ panfrost_gem_create_with_handle(struct drm_file *file_priv, bo->noexec = !!(flags & PANFROST_BO_NOEXEC); bo->is_heap = !!(flags & PANFROST_BO_HEAP); - /* - * Allocate an id of idr table where the obj is registered - * and handle has the id what user can see. - */ - ret = drm_gem_handle_create(file_priv, &shmem->base, handle); - /* drop reference from allocate - handle holds it now. 
*/ - drm_gem_object_put(&shmem->base); - if (ret) - return ERR_PTR(ret); - return bo; } diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.h b/drivers/gpu/drm/panfrost/panfrost_gem.h index 8088d5fd8480..ad2877eeeccd 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gem.h +++ b/drivers/gpu/drm/panfrost/panfrost_gem.h @@ -69,10 +69,7 @@ panfrost_gem_prime_import_sg_table(struct drm_device *dev, struct sg_table *sgt); struct panfrost_gem_object * -panfrost_gem_create_with_handle(struct drm_file *file_priv, - struct drm_device *dev, size_t size, - u32 flags, - uint32_t *handle); +panfrost_gem_create(struct drm_device *dev, size_t size, u32 flags); int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv); void panfrost_gem_close(struct drm_gem_object *obj, diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c index 33121655d50b..63bdc9f6fc24 100644 --- a/drivers/gpu/drm/radeon/radeon_bios.c +++ b/drivers/gpu/drm/radeon/radeon_bios.c @@ -227,6 +227,7 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev) if (!found) return false; + pci_dev_put(pdev); rdev->bios = kmalloc(size, GFP_KERNEL); if (!rdev->bios) { @@ -612,13 +613,14 @@ static bool radeon_acpi_vfct_bios(struct radeon_device *rdev) acpi_size tbl_size; UEFI_ACPI_VFCT *vfct; unsigned offset; + bool r = false; if (!ACPI_SUCCESS(acpi_get_table("VFCT", 1, &hdr))) return false; tbl_size = hdr->length; if (tbl_size < sizeof(UEFI_ACPI_VFCT)) { DRM_ERROR("ACPI VFCT table present but broken (too short #1)\n"); - return false; + goto out; } vfct = (UEFI_ACPI_VFCT *)hdr; @@ -631,13 +633,13 @@ static bool radeon_acpi_vfct_bios(struct radeon_device *rdev) offset += sizeof(VFCT_IMAGE_HEADER); if (offset > tbl_size) { DRM_ERROR("ACPI VFCT image header truncated\n"); - return false; + goto out; } offset += vhdr->ImageLength; if (offset > tbl_size) { DRM_ERROR("ACPI VFCT image truncated\n"); - return false; + goto out; } if (vhdr->ImageLength && @@ -649,15 +651,18 @@ static bool radeon_acpi_vfct_bios(struct radeon_device *rdev) rdev->bios = kmemdup(&vbios->VbiosContent, vhdr->ImageLength, GFP_KERNEL); + if (rdev->bios) + r = true; - if (!rdev->bios) - return false; - return true; + goto out; } } DRM_ERROR("ACPI VFCT table present but broken (too short #2)\n"); - return false; + +out: + acpi_put_table(hdr); + return r; } #else static inline bool radeon_acpi_vfct_bios(struct radeon_device *rdev) diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c index 6b5d0722afa6..20e63cadec8c 100644 --- a/drivers/gpu/drm/rockchip/cdn-dp-core.c +++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c @@ -565,7 +565,7 @@ static void cdn_dp_encoder_mode_set(struct drm_encoder *encoder, video->v_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NVSYNC); video->h_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NHSYNC); - memcpy(&dp->mode, adjusted, sizeof(*mode)); + drm_mode_copy(&dp->mode, adjusted); } static bool cdn_dp_check_link_status(struct cdn_dp_device *dp) diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.c b/drivers/gpu/drm/rockchip/inno_hdmi.c index 7afdc54eb3ec..78120da5e63a 100644 --- a/drivers/gpu/drm/rockchip/inno_hdmi.c +++ b/drivers/gpu/drm/rockchip/inno_hdmi.c @@ -488,7 +488,7 @@ static void inno_hdmi_encoder_mode_set(struct drm_encoder *encoder, inno_hdmi_setup(hdmi, adj_mode); /* Store the display mode for plugin/DPMS poweron events */ - memcpy(&hdmi->previous_mode, adj_mode, sizeof(hdmi->previous_mode)); + drm_mode_copy(&hdmi->previous_mode, adj_mode); } static void 
inno_hdmi_encoder_enable(struct drm_encoder *encoder) diff --git a/drivers/gpu/drm/rockchip/rk3066_hdmi.c b/drivers/gpu/drm/rockchip/rk3066_hdmi.c index 1c546c3a8998..17e7c40a9e7b 100644 --- a/drivers/gpu/drm/rockchip/rk3066_hdmi.c +++ b/drivers/gpu/drm/rockchip/rk3066_hdmi.c @@ -383,7 +383,7 @@ rk3066_hdmi_encoder_mode_set(struct drm_encoder *encoder, struct rk3066_hdmi *hdmi = to_rk3066_hdmi(encoder); /* Store the display mode for plugin/DPMS poweron events. */ - memcpy(&hdmi->previous_mode, adj_mode, sizeof(hdmi->previous_mode)); + drm_mode_copy(&hdmi->previous_mode, adj_mode); } static void rk3066_hdmi_encoder_enable(struct drm_encoder *encoder) diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.c b/drivers/gpu/drm/rockchip/rockchip_lvds.c index 551653940e39..2550429df49f 100644 --- a/drivers/gpu/drm/rockchip/rockchip_lvds.c +++ b/drivers/gpu/drm/rockchip/rockchip_lvds.c @@ -145,7 +145,7 @@ static int rk3288_lvds_poweron(struct rockchip_lvds *lvds) DRM_DEV_ERROR(lvds->dev, "failed to enable lvds pclk %d\n", ret); return ret; } - ret = pm_runtime_get_sync(lvds->dev); + ret = pm_runtime_resume_and_get(lvds->dev); if (ret < 0) { DRM_DEV_ERROR(lvds->dev, "failed to get pm runtime: %d\n", ret); clk_disable(lvds->pclk); @@ -329,16 +329,20 @@ static int px30_lvds_poweron(struct rockchip_lvds *lvds) { int ret; - ret = pm_runtime_get_sync(lvds->dev); + ret = pm_runtime_resume_and_get(lvds->dev); if (ret < 0) { DRM_DEV_ERROR(lvds->dev, "failed to get pm runtime: %d\n", ret); return ret; } /* Enable LVDS mode */ - return regmap_update_bits(lvds->grf, PX30_LVDS_GRF_PD_VO_CON1, + ret = regmap_update_bits(lvds->grf, PX30_LVDS_GRF_PD_VO_CON1, PX30_LVDS_MODE_EN(1) | PX30_LVDS_P2S_EN(1), PX30_LVDS_MODE_EN(1) | PX30_LVDS_P2S_EN(1)); + if (ret) + pm_runtime_put(lvds->dev); + + return ret; } static void px30_lvds_poweroff(struct rockchip_lvds *lvds) diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c index b6ee8a82e656..577c477b5f46 100644 --- a/drivers/gpu/drm/sti/sti_dvo.c +++ b/drivers/gpu/drm/sti/sti_dvo.c @@ -288,7 +288,7 @@ static void sti_dvo_set_mode(struct drm_bridge *bridge, DRM_DEBUG_DRIVER("\n"); - memcpy(&dvo->mode, mode, sizeof(struct drm_display_mode)); + drm_mode_copy(&dvo->mode, mode); /* According to the path used (main or aux), the dvo clocks should * have a different parent clock. 
*/ @@ -346,8 +346,9 @@ static int sti_dvo_connector_get_modes(struct drm_connector *connector) #define CLK_TOLERANCE_HZ 50 -static int sti_dvo_connector_mode_valid(struct drm_connector *connector, - struct drm_display_mode *mode) +static enum drm_mode_status +sti_dvo_connector_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) { int target = mode->clock * 1000; int target_min = target - CLK_TOLERANCE_HZ; diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c index 03f3377f918c..aa54a6400ab8 100644 --- a/drivers/gpu/drm/sti/sti_hda.c +++ b/drivers/gpu/drm/sti/sti_hda.c @@ -523,7 +523,7 @@ static void sti_hda_set_mode(struct drm_bridge *bridge, DRM_DEBUG_DRIVER("\n"); - memcpy(&hda->mode, mode, sizeof(struct drm_display_mode)); + drm_mode_copy(&hda->mode, mode); if (!hda_get_mode_idx(hda->mode, &mode_idx)) { DRM_ERROR("Undefined mode\n"); @@ -600,8 +600,9 @@ static int sti_hda_connector_get_modes(struct drm_connector *connector) #define CLK_TOLERANCE_HZ 50 -static int sti_hda_connector_mode_valid(struct drm_connector *connector, - struct drm_display_mode *mode) +static enum drm_mode_status +sti_hda_connector_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) { int target = mode->clock * 1000; int target_min = target - CLK_TOLERANCE_HZ; diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c index f3ace11209dd..36bea1551ef8 100644 --- a/drivers/gpu/drm/sti/sti_hdmi.c +++ b/drivers/gpu/drm/sti/sti_hdmi.c @@ -940,7 +940,7 @@ static void sti_hdmi_set_mode(struct drm_bridge *bridge, DRM_DEBUG_DRIVER("\n"); /* Copy the drm display mode in the connector local structure */ - memcpy(&hdmi->mode, mode, sizeof(struct drm_display_mode)); + drm_mode_copy(&hdmi->mode, mode); /* Update clock framerate according to the selected mode */ ret = clk_set_rate(hdmi->clk_pix, mode->clock * 1000); @@ -1003,8 +1003,9 @@ fail: #define CLK_TOLERANCE_HZ 50 -static int sti_hdmi_connector_mode_valid(struct drm_connector *connector, - struct drm_display_mode *mode) +static enum drm_mode_status +sti_hdmi_connector_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) { int target = mode->clock * 1000; int target_min = target - CLK_TOLERANCE_HZ; diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c index a29d64f87563..abb409b08bc6 100644 --- a/drivers/gpu/drm/tegra/dc.c +++ b/drivers/gpu/drm/tegra/dc.c @@ -3022,8 +3022,10 @@ static int tegra_dc_probe(struct platform_device *pdev) usleep_range(2000, 4000); err = reset_control_assert(dc->rst); - if (err < 0) + if (err < 0) { + clk_disable_unprepare(dc->clk); return err; + } usleep_range(2000, 4000); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 171e90c4b9f3..01d5a01af259 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -186,7 +186,8 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf, if (cmd->dma.guest.ptr.offset % PAGE_SIZE || box->x != 0 || box->y != 0 || box->z != 0 || box->srcx != 0 || box->srcy != 0 || box->srcz != 0 || - box->d != 1 || box_count != 1) { + box->d != 1 || box_count != 1 || + box->w > 64 || box->h > 64) { /* TODO handle none page aligned offsets */ /* TODO handle more dst & src != 0 */ /* TODO handle more then one copy */ diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_client.c b/drivers/hid/amd-sfh-hid/amd_sfh_client.c index ab149b80f86c..911a23a9bcd1 100644 --- a/drivers/hid/amd-sfh-hid/amd_sfh_client.c +++ 
b/drivers/hid/amd-sfh-hid/amd_sfh_client.c @@ -166,6 +166,10 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata) in_data->sensor_virt_addr[i] = dma_alloc_coherent(dev, sizeof(int) * 8, &cl_data->sensor_dma_addr[i], GFP_KERNEL); + if (!in_data->sensor_virt_addr[i]) { + rc = -ENOMEM; + goto cleanup; + } cl_data->sensor_sts[i] = SENSOR_DISABLED; cl_data->sensor_requested_cnt[i] = 0; cl_data->cur_hid_dev = i; diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 5864d66783d4..59057db31483 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -969,7 +969,10 @@ #define USB_DEVICE_ID_ORTEK_IHOME_IMAC_A210S 0x8003 #define USB_VENDOR_ID_PLANTRONICS 0x047f +#define USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3210_SERIES 0xc055 #define USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3220_SERIES 0xc056 +#define USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3215_SERIES 0xc057 +#define USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3225_SERIES 0xc058 #define USB_VENDOR_ID_PANASONIC 0x04da #define USB_DEVICE_ID_PANABOARD_UBT780 0x1044 diff --git a/drivers/hid/hid-mcp2221.c b/drivers/hid/hid-mcp2221.c index de52e9f7bb8c..560eeec4035a 100644 --- a/drivers/hid/hid-mcp2221.c +++ b/drivers/hid/hid-mcp2221.c @@ -840,12 +840,19 @@ static int mcp2221_probe(struct hid_device *hdev, return ret; } - ret = hid_hw_start(hdev, HID_CONNECT_HIDRAW); + /* + * This driver uses the .raw_event callback and therefore does not need any + * HID_CONNECT_xxx flags. + */ + ret = hid_hw_start(hdev, 0); if (ret) { hid_err(hdev, "can't start hardware\n"); return ret; } + hid_info(hdev, "USB HID v%x.%02x Device [%s] on %s\n", hdev->version >> 8, + hdev->version & 0xff, hdev->name, hdev->phys); + ret = hid_hw_open(hdev); if (ret) { hid_err(hdev, "can't open device\n"); @@ -870,8 +877,7 @@ static int mcp2221_probe(struct hid_device *hdev, mcp->adapter.retries = 1; mcp->adapter.dev.parent = &hdev->dev; snprintf(mcp->adapter.name, sizeof(mcp->adapter.name), - "MCP2221 usb-i2c bridge on hidraw%d", - ((struct hidraw *)hdev->hidraw)->minor); + "MCP2221 usb-i2c bridge"); ret = i2c_add_adapter(&mcp->adapter); if (ret) { diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c index 08462ac72b89..6b86d368d5e7 100644 --- a/drivers/hid/hid-multitouch.c +++ b/drivers/hid/hid-multitouch.c @@ -1965,6 +1965,10 @@ static const struct hid_device_id mt_devices[] = { HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8, USB_VENDOR_ID_ELAN, 0x313a) }, + { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT, + HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8, + USB_VENDOR_ID_ELAN, 0x3148) }, + /* Elitegroup panel */ { .driver_data = MT_CLS_SERIAL, MT_USB_DEVICE(USB_VENDOR_ID_ELITEGROUP, diff --git a/drivers/hid/hid-plantronics.c b/drivers/hid/hid-plantronics.c index e81b7cec2d12..3d414ae194ac 100644 --- a/drivers/hid/hid-plantronics.c +++ b/drivers/hid/hid-plantronics.c @@ -198,9 +198,18 @@ err: } static const struct hid_device_id plantronics_devices[] = { + { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, + USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3210_SERIES), + .driver_data = PLT_QUIRK_DOUBLE_VOLUME_KEYS }, { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3220_SERIES), .driver_data = PLT_QUIRK_DOUBLE_VOLUME_KEYS }, + { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, + USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3215_SERIES), + .driver_data = PLT_QUIRK_DOUBLE_VOLUME_KEYS }, + { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, + USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3225_SERIES), + .driver_data = PLT_QUIRK_DOUBLE_VOLUME_KEYS }, { 
HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, HID_ANY_ID) }, { } }; diff --git a/drivers/hid/hid-sensor-custom.c b/drivers/hid/hid-sensor-custom.c index 32c2306e240d..602465ad2745 100644 --- a/drivers/hid/hid-sensor-custom.c +++ b/drivers/hid/hid-sensor-custom.c @@ -62,7 +62,7 @@ struct hid_sensor_sample { u32 raw_len; } __packed; -static struct attribute hid_custom_attrs[] = { +static struct attribute hid_custom_attrs[HID_CUSTOM_TOTAL_ATTRS] = { {.name = "name", .mode = S_IRUGO}, {.name = "units", .mode = S_IRUGO}, {.name = "unit-expo", .mode = S_IRUGO}, diff --git a/drivers/hid/hid-uclogic-core.c b/drivers/hid/hid-uclogic-core.c index d8ab0139e5cd..785d81d61ba4 100644 --- a/drivers/hid/hid-uclogic-core.c +++ b/drivers/hid/hid-uclogic-core.c @@ -172,6 +172,7 @@ static int uclogic_probe(struct hid_device *hdev, * than the pen, so use QUIRK_MULTI_INPUT for all tablets. */ hdev->quirks |= HID_QUIRK_MULTI_INPUT; + hdev->quirks |= HID_QUIRK_HIDINPUT_FORCE; /* Allocate and assign driver data */ drvdata = devm_kzalloc(&hdev->dev, sizeof(*drvdata), GFP_KERNEL); diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c index 7e67c41d97a4..2b6388da545e 100644 --- a/drivers/hid/wacom_sys.c +++ b/drivers/hid/wacom_sys.c @@ -160,6 +160,9 @@ static int wacom_raw_event(struct hid_device *hdev, struct hid_report *report, { struct wacom *wacom = hid_get_drvdata(hdev); + if (wacom->wacom_wac.features.type == BOOTLOADER) + return 0; + if (size > WACOM_PKGLEN_MAX) return 1; @@ -2792,6 +2795,11 @@ static int wacom_probe(struct hid_device *hdev, return error; } + if (features->type == BOOTLOADER) { + hid_warn(hdev, "Using device in hidraw-only mode"); + return hid_hw_start(hdev, HID_CONNECT_HIDRAW); + } + error = wacom_parse_and_register(wacom, false); if (error) return error; diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c index c454231afec8..546aaaaec016 100644 --- a/drivers/hid/wacom_wac.c +++ b/drivers/hid/wacom_wac.c @@ -4813,6 +4813,9 @@ static const struct wacom_features wacom_features_0x3c8 = static const struct wacom_features wacom_features_HID_ANY_ID = { "Wacom HID", .type = HID_GENERIC, .oVid = HID_ANY_ID, .oPid = HID_ANY_ID }; +static const struct wacom_features wacom_features_0x94 = + { "Wacom Bootloader", .type = BOOTLOADER }; + #define USB_DEVICE_WACOM(prod) \ HID_DEVICE(BUS_USB, HID_GROUP_WACOM, USB_VENDOR_ID_WACOM, prod),\ .driver_data = (kernel_ulong_t)&wacom_features_##prod @@ -4886,6 +4889,7 @@ const struct hid_device_id wacom_ids[] = { { USB_DEVICE_WACOM(0x84) }, { USB_DEVICE_WACOM(0x90) }, { USB_DEVICE_WACOM(0x93) }, + { USB_DEVICE_WACOM(0x94) }, { USB_DEVICE_WACOM(0x97) }, { USB_DEVICE_WACOM(0x9A) }, { USB_DEVICE_WACOM(0x9F) }, diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h index 466b62cc16dc..44e0763a0ede 100644 --- a/drivers/hid/wacom_wac.h +++ b/drivers/hid/wacom_wac.h @@ -242,6 +242,7 @@ enum { MTTPC, MTTPC_B, HID_GENERIC, + BOOTLOADER, MAX_TYPE }; diff --git a/drivers/hsi/controllers/omap_ssi_core.c b/drivers/hsi/controllers/omap_ssi_core.c index eb9820158318..26f2c3c01297 100644 --- a/drivers/hsi/controllers/omap_ssi_core.c +++ b/drivers/hsi/controllers/omap_ssi_core.c @@ -502,8 +502,10 @@ static int ssi_probe(struct platform_device *pd) platform_set_drvdata(pd, ssi); err = ssi_add_controller(ssi, pd); - if (err < 0) + if (err < 0) { + hsi_put_controller(ssi); goto out1; + } pm_runtime_enable(&pd->dev); @@ -536,9 +538,9 @@ out3: device_for_each_child(&pd->dev, NULL, ssi_remove_ports); out2: ssi_remove_controller(ssi); + pm_runtime_disable(&pd->dev); 
out1: platform_set_drvdata(pd, NULL); - pm_runtime_disable(&pd->dev); return err; } @@ -629,7 +631,13 @@ static int __init ssi_init(void) { if (ret) return ret; - return platform_driver_register(&ssi_port_pdriver); + ret = platform_driver_register(&ssi_port_pdriver); + if (ret) { + platform_driver_unregister(&ssi_pdriver); + return ret; + } + + return 0; } module_init(ssi_init); diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c index f4091143213b..1475ea77351e 100644 --- a/drivers/hv/ring_buffer.c +++ b/drivers/hv/ring_buffer.c @@ -249,6 +249,19 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info) ring_info->pkt_buffer_size = 0; } +/* + * Check if the ring buffer spinlock is available to take or not; used on + * atomic contexts, like panic path (see the Hyper-V framebuffer driver). + */ + +bool hv_ringbuffer_spinlock_busy(struct vmbus_channel *channel) +{ + struct hv_ring_buffer_info *rinfo = &channel->outbound; + + return spin_is_locked(&rinfo->ring_lock); +} +EXPORT_SYMBOL_GPL(hv_ringbuffer_spinlock_busy); + /* Write to the ring buffer. */ int hv_ringbuffer_write(struct vmbus_channel *channel, const struct kvec *kv_list, u32 kv_count, diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index 996cdc103639..9d4451a6f00f 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig @@ -776,6 +776,7 @@ config SENSORS_IT87 config SENSORS_JC42 tristate "JEDEC JC42.4 compliant memory module temperature sensors" depends on I2C + select REGMAP_I2C help If you say yes here, you get support for JEDEC JC42.4 compliant temperature sensors, which are used on many DDR3 memory modules for diff --git a/drivers/hwmon/jc42.c b/drivers/hwmon/jc42.c index 4a03d010ec5a..52f341d46029 100644 --- a/drivers/hwmon/jc42.c +++ b/drivers/hwmon/jc42.c @@ -19,6 +19,7 @@ #include #include #include +#include /* Addresses to scan */ static const unsigned short normal_i2c[] = { @@ -189,31 +190,14 @@ static struct jc42_chips jc42_chips[] = { { STM_MANID, STTS3000_DEVID, STTS3000_DEVID_MASK }, }; -enum temp_index { - t_input = 0, - t_crit, - t_min, - t_max, - t_num_temp -}; - -static const u8 temp_regs[t_num_temp] = { - [t_input] = JC42_REG_TEMP, - [t_crit] = JC42_REG_TEMP_CRITICAL, - [t_min] = JC42_REG_TEMP_LOWER, - [t_max] = JC42_REG_TEMP_UPPER, -}; - /* Each client has this additional data */ struct jc42_data { - struct i2c_client *client; struct mutex update_lock; /* protect register access */ + struct regmap *regmap; bool extended; /* true if extended range supported */ bool valid; - unsigned long last_updated; /* In jiffies */ u16 orig_config; /* original configuration */ u16 config; /* current configuration */ - u16 temp[t_num_temp];/* Temperatures */ }; #define JC42_TEMP_MIN_EXTENDED (-40000) @@ -238,85 +222,102 @@ static int jc42_temp_from_reg(s16 reg) return reg * 125 / 2; } -static struct jc42_data *jc42_update_device(struct device *dev) -{ - struct jc42_data *data = dev_get_drvdata(dev); - struct i2c_client *client = data->client; - struct jc42_data *ret = data; - int i, val; - - mutex_lock(&data->update_lock); - - if (time_after(jiffies, data->last_updated + HZ) || !data->valid) { - for (i = 0; i < t_num_temp; i++) { - val = i2c_smbus_read_word_swapped(client, temp_regs[i]); - if (val < 0) { - ret = ERR_PTR(val); - goto abort; - } - data->temp[i] = val; - } - data->last_updated = jiffies; - data->valid = true; - } -abort: - mutex_unlock(&data->update_lock); - return ret; -} - static int jc42_read(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel, long 
*val) { - struct jc42_data *data = jc42_update_device(dev); - int temp, hyst; + struct jc42_data *data = dev_get_drvdata(dev); + unsigned int regval; + int ret, temp, hyst; - if (IS_ERR(data)) - return PTR_ERR(data); + mutex_lock(&data->update_lock); switch (attr) { case hwmon_temp_input: - *val = jc42_temp_from_reg(data->temp[t_input]); - return 0; + ret = regmap_read(data->regmap, JC42_REG_TEMP, &regval); + if (ret) + break; + + *val = jc42_temp_from_reg(regval); + break; case hwmon_temp_min: - *val = jc42_temp_from_reg(data->temp[t_min]); - return 0; + ret = regmap_read(data->regmap, JC42_REG_TEMP_LOWER, &regval); + if (ret) + break; + + *val = jc42_temp_from_reg(regval); + break; case hwmon_temp_max: - *val = jc42_temp_from_reg(data->temp[t_max]); - return 0; + ret = regmap_read(data->regmap, JC42_REG_TEMP_UPPER, &regval); + if (ret) + break; + + *val = jc42_temp_from_reg(regval); + break; case hwmon_temp_crit: - *val = jc42_temp_from_reg(data->temp[t_crit]); - return 0; + ret = regmap_read(data->regmap, JC42_REG_TEMP_CRITICAL, + &regval); + if (ret) + break; + + *val = jc42_temp_from_reg(regval); + break; case hwmon_temp_max_hyst: - temp = jc42_temp_from_reg(data->temp[t_max]); + ret = regmap_read(data->regmap, JC42_REG_TEMP_UPPER, &regval); + if (ret) + break; + + temp = jc42_temp_from_reg(regval); hyst = jc42_hysteresis[(data->config & JC42_CFG_HYST_MASK) >> JC42_CFG_HYST_SHIFT]; *val = temp - hyst; - return 0; + break; case hwmon_temp_crit_hyst: - temp = jc42_temp_from_reg(data->temp[t_crit]); + ret = regmap_read(data->regmap, JC42_REG_TEMP_CRITICAL, + &regval); + if (ret) + break; + + temp = jc42_temp_from_reg(regval); hyst = jc42_hysteresis[(data->config & JC42_CFG_HYST_MASK) >> JC42_CFG_HYST_SHIFT]; *val = temp - hyst; - return 0; + break; case hwmon_temp_min_alarm: - *val = (data->temp[t_input] >> JC42_ALARM_MIN_BIT) & 1; - return 0; + ret = regmap_read(data->regmap, JC42_REG_TEMP, &regval); + if (ret) + break; + + *val = (regval >> JC42_ALARM_MIN_BIT) & 1; + break; case hwmon_temp_max_alarm: - *val = (data->temp[t_input] >> JC42_ALARM_MAX_BIT) & 1; - return 0; + ret = regmap_read(data->regmap, JC42_REG_TEMP, &regval); + if (ret) + break; + + *val = (regval >> JC42_ALARM_MAX_BIT) & 1; + break; case hwmon_temp_crit_alarm: - *val = (data->temp[t_input] >> JC42_ALARM_CRIT_BIT) & 1; - return 0; + ret = regmap_read(data->regmap, JC42_REG_TEMP, &regval); + if (ret) + break; + + *val = (regval >> JC42_ALARM_CRIT_BIT) & 1; + break; default: - return -EOPNOTSUPP; + ret = -EOPNOTSUPP; + break; } + + mutex_unlock(&data->update_lock); + + return ret; } static int jc42_write(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel, long val) { struct jc42_data *data = dev_get_drvdata(dev); - struct i2c_client *client = data->client; + unsigned int regval; int diff, hyst; int ret; @@ -324,21 +325,23 @@ static int jc42_write(struct device *dev, enum hwmon_sensor_types type, switch (attr) { case hwmon_temp_min: - data->temp[t_min] = jc42_temp_to_reg(val, data->extended); - ret = i2c_smbus_write_word_swapped(client, temp_regs[t_min], - data->temp[t_min]); + ret = regmap_write(data->regmap, JC42_REG_TEMP_LOWER, + jc42_temp_to_reg(val, data->extended)); break; case hwmon_temp_max: - data->temp[t_max] = jc42_temp_to_reg(val, data->extended); - ret = i2c_smbus_write_word_swapped(client, temp_regs[t_max], - data->temp[t_max]); + ret = regmap_write(data->regmap, JC42_REG_TEMP_UPPER, + jc42_temp_to_reg(val, data->extended)); break; case hwmon_temp_crit: - data->temp[t_crit] = jc42_temp_to_reg(val, 
data->extended); - ret = i2c_smbus_write_word_swapped(client, temp_regs[t_crit], - data->temp[t_crit]); + ret = regmap_write(data->regmap, JC42_REG_TEMP_CRITICAL, + jc42_temp_to_reg(val, data->extended)); break; case hwmon_temp_crit_hyst: + ret = regmap_read(data->regmap, JC42_REG_TEMP_CRITICAL, + &regval); + if (ret) + break; + /* * JC42.4 compliant chips only support four hysteresis values. * Pick best choice and go from there. @@ -346,7 +349,7 @@ static int jc42_write(struct device *dev, enum hwmon_sensor_types type, val = clamp_val(val, (data->extended ? JC42_TEMP_MIN_EXTENDED : JC42_TEMP_MIN) - 6000, JC42_TEMP_MAX); - diff = jc42_temp_from_reg(data->temp[t_crit]) - val; + diff = jc42_temp_from_reg(regval) - val; hyst = 0; if (diff > 0) { if (diff < 2250) @@ -358,9 +361,8 @@ static int jc42_write(struct device *dev, enum hwmon_sensor_types type, } data->config = (data->config & ~JC42_CFG_HYST_MASK) | (hyst << JC42_CFG_HYST_SHIFT); - ret = i2c_smbus_write_word_swapped(data->client, - JC42_REG_CONFIG, - data->config); + ret = regmap_write(data->regmap, JC42_REG_CONFIG, + data->config); break; default: ret = -EOPNOTSUPP; break; } @@ -458,51 +460,80 @@ static const struct hwmon_chip_info jc42_chip_info = { .info = jc42_info, }; +static bool jc42_readable_reg(struct device *dev, unsigned int reg) +{ + return (reg >= JC42_REG_CAP && reg <= JC42_REG_DEVICEID) || + reg == JC42_REG_SMBUS; +} + +static bool jc42_writable_reg(struct device *dev, unsigned int reg) +{ + return (reg >= JC42_REG_CONFIG && reg <= JC42_REG_TEMP_CRITICAL) || + reg == JC42_REG_SMBUS; +} + +static bool jc42_volatile_reg(struct device *dev, unsigned int reg) +{ + return reg == JC42_REG_CONFIG || reg == JC42_REG_TEMP; +} + +static const struct regmap_config jc42_regmap_config = { + .reg_bits = 8, + .val_bits = 16, + .val_format_endian = REGMAP_ENDIAN_BIG, + .max_register = JC42_REG_SMBUS, + .writeable_reg = jc42_writable_reg, + .readable_reg = jc42_readable_reg, + .volatile_reg = jc42_volatile_reg, + .cache_type = REGCACHE_RBTREE, +}; + static int jc42_probe(struct i2c_client *client) { struct device *dev = &client->dev; struct device *hwmon_dev; + unsigned int config, cap; struct jc42_data *data; - int config, cap; + int ret; data = devm_kzalloc(dev, sizeof(struct jc42_data), GFP_KERNEL); if (!data) return -ENOMEM; - data->client = client; + data->regmap = devm_regmap_init_i2c(client, &jc42_regmap_config); + if (IS_ERR(data->regmap)) + return PTR_ERR(data->regmap); + i2c_set_clientdata(client, data); mutex_init(&data->update_lock); - cap = i2c_smbus_read_word_swapped(client, JC42_REG_CAP); - if (cap < 0) - return cap; + ret = regmap_read(data->regmap, JC42_REG_CAP, &cap); + if (ret) + return ret; data->extended = !!(cap & JC42_CAP_RANGE); if (device_property_read_bool(dev, "smbus-timeout-disable")) { - int smbus; - /* * Not all chips support this register, but from a * quick read of various datasheets no chip appears * incompatible with the below attempt to disable * the timeout. And the whole thing is opt-in... 
*/ - smbus = i2c_smbus_read_word_swapped(client, JC42_REG_SMBUS); - if (smbus < 0) - return smbus; - i2c_smbus_write_word_swapped(client, JC42_REG_SMBUS, - smbus | SMBUS_STMOUT); + ret = regmap_set_bits(data->regmap, JC42_REG_SMBUS, + SMBUS_STMOUT); + if (ret) + return ret; } - config = i2c_smbus_read_word_swapped(client, JC42_REG_CONFIG); - if (config < 0) - return config; + ret = regmap_read(data->regmap, JC42_REG_CONFIG, &config); + if (ret) + return ret; data->orig_config = config; if (config & JC42_CFG_SHUTDOWN) { config &= ~JC42_CFG_SHUTDOWN; - i2c_smbus_write_word_swapped(client, JC42_REG_CONFIG, config); + regmap_write(data->regmap, JC42_REG_CONFIG, config); } data->config = config; @@ -523,7 +554,7 @@ static int jc42_remove(struct i2c_client *client) config = (data->orig_config & ~JC42_CFG_HYST_MASK) | (data->config & JC42_CFG_HYST_MASK); - i2c_smbus_write_word_swapped(client, JC42_REG_CONFIG, config); + regmap_write(data->regmap, JC42_REG_CONFIG, config); } return 0; } @@ -535,8 +566,11 @@ static int jc42_suspend(struct device *dev) struct jc42_data *data = dev_get_drvdata(dev); data->config |= JC42_CFG_SHUTDOWN; - i2c_smbus_write_word_swapped(data->client, JC42_REG_CONFIG, - data->config); + regmap_write(data->regmap, JC42_REG_CONFIG, data->config); + + regcache_cache_only(data->regmap, true); + regcache_mark_dirty(data->regmap); + return 0; } @@ -544,10 +578,13 @@ static int jc42_resume(struct device *dev) { struct jc42_data *data = dev_get_drvdata(dev); + regcache_cache_only(data->regmap, false); + data->config &= ~JC42_CFG_SHUTDOWN; - i2c_smbus_write_word_swapped(data->client, JC42_REG_CONFIG, - data->config); - return 0; + regmap_write(data->regmap, JC42_REG_CONFIG, data->config); + + /* Restore cached register values to hardware */ + return regcache_sync(data->regmap); } static const struct dev_pm_ops jc42_dev_pm_ops = { diff --git a/drivers/hwtracing/coresight/coresight-trbe.c b/drivers/hwtracing/coresight/coresight-trbe.c index 7dddb85b9059..fac63d092c7b 100644 --- a/drivers/hwtracing/coresight/coresight-trbe.c +++ b/drivers/hwtracing/coresight/coresight-trbe.c @@ -1030,6 +1030,7 @@ static int arm_trbe_probe_cpuhp(struct trbe_drvdata *drvdata) static void arm_trbe_remove_cpuhp(struct trbe_drvdata *drvdata) { + cpuhp_state_remove_instance(drvdata->trbe_online, &drvdata->hotplug_node); cpuhp_remove_multi_state(drvdata->trbe_online); } diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c index 483428c5e30b..10cdd501d4c5 100644 --- a/drivers/i2c/busses/i2c-ismt.c +++ b/drivers/i2c/busses/i2c-ismt.c @@ -509,6 +509,9 @@ static int ismt_access(struct i2c_adapter *adap, u16 addr, if (read_write == I2C_SMBUS_WRITE) { /* Block Write */ dev_dbg(dev, "I2C_SMBUS_BLOCK_DATA: WRITE\n"); + if (data->block[0] < 1 || data->block[0] > I2C_SMBUS_BLOCK_MAX) + return -EINVAL; + dma_size = data->block[0] + 1; dma_direction = DMA_TO_DEVICE; desc->wr_len_cmd = dma_size; diff --git a/drivers/i2c/busses/i2c-pxa-pci.c b/drivers/i2c/busses/i2c-pxa-pci.c index f614cade432b..30e38bc8b6db 100644 --- a/drivers/i2c/busses/i2c-pxa-pci.c +++ b/drivers/i2c/busses/i2c-pxa-pci.c @@ -105,7 +105,7 @@ static int ce4100_i2c_probe(struct pci_dev *dev, int i; struct ce4100_devices *sds; - ret = pci_enable_device_mem(dev); + ret = pcim_enable_device(dev); if (ret) return ret; @@ -114,10 +114,8 @@ static int ce4100_i2c_probe(struct pci_dev *dev, return -EINVAL; } sds = kzalloc(sizeof(*sds), GFP_KERNEL); - if (!sds) { - ret = -ENOMEM; - goto err_mem; - } + if (!sds) + return -ENOMEM; for (i = 0; i 
< ARRAY_SIZE(sds->pdev); i++) { sds->pdev[i] = add_i2c_device(dev, i); @@ -133,8 +131,6 @@ static int ce4100_i2c_probe(struct pci_dev *dev, err_dev_add: kfree(sds); -err_mem: - pci_disable_device(dev); return ret; } diff --git a/drivers/i2c/muxes/i2c-mux-reg.c b/drivers/i2c/muxes/i2c-mux-reg.c index 0e0679f65cf7..30a6de1694e0 100644 --- a/drivers/i2c/muxes/i2c-mux-reg.c +++ b/drivers/i2c/muxes/i2c-mux-reg.c @@ -183,13 +183,12 @@ static int i2c_mux_reg_probe(struct platform_device *pdev) if (!mux->data.reg) { dev_info(&pdev->dev, "Register not set, using platform resource\n"); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - mux->data.reg_size = resource_size(res); - mux->data.reg = devm_ioremap_resource(&pdev->dev, res); + mux->data.reg = devm_platform_get_and_ioremap_resource(pdev, 0, &res); if (IS_ERR(mux->data.reg)) { ret = PTR_ERR(mux->data.reg); goto err_put_parent; } + mux->data.reg_size = resource_size(res); } if (mux->data.reg_size != 4 && mux->data.reg_size != 2 && diff --git a/drivers/iio/accel/adis16201.c b/drivers/iio/accel/adis16201.c index 7a434e2884d4..dfb8e2e5bdf5 100644 --- a/drivers/iio/accel/adis16201.c +++ b/drivers/iio/accel/adis16201.c @@ -300,3 +300,4 @@ MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>"); MODULE_DESCRIPTION("Analog Devices ADIS16201 Dual-Axis Digital Inclinometer and Accelerometer"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("spi:adis16201"); +MODULE_IMPORT_NS(IIO_ADISLIB); diff --git a/drivers/iio/accel/adis16209.c b/drivers/iio/accel/adis16209.c index ac08e866d612..5a9c6e2296f1 100644 --- a/drivers/iio/accel/adis16209.c +++ b/drivers/iio/accel/adis16209.c @@ -310,3 +310,4 @@ MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>"); MODULE_DESCRIPTION("Analog Devices ADIS16209 Dual-Axis Digital Inclinometer and Accelerometer"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("spi:adis16209"); +MODULE_IMPORT_NS(IIO_ADISLIB); diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c index 1d652d9b2f5c..bd5c49571d1a 100644 --- a/drivers/iio/adc/ad_sigma_delta.c +++ b/drivers/iio/adc/ad_sigma_delta.c @@ -280,10 +280,10 @@ int ad_sigma_delta_single_conversion(struct iio_dev *indio_dev, unsigned int data_reg; int ret = 0; - if (iio_buffer_enabled(indio_dev)) - return -EBUSY; + ret = iio_device_claim_direct_mode(indio_dev); + if (ret) + return ret; - mutex_lock(&indio_dev->mlock); ad_sigma_delta_set_channel(sigma_delta, chan->address); spi_bus_lock(sigma_delta->spi->master); @@ -322,7 +322,7 @@ out: ad_sigma_delta_set_mode(sigma_delta, AD_SD_MODE_IDLE); sigma_delta->bus_locked = false; spi_bus_unlock(sigma_delta->spi->master); - mutex_unlock(&indio_dev->mlock); + iio_device_release_direct_mode(indio_dev); if (ret) return ret; diff --git a/drivers/iio/adc/ti-adc128s052.c b/drivers/iio/adc/ti-adc128s052.c index 83c1ae07b3e9..8618ae7bc067 100644 --- a/drivers/iio/adc/ti-adc128s052.c +++ b/drivers/iio/adc/ti-adc128s052.c @@ -193,13 +193,13 @@ static int adc128_remove(struct spi_device *spi) } static const struct of_device_id adc128_of_match[] = { - { .compatible = "ti,adc128s052", }, - { .compatible = "ti,adc122s021", }, - { .compatible = "ti,adc122s051", }, - { .compatible = "ti,adc122s101", }, - { .compatible = "ti,adc124s021", }, - { .compatible = "ti,adc124s051", }, - { .compatible = "ti,adc124s101", }, + { .compatible = "ti,adc128s052", .data = (void*)0L, }, + { .compatible = "ti,adc122s021", .data = (void*)1L, }, + { .compatible = "ti,adc122s051", .data = (void*)1L, }, + { .compatible = "ti,adc122s101", .data = (void*)1L, }, + { .compatible = 
"ti,adc124s021", .data = (void*)2L, }, + { .compatible = "ti,adc124s051", .data = (void*)2L, }, + { .compatible = "ti,adc124s101", .data = (void*)2L, }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, adc128_of_match); diff --git a/drivers/iio/gyro/adis16136.c b/drivers/iio/gyro/adis16136.c index 36879f01e28c..71295709f2b9 100644 --- a/drivers/iio/gyro/adis16136.c +++ b/drivers/iio/gyro/adis16136.c @@ -591,3 +591,4 @@ module_spi_driver(adis16136_driver); MODULE_AUTHOR("Lars-Peter Clausen "); MODULE_DESCRIPTION("Analog Devices ADIS16133/ADIS16135/ADIS16136 gyroscope driver"); MODULE_LICENSE("GPL v2"); +MODULE_IMPORT_NS(IIO_ADISLIB); diff --git a/drivers/iio/gyro/adis16260.c b/drivers/iio/gyro/adis16260.c index 66b6b7bd5e1b..eaf57bd339ed 100644 --- a/drivers/iio/gyro/adis16260.c +++ b/drivers/iio/gyro/adis16260.c @@ -433,3 +433,4 @@ module_spi_driver(adis16260_driver); MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>"); MODULE_DESCRIPTION("Analog Devices ADIS16260/5 Digital Gyroscope Sensor"); MODULE_LICENSE("GPL v2"); +MODULE_IMPORT_NS(IIO_ADISLIB); diff --git a/drivers/iio/imu/adis.c b/drivers/iio/imu/adis.c index d4e692b187cd..bc40240b29e2 100644 --- a/drivers/iio/imu/adis.c +++ b/drivers/iio/imu/adis.c @@ -30,8 +30,8 @@ * @value: The value to write to device (up to 4 bytes) * @size: The size of the @value (in bytes) */ -int __adis_write_reg(struct adis *adis, unsigned int reg, - unsigned int value, unsigned int size) +int __adis_write_reg(struct adis *adis, unsigned int reg, unsigned int value, + unsigned int size) { unsigned int page = reg / ADIS_PAGE_SIZE; int ret, i; @@ -114,14 +114,14 @@ int __adis_write_reg(struct adis *adis, unsigned int reg, ret = spi_sync(adis->spi, &msg); if (ret) { dev_err(&adis->spi->dev, "Failed to write register 0x%02X: %d\n", - reg, ret); + reg, ret); } else { adis->current_page = page; } return ret; } -EXPORT_SYMBOL_GPL(__adis_write_reg); +EXPORT_SYMBOL_NS_GPL(__adis_write_reg, IIO_ADISLIB); /** * __adis_read_reg() - read N bytes from register (unlocked version) @@ -130,8 +130,8 @@ EXPORT_SYMBOL_GPL(__adis_write_reg); * @val: The value read back from the device * @size: The size of the @val buffer */ -int __adis_read_reg(struct adis *adis, unsigned int reg, - unsigned int *val, unsigned int size) +int __adis_read_reg(struct adis *adis, unsigned int reg, unsigned int *val, + unsigned int size) { unsigned int page = reg / ADIS_PAGE_SIZE; struct spi_message msg; @@ -201,12 +201,12 @@ int __adis_read_reg(struct adis *adis, unsigned int reg, ret = spi_sync(adis->spi, &msg); if (ret) { dev_err(&adis->spi->dev, "Failed to read register 0x%02X: %d\n", - reg, ret); + reg, ret); return ret; - } else { - adis->current_page = page; } + adis->current_page = page; + switch (size) { case 4: *val = get_unaligned_be32(adis->rx); @@ -218,7 +218,7 @@ int __adis_read_reg(struct adis *adis, unsigned int reg, return ret; } -EXPORT_SYMBOL_GPL(__adis_read_reg); +EXPORT_SYMBOL_NS_GPL(__adis_read_reg, IIO_ADISLIB); /** * __adis_update_bits_base() - ADIS Update bits function - Unlocked version * @adis: The adis device @@ -243,17 +243,17 @@ int __adis_update_bits_base(struct adis *adis, unsigned int reg, const u32 mask, return __adis_write_reg(adis, reg, __val, size); } -EXPORT_SYMBOL_GPL(__adis_update_bits_base); +EXPORT_SYMBOL_NS_GPL(__adis_update_bits_base, IIO_ADISLIB); #ifdef CONFIG_DEBUG_FS -int adis_debugfs_reg_access(struct iio_dev *indio_dev, - unsigned int reg, unsigned int writeval, unsigned int *readval) +int adis_debugfs_reg_access(struct iio_dev *indio_dev, unsigned int 
reg, + unsigned int writeval, unsigned int *readval) { struct adis *adis = iio_device_get_drvdata(indio_dev); if (readval) { - uint16_t val16; + u16 val16; int ret; ret = adis_read_reg_16(adis, reg, &val16); @@ -261,36 +261,41 @@ int adis_debugfs_reg_access(struct iio_dev *indio_dev, *readval = val16; return ret; - } else { - return adis_write_reg_16(adis, reg, writeval); } + + return adis_write_reg_16(adis, reg, writeval); } -EXPORT_SYMBOL(adis_debugfs_reg_access); +EXPORT_SYMBOL_NS(adis_debugfs_reg_access, IIO_ADISLIB); #endif /** - * adis_enable_irq() - Enable or disable data ready IRQ + * __adis_enable_irq() - Enable or disable data ready IRQ (unlocked) * @adis: The adis device * @enable: Whether to enable the IRQ * * Returns 0 on success, negative error code otherwise */ -int adis_enable_irq(struct adis *adis, bool enable) +int __adis_enable_irq(struct adis *adis, bool enable) { - int ret = 0; - uint16_t msc; + int ret; + u16 msc; - mutex_lock(&adis->state_lock); + if (adis->data->enable_irq) + return adis->data->enable_irq(adis, enable); - if (adis->data->enable_irq) { - ret = adis->data->enable_irq(adis, enable); - goto out_unlock; + if (adis->data->unmasked_drdy) { + if (enable) + enable_irq(adis->spi->irq); + else + disable_irq(adis->spi->irq); + + return 0; } ret = __adis_read_reg_16(adis, adis->data->msc_ctrl_reg, &msc); if (ret) - goto out_unlock; + return ret; msc |= ADIS_MSC_CTRL_DATA_RDY_POL_HIGH; msc &= ~ADIS_MSC_CTRL_DATA_RDY_DIO2; @@ -299,13 +304,9 @@ int adis_enable_irq(struct adis *adis, bool enable) else msc &= ~ADIS_MSC_CTRL_DATA_RDY_EN; - ret = __adis_write_reg_16(adis, adis->data->msc_ctrl_reg, msc); - -out_unlock: - mutex_unlock(&adis->state_lock); - return ret; + return __adis_write_reg_16(adis, adis->data->msc_ctrl_reg, msc); } -EXPORT_SYMBOL(adis_enable_irq); +EXPORT_SYMBOL_NS(__adis_enable_irq, IIO_ADISLIB); /** * __adis_check_status() - Check the device for error conditions (unlocked) @@ -315,7 +316,7 @@ EXPORT_SYMBOL(adis_enable_irq); */ int __adis_check_status(struct adis *adis) { - uint16_t status; + u16 status; int ret; int i; @@ -337,7 +338,7 @@ int __adis_check_status(struct adis *adis) return -EIO; } -EXPORT_SYMBOL_GPL(__adis_check_status); +EXPORT_SYMBOL_NS_GPL(__adis_check_status, IIO_ADISLIB); /** * __adis_reset() - Reset the device (unlocked version) @@ -351,7 +352,7 @@ int __adis_reset(struct adis *adis) const struct adis_timeout *timeouts = adis->data->timeouts; ret = __adis_write_reg_8(adis, adis->data->glob_cmd_reg, - ADIS_GLOB_CMD_SW_RESET); + ADIS_GLOB_CMD_SW_RESET); if (ret) { dev_err(&adis->spi->dev, "Failed to reset device: %d\n", ret); return ret; @@ -361,7 +362,7 @@ int __adis_reset(struct adis *adis) return 0; } -EXPORT_SYMBOL_GPL(__adis_reset); +EXPORT_SYMBOL_NS_GPL(__adis_reset, IIO_ADIS_LIB); static int adis_self_test(struct adis *adis) { @@ -407,7 +408,7 @@ int __adis_initial_startup(struct adis *adis) { const struct adis_timeout *timeouts = adis->data->timeouts; struct gpio_desc *gpio; - uint16_t prod_id; + u16 prod_id; int ret; /* check if the device has rst pin low */ @@ -416,7 +417,7 @@ int __adis_initial_startup(struct adis *adis) return PTR_ERR(gpio); if (gpio) { - msleep(10); + usleep_range(10, 12); /* bring device out of reset */ gpiod_set_value_cansleep(gpio, 0); msleep(timeouts->reset_ms); @@ -430,7 +431,13 @@ int __adis_initial_startup(struct adis *adis) if (ret) return ret; - adis_enable_irq(adis, false); + /* + * don't bother calling this if we can't unmask the IRQ as in this case + * the IRQ is most likely not yet 
requested and we will request it + * with 'IRQF_NO_AUTOEN' anyways. + */ + if (!adis->data->unmasked_drdy) + __adis_enable_irq(adis, false); if (!adis->data->prod_id_reg) return 0; @@ -446,7 +453,7 @@ int __adis_initial_startup(struct adis *adis) return 0; } -EXPORT_SYMBOL_GPL(__adis_initial_startup); +EXPORT_SYMBOL_NS_GPL(__adis_initial_startup, IIO_ADISLIB); /** * adis_single_conversion() - Performs a single sample conversion @@ -464,7 +471,8 @@ EXPORT_SYMBOL_GPL(__adis_initial_startup); * a error bit in the channels raw value set error_mask to 0. */ int adis_single_conversion(struct iio_dev *indio_dev, - const struct iio_chan_spec *chan, unsigned int error_mask, int *val) + const struct iio_chan_spec *chan, + unsigned int error_mask, int *val) { struct adis *adis = iio_device_get_drvdata(indio_dev); unsigned int uval; @@ -473,7 +481,7 @@ int adis_single_conversion(struct iio_dev *indio_dev, mutex_lock(&adis->state_lock); ret = __adis_read_reg(adis, chan->address, &uval, - chan->scan_type.storagebits / 8); + chan->scan_type.storagebits / 8); if (ret) goto err_unlock; @@ -493,7 +501,7 @@ err_unlock: mutex_unlock(&adis->state_lock); return ret; } -EXPORT_SYMBOL_GPL(adis_single_conversion); +EXPORT_SYMBOL_NS_GPL(adis_single_conversion, IIO_ADISLIB); /** * adis_init() - Initialize adis device structure @@ -508,7 +516,7 @@ EXPORT_SYMBOL_GPL(adis_single_conversion); * called. */ int adis_init(struct adis *adis, struct iio_dev *indio_dev, - struct spi_device *spi, const struct adis_data *data) + struct spi_device *spi, const struct adis_data *data) { if (!data || !data->timeouts) { dev_err(&spi->dev, "No config data or timeouts not defined!\n"); @@ -530,7 +538,7 @@ int adis_init(struct adis *adis, struct iio_dev *indio_dev, return 0; } -EXPORT_SYMBOL_GPL(adis_init); +EXPORT_SYMBOL_NS_GPL(adis_init, IIO_ADISLIB); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Lars-Peter Clausen "); diff --git a/drivers/iio/imu/adis16400.c b/drivers/iio/imu/adis16400.c index b12917a7cb60..9bcd9a9261b9 100644 --- a/drivers/iio/imu/adis16400.c +++ b/drivers/iio/imu/adis16400.c @@ -1230,3 +1230,4 @@ module_spi_driver(adis16400_driver); MODULE_AUTHOR("Manuel Stahl "); MODULE_DESCRIPTION("Analog Devices ADIS16400/5 IMU SPI driver"); MODULE_LICENSE("GPL v2"); +MODULE_IMPORT_NS(IIO_ADISLIB); diff --git a/drivers/iio/imu/adis16460.c b/drivers/iio/imu/adis16460.c index a6f9fba3e03f..40fc0e582a9f 100644 --- a/drivers/iio/imu/adis16460.c +++ b/drivers/iio/imu/adis16460.c @@ -444,3 +444,4 @@ module_spi_driver(adis16460_driver); MODULE_AUTHOR("Dragos Bogdan "); MODULE_DESCRIPTION("Analog Devices ADIS16460 IMU driver"); MODULE_LICENSE("GPL"); +MODULE_IMPORT_NS(IIO_ADISLIB); diff --git a/drivers/iio/imu/adis16475.c b/drivers/iio/imu/adis16475.c index 287fff39a927..9d28534db3b0 100644 --- a/drivers/iio/imu/adis16475.c +++ b/drivers/iio/imu/adis16475.c @@ -1382,3 +1382,4 @@ module_spi_driver(adis16475_driver); MODULE_AUTHOR("Nuno Sa "); MODULE_DESCRIPTION("Analog Devices ADIS16475 IMU driver"); MODULE_LICENSE("GPL"); +MODULE_IMPORT_NS(IIO_ADISLIB); diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c index f9b4540db1f4..44bbe3d19907 100644 --- a/drivers/iio/imu/adis16480.c +++ b/drivers/iio/imu/adis16480.c @@ -1538,3 +1538,4 @@ module_spi_driver(adis16480_driver); MODULE_AUTHOR("Lars-Peter Clausen "); MODULE_DESCRIPTION("Analog Devices ADIS16480 IMU driver"); MODULE_LICENSE("GPL v2"); +MODULE_IMPORT_NS(IIO_ADISLIB); diff --git a/drivers/iio/imu/adis_buffer.c b/drivers/iio/imu/adis_buffer.c index 
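/*
 * Illustrative sketch, not part of the patch: how the IIO_ADISLIB symbol
 * namespace used throughout the adis changes above is consumed.  The library
 * places its exports in the namespace and every module that calls them must
 * import it explicitly, otherwise modpost warns and the module fails to load.
 * example_adis_helper() is a hypothetical name.
 */
#include <linux/export.h>
#include <linux/module.h>

/* library side: export into the namespace instead of the global one */
int example_adis_helper(void)
{
	return 0;
}
EXPORT_SYMBOL_NS_GPL(example_adis_helper, IIO_ADISLIB);

/* driver side: declare the dependency once per consuming module */
MODULE_IMPORT_NS(IIO_ADISLIB);
MODULE_LICENSE("GPL");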
351c303c8a8c..928933027ae3 100644 --- a/drivers/iio/imu/adis_buffer.c +++ b/drivers/iio/imu/adis_buffer.c @@ -20,7 +20,7 @@ #include static int adis_update_scan_mode_burst(struct iio_dev *indio_dev, - const unsigned long *scan_mask) + const unsigned long *scan_mask) { struct adis *adis = iio_device_get_drvdata(indio_dev); unsigned int burst_length, burst_max_length; @@ -67,7 +67,7 @@ static int adis_update_scan_mode_burst(struct iio_dev *indio_dev, } int adis_update_scan_mode(struct iio_dev *indio_dev, - const unsigned long *scan_mask) + const unsigned long *scan_mask) { struct adis *adis = iio_device_get_drvdata(indio_dev); const struct iio_chan_spec *chan; @@ -124,7 +124,7 @@ int adis_update_scan_mode(struct iio_dev *indio_dev, return 0; } -EXPORT_SYMBOL_GPL(adis_update_scan_mode); +EXPORT_SYMBOL_NS_GPL(adis_update_scan_mode, IIO_ADISLIB); static irqreturn_t adis_trigger_handler(int irq, void *p) { @@ -158,7 +158,7 @@ static irqreturn_t adis_trigger_handler(int irq, void *p) } iio_push_to_buffers_with_timestamp(indio_dev, adis->buffer, - pf->timestamp); + pf->timestamp); irq_done: iio_trigger_notify_done(indio_dev->trig); @@ -212,5 +212,5 @@ devm_adis_setup_buffer_and_trigger(struct adis *adis, struct iio_dev *indio_dev, return devm_add_action_or_reset(&adis->spi->dev, adis_buffer_cleanup, adis); } -EXPORT_SYMBOL_GPL(devm_adis_setup_buffer_and_trigger); +EXPORT_SYMBOL_NS_GPL(devm_adis_setup_buffer_and_trigger, IIO_ADISLIB); diff --git a/drivers/iio/imu/adis_trigger.c b/drivers/iio/imu/adis_trigger.c index 48eedc29b28a..f890bf842db8 100644 --- a/drivers/iio/imu/adis_trigger.c +++ b/drivers/iio/imu/adis_trigger.c @@ -15,8 +15,7 @@ #include #include -static int adis_data_rdy_trigger_set_state(struct iio_trigger *trig, - bool state) +static int adis_data_rdy_trigger_set_state(struct iio_trigger *trig, bool state) { struct adis *adis = iio_trigger_get_drvdata(trig); @@ -30,6 +29,10 @@ static const struct iio_trigger_ops adis_trigger_ops = { static int adis_validate_irq_flag(struct adis *adis) { unsigned long direction = adis->irq_flag & IRQF_TRIGGER_MASK; + + /* We cannot mask the interrupt so ensure it's not enabled at request */ + if (adis->data->unmasked_drdy) + adis->irq_flag |= IRQF_NO_AUTOEN; /* * Typically this devices have data ready either on the rising edge or * on the falling edge of the data ready pin. 
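/*
 * Illustrative sketch, not part of the patch: the IRQF_NO_AUTOEN pattern the
 * unmasked_drdy handling relies on.  The line is requested disabled and is
 * only unmasked at the interrupt controller when the trigger is enabled,
 * mirroring what __adis_enable_irq() does for devices that cannot gate the
 * data-ready pin themselves.  The example_drv names are hypothetical.
 */
#include <linux/interrupt.h>

static irqreturn_t example_drv_drdy_handler(int irq, void *private)
{
	/* data-ready handling would go here */
	return IRQ_HANDLED;
}

static int example_drv_setup_irq(struct device *dev, int irq, void *priv)
{
	/* request the line masked so nothing fires before we are ready */
	return devm_request_irq(dev, irq, example_drv_drdy_handler,
				IRQF_TRIGGER_RISING | IRQF_NO_AUTOEN,
				"example-drdy", priv);
}

static void example_drv_set_drdy(int irq, bool enable)
{
	/* runtime control happens at the interrupt controller */
	if (enable)
		enable_irq(irq);
	else
		disable_irq(irq);
}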
This checks enforces that @@ -84,5 +87,5 @@ int devm_adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev) return devm_iio_trigger_register(&adis->spi->dev, adis->trig); } -EXPORT_SYMBOL_GPL(devm_adis_probe_trigger); +EXPORT_SYMBOL_NS_GPL(devm_adis_probe_trigger, IIO_ADISLIB); diff --git a/drivers/iio/industrialio-event.c b/drivers/iio/industrialio-event.c index d0732eac0f0a..07bf47a1a356 100644 --- a/drivers/iio/industrialio-event.c +++ b/drivers/iio/industrialio-event.c @@ -549,7 +549,7 @@ int iio_device_register_eventset(struct iio_dev *indio_dev) ret = iio_device_register_sysfs_group(indio_dev, &ev_int->group); if (ret) - goto error_free_setup_event_lines; + goto error_free_group_attrs; ev_int->ioctl_handler.ioctl = iio_event_ioctl; iio_device_ioctl_handler_register(&iio_dev_opaque->indio_dev, @@ -557,6 +557,8 @@ int iio_device_register_eventset(struct iio_dev *indio_dev) return 0; +error_free_group_attrs: + kfree(ev_int->group.attrs); error_free_setup_event_lines: iio_free_chan_devattr_list(&ev_int->dev_attr_list); kfree(ev_int); diff --git a/drivers/iio/temperature/ltc2983.c b/drivers/iio/temperature/ltc2983.c index 8306daa77908..b2ae2d2c7eef 100644 --- a/drivers/iio/temperature/ltc2983.c +++ b/drivers/iio/temperature/ltc2983.c @@ -205,6 +205,7 @@ struct ltc2983_data { * Holds the converted temperature */ __be32 temp ____cacheline_aligned; + __be32 chan_val; }; struct ltc2983_sensor { @@ -309,19 +310,18 @@ static int __ltc2983_fault_handler(const struct ltc2983_data *st, return 0; } -static int __ltc2983_chan_assign_common(const struct ltc2983_data *st, +static int __ltc2983_chan_assign_common(struct ltc2983_data *st, const struct ltc2983_sensor *sensor, u32 chan_val) { u32 reg = LTC2983_CHAN_START_ADDR(sensor->chan); - __be32 __chan_val; chan_val |= LTC2983_CHAN_TYPE(sensor->type); dev_dbg(&st->spi->dev, "Assign reg:0x%04X, val:0x%08X\n", reg, chan_val); - __chan_val = cpu_to_be32(chan_val); - return regmap_bulk_write(st->regmap, reg, &__chan_val, - sizeof(__chan_val)); + st->chan_val = cpu_to_be32(chan_val); + return regmap_bulk_write(st->regmap, reg, &st->chan_val, + sizeof(st->chan_val)); } static int __ltc2983_chan_custom_sensor_assign(struct ltc2983_data *st, diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index 1519379b116e..ab2106a09f9c 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -2848,8 +2848,8 @@ err: static void __exit ib_core_cleanup(void) { roce_gid_mgmt_cleanup(); - nldev_exit(); rdma_nl_unregister(RDMA_NL_LS); + nldev_exit(); unregister_pernet_device(&rdma_dev_net_ops); unregister_blocking_lsm_notifier(&ibdev_lsm_nb); ib_sa_cleanup(); diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c index 1893aa613ad7..674344eb8e2f 100644 --- a/drivers/infiniband/core/mad.c +++ b/drivers/infiniband/core/mad.c @@ -59,9 +59,6 @@ static void create_mad_addr_info(struct ib_mad_send_wr_private *mad_send_wr, struct ib_mad_qp_info *qp_info, struct trace_event_raw_ib_mad_send_template *entry) { - u16 pkey; - struct ib_device *dev = qp_info->port_priv->device; - u32 pnum = qp_info->port_priv->port_num; struct ib_ud_wr *wr = &mad_send_wr->send_wr; struct rdma_ah_attr attr = {}; @@ -69,8 +66,6 @@ static void create_mad_addr_info(struct ib_mad_send_wr_private *mad_send_wr, /* These are common */ entry->sl = attr.sl; - ib_query_pkey(dev, pnum, wr->pkey_index, &pkey); - entry->pkey = pkey; entry->rqpn = wr->remote_qpn; entry->rqkey = wr->remote_qkey; entry->dlid = 
rdma_ah_get_dlid(&attr); diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c index cd89e59cbe33..7ad3ba7d5a0a 100644 --- a/drivers/infiniband/core/nldev.c +++ b/drivers/infiniband/core/nldev.c @@ -511,7 +511,7 @@ static int fill_res_qp_entry(struct sk_buff *msg, bool has_cap_net_admin, /* In create_qp() port is not set yet */ if (qp->port && nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, qp->port)) - return -EINVAL; + return -EMSGSIZE; ret = nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qp->qp_num); if (ret) @@ -550,7 +550,7 @@ static int fill_res_cm_id_entry(struct sk_buff *msg, bool has_cap_net_admin, struct rdma_cm_id *cm_id = &id_priv->id; if (port && port != cm_id->port_num) - return 0; + return -EAGAIN; if (cm_id->port_num && nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, cm_id->port_num)) @@ -892,6 +892,8 @@ static int fill_stat_counter_qps(struct sk_buff *msg, int ret = 0; table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP); + if (!table_attr) + return -EMSGSIZE; rt = &counter->device->res[RDMA_RESTRACK_QP]; xa_lock(&rt->xa); diff --git a/drivers/infiniband/core/restrack.c b/drivers/infiniband/core/restrack.c index 1f935d9f6178..01a499a8b88d 100644 --- a/drivers/infiniband/core/restrack.c +++ b/drivers/infiniband/core/restrack.c @@ -343,8 +343,6 @@ void rdma_restrack_del(struct rdma_restrack_entry *res) rt = &dev->res[res->type]; old = xa_erase(&rt->xa, res->id); - if (res->type == RDMA_RESTRACK_MR) - return; WARN_ON(old != res); out: diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c index 8d709986b88c..253ccaf343f6 100644 --- a/drivers/infiniband/core/sysfs.c +++ b/drivers/infiniband/core/sysfs.c @@ -1198,6 +1198,9 @@ static struct ib_port *setup_port(struct ib_core_device *coredev, int port_num, p->port_num = port_num; kobject_init(&p->kobj, &port_type); + if (device->port_data && is_full_dev) + device->port_data[port_num].sysfs = p; + cur_group = p->groups_list; ret = alloc_port_table_group("gids", &p->groups[0], p->attrs_list, attr->gid_tbl_len, show_port_gid); @@ -1243,9 +1246,6 @@ static struct ib_port *setup_port(struct ib_core_device *coredev, int port_num, } list_add_tail(&p->kobj.entry, &coredev->port_list); - if (device->port_data && is_full_dev) - device->port_data[port_num].sysfs = p; - return p; err_groups: @@ -1253,6 +1253,8 @@ err_groups: err_del: kobject_del(&p->kobj); err_put: + if (device->port_data && is_full_dev) + device->port_data[port_num].sysfs = NULL; kobject_put(&p->kobj); return ERR_PTR(ret); } @@ -1261,14 +1263,17 @@ static void destroy_port(struct ib_core_device *coredev, struct ib_port *port) { bool is_full_dev = &port->ibdev->coredev == coredev; - if (port->ibdev->port_data && - port->ibdev->port_data[port->port_num].sysfs == port) - port->ibdev->port_data[port->port_num].sysfs = NULL; list_del(&port->kobj.entry); if (is_full_dev) sysfs_remove_groups(&port->kobj, port->ibdev->ops.port_groups); + sysfs_remove_groups(&port->kobj, port->groups_list); kobject_del(&port->kobj); + + if (port->ibdev->port_data && + port->ibdev->port_data[port->port_num].sysfs == port) + port->ibdev->port_data[port->port_num].sysfs = NULL; + kobject_put(&port->kobj); } diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c index 98c813ba4304..4c403d9e90cb 100644 --- a/drivers/infiniband/hw/hfi1/affinity.c +++ b/drivers/infiniband/hw/hfi1/affinity.c @@ -178,6 +178,8 @@ out: for (node = 0; node < node_affinity.num_possible_nodes; node++) hfi1_per_node_cntr[node] = 1; + pci_dev_put(dev); + 
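/*
 * Illustrative sketch, not part of the patch: why the pci_dev_put() added in
 * hfi1_dev_affinity_init() above is needed.  pci_get_device() returns a
 * referenced struct pci_dev, so every successful lookup has to be balanced
 * with a put once the caller is done, including on early-return paths.  The
 * vendor ID below is only a placeholder.
 */
#include <linux/pci.h>

static int example_check_node(void)
{
	struct pci_dev *pdev;
	int node;

	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, NULL);
	if (!pdev)
		return -ENODEV;

	node = dev_to_node(&pdev->dev);

	/* drop the reference taken by pci_get_device() */
	pci_dev_put(pdev);

	return node;
}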
return 0; } diff --git a/drivers/infiniband/hw/hfi1/firmware.c b/drivers/infiniband/hw/hfi1/firmware.c index 31e63e245ea9..ddf3217893f8 100644 --- a/drivers/infiniband/hw/hfi1/firmware.c +++ b/drivers/infiniband/hw/hfi1/firmware.c @@ -1744,6 +1744,7 @@ int parse_platform_config(struct hfi1_devdata *dd) if (!dd->platform_config.data) { dd_dev_err(dd, "%s: Missing config file\n", __func__); + ret = -EINVAL; goto bail; } ptr = (u32 *)dd->platform_config.data; @@ -1752,6 +1753,7 @@ int parse_platform_config(struct hfi1_devdata *dd) ptr++; if (magic_num != PLATFORM_CONFIG_MAGIC_NUM) { dd_dev_err(dd, "%s: Bad config file\n", __func__); + ret = -EINVAL; goto bail; } @@ -1775,6 +1777,7 @@ int parse_platform_config(struct hfi1_devdata *dd) if (file_length > dd->platform_config.size) { dd_dev_info(dd, "%s:File claims to be larger than read size\n", __func__); + ret = -EINVAL; goto bail; } else if (file_length < dd->platform_config.size) { dd_dev_info(dd, @@ -1795,6 +1798,7 @@ int parse_platform_config(struct hfi1_devdata *dd) dd_dev_err(dd, "%s: Failed validation at offset %ld\n", __func__, (ptr - (u32 *) dd->platform_config.data)); + ret = -EINVAL; goto bail; } @@ -1838,6 +1842,7 @@ int parse_platform_config(struct hfi1_devdata *dd) __func__, table_type, (ptr - (u32 *) dd->platform_config.data)); + ret = -EINVAL; goto bail; /* We don't trust this file now */ } pcfgcache->config_tables[table_type].table = ptr; @@ -1857,6 +1862,7 @@ int parse_platform_config(struct hfi1_devdata *dd) __func__, table_type, (ptr - (u32 *)dd->platform_config.data)); + ret = -EINVAL; goto bail; /* We don't trust this file now */ } pcfgcache->config_tables[table_type].table_metadata = diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index 1421896abaf0..79d92b789984 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -151,21 +151,29 @@ static void set_atomic_seg(const struct ib_send_wr *wr, hr_reg_write(rc_sq_wqe, RC_SEND_WQE_SGE_NUM, valid_num_sge); } +static unsigned int get_std_sge_num(struct hns_roce_qp *qp) +{ + if (qp->ibqp.qp_type == IB_QPT_GSI || qp->ibqp.qp_type == IB_QPT_UD) + return 0; + + return HNS_ROCE_SGE_IN_WQE; +} + static int fill_ext_sge_inl_data(struct hns_roce_qp *qp, const struct ib_send_wr *wr, unsigned int *sge_idx, u32 msg_len) { struct ib_device *ibdev = &(to_hr_dev(qp->ibqp.device))->ib_dev; - unsigned int dseg_len = sizeof(struct hns_roce_v2_wqe_data_seg); - unsigned int ext_sge_sz = qp->sq.max_gs * dseg_len; unsigned int left_len_in_pg; unsigned int idx = *sge_idx; + unsigned int std_sge_num; unsigned int i = 0; unsigned int len; void *addr; void *dseg; - if (msg_len > ext_sge_sz) { + std_sge_num = get_std_sge_num(qp); + if (msg_len > (qp->sq.max_gs - std_sge_num) * HNS_ROCE_SGE_SIZE) { ibdev_err(ibdev, "no enough extended sge space for inline data.\n"); return -EINVAL; @@ -185,7 +193,7 @@ static int fill_ext_sge_inl_data(struct hns_roce_qp *qp, if (len <= left_len_in_pg) { memcpy(dseg, addr, len); - idx += len / dseg_len; + idx += len / HNS_ROCE_SGE_SIZE; i++; if (i >= wr->num_sge) @@ -200,7 +208,7 @@ static int fill_ext_sge_inl_data(struct hns_roce_qp *qp, len -= left_len_in_pg; addr += left_len_in_pg; - idx += left_len_in_pg / dseg_len; + idx += left_len_in_pg / HNS_ROCE_SGE_SIZE; dseg = hns_roce_get_extend_sge(qp, idx & (qp->sge.sge_cnt - 1)); left_len_in_pg = 1 << HNS_HW_PAGE_SHIFT; @@ -1249,6 +1257,30 @@ static void update_cmdq_status(struct hns_roce_dev *hr_dev) hr_dev->cmd.state = 
HNS_ROCE_CMDQ_STATE_FATAL_ERR; } +static int hns_roce_cmd_err_convert_errno(u16 desc_ret) +{ + struct hns_roce_cmd_errcode errcode_table[] = { + {CMD_EXEC_SUCCESS, 0}, + {CMD_NO_AUTH, -EPERM}, + {CMD_NOT_EXIST, -EOPNOTSUPP}, + {CMD_CRQ_FULL, -EXFULL}, + {CMD_NEXT_ERR, -ENOSR}, + {CMD_NOT_EXEC, -ENOTBLK}, + {CMD_PARA_ERR, -EINVAL}, + {CMD_RESULT_ERR, -ERANGE}, + {CMD_TIMEOUT, -ETIME}, + {CMD_HILINK_ERR, -ENOLINK}, + {CMD_INFO_ILLEGAL, -ENXIO}, + {CMD_INVALID, -EBADR}, + }; + u16 i; + + for (i = 0; i < ARRAY_SIZE(errcode_table); i++) + if (desc_ret == errcode_table[i].return_status) + return errcode_table[i].errno; + return -EIO; +} + static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev, struct hns_roce_cmq_desc *desc, int num) { @@ -1293,7 +1325,7 @@ static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev, dev_err_ratelimited(hr_dev->dev, "Cmdq IO error, opcode = %x, return = %x\n", desc->opcode, desc_ret); - ret = -EIO; + ret = hns_roce_cmd_err_convert_errno(desc_ret); } } else { /* FW/HW reset or incorrect number of desc */ @@ -2363,6 +2395,9 @@ static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev) V2_QUERY_PF_CAPS_D_RQWQE_HOP_NUM_M, V2_QUERY_PF_CAPS_D_RQWQE_HOP_NUM_S); + if (!(caps->page_size_cap & PAGE_SIZE)) + caps->page_size_cap = HNS_ROCE_V2_PAGE_SIZE_SUPPORTED; + return 0; } @@ -3016,7 +3051,8 @@ static int set_mtpt_pbl(struct hns_roce_dev *hr_dev, int i, count; count = hns_roce_mtr_find(hr_dev, &mr->pbl_mtr, 0, pages, - ARRAY_SIZE(pages), &pbl_ba); + min_t(int, ARRAY_SIZE(pages), mr->npages), + &pbl_ba); if (count < 1) { ibdev_err(ibdev, "failed to find PBL mtr, count = %d.\n", count); @@ -5121,6 +5157,8 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, rdma_ah_set_sl(&qp_attr->ah_attr, hr_reg_read(&context, QPC_SL)); + rdma_ah_set_port_num(&qp_attr->ah_attr, hr_qp->port + 1); + rdma_ah_set_ah_flags(&qp_attr->ah_attr, IB_AH_GRH); grh->flow_label = hr_reg_read(&context, QPC_FL); grh->sgid_index = hr_reg_read(&context, QPC_GMV_IDX); grh->hop_limit = hr_reg_read(&context, QPC_HOPLIMIT); diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h index 2f4a0019a716..67f5b6fcfa1b 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h @@ -276,6 +276,11 @@ enum hns_roce_cmd_return_status { CMD_OTHER_ERR = 0xff }; +struct hns_roce_cmd_errcode { + enum hns_roce_cmd_return_status return_status; + int errno; +}; + enum hns_roce_sgid_type { GID_TYPE_FLAG_ROCE_V1 = 0, GID_TYPE_FLAG_ROCE_V2_IPV4, diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c index 20360df25771..a593c142cd6b 100644 --- a/drivers/infiniband/hw/hns/hns_roce_mr.c +++ b/drivers/infiniband/hw/hns/hns_roce_mr.c @@ -415,10 +415,10 @@ struct ib_mr *hns_roce_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, return &mr->ibmr; -err_key: - free_mr_key(hr_dev, mr); err_pbl: free_mr_pbl(hr_dev, mr); +err_key: + free_mr_key(hr_dev, mr); err_free: kfree(mr); return ERR_PTR(ret); diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c index 911902d2b93e..c5971a840b87 100644 --- a/drivers/infiniband/hw/irdma/verbs.c +++ b/drivers/infiniband/hw/irdma/verbs.c @@ -60,36 +60,6 @@ static int irdma_query_device(struct ib_device *ibdev, return 0; } -/** - * irdma_get_eth_speed_and_width - Get IB port speed and width from netdev speed - * @link_speed: netdev phy link speed - * @active_speed: IB port speed - * @active_width: IB port 
width - */ -static void irdma_get_eth_speed_and_width(u32 link_speed, u16 *active_speed, - u8 *active_width) -{ - if (link_speed <= SPEED_1000) { - *active_width = IB_WIDTH_1X; - *active_speed = IB_SPEED_SDR; - } else if (link_speed <= SPEED_10000) { - *active_width = IB_WIDTH_1X; - *active_speed = IB_SPEED_FDR10; - } else if (link_speed <= SPEED_20000) { - *active_width = IB_WIDTH_4X; - *active_speed = IB_SPEED_DDR; - } else if (link_speed <= SPEED_25000) { - *active_width = IB_WIDTH_1X; - *active_speed = IB_SPEED_EDR; - } else if (link_speed <= SPEED_40000) { - *active_width = IB_WIDTH_4X; - *active_speed = IB_SPEED_FDR10; - } else { - *active_width = IB_WIDTH_4X; - *active_speed = IB_SPEED_EDR; - } -} - /** * irdma_query_port - get port attributes * @ibdev: device pointer from stack @@ -117,8 +87,9 @@ static int irdma_query_port(struct ib_device *ibdev, u32 port, props->state = IB_PORT_DOWN; props->phys_state = IB_PORT_PHYS_STATE_DISABLED; } - irdma_get_eth_speed_and_width(SPEED_100000, &props->active_speed, - &props->active_width); + + ib_get_eth_speed(ibdev, port, &props->active_speed, + &props->active_width); if (rdma_protocol_roce(ibdev, 1)) { props->gid_tbl_len = 32; diff --git a/drivers/infiniband/hw/mlx5/counters.c b/drivers/infiniband/hw/mlx5/counters.c index 224ba36f2946..1a0ecf439c09 100644 --- a/drivers/infiniband/hw/mlx5/counters.c +++ b/drivers/infiniband/hw/mlx5/counters.c @@ -249,7 +249,6 @@ static int mlx5_ib_get_hw_stats(struct ib_device *ibdev, const struct mlx5_ib_counters *cnts = get_counters(dev, port_num - 1); struct mlx5_core_dev *mdev; int ret, num_counters; - u32 mdev_port_num; if (!stats) return -EINVAL; @@ -270,8 +269,9 @@ static int mlx5_ib_get_hw_stats(struct ib_device *ibdev, } if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) { - mdev = mlx5_ib_get_native_port_mdev(dev, port_num, - &mdev_port_num); + if (!port_num) + port_num = 1; + mdev = mlx5_ib_get_native_port_mdev(dev, port_num, NULL); if (!mdev) { /* If port is not affiliated yet, its in down state * which doesn't have any counters yet, so it would be diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index e5abbcfc1d57..55b05a3e31b8 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -4499,6 +4499,40 @@ static bool mlx5_ib_modify_qp_allowed(struct mlx5_ib_dev *dev, return false; } +static int validate_rd_atomic(struct mlx5_ib_dev *dev, struct ib_qp_attr *attr, + int attr_mask, enum ib_qp_type qp_type) +{ + int log_max_ra_res; + int log_max_ra_req; + + if (qp_type == MLX5_IB_QPT_DCI) { + log_max_ra_res = 1 << MLX5_CAP_GEN(dev->mdev, + log_max_ra_res_dc); + log_max_ra_req = 1 << MLX5_CAP_GEN(dev->mdev, + log_max_ra_req_dc); + } else { + log_max_ra_res = 1 << MLX5_CAP_GEN(dev->mdev, + log_max_ra_res_qp); + log_max_ra_req = 1 << MLX5_CAP_GEN(dev->mdev, + log_max_ra_req_qp); + } + + if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC && + attr->max_rd_atomic > log_max_ra_res) { + mlx5_ib_dbg(dev, "invalid max_rd_atomic value %d\n", + attr->max_rd_atomic); + return false; + } + + if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC && + attr->max_dest_rd_atomic > log_max_ra_req) { + mlx5_ib_dbg(dev, "invalid max_dest_rd_atomic value %d\n", + attr->max_dest_rd_atomic); + return false; + } + return true; +} + int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, struct ib_udata *udata) { @@ -4586,21 +4620,8 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, goto out; } - if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC && - 
attr->max_rd_atomic > - (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_res_qp))) { - mlx5_ib_dbg(dev, "invalid max_rd_atomic value %d\n", - attr->max_rd_atomic); + if (!validate_rd_atomic(dev, attr, attr_mask, qp_type)) goto out; - } - - if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC && - attr->max_dest_rd_atomic > - (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_req_qp))) { - mlx5_ib_dbg(dev, "invalid max_dest_rd_atomic value %d\n", - attr->max_dest_rd_atomic); - goto out; - } if (cur_state == new_state && cur_state == IB_QPS_RESET) { err = 0; diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c index 57ebf4871608..d7a968356a9b 100644 --- a/drivers/infiniband/sw/rxe/rxe_qp.c +++ b/drivers/infiniband/sw/rxe/rxe_qp.c @@ -830,12 +830,12 @@ static void rxe_qp_do_cleanup(struct work_struct *work) qp->resp.mr = NULL; } - if (qp_type(qp) == IB_QPT_RC) - sk_dst_reset(qp->sk->sk); - free_rd_atomic_resources(qp); if (qp->sk) { + if (qp_type(qp) == IB_QPT_RC) + sk_dst_reset(qp->sk->sk); + kernel_sock_shutdown(qp->sk, SHUT_RDWR); sock_release(qp->sk); } diff --git a/drivers/infiniband/sw/siw/siw_cq.c b/drivers/infiniband/sw/siw/siw_cq.c index d68e37859e73..403029de6b92 100644 --- a/drivers/infiniband/sw/siw/siw_cq.c +++ b/drivers/infiniband/sw/siw/siw_cq.c @@ -56,8 +56,6 @@ int siw_reap_cqe(struct siw_cq *cq, struct ib_wc *wc) if (READ_ONCE(cqe->flags) & SIW_WQE_VALID) { memset(wc, 0, sizeof(*wc)); wc->wr_id = cqe->id; - wc->status = map_cqe_status[cqe->status].ib; - wc->opcode = map_wc_opcode[cqe->opcode]; wc->byte_len = cqe->bytes; /* @@ -71,10 +69,32 @@ int siw_reap_cqe(struct siw_cq *cq, struct ib_wc *wc) wc->wc_flags = IB_WC_WITH_INVALIDATE; } wc->qp = cqe->base_qp; + wc->opcode = map_wc_opcode[cqe->opcode]; + wc->status = map_cqe_status[cqe->status].ib; siw_dbg_cq(cq, "idx %u, type %d, flags %2x, id 0x%pK\n", cq->cq_get % cq->num_cqe, cqe->opcode, cqe->flags, (void *)(uintptr_t)cqe->id); + } else { + /* + * A malicious user may set invalid opcode or + * status in the user mmapped CQE array. + * Sanity check and correct values in that case + * to avoid out-of-bounds access to global arrays + * for opcode and status mapping. 
+ */ + u8 opcode = cqe->opcode; + u16 status = cqe->status; + + if (opcode >= SIW_NUM_OPCODES) { + opcode = 0; + status = SIW_WC_GENERAL_ERR; + } else if (status >= SIW_NUM_WC_STATUS) { + status = SIW_WC_GENERAL_ERR; + } + wc->opcode = map_wc_opcode[opcode]; + wc->status = map_cqe_status[status].ib; + } WRITE_ONCE(cqe->flags, 0); cq->cq_get++; diff --git a/drivers/infiniband/sw/siw/siw_qp_tx.c b/drivers/infiniband/sw/siw/siw_qp_tx.c index 7d47b521070b..05052b49107f 100644 --- a/drivers/infiniband/sw/siw/siw_qp_tx.c +++ b/drivers/infiniband/sw/siw/siw_qp_tx.c @@ -29,7 +29,7 @@ static struct page *siw_get_pblpage(struct siw_mem *mem, u64 addr, int *idx) dma_addr_t paddr = siw_pbl_get_buffer(pbl, offset, NULL, idx); if (paddr) - return virt_to_page((void *)paddr); + return virt_to_page((void *)(uintptr_t)paddr); return NULL; } diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c index ff33659acffa..9c7fbda9e068 100644 --- a/drivers/infiniband/sw/siw/siw_verbs.c +++ b/drivers/infiniband/sw/siw/siw_verbs.c @@ -674,13 +674,45 @@ static int siw_copy_inline_sgl(const struct ib_send_wr *core_wr, static int siw_sq_flush_wr(struct siw_qp *qp, const struct ib_send_wr *wr, const struct ib_send_wr **bad_wr) { - struct siw_sqe sqe = {}; int rv = 0; while (wr) { - sqe.id = wr->wr_id; - sqe.opcode = wr->opcode; - rv = siw_sqe_complete(qp, &sqe, 0, SIW_WC_WR_FLUSH_ERR); + struct siw_sqe sqe = {}; + + switch (wr->opcode) { + case IB_WR_RDMA_WRITE: + sqe.opcode = SIW_OP_WRITE; + break; + case IB_WR_RDMA_READ: + sqe.opcode = SIW_OP_READ; + break; + case IB_WR_RDMA_READ_WITH_INV: + sqe.opcode = SIW_OP_READ_LOCAL_INV; + break; + case IB_WR_SEND: + sqe.opcode = SIW_OP_SEND; + break; + case IB_WR_SEND_WITH_IMM: + sqe.opcode = SIW_OP_SEND_WITH_IMM; + break; + case IB_WR_SEND_WITH_INV: + sqe.opcode = SIW_OP_SEND_REMOTE_INV; + break; + case IB_WR_LOCAL_INV: + sqe.opcode = SIW_OP_INVAL_STAG; + break; + case IB_WR_REG_MR: + sqe.opcode = SIW_OP_REG_MR; + break; + default: + rv = -EINVAL; + break; + } + if (!rv) { + sqe.id = wr->wr_id; + rv = siw_sqe_complete(qp, &sqe, 0, + SIW_WC_WR_FLUSH_ERR); + } if (rv) { if (bad_wr) *bad_wr = wr; diff --git a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c index 5b05cf3837da..28e9b70844e4 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c @@ -42,6 +42,11 @@ static const struct nla_policy ipoib_policy[IFLA_IPOIB_MAX + 1] = { [IFLA_IPOIB_UMCAST] = { .type = NLA_U16 }, }; +static unsigned int ipoib_get_max_num_queues(void) +{ + return min_t(unsigned int, num_possible_cpus(), 128); +} + static int ipoib_fill_info(struct sk_buff *skb, const struct net_device *dev) { struct ipoib_dev_priv *priv = ipoib_priv(dev); @@ -173,6 +178,8 @@ static struct rtnl_link_ops ipoib_link_ops __read_mostly = { .changelink = ipoib_changelink, .get_size = ipoib_get_size, .fill_info = ipoib_fill_info, + .get_num_rx_queues = ipoib_get_max_num_queues, + .get_num_tx_queues = ipoib_get_max_num_queues, }; struct rtnl_link_ops *ipoib_get_link_ops(void) diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index 2f4991cea98c..a6117a7d0ab1 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c @@ -3397,7 +3397,8 @@ static int srp_parse_options(struct net *net, const char *buf, break; case SRP_OPT_PKEY: - if (match_hex(args, &token)) { + ret = match_hex(args, &token); + if (ret) { pr_warn("bad P_Key parameter 
'%s'\n", p); goto out; } @@ -3457,7 +3458,8 @@ static int srp_parse_options(struct net *net, const char *buf, break; case SRP_OPT_MAX_SECT: - if (match_int(args, &token)) { + ret = match_int(args, &token); + if (ret) { pr_warn("bad max sect parameter '%s'\n", p); goto out; } @@ -3465,8 +3467,15 @@ static int srp_parse_options(struct net *net, const char *buf, break; case SRP_OPT_QUEUE_SIZE: - if (match_int(args, &token) || token < 1) { + ret = match_int(args, &token); + if (ret) { + pr_warn("match_int() failed for queue_size parameter '%s', Error %d\n", + p, ret); + goto out; + } + if (token < 1) { pr_warn("bad queue_size parameter '%s'\n", p); + ret = -EINVAL; goto out; } target->scsi_host->can_queue = token; @@ -3477,25 +3486,40 @@ static int srp_parse_options(struct net *net, const char *buf, break; case SRP_OPT_MAX_CMD_PER_LUN: - if (match_int(args, &token) || token < 1) { + ret = match_int(args, &token); + if (ret) { + pr_warn("match_int() failed for max cmd_per_lun parameter '%s', Error %d\n", + p, ret); + goto out; + } + if (token < 1) { pr_warn("bad max cmd_per_lun parameter '%s'\n", p); + ret = -EINVAL; goto out; } target->scsi_host->cmd_per_lun = token; break; case SRP_OPT_TARGET_CAN_QUEUE: - if (match_int(args, &token) || token < 1) { + ret = match_int(args, &token); + if (ret) { + pr_warn("match_int() failed for max target_can_queue parameter '%s', Error %d\n", + p, ret); + goto out; + } + if (token < 1) { pr_warn("bad max target_can_queue parameter '%s'\n", p); + ret = -EINVAL; goto out; } target->target_can_queue = token; break; case SRP_OPT_IO_CLASS: - if (match_hex(args, &token)) { + ret = match_hex(args, &token); + if (ret) { pr_warn("bad IO class parameter '%s'\n", p); goto out; } @@ -3504,6 +3528,7 @@ static int srp_parse_options(struct net *net, const char *buf, pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n", token, SRP_REV10_IB_IO_CLASS, SRP_REV16A_IB_IO_CLASS); + ret = -EINVAL; goto out; } target->io_class = token; @@ -3526,16 +3551,24 @@ static int srp_parse_options(struct net *net, const char *buf, break; case SRP_OPT_CMD_SG_ENTRIES: - if (match_int(args, &token) || token < 1 || token > 255) { + ret = match_int(args, &token); + if (ret) { + pr_warn("match_int() failed for max cmd_sg_entries parameter '%s', Error %d\n", + p, ret); + goto out; + } + if (token < 1 || token > 255) { pr_warn("bad max cmd_sg_entries parameter '%s'\n", p); + ret = -EINVAL; goto out; } target->cmd_sg_cnt = token; break; case SRP_OPT_ALLOW_EXT_SG: - if (match_int(args, &token)) { + ret = match_int(args, &token); + if (ret) { pr_warn("bad allow_ext_sg parameter '%s'\n", p); goto out; } @@ -3543,43 +3576,77 @@ static int srp_parse_options(struct net *net, const char *buf, break; case SRP_OPT_SG_TABLESIZE: - if (match_int(args, &token) || token < 1 || - token > SG_MAX_SEGMENTS) { + ret = match_int(args, &token); + if (ret) { + pr_warn("match_int() failed for max sg_tablesize parameter '%s', Error %d\n", + p, ret); + goto out; + } + if (token < 1 || token > SG_MAX_SEGMENTS) { pr_warn("bad max sg_tablesize parameter '%s'\n", p); + ret = -EINVAL; goto out; } target->sg_tablesize = token; break; case SRP_OPT_COMP_VECTOR: - if (match_int(args, &token) || token < 0) { + ret = match_int(args, &token); + if (ret) { + pr_warn("match_int() failed for comp_vector parameter '%s', Error %d\n", + p, ret); + goto out; + } + if (token < 0) { pr_warn("bad comp_vector parameter '%s'\n", p); + ret = -EINVAL; goto out; } target->comp_vector = token; break; case SRP_OPT_TL_RETRY_COUNT: - 
if (match_int(args, &token) || token < 2 || token > 7) { + ret = match_int(args, &token); + if (ret) { + pr_warn("match_int() failed for tl_retry_count parameter '%s', Error %d\n", + p, ret); + goto out; + } + if (token < 2 || token > 7) { pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n", p); + ret = -EINVAL; goto out; } target->tl_retry_count = token; break; case SRP_OPT_MAX_IT_IU_SIZE: - if (match_int(args, &token) || token < 0) { + ret = match_int(args, &token); + if (ret) { + pr_warn("match_int() failed for max it_iu_size parameter '%s', Error %d\n", + p, ret); + goto out; + } + if (token < 0) { pr_warn("bad maximum initiator to target IU size '%s'\n", p); + ret = -EINVAL; goto out; } target->max_it_iu_size = token; break; case SRP_OPT_CH_COUNT: - if (match_int(args, &token) || token < 1) { + ret = match_int(args, &token); + if (ret) { + pr_warn("match_int() failed for channel count parameter '%s', Error %d\n", + p, ret); + goto out; + } + if (token < 1) { pr_warn("bad channel count %s\n", p); + ret = -EINVAL; goto out; } target->ch_count = token; @@ -3588,6 +3655,7 @@ static int srp_parse_options(struct net *net, const char *buf, default: pr_warn("unknown parameter or missing value '%s' in target creation request\n", p); + ret = -EINVAL; goto out; } } diff --git a/drivers/input/joystick/Kconfig b/drivers/input/joystick/Kconfig index 483653110314..809c89666877 100644 --- a/drivers/input/joystick/Kconfig +++ b/drivers/input/joystick/Kconfig @@ -46,6 +46,7 @@ config JOYSTICK_A3D config JOYSTICK_ADC tristate "Simple joystick connected over ADC" depends on IIO + select IIO_BUFFER select IIO_BUFFER_CB help Say Y here if you have a simple joystick connected over ADC. diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig index dd5227cf8696..b5b8ddb536be 100644 --- a/drivers/input/misc/Kconfig +++ b/drivers/input/misc/Kconfig @@ -330,7 +330,7 @@ config INPUT_CPCAP_PWRBUTTON config INPUT_WISTRON_BTNS tristate "x86 Wistron laptop button interface" - depends on X86_32 + depends on X86_32 && !UML select INPUT_SPARSEKMAP select NEW_LEDS select LEDS_CLASS diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c index b9e2219efbb8..6858a3e20a0c 100644 --- a/drivers/input/touchscreen/elants_i2c.c +++ b/drivers/input/touchscreen/elants_i2c.c @@ -1329,14 +1329,12 @@ static int elants_i2c_power_on(struct elants_data *ts) if (IS_ERR_OR_NULL(ts->reset_gpio)) return 0; - gpiod_set_value_cansleep(ts->reset_gpio, 1); - error = regulator_enable(ts->vcc33); if (error) { dev_err(&ts->client->dev, "failed to enable vcc33 regulator: %d\n", error); - goto release_reset_gpio; + return error; } error = regulator_enable(ts->vccio); @@ -1345,7 +1343,7 @@ static int elants_i2c_power_on(struct elants_data *ts) "failed to enable vccio regulator: %d\n", error); regulator_disable(ts->vcc33); - goto release_reset_gpio; + return error; } /* @@ -1354,7 +1352,6 @@ static int elants_i2c_power_on(struct elants_data *ts) */ udelay(ELAN_POWERON_DELAY_USEC); -release_reset_gpio: gpiod_set_value_cansleep(ts->reset_gpio, 0); if (error) return error; @@ -1462,7 +1459,7 @@ static int elants_i2c_probe(struct i2c_client *client) return error; } - ts->reset_gpio = devm_gpiod_get(&client->dev, "reset", GPIOD_OUT_LOW); + ts->reset_gpio = devm_gpiod_get(&client->dev, "reset", GPIOD_OUT_HIGH); if (IS_ERR(ts->reset_gpio)) { error = PTR_ERR(ts->reset_gpio); diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c index 9a7742732d73..ce91a6d8532f 100644 --- 
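/*
 * Illustrative sketch, not part of the patch: the power-on ordering the
 * elants_i2c changes above move to.  The reset line is requested already
 * asserted (GPIOD_OUT_HIGH is the logical "asserted" state; electrical
 * polarity comes from the firmware description), the supply is brought up,
 * and only then is reset released.  All names and the settle time are
 * placeholders.
 */
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/regulator/consumer.h>

static int example_power_on(struct device *dev, struct regulator *vcc,
			    struct gpio_desc **reset_out)
{
	struct gpio_desc *reset;
	int error;

	/* claim the reset line and keep the controller in reset */
	reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
	if (IS_ERR(reset))
		return PTR_ERR(reset);

	error = regulator_enable(vcc);
	if (error)
		return error;

	usleep_range(500, 1000);	/* arbitrary settle time for the rail */

	/* release reset only after the supply is stable */
	gpiod_set_value_cansleep(reset, 0);

	*reset_out = reset;
	return 0;
}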
a/drivers/iommu/amd/init.c +++ b/drivers/iommu/amd/init.c @@ -3226,6 +3226,13 @@ static int __init parse_ivrs_acpihid(char *str) return 1; } + /* + * Ignore leading zeroes after ':', so e.g., AMDI0095:00 + * will match AMDI0095:0 in the second strcmp in acpi_dev_hid_uid_match + */ + while (*uid == '0' && *(uid + 1)) + uid++; + i = early_acpihid_map_size++; memcpy(early_acpihid_map[i].hid, hid, strlen(hid)); memcpy(early_acpihid_map[i].uid, uid, strlen(uid)); diff --git a/drivers/iommu/amd/iommu_v2.c b/drivers/iommu/amd/iommu_v2.c index a45c5536d250..c96cf9b21719 100644 --- a/drivers/iommu/amd/iommu_v2.c +++ b/drivers/iommu/amd/iommu_v2.c @@ -588,6 +588,7 @@ out_drop_state: put_device_state(dev_state); out: + pci_dev_put(pdev); return ret; } diff --git a/drivers/iommu/fsl_pamu.c b/drivers/iommu/fsl_pamu.c index fc38b1fba7cf..bb5d253188a1 100644 --- a/drivers/iommu/fsl_pamu.c +++ b/drivers/iommu/fsl_pamu.c @@ -865,7 +865,7 @@ static int fsl_pamu_probe(struct platform_device *pdev) ret = create_csd(ppaact_phys, mem_size, csd_port_id); if (ret) { dev_err(dev, "could not create coherence subdomain\n"); - return ret; + goto error; } } diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c index 7f23ad61c094..823f1a7d8c6e 100644 --- a/drivers/iommu/rockchip-iommu.c +++ b/drivers/iommu/rockchip-iommu.c @@ -280,19 +280,17 @@ static u32 rk_mk_pte(phys_addr_t page, int prot) * 11:9 - Page address bit 34:32 * 8:4 - Page address bit 39:35 * 3 - Security - * 2 - Readable - * 1 - Writable + * 2 - Writable + * 1 - Readable * 0 - 1 if Page @ Page address is valid */ -#define RK_PTE_PAGE_READABLE_V2 BIT(2) -#define RK_PTE_PAGE_WRITABLE_V2 BIT(1) static u32 rk_mk_pte_v2(phys_addr_t page, int prot) { u32 flags = 0; - flags |= (prot & IOMMU_READ) ? RK_PTE_PAGE_READABLE_V2 : 0; - flags |= (prot & IOMMU_WRITE) ? RK_PTE_PAGE_WRITABLE_V2 : 0; + flags |= (prot & IOMMU_READ) ? RK_PTE_PAGE_READABLE : 0; + flags |= (prot & IOMMU_WRITE) ? 
RK_PTE_PAGE_WRITABLE : 0; return rk_mk_dte_v2(page) | flags; } diff --git a/drivers/iommu/sun50i-iommu.c b/drivers/iommu/sun50i-iommu.c index 92997021e188..ed3574195599 100644 --- a/drivers/iommu/sun50i-iommu.c +++ b/drivers/iommu/sun50i-iommu.c @@ -27,6 +27,7 @@ #include #define IOMMU_RESET_REG 0x010 +#define IOMMU_RESET_RELEASE_ALL 0xffffffff #define IOMMU_ENABLE_REG 0x020 #define IOMMU_ENABLE_ENABLE BIT(0) @@ -270,7 +271,7 @@ static u32 sun50i_mk_pte(phys_addr_t page, int prot) enum sun50i_iommu_aci aci; u32 flags = 0; - if (prot & (IOMMU_READ | IOMMU_WRITE)) + if ((prot & (IOMMU_READ | IOMMU_WRITE)) == (IOMMU_READ | IOMMU_WRITE)) aci = SUN50I_IOMMU_ACI_RD_WR; else if (prot & IOMMU_READ) aci = SUN50I_IOMMU_ACI_RD; @@ -511,7 +512,7 @@ static u32 *sun50i_dte_get_page_table(struct sun50i_iommu_domain *sun50i_domain, sun50i_iommu_free_page_table(iommu, drop_pt); } - sun50i_table_flush(sun50i_domain, page_table, PT_SIZE); + sun50i_table_flush(sun50i_domain, page_table, NUM_PT_ENTRIES); sun50i_table_flush(sun50i_domain, dte_addr, 1); return page_table; @@ -601,7 +602,6 @@ static struct iommu_domain *sun50i_iommu_domain_alloc(unsigned type) struct sun50i_iommu_domain *sun50i_domain; if (type != IOMMU_DOMAIN_DMA && - type != IOMMU_DOMAIN_IDENTITY && type != IOMMU_DOMAIN_UNMANAGED) return NULL; @@ -869,8 +869,8 @@ static phys_addr_t sun50i_iommu_handle_perm_irq(struct sun50i_iommu *iommu) static irqreturn_t sun50i_iommu_irq(int irq, void *dev_id) { + u32 status, l1_status, l2_status, resets; struct sun50i_iommu *iommu = dev_id; - u32 status; spin_lock(&iommu->iommu_lock); @@ -880,6 +880,9 @@ static irqreturn_t sun50i_iommu_irq(int irq, void *dev_id) return IRQ_NONE; } + l1_status = iommu_read(iommu, IOMMU_L1PG_INT_REG); + l2_status = iommu_read(iommu, IOMMU_L2PG_INT_REG); + if (status & IOMMU_INT_INVALID_L2PG) sun50i_iommu_handle_pt_irq(iommu, IOMMU_INT_ERR_ADDR_L2_REG, @@ -893,8 +896,9 @@ static irqreturn_t sun50i_iommu_irq(int irq, void *dev_id) iommu_write(iommu, IOMMU_INT_CLR_REG, status); - iommu_write(iommu, IOMMU_RESET_REG, ~status); - iommu_write(iommu, IOMMU_RESET_REG, status); + resets = (status | l1_status | l2_status) & IOMMU_INT_MASTER_MASK; + iommu_write(iommu, IOMMU_RESET_REG, ~resets); + iommu_write(iommu, IOMMU_RESET_REG, IOMMU_RESET_RELEASE_ALL); spin_unlock(&iommu->iommu_lock); diff --git a/drivers/irqchip/irq-gic-pm.c b/drivers/irqchip/irq-gic-pm.c index b60e1853593f..3989d16f997b 100644 --- a/drivers/irqchip/irq-gic-pm.c +++ b/drivers/irqchip/irq-gic-pm.c @@ -102,7 +102,7 @@ static int gic_probe(struct platform_device *pdev) pm_runtime_enable(dev); - ret = pm_runtime_get_sync(dev); + ret = pm_runtime_resume_and_get(dev); if (ret < 0) goto rpm_disable; diff --git a/drivers/irqchip/irq-wpcm450-aic.c b/drivers/irqchip/irq-wpcm450-aic.c index f3ac392d5bc8..36d0d0cf3fa2 100644 --- a/drivers/irqchip/irq-wpcm450-aic.c +++ b/drivers/irqchip/irq-wpcm450-aic.c @@ -146,6 +146,7 @@ static int __init wpcm450_aic_of_init(struct device_node *node, aic->regs = of_iomap(node, 0); if (!aic->regs) { pr_err("Failed to map WPCM450 AIC registers\n"); + kfree(aic); return -ENOMEM; } diff --git a/drivers/isdn/hardware/mISDN/hfcmulti.c b/drivers/isdn/hardware/mISDN/hfcmulti.c index 4f7eaa17fb27..e840609c50eb 100644 --- a/drivers/isdn/hardware/mISDN/hfcmulti.c +++ b/drivers/isdn/hardware/mISDN/hfcmulti.c @@ -3217,6 +3217,7 @@ static int hfcm_l1callback(struct dchannel *dch, u_int cmd) { struct hfc_multi *hc = dch->hw; + struct sk_buff_head free_queue; u_long flags; switch (cmd) { @@ -3245,6 +3246,7 
@@ hfcm_l1callback(struct dchannel *dch, u_int cmd) l1_event(dch->l1, HW_POWERUP_IND); break; case HW_DEACT_REQ: + __skb_queue_head_init(&free_queue); /* start deactivation */ spin_lock_irqsave(&hc->lock, flags); if (hc->ctype == HFC_TYPE_E1) { @@ -3264,20 +3266,21 @@ hfcm_l1callback(struct dchannel *dch, u_int cmd) plxsd_checksync(hc, 0); } } - skb_queue_purge(&dch->squeue); + skb_queue_splice_init(&dch->squeue, &free_queue); if (dch->tx_skb) { - dev_kfree_skb(dch->tx_skb); + __skb_queue_tail(&free_queue, dch->tx_skb); dch->tx_skb = NULL; } dch->tx_idx = 0; if (dch->rx_skb) { - dev_kfree_skb(dch->rx_skb); + __skb_queue_tail(&free_queue, dch->rx_skb); dch->rx_skb = NULL; } test_and_clear_bit(FLG_TX_BUSY, &dch->Flags); if (test_and_clear_bit(FLG_BUSY_TIMER, &dch->Flags)) del_timer(&dch->timer); spin_unlock_irqrestore(&hc->lock, flags); + __skb_queue_purge(&free_queue); break; case HW_POWERUP_REQ: spin_lock_irqsave(&hc->lock, flags); @@ -3384,6 +3387,9 @@ handle_dmsg(struct mISDNchannel *ch, struct sk_buff *skb) case PH_DEACTIVATE_REQ: test_and_clear_bit(FLG_L2_ACTIVATED, &dch->Flags); if (dch->dev.D.protocol != ISDN_P_TE_S0) { + struct sk_buff_head free_queue; + + __skb_queue_head_init(&free_queue); spin_lock_irqsave(&hc->lock, flags); if (debug & DEBUG_HFCMULTI_MSG) printk(KERN_DEBUG @@ -3405,14 +3411,14 @@ handle_dmsg(struct mISDNchannel *ch, struct sk_buff *skb) /* deactivate */ dch->state = 1; } - skb_queue_purge(&dch->squeue); + skb_queue_splice_init(&dch->squeue, &free_queue); if (dch->tx_skb) { - dev_kfree_skb(dch->tx_skb); + __skb_queue_tail(&free_queue, dch->tx_skb); dch->tx_skb = NULL; } dch->tx_idx = 0; if (dch->rx_skb) { - dev_kfree_skb(dch->rx_skb); + __skb_queue_tail(&free_queue, dch->rx_skb); dch->rx_skb = NULL; } test_and_clear_bit(FLG_TX_BUSY, &dch->Flags); @@ -3424,6 +3430,7 @@ handle_dmsg(struct mISDNchannel *ch, struct sk_buff *skb) #endif ret = 0; spin_unlock_irqrestore(&hc->lock, flags); + __skb_queue_purge(&free_queue); } else ret = l1_event(dch->l1, hh->prim); break; diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c index af17459c1a5c..eba58b99cd29 100644 --- a/drivers/isdn/hardware/mISDN/hfcpci.c +++ b/drivers/isdn/hardware/mISDN/hfcpci.c @@ -1617,16 +1617,19 @@ hfcpci_l2l1D(struct mISDNchannel *ch, struct sk_buff *skb) test_and_clear_bit(FLG_L2_ACTIVATED, &dch->Flags); spin_lock_irqsave(&hc->lock, flags); if (hc->hw.protocol == ISDN_P_NT_S0) { + struct sk_buff_head free_queue; + + __skb_queue_head_init(&free_queue); /* prepare deactivation */ Write_hfc(hc, HFCPCI_STATES, 0x40); - skb_queue_purge(&dch->squeue); + skb_queue_splice_init(&dch->squeue, &free_queue); if (dch->tx_skb) { - dev_kfree_skb(dch->tx_skb); + __skb_queue_tail(&free_queue, dch->tx_skb); dch->tx_skb = NULL; } dch->tx_idx = 0; if (dch->rx_skb) { - dev_kfree_skb(dch->rx_skb); + __skb_queue_tail(&free_queue, dch->rx_skb); dch->rx_skb = NULL; } test_and_clear_bit(FLG_TX_BUSY, &dch->Flags); @@ -1639,10 +1642,12 @@ hfcpci_l2l1D(struct mISDNchannel *ch, struct sk_buff *skb) hc->hw.mst_m &= ~HFCPCI_MASTER; Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m); ret = 0; + spin_unlock_irqrestore(&hc->lock, flags); + __skb_queue_purge(&free_queue); } else { ret = l1_event(dch->l1, hh->prim); + spin_unlock_irqrestore(&hc->lock, flags); } - spin_unlock_irqrestore(&hc->lock, flags); break; } if (!ret) diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c index cd5642cef01f..e8b37bd5e34a 100644 --- a/drivers/isdn/hardware/mISDN/hfcsusb.c +++ 
b/drivers/isdn/hardware/mISDN/hfcsusb.c @@ -326,20 +326,24 @@ hfcusb_l2l1D(struct mISDNchannel *ch, struct sk_buff *skb) test_and_clear_bit(FLG_L2_ACTIVATED, &dch->Flags); if (hw->protocol == ISDN_P_NT_S0) { + struct sk_buff_head free_queue; + + __skb_queue_head_init(&free_queue); hfcsusb_ph_command(hw, HFC_L1_DEACTIVATE_NT); spin_lock_irqsave(&hw->lock, flags); - skb_queue_purge(&dch->squeue); + skb_queue_splice_init(&dch->squeue, &free_queue); if (dch->tx_skb) { - dev_kfree_skb(dch->tx_skb); + __skb_queue_tail(&free_queue, dch->tx_skb); dch->tx_skb = NULL; } dch->tx_idx = 0; if (dch->rx_skb) { - dev_kfree_skb(dch->rx_skb); + __skb_queue_tail(&free_queue, dch->rx_skb); dch->rx_skb = NULL; } test_and_clear_bit(FLG_TX_BUSY, &dch->Flags); spin_unlock_irqrestore(&hw->lock, flags); + __skb_queue_purge(&free_queue); #ifdef FIXME if (test_and_clear_bit(FLG_L1_BUSY, &dch->Flags)) dchannel_sched_event(&hc->dch, D_CLEARBUSY); @@ -1330,7 +1334,7 @@ tx_iso_complete(struct urb *urb) printk("\n"); } - dev_kfree_skb(tx_skb); + dev_consume_skb_irq(tx_skb); tx_skb = NULL; if (fifo->dch && get_next_dframe(fifo->dch)) tx_skb = fifo->dch->tx_skb; diff --git a/drivers/macintosh/macio-adb.c b/drivers/macintosh/macio-adb.c index dc634c2932fd..dd2465586140 100644 --- a/drivers/macintosh/macio-adb.c +++ b/drivers/macintosh/macio-adb.c @@ -105,6 +105,10 @@ int macio_init(void) return -ENXIO; } adb = ioremap(r.start, sizeof(struct adb_regs)); + if (!adb) { + of_node_put(adbs); + return -ENOMEM; + } out_8(&adb->ctrl.r, 0); out_8(&adb->intr.r, 0); diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c index c1fdf2896021..df69d648f6d0 100644 --- a/drivers/macintosh/macio_asic.c +++ b/drivers/macintosh/macio_asic.c @@ -423,7 +423,7 @@ static struct macio_dev * macio_add_one_device(struct macio_chip *chip, if (of_device_register(&dev->ofdev) != 0) { printk(KERN_DEBUG"macio: device registration error for %s!\n", dev_name(&dev->ofdev.dev)); - kfree(dev); + put_device(&dev->ofdev.dev); return NULL; } diff --git a/drivers/mailbox/arm_mhuv2.c b/drivers/mailbox/arm_mhuv2.c index d997f8ebfa98..3af15083a25a 100644 --- a/drivers/mailbox/arm_mhuv2.c +++ b/drivers/mailbox/arm_mhuv2.c @@ -1061,8 +1061,8 @@ static int mhuv2_probe(struct amba_device *adev, const struct amba_id *id) int ret = -EINVAL; reg = devm_of_iomap(dev, dev->of_node, 0, NULL); - if (!reg) - return -ENOMEM; + if (IS_ERR(reg)) + return PTR_ERR(reg); mhu = devm_kzalloc(dev, sizeof(*mhu), GFP_KERNEL); if (!mhu) diff --git a/drivers/mailbox/mailbox-mpfs.c b/drivers/mailbox/mailbox-mpfs.c index cfacb3f320a6..853901acaeec 100644 --- a/drivers/mailbox/mailbox-mpfs.c +++ b/drivers/mailbox/mailbox-mpfs.c @@ -2,7 +2,7 @@ /* * Microchip PolarFire SoC (MPFS) system controller/mailbox controller driver * - * Copyright (c) 2020 Microchip Corporation. All rights reserved. + * Copyright (c) 2020-2022 Microchip Corporation. All rights reserved. 
* * Author: Conor Dooley * @@ -56,7 +56,7 @@ #define SCB_STATUS_NOTIFY_MASK BIT(SCB_STATUS_NOTIFY) #define SCB_STATUS_POS (16) -#define SCB_STATUS_MASK GENMASK_ULL(SCB_STATUS_POS + SCB_MASK_WIDTH, SCB_STATUS_POS) +#define SCB_STATUS_MASK GENMASK(SCB_STATUS_POS + SCB_MASK_WIDTH - 1, SCB_STATUS_POS) struct mpfs_mbox { struct mbox_controller controller; @@ -130,13 +130,38 @@ static void mpfs_mbox_rx_data(struct mbox_chan *chan) struct mpfs_mbox *mbox = (struct mpfs_mbox *)chan->con_priv; struct mpfs_mss_response *response = mbox->response; u16 num_words = ALIGN((response->resp_size), (4)) / 4U; - u32 i; + u32 i, status; if (!response->resp_msg) { dev_err(mbox->dev, "failed to assign memory for response %d\n", -ENOMEM); return; } + /* + * The status is stored in bits 31:16 of the SERVICES_SR register. + * It is only valid when BUSY == 0. + * We should *never* get an interrupt while the controller is + * still in the busy state. If we do, something has gone badly + * wrong & the content of the mailbox would not be valid. + */ + if (mpfs_mbox_busy(mbox)) { + dev_err(mbox->dev, "got an interrupt but system controller is busy\n"); + response->resp_status = 0xDEAD; + return; + } + + status = readl_relaxed(mbox->ctrl_base + SERVICES_SR_OFFSET); + + /* + * If the status of the individual servers is non-zero, the service has + * failed. The contents of the mailbox at this point are not be valid, + * so don't bother reading them. Set the status so that the driver + * implementing the service can handle the result. + */ + response->resp_status = (status & SCB_STATUS_MASK) >> SCB_STATUS_POS; + if (response->resp_status) + return; + if (!mpfs_mbox_busy(mbox)) { for (i = 0; i < num_words; i++) { response->resp_msg[i] = diff --git a/drivers/mailbox/zynqmp-ipi-mailbox.c b/drivers/mailbox/zynqmp-ipi-mailbox.c index f44079d62b1a..527204c6d5cd 100644 --- a/drivers/mailbox/zynqmp-ipi-mailbox.c +++ b/drivers/mailbox/zynqmp-ipi-mailbox.c @@ -493,6 +493,7 @@ static int zynqmp_ipi_mbox_probe(struct zynqmp_ipi_mbox *ipi_mbox, ret = device_register(&ipi_mbox->dev); if (ret) { dev_err(dev, "Failed to register ipi mbox dev.\n"); + put_device(&ipi_mbox->dev); return ret; } mdev = &ipi_mbox->dev; @@ -619,7 +620,8 @@ static void zynqmp_ipi_free_mboxes(struct zynqmp_ipi_pdata *pdata) ipi_mbox = &pdata->ipi_mboxes[i]; if (ipi_mbox->dev.parent) { mbox_controller_unregister(&ipi_mbox->mbox); - device_unregister(&ipi_mbox->dev); + if (device_is_registered(&ipi_mbox->dev)) + device_unregister(&ipi_mbox->dev); } } } diff --git a/drivers/mcb/mcb-core.c b/drivers/mcb/mcb-core.c index cf128b3471d7..0530db548231 100644 --- a/drivers/mcb/mcb-core.c +++ b/drivers/mcb/mcb-core.c @@ -71,8 +71,10 @@ static int mcb_probe(struct device *dev) get_device(dev); ret = mdrv->probe(mdev, found_id); - if (ret) + if (ret) { module_put(carrier_mod); + put_device(dev); + } return ret; } diff --git a/drivers/mcb/mcb-parse.c b/drivers/mcb/mcb-parse.c index 0266bfddfbe2..aa6938da0db8 100644 --- a/drivers/mcb/mcb-parse.c +++ b/drivers/mcb/mcb-parse.c @@ -108,7 +108,7 @@ static int chameleon_parse_gdd(struct mcb_bus *bus, return 0; err: - mcb_free_dev(mdev); + put_device(&mdev->dev); return ret; } diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c index 89a73204dbf4..0f6f74e3030f 100644 --- a/drivers/md/dm-cache-metadata.c +++ b/drivers/md/dm-cache-metadata.c @@ -551,11 +551,13 @@ static int __create_persistent_data_objects(struct dm_cache_metadata *cmd, return r; } -static void __destroy_persistent_data_objects(struct 
dm_cache_metadata *cmd) +static void __destroy_persistent_data_objects(struct dm_cache_metadata *cmd, + bool destroy_bm) { dm_sm_destroy(cmd->metadata_sm); dm_tm_destroy(cmd->tm); - dm_block_manager_destroy(cmd->bm); + if (destroy_bm) + dm_block_manager_destroy(cmd->bm); } typedef unsigned long (*flags_mutator)(unsigned long); @@ -826,7 +828,7 @@ static struct dm_cache_metadata *lookup_or_open(struct block_device *bdev, cmd2 = lookup(bdev); if (cmd2) { mutex_unlock(&table_lock); - __destroy_persistent_data_objects(cmd); + __destroy_persistent_data_objects(cmd, true); kfree(cmd); return cmd2; } @@ -874,7 +876,7 @@ void dm_cache_metadata_close(struct dm_cache_metadata *cmd) mutex_unlock(&table_lock); if (!cmd->fail_io) - __destroy_persistent_data_objects(cmd); + __destroy_persistent_data_objects(cmd, true); kfree(cmd); } } @@ -1808,14 +1810,52 @@ int dm_cache_metadata_needs_check(struct dm_cache_metadata *cmd, bool *result) int dm_cache_metadata_abort(struct dm_cache_metadata *cmd) { - int r; + int r = -EINVAL; + struct dm_block_manager *old_bm = NULL, *new_bm = NULL; + + /* fail_io is double-checked with cmd->root_lock held below */ + if (unlikely(cmd->fail_io)) + return r; + + /* + * Replacement block manager (new_bm) is created and old_bm destroyed outside of + * cmd root_lock to avoid ABBA deadlock that would result (due to life-cycle of + * shrinker associated with the block manager's bufio client vs cmd root_lock). + * - must take shrinker_rwsem without holding cmd->root_lock + */ + new_bm = dm_block_manager_create(cmd->bdev, DM_CACHE_METADATA_BLOCK_SIZE << SECTOR_SHIFT, + CACHE_MAX_CONCURRENT_LOCKS); WRITE_LOCK(cmd); - __destroy_persistent_data_objects(cmd); - r = __create_persistent_data_objects(cmd, false); + if (cmd->fail_io) { + WRITE_UNLOCK(cmd); + goto out; + } + + __destroy_persistent_data_objects(cmd, false); + old_bm = cmd->bm; + if (IS_ERR(new_bm)) { + DMERR("could not create block manager during abort"); + cmd->bm = NULL; + r = PTR_ERR(new_bm); + goto out_unlock; + } + + cmd->bm = new_bm; + r = __open_or_format_metadata(cmd, false); + if (r) { + cmd->bm = NULL; + goto out_unlock; + } + new_bm = NULL; +out_unlock: if (r) cmd->fail_io = true; WRITE_UNLOCK(cmd); + dm_block_manager_destroy(old_bm); +out: + if (new_bm && !IS_ERR(new_bm)) + dm_block_manager_destroy(new_bm); return r; } diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index bdd500447dea..abfe7e37b76f 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c @@ -915,16 +915,16 @@ static void abort_transaction(struct cache *cache) if (get_cache_mode(cache) >= CM_READ_ONLY) return; - if (dm_cache_metadata_set_needs_check(cache->cmd)) { - DMERR("%s: failed to set 'needs_check' flag in metadata", dev_name); - set_cache_mode(cache, CM_FAIL); - } - DMERR_LIMIT("%s: aborting current metadata transaction", dev_name); if (dm_cache_metadata_abort(cache->cmd)) { DMERR("%s: failed to abort metadata transaction", dev_name); set_cache_mode(cache, CM_FAIL); } + + if (dm_cache_metadata_set_needs_check(cache->cmd)) { + DMERR("%s: failed to set 'needs_check' flag in metadata", dev_name); + set_cache_mode(cache, CM_FAIL); + } } static void metadata_operation_failed(struct cache *cache, const char *op, int r) @@ -1895,6 +1895,7 @@ static void destroy(struct cache *cache) if (cache->prison) dm_bio_prison_destroy_v2(cache->prison); + cancel_delayed_work_sync(&cache->waker); if (cache->wq) destroy_workqueue(cache->wq); diff --git a/drivers/md/dm-clone-target.c b/drivers/md/dm-clone-target.c 
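/*
 * Illustrative sketch, not part of the patch: the teardown ordering the
 * dm-cache, dm-clone, dm-integrity and dm-thin changes in this series
 * enforce.  A self-rearming delayed work must be cancelled synchronously
 * before the workqueue and structures it uses are torn down, otherwise a
 * late rearm touches freed memory.  example_ctx is hypothetical.
 */
#include <linux/slab.h>
#include <linux/workqueue.h>

struct example_ctx {
	struct workqueue_struct *wq;
	struct delayed_work waker;
};

static void example_destroy(struct example_ctx *ctx)
{
	/* stop the self-rearming worker first ... */
	cancel_delayed_work_sync(&ctx->waker);

	/* ... then it is safe to drain and destroy the workqueue */
	if (ctx->wq)
		destroy_workqueue(ctx->wq);

	kfree(ctx);
}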
index edd22e4d65df..ec4f2487ef10 100644 --- a/drivers/md/dm-clone-target.c +++ b/drivers/md/dm-clone-target.c @@ -1959,6 +1959,7 @@ static void clone_dtr(struct dm_target *ti) mempool_exit(&clone->hydration_pool); dm_kcopyd_client_destroy(clone->kcopyd_client); + cancel_delayed_work_sync(&clone->waker); destroy_workqueue(clone->wq); hash_table_exit(clone); dm_clone_metadata_close(clone->cmd); diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c index 9705f3c358dd..508e81bfef2c 100644 --- a/drivers/md/dm-integrity.c +++ b/drivers/md/dm-integrity.c @@ -4539,6 +4539,8 @@ static void dm_integrity_dtr(struct dm_target *ti) BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress)); BUG_ON(!list_empty(&ic->wait_list)); + if (ic->mode == 'B') + cancel_delayed_work_sync(&ic->bitmap_flush_work); if (ic->metadata_wq) destroy_workqueue(ic->metadata_wq); if (ic->wait_wq) diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c index 0ada99572b68..3ce7017bf9d5 100644 --- a/drivers/md/dm-thin-metadata.c +++ b/drivers/md/dm-thin-metadata.c @@ -724,6 +724,15 @@ static int __open_metadata(struct dm_pool_metadata *pmd) goto bad_cleanup_data_sm; } + /* + * For pool metadata opening process, root setting is redundant + * because it will be set again in __begin_transaction(). But dm + * pool aborting process really needs to get last transaction's + * root to avoid accessing broken btree. + */ + pmd->root = le64_to_cpu(disk_super->data_mapping_root); + pmd->details_root = le64_to_cpu(disk_super->device_details_root); + __setup_btree_details(pmd); dm_bm_unlock(sblock); @@ -776,13 +785,15 @@ static int __create_persistent_data_objects(struct dm_pool_metadata *pmd, bool f return r; } -static void __destroy_persistent_data_objects(struct dm_pool_metadata *pmd) +static void __destroy_persistent_data_objects(struct dm_pool_metadata *pmd, + bool destroy_bm) { dm_sm_destroy(pmd->data_sm); dm_sm_destroy(pmd->metadata_sm); dm_tm_destroy(pmd->nb_tm); dm_tm_destroy(pmd->tm); - dm_block_manager_destroy(pmd->bm); + if (destroy_bm) + dm_block_manager_destroy(pmd->bm); } static int __begin_transaction(struct dm_pool_metadata *pmd) @@ -989,7 +1000,7 @@ int dm_pool_metadata_close(struct dm_pool_metadata *pmd) } pmd_write_unlock(pmd); if (!pmd->fail_io) - __destroy_persistent_data_objects(pmd); + __destroy_persistent_data_objects(pmd, true); kfree(pmd); return 0; @@ -1888,19 +1899,52 @@ static void __set_abort_with_changes_flags(struct dm_pool_metadata *pmd) int dm_pool_abort_metadata(struct dm_pool_metadata *pmd) { int r = -EINVAL; + struct dm_block_manager *old_bm = NULL, *new_bm = NULL; + + /* fail_io is double-checked with pmd->root_lock held below */ + if (unlikely(pmd->fail_io)) + return r; + + /* + * Replacement block manager (new_bm) is created and old_bm destroyed outside of + * pmd root_lock to avoid ABBA deadlock that would result (due to life-cycle of + * shrinker associated with the block manager's bufio client vs pmd root_lock). 
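/*
 * Illustrative sketch, not part of the patch: the lock-ordering pattern both
 * metadata-abort fixes follow.  The replacement object is created before the
 * metadata lock is taken and the old one is destroyed after it is dropped, so
 * the allocator/shrinker is never entered while the lock is held.  The
 * example_* names are hypothetical and the block manager is stood in for by
 * a plain allocation.
 */
#include <linux/mutex.h>
#include <linux/slab.h>

struct example_md {
	struct mutex lock;
	void *bm;		/* stands in for the block manager */
};

static int example_abort(struct example_md *md)
{
	void *new_bm, *old_bm;

	/* allocate outside the lock; this may recurse into reclaim */
	new_bm = kzalloc(64, GFP_KERNEL);
	if (!new_bm)
		return -ENOMEM;

	mutex_lock(&md->lock);
	old_bm = md->bm;	/* swap under the lock */
	md->bm = new_bm;
	mutex_unlock(&md->lock);

	/* destroy the old instance outside the lock as well */
	kfree(old_bm);
	return 0;
}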
+ * - must take shrinker_rwsem without holding pmd->root_lock + */ + new_bm = dm_block_manager_create(pmd->bdev, THIN_METADATA_BLOCK_SIZE << SECTOR_SHIFT, + THIN_MAX_CONCURRENT_LOCKS); pmd_write_lock(pmd); - if (pmd->fail_io) + if (pmd->fail_io) { + pmd_write_unlock(pmd); goto out; + } __set_abort_with_changes_flags(pmd); - __destroy_persistent_data_objects(pmd); - r = __create_persistent_data_objects(pmd, false); + __destroy_persistent_data_objects(pmd, false); + old_bm = pmd->bm; + if (IS_ERR(new_bm)) { + DMERR("could not create block manager during abort"); + pmd->bm = NULL; + r = PTR_ERR(new_bm); + goto out_unlock; + } + + pmd->bm = new_bm; + r = __open_or_format_metadata(pmd, false); + if (r) { + pmd->bm = NULL; + goto out_unlock; + } + new_bm = NULL; +out_unlock: if (r) pmd->fail_io = true; - -out: pmd_write_unlock(pmd); + dm_block_manager_destroy(old_bm); +out: + if (new_bm && !IS_ERR(new_bm)) + dm_block_manager_destroy(new_bm); return r; } diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 0a85e4cd607c..cce26f46ded5 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -2907,6 +2907,8 @@ static void __pool_destroy(struct pool *pool) dm_bio_prison_destroy(pool->prison); dm_kcopyd_client_destroy(pool->copier); + cancel_delayed_work_sync(&pool->waker); + cancel_delayed_work_sync(&pool->no_space_timeout); if (pool->wq) destroy_workqueue(pool->wq); @@ -3566,20 +3568,28 @@ static int pool_preresume(struct dm_target *ti) */ r = bind_control_target(pool, ti); if (r) - return r; + goto out; r = maybe_resize_data_dev(ti, &need_commit1); if (r) - return r; + goto out; r = maybe_resize_metadata_dev(ti, &need_commit2); if (r) - return r; + goto out; if (need_commit1 || need_commit2) (void) commit(pool); +out: + /* + * When a thin-pool is PM_FAIL, it cannot be rebuilt if + * bio is in deferred list. Therefore need to return 0 + * to allow pool_resume() to flush IO. 
+ */ + if (r && get_pool_mode(pool) == PM_FAIL) + r = 0; - return 0; + return r; } static void pool_suspend_active_thins(struct pool *pool) diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c index 8cc11b1987ec..062142559caa 100644 --- a/drivers/md/md-bitmap.c +++ b/drivers/md/md-bitmap.c @@ -486,7 +486,7 @@ void md_bitmap_print_sb(struct bitmap *bitmap) sb = kmap_atomic(bitmap->storage.sb_page); pr_debug("%s: bitmap file superblock:\n", bmname(bitmap)); pr_debug(" magic: %08x\n", le32_to_cpu(sb->magic)); - pr_debug(" version: %d\n", le32_to_cpu(sb->version)); + pr_debug(" version: %u\n", le32_to_cpu(sb->version)); pr_debug(" uuid: %08x.%08x.%08x.%08x\n", le32_to_cpu(*(__le32 *)(sb->uuid+0)), le32_to_cpu(*(__le32 *)(sb->uuid+4)), @@ -497,11 +497,11 @@ void md_bitmap_print_sb(struct bitmap *bitmap) pr_debug("events cleared: %llu\n", (unsigned long long) le64_to_cpu(sb->events_cleared)); pr_debug(" state: %08x\n", le32_to_cpu(sb->state)); - pr_debug(" chunksize: %d B\n", le32_to_cpu(sb->chunksize)); - pr_debug(" daemon sleep: %ds\n", le32_to_cpu(sb->daemon_sleep)); + pr_debug(" chunksize: %u B\n", le32_to_cpu(sb->chunksize)); + pr_debug(" daemon sleep: %us\n", le32_to_cpu(sb->daemon_sleep)); pr_debug(" sync size: %llu KB\n", (unsigned long long)le64_to_cpu(sb->sync_size)/2); - pr_debug("max write behind: %d\n", le32_to_cpu(sb->write_behind)); + pr_debug("max write behind: %u\n", le32_to_cpu(sb->write_behind)); kunmap_atomic(sb); } @@ -2106,7 +2106,8 @@ int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks, bytes = DIV_ROUND_UP(chunks, 8); if (!bitmap->mddev->bitmap_info.external) bytes += sizeof(bitmap_super_t); - } while (bytes > (space << 9)); + } while (bytes > (space << 9) && (chunkshift + BITMAP_BLOCK_SHIFT) < + (BITS_PER_BYTE * sizeof(((bitmap_super_t *)0)->chunksize) - 1)); } else chunkshift = ffz(~chunksize) - BITMAP_BLOCK_SHIFT; @@ -2151,7 +2152,7 @@ int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks, bitmap->counts.missing_pages = pages; bitmap->counts.chunkshift = chunkshift; bitmap->counts.chunks = chunks; - bitmap->mddev->bitmap_info.chunksize = 1 << (chunkshift + + bitmap->mddev->bitmap_info.chunksize = 1UL << (chunkshift + BITMAP_BLOCK_SHIFT); blocks = min(old_counts.chunks << old_counts.chunkshift, @@ -2177,8 +2178,8 @@ int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks, bitmap->counts.missing_pages = old_counts.pages; bitmap->counts.chunkshift = old_counts.chunkshift; bitmap->counts.chunks = old_counts.chunks; - bitmap->mddev->bitmap_info.chunksize = 1 << (old_counts.chunkshift + - BITMAP_BLOCK_SHIFT); + bitmap->mddev->bitmap_info.chunksize = + 1UL << (old_counts.chunkshift + BITMAP_BLOCK_SHIFT); blocks = old_counts.chunks << old_counts.chunkshift; pr_warn("Could not pre-allocate in-memory bitmap for cluster raid\n"); break; @@ -2196,20 +2197,23 @@ int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks, if (set) { bmc_new = md_bitmap_get_counter(&bitmap->counts, block, &new_blocks, 1); - if (*bmc_new == 0) { - /* need to set on-disk bits too. */ - sector_t end = block + new_blocks; - sector_t start = block >> chunkshift; - start <<= chunkshift; - while (start < end) { - md_bitmap_file_set_bit(bitmap, block); - start += 1 << chunkshift; + if (bmc_new) { + if (*bmc_new == 0) { + /* need to set on-disk bits too. 
*/ + sector_t end = block + new_blocks; + sector_t start = block >> chunkshift; + + start <<= chunkshift; + while (start < end) { + md_bitmap_file_set_bit(bitmap, block); + start += 1 << chunkshift; + } + *bmc_new = 2; + md_bitmap_count_page(&bitmap->counts, block, 1); + md_bitmap_set_pending(&bitmap->counts, block); } - *bmc_new = 2; - md_bitmap_count_page(&bitmap->counts, block, 1); - md_bitmap_set_pending(&bitmap->counts, block); + *bmc_new |= NEEDED_MASK; } - *bmc_new |= NEEDED_MASK; if (new_blocks < old_blocks) old_blocks = new_blocks; } @@ -2516,6 +2520,9 @@ chunksize_store(struct mddev *mddev, const char *buf, size_t len) if (csize < 512 || !is_power_of_2(csize)) return -EINVAL; + if (BITS_PER_LONG > 32 && csize >= (1ULL << (BITS_PER_BYTE * + sizeof(((bitmap_super_t *)0)->chunksize)))) + return -EOVERFLOW; mddev->bitmap_info.chunksize = csize; return len; } diff --git a/drivers/md/md.c b/drivers/md/md.c index 04e1e294b4b1..59ab99844df8 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -526,13 +526,14 @@ static void md_end_flush(struct bio *bio) struct md_rdev *rdev = bio->bi_private; struct mddev *mddev = rdev->mddev; + bio_put(bio); + rdev_dec_pending(rdev, mddev); if (atomic_dec_and_test(&mddev->flush_pending)) { /* The pre-request flush has finished */ queue_work(md_wq, &mddev->flush_work); } - bio_put(bio); } static void md_submit_flush_data(struct work_struct *ws); @@ -935,10 +936,12 @@ static void super_written(struct bio *bio) } else clear_bit(LastDev, &rdev->flags); + bio_put(bio); + + rdev_dec_pending(rdev, mddev); + if (atomic_dec_and_test(&mddev->pending_writes)) wake_up(&mddev->sb_wait); - rdev_dec_pending(rdev, mddev); - bio_put(bio); } void md_super_write(struct mddev *mddev, struct md_rdev *rdev, diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 9fa479493642..783763f6845f 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -3141,6 +3141,7 @@ static int raid1_run(struct mddev *mddev) * RAID1 needs at least one disk in active */ if (conf->raid_disks - mddev->degraded < 1) { + md_unregister_thread(&conf->thread); ret = -EINVAL; goto abort; } diff --git a/drivers/media/dvb-core/dmxdev.c b/drivers/media/dvb-core/dmxdev.c index 01f288fa37e0..8abf7f44d96b 100644 --- a/drivers/media/dvb-core/dmxdev.c +++ b/drivers/media/dvb-core/dmxdev.c @@ -800,6 +800,11 @@ static int dvb_demux_open(struct inode *inode, struct file *file) if (mutex_lock_interruptible(&dmxdev->mutex)) return -ERESTARTSYS; + if (dmxdev->exit) { + mutex_unlock(&dmxdev->mutex); + return -ENODEV; + } + for (i = 0; i < dmxdev->filternum; i++) if (dmxdev->filter[i].state == DMXDEV_STATE_FREE) break; @@ -1458,7 +1463,10 @@ EXPORT_SYMBOL(dvb_dmxdev_init); void dvb_dmxdev_release(struct dmxdev *dmxdev) { + mutex_lock(&dmxdev->mutex); dmxdev->exit = 1; + mutex_unlock(&dmxdev->mutex); + if (dmxdev->dvbdev->users > 1) { wait_event(dmxdev->dvbdev->wait_queue, dmxdev->dvbdev->users == 1); diff --git a/drivers/media/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb-core/dvb_ca_en50221.c index 15a08d8c69ef..c2d2792227f8 100644 --- a/drivers/media/dvb-core/dvb_ca_en50221.c +++ b/drivers/media/dvb-core/dvb_ca_en50221.c @@ -157,7 +157,7 @@ static void dvb_ca_private_free(struct dvb_ca_private *ca) { unsigned int i; - dvb_free_device(ca->dvbdev); + dvb_device_put(ca->dvbdev); for (i = 0; i < ca->slot_count; i++) vfree(ca->slot_info[i].rx_buffer.data); diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c index 258637d762d6..09facc78d88a 100644 --- 
a/drivers/media/dvb-core/dvb_frontend.c +++ b/drivers/media/dvb-core/dvb_frontend.c @@ -136,7 +136,7 @@ static void __dvb_frontend_free(struct dvb_frontend *fe) struct dvb_frontend_private *fepriv = fe->frontend_priv; if (fepriv) - dvb_free_device(fepriv->dvbdev); + dvb_device_put(fepriv->dvbdev); dvb_frontend_invoke_release(fe, fe->ops.release); @@ -2985,6 +2985,7 @@ int dvb_register_frontend(struct dvb_adapter *dvb, .name = fe->ops.info.name, #endif }; + int ret; dev_dbg(dvb->device, "%s:\n", __func__); @@ -3018,8 +3019,13 @@ int dvb_register_frontend(struct dvb_adapter *dvb, "DVB: registering adapter %i frontend %i (%s)...\n", fe->dvb->num, fe->id, fe->ops.info.name); - dvb_register_device(fe->dvb, &fepriv->dvbdev, &dvbdev_template, + ret = dvb_register_device(fe->dvb, &fepriv->dvbdev, &dvbdev_template, fe, DVB_DEVICE_FRONTEND, 0); + if (ret) { + dvb_frontend_put(fe); + mutex_unlock(&frontend_mutex); + return ret; + } /* * Initialize the cache to the proper values according with the diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c index 795d9bfaba5c..6e2b7e97da17 100644 --- a/drivers/media/dvb-core/dvbdev.c +++ b/drivers/media/dvb-core/dvbdev.c @@ -107,7 +107,7 @@ static int dvb_device_open(struct inode *inode, struct file *file) new_fops = fops_get(dvbdev->fops); if (!new_fops) goto fail; - file->private_data = dvbdev; + file->private_data = dvb_device_get(dvbdev); replace_fops(file, new_fops); if (file->f_op->open) err = file->f_op->open(inode, file); @@ -171,6 +171,9 @@ int dvb_generic_release(struct inode *inode, struct file *file) } dvbdev->users++; + + dvb_device_put(dvbdev); + return 0; } EXPORT_SYMBOL(dvb_generic_release); @@ -342,6 +345,7 @@ static int dvb_create_media_entity(struct dvb_device *dvbdev, GFP_KERNEL); if (!dvbdev->pads) { kfree(dvbdev->entity); + dvbdev->entity = NULL; return -ENOMEM; } } @@ -488,6 +492,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev, } memcpy(dvbdev, template, sizeof(struct dvb_device)); + kref_init(&dvbdev->ref); dvbdev->type = type; dvbdev->id = id; dvbdev->adapter = adap; @@ -518,7 +523,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev, #endif dvbdev->minor = minor; - dvb_minors[minor] = dvbdev; + dvb_minors[minor] = dvb_device_get(dvbdev); up_write(&minor_rwsem); ret = dvb_register_media_device(dvbdev, type, minor, demux_sink_pads); @@ -563,6 +568,7 @@ void dvb_remove_device(struct dvb_device *dvbdev) down_write(&minor_rwsem); dvb_minors[dvbdev->minor] = NULL; + dvb_device_put(dvbdev); up_write(&minor_rwsem); dvb_media_device_free(dvbdev); @@ -574,21 +580,34 @@ void dvb_remove_device(struct dvb_device *dvbdev) EXPORT_SYMBOL(dvb_remove_device); -void dvb_free_device(struct dvb_device *dvbdev) +static void dvb_free_device(struct kref *ref) { - if (!dvbdev) - return; + struct dvb_device *dvbdev = container_of(ref, struct dvb_device, ref); kfree (dvbdev->fops); kfree (dvbdev); } -EXPORT_SYMBOL(dvb_free_device); + + +struct dvb_device *dvb_device_get(struct dvb_device *dvbdev) +{ + kref_get(&dvbdev->ref); + return dvbdev; +} +EXPORT_SYMBOL(dvb_device_get); + + +void dvb_device_put(struct dvb_device *dvbdev) +{ + if (dvbdev) + kref_put(&dvbdev->ref, dvb_free_device); +} void dvb_unregister_device(struct dvb_device *dvbdev) { dvb_remove_device(dvbdev); - dvb_free_device(dvbdev); + dvb_device_put(dvbdev); } EXPORT_SYMBOL(dvb_unregister_device); diff --git a/drivers/media/dvb-frontends/bcm3510.c b/drivers/media/dvb-frontends/bcm3510.c index 
da0ff7b44da4..68b92b4419cf 100644 --- a/drivers/media/dvb-frontends/bcm3510.c +++ b/drivers/media/dvb-frontends/bcm3510.c @@ -649,6 +649,7 @@ static int bcm3510_download_firmware(struct dvb_frontend* fe) deb_info("firmware chunk, addr: 0x%04x, len: 0x%04x, total length: 0x%04zx\n",addr,len,fw->size); if ((ret = bcm3510_write_ram(st,addr,&b[i+4],len)) < 0) { err("firmware download failed: %d\n",ret); + release_firmware(fw); return ret; } i += 4 + len; diff --git a/drivers/media/dvb-frontends/stv0288.c b/drivers/media/dvb-frontends/stv0288.c index 3d54a0ec86af..3ae1f3a2f142 100644 --- a/drivers/media/dvb-frontends/stv0288.c +++ b/drivers/media/dvb-frontends/stv0288.c @@ -440,9 +440,8 @@ static int stv0288_set_frontend(struct dvb_frontend *fe) struct stv0288_state *state = fe->demodulator_priv; struct dtv_frontend_properties *c = &fe->dtv_property_cache; - char tm; - unsigned char tda[3]; - u8 reg, time_out = 0; + u8 tda[3], reg, time_out = 0; + s8 tm; dprintk("%s : FE_SET_FRONTEND\n", __func__); diff --git a/drivers/media/i2c/ad5820.c b/drivers/media/i2c/ad5820.c index 2958a4694461..07639ecc85aa 100644 --- a/drivers/media/i2c/ad5820.c +++ b/drivers/media/i2c/ad5820.c @@ -327,18 +327,18 @@ static int ad5820_probe(struct i2c_client *client, ret = media_entity_pads_init(&coil->subdev.entity, 0, NULL); if (ret < 0) - goto cleanup2; + goto clean_mutex; ret = v4l2_async_register_subdev(&coil->subdev); if (ret < 0) - goto cleanup; + goto clean_entity; return ret; -cleanup2: - mutex_destroy(&coil->power_lock); -cleanup: +clean_entity: media_entity_cleanup(&coil->subdev.entity); +clean_mutex: + mutex_destroy(&coil->power_lock); return ret; } diff --git a/drivers/media/i2c/adv748x/adv748x-afe.c b/drivers/media/i2c/adv748x/adv748x-afe.c index 02eabe10ab97..00095c7762c2 100644 --- a/drivers/media/i2c/adv748x/adv748x-afe.c +++ b/drivers/media/i2c/adv748x/adv748x-afe.c @@ -521,6 +521,10 @@ int adv748x_afe_init(struct adv748x_afe *afe) } } + adv748x_afe_s_input(afe, afe->input); + + adv_dbg(state, "AFE Default input set to %d\n", afe->input); + /* Entity pads and sinks are 0-indexed to match the pads */ for (i = ADV748X_AFE_SINK_AIN0; i <= ADV748X_AFE_SINK_AIN7; i++) afe->pads[i].flags = MEDIA_PAD_FL_SINK; diff --git a/drivers/media/pci/saa7164/saa7164-core.c b/drivers/media/pci/saa7164/saa7164-core.c index 7973ae42873a..c10997e2271d 100644 --- a/drivers/media/pci/saa7164/saa7164-core.c +++ b/drivers/media/pci/saa7164/saa7164-core.c @@ -1259,7 +1259,7 @@ static int saa7164_initdev(struct pci_dev *pci_dev, if (saa7164_dev_setup(dev) < 0) { err = -EINVAL; - goto fail_free; + goto fail_dev; } /* print pci info */ @@ -1427,6 +1427,8 @@ fail_fw: fail_irq: saa7164_dev_unregister(dev); +fail_dev: + pci_disable_device(pci_dev); fail_free: v4l2_device_unregister(&dev->v4l2_dev); kfree(dev); diff --git a/drivers/media/pci/solo6x10/solo6x10-core.c b/drivers/media/pci/solo6x10/solo6x10-core.c index 4a546eeefe38..6d87fbb0ee04 100644 --- a/drivers/media/pci/solo6x10/solo6x10-core.c +++ b/drivers/media/pci/solo6x10/solo6x10-core.c @@ -420,6 +420,7 @@ static int solo_sysfs_init(struct solo_dev *solo_dev) solo_dev->nr_chans); if (device_register(dev)) { + put_device(dev); dev->parent = NULL; return -ENOMEM; } diff --git a/drivers/media/platform/coda/coda-bit.c b/drivers/media/platform/coda/coda-bit.c index c484c008ab02..582a6c581f3c 100644 --- a/drivers/media/platform/coda/coda-bit.c +++ b/drivers/media/platform/coda/coda-bit.c @@ -852,7 +852,7 @@ static void coda_setup_iram(struct coda_ctx *ctx) /* Only H.264BP and 
H.263P3 are considered */ iram_info->buf_dbk_y_use = coda_iram_alloc(iram_info, w64); iram_info->buf_dbk_c_use = coda_iram_alloc(iram_info, w64); - if (!iram_info->buf_dbk_c_use) + if (!iram_info->buf_dbk_y_use || !iram_info->buf_dbk_c_use) goto out; iram_info->axi_sram_use |= dbk_bits; @@ -876,7 +876,7 @@ static void coda_setup_iram(struct coda_ctx *ctx) iram_info->buf_dbk_y_use = coda_iram_alloc(iram_info, w128); iram_info->buf_dbk_c_use = coda_iram_alloc(iram_info, w128); - if (!iram_info->buf_dbk_c_use) + if (!iram_info->buf_dbk_y_use || !iram_info->buf_dbk_c_use) goto out; iram_info->axi_sram_use |= dbk_bits; @@ -1082,10 +1082,16 @@ static int coda_start_encoding(struct coda_ctx *ctx) } if (dst_fourcc == V4L2_PIX_FMT_JPEG) { - if (!ctx->params.jpeg_qmat_tab[0]) + if (!ctx->params.jpeg_qmat_tab[0]) { ctx->params.jpeg_qmat_tab[0] = kmalloc(64, GFP_KERNEL); - if (!ctx->params.jpeg_qmat_tab[1]) + if (!ctx->params.jpeg_qmat_tab[0]) + return -ENOMEM; + } + if (!ctx->params.jpeg_qmat_tab[1]) { ctx->params.jpeg_qmat_tab[1] = kmalloc(64, GFP_KERNEL); + if (!ctx->params.jpeg_qmat_tab[1]) + return -ENOMEM; + } coda_set_jpeg_compression_quality(ctx, ctx->params.jpeg_quality); } diff --git a/drivers/media/platform/coda/coda-jpeg.c b/drivers/media/platform/coda/coda-jpeg.c index a72f4655e5ad..b7bf529f18f7 100644 --- a/drivers/media/platform/coda/coda-jpeg.c +++ b/drivers/media/platform/coda/coda-jpeg.c @@ -1052,10 +1052,16 @@ static int coda9_jpeg_start_encoding(struct coda_ctx *ctx) v4l2_err(&dev->v4l2_dev, "error loading Huffman tables\n"); return ret; } - if (!ctx->params.jpeg_qmat_tab[0]) + if (!ctx->params.jpeg_qmat_tab[0]) { ctx->params.jpeg_qmat_tab[0] = kmalloc(64, GFP_KERNEL); - if (!ctx->params.jpeg_qmat_tab[1]) + if (!ctx->params.jpeg_qmat_tab[0]) + return -ENOMEM; + } + if (!ctx->params.jpeg_qmat_tab[1]) { ctx->params.jpeg_qmat_tab[1] = kmalloc(64, GFP_KERNEL); + if (!ctx->params.jpeg_qmat_tab[1]) + return -ENOMEM; + } coda_set_jpeg_compression_quality(ctx, ctx->params.jpeg_quality); return 0; diff --git a/drivers/media/platform/exynos4-is/fimc-core.c b/drivers/media/platform/exynos4-is/fimc-core.c index bfdee771cef9..4afe0b9b1773 100644 --- a/drivers/media/platform/exynos4-is/fimc-core.c +++ b/drivers/media/platform/exynos4-is/fimc-core.c @@ -1174,7 +1174,7 @@ int __init fimc_register_driver(void) return platform_driver_register(&fimc_driver); } -void __exit fimc_unregister_driver(void) +void fimc_unregister_driver(void) { platform_driver_unregister(&fimc_driver); } diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c index fa648721eaab..b19d7c8ddc06 100644 --- a/drivers/media/platform/exynos4-is/media-dev.c +++ b/drivers/media/platform/exynos4-is/media-dev.c @@ -1380,9 +1380,7 @@ static int subdev_notifier_bound(struct v4l2_async_notifier *notifier, /* Find platform data for this sensor subdev */ for (i = 0; i < ARRAY_SIZE(fmd->sensor); i++) - if (fmd->sensor[i].asd && - fmd->sensor[i].asd->match.fwnode == - of_fwnode_handle(subdev->dev->of_node)) + if (fmd->sensor[i].asd == asd) si = &fmd->sensor[i]; if (si == NULL) @@ -1474,7 +1472,7 @@ static int fimc_md_probe(struct platform_device *pdev) pinctrl = devm_pinctrl_get(dev); if (IS_ERR(pinctrl)) { ret = PTR_ERR(pinctrl); - if (ret != EPROBE_DEFER) + if (ret != -EPROBE_DEFER) dev_err(dev, "Failed to get pinctrl: %d\n", ret); goto err_clk; } @@ -1586,7 +1584,11 @@ static int __init fimc_md_init(void) if (ret) return ret; - return platform_driver_register(&fimc_md_driver); + ret = 
platform_driver_register(&fimc_md_driver); + if (ret) + fimc_unregister_driver(); + + return ret; } static void __exit fimc_md_exit(void) diff --git a/drivers/media/platform/imx-jpeg/mxc-jpeg-hw.c b/drivers/media/platform/imx-jpeg/mxc-jpeg-hw.c index 718b7b08f93e..8936d5ce886c 100644 --- a/drivers/media/platform/imx-jpeg/mxc-jpeg-hw.c +++ b/drivers/media/platform/imx-jpeg/mxc-jpeg-hw.c @@ -76,12 +76,14 @@ void print_wrapper_info(struct device *dev, void __iomem *reg) void mxc_jpeg_enable_irq(void __iomem *reg, int slot) { - writel(0xFFFFFFFF, reg + MXC_SLOT_OFFSET(slot, SLOT_IRQ_EN)); + writel(0xFFFFFFFF, reg + MXC_SLOT_OFFSET(slot, SLOT_STATUS)); + writel(0xF0C, reg + MXC_SLOT_OFFSET(slot, SLOT_IRQ_EN)); } void mxc_jpeg_disable_irq(void __iomem *reg, int slot) { writel(0x0, reg + MXC_SLOT_OFFSET(slot, SLOT_IRQ_EN)); + writel(0xFFFFFFFF, reg + MXC_SLOT_OFFSET(slot, SLOT_STATUS)); } void mxc_jpeg_sw_reset(void __iomem *reg) diff --git a/drivers/media/platform/qcom/camss/camss-video.c b/drivers/media/platform/qcom/camss/camss-video.c index f282275af626..5173b79995ee 100644 --- a/drivers/media/platform/qcom/camss/camss-video.c +++ b/drivers/media/platform/qcom/camss/camss-video.c @@ -493,7 +493,7 @@ static int video_start_streaming(struct vb2_queue *q, unsigned int count) ret = media_pipeline_start(&vdev->entity, &video->pipe); if (ret < 0) - return ret; + goto flush_buffers; ret = video_check_format(video); if (ret < 0) @@ -522,6 +522,7 @@ static int video_start_streaming(struct vb2_queue *q, unsigned int count) error: media_pipeline_stop(&vdev->entity); +flush_buffers: video->ops->flush_buffers(video, VB2_BUF_STATE_QUEUED); return ret; diff --git a/drivers/media/platform/qcom/venus/pm_helpers.c b/drivers/media/platform/qcom/venus/pm_helpers.c index 03fc82cb3fea..055513a7301f 100644 --- a/drivers/media/platform/qcom/venus/pm_helpers.c +++ b/drivers/media/platform/qcom/venus/pm_helpers.c @@ -869,8 +869,8 @@ static int vcodec_domains_get(struct venus_core *core) for (i = 0; i < res->vcodec_pmdomains_num; i++) { pd = dev_pm_domain_attach_by_name(dev, res->vcodec_pmdomains[i]); - if (IS_ERR(pd)) - return PTR_ERR(pd); + if (IS_ERR_OR_NULL(pd)) + return PTR_ERR(pd) ? 
: -ENODATA; core->pmdomains[i] = pd; } diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c index 4fc135d9f38b..4c511b026bd7 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c @@ -1583,8 +1583,18 @@ static struct s5p_mfc_variant mfc_drvdata_v7 = { .port_num = MFC_NUM_PORTS_V7, .buf_size = &buf_size_v7, .fw_name[0] = "s5p-mfc-v7.fw", - .clk_names = {"mfc", "sclk_mfc"}, - .num_clocks = 2, + .clk_names = {"mfc"}, + .num_clocks = 1, +}; + +static struct s5p_mfc_variant mfc_drvdata_v7_3250 = { + .version = MFC_VERSION_V7, + .version_bit = MFC_V7_BIT, + .port_num = MFC_NUM_PORTS_V7, + .buf_size = &buf_size_v7, + .fw_name[0] = "s5p-mfc-v7.fw", + .clk_names = {"mfc", "sclk_mfc"}, + .num_clocks = 2, }; static struct s5p_mfc_buf_size_v6 mfc_buf_size_v8 = { @@ -1654,6 +1664,9 @@ static const struct of_device_id exynos_mfc_match[] = { }, { .compatible = "samsung,mfc-v7", .data = &mfc_drvdata_v7, + }, { + .compatible = "samsung,exynos3250-mfc", + .data = &mfc_drvdata_v7_3250, }, { .compatible = "samsung,mfc-v8", .data = &mfc_drvdata_v8, diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c b/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c index da138c314963..58822ec5370e 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c @@ -468,8 +468,10 @@ void s5p_mfc_close_mfc_inst(struct s5p_mfc_dev *dev, struct s5p_mfc_ctx *ctx) s5p_mfc_hw_call(dev->mfc_ops, try_run, dev); /* Wait until instance is returned or timeout occurred */ if (s5p_mfc_wait_for_done_ctx(ctx, - S5P_MFC_R2H_CMD_CLOSE_INSTANCE_RET, 0)) + S5P_MFC_R2H_CMD_CLOSE_INSTANCE_RET, 0)){ + clear_work_bit_irqsave(ctx); mfc_err("Err returning instance\n"); + } /* Free resources */ s5p_mfc_hw_call(dev->mfc_ops, release_codec_buffers, ctx); diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c index 1fad99edb091..3da1775a65f1 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c @@ -1218,6 +1218,7 @@ static int enc_post_frame_start(struct s5p_mfc_ctx *ctx) unsigned long mb_y_addr, mb_c_addr; int slice_type; unsigned int strm_size; + bool src_ready; slice_type = s5p_mfc_hw_call(dev->mfc_ops, get_enc_slice_type, dev); strm_size = s5p_mfc_hw_call(dev->mfc_ops, get_enc_strm_size, dev); @@ -1257,7 +1258,8 @@ static int enc_post_frame_start(struct s5p_mfc_ctx *ctx) } } } - if ((ctx->src_queue_cnt > 0) && (ctx->state == MFCINST_RUNNING)) { + if (ctx->src_queue_cnt > 0 && (ctx->state == MFCINST_RUNNING || + ctx->state == MFCINST_FINISHING)) { mb_entry = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list); if (mb_entry->flags & MFC_BUF_FLAG_USED) { @@ -1288,7 +1290,13 @@ static int enc_post_frame_start(struct s5p_mfc_ctx *ctx) vb2_set_plane_payload(&mb_entry->b->vb2_buf, 0, strm_size); vb2_buffer_done(&mb_entry->b->vb2_buf, VB2_BUF_STATE_DONE); } - if ((ctx->src_queue_cnt == 0) || (ctx->dst_queue_cnt == 0)) + + src_ready = true; + if (ctx->state == MFCINST_RUNNING && ctx->src_queue_cnt == 0) + src_ready = false; + if (ctx->state == MFCINST_FINISHING && ctx->ref_queue_cnt == 0) + src_ready = false; + if (!src_ready || ctx->dst_queue_cnt == 0) clear_work_bit(ctx); return 0; diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c index a1453053e31a..ef8169f6c428 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c +++ 
b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c @@ -1060,7 +1060,7 @@ static int s5p_mfc_set_enc_params_h264(struct s5p_mfc_ctx *ctx) } /* aspect ratio VUI */ - readl(mfc_regs->e_h264_options); + reg = readl(mfc_regs->e_h264_options); reg &= ~(0x1 << 5); reg |= ((p_h264->vui_sar & 0x1) << 5); writel(reg, mfc_regs->e_h264_options); @@ -1083,7 +1083,7 @@ static int s5p_mfc_set_enc_params_h264(struct s5p_mfc_ctx *ctx) /* intra picture period for H.264 open GOP */ /* control */ - readl(mfc_regs->e_h264_options); + reg = readl(mfc_regs->e_h264_options); reg &= ~(0x1 << 4); reg |= ((p_h264->open_gop & 0x1) << 4); writel(reg, mfc_regs->e_h264_options); @@ -1097,23 +1097,23 @@ static int s5p_mfc_set_enc_params_h264(struct s5p_mfc_ctx *ctx) } /* 'WEIGHTED_BI_PREDICTION' for B is disable */ - readl(mfc_regs->e_h264_options); + reg = readl(mfc_regs->e_h264_options); reg &= ~(0x3 << 9); writel(reg, mfc_regs->e_h264_options); /* 'CONSTRAINED_INTRA_PRED_ENABLE' is disable */ - readl(mfc_regs->e_h264_options); + reg = readl(mfc_regs->e_h264_options); reg &= ~(0x1 << 14); writel(reg, mfc_regs->e_h264_options); /* ASO */ - readl(mfc_regs->e_h264_options); + reg = readl(mfc_regs->e_h264_options); reg &= ~(0x1 << 6); reg |= ((p_h264->aso & 0x1) << 6); writel(reg, mfc_regs->e_h264_options); /* hier qp enable */ - readl(mfc_regs->e_h264_options); + reg = readl(mfc_regs->e_h264_options); reg &= ~(0x1 << 8); reg |= ((p_h264->open_gop & 0x1) << 8); writel(reg, mfc_regs->e_h264_options); @@ -1134,7 +1134,7 @@ static int s5p_mfc_set_enc_params_h264(struct s5p_mfc_ctx *ctx) writel(reg, mfc_regs->e_h264_num_t_layer); /* frame packing SEI generation */ - readl(mfc_regs->e_h264_options); + reg = readl(mfc_regs->e_h264_options); reg &= ~(0x1 << 25); reg |= ((p_h264->sei_frame_packing & 0x1) << 25); writel(reg, mfc_regs->e_h264_options); diff --git a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c index 338b205ae3a7..88d0188397e7 100644 --- a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c +++ b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c @@ -940,6 +940,7 @@ static int configure_channels(struct c8sectpfei *fei) if (ret) { dev_err(fei->dev, "configure_memdma_and_inputblock failed\n"); + of_node_put(child); goto err_unmap; } index++; diff --git a/drivers/media/radio/si470x/radio-si470x-usb.c b/drivers/media/radio/si470x/radio-si470x-usb.c index 3f8634a46573..1365ae732b79 100644 --- a/drivers/media/radio/si470x/radio-si470x-usb.c +++ b/drivers/media/radio/si470x/radio-si470x-usb.c @@ -733,8 +733,10 @@ static int si470x_usb_driver_probe(struct usb_interface *intf, /* start radio */ retval = si470x_start_usb(radio); - if (retval < 0) + if (retval < 0 && !radio->int_in_running) goto err_buf; + else if (retval < 0) /* in case of radio->int_in_running == 1 */ + goto err_all; /* set initial frequency */ si470x_set_freq(radio, 87.5 * FREQ_MUL); /* available in all regions */ diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c index 97355e3ebdfd..72e4bb0fb71e 100644 --- a/drivers/media/rc/imon.c +++ b/drivers/media/rc/imon.c @@ -646,15 +646,14 @@ static int send_packet(struct imon_context *ictx) pr_err_ratelimited("error submitting urb(%d)\n", retval); } else { /* Wait for transmission to complete (or abort) */ - mutex_unlock(&ictx->lock); retval = wait_for_completion_interruptible( &ictx->tx.finished); if (retval) { usb_kill_urb(ictx->tx_urb); pr_err_ratelimited("task interrupted\n"); } - mutex_lock(&ictx->lock); + ictx->tx.busy = false; 
retval = ictx->tx.status; if (retval) pr_err_ratelimited("packet tx failed (%d)\n", retval); @@ -955,7 +954,8 @@ static ssize_t vfd_write(struct file *file, const char __user *buf, if (ictx->disconnected) return -ENODEV; - mutex_lock(&ictx->lock); + if (mutex_lock_interruptible(&ictx->lock)) + return -ERESTARTSYS; if (!ictx->dev_present_intf0) { pr_err_ratelimited("no iMON device present\n"); diff --git a/drivers/media/test-drivers/vidtv/vidtv_bridge.c b/drivers/media/test-drivers/vidtv/vidtv_bridge.c index 82620613d56b..dff7265a42ca 100644 --- a/drivers/media/test-drivers/vidtv/vidtv_bridge.c +++ b/drivers/media/test-drivers/vidtv/vidtv_bridge.c @@ -459,26 +459,20 @@ fail_dmx_conn: for (j = j - 1; j >= 0; --j) dvb->demux.dmx.remove_frontend(&dvb->demux.dmx, &dvb->dmx_fe[j]); -fail_dmx_dev: dvb_dmxdev_release(&dvb->dmx_dev); -fail_dmx: +fail_dmx_dev: dvb_dmx_release(&dvb->demux); -fail_fe: - for (j = i; j >= 0; --j) - dvb_unregister_frontend(dvb->fe[j]); -fail_tuner_probe: - for (j = i; j >= 0; --j) - if (dvb->i2c_client_tuner[j]) - dvb_module_release(dvb->i2c_client_tuner[j]); - +fail_dmx: fail_demod_probe: - for (j = i; j >= 0; --j) - if (dvb->i2c_client_demod[j]) - dvb_module_release(dvb->i2c_client_demod[j]); - + for (i = i - 1; i >= 0; --i) { + dvb_unregister_frontend(dvb->fe[i]); +fail_fe: + dvb_module_release(dvb->i2c_client_tuner[i]); +fail_tuner_probe: + dvb_module_release(dvb->i2c_client_demod[i]); + } fail_adapter: dvb_unregister_adapter(&dvb->adapter); - fail_i2c: i2c_del_adapter(&dvb->i2c_adapter); diff --git a/drivers/media/test-drivers/vimc/vimc-core.c b/drivers/media/test-drivers/vimc/vimc-core.c index 4b0ae6f51d76..857529ce3638 100644 --- a/drivers/media/test-drivers/vimc/vimc-core.c +++ b/drivers/media/test-drivers/vimc/vimc-core.c @@ -357,7 +357,7 @@ static int __init vimc_init(void) if (ret) { dev_err(&vimc_pdev.dev, "platform driver registration failed (err=%d)\n", ret); - platform_driver_unregister(&vimc_pdrv); + platform_device_unregister(&vimc_pdev); return ret; } diff --git a/drivers/media/test-drivers/vivid/vivid-vid-cap.c b/drivers/media/test-drivers/vivid/vivid-vid-cap.c index 99139a8cd4c4..331a3f4286d2 100644 --- a/drivers/media/test-drivers/vivid/vivid-vid-cap.c +++ b/drivers/media/test-drivers/vivid/vivid-vid-cap.c @@ -961,6 +961,7 @@ int vivid_vid_cap_s_selection(struct file *file, void *fh, struct v4l2_selection if (dev->has_compose_cap) { v4l2_rect_set_min_size(compose, &min_rect); v4l2_rect_set_max_size(compose, &max_rect); + v4l2_rect_map_inside(compose, &fmt); } dev->fmt_cap_rect = fmt; tpg_s_buf_height(&dev->tpg, fmt.height); diff --git a/drivers/media/usb/dvb-usb/az6027.c b/drivers/media/usb/dvb-usb/az6027.c index 86788771175b..32b4ee65c280 100644 --- a/drivers/media/usb/dvb-usb/az6027.c +++ b/drivers/media/usb/dvb-usb/az6027.c @@ -975,6 +975,10 @@ static int az6027_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], int n if (msg[i].addr == 0x99) { req = 0xBE; index = 0; + if (msg[i].len < 1) { + i = -EOPNOTSUPP; + break; + } value = msg[i].buf[0] & 0x00ff; length = 1; az6027_usb_out_op(d, req, value, index, data, length); diff --git a/drivers/media/usb/dvb-usb/dvb-usb-init.c b/drivers/media/usb/dvb-usb/dvb-usb-init.c index 61439c8f33ca..58eea8ab5477 100644 --- a/drivers/media/usb/dvb-usb/dvb-usb-init.c +++ b/drivers/media/usb/dvb-usb/dvb-usb-init.c @@ -81,7 +81,7 @@ static int dvb_usb_adapter_init(struct dvb_usb_device *d, short *adapter_nrs) ret = dvb_usb_adapter_stream_init(adap); if (ret) - return ret; + goto stream_init_err; ret = 
dvb_usb_adapter_dvb_init(adap, adapter_nrs); if (ret) @@ -114,6 +114,8 @@ frontend_init_err: dvb_usb_adapter_dvb_exit(adap); dvb_init_err: dvb_usb_adapter_stream_exit(adap); +stream_init_err: + kfree(adap->priv); return ret; } diff --git a/drivers/media/v4l2-core/v4l2-ctrls-core.c b/drivers/media/v4l2-core/v4l2-ctrls-core.c index 4cc1266f5eb9..9b50ad70476b 100644 --- a/drivers/media/v4l2-core/v4l2-ctrls-core.c +++ b/drivers/media/v4l2-core/v4l2-ctrls-core.c @@ -1575,7 +1575,7 @@ struct v4l2_ctrl *v4l2_ctrl_new_std_menu(struct v4l2_ctrl_handler *hdl, else if (type == V4L2_CTRL_TYPE_INTEGER_MENU) qmenu_int = v4l2_ctrl_get_int_menu(id, &qmenu_int_len); - if ((!qmenu && !qmenu_int) || (qmenu_int && max > qmenu_int_len)) { + if ((!qmenu && !qmenu_int) || (qmenu_int && max >= qmenu_int_len)) { handler_set_err(hdl, -EINVAL); return NULL; } diff --git a/drivers/media/v4l2-core/videobuf-dma-contig.c b/drivers/media/v4l2-core/videobuf-dma-contig.c index 52312ce2ba05..f2c439359557 100644 --- a/drivers/media/v4l2-core/videobuf-dma-contig.c +++ b/drivers/media/v4l2-core/videobuf-dma-contig.c @@ -36,12 +36,11 @@ struct videobuf_dma_contig_memory { static int __videobuf_dc_alloc(struct device *dev, struct videobuf_dma_contig_memory *mem, - unsigned long size, gfp_t flags) + unsigned long size) { mem->size = size; - mem->vaddr = dma_alloc_coherent(dev, mem->size, - &mem->dma_handle, flags); - + mem->vaddr = dma_alloc_coherent(dev, mem->size, &mem->dma_handle, + GFP_KERNEL); if (!mem->vaddr) { dev_err(dev, "memory alloc size %ld failed\n", mem->size); return -ENOMEM; @@ -258,8 +257,7 @@ static int __videobuf_iolock(struct videobuf_queue *q, return videobuf_dma_contig_user_get(mem, vb); /* allocate memory for the read() method */ - if (__videobuf_dc_alloc(q->dev, mem, PAGE_ALIGN(vb->size), - GFP_KERNEL)) + if (__videobuf_dc_alloc(q->dev, mem, PAGE_ALIGN(vb->size))) return -ENOMEM; break; case V4L2_MEMORY_OVERLAY: @@ -295,22 +293,18 @@ static int __videobuf_mmap_mapper(struct videobuf_queue *q, BUG_ON(!mem); MAGIC_CHECK(mem->magic, MAGIC_DC_MEM); - if (__videobuf_dc_alloc(q->dev, mem, PAGE_ALIGN(buf->bsize), - GFP_KERNEL | __GFP_COMP)) + if (__videobuf_dc_alloc(q->dev, mem, PAGE_ALIGN(buf->bsize))) goto error; - /* Try to remap memory */ - vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); - /* the "vm_pgoff" is just used in v4l2 to find the * corresponding buffer data structure which is allocated * earlier and it does not mean the offset from the physical * buffer start address as usual. So set it to 0 to pass - * the sanity check in vm_iomap_memory(). + * the sanity check in dma_mmap_coherent(). */ vma->vm_pgoff = 0; - - retval = vm_iomap_memory(vma, mem->dma_handle, mem->size); + retval = dma_mmap_coherent(q->dev, vma, mem->vaddr, mem->dma_handle, + mem->size); if (retval) { dev_err(q->dev, "mmap: remap failed with error %d. 
", retval); diff --git a/drivers/memstick/core/ms_block.c b/drivers/memstick/core/ms_block.c index f854822f84d6..7619c30b4ee1 100644 --- a/drivers/memstick/core/ms_block.c +++ b/drivers/memstick/core/ms_block.c @@ -2150,6 +2150,11 @@ static int msb_init_disk(struct memstick_dev *card) msb->usage_count = 1; msb->io_queue = alloc_ordered_workqueue("ms_block", WQ_MEM_RECLAIM); + if (!msb->io_queue) { + rc = -ENOMEM; + goto out_cleanup_disk; + } + INIT_WORK(&msb->io_work, msb_io_work); sg_init_table(msb->prealloc_sg, MS_BLOCK_MAX_SEGS+1); @@ -2157,10 +2162,16 @@ static int msb_init_disk(struct memstick_dev *card) set_disk_ro(msb->disk, 1); msb_start(card); - device_add_disk(&card->dev, msb->disk, NULL); + rc = device_add_disk(&card->dev, msb->disk, NULL); + if (rc) + goto out_destroy_workqueue; dbg("Disk added"); return 0; +out_destroy_workqueue: + destroy_workqueue(msb->io_queue); +out_cleanup_disk: + blk_cleanup_disk(msb->disk); out_free_tag_set: blk_mq_free_tag_set(&msb->tag_set); out_release_id: diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index 24e376bcb1ee..270f21711bc1 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -2013,6 +2013,7 @@ config MFD_ROHM_BD957XMUF depends on I2C=y depends on OF select REGMAP_I2C + select REGMAP_IRQ select MFD_CORE help Select this option to get support for the ROHM BD9576MUF and diff --git a/drivers/mfd/mt6360-core.c b/drivers/mfd/mt6360-core.c index 6eaa6775b888..d3b32eb79837 100644 --- a/drivers/mfd/mt6360-core.c +++ b/drivers/mfd/mt6360-core.c @@ -402,7 +402,7 @@ static int mt6360_regmap_read(void *context, const void *reg, size_t reg_size, struct mt6360_ddata *ddata = context; u8 bank = *(u8 *)reg; u8 reg_addr = *(u8 *)(reg + 1); - struct i2c_client *i2c = ddata->i2c[bank]; + struct i2c_client *i2c; bool crc_needed = false; u8 *buf; int buf_len = MT6360_ALLOC_READ_SIZE(val_size); @@ -410,6 +410,11 @@ static int mt6360_regmap_read(void *context, const void *reg, size_t reg_size, u8 crc; int ret; + if (bank >= MT6360_SLAVE_MAX) + return -EINVAL; + + i2c = ddata->i2c[bank]; + if (bank == MT6360_SLAVE_PMIC || bank == MT6360_SLAVE_LDO) { crc_needed = true; ret = mt6360_xlate_pmicldo_addr(®_addr, val_size); @@ -453,13 +458,18 @@ static int mt6360_regmap_write(void *context, const void *val, size_t val_size) struct mt6360_ddata *ddata = context; u8 bank = *(u8 *)val; u8 reg_addr = *(u8 *)(val + 1); - struct i2c_client *i2c = ddata->i2c[bank]; + struct i2c_client *i2c; bool crc_needed = false; u8 *buf; int buf_len = MT6360_ALLOC_WRITE_SIZE(val_size); int write_size = val_size - MT6360_REGMAP_REG_BYTE_SIZE; int ret; + if (bank >= MT6360_SLAVE_MAX) + return -EINVAL; + + i2c = ddata->i2c[bank]; + if (bank == MT6360_SLAVE_PMIC || bank == MT6360_SLAVE_LDO) { crc_needed = true; ret = mt6360_xlate_pmicldo_addr(®_addr, val_size - MT6360_REGMAP_REG_BYTE_SIZE); diff --git a/drivers/mfd/qcom-pm8008.c b/drivers/mfd/qcom-pm8008.c index c472d7f8103c..9f3c4a01b4c1 100644 --- a/drivers/mfd/qcom-pm8008.c +++ b/drivers/mfd/qcom-pm8008.c @@ -54,13 +54,6 @@ enum { #define PM8008_PERIPH_OFFSET(paddr) (paddr - PM8008_PERIPH_0_BASE) -struct pm8008_data { - struct device *dev; - struct regmap *regmap; - int irq; - struct regmap_irq_chip_data *irq_data; -}; - static unsigned int p0_offs[] = {PM8008_PERIPH_OFFSET(PM8008_PERIPH_0_BASE)}; static unsigned int p1_offs[] = {PM8008_PERIPH_OFFSET(PM8008_PERIPH_1_BASE)}; static unsigned int p2_offs[] = {PM8008_PERIPH_OFFSET(PM8008_PERIPH_2_BASE)}; @@ -150,7 +143,7 @@ static struct regmap_config qcom_mfd_regmap_cfg = { 
.max_register = 0xFFFF, }; -static int pm8008_init(struct pm8008_data *chip) +static int pm8008_init(struct regmap *regmap) { int rc; @@ -160,34 +153,31 @@ static int pm8008_init(struct pm8008_data *chip) * This is required to enable the writing of TYPE registers in * regmap_irq_sync_unlock(). */ - rc = regmap_write(chip->regmap, - (PM8008_TEMP_ALARM_ADDR | INT_SET_TYPE_OFFSET), - BIT(0)); + rc = regmap_write(regmap, (PM8008_TEMP_ALARM_ADDR | INT_SET_TYPE_OFFSET), BIT(0)); if (rc) return rc; /* Do the same for GPIO1 and GPIO2 peripherals */ - rc = regmap_write(chip->regmap, - (PM8008_GPIO1_ADDR | INT_SET_TYPE_OFFSET), BIT(0)); + rc = regmap_write(regmap, (PM8008_GPIO1_ADDR | INT_SET_TYPE_OFFSET), BIT(0)); if (rc) return rc; - rc = regmap_write(chip->regmap, - (PM8008_GPIO2_ADDR | INT_SET_TYPE_OFFSET), BIT(0)); + rc = regmap_write(regmap, (PM8008_GPIO2_ADDR | INT_SET_TYPE_OFFSET), BIT(0)); return rc; } -static int pm8008_probe_irq_peripherals(struct pm8008_data *chip, +static int pm8008_probe_irq_peripherals(struct device *dev, + struct regmap *regmap, int client_irq) { int rc, i; struct regmap_irq_type *type; struct regmap_irq_chip_data *irq_data; - rc = pm8008_init(chip); + rc = pm8008_init(regmap); if (rc) { - dev_err(chip->dev, "Init failed: %d\n", rc); + dev_err(dev, "Init failed: %d\n", rc); return rc; } @@ -207,10 +197,10 @@ static int pm8008_probe_irq_peripherals(struct pm8008_data *chip, IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW); } - rc = devm_regmap_add_irq_chip(chip->dev, chip->regmap, client_irq, + rc = devm_regmap_add_irq_chip(dev, regmap, client_irq, IRQF_SHARED, 0, &pm8008_irq_chip, &irq_data); if (rc) { - dev_err(chip->dev, "Failed to add IRQ chip: %d\n", rc); + dev_err(dev, "Failed to add IRQ chip: %d\n", rc); return rc; } @@ -220,26 +210,23 @@ static int pm8008_probe_irq_peripherals(struct pm8008_data *chip, static int pm8008_probe(struct i2c_client *client) { int rc; - struct pm8008_data *chip; + struct device *dev; + struct regmap *regmap; - chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL); - if (!chip) - return -ENOMEM; + dev = &client->dev; + regmap = devm_regmap_init_i2c(client, &qcom_mfd_regmap_cfg); + if (IS_ERR(regmap)) + return PTR_ERR(regmap); - chip->dev = &client->dev; - chip->regmap = devm_regmap_init_i2c(client, &qcom_mfd_regmap_cfg); - if (!chip->regmap) - return -ENODEV; + i2c_set_clientdata(client, regmap); - i2c_set_clientdata(client, chip); - - if (of_property_read_bool(chip->dev->of_node, "interrupt-controller")) { - rc = pm8008_probe_irq_peripherals(chip, client->irq); + if (of_property_read_bool(dev->of_node, "interrupt-controller")) { + rc = pm8008_probe_irq_peripherals(dev, regmap, client->irq); if (rc) - dev_err(chip->dev, "Failed to probe irq periphs: %d\n", rc); + dev_err(dev, "Failed to probe irq periphs: %d\n", rc); } - return devm_of_platform_populate(chip->dev); + return devm_of_platform_populate(dev); } static const struct of_device_id pm8008_match[] = { diff --git a/drivers/mfd/qcom_rpm.c b/drivers/mfd/qcom_rpm.c index 71bc34b74bc9..8fea0e511550 100644 --- a/drivers/mfd/qcom_rpm.c +++ b/drivers/mfd/qcom_rpm.c @@ -547,7 +547,7 @@ static int qcom_rpm_probe(struct platform_device *pdev) init_completion(&rpm->ack); /* Enable message RAM clock */ - rpm->ramclk = devm_clk_get(&pdev->dev, "ram"); + rpm->ramclk = devm_clk_get_enabled(&pdev->dev, "ram"); if (IS_ERR(rpm->ramclk)) { ret = PTR_ERR(rpm->ramclk); if (ret == -EPROBE_DEFER) @@ -558,7 +558,6 @@ static int qcom_rpm_probe(struct platform_device *pdev) */ rpm->ramclk = NULL; } - 
clk_prepare_enable(rpm->ramclk); /* Accepts NULL */ irq_ack = platform_get_irq_byname(pdev, "ack"); if (irq_ack < 0) @@ -673,22 +672,11 @@ static int qcom_rpm_probe(struct platform_device *pdev) if (ret) dev_warn(&pdev->dev, "failed to mark wakeup irq as wakeup\n"); - return of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev); -} - -static int qcom_rpm_remove(struct platform_device *pdev) -{ - struct qcom_rpm *rpm = dev_get_drvdata(&pdev->dev); - - of_platform_depopulate(&pdev->dev); - clk_disable_unprepare(rpm->ramclk); - - return 0; + return devm_of_platform_populate(&pdev->dev); } static struct platform_driver qcom_rpm_driver = { .probe = qcom_rpm_probe, - .remove = qcom_rpm_remove, .driver = { .name = "qcom_rpm", .of_match_table = qcom_rpm_of_match, diff --git a/drivers/misc/cxl/guest.c b/drivers/misc/cxl/guest.c index 186308f1f8eb..6334376826a9 100644 --- a/drivers/misc/cxl/guest.c +++ b/drivers/misc/cxl/guest.c @@ -959,10 +959,10 @@ int cxl_guest_init_afu(struct cxl *adapter, int slice, struct device_node *afu_n * if it returns an error! */ if ((rc = cxl_register_afu(afu))) - goto err_put1; + goto err_put_dev; if ((rc = cxl_sysfs_afu_add(afu))) - goto err_put1; + goto err_del_dev; /* * pHyp doesn't expose the programming models supported by the @@ -978,7 +978,7 @@ int cxl_guest_init_afu(struct cxl *adapter, int slice, struct device_node *afu_n afu->modes_supported = CXL_MODE_DIRECTED; if ((rc = cxl_afu_select_best_mode(afu))) - goto err_put2; + goto err_remove_sysfs; adapter->afu[afu->slice] = afu; @@ -998,10 +998,12 @@ int cxl_guest_init_afu(struct cxl *adapter, int slice, struct device_node *afu_n return 0; -err_put2: +err_remove_sysfs: cxl_sysfs_afu_remove(afu); -err_put1: - device_unregister(&afu->dev); +err_del_dev: + device_del(&afu->dev); +err_put_dev: + put_device(&afu->dev); free = false; guest_release_serr_irq(afu); err2: @@ -1135,18 +1137,20 @@ struct cxl *cxl_guest_init_adapter(struct device_node *np, struct platform_devic * even if it returns an error! */ if ((rc = cxl_register_adapter(adapter))) - goto err_put1; + goto err_put_dev; if ((rc = cxl_sysfs_adapter_add(adapter))) - goto err_put1; + goto err_del_dev; /* release the context lock as the adapter is configured */ cxl_adapter_context_unlock(adapter); return adapter; -err_put1: - device_unregister(&adapter->dev); +err_del_dev: + device_del(&adapter->dev); +err_put_dev: + put_device(&adapter->dev); free = false; cxl_guest_remove_chardev(adapter); err1: diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c index 2ba899f5659f..d183836d80e3 100644 --- a/drivers/misc/cxl/pci.c +++ b/drivers/misc/cxl/pci.c @@ -387,6 +387,7 @@ int cxl_calc_capp_routing(struct pci_dev *dev, u64 *chipid, rc = get_phb_index(np, phb_index); if (rc) { pr_err("cxl: invalid phb index\n"); + of_node_put(np); return rc; } @@ -1164,10 +1165,10 @@ static int pci_init_afu(struct cxl *adapter, int slice, struct pci_dev *dev) * if it returns an error! 
*/ if ((rc = cxl_register_afu(afu))) - goto err_put1; + goto err_put_dev; if ((rc = cxl_sysfs_afu_add(afu))) - goto err_put1; + goto err_del_dev; adapter->afu[afu->slice] = afu; @@ -1176,10 +1177,12 @@ static int pci_init_afu(struct cxl *adapter, int slice, struct pci_dev *dev) return 0; -err_put1: +err_del_dev: + device_del(&afu->dev); +err_put_dev: pci_deconfigure_afu(afu); cxl_debugfs_afu_remove(afu); - device_unregister(&afu->dev); + put_device(&afu->dev); return rc; err_free_native: @@ -1667,23 +1670,25 @@ static struct cxl *cxl_pci_init_adapter(struct pci_dev *dev) * even if it returns an error! */ if ((rc = cxl_register_adapter(adapter))) - goto err_put1; + goto err_put_dev; if ((rc = cxl_sysfs_adapter_add(adapter))) - goto err_put1; + goto err_del_dev; /* Release the context lock as adapter is configured */ cxl_adapter_context_unlock(adapter); return adapter; -err_put1: +err_del_dev: + device_del(&adapter->dev); +err_put_dev: /* This should mirror cxl_remove_adapter, except without the * sysfs parts */ cxl_debugfs_adapter_remove(adapter); cxl_deconfigure_adapter(adapter); - device_unregister(&adapter->dev); + put_device(&adapter->dev); return ERR_PTR(rc); err_release: diff --git a/drivers/misc/ocxl/config.c b/drivers/misc/ocxl/config.c index a68738f38252..f1f669efe050 100644 --- a/drivers/misc/ocxl/config.c +++ b/drivers/misc/ocxl/config.c @@ -204,6 +204,18 @@ static int read_dvsec_vendor(struct pci_dev *dev) return 0; } +/** + * get_dvsec_vendor0() - Find a related PCI device (function 0) + * @dev: PCI device to match + * @dev0: The PCI device (function 0) found + * @out_pos: The position of PCI device (function 0) + * + * Returns 0 on success, negative on failure. + * + * NOTE: If it's successful, the reference of dev0 is increased, + * so after using it, the callers must call pci_dev_put() to give + * up the reference. 
+ */ static int get_dvsec_vendor0(struct pci_dev *dev, struct pci_dev **dev0, int *out_pos) { @@ -213,10 +225,14 @@ static int get_dvsec_vendor0(struct pci_dev *dev, struct pci_dev **dev0, dev = get_function_0(dev); if (!dev) return -1; + } else { + dev = pci_dev_get(dev); } pos = find_dvsec(dev, OCXL_DVSEC_VENDOR_ID); - if (!pos) + if (!pos) { + pci_dev_put(dev); return -1; + } *dev0 = dev; *out_pos = pos; return 0; @@ -233,6 +249,7 @@ int ocxl_config_get_reset_reload(struct pci_dev *dev, int *val) pci_read_config_dword(dev0, pos + OCXL_DVSEC_VENDOR_RESET_RELOAD, &reset_reload); + pci_dev_put(dev0); *val = !!(reset_reload & BIT(0)); return 0; } @@ -254,6 +271,7 @@ int ocxl_config_set_reset_reload(struct pci_dev *dev, int val) reset_reload &= ~BIT(0); pci_write_config_dword(dev0, pos + OCXL_DVSEC_VENDOR_RESET_RELOAD, reset_reload); + pci_dev_put(dev0); return 0; } diff --git a/drivers/misc/ocxl/file.c b/drivers/misc/ocxl/file.c index 134806c2e67e..a199c7ce3f81 100644 --- a/drivers/misc/ocxl/file.c +++ b/drivers/misc/ocxl/file.c @@ -543,8 +543,11 @@ int ocxl_file_register_afu(struct ocxl_afu *afu) goto err_put; rc = device_register(&info->dev); - if (rc) - goto err_put; + if (rc) { + free_minor(info); + put_device(&info->dev); + return rc; + } rc = ocxl_sysfs_register_afu(info); if (rc) diff --git a/drivers/misc/sgi-gru/grufault.c b/drivers/misc/sgi-gru/grufault.c index d7ef61e602ed..b836936e9747 100644 --- a/drivers/misc/sgi-gru/grufault.c +++ b/drivers/misc/sgi-gru/grufault.c @@ -648,6 +648,7 @@ int gru_handle_user_call_os(unsigned long cb) if ((cb & (GRU_HANDLE_STRIDE - 1)) || ucbnum >= GRU_NUM_CB) return -EINVAL; +again: gts = gru_find_lock_gts(cb); if (!gts) return -EINVAL; @@ -656,7 +657,11 @@ int gru_handle_user_call_os(unsigned long cb) if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE) goto exit; - gru_check_context_placement(gts); + if (gru_check_context_placement(gts)) { + gru_unlock_gts(gts); + gru_unload_context(gts, 1); + goto again; + } /* * CCH may contain stale data if ts_force_cch_reload is set. @@ -874,7 +879,11 @@ int gru_set_context_option(unsigned long arg) } else { gts->ts_user_blade_id = req.val1; gts->ts_user_chiplet_id = req.val0; - gru_check_context_placement(gts); + if (gru_check_context_placement(gts)) { + gru_unlock_gts(gts); + gru_unload_context(gts, 1); + return ret; + } } break; case sco_gseg_owner: diff --git a/drivers/misc/sgi-gru/grumain.c b/drivers/misc/sgi-gru/grumain.c index 9afda47efbf2..3a16eb8e03f7 100644 --- a/drivers/misc/sgi-gru/grumain.c +++ b/drivers/misc/sgi-gru/grumain.c @@ -716,9 +716,10 @@ static int gru_check_chiplet_assignment(struct gru_state *gru, * chiplet. Misassignment can occur if the process migrates to a different * blade or if the user changes the selected blade/chiplet. */ -void gru_check_context_placement(struct gru_thread_state *gts) +int gru_check_context_placement(struct gru_thread_state *gts) { struct gru_state *gru; + int ret = 0; /* * If the current task is the context owner, verify that the @@ -726,15 +727,23 @@ void gru_check_context_placement(struct gru_thread_state *gts) * references. Pthread apps use non-owner references to the CBRs. */ gru = gts->ts_gru; + /* + * If gru or gts->ts_tgid_owner isn't initialized properly, return + * success to indicate that the caller does not need to unload the + * gru context.The caller is responsible for their inspection and + * reinitialization if needed. 
+ */ if (!gru || gts->ts_tgid_owner != current->tgid) - return; + return ret; if (!gru_check_chiplet_assignment(gru, gts)) { STAT(check_context_unload); - gru_unload_context(gts, 1); + ret = -EINVAL; } else if (gru_retarget_intr(gts)) { STAT(check_context_retarget_intr); } + + return ret; } @@ -934,7 +943,12 @@ again: mutex_lock(>s->ts_ctxlock); preempt_disable(); - gru_check_context_placement(gts); + if (gru_check_context_placement(gts)) { + preempt_enable(); + mutex_unlock(>s->ts_ctxlock); + gru_unload_context(gts, 1); + return VM_FAULT_NOPAGE; + } if (!gts->ts_gru) { STAT(load_user_context); diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h index e4c067c61251..5c9783150cdf 100644 --- a/drivers/misc/sgi-gru/grutables.h +++ b/drivers/misc/sgi-gru/grutables.h @@ -638,7 +638,7 @@ extern int gru_user_flush_tlb(unsigned long arg); extern int gru_user_unload_context(unsigned long arg); extern int gru_get_exception_detail(unsigned long arg); extern int gru_set_context_option(unsigned long address); -extern void gru_check_context_placement(struct gru_thread_state *gts); +extern int gru_check_context_placement(struct gru_thread_state *gts); extern int gru_cpu_fault_map_id(void); extern struct vm_area_struct *gru_find_vma(unsigned long vaddr); extern void gru_flush_all_tlb(struct gru_state *gru); diff --git a/drivers/misc/tifm_7xx1.c b/drivers/misc/tifm_7xx1.c index 228f2eb1d476..2aebbfda104d 100644 --- a/drivers/misc/tifm_7xx1.c +++ b/drivers/misc/tifm_7xx1.c @@ -190,7 +190,7 @@ static void tifm_7xx1_switch_media(struct work_struct *work) spin_unlock_irqrestore(&fm->lock, flags); } if (sock) - tifm_free_device(&sock->dev); + put_device(&sock->dev); } spin_lock_irqsave(&fm->lock, flags); } diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c index 86a8a1f56583..592166e53dce 100644 --- a/drivers/mmc/core/sd.c +++ b/drivers/mmc/core/sd.c @@ -1252,7 +1252,7 @@ static int sd_read_ext_regs(struct mmc_card *card) */ err = sd_read_ext_reg(card, 0, 0, 0, 512, gen_info_buf); if (err) { - pr_warn("%s: error %d reading general info of SD ext reg\n", + pr_err("%s: error %d reading general info of SD ext reg\n", mmc_hostname(card->host), err); goto out; } @@ -1266,7 +1266,12 @@ static int sd_read_ext_regs(struct mmc_card *card) /* Number of extensions to be find. */ num_ext = gen_info_buf[4]; - /* We support revision 0, but limit it to 512 bytes for simplicity. */ + /* + * We only support revision 0 and limit it to 512 bytes for simplicity. + * No matter what, let's return zero to allow us to continue using the + * card, even if we can't support the features from the SD function + * extensions registers. 
+ */ if (rev != 0 || len > 512) { pr_warn("%s: non-supported SD ext reg layout\n", mmc_hostname(card->host)); @@ -1281,7 +1286,7 @@ static int sd_read_ext_regs(struct mmc_card *card) for (i = 0; i < num_ext; i++) { err = sd_parse_ext_reg(card, gen_info_buf, &next_ext_addr); if (err) { - pr_warn("%s: error %d parsing SD ext reg\n", + pr_err("%s: error %d parsing SD ext reg\n", mmc_hostname(card->host), err); goto out; } diff --git a/drivers/mmc/host/alcor.c b/drivers/mmc/host/alcor.c index bfb8efeb7eb8..d01df01d4b4d 100644 --- a/drivers/mmc/host/alcor.c +++ b/drivers/mmc/host/alcor.c @@ -1114,7 +1114,10 @@ static int alcor_pci_sdmmc_drv_probe(struct platform_device *pdev) alcor_hw_init(host); dev_set_drvdata(&pdev->dev, host); - mmc_add_host(mmc); + ret = mmc_add_host(mmc); + if (ret) + goto free_host; + return 0; free_host: diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c index 807177c953f3..6f971a3e7e49 100644 --- a/drivers/mmc/host/atmel-mci.c +++ b/drivers/mmc/host/atmel-mci.c @@ -2223,6 +2223,7 @@ static int atmci_init_slot(struct atmel_mci *host, { struct mmc_host *mmc; struct atmel_mci_slot *slot; + int ret; mmc = mmc_alloc_host(sizeof(struct atmel_mci_slot), &host->pdev->dev); if (!mmc) @@ -2306,11 +2307,13 @@ static int atmci_init_slot(struct atmel_mci *host, host->slot[id] = slot; mmc_regulator_get_supply(mmc); - mmc_add_host(mmc); + ret = mmc_add_host(mmc); + if (ret) { + mmc_free_host(mmc); + return ret; + } if (gpio_is_valid(slot->detect_pin)) { - int ret; - timer_setup(&slot->detect_timer, atmci_detect_change, 0); ret = request_irq(gpio_to_irq(slot->detect_pin), diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c index 9b2e2548bd18..753f9ea254d4 100644 --- a/drivers/mmc/host/meson-gx-mmc.c +++ b/drivers/mmc/host/meson-gx-mmc.c @@ -1291,7 +1291,9 @@ static int meson_mmc_probe(struct platform_device *pdev) } mmc->ops = &meson_mmc_ops; - mmc_add_host(mmc); + ret = mmc_add_host(mmc); + if (ret) + goto err_free_irq; return 0; diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c index 3765e2f4ad98..2c4eda83ca18 100644 --- a/drivers/mmc/host/mmci.c +++ b/drivers/mmc/host/mmci.c @@ -2254,7 +2254,9 @@ static int mmci_probe(struct amba_device *dev, pm_runtime_set_autosuspend_delay(&dev->dev, 50); pm_runtime_use_autosuspend(&dev->dev); - mmc_add_host(mmc); + ret = mmc_add_host(mmc); + if (ret) + goto clk_disable; pm_runtime_put(&dev->dev); return 0; diff --git a/drivers/mmc/host/moxart-mmc.c b/drivers/mmc/host/moxart-mmc.c index dfc3ffd5b1f8..52ed30f2d9f4 100644 --- a/drivers/mmc/host/moxart-mmc.c +++ b/drivers/mmc/host/moxart-mmc.c @@ -665,7 +665,9 @@ static int moxart_probe(struct platform_device *pdev) goto out; dev_set_drvdata(dev, mmc); - mmc_add_host(mmc); + ret = mmc_add_host(mmc); + if (ret) + goto out; dev_dbg(dev, "IRQ=%d, FIFO is %d bytes\n", irq, host->fifo_width); diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c index 9bf95ba217fa..97227ad71715 100644 --- a/drivers/mmc/host/mxcmmc.c +++ b/drivers/mmc/host/mxcmmc.c @@ -1143,7 +1143,9 @@ static int mxcmci_probe(struct platform_device *pdev) timer_setup(&host->watchdog, mxcmci_watchdog, 0); - mmc_add_host(mmc); + ret = mmc_add_host(mmc); + if (ret) + goto out_free_dma; return 0; diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c index 2f8038d69f67..eb0bd46b7e81 100644 --- a/drivers/mmc/host/omap_hsmmc.c +++ b/drivers/mmc/host/omap_hsmmc.c @@ -1987,7 +1987,9 @@ static int omap_hsmmc_probe(struct platform_device *pdev) if (!ret) 
mmc->caps |= MMC_CAP_SDIO_IRQ; - mmc_add_host(mmc); + ret = mmc_add_host(mmc); + if (ret) + goto err_irq; if (mmc_pdata(host)->name != NULL) { ret = device_create_file(&mmc->class_dev, &dev_attr_slot_name); diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c index 55868b6b8658..e25e9bb34eb3 100644 --- a/drivers/mmc/host/pxamci.c +++ b/drivers/mmc/host/pxamci.c @@ -763,7 +763,12 @@ static int pxamci_probe(struct platform_device *pdev) dev_warn(dev, "gpio_ro and get_ro() both defined\n"); } - mmc_add_host(mmc); + ret = mmc_add_host(mmc); + if (ret) { + if (host->pdata && host->pdata->exit) + host->pdata->exit(dev, mmc); + goto out; + } return 0; diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c index 387f2a4f693a..12921fba4f52 100644 --- a/drivers/mmc/host/renesas_sdhi_core.c +++ b/drivers/mmc/host/renesas_sdhi_core.c @@ -520,7 +520,7 @@ static void renesas_sdhi_reset_hs400_mode(struct tmio_mmc_host *host, SH_MOBILE_SDHI_SCC_TMPPORT2_HS400OSEL) & sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_TMPPORT2)); - if (priv->adjust_hs400_calib_table) + if (priv->quirks && (priv->quirks->hs400_calib_table || priv->quirks->hs400_bad_taps)) renesas_sdhi_adjust_hs400_mode_disable(host); sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, CLK_CTL_SCLKEN | @@ -1037,11 +1037,14 @@ int renesas_sdhi_probe(struct platform_device *pdev, if (ver >= SDHI_VER_GEN3_SD) host->get_timeout_cycles = renesas_sdhi_gen3_get_cycles; + /* Check for SCC so we can reset it if needed */ + if (of_data && of_data->scc_offset && ver >= SDHI_VER_GEN2_SDR104) + priv->scc_ctl = host->ctl + of_data->scc_offset; + /* Enable tuning iff we have an SCC and a supported mode */ - if (of_data && of_data->scc_offset && - (host->mmc->caps & MMC_CAP_UHS_SDR104 || - host->mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR | - MMC_CAP2_HS400_1_8V))) { + if (priv->scc_ctl && (host->mmc->caps & MMC_CAP_UHS_SDR104 || + host->mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR | + MMC_CAP2_HS400_1_8V))) { const struct renesas_sdhi_scc *taps = of_data->taps; bool use_4tap = priv->quirks && priv->quirks->hs400_4taps; bool hit = false; @@ -1061,7 +1064,6 @@ int renesas_sdhi_probe(struct platform_device *pdev, if (!hit) dev_warn(&host->pdev->dev, "Unknown clock rate for tuning\n"); - priv->scc_ctl = host->ctl + of_data->scc_offset; host->check_retune = renesas_sdhi_check_scc_error; host->ops.execute_tuning = renesas_sdhi_execute_tuning; host->ops.prepare_hs400_tuning = renesas_sdhi_prepare_hs400_tuning; diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c index e1580f78c6b2..8098726dcc0b 100644 --- a/drivers/mmc/host/rtsx_pci_sdmmc.c +++ b/drivers/mmc/host/rtsx_pci_sdmmc.c @@ -1474,6 +1474,7 @@ static int rtsx_pci_sdmmc_drv_probe(struct platform_device *pdev) struct realtek_pci_sdmmc *host; struct rtsx_pcr *pcr; struct pcr_handle *handle = pdev->dev.platform_data; + int ret; if (!handle) return -ENXIO; @@ -1511,7 +1512,13 @@ static int rtsx_pci_sdmmc_drv_probe(struct platform_device *pdev) pm_runtime_mark_last_busy(&pdev->dev); pm_runtime_use_autosuspend(&pdev->dev); - mmc_add_host(mmc); + ret = mmc_add_host(mmc); + if (ret) { + pm_runtime_dont_use_autosuspend(&pdev->dev); + pm_runtime_disable(&pdev->dev); + mmc_free_host(mmc); + return ret; + } return 0; } diff --git a/drivers/mmc/host/rtsx_usb_sdmmc.c b/drivers/mmc/host/rtsx_usb_sdmmc.c index 5fe4528e296e..1be3a355f10d 100644 --- a/drivers/mmc/host/rtsx_usb_sdmmc.c +++ b/drivers/mmc/host/rtsx_usb_sdmmc.c @@ -1332,6 +1332,7 @@ static int 
rtsx_usb_sdmmc_drv_probe(struct platform_device *pdev) #ifdef RTSX_USB_USE_LEDS_CLASS int err; #endif + int ret; ucr = usb_get_intfdata(to_usb_interface(pdev->dev.parent)); if (!ucr) @@ -1368,7 +1369,15 @@ static int rtsx_usb_sdmmc_drv_probe(struct platform_device *pdev) INIT_WORK(&host->led_work, rtsx_usb_update_led); #endif - mmc_add_host(mmc); + ret = mmc_add_host(mmc); + if (ret) { +#ifdef RTSX_USB_USE_LEDS_CLASS + led_classdev_unregister(&host->led); +#endif + mmc_free_host(mmc); + pm_runtime_disable(&pdev->dev); + return ret; + } return 0; } diff --git a/drivers/mmc/host/sdhci-sprd.c b/drivers/mmc/host/sdhci-sprd.c index e85c95f3a868..256260339f69 100644 --- a/drivers/mmc/host/sdhci-sprd.c +++ b/drivers/mmc/host/sdhci-sprd.c @@ -224,13 +224,15 @@ static inline void _sdhci_sprd_set_clock(struct sdhci_host *host, div = ((div & 0x300) >> 2) | ((div & 0xFF) << 8); sdhci_enable_clk(host, div); - /* enable auto gate sdhc_enable_auto_gate */ - val = sdhci_readl(host, SDHCI_SPRD_REG_32_BUSY_POSI); - mask = SDHCI_SPRD_BIT_OUTR_CLK_AUTO_EN | - SDHCI_SPRD_BIT_INNR_CLK_AUTO_EN; - if (mask != (val & mask)) { - val |= mask; - sdhci_writel(host, val, SDHCI_SPRD_REG_32_BUSY_POSI); + /* Enable CLK_AUTO when the clock is greater than 400K. */ + if (clk > 400000) { + val = sdhci_readl(host, SDHCI_SPRD_REG_32_BUSY_POSI); + mask = SDHCI_SPRD_BIT_OUTR_CLK_AUTO_EN | + SDHCI_SPRD_BIT_INNR_CLK_AUTO_EN; + if (mask != (val & mask)) { + val |= mask; + sdhci_writel(host, val, SDHCI_SPRD_REG_32_BUSY_POSI); + } } } diff --git a/drivers/mmc/host/sdhci_f_sdh30.c b/drivers/mmc/host/sdhci_f_sdh30.c index 3f5977979cf2..6c4f43e11282 100644 --- a/drivers/mmc/host/sdhci_f_sdh30.c +++ b/drivers/mmc/host/sdhci_f_sdh30.c @@ -168,6 +168,9 @@ static int sdhci_f_sdh30_probe(struct platform_device *pdev) if (reg & SDHCI_CAN_DO_8BIT) priv->vendor_hs200 = F_SDH30_EMMC_HS200; + if (!(reg & SDHCI_TIMEOUT_CLK_MASK)) + host->quirks |= SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK; + ret = sdhci_add_host(host); if (ret) goto err_add_host; diff --git a/drivers/mmc/host/toshsd.c b/drivers/mmc/host/toshsd.c index 8d037c2071ab..497791ffada6 100644 --- a/drivers/mmc/host/toshsd.c +++ b/drivers/mmc/host/toshsd.c @@ -651,7 +651,9 @@ static int toshsd_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (ret) goto unmap; - mmc_add_host(mmc); + ret = mmc_add_host(mmc); + if (ret) + goto free_irq; base = pci_resource_start(pdev, 0); dev_dbg(&pdev->dev, "MMIO %pa, IRQ %d\n", &base, pdev->irq); @@ -660,6 +662,8 @@ static int toshsd_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return 0; +free_irq: + free_irq(pdev->irq, host); unmap: pci_iounmap(pdev, host->ioaddr); release: diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c index 88662a90ed96..a2b0d9461665 100644 --- a/drivers/mmc/host/via-sdmmc.c +++ b/drivers/mmc/host/via-sdmmc.c @@ -1151,7 +1151,9 @@ static int via_sd_probe(struct pci_dev *pcidev, pcidev->subsystem_device == 0x3891) sdhost->quirks = VIA_CRDR_QUIRK_300MS_PWRDELAY; - mmc_add_host(mmc); + ret = mmc_add_host(mmc); + if (ret) + goto unmap; return 0; diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c index 97beece62fec..72f65f32abbc 100644 --- a/drivers/mmc/host/vub300.c +++ b/drivers/mmc/host/vub300.c @@ -2049,6 +2049,7 @@ static void vub300_enable_sdio_irq(struct mmc_host *mmc, int enable) return; kref_get(&vub300->kref); if (enable) { + set_current_state(TASK_RUNNING); mutex_lock(&vub300->irq_mutex); if (vub300->irqs_queued) { vub300->irqs_queued -= 1; @@ -2064,6 +2065,7 @@ 
static void vub300_enable_sdio_irq(struct mmc_host *mmc, int enable) vub300_queue_poll_work(vub300, 0); } mutex_unlock(&vub300->irq_mutex); + set_current_state(TASK_INTERRUPTIBLE); } else { vub300->irq_enabled = 0; } @@ -2299,14 +2301,14 @@ static int vub300_probe(struct usb_interface *interface, 0x0000, 0x0000, &vub300->system_port_status, sizeof(vub300->system_port_status), 1000); if (retval < 0) { - goto error4; + goto error5; } else if (sizeof(vub300->system_port_status) == retval) { vub300->card_present = (0x0001 & vub300->system_port_status.port_flags) ? 1 : 0; vub300->read_only = (0x0010 & vub300->system_port_status.port_flags) ? 1 : 0; } else { - goto error4; + goto error5; } usb_set_intfdata(interface, vub300); INIT_DELAYED_WORK(&vub300->pollwork, vub300_pollwork_thread); @@ -2329,8 +2331,13 @@ static int vub300_probe(struct usb_interface *interface, "USB vub300 remote SDIO host controller[%d]" "connected with no SD/SDIO card inserted\n", interface_to_InterfaceNumber(interface)); - mmc_add_host(mmc); + retval = mmc_add_host(mmc); + if (retval) + goto error6; + return 0; +error6: + del_timer_sync(&vub300->inactivity_timer); error5: mmc_free_host(mmc); /* diff --git a/drivers/mmc/host/wbsd.c b/drivers/mmc/host/wbsd.c index 67ecd342fe5f..7c7ec8d10232 100644 --- a/drivers/mmc/host/wbsd.c +++ b/drivers/mmc/host/wbsd.c @@ -1698,7 +1698,17 @@ static int wbsd_init(struct device *dev, int base, int irq, int dma, */ wbsd_init_device(host); - mmc_add_host(mmc); + ret = mmc_add_host(mmc); + if (ret) { + if (!pnp) + wbsd_chip_poweroff(host); + + wbsd_release_resources(host); + wbsd_free_mmc(dev); + + mmc_free_host(mmc); + return ret; + } pr_info("%s: W83L51xD", mmc_hostname(mmc)); if (host->chip_id != 0) diff --git a/drivers/mmc/host/wmt-sdmmc.c b/drivers/mmc/host/wmt-sdmmc.c index 8df722ec57ed..393319548857 100644 --- a/drivers/mmc/host/wmt-sdmmc.c +++ b/drivers/mmc/host/wmt-sdmmc.c @@ -859,11 +859,15 @@ static int wmt_mci_probe(struct platform_device *pdev) /* configure the controller to a known 'ready' state */ wmt_reset_hardware(mmc); - mmc_add_host(mmc); + ret = mmc_add_host(mmc); + if (ret) + goto fail7; dev_info(&pdev->dev, "WMT SDHC Controller initialized\n"); return 0; +fail7: + clk_disable_unprepare(priv->clk_sdmmc); fail6: clk_put(priv->clk_sdmmc); fail5_and_a_half: diff --git a/drivers/mtd/lpddr/lpddr2_nvm.c b/drivers/mtd/lpddr/lpddr2_nvm.c index 72f5c7b30079..add4386f99f0 100644 --- a/drivers/mtd/lpddr/lpddr2_nvm.c +++ b/drivers/mtd/lpddr/lpddr2_nvm.c @@ -433,6 +433,8 @@ static int lpddr2_nvm_probe(struct platform_device *pdev) /* lpddr2_nvm address range */ add_range = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!add_range) + return -ENODEV; /* Populate map_info data structure */ *map = (struct map_info) { diff --git a/drivers/mtd/maps/pxa2xx-flash.c b/drivers/mtd/maps/pxa2xx-flash.c index 7d96758a8f04..6e5e55755970 100644 --- a/drivers/mtd/maps/pxa2xx-flash.c +++ b/drivers/mtd/maps/pxa2xx-flash.c @@ -66,6 +66,7 @@ static int pxa2xx_flash_probe(struct platform_device *pdev) if (!info->map.virt) { printk(KERN_WARNING "Failed to ioremap %s\n", info->map.name); + kfree(info); return -ENOMEM; } info->map.cached = ioremap_cache(info->map.phys, info->map.size); @@ -87,6 +88,7 @@ static int pxa2xx_flash_probe(struct platform_device *pdev) iounmap((void *)info->map.virt); if (info->map.cached) iounmap(info->map.cached); + kfree(info); return -EIO; } info->mtd->dev.parent = &pdev->dev; diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c index 61f236e0378a..3abaac109e75 
100644 --- a/drivers/mtd/mtdcore.c +++ b/drivers/mtd/mtdcore.c @@ -671,8 +671,10 @@ int add_mtd_device(struct mtd_info *mtd) dev_set_drvdata(&mtd->dev, mtd); of_node_get(mtd_get_of_node(mtd)); error = device_register(&mtd->dev); - if (error) + if (error) { + put_device(&mtd->dev); goto fail_added; + } /* Add the nvmem provider */ error = mtd_nvmem_add(mtd); diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c index eb5d7b3d1860..d5dcc74a625e 100644 --- a/drivers/mtd/spi-nor/core.c +++ b/drivers/mtd/spi-nor/core.c @@ -1409,6 +1409,8 @@ spi_nor_find_best_erase_type(const struct spi_nor_erase_map *map, continue; erase = &map->erase_type[i]; + if (!erase->size) + continue; /* Alignment is not mandatory for overlaid regions */ if (region->offset & SNOR_OVERLAID_REGION && @@ -2155,7 +2157,8 @@ static int spi_nor_spimem_check_readop(struct spi_nor *nor, spi_nor_spimem_setup_op(nor, &op, read->proto); /* convert the dummy cycles to the number of bytes */ - op.dummy.nbytes = (nor->read_dummy * op.dummy.buswidth) / 8; + op.dummy.nbytes = (read->num_mode_clocks + read->num_wait_states) * + op.dummy.buswidth / 8; if (spi_nor_protocol_is_dtr(nor->read_proto)) op.dummy.nbytes *= 2; diff --git a/drivers/mtd/spi-nor/sysfs.c b/drivers/mtd/spi-nor/sysfs.c index 9aec9d8a98ad..4c3b351aef24 100644 --- a/drivers/mtd/spi-nor/sysfs.c +++ b/drivers/mtd/spi-nor/sysfs.c @@ -67,6 +67,19 @@ static struct bin_attribute *spi_nor_sysfs_bin_entries[] = { NULL }; +static umode_t spi_nor_sysfs_is_visible(struct kobject *kobj, + struct attribute *attr, int n) +{ + struct spi_device *spi = to_spi_device(kobj_to_dev(kobj)); + struct spi_mem *spimem = spi_get_drvdata(spi); + struct spi_nor *nor = spi_mem_get_drvdata(spimem); + + if (attr == &dev_attr_jedec_id.attr && !nor->info->id_len) + return 0; + + return 0444; +} + static umode_t spi_nor_sysfs_is_bin_visible(struct kobject *kobj, struct bin_attribute *attr, int n) { @@ -82,6 +95,7 @@ static umode_t spi_nor_sysfs_is_bin_visible(struct kobject *kobj, static const struct attribute_group spi_nor_sysfs_group = { .name = "spi-nor", + .is_visible = spi_nor_sysfs_is_visible, .is_bin_visible = spi_nor_sysfs_is_bin_visible, .attrs = spi_nor_sysfs_entries, .bin_attrs = spi_nor_sysfs_bin_entries, diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index 8ad095c19f27..ff6d4e74a186 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c @@ -1539,6 +1539,7 @@ static void ad_port_selection_logic(struct port *port, bool *update_slave_arr) slave_err(bond->dev, port->slave->dev, "Port %d did not find a suitable aggregator\n", port->actor_port_number); + return; } } /* if all aggregator's ports are READY_N == TRUE, set ready=TRUE diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 402dffc508ef..456298919d54 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -2499,12 +2499,21 @@ static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *in /* called with rcu_read_lock() */ static int bond_miimon_inspect(struct bonding *bond) { + bool ignore_updelay = false; int link_state, commit = 0; struct list_head *iter; struct slave *slave; - bool ignore_updelay; - ignore_updelay = !rcu_dereference(bond->curr_active_slave); + if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) { + ignore_updelay = !rcu_dereference(bond->curr_active_slave); + } else { + struct bond_up_slave *usable_slaves; + + usable_slaves = rcu_dereference(bond->usable_slaves); + + if 
(usable_slaves && usable_slaves->count == 0) + ignore_updelay = true; + } bond_for_each_slave_rcu(bond, slave, iter) { bond_propose_link_state(slave, BOND_LINK_NOCHANGE); diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c index 46ab6155795c..e027229c1955 100644 --- a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c @@ -1248,10 +1248,17 @@ static int m_can_set_bittiming(struct net_device *dev) * - setup bittiming * - configure timestamp generation */ -static void m_can_chip_config(struct net_device *dev) +static int m_can_chip_config(struct net_device *dev) { struct m_can_classdev *cdev = netdev_priv(dev); u32 cccr, test; + int err; + + err = m_can_init_ram(cdev); + if (err) { + dev_err(cdev->dev, "Message RAM configuration failed\n"); + return err; + } m_can_config_endisable(cdev, true); @@ -1375,18 +1382,25 @@ static void m_can_chip_config(struct net_device *dev) if (cdev->ops->init) cdev->ops->init(cdev); + + return 0; } -static void m_can_start(struct net_device *dev) +static int m_can_start(struct net_device *dev) { struct m_can_classdev *cdev = netdev_priv(dev); + int ret; /* basic m_can configuration */ - m_can_chip_config(dev); + ret = m_can_chip_config(dev); + if (ret) + return ret; cdev->can.state = CAN_STATE_ERROR_ACTIVE; m_can_enable_all_interrupts(cdev); + + return 0; } static int m_can_set_mode(struct net_device *dev, enum can_mode mode) @@ -1824,7 +1838,9 @@ static int m_can_open(struct net_device *dev) } /* start the m_can controller */ - m_can_start(dev); + err = m_can_start(dev); + if (err) + goto exit_irq_fail; can_led_event(dev, CAN_LED_EVENT_OPEN); @@ -2082,9 +2098,13 @@ int m_can_class_resume(struct device *dev) ret = m_can_clk_start(cdev); if (ret) return ret; + ret = m_can_start(ndev); + if (ret) { + m_can_clk_stop(cdev); + + return ret; + } - m_can_init_ram(cdev); - m_can_start(ndev); netif_device_attach(ndev); netif_start_queue(ndev); } diff --git a/drivers/net/can/m_can/m_can_platform.c b/drivers/net/can/m_can/m_can_platform.c index eee47bad0592..de6d8e01bf2e 100644 --- a/drivers/net/can/m_can/m_can_platform.c +++ b/drivers/net/can/m_can/m_can_platform.c @@ -140,10 +140,6 @@ static int m_can_plat_probe(struct platform_device *pdev) platform_set_drvdata(pdev, mcan_class); - ret = m_can_init_ram(mcan_class); - if (ret) - goto probe_fail; - pm_runtime_enable(mcan_class->dev); ret = m_can_class_register(mcan_class); if (ret) diff --git a/drivers/net/can/m_can/tcan4x5x-core.c b/drivers/net/can/m_can/tcan4x5x-core.c index 04687b15b250..c83b347be1cf 100644 --- a/drivers/net/can/m_can/tcan4x5x-core.c +++ b/drivers/net/can/m_can/tcan4x5x-core.c @@ -10,7 +10,7 @@ #define TCAN4X5X_DEV_ID1 0x04 #define TCAN4X5X_REV 0x08 #define TCAN4X5X_STATUS 0x0C -#define TCAN4X5X_ERROR_STATUS 0x10 +#define TCAN4X5X_ERROR_STATUS_MASK 0x10 #define TCAN4X5X_CONTROL 0x14 #define TCAN4X5X_CONFIG 0x800 @@ -204,17 +204,7 @@ static int tcan4x5x_clear_interrupts(struct m_can_classdev *cdev) if (ret) return ret; - ret = tcan4x5x_write_tcan_reg(cdev, TCAN4X5X_MCAN_INT_REG, - TCAN4X5X_ENABLE_MCAN_INT); - if (ret) - return ret; - - ret = tcan4x5x_write_tcan_reg(cdev, TCAN4X5X_INT_FLAGS, - TCAN4X5X_CLEAR_ALL_INT); - if (ret) - return ret; - - return tcan4x5x_write_tcan_reg(cdev, TCAN4X5X_ERROR_STATUS, + return tcan4x5x_write_tcan_reg(cdev, TCAN4X5X_INT_FLAGS, TCAN4X5X_CLEAR_ALL_INT); } @@ -234,8 +224,8 @@ static int tcan4x5x_init(struct m_can_classdev *cdev) if (ret) return ret; - /* Zero out the MCAN buffers */ - ret = m_can_init_ram(cdev); + ret = 
tcan4x5x_write_tcan_reg(cdev, TCAN4X5X_ERROR_STATUS_MASK, + TCAN4X5X_CLEAR_ALL_INT); if (ret) return ret; diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb.h b/drivers/net/can/usb/kvaser_usb/kvaser_usb.h index 62958f04a2f2..5699531f8787 100644 --- a/drivers/net/can/usb/kvaser_usb/kvaser_usb.h +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb.h @@ -76,6 +76,14 @@ struct kvaser_usb_tx_urb_context { int dlc; }; +struct kvaser_usb_busparams { + __le32 bitrate; + u8 tseg1; + u8 tseg2; + u8 sjw; + u8 nsamples; +} __packed; + struct kvaser_usb { struct usb_device *udev; struct usb_interface *intf; @@ -104,13 +112,19 @@ struct kvaser_usb_net_priv { struct can_priv can; struct can_berr_counter bec; + /* subdriver-specific data */ + void *sub_priv; + struct kvaser_usb *dev; struct net_device *netdev; int channel; - struct completion start_comp, stop_comp, flush_comp; + struct completion start_comp, stop_comp, flush_comp, + get_busparams_comp; struct usb_anchor tx_submitted; + struct kvaser_usb_busparams busparams_nominal, busparams_data; + spinlock_t tx_contexts_lock; /* lock for active_tx_contexts */ int active_tx_contexts; struct kvaser_usb_tx_urb_context tx_contexts[]; @@ -120,11 +134,15 @@ struct kvaser_usb_net_priv { * struct kvaser_usb_dev_ops - Device specific functions * @dev_set_mode: used for can.do_set_mode * @dev_set_bittiming: used for can.do_set_bittiming + * @dev_get_busparams: readback arbitration busparams * @dev_set_data_bittiming: used for can.do_set_data_bittiming + * @dev_get_data_busparams: readback data busparams * @dev_get_berr_counter: used for can.do_get_berr_counter * * @dev_setup_endpoints: setup USB in and out endpoints * @dev_init_card: initialize card + * @dev_init_channel: initialize channel + * @dev_remove_channel: uninitialize channel * @dev_get_software_info: get software info * @dev_get_software_details: get software details * @dev_get_card_info: get card info @@ -140,12 +158,18 @@ struct kvaser_usb_net_priv { */ struct kvaser_usb_dev_ops { int (*dev_set_mode)(struct net_device *netdev, enum can_mode mode); - int (*dev_set_bittiming)(struct net_device *netdev); - int (*dev_set_data_bittiming)(struct net_device *netdev); + int (*dev_set_bittiming)(const struct net_device *netdev, + const struct kvaser_usb_busparams *busparams); + int (*dev_get_busparams)(struct kvaser_usb_net_priv *priv); + int (*dev_set_data_bittiming)(const struct net_device *netdev, + const struct kvaser_usb_busparams *busparams); + int (*dev_get_data_busparams)(struct kvaser_usb_net_priv *priv); int (*dev_get_berr_counter)(const struct net_device *netdev, struct can_berr_counter *bec); int (*dev_setup_endpoints)(struct kvaser_usb *dev); int (*dev_init_card)(struct kvaser_usb *dev); + int (*dev_init_channel)(struct kvaser_usb_net_priv *priv); + void (*dev_remove_channel)(struct kvaser_usb_net_priv *priv); int (*dev_get_software_info)(struct kvaser_usb *dev); int (*dev_get_software_details)(struct kvaser_usb *dev); int (*dev_get_card_info)(struct kvaser_usb *dev); diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c index bdcaccf8e2b2..09dbc51347d7 100644 --- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c @@ -443,10 +443,6 @@ static int kvaser_usb_open(struct net_device *netdev) if (err) return err; - err = kvaser_usb_setup_rx_urbs(dev); - if (err) - goto error; - err = ops->dev_set_opt_mode(priv); if (err) goto error; @@ -537,6 +533,93 @@ static int kvaser_usb_close(struct 
net_device *netdev) return 0; } +static int kvaser_usb_set_bittiming(struct net_device *netdev) +{ + struct kvaser_usb_net_priv *priv = netdev_priv(netdev); + struct kvaser_usb *dev = priv->dev; + const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops; + struct can_bittiming *bt = &priv->can.bittiming; + + struct kvaser_usb_busparams busparams; + int tseg1 = bt->prop_seg + bt->phase_seg1; + int tseg2 = bt->phase_seg2; + int sjw = bt->sjw; + int err = -EOPNOTSUPP; + + busparams.bitrate = cpu_to_le32(bt->bitrate); + busparams.sjw = (u8)sjw; + busparams.tseg1 = (u8)tseg1; + busparams.tseg2 = (u8)tseg2; + if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) + busparams.nsamples = 3; + else + busparams.nsamples = 1; + + err = ops->dev_set_bittiming(netdev, &busparams); + if (err) + return err; + + err = kvaser_usb_setup_rx_urbs(priv->dev); + if (err) + return err; + + err = ops->dev_get_busparams(priv); + if (err) { + /* Treat EOPNOTSUPP as success */ + if (err == -EOPNOTSUPP) + err = 0; + return err; + } + + if (memcmp(&busparams, &priv->busparams_nominal, + sizeof(priv->busparams_nominal)) != 0) + err = -EINVAL; + + return err; +} + +static int kvaser_usb_set_data_bittiming(struct net_device *netdev) +{ + struct kvaser_usb_net_priv *priv = netdev_priv(netdev); + struct kvaser_usb *dev = priv->dev; + const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops; + struct can_bittiming *dbt = &priv->can.data_bittiming; + + struct kvaser_usb_busparams busparams; + int tseg1 = dbt->prop_seg + dbt->phase_seg1; + int tseg2 = dbt->phase_seg2; + int sjw = dbt->sjw; + int err; + + if (!ops->dev_set_data_bittiming || + !ops->dev_get_data_busparams) + return -EOPNOTSUPP; + + busparams.bitrate = cpu_to_le32(dbt->bitrate); + busparams.sjw = (u8)sjw; + busparams.tseg1 = (u8)tseg1; + busparams.tseg2 = (u8)tseg2; + busparams.nsamples = 1; + + err = ops->dev_set_data_bittiming(netdev, &busparams); + if (err) + return err; + + err = kvaser_usb_setup_rx_urbs(priv->dev); + if (err) + return err; + + err = ops->dev_get_data_busparams(priv); + if (err) + return err; + + if (memcmp(&busparams, &priv->busparams_data, + sizeof(priv->busparams_data)) != 0) + err = -EINVAL; + + return err; +} + static void kvaser_usb_write_bulk_callback(struct urb *urb) { struct kvaser_usb_tx_urb_context *context = urb->context; @@ -672,6 +755,7 @@ static const struct net_device_ops kvaser_usb_netdev_ops = { static void kvaser_usb_remove_interfaces(struct kvaser_usb *dev) { + const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops; int i; for (i = 0; i < dev->nchannels; i++) { @@ -687,6 +771,9 @@ static void kvaser_usb_remove_interfaces(struct kvaser_usb *dev) if (!dev->nets[i]) continue; + if (ops->dev_remove_channel) + ops->dev_remove_channel(dev->nets[i]); + free_candev(dev->nets[i]->netdev); } } @@ -718,6 +805,7 @@ static int kvaser_usb_init_one(struct kvaser_usb *dev, int channel) init_completion(&priv->start_comp); init_completion(&priv->stop_comp); init_completion(&priv->flush_comp); + init_completion(&priv->get_busparams_comp); priv->can.ctrlmode_supported = 0; priv->dev = dev; @@ -730,7 +818,7 @@ static int kvaser_usb_init_one(struct kvaser_usb *dev, int channel) priv->can.state = CAN_STATE_STOPPED; priv->can.clock.freq = dev->cfg->clock.freq; priv->can.bittiming_const = dev->cfg->bittiming_const; - priv->can.do_set_bittiming = ops->dev_set_bittiming; + priv->can.do_set_bittiming = kvaser_usb_set_bittiming; priv->can.do_set_mode = ops->dev_set_mode; if ((driver_info->quirks & KVASER_USB_QUIRK_HAS_TXRX_ERRORS) || 
(priv->dev->card_data.capabilities & KVASER_USB_CAP_BERR_CAP)) @@ -742,7 +830,7 @@ static int kvaser_usb_init_one(struct kvaser_usb *dev, int channel) if (priv->can.ctrlmode_supported & CAN_CTRLMODE_FD) { priv->can.data_bittiming_const = dev->cfg->data_bittiming_const; - priv->can.do_set_data_bittiming = ops->dev_set_data_bittiming; + priv->can.do_set_data_bittiming = kvaser_usb_set_data_bittiming; } netdev->flags |= IFF_ECHO; @@ -754,17 +842,26 @@ static int kvaser_usb_init_one(struct kvaser_usb *dev, int channel) dev->nets[channel] = priv; + if (ops->dev_init_channel) { + err = ops->dev_init_channel(priv); + if (err) + goto err; + } + err = register_candev(netdev); if (err) { dev_err(&dev->intf->dev, "Failed to register CAN device\n"); - free_candev(netdev); - dev->nets[channel] = NULL; - return err; + goto err; } netdev_dbg(netdev, "device registered\n"); return 0; + +err: + free_candev(netdev); + dev->nets[channel] = NULL; + return err; } static int kvaser_usb_probe(struct usb_interface *intf, diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c index 3ff2cd9828d2..6cc65bf28d03 100644 --- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include @@ -44,6 +45,8 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_rt; /* Minihydra command IDs */ #define CMD_SET_BUSPARAMS_REQ 16 +#define CMD_GET_BUSPARAMS_REQ 17 +#define CMD_GET_BUSPARAMS_RESP 18 #define CMD_GET_CHIP_STATE_REQ 19 #define CMD_CHIP_STATE_EVENT 20 #define CMD_SET_DRIVERMODE_REQ 21 @@ -195,21 +198,26 @@ struct kvaser_cmd_chip_state_event { #define KVASER_USB_HYDRA_BUS_MODE_CANFD_ISO 0x01 #define KVASER_USB_HYDRA_BUS_MODE_NONISO 0x02 struct kvaser_cmd_set_busparams { - __le32 bitrate; - u8 tseg1; - u8 tseg2; - u8 sjw; - u8 nsamples; + struct kvaser_usb_busparams busparams_nominal; u8 reserved0[4]; - __le32 bitrate_d; - u8 tseg1_d; - u8 tseg2_d; - u8 sjw_d; - u8 nsamples_d; + struct kvaser_usb_busparams busparams_data; u8 canfd_mode; u8 reserved1[7]; } __packed; +/* Busparam type */ +#define KVASER_USB_HYDRA_BUSPARAM_TYPE_CAN 0x00 +#define KVASER_USB_HYDRA_BUSPARAM_TYPE_CANFD 0x01 +struct kvaser_cmd_get_busparams_req { + u8 type; + u8 reserved[27]; +} __packed; + +struct kvaser_cmd_get_busparams_res { + struct kvaser_usb_busparams busparams; + u8 reserved[20]; +} __packed; + /* Ctrl modes */ #define KVASER_USB_HYDRA_CTRLMODE_NORMAL 0x01 #define KVASER_USB_HYDRA_CTRLMODE_LISTEN 0x02 @@ -280,6 +288,8 @@ struct kvaser_cmd { struct kvaser_cmd_error_event error_event; struct kvaser_cmd_set_busparams set_busparams_req; + struct kvaser_cmd_get_busparams_req get_busparams_req; + struct kvaser_cmd_get_busparams_res get_busparams_res; struct kvaser_cmd_chip_state_event chip_state_event; @@ -295,6 +305,7 @@ struct kvaser_cmd { #define KVASER_USB_HYDRA_CF_FLAG_OVERRUN BIT(1) #define KVASER_USB_HYDRA_CF_FLAG_REMOTE_FRAME BIT(4) #define KVASER_USB_HYDRA_CF_FLAG_EXTENDED_ID BIT(5) +#define KVASER_USB_HYDRA_CF_FLAG_TX_ACK BIT(6) /* CAN frame flags. 
Used in ext_rx_can and ext_tx_can */ #define KVASER_USB_HYDRA_CF_FLAG_OSM_NACK BIT(12) #define KVASER_USB_HYDRA_CF_FLAG_ABL BIT(13) @@ -361,6 +372,10 @@ struct kvaser_cmd_ext { } __packed; } __packed; +struct kvaser_usb_net_hydra_priv { + int pending_get_busparams_type; +}; + static const struct can_bittiming_const kvaser_usb_hydra_kcan_bittiming_c = { .name = "kvaser_usb_kcan", .tseg1_min = 1, @@ -838,6 +853,39 @@ static void kvaser_usb_hydra_flush_queue_reply(const struct kvaser_usb *dev, complete(&priv->flush_comp); } +static void kvaser_usb_hydra_get_busparams_reply(const struct kvaser_usb *dev, + const struct kvaser_cmd *cmd) +{ + struct kvaser_usb_net_priv *priv; + struct kvaser_usb_net_hydra_priv *hydra; + + priv = kvaser_usb_hydra_net_priv_from_cmd(dev, cmd); + if (!priv) + return; + + hydra = priv->sub_priv; + if (!hydra) + return; + + switch (hydra->pending_get_busparams_type) { + case KVASER_USB_HYDRA_BUSPARAM_TYPE_CAN: + memcpy(&priv->busparams_nominal, &cmd->get_busparams_res.busparams, + sizeof(priv->busparams_nominal)); + break; + case KVASER_USB_HYDRA_BUSPARAM_TYPE_CANFD: + memcpy(&priv->busparams_data, &cmd->get_busparams_res.busparams, + sizeof(priv->busparams_nominal)); + break; + default: + dev_warn(&dev->intf->dev, "Unknown get_busparams_type %d\n", + hydra->pending_get_busparams_type); + break; + } + hydra->pending_get_busparams_type = -1; + + complete(&priv->get_busparams_comp); +} + static void kvaser_usb_hydra_bus_status_to_can_state(const struct kvaser_usb_net_priv *priv, u8 bus_status, @@ -1125,6 +1173,7 @@ static void kvaser_usb_hydra_tx_acknowledge(const struct kvaser_usb *dev, struct kvaser_usb_net_priv *priv; unsigned long irq_flags; bool one_shot_fail = false; + bool is_err_frame = false; u16 transid = kvaser_usb_hydra_get_cmd_transid(cmd); priv = kvaser_usb_hydra_net_priv_from_cmd(dev, cmd); @@ -1143,10 +1192,13 @@ static void kvaser_usb_hydra_tx_acknowledge(const struct kvaser_usb *dev, kvaser_usb_hydra_one_shot_fail(priv, cmd_ext); one_shot_fail = true; } + + is_err_frame = flags & KVASER_USB_HYDRA_CF_FLAG_TX_ACK && + flags & KVASER_USB_HYDRA_CF_FLAG_ERROR_FRAME; } context = &priv->tx_contexts[transid % dev->max_tx_urbs]; - if (!one_shot_fail) { + if (!one_shot_fail && !is_err_frame) { struct net_device_stats *stats = &priv->netdev->stats; stats->tx_packets++; @@ -1320,6 +1372,10 @@ static void kvaser_usb_hydra_handle_cmd_std(const struct kvaser_usb *dev, kvaser_usb_hydra_state_event(dev, cmd); break; + case CMD_GET_BUSPARAMS_RESP: + kvaser_usb_hydra_get_busparams_reply(dev, cmd); + break; + case CMD_ERROR_EVENT: kvaser_usb_hydra_error_event(dev, cmd); break; @@ -1520,15 +1576,58 @@ static int kvaser_usb_hydra_set_mode(struct net_device *netdev, return err; } -static int kvaser_usb_hydra_set_bittiming(struct net_device *netdev) +static int kvaser_usb_hydra_get_busparams(struct kvaser_usb_net_priv *priv, + int busparams_type) +{ + struct kvaser_usb *dev = priv->dev; + struct kvaser_usb_net_hydra_priv *hydra = priv->sub_priv; + struct kvaser_cmd *cmd; + int err; + + if (!hydra) + return -EINVAL; + + cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + cmd->header.cmd_no = CMD_GET_BUSPARAMS_REQ; + kvaser_usb_hydra_set_cmd_dest_he + (cmd, dev->card_data.hydra.channel_to_he[priv->channel]); + kvaser_usb_hydra_set_cmd_transid + (cmd, kvaser_usb_hydra_get_next_transid(dev)); + cmd->get_busparams_req.type = busparams_type; + hydra->pending_get_busparams_type = busparams_type; + + reinit_completion(&priv->get_busparams_comp); 
+ + err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd)); + if (err) + return err; + + if (!wait_for_completion_timeout(&priv->get_busparams_comp, + msecs_to_jiffies(KVASER_USB_TIMEOUT))) + return -ETIMEDOUT; + + return err; +} + +static int kvaser_usb_hydra_get_nominal_busparams(struct kvaser_usb_net_priv *priv) +{ + return kvaser_usb_hydra_get_busparams(priv, KVASER_USB_HYDRA_BUSPARAM_TYPE_CAN); +} + +static int kvaser_usb_hydra_get_data_busparams(struct kvaser_usb_net_priv *priv) +{ + return kvaser_usb_hydra_get_busparams(priv, KVASER_USB_HYDRA_BUSPARAM_TYPE_CANFD); +} + +static int kvaser_usb_hydra_set_bittiming(const struct net_device *netdev, + const struct kvaser_usb_busparams *busparams) { struct kvaser_cmd *cmd; struct kvaser_usb_net_priv *priv = netdev_priv(netdev); - struct can_bittiming *bt = &priv->can.bittiming; struct kvaser_usb *dev = priv->dev; - int tseg1 = bt->prop_seg + bt->phase_seg1; - int tseg2 = bt->phase_seg2; - int sjw = bt->sjw; int err; cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL); @@ -1536,11 +1635,8 @@ static int kvaser_usb_hydra_set_bittiming(struct net_device *netdev) return -ENOMEM; cmd->header.cmd_no = CMD_SET_BUSPARAMS_REQ; - cmd->set_busparams_req.bitrate = cpu_to_le32(bt->bitrate); - cmd->set_busparams_req.sjw = (u8)sjw; - cmd->set_busparams_req.tseg1 = (u8)tseg1; - cmd->set_busparams_req.tseg2 = (u8)tseg2; - cmd->set_busparams_req.nsamples = 1; + memcpy(&cmd->set_busparams_req.busparams_nominal, busparams, + sizeof(cmd->set_busparams_req.busparams_nominal)); kvaser_usb_hydra_set_cmd_dest_he (cmd, dev->card_data.hydra.channel_to_he[priv->channel]); @@ -1554,15 +1650,12 @@ static int kvaser_usb_hydra_set_bittiming(struct net_device *netdev) return err; } -static int kvaser_usb_hydra_set_data_bittiming(struct net_device *netdev) +static int kvaser_usb_hydra_set_data_bittiming(const struct net_device *netdev, + const struct kvaser_usb_busparams *busparams) { struct kvaser_cmd *cmd; struct kvaser_usb_net_priv *priv = netdev_priv(netdev); - struct can_bittiming *dbt = &priv->can.data_bittiming; struct kvaser_usb *dev = priv->dev; - int tseg1 = dbt->prop_seg + dbt->phase_seg1; - int tseg2 = dbt->phase_seg2; - int sjw = dbt->sjw; int err; cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL); @@ -1570,11 +1663,8 @@ static int kvaser_usb_hydra_set_data_bittiming(struct net_device *netdev) return -ENOMEM; cmd->header.cmd_no = CMD_SET_BUSPARAMS_FD_REQ; - cmd->set_busparams_req.bitrate_d = cpu_to_le32(dbt->bitrate); - cmd->set_busparams_req.sjw_d = (u8)sjw; - cmd->set_busparams_req.tseg1_d = (u8)tseg1; - cmd->set_busparams_req.tseg2_d = (u8)tseg2; - cmd->set_busparams_req.nsamples_d = 1; + memcpy(&cmd->set_busparams_req.busparams_data, busparams, + sizeof(cmd->set_busparams_req.busparams_data)); if (priv->can.ctrlmode & CAN_CTRLMODE_FD) { if (priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO) @@ -1681,6 +1771,19 @@ static int kvaser_usb_hydra_init_card(struct kvaser_usb *dev) return 0; } +static int kvaser_usb_hydra_init_channel(struct kvaser_usb_net_priv *priv) +{ + struct kvaser_usb_net_hydra_priv *hydra; + + hydra = devm_kzalloc(&priv->dev->intf->dev, sizeof(*hydra), GFP_KERNEL); + if (!hydra) + return -ENOMEM; + + priv->sub_priv = hydra; + + return 0; +} + static int kvaser_usb_hydra_get_software_info(struct kvaser_usb *dev) { struct kvaser_cmd cmd; @@ -2025,10 +2128,13 @@ kvaser_usb_hydra_frame_to_cmd(const struct kvaser_usb_net_priv *priv, const struct kvaser_usb_dev_ops kvaser_usb_hydra_dev_ops = { .dev_set_mode = 
kvaser_usb_hydra_set_mode, .dev_set_bittiming = kvaser_usb_hydra_set_bittiming, + .dev_get_busparams = kvaser_usb_hydra_get_nominal_busparams, .dev_set_data_bittiming = kvaser_usb_hydra_set_data_bittiming, + .dev_get_data_busparams = kvaser_usb_hydra_get_data_busparams, .dev_get_berr_counter = kvaser_usb_hydra_get_berr_counter, .dev_setup_endpoints = kvaser_usb_hydra_setup_endpoints, .dev_init_card = kvaser_usb_hydra_init_card, + .dev_init_channel = kvaser_usb_hydra_init_channel, .dev_get_software_info = kvaser_usb_hydra_get_software_info, .dev_get_software_details = kvaser_usb_hydra_get_software_details, .dev_get_card_info = kvaser_usb_hydra_get_card_info, @@ -2044,7 +2150,7 @@ const struct kvaser_usb_dev_ops kvaser_usb_hydra_dev_ops = { static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_kcan = { .clock = { - .freq = 80000000, + .freq = 80 * MEGA /* Hz */, }, .timestamp_freq = 80, .bittiming_const = &kvaser_usb_hydra_kcan_bittiming_c, @@ -2053,7 +2159,7 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_kcan = { static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_flexc = { .clock = { - .freq = 24000000, + .freq = 24 * MEGA /* Hz */, }, .timestamp_freq = 1, .bittiming_const = &kvaser_usb_flexc_bittiming_const, @@ -2061,7 +2167,7 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_flexc = { static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_rt = { .clock = { - .freq = 80000000, + .freq = 80 * MEGA /* Hz */, }, .timestamp_freq = 24, .bittiming_const = &kvaser_usb_hydra_rt_bittiming_c, diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c index 7a71097281c2..ad3103391c79 100644 --- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c @@ -19,7 +19,9 @@ #include #include #include +#include #include +#include #include #include @@ -55,6 +57,9 @@ #define CMD_RX_EXT_MESSAGE 14 #define CMD_TX_EXT_MESSAGE 15 #define CMD_SET_BUS_PARAMS 16 +#define CMD_GET_BUS_PARAMS 17 +#define CMD_GET_BUS_PARAMS_REPLY 18 +#define CMD_GET_CHIP_STATE 19 #define CMD_CHIP_STATE_EVENT 20 #define CMD_SET_CTRL_MODE 21 #define CMD_RESET_CHIP 24 @@ -69,10 +74,13 @@ #define CMD_GET_CARD_INFO_REPLY 35 #define CMD_GET_SOFTWARE_INFO 38 #define CMD_GET_SOFTWARE_INFO_REPLY 39 +#define CMD_ERROR_EVENT 45 #define CMD_FLUSH_QUEUE 48 #define CMD_TX_ACKNOWLEDGE 50 #define CMD_CAN_ERROR_EVENT 51 #define CMD_FLUSH_QUEUE_REPLY 68 +#define CMD_GET_CAPABILITIES_REQ 95 +#define CMD_GET_CAPABILITIES_RESP 96 #define CMD_LEAF_LOG_MESSAGE 106 @@ -82,6 +90,8 @@ #define KVASER_USB_LEAF_SWOPTION_FREQ_32_MHZ_CLK BIT(5) #define KVASER_USB_LEAF_SWOPTION_FREQ_24_MHZ_CLK BIT(6) +#define KVASER_USB_LEAF_SWOPTION_EXT_CAP BIT(12) + /* error factors */ #define M16C_EF_ACKE BIT(0) #define M16C_EF_CRCE BIT(1) @@ -156,11 +166,7 @@ struct usbcan_cmd_softinfo { struct kvaser_cmd_busparams { u8 tid; u8 channel; - __le32 bitrate; - u8 tseg1; - u8 tseg2; - u8 sjw; - u8 no_samp; + struct kvaser_usb_busparams busparams; } __packed; struct kvaser_cmd_tx_can { @@ -229,7 +235,7 @@ struct kvaser_cmd_tx_acknowledge_header { u8 tid; } __packed; -struct leaf_cmd_error_event { +struct leaf_cmd_can_error_event { u8 tid; u8 flags; __le16 time[3]; @@ -241,7 +247,7 @@ struct leaf_cmd_error_event { u8 error_factor; } __packed; -struct usbcan_cmd_error_event { +struct usbcan_cmd_can_error_event { u8 tid; u8 padding; u8 tx_errors_count_ch0; @@ -253,6 +259,28 @@ struct usbcan_cmd_error_event { __le16 time; 
} __packed; +/* CMD_ERROR_EVENT error codes */ +#define KVASER_USB_LEAF_ERROR_EVENT_TX_QUEUE_FULL 0x8 +#define KVASER_USB_LEAF_ERROR_EVENT_PARAM 0x9 + +struct leaf_cmd_error_event { + u8 tid; + u8 error_code; + __le16 timestamp[3]; + __le16 padding; + __le16 info1; + __le16 info2; +} __packed; + +struct usbcan_cmd_error_event { + u8 tid; + u8 error_code; + __le16 info1; + __le16 info2; + __le16 timestamp; + __le16 padding; +} __packed; + struct kvaser_cmd_ctrl_mode { u8 tid; u8 channel; @@ -277,6 +305,28 @@ struct leaf_cmd_log_message { u8 data[8]; } __packed; +/* Sub commands for cap_req and cap_res */ +#define KVASER_USB_LEAF_CAP_CMD_LISTEN_MODE 0x02 +#define KVASER_USB_LEAF_CAP_CMD_ERR_REPORT 0x05 +struct kvaser_cmd_cap_req { + __le16 padding0; + __le16 cap_cmd; + __le16 padding1; + __le16 channel; +} __packed; + +/* Status codes for cap_res */ +#define KVASER_USB_LEAF_CAP_STAT_OK 0x00 +#define KVASER_USB_LEAF_CAP_STAT_NOT_IMPL 0x01 +#define KVASER_USB_LEAF_CAP_STAT_UNAVAIL 0x02 +struct kvaser_cmd_cap_res { + __le16 padding; + __le16 cap_cmd; + __le16 status; + __le32 mask; + __le32 value; +} __packed; + struct kvaser_cmd { u8 len; u8 id; @@ -292,14 +342,18 @@ struct kvaser_cmd { struct leaf_cmd_softinfo softinfo; struct leaf_cmd_rx_can rx_can; struct leaf_cmd_chip_state_event chip_state_event; - struct leaf_cmd_error_event error_event; + struct leaf_cmd_can_error_event can_error_event; struct leaf_cmd_log_message log_message; + struct leaf_cmd_error_event error_event; + struct kvaser_cmd_cap_req cap_req; + struct kvaser_cmd_cap_res cap_res; } __packed leaf; union { struct usbcan_cmd_softinfo softinfo; struct usbcan_cmd_rx_can rx_can; struct usbcan_cmd_chip_state_event chip_state_event; + struct usbcan_cmd_can_error_event can_error_event; struct usbcan_cmd_error_event error_event; } __packed usbcan; @@ -322,7 +376,10 @@ static const u8 kvaser_usb_leaf_cmd_sizes_leaf[] = { [CMD_RX_EXT_MESSAGE] = kvaser_fsize(u.leaf.rx_can), [CMD_LEAF_LOG_MESSAGE] = kvaser_fsize(u.leaf.log_message), [CMD_CHIP_STATE_EVENT] = kvaser_fsize(u.leaf.chip_state_event), - [CMD_CAN_ERROR_EVENT] = kvaser_fsize(u.leaf.error_event), + [CMD_CAN_ERROR_EVENT] = kvaser_fsize(u.leaf.can_error_event), + [CMD_GET_CAPABILITIES_RESP] = kvaser_fsize(u.leaf.cap_res), + [CMD_GET_BUS_PARAMS_REPLY] = kvaser_fsize(u.busparams), + [CMD_ERROR_EVENT] = kvaser_fsize(u.leaf.error_event), /* ignored events: */ [CMD_FLUSH_QUEUE_REPLY] = CMD_SIZE_ANY, }; @@ -336,7 +393,8 @@ static const u8 kvaser_usb_leaf_cmd_sizes_usbcan[] = { [CMD_RX_STD_MESSAGE] = kvaser_fsize(u.usbcan.rx_can), [CMD_RX_EXT_MESSAGE] = kvaser_fsize(u.usbcan.rx_can), [CMD_CHIP_STATE_EVENT] = kvaser_fsize(u.usbcan.chip_state_event), - [CMD_CAN_ERROR_EVENT] = kvaser_fsize(u.usbcan.error_event), + [CMD_CAN_ERROR_EVENT] = kvaser_fsize(u.usbcan.can_error_event), + [CMD_ERROR_EVENT] = kvaser_fsize(u.usbcan.error_event), /* ignored events: */ [CMD_USBCAN_CLOCK_OVERFLOW_EVENT] = CMD_SIZE_ANY, }; @@ -364,6 +422,12 @@ struct kvaser_usb_err_summary { }; }; +struct kvaser_usb_net_leaf_priv { + struct kvaser_usb_net_priv *net; + + struct delayed_work chip_state_req_work; +}; + static const struct can_bittiming_const kvaser_usb_leaf_m16c_bittiming_const = { .name = "kvaser_usb_ucii", .tseg1_min = 4, @@ -390,7 +454,7 @@ static const struct can_bittiming_const kvaser_usb_leaf_m32c_bittiming_const = { static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_usbcan_dev_cfg = { .clock = { - .freq = 8000000, + .freq = 8 * MEGA /* Hz */, }, .timestamp_freq = 1, .bittiming_const = 
&kvaser_usb_leaf_m16c_bittiming_const, @@ -398,7 +462,7 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_usbcan_dev_cfg = { static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_m32c_dev_cfg = { .clock = { - .freq = 16000000, + .freq = 16 * MEGA /* Hz */, }, .timestamp_freq = 1, .bittiming_const = &kvaser_usb_leaf_m32c_bittiming_const, @@ -414,7 +478,7 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_imx_dev_cfg_16mhz = { static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_imx_dev_cfg_24mhz = { .clock = { - .freq = 24000000, + .freq = 24 * MEGA /* Hz */, }, .timestamp_freq = 1, .bittiming_const = &kvaser_usb_flexc_bittiming_const, @@ -422,7 +486,7 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_imx_dev_cfg_24mhz = { static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_imx_dev_cfg_32mhz = { .clock = { - .freq = 32000000, + .freq = 32 * MEGA /* Hz */, }, .timestamp_freq = 1, .bittiming_const = &kvaser_usb_flexc_bittiming_const, @@ -607,6 +671,9 @@ static void kvaser_usb_leaf_get_software_info_leaf(struct kvaser_usb *dev, dev->fw_version = le32_to_cpu(softinfo->fw_version); dev->max_tx_urbs = le16_to_cpu(softinfo->max_outstanding_tx); + if (sw_options & KVASER_USB_LEAF_SWOPTION_EXT_CAP) + dev->card_data.capabilities |= KVASER_USB_CAP_EXT_CAP; + if (dev->driver_info->quirks & KVASER_USB_QUIRK_IGNORE_CLK_FREQ) { /* Firmware expects bittiming parameters calculated for 16MHz * clock, regardless of the actual clock @@ -694,6 +761,116 @@ static int kvaser_usb_leaf_get_card_info(struct kvaser_usb *dev) return 0; } +static int kvaser_usb_leaf_get_single_capability(struct kvaser_usb *dev, + u16 cap_cmd_req, u16 *status) +{ + struct kvaser_usb_dev_card_data *card_data = &dev->card_data; + struct kvaser_cmd *cmd; + u32 value = 0; + u32 mask = 0; + u16 cap_cmd_res; + int err; + int i; + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + cmd->id = CMD_GET_CAPABILITIES_REQ; + cmd->u.leaf.cap_req.cap_cmd = cpu_to_le16(cap_cmd_req); + cmd->len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_cap_req); + + err = kvaser_usb_send_cmd(dev, cmd, cmd->len); + if (err) + goto end; + + err = kvaser_usb_leaf_wait_cmd(dev, CMD_GET_CAPABILITIES_RESP, cmd); + if (err) + goto end; + + *status = le16_to_cpu(cmd->u.leaf.cap_res.status); + + if (*status != KVASER_USB_LEAF_CAP_STAT_OK) + goto end; + + cap_cmd_res = le16_to_cpu(cmd->u.leaf.cap_res.cap_cmd); + switch (cap_cmd_res) { + case KVASER_USB_LEAF_CAP_CMD_LISTEN_MODE: + case KVASER_USB_LEAF_CAP_CMD_ERR_REPORT: + value = le32_to_cpu(cmd->u.leaf.cap_res.value); + mask = le32_to_cpu(cmd->u.leaf.cap_res.mask); + break; + default: + dev_warn(&dev->intf->dev, "Unknown capability command %u\n", + cap_cmd_res); + break; + } + + for (i = 0; i < dev->nchannels; i++) { + if (BIT(i) & (value & mask)) { + switch (cap_cmd_res) { + case KVASER_USB_LEAF_CAP_CMD_LISTEN_MODE: + card_data->ctrlmode_supported |= + CAN_CTRLMODE_LISTENONLY; + break; + case KVASER_USB_LEAF_CAP_CMD_ERR_REPORT: + card_data->capabilities |= + KVASER_USB_CAP_BERR_CAP; + break; + } + } + } + +end: + kfree(cmd); + + return err; +} + +static int kvaser_usb_leaf_get_capabilities_leaf(struct kvaser_usb *dev) +{ + int err; + u16 status; + + if (!(dev->card_data.capabilities & KVASER_USB_CAP_EXT_CAP)) { + dev_info(&dev->intf->dev, + "No extended capability support. 
Upgrade device firmware.\n"); + return 0; + } + + err = kvaser_usb_leaf_get_single_capability(dev, + KVASER_USB_LEAF_CAP_CMD_LISTEN_MODE, + &status); + if (err) + return err; + if (status) + dev_info(&dev->intf->dev, + "KVASER_USB_LEAF_CAP_CMD_LISTEN_MODE failed %u\n", + status); + + err = kvaser_usb_leaf_get_single_capability(dev, + KVASER_USB_LEAF_CAP_CMD_ERR_REPORT, + &status); + if (err) + return err; + if (status) + dev_info(&dev->intf->dev, + "KVASER_USB_LEAF_CAP_CMD_ERR_REPORT failed %u\n", + status); + + return 0; +} + +static int kvaser_usb_leaf_get_capabilities(struct kvaser_usb *dev) +{ + int err = 0; + + if (dev->driver_info->family == KVASER_LEAF) + err = kvaser_usb_leaf_get_capabilities_leaf(dev); + + return err; +} + static void kvaser_usb_leaf_tx_acknowledge(const struct kvaser_usb *dev, const struct kvaser_cmd *cmd) { @@ -722,7 +899,7 @@ static void kvaser_usb_leaf_tx_acknowledge(const struct kvaser_usb *dev, context = &priv->tx_contexts[tid % dev->max_tx_urbs]; /* Sometimes the state change doesn't come after a bus-off event */ - if (priv->can.restart_ms && priv->can.state >= CAN_STATE_BUS_OFF) { + if (priv->can.restart_ms && priv->can.state == CAN_STATE_BUS_OFF) { struct sk_buff *skb; struct can_frame *cf; @@ -778,6 +955,16 @@ static int kvaser_usb_leaf_simple_cmd_async(struct kvaser_usb_net_priv *priv, return err; } +static void kvaser_usb_leaf_chip_state_req_work(struct work_struct *work) +{ + struct kvaser_usb_net_leaf_priv *leaf = + container_of(work, struct kvaser_usb_net_leaf_priv, + chip_state_req_work.work); + struct kvaser_usb_net_priv *priv = leaf->net; + + kvaser_usb_leaf_simple_cmd_async(priv, CMD_GET_CHIP_STATE); +} + static void kvaser_usb_leaf_rx_error_update_can_state(struct kvaser_usb_net_priv *priv, const struct kvaser_usb_err_summary *es, @@ -796,20 +983,16 @@ kvaser_usb_leaf_rx_error_update_can_state(struct kvaser_usb_net_priv *priv, new_state = CAN_STATE_BUS_OFF; } else if (es->status & M16C_STATE_BUS_PASSIVE) { new_state = CAN_STATE_ERROR_PASSIVE; - } else if (es->status & M16C_STATE_BUS_ERROR) { + } else if ((es->status & M16C_STATE_BUS_ERROR) && + cur_state >= CAN_STATE_BUS_OFF) { /* Guard against spurious error events after a busoff */ - if (cur_state < CAN_STATE_BUS_OFF) { - if (es->txerr >= 128 || es->rxerr >= 128) - new_state = CAN_STATE_ERROR_PASSIVE; - else if (es->txerr >= 96 || es->rxerr >= 96) - new_state = CAN_STATE_ERROR_WARNING; - else if (cur_state > CAN_STATE_ERROR_ACTIVE) - new_state = CAN_STATE_ERROR_ACTIVE; - } - } - - if (!es->status) + } else if (es->txerr >= 128 || es->rxerr >= 128) { + new_state = CAN_STATE_ERROR_PASSIVE; + } else if (es->txerr >= 96 || es->rxerr >= 96) { + new_state = CAN_STATE_ERROR_WARNING; + } else { new_state = CAN_STATE_ERROR_ACTIVE; + } if (new_state != cur_state) { tx_state = (es->txerr >= es->rxerr) ? 
new_state : 0; @@ -819,7 +1002,7 @@ kvaser_usb_leaf_rx_error_update_can_state(struct kvaser_usb_net_priv *priv, } if (priv->can.restart_ms && - cur_state >= CAN_STATE_BUS_OFF && + cur_state == CAN_STATE_BUS_OFF && new_state < CAN_STATE_BUS_OFF) priv->can.can_stats.restarts++; @@ -853,6 +1036,7 @@ static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev, struct sk_buff *skb; struct net_device_stats *stats; struct kvaser_usb_net_priv *priv; + struct kvaser_usb_net_leaf_priv *leaf; enum can_state old_state, new_state; if (es->channel >= dev->nchannels) { @@ -862,8 +1046,13 @@ static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev, } priv = dev->nets[es->channel]; + leaf = priv->sub_priv; stats = &priv->netdev->stats; + /* Ignore e.g. state change to bus-off reported just after stopping */ + if (!netif_running(priv->netdev)) + return; + /* Update all of the CAN interface's state and error counters before * trying any memory allocation that can actually fail with -ENOMEM. * @@ -878,6 +1067,14 @@ static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev, kvaser_usb_leaf_rx_error_update_can_state(priv, es, &tmp_cf); new_state = priv->can.state; + /* If there are errors, request status updates periodically as we do + * not get automatic notifications of improved state. + */ + if (new_state < CAN_STATE_BUS_OFF && + (es->rxerr || es->txerr || new_state == CAN_STATE_ERROR_PASSIVE)) + schedule_delayed_work(&leaf->chip_state_req_work, + msecs_to_jiffies(500)); + skb = alloc_can_err_skb(priv->netdev, &cf); if (!skb) { stats->rx_dropped++; @@ -895,7 +1092,7 @@ static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev, } if (priv->can.restart_ms && - old_state >= CAN_STATE_BUS_OFF && + old_state == CAN_STATE_BUS_OFF && new_state < CAN_STATE_BUS_OFF) { cf->can_id |= CAN_ERR_RESTARTED; netif_carrier_on(priv->netdev); @@ -995,11 +1192,11 @@ static void kvaser_usb_leaf_usbcan_rx_error(const struct kvaser_usb *dev, case CMD_CAN_ERROR_EVENT: es.channel = 0; - es.status = cmd->u.usbcan.error_event.status_ch0; - es.txerr = cmd->u.usbcan.error_event.tx_errors_count_ch0; - es.rxerr = cmd->u.usbcan.error_event.rx_errors_count_ch0; + es.status = cmd->u.usbcan.can_error_event.status_ch0; + es.txerr = cmd->u.usbcan.can_error_event.tx_errors_count_ch0; + es.rxerr = cmd->u.usbcan.can_error_event.rx_errors_count_ch0; es.usbcan.other_ch_status = - cmd->u.usbcan.error_event.status_ch1; + cmd->u.usbcan.can_error_event.status_ch1; kvaser_usb_leaf_usbcan_conditionally_rx_error(dev, &es); /* The USBCAN firmware supports up to 2 channels. 
@@ -1007,13 +1204,13 @@ static void kvaser_usb_leaf_usbcan_rx_error(const struct kvaser_usb *dev, */ if (dev->nchannels == MAX_USBCAN_NET_DEVICES) { es.channel = 1; - es.status = cmd->u.usbcan.error_event.status_ch1; + es.status = cmd->u.usbcan.can_error_event.status_ch1; es.txerr = - cmd->u.usbcan.error_event.tx_errors_count_ch1; + cmd->u.usbcan.can_error_event.tx_errors_count_ch1; es.rxerr = - cmd->u.usbcan.error_event.rx_errors_count_ch1; + cmd->u.usbcan.can_error_event.rx_errors_count_ch1; es.usbcan.other_ch_status = - cmd->u.usbcan.error_event.status_ch0; + cmd->u.usbcan.can_error_event.status_ch0; kvaser_usb_leaf_usbcan_conditionally_rx_error(dev, &es); } break; @@ -1030,11 +1227,11 @@ static void kvaser_usb_leaf_leaf_rx_error(const struct kvaser_usb *dev, switch (cmd->id) { case CMD_CAN_ERROR_EVENT: - es.channel = cmd->u.leaf.error_event.channel; - es.status = cmd->u.leaf.error_event.status; - es.txerr = cmd->u.leaf.error_event.tx_errors_count; - es.rxerr = cmd->u.leaf.error_event.rx_errors_count; - es.leaf.error_factor = cmd->u.leaf.error_event.error_factor; + es.channel = cmd->u.leaf.can_error_event.channel; + es.status = cmd->u.leaf.can_error_event.status; + es.txerr = cmd->u.leaf.can_error_event.tx_errors_count; + es.rxerr = cmd->u.leaf.can_error_event.rx_errors_count; + es.leaf.error_factor = cmd->u.leaf.can_error_event.error_factor; break; case CMD_LEAF_LOG_MESSAGE: es.channel = cmd->u.leaf.log_message.channel; @@ -1166,6 +1363,74 @@ static void kvaser_usb_leaf_rx_can_msg(const struct kvaser_usb *dev, netif_rx(skb); } +static void kvaser_usb_leaf_error_event_parameter(const struct kvaser_usb *dev, + const struct kvaser_cmd *cmd) +{ + u16 info1 = 0; + + switch (dev->driver_info->family) { + case KVASER_LEAF: + info1 = le16_to_cpu(cmd->u.leaf.error_event.info1); + break; + case KVASER_USBCAN: + info1 = le16_to_cpu(cmd->u.usbcan.error_event.info1); + break; + } + + /* info1 will contain the offending cmd_no */ + switch (info1) { + case CMD_SET_CTRL_MODE: + dev_warn(&dev->intf->dev, + "CMD_SET_CTRL_MODE error in parameter\n"); + break; + + case CMD_SET_BUS_PARAMS: + dev_warn(&dev->intf->dev, + "CMD_SET_BUS_PARAMS error in parameter\n"); + break; + + default: + dev_warn(&dev->intf->dev, + "Unhandled parameter error event cmd_no (%u)\n", + info1); + break; + } +} + +static void kvaser_usb_leaf_error_event(const struct kvaser_usb *dev, + const struct kvaser_cmd *cmd) +{ + u8 error_code = 0; + + switch (dev->driver_info->family) { + case KVASER_LEAF: + error_code = cmd->u.leaf.error_event.error_code; + break; + case KVASER_USBCAN: + error_code = cmd->u.usbcan.error_event.error_code; + break; + } + + switch (error_code) { + case KVASER_USB_LEAF_ERROR_EVENT_TX_QUEUE_FULL: + /* Received additional CAN message, when firmware TX queue is + * already full. Something is wrong with the driver. + * This should never happen! 
+ */ + dev_err(&dev->intf->dev, + "Received error event TX_QUEUE_FULL\n"); + break; + case KVASER_USB_LEAF_ERROR_EVENT_PARAM: + kvaser_usb_leaf_error_event_parameter(dev, cmd); + break; + + default: + dev_warn(&dev->intf->dev, + "Unhandled error event (%d)\n", error_code); + break; + } +} + static void kvaser_usb_leaf_start_chip_reply(const struct kvaser_usb *dev, const struct kvaser_cmd *cmd) { @@ -1206,6 +1471,25 @@ static void kvaser_usb_leaf_stop_chip_reply(const struct kvaser_usb *dev, complete(&priv->stop_comp); } +static void kvaser_usb_leaf_get_busparams_reply(const struct kvaser_usb *dev, + const struct kvaser_cmd *cmd) +{ + struct kvaser_usb_net_priv *priv; + u8 channel = cmd->u.busparams.channel; + + if (channel >= dev->nchannels) { + dev_err(&dev->intf->dev, + "Invalid channel number (%d)\n", channel); + return; + } + + priv = dev->nets[channel]; + memcpy(&priv->busparams_nominal, &cmd->u.busparams.busparams, + sizeof(priv->busparams_nominal)); + + complete(&priv->get_busparams_comp); +} + static void kvaser_usb_leaf_handle_command(const struct kvaser_usb *dev, const struct kvaser_cmd *cmd) { @@ -1244,6 +1528,14 @@ static void kvaser_usb_leaf_handle_command(const struct kvaser_usb *dev, kvaser_usb_leaf_tx_acknowledge(dev, cmd); break; + case CMD_ERROR_EVENT: + kvaser_usb_leaf_error_event(dev, cmd); + break; + + case CMD_GET_BUS_PARAMS_REPLY: + kvaser_usb_leaf_get_busparams_reply(dev, cmd); + break; + /* Ignored commands */ case CMD_USBCAN_CLOCK_OVERFLOW_EVENT: if (dev->driver_info->family != KVASER_USBCAN) @@ -1340,10 +1632,13 @@ static int kvaser_usb_leaf_start_chip(struct kvaser_usb_net_priv *priv) static int kvaser_usb_leaf_stop_chip(struct kvaser_usb_net_priv *priv) { + struct kvaser_usb_net_leaf_priv *leaf = priv->sub_priv; int err; reinit_completion(&priv->stop_comp); + cancel_delayed_work(&leaf->chip_state_req_work); + err = kvaser_usb_leaf_send_simple_cmd(priv->dev, CMD_STOP_CHIP, priv->channel); if (err) @@ -1390,10 +1685,35 @@ static int kvaser_usb_leaf_init_card(struct kvaser_usb *dev) return 0; } -static int kvaser_usb_leaf_set_bittiming(struct net_device *netdev) +static int kvaser_usb_leaf_init_channel(struct kvaser_usb_net_priv *priv) +{ + struct kvaser_usb_net_leaf_priv *leaf; + + leaf = devm_kzalloc(&priv->dev->intf->dev, sizeof(*leaf), GFP_KERNEL); + if (!leaf) + return -ENOMEM; + + leaf->net = priv; + INIT_DELAYED_WORK(&leaf->chip_state_req_work, + kvaser_usb_leaf_chip_state_req_work); + + priv->sub_priv = leaf; + + return 0; +} + +static void kvaser_usb_leaf_remove_channel(struct kvaser_usb_net_priv *priv) +{ + struct kvaser_usb_net_leaf_priv *leaf = priv->sub_priv; + + if (leaf) + cancel_delayed_work_sync(&leaf->chip_state_req_work); +} + +static int kvaser_usb_leaf_set_bittiming(const struct net_device *netdev, + const struct kvaser_usb_busparams *busparams) { struct kvaser_usb_net_priv *priv = netdev_priv(netdev); - struct can_bittiming *bt = &priv->can.bittiming; struct kvaser_usb *dev = priv->dev; struct kvaser_cmd *cmd; int rc; @@ -1406,15 +1726,8 @@ static int kvaser_usb_leaf_set_bittiming(struct net_device *netdev) cmd->len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_busparams); cmd->u.busparams.channel = priv->channel; cmd->u.busparams.tid = 0xff; - cmd->u.busparams.bitrate = cpu_to_le32(bt->bitrate); - cmd->u.busparams.sjw = bt->sjw; - cmd->u.busparams.tseg1 = bt->prop_seg + bt->phase_seg1; - cmd->u.busparams.tseg2 = bt->phase_seg2; - - if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) - cmd->u.busparams.no_samp = 3; - else - 
cmd->u.busparams.no_samp = 1; + memcpy(&cmd->u.busparams.busparams, busparams, + sizeof(cmd->u.busparams.busparams)); rc = kvaser_usb_send_cmd(dev, cmd, cmd->len); @@ -1422,6 +1735,27 @@ static int kvaser_usb_leaf_set_bittiming(struct net_device *netdev) return rc; } +static int kvaser_usb_leaf_get_busparams(struct kvaser_usb_net_priv *priv) +{ + int err; + + if (priv->dev->driver_info->family == KVASER_USBCAN) + return -EOPNOTSUPP; + + reinit_completion(&priv->get_busparams_comp); + + err = kvaser_usb_leaf_send_simple_cmd(priv->dev, CMD_GET_BUS_PARAMS, + priv->channel); + if (err) + return err; + + if (!wait_for_completion_timeout(&priv->get_busparams_comp, + msecs_to_jiffies(KVASER_USB_TIMEOUT))) + return -ETIMEDOUT; + + return 0; +} + static int kvaser_usb_leaf_set_mode(struct net_device *netdev, enum can_mode mode) { @@ -1483,14 +1817,18 @@ static int kvaser_usb_leaf_setup_endpoints(struct kvaser_usb *dev) const struct kvaser_usb_dev_ops kvaser_usb_leaf_dev_ops = { .dev_set_mode = kvaser_usb_leaf_set_mode, .dev_set_bittiming = kvaser_usb_leaf_set_bittiming, + .dev_get_busparams = kvaser_usb_leaf_get_busparams, .dev_set_data_bittiming = NULL, + .dev_get_data_busparams = NULL, .dev_get_berr_counter = kvaser_usb_leaf_get_berr_counter, .dev_setup_endpoints = kvaser_usb_leaf_setup_endpoints, .dev_init_card = kvaser_usb_leaf_init_card, + .dev_init_channel = kvaser_usb_leaf_init_channel, + .dev_remove_channel = kvaser_usb_leaf_remove_channel, .dev_get_software_info = kvaser_usb_leaf_get_software_info, .dev_get_software_details = NULL, .dev_get_card_info = kvaser_usb_leaf_get_card_info, - .dev_get_capabilities = NULL, + .dev_get_capabilities = kvaser_usb_leaf_get_capabilities, .dev_set_opt_mode = kvaser_usb_leaf_set_opt_mode, .dev_start_chip = kvaser_usb_leaf_start_chip, .dev_stop_chip = kvaser_usb_leaf_stop_chip, diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c index 59a803e3c8d0..22547b10dfe5 100644 --- a/drivers/net/dsa/lan9303-core.c +++ b/drivers/net/dsa/lan9303-core.c @@ -1003,9 +1003,11 @@ static void lan9303_get_ethtool_stats(struct dsa_switch *ds, int port, ret = lan9303_read_switch_port( chip, port, lan9303_mib[u].offset, ®); - if (ret) + if (ret) { dev_warn(chip->dev, "Reading status port %d reg %u failed\n", port, lan9303_mib[u].offset); + reg = 0; + } data[u] = reg; } } diff --git a/drivers/net/dsa/mv88e6xxx/Kconfig b/drivers/net/dsa/mv88e6xxx/Kconfig index 7a2445a34eb7..e3181d5471df 100644 --- a/drivers/net/dsa/mv88e6xxx/Kconfig +++ b/drivers/net/dsa/mv88e6xxx/Kconfig @@ -2,7 +2,6 @@ config NET_DSA_MV88E6XXX tristate "Marvell 88E6xxx Ethernet switch fabric support" depends on NET_DSA - depends on PTP_1588_CLOCK_OPTIONAL select IRQ_DOMAIN select NET_DSA_TAG_EDSA select NET_DSA_TAG_DSA @@ -13,7 +12,8 @@ config NET_DSA_MV88E6XXX config NET_DSA_MV88E6XXX_PTP bool "PTP support for Marvell 88E6xxx" default n - depends on NET_DSA_MV88E6XXX && PTP_1588_CLOCK + depends on (NET_DSA_MV88E6XXX = y && PTP_1588_CLOCK = y) || \ + (NET_DSA_MV88E6XXX = m && PTP_1588_CLOCK) help Say Y to enable PTP hardware timestamping on Marvell 88E6xxx switch chips that support it. 
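The lan9303_get_ethtool_stats hunk above makes the driver report 0 for any counter whose register read failed, rather than whatever happened to be in the uninitialized local. Below is a minimal, self-contained userspace sketch of that defensive pattern, not part of the patch; read_counter() and its failure condition are invented purely for illustration.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for a hardware register read: returns 0 on success,
 * a negative value on error. The error condition is contrived. */
static int read_counter(unsigned int offset, uint32_t *val)
{
	if (offset > 0x40)
		return -1;
	*val = offset * 100;	/* fake counter value */
	return 0;
}

int main(void)
{
	const unsigned int offsets[] = { 0x10, 0x20, 0x50 };
	uint32_t data[3];
	unsigned int i;

	for (i = 0; i < 3; i++) {
		uint32_t reg;

		if (read_counter(offsets[i], &reg)) {
			fprintf(stderr, "reading reg %#x failed\n", offsets[i]);
			reg = 0;	/* report 0, never an uninitialized value */
		}
		data[i] = reg;
		printf("counter[%u] = %u\n", i, (unsigned int)data[i]);
	}
	return 0;
}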
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c index ab413fc1f68e..f0faad149a3b 100644 --- a/drivers/net/ethernet/amazon/ena/ena_com.c +++ b/drivers/net/ethernet/amazon/ena/ena_com.c @@ -2392,29 +2392,18 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev, return -EOPNOTSUPP; } - switch (func) { - case ENA_ADMIN_TOEPLITZ: - if (key) { - if (key_len != sizeof(hash_key->key)) { - netdev_err(ena_dev->net_device, - "key len (%u) doesn't equal the supported size (%zu)\n", - key_len, sizeof(hash_key->key)); - return -EINVAL; - } - memcpy(hash_key->key, key, key_len); - rss->hash_init_val = init_val; - hash_key->key_parts = key_len / sizeof(hash_key->key[0]); + if ((func == ENA_ADMIN_TOEPLITZ) && key) { + if (key_len != sizeof(hash_key->key)) { + netdev_err(ena_dev->net_device, + "key len (%u) doesn't equal the supported size (%zu)\n", + key_len, sizeof(hash_key->key)); + return -EINVAL; } - break; - case ENA_ADMIN_CRC32: - rss->hash_init_val = init_val; - break; - default: - netdev_err(ena_dev->net_device, "Invalid hash function (%d)\n", - func); - return -EINVAL; + memcpy(hash_key->key, key, key_len); + hash_key->key_parts = key_len / sizeof(hash_key->key[0]); } + rss->hash_init_val = init_val; old_func = rss->hash_func; rss->hash_func = func; rc = ena_com_set_hash_function(ena_dev); diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c index 13e745cf3781..413082f10dc1 100644 --- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c +++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c @@ -880,11 +880,7 @@ static int ena_set_tunable(struct net_device *netdev, switch (tuna->id) { case ETHTOOL_RX_COPYBREAK: len = *(u32 *)data; - if (len > adapter->netdev->mtu) { - ret = -EINVAL; - break; - } - adapter->rx_copybreak = len; + ret = ena_set_rx_copybreak(adapter, len); break; default: ret = -EINVAL; diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index f032e58a4c3c..23c9750850e9 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -378,9 +378,9 @@ static int ena_xdp_xmit(struct net_device *dev, int n, static int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp) { + u32 verdict = ENA_XDP_PASS; struct bpf_prog *xdp_prog; struct ena_ring *xdp_ring; - u32 verdict = XDP_PASS; struct xdp_frame *xdpf; u64 *xdp_stat; @@ -397,7 +397,7 @@ static int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp) if (unlikely(!xdpf)) { trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict); xdp_stat = &rx_ring->rx_stats.xdp_aborted; - verdict = XDP_ABORTED; + verdict = ENA_XDP_DROP; break; } @@ -413,29 +413,35 @@ static int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp) spin_unlock(&xdp_ring->xdp_tx_lock); xdp_stat = &rx_ring->rx_stats.xdp_tx; + verdict = ENA_XDP_TX; break; case XDP_REDIRECT: if (likely(!xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog))) { xdp_stat = &rx_ring->rx_stats.xdp_redirect; + verdict = ENA_XDP_REDIRECT; break; } trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict); xdp_stat = &rx_ring->rx_stats.xdp_aborted; - verdict = XDP_ABORTED; + verdict = ENA_XDP_DROP; break; case XDP_ABORTED: trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict); xdp_stat = &rx_ring->rx_stats.xdp_aborted; + verdict = ENA_XDP_DROP; break; case XDP_DROP: xdp_stat = &rx_ring->rx_stats.xdp_drop; + verdict = ENA_XDP_DROP; break; case XDP_PASS: xdp_stat = 
&rx_ring->rx_stats.xdp_pass; + verdict = ENA_XDP_PASS; break; default: bpf_warn_invalid_xdp_action(verdict); xdp_stat = &rx_ring->rx_stats.xdp_invalid; + verdict = ENA_XDP_DROP; } ena_increase_stat(xdp_stat, 1, &rx_ring->syncp); @@ -516,16 +522,18 @@ static void ena_xdp_exchange_program_rx_in_range(struct ena_adapter *adapter, struct bpf_prog *prog, int first, int count) { + struct bpf_prog *old_bpf_prog; struct ena_ring *rx_ring; int i = 0; for (i = first; i < count; i++) { rx_ring = &adapter->rx_ring[i]; - xchg(&rx_ring->xdp_bpf_prog, prog); - if (prog) { + old_bpf_prog = xchg(&rx_ring->xdp_bpf_prog, prog); + + if (!old_bpf_prog && prog) { ena_xdp_register_rxq_info(rx_ring); rx_ring->rx_headroom = XDP_PACKET_HEADROOM; - } else { + } else if (old_bpf_prog && !prog) { ena_xdp_unregister_rxq_info(rx_ring); rx_ring->rx_headroom = NET_SKB_PAD; } @@ -676,6 +684,7 @@ static void ena_init_io_rings_common(struct ena_adapter *adapter, ring->ena_dev = adapter->ena_dev; ring->per_napi_packets = 0; ring->cpu = 0; + ring->numa_node = 0; ring->no_interrupt_event_cnt = 0; u64_stats_init(&ring->syncp); } @@ -779,6 +788,7 @@ static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid) tx_ring->next_to_use = 0; tx_ring->next_to_clean = 0; tx_ring->cpu = ena_irq->cpu; + tx_ring->numa_node = node; return 0; err_push_buf_intermediate_buf: @@ -911,6 +921,7 @@ static int ena_setup_rx_resources(struct ena_adapter *adapter, rx_ring->next_to_clean = 0; rx_ring->next_to_use = 0; rx_ring->cpu = ena_irq->cpu; + rx_ring->numa_node = node; return 0; } @@ -1629,12 +1640,12 @@ static int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp) * we expect, then we simply drop it */ if (unlikely(rx_ring->ena_bufs[0].len > ENA_XDP_MAX_MTU)) - return XDP_DROP; + return ENA_XDP_DROP; ret = ena_xdp_execute(rx_ring, xdp); /* The xdp program might expand the headers */ - if (ret == XDP_PASS) { + if (ret == ENA_XDP_PASS) { rx_info->page_offset = xdp->data - xdp->data_hard_start; rx_ring->ena_bufs[0].len = xdp->data_end - xdp->data; } @@ -1673,7 +1684,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi, xdp_init_buff(&xdp, ENA_PAGE_SIZE, &rx_ring->xdp_rxq); do { - xdp_verdict = XDP_PASS; + xdp_verdict = ENA_XDP_PASS; skb = NULL; ena_rx_ctx.ena_bufs = rx_ring->ena_bufs; ena_rx_ctx.max_bufs = rx_ring->sgl_size; @@ -1701,7 +1712,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi, xdp_verdict = ena_xdp_handle_buff(rx_ring, &xdp); /* allocate skb and fill it */ - if (xdp_verdict == XDP_PASS) + if (xdp_verdict == ENA_XDP_PASS) skb = ena_rx_skb(rx_ring, rx_ring->ena_bufs, ena_rx_ctx.descs, @@ -1719,14 +1730,15 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi, /* Packets was passed for transmission, unmap it * from RX side. 
*/ - if (xdp_verdict == XDP_TX || xdp_verdict == XDP_REDIRECT) { + if (xdp_verdict & ENA_XDP_FORWARDED) { ena_unmap_rx_buff(rx_ring, &rx_ring->rx_buffer_info[req_id]); rx_ring->rx_buffer_info[req_id].page = NULL; } } - if (xdp_verdict != XDP_PASS) { + if (xdp_verdict != ENA_XDP_PASS) { xdp_flags |= xdp_verdict; + total_len += ena_rx_ctx.ena_bufs[0].len; res_budget--; continue; } @@ -1770,7 +1782,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi, ena_refill_rx_bufs(rx_ring, refill_required); } - if (xdp_flags & XDP_REDIRECT) + if (xdp_flags & ENA_XDP_REDIRECT) xdp_do_flush_map(); return work_done; @@ -1827,8 +1839,9 @@ static void ena_adjust_adaptive_rx_intr_moderation(struct ena_napi *ena_napi) static void ena_unmask_interrupt(struct ena_ring *tx_ring, struct ena_ring *rx_ring) { + u32 rx_interval = tx_ring->smoothed_interval; struct ena_eth_io_intr_reg intr_reg; - u32 rx_interval = 0; + /* Rx ring can be NULL when for XDP tx queues which don't have an * accompanying rx_ring pair. */ @@ -1866,20 +1879,27 @@ static void ena_update_ring_numa_node(struct ena_ring *tx_ring, if (likely(tx_ring->cpu == cpu)) goto out; + tx_ring->cpu = cpu; + if (rx_ring) + rx_ring->cpu = cpu; + numa_node = cpu_to_node(cpu); + + if (likely(tx_ring->numa_node == numa_node)) + goto out; + put_cpu(); if (numa_node != NUMA_NO_NODE) { ena_com_update_numa_node(tx_ring->ena_com_io_cq, numa_node); - if (rx_ring) + tx_ring->numa_node = numa_node; + if (rx_ring) { + rx_ring->numa_node = numa_node; ena_com_update_numa_node(rx_ring->ena_com_io_cq, numa_node); + } } - tx_ring->cpu = cpu; - if (rx_ring) - rx_ring->cpu = cpu; - return; out: put_cpu(); @@ -2000,11 +2020,10 @@ static int ena_io_poll(struct napi_struct *napi, int budget) if (ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev)) ena_adjust_adaptive_rx_intr_moderation(ena_napi); + ena_update_ring_numa_node(tx_ring, rx_ring); ena_unmask_interrupt(tx_ring, rx_ring); } - ena_update_ring_numa_node(tx_ring, rx_ring); - ret = rx_work_done; } else { ret = budget; @@ -2391,7 +2410,7 @@ static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid) ctx.mem_queue_type = ena_dev->tx_mem_queue_type; ctx.msix_vector = msix_vector; ctx.queue_size = tx_ring->ring_size; - ctx.numa_node = cpu_to_node(tx_ring->cpu); + ctx.numa_node = tx_ring->numa_node; rc = ena_com_create_io_queue(ena_dev, &ctx); if (rc) { @@ -2459,7 +2478,7 @@ static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid) ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; ctx.msix_vector = msix_vector; ctx.queue_size = rx_ring->ring_size; - ctx.numa_node = cpu_to_node(rx_ring->cpu); + ctx.numa_node = rx_ring->numa_node; rc = ena_com_create_io_queue(ena_dev, &ctx); if (rc) { @@ -2820,6 +2839,24 @@ int ena_update_queue_sizes(struct ena_adapter *adapter, return dev_was_up ? 
ena_up(adapter) : 0; } +int ena_set_rx_copybreak(struct ena_adapter *adapter, u32 rx_copybreak) +{ + struct ena_ring *rx_ring; + int i; + + if (rx_copybreak > min_t(u16, adapter->netdev->mtu, ENA_PAGE_SIZE)) + return -EINVAL; + + adapter->rx_copybreak = rx_copybreak; + + for (i = 0; i < adapter->num_io_queues; i++) { + rx_ring = &adapter->rx_ring[i]; + rx_ring->rx_copybreak = rx_copybreak; + } + + return 0; +} + int ena_update_queue_count(struct ena_adapter *adapter, u32 new_channel_count) { struct ena_com_dev *ena_dev = adapter->ena_dev; diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h index 0c39fc2fa345..bf2a39c91c00 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.h +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h @@ -273,9 +273,11 @@ struct ena_ring { bool disable_meta_caching; u16 no_interrupt_event_cnt; - /* cpu for TPH */ + /* cpu and NUMA for TPH */ int cpu; - /* number of tx/rx_buffer_info's entries */ + int numa_node; + + /* number of tx/rx_buffer_info's entries */ int ring_size; enum ena_admin_placement_policy_type tx_mem_queue_type; @@ -404,6 +406,8 @@ int ena_update_queue_sizes(struct ena_adapter *adapter, int ena_update_queue_count(struct ena_adapter *adapter, u32 new_channel_count); +int ena_set_rx_copybreak(struct ena_adapter *adapter, u32 rx_copybreak); + int ena_get_sset_count(struct net_device *netdev, int sset); enum ena_xdp_errors_t { @@ -412,6 +416,15 @@ enum ena_xdp_errors_t { ENA_XDP_NO_ENOUGH_QUEUES, }; +enum ENA_XDP_ACTIONS { + ENA_XDP_PASS = 0, + ENA_XDP_TX = BIT(0), + ENA_XDP_REDIRECT = BIT(1), + ENA_XDP_DROP = BIT(2) +}; + +#define ENA_XDP_FORWARDED (ENA_XDP_TX | ENA_XDP_REDIRECT) + static inline bool ena_xdp_present(struct ena_adapter *adapter) { return !!adapter->xdp_bpf_prog; diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c index 9d2f49fd945e..a0971ed00453 100644 --- a/drivers/net/ethernet/amd/atarilance.c +++ b/drivers/net/ethernet/amd/atarilance.c @@ -821,7 +821,7 @@ lance_start_xmit(struct sk_buff *skb, struct net_device *dev) lp->memcpy_f( PKTBUF_ADDR(head), (void *)skb->data, skb->len ); head->flag = TMD1_OWN_CHIP | TMD1_ENP | TMD1_STP; dev->stats.tx_bytes += skb->len; - dev_kfree_skb( skb ); + dev_consume_skb_irq(skb); lp->cur_tx++; while( lp->cur_tx >= TX_RING_SIZE && lp->dirty_tx >= TX_RING_SIZE ) { lp->cur_tx -= TX_RING_SIZE; diff --git a/drivers/net/ethernet/amd/lance.c b/drivers/net/ethernet/amd/lance.c index 945bf1d87507..6c2d72024e21 100644 --- a/drivers/net/ethernet/amd/lance.c +++ b/drivers/net/ethernet/amd/lance.c @@ -999,7 +999,7 @@ static netdev_tx_t lance_start_xmit(struct sk_buff *skb, skb_copy_from_linear_data(skb, &lp->tx_bounce_buffs[entry], skb->len); lp->tx_ring[entry].base = ((u32)isa_virt_to_bus((lp->tx_bounce_buffs + entry)) & 0xffffff) | 0x83000000; - dev_kfree_skb(skb); + dev_consume_skb_irq(skb); } else { lp->tx_skbuff[entry] = skb; lp->tx_ring[entry].base = ((u32)isa_virt_to_bus(skb->data) & 0xffffff) | 0x83000000; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index e6883d52d230..555db1871ec9 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -1064,6 +1064,9 @@ static void xgbe_free_irqs(struct xgbe_prv_data *pdata) devm_free_irq(pdata->dev, pdata->dev_irq, pdata); + tasklet_kill(&pdata->tasklet_dev); + tasklet_kill(&pdata->tasklet_ecc); + if (pdata->vdata->ecc_support && (pdata->dev_irq != pdata->ecc_irq)) 
devm_free_irq(pdata->dev, pdata->ecc_irq, pdata); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c b/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c index 22d4fc547a0a..a9ccc4258ee5 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c @@ -447,8 +447,10 @@ static void xgbe_i2c_stop(struct xgbe_prv_data *pdata) xgbe_i2c_disable(pdata); xgbe_i2c_clear_all_interrupts(pdata); - if (pdata->dev_irq != pdata->i2c_irq) + if (pdata->dev_irq != pdata->i2c_irq) { devm_free_irq(pdata->dev, pdata->i2c_irq, pdata); + tasklet_kill(&pdata->tasklet_i2c); + } } static int xgbe_i2c_start(struct xgbe_prv_data *pdata) diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c index 4e97b4869522..0c5c1b155683 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c @@ -1390,8 +1390,10 @@ static void xgbe_phy_stop(struct xgbe_prv_data *pdata) /* Disable auto-negotiation */ xgbe_an_disable_all(pdata); - if (pdata->dev_irq != pdata->an_irq) + if (pdata->dev_irq != pdata->an_irq) { devm_free_irq(pdata->dev, pdata->an_irq, pdata); + tasklet_kill(&pdata->tasklet_an); + } pdata->phy_if.phy_impl.stop(pdata); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c index a7166cd1179f..97e32c0490f8 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c @@ -189,6 +189,7 @@ enum xgbe_sfp_cable { XGBE_SFP_CABLE_UNKNOWN = 0, XGBE_SFP_CABLE_ACTIVE, XGBE_SFP_CABLE_PASSIVE, + XGBE_SFP_CABLE_FIBER, }; enum xgbe_sfp_base { @@ -236,10 +237,7 @@ enum xgbe_sfp_speed { #define XGBE_SFP_BASE_BR 12 #define XGBE_SFP_BASE_BR_1GBE_MIN 0x0a -#define XGBE_SFP_BASE_BR_1GBE_MAX 0x0d #define XGBE_SFP_BASE_BR_10GBE_MIN 0x64 -#define XGBE_SFP_BASE_BR_10GBE_MAX 0x68 -#define XGBE_MOLEX_SFP_BASE_BR_10GBE_MAX 0x78 #define XGBE_SFP_BASE_CU_CABLE_LEN 18 @@ -826,29 +824,22 @@ static void xgbe_phy_sfp_phy_settings(struct xgbe_prv_data *pdata) static bool xgbe_phy_sfp_bit_rate(struct xgbe_sfp_eeprom *sfp_eeprom, enum xgbe_sfp_speed sfp_speed) { - u8 *sfp_base, min, max; + u8 *sfp_base, min; sfp_base = sfp_eeprom->base; switch (sfp_speed) { case XGBE_SFP_SPEED_1000: min = XGBE_SFP_BASE_BR_1GBE_MIN; - max = XGBE_SFP_BASE_BR_1GBE_MAX; break; case XGBE_SFP_SPEED_10000: min = XGBE_SFP_BASE_BR_10GBE_MIN; - if (memcmp(&sfp_eeprom->base[XGBE_SFP_BASE_VENDOR_NAME], - XGBE_MOLEX_VENDOR, XGBE_SFP_BASE_VENDOR_NAME_LEN) == 0) - max = XGBE_MOLEX_SFP_BASE_BR_10GBE_MAX; - else - max = XGBE_SFP_BASE_BR_10GBE_MAX; break; default: return false; } - return ((sfp_base[XGBE_SFP_BASE_BR] >= min) && - (sfp_base[XGBE_SFP_BASE_BR] <= max)); + return sfp_base[XGBE_SFP_BASE_BR] >= min; } static void xgbe_phy_free_phy_device(struct xgbe_prv_data *pdata) @@ -1149,16 +1140,18 @@ static void xgbe_phy_sfp_parse_eeprom(struct xgbe_prv_data *pdata) phy_data->sfp_tx_fault = xgbe_phy_check_sfp_tx_fault(phy_data); phy_data->sfp_rx_los = xgbe_phy_check_sfp_rx_los(phy_data); - /* Assume ACTIVE cable unless told it is PASSIVE */ + /* Assume FIBER cable unless told otherwise */ if (sfp_base[XGBE_SFP_BASE_CABLE] & XGBE_SFP_BASE_CABLE_PASSIVE) { phy_data->sfp_cable = XGBE_SFP_CABLE_PASSIVE; phy_data->sfp_cable_len = sfp_base[XGBE_SFP_BASE_CU_CABLE_LEN]; - } else { + } else if (sfp_base[XGBE_SFP_BASE_CABLE] & XGBE_SFP_BASE_CABLE_ACTIVE) { phy_data->sfp_cable = XGBE_SFP_CABLE_ACTIVE; + } else { + phy_data->sfp_cable = XGBE_SFP_CABLE_FIBER; } /* Determine the type of SFP */ 
- if (phy_data->sfp_cable == XGBE_SFP_CABLE_PASSIVE && + if (phy_data->sfp_cable != XGBE_SFP_CABLE_FIBER && xgbe_phy_sfp_bit_rate(sfp_eeprom, XGBE_SFP_SPEED_10000)) phy_data->sfp_base = XGBE_SFP_BASE_10000_CR; else if (sfp_base[XGBE_SFP_BASE_10GBE_CC] & XGBE_SFP_BASE_10GBE_CC_SR) diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c index a989d2df59ad..7a966361d83f 100644 --- a/drivers/net/ethernet/apple/bmac.c +++ b/drivers/net/ethernet/apple/bmac.c @@ -1511,7 +1511,7 @@ static void bmac_tx_timeout(struct timer_list *t) i = bp->tx_empty; ++dev->stats.tx_errors; if (i != bp->tx_fill) { - dev_kfree_skb(bp->tx_bufs[i]); + dev_kfree_skb_irq(bp->tx_bufs[i]); bp->tx_bufs[i] = NULL; if (++i >= N_TX_RING) i = 0; bp->tx_empty = i; diff --git a/drivers/net/ethernet/apple/mace.c b/drivers/net/ethernet/apple/mace.c index 4b80e3a52a19..44037e9e197f 100644 --- a/drivers/net/ethernet/apple/mace.c +++ b/drivers/net/ethernet/apple/mace.c @@ -841,7 +841,7 @@ static void mace_tx_timeout(struct timer_list *t) if (mp->tx_bad_runt) { mp->tx_bad_runt = 0; } else if (i != mp->tx_fill) { - dev_kfree_skb(mp->tx_bufs[i]); + dev_kfree_skb_irq(mp->tx_bufs[i]); if (++i >= N_TX_RING) i = 0; mp->tx_empty = i; diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c index 6c51cf991dad..14dc2e13bf03 100644 --- a/drivers/net/ethernet/dnet.c +++ b/drivers/net/ethernet/dnet.c @@ -550,11 +550,11 @@ static netdev_tx_t dnet_start_xmit(struct sk_buff *skb, struct net_device *dev) skb_tx_timestamp(skb); + spin_unlock_irqrestore(&bp->lock, flags); + /* free the buffer */ dev_kfree_skb(skb); - spin_unlock_irqrestore(&bp->lock, flags); - return NETDEV_TX_OK; } diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c index 222a250fba84..adccb14c1644 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.c +++ b/drivers/net/ethernet/freescale/enetc/enetc.c @@ -1220,23 +1220,6 @@ static void enetc_xdp_drop(struct enetc_bdr *rx_ring, int rx_ring_first, rx_ring->stats.xdp_drops++; } -static void enetc_xdp_free(struct enetc_bdr *rx_ring, int rx_ring_first, - int rx_ring_last) -{ - while (rx_ring_first != rx_ring_last) { - struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[rx_ring_first]; - - if (rx_swbd->page) { - dma_unmap_page(rx_ring->dev, rx_swbd->dma, PAGE_SIZE, - rx_swbd->dir); - __free_page(rx_swbd->page); - rx_swbd->page = NULL; - } - enetc_bdr_idx_inc(rx_ring, &rx_ring_first); - } - rx_ring->stats.xdp_redirect_failures++; -} - static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring, struct napi_struct *napi, int work_limit, struct bpf_prog *prog) @@ -1258,8 +1241,8 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring, int orig_i, orig_cleaned_cnt; struct xdp_buff xdp_buff; struct sk_buff *skb; - int tmp_orig_i, err; u32 bd_status; + int err; rxbd = enetc_rxbd(rx_ring, i); bd_status = le32_to_cpu(rxbd->r.lstatus); @@ -1346,18 +1329,16 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring, break; } - tmp_orig_i = orig_i; - - while (orig_i != i) { - enetc_flip_rx_buff(rx_ring, - &rx_ring->rx_swbd[orig_i]); - enetc_bdr_idx_inc(rx_ring, &orig_i); - } - err = xdp_do_redirect(rx_ring->ndev, &xdp_buff, prog); if (unlikely(err)) { - enetc_xdp_free(rx_ring, tmp_orig_i, i); + enetc_xdp_drop(rx_ring, orig_i, i); + rx_ring->stats.xdp_redirect_failures++; } else { + while (orig_i != i) { + enetc_flip_rx_buff(rx_ring, + &rx_ring->rx_swbd[orig_i]); + enetc_bdr_idx_inc(rx_ring, &orig_i); + } xdp_redirect_frm_cnt++; 
rx_ring->stats.xdp_redirect++; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 818a028703c6..dc835f316d47 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -1005,9 +1005,7 @@ static bool hns3_can_use_tx_bounce(struct hns3_enet_ring *ring, return false; if (ALIGN(len, dma_get_cache_alignment()) > space) { - u64_stats_update_begin(&ring->syncp); - ring->stats.tx_spare_full++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, tx_spare_full); return false; } @@ -1024,9 +1022,7 @@ static bool hns3_can_use_tx_sgl(struct hns3_enet_ring *ring, return false; if (space < HNS3_MAX_SGL_SIZE) { - u64_stats_update_begin(&ring->syncp); - ring->stats.tx_spare_full++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, tx_spare_full); return false; } @@ -1554,9 +1550,7 @@ static int hns3_fill_skb_desc(struct hns3_enet_ring *ring, ret = hns3_handle_vtags(ring, skb); if (unlikely(ret < 0)) { - u64_stats_update_begin(&ring->syncp); - ring->stats.tx_vlan_err++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, tx_vlan_err); return ret; } else if (ret == HNS3_INNER_VLAN_TAG) { inner_vtag = skb_vlan_tag_get(skb); @@ -1591,9 +1585,7 @@ static int hns3_fill_skb_desc(struct hns3_enet_ring *ring, ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto); if (unlikely(ret < 0)) { - u64_stats_update_begin(&ring->syncp); - ring->stats.tx_l4_proto_err++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, tx_l4_proto_err); return ret; } @@ -1601,18 +1593,14 @@ static int hns3_fill_skb_desc(struct hns3_enet_ring *ring, &type_cs_vlan_tso, &ol_type_vlan_len_msec); if (unlikely(ret < 0)) { - u64_stats_update_begin(&ring->syncp); - ring->stats.tx_l2l3l4_err++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, tx_l2l3l4_err); return ret; } ret = hns3_set_tso(skb, &paylen_ol4cs, &mss_hw_csum, &type_cs_vlan_tso, &desc_cb->send_bytes); if (unlikely(ret < 0)) { - u64_stats_update_begin(&ring->syncp); - ring->stats.tx_tso_err++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, tx_tso_err); return ret; } } @@ -1705,9 +1693,7 @@ static int hns3_map_and_fill_desc(struct hns3_enet_ring *ring, void *priv, } if (unlikely(dma_mapping_error(dev, dma))) { - u64_stats_update_begin(&ring->syncp); - ring->stats.sw_err_cnt++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, sw_err_cnt); return -ENOMEM; } @@ -1853,9 +1839,7 @@ static int hns3_skb_linearize(struct hns3_enet_ring *ring, * recursion level of over HNS3_MAX_RECURSION_LEVEL. 
*/ if (bd_num == UINT_MAX) { - u64_stats_update_begin(&ring->syncp); - ring->stats.over_max_recursion++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, over_max_recursion); return -ENOMEM; } @@ -1864,16 +1848,12 @@ static int hns3_skb_linearize(struct hns3_enet_ring *ring, */ if (skb->len > HNS3_MAX_TSO_SIZE || (!skb_is_gso(skb) && skb->len > HNS3_MAX_NON_TSO_SIZE)) { - u64_stats_update_begin(&ring->syncp); - ring->stats.hw_limitation++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, hw_limitation); return -ENOMEM; } if (__skb_linearize(skb)) { - u64_stats_update_begin(&ring->syncp); - ring->stats.sw_err_cnt++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, sw_err_cnt); return -ENOMEM; } @@ -1903,9 +1883,7 @@ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring, bd_num = hns3_tx_bd_count(skb->len); - u64_stats_update_begin(&ring->syncp); - ring->stats.tx_copy++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, tx_copy); } out: @@ -1925,9 +1903,7 @@ out: return bd_num; } - u64_stats_update_begin(&ring->syncp); - ring->stats.tx_busy++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, tx_busy); return -EBUSY; } @@ -2012,9 +1988,7 @@ static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num, ring->pending_buf += num; if (!doorbell) { - u64_stats_update_begin(&ring->syncp); - ring->stats.tx_more++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, tx_more); return; } @@ -2064,9 +2038,7 @@ static int hns3_handle_tx_bounce(struct hns3_enet_ring *ring, ret = skb_copy_bits(skb, 0, buf, size); if (unlikely(ret < 0)) { hns3_tx_spare_rollback(ring, cb_len); - u64_stats_update_begin(&ring->syncp); - ring->stats.copy_bits_err++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, copy_bits_err); return ret; } @@ -2089,9 +2061,8 @@ static int hns3_handle_tx_bounce(struct hns3_enet_ring *ring, dma_sync_single_for_device(ring_to_dev(ring), dma, size, DMA_TO_DEVICE); - u64_stats_update_begin(&ring->syncp); - ring->stats.tx_bounce++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, tx_bounce); + return bd_num; } @@ -2121,9 +2092,7 @@ static int hns3_handle_tx_sgl(struct hns3_enet_ring *ring, nents = skb_to_sgvec(skb, sgt->sgl, 0, skb->len); if (unlikely(nents < 0)) { hns3_tx_spare_rollback(ring, cb_len); - u64_stats_update_begin(&ring->syncp); - ring->stats.skb2sgl_err++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, skb2sgl_err); return -ENOMEM; } @@ -2132,9 +2101,7 @@ static int hns3_handle_tx_sgl(struct hns3_enet_ring *ring, DMA_TO_DEVICE); if (unlikely(!sgt->nents)) { hns3_tx_spare_rollback(ring, cb_len); - u64_stats_update_begin(&ring->syncp); - ring->stats.map_sg_err++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, map_sg_err); return -ENOMEM; } @@ -2146,10 +2113,7 @@ static int hns3_handle_tx_sgl(struct hns3_enet_ring *ring, for (i = 0; i < sgt->nents; i++) bd_num += hns3_fill_desc(ring, sg_dma_address(sgt->sgl + i), sg_dma_len(sgt->sgl + i)); - - u64_stats_update_begin(&ring->syncp); - ring->stats.tx_sgl++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, tx_sgl); return bd_num; } @@ -2188,9 +2152,7 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev) if (skb_put_padto(skb, HNS3_MIN_TX_LEN)) { hns3_tx_doorbell(ring, 0, !netdev_xmit_more()); - u64_stats_update_begin(&ring->syncp); - ring->stats.sw_err_cnt++; - 
u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, sw_err_cnt); return NETDEV_TX_OK; } @@ -3522,17 +3484,13 @@ static bool hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, for (i = 0; i < cleand_count; i++) { desc_cb = &ring->desc_cb[ring->next_to_use]; if (desc_cb->reuse_flag) { - u64_stats_update_begin(&ring->syncp); - ring->stats.reuse_pg_cnt++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, reuse_pg_cnt); hns3_reuse_buffer(ring, ring->next_to_use); } else { ret = hns3_alloc_and_map_buffer(ring, &res_cbs); if (ret) { - u64_stats_update_begin(&ring->syncp); - ring->stats.sw_err_cnt++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, sw_err_cnt); hns3_rl_err(ring_to_netdev(ring), "alloc rx buffer failed: %d\n", @@ -3544,9 +3502,7 @@ static bool hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, } hns3_replace_buffer(ring, ring->next_to_use, &res_cbs); - u64_stats_update_begin(&ring->syncp); - ring->stats.non_reuse_pg++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, non_reuse_pg); } ring_ptr_move_fw(ring, next_to_use); @@ -3561,6 +3517,34 @@ static bool hns3_can_reuse_page(struct hns3_desc_cb *cb) return page_count(cb->priv) == cb->pagecnt_bias; } +static int hns3_handle_rx_copybreak(struct sk_buff *skb, int i, + struct hns3_enet_ring *ring, + int pull_len, + struct hns3_desc_cb *desc_cb) +{ + struct hns3_desc *desc = &ring->desc[ring->next_to_clean]; + u32 frag_offset = desc_cb->page_offset + pull_len; + int size = le16_to_cpu(desc->rx.size); + u32 frag_size = size - pull_len; + void *frag = napi_alloc_frag(frag_size); + + if (unlikely(!frag)) { + hns3_ring_stats_update(ring, frag_alloc_err); + + hns3_rl_err(ring_to_netdev(ring), + "failed to allocate rx frag\n"); + return -ENOMEM; + } + + desc_cb->reuse_flag = 1; + memcpy(frag, desc_cb->buf + frag_offset, frag_size); + skb_add_rx_frag(skb, i, virt_to_page(frag), + offset_in_page(frag), frag_size, frag_size); + + hns3_ring_stats_update(ring, frag_alloc); + return 0; +} + static void hns3_nic_reuse_page(struct sk_buff *skb, int i, struct hns3_enet_ring *ring, int pull_len, struct hns3_desc_cb *desc_cb) @@ -3570,6 +3554,7 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i, int size = le16_to_cpu(desc->rx.size); u32 truesize = hns3_buf_size(ring); u32 frag_size = size - pull_len; + int ret = 0; bool reused; if (ring->page_pool) { @@ -3604,27 +3589,9 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i, desc_cb->page_offset = 0; desc_cb->reuse_flag = 1; } else if (frag_size <= ring->rx_copybreak) { - void *frag = napi_alloc_frag(frag_size); - - if (unlikely(!frag)) { - u64_stats_update_begin(&ring->syncp); - ring->stats.frag_alloc_err++; - u64_stats_update_end(&ring->syncp); - - hns3_rl_err(ring_to_netdev(ring), - "failed to allocate rx frag\n"); - goto out; - } - - desc_cb->reuse_flag = 1; - memcpy(frag, desc_cb->buf + frag_offset, frag_size); - skb_add_rx_frag(skb, i, virt_to_page(frag), - offset_in_page(frag), frag_size, frag_size); - - u64_stats_update_begin(&ring->syncp); - ring->stats.frag_alloc++; - u64_stats_update_end(&ring->syncp); - return; + ret = hns3_handle_rx_copybreak(skb, i, ring, pull_len, desc_cb); + if (!ret) + return; } out: @@ -3700,20 +3667,16 @@ static int hns3_gro_complete(struct sk_buff *skb, u32 l234info) return 0; } -static bool hns3_checksum_complete(struct hns3_enet_ring *ring, +static void hns3_checksum_complete(struct hns3_enet_ring *ring, struct sk_buff *skb, u32 ptype, u16 csum) { if (ptype == 
HNS3_INVALID_PTYPE || hns3_rx_ptype_tbl[ptype].ip_summed != CHECKSUM_COMPLETE) - return false; + return; - u64_stats_update_begin(&ring->syncp); - ring->stats.csum_complete++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, csum_complete); skb->ip_summed = CHECKSUM_COMPLETE; skb->csum = csum_unfold((__force __sum16)csum); - - return true; } static void hns3_rx_handle_csum(struct sk_buff *skb, u32 l234info, @@ -3773,8 +3736,7 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb, ptype = hnae3_get_field(ol_info, HNS3_RXD_PTYPE_M, HNS3_RXD_PTYPE_S); - if (hns3_checksum_complete(ring, skb, ptype, csum)) - return; + hns3_checksum_complete(ring, skb, ptype, csum); /* check if hardware has done checksum */ if (!(bd_base_info & BIT(HNS3_RXD_L3L4P_B))) @@ -3783,9 +3745,8 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb, if (unlikely(l234info & (BIT(HNS3_RXD_L3E_B) | BIT(HNS3_RXD_L4E_B) | BIT(HNS3_RXD_OL3E_B) | BIT(HNS3_RXD_OL4E_B)))) { - u64_stats_update_begin(&ring->syncp); - ring->stats.l3l4_csum_err++; - u64_stats_update_end(&ring->syncp); + skb->ip_summed = CHECKSUM_NONE; + hns3_ring_stats_update(ring, l3l4_csum_err); return; } @@ -3876,10 +3837,7 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length, skb = ring->skb; if (unlikely(!skb)) { hns3_rl_err(netdev, "alloc rx skb fail\n"); - - u64_stats_update_begin(&ring->syncp); - ring->stats.sw_err_cnt++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, sw_err_cnt); return -ENOMEM; } @@ -3910,9 +3868,7 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length, if (ring->page_pool) skb_mark_for_recycle(skb); - u64_stats_update_begin(&ring->syncp); - ring->stats.seg_pkt_cnt++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, seg_pkt_cnt); ring->pull_len = eth_get_headlen(netdev, va, HNS3_RX_HEAD_SIZE); __skb_put(skb, ring->pull_len); @@ -4104,9 +4060,7 @@ static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb) ret = hns3_set_gro_and_checksum(ring, skb, l234info, bd_base_info, ol_info, csum); if (unlikely(ret)) { - u64_stats_update_begin(&ring->syncp); - ring->stats.rx_err_cnt++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, rx_err_cnt); return ret; } @@ -5318,9 +5272,7 @@ static int hns3_clear_rx_ring(struct hns3_enet_ring *ring) if (!ring->desc_cb[ring->next_to_use].reuse_flag) { ret = hns3_alloc_and_map_buffer(ring, &res_cbs); if (ret) { - u64_stats_update_begin(&ring->syncp); - ring->stats.sw_err_cnt++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, sw_err_cnt); /* if alloc new buffer fail, exit directly * and reclear in up flow. 
*/ diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index f09a61d9c626..91b656adaacb 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h @@ -654,6 +654,13 @@ static inline bool hns3_nic_resetting(struct net_device *netdev) #define hns3_buf_size(_ring) ((_ring)->buf_size) +#define hns3_ring_stats_update(ring, cnt) do { \ + typeof(ring) (tmp) = (ring); \ + u64_stats_update_begin(&(tmp)->syncp); \ + ((tmp)->stats.cnt)++; \ + u64_stats_update_end(&(tmp)->syncp); \ +} while (0) \ + static inline unsigned int hns3_page_order(struct hns3_enet_ring *ring) { #if (PAGE_SIZE < 8192) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 2102b38b9c35..f4d58fcdba27 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -12825,60 +12825,71 @@ static int hclge_gro_en(struct hnae3_handle *handle, bool enable) return ret; } -static void hclge_sync_promisc_mode(struct hclge_dev *hdev) +static int hclge_sync_vport_promisc_mode(struct hclge_vport *vport) { - struct hclge_vport *vport = &hdev->vport[0]; struct hnae3_handle *handle = &vport->nic; + struct hclge_dev *hdev = vport->back; + bool uc_en = false; + bool mc_en = false; u8 tmp_flags; + bool bc_en; int ret; - u16 i; if (vport->last_promisc_flags != vport->overflow_promisc_flags) { set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state); vport->last_promisc_flags = vport->overflow_promisc_flags; } - if (test_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state)) { + if (!test_and_clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, + &vport->state)) + return 0; + + /* for PF */ + if (!vport->vport_id) { tmp_flags = handle->netdev_flags | vport->last_promisc_flags; ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE, tmp_flags & HNAE3_MPE); - if (!ret) { - clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, - &vport->state); + if (!ret) set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE, &vport->state); - } - } - - for (i = 1; i < hdev->num_alloc_vport; i++) { - bool uc_en = false; - bool mc_en = false; - bool bc_en; - - vport = &hdev->vport[i]; - - if (!test_and_clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, - &vport->state)) - continue; - - if (vport->vf_info.trusted) { - uc_en = vport->vf_info.request_uc_en > 0 || - vport->overflow_promisc_flags & - HNAE3_OVERFLOW_UPE; - mc_en = vport->vf_info.request_mc_en > 0 || - vport->overflow_promisc_flags & - HNAE3_OVERFLOW_MPE; - } - bc_en = vport->vf_info.request_bc_en > 0; - - ret = hclge_cmd_set_promisc_mode(hdev, vport->vport_id, uc_en, - mc_en, bc_en); - if (ret) { + else set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state); + return ret; + } + + /* for VF */ + if (vport->vf_info.trusted) { + uc_en = vport->vf_info.request_uc_en > 0 || + vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE; + mc_en = vport->vf_info.request_mc_en > 0 || + vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE; + } + bc_en = vport->vf_info.request_bc_en > 0; + + ret = hclge_cmd_set_promisc_mode(hdev, vport->vport_id, uc_en, + mc_en, bc_en); + if (ret) { + set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state); + return ret; + } + hclge_set_vport_vlan_fltr_change(vport); + + return 0; +} + +static void hclge_sync_promisc_mode(struct hclge_dev *hdev) +{ + struct hclge_vport *vport; + int ret; + u16 i; + + for (i = 0; i < hdev->num_alloc_vport; i++) { + vport = 
&hdev->vport[i]; + + ret = hclge_sync_vport_promisc_mode(vport); + if (ret) return; - } - hclge_set_vport_vlan_fltr_change(vport); } } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index 21678c12afa2..3c1ff3313221 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -3258,7 +3258,8 @@ static int hclgevf_pci_reset(struct hclgevf_dev *hdev) struct pci_dev *pdev = hdev->pdev; int ret = 0; - if (hdev->reset_type == HNAE3_VF_FULL_RESET && + if ((hdev->reset_type == HNAE3_VF_FULL_RESET || + hdev->reset_type == HNAE3_FLR_RESET) && test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { hclgevf_misc_irq_uninit(hdev); hclgevf_uninit_msi(hdev); diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index ed2c961902b6..c013d86559af 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -3548,6 +3548,24 @@ static int i40e_vsi_configure_tx(struct i40e_vsi *vsi) return err; } +/** + * i40e_calculate_vsi_rx_buf_len - Calculates buffer length + * + * @vsi: VSI to calculate rx_buf_len from + */ +static u16 i40e_calculate_vsi_rx_buf_len(struct i40e_vsi *vsi) +{ + if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX)) + return I40E_RXBUFFER_2048; + +#if (PAGE_SIZE < 8192) + if (!I40E_2K_TOO_SMALL_WITH_PADDING && vsi->netdev->mtu <= ETH_DATA_LEN) + return I40E_RXBUFFER_1536 - NET_IP_ALIGN; +#endif + + return PAGE_SIZE < 8192 ? I40E_RXBUFFER_3072 : I40E_RXBUFFER_2048; +} + /** * i40e_vsi_configure_rx - Configure the VSI for Rx * @vsi: the VSI being configured @@ -3559,20 +3577,14 @@ static int i40e_vsi_configure_rx(struct i40e_vsi *vsi) int err = 0; u16 i; - if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX)) { - vsi->max_frame = I40E_MAX_RXBUFFER; - vsi->rx_buf_len = I40E_RXBUFFER_2048; + vsi->max_frame = I40E_MAX_RXBUFFER; + vsi->rx_buf_len = i40e_calculate_vsi_rx_buf_len(vsi); + #if (PAGE_SIZE < 8192) - } else if (!I40E_2K_TOO_SMALL_WITH_PADDING && - (vsi->netdev->mtu <= ETH_DATA_LEN)) { + if (vsi->netdev && !I40E_2K_TOO_SMALL_WITH_PADDING && + vsi->netdev->mtu <= ETH_DATA_LEN) vsi->max_frame = I40E_RXBUFFER_1536 - NET_IP_ALIGN; - vsi->rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN; #endif - } else { - vsi->max_frame = I40E_MAX_RXBUFFER; - vsi->rx_buf_len = (PAGE_SIZE < 8192) ? 
I40E_RXBUFFER_3072 : - I40E_RXBUFFER_2048; - } /* set up individual rings */ for (i = 0; i < vsi->num_queue_pairs && !err; i++) @@ -13147,7 +13159,7 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog, int i; /* Don't allow frames that span over multiple buffers */ - if (frame_size > vsi->rx_buf_len) { + if (frame_size > i40e_calculate_vsi_rx_buf_len(vsi)) { NL_SET_ERR_MSG_MOD(extack, "MTU too large to enable XDP"); return -EINVAL; } diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index f19e64830739..b246ff8b7c20 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -1204,8 +1204,12 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter, if (!q_vector) { q_vector = kzalloc(size, GFP_KERNEL); } else if (size > ksize(q_vector)) { - kfree_rcu(q_vector, rcu); - q_vector = kzalloc(size, GFP_KERNEL); + struct igb_q_vector *new_q_vector; + + new_q_vector = kzalloc(size, GFP_KERNEL); + if (new_q_vector) + kfree_rcu(q_vector, rcu); + q_vector = new_q_vector; } else { memset(q_vector, 0, size); } @@ -7410,7 +7414,7 @@ static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf) { struct e1000_hw *hw = &adapter->hw; unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses; - u32 reg, msgbuf[3]; + u32 reg, msgbuf[3] = {}; u8 *addr = (u8 *)(&msgbuf[1]); /* process all the same items cleared in a function level reset */ diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h index 3e386c38d016..66678cd72a6c 100644 --- a/drivers/net/ethernet/intel/igc/igc.h +++ b/drivers/net/ethernet/intel/igc/igc.h @@ -94,6 +94,8 @@ struct igc_ring { u8 queue_index; /* logical index of the ring*/ u8 reg_idx; /* physical index of the ring */ bool launchtime_enable; /* true if LaunchTime is enabled */ + ktime_t last_tx_cycle; /* end of the cycle with a launchtime transmission */ + ktime_t last_ff_cycle; /* Last cycle with an active first flag */ u32 start_time; u32 end_time; @@ -182,6 +184,7 @@ struct igc_adapter { ktime_t base_time; ktime_t cycle_time; + bool qbv_enable; /* OS defined structs */ struct pci_dev *pdev; diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h index a4bbee748798..f171bc99e58c 100644 --- a/drivers/net/ethernet/intel/igc/igc_defines.h +++ b/drivers/net/ethernet/intel/igc/igc_defines.h @@ -324,6 +324,8 @@ #define IGC_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ #define IGC_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ +#define IGC_ADVTXD_TSN_CNTX_FIRST 0x00000080 + /* Transmit Control */ #define IGC_TCTL_EN 0x00000002 /* enable Tx */ #define IGC_TCTL_PSP 0x00000008 /* pad short packets */ diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index 2a84f57ea68b..3726c8413c74 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -999,25 +999,118 @@ static int igc_write_mc_addr_list(struct net_device *netdev) return netdev_mc_count(netdev); } -static __le32 igc_tx_launchtime(struct igc_adapter *adapter, ktime_t txtime) +static __le32 igc_tx_launchtime(struct igc_ring *ring, ktime_t txtime, + bool *first_flag, bool *insert_empty) { + struct igc_adapter *adapter = netdev_priv(ring->netdev); ktime_t cycle_time = adapter->cycle_time; ktime_t base_time = adapter->base_time; + ktime_t now = ktime_get_clocktai(); + ktime_t baset_est, end_of_cycle; u32 launchtime; + s64 n; - /* FIXME: when 
using ETF together with taprio, we may have a - * case where 'delta' is larger than the cycle_time, this may - * cause problems if we don't read the current value of - * IGC_BASET, as the value writen into the launchtime - * descriptor field may be misinterpreted. + n = div64_s64(ktime_sub_ns(now, base_time), cycle_time); + + baset_est = ktime_add_ns(base_time, cycle_time * (n)); + end_of_cycle = ktime_add_ns(baset_est, cycle_time); + + if (ktime_compare(txtime, end_of_cycle) >= 0) { + if (baset_est != ring->last_ff_cycle) { + *first_flag = true; + ring->last_ff_cycle = baset_est; + + if (ktime_compare(txtime, ring->last_tx_cycle) > 0) + *insert_empty = true; + } + } + + /* Introducing a window at end of cycle on which packets + * potentially not honor launchtime. Window of 5us chosen + * considering software update the tail pointer and packets + * are dma'ed to packet buffer. */ - div_s64_rem(ktime_sub_ns(txtime, base_time), cycle_time, &launchtime); + if ((ktime_sub_ns(end_of_cycle, now) < 5 * NSEC_PER_USEC)) + netdev_warn(ring->netdev, "Packet with txtime=%llu may not be honoured\n", + txtime); + + ring->last_tx_cycle = end_of_cycle; + + launchtime = ktime_sub_ns(txtime, baset_est); + if (launchtime > 0) + div_s64_rem(launchtime, cycle_time, &launchtime); + else + launchtime = 0; return cpu_to_le32(launchtime); } +static int igc_init_empty_frame(struct igc_ring *ring, + struct igc_tx_buffer *buffer, + struct sk_buff *skb) +{ + unsigned int size; + dma_addr_t dma; + + size = skb_headlen(skb); + + dma = dma_map_single(ring->dev, skb->data, size, DMA_TO_DEVICE); + if (dma_mapping_error(ring->dev, dma)) { + netdev_err_once(ring->netdev, "Failed to map DMA for TX\n"); + return -ENOMEM; + } + + buffer->skb = skb; + buffer->protocol = 0; + buffer->bytecount = skb->len; + buffer->gso_segs = 1; + buffer->time_stamp = jiffies; + dma_unmap_len_set(buffer, len, skb->len); + dma_unmap_addr_set(buffer, dma, dma); + + return 0; +} + +static int igc_init_tx_empty_descriptor(struct igc_ring *ring, + struct sk_buff *skb, + struct igc_tx_buffer *first) +{ + union igc_adv_tx_desc *desc; + u32 cmd_type, olinfo_status; + int err; + + if (!igc_desc_unused(ring)) + return -EBUSY; + + err = igc_init_empty_frame(ring, first, skb); + if (err) + return err; + + cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT | + IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD | + first->bytecount; + olinfo_status = first->bytecount << IGC_ADVTXD_PAYLEN_SHIFT; + + desc = IGC_TX_DESC(ring, ring->next_to_use); + desc->read.cmd_type_len = cpu_to_le32(cmd_type); + desc->read.olinfo_status = cpu_to_le32(olinfo_status); + desc->read.buffer_addr = cpu_to_le64(dma_unmap_addr(first, dma)); + + netdev_tx_sent_queue(txring_txq(ring), skb->len); + + first->next_to_watch = desc; + + ring->next_to_use++; + if (ring->next_to_use == ring->count) + ring->next_to_use = 0; + + return 0; +} + +#define IGC_EMPTY_FRAME_SIZE 60 + static void igc_tx_ctxtdesc(struct igc_ring *tx_ring, - struct igc_tx_buffer *first, + __le32 launch_time, bool first_flag, u32 vlan_macip_lens, u32 type_tucmd, u32 mss_l4len_idx) { @@ -1036,26 +1129,17 @@ static void igc_tx_ctxtdesc(struct igc_ring *tx_ring, if (test_bit(IGC_RING_FLAG_TX_CTX_IDX, &tx_ring->flags)) mss_l4len_idx |= tx_ring->reg_idx << 4; + if (first_flag) + mss_l4len_idx |= IGC_ADVTXD_TSN_CNTX_FIRST; + context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); - - /* We assume there is always a valid 
Tx time available. Invalid times - * should have been handled by the upper layers. - */ - if (tx_ring->launchtime_enable) { - struct igc_adapter *adapter = netdev_priv(tx_ring->netdev); - ktime_t txtime = first->skb->tstamp; - - skb_txtime_consumed(first->skb); - context_desc->launch_time = igc_tx_launchtime(adapter, - txtime); - } else { - context_desc->launch_time = 0; - } + context_desc->launch_time = launch_time; } -static void igc_tx_csum(struct igc_ring *tx_ring, struct igc_tx_buffer *first) +static void igc_tx_csum(struct igc_ring *tx_ring, struct igc_tx_buffer *first, + __le32 launch_time, bool first_flag) { struct sk_buff *skb = first->skb; u32 vlan_macip_lens = 0; @@ -1095,7 +1179,8 @@ no_csum: vlan_macip_lens |= skb_network_offset(skb) << IGC_ADVTXD_MACLEN_SHIFT; vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK; - igc_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, type_tucmd, 0); + igc_tx_ctxtdesc(tx_ring, launch_time, first_flag, + vlan_macip_lens, type_tucmd, 0); } static int __igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size) @@ -1319,6 +1404,7 @@ dma_error: static int igc_tso(struct igc_ring *tx_ring, struct igc_tx_buffer *first, + __le32 launch_time, bool first_flag, u8 *hdr_len) { u32 vlan_macip_lens, type_tucmd, mss_l4len_idx; @@ -1405,8 +1491,8 @@ static int igc_tso(struct igc_ring *tx_ring, vlan_macip_lens |= (ip.hdr - skb->data) << IGC_ADVTXD_MACLEN_SHIFT; vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK; - igc_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, - type_tucmd, mss_l4len_idx); + igc_tx_ctxtdesc(tx_ring, launch_time, first_flag, + vlan_macip_lens, type_tucmd, mss_l4len_idx); return 1; } @@ -1414,11 +1500,14 @@ static int igc_tso(struct igc_ring *tx_ring, static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb, struct igc_ring *tx_ring) { + bool first_flag = false, insert_empty = false; u16 count = TXD_USE_COUNT(skb_headlen(skb)); __be16 protocol = vlan_get_protocol(skb); struct igc_tx_buffer *first; + __le32 launch_time = 0; u32 tx_flags = 0; unsigned short f; + ktime_t txtime; u8 hdr_len = 0; int tso = 0; @@ -1432,11 +1521,40 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb, count += TXD_USE_COUNT(skb_frag_size( &skb_shinfo(skb)->frags[f])); - if (igc_maybe_stop_tx(tx_ring, count + 3)) { + if (igc_maybe_stop_tx(tx_ring, count + 5)) { /* this is a hard error */ return NETDEV_TX_BUSY; } + if (!tx_ring->launchtime_enable) + goto done; + + txtime = skb->tstamp; + skb->tstamp = ktime_set(0, 0); + launch_time = igc_tx_launchtime(tx_ring, txtime, &first_flag, &insert_empty); + + if (insert_empty) { + struct igc_tx_buffer *empty_info; + struct sk_buff *empty; + void *data; + + empty_info = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; + empty = alloc_skb(IGC_EMPTY_FRAME_SIZE, GFP_ATOMIC); + if (!empty) + goto done; + + data = skb_put(empty, IGC_EMPTY_FRAME_SIZE); + memset(data, 0, IGC_EMPTY_FRAME_SIZE); + + igc_tx_ctxtdesc(tx_ring, 0, false, 0, 0, 0); + + if (igc_init_tx_empty_descriptor(tx_ring, + empty, + empty_info) < 0) + dev_kfree_skb_any(empty); + } + +done: /* record the location of the first descriptor for this packet */ first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; first->type = IGC_TX_BUFFER_TYPE_SKB; @@ -1473,11 +1591,11 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb, first->tx_flags = tx_flags; first->protocol = protocol; - tso = igc_tso(tx_ring, first, &hdr_len); + tso = igc_tso(tx_ring, first, launch_time, first_flag, &hdr_len); if (tso < 0) goto out_drop; else if (!tso) - 
igc_tx_csum(tx_ring, first); + igc_tx_csum(tx_ring, first, launch_time, first_flag); igc_tx_map(tx_ring, first, hdr_len); @@ -5801,9 +5919,10 @@ static bool validate_schedule(struct igc_adapter *adapter, return false; for (n = 0; n < qopt->num_entries; n++) { - const struct tc_taprio_sched_entry *e; + const struct tc_taprio_sched_entry *e, *prev; int i; + prev = n ? &qopt->entries[n - 1] : NULL; e = &qopt->entries[n]; /* i225 only supports "global" frame preemption @@ -5816,7 +5935,12 @@ static bool validate_schedule(struct igc_adapter *adapter, if (e->gate_mask & BIT(i)) queue_uses[i]++; - if (queue_uses[i] > 1) + /* There are limitations: A single queue cannot be + * opened and closed multiple times per cycle unless the + * gate stays open. Check for it. + */ + if (queue_uses[i] > 1 && + !(prev->gate_mask & BIT(i))) return false; } } @@ -5860,12 +5984,19 @@ static int igc_tsn_clear_schedule(struct igc_adapter *adapter) static int igc_save_qbv_schedule(struct igc_adapter *adapter, struct tc_taprio_qopt_offload *qopt) { + bool queue_configured[IGC_MAX_TX_QUEUES] = { }; u32 start_time = 0, end_time = 0; size_t n; + int i; + + adapter->qbv_enable = qopt->enable; if (!qopt->enable) return igc_tsn_clear_schedule(adapter); + if (qopt->base_time < 0) + return -ERANGE; + if (adapter->base_time) return -EALREADY; @@ -5875,28 +6006,58 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter, adapter->cycle_time = qopt->cycle_time; adapter->base_time = qopt->base_time; - /* FIXME: be a little smarter about cases when the gate for a - * queue stays open for more than one entry. - */ for (n = 0; n < qopt->num_entries; n++) { struct tc_taprio_sched_entry *e = &qopt->entries[n]; - int i; end_time += e->interval; + /* If any of the conditions below are true, we need to manually + * control the end time of the cycle. + * 1. Qbv users can specify a cycle time that is not equal + * to the total GCL intervals. Hence, recalculation is + * necessary here to exclude the time interval that + * exceeds the cycle time. + * 2. According to IEEE Std. 802.1Q-2018 section 8.6.9.2, + * once the end of the list is reached, it will switch + * to the END_OF_CYCLE state and leave the gates in the + * same state until the next cycle is started. + */ + if (end_time > adapter->cycle_time || + n + 1 == qopt->num_entries) + end_time = adapter->cycle_time; + for (i = 0; i < adapter->num_tx_queues; i++) { struct igc_ring *ring = adapter->tx_ring[i]; if (!(e->gate_mask & BIT(i))) continue; - ring->start_time = start_time; + /* Check whether a queue stays open for more than one + * entry. If so, keep the start and advance the end + * time. + */ + if (!queue_configured[i]) + ring->start_time = start_time; ring->end_time = end_time; + + queue_configured[i] = true; } start_time += e->interval; } + /* Check whether a queue gets configured. + * If not, set the start and end time to be end time. 
+ */ + for (i = 0; i < adapter->num_tx_queues; i++) { + if (!queue_configured[i]) { + struct igc_ring *ring = adapter->tx_ring[i]; + + ring->start_time = end_time; + ring->end_time = end_time; + } + } + return 0; } diff --git a/drivers/net/ethernet/intel/igc/igc_tsn.c b/drivers/net/ethernet/intel/igc/igc_tsn.c index 0fce22de2ab8..356c7455c5ce 100644 --- a/drivers/net/ethernet/intel/igc/igc_tsn.c +++ b/drivers/net/ethernet/intel/igc/igc_tsn.c @@ -36,7 +36,7 @@ static unsigned int igc_tsn_new_flags(struct igc_adapter *adapter) { unsigned int new_flags = adapter->flags & ~IGC_FLAG_TSN_ANY_ENABLED; - if (adapter->base_time) + if (adapter->qbv_enable) new_flags |= IGC_FLAG_TSN_QBV_ENABLED; if (is_any_launchtime(adapter)) @@ -110,15 +110,8 @@ static int igc_tsn_enable_offload(struct igc_adapter *adapter) wr32(IGC_STQT(i), ring->start_time); wr32(IGC_ENDQT(i), ring->end_time); - if (adapter->base_time) { - /* If we have a base_time we are in "taprio" - * mode and we need to be strict about the - * cycles: only transmit a packet if it can be - * completed during that cycle. - */ - txqctl |= IGC_TXQCTL_STRICT_CYCLE | - IGC_TXQCTL_STRICT_END; - } + txqctl |= IGC_TXQCTL_STRICT_CYCLE | + IGC_TXQCTL_STRICT_END; if (ring->launchtime_enable) txqctl |= IGC_TXQCTL_QUEUE_MODE_LAUNCHT; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c index e14624caddc6..f6306eedd59b 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c @@ -962,6 +962,7 @@ static void otx2_pool_refill_task(struct work_struct *work) rbpool = cq->rbpool; free_ptrs = cq->pool_ptrs; + get_cpu(); while (cq->pool_ptrs) { if (otx2_alloc_rbuf(pfvf, rbpool, &bufptr)) { /* Schedule a WQ if we fails to free atleast half of the @@ -981,6 +982,7 @@ static void otx2_pool_refill_task(struct work_struct *work) pfvf->hw_ops->aura_freeptr(pfvf, qidx, bufptr + OTX2_HEAD_ROOM); cq->pool_ptrs--; } + put_cpu(); cq->refill_task_sched = false; } @@ -1314,6 +1316,7 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf) if (err) goto fail; + get_cpu(); /* Allocate pointers and free them to aura/pool */ for (qidx = 0; qidx < hw->tx_queues; qidx++) { pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx); @@ -1322,18 +1325,24 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf) sq = &qset->sq[qidx]; sq->sqb_count = 0; sq->sqb_ptrs = kcalloc(num_sqbs, sizeof(*sq->sqb_ptrs), GFP_KERNEL); - if (!sq->sqb_ptrs) - return -ENOMEM; + if (!sq->sqb_ptrs) { + err = -ENOMEM; + goto err_mem; + } for (ptr = 0; ptr < num_sqbs; ptr++) { - if (otx2_alloc_rbuf(pfvf, pool, &bufptr)) - return -ENOMEM; + err = otx2_alloc_rbuf(pfvf, pool, &bufptr); + if (err) + goto err_mem; pfvf->hw_ops->aura_freeptr(pfvf, pool_id, bufptr); sq->sqb_ptrs[sq->sqb_count++] = (u64)bufptr; } } - return 0; +err_mem: + put_cpu(); + return err ? 
-ENOMEM : 0; + fail: otx2_mbox_reset(&pfvf->mbox.mbox, 0); otx2_aura_pool_free(pfvf); @@ -1372,18 +1381,21 @@ int otx2_rq_aura_pool_init(struct otx2_nic *pfvf) if (err) goto fail; + get_cpu(); /* Allocate pointers and free them to aura/pool */ for (pool_id = 0; pool_id < hw->rqpool_cnt; pool_id++) { pool = &pfvf->qset.pool[pool_id]; for (ptr = 0; ptr < num_ptrs; ptr++) { - if (otx2_alloc_rbuf(pfvf, pool, &bufptr)) - return -ENOMEM; + err = otx2_alloc_rbuf(pfvf, pool, &bufptr); + if (err) + goto err_mem; pfvf->hw_ops->aura_freeptr(pfvf, pool_id, bufptr + OTX2_HEAD_ROOM); } } - - return 0; +err_mem: + put_cpu(); + return err ? -ENOMEM : 0; fail: otx2_mbox_reset(&pfvf->mbox.mbox, 0); otx2_aura_pool_free(pfvf); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c index 700c463ea367..a8d7f07ee2ca 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c @@ -188,12 +188,19 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv, int err; list_for_each_entry(flow, flow_list, tmp_list) { - if (!mlx5e_is_offloaded_flow(flow) || flow_flag_test(flow, SLOW)) + if (!mlx5e_is_offloaded_flow(flow)) continue; attr = flow->attr; esw_attr = attr->esw_attr; spec = &attr->parse_attr->spec; + /* Clear pkt_reformat before checking slow path flag. Because + * in next iteration, the same flow is already set slow path + * flag, but still need to clear the pkt_reformat. + */ + if (flow_flag_test(flow, SLOW)) + continue; + /* update from encap rule to slow path rule */ rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec); /* mark the flow's encap dest as non-valid */ @@ -1342,7 +1349,7 @@ static void mlx5e_reoffload_encap(struct mlx5e_priv *priv, continue; } - err = mlx5e_tc_add_flow_mod_hdr(priv, parse_attr, flow); + err = mlx5e_tc_add_flow_mod_hdr(priv, flow, attr); if (err) { mlx5_core_warn(priv->mdev, "Failed to update flow mod_hdr err=%d", err); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index c1c4f380803a..be19f5cf9d15 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -977,7 +977,7 @@ static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c, sq->channel = c; sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map; sq->min_inline_mode = params->tx_min_inline_mode; - sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu); + sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu) - ETH_FCS_LEN; sq->xsk_pool = xsk_pool; sq->stats = sq->xsk_pool ? 
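The otx2_sq_aura_pool_init() and otx2_rq_aura_pool_init() hunks above wrap the buffer-refill loops in get_cpu()/put_cpu() and turn the early returns into jumps to a single err_mem label, so the CPU reference taken at the top is dropped on every exit path. Below is a minimal sketch of that error-handling shape, under the assumption that the free-pointer helper relies on per-CPU state; demo_alloc_buf(), demo_free_ptr() and demo_pool_init() are hypothetical stand-ins, not driver functions.

	/* Sketch: pin the CPU around per-CPU work, unwind through one label. */
	#include <linux/preempt.h>
	#include <linux/types.h>
	#include <linux/errno.h>

	static int demo_alloc_buf(dma_addr_t *bufptr)
	{
		*bufptr = 0;	/* stand-in: pretend the allocation succeeded */
		return 0;
	}

	static void demo_free_ptr(int pool, dma_addr_t bufptr)
	{
		/* stand-in for handing the pointer back to the aura/pool */
	}

	static int demo_pool_init(int npools, int nbufs)
	{
		dma_addr_t bufptr;
		int pool, i, err = 0;

		get_cpu();	/* no migration while per-CPU state is in use */
		for (pool = 0; pool < npools; pool++) {
			for (i = 0; i < nbufs; i++) {
				err = demo_alloc_buf(&bufptr);
				if (err)
					goto err_mem;	/* still reaches put_cpu() */
				demo_free_ptr(pool, bufptr);
			}
		}
	err_mem:
		put_cpu();
		return err ? -ENOMEM : 0;
	}

Routing every failure through the label is what keeps the preemption count balanced; a bare return inside the loops would leak the get_cpu() reference.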
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 843c8435387f..8f2f99689aba 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -1342,10 +1342,10 @@ int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *ro } int mlx5e_tc_add_flow_mod_hdr(struct mlx5e_priv *priv, - struct mlx5e_tc_flow_parse_attr *parse_attr, - struct mlx5e_tc_flow *flow) + struct mlx5e_tc_flow *flow, + struct mlx5_flow_attr *attr) { - struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts = &parse_attr->mod_hdr_acts; + struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts = &attr->parse_attr->mod_hdr_acts; struct mlx5_modify_hdr *mod_hdr; mod_hdr = mlx5_modify_header_alloc(priv->mdev, @@ -1355,8 +1355,8 @@ int mlx5e_tc_add_flow_mod_hdr(struct mlx5e_priv *priv, if (IS_ERR(mod_hdr)) return PTR_ERR(mod_hdr); - WARN_ON(flow->attr->modify_hdr); - flow->attr->modify_hdr = mod_hdr; + WARN_ON(attr->modify_hdr); + attr->modify_hdr = mod_hdr; return 0; } @@ -1457,7 +1457,7 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR && !(attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR)) { if (vf_tun) { - err = mlx5e_tc_add_flow_mod_hdr(priv, parse_attr, flow); + err = mlx5e_tc_add_flow_mod_hdr(priv, flow, attr); if (err) goto err_out; } else { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h index 1a4cd882f0fb..f48af82781f8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h @@ -241,8 +241,8 @@ int mlx5e_tc_match_to_reg_set_and_get_id(struct mlx5_core_dev *mdev, u32 data); int mlx5e_tc_add_flow_mod_hdr(struct mlx5e_priv *priv, - struct mlx5e_tc_flow_parse_attr *parse_attr, - struct mlx5e_tc_flow *flow); + struct mlx5e_tc_flow *flow, + struct mlx5_flow_attr *attr); int alloc_mod_hdr_actions(struct mlx5_core_dev *mdev, int namespace, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c index 60a73990017c..6b4c9ffad95b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c @@ -67,6 +67,7 @@ static void esw_acl_egress_lgcy_groups_destroy(struct mlx5_vport *vport) int esw_acl_egress_lgcy_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport) { + bool vst_mode_steering = esw_vst_mode_is_steering(esw); struct mlx5_flow_destination drop_ctr_dst = {}; struct mlx5_flow_destination *dst = NULL; struct mlx5_fc *drop_counter = NULL; @@ -77,6 +78,7 @@ int esw_acl_egress_lgcy_setup(struct mlx5_eswitch *esw, */ int table_size = 2; int dest_num = 0; + int actions_flag; int err = 0; if (vport->egress.legacy.drop_counter) { @@ -119,8 +121,11 @@ int esw_acl_egress_lgcy_setup(struct mlx5_eswitch *esw, vport->vport, vport->info.vlan, vport->info.qos); /* Allowed vlan rule */ + actions_flag = MLX5_FLOW_CONTEXT_ACTION_ALLOW; + if (vst_mode_steering) + actions_flag |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP; err = esw_egress_acl_vlan_create(esw, vport, NULL, vport->info.vlan, - MLX5_FLOW_CONTEXT_ACTION_ALLOW); + actions_flag); if (err) goto out; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c index b1a5199260f6..093ed86a0acd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c +++ 
b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c @@ -139,11 +139,14 @@ static void esw_acl_ingress_lgcy_groups_destroy(struct mlx5_vport *vport) int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport) { + bool vst_mode_steering = esw_vst_mode_is_steering(esw); struct mlx5_flow_destination drop_ctr_dst = {}; struct mlx5_flow_destination *dst = NULL; struct mlx5_flow_act flow_act = {}; struct mlx5_flow_spec *spec = NULL; struct mlx5_fc *counter = NULL; + bool vst_check_cvlan = false; + bool vst_push_cvlan = false; /* The ingress acl table contains 4 groups * (2 active rules at the same time - * 1 allow rule from one of the first 3 groups. @@ -203,7 +206,26 @@ int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw, goto out; } - if (vport->info.vlan || vport->info.qos) + if ((vport->info.vlan || vport->info.qos)) { + if (vst_mode_steering) + vst_push_cvlan = true; + else if (!MLX5_CAP_ESW(esw->dev, vport_cvlan_insert_always)) + vst_check_cvlan = true; + } + + if (vst_check_cvlan || vport->info.spoofchk) + spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + + /* Create ingress allow rule */ + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW; + if (vst_push_cvlan) { + flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH; + flow_act.vlan[0].prio = vport->info.qos; + flow_act.vlan[0].vid = vport->info.vlan; + flow_act.vlan[0].ethtype = ETH_P_8021Q; + } + + if (vst_check_cvlan) MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag); @@ -218,9 +240,6 @@ int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw, ether_addr_copy(smac_v, vport->info.mac); } - /* Create ingress allow rule */ - spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; - flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW; vport->ingress.allow_rule = mlx5_add_flow_rules(vport->ingress.acl, spec, &flow_act, NULL, 0); if (IS_ERR(vport->ingress.allow_rule)) { @@ -232,6 +251,9 @@ int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw, goto out; } + if (!vst_check_cvlan && !vport->info.spoofchk) + goto out; + memset(&flow_act, 0, sizeof(flow_act)); flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP; /* Attach drop flow counter */ @@ -257,7 +279,8 @@ int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw, return 0; out: - esw_acl_ingress_lgcy_cleanup(esw, vport); + if (err) + esw_acl_ingress_lgcy_cleanup(esw, vport); kvfree(spec); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index 51a8cecc4a7c..2b9278002354 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -160,10 +160,17 @@ static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u16 vport, esw_vport_context.vport_cvlan_strip, 1); if (set_flags & SET_VLAN_INSERT) { - /* insert only if no vlan in packet */ - MLX5_SET(modify_esw_vport_context_in, in, - esw_vport_context.vport_cvlan_insert, 1); - + if (MLX5_CAP_ESW(dev, vport_cvlan_insert_always)) { + /* insert either if vlan exist in packet or not */ + MLX5_SET(modify_esw_vport_context_in, in, + esw_vport_context.vport_cvlan_insert, + MLX5_VPORT_CVLAN_INSERT_ALWAYS); + } else { + /* insert only if no vlan in packet */ + MLX5_SET(modify_esw_vport_context_in, in, + esw_vport_context.vport_cvlan_insert, + MLX5_VPORT_CVLAN_INSERT_WHEN_NO_CVLAN); + } MLX5_SET(modify_esw_vport_context_in, in, esw_vport_context.cvlan_pcp, qos); MLX5_SET(modify_esw_vport_context_in, in, @@ -773,6 +780,7 @@ static void 
esw_vport_cleanup_acl(struct mlx5_eswitch *esw, static int esw_vport_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport) { + bool vst_mode_steering = esw_vst_mode_is_steering(esw); u16 vport_num = vport->vport; int flags; int err; @@ -802,8 +810,9 @@ static int esw_vport_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport) flags = (vport->info.vlan || vport->info.qos) ? SET_VLAN_STRIP | SET_VLAN_INSERT : 0; - modify_esw_vport_cvlan(esw->dev, vport_num, vport->info.vlan, - vport->info.qos, flags); + if (esw->mode == MLX5_ESWITCH_OFFLOADS || !vst_mode_steering) + modify_esw_vport_cvlan(esw->dev, vport_num, vport->info.vlan, + vport->info.qos, flags); return 0; } @@ -1846,6 +1855,7 @@ int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw, u16 vport, u16 vlan, u8 qos, u8 set_flags) { struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport); + bool vst_mode_steering = esw_vst_mode_is_steering(esw); int err = 0; if (IS_ERR(evport)) @@ -1853,9 +1863,11 @@ int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw, if (vlan > 4095 || qos > 7) return -EINVAL; - err = modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set_flags); - if (err) - return err; + if (esw->mode == MLX5_ESWITCH_OFFLOADS || !vst_mode_steering) { + err = modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set_flags); + if (err) + return err; + } evport->info.vlan = vlan; evport->info.qos = qos; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index 2c7444101bb9..0e2c9e6fccb6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -505,6 +505,12 @@ static inline bool mlx5_esw_qos_enabled(struct mlx5_eswitch *esw) return esw->qos.enabled; } +static inline bool esw_vst_mode_is_steering(struct mlx5_eswitch *esw) +{ + return (MLX5_CAP_ESW_EGRESS_ACL(esw->dev, pop_vlan) && + MLX5_CAP_ESW_INGRESS_ACL(esw->dev, push_vlan)); +} + static inline bool mlx5_eswitch_vlan_actions_supported(struct mlx5_core_dev *dev, u8 vlan_depth) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index 037e18dd4be0..3dceab45986d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c @@ -614,6 +614,12 @@ static void mlx5_fw_fatal_reporter_err_work(struct work_struct *work) priv = container_of(health, struct mlx5_priv, health); dev = container_of(priv, struct mlx5_core_dev, priv); + mutex_lock(&dev->intf_state_mutex); + if (test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags)) { + mlx5_core_err(dev, "health works are not permitted at this stage\n"); + return; + } + mutex_unlock(&dev->intf_state_mutex); enter_error_state(dev, false); if (IS_ERR_OR_NULL(health->fw_fatal_reporter)) { if (mlx5_health_try_recover(dev)) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c index cfde0a45b8b8..10940b8dc83e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c @@ -70,6 +70,10 @@ static void mlx5i_build_nic_params(struct mlx5_core_dev *mdev, params->packet_merge.type = MLX5E_PACKET_MERGE_NONE; params->hard_mtu = MLX5_IB_GRH_BYTES + MLX5_IPOIB_HARD_LEN; params->tunneled_offload_en = false; + + /* CQE compression is not supported for IPoIB */ + params->rx_cqe_compress_def = false; + MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS, 
params->rx_cqe_compress_def); } /* Called directly after IPoIB netdevice was created to initialize SW structs */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 19c11d33f4b6..145e56f5eeee 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -928,6 +928,8 @@ err_rl_cleanup: err_tables_cleanup: mlx5_geneve_destroy(dev->geneve); mlx5_vxlan_destroy(dev->vxlan); + mlx5_cleanup_clock(dev); + mlx5_cleanup_reserved_gids(dev); mlx5_cq_debugfs_cleanup(dev); mlx5_fw_reset_cleanup(dev); err_events_cleanup: diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c index 0463f20da17b..174d89ee6374 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c @@ -779,7 +779,7 @@ static int mchp_sparx5_probe(struct platform_device *pdev) if (err) goto cleanup_config; - if (!of_get_mac_address(np, sparx5->base_mac)) { + if (of_get_mac_address(np, sparx5->base_mac)) { dev_info(sparx5->dev, "MAC addr was not set, use random MAC\n"); eth_random_addr(sparx5->base_mac); sparx5->base_mac[5] = 0; diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c index 052696ce5096..97c2604df019 100644 --- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c +++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c @@ -3923,6 +3923,7 @@ abort_with_slices: myri10ge_free_slices(mgp); abort_with_firmware: + kfree(mgp->msix_vectors); myri10ge_dummy_rdma(mgp, 0); abort_with_ioremap: diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c index f4703f53bcdc..65ccdbe665e5 100644 --- a/drivers/net/ethernet/neterion/s2io.c +++ b/drivers/net/ethernet/neterion/s2io.c @@ -2386,7 +2386,7 @@ static void free_tx_buffers(struct s2io_nic *nic) skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j); if (skb) { swstats->mem_freed += skb->truesize; - dev_kfree_skb(skb); + dev_kfree_skb_irq(skb); cnt++; } } diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c index 6ab3e60d4928..4b4077cf2d26 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_debug.c +++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c @@ -1796,9 +1796,10 @@ static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn, u8 split_id) { struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; - u8 port_id = 0, pf_id = 0, vf_id = 0, fid = 0; + u8 port_id = 0, pf_id = 0, vf_id = 0; bool read_using_dmae = false; u32 thresh; + u16 fid; if (!dump) return len; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c index 27dffa299ca6..7c3cf9ad4563 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c @@ -2505,7 +2505,13 @@ int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac) goto disable_mbx_intr; qlcnic_83xx_clear_function_resources(adapter); - qlcnic_dcb_enable(adapter->dcb); + + err = qlcnic_dcb_enable(adapter->dcb); + if (err) { + qlcnic_dcb_free(adapter->dcb); + goto disable_mbx_intr; + } + qlcnic_83xx_initialize_nic(adapter, 1); qlcnic_dcb_get_info(adapter->dcb); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h index 7519773eaca6..22afa2be85fd 100644 --- 
a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h @@ -41,11 +41,6 @@ struct qlcnic_dcb { unsigned long state; }; -static inline void qlcnic_clear_dcb_ops(struct qlcnic_dcb *dcb) -{ - kfree(dcb); -} - static inline int qlcnic_dcb_get_hw_capability(struct qlcnic_dcb *dcb) { if (dcb && dcb->ops->get_hw_capability) @@ -112,9 +107,8 @@ static inline void qlcnic_dcb_init_dcbnl_ops(struct qlcnic_dcb *dcb) dcb->ops->init_dcbnl_ops(dcb); } -static inline void qlcnic_dcb_enable(struct qlcnic_dcb *dcb) +static inline int qlcnic_dcb_enable(struct qlcnic_dcb *dcb) { - if (dcb && qlcnic_dcb_attach(dcb)) - qlcnic_clear_dcb_ops(dcb); + return dcb ? qlcnic_dcb_attach(dcb) : 0; } #endif diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index 75960a29f80e..cec07d5bbe67 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -2616,7 +2616,13 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) "Device does not support MSI interrupts\n"); if (qlcnic_82xx_check(adapter)) { - qlcnic_dcb_enable(adapter->dcb); + err = qlcnic_dcb_enable(adapter->dcb); + if (err) { + qlcnic_dcb_free(adapter->dcb); + dev_err(&pdev->dev, "Failed to enable DCB\n"); + goto err_out_free_hw; + } + qlcnic_dcb_get_info(adapter->dcb); err = qlcnic_setup_intr(adapter); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c index 42a44c97572a..df9b84f6600f 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c @@ -221,6 +221,8 @@ int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs) return 0; qlcnic_destroy_async_wq: + while (i--) + kfree(sriov->vf_info[i].vp); destroy_workqueue(bc->bc_async_wq); qlcnic_destroy_trans_wq: diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c index 01ef5efd7bc2..5a8a6977ec9a 100644 --- a/drivers/net/ethernet/rdc/r6040.c +++ b/drivers/net/ethernet/rdc/r6040.c @@ -1159,10 +1159,12 @@ static int r6040_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) err = register_netdev(dev); if (err) { dev_err(&pdev->dev, "Failed to register net device\n"); - goto err_out_mdio_unregister; + goto err_out_phy_disconnect; } return 0; +err_out_phy_disconnect: + phy_disconnect(dev->phydev); err_out_mdio_unregister: mdiobus_unregister(lp->mii_bus); err_out_mdio: @@ -1186,6 +1188,7 @@ static void r6040_remove_one(struct pci_dev *pdev) struct r6040_private *lp = netdev_priv(dev); unregister_netdev(dev); + phy_disconnect(dev->phydev); mdiobus_unregister(lp->mii_bus); mdiobus_free(lp->mii_bus); netif_napi_del(&lp->napi); diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 77a19336abec..c89bcdd15f16 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -2378,11 +2378,11 @@ static int ravb_remove(struct platform_device *pdev) priv->desc_bat_dma); /* Set reset mode */ ravb_write(ndev, CCC_OPC_RESET, CCC); - pm_runtime_put_sync(&pdev->dev); unregister_netdev(ndev); netif_napi_del(&priv->napi[RAVB_NC]); netif_napi_del(&priv->napi[RAVB_BE]); ravb_mdio_release(priv); + pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); reset_control_assert(priv->rstc); free_netdev(ndev); diff --git 
a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c index d68ef72dcdde..4538e4fd8189 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c @@ -47,7 +47,8 @@ static void config_sub_second_increment(void __iomem *ioaddr, if (!(value & PTP_TCR_TSCTRLSSR)) data = (data * 1000) / 465; - data &= PTP_SSIR_SSINC_MASK; + if (data > PTP_SSIR_SSINC_MAX) + data = PTP_SSIR_SSINC_MAX; reg_value = data; if (gmac4) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index eba97adaf1fb..15b0daf416f3 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -7046,7 +7046,8 @@ int stmmac_dvr_probe(struct device *device, priv->wq = create_singlethread_workqueue("stmmac_wq"); if (!priv->wq) { dev_err(priv->device, "failed to create workqueue\n"); - return -ENOMEM; + ret = -ENOMEM; + goto error_wq_init; } INIT_WORK(&priv->service_task, stmmac_service_task); @@ -7273,6 +7274,7 @@ error_mdio_register: stmmac_napi_del(ndev); error_hw_init: destroy_workqueue(priv->wq); +error_wq_init: bitmap_free(priv->af_xdp_zc_qps); return ret; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h index 53172a439810..bf619295d079 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h @@ -64,7 +64,7 @@ #define PTP_TCR_TSENMACADDR BIT(18) /* SSIR defines */ -#define PTP_SSIR_SSINC_MASK 0xff +#define PTP_SSIR_SSINC_MAX 0xff #define GMAC4_PTP_SSIR_SSINC_SHIFT 16 /* Auxiliary Control defines */ diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c index dd5c4ef92ef3..ea7200b7b647 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c @@ -1654,12 +1654,16 @@ static int stmmac_test_arpoffload(struct stmmac_priv *priv) } ret = stmmac_set_arp_offload(priv, priv->hw, true, ip_addr); - if (ret) + if (ret) { + kfree_skb(skb); goto cleanup; + } ret = dev_set_promiscuity(priv->dev, 1); - if (ret) + if (ret) { + kfree_skb(skb); goto cleanup; + } ret = dev_direct_xmit(skb, 0); if (ret) diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index eda2961c0fe2..07bdeece1723 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c @@ -1262,7 +1262,7 @@ out: } /* Submit the packet */ -static int netcp_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev) +static netdev_tx_t netcp_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct netcp_intf *netcp = netdev_priv(ndev); struct netcp_stats *tx_stats = &netcp->stats; diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index 97c1d1ecba34..b1971c4d5313 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -543,7 +543,7 @@ static void xemaclite_tx_timeout(struct net_device *dev, unsigned int txqueue) xemaclite_enable_interrupts(lp); if (lp->deferred_skb) { - dev_kfree_skb(lp->deferred_skb); + dev_kfree_skb_irq(lp->deferred_skb); lp->deferred_skb = NULL; dev->stats.tx_errors++; } diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c index 6d1e3f49a3d3..ebf502290e5f 100644 
--- a/drivers/net/fddi/defxx.c +++ b/drivers/net/fddi/defxx.c @@ -3831,10 +3831,24 @@ static int dfx_init(void) int status; status = pci_register_driver(&dfx_pci_driver); - if (!status) - status = eisa_driver_register(&dfx_eisa_driver); - if (!status) - status = tc_register_driver(&dfx_tc_driver); + if (status) + goto err_pci_register; + + status = eisa_driver_register(&dfx_eisa_driver); + if (status) + goto err_eisa_register; + + status = tc_register_driver(&dfx_tc_driver); + if (status) + goto err_tc_register; + + return 0; + +err_tc_register: + eisa_driver_unregister(&dfx_eisa_driver); +err_eisa_register: + pci_unregister_driver(&dfx_pci_driver); +err_pci_register: return status; } diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c index 6b6f28d5b8d5..f9d03f7b9101 100644 --- a/drivers/net/hamradio/baycom_epp.c +++ b/drivers/net/hamradio/baycom_epp.c @@ -758,7 +758,7 @@ static void epp_bh(struct work_struct *work) * ===================== network driver interface ========================= */ -static int baycom_send_packet(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t baycom_send_packet(struct sk_buff *skb, struct net_device *dev) { struct baycom_state *bc = netdev_priv(dev); diff --git a/drivers/net/hamradio/scc.c b/drivers/net/hamradio/scc.c index e0bb131a33d7..39db3cae4dd1 100644 --- a/drivers/net/hamradio/scc.c +++ b/drivers/net/hamradio/scc.c @@ -301,12 +301,12 @@ static inline void scc_discard_buffers(struct scc_channel *scc) spin_lock_irqsave(&scc->lock, flags); if (scc->tx_buff != NULL) { - dev_kfree_skb(scc->tx_buff); + dev_kfree_skb_irq(scc->tx_buff); scc->tx_buff = NULL; } while (!skb_queue_empty(&scc->tx_queue)) - dev_kfree_skb(skb_dequeue(&scc->tx_queue)); + dev_kfree_skb_irq(skb_dequeue(&scc->tx_queue)); spin_unlock_irqrestore(&scc->lock, flags); } @@ -1668,7 +1668,7 @@ static netdev_tx_t scc_net_tx(struct sk_buff *skb, struct net_device *dev) if (skb_queue_len(&scc->tx_queue) > scc->dev->tx_queue_len) { struct sk_buff *skb_del; skb_del = skb_dequeue(&scc->tx_queue); - dev_kfree_skb(skb_del); + dev_kfree_skb_irq(skb_del); } skb_queue_tail(&scc->tx_queue, skb); netif_trans_update(dev); diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c index a1c77cc00416..498e5c8013ef 100644 --- a/drivers/net/loopback.c +++ b/drivers/net/loopback.c @@ -208,7 +208,7 @@ static __net_init int loopback_net_init(struct net *net) int err; err = -ENOMEM; - dev = alloc_netdev(0, "lo", NET_NAME_UNKNOWN, loopback_setup); + dev = alloc_netdev(0, "lo", NET_NAME_PREDICTABLE, loopback_setup); if (!dev) goto out; diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index 88e44eb39285..10b3f4fb2612 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@ -2580,7 +2580,7 @@ static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info) const struct macsec_ops *ops; struct macsec_context ctx; struct macsec_dev *macsec; - int ret; + int ret = 0; if (!attrs[MACSEC_ATTR_IFINDEX]) return -EINVAL; @@ -2593,28 +2593,36 @@ static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info) macsec_genl_offload_policy, NULL)) return -EINVAL; + rtnl_lock(); + dev = get_dev_from_nl(genl_info_net(info), attrs); - if (IS_ERR(dev)) - return PTR_ERR(dev); + if (IS_ERR(dev)) { + ret = PTR_ERR(dev); + goto out; + } macsec = macsec_priv(dev); - if (!tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]) - return -EINVAL; + if (!tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]) { + ret = -EINVAL; + goto out; + } offload = 
nla_get_u8(tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]); if (macsec->offload == offload) - return 0; + goto out; /* Check if the offloading mode is supported by the underlying layers */ if (offload != MACSEC_OFFLOAD_OFF && - !macsec_check_offload(offload, macsec)) - return -EOPNOTSUPP; + !macsec_check_offload(offload, macsec)) { + ret = -EOPNOTSUPP; + goto out; + } /* Check if the net device is busy. */ - if (netif_running(dev)) - return -EBUSY; - - rtnl_lock(); + if (netif_running(dev)) { + ret = -EBUSY; + goto out; + } prev_offload = macsec->offload; macsec->offload = offload; @@ -2649,7 +2657,7 @@ static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info) rollback: macsec->offload = prev_offload; - +out: rtnl_unlock(); return ret; } diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c index 1b7d588ff3c5..b701ee83e64a 100644 --- a/drivers/net/ntb_netdev.c +++ b/drivers/net/ntb_netdev.c @@ -137,7 +137,7 @@ static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data, enqueue_again: rc = ntb_transport_rx_enqueue(qp, skb, skb->data, ndev->mtu + ETH_HLEN); if (rc) { - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); ndev->stats.rx_errors++; ndev->stats.rx_fifo_errors++; } @@ -192,7 +192,7 @@ static void ntb_netdev_tx_handler(struct ntb_transport_qp *qp, void *qp_data, ndev->stats.tx_aborted_errors++; } - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); if (ntb_transport_tx_free_entry(dev->qp) >= tx_start) { /* Make sure anybody stopping the queue after this sees the new diff --git a/drivers/net/phy/xilinx_gmii2rgmii.c b/drivers/net/phy/xilinx_gmii2rgmii.c index 8dcb49ed1f3d..7fd9fe6a602b 100644 --- a/drivers/net/phy/xilinx_gmii2rgmii.c +++ b/drivers/net/phy/xilinx_gmii2rgmii.c @@ -105,6 +105,7 @@ static int xgmiitorgmii_probe(struct mdio_device *mdiodev) if (!priv->phy_dev->drv) { dev_info(dev, "Attached phy not ready\n"); + put_device(&priv->phy_dev->mdio.dev); return -EPROBE_DEFER; } diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index 829d6ada1704..c1f11d1df4cd 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c @@ -1742,6 +1742,8 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb) int len; unsigned char *cp; + skb->dev = ppp->dev; + if (proto < 0x8000) { #ifdef CONFIG_PPP_FILTER /* check if we should pass this packet */ diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c index bedd36ab5cf0..e5f6614da5ac 100644 --- a/drivers/net/usb/rndis_host.c +++ b/drivers/net/usb/rndis_host.c @@ -255,7 +255,8 @@ static int rndis_query(struct usbnet *dev, struct usb_interface *intf, off = le32_to_cpu(u.get_c->offset); len = le32_to_cpu(u.get_c->len); - if (unlikely((8 + off + len) > CONTROL_BUFFER_SIZE)) + if (unlikely((off > CONTROL_BUFFER_SIZE - 8) || + (len > CONTROL_BUFFER_SIZE - 8 - off))) goto response_error; if (*reply_len != -1 && len != *reply_len) diff --git a/drivers/net/veth.c b/drivers/net/veth.c index 64fa8e9c0a22..41cb9179e8b7 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -916,6 +916,9 @@ static int veth_poll(struct napi_struct *napi, int budget) xdp_set_return_frame_no_direct(); done = veth_xdp_rcv(rq, budget, &bq, &stats); + if (stats.xdp_redirect > 0) + xdp_do_flush(); + if (done < budget && napi_complete_done(napi, done)) { /* Write rx_notify_masked before reading ptr_ring */ smp_store_mb(rq->rx_notify_masked, false); @@ -929,8 +932,6 @@ static int veth_poll(struct napi_struct *napi, int budget) if (stats.xdp_tx > 0) veth_xdp_flush(rq, &bq); - if 
(stats.xdp_redirect > 0) - xdp_do_flush(); xdp_clear_return_frame_no_direct(); return done; diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index 21896e221300..b88092a6bc85 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -1242,6 +1242,10 @@ vmxnet3_rx_csum(struct vmxnet3_adapter *adapter, (le32_to_cpu(gdesc->dword[3]) & VMXNET3_RCD_CSUM_OK) == VMXNET3_RCD_CSUM_OK) { skb->ip_summed = CHECKSUM_UNNECESSARY; + if ((le32_to_cpu(gdesc->dword[0]) & + (1UL << VMXNET3_RCD_HDR_INNER_SHIFT))) { + skb->csum_level = 1; + } WARN_ON_ONCE(!(gdesc->rcd.tcp || gdesc->rcd.udp) && !(le32_to_cpu(gdesc->dword[0]) & (1UL << VMXNET3_RCD_HDR_INNER_SHIFT))); @@ -1251,6 +1255,10 @@ vmxnet3_rx_csum(struct vmxnet3_adapter *adapter, } else if (gdesc->rcd.v6 && (le32_to_cpu(gdesc->dword[3]) & (1 << VMXNET3_RCD_TUC_SHIFT))) { skb->ip_summed = CHECKSUM_UNNECESSARY; + if ((le32_to_cpu(gdesc->dword[0]) & + (1UL << VMXNET3_RCD_HDR_INNER_SHIFT))) { + skb->csum_level = 1; + } WARN_ON_ONCE(!(gdesc->rcd.tcp || gdesc->rcd.udp) && !(le32_to_cpu(gdesc->dword[0]) & (1UL << VMXNET3_RCD_HDR_INNER_SHIFT))); diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c index 6a212c085435..5b01642ca44e 100644 --- a/drivers/net/wan/farsync.c +++ b/drivers/net/wan/farsync.c @@ -2545,6 +2545,7 @@ fst_remove_one(struct pci_dev *pdev) struct net_device *dev = port_to_dev(&card->ports[i]); unregister_hdlc_device(dev); + free_netdev(dev); } fst_disable_intr(card); @@ -2564,6 +2565,7 @@ fst_remove_one(struct pci_dev *pdev) card->tx_dma_handle_card); } fst_card_array[card->card_no] = NULL; + kfree(card); } static struct pci_driver fst_driver = { diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c index 1baec4b412c8..efe38b2c1df7 100644 --- a/drivers/net/wireless/ath/ar5523/ar5523.c +++ b/drivers/net/wireless/ath/ar5523/ar5523.c @@ -241,6 +241,11 @@ static void ar5523_cmd_tx_cb(struct urb *urb) } } +static void ar5523_cancel_tx_cmd(struct ar5523 *ar) +{ + usb_kill_urb(ar->tx_cmd.urb_tx); +} + static int ar5523_cmd(struct ar5523 *ar, u32 code, const void *idata, int ilen, void *odata, int olen, int flags) { @@ -280,6 +285,7 @@ static int ar5523_cmd(struct ar5523 *ar, u32 code, const void *idata, } if (!wait_for_completion_timeout(&cmd->done, 2 * HZ)) { + ar5523_cancel_tx_cmd(ar); cmd->odata = NULL; ar5523_err(ar, "timeout waiting for command %02x reply\n", code); diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index 4d4e2f91e15c..85a30c301dad 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -3793,18 +3793,22 @@ static struct pci_driver ath10k_pci_driver = { static int __init ath10k_pci_init(void) { - int ret; + int ret1, ret2; - ret = pci_register_driver(&ath10k_pci_driver); - if (ret) + ret1 = pci_register_driver(&ath10k_pci_driver); + if (ret1) printk(KERN_ERR "failed to register ath10k pci driver: %d\n", - ret); + ret1); - ret = ath10k_ahb_init(); - if (ret) - printk(KERN_ERR "ahb init failed: %d\n", ret); + ret2 = ath10k_ahb_init(); + if (ret2) + printk(KERN_ERR "ahb init failed: %d\n", ret2); - return ret; + if (ret1 && ret2) + return ret1; + + /* registered to at least one bus */ + return 0; } module_init(ath10k_pci_init); diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c index f06eec99de68..f938ac1a4abd 100644 --- a/drivers/net/wireless/ath/ath9k/hif_usb.c +++ 
b/drivers/net/wireless/ath/ath9k/hif_usb.c @@ -709,14 +709,13 @@ static void ath9k_hif_usb_reg_in_cb(struct urb *urb) struct rx_buf *rx_buf = (struct rx_buf *)urb->context; struct hif_device_usb *hif_dev = rx_buf->hif_dev; struct sk_buff *skb = rx_buf->skb; - struct sk_buff *nskb; int ret; if (!skb) return; if (!hif_dev) - goto free; + goto free_skb; switch (urb->status) { case 0: @@ -725,7 +724,7 @@ static void ath9k_hif_usb_reg_in_cb(struct urb *urb) case -ECONNRESET: case -ENODEV: case -ESHUTDOWN: - goto free; + goto free_skb; default: skb_reset_tail_pointer(skb); skb_trim(skb, 0); @@ -736,25 +735,27 @@ static void ath9k_hif_usb_reg_in_cb(struct urb *urb) if (likely(urb->actual_length != 0)) { skb_put(skb, urb->actual_length); - /* Process the command first */ + /* + * Process the command first. + * skb is either freed here or passed to be + * managed to another callback function. + */ ath9k_htc_rx_msg(hif_dev->htc_handle, skb, skb->len, USB_REG_IN_PIPE); - - nskb = alloc_skb(MAX_REG_IN_BUF_SIZE, GFP_ATOMIC); - if (!nskb) { + skb = alloc_skb(MAX_REG_IN_BUF_SIZE, GFP_ATOMIC); + if (!skb) { dev_err(&hif_dev->udev->dev, "ath9k_htc: REG_IN memory allocation failure\n"); - urb->context = NULL; - return; + goto free_rx_buf; } - rx_buf->skb = nskb; + rx_buf->skb = skb; usb_fill_int_urb(urb, hif_dev->udev, usb_rcvintpipe(hif_dev->udev, USB_REG_IN_PIPE), - nskb->data, MAX_REG_IN_BUF_SIZE, + skb->data, MAX_REG_IN_BUF_SIZE, ath9k_hif_usb_reg_in_cb, rx_buf, 1); } @@ -763,12 +764,13 @@ resubmit: ret = usb_submit_urb(urb, GFP_ATOMIC); if (ret) { usb_unanchor_urb(urb); - goto free; + goto free_skb; } return; -free: +free_skb: kfree_skb(skb); +free_rx_buf: kfree(rx_buf); urb->context = NULL; } @@ -781,14 +783,10 @@ static void ath9k_hif_usb_dealloc_tx_urbs(struct hif_device_usb *hif_dev) spin_lock_irqsave(&hif_dev->tx.tx_lock, flags); list_for_each_entry_safe(tx_buf, tx_buf_tmp, &hif_dev->tx.tx_buf, list) { - usb_get_urb(tx_buf->urb); - spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags); - usb_kill_urb(tx_buf->urb); list_del(&tx_buf->list); usb_free_urb(tx_buf->urb); kfree(tx_buf->buf); kfree(tx_buf); - spin_lock_irqsave(&hif_dev->tx.tx_lock, flags); } spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags); @@ -1330,10 +1328,24 @@ static int send_eject_command(struct usb_interface *interface) static int ath9k_hif_usb_probe(struct usb_interface *interface, const struct usb_device_id *id) { + struct usb_endpoint_descriptor *bulk_in, *bulk_out, *int_in, *int_out; struct usb_device *udev = interface_to_usbdev(interface); + struct usb_host_interface *alt; struct hif_device_usb *hif_dev; int ret = 0; + /* Verify the expected endpoints are present */ + alt = interface->cur_altsetting; + if (usb_find_common_endpoints(alt, &bulk_in, &bulk_out, &int_in, &int_out) < 0 || + usb_endpoint_num(bulk_in) != USB_WLAN_RX_PIPE || + usb_endpoint_num(bulk_out) != USB_WLAN_TX_PIPE || + usb_endpoint_num(int_in) != USB_REG_IN_PIPE || + usb_endpoint_num(int_out) != USB_REG_OUT_PIPE) { + dev_err(&udev->dev, + "ath9k_htc: Device endpoint numbers are not the expected ones\n"); + return -ENODEV; + } + if (id->driver_info == STORAGE_DEVICE) return send_eject_command(interface); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c index c5497208f231..3f259492491f 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c @@ -765,6 +765,11 @@ brcmf_fw_alloc_request(u32 chip, 
u32 chiprev, u32 i, j; char end = '\0'; + if (chiprev >= BITS_PER_TYPE(u32)) { + brcmf_err("Invalid chip revision %u\n", chiprev); + return NULL; + } + for (i = 0; i < table_size; i++) { if (mapping_table[i].chipid == chip && mapping_table[i].revmask & BIT(chiprev)) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c index d9c45c4d525c..f19ff0bff8f6 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c @@ -626,7 +626,7 @@ static int brcmf_pcie_exit_download_state(struct brcmf_pciedev_info *devinfo, } if (!brcmf_chip_set_active(devinfo->ci, resetintr)) - return -EINVAL; + return -EIO; return 0; } @@ -1118,6 +1118,10 @@ static int brcmf_pcie_init_ringbuffers(struct brcmf_pciedev_info *devinfo) BRCMF_NROF_H2D_COMMON_MSGRINGS; max_completionrings = BRCMF_NROF_D2H_COMMON_MSGRINGS; } + if (max_flowrings > 256) { + brcmf_err(bus, "invalid max_flowrings(%d)\n", max_flowrings); + return -EIO; + } if (devinfo->dma_idx_sz != 0) { bufsz = (max_submissionrings + max_completionrings) * diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c index 67b0ad9819f4..7d12a9b6b52e 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c @@ -3416,6 +3416,7 @@ static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus, /* Take arm out of reset */ if (!brcmf_chip_set_active(bus->ci, rstvec)) { brcmf_err("error getting out of ARM core reset\n"); + bcmerror = -EIO; goto err; } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index b5368cb57ca8..e354918c2480 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -1150,6 +1150,7 @@ int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb, struct sk_buff_head mpdus_skbs; unsigned int payload_len; int ret; + struct sk_buff *orig_skb = skb; if (WARN_ON_ONCE(!mvmsta)) return -1; @@ -1182,8 +1183,17 @@ int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb, ret = iwl_mvm_tx_mpdu(mvm, skb, &info, sta); if (ret) { + /* Free skbs created as part of TSO logic that have not yet been dequeued */ __skb_queue_purge(&mpdus_skbs); - return ret; + /* skb here is not necessarily same as skb that entered this method, + * so free it explicitly. + */ + if (skb == orig_skb) + ieee80211_free_txskb(mvm->hw, skb); + else + kfree_skb(skb); + /* there was error, but we consumed skb one way or another, so return 0 */ + return 0; } } diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h index d1f00706d41e..4e4af6e17b50 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76.h +++ b/drivers/net/wireless/mediatek/mt76/mt76.h @@ -1021,8 +1021,9 @@ static inline bool mt76_is_skb_pktid(u8 pktid) static inline u8 mt76_tx_power_nss_delta(u8 nss) { static const u8 nss_delta[4] = { 0, 6, 9, 12 }; + u8 idx = nss - 1; - return nss_delta[nss - 1]; + return (idx < ARRAY_SIZE(nss_delta)) ? 
nss_delta[idx] : 0; } static inline bool mt76_testmode_enabled(struct mt76_phy *phy) diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c index 8f1338dae211..96667b7d722d 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c @@ -290,7 +290,8 @@ static void mt7615_init_dfs_state(struct mt7615_phy *phy) if (hw->conf.flags & IEEE80211_CONF_OFFCHANNEL) return; - if (!(chandef->chan->flags & IEEE80211_CHAN_RADAR)) + if (!(chandef->chan->flags & IEEE80211_CHAN_RADAR) && + !(mphy->chandef.chan->flags & IEEE80211_CHAN_RADAR)) return; if (mphy->chandef.chan->center_freq == chandef->chan->center_freq && diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c index 6aca470e2401..7a4f277a1622 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c @@ -302,7 +302,8 @@ static void mt7915_init_dfs_state(struct mt7915_phy *phy) if (hw->conf.flags & IEEE80211_CONF_OFFCHANNEL) return; - if (!(chandef->chan->flags & IEEE80211_CHAN_RADAR)) + if (!(chandef->chan->flags & IEEE80211_CHAN_RADAR) && + !(mphy->chandef.chan->flags & IEEE80211_CHAN_RADAR)) return; if (mphy->chandef.chan->center_freq == chandef->chan->center_freq && diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c index 426e7a32bdc8..6cf0c9b1b8b9 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c @@ -1476,7 +1476,7 @@ mt7921_mac_update_mib_stats(struct mt7921_phy *phy) mib->rts_retries_cnt += mt76_get_field(dev, MT_MIB_MB_BSDR1(0), MT_MIB_RTS_FAIL_COUNT_MASK); - for (i = 0, aggr1 = aggr0 + 4; i < 4; i++) { + for (i = 0, aggr1 = aggr0 + 8; i < 4; i++) { u32 val, val2; val = mt76_rr(dev, MT_TX_AGG_CNT(0, i)); diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c index 1e9f60bb811a..b47343e321b8 100644 --- a/drivers/net/wireless/mediatek/mt76/usb.c +++ b/drivers/net/wireless/mediatek/mt76/usb.c @@ -814,6 +814,9 @@ static void mt76u_status_worker(struct mt76_worker *w) struct mt76_queue *q; int i; + if (!test_bit(MT76_STATE_RUNNING, &dev->phy.state)) + return; + for (i = 0; i < IEEE80211_NUM_ACS; i++) { q = dev->phy.q_tx[i]; if (!q) @@ -833,11 +836,11 @@ static void mt76u_status_worker(struct mt76_worker *w) wake_up(&dev->tx_wait); mt76_worker_schedule(&dev->tx_worker); - - if (dev->drv->tx_status_data && - !test_and_set_bit(MT76_READING_STATS, &dev->phy.state)) - queue_work(dev->wq, &dev->usb.stat_work); } + + if (dev->drv->tx_status_data && + !test_and_set_bit(MT76_READING_STATS, &dev->phy.state)) + queue_work(dev->wq, &dev->usb.stat_work); } static void mt76u_tx_status_data(struct work_struct *work) diff --git a/drivers/net/wireless/microchip/wilc1000/sdio.c b/drivers/net/wireless/microchip/wilc1000/sdio.c index 6c0727fc4abd..cb4efbfd0811 100644 --- a/drivers/net/wireless/microchip/wilc1000/sdio.c +++ b/drivers/net/wireless/microchip/wilc1000/sdio.c @@ -20,6 +20,7 @@ static const struct sdio_device_id wilc_sdio_ids[] = { { SDIO_DEVICE(SDIO_VENDOR_ID_MICROCHIP_WILC, SDIO_DEVICE_ID_MICROCHIP_WILC1000) }, { }, }; +MODULE_DEVICE_TABLE(sdio, wilc_sdio_ids); #define WILC_SDIO_BLOCK_SIZE 512 diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index 7ddce3c3f0c4..cd3ff9847ced 100644 --- 
a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -1190,7 +1190,7 @@ struct rtl8723bu_c2h { u8 bw; } __packed ra_report; }; -}; +} __packed; struct rtl8xxxu_fileops; diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c index e74c885a04e5..7370d92a3bda 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c @@ -1607,18 +1607,18 @@ static void rtl8xxxu_print_chipinfo(struct rtl8xxxu_priv *priv) static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv) { struct device *dev = &priv->udev->dev; - u32 val32, bonding; + u32 val32, bonding, sys_cfg; u16 val16; - val32 = rtl8xxxu_read32(priv, REG_SYS_CFG); - priv->chip_cut = (val32 & SYS_CFG_CHIP_VERSION_MASK) >> + sys_cfg = rtl8xxxu_read32(priv, REG_SYS_CFG); + priv->chip_cut = (sys_cfg & SYS_CFG_CHIP_VERSION_MASK) >> SYS_CFG_CHIP_VERSION_SHIFT; - if (val32 & SYS_CFG_TRP_VAUX_EN) { + if (sys_cfg & SYS_CFG_TRP_VAUX_EN) { dev_info(dev, "Unsupported test chip\n"); return -ENOTSUPP; } - if (val32 & SYS_CFG_BT_FUNC) { + if (sys_cfg & SYS_CFG_BT_FUNC) { if (priv->chip_cut >= 3) { sprintf(priv->chip_name, "8723BU"); priv->rtl_chip = RTL8723B; @@ -1640,7 +1640,7 @@ static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv) if (val32 & MULTI_GPS_FUNC_EN) priv->has_gps = 1; priv->is_multi_func = 1; - } else if (val32 & SYS_CFG_TYPE_ID) { + } else if (sys_cfg & SYS_CFG_TYPE_ID) { bonding = rtl8xxxu_read32(priv, REG_HPON_FSM); bonding &= HPON_FSM_BONDING_MASK; if (priv->fops->tx_desc_size == @@ -1688,7 +1688,7 @@ static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv) case RTL8188E: case RTL8192E: case RTL8723B: - switch (val32 & SYS_CFG_VENDOR_EXT_MASK) { + switch (sys_cfg & SYS_CFG_VENDOR_EXT_MASK) { case SYS_CFG_VENDOR_ID_TSMC: sprintf(priv->chip_vendor, "TSMC"); break; @@ -1705,7 +1705,7 @@ static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv) } break; default: - if (val32 & SYS_CFG_VENDOR_ID) { + if (sys_cfg & SYS_CFG_VENDOR_ID) { sprintf(priv->chip_vendor, "UMC"); priv->vendor_umc = 1; } else { @@ -5517,7 +5517,6 @@ static void rtl8xxxu_c2hcmd_callback(struct work_struct *work) rarpt->txrate.flags = 0; rate = c2h->ra_report.rate; sgi = c2h->ra_report.sgi; - bw = c2h->ra_report.bw; if (rate < DESC_RATE_MCS0) { rarpt->txrate.legacy = @@ -5534,8 +5533,13 @@ static void rtl8xxxu_c2hcmd_callback(struct work_struct *work) RATE_INFO_FLAGS_SHORT_GI; } - if (bw == RATE_INFO_BW_20) - rarpt->txrate.bw |= RATE_INFO_BW_20; + if (skb->len >= offsetofend(typeof(*c2h), ra_report.bw)) { + if (c2h->ra_report.bw == RTL8XXXU_CHANNEL_WIDTH_40) + bw = RATE_INFO_BW_40; + else + bw = RATE_INFO_BW_20; + rarpt->txrate.bw = bw; + } } bit_rate = cfg80211_calculate_bitrate(&rarpt->txrate); rarpt->bit_rate = bit_rate; diff --git a/drivers/net/wireless/rsi/rsi_91x_core.c b/drivers/net/wireless/rsi/rsi_91x_core.c index 6bfaab48b507..6b64a103f39f 100644 --- a/drivers/net/wireless/rsi/rsi_91x_core.c +++ b/drivers/net/wireless/rsi/rsi_91x_core.c @@ -465,7 +465,9 @@ void rsi_core_xmit(struct rsi_common *common, struct sk_buff *skb) tid, 0); } } - if (skb->protocol == cpu_to_be16(ETH_P_PAE)) { + + if (IEEE80211_SKB_CB(skb)->control.flags & + IEEE80211_TX_CTRL_PORT_CTRL_PROTO) { q_num = MGMT_SOFT_Q; skb->priority = q_num; } diff --git a/drivers/net/wireless/rsi/rsi_91x_hal.c b/drivers/net/wireless/rsi/rsi_91x_hal.c index dca81a4bbdd7..30d2eccbcadd 100644 --- 
a/drivers/net/wireless/rsi/rsi_91x_hal.c +++ b/drivers/net/wireless/rsi/rsi_91x_hal.c @@ -162,12 +162,16 @@ int rsi_prepare_data_desc(struct rsi_common *common, struct sk_buff *skb) u8 header_size; u8 vap_id = 0; u8 dword_align_bytes; + bool tx_eapol; u16 seq_num; info = IEEE80211_SKB_CB(skb); vif = info->control.vif; tx_params = (struct skb_info *)info->driver_data; + tx_eapol = IEEE80211_SKB_CB(skb)->control.flags & + IEEE80211_TX_CTRL_PORT_CTRL_PROTO; + header_size = FRAME_DESC_SZ + sizeof(struct rsi_xtended_desc); if (header_size > skb_headroom(skb)) { rsi_dbg(ERR_ZONE, "%s: Unable to send pkt\n", __func__); @@ -231,7 +235,7 @@ int rsi_prepare_data_desc(struct rsi_common *common, struct sk_buff *skb) } } - if (skb->protocol == cpu_to_be16(ETH_P_PAE)) { + if (tx_eapol) { rsi_dbg(INFO_ZONE, "*** Tx EAPOL ***\n"); data_desc->frame_info = cpu_to_le16(RATE_INFO_ENABLE); diff --git a/drivers/nfc/pn533/pn533.c b/drivers/nfc/pn533/pn533.c index 6dc0af63440f..939d27652a4c 100644 --- a/drivers/nfc/pn533/pn533.c +++ b/drivers/nfc/pn533/pn533.c @@ -1297,6 +1297,8 @@ static int pn533_poll_dep_complete(struct pn533 *dev, void *arg, if (IS_ERR(resp)) return PTR_ERR(resp); + memset(&nfc_target, 0, sizeof(struct nfc_target)); + rsp = (struct pn533_cmd_jump_dep_response *)resp->data; rc = rsp->status & PN533_CMD_RET_MASK; @@ -1928,6 +1930,8 @@ static int pn533_in_dep_link_up_complete(struct pn533 *dev, void *arg, dev_dbg(dev->dev, "Creating new target\n"); + memset(&nfc_target, 0, sizeof(struct nfc_target)); + nfc_target.supported_protocols = NFC_PROTO_NFC_DEP_MASK; nfc_target.nfcid1_len = 10; memcpy(nfc_target.nfcid1, rsp->nfcid3t, nfc_target.nfcid1_len); diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 692ee0f4a1ec..672f53d5651a 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -1113,6 +1113,18 @@ static u32 nvme_known_admin_effects(u8 opcode) return 0; } +static u32 nvme_known_nvm_effects(u8 opcode) +{ + switch (opcode) { + case nvme_cmd_write: + case nvme_cmd_write_zeroes: + case nvme_cmd_write_uncor: + return NVME_CMD_EFFECTS_LBCC; + default: + return 0; + } +} + u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode) { u32 effects = 0; @@ -1120,16 +1132,24 @@ u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode) if (ns) { if (ns->head->effects) effects = le32_to_cpu(ns->head->effects->iocs[opcode]); + if (ns->head->ids.csi == NVME_CAP_CSS_NVM) + effects |= nvme_known_nvm_effects(opcode); if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC)) dev_warn_once(ctrl->device, - "IO command:%02x has unhandled effects:%08x\n", + "IO command:%02x has unusual effects:%08x\n", opcode, effects); - return 0; - } - if (ctrl->effects) - effects = le32_to_cpu(ctrl->effects->acs[opcode]); - effects |= nvme_known_admin_effects(opcode); + /* + * NVME_CMD_EFFECTS_CSE_MASK causes a freeze all I/O queues, + * which would deadlock when done on an I/O command. Note that + * We already warn about an unusual effect above. 
+ */ + effects &= ~NVME_CMD_EFFECTS_CSE_MASK; + } else { + if (ctrl->effects) + effects = le32_to_cpu(ctrl->effects->acs[opcode]); + effects |= nvme_known_admin_effects(opcode); + } return effects; } @@ -2874,7 +2894,7 @@ static int nvme_init_non_mdts_limits(struct nvme_ctrl *ctrl) id = kzalloc(sizeof(*id), GFP_KERNEL); if (!id) - return 0; + return -ENOMEM; c.identify.opcode = nvme_admin_identify; c.identify.cns = NVME_ID_CNS_CS_CTRL; diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 7f52b2b179b8..39ca48babbe8 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -799,7 +799,7 @@ static inline void nvme_trace_bio_complete(struct request *req) { struct nvme_ns *ns = req->q->queuedata; - if (req->cmd_flags & REQ_NVME_MPATH) + if ((req->cmd_flags & REQ_NVME_MPATH) && req->bio) trace_block_bio_complete(ns->head->disk->queue, req->bio); } diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index d49df7123677..0165e65cf548 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -33,7 +33,7 @@ #define SQ_SIZE(q) ((q)->q_depth << (q)->sqes) #define CQ_SIZE(q) ((q)->q_depth * sizeof(struct nvme_completion)) -#define SGES_PER_PAGE (PAGE_SIZE / sizeof(struct nvme_sgl_desc)) +#define SGES_PER_PAGE (NVME_CTRL_PAGE_SIZE / sizeof(struct nvme_sgl_desc)) /* * These can be higher, but we need to ensure that any command doesn't @@ -142,9 +142,9 @@ struct nvme_dev { mempool_t *iod_mempool; /* shadow doorbell buffer support: */ - u32 *dbbuf_dbs; + __le32 *dbbuf_dbs; dma_addr_t dbbuf_dbs_dma_addr; - u32 *dbbuf_eis; + __le32 *dbbuf_eis; dma_addr_t dbbuf_eis_dma_addr; /* host memory buffer support: */ @@ -208,10 +208,10 @@ struct nvme_queue { #define NVMEQ_SQ_CMB 1 #define NVMEQ_DELETE_ERROR 2 #define NVMEQ_POLLED 3 - u32 *dbbuf_sq_db; - u32 *dbbuf_cq_db; - u32 *dbbuf_sq_ei; - u32 *dbbuf_cq_ei; + __le32 *dbbuf_sq_db; + __le32 *dbbuf_cq_db; + __le32 *dbbuf_sq_ei; + __le32 *dbbuf_cq_ei; struct completion delete_done; }; @@ -332,11 +332,11 @@ static inline int nvme_dbbuf_need_event(u16 event_idx, u16 new_idx, u16 old) } /* Update dbbuf and return true if an MMIO is required */ -static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db, - volatile u32 *dbbuf_ei) +static bool nvme_dbbuf_update_and_check_event(u16 value, __le32 *dbbuf_db, + volatile __le32 *dbbuf_ei) { if (dbbuf_db) { - u16 old_value; + u16 old_value, event_idx; /* * Ensure that the queue is written before updating @@ -344,8 +344,8 @@ static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db, */ wmb(); - old_value = *dbbuf_db; - *dbbuf_db = value; + old_value = le32_to_cpu(*dbbuf_db); + *dbbuf_db = cpu_to_le32(value); /* * Ensure that the doorbell is updated before reading the event @@ -355,7 +355,8 @@ static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db, */ mb(); - if (!nvme_dbbuf_need_event(*dbbuf_ei, value, old_value)) + event_idx = le32_to_cpu(*dbbuf_ei); + if (!nvme_dbbuf_need_event(event_idx, value, old_value)) return false; } @@ -369,9 +370,9 @@ static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db, */ static int nvme_pci_npages_prp(void) { - unsigned nprps = DIV_ROUND_UP(NVME_MAX_KB_SZ + NVME_CTRL_PAGE_SIZE, - NVME_CTRL_PAGE_SIZE); - return DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8); + unsigned max_bytes = (NVME_MAX_KB_SZ * 1024) + NVME_CTRL_PAGE_SIZE; + unsigned nprps = DIV_ROUND_UP(max_bytes, NVME_CTRL_PAGE_SIZE); + return DIV_ROUND_UP(8 * nprps, NVME_CTRL_PAGE_SIZE - 8); } /* @@ -381,7 +382,7 @@ static int 
nvme_pci_npages_prp(void) static int nvme_pci_npages_sgl(void) { return DIV_ROUND_UP(NVME_MAX_SEGS * sizeof(struct nvme_sgl_desc), - PAGE_SIZE); + NVME_CTRL_PAGE_SIZE); } static size_t nvme_pci_iod_alloc_size(void) @@ -731,7 +732,7 @@ static void nvme_pci_sgl_set_seg(struct nvme_sgl_desc *sge, sge->length = cpu_to_le32(entries * sizeof(*sge)); sge->type = NVME_SGL_FMT_LAST_SEG_DESC << 4; } else { - sge->length = cpu_to_le32(PAGE_SIZE); + sge->length = cpu_to_le32(NVME_CTRL_PAGE_SIZE); sge->type = NVME_SGL_FMT_SEG_DESC << 4; } } diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c index 52bb262d267a..bf78c58ed41d 100644 --- a/drivers/nvme/target/admin-cmd.c +++ b/drivers/nvme/target/admin-cmd.c @@ -164,26 +164,29 @@ out: static void nvmet_get_cmd_effects_nvm(struct nvme_effects_log *log) { - log->acs[nvme_admin_get_log_page] = cpu_to_le32(1 << 0); - log->acs[nvme_admin_identify] = cpu_to_le32(1 << 0); - log->acs[nvme_admin_abort_cmd] = cpu_to_le32(1 << 0); - log->acs[nvme_admin_set_features] = cpu_to_le32(1 << 0); - log->acs[nvme_admin_get_features] = cpu_to_le32(1 << 0); - log->acs[nvme_admin_async_event] = cpu_to_le32(1 << 0); - log->acs[nvme_admin_keep_alive] = cpu_to_le32(1 << 0); + log->acs[nvme_admin_get_log_page] = + log->acs[nvme_admin_identify] = + log->acs[nvme_admin_abort_cmd] = + log->acs[nvme_admin_set_features] = + log->acs[nvme_admin_get_features] = + log->acs[nvme_admin_async_event] = + log->acs[nvme_admin_keep_alive] = + cpu_to_le32(NVME_CMD_EFFECTS_CSUPP); - log->iocs[nvme_cmd_read] = cpu_to_le32(1 << 0); - log->iocs[nvme_cmd_write] = cpu_to_le32(1 << 0); - log->iocs[nvme_cmd_flush] = cpu_to_le32(1 << 0); - log->iocs[nvme_cmd_dsm] = cpu_to_le32(1 << 0); - log->iocs[nvme_cmd_write_zeroes] = cpu_to_le32(1 << 0); + log->iocs[nvme_cmd_read] = + log->iocs[nvme_cmd_write] = + log->iocs[nvme_cmd_flush] = + log->iocs[nvme_cmd_dsm] = + log->iocs[nvme_cmd_write_zeroes] = + cpu_to_le32(NVME_CMD_EFFECTS_CSUPP); } static void nvmet_get_cmd_effects_zns(struct nvme_effects_log *log) { - log->iocs[nvme_cmd_zone_append] = cpu_to_le32(1 << 0); - log->iocs[nvme_cmd_zone_mgmt_send] = cpu_to_le32(1 << 0); - log->iocs[nvme_cmd_zone_mgmt_recv] = cpu_to_le32(1 << 0); + log->iocs[nvme_cmd_zone_append] = + log->iocs[nvme_cmd_zone_mgmt_send] = + log->iocs[nvme_cmd_zone_mgmt_recv] = + cpu_to_le32(NVME_CMD_EFFECTS_CSUPP); } static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req) diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index 87a347248c38..cfd038551156 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c @@ -15,6 +15,7 @@ #include "nvmet.h" +struct kmem_cache *nvmet_bvec_cache; struct workqueue_struct *buffered_io_wq; struct workqueue_struct *zbd_wq; static const struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX]; @@ -1607,26 +1608,28 @@ void nvmet_subsys_put(struct nvmet_subsys *subsys) static int __init nvmet_init(void) { - int error; + int error = -ENOMEM; nvmet_ana_group_enabled[NVMET_DEFAULT_ANA_GRPID] = 1; + nvmet_bvec_cache = kmem_cache_create("nvmet-bvec", + NVMET_MAX_MPOOL_BVEC * sizeof(struct bio_vec), 0, + SLAB_HWCACHE_ALIGN, NULL); + if (!nvmet_bvec_cache) + return -ENOMEM; + zbd_wq = alloc_workqueue("nvmet-zbd-wq", WQ_MEM_RECLAIM, 0); if (!zbd_wq) - return -ENOMEM; + goto out_destroy_bvec_cache; buffered_io_wq = alloc_workqueue("nvmet-buffered-io-wq", WQ_MEM_RECLAIM, 0); - if (!buffered_io_wq) { - error = -ENOMEM; + if (!buffered_io_wq) goto out_free_zbd_work_queue; - } nvmet_wq = 
alloc_workqueue("nvmet-wq", WQ_MEM_RECLAIM, 0); - if (!nvmet_wq) { - error = -ENOMEM; + if (!nvmet_wq) goto out_free_buffered_work_queue; - } error = nvmet_init_discovery(); if (error) @@ -1645,6 +1648,8 @@ out_free_buffered_work_queue: destroy_workqueue(buffered_io_wq); out_free_zbd_work_queue: destroy_workqueue(zbd_wq); +out_destroy_bvec_cache: + kmem_cache_destroy(nvmet_bvec_cache); return error; } @@ -1656,6 +1661,7 @@ static void __exit nvmet_exit(void) destroy_workqueue(nvmet_wq); destroy_workqueue(buffered_io_wq); destroy_workqueue(zbd_wq); + kmem_cache_destroy(nvmet_bvec_cache); BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_entry) != 1024); BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_hdr) != 1024); diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c index 228871d48106..eadba13b276d 100644 --- a/drivers/nvme/target/io-cmd-file.c +++ b/drivers/nvme/target/io-cmd-file.c @@ -11,7 +11,6 @@ #include #include "nvmet.h" -#define NVMET_MAX_MPOOL_BVEC 16 #define NVMET_MIN_MPOOL_OBJ 16 int nvmet_file_ns_revalidate(struct nvmet_ns *ns) @@ -33,8 +32,6 @@ void nvmet_file_ns_disable(struct nvmet_ns *ns) flush_workqueue(buffered_io_wq); mempool_destroy(ns->bvec_pool); ns->bvec_pool = NULL; - kmem_cache_destroy(ns->bvec_cache); - ns->bvec_cache = NULL; fput(ns->file); ns->file = NULL; } @@ -68,16 +65,8 @@ int nvmet_file_ns_enable(struct nvmet_ns *ns) ns->blksize_shift = min_t(u8, file_inode(ns->file)->i_blkbits, 12); - ns->bvec_cache = kmem_cache_create("nvmet-bvec", - NVMET_MAX_MPOOL_BVEC * sizeof(struct bio_vec), - 0, SLAB_HWCACHE_ALIGN, NULL); - if (!ns->bvec_cache) { - ret = -ENOMEM; - goto err; - } - ns->bvec_pool = mempool_create(NVMET_MIN_MPOOL_OBJ, mempool_alloc_slab, - mempool_free_slab, ns->bvec_cache); + mempool_free_slab, nvmet_bvec_cache); if (!ns->bvec_pool) { ret = -ENOMEM; @@ -86,9 +75,10 @@ int nvmet_file_ns_enable(struct nvmet_ns *ns) return ret; err: + fput(ns->file); + ns->file = NULL; ns->size = 0; ns->blksize_shift = 0; - nvmet_file_ns_disable(ns); return ret; } diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h index dbeb0b8c1194..fdb06a9d430d 100644 --- a/drivers/nvme/target/nvmet.h +++ b/drivers/nvme/target/nvmet.h @@ -77,7 +77,6 @@ struct nvmet_ns { struct completion disable_done; mempool_t *bvec_pool; - struct kmem_cache *bvec_cache; int use_p2pmem; struct pci_dev *p2p_dev; @@ -363,6 +362,8 @@ struct nvmet_req { u64 error_slba; }; +#define NVMET_MAX_MPOOL_BVEC 16 +extern struct kmem_cache *nvmet_bvec_cache; extern struct workqueue_struct *buffered_io_wq; extern struct workqueue_struct *zbd_wq; extern struct workqueue_struct *nvmet_wq; diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c index 6220e1dd961a..9b5929754195 100644 --- a/drivers/nvme/target/passthru.c +++ b/drivers/nvme/target/passthru.c @@ -271,14 +271,13 @@ static void nvmet_passthru_execute_cmd(struct nvmet_req *req) } /* - * If there are effects for the command we are about to execute, or - * an end_req function we need to use nvme_execute_passthru_rq() - * synchronously in a work item seeing the end_req function and - * nvme_passthru_end() can't be called in the request done callback - * which is typically in interrupt context. + * If a command needs post-execution fixups, or there are any + * non-trivial effects, make sure to execute the command synchronously + * in a workqueue so that nvme_passthru_end gets called. 
*/ effects = nvme_command_effects(ctrl, ns, req->cmd->common.opcode); - if (req->p.use_workqueue || effects) { + if (req->p.use_workqueue || + (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC))) { INIT_WORK(&req->p.work, nvmet_passthru_execute_cmd_work); req->p.rq = rq; queue_work(nvmet_wq, &req->p.work); diff --git a/drivers/of/kexec.c b/drivers/of/kexec.c index 8f9dba11873c..52bb68fb2216 100644 --- a/drivers/of/kexec.c +++ b/drivers/of/kexec.c @@ -284,7 +284,7 @@ void *of_kexec_alloc_and_setup_fdt(const struct kimage *image, const char *cmdline, size_t extra_fdt_size) { void *fdt; - int ret, chosen_node; + int ret, chosen_node, len; const void *prop; size_t fdt_size; @@ -327,19 +327,19 @@ void *of_kexec_alloc_and_setup_fdt(const struct kimage *image, goto out; /* Did we boot using an initrd? */ - prop = fdt_getprop(fdt, chosen_node, "linux,initrd-start", NULL); + prop = fdt_getprop(fdt, chosen_node, "linux,initrd-start", &len); if (prop) { u64 tmp_start, tmp_end, tmp_size; - tmp_start = fdt64_to_cpu(*((const fdt64_t *) prop)); + tmp_start = of_read_number(prop, len / 4); - prop = fdt_getprop(fdt, chosen_node, "linux,initrd-end", NULL); + prop = fdt_getprop(fdt, chosen_node, "linux,initrd-end", &len); if (!prop) { ret = -EINVAL; goto out; } - tmp_end = fdt64_to_cpu(*((const fdt64_t *) prop)); + tmp_end = of_read_number(prop, len / 4); /* * kexec reserves exact initrd size, while firmware may diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c index be471ef07c7e..aabdb1795a87 100644 --- a/drivers/of/overlay.c +++ b/drivers/of/overlay.c @@ -549,7 +549,7 @@ static int find_dup_cset_node_entry(struct overlay_changeset *ovcs, fn_1 = kasprintf(GFP_KERNEL, "%pOF", ce_1->np); fn_2 = kasprintf(GFP_KERNEL, "%pOF", ce_2->np); - node_path_match = !strcmp(fn_1, fn_2); + node_path_match = !fn_1 || !fn_2 || !strcmp(fn_1, fn_2); kfree(fn_1); kfree(fn_2); if (node_path_match) { @@ -584,7 +584,7 @@ static int find_dup_cset_prop(struct overlay_changeset *ovcs, fn_1 = kasprintf(GFP_KERNEL, "%pOF", ce_1->np); fn_2 = kasprintf(GFP_KERNEL, "%pOF", ce_2->np); - node_path_match = !strcmp(fn_1, fn_2); + node_path_match = !fn_1 || !fn_2 || !strcmp(fn_1, fn_2); kfree(fn_1); kfree(fn_2); if (node_path_match && diff --git a/drivers/parisc/led.c b/drivers/parisc/led.c index cf91cb024be3..4c0551f89c44 100644 --- a/drivers/parisc/led.c +++ b/drivers/parisc/led.c @@ -137,6 +137,9 @@ static int start_task(void) /* Create the work queue and queue the LED task */ led_wq = create_singlethread_workqueue("led_wq"); + if (!led_wq) + return -ENOMEM; + queue_delayed_work(led_wq, &led_task, 0); return 0; diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c index e408ebf5bd73..00972a7bc976 100644 --- a/drivers/pci/controller/dwc/pcie-designware.c +++ b/drivers/pci/controller/dwc/pcie-designware.c @@ -730,7 +730,7 @@ void dw_pcie_setup(struct dw_pcie *pci) if (pci->n_fts[1]) { val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL); val &= ~PORT_LOGIC_N_FTS_MASK; - val |= pci->n_fts[pci->link_gen - 1]; + val |= pci->n_fts[1]; dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val); } diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c index a5987e52700e..8dce71142e10 100644 --- a/drivers/pci/controller/vmd.c +++ b/drivers/pci/controller/vmd.c @@ -900,6 +900,11 @@ static int vmd_resume(struct device *dev) struct vmd_dev *vmd = pci_get_drvdata(pdev); int err, i; + if (vmd->irq_domain) + vmd_set_msi_remapping(vmd, true); + else + 
vmd_set_msi_remapping(vmd, false); + for (i = 0; i < vmd->msix_count; i++) { err = devm_request_irq(dev, pci_irq_vector(pdev, i), vmd_irq, IRQF_NO_THREAD, diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c index a5ed779b0a51..45535d4ae644 100644 --- a/drivers/pci/endpoint/functions/pci-epf-test.c +++ b/drivers/pci/endpoint/functions/pci-epf-test.c @@ -883,7 +883,7 @@ static int pci_epf_test_bind(struct pci_epf *epf) if (ret) epf_test->dma_supported = false; - if (linkup_notifier) { + if (linkup_notifier || core_init_notifier) { epf->nb.notifier_call = pci_epf_test_notifier; pci_epc_register_notifier(epc, &epf->nb); } else { diff --git a/drivers/pci/irq.c b/drivers/pci/irq.c index 12ecd0aaa28d..0050e8f6814e 100644 --- a/drivers/pci/irq.c +++ b/drivers/pci/irq.c @@ -44,6 +44,8 @@ int pci_request_irq(struct pci_dev *dev, unsigned int nr, irq_handler_t handler, va_start(ap, fmt); devname = kvasprintf(GFP_KERNEL, fmt, ap); va_end(ap); + if (!devname) + return -ENOMEM; ret = request_threaded_irq(pci_irq_vector(dev, nr), handler, thread_fn, irqflags, devname, dev_id); diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index 7fb5cd17cc98..f2909ae93f2f 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c @@ -1179,11 +1179,9 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine) sysfs_bin_attr_init(res_attr); if (write_combine) { - pdev->res_attr_wc[num] = res_attr; sprintf(res_attr_name, "resource%d_wc", num); res_attr->mmap = pci_mmap_resource_wc; } else { - pdev->res_attr[num] = res_attr; sprintf(res_attr_name, "resource%d", num); if (pci_resource_flags(pdev, num) & IORESOURCE_IO) { res_attr->read = pci_read_resource_io; @@ -1201,10 +1199,17 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine) res_attr->size = pci_resource_len(pdev, num); res_attr->private = (void *)(unsigned long)num; retval = sysfs_create_bin_file(&pdev->dev.kobj, res_attr); - if (retval) + if (retval) { kfree(res_attr); + return retval; + } - return retval; + if (write_combine) + pdev->res_attr_wc[num] = res_attr; + else + pdev->res_attr[num] = res_attr; + + return 0; } /** diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 2bfff2328cf8..a0c6a9eeb7c6 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -6383,6 +6383,8 @@ bool pci_device_is_present(struct pci_dev *pdev) { u32 v; + /* Check PF if pdev is a VF, since VF Vendor/Device IDs are 0xffff */ + pdev = pci_physfn(pdev); if (pci_dev_is_disconnected(pdev)) return false; return pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0); diff --git a/drivers/perf/arm_dmc620_pmu.c b/drivers/perf/arm_dmc620_pmu.c index 280a6ae3e27c..54aa4658fb36 100644 --- a/drivers/perf/arm_dmc620_pmu.c +++ b/drivers/perf/arm_dmc620_pmu.c @@ -725,6 +725,8 @@ static struct platform_driver dmc620_pmu_driver = { static int __init dmc620_pmu_init(void) { + int ret; + cpuhp_state_num = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, DMC620_DRVNAME, NULL, @@ -732,7 +734,11 @@ static int __init dmc620_pmu_init(void) if (cpuhp_state_num < 0) return cpuhp_state_num; - return platform_driver_register(&dmc620_pmu_driver); + ret = platform_driver_register(&dmc620_pmu_driver); + if (ret) + cpuhp_remove_multi_state(cpuhp_state_num); + + return ret; } static void __exit dmc620_pmu_exit(void) diff --git a/drivers/perf/arm_dsu_pmu.c b/drivers/perf/arm_dsu_pmu.c index a36698a90d2f..54b8ba032c78 100644 --- a/drivers/perf/arm_dsu_pmu.c +++ b/drivers/perf/arm_dsu_pmu.c @@ 
-858,7 +858,11 @@ static int __init dsu_pmu_init(void) if (ret < 0) return ret; dsu_pmu_cpuhp_state = ret; - return platform_driver_register(&dsu_pmu_driver); + ret = platform_driver_register(&dsu_pmu_driver); + if (ret) + cpuhp_remove_multi_state(dsu_pmu_cpuhp_state); + + return ret; } static void __exit dsu_pmu_exit(void) diff --git a/drivers/perf/arm_smmuv3_pmu.c b/drivers/perf/arm_smmuv3_pmu.c index 226348822ab3..5933ad151f86 100644 --- a/drivers/perf/arm_smmuv3_pmu.c +++ b/drivers/perf/arm_smmuv3_pmu.c @@ -896,6 +896,8 @@ static struct platform_driver smmu_pmu_driver = { static int __init arm_smmu_pmu_init(void) { + int ret; + cpuhp_state_num = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "perf/arm/pmcg:online", NULL, @@ -903,7 +905,11 @@ static int __init arm_smmu_pmu_init(void) if (cpuhp_state_num < 0) return cpuhp_state_num; - return platform_driver_register(&smmu_pmu_driver); + ret = platform_driver_register(&smmu_pmu_driver); + if (ret) + cpuhp_remove_multi_state(cpuhp_state_num); + + return ret; } module_init(arm_smmu_pmu_init); diff --git a/drivers/phy/broadcom/phy-brcm-usb.c b/drivers/phy/broadcom/phy-brcm-usb.c index 2cb3779fcdf8..c0c3ab9b2a15 100644 --- a/drivers/phy/broadcom/phy-brcm-usb.c +++ b/drivers/phy/broadcom/phy-brcm-usb.c @@ -102,9 +102,9 @@ static int brcm_pm_notifier(struct notifier_block *notifier, static irqreturn_t brcm_usb_phy_wake_isr(int irq, void *dev_id) { - struct phy *gphy = dev_id; + struct device *dev = dev_id; - pm_wakeup_event(&gphy->dev, 0); + pm_wakeup_event(dev, 0); return IRQ_HANDLED; } @@ -451,7 +451,7 @@ static int brcm_usb_phy_dvr_init(struct platform_device *pdev, if (priv->wake_irq >= 0) { err = devm_request_irq(dev, priv->wake_irq, brcm_usb_phy_wake_isr, 0, - dev_name(dev), gphy); + dev_name(dev), dev); if (err < 0) return err; device_set_wakeup_capable(dev, 1); diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.c b/drivers/phy/qualcomm/phy-qcom-qmp.c index ed69d455ac0e..a9687e040960 100644 --- a/drivers/phy/qualcomm/phy-qcom-qmp.c +++ b/drivers/phy/qualcomm/phy-qcom-qmp.c @@ -3417,8 +3417,8 @@ static const struct qmp_phy_cfg sc7180_dpphy_cfg = { .clk_list = qmp_v3_phy_clk_l, .num_clks = ARRAY_SIZE(qmp_v3_phy_clk_l), - .reset_list = sc7180_usb3phy_reset_l, - .num_resets = ARRAY_SIZE(sc7180_usb3phy_reset_l), + .reset_list = msm8996_usb3phy_reset_l, + .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l), .vreg_list = qmp_phy_vreg_l, .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), .regs = qmp_v3_usb3phy_regs_layout, @@ -3805,8 +3805,8 @@ static const struct qmp_phy_cfg sm8250_dpphy_cfg = { .serdes_tbl_hbr3 = qmp_v4_dp_serdes_tbl_hbr3, .serdes_tbl_hbr3_num = ARRAY_SIZE(qmp_v4_dp_serdes_tbl_hbr3), - .clk_list = qmp_v4_phy_clk_l, - .num_clks = ARRAY_SIZE(qmp_v4_phy_clk_l), + .clk_list = qmp_v4_sm8250_usbphy_clk_l, + .num_clks = ARRAY_SIZE(qmp_v4_sm8250_usbphy_clk_l), .reset_list = msm8996_usb3phy_reset_l, .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l), .vreg_list = qmp_phy_vreg_l, diff --git a/drivers/pinctrl/pinconf-generic.c b/drivers/pinctrl/pinconf-generic.c index b1db28007986..e6fe1330eab9 100644 --- a/drivers/pinctrl/pinconf-generic.c +++ b/drivers/pinctrl/pinconf-generic.c @@ -393,8 +393,10 @@ int pinconf_generic_dt_node_to_map(struct pinctrl_dev *pctldev, for_each_available_child_of_node(np_config, np) { ret = pinconf_generic_dt_subnode_to_map(pctldev, np, map, &reserved_maps, num_maps, type); - if (ret < 0) + if (ret < 0) { + of_node_put(np); goto exit; + } } return 0; diff --git a/drivers/pinctrl/pinctrl-k210.c b/drivers/pinctrl/pinctrl-k210.c 
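The pinconf-generic.c hunk above (and the pinctrl-k210.c hunk that follows) fix the same leak: for_each_available_child_of_node() takes a reference on the current child and only drops it when the iteration advances, so any early exit from the loop has to drop that reference itself. A minimal sketch, using a hypothetical per-child helper:

```c
#include <linux/of.h>

/* hypothetical per-child handler, stands in for the subnode-to-map calls */
static int example_handle_child(struct device_node *np)
{
	return 0;
}

static int example_walk_children(struct device_node *np_config)
{
	struct device_node *np;
	int ret;

	for_each_available_child_of_node(np_config, np) {
		ret = example_handle_child(np);
		if (ret < 0) {
			of_node_put(np);	/* drop the iterator's reference */
			return ret;
		}
	}

	return 0;
}
```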
index ecab6bf63dc6..ad4db99094a7 100644 --- a/drivers/pinctrl/pinctrl-k210.c +++ b/drivers/pinctrl/pinctrl-k210.c @@ -862,8 +862,10 @@ static int k210_pinctrl_dt_node_to_map(struct pinctrl_dev *pctldev, for_each_available_child_of_node(np_config, np) { ret = k210_pinctrl_dt_subnode_to_map(pctldev, np, map, &reserved_maps, num_maps); - if (ret < 0) + if (ret < 0) { + of_node_put(np); goto err; + } } return 0; diff --git a/drivers/platform/chrome/cros_ec_typec.c b/drivers/platform/chrome/cros_ec_typec.c index aadb8d237aef..b94abb8f7706 100644 --- a/drivers/platform/chrome/cros_ec_typec.c +++ b/drivers/platform/chrome/cros_ec_typec.c @@ -156,12 +156,12 @@ static int cros_typec_get_switch_handles(struct cros_typec_port *port, return 0; role_sw_err: - usb_role_switch_put(port->role_sw); -ori_sw_err: typec_switch_put(port->ori_sw); -mux_err: + port->ori_sw = NULL; +ori_sw_err: typec_mux_put(port->mux); - + port->mux = NULL; +mux_err: return -ENODEV; } diff --git a/drivers/platform/chrome/cros_usbpd_notify.c b/drivers/platform/chrome/cros_usbpd_notify.c index 48a6617aa12f..de76de6f5090 100644 --- a/drivers/platform/chrome/cros_usbpd_notify.c +++ b/drivers/platform/chrome/cros_usbpd_notify.c @@ -285,7 +285,11 @@ static int __init cros_usbpd_notify_init(void) return ret; #ifdef CONFIG_ACPI - platform_driver_register(&cros_usbpd_notify_acpi_driver); + ret = platform_driver_register(&cros_usbpd_notify_acpi_driver); + if (ret) { + platform_driver_unregister(&cros_usbpd_notify_plat_driver); + return ret; + } #endif return 0; } diff --git a/drivers/platform/mellanox/mlxbf-pmc.c b/drivers/platform/mellanox/mlxbf-pmc.c index 65b4a819f1bd..c2c9b0d3244c 100644 --- a/drivers/platform/mellanox/mlxbf-pmc.c +++ b/drivers/platform/mellanox/mlxbf-pmc.c @@ -358,7 +358,7 @@ static const struct mlxbf_pmc_events mlxbf_pmc_hnfnet_events[] = { { 0x32, "DDN_DIAG_W_INGRESS" }, { 0x33, "DDN_DIAG_C_INGRESS" }, { 0x34, "DDN_DIAG_CORE_SENT" }, - { 0x35, "NDN_DIAG_S_OUT_OF_CRED" }, + { 0x35, "NDN_DIAG_N_OUT_OF_CRED" }, { 0x36, "NDN_DIAG_S_OUT_OF_CRED" }, { 0x37, "NDN_DIAG_E_OUT_OF_CRED" }, { 0x38, "NDN_DIAG_W_OUT_OF_CRED" }, diff --git a/drivers/platform/x86/huawei-wmi.c b/drivers/platform/x86/huawei-wmi.c index eac3e6b4ea11..935562c870c3 100644 --- a/drivers/platform/x86/huawei-wmi.c +++ b/drivers/platform/x86/huawei-wmi.c @@ -760,6 +760,9 @@ static int huawei_wmi_input_setup(struct device *dev, const char *guid, struct input_dev **idev) { + acpi_status status; + int err; + *idev = devm_input_allocate_device(dev); if (!*idev) return -ENOMEM; @@ -769,10 +772,19 @@ static int huawei_wmi_input_setup(struct device *dev, (*idev)->id.bustype = BUS_HOST; (*idev)->dev.parent = dev; - return sparse_keymap_setup(*idev, huawei_wmi_keymap, NULL) || - input_register_device(*idev) || - wmi_install_notify_handler(guid, huawei_wmi_input_notify, - *idev); + err = sparse_keymap_setup(*idev, huawei_wmi_keymap, NULL); + if (err) + return err; + + err = input_register_device(*idev); + if (err) + return err; + + status = wmi_install_notify_handler(guid, huawei_wmi_input_notify, *idev); + if (ACPI_FAILURE(status)) + return -EIO; + + return 0; } static void huawei_wmi_input_exit(struct device *dev, const char *guid) diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c index 7cc9089d1e14..e7a3e3402817 100644 --- a/drivers/platform/x86/intel_scu_ipc.c +++ b/drivers/platform/x86/intel_scu_ipc.c @@ -583,7 +583,6 @@ __intel_scu_ipc_register(struct device *parent, scu->dev.parent = parent; scu->dev.class = 
&intel_scu_ipc_class; scu->dev.release = intel_scu_ipc_release; - dev_set_name(&scu->dev, "intel_scu_ipc"); if (!request_mem_region(scu_data->mem.start, resource_size(&scu_data->mem), "intel_scu_ipc")) { @@ -612,6 +611,7 @@ __intel_scu_ipc_register(struct device *parent, * After this point intel_scu_ipc_release() takes care of * releasing the SCU IPC resources once refcount drops to zero. */ + dev_set_name(&scu->dev, "intel_scu_ipc"); err = device_register(&scu->dev); if (err) { put_device(&scu->dev); diff --git a/drivers/platform/x86/mxm-wmi.c b/drivers/platform/x86/mxm-wmi.c index 9a19fbd2f734..9a457956025a 100644 --- a/drivers/platform/x86/mxm-wmi.c +++ b/drivers/platform/x86/mxm-wmi.c @@ -35,13 +35,11 @@ int mxm_wmi_call_mxds(int adapter) .xarg = 1, }; struct acpi_buffer input = { (acpi_size)sizeof(args), &args }; - struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; acpi_status status; printk("calling mux switch %d\n", adapter); - status = wmi_evaluate_method(MXM_WMMX_GUID, 0x0, adapter, &input, - &output); + status = wmi_evaluate_method(MXM_WMMX_GUID, 0x0, adapter, &input, NULL); if (ACPI_FAILURE(status)) return status; @@ -60,13 +58,11 @@ int mxm_wmi_call_mxmx(int adapter) .xarg = 1, }; struct acpi_buffer input = { (acpi_size)sizeof(args), &args }; - struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; acpi_status status; printk("calling mux switch %d\n", adapter); - status = wmi_evaluate_method(MXM_WMMX_GUID, 0x0, adapter, &input, - &output); + status = wmi_evaluate_method(MXM_WMMX_GUID, 0x0, adapter, &input, NULL); if (ACPI_FAILURE(status)) return status; diff --git a/drivers/pnp/core.c b/drivers/pnp/core.c index 4df5aa6a309c..6a60c5d83383 100644 --- a/drivers/pnp/core.c +++ b/drivers/pnp/core.c @@ -148,14 +148,14 @@ struct pnp_dev *pnp_alloc_dev(struct pnp_protocol *protocol, int id, dev->dev.coherent_dma_mask = dev->dma_mask; dev->dev.release = &pnp_release_device; - dev_set_name(&dev->dev, "%02x:%02x", dev->protocol->number, dev->number); - dev_id = pnp_add_id(dev, pnpid); if (!dev_id) { kfree(dev); return NULL; } + dev_set_name(&dev->dev, "%02x:%02x", dev->protocol->number, dev->number); + return dev; } diff --git a/drivers/power/supply/ab8500_charger.c b/drivers/power/supply/ab8500_charger.c index 15eadaf46f14..a4f766fc7c9d 100644 --- a/drivers/power/supply/ab8500_charger.c +++ b/drivers/power/supply/ab8500_charger.c @@ -3726,7 +3726,14 @@ static int __init ab8500_charger_init(void) if (ret) return ret; - return platform_driver_register(&ab8500_charger_driver); + ret = platform_driver_register(&ab8500_charger_driver); + if (ret) { + platform_unregister_drivers(ab8500_charger_component_drivers, + ARRAY_SIZE(ab8500_charger_component_drivers)); + return ret; + } + + return 0; } static void __exit ab8500_charger_exit(void) diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c index a6e9afa5a1cf..3f9c60c5b250 100644 --- a/drivers/power/supply/power_supply_core.c +++ b/drivers/power/supply/power_supply_core.c @@ -696,6 +696,11 @@ int power_supply_get_battery_info(struct power_supply *psy, int i, tab_len, size; propname = kasprintf(GFP_KERNEL, "ocv-capacity-table-%d", index); + if (!propname) { + power_supply_put_battery_info(psy, info); + err = -ENOMEM; + goto out_put_node; + } list = of_get_property(battery_np, propname, &size); if (!list || !size) { dev_err(&psy->dev, "failed to get %s\n", propname); @@ -1220,8 +1225,8 @@ create_triggers_failed: register_cooler_failed: psy_unregister_thermal(psy); register_thermal_failed: 
- device_del(dev); wakeup_init_failed: + device_del(dev); device_add_failed: check_supplies_failed: dev_set_name_failed: diff --git a/drivers/power/supply/z2_battery.c b/drivers/power/supply/z2_battery.c index 7ed4e4bb26ec..fd33cdf9cf12 100644 --- a/drivers/power/supply/z2_battery.c +++ b/drivers/power/supply/z2_battery.c @@ -206,10 +206,12 @@ static int z2_batt_probe(struct i2c_client *client, charger->charge_gpiod = devm_gpiod_get_optional(&client->dev, NULL, GPIOD_IN); - if (IS_ERR(charger->charge_gpiod)) - return dev_err_probe(&client->dev, + if (IS_ERR(charger->charge_gpiod)) { + ret = dev_err_probe(&client->dev, PTR_ERR(charger->charge_gpiod), "failed to get charge GPIO\n"); + goto err; + } if (charger->charge_gpiod) { gpiod_set_consumer_name(charger->charge_gpiod, "BATT CHRG"); diff --git a/drivers/pwm/pwm-mediatek.c b/drivers/pwm/pwm-mediatek.c index 0d4dd80e9f07..f8f9a7489129 100644 --- a/drivers/pwm/pwm-mediatek.c +++ b/drivers/pwm/pwm-mediatek.c @@ -275,7 +275,7 @@ static const struct pwm_mediatek_of_data mt2712_pwm_data = { static const struct pwm_mediatek_of_data mt7622_pwm_data = { .num_pwms = 6, .pwm45_fixup = false, - .has_ck_26m_sel = false, + .has_ck_26m_sel = true, }; static const struct pwm_mediatek_of_data mt7623_pwm_data = { diff --git a/drivers/pwm/pwm-mtk-disp.c b/drivers/pwm/pwm-mtk-disp.c index c605013e4114..3fbb4bae93a4 100644 --- a/drivers/pwm/pwm-mtk-disp.c +++ b/drivers/pwm/pwm-mtk-disp.c @@ -178,7 +178,7 @@ static void mtk_disp_pwm_get_state(struct pwm_chip *chip, { struct mtk_disp_pwm *mdp = to_mtk_disp_pwm(chip); u64 rate, period, high_width; - u32 clk_div, con0, con1; + u32 clk_div, pwm_en, con0, con1; int err; err = clk_prepare_enable(mdp->clk_main); @@ -197,7 +197,8 @@ static void mtk_disp_pwm_get_state(struct pwm_chip *chip, rate = clk_get_rate(mdp->clk_main); con0 = readl(mdp->base + mdp->data->con0); con1 = readl(mdp->base + mdp->data->con1); - state->enabled = !!(con0 & BIT(0)); + pwm_en = readl(mdp->base + DISP_PWM_EN); + state->enabled = !!(pwm_en & mdp->data->enable_mask); clk_div = FIELD_GET(PWM_CLKDIV_MASK, con0); period = FIELD_GET(PWM_PERIOD_MASK, con1); /* diff --git a/drivers/pwm/pwm-sifive.c b/drivers/pwm/pwm-sifive.c index 58347fcd4812..07e9fc58354f 100644 --- a/drivers/pwm/pwm-sifive.c +++ b/drivers/pwm/pwm-sifive.c @@ -217,8 +217,11 @@ static int pwm_sifive_clock_notifier(struct notifier_block *nb, struct pwm_sifive_ddata *ddata = container_of(nb, struct pwm_sifive_ddata, notifier); - if (event == POST_RATE_CHANGE) + if (event == POST_RATE_CHANGE) { + mutex_lock(&ddata->lock); pwm_sifive_update_clock(ddata, ndata->new_rate); + mutex_unlock(&ddata->lock); + } return NOTIFY_OK; } diff --git a/drivers/pwm/pwm-tegra.c b/drivers/pwm/pwm-tegra.c index 11a10b575ace..6a1ff9d42f79 100644 --- a/drivers/pwm/pwm-tegra.c +++ b/drivers/pwm/pwm-tegra.c @@ -142,8 +142,8 @@ static int tegra_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, * source clock rate as required_clk_rate, PWM controller will * be able to configure the requested period. 
*/ - required_clk_rate = - (NSEC_PER_SEC / period_ns) << PWM_DUTY_WIDTH; + required_clk_rate = DIV_ROUND_UP_ULL((u64)NSEC_PER_SEC << PWM_DUTY_WIDTH, + period_ns); err = clk_set_rate(pc->clk, required_clk_rate); if (err < 0) diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c index 94331d999d27..5ac2dc1e2abd 100644 --- a/drivers/rapidio/devices/rio_mport_cdev.c +++ b/drivers/rapidio/devices/rio_mport_cdev.c @@ -1803,8 +1803,11 @@ static int rio_mport_add_riodev(struct mport_cdev_priv *priv, rio_init_dbell_res(&rdev->riores[RIO_DOORBELL_RESOURCE], 0, 0xffff); err = rio_add_device(rdev); - if (err) - goto cleanup; + if (err) { + put_device(&rdev->dev); + return err; + } + rio_dev_get(rdev); return 0; @@ -1900,10 +1903,6 @@ static int mport_cdev_open(struct inode *inode, struct file *filp) priv->md = chdev; - mutex_lock(&chdev->file_mutex); - list_add_tail(&priv->list, &chdev->file_list); - mutex_unlock(&chdev->file_mutex); - INIT_LIST_HEAD(&priv->db_filters); INIT_LIST_HEAD(&priv->pw_filters); spin_lock_init(&priv->fifo_lock); @@ -1912,6 +1911,7 @@ static int mport_cdev_open(struct inode *inode, struct file *filp) sizeof(struct rio_event) * MPORT_EVENT_DEPTH, GFP_KERNEL); if (ret < 0) { + put_device(&chdev->dev); dev_err(&chdev->dev, DRV_NAME ": kfifo_alloc failed\n"); ret = -ENOMEM; goto err_fifo; @@ -1922,6 +1922,9 @@ static int mport_cdev_open(struct inode *inode, struct file *filp) spin_lock_init(&priv->req_lock); mutex_init(&priv->dma_lock); #endif + mutex_lock(&chdev->file_mutex); + list_add_tail(&priv->list, &chdev->file_list); + mutex_unlock(&chdev->file_mutex); filp->private_data = priv; goto out; diff --git a/drivers/rapidio/rio-scan.c b/drivers/rapidio/rio-scan.c index 19b0c33f4a62..fdcf742b2adb 100644 --- a/drivers/rapidio/rio-scan.c +++ b/drivers/rapidio/rio-scan.c @@ -454,8 +454,12 @@ static struct rio_dev *rio_setup_device(struct rio_net *net, 0, 0xffff); ret = rio_add_device(rdev); - if (ret) - goto cleanup; + if (ret) { + if (rswitch) + kfree(rswitch->route_table); + put_device(&rdev->dev); + return NULL; + } rio_dev_get(rdev); diff --git a/drivers/rapidio/rio.c b/drivers/rapidio/rio.c index e74cf09eeff0..9544b8ee0c96 100644 --- a/drivers/rapidio/rio.c +++ b/drivers/rapidio/rio.c @@ -2186,11 +2186,16 @@ int rio_register_mport(struct rio_mport *port) atomic_set(&port->state, RIO_DEVICE_RUNNING); res = device_register(&port->dev); - if (res) + if (res) { dev_err(&port->dev, "RIO: mport%d registration failed ERR=%d\n", port->id, res); - else + mutex_lock(&rio_mport_list_lock); + list_del(&port->node); + mutex_unlock(&rio_mport_list_lock); + put_device(&port->dev); + } else { dev_dbg(&port->dev, "RIO: registered mport%d\n", port->id); + } return res; } diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index 221ae807b379..3eae3aa5ad1d 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -962,7 +962,7 @@ static int drms_uA_update(struct regulator_dev *rdev) /* get input voltage */ input_uV = 0; if (rdev->supply) - input_uV = regulator_get_voltage(rdev->supply); + input_uV = regulator_get_voltage_rdev(rdev->supply->rdev); if (input_uV <= 0) input_uV = rdev->constraints->input_uV; if (input_uV <= 0) { @@ -1531,7 +1531,13 @@ static int set_machine_constraints(struct regulator_dev *rdev) if (rdev->supply_name && !rdev->supply) return -EPROBE_DEFER; - if (rdev->supply) { + /* If supplying regulator has already been enabled, + * it's not intended to have use_count increment + * when rdev is only boot-on. 
+ */ + if (rdev->supply && + (rdev->constraints->always_on || + !regulator_is_enabled(rdev->supply))) { ret = regulator_enable(rdev->supply); if (ret < 0) { _regulator_put(rdev->supply); @@ -1577,6 +1583,7 @@ static int set_supply(struct regulator_dev *rdev, rdev->supply = create_regulator(supply_rdev, &rdev->dev, "SUPPLY"); if (rdev->supply == NULL) { + module_put(supply_rdev->owner); err = -ENOMEM; return err; } @@ -1750,7 +1757,7 @@ static struct regulator *create_regulator(struct regulator_dev *rdev, regulator = kzalloc(sizeof(*regulator), GFP_KERNEL); if (regulator == NULL) { - kfree(supply_name); + kfree_const(supply_name); return NULL; } @@ -1880,6 +1887,7 @@ static struct regulator_dev *regulator_dev_lookup(struct device *dev, node = of_get_regulator(dev, supply); if (node) { r = of_find_regulator_by_node(node); + of_node_put(node); if (r) return r; @@ -5546,6 +5554,7 @@ unset_supplies: regulator_remove_coupling(rdev); mutex_unlock(®ulator_list_mutex); wash: + regulator_put(rdev->supply); kfree(rdev->coupling_desc.coupled_rdevs); mutex_lock(®ulator_list_mutex); regulator_ena_gpio_free(rdev); diff --git a/drivers/regulator/qcom-labibb-regulator.c b/drivers/regulator/qcom-labibb-regulator.c index 639b71eb41ff..bcf7140f3bc9 100644 --- a/drivers/regulator/qcom-labibb-regulator.c +++ b/drivers/regulator/qcom-labibb-regulator.c @@ -822,6 +822,7 @@ static int qcom_labibb_regulator_probe(struct platform_device *pdev) if (irq == 0) irq = -EINVAL; + of_node_put(reg_node); return dev_err_probe(vreg->dev, irq, "Short-circuit irq not found.\n"); } diff --git a/drivers/regulator/qcom-rpmh-regulator.c b/drivers/regulator/qcom-rpmh-regulator.c index 7f458d510483..27efdbbd90d9 100644 --- a/drivers/regulator/qcom-rpmh-regulator.c +++ b/drivers/regulator/qcom-rpmh-regulator.c @@ -1108,7 +1108,7 @@ static const struct rpmh_vreg_init_data pm7325_vreg_data[] = { static const struct rpmh_vreg_init_data pmr735a_vreg_data[] = { RPMH_VREG("smps1", "smp%s1", &pmic5_ftsmps520, "vdd-s1"), RPMH_VREG("smps2", "smp%s2", &pmic5_ftsmps520, "vdd-s2"), - RPMH_VREG("smps3", "smp%s3", &pmic5_hfsmps510, "vdd-s3"), + RPMH_VREG("smps3", "smp%s3", &pmic5_hfsmps515, "vdd-s3"), RPMH_VREG("ldo1", "ldo%s1", &pmic5_nldo, "vdd-l1-l2"), RPMH_VREG("ldo2", "ldo%s2", &pmic5_nldo, "vdd-l1-l2"), RPMH_VREG("ldo3", "ldo%s3", &pmic5_nldo, "vdd-l3"), diff --git a/drivers/remoteproc/qcom_q6v5_pas.c b/drivers/remoteproc/qcom_q6v5_pas.c index 78d90d856e40..fbcbc00f2e64 100644 --- a/drivers/remoteproc/qcom_q6v5_pas.c +++ b/drivers/remoteproc/qcom_q6v5_pas.c @@ -386,6 +386,7 @@ static int adsp_alloc_memory_region(struct qcom_adsp *adsp) } ret = of_address_to_resource(node, 0, &r); + of_node_put(node); if (ret) return ret; @@ -498,6 +499,7 @@ detach_proxy_pds: detach_active_pds: adsp_pds_detach(adsp, adsp->active_pds, adsp->active_pd_count); free_rproc: + device_init_wakeup(adsp->dev, false); rproc_free(rproc); return ret; @@ -513,6 +515,8 @@ static int adsp_remove(struct platform_device *pdev) qcom_remove_sysmon_subdev(adsp->sysmon); qcom_remove_smd_subdev(adsp->rproc, &adsp->smd_subdev); qcom_remove_ssr_subdev(adsp->rproc, &adsp->ssr_subdev); + adsp_pds_detach(adsp, adsp->proxy_pds, adsp->proxy_pd_count); + device_init_wakeup(adsp->dev, false); rproc_free(adsp->rproc); return 0; diff --git a/drivers/remoteproc/qcom_q6v5_wcss.c b/drivers/remoteproc/qcom_q6v5_wcss.c index 20d50ec7eff1..cfd34ffcbb12 100644 --- a/drivers/remoteproc/qcom_q6v5_wcss.c +++ b/drivers/remoteproc/qcom_q6v5_wcss.c @@ -351,7 +351,7 @@ static int 
q6v5_wcss_qcs404_power_on(struct q6v5_wcss *wcss) if (ret) { dev_err(wcss->dev, "xo cbcr enabling timed out (rc:%d)\n", ret); - return ret; + goto disable_xo_cbcr_clk; } writel(0, wcss->reg_base + Q6SS_CGC_OVERRIDE); @@ -417,6 +417,7 @@ disable_sleep_cbcr_clk: val = readl(wcss->reg_base + Q6SS_SLEEP_CBCR); val &= ~Q6SS_CLK_ENABLE; writel(val, wcss->reg_base + Q6SS_SLEEP_CBCR); +disable_xo_cbcr_clk: val = readl(wcss->reg_base + Q6SS_XO_CBCR); val &= ~Q6SS_CLK_ENABLE; writel(val, wcss->reg_base + Q6SS_XO_CBCR); @@ -827,6 +828,9 @@ static int q6v5_wcss_init_mmio(struct q6v5_wcss *wcss, int ret; res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qdsp6"); + if (!res) + return -EINVAL; + wcss->reg_base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (!wcss->reg_base) diff --git a/drivers/remoteproc/qcom_sysmon.c b/drivers/remoteproc/qcom_sysmon.c index a9f04dd83ab6..fbfaf2637a91 100644 --- a/drivers/remoteproc/qcom_sysmon.c +++ b/drivers/remoteproc/qcom_sysmon.c @@ -650,7 +650,9 @@ struct qcom_sysmon *qcom_add_sysmon_subdev(struct rproc *rproc, if (sysmon->shutdown_irq != -ENODATA) { dev_err(sysmon->dev, "failed to retrieve shutdown-ack IRQ\n"); - return ERR_PTR(sysmon->shutdown_irq); + ret = sysmon->shutdown_irq; + kfree(sysmon); + return ERR_PTR(ret); } } else { ret = devm_request_threaded_irq(sysmon->dev, @@ -661,6 +663,7 @@ struct qcom_sysmon *qcom_add_sysmon_subdev(struct rproc *rproc, if (ret) { dev_err(sysmon->dev, "failed to acquire shutdown-ack IRQ\n"); + kfree(sysmon); return ERR_PTR(ret); } } diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c index 775df165eb45..97e59f746126 100644 --- a/drivers/remoteproc/remoteproc_core.c +++ b/drivers/remoteproc/remoteproc_core.c @@ -1955,12 +1955,18 @@ static void rproc_crash_handler_work(struct work_struct *work) mutex_lock(&rproc->lock); - if (rproc->state == RPROC_CRASHED || rproc->state == RPROC_OFFLINE) { + if (rproc->state == RPROC_CRASHED) { /* handle only the first crash detected */ mutex_unlock(&rproc->lock); return; } + if (rproc->state == RPROC_OFFLINE) { + /* Don't recover if the remote processor was stopped */ + mutex_unlock(&rproc->lock); + goto out; + } + rproc->state = RPROC_CRASHED; dev_err(dev, "handling crash #%u in %s\n", ++rproc->crash_cnt, rproc->name); @@ -1970,6 +1976,7 @@ static void rproc_crash_handler_work(struct work_struct *work) if (!rproc->recovery_disabled) rproc_trigger_recovery(rproc); +out: pm_relax(rproc->dev.parent); } diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c index 7c006c2b125f..00e2ca7374ec 100644 --- a/drivers/rtc/rtc-cmos.c +++ b/drivers/rtc/rtc-cmos.c @@ -744,6 +744,168 @@ static irqreturn_t cmos_interrupt(int irq, void *p) return IRQ_NONE; } +#ifdef CONFIG_ACPI + +#include + +static u32 rtc_handler(void *context) +{ + struct device *dev = context; + struct cmos_rtc *cmos = dev_get_drvdata(dev); + unsigned char rtc_control = 0; + unsigned char rtc_intr; + unsigned long flags; + + + /* + * Always update rtc irq when ACPI is used as RTC Alarm. + * Or else, ACPI SCI is enabled during suspend/resume only, + * update rtc irq in that case. + */ + if (cmos_use_acpi_alarm()) + cmos_interrupt(0, (void *)cmos->rtc); + else { + /* Fix me: can we use cmos_interrupt() here as well? 
*/ + spin_lock_irqsave(&rtc_lock, flags); + if (cmos_rtc.suspend_ctrl) + rtc_control = CMOS_READ(RTC_CONTROL); + if (rtc_control & RTC_AIE) { + cmos_rtc.suspend_ctrl &= ~RTC_AIE; + CMOS_WRITE(rtc_control, RTC_CONTROL); + rtc_intr = CMOS_READ(RTC_INTR_FLAGS); + rtc_update_irq(cmos->rtc, 1, rtc_intr); + } + spin_unlock_irqrestore(&rtc_lock, flags); + } + + pm_wakeup_hard_event(dev); + acpi_clear_event(ACPI_EVENT_RTC); + acpi_disable_event(ACPI_EVENT_RTC, 0); + return ACPI_INTERRUPT_HANDLED; +} + +static void acpi_rtc_event_setup(struct device *dev) +{ + if (acpi_disabled) + return; + + acpi_install_fixed_event_handler(ACPI_EVENT_RTC, rtc_handler, dev); + /* + * After the RTC handler is installed, the Fixed_RTC event should + * be disabled. Only when the RTC alarm is set will it be enabled. + */ + acpi_clear_event(ACPI_EVENT_RTC); + acpi_disable_event(ACPI_EVENT_RTC, 0); +} + +static void acpi_rtc_event_cleanup(void) +{ + if (acpi_disabled) + return; + + acpi_remove_fixed_event_handler(ACPI_EVENT_RTC, rtc_handler); +} + +static void rtc_wake_on(struct device *dev) +{ + acpi_clear_event(ACPI_EVENT_RTC); + acpi_enable_event(ACPI_EVENT_RTC, 0); +} + +static void rtc_wake_off(struct device *dev) +{ + acpi_disable_event(ACPI_EVENT_RTC, 0); +} + +#ifdef CONFIG_X86 +/* Enable use_acpi_alarm mode for Intel platforms no earlier than 2015 */ +static void use_acpi_alarm_quirks(void) +{ + if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) + return; + + if (!is_hpet_enabled()) + return; + + if (dmi_get_bios_year() < 2015) + return; + + use_acpi_alarm = true; +} +#else +static inline void use_acpi_alarm_quirks(void) { } +#endif + +static void acpi_cmos_wake_setup(struct device *dev) +{ + if (acpi_disabled) + return; + + use_acpi_alarm_quirks(); + + cmos_rtc.wake_on = rtc_wake_on; + cmos_rtc.wake_off = rtc_wake_off; + + /* ACPI tables bug workaround. 
*/ + if (acpi_gbl_FADT.month_alarm && !acpi_gbl_FADT.day_alarm) { + dev_dbg(dev, "bogus FADT month_alarm (%d)\n", + acpi_gbl_FADT.month_alarm); + acpi_gbl_FADT.month_alarm = 0; + } + + cmos_rtc.day_alrm = acpi_gbl_FADT.day_alarm; + cmos_rtc.mon_alrm = acpi_gbl_FADT.month_alarm; + cmos_rtc.century = acpi_gbl_FADT.century; + + if (acpi_gbl_FADT.flags & ACPI_FADT_S4_RTC_WAKE) + dev_info(dev, "RTC can wake from S4\n"); + + /* RTC always wakes from S1/S2/S3, and often S4/STD */ + device_init_wakeup(dev, 1); +} + +static void cmos_check_acpi_rtc_status(struct device *dev, + unsigned char *rtc_control) +{ + struct cmos_rtc *cmos = dev_get_drvdata(dev); + acpi_event_status rtc_status; + acpi_status status; + + if (acpi_gbl_FADT.flags & ACPI_FADT_FIXED_RTC) + return; + + status = acpi_get_event_status(ACPI_EVENT_RTC, &rtc_status); + if (ACPI_FAILURE(status)) { + dev_err(dev, "Could not get RTC status\n"); + } else if (rtc_status & ACPI_EVENT_FLAG_SET) { + unsigned char mask; + *rtc_control &= ~RTC_AIE; + CMOS_WRITE(*rtc_control, RTC_CONTROL); + mask = CMOS_READ(RTC_INTR_FLAGS); + rtc_update_irq(cmos->rtc, 1, mask); + } +} + +#else /* !CONFIG_ACPI */ + +static inline void acpi_rtc_event_setup(struct device *dev) +{ +} + +static inline void acpi_rtc_event_cleanup(void) +{ +} + +static inline void acpi_cmos_wake_setup(struct device *dev) +{ +} + +static inline void cmos_check_acpi_rtc_status(struct device *dev, + unsigned char *rtc_control) +{ +} +#endif /* CONFIG_ACPI */ + #ifdef CONFIG_PNP #define INITSECTION @@ -827,19 +989,27 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq) if (info->address_space) address_space = info->address_space; - if (info->rtc_day_alarm && info->rtc_day_alarm < 128) - cmos_rtc.day_alrm = info->rtc_day_alarm; - if (info->rtc_mon_alarm && info->rtc_mon_alarm < 128) - cmos_rtc.mon_alrm = info->rtc_mon_alarm; - if (info->rtc_century && info->rtc_century < 128) - cmos_rtc.century = info->rtc_century; + cmos_rtc.day_alrm = info->rtc_day_alarm; + cmos_rtc.mon_alrm = info->rtc_mon_alarm; + cmos_rtc.century = info->rtc_century; if (info->wake_on && info->wake_off) { cmos_rtc.wake_on = info->wake_on; cmos_rtc.wake_off = info->wake_off; } + } else { + acpi_cmos_wake_setup(dev); } + if (cmos_rtc.day_alrm >= 128) + cmos_rtc.day_alrm = 0; + + if (cmos_rtc.mon_alrm >= 128) + cmos_rtc.mon_alrm = 0; + + if (cmos_rtc.century >= 128) + cmos_rtc.century = 0; + cmos_rtc.dev = dev; dev_set_drvdata(dev, &cmos_rtc); @@ -928,6 +1098,13 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq) nvmem_cfg.size = address_space - NVRAM_OFFSET; devm_rtc_nvmem_register(cmos_rtc.rtc, &nvmem_cfg); + /* + * Everything has gone well so far, so by default register a handler for + * the ACPI RTC fixed event. + */ + if (!info) + acpi_rtc_event_setup(dev); + dev_info(dev, "%s%s, %d bytes nvram%s\n", !is_valid_irq(rtc_irq) ? "no alarms" : cmos_rtc.mon_alrm ? 
"alarms up to one year" : @@ -973,6 +1150,9 @@ static void cmos_do_remove(struct device *dev) hpet_unregister_irq_handler(cmos_interrupt); } + if (!dev_get_platdata(dev)) + acpi_rtc_event_cleanup(); + cmos->rtc = NULL; ports = cmos->iomem; @@ -1122,9 +1302,6 @@ static void cmos_check_wkalrm(struct device *dev) } } -static void cmos_check_acpi_rtc_status(struct device *dev, - unsigned char *rtc_control); - static int __maybe_unused cmos_resume(struct device *dev) { struct cmos_rtc *cmos = dev_get_drvdata(dev); @@ -1191,174 +1368,16 @@ static SIMPLE_DEV_PM_OPS(cmos_pm_ops, cmos_suspend, cmos_resume); * predate even PNPBIOS should set up platform_bus devices. */ -#ifdef CONFIG_ACPI - -#include - -static u32 rtc_handler(void *context) -{ - struct device *dev = context; - struct cmos_rtc *cmos = dev_get_drvdata(dev); - unsigned char rtc_control = 0; - unsigned char rtc_intr; - unsigned long flags; - - - /* - * Always update rtc irq when ACPI is used as RTC Alarm. - * Or else, ACPI SCI is enabled during suspend/resume only, - * update rtc irq in that case. - */ - if (cmos_use_acpi_alarm()) - cmos_interrupt(0, (void *)cmos->rtc); - else { - /* Fix me: can we use cmos_interrupt() here as well? */ - spin_lock_irqsave(&rtc_lock, flags); - if (cmos_rtc.suspend_ctrl) - rtc_control = CMOS_READ(RTC_CONTROL); - if (rtc_control & RTC_AIE) { - cmos_rtc.suspend_ctrl &= ~RTC_AIE; - CMOS_WRITE(rtc_control, RTC_CONTROL); - rtc_intr = CMOS_READ(RTC_INTR_FLAGS); - rtc_update_irq(cmos->rtc, 1, rtc_intr); - } - spin_unlock_irqrestore(&rtc_lock, flags); - } - - pm_wakeup_hard_event(dev); - acpi_clear_event(ACPI_EVENT_RTC); - acpi_disable_event(ACPI_EVENT_RTC, 0); - return ACPI_INTERRUPT_HANDLED; -} - -static inline void rtc_wake_setup(struct device *dev) -{ - acpi_install_fixed_event_handler(ACPI_EVENT_RTC, rtc_handler, dev); - /* - * After the RTC handler is installed, the Fixed_RTC event should - * be disabled. Only when the RTC alarm is set will it be enabled. - */ - acpi_clear_event(ACPI_EVENT_RTC); - acpi_disable_event(ACPI_EVENT_RTC, 0); -} - -static void rtc_wake_on(struct device *dev) -{ - acpi_clear_event(ACPI_EVENT_RTC); - acpi_enable_event(ACPI_EVENT_RTC, 0); -} - -static void rtc_wake_off(struct device *dev) -{ - acpi_disable_event(ACPI_EVENT_RTC, 0); -} - -#ifdef CONFIG_X86 -/* Enable use_acpi_alarm mode for Intel platforms no earlier than 2015 */ -static void use_acpi_alarm_quirks(void) -{ - if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) - return; - - if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0)) - return; - - if (!is_hpet_enabled()) - return; - - if (dmi_get_bios_year() < 2015) - return; - - use_acpi_alarm = true; -} -#else -static inline void use_acpi_alarm_quirks(void) { } -#endif - -/* Every ACPI platform has a mc146818 compatible "cmos rtc". Here we find - * its device node and pass extra config data. This helps its driver use - * capabilities that the now-obsolete mc146818 didn't have, and informs it - * that this board's RTC is wakeup-capable (per ACPI spec). 
- */ -static struct cmos_rtc_board_info acpi_rtc_info; - -static void cmos_wake_setup(struct device *dev) -{ - if (acpi_disabled) - return; - - use_acpi_alarm_quirks(); - - rtc_wake_setup(dev); - acpi_rtc_info.wake_on = rtc_wake_on; - acpi_rtc_info.wake_off = rtc_wake_off; - - /* workaround bug in some ACPI tables */ - if (acpi_gbl_FADT.month_alarm && !acpi_gbl_FADT.day_alarm) { - dev_dbg(dev, "bogus FADT month_alarm (%d)\n", - acpi_gbl_FADT.month_alarm); - acpi_gbl_FADT.month_alarm = 0; - } - - acpi_rtc_info.rtc_day_alarm = acpi_gbl_FADT.day_alarm; - acpi_rtc_info.rtc_mon_alarm = acpi_gbl_FADT.month_alarm; - acpi_rtc_info.rtc_century = acpi_gbl_FADT.century; - - /* NOTE: S4_RTC_WAKE is NOT currently useful to Linux */ - if (acpi_gbl_FADT.flags & ACPI_FADT_S4_RTC_WAKE) - dev_info(dev, "RTC can wake from S4\n"); - - dev->platform_data = &acpi_rtc_info; - - /* RTC always wakes from S1/S2/S3, and often S4/STD */ - device_init_wakeup(dev, 1); -} - -static void cmos_check_acpi_rtc_status(struct device *dev, - unsigned char *rtc_control) -{ - struct cmos_rtc *cmos = dev_get_drvdata(dev); - acpi_event_status rtc_status; - acpi_status status; - - if (acpi_gbl_FADT.flags & ACPI_FADT_FIXED_RTC) - return; - - status = acpi_get_event_status(ACPI_EVENT_RTC, &rtc_status); - if (ACPI_FAILURE(status)) { - dev_err(dev, "Could not get RTC status\n"); - } else if (rtc_status & ACPI_EVENT_FLAG_SET) { - unsigned char mask; - *rtc_control &= ~RTC_AIE; - CMOS_WRITE(*rtc_control, RTC_CONTROL); - mask = CMOS_READ(RTC_INTR_FLAGS); - rtc_update_irq(cmos->rtc, 1, mask); - } -} - -#else - -static void cmos_wake_setup(struct device *dev) -{ -} - -static void cmos_check_acpi_rtc_status(struct device *dev, - unsigned char *rtc_control) -{ -} - -#endif - #ifdef CONFIG_PNP #include static int cmos_pnp_probe(struct pnp_dev *pnp, const struct pnp_device_id *id) { - cmos_wake_setup(&pnp->dev); + int irq; if (pnp_port_start(pnp, 0) == 0x70 && !pnp_irq_valid(pnp, 0)) { - unsigned int irq = 0; + irq = 0; #ifdef CONFIG_X86 /* Some machines contain a PNP entry for the RTC, but * don't define the IRQ. 
It should always be safe to @@ -1367,13 +1386,11 @@ static int cmos_pnp_probe(struct pnp_dev *pnp, const struct pnp_device_id *id) if (nr_legacy_irqs()) irq = RTC_IRQ; #endif - return cmos_do_probe(&pnp->dev, - pnp_get_resource(pnp, IORESOURCE_IO, 0), irq); } else { - return cmos_do_probe(&pnp->dev, - pnp_get_resource(pnp, IORESOURCE_IO, 0), - pnp_irq(pnp, 0)); + irq = pnp_irq(pnp, 0); } + + return cmos_do_probe(&pnp->dev, pnp_get_resource(pnp, IORESOURCE_IO, 0), irq); } static void cmos_pnp_remove(struct pnp_dev *pnp) @@ -1460,7 +1477,6 @@ static int __init cmos_platform_probe(struct platform_device *pdev) int irq; cmos_of_init(pdev); - cmos_wake_setup(&pdev->dev); if (RTC_IOMAPPED) resource = platform_get_resource(pdev, IORESOURCE_IO, 0); diff --git a/drivers/rtc/rtc-ds1347.c b/drivers/rtc/rtc-ds1347.c index 157bf5209ac4..a40c1a52df65 100644 --- a/drivers/rtc/rtc-ds1347.c +++ b/drivers/rtc/rtc-ds1347.c @@ -112,7 +112,7 @@ static int ds1347_set_time(struct device *dev, struct rtc_time *dt) return err; century = (dt->tm_year / 100) + 19; - err = regmap_write(map, DS1347_CENTURY_REG, century); + err = regmap_write(map, DS1347_CENTURY_REG, bin2bcd(century)); if (err) return err; diff --git a/drivers/rtc/rtc-mxc_v2.c b/drivers/rtc/rtc-mxc_v2.c index 5e0383401629..f6d2ad91ff7a 100644 --- a/drivers/rtc/rtc-mxc_v2.c +++ b/drivers/rtc/rtc-mxc_v2.c @@ -336,8 +336,10 @@ static int mxc_rtc_probe(struct platform_device *pdev) } pdata->rtc = devm_rtc_allocate_device(&pdev->dev); - if (IS_ERR(pdata->rtc)) + if (IS_ERR(pdata->rtc)) { + clk_disable_unprepare(pdata->clk); return PTR_ERR(pdata->rtc); + } pdata->rtc->ops = &mxc_rtc_ops; pdata->rtc->range_max = U32_MAX; diff --git a/drivers/rtc/rtc-pcf85063.c b/drivers/rtc/rtc-pcf85063.c index ea75b71a1f16..d97f9793123e 100644 --- a/drivers/rtc/rtc-pcf85063.c +++ b/drivers/rtc/rtc-pcf85063.c @@ -169,10 +169,10 @@ static int pcf85063_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm) if (ret) return ret; - alrm->time.tm_sec = bcd2bin(buf[0]); - alrm->time.tm_min = bcd2bin(buf[1]); - alrm->time.tm_hour = bcd2bin(buf[2]); - alrm->time.tm_mday = bcd2bin(buf[3]); + alrm->time.tm_sec = bcd2bin(buf[0] & 0x7f); + alrm->time.tm_min = bcd2bin(buf[1] & 0x7f); + alrm->time.tm_hour = bcd2bin(buf[2] & 0x3f); + alrm->time.tm_mday = bcd2bin(buf[3] & 0x3f); ret = regmap_read(pcf85063->regmap, PCF85063_REG_CTRL2, &val); if (ret) @@ -424,7 +424,7 @@ static int pcf85063_clkout_control(struct clk_hw *hw, bool enable) unsigned int buf; int ret; - ret = regmap_read(pcf85063->regmap, PCF85063_REG_OFFSET, &buf); + ret = regmap_read(pcf85063->regmap, PCF85063_REG_CTRL2, &buf); if (ret < 0) return ret; buf &= PCF85063_REG_CLKO_F_MASK; diff --git a/drivers/rtc/rtc-pic32.c b/drivers/rtc/rtc-pic32.c index 7fb9145c43bd..fa351ac20158 100644 --- a/drivers/rtc/rtc-pic32.c +++ b/drivers/rtc/rtc-pic32.c @@ -324,16 +324,16 @@ static int pic32_rtc_probe(struct platform_device *pdev) spin_lock_init(&pdata->alarm_lock); + pdata->rtc = devm_rtc_allocate_device(&pdev->dev); + if (IS_ERR(pdata->rtc)) + return PTR_ERR(pdata->rtc); + clk_prepare_enable(pdata->clk); pic32_rtc_enable(pdata, 1); device_init_wakeup(&pdev->dev, 1); - pdata->rtc = devm_rtc_allocate_device(&pdev->dev); - if (IS_ERR(pdata->rtc)) - return PTR_ERR(pdata->rtc); - pdata->rtc->ops = &pic32_rtcops; pdata->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000; pdata->rtc->range_max = RTC_TIMESTAMP_END_2099; diff --git a/drivers/rtc/rtc-snvs.c b/drivers/rtc/rtc-snvs.c index bd929b0e7d7d..d82acf1af1fa 100644 --- a/drivers/rtc/rtc-snvs.c 
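The rtc-ds1347 hunk above is a reminder that this register family stores values in packed BCD, so binary values must pass through bin2bcd() before a register write and bcd2bin() after a read. A small sketch of the round trip, not taken from the driver:

```c
#include <linux/bcd.h>
#include <linux/bug.h>
#include <linux/types.h>

static void example_bcd_round_trip(void)
{
	unsigned int century = 20;		/* binary value */
	u8 reg = bin2bcd(century);		/* 0x20, as stored in the register */
	unsigned int back = bcd2bin(reg);	/* 20 again */

	WARN_ON(back != century);
}
```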
+++ b/drivers/rtc/rtc-snvs.c @@ -32,6 +32,14 @@ #define SNVS_LPPGDR_INIT 0x41736166 #define CNTR_TO_SECS_SH 15 +/* The maximum RTC clock cycles that are allowed to pass between two + * consecutive clock counter register reads. If the values are corrupted a + * bigger difference is expected. The RTC frequency is 32kHz. With 320 cycles + * we end at 10ms which should be enough for most cases. If it once takes + * longer than expected we do a retry. + */ +#define MAX_RTC_READ_DIFF_CYCLES 320 + struct snvs_rtc_data { struct rtc_device *rtc; struct regmap *regmap; @@ -56,6 +64,7 @@ static u64 rtc_read_lpsrt(struct snvs_rtc_data *data) static u32 rtc_read_lp_counter(struct snvs_rtc_data *data) { u64 read1, read2; + s64 diff; unsigned int timeout = 100; /* As expected, the registers might update between the read of the LSB @@ -66,7 +75,8 @@ static u32 rtc_read_lp_counter(struct snvs_rtc_data *data) do { read2 = read1; read1 = rtc_read_lpsrt(data); - } while (read1 != read2 && --timeout); + diff = read1 - read2; + } while (((diff < 0) || (diff > MAX_RTC_READ_DIFF_CYCLES)) && --timeout); if (!timeout) dev_err(&data->rtc->dev, "Timeout trying to get valid LPSRT Counter read\n"); @@ -78,13 +88,15 @@ static u32 rtc_read_lp_counter(struct snvs_rtc_data *data) static int rtc_read_lp_counter_lsb(struct snvs_rtc_data *data, u32 *lsb) { u32 count1, count2; + s32 diff; unsigned int timeout = 100; regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &count1); do { count2 = count1; regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &count1); - } while (count1 != count2 && --timeout); + diff = count1 - count2; + } while (((diff < 0) || (diff > MAX_RTC_READ_DIFF_CYCLES)) && --timeout); if (!timeout) { dev_err(&data->rtc->dev, "Timeout trying to get valid LPSRT Counter read\n"); return -ETIMEDOUT; diff --git a/drivers/rtc/rtc-st-lpc.c b/drivers/rtc/rtc-st-lpc.c index bdb20f63254e..0f8e4231098e 100644 --- a/drivers/rtc/rtc-st-lpc.c +++ b/drivers/rtc/rtc-st-lpc.c @@ -238,6 +238,7 @@ static int st_rtc_probe(struct platform_device *pdev) rtc->clkrate = clk_get_rate(rtc->clk); if (!rtc->clkrate) { + clk_disable_unprepare(rtc->clk); dev_err(&pdev->dev, "Unable to fetch clock rate\n"); return -EINVAL; } diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c index fd705429708e..f91b6cfd7ed0 100644 --- a/drivers/s390/net/ctcm_main.c +++ b/drivers/s390/net/ctcm_main.c @@ -825,16 +825,9 @@ done: /** * Start transmission of a packet. * Called from generic network device layer. - * - * skb Pointer to buffer containing the packet. - * dev Pointer to interface struct. - * - * returns 0 if packet consumed, !0 if packet rejected. - * Note: If we return !0, then the packet is free'd by - * the generic network layer. 
*/ /* first merge version - leaving both functions separated */ -static int ctcm_tx(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t ctcm_tx(struct sk_buff *skb, struct net_device *dev) { struct ctcm_priv *priv = dev->ml_priv; @@ -877,7 +870,7 @@ static int ctcm_tx(struct sk_buff *skb, struct net_device *dev) } /* unmerged MPC variant of ctcm_tx */ -static int ctcmpc_tx(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t ctcmpc_tx(struct sk_buff *skb, struct net_device *dev) { int len = 0; struct ctcm_priv *priv = dev->ml_priv; diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c index 06a322bdced6..7e743f4717a9 100644 --- a/drivers/s390/net/lcs.c +++ b/drivers/s390/net/lcs.c @@ -1518,9 +1518,8 @@ lcs_txbuffer_cb(struct lcs_channel *channel, struct lcs_buffer *buffer) /** * Packet transmit function called by network stack */ -static int -__lcs_start_xmit(struct lcs_card *card, struct sk_buff *skb, - struct net_device *dev) +static netdev_tx_t __lcs_start_xmit(struct lcs_card *card, struct sk_buff *skb, + struct net_device *dev) { struct lcs_header *header; int rc = NETDEV_TX_OK; @@ -1581,8 +1580,7 @@ out: return rc; } -static int -lcs_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t lcs_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct lcs_card *card; int rc; diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c index 5a0c2f07a3a2..ce5f0ffd6cc8 100644 --- a/drivers/s390/net/netiucv.c +++ b/drivers/s390/net/netiucv.c @@ -1252,15 +1252,8 @@ static int netiucv_close(struct net_device *dev) /** * Start transmission of a packet. * Called from generic network device layer. - * - * @param skb Pointer to buffer containing the packet. - * @param dev Pointer to interface struct. - * - * @return 0 if packet consumed, !0 if packet rejected. - * Note: If we return !0, then the packet is free'd by - * the generic network layer. 
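The ctcm and lcs hunks above (and the netiucv hunk that follows) convert the transmit callbacks from int to netdev_tx_t, the return type the .ndo_start_xmit hook expects. A minimal sketch of the expected shape, with hypothetical names:

```c
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_tx_t example_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	/* hand the skb to hardware here; this sketch just consumes it */
	dev_kfree_skb_any(skb);

	return NETDEV_TX_OK;	/* or NETDEV_TX_BUSY to have the stack retry */
}

static const struct net_device_ops example_netdev_ops = {
	.ndo_start_xmit	= example_start_xmit,
};
```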
*/ -static int netiucv_tx(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t netiucv_tx(struct sk_buff *skb, struct net_device *dev) { struct netiucv_priv *privptr = netdev_priv(dev); int rc; diff --git a/drivers/scsi/elx/efct/efct_driver.c b/drivers/scsi/elx/efct/efct_driver.c index eab68fd9337a..37e1ab96ee5b 100644 --- a/drivers/scsi/elx/efct/efct_driver.c +++ b/drivers/scsi/elx/efct/efct_driver.c @@ -42,6 +42,7 @@ efct_device_init(void) rc = efct_scsi_reg_fc_transport(); if (rc) { + efct_scsi_tgt_driver_exit(); pr_err("failed to register to FC host\n"); return rc; } diff --git a/drivers/scsi/elx/libefc/efclib.h b/drivers/scsi/elx/libefc/efclib.h index ee291cabf7e0..b14e516be7d5 100644 --- a/drivers/scsi/elx/libefc/efclib.h +++ b/drivers/scsi/elx/libefc/efclib.h @@ -58,10 +58,12 @@ enum efc_node_send_ls_acc { #define EFC_LINK_STATUS_UP 0 #define EFC_LINK_STATUS_DOWN 1 +enum efc_sm_event; + /* State machine context header */ struct efc_sm_ctx { void (*current_state)(struct efc_sm_ctx *ctx, - u32 evt, void *arg); + enum efc_sm_event evt, void *arg); const char *description; void *app; @@ -364,7 +366,7 @@ struct efc_node { int prev_evt; void (*nodedb_state)(struct efc_sm_ctx *ctx, - u32 evt, void *arg); + enum efc_sm_event evt, void *arg); struct timer_list gidpt_delay_timer; u64 time_last_gidpt_msec; diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index 5ae6c207d3ac..76dbdae0e987 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -2501,6 +2501,7 @@ static int __init fcoe_init(void) out_free: mutex_unlock(&fcoe_config_mutex); + fcoe_transport_detach(&fcoe_sw_transport); out_destroy: destroy_workqueue(fcoe_wq); return rc; diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c index af658aa38fed..6260aa5ea6af 100644 --- a/drivers/scsi/fcoe/fcoe_sysfs.c +++ b/drivers/scsi/fcoe/fcoe_sysfs.c @@ -830,14 +830,15 @@ struct fcoe_ctlr_device *fcoe_ctlr_device_add(struct device *parent, dev_set_name(&ctlr->dev, "ctlr_%d", ctlr->id); error = device_register(&ctlr->dev); - if (error) - goto out_del_q2; + if (error) { + destroy_workqueue(ctlr->devloss_work_q); + destroy_workqueue(ctlr->work_q); + put_device(&ctlr->dev); + return NULL; + } return ctlr; -out_del_q2: - destroy_workqueue(ctlr->devloss_work_q); - ctlr->devloss_work_q = NULL; out_del_q: destroy_workqueue(ctlr->work_q); ctlr->work_q = NULL; @@ -1036,16 +1037,16 @@ struct fcoe_fcf_device *fcoe_fcf_device_add(struct fcoe_ctlr_device *ctlr, fcf->selected = new_fcf->selected; error = device_register(&fcf->dev); - if (error) - goto out_del; + if (error) { + put_device(&fcf->dev); + goto out; + } fcf->state = FCOE_FCF_STATE_CONNECTED; list_add_tail(&fcf->peers, &ctlr->fcfs); return fcf; -out_del: - kfree(fcf); out: return NULL; } diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 3faa87fa296a..cf7988de7b90 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -8927,7 +8927,7 @@ clean1: /* wq/aer/h */ destroy_workqueue(h->monitor_ctlr_wq); h->monitor_ctlr_wq = NULL; } - kfree(h); + hpda_free_ctlr_info(h); return rc; } @@ -9788,7 +9788,8 @@ static int hpsa_add_sas_host(struct ctlr_info *h) return 0; free_sas_phy: - hpsa_free_sas_phy(hpsa_sas_phy); + sas_phy_free(hpsa_sas_phy->phy); + kfree(hpsa_sas_phy); free_sas_port: hpsa_free_sas_port(hpsa_sas_port); free_sas_node: @@ -9824,10 +9825,12 @@ static int hpsa_add_sas_device(struct hpsa_sas_node *hpsa_sas_node, rc = hpsa_sas_port_add_rphy(hpsa_sas_port, rphy); if (rc) - goto free_sas_port; + goto free_sas_rphy; 
return 0; +free_sas_rphy: + sas_rphy_free(rphy); free_sas_port: hpsa_free_sas_port(hpsa_sas_port); device->sas_port = NULL; diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index 56b8a2d6ffe4..04fb7fc01226 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -10869,11 +10869,19 @@ static struct notifier_block ipr_notifier = { **/ static int __init ipr_init(void) { + int rc; + ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n", IPR_DRIVER_VERSION, IPR_DRIVER_DATE); register_reboot_notifier(&ipr_notifier); - return pci_register_driver(&ipr_driver); + rc = pci_register_driver(&ipr_driver); + if (rc) { + unregister_reboot_notifier(&ipr_notifier); + return rc; + } + + return 0; } /** diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index d6e761adf1f1..df3b190fccd1 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -7992,10 +7992,10 @@ u32 lpfc_rx_monitor_report(struct lpfc_hba *phba, "IO_cnt", "Info", "BWutil(ms)"); } - /* Needs to be _bh because record is called from timer interrupt + /* Needs to be _irq because record is called from timer interrupt * context */ - spin_lock_bh(ring_lock); + spin_lock_irq(ring_lock); while (*head_idx != *tail_idx) { entry = &ring[*head_idx]; @@ -8039,7 +8039,7 @@ u32 lpfc_rx_monitor_report(struct lpfc_hba *phba, if (cnt >= max_read_entries) break; } - spin_unlock_bh(ring_lock); + spin_unlock_irq(ring_lock); return cnt; } diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c index 0681daee6c14..e5ecd6ada6cd 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_transport.c +++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c @@ -829,6 +829,8 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle, if ((sas_rphy_add(rphy))) { ioc_err(ioc, "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); + sas_rphy_free(rphy); + rphy = NULL; } if (mpt3sas_port->remote_identify.device_type == SAS_END_DEVICE) { diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 51c7ce5f9792..307ffdfe048b 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -5117,17 +5117,17 @@ struct secure_flash_update_block_pk { (test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags) || \ test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags)) -#define QLA_VHA_MARK_BUSY(__vha, __bail) do { \ - atomic_inc(&__vha->vref_count); \ - mb(); \ - if (__vha->flags.delete_progress) { \ - atomic_dec(&__vha->vref_count); \ - wake_up(&__vha->vref_waitq); \ - __bail = 1; \ - } else { \ - __bail = 0; \ - } \ -} while (0) +static inline bool qla_vha_mark_busy(scsi_qla_host_t *vha) +{ + atomic_inc(&vha->vref_count); + mb(); + if (vha->flags.delete_progress) { + atomic_dec(&vha->vref_count); + wake_up(&vha->vref_waitq); + return true; + } + return false; +} #define QLA_VHA_MARK_NOT_BUSY(__vha) do { \ atomic_dec(&__vha->vref_count); \ diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index b81797a3ab61..30798ab84db9 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -110,6 +110,7 @@ static void qla24xx_abort_iocb_timeout(void *data) struct qla_qpair *qpair = sp->qpair; u32 handle; unsigned long flags; + int sp_found = 0, cmdsp_found = 0; if (sp->cmd_sp) ql_dbg(ql_dbg_async, sp->vha, 0x507c, @@ -124,18 +125,21 @@ static void qla24xx_abort_iocb_timeout(void *data) spin_lock_irqsave(qpair->qp_lock_ptr, flags); for (handle = 1; handle < qpair->req->num_outstanding_cmds; handle++) { if (sp->cmd_sp && 
(qpair->req->outstanding_cmds[handle] == - sp->cmd_sp)) + sp->cmd_sp)) { qpair->req->outstanding_cmds[handle] = NULL; + cmdsp_found = 1; + } /* removing the abort */ if (qpair->req->outstanding_cmds[handle] == sp) { qpair->req->outstanding_cmds[handle] = NULL; + sp_found = 1; break; } } spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); - if (sp->cmd_sp) { + if (cmdsp_found && sp->cmd_sp) { /* * This done function should take care of * original command ref: INIT @@ -143,8 +147,10 @@ static void qla24xx_abort_iocb_timeout(void *data) sp->cmd_sp->done(sp->cmd_sp, QLA_OS_TIMER_EXPIRED); } - abt->u.abt.comp_status = cpu_to_le16(CS_TIMEOUT); - sp->done(sp, QLA_OS_TIMER_EXPIRED); + if (sp_found) { + abt->u.abt.comp_status = cpu_to_le16(CS_TIMEOUT); + sp->done(sp, QLA_OS_TIMER_EXPIRED); + } } static void qla24xx_abort_sp_done(srb_t *sp, int res) @@ -168,7 +174,6 @@ int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait) struct srb_iocb *abt_iocb; srb_t *sp; int rval = QLA_FUNCTION_FAILED; - uint8_t bail; /* ref: INIT for ABTS command */ sp = qla2xxx_get_qpair_sp(cmd_sp->vha, cmd_sp->qpair, cmd_sp->fcport, @@ -176,7 +181,7 @@ int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait) if (!sp) return QLA_MEMORY_ALLOC_FAILED; - QLA_VHA_MARK_BUSY(vha, bail); + qla_vha_mark_busy(vha); abt_iocb = &sp->u.iocb_cmd; sp->type = SRB_ABT_CMD; sp->name = "abort"; @@ -2022,14 +2027,13 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun, struct srb_iocb *tm_iocb; srb_t *sp; int rval = QLA_FUNCTION_FAILED; - uint8_t bail; /* ref: INIT */ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); if (!sp) goto done; - QLA_VHA_MARK_BUSY(vha, bail); + qla_vha_mark_busy(vha); sp->type = SRB_TM_CMD; sp->name = "tmf"; qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha), diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h index db17f7f410cd..5185dc5daf80 100644 --- a/drivers/scsi/qla2xxx/qla_inline.h +++ b/drivers/scsi/qla2xxx/qla_inline.h @@ -225,11 +225,9 @@ static inline srb_t * qla2x00_get_sp(scsi_qla_host_t *vha, fc_port_t *fcport, gfp_t flag) { srb_t *sp = NULL; - uint8_t bail; struct qla_qpair *qpair; - QLA_VHA_MARK_BUSY(vha, bail); - if (unlikely(bail)) + if (unlikely(qla_vha_mark_busy(vha))) return NULL; qpair = vha->hw->base_qpair; diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 00e97f0a07eb..05d827227d0b 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -5043,13 +5043,11 @@ struct qla_work_evt * qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type) { struct qla_work_evt *e; - uint8_t bail; if (test_bit(UNLOADING, &vha->dpc_flags)) return NULL; - QLA_VHA_MARK_BUSY(vha, bail); - if (bail) + if (qla_vha_mark_busy(vha)) return NULL; e = kzalloc(sizeof(struct qla_work_evt), GFP_ATOMIC); diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 0b16061d8da8..591df0a91057 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -3640,7 +3640,7 @@ static int resp_write_scat(struct scsi_cmnd *scp, mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0); return illegal_condition_result; } - lrdp = kzalloc(lbdof_blen, GFP_ATOMIC); + lrdp = kzalloc(lbdof_blen, GFP_ATOMIC | __GFP_NOWARN); if (lrdp == NULL) return SCSI_MLQUEUE_HOST_BUSY; if (sdebug_verbose) @@ -4296,7 +4296,7 @@ static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) if (ret) return ret; - arr = kcalloc(lb_size, vnum, GFP_ATOMIC); + arr = kcalloc(lb_size, vnum, GFP_ATOMIC | 
__GFP_NOWARN); if (!arr) { mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC, INSUFF_RES_ASCQ); @@ -4367,7 +4367,7 @@ static int resp_report_zones(struct scsi_cmnd *scp, rep_max_zones = min((alloc_len - 64) >> ilog2(RZONES_DESC_HD), max_zones); - arr = kzalloc(alloc_len, GFP_ATOMIC); + arr = kzalloc(alloc_len, GFP_ATOMIC | __GFP_NOWARN); if (!arr) { mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC, INSUFF_RES_ASCQ); @@ -7156,7 +7156,10 @@ clean: kfree(sdbg_devinfo->zstate); kfree(sdbg_devinfo); } - kfree(sdbg_host); + if (sdbg_host->dev.release) + put_device(&sdbg_host->dev); + else + kfree(sdbg_host); pr_warn("%s: failed, errno=%d\n", __func__, -error); return error; } diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index bb5a6e0fa49a..dd9f5778f687 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c @@ -343,19 +343,11 @@ enum blk_eh_timer_return scsi_times_out(struct request *req) if (rtn == BLK_EH_DONE) { /* - * Set the command to complete first in order to prevent a real - * completion from releasing the command while error handling - * is using it. If the command was already completed, then the - * lower level driver beat the timeout handler, and it is safe - * to return without escalating error recovery. - * - * If timeout handling lost the race to a real completion, the - * block layer may ignore that due to a fake timeout injection, - * so return RESET_TIMER to allow error handling another shot - * at this command. + * If scsi_done() has already set SCMD_STATE_COMPLETE, do not + * modify *scmd. */ if (test_and_set_bit(SCMD_STATE_COMPLETE, &scmd->state)) - return BLK_EH_RESET_TIMER; + return BLK_EH_DONE; if (scsi_abort_command(scmd) != SUCCESS) { set_host_byte(scmd, DID_TIME_OUT); scsi_eh_scmd_add(scmd); diff --git a/drivers/scsi/snic/snic_disc.c b/drivers/scsi/snic/snic_disc.c index e9ccfb97773f..7cf871323b2c 100644 --- a/drivers/scsi/snic/snic_disc.c +++ b/drivers/scsi/snic/snic_disc.c @@ -318,6 +318,9 @@ snic_tgt_create(struct snic *snic, struct snic_tgt_id *tgtid) ret); put_device(&snic->shost->shost_gendev); + spin_lock_irqsave(snic->shost->host_lock, flags); + list_del(&tgt->list); + spin_unlock_irqrestore(snic->shost->host_lock, flags); kfree(tgt); tgt = NULL; diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index dae1a85f1512..a428b8145dcc 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -8476,8 +8476,6 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba, struct scsi_device *sdp; unsigned long flags; int ret, retries; - unsigned long deadline; - int32_t remaining; spin_lock_irqsave(hba->host->host_lock, flags); sdp = hba->sdev_ufs_device; @@ -8510,14 +8508,9 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba, * callbacks hence set the RQF_PM flag so that it doesn't resume the * already suspended childs. 
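The scsi_debug hunks above add __GFP_NOWARN to GFP_ATOMIC allocations whose failure is already handled gracefully (the command is retried via SCSI_MLQUEUE_HOST_BUSY or reported with a sense code), so the allocator's warning splat adds no value. A minimal sketch of that pattern follows; the function and buffer names are illustrative, not from the patch:

        #include <linux/slab.h>
        #include <linux/gfp.h>

        /*
         * Best-effort atomic allocation: failure is expected under memory
         * pressure and is reported to the caller, so suppress the warning.
         */
        static void *alloc_scratch(size_t len)
        {
                void *buf = kzalloc(len, GFP_ATOMIC | __GFP_NOWARN);

                if (!buf)
                        return NULL;    /* caller retries, e.g. SCSI_MLQUEUE_HOST_BUSY */
                return buf;
        }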
*/ - deadline = jiffies + 10 * HZ; for (retries = 3; retries > 0; --retries) { - ret = -ETIMEDOUT; - remaining = deadline - jiffies; - if (remaining <= 0) - break; ret = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr, - remaining / HZ, 0, 0, RQF_PM, NULL); + HZ, 0, 0, RQF_PM, NULL); if (!scsi_status_is_check_condition(ret) || !scsi_sense_valid(&sshdr) || sshdr.sense_key != UNIT_ATTENTION) diff --git a/drivers/soc/mediatek/mtk-pm-domains.c b/drivers/soc/mediatek/mtk-pm-domains.c index afd2fd74802d..52ecde8e446c 100644 --- a/drivers/soc/mediatek/mtk-pm-domains.c +++ b/drivers/soc/mediatek/mtk-pm-domains.c @@ -272,9 +272,9 @@ static int scpsys_power_off(struct generic_pm_domain *genpd) clk_bulk_disable_unprepare(pd->num_subsys_clks, pd->subsys_clks); /* subsys power off */ - regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_RST_B_BIT); regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_ISO_BIT); regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_CLK_DIS_BIT); + regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_RST_B_BIT); regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_ON_2ND_BIT); regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_ON_BIT); diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig index 499718e131d7..6a97e8af9390 100644 --- a/drivers/soc/qcom/Kconfig +++ b/drivers/soc/qcom/Kconfig @@ -63,6 +63,7 @@ config QCOM_GSBI config QCOM_LLCC tristate "Qualcomm Technologies, Inc. LLCC driver" depends on ARCH_QCOM || COMPILE_TEST + select REGMAP_MMIO help Qualcomm Technologies, Inc. platform specific Last Level Cache Controller(LLCC) driver for platforms such as, diff --git a/drivers/soc/qcom/apr.c b/drivers/soc/qcom/apr.c index 2e455d9e3d94..173427bbf916 100644 --- a/drivers/soc/qcom/apr.c +++ b/drivers/soc/qcom/apr.c @@ -15,13 +15,18 @@ #include #include -struct apr { +enum { + PR_TYPE_APR = 0, +}; + +struct packet_router { struct rpmsg_endpoint *ch; struct device *dev; spinlock_t svcs_lock; spinlock_t rx_lock; struct idr svcs_idr; int dest_domain_id; + int type; struct pdr_handle *pdr; struct workqueue_struct *rxwq; struct work_struct rx_work; @@ -44,21 +49,21 @@ struct apr_rx_buf { */ int apr_send_pkt(struct apr_device *adev, struct apr_pkt *pkt) { - struct apr *apr = dev_get_drvdata(adev->dev.parent); + struct packet_router *apr = dev_get_drvdata(adev->dev.parent); struct apr_hdr *hdr; unsigned long flags; int ret; - spin_lock_irqsave(&adev->lock, flags); + spin_lock_irqsave(&adev->svc.lock, flags); hdr = &pkt->hdr; hdr->src_domain = APR_DOMAIN_APPS; - hdr->src_svc = adev->svc_id; + hdr->src_svc = adev->svc.id; hdr->dest_domain = adev->domain_id; - hdr->dest_svc = adev->svc_id; + hdr->dest_svc = adev->svc.id; ret = rpmsg_trysend(apr->ch, pkt, hdr->pkt_size); - spin_unlock_irqrestore(&adev->lock, flags); + spin_unlock_irqrestore(&adev->svc.lock, flags); return ret ? 
ret : hdr->pkt_size; } @@ -74,7 +79,7 @@ static void apr_dev_release(struct device *dev) static int apr_callback(struct rpmsg_device *rpdev, void *buf, int len, void *priv, u32 addr) { - struct apr *apr = dev_get_drvdata(&rpdev->dev); + struct packet_router *apr = dev_get_drvdata(&rpdev->dev); struct apr_rx_buf *abuf; unsigned long flags; @@ -100,11 +105,11 @@ static int apr_callback(struct rpmsg_device *rpdev, void *buf, return 0; } - -static int apr_do_rx_callback(struct apr *apr, struct apr_rx_buf *abuf) +static int apr_do_rx_callback(struct packet_router *apr, struct apr_rx_buf *abuf) { uint16_t hdr_size, msg_type, ver, svc_id; - struct apr_device *svc = NULL; + struct pkt_router_svc *svc; + struct apr_device *adev; struct apr_driver *adrv = NULL; struct apr_resp_pkt resp; struct apr_hdr *hdr; @@ -145,12 +150,15 @@ static int apr_do_rx_callback(struct apr *apr, struct apr_rx_buf *abuf) svc_id = hdr->dest_svc; spin_lock_irqsave(&apr->svcs_lock, flags); svc = idr_find(&apr->svcs_idr, svc_id); - if (svc && svc->dev.driver) - adrv = to_apr_driver(svc->dev.driver); + if (svc && svc->dev->driver) { + adev = svc_to_apr_device(svc); + adrv = to_apr_driver(adev->dev.driver); + } spin_unlock_irqrestore(&apr->svcs_lock, flags); - if (!adrv) { - dev_err(apr->dev, "APR: service is not registered\n"); + if (!adrv || !adev) { + dev_err(apr->dev, "APR: service is not registered (%d)\n", + svc_id); return -EINVAL; } @@ -164,20 +172,26 @@ static int apr_do_rx_callback(struct apr *apr, struct apr_rx_buf *abuf) if (resp.payload_size > 0) resp.payload = buf + hdr_size; - adrv->callback(svc, &resp); + adrv->callback(adev, &resp); return 0; } static void apr_rxwq(struct work_struct *work) { - struct apr *apr = container_of(work, struct apr, rx_work); + struct packet_router *apr = container_of(work, struct packet_router, rx_work); struct apr_rx_buf *abuf, *b; unsigned long flags; if (!list_empty(&apr->rx_list)) { list_for_each_entry_safe(abuf, b, &apr->rx_list, node) { - apr_do_rx_callback(apr, abuf); + switch (apr->type) { + case PR_TYPE_APR: + apr_do_rx_callback(apr, abuf); + break; + default: + break; + } spin_lock_irqsave(&apr->rx_lock, flags); list_del(&abuf->node); spin_unlock_irqrestore(&apr->rx_lock, flags); @@ -201,7 +215,7 @@ static int apr_device_match(struct device *dev, struct device_driver *drv) while (id->domain_id != 0 || id->svc_id != 0) { if (id->domain_id == adev->domain_id && - id->svc_id == adev->svc_id) + id->svc_id == adev->svc.id) return 1; id++; } @@ -221,14 +235,14 @@ static void apr_device_remove(struct device *dev) { struct apr_device *adev = to_apr_device(dev); struct apr_driver *adrv; - struct apr *apr = dev_get_drvdata(adev->dev.parent); + struct packet_router *apr = dev_get_drvdata(adev->dev.parent); if (dev->driver) { adrv = to_apr_driver(dev->driver); if (adrv->remove) adrv->remove(adev); spin_lock(&apr->svcs_lock); - idr_remove(&apr->svcs_idr, adev->svc_id); + idr_remove(&apr->svcs_idr, adev->svc.id); spin_unlock(&apr->svcs_lock); } } @@ -255,28 +269,39 @@ struct bus_type aprbus = { EXPORT_SYMBOL_GPL(aprbus); static int apr_add_device(struct device *dev, struct device_node *np, - const struct apr_device_id *id) + u32 svc_id, u32 domain_id) { - struct apr *apr = dev_get_drvdata(dev); + struct packet_router *apr = dev_get_drvdata(dev); struct apr_device *adev = NULL; + struct pkt_router_svc *svc; int ret; adev = kzalloc(sizeof(*adev), GFP_KERNEL); if (!adev) return -ENOMEM; - spin_lock_init(&adev->lock); + adev->svc_id = svc_id; + svc = &adev->svc; + + svc->id = svc_id; + 
svc->pr = apr; + svc->priv = adev; + svc->dev = dev; + spin_lock_init(&svc->lock); + + adev->domain_id = domain_id; - adev->svc_id = id->svc_id; - adev->domain_id = id->domain_id; - adev->version = id->svc_version; if (np) snprintf(adev->name, APR_NAME_SIZE, "%pOFn", np); - else - strscpy(adev->name, id->name, APR_NAME_SIZE); - dev_set_name(&adev->dev, "aprsvc:%s:%x:%x", adev->name, - id->domain_id, id->svc_id); + switch (apr->type) { + case PR_TYPE_APR: + dev_set_name(&adev->dev, "aprsvc:%s:%x:%x", adev->name, + domain_id, svc_id); + break; + default: + break; + } adev->dev.bus = &aprbus; adev->dev.parent = dev; @@ -285,12 +310,19 @@ static int apr_add_device(struct device *dev, struct device_node *np, adev->dev.driver = NULL; spin_lock(&apr->svcs_lock); - idr_alloc(&apr->svcs_idr, adev, id->svc_id, - id->svc_id + 1, GFP_ATOMIC); + ret = idr_alloc(&apr->svcs_idr, svc, svc_id, svc_id + 1, GFP_ATOMIC); spin_unlock(&apr->svcs_lock); + if (ret < 0) { + dev_err(dev, "idr_alloc failed: %d\n", ret); + goto out; + } - of_property_read_string_index(np, "qcom,protection-domain", - 1, &adev->service_path); + ret = of_property_read_string_index(np, "qcom,protection-domain", + 1, &adev->service_path); + if (ret < 0) { + dev_err(dev, "Failed to read second value of qcom,protection-domain\n"); + goto out; + } dev_info(dev, "Adding APR dev: %s\n", dev_name(&adev->dev)); @@ -300,13 +332,14 @@ static int apr_add_device(struct device *dev, struct device_node *np, put_device(&adev->dev); } +out: return ret; } static int of_apr_add_pd_lookups(struct device *dev) { const char *service_name, *service_path; - struct apr *apr = dev_get_drvdata(dev); + struct packet_router *apr = dev_get_drvdata(dev); struct device_node *node; struct pdr_service *pds; int ret; @@ -338,13 +371,14 @@ static int of_apr_add_pd_lookups(struct device *dev) static void of_register_apr_devices(struct device *dev, const char *svc_path) { - struct apr *apr = dev_get_drvdata(dev); + struct packet_router *apr = dev_get_drvdata(dev); struct device_node *node; const char *service_path; int ret; for_each_child_of_node(dev->of_node, node) { - struct apr_device_id id = { {0} }; + u32 svc_id; + u32 domain_id; /* * This function is called with svc_path NULL during @@ -374,13 +408,13 @@ static void of_register_apr_devices(struct device *dev, const char *svc_path) continue; } - if (of_property_read_u32(node, "reg", &id.svc_id)) + if (of_property_read_u32(node, "reg", &svc_id)) continue; - id.domain_id = apr->dest_domain_id; + domain_id = apr->dest_domain_id; - if (apr_add_device(dev, node, &id)) - dev_err(dev, "Failed to add apr %d svc\n", id.svc_id); + if (apr_add_device(dev, node, svc_id, domain_id)) + dev_err(dev, "Failed to add apr %d svc\n", svc_id); } } @@ -400,7 +434,7 @@ static int apr_remove_device(struct device *dev, void *svc_path) static void apr_pd_status(int state, char *svc_path, void *priv) { - struct apr *apr = (struct apr *)priv; + struct packet_router *apr = (struct packet_router *)priv; switch (state) { case SERVREG_SERVICE_STATE_UP: @@ -415,16 +449,20 @@ static void apr_pd_status(int state, char *svc_path, void *priv) static int apr_probe(struct rpmsg_device *rpdev) { struct device *dev = &rpdev->dev; - struct apr *apr; + struct packet_router *apr; int ret; apr = devm_kzalloc(dev, sizeof(*apr), GFP_KERNEL); if (!apr) return -ENOMEM; - ret = of_property_read_u32(dev->of_node, "qcom,apr-domain", &apr->dest_domain_id); + ret = of_property_read_u32(dev->of_node, "qcom,domain", &apr->dest_domain_id); + if (ret) /* try deprecated 
apr-domain property */ + ret = of_property_read_u32(dev->of_node, "qcom,apr-domain", + &apr->dest_domain_id); + apr->type = PR_TYPE_APR; if (ret) { - dev_err(dev, "APR Domain ID not specified in DT\n"); + dev_err(dev, "Domain ID not specified in DT\n"); return ret; } @@ -467,7 +505,7 @@ destroy_wq: static void apr_remove(struct rpmsg_device *rpdev) { - struct apr *apr = dev_get_drvdata(&rpdev->dev); + struct packet_router *apr = dev_get_drvdata(&rpdev->dev); pdr_handle_release(apr->pdr); device_for_each_child(&rpdev->dev, NULL, apr_remove_device); @@ -504,20 +542,20 @@ void apr_driver_unregister(struct apr_driver *drv) } EXPORT_SYMBOL_GPL(apr_driver_unregister); -static const struct of_device_id apr_of_match[] = { +static const struct of_device_id pkt_router_of_match[] = { { .compatible = "qcom,apr"}, { .compatible = "qcom,apr-v2"}, {} }; -MODULE_DEVICE_TABLE(of, apr_of_match); +MODULE_DEVICE_TABLE(of, pkt_router_of_match); -static struct rpmsg_driver apr_driver = { +static struct rpmsg_driver packet_router_driver = { .probe = apr_probe, .remove = apr_remove, .callback = apr_callback, .drv = { .name = "qcom,apr", - .of_match_table = apr_of_match, + .of_match_table = pkt_router_of_match, }, }; @@ -527,7 +565,7 @@ static int __init apr_init(void) ret = bus_register(&aprbus); if (!ret) - ret = register_rpmsg_driver(&apr_driver); + ret = register_rpmsg_driver(&packet_router_driver); else bus_unregister(&aprbus); @@ -537,7 +575,7 @@ static int __init apr_init(void) static void __exit apr_exit(void) { bus_unregister(&aprbus); - unregister_rpmsg_driver(&apr_driver); + unregister_rpmsg_driver(&packet_router_driver); } subsys_initcall(apr_init); diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c index cabd8870316d..47d41804fdf6 100644 --- a/drivers/soc/qcom/llcc-qcom.c +++ b/drivers/soc/qcom/llcc-qcom.c @@ -607,7 +607,7 @@ static int qcom_llcc_probe(struct platform_device *pdev) if (ret) goto err; - drv_data->ecc_irq = platform_get_irq(pdev, 0); + drv_data->ecc_irq = platform_get_irq_optional(pdev, 0); if (drv_data->ecc_irq >= 0) { llcc_edac = platform_device_register_data(&pdev->dev, "qcom_llcc_edac", -1, drv_data, diff --git a/drivers/soc/ti/knav_qmss_queue.c b/drivers/soc/ti/knav_qmss_queue.c index 2ac3856b8d42..52389859395c 100644 --- a/drivers/soc/ti/knav_qmss_queue.c +++ b/drivers/soc/ti/knav_qmss_queue.c @@ -67,7 +67,7 @@ static DEFINE_MUTEX(knav_dev_lock); * Newest followed by older ones. Search is done from start of the array * until a firmware file is found. 
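The llcc-qcom change above switches to platform_get_irq_optional() so a genuinely optional ECC interrupt no longer logs an error when it is absent; the EDAC child device is only registered when the IRQ exists. A hedged sketch of that probe-time pattern, with everything except the platform helpers made up:

        #include <linux/platform_device.h>

        static int example_probe(struct platform_device *pdev)
        {
                int irq;

                /* Optional IRQ: no error message is printed when it is absent. */
                irq = platform_get_irq_optional(pdev, 0);
                if (irq >= 0) {
                        /* only wire up the optional feature when the IRQ exists */
                        /* register_edac_child(pdev, irq); -- hypothetical helper */
                }
                return 0;
        }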
*/ -const char *knav_acc_firmwares[] = {"ks2_qmss_pdsp_acc48.bin"}; +static const char * const knav_acc_firmwares[] = {"ks2_qmss_pdsp_acc48.bin"}; static bool device_ready; bool knav_qmss_device_ready(void) @@ -1785,9 +1785,9 @@ static int knav_queue_probe(struct platform_device *pdev) INIT_LIST_HEAD(&kdev->pdsps); pm_runtime_enable(&pdev->dev); - ret = pm_runtime_get_sync(&pdev->dev); + ret = pm_runtime_resume_and_get(&pdev->dev); if (ret < 0) { - pm_runtime_put_noidle(&pdev->dev); + pm_runtime_disable(&pdev->dev); dev_err(dev, "Failed to enable QMSS\n"); return ret; } diff --git a/drivers/soc/ti/smartreflex.c b/drivers/soc/ti/smartreflex.c index b5b2fa538d5c..4d15587324d4 100644 --- a/drivers/soc/ti/smartreflex.c +++ b/drivers/soc/ti/smartreflex.c @@ -931,6 +931,7 @@ static int omap_sr_probe(struct platform_device *pdev) err_debugfs: debugfs_remove_recursive(sr_info->dbg_dir); err_list_del: + pm_runtime_disable(&pdev->dev); list_del(&sr_info->node); clk_unprepare(sr_info->fck); diff --git a/drivers/soc/ux500/ux500-soc-id.c b/drivers/soc/ux500/ux500-soc-id.c index a9472e0e5d61..27d6e25a0115 100644 --- a/drivers/soc/ux500/ux500-soc-id.c +++ b/drivers/soc/ux500/ux500-soc-id.c @@ -167,20 +167,18 @@ ATTRIBUTE_GROUPS(ux500_soc); static const char *db8500_read_soc_id(struct device_node *backupram) { void __iomem *base; - void __iomem *uid; const char *retstr; + u32 uid[5]; base = of_iomap(backupram, 0); if (!base) return NULL; - uid = base + 0x1fc0; + memcpy_fromio(uid, base + 0x1fc0, sizeof(uid)); /* Throw these device-specific numbers into the entropy pool */ - add_device_randomness(uid, 0x14); + add_device_randomness(uid, sizeof(uid)); retstr = kasprintf(GFP_KERNEL, "%08x%08x%08x%08x%08x", - readl((u32 *)uid+0), - readl((u32 *)uid+1), readl((u32 *)uid+2), - readl((u32 *)uid+3), readl((u32 *)uid+4)); + uid[0], uid[1], uid[2], uid[3], uid[4]); iounmap(base); return retstr; } diff --git a/drivers/soundwire/dmi-quirks.c b/drivers/soundwire/dmi-quirks.c index 747983743a14..2bf534632f64 100644 --- a/drivers/soundwire/dmi-quirks.c +++ b/drivers/soundwire/dmi-quirks.c @@ -71,6 +71,14 @@ static const struct dmi_system_id adr_remap_quirk_table[] = { }, .driver_data = (void *)intel_tgl_bios, }, + { + /* quirk used for NUC15 LAPBC710 skew */ + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Intel Corporation"), + DMI_MATCH(DMI_BOARD_NAME, "LAPBC710"), + }, + .driver_data = (void *)intel_tgl_bios, + }, { .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"), diff --git a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c index bbb57b9f6e01..90e0bf8ca37d 100644 --- a/drivers/soundwire/intel.c +++ b/drivers/soundwire/intel.c @@ -1065,8 +1065,8 @@ static const struct snd_soc_dai_ops intel_pcm_dai_ops = { .prepare = intel_prepare, .hw_free = intel_hw_free, .shutdown = intel_shutdown, - .set_sdw_stream = intel_pcm_set_sdw_stream, - .get_sdw_stream = intel_get_sdw_stream, + .set_stream = intel_pcm_set_sdw_stream, + .get_stream = intel_get_sdw_stream, }; static const struct snd_soc_dai_ops intel_pdm_dai_ops = { @@ -1075,8 +1075,8 @@ static const struct snd_soc_dai_ops intel_pdm_dai_ops = { .prepare = intel_prepare, .hw_free = intel_hw_free, .shutdown = intel_shutdown, - .set_sdw_stream = intel_pdm_set_sdw_stream, - .get_sdw_stream = intel_get_sdw_stream, + .set_stream = intel_pdm_set_sdw_stream, + .get_stream = intel_get_sdw_stream, }; static const struct snd_soc_component_driver dai_component = { diff --git a/drivers/soundwire/qcom.c b/drivers/soundwire/qcom.c index f88c5d451f09..500035a1fd46 100644 --- 
a/drivers/soundwire/qcom.c +++ b/drivers/soundwire/qcom.c @@ -1032,8 +1032,8 @@ static int qcom_swrm_startup(struct snd_pcm_substream *substream, ctrl->sruntime[dai->id] = sruntime; for_each_rtd_codec_dais(rtd, i, codec_dai) { - ret = snd_soc_dai_set_sdw_stream(codec_dai, sruntime, - substream->stream); + ret = snd_soc_dai_set_stream(codec_dai, sruntime, + substream->stream); if (ret < 0 && ret != -ENOTSUPP) { dev_err(dai->dev, "Failed to set sdw stream on %s\n", codec_dai->name); @@ -1059,8 +1059,8 @@ static const struct snd_soc_dai_ops qcom_swrm_pdm_dai_ops = { .hw_free = qcom_swrm_hw_free, .startup = qcom_swrm_startup, .shutdown = qcom_swrm_shutdown, - .set_sdw_stream = qcom_swrm_set_sdw_stream, - .get_sdw_stream = qcom_swrm_get_sdw_stream, + .set_stream = qcom_swrm_set_sdw_stream, + .get_stream = qcom_swrm_get_sdw_stream, }; static const struct snd_soc_component_driver qcom_swrm_dai_component = { diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c index ebbe138a5626..2a900aa302a3 100644 --- a/drivers/soundwire/stream.c +++ b/drivers/soundwire/stream.c @@ -1880,7 +1880,7 @@ static int set_stream(struct snd_pcm_substream *substream, /* Set stream pointer on all DAIs */ for_each_rtd_dais(rtd, i, dai) { - ret = snd_soc_dai_set_sdw_stream(dai, sdw_stream, substream->stream); + ret = snd_soc_dai_set_stream(dai, sdw_stream, substream->stream); if (ret < 0) { dev_err(rtd->dev, "failed to set stream pointer on dai %s\n", dai->name); break; @@ -1951,7 +1951,7 @@ void sdw_shutdown_stream(void *sdw_substream) /* Find stream from first CPU DAI */ dai = asoc_rtd_to_cpu(rtd, 0); - sdw_stream = snd_soc_dai_get_sdw_stream(dai, substream->stream); + sdw_stream = snd_soc_dai_get_stream(dai, substream->stream); if (IS_ERR(sdw_stream)) { dev_err(rtd->dev, "no stream found for DAI %s\n", dai->name); diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c index 6890a14eed8e..92113f488f7c 100644 --- a/drivers/spi/spi-gpio.c +++ b/drivers/spi/spi-gpio.c @@ -252,9 +252,19 @@ static int spi_gpio_set_direction(struct spi_device *spi, bool output) if (output) return gpiod_direction_output(spi_gpio->mosi, 1); - ret = gpiod_direction_input(spi_gpio->mosi); - if (ret) - return ret; + /* + * Only change MOSI to an input if using 3WIRE mode. + * Otherwise, MOSI could be left floating if there is + * no pull resistor connected to the I/O pin, or could + * be left logic high if there is a pull-up. Transmitting + * logic high when only clocking MISO data in can put some + * SPI devices in to a bad state. + */ + if (spi->mode & SPI_3WIRE) { + ret = gpiod_direction_input(spi_gpio->mosi); + if (ret) + return ret; + } /* * Send a turnaround high impedance cycle when switching * from output to input. 
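The spi-gpio hunk above only floats MOSI when the device really operates in 3-wire mode, since tristating it during ordinary full-duplex reads can leave the line floating or pulled high and confuse some slaves. A client driver opts into that path roughly as sketched below; the device and probe function names are assumptions:

        #include <linux/spi/spi.h>

        static int example_sensor_probe(struct spi_device *spi)
        {
                int ret;

                /*
                 * Share a single data line for TX and RX: request 3-wire mode,
                 * which lets the bitbang core switch MOSI to an input for reads.
                 */
                spi->mode |= SPI_3WIRE;
                ret = spi_setup(spi);
                if (ret)
                        return ret;

                return 0;
        }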
Theoretically there should be diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c index 36966627f49b..3dbd31e79381 100644 --- a/drivers/spi/spidev.c +++ b/drivers/spi/spidev.c @@ -376,12 +376,23 @@ spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) switch (cmd) { /* read requests */ case SPI_IOC_RD_MODE: - retval = put_user(spi->mode & SPI_MODE_MASK, - (__u8 __user *)arg); - break; case SPI_IOC_RD_MODE32: - retval = put_user(spi->mode & SPI_MODE_MASK, - (__u32 __user *)arg); + tmp = spi->mode; + + { + struct spi_controller *ctlr = spi->controller; + + if (ctlr->use_gpio_descriptors && ctlr->cs_gpiods && + ctlr->cs_gpiods[spi->chip_select]) + tmp &= ~SPI_CS_HIGH; + } + + if (cmd == SPI_IOC_RD_MODE) + retval = put_user(tmp & SPI_MODE_MASK, + (__u8 __user *)arg); + else + retval = put_user(tmp & SPI_MODE_MASK, + (__u32 __user *)arg); break; case SPI_IOC_RD_LSB_FIRST: retval = put_user((spi->mode & SPI_LSB_FIRST) ? 1 : 0, diff --git a/drivers/staging/iio/accel/adis16203.c b/drivers/staging/iio/accel/adis16203.c index 1d3026dae827..62d5397ff1f9 100644 --- a/drivers/staging/iio/accel/adis16203.c +++ b/drivers/staging/iio/accel/adis16203.c @@ -312,3 +312,4 @@ MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>"); MODULE_DESCRIPTION("Analog Devices ADIS16203 Programmable 360 Degrees Inclinometer"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("spi:adis16203"); +MODULE_IMPORT_NS(IIO_ADISLIB); diff --git a/drivers/staging/iio/accel/adis16240.c b/drivers/staging/iio/accel/adis16240.c index 2a8aa83b8d9e..bca857eef92e 100644 --- a/drivers/staging/iio/accel/adis16240.c +++ b/drivers/staging/iio/accel/adis16240.c @@ -440,3 +440,4 @@ MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>"); MODULE_DESCRIPTION("Analog Devices Programmable Impact Sensor and Recorder"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("spi:adis16240"); +MODULE_IMPORT_NS(IIO_ADISLIB); diff --git a/drivers/staging/media/ipu3/ipu3-v4l2.c b/drivers/staging/media/ipu3/ipu3-v4l2.c index f0e61c1b6ffd..ed091418f7e7 100644 --- a/drivers/staging/media/ipu3/ipu3-v4l2.c +++ b/drivers/staging/media/ipu3/ipu3-v4l2.c @@ -188,6 +188,28 @@ static int imgu_subdev_set_fmt(struct v4l2_subdev *sd, return 0; } +static struct v4l2_rect * +imgu_subdev_get_crop(struct imgu_v4l2_subdev *sd, + struct v4l2_subdev_state *sd_state, unsigned int pad, + enum v4l2_subdev_format_whence which) +{ + if (which == V4L2_SUBDEV_FORMAT_TRY) + return v4l2_subdev_get_try_crop(&sd->subdev, sd_state, pad); + else + return &sd->rect.eff; +} + +static struct v4l2_rect * +imgu_subdev_get_compose(struct imgu_v4l2_subdev *sd, + struct v4l2_subdev_state *sd_state, unsigned int pad, + enum v4l2_subdev_format_whence which) +{ + if (which == V4L2_SUBDEV_FORMAT_TRY) + return v4l2_subdev_get_try_compose(&sd->subdev, sd_state, pad); + else + return &sd->rect.bds; +} + static int imgu_subdev_get_selection(struct v4l2_subdev *sd, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_selection *sel) @@ -200,18 +222,12 @@ static int imgu_subdev_get_selection(struct v4l2_subdev *sd, switch (sel->target) { case V4L2_SEL_TGT_CROP: - if (sel->which == V4L2_SUBDEV_FORMAT_TRY) - sel->r = *v4l2_subdev_get_try_crop(sd, sd_state, - sel->pad); - else - sel->r = imgu_sd->rect.eff; + sel->r = *imgu_subdev_get_crop(imgu_sd, sd_state, sel->pad, + sel->which); return 0; case V4L2_SEL_TGT_COMPOSE: - if (sel->which == V4L2_SUBDEV_FORMAT_TRY) - sel->r = *v4l2_subdev_get_try_compose(sd, sd_state, - sel->pad); - else - sel->r = imgu_sd->rect.bds; + sel->r = *imgu_subdev_get_compose(imgu_sd, sd_state, sel->pad, 
+ sel->which); return 0; default: return -EINVAL; @@ -223,10 +239,9 @@ static int imgu_subdev_set_selection(struct v4l2_subdev *sd, struct v4l2_subdev_selection *sel) { struct imgu_device *imgu = v4l2_get_subdevdata(sd); - struct imgu_v4l2_subdev *imgu_sd = container_of(sd, - struct imgu_v4l2_subdev, - subdev); - struct v4l2_rect *rect, *try_sel; + struct imgu_v4l2_subdev *imgu_sd = + container_of(sd, struct imgu_v4l2_subdev, subdev); + struct v4l2_rect *rect; dev_dbg(&imgu->pci_dev->dev, "set subdev %u sel which %u target 0x%4x rect [%ux%u]", @@ -238,22 +253,18 @@ static int imgu_subdev_set_selection(struct v4l2_subdev *sd, switch (sel->target) { case V4L2_SEL_TGT_CROP: - try_sel = v4l2_subdev_get_try_crop(sd, sd_state, sel->pad); - rect = &imgu_sd->rect.eff; + rect = imgu_subdev_get_crop(imgu_sd, sd_state, sel->pad, + sel->which); break; case V4L2_SEL_TGT_COMPOSE: - try_sel = v4l2_subdev_get_try_compose(sd, sd_state, sel->pad); - rect = &imgu_sd->rect.bds; + rect = imgu_subdev_get_compose(imgu_sd, sd_state, sel->pad, + sel->which); break; default: return -EINVAL; } - if (sel->which == V4L2_SUBDEV_FORMAT_TRY) - *try_sel = sel->r; - else - *rect = sel->r; - + *rect = sel->r; return 0; } diff --git a/drivers/staging/media/tegra-video/csi.c b/drivers/staging/media/tegra-video/csi.c index b26e44adb2be..426e653bd55d 100644 --- a/drivers/staging/media/tegra-video/csi.c +++ b/drivers/staging/media/tegra-video/csi.c @@ -433,7 +433,7 @@ static int tegra_csi_channel_alloc(struct tegra_csi *csi, for (i = 0; i < chan->numgangports; i++) chan->csi_port_nums[i] = port_num + i * CSI_PORTS_PER_BRICK; - chan->of_node = node; + chan->of_node = of_node_get(node); chan->numpads = num_pads; if (num_pads & 0x2) { chan->pads[0].flags = MEDIA_PAD_FL_SINK; @@ -448,6 +448,7 @@ static int tegra_csi_channel_alloc(struct tegra_csi *csi, chan->mipi = tegra_mipi_request(csi->dev, node); if (IS_ERR(chan->mipi)) { ret = PTR_ERR(chan->mipi); + chan->mipi = NULL; dev_err(csi->dev, "failed to get mipi device: %d\n", ret); } @@ -640,6 +641,7 @@ static void tegra_csi_channels_cleanup(struct tegra_csi *csi) media_entity_cleanup(&subdev->entity); } + of_node_put(chan->of_node); list_del(&chan->list); kfree(chan); } diff --git a/drivers/staging/media/tegra-video/csi.h b/drivers/staging/media/tegra-video/csi.h index 4ee05a1785cf..6960ea2e3d36 100644 --- a/drivers/staging/media/tegra-video/csi.h +++ b/drivers/staging/media/tegra-video/csi.h @@ -56,7 +56,7 @@ struct tegra_csi; * @framerate: active framerate for TPG * @h_blank: horizontal blanking for TPG active format * @v_blank: vertical blanking for TPG active format - * @mipi: mipi device for corresponding csi channel pads + * @mipi: mipi device for corresponding csi channel pads, or NULL if not applicable (TPG, error) * @pixel_rate: active pixel rate from the sensor on this channel */ struct tegra_csi_channel { diff --git a/drivers/staging/mt7621-pci/pci-mt7621.c b/drivers/staging/mt7621-pci/pci-mt7621.c index 6acfc94a16e7..b520d1e0edd1 100644 --- a/drivers/staging/mt7621-pci/pci-mt7621.c +++ b/drivers/staging/mt7621-pci/pci-mt7621.c @@ -93,8 +93,8 @@ struct mt7621_pcie_port { * reset lines are inverted. 
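The tegra-video csi change above takes a reference with of_node_get() when the channel caches the device-tree node and drops it with of_node_put() during cleanup, and it also NULLs chan->mipi on error so later teardown does not dereference an ERR_PTR. The refcount half of that, as a hedged sketch with invented struct and function names:

        #include <linux/of.h>

        struct example_chan {
                struct device_node *of_node;
        };

        static void example_chan_init(struct example_chan *chan, struct device_node *node)
        {
                /* keep the node pinned for the channel's lifetime */
                chan->of_node = of_node_get(node);
        }

        static void example_chan_cleanup(struct example_chan *chan)
        {
                /* balance the of_node_get() taken at init time */
                of_node_put(chan->of_node);
        }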
*/ struct mt7621_pcie { - void __iomem *base; struct device *dev; + void __iomem *base; struct list_head ports; bool resets_inverted; }; @@ -129,7 +129,7 @@ static inline void pcie_port_write(struct mt7621_pcie_port *port, writel_relaxed(val, port->base + reg); } -static inline u32 mt7621_pci_get_cfgaddr(unsigned int bus, unsigned int slot, +static inline u32 mt7621_pcie_get_cfgaddr(unsigned int bus, unsigned int slot, unsigned int func, unsigned int where) { return (((where & 0xF00) >> 8) << 24) | (bus << 16) | (slot << 11) | @@ -140,7 +140,7 @@ static void __iomem *mt7621_pcie_map_bus(struct pci_bus *bus, unsigned int devfn, int where) { struct mt7621_pcie *pcie = bus->sysdata; - u32 address = mt7621_pci_get_cfgaddr(bus->number, PCI_SLOT(devfn), + u32 address = mt7621_pcie_get_cfgaddr(bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn), where); writel_relaxed(address, pcie->base + RALINK_PCI_CONFIG_ADDR); @@ -148,7 +148,7 @@ static void __iomem *mt7621_pcie_map_bus(struct pci_bus *bus, return pcie->base + RALINK_PCI_CONFIG_DATA + (where & 3); } -struct pci_ops mt7621_pci_ops = { +struct pci_ops mt7621_pcie_ops = { .map_bus = mt7621_pcie_map_bus, .read = pci_generic_config_read, .write = pci_generic_config_write, @@ -156,7 +156,7 @@ struct pci_ops mt7621_pci_ops = { static u32 read_config(struct mt7621_pcie *pcie, unsigned int dev, u32 reg) { - u32 address = mt7621_pci_get_cfgaddr(0, dev, 0, reg); + u32 address = mt7621_pcie_get_cfgaddr(0, dev, 0, reg); pcie_write(pcie, address, RALINK_PCI_CONFIG_ADDR); return pcie_read(pcie, RALINK_PCI_CONFIG_DATA); @@ -165,7 +165,7 @@ static u32 read_config(struct mt7621_pcie *pcie, unsigned int dev, u32 reg) static void write_config(struct mt7621_pcie *pcie, unsigned int dev, u32 reg, u32 val) { - u32 address = mt7621_pci_get_cfgaddr(0, dev, 0, reg); + u32 address = mt7621_pcie_get_cfgaddr(0, dev, 0, reg); pcie_write(pcie, address, RALINK_PCI_CONFIG_ADDR); pcie_write(pcie, val, RALINK_PCI_CONFIG_DATA); @@ -505,16 +505,17 @@ static int mt7621_pcie_register_host(struct pci_host_bridge *host) { struct mt7621_pcie *pcie = pci_host_bridge_priv(host); - host->ops = &mt7621_pci_ops; + host->ops = &mt7621_pcie_ops; host->sysdata = pcie; return pci_host_probe(host); } -static const struct soc_device_attribute mt7621_pci_quirks_match[] = { - { .soc_id = "mt7621", .revision = "E2" } +static const struct soc_device_attribute mt7621_pcie_quirks_match[] = { + { .soc_id = "mt7621", .revision = "E2" }, + { /* sentinel */ } }; -static int mt7621_pci_probe(struct platform_device *pdev) +static int mt7621_pcie_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; const struct soc_device_attribute *attr; @@ -535,7 +536,7 @@ static int mt7621_pci_probe(struct platform_device *pdev) platform_set_drvdata(pdev, pcie); INIT_LIST_HEAD(&pcie->ports); - attr = soc_device_match(mt7621_pci_quirks_match); + attr = soc_device_match(mt7621_pcie_quirks_match); if (attr) pcie->resets_inverted = true; @@ -572,7 +573,7 @@ remove_resets: return err; } -static int mt7621_pci_remove(struct platform_device *pdev) +static int mt7621_pcie_remove(struct platform_device *pdev) { struct mt7621_pcie *pcie = platform_get_drvdata(pdev); struct mt7621_pcie_port *port; @@ -583,18 +584,18 @@ static int mt7621_pci_remove(struct platform_device *pdev) return 0; } -static const struct of_device_id mt7621_pci_ids[] = { +static const struct of_device_id mt7621_pcie_ids[] = { { .compatible = "mediatek,mt7621-pci" }, {}, }; -MODULE_DEVICE_TABLE(of, mt7621_pci_ids); +MODULE_DEVICE_TABLE(of, 
mt7621_pcie_ids); -static struct platform_driver mt7621_pci_driver = { - .probe = mt7621_pci_probe, - .remove = mt7621_pci_remove, +static struct platform_driver mt7621_pcie_driver = { + .probe = mt7621_pcie_probe, + .remove = mt7621_pcie_remove, .driver = { .name = "mt7621-pci", - .of_match_table = of_match_ptr(mt7621_pci_ids), + .of_match_table = of_match_ptr(mt7621_pcie_ids), }, }; -builtin_platform_driver(mt7621_pci_driver); +builtin_platform_driver(mt7621_pcie_driver); diff --git a/drivers/staging/rtl8192e/rtllib_rx.c b/drivers/staging/rtl8192e/rtllib_rx.c index e3d0a361d370..98e90670560b 100644 --- a/drivers/staging/rtl8192e/rtllib_rx.c +++ b/drivers/staging/rtl8192e/rtllib_rx.c @@ -1489,9 +1489,9 @@ static int rtllib_rx_Monitor(struct rtllib_device *ieee, struct sk_buff *skb, hdrlen += 4; } - rtllib_monitor_rx(ieee, skb, rx_stats, hdrlen); ieee->stats.rx_packets++; ieee->stats.rx_bytes += skb->len; + rtllib_monitor_rx(ieee, skb, rx_stats, hdrlen); return 1; } diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c index b58e75932ecd..3686b3c599ce 100644 --- a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c +++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c @@ -951,9 +951,11 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, #endif if (ieee->iw_mode == IW_MODE_MONITOR) { + unsigned int len = skb->len; + ieee80211_monitor_rx(ieee, skb, rx_stats); stats->rx_packets++; - stats->rx_bytes += skb->len; + stats->rx_bytes += len; return 1; } diff --git a/drivers/thermal/imx8mm_thermal.c b/drivers/thermal/imx8mm_thermal.c index af666bd9e8d4..c5cd873c6e01 100644 --- a/drivers/thermal/imx8mm_thermal.c +++ b/drivers/thermal/imx8mm_thermal.c @@ -65,8 +65,14 @@ static int imx8mm_tmu_get_temp(void *data, int *temp) u32 val; val = readl_relaxed(tmu->base + TRITSR) & TRITSR_TEMP0_VAL_MASK; + + /* + * Do not validate against the V bit (bit 31) due to errata + * ERR051272: TMU: Bit 31 of registers TMU_TSCR/TMU_TRITSR/TMU_TRATSR invalid + */ + *temp = val * 1000; - if (*temp < VER1_TEMP_LOW_LIMIT) + if (*temp < VER1_TEMP_LOW_LIMIT || *temp > VER2_TEMP_HIGH_LIMIT) return -EAGAIN; return 0; diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c index 8c42e7662033..92ed1213fe37 100644 --- a/drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c +++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c @@ -172,6 +172,7 @@ static const struct attribute_group fivr_attribute_group = { RFIM_SHOW(rfi_restriction_run_busy, 1) RFIM_SHOW(rfi_restriction_err_code, 1) RFIM_SHOW(rfi_restriction_data_rate, 1) +RFIM_SHOW(rfi_restriction_data_rate_base, 1) RFIM_SHOW(ddr_data_rate_point_0, 1) RFIM_SHOW(ddr_data_rate_point_1, 1) RFIM_SHOW(ddr_data_rate_point_2, 1) @@ -181,11 +182,13 @@ RFIM_SHOW(rfi_disable, 1) RFIM_STORE(rfi_restriction_run_busy, 1) RFIM_STORE(rfi_restriction_err_code, 1) RFIM_STORE(rfi_restriction_data_rate, 1) +RFIM_STORE(rfi_restriction_data_rate_base, 1) RFIM_STORE(rfi_disable, 1) static DEVICE_ATTR_RW(rfi_restriction_run_busy); static DEVICE_ATTR_RW(rfi_restriction_err_code); static DEVICE_ATTR_RW(rfi_restriction_data_rate); +static DEVICE_ATTR_RW(rfi_restriction_data_rate_base); static DEVICE_ATTR_RO(ddr_data_rate_point_0); static DEVICE_ATTR_RO(ddr_data_rate_point_1); static DEVICE_ATTR_RO(ddr_data_rate_point_2); @@ -248,6 +251,7 @@ static struct attribute *dvfs_attrs[] = { 
&dev_attr_rfi_restriction_run_busy.attr, &dev_attr_rfi_restriction_err_code.attr, &dev_attr_rfi_restriction_data_rate.attr, + &dev_attr_rfi_restriction_data_rate_base.attr, &dev_attr_ddr_data_rate_point_0.attr, &dev_attr_ddr_data_rate_point_1.attr, &dev_attr_ddr_data_rate_point_2.attr, diff --git a/drivers/thermal/qcom/lmh.c b/drivers/thermal/qcom/lmh.c index eafa7526eb8b..cc94d8b005d4 100644 --- a/drivers/thermal/qcom/lmh.c +++ b/drivers/thermal/qcom/lmh.c @@ -43,7 +43,7 @@ static irqreturn_t lmh_handle_irq(int hw_irq, void *data) if (irq) generic_handle_irq(irq); - return 0; + return IRQ_HANDLED; } static void lmh_enable_interrupt(struct irq_data *d) diff --git a/drivers/thermal/qcom/qcom-spmi-temp-alarm.c b/drivers/thermal/qcom/qcom-spmi-temp-alarm.c index 7419e196dbb0..1037de19873a 100644 --- a/drivers/thermal/qcom/qcom-spmi-temp-alarm.c +++ b/drivers/thermal/qcom/qcom-spmi-temp-alarm.c @@ -251,7 +251,8 @@ static int qpnp_tm_update_critical_trip_temp(struct qpnp_tm_chip *chip, disable_s2_shutdown = true; else dev_warn(chip->dev, - "No ADC is configured and critical temperature is above the maximum stage 2 threshold of 140 C! Configuring stage 2 shutdown at 140 C.\n"); + "No ADC is configured and critical temperature %d mC is above the maximum stage 2 threshold of %ld mC! Configuring stage 2 shutdown at %ld mC.\n", + temp, stage2_threshold_max, stage2_threshold_max); } skip: diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c index 867c8aa92b3a..38082fdc4fde 100644 --- a/drivers/thermal/thermal_core.c +++ b/drivers/thermal/thermal_core.c @@ -905,10 +905,6 @@ __thermal_cooling_device_register(struct device_node *np, cdev->id = ret; id = ret; - ret = dev_set_name(&cdev->device, "cooling_device%d", cdev->id); - if (ret) - goto out_ida_remove; - cdev->type = kstrdup(type ? type : "", GFP_KERNEL); if (!cdev->type) { ret = -ENOMEM; @@ -923,6 +919,11 @@ __thermal_cooling_device_register(struct device_node *np, cdev->device.class = &thermal_class; cdev->devdata = devdata; thermal_cooling_device_setup_sysfs(cdev); + ret = dev_set_name(&cdev->device, "cooling_device%d", cdev->id); + if (ret) { + thermal_cooling_device_destroy_sysfs(cdev); + goto out_kfree_type; + } ret = device_register(&cdev->device); if (ret) goto out_kfree_type; @@ -1235,10 +1236,6 @@ thermal_zone_device_register(const char *type, int trips, int mask, tz->id = id; strlcpy(tz->type, type, sizeof(tz->type)); - result = dev_set_name(&tz->device, "thermal_zone%d", tz->id); - if (result) - goto remove_id; - if (!ops->critical) ops->critical = thermal_zone_device_critical; @@ -1260,6 +1257,11 @@ thermal_zone_device_register(const char *type, int trips, int mask, /* A new thermal zone needs to be updated anyway. 
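The qcom lmh fix above makes the demultiplexing interrupt handler return IRQ_HANDLED instead of 0, which the core treats as IRQ_NONE and may escalate into spurious-interrupt handling. Roughly the expected shape, where the child-IRQ lookup is only a placeholder:

        #include <linux/interrupt.h>
        #include <linux/irq.h>

        static irqreturn_t example_demux_irq(int hw_irq, void *data)
        {
                unsigned int child = 0; /* would be looked up from *data */

                if (child)
                        generic_handle_irq(child);

                /* The hardware interrupt was serviced either way. */
                return IRQ_HANDLED;
        }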
*/ atomic_set(&tz->need_update, 1); + result = dev_set_name(&tz->device, "thermal_zone%d", tz->id); + if (result) { + thermal_zone_destroy_device_groups(tz); + goto remove_id; + } result = device_register(&tz->device); if (result) goto release_device; diff --git a/drivers/tty/serial/8250/8250_bcm7271.c b/drivers/tty/serial/8250/8250_bcm7271.c index 711cf30e835a..60b3ac1a0317 100644 --- a/drivers/tty/serial/8250/8250_bcm7271.c +++ b/drivers/tty/serial/8250/8250_bcm7271.c @@ -1214,9 +1214,17 @@ static struct platform_driver brcmuart_platform_driver = { static int __init brcmuart_init(void) { + int ret; + brcmuart_debugfs_root = debugfs_create_dir( brcmuart_platform_driver.driver.name, NULL); - return platform_driver_register(&brcmuart_platform_driver); + ret = platform_driver_register(&brcmuart_platform_driver); + if (ret) { + debugfs_remove_recursive(brcmuart_debugfs_root); + return ret; + } + + return 0; } module_init(brcmuart_init); diff --git a/drivers/tty/serial/altera_uart.c b/drivers/tty/serial/altera_uart.c index 7c5f4e966b59..91799c420e25 100644 --- a/drivers/tty/serial/altera_uart.c +++ b/drivers/tty/serial/altera_uart.c @@ -199,9 +199,8 @@ static void altera_uart_set_termios(struct uart_port *port, */ } -static void altera_uart_rx_chars(struct altera_uart *pp) +static void altera_uart_rx_chars(struct uart_port *port) { - struct uart_port *port = &pp->port; unsigned char ch, flag; unsigned short status; @@ -246,9 +245,8 @@ static void altera_uart_rx_chars(struct altera_uart *pp) tty_flip_buffer_push(&port->state->port); } -static void altera_uart_tx_chars(struct altera_uart *pp) +static void altera_uart_tx_chars(struct uart_port *port) { - struct uart_port *port = &pp->port; struct circ_buf *xmit = &port->state->xmit; if (port->x_char) { @@ -272,26 +270,25 @@ static void altera_uart_tx_chars(struct altera_uart *pp) if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) uart_write_wakeup(port); - if (xmit->head == xmit->tail) { - pp->imr &= ~ALTERA_UART_CONTROL_TRDY_MSK; - altera_uart_update_ctrl_reg(pp); - } + if (uart_circ_empty(xmit)) + altera_uart_stop_tx(port); } static irqreturn_t altera_uart_interrupt(int irq, void *data) { struct uart_port *port = data; struct altera_uart *pp = container_of(port, struct altera_uart, port); + unsigned long flags; unsigned int isr; isr = altera_uart_readl(port, ALTERA_UART_STATUS_REG) & pp->imr; - spin_lock(&port->lock); + spin_lock_irqsave(&port->lock, flags); if (isr & ALTERA_UART_STATUS_RRDY_MSK) - altera_uart_rx_chars(pp); + altera_uart_rx_chars(port); if (isr & ALTERA_UART_STATUS_TRDY_MSK) - altera_uart_tx_chars(pp); - spin_unlock(&port->lock); + altera_uart_tx_chars(port); + spin_unlock_irqrestore(&port->lock, flags); return IRQ_RETVAL(isr); } diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c index 740389fa601b..a67d7eac221f 100644 --- a/drivers/tty/serial/amba-pl011.c +++ b/drivers/tty/serial/amba-pl011.c @@ -1050,6 +1050,9 @@ static void pl011_dma_rx_callback(void *data) */ static inline void pl011_dma_rx_stop(struct uart_amba_port *uap) { + if (!uap->using_rx_dma) + return; + /* FIXME. 
Just disable the DMA enable */ uap->dmacr &= ~UART011_RXDMAE; pl011_write(uap->dmacr, uap, REG_DMACR); @@ -1855,8 +1858,17 @@ static void pl011_enable_interrupts(struct uart_amba_port *uap) static void pl011_unthrottle_rx(struct uart_port *port) { struct uart_amba_port *uap = container_of(port, struct uart_amba_port, port); + unsigned long flags; - pl011_enable_interrupts(uap); + spin_lock_irqsave(&uap->port.lock, flags); + + uap->im = UART011_RTIM; + if (!pl011_dma_rx_running(uap)) + uap->im |= UART011_RXIM; + + pl011_write(uap->im, uap, REG_IMSC); + + spin_unlock_irqrestore(&uap->port.lock, flags); } static int pl011_startup(struct uart_port *port) diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c index 1e65933f6cce..52cab2038da8 100644 --- a/drivers/tty/serial/pch_uart.c +++ b/drivers/tty/serial/pch_uart.c @@ -707,6 +707,7 @@ static void pch_request_dma(struct uart_port *port) if (!chan) { dev_err(priv->port.dev, "%s:dma_request_channel FAILS(Tx)\n", __func__); + pci_dev_put(dma_dev); return; } priv->chan_tx = chan; @@ -723,6 +724,7 @@ static void pch_request_dma(struct uart_port *port) __func__); dma_release_channel(priv->chan_tx); priv->chan_tx = NULL; + pci_dev_put(dma_dev); return; } @@ -730,6 +732,8 @@ static void pch_request_dma(struct uart_port *port) priv->rx_buf_virt = dma_alloc_coherent(port->dev, port->fifosize, &priv->rx_buf_dma, GFP_KERNEL); priv->chan_rx = chan; + + pci_dev_put(dma_dev); } static void pch_dma_rx_complete(void *arg) diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c index d4dba298de7a..79187ff9ac13 100644 --- a/drivers/tty/serial/serial-tegra.c +++ b/drivers/tty/serial/serial-tegra.c @@ -619,8 +619,9 @@ static void tegra_uart_stop_tx(struct uart_port *u) if (tup->tx_in_progress != TEGRA_UART_TX_DMA) return; - dmaengine_terminate_all(tup->tx_dma_chan); + dmaengine_pause(tup->tx_dma_chan); dmaengine_tx_status(tup->tx_dma_chan, tup->tx_cookie, &state); + dmaengine_terminate_all(tup->tx_dma_chan); count = tup->tx_bytes_requested - state.residue; async_tx_ack(tup->tx_dma_desc); uart_xmit_advance(&tup->uport, count); @@ -763,8 +764,9 @@ static void tegra_uart_terminate_rx_dma(struct tegra_uart_port *tup) return; } - dmaengine_terminate_all(tup->rx_dma_chan); + dmaengine_pause(tup->rx_dma_chan); dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state); + dmaengine_terminate_all(tup->rx_dma_chan); tegra_uart_rx_buffer_push(tup, state.residue); tup->rx_dma_active = false; diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c index ce7ff7a0207f..5c60960e185d 100644 --- a/drivers/tty/serial/stm32-usart.c +++ b/drivers/tty/serial/stm32-usart.c @@ -1363,22 +1363,10 @@ static int stm32_usart_serial_probe(struct platform_device *pdev) if (!stm32port->info) return -EINVAL; - ret = stm32_usart_init_port(stm32port, pdev); - if (ret) - return ret; - - if (stm32port->wakeup_src) { - device_set_wakeup_capable(&pdev->dev, true); - ret = dev_pm_set_wake_irq(&pdev->dev, stm32port->port.irq); - if (ret) - goto err_deinit_port; - } - stm32port->rx_ch = dma_request_chan(&pdev->dev, "rx"); - if (PTR_ERR(stm32port->rx_ch) == -EPROBE_DEFER) { - ret = -EPROBE_DEFER; - goto err_wakeirq; - } + if (PTR_ERR(stm32port->rx_ch) == -EPROBE_DEFER) + return -EPROBE_DEFER; + /* Fall back in interrupt mode for any non-deferral error */ if (IS_ERR(stm32port->rx_ch)) stm32port->rx_ch = NULL; @@ -1392,6 +1380,17 @@ static int stm32_usart_serial_probe(struct platform_device *pdev) if (IS_ERR(stm32port->tx_ch)) 
stm32port->tx_ch = NULL; + ret = stm32_usart_init_port(stm32port, pdev); + if (ret) + goto err_dma_tx; + + if (stm32port->wakeup_src) { + device_set_wakeup_capable(&pdev->dev, true); + ret = dev_pm_set_wake_irq(&pdev->dev, stm32port->port.irq); + if (ret) + goto err_deinit_port; + } + if (stm32port->rx_ch && stm32_usart_of_dma_rx_probe(stm32port, pdev)) { /* Fall back in interrupt mode */ dma_release_channel(stm32port->rx_ch); @@ -1428,19 +1427,11 @@ err_port: pm_runtime_set_suspended(&pdev->dev); pm_runtime_put_noidle(&pdev->dev); - if (stm32port->tx_ch) { + if (stm32port->tx_ch) stm32_usart_of_dma_tx_remove(stm32port, pdev); - dma_release_channel(stm32port->tx_ch); - } - if (stm32port->rx_ch) stm32_usart_of_dma_rx_remove(stm32port, pdev); -err_dma_rx: - if (stm32port->rx_ch) - dma_release_channel(stm32port->rx_ch); - -err_wakeirq: if (stm32port->wakeup_src) dev_pm_clear_wake_irq(&pdev->dev); @@ -1450,6 +1441,14 @@ err_deinit_port: stm32_usart_deinit_port(stm32port); +err_dma_tx: + if (stm32port->tx_ch) + dma_release_channel(stm32port->tx_ch); + +err_dma_rx: + if (stm32port->rx_ch) + dma_release_channel(stm32port->rx_ch); + return ret; } diff --git a/drivers/tty/serial/sunsab.c b/drivers/tty/serial/sunsab.c index 92e572634009..ac7cb80e4d6b 100644 --- a/drivers/tty/serial/sunsab.c +++ b/drivers/tty/serial/sunsab.c @@ -1137,7 +1137,13 @@ static int __init sunsab_init(void) } } - return platform_driver_register(&sab_driver); + err = platform_driver_register(&sab_driver); + if (err) { + kfree(sunsab_ports); + sunsab_ports = NULL; + } + + return err; } static void __exit sunsab_exit(void) diff --git a/drivers/uio/uio_dmem_genirq.c b/drivers/uio/uio_dmem_genirq.c index 6b5cfa5b0673..28be820b546e 100644 --- a/drivers/uio/uio_dmem_genirq.c +++ b/drivers/uio/uio_dmem_genirq.c @@ -110,8 +110,10 @@ static irqreturn_t uio_dmem_genirq_handler(int irq, struct uio_info *dev_info) * remember the state so we can allow user space to enable it later. */ + spin_lock(&priv->lock); if (!test_and_set_bit(0, &priv->flags)) disable_irq_nosync(irq); + spin_unlock(&priv->lock); return IRQ_HANDLED; } @@ -125,20 +127,19 @@ static int uio_dmem_genirq_irqcontrol(struct uio_info *dev_info, s32 irq_on) * in the interrupt controller, but keep track of the * state to prevent per-irq depth damage. * - * Serialize this operation to support multiple tasks. + * Serialize this operation to support multiple tasks and concurrency + * with irq handler on SMP systems. 
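A side note on this uio_dmem_genirq change: the handler runs in hard-IRQ context and can take the same spinlock without disabling interrupts, while the irqcontrol path uses spin_lock_irqsave() plus the _nosync variant of disable_irq() so it never blocks waiting for the handler it is racing with. A condensed sketch of that split, with the lock and flag names made up:

        #include <linux/interrupt.h>
        #include <linux/spinlock.h>

        static DEFINE_SPINLOCK(example_lock);
        static unsigned long example_flags;     /* bit 0: interrupt masked */

        static irqreturn_t example_handler(int irq, void *data)
        {
                /* hard-IRQ context: plain spin_lock() is sufficient */
                spin_lock(&example_lock);
                if (!test_and_set_bit(0, &example_flags))
                        disable_irq_nosync(irq);
                spin_unlock(&example_lock);
                return IRQ_HANDLED;
        }

        static void example_mask(int irq)
        {
                unsigned long flags;

                /* process context racing with the handler: disable local IRQs too */
                spin_lock_irqsave(&example_lock, flags);
                if (!test_and_set_bit(0, &example_flags))
                        disable_irq_nosync(irq);
                spin_unlock_irqrestore(&example_lock, flags);
        }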
*/ spin_lock_irqsave(&priv->lock, flags); if (irq_on) { if (test_and_clear_bit(0, &priv->flags)) enable_irq(dev_info->irq); - spin_unlock_irqrestore(&priv->lock, flags); } else { - if (!test_and_set_bit(0, &priv->flags)) { - spin_unlock_irqrestore(&priv->lock, flags); - disable_irq(dev_info->irq); - } + if (!test_and_set_bit(0, &priv->flags)) + disable_irq_nosync(dev_info->irq); } + spin_unlock_irqrestore(&priv->lock, flags); return 0; } diff --git a/drivers/usb/cdns3/cdnsp-ring.c b/drivers/usb/cdns3/cdnsp-ring.c index 2f29431f612e..b23e543b3a3d 100644 --- a/drivers/usb/cdns3/cdnsp-ring.c +++ b/drivers/usb/cdns3/cdnsp-ring.c @@ -2006,10 +2006,11 @@ int cdnsp_queue_bulk_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq) int cdnsp_queue_ctrl_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq) { - u32 field, length_field, remainder; + u32 field, length_field, zlp = 0; struct cdnsp_ep *pep = preq->pep; struct cdnsp_ring *ep_ring; int num_trbs; + u32 maxp; int ret; ep_ring = cdnsp_request_to_transfer_ring(pdev, preq); @@ -2019,26 +2020,33 @@ int cdnsp_queue_ctrl_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq) /* 1 TRB for data, 1 for status */ num_trbs = (pdev->three_stage_setup) ? 2 : 1; + maxp = usb_endpoint_maxp(pep->endpoint.desc); + + if (preq->request.zero && preq->request.length && + (preq->request.length % maxp == 0)) { + num_trbs++; + zlp = 1; + } + ret = cdnsp_prepare_transfer(pdev, preq, num_trbs); if (ret) return ret; /* If there's data, queue data TRBs */ - if (pdev->ep0_expect_in) - field = TRB_TYPE(TRB_DATA) | TRB_IOC; - else - field = TRB_ISP | TRB_TYPE(TRB_DATA) | TRB_IOC; - if (preq->request.length > 0) { - remainder = cdnsp_td_remainder(pdev, 0, preq->request.length, - preq->request.length, preq, 1, 0); + field = TRB_TYPE(TRB_DATA); - length_field = TRB_LEN(preq->request.length) | - TRB_TD_SIZE(remainder) | TRB_INTR_TARGET(0); + if (zlp) + field |= TRB_CHAIN; + else + field |= TRB_IOC | (pdev->ep0_expect_in ? 
0 : TRB_ISP); if (pdev->ep0_expect_in) field |= TRB_DIR_IN; + length_field = TRB_LEN(preq->request.length) | + TRB_TD_SIZE(zlp) | TRB_INTR_TARGET(0); + cdnsp_queue_trb(pdev, ep_ring, true, lower_32_bits(preq->request.dma), upper_32_bits(preq->request.dma), length_field, @@ -2046,6 +2054,20 @@ int cdnsp_queue_ctrl_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq) TRB_SETUPID(pdev->setup_id) | pdev->setup_speed); + if (zlp) { + field = TRB_TYPE(TRB_NORMAL) | TRB_IOC; + + if (!pdev->ep0_expect_in) + field = TRB_ISP; + + cdnsp_queue_trb(pdev, ep_ring, true, + lower_32_bits(preq->request.dma), + upper_32_bits(preq->request.dma), 0, + field | ep_ring->cycle_state | + TRB_SETUPID(pdev->setup_id) | + pdev->setup_speed); + } + pdev->ep0_stage = CDNSP_DATA_STAGE; } diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index a2f3e56aba05..7258e640e9ee 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c @@ -120,21 +120,25 @@ static void __dwc3_set_mode(struct work_struct *work) unsigned long flags; int ret; u32 reg; + u32 desired_dr_role; mutex_lock(&dwc->mutex); + spin_lock_irqsave(&dwc->lock, flags); + desired_dr_role = dwc->desired_dr_role; + spin_unlock_irqrestore(&dwc->lock, flags); pm_runtime_get_sync(dwc->dev); if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_OTG) dwc3_otg_update(dwc, 0); - if (!dwc->desired_dr_role) + if (!desired_dr_role) goto out; - if (dwc->desired_dr_role == dwc->current_dr_role) + if (desired_dr_role == dwc->current_dr_role) goto out; - if (dwc->desired_dr_role == DWC3_GCTL_PRTCAP_OTG && dwc->edev) + if (desired_dr_role == DWC3_GCTL_PRTCAP_OTG && dwc->edev) goto out; switch (dwc->current_dr_role) { @@ -162,7 +166,7 @@ static void __dwc3_set_mode(struct work_struct *work) */ if (dwc->current_dr_role && ((DWC3_IP_IS(DWC3) || DWC3_VER_IS_PRIOR(DWC31, 190A)) && - dwc->desired_dr_role != DWC3_GCTL_PRTCAP_OTG)) { + desired_dr_role != DWC3_GCTL_PRTCAP_OTG)) { reg = dwc3_readl(dwc->regs, DWC3_GCTL); reg |= DWC3_GCTL_CORESOFTRESET; dwc3_writel(dwc->regs, DWC3_GCTL, reg); @@ -182,11 +186,11 @@ static void __dwc3_set_mode(struct work_struct *work) spin_lock_irqsave(&dwc->lock, flags); - dwc3_set_prtcap(dwc, dwc->desired_dr_role); + dwc3_set_prtcap(dwc, desired_dr_role); spin_unlock_irqrestore(&dwc->lock, flags); - switch (dwc->desired_dr_role) { + switch (desired_dr_role) { case DWC3_GCTL_PRTCAP_HOST: ret = dwc3_host_init(dwc); if (ret) { @@ -959,8 +963,13 @@ static int dwc3_core_init(struct dwc3 *dwc) if (!dwc->ulpi_ready) { ret = dwc3_core_ulpi_init(dwc); - if (ret) + if (ret) { + if (ret == -ETIMEDOUT) { + dwc3_core_soft_reset(dwc); + ret = -EPROBE_DEFER; + } goto err0; + } dwc->ulpi_ready = true; } diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c index c52f7b5b5ec0..e55d0c7db6b5 100644 --- a/drivers/usb/dwc3/dwc3-pci.c +++ b/drivers/usb/dwc3/dwc3-pci.c @@ -43,7 +43,7 @@ #define PCI_DEVICE_ID_INTEL_ADLP 0x51ee #define PCI_DEVICE_ID_INTEL_ADLM 0x54ee #define PCI_DEVICE_ID_INTEL_ADLS 0x7ae1 -#define PCI_DEVICE_ID_INTEL_RPL 0x460e +#define PCI_DEVICE_ID_INTEL_RPL 0xa70e #define PCI_DEVICE_ID_INTEL_RPLS 0x7a61 #define PCI_DEVICE_ID_INTEL_MTLP 0x7ec1 #define PCI_DEVICE_ID_INTEL_MTL 0x7e7e diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c index d0352daab012..ec1de6f6c290 100644 --- a/drivers/usb/dwc3/dwc3-qcom.c +++ b/drivers/usb/dwc3/dwc3-qcom.c @@ -258,7 +258,8 @@ static int dwc3_qcom_interconnect_init(struct dwc3_qcom *qcom) if (IS_ERR(qcom->icc_path_apps)) { dev_err(dev, "failed to get apps-usb path: %ld\n", 
PTR_ERR(qcom->icc_path_apps)); - return PTR_ERR(qcom->icc_path_apps); + ret = PTR_ERR(qcom->icc_path_apps); + goto put_path_ddr; } if (usb_get_maximum_speed(&qcom->dwc3->dev) >= USB_SPEED_SUPER || @@ -271,17 +272,23 @@ static int dwc3_qcom_interconnect_init(struct dwc3_qcom *qcom) if (ret) { dev_err(dev, "failed to set bandwidth for usb-ddr path: %d\n", ret); - return ret; + goto put_path_apps; } ret = icc_set_bw(qcom->icc_path_apps, APPS_USB_AVG_BW, APPS_USB_PEAK_BW); if (ret) { dev_err(dev, "failed to set bandwidth for apps-usb path: %d\n", ret); - return ret; + goto put_path_apps; } return 0; + +put_path_apps: + icc_put(qcom->icc_path_apps); +put_path_ddr: + icc_put(qcom->icc_path_ddr); + return ret; } /** diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c index ca0a7d9eaa34..6be6009f911e 100644 --- a/drivers/usb/gadget/function/f_hid.c +++ b/drivers/usb/gadget/function/f_hid.c @@ -71,7 +71,7 @@ struct f_hidg { wait_queue_head_t write_queue; struct usb_request *req; - int minor; + struct device dev; struct cdev cdev; struct usb_function func; @@ -84,6 +84,14 @@ static inline struct f_hidg *func_to_hidg(struct usb_function *f) return container_of(f, struct f_hidg, func); } +static void hidg_release(struct device *dev) +{ + struct f_hidg *hidg = container_of(dev, struct f_hidg, dev); + + kfree(hidg->set_report_buf); + kfree(hidg); +} + /*-------------------------------------------------------------------------*/ /* Static descriptors */ @@ -904,9 +912,7 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f) struct usb_ep *ep; struct f_hidg *hidg = func_to_hidg(f); struct usb_string *us; - struct device *device; int status; - dev_t dev; /* maybe allocate device-global string IDs, and patch descriptors */ us = usb_gstrings_attach(c->cdev, ct_func_strings, @@ -999,21 +1005,11 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f) /* create char device */ cdev_init(&hidg->cdev, &f_hidg_fops); - dev = MKDEV(major, hidg->minor); - status = cdev_add(&hidg->cdev, dev, 1); + status = cdev_device_add(&hidg->cdev, &hidg->dev); if (status) goto fail_free_descs; - device = device_create(hidg_class, NULL, dev, NULL, - "%s%d", "hidg", hidg->minor); - if (IS_ERR(device)) { - status = PTR_ERR(device); - goto del; - } - return 0; -del: - cdev_del(&hidg->cdev); fail_free_descs: usb_free_all_descriptors(f); fail: @@ -1244,9 +1240,7 @@ static void hidg_free(struct usb_function *f) hidg = func_to_hidg(f); opts = container_of(f->fi, struct f_hid_opts, func_inst); - kfree(hidg->report_desc); - kfree(hidg->set_report_buf); - kfree(hidg); + put_device(&hidg->dev); mutex_lock(&opts->lock); --opts->refcnt; mutex_unlock(&opts->lock); @@ -1256,8 +1250,7 @@ static void hidg_unbind(struct usb_configuration *c, struct usb_function *f) { struct f_hidg *hidg = func_to_hidg(f); - device_destroy(hidg_class, MKDEV(major, hidg->minor)); - cdev_del(&hidg->cdev); + cdev_device_del(&hidg->cdev, &hidg->dev); usb_free_all_descriptors(f); } @@ -1266,6 +1259,7 @@ static struct usb_function *hidg_alloc(struct usb_function_instance *fi) { struct f_hidg *hidg; struct f_hid_opts *opts; + int ret; /* allocate and initialize one new instance */ hidg = kzalloc(sizeof(*hidg), GFP_KERNEL); @@ -1277,17 +1271,28 @@ static struct usb_function *hidg_alloc(struct usb_function_instance *fi) mutex_lock(&opts->lock); ++opts->refcnt; - hidg->minor = opts->minor; + device_initialize(&hidg->dev); + hidg->dev.release = hidg_release; + hidg->dev.class = hidg_class; + 
hidg->dev.devt = MKDEV(major, opts->minor); + ret = dev_set_name(&hidg->dev, "hidg%d", opts->minor); + if (ret) { + --opts->refcnt; + mutex_unlock(&opts->lock); + return ERR_PTR(ret); + } + hidg->bInterfaceSubClass = opts->subclass; hidg->bInterfaceProtocol = opts->protocol; hidg->report_length = opts->report_length; hidg->report_desc_length = opts->report_desc_length; if (opts->report_desc) { - hidg->report_desc = kmemdup(opts->report_desc, - opts->report_desc_length, - GFP_KERNEL); + hidg->report_desc = devm_kmemdup(&hidg->dev, opts->report_desc, + opts->report_desc_length, + GFP_KERNEL); if (!hidg->report_desc) { - kfree(hidg); + put_device(&hidg->dev); + --opts->refcnt; mutex_unlock(&opts->lock); return ERR_PTR(-ENOMEM); } diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c index bf0a3fc2d776..5df1b68e5eac 100644 --- a/drivers/usb/gadget/function/f_uvc.c +++ b/drivers/usb/gadget/function/f_uvc.c @@ -213,8 +213,9 @@ uvc_function_ep0_complete(struct usb_ep *ep, struct usb_request *req) memset(&v4l2_event, 0, sizeof(v4l2_event)); v4l2_event.type = UVC_EVENT_DATA; - uvc_event->data.length = req->actual; - memcpy(&uvc_event->data.data, req->buf, req->actual); + uvc_event->data.length = min_t(unsigned int, req->actual, + sizeof(uvc_event->data.data)); + memcpy(&uvc_event->data.data, req->buf, uvc_event->data.length); v4l2_event_queue(&uvc->vdev, &v4l2_event); } } diff --git a/drivers/usb/gadget/udc/fotg210-udc.c b/drivers/usb/gadget/udc/fotg210-udc.c index fdca28e72a3b..d0e051beb3af 100644 --- a/drivers/usb/gadget/udc/fotg210-udc.c +++ b/drivers/usb/gadget/udc/fotg210-udc.c @@ -629,10 +629,10 @@ static void fotg210_request_error(struct fotg210_udc *fotg210) static void fotg210_set_address(struct fotg210_udc *fotg210, struct usb_ctrlrequest *ctrl) { - if (ctrl->wValue >= 0x0100) { + if (le16_to_cpu(ctrl->wValue) >= 0x0100) { fotg210_request_error(fotg210); } else { - fotg210_set_dev_addr(fotg210, ctrl->wValue); + fotg210_set_dev_addr(fotg210, le16_to_cpu(ctrl->wValue)); fotg210_set_cxdone(fotg210); } } @@ -713,17 +713,17 @@ static void fotg210_get_status(struct fotg210_udc *fotg210, switch (ctrl->bRequestType & USB_RECIP_MASK) { case USB_RECIP_DEVICE: - fotg210->ep0_data = 1 << USB_DEVICE_SELF_POWERED; + fotg210->ep0_data = cpu_to_le16(1 << USB_DEVICE_SELF_POWERED); break; case USB_RECIP_INTERFACE: - fotg210->ep0_data = 0; + fotg210->ep0_data = cpu_to_le16(0); break; case USB_RECIP_ENDPOINT: epnum = ctrl->wIndex & USB_ENDPOINT_NUMBER_MASK; if (epnum) fotg210->ep0_data = - fotg210_is_epnstall(fotg210->ep[epnum]) - << USB_ENDPOINT_HALT; + cpu_to_le16(fotg210_is_epnstall(fotg210->ep[epnum]) + << USB_ENDPOINT_HALT); else fotg210_request_error(fotg210); break; diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c index c53f6f276d5c..f8a63c143492 100644 --- a/drivers/usb/host/xhci-mtk.c +++ b/drivers/usb/host/xhci-mtk.c @@ -619,7 +619,6 @@ static int xhci_mtk_probe(struct platform_device *pdev) dealloc_usb3_hcd: usb_remove_hcd(xhci->shared_hcd); - xhci->shared_hcd = NULL; dealloc_usb2_hcd: usb_remove_hcd(hcd); diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 504f32bf60f6..592e1482cc59 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -61,6 +61,7 @@ #define PCI_DEVICE_ID_INTEL_TIGER_LAKE_XHCI 0x9a13 #define PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI 0x1138 #define PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI 0x51ed +#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_N_PCH_XHCI 0x54ed #define 
PCI_DEVICE_ID_AMD_RENOIR_XHCI 0x1639 #define PCI_DEVICE_ID_AMD_PROMONTORYA_4 0x43b9 @@ -259,7 +260,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) xhci->quirks |= XHCI_MISSING_CAS; if (pdev->vendor == PCI_VENDOR_ID_INTEL && - pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI) + (pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI || + pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_N_PCH_XHCI)) xhci->quirks |= XHCI_RESET_TO_DEFAULT; if (pdev->vendor == PCI_VENDOR_ID_INTEL && diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 2daaf9bc2a2c..3fe9b6e60b2b 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -2534,7 +2534,7 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, switch (trb_comp_code) { case COMP_SUCCESS: - ep_ring->err_count = 0; + ep->err_count = 0; /* handle success with untransferred data as short packet */ if (ep_trb != td->last_trb || remaining) { xhci_warn(xhci, "WARN Successful completion on short TX\n"); @@ -2560,7 +2560,7 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, break; case COMP_USB_TRANSACTION_ERROR: if (xhci->quirks & XHCI_NO_SOFT_RETRY || - (ep_ring->err_count++ > MAX_SOFT_RETRY) || + (ep->err_count++ > MAX_SOFT_RETRY) || le32_to_cpu(slot_ctx->tt_info) & TT_SLOT) break; @@ -2641,8 +2641,14 @@ static int handle_tx_event(struct xhci_hcd *xhci, case COMP_USB_TRANSACTION_ERROR: case COMP_INVALID_STREAM_TYPE_ERROR: case COMP_INVALID_STREAM_ID_ERROR: - xhci_handle_halted_endpoint(xhci, ep, 0, NULL, - EP_SOFT_RESET); + xhci_dbg(xhci, "Stream transaction error ep %u no id\n", + ep_index); + if (ep->err_count++ > MAX_SOFT_RETRY) + xhci_handle_halted_endpoint(xhci, ep, 0, NULL, + EP_HARD_RESET); + else + xhci_handle_halted_endpoint(xhci, ep, 0, NULL, + EP_SOFT_RESET); goto cleanup; case COMP_RING_UNDERRUN: case COMP_RING_OVERRUN: diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 65132c4505e7..142c272a7315 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -933,6 +933,7 @@ struct xhci_virt_ep { * have to restore the device state to the previous state */ struct xhci_ring *new_ring; + unsigned int err_count; unsigned int ep_state; #define SET_DEQ_PENDING (1 << 0) #define EP_HALTED (1 << 1) /* For stall handling */ @@ -1629,7 +1630,6 @@ struct xhci_ring { * if we own the TRB (if we are the consumer). See section 4.9.1. 
*/ u32 cycle_state; - unsigned int err_count; unsigned int stream_id; unsigned int num_segs; unsigned int num_trbs_free; diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c index dc67fff8e941..22c3df49ba8a 100644 --- a/drivers/usb/musb/musb_gadget.c +++ b/drivers/usb/musb/musb_gadget.c @@ -1628,8 +1628,6 @@ static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA) { struct musb *musb = gadget_to_musb(gadget); - if (!musb->xceiv->set_power) - return -EOPNOTSUPP; return usb_phy_set_power(musb->xceiv, mA); } diff --git a/drivers/usb/roles/class.c b/drivers/usb/roles/class.c index dfaed7eee94f..32e6d19f7011 100644 --- a/drivers/usb/roles/class.c +++ b/drivers/usb/roles/class.c @@ -106,10 +106,13 @@ usb_role_switch_is_parent(struct fwnode_handle *fwnode) struct fwnode_handle *parent = fwnode_get_parent(fwnode); struct device *dev; - if (!parent || !fwnode_property_present(parent, "usb-role-switch")) + if (!fwnode_property_present(parent, "usb-role-switch")) { + fwnode_handle_put(parent); return NULL; + } dev = class_find_device_by_fwnode(role_class, parent); + fwnode_handle_put(parent); return dev ? to_role_switch(dev) : ERR_PTR(-EPROBE_DEFER); } diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index a2126b07e854..9bb20779f156 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c @@ -195,6 +195,8 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x16DC, 0x0015) }, /* W-IE-NE-R Plein & Baus GmbH CML Control, Monitoring and Data Logger */ { USB_DEVICE(0x17A8, 0x0001) }, /* Kamstrup Optical Eye/3-wire */ { USB_DEVICE(0x17A8, 0x0005) }, /* Kamstrup M-Bus Master MultiPort 250D */ + { USB_DEVICE(0x17A8, 0x0011) }, /* Kamstrup 444 MHz RF sniffer */ + { USB_DEVICE(0x17A8, 0x0013) }, /* Kamstrup 870 MHz RF sniffer */ { USB_DEVICE(0x17A8, 0x0101) }, /* Kamstrup 868 MHz wM-Bus C-Mode Meter Reader (Int Ant) */ { USB_DEVICE(0x17A8, 0x0102) }, /* Kamstrup 868 MHz wM-Bus C-Mode Meter Reader (Ext Ant) */ { USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */ diff --git a/drivers/usb/serial/f81232.c b/drivers/usb/serial/f81232.c index a7a7af8d05bf..e04bdb308265 100644 --- a/drivers/usb/serial/f81232.c +++ b/drivers/usb/serial/f81232.c @@ -130,9 +130,6 @@ static u8 const clock_table[] = { F81232_CLK_1_846_MHZ, F81232_CLK_14_77_MHZ, static int calc_baud_divisor(speed_t baudrate, speed_t clockrate) { - if (!baudrate) - return 0; - return DIV_ROUND_CLOSEST(clockrate, baudrate); } @@ -519,9 +516,14 @@ static void f81232_set_baudrate(struct tty_struct *tty, speed_t baud_list[] = { baudrate, old_baudrate, F81232_DEF_BAUDRATE }; for (i = 0; i < ARRAY_SIZE(baud_list); ++i) { - idx = f81232_find_clk(baud_list[i]); + baudrate = baud_list[i]; + if (baudrate == 0) { + tty_encode_baud_rate(tty, 0, 0); + return; + } + + idx = f81232_find_clk(baudrate); if (idx >= 0) { - baudrate = baud_list[i]; tty_encode_baud_rate(tty, baudrate, baudrate); break; } diff --git a/drivers/usb/serial/f81534.c b/drivers/usb/serial/f81534.c index c0bca52ef92a..556d4e0dda87 100644 --- a/drivers/usb/serial/f81534.c +++ b/drivers/usb/serial/f81534.c @@ -536,9 +536,6 @@ static int f81534_submit_writer(struct usb_serial_port *port, gfp_t mem_flags) static u32 f81534_calc_baud_divisor(u32 baudrate, u32 clockrate) { - if (!baudrate) - return 0; - /* Round to nearest divisor */ return DIV_ROUND_CLOSEST(clockrate, baudrate); } @@ -568,9 +565,14 @@ static int f81534_set_port_config(struct usb_serial_port *port, u32 baud_list[] = {baudrate, 
old_baudrate, F81534_DEFAULT_BAUD_RATE}; for (i = 0; i < ARRAY_SIZE(baud_list); ++i) { - idx = f81534_find_clk(baud_list[i]); + baudrate = baud_list[i]; + if (baudrate == 0) { + tty_encode_baud_rate(tty, 0, 0); + return 0; + } + + idx = f81534_find_clk(baudrate); if (idx >= 0) { - baudrate = baud_list[i]; tty_encode_baud_rate(tty, baudrate, baudrate); break; } diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index c3b7f1d98e78..dee79c7d82d5 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -255,6 +255,7 @@ static void option_instat_callback(struct urb *urb); #define QUECTEL_PRODUCT_EP06 0x0306 #define QUECTEL_PRODUCT_EM05G 0x030a #define QUECTEL_PRODUCT_EM060K 0x030b +#define QUECTEL_PRODUCT_EM05G_SG 0x0311 #define QUECTEL_PRODUCT_EM12 0x0512 #define QUECTEL_PRODUCT_RM500Q 0x0800 #define QUECTEL_PRODUCT_RM520N 0x0801 @@ -1160,6 +1161,8 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0, 0) }, { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05G, 0xff), .driver_info = RSVD(6) | ZLP }, + { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05G_SG, 0xff), + .driver_info = RSVD(6) | ZLP }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0x00, 0x40) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0xff, 0x30) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0xff, 0x40) }, diff --git a/drivers/usb/storage/alauda.c b/drivers/usb/storage/alauda.c index 20b857e97e60..7e4ce0e7e05a 100644 --- a/drivers/usb/storage/alauda.c +++ b/drivers/usb/storage/alauda.c @@ -438,6 +438,8 @@ static int alauda_init_media(struct us_data *us) + MEDIA_INFO(us).blockshift + MEDIA_INFO(us).pageshift); MEDIA_INFO(us).pba_to_lba = kcalloc(num_zones, sizeof(u16*), GFP_NOIO); MEDIA_INFO(us).lba_to_pba = kcalloc(num_zones, sizeof(u16*), GFP_NOIO); + if (MEDIA_INFO(us).pba_to_lba == NULL || MEDIA_INFO(us).lba_to_pba == NULL) + return USB_STOR_TRANSPORT_ERROR; if (alauda_reset_media(us) != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_ERROR; diff --git a/drivers/usb/typec/bus.c b/drivers/usb/typec/bus.c index 78e0e78954f2..0aefb9e14f22 100644 --- a/drivers/usb/typec/bus.c +++ b/drivers/usb/typec/bus.c @@ -134,7 +134,7 @@ int typec_altmode_exit(struct typec_altmode *adev) if (!adev || !adev->active) return 0; - if (!pdev->ops || !pdev->ops->enter) + if (!pdev->ops || !pdev->ops->exit) return -EOPNOTSUPP; /* Moving to USB Safe State */ diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c index ff6c14d7b1a8..339752fef65e 100644 --- a/drivers/usb/typec/class.c +++ b/drivers/usb/typec/class.c @@ -1895,6 +1895,49 @@ void *typec_get_drvdata(struct typec_port *port) } EXPORT_SYMBOL_GPL(typec_get_drvdata); +int typec_get_fw_cap(struct typec_capability *cap, + struct fwnode_handle *fwnode) +{ + const char *cap_str; + int ret; + + cap->fwnode = fwnode; + + ret = fwnode_property_read_string(fwnode, "power-role", &cap_str); + if (ret < 0) + return ret; + + ret = typec_find_port_power_role(cap_str); + if (ret < 0) + return ret; + cap->type = ret; + + /* USB data support is optional */ + ret = fwnode_property_read_string(fwnode, "data-role", &cap_str); + if (ret == 0) { + ret = typec_find_port_data_role(cap_str); + if (ret < 0) + return ret; + cap->data = ret; + } + + /* Get the preferred power role for a DRP */ + if (cap->type == TYPEC_PORT_DRP) { + cap->prefer_role 
= TYPEC_NO_PREFERRED_ROLE; + + ret = fwnode_property_read_string(fwnode, "try-power-role", &cap_str); + if (ret == 0) { + ret = typec_find_power_role(cap_str); + if (ret < 0) + return ret; + cap->prefer_role = ret; + } + } + + return 0; +} +EXPORT_SYMBOL_GPL(typec_get_fw_cap); + /** * typec_port_register_altmode - Register USB Type-C Port Alternate Mode * @port: USB Type-C Port that supports the alternate mode diff --git a/drivers/usb/typec/tcpm/tcpci.c b/drivers/usb/typec/tcpm/tcpci.c index 64e248117c41..5340a3a3a81b 100644 --- a/drivers/usb/typec/tcpm/tcpci.c +++ b/drivers/usb/typec/tcpm/tcpci.c @@ -817,8 +817,10 @@ struct tcpci *tcpci_register_port(struct device *dev, struct tcpci_data *data) return ERR_PTR(err); tcpci->port = tcpm_register_port(tcpci->dev, &tcpci->tcpc); - if (IS_ERR(tcpci->port)) + if (IS_ERR(tcpci->port)) { + fwnode_handle_put(tcpci->tcpc.fwnode); return ERR_CAST(tcpci->port); + } return tcpci; } @@ -827,6 +829,7 @@ EXPORT_SYMBOL_GPL(tcpci_register_port); void tcpci_unregister_port(struct tcpci *tcpci) { tcpm_unregister_port(tcpci->port); + fwnode_handle_put(tcpci->tcpc.fwnode); } EXPORT_SYMBOL_GPL(tcpci_unregister_port); diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c index 33aadc0a29ea..984a13a9efc2 100644 --- a/drivers/usb/typec/tcpm/tcpm.c +++ b/drivers/usb/typec/tcpm/tcpm.c @@ -5928,7 +5928,6 @@ static int tcpm_fw_get_caps(struct tcpm_port *port, struct fwnode_handle *fwnode) { const char *opmode_str; - const char *cap_str; int ret; u32 mw, frs_current; @@ -5944,23 +5943,10 @@ static int tcpm_fw_get_caps(struct tcpm_port *port, */ fw_devlink_purge_absent_suppliers(fwnode); - /* USB data support is optional */ - ret = fwnode_property_read_string(fwnode, "data-role", &cap_str); - if (ret == 0) { - ret = typec_find_port_data_role(cap_str); - if (ret < 0) - return ret; - port->typec_caps.data = ret; - } - - ret = fwnode_property_read_string(fwnode, "power-role", &cap_str); + ret = typec_get_fw_cap(&port->typec_caps, fwnode); if (ret < 0) return ret; - ret = typec_find_port_power_role(cap_str); - if (ret < 0) - return ret; - port->typec_caps.type = ret; port->port_type = port->typec_caps.type; port->pd_supported = !fwnode_property_read_bool(fwnode, "pd-disable"); @@ -5997,14 +5983,6 @@ static int tcpm_fw_get_caps(struct tcpm_port *port, if (port->port_type == TYPEC_PORT_SRC) return 0; - /* Get the preferred power role for DRP */ - ret = fwnode_property_read_string(fwnode, "try-power-role", &cap_str); - if (ret < 0) - return ret; - - port->typec_caps.prefer_role = typec_find_power_role(cap_str); - if (port->typec_caps.prefer_role < 0) - return -EINVAL; sink: port->self_powered = fwnode_property_read_bool(fwnode, "self-powered"); diff --git a/drivers/usb/typec/tipd/core.c b/drivers/usb/typec/tipd/core.c index 23a8b9b0b1fe..2f32c3fceef8 100644 --- a/drivers/usb/typec/tipd/core.c +++ b/drivers/usb/typec/tipd/core.c @@ -684,14 +684,13 @@ static int tps6598x_probe(struct i2c_client *client) ret = devm_tps6598_psy_register(tps); if (ret) - return ret; + goto err_role_put; tps->port = typec_register_port(&client->dev, &typec_cap); if (IS_ERR(tps->port)) { ret = PTR_ERR(tps->port); goto err_role_put; } - fwnode_handle_put(fwnode); if (status & TPS_STATUS_PLUG_PRESENT) { ret = tps6598x_connect(tps, status); @@ -710,6 +709,7 @@ static int tps6598x_probe(struct i2c_client *client) } i2c_set_clientdata(client, tps); + fwnode_handle_put(fwnode); return 0; diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c index 
2faf3bd1c3ba..4d9e3fdae5f6 100644 --- a/drivers/vdpa/vdpa_sim/vdpa_sim.c +++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c @@ -66,8 +66,7 @@ static void vdpasim_queue_ready(struct vdpasim *vdpasim, unsigned int idx) { struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx]; - vringh_init_iotlb(&vq->vring, vdpasim->dev_attr.supported_features, - VDPASIM_QUEUE_MAX, false, + vringh_init_iotlb(&vq->vring, vdpasim->features, vq->num, false, (struct vring_desc *)(uintptr_t)vq->desc_addr, (struct vring_avail *) (uintptr_t)vq->driver_addr, diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c b/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c index a790903f243e..22b812c32bee 100644 --- a/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c +++ b/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c @@ -308,8 +308,10 @@ static int __init vdpasim_blk_init(void) int ret; ret = device_register(&vdpasim_blk_mgmtdev); - if (ret) + if (ret) { + put_device(&vdpasim_blk_mgmtdev); return ret; + } ret = vdpa_mgmtdev_register(&mgmt_dev); if (ret) diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim_net.c b/drivers/vdpa/vdpa_sim/vdpa_sim_net.c index a1ab6163f7d1..f1c420c5e26e 100644 --- a/drivers/vdpa/vdpa_sim/vdpa_sim_net.c +++ b/drivers/vdpa/vdpa_sim/vdpa_sim_net.c @@ -194,8 +194,10 @@ static int __init vdpasim_net_init(void) } ret = device_register(&vdpasim_net_mgmtdev); - if (ret) + if (ret) { + put_device(&vdpasim_net_mgmtdev); return ret; + } ret = vdpa_mgmtdev_register(&mgmt_dev); if (ret) diff --git a/drivers/vfio/platform/vfio_platform_common.c b/drivers/vfio/platform/vfio_platform_common.c index 6af7ce7d619c..701bd99a8719 100644 --- a/drivers/vfio/platform/vfio_platform_common.c +++ b/drivers/vfio/platform/vfio_platform_common.c @@ -72,12 +72,11 @@ static int vfio_platform_acpi_call_reset(struct vfio_platform_device *vdev, const char **extra_dbg) { #ifdef CONFIG_ACPI - struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; struct device *dev = vdev->device; acpi_handle handle = ACPI_HANDLE(dev); acpi_status acpi_ret; - acpi_ret = acpi_evaluate_object(handle, "_RST", NULL, &buffer); + acpi_ret = acpi_evaluate_object(handle, "_RST", NULL, NULL); if (ACPI_FAILURE(acpi_ret)) { if (extra_dbg) *extra_dbg = acpi_format_exception(acpi_ret); diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index 6942472cffb0..0a9746bc9228 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -2048,7 +2048,7 @@ static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len, struct vhost_dev *dev = vq->dev; struct vhost_iotlb *umem = dev->iotlb ? 
dev->iotlb : dev->umem; struct iovec *_iov; - u64 s = 0; + u64 s = 0, last = addr + len - 1; int ret = 0; while ((u64)len > s) { @@ -2058,7 +2058,7 @@ static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len, break; } - map = vhost_iotlb_itree_first(umem, addr, addr + len - 1); + map = vhost_iotlb_itree_first(umem, addr, last); if (map == NULL || map->start > addr) { if (umem != dev->iotlb) { ret = -EFAULT; diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c index eab55accf381..786876af0a73 100644 --- a/drivers/vhost/vringh.c +++ b/drivers/vhost/vringh.c @@ -1101,7 +1101,7 @@ static int iotlb_translate(const struct vringh *vrh, struct vhost_iotlb_map *map; struct vhost_iotlb *iotlb = vrh->iotlb; int ret = 0; - u64 s = 0; + u64 s = 0, last = addr + len - 1; spin_lock(vrh->iotlb_lock); @@ -1113,8 +1113,7 @@ static int iotlb_translate(const struct vringh *vrh, break; } - map = vhost_iotlb_itree_first(iotlb, addr, - addr + len - 1); + map = vhost_iotlb_itree_first(iotlb, addr, last); if (!map || map->start > addr) { ret = -EINVAL; break; diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c index 97bfe499222b..74ac0c28fe43 100644 --- a/drivers/vhost/vsock.c +++ b/drivers/vhost/vsock.c @@ -968,7 +968,14 @@ static int __init vhost_vsock_init(void) VSOCK_TRANSPORT_F_H2G); if (ret < 0) return ret; - return misc_register(&vhost_vsock_misc); + + ret = misc_register(&vhost_vsock_misc); + if (ret) { + vsock_core_unregister(&vhost_transport.transport); + return ret; + } + + return 0; }; static void __exit vhost_vsock_exit(void) diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig index e7f1a831f947..d10bc37144a1 100644 --- a/drivers/video/fbdev/Kconfig +++ b/drivers/video/fbdev/Kconfig @@ -620,6 +620,7 @@ config FB_TGA config FB_UVESA tristate "Userspace VESA VGA graphics support" depends on FB && CONNECTOR + depends on !UML select FB_CFB_FILLRECT select FB_CFB_COPYAREA select FB_CFB_IMAGEBLIT @@ -2232,7 +2233,6 @@ config FB_SSD1307 select FB_SYS_COPYAREA select FB_SYS_IMAGEBLIT select FB_DEFERRED_IO - select PWM select FB_BACKLIGHT help This driver implements support for the Solomon SSD1307 diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c index 1f37904b0405..df40360e04ff 100644 --- a/drivers/video/fbdev/core/fbcon.c +++ b/drivers/video/fbdev/core/fbcon.c @@ -2462,7 +2462,8 @@ err_out: if (userfont) { p->userfont = old_userfont; - REFCOUNT(data)--; + if (--REFCOUNT(data) == 0) + kfree(data - FONT_EXTRA_WORDS * sizeof(int)); } vc->vc_font.width = old_width; diff --git a/drivers/video/fbdev/ep93xx-fb.c b/drivers/video/fbdev/ep93xx-fb.c index 2398b3d48fed..305f1587bd89 100644 --- a/drivers/video/fbdev/ep93xx-fb.c +++ b/drivers/video/fbdev/ep93xx-fb.c @@ -552,12 +552,14 @@ static int ep93xxfb_probe(struct platform_device *pdev) err = register_framebuffer(info); if (err) - goto failed_check; + goto failed_framebuffer; dev_info(info->dev, "registered. 
Mode = %dx%d-%d\n", info->var.xres, info->var.yres, info->var.bits_per_pixel); return 0; +failed_framebuffer: + clk_disable_unprepare(fbi->clk); failed_check: if (fbi->mach_info->teardown) fbi->mach_info->teardown(pdev); diff --git a/drivers/video/fbdev/geode/Kconfig b/drivers/video/fbdev/geode/Kconfig index ac9c860592aa..85bc14b6faf6 100644 --- a/drivers/video/fbdev/geode/Kconfig +++ b/drivers/video/fbdev/geode/Kconfig @@ -5,6 +5,7 @@ config FB_GEODE bool "AMD Geode family framebuffer support" depends on FB && PCI && (X86_32 || (X86 && COMPILE_TEST)) + depends on !UML help Say 'Y' here to allow you to select framebuffer drivers for the AMD Geode family of processors. diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c index 58c304a3b7c4..de865e197c8d 100644 --- a/drivers/video/fbdev/hyperv_fb.c +++ b/drivers/video/fbdev/hyperv_fb.c @@ -799,12 +799,18 @@ static void hvfb_ondemand_refresh_throttle(struct hvfb_par *par, static int hvfb_on_panic(struct notifier_block *nb, unsigned long e, void *p) { + struct hv_device *hdev; struct hvfb_par *par; struct fb_info *info; par = container_of(nb, struct hvfb_par, hvfb_panic_nb); - par->synchronous_fb = true; info = par->info; + hdev = device_to_hv_device(info->device); + + if (hv_ringbuffer_spinlock_busy(hdev->channel)) + return NOTIFY_DONE; + + par->synchronous_fb = true; if (par->need_docopy) hvfb_docopy(par, 0, dio_fb_size); synthvid_update(info, 0, 0, INT_MAX, INT_MAX); diff --git a/drivers/video/fbdev/matrox/matroxfb_base.c b/drivers/video/fbdev/matrox/matroxfb_base.c index 236521b19daf..e7348d657e18 100644 --- a/drivers/video/fbdev/matrox/matroxfb_base.c +++ b/drivers/video/fbdev/matrox/matroxfb_base.c @@ -1377,8 +1377,8 @@ static struct video_board vbG200 = { .lowlevel = &matrox_G100 }; static struct video_board vbG200eW = { - .maxvram = 0x100000, - .maxdisplayable = 0x800000, + .maxvram = 0x1000000, + .maxdisplayable = 0x0800000, .accelID = FB_ACCEL_MATROX_MGAG200, .lowlevel = &matrox_G100 }; diff --git a/drivers/video/fbdev/pm2fb.c b/drivers/video/fbdev/pm2fb.c index cbcf112c88d3..e8690f7aea05 100644 --- a/drivers/video/fbdev/pm2fb.c +++ b/drivers/video/fbdev/pm2fb.c @@ -1530,8 +1530,10 @@ static int pm2fb_probe(struct pci_dev *pdev, const struct pci_device_id *id) } info = framebuffer_alloc(sizeof(struct pm2fb_par), &pdev->dev); - if (!info) - return -ENOMEM; + if (!info) { + err = -ENOMEM; + goto err_exit_disable; + } default_par = info->par; switch (pdev->device) { @@ -1712,6 +1714,8 @@ static int pm2fb_probe(struct pci_dev *pdev, const struct pci_device_id *id) release_mem_region(pm2fb_fix.mmio_start, pm2fb_fix.mmio_len); err_exit_neither: framebuffer_release(info); + err_exit_disable: + pci_disable_device(pdev); return retval; } @@ -1738,6 +1742,7 @@ static void pm2fb_remove(struct pci_dev *pdev) fb_dealloc_cmap(&info->cmap); kfree(info->pixmap.addr); framebuffer_release(info); + pci_disable_device(pdev); } static const struct pci_device_id pm2fb_id_table[] = { diff --git a/drivers/video/fbdev/uvesafb.c b/drivers/video/fbdev/uvesafb.c index 4df6772802d7..1f3b7e013568 100644 --- a/drivers/video/fbdev/uvesafb.c +++ b/drivers/video/fbdev/uvesafb.c @@ -1758,6 +1758,7 @@ static int uvesafb_probe(struct platform_device *dev) out_unmap: iounmap(info->screen_base); out_mem: + arch_phys_wc_del(par->mtrr_handle); release_mem_region(info->fix.smem_start, info->fix.smem_len); out_reg: release_region(0x3c0, 32); diff --git a/drivers/video/fbdev/vermilion/vermilion.c b/drivers/video/fbdev/vermilion/vermilion.c index 
ff61605b8764..a543643ce014 100644 --- a/drivers/video/fbdev/vermilion/vermilion.c +++ b/drivers/video/fbdev/vermilion/vermilion.c @@ -277,8 +277,10 @@ static int vmlfb_get_gpu(struct vml_par *par) mutex_unlock(&vml_mutex); - if (pci_enable_device(par->gpu) < 0) + if (pci_enable_device(par->gpu) < 0) { + pci_dev_put(par->gpu); return -ENODEV; + } return 0; } diff --git a/drivers/video/fbdev/via/via-core.c b/drivers/video/fbdev/via/via-core.c index 89d75079b730..0363b478fa3e 100644 --- a/drivers/video/fbdev/via/via-core.c +++ b/drivers/video/fbdev/via/via-core.c @@ -725,7 +725,14 @@ static int __init via_core_init(void) return ret; viafb_i2c_init(); viafb_gpio_init(); - return pci_register_driver(&via_driver); + ret = pci_register_driver(&via_driver); + if (ret) { + viafb_gpio_exit(); + viafb_i2c_exit(); + return ret; + } + + return 0; } static void __exit via_core_exit(void) diff --git a/drivers/vme/bridges/vme_fake.c b/drivers/vme/bridges/vme_fake.c index 6a1bc284f297..eae78366eb02 100644 --- a/drivers/vme/bridges/vme_fake.c +++ b/drivers/vme/bridges/vme_fake.c @@ -1073,6 +1073,8 @@ static int __init fake_init(void) /* We need a fake parent device */ vme_root = __root_device_register("vme", THIS_MODULE); + if (IS_ERR(vme_root)) + return PTR_ERR(vme_root); /* If we want to support more than one bridge at some point, we need to * dynamically allocate this so we get one per device. diff --git a/drivers/vme/bridges/vme_tsi148.c b/drivers/vme/bridges/vme_tsi148.c index be9051b02f24..5b4c766d15e6 100644 --- a/drivers/vme/bridges/vme_tsi148.c +++ b/drivers/vme/bridges/vme_tsi148.c @@ -1765,6 +1765,7 @@ static int tsi148_dma_list_add(struct vme_dma_list *list, return 0; err_dma: + list_del(&entry->list); err_dest: err_source: err_align: diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c index e88e8f6f0a33..719c5d1dda27 100644 --- a/drivers/xen/privcmd.c +++ b/drivers/xen/privcmd.c @@ -760,7 +760,7 @@ static long privcmd_ioctl_mmap_resource(struct file *file, goto out; } - pfns = kcalloc(kdata.num, sizeof(*pfns), GFP_KERNEL); + pfns = kcalloc(kdata.num, sizeof(*pfns), GFP_KERNEL | __GFP_NOWARN); if (!pfns) { rc = -ENOMEM; goto out; diff --git a/fs/afs/fs_probe.c b/fs/afs/fs_probe.c index 3ac5fcf98d0d..daaf3810cc92 100644 --- a/fs/afs/fs_probe.c +++ b/fs/afs/fs_probe.c @@ -366,12 +366,15 @@ void afs_fs_probe_dispatcher(struct work_struct *work) unsigned long nowj, timer_at, poll_at; bool first_pass = true, set_timer = false; - if (!net->live) + if (!net->live) { + afs_dec_servers_outstanding(net); return; + } _enter(""); if (list_empty(&net->fs_probe_fast) && list_empty(&net->fs_probe_slow)) { + afs_dec_servers_outstanding(net); _leave(" [none]"); return; } diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c index 830a6a876ffe..c316931fc99c 100644 --- a/fs/binfmt_elf_fdpic.c +++ b/fs/binfmt_elf_fdpic.c @@ -434,8 +434,9 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm) current->mm->start_stack = current->mm->start_brk + stack_size; #endif - if (create_elf_fdpic_tables(bprm, current->mm, - &exec_params, &interp_params) < 0) + retval = create_elf_fdpic_tables(bprm, current->mm, &exec_params, + &interp_params); + if (retval < 0) goto error; kdebug("- start_code %lx", current->mm->start_code); diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c index e1eae7ea823a..bb202ad369d5 100644 --- a/fs/binfmt_misc.c +++ b/fs/binfmt_misc.c @@ -44,10 +44,10 @@ static LIST_HEAD(entries); static int enabled = 1; enum {Enabled, Magic}; -#define MISC_FMT_PRESERVE_ARGV0 (1 << 31) -#define 
MISC_FMT_OPEN_BINARY (1 << 30) -#define MISC_FMT_CREDENTIALS (1 << 29) -#define MISC_FMT_OPEN_FILE (1 << 28) +#define MISC_FMT_PRESERVE_ARGV0 (1UL << 31) +#define MISC_FMT_OPEN_BINARY (1UL << 30) +#define MISC_FMT_CREDENTIALS (1UL << 29) +#define MISC_FMT_OPEN_FILE (1UL << 28) typedef struct { struct list_head list; diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c index 1ff527bbe54c..cd9202867d98 100644 --- a/fs/btrfs/backref.c +++ b/fs/btrfs/backref.c @@ -433,6 +433,7 @@ static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path, u64 wanted_disk_byte = ref->wanted_disk_byte; u64 count = 0; u64 data_offset; + u8 type; if (level != 0) { eb = path->nodes[level]; @@ -487,6 +488,9 @@ static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path, continue; } fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item); + type = btrfs_file_extent_type(eb, fi); + if (type == BTRFS_FILE_EXTENT_INLINE) + goto next; disk_byte = btrfs_file_extent_disk_bytenr(eb, fi); data_offset = btrfs_file_extent_offset(eb, fi); diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 2fd46093e5bb..2c80fc902f59 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -202,11 +202,9 @@ static bool btrfs_supported_super_csum(u16 csum_type) * Return 0 if the superblock checksum type matches the checksum value of that * algorithm. Pass the raw disk superblock data. */ -static int btrfs_check_super_csum(struct btrfs_fs_info *fs_info, - char *raw_disk_sb) +int btrfs_check_super_csum(struct btrfs_fs_info *fs_info, + const struct btrfs_super_block *disk_sb) { - struct btrfs_super_block *disk_sb = - (struct btrfs_super_block *)raw_disk_sb; char result[BTRFS_CSUM_SIZE]; SHASH_DESC_ON_STACK(shash, fs_info->csum_shash); @@ -217,7 +215,7 @@ static int btrfs_check_super_csum(struct btrfs_fs_info *fs_info, * BTRFS_SUPER_INFO_SIZE range, we expect that the unused space is * filled with zeros and is included in the checksum. */ - crypto_shash_digest(shash, raw_disk_sb + BTRFS_CSUM_SIZE, + crypto_shash_digest(shash, (const u8 *)disk_sb + BTRFS_CSUM_SIZE, BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE, result); if (memcmp(disk_sb->csum, result, fs_info->csum_size)) @@ -2491,8 +2489,8 @@ out: * 1, 2 2nd and 3rd backup copy * -1 skip bytenr check */ -static int validate_super(struct btrfs_fs_info *fs_info, - struct btrfs_super_block *sb, int mirror_num) +int btrfs_validate_super(struct btrfs_fs_info *fs_info, + struct btrfs_super_block *sb, int mirror_num) { u64 nodesize = btrfs_super_nodesize(sb); u64 sectorsize = btrfs_super_sectorsize(sb); @@ -2675,7 +2673,7 @@ static int validate_super(struct btrfs_fs_info *fs_info, */ static int btrfs_validate_mount_super(struct btrfs_fs_info *fs_info) { - return validate_super(fs_info, fs_info->super_copy, 0); + return btrfs_validate_super(fs_info, fs_info->super_copy, 0); } /* @@ -2689,7 +2687,7 @@ static int btrfs_validate_write_super(struct btrfs_fs_info *fs_info, { int ret; - ret = validate_super(fs_info, sb, -1); + ret = btrfs_validate_super(fs_info, sb, -1); if (ret < 0) goto out; if (!btrfs_supported_super_csum(btrfs_super_csum_type(sb))) { @@ -3210,7 +3208,7 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device * We want to check superblock checksum, the type is stored inside. * Pass the whole disk block of size BTRFS_SUPER_INFO_SIZE (4k). 
*/ - if (btrfs_check_super_csum(fs_info, (u8 *)disk_super)) { + if (btrfs_check_super_csum(fs_info, disk_super)) { btrfs_err(fs_info, "superblock checksum mismatch"); err = -EINVAL; btrfs_release_disk_super(disk_super); @@ -3703,7 +3701,7 @@ static void btrfs_end_super_write(struct bio *bio) } struct btrfs_super_block *btrfs_read_dev_one_super(struct block_device *bdev, - int copy_num) + int copy_num, bool drop_cache) { struct btrfs_super_block *super; struct page *page; @@ -3721,6 +3719,19 @@ struct btrfs_super_block *btrfs_read_dev_one_super(struct block_device *bdev, if (bytenr + BTRFS_SUPER_INFO_SIZE >= i_size_read(bdev->bd_inode)) return ERR_PTR(-EINVAL); + if (drop_cache) { + /* This should only be called with the primary sb. */ + ASSERT(copy_num == 0); + + /* + * Drop the page of the primary superblock, so later read will + * always read from the device. + */ + invalidate_inode_pages2_range(mapping, + bytenr >> PAGE_SHIFT, + (bytenr + BTRFS_SUPER_INFO_SIZE) >> PAGE_SHIFT); + } + page = read_cache_page_gfp(mapping, bytenr >> PAGE_SHIFT, GFP_NOFS); if (IS_ERR(page)) return ERR_CAST(page); @@ -3752,7 +3763,7 @@ struct btrfs_super_block *btrfs_read_dev_super(struct block_device *bdev) * later supers, using BTRFS_SUPER_MIRROR_MAX instead */ for (i = 0; i < 1; i++) { - super = btrfs_read_dev_one_super(bdev, i); + super = btrfs_read_dev_one_super(bdev, i, false); if (IS_ERR(super)) continue; diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h index 1b8fd3deafc9..718787dfdb8e 100644 --- a/fs/btrfs/disk-io.h +++ b/fs/btrfs/disk-io.h @@ -52,14 +52,18 @@ struct extent_buffer *btrfs_find_create_tree_block( void btrfs_clean_tree_block(struct extent_buffer *buf); void btrfs_clear_oneshot_options(struct btrfs_fs_info *fs_info); int btrfs_start_pre_rw_mount(struct btrfs_fs_info *fs_info); +int btrfs_check_super_csum(struct btrfs_fs_info *fs_info, + const struct btrfs_super_block *disk_sb); int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_devices, char *options); void __cold close_ctree(struct btrfs_fs_info *fs_info); +int btrfs_validate_super(struct btrfs_fs_info *fs_info, + struct btrfs_super_block *sb, int mirror_num); int write_all_supers(struct btrfs_fs_info *fs_info, int max_mirrors); struct btrfs_super_block *btrfs_read_dev_super(struct block_device *bdev); struct btrfs_super_block *btrfs_read_dev_one_super(struct block_device *bdev, - int copy_num); + int copy_num, bool drop_cache); int btrfs_commit_super(struct btrfs_fs_info *fs_info); struct btrfs_root *btrfs_read_tree_root(struct btrfs_root *tree_root, struct btrfs_key *key); diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 90934711dcf0..eae622ef4c6d 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -872,7 +872,10 @@ next_slot: args->start - extent_offset, 0, false); ret = btrfs_inc_extent_ref(trans, &ref); - BUG_ON(ret); /* -ENOMEM */ + if (ret) { + btrfs_abort_transaction(trans, ret); + break; + } } key.offset = args->start; } @@ -959,7 +962,10 @@ delete_extent_item: key.offset - extent_offset, 0, false); ret = btrfs_free_extent(trans, &ref); - BUG_ON(ret); /* -ENOMEM */ + if (ret) { + btrfs_abort_transaction(trans, ret); + break; + } args->bytes_found += extent_end - key.offset; } diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 391a4af9c5e5..ed9c715d2579 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -3415,13 +3415,10 @@ static long btrfs_ioctl_dev_info(struct btrfs_fs_info *fs_info, di_args->bytes_used = btrfs_device_get_bytes_used(dev); di_args->total_bytes = 
btrfs_device_get_total_bytes(dev); memcpy(di_args->uuid, dev->uuid, sizeof(di_args->uuid)); - if (dev->name) { - strncpy(di_args->path, rcu_str_deref(dev->name), - sizeof(di_args->path) - 1); - di_args->path[sizeof(di_args->path) - 1] = 0; - } else { + if (dev->name) + strscpy(di_args->path, rcu_str_deref(dev->name), sizeof(di_args->path)); + else di_args->path[0] = '\0'; - } out: rcu_read_unlock(); diff --git a/fs/btrfs/rcu-string.h b/fs/btrfs/rcu-string.h index 5c1a617eb25d..5c2b66d155ef 100644 --- a/fs/btrfs/rcu-string.h +++ b/fs/btrfs/rcu-string.h @@ -18,7 +18,11 @@ static inline struct rcu_string *rcu_string_strdup(const char *src, gfp_t mask) (len * sizeof(char)), mask); if (!ret) return ret; - strncpy(ret->str, src, len); + /* Warn if the source got unexpectedly truncated. */ + if (WARN_ON(strscpy(ret->str, src, len) < 0)) { + kfree(ret); + return NULL; + } return ret; } diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 61b84391be58..4ff55457f902 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -2497,11 +2497,87 @@ static int btrfs_freeze(struct super_block *sb) return btrfs_commit_transaction(trans); } +static int check_dev_super(struct btrfs_device *dev) +{ + struct btrfs_fs_info *fs_info = dev->fs_info; + struct btrfs_super_block *sb; + u16 csum_type; + int ret = 0; + + /* This should be called with fs still frozen. */ + ASSERT(test_bit(BTRFS_FS_FROZEN, &fs_info->flags)); + + /* Missing dev, no need to check. */ + if (!dev->bdev) + return 0; + + /* Only need to check the primary super block. */ + sb = btrfs_read_dev_one_super(dev->bdev, 0, true); + if (IS_ERR(sb)) + return PTR_ERR(sb); + + /* Verify the checksum. */ + csum_type = btrfs_super_csum_type(sb); + if (csum_type != btrfs_super_csum_type(fs_info->super_copy)) { + btrfs_err(fs_info, "csum type changed, has %u expect %u", + csum_type, btrfs_super_csum_type(fs_info->super_copy)); + ret = -EUCLEAN; + goto out; + } + + if (btrfs_check_super_csum(fs_info, sb)) { + btrfs_err(fs_info, "csum for on-disk super block no longer matches"); + ret = -EUCLEAN; + goto out; + } + + /* Btrfs_validate_super() includes fsid check against super->fsid. */ + ret = btrfs_validate_super(fs_info, sb, 0); + if (ret < 0) + goto out; + + if (btrfs_super_generation(sb) != fs_info->last_trans_committed) { + btrfs_err(fs_info, "transid mismatch, has %llu expect %llu", + btrfs_super_generation(sb), + fs_info->last_trans_committed); + ret = -EUCLEAN; + goto out; + } +out: + btrfs_release_disk_super(sb); + return ret; +} + static int btrfs_unfreeze(struct super_block *sb) { struct btrfs_fs_info *fs_info = btrfs_sb(sb); + struct btrfs_device *device; + int ret = 0; + /* + * Make sure the fs is not changed by accident (like hibernation then + * modified by other OS). + * If we found anything wrong, we mark the fs error immediately. + * + * And since the fs is frozen, no one can modify the fs yet, thus + * we don't need to hold device_list_mutex. + */ + list_for_each_entry(device, &fs_info->fs_devices->devices, dev_list) { + ret = check_dev_super(device); + if (ret < 0) { + btrfs_handle_fs_error(fs_info, ret, + "super block on devid %llu got modified unexpectedly", + device->devid); + break; + } + } clear_bit(BTRFS_FS_FROZEN, &fs_info->flags); + + /* + * We still return 0, to allow VFS layer to unfreeze the fs even the + * above checks failed. Since the fs is either fine or read-only, we're + * safe to continue, without causing further damage. 
+ */ return 0; } diff --git a/fs/btrfs/tree-defrag.c b/fs/btrfs/tree-defrag.c index 7c45d960b53c..259a3b5f9303 100644 --- a/fs/btrfs/tree-defrag.c +++ b/fs/btrfs/tree-defrag.c @@ -39,8 +39,10 @@ int btrfs_defrag_leaves(struct btrfs_trans_handle *trans, goto out; path = btrfs_alloc_path(); - if (!path) - return -ENOMEM; + if (!path) { + ret = -ENOMEM; + goto out; + } level = btrfs_header_level(root->node); diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index c886ec81c5d0..f01549b8c7c5 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -2074,7 +2074,7 @@ void btrfs_scratch_superblocks(struct btrfs_fs_info *fs_info, struct page *page; int ret; - disk_super = btrfs_read_dev_one_super(bdev, copy_num); + disk_super = btrfs_read_dev_one_super(bdev, copy_num, false); if (IS_ERR(disk_super)) continue; @@ -7043,6 +7043,27 @@ static void warn_32bit_meta_chunk(struct btrfs_fs_info *fs_info, } #endif +static struct btrfs_device *handle_missing_device(struct btrfs_fs_info *fs_info, + u64 devid, u8 *uuid) +{ + struct btrfs_device *dev; + + if (!btrfs_test_opt(fs_info, DEGRADED)) { + btrfs_report_missing_device(fs_info, devid, uuid, true); + return ERR_PTR(-ENOENT); + } + + dev = add_missing_dev(fs_info->fs_devices, devid, uuid); + if (IS_ERR(dev)) { + btrfs_err(fs_info, "failed to init missing device %llu: %ld", + devid, PTR_ERR(dev)); + return dev; + } + btrfs_report_missing_device(fs_info, devid, uuid, false); + + return dev; +} + static int read_one_chunk(struct btrfs_key *key, struct extent_buffer *leaf, struct btrfs_chunk *chunk) { @@ -7130,28 +7151,18 @@ static int read_one_chunk(struct btrfs_key *key, struct extent_buffer *leaf, BTRFS_UUID_SIZE); args.uuid = uuid; map->stripes[i].dev = btrfs_find_device(fs_info->fs_devices, &args); - if (!map->stripes[i].dev && - !btrfs_test_opt(fs_info, DEGRADED)) { - free_extent_map(em); - btrfs_report_missing_device(fs_info, devid, uuid, true); - return -ENOENT; - } if (!map->stripes[i].dev) { - map->stripes[i].dev = - add_missing_dev(fs_info->fs_devices, devid, - uuid); + map->stripes[i].dev = handle_missing_device(fs_info, + devid, uuid); if (IS_ERR(map->stripes[i].dev)) { + ret = PTR_ERR(map->stripes[i].dev); free_extent_map(em); - btrfs_err(fs_info, - "failed to init missing dev %llu: %ld", - devid, PTR_ERR(map->stripes[i].dev)); - return PTR_ERR(map->stripes[i].dev); + return ret; } - btrfs_report_missing_device(fs_info, devid, uuid, false); } + set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &(map->stripes[i].dev->dev_state)); - } write_lock(&map_tree->lock); diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index be96fe615bec..67b782b0a90a 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -2872,7 +2872,7 @@ int ceph_get_caps(struct file *filp, int need, int want, loff_t endoff, int *got while (true) { flags &= CEPH_FILE_MODE_MASK; - if (atomic_read(&fi->num_locks)) + if (vfs_inode_has_locks(inode)) flags |= CHECK_FILELOCK; _got = 0; ret = try_get_cap_refs(inode, need, want, endoff, diff --git a/fs/ceph/locks.c b/fs/ceph/locks.c index bdeb271f47d9..3e3b8be76b21 100644 --- a/fs/ceph/locks.c +++ b/fs/ceph/locks.c @@ -32,18 +32,14 @@ void __init ceph_flock_init(void) static void ceph_fl_copy_lock(struct file_lock *dst, struct file_lock *src) { - struct ceph_file_info *fi = dst->fl_file->private_data; struct inode *inode = file_inode(dst->fl_file); atomic_inc(&ceph_inode(inode)->i_filelock_ref); - atomic_inc(&fi->num_locks); } static void ceph_fl_release_lock(struct file_lock *fl) { - struct ceph_file_info *fi = fl->fl_file->private_data; struct inode 
*inode = file_inode(fl->fl_file); struct ceph_inode_info *ci = ceph_inode(inode); - atomic_dec(&fi->num_locks); if (atomic_dec_and_test(&ci->i_filelock_ref)) { /* clear error when all locks are released */ spin_lock(&ci->i_ceph_lock); diff --git a/fs/ceph/super.h b/fs/ceph/super.h index 14f951cd5b61..8c9021d0f837 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h @@ -773,7 +773,6 @@ struct ceph_file_info { struct list_head rw_contexts; u32 filp_gen; - atomic_t num_locks; }; struct ceph_dir_file_info { diff --git a/fs/char_dev.c b/fs/char_dev.c index ba0ded7842a7..3f667292608c 100644 --- a/fs/char_dev.c +++ b/fs/char_dev.c @@ -547,7 +547,7 @@ int cdev_device_add(struct cdev *cdev, struct device *dev) } rc = device_add(dev); - if (rc) + if (rc && dev->devt) cdev_del(cdev); return rc; diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 668dd6a86295..fc736ced6f4a 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -656,9 +656,15 @@ cifs_show_options(struct seq_file *s, struct dentry *root) seq_printf(s, ",echo_interval=%lu", tcon->ses->server->echo_interval / HZ); - /* Only display max_credits if it was overridden on mount */ + /* Only display the following if overridden on mount */ if (tcon->ses->server->max_credits != SMB2_MAX_CREDITS_AVAILABLE) seq_printf(s, ",max_credits=%u", tcon->ses->server->max_credits); + if (tcon->ses->server->tcp_nodelay) + seq_puts(s, ",tcpnodelay"); + if (tcon->ses->server->noautotune) + seq_puts(s, ",noautotune"); + if (tcon->ses->server->noblocksnd) + seq_puts(s, ",noblocksend"); if (tcon->snapshot_time) seq_printf(s, ",snapshot=%llu", tcon->snapshot_time); diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index 1ab72c3d0bff..0f1b9c48838c 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -13,6 +13,8 @@ #include #include #include +#include +#include #include #include #include "cifs_fs_sb.h" @@ -21,6 +23,7 @@ #include #include #include "smb2pdu.h" +#include "smb2glob.h" #define CIFS_MAGIC_NUMBER 0xFF534D42 /* the first four bytes of SMB PDUs */ @@ -1972,4 +1975,70 @@ static inline bool cifs_is_referral_server(struct cifs_tcon *tcon, return is_tcon_dfs(tcon) || (ref && (ref->flags & DFSREF_REFERRAL_SERVER)); } +static inline unsigned int cifs_get_num_sgs(const struct smb_rqst *rqst, + int num_rqst, + const u8 *sig) +{ + unsigned int len, skip; + unsigned int nents = 0; + unsigned long addr; + int i, j; + + /* Assumes the first rqst has a transform header as the first iov. + * I.e. + * rqst[0].rq_iov[0] is transform header + * rqst[0].rq_iov[1+] data to be encrypted/decrypted + * rqst[1+].rq_iov[0+] data to be encrypted/decrypted + */ + for (i = 0; i < num_rqst; i++) { + /* + * The first rqst has a transform header where the + * first 20 bytes are not part of the encrypted blob. + */ + for (j = 0; j < rqst[i].rq_nvec; j++) { + struct kvec *iov = &rqst[i].rq_iov[j]; + + skip = (i == 0) && (j == 0) ? 20 : 0; + addr = (unsigned long)iov->iov_base + skip; + if (unlikely(is_vmalloc_addr((void *)addr))) { + len = iov->iov_len - skip; + nents += DIV_ROUND_UP(offset_in_page(addr) + len, + PAGE_SIZE); + } else { + nents++; + } + } + nents += rqst[i].rq_npages; + } + nents += DIV_ROUND_UP(offset_in_page(sig) + SMB2_SIGNATURE_SIZE, PAGE_SIZE); + return nents; +} + +/* We can not use the normal sg_set_buf() as we will sometimes pass a + * stack object as buf. 
+ */ +static inline struct scatterlist *cifs_sg_set_buf(struct scatterlist *sg, + const void *buf, + unsigned int buflen) +{ + unsigned long addr = (unsigned long)buf; + unsigned int off = offset_in_page(addr); + + addr &= PAGE_MASK; + if (unlikely(is_vmalloc_addr((void *)addr))) { + do { + unsigned int len = min_t(unsigned int, buflen, PAGE_SIZE - off); + + sg_set_page(sg++, vmalloc_to_page((void *)addr), len, off); + + off = 0; + addr += PAGE_SIZE; + buflen -= len; + } while (buflen); + } else { + sg_set_page(sg++, virt_to_page(addr), buflen, off); + } + return sg; +} + #endif /* _CIFS_GLOB_H */ diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h index b2697356b5e7..50844d51da5d 100644 --- a/fs/cifs/cifsproto.h +++ b/fs/cifs/cifsproto.h @@ -590,8 +590,8 @@ int cifs_alloc_hash(const char *name, struct crypto_shash **shash, struct sdesc **sdesc); void cifs_free_hash(struct crypto_shash **shash, struct sdesc **sdesc); -extern void rqst_page_get_length(struct smb_rqst *rqst, unsigned int page, - unsigned int *len, unsigned int *offset); +void rqst_page_get_length(const struct smb_rqst *rqst, unsigned int page, + unsigned int *len, unsigned int *offset); struct cifs_chan * cifs_ses_find_chan(struct cifs_ses *ses, struct TCP_Server_Info *server); int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses); diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index a4284c4d7e03..555bd386a24d 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -1948,7 +1948,7 @@ cifs_set_cifscreds(struct smb3_fs_context *ctx __attribute__((unused)), struct cifs_ses * cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx) { - int rc = -ENOMEM; + int rc = 0; unsigned int xid; struct cifs_ses *ses; struct sockaddr_in *addr = (struct sockaddr_in *)&server->dstaddr; @@ -1990,6 +1990,8 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx) return ses; } + rc = -ENOMEM; + cifs_dbg(FYI, "Existing smb sess not found\n"); ses = sesInfoAlloc(); if (ses == NULL) diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c index 94143d7f58c7..3a90ee314ed7 100644 --- a/fs/cifs/misc.c +++ b/fs/cifs/misc.c @@ -1134,8 +1134,8 @@ cifs_free_hash(struct crypto_shash **shash, struct sdesc **sdesc) * @len: Where to store the length for this page: * @offset: Where to store the offset for this page */ -void rqst_page_get_length(struct smb_rqst *rqst, unsigned int page, - unsigned int *len, unsigned int *offset) +void rqst_page_get_length(const struct smb_rqst *rqst, unsigned int page, + unsigned int *len, unsigned int *offset) { *len = rqst->rq_pagesz; *offset = (page == 0) ? rqst->rq_offset : 0; diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index 5e6526c201fe..817d78129bd2 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c @@ -4416,69 +4416,82 @@ fill_transform_hdr(struct smb2_transform_hdr *tr_hdr, unsigned int orig_len, memcpy(&tr_hdr->SessionId, &shdr->SessionId, 8); } -/* We can not use the normal sg_set_buf() as we will sometimes pass a - * stack object as buf. 
- */ -static inline void smb2_sg_set_buf(struct scatterlist *sg, const void *buf, - unsigned int buflen) +static void *smb2_aead_req_alloc(struct crypto_aead *tfm, const struct smb_rqst *rqst, + int num_rqst, const u8 *sig, u8 **iv, + struct aead_request **req, struct scatterlist **sgl, + unsigned int *num_sgs) { - void *addr; - /* - * VMAP_STACK (at least) puts stack into the vmalloc address space - */ - if (is_vmalloc_addr(buf)) - addr = vmalloc_to_page(buf); - else - addr = virt_to_page(buf); - sg_set_page(sg, addr, buflen, offset_in_page(buf)); -} + unsigned int req_size = sizeof(**req) + crypto_aead_reqsize(tfm); + unsigned int iv_size = crypto_aead_ivsize(tfm); + unsigned int len; + u8 *p; -/* Assumes the first rqst has a transform header as the first iov. - * I.e. - * rqst[0].rq_iov[0] is transform header - * rqst[0].rq_iov[1+] data to be encrypted/decrypted - * rqst[1+].rq_iov[0+] data to be encrypted/decrypted - */ -static struct scatterlist * -init_sg(int num_rqst, struct smb_rqst *rqst, u8 *sign) -{ - unsigned int sg_len; - struct scatterlist *sg; - unsigned int i; - unsigned int j; - unsigned int idx = 0; - int skip; + *num_sgs = cifs_get_num_sgs(rqst, num_rqst, sig); - sg_len = 1; - for (i = 0; i < num_rqst; i++) - sg_len += rqst[i].rq_nvec + rqst[i].rq_npages; + len = iv_size; + len += crypto_aead_alignmask(tfm) & ~(crypto_tfm_ctx_alignment() - 1); + len = ALIGN(len, crypto_tfm_ctx_alignment()); + len += req_size; + len = ALIGN(len, __alignof__(struct scatterlist)); + len += *num_sgs * sizeof(**sgl); - sg = kmalloc_array(sg_len, sizeof(struct scatterlist), GFP_KERNEL); - if (!sg) + p = kmalloc(len, GFP_ATOMIC); + if (!p) return NULL; - sg_init_table(sg, sg_len); + *iv = (u8 *)PTR_ALIGN(p, crypto_aead_alignmask(tfm) + 1); + *req = (struct aead_request *)PTR_ALIGN(*iv + iv_size, + crypto_tfm_ctx_alignment()); + *sgl = (struct scatterlist *)PTR_ALIGN((u8 *)*req + req_size, + __alignof__(struct scatterlist)); + return p; +} + +static void *smb2_get_aead_req(struct crypto_aead *tfm, const struct smb_rqst *rqst, + int num_rqst, const u8 *sig, u8 **iv, + struct aead_request **req, struct scatterlist **sgl) +{ + unsigned int off, len, skip; + struct scatterlist *sg; + unsigned int num_sgs; + unsigned long addr; + int i, j; + void *p; + + p = smb2_aead_req_alloc(tfm, rqst, num_rqst, sig, iv, req, sgl, &num_sgs); + if (!p) + return NULL; + + sg_init_table(*sgl, num_sgs); + sg = *sgl; + + /* Assumes the first rqst has a transform header as the first iov. + * I.e. + * rqst[0].rq_iov[0] is transform header + * rqst[0].rq_iov[1+] data to be encrypted/decrypted + * rqst[1+].rq_iov[0+] data to be encrypted/decrypted + */ for (i = 0; i < num_rqst; i++) { + /* + * The first rqst has a transform header where the + * first 20 bytes are not part of the encrypted blob. + */ for (j = 0; j < rqst[i].rq_nvec; j++) { - /* - * The first rqst has a transform header where the - * first 20 bytes are not part of the encrypted blob - */ + struct kvec *iov = &rqst[i].rq_iov[j]; + skip = (i == 0) && (j == 0) ? 
20 : 0; - smb2_sg_set_buf(&sg[idx++], - rqst[i].rq_iov[j].iov_base + skip, - rqst[i].rq_iov[j].iov_len - skip); - } - + addr = (unsigned long)iov->iov_base + skip; + len = iov->iov_len - skip; + sg = cifs_sg_set_buf(sg, (void *)addr, len); + } for (j = 0; j < rqst[i].rq_npages; j++) { - unsigned int len, offset; - - rqst_page_get_length(&rqst[i], j, &len, &offset); - sg_set_page(&sg[idx++], rqst[i].rq_pages[j], len, offset); + rqst_page_get_length(&rqst[i], j, &len, &off); + sg_set_page(sg++, rqst[i].rq_pages[j], len, off); } } - smb2_sg_set_buf(&sg[idx], sign, SMB2_SIGNATURE_SIZE); - return sg; + cifs_sg_set_buf(sg, sig, SMB2_SIGNATURE_SIZE); + + return p; } static int @@ -4522,11 +4535,11 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst, u8 sign[SMB2_SIGNATURE_SIZE] = {}; u8 key[SMB3_ENC_DEC_KEY_SIZE]; struct aead_request *req; - char *iv; - unsigned int iv_len; + u8 *iv; DECLARE_CRYPTO_WAIT(wait); struct crypto_aead *tfm; unsigned int crypt_len = le32_to_cpu(tr_hdr->OriginalMessageSize); + void *creq; rc = smb2_get_enc_key(server, tr_hdr->SessionId, enc, key); if (rc) { @@ -4561,32 +4574,15 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst, return rc; } - req = aead_request_alloc(tfm, GFP_KERNEL); - if (!req) { - cifs_server_dbg(VFS, "%s: Failed to alloc aead request\n", __func__); + creq = smb2_get_aead_req(tfm, rqst, num_rqst, sign, &iv, &req, &sg); + if (unlikely(!creq)) return -ENOMEM; - } if (!enc) { memcpy(sign, &tr_hdr->Signature, SMB2_SIGNATURE_SIZE); crypt_len += SMB2_SIGNATURE_SIZE; } - sg = init_sg(num_rqst, rqst, sign); - if (!sg) { - cifs_server_dbg(VFS, "%s: Failed to init sg\n", __func__); - rc = -ENOMEM; - goto free_req; - } - - iv_len = crypto_aead_ivsize(tfm); - iv = kzalloc(iv_len, GFP_KERNEL); - if (!iv) { - cifs_server_dbg(VFS, "%s: Failed to alloc iv\n", __func__); - rc = -ENOMEM; - goto free_sg; - } - if ((server->cipher_type == SMB2_ENCRYPTION_AES128_GCM) || (server->cipher_type == SMB2_ENCRYPTION_AES256_GCM)) memcpy(iv, (char *)tr_hdr->Nonce, SMB3_AES_GCM_NONCE); @@ -4595,6 +4591,7 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst, memcpy(iv + 1, (char *)tr_hdr->Nonce, SMB3_AES_CCM_NONCE); } + aead_request_set_tfm(req, tfm); aead_request_set_crypt(req, sg, sg, crypt_len, iv); aead_request_set_ad(req, assoc_data_len); @@ -4607,11 +4604,7 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst, if (!rc && enc) memcpy(&tr_hdr->Signature, sign, SMB2_SIGNATURE_SIZE); - kfree(iv); -free_sg: - kfree(sg); -free_req: - kfree(req); + kfree_sensitive(creq); return rc; } diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c index d1f9d2632202..ec6519e1ca3b 100644 --- a/fs/configfs/dir.c +++ b/fs/configfs/dir.c @@ -316,6 +316,7 @@ static int configfs_create_dir(struct config_item *item, struct dentry *dentry, return 0; out_remove: + configfs_put(dentry->d_fsdata); configfs_remove_dirent(dentry); return PTR_ERR(inode); } @@ -382,6 +383,7 @@ int configfs_create_link(struct configfs_dirent *target, struct dentry *parent, return 0; out_remove: + configfs_put(dentry->d_fsdata); configfs_remove_dirent(dentry); return PTR_ERR(inode); } diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c index 950c63fa4d0b..38930d9b0bb7 100644 --- a/fs/debugfs/file.c +++ b/fs/debugfs/file.c @@ -378,8 +378,8 @@ ssize_t debugfs_attr_read(struct file *file, char __user *buf, } EXPORT_SYMBOL_GPL(debugfs_attr_read); -ssize_t debugfs_attr_write(struct file *file, const char __user *buf, - size_t len, loff_t *ppos) +static ssize_t debugfs_attr_write_xsigned(struct 
file *file, const char __user *buf, + size_t len, loff_t *ppos, bool is_signed) { struct dentry *dentry = F_DENTRY(file); ssize_t ret; @@ -387,12 +387,28 @@ ssize_t debugfs_attr_write(struct file *file, const char __user *buf, ret = debugfs_file_get(dentry); if (unlikely(ret)) return ret; - ret = simple_attr_write(file, buf, len, ppos); + if (is_signed) + ret = simple_attr_write_signed(file, buf, len, ppos); + else + ret = simple_attr_write(file, buf, len, ppos); debugfs_file_put(dentry); return ret; } + +ssize_t debugfs_attr_write(struct file *file, const char __user *buf, + size_t len, loff_t *ppos) +{ + return debugfs_attr_write_xsigned(file, buf, len, ppos, false); +} EXPORT_SYMBOL_GPL(debugfs_attr_write); +ssize_t debugfs_attr_write_signed(struct file *file, const char __user *buf, + size_t len, loff_t *ppos) +{ + return debugfs_attr_write_xsigned(file, buf, len, ppos, true); +} +EXPORT_SYMBOL_GPL(debugfs_attr_write_signed); + static struct dentry *debugfs_create_mode_unsafe(const char *name, umode_t mode, struct dentry *parent, void *value, const struct file_operations *fops, @@ -738,11 +754,11 @@ static int debugfs_atomic_t_get(void *data, u64 *val) *val = atomic_read((atomic_t *)data); return 0; } -DEFINE_DEBUGFS_ATTRIBUTE(fops_atomic_t, debugfs_atomic_t_get, +DEFINE_DEBUGFS_ATTRIBUTE_SIGNED(fops_atomic_t, debugfs_atomic_t_get, debugfs_atomic_t_set, "%lld\n"); -DEFINE_DEBUGFS_ATTRIBUTE(fops_atomic_t_ro, debugfs_atomic_t_get, NULL, +DEFINE_DEBUGFS_ATTRIBUTE_SIGNED(fops_atomic_t_ro, debugfs_atomic_t_get, NULL, "%lld\n"); -DEFINE_DEBUGFS_ATTRIBUTE(fops_atomic_t_wo, NULL, debugfs_atomic_t_set, +DEFINE_DEBUGFS_ATTRIBUTE_SIGNED(fops_atomic_t_wo, NULL, debugfs_atomic_t_set, "%lld\n"); /** diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c index 9d7078a1dc8b..d56a8f88a385 100644 --- a/fs/dlm/lowcomms.c +++ b/fs/dlm/lowcomms.c @@ -1520,7 +1520,11 @@ static void process_recv_sockets(struct work_struct *work) static void process_listen_recv_socket(struct work_struct *work) { - accept_from_sock(&listen_con); + int ret; + + do { + ret = accept_from_sock(&listen_con); + } while (!ret); } static void dlm_connect(struct connection *con) @@ -1797,7 +1801,7 @@ static int dlm_listen_for_all(void) result = sock->ops->listen(sock, 5); if (result < 0) { dlm_close_sock(&listen_con.sock); - goto out; + return result; } return 0; @@ -2000,7 +2004,6 @@ fail_listen: dlm_proto_ops = NULL; fail_proto_ops: dlm_allow_conn = 0; - dlm_close_sock(&listen_con.sock); work_stop(); fail_local: deinit_local(); diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c index a0fb0c4bdc7c..f9a79053f03a 100644 --- a/fs/ext4/balloc.c +++ b/fs/ext4/balloc.c @@ -665,7 +665,7 @@ int ext4_should_retry_alloc(struct super_block *sb, int *retries) * it's possible we've just missed a transaction commit here, * so ignore the returned status */ - jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id); + ext4_debug("%s: retrying operation after ENOSPC\n", sb->s_id); (void) jbd2_journal_force_commit_nested(sbi->s_journal); return 1; } diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 2d84030d7b7f..bc209f303327 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -559,7 +559,7 @@ enum { * * It's not paranoia if the Murphy's Law really *is* out to get you. 
:-) */ -#define TEST_FLAG_VALUE(FLAG) (EXT4_##FLAG##_FL == (1 << EXT4_INODE_##FLAG)) +#define TEST_FLAG_VALUE(FLAG) (EXT4_##FLAG##_FL == (1U << EXT4_INODE_##FLAG)) #define CHECK_FLAG_VALUE(FLAG) BUILD_BUG_ON(!TEST_FLAG_VALUE(FLAG)) static inline void ext4_check_flag_values(void) @@ -2996,7 +2996,8 @@ int do_journal_get_write_access(handle_t *handle, struct inode *inode, typedef enum { EXT4_IGET_NORMAL = 0, EXT4_IGET_SPECIAL = 0x0001, /* OK to iget a system inode */ - EXT4_IGET_HANDLE = 0x0002 /* Inode # is from a handle */ + EXT4_IGET_HANDLE = 0x0002, /* Inode # is from a handle */ + EXT4_IGET_BAD = 0x0004 /* Allow to iget a bad inode */ } ext4_iget_flags; extern struct inode *__ext4_iget(struct super_block *sb, unsigned long ino, @@ -3646,8 +3647,8 @@ extern void ext4_initialize_dirent_tail(struct buffer_head *bh, unsigned int blocksize); extern int ext4_handle_dirty_dirblock(handle_t *handle, struct inode *inode, struct buffer_head *bh); -extern int __ext4_unlink(handle_t *handle, struct inode *dir, const struct qstr *d_name, - struct inode *inode); +extern int __ext4_unlink(struct inode *dir, const struct qstr *d_name, + struct inode *inode, struct dentry *dentry); extern int __ext4_link(struct inode *dir, struct inode *inode, struct dentry *dentry); diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c index 3477a16d08ae..8e1fb18f465e 100644 --- a/fs/ext4/ext4_jbd2.c +++ b/fs/ext4/ext4_jbd2.c @@ -267,8 +267,7 @@ int __ext4_forget(const char *where, unsigned int line, handle_t *handle, trace_ext4_forget(inode, is_metadata, blocknr); BUFFER_TRACE(bh, "enter"); - jbd_debug(4, "forgetting bh %p: is_metadata = %d, mode %o, " - "data mode %x\n", + ext4_debug("forgetting bh %p: is_metadata=%d, mode %o, data mode %x\n", bh, is_metadata, inode->i_mode, test_opt(inode->i_sb, DATA_FLAGS)); diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index 9e09caaf9b0b..d3fae909fcbf 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -5810,6 +5810,14 @@ int ext4_clu_mapped(struct inode *inode, ext4_lblk_t lclu) struct ext4_extent *extent; ext4_lblk_t first_lblk, first_lclu, last_lclu; + /* + * if data can be stored inline, the logical cluster isn't + * mapped - no physical clusters have been allocated, and the + * file has no extents + */ + if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) + return 0; + /* search for the extent closest to the first block in the cluster */ path = ext4_find_extent(inode, EXT4_C2B(sbi, lclu), NULL, 0); if (IS_ERR(path)) { diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c index 9a3a8996aacf..aa99a3659edf 100644 --- a/fs/ext4/extents_status.c +++ b/fs/ext4/extents_status.c @@ -1372,7 +1372,7 @@ retry: if (count_reserved) count_rsvd(inode, lblk, orig_es.es_len - len1 - len2, &orig_es, &rc); - goto out; + goto out_get_reserved; } if (len1 > 0) { @@ -1414,6 +1414,7 @@ retry: } } +out_get_reserved: if (count_reserved) *reserved = get_rsvd(inode, end, es, &rc); out: diff --git a/fs/ext4/fast_commit.c b/fs/ext4/fast_commit.c index be3f8ce98962..a8d0a8081a1d 100644 --- a/fs/ext4/fast_commit.c +++ b/fs/ext4/fast_commit.c @@ -399,25 +399,34 @@ static int __track_dentry_update(struct inode *inode, void *arg, bool update) struct __track_dentry_update_args *dentry_update = (struct __track_dentry_update_args *)arg; struct dentry *dentry = dentry_update->dentry; - struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); + struct inode *dir = dentry->d_parent->d_inode; + struct super_block *sb = inode->i_sb; + struct ext4_sb_info *sbi = EXT4_SB(sb); 
mutex_unlock(&ei->i_fc_lock); + + if (IS_ENCRYPTED(dir)) { + ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_ENCRYPTED_FILENAME, + NULL); + mutex_lock(&ei->i_fc_lock); + return -EOPNOTSUPP; + } + node = kmem_cache_alloc(ext4_fc_dentry_cachep, GFP_NOFS); if (!node) { - ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_NOMEM, NULL); + ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_NOMEM, NULL); mutex_lock(&ei->i_fc_lock); return -ENOMEM; } node->fcd_op = dentry_update->op; - node->fcd_parent = dentry->d_parent->d_inode->i_ino; + node->fcd_parent = dir->i_ino; node->fcd_ino = inode->i_ino; if (dentry->d_name.len > DNAME_INLINE_LEN) { node->fcd_name.name = kmalloc(dentry->d_name.len, GFP_NOFS); if (!node->fcd_name.name) { kmem_cache_free(ext4_fc_dentry_cachep, node); - ext4_fc_mark_ineligible(inode->i_sb, - EXT4_FC_REASON_NOMEM, NULL); + ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_NOMEM, NULL); mutex_lock(&ei->i_fc_lock); return -ENOMEM; } @@ -595,6 +604,15 @@ static void ext4_fc_submit_bh(struct super_block *sb, bool is_tail) /* Ext4 commit path routines */ +/* memcpy to fc reserved space and update CRC */ +static void *ext4_fc_memcpy(struct super_block *sb, void *dst, const void *src, + int len, u32 *crc) +{ + if (crc) + *crc = ext4_chksum(EXT4_SB(sb), *crc, src, len); + return memcpy(dst, src, len); +} + /* memzero and update CRC */ static void *ext4_fc_memzero(struct super_block *sb, void *dst, int len, u32 *crc) @@ -620,62 +638,59 @@ static void *ext4_fc_memzero(struct super_block *sb, void *dst, int len, */ static u8 *ext4_fc_reserve_space(struct super_block *sb, int len, u32 *crc) { - struct ext4_fc_tl *tl; + struct ext4_fc_tl tl; struct ext4_sb_info *sbi = EXT4_SB(sb); struct buffer_head *bh; int bsize = sbi->s_journal->j_blocksize; int ret, off = sbi->s_fc_bytes % bsize; - int pad_len; + int remaining; + u8 *dst; /* - * After allocating len, we should have space at least for a 0 byte - * padding. + * If 'len' is too long to fit in any block alongside a PAD tlv, then we + * cannot fulfill the request. */ - if (len + sizeof(struct ext4_fc_tl) > bsize) + if (len > bsize - EXT4_FC_TAG_BASE_LEN) return NULL; - if (bsize - off - 1 > len + sizeof(struct ext4_fc_tl)) { - /* - * Only allocate from current buffer if we have enough space for - * this request AND we have space to add a zero byte padding. - */ - if (!sbi->s_fc_bh) { - ret = jbd2_fc_get_buf(EXT4_SB(sb)->s_journal, &bh); - if (ret) - return NULL; - sbi->s_fc_bh = bh; - } - sbi->s_fc_bytes += len; - return sbi->s_fc_bh->b_data + off; + if (!sbi->s_fc_bh) { + ret = jbd2_fc_get_buf(EXT4_SB(sb)->s_journal, &bh); + if (ret) + return NULL; + sbi->s_fc_bh = bh; } - /* Need to add PAD tag */ - tl = (struct ext4_fc_tl *)(sbi->s_fc_bh->b_data + off); - tl->fc_tag = cpu_to_le16(EXT4_FC_TAG_PAD); - pad_len = bsize - off - 1 - sizeof(struct ext4_fc_tl); - tl->fc_len = cpu_to_le16(pad_len); - if (crc) - *crc = ext4_chksum(sbi, *crc, tl, sizeof(*tl)); - if (pad_len > 0) - ext4_fc_memzero(sb, tl + 1, pad_len, crc); + dst = sbi->s_fc_bh->b_data + off; + + /* + * Allocate the bytes in the current block if we can do so while still + * leaving enough space for a PAD tlv. + */ + remaining = bsize - EXT4_FC_TAG_BASE_LEN - off; + if (len <= remaining) { + sbi->s_fc_bytes += len; + return dst; + } + + /* + * Else, terminate the current block with a PAD tlv, then allocate a new + * block and allocate the bytes at the start of that new block. 
+ */ + + tl.fc_tag = cpu_to_le16(EXT4_FC_TAG_PAD); + tl.fc_len = cpu_to_le16(remaining); + ext4_fc_memcpy(sb, dst, &tl, EXT4_FC_TAG_BASE_LEN, crc); + ext4_fc_memzero(sb, dst + EXT4_FC_TAG_BASE_LEN, remaining, crc); + ext4_fc_submit_bh(sb, false); ret = jbd2_fc_get_buf(EXT4_SB(sb)->s_journal, &bh); if (ret) return NULL; sbi->s_fc_bh = bh; - sbi->s_fc_bytes = (sbi->s_fc_bytes / bsize + 1) * bsize + len; + sbi->s_fc_bytes += bsize - off + len; return sbi->s_fc_bh->b_data; } -/* memcpy to fc reserved space and update CRC */ -static void *ext4_fc_memcpy(struct super_block *sb, void *dst, const void *src, - int len, u32 *crc) -{ - if (crc) - *crc = ext4_chksum(EXT4_SB(sb), *crc, src, len); - return memcpy(dst, src, len); -} - /* * Complete a fast commit by writing tail tag. * @@ -696,23 +711,25 @@ static int ext4_fc_write_tail(struct super_block *sb, u32 crc) * ext4_fc_reserve_space takes care of allocating an extra block if * there's no enough space on this block for accommodating this tail. */ - dst = ext4_fc_reserve_space(sb, sizeof(tl) + sizeof(tail), &crc); + dst = ext4_fc_reserve_space(sb, EXT4_FC_TAG_BASE_LEN + sizeof(tail), &crc); if (!dst) return -ENOSPC; off = sbi->s_fc_bytes % bsize; tl.fc_tag = cpu_to_le16(EXT4_FC_TAG_TAIL); - tl.fc_len = cpu_to_le16(bsize - off - 1 + sizeof(struct ext4_fc_tail)); + tl.fc_len = cpu_to_le16(bsize - off + sizeof(struct ext4_fc_tail)); sbi->s_fc_bytes = round_up(sbi->s_fc_bytes, bsize); - ext4_fc_memcpy(sb, dst, &tl, sizeof(tl), &crc); - dst += sizeof(tl); + ext4_fc_memcpy(sb, dst, &tl, EXT4_FC_TAG_BASE_LEN, &crc); + dst += EXT4_FC_TAG_BASE_LEN; tail.fc_tid = cpu_to_le32(sbi->s_journal->j_running_transaction->t_tid); ext4_fc_memcpy(sb, dst, &tail.fc_tid, sizeof(tail.fc_tid), &crc); dst += sizeof(tail.fc_tid); tail.fc_crc = cpu_to_le32(crc); ext4_fc_memcpy(sb, dst, &tail.fc_crc, sizeof(tail.fc_crc), NULL); + dst += sizeof(tail.fc_crc); + memset(dst, 0, bsize - off); /* Don't leak uninitialized memory. 
*/ ext4_fc_submit_bh(sb, true); @@ -729,15 +746,15 @@ static bool ext4_fc_add_tlv(struct super_block *sb, u16 tag, u16 len, u8 *val, struct ext4_fc_tl tl; u8 *dst; - dst = ext4_fc_reserve_space(sb, sizeof(tl) + len, crc); + dst = ext4_fc_reserve_space(sb, EXT4_FC_TAG_BASE_LEN + len, crc); if (!dst) return false; tl.fc_tag = cpu_to_le16(tag); tl.fc_len = cpu_to_le16(len); - ext4_fc_memcpy(sb, dst, &tl, sizeof(tl), crc); - ext4_fc_memcpy(sb, dst + sizeof(tl), val, len, crc); + ext4_fc_memcpy(sb, dst, &tl, EXT4_FC_TAG_BASE_LEN, crc); + ext4_fc_memcpy(sb, dst + EXT4_FC_TAG_BASE_LEN, val, len, crc); return true; } @@ -749,8 +766,8 @@ static bool ext4_fc_add_dentry_tlv(struct super_block *sb, u32 *crc, struct ext4_fc_dentry_info fcd; struct ext4_fc_tl tl; int dlen = fc_dentry->fcd_name.len; - u8 *dst = ext4_fc_reserve_space(sb, sizeof(tl) + sizeof(fcd) + dlen, - crc); + u8 *dst = ext4_fc_reserve_space(sb, + EXT4_FC_TAG_BASE_LEN + sizeof(fcd) + dlen, crc); if (!dst) return false; @@ -759,8 +776,8 @@ static bool ext4_fc_add_dentry_tlv(struct super_block *sb, u32 *crc, fcd.fc_ino = cpu_to_le32(fc_dentry->fcd_ino); tl.fc_tag = cpu_to_le16(fc_dentry->fcd_op); tl.fc_len = cpu_to_le16(sizeof(fcd) + dlen); - ext4_fc_memcpy(sb, dst, &tl, sizeof(tl), crc); - dst += sizeof(tl); + ext4_fc_memcpy(sb, dst, &tl, EXT4_FC_TAG_BASE_LEN, crc); + dst += EXT4_FC_TAG_BASE_LEN; ext4_fc_memcpy(sb, dst, &fcd, sizeof(fcd), crc); dst += sizeof(fcd); ext4_fc_memcpy(sb, dst, fc_dentry->fcd_name.name, dlen, crc); @@ -796,13 +813,13 @@ static int ext4_fc_write_inode(struct inode *inode, u32 *crc) ret = -ECANCELED; dst = ext4_fc_reserve_space(inode->i_sb, - sizeof(tl) + inode_len + sizeof(fc_inode.fc_ino), crc); + EXT4_FC_TAG_BASE_LEN + inode_len + sizeof(fc_inode.fc_ino), crc); if (!dst) goto err; - if (!ext4_fc_memcpy(inode->i_sb, dst, &tl, sizeof(tl), crc)) + if (!ext4_fc_memcpy(inode->i_sb, dst, &tl, EXT4_FC_TAG_BASE_LEN, crc)) goto err; - dst += sizeof(tl); + dst += EXT4_FC_TAG_BASE_LEN; if (!ext4_fc_memcpy(inode->i_sb, dst, &fc_inode, sizeof(fc_inode), crc)) goto err; dst += sizeof(fc_inode); @@ -840,8 +857,8 @@ static int ext4_fc_write_inode_data(struct inode *inode, u32 *crc) mutex_unlock(&ei->i_fc_lock); cur_lblk_off = old_blk_size; - jbd_debug(1, "%s: will try writing %d to %d for inode %ld\n", - __func__, cur_lblk_off, new_blk_size, inode->i_ino); + ext4_debug("will try writing %d to %d for inode %ld\n", + cur_lblk_off, new_blk_size, inode->i_ino); while (cur_lblk_off <= new_blk_size) { map.m_lblk = cur_lblk_off; @@ -1096,7 +1113,7 @@ static void ext4_fc_update_stats(struct super_block *sb, int status, { struct ext4_fc_stats *stats = &EXT4_SB(sb)->s_fc_stats; - jbd_debug(1, "Fast commit ended with status = %d", status); + ext4_debug("Fast commit ended with status = %d", status); if (status == EXT4_FC_STATUS_OK) { stats->fc_num_commits++; stats->fc_numblks += nblks; @@ -1266,7 +1283,7 @@ struct dentry_info_args { }; static inline void tl_to_darg(struct dentry_info_args *darg, - struct ext4_fc_tl *tl, u8 *val) + struct ext4_fc_tl *tl, u8 *val) { struct ext4_fc_dentry_info fcd; @@ -1275,8 +1292,14 @@ static inline void tl_to_darg(struct dentry_info_args *darg, darg->parent_ino = le32_to_cpu(fcd.fc_parent_ino); darg->ino = le32_to_cpu(fcd.fc_ino); darg->dname = val + offsetof(struct ext4_fc_dentry_info, fc_dname); - darg->dname_len = le16_to_cpu(tl->fc_len) - - sizeof(struct ext4_fc_dentry_info); + darg->dname_len = tl->fc_len - sizeof(struct ext4_fc_dentry_info); +} + +static inline void ext4_fc_get_tl(struct 
ext4_fc_tl *tl, u8 *val) +{ + memcpy(tl, val, EXT4_FC_TAG_BASE_LEN); + tl->fc_len = le16_to_cpu(tl->fc_len); + tl->fc_tag = le16_to_cpu(tl->fc_tag); } /* Unlink replay function */ @@ -1298,19 +1321,19 @@ static int ext4_fc_replay_unlink(struct super_block *sb, struct ext4_fc_tl *tl, inode = ext4_iget(sb, darg.ino, EXT4_IGET_NORMAL); if (IS_ERR(inode)) { - jbd_debug(1, "Inode %d not found", darg.ino); + ext4_debug("Inode %d not found", darg.ino); return 0; } old_parent = ext4_iget(sb, darg.parent_ino, EXT4_IGET_NORMAL); if (IS_ERR(old_parent)) { - jbd_debug(1, "Dir with inode %d not found", darg.parent_ino); + ext4_debug("Dir with inode %d not found", darg.parent_ino); iput(inode); return 0; } - ret = __ext4_unlink(NULL, old_parent, &entry, inode); + ret = __ext4_unlink(old_parent, &entry, inode, NULL); /* -ENOENT ok coz it might not exist anymore. */ if (ret == -ENOENT) ret = 0; @@ -1330,21 +1353,21 @@ static int ext4_fc_replay_link_internal(struct super_block *sb, dir = ext4_iget(sb, darg->parent_ino, EXT4_IGET_NORMAL); if (IS_ERR(dir)) { - jbd_debug(1, "Dir with inode %d not found.", darg->parent_ino); + ext4_debug("Dir with inode %d not found.", darg->parent_ino); dir = NULL; goto out; } dentry_dir = d_obtain_alias(dir); if (IS_ERR(dentry_dir)) { - jbd_debug(1, "Failed to obtain dentry"); + ext4_debug("Failed to obtain dentry"); dentry_dir = NULL; goto out; } dentry_inode = d_alloc(dentry_dir, &qstr_dname); if (!dentry_inode) { - jbd_debug(1, "Inode dentry not created."); + ext4_debug("Inode dentry not created."); ret = -ENOMEM; goto out; } @@ -1357,7 +1380,7 @@ static int ext4_fc_replay_link_internal(struct super_block *sb, * could complete. */ if (ret && ret != -EEXIST) { - jbd_debug(1, "Failed to link\n"); + ext4_debug("Failed to link\n"); goto out; } @@ -1391,7 +1414,7 @@ static int ext4_fc_replay_link(struct super_block *sb, struct ext4_fc_tl *tl, inode = ext4_iget(sb, darg.ino, EXT4_IGET_NORMAL); if (IS_ERR(inode)) { - jbd_debug(1, "Inode not found."); + ext4_debug("Inode not found."); return 0; } @@ -1441,7 +1464,7 @@ static int ext4_fc_replay_inode(struct super_block *sb, struct ext4_fc_tl *tl, struct ext4_inode *raw_fc_inode; struct inode *inode = NULL; struct ext4_iloc iloc; - int inode_len, ino, ret, tag = le16_to_cpu(tl->fc_tag); + int inode_len, ino, ret, tag = tl->fc_tag; struct ext4_extent_header *eh; memcpy(&fc_inode, val, sizeof(fc_inode)); @@ -1466,7 +1489,7 @@ static int ext4_fc_replay_inode(struct super_block *sb, struct ext4_fc_tl *tl, if (ret) goto out; - inode_len = le16_to_cpu(tl->fc_len) - sizeof(struct ext4_fc_inode); + inode_len = tl->fc_len - sizeof(struct ext4_fc_inode); raw_inode = ext4_raw_inode(&iloc); memcpy(raw_inode, raw_fc_inode, offsetof(struct ext4_inode, i_block)); @@ -1501,7 +1524,7 @@ static int ext4_fc_replay_inode(struct super_block *sb, struct ext4_fc_tl *tl, /* Given that we just wrote the inode on disk, this SHOULD succeed. 
*/ inode = ext4_iget(sb, ino, EXT4_IGET_NORMAL); if (IS_ERR(inode)) { - jbd_debug(1, "Inode not found."); + ext4_debug("Inode not found."); return -EFSCORRUPTED; } @@ -1554,7 +1577,7 @@ static int ext4_fc_replay_create(struct super_block *sb, struct ext4_fc_tl *tl, inode = ext4_iget(sb, darg.ino, EXT4_IGET_NORMAL); if (IS_ERR(inode)) { - jbd_debug(1, "inode %d not found.", darg.ino); + ext4_debug("inode %d not found.", darg.ino); inode = NULL; ret = -EINVAL; goto out; @@ -1567,7 +1590,7 @@ static int ext4_fc_replay_create(struct super_block *sb, struct ext4_fc_tl *tl, */ dir = ext4_iget(sb, darg.parent_ino, EXT4_IGET_NORMAL); if (IS_ERR(dir)) { - jbd_debug(1, "Dir %d not found.", darg.ino); + ext4_debug("Dir %d not found.", darg.ino); goto out; } ret = ext4_init_new_dir(NULL, dir, inode); @@ -1655,7 +1678,7 @@ static int ext4_fc_replay_add_range(struct super_block *sb, inode = ext4_iget(sb, le32_to_cpu(fc_add_ex.fc_ino), EXT4_IGET_NORMAL); if (IS_ERR(inode)) { - jbd_debug(1, "Inode not found."); + ext4_debug("Inode not found."); return 0; } @@ -1669,7 +1692,7 @@ static int ext4_fc_replay_add_range(struct super_block *sb, cur = start; remaining = len; - jbd_debug(1, "ADD_RANGE, lblk %d, pblk %lld, len %d, unwritten %d, inode %ld\n", + ext4_debug("ADD_RANGE, lblk %d, pblk %lld, len %d, unwritten %d, inode %ld\n", start, start_pblk, len, ext4_ext_is_unwritten(ex), inode->i_ino); @@ -1730,7 +1753,7 @@ static int ext4_fc_replay_add_range(struct super_block *sb, } /* Range is mapped and needs a state change */ - jbd_debug(1, "Converting from %ld to %d %lld", + ext4_debug("Converting from %ld to %d %lld", map.m_flags & EXT4_MAP_UNWRITTEN, ext4_ext_is_unwritten(ex), map.m_pblk); ret = ext4_ext_replay_update_ex(inode, cur, map.m_len, @@ -1773,7 +1796,7 @@ ext4_fc_replay_del_range(struct super_block *sb, struct ext4_fc_tl *tl, inode = ext4_iget(sb, le32_to_cpu(lrange.fc_ino), EXT4_IGET_NORMAL); if (IS_ERR(inode)) { - jbd_debug(1, "Inode %d not found", le32_to_cpu(lrange.fc_ino)); + ext4_debug("Inode %d not found", le32_to_cpu(lrange.fc_ino)); return 0; } @@ -1781,7 +1804,7 @@ ext4_fc_replay_del_range(struct super_block *sb, struct ext4_fc_tl *tl, if (ret) goto out; - jbd_debug(1, "DEL_RANGE, inode %ld, lblk %d, len %d\n", + ext4_debug("DEL_RANGE, inode %ld, lblk %d, len %d\n", inode->i_ino, le32_to_cpu(lrange.fc_lblk), le32_to_cpu(lrange.fc_len)); while (remaining > 0) { @@ -1830,7 +1853,7 @@ static void ext4_fc_set_bitmaps_and_counters(struct super_block *sb) inode = ext4_iget(sb, state->fc_modified_inodes[i], EXT4_IGET_NORMAL); if (IS_ERR(inode)) { - jbd_debug(1, "Inode %d not found.", + ext4_debug("Inode %d not found.", state->fc_modified_inodes[i]); continue; } @@ -1896,6 +1919,33 @@ void ext4_fc_replay_cleanup(struct super_block *sb) kfree(sbi->s_fc_replay_state.fc_modified_inodes); } +static bool ext4_fc_value_len_isvalid(struct ext4_sb_info *sbi, + int tag, int len) +{ + switch (tag) { + case EXT4_FC_TAG_ADD_RANGE: + return len == sizeof(struct ext4_fc_add_range); + case EXT4_FC_TAG_DEL_RANGE: + return len == sizeof(struct ext4_fc_del_range); + case EXT4_FC_TAG_CREAT: + case EXT4_FC_TAG_LINK: + case EXT4_FC_TAG_UNLINK: + len -= sizeof(struct ext4_fc_dentry_info); + return len >= 1 && len <= EXT4_NAME_LEN; + case EXT4_FC_TAG_INODE: + len -= sizeof(struct ext4_fc_inode); + return len >= EXT4_GOOD_OLD_INODE_SIZE && + len <= sbi->s_inode_size; + case EXT4_FC_TAG_PAD: + return true; /* padding can have any length */ + case EXT4_FC_TAG_TAIL: + return len >= sizeof(struct ext4_fc_tail); + case 
EXT4_FC_TAG_HEAD: + return len == sizeof(struct ext4_fc_head); + } + return false; +} + /* * Recovery Scan phase handler * @@ -1931,7 +1981,7 @@ static int ext4_fc_replay_scan(journal_t *journal, state = &sbi->s_fc_replay_state; start = (u8 *)bh->b_data; - end = (__u8 *)bh->b_data + journal->j_blocksize - 1; + end = start + journal->j_blocksize; if (state->fc_replay_expected_off == 0) { state->fc_cur_tag = 0; @@ -1952,12 +2002,19 @@ static int ext4_fc_replay_scan(journal_t *journal, } state->fc_replay_expected_off++; - for (cur = start; cur < end; cur = cur + sizeof(tl) + le16_to_cpu(tl.fc_len)) { - memcpy(&tl, cur, sizeof(tl)); - val = cur + sizeof(tl); - jbd_debug(3, "Scan phase, tag:%s, blk %lld\n", - tag2str(le16_to_cpu(tl.fc_tag)), bh->b_blocknr); - switch (le16_to_cpu(tl.fc_tag)) { + for (cur = start; cur <= end - EXT4_FC_TAG_BASE_LEN; + cur = cur + EXT4_FC_TAG_BASE_LEN + tl.fc_len) { + ext4_fc_get_tl(&tl, cur); + val = cur + EXT4_FC_TAG_BASE_LEN; + if (tl.fc_len > end - val || + !ext4_fc_value_len_isvalid(sbi, tl.fc_tag, tl.fc_len)) { + ret = state->fc_replay_num_tags ? + JBD2_FC_REPLAY_STOP : -ECANCELED; + goto out_err; + } + ext4_debug("Scan phase, tag:%s, blk %lld\n", + tag2str(tl.fc_tag), bh->b_blocknr); + switch (tl.fc_tag) { case EXT4_FC_TAG_ADD_RANGE: memcpy(&ext, val, sizeof(ext)); ex = (struct ext4_extent *)&ext.fc_ex; @@ -1977,13 +2034,13 @@ static int ext4_fc_replay_scan(journal_t *journal, case EXT4_FC_TAG_PAD: state->fc_cur_tag++; state->fc_crc = ext4_chksum(sbi, state->fc_crc, cur, - sizeof(tl) + le16_to_cpu(tl.fc_len)); + EXT4_FC_TAG_BASE_LEN + tl.fc_len); break; case EXT4_FC_TAG_TAIL: state->fc_cur_tag++; memcpy(&tail, val, sizeof(tail)); state->fc_crc = ext4_chksum(sbi, state->fc_crc, cur, - sizeof(tl) + + EXT4_FC_TAG_BASE_LEN + offsetof(struct ext4_fc_tail, fc_crc)); if (le32_to_cpu(tail.fc_tid) == expected_tid && @@ -2010,7 +2067,7 @@ static int ext4_fc_replay_scan(journal_t *journal, } state->fc_cur_tag++; state->fc_crc = ext4_chksum(sbi, state->fc_crc, cur, - sizeof(tl) + le16_to_cpu(tl.fc_len)); + EXT4_FC_TAG_BASE_LEN + tl.fc_len); break; default: ret = state->fc_replay_num_tags ? 
@@ -2050,7 +2107,7 @@ static int ext4_fc_replay(journal_t *journal, struct buffer_head *bh, sbi->s_mount_state |= EXT4_FC_REPLAY; } if (!sbi->s_fc_replay_state.fc_replay_num_tags) { - jbd_debug(1, "Replay stops\n"); + ext4_debug("Replay stops\n"); ext4_fc_set_bitmaps_and_counters(sb); return 0; } @@ -2063,21 +2120,22 @@ static int ext4_fc_replay(journal_t *journal, struct buffer_head *bh, #endif start = (u8 *)bh->b_data; - end = (__u8 *)bh->b_data + journal->j_blocksize - 1; + end = start + journal->j_blocksize; - for (cur = start; cur < end; cur = cur + sizeof(tl) + le16_to_cpu(tl.fc_len)) { - memcpy(&tl, cur, sizeof(tl)); - val = cur + sizeof(tl); + for (cur = start; cur <= end - EXT4_FC_TAG_BASE_LEN; + cur = cur + EXT4_FC_TAG_BASE_LEN + tl.fc_len) { + ext4_fc_get_tl(&tl, cur); + val = cur + EXT4_FC_TAG_BASE_LEN; if (state->fc_replay_num_tags == 0) { ret = JBD2_FC_REPLAY_STOP; ext4_fc_set_bitmaps_and_counters(sb); break; } - jbd_debug(3, "Replay phase, tag:%s\n", - tag2str(le16_to_cpu(tl.fc_tag))); + + ext4_debug("Replay phase, tag:%s\n", tag2str(tl.fc_tag)); state->fc_replay_num_tags--; - switch (le16_to_cpu(tl.fc_tag)) { + switch (tl.fc_tag) { case EXT4_FC_TAG_LINK: ret = ext4_fc_replay_link(sb, &tl, val); break; @@ -2098,19 +2156,18 @@ static int ext4_fc_replay(journal_t *journal, struct buffer_head *bh, break; case EXT4_FC_TAG_PAD: trace_ext4_fc_replay(sb, EXT4_FC_TAG_PAD, 0, - le16_to_cpu(tl.fc_len), 0); + tl.fc_len, 0); break; case EXT4_FC_TAG_TAIL: - trace_ext4_fc_replay(sb, EXT4_FC_TAG_TAIL, 0, - le16_to_cpu(tl.fc_len), 0); + trace_ext4_fc_replay(sb, EXT4_FC_TAG_TAIL, + 0, tl.fc_len, 0); memcpy(&tail, val, sizeof(tail)); WARN_ON(le32_to_cpu(tail.fc_tid) != expected_tid); break; case EXT4_FC_TAG_HEAD: break; default: - trace_ext4_fc_replay(sb, le16_to_cpu(tl.fc_tag), 0, - le16_to_cpu(tl.fc_len), 0); + trace_ext4_fc_replay(sb, tl.fc_tag, 0, tl.fc_len, 0); ret = -ECANCELED; break; } @@ -2134,17 +2191,17 @@ void ext4_fc_init(struct super_block *sb, journal_t *journal) journal->j_fc_cleanup_callback = ext4_fc_cleanup; } -static const char *fc_ineligible_reasons[] = { - "Extended attributes changed", - "Cross rename", - "Journal flag changed", - "Insufficient memory", - "Swap boot", - "Resize", - "Dir renamed", - "Falloc range op", - "Data journalling", - "FC Commit Failed" +static const char * const fc_ineligible_reasons[] = { + [EXT4_FC_REASON_XATTR] = "Extended attributes changed", + [EXT4_FC_REASON_CROSS_RENAME] = "Cross rename", + [EXT4_FC_REASON_JOURNAL_FLAG_CHANGE] = "Journal flag changed", + [EXT4_FC_REASON_NOMEM] = "Insufficient memory", + [EXT4_FC_REASON_SWAP_BOOT] = "Swap boot", + [EXT4_FC_REASON_RESIZE] = "Resize", + [EXT4_FC_REASON_RENAME_DIR] = "Dir renamed", + [EXT4_FC_REASON_FALLOC_RANGE] = "Falloc range op", + [EXT4_FC_REASON_INODE_JOURNAL_DATA] = "Data journalling", + [EXT4_FC_REASON_ENCRYPTED_FILENAME] = "Encrypted filename", }; int ext4_fc_info_show(struct seq_file *seq, void *v) diff --git a/fs/ext4/fast_commit.h b/fs/ext4/fast_commit.h index 083ad1cb705a..2cbd317eda26 100644 --- a/fs/ext4/fast_commit.h +++ b/fs/ext4/fast_commit.h @@ -58,7 +58,7 @@ struct ext4_fc_dentry_info { __u8 fc_dname[0]; }; -/* Value structure for EXT4_FC_TAG_INODE and EXT4_FC_TAG_INODE_PARTIAL. */ +/* Value structure for EXT4_FC_TAG_INODE. 
*/ struct ext4_fc_inode { __le32 fc_ino; __u8 fc_raw_inode[0]; @@ -70,6 +70,9 @@ struct ext4_fc_tail { __le32 fc_crc; }; +/* Tag base length */ +#define EXT4_FC_TAG_BASE_LEN (sizeof(struct ext4_fc_tl)) + /* * Fast commit status codes */ @@ -93,7 +96,7 @@ enum { EXT4_FC_REASON_RENAME_DIR, EXT4_FC_REASON_FALLOC_RANGE, EXT4_FC_REASON_INODE_JOURNAL_DATA, - EXT4_FC_COMMIT_FAILED, + EXT4_FC_REASON_ENCRYPTED_FILENAME, EXT4_FC_REASON_MAX }; diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c index 89efa78ed4b2..9813cc4b7b2a 100644 --- a/fs/ext4/indirect.c +++ b/fs/ext4/indirect.c @@ -148,6 +148,7 @@ static Indirect *ext4_get_branch(struct inode *inode, int depth, struct super_block *sb = inode->i_sb; Indirect *p = chain; struct buffer_head *bh; + unsigned int key; int ret = -EIO; *err = 0; @@ -156,7 +157,13 @@ static Indirect *ext4_get_branch(struct inode *inode, int depth, if (!p->key) goto no_block; while (--depth) { - bh = sb_getblk(sb, le32_to_cpu(p->key)); + key = le32_to_cpu(p->key); + if (key > ext4_blocks_count(EXT4_SB(sb)->s_es)) { + /* the block was out of range */ + ret = -EFSCORRUPTED; + goto failure; + } + bh = sb_getblk(sb, key); if (unlikely(!bh)) { ret = -ENOMEM; goto failure; @@ -460,7 +467,7 @@ static int ext4_splice_branch(handle_t *handle, * the new i_size. But that is not done here - it is done in * generic_commit_write->__mark_inode_dirty->ext4_dirty_inode. */ - jbd_debug(5, "splicing indirect only\n"); + ext4_debug("splicing indirect only\n"); BUFFER_TRACE(where->bh, "call ext4_handle_dirty_metadata"); err = ext4_handle_dirty_metadata(handle, ar->inode, where->bh); if (err) @@ -472,7 +479,7 @@ static int ext4_splice_branch(handle_t *handle, err = ext4_mark_inode_dirty(handle, ar->inode); if (unlikely(err)) goto err_out; - jbd_debug(5, "splicing direct\n"); + ext4_debug("splicing direct\n"); } return err; diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index bdadbe57ea80..0a63863bc58c 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -225,13 +225,13 @@ void ext4_evict_inode(struct inode *inode) /* * For inodes with journalled data, transaction commit could have - * dirtied the inode. Flush worker is ignoring it because of I_FREEING - * flag but we still need to remove the inode from the writeback lists. + * dirtied the inode. And for inodes with dioread_nolock, unwritten + * extents converting worker could merge extents and also have dirtied + * the inode. Flush worker is ignoring it because of I_FREEING flag but + * we still need to remove the inode from the writeback lists. */ - if (!list_empty_careful(&inode->i_io_list)) { - WARN_ON_ONCE(!ext4_should_journal_data(inode)); + if (!list_empty_careful(&inode->i_io_list)) inode_io_list_del(inode); - } /* * Protect us against freezing - iput() caller didn't have to have any @@ -338,6 +338,12 @@ stop_handle: ext4_xattr_inode_array_free(ea_inode_array); return; no_delete: + /* + * Check out some where else accidentally dirty the evicting inode, + * which may probably cause inode use-after-free issues later. + */ + WARN_ON_ONCE(!list_empty_careful(&inode->i_io_list)); + if (!list_empty(&EXT4_I(inode)->i_fc_list)) ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_NOMEM, NULL); ext4_clear_inode(inode); /* We must guarantee clearing of inode... 
*/ @@ -1298,7 +1304,8 @@ static int ext4_write_end(struct file *file, trace_ext4_write_end(inode, pos, len, copied); - if (ext4_has_inline_data(inode)) + if (ext4_has_inline_data(inode) && + ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) return ext4_write_inline_data_end(inode, pos, len, copied, page); copied = block_write_end(file, mapping, pos, len, copied, page, fsdata); @@ -4198,7 +4205,8 @@ int ext4_truncate(struct inode *inode) /* If we zero-out tail of the page, we have to create jinode for jbd2 */ if (inode->i_size & (inode->i_sb->s_blocksize - 1)) { - if (ext4_inode_attach_jinode(inode) < 0) + err = ext4_inode_attach_jinode(inode); + if (err) goto out_trace; } @@ -4299,9 +4307,17 @@ static int __ext4_get_inode_loc(struct super_block *sb, unsigned long ino, inodes_per_block = EXT4_SB(sb)->s_inodes_per_block; inode_offset = ((ino - 1) % EXT4_INODES_PER_GROUP(sb)); - block = ext4_inode_table(sb, gdp) + (inode_offset / inodes_per_block); iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb); + block = ext4_inode_table(sb, gdp); + if ((block <= le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) || + (block >= ext4_blocks_count(EXT4_SB(sb)->s_es))) { + ext4_error(sb, "Invalid inode table block %llu in " + "block_group %u", block, iloc->block_group); + return -EFSCORRUPTED; + } + block += (inode_offset / inodes_per_block); + bh = sb_getblk(sb, block); if (unlikely(!bh)) return -ENOMEM; @@ -4876,8 +4892,14 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino, if (IS_CASEFOLDED(inode) && !ext4_has_feature_casefold(inode->i_sb)) ext4_error_inode(inode, function, line, 0, "casefold flag without casefold feature"); - brelse(iloc.bh); + if (is_bad_inode(inode) && !(flags & EXT4_IGET_BAD)) { + ext4_error_inode(inode, function, line, 0, + "bad inode without EXT4_IGET_BAD flag"); + ret = -EUCLEAN; + goto bad_inode; + } + brelse(iloc.bh); unlock_new_inode(inode); return inode; @@ -5198,7 +5220,7 @@ int ext4_write_inode(struct inode *inode, struct writeback_control *wbc) if (EXT4_SB(inode->i_sb)->s_journal) { if (ext4_journal_current_handle()) { - jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n"); + ext4_debug("called recursively, non-PF_MEMALLOC!\n"); dump_stack(); return -EIO; } @@ -5791,6 +5813,14 @@ static int __ext4_expand_extra_isize(struct inode *inode, return 0; } + /* + * We may need to allocate external xattr block so we need quotas + * initialized. Here we can be called with various locks held so we + * cannot affort to initialize quotas ourselves. So just bail. 
+ */ + if (dquot_initialize_needed(inode)) + return -EAGAIN; + /* try to expand with EAs present */ error = ext4_expand_extra_isize_ea(inode, new_extra_isize, raw_inode, handle); diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c index f61b59045c6d..2e6d03e5790e 100644 --- a/fs/ext4/ioctl.c +++ b/fs/ext4/ioctl.c @@ -124,7 +124,8 @@ static long swap_inode_boot_loader(struct super_block *sb, blkcnt_t blocks; unsigned short bytes; - inode_bl = ext4_iget(sb, EXT4_BOOT_LOADER_INO, EXT4_IGET_SPECIAL); + inode_bl = ext4_iget(sb, EXT4_BOOT_LOADER_INO, + EXT4_IGET_SPECIAL | EXT4_IGET_BAD); if (IS_ERR(inode_bl)) return PTR_ERR(inode_bl); ei_bl = EXT4_I(inode_bl); @@ -174,7 +175,7 @@ static long swap_inode_boot_loader(struct super_block *sb, /* Protect extent tree against block allocations via delalloc */ ext4_double_down_write_data_sem(inode, inode_bl); - if (inode_bl->i_nlink == 0) { + if (is_bad_inode(inode_bl) || !S_ISREG(inode_bl->i_mode)) { /* this inode has never been used as a BOOT_LOADER */ set_nlink(inode_bl, 1); i_uid_write(inode_bl, 0); @@ -491,6 +492,10 @@ static int ext4_ioctl_setproject(struct inode *inode, __u32 projid) if (ext4_is_quota_file(inode)) return err; + err = dquot_initialize(inode); + if (err) + return err; + err = ext4_get_inode_loc(inode, &iloc); if (err) return err; @@ -506,10 +511,6 @@ static int ext4_ioctl_setproject(struct inode *inode, __u32 projid) brelse(iloc.bh); } - err = dquot_initialize(inode); - if (err) - return err; - handle = ext4_journal_start(inode, EXT4_HT_QUOTA, EXT4_QUOTA_INIT_BLOCKS(sb) + EXT4_QUOTA_DEL_BLOCKS(sb) + 3); diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index b8cf3e986b00..1e6cc6c21d60 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c @@ -3204,14 +3204,20 @@ end_rmdir: return retval; } -int __ext4_unlink(handle_t *handle, struct inode *dir, const struct qstr *d_name, - struct inode *inode) +int __ext4_unlink(struct inode *dir, const struct qstr *d_name, + struct inode *inode, + struct dentry *dentry /* NULL during fast_commit recovery */) { int retval = -ENOENT; struct buffer_head *bh; struct ext4_dir_entry_2 *de; + handle_t *handle; int skip_remove_dentry = 0; + /* + * Keep this outside the transaction; it may have to set up the + * directory's encryption key, which isn't GFP_NOFS-safe. 
+ */ bh = ext4_find_entry(dir, d_name, &de, NULL); if (IS_ERR(bh)) return PTR_ERR(bh); @@ -3228,7 +3234,14 @@ int __ext4_unlink(handle_t *handle, struct inode *dir, const struct qstr *d_name if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY) skip_remove_dentry = 1; else - goto out; + goto out_bh; + } + + handle = ext4_journal_start(dir, EXT4_HT_DIR, + EXT4_DATA_TRANS_BLOCKS(dir->i_sb)); + if (IS_ERR(handle)) { + retval = PTR_ERR(handle); + goto out_bh; } if (IS_DIRSYNC(dir)) @@ -3237,12 +3250,12 @@ int __ext4_unlink(handle_t *handle, struct inode *dir, const struct qstr *d_name if (!skip_remove_dentry) { retval = ext4_delete_entry(handle, dir, de, bh); if (retval) - goto out; + goto out_handle; dir->i_ctime = dir->i_mtime = current_time(dir); ext4_update_dx_flag(dir); retval = ext4_mark_inode_dirty(handle, dir); if (retval) - goto out; + goto out_handle; } else { retval = 0; } @@ -3255,15 +3268,17 @@ int __ext4_unlink(handle_t *handle, struct inode *dir, const struct qstr *d_name ext4_orphan_add(handle, inode); inode->i_ctime = current_time(inode); retval = ext4_mark_inode_dirty(handle, inode); - -out: + if (dentry && !retval) + ext4_fc_track_unlink(handle, dentry); +out_handle: + ext4_journal_stop(handle); +out_bh: brelse(bh); return retval; } static int ext4_unlink(struct inode *dir, struct dentry *dentry) { - handle_t *handle; int retval; if (unlikely(ext4_forced_shutdown(EXT4_SB(dir->i_sb)))) @@ -3281,16 +3296,7 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry) if (retval) goto out_trace; - handle = ext4_journal_start(dir, EXT4_HT_DIR, - EXT4_DATA_TRANS_BLOCKS(dir->i_sb)); - if (IS_ERR(handle)) { - retval = PTR_ERR(handle); - goto out_trace; - } - - retval = __ext4_unlink(handle, dir, &dentry->d_name, d_inode(dentry)); - if (!retval) - ext4_fc_track_unlink(handle, dentry); + retval = __ext4_unlink(dir, &dentry->d_name, d_inode(dentry), dentry); #ifdef CONFIG_UNICODE /* VFS negative dentries are incompatible with Encoding and * Case-insensitiveness. 
Eventually we'll want avoid @@ -3301,8 +3307,6 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry) if (IS_CASEFOLDED(dir)) d_invalidate(dentry); #endif - if (handle) - ext4_journal_stop(handle); out_trace: trace_ext4_unlink_exit(dentry, retval); @@ -3806,6 +3810,9 @@ static int ext4_rename(struct user_namespace *mnt_userns, struct inode *old_dir, return -EXDEV; retval = dquot_initialize(old.dir); + if (retval) + return retval; + retval = dquot_initialize(old.inode); if (retval) return retval; retval = dquot_initialize(new.dir); diff --git a/fs/ext4/orphan.c b/fs/ext4/orphan.c index 53adc8f570a3..c26c404ac58b 100644 --- a/fs/ext4/orphan.c +++ b/fs/ext4/orphan.c @@ -181,8 +181,8 @@ int ext4_orphan_add(handle_t *handle, struct inode *inode) } else brelse(iloc.bh); - jbd_debug(4, "superblock will point to %lu\n", inode->i_ino); - jbd_debug(4, "orphan inode %lu will point to %d\n", + ext4_debug("superblock will point to %lu\n", inode->i_ino); + ext4_debug("orphan inode %lu will point to %d\n", inode->i_ino, NEXT_ORPHAN(inode)); out: ext4_std_error(sb, err); @@ -251,7 +251,7 @@ int ext4_orphan_del(handle_t *handle, struct inode *inode) } mutex_lock(&sbi->s_orphan_lock); - jbd_debug(4, "remove inode %lu from orphan list\n", inode->i_ino); + ext4_debug("remove inode %lu from orphan list\n", inode->i_ino); prev = ei->i_orphan.prev; list_del_init(&ei->i_orphan); @@ -267,7 +267,7 @@ int ext4_orphan_del(handle_t *handle, struct inode *inode) ino_next = NEXT_ORPHAN(inode); if (prev == &sbi->s_orphan) { - jbd_debug(4, "superblock will point to %u\n", ino_next); + ext4_debug("superblock will point to %u\n", ino_next); BUFFER_TRACE(sbi->s_sbh, "get_write_access"); err = ext4_journal_get_write_access(handle, inode->i_sb, sbi->s_sbh, EXT4_JTR_NONE); @@ -286,7 +286,7 @@ int ext4_orphan_del(handle_t *handle, struct inode *inode) struct inode *i_prev = &list_entry(prev, struct ext4_inode_info, i_orphan)->vfs_inode; - jbd_debug(4, "orphan inode %lu will point to %u\n", + ext4_debug("orphan inode %lu will point to %u\n", i_prev->i_ino, ino_next); err = ext4_reserve_inode_write(handle, i_prev, &iloc2); if (err) { @@ -332,8 +332,8 @@ static void ext4_process_orphan(struct inode *inode, ext4_msg(sb, KERN_DEBUG, "%s: truncating inode %lu to %lld bytes", __func__, inode->i_ino, inode->i_size); - jbd_debug(2, "truncating inode %lu to %lld bytes\n", - inode->i_ino, inode->i_size); + ext4_debug("truncating inode %lu to %lld bytes\n", + inode->i_ino, inode->i_size); inode_lock(inode); truncate_inode_pages(inode->i_mapping, inode->i_size); ret = ext4_truncate(inode); @@ -353,8 +353,8 @@ static void ext4_process_orphan(struct inode *inode, ext4_msg(sb, KERN_DEBUG, "%s: deleting unreferenced inode %lu", __func__, inode->i_ino); - jbd_debug(2, "deleting unreferenced inode %lu\n", - inode->i_ino); + ext4_debug("deleting unreferenced inode %lu\n", + inode->i_ino); (*nr_orphans)++; } iput(inode); /* The delete magic happens here! 
*/ @@ -391,7 +391,7 @@ void ext4_orphan_cleanup(struct super_block *sb, struct ext4_super_block *es) int inodes_per_ob = ext4_inodes_per_orphan_block(sb); if (!es->s_last_orphan && !oi->of_blocks) { - jbd_debug(4, "no orphan inodes to clean up\n"); + ext4_debug("no orphan inodes to clean up\n"); return; } @@ -412,10 +412,10 @@ void ext4_orphan_cleanup(struct super_block *sb, struct ext4_super_block *es) /* don't clear list on RO mount w/ errors */ if (es->s_last_orphan && !(s_flags & SB_RDONLY)) { ext4_msg(sb, KERN_INFO, "Errors on filesystem, " - "clearing orphan list.\n"); + "clearing orphan list."); es->s_last_orphan = 0; } - jbd_debug(1, "Skipping orphan recovery on fs with errors.\n"); + ext4_debug("Skipping orphan recovery on fs with errors.\n"); return; } @@ -459,7 +459,7 @@ void ext4_orphan_cleanup(struct super_block *sb, struct ext4_super_block *es) * so, skip the rest. */ if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) { - jbd_debug(1, "Skipping orphan recovery on fs with errors.\n"); + ext4_debug("Skipping orphan recovery on fs with errors.\n"); es->s_last_orphan = 0; break; } diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c index 62bbfe8960f3..405c68085055 100644 --- a/fs/ext4/resize.c +++ b/fs/ext4/resize.c @@ -1557,8 +1557,8 @@ exit_journal: int meta_bg = ext4_has_feature_meta_bg(sb); sector_t old_gdb = 0; - update_backups(sb, sbi->s_sbh->b_blocknr, (char *)es, - sizeof(struct ext4_super_block), 0); + update_backups(sb, ext4_group_first_block_no(sb, 0), + (char *)es, sizeof(struct ext4_super_block), 0); for (; gdb_num <= gdb_num_end; gdb_num++) { struct buffer_head *gdb_bh; @@ -1769,7 +1769,7 @@ errout: if (test_opt(sb, DEBUG)) printk(KERN_DEBUG "EXT4-fs: extended group to %llu " "blocks\n", ext4_blocks_count(es)); - update_backups(sb, EXT4_SB(sb)->s_sbh->b_blocknr, + update_backups(sb, ext4_group_first_block_no(sb, 0), (char *)es, sizeof(struct ext4_super_block), 0); } return err; diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 985d79fb6128..802ca160d31e 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -1288,6 +1288,7 @@ static struct inode *ext4_alloc_inode(struct super_block *sb) return NULL; inode_set_iversion(&ei->vfs_inode, 1); + ei->i_flags = 0; spin_lock_init(&ei->i_raw_lock); INIT_LIST_HEAD(&ei->i_prealloc_list); atomic_set(&ei->i_prealloc_active, 0); @@ -4663,30 +4664,31 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) ext4_has_feature_journal_needs_recovery(sb)) { ext4_msg(sb, KERN_ERR, "required journal recovery " "suppressed and not mounted read-only"); - goto failed_mount_wq; + goto failed_mount3a; } else { /* Nojournal mode, all journal mount options are illegal */ - if (test_opt2(sb, EXPLICIT_JOURNAL_CHECKSUM)) { - ext4_msg(sb, KERN_ERR, "can't mount with " - "journal_checksum, fs mounted w/o journal"); - goto failed_mount_wq; - } if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) { ext4_msg(sb, KERN_ERR, "can't mount with " "journal_async_commit, fs mounted w/o journal"); - goto failed_mount_wq; + goto failed_mount3a; + } + + if (test_opt2(sb, EXPLICIT_JOURNAL_CHECKSUM)) { + ext4_msg(sb, KERN_ERR, "can't mount with " + "journal_checksum, fs mounted w/o journal"); + goto failed_mount3a; } if (sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ) { ext4_msg(sb, KERN_ERR, "can't mount with " "commit=%lu, fs mounted w/o journal", sbi->s_commit_interval / HZ); - goto failed_mount_wq; + goto failed_mount3a; } if (EXT4_MOUNT_DATA_FLAGS & (sbi->s_mount_opt ^ sbi->s_def_mount_opt)) { ext4_msg(sb, KERN_ERR, "can't mount with " "data=, fs 
mounted w/o journal"); - goto failed_mount_wq; + goto failed_mount3a; } sbi->s_def_mount_opt &= ~EXT4_MOUNT_JOURNAL_CHECKSUM; clear_opt(sb, JOURNAL_CHECKSUM); @@ -5153,9 +5155,9 @@ static struct inode *ext4_get_journal_inode(struct super_block *sb, return NULL; } - jbd_debug(2, "Journal inode found at %p: %lld bytes\n", + ext4_debug("Journal inode found at %p: %lld bytes\n", journal_inode, journal_inode->i_size); - if (!S_ISREG(journal_inode->i_mode)) { + if (!S_ISREG(journal_inode->i_mode) || IS_ENCRYPTED(journal_inode)) { ext4_msg(sb, KERN_ERR, "invalid journal inode"); iput(journal_inode); return NULL; @@ -6309,6 +6311,20 @@ static int ext4_quota_on(struct super_block *sb, int type, int format_id, return err; } +static inline bool ext4_check_quota_inum(int type, unsigned long qf_inum) +{ + switch (type) { + case USRQUOTA: + return qf_inum == EXT4_USR_QUOTA_INO; + case GRPQUOTA: + return qf_inum == EXT4_GRP_QUOTA_INO; + case PRJQUOTA: + return qf_inum >= EXT4_GOOD_OLD_FIRST_INO; + default: + BUG(); + } +} + static int ext4_quota_enable(struct super_block *sb, int type, int format_id, unsigned int flags) { @@ -6325,9 +6341,16 @@ static int ext4_quota_enable(struct super_block *sb, int type, int format_id, if (!qf_inums[type]) return -EPERM; + if (!ext4_check_quota_inum(type, qf_inums[type])) { + ext4_error(sb, "Bad quota inum: %lu, type: %d", + qf_inums[type], type); + return -EUCLEAN; + } + qf_inode = ext4_iget(sb, qf_inums[type], EXT4_IGET_SPECIAL); if (IS_ERR(qf_inode)) { - ext4_error(sb, "Bad quota inode # %lu", qf_inums[type]); + ext4_error(sb, "Bad quota inode: %lu, type: %d", + qf_inums[type], type); return PTR_ERR(qf_inode); } @@ -6366,8 +6389,9 @@ int ext4_enable_quotas(struct super_block *sb) if (err) { ext4_warning(sb, "Failed to enable quota tracking " - "(type=%d, err=%d). Please run " - "e2fsck to fix.", type, err); + "(type=%d, err=%d, ino=%lu). 
" + "Please run e2fsck to fix.", type, + err, qf_inums[type]); for (type--; type >= 0; type--) { struct inode *inode; diff --git a/fs/ext4/verity.c b/fs/ext4/verity.c index 840639e7a7ca..5ece4d3c6210 100644 --- a/fs/ext4/verity.c +++ b/fs/ext4/verity.c @@ -76,7 +76,7 @@ static int pagecache_write(struct inode *inode, const void *buf, size_t count, size_t n = min_t(size_t, count, PAGE_SIZE - offset_in_page(pos)); struct page *page; - void *fsdata; + void *fsdata = NULL; int res; res = pagecache_write_begin(NULL, inode->i_mapping, pos, n, 0, diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c index 533216e80fa2..b92da41e9640 100644 --- a/fs/ext4/xattr.c +++ b/fs/ext4/xattr.c @@ -1281,7 +1281,7 @@ retry_ref: ce = mb_cache_entry_get(ea_block_cache, hash, bh->b_blocknr); if (ce) { - ce->e_reusable = 1; + set_bit(MBE_REUSABLE_B, &ce->e_flags); mb_cache_entry_put(ea_block_cache, ce); } } @@ -1441,6 +1441,9 @@ static struct inode *ext4_xattr_inode_create(handle_t *handle, if (!err) err = ext4_inode_attach_jinode(ea_inode); if (err) { + if (ext4_xattr_inode_dec_ref(handle, ea_inode)) + ext4_warning_inode(ea_inode, + "cleanup dec ref error %d", err); iput(ea_inode); return ERR_PTR(err); } @@ -2042,7 +2045,7 @@ inserted: } BHDR(new_bh)->h_refcount = cpu_to_le32(ref); if (ref == EXT4_XATTR_REFCOUNT_MAX) - ce->e_reusable = 0; + clear_bit(MBE_REUSABLE_B, &ce->e_flags); ea_bdebug(new_bh, "reusing; refcount now=%d", ref); ext4_xattr_block_csum_set(inode, new_bh); @@ -2070,19 +2073,11 @@ inserted: goal = ext4_group_first_block_no(sb, EXT4_I(inode)->i_block_group); - - /* non-extent files can't have physical blocks past 2^32 */ - if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) - goal = goal & EXT4_MAX_BLOCK_FILE_PHYS; - block = ext4_new_meta_blocks(handle, inode, goal, 0, NULL, &error); if (error) goto cleanup; - if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) - BUG_ON(block > EXT4_MAX_BLOCK_FILE_PHYS); - ea_idebug(inode, "creating block %llu", (unsigned long long)block); @@ -2554,7 +2549,7 @@ static int ext4_xattr_move_to_block(handle_t *handle, struct inode *inode, is = kzalloc(sizeof(struct ext4_xattr_ibody_find), GFP_NOFS); bs = kzalloc(sizeof(struct ext4_xattr_block_find), GFP_NOFS); - buffer = kmalloc(value_size, GFP_NOFS); + buffer = kvmalloc(value_size, GFP_NOFS); b_entry_name = kmalloc(entry->e_name_len + 1, GFP_NOFS); if (!is || !bs || !buffer || !b_entry_name) { error = -ENOMEM; @@ -2606,7 +2601,7 @@ static int ext4_xattr_move_to_block(handle_t *handle, struct inode *inode, error = 0; out: kfree(b_entry_name); - kfree(buffer); + kvfree(buffer); if (is) brelse(is->iloc.bh); if (bs) diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c index e75a276f5b9c..280a93855eaf 100644 --- a/fs/f2fs/gc.c +++ b/fs/f2fs/gc.c @@ -1033,6 +1033,7 @@ static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum, if (ofs_in_node >= max_addrs) { f2fs_err(sbi, "Inconsistent ofs_in_node:%u in summary, ino:%u, nid:%u, max:%u", ofs_in_node, dni->ino, dni->nid, max_addrs); + f2fs_put_page(node_page, 1); return false; } @@ -1673,8 +1674,9 @@ freed: get_valid_blocks(sbi, segno, false) == 0) seg_freed++; - if (__is_large_section(sbi) && segno + 1 < end_segno) - sbi->next_victim_seg[gc_type] = segno + 1; + if (__is_large_section(sbi)) + sbi->next_victim_seg[gc_type] = + (segno + 1 < end_segno) ? 
segno + 1 : NULL_SEGNO; skip: f2fs_put_page(sum_page, 0); } @@ -2051,8 +2053,6 @@ out_unlock: if (err) return err; - set_sbi_flag(sbi, SBI_IS_RESIZEFS); - freeze_super(sbi->sb); down_write(&sbi->gc_lock); down_write(&sbi->cp_global_sem); @@ -2068,6 +2068,7 @@ out_unlock: if (err) goto out_err; + set_sbi_flag(sbi, SBI_IS_RESIZEFS); err = free_segment_range(sbi, secs, false); if (err) goto recover_out; @@ -2091,6 +2092,7 @@ out_unlock: f2fs_commit_super(sbi, false); } recover_out: + clear_sbi_flag(sbi, SBI_IS_RESIZEFS); if (err) { set_sbi_flag(sbi, SBI_NEED_FSCK); f2fs_err(sbi, "resize_fs failed, should run fsck to repair!"); @@ -2103,6 +2105,5 @@ out_err: up_write(&sbi->cp_global_sem); up_write(&sbi->gc_lock); thaw_super(sbi->sb); - clear_sbi_flag(sbi, SBI_IS_RESIZEFS); return err; } diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c index 0e6e73bc42d4..f810c6bbeff0 100644 --- a/fs/f2fs/node.c +++ b/fs/f2fs/node.c @@ -1357,8 +1357,7 @@ static int read_node_page(struct page *page, int op_flags) return err; /* NEW_ADDR can be seen, after cp_error drops some dirty node pages */ - if (unlikely(ni.blk_addr == NULL_ADDR || ni.blk_addr == NEW_ADDR) || - is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN)) { + if (unlikely(ni.blk_addr == NULL_ADDR || ni.blk_addr == NEW_ADDR)) { ClearPageUptodate(page); return -ENOENT; } diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index af810b2d5d90..194c0811fbdf 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -1551,7 +1551,7 @@ retry: if (i + 1 < dpolicy->granularity) break; - if (i < DEFAULT_DISCARD_GRANULARITY && dpolicy->ordered) + if (i + 1 < DEFAULT_DISCARD_GRANULARITY && dpolicy->ordered) return __issue_discard_cmd_orderly(sbi, dpolicy); pend_list = &dcc->pend_list[i]; @@ -2129,8 +2129,10 @@ int f2fs_start_discard_thread(struct f2fs_sb_info *sbi) dcc->f2fs_issue_discard = kthread_run(issue_discard_thread, sbi, "f2fs_discard-%u:%u", MAJOR(dev), MINOR(dev)); - if (IS_ERR(dcc->f2fs_issue_discard)) + if (IS_ERR(dcc->f2fs_issue_discard)) { err = PTR_ERR(dcc->f2fs_issue_discard); + dcc->f2fs_issue_discard = NULL; + } return err; } diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index a0d1ef73b83e..f4e8de1f4789 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -4428,9 +4428,9 @@ free_nm: f2fs_destroy_node_manager(sbi); free_sm: f2fs_destroy_segment_manager(sbi); - f2fs_destroy_post_read_wq(sbi); stop_ckpt_thread: f2fs_stop_ckpt_thread(sbi); + f2fs_destroy_post_read_wq(sbi); free_devices: destroy_device_list(sbi); kvfree(sbi->ckpt); diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c index 4a95a92546a0..7c9c6d0b38fd 100644 --- a/fs/hfs/inode.c +++ b/fs/hfs/inode.c @@ -456,14 +456,16 @@ int hfs_write_inode(struct inode *inode, struct writeback_control *wbc) /* panic? */ return -EIO; + res = -EIO; + if (HFS_I(main_inode)->cat_key.CName.len > HFS_NAMELEN) + goto out; fd.search_key->cat = HFS_I(main_inode)->cat_key; if (hfs_brec_find(&fd)) - /* panic? */ goto out; if (S_ISDIR(main_inode->i_mode)) { if (fd.entrylength < sizeof(struct hfs_cat_dir)) - /* panic? 
*/; + goto out; hfs_bnode_read(fd.bnode, &rec, fd.entryoffset, sizeof(struct hfs_cat_dir)); if (rec.type != HFS_CDR_DIR || @@ -476,6 +478,8 @@ int hfs_write_inode(struct inode *inode, struct writeback_control *wbc) hfs_bnode_write(fd.bnode, &rec, fd.entryoffset, sizeof(struct hfs_cat_dir)); } else if (HFS_IS_RSRC(inode)) { + if (fd.entrylength < sizeof(struct hfs_cat_file)) + goto out; hfs_bnode_read(fd.bnode, &rec, fd.entryoffset, sizeof(struct hfs_cat_file)); hfs_inode_write_fork(inode, rec.file.RExtRec, @@ -484,7 +488,7 @@ int hfs_write_inode(struct inode *inode, struct writeback_control *wbc) sizeof(struct hfs_cat_file)); } else { if (fd.entrylength < sizeof(struct hfs_cat_file)) - /* panic? */; + goto out; hfs_bnode_read(fd.bnode, &rec, fd.entryoffset, sizeof(struct hfs_cat_file)); if (rec.type != HFS_CDR_FIL || @@ -501,9 +505,10 @@ int hfs_write_inode(struct inode *inode, struct writeback_control *wbc) hfs_bnode_write(fd.bnode, &rec, fd.entryoffset, sizeof(struct hfs_cat_file)); } + res = 0; out: hfs_find_exit(&fd); - return 0; + return res; } static struct dentry *hfs_file_lookup(struct inode *dir, struct dentry *dentry, diff --git a/fs/hfs/trans.c b/fs/hfs/trans.c index 39f5e343bf4d..fdb0edb8a607 100644 --- a/fs/hfs/trans.c +++ b/fs/hfs/trans.c @@ -109,7 +109,7 @@ void hfs_asc2mac(struct super_block *sb, struct hfs_name *out, const struct qstr if (nls_io) { wchar_t ch; - while (srclen > 0) { + while (srclen > 0 && dstlen > 0) { size = nls_io->char2uni(src, srclen, &ch); if (size < 0) { ch = '?'; diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h index 1798949f269b..ebc0d5c678d0 100644 --- a/fs/hfsplus/hfsplus_fs.h +++ b/fs/hfsplus/hfsplus_fs.h @@ -198,6 +198,8 @@ struct hfsplus_sb_info { #define HFSPLUS_SB_HFSX 3 #define HFSPLUS_SB_CASEFOLD 4 #define HFSPLUS_SB_NOBARRIER 5 +#define HFSPLUS_SB_UID 6 +#define HFSPLUS_SB_GID 7 static inline struct hfsplus_sb_info *HFSPLUS_SB(struct super_block *sb) { diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c index 6fef67c2a9f0..bf6f75f569e4 100644 --- a/fs/hfsplus/inode.c +++ b/fs/hfsplus/inode.c @@ -190,11 +190,11 @@ static void hfsplus_get_perms(struct inode *inode, mode = be16_to_cpu(perms->mode); i_uid_write(inode, be32_to_cpu(perms->owner)); - if (!i_uid_read(inode) && !mode) + if ((test_bit(HFSPLUS_SB_UID, &sbi->flags)) || (!i_uid_read(inode) && !mode)) inode->i_uid = sbi->uid; i_gid_write(inode, be32_to_cpu(perms->group)); - if (!i_gid_read(inode) && !mode) + if ((test_bit(HFSPLUS_SB_GID, &sbi->flags)) || (!i_gid_read(inode) && !mode)) inode->i_gid = sbi->gid; if (dir) { @@ -509,8 +509,7 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd) if (type == HFSPLUS_FOLDER) { struct hfsplus_cat_folder *folder = &entry.folder; - if (fd->entrylength < sizeof(struct hfsplus_cat_folder)) - /* panic? */; + WARN_ON(fd->entrylength < sizeof(struct hfsplus_cat_folder)); hfs_bnode_read(fd->bnode, &entry, fd->entryoffset, sizeof(struct hfsplus_cat_folder)); hfsplus_get_perms(inode, &folder->permissions, 1); @@ -530,8 +529,7 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd) } else if (type == HFSPLUS_FILE) { struct hfsplus_cat_file *file = &entry.file; - if (fd->entrylength < sizeof(struct hfsplus_cat_file)) - /* panic? 
*/; + WARN_ON(fd->entrylength < sizeof(struct hfsplus_cat_file)); hfs_bnode_read(fd->bnode, &entry, fd->entryoffset, sizeof(struct hfsplus_cat_file)); @@ -588,8 +586,7 @@ int hfsplus_cat_write_inode(struct inode *inode) if (S_ISDIR(main_inode->i_mode)) { struct hfsplus_cat_folder *folder = &entry.folder; - if (fd.entrylength < sizeof(struct hfsplus_cat_folder)) - /* panic? */; + WARN_ON(fd.entrylength < sizeof(struct hfsplus_cat_folder)); hfs_bnode_read(fd.bnode, &entry, fd.entryoffset, sizeof(struct hfsplus_cat_folder)); /* simple node checks? */ @@ -614,8 +611,7 @@ int hfsplus_cat_write_inode(struct inode *inode) } else { struct hfsplus_cat_file *file = &entry.file; - if (fd.entrylength < sizeof(struct hfsplus_cat_file)) - /* panic? */; + WARN_ON(fd.entrylength < sizeof(struct hfsplus_cat_file)); hfs_bnode_read(fd.bnode, &entry, fd.entryoffset, sizeof(struct hfsplus_cat_file)); hfsplus_inode_write_fork(inode, &file->data_fork); diff --git a/fs/hfsplus/options.c b/fs/hfsplus/options.c index 047e05c57560..c94a58762ad6 100644 --- a/fs/hfsplus/options.c +++ b/fs/hfsplus/options.c @@ -140,6 +140,8 @@ int hfsplus_parse_options(char *input, struct hfsplus_sb_info *sbi) if (!uid_valid(sbi->uid)) { pr_err("invalid uid specified\n"); return 0; + } else { + set_bit(HFSPLUS_SB_UID, &sbi->flags); } break; case opt_gid: @@ -151,6 +153,8 @@ int hfsplus_parse_options(char *input, struct hfsplus_sb_info *sbi) if (!gid_valid(sbi->gid)) { pr_err("invalid gid specified\n"); return 0; + } else { + set_bit(HFSPLUS_SB_GID, &sbi->flags); } break; case opt_part: diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index be8deec29ebe..352230a011e0 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -1250,7 +1250,7 @@ static int hugetlbfs_parse_param(struct fs_context *fc, struct fs_parameter *par case Opt_size: /* memparse() will accept a K/M/G without a digit */ - if (!isdigit(param->string[0])) + if (!param->string || !isdigit(param->string[0])) goto bad_val; ctx->max_size_opt = memparse(param->string, &rest); ctx->max_val_type = SIZE_STD; @@ -1260,7 +1260,7 @@ static int hugetlbfs_parse_param(struct fs_context *fc, struct fs_parameter *par case Opt_nr_inodes: /* memparse() will accept a K/M/G without a digit */ - if (!isdigit(param->string[0])) + if (!param->string || !isdigit(param->string[0])) goto bad_val; ctx->nr_inodes = memparse(param->string, &rest); return 0; @@ -1276,7 +1276,7 @@ static int hugetlbfs_parse_param(struct fs_context *fc, struct fs_parameter *par case Opt_min_size: /* memparse() will accept a K/M/G without a digit */ - if (!isdigit(param->string[0])) + if (!param->string || !isdigit(param->string[0])) goto bad_val; ctx->min_size_opt = memparse(param->string, &rest); ctx->min_val_type = SIZE_STD; diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c index e75f31b81d63..f401bc05d5ff 100644 --- a/fs/jfs/jfs_dmap.c +++ b/fs/jfs/jfs_dmap.c @@ -155,7 +155,7 @@ int dbMount(struct inode *ipbmap) struct bmap *bmp; struct dbmap_disk *dbmp_le; struct metapage *mp; - int i; + int i, err; /* * allocate/initialize the in-memory bmap descriptor @@ -170,8 +170,8 @@ int dbMount(struct inode *ipbmap) BMAPBLKNO << JFS_SBI(ipbmap->i_sb)->l2nbperpage, PSIZE, 0); if (mp == NULL) { - kfree(bmp); - return -EIO; + err = -EIO; + goto err_kfree_bmp; } /* copy the on-disk bmap descriptor to its in-memory version. 
*/ @@ -181,9 +181,8 @@ int dbMount(struct inode *ipbmap) bmp->db_l2nbperpage = le32_to_cpu(dbmp_le->dn_l2nbperpage); bmp->db_numag = le32_to_cpu(dbmp_le->dn_numag); if (!bmp->db_numag) { - release_metapage(mp); - kfree(bmp); - return -EINVAL; + err = -EINVAL; + goto err_release_metapage; } bmp->db_maxlevel = le32_to_cpu(dbmp_le->dn_maxlevel); @@ -194,6 +193,16 @@ int dbMount(struct inode *ipbmap) bmp->db_agwidth = le32_to_cpu(dbmp_le->dn_agwidth); bmp->db_agstart = le32_to_cpu(dbmp_le->dn_agstart); bmp->db_agl2size = le32_to_cpu(dbmp_le->dn_agl2size); + if (bmp->db_agl2size > L2MAXL2SIZE - L2MAXAG) { + err = -EINVAL; + goto err_release_metapage; + } + + if (((bmp->db_mapsize - 1) >> bmp->db_agl2size) > MAXAG) { + err = -EINVAL; + goto err_release_metapage; + } + for (i = 0; i < MAXAG; i++) bmp->db_agfree[i] = le64_to_cpu(dbmp_le->dn_agfree[i]); bmp->db_agsize = le64_to_cpu(dbmp_le->dn_agsize); @@ -214,6 +223,12 @@ int dbMount(struct inode *ipbmap) BMAP_LOCK_INIT(bmp); return (0); + +err_release_metapage: + release_metapage(mp); +err_kfree_bmp: + kfree(bmp); + return err; } diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c index 9db4f5789c0e..4fbbf88435e6 100644 --- a/fs/jfs/namei.c +++ b/fs/jfs/namei.c @@ -946,7 +946,7 @@ static int jfs_symlink(struct user_namespace *mnt_userns, struct inode *dip, if (ssize <= IDATASIZE) { ip->i_op = &jfs_fast_symlink_inode_operations; - ip->i_link = JFS_IP(ip)->i_inline; + ip->i_link = JFS_IP(ip)->i_inline_all; memcpy(ip->i_link, name, ssize); ip->i_size = ssize - 1; diff --git a/fs/ksmbd/auth.c b/fs/ksmbd/auth.c index 30a92ddc1817..b962b16e5aeb 100644 --- a/fs/ksmbd/auth.c +++ b/fs/ksmbd/auth.c @@ -319,7 +319,8 @@ int ksmbd_decode_ntlmssp_auth_blob(struct authenticate_message *authblob, dn_off = le32_to_cpu(authblob->DomainName.BufferOffset); dn_len = le16_to_cpu(authblob->DomainName.Length); - if (blob_len < (u64)dn_off + dn_len || blob_len < (u64)nt_off + nt_len) + if (blob_len < (u64)dn_off + dn_len || blob_len < (u64)nt_off + nt_len || + nt_len < CIFS_ENCPWD_SIZE) return -EINVAL; /* TODO : use domain name that imported from configuration file */ diff --git a/fs/ksmbd/connection.c b/fs/ksmbd/connection.c index 02254b09c0da..6a1d1fbe5fb2 100644 --- a/fs/ksmbd/connection.c +++ b/fs/ksmbd/connection.c @@ -310,9 +310,12 @@ int ksmbd_conn_handler_loop(void *p) /* 4 for rfc1002 length field */ size = pdu_size + 4; - conn->request_buf = kvmalloc(size, GFP_KERNEL); + conn->request_buf = kvmalloc(size, + GFP_KERNEL | + __GFP_NOWARN | + __GFP_NORETRY); if (!conn->request_buf) - continue; + break; memcpy(conn->request_buf, hdr_buf, sizeof(hdr_buf)); if (!ksmbd_smb_request(conn)) diff --git a/fs/ksmbd/mgmt/user_session.c b/fs/ksmbd/mgmt/user_session.c index 8d8ffd8c6f19..0fa467f2c897 100644 --- a/fs/ksmbd/mgmt/user_session.c +++ b/fs/ksmbd/mgmt/user_session.c @@ -106,15 +106,17 @@ int ksmbd_session_rpc_open(struct ksmbd_session *sess, char *rpc_name) entry->method = method; entry->id = ksmbd_ipc_id_alloc(); if (entry->id < 0) - goto error; + goto free_entry; resp = ksmbd_rpc_open(sess, entry->id); if (!resp) - goto error; + goto free_id; kvfree(resp); return entry->id; -error: +free_id: + ksmbd_rpc_id_free(entry->id); +free_entry: list_del(&entry->list); kfree(entry); return -EINVAL; diff --git a/fs/ksmbd/transport_tcp.c b/fs/ksmbd/transport_tcp.c index 755329c295ca..293af921f2ad 100644 --- a/fs/ksmbd/transport_tcp.c +++ b/fs/ksmbd/transport_tcp.c @@ -295,6 +295,7 @@ static int ksmbd_tcp_readv(struct tcp_transport *t, struct kvec *iov_orig, struct msghdr ksmbd_msg; 
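/*
 * Illustrative sketch, not part of the patch above: the error-unwinding
 * shape the dbMount() rework converges on - take resources in order and,
 * on failure, jump to a label that releases everything obtained so far in
 * reverse order.  The demo_* names and resources are placeholders.
 */
#include <stdlib.h>

static int demo_mount(int fail_validation, void **out)
{
	void *desc, *page;
	int err;

	desc = malloc(64);		/* stands in for the bmap descriptor */
	if (!desc)
		return -1;

	page = malloc(64);		/* stands in for read_metapage() */
	if (!page) {
		err = -1;
		goto err_free_desc;
	}

	if (fail_validation) {		/* e.g. bad on-disk geometry */
		err = -1;
		goto err_free_page;
	}

	free(page);			/* metapage released on success too */
	*out = desc;
	return 0;

err_free_page:
	free(page);
err_free_desc:
	free(desc);
	return err;
}

int main(void)
{
	void *d = NULL;

	if (demo_mount(0, &d) == 0)
		free(d);
	return demo_mount(1, &d) ? 0 : 1;	/* failure path exercises the unwinding */
}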
struct kvec *iov; struct ksmbd_conn *conn = KSMBD_TRANS(t)->conn; + int max_retry = 2; iov = get_conn_iovec(t, nr_segs); if (!iov) @@ -321,9 +322,11 @@ static int ksmbd_tcp_readv(struct tcp_transport *t, struct kvec *iov_orig, } else if (conn->status == KSMBD_SESS_NEED_RECONNECT) { total_read = -EAGAIN; break; - } else if (length == -ERESTARTSYS || length == -EAGAIN) { + } else if ((length == -ERESTARTSYS || length == -EAGAIN) && + max_retry) { usleep_range(1000, 2000); length = 0; + max_retry--; continue; } else if (length <= 0) { total_read = -EAGAIN; diff --git a/fs/libfs.c b/fs/libfs.c index 51b4de3b3447..7bb5d90319cc 100644 --- a/fs/libfs.c +++ b/fs/libfs.c @@ -967,8 +967,8 @@ out: EXPORT_SYMBOL_GPL(simple_attr_read); /* interpret the buffer as a number to call the set function with */ -ssize_t simple_attr_write(struct file *file, const char __user *buf, - size_t len, loff_t *ppos) +static ssize_t simple_attr_write_xsigned(struct file *file, const char __user *buf, + size_t len, loff_t *ppos, bool is_signed) { struct simple_attr *attr; unsigned long long val; @@ -989,7 +989,10 @@ ssize_t simple_attr_write(struct file *file, const char __user *buf, goto out; attr->set_buf[size] = '\0'; - ret = kstrtoull(attr->set_buf, 0, &val); + if (is_signed) + ret = kstrtoll(attr->set_buf, 0, &val); + else + ret = kstrtoull(attr->set_buf, 0, &val); if (ret) goto out; ret = attr->set(attr->data, val); @@ -999,8 +1002,21 @@ out: mutex_unlock(&attr->mutex); return ret; } + +ssize_t simple_attr_write(struct file *file, const char __user *buf, + size_t len, loff_t *ppos) +{ + return simple_attr_write_xsigned(file, buf, len, ppos, false); +} EXPORT_SYMBOL_GPL(simple_attr_write); +ssize_t simple_attr_write_signed(struct file *file, const char __user *buf, + size_t len, loff_t *ppos) +{ + return simple_attr_write_xsigned(file, buf, len, ppos, true); +} +EXPORT_SYMBOL_GPL(simple_attr_write_signed); + /** * generic_fh_to_dentry - generic helper for the fh_to_dentry export operation * @sb: filesystem to do the file handle conversion on diff --git a/fs/lockd/svcsubs.c b/fs/lockd/svcsubs.c index e1c4617de771..3515f17eaf3f 100644 --- a/fs/lockd/svcsubs.c +++ b/fs/lockd/svcsubs.c @@ -176,7 +176,7 @@ nlm_delete_file(struct nlm_file *file) } } -static int nlm_unlock_files(struct nlm_file *file, fl_owner_t owner) +static int nlm_unlock_files(struct nlm_file *file, const struct file_lock *fl) { struct file_lock lock; @@ -184,12 +184,15 @@ static int nlm_unlock_files(struct nlm_file *file, fl_owner_t owner) lock.fl_type = F_UNLCK; lock.fl_start = 0; lock.fl_end = OFFSET_MAX; - lock.fl_owner = owner; - if (file->f_file[O_RDONLY] && - vfs_lock_file(file->f_file[O_RDONLY], F_SETLK, &lock, NULL)) + lock.fl_owner = fl->fl_owner; + lock.fl_pid = fl->fl_pid; + lock.fl_flags = FL_POSIX; + + lock.fl_file = file->f_file[O_RDONLY]; + if (lock.fl_file && vfs_lock_file(lock.fl_file, F_SETLK, &lock, NULL)) goto out_err; - if (file->f_file[O_WRONLY] && - vfs_lock_file(file->f_file[O_WRONLY], F_SETLK, &lock, NULL)) + lock.fl_file = file->f_file[O_WRONLY]; + if (lock.fl_file && vfs_lock_file(lock.fl_file, F_SETLK, &lock, NULL)) goto out_err; return 0; out_err: @@ -226,7 +229,7 @@ again: if (match(lockhost, host)) { spin_unlock(&flctx->flc_lock); - if (nlm_unlock_files(file, fl->fl_owner)) + if (nlm_unlock_files(file, fl)) return 1; goto again; } diff --git a/fs/locks.c b/fs/locks.c index 3d6fb4ae847b..82a4487e95b3 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -2703,6 +2703,29 @@ int vfs_cancel_lock(struct file *filp, struct file_lock 
*fl) } EXPORT_SYMBOL_GPL(vfs_cancel_lock); +/** + * vfs_inode_has_locks - are any file locks held on @inode? + * @inode: inode to check for locks + * + * Return true if there are any FL_POSIX or FL_FLOCK locks currently + * set on @inode. + */ +bool vfs_inode_has_locks(struct inode *inode) +{ + struct file_lock_context *ctx; + bool ret; + + ctx = smp_load_acquire(&inode->i_flctx); + if (!ctx) + return false; + + spin_lock(&ctx->flc_lock); + ret = !list_empty(&ctx->flc_posix) || !list_empty(&ctx->flc_flock); + spin_unlock(&ctx->flc_lock); + return ret; +} +EXPORT_SYMBOL_GPL(vfs_inode_has_locks); + #ifdef CONFIG_PROC_FS #include #include diff --git a/fs/mbcache.c b/fs/mbcache.c index 2010bc80a3f2..95b047256d09 100644 --- a/fs/mbcache.c +++ b/fs/mbcache.c @@ -90,12 +90,19 @@ int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key, return -ENOMEM; INIT_LIST_HEAD(&entry->e_list); - /* One ref for hash, one ref returned */ - atomic_set(&entry->e_refcnt, 1); + /* + * We create entry with two references. One reference is kept by the + * hash table, the other reference is used to protect us from + * mb_cache_entry_delete_or_get() until the entry is fully setup. This + * avoids nesting of cache->c_list_lock into hash table bit locks which + * is problematic for RT. + */ + atomic_set(&entry->e_refcnt, 2); entry->e_key = key; entry->e_value = value; - entry->e_reusable = reusable; - entry->e_referenced = 0; + entry->e_flags = 0; + if (reusable) + set_bit(MBE_REUSABLE_B, &entry->e_flags); head = mb_cache_entry_head(cache, key); hlist_bl_lock(head); hlist_bl_for_each_entry(dup, dup_node, head, e_hash_list) { @@ -107,20 +114,24 @@ int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key, } hlist_bl_add_head(&entry->e_hash_list, head); hlist_bl_unlock(head); - spin_lock(&cache->c_list_lock); list_add_tail(&entry->e_list, &cache->c_list); - /* Grab ref for LRU list */ - atomic_inc(&entry->e_refcnt); cache->c_entry_count++; spin_unlock(&cache->c_list_lock); + mb_cache_entry_put(cache, entry); return 0; } EXPORT_SYMBOL(mb_cache_entry_create); -void __mb_cache_entry_free(struct mb_cache_entry *entry) +void __mb_cache_entry_free(struct mb_cache *cache, struct mb_cache_entry *entry) { + struct hlist_bl_head *head; + + head = mb_cache_entry_head(cache, entry->e_key); + hlist_bl_lock(head); + hlist_bl_del(&entry->e_hash_list); + hlist_bl_unlock(head); kmem_cache_free(mb_entry_cache, entry); } EXPORT_SYMBOL(__mb_cache_entry_free); @@ -134,7 +145,7 @@ EXPORT_SYMBOL(__mb_cache_entry_free); */ void mb_cache_entry_wait_unused(struct mb_cache_entry *entry) { - wait_var_event(&entry->e_refcnt, atomic_read(&entry->e_refcnt) <= 3); + wait_var_event(&entry->e_refcnt, atomic_read(&entry->e_refcnt) <= 2); } EXPORT_SYMBOL(mb_cache_entry_wait_unused); @@ -155,10 +166,10 @@ static struct mb_cache_entry *__entry_find(struct mb_cache *cache, while (node) { entry = hlist_bl_entry(node, struct mb_cache_entry, e_hash_list); - if (entry->e_key == key && entry->e_reusable) { - atomic_inc(&entry->e_refcnt); + if (entry->e_key == key && + test_bit(MBE_REUSABLE_B, &entry->e_flags) && + atomic_inc_not_zero(&entry->e_refcnt)) goto out; - } node = node->next; } entry = NULL; @@ -218,10 +229,9 @@ struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *cache, u32 key, head = mb_cache_entry_head(cache, key); hlist_bl_lock(head); hlist_bl_for_each_entry(entry, node, head, e_hash_list) { - if (entry->e_key == key && entry->e_value == value) { - atomic_inc(&entry->e_refcnt); + if (entry->e_key == key && 
entry->e_value == value && + atomic_inc_not_zero(&entry->e_refcnt)) goto out; - } } entry = NULL; out: @@ -281,37 +291,25 @@ EXPORT_SYMBOL(mb_cache_entry_delete); struct mb_cache_entry *mb_cache_entry_delete_or_get(struct mb_cache *cache, u32 key, u64 value) { - struct hlist_bl_node *node; - struct hlist_bl_head *head; struct mb_cache_entry *entry; - head = mb_cache_entry_head(cache, key); - hlist_bl_lock(head); - hlist_bl_for_each_entry(entry, node, head, e_hash_list) { - if (entry->e_key == key && entry->e_value == value) { - if (atomic_read(&entry->e_refcnt) > 2) { - atomic_inc(&entry->e_refcnt); - hlist_bl_unlock(head); - return entry; - } - /* We keep hash list reference to keep entry alive */ - hlist_bl_del_init(&entry->e_hash_list); - hlist_bl_unlock(head); - spin_lock(&cache->c_list_lock); - if (!list_empty(&entry->e_list)) { - list_del_init(&entry->e_list); - if (!WARN_ONCE(cache->c_entry_count == 0, - "mbcache: attempt to decrement c_entry_count past zero")) - cache->c_entry_count--; - atomic_dec(&entry->e_refcnt); - } - spin_unlock(&cache->c_list_lock); - mb_cache_entry_put(cache, entry); - return NULL; - } - } - hlist_bl_unlock(head); + entry = mb_cache_entry_get(cache, key, value); + if (!entry) + return NULL; + /* + * Drop the ref we got from mb_cache_entry_get() and the initial hash + * ref if we are the last user + */ + if (atomic_cmpxchg(&entry->e_refcnt, 2, 0) != 2) + return entry; + + spin_lock(&cache->c_list_lock); + if (!list_empty(&entry->e_list)) + list_del_init(&entry->e_list); + cache->c_entry_count--; + spin_unlock(&cache->c_list_lock); + __mb_cache_entry_free(cache, entry); return NULL; } EXPORT_SYMBOL(mb_cache_entry_delete_or_get); @@ -325,7 +323,7 @@ EXPORT_SYMBOL(mb_cache_entry_delete_or_get); void mb_cache_entry_touch(struct mb_cache *cache, struct mb_cache_entry *entry) { - entry->e_referenced = 1; + set_bit(MBE_REFERENCED_B, &entry->e_flags); } EXPORT_SYMBOL(mb_cache_entry_touch); @@ -343,42 +341,24 @@ static unsigned long mb_cache_shrink(struct mb_cache *cache, unsigned long nr_to_scan) { struct mb_cache_entry *entry; - struct hlist_bl_head *head; unsigned long shrunk = 0; spin_lock(&cache->c_list_lock); while (nr_to_scan-- && !list_empty(&cache->c_list)) { entry = list_first_entry(&cache->c_list, struct mb_cache_entry, e_list); - if (entry->e_referenced || atomic_read(&entry->e_refcnt) > 2) { - entry->e_referenced = 0; + /* Drop initial hash reference if there is no user */ + if (test_bit(MBE_REFERENCED_B, &entry->e_flags) || + atomic_cmpxchg(&entry->e_refcnt, 1, 0) != 1) { + clear_bit(MBE_REFERENCED_B, &entry->e_flags); list_move_tail(&entry->e_list, &cache->c_list); continue; } list_del_init(&entry->e_list); cache->c_entry_count--; - /* - * We keep LRU list reference so that entry doesn't go away - * from under us. - */ spin_unlock(&cache->c_list_lock); - head = mb_cache_entry_head(cache, entry->e_key); - hlist_bl_lock(head); - /* Now a reliable check if the entry didn't get used... */ - if (atomic_read(&entry->e_refcnt) > 2) { - hlist_bl_unlock(head); - spin_lock(&cache->c_list_lock); - list_add_tail(&entry->e_list, &cache->c_list); - cache->c_entry_count++; - continue; - } - if (!hlist_bl_unhashed(&entry->e_hash_list)) { - hlist_bl_del_init(&entry->e_hash_list); - atomic_dec(&entry->e_refcnt); - } - hlist_bl_unlock(head); - if (mb_cache_entry_put(cache, entry)) - shrunk++; + __mb_cache_entry_free(cache, entry); + shrunk++; cond_resched(); spin_lock(&cache->c_list_lock); } @@ -470,11 +450,6 @@ void mb_cache_destroy(struct mb_cache *cache) * point. 
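The mbcache refcount rework above leans on two lock-free idioms: lookups take a reference only while the entry is still live (atomic_inc_not_zero()), and teardown claims the final reference in a single step (atomic_cmpxchg()), so exactly one path ends up freeing the entry. A standalone sketch of the claim idiom using the kernel atomic API (illustrative structure, not the mbcache code itself):

#include <linux/atomic.h>
#include <linux/types.h>
#include <linux/slab.h>

struct obj {
        atomic_t refcnt;        /* 1 == only the cache's own reference remains */
};

/* Lookup side: succeed only if the object has not already been claimed. */
static struct obj *obj_get_live(struct obj *o)
{
        return atomic_inc_not_zero(&o->refcnt) ? o : NULL;
}

/*
 * Teardown side: atomically take the final reference.  Exactly one caller
 * observes the 1 -> 0 transition, so the kfree() cannot race or double-free.
 */
static bool obj_try_free(struct obj *o)
{
        if (atomic_cmpxchg(&o->refcnt, 1, 0) != 1)
                return false;   /* someone else still holds a reference */
        kfree(o);
        return true;
}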
*/ list_for_each_entry_safe(entry, next, &cache->c_list, e_list) { - if (!hlist_bl_unhashed(&entry->e_hash_list)) { - hlist_bl_del_init(&entry->e_hash_list); - atomic_dec(&entry->e_refcnt); - } else - WARN_ON(1); list_del(&entry->e_list); WARN_ON(atomic_read(&entry->e_refcnt) != 1); mb_cache_entry_put(cache, entry); diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c index bc0c698f3350..565421c6682e 100644 --- a/fs/nfs/namespace.c +++ b/fs/nfs/namespace.c @@ -147,7 +147,7 @@ struct vfsmount *nfs_d_automount(struct path *path) struct nfs_fs_context *ctx; struct fs_context *fc; struct vfsmount *mnt = ERR_PTR(-ENOMEM); - struct nfs_server *server = NFS_SERVER(d_inode(path->dentry)); + struct nfs_server *server = NFS_SB(path->dentry->d_sb); struct nfs_client *client = server->nfs_client; int timeout = READ_ONCE(nfs_mountpoint_expiry_timeout); int ret; diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index dc03924b6b71..b6b1fad031c7 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -126,6 +126,11 @@ nfs4_label_init_security(struct inode *dir, struct dentry *dentry, if (nfs_server_capable(dir, NFS_CAP_SECURITY_LABEL) == 0) return NULL; + label->lfs = 0; + label->pi = 0; + label->len = 0; + label->label = NULL; + err = security_dentry_init_security(dentry, sattr->ia_mode, &dentry->d_name, (void **)&label->label, &label->len); if (err == 0) @@ -2139,18 +2144,18 @@ static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context } static int nfs4_open_recover_helper(struct nfs4_opendata *opendata, - fmode_t fmode) + fmode_t fmode) { struct nfs4_state *newstate; + struct nfs_server *server = NFS_SB(opendata->dentry->d_sb); + int openflags = opendata->o_arg.open_flags; int ret; if (!nfs4_mode_match_open_stateid(opendata->state, fmode)) return 0; - opendata->o_arg.open_flags = 0; opendata->o_arg.fmode = fmode; - opendata->o_arg.share_access = nfs4_map_atomic_open_share( - NFS_SB(opendata->dentry->d_sb), - fmode, 0); + opendata->o_arg.share_access = + nfs4_map_atomic_open_share(server, fmode, openflags); memset(&opendata->o_res, 0, sizeof(opendata->o_res)); memset(&opendata->c_res, 0, sizeof(opendata->c_res)); nfs4_init_opendata_res(opendata); @@ -2730,10 +2735,15 @@ static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *s struct nfs4_opendata *opendata; int ret; - opendata = nfs4_open_recoverdata_alloc(ctx, state, - NFS4_OPEN_CLAIM_FH); + opendata = nfs4_open_recoverdata_alloc(ctx, state, NFS4_OPEN_CLAIM_FH); if (IS_ERR(opendata)) return PTR_ERR(opendata); + /* + * We're not recovering a delegation, so ask for no delegation. + * Otherwise the recovery thread could deadlock with an outstanding + * delegation return. 
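+	 * (For reference: nfs4_map_atomic_open_share() turns an O_DIRECT open flag
+	 *  into the NFS4_SHARE_WANT_NO_DELEG share-access hint where the server
+	 *  supports NFSv4.1 "want" flags, which is why setting O_DIRECT here is
+	 *  enough to ask that no delegation be handed out.)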
+ */ + opendata->o_arg.open_flags = O_DIRECT; ret = nfs4_open_recover(opendata, state); if (ret == -ESTALE) d_drop(ctx->dentry); @@ -3823,7 +3833,7 @@ nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx, int open_flags, struct iattr *attr, int *opened) { struct nfs4_state *state; - struct nfs4_label l = {0, 0, 0, NULL}, *label = NULL; + struct nfs4_label l, *label; label = nfs4_label_init_security(dir, ctx->dentry, attr, &l); @@ -3982,7 +3992,7 @@ static int _nfs4_discover_trunking(struct nfs_server *server, page = alloc_page(GFP_KERNEL); if (!page) - return -ENOMEM; + goto out_put_cred; locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL); if (!locations) goto out_free; @@ -3998,6 +4008,8 @@ out_free_2: kfree(locations); out_free: __free_page(page); +out_put_cred: + put_cred(cred); return status; } @@ -4657,7 +4669,7 @@ nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr, int flags) { struct nfs_server *server = NFS_SERVER(dir); - struct nfs4_label l, *ilabel = NULL; + struct nfs4_label l, *ilabel; struct nfs_open_context *ctx; struct nfs4_state *state; int status = 0; @@ -5017,7 +5029,7 @@ static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry, struct nfs4_exception exception = { .interruptible = true, }; - struct nfs4_label l, *label = NULL; + struct nfs4_label l, *label; int err; label = nfs4_label_init_security(dir, dentry, sattr, &l); @@ -5058,7 +5070,7 @@ static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry, struct nfs4_exception exception = { .interruptible = true, }; - struct nfs4_label l, *label = NULL; + struct nfs4_label l, *label; int err; label = nfs4_label_init_security(dir, dentry, sattr, &l); @@ -5177,7 +5189,7 @@ static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry, struct nfs4_exception exception = { .interruptible = true, }; - struct nfs4_label l, *label = NULL; + struct nfs4_label l, *label; int err; label = nfs4_label_init_security(dir, dentry, sattr, &l); diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index ecac56be6cb7..0cd803b4d90c 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -1227,6 +1227,8 @@ void nfs4_schedule_state_manager(struct nfs_client *clp) if (IS_ERR(task)) { printk(KERN_ERR "%s: kthread_run: %ld\n", __func__, PTR_ERR(task)); + if (!nfs_client_init_is_complete(clp)) + nfs_mark_client_ready(clp, PTR_ERR(task)); nfs4_clear_state_manager_bit(clp); nfs_put_client(clp); module_put(THIS_MODULE); diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index 046788afb6d9..0ae9e06a0bba 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -4179,19 +4179,17 @@ static int decode_attr_security_label(struct xdr_stream *xdr, uint32_t *bitmap, p = xdr_inline_decode(xdr, len); if (unlikely(!p)) return -EIO; + bitmap[2] &= ~FATTR4_WORD2_SECURITY_LABEL; if (len < NFS4_MAXLABELLEN) { - if (label) { - if (label->len) { - if (label->len < len) - return -ERANGE; - memcpy(label->label, p, len); - } + if (label && label->len) { + if (label->len < len) + return -ERANGE; + memcpy(label->label, p, len); label->len = len; label->pi = pi; label->lfs = lfs; status = NFS_ATTR_FATTR_V4_SECURITY_LABEL; } - bitmap[2] &= ~FATTR4_WORD2_SECURITY_LABEL; } else printk(KERN_WARNING "%s: label too long (%u)!\n", __func__, len); diff --git a/fs/nfsd/nfs2acl.c b/fs/nfsd/nfs2acl.c index 4b43929c1f25..30a1782a03f0 100644 --- a/fs/nfsd/nfs2acl.c +++ b/fs/nfsd/nfs2acl.c @@ -246,37 +246,27 @@ static int nfsaclsvc_encode_getaclres(struct svc_rqst *rqstp, __be32 *p) struct nfsd3_getaclres 
*resp = rqstp->rq_resp; struct dentry *dentry = resp->fh.fh_dentry; struct inode *inode; - int w; if (!svcxdr_encode_stat(xdr, resp->status)) - return 0; + return false; if (dentry == NULL || d_really_is_negative(dentry)) - return 1; + return true; inode = d_inode(dentry); if (!svcxdr_encode_fattr(rqstp, xdr, &resp->fh, &resp->stat)) - return 0; + return false; if (xdr_stream_encode_u32(xdr, resp->mask) < 0) - return 0; - - rqstp->rq_res.page_len = w = nfsacl_size( - (resp->mask & NFS_ACL) ? resp->acl_access : NULL, - (resp->mask & NFS_DFACL) ? resp->acl_default : NULL); - while (w > 0) { - if (!*(rqstp->rq_next_page++)) - return 1; - w -= PAGE_SIZE; - } + return false; if (!nfs_stream_encode_acl(xdr, inode, resp->acl_access, resp->mask & NFS_ACL, 0)) - return 0; + return false; if (!nfs_stream_encode_acl(xdr, inode, resp->acl_default, resp->mask & NFS_DFACL, NFS_ACL_DEFAULT)) - return 0; + return false; - return 1; + return true; } /* ACCESS */ @@ -286,17 +276,17 @@ static int nfsaclsvc_encode_accessres(struct svc_rqst *rqstp, __be32 *p) struct nfsd3_accessres *resp = rqstp->rq_resp; if (!svcxdr_encode_stat(xdr, resp->status)) - return 0; + return false; switch (resp->status) { case nfs_ok: if (!svcxdr_encode_fattr(rqstp, xdr, &resp->fh, &resp->stat)) - return 0; + return false; if (xdr_stream_encode_u32(xdr, resp->access) < 0) - return 0; + return false; break; } - return 1; + return true; } /* diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c index 0f8b10f363e7..2e0040d3bca7 100644 --- a/fs/nfsd/nfs4callback.c +++ b/fs/nfsd/nfs4callback.c @@ -917,7 +917,6 @@ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c } else { if (!conn->cb_xprt) return -EINVAL; - clp->cl_cb_conn.cb_xprt = conn->cb_xprt; clp->cl_cb_session = ses; args.bc_xprt = conn->cb_xprt; args.prognumber = clp->cl_cb_session->se_cb_prog; @@ -937,6 +936,9 @@ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c rpc_shutdown_client(client); return -ENOMEM; } + + if (clp->cl_minorversion != 0) + clp->cl_cb_conn.cb_xprt = conn->cb_xprt; clp->cl_cb_client = client; clp->cl_cb_cred = cred; rcu_read_lock(); diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 7b763f146b62..c062728034ad 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -627,15 +627,26 @@ find_any_file(struct nfs4_file *f) return ret; } -static struct nfsd_file *find_deleg_file(struct nfs4_file *f) +static struct nfsd_file *find_any_file_locked(struct nfs4_file *f) { - struct nfsd_file *ret = NULL; + lockdep_assert_held(&f->fi_lock); + + if (f->fi_fds[O_RDWR]) + return f->fi_fds[O_RDWR]; + if (f->fi_fds[O_WRONLY]) + return f->fi_fds[O_WRONLY]; + if (f->fi_fds[O_RDONLY]) + return f->fi_fds[O_RDONLY]; + return NULL; +} + +static struct nfsd_file *find_deleg_file_locked(struct nfs4_file *f) +{ + lockdep_assert_held(&f->fi_lock); - spin_lock(&f->fi_lock); if (f->fi_deleg_file) - ret = nfsd_file_get(f->fi_deleg_file); - spin_unlock(&f->fi_lock); - return ret; + return f->fi_deleg_file; + return NULL; } static atomic_long_t num_delegations; @@ -2501,9 +2512,11 @@ static int nfs4_show_open(struct seq_file *s, struct nfs4_stid *st) ols = openlockstateid(st); oo = ols->st_stateowner; nf = st->sc_file; - file = find_any_file(nf); + + spin_lock(&nf->fi_lock); + file = find_any_file_locked(nf); if (!file) - return 0; + goto out; seq_printf(s, "- "); nfs4_show_stateid(s, &st->sc_stateid); @@ -2525,8 +2538,8 @@ static int nfs4_show_open(struct seq_file *s, struct nfs4_stid *st) seq_printf(s, ", 
"); nfs4_show_owner(s, oo); seq_printf(s, " }\n"); - nfsd_file_put(file); - +out: + spin_unlock(&nf->fi_lock); return 0; } @@ -2540,9 +2553,10 @@ static int nfs4_show_lock(struct seq_file *s, struct nfs4_stid *st) ols = openlockstateid(st); oo = ols->st_stateowner; nf = st->sc_file; - file = find_any_file(nf); + spin_lock(&nf->fi_lock); + file = find_any_file_locked(nf); if (!file) - return 0; + goto out; seq_printf(s, "- "); nfs4_show_stateid(s, &st->sc_stateid); @@ -2562,8 +2576,8 @@ static int nfs4_show_lock(struct seq_file *s, struct nfs4_stid *st) seq_printf(s, ", "); nfs4_show_owner(s, oo); seq_printf(s, " }\n"); - nfsd_file_put(file); - +out: + spin_unlock(&nf->fi_lock); return 0; } @@ -2575,9 +2589,10 @@ static int nfs4_show_deleg(struct seq_file *s, struct nfs4_stid *st) ds = delegstateid(st); nf = st->sc_file; - file = find_deleg_file(nf); + spin_lock(&nf->fi_lock); + file = find_deleg_file_locked(nf); if (!file) - return 0; + goto out; seq_printf(s, "- "); nfs4_show_stateid(s, &st->sc_stateid); @@ -2593,8 +2608,8 @@ static int nfs4_show_deleg(struct seq_file *s, struct nfs4_stid *st) seq_printf(s, ", "); nfs4_show_fname(s, file); seq_printf(s, " }\n"); - nfsd_file_put(file); - +out: + spin_unlock(&nf->fi_lock); return 0; } diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index e0409f6cdfd5..dfd3877fdd81 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c @@ -3514,6 +3514,17 @@ nfsd4_encode_dirent(void *ccdv, const char *name, int namlen, case nfserr_noent: xdr_truncate_encode(xdr, start_offset); goto skip_entry; + case nfserr_jukebox: + /* + * The pseudoroot should only display dentries that lead to + * exports. If we get EJUKEBOX here, then we can't tell whether + * this entry should be included. Just fail the whole READDIR + * with NFS4ERR_DELAY in that case, and hope that the situation + * will resolve itself by the client's next attempt. + */ + if (cd->rd_fhp->fh_export->ex_flags & NFSEXP_V4ROOT) + goto fail; + fallthrough; default: /* * If the client requested the RDATTR_ERROR attribute, diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c index ccb59e91011b..373695cc62a7 100644 --- a/fs/nfsd/nfssvc.c +++ b/fs/nfsd/nfssvc.c @@ -425,8 +425,8 @@ static void nfsd_shutdown_net(struct net *net) { struct nfsd_net *nn = net_generic(net, nfsd_net_id); - nfsd_file_cache_shutdown_net(net); nfs4_state_shutdown_net(net); + nfsd_file_cache_shutdown_net(net); if (nn->lockd_up) { lockd_down(net); nn->lockd_up = false; diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c index 16994598a8db..6aa6cef0757f 100644 --- a/fs/nilfs2/the_nilfs.c +++ b/fs/nilfs2/the_nilfs.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include "nilfs.h" #include "segment.h" @@ -192,6 +193,34 @@ static int nilfs_store_log_cursor(struct the_nilfs *nilfs, return ret; } +/** + * nilfs_get_blocksize - get block size from raw superblock data + * @sb: super block instance + * @sbp: superblock raw data buffer + * @blocksize: place to store block size + * + * nilfs_get_blocksize() calculates the block size from the block size + * exponent information written in @sbp and stores it in @blocksize, + * or aborts with an error message if it's too large. + * + * Return Value: On success, 0 is returned. If the block size is too + * large, -EINVAL is returned. 
+ */ +static int nilfs_get_blocksize(struct super_block *sb, + struct nilfs_super_block *sbp, int *blocksize) +{ + unsigned int shift_bits = le32_to_cpu(sbp->s_log_block_size); + + if (unlikely(shift_bits > + ilog2(NILFS_MAX_BLOCK_SIZE) - BLOCK_SIZE_BITS)) { + nilfs_err(sb, "too large filesystem blocksize: 2 ^ %u KiB", + shift_bits); + return -EINVAL; + } + *blocksize = BLOCK_SIZE << shift_bits; + return 0; +} + /** * load_nilfs - load and recover the nilfs * @nilfs: the_nilfs structure to be released @@ -245,11 +274,15 @@ int load_nilfs(struct the_nilfs *nilfs, struct super_block *sb) nilfs->ns_sbwtime = le64_to_cpu(sbp[0]->s_wtime); /* verify consistency between two super blocks */ - blocksize = BLOCK_SIZE << le32_to_cpu(sbp[0]->s_log_block_size); + err = nilfs_get_blocksize(sb, sbp[0], &blocksize); + if (err) + goto scan_error; + if (blocksize != nilfs->ns_blocksize) { nilfs_warn(sb, "blocksize differs between two super blocks (%d != %d)", blocksize, nilfs->ns_blocksize); + err = -EINVAL; goto scan_error; } @@ -443,11 +476,33 @@ static int nilfs_valid_sb(struct nilfs_super_block *sbp) return crc == le32_to_cpu(sbp->s_sum); } -static int nilfs_sb2_bad_offset(struct nilfs_super_block *sbp, u64 offset) +/** + * nilfs_sb2_bad_offset - check the location of the second superblock + * @sbp: superblock raw data buffer + * @offset: byte offset of second superblock calculated from device size + * + * nilfs_sb2_bad_offset() checks if the position on the second + * superblock is valid or not based on the filesystem parameters + * stored in @sbp. If @offset points to a location within the segment + * area, or if the parameters themselves are not normal, it is + * determined to be invalid. + * + * Return Value: true if invalid, false if valid. + */ +static bool nilfs_sb2_bad_offset(struct nilfs_super_block *sbp, u64 offset) { - return offset < ((le64_to_cpu(sbp->s_nsegments) * - le32_to_cpu(sbp->s_blocks_per_segment)) << - (le32_to_cpu(sbp->s_log_block_size) + 10)); + unsigned int shift_bits = le32_to_cpu(sbp->s_log_block_size); + u32 blocks_per_segment = le32_to_cpu(sbp->s_blocks_per_segment); + u64 nsegments = le64_to_cpu(sbp->s_nsegments); + u64 index; + + if (blocks_per_segment < NILFS_SEG_MIN_BLOCKS || + shift_bits > ilog2(NILFS_MAX_BLOCK_SIZE) - BLOCK_SIZE_BITS) + return true; + + index = offset >> (shift_bits + BLOCK_SIZE_BITS); + do_div(index, blocks_per_segment); + return index < nsegments; } static void nilfs_release_super_block(struct the_nilfs *nilfs) @@ -586,9 +641,11 @@ int init_nilfs(struct the_nilfs *nilfs, struct super_block *sb, char *data) if (err) goto failed_sbh; - blocksize = BLOCK_SIZE << le32_to_cpu(sbp->s_log_block_size); - if (blocksize < NILFS_MIN_BLOCK_SIZE || - blocksize > NILFS_MAX_BLOCK_SIZE) { + err = nilfs_get_blocksize(sb, sbp, &blocksize); + if (err) + goto failed_sbh; + + if (blocksize < NILFS_MIN_BLOCK_SIZE) { nilfs_err(sb, "couldn't mount because of unsupported filesystem blocksize %d", blocksize); diff --git a/fs/ntfs3/attrib.c b/fs/ntfs3/attrib.c index e8c00dda42ad..42af83bcaf13 100644 --- a/fs/ntfs3/attrib.c +++ b/fs/ntfs3/attrib.c @@ -101,6 +101,10 @@ int attr_load_runs(struct ATTRIB *attr, struct ntfs_inode *ni, asize = le32_to_cpu(attr->size); run_off = le16_to_cpu(attr->nres.run_off); + + if (run_off > asize) + return -EINVAL; + err = run_unpack_ex(run, ni->mi.sbi, ni->mi.rno, svcn, evcn, vcn ? 
*vcn : svcn, Add2Ptr(attr, run_off), asize - run_off); @@ -1142,6 +1146,11 @@ int attr_load_runs_vcn(struct ntfs_inode *ni, enum ATTR_TYPE type, CLST svcn, evcn; u16 ro; + if (!ni) { + /* Is record corrupted? */ + return -ENOENT; + } + attr = ni_find_attr(ni, NULL, NULL, type, name, name_len, &vcn, NULL); if (!attr) { /* Is record corrupted? */ @@ -1157,6 +1166,10 @@ int attr_load_runs_vcn(struct ntfs_inode *ni, enum ATTR_TYPE type, } ro = le16_to_cpu(attr->nres.run_off); + + if (ro > le32_to_cpu(attr->size)) + return -EINVAL; + err = run_unpack_ex(run, ni->mi.sbi, ni->mi.rno, svcn, evcn, svcn, Add2Ptr(attr, ro), le32_to_cpu(attr->size) - ro); if (err < 0) @@ -1832,6 +1845,11 @@ int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes) u16 le_sz; u16 roff = le16_to_cpu(attr->nres.run_off); + if (roff > le32_to_cpu(attr->size)) { + err = -EINVAL; + goto out; + } + run_unpack_ex(RUN_DEALLOCATE, sbi, ni->mi.rno, svcn, evcn1 - 1, svcn, Add2Ptr(attr, roff), le32_to_cpu(attr->size) - roff); diff --git a/fs/ntfs3/attrlist.c b/fs/ntfs3/attrlist.c index bad6d8a849a2..c0c6bcbc8c05 100644 --- a/fs/ntfs3/attrlist.c +++ b/fs/ntfs3/attrlist.c @@ -68,6 +68,11 @@ int ntfs_load_attr_list(struct ntfs_inode *ni, struct ATTRIB *attr) run_init(&ni->attr_list.run); + if (run_off > le32_to_cpu(attr->size)) { + err = -EINVAL; + goto out; + } + err = run_unpack_ex(&ni->attr_list.run, ni->mi.sbi, ni->mi.rno, 0, le64_to_cpu(attr->nres.evcn), 0, Add2Ptr(attr, run_off), diff --git a/fs/ntfs3/bitmap.c b/fs/ntfs3/bitmap.c index aa184407520f..2a63793f522d 100644 --- a/fs/ntfs3/bitmap.c +++ b/fs/ntfs3/bitmap.c @@ -666,7 +666,7 @@ int wnd_init(struct wnd_bitmap *wnd, struct super_block *sb, size_t nbits) if (!wnd->bits_last) wnd->bits_last = wbits; - wnd->free_bits = kcalloc(wnd->nwnd, sizeof(u16), GFP_NOFS); + wnd->free_bits = kcalloc(wnd->nwnd, sizeof(u16), GFP_NOFS | __GFP_NOWARN); if (!wnd->free_bits) return -ENOMEM; @@ -1432,7 +1432,7 @@ int ntfs_trim_fs(struct ntfs_sb_info *sbi, struct fstrim_range *range) down_read_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS); - for (; iw < wnd->nbits; iw++, wbit = 0) { + for (; iw < wnd->nwnd; iw++, wbit = 0) { CLST lcn_wnd = iw * wbits; struct buffer_head *bh; diff --git a/fs/ntfs3/file.c b/fs/ntfs3/file.c index 7a678a5b1ca5..c526e0427f2b 100644 --- a/fs/ntfs3/file.c +++ b/fs/ntfs3/file.c @@ -488,10 +488,10 @@ static int ntfs_truncate(struct inode *inode, loff_t new_size) new_valid = ntfs_up_block(sb, min_t(u64, ni->i_valid, new_size)); - ni_lock(ni); - truncate_setsize(inode, new_size); + ni_lock(ni); + down_write(&ni->file.run_lock); err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, new_size, &new_valid, ni->mi.sbi->options->prealloc, NULL); diff --git a/fs/ntfs3/frecord.c b/fs/ntfs3/frecord.c index 18842998c8fa..cdeb0b51f0ba 100644 --- a/fs/ntfs3/frecord.c +++ b/fs/ntfs3/frecord.c @@ -567,6 +567,12 @@ static int ni_repack(struct ntfs_inode *ni) } roff = le16_to_cpu(attr->nres.run_off); + + if (roff > le32_to_cpu(attr->size)) { + err = -EINVAL; + break; + } + err = run_unpack(&run, sbi, ni->mi.rno, svcn, evcn, svcn, Add2Ptr(attr, roff), le32_to_cpu(attr->size) - roff); @@ -1541,6 +1547,9 @@ int ni_delete_all(struct ntfs_inode *ni) asize = le32_to_cpu(attr->size); roff = le16_to_cpu(attr->nres.run_off); + if (roff > asize) + return -EINVAL; + /* run==1 means unpack and deallocate. 
*/ run_unpack_ex(RUN_DEALLOCATE, sbi, ni->mi.rno, svcn, evcn, svcn, Add2Ptr(attr, roff), asize - roff); @@ -2242,6 +2251,11 @@ remove_wof: asize = le32_to_cpu(attr->size); roff = le16_to_cpu(attr->nres.run_off); + if (roff > asize) { + err = -EINVAL; + goto out; + } + /*run==1 Means unpack and deallocate. */ run_unpack_ex(RUN_DEALLOCATE, sbi, ni->mi.rno, svcn, evcn, svcn, Add2Ptr(attr, roff), asize - roff); diff --git a/fs/ntfs3/fslog.c b/fs/ntfs3/fslog.c index 614513460b8e..20abdb268286 100644 --- a/fs/ntfs3/fslog.c +++ b/fs/ntfs3/fslog.c @@ -1132,7 +1132,7 @@ static int read_log_page(struct ntfs_log *log, u32 vbo, return -EINVAL; if (!*buffer) { - to_free = kmalloc(bytes, GFP_NOFS); + to_free = kmalloc(log->page_size, GFP_NOFS); if (!to_free) return -ENOMEM; *buffer = to_free; @@ -1180,10 +1180,7 @@ static int log_read_rst(struct ntfs_log *log, u32 l_size, bool first, struct restart_info *info) { u32 skip, vbo; - struct RESTART_HDR *r_page = kmalloc(DefaultLogPageSize, GFP_NOFS); - - if (!r_page) - return -ENOMEM; + struct RESTART_HDR *r_page = NULL; /* Determine which restart area we are looking for. */ if (first) { @@ -1197,7 +1194,6 @@ static int log_read_rst(struct ntfs_log *log, u32 l_size, bool first, /* Loop continuously until we succeed. */ for (; vbo < l_size; vbo = 2 * vbo + skip, skip = 0) { bool usa_error; - u32 sys_page_size; bool brst, bchk; struct RESTART_AREA *ra; @@ -1251,24 +1247,6 @@ static int log_read_rst(struct ntfs_log *log, u32 l_size, bool first, goto check_result; } - /* Read the entire restart area. */ - sys_page_size = le32_to_cpu(r_page->sys_page_size); - if (DefaultLogPageSize != sys_page_size) { - kfree(r_page); - r_page = kzalloc(sys_page_size, GFP_NOFS); - if (!r_page) - return -ENOMEM; - - if (read_log_page(log, vbo, - (struct RECORD_PAGE_HDR **)&r_page, - &usa_error)) { - /* Ignore any errors. 
*/ - kfree(r_page); - r_page = NULL; - continue; - } - } - if (is_client_area_valid(r_page, usa_error)) { info->valid_page = true; ra = Add2Ptr(r_page, le16_to_cpu(r_page->ra_off)); @@ -2727,6 +2705,9 @@ static inline bool check_attr(const struct MFT_REC *rec, return false; } + if (run_off > asize) + return false; + if (run_unpack(NULL, sbi, 0, svcn, evcn, svcn, Add2Ptr(attr, run_off), asize - run_off) < 0) { return false; @@ -4769,6 +4750,12 @@ fake_attr: u16 roff = le16_to_cpu(attr->nres.run_off); CLST svcn = le64_to_cpu(attr->nres.svcn); + if (roff > t32) { + kfree(oa->attr); + oa->attr = NULL; + goto fake_attr; + } + err = run_unpack(&oa->run0, sbi, inode->i_ino, svcn, le64_to_cpu(attr->nres.evcn), svcn, Add2Ptr(attr, roff), t32 - roff); diff --git a/fs/ntfs3/fsntfs.c b/fs/ntfs3/fsntfs.c index 24b57c3cc625..4a97a28cb8f2 100644 --- a/fs/ntfs3/fsntfs.c +++ b/fs/ntfs3/fsntfs.c @@ -1878,9 +1878,10 @@ int ntfs_security_init(struct ntfs_sb_info *sbi) goto out; } - root_sdh = resident_data(attr); + root_sdh = resident_data_ex(attr, sizeof(struct INDEX_ROOT)); if (root_sdh->type != ATTR_ZERO || - root_sdh->rule != NTFS_COLLATION_TYPE_SECURITY_HASH) { + root_sdh->rule != NTFS_COLLATION_TYPE_SECURITY_HASH || + offsetof(struct INDEX_ROOT, ihdr) + root_sdh->ihdr.used > attr->res.data_size) { err = -EINVAL; goto out; } @@ -1896,9 +1897,10 @@ int ntfs_security_init(struct ntfs_sb_info *sbi) goto out; } - root_sii = resident_data(attr); + root_sii = resident_data_ex(attr, sizeof(struct INDEX_ROOT)); if (root_sii->type != ATTR_ZERO || - root_sii->rule != NTFS_COLLATION_TYPE_UINT) { + root_sii->rule != NTFS_COLLATION_TYPE_UINT || + offsetof(struct INDEX_ROOT, ihdr) + root_sii->ihdr.used > attr->res.data_size) { err = -EINVAL; goto out; } diff --git a/fs/ntfs3/index.c b/fs/ntfs3/index.c index 76ebea253fa2..99f8a57e9f7a 100644 --- a/fs/ntfs3/index.c +++ b/fs/ntfs3/index.c @@ -1017,6 +1017,12 @@ ok: err = 0; } + /* check for index header length */ + if (offsetof(struct INDEX_BUFFER, ihdr) + ib->ihdr.used > bytes) { + err = -EINVAL; + goto out; + } + in->index = ib; *node = in; diff --git a/fs/ntfs3/inode.c b/fs/ntfs3/inode.c index 64b4a3c29878..ed640e4e3fac 100644 --- a/fs/ntfs3/inode.c +++ b/fs/ntfs3/inode.c @@ -129,6 +129,9 @@ next_attr: rsize = attr->non_res ? 
0 : le32_to_cpu(attr->res.data_size); asize = le32_to_cpu(attr->size); + if (le16_to_cpu(attr->name_off) + attr->name_len > asize) + goto out; + switch (attr->type) { case ATTR_STD: if (attr->non_res || @@ -364,7 +367,13 @@ next_attr: attr_unpack_run: roff = le16_to_cpu(attr->nres.run_off); + if (roff > asize) { + err = -EINVAL; + goto out; + } + t64 = le64_to_cpu(attr->nres.svcn); + err = run_unpack_ex(run, sbi, ino, t64, le64_to_cpu(attr->nres.evcn), t64, Add2Ptr(attr, roff), asize - roff); if (err < 0) diff --git a/fs/ntfs3/record.c b/fs/ntfs3/record.c index 861e35791506..fd342da398be 100644 --- a/fs/ntfs3/record.c +++ b/fs/ntfs3/record.c @@ -220,6 +220,11 @@ struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr) return NULL; } + if (off + asize < off) { + /* overflow check */ + return NULL; + } + attr = Add2Ptr(attr, asize); off += asize; } @@ -260,6 +265,11 @@ struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr) if (t16 + t32 > asize) return NULL; + if (attr->name_len && + le16_to_cpu(attr->name_off) + sizeof(short) * attr->name_len > t16) { + return NULL; + } + return attr; } diff --git a/fs/ntfs3/super.c b/fs/ntfs3/super.c index f3b88c7e35f7..33b1833ad525 100644 --- a/fs/ntfs3/super.c +++ b/fs/ntfs3/super.c @@ -672,7 +672,7 @@ static u32 true_sectors_per_clst(const struct NTFS_BOOT *boot) if (boot->sectors_per_clusters <= 0x80) return boot->sectors_per_clusters; if (boot->sectors_per_clusters >= 0xf4) /* limit shift to 2MB max */ - return 1U << (0 - boot->sectors_per_clusters); + return 1U << -(s8)boot->sectors_per_clusters; return -EINVAL; } @@ -789,7 +789,7 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size, : (u32)boot->record_size << sbi->cluster_bits; - if (record_size > MAXIMUM_BYTES_PER_MFT) + if (record_size > MAXIMUM_BYTES_PER_MFT || record_size < SECTOR_SIZE) goto out; sbi->record_bits = blksize_bits(record_size); @@ -1136,7 +1136,7 @@ static int ntfs_fill_super(struct super_block *sb, struct fs_context *fc) goto put_inode_out; } bytes = inode->i_size; - sbi->def_table = t = kmalloc(bytes, GFP_NOFS); + sbi->def_table = t = kmalloc(bytes, GFP_NOFS | __GFP_NOWARN); if (!t) { err = -ENOMEM; goto put_inode_out; @@ -1255,9 +1255,9 @@ load_root: ref.low = cpu_to_le32(MFT_REC_ROOT); ref.seq = cpu_to_le16(MFT_REC_ROOT); inode = ntfs_iget5(sb, &ref, &NAME_ROOT); - if (IS_ERR(inode)) { + if (IS_ERR(inode) || !inode->i_op) { ntfs_err(sb, "Failed to load root."); - err = PTR_ERR(inode); + err = IS_ERR(inode) ? PTR_ERR(inode) : -EINVAL; goto out; } @@ -1276,6 +1276,7 @@ out: * Free resources here. * ntfs_fs_free will be called with fc->s_fs_info = NULL */ + put_mount_options(sbi->options); put_ntfs(sbi); sb->s_fs_info = NULL; diff --git a/fs/ntfs3/xattr.c b/fs/ntfs3/xattr.c index eb799a5cdfad..8847db015908 100644 --- a/fs/ntfs3/xattr.c +++ b/fs/ntfs3/xattr.c @@ -107,7 +107,7 @@ static int ntfs_read_ea(struct ntfs_inode *ni, struct EA_FULL **ea, return -EFBIG; /* Allocate memory for packed Ea. 
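The kmalloc(size + add_bytes) fix just below swaps the open-coded addition for size_add() from <linux/overflow.h>: if the sum would overflow, size_add() saturates to SIZE_MAX and the allocation fails cleanly instead of silently producing a too-small buffer. A minimal illustration of the helper (names are illustrative, behaviour as documented for overflow.h):

#include <linux/overflow.h>
#include <linux/slab.h>

static void *alloc_packed(size_t payload, size_t extra)
{
        /* Saturates to SIZE_MAX on overflow, so kmalloc() simply returns NULL. */
        return kmalloc(size_add(payload, extra), GFP_NOFS);
}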
*/ - ea_p = kmalloc(size + add_bytes, GFP_NOFS); + ea_p = kmalloc(size_add(size, add_bytes), GFP_NOFS); if (!ea_p) return -ENOMEM; diff --git a/fs/ocfs2/stackglue.c b/fs/ocfs2/stackglue.c index 16f1bfc407f2..955f475f9aca 100644 --- a/fs/ocfs2/stackglue.c +++ b/fs/ocfs2/stackglue.c @@ -703,6 +703,8 @@ static struct ctl_table_header *ocfs2_table_header; static int __init ocfs2_stack_glue_init(void) { + int ret; + strcpy(cluster_stack_name, OCFS2_STACK_PLUGIN_O2CB); ocfs2_table_header = register_sysctl_table(ocfs2_root_table); @@ -712,7 +714,11 @@ static int __init ocfs2_stack_glue_init(void) return -ENOMEM; /* or something. */ } - return ocfs2_sysfs_init(); + ret = ocfs2_sysfs_init(); + if (ret) + unregister_sysctl_table(ocfs2_table_header); + + return ret; } static void __exit ocfs2_stack_glue_exit(void) diff --git a/fs/orangefs/orangefs-debugfs.c b/fs/orangefs/orangefs-debugfs.c index 29eaa4544372..1b508f543384 100644 --- a/fs/orangefs/orangefs-debugfs.c +++ b/fs/orangefs/orangefs-debugfs.c @@ -194,15 +194,10 @@ void orangefs_debugfs_init(int debug_mask) */ static void orangefs_kernel_debug_init(void) { - int rc = -ENOMEM; - char *k_buffer = NULL; + static char k_buffer[ORANGEFS_MAX_DEBUG_STRING_LEN] = { }; gossip_debug(GOSSIP_DEBUGFS_DEBUG, "%s: start\n", __func__); - k_buffer = kzalloc(ORANGEFS_MAX_DEBUG_STRING_LEN, GFP_KERNEL); - if (!k_buffer) - goto out; - if (strlen(kernel_debug_string) + 1 < ORANGEFS_MAX_DEBUG_STRING_LEN) { strcpy(k_buffer, kernel_debug_string); strcat(k_buffer, "\n"); @@ -213,15 +208,14 @@ static void orangefs_kernel_debug_init(void) debugfs_create_file(ORANGEFS_KMOD_DEBUG_FILE, 0444, debug_dir, k_buffer, &kernel_debug_fops); - -out: - gossip_debug(GOSSIP_DEBUGFS_DEBUG, "%s: rc:%d:\n", __func__, rc); } void orangefs_debugfs_cleanup(void) { debugfs_remove_recursive(debug_dir); + kfree(debug_help_string); + debug_help_string = NULL; } /* open ORANGEFS_KMOD_DEBUG_HELP_FILE */ @@ -297,18 +291,13 @@ static int help_show(struct seq_file *m, void *v) /* * initialize the client-debug file. 
*/ -static int orangefs_client_debug_init(void) +static void orangefs_client_debug_init(void) { - int rc = -ENOMEM; - char *c_buffer = NULL; + static char c_buffer[ORANGEFS_MAX_DEBUG_STRING_LEN] = { }; gossip_debug(GOSSIP_DEBUGFS_DEBUG, "%s: start\n", __func__); - c_buffer = kzalloc(ORANGEFS_MAX_DEBUG_STRING_LEN, GFP_KERNEL); - if (!c_buffer) - goto out; - if (strlen(client_debug_string) + 1 < ORANGEFS_MAX_DEBUG_STRING_LEN) { strcpy(c_buffer, client_debug_string); strcat(c_buffer, "\n"); @@ -322,13 +311,6 @@ static int orangefs_client_debug_init(void) debug_dir, c_buffer, &kernel_debug_fops); - - rc = 0; - -out: - - gossip_debug(GOSSIP_DEBUGFS_DEBUG, "%s: rc:%d:\n", __func__, rc); - return rc; } /* open ORANGEFS_KMOD_DEBUG_FILE or ORANGEFS_CLIENT_DEBUG_FILE.*/ @@ -671,6 +653,7 @@ int orangefs_prepare_debugfs_help_string(int at_boot) memset(debug_help_string, 0, DEBUG_HELP_STRING_SIZE); strlcat(debug_help_string, new, string_size); mutex_unlock(&orangefs_help_file_lock); + kfree(new); } rc = 0; diff --git a/fs/orangefs/orangefs-mod.c b/fs/orangefs/orangefs-mod.c index cd7297815f91..5ab741c60b7e 100644 --- a/fs/orangefs/orangefs-mod.c +++ b/fs/orangefs/orangefs-mod.c @@ -141,7 +141,7 @@ static int __init orangefs_init(void) gossip_err("%s: could not initialize device subsystem %d!\n", __func__, ret); - goto cleanup_device; + goto cleanup_sysfs; } ret = register_filesystem(&orangefs_fs_type); @@ -152,11 +152,11 @@ static int __init orangefs_init(void) goto out; } - orangefs_sysfs_exit(); - -cleanup_device: orangefs_dev_cleanup(); +cleanup_sysfs: + orangefs_sysfs_exit(); + sysfs_init_failed: orangefs_debugfs_cleanup(); diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c index f18490813170..eca984d6484d 100644 --- a/fs/overlayfs/dir.c +++ b/fs/overlayfs/dir.c @@ -589,28 +589,42 @@ static int ovl_create_or_link(struct dentry *dentry, struct inode *inode, goto out_revert_creds; } - err = -ENOMEM; - override_cred = prepare_creds(); - if (override_cred) { + if (!attr->hardlink) { + err = -ENOMEM; + override_cred = prepare_creds(); + if (!override_cred) + goto out_revert_creds; + /* + * In the creation cases(create, mkdir, mknod, symlink), + * ovl should transfer current's fs{u,g}id to underlying + * fs. Because underlying fs want to initialize its new + * inode owner using current's fs{u,g}id. And in this + * case, the @inode is a new inode that is initialized + * in inode_init_owner() to current's fs{u,g}id. So use + * the inode's i_{u,g}id to override the cred's fs{u,g}id. + * + * But in the other hardlink case, ovl_link() does not + * create a new inode, so just use the ovl mounter's + * fs{u,g}id. 
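The comment above describes the usual "act as this owner" pattern for kernel credentials. A stripped-down sketch of the canonical sequence is shown below (illustrative only; the overlayfs hunk itself uses a slightly different put_cred() dance because the mounter's credentials are already overridden earlier in ovl_create_or_link()):

#include <linux/cred.h>
#include <linux/fs.h>

static int do_as_inode_owner(struct inode *inode)
{
        const struct cred *old_cred;
        struct cred *new_cred;

        new_cred = prepare_creds();             /* private, modifiable copy */
        if (!new_cred)
                return -ENOMEM;
        new_cred->fsuid = inode->i_uid;
        new_cred->fsgid = inode->i_gid;

        old_cred = override_creds(new_cred);    /* install, remember previous */
        /* ... operate on the underlying filesystem as the inode's owner ... */
        revert_creds(old_cred);                 /* restore previous credentials */
        put_cred(new_cred);                     /* drop our private copy */
        return 0;
}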
+ */ override_cred->fsuid = inode->i_uid; override_cred->fsgid = inode->i_gid; - if (!attr->hardlink) { - err = security_dentry_create_files_as(dentry, - attr->mode, &dentry->d_name, old_cred, - override_cred); - if (err) { - put_cred(override_cred); - goto out_revert_creds; - } + err = security_dentry_create_files_as(dentry, + attr->mode, &dentry->d_name, old_cred, + override_cred); + if (err) { + put_cred(override_cred); + goto out_revert_creds; } put_cred(override_creds(override_cred)); put_cred(override_cred); - - if (!ovl_dentry_is_whiteout(dentry)) - err = ovl_create_upper(dentry, inode, attr); - else - err = ovl_create_over_whiteout(dentry, inode, attr); } + + if (!ovl_dentry_is_whiteout(dentry)) + err = ovl_create_upper(dentry, inode, attr); + else + err = ovl_create_over_whiteout(dentry, inode, attr); + out_revert_creds: revert_creds(old_cred); return err; @@ -880,7 +894,6 @@ static int ovl_do_remove(struct dentry *dentry, bool is_dir) { int err; const struct cred *old_cred; - struct dentry *upperdentry; bool lower_positive = ovl_lower_positive(dentry); LIST_HEAD(list); @@ -923,9 +936,8 @@ static int ovl_do_remove(struct dentry *dentry, bool is_dir) * Note: we fail to update ctime if there was no copy-up, only a * whiteout */ - upperdentry = ovl_dentry_upper(dentry); - if (upperdentry) - ovl_copyattr(d_inode(upperdentry), d_inode(dentry)); + if (ovl_dentry_upper(dentry)) + ovl_copyattr(d_inode(dentry)); out_drop_write: ovl_drop_write(dentry); @@ -1272,9 +1284,9 @@ static int ovl_rename(struct user_namespace *mnt_userns, struct inode *olddir, (d_inode(new) && ovl_type_origin(new))); /* copy ctime: */ - ovl_copyattr(d_inode(olddentry), d_inode(old)); + ovl_copyattr(d_inode(old)); if (d_inode(new) && ovl_dentry_upper(new)) - ovl_copyattr(d_inode(newdentry), d_inode(new)); + ovl_copyattr(d_inode(new)); out_dput: dput(newdentry); diff --git a/fs/overlayfs/file.c b/fs/overlayfs/file.c index 44fea16751f1..28cb05ef018c 100644 --- a/fs/overlayfs/file.c +++ b/fs/overlayfs/file.c @@ -273,7 +273,7 @@ static void ovl_aio_cleanup_handler(struct ovl_aio_req *aio_req) __sb_writers_acquired(file_inode(iocb->ki_filp)->i_sb, SB_FREEZE_WRITE); file_end_write(iocb->ki_filp); - ovl_copyattr(ovl_inode_real(inode), inode); + ovl_copyattr(inode); } orig_iocb->ki_pos = iocb->ki_pos; @@ -356,7 +356,7 @@ static ssize_t ovl_write_iter(struct kiocb *iocb, struct iov_iter *iter) inode_lock(inode); /* Update mode */ - ovl_copyattr(ovl_inode_real(inode), inode); + ovl_copyattr(inode); ret = file_remove_privs(file); if (ret) goto out_unlock; @@ -381,7 +381,7 @@ static ssize_t ovl_write_iter(struct kiocb *iocb, struct iov_iter *iter) ovl_iocb_to_rwf(ifl)); file_end_write(real.file); /* Update size */ - ovl_copyattr(ovl_inode_real(inode), inode); + ovl_copyattr(inode); } else { struct ovl_aio_req *aio_req; @@ -431,12 +431,11 @@ static ssize_t ovl_splice_write(struct pipe_inode_info *pipe, struct file *out, struct fd real; const struct cred *old_cred; struct inode *inode = file_inode(out); - struct inode *realinode = ovl_inode_real(inode); ssize_t ret; inode_lock(inode); /* Update mode */ - ovl_copyattr(realinode, inode); + ovl_copyattr(inode); ret = file_remove_privs(out); if (ret) goto out_unlock; @@ -452,7 +451,7 @@ static ssize_t ovl_splice_write(struct pipe_inode_info *pipe, struct file *out, file_end_write(real.file); /* Update size */ - ovl_copyattr(realinode, inode); + ovl_copyattr(inode); revert_creds(old_cred); fdput(real); @@ -517,19 +516,29 @@ static long ovl_fallocate(struct file *file, int mode, loff_t 
offset, loff_t len const struct cred *old_cred; int ret; + inode_lock(inode); + /* Update mode */ + ovl_copyattr(inode); + ret = file_remove_privs(file); + if (ret) + goto out_unlock; + ret = ovl_real_fdget(file, &real); if (ret) - return ret; + goto out_unlock; old_cred = ovl_override_creds(file_inode(file)->i_sb); ret = vfs_fallocate(real.file, mode, offset, len); revert_creds(old_cred); /* Update size */ - ovl_copyattr(ovl_inode_real(inode), inode); + ovl_copyattr(inode); fdput(real); +out_unlock: + inode_unlock(inode); + return ret; } @@ -567,14 +576,23 @@ static loff_t ovl_copyfile(struct file *file_in, loff_t pos_in, const struct cred *old_cred; loff_t ret; + inode_lock(inode_out); + if (op != OVL_DEDUPE) { + /* Update mode */ + ovl_copyattr(inode_out); + ret = file_remove_privs(file_out); + if (ret) + goto out_unlock; + } + ret = ovl_real_fdget(file_out, &real_out); if (ret) - return ret; + goto out_unlock; ret = ovl_real_fdget(file_in, &real_in); if (ret) { fdput(real_out); - return ret; + goto out_unlock; } old_cred = ovl_override_creds(file_inode(file_out)->i_sb); @@ -598,11 +616,14 @@ static loff_t ovl_copyfile(struct file *file_in, loff_t pos_in, revert_creds(old_cred); /* Update size */ - ovl_copyattr(ovl_inode_real(inode_out), inode_out); + ovl_copyattr(inode_out); fdput(real_in); fdput(real_out); +out_unlock: + inode_unlock(inode_out); + return ret; } diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c index 1f36158c7dbe..d41f0c8e0e2a 100644 --- a/fs/overlayfs/inode.c +++ b/fs/overlayfs/inode.c @@ -80,7 +80,7 @@ int ovl_setattr(struct user_namespace *mnt_userns, struct dentry *dentry, err = notify_change(&init_user_ns, upperdentry, attr, NULL); revert_creds(old_cred); if (!err) - ovl_copyattr(upperdentry->d_inode, dentry->d_inode); + ovl_copyattr(dentry->d_inode); inode_unlock(upperdentry->d_inode); if (winode) @@ -377,7 +377,7 @@ int ovl_xattr_set(struct dentry *dentry, struct inode *inode, const char *name, revert_creds(old_cred); /* copy c/mtime */ - ovl_copyattr(d_inode(realdentry), inode); + ovl_copyattr(inode); out_drop_write: ovl_drop_write(dentry); @@ -579,7 +579,7 @@ int ovl_fileattr_set(struct user_namespace *mnt_userns, inode_set_flags(inode, flags, OVL_COPY_I_FLAGS_MASK); /* Update ctime */ - ovl_copyattr(ovl_inode_real(inode), inode); + ovl_copyattr(inode); } ovl_drop_write(dentry); out: @@ -777,16 +777,19 @@ void ovl_inode_init(struct inode *inode, struct ovl_inode_params *oip, unsigned long ino, int fsid) { struct inode *realinode; + struct ovl_inode *oi = OVL_I(inode); if (oip->upperdentry) - OVL_I(inode)->__upperdentry = oip->upperdentry; - if (oip->lowerpath && oip->lowerpath->dentry) - OVL_I(inode)->lower = igrab(d_inode(oip->lowerpath->dentry)); + oi->__upperdentry = oip->upperdentry; + if (oip->lowerpath && oip->lowerpath->dentry) { + oi->lowerpath.dentry = dget(oip->lowerpath->dentry); + oi->lowerpath.layer = oip->lowerpath->layer; + } if (oip->lowerdata) - OVL_I(inode)->lowerdata = igrab(d_inode(oip->lowerdata)); + oi->lowerdata = igrab(d_inode(oip->lowerdata)); realinode = ovl_inode_real(inode); - ovl_copyattr(realinode, inode); + ovl_copyattr(inode); ovl_copyflags(realinode, inode); ovl_map_ino(inode, ino, fsid); } diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h index 2cd5741c873b..2df3e74cdf0f 100644 --- a/fs/overlayfs/overlayfs.h +++ b/fs/overlayfs/overlayfs.h @@ -293,10 +293,12 @@ enum ovl_path_type ovl_path_type(struct dentry *dentry); void ovl_path_upper(struct dentry *dentry, struct path *path); void ovl_path_lower(struct 
dentry *dentry, struct path *path); void ovl_path_lowerdata(struct dentry *dentry, struct path *path); +void ovl_i_path_real(struct inode *inode, struct path *path); enum ovl_path_type ovl_path_real(struct dentry *dentry, struct path *path); struct dentry *ovl_dentry_upper(struct dentry *dentry); struct dentry *ovl_dentry_lower(struct dentry *dentry); struct dentry *ovl_dentry_lowerdata(struct dentry *dentry); +const struct ovl_layer *ovl_i_layer_lower(struct inode *inode); const struct ovl_layer *ovl_layer_lower(struct dentry *dentry); struct dentry *ovl_dentry_real(struct dentry *dentry); struct dentry *ovl_i_dentry_upper(struct inode *inode); @@ -520,16 +522,7 @@ bool ovl_lookup_trap_inode(struct super_block *sb, struct dentry *dir); struct inode *ovl_get_trap_inode(struct super_block *sb, struct dentry *dir); struct inode *ovl_get_inode(struct super_block *sb, struct ovl_inode_params *oip); -static inline void ovl_copyattr(struct inode *from, struct inode *to) -{ - to->i_uid = from->i_uid; - to->i_gid = from->i_gid; - to->i_mode = from->i_mode; - to->i_atime = from->i_atime; - to->i_mtime = from->i_mtime; - to->i_ctime = from->i_ctime; - i_size_write(to, i_size_read(from)); -} +void ovl_copyattr(struct inode *to); /* vfs inode flags copied from real to ovl inode */ #define OVL_COPY_I_FLAGS_MASK (S_SYNC | S_NOATIME | S_APPEND | S_IMMUTABLE) diff --git a/fs/overlayfs/ovl_entry.h b/fs/overlayfs/ovl_entry.h index 63efee554f69..b2d64f3c974b 100644 --- a/fs/overlayfs/ovl_entry.h +++ b/fs/overlayfs/ovl_entry.h @@ -129,7 +129,7 @@ struct ovl_inode { unsigned long flags; struct inode vfs_inode; struct dentry *__upperdentry; - struct inode *lower; + struct ovl_path lowerpath; /* synchronize copy up and more */ struct mutex lock; diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index 9837aaf9caf1..b3675d13c1ac 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c @@ -138,11 +138,16 @@ static int ovl_dentry_revalidate_common(struct dentry *dentry, unsigned int flags, bool weak) { struct ovl_entry *oe = dentry->d_fsdata; + struct inode *inode = d_inode_rcu(dentry); struct dentry *upper; unsigned int i; int ret = 1; - upper = ovl_dentry_upper(dentry); + /* Careful in RCU mode */ + if (!inode) + return -ECHILD; + + upper = ovl_i_dentry_upper(inode); if (upper) ret = ovl_revalidate_real(upper, flags, weak); @@ -184,7 +189,8 @@ static struct inode *ovl_alloc_inode(struct super_block *sb) oi->version = 0; oi->flags = 0; oi->__upperdentry = NULL; - oi->lower = NULL; + oi->lowerpath.dentry = NULL; + oi->lowerpath.layer = NULL; oi->lowerdata = NULL; mutex_init(&oi->lock); @@ -205,7 +211,7 @@ static void ovl_destroy_inode(struct inode *inode) struct ovl_inode *oi = OVL_I(inode); dput(oi->__upperdentry); - iput(oi->lower); + dput(oi->lowerpath.dentry); if (S_ISDIR(inode->i_mode)) ovl_dir_cache_free(inode); else diff --git a/fs/overlayfs/util.c b/fs/overlayfs/util.c index f48284a2a896..9d33ce385bef 100644 --- a/fs/overlayfs/util.c +++ b/fs/overlayfs/util.c @@ -236,6 +236,17 @@ struct dentry *ovl_i_dentry_upper(struct inode *inode) return ovl_upperdentry_dereference(OVL_I(inode)); } +void ovl_i_path_real(struct inode *inode, struct path *path) +{ + path->dentry = ovl_i_dentry_upper(inode); + if (!path->dentry) { + path->dentry = OVL_I(inode)->lowerpath.dentry; + path->mnt = OVL_I(inode)->lowerpath.layer->mnt; + } else { + path->mnt = ovl_upper_mnt(OVL_FS(inode->i_sb)); + } +} + struct inode *ovl_inode_upper(struct inode *inode) { struct dentry *upperdentry = ovl_i_dentry_upper(inode); @@ -245,7 
+256,9 @@ struct inode *ovl_inode_upper(struct inode *inode) struct inode *ovl_inode_lower(struct inode *inode) { - return OVL_I(inode)->lower; + struct dentry *lowerdentry = OVL_I(inode)->lowerpath.dentry; + + return lowerdentry ? d_inode(lowerdentry) : NULL; } struct inode *ovl_inode_real(struct inode *inode) @@ -443,7 +456,7 @@ static void ovl_dir_version_inc(struct dentry *dentry, bool impurity) void ovl_dir_modified(struct dentry *dentry, bool impurity) { /* Copy mtime/ctime */ - ovl_copyattr(d_inode(ovl_dentry_upper(dentry)), d_inode(dentry)); + ovl_copyattr(d_inode(dentry)); ovl_dir_version_inc(dentry, impurity); } @@ -1060,3 +1073,33 @@ int ovl_sync_status(struct ovl_fs *ofs) return errseq_check(&mnt->mnt_sb->s_wb_err, ofs->errseq); } + +/* + * ovl_copyattr() - copy inode attributes from layer to ovl inode + * + * When overlay copies inode information from an upper or lower layer to the + * relevant overlay inode it will apply the idmapping of the upper or lower + * layer when doing so ensuring that the ovl inode ownership will correctly + * reflect the ownership of the idmapped upper or lower layer. For example, an + * idmapped upper or lower layer mapping id 1001 to id 1000 will take care to + * map any lower or upper inode owned by id 1001 to id 1000. These mapping + * helpers are nops when the relevant layer isn't idmapped. + */ +void ovl_copyattr(struct inode *inode) +{ + struct path realpath; + struct inode *realinode; + struct user_namespace *real_mnt_userns; + + ovl_i_path_real(inode, &realpath); + realinode = d_inode(realpath.dentry); + real_mnt_userns = mnt_user_ns(realpath.mnt); + + inode->i_uid = i_uid_into_mnt(real_mnt_userns, realinode); + inode->i_gid = i_gid_into_mnt(real_mnt_userns, realinode); + inode->i_mode = realinode->i_mode; + inode->i_atime = realinode->i_atime; + inode->i_mtime = realinode->i_mtime; + inode->i_ctime = realinode->i_ctime; + i_size_write(inode, i_size_read(realinode)); +} diff --git a/fs/pnode.c b/fs/pnode.c index 1106137c747a..468e4e65a615 100644 --- a/fs/pnode.c +++ b/fs/pnode.c @@ -244,7 +244,7 @@ static int propagate_one(struct mount *m) } do { struct mount *parent = last_source->mnt_parent; - if (last_source == first_source) + if (peers(last_source, first_source)) break; done = parent->mnt_master == p; if (done && peers(n, parent)) diff --git a/fs/pstore/Kconfig b/fs/pstore/Kconfig index 8adabde685f1..c49d554cc9ae 100644 --- a/fs/pstore/Kconfig +++ b/fs/pstore/Kconfig @@ -126,6 +126,7 @@ config PSTORE_CONSOLE config PSTORE_PMSG bool "Log user space messages" depends on PSTORE + select RT_MUTEXES help When the option is enabled, pstore will export a character interface /dev/pmsg0 to log user space messages. On reboot diff --git a/fs/pstore/pmsg.c b/fs/pstore/pmsg.c index d8542ec2f38c..18cf94b597e0 100644 --- a/fs/pstore/pmsg.c +++ b/fs/pstore/pmsg.c @@ -7,9 +7,10 @@ #include #include #include +#include #include "internal.h" -static DEFINE_MUTEX(pmsg_lock); +static DEFINE_RT_MUTEX(pmsg_lock); static ssize_t write_pmsg(struct file *file, const char __user *buf, size_t count, loff_t *ppos) @@ -28,9 +29,9 @@ static ssize_t write_pmsg(struct file *file, const char __user *buf, if (!access_ok(buf, count)) return -EFAULT; - mutex_lock(&pmsg_lock); + rt_mutex_lock(&pmsg_lock); ret = psinfo->write_user(&record, buf); - mutex_unlock(&pmsg_lock); + rt_mutex_unlock(&pmsg_lock); return ret ? 
ret : count; } diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c index fefe3d391d3a..f3fa3625d772 100644 --- a/fs/pstore/ram.c +++ b/fs/pstore/ram.c @@ -670,7 +670,7 @@ static int ramoops_parse_dt(struct platform_device *pdev, field = value; \ } - parse_u32("mem-type", pdata->record_size, pdata->mem_type); + parse_u32("mem-type", pdata->mem_type, pdata->mem_type); parse_u32("record-size", pdata->record_size, 0); parse_u32("console-size", pdata->console_size, 0); parse_u32("ftrace-size", pdata->ftrace_size, 0); @@ -735,6 +735,7 @@ static int ramoops_probe(struct platform_device *pdev) /* Make sure we didn't get bogus platform data pointer. */ if (!pdata) { pr_err("NULL platform data\n"); + err = -EINVAL; goto fail_out; } @@ -742,6 +743,7 @@ static int ramoops_probe(struct platform_device *pdev) !pdata->ftrace_size && !pdata->pmsg_size)) { pr_err("The memory size and the record/console size must be " "non-zero\n"); + err = -EINVAL; goto fail_out; } diff --git a/fs/pstore/ram_core.c b/fs/pstore/ram_core.c index fe5305028c6e..155c7010b1f8 100644 --- a/fs/pstore/ram_core.c +++ b/fs/pstore/ram_core.c @@ -439,7 +439,11 @@ static void *persistent_ram_vmap(phys_addr_t start, size_t size, phys_addr_t addr = page_start + i * PAGE_SIZE; pages[i] = pfn_to_page(addr >> PAGE_SHIFT); } - vaddr = vmap(pages, page_count, VM_MAP, prot); + /* + * VM_IOREMAP used here to bypass this region during vread() + * and kmap_atomic() (i.e. kcore) to avoid __va() failures. + */ + vaddr = vmap(pages, page_count, VM_MAP | VM_IOREMAP, prot); kfree(pages); /* diff --git a/fs/pstore/zone.c b/fs/pstore/zone.c index 7c8f8feac6c3..5d3f944f6018 100644 --- a/fs/pstore/zone.c +++ b/fs/pstore/zone.c @@ -761,7 +761,7 @@ static inline int notrace psz_kmsg_write_record(struct psz_context *cxt, /* avoid destroying old data, allocate a new one */ len = zone->buffer_size + sizeof(*zone->buffer); zone->oldbuf = zone->buffer; - zone->buffer = kzalloc(len, GFP_KERNEL); + zone->buffer = kzalloc(len, GFP_ATOMIC); if (!zone->buffer) { zone->buffer = zone->oldbuf; return -ENOMEM; diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index 09d1307959d0..cddd07b7d132 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c @@ -2317,6 +2317,8 @@ static int vfs_setup_quota_inode(struct inode *inode, int type) struct super_block *sb = inode->i_sb; struct quota_info *dqopt = sb_dqopt(sb); + if (is_bad_inode(inode)) + return -EUCLEAN; if (!S_ISREG(inode->i_mode)) return -EACCES; if (IS_RDONLY(inode)) diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c index 3d7a35d6a18b..b916859992ec 100644 --- a/fs/reiserfs/namei.c +++ b/fs/reiserfs/namei.c @@ -696,6 +696,7 @@ static int reiserfs_create(struct user_namespace *mnt_userns, struct inode *dir, out_failed: reiserfs_write_unlock(dir->i_sb); + reiserfs_security_free(&security); return retval; } @@ -779,6 +780,7 @@ static int reiserfs_mknod(struct user_namespace *mnt_userns, struct inode *dir, out_failed: reiserfs_write_unlock(dir->i_sb); + reiserfs_security_free(&security); return retval; } @@ -878,6 +880,7 @@ static int reiserfs_mkdir(struct user_namespace *mnt_userns, struct inode *dir, retval = journal_end(&th); out_failed: reiserfs_write_unlock(dir->i_sb); + reiserfs_security_free(&security); return retval; } @@ -1194,6 +1197,7 @@ static int reiserfs_symlink(struct user_namespace *mnt_userns, retval = journal_end(&th); out_failed: reiserfs_write_unlock(parent_dir->i_sb); + reiserfs_security_free(&security); return retval; } diff --git a/fs/reiserfs/xattr_security.c b/fs/reiserfs/xattr_security.c index 
8965c8e5e172..857a65b05726 100644 --- a/fs/reiserfs/xattr_security.c +++ b/fs/reiserfs/xattr_security.c @@ -50,6 +50,7 @@ int reiserfs_security_init(struct inode *dir, struct inode *inode, int error; sec->name = NULL; + sec->value = NULL; /* Don't add selinux attributes on xattrs - they'll never get used */ if (IS_PRIVATE(dir)) @@ -95,7 +96,6 @@ int reiserfs_security_write(struct reiserfs_transaction_handle *th, void reiserfs_security_free(struct reiserfs_security_handle *sec) { - kfree(sec->name); kfree(sec->value); sec->name = NULL; sec->value = NULL; diff --git a/fs/sysv/itree.c b/fs/sysv/itree.c index 749385015a8d..5a59d56a2038 100644 --- a/fs/sysv/itree.c +++ b/fs/sysv/itree.c @@ -438,7 +438,7 @@ static unsigned sysv_nblocks(struct super_block *s, loff_t size) res += blocks; direct = 1; } - return blocks; + return res; } int sysv_getattr(struct user_namespace *mnt_userns, const struct path *path, diff --git a/fs/udf/inode.c b/fs/udf/inode.c index ea8f6cd01f50..d2488b7e54a5 100644 --- a/fs/udf/inode.c +++ b/fs/udf/inode.c @@ -438,6 +438,12 @@ static int udf_get_block(struct inode *inode, sector_t block, iinfo->i_next_alloc_goal++; } + /* + * Block beyond EOF and prealloc extents? Just discard preallocation + * as it is not useful and complicates things. + */ + if (((loff_t)block) << inode->i_blkbits > iinfo->i_lenExtents) + udf_discard_prealloc(inode); udf_clear_extent_cache(inode); phys = inode_getblk(inode, block, &err, &new); if (!phys) @@ -487,8 +493,6 @@ static int udf_do_extend_file(struct inode *inode, uint32_t add; int count = 0, fake = !(last_ext->extLength & UDF_EXTENT_LENGTH_MASK); struct super_block *sb = inode->i_sb; - struct kernel_lb_addr prealloc_loc = {}; - uint32_t prealloc_len = 0; struct udf_inode_info *iinfo; int err; @@ -509,19 +513,6 @@ static int udf_do_extend_file(struct inode *inode, ~(sb->s_blocksize - 1); } - /* Last extent are just preallocated blocks? */ - if ((last_ext->extLength & UDF_EXTENT_FLAG_MASK) == - EXT_NOT_RECORDED_ALLOCATED) { - /* Save the extent so that we can reattach it to the end */ - prealloc_loc = last_ext->extLocation; - prealloc_len = last_ext->extLength; - /* Mark the extent as a hole */ - last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED | - (last_ext->extLength & UDF_EXTENT_LENGTH_MASK); - last_ext->extLocation.logicalBlockNum = 0; - last_ext->extLocation.partitionReferenceNum = 0; - } - /* Can we merge with the previous extent? */ if ((last_ext->extLength & UDF_EXTENT_FLAG_MASK) == EXT_NOT_RECORDED_NOT_ALLOCATED) { @@ -549,7 +540,7 @@ static int udf_do_extend_file(struct inode *inode, * more extents, we may need to enter possible following * empty indirect extent. */ - if (new_block_bytes || prealloc_len) + if (new_block_bytes) udf_next_aext(inode, last_pos, &tmploc, &tmplen, 0); } @@ -583,17 +574,6 @@ static int udf_do_extend_file(struct inode *inode, } out: - /* Do we have some preallocated blocks saved? */ - if (prealloc_len) { - err = udf_add_aext(inode, last_pos, &prealloc_loc, - prealloc_len, 1); - if (err) - return err; - last_ext->extLocation = prealloc_loc; - last_ext->extLength = prealloc_len; - count++; - } - /* last_pos should point to the last written extent... 
*/ if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT) last_pos->offset -= sizeof(struct short_ad); @@ -609,13 +589,17 @@ out: static void udf_do_extend_final_block(struct inode *inode, struct extent_position *last_pos, struct kernel_long_ad *last_ext, - uint32_t final_block_len) + uint32_t new_elen) { - struct super_block *sb = inode->i_sb; uint32_t added_bytes; - added_bytes = final_block_len - - (last_ext->extLength & (sb->s_blocksize - 1)); + /* + * Extent already large enough? It may be already rounded up to block + * size... + */ + if (new_elen <= (last_ext->extLength & UDF_EXTENT_LENGTH_MASK)) + return; + added_bytes = new_elen - (last_ext->extLength & UDF_EXTENT_LENGTH_MASK); last_ext->extLength += added_bytes; UDF_I(inode)->i_lenExtents += added_bytes; @@ -632,12 +616,12 @@ static int udf_extend_file(struct inode *inode, loff_t newsize) int8_t etype; struct super_block *sb = inode->i_sb; sector_t first_block = newsize >> sb->s_blocksize_bits, offset; - unsigned long partial_final_block; + loff_t new_elen; int adsize; struct udf_inode_info *iinfo = UDF_I(inode); struct kernel_long_ad extent; int err = 0; - int within_final_block; + bool within_last_ext; if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT) adsize = sizeof(struct short_ad); @@ -646,8 +630,17 @@ static int udf_extend_file(struct inode *inode, loff_t newsize) else BUG(); + /* + * When creating hole in file, just don't bother with preserving + * preallocation. It likely won't be very useful anyway. + */ + udf_discard_prealloc(inode); + etype = inode_bmap(inode, first_block, &epos, &eloc, &elen, &offset); - within_final_block = (etype != -1); + within_last_ext = (etype != -1); + /* We don't expect extents past EOF... */ + WARN_ON_ONCE(within_last_ext && + elen > ((loff_t)offset + 1) << inode->i_blkbits); if ((!epos.bh && epos.offset == udf_file_entry_alloc_offset(inode)) || (epos.bh && epos.offset == sizeof(struct allocExtDesc))) { @@ -663,19 +656,17 @@ static int udf_extend_file(struct inode *inode, loff_t newsize) extent.extLength |= etype << 30; } - partial_final_block = newsize & (sb->s_blocksize - 1); + new_elen = ((loff_t)offset << inode->i_blkbits) | + (newsize & (sb->s_blocksize - 1)); /* File has extent covering the new size (could happen when extending * inside a block)? */ - if (within_final_block) { + if (within_last_ext) { /* Extending file within the last file block */ - udf_do_extend_final_block(inode, &epos, &extent, - partial_final_block); + udf_do_extend_final_block(inode, &epos, &extent, new_elen); } else { - loff_t add = ((loff_t)offset << sb->s_blocksize_bits) | - partial_final_block; - err = udf_do_extend_file(inode, &epos, &extent, add); + err = udf_do_extend_file(inode, &epos, &extent, new_elen); } if (err < 0) @@ -776,10 +767,11 @@ static sector_t inode_getblk(struct inode *inode, sector_t block, goto out_free; } - /* Are we beyond EOF? */ + /* Are we beyond EOF and preallocated extent? 
*/ if (etype == -1) { int ret; loff_t hole_len; + isBeyondEOF = true; if (count) { if (c) diff --git a/fs/udf/namei.c b/fs/udf/namei.c index 865e658535b1..0e30a50060d9 100644 --- a/fs/udf/namei.c +++ b/fs/udf/namei.c @@ -1091,8 +1091,9 @@ static int udf_rename(struct user_namespace *mnt_userns, struct inode *old_dir, return -EINVAL; ofi = udf_find_entry(old_dir, &old_dentry->d_name, &ofibh, &ocfi); - if (IS_ERR(ofi)) { - retval = PTR_ERR(ofi); + if (!ofi || IS_ERR(ofi)) { + if (IS_ERR(ofi)) + retval = PTR_ERR(ofi); goto end_rename; } @@ -1101,8 +1102,7 @@ static int udf_rename(struct user_namespace *mnt_userns, struct inode *old_dir, brelse(ofibh.sbh); tloc = lelb_to_cpu(ocfi.icb.extLocation); - if (!ofi || udf_get_lb_pblock(old_dir->i_sb, &tloc, 0) - != old_inode->i_ino) + if (udf_get_lb_pblock(old_dir->i_sb, &tloc, 0) != old_inode->i_ino) goto end_rename; nfi = udf_find_entry(new_dir, &new_dentry->d_name, &nfibh, &ncfi); diff --git a/fs/udf/truncate.c b/fs/udf/truncate.c index 532cda99644e..036ebd892b85 100644 --- a/fs/udf/truncate.c +++ b/fs/udf/truncate.c @@ -120,60 +120,42 @@ void udf_truncate_tail_extent(struct inode *inode) void udf_discard_prealloc(struct inode *inode) { - struct extent_position epos = { NULL, 0, {0, 0} }; + struct extent_position epos = {}; + struct extent_position prev_epos = {}; struct kernel_lb_addr eloc; uint32_t elen; uint64_t lbcount = 0; int8_t etype = -1, netype; - int adsize; struct udf_inode_info *iinfo = UDF_I(inode); + int bsize = 1 << inode->i_blkbits; if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB || - inode->i_size == iinfo->i_lenExtents) + ALIGN(inode->i_size, bsize) == ALIGN(iinfo->i_lenExtents, bsize)) return; - if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT) - adsize = sizeof(struct short_ad); - else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG) - adsize = sizeof(struct long_ad); - else - adsize = 0; - epos.block = iinfo->i_location; /* Find the last extent in the file */ - while ((netype = udf_next_aext(inode, &epos, &eloc, &elen, 1)) != -1) { - etype = netype; + while ((netype = udf_next_aext(inode, &epos, &eloc, &elen, 0)) != -1) { + brelse(prev_epos.bh); + prev_epos = epos; + if (prev_epos.bh) + get_bh(prev_epos.bh); + + etype = udf_next_aext(inode, &epos, &eloc, &elen, 1); lbcount += elen; } if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30)) { - epos.offset -= adsize; lbcount -= elen; - extent_trunc(inode, &epos, &eloc, etype, elen, 0); - if (!epos.bh) { - iinfo->i_lenAlloc = - epos.offset - - udf_file_entry_alloc_offset(inode); - mark_inode_dirty(inode); - } else { - struct allocExtDesc *aed = - (struct allocExtDesc *)(epos.bh->b_data); - aed->lengthAllocDescs = - cpu_to_le32(epos.offset - - sizeof(struct allocExtDesc)); - if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || - UDF_SB(inode->i_sb)->s_udfrev >= 0x0201) - udf_update_tag(epos.bh->b_data, epos.offset); - else - udf_update_tag(epos.bh->b_data, - sizeof(struct allocExtDesc)); - mark_buffer_dirty_inode(epos.bh, inode); - } + udf_delete_aext(inode, prev_epos); + udf_free_blocks(inode->i_sb, inode, &eloc, 0, + DIV_ROUND_UP(elen, 1 << inode->i_blkbits)); } /* This inode entry is in-memory only and thus we don't have to mark * the inode dirty */ iinfo->i_lenExtents = lbcount; brelse(epos.bh); + brelse(prev_epos.bh); } static void udf_update_alloc_ext_desc(struct inode *inode, diff --git a/fs/xattr.c b/fs/xattr.c index 7117cb253864..4c82f271f4aa 100644 --- a/fs/xattr.c +++ b/fs/xattr.c @@ -1119,7 +1119,7 @@ static int xattr_list_one(char **buffer, ssize_t *remaining_size, ssize_t 
simple_xattr_list(struct inode *inode, struct simple_xattrs *xattrs, char *buffer, size_t size) { - bool trusted = capable(CAP_SYS_ADMIN); + bool trusted = ns_capable_noaudit(&init_user_ns, CAP_SYS_ADMIN); struct simple_xattr *xattr; ssize_t remaining_size = size; int err = 0; diff --git a/include/dt-bindings/clock/imx8mn-clock.h b/include/dt-bindings/clock/imx8mn-clock.h index 01e8bab1d767..1aa462e5cafd 100644 --- a/include/dt-bindings/clock/imx8mn-clock.h +++ b/include/dt-bindings/clock/imx8mn-clock.h @@ -19,7 +19,8 @@ #define IMX8MN_VIDEO_PLL1_REF_SEL 10 #define IMX8MN_DRAM_PLL_REF_SEL 11 #define IMX8MN_GPU_PLL_REF_SEL 12 -#define IMX8MN_VPU_PLL_REF_SEL 13 +#define IMX8MN_M7_ALT_PLL_REF_SEL 13 +#define IMX8MN_VPU_PLL_REF_SEL IMX8MN_M7_ALT_PLL_REF_SEL #define IMX8MN_ARM_PLL_REF_SEL 14 #define IMX8MN_SYS_PLL1_REF_SEL 15 #define IMX8MN_SYS_PLL2_REF_SEL 16 @@ -29,7 +30,8 @@ #define IMX8MN_VIDEO_PLL1 20 #define IMX8MN_DRAM_PLL 21 #define IMX8MN_GPU_PLL 22 -#define IMX8MN_VPU_PLL 23 +#define IMX8MN_M7_ALT_PLL 23 +#define IMX8MN_VPU_PLL IMX8MN_M7_ALT_PLL #define IMX8MN_ARM_PLL 24 #define IMX8MN_SYS_PLL1 25 #define IMX8MN_SYS_PLL2 26 @@ -39,7 +41,8 @@ #define IMX8MN_VIDEO_PLL1_BYPASS 30 #define IMX8MN_DRAM_PLL_BYPASS 31 #define IMX8MN_GPU_PLL_BYPASS 32 -#define IMX8MN_VPU_PLL_BYPASS 33 +#define IMX8MN_M7_ALT_PLL_BYPASS 33 +#define IMX8MN_VPU_PLL_BYPASS IMX8MN_M7_ALT_PLL_BYPASS #define IMX8MN_ARM_PLL_BYPASS 34 #define IMX8MN_SYS_PLL1_BYPASS 35 #define IMX8MN_SYS_PLL2_BYPASS 36 @@ -49,7 +52,8 @@ #define IMX8MN_VIDEO_PLL1_OUT 40 #define IMX8MN_DRAM_PLL_OUT 41 #define IMX8MN_GPU_PLL_OUT 42 -#define IMX8MN_VPU_PLL_OUT 43 +#define IMX8MN_M7_ALT_PLL_OUT 43 +#define IMX8MN_VPU_PLL_OUT IMX8MN_M7_ALT_PLL_OUT #define IMX8MN_ARM_PLL_OUT 44 #define IMX8MN_SYS_PLL1_OUT 45 #define IMX8MN_SYS_PLL2_OUT 46 diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h index f60674692d36..ea2d919fd9c7 100644 --- a/include/linux/debugfs.h +++ b/include/linux/debugfs.h @@ -45,7 +45,7 @@ struct debugfs_u32_array { extern struct dentry *arch_debugfs_dir; -#define DEFINE_DEBUGFS_ATTRIBUTE(__fops, __get, __set, __fmt) \ +#define DEFINE_DEBUGFS_ATTRIBUTE_XSIGNED(__fops, __get, __set, __fmt, __is_signed) \ static int __fops ## _open(struct inode *inode, struct file *file) \ { \ __simple_attr_check_format(__fmt, 0ull); \ @@ -56,10 +56,16 @@ static const struct file_operations __fops = { \ .open = __fops ## _open, \ .release = simple_attr_release, \ .read = debugfs_attr_read, \ - .write = debugfs_attr_write, \ + .write = (__is_signed) ? 
debugfs_attr_write_signed : debugfs_attr_write, \ .llseek = no_llseek, \ } +#define DEFINE_DEBUGFS_ATTRIBUTE(__fops, __get, __set, __fmt) \ + DEFINE_DEBUGFS_ATTRIBUTE_XSIGNED(__fops, __get, __set, __fmt, false) + +#define DEFINE_DEBUGFS_ATTRIBUTE_SIGNED(__fops, __get, __set, __fmt) \ + DEFINE_DEBUGFS_ATTRIBUTE_XSIGNED(__fops, __get, __set, __fmt, true) + typedef struct vfsmount *(*debugfs_automount_t)(struct dentry *, void *); #if defined(CONFIG_DEBUG_FS) @@ -102,6 +108,8 @@ ssize_t debugfs_attr_read(struct file *file, char __user *buf, size_t len, loff_t *ppos); ssize_t debugfs_attr_write(struct file *file, const char __user *buf, size_t len, loff_t *ppos); +ssize_t debugfs_attr_write_signed(struct file *file, const char __user *buf, + size_t len, loff_t *ppos); struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry, struct dentry *new_dir, const char *new_name); @@ -254,6 +262,13 @@ static inline ssize_t debugfs_attr_write(struct file *file, return -ENODEV; } +static inline ssize_t debugfs_attr_write_signed(struct file *file, + const char __user *buf, + size_t len, loff_t *ppos) +{ + return -ENODEV; +} + static inline struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry, struct dentry *new_dir, char *new_name) { diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h index 142474b4af96..d94b9ed9443e 100644 --- a/include/linux/devfreq.h +++ b/include/linux/devfreq.h @@ -149,8 +149,8 @@ struct devfreq_stats { * @work: delayed work for load monitoring. * @previous_freq: previously configured frequency value. * @last_status: devfreq user device info, performance statistics - * @data: Private data of the governor. The devfreq framework does not - * touch this. + * @data: devfreq driver pass to governors, governor should not change it. + * @governor_data: private data for governors, devfreq core doesn't touch it. 
* @user_min_freq_req: PM QoS minimum frequency request from user (via sysfs) * @user_max_freq_req: PM QoS maximum frequency request from user (via sysfs) * @scaling_min_freq: Limit minimum frequency requested by OPP interface @@ -187,7 +187,8 @@ struct devfreq { unsigned long previous_freq; struct devfreq_dev_status last_status; - void *data; /* private data for governors */ + void *data; + void *governor_data; struct dev_pm_qos_request user_min_freq_req; struct dev_pm_qos_request user_max_freq_req; diff --git a/include/linux/efi.h b/include/linux/efi.h index 4cd4ed34d3cb..5598fc348c69 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -1114,8 +1114,6 @@ void efi_check_for_embedded_firmwares(void); static inline void efi_check_for_embedded_firmwares(void) { } #endif -efi_status_t efi_random_get_seed(void); - /* * Arch code can implement the following three template macros, avoiding * reptition for the void/non-void return cases of {__,}efi_call_virt(): diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h index 30eb30d6909b..3cd202d3eefb 100644 --- a/include/linux/eventfd.h +++ b/include/linux/eventfd.h @@ -61,7 +61,7 @@ static inline struct eventfd_ctx *eventfd_ctx_fdget(int fd) return ERR_PTR(-ENOSYS); } -static inline int eventfd_signal(struct eventfd_ctx *ctx, int n) +static inline int eventfd_signal(struct eventfd_ctx *ctx, __u64 n) { return -ENOSYS; } diff --git a/include/linux/fs.h b/include/linux/fs.h index d55fdc02f82d..1e1ac116dd13 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1195,6 +1195,7 @@ extern int locks_delete_block(struct file_lock *); extern int vfs_test_lock(struct file *, struct file_lock *); extern int vfs_lock_file(struct file *, unsigned int, struct file_lock *, struct file_lock *); extern int vfs_cancel_lock(struct file *filp, struct file_lock *fl); +bool vfs_inode_has_locks(struct inode *inode); extern int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl); extern int __break_lease(struct inode *inode, unsigned int flags, unsigned int type); extern void lease_get_mtime(struct inode *, struct timespec64 *time); @@ -1307,6 +1308,11 @@ static inline int vfs_cancel_lock(struct file *filp, struct file_lock *fl) return 0; } +static inline bool vfs_inode_has_locks(struct inode *inode) +{ + return false; +} + static inline int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl) { return -ENOLCK; @@ -3507,7 +3513,7 @@ void simple_transaction_set(struct file *file, size_t n); * All attributes contain a text representation of a numeric value * that are accessed with the get() and set() functions. */ -#define DEFINE_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt) \ +#define DEFINE_SIMPLE_ATTRIBUTE_XSIGNED(__fops, __get, __set, __fmt, __is_signed) \ static int __fops ## _open(struct inode *inode, struct file *file) \ { \ __simple_attr_check_format(__fmt, 0ull); \ @@ -3518,10 +3524,16 @@ static const struct file_operations __fops = { \ .open = __fops ## _open, \ .release = simple_attr_release, \ .read = simple_attr_read, \ - .write = simple_attr_write, \ + .write = (__is_signed) ? simple_attr_write_signed : simple_attr_write, \ .llseek = generic_file_llseek, \ } +#define DEFINE_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt) \ + DEFINE_SIMPLE_ATTRIBUTE_XSIGNED(__fops, __get, __set, __fmt, false) + +#define DEFINE_SIMPLE_ATTRIBUTE_SIGNED(__fops, __get, __set, __fmt) \ + DEFINE_SIMPLE_ATTRIBUTE_XSIGNED(__fops, __get, __set, __fmt, true) + static inline __printf(1, 2) void __simple_attr_check_format(const char *fmt, ...) 
{ @@ -3536,6 +3548,8 @@ ssize_t simple_attr_read(struct file *file, char __user *buf, size_t len, loff_t *ppos); ssize_t simple_attr_write(struct file *file, const char __user *buf, size_t len, loff_t *ppos); +ssize_t simple_attr_write_signed(struct file *file, const char __user *buf, + size_t len, loff_t *ppos); struct ctl_table; int proc_nr_files(struct ctl_table *table, int write, diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h index 97a28ad3393b..369902d52f9c 100644 --- a/include/linux/gpio/consumer.h +++ b/include/linux/gpio/consumer.h @@ -8,27 +8,16 @@ #include struct device; - -/** - * Opaque descriptor for a GPIO. These are obtained using gpiod_get() and are - * preferable to the old integer-based handles. - * - * Contrary to integers, a pointer to a gpio_desc is guaranteed to be valid - * until the GPIO is released. - */ struct gpio_desc; - -/** - * Opaque descriptor for a structure of GPIO array attributes. This structure - * is attached to struct gpiod_descs obtained from gpiod_get_array() and can be - * passed back to get/set array functions in order to activate fast processing - * path if applicable. - */ struct gpio_array; /** - * Struct containing an array of descriptors that can be obtained using - * gpiod_get_array(). + * struct gpio_descs - Struct containing an array of descriptors that can be + * obtained using gpiod_get_array() + * + * @info: Pointer to the opaque gpio_array structure + * @ndescs: Number of held descriptors + * @desc: Array of pointers to GPIO descriptors */ struct gpio_descs { struct gpio_array *info; @@ -43,8 +32,16 @@ struct gpio_descs { #define GPIOD_FLAGS_BIT_NONEXCLUSIVE BIT(4) /** - * Optional flags that can be passed to one of gpiod_* to configure direction - * and output value. These values cannot be OR'd. + * enum gpiod_flags - Optional flags that can be passed to one of gpiod_* to + * configure direction and output value. These values + * cannot be OR'd. 
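 *
 * A brief illustration (editor's sketch, not part of this patch; the
 * "reset" consumer name and "desc" variable are hypothetical):
 *
 *	desc = gpiod_get(dev, "reset", GPIOD_OUT_LOW);
 *
 * requests the line as an output driven low, whereas GPIOD_IN would
 * request it as an input.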
+ * + * @GPIOD_ASIS: Don't change anything + * @GPIOD_IN: Set lines to input mode + * @GPIOD_OUT_LOW: Set lines to output and drive them low + * @GPIOD_OUT_HIGH: Set lines to output and drive them high + * @GPIOD_OUT_LOW_OPEN_DRAIN: Set lines to open-drain output and drive them low + * @GPIOD_OUT_HIGH_OPEN_DRAIN: Set lines to open-drain output and drive them high */ enum gpiod_flags { GPIOD_ASIS = 0, diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index ddc8713ce57b..8499fc9220e0 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -1307,6 +1307,8 @@ struct hv_ring_buffer_debug_info { int hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info, struct hv_ring_buffer_debug_info *debug_info); +bool hv_ringbuffer_spinlock_busy(struct vmbus_channel *channel); + /* Vmbus interface */ #define vmbus_driver_register(driver) \ __vmbus_driver_register(driver, THIS_MODULE, KBUILD_MODNAME) diff --git a/include/linux/iio/imu/adis.h b/include/linux/iio/imu/adis.h index cf49997d5903..8210a9e68215 100644 --- a/include/linux/iio/imu/adis.h +++ b/include/linux/iio/imu/adis.h @@ -32,6 +32,7 @@ struct adis_timeout { u16 sw_reset_ms; u16 self_test_ms; }; + /** * struct adis_data - ADIS chip variant specific data * @read_delay: SPI delay for read operations in us @@ -45,10 +46,11 @@ struct adis_timeout { * @self_test_mask: Bitmask of supported self-test operations * @self_test_reg: Register address to request self test command * @self_test_no_autoclear: True if device's self-test needs clear of ctrl reg - * @status_error_msgs: Array of error messgaes + * @status_error_msgs: Array of error messages * @status_error_mask: Bitmask of errors supported by the device * @timeouts: Chip specific delays * @enable_irq: Hook for ADIS devices that have a special IRQ enable/disable + * @unmasked_drdy: True for devices that cannot mask/unmask the data ready pin * @has_paging: True if ADIS device has paged registers * @burst_reg_cmd: Register command that triggers burst * @burst_len: Burst size in the SPI RX buffer. 
If @burst_max_len is defined, @@ -78,6 +80,7 @@ struct adis_data { unsigned int status_error_mask; int (*enable_irq)(struct adis *adis, bool enable); + bool unmasked_drdy; bool has_paging; @@ -128,12 +131,12 @@ struct adis { unsigned long irq_flag; void *buffer; - uint8_t tx[10] ____cacheline_aligned; - uint8_t rx[4]; + u8 tx[10] ____cacheline_aligned; + u8 rx[4]; }; int adis_init(struct adis *adis, struct iio_dev *indio_dev, - struct spi_device *spi, const struct adis_data *data); + struct spi_device *spi, const struct adis_data *data); int __adis_reset(struct adis *adis); /** @@ -154,9 +157,9 @@ static inline int adis_reset(struct adis *adis) } int __adis_write_reg(struct adis *adis, unsigned int reg, - unsigned int val, unsigned int size); + unsigned int val, unsigned int size); int __adis_read_reg(struct adis *adis, unsigned int reg, - unsigned int *val, unsigned int size); + unsigned int *val, unsigned int size); /** * __adis_write_reg_8() - Write single byte to a register (unlocked) @@ -165,7 +168,7 @@ int __adis_read_reg(struct adis *adis, unsigned int reg, * @value: The value to write */ static inline int __adis_write_reg_8(struct adis *adis, unsigned int reg, - uint8_t val) + u8 val) { return __adis_write_reg(adis, reg, val, 1); } @@ -177,7 +180,7 @@ static inline int __adis_write_reg_8(struct adis *adis, unsigned int reg, * @value: Value to be written */ static inline int __adis_write_reg_16(struct adis *adis, unsigned int reg, - uint16_t val) + u16 val) { return __adis_write_reg(adis, reg, val, 2); } @@ -189,7 +192,7 @@ static inline int __adis_write_reg_16(struct adis *adis, unsigned int reg, * @value: Value to be written */ static inline int __adis_write_reg_32(struct adis *adis, unsigned int reg, - uint32_t val) + u32 val) { return __adis_write_reg(adis, reg, val, 4); } @@ -201,7 +204,7 @@ static inline int __adis_write_reg_32(struct adis *adis, unsigned int reg, * @val: The value read back from the device */ static inline int __adis_read_reg_16(struct adis *adis, unsigned int reg, - uint16_t *val) + u16 *val) { unsigned int tmp; int ret; @@ -220,7 +223,7 @@ static inline int __adis_read_reg_16(struct adis *adis, unsigned int reg, * @val: The value read back from the device */ static inline int __adis_read_reg_32(struct adis *adis, unsigned int reg, - uint32_t *val) + u32 *val) { unsigned int tmp; int ret; @@ -240,7 +243,7 @@ static inline int __adis_read_reg_32(struct adis *adis, unsigned int reg, * @size: The size of the @value (in bytes) */ static inline int adis_write_reg(struct adis *adis, unsigned int reg, - unsigned int val, unsigned int size) + unsigned int val, unsigned int size) { int ret; @@ -259,7 +262,7 @@ static inline int adis_write_reg(struct adis *adis, unsigned int reg, * @size: The size of the @val buffer */ static int adis_read_reg(struct adis *adis, unsigned int reg, - unsigned int *val, unsigned int size) + unsigned int *val, unsigned int size) { int ret; @@ -277,7 +280,7 @@ static int adis_read_reg(struct adis *adis, unsigned int reg, * @value: The value to write */ static inline int adis_write_reg_8(struct adis *adis, unsigned int reg, - uint8_t val) + u8 val) { return adis_write_reg(adis, reg, val, 1); } @@ -289,7 +292,7 @@ static inline int adis_write_reg_8(struct adis *adis, unsigned int reg, * @value: Value to be written */ static inline int adis_write_reg_16(struct adis *adis, unsigned int reg, - uint16_t val) + u16 val) { return adis_write_reg(adis, reg, val, 2); } @@ -301,7 +304,7 @@ static inline int adis_write_reg_16(struct adis *adis, 
unsigned int reg, * @value: Value to be written */ static inline int adis_write_reg_32(struct adis *adis, unsigned int reg, - uint32_t val) + u32 val) { return adis_write_reg(adis, reg, val, 4); } @@ -313,7 +316,7 @@ static inline int adis_write_reg_32(struct adis *adis, unsigned int reg, * @val: The value read back from the device */ static inline int adis_read_reg_16(struct adis *adis, unsigned int reg, - uint16_t *val) + u16 *val) { unsigned int tmp; int ret; @@ -332,7 +335,7 @@ static inline int adis_read_reg_16(struct adis *adis, unsigned int reg, * @val: The value read back from the device */ static inline int adis_read_reg_32(struct adis *adis, unsigned int reg, - uint32_t *val) + u32 *val) { unsigned int tmp; int ret; @@ -403,9 +406,20 @@ static inline int adis_update_bits_base(struct adis *adis, unsigned int reg, __adis_update_bits_base(adis, reg, mask, val, 2)); \ }) -int adis_enable_irq(struct adis *adis, bool enable); int __adis_check_status(struct adis *adis); int __adis_initial_startup(struct adis *adis); +int __adis_enable_irq(struct adis *adis, bool enable); + +static inline int adis_enable_irq(struct adis *adis, bool enable) +{ + int ret; + + mutex_lock(&adis->state_lock); + ret = __adis_enable_irq(adis, enable); + mutex_unlock(&adis->state_lock); + + return ret; +} static inline int adis_check_status(struct adis *adis) { @@ -441,8 +455,8 @@ static inline void adis_dev_unlock(struct adis *adis) } int adis_single_conversion(struct iio_dev *indio_dev, - const struct iio_chan_spec *chan, unsigned int error_mask, - int *val); + const struct iio_chan_spec *chan, + unsigned int error_mask, int *val); #define ADIS_VOLTAGE_CHAN(addr, si, chan, name, info_all, bits) { \ .type = IIO_VOLTAGE, \ @@ -491,7 +505,7 @@ int adis_single_conversion(struct iio_dev *indio_dev, .modified = 1, \ .channel2 = IIO_MOD_ ## mod, \ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \ - info_sep, \ + (info_sep), \ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \ .info_mask_shared_by_all = info_all, \ .address = (addr), \ @@ -525,7 +539,7 @@ devm_adis_setup_buffer_and_trigger(struct adis *adis, struct iio_dev *indio_dev, int devm_adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev); int adis_update_scan_mode(struct iio_dev *indio_dev, - const unsigned long *scan_mask); + const unsigned long *scan_mask); #else /* CONFIG_IIO_BUFFER */ @@ -549,7 +563,8 @@ static inline int devm_adis_probe_trigger(struct adis *adis, #ifdef CONFIG_DEBUG_FS int adis_debugfs_reg_access(struct iio_dev *indio_dev, - unsigned int reg, unsigned int writeval, unsigned int *readval); + unsigned int reg, unsigned int writeval, + unsigned int *readval); #else diff --git a/include/linux/libata.h b/include/linux/libata.h index a64e12605d31..d890c43cff14 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -565,7 +565,10 @@ struct ata_taskfile { u8 hob_lbam; u8 hob_lbah; - u8 feature; + union { + u8 error; + u8 feature; + }; u8 nsect; u8 lbal; u8 lbam; @@ -573,7 +576,10 @@ struct ata_taskfile { u8 device; - u8 command; /* IO operation */ + union { + u8 status; + u8 command; + }; u32 auxiliary; /* auxiliary field */ /* from SATA 3.1 and */ @@ -1471,51 +1477,61 @@ static inline int sata_srst_pmp(struct ata_link *link) return link->pmp; } -/* - * printk helpers - */ -__printf(3, 4) -void ata_port_printk(const struct ata_port *ap, const char *level, - const char *fmt, ...); -__printf(3, 4) -void ata_link_printk(const struct ata_link *link, const char *level, - const char *fmt, ...); -__printf(3, 4) -void 
ata_dev_printk(const struct ata_device *dev, const char *level, - const char *fmt, ...); +#define ata_port_printk(level, ap, fmt, ...) \ + pr_ ## level ("ata%u: " fmt, (ap)->print_id, ##__VA_ARGS__) #define ata_port_err(ap, fmt, ...) \ - ata_port_printk(ap, KERN_ERR, fmt, ##__VA_ARGS__) + ata_port_printk(err, ap, fmt, ##__VA_ARGS__) #define ata_port_warn(ap, fmt, ...) \ - ata_port_printk(ap, KERN_WARNING, fmt, ##__VA_ARGS__) + ata_port_printk(warn, ap, fmt, ##__VA_ARGS__) #define ata_port_notice(ap, fmt, ...) \ - ata_port_printk(ap, KERN_NOTICE, fmt, ##__VA_ARGS__) + ata_port_printk(notice, ap, fmt, ##__VA_ARGS__) #define ata_port_info(ap, fmt, ...) \ - ata_port_printk(ap, KERN_INFO, fmt, ##__VA_ARGS__) + ata_port_printk(info, ap, fmt, ##__VA_ARGS__) #define ata_port_dbg(ap, fmt, ...) \ - ata_port_printk(ap, KERN_DEBUG, fmt, ##__VA_ARGS__) + ata_port_printk(debug, ap, fmt, ##__VA_ARGS__) + +#define ata_link_printk(level, link, fmt, ...) \ +do { \ + if (sata_pmp_attached((link)->ap) || \ + (link)->ap->slave_link) \ + pr_ ## level ("ata%u.%02u: " fmt, \ + (link)->ap->print_id, \ + (link)->pmp, \ + ##__VA_ARGS__); \ + else \ + pr_ ## level ("ata%u: " fmt, \ + (link)->ap->print_id, \ + ##__VA_ARGS__); \ +} while (0) #define ata_link_err(link, fmt, ...) \ - ata_link_printk(link, KERN_ERR, fmt, ##__VA_ARGS__) + ata_link_printk(err, link, fmt, ##__VA_ARGS__) #define ata_link_warn(link, fmt, ...) \ - ata_link_printk(link, KERN_WARNING, fmt, ##__VA_ARGS__) + ata_link_printk(warn, link, fmt, ##__VA_ARGS__) #define ata_link_notice(link, fmt, ...) \ - ata_link_printk(link, KERN_NOTICE, fmt, ##__VA_ARGS__) + ata_link_printk(notice, link, fmt, ##__VA_ARGS__) #define ata_link_info(link, fmt, ...) \ - ata_link_printk(link, KERN_INFO, fmt, ##__VA_ARGS__) + ata_link_printk(info, link, fmt, ##__VA_ARGS__) #define ata_link_dbg(link, fmt, ...) \ - ata_link_printk(link, KERN_DEBUG, fmt, ##__VA_ARGS__) + ata_link_printk(debug, link, fmt, ##__VA_ARGS__) + +#define ata_dev_printk(level, dev, fmt, ...) \ + pr_ ## level("ata%u.%02u: " fmt, \ + (dev)->link->ap->print_id, \ + (dev)->link->pmp + (dev)->devno, \ + ##__VA_ARGS__) #define ata_dev_err(dev, fmt, ...) \ - ata_dev_printk(dev, KERN_ERR, fmt, ##__VA_ARGS__) + ata_dev_printk(err, dev, fmt, ##__VA_ARGS__) #define ata_dev_warn(dev, fmt, ...) \ - ata_dev_printk(dev, KERN_WARNING, fmt, ##__VA_ARGS__) + ata_dev_printk(warn, dev, fmt, ##__VA_ARGS__) #define ata_dev_notice(dev, fmt, ...) \ - ata_dev_printk(dev, KERN_NOTICE, fmt, ##__VA_ARGS__) + ata_dev_printk(notice, dev, fmt, ##__VA_ARGS__) #define ata_dev_info(dev, fmt, ...) \ - ata_dev_printk(dev, KERN_INFO, fmt, ##__VA_ARGS__) + ata_dev_printk(info, dev, fmt, ##__VA_ARGS__) #define ata_dev_dbg(dev, fmt, ...) 
\ - ata_dev_printk(dev, KERN_DEBUG, fmt, ##__VA_ARGS__) + ata_dev_printk(debug, dev, fmt, ##__VA_ARGS__) void ata_print_version(const struct device *dev, const char *version); @@ -2049,11 +2065,8 @@ static inline u8 ata_wait_idle(struct ata_port *ap) { u8 status = ata_sff_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000); -#ifdef ATA_DEBUG if (status != 0xff && (status & (ATA_BUSY | ATA_DRQ))) - ata_port_printk(ap, KERN_DEBUG, "abnormal Status 0x%X\n", - status); -#endif + ata_port_dbg(ap, "abnormal Status 0x%X\n", status); return status; } diff --git a/include/linux/mbcache.h b/include/linux/mbcache.h index 8eca7f25c432..591bc4cefe1d 100644 --- a/include/linux/mbcache.h +++ b/include/linux/mbcache.h @@ -10,16 +10,29 @@ struct mb_cache; +/* Cache entry flags */ +enum { + MBE_REFERENCED_B = 0, + MBE_REUSABLE_B +}; + struct mb_cache_entry { /* List of entries in cache - protected by cache->c_list_lock */ struct list_head e_list; - /* Hash table list - protected by hash chain bitlock */ + /* + * Hash table list - protected by hash chain bitlock. The entry is + * guaranteed to be hashed while e_refcnt > 0. + */ struct hlist_bl_node e_hash_list; + /* + * Entry refcount. Once it reaches zero, entry is unhashed and freed. + * While refcount > 0, the entry is guaranteed to stay in the hash and + * e.g. mb_cache_entry_try_delete() will fail. + */ atomic_t e_refcnt; /* Key in hash - stable during lifetime of the entry */ u32 e_key; - u32 e_referenced:1; - u32 e_reusable:1; + unsigned long e_flags; /* User provided value - stable during lifetime of the entry */ u64 e_value; }; @@ -29,20 +42,20 @@ void mb_cache_destroy(struct mb_cache *cache); int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key, u64 value, bool reusable); -void __mb_cache_entry_free(struct mb_cache_entry *entry); +void __mb_cache_entry_free(struct mb_cache *cache, + struct mb_cache_entry *entry); void mb_cache_entry_wait_unused(struct mb_cache_entry *entry); -static inline int mb_cache_entry_put(struct mb_cache *cache, - struct mb_cache_entry *entry) +static inline void mb_cache_entry_put(struct mb_cache *cache, + struct mb_cache_entry *entry) { unsigned int cnt = atomic_dec_return(&entry->e_refcnt); if (cnt > 0) { - if (cnt <= 3) + if (cnt <= 2) wake_up_var(&entry->e_refcnt); - return 0; + return; } - __mb_cache_entry_free(entry); - return 1; + __mb_cache_entry_free(cache, entry); } struct mb_cache_entry *mb_cache_entry_delete_or_get(struct mb_cache *cache, diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 66eaf0aa7f69..3e72133545ca 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -1074,6 +1074,11 @@ enum { MLX5_VPORT_ADMIN_STATE_AUTO = 0x2, }; +enum { + MLX5_VPORT_CVLAN_INSERT_WHEN_NO_CVLAN = 0x1, + MLX5_VPORT_CVLAN_INSERT_ALWAYS = 0x3, +}; + enum { MLX5_L3_PROT_TYPE_IPV4 = 0, MLX5_L3_PROT_TYPE_IPV6 = 1, diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index cd9d1c95129e..49ea0004109e 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -822,7 +822,8 @@ struct mlx5_ifc_e_switch_cap_bits { u8 vport_svlan_insert[0x1]; u8 vport_cvlan_insert_if_not_exist[0x1]; u8 vport_cvlan_insert_overwrite[0x1]; - u8 reserved_at_5[0x2]; + u8 reserved_at_5[0x1]; + u8 vport_cvlan_insert_always[0x1]; u8 esw_shared_ingress_acl[0x1]; u8 esw_uplink_ingress_acl[0x1]; u8 root_ft_on_other_esw[0x1]; diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 3b97438afe3e..3a75d644a120 100644 --- a/include/linux/netdevice.h +++ 
b/include/linux/netdevice.h @@ -167,31 +167,38 @@ static inline bool dev_xmit_complete(int rc) * (unsigned long) so they can be read and written atomically. */ +#define NET_DEV_STAT(FIELD) \ + union { \ + unsigned long FIELD; \ + atomic_long_t __##FIELD; \ + } + struct net_device_stats { - unsigned long rx_packets; - unsigned long tx_packets; - unsigned long rx_bytes; - unsigned long tx_bytes; - unsigned long rx_errors; - unsigned long tx_errors; - unsigned long rx_dropped; - unsigned long tx_dropped; - unsigned long multicast; - unsigned long collisions; - unsigned long rx_length_errors; - unsigned long rx_over_errors; - unsigned long rx_crc_errors; - unsigned long rx_frame_errors; - unsigned long rx_fifo_errors; - unsigned long rx_missed_errors; - unsigned long tx_aborted_errors; - unsigned long tx_carrier_errors; - unsigned long tx_fifo_errors; - unsigned long tx_heartbeat_errors; - unsigned long tx_window_errors; - unsigned long rx_compressed; - unsigned long tx_compressed; + NET_DEV_STAT(rx_packets); + NET_DEV_STAT(tx_packets); + NET_DEV_STAT(rx_bytes); + NET_DEV_STAT(tx_bytes); + NET_DEV_STAT(rx_errors); + NET_DEV_STAT(tx_errors); + NET_DEV_STAT(rx_dropped); + NET_DEV_STAT(tx_dropped); + NET_DEV_STAT(multicast); + NET_DEV_STAT(collisions); + NET_DEV_STAT(rx_length_errors); + NET_DEV_STAT(rx_over_errors); + NET_DEV_STAT(rx_crc_errors); + NET_DEV_STAT(rx_frame_errors); + NET_DEV_STAT(rx_fifo_errors); + NET_DEV_STAT(rx_missed_errors); + NET_DEV_STAT(tx_aborted_errors); + NET_DEV_STAT(tx_carrier_errors); + NET_DEV_STAT(tx_fifo_errors); + NET_DEV_STAT(tx_heartbeat_errors); + NET_DEV_STAT(tx_window_errors); + NET_DEV_STAT(rx_compressed); + NET_DEV_STAT(tx_compressed); }; +#undef NET_DEV_STAT #include @@ -5477,4 +5484,9 @@ extern struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly; extern struct net_device *blackhole_netdev; +/* Note: Avoid these macros in fast path, prefer per-cpu or per-queue counters. 
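 * A short illustration of the intended use (it mirrors the skb_tunnel_rx()
 * conversion in the net/dst.h hunk later in this patch): instead of the racy
 *
 *	dev->stats.rx_packets++;
 *	dev->stats.rx_bytes += skb->len;
 *
 * a slow-path caller updates the shared counters atomically with
 *
 *	DEV_STATS_INC(dev, rx_packets);
 *	DEV_STATS_ADD(dev, rx_bytes, skb->len);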
*/ +#define DEV_STATS_INC(DEV, FIELD) atomic_long_inc(&(DEV)->stats.__##FIELD) +#define DEV_STATS_ADD(DEV, FIELD, VAL) \ + atomic_long_add((VAL), &(DEV)->stats.__##FIELD) + #endif /* _LINUX_NETDEVICE_H */ diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h index ada1296c87d5..72f5ebc5c97a 100644 --- a/include/linux/netfilter/ipset/ip_set.h +++ b/include/linux/netfilter/ipset/ip_set.h @@ -197,7 +197,7 @@ struct ip_set_region { }; /* Max range where every element is added/deleted in one step */ -#define IPSET_MAX_RANGE (1<<20) +#define IPSET_MAX_RANGE (1<<14) /* The max revision number supported by any set type + 1 */ #define IPSET_REVISION_MAX 9 diff --git a/include/linux/nvme.h b/include/linux/nvme.h index 039f59ee8f43..de235916c31c 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h @@ -7,6 +7,7 @@ #ifndef _LINUX_NVME_H #define _LINUX_NVME_H +#include #include #include @@ -539,7 +540,7 @@ enum { NVME_CMD_EFFECTS_NCC = 1 << 2, NVME_CMD_EFFECTS_NIC = 1 << 3, NVME_CMD_EFFECTS_CCC = 1 << 4, - NVME_CMD_EFFECTS_CSE_MASK = 3 << 16, + NVME_CMD_EFFECTS_CSE_MASK = GENMASK(18, 16), NVME_CMD_EFFECTS_UUID_SEL = 1 << 19, }; diff --git a/include/linux/overflow.h b/include/linux/overflow.h index 4669632bd72b..59d7228104d0 100644 --- a/include/linux/overflow.h +++ b/include/linux/overflow.h @@ -117,6 +117,69 @@ static inline bool __must_check __must_check_overflow(bool overflow) (*_d >> _to_shift) != _a); \ })) +/** + * size_mul() - Calculate size_t multiplication with saturation at SIZE_MAX + * + * @factor1: first factor + * @factor2: second factor + * + * Returns: calculate @factor1 * @factor2, both promoted to size_t, + * with any overflow causing the return value to be SIZE_MAX. The + * lvalue must be size_t to avoid implicit type conversion. + */ +static inline size_t __must_check size_mul(size_t factor1, size_t factor2) +{ + size_t bytes; + + if (check_mul_overflow(factor1, factor2, &bytes)) + return SIZE_MAX; + + return bytes; +} + +/** + * size_add() - Calculate size_t addition with saturation at SIZE_MAX + * + * @addend1: first addend + * @addend2: second addend + * + * Returns: calculate @addend1 + @addend2, both promoted to size_t, + * with any overflow causing the return value to be SIZE_MAX. The + * lvalue must be size_t to avoid implicit type conversion. + */ +static inline size_t __must_check size_add(size_t addend1, size_t addend2) +{ + size_t bytes; + + if (check_add_overflow(addend1, addend2, &bytes)) + return SIZE_MAX; + + return bytes; +} + +/** + * size_sub() - Calculate size_t subtraction with saturation at SIZE_MAX + * + * @minuend: value to subtract from + * @subtrahend: value to subtract from @minuend + * + * Returns: calculate @minuend - @subtrahend, both promoted to size_t, + * with any overflow causing the return value to be SIZE_MAX. For + * composition with the size_add() and size_mul() helpers, neither + * argument may be SIZE_MAX (or the result with be forced to SIZE_MAX). + * The lvalue must be size_t to avoid implicit type conversion. + */ +static inline size_t __must_check size_sub(size_t minuend, size_t subtrahend) +{ + size_t bytes; + + if (minuend == SIZE_MAX || subtrahend == SIZE_MAX || + check_sub_overflow(minuend, subtrahend, &bytes)) + return SIZE_MAX; + + return bytes; +} + /** * array_size() - Calculate size of 2-dimensional array. * @@ -128,15 +191,7 @@ static inline bool __must_check __must_check_overflow(bool overflow) * Returns: number of bytes needed to represent the array or SIZE_MAX on * overflow. 
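 *
 * Editor's illustration of how this composes with the new saturating
 * helpers above (the struct names are hypothetical, not from this patch):
 *
 *	bytes = size_add(sizeof(struct hdr), array_size(n, sizeof(struct elem)));
 *
 * Any overflow forces the result to SIZE_MAX, so a subsequent allocation
 * fails instead of being undersized.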
*/ -static inline __must_check size_t array_size(size_t a, size_t b) -{ - size_t bytes; - - if (check_mul_overflow(a, b, &bytes)) - return SIZE_MAX; - - return bytes; -} +#define array_size(a, b) size_mul(a, b) /** * array3_size() - Calculate size of 3-dimensional array. @@ -150,49 +205,7 @@ static inline __must_check size_t array_size(size_t a, size_t b) * Returns: number of bytes needed to represent the array or SIZE_MAX on * overflow. */ -static inline __must_check size_t array3_size(size_t a, size_t b, size_t c) -{ - size_t bytes; - - if (check_mul_overflow(a, b, &bytes)) - return SIZE_MAX; - if (check_mul_overflow(bytes, c, &bytes)) - return SIZE_MAX; - - return bytes; -} - -/* - * Compute a*b+c, returning SIZE_MAX on overflow. Internal helper for - * struct_size() below. - */ -static inline __must_check size_t __ab_c_size(size_t a, size_t b, size_t c) -{ - size_t bytes; - - if (check_mul_overflow(a, b, &bytes)) - return SIZE_MAX; - if (check_add_overflow(bytes, c, &bytes)) - return SIZE_MAX; - - return bytes; -} - -/** - * struct_size() - Calculate size of structure with trailing array. - * @p: Pointer to the structure. - * @member: Name of the array member. - * @count: Number of elements in the array. - * - * Calculates size of memory needed for structure @p followed by an - * array of @count number of @member elements. - * - * Return: number of bytes needed or SIZE_MAX on overflow. - */ -#define struct_size(p, member, count) \ - __ab_c_size(count, \ - sizeof(*(p)->member) + __must_be_array((p)->member),\ - sizeof(*(p))) +#define array3_size(a, b, c) size_mul(size_mul(a, b), c) /** * flex_array_size() - Calculate size of a flexible array member @@ -208,7 +221,22 @@ static inline __must_check size_t __ab_c_size(size_t a, size_t b, size_t c) * Return: number of bytes needed or SIZE_MAX on overflow. */ #define flex_array_size(p, member, count) \ - array_size(count, \ - sizeof(*(p)->member) + __must_be_array((p)->member)) + size_mul(count, \ + sizeof(*(p)->member) + __must_be_array((p)->member)) + +/** + * struct_size() - Calculate size of structure with trailing flexible array. + * + * @p: Pointer to the structure. + * @member: Name of the array member. + * @count: Number of elements in the array. + * + * Calculates size of memory needed for structure @p followed by an + * array of @count number of @member elements. + * + * Return: number of bytes needed or SIZE_MAX on overflow. 
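 *
 * Minimal usage sketch (editor's example, not from this patch; "entries"
 * names a hypothetical flexible array member of *p):
 *
 *	foo = kmalloc(struct_size(foo, entries, count), GFP_KERNEL);
 *
 * which evaluates to sizeof(*foo) + count * sizeof(*foo->entries),
 * saturating at SIZE_MAX on overflow.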
+ */ +#define struct_size(p, member, count) \ + size_add(sizeof(*(p)), flex_array_size(p, member, count)) #endif /* __LINUX_OVERFLOW_H */ diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h index 069c7fd95396..a2f25b26ae1e 100644 --- a/include/linux/proc_fs.h +++ b/include/linux/proc_fs.h @@ -191,8 +191,10 @@ static inline void proc_remove(struct proc_dir_entry *de) {} static inline int remove_proc_subtree(const char *name, struct proc_dir_entry *parent) { return 0; } #define proc_create_net_data(name, mode, parent, ops, state_size, data) ({NULL;}) +#define proc_create_net_data_write(name, mode, parent, ops, write, state_size, data) ({NULL;}) #define proc_create_net(name, mode, parent, state_size, ops) ({NULL;}) #define proc_create_net_single(name, mode, parent, show, data) ({NULL;}) +#define proc_create_net_single_write(name, mode, parent, show, write, data) ({NULL;}) static inline struct pid *tgid_pidfd_to_pid(const struct file *file) { diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h index ba015a77238a..6e18ca234f81 100644 --- a/include/linux/skmsg.h +++ b/include/linux/skmsg.h @@ -83,6 +83,7 @@ struct sk_psock { u32 apply_bytes; u32 cork_bytes; u32 eval; + bool redir_ingress; /* undefined if sk_redir is null */ struct sk_msg *cork; struct sk_psock_progs progs; #if IS_ENABLED(CONFIG_BPF_STREAM_PARSER) diff --git a/include/linux/soc/qcom/apr.h b/include/linux/soc/qcom/apr.h index 137f9f2ac4c3..7bca213a3f83 100644 --- a/include/linux/soc/qcom/apr.h +++ b/include/linux/soc/qcom/apr.h @@ -79,6 +79,15 @@ struct apr_resp_pkt { #define APR_SVC_MAJOR_VERSION(v) ((v >> 16) & 0xFF) #define APR_SVC_MINOR_VERSION(v) (v & 0xFF) +struct packet_router; +struct pkt_router_svc { + struct device *dev; + struct packet_router *pr; + spinlock_t lock; + int id; + void *priv; +}; + struct apr_device { struct device dev; uint16_t svc_id; @@ -86,11 +95,12 @@ struct apr_device { uint32_t version; char name[APR_NAME_SIZE]; const char *service_path; - spinlock_t lock; + struct pkt_router_svc svc; struct list_head node; }; #define to_apr_device(d) container_of(d, struct apr_device, dev) +#define svc_to_apr_device(d) container_of(d, struct apr_device, svc) struct apr_driver { int (*probe)(struct apr_device *sl); diff --git a/include/linux/sunrpc/rpc_pipe_fs.h b/include/linux/sunrpc/rpc_pipe_fs.h index cd188a527d16..3b35b6f6533a 100644 --- a/include/linux/sunrpc/rpc_pipe_fs.h +++ b/include/linux/sunrpc/rpc_pipe_fs.h @@ -92,6 +92,11 @@ extern ssize_t rpc_pipe_generic_upcall(struct file *, struct rpc_pipe_msg *, char __user *, size_t); extern int rpc_queue_upcall(struct rpc_pipe *, struct rpc_pipe_msg *); +/* returns true if the msg is in-flight, i.e., already eaten by the peer */ +static inline bool rpc_msg_is_inflight(const struct rpc_pipe_msg *msg) { + return (msg->copied != 0 && list_empty(&msg->list)); +} + struct rpc_clnt; extern struct dentry *rpc_create_client_dir(struct dentry *, const char *, struct rpc_clnt *); extern int rpc_remove_client_dir(struct rpc_clnt *); diff --git a/include/linux/timerqueue.h b/include/linux/timerqueue.h index 93884086f392..adc80e29168e 100644 --- a/include/linux/timerqueue.h +++ b/include/linux/timerqueue.h @@ -35,7 +35,7 @@ struct timerqueue_node *timerqueue_getnext(struct timerqueue_head *head) { struct rb_node *leftmost = rb_first_cached(&head->rb_root); - return rb_entry(leftmost, struct timerqueue_node, node); + return rb_entry_safe(leftmost, struct timerqueue_node, node); } static inline void timerqueue_init(struct timerqueue_node *node) diff --git 
a/include/linux/usb/typec.h b/include/linux/usb/typec.h index e2e44bb1dad8..c1e5910809ad 100644 --- a/include/linux/usb/typec.h +++ b/include/linux/usb/typec.h @@ -295,6 +295,9 @@ int typec_set_mode(struct typec_port *port, int mode); void *typec_get_drvdata(struct typec_port *port); +int typec_get_fw_cap(struct typec_capability *cap, + struct fwnode_handle *fwnode); + int typec_find_pwr_opmode(const char *name); int typec_find_orientation(const char *name); int typec_find_port_power_role(const char *name); diff --git a/include/media/dvbdev.h b/include/media/dvbdev.h index 2f6b0861322a..ac60c9fcfe9a 100644 --- a/include/media/dvbdev.h +++ b/include/media/dvbdev.h @@ -126,6 +126,7 @@ struct dvb_adapter { * struct dvb_device - represents a DVB device node * * @list_head: List head with all DVB devices + * @ref: reference counter * @fops: pointer to struct file_operations * @adapter: pointer to the adapter that holds this device node * @type: type of the device, as defined by &enum dvb_device_type. @@ -156,6 +157,7 @@ struct dvb_adapter { */ struct dvb_device { struct list_head list_head; + struct kref ref; const struct file_operations *fops; struct dvb_adapter *adapter; enum dvb_device_type type; @@ -187,6 +189,20 @@ struct dvb_device { void *priv; }; +/** + * dvb_device_get - Increase dvb_device reference + * + * @dvbdev: pointer to struct dvb_device + */ +struct dvb_device *dvb_device_get(struct dvb_device *dvbdev); + +/** + * dvb_device_put - Decrease dvb_device reference + * + * @dvbdev: pointer to struct dvb_device + */ +void dvb_device_put(struct dvb_device *dvbdev); + /** * dvb_register_adapter - Registers a new DVB adapter * @@ -231,29 +247,17 @@ int dvb_register_device(struct dvb_adapter *adap, /** * dvb_remove_device - Remove a registered DVB device * - * This does not free memory. To do that, call dvb_free_device(). + * This does not free memory. dvb_free_device() will do that when + * reference counter is empty * * @dvbdev: pointer to struct dvb_device */ void dvb_remove_device(struct dvb_device *dvbdev); -/** - * dvb_free_device - Free memory occupied by a DVB device. - * - * Call dvb_unregister_device() before calling this function. - * - * @dvbdev: pointer to struct dvb_device - */ -void dvb_free_device(struct dvb_device *dvbdev); /** * dvb_unregister_device - Unregisters a DVB device * - * This is a combination of dvb_remove_device() and dvb_free_device(). - * Using this function is usually a mistake, and is often an indicator - * for a use-after-free bug (when a userspace process keeps a file - * handle to a detached device). 
- * * @dvbdev: pointer to struct dvb_device */ void dvb_unregister_device(struct dvb_device *dvbdev); diff --git a/include/net/dst.h b/include/net/dst.h index a057319aabef..17697ec79949 100644 --- a/include/net/dst.h +++ b/include/net/dst.h @@ -361,9 +361,8 @@ static inline void __skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev, static inline void skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev, struct net *net) { - /* TODO : stats should be SMP safe */ - dev->stats.rx_packets++; - dev->stats.rx_bytes += skb->len; + DEV_STATS_INC(dev, rx_packets); + DEV_STATS_ADD(dev, rx_bytes, skb->len); __skb_tunnel_rx(skb, dev, net); } diff --git a/include/net/mptcp.h b/include/net/mptcp.h index 3214848402ec..1120363987d0 100644 --- a/include/net/mptcp.h +++ b/include/net/mptcp.h @@ -93,8 +93,6 @@ struct mptcp_out_options { }; #ifdef CONFIG_MPTCP -extern struct request_sock_ops mptcp_subflow_request_sock_ops; - void mptcp_init(void); static inline bool sk_is_mptcp(const struct sock *sk) @@ -182,6 +180,9 @@ void mptcp_seq_show(struct seq_file *seq); int mptcp_subflow_init_cookie_req(struct request_sock *req, const struct sock *sk_listener, struct sk_buff *skb); +struct request_sock *mptcp_subflow_reqsk_alloc(const struct request_sock_ops *ops, + struct sock *sk_listener, + bool attach_listener); __be32 mptcp_get_reset_option(const struct sk_buff *skb); @@ -274,6 +275,13 @@ static inline int mptcp_subflow_init_cookie_req(struct request_sock *req, return 0; /* TCP fallback */ } +static inline struct request_sock *mptcp_subflow_reqsk_alloc(const struct request_sock_ops *ops, + struct sock *sk_listener, + bool attach_listener) +{ + return NULL; +} + static inline __be32 mptcp_reset_option(const struct sk_buff *skb) { return htonl(0u); } #endif /* CONFIG_MPTCP */ diff --git a/include/net/mrp.h b/include/net/mrp.h index 1c308c034e1a..a8102661fd61 100644 --- a/include/net/mrp.h +++ b/include/net/mrp.h @@ -120,6 +120,7 @@ struct mrp_applicant { struct sk_buff *pdu; struct rb_root mad; struct rcu_head rcu; + bool active; }; struct mrp_port { diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 53746494eb84..80df8ff5e675 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -283,17 +283,29 @@ struct nft_set_iter { /** * struct nft_set_desc - description of set elements * + * @ktype: key type * @klen: key length + * @dtype: data type * @dlen: data length + * @objtype: object type + * @flags: flags * @size: number of set elements + * @policy: set policy + * @gc_int: garbage collector interval * @field_len: length of each field in concatenation, bytes * @field_count: number of concatenated fields in element * @expr: set must support for expressions */ struct nft_set_desc { + u32 ktype; unsigned int klen; + u32 dtype; unsigned int dlen; + u32 objtype; unsigned int size; + u32 policy; + u32 gc_int; + u64 timeout; u8 field_len[NFT_REG32_COUNT]; u8 field_count; bool expr; @@ -550,7 +562,9 @@ void *nft_set_catchall_gc(const struct nft_set *set); static inline unsigned long nft_set_gc_interval(const struct nft_set *set) { - return set->gc_int ? msecs_to_jiffies(set->gc_int) : HZ; + u32 gc_int = READ_ONCE(set->gc_int); + + return gc_int ? 
msecs_to_jiffies(gc_int) : HZ; } /** @@ -1499,6 +1513,9 @@ struct nft_trans_rule { struct nft_trans_set { struct nft_set *set; u32 set_id; + u32 gc_int; + u64 timeout; + bool update; bool bound; }; @@ -1508,6 +1525,12 @@ struct nft_trans_set { (((struct nft_trans_set *)trans->data)->set_id) #define nft_trans_set_bound(trans) \ (((struct nft_trans_set *)trans->data)->bound) +#define nft_trans_set_update(trans) \ + (((struct nft_trans_set *)trans->data)->update) +#define nft_trans_set_timeout(trans) \ + (((struct nft_trans_set *)trans->data)->timeout) +#define nft_trans_set_gc_int(trans) \ + (((struct nft_trans_set *)trans->data)->gc_int) struct nft_trans_chain { bool update; diff --git a/include/net/sock_reuseport.h b/include/net/sock_reuseport.h index efc9085c6892..6ec140b0a61b 100644 --- a/include/net/sock_reuseport.h +++ b/include/net/sock_reuseport.h @@ -16,6 +16,7 @@ struct sock_reuseport { u16 max_socks; /* length of socks */ u16 num_socks; /* elements in socks */ u16 num_closed_socks; /* closed elements in socks */ + u16 incoming_cpu; /* The last synq overflow event timestamp of this * reuse->socks[] group. */ @@ -58,5 +59,6 @@ static inline bool reuseport_has_conns(struct sock *sk) } void reuseport_has_conns_set(struct sock *sk); +void reuseport_update_incoming_cpu(struct sock *sk, int val); #endif /* _SOCK_REUSEPORT_H */ diff --git a/include/net/tcp.h b/include/net/tcp.h index 81ef95dc27ba..fdac6913b6c8 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -2243,8 +2243,8 @@ int tcp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore); void tcp_bpf_clone(const struct sock *sk, struct sock *newsk); #endif /* CONFIG_BPF_SYSCALL */ -int tcp_bpf_sendmsg_redir(struct sock *sk, struct sk_msg *msg, u32 bytes, - int flags); +int tcp_bpf_sendmsg_redir(struct sock *sk, bool ingress, + struct sk_msg *msg, u32 bytes, int flags); #endif /* CONFIG_NET_SOCK_MSG */ #if !defined(CONFIG_BPF_SYSCALL) || !defined(CONFIG_NET_SOCK_MSG) diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h index 22af68b01426..658fccdc8660 100644 --- a/include/sound/hdaudio.h +++ b/include/sound/hdaudio.h @@ -558,6 +558,8 @@ int snd_hdac_stream_set_params(struct hdac_stream *azx_dev, void snd_hdac_stream_start(struct hdac_stream *azx_dev, bool fresh_start); void snd_hdac_stream_clear(struct hdac_stream *azx_dev); void snd_hdac_stream_stop(struct hdac_stream *azx_dev); +void snd_hdac_stop_streams(struct hdac_bus *bus); +void snd_hdac_stop_streams_and_chip(struct hdac_bus *bus); void snd_hdac_stream_reset(struct hdac_stream *azx_dev); void snd_hdac_stream_sync_trigger(struct hdac_stream *azx_dev, bool set, unsigned int streams, unsigned int reg); diff --git a/include/sound/hdaudio_ext.h b/include/sound/hdaudio_ext.h index d4e31ea16aba..56ea5cde5e63 100644 --- a/include/sound/hdaudio_ext.h +++ b/include/sound/hdaudio_ext.h @@ -92,7 +92,6 @@ void snd_hdac_ext_stream_decouple_locked(struct hdac_bus *bus, struct hdac_ext_stream *azx_dev, bool decouple); void snd_hdac_ext_stream_decouple(struct hdac_bus *bus, struct hdac_ext_stream *azx_dev, bool decouple); -void snd_hdac_ext_stop_streams(struct hdac_bus *bus); int snd_hdac_ext_stream_set_spib(struct hdac_bus *bus, struct hdac_ext_stream *stream, u32 value); diff --git a/include/sound/pcm.h b/include/sound/pcm.h index e08bf475d02d..181df0452ae2 100644 --- a/include/sound/pcm.h +++ b/include/sound/pcm.h @@ -106,24 +106,24 @@ struct snd_pcm_ops { #define SNDRV_PCM_POS_XRUN ((snd_pcm_uframes_t)-1) /* If you change this don't forget to change rates[] 
table in pcm_native.c */ -#define SNDRV_PCM_RATE_5512 (1<<0) /* 5512Hz */ -#define SNDRV_PCM_RATE_8000 (1<<1) /* 8000Hz */ -#define SNDRV_PCM_RATE_11025 (1<<2) /* 11025Hz */ -#define SNDRV_PCM_RATE_16000 (1<<3) /* 16000Hz */ -#define SNDRV_PCM_RATE_22050 (1<<4) /* 22050Hz */ -#define SNDRV_PCM_RATE_32000 (1<<5) /* 32000Hz */ -#define SNDRV_PCM_RATE_44100 (1<<6) /* 44100Hz */ -#define SNDRV_PCM_RATE_48000 (1<<7) /* 48000Hz */ -#define SNDRV_PCM_RATE_64000 (1<<8) /* 64000Hz */ -#define SNDRV_PCM_RATE_88200 (1<<9) /* 88200Hz */ -#define SNDRV_PCM_RATE_96000 (1<<10) /* 96000Hz */ -#define SNDRV_PCM_RATE_176400 (1<<11) /* 176400Hz */ -#define SNDRV_PCM_RATE_192000 (1<<12) /* 192000Hz */ -#define SNDRV_PCM_RATE_352800 (1<<13) /* 352800Hz */ -#define SNDRV_PCM_RATE_384000 (1<<14) /* 384000Hz */ +#define SNDRV_PCM_RATE_5512 (1U<<0) /* 5512Hz */ +#define SNDRV_PCM_RATE_8000 (1U<<1) /* 8000Hz */ +#define SNDRV_PCM_RATE_11025 (1U<<2) /* 11025Hz */ +#define SNDRV_PCM_RATE_16000 (1U<<3) /* 16000Hz */ +#define SNDRV_PCM_RATE_22050 (1U<<4) /* 22050Hz */ +#define SNDRV_PCM_RATE_32000 (1U<<5) /* 32000Hz */ +#define SNDRV_PCM_RATE_44100 (1U<<6) /* 44100Hz */ +#define SNDRV_PCM_RATE_48000 (1U<<7) /* 48000Hz */ +#define SNDRV_PCM_RATE_64000 (1U<<8) /* 64000Hz */ +#define SNDRV_PCM_RATE_88200 (1U<<9) /* 88200Hz */ +#define SNDRV_PCM_RATE_96000 (1U<<10) /* 96000Hz */ +#define SNDRV_PCM_RATE_176400 (1U<<11) /* 176400Hz */ +#define SNDRV_PCM_RATE_192000 (1U<<12) /* 192000Hz */ +#define SNDRV_PCM_RATE_352800 (1U<<13) /* 352800Hz */ +#define SNDRV_PCM_RATE_384000 (1U<<14) /* 384000Hz */ -#define SNDRV_PCM_RATE_CONTINUOUS (1<<30) /* continuous range */ -#define SNDRV_PCM_RATE_KNOT (1<<31) /* supports more non-continuos rates */ +#define SNDRV_PCM_RATE_CONTINUOUS (1U<<30) /* continuous range */ +#define SNDRV_PCM_RATE_KNOT (1U<<31) /* supports more non-continuos rates */ #define SNDRV_PCM_RATE_8000_44100 (SNDRV_PCM_RATE_8000|SNDRV_PCM_RATE_11025|\ SNDRV_PCM_RATE_16000|SNDRV_PCM_RATE_22050|\ diff --git a/include/sound/soc-dai.h b/include/sound/soc-dai.h index 0dcb361a98bb..ef3bb1bcea4e 100644 --- a/include/sound/soc-dai.h +++ b/include/sound/soc-dai.h @@ -295,9 +295,9 @@ struct snd_soc_dai_ops { unsigned int *rx_num, unsigned int *rx_slot); int (*set_tristate)(struct snd_soc_dai *dai, int tristate); - int (*set_sdw_stream)(struct snd_soc_dai *dai, - void *stream, int direction); - void *(*get_sdw_stream)(struct snd_soc_dai *dai, int direction); + int (*set_stream)(struct snd_soc_dai *dai, + void *stream, int direction); + void *(*get_stream)(struct snd_soc_dai *dai, int direction); /* * DAI digital mute - optional. @@ -515,42 +515,42 @@ static inline void *snd_soc_dai_get_drvdata(struct snd_soc_dai *dai) } /** - * snd_soc_dai_set_sdw_stream() - Configures a DAI for SDW stream operation + * snd_soc_dai_set_stream() - Configures a DAI for stream operation * @dai: DAI - * @stream: STREAM + * @stream: STREAM (opaque structure depending on DAI type) * @direction: Stream direction(Playback/Capture) - * SoundWire subsystem doesn't have a notion of direction and we reuse + * Some subsystems, such as SoundWire, don't have a notion of direction and we reuse * the ASoC stream direction to configure sink/source ports. * Playback maps to source ports and Capture for sink ports. * * This should be invoked with NULL to clear the stream set previously. * Returns 0 on success, a negative error code otherwise. 
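 *
 * Illustrative call site (editor's sketch; "dai", "sdw_stream" and
 * "substream" are placeholder names): a SoundWire driver that previously
 * called snd_soc_dai_set_sdw_stream() now does
 *
 *	ret = snd_soc_dai_set_stream(dai, sdw_stream, substream->stream);
 *
 * where the direction is the usual SNDRV_PCM_STREAM_PLAYBACK/CAPTURE value.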
*/ -static inline int snd_soc_dai_set_sdw_stream(struct snd_soc_dai *dai, - void *stream, int direction) +static inline int snd_soc_dai_set_stream(struct snd_soc_dai *dai, + void *stream, int direction) { - if (dai->driver->ops->set_sdw_stream) - return dai->driver->ops->set_sdw_stream(dai, stream, direction); + if (dai->driver->ops->set_stream) + return dai->driver->ops->set_stream(dai, stream, direction); else return -ENOTSUPP; } /** - * snd_soc_dai_get_sdw_stream() - Retrieves SDW stream from DAI + * snd_soc_dai_get_stream() - Retrieves stream from DAI * @dai: DAI * @direction: Stream direction(Playback/Capture) * * This routine only retrieves that was previously configured - * with snd_soc_dai_get_sdw_stream() + * with snd_soc_dai_get_stream() * * Returns pointer to stream or an ERR_PTR value, e.g. * ERR_PTR(-ENOTSUPP) if callback is not supported; */ -static inline void *snd_soc_dai_get_sdw_stream(struct snd_soc_dai *dai, - int direction) +static inline void *snd_soc_dai_get_stream(struct snd_soc_dai *dai, + int direction) { - if (dai->driver->ops->get_sdw_stream) - return dai->driver->ops->get_sdw_stream(dai, direction); + if (dai->driver->ops->get_stream) + return dai->driver->ops->get_stream(dai, direction); else return ERR_PTR(-ENOTSUPP); } diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h index 61a64d1b2bb6..c649c7fcb9af 100644 --- a/include/trace/events/ext4.h +++ b/include/trace/events/ext4.h @@ -104,6 +104,7 @@ TRACE_DEFINE_ENUM(EXT4_FC_REASON_RESIZE); TRACE_DEFINE_ENUM(EXT4_FC_REASON_RENAME_DIR); TRACE_DEFINE_ENUM(EXT4_FC_REASON_FALLOC_RANGE); TRACE_DEFINE_ENUM(EXT4_FC_REASON_INODE_JOURNAL_DATA); +TRACE_DEFINE_ENUM(EXT4_FC_REASON_ENCRYPTED_FILENAME); TRACE_DEFINE_ENUM(EXT4_FC_REASON_MAX); #define show_fc_reason(reason) \ @@ -116,7 +117,8 @@ TRACE_DEFINE_ENUM(EXT4_FC_REASON_MAX); { EXT4_FC_REASON_RESIZE, "RESIZE"}, \ { EXT4_FC_REASON_RENAME_DIR, "RENAME_DIR"}, \ { EXT4_FC_REASON_FALLOC_RANGE, "FALLOC_RANGE"}, \ - { EXT4_FC_REASON_INODE_JOURNAL_DATA, "INODE_JOURNAL_DATA"}) + { EXT4_FC_REASON_INODE_JOURNAL_DATA, "INODE_JOURNAL_DATA"}, \ + { EXT4_FC_REASON_ENCRYPTED_FILENAME, "ENCRYPTED_FILENAME"}) TRACE_EVENT(ext4_other_inode_update_time, TP_PROTO(struct inode *inode, ino_t orig_ino), @@ -2764,7 +2766,7 @@ TRACE_EVENT(ext4_fc_stats, ), TP_printk("dev %d,%d fc ineligible reasons:\n" - "%s:%u, %s:%u, %s:%u, %s:%u, %s:%u, %s:%u, %s:%u, %s:%u, %s:%u " + "%s:%u, %s:%u, %s:%u, %s:%u, %s:%u, %s:%u, %s:%u, %s:%u, %s:%u, %s:%u" "num_commits:%lu, ineligible: %lu, numblks: %lu", MAJOR(__entry->dev), MINOR(__entry->dev), FC_REASON_NAME_STAT(EXT4_FC_REASON_XATTR), @@ -2776,6 +2778,7 @@ TRACE_EVENT(ext4_fc_stats, FC_REASON_NAME_STAT(EXT4_FC_REASON_RENAME_DIR), FC_REASON_NAME_STAT(EXT4_FC_REASON_FALLOC_RANGE), FC_REASON_NAME_STAT(EXT4_FC_REASON_INODE_JOURNAL_DATA), + FC_REASON_NAME_STAT(EXT4_FC_REASON_ENCRYPTED_FILENAME), __entry->fc_commits, __entry->fc_ineligible_commits, __entry->fc_numblks) ); diff --git a/include/trace/events/ib_mad.h b/include/trace/events/ib_mad.h index 59363a083ecb..d92691c78cff 100644 --- a/include/trace/events/ib_mad.h +++ b/include/trace/events/ib_mad.h @@ -49,7 +49,6 @@ DECLARE_EVENT_CLASS(ib_mad_send_template, __field(int, retries_left) __field(int, max_retries) __field(int, retry) - __field(u16, pkey) ), TP_fast_assign( @@ -89,7 +88,7 @@ DECLARE_EVENT_CLASS(ib_mad_send_template, "hdr : base_ver 0x%x class 0x%x class_ver 0x%x " \ "method 0x%x status 0x%x class_specific 0x%x tid 0x%llx " \ "attr_id 0x%x attr_mod 0x%x => dlid 0x%08x sl %d "\ - 
"pkey 0x%x rpqn 0x%x rqpkey 0x%x", + "rpqn 0x%x rqpkey 0x%x", __entry->dev_index, __entry->port_num, __entry->qp_num, __entry->agent_priv, be64_to_cpu(__entry->wrtid), __entry->retries_left, __entry->max_retries, @@ -100,7 +99,7 @@ DECLARE_EVENT_CLASS(ib_mad_send_template, be16_to_cpu(__entry->class_specific), be64_to_cpu(__entry->tid), be16_to_cpu(__entry->attr_id), be32_to_cpu(__entry->attr_mod), - be32_to_cpu(__entry->dlid), __entry->sl, __entry->pkey, + be32_to_cpu(__entry->dlid), __entry->sl, __entry->rqpn, __entry->rqkey ) ); @@ -204,7 +203,6 @@ TRACE_EVENT(ib_mad_recv_done_handler, __field(u16, wc_status) __field(u32, slid) __field(u32, dev_index) - __field(u16, pkey) ), TP_fast_assign( @@ -224,9 +222,6 @@ TRACE_EVENT(ib_mad_recv_done_handler, __entry->slid = wc->slid; __entry->src_qp = wc->src_qp; __entry->sl = wc->sl; - ib_query_pkey(qp_info->port_priv->device, - qp_info->port_priv->port_num, - wc->pkey_index, &__entry->pkey); __entry->wc_status = wc->status; ), @@ -234,7 +229,7 @@ TRACE_EVENT(ib_mad_recv_done_handler, "base_ver 0x%02x class 0x%02x class_ver 0x%02x " \ "method 0x%02x status 0x%04x class_specific 0x%04x " \ "tid 0x%016llx attr_id 0x%04x attr_mod 0x%08x " \ - "slid 0x%08x src QP%d, sl %d pkey 0x%04x", + "slid 0x%08x src QP%d, sl %d", __entry->dev_index, __entry->port_num, __entry->qp_num, __entry->wc_status, __entry->length, @@ -244,7 +239,7 @@ TRACE_EVENT(ib_mad_recv_done_handler, be16_to_cpu(__entry->class_specific), be64_to_cpu(__entry->tid), be16_to_cpu(__entry->attr_id), be32_to_cpu(__entry->attr_mod), - __entry->slid, __entry->src_qp, __entry->sl, __entry->pkey + __entry->slid, __entry->src_qp, __entry->sl ) ); diff --git a/include/trace/events/jbd2.h b/include/trace/events/jbd2.h index a4dfe005983d..29414288ea3e 100644 --- a/include/trace/events/jbd2.h +++ b/include/trace/events/jbd2.h @@ -40,7 +40,7 @@ DECLARE_EVENT_CLASS(jbd2_commit, TP_STRUCT__entry( __field( dev_t, dev ) __field( char, sync_commit ) - __field( int, transaction ) + __field( tid_t, transaction ) ), TP_fast_assign( @@ -49,7 +49,7 @@ DECLARE_EVENT_CLASS(jbd2_commit, __entry->transaction = commit_transaction->t_tid; ), - TP_printk("dev %d,%d transaction %d sync %d", + TP_printk("dev %d,%d transaction %u sync %d", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->transaction, __entry->sync_commit) ); @@ -97,8 +97,8 @@ TRACE_EVENT(jbd2_end_commit, TP_STRUCT__entry( __field( dev_t, dev ) __field( char, sync_commit ) - __field( int, transaction ) - __field( int, head ) + __field( tid_t, transaction ) + __field( tid_t, head ) ), TP_fast_assign( @@ -108,7 +108,7 @@ TRACE_EVENT(jbd2_end_commit, __entry->head = journal->j_tail_sequence; ), - TP_printk("dev %d,%d transaction %d sync %d head %d", + TP_printk("dev %d,%d transaction %u sync %d head %u", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->transaction, __entry->sync_commit, __entry->head) ); @@ -134,14 +134,14 @@ TRACE_EVENT(jbd2_submit_inode_data, ); DECLARE_EVENT_CLASS(jbd2_handle_start_class, - TP_PROTO(dev_t dev, unsigned long tid, unsigned int type, + TP_PROTO(dev_t dev, tid_t tid, unsigned int type, unsigned int line_no, int requested_blocks), TP_ARGS(dev, tid, type, line_no, requested_blocks), TP_STRUCT__entry( __field( dev_t, dev ) - __field( unsigned long, tid ) + __field( tid_t, tid ) __field( unsigned int, type ) __field( unsigned int, line_no ) __field( int, requested_blocks) @@ -155,28 +155,28 @@ DECLARE_EVENT_CLASS(jbd2_handle_start_class, __entry->requested_blocks = requested_blocks; ), - TP_printk("dev %d,%d tid %lu type %u 
line_no %u " + TP_printk("dev %d,%d tid %u type %u line_no %u " "requested_blocks %d", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid, __entry->type, __entry->line_no, __entry->requested_blocks) ); DEFINE_EVENT(jbd2_handle_start_class, jbd2_handle_start, - TP_PROTO(dev_t dev, unsigned long tid, unsigned int type, + TP_PROTO(dev_t dev, tid_t tid, unsigned int type, unsigned int line_no, int requested_blocks), TP_ARGS(dev, tid, type, line_no, requested_blocks) ); DEFINE_EVENT(jbd2_handle_start_class, jbd2_handle_restart, - TP_PROTO(dev_t dev, unsigned long tid, unsigned int type, + TP_PROTO(dev_t dev, tid_t tid, unsigned int type, unsigned int line_no, int requested_blocks), TP_ARGS(dev, tid, type, line_no, requested_blocks) ); TRACE_EVENT(jbd2_handle_extend, - TP_PROTO(dev_t dev, unsigned long tid, unsigned int type, + TP_PROTO(dev_t dev, tid_t tid, unsigned int type, unsigned int line_no, int buffer_credits, int requested_blocks), @@ -184,7 +184,7 @@ TRACE_EVENT(jbd2_handle_extend, TP_STRUCT__entry( __field( dev_t, dev ) - __field( unsigned long, tid ) + __field( tid_t, tid ) __field( unsigned int, type ) __field( unsigned int, line_no ) __field( int, buffer_credits ) @@ -200,7 +200,7 @@ TRACE_EVENT(jbd2_handle_extend, __entry->requested_blocks = requested_blocks; ), - TP_printk("dev %d,%d tid %lu type %u line_no %u " + TP_printk("dev %d,%d tid %u type %u line_no %u " "buffer_credits %d requested_blocks %d", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid, __entry->type, __entry->line_no, __entry->buffer_credits, @@ -208,7 +208,7 @@ TRACE_EVENT(jbd2_handle_extend, ); TRACE_EVENT(jbd2_handle_stats, - TP_PROTO(dev_t dev, unsigned long tid, unsigned int type, + TP_PROTO(dev_t dev, tid_t tid, unsigned int type, unsigned int line_no, int interval, int sync, int requested_blocks, int dirtied_blocks), @@ -217,7 +217,7 @@ TRACE_EVENT(jbd2_handle_stats, TP_STRUCT__entry( __field( dev_t, dev ) - __field( unsigned long, tid ) + __field( tid_t, tid ) __field( unsigned int, type ) __field( unsigned int, line_no ) __field( int, interval ) @@ -237,7 +237,7 @@ TRACE_EVENT(jbd2_handle_stats, __entry->dirtied_blocks = dirtied_blocks; ), - TP_printk("dev %d,%d tid %lu type %u line_no %u interval %d " + TP_printk("dev %d,%d tid %u type %u line_no %u interval %d " "sync %d requested_blocks %d dirtied_blocks %d", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid, __entry->type, __entry->line_no, __entry->interval, @@ -246,14 +246,14 @@ TRACE_EVENT(jbd2_handle_stats, ); TRACE_EVENT(jbd2_run_stats, - TP_PROTO(dev_t dev, unsigned long tid, + TP_PROTO(dev_t dev, tid_t tid, struct transaction_run_stats_s *stats), TP_ARGS(dev, tid, stats), TP_STRUCT__entry( __field( dev_t, dev ) - __field( unsigned long, tid ) + __field( tid_t, tid ) __field( unsigned long, wait ) __field( unsigned long, request_delay ) __field( unsigned long, running ) @@ -279,7 +279,7 @@ TRACE_EVENT(jbd2_run_stats, __entry->blocks_logged = stats->rs_blocks_logged; ), - TP_printk("dev %d,%d tid %lu wait %u request_delay %u running %u " + TP_printk("dev %d,%d tid %u wait %u request_delay %u running %u " "locked %u flushing %u logging %u handle_count %u " "blocks %u blocks_logged %u", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid, @@ -294,14 +294,14 @@ TRACE_EVENT(jbd2_run_stats, ); TRACE_EVENT(jbd2_checkpoint_stats, - TP_PROTO(dev_t dev, unsigned long tid, + TP_PROTO(dev_t dev, tid_t tid, struct transaction_chp_stats_s *stats), TP_ARGS(dev, tid, stats), TP_STRUCT__entry( __field( dev_t, dev ) - __field( unsigned long, tid 
) + __field( tid_t, tid ) __field( unsigned long, chp_time ) __field( __u32, forced_to_close ) __field( __u32, written ) @@ -317,7 +317,7 @@ TRACE_EVENT(jbd2_checkpoint_stats, __entry->dropped = stats->cs_dropped; ), - TP_printk("dev %d,%d tid %lu chp_time %u forced_to_close %u " + TP_printk("dev %d,%d tid %u chp_time %u forced_to_close %u " "written %u dropped %u", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid, jiffies_to_msecs(__entry->chp_time), diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h index 808c73c52820..a50e4646bd6d 100644 --- a/include/uapi/drm/drm_fourcc.h +++ b/include/uapi/drm/drm_fourcc.h @@ -308,6 +308,13 @@ extern "C" { */ #define DRM_FORMAT_P016 fourcc_code('P', '0', '1', '6') /* 2x2 subsampled Cr:Cb plane 16 bits per channel */ +/* 2 plane YCbCr420. + * 3 10 bit components and 2 padding bits packed into 4 bytes. + * index 0 = Y plane, [31:0] x:Y2:Y1:Y0 2:10:10:10 little endian + * index 1 = Cr:Cb plane, [63:0] x:Cr2:Cb2:Cr1:x:Cb1:Cr0:Cb0 [2:10:10:10:2:10:10:10] little endian + */ +#define DRM_FORMAT_P030 fourcc_code('P', '0', '3', '0') /* 2x2 subsampled Cr:Cb plane 10 bits per channel packed */ + /* 3 plane non-subsampled (444) YCbCr * 16 bits per component, but only 10 bits are used and 6 bits are padded * index 0: Y plane, [15:0] Y:x [10:6] little endian @@ -842,6 +849,10 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier) * and UV. Some SAND-using hardware stores UV in a separate tiled * image from Y to reduce the column height, which is not supported * with these modifiers. + * + * The DRM_FORMAT_MOD_BROADCOM_SAND128_COL_HEIGHT modifier is also + * supported for DRM_FORMAT_P030 where the columns remain as 128 bytes + * wide, but as this is a 10 bpp format that translates to 96 pixels. 
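To make the P030 layout concrete, a small standalone sketch (not from the patch; it re-states the kernel's fourcc_code() composition rule locally) showing how the new fourcc resolves and how three 10-bit Y samples pack into one 32-bit word as described above:

/* P030: x:Y2:Y1:Y0 packed 2:10:10:10 into a 32-bit little-endian word. */
#include <stdint.h>
#include <stdio.h>

/* Same composition rule as the kernel's fourcc_code() macro. */
#define fourcc_code(a, b, c, d) ((uint32_t)(a) | ((uint32_t)(b) << 8) | \
				 ((uint32_t)(c) << 16) | ((uint32_t)(d) << 24))
#define DRM_FORMAT_P030 fourcc_code('P', '0', '3', '0')

int main(void)
{
	uint16_t y0 = 0x3ff, y1 = 0x200, y2 = 0x001;	/* three 10-bit samples */
	uint32_t word = (uint32_t)y0 | ((uint32_t)y1 << 10) | ((uint32_t)y2 << 20);

	printf("P030 fourcc 0x%08x, packed Y word 0x%08x\n",
	       DRM_FORMAT_P030, word);
	return 0;
}
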
*/ #define DRM_FORMAT_MOD_BROADCOM_SAND32_COL_HEIGHT(v) \ diff --git a/include/uapi/linux/idxd.h b/include/uapi/linux/idxd.h index c750eac09fc9..f7c01709cb0f 100644 --- a/include/uapi/linux/idxd.h +++ b/include/uapi/linux/idxd.h @@ -272,7 +272,7 @@ struct dsa_completion_record { }; uint32_t delta_rec_size; - uint32_t crc_val; + uint64_t crc_val; /* DIF check & strip */ struct { diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h index 7272f85d6d6a..3736f2fe1541 100644 --- a/include/uapi/linux/swab.h +++ b/include/uapi/linux/swab.h @@ -3,7 +3,7 @@ #define _UAPI_LINUX_SWAB_H #include -#include +#include #include #include diff --git a/include/uapi/sound/asequencer.h b/include/uapi/sound/asequencer.h index a75e14edc957..dbd60f48b4b0 100644 --- a/include/uapi/sound/asequencer.h +++ b/include/uapi/sound/asequencer.h @@ -344,10 +344,10 @@ typedef int __bitwise snd_seq_client_type_t; #define KERNEL_CLIENT ((__force snd_seq_client_type_t) 2) /* event filter flags */ -#define SNDRV_SEQ_FILTER_BROADCAST (1<<0) /* accept broadcast messages */ -#define SNDRV_SEQ_FILTER_MULTICAST (1<<1) /* accept multicast messages */ -#define SNDRV_SEQ_FILTER_BOUNCE (1<<2) /* accept bounce event in error */ -#define SNDRV_SEQ_FILTER_USE_EVENT (1<<31) /* use event filter */ +#define SNDRV_SEQ_FILTER_BROADCAST (1U<<0) /* accept broadcast messages */ +#define SNDRV_SEQ_FILTER_MULTICAST (1U<<1) /* accept multicast messages */ +#define SNDRV_SEQ_FILTER_BOUNCE (1U<<2) /* accept bounce event in error */ +#define SNDRV_SEQ_FILTER_USE_EVENT (1U<<31) /* use event filter */ struct snd_seq_client_info { int client; /* client number to inquire */ diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c index eebbe8a6da0c..c587221a289c 100644 --- a/io_uring/io_uring.c +++ b/io_uring/io_uring.c @@ -2701,7 +2701,7 @@ static bool __io_complete_rw_common(struct io_kiocb *req, long res) return false; } -static inline int io_fixup_rw_res(struct io_kiocb *req, unsigned res) +static inline int io_fixup_rw_res(struct io_kiocb *req, long res) { struct io_async_rw *io = req->async_data; @@ -7598,7 +7598,7 @@ static int io_run_task_work_sig(void) /* when returns >0, the caller should retry */ static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx, struct io_wait_queue *iowq, - ktime_t timeout) + ktime_t *timeout) { int ret; @@ -7610,7 +7610,7 @@ static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx, if (test_bit(0, &ctx->check_cq_overflow)) return 1; - if (!schedule_hrtimeout(&timeout, HRTIMER_MODE_ABS)) + if (!schedule_hrtimeout(timeout, HRTIMER_MODE_ABS)) return -ETIME; return 1; } @@ -7673,7 +7673,7 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, } prepare_to_wait_exclusive(&ctx->cq_wait, &iowq.wq, TASK_INTERRUPTIBLE); - ret = io_cqring_wait_schedule(ctx, &iowq, timeout); + ret = io_cqring_wait_schedule(ctx, &iowq, &timeout); finish_wait(&ctx->cq_wait, &iowq.wq); cond_resched(); } while (ret > 0); @@ -10895,8 +10895,6 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode, return -ENXIO; if (ctx->restricted) { - if (opcode >= IORING_REGISTER_LAST) - return -EINVAL; opcode = array_index_nospec(opcode, IORING_REGISTER_LAST); if (!test_bit(opcode, ctx->restrictions.register_op)) return -EACCES; @@ -11028,6 +11026,9 @@ SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode, long ret = -EBADF; struct fd f; + if (opcode >= IORING_REGISTER_LAST) + return -EINVAL; + f = fdget(fd); if (!f.file) return -EBADF; diff --git a/kernel/Makefile b/kernel/Makefile 
index 0e119c52a2cd..599cb926449a 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -59,7 +59,7 @@ obj-$(CONFIG_FREEZER) += freezer.o obj-$(CONFIG_PROFILING) += profile.o obj-$(CONFIG_STACKTRACE) += stacktrace.o obj-y += time/ -obj-$(CONFIG_FUTEX) += futex.o +obj-$(CONFIG_FUTEX) += futex/ obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o obj-$(CONFIG_SMP) += smp.o ifneq ($(CONFIG_SMP),y) diff --git a/kernel/acct.c b/kernel/acct.c index 23a7ab8e6cbc..2b5cc63eb295 100644 --- a/kernel/acct.c +++ b/kernel/acct.c @@ -331,6 +331,8 @@ static comp_t encode_comp_t(unsigned long value) exp++; } + if (exp > (((comp_t) ~0U) >> MANTSIZE)) + return (comp_t) ~0U; /* * Clean it up and polish it off. */ diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index 7cb13b9f69a6..0c2fa93bd8d2 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -3864,6 +3864,11 @@ static int btf_func_proto_check(struct btf_verifier_env *env, break; } + if (btf_type_is_resolve_source_only(arg_type)) { + btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1); + return -EINVAL; + } + if (args[i].name_off && (!btf_name_offset_valid(btf, args[i].name_off) || !btf_name_valid_identifier(btf, args[i].name_off))) { diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index cceb29b0585f..488225bb42f6 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -606,6 +606,14 @@ static const char *kernel_type_name(const struct btf* btf, u32 id) return btf_name_by_offset(btf, btf_type_by_id(btf, id)->name_off); } +/* The reg state of a pointer or a bounded scalar was saved when + * it was spilled to the stack. + */ +static bool is_spilled_reg(const struct bpf_stack_state *stack) +{ + return stack->slot_type[BPF_REG_SIZE - 1] == STACK_SPILL; +} + static void print_verifier_state(struct bpf_verifier_env *env, const struct bpf_func_state *state) { @@ -709,7 +717,7 @@ static void print_verifier_state(struct bpf_verifier_env *env, continue; verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE); print_liveness(env, state->stack[i].spilled_ptr.live); - if (state->stack[i].slot_type[0] == STACK_SPILL) { + if (is_spilled_reg(&state->stack[i])) { reg = &state->stack[i].spilled_ptr; t = reg->type; verbose(env, "=%s", reg_type_str(env, t)); @@ -2351,7 +2359,7 @@ static void mark_all_scalars_precise(struct bpf_verifier_env *env, reg->precise = true; } for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) { - if (func->stack[j].slot_type[0] != STACK_SPILL) + if (!is_spilled_reg(&func->stack[j])) continue; reg = &func->stack[j].spilled_ptr; if (reg->type != SCALAR_VALUE) @@ -2361,7 +2369,7 @@ static void mark_all_scalars_precise(struct bpf_verifier_env *env, } } -static int __mark_chain_precision(struct bpf_verifier_env *env, int regno, +static int __mark_chain_precision(struct bpf_verifier_env *env, int frame, int regno, int spi) { struct bpf_verifier_state *st = env->cur_state; @@ -2378,7 +2386,7 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno, if (!env->bpf_capable) return 0; - func = st->frame[st->curframe]; + func = st->frame[frame]; if (regno >= 0) { reg = &func->regs[regno]; if (reg->type != SCALAR_VALUE) { @@ -2393,7 +2401,7 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno, } while (spi >= 0) { - if (func->stack[spi].slot_type[0] != STACK_SPILL) { + if (!is_spilled_reg(&func->stack[spi])) { stack_mask = 0; break; } @@ -2459,7 +2467,7 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno, break; new_marks = false; - func = st->frame[st->curframe]; + func = 
st->frame[frame]; bitmap_from_u64(mask, reg_mask); for_each_set_bit(i, mask, 32) { reg = &func->regs[i]; @@ -2492,7 +2500,7 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno, return 0; } - if (func->stack[i].slot_type[0] != STACK_SPILL) { + if (!is_spilled_reg(&func->stack[i])) { stack_mask &= ~(1ull << i); continue; } @@ -2525,12 +2533,17 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno, static int mark_chain_precision(struct bpf_verifier_env *env, int regno) { - return __mark_chain_precision(env, regno, -1); + return __mark_chain_precision(env, env->cur_state->curframe, regno, -1); } -static int mark_chain_precision_stack(struct bpf_verifier_env *env, int spi) +static int mark_chain_precision_frame(struct bpf_verifier_env *env, int frame, int regno) { - return __mark_chain_precision(env, -1, spi); + return __mark_chain_precision(env, frame, regno, -1); +} + +static int mark_chain_precision_stack_frame(struct bpf_verifier_env *env, int frame, int spi) +{ + return __mark_chain_precision(env, frame, -1, spi); } static bool is_spillable_regtype(enum bpf_reg_type type) @@ -2682,7 +2695,7 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env, /* regular write of data into stack destroys any spilled ptr */ state->stack[spi].spilled_ptr.type = NOT_INIT; /* Mark slots as STACK_MISC if they belonged to spilled ptr. */ - if (state->stack[spi].slot_type[0] == STACK_SPILL) + if (is_spilled_reg(&state->stack[spi])) for (i = 0; i < BPF_REG_SIZE; i++) state->stack[spi].slot_type[i] = STACK_MISC; @@ -2772,14 +2785,17 @@ static int check_stack_write_var_off(struct bpf_verifier_env *env, spi = slot / BPF_REG_SIZE; stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE]; - if (!env->allow_ptr_leaks - && *stype != NOT_INIT - && *stype != SCALAR_VALUE) { - /* Reject the write if there's are spilled pointers in - * range. If we didn't reject here, the ptr status - * would be erased below (even though not all slots are - * actually overwritten), possibly opening the door to - * leaks. + if (!env->allow_ptr_leaks && *stype != STACK_MISC && *stype != STACK_ZERO) { + /* Reject the write if range we may write to has not + * been initialized beforehand. If we didn't reject + * here, the ptr status would be erased below (even + * though not all slots are actually overwritten), + * possibly opening the door to leaks. + * + * We do however catch STACK_INVALID case below, and + * only allow reading possibly uninitialized memory + * later for CAP_PERFMON, as the write may not happen to + * that slot. 
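For illustration, a minimal BPF-C sketch (an assumption, not taken from the patch) of the access pattern this check is about: an unprivileged program writing through a bounded but variable stack offset into slots it never initialized; whether the compiler actually lowers the array access to a variable-offset stack write depends on its code generation.

/* Hypothetical program exercising a variable-offset stack write. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("socket")
int var_off_stack_write(struct __sk_buff *skb)
{
	char buf[16];			/* deliberately left uninitialized */
	__u32 idx = skb->len & 7;	/* bounded but variable offset */

	buf[idx] = 1;			/* variable-offset write into the stack */
	return buf[idx];
}

char LICENSE[] SEC("license") = "GPL";
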
*/ verbose(env, "spilled ptr in range of var-offset stack write; insn %d, ptr off: %d", insn_idx, i); @@ -2892,7 +2908,7 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env, stype = reg_state->stack[spi].slot_type; reg = ®_state->stack[spi].spilled_ptr; - if (stype[0] == STACK_SPILL) { + if (is_spilled_reg(®_state->stack[spi])) { if (size != BPF_REG_SIZE) { if (reg->type != SCALAR_VALUE) { verbose_linfo(env, env->insn_idx, "; "); @@ -4531,11 +4547,11 @@ static int check_stack_range_initialized( goto mark; } - if (state->stack[spi].slot_type[0] == STACK_SPILL && + if (is_spilled_reg(&state->stack[spi]) && state->stack[spi].spilled_ptr.type == PTR_TO_BTF_ID) goto mark; - if (state->stack[spi].slot_type[0] == STACK_SPILL && + if (is_spilled_reg(&state->stack[spi]) && (state->stack[spi].spilled_ptr.type == SCALAR_VALUE || env->allow_ptr_leaks)) { if (clobber) { @@ -7995,6 +8011,11 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env, return err; return adjust_ptr_min_max_vals(env, insn, dst_reg, src_reg); + } else if (dst_reg->precise) { + /* if dst_reg is precise, src_reg should be precise as well */ + err = mark_chain_precision(env, insn->src_reg); + if (err) + return err; } } else { /* Pretend the src is a reg with a known value, since we only @@ -10334,9 +10355,9 @@ static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old, * return false to continue verification of this path */ return false; - if (i % BPF_REG_SIZE) + if (i % BPF_REG_SIZE != BPF_REG_SIZE - 1) continue; - if (old->stack[spi].slot_type[0] != STACK_SPILL) + if (!is_spilled_reg(&old->stack[spi])) continue; if (!regsafe(env, &old->stack[spi].spilled_ptr, &cur->stack[spi].spilled_ptr, idmap)) @@ -10527,34 +10548,36 @@ static int propagate_precision(struct bpf_verifier_env *env, { struct bpf_reg_state *state_reg; struct bpf_func_state *state; - int i, err = 0; + int i, err = 0, fr; - state = old->frame[old->curframe]; - state_reg = state->regs; - for (i = 0; i < BPF_REG_FP; i++, state_reg++) { - if (state_reg->type != SCALAR_VALUE || - !state_reg->precise) - continue; - if (env->log.level & BPF_LOG_LEVEL2) - verbose(env, "propagating r%d\n", i); - err = mark_chain_precision(env, i); - if (err < 0) - return err; - } + for (fr = old->curframe; fr >= 0; fr--) { + state = old->frame[fr]; + state_reg = state->regs; + for (i = 0; i < BPF_REG_FP; i++, state_reg++) { + if (state_reg->type != SCALAR_VALUE || + !state_reg->precise) + continue; + if (env->log.level & BPF_LOG_LEVEL2) + verbose(env, "frame %d: propagating r%d\n", i, fr); + err = mark_chain_precision_frame(env, fr, i); + if (err < 0) + return err; + } - for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { - if (state->stack[i].slot_type[0] != STACK_SPILL) - continue; - state_reg = &state->stack[i].spilled_ptr; - if (state_reg->type != SCALAR_VALUE || - !state_reg->precise) - continue; - if (env->log.level & BPF_LOG_LEVEL2) - verbose(env, "propagating fp%d\n", - (-i - 1) * BPF_REG_SIZE); - err = mark_chain_precision_stack(env, i); - if (err < 0) - return err; + for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { + if (!is_spilled_reg(&state->stack[i])) + continue; + state_reg = &state->stack[i].spilled_ptr; + if (state_reg->type != SCALAR_VALUE || + !state_reg->precise) + continue; + if (env->log.level & BPF_LOG_LEVEL2) + verbose(env, "frame %d: propagating fp%d\n", + (-i - 1) * BPF_REG_SIZE, fr); + err = mark_chain_precision_stack_frame(env, fr, i); + if (err < 0) + return err; + } } return 0; } @@ 
-12109,6 +12132,10 @@ static int opt_subreg_zext_lo32_rnd_hi32(struct bpf_verifier_env *env, if (!bpf_jit_needs_zext() && !is_cmpxchg_insn(&insn)) continue; + /* Zero-extension is done by the caller. */ + if (bpf_pseudo_kfunc_call(&insn)) + continue; + if (WARN_ON(load_reg == -1)) { verbose(env, "verifier bug. zext_dst is set, but no reg is defined\n"); return -EFAULT; diff --git a/kernel/cpu.c b/kernel/cpu.c index da871eb07566..393114c10c28 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -662,21 +662,51 @@ static bool cpuhp_next_state(bool bringup, return true; } -static int cpuhp_invoke_callback_range(bool bringup, - unsigned int cpu, - struct cpuhp_cpu_state *st, - enum cpuhp_state target) +static int __cpuhp_invoke_callback_range(bool bringup, + unsigned int cpu, + struct cpuhp_cpu_state *st, + enum cpuhp_state target, + bool nofail) { enum cpuhp_state state; - int err = 0; + int ret = 0; while (cpuhp_next_state(bringup, &state, st, target)) { + int err; + err = cpuhp_invoke_callback(cpu, state, bringup, NULL, NULL); - if (err) + if (!err) + continue; + + if (nofail) { + pr_warn("CPU %u %s state %s (%d) failed (%d)\n", + cpu, bringup ? "UP" : "DOWN", + cpuhp_get_step(st->state)->name, + st->state, err); + ret = -1; + } else { + ret = err; break; + } } - return err; + return ret; +} + +static inline int cpuhp_invoke_callback_range(bool bringup, + unsigned int cpu, + struct cpuhp_cpu_state *st, + enum cpuhp_state target) +{ + return __cpuhp_invoke_callback_range(bringup, cpu, st, target, false); +} + +static inline void cpuhp_invoke_callback_range_nofail(bool bringup, + unsigned int cpu, + struct cpuhp_cpu_state *st, + enum cpuhp_state target) +{ + __cpuhp_invoke_callback_range(bringup, cpu, st, target, true); } static inline bool can_rollback_cpu(struct cpuhp_cpu_state *st) @@ -994,7 +1024,6 @@ static int take_cpu_down(void *_param) struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state); enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE); int err, cpu = smp_processor_id(); - int ret; /* Ensure this CPU doesn't handle any more interrupts. */ err = __cpu_disable(); @@ -1007,13 +1036,10 @@ static int take_cpu_down(void *_param) */ WARN_ON(st->state != (CPUHP_TEARDOWN_CPU - 1)); - /* Invoke the former CPU_DYING callbacks */ - ret = cpuhp_invoke_callback_range(false, cpu, st, target); - /* - * DYING must not fail! + * Invoke the former CPU_DYING callbacks. DYING must not fail! */ - WARN_ON_ONCE(ret); + cpuhp_invoke_callback_range_nofail(false, cpu, st, target); /* Give up timekeeping duties */ tick_handover_do_timer(); @@ -1285,16 +1311,14 @@ void notify_cpu_starting(unsigned int cpu) { struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE); - int ret; rcu_cpu_starting(cpu); /* Enables RCU usage on this CPU. */ cpumask_set_cpu(cpu, &cpus_booted_once_mask); - ret = cpuhp_invoke_callback_range(true, cpu, st, target); /* * STARTING must not fail! */ - WARN_ON_ONCE(ret); + cpuhp_invoke_callback_range_nofail(true, cpu, st, target); } /* @@ -2315,8 +2339,10 @@ static ssize_t target_store(struct device *dev, struct device_attribute *attr, if (st->state < target) ret = cpu_up(dev->id, target); - else + else if (st->state > target) ret = cpu_down(dev->id, target); + else if (WARN_ON(st->target != target)) + st->target = target; out: unlock_device_hotplug(); return ret ? 
ret : count; diff --git a/kernel/events/core.c b/kernel/events/core.c index 5422bd77c7d4..d8795036202a 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -11175,13 +11175,15 @@ static int pmu_dev_alloc(struct pmu *pmu) pmu->dev->groups = pmu->attr_groups; device_initialize(pmu->dev); - ret = dev_set_name(pmu->dev, "%s", pmu->name); - if (ret) - goto free_dev; dev_set_drvdata(pmu->dev, pmu); pmu->dev->bus = &pmu_bus; pmu->dev->release = pmu_dev_release; + + ret = dev_set_name(pmu->dev, "%s", pmu->name); + if (ret) + goto free_dev; + ret = device_add(pmu->dev); if (ret) goto free_dev; @@ -12213,12 +12215,12 @@ SYSCALL_DEFINE5(perf_event_open, if (flags & ~PERF_FLAG_ALL) return -EINVAL; - /* Do we allow access to perf_event_open(2) ? */ - err = security_perf_event_open(&attr, PERF_SECURITY_OPEN); + err = perf_copy_attr(attr_uptr, &attr); if (err) return err; - err = perf_copy_attr(attr_uptr, &attr); + /* Do we allow access to perf_event_open(2) ? */ + err = security_perf_event_open(&attr, PERF_SECURITY_OPEN); if (err) return err; diff --git a/kernel/fork.c b/kernel/fork.c index 908ba3c93893..3fb7e9e6a7b9 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -446,6 +446,9 @@ void put_task_stack(struct task_struct *tsk) void free_task(struct task_struct *tsk) { +#ifdef CONFIG_SECCOMP + WARN_ON_ONCE(tsk->seccomp.filter); +#endif release_user_cpus_ptr(tsk); scs_release(tsk); @@ -2345,12 +2348,6 @@ static __latent_entropy struct task_struct *copy_process( spin_lock(¤t->sighand->siglock); - /* - * Copy seccomp details explicitly here, in case they were changed - * before holding sighand lock. - */ - copy_seccomp(p); - rseq_fork(p, clone_flags); /* Don't start children in a dying pid namespace */ @@ -2365,6 +2362,14 @@ static __latent_entropy struct task_struct *copy_process( goto bad_fork_cancel_cgroup; } + /* No more failure paths after this point. */ + + /* + * Copy seccomp details explicitly here, in case they were changed + * before holding sighand lock. + */ + copy_seccomp(p); + init_task_pid_links(p); if (likely(p->pid)) { ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace); diff --git a/kernel/futex/Makefile b/kernel/futex/Makefile new file mode 100644 index 000000000000..b89ba3fba343 --- /dev/null +++ b/kernel/futex/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0 + +obj-y += core.o diff --git a/kernel/futex.c b/kernel/futex/core.c similarity index 99% rename from kernel/futex.c rename to kernel/futex/core.c index c15ad276fd15..764e73622b38 100644 --- a/kernel/futex.c +++ b/kernel/futex/core.c @@ -42,7 +42,7 @@ #include -#include "locking/rtmutex_common.h" +#include "../locking/rtmutex_common.h" /* * READ this before attempting to hack on futexes! @@ -3632,6 +3632,7 @@ static int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, bool pi, bool pending_op) { u32 uval, nval, mval; + pid_t owner; int err; /* Futex address must be 32bit aligned */ @@ -3653,6 +3654,10 @@ retry: * 2. A woken up waiter is killed before it can acquire the * futex in user space. * + * In the second case, the wake up notification could be generated + * by the unlock path in user space after setting the futex value + * to zero or by the kernel after setting the OWNER_DIED bit below. + * * In both cases the TID validation below prevents a wakeup of * potential waiters which can cause these waiters to block * forever. 
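The TID validation discussed above operates on the user-space futex word. As a reference sketch (not part of the patch), the owner TID and state bits of that word decompose with the UAPI masks like this:

/* Decomposing a user-space futex value (layout per <linux/futex.h>). */
#include <stdint.h>
#include <stdio.h>
#include <linux/futex.h>	/* FUTEX_TID_MASK, FUTEX_OWNER_DIED, FUTEX_WAITERS */

static void dump_futex_word(uint32_t uval)
{
	uint32_t owner = uval & FUTEX_TID_MASK;

	printf("owner tid %u%s%s\n", owner,
	       (uval & FUTEX_WAITERS) ? ", waiters pending" : "",
	       (uval & FUTEX_OWNER_DIED) ? ", owner died" : "");
}

int main(void)
{
	dump_futex_word(1234);				/* uncontended, owned by tid 1234 */
	dump_futex_word(0);				/* owner part == 0: wake-up only path */
	dump_futex_word(FUTEX_OWNER_DIED | 1234);	/* OWNER_DIED already set */
	return 0;
}
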
@@ -3661,24 +3666,27 @@ retry: * * 1) task->robust_list->list_op_pending != NULL * @pending_op == true - * 2) User space futex value == 0 + * 2) The owner part of user space futex value == 0 * 3) Regular futex: @pi == false * * If these conditions are met, it is safe to attempt waking up a * potential waiter without touching the user space futex value and - * trying to set the OWNER_DIED bit. The user space futex value is - * uncontended and the rest of the user space mutex state is - * consistent, so a woken waiter will just take over the - * uncontended futex. Setting the OWNER_DIED bit would create - * inconsistent state and malfunction of the user space owner died - * handling. + * trying to set the OWNER_DIED bit. If the futex value is zero, + * the rest of the user space mutex state is consistent, so a woken + * waiter will just take over the uncontended futex. Setting the + * OWNER_DIED bit would create inconsistent state and malfunction + * of the user space owner died handling. Otherwise, the OWNER_DIED + * bit is already set, and the woken waiter is expected to deal with + * this. */ - if (pending_op && !pi && !uval) { + owner = uval & FUTEX_TID_MASK; + + if (pending_op && !pi && !owner) { futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY); return 0; } - if ((uval & FUTEX_TID_MASK) != task_pid_vnr(curr)) + if (owner != task_pid_vnr(curr)) return 0; /* diff --git a/kernel/gcov/gcc_4_7.c b/kernel/gcov/gcc_4_7.c index 7971e989e425..74a4ef1da9ad 100644 --- a/kernel/gcov/gcc_4_7.c +++ b/kernel/gcov/gcc_4_7.c @@ -82,6 +82,7 @@ struct gcov_fn_info { * @version: gcov version magic indicating the gcc version used for compilation * @next: list head for a singly-linked list * @stamp: uniquifying time stamp + * @checksum: unique object checksum * @filename: name of the associated gcov data file * @merge: merge functions (null for unused counter type) * @n_functions: number of instrumented functions @@ -94,6 +95,10 @@ struct gcov_info { unsigned int version; struct gcov_info *next; unsigned int stamp; + /* Since GCC 12.1 a checksum field is added. */ +#if (__GNUC__ >= 12) + unsigned int checksum; +#endif const char *filename; void (*merge[GCOV_COUNTERS])(gcov_type *, unsigned int); unsigned int n_functions; diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h index e58342ace11f..f1d83a8b4417 100644 --- a/kernel/irq/internals.h +++ b/kernel/irq/internals.h @@ -52,6 +52,7 @@ enum { * IRQS_PENDING - irq is pending and replayed later * IRQS_SUSPENDED - irq is suspended * IRQS_NMI - irq line is used to deliver NMIs + * IRQS_SYSFS - descriptor has been added to sysfs */ enum { IRQS_AUTODETECT = 0x00000001, @@ -64,6 +65,7 @@ enum { IRQS_SUSPENDED = 0x00000800, IRQS_TIMINGS = 0x00001000, IRQS_NMI = 0x00002000, + IRQS_SYSFS = 0x00004000, }; #include "debug.h" diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c index 21b3ac2a29d2..7a45fd593245 100644 --- a/kernel/irq/irqdesc.c +++ b/kernel/irq/irqdesc.c @@ -288,22 +288,25 @@ static void irq_sysfs_add(int irq, struct irq_desc *desc) if (irq_kobj_base) { /* * Continue even in case of failure as this is nothing - * crucial. + * crucial and failures in the late irq_sysfs_init() + * cannot be rolled back. */ if (kobject_add(&desc->kobj, irq_kobj_base, "%d", irq)) pr_warn("Failed to add kobject for irq %d\n", irq); + else + desc->istate |= IRQS_SYSFS; } } static void irq_sysfs_del(struct irq_desc *desc) { /* - * If irq_sysfs_init() has not yet been invoked (early boot), then - * irq_kobj_base is NULL and the descriptor was never added. 
- * kobject_del() complains about a object with no parent, so make - * it conditional. + * Only invoke kobject_del() when kobject_add() was successfully + * invoked for the descriptor. This covers both early boot, where + * sysfs is not initialized yet, and the case of a failed + * kobject_add() invocation. */ - if (irq_kobj_base) + if (desc->istate & IRQS_SYSFS) kobject_del(&desc->kobj); } diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c index 76e67d1e02d4..526510b3791e 100644 --- a/kernel/kcsan/core.c +++ b/kernel/kcsan/core.c @@ -14,10 +14,12 @@ #include #include #include +#include #include #include #include #include +#include #include #include "encoding.h" @@ -1060,3 +1062,51 @@ EXPORT_SYMBOL(__tsan_atomic_thread_fence); void __tsan_atomic_signal_fence(int memorder); void __tsan_atomic_signal_fence(int memorder) { } EXPORT_SYMBOL(__tsan_atomic_signal_fence); + +#ifdef __HAVE_ARCH_MEMSET +void *__tsan_memset(void *s, int c, size_t count); +noinline void *__tsan_memset(void *s, int c, size_t count) +{ + /* + * Instead of not setting up watchpoints where accessed size is greater + * than MAX_ENCODABLE_SIZE, truncate checked size to MAX_ENCODABLE_SIZE. + */ + size_t check_len = min_t(size_t, count, MAX_ENCODABLE_SIZE); + + check_access(s, check_len, KCSAN_ACCESS_WRITE); + return memset(s, c, count); +} +#else +void *__tsan_memset(void *s, int c, size_t count) __alias(memset); +#endif +EXPORT_SYMBOL(__tsan_memset); + +#ifdef __HAVE_ARCH_MEMMOVE +void *__tsan_memmove(void *dst, const void *src, size_t len); +noinline void *__tsan_memmove(void *dst, const void *src, size_t len) +{ + size_t check_len = min_t(size_t, len, MAX_ENCODABLE_SIZE); + + check_access(dst, check_len, KCSAN_ACCESS_WRITE); + check_access(src, check_len, 0); + return memmove(dst, src, len); +} +#else +void *__tsan_memmove(void *dst, const void *src, size_t len) __alias(memmove); +#endif +EXPORT_SYMBOL(__tsan_memmove); + +#ifdef __HAVE_ARCH_MEMCPY +void *__tsan_memcpy(void *dst, const void *src, size_t len); +noinline void *__tsan_memcpy(void *dst, const void *src, size_t len) +{ + size_t check_len = min_t(size_t, len, MAX_ENCODABLE_SIZE); + + check_access(dst, check_len, KCSAN_ACCESS_WRITE); + check_access(src, check_len, 0); + return memcpy(dst, src, len); +} +#else +void *__tsan_memcpy(void *dst, const void *src, size_t len) __alias(memcpy); +#endif +EXPORT_SYMBOL(__tsan_memcpy); diff --git a/kernel/padata.c b/kernel/padata.c index 18d3a5c699d8..c17f772cc315 100644 --- a/kernel/padata.c +++ b/kernel/padata.c @@ -207,14 +207,16 @@ int padata_do_parallel(struct padata_shell *ps, pw = padata_work_alloc(); spin_unlock(&padata_works_lock); + if (!pw) { + /* Maximum works limit exceeded, run in the current task. */ + padata->parallel(padata); + } + rcu_read_unlock_bh(); if (pw) { padata_work_init(pw, padata_parallel_worker, padata, 0); queue_work(pinst->parallel_wq, &pw->pw_work); - } else { - /* Maximum works limit exceeded, run in the current task. */ - padata->parallel(padata); } return 0; @@ -388,13 +390,16 @@ void padata_do_serial(struct padata_priv *padata) int hashed_cpu = padata_cpu_hash(pd, padata->seq_nr); struct padata_list *reorder = per_cpu_ptr(pd->reorder_list, hashed_cpu); struct padata_priv *cur; + struct list_head *pos; spin_lock(&reorder->lock); /* Sort in ascending order of sequence number. 
*/ - list_for_each_entry_reverse(cur, &reorder->list, list) + list_for_each_prev(pos, &reorder->list) { + cur = list_entry(pos, struct padata_priv, list); if (cur->seq_nr < padata->seq_nr) break; - list_add(&padata->list, &cur->list); + } + list_add(&padata->list, pos); spin_unlock(&reorder->lock); /* diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index 330d49937692..475d630e650f 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c @@ -1719,8 +1719,8 @@ static unsigned long minimum_image_size(unsigned long saveable) * /sys/power/reserved_size, respectively). To make this happen, we compute the * total number of available page frames and allocate at least * - * ([page frames total] + PAGES_FOR_IO + [metadata pages]) / 2 - * + 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE) + * ([page frames total] - PAGES_FOR_IO - [metadata pages]) / 2 + * - 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE) * * of them, which corresponds to the maximum size of a hibernation image. * diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h index ae8396032b5d..4bd07cc3c0ea 100644 --- a/kernel/rcu/tasks.h +++ b/kernel/rcu/tasks.h @@ -892,32 +892,24 @@ static void trc_read_check_handler(void *t_in) // If the task is no longer running on this CPU, leave. if (unlikely(texp != t)) { - if (WARN_ON_ONCE(atomic_dec_and_test(&trc_n_readers_need_end))) - wake_up(&trc_wait); goto reset_ipi; // Already on holdout list, so will check later. } // If the task is not in a read-side critical section, and // if this is the last reader, awaken the grace-period kthread. if (likely(!READ_ONCE(t->trc_reader_nesting))) { - if (WARN_ON_ONCE(atomic_dec_and_test(&trc_n_readers_need_end))) - wake_up(&trc_wait); - // Mark as checked after decrement to avoid false - // positives on the above WARN_ON_ONCE(). WRITE_ONCE(t->trc_reader_checked, true); goto reset_ipi; } // If we are racing with an rcu_read_unlock_trace(), try again later. - if (unlikely(READ_ONCE(t->trc_reader_nesting) < 0)) { - if (WARN_ON_ONCE(atomic_dec_and_test(&trc_n_readers_need_end))) - wake_up(&trc_wait); + if (unlikely(READ_ONCE(t->trc_reader_nesting) < 0)) goto reset_ipi; - } WRITE_ONCE(t->trc_reader_checked, true); // Get here if the task is in a read-side critical section. Set // its state so that it will awaken the grace-period kthread upon // exit from that critical section. + atomic_inc(&trc_n_readers_need_end); // One more to wait on. WARN_ON_ONCE(READ_ONCE(t->trc_reader_special.b.need_qs)); WRITE_ONCE(t->trc_reader_special.b.need_qs, true); @@ -1017,21 +1009,15 @@ static void trc_wait_for_one_reader(struct task_struct *t, if (per_cpu(trc_ipi_to_cpu, cpu) || t->trc_ipi_to_cpu >= 0) return; - atomic_inc(&trc_n_readers_need_end); per_cpu(trc_ipi_to_cpu, cpu) = true; t->trc_ipi_to_cpu = cpu; rcu_tasks_trace.n_ipis++; - if (smp_call_function_single(cpu, - trc_read_check_handler, t, 0)) { + if (smp_call_function_single(cpu, trc_read_check_handler, t, 0)) { // Just in case there is some other reason for // failure than the target CPU being offline. rcu_tasks_trace.n_ipis_fails++; per_cpu(trc_ipi_to_cpu, cpu) = false; t->trc_ipi_to_cpu = cpu; - if (atomic_dec_and_test(&trc_n_readers_need_end)) { - WARN_ON_ONCE(1); - wake_up(&trc_wait); - } } } } diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 63f7ce228cc3..cf101da389b0 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -2674,7 +2674,7 @@ void rcu_force_quiescent_state(void) struct rcu_node *rnp_old = NULL; /* Funnel through hierarchy to reduce memory contention. 
*/ - rnp = __this_cpu_read(rcu_data.mynode); + rnp = raw_cpu_read(rcu_data.mynode); for (; rnp != NULL; rnp = rnp->parent) { ret = (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) || !raw_spin_trylock(&rnp->fqslock); diff --git a/kernel/relay.c b/kernel/relay.c index d1a67fbb819d..6825b8403877 100644 --- a/kernel/relay.c +++ b/kernel/relay.c @@ -151,13 +151,13 @@ static struct rchan_buf *relay_create_buf(struct rchan *chan) { struct rchan_buf *buf; - if (chan->n_subbufs > KMALLOC_MAX_SIZE / sizeof(size_t *)) + if (chan->n_subbufs > KMALLOC_MAX_SIZE / sizeof(size_t)) return NULL; buf = kzalloc(sizeof(struct rchan_buf), GFP_KERNEL); if (!buf) return NULL; - buf->padding = kmalloc_array(chan->n_subbufs, sizeof(size_t *), + buf->padding = kmalloc_array(chan->n_subbufs, sizeof(size_t), GFP_KERNEL); if (!buf->padding) goto free_buf; diff --git a/kernel/sched/cpudeadline.c b/kernel/sched/cpudeadline.c index ceb03d76c0cc..221ca1050573 100644 --- a/kernel/sched/cpudeadline.c +++ b/kernel/sched/cpudeadline.c @@ -124,7 +124,7 @@ int cpudl_find(struct cpudl *cp, struct task_struct *p, unsigned long cap, max_cap = 0; int cpu, max_cpu = -1; - if (!static_branch_unlikely(&sched_asym_cpucapacity)) + if (!sched_asym_cpucap_active()) return 1; /* Ensure the capacity of the CPUs fits the task. */ diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index 147b757d162b..2a2f32eaffcc 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -112,7 +112,7 @@ static inline unsigned long __dl_bw_capacity(int i) */ static inline unsigned long dl_bw_capacity(int i) { - if (!static_branch_unlikely(&sched_asym_cpucapacity) && + if (!sched_asym_cpucap_active() && capacity_orig_of(i) == SCHED_CAPACITY_SCALE) { return dl_bw_cpus(i) << SCHED_CAPACITY_SHIFT; } else { @@ -1703,7 +1703,7 @@ select_task_rq_dl(struct task_struct *p, int cpu, int flags) * Take the capacity of the CPU into account to * ensure it fits the requirement of the task. */ - if (static_branch_unlikely(&sched_asym_cpucapacity)) + if (sched_asym_cpucap_active()) select_rq |= !dl_task_fits_capacity(p, cpu); if (select_rq) { diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index a853e4e9e3c3..6648683cd964 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -4120,14 +4120,140 @@ done: trace_sched_util_est_se_tp(&p->se); } -static inline int task_fits_capacity(struct task_struct *p, long capacity) +static inline int util_fits_cpu(unsigned long util, + unsigned long uclamp_min, + unsigned long uclamp_max, + int cpu) { - return fits_capacity(uclamp_task_util(p), capacity); + unsigned long capacity_orig, capacity_orig_thermal; + unsigned long capacity = capacity_of(cpu); + bool fits, uclamp_max_fits; + + /* + * Check if the real util fits without any uclamp boost/cap applied. + */ + fits = fits_capacity(util, capacity); + + if (!uclamp_is_used()) + return fits; + + /* + * We must use capacity_orig_of() for comparing against uclamp_min and + * uclamp_max. We only care about capacity pressure (by using + * capacity_of()) for comparing against the real util. + * + * If a task is boosted to 1024 for example, we don't want a tiny + * pressure to skew the check whether it fits a CPU or not. + * + * Similarly if a task is capped to capacity_orig_of(little_cpu), it + * should fit a little cpu even if there's some pressure. + * + * Only exception is for thermal pressure since it has a direct impact + * on available OPP of the system. 
+ * + * We honour it for uclamp_min only as a drop in performance level + * could result in not getting the requested minimum performance level. + * + * For uclamp_max, we can tolerate a drop in performance level as the + * goal is to cap the task. So it's okay if it's getting less. + * + * In case of capacity inversion, which is not handled yet, we should + * honour the inverted capacity for both uclamp_min and uclamp_max all + * the time. + */ + capacity_orig = capacity_orig_of(cpu); + capacity_orig_thermal = capacity_orig - arch_scale_thermal_pressure(cpu); + + /* + * We want to force a task to fit a cpu as implied by uclamp_max. + * But we do have some corner cases to cater for.. + * + * + * C=z + * | ___ + * | C=y | | + * |_ _ _ _ _ _ _ _ _ ___ _ _ _ | _ | _ _ _ _ _ uclamp_max + * | C=x | | | | + * | ___ | | | | + * | | | | | | | (util somewhere in this region) + * | | | | | | | + * | | | | | | | + * +---------------------------------------- + * cpu0 cpu1 cpu2 + * + * In the above example if a task is capped to a specific performance + * point, y, then when: + * + * * util = 80% of x then it does not fit on cpu0 and should migrate + * to cpu1 + * * util = 80% of y then it is forced to fit on cpu1 to honour + * uclamp_max request. + * + * which is what we're enforcing here. A task always fits if + * uclamp_max <= capacity_orig. But when uclamp_max > capacity_orig, + * the normal upmigration rules should withhold still. + * + * Only exception is when we are on max capacity, then we need to be + * careful not to block overutilized state. This is so because: + * + * 1. There's no concept of capping at max_capacity! We can't go + * beyond this performance level anyway. + * 2. The system is being saturated when we're operating near + * max capacity, it doesn't make sense to block overutilized. + */ + uclamp_max_fits = (capacity_orig == SCHED_CAPACITY_SCALE) && (uclamp_max == SCHED_CAPACITY_SCALE); + uclamp_max_fits = !uclamp_max_fits && (uclamp_max <= capacity_orig); + fits = fits || uclamp_max_fits; + + /* + * + * C=z + * | ___ (region a, capped, util >= uclamp_max) + * | C=y | | + * |_ _ _ _ _ _ _ _ _ ___ _ _ _ | _ | _ _ _ _ _ uclamp_max + * | C=x | | | | + * | ___ | | | | (region b, uclamp_min <= util <= uclamp_max) + * |_ _ _|_ _|_ _ _ _| _ | _ _ _| _ | _ _ _ _ _ uclamp_min + * | | | | | | | + * | | | | | | | (region c, boosted, util < uclamp_min) + * +---------------------------------------- + * cpu0 cpu1 cpu2 + * + * a) If util > uclamp_max, then we're capped, we don't care about + * actual fitness value here. We only care if uclamp_max fits + * capacity without taking margin/pressure into account. + * See comment above. + * + * b) If uclamp_min <= util <= uclamp_max, then the normal + * fits_capacity() rules apply. Except we need to ensure that we + * enforce we remain within uclamp_max, see comment above. + * + * c) If util < uclamp_min, then we are boosted. Same as (b) but we + * need to take into account the boosted value fits the CPU without + * taking margin/pressure into account. + * + * Cases (a) and (b) are handled in the 'fits' variable already. We + * just need to consider an extra check for case (c) after ensuring we + * handle the case uclamp_min > uclamp_max. 
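As a rough numeric sketch of the plain fits_capacity() part of the check above (the ~20% headroom rule is the scheduler's usual definition; the standalone reimplementation below is an assumption for illustration, not the patch itself):

/* fits_capacity() keeps ~20% headroom: util must stay below 80% of capacity. */
#include <stdbool.h>
#include <stdio.h>

static bool fits_capacity(unsigned long util, unsigned long capacity)
{
	return util * 1280 < capacity * 1024;	/* i.e. util < 0.8 * capacity */
}

int main(void)
{
	/* A mid CPU with capacity_orig 760 out of SCHED_CAPACITY_SCALE (1024). */
	printf("util 600 on cap 760: %s\n", fits_capacity(600, 760) ? "fits" : "misfit");
	printf("util 640 on cap 760: %s\n", fits_capacity(640, 760) ? "fits" : "misfit");
	return 0;
}
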
+ */ + uclamp_min = min(uclamp_min, uclamp_max); + if (util < uclamp_min && capacity_orig != SCHED_CAPACITY_SCALE) + fits = fits && (uclamp_min <= capacity_orig_thermal); + + return fits; +} + +static inline int task_fits_cpu(struct task_struct *p, int cpu) +{ + unsigned long uclamp_min = uclamp_eff_value(p, UCLAMP_MIN); + unsigned long uclamp_max = uclamp_eff_value(p, UCLAMP_MAX); + unsigned long util = task_util_est(p); + return util_fits_cpu(util, uclamp_min, uclamp_max, cpu); } static inline void update_misfit_status(struct task_struct *p, struct rq *rq) { - if (!static_branch_unlikely(&sched_asym_cpucapacity)) + if (!sched_asym_cpucap_active()) return; if (!p || p->nr_cpus_allowed == 1) { @@ -4135,7 +4261,7 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq) return; } - if (task_fits_capacity(p, capacity_of(cpu_of(rq)))) { + if (task_fits_cpu(p, cpu_of(rq))) { rq->misfit_task_load = 0; return; } @@ -6372,21 +6498,23 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, bool static int select_idle_capacity(struct task_struct *p, struct sched_domain *sd, int target) { - unsigned long task_util, best_cap = 0; + unsigned long task_util, util_min, util_max, best_cap = 0; int cpu, best_cpu = -1; struct cpumask *cpus; cpus = this_cpu_cpumask_var_ptr(select_idle_mask); cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr); - task_util = uclamp_task_util(p); + task_util = task_util_est(p); + util_min = uclamp_eff_value(p, UCLAMP_MIN); + util_max = uclamp_eff_value(p, UCLAMP_MAX); for_each_cpu_wrap(cpu, cpus, target) { unsigned long cpu_cap = capacity_of(cpu); if (!available_idle_cpu(cpu) && !sched_idle_cpu(cpu)) continue; - if (fits_capacity(task_util, cpu_cap)) + if (util_fits_cpu(task_util, util_min, util_max, cpu)) return cpu; if (cpu_cap > best_cap) { @@ -6398,10 +6526,13 @@ select_idle_capacity(struct task_struct *p, struct sched_domain *sd, int target) return best_cpu; } -static inline bool asym_fits_capacity(int task_util, int cpu) +static inline bool asym_fits_cpu(unsigned long util, + unsigned long util_min, + unsigned long util_max, + int cpu) { - if (static_branch_unlikely(&sched_asym_cpucapacity)) - return fits_capacity(task_util, capacity_of(cpu)); + if (sched_asym_cpucap_active()) + return util_fits_cpu(util, util_min, util_max, cpu); return true; } @@ -6413,16 +6544,18 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target) { bool has_idle_core = false; struct sched_domain *sd; - unsigned long task_util; + unsigned long task_util, util_min, util_max; int i, recent_used_cpu; /* * On asymmetric system, update task utilization because we will check * that the task fits with cpu's capacity. 
*/ - if (static_branch_unlikely(&sched_asym_cpucapacity)) { + if (sched_asym_cpucap_active()) { sync_entity_load_avg(&p->se); - task_util = uclamp_task_util(p); + task_util = task_util_est(p); + util_min = uclamp_eff_value(p, UCLAMP_MIN); + util_max = uclamp_eff_value(p, UCLAMP_MAX); } /* @@ -6431,7 +6564,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target) lockdep_assert_irqs_disabled(); if ((available_idle_cpu(target) || sched_idle_cpu(target)) && - asym_fits_capacity(task_util, target)) + asym_fits_cpu(task_util, util_min, util_max, target)) return target; /* @@ -6439,7 +6572,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target) */ if (prev != target && cpus_share_cache(prev, target) && (available_idle_cpu(prev) || sched_idle_cpu(prev)) && - asym_fits_capacity(task_util, prev)) + asym_fits_cpu(task_util, util_min, util_max, prev)) return prev; /* @@ -6454,7 +6587,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target) in_task() && prev == smp_processor_id() && this_rq()->nr_running <= 1 && - asym_fits_capacity(task_util, prev)) { + asym_fits_cpu(task_util, util_min, util_max, prev)) { return prev; } @@ -6466,12 +6599,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target) cpus_share_cache(recent_used_cpu, target) && (available_idle_cpu(recent_used_cpu) || sched_idle_cpu(recent_used_cpu)) && cpumask_test_cpu(p->recent_used_cpu, p->cpus_ptr) && - asym_fits_capacity(task_util, recent_used_cpu)) { - /* - * Replace recent_used_cpu with prev as it is a potential - * candidate for the next wake: - */ - p->recent_used_cpu = prev; + asym_fits_cpu(task_util, util_min, util_max, recent_used_cpu)) { return recent_used_cpu; } @@ -6479,7 +6607,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target) * For asymmetric CPU capacity systems, our domain of interest is * sd_asym_cpucapacity rather than sd_llc. 
*/ - if (static_branch_unlikely(&sched_asym_cpucapacity)) { + if (sched_asym_cpucap_active()) { sd = rcu_dereference(per_cpu(sd_asym_cpucapacity, target)); /* * On an asymmetric CPU capacity system where an exclusive @@ -8033,7 +8161,7 @@ static int detach_tasks(struct lb_env *env) case migrate_misfit: /* This is not a misfit task */ - if (task_fits_capacity(p, capacity_of(env->src_cpu))) + if (task_fits_cpu(p, env->src_cpu)) goto next; env->imbalance = 0; @@ -8918,6 +9046,10 @@ static inline void update_sg_wakeup_stats(struct sched_domain *sd, memset(sgs, 0, sizeof(*sgs)); + /* Assume that task can't fit any CPU of the group */ + if (sd->flags & SD_ASYM_CPUCAPACITY) + sgs->group_misfit_task_load = 1; + for_each_cpu(i, sched_group_span(group)) { struct rq *rq = cpu_rq(i); unsigned int local; @@ -8937,12 +9069,12 @@ static inline void update_sg_wakeup_stats(struct sched_domain *sd, if (!nr_running && idle_cpu_without(i, p)) sgs->idle_cpus++; - } + /* Check if task fits in the CPU */ + if (sd->flags & SD_ASYM_CPUCAPACITY && + sgs->group_misfit_task_load && + task_fits_cpu(p, i)) + sgs->group_misfit_task_load = 0; - /* Check if task fits in the group */ - if (sd->flags & SD_ASYM_CPUCAPACITY && - !task_fits_capacity(p, group->sgc->max_capacity)) { - sgs->group_misfit_task_load = 1; } sgs->group_capacity = group->sgc->capacity; diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index f75dcd3537b8..add67f811e00 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -473,7 +473,7 @@ static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu) unsigned int cpu_cap; /* Only heterogeneous systems can benefit from this check */ - if (!static_branch_unlikely(&sched_asym_cpucapacity)) + if (!sched_asym_cpucap_active()) return true; min_cap = uclamp_eff_value(p, UCLAMP_MIN); @@ -1736,7 +1736,7 @@ static int find_lowest_rq(struct task_struct *task) * If we're on asym system ensure we consider the different capacities * of the CPUs when searching for the lowest_mask. 
*/ - if (static_branch_unlikely(&sched_asym_cpucapacity)) { + if (sched_asym_cpucap_active()) { ret = cpupri_find_fitness(&task_rq(task)->rd->cpupri, task, lowest_mask, diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 7a3fcd70aa86..e1f46ed412bc 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -1788,6 +1788,11 @@ DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing); DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_cpucapacity); extern struct static_key_false sched_asym_cpucapacity; +static __always_inline bool sched_asym_cpucap_active(void) +{ + return static_branch_unlikely(&sched_asym_cpucapacity); +} + struct sched_group_capacity { atomic_t ref; /* @@ -2916,6 +2921,15 @@ static inline bool uclamp_is_used(void) return static_branch_likely(&sched_uclamp_used); } #else /* CONFIG_UCLAMP_TASK */ +static inline unsigned long uclamp_eff_value(struct task_struct *p, + enum uclamp_id clamp_id) +{ + if (clamp_id == UCLAMP_MIN) + return 0; + + return SCHED_CAPACITY_SCALE; +} + static inline unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util, struct task_struct *p) diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index 420ff4bc67fd..4265d125d50f 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig @@ -328,6 +328,7 @@ config SCHED_TRACER config HWLAT_TRACER bool "Tracer to detect hardware latencies (like SMIs)" select GENERIC_TRACER + select TRACER_MAX_TRACE help This tracer, when enabled will create one or more kernel threads, depending on what the cpumask file is set to, which each thread @@ -363,6 +364,7 @@ config HWLAT_TRACER config OSNOISE_TRACER bool "OS Noise tracer" select GENERIC_TRACER + select TRACER_MAX_TRACE help In the context of high-performance computing (HPC), the Operating System Noise (osnoise) refers to the interference experienced by an diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index eaa98e2b468f..16b0d3fa56e0 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c @@ -1547,7 +1547,8 @@ blk_trace_event_print_binary(struct trace_iterator *iter, int flags, static enum print_line_t blk_tracer_print_line(struct trace_iterator *iter) { - if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC)) + if ((iter->ent->type != TRACE_BLK) || + !(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC)) return TRACE_TYPE_UNHANDLED; return print_one_line(iter, true); diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index b26de6ff5d0b..114d94d1e0a9 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -1409,6 +1409,7 @@ int tracing_snapshot_cond_disable(struct trace_array *tr) return false; } EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable); +#define free_snapshot(tr) do { } while (0) #endif /* CONFIG_TRACER_SNAPSHOT */ void tracer_tracing_off(struct trace_array *tr) @@ -1679,6 +1680,8 @@ static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt) } unsigned long __read_mostly tracing_thresh; + +#ifdef CONFIG_TRACER_MAX_TRACE static const struct file_operations tracing_max_lat_fops; #ifdef LATENCY_FS_NOTIFY @@ -1735,18 +1738,14 @@ void latency_fsnotify(struct trace_array *tr) irq_work_queue(&tr->fsnotify_irqwork); } -#elif defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER) \ - || defined(CONFIG_OSNOISE_TRACER) +#else /* !LATENCY_FS_NOTIFY */ #define trace_create_maxlat_file(tr, d_tracer) \ trace_create_file("tracing_max_latency", TRACE_MODE_WRITE, \ d_tracer, &tr->max_latency, &tracing_max_lat_fops) -#else -#define trace_create_maxlat_file(tr, d_tracer) do 
{ } while (0) #endif -#ifdef CONFIG_TRACER_MAX_TRACE /* * Copy the new maximum trace into the separate maximum-trace * structure. (this way the maximum trace is permanently saved, @@ -1821,14 +1820,15 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu, ring_buffer_record_off(tr->max_buffer.buffer); #ifdef CONFIG_TRACER_SNAPSHOT - if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data)) - goto out_unlock; + if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data)) { + arch_spin_unlock(&tr->max_lock); + return; + } #endif swap(tr->array_buffer.buffer, tr->max_buffer.buffer); __update_max_tr(tr, tsk, cpu); - out_unlock: arch_spin_unlock(&tr->max_lock); } @@ -1875,6 +1875,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu) __update_max_tr(tr, tsk, cpu); arch_spin_unlock(&tr->max_lock); } + #endif /* CONFIG_TRACER_MAX_TRACE */ static int wait_on_pipe(struct trace_iterator *iter, int full) @@ -6536,7 +6537,7 @@ out: return ret; } -#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER) +#ifdef CONFIG_TRACER_MAX_TRACE static ssize_t tracing_max_lat_read(struct file *filp, char __user *ubuf, @@ -6763,7 +6764,20 @@ waitagain: ret = print_trace_line(iter); if (ret == TRACE_TYPE_PARTIAL_LINE) { - /* don't print partial lines */ + /* + * If one print_trace_line() fills entire trace_seq in one shot, + * trace_seq_to_user() will returns -EBUSY because save_len == 0, + * In this case, we need to consume it, otherwise, loop will peek + * this event next time, resulting in an infinite loop. + */ + if (save_len == 0) { + iter->seq.full = 0; + trace_seq_puts(&iter->seq, "[LINE TOO BIG]\n"); + trace_consume(iter); + break; + } + + /* In other cases, don't print partial lines */ iter->seq.seq.len = save_len; break; } @@ -7560,7 +7574,7 @@ static const struct file_operations tracing_thresh_fops = { .llseek = generic_file_llseek, }; -#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER) +#ifdef CONFIG_TRACER_MAX_TRACE static const struct file_operations tracing_max_lat_fops = { .open = tracing_open_generic, .read = tracing_max_lat_read, @@ -9549,7 +9563,9 @@ init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer) create_trace_options_dir(tr); +#ifdef CONFIG_TRACER_MAX_TRACE trace_create_maxlat_file(tr, d_tracer); +#endif if (ftrace_create_function_files(tr, d_tracer)) MEM_FAIL(1, "Could not allocate function filter files"); diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 28ea6c0be495..9fc598165f3a 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -309,8 +309,7 @@ struct trace_array { struct array_buffer max_buffer; bool allocated_snapshot; #endif -#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER) \ - || defined(CONFIG_OSNOISE_TRACER) +#ifdef CONFIG_TRACER_MAX_TRACE unsigned long max_latency; #ifdef CONFIG_FSNOTIFY struct dentry *d_max_latency; @@ -688,12 +687,11 @@ void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu, void *cond_data); void update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu); -#endif /* CONFIG_TRACER_MAX_TRACE */ -#if (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER) \ - || defined(CONFIG_OSNOISE_TRACER)) && defined(CONFIG_FSNOTIFY) +#ifdef CONFIG_FSNOTIFY #define LATENCY_FS_NOTIFY #endif +#endif /* CONFIG_TRACER_MAX_TRACE */ #ifdef LATENCY_FS_NOTIFY void latency_fsnotify(struct trace_array *tr); @@ -1941,17 +1939,30 @@ static __always_inline void 
trace_iterator_reset(struct trace_iterator *iter) } /* Check the name is good for event/group/fields */ -static inline bool is_good_name(const char *name) +static inline bool __is_good_name(const char *name, bool hash_ok) { - if (!isalpha(*name) && *name != '_') + if (!isalpha(*name) && *name != '_' && (!hash_ok || *name != '-')) return false; while (*++name != '\0') { - if (!isalpha(*name) && !isdigit(*name) && *name != '_') + if (!isalpha(*name) && !isdigit(*name) && *name != '_' && + (!hash_ok || *name != '-')) return false; } return true; } +/* Check the name is good for event/group/fields */ +static inline bool is_good_name(const char *name) +{ + return __is_good_name(name, false); +} + +/* Check the name is good for system */ +static inline bool is_good_system_name(const char *name) +{ + return __is_good_name(name, true); +} + /* Convert certain expected symbols into '_' when generating event names */ static inline void sanitize_event_name(char *name) { diff --git a/kernel/trace/trace_eprobe.c b/kernel/trace/trace_eprobe.c index d19dab21848e..9806316af127 100644 --- a/kernel/trace/trace_eprobe.c +++ b/kernel/trace/trace_eprobe.c @@ -567,6 +567,9 @@ static void eprobe_trigger_func(struct event_trigger_data *data, { struct eprobe_data *edata = data->private_data; + if (unlikely(!rec)) + return; + if (unlikely(!rec)) return; diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c index 31e3e0bbd129..cc1a078f7a16 100644 --- a/kernel/trace/trace_events_hist.c +++ b/kernel/trace/trace_events_hist.c @@ -452,7 +452,7 @@ struct action_data { * event param, and is passed to the synthetic event * invocation. */ - unsigned int var_ref_idx[TRACING_MAP_VARS_MAX]; + unsigned int var_ref_idx[SYNTH_FIELDS_MAX]; struct synth_event *synth_event; bool use_trace_keyword; char *synth_event_name; @@ -1895,7 +1895,9 @@ static struct hist_field *create_var_ref(struct hist_trigger_data *hist_data, return ref_field; } } - + /* Sanity check to avoid out-of-bound write on 'hist_data->var_refs' */ + if (hist_data->n_var_refs >= TRACING_MAP_VARS_MAX) + return NULL; ref_field = create_hist_field(var_field->hist_data, NULL, flags, NULL); if (ref_field) { if (init_var_ref(ref_field, var_field, system, event_name)) { @@ -3188,6 +3190,7 @@ static int parse_action_params(struct trace_array *tr, char *params, while (params) { if (data->n_params >= SYNTH_FIELDS_MAX) { hist_err(tr, HIST_ERR_TOO_MANY_PARAMS, 0); + ret = -EINVAL; goto out; } @@ -3524,6 +3527,10 @@ static int trace_action_create(struct hist_trigger_data *hist_data, lockdep_assert_held(&event_mutex); + /* Sanity check to avoid out-of-bound write on 'data->var_ref_idx' */ + if (data->n_params > SYNTH_FIELDS_MAX) + return -EINVAL; + if (data->use_trace_keyword) synth_event_name = data->synth_event_name; else @@ -5954,7 +5961,7 @@ enable: /* Just return zero, not the number of registered triggers */ ret = 0; out: - if (ret == 0) + if (ret == 0 && glob[0]) hist_err_clear(); return ret; diff --git a/kernel/trace/trace_events_synth.c b/kernel/trace/trace_events_synth.c index 47eb92b3edd0..2fdf3fd591e1 100644 --- a/kernel/trace/trace_events_synth.c +++ b/kernel/trace/trace_events_synth.c @@ -1275,12 +1275,12 @@ static int __create_synth_event(const char *name, const char *raw_fields) goto err; } - fields[n_fields++] = field; if (n_fields == SYNTH_FIELDS_MAX) { synth_err(SYNTH_ERR_TOO_MANY_FIELDS, 0); ret = -EINVAL; goto err; } + fields[n_fields++] = field; n_fields_this_loop++; } diff --git a/kernel/trace/trace_probe.c 
b/kernel/trace/trace_probe.c index 2bbe4a7c6a2b..cb8f9fe5669a 100644 --- a/kernel/trace/trace_probe.c +++ b/kernel/trace/trace_probe.c @@ -246,7 +246,7 @@ int traceprobe_parse_event_name(const char **pevent, const char **pgroup, return -EINVAL; } strlcpy(buf, event, slash - event + 1); - if (!is_good_name(buf)) { + if (!is_good_system_name(buf)) { trace_probe_log_err(offset, BAD_GROUP_NAME); return -EINVAL; } diff --git a/lib/debugobjects.c b/lib/debugobjects.c index 6946f8e204e3..793c31b7e417 100644 --- a/lib/debugobjects.c +++ b/lib/debugobjects.c @@ -440,6 +440,7 @@ static int object_cpu_offline(unsigned int cpu) struct debug_percpu_free *percpu_pool; struct hlist_node *tmp; struct debug_obj *obj; + unsigned long flags; /* Remote access is safe as the CPU is dead already */ percpu_pool = per_cpu_ptr(&percpu_obj_pool, cpu); @@ -447,6 +448,12 @@ static int object_cpu_offline(unsigned int cpu) hlist_del(&obj->node); kmem_cache_free(obj_cache, obj); } + + raw_spin_lock_irqsave(&pool_lock, flags); + obj_pool_used -= percpu_pool->obj_free; + debug_objects_freed += percpu_pool->obj_free; + raw_spin_unlock_irqrestore(&pool_lock, flags); + percpu_pool->obj_free = 0; return 0; @@ -1321,6 +1328,8 @@ static int __init debug_objects_replace_static_objects(void) hlist_add_head(&obj->node, &objects); } + debug_objects_allocated += i; + /* * debug_objects_mem_init() is now called early that only one CPU is up * and interrupts have been disabled, so it is safe to replace the @@ -1389,6 +1398,7 @@ void __init debug_objects_mem_init(void) debug_objects_enabled = 0; kmem_cache_destroy(obj_cache); pr_warn("out of memory.\n"); + return; } else debug_objects_selftest(); diff --git a/lib/fonts/fonts.c b/lib/fonts/fonts.c index 5f4b07b56cd9..973866438608 100644 --- a/lib/fonts/fonts.c +++ b/lib/fonts/fonts.c @@ -135,8 +135,8 @@ const struct font_desc *get_default_font(int xres, int yres, u32 font_w, if (res > 20) c += 20 - res; - if ((font_w & (1 << (f->width - 1))) && - (font_h & (1 << (f->height - 1)))) + if ((font_w & (1U << (f->width - 1))) && + (font_h & (1U << (f->height - 1)))) c += 1000; if (c > cc) { diff --git a/lib/notifier-error-inject.c b/lib/notifier-error-inject.c index 21016b32d313..2b24ea6c9497 100644 --- a/lib/notifier-error-inject.c +++ b/lib/notifier-error-inject.c @@ -15,7 +15,7 @@ static int debugfs_errno_get(void *data, u64 *val) return 0; } -DEFINE_SIMPLE_ATTRIBUTE(fops_errno, debugfs_errno_get, debugfs_errno_set, +DEFINE_SIMPLE_ATTRIBUTE_SIGNED(fops_errno, debugfs_errno_get, debugfs_errno_set, "%lld\n"); static struct dentry *debugfs_create_errno(const char *name, umode_t mode, diff --git a/lib/test_firmware.c b/lib/test_firmware.c index 1bccd6cd5f48..e68be7aba7d1 100644 --- a/lib/test_firmware.c +++ b/lib/test_firmware.c @@ -1111,6 +1111,7 @@ static int __init test_firmware_init(void) rc = misc_register(&test_fw_misc_device); if (rc) { + __test_firmware_config_free(); kfree(test_fw_config); pr_err("could not register misc device: %d\n", rc); return rc; diff --git a/lib/test_overflow.c b/lib/test_overflow.c index 7a4b6f6c5473..7a5a5738d2d2 100644 --- a/lib/test_overflow.c +++ b/lib/test_overflow.c @@ -588,12 +588,110 @@ static int __init test_overflow_allocation(void) return err; } +struct __test_flex_array { + unsigned long flags; + size_t count; + unsigned long data[]; +}; + +static int __init test_overflow_size_helpers(void) +{ + struct __test_flex_array *obj; + int count = 0; + int err = 0; + int var; + +#define check_one_size_helper(expected, func, args...) 
({ \ + bool __failure = false; \ + size_t _r; \ + \ + _r = func(args); \ + if (_r != (expected)) { \ + pr_warn("expected " #func "(" #args ") " \ + "to return %zu but got %zu instead\n", \ + (size_t)(expected), _r); \ + __failure = true; \ + } \ + count++; \ + __failure; \ +}) + + var = 4; + err |= check_one_size_helper(20, size_mul, var++, 5); + err |= check_one_size_helper(20, size_mul, 4, var++); + err |= check_one_size_helper(0, size_mul, 0, 3); + err |= check_one_size_helper(0, size_mul, 3, 0); + err |= check_one_size_helper(6, size_mul, 2, 3); + err |= check_one_size_helper(SIZE_MAX, size_mul, SIZE_MAX, 1); + err |= check_one_size_helper(SIZE_MAX, size_mul, SIZE_MAX, 3); + err |= check_one_size_helper(SIZE_MAX, size_mul, SIZE_MAX, -3); + + var = 4; + err |= check_one_size_helper(9, size_add, var++, 5); + err |= check_one_size_helper(9, size_add, 4, var++); + err |= check_one_size_helper(9, size_add, 9, 0); + err |= check_one_size_helper(9, size_add, 0, 9); + err |= check_one_size_helper(5, size_add, 2, 3); + err |= check_one_size_helper(SIZE_MAX, size_add, SIZE_MAX, 1); + err |= check_one_size_helper(SIZE_MAX, size_add, SIZE_MAX, 3); + err |= check_one_size_helper(SIZE_MAX, size_add, SIZE_MAX, -3); + + var = 4; + err |= check_one_size_helper(1, size_sub, var--, 3); + err |= check_one_size_helper(1, size_sub, 4, var--); + err |= check_one_size_helper(1, size_sub, 3, 2); + err |= check_one_size_helper(9, size_sub, 9, 0); + err |= check_one_size_helper(SIZE_MAX, size_sub, 9, -3); + err |= check_one_size_helper(SIZE_MAX, size_sub, 0, 9); + err |= check_one_size_helper(SIZE_MAX, size_sub, 2, 3); + err |= check_one_size_helper(SIZE_MAX, size_sub, SIZE_MAX, 0); + err |= check_one_size_helper(SIZE_MAX, size_sub, SIZE_MAX, 10); + err |= check_one_size_helper(SIZE_MAX, size_sub, 0, SIZE_MAX); + err |= check_one_size_helper(SIZE_MAX, size_sub, 14, SIZE_MAX); + err |= check_one_size_helper(SIZE_MAX - 2, size_sub, SIZE_MAX - 1, 1); + err |= check_one_size_helper(SIZE_MAX - 4, size_sub, SIZE_MAX - 1, 3); + err |= check_one_size_helper(1, size_sub, SIZE_MAX - 1, -3); + + var = 4; + err |= check_one_size_helper(4 * sizeof(*obj->data), + flex_array_size, obj, data, var++); + err |= check_one_size_helper(5 * sizeof(*obj->data), + flex_array_size, obj, data, var++); + err |= check_one_size_helper(0, flex_array_size, obj, data, 0); + err |= check_one_size_helper(sizeof(*obj->data), + flex_array_size, obj, data, 1); + err |= check_one_size_helper(7 * sizeof(*obj->data), + flex_array_size, obj, data, 7); + err |= check_one_size_helper(SIZE_MAX, + flex_array_size, obj, data, -1); + err |= check_one_size_helper(SIZE_MAX, + flex_array_size, obj, data, SIZE_MAX - 4); + + var = 4; + err |= check_one_size_helper(sizeof(*obj) + (4 * sizeof(*obj->data)), + struct_size, obj, data, var++); + err |= check_one_size_helper(sizeof(*obj) + (5 * sizeof(*obj->data)), + struct_size, obj, data, var++); + err |= check_one_size_helper(sizeof(*obj), struct_size, obj, data, 0); + err |= check_one_size_helper(sizeof(*obj) + sizeof(*obj->data), + struct_size, obj, data, 1); + err |= check_one_size_helper(SIZE_MAX, + struct_size, obj, data, -3); + err |= check_one_size_helper(SIZE_MAX, + struct_size, obj, data, SIZE_MAX - 3); + + pr_info("%d overflow size helper tests finished\n", count); + + return err; +} + static int __init test_module_init(void) { int err = 0; err |= test_overflow_calculation(); err |= test_overflow_shift(); + err |= test_overflow_size_helpers(); err |= test_overflow_allocation(); if (err) { diff --git 
a/mm/compaction.c b/mm/compaction.c index e8fcf0e0c1ca..89517ad5d6a0 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -1350,7 +1350,7 @@ move_freelist_tail(struct list_head *freelist, struct page *freepage) } static void -fast_isolate_around(struct compact_control *cc, unsigned long pfn, unsigned long nr_isolated) +fast_isolate_around(struct compact_control *cc, unsigned long pfn) { unsigned long start_pfn, end_pfn; struct page *page; @@ -1371,21 +1371,13 @@ fast_isolate_around(struct compact_control *cc, unsigned long pfn, unsigned long if (!page) return; - /* Scan before */ - if (start_pfn != pfn) { - isolate_freepages_block(cc, &start_pfn, pfn, &cc->freepages, 1, false); - if (cc->nr_freepages >= cc->nr_migratepages) - return; - } - - /* Scan after */ - start_pfn = pfn + nr_isolated; - if (start_pfn < end_pfn) - isolate_freepages_block(cc, &start_pfn, end_pfn, &cc->freepages, 1, false); + isolate_freepages_block(cc, &start_pfn, end_pfn, &cc->freepages, 1, false); /* Skip this pageblock in the future as it's full or nearly full */ if (cc->nr_freepages < cc->nr_migratepages) set_pageblock_skip(page); + + return; } /* Search orders in round-robin fashion */ @@ -1561,7 +1553,7 @@ fast_isolate_freepages(struct compact_control *cc) return cc->free_pfn; low_pfn = page_to_pfn(page); - fast_isolate_around(cc, low_pfn, nr_isolated); + fast_isolate_around(cc, low_pfn); return low_pfn; } diff --git a/net/802/mrp.c b/net/802/mrp.c index 35e04cc5390c..c10a432a5b43 100644 --- a/net/802/mrp.c +++ b/net/802/mrp.c @@ -606,7 +606,10 @@ static void mrp_join_timer(struct timer_list *t) spin_unlock(&app->lock); mrp_queue_xmit(app); - mrp_join_timer_arm(app); + spin_lock(&app->lock); + if (likely(app->active)) + mrp_join_timer_arm(app); + spin_unlock(&app->lock); } static void mrp_periodic_timer_arm(struct mrp_applicant *app) @@ -620,11 +623,12 @@ static void mrp_periodic_timer(struct timer_list *t) struct mrp_applicant *app = from_timer(app, t, periodic_timer); spin_lock(&app->lock); - mrp_mad_event(app, MRP_EVENT_PERIODIC); - mrp_pdu_queue(app); + if (likely(app->active)) { + mrp_mad_event(app, MRP_EVENT_PERIODIC); + mrp_pdu_queue(app); + mrp_periodic_timer_arm(app); + } spin_unlock(&app->lock); - - mrp_periodic_timer_arm(app); } static int mrp_pdu_parse_end_mark(struct sk_buff *skb, int *offset) @@ -872,6 +876,7 @@ int mrp_init_applicant(struct net_device *dev, struct mrp_application *appl) app->dev = dev; app->app = appl; app->mad = RB_ROOT; + app->active = true; spin_lock_init(&app->lock); skb_queue_head_init(&app->queue); rcu_assign_pointer(dev->mrp_port->applicants[appl->type], app); @@ -900,6 +905,9 @@ void mrp_uninit_applicant(struct net_device *dev, struct mrp_application *appl) RCU_INIT_POINTER(port->applicants[appl->type], NULL); + spin_lock_bh(&app->lock); + app->active = false; + spin_unlock_bh(&app->lock); /* Delete timer and generate a final TX event to flush out * all pending messages before the applicant is gone. */ diff --git a/net/9p/client.c b/net/9p/client.c index 565aee6dfcc6..08e0c9990af0 100644 --- a/net/9p/client.c +++ b/net/9p/client.c @@ -281,6 +281,11 @@ p9_tag_alloc(struct p9_client *c, int8_t type, unsigned int max_size) p9pdu_reset(&req->rc); req->t_err = 0; req->status = REQ_STATUS_ALLOC; + /* refcount needs to be set to 0 before inserting into the idr + * so p9_tag_lookup does not accept a request that is not fully + * initialized. refcount_set to 2 below will mark request ready. 
+ */ + refcount_set(&req->refcount, 0); init_waitqueue_head(&req->wq); INIT_LIST_HEAD(&req->req_list); diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index bb84ff5fb98a..a41b4dcf1a7a 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -5101,7 +5101,7 @@ void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status, *req_complete_skb = bt_cb(skb)->hci.req_complete_skb; else *req_complete = bt_cb(skb)->hci.req_complete; - kfree_skb(skb); + dev_kfree_skb_irq(skb); } spin_unlock_irqrestore(&hdev->cmd_q.lock, flags); } diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index 761efd7da514..e15fcf72a342 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c @@ -4453,7 +4453,8 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, chan->ident = cmd->ident; l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp); - chan->num_conf_rsp++; + if (chan->num_conf_rsp < L2CAP_CONF_MAX_CONF_RSP) + chan->num_conf_rsp++; /* Reset config buffer. */ chan->conf_len = 0; diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index f09f0a78eb7b..04000499f4a2 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -7971,7 +7971,7 @@ static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev, * extra parameters we don't know about will be ignored in this request. */ if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE) - return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING, + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS, MGMT_STATUS_INVALID_PARAMS); flags = __le32_to_cpu(cp->flags); diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c index 7324764384b6..8d6fce9005bd 100644 --- a/net/bluetooth/rfcomm/core.c +++ b/net/bluetooth/rfcomm/core.c @@ -590,7 +590,7 @@ int rfcomm_dlc_send(struct rfcomm_dlc *d, struct sk_buff *skb) ret = rfcomm_dlc_send_frag(d, frag); if (ret < 0) { - kfree_skb(frag); + dev_kfree_skb_irq(frag); goto unlock; } diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c index 7583ee98c35b..11d254ce3581 100644 --- a/net/bpf/test_run.c +++ b/net/bpf/test_run.c @@ -470,9 +470,6 @@ static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb) { struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb; - if (!skb->len) - return -EINVAL; - if (!__skb) return 0; diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c index 2809cbd6b7f7..d8cb4b2a076b 100644 --- a/net/caif/cfctrl.c +++ b/net/caif/cfctrl.c @@ -269,11 +269,15 @@ int cfctrl_linkup_request(struct cflayer *layer, default: pr_warn("Request setup of bad link type = %d\n", param->linktype); + cfpkt_destroy(pkt); return -EINVAL; } req = kzalloc(sizeof(*req), GFP_KERNEL); - if (!req) + if (!req) { + cfpkt_destroy(pkt); return -ENOMEM; + } + req->client_layer = user_layer; req->cmd = CFCTRL_CMD_LINK_SETUP; req->param = *param; diff --git a/net/core/dev.c b/net/core/dev.c index be51644e95da..33d6b691e15e 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -10640,24 +10640,16 @@ void netdev_run_todo(void) void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64, const struct net_device_stats *netdev_stats) { -#if BITS_PER_LONG == 64 - BUILD_BUG_ON(sizeof(*stats64) < sizeof(*netdev_stats)); - memcpy(stats64, netdev_stats, sizeof(*netdev_stats)); - /* zero out counters that only exist in rtnl_link_stats64 */ - memset((char *)stats64 + sizeof(*netdev_stats), 0, - sizeof(*stats64) - sizeof(*netdev_stats)); -#else - size_t i, n = sizeof(*netdev_stats) / sizeof(unsigned long); - const unsigned 
long *src = (const unsigned long *)netdev_stats; + size_t i, n = sizeof(*netdev_stats) / sizeof(atomic_long_t); + const atomic_long_t *src = (atomic_long_t *)netdev_stats; u64 *dst = (u64 *)stats64; BUILD_BUG_ON(n > sizeof(*stats64) / sizeof(u64)); for (i = 0; i < n; i++) - dst[i] = src[i]; + dst[i] = atomic_long_read(&src[i]); /* zero out counters that only exist in rtnl_link_stats64 */ memset((char *)stats64 + n * sizeof(u64), 0, sizeof(*stats64) - n * sizeof(u64)); -#endif } EXPORT_SYMBOL(netdev_stats_to_stats64); diff --git a/net/core/filter.c b/net/core/filter.c index fb5b9dbf3bc0..b2031148dd8b 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -2123,8 +2123,17 @@ static int __bpf_redirect_no_mac(struct sk_buff *skb, struct net_device *dev, { unsigned int mlen = skb_network_offset(skb); + if (unlikely(skb->len <= mlen)) { + kfree_skb(skb); + return -ERANGE; + } + if (mlen) { __skb_pull(skb, mlen); + if (unlikely(!skb->len)) { + kfree_skb(skb); + return -ERANGE; + } /* At ingress, the mac header has already been pulled once. * At egress, skb_pospull_rcsum has to be done in case that @@ -2144,7 +2153,7 @@ static int __bpf_redirect_common(struct sk_buff *skb, struct net_device *dev, u32 flags) { /* Verify that a link layer header is carried */ - if (unlikely(skb->mac_header >= skb->network_header)) { + if (unlikely(skb->mac_header >= skb->network_header || skb->len == 0)) { kfree_skb(skb); return -ERANGE; } @@ -3173,15 +3182,18 @@ static int bpf_skb_generic_push(struct sk_buff *skb, u32 off, u32 len) static int bpf_skb_generic_pop(struct sk_buff *skb, u32 off, u32 len) { + void *old_data; + /* skb_ensure_writable() is not needed here, as we're * already working on an uncloned skb. */ if (unlikely(!pskb_may_pull(skb, off + len))) return -ENOMEM; - skb_postpull_rcsum(skb, skb->data + off, len); - memmove(skb->data + len, skb->data, off); + old_data = skb->data; __skb_pull(skb, len); + skb_postpull_rcsum(skb, old_data + off, len); + memmove(skb->data, old_data, off); return 0; } diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 6706bd3c8e9c..058ec2f17da6 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -2263,6 +2263,9 @@ void *__pskb_pull_tail(struct sk_buff *skb, int delta) insp = list; } else { /* Eaten partially. */ + if (skb_is_gso(skb) && !list->head_frag && + skb_headlen(list)) + skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; if (skb_shared(list)) { /* Sucks! We need to fork list. 
:-( */ diff --git a/net/core/skmsg.c b/net/core/skmsg.c index f562f7e2bdc7..dc9b93d8f0d3 100644 --- a/net/core/skmsg.c +++ b/net/core/skmsg.c @@ -880,13 +880,16 @@ int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock, ret = sk_psock_map_verd(ret, msg->sk_redir); psock->apply_bytes = msg->apply_bytes; if (ret == __SK_REDIRECT) { - if (psock->sk_redir) + if (psock->sk_redir) { sock_put(psock->sk_redir); - psock->sk_redir = msg->sk_redir; - if (!psock->sk_redir) { + psock->sk_redir = NULL; + } + if (!msg->sk_redir) { ret = __SK_DROP; goto out; } + psock->redir_ingress = sk_msg_to_ingress(msg); + psock->sk_redir = msg->sk_redir; sock_hold(psock->sk_redir); } out: diff --git a/net/core/sock.c b/net/core/sock.c index 9bcffe1d5332..b7ac53e72d1a 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -1302,7 +1302,7 @@ set_sndbuf: break; } case SO_INCOMING_CPU: - WRITE_ONCE(sk->sk_incoming_cpu, val); + reuseport_update_incoming_cpu(sk, val); break; case SO_CNX_ADVICE: diff --git a/net/core/sock_map.c b/net/core/sock_map.c index 4f4bc163a223..ae6013a8bce5 100644 --- a/net/core/sock_map.c +++ b/net/core/sock_map.c @@ -349,11 +349,13 @@ static void sock_map_free(struct bpf_map *map) sk = xchg(psk, NULL); if (sk) { + sock_hold(sk); lock_sock(sk); rcu_read_lock(); sock_map_unref(sk, psk); rcu_read_unlock(); release_sock(sk); + sock_put(sk); } } diff --git a/net/core/sock_reuseport.c b/net/core/sock_reuseport.c index fb90e1e00773..5a165286e4d8 100644 --- a/net/core/sock_reuseport.c +++ b/net/core/sock_reuseport.c @@ -37,6 +37,70 @@ void reuseport_has_conns_set(struct sock *sk) } EXPORT_SYMBOL(reuseport_has_conns_set); +static void __reuseport_get_incoming_cpu(struct sock_reuseport *reuse) +{ + /* Paired with READ_ONCE() in reuseport_select_sock_by_hash(). */ + WRITE_ONCE(reuse->incoming_cpu, reuse->incoming_cpu + 1); +} + +static void __reuseport_put_incoming_cpu(struct sock_reuseport *reuse) +{ + /* Paired with READ_ONCE() in reuseport_select_sock_by_hash(). */ + WRITE_ONCE(reuse->incoming_cpu, reuse->incoming_cpu - 1); +} + +static void reuseport_get_incoming_cpu(struct sock *sk, struct sock_reuseport *reuse) +{ + if (sk->sk_incoming_cpu >= 0) + __reuseport_get_incoming_cpu(reuse); +} + +static void reuseport_put_incoming_cpu(struct sock *sk, struct sock_reuseport *reuse) +{ + if (sk->sk_incoming_cpu >= 0) + __reuseport_put_incoming_cpu(reuse); +} + +void reuseport_update_incoming_cpu(struct sock *sk, int val) +{ + struct sock_reuseport *reuse; + int old_sk_incoming_cpu; + + if (unlikely(!rcu_access_pointer(sk->sk_reuseport_cb))) { + /* Paired with READ_ONCE() in sk_incoming_cpu_update() + * and compute_score(). + */ + WRITE_ONCE(sk->sk_incoming_cpu, val); + return; + } + + spin_lock_bh(&reuseport_lock); + + /* This must be done under reuseport_lock to avoid a race with + * reuseport_grow(), which accesses sk->sk_incoming_cpu without + * lock_sock() when detaching a shutdown()ed sk. + * + * Paired with READ_ONCE() in reuseport_select_sock_by_hash(). + */ + old_sk_incoming_cpu = sk->sk_incoming_cpu; + WRITE_ONCE(sk->sk_incoming_cpu, val); + + reuse = rcu_dereference_protected(sk->sk_reuseport_cb, + lockdep_is_held(&reuseport_lock)); + + /* reuseport_grow() has detached a closed sk.
*/ + if (!reuse) + goto out; + + if (old_sk_incoming_cpu < 0 && val >= 0) + __reuseport_get_incoming_cpu(reuse); + else if (old_sk_incoming_cpu >= 0 && val < 0) + __reuseport_put_incoming_cpu(reuse); + +out: + spin_unlock_bh(&reuseport_lock); +} + static int reuseport_sock_index(struct sock *sk, const struct sock_reuseport *reuse, bool closed) @@ -64,6 +128,7 @@ static void __reuseport_add_sock(struct sock *sk, /* paired with smp_rmb() in reuseport_(select|migrate)_sock() */ smp_wmb(); reuse->num_socks++; + reuseport_get_incoming_cpu(sk, reuse); } static bool __reuseport_detach_sock(struct sock *sk, @@ -76,6 +141,7 @@ static bool __reuseport_detach_sock(struct sock *sk, reuse->socks[i] = reuse->socks[reuse->num_socks - 1]; reuse->num_socks--; + reuseport_put_incoming_cpu(sk, reuse); return true; } @@ -86,6 +152,7 @@ static void __reuseport_add_closed_sock(struct sock *sk, reuse->socks[reuse->max_socks - reuse->num_closed_socks - 1] = sk; /* paired with READ_ONCE() in inet_csk_bind_conflict() */ WRITE_ONCE(reuse->num_closed_socks, reuse->num_closed_socks + 1); + reuseport_get_incoming_cpu(sk, reuse); } static bool __reuseport_detach_closed_sock(struct sock *sk, @@ -99,6 +166,7 @@ static bool __reuseport_detach_closed_sock(struct sock *sk, reuse->socks[i] = reuse->socks[reuse->max_socks - reuse->num_closed_socks]; /* paired with READ_ONCE() in inet_csk_bind_conflict() */ WRITE_ONCE(reuse->num_closed_socks, reuse->num_closed_socks - 1); + reuseport_put_incoming_cpu(sk, reuse); return true; } @@ -166,6 +234,7 @@ int reuseport_alloc(struct sock *sk, bool bind_inany) reuse->bind_inany = bind_inany; reuse->socks[0] = sk; reuse->num_socks = 1; + reuseport_get_incoming_cpu(sk, reuse); rcu_assign_pointer(sk->sk_reuseport_cb, reuse); out: @@ -209,6 +278,7 @@ static struct sock_reuseport *reuseport_grow(struct sock_reuseport *reuse) more_reuse->reuseport_id = reuse->reuseport_id; more_reuse->bind_inany = reuse->bind_inany; more_reuse->has_conns = reuse->has_conns; + more_reuse->incoming_cpu = reuse->incoming_cpu; memcpy(more_reuse->socks, reuse->socks, reuse->num_socks * sizeof(struct sock *)); @@ -458,18 +528,32 @@ static struct sock *run_bpf_filter(struct sock_reuseport *reuse, u16 socks, static struct sock *reuseport_select_sock_by_hash(struct sock_reuseport *reuse, u32 hash, u16 num_socks) { + struct sock *first_valid_sk = NULL; int i, j; i = j = reciprocal_scale(hash, num_socks); - while (reuse->socks[i]->sk_state == TCP_ESTABLISHED) { + do { + struct sock *sk = reuse->socks[i]; + + if (sk->sk_state != TCP_ESTABLISHED) { + /* Paired with WRITE_ONCE() in __reuseport_(get|put)_incoming_cpu(). */ + if (!READ_ONCE(reuse->incoming_cpu)) + return sk; + + /* Paired with WRITE_ONCE() in reuseport_update_incoming_cpu(). */ + if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id()) + return sk; + + if (!first_valid_sk) + first_valid_sk = sk; + } + i++; if (i >= num_socks) i = 0; - if (i == j) - return NULL; - } + } while (i != j); - return reuse->socks[i]; + return first_valid_sk; } /** diff --git a/net/core/stream.c b/net/core/stream.c index a61130504827..d7c5413d16d5 100644 --- a/net/core/stream.c +++ b/net/core/stream.c @@ -196,6 +196,12 @@ void sk_stream_kill_queues(struct sock *sk) /* First the read buffer. */ __skb_queue_purge(&sk->sk_receive_queue); + /* Next, the error queue. + * We need to use queue lock, because other threads might + * add packets to the queue without socket lock being held. + */ + skb_queue_purge(&sk->sk_error_queue); + /* Next, the write queue. 
*/ WARN_ON(!skb_queue_empty(&sk->sk_write_queue)); diff --git a/net/dsa/tag_8021q.c b/net/dsa/tag_8021q.c index f8f7b7c34e7d..e443088ab0f6 100644 --- a/net/dsa/tag_8021q.c +++ b/net/dsa/tag_8021q.c @@ -529,6 +529,7 @@ static void dsa_tag_8021q_teardown(struct dsa_switch *ds) int dsa_tag_8021q_register(struct dsa_switch *ds, __be16 proto) { struct dsa_8021q_context *ctx; + int err; ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); if (!ctx) @@ -541,7 +542,15 @@ int dsa_tag_8021q_register(struct dsa_switch *ds, __be16 proto) ds->tag_8021q_ctx = ctx; - return dsa_tag_8021q_setup(ds); + err = dsa_tag_8021q_setup(ds); + if (err) + goto err_free; + + return 0; + +err_free: + kfree(ctx); + return err; } EXPORT_SYMBOL_GPL(dsa_tag_8021q_register); diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c index e4983f473a3c..6991d77dcb2e 100644 --- a/net/ethtool/ioctl.c +++ b/net/ethtool/ioctl.c @@ -1988,7 +1988,8 @@ static int ethtool_phys_id(struct net_device *dev, void __user *useraddr) } else { /* Driver expects to be called at twice the frequency in rc */ int n = rc * 2, interval = HZ / n; - u64 count = n * id.data, i = 0; + u64 count = mul_u32_u32(n, id.data); + u64 i = 0; do { rtnl_lock(); diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c index a1045c3d71b4..7ce40b49c956 100644 --- a/net/hsr/hsr_device.c +++ b/net/hsr/hsr_device.c @@ -219,7 +219,9 @@ static netdev_tx_t hsr_dev_xmit(struct sk_buff *skb, struct net_device *dev) skb->dev = master->dev; skb_reset_mac_header(skb); skb_reset_mac_len(skb); + spin_lock_bh(&hsr->seqnr_lock); hsr_forward_skb(skb, master); + spin_unlock_bh(&hsr->seqnr_lock); } else { atomic_long_inc(&dev->tx_dropped); dev_kfree_skb_any(skb); @@ -278,7 +280,6 @@ static void send_hsr_supervision_frame(struct hsr_port *master, __u8 type = HSR_TLV_LIFE_CHECK; struct hsr_sup_payload *hsr_sp; struct hsr_sup_tag *hsr_stag; - unsigned long irqflags; struct sk_buff *skb; *interval = msecs_to_jiffies(HSR_LIFE_CHECK_INTERVAL); @@ -299,7 +300,7 @@ static void send_hsr_supervision_frame(struct hsr_port *master, set_hsr_stag_HSR_ver(hsr_stag, hsr->prot_version); /* From HSRv1 on we have separate supervision sequence numbers. */ - spin_lock_irqsave(&master->hsr->seqnr_lock, irqflags); + spin_lock_bh(&hsr->seqnr_lock); if (hsr->prot_version > 0) { hsr_stag->sequence_nr = htons(hsr->sup_sequence_nr); hsr->sup_sequence_nr++; @@ -307,7 +308,6 @@ static void send_hsr_supervision_frame(struct hsr_port *master, hsr_stag->sequence_nr = htons(hsr->sequence_nr); hsr->sequence_nr++; } - spin_unlock_irqrestore(&master->hsr->seqnr_lock, irqflags); hsr_stag->HSR_TLV_type = type; /* TODO: Why 12 in HSRv0? */ @@ -318,11 +318,13 @@ static void send_hsr_supervision_frame(struct hsr_port *master, hsr_sp = skb_put(skb, sizeof(struct hsr_sup_payload)); ether_addr_copy(hsr_sp->macaddress_A, master->dev->dev_addr); - if (skb_put_padto(skb, ETH_ZLEN)) + if (skb_put_padto(skb, ETH_ZLEN)) { + spin_unlock_bh(&hsr->seqnr_lock); return; + } hsr_forward_skb(skb, master); - + spin_unlock_bh(&hsr->seqnr_lock); return; } @@ -332,7 +334,6 @@ static void send_prp_supervision_frame(struct hsr_port *master, struct hsr_priv *hsr = master->hsr; struct hsr_sup_payload *hsr_sp; struct hsr_sup_tag *hsr_stag; - unsigned long irqflags; struct sk_buff *skb; skb = hsr_init_skb(master); @@ -347,7 +348,7 @@ static void send_prp_supervision_frame(struct hsr_port *master, set_hsr_stag_HSR_ver(hsr_stag, (hsr->prot_version ? 1 : 0)); /* From HSRv1 on we have separate supervision sequence numbers. 
*/ - spin_lock_irqsave(&master->hsr->seqnr_lock, irqflags); + spin_lock_bh(&hsr->seqnr_lock); hsr_stag->sequence_nr = htons(hsr->sup_sequence_nr); hsr->sup_sequence_nr++; hsr_stag->HSR_TLV_type = PRP_TLV_LIFE_CHECK_DD; @@ -358,13 +359,12 @@ static void send_prp_supervision_frame(struct hsr_port *master, ether_addr_copy(hsr_sp->macaddress_A, master->dev->dev_addr); if (skb_put_padto(skb, ETH_ZLEN)) { - spin_unlock_irqrestore(&master->hsr->seqnr_lock, irqflags); + spin_unlock_bh(&hsr->seqnr_lock); return; } - spin_unlock_irqrestore(&master->hsr->seqnr_lock, irqflags); - hsr_forward_skb(skb, master); + spin_unlock_bh(&hsr->seqnr_lock); } /* Announce (supervision frame) timer function @@ -444,7 +444,7 @@ void hsr_dev_setup(struct net_device *dev) dev->header_ops = &hsr_header_ops; dev->netdev_ops = &hsr_device_ops; SET_NETDEV_DEVTYPE(dev, &hsr_type); - dev->priv_flags |= IFF_NO_QUEUE; + dev->priv_flags |= IFF_NO_QUEUE | IFF_DISABLE_NETPOLL; dev->needs_free_netdev = true; diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c index 07892c4b6d0c..35382ed686d1 100644 --- a/net/hsr/hsr_forward.c +++ b/net/hsr/hsr_forward.c @@ -458,7 +458,6 @@ static void handle_std_frame(struct sk_buff *skb, { struct hsr_port *port = frame->port_rcv; struct hsr_priv *hsr = port->hsr; - unsigned long irqflags; frame->skb_hsr = NULL; frame->skb_prp = NULL; @@ -468,10 +467,9 @@ static void handle_std_frame(struct sk_buff *skb, frame->is_from_san = true; } else { /* Sequence nr for the master node */ - spin_lock_irqsave(&hsr->seqnr_lock, irqflags); + lockdep_assert_held(&hsr->seqnr_lock); frame->sequence_nr = hsr->sequence_nr; hsr->sequence_nr++; - spin_unlock_irqrestore(&hsr->seqnr_lock, irqflags); } } @@ -572,11 +570,13 @@ void hsr_forward_skb(struct sk_buff *skb, struct hsr_port *port) { struct hsr_frame_info frame; + rcu_read_lock(); if (fill_frame_info(&frame, skb, port) < 0) goto out_drop; hsr_register_frame_in(frame.node_src, port, frame.sequence_nr); hsr_forward_do(&frame); + rcu_read_unlock(); /* Gets called for ingress frames as well as egress from master port. * So check and increment stats for master port only here. */ @@ -591,6 +591,7 @@ void hsr_forward_skb(struct sk_buff *skb, struct hsr_port *port) return; out_drop: + rcu_read_unlock(); port->dev->stats.tx_dropped++; kfree_skb(skb); } diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c index e31949479305..414bf4d3d3c9 100644 --- a/net/hsr/hsr_framereg.c +++ b/net/hsr/hsr_framereg.c @@ -159,6 +159,7 @@ static struct hsr_node *hsr_add_node(struct hsr_priv *hsr, return NULL; ether_addr_copy(new_node->macaddress_A, addr); + spin_lock_init(&new_node->seq_out_lock); /* We are only interested in time diffs here, so use current jiffies * as initialization. (0 could trigger an spurious ring error warning). 
@@ -313,6 +314,7 @@ void hsr_handle_sup_frame(struct hsr_frame_info *frame) goto done; ether_addr_copy(node_real->macaddress_B, ethhdr->h_source); + spin_lock_bh(&node_real->seq_out_lock); for (i = 0; i < HSR_PT_PORTS; i++) { if (!node_curr->time_in_stale[i] && time_after(node_curr->time_in[i], node_real->time_in[i])) { @@ -323,12 +325,16 @@ void hsr_handle_sup_frame(struct hsr_frame_info *frame) if (seq_nr_after(node_curr->seq_out[i], node_real->seq_out[i])) node_real->seq_out[i] = node_curr->seq_out[i]; } + spin_unlock_bh(&node_real->seq_out_lock); node_real->addr_B_port = port_rcv->type; spin_lock_bh(&hsr->list_lock); - list_del_rcu(&node_curr->mac_list); + if (!node_curr->removed) { + list_del_rcu(&node_curr->mac_list); + node_curr->removed = true; + kfree_rcu(node_curr, rcu_head); + } spin_unlock_bh(&hsr->list_lock); - kfree_rcu(node_curr, rcu_head); done: /* PRP uses v0 header */ @@ -416,13 +422,17 @@ void hsr_register_frame_in(struct hsr_node *node, struct hsr_port *port, int hsr_register_frame_out(struct hsr_port *port, struct hsr_node *node, u16 sequence_nr) { + spin_lock_bh(&node->seq_out_lock); if (seq_nr_before_or_eq(sequence_nr, node->seq_out[port->type]) && time_is_after_jiffies(node->time_out[port->type] + - msecs_to_jiffies(HSR_ENTRY_FORGET_TIME))) + msecs_to_jiffies(HSR_ENTRY_FORGET_TIME))) { + spin_unlock_bh(&node->seq_out_lock); return 1; + } node->time_out[port->type] = jiffies; node->seq_out[port->type] = sequence_nr; + spin_unlock_bh(&node->seq_out_lock); return 0; } @@ -502,9 +512,12 @@ void hsr_prune_nodes(struct timer_list *t) if (time_is_before_jiffies(timestamp + msecs_to_jiffies(HSR_NODE_FORGET_TIME))) { hsr_nl_nodedown(hsr, node->macaddress_A); - list_del_rcu(&node->mac_list); - /* Note that we need to free this entry later: */ - kfree_rcu(node, rcu_head); + if (!node->removed) { + list_del_rcu(&node->mac_list); + node->removed = true; + /* Note that we need to free this entry later: */ + kfree_rcu(node, rcu_head); + } } } spin_unlock_bh(&hsr->list_lock); diff --git a/net/hsr/hsr_framereg.h b/net/hsr/hsr_framereg.h index d9628e7a5f05..48990166e4c4 100644 --- a/net/hsr/hsr_framereg.h +++ b/net/hsr/hsr_framereg.h @@ -69,6 +69,8 @@ void prp_update_san_info(struct hsr_node *node, bool is_sup); struct hsr_node { struct list_head mac_list; + /* Protect R/W access to seq_out */ + spinlock_t seq_out_lock; unsigned char macaddress_A[ETH_ALEN]; unsigned char macaddress_B[ETH_ALEN]; /* Local slave through which AddrB frames are received from this node */ @@ -80,6 +82,7 @@ struct hsr_node { bool san_a; bool san_b; u16 seq_out[HSR_PT_PORTS]; + bool removed; struct rcu_head rcu_head; }; diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index a53f9bf7886f..8039097546de 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -155,10 +155,14 @@ static int inet_csk_bind_conflict(const struct sock *sk, */ sk_for_each_bound(sk2, &tb->owners) { - if (sk != sk2 && - (!sk->sk_bound_dev_if || - !sk2->sk_bound_dev_if || - sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) { + int bound_dev_if2; + + if (sk == sk2) + continue; + bound_dev_if2 = READ_ONCE(sk2->sk_bound_dev_if); + if ((!sk->sk_bound_dev_if || + !bound_dev_if2 || + sk->sk_bound_dev_if == bound_dev_if2)) { if (reuse && sk2->sk_reuse && sk2->sk_state != TCP_LISTEN) { if ((!relax || diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c index 940839264025..3aab914eb103 100644 --- a/net/ipv4/syncookies.c +++ b/net/ipv4/syncookies.c @@ -290,12 +290,11 @@ struct 
request_sock *cookie_tcp_reqsk_alloc(const struct request_sock_ops *ops, struct tcp_request_sock *treq; struct request_sock *req; -#ifdef CONFIG_MPTCP if (sk_is_mptcp(sk)) - ops = &mptcp_subflow_request_sock_ops; -#endif + req = mptcp_subflow_reqsk_alloc(ops, sk, false); + else + req = inet_reqsk_alloc(ops, sk, false); - req = inet_reqsk_alloc(ops, sk, false); if (!req) return NULL; diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c index 5194c6870273..b4b642e3de78 100644 --- a/net/ipv4/tcp_bpf.c +++ b/net/ipv4/tcp_bpf.c @@ -45,8 +45,11 @@ static int bpf_tcp_ingress(struct sock *sk, struct sk_psock *psock, tmp->sg.end = i; if (apply) { apply_bytes -= size; - if (!apply_bytes) + if (!apply_bytes) { + if (sge->length) + sk_msg_iter_var_prev(i); break; + } } } while (i != msg->sg.end); @@ -131,10 +134,9 @@ static int tcp_bpf_push_locked(struct sock *sk, struct sk_msg *msg, return ret; } -int tcp_bpf_sendmsg_redir(struct sock *sk, struct sk_msg *msg, - u32 bytes, int flags) +int tcp_bpf_sendmsg_redir(struct sock *sk, bool ingress, + struct sk_msg *msg, u32 bytes, int flags) { - bool ingress = sk_msg_to_ingress(msg); struct sk_psock *psock = sk_psock_get(sk); int ret; @@ -277,10 +279,10 @@ msg_bytes_ready: static int tcp_bpf_send_verdict(struct sock *sk, struct sk_psock *psock, struct sk_msg *msg, int *copied, int flags) { - bool cork = false, enospc = sk_msg_full(msg); + bool cork = false, enospc = sk_msg_full(msg), redir_ingress; struct sock *sk_redir; u32 tosend, origsize, sent, delta = 0; - u32 eval = __SK_NONE; + u32 eval; int ret; more_data: @@ -311,6 +313,7 @@ more_data: tosend = msg->sg.size; if (psock->apply_bytes && psock->apply_bytes < tosend) tosend = psock->apply_bytes; + eval = __SK_NONE; switch (psock->eval) { case __SK_PASS: @@ -322,6 +325,7 @@ more_data: sk_msg_apply_bytes(psock, tosend); break; case __SK_REDIRECT: + redir_ingress = psock->redir_ingress; sk_redir = psock->sk_redir; sk_msg_apply_bytes(psock, tosend); if (!psock->apply_bytes) { @@ -338,7 +342,8 @@ more_data: release_sock(sk); origsize = msg->sg.size; - ret = tcp_bpf_sendmsg_redir(sk_redir, msg, tosend, flags); + ret = tcp_bpf_sendmsg_redir(sk_redir, redir_ingress, + msg, tosend, flags); sent = origsize - msg->sg.size; if (eval == __SK_REDIRECT) diff --git a/net/ipv4/udp_tunnel_core.c b/net/ipv4/udp_tunnel_core.c index 46101fd67a47..1ff5b8e30bb9 100644 --- a/net/ipv4/udp_tunnel_core.c +++ b/net/ipv4/udp_tunnel_core.c @@ -179,6 +179,7 @@ EXPORT_SYMBOL_GPL(udp_tunnel_xmit_skb); void udp_tunnel_sock_release(struct socket *sock) { rcu_assign_sk_user_data(sock->sk, NULL); + synchronize_rcu(); kernel_sock_shutdown(sock, SHUT_RDWR); sock_release(sock); } diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index 946871741f12..d4cdc2b1b468 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c @@ -696,7 +696,7 @@ static int ipip6_rcv(struct sk_buff *skb) skb->dev = tunnel->dev; if (packet_is_spoofed(skb, iph, tunnel)) { - tunnel->dev->stats.rx_errors++; + DEV_STATS_INC(tunnel->dev, rx_errors); goto out; } @@ -716,8 +716,8 @@ static int ipip6_rcv(struct sk_buff *skb) net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n", &iph->saddr, iph->tos); if (err > 1) { - ++tunnel->dev->stats.rx_frame_errors; - ++tunnel->dev->stats.rx_errors; + DEV_STATS_INC(tunnel->dev, rx_frame_errors); + DEV_STATS_INC(tunnel->dev, rx_errors); goto out; } } @@ -948,7 +948,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, if (!rt) { rt = ip_route_output_flow(tunnel->net, &fl4, NULL); if (IS_ERR(rt)) { - dev->stats.tx_carrier_errors++; + 
DEV_STATS_INC(dev, tx_carrier_errors); goto tx_error_icmp; } dst_cache_set_ip4(&tunnel->dst_cache, &rt->dst, fl4.saddr); @@ -956,14 +956,14 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, if (rt->rt_type != RTN_UNICAST) { ip_rt_put(rt); - dev->stats.tx_carrier_errors++; + DEV_STATS_INC(dev, tx_carrier_errors); goto tx_error_icmp; } tdev = rt->dst.dev; if (tdev == dev) { ip_rt_put(rt); - dev->stats.collisions++; + DEV_STATS_INC(dev, collisions); goto tx_error; } @@ -976,7 +976,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, mtu = dst_mtu(&rt->dst) - t_hlen; if (mtu < IPV4_MIN_MTU) { - dev->stats.collisions++; + DEV_STATS_INC(dev, collisions); ip_rt_put(rt); goto tx_error; } @@ -1015,7 +1015,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom); if (!new_skb) { ip_rt_put(rt); - dev->stats.tx_dropped++; + DEV_STATS_INC(dev, tx_dropped); kfree_skb(skb); return NETDEV_TX_OK; } @@ -1045,7 +1045,7 @@ tx_error_icmp: dst_link_failure(skb); tx_error: kfree_skb(skb); - dev->stats.tx_errors++; + DEV_STATS_INC(dev, tx_errors); return NETDEV_TX_OK; } @@ -1064,7 +1064,7 @@ static netdev_tx_t sit_tunnel_xmit__(struct sk_buff *skb, return NETDEV_TX_OK; tx_error: kfree_skb(skb); - dev->stats.tx_errors++; + DEV_STATS_INC(dev, tx_errors); return NETDEV_TX_OK; } @@ -1093,7 +1093,7 @@ static netdev_tx_t sit_tunnel_xmit(struct sk_buff *skb, return NETDEV_TX_OK; tx_err: - dev->stats.tx_errors++; + DEV_STATS_INC(dev, tx_errors); kfree_skb(skb); return NETDEV_TX_OK; diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 041859b5b71d..a3347f245782 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -2059,6 +2059,7 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name, ret = cfg80211_register_netdevice(ndev); if (ret) { + ieee80211_if_free(ndev); free_netdev(ndev); return ret; } diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c index 5ef9013b94c7..ba4241d7c5ef 100644 --- a/net/mptcp/subflow.c +++ b/net/mptcp/subflow.c @@ -45,7 +45,6 @@ static void subflow_req_destructor(struct request_sock *req) sock_put((struct sock *)subflow_req->msk); mptcp_token_destroy_request(req); - tcp_request_sock_ops.destructor(req); } static void subflow_generate_hmac(u64 key1, u64 key2, u32 nonce1, u32 nonce2, @@ -483,9 +482,8 @@ do_reset: mptcp_subflow_reset(sk); } -struct request_sock_ops mptcp_subflow_request_sock_ops; -EXPORT_SYMBOL_GPL(mptcp_subflow_request_sock_ops); -static struct tcp_request_sock_ops subflow_request_sock_ipv4_ops; +static struct request_sock_ops mptcp_subflow_v4_request_sock_ops __ro_after_init; +static struct tcp_request_sock_ops subflow_request_sock_ipv4_ops __ro_after_init; static int subflow_v4_conn_request(struct sock *sk, struct sk_buff *skb) { @@ -497,7 +495,7 @@ static int subflow_v4_conn_request(struct sock *sk, struct sk_buff *skb) if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) goto drop; - return tcp_conn_request(&mptcp_subflow_request_sock_ops, + return tcp_conn_request(&mptcp_subflow_v4_request_sock_ops, &subflow_request_sock_ipv4_ops, sk, skb); drop: @@ -505,10 +503,17 @@ drop: return 0; } +static void subflow_v4_req_destructor(struct request_sock *req) +{ + subflow_req_destructor(req); + tcp_request_sock_ops.destructor(req); +} + #if IS_ENABLED(CONFIG_MPTCP_IPV6) -static struct tcp_request_sock_ops subflow_request_sock_ipv6_ops; -static struct inet_connection_sock_af_ops subflow_v6_specific; -static struct inet_connection_sock_af_ops 
subflow_v6m_specific; +static struct request_sock_ops mptcp_subflow_v6_request_sock_ops __ro_after_init; +static struct tcp_request_sock_ops subflow_request_sock_ipv6_ops __ro_after_init; +static struct inet_connection_sock_af_ops subflow_v6_specific __ro_after_init; +static struct inet_connection_sock_af_ops subflow_v6m_specific __ro_after_init; static struct proto tcpv6_prot_override; static int subflow_v6_conn_request(struct sock *sk, struct sk_buff *skb) @@ -528,15 +533,36 @@ static int subflow_v6_conn_request(struct sock *sk, struct sk_buff *skb) return 0; } - return tcp_conn_request(&mptcp_subflow_request_sock_ops, + return tcp_conn_request(&mptcp_subflow_v6_request_sock_ops, &subflow_request_sock_ipv6_ops, sk, skb); drop: tcp_listendrop(sk); return 0; /* don't send reset */ } + +static void subflow_v6_req_destructor(struct request_sock *req) +{ + subflow_req_destructor(req); + tcp6_request_sock_ops.destructor(req); +} #endif +struct request_sock *mptcp_subflow_reqsk_alloc(const struct request_sock_ops *ops, + struct sock *sk_listener, + bool attach_listener) +{ + if (ops->family == AF_INET) + ops = &mptcp_subflow_v4_request_sock_ops; +#if IS_ENABLED(CONFIG_MPTCP_IPV6) + else if (ops->family == AF_INET6) + ops = &mptcp_subflow_v6_request_sock_ops; +#endif + + return inet_reqsk_alloc(ops, sk_listener, attach_listener); +} +EXPORT_SYMBOL(mptcp_subflow_reqsk_alloc); + /* validate hmac received in third ACK */ static bool subflow_hmac_valid(const struct request_sock *req, const struct mptcp_options_received *mp_opt) @@ -790,7 +816,7 @@ dispose_child: return child; } -static struct inet_connection_sock_af_ops subflow_specific; +static struct inet_connection_sock_af_ops subflow_specific __ro_after_init; static struct proto tcp_prot_override; enum mapping_status { @@ -1327,7 +1353,7 @@ static void subflow_write_space(struct sock *ssk) mptcp_write_space(sk); } -static struct inet_connection_sock_af_ops * +static const struct inet_connection_sock_af_ops * subflow_default_af_ops(struct sock *sk) { #if IS_ENABLED(CONFIG_MPTCP_IPV6) @@ -1342,7 +1368,7 @@ void mptcpv6_handle_mapped(struct sock *sk, bool mapped) { struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); struct inet_connection_sock *icsk = inet_csk(sk); - struct inet_connection_sock_af_ops *target; + const struct inet_connection_sock_af_ops *target; target = mapped ? 
&subflow_v6m_specific : subflow_default_af_ops(sk); @@ -1782,7 +1808,6 @@ static struct tcp_ulp_ops subflow_ulp_ops __read_mostly = { static int subflow_ops_init(struct request_sock_ops *subflow_ops) { subflow_ops->obj_size = sizeof(struct mptcp_subflow_request_sock); - subflow_ops->slab_name = "request_sock_subflow"; subflow_ops->slab = kmem_cache_create(subflow_ops->slab_name, subflow_ops->obj_size, 0, @@ -1792,16 +1817,17 @@ static int subflow_ops_init(struct request_sock_ops *subflow_ops) if (!subflow_ops->slab) return -ENOMEM; - subflow_ops->destructor = subflow_req_destructor; - return 0; } void __init mptcp_subflow_init(void) { - mptcp_subflow_request_sock_ops = tcp_request_sock_ops; - if (subflow_ops_init(&mptcp_subflow_request_sock_ops) != 0) - panic("MPTCP: failed to init subflow request sock ops\n"); + mptcp_subflow_v4_request_sock_ops = tcp_request_sock_ops; + mptcp_subflow_v4_request_sock_ops.slab_name = "request_sock_subflow_v4"; + mptcp_subflow_v4_request_sock_ops.destructor = subflow_v4_req_destructor; + + if (subflow_ops_init(&mptcp_subflow_v4_request_sock_ops) != 0) + panic("MPTCP: failed to init subflow v4 request sock ops\n"); subflow_request_sock_ipv4_ops = tcp_request_sock_ipv4_ops; subflow_request_sock_ipv4_ops.route_req = subflow_v4_route_req; @@ -1815,6 +1841,20 @@ void __init mptcp_subflow_init(void) tcp_prot_override.release_cb = tcp_release_cb_override; #if IS_ENABLED(CONFIG_MPTCP_IPV6) + /* In struct mptcp_subflow_request_sock, we assume the TCP request sock + * structures for v4 and v6 have the same size. It should not change in + * the future, but better to make sure to be warned if it is no longer + * the case. + */ + BUILD_BUG_ON(sizeof(struct tcp_request_sock) != sizeof(struct tcp6_request_sock)); + + mptcp_subflow_v6_request_sock_ops = tcp6_request_sock_ops; + mptcp_subflow_v6_request_sock_ops.slab_name = "request_sock_subflow_v6"; + mptcp_subflow_v6_request_sock_ops.destructor = subflow_v6_req_destructor; + + if (subflow_ops_init(&mptcp_subflow_v6_request_sock_ops) != 0) + panic("MPTCP: failed to init subflow v6 request sock ops\n"); + subflow_request_sock_ipv6_ops = tcp_request_sock_ipv6_ops; subflow_request_sock_ipv6_ops.route_req = subflow_v6_route_req; diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c index 16ae92054baa..ae061b27e446 100644 --- a/net/netfilter/ipset/ip_set_core.c +++ b/net/netfilter/ipset/ip_set_core.c @@ -1698,9 +1698,10 @@ call_ad(struct net *net, struct sock *ctnl, struct sk_buff *skb, ret = set->variant->uadt(set, tb, adt, &lineno, flags, retried); ip_set_unlock(set); retried = true; - } while (ret == -EAGAIN && - set->variant->resize && - (ret = set->variant->resize(set, retried)) == 0); + } while (ret == -ERANGE || + (ret == -EAGAIN && + set->variant->resize && + (ret = set->variant->resize(set, retried)) == 0)); if (!ret || (ret == -IPSET_ERR_EXIST && eexist)) return 0; diff --git a/net/netfilter/ipset/ip_set_hash_ip.c b/net/netfilter/ipset/ip_set_hash_ip.c index 75d556d71652..24adcdd7a0b1 100644 --- a/net/netfilter/ipset/ip_set_hash_ip.c +++ b/net/netfilter/ipset/ip_set_hash_ip.c @@ -98,11 +98,11 @@ static int hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[], enum ipset_adt adt, u32 *lineno, u32 flags, bool retried) { - const struct hash_ip4 *h = set->data; + struct hash_ip4 *h = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; struct hash_ip4_elem e = { 0 }; struct ip_set_ext ext = IP_SET_INIT_UEXT(set); - u32 ip = 0, ip_to = 0, hosts; + u32 ip = 0, ip_to = 0, hosts, i = 0; int ret
= 0; if (tb[IPSET_ATTR_LINENO]) @@ -147,14 +147,14 @@ hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[], hosts = h->netmask == 32 ? 1 : 2 << (32 - h->netmask - 1); - /* 64bit division is not allowed on 32bit */ - if (((u64)ip_to - ip + 1) >> (32 - h->netmask) > IPSET_MAX_RANGE) - return -ERANGE; - if (retried) ip = ntohl(h->next.ip); - for (; ip <= ip_to;) { + for (; ip <= ip_to; i++) { e.ip = htonl(ip); + if (i > IPSET_MAX_RANGE) { + hash_ip4_data_next(&h->next, &e); + return -ERANGE; + } ret = adtfn(set, &e, &ext, &ext, flags); if (ret && !ip_set_eexist(ret, flags)) return ret; diff --git a/net/netfilter/ipset/ip_set_hash_ipmark.c b/net/netfilter/ipset/ip_set_hash_ipmark.c index 153de3457423..a22ec1a6f6ec 100644 --- a/net/netfilter/ipset/ip_set_hash_ipmark.c +++ b/net/netfilter/ipset/ip_set_hash_ipmark.c @@ -97,11 +97,11 @@ static int hash_ipmark4_uadt(struct ip_set *set, struct nlattr *tb[], enum ipset_adt adt, u32 *lineno, u32 flags, bool retried) { - const struct hash_ipmark4 *h = set->data; + struct hash_ipmark4 *h = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; struct hash_ipmark4_elem e = { }; struct ip_set_ext ext = IP_SET_INIT_UEXT(set); - u32 ip, ip_to = 0; + u32 ip, ip_to = 0, i = 0; int ret; if (tb[IPSET_ATTR_LINENO]) @@ -148,13 +148,14 @@ hash_ipmark4_uadt(struct ip_set *set, struct nlattr *tb[], ip_set_mask_from_to(ip, ip_to, cidr); } - if (((u64)ip_to - ip + 1) > IPSET_MAX_RANGE) - return -ERANGE; - if (retried) ip = ntohl(h->next.ip); - for (; ip <= ip_to; ip++) { + for (; ip <= ip_to; ip++, i++) { e.ip = htonl(ip); + if (i > IPSET_MAX_RANGE) { + hash_ipmark4_data_next(&h->next, &e); + return -ERANGE; + } ret = adtfn(set, &e, &ext, &ext, flags); if (ret && !ip_set_eexist(ret, flags)) diff --git a/net/netfilter/ipset/ip_set_hash_ipport.c b/net/netfilter/ipset/ip_set_hash_ipport.c index 7303138e46be..10481760a9b2 100644 --- a/net/netfilter/ipset/ip_set_hash_ipport.c +++ b/net/netfilter/ipset/ip_set_hash_ipport.c @@ -105,11 +105,11 @@ static int hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[], enum ipset_adt adt, u32 *lineno, u32 flags, bool retried) { - const struct hash_ipport4 *h = set->data; + struct hash_ipport4 *h = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; struct hash_ipport4_elem e = { .ip = 0 }; struct ip_set_ext ext = IP_SET_INIT_UEXT(set); - u32 ip, ip_to = 0, p = 0, port, port_to; + u32 ip, ip_to = 0, p = 0, port, port_to, i = 0; bool with_ports = false; int ret; @@ -173,17 +173,18 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[], swap(port, port_to); } - if (((u64)ip_to - ip + 1)*(port_to - port + 1) > IPSET_MAX_RANGE) - return -ERANGE; - if (retried) ip = ntohl(h->next.ip); for (; ip <= ip_to; ip++) { p = retried && ip == ntohl(h->next.ip) ? 
ntohs(h->next.port) : port; - for (; p <= port_to; p++) { + for (; p <= port_to; p++, i++) { e.ip = htonl(ip); e.port = htons(p); + if (i > IPSET_MAX_RANGE) { + hash_ipport4_data_next(&h->next, &e); + return -ERANGE; + } ret = adtfn(set, &e, &ext, &ext, flags); if (ret && !ip_set_eexist(ret, flags)) diff --git a/net/netfilter/ipset/ip_set_hash_ipportip.c b/net/netfilter/ipset/ip_set_hash_ipportip.c index 334fb1ad0e86..39a01934b153 100644 --- a/net/netfilter/ipset/ip_set_hash_ipportip.c +++ b/net/netfilter/ipset/ip_set_hash_ipportip.c @@ -108,11 +108,11 @@ static int hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[], enum ipset_adt adt, u32 *lineno, u32 flags, bool retried) { - const struct hash_ipportip4 *h = set->data; + struct hash_ipportip4 *h = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; struct hash_ipportip4_elem e = { .ip = 0 }; struct ip_set_ext ext = IP_SET_INIT_UEXT(set); - u32 ip, ip_to = 0, p = 0, port, port_to; + u32 ip, ip_to = 0, p = 0, port, port_to, i = 0; bool with_ports = false; int ret; @@ -180,17 +180,18 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[], swap(port, port_to); } - if (((u64)ip_to - ip + 1)*(port_to - port + 1) > IPSET_MAX_RANGE) - return -ERANGE; - if (retried) ip = ntohl(h->next.ip); for (; ip <= ip_to; ip++) { p = retried && ip == ntohl(h->next.ip) ? ntohs(h->next.port) : port; - for (; p <= port_to; p++) { + for (; p <= port_to; p++, i++) { e.ip = htonl(ip); e.port = htons(p); + if (i > IPSET_MAX_RANGE) { + hash_ipportip4_data_next(&h->next, &e); + return -ERANGE; + } ret = adtfn(set, &e, &ext, &ext, flags); if (ret && !ip_set_eexist(ret, flags)) diff --git a/net/netfilter/ipset/ip_set_hash_ipportnet.c b/net/netfilter/ipset/ip_set_hash_ipportnet.c index 7df94f437f60..5c6de605a9fb 100644 --- a/net/netfilter/ipset/ip_set_hash_ipportnet.c +++ b/net/netfilter/ipset/ip_set_hash_ipportnet.c @@ -160,12 +160,12 @@ static int hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[], enum ipset_adt adt, u32 *lineno, u32 flags, bool retried) { - const struct hash_ipportnet4 *h = set->data; + struct hash_ipportnet4 *h = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; struct hash_ipportnet4_elem e = { .cidr = HOST_MASK - 1 }; struct ip_set_ext ext = IP_SET_INIT_UEXT(set); u32 ip = 0, ip_to = 0, p = 0, port, port_to; - u32 ip2_from = 0, ip2_to = 0, ip2; + u32 ip2_from = 0, ip2_to = 0, ip2, i = 0; bool with_ports = false; u8 cidr; int ret; @@ -253,9 +253,6 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[], swap(port, port_to); } - if (((u64)ip_to - ip + 1)*(port_to - port + 1) > IPSET_MAX_RANGE) - return -ERANGE; - ip2_to = ip2_from; if (tb[IPSET_ATTR_IP2_TO]) { ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2_TO], &ip2_to); @@ -282,9 +279,15 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[], for (; p <= port_to; p++) { e.port = htons(p); do { + i++; e.ip2 = htonl(ip2); ip2 = ip_set_range_to_cidr(ip2, ip2_to, &cidr); e.cidr = cidr - 1; + if (i > IPSET_MAX_RANGE) { + hash_ipportnet4_data_next(&h->next, + &e); + return -ERANGE; + } ret = adtfn(set, &e, &ext, &ext, flags); if (ret && !ip_set_eexist(ret, flags)) diff --git a/net/netfilter/ipset/ip_set_hash_net.c b/net/netfilter/ipset/ip_set_hash_net.c index 1422739d9aa2..ce0a9ce5a91f 100644 --- a/net/netfilter/ipset/ip_set_hash_net.c +++ b/net/netfilter/ipset/ip_set_hash_net.c @@ -136,11 +136,11 @@ static int hash_net4_uadt(struct ip_set *set, struct nlattr *tb[], enum ipset_adt adt, u32 *lineno, u32 flags, bool retried) { - const struct hash_net4 *h 
= set->data; + struct hash_net4 *h = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; struct hash_net4_elem e = { .cidr = HOST_MASK }; struct ip_set_ext ext = IP_SET_INIT_UEXT(set); - u32 ip = 0, ip_to = 0, ipn, n = 0; + u32 ip = 0, ip_to = 0, i = 0; int ret; if (tb[IPSET_ATTR_LINENO]) @@ -188,19 +188,16 @@ hash_net4_uadt(struct ip_set *set, struct nlattr *tb[], if (ip + UINT_MAX == ip_to) return -IPSET_ERR_HASH_RANGE; } - ipn = ip; - do { - ipn = ip_set_range_to_cidr(ipn, ip_to, &e.cidr); - n++; - } while (ipn++ < ip_to); - - if (n > IPSET_MAX_RANGE) - return -ERANGE; if (retried) ip = ntohl(h->next.ip); do { + i++; e.ip = htonl(ip); + if (i > IPSET_MAX_RANGE) { + hash_net4_data_next(&h->next, &e); + return -ERANGE; + } ip = ip_set_range_to_cidr(ip, ip_to, &e.cidr); ret = adtfn(set, &e, &ext, &ext, flags); if (ret && !ip_set_eexist(ret, flags)) diff --git a/net/netfilter/ipset/ip_set_hash_netiface.c b/net/netfilter/ipset/ip_set_hash_netiface.c index 9810f5bf63f5..031073286236 100644 --- a/net/netfilter/ipset/ip_set_hash_netiface.c +++ b/net/netfilter/ipset/ip_set_hash_netiface.c @@ -202,7 +202,7 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[], ipset_adtfn adtfn = set->variant->adt[adt]; struct hash_netiface4_elem e = { .cidr = HOST_MASK, .elem = 1 }; struct ip_set_ext ext = IP_SET_INIT_UEXT(set); - u32 ip = 0, ip_to = 0, ipn, n = 0; + u32 ip = 0, ip_to = 0, i = 0; int ret; if (tb[IPSET_ATTR_LINENO]) @@ -256,19 +256,16 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[], } else { ip_set_mask_from_to(ip, ip_to, e.cidr); } - ipn = ip; - do { - ipn = ip_set_range_to_cidr(ipn, ip_to, &e.cidr); - n++; - } while (ipn++ < ip_to); - - if (n > IPSET_MAX_RANGE) - return -ERANGE; if (retried) ip = ntohl(h->next.ip); do { + i++; e.ip = htonl(ip); + if (i > IPSET_MAX_RANGE) { + hash_netiface4_data_next(&h->next, &e); + return -ERANGE; + } ip = ip_set_range_to_cidr(ip, ip_to, &e.cidr); ret = adtfn(set, &e, &ext, &ext, flags); diff --git a/net/netfilter/ipset/ip_set_hash_netnet.c b/net/netfilter/ipset/ip_set_hash_netnet.c index 3d09eefe998a..c07b70bf32db 100644 --- a/net/netfilter/ipset/ip_set_hash_netnet.c +++ b/net/netfilter/ipset/ip_set_hash_netnet.c @@ -163,13 +163,12 @@ static int hash_netnet4_uadt(struct ip_set *set, struct nlattr *tb[], enum ipset_adt adt, u32 *lineno, u32 flags, bool retried) { - const struct hash_netnet4 *h = set->data; + struct hash_netnet4 *h = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; struct hash_netnet4_elem e = { }; struct ip_set_ext ext = IP_SET_INIT_UEXT(set); u32 ip = 0, ip_to = 0; - u32 ip2 = 0, ip2_from = 0, ip2_to = 0, ipn; - u64 n = 0, m = 0; + u32 ip2 = 0, ip2_from = 0, ip2_to = 0, i = 0; int ret; if (tb[IPSET_ATTR_LINENO]) @@ -245,19 +244,6 @@ hash_netnet4_uadt(struct ip_set *set, struct nlattr *tb[], } else { ip_set_mask_from_to(ip2_from, ip2_to, e.cidr[1]); } - ipn = ip; - do { - ipn = ip_set_range_to_cidr(ipn, ip_to, &e.cidr[0]); - n++; - } while (ipn++ < ip_to); - ipn = ip2_from; - do { - ipn = ip_set_range_to_cidr(ipn, ip2_to, &e.cidr[1]); - m++; - } while (ipn++ < ip2_to); - - if (n*m > IPSET_MAX_RANGE) - return -ERANGE; if (retried) { ip = ntohl(h->next.ip[0]); @@ -270,7 +256,12 @@ hash_netnet4_uadt(struct ip_set *set, struct nlattr *tb[], e.ip[0] = htonl(ip); ip = ip_set_range_to_cidr(ip, ip_to, &e.cidr[0]); do { + i++; e.ip[1] = htonl(ip2); + if (i > IPSET_MAX_RANGE) { + hash_netnet4_data_next(&h->next, &e); + return -ERANGE; + } ip2 = ip_set_range_to_cidr(ip2, ip2_to, &e.cidr[1]); ret = adtfn(set, &e, &ext, 
&ext, flags); if (ret && !ip_set_eexist(ret, flags)) diff --git a/net/netfilter/ipset/ip_set_hash_netport.c b/net/netfilter/ipset/ip_set_hash_netport.c index 09cf72eb37f8..d1a0628df4ef 100644 --- a/net/netfilter/ipset/ip_set_hash_netport.c +++ b/net/netfilter/ipset/ip_set_hash_netport.c @@ -154,12 +154,11 @@ static int hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[], enum ipset_adt adt, u32 *lineno, u32 flags, bool retried) { - const struct hash_netport4 *h = set->data; + struct hash_netport4 *h = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; struct hash_netport4_elem e = { .cidr = HOST_MASK - 1 }; struct ip_set_ext ext = IP_SET_INIT_UEXT(set); - u32 port, port_to, p = 0, ip = 0, ip_to = 0, ipn; - u64 n = 0; + u32 port, port_to, p = 0, ip = 0, ip_to = 0, i = 0; bool with_ports = false; u8 cidr; int ret; @@ -236,14 +235,6 @@ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[], } else { ip_set_mask_from_to(ip, ip_to, e.cidr + 1); } - ipn = ip; - do { - ipn = ip_set_range_to_cidr(ipn, ip_to, &cidr); - n++; - } while (ipn++ < ip_to); - - if (n*(port_to - port + 1) > IPSET_MAX_RANGE) - return -ERANGE; if (retried) { ip = ntohl(h->next.ip); @@ -255,8 +246,12 @@ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[], e.ip = htonl(ip); ip = ip_set_range_to_cidr(ip, ip_to, &cidr); e.cidr = cidr - 1; - for (; p <= port_to; p++) { + for (; p <= port_to; p++, i++) { e.port = htons(p); + if (i > IPSET_MAX_RANGE) { + hash_netport4_data_next(&h->next, &e); + return -ERANGE; + } ret = adtfn(set, &e, &ext, &ext, flags); if (ret && !ip_set_eexist(ret, flags)) return ret; diff --git a/net/netfilter/ipset/ip_set_hash_netportnet.c b/net/netfilter/ipset/ip_set_hash_netportnet.c index 19bcdb3141f6..005a7ce87217 100644 --- a/net/netfilter/ipset/ip_set_hash_netportnet.c +++ b/net/netfilter/ipset/ip_set_hash_netportnet.c @@ -173,17 +173,26 @@ hash_netportnet4_kadt(struct ip_set *set, const struct sk_buff *skb, return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags); } +static u32 +hash_netportnet4_range_to_cidr(u32 from, u32 to, u8 *cidr) +{ + if (from == 0 && to == UINT_MAX) { + *cidr = 0; + return to; + } + return ip_set_range_to_cidr(from, to, cidr); +} + static int hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[], enum ipset_adt adt, u32 *lineno, u32 flags, bool retried) { - const struct hash_netportnet4 *h = set->data; + struct hash_netportnet4 *h = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; struct hash_netportnet4_elem e = { }; struct ip_set_ext ext = IP_SET_INIT_UEXT(set); u32 ip = 0, ip_to = 0, p = 0, port, port_to; - u32 ip2_from = 0, ip2_to = 0, ip2, ipn; - u64 n = 0, m = 0; + u32 ip2_from = 0, ip2_to = 0, ip2, i = 0; bool with_ports = false; int ret; @@ -285,19 +294,6 @@ hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[], } else { ip_set_mask_from_to(ip2_from, ip2_to, e.cidr[1]); } - ipn = ip; - do { - ipn = ip_set_range_to_cidr(ipn, ip_to, &e.cidr[0]); - n++; - } while (ipn++ < ip_to); - ipn = ip2_from; - do { - ipn = ip_set_range_to_cidr(ipn, ip2_to, &e.cidr[1]); - m++; - } while (ipn++ < ip2_to); - - if (n*m*(port_to - port + 1) > IPSET_MAX_RANGE) - return -ERANGE; if (retried) { ip = ntohl(h->next.ip[0]); @@ -310,13 +306,19 @@ hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[], do { e.ip[0] = htonl(ip); - ip = ip_set_range_to_cidr(ip, ip_to, &e.cidr[0]); + ip = hash_netportnet4_range_to_cidr(ip, ip_to, &e.cidr[0]); for (; p <= port_to; p++) { e.port = htons(p); do { + i++; e.ip[1] = htonl(ip2); - ip2 = 
ip_set_range_to_cidr(ip2, ip2_to, - &e.cidr[1]); + if (i > IPSET_MAX_RANGE) { + hash_netportnet4_data_next(&h->next, + &e); + return -ERANGE; + } + ip2 = hash_netportnet4_range_to_cidr(ip2, + ip2_to, &e.cidr[1]); ret = adtfn(set, &e, &ext, &ext, flags); if (ret && !ip_set_eexist(ret, flags)) return ret; diff --git a/net/netfilter/nf_conntrack_proto_icmpv6.c b/net/netfilter/nf_conntrack_proto_icmpv6.c index 61e3b05cf02c..1020d67600a9 100644 --- a/net/netfilter/nf_conntrack_proto_icmpv6.c +++ b/net/netfilter/nf_conntrack_proto_icmpv6.c @@ -129,6 +129,56 @@ static void icmpv6_error_log(const struct sk_buff *skb, nf_l4proto_log_invalid(skb, state, IPPROTO_ICMPV6, "%s", msg); } +static noinline_for_stack int +nf_conntrack_icmpv6_redirect(struct nf_conn *tmpl, struct sk_buff *skb, + unsigned int dataoff, + const struct nf_hook_state *state) +{ + u8 hl = ipv6_hdr(skb)->hop_limit; + union nf_inet_addr outer_daddr; + union { + struct nd_opt_hdr nd_opt; + struct rd_msg rd_msg; + } tmp; + const struct nd_opt_hdr *nd_opt; + const struct rd_msg *rd_msg; + + rd_msg = skb_header_pointer(skb, dataoff, sizeof(*rd_msg), &tmp.rd_msg); + if (!rd_msg) { + icmpv6_error_log(skb, state, "short redirect"); + return -NF_ACCEPT; + } + + if (rd_msg->icmph.icmp6_code != 0) + return NF_ACCEPT; + + if (hl != 255 || !(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL)) { + icmpv6_error_log(skb, state, "invalid saddr or hoplimit for redirect"); + return -NF_ACCEPT; + } + + dataoff += sizeof(*rd_msg); + + /* warning: rd_msg no longer usable after this call */ + nd_opt = skb_header_pointer(skb, dataoff, sizeof(*nd_opt), &tmp.nd_opt); + if (!nd_opt || nd_opt->nd_opt_len == 0) { + icmpv6_error_log(skb, state, "redirect without options"); + return -NF_ACCEPT; + } + + /* We could call ndisc_parse_options(), but it would need + * skb_linearize() and a bit more work. + */ + if (nd_opt->nd_opt_type != ND_OPT_REDIRECT_HDR) + return NF_ACCEPT; + + memcpy(&outer_daddr.ip6, &ipv6_hdr(skb)->daddr, + sizeof(outer_daddr.ip6)); + dataoff += 8; + return nf_conntrack_inet_error(tmpl, skb, dataoff, state, + IPPROTO_ICMPV6, &outer_daddr); +} + int nf_conntrack_icmpv6_error(struct nf_conn *tmpl, struct sk_buff *skb, unsigned int dataoff, @@ -159,6 +209,9 @@ int nf_conntrack_icmpv6_error(struct nf_conn *tmpl, return NF_ACCEPT; } + if (icmp6h->icmp6_type == NDISC_REDIRECT) + return nf_conntrack_icmpv6_redirect(tmpl, skb, dataoff, state); + /* is not error message ? 
*/ if (icmp6h->icmp6_type >= 128) return NF_ACCEPT; diff --git a/net/netfilter/nf_flow_table_offload.c b/net/netfilter/nf_flow_table_offload.c index 66c9a6c2b9cf..336f282a221f 100644 --- a/net/netfilter/nf_flow_table_offload.c +++ b/net/netfilter/nf_flow_table_offload.c @@ -372,12 +372,12 @@ static void flow_offload_ipv6_mangle(struct nf_flow_rule *flow_rule, const __be32 *addr, const __be32 *mask) { struct flow_action_entry *entry; - int i, j; + int i; - for (i = 0, j = 0; i < sizeof(struct in6_addr) / sizeof(u32); i += sizeof(u32), j++) { + for (i = 0; i < sizeof(struct in6_addr) / sizeof(u32); i++) { entry = flow_action_entry_next(flow_rule); flow_offload_mangle(entry, FLOW_ACT_MANGLE_HDR_TYPE_IP6, - offset + i, &addr[j], mask); + offset + i * sizeof(u32), &addr[i], mask); } } diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 3fac57d66dda..81bd13b3d8fd 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -465,8 +465,9 @@ static int nft_delrule_by_chain(struct nft_ctx *ctx) return 0; } -static int nft_trans_set_add(const struct nft_ctx *ctx, int msg_type, - struct nft_set *set) +static int __nft_trans_set_add(const struct nft_ctx *ctx, int msg_type, + struct nft_set *set, + const struct nft_set_desc *desc) { struct nft_trans *trans; @@ -474,17 +475,28 @@ static int nft_trans_set_add(const struct nft_ctx *ctx, int msg_type, if (trans == NULL) return -ENOMEM; - if (msg_type == NFT_MSG_NEWSET && ctx->nla[NFTA_SET_ID] != NULL) { + if (msg_type == NFT_MSG_NEWSET && ctx->nla[NFTA_SET_ID] && !desc) { nft_trans_set_id(trans) = ntohl(nla_get_be32(ctx->nla[NFTA_SET_ID])); nft_activate_next(ctx->net, set); } nft_trans_set(trans) = set; + if (desc) { + nft_trans_set_update(trans) = true; + nft_trans_set_gc_int(trans) = desc->gc_int; + nft_trans_set_timeout(trans) = desc->timeout; + } nft_trans_commit_list_add_tail(ctx->net, trans); return 0; } +static int nft_trans_set_add(const struct nft_ctx *ctx, int msg_type, + struct nft_set *set) +{ + return __nft_trans_set_add(ctx, msg_type, set, NULL); +} + static int nft_delset(const struct nft_ctx *ctx, struct nft_set *set) { int err; @@ -3635,8 +3647,7 @@ static bool nft_set_ops_candidate(const struct nft_set_type *type, u32 flags) static const struct nft_set_ops * nft_select_set_ops(const struct nft_ctx *ctx, const struct nlattr * const nla[], - const struct nft_set_desc *desc, - enum nft_set_policies policy) + const struct nft_set_desc *desc) { struct nftables_pernet *nft_net = nft_pernet(ctx->net); const struct nft_set_ops *ops, *bops; @@ -3665,7 +3676,7 @@ nft_select_set_ops(const struct nft_ctx *ctx, if (!ops->estimate(desc, flags, &est)) continue; - switch (policy) { + switch (desc->policy) { case NFT_SET_POL_PERFORMANCE: if (est.lookup < best.lookup) break; @@ -3900,8 +3911,10 @@ static int nf_tables_fill_set_concat(struct sk_buff *skb, static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx, const struct nft_set *set, u16 event, u16 flags) { - struct nlmsghdr *nlh; + u64 timeout = READ_ONCE(set->timeout); + u32 gc_int = READ_ONCE(set->gc_int); u32 portid = ctx->portid; + struct nlmsghdr *nlh; struct nlattr *nest; u32 seq = ctx->seq; int i; @@ -3937,13 +3950,13 @@ static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx, nla_put_be32(skb, NFTA_SET_OBJ_TYPE, htonl(set->objtype))) goto nla_put_failure; - if (set->timeout && + if (timeout && nla_put_be64(skb, NFTA_SET_TIMEOUT, - nf_jiffies64_to_msecs(set->timeout), + nf_jiffies64_to_msecs(timeout), 
NFTA_SET_PAD)) goto nla_put_failure; - if (set->gc_int && - nla_put_be32(skb, NFTA_SET_GC_INTERVAL, htonl(set->gc_int))) + if (gc_int && + nla_put_be32(skb, NFTA_SET_GC_INTERVAL, htonl(gc_int))) goto nla_put_failure; if (set->policy != NFT_SET_POL_PERFORMANCE) { @@ -4244,15 +4257,94 @@ static int nf_tables_set_desc_parse(struct nft_set_desc *desc, return err; } +static int nft_set_expr_alloc(struct nft_ctx *ctx, struct nft_set *set, + const struct nlattr * const *nla, + struct nft_expr **exprs, int *num_exprs, + u32 flags) +{ + struct nft_expr *expr; + int err, i; + + if (nla[NFTA_SET_EXPR]) { + expr = nft_set_elem_expr_alloc(ctx, set, nla[NFTA_SET_EXPR]); + if (IS_ERR(expr)) { + err = PTR_ERR(expr); + goto err_set_expr_alloc; + } + exprs[0] = expr; + (*num_exprs)++; + } else if (nla[NFTA_SET_EXPRESSIONS]) { + struct nlattr *tmp; + int left; + + if (!(flags & NFT_SET_EXPR)) { + err = -EINVAL; + goto err_set_expr_alloc; + } + i = 0; + nla_for_each_nested(tmp, nla[NFTA_SET_EXPRESSIONS], left) { + if (i == NFT_SET_EXPR_MAX) { + err = -E2BIG; + goto err_set_expr_alloc; + } + if (nla_type(tmp) != NFTA_LIST_ELEM) { + err = -EINVAL; + goto err_set_expr_alloc; + } + expr = nft_set_elem_expr_alloc(ctx, set, tmp); + if (IS_ERR(expr)) { + err = PTR_ERR(expr); + goto err_set_expr_alloc; + } + exprs[i++] = expr; + (*num_exprs)++; + } + } + + return 0; + +err_set_expr_alloc: + for (i = 0; i < *num_exprs; i++) + nft_expr_destroy(ctx, exprs[i]); + + return err; +} + +static bool nft_set_is_same(const struct nft_set *set, + const struct nft_set_desc *desc, + struct nft_expr *exprs[], u32 num_exprs, u32 flags) +{ + int i; + + if (set->ktype != desc->ktype || + set->dtype != desc->dtype || + set->flags != flags || + set->klen != desc->klen || + set->dlen != desc->dlen || + set->field_count != desc->field_count || + set->num_exprs != num_exprs) + return false; + + for (i = 0; i < desc->field_count; i++) { + if (set->field_len[i] != desc->field_len[i]) + return false; + } + + for (i = 0; i < num_exprs; i++) { + if (set->exprs[i]->ops != exprs[i]->ops) + return false; + } + + return true; +} + static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info, const struct nlattr * const nla[]) { - u32 ktype, dtype, flags, policy, gc_int, objtype; struct netlink_ext_ack *extack = info->extack; u8 genmask = nft_genmask_next(info->net); u8 family = info->nfmsg->nfgen_family; const struct nft_set_ops *ops; - struct nft_expr *expr = NULL; struct net *net = info->net; struct nft_set_desc desc; struct nft_table *table; @@ -4260,10 +4352,11 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info, struct nft_set *set; struct nft_ctx ctx; size_t alloc_size; - u64 timeout; + int num_exprs = 0; char *name; int err, i; u16 udlen; + u32 flags; u64 size; if (nla[NFTA_SET_TABLE] == NULL || @@ -4274,10 +4367,10 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info, memset(&desc, 0, sizeof(desc)); - ktype = NFT_DATA_VALUE; + desc.ktype = NFT_DATA_VALUE; if (nla[NFTA_SET_KEY_TYPE] != NULL) { - ktype = ntohl(nla_get_be32(nla[NFTA_SET_KEY_TYPE])); - if ((ktype & NFT_DATA_RESERVED_MASK) == NFT_DATA_RESERVED_MASK) + desc.ktype = ntohl(nla_get_be32(nla[NFTA_SET_KEY_TYPE])); + if ((desc.ktype & NFT_DATA_RESERVED_MASK) == NFT_DATA_RESERVED_MASK) return -EINVAL; } @@ -4302,17 +4395,17 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info, return -EOPNOTSUPP; } - dtype = 0; + desc.dtype = 0; if (nla[NFTA_SET_DATA_TYPE] != NULL) { if (!(flags & 
NFT_SET_MAP)) return -EINVAL; - dtype = ntohl(nla_get_be32(nla[NFTA_SET_DATA_TYPE])); - if ((dtype & NFT_DATA_RESERVED_MASK) == NFT_DATA_RESERVED_MASK && - dtype != NFT_DATA_VERDICT) + desc.dtype = ntohl(nla_get_be32(nla[NFTA_SET_DATA_TYPE])); + if ((desc.dtype & NFT_DATA_RESERVED_MASK) == NFT_DATA_RESERVED_MASK && + desc.dtype != NFT_DATA_VERDICT) return -EINVAL; - if (dtype != NFT_DATA_VERDICT) { + if (desc.dtype != NFT_DATA_VERDICT) { if (nla[NFTA_SET_DATA_LEN] == NULL) return -EINVAL; desc.dlen = ntohl(nla_get_be32(nla[NFTA_SET_DATA_LEN])); @@ -4327,34 +4420,34 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info, if (!(flags & NFT_SET_OBJECT)) return -EINVAL; - objtype = ntohl(nla_get_be32(nla[NFTA_SET_OBJ_TYPE])); - if (objtype == NFT_OBJECT_UNSPEC || - objtype > NFT_OBJECT_MAX) + desc.objtype = ntohl(nla_get_be32(nla[NFTA_SET_OBJ_TYPE])); + if (desc.objtype == NFT_OBJECT_UNSPEC || + desc.objtype > NFT_OBJECT_MAX) return -EOPNOTSUPP; } else if (flags & NFT_SET_OBJECT) return -EINVAL; else - objtype = NFT_OBJECT_UNSPEC; + desc.objtype = NFT_OBJECT_UNSPEC; - timeout = 0; + desc.timeout = 0; if (nla[NFTA_SET_TIMEOUT] != NULL) { if (!(flags & NFT_SET_TIMEOUT)) return -EINVAL; - err = nf_msecs_to_jiffies64(nla[NFTA_SET_TIMEOUT], &timeout); + err = nf_msecs_to_jiffies64(nla[NFTA_SET_TIMEOUT], &desc.timeout); if (err) return err; } - gc_int = 0; + desc.gc_int = 0; if (nla[NFTA_SET_GC_INTERVAL] != NULL) { if (!(flags & NFT_SET_TIMEOUT)) return -EINVAL; - gc_int = ntohl(nla_get_be32(nla[NFTA_SET_GC_INTERVAL])); + desc.gc_int = ntohl(nla_get_be32(nla[NFTA_SET_GC_INTERVAL])); } - policy = NFT_SET_POL_PERFORMANCE; + desc.policy = NFT_SET_POL_PERFORMANCE; if (nla[NFTA_SET_POLICY] != NULL) - policy = ntohl(nla_get_be32(nla[NFTA_SET_POLICY])); + desc.policy = ntohl(nla_get_be32(nla[NFTA_SET_POLICY])); if (nla[NFTA_SET_DESC] != NULL) { err = nf_tables_set_desc_parse(&desc, nla[NFTA_SET_DESC]); @@ -4386,6 +4479,8 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info, return PTR_ERR(set); } } else { + struct nft_expr *exprs[NFT_SET_EXPR_MAX] = {}; + if (info->nlh->nlmsg_flags & NLM_F_EXCL) { NL_SET_BAD_ATTR(extack, nla[NFTA_SET_NAME]); return -EEXIST; @@ -4393,13 +4488,29 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info, if (info->nlh->nlmsg_flags & NLM_F_REPLACE) return -EOPNOTSUPP; - return 0; + err = nft_set_expr_alloc(&ctx, set, nla, exprs, &num_exprs, flags); + if (err < 0) + return err; + + err = 0; + if (!nft_set_is_same(set, &desc, exprs, num_exprs, flags)) { + NL_SET_BAD_ATTR(extack, nla[NFTA_SET_NAME]); + err = -EEXIST; + } + + for (i = 0; i < num_exprs; i++) + nft_expr_destroy(&ctx, exprs[i]); + + if (err < 0) + return err; + + return __nft_trans_set_add(&ctx, NFT_MSG_NEWSET, set, &desc); } if (!(info->nlh->nlmsg_flags & NLM_F_CREATE)) return -ENOENT; - ops = nft_select_set_ops(&ctx, nla, &desc, policy); + ops = nft_select_set_ops(&ctx, nla, &desc); if (IS_ERR(ops)) return PTR_ERR(ops); @@ -4439,18 +4550,18 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info, set->table = table; write_pnet(&set->net, net); set->ops = ops; - set->ktype = ktype; + set->ktype = desc.ktype; set->klen = desc.klen; - set->dtype = dtype; - set->objtype = objtype; + set->dtype = desc.dtype; + set->objtype = desc.objtype; set->dlen = desc.dlen; set->flags = flags; set->size = desc.size; - set->policy = policy; + set->policy = desc.policy; set->udlen = udlen; set->udata = udata; - set->timeout = 
timeout; - set->gc_int = gc_int; + set->timeout = desc.timeout; + set->gc_int = desc.gc_int; set->field_count = desc.field_count; for (i = 0; i < desc.field_count; i++) @@ -4460,43 +4571,11 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info, if (err < 0) goto err_set_init; - if (nla[NFTA_SET_EXPR]) { - expr = nft_set_elem_expr_alloc(&ctx, set, nla[NFTA_SET_EXPR]); - if (IS_ERR(expr)) { - err = PTR_ERR(expr); - goto err_set_expr_alloc; - } - set->exprs[0] = expr; - set->num_exprs++; - } else if (nla[NFTA_SET_EXPRESSIONS]) { - struct nft_expr *expr; - struct nlattr *tmp; - int left; - - if (!(flags & NFT_SET_EXPR)) { - err = -EINVAL; - goto err_set_expr_alloc; - } - i = 0; - nla_for_each_nested(tmp, nla[NFTA_SET_EXPRESSIONS], left) { - if (i == NFT_SET_EXPR_MAX) { - err = -E2BIG; - goto err_set_expr_alloc; - } - if (nla_type(tmp) != NFTA_LIST_ELEM) { - err = -EINVAL; - goto err_set_expr_alloc; - } - expr = nft_set_elem_expr_alloc(&ctx, set, tmp); - if (IS_ERR(expr)) { - err = PTR_ERR(expr); - goto err_set_expr_alloc; - } - set->exprs[i++] = expr; - set->num_exprs++; - } - } + err = nft_set_expr_alloc(&ctx, set, nla, set->exprs, &num_exprs, flags); + if (err < 0) + goto err_set_destroy; + set->num_exprs = num_exprs; set->handle = nf_tables_alloc_handle(table); err = nft_trans_set_add(&ctx, NFT_MSG_NEWSET, set); @@ -4510,7 +4589,7 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info, err_set_expr_alloc: for (i = 0; i < set->num_exprs; i++) nft_expr_destroy(&ctx, set->exprs[i]); - +err_set_destroy: ops->destroy(set); err_set_init: kfree(set->name); @@ -5815,7 +5894,7 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set, return err; } else if (set->flags & NFT_SET_TIMEOUT && !(flags & NFT_SET_ELEM_INTERVAL_END)) { - timeout = set->timeout; + timeout = READ_ONCE(set->timeout); } expiration = 0; @@ -5916,7 +5995,7 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set, if (err < 0) goto err_parse_key_end; - if (timeout != set->timeout) { + if (timeout != READ_ONCE(set->timeout)) { err = nft_set_ext_add(&tmpl, NFT_SET_EXT_TIMEOUT); if (err < 0) goto err_parse_key_end; @@ -8771,14 +8850,20 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb) nft_flow_rule_destroy(nft_trans_flow_rule(trans)); break; case NFT_MSG_NEWSET: - nft_clear(net, nft_trans_set(trans)); - /* This avoids hitting -EBUSY when deleting the table - * from the transaction. - */ - if (nft_set_is_anonymous(nft_trans_set(trans)) && - !list_empty(&nft_trans_set(trans)->bindings)) - trans->ctx.table->use--; + if (nft_trans_set_update(trans)) { + struct nft_set *set = nft_trans_set(trans); + WRITE_ONCE(set->timeout, nft_trans_set_timeout(trans)); + WRITE_ONCE(set->gc_int, nft_trans_set_gc_int(trans)); + } else { + nft_clear(net, nft_trans_set(trans)); + /* This avoids hitting -EBUSY when deleting the table + * from the transaction. 
+ */ + if (nft_set_is_anonymous(nft_trans_set(trans)) && + !list_empty(&nft_trans_set(trans)->bindings)) + trans->ctx.table->use--; + } nf_tables_set_notify(&trans->ctx, nft_trans_set(trans), NFT_MSG_NEWSET, GFP_KERNEL); nft_trans_destroy(trans); @@ -9000,6 +9085,10 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action) nft_trans_destroy(trans); break; case NFT_MSG_NEWSET: + if (nft_trans_set_update(trans)) { + nft_trans_destroy(trans); + break; + } trans->ctx.table->use--; if (nft_trans_set_bound(trans)) { nft_trans_destroy(trans); diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c index a207f0b8137b..d928d5a24bbc 100644 --- a/net/nfc/netlink.c +++ b/net/nfc/netlink.c @@ -1497,6 +1497,7 @@ static int nfc_genl_se_io(struct sk_buff *skb, struct genl_info *info) u32 dev_idx, se_idx; u8 *apdu; size_t apdu_len; + int rc; if (!info->attrs[NFC_ATTR_DEVICE_INDEX] || !info->attrs[NFC_ATTR_SE_INDEX] || @@ -1510,25 +1511,37 @@ static int nfc_genl_se_io(struct sk_buff *skb, struct genl_info *info) if (!dev) return -ENODEV; - if (!dev->ops || !dev->ops->se_io) - return -ENOTSUPP; + if (!dev->ops || !dev->ops->se_io) { + rc = -EOPNOTSUPP; + goto put_dev; + } apdu_len = nla_len(info->attrs[NFC_ATTR_SE_APDU]); - if (apdu_len == 0) - return -EINVAL; + if (apdu_len == 0) { + rc = -EINVAL; + goto put_dev; + } apdu = nla_data(info->attrs[NFC_ATTR_SE_APDU]); - if (!apdu) - return -EINVAL; + if (!apdu) { + rc = -EINVAL; + goto put_dev; + } ctx = kzalloc(sizeof(struct se_io_ctx), GFP_KERNEL); - if (!ctx) - return -ENOMEM; + if (!ctx) { + rc = -ENOMEM; + goto put_dev; + } ctx->dev_idx = dev_idx; ctx->se_idx = se_idx; - return nfc_se_io(dev, se_idx, apdu, apdu_len, se_io_cb, ctx); + rc = nfc_se_io(dev, se_idx, apdu, apdu_len, se_io_cb, ctx); + +put_dev: + nfc_put_device(dev); + return rc; } static int nfc_genl_vendor_cmd(struct sk_buff *skb, @@ -1551,14 +1564,21 @@ static int nfc_genl_vendor_cmd(struct sk_buff *skb, subcmd = nla_get_u32(info->attrs[NFC_ATTR_VENDOR_SUBCMD]); dev = nfc_get_device(dev_idx); - if (!dev || !dev->vendor_cmds || !dev->n_vendor_cmds) + if (!dev) return -ENODEV; + if (!dev->vendor_cmds || !dev->n_vendor_cmds) { + err = -ENODEV; + goto put_dev; + } + if (info->attrs[NFC_ATTR_VENDOR_DATA]) { data = nla_data(info->attrs[NFC_ATTR_VENDOR_DATA]); data_len = nla_len(info->attrs[NFC_ATTR_VENDOR_DATA]); - if (data_len == 0) - return -EINVAL; + if (data_len == 0) { + err = -EINVAL; + goto put_dev; + } } else { data = NULL; data_len = 0; @@ -1573,10 +1593,14 @@ static int nfc_genl_vendor_cmd(struct sk_buff *skb, dev->cur_cmd_info = info; err = cmd->doit(dev, data, data_len); dev->cur_cmd_info = NULL; - return err; + goto put_dev; } - return -EOPNOTSUPP; + err = -EOPNOTSUPP; + +put_dev: + nfc_put_device(dev); + return err; } /* message building helper */ diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index 94c48122fdc3..795a25ecb893 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c @@ -946,6 +946,7 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info) struct sw_flow_mask mask; struct sk_buff *reply; struct datapath *dp; + struct sw_flow_key *key; struct sw_flow_actions *acts; struct sw_flow_match match; u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]); @@ -973,24 +974,26 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info) } /* Extract key. 
*/ - ovs_match_init(&match, &new_flow->key, false, &mask); + key = kzalloc(sizeof(*key), GFP_KERNEL); + if (!key) { + error = -ENOMEM; + goto err_kfree_key; + } + + ovs_match_init(&match, key, false, &mask); error = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY], a[OVS_FLOW_ATTR_MASK], log); if (error) goto err_kfree_flow; + ovs_flow_mask_key(&new_flow->key, key, true, &mask); + /* Extract flow identifier. */ error = ovs_nla_get_identifier(&new_flow->id, a[OVS_FLOW_ATTR_UFID], - &new_flow->key, log); + key, log); if (error) goto err_kfree_flow; - /* unmasked key is needed to match when ufid is not used. */ - if (ovs_identifier_is_key(&new_flow->id)) - match.key = new_flow->id.unmasked_key; - - ovs_flow_mask_key(&new_flow->key, &new_flow->key, true, &mask); - /* Validate actions. */ error = ovs_nla_copy_actions(net, a[OVS_FLOW_ATTR_ACTIONS], &new_flow->key, &acts, log); @@ -1017,7 +1020,7 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info) if (ovs_identifier_is_ufid(&new_flow->id)) flow = ovs_flow_tbl_lookup_ufid(&dp->table, &new_flow->id); if (!flow) - flow = ovs_flow_tbl_lookup(&dp->table, &new_flow->key); + flow = ovs_flow_tbl_lookup(&dp->table, key); if (likely(!flow)) { rcu_assign_pointer(new_flow->sf_acts, acts); @@ -1087,6 +1090,8 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info) if (reply) ovs_notify(&dp_flow_genl_family, reply, info); + + kfree(key); return 0; err_unlock_ovs: @@ -1096,6 +1101,8 @@ err_kfree_acts: ovs_nla_free_flow_actions(acts); err_kfree_flow: ovs_flow_free(new_flow, false); +err_kfree_key: + kfree(key); error: return error; } diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index ceca0d6c41b5..7f9f2d0ef0e6 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -1888,12 +1888,22 @@ oom: static void packet_parse_headers(struct sk_buff *skb, struct socket *sock) { + int depth; + if ((!skb->protocol || skb->protocol == htons(ETH_P_ALL)) && sock->type == SOCK_RAW) { skb_reset_mac_header(skb); skb->protocol = dev_parse_header_protocol(skb); } + /* Move network header to the right position for VLAN tagged packets */ + if (likely(skb->dev->type == ARPHRD_ETHER) && + eth_type_vlan(skb->protocol) && + __vlan_get_protocol(skb, skb->protocol, &depth) != 0) { + if (pskb_may_pull(skb, depth)) + skb_set_network_header(skb, depth); + } + skb_probe_transport_header(skb); } @@ -3008,6 +3018,11 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) skb->mark = sockc.mark; skb->tstamp = sockc.transmit_time; + if (unlikely(extra_len == 4)) + skb->no_fcs = 1; + + packet_parse_headers(skb, sock); + if (has_vnet_hdr) { err = virtio_net_hdr_to_skb(skb, &vnet_hdr, vio_le()); if (err) @@ -3016,11 +3031,6 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) virtio_net_hdr_set_proto(skb, &vnet_hdr); } - packet_parse_headers(skb, sock); - - if (unlikely(extra_len == 4)) - skb->no_fcs = 1; - err = po->xmit(skb); if (unlikely(err != 0)) { if (err > 0) diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c index 9683617db704..08c117bc083e 100644 --- a/net/rxrpc/output.c +++ b/net/rxrpc/output.c @@ -93,7 +93,7 @@ static size_t rxrpc_fill_out_ack(struct rxrpc_connection *conn, *_hard_ack = hard_ack; *_top = top; - pkt->ack.bufferSpace = htons(8); + pkt->ack.bufferSpace = htons(0); pkt->ack.maxSkew = htons(0); pkt->ack.firstPacket = htonl(hard_ack + 1); pkt->ack.previousPacket = htonl(call->ackr_highest_seq); diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c index 
3c3a626459de..d4e4e94f4f98 100644 --- a/net/rxrpc/sendmsg.c +++ b/net/rxrpc/sendmsg.c @@ -716,7 +716,7 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len) if (call->tx_total_len != -1 || call->tx_pending || call->tx_top != 0) - goto error_put; + goto out_put_unlock; call->tx_total_len = p.call.tx_total_len; } } diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c index 742c7d49a958..8d1ef858db87 100644 --- a/net/sched/cls_tcindex.c +++ b/net/sched/cls_tcindex.c @@ -332,7 +332,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, struct tcindex_filter_result *r, struct nlattr **tb, struct nlattr *est, u32 flags, struct netlink_ext_ack *extack) { - struct tcindex_filter_result new_filter_result, *old_r = r; + struct tcindex_filter_result new_filter_result; struct tcindex_data *cp = NULL, *oldp; struct tcindex_filter *f = NULL; /* make gcc behave */ struct tcf_result cr = {}; @@ -401,7 +401,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, err = tcindex_filter_result_init(&new_filter_result, cp, net); if (err < 0) goto errout_alloc; - if (old_r) + if (r) cr = r->res; err = -EBUSY; @@ -478,14 +478,6 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, tcf_bind_filter(tp, &cr, base); } - if (old_r && old_r != r) { - err = tcindex_filter_result_init(old_r, cp, net); - if (err < 0) { - kfree(f); - goto errout_alloc; - } - } - oldp = p; r->res = cr; tcf_exts_change(&r->exts, &e); diff --git a/net/sched/ematch.c b/net/sched/ematch.c index 4ce681361851..5c1235e6076a 100644 --- a/net/sched/ematch.c +++ b/net/sched/ematch.c @@ -255,6 +255,8 @@ static int tcf_em_validate(struct tcf_proto *tp, * the value carried. */ if (em_hdr->flags & TCF_EM_SIMPLE) { + if (em->ops->datalen > 0) + goto errout; if (data_len < sizeof(u32)) goto errout; em->data = *(u32 *) data; diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c index 70fe1c5e44ad..33737169cc2d 100644 --- a/net/sched/sch_atm.c +++ b/net/sched/sch_atm.c @@ -397,10 +397,13 @@ static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch, result = tcf_classify(skb, NULL, fl, &res, true); if (result < 0) continue; + if (result == TC_ACT_SHOT) + goto done; + flow = (struct atm_flow_data *)res.class; if (!flow) flow = lookup_flow(sch, res.classid); - goto done; + goto drop; } } flow = NULL; diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c index fd7e10567371..46b3dd71777d 100644 --- a/net/sched/sch_cbq.c +++ b/net/sched/sch_cbq.c @@ -231,6 +231,8 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) result = tcf_classify(skb, NULL, fl, &res, true); if (!fl || result < 0) goto fallback; + if (result == TC_ACT_SHOT) + return NULL; cl = (void *)res.class; if (!cl) { @@ -251,8 +253,6 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) case TC_ACT_TRAP: *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN; fallthrough; - case TC_ACT_SHOT: - return NULL; case TC_ACT_RECLASSIFY: return cbq_reclassify(skb, cl); } diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c index b46a416787ec..43ebf090029d 100644 --- a/net/sctp/sysctl.c +++ b/net/sctp/sysctl.c @@ -84,17 +84,18 @@ static struct ctl_table sctp_table[] = { { /* sentinel */ } }; +/* The following index defines are used in sctp_sysctl_net_register(). + * If you add new items to the sctp_net_table, please ensure that + * the index values of these defines hold the same meaning indicated by + * their macro names when they appear in sctp_net_table. 
+ */ +#define SCTP_RTO_MIN_IDX 0 +#define SCTP_RTO_MAX_IDX 1 +#define SCTP_PF_RETRANS_IDX 2 +#define SCTP_PS_RETRANS_IDX 3 + static struct ctl_table sctp_net_table[] = { - { - .procname = "rto_initial", - .data = &init_net.sctp.rto_initial, - .maxlen = sizeof(unsigned int), - .mode = 0644, - .proc_handler = proc_dointvec_minmax, - .extra1 = SYSCTL_ONE, - .extra2 = &timer_max - }, - { + [SCTP_RTO_MIN_IDX] = { .procname = "rto_min", .data = &init_net.sctp.rto_min, .maxlen = sizeof(unsigned int), @@ -103,7 +104,7 @@ static struct ctl_table sctp_net_table[] = { .extra1 = SYSCTL_ONE, .extra2 = &init_net.sctp.rto_max }, - { + [SCTP_RTO_MAX_IDX] = { .procname = "rto_max", .data = &init_net.sctp.rto_max, .maxlen = sizeof(unsigned int), @@ -112,6 +113,33 @@ static struct ctl_table sctp_net_table[] = { .extra1 = &init_net.sctp.rto_min, .extra2 = &timer_max }, + [SCTP_PF_RETRANS_IDX] = { + .procname = "pf_retrans", + .data = &init_net.sctp.pf_retrans, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = &init_net.sctp.ps_retrans, + }, + [SCTP_PS_RETRANS_IDX] = { + .procname = "ps_retrans", + .data = &init_net.sctp.ps_retrans, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &init_net.sctp.pf_retrans, + .extra2 = &ps_retrans_max, + }, + { + .procname = "rto_initial", + .data = &init_net.sctp.rto_initial, + .maxlen = sizeof(unsigned int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ONE, + .extra2 = &timer_max + }, { .procname = "rto_alpha_exp_divisor", .data = &init_net.sctp.rto_alpha, @@ -207,24 +235,6 @@ static struct ctl_table sctp_net_table[] = { .extra1 = SYSCTL_ONE, .extra2 = SYSCTL_INT_MAX, }, - { - .procname = "pf_retrans", - .data = &init_net.sctp.pf_retrans, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec_minmax, - .extra1 = SYSCTL_ZERO, - .extra2 = &init_net.sctp.ps_retrans, - }, - { - .procname = "ps_retrans", - .data = &init_net.sctp.ps_retrans, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec_minmax, - .extra1 = &init_net.sctp.pf_retrans, - .extra2 = &ps_retrans_max, - }, { .procname = "sndbuf_policy", .data = &init_net.sctp.sndbuf_policy, @@ -586,6 +596,11 @@ int sctp_sysctl_net_register(struct net *net) for (i = 0; table[i].data; i++) table[i].data += (char *)(&net->sctp) - (char *)&init_net.sctp; + table[SCTP_RTO_MIN_IDX].extra2 = &net->sctp.rto_max; + table[SCTP_RTO_MAX_IDX].extra1 = &net->sctp.rto_min; + table[SCTP_PF_RETRANS_IDX].extra2 = &net->sctp.ps_retrans; + table[SCTP_PS_RETRANS_IDX].extra1 = &net->sctp.pf_retrans; + net->sctp.sysctl_header = register_net_sysctl(net, "net/sctp", table); if (net->sctp.sysctl_header == NULL) { kfree(table); diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c index 5f42aa5fc612..2ff66a6a7e54 100644 --- a/net/sunrpc/auth_gss/auth_gss.c +++ b/net/sunrpc/auth_gss/auth_gss.c @@ -301,7 +301,7 @@ __gss_find_upcall(struct rpc_pipe *pipe, kuid_t uid, const struct gss_auth *auth list_for_each_entry(pos, &pipe->in_downcall, list) { if (!uid_eq(pos->uid, uid)) continue; - if (auth && pos->auth->service != auth->service) + if (pos->auth->service != auth->service) continue; refcount_inc(&pos->count); return pos; @@ -685,6 +685,21 @@ out: return err; } +static struct gss_upcall_msg * +gss_find_downcall(struct rpc_pipe *pipe, kuid_t uid) +{ + struct gss_upcall_msg *pos; + list_for_each_entry(pos, &pipe->in_downcall, list) { + if (!uid_eq(pos->uid, 
uid)) + continue; + if (!rpc_msg_is_inflight(&pos->msg)) + continue; + refcount_inc(&pos->count); + return pos; + } + return NULL; +} + #define MSG_BUF_MAXSIZE 1024 static ssize_t @@ -731,7 +746,7 @@ gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen) err = -ENOENT; /* Find a matching upcall */ spin_lock(&pipe->lock); - gss_msg = __gss_find_upcall(pipe, uid, NULL); + gss_msg = gss_find_downcall(pipe, uid); if (gss_msg == NULL) { spin_unlock(&pipe->lock); goto err_put_ctx; diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c index 1f2817195549..48b608cb5f5e 100644 --- a/net/sunrpc/auth_gss/svcauth_gss.c +++ b/net/sunrpc/auth_gss/svcauth_gss.c @@ -1162,18 +1162,23 @@ static int gss_read_proxy_verf(struct svc_rqst *rqstp, return res; inlen = svc_getnl(argv); - if (inlen > (argv->iov_len + rqstp->rq_arg.page_len)) + if (inlen > (argv->iov_len + rqstp->rq_arg.page_len)) { + kfree(in_handle->data); return SVC_DENIED; + } pages = DIV_ROUND_UP(inlen, PAGE_SIZE); in_token->pages = kcalloc(pages, sizeof(struct page *), GFP_KERNEL); - if (!in_token->pages) + if (!in_token->pages) { + kfree(in_handle->data); return SVC_DENIED; + } in_token->page_base = 0; in_token->page_len = inlen; for (i = 0; i < pages; i++) { in_token->pages[i] = alloc_page(GFP_KERNEL); if (!in_token->pages[i]) { + kfree(in_handle->data); gss_free_in_token_pages(in_token); return SVC_DENIED; } diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index ca2a494d727b..bbeb80e1133d 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c @@ -1375,7 +1375,7 @@ static int rpc_sockname(struct net *net, struct sockaddr *sap, size_t salen, break; default: err = -EAFNOSUPPORT; - goto out; + goto out_release; } if (err < 0) { dprintk("RPC: can't bind UDP socket (%d)\n", err); diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c index 1295f9ab839f..507ba8b79992 100644 --- a/net/sunrpc/xprtrdma/verbs.c +++ b/net/sunrpc/xprtrdma/verbs.c @@ -858,7 +858,7 @@ struct rpcrdma_req *rpcrdma_req_create(struct rpcrdma_xprt *r_xprt, size_t size, return req; out3: - kfree(req->rl_sendbuf); + rpcrdma_regbuf_free(req->rl_sendbuf); out2: kfree(req); out1: diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index 794ef3b3d7d4..c0fea678abb1 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c @@ -801,7 +801,7 @@ static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk, struct sk_psock *psock; struct sock *sk_redir; struct tls_rec *rec; - bool enospc, policy; + bool enospc, policy, redir_ingress; int err = 0, send; u32 delta = 0; @@ -846,6 +846,7 @@ more_data: } break; case __SK_REDIRECT: + redir_ingress = psock->redir_ingress; sk_redir = psock->sk_redir; memcpy(&msg_redir, msg, sizeof(*msg)); if (msg->apply_bytes < send) @@ -855,7 +856,8 @@ more_data: sk_msg_return_zero(sk, msg, send); msg->sg.size -= send; release_sock(sk); - err = tcp_bpf_sendmsg_redir(sk_redir, &msg_redir, send, flags); + err = tcp_bpf_sendmsg_redir(sk_redir, redir_ingress, + &msg_redir, send, flags); lock_sock(sk); if (err < 0) { *copied -= sk_msg_free_nocharge(sk, &msg_redir); diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index a579e28bd213..0a59a00cb581 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -1865,13 +1865,20 @@ restart_locked: unix_state_lock(sk); err = 0; - if (unix_peer(sk) == other) { + if (sk->sk_type == SOCK_SEQPACKET) { + /* We are here only when racing with unix_release_sock() + * is clearing @other. Never change state to TCP_CLOSE + * unlike SOCK_DGRAM wants. 
+ */ + unix_state_unlock(sk); + err = -EPIPE; + } else if (unix_peer(sk) == other) { unix_peer(sk) = NULL; unix_dgram_peer_wake_disconnect_wakeup(sk, other); + sk->sk_state = TCP_CLOSE; unix_state_unlock(sk); - sk->sk_state = TCP_CLOSE; unix_dgram_disconnected(sk, other); sock_put(other); err = -ECONNREFUSED; @@ -3401,6 +3408,7 @@ static int __init af_unix_init(void) rc = proto_register(&unix_stream_proto, 1); if (rc != 0) { pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__); + proto_unregister(&unix_dgram_proto); goto out; } diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c index b17dc9745188..94c1112f1c8c 100644 --- a/net/vmw_vsock/vmci_transport.c +++ b/net/vmw_vsock/vmci_transport.c @@ -1711,7 +1711,11 @@ static int vmci_transport_dgram_enqueue( if (!dg) return -ENOMEM; - memcpy_from_msg(VMCI_DG_PAYLOAD(dg), msg, len); + err = memcpy_from_msg(VMCI_DG_PAYLOAD(dg), msg, len); + if (err) { + kfree(dg); + return err; + } dg->dst = vmci_make_handle(remote_addr->svm_cid, remote_addr->svm_port); diff --git a/net/wireless/reg.c b/net/wireless/reg.c index 7b19a2087db9..d0fbe822e793 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c @@ -4247,8 +4247,10 @@ static int __init regulatory_init_db(void) return -EINVAL; err = load_builtin_regdb_keys(); - if (err) + if (err) { + platform_device_unregister(reg_pdev); return err; + } /* We always try to get an update for the static regdomain */ err = regulatory_hint_core(cfg80211_world_regdom->alpha2); diff --git a/samples/vfio-mdev/mdpy-fb.c b/samples/vfio-mdev/mdpy-fb.c index 9ec93d90e8a5..4eb7aa11cfbb 100644 --- a/samples/vfio-mdev/mdpy-fb.c +++ b/samples/vfio-mdev/mdpy-fb.c @@ -109,7 +109,7 @@ static int mdpy_fb_probe(struct pci_dev *pdev, ret = pci_request_regions(pdev, "mdpy-fb"); if (ret < 0) - return ret; + goto err_disable_dev; pci_read_config_dword(pdev, MDPY_FORMAT_OFFSET, &format); pci_read_config_dword(pdev, MDPY_WIDTH_OFFSET, &width); @@ -191,6 +191,9 @@ err_release_fb: err_release_regions: pci_release_regions(pdev); +err_disable_dev: + pci_disable_device(pdev); + return ret; } @@ -199,7 +202,10 @@ static void mdpy_fb_remove(struct pci_dev *pdev) struct fb_info *info = pci_get_drvdata(pdev); unregister_framebuffer(info); + iounmap(info->screen_base); framebuffer_release(info); + pci_release_regions(pdev); + pci_disable_device(pdev); } static struct pci_device_id mdpy_fb_pci_table[] = { diff --git a/security/Kconfig.hardening b/security/Kconfig.hardening index 942ed8de36d3..2e509e32cf75 100644 --- a/security/Kconfig.hardening +++ b/security/Kconfig.hardening @@ -240,6 +240,9 @@ config INIT_ON_FREE_DEFAULT_ON config CC_HAS_ZERO_CALL_USED_REGS def_bool $(cc-option,-fzero-call-used-regs=used-gpr) + # https://github.com/ClangBuiltLinux/linux/issues/1766 + # https://github.com/llvm/llvm-project/issues/59242 + depends on !CC_IS_CLANG || CLANG_VERSION > 150006 config ZERO_CALL_USED_REGS bool "Enable register zeroing on function exit" diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c index a891705b1d57..8c7719108d7f 100644 --- a/security/apparmor/apparmorfs.c +++ b/security/apparmor/apparmorfs.c @@ -867,8 +867,10 @@ static struct multi_transaction *multi_transaction_new(struct file *file, if (!t) return ERR_PTR(-ENOMEM); kref_init(&t->count); - if (copy_from_user(t->data, buf, size)) + if (copy_from_user(t->data, buf, size)) { + put_multi_transaction(t); return ERR_PTR(-EFAULT); + } return t; } diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c index 
f72406fe1bf2..10274eb90fa3 100644 --- a/security/apparmor/lsm.c +++ b/security/apparmor/lsm.c @@ -1170,10 +1170,10 @@ static int apparmor_inet_conn_request(const struct sock *sk, struct sk_buff *skb #endif /* - * The cred blob is a pointer to, not an instance of, an aa_task_ctx. + * The cred blob is a pointer to, not an instance of, an aa_label. */ struct lsm_blob_sizes apparmor_blob_sizes __lsm_ro_after_init = { - .lbs_cred = sizeof(struct aa_task_ctx *), + .lbs_cred = sizeof(struct aa_label *), .lbs_file = sizeof(struct aa_file_ctx), .lbs_task = sizeof(struct aa_task_ctx), }; diff --git a/security/apparmor/policy.c b/security/apparmor/policy.c index 4c010c9a6af1..fcf22577f606 100644 --- a/security/apparmor/policy.c +++ b/security/apparmor/policy.c @@ -1125,7 +1125,7 @@ ssize_t aa_remove_profiles(struct aa_ns *policy_ns, struct aa_label *subj, if (!name) { /* remove namespace - can only happen if fqname[0] == ':' */ - mutex_lock_nested(&ns->parent->lock, ns->level); + mutex_lock_nested(&ns->parent->lock, ns->parent->level); __aa_bump_ns_revision(ns); __aa_remove_ns(ns); mutex_unlock(&ns->parent->lock); diff --git a/security/apparmor/policy_ns.c b/security/apparmor/policy_ns.c index 70921d95fb40..53d24cf63893 100644 --- a/security/apparmor/policy_ns.c +++ b/security/apparmor/policy_ns.c @@ -121,7 +121,7 @@ static struct aa_ns *alloc_ns(const char *prefix, const char *name) return ns; fail_unconfined: - kfree_sensitive(ns->base.hname); + aa_policy_destroy(&ns->base); fail_ns: kfree_sensitive(ns); return NULL; diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c index 03c9609ca407..d5b3a062d1d1 100644 --- a/security/apparmor/policy_unpack.c +++ b/security/apparmor/policy_unpack.c @@ -964,7 +964,7 @@ static int verify_header(struct aa_ext *e, int required, const char **ns) * if not specified use previous version * Mask off everything that is not kernel abi version */ - if (VERSION_LT(e->version, v5) || VERSION_GT(e->version, v7)) { + if (VERSION_LT(e->version, v5) || VERSION_GT(e->version, v8)) { audit_iface(NULL, NULL, NULL, "unsupported interface version", e, error); return error; diff --git a/security/device_cgroup.c b/security/device_cgroup.c index 04375df52fc9..fe5cb7696993 100644 --- a/security/device_cgroup.c +++ b/security/device_cgroup.c @@ -81,6 +81,17 @@ free_and_exit: return -ENOMEM; } +static void dev_exceptions_move(struct list_head *dest, struct list_head *orig) +{ + struct dev_exception_item *ex, *tmp; + + lockdep_assert_held(&devcgroup_mutex); + + list_for_each_entry_safe(ex, tmp, orig, list) { + list_move_tail(&ex->list, dest); + } +} + /* * called under devcgroup_mutex */ @@ -603,11 +614,13 @@ static int devcgroup_update_access(struct dev_cgroup *devcgroup, int count, rc = 0; struct dev_exception_item ex; struct dev_cgroup *parent = css_to_devcgroup(devcgroup->css.parent); + struct dev_cgroup tmp_devcgrp; if (!capable(CAP_SYS_ADMIN)) return -EPERM; memset(&ex, 0, sizeof(ex)); + memset(&tmp_devcgrp, 0, sizeof(tmp_devcgrp)); b = buffer; switch (*b) { @@ -619,15 +632,27 @@ static int devcgroup_update_access(struct dev_cgroup *devcgroup, if (!may_allow_all(parent)) return -EPERM; - dev_exception_clean(devcgroup); - devcgroup->behavior = DEVCG_DEFAULT_ALLOW; - if (!parent) + if (!parent) { + devcgroup->behavior = DEVCG_DEFAULT_ALLOW; + dev_exception_clean(devcgroup); break; + } - rc = dev_exceptions_copy(&devcgroup->exceptions, - &parent->exceptions); + INIT_LIST_HEAD(&tmp_devcgrp.exceptions); + rc = dev_exceptions_copy(&tmp_devcgrp.exceptions, + 
&devcgroup->exceptions); if (rc) return rc; + dev_exception_clean(devcgroup); + rc = dev_exceptions_copy(&devcgroup->exceptions, + &parent->exceptions); + if (rc) { + dev_exceptions_move(&devcgroup->exceptions, + &tmp_devcgrp.exceptions); + return rc; + } + devcgroup->behavior = DEVCG_DEFAULT_ALLOW; + dev_exception_clean(&tmp_devcgrp); break; case DEVCG_DENY: if (css_has_online_children(&devcgroup->css)) diff --git a/security/integrity/digsig.c b/security/integrity/digsig.c index 3b06a01bd0fd..aa93b750a9f3 100644 --- a/security/integrity/digsig.c +++ b/security/integrity/digsig.c @@ -122,6 +122,7 @@ int __init integrity_init_keyring(const unsigned int id) { struct key_restriction *restriction; key_perm_t perm; + int ret; perm = (KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_VIEW | KEY_USR_READ | KEY_USR_SEARCH; @@ -142,7 +143,10 @@ int __init integrity_init_keyring(const unsigned int id) perm |= KEY_USR_WRITE; out: - return __integrity_init_keyring(id, perm, restriction); + ret = __integrity_init_keyring(id, perm, restriction); + if (ret) + kfree(restriction); + return ret; } static int __init integrity_add_key(const unsigned int id, const void *data, diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c index 748b97a2582a..ed43d30682ff 100644 --- a/security/integrity/ima/ima_policy.c +++ b/security/integrity/ima/ima_policy.c @@ -391,12 +391,6 @@ static struct ima_rule_entry *ima_lsm_copy_rule(struct ima_rule_entry *entry) nentry->lsm[i].type = entry->lsm[i].type; nentry->lsm[i].args_p = entry->lsm[i].args_p; - /* - * Remove the reference from entry so that the associated - * memory will not be freed during a later call to - * ima_lsm_free_rule(entry). - */ - entry->lsm[i].args_p = NULL; ima_filter_rule_init(nentry->lsm[i].type, Audit_equal, nentry->lsm[i].args_p, @@ -410,6 +404,7 @@ static struct ima_rule_entry *ima_lsm_copy_rule(struct ima_rule_entry *entry) static int ima_lsm_update_rule(struct ima_rule_entry *entry) { + int i; struct ima_rule_entry *nentry; nentry = ima_lsm_copy_rule(entry); @@ -424,7 +419,8 @@ static int ima_lsm_update_rule(struct ima_rule_entry *entry) * references and the entry itself. All other memory refrences will now * be owned by nentry. 
*/ - ima_lsm_free_rule(entry); + for (i = 0; i < MAX_LSM_RULES; i++) + ima_filter_rule_free(entry->lsm[i].rule); kfree(entry); return 0; @@ -542,6 +538,9 @@ static bool ima_match_rules(struct ima_rule_entry *rule, const char *func_data) { int i; + bool result = false; + struct ima_rule_entry *lsm_rule = rule; + bool rule_reinitialized = false; if ((rule->flags & IMA_FUNC) && (rule->func != func && func != POST_SETATTR)) @@ -590,35 +589,55 @@ static bool ima_match_rules(struct ima_rule_entry *rule, int rc = 0; u32 osid; - if (!rule->lsm[i].rule) { - if (!rule->lsm[i].args_p) + if (!lsm_rule->lsm[i].rule) { + if (!lsm_rule->lsm[i].args_p) continue; else return false; } + +retry: switch (i) { case LSM_OBJ_USER: case LSM_OBJ_ROLE: case LSM_OBJ_TYPE: security_inode_getsecid(inode, &osid); - rc = ima_filter_rule_match(osid, rule->lsm[i].type, + rc = ima_filter_rule_match(osid, lsm_rule->lsm[i].type, Audit_equal, - rule->lsm[i].rule); + lsm_rule->lsm[i].rule); break; case LSM_SUBJ_USER: case LSM_SUBJ_ROLE: case LSM_SUBJ_TYPE: - rc = ima_filter_rule_match(secid, rule->lsm[i].type, + rc = ima_filter_rule_match(secid, lsm_rule->lsm[i].type, Audit_equal, - rule->lsm[i].rule); + lsm_rule->lsm[i].rule); break; default: break; } - if (!rc) - return false; + + if (rc == -ESTALE && !rule_reinitialized) { + lsm_rule = ima_lsm_copy_rule(rule); + if (lsm_rule) { + rule_reinitialized = true; + goto retry; + } + } + if (!rc) { + result = false; + goto out; + } } - return true; + result = true; + +out: + if (rule_reinitialized) { + for (i = 0; i < MAX_LSM_RULES; i++) + ima_filter_rule_free(lsm_rule->lsm[i].rule); + kfree(lsm_rule); + } + return result; } /* diff --git a/security/integrity/ima/ima_template.c b/security/integrity/ima/ima_template.c index db1ad6d7a57f..31a8388e3dfa 100644 --- a/security/integrity/ima/ima_template.c +++ b/security/integrity/ima/ima_template.c @@ -241,11 +241,11 @@ int template_desc_init_fields(const char *template_fmt, } if (fields && num_fields) { - *fields = kmalloc_array(i, sizeof(*fields), GFP_KERNEL); + *fields = kmalloc_array(i, sizeof(**fields), GFP_KERNEL); if (*fields == NULL) return -ENOMEM; - memcpy(*fields, found_fields, i * sizeof(*fields)); + memcpy(*fields, found_fields, i * sizeof(**fields)); *num_fields = i; } @@ -336,8 +336,11 @@ static struct ima_template_desc *restore_template_fmt(char *template_name) template_desc->name = ""; template_desc->fmt = kstrdup(template_name, GFP_KERNEL); - if (!template_desc->fmt) + if (!template_desc->fmt) { + kfree(template_desc); + template_desc = NULL; goto out; + } spin_lock(&template_list); list_add_tail_rcu(&template_desc->list, &defined_templates); diff --git a/security/integrity/platform_certs/load_uefi.c b/security/integrity/platform_certs/load_uefi.c index 185c609c6e38..d2f2c3936277 100644 --- a/security/integrity/platform_certs/load_uefi.c +++ b/security/integrity/platform_certs/load_uefi.c @@ -34,6 +34,7 @@ static const struct dmi_system_id uefi_skip_cert[] = { { UEFI_QUIRK_SKIP_CERT("Apple Inc.", "MacPro7,1") }, { UEFI_QUIRK_SKIP_CERT("Apple Inc.", "iMac20,1") }, { UEFI_QUIRK_SKIP_CERT("Apple Inc.", "iMac20,2") }, + { UEFI_QUIRK_SKIP_CERT("Apple Inc.", "iMacPro1,1") }, { } }; diff --git a/security/loadpin/loadpin.c b/security/loadpin/loadpin.c index b12f7d986b1e..5fce105a372d 100644 --- a/security/loadpin/loadpin.c +++ b/security/loadpin/loadpin.c @@ -118,21 +118,11 @@ static void loadpin_sb_free_security(struct super_block *mnt_sb) } } -static int loadpin_read_file(struct file *file, enum kernel_read_file_id id, - bool 
contents) +static int loadpin_check(struct file *file, enum kernel_read_file_id id) { struct super_block *load_root; const char *origin = kernel_read_file_id_str(id); - /* - * If we will not know that we'll be seeing the full contents - * then we cannot trust a load will be complete and unchanged - * off disk. Treat all contents=false hooks as if there were - * no associated file struct. - */ - if (!contents) - file = NULL; - /* If the file id is excluded, ignore the pinning. */ if ((unsigned int)id < ARRAY_SIZE(ignore_read_file_id) && ignore_read_file_id[id]) { @@ -187,9 +177,25 @@ static int loadpin_read_file(struct file *file, enum kernel_read_file_id id, return 0; } +static int loadpin_read_file(struct file *file, enum kernel_read_file_id id, + bool contents) +{ + /* + * LoadPin only cares about the _origin_ of a file, not its + * contents, so we can ignore the "are full contents available" + * argument here. + */ + return loadpin_check(file, id); +} + static int loadpin_load_data(enum kernel_load_data_id id, bool contents) { - return loadpin_read_file(NULL, (enum kernel_read_file_id) id, contents); + /* + * LoadPin only cares about the _origin_ of a file, not its + * contents, so a NULL file is passed, and we can ignore the + * state of "contents". + */ + return loadpin_check(NULL, (enum kernel_read_file_id) id); } static struct security_hook_list loadpin_hooks[] __lsm_ro_after_init = { diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c index f38c2e5e9a29..44e06ef4ff0b 100644 --- a/sound/core/pcm_native.c +++ b/sound/core/pcm_native.c @@ -1419,8 +1419,10 @@ static int snd_pcm_do_start(struct snd_pcm_substream *substream, static void snd_pcm_undo_start(struct snd_pcm_substream *substream, snd_pcm_state_t state) { - if (substream->runtime->trigger_master == substream) + if (substream->runtime->trigger_master == substream) { substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_STOP); + substream->runtime->stop_operating = true; + } } static void snd_pcm_post_start(struct snd_pcm_substream *substream, diff --git a/sound/drivers/mts64.c b/sound/drivers/mts64.c index d3bc9e8c407d..f0d34cf70c3e 100644 --- a/sound/drivers/mts64.c +++ b/sound/drivers/mts64.c @@ -815,6 +815,9 @@ static void snd_mts64_interrupt(void *private) u8 status, data; struct snd_rawmidi_substream *substream; + if (!mts) + return; + spin_lock(&mts->lock); ret = mts64_read(mts->pardev->port); data = ret & 0x00ff; diff --git a/sound/hda/ext/hdac_ext_stream.c b/sound/hda/ext/hdac_ext_stream.c index 37154ed43bd5..c09652da43ff 100644 --- a/sound/hda/ext/hdac_ext_stream.c +++ b/sound/hda/ext/hdac_ext_stream.c @@ -475,23 +475,6 @@ int snd_hdac_ext_stream_get_spbmaxfifo(struct hdac_bus *bus, } EXPORT_SYMBOL_GPL(snd_hdac_ext_stream_get_spbmaxfifo); - -/** - * snd_hdac_ext_stop_streams - stop all stream if running - * @bus: HD-audio core bus - */ -void snd_hdac_ext_stop_streams(struct hdac_bus *bus) -{ - struct hdac_stream *stream; - - if (bus->chip_init) { - list_for_each_entry(stream, &bus->stream_list, list) - snd_hdac_stream_stop(stream); - snd_hdac_bus_stop_chip(bus); - } -} -EXPORT_SYMBOL_GPL(snd_hdac_ext_stop_streams); - /** * snd_hdac_ext_stream_drsm_enable - enable DMA resume for a stream * @bus: HD-audio core bus diff --git a/sound/hda/hdac_stream.c b/sound/hda/hdac_stream.c index aa7955fdf68a..eea22cf72aef 100644 --- a/sound/hda/hdac_stream.c +++ b/sound/hda/hdac_stream.c @@ -142,6 +142,33 @@ void snd_hdac_stream_stop(struct hdac_stream *azx_dev) } EXPORT_SYMBOL_GPL(snd_hdac_stream_stop); +/** + * 
snd_hdac_stop_streams - stop all streams + * @bus: HD-audio core bus + */ +void snd_hdac_stop_streams(struct hdac_bus *bus) +{ + struct hdac_stream *stream; + + list_for_each_entry(stream, &bus->stream_list, list) + snd_hdac_stream_stop(stream); +} +EXPORT_SYMBOL_GPL(snd_hdac_stop_streams); + +/** + * snd_hdac_stop_streams_and_chip - stop all streams and chip if running + * @bus: HD-audio core bus + */ +void snd_hdac_stop_streams_and_chip(struct hdac_bus *bus) +{ + + if (bus->chip_init) { + snd_hdac_stop_streams(bus); + snd_hdac_bus_stop_chip(bus); + } +} +EXPORT_SYMBOL_GPL(snd_hdac_stop_streams_and_chip); + /** * snd_hdac_stream_reset - reset a stream * @azx_dev: HD-audio core stream to reset diff --git a/sound/pci/asihpi/hpioctl.c b/sound/pci/asihpi/hpioctl.c index bb31b7fe867d..477a5b4b50bc 100644 --- a/sound/pci/asihpi/hpioctl.c +++ b/sound/pci/asihpi/hpioctl.c @@ -361,7 +361,7 @@ int asihpi_adapter_probe(struct pci_dev *pci_dev, pci_dev->device, pci_dev->subsystem_vendor, pci_dev->subsystem_device, pci_dev->devfn); - if (pci_enable_device(pci_dev) < 0) { + if (pcim_enable_device(pci_dev) < 0) { dev_err(&pci_dev->dev, "pci_enable_device failed, disabling device\n"); return -EIO; diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c index 75dcb14ff20a..0ff286b7b66b 100644 --- a/sound/pci/hda/hda_controller.c +++ b/sound/pci/hda/hda_controller.c @@ -1033,10 +1033,8 @@ EXPORT_SYMBOL_GPL(azx_init_chip); void azx_stop_all_streams(struct azx *chip) { struct hdac_bus *bus = azx_bus(chip); - struct hdac_stream *s; - list_for_each_entry(s, &bus->stream_list, list) - snd_hdac_stream_stop(s); + snd_hdac_stop_streams(bus); } EXPORT_SYMBOL_GPL(azx_stop_all_streams); diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index ba1289abd45f..5bf0597ce38b 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c @@ -1962,6 +1962,7 @@ static int hdmi_add_cvt(struct hda_codec *codec, hda_nid_t cvt_nid) static const struct snd_pci_quirk force_connect_list[] = { SND_PCI_QUIRK(0x103c, 0x870f, "HP", 1), SND_PCI_QUIRK(0x103c, 0x871a, "HP", 1), + SND_PCI_QUIRK(0x103c, 0x8711, "HP", 1), SND_PCI_QUIRK(0x1462, 0xec94, "MS-7C94", 1), SND_PCI_QUIRK(0x8086, 0x2081, "Intel NUC 10", 1), {} diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 28f8d98d39f1..642e212278ac 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -6709,6 +6709,34 @@ static void alc256_fixup_mic_no_presence_and_resume(struct hda_codec *codec, } } +static void alc295_fixup_dell_inspiron_top_speakers(struct hda_codec *codec, + const struct hda_fixup *fix, int action) +{ + static const struct hda_pintbl pincfgs[] = { + { 0x14, 0x90170151 }, + { 0x17, 0x90170150 }, + { } + }; + static const hda_nid_t conn[] = { 0x02, 0x03 }; + static const hda_nid_t preferred_pairs[] = { + 0x14, 0x02, + 0x17, 0x03, + 0x21, 0x02, + 0 + }; + struct alc_spec *spec = codec->spec; + + alc_fixup_no_shutup(codec, fix, action); + + switch (action) { + case HDA_FIXUP_ACT_PRE_PROBE: + snd_hda_apply_pincfgs(codec, pincfgs); + snd_hda_override_conn_list(codec, 0x17, ARRAY_SIZE(conn), conn); + spec->gen.preferred_dacs = preferred_pairs; + break; + } +} + enum { ALC269_FIXUP_GPIO2, ALC269_FIXUP_SONY_VAIO, @@ -6940,6 +6968,8 @@ enum { ALC285_FIXUP_LEGION_Y9000X_SPEAKERS, ALC285_FIXUP_LEGION_Y9000X_AUTOMUTE, ALC285_FIXUP_HP_SPEAKERS_MICMUTE_LED, + ALC295_FIXUP_DELL_INSPIRON_TOP_SPEAKERS, + ALC236_FIXUP_DELL_DUAL_CODECS, }; /* A special fixup for Lenovo C940 and Yoga Duet 7; 
@@ -8766,6 +8796,18 @@ static const struct hda_fixup alc269_fixups[] = { .chained = true, .chain_id = ALC285_FIXUP_HP_MUTE_LED, }, + [ALC295_FIXUP_DELL_INSPIRON_TOP_SPEAKERS] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc295_fixup_dell_inspiron_top_speakers, + .chained = true, + .chain_id = ALC269_FIXUP_DELL4_MIC_NO_PRESENCE, + }, + [ALC236_FIXUP_DELL_DUAL_CODECS] = { + .type = HDA_FIXUP_PINS, + .v.func = alc1220_fixup_gb_dual_codecs, + .chained = true, + .chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, + }, }; static const struct snd_pci_quirk alc269_fixup_tbl[] = { @@ -8865,6 +8907,14 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1028, 0x0a9e, "Dell Latitude 5430", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x0b19, "Dell XPS 15 9520", ALC289_FIXUP_DUAL_SPK), SND_PCI_QUIRK(0x1028, 0x0b1a, "Dell Precision 5570", ALC289_FIXUP_DUAL_SPK), + SND_PCI_QUIRK(0x1028, 0x0b37, "Dell Inspiron 16 Plus 7620 2-in-1", ALC295_FIXUP_DELL_INSPIRON_TOP_SPEAKERS), + SND_PCI_QUIRK(0x1028, 0x0b71, "Dell Inspiron 16 Plus 7620", ALC295_FIXUP_DELL_INSPIRON_TOP_SPEAKERS), + SND_PCI_QUIRK(0x1028, 0x0c19, "Dell Precision 3340", ALC236_FIXUP_DELL_DUAL_CODECS), + SND_PCI_QUIRK(0x1028, 0x0c1a, "Dell Precision 3340", ALC236_FIXUP_DELL_DUAL_CODECS), + SND_PCI_QUIRK(0x1028, 0x0c1b, "Dell Precision 3440", ALC236_FIXUP_DELL_DUAL_CODECS), + SND_PCI_QUIRK(0x1028, 0x0c1c, "Dell Precision 3540", ALC236_FIXUP_DELL_DUAL_CODECS), + SND_PCI_QUIRK(0x1028, 0x0c1d, "Dell Precision 3440", ALC236_FIXUP_DELL_DUAL_CODECS), + SND_PCI_QUIRK(0x1028, 0x0c1e, "Dell Precision 3540", ALC236_FIXUP_DELL_DUAL_CODECS), SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2), @@ -10593,6 +10643,17 @@ static void alc897_fixup_lenovo_headset_mic(struct hda_codec *codec, } } +static void alc897_fixup_lenovo_headset_mode(struct hda_codec *codec, + const struct hda_fixup *fix, int action) +{ + struct alc_spec *spec = codec->spec; + + if (action == HDA_FIXUP_ACT_PRE_PROBE) { + spec->parse_flags |= HDA_PINCFG_HEADSET_MIC; + spec->gen.hp_automute_hook = alc897_hp_automute_hook; + } +} + static const struct coef_fw alc668_coefs[] = { WRITE_COEF(0x01, 0xbebe), WRITE_COEF(0x02, 0xaaaa), WRITE_COEF(0x03, 0x0), WRITE_COEF(0x04, 0x0180), WRITE_COEF(0x06, 0x0), WRITE_COEF(0x07, 0x0f80), @@ -10676,6 +10737,8 @@ enum { ALC897_FIXUP_LENOVO_HEADSET_MIC, ALC897_FIXUP_HEADSET_MIC_PIN, ALC897_FIXUP_HP_HSMIC_VERB, + ALC897_FIXUP_LENOVO_HEADSET_MODE, + ALC897_FIXUP_HEADSET_MIC_PIN2, }; static const struct hda_fixup alc662_fixups[] = { @@ -11102,6 +11165,19 @@ static const struct hda_fixup alc662_fixups[] = { { } }, }, + [ALC897_FIXUP_LENOVO_HEADSET_MODE] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc897_fixup_lenovo_headset_mode, + }, + [ALC897_FIXUP_HEADSET_MIC_PIN2] = { + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { + { 0x1a, 0x01a11140 }, /* use as headset mic, without its own jack detect */ + { } + }, + .chained = true, + .chain_id = ALC897_FIXUP_LENOVO_HEADSET_MODE + }, }; static const struct snd_pci_quirk alc662_fixup_tbl[] = { @@ -11154,6 +11230,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = { SND_PCI_QUIRK(0x17aa, 0x32cb, "Lenovo ThinkCentre M70", ALC897_FIXUP_HEADSET_MIC_PIN), SND_PCI_QUIRK(0x17aa, 0x32cf, "Lenovo ThinkCentre M950", ALC897_FIXUP_HEADSET_MIC_PIN), SND_PCI_QUIRK(0x17aa, 0x32f7, "Lenovo ThinkCentre M90", 
ALC897_FIXUP_HEADSET_MIC_PIN), + SND_PCI_QUIRK(0x17aa, 0x3742, "Lenovo TianYi510Pro-14IOB", ALC897_FIXUP_HEADSET_MIC_PIN2), SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo Ideapad Y550P", ALC662_FIXUP_IDEAPAD), SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Ideapad Y550", ALC662_FIXUP_IDEAPAD), SND_PCI_QUIRK(0x1849, 0x5892, "ASRock B150M", ALC892_FIXUP_ASROCK_MOBO), diff --git a/sound/soc/codecs/hdac_hda.c b/sound/soc/codecs/hdac_hda.c index 390dd6c7f6a5..de5955db0a5f 100644 --- a/sound/soc/codecs/hdac_hda.c +++ b/sound/soc/codecs/hdac_hda.c @@ -46,9 +46,8 @@ static int hdac_hda_dai_hw_params(struct snd_pcm_substream *substream, struct snd_soc_dai *dai); static int hdac_hda_dai_hw_free(struct snd_pcm_substream *substream, struct snd_soc_dai *dai); -static int hdac_hda_dai_set_tdm_slot(struct snd_soc_dai *dai, - unsigned int tx_mask, unsigned int rx_mask, - int slots, int slot_width); +static int hdac_hda_dai_set_stream(struct snd_soc_dai *dai, void *stream, + int direction); static struct hda_pcm *snd_soc_find_pcm_from_dai(struct hdac_hda_priv *hda_pvt, struct snd_soc_dai *dai); @@ -58,7 +57,7 @@ static const struct snd_soc_dai_ops hdac_hda_dai_ops = { .prepare = hdac_hda_dai_prepare, .hw_params = hdac_hda_dai_hw_params, .hw_free = hdac_hda_dai_hw_free, - .set_tdm_slot = hdac_hda_dai_set_tdm_slot, + .set_stream = hdac_hda_dai_set_stream, }; static struct snd_soc_dai_driver hdac_hda_dais[] = { @@ -180,21 +179,22 @@ static struct snd_soc_dai_driver hdac_hda_dais[] = { }; -static int hdac_hda_dai_set_tdm_slot(struct snd_soc_dai *dai, - unsigned int tx_mask, unsigned int rx_mask, - int slots, int slot_width) +static int hdac_hda_dai_set_stream(struct snd_soc_dai *dai, + void *stream, int direction) { struct snd_soc_component *component = dai->component; struct hdac_hda_priv *hda_pvt; struct hdac_hda_pcm *pcm; + struct hdac_stream *hstream; + + if (!stream) + return -EINVAL; hda_pvt = snd_soc_component_get_drvdata(component); pcm = &hda_pvt->pcm[dai->id]; + hstream = (struct hdac_stream *)stream; - if (tx_mask) - pcm->stream_tag[SNDRV_PCM_STREAM_PLAYBACK] = tx_mask; - else - pcm->stream_tag[SNDRV_PCM_STREAM_CAPTURE] = rx_mask; + pcm->stream_tag[direction] = hstream->stream_tag; return 0; } diff --git a/sound/soc/codecs/max98373-sdw.c b/sound/soc/codecs/max98373-sdw.c index 12323d4b5bfa..97b64477dde6 100644 --- a/sound/soc/codecs/max98373-sdw.c +++ b/sound/soc/codecs/max98373-sdw.c @@ -741,7 +741,7 @@ static int max98373_sdw_set_tdm_slot(struct snd_soc_dai *dai, static const struct snd_soc_dai_ops max98373_dai_sdw_ops = { .hw_params = max98373_sdw_dai_hw_params, .hw_free = max98373_pcm_hw_free, - .set_sdw_stream = max98373_set_sdw_stream, + .set_stream = max98373_set_sdw_stream, .shutdown = max98373_shutdown, .set_tdm_slot = max98373_sdw_set_tdm_slot, }; diff --git a/sound/soc/codecs/pcm512x.c b/sound/soc/codecs/pcm512x.c index 33ed3bbf0362..c45819ccf9ea 100644 --- a/sound/soc/codecs/pcm512x.c +++ b/sound/soc/codecs/pcm512x.c @@ -1635,7 +1635,7 @@ int pcm512x_probe(struct device *dev, struct regmap *regmap) if (val > 6) { dev_err(dev, "Invalid pll-in\n"); ret = -EINVAL; - goto err_clk; + goto err_pm; } pcm512x->pll_in = val; } @@ -1644,7 +1644,7 @@ int pcm512x_probe(struct device *dev, struct regmap *regmap) if (val > 6) { dev_err(dev, "Invalid pll-out\n"); ret = -EINVAL; - goto err_clk; + goto err_pm; } pcm512x->pll_out = val; } @@ -1653,12 +1653,12 @@ int pcm512x_probe(struct device *dev, struct regmap *regmap) dev_err(dev, "Error: both pll-in and pll-out, or none\n"); ret = -EINVAL; - goto err_clk; + goto 
err_pm; } if (pcm512x->pll_in && pcm512x->pll_in == pcm512x->pll_out) { dev_err(dev, "Error: pll-in == pll-out\n"); ret = -EINVAL; - goto err_clk; + goto err_pm; } } #endif diff --git a/sound/soc/codecs/rt1308-sdw.c b/sound/soc/codecs/rt1308-sdw.c index 8472d855c325..03adf3324b81 100644 --- a/sound/soc/codecs/rt1308-sdw.c +++ b/sound/soc/codecs/rt1308-sdw.c @@ -613,7 +613,7 @@ static const struct snd_soc_component_driver soc_component_sdw_rt1308 = { static const struct snd_soc_dai_ops rt1308_aif_dai_ops = { .hw_params = rt1308_sdw_hw_params, .hw_free = rt1308_sdw_pcm_hw_free, - .set_sdw_stream = rt1308_set_sdw_stream, + .set_stream = rt1308_set_sdw_stream, .shutdown = rt1308_sdw_shutdown, .set_tdm_slot = rt1308_sdw_set_tdm_slot, }; diff --git a/sound/soc/codecs/rt1316-sdw.c b/sound/soc/codecs/rt1316-sdw.c index 09cf3ca86fa4..1e04aa8ab166 100644 --- a/sound/soc/codecs/rt1316-sdw.c +++ b/sound/soc/codecs/rt1316-sdw.c @@ -602,7 +602,7 @@ static const struct snd_soc_component_driver soc_component_sdw_rt1316 = { static const struct snd_soc_dai_ops rt1316_aif_dai_ops = { .hw_params = rt1316_sdw_hw_params, .hw_free = rt1316_sdw_pcm_hw_free, - .set_sdw_stream = rt1316_set_sdw_stream, + .set_stream = rt1316_set_sdw_stream, .shutdown = rt1316_sdw_shutdown, }; diff --git a/sound/soc/codecs/rt298.c b/sound/soc/codecs/rt298.c index c592c40a7ab3..604754e4b29f 100644 --- a/sound/soc/codecs/rt298.c +++ b/sound/soc/codecs/rt298.c @@ -1173,6 +1173,13 @@ static const struct dmi_system_id force_combo_jack_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "Geminilake") } }, + { + .ident = "Intel Kabylake R RVP", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "Kabylake Client platform") + } + }, { } }; diff --git a/sound/soc/codecs/rt5670.c b/sound/soc/codecs/rt5670.c index ecbaf129a6e3..51b385575a5c 100644 --- a/sound/soc/codecs/rt5670.c +++ b/sound/soc/codecs/rt5670.c @@ -3313,8 +3313,6 @@ static int rt5670_i2c_probe(struct i2c_client *i2c, if (ret < 0) goto err; - pm_runtime_put(&i2c->dev); - return 0; err: pm_runtime_disable(&i2c->dev); diff --git a/sound/soc/codecs/rt5682-sdw.c b/sound/soc/codecs/rt5682-sdw.c index a030c9987b92..f04e18c32489 100644 --- a/sound/soc/codecs/rt5682-sdw.c +++ b/sound/soc/codecs/rt5682-sdw.c @@ -272,7 +272,7 @@ static int rt5682_sdw_hw_free(struct snd_pcm_substream *substream, static const struct snd_soc_dai_ops rt5682_sdw_ops = { .hw_params = rt5682_sdw_hw_params, .hw_free = rt5682_sdw_hw_free, - .set_sdw_stream = rt5682_set_sdw_stream, + .set_stream = rt5682_set_sdw_stream, .shutdown = rt5682_sdw_shutdown, }; diff --git a/sound/soc/codecs/rt700.c b/sound/soc/codecs/rt700.c index e049d672ccfd..3de3406d653e 100644 --- a/sound/soc/codecs/rt700.c +++ b/sound/soc/codecs/rt700.c @@ -1015,7 +1015,7 @@ static int rt700_pcm_hw_free(struct snd_pcm_substream *substream, static const struct snd_soc_dai_ops rt700_ops = { .hw_params = rt700_pcm_hw_params, .hw_free = rt700_pcm_hw_free, - .set_sdw_stream = rt700_set_sdw_stream, + .set_stream = rt700_set_sdw_stream, .shutdown = rt700_shutdown, }; diff --git a/sound/soc/codecs/rt711-sdca.c b/sound/soc/codecs/rt711-sdca.c index 3b5df3ea2f60..5ad53bbc8528 100644 --- a/sound/soc/codecs/rt711-sdca.c +++ b/sound/soc/codecs/rt711-sdca.c @@ -1361,7 +1361,7 @@ static int rt711_sdca_pcm_hw_free(struct snd_pcm_substream *substream, static const struct snd_soc_dai_ops rt711_sdca_ops = { .hw_params = rt711_sdca_pcm_hw_params, .hw_free = rt711_sdca_pcm_hw_free, - .set_sdw_stream = rt711_sdca_set_sdw_stream, + 
.set_stream = rt711_sdca_set_sdw_stream, .shutdown = rt711_sdca_shutdown, }; diff --git a/sound/soc/codecs/rt711.c b/sound/soc/codecs/rt711.c index 51a98e730fc8..286d882636e0 100644 --- a/sound/soc/codecs/rt711.c +++ b/sound/soc/codecs/rt711.c @@ -1092,7 +1092,7 @@ static int rt711_pcm_hw_free(struct snd_pcm_substream *substream, static const struct snd_soc_dai_ops rt711_ops = { .hw_params = rt711_pcm_hw_params, .hw_free = rt711_pcm_hw_free, - .set_sdw_stream = rt711_set_sdw_stream, + .set_stream = rt711_set_sdw_stream, .shutdown = rt711_shutdown, }; diff --git a/sound/soc/codecs/rt715-sdca.c b/sound/soc/codecs/rt715-sdca.c index 66e166568c50..bfa536bd7196 100644 --- a/sound/soc/codecs/rt715-sdca.c +++ b/sound/soc/codecs/rt715-sdca.c @@ -938,7 +938,7 @@ static int rt715_sdca_pcm_hw_free(struct snd_pcm_substream *substream, static const struct snd_soc_dai_ops rt715_sdca_ops = { .hw_params = rt715_sdca_pcm_hw_params, .hw_free = rt715_sdca_pcm_hw_free, - .set_sdw_stream = rt715_sdca_set_sdw_stream, + .set_stream = rt715_sdca_set_sdw_stream, .shutdown = rt715_sdca_shutdown, }; diff --git a/sound/soc/codecs/rt715.c b/sound/soc/codecs/rt715.c index 1352869cc086..a64d11a74751 100644 --- a/sound/soc/codecs/rt715.c +++ b/sound/soc/codecs/rt715.c @@ -909,7 +909,7 @@ static int rt715_pcm_hw_free(struct snd_pcm_substream *substream, static const struct snd_soc_dai_ops rt715_ops = { .hw_params = rt715_pcm_hw_params, .hw_free = rt715_pcm_hw_free, - .set_sdw_stream = rt715_set_sdw_stream, + .set_stream = rt715_set_sdw_stream, .shutdown = rt715_shutdown, }; diff --git a/sound/soc/codecs/sdw-mockup.c b/sound/soc/codecs/sdw-mockup.c index 8ea13cfa9f8e..7c612aaf31c7 100644 --- a/sound/soc/codecs/sdw-mockup.c +++ b/sound/soc/codecs/sdw-mockup.c @@ -138,7 +138,7 @@ static int sdw_mockup_pcm_hw_free(struct snd_pcm_substream *substream, static const struct snd_soc_dai_ops sdw_mockup_ops = { .hw_params = sdw_mockup_pcm_hw_params, .hw_free = sdw_mockup_pcm_hw_free, - .set_sdw_stream = sdw_mockup_set_sdw_stream, + .set_stream = sdw_mockup_set_sdw_stream, .shutdown = sdw_mockup_shutdown, }; diff --git a/sound/soc/codecs/wcd938x.c b/sound/soc/codecs/wcd938x.c index 8cdc45e669f2..b95cbae58641 100644 --- a/sound/soc/codecs/wcd938x.c +++ b/sound/soc/codecs/wcd938x.c @@ -4302,7 +4302,7 @@ static int wcd938x_codec_set_sdw_stream(struct snd_soc_dai *dai, static const struct snd_soc_dai_ops wcd938x_sdw_dai_ops = { .hw_params = wcd938x_codec_hw_params, .hw_free = wcd938x_codec_free, - .set_sdw_stream = wcd938x_codec_set_sdw_stream, + .set_stream = wcd938x_codec_set_sdw_stream, }; static struct snd_soc_dai_driver wcd938x_dais[] = { diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c index f117ec0c489f..6759db92f6c4 100644 --- a/sound/soc/codecs/wm8994.c +++ b/sound/soc/codecs/wm8994.c @@ -3853,7 +3853,12 @@ static irqreturn_t wm1811_jackdet_irq(int irq, void *data) } else { dev_dbg(component->dev, "Jack not detected\n"); + /* Release wm8994->accdet_lock to avoid deadlock: + * cancel_delayed_work_sync() takes wm8994->mic_work internal + * lock and wm1811_mic_work takes wm8994->accdet_lock */ + mutex_unlock(&wm8994->accdet_lock); cancel_delayed_work_sync(&wm8994->mic_work); + mutex_lock(&wm8994->accdet_lock); snd_soc_component_update_bits(component, WM8958_MICBIAS2, WM8958_MICB2_DISCH, WM8958_MICB2_DISCH); diff --git a/sound/soc/codecs/wsa881x.c b/sound/soc/codecs/wsa881x.c index 564b78f3cdd0..0222370ff95d 100644 --- a/sound/soc/codecs/wsa881x.c +++ b/sound/soc/codecs/wsa881x.c @@ -1026,7 +1026,7 @@ static const 
struct snd_soc_dai_ops wsa881x_dai_ops = { .hw_params = wsa881x_hw_params, .hw_free = wsa881x_hw_free, .mute_stream = wsa881x_digital_mute, - .set_sdw_stream = wsa881x_set_sdw_stream, + .set_stream = wsa881x_set_sdw_stream, }; static struct snd_soc_dai_driver wsa881x_dais[] = { diff --git a/sound/soc/generic/audio-graph-card.c b/sound/soc/generic/audio-graph-card.c index 28cbcdb56857..89814f68ff56 100644 --- a/sound/soc/generic/audio-graph-card.c +++ b/sound/soc/generic/audio-graph-card.c @@ -483,8 +483,10 @@ static int __graph_for_each_link(struct asoc_simple_priv *priv, of_node_put(codec_ep); of_node_put(codec_port); - if (ret < 0) + if (ret < 0) { + of_node_put(cpu_ep); return ret; + } codec_port_old = codec_port; } diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c index f9c82ebc552c..888e04c57757 100644 --- a/sound/soc/intel/boards/bytcr_rt5640.c +++ b/sound/soc/intel/boards/bytcr_rt5640.c @@ -570,6 +570,21 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = { BYT_RT5640_SSP0_AIF1 | BYT_RT5640_MCLK_EN), }, + { + /* Advantech MICA-071 */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Advantech"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "MICA-071"), + }, + /* OVCD Th = 1500uA to reliable detect head-phones vs -set */ + .driver_data = (void *)(BYT_RT5640_IN3_MAP | + BYT_RT5640_JD_SRC_JD2_IN4N | + BYT_RT5640_OVCD_TH_1500UA | + BYT_RT5640_OVCD_SF_0P75 | + BYT_RT5640_MONO_SPEAKER | + BYT_RT5640_DIFF_MIC | + BYT_RT5640_MCLK_EN), + }, { .matches = { DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ARCHOS"), diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c index 294e76d590ad..2d53a707aff9 100644 --- a/sound/soc/intel/boards/sof_sdw.c +++ b/sound/soc/intel/boards/sof_sdw.c @@ -291,7 +291,7 @@ int sdw_prepare(struct snd_pcm_substream *substream) /* Find stream from first CPU DAI */ dai = asoc_rtd_to_cpu(rtd, 0); - sdw_stream = snd_soc_dai_get_sdw_stream(dai, substream->stream); + sdw_stream = snd_soc_dai_get_stream(dai, substream->stream); if (IS_ERR(sdw_stream)) { dev_err(rtd->dev, "no stream found for DAI %s", dai->name); @@ -311,7 +311,7 @@ int sdw_trigger(struct snd_pcm_substream *substream, int cmd) /* Find stream from first CPU DAI */ dai = asoc_rtd_to_cpu(rtd, 0); - sdw_stream = snd_soc_dai_get_sdw_stream(dai, substream->stream); + sdw_stream = snd_soc_dai_get_stream(dai, substream->stream); if (IS_ERR(sdw_stream)) { dev_err(rtd->dev, "no stream found for DAI %s", dai->name); @@ -350,7 +350,7 @@ int sdw_hw_free(struct snd_pcm_substream *substream) /* Find stream from first CPU DAI */ dai = asoc_rtd_to_cpu(rtd, 0); - sdw_stream = snd_soc_dai_get_sdw_stream(dai, substream->stream); + sdw_stream = snd_soc_dai_get_stream(dai, substream->stream); if (IS_ERR(sdw_stream)) { dev_err(rtd->dev, "no stream found for DAI %s", dai->name); diff --git a/sound/soc/intel/skylake/skl-pcm.c b/sound/soc/intel/skylake/skl-pcm.c index e4aa366d356e..db41bd717065 100644 --- a/sound/soc/intel/skylake/skl-pcm.c +++ b/sound/soc/intel/skylake/skl-pcm.c @@ -562,11 +562,8 @@ static int skl_link_hw_params(struct snd_pcm_substream *substream, stream_tag = hdac_stream(link_dev)->stream_tag; - /* set the stream tag in the codec dai dma params */ - if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) - snd_soc_dai_set_tdm_slot(codec_dai, stream_tag, 0, 0, 0); - else - snd_soc_dai_set_tdm_slot(codec_dai, 0, stream_tag, 0, 0); + /* set the hdac_stream in the codec dai */ + snd_soc_dai_set_stream(codec_dai, hdac_stream(link_dev), substream->stream); 
p_params.s_fmt = snd_pcm_format_width(params_format(params)); p_params.ch = params_channels(params); diff --git a/sound/soc/intel/skylake/skl.c b/sound/soc/intel/skylake/skl.c index 5b1a15e39912..46bb3b8bd5af 100644 --- a/sound/soc/intel/skylake/skl.c +++ b/sound/soc/intel/skylake/skl.c @@ -439,7 +439,7 @@ static int skl_free(struct hdac_bus *bus) skl->init_done = 0; /* to be sure */ - snd_hdac_ext_stop_streams(bus); + snd_hdac_stop_streams_and_chip(bus); if (bus->irq >= 0) free_irq(bus->irq, (void *)bus); @@ -1096,7 +1096,10 @@ static void skl_shutdown(struct pci_dev *pci) if (!skl->init_done) return; - snd_hdac_ext_stop_streams(bus); + snd_hdac_stop_streams(bus); + snd_hdac_ext_bus_link_power_down_all(bus); + skl_dsp_sleep(skl->dsp); + list_for_each_entry(s, &bus->stream_list, list) { stream = stream_to_hdac_ext_stream(s); snd_hdac_ext_stream_decouple(bus, stream, false); diff --git a/sound/soc/jz4740/jz4740-i2s.c b/sound/soc/jz4740/jz4740-i2s.c index 7ad5d9a924d8..4e1fc4ba5150 100644 --- a/sound/soc/jz4740/jz4740-i2s.c +++ b/sound/soc/jz4740/jz4740-i2s.c @@ -56,7 +56,8 @@ #define JZ_AIC_CTRL_MONO_TO_STEREO BIT(11) #define JZ_AIC_CTRL_SWITCH_ENDIANNESS BIT(10) #define JZ_AIC_CTRL_SIGNED_TO_UNSIGNED BIT(9) -#define JZ_AIC_CTRL_FLUSH BIT(8) +#define JZ_AIC_CTRL_TFLUSH BIT(8) +#define JZ_AIC_CTRL_RFLUSH BIT(7) #define JZ_AIC_CTRL_ENABLE_ROR_INT BIT(6) #define JZ_AIC_CTRL_ENABLE_TUR_INT BIT(5) #define JZ_AIC_CTRL_ENABLE_RFS_INT BIT(4) @@ -91,6 +92,8 @@ enum jz47xx_i2s_version { struct i2s_soc_info { enum jz47xx_i2s_version version; struct snd_soc_dai_driver *dai; + + bool shared_fifo_flush; }; struct jz4740_i2s { @@ -119,19 +122,44 @@ static inline void jz4740_i2s_write(const struct jz4740_i2s *i2s, writel(value, i2s->base + reg); } +static inline void jz4740_i2s_set_bits(const struct jz4740_i2s *i2s, + unsigned int reg, uint32_t bits) +{ + uint32_t value = jz4740_i2s_read(i2s, reg); + value |= bits; + jz4740_i2s_write(i2s, reg, value); +} + static int jz4740_i2s_startup(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { struct jz4740_i2s *i2s = snd_soc_dai_get_drvdata(dai); - uint32_t conf, ctrl; + uint32_t conf; int ret; + /* + * When we can flush FIFOs independently, only flush the FIFO + * that is starting up. We can do this when the DAI is active + * because it does not disturb other active substreams. + */ + if (!i2s->soc_info->shared_fifo_flush) { + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) + jz4740_i2s_set_bits(i2s, JZ_REG_AIC_CTRL, JZ_AIC_CTRL_TFLUSH); + else + jz4740_i2s_set_bits(i2s, JZ_REG_AIC_CTRL, JZ_AIC_CTRL_RFLUSH); + } + if (snd_soc_dai_active(dai)) return 0; - ctrl = jz4740_i2s_read(i2s, JZ_REG_AIC_CTRL); - ctrl |= JZ_AIC_CTRL_FLUSH; - jz4740_i2s_write(i2s, JZ_REG_AIC_CTRL, ctrl); + /* + * When there is a shared flush bit for both FIFOs, the TFLUSH + * bit flushes both FIFOs. Flushing while the DAI is active would + * cause FIFO underruns in other active substreams so we have to + * guard this behind the snd_soc_dai_active() check. 
+ */ + if (i2s->soc_info->shared_fifo_flush) + jz4740_i2s_set_bits(i2s, JZ_REG_AIC_CTRL, JZ_AIC_CTRL_TFLUSH); ret = clk_prepare_enable(i2s->clk_i2s); if (ret) @@ -462,6 +490,7 @@ static struct snd_soc_dai_driver jz4740_i2s_dai = { static const struct i2s_soc_info jz4740_i2s_soc_info = { .version = JZ_I2S_JZ4740, .dai = &jz4740_i2s_dai, + .shared_fifo_flush = true, }; static const struct i2s_soc_info jz4760_i2s_soc_info = { diff --git a/sound/soc/mediatek/common/mtk-btcvsd.c b/sound/soc/mediatek/common/mtk-btcvsd.c index d884bb7c0fc7..1c28b41e4311 100644 --- a/sound/soc/mediatek/common/mtk-btcvsd.c +++ b/sound/soc/mediatek/common/mtk-btcvsd.c @@ -1038,11 +1038,9 @@ static int mtk_pcm_btcvsd_copy(struct snd_soc_component *component, struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(component); if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) - mtk_btcvsd_snd_write(bt, buf, count); + return mtk_btcvsd_snd_write(bt, buf, count); else - mtk_btcvsd_snd_read(bt, buf, count); - - return 0; + return mtk_btcvsd_snd_read(bt, buf, count); } /* kcontrol */ diff --git a/sound/soc/mediatek/mt8173/mt8173-afe-pcm.c b/sound/soc/mediatek/mt8173/mt8173-afe-pcm.c index 6350390414d4..8092506facbd 100644 --- a/sound/soc/mediatek/mt8173/mt8173-afe-pcm.c +++ b/sound/soc/mediatek/mt8173/mt8173-afe-pcm.c @@ -1054,6 +1054,7 @@ static int mt8173_afe_pcm_dev_probe(struct platform_device *pdev) int irq_id; struct mtk_base_afe *afe; struct mt8173_afe_private *afe_priv; + struct snd_soc_component *comp_pcm, *comp_hdmi; ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(33)); if (ret) @@ -1071,16 +1072,6 @@ static int mt8173_afe_pcm_dev_probe(struct platform_device *pdev) afe->dev = &pdev->dev; - irq_id = platform_get_irq(pdev, 0); - if (irq_id <= 0) - return irq_id < 0 ? irq_id : -ENXIO; - ret = devm_request_irq(afe->dev, irq_id, mt8173_afe_irq_handler, - 0, "Afe_ISR_Handle", (void *)afe); - if (ret) { - dev_err(afe->dev, "could not request_irq\n"); - return ret; - } - afe->base_addr = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(afe->base_addr)) return PTR_ERR(afe->base_addr); @@ -1142,23 +1133,65 @@ static int mt8173_afe_pcm_dev_probe(struct platform_device *pdev) if (ret) goto err_pm_disable; - ret = devm_snd_soc_register_component(&pdev->dev, - &mt8173_afe_pcm_dai_component, - mt8173_afe_pcm_dais, - ARRAY_SIZE(mt8173_afe_pcm_dais)); + comp_pcm = devm_kzalloc(&pdev->dev, sizeof(*comp_pcm), GFP_KERNEL); + if (!comp_pcm) { + ret = -ENOMEM; + goto err_pm_disable; + } + + ret = snd_soc_component_initialize(comp_pcm, + &mt8173_afe_pcm_dai_component, + &pdev->dev); if (ret) goto err_pm_disable; - ret = devm_snd_soc_register_component(&pdev->dev, - &mt8173_afe_hdmi_dai_component, - mt8173_afe_hdmi_dais, - ARRAY_SIZE(mt8173_afe_hdmi_dais)); +#ifdef CONFIG_DEBUG_FS + comp_pcm->debugfs_prefix = "pcm"; +#endif + + ret = snd_soc_add_component(comp_pcm, + mt8173_afe_pcm_dais, + ARRAY_SIZE(mt8173_afe_pcm_dais)); if (ret) goto err_pm_disable; + comp_hdmi = devm_kzalloc(&pdev->dev, sizeof(*comp_hdmi), GFP_KERNEL); + if (!comp_hdmi) { + ret = -ENOMEM; + goto err_pm_disable; + } + + ret = snd_soc_component_initialize(comp_hdmi, + &mt8173_afe_hdmi_dai_component, + &pdev->dev); + if (ret) + goto err_pm_disable; + +#ifdef CONFIG_DEBUG_FS + comp_hdmi->debugfs_prefix = "hdmi"; +#endif + + ret = snd_soc_add_component(comp_hdmi, + mt8173_afe_hdmi_dais, + ARRAY_SIZE(mt8173_afe_hdmi_dais)); + if (ret) + goto err_cleanup_components; + + irq_id = platform_get_irq(pdev, 0); + if (irq_id <= 0) + return irq_id < 0 ? 
irq_id : -ENXIO; + ret = devm_request_irq(afe->dev, irq_id, mt8173_afe_irq_handler, + 0, "Afe_ISR_Handle", (void *)afe); + if (ret) { + dev_err(afe->dev, "could not request_irq\n"); + goto err_pm_disable; + } + dev_info(&pdev->dev, "MT8173 AFE driver initialized.\n"); return 0; +err_cleanup_components: + snd_soc_unregister_component(&pdev->dev); err_pm_disable: pm_runtime_disable(&pdev->dev); return ret; @@ -1166,6 +1199,8 @@ err_pm_disable: static int mt8173_afe_pcm_dev_remove(struct platform_device *pdev) { + snd_soc_unregister_component(&pdev->dev); + pm_runtime_disable(&pdev->dev); if (!pm_runtime_status_suspended(&pdev->dev)) mt8173_afe_runtime_suspend(&pdev->dev); diff --git a/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5514.c b/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5514.c index 390da5bf727e..9421b919d462 100644 --- a/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5514.c +++ b/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5514.c @@ -200,14 +200,16 @@ static int mt8173_rt5650_rt5514_dev_probe(struct platform_device *pdev) if (!mt8173_rt5650_rt5514_dais[DAI_LINK_CODEC_I2S].codecs[0].of_node) { dev_err(&pdev->dev, "Property 'audio-codec' missing or invalid\n"); - return -EINVAL; + ret = -EINVAL; + goto out; } mt8173_rt5650_rt5514_dais[DAI_LINK_CODEC_I2S].codecs[1].of_node = of_parse_phandle(pdev->dev.of_node, "mediatek,audio-codec", 1); if (!mt8173_rt5650_rt5514_dais[DAI_LINK_CODEC_I2S].codecs[1].of_node) { dev_err(&pdev->dev, "Property 'audio-codec' missing or invalid\n"); - return -EINVAL; + ret = -EINVAL; + goto out; } mt8173_rt5650_rt5514_codec_conf[0].dlc.of_node = mt8173_rt5650_rt5514_dais[DAI_LINK_CODEC_I2S].codecs[1].of_node; @@ -219,6 +221,7 @@ static int mt8173_rt5650_rt5514_dev_probe(struct platform_device *pdev) dev_err(&pdev->dev, "%s snd_soc_register_card fail %d\n", __func__, ret); +out: of_node_put(platform_node); return ret; } diff --git a/sound/soc/mediatek/mt8183/mt8183-mt6358-ts3a227-max98357.c b/sound/soc/mediatek/mt8183/mt8183-mt6358-ts3a227-max98357.c index a56c1e87d564..4dab1ee69ec0 100644 --- a/sound/soc/mediatek/mt8183/mt8183-mt6358-ts3a227-max98357.c +++ b/sound/soc/mediatek/mt8183/mt8183-mt6358-ts3a227-max98357.c @@ -647,8 +647,10 @@ mt8183_mt6358_ts3a227_max98357_dev_probe(struct platform_device *pdev) } card = (struct snd_soc_card *)of_device_get_match_data(&pdev->dev); - if (!card) + if (!card) { + of_node_put(platform_node); return -EINVAL; + } card->dev = &pdev->dev; ec_codec = of_parse_phandle(pdev->dev.of_node, "mediatek,ec-codec", 0); @@ -737,8 +739,10 @@ mt8183_mt6358_ts3a227_max98357_dev_probe(struct platform_device *pdev) } priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); - if (!priv) - return -ENOMEM; + if (!priv) { + ret = -ENOMEM; + goto out; + } snd_soc_card_set_drvdata(card, priv); @@ -746,7 +750,8 @@ mt8183_mt6358_ts3a227_max98357_dev_probe(struct platform_device *pdev) if (IS_ERR(priv->pinctrl)) { dev_err(&pdev->dev, "%s devm_pinctrl_get failed\n", __func__); - return PTR_ERR(priv->pinctrl); + ret = PTR_ERR(priv->pinctrl); + goto out; } for (i = 0; i < PIN_STATE_MAX; i++) { @@ -779,6 +784,7 @@ mt8183_mt6358_ts3a227_max98357_dev_probe(struct platform_device *pdev) ret = devm_snd_soc_register_card(&pdev->dev, card); +out: of_node_put(platform_node); of_node_put(ec_codec); of_node_put(hdmi_codec); diff --git a/sound/soc/pxa/mmp-pcm.c b/sound/soc/pxa/mmp-pcm.c index 5d520e18e512..99b245e3079a 100644 --- a/sound/soc/pxa/mmp-pcm.c +++ b/sound/soc/pxa/mmp-pcm.c @@ -98,7 +98,7 @@ static bool filter(struct dma_chan *chan, void *param) devname = 
kasprintf(GFP_KERNEL, "%s.%d", dma_data->dma_res->name, dma_data->ssp_id); - if ((strcmp(dev_name(chan->device->dev), devname) == 0) && + if (devname && (strcmp(dev_name(chan->device->dev), devname) == 0) && (chan->chan_id == dma_data->dma_res->start)) { found = true; } diff --git a/sound/soc/qcom/lpass-sc7180.c b/sound/soc/qcom/lpass-sc7180.c index 77a556b27cf0..24a1c121cb2e 100644 --- a/sound/soc/qcom/lpass-sc7180.c +++ b/sound/soc/qcom/lpass-sc7180.c @@ -131,6 +131,9 @@ static int sc7180_lpass_init(struct platform_device *pdev) drvdata->clks = devm_kcalloc(dev, variant->num_clks, sizeof(*drvdata->clks), GFP_KERNEL); + if (!drvdata->clks) + return -ENOMEM; + drvdata->num_clks = variant->num_clks; for (i = 0; i < drvdata->num_clks; i++) diff --git a/sound/soc/qcom/sdm845.c b/sound/soc/qcom/sdm845.c index 0adfc5708949..4da5ad609fce 100644 --- a/sound/soc/qcom/sdm845.c +++ b/sound/soc/qcom/sdm845.c @@ -56,8 +56,8 @@ static int sdm845_slim_snd_hw_params(struct snd_pcm_substream *substream, int ret = 0, i; for_each_rtd_codec_dais(rtd, i, codec_dai) { - sruntime = snd_soc_dai_get_sdw_stream(codec_dai, - substream->stream); + sruntime = snd_soc_dai_get_stream(codec_dai, + substream->stream); if (sruntime != ERR_PTR(-ENOTSUPP)) pdata->sruntime[cpu_dai->id] = sruntime; diff --git a/sound/soc/qcom/sm8250.c b/sound/soc/qcom/sm8250.c index e5190aa588c6..feb6589171ca 100644 --- a/sound/soc/qcom/sm8250.c +++ b/sound/soc/qcom/sm8250.c @@ -70,8 +70,8 @@ static int sm8250_snd_hw_params(struct snd_pcm_substream *substream, switch (cpu_dai->id) { case WSA_CODEC_DMA_RX_0: for_each_rtd_codec_dais(rtd, i, codec_dai) { - sruntime = snd_soc_dai_get_sdw_stream(codec_dai, - substream->stream); + sruntime = snd_soc_dai_get_stream(codec_dai, + substream->stream); if (sruntime != ERR_PTR(-ENOTSUPP)) pdata->sruntime[cpu_dai->id] = sruntime; } diff --git a/sound/soc/rockchip/rockchip_pdm.c b/sound/soc/rockchip/rockchip_pdm.c index 38bd603eeb45..7c0b0fe326c2 100644 --- a/sound/soc/rockchip/rockchip_pdm.c +++ b/sound/soc/rockchip/rockchip_pdm.c @@ -368,6 +368,7 @@ static int rockchip_pdm_runtime_resume(struct device *dev) ret = clk_prepare_enable(pdm->hclk); if (ret) { + clk_disable_unprepare(pdm->clk); dev_err(pdm->dev, "hclock enable failed %d\n", ret); return ret; } diff --git a/sound/soc/rockchip/rockchip_spdif.c b/sound/soc/rockchip/rockchip_spdif.c index d027ca4b1796..09a25d84fee6 100644 --- a/sound/soc/rockchip/rockchip_spdif.c +++ b/sound/soc/rockchip/rockchip_spdif.c @@ -88,6 +88,7 @@ static int __maybe_unused rk_spdif_runtime_resume(struct device *dev) ret = clk_prepare_enable(spdif->hclk); if (ret) { + clk_disable_unprepare(spdif->mclk); dev_err(spdif->dev, "hclk clock enable failed %d\n", ret); return ret; } diff --git a/sound/soc/sof/intel/hda-dai.c b/sound/soc/sof/intel/hda-dai.c index 6704dbcd101c..5f355b8d57a0 100644 --- a/sound/soc/sof/intel/hda-dai.c +++ b/sound/soc/sof/intel/hda-dai.c @@ -236,11 +236,8 @@ static int hda_link_hw_params(struct snd_pcm_substream *substream, if (!link) return -EINVAL; - /* set the stream tag in the codec dai dma params */ - if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) - snd_soc_dai_set_tdm_slot(codec_dai, stream_tag, 0, 0, 0); - else - snd_soc_dai_set_tdm_slot(codec_dai, 0, stream_tag, 0, 0); + /* set the hdac_stream in the codec dai */ + snd_soc_dai_set_stream(codec_dai, hdac_stream(link_dev), substream->stream); p_params.s_fmt = snd_pcm_format_width(params_format(params)); p_params.ch = params_channels(params); diff --git a/sound/usb/line6/driver.c 
b/sound/usb/line6/driver.c index 59faa5a9a714..b67617b68e50 100644 --- a/sound/usb/line6/driver.c +++ b/sound/usb/line6/driver.c @@ -304,7 +304,8 @@ static void line6_data_received(struct urb *urb) for (;;) { done = line6_midibuf_read(mb, line6->buffer_message, - LINE6_MIDI_MESSAGE_MAXLEN); + LINE6_MIDI_MESSAGE_MAXLEN, + LINE6_MIDIBUF_READ_RX); if (done <= 0) break; diff --git a/sound/usb/line6/midi.c b/sound/usb/line6/midi.c index ba0e2b7e8fe1..0838632c788e 100644 --- a/sound/usb/line6/midi.c +++ b/sound/usb/line6/midi.c @@ -44,7 +44,8 @@ static void line6_midi_transmit(struct snd_rawmidi_substream *substream) int req, done; for (;;) { - req = min(line6_midibuf_bytes_free(mb), line6->max_packet_size); + req = min3(line6_midibuf_bytes_free(mb), line6->max_packet_size, + LINE6_FALLBACK_MAXPACKETSIZE); done = snd_rawmidi_transmit_peek(substream, chunk, req); if (done == 0) @@ -56,7 +57,8 @@ static void line6_midi_transmit(struct snd_rawmidi_substream *substream) for (;;) { done = line6_midibuf_read(mb, chunk, - LINE6_FALLBACK_MAXPACKETSIZE); + LINE6_FALLBACK_MAXPACKETSIZE, + LINE6_MIDIBUF_READ_TX); if (done == 0) break; diff --git a/sound/usb/line6/midibuf.c b/sound/usb/line6/midibuf.c index 6a70463f82c4..e7f830f7526c 100644 --- a/sound/usb/line6/midibuf.c +++ b/sound/usb/line6/midibuf.c @@ -9,6 +9,7 @@ #include "midibuf.h" + static int midibuf_message_length(unsigned char code) { int message_length; @@ -20,12 +21,7 @@ static int midibuf_message_length(unsigned char code) message_length = length[(code >> 4) - 8]; } else { - /* - Note that according to the MIDI specification 0xf2 is - the "Song Position Pointer", but this is used by Line 6 - to send sysex messages to the host. - */ - static const int length[] = { -1, 2, -1, 2, -1, -1, 1, 1, 1, 1, + static const int length[] = { -1, 2, 2, 2, -1, -1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1 }; message_length = length[code & 0x0f]; @@ -125,7 +121,7 @@ int line6_midibuf_write(struct midi_buffer *this, unsigned char *data, } int line6_midibuf_read(struct midi_buffer *this, unsigned char *data, - int length) + int length, int read_type) { int bytes_used; int length1, length2; @@ -148,9 +144,22 @@ int line6_midibuf_read(struct midi_buffer *this, unsigned char *data, length1 = this->size - this->pos_read; - /* check MIDI command length */ command = this->buf[this->pos_read]; + /* + PODxt always has status byte lower nibble set to 0010, + when it means to send 0000, so we correct if here so + that control/program changes come on channel 1 and + sysex message status byte is correct + */ + if (read_type == LINE6_MIDIBUF_READ_RX) { + if (command == 0xb2 || command == 0xc2 || command == 0xf2) { + unsigned char fixed = command & 0xf0; + this->buf[this->pos_read] = fixed; + command = fixed; + } + } + /* check MIDI command length */ if (command & 0x80) { midi_length = midibuf_message_length(command); this->command_prev = command; diff --git a/sound/usb/line6/midibuf.h b/sound/usb/line6/midibuf.h index 124a8f9f7e96..542e8d836f87 100644 --- a/sound/usb/line6/midibuf.h +++ b/sound/usb/line6/midibuf.h @@ -8,6 +8,9 @@ #ifndef MIDIBUF_H #define MIDIBUF_H +#define LINE6_MIDIBUF_READ_TX 0 +#define LINE6_MIDIBUF_READ_RX 1 + struct midi_buffer { unsigned char *buf; int size; @@ -23,7 +26,7 @@ extern void line6_midibuf_destroy(struct midi_buffer *mb); extern int line6_midibuf_ignore(struct midi_buffer *mb, int length); extern int line6_midibuf_init(struct midi_buffer *mb, int size, int split); extern int line6_midibuf_read(struct midi_buffer *mb, unsigned char *data, - int 
length); + int length, int read_type); extern void line6_midibuf_reset(struct midi_buffer *mb); extern int line6_midibuf_write(struct midi_buffer *mb, unsigned char *data, int length); diff --git a/sound/usb/line6/pod.c b/sound/usb/line6/pod.c index 16e644330c4d..54be5ac919bf 100644 --- a/sound/usb/line6/pod.c +++ b/sound/usb/line6/pod.c @@ -159,8 +159,9 @@ static struct line6_pcm_properties pod_pcm_properties = { .bytes_per_channel = 3 /* SNDRV_PCM_FMTBIT_S24_3LE */ }; + static const char pod_version_header[] = { - 0xf2, 0x7e, 0x7f, 0x06, 0x02 + 0xf0, 0x7e, 0x7f, 0x06, 0x02 }; static char *pod_alloc_sysex_buffer(struct usb_line6_pod *pod, int code, diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h index 95358f04341b..b2f896e86353 100644 --- a/sound/usb/quirks-table.h +++ b/sound/usb/quirks-table.h @@ -76,6 +76,8 @@ { USB_DEVICE_VENDOR_SPEC(0x041e, 0x3f0a) }, /* E-Mu 0204 USB */ { USB_DEVICE_VENDOR_SPEC(0x041e, 0x3f19) }, +/* Ktmicro Usb_audio device */ +{ USB_DEVICE_VENDOR_SPEC(0x31b2, 0x0011) }, /* * Creative Technology, Ltd Live! Cam Sync HD [VF0770] diff --git a/tools/include/linux/kernel.h b/tools/include/linux/kernel.h index a7e54a08fb54..c2e109860fbc 100644 --- a/tools/include/linux/kernel.h +++ b/tools/include/linux/kernel.h @@ -14,6 +14,8 @@ #define UINT_MAX (~0U) #endif +#define _RET_IP_ ((unsigned long)__builtin_return_address(0)) + #define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) #define PERF_ALIGN(x, a) __PERF_ALIGN_MASK(x, (typeof(x))(a)-1) @@ -52,6 +54,10 @@ _min1 < _min2 ? _min1 : _min2; }) #endif +#define max_t(type, x, y) max((type)x, (type)y) +#define min_t(type, x, y) min((type)x, (type)y) +#define clamp(val, lo, hi) min((typeof(val))max(val, lo), hi) + #ifndef roundup #define roundup(x, y) ( \ { \ diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h index 6fffb3cdf39b..49bd43b998c8 100644 --- a/tools/lib/bpf/bpf.h +++ b/tools/lib/bpf/bpf.h @@ -249,8 +249,15 @@ LIBBPF_API int bpf_task_fd_query(int pid, int fd, __u32 flags, char *buf, __u32 *buf_len, __u32 *prog_id, __u32 *fd_type, __u64 *probe_offset, __u64 *probe_addr); +#ifdef __cplusplus +/* forward-declaring enums in C++ isn't compatible with pure C enums, so + * instead define bpf_enable_stats() as accepting int as an input + */ +LIBBPF_API int bpf_enable_stats(int type); +#else enum bpf_stats_type; /* defined in up-to-date linux/bpf.h */ LIBBPF_API int bpf_enable_stats(enum bpf_stats_type type); +#endif struct bpf_prog_bind_opts { size_t sz; /* size of this struct for forward/backward compatibility */ diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c index 5f3d20ae66d5..3ed759f53e7c 100644 --- a/tools/lib/bpf/btf.c +++ b/tools/lib/bpf/btf.c @@ -3718,14 +3718,14 @@ static inline __u16 btf_fwd_kind(struct btf_type *t) } /* Check if given two types are identical ARRAY definitions */ -static int btf_dedup_identical_arrays(struct btf_dedup *d, __u32 id1, __u32 id2) +static bool btf_dedup_identical_arrays(struct btf_dedup *d, __u32 id1, __u32 id2) { struct btf_type *t1, *t2; t1 = btf_type_by_id(d->btf, id1); t2 = btf_type_by_id(d->btf, id2); if (!btf_is_array(t1) || !btf_is_array(t2)) - return 0; + return false; return btf_equal_array(t1, t2); } @@ -3749,7 +3749,9 @@ static bool btf_dedup_identical_structs(struct btf_dedup *d, __u32 id1, __u32 id m1 = btf_members(t1); m2 = btf_members(t2); for (i = 0, n = btf_vlen(t1); i < n; i++, m1++, m2++) { - if (m1->type != m2->type) + if (m1->type != m2->type && + !btf_dedup_identical_arrays(d, m1->type, m2->type) && + !btf_dedup_identical_structs(d, 
m1->type, m2->type)) return false; } return true; diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c index 841cc68e3f42..f620911ad3bb 100644 --- a/tools/lib/bpf/btf_dump.c +++ b/tools/lib/bpf/btf_dump.c @@ -215,6 +215,17 @@ static int btf_dump_resize(struct btf_dump *d) return 0; } +static void btf_dump_free_names(struct hashmap *map) +{ + size_t bkt; + struct hashmap_entry *cur; + + hashmap__for_each_entry(map, cur, bkt) + free((void *)cur->key); + + hashmap__free(map); +} + void btf_dump__free(struct btf_dump *d) { int i; @@ -233,8 +244,8 @@ void btf_dump__free(struct btf_dump *d) free(d->cached_names); free(d->emit_queue); free(d->decl_stack); - hashmap__free(d->type_names); - hashmap__free(d->ident_names); + btf_dump_free_names(d->type_names); + btf_dump_free_names(d->ident_names); free(d); } @@ -1457,11 +1468,23 @@ static void btf_dump_emit_type_cast(struct btf_dump *d, __u32 id, static size_t btf_dump_name_dups(struct btf_dump *d, struct hashmap *name_map, const char *orig_name) { + char *old_name, *new_name; size_t dup_cnt = 0; + int err; + + new_name = strdup(orig_name); + if (!new_name) + return 1; hashmap__find(name_map, orig_name, (void **)&dup_cnt); dup_cnt++; - hashmap__set(name_map, orig_name, (void *)dup_cnt, NULL, NULL); + + err = hashmap__set(name_map, new_name, (void *)dup_cnt, + (const void **)&old_name, NULL); + if (err) + free(new_name); + + free(old_name); return dup_cnt; } @@ -1892,7 +1915,7 @@ static int btf_dump_struct_data(struct btf_dump *d, { const struct btf_member *m = btf_members(t); __u16 n = btf_vlen(t); - int i, err; + int i, err = 0; /* note that we increment depth before calling btf_dump_print() below; * this is intentional. btf_dump_data_newline() will not print a diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 050622649797..f87a15bbf53b 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -3763,6 +3763,9 @@ static struct bpf_program *find_prog_by_sec_insn(const struct bpf_object *obj, int l = 0, r = obj->nr_programs - 1, m; struct bpf_program *prog; + if (!obj->nr_programs) + return NULL; + while (l < r) { m = l + (r - l + 1) / 2; prog = &obj->programs[m]; diff --git a/tools/objtool/check.c b/tools/objtool/check.c index 72e5d23f1ad8..308c8806ad94 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -197,7 +197,7 @@ static bool __dead_end_function(struct objtool_file *file, struct symbol *func, return false; insn = find_insn(file, func->sec, func->offset); - if (!insn->func) + if (!insn || !insn->func) return false; func_for_each_insn(file, func, insn) { @@ -846,6 +846,16 @@ static const char *uaccess_safe_builtin[] = { "__tsan_read_write4", "__tsan_read_write8", "__tsan_read_write16", + "__tsan_volatile_read1", + "__tsan_volatile_read2", + "__tsan_volatile_read4", + "__tsan_volatile_read8", + "__tsan_volatile_read16", + "__tsan_volatile_write1", + "__tsan_volatile_write2", + "__tsan_volatile_write4", + "__tsan_volatile_write8", + "__tsan_volatile_write16", "__tsan_atomic8_load", "__tsan_atomic16_load", "__tsan_atomic32_load", diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index abf88a1ad455..aad65c95c371 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -558,26 +558,14 @@ static int enable_counters(void) return err; } - if (stat_config.initial_delay < 0) { - pr_info(EVLIST_DISABLED_MSG); - return 0; - } - - if (stat_config.initial_delay > 0) { - pr_info(EVLIST_DISABLED_MSG); - usleep(stat_config.initial_delay * USEC_PER_MSEC); - } - /* * We need to 
enable counters only if: * - we don't have tracee (attaching to task or cpu) * - we have initial delay configured */ - if (!target__none(&target) || stat_config.initial_delay) { + if (!target__none(&target)) { if (!all_counters_use_bpf) evlist__enable(evsel_list); - if (stat_config.initial_delay > 0) - pr_info(EVLIST_ENABLED_MSG); } return 0; } @@ -953,18 +941,31 @@ try_again_reset: return err; } - /* - * Enable counters and exec the command: - */ - if (forks) { + if (stat_config.initial_delay) { + pr_info(EVLIST_DISABLED_MSG); + } else { err = enable_counters(); if (err) return -1; + } + + /* Exec the command, if any */ + if (forks) evlist__start_workload(evsel_list); - t0 = rdclock(); - clock_gettime(CLOCK_MONOTONIC, &ref_time); + if (stat_config.initial_delay > 0) { + usleep(stat_config.initial_delay * USEC_PER_MSEC); + err = enable_counters(); + if (err) + return -1; + pr_info(EVLIST_ENABLED_MSG); + } + + t0 = rdclock(); + clock_gettime(CLOCK_MONOTONIC, &ref_time); + + if (forks) { if (interval || timeout || evlist__ctlfd_initialized(evsel_list)) status = dispatch_events(forks, timeout, interval, ×); if (child_pid != -1) { @@ -982,13 +983,6 @@ try_again_reset: if (WIFSIGNALED(status)) psignal(WTERMSIG(status), argv[0]); } else { - err = enable_counters(); - if (err) - return -1; - - t0 = rdclock(); - clock_gettime(CLOCK_MONOTONIC, &ref_time); - status = dispatch_events(forks, timeout, interval, ×); } diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c index 2bf21194c7b3..2fea9952818f 100644 --- a/tools/perf/builtin-trace.c +++ b/tools/perf/builtin-trace.c @@ -87,6 +87,8 @@ # define F_LINUX_SPECIFIC_BASE 1024 #endif +#define RAW_SYSCALL_ARGS_NUM 6 + /* * strtoul: Go from a string to a value, i.e. for msr: MSR_FS_BASE to 0xc0000100 */ @@ -107,7 +109,7 @@ struct syscall_fmt { const char *sys_enter, *sys_exit; } bpf_prog_name; - struct syscall_arg_fmt arg[6]; + struct syscall_arg_fmt arg[RAW_SYSCALL_ARGS_NUM]; u8 nr_args; bool errpid; bool timeout; @@ -1224,7 +1226,7 @@ struct syscall { */ struct bpf_map_syscall_entry { bool enabled; - u16 string_args_len[6]; + u16 string_args_len[RAW_SYSCALL_ARGS_NUM]; }; /* @@ -1649,7 +1651,7 @@ static int syscall__alloc_arg_fmts(struct syscall *sc, int nr_args) { int idx; - if (nr_args == 6 && sc->fmt && sc->fmt->nr_args != 0) + if (nr_args == RAW_SYSCALL_ARGS_NUM && sc->fmt && sc->fmt->nr_args != 0) nr_args = sc->fmt->nr_args; sc->arg_fmt = calloc(nr_args, sizeof(*sc->arg_fmt)); @@ -1782,11 +1784,11 @@ static int trace__read_syscall_info(struct trace *trace, int id) #endif sc = trace->syscalls.table + id; if (sc->nonexistent) - return 0; + return -EEXIST; if (name == NULL) { sc->nonexistent = true; - return 0; + return -EEXIST; } sc->name = name; @@ -1800,11 +1802,18 @@ static int trace__read_syscall_info(struct trace *trace, int id) sc->tp_format = trace_event__tp_format("syscalls", tp_name); } - if (syscall__alloc_arg_fmts(sc, IS_ERR(sc->tp_format) ? 6 : sc->tp_format->format.nr_fields)) - return -ENOMEM; - - if (IS_ERR(sc->tp_format)) + /* + * Fails to read trace point format via sysfs node, so the trace point + * doesn't exist. Set the 'nonexistent' flag as true. + */ + if (IS_ERR(sc->tp_format)) { + sc->nonexistent = true; return PTR_ERR(sc->tp_format); + } + + if (syscall__alloc_arg_fmts(sc, IS_ERR(sc->tp_format) ? 
+ RAW_SYSCALL_ARGS_NUM : sc->tp_format->format.nr_fields)) + return -ENOMEM; sc->args = sc->tp_format->format.fields; /* @@ -2122,11 +2131,8 @@ static struct syscall *trace__syscall_info(struct trace *trace, (err = trace__read_syscall_info(trace, id)) != 0) goto out_cant_read; - if (trace->syscalls.table[id].name == NULL) { - if (trace->syscalls.table[id].nonexistent) - return NULL; + if (trace->syscalls.table && trace->syscalls.table[id].nonexistent) goto out_cant_read; - } return &trace->syscalls.table[id]; diff --git a/tools/perf/util/cgroup.c b/tools/perf/util/cgroup.c index e99b41f9be45..cd978c240e0d 100644 --- a/tools/perf/util/cgroup.c +++ b/tools/perf/util/cgroup.c @@ -224,6 +224,19 @@ static int add_cgroup_name(const char *fpath, const struct stat *sb __maybe_unus return 0; } +static int check_and_add_cgroup_name(const char *fpath) +{ + struct cgroup_name *cn; + + list_for_each_entry(cn, &cgroup_list, list) { + if (!strcmp(cn->name, fpath)) + return 0; + } + + /* pretend if it's added by ftw() */ + return add_cgroup_name(fpath, NULL, FTW_D, NULL); +} + static void release_cgroup_list(void) { struct cgroup_name *cn; @@ -242,7 +255,7 @@ static int list_cgroups(const char *str) struct cgroup_name *cn; char *s; - /* use given name as is - for testing purpose */ + /* use given name as is when no regex is given */ for (;;) { p = strchr(str, ','); e = p ? p : eos; @@ -253,13 +266,13 @@ static int list_cgroups(const char *str) s = strndup(str, e - str); if (!s) return -1; - /* pretend if it's added by ftw() */ - ret = add_cgroup_name(s, NULL, FTW_D, NULL); + + ret = check_and_add_cgroup_name(s); free(s); - if (ret) + if (ret < 0) return -1; } else { - if (add_cgroup_name("", NULL, FTW_D, NULL) < 0) + if (check_and_add_cgroup_name("/") < 0) return -1; } diff --git a/tools/perf/util/data.c b/tools/perf/util/data.c index 15a4547d608e..090a76be522b 100644 --- a/tools/perf/util/data.c +++ b/tools/perf/util/data.c @@ -127,6 +127,7 @@ int perf_data__open_dir(struct perf_data *data) file->size = st.st_size; } + closedir(dir); if (!files) return -EINVAL; @@ -135,6 +136,7 @@ int perf_data__open_dir(struct perf_data *data) return 0; out_err: + closedir(dir); close_dir(files, nr); return ret; } diff --git a/tools/perf/util/debug.c b/tools/perf/util/debug.c index 65e6c22f38e4..190e818a0717 100644 --- a/tools/perf/util/debug.c +++ b/tools/perf/util/debug.c @@ -241,6 +241,10 @@ int perf_quiet_option(void) opt++; } + /* For debug variables that are used as bool types, set to 0. 
*/ + redirect_to_stderr = 0; + debug_peo_args = 0; + return 0; } diff --git a/tools/perf/util/dwarf-aux.c b/tools/perf/util/dwarf-aux.c index 609ca1671501..623527edeac1 100644 --- a/tools/perf/util/dwarf-aux.c +++ b/tools/perf/util/dwarf-aux.c @@ -308,26 +308,13 @@ static int die_get_attr_udata(Dwarf_Die *tp_die, unsigned int attr_name, { Dwarf_Attribute attr; - if (dwarf_attr(tp_die, attr_name, &attr) == NULL || + if (dwarf_attr_integrate(tp_die, attr_name, &attr) == NULL || dwarf_formudata(&attr, result) != 0) return -ENOENT; return 0; } -/* Get attribute and translate it as a sdata */ -static int die_get_attr_sdata(Dwarf_Die *tp_die, unsigned int attr_name, - Dwarf_Sword *result) -{ - Dwarf_Attribute attr; - - if (dwarf_attr(tp_die, attr_name, &attr) == NULL || - dwarf_formsdata(&attr, result) != 0) - return -ENOENT; - - return 0; -} - /** * die_is_signed_type - Check whether a type DIE is signed or not * @tp_die: a DIE of a type @@ -467,9 +454,9 @@ int die_get_data_member_location(Dwarf_Die *mb_die, Dwarf_Word *offs) /* Get the call file index number in CU DIE */ static int die_get_call_fileno(Dwarf_Die *in_die) { - Dwarf_Sword idx; + Dwarf_Word idx; - if (die_get_attr_sdata(in_die, DW_AT_call_file, &idx) == 0) + if (die_get_attr_udata(in_die, DW_AT_call_file, &idx) == 0) return (int)idx; else return -ENOENT; @@ -478,9 +465,9 @@ static int die_get_call_fileno(Dwarf_Die *in_die) /* Get the declared file index number in CU DIE */ static int die_get_decl_fileno(Dwarf_Die *pdie) { - Dwarf_Sword idx; + Dwarf_Word idx; - if (die_get_attr_sdata(pdie, DW_AT_decl_file, &idx) == 0) + if (die_get_attr_udata(pdie, DW_AT_decl_file, &idx) == 0) return (int)idx; else return -ENOENT; diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c index 6c183df191aa..fd42f768e584 100644 --- a/tools/perf/util/symbol-elf.c +++ b/tools/perf/util/symbol-elf.c @@ -1292,7 +1292,7 @@ dso__load_sym_internal(struct dso *dso, struct map *map, struct symsrc *syms_ss, (!used_opd && syms_ss->adjust_symbols)) { GElf_Phdr phdr; - if (elf_read_program_header(syms_ss->elf, + if (elf_read_program_header(runtime_ss->elf, (u64)sym.st_value, &phdr)) { pr_debug4("%s: failed to find program header for " "symbol: %s st_value: %#" PRIx64 "\n", diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl index 09d1578f9d66..1737c59e4ff6 100755 --- a/tools/testing/ktest/ktest.pl +++ b/tools/testing/ktest/ktest.pl @@ -1963,7 +1963,7 @@ sub run_scp_mod { sub _get_grub_index { - my ($command, $target, $skip) = @_; + my ($command, $target, $skip, $submenu) = @_; return if (defined($grub_number) && defined($last_grub_menu) && $last_grub_menu eq $grub_menu && defined($last_machine) && @@ -1980,11 +1980,16 @@ sub _get_grub_index { my $found = 0; + my $submenu_number = 0; + while () { if (/$target/) { $grub_number++; $found = 1; last; + } elsif (defined($submenu) && /$submenu/) { + $submenu_number++; + $grub_number = -1; } elsif (/$skip/) { $grub_number++; } @@ -1993,6 +1998,9 @@ sub _get_grub_index { dodie "Could not find '$grub_menu' through $command on $machine" if (!$found); + if ($submenu_number > 0) { + $grub_number = "$submenu_number>$grub_number"; + } doprint "$grub_number\n"; $last_grub_menu = $grub_menu; $last_machine = $machine; @@ -2003,6 +2011,7 @@ sub get_grub_index { my $command; my $target; my $skip; + my $submenu; my $grub_menu_qt; if ($reboot_type !~ /^grub/) { @@ -2017,8 +2026,9 @@ sub get_grub_index { $skip = '^\s*title\s'; } elsif ($reboot_type eq "grub2") { $command = "cat $grub_file"; - 
$target = '^menuentry.*' . $grub_menu_qt; - $skip = '^menuentry\s|^submenu\s'; + $target = '^\s*menuentry.*' . $grub_menu_qt; + $skip = '^\s*menuentry'; + $submenu = '^\s*submenu\s'; } elsif ($reboot_type eq "grub2bls") { $command = $grub_bls_get; $target = '^title=.*' . $grub_menu_qt; @@ -2027,7 +2037,7 @@ sub get_grub_index { return; } - _get_grub_index($command, $target, $skip); + _get_grub_index($command, $target, $skip, $submenu); } sub wait_for_input { @@ -2090,7 +2100,7 @@ sub reboot_to { if ($reboot_type eq "grub") { run_ssh "'(echo \"savedefault --default=$grub_number --once\" | grub --batch)'"; } elsif (($reboot_type eq "grub2") or ($reboot_type eq "grub2bls")) { - run_ssh "$grub_reboot $grub_number"; + run_ssh "$grub_reboot \"'$grub_number'\""; } elsif ($reboot_type eq "syslinux") { run_ssh "$syslinux --once \\\"$syslinux_label\\\" $syslinux_path"; } elsif (defined $reboot_script) { @@ -3768,9 +3778,10 @@ sub test_this_config { # .config to make sure it is missing the config that # we had before my %configs = %min_configs; - delete $configs{$config}; + $configs{$config} = "# $config is not set"; make_new_config ((values %configs), (values %keep_configs)); make_oldconfig; + delete $configs{$config}; undef %configs; assign_configs \%configs, $output_config; diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile index 14206d1d1efe..56a4873a343c 100644 --- a/tools/testing/selftests/Makefile +++ b/tools/testing/selftests/Makefile @@ -114,19 +114,27 @@ ifdef building_out_of_srctree override LDFLAGS = endif -ifneq ($(O),) - BUILD := $(O)/kselftest +top_srcdir ?= ../../.. + +ifeq ("$(origin O)", "command line") + KBUILD_OUTPUT := $(O) +endif + +ifneq ($(KBUILD_OUTPUT),) + # Make's built-in functions such as $(abspath ...), $(realpath ...) cannot + # expand a shell special character '~'. We use a somewhat tedious way here. + abs_objtree := $(shell cd $(top_srcdir) && mkdir -p $(KBUILD_OUTPUT) && cd $(KBUILD_OUTPUT) && pwd) + $(if $(abs_objtree),, \ + $(error failed to create output directory "$(KBUILD_OUTPUT)")) + # $(realpath ...) resolves symlinks + abs_objtree := $(realpath $(abs_objtree)) + BUILD := $(abs_objtree)/kselftest else - ifneq ($(KBUILD_OUTPUT),) - BUILD := $(KBUILD_OUTPUT)/kselftest - else - BUILD := $(shell pwd) - DEFAULT_INSTALL_HDR_PATH := 1 - endif + BUILD := $(CURDIR) + DEFAULT_INSTALL_HDR_PATH := 1 endif # Prepare for headers install -top_srcdir ?= ../../.. include $(top_srcdir)/scripts/subarch.include ARCH ?= $(SUBARCH) export KSFT_KHDR_INSTALL_DONE := 1 diff --git a/tools/testing/selftests/drivers/net/netdevsim/devlink.sh b/tools/testing/selftests/drivers/net/netdevsim/devlink.sh index 9de1d123f4f5..a08c02abde12 100755 --- a/tools/testing/selftests/drivers/net/netdevsim/devlink.sh +++ b/tools/testing/selftests/drivers/net/netdevsim/devlink.sh @@ -496,8 +496,8 @@ dummy_reporter_test() check_reporter_info dummy healthy 3 3 10 true - echo 8192> $DEBUGFS_DIR/health/binary_len - check_fail $? "Failed set dummy reporter binary len to 8192" + echo 8192 > $DEBUGFS_DIR/health/binary_len + check_err $? "Failed set dummy reporter binary len to 8192" local dump=$(devlink health dump show $DL_HANDLE reporter dummy -j) check_err $? 
"Failed show dump of dummy reporter" diff --git a/tools/testing/selftests/efivarfs/efivarfs.sh b/tools/testing/selftests/efivarfs/efivarfs.sh index a90f394f9aa9..d374878cc0ba 100755 --- a/tools/testing/selftests/efivarfs/efivarfs.sh +++ b/tools/testing/selftests/efivarfs/efivarfs.sh @@ -87,6 +87,11 @@ test_create_read() { local file=$efivarfs_mount/$FUNCNAME-$test_guid ./create-read $file + if [ $? -ne 0 ]; then + echo "create and read $file failed" + file_cleanup $file + exit 1 + fi file_cleanup $file } diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc b/tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc index 3145b0f1835c..27a68bbe778b 100644 --- a/tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc +++ b/tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc @@ -38,11 +38,18 @@ cnt_trace() { test_event_enabled() { val=$1 + check_times=10 # wait for 10 * SLEEP_TIME at most - e=`cat $EVENT_ENABLE` - if [ "$e" != $val ]; then - fail "Expected $val but found $e" - fi + while [ $check_times -ne 0 ]; do + e=`cat $EVENT_ENABLE` + if [ "$e" == $val ]; then + return 0 + fi + sleep $SLEEP_TIME + check_times=$((check_times - 1)) + done + + fail "Expected $val but found $e" } run_enable_disable() { diff --git a/tools/testing/selftests/kvm/memslot_modification_stress_test.c b/tools/testing/selftests/kvm/memslot_modification_stress_test.c index 1d806b8ffee2..766c1790df66 100644 --- a/tools/testing/selftests/kvm/memslot_modification_stress_test.c +++ b/tools/testing/selftests/kvm/memslot_modification_stress_test.c @@ -72,7 +72,7 @@ struct memslot_antagonist_args { static void add_remove_memslot(struct kvm_vm *vm, useconds_t delay, uint64_t nr_modifications) { - uint64_t pages = max_t(int, vm->page_size, getpagesize()) / vm->page_size; + uint64_t pages = max_t(int, vm_get_page_size(vm), getpagesize()) / vm_get_page_size(vm); uint64_t gpa; int i; diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk index fe7ee2b0f29c..a3df78b7702c 100644 --- a/tools/testing/selftests/lib.mk +++ b/tools/testing/selftests/lib.mk @@ -129,6 +129,11 @@ endef clean: $(CLEAN) +# Enables to extend CFLAGS and LDFLAGS from command line, e.g. +# make USERCFLAGS=-Werror USERLDFLAGS=-static +CFLAGS += $(USERCFLAGS) +LDFLAGS += $(USERLDFLAGS) + # When make O= with kselftest target from main level # the following aren't defined. # diff --git a/tools/testing/selftests/net/toeplitz.sh b/tools/testing/selftests/net/toeplitz.sh index 0a49907cd4fe..da5bfd834eff 100755 --- a/tools/testing/selftests/net/toeplitz.sh +++ b/tools/testing/selftests/net/toeplitz.sh @@ -32,7 +32,7 @@ DEV="eth0" # This is determined by reading the RSS indirection table using ethtool. get_rss_cfg_num_rxqs() { echo $(ethtool -x "${DEV}" | - egrep [[:space:]]+[0-9]+:[[:space:]]+ | + grep -E [[:space:]]+[0-9]+:[[:space:]]+ | cut -d: -f2- | awk '{$1=$1};1' | tr ' ' '\n' | diff --git a/tools/testing/selftests/netfilter/conntrack_icmp_related.sh b/tools/testing/selftests/netfilter/conntrack_icmp_related.sh index b48e1833bc89..76645aaf2b58 100755 --- a/tools/testing/selftests/netfilter/conntrack_icmp_related.sh +++ b/tools/testing/selftests/netfilter/conntrack_icmp_related.sh @@ -35,6 +35,8 @@ cleanup() { for i in 1 2;do ip netns del nsrouter$i;done } +trap cleanup EXIT + ipv4() { echo -n 192.168.$1.2 } @@ -146,11 +148,17 @@ ip netns exec nsclient1 nft -f - < /dev/null + +expect="packets 1 bytes 112" +check_counter nsclient1 "redir4" "$expect" +if [ $? 
-ne 0 ];then + ret=1 +fi + +ip netns exec "nsclient1" ping -c 1 dead:1::42 > /dev/null +expect="packets 1 bytes 192" +check_counter nsclient1 "redir6" "$expect" +if [ $? -ne 0 ];then + ret=1 +fi + +if [ $ret -eq 0 ];then + echo "PASS: icmp redirects had RELATED state" +else + echo "ERROR: icmp redirect RELATED state test has failed" +fi + exit $ret diff --git a/tools/testing/selftests/powerpc/dscr/dscr_sysfs_test.c b/tools/testing/selftests/powerpc/dscr/dscr_sysfs_test.c index fbbdffdb2e5d..f20d1c166d1e 100644 --- a/tools/testing/selftests/powerpc/dscr/dscr_sysfs_test.c +++ b/tools/testing/selftests/powerpc/dscr/dscr_sysfs_test.c @@ -24,6 +24,7 @@ static int check_cpu_dscr_default(char *file, unsigned long val) rc = read(fd, buf, sizeof(buf)); if (rc == -1) { perror("read() failed"); + close(fd); return 1; } close(fd); @@ -65,8 +66,10 @@ static int check_all_cpu_dscr_defaults(unsigned long val) if (access(file, F_OK)) continue; - if (check_cpu_dscr_default(file, val)) + if (check_cpu_dscr_default(file, val)) { + closedir(sysfs); return 1; + } } closedir(sysfs); return 0; diff --git a/tools/testing/selftests/proc/proc-uptime-002.c b/tools/testing/selftests/proc/proc-uptime-002.c index e7ceabed7f51..7d0aa22bdc12 100644 --- a/tools/testing/selftests/proc/proc-uptime-002.c +++ b/tools/testing/selftests/proc/proc-uptime-002.c @@ -17,6 +17,7 @@ // while shifting across CPUs. #undef NDEBUG #include +#include #include #include #include @@ -54,7 +55,7 @@ int main(void) len += sizeof(unsigned long); free(m); m = malloc(len); - } while (sys_sched_getaffinity(0, len, m) == -EINVAL); + } while (sys_sched_getaffinity(0, len, m) == -1 && errno == EINVAL); fd = open("/proc/uptime", O_RDONLY); assert(fd >= 0);
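The final hunk above switches the proc-uptime selftest to the userspace error convention: a raw syscall() wrapper reports failure as a return value of -1 with errno set, not as a literal negative errno such as -EINVAL, so the old "== -EINVAL" test could never match. As a minimal, self-contained sketch of the same grow-the-buffer-until-it-fits pattern — written here against the glibc sched_getaffinity() wrapper purely for illustration, not the selftest's own sys_sched_getaffinity() helper — something like the following would exercise the corrected check:

#define _GNU_SOURCE
#include <errno.h>
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	size_t len = sizeof(unsigned long);
	unsigned long *mask = NULL;

	/* Grow the mask until the kernel accepts its size.  Failure is
	 * reported as -1 with errno set (EINVAL when the buffer is too
	 * small for the kernel's cpumask), which is exactly the
	 * convention the selftest fix above checks for. */
	for (;;) {
		free(mask);
		mask = malloc(len);
		if (!mask)
			return 1;
		if (sched_getaffinity(0, len, (cpu_set_t *)mask) == 0)
			break;
		if (errno != EINVAL)
			return 1;
		len += sizeof(unsigned long);
	}

	printf("affinity mask fits in %zu bytes\n", len);
	free(mask);
	return 0;
}

The point of the pattern is that comparing the wrapper's return value against -EINVAL would never terminate the loop correctly; only the -1/errno pair identifies the "buffer too small" case.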