author     Alice Ferrazzi <alicef@gentoo.org>  2023-02-01 17:03:33 +0900
committer  Alice Ferrazzi <alicef@gentoo.org>  2023-02-01 17:03:33 +0900
commit     cabf8a4c1fb2af4bb6390d026ade7dc8133df2d5 (patch)
tree       011d876fbde04b7aecff2e20dc5237a27b3cc499
parent     Linux patch 6.1.8 (diff)
download   linux-patches-cabf8a4c.tar.gz
           linux-patches-cabf8a4c.tar.bz2
           linux-patches-cabf8a4c.zip
Linux patch 6.1.9 (6.1-11)
Signed-off-by: Alice Ferrazzi <alicef@gentoo.org>
-rw-r--r--  0000_README            |     4 +
-rw-r--r--  1008_linux-6.1.9.patch | 11622 ++++++++++++++++++
2 files changed, 11626 insertions, 0 deletions
diff --git a/0000_README b/0000_README
index 396dd2ee..0965d9a9 100644
--- a/0000_README
+++ b/0000_README
@@ -75,6 +75,10 @@ Patch: 1007_linux-6.1.8.patch
From: http://www.kernel.org
Desc: Linux 6.1.8
+Patch: 1008_linux-6.1.9.patch
+From: http://www.kernel.org
+Desc: Linux 6.1.9
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1008_linux-6.1.9.patch b/1008_linux-6.1.9.patch
new file mode 100644
index 00000000..f4ffc2a3
--- /dev/null
+++ b/1008_linux-6.1.9.patch
@@ -0,0 +1,11622 @@
+diff --git a/Documentation/devicetree/bindings/i2c/renesas,rzv2m.yaml b/Documentation/devicetree/bindings/i2c/renesas,rzv2m.yaml
+index c46378efc1239..92e899905ef88 100644
+--- a/Documentation/devicetree/bindings/i2c/renesas,rzv2m.yaml
++++ b/Documentation/devicetree/bindings/i2c/renesas,rzv2m.yaml
+@@ -16,7 +16,7 @@ properties:
+ compatible:
+ items:
+ - enum:
+- - renesas,i2c-r9a09g011 # RZ/V2M
++ - renesas,r9a09g011-i2c # RZ/V2M
+ - const: renesas,rzv2m-i2c
+
+ reg:
+@@ -66,7 +66,7 @@ examples:
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ i2c0: i2c@a4030000 {
+- compatible = "renesas,i2c-r9a09g011", "renesas,rzv2m-i2c";
++ compatible = "renesas,r9a09g011-i2c", "renesas,rzv2m-i2c";
+ reg = <0xa4030000 0x80>;
+ interrupts = <GIC_SPI 232 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 236 IRQ_TYPE_EDGE_RISING>;
+diff --git a/Documentation/devicetree/bindings/regulator/samsung,s2mps14.yaml b/Documentation/devicetree/bindings/regulator/samsung,s2mps14.yaml
+index 01f9d4e236e94..a7feb497eb89b 100644
+--- a/Documentation/devicetree/bindings/regulator/samsung,s2mps14.yaml
++++ b/Documentation/devicetree/bindings/regulator/samsung,s2mps14.yaml
+@@ -19,8 +19,8 @@ description: |
+ additional information and example.
+
+ patternProperties:
+- # 25 LDOs
+- "^LDO([1-9]|[1][0-9]|2[0-5])$":
++ # 25 LDOs, without LDO10-12
++ "^LDO([1-9]|1[3-9]|2[0-5])$":
+ type: object
+ $ref: regulator.yaml#
+ unevaluatedProperties: false
+@@ -30,6 +30,23 @@ patternProperties:
+ required:
+ - regulator-name
+
++ "^LDO(1[0-2])$":
++ type: object
++ $ref: regulator.yaml#
++ unevaluatedProperties: false
++ description:
++ Properties for single LDO regulator.
++
++ properties:
++ samsung,ext-control-gpios:
++ maxItems: 1
++ description:
++ LDO10, LDO11 and LDO12 can be configured to external control over
++ GPIO.
++
++ required:
++ - regulator-name
++
+ # 5 bucks
+ "^BUCK[1-5]$":
+ type: object
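+
The split above exists because only LDO10, LDO11 and LDO12 support samsung,ext-control-gpios, so they get their own pattern while the generic one now matches LDO1-9, LDO13-19 and LDO20-25. A standalone POSIX-regex check of the two alternations (illustrative program only, not part of the patch):

#include <regex.h>
#include <stdio.h>

int main(void)
{
	/* generic LDOs: 1-9, 13-19, 20-25; LDO10-12 are deliberately excluded */
	const char *generic = "^LDO([1-9]|1[3-9]|2[0-5])$";
	/* the three LDOs that can be put under external GPIO control */
	const char *ext = "^LDO(1[0-2])$";
	const char *names[] = { "LDO9", "LDO10", "LDO12", "LDO13", "LDO25" };
	regex_t g, e;

	regcomp(&g, generic, REG_EXTENDED | REG_NOSUB);
	regcomp(&e, ext, REG_EXTENDED | REG_NOSUB);
	for (int i = 0; i < 5; i++)
		printf("%-5s generic=%d ext-gpio=%d\n", names[i],
		       !regexec(&g, names[i], 0, NULL, 0),
		       !regexec(&e, names[i], 0, NULL, 0));
	regfree(&g);
	regfree(&e);
	return 0;
}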
+diff --git a/Documentation/devicetree/bindings/riscv/cpus.yaml b/Documentation/devicetree/bindings/riscv/cpus.yaml
+index 90a7cabf58feb..d4148418350c6 100644
+--- a/Documentation/devicetree/bindings/riscv/cpus.yaml
++++ b/Documentation/devicetree/bindings/riscv/cpus.yaml
+@@ -80,7 +80,7 @@ properties:
+ insensitive, letters in the riscv,isa string must be all
+ lowercase to simplify parsing.
+ $ref: "/schemas/types.yaml#/definitions/string"
+- pattern: ^rv(?:64|32)imaf?d?q?c?b?v?k?h?(?:_[hsxz](?:[a-z])+)*$
++ pattern: ^rv(?:64|32)imaf?d?q?c?b?k?j?p?v?h?(?:[hsxz](?:[a-z])+)?(?:_[hsxz](?:[a-z])+)*$
+
+ # RISC-V requires 'timebase-frequency' in /cpus, so disallow it here
+ timebase-frequency: false
+diff --git a/Documentation/devicetree/bindings/sound/everest,es8326.yaml b/Documentation/devicetree/bindings/sound/everest,es8326.yaml
+old mode 100755
+new mode 100644
+diff --git a/Documentation/x86/amd-memory-encryption.rst b/Documentation/x86/amd-memory-encryption.rst
+index a1940ebe7be50..934310ce72582 100644
+--- a/Documentation/x86/amd-memory-encryption.rst
++++ b/Documentation/x86/amd-memory-encryption.rst
+@@ -95,3 +95,39 @@ by supplying mem_encrypt=on on the kernel command line. However, if BIOS does
+ not enable SME, then Linux will not be able to activate memory encryption, even
+ if configured to do so by default or the mem_encrypt=on command line parameter
+ is specified.
++
++Secure Nested Paging (SNP)
++==========================
++
++SEV-SNP introduces new features (SEV_FEATURES[1:63]) which can be enabled
++by the hypervisor for security enhancements. Some of these features need
++guest side implementation to function correctly. The below table lists the
++expected guest behavior with various possible scenarios of guest/hypervisor
++SNP feature support.
++
+++-----------------+---------------+---------------+------------------+
++| Feature Enabled | Guest needs | Guest has | Guest boot |
++| by the HV | implementation| implementation| behaviour |
+++=================+===============+===============+==================+
++| No | No | No | Boot |
++| | | | |
+++-----------------+---------------+---------------+------------------+
++| No | Yes | No | Boot |
++| | | | |
+++-----------------+---------------+---------------+------------------+
++| No | Yes | Yes | Boot |
++| | | | |
+++-----------------+---------------+---------------+------------------+
++| Yes | No | No | Boot with |
++| | | | feature enabled |
+++-----------------+---------------+---------------+------------------+
++| Yes | Yes | No | Graceful boot |
++| | | | failure |
+++-----------------+---------------+---------------+------------------+
++| Yes | Yes | Yes | Boot with |
++| | | | feature enabled |
+++-----------------+---------------+---------------+------------------+
++
++More details in AMD64 APM[1] Vol 2: 15.34.10 SEV_STATUS MSR
++
++[1] https://www.amd.com/system/files/TechDocs/40332.pdf
+diff --git a/Makefile b/Makefile
+index 49261450039a1..3778b422fa113 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 8
++SUBLEVEL = 9
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma ninja sloth
+
+@@ -538,7 +538,7 @@ LDFLAGS_MODULE =
+ CFLAGS_KERNEL =
+ RUSTFLAGS_KERNEL =
+ AFLAGS_KERNEL =
+-export LDFLAGS_vmlinux =
++LDFLAGS_vmlinux =
+
+ # Use USERINCLUDE when you must reference the UAPI directories only.
+ USERINCLUDE := \
+@@ -1232,6 +1232,18 @@ vmlinux.o modules.builtin.modinfo modules.builtin: vmlinux_o
+ @:
+
+ PHONY += vmlinux
++# LDFLAGS_vmlinux in the top Makefile defines linker flags for the top vmlinux,
++# not for decompressors. LDFLAGS_vmlinux in arch/*/boot/compressed/Makefile is
++# unrelated; the decompressors just happen to have the same base name,
++# arch/*/boot/compressed/vmlinux.
++# Export LDFLAGS_vmlinux only to scripts/Makefile.vmlinux.
++#
++# _LDFLAGS_vmlinux is a workaround for the 'private export' bug:
++# https://savannah.gnu.org/bugs/?61463
++# For Make > 4.4, the following simple code will work:
++# vmlinux: private export LDFLAGS_vmlinux := $(LDFLAGS_vmlinux)
++vmlinux: private _LDFLAGS_vmlinux := $(LDFLAGS_vmlinux)
++vmlinux: export LDFLAGS_vmlinux = $(_LDFLAGS_vmlinux)
+ vmlinux: vmlinux.o $(KBUILD_LDS) modpost
+ $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.vmlinux
+
+@@ -1517,6 +1529,7 @@ endif
+ # *.ko are usually independent of vmlinux, but CONFIG_DEBUG_INFOBTF_MODULES
+ # is an exception.
+ ifdef CONFIG_DEBUG_INFO_BTF_MODULES
++KBUILD_BUILTIN := 1
+ modules: vmlinux
+ endif
+
+diff --git a/arch/arm/boot/dts/imx6qdl-gw560x.dtsi b/arch/arm/boot/dts/imx6qdl-gw560x.dtsi
+index 4bc4371e6bae5..4b81a975c979d 100644
+--- a/arch/arm/boot/dts/imx6qdl-gw560x.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-gw560x.dtsi
+@@ -632,7 +632,6 @@
+ &uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1>;
+- uart-has-rtscts;
+ rts-gpios = <&gpio7 1 GPIO_ACTIVE_HIGH>;
+ status = "okay";
+ };
+diff --git a/arch/arm/boot/dts/imx6ul-pico-dwarf.dts b/arch/arm/boot/dts/imx6ul-pico-dwarf.dts
+index 162dc259edc8c..5a74c7f68eb62 100644
+--- a/arch/arm/boot/dts/imx6ul-pico-dwarf.dts
++++ b/arch/arm/boot/dts/imx6ul-pico-dwarf.dts
+@@ -32,7 +32,7 @@
+ };
+
+ &i2c2 {
+- clock_frequency = <100000>;
++ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c2>;
+ status = "okay";
+diff --git a/arch/arm/boot/dts/imx7d-pico-dwarf.dts b/arch/arm/boot/dts/imx7d-pico-dwarf.dts
+index 5162fe227d1ea..fdc10563f1473 100644
+--- a/arch/arm/boot/dts/imx7d-pico-dwarf.dts
++++ b/arch/arm/boot/dts/imx7d-pico-dwarf.dts
+@@ -32,7 +32,7 @@
+ };
+
+ &i2c1 {
+- clock_frequency = <100000>;
++ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c1>;
+ status = "okay";
+@@ -52,7 +52,7 @@
+ };
+
+ &i2c4 {
+- clock_frequency = <100000>;
++ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c1>;
+ status = "okay";
+diff --git a/arch/arm/boot/dts/imx7d-pico-nymph.dts b/arch/arm/boot/dts/imx7d-pico-nymph.dts
+index 104a85254adbb..5afb1674e0125 100644
+--- a/arch/arm/boot/dts/imx7d-pico-nymph.dts
++++ b/arch/arm/boot/dts/imx7d-pico-nymph.dts
+@@ -43,7 +43,7 @@
+ };
+
+ &i2c1 {
+- clock_frequency = <100000>;
++ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c1>;
+ status = "okay";
+@@ -64,7 +64,7 @@
+ };
+
+ &i2c2 {
+- clock_frequency = <100000>;
++ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c2>;
+ status = "okay";
+diff --git a/arch/arm/boot/dts/sam9x60.dtsi b/arch/arm/boot/dts/sam9x60.dtsi
+index 8f5477e307dd4..37a5d96aaf642 100644
+--- a/arch/arm/boot/dts/sam9x60.dtsi
++++ b/arch/arm/boot/dts/sam9x60.dtsi
+@@ -564,7 +564,7 @@
+ mpddrc: mpddrc@ffffe800 {
+ compatible = "microchip,sam9x60-ddramc", "atmel,sama5d3-ddramc";
+ reg = <0xffffe800 0x200>;
+- clocks = <&pmc PMC_TYPE_SYSTEM 2>, <&pmc PMC_TYPE_CORE PMC_MCK>;
++ clocks = <&pmc PMC_TYPE_SYSTEM 2>, <&pmc PMC_TYPE_PERIPHERAL 49>;
+ clock-names = "ddrck", "mpddr";
+ };
+
+diff --git a/arch/arm/boot/dts/stm32mp151a-prtt1l.dtsi b/arch/arm/boot/dts/stm32mp151a-prtt1l.dtsi
+index d865ab5d866b9..dd23de85100c4 100644
+--- a/arch/arm/boot/dts/stm32mp151a-prtt1l.dtsi
++++ b/arch/arm/boot/dts/stm32mp151a-prtt1l.dtsi
+@@ -101,8 +101,12 @@
+
+ &qspi {
+ pinctrl-names = "default", "sleep";
+- pinctrl-0 = <&qspi_clk_pins_a &qspi_bk1_pins_a>;
+- pinctrl-1 = <&qspi_clk_sleep_pins_a &qspi_bk1_sleep_pins_a>;
++ pinctrl-0 = <&qspi_clk_pins_a
++ &qspi_bk1_pins_a
++ &qspi_cs1_pins_a>;
++ pinctrl-1 = <&qspi_clk_sleep_pins_a
++ &qspi_bk1_sleep_pins_a
++ &qspi_cs1_sleep_pins_a>;
+ reg = <0x58003000 0x1000>, <0x70000000 0x4000000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+diff --git a/arch/arm/boot/dts/stm32mp157c-emstamp-argon.dtsi b/arch/arm/boot/dts/stm32mp157c-emstamp-argon.dtsi
+index 30156b7546ed6..d540550f7da26 100644
+--- a/arch/arm/boot/dts/stm32mp157c-emstamp-argon.dtsi
++++ b/arch/arm/boot/dts/stm32mp157c-emstamp-argon.dtsi
+@@ -391,8 +391,12 @@
+
+ &qspi {
+ pinctrl-names = "default", "sleep";
+- pinctrl-0 = <&qspi_clk_pins_a &qspi_bk1_pins_a>;
+- pinctrl-1 = <&qspi_clk_sleep_pins_a &qspi_bk1_sleep_pins_a>;
++ pinctrl-0 = <&qspi_clk_pins_a
++ &qspi_bk1_pins_a
++ &qspi_cs1_pins_a>;
++ pinctrl-1 = <&qspi_clk_sleep_pins_a
++ &qspi_bk1_sleep_pins_a
++ &qspi_cs1_sleep_pins_a>;
+ reg = <0x58003000 0x1000>, <0x70000000 0x4000000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+diff --git a/arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi b/arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi
+index 238a611192e72..d3b85a8764d74 100644
+--- a/arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi
++++ b/arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi
+@@ -428,8 +428,12 @@
+
+ &qspi {
+ pinctrl-names = "default", "sleep";
+- pinctrl-0 = <&qspi_clk_pins_a &qspi_bk1_pins_a>;
+- pinctrl-1 = <&qspi_clk_sleep_pins_a &qspi_bk1_sleep_pins_a>;
++ pinctrl-0 = <&qspi_clk_pins_a
++ &qspi_bk1_pins_a
++ &qspi_cs1_pins_a>;
++ pinctrl-1 = <&qspi_clk_sleep_pins_a
++ &qspi_bk1_sleep_pins_a
++ &qspi_cs1_sleep_pins_a>;
+ reg = <0x58003000 0x1000>, <0x70000000 0x4000000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+diff --git a/arch/arm/boot/dts/stm32mp15xx-dhcor-som.dtsi b/arch/arm/boot/dts/stm32mp15xx-dhcor-som.dtsi
+index 134a798ad3f23..bb40fb46da81d 100644
+--- a/arch/arm/boot/dts/stm32mp15xx-dhcor-som.dtsi
++++ b/arch/arm/boot/dts/stm32mp15xx-dhcor-som.dtsi
+@@ -247,8 +247,12 @@
+
+ &qspi {
+ pinctrl-names = "default", "sleep";
+- pinctrl-0 = <&qspi_clk_pins_a &qspi_bk1_pins_a>;
+- pinctrl-1 = <&qspi_clk_sleep_pins_a &qspi_bk1_sleep_pins_a>;
++ pinctrl-0 = <&qspi_clk_pins_a
++ &qspi_bk1_pins_a
++ &qspi_cs1_pins_a>;
++ pinctrl-1 = <&qspi_clk_sleep_pins_a
++ &qspi_bk1_sleep_pins_a
++ &qspi_cs1_sleep_pins_a>;
+ reg = <0x58003000 0x1000>, <0x70000000 0x200000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+diff --git a/arch/arm/mach-imx/cpu-imx25.c b/arch/arm/mach-imx/cpu-imx25.c
+index 3e63445cde062..cc86977d0a340 100644
+--- a/arch/arm/mach-imx/cpu-imx25.c
++++ b/arch/arm/mach-imx/cpu-imx25.c
+@@ -23,6 +23,7 @@ static int mx25_read_cpu_rev(void)
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,imx25-iim");
+ iim_base = of_iomap(np, 0);
++ of_node_put(np);
+ BUG_ON(!iim_base);
+ rev = readl(iim_base + MXC_IIMSREV);
+ iounmap(iim_base);
+diff --git a/arch/arm/mach-imx/cpu-imx27.c b/arch/arm/mach-imx/cpu-imx27.c
+index bf70e13bbe9ee..1d28939083683 100644
+--- a/arch/arm/mach-imx/cpu-imx27.c
++++ b/arch/arm/mach-imx/cpu-imx27.c
+@@ -28,6 +28,7 @@ static int mx27_read_cpu_rev(void)
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,imx27-ccm");
+ ccm_base = of_iomap(np, 0);
++ of_node_put(np);
+ BUG_ON(!ccm_base);
+ /*
+ * now we have access to the IO registers. As we need
+diff --git a/arch/arm/mach-imx/cpu-imx31.c b/arch/arm/mach-imx/cpu-imx31.c
+index b9c24b851d1ab..35c544924e509 100644
+--- a/arch/arm/mach-imx/cpu-imx31.c
++++ b/arch/arm/mach-imx/cpu-imx31.c
+@@ -39,6 +39,7 @@ static int mx31_read_cpu_rev(void)
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,imx31-iim");
+ iim_base = of_iomap(np, 0);
++ of_node_put(np);
+ BUG_ON(!iim_base);
+
+ /* read SREV register from IIM module */
+diff --git a/arch/arm/mach-imx/cpu-imx35.c b/arch/arm/mach-imx/cpu-imx35.c
+index 80e7d8ab9f1bb..1fe75b39c2d99 100644
+--- a/arch/arm/mach-imx/cpu-imx35.c
++++ b/arch/arm/mach-imx/cpu-imx35.c
+@@ -21,6 +21,7 @@ static int mx35_read_cpu_rev(void)
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,imx35-iim");
+ iim_base = of_iomap(np, 0);
++ of_node_put(np);
+ BUG_ON(!iim_base);
+
+ rev = imx_readl(iim_base + MXC_IIMSREV);
+diff --git a/arch/arm/mach-imx/cpu-imx5.c b/arch/arm/mach-imx/cpu-imx5.c
+index ad56263778f93..a67c89bf155dd 100644
+--- a/arch/arm/mach-imx/cpu-imx5.c
++++ b/arch/arm/mach-imx/cpu-imx5.c
+@@ -28,6 +28,7 @@ static u32 imx5_read_srev_reg(const char *compat)
+
+ np = of_find_compatible_node(NULL, NULL, compat);
+ iim_base = of_iomap(np, 0);
++ of_node_put(np);
+ WARN_ON(!iim_base);
+
+ srev = readl(iim_base + IIM_SREV) & 0xff;
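+
The five hunks above fix one and the same leak: of_find_compatible_node() returns the node with its reference count raised, and of_iomap() does not take ownership of that reference, so it must be dropped as soon as the mapping exists. A minimal sketch of the corrected pattern (read_soc_rev() is a hypothetical helper; error handling is simplified relative to the BUG_ON()s above):

#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/io.h>

static int read_soc_rev(const char *compat, unsigned long offset)
{
	struct device_node *np;
	void __iomem *base;
	int rev;

	/* of_find_compatible_node() returns the node with a reference held */
	np = of_find_compatible_node(NULL, NULL, compat);
	base = of_iomap(np, 0);
	of_node_put(np);	/* of_iomap() keeps no reference of its own */
	if (!base)
		return -ENOMEM;

	rev = readl(base + offset);
	iounmap(base);
	return rev;
}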
+diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
+index c1494a4dee25b..53f2d8774fdb9 100644
+--- a/arch/arm/mm/nommu.c
++++ b/arch/arm/mm/nommu.c
+@@ -161,7 +161,7 @@ void __init paging_init(const struct machine_desc *mdesc)
+ mpu_setup();
+
+ /* allocate the zero page. */
+- zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
++ zero_page = (void *)memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+ if (!zero_page)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+ __func__, PAGE_SIZE, PAGE_SIZE);
+diff --git a/arch/arm64/boot/dts/freescale/imx8mm-beacon-baseboard.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-beacon-baseboard.dtsi
+index 03266bd90a06b..169f047fbca50 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mm-beacon-baseboard.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mm-beacon-baseboard.dtsi
+@@ -120,7 +120,7 @@
+ &ecspi2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_espi2>;
+- cs-gpios = <&gpio5 9 GPIO_ACTIVE_LOW>;
++ cs-gpios = <&gpio5 13 GPIO_ACTIVE_LOW>;
+ status = "okay";
+
+ eeprom@0 {
+@@ -316,7 +316,7 @@
+ MX8MM_IOMUXC_ECSPI2_SCLK_ECSPI2_SCLK 0x82
+ MX8MM_IOMUXC_ECSPI2_MOSI_ECSPI2_MOSI 0x82
+ MX8MM_IOMUXC_ECSPI2_MISO_ECSPI2_MISO 0x82
+- MX8MM_IOMUXC_ECSPI1_SS0_GPIO5_IO9 0x41
++ MX8MM_IOMUXC_ECSPI2_SS0_GPIO5_IO13 0x41
+ >;
+ };
+
+diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7901.dts b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7901.dts
+index d3ee6fc4baabd..72311b55f06da 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7901.dts
++++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7901.dts
+@@ -759,6 +759,7 @@
+ &usbotg2 {
+ dr_mode = "host";
+ vbus-supply = <&reg_usb2_vbus>;
++ over-current-active-low;
+ status = "okay";
+ };
+
+diff --git a/arch/arm64/boot/dts/freescale/imx8mm-verdin-dahlia.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-verdin-dahlia.dtsi
+index c2a5c2f7b204b..7c3f5c54f0400 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mm-verdin-dahlia.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mm-verdin-dahlia.dtsi
+@@ -9,6 +9,7 @@
+ simple-audio-card,bitclock-master = <&dailink_master>;
+ simple-audio-card,format = "i2s";
+ simple-audio-card,frame-master = <&dailink_master>;
++ simple-audio-card,mclk-fs = <256>;
+ simple-audio-card,name = "imx8mm-wm8904";
+ simple-audio-card,routing =
+ "Headphone Jack", "HPOUTL",
+diff --git a/arch/arm64/boot/dts/freescale/imx8mm-verdin-dev.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-verdin-dev.dtsi
+index 73cc3fafa0180..b2bcd22821702 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mm-verdin-dev.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mm-verdin-dev.dtsi
+@@ -11,6 +11,7 @@
+ simple-audio-card,bitclock-master = <&dailink_master>;
+ simple-audio-card,format = "i2s";
+ simple-audio-card,frame-master = <&dailink_master>;
++ simple-audio-card,mclk-fs = <256>;
+ simple-audio-card,name = "imx8mm-nau8822";
+ simple-audio-card,routing =
+ "Headphones", "LHP",
+diff --git a/arch/arm64/boot/dts/freescale/imx8mp-evk.dts b/arch/arm64/boot/dts/freescale/imx8mp-evk.dts
+index b4c1ef2559f20..126c839b45f2d 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mp-evk.dts
++++ b/arch/arm64/boot/dts/freescale/imx8mp-evk.dts
+@@ -36,8 +36,8 @@
+
+ pcie0_refclk: pcie0-refclk {
+ compatible = "fixed-clock";
+- #clock-cells = <0>;
+- clock-frequency = <100000000>;
++ #clock-cells = <0>;
++ clock-frequency = <100000000>;
+ };
+
+ reg_can1_stby: regulator-can1-stby {
+diff --git a/arch/arm64/boot/dts/freescale/imx8mp-phycore-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-phycore-som.dtsi
+index 79b290a002c19..ecc4bce6db97c 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mp-phycore-som.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mp-phycore-som.dtsi
+@@ -99,7 +99,6 @@
+
+ regulators {
+ buck1: BUCK1 {
+- regulator-compatible = "BUCK1";
+ regulator-min-microvolt = <600000>;
+ regulator-max-microvolt = <2187500>;
+ regulator-boot-on;
+@@ -108,7 +107,6 @@
+ };
+
+ buck2: BUCK2 {
+- regulator-compatible = "BUCK2";
+ regulator-min-microvolt = <600000>;
+ regulator-max-microvolt = <2187500>;
+ regulator-boot-on;
+@@ -119,7 +117,6 @@
+ };
+
+ buck4: BUCK4 {
+- regulator-compatible = "BUCK4";
+ regulator-min-microvolt = <600000>;
+ regulator-max-microvolt = <3400000>;
+ regulator-boot-on;
+@@ -127,7 +124,6 @@
+ };
+
+ buck5: BUCK5 {
+- regulator-compatible = "BUCK5";
+ regulator-min-microvolt = <600000>;
+ regulator-max-microvolt = <3400000>;
+ regulator-boot-on;
+@@ -135,7 +131,6 @@
+ };
+
+ buck6: BUCK6 {
+- regulator-compatible = "BUCK6";
+ regulator-min-microvolt = <600000>;
+ regulator-max-microvolt = <3400000>;
+ regulator-boot-on;
+@@ -143,7 +138,6 @@
+ };
+
+ ldo1: LDO1 {
+- regulator-compatible = "LDO1";
+ regulator-min-microvolt = <1600000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+@@ -151,7 +145,6 @@
+ };
+
+ ldo2: LDO2 {
+- regulator-compatible = "LDO2";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1150000>;
+ regulator-boot-on;
+@@ -159,7 +152,6 @@
+ };
+
+ ldo3: LDO3 {
+- regulator-compatible = "LDO3";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+@@ -167,13 +159,11 @@
+ };
+
+ ldo4: LDO4 {
+- regulator-compatible = "LDO4";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ ldo5: LDO5 {
+- regulator-compatible = "LDO5";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+diff --git a/arch/arm64/boot/dts/freescale/imx8mp.dtsi b/arch/arm64/boot/dts/freescale/imx8mp.dtsi
+index d944ecca1b3c2..47fd6a0ba05ad 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mp.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mp.dtsi
+@@ -523,6 +523,7 @@
+ compatible = "fsl,imx8mp-gpc";
+ reg = <0x303a0000 0x1000>;
+ interrupt-parent = <&gic>;
++ interrupts = <GIC_SPI 87 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <3>;
+
+@@ -589,7 +590,7 @@
+ reg = <IMX8MP_POWER_DOMAIN_MIPI_PHY2>;
+ };
+
+- pgc_hsiomix: power-domains@17 {
++ pgc_hsiomix: power-domain@17 {
+ #power-domain-cells = <0>;
+ reg = <IMX8MP_POWER_DOMAIN_HSIOMIX>;
+ clocks = <&clk IMX8MP_CLK_HSIO_AXI>,
+diff --git a/arch/arm64/boot/dts/freescale/imx93-11x11-evk.dts b/arch/arm64/boot/dts/freescale/imx93-11x11-evk.dts
+index 69786c326db00..27f9a9f331346 100644
+--- a/arch/arm64/boot/dts/freescale/imx93-11x11-evk.dts
++++ b/arch/arm64/boot/dts/freescale/imx93-11x11-evk.dts
+@@ -74,7 +74,7 @@
+
+ pinctrl_usdhc1: usdhc1grp {
+ fsl,pins = <
+- MX93_PAD_SD1_CLK__USDHC1_CLK 0x17fe
++ MX93_PAD_SD1_CLK__USDHC1_CLK 0x15fe
+ MX93_PAD_SD1_CMD__USDHC1_CMD 0x13fe
+ MX93_PAD_SD1_DATA0__USDHC1_DATA0 0x13fe
+ MX93_PAD_SD1_DATA1__USDHC1_DATA1 0x13fe
+@@ -84,7 +84,7 @@
+ MX93_PAD_SD1_DATA5__USDHC1_DATA5 0x13fe
+ MX93_PAD_SD1_DATA6__USDHC1_DATA6 0x13fe
+ MX93_PAD_SD1_DATA7__USDHC1_DATA7 0x13fe
+- MX93_PAD_SD1_STROBE__USDHC1_STROBE 0x17fe
++ MX93_PAD_SD1_STROBE__USDHC1_STROBE 0x15fe
+ >;
+ };
+
+@@ -102,7 +102,7 @@
+
+ pinctrl_usdhc2: usdhc2grp {
+ fsl,pins = <
+- MX93_PAD_SD2_CLK__USDHC2_CLK 0x17fe
++ MX93_PAD_SD2_CLK__USDHC2_CLK 0x15fe
+ MX93_PAD_SD2_CMD__USDHC2_CMD 0x13fe
+ MX93_PAD_SD2_DATA0__USDHC2_DATA0 0x13fe
+ MX93_PAD_SD2_DATA1__USDHC2_DATA1 0x13fe
+diff --git a/arch/arm64/boot/dts/marvell/ac5-98dx25xx.dtsi b/arch/arm64/boot/dts/marvell/ac5-98dx25xx.dtsi
+index 44ed6f963b75a..8e2ac687a410b 100644
+--- a/arch/arm64/boot/dts/marvell/ac5-98dx25xx.dtsi
++++ b/arch/arm64/boot/dts/marvell/ac5-98dx25xx.dtsi
+@@ -97,7 +97,7 @@
+
+ uart1: serial@12100 {
+ compatible = "snps,dw-apb-uart";
+- reg = <0x11000 0x100>;
++ reg = <0x12100 0x100>;
+ reg-shift = <2>;
+ interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>;
+ reg-io-width = <1>;
+diff --git a/arch/arm64/boot/dts/qcom/msm8992-xiaomi-libra.dts b/arch/arm64/boot/dts/qcom/msm8992-xiaomi-libra.dts
+index c4e87d0aec42f..3ab0ad14e8704 100644
+--- a/arch/arm64/boot/dts/qcom/msm8992-xiaomi-libra.dts
++++ b/arch/arm64/boot/dts/qcom/msm8992-xiaomi-libra.dts
+@@ -11,6 +11,12 @@
+ #include <dt-bindings/gpio/gpio.h>
+ #include <dt-bindings/input/gpio-keys.h>
+
++/delete-node/ &adsp_mem;
++/delete-node/ &audio_mem;
++/delete-node/ &mpss_mem;
++/delete-node/ &peripheral_region;
++/delete-node/ &rmtfs_mem;
++
+ / {
+ model = "Xiaomi Mi 4C";
+ compatible = "xiaomi,libra", "qcom,msm8992";
+@@ -70,25 +76,67 @@
+ #size-cells = <2>;
+ ranges;
+
+- /* This is for getting crash logs using Android downstream kernels */
+- ramoops@dfc00000 {
+- compatible = "ramoops";
+- reg = <0x0 0xdfc00000 0x0 0x40000>;
+- console-size = <0x10000>;
+- record-size = <0x10000>;
+- ftrace-size = <0x10000>;
+- pmsg-size = <0x20000>;
++ memory_hole: hole@6400000 {
++ reg = <0 0x06400000 0 0x600000>;
++ no-map;
++ };
++
++ memory_hole2: hole2@6c00000 {
++ reg = <0 0x06c00000 0 0x2400000>;
++ no-map;
++ };
++
++ mpss_mem: mpss@9000000 {
++ reg = <0 0x09000000 0 0x5a00000>;
++ no-map;
++ };
++
++ tzapp: tzapp@ea00000 {
++ reg = <0 0x0ea00000 0 0x1900000>;
++ no-map;
++ };
++
++ mdm_rfsa_mem: mdm-rfsa@ca0b0000 {
++ reg = <0 0xca0b0000 0 0x10000>;
++ no-map;
++ };
++
++ rmtfs_mem: rmtfs@ca100000 {
++ compatible = "qcom,rmtfs-mem";
++ reg = <0 0xca100000 0 0x180000>;
++ no-map;
++
++ qcom,client-id = <1>;
+ };
+
+- modem_region: modem_region@9000000 {
+- reg = <0x0 0x9000000 0x0 0x5a00000>;
++ audio_mem: audio@cb400000 {
++ reg = <0 0xcb000000 0 0x400000>;
++ no-mem;
++ };
++
++ qseecom_mem: qseecom@cb400000 {
++ reg = <0 0xcb400000 0 0x1c00000>;
++ no-mem;
++ };
++
++ adsp_rfsa_mem: adsp-rfsa@cd000000 {
++ reg = <0 0xcd000000 0 0x10000>;
+ no-map;
+ };
+
+- tzapp: modem_region@ea00000 {
+- reg = <0x0 0xea00000 0x0 0x1900000>;
++ sensor_rfsa_mem: sensor-rfsa@cd010000 {
++ reg = <0 0xcd010000 0 0x10000>;
+ no-map;
+ };
++
++ ramoops@dfc00000 {
++ compatible = "ramoops";
++ reg = <0 0xdfc00000 0 0x40000>;
++ console-size = <0x10000>;
++ record-size = <0x10000>;
++ ftrace-size = <0x10000>;
++ pmsg-size = <0x20000>;
++ };
+ };
+ };
+
+@@ -130,11 +178,6 @@
+ status = "okay";
+ };
+
+-&peripheral_region {
+- reg = <0x0 0x7400000 0x0 0x1c00000>;
+- no-map;
+-};
+-
+ &pm8994_spmi_regulators {
+ VDD_APC0: s8 {
+ regulator-min-microvolt = <680000>;
+diff --git a/arch/arm64/boot/dts/qcom/msm8992.dtsi b/arch/arm64/boot/dts/qcom/msm8992.dtsi
+index 750643763a760..f4be09fc1b151 100644
+--- a/arch/arm64/boot/dts/qcom/msm8992.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8992.dtsi
+@@ -36,10 +36,6 @@
+ compatible = "qcom,rpmcc-msm8992", "qcom,rpmcc";
+ };
+
+-&tcsr_mutex {
+- compatible = "qcom,sfpb-mutex";
+-};
+-
+ &timer {
+ interrupts = <GIC_PPI 2 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 3 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+diff --git a/arch/arm64/boot/dts/qcom/sc8280xp.dtsi b/arch/arm64/boot/dts/qcom/sc8280xp.dtsi
+index 9f2a136d5cbc5..146a4285c3952 100644
+--- a/arch/arm64/boot/dts/qcom/sc8280xp.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc8280xp.dtsi
+@@ -1173,7 +1173,7 @@
+ clock-names = "aux", "ref_clk_src", "ref", "com_aux";
+
+ resets = <&gcc GCC_USB3_PHY_PRIM_BCR>,
+- <&gcc GCC_USB3_DP_PHY_PRIM_BCR>;
++ <&gcc GCC_USB4_DP_PHY_PRIM_BCR>;
+ reset-names = "phy", "common";
+
+ power-domains = <&gcc USB30_PRIM_GDSC>;
+diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
+index b9f3165075c9d..b13c22046de58 100644
+--- a/arch/arm64/include/asm/efi.h
++++ b/arch/arm64/include/asm/efi.h
+@@ -14,8 +14,16 @@
+
+ #ifdef CONFIG_EFI
+ extern void efi_init(void);
++
++bool efi_runtime_fixup_exception(struct pt_regs *regs, const char *msg);
+ #else
+ #define efi_init()
++
++static inline
++bool efi_runtime_fixup_exception(struct pt_regs *regs, const char *msg)
++{
++ return false;
++}
+ #endif
+
+ int efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md);
+@@ -40,8 +48,17 @@ int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md);
+ })
+
+ extern spinlock_t efi_rt_lock;
++extern u64 *efi_rt_stack_top;
+ efi_status_t __efi_rt_asm_wrapper(void *, const char *, ...);
+
++/*
++ * efi_rt_stack_top[-1] contains the value the stack pointer had before
++ * switching to the EFI runtime stack.
++ */
++#define current_in_efi() \
++ (!preemptible() && efi_rt_stack_top != NULL && \
++ on_task_stack(current, READ_ONCE(efi_rt_stack_top[-1]), 1))
++
+ #define ARCH_EFI_IRQ_FLAGS_MASK (PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)
+
+ /*
+diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h
+index 5a0edb064ea47..327cdcfcb1db0 100644
+--- a/arch/arm64/include/asm/stacktrace.h
++++ b/arch/arm64/include/asm/stacktrace.h
+@@ -104,4 +104,19 @@ static inline struct stack_info stackinfo_get_sdei_critical(void)
+ #define stackinfo_get_sdei_critical() stackinfo_get_unknown()
+ #endif
+
++#ifdef CONFIG_EFI
++extern u64 *efi_rt_stack_top;
++
++static inline struct stack_info stackinfo_get_efi(void)
++{
++ unsigned long high = (u64)efi_rt_stack_top;
++ unsigned long low = high - THREAD_SIZE;
++
++ return (struct stack_info) {
++ .low = low,
++ .high = high,
++ };
++}
++#endif
++
+ #endif /* __ASM_STACKTRACE_H */
+diff --git a/arch/arm64/kernel/efi-rt-wrapper.S b/arch/arm64/kernel/efi-rt-wrapper.S
+index 2d3c4b02393e4..e8ae803662cf1 100644
+--- a/arch/arm64/kernel/efi-rt-wrapper.S
++++ b/arch/arm64/kernel/efi-rt-wrapper.S
+@@ -7,7 +7,7 @@
+ #include <asm/assembler.h>
+
+ SYM_FUNC_START(__efi_rt_asm_wrapper)
+- stp x29, x30, [sp, #-32]!
++ stp x29, x30, [sp, #-112]!
+ mov x29, sp
+
+ /*
+@@ -17,11 +17,21 @@ SYM_FUNC_START(__efi_rt_asm_wrapper)
+ */
+ stp x1, x18, [sp, #16]
+
++ /*
++ * Preserve all callee saved registers and preserve the stack pointer
++ * value at the base of the EFI runtime stack so we can recover from
++ * synchronous exceptions occurring while executing the firmware
++ * routines.
++ */
++ stp x19, x20, [sp, #32]
++ stp x21, x22, [sp, #48]
++ stp x23, x24, [sp, #64]
++ stp x25, x26, [sp, #80]
++ stp x27, x28, [sp, #96]
++
+ ldr_l x16, efi_rt_stack_top
+ mov sp, x16
+-#ifdef CONFIG_SHADOW_CALL_STACK
+- str x18, [sp, #-16]!
+-#endif
++ stp x18, x29, [sp, #-16]!
+
+ /*
+ * We are lucky enough that no EFI runtime services take more than
+@@ -36,10 +46,13 @@ SYM_FUNC_START(__efi_rt_asm_wrapper)
+ mov x4, x6
+ blr x8
+
++ mov x16, sp
+ mov sp, x29
++ str xzr, [x16, #8] // clear recorded task SP value
++
+ ldp x1, x2, [sp, #16]
+ cmp x2, x18
+- ldp x29, x30, [sp], #32
++ ldp x29, x30, [sp], #112
+ b.ne 0f
+ ret
+ 0:
+@@ -57,3 +70,18 @@ SYM_FUNC_START(__efi_rt_asm_wrapper)
+
+ b efi_handle_corrupted_x18 // tail call
+ SYM_FUNC_END(__efi_rt_asm_wrapper)
++
++SYM_CODE_START(__efi_rt_asm_recover)
++ mov sp, x30
++
++ ldr_l x16, efi_rt_stack_top // clear recorded task SP value
++ str xzr, [x16, #-8]
++
++ ldp x19, x20, [sp, #32]
++ ldp x21, x22, [sp, #48]
++ ldp x23, x24, [sp, #64]
++ ldp x25, x26, [sp, #80]
++ ldp x27, x28, [sp, #96]
++ ldp x29, x30, [sp], #112
++ ret
++SYM_CODE_END(__efi_rt_asm_recover)
+diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
+index 386bd81ca12bb..b273900f45668 100644
+--- a/arch/arm64/kernel/efi.c
++++ b/arch/arm64/kernel/efi.c
+@@ -11,6 +11,7 @@
+ #include <linux/init.h>
+
+ #include <asm/efi.h>
++#include <asm/stacktrace.h>
+
+ static bool region_is_misaligned(const efi_memory_desc_t *md)
+ {
+@@ -149,6 +150,28 @@ DEFINE_SPINLOCK(efi_rt_lock);
+
+ asmlinkage u64 *efi_rt_stack_top __ro_after_init;
+
++asmlinkage efi_status_t __efi_rt_asm_recover(void);
++
++bool efi_runtime_fixup_exception(struct pt_regs *regs, const char *msg)
++{
++ /* Check whether the exception occurred while running the firmware */
++ if (!current_in_efi() || regs->pc >= TASK_SIZE_64)
++ return false;
++
++ pr_err(FW_BUG "Unable to handle %s in EFI runtime service\n", msg);
++ add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
++ clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
++
++ regs->regs[0] = EFI_ABORTED;
++ regs->regs[30] = efi_rt_stack_top[-1];
++ regs->pc = (u64)__efi_rt_asm_recover;
++
++ if (IS_ENABLED(CONFIG_SHADOW_CALL_STACK))
++ regs->regs[18] = efi_rt_stack_top[-2];
++
++ return true;
++}
++
+ /* EFI requires 8 KiB of stack space for runtime services */
+ static_assert(THREAD_SIZE >= SZ_8K);
+
+diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
+index 117e2c180f3c7..83154303e682c 100644
+--- a/arch/arm64/kernel/stacktrace.c
++++ b/arch/arm64/kernel/stacktrace.c
+@@ -5,6 +5,7 @@
+ * Copyright (C) 2012 ARM Ltd.
+ */
+ #include <linux/kernel.h>
++#include <linux/efi.h>
+ #include <linux/export.h>
+ #include <linux/ftrace.h>
+ #include <linux/sched.h>
+@@ -12,6 +13,7 @@
+ #include <linux/sched/task_stack.h>
+ #include <linux/stacktrace.h>
+
++#include <asm/efi.h>
+ #include <asm/irq.h>
+ #include <asm/stack_pointer.h>
+ #include <asm/stacktrace.h>
+@@ -186,6 +188,13 @@ void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
+ : stackinfo_get_unknown(); \
+ })
+
++#define STACKINFO_EFI \
++ ({ \
++ ((task == current) && current_in_efi()) \
++ ? stackinfo_get_efi() \
++ : stackinfo_get_unknown(); \
++ })
++
+ noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry,
+ void *cookie, struct task_struct *task,
+ struct pt_regs *regs)
+@@ -199,6 +208,9 @@ noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry,
+ #if defined(CONFIG_VMAP_STACK) && defined(CONFIG_ARM_SDE_INTERFACE)
+ STACKINFO_SDEI(normal),
+ STACKINFO_SDEI(critical),
++#endif
++#ifdef CONFIG_EFI
++ STACKINFO_EFI,
+ #endif
+ };
+ struct unwind_state state = {
+diff --git a/arch/arm64/kvm/vgic/vgic-v3.c b/arch/arm64/kvm/vgic/vgic-v3.c
+index 826ff6f2a4e7b..5bdada3137287 100644
+--- a/arch/arm64/kvm/vgic/vgic-v3.c
++++ b/arch/arm64/kvm/vgic/vgic-v3.c
+@@ -350,26 +350,23 @@ retry:
+ * The deactivation of the doorbell interrupt will trigger the
+ * unmapping of the associated vPE.
+ */
+-static void unmap_all_vpes(struct vgic_dist *dist)
++static void unmap_all_vpes(struct kvm *kvm)
+ {
+- struct irq_desc *desc;
++ struct vgic_dist *dist = &kvm->arch.vgic;
+ int i;
+
+- for (i = 0; i < dist->its_vm.nr_vpes; i++) {
+- desc = irq_to_desc(dist->its_vm.vpes[i]->irq);
+- irq_domain_deactivate_irq(irq_desc_get_irq_data(desc));
+- }
++ for (i = 0; i < dist->its_vm.nr_vpes; i++)
++ free_irq(dist->its_vm.vpes[i]->irq, kvm_get_vcpu(kvm, i));
+ }
+
+-static void map_all_vpes(struct vgic_dist *dist)
++static void map_all_vpes(struct kvm *kvm)
+ {
+- struct irq_desc *desc;
++ struct vgic_dist *dist = &kvm->arch.vgic;
+ int i;
+
+- for (i = 0; i < dist->its_vm.nr_vpes; i++) {
+- desc = irq_to_desc(dist->its_vm.vpes[i]->irq);
+- irq_domain_activate_irq(irq_desc_get_irq_data(desc), false);
+- }
++ for (i = 0; i < dist->its_vm.nr_vpes; i++)
++ WARN_ON(vgic_v4_request_vpe_irq(kvm_get_vcpu(kvm, i),
++ dist->its_vm.vpes[i]->irq));
+ }
+
+ /**
+@@ -394,7 +391,7 @@ int vgic_v3_save_pending_tables(struct kvm *kvm)
+ * and enabling of the doorbells have already been done.
+ */
+ if (kvm_vgic_global_state.has_gicv4_1) {
+- unmap_all_vpes(dist);
++ unmap_all_vpes(kvm);
+ vlpi_avail = true;
+ }
+
+@@ -444,7 +441,7 @@ int vgic_v3_save_pending_tables(struct kvm *kvm)
+
+ out:
+ if (vlpi_avail)
+- map_all_vpes(dist);
++ map_all_vpes(kvm);
+
+ return ret;
+ }
+diff --git a/arch/arm64/kvm/vgic/vgic-v4.c b/arch/arm64/kvm/vgic/vgic-v4.c
+index ad06ba6c9b009..a413718be92b8 100644
+--- a/arch/arm64/kvm/vgic/vgic-v4.c
++++ b/arch/arm64/kvm/vgic/vgic-v4.c
+@@ -222,6 +222,11 @@ void vgic_v4_get_vlpi_state(struct vgic_irq *irq, bool *val)
+ *val = !!(*ptr & mask);
+ }
+
++int vgic_v4_request_vpe_irq(struct kvm_vcpu *vcpu, int irq)
++{
++ return request_irq(irq, vgic_v4_doorbell_handler, 0, "vcpu", vcpu);
++}
++
+ /**
+ * vgic_v4_init - Initialize the GICv4 data structures
+ * @kvm: Pointer to the VM being initialized
+@@ -283,8 +288,7 @@ int vgic_v4_init(struct kvm *kvm)
+ irq_flags &= ~IRQ_NOAUTOEN;
+ irq_set_status_flags(irq, irq_flags);
+
+- ret = request_irq(irq, vgic_v4_doorbell_handler,
+- 0, "vcpu", vcpu);
++ ret = vgic_v4_request_vpe_irq(vcpu, irq);
+ if (ret) {
+ kvm_err("failed to allocate vcpu IRQ%d\n", irq);
+ /*
+diff --git a/arch/arm64/kvm/vgic/vgic.h b/arch/arm64/kvm/vgic/vgic.h
+index 0c8da72953f06..23e280fa0a16f 100644
+--- a/arch/arm64/kvm/vgic/vgic.h
++++ b/arch/arm64/kvm/vgic/vgic.h
+@@ -331,5 +331,6 @@ int vgic_v4_init(struct kvm *kvm);
+ void vgic_v4_teardown(struct kvm *kvm);
+ void vgic_v4_configure_vsgis(struct kvm *kvm);
+ void vgic_v4_get_vlpi_state(struct vgic_irq *irq, bool *val);
++int vgic_v4_request_vpe_irq(struct kvm_vcpu *vcpu, int irq);
+
+ #endif
+diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
+index 74f76514a48d0..3eb2825d08cff 100644
+--- a/arch/arm64/mm/fault.c
++++ b/arch/arm64/mm/fault.c
+@@ -30,6 +30,7 @@
+ #include <asm/bug.h>
+ #include <asm/cmpxchg.h>
+ #include <asm/cpufeature.h>
++#include <asm/efi.h>
+ #include <asm/exception.h>
+ #include <asm/daifflags.h>
+ #include <asm/debug-monitors.h>
+@@ -397,6 +398,9 @@ static void __do_kernel_fault(unsigned long addr, unsigned long esr,
+ msg = "paging request";
+ }
+
++ if (efi_runtime_fixup_exception(regs, msg))
++ return;
++
+ die_kernel_fault(msg, addr, esr, regs);
+ }
+
+diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S
+index b865046e4dbbc..4bf6c449d78b6 100644
+--- a/arch/riscv/kernel/head.S
++++ b/arch/riscv/kernel/head.S
+@@ -326,7 +326,7 @@ clear_bss_done:
+ call soc_early_init
+ tail start_kernel
+
+-#if CONFIG_RISCV_BOOT_SPINWAIT
++#ifdef CONFIG_RISCV_BOOT_SPINWAIT
+ .Lsecondary_start:
+ /* Set trap vector to spin forever to help debug */
+ la a3, .Lsecondary_park
+diff --git a/arch/riscv/kernel/probes/simulate-insn.c b/arch/riscv/kernel/probes/simulate-insn.c
+index d73e96f6ed7c5..a20568bd1f1a8 100644
+--- a/arch/riscv/kernel/probes/simulate-insn.c
++++ b/arch/riscv/kernel/probes/simulate-insn.c
+@@ -71,11 +71,11 @@ bool __kprobes simulate_jalr(u32 opcode, unsigned long addr, struct pt_regs *reg
+ u32 rd_index = (opcode >> 7) & 0x1f;
+ u32 rs1_index = (opcode >> 15) & 0x1f;
+
+- ret = rv_insn_reg_set_val(regs, rd_index, addr + 4);
++ ret = rv_insn_reg_get_val(regs, rs1_index, &base_addr);
+ if (!ret)
+ return ret;
+
+- ret = rv_insn_reg_get_val(regs, rs1_index, &base_addr);
++ ret = rv_insn_reg_set_val(regs, rd_index, addr + 4);
+ if (!ret)
+ return ret;
+
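The reorder above matters whenever rd and rs1 name the same register, e.g. jalr ra, 0(ra): writing the link value first clobbers the jump base before it is read. A standalone toy model of the corrected order (not the kernel's pt_regs accessors):

#include <stdio.h>
#include <stdint.h>

static uint64_t regs[32];		/* toy register file, x0 hardwired to 0 */

/* simulate "jalr rd, 0(rs1)" at address addr: read rs1 BEFORE writing rd */
static uint64_t sim_jalr(unsigned rd, unsigned rs1, uint64_t addr)
{
	uint64_t base = regs[rs1];	/* jump target, taken first */
	if (rd)
		regs[rd] = addr + 4;	/* link register, written second */
	return base;			/* new pc */
}

int main(void)
{
	regs[1] = 0x80001000;		/* ra holds the target */
	/* "jalr ra, 0(ra)": rd == rs1 == x1 */
	uint64_t pc = sim_jalr(1, 1, 0x80000000);
	/* with the set-before-get order the pc would wrongly be 0x80000004 */
	printf("pc=%#llx ra=%#llx\n",
	       (unsigned long long)pc, (unsigned long long)regs[1]);
	return 0;
}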
+diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c
+index 3373df413c882..ddb2afba6d255 100644
+--- a/arch/riscv/kernel/smpboot.c
++++ b/arch/riscv/kernel/smpboot.c
+@@ -39,7 +39,6 @@ static DECLARE_COMPLETION(cpu_running);
+
+ void __init smp_prepare_boot_cpu(void)
+ {
+- init_cpu_topology();
+ }
+
+ void __init smp_prepare_cpus(unsigned int max_cpus)
+@@ -48,6 +47,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
+ int ret;
+ unsigned int curr_cpuid;
+
++ init_cpu_topology();
++
+ curr_cpuid = smp_processor_id();
+ store_cpu_topology(curr_cpuid);
+ numa_store_cpu_info(curr_cpuid);
+diff --git a/arch/s390/include/asm/debug.h b/arch/s390/include/asm/debug.h
+index 77f24262c25c1..ac665b9670c5d 100644
+--- a/arch/s390/include/asm/debug.h
++++ b/arch/s390/include/asm/debug.h
+@@ -4,8 +4,8 @@
+ *
+ * Copyright IBM Corp. 1999, 2020
+ */
+-#ifndef DEBUG_H
+-#define DEBUG_H
++#ifndef _ASM_S390_DEBUG_H
++#define _ASM_S390_DEBUG_H
+
+ #include <linux/string.h>
+ #include <linux/spinlock.h>
+@@ -487,4 +487,4 @@ void debug_register_static(debug_info_t *id, int pages_per_area, int nr_areas);
+
+ #endif /* MODULE */
+
+-#endif /* DEBUG_H */
++#endif /* _ASM_S390_DEBUG_H */
+diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
+index 5ea3830af0ccf..f81d96710595a 100644
+--- a/arch/s390/kernel/vmlinux.lds.S
++++ b/arch/s390/kernel/vmlinux.lds.S
+@@ -79,6 +79,7 @@ SECTIONS
+ _end_amode31_refs = .;
+ }
+
++ . = ALIGN(PAGE_SIZE);
+ _edata = .; /* End of data section */
+
+ /* will be freed after init */
+@@ -193,6 +194,7 @@ SECTIONS
+
+ BSS_SECTION(PAGE_SIZE, 4 * PAGE_SIZE, PAGE_SIZE)
+
++ . = ALIGN(PAGE_SIZE);
+ _end = . ;
+
+ /*
+diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
+index ab569faf0df24..6d74acea5e859 100644
+--- a/arch/s390/kvm/interrupt.c
++++ b/arch/s390/kvm/interrupt.c
+@@ -83,8 +83,9 @@ static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id)
+ struct esca_block *sca = vcpu->kvm->arch.sca;
+ union esca_sigp_ctrl *sigp_ctrl =
+ &(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
+- union esca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl;
++ union esca_sigp_ctrl new_val = {0}, old_val;
+
++ old_val = READ_ONCE(*sigp_ctrl);
+ new_val.scn = src_id;
+ new_val.c = 1;
+ old_val.c = 0;
+@@ -95,8 +96,9 @@ static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id)
+ struct bsca_block *sca = vcpu->kvm->arch.sca;
+ union bsca_sigp_ctrl *sigp_ctrl =
+ &(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
+- union bsca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl;
++ union bsca_sigp_ctrl new_val = {0}, old_val;
+
++ old_val = READ_ONCE(*sigp_ctrl);
+ new_val.scn = src_id;
+ new_val.c = 1;
+ old_val.c = 0;
+@@ -126,16 +128,18 @@ static void sca_clear_ext_call(struct kvm_vcpu *vcpu)
+ struct esca_block *sca = vcpu->kvm->arch.sca;
+ union esca_sigp_ctrl *sigp_ctrl =
+ &(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
+- union esca_sigp_ctrl old = *sigp_ctrl;
++ union esca_sigp_ctrl old;
+
++ old = READ_ONCE(*sigp_ctrl);
+ expect = old.value;
+ rc = cmpxchg(&sigp_ctrl->value, old.value, 0);
+ } else {
+ struct bsca_block *sca = vcpu->kvm->arch.sca;
+ union bsca_sigp_ctrl *sigp_ctrl =
+ &(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
+- union bsca_sigp_ctrl old = *sigp_ctrl;
++ union bsca_sigp_ctrl old;
+
++ old = READ_ONCE(*sigp_ctrl);
+ expect = old.value;
+ rc = cmpxchg(&sigp_ctrl->value, old.value, 0);
+ }
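+
All four hunks above close the same compiler hazard: with a plain old_val = *sigp_ctrl the shared control word may be re-read between building new_val and the cmpxchg(), so the compare can run against a different value than the update was derived from. READ_ONCE() pins a single load. A userspace rendering of the pattern, with a GCC/Clang builtin standing in for the kernel's cmpxchg():

#include <stdint.h>

/* userspace stand-in for the kernel's READ_ONCE() */
#define READ_ONCE(x) (*(volatile __typeof__(x) *)&(x))

static uint16_t shared_ctrl;

/* set a bit only if the word has not changed under us in the meantime */
static int try_update(void)
{
	uint16_t old = READ_ONCE(shared_ctrl);	/* exactly one load */
	uint16_t new = old | 0x0100;

	/* succeeds only if shared_ctrl still equals the snapshot in old */
	return __atomic_compare_exchange_n(&shared_ctrl, &old, new, 0,
					   __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}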
+diff --git a/arch/x86/boot/compressed/ident_map_64.c b/arch/x86/boot/compressed/ident_map_64.c
+index d4a314cc50d6e..321a5011042d4 100644
+--- a/arch/x86/boot/compressed/ident_map_64.c
++++ b/arch/x86/boot/compressed/ident_map_64.c
+@@ -180,6 +180,12 @@ void initialize_identity_maps(void *rmode)
+
+ /* Load the new page-table. */
+ write_cr3(top_level_pgt);
++
++ /*
++ * Now that the required page table mappings are established and a
++ * GHCB can be used, check for SNP guest/HV feature compatibility.
++ */
++ snp_check_features();
+ }
+
+ static pte_t *split_large_pmd(struct x86_mapping_info *info,
+diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h
+index 62208ec04ca4b..20118fb7c53bb 100644
+--- a/arch/x86/boot/compressed/misc.h
++++ b/arch/x86/boot/compressed/misc.h
+@@ -126,6 +126,7 @@ static inline void console_init(void)
+
+ #ifdef CONFIG_AMD_MEM_ENCRYPT
+ void sev_enable(struct boot_params *bp);
++void snp_check_features(void);
+ void sev_es_shutdown_ghcb(void);
+ extern bool sev_es_check_ghcb_fault(unsigned long address);
+ void snp_set_page_private(unsigned long paddr);
+@@ -143,6 +144,7 @@ static inline void sev_enable(struct boot_params *bp)
+ if (bp)
+ bp->cc_blob_address = 0;
+ }
++static inline void snp_check_features(void) { }
+ static inline void sev_es_shutdown_ghcb(void) { }
+ static inline bool sev_es_check_ghcb_fault(unsigned long address)
+ {
+diff --git a/arch/x86/boot/compressed/sev.c b/arch/x86/boot/compressed/sev.c
+index c93930d5ccbd0..d63ad8f99f83a 100644
+--- a/arch/x86/boot/compressed/sev.c
++++ b/arch/x86/boot/compressed/sev.c
+@@ -208,6 +208,23 @@ void sev_es_shutdown_ghcb(void)
+ error("Can't unmap GHCB page");
+ }
+
++static void __noreturn sev_es_ghcb_terminate(struct ghcb *ghcb, unsigned int set,
++ unsigned int reason, u64 exit_info_2)
++{
++ u64 exit_info_1 = SVM_VMGEXIT_TERM_REASON(set, reason);
++
++ vc_ghcb_invalidate(ghcb);
++ ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_TERM_REQUEST);
++ ghcb_set_sw_exit_info_1(ghcb, exit_info_1);
++ ghcb_set_sw_exit_info_2(ghcb, exit_info_2);
++
++ sev_es_wr_ghcb_msr(__pa(ghcb));
++ VMGEXIT();
++
++ while (true)
++ asm volatile("hlt\n" : : : "memory");
++}
++
+ bool sev_es_check_ghcb_fault(unsigned long address)
+ {
+ /* Check whether the fault was on the GHCB page */
+@@ -270,6 +287,59 @@ static void enforce_vmpl0(void)
+ sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_NOT_VMPL0);
+ }
+
++/*
++ * SNP_FEATURES_IMPL_REQ is the mask of SNP features that will need
++ * guest side implementation for proper functioning of the guest. If any
++ * of these features are enabled in the hypervisor but are lacking guest
++ * side implementation, the behavior of the guest will be undefined. The
++ * guest could fail in non-obvious way making it difficult to debug.
++ *
++ * As the behavior of reserved feature bits is unknown to be on the
++ * safe side add them to the required features mask.
++ */
++#define SNP_FEATURES_IMPL_REQ (MSR_AMD64_SNP_VTOM | \
++ MSR_AMD64_SNP_REFLECT_VC | \
++ MSR_AMD64_SNP_RESTRICTED_INJ | \
++ MSR_AMD64_SNP_ALT_INJ | \
++ MSR_AMD64_SNP_DEBUG_SWAP | \
++ MSR_AMD64_SNP_VMPL_SSS | \
++ MSR_AMD64_SNP_SECURE_TSC | \
++ MSR_AMD64_SNP_VMGEXIT_PARAM | \
++ MSR_AMD64_SNP_VMSA_REG_PROTECTION | \
++ MSR_AMD64_SNP_RESERVED_BIT13 | \
++ MSR_AMD64_SNP_RESERVED_BIT15 | \
++ MSR_AMD64_SNP_RESERVED_MASK)
++
++/*
++ * SNP_FEATURES_PRESENT is the mask of SNP features that are implemented
++ * by the guest kernel. As and when a new feature is implemented in the
++ * guest kernel, a corresponding bit should be added to the mask.
++ */
++#define SNP_FEATURES_PRESENT (0)
++
++void snp_check_features(void)
++{
++ u64 unsupported;
++
++ if (!(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
++ return;
++
++ /*
++ * Terminate the boot if hypervisor has enabled any feature lacking
++ * guest side implementation. Pass on the unsupported features mask through
++ * EXIT_INFO_2 of the GHCB protocol so that those features can be reported
++ * as part of the guest boot failure.
++ */
++ unsupported = sev_status & SNP_FEATURES_IMPL_REQ & ~SNP_FEATURES_PRESENT;
++ if (unsupported) {
++ if (ghcb_version < 2 || (!boot_ghcb && !early_setup_ghcb()))
++ sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED);
++
++ sev_es_ghcb_terminate(boot_ghcb, SEV_TERM_SET_GEN,
++ GHCB_SNP_UNSUPPORTED, unsupported);
++ }
++}
++
+ void sev_enable(struct boot_params *bp)
+ {
+ unsigned int eax, ebx, ecx, edx;
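+
The gate in snp_check_features() is a pure mask computation: abort the boot if any bit is enabled by the hypervisor, listed as requiring guest support, and missing from what the guest implements. The arithmetic in isolation (masks abbreviated here; the real definitions are in the hunk above):

#include <stdint.h>
#include <stdio.h>

#define BIT_ULL(n)		(1ULL << (n))
#define SNP_SECURE_TSC		BIT_ULL(11)
#define SNP_VMGEXIT_PARAM	BIT_ULL(12)

/* abbreviated, illustrative masks */
#define FEATURES_IMPL_REQ	(SNP_SECURE_TSC | SNP_VMGEXIT_PARAM)
#define FEATURES_PRESENT	(SNP_SECURE_TSC)	/* guest implements this one */

int main(void)
{
	uint64_t sev_status = SNP_SECURE_TSC | SNP_VMGEXIT_PARAM; /* from the HV */

	/* enabled by the HV, required, but not implemented by the guest */
	uint64_t unsupported = sev_status & FEATURES_IMPL_REQ & ~FEATURES_PRESENT;

	if (unsupported)	/* here: SNP_VMGEXIT_PARAM, so terminate */
		printf("unsupported SNP features: %#llx\n",
		       (unsigned long long)unsupported);
	return 0;
}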
+diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
+index d6f3703e41194..4386b10682ce4 100644
+--- a/arch/x86/events/amd/core.c
++++ b/arch/x86/events/amd/core.c
+@@ -1387,7 +1387,7 @@ static int __init amd_core_pmu_init(void)
+ * numbered counter following it.
+ */
+ for (i = 0; i < x86_pmu.num_counters - 1; i += 2)
+- even_ctr_mask |= 1 << i;
++ even_ctr_mask |= BIT_ULL(i);
+
+ pair_constraint = (struct event_constraint)
+ __EVENT_CONSTRAINT(0, even_ctr_mask, 0,
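+
even_ctr_mask is a u64 handed to __EVENT_CONSTRAINT(), but a plain 1 << i is evaluated in 32-bit int arithmetic: harmless for today's counter counts, undefined at i == 31 and unable to set any higher bit. BIT_ULL() keeps the whole computation 64-bit, for example:

#include <stdint.h>

#define BIT_ULL(n) (1ULL << (n))

/* "1 << 31" would shift into the sign bit of int: undefined behavior in C */
uint64_t bit31 = BIT_ULL(31);	/* 0x0000000080000000 */
uint64_t bit33 = BIT_ULL(33);	/* 0x0000000200000000, unreachable with int */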
+diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
+index a2834bc93149a..3019fb1926e35 100644
+--- a/arch/x86/events/intel/cstate.c
++++ b/arch/x86/events/intel/cstate.c
+@@ -41,6 +41,7 @@
+ * MSR_CORE_C1_RES: CORE C1 Residency Counter
+ * perf code: 0x00
+ * Available model: SLM,AMT,GLM,CNL,ICX,TNT,ADL,RPL
++ * MTL
+ * Scope: Core (each processor core has a MSR)
+ * MSR_CORE_C3_RESIDENCY: CORE C3 Residency Counter
+ * perf code: 0x01
+@@ -51,50 +52,50 @@
+ * perf code: 0x02
+ * Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,
+ * SKL,KNL,GLM,CNL,KBL,CML,ICL,ICX,
+- * TGL,TNT,RKL,ADL,RPL,SPR
++ * TGL,TNT,RKL,ADL,RPL,SPR,MTL
+ * Scope: Core
+ * MSR_CORE_C7_RESIDENCY: CORE C7 Residency Counter
+ * perf code: 0x03
+ * Available model: SNB,IVB,HSW,BDW,SKL,CNL,KBL,CML,
+- * ICL,TGL,RKL,ADL,RPL
++ * ICL,TGL,RKL,ADL,RPL,MTL
+ * Scope: Core
+ * MSR_PKG_C2_RESIDENCY: Package C2 Residency Counter.
+ * perf code: 0x00
+ * Available model: SNB,IVB,HSW,BDW,SKL,KNL,GLM,CNL,
+ * KBL,CML,ICL,ICX,TGL,TNT,RKL,ADL,
+- * RPL,SPR
++ * RPL,SPR,MTL
+ * Scope: Package (physical package)
+ * MSR_PKG_C3_RESIDENCY: Package C3 Residency Counter.
+ * perf code: 0x01
+ * Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,KNL,
+ * GLM,CNL,KBL,CML,ICL,TGL,TNT,RKL,
+- * ADL,RPL
++ * ADL,RPL,MTL
+ * Scope: Package (physical package)
+ * MSR_PKG_C6_RESIDENCY: Package C6 Residency Counter.
+ * perf code: 0x02
+ * Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,
+ * SKL,KNL,GLM,CNL,KBL,CML,ICL,ICX,
+- * TGL,TNT,RKL,ADL,RPL,SPR
++ * TGL,TNT,RKL,ADL,RPL,SPR,MTL
+ * Scope: Package (physical package)
+ * MSR_PKG_C7_RESIDENCY: Package C7 Residency Counter.
+ * perf code: 0x03
+ * Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,CNL,
+- * KBL,CML,ICL,TGL,RKL,ADL,RPL
++ * KBL,CML,ICL,TGL,RKL,ADL,RPL,MTL
+ * Scope: Package (physical package)
+ * MSR_PKG_C8_RESIDENCY: Package C8 Residency Counter.
+ * perf code: 0x04
+ * Available model: HSW ULT,KBL,CNL,CML,ICL,TGL,RKL,
+- * ADL,RPL
++ * ADL,RPL,MTL
+ * Scope: Package (physical package)
+ * MSR_PKG_C9_RESIDENCY: Package C9 Residency Counter.
+ * perf code: 0x05
+ * Available model: HSW ULT,KBL,CNL,CML,ICL,TGL,RKL,
+- * ADL,RPL
++ * ADL,RPL,MTL
+ * Scope: Package (physical package)
+ * MSR_PKG_C10_RESIDENCY: Package C10 Residency Counter.
+ * perf code: 0x06
+ * Available model: HSW ULT,KBL,GLM,CNL,CML,ICL,TGL,
+- * TNT,RKL,ADL,RPL
++ * TNT,RKL,ADL,RPL,MTL
+ * Scope: Package (physical package)
+ *
+ */
+@@ -686,6 +687,8 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
+ X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, &adl_cstates),
+ X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, &adl_cstates),
+ X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S, &adl_cstates),
++ X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE, &adl_cstates),
++ X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L, &adl_cstates),
+ { },
+ };
+ MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match);
+diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
+index 6f1ccc57a6921..459b1aafd4d4a 100644
+--- a/arch/x86/events/intel/uncore.c
++++ b/arch/x86/events/intel/uncore.c
+@@ -1833,6 +1833,7 @@ static const struct x86_cpu_id intel_uncore_match[] __initconst = {
+ X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, &adl_uncore_init),
+ X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S, &adl_uncore_init),
+ X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &spr_uncore_init),
++ X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X, &spr_uncore_init),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D, &snr_uncore_init),
+ {},
+ };
+diff --git a/arch/x86/events/msr.c b/arch/x86/events/msr.c
+index ecced3a52668a..c65d8906cbcf4 100644
+--- a/arch/x86/events/msr.c
++++ b/arch/x86/events/msr.c
+@@ -69,6 +69,7 @@ static bool test_intel(int idx, void *data)
+ case INTEL_FAM6_BROADWELL_G:
+ case INTEL_FAM6_BROADWELL_X:
+ case INTEL_FAM6_SAPPHIRERAPIDS_X:
++ case INTEL_FAM6_EMERALDRAPIDS_X:
+
+ case INTEL_FAM6_ATOM_SILVERMONT:
+ case INTEL_FAM6_ATOM_SILVERMONT_D:
+@@ -107,6 +108,8 @@ static bool test_intel(int idx, void *data)
+ case INTEL_FAM6_RAPTORLAKE:
+ case INTEL_FAM6_RAPTORLAKE_P:
+ case INTEL_FAM6_RAPTORLAKE_S:
++ case INTEL_FAM6_METEORLAKE:
++ case INTEL_FAM6_METEORLAKE_L:
+ if (idx == PERF_MSR_SMI || idx == PERF_MSR_PPERF)
+ return true;
+ break;
+diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
+index 65064d9f7fa6e..8eb74cf386dbe 100644
+--- a/arch/x86/include/asm/acpi.h
++++ b/arch/x86/include/asm/acpi.h
+@@ -14,6 +14,7 @@
+ #include <asm/mmu.h>
+ #include <asm/mpspec.h>
+ #include <asm/x86_init.h>
++#include <asm/cpufeature.h>
+
+ #ifdef CONFIG_ACPI_APEI
+ # include <asm/pgtable_types.h>
+@@ -63,6 +64,13 @@ extern int (*acpi_suspend_lowlevel)(void);
+ /* Physical address to resume after wakeup */
+ unsigned long acpi_get_wakeup_address(void);
+
++static inline bool acpi_skip_set_wakeup_address(void)
++{
++ return cpu_feature_enabled(X86_FEATURE_XENPV);
++}
++
++#define acpi_skip_set_wakeup_address acpi_skip_set_wakeup_address
++
+ /*
+ * Check if the CPU can handle C2 and deeper
+ */
+diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
+index 4a2af82553e4f..91447f018f6e4 100644
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -571,6 +571,26 @@
+ #define MSR_AMD64_SEV_ES_ENABLED BIT_ULL(MSR_AMD64_SEV_ES_ENABLED_BIT)
+ #define MSR_AMD64_SEV_SNP_ENABLED BIT_ULL(MSR_AMD64_SEV_SNP_ENABLED_BIT)
+
++/* SNP feature bits enabled by the hypervisor */
++#define MSR_AMD64_SNP_VTOM BIT_ULL(3)
++#define MSR_AMD64_SNP_REFLECT_VC BIT_ULL(4)
++#define MSR_AMD64_SNP_RESTRICTED_INJ BIT_ULL(5)
++#define MSR_AMD64_SNP_ALT_INJ BIT_ULL(6)
++#define MSR_AMD64_SNP_DEBUG_SWAP BIT_ULL(7)
++#define MSR_AMD64_SNP_PREVENT_HOST_IBS BIT_ULL(8)
++#define MSR_AMD64_SNP_BTB_ISOLATION BIT_ULL(9)
++#define MSR_AMD64_SNP_VMPL_SSS BIT_ULL(10)
++#define MSR_AMD64_SNP_SECURE_TSC BIT_ULL(11)
++#define MSR_AMD64_SNP_VMGEXIT_PARAM BIT_ULL(12)
++#define MSR_AMD64_SNP_IBS_VIRT BIT_ULL(14)
++#define MSR_AMD64_SNP_VMSA_REG_PROTECTION BIT_ULL(16)
++#define MSR_AMD64_SNP_SMT_PROTECTION BIT_ULL(17)
++
++/* SNP feature bits reserved for future use. */
++#define MSR_AMD64_SNP_RESERVED_BIT13 BIT_ULL(13)
++#define MSR_AMD64_SNP_RESERVED_BIT15 BIT_ULL(15)
++#define MSR_AMD64_SNP_RESERVED_MASK GENMASK_ULL(63, 18)
++
+ #define MSR_AMD64_VIRT_SPEC_CTRL 0xc001011f
+
+ /* AMD Collaborative Processor Performance Control MSRs */
+diff --git a/arch/x86/include/uapi/asm/svm.h b/arch/x86/include/uapi/asm/svm.h
+index f69c168391aa5..80e1df482337d 100644
+--- a/arch/x86/include/uapi/asm/svm.h
++++ b/arch/x86/include/uapi/asm/svm.h
+@@ -116,6 +116,12 @@
+ #define SVM_VMGEXIT_AP_CREATE 1
+ #define SVM_VMGEXIT_AP_DESTROY 2
+ #define SVM_VMGEXIT_HV_FEATURES 0x8000fffd
++#define SVM_VMGEXIT_TERM_REQUEST 0x8000fffe
++#define SVM_VMGEXIT_TERM_REASON(reason_set, reason_code) \
++ /* SW_EXITINFO1[3:0] */ \
++ (((((u64)reason_set) & 0xf)) | \
++ /* SW_EXITINFO1[11:4] */ \
++ ((((u64)reason_code) & 0xff) << 4))
+ #define SVM_VMGEXIT_UNSUPPORTED_EVENT 0x8000ffff
+
+ /* Exit code reserved for hypervisor/software use */
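+
The new termination request carries its reason in SW_EXITINFO1, reason-set in bits [3:0] and reason-code in bits [11:4]. A quick standalone check of the encoding, reusing the macro exactly as defined above:

#include <stdio.h>
#include <stdint.h>

typedef uint64_t u64;

#define SVM_VMGEXIT_TERM_REASON(reason_set, reason_code)	\
	/* SW_EXITINFO1[3:0] */					\
	(((((u64)reason_set) & 0xf)) |				\
	/* SW_EXITINFO1[11:4] */				\
	((((u64)reason_code) & 0xff) << 4))

int main(void)
{
	/* set 1, code 2: code lands in bits [11:4], set in bits [3:0] */
	printf("%#llx\n",	/* prints 0x21 */
	       (unsigned long long)SVM_VMGEXIT_TERM_REASON(1, 2));
	return 0;
}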
+diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
+index 15aefa3f3e18e..f91e5e31aa4f0 100644
+--- a/arch/x86/kernel/i8259.c
++++ b/arch/x86/kernel/i8259.c
+@@ -114,6 +114,7 @@ static void make_8259A_irq(unsigned int irq)
+ disable_irq_nosync(irq);
+ io_apic_irqs &= ~(1<<irq);
+ irq_set_chip_and_handler(irq, &i8259A_chip, handle_level_irq);
++ irq_set_status_flags(irq, IRQ_LEVEL);
+ enable_irq(irq);
+ lapic_assign_legacy_vector(irq, true);
+ }
+diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c
+index beb1bada1b0ab..c683666876f1c 100644
+--- a/arch/x86/kernel/irqinit.c
++++ b/arch/x86/kernel/irqinit.c
+@@ -65,8 +65,10 @@ void __init init_ISA_irqs(void)
+
+ legacy_pic->init(0);
+
+- for (i = 0; i < nr_legacy_irqs(); i++)
++ for (i = 0; i < nr_legacy_irqs(); i++) {
+ irq_set_chip_and_handler(i, chip, handle_level_irq);
++ irq_set_status_flags(i, IRQ_LEVEL);
++ }
+ }
+
+ void __init init_IRQ(void)
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index 63247c57c72cc..4ae248e87f5ed 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -3412,18 +3412,15 @@ static u32 vmx_segment_access_rights(struct kvm_segment *var)
+ {
+ u32 ar;
+
+- if (var->unusable || !var->present)
+- ar = 1 << 16;
+- else {
+- ar = var->type & 15;
+- ar |= (var->s & 1) << 4;
+- ar |= (var->dpl & 3) << 5;
+- ar |= (var->present & 1) << 7;
+- ar |= (var->avl & 1) << 12;
+- ar |= (var->l & 1) << 13;
+- ar |= (var->db & 1) << 14;
+- ar |= (var->g & 1) << 15;
+- }
++ ar = var->type & 15;
++ ar |= (var->s & 1) << 4;
++ ar |= (var->dpl & 3) << 5;
++ ar |= (var->present & 1) << 7;
++ ar |= (var->avl & 1) << 12;
++ ar |= (var->l & 1) << 13;
++ ar |= (var->db & 1) << 14;
++ ar |= (var->g & 1) << 15;
++ ar |= (var->unusable || !var->present) << 16;
+
+ return ar;
+ }
+diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
+index 16dcd31d124fe..192d1784e409b 100644
+--- a/drivers/acpi/resource.c
++++ b/drivers/acpi/resource.c
+@@ -432,6 +432,13 @@ static const struct dmi_system_id asus_laptop[] = {
+ DMI_MATCH(DMI_BOARD_NAME, "S5602ZA"),
+ },
+ },
++ {
++ .ident = "Asus ExpertBook B2402CBA",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++ DMI_MATCH(DMI_BOARD_NAME, "B2402CBA"),
++ },
++ },
+ {
+ .ident = "Asus ExpertBook B2502",
+ .matches = {
+diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
+index 0b557c0d405ef..4ca6672512722 100644
+--- a/drivers/acpi/sleep.c
++++ b/drivers/acpi/sleep.c
+@@ -60,13 +60,17 @@ static struct notifier_block tts_notifier = {
+ .priority = 0,
+ };
+
++#ifndef acpi_skip_set_wakeup_address
++#define acpi_skip_set_wakeup_address() false
++#endif
++
+ static int acpi_sleep_prepare(u32 acpi_state)
+ {
+ #ifdef CONFIG_ACPI_SLEEP
+ unsigned long acpi_wakeup_address;
+
+ /* do we have a wakeup address for S2 and S3? */
+- if (acpi_state == ACPI_STATE_S3) {
++ if (acpi_state == ACPI_STATE_S3 && !acpi_skip_set_wakeup_address()) {
+ acpi_wakeup_address = acpi_get_wakeup_address();
+ if (!acpi_wakeup_address)
+ return -EFAULT;
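+
The #ifndef fallback pairs with the x86 header above via a common arch-override idiom: the architecture defines the function and then a macro of the same name, and that macro is what the generic code's #ifndef actually tests. In miniature (ARCH_HAS_SKIP stands in for compiling the arch header):

#include <stdbool.h>
#include <stdio.h>

/* arch header: x86 provides this so Xen PV skips the wakeup address */
#ifdef ARCH_HAS_SKIP
static inline bool acpi_skip_set_wakeup_address(void)
{
	return true;
}
/* the same-named macro is what the generic #ifndef below detects */
#define acpi_skip_set_wakeup_address acpi_skip_set_wakeup_address
#endif

/* generic code: default to "never skip" when no arch override exists */
#ifndef acpi_skip_set_wakeup_address
#define acpi_skip_set_wakeup_address() false
#endif

int main(void)
{
	printf("skip wakeup address: %d\n", acpi_skip_set_wakeup_address());
	return 0;
}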
+diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
+index 1db8e68cd8bce..5c32b318c173d 100644
+--- a/drivers/acpi/video_detect.c
++++ b/drivers/acpi/video_detect.c
+@@ -110,26 +110,6 @@ static bool nvidia_wmi_ec_supported(void)
+ }
+ #endif
+
+-static bool apple_gmux_backlight_present(void)
+-{
+- struct acpi_device *adev;
+- struct device *dev;
+-
+- adev = acpi_dev_get_first_match_dev(GMUX_ACPI_HID, NULL, -1);
+- if (!adev)
+- return false;
+-
+- dev = acpi_get_first_physical_node(adev);
+- if (!dev)
+- return false;
+-
+- /*
+- * drivers/platform/x86/apple-gmux.c only supports old style
+- * Apple GMUX with an IO-resource.
+- */
+- return pnp_get_resource(to_pnp_dev(dev), IORESOURCE_IO, 0) != NULL;
+-}
+-
+ /* Force to use vendor driver when the ACPI device is known to be
+ * buggy */
+ static int video_detect_force_vendor(const struct dmi_system_id *d)
+@@ -600,6 +580,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "GA503"),
+ },
+ },
++ {
++ .callback = video_detect_force_native,
++ /* Asus U46E */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "U46E"),
++ },
++ },
+ {
+ .callback = video_detect_force_native,
+ /* Asus UX303UB */
+@@ -608,6 +596,23 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "UX303UB"),
+ },
+ },
++ {
++ .callback = video_detect_force_native,
++ /* HP EliteBook 8460p */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 8460p"),
++ },
++ },
++ {
++ .callback = video_detect_force_native,
++ /* HP Pavilion g6-1d80nr / B4U19UA */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion g6 Notebook PC"),
++ DMI_MATCH(DMI_PRODUCT_SKU, "B4U19UA"),
++ },
++ },
+ {
+ .callback = video_detect_force_native,
+ /* Samsung N150P */
+@@ -756,6 +761,7 @@ static enum acpi_backlight_type __acpi_video_get_backlight_type(bool native)
+ {
+ static DEFINE_MUTEX(init_mutex);
+ static bool nvidia_wmi_ec_present;
++ static bool apple_gmux_present;
+ static bool native_available;
+ static bool init_done;
+ static long video_caps;
+@@ -769,6 +775,7 @@ static enum acpi_backlight_type __acpi_video_get_backlight_type(bool native)
+ ACPI_UINT32_MAX, find_video, NULL,
+ &video_caps, NULL);
+ nvidia_wmi_ec_present = nvidia_wmi_ec_supported();
++ apple_gmux_present = apple_gmux_detect(NULL, NULL);
+ init_done = true;
+ }
+ if (native)
+@@ -790,7 +797,7 @@ static enum acpi_backlight_type __acpi_video_get_backlight_type(bool native)
+ if (nvidia_wmi_ec_present)
+ return acpi_backlight_nvidia_wmi_ec;
+
+- if (apple_gmux_backlight_present())
++ if (apple_gmux_present)
+ return acpi_backlight_apple_gmux;
+
+ /* Use ACPI video if available, except when native should be preferred. */
+diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
+index 36833a8629980..d9b305a3427f7 100644
+--- a/drivers/ata/Kconfig
++++ b/drivers/ata/Kconfig
+@@ -650,6 +650,7 @@ config PATA_CS5530
+ config PATA_CS5535
+ tristate "CS5535 PATA support (Experimental)"
+ depends on PCI && (X86_32 || (X86_64 && COMPILE_TEST))
++ depends on !UML
+ help
+ This option enables support for the NatSemi/AMD CS5535
+ companion chip used with the Geode processor family.
+diff --git a/drivers/base/property.c b/drivers/base/property.c
+index 2a5a37fcd9987..7f338cb4fb7b8 100644
+--- a/drivers/base/property.c
++++ b/drivers/base/property.c
+@@ -989,26 +989,32 @@ struct fwnode_handle *
+ fwnode_graph_get_next_endpoint(const struct fwnode_handle *fwnode,
+ struct fwnode_handle *prev)
+ {
++ struct fwnode_handle *ep, *port_parent = NULL;
+ const struct fwnode_handle *parent;
+- struct fwnode_handle *ep;
+
+ /*
+ * If this function is in a loop and the previous iteration returned
+ * an endpoint from fwnode->secondary, then we need to use the secondary
+ * as parent rather than @fwnode.
+ */
+- if (prev)
+- parent = fwnode_graph_get_port_parent(prev);
+- else
++ if (prev) {
++ port_parent = fwnode_graph_get_port_parent(prev);
++ parent = port_parent;
++ } else {
+ parent = fwnode;
++ }
+ if (IS_ERR_OR_NULL(parent))
+ return NULL;
+
+ ep = fwnode_call_ptr_op(parent, graph_get_next_endpoint, prev);
+ if (ep)
+- return ep;
++ goto out_put_port_parent;
++
++ ep = fwnode_graph_get_next_endpoint(parent->secondary, NULL);
+
+- return fwnode_graph_get_next_endpoint(parent->secondary, NULL);
++out_put_port_parent:
++ fwnode_handle_put(port_parent);
++ return ep;
+ }
+ EXPORT_SYMBOL_GPL(fwnode_graph_get_next_endpoint);
+
+diff --git a/drivers/base/test/test_async_driver_probe.c b/drivers/base/test/test_async_driver_probe.c
+index 4d1976ca50727..929410d0dd6fe 100644
+--- a/drivers/base/test/test_async_driver_probe.c
++++ b/drivers/base/test/test_async_driver_probe.c
+@@ -145,7 +145,7 @@ static int __init test_async_probe_init(void)
+ calltime = ktime_get();
+ for_each_online_cpu(cpu) {
+ nid = cpu_to_node(cpu);
+- pdev = &sync_dev[sync_id];
++ pdev = &async_dev[async_id];
+
+ *pdev = test_platform_device_register_node("test_async_driver",
+ async_id,
+diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c
+index 78334da74d8bf..5eb8c7855970d 100644
+--- a/drivers/block/rnbd/rnbd-clt.c
++++ b/drivers/block/rnbd/rnbd-clt.c
+@@ -1440,7 +1440,7 @@ static struct rnbd_clt_dev *init_dev(struct rnbd_clt_session *sess,
+ goto out_alloc;
+ }
+
+- ret = ida_alloc_max(&index_ida, 1 << (MINORBITS - RNBD_PART_BITS),
++ ret = ida_alloc_max(&index_ida, (1 << (MINORBITS - RNBD_PART_BITS)) - 1,
+ GFP_KERNEL);
+ if (ret < 0) {
+ pr_err("Failed to initialize device '%s' from session %s, allocating idr failed, err: %d\n",
+diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
+index 17b677b5d3b22..e546932046309 100644
+--- a/drivers/block/ublk_drv.c
++++ b/drivers/block/ublk_drv.c
+@@ -2092,13 +2092,12 @@ static void __exit ublk_exit(void)
+ struct ublk_device *ub;
+ int id;
+
+- class_destroy(ublk_chr_class);
+-
+- misc_deregister(&ublk_misc);
+-
+ idr_for_each_entry(&ublk_index_idr, ub, id)
+ ublk_remove(ub);
+
++ class_destroy(ublk_chr_class);
++ misc_deregister(&ublk_misc);
++
+ idr_destroy(&ublk_index_idr);
+ unregister_chrdev_region(ublk_chr_devt, UBLK_MINORS);
+ }
+diff --git a/drivers/cpufreq/armada-37xx-cpufreq.c b/drivers/cpufreq/armada-37xx-cpufreq.c
+index c10fc33b29b18..b74289a95a171 100644
+--- a/drivers/cpufreq/armada-37xx-cpufreq.c
++++ b/drivers/cpufreq/armada-37xx-cpufreq.c
+@@ -445,7 +445,7 @@ static int __init armada37xx_cpufreq_driver_init(void)
+ return -ENODEV;
+ }
+
+- clk = clk_get(cpu_dev, 0);
++ clk = clk_get(cpu_dev, NULL);
+ if (IS_ERR(clk)) {
+ dev_err(cpu_dev, "Cannot get clock for CPU0\n");
+ return PTR_ERR(clk);
+diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
+index 432dfb4e8027e..022e3555407c8 100644
+--- a/drivers/cpufreq/cppc_cpufreq.c
++++ b/drivers/cpufreq/cppc_cpufreq.c
+@@ -487,7 +487,8 @@ static unsigned int get_perf_level_count(struct cpufreq_policy *policy)
+ cpu_data = policy->driver_data;
+ perf_caps = &cpu_data->perf_caps;
+ max_cap = arch_scale_cpu_capacity(cpu);
+- min_cap = div_u64(max_cap * perf_caps->lowest_perf, perf_caps->highest_perf);
++ min_cap = div_u64((u64)max_cap * perf_caps->lowest_perf,
++ perf_caps->highest_perf);
+ if ((min_cap == 0) || (max_cap < min_cap))
+ return 0;
+ return 1 + max_cap / CPPC_EM_CAP_STEP - min_cap / CPPC_EM_CAP_STEP;
+@@ -519,10 +520,10 @@ static int cppc_get_cpu_power(struct device *cpu_dev,
+ cpu_data = policy->driver_data;
+ perf_caps = &cpu_data->perf_caps;
+ max_cap = arch_scale_cpu_capacity(cpu_dev->id);
+- min_cap = div_u64(max_cap * perf_caps->lowest_perf,
+- perf_caps->highest_perf);
+-
+- perf_step = CPPC_EM_CAP_STEP * perf_caps->highest_perf / max_cap;
++ min_cap = div_u64((u64)max_cap * perf_caps->lowest_perf,
++ perf_caps->highest_perf);
++ perf_step = div_u64((u64)CPPC_EM_CAP_STEP * perf_caps->highest_perf,
++ max_cap);
+ min_step = min_cap / CPPC_EM_CAP_STEP;
+ max_step = max_cap / CPPC_EM_CAP_STEP;
+
+diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
+index 6ac3800db4508..69a8742c0a7a3 100644
+--- a/drivers/cpufreq/cpufreq-dt-platdev.c
++++ b/drivers/cpufreq/cpufreq-dt-platdev.c
+@@ -135,6 +135,7 @@ static const struct of_device_id blocklist[] __initconst = {
+ { .compatible = "nvidia,tegra30", },
+ { .compatible = "nvidia,tegra124", },
+ { .compatible = "nvidia,tegra210", },
++ { .compatible = "nvidia,tegra234", },
+
+ { .compatible = "qcom,apq8096", },
+ { .compatible = "qcom,msm8996", },
+@@ -148,6 +149,7 @@ static const struct of_device_id blocklist[] __initconst = {
+ { .compatible = "qcom,sdm845", },
+ { .compatible = "qcom,sm6115", },
+ { .compatible = "qcom,sm6350", },
++ { .compatible = "qcom,sm6375", },
+ { .compatible = "qcom,sm8150", },
+ { .compatible = "qcom,sm8250", },
+ { .compatible = "qcom,sm8350", },
+diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
+index c741b6431958c..8a6e6b60d66f3 100644
+--- a/drivers/dma/dmaengine.c
++++ b/drivers/dma/dmaengine.c
+@@ -451,7 +451,8 @@ static int dma_chan_get(struct dma_chan *chan)
+ /* The channel is already in use, update client count */
+ if (chan->client_count) {
+ __module_get(owner);
+- goto out;
++ chan->client_count++;
++ return 0;
+ }
+
+ if (!try_module_get(owner))
+@@ -470,11 +471,11 @@ static int dma_chan_get(struct dma_chan *chan)
+ goto err_out;
+ }
+
++ chan->client_count++;
++
+ if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
+ balance_ref_count(chan);
+
+-out:
+- chan->client_count++;
+ return 0;
+
+ err_out:
+diff --git a/drivers/dma/ptdma/ptdma-dev.c b/drivers/dma/ptdma/ptdma-dev.c
+index 377da23012ac2..a2bf13ff18b6d 100644
+--- a/drivers/dma/ptdma/ptdma-dev.c
++++ b/drivers/dma/ptdma/ptdma-dev.c
+@@ -71,12 +71,13 @@ static int pt_core_execute_cmd(struct ptdma_desc *desc, struct pt_cmd_queue *cmd
+ bool soc = FIELD_GET(DWORD0_SOC, desc->dw0);
+ u8 *q_desc = (u8 *)&cmd_q->qbase[cmd_q->qidx];
+ u32 tail;
++ unsigned long flags;
+
+ if (soc) {
+ desc->dw0 |= FIELD_PREP(DWORD0_IOC, desc->dw0);
+ desc->dw0 &= ~DWORD0_SOC;
+ }
+- mutex_lock(&cmd_q->q_mutex);
++ spin_lock_irqsave(&cmd_q->q_lock, flags);
+
+ /* Copy 32-byte command descriptor to hw queue. */
+ memcpy(q_desc, desc, 32);
+@@ -91,7 +92,7 @@ static int pt_core_execute_cmd(struct ptdma_desc *desc, struct pt_cmd_queue *cmd
+
+ /* Turn the queue back on using our cached control register */
+ pt_start_queue(cmd_q);
+- mutex_unlock(&cmd_q->q_mutex);
++ spin_unlock_irqrestore(&cmd_q->q_lock, flags);
+
+ return 0;
+ }
+@@ -199,7 +200,7 @@ int pt_core_init(struct pt_device *pt)
+
+ cmd_q->pt = pt;
+ cmd_q->dma_pool = dma_pool;
+- mutex_init(&cmd_q->q_mutex);
++ spin_lock_init(&cmd_q->q_lock);
+
+ /* Page alignment satisfies our needs for N <= 128 */
+ cmd_q->qsize = Q_SIZE(Q_DESC_SIZE);
+diff --git a/drivers/dma/ptdma/ptdma.h b/drivers/dma/ptdma/ptdma.h
+index d093c43b7d134..21b4bf895200b 100644
+--- a/drivers/dma/ptdma/ptdma.h
++++ b/drivers/dma/ptdma/ptdma.h
+@@ -196,7 +196,7 @@ struct pt_cmd_queue {
+ struct ptdma_desc *qbase;
+
+ /* Aligned queue start address (per requirement) */
+- struct mutex q_mutex ____cacheline_aligned;
++ spinlock_t q_lock ____cacheline_aligned;
+ unsigned int qidx;
+
+ unsigned int qsize;
+diff --git a/drivers/dma/qcom/gpi.c b/drivers/dma/qcom/gpi.c
+index 3f56514bbef8f..98d45ee4b4e34 100644
+--- a/drivers/dma/qcom/gpi.c
++++ b/drivers/dma/qcom/gpi.c
+@@ -1756,6 +1756,7 @@ static int gpi_create_spi_tre(struct gchan *chan, struct gpi_desc *desc,
+ tre->dword[3] = u32_encode_bits(TRE_TYPE_GO, TRE_FLAGS_TYPE);
+ if (spi->cmd == SPI_RX) {
+ tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_IEOB);
++ tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_LINK);
+ } else if (spi->cmd == SPI_TX) {
+ tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_CHAIN);
+ } else { /* SPI_DUPLEX */
+diff --git a/drivers/dma/tegra186-gpc-dma.c b/drivers/dma/tegra186-gpc-dma.c
+index fa9bda4a2bc6f..75af3488a3baf 100644
+--- a/drivers/dma/tegra186-gpc-dma.c
++++ b/drivers/dma/tegra186-gpc-dma.c
+@@ -707,6 +707,7 @@ static int tegra_dma_terminate_all(struct dma_chan *dc)
+ return err;
+ }
+
++ vchan_terminate_vdesc(&tdc->dma_desc->vd);
+ tegra_dma_disable(tdc);
+ tdc->dma_desc = NULL;
+ }
+diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
+index 7b5081989b3d6..b86b809eb1f7e 100644
+--- a/drivers/dma/ti/k3-udma.c
++++ b/drivers/dma/ti/k3-udma.c
+@@ -761,11 +761,12 @@ static void udma_decrement_byte_counters(struct udma_chan *uc, u32 val)
+ if (uc->desc->dir == DMA_DEV_TO_MEM) {
+ udma_rchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val);
+ udma_rchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val);
+- udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
++ if (uc->config.ep_type != PSIL_EP_NATIVE)
++ udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
+ } else {
+ udma_tchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val);
+ udma_tchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val);
+- if (!uc->bchan)
++ if (!uc->bchan && uc->config.ep_type != PSIL_EP_NATIVE)
+ udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
+ }
+ }
+diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
+index 8cd4e69dc7b4c..7660175704883 100644
+--- a/drivers/dma/xilinx/xilinx_dma.c
++++ b/drivers/dma/xilinx/xilinx_dma.c
+@@ -3141,8 +3141,10 @@ static int xilinx_dma_probe(struct platform_device *pdev)
+ /* Initialize the channels */
+ for_each_child_of_node(node, child) {
+ err = xilinx_dma_child_probe(xdev, child);
+- if (err < 0)
++ if (err < 0) {
++ of_node_put(child);
+ goto error;
++ }
+ }
+
+ if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
+diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
+index 878deb4880cdb..0689e15107213 100644
+--- a/drivers/edac/edac_device.c
++++ b/drivers/edac/edac_device.c
+@@ -34,6 +34,9 @@
+ static DEFINE_MUTEX(device_ctls_mutex);
+ static LIST_HEAD(edac_device_list);
+
++/* Default workqueue processing interval on this instance, in msecs */
++#define DEFAULT_POLL_INTERVAL 1000
++
+ #ifdef CONFIG_EDAC_DEBUG
+ static void edac_device_dump_device(struct edac_device_ctl_info *edac_dev)
+ {
+@@ -336,7 +339,7 @@ static void edac_device_workq_function(struct work_struct *work_req)
+ * whole one second to save timers firing all over the period
+ * between integral seconds
+ */
+- if (edac_dev->poll_msec == 1000)
++ if (edac_dev->poll_msec == DEFAULT_POLL_INTERVAL)
+ edac_queue_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay));
+ else
+ edac_queue_work(&edac_dev->work, edac_dev->delay);
+@@ -366,7 +369,7 @@ static void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev,
+ * timers firing on sub-second basis, while they are happy
+ * to fire together on the 1 second exactly
+ */
+- if (edac_dev->poll_msec == 1000)
++ if (edac_dev->poll_msec == DEFAULT_POLL_INTERVAL)
+ edac_queue_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay));
+ else
+ edac_queue_work(&edac_dev->work, edac_dev->delay);
+@@ -400,7 +403,7 @@ void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev,
+ edac_dev->delay = msecs_to_jiffies(msec);
+
+ /* See comment in edac_device_workq_setup() above */
+- if (edac_dev->poll_msec == 1000)
++ if (edac_dev->poll_msec == DEFAULT_POLL_INTERVAL)
+ edac_mod_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay));
+ else
+ edac_mod_work(&edac_dev->work, edac_dev->delay);
+@@ -442,11 +445,7 @@ int edac_device_add_device(struct edac_device_ctl_info *edac_dev)
+ /* This instance is NOW RUNNING */
+ edac_dev->op_state = OP_RUNNING_POLL;
+
+- /*
+- * enable workq processing on this instance,
+- * default = 1000 msec
+- */
+- edac_device_workq_setup(edac_dev, 1000);
++ edac_device_workq_setup(edac_dev, edac_dev->poll_msec ?: DEFAULT_POLL_INTERVAL);
+ } else {
+ edac_dev->op_state = OP_RUNNING_INTERRUPT;
+ }
+diff --git a/drivers/edac/highbank_mc_edac.c b/drivers/edac/highbank_mc_edac.c
+index 61b76ec226af1..19fba258ae108 100644
+--- a/drivers/edac/highbank_mc_edac.c
++++ b/drivers/edac/highbank_mc_edac.c
+@@ -174,8 +174,10 @@ static int highbank_mc_probe(struct platform_device *pdev)
+ drvdata = mci->pvt_info;
+ platform_set_drvdata(pdev, mci);
+
+- if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL))
+- return -ENOMEM;
++ if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL)) {
++ res = -ENOMEM;
++ goto free;
++ }
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r) {
+@@ -243,6 +245,7 @@ err2:
+ edac_mc_del_mc(&pdev->dev);
+ err:
+ devres_release_group(&pdev->dev, NULL);
++free:
+ edac_mc_free(mci);
+ return res;
+ }
+diff --git a/drivers/edac/qcom_edac.c b/drivers/edac/qcom_edac.c
+index 97a27e42dd610..c45519f59dc11 100644
+--- a/drivers/edac/qcom_edac.c
++++ b/drivers/edac/qcom_edac.c
+@@ -252,7 +252,7 @@ clear:
+ static int
+ dump_syn_reg(struct edac_device_ctl_info *edev_ctl, int err_type, u32 bank)
+ {
+- struct llcc_drv_data *drv = edev_ctl->pvt_info;
++ struct llcc_drv_data *drv = edev_ctl->dev->platform_data;
+ int ret;
+
+ ret = dump_syn_reg_values(drv, bank, err_type);
+@@ -289,7 +289,7 @@ static irqreturn_t
+ llcc_ecc_irq_handler(int irq, void *edev_ctl)
+ {
+ struct edac_device_ctl_info *edac_dev_ctl = edev_ctl;
+- struct llcc_drv_data *drv = edac_dev_ctl->pvt_info;
++ struct llcc_drv_data *drv = edac_dev_ctl->dev->platform_data;
+ irqreturn_t irq_rc = IRQ_NONE;
+ u32 drp_error, trp_error, i;
+ int ret;
+@@ -358,7 +358,6 @@ static int qcom_llcc_edac_probe(struct platform_device *pdev)
+ edev_ctl->dev_name = dev_name(dev);
+ edev_ctl->ctl_name = "llcc";
+ edev_ctl->panic_on_ue = LLCC_ERP_PANIC_ON_UE;
+- edev_ctl->pvt_info = llcc_driv_data;
+
+ rc = edac_device_add_device(edev_ctl);
+ if (rc)
+diff --git a/drivers/firmware/arm_scmi/shmem.c b/drivers/firmware/arm_scmi/shmem.c
+index 1dfe534b85184..87b4f4d35f062 100644
+--- a/drivers/firmware/arm_scmi/shmem.c
++++ b/drivers/firmware/arm_scmi/shmem.c
+@@ -81,10 +81,11 @@ u32 shmem_read_header(struct scmi_shared_mem __iomem *shmem)
+ void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem,
+ struct scmi_xfer *xfer)
+ {
++ size_t len = ioread32(&shmem->length);
++
+ xfer->hdr.status = ioread32(shmem->msg_payload);
+ /* Skip the length of header and status in shmem area i.e 8 bytes */
+- xfer->rx.len = min_t(size_t, xfer->rx.len,
+- ioread32(&shmem->length) - 8);
++ xfer->rx.len = min_t(size_t, xfer->rx.len, len > 8 ? len - 8 : 0);
+
+ /* Take a copy to the rx buffer.. */
+ memcpy_fromio(xfer->rx.buf, shmem->msg_payload + 4, xfer->rx.len);
+@@ -93,8 +94,10 @@ void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem,
+ void shmem_fetch_notification(struct scmi_shared_mem __iomem *shmem,
+ size_t max_len, struct scmi_xfer *xfer)
+ {
++ size_t len = ioread32(&shmem->length);
++
+ /* Skip only the length of header in shmem area i.e 4 bytes */
+- xfer->rx.len = min_t(size_t, max_len, ioread32(&shmem->length) - 4);
++ xfer->rx.len = min_t(size_t, max_len, len > 4 ? len - 4 : 0);
+
+ /* Take a copy to the rx buffer.. */
+ memcpy_fromio(xfer->rx.buf, shmem->msg_payload, xfer->rx.len);
+diff --git a/drivers/firmware/arm_scmi/virtio.c b/drivers/firmware/arm_scmi/virtio.c
+index 33c9b81a55cd1..1db975c088969 100644
+--- a/drivers/firmware/arm_scmi/virtio.c
++++ b/drivers/firmware/arm_scmi/virtio.c
+@@ -160,7 +160,6 @@ static void scmi_vio_channel_cleanup_sync(struct scmi_vio_channel *vioch)
+ }
+
+ vioch->shutdown_done = &vioch_shutdown_done;
+- virtio_break_device(vioch->vqueue->vdev);
+ if (!vioch->is_rx && vioch->deferred_tx_wq)
+ /* Cannot be kicked anymore after this...*/
+ vioch->deferred_tx_wq = NULL;
+@@ -482,6 +481,12 @@ static int virtio_chan_free(int id, void *p, void *data)
+ struct scmi_chan_info *cinfo = p;
+ struct scmi_vio_channel *vioch = cinfo->transport_info;
+
++ /*
++	 * Break the device to inhibit further traffic while shutting down
++	 * the channels: doing it later while holding vioch->lock creates unsafe
++ * locking dependency chains as reported by LOCKDEP.
++ */
++ virtio_break_device(vioch->vqueue->vdev);
+ scmi_vio_channel_cleanup_sync(vioch);
+
+ scmi_free_channel(cinfo, data, id);
+diff --git a/drivers/firmware/efi/runtime-wrappers.c b/drivers/firmware/efi/runtime-wrappers.c
+index 60075e0e4943a..1fba4e09cdcff 100644
+--- a/drivers/firmware/efi/runtime-wrappers.c
++++ b/drivers/firmware/efi/runtime-wrappers.c
+@@ -84,6 +84,7 @@ struct efi_runtime_work efi_rts_work;
+ else \
+ pr_err("Failed to queue work to efi_rts_wq.\n"); \
+ \
++ WARN_ON_ONCE(efi_rts_work.status == EFI_ABORTED); \
+ exit: \
+ efi_rts_work.efi_rts_id = EFI_NONE; \
+ efi_rts_work.status; \
+diff --git a/drivers/firmware/google/coreboot_table.c b/drivers/firmware/google/coreboot_table.c
+index 9ca21feb9d454..f3694d3478019 100644
+--- a/drivers/firmware/google/coreboot_table.c
++++ b/drivers/firmware/google/coreboot_table.c
+@@ -93,7 +93,12 @@ static int coreboot_table_populate(struct device *dev, void *ptr)
+ for (i = 0; i < header->table_entries; i++) {
+ entry = ptr_entry;
+
+- device = kzalloc(sizeof(struct device) + entry->size, GFP_KERNEL);
++ if (entry->size < sizeof(*entry)) {
++ dev_warn(dev, "coreboot table entry too small!\n");
++ return -EINVAL;
++ }
++
++ device = kzalloc(sizeof(device->dev) + entry->size, GFP_KERNEL);
+ if (!device)
+ return -ENOMEM;
+
+@@ -101,7 +106,7 @@ static int coreboot_table_populate(struct device *dev, void *ptr)
+ device->dev.parent = dev;
+ device->dev.bus = &coreboot_bus_type;
+ device->dev.release = coreboot_device_release;
+- memcpy(&device->entry, ptr_entry, entry->size);
++ memcpy(device->raw, ptr_entry, entry->size);
+
+ ret = device_register(&device->dev);
+ if (ret) {
+diff --git a/drivers/firmware/google/coreboot_table.h b/drivers/firmware/google/coreboot_table.h
+index beb778674acdc..4a89277b99a39 100644
+--- a/drivers/firmware/google/coreboot_table.h
++++ b/drivers/firmware/google/coreboot_table.h
+@@ -66,6 +66,7 @@ struct coreboot_device {
+ struct coreboot_table_entry entry;
+ struct lb_cbmem_ref cbmem_ref;
+ struct lb_framebuffer framebuffer;
++ DECLARE_FLEX_ARRAY(u8, raw);
+ };
+ };
+
+diff --git a/drivers/gpio/gpio-ep93xx.c b/drivers/gpio/gpio-ep93xx.c
+index 2e17797091133..7edcdc5750802 100644
+--- a/drivers/gpio/gpio-ep93xx.c
++++ b/drivers/gpio/gpio-ep93xx.c
+@@ -148,7 +148,7 @@ static void ep93xx_gpio_f_irq_handler(struct irq_desc *desc)
+ */
+ struct irq_chip *irqchip = irq_desc_get_chip(desc);
+ unsigned int irq = irq_desc_get_irq(desc);
+- int port_f_idx = ((irq + 1) & 7) ^ 4; /* {19..22,47..50} -> {0..7} */
++ int port_f_idx = (irq & 7) ^ 4; /* {20..23,48..51} -> {0..7} */
+ int gpio_irq = EP93XX_GPIO_F_IRQ_BASE + port_f_idx;
+
+ chained_irq_enter(irqchip, desc);
+diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c
+index c871602fc5ba9..853d9aa6b3b1f 100644
+--- a/drivers/gpio/gpio-mxc.c
++++ b/drivers/gpio/gpio-mxc.c
+@@ -18,6 +18,7 @@
+ #include <linux/module.h>
+ #include <linux/platform_device.h>
+ #include <linux/slab.h>
++#include <linux/spinlock.h>
+ #include <linux/syscore_ops.h>
+ #include <linux/gpio/driver.h>
+ #include <linux/of.h>
+@@ -147,6 +148,7 @@ static int gpio_set_irq_type(struct irq_data *d, u32 type)
+ {
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct mxc_gpio_port *port = gc->private;
++ unsigned long flags;
+ u32 bit, val;
+ u32 gpio_idx = d->hwirq;
+ int edge;
+@@ -185,6 +187,8 @@ static int gpio_set_irq_type(struct irq_data *d, u32 type)
+ return -EINVAL;
+ }
+
++ raw_spin_lock_irqsave(&port->gc.bgpio_lock, flags);
++
+ if (GPIO_EDGE_SEL >= 0) {
+ val = readl(port->base + GPIO_EDGE_SEL);
+ if (edge == GPIO_INT_BOTH_EDGES)
+@@ -204,15 +208,20 @@ static int gpio_set_irq_type(struct irq_data *d, u32 type)
+
+ writel(1 << gpio_idx, port->base + GPIO_ISR);
+
+- return 0;
++ raw_spin_unlock_irqrestore(&port->gc.bgpio_lock, flags);
++
++ return port->gc.direction_input(&port->gc, gpio_idx);
+ }
+
+ static void mxc_flip_edge(struct mxc_gpio_port *port, u32 gpio)
+ {
+ void __iomem *reg = port->base;
++ unsigned long flags;
+ u32 bit, val;
+ int edge;
+
++ raw_spin_lock_irqsave(&port->gc.bgpio_lock, flags);
++
+ reg += GPIO_ICR1 + ((gpio & 0x10) >> 2); /* lower or upper register */
+ bit = gpio & 0xf;
+ val = readl(reg);
+@@ -227,9 +236,12 @@ static void mxc_flip_edge(struct mxc_gpio_port *port, u32 gpio)
+ } else {
+ pr_err("mxc: invalid configuration for GPIO %d: %x\n",
+ gpio, edge);
+- return;
++ goto unlock;
+ }
+ writel(val | (edge << (bit << 1)), reg);
++
++unlock:
++ raw_spin_unlock_irqrestore(&port->gc.bgpio_lock, flags);
+ }
+
+ /* handle 32 interrupts in one status register */
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+index 29f045079a3e1..404c839683b1c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+@@ -2130,7 +2130,7 @@ int amdgpu_amdkfd_map_gtt_bo_to_gart(struct amdgpu_device *adev, struct amdgpu_b
+ }
+
+ amdgpu_amdkfd_remove_eviction_fence(
+- bo, bo->kfd_bo->process_info->eviction_fence);
++ bo, bo->vm_bo->vm->process_info->eviction_fence);
+
+ amdgpu_bo_unreserve(bo);
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+index 99f5e38c4835e..3380daf42da8a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+@@ -586,10 +586,14 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
+ if (adev->gfx.gfx_off_req_count == 0 &&
+ !adev->gfx.gfx_off_state) {
+ /* If going to s2idle, no need to wait */
+- if (adev->in_s0ix)
+- delay = GFX_OFF_NO_DELAY;
+- schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
++ if (adev->in_s0ix) {
++ if (!amdgpu_dpm_set_powergating_by_smu(adev,
++ AMD_IP_BLOCK_TYPE_GFX, true))
++ adev->gfx.gfx_off_state = true;
++ } else {
++ schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
+ delay);
++ }
+ }
+ } else {
+ if (adev->gfx.gfx_off_req_count == 0) {
+diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+index 725876b4f02ed..32b0ea8757fa5 100644
+--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+@@ -192,7 +192,6 @@ static int mes_v11_0_add_hw_queue(struct amdgpu_mes *mes,
+ mes_add_queue_pkt.trap_handler_addr = input->tba_addr;
+ mes_add_queue_pkt.tma_addr = input->tma_addr;
+ mes_add_queue_pkt.is_kfd_process = input->is_kfd_process;
+- mes_add_queue_pkt.trap_en = 1;
+
+ /* For KFD, gds_size is re-used for queue size (needed in MES for AQL queues) */
+ mes_add_queue_pkt.is_aql_queue = input->is_aql_queue;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+index ecb4c3abc6297..c06ada0844ba1 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+@@ -200,7 +200,7 @@ static int add_queue_mes(struct device_queue_manager *dqm, struct queue *q,
+ queue_input.wptr_addr = (uint64_t)q->properties.write_ptr;
+
+ if (q->wptr_bo) {
+- wptr_addr_off = (uint64_t)q->properties.write_ptr - (uint64_t)q->wptr_bo->kfd_bo->va;
++ wptr_addr_off = (uint64_t)q->properties.write_ptr & (PAGE_SIZE - 1);
+ queue_input.wptr_mc_addr = ((uint64_t)q->wptr_bo->tbo.resource->start << PAGE_SHIFT) + wptr_addr_off;
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+index 64fdf63093a00..63feea08904cb 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+@@ -570,6 +570,15 @@ svm_range_vram_node_new(struct amdgpu_device *adev, struct svm_range *prange,
+ goto reserve_bo_failed;
+ }
+
++ if (clear) {
++ r = amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);
++ if (r) {
++ pr_debug("failed %d to sync bo\n", r);
++ amdgpu_bo_unreserve(bo);
++ goto reserve_bo_failed;
++ }
++ }
++
+ r = dma_resv_reserve_fences(bo->tbo.base.resv, 1);
+ if (r) {
+ pr_debug("failed %d to reserve bo\n", r);
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index e10f1f15c9c43..85bd1f18259c7 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -1737,10 +1737,6 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
+ adev->dm.vblank_control_workqueue = NULL;
+ }
+
+- for (i = 0; i < adev->dm.display_indexes_num; i++) {
+- drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
+- }
+-
+ amdgpu_dm_destroy_drm_device(&adev->dm);
+
+ #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+@@ -9404,6 +9400,8 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
+ bool lock_and_validation_needed = false;
+ struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
+ #if defined(CONFIG_DRM_AMD_DC_DCN)
++ struct drm_dp_mst_topology_mgr *mgr;
++ struct drm_dp_mst_topology_state *mst_state;
+ struct dsc_mst_fairness_vars vars[MAX_PIPES];
+ #endif
+
+@@ -9652,6 +9650,28 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
+ lock_and_validation_needed = true;
+ }
+
++#if defined(CONFIG_DRM_AMD_DC_DCN)
++ /* set the slot info for each mst_state based on the link encoding format */
++ for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
++ struct amdgpu_dm_connector *aconnector;
++ struct drm_connector *connector;
++ struct drm_connector_list_iter iter;
++ u8 link_coding_cap;
++
++ drm_connector_list_iter_begin(dev, &iter);
++ drm_for_each_connector_iter(connector, &iter) {
++ if (connector->index == mst_state->mgr->conn_base_id) {
++ aconnector = to_amdgpu_dm_connector(connector);
++ link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
++ drm_dp_mst_update_slots(mst_state, link_coding_cap);
++
++ break;
++ }
++ }
++ drm_connector_list_iter_end(&iter);
++ }
++#endif
++
+ /**
+ * Streams and planes are reset when there are changes that affect
+ * bandwidth. Anything that affects bandwidth needs to go through
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+index f72c013d3a5b0..16623f73ddbe6 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+@@ -120,23 +120,50 @@ enum dc_edid_status dm_helpers_parse_edid_caps(
+ }
+
+ static void
+-fill_dc_mst_payload_table_from_drm(struct drm_dp_mst_topology_state *mst_state,
+- struct amdgpu_dm_connector *aconnector,
++fill_dc_mst_payload_table_from_drm(struct dc_link *link,
++ bool enable,
++ struct drm_dp_mst_atomic_payload *target_payload,
+ struct dc_dp_mst_stream_allocation_table *table)
+ {
+ struct dc_dp_mst_stream_allocation_table new_table = { 0 };
+ struct dc_dp_mst_stream_allocation *sa;
+- struct drm_dp_mst_atomic_payload *payload;
++ struct link_mst_stream_allocation_table copy_of_link_table =
++ link->mst_stream_alloc_table;
++
++ int i;
++ int current_hw_table_stream_cnt = copy_of_link_table.stream_count;
++ struct link_mst_stream_allocation *dc_alloc;
++
++	/* TODO: refactor to set link->mst_stream_alloc_table directly if possible. */
++ if (enable) {
++ dc_alloc =
++ &copy_of_link_table.stream_allocations[current_hw_table_stream_cnt];
++ dc_alloc->vcp_id = target_payload->vcpi;
++ dc_alloc->slot_count = target_payload->time_slots;
++ } else {
++ for (i = 0; i < copy_of_link_table.stream_count; i++) {
++ dc_alloc =
++ &copy_of_link_table.stream_allocations[i];
++
++ if (dc_alloc->vcp_id == target_payload->vcpi) {
++ dc_alloc->vcp_id = 0;
++ dc_alloc->slot_count = 0;
++ break;
++ }
++ }
++ ASSERT(i != copy_of_link_table.stream_count);
++ }
+
+ /* Fill payload info*/
+- list_for_each_entry(payload, &mst_state->payloads, next) {
+- if (payload->delete)
+- continue;
+-
+- sa = &new_table.stream_allocations[new_table.stream_count];
+- sa->slot_count = payload->time_slots;
+- sa->vcp_id = payload->vcpi;
+- new_table.stream_count++;
++ for (i = 0; i < MAX_CONTROLLER_NUM; i++) {
++ dc_alloc =
++ &copy_of_link_table.stream_allocations[i];
++ if (dc_alloc->vcp_id > 0 && dc_alloc->slot_count > 0) {
++ sa = &new_table.stream_allocations[new_table.stream_count];
++ sa->slot_count = dc_alloc->slot_count;
++ sa->vcp_id = dc_alloc->vcp_id;
++ new_table.stream_count++;
++ }
+ }
+
+ /* Overwrite the old table */
+@@ -185,7 +212,7 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
+ * AUX message. The sequence is slot 1-63 allocated sequence for each
+ * stream. AMD ASIC stream slot allocation should follow the same
+ * sequence. copy DRM MST allocation to dc */
+- fill_dc_mst_payload_table_from_drm(mst_state, aconnector, proposed_table);
++ fill_dc_mst_payload_table_from_drm(stream->link, enable, payload, proposed_table);
+
+ return true;
+ }
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+index 6483ba266893d..8561e9b017a2e 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+@@ -468,7 +468,6 @@ static const struct drm_connector_helper_funcs dm_dp_mst_connector_helper_funcs
+ static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
+ {
+ drm_encoder_cleanup(encoder);
+- kfree(encoder);
+ }
+
+ static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
+@@ -897,11 +896,6 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
+ if (IS_ERR(mst_state))
+ return PTR_ERR(mst_state);
+
+- mst_state->pbn_div = dm_mst_get_pbn_divider(dc_link);
+-#if defined(CONFIG_DRM_AMD_DC_DCN)
+- drm_dp_mst_update_slots(mst_state, dc_link_dp_mst_decide_link_encoding_format(dc_link));
+-#endif
+-
+ /* Set up params */
+ for (i = 0; i < dc_state->stream_count; i++) {
+ struct dc_dsc_policy dsc_policy = {0};
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+index d7b1ace6328a0..40b9d2ce08e66 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+@@ -3995,10 +3995,13 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
+ struct fixed31_32 avg_time_slots_per_mtp = dc_fixpt_from_int(0);
+ int i;
+ bool mst_mode = (link->type == dc_connection_mst_branch);
++	/* adjust for drm changes */
++ bool update_drm_mst_state = true;
+ const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
+ const struct dc_link_settings empty_link_settings = {0};
+ DC_LOGGER_INIT(link->ctx->logger);
+
++
+ /* deallocate_mst_payload is called before disable link. When mode or
+ * disable/enable monitor, new stream is created which is not in link
+ * stream[] yet. For this, payload is not allocated yet, so de-alloc
+@@ -4014,7 +4017,7 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
+ &empty_link_settings,
+ avg_time_slots_per_mtp);
+
+- if (mst_mode) {
++ if (mst_mode || update_drm_mst_state) {
+ /* when link is in mst mode, reply on mst manager to remove
+ * payload
+ */
+@@ -4077,11 +4080,18 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
+ stream->ctx,
+ stream);
+
++ if (!update_drm_mst_state)
++ dm_helpers_dp_mst_send_payload_allocation(
++ stream->ctx,
++ stream,
++ false);
++ }
++
++ if (update_drm_mst_state)
+ dm_helpers_dp_mst_send_payload_allocation(
+ stream->ctx,
+ stream,
+ false);
+- }
+
+ return DC_OK;
+ }
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+index 4c20d17e7416e..cf96c3f2affe4 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+@@ -145,6 +145,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_0_message_map[SMU_MSG_MAX_COUNT] =
+ MSG_MAP(SetBadMemoryPagesRetiredFlagsPerChannel,
+ PPSMC_MSG_SetBadMemoryPagesRetiredFlagsPerChannel, 0),
+ MSG_MAP(AllowGpo, PPSMC_MSG_SetGpoAllow, 0),
++ MSG_MAP(AllowIHHostInterrupt, PPSMC_MSG_AllowIHHostInterrupt, 0),
+ };
+
+ static struct cmn2asic_mapping smu_v13_0_0_clk_map[SMU_CLK_COUNT] = {
+diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c
+index 51a46689cda70..4ca37261584a9 100644
+--- a/drivers/gpu/drm/display/drm_dp_mst_topology.c
++++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c
+@@ -3372,6 +3372,9 @@ void drm_dp_remove_payload(struct drm_dp_mst_topology_mgr *mgr,
+
+ mgr->payload_count--;
+ mgr->next_start_slot -= payload->time_slots;
++
++ if (payload->delete)
++ drm_dp_mst_put_port_malloc(payload->port);
+ }
+ EXPORT_SYMBOL(drm_dp_remove_payload);
+
+@@ -4327,7 +4330,6 @@ int drm_dp_atomic_release_time_slots(struct drm_atomic_state *state,
+
+ drm_dbg_atomic(mgr->dev, "[MST PORT:%p] TU %d -> 0\n", port, payload->time_slots);
+ if (!payload->delete) {
+- drm_dp_mst_put_port_malloc(port);
+ payload->pbn = 0;
+ payload->delete = true;
+ topology_state->payload_mask &= ~BIT(payload->vcpi - 1);
+diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
+index 52d8800a8ab86..3659f0465a724 100644
+--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
++++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
+@@ -304,6 +304,12 @@ static const struct dmi_system_id orientation_data[] = {
+ DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad D330-10IGM"),
+ },
+ .driver_data = (void *)&lcd1200x1920_rightside_up,
++ }, { /* Lenovo Ideapad D330-10IGL (HD) */
++ .matches = {
++ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++ DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad D330-10IGL"),
++ },
++ .driver_data = (void *)&lcd800x1280_rightside_up,
+ }, { /* Lenovo Yoga Book X90F / X91F / X91L */
+ .matches = {
+ /* Non exact match to match all versions */
+diff --git a/drivers/gpu/drm/drm_vma_manager.c b/drivers/gpu/drm/drm_vma_manager.c
+index 7de37f8c68fd0..83229a031af0f 100644
+--- a/drivers/gpu/drm/drm_vma_manager.c
++++ b/drivers/gpu/drm/drm_vma_manager.c
+@@ -240,27 +240,8 @@ void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr,
+ }
+ EXPORT_SYMBOL(drm_vma_offset_remove);
+
+-/**
+- * drm_vma_node_allow - Add open-file to list of allowed users
+- * @node: Node to modify
+- * @tag: Tag of file to remove
+- *
+- * Add @tag to the list of allowed open-files for this node. If @tag is
+- * already on this list, the ref-count is incremented.
+- *
+- * The list of allowed-users is preserved across drm_vma_offset_add() and
+- * drm_vma_offset_remove() calls. You may even call it if the node is currently
+- * not added to any offset-manager.
+- *
+- * You must remove all open-files the same number of times as you added them
+- * before destroying the node. Otherwise, you will leak memory.
+- *
+- * This is locked against concurrent access internally.
+- *
+- * RETURNS:
+- * 0 on success, negative error code on internal failure (out-of-mem)
+- */
+-int drm_vma_node_allow(struct drm_vma_offset_node *node, struct drm_file *tag)
++static int vma_node_allow(struct drm_vma_offset_node *node,
++ struct drm_file *tag, bool ref_counted)
+ {
+ struct rb_node **iter;
+ struct rb_node *parent = NULL;
+@@ -282,7 +263,8 @@ int drm_vma_node_allow(struct drm_vma_offset_node *node, struct drm_file *tag)
+ entry = rb_entry(*iter, struct drm_vma_offset_file, vm_rb);
+
+ if (tag == entry->vm_tag) {
+- entry->vm_count++;
++ if (ref_counted)
++ entry->vm_count++;
+ goto unlock;
+ } else if (tag > entry->vm_tag) {
+ iter = &(*iter)->rb_right;
+@@ -307,8 +289,58 @@ unlock:
+ kfree(new);
+ return ret;
+ }
++
++/**
++ * drm_vma_node_allow - Add open-file to list of allowed users
++ * @node: Node to modify
++ * @tag: Tag of file to add
++ *
++ * Add @tag to the list of allowed open-files for this node. If @tag is
++ * already on this list, the ref-count is incremented.
++ *
++ * The list of allowed-users is preserved across drm_vma_offset_add() and
++ * drm_vma_offset_remove() calls. You may even call it if the node is currently
++ * not added to any offset-manager.
++ *
++ * You must remove all open-files the same number of times as you added them
++ * before destroying the node. Otherwise, you will leak memory.
++ *
++ * This is locked against concurrent access internally.
++ *
++ * RETURNS:
++ * 0 on success, negative error code on internal failure (out-of-mem)
++ */
++int drm_vma_node_allow(struct drm_vma_offset_node *node, struct drm_file *tag)
++{
++ return vma_node_allow(node, tag, true);
++}
+ EXPORT_SYMBOL(drm_vma_node_allow);
+
++/**
++ * drm_vma_node_allow_once - Add open-file to list of allowed users
++ * @node: Node to modify
++ * @tag: Tag of file to add
++ *
++ * Add @tag to the list of allowed open-files for this node.
++ *
++ * The list of allowed-users is preserved across drm_vma_offset_add() and
++ * drm_vma_offset_remove() calls. You may even call it if the node is currently
++ * not added to any offset-manager.
++ *
++ * This is not ref-counted, unlike drm_vma_node_allow(), so drm_vma_node_revoke()
++ * should only be called once after this.
++ *
++ * This is locked against concurrent access internally.
++ *
++ * RETURNS:
++ * 0 on success, negative error code on internal failure (out-of-mem)
++ */
++int drm_vma_node_allow_once(struct drm_vma_offset_node *node, struct drm_file *tag)
++{
++ return vma_node_allow(node, tag, false);
++}
++EXPORT_SYMBOL(drm_vma_node_allow_once);
++
+ /**
+ * drm_vma_node_revoke - Remove open-file from list of allowed users
+ * @node: Node to modify
+diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
+index 78b3427471bd7..b94bcceeff705 100644
+--- a/drivers/gpu/drm/i915/display/intel_dp.c
++++ b/drivers/gpu/drm/i915/display/intel_dp.c
+@@ -5216,9 +5216,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
+ intel_bios_init_panel(dev_priv, &intel_connector->panel,
+ encoder->devdata, IS_ERR(edid) ? NULL : edid);
+
+- intel_panel_add_edid_fixed_modes(intel_connector,
+- intel_connector->panel.vbt.drrs_type != DRRS_TYPE_NONE ||
+- intel_vrr_is_capable(intel_connector));
++ intel_panel_add_edid_fixed_modes(intel_connector, true);
+
+ /* MSO requires information from the EDID */
+ intel_edp_mso_init(intel_dp);
+diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c
+index 41cec9dc42231..f72f4646c0d70 100644
+--- a/drivers/gpu/drm/i915/display/intel_panel.c
++++ b/drivers/gpu/drm/i915/display/intel_panel.c
+@@ -85,9 +85,10 @@ static bool is_alt_drrs_mode(const struct drm_display_mode *mode,
+ static bool is_alt_fixed_mode(const struct drm_display_mode *mode,
+ const struct drm_display_mode *preferred_mode)
+ {
+- return drm_mode_match(mode, preferred_mode,
+- DRM_MODE_MATCH_FLAGS |
+- DRM_MODE_MATCH_3D_FLAGS) &&
++ u32 sync_flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC |
++ DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC;
++
++ return (mode->flags & ~sync_flags) == (preferred_mode->flags & ~sync_flags) &&
+ mode->hdisplay == preferred_mode->hdisplay &&
+ mode->vdisplay == preferred_mode->vdisplay;
+ }
+diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+index 354c1d6dab846..d445e2d63c9c8 100644
+--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+@@ -697,7 +697,7 @@ insert:
+ GEM_BUG_ON(lookup_mmo(obj, mmap_type) != mmo);
+ out:
+ if (file)
+- drm_vma_node_allow(&mmo->vma_node, file);
++ drm_vma_node_allow_once(&mmo->vma_node, file);
+ return mmo;
+
+ err:
+diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+index c570cf780079a..436598f19522c 100644
+--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
++++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+@@ -1697,7 +1697,7 @@ static int igt_shrink_thp(void *arg)
+ I915_SHRINK_ACTIVE);
+ i915_vma_unpin(vma);
+ if (err)
+- goto out_put;
++ goto out_wf;
+
+ /*
+ * Now that the pages are *unpinned* shrinking should invoke
+@@ -1713,19 +1713,19 @@ static int igt_shrink_thp(void *arg)
+ pr_err("unexpected pages mismatch, should_swap=%s\n",
+ str_yes_no(should_swap));
+ err = -EINVAL;
+- goto out_put;
++ goto out_wf;
+ }
+
+ if (should_swap == (obj->mm.page_sizes.sg || obj->mm.page_sizes.phys)) {
+ pr_err("unexpected residual page-size bits, should_swap=%s\n",
+ str_yes_no(should_swap));
+ err = -EINVAL;
+- goto out_put;
++ goto out_wf;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, flags);
+ if (err)
+- goto out_put;
++ goto out_wf;
+
+ while (n--) {
+ err = cpu_check(obj, n, 0xdeadbeaf);
+diff --git a/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c b/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c
+index 310fb83c527eb..2990dd4d4a0d8 100644
+--- a/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c
++++ b/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c
+@@ -28,8 +28,7 @@ struct intel_engine_cs *intel_selftest_find_any_engine(struct intel_gt *gt)
+
+ int intel_selftest_modify_policy(struct intel_engine_cs *engine,
+ struct intel_selftest_saved_policy *saved,
+- u32 modify_type)
+-
++ enum selftest_scheduler_modify modify_type)
+ {
+ int err;
+
+diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+index e033d6a67a20c..870252bef23f3 100644
+--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
++++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+@@ -876,7 +876,8 @@ static void a6xx_gmu_rpmh_off(struct a6xx_gmu *gmu)
+ #define GBIF_CLIENT_HALT_MASK BIT(0)
+ #define GBIF_ARB_HALT_MASK BIT(1)
+
+-static void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu)
++static void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu,
++ bool gx_off)
+ {
+ struct msm_gpu *gpu = &adreno_gpu->base;
+
+@@ -889,9 +890,11 @@ static void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu)
+ return;
+ }
+
+- /* Halt the gx side of GBIF */
+- gpu_write(gpu, REG_A6XX_RBBM_GBIF_HALT, 1);
+- spin_until(gpu_read(gpu, REG_A6XX_RBBM_GBIF_HALT_ACK) & 1);
++ if (gx_off) {
++ /* Halt the gx side of GBIF */
++ gpu_write(gpu, REG_A6XX_RBBM_GBIF_HALT, 1);
++ spin_until(gpu_read(gpu, REG_A6XX_RBBM_GBIF_HALT_ACK) & 1);
++ }
+
+ /* Halt new client requests on GBIF */
+ gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_CLIENT_HALT_MASK);
+@@ -929,7 +932,7 @@ static void a6xx_gmu_force_off(struct a6xx_gmu *gmu)
+ /* Halt the gmu cm3 core */
+ gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1);
+
+- a6xx_bus_clear_pending_transactions(adreno_gpu);
++ a6xx_bus_clear_pending_transactions(adreno_gpu, true);
+
+ /* Reset GPU core blocks */
+ gpu_write(gpu, REG_A6XX_RBBM_SW_RESET_CMD, 1);
+@@ -1083,7 +1086,7 @@ static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu)
+ return;
+ }
+
+- a6xx_bus_clear_pending_transactions(adreno_gpu);
++ a6xx_bus_clear_pending_transactions(adreno_gpu, a6xx_gpu->hung);
+
+ /* tell the GMU we want to slumber */
+ ret = a6xx_gmu_notify_slumber(gmu);
+diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+index e846e629c00d8..9d7fc44c1e2a9 100644
+--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
++++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+@@ -1277,6 +1277,12 @@ static void a6xx_recover(struct msm_gpu *gpu)
+ if (hang_debug)
+ a6xx_dump(gpu);
+
++ /*
++	 * To handle recovery-specific sequences during the runtime PM suspend
++	 * we are about to trigger.
++ */
++ a6xx_gpu->hung = true;
++
+ /* Halt SQE first */
+ gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 3);
+
+@@ -1319,6 +1325,7 @@ static void a6xx_recover(struct msm_gpu *gpu)
+ mutex_unlock(&gpu->active_lock);
+
+ msm_gpu_hw_init(gpu);
++ a6xx_gpu->hung = false;
+ }
+
+ static const char *a6xx_uche_fault_block(struct msm_gpu *gpu, u32 mid)
+diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
+index ab853f61db632..eea2e60ce3b7b 100644
+--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
++++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
+@@ -32,6 +32,7 @@ struct a6xx_gpu {
+ void *llc_slice;
+ void *htw_llc_slice;
+ bool have_mmu500;
++ bool hung;
+ };
+
+ #define to_a6xx_gpu(x) container_of(x, struct a6xx_gpu, base)
+diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+index 5a0e8491cd3a0..2e7531d2a5d6e 100644
+--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
++++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+@@ -351,6 +351,8 @@ int adreno_set_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
+ /* Ensure string is null terminated: */
+ str[len] = '\0';
+
++ mutex_lock(&gpu->lock);
++
+ if (param == MSM_PARAM_COMM) {
+ paramp = &ctx->comm;
+ } else {
+@@ -360,6 +362,8 @@ int adreno_set_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
+ kfree(*paramp);
+ *paramp = str;
+
++ mutex_unlock(&gpu->lock);
++
+ return 0;
+ }
+ case MSM_PARAM_SYSPROF:
+diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
+index 021f4e29b613b..4f495eecc34ba 100644
+--- a/drivers/gpu/drm/msm/msm_gpu.c
++++ b/drivers/gpu/drm/msm/msm_gpu.c
+@@ -335,6 +335,8 @@ static void get_comm_cmdline(struct msm_gem_submit *submit, char **comm, char **
+ struct msm_file_private *ctx = submit->queue->ctx;
+ struct task_struct *task;
+
++ WARN_ON(!mutex_is_locked(&submit->gpu->lock));
++
+ /* Note that kstrdup will return NULL if argument is NULL: */
+ *comm = kstrdup(ctx->comm, GFP_KERNEL);
+ *cmd = kstrdup(ctx->cmdline, GFP_KERNEL);
+diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
+index 58a72e6b14008..a89bfdc3d7f90 100644
+--- a/drivers/gpu/drm/msm/msm_gpu.h
++++ b/drivers/gpu/drm/msm/msm_gpu.h
+@@ -366,10 +366,18 @@ struct msm_file_private {
+ */
+ int sysprof;
+
+- /** comm: Overridden task comm, see MSM_PARAM_COMM */
++ /**
++ * comm: Overridden task comm, see MSM_PARAM_COMM
++ *
++ * Accessed under msm_gpu::lock
++ */
+ char *comm;
+
+- /** cmdline: Overridden task cmdline, see MSM_PARAM_CMDLINE */
++ /**
++ * cmdline: Overridden task cmdline, see MSM_PARAM_CMDLINE
++ *
++ * Accessed under msm_gpu::lock
++ */
+ char *cmdline;
+
+ /**
+diff --git a/drivers/gpu/drm/panfrost/Kconfig b/drivers/gpu/drm/panfrost/Kconfig
+index 079600328be18..e6403a9d66ade 100644
+--- a/drivers/gpu/drm/panfrost/Kconfig
++++ b/drivers/gpu/drm/panfrost/Kconfig
+@@ -3,7 +3,8 @@
+ config DRM_PANFROST
+ tristate "Panfrost (DRM support for ARM Mali Midgard/Bifrost GPUs)"
+ depends on DRM
+- depends on ARM || ARM64 || (COMPILE_TEST && !GENERIC_ATOMIC64)
++ depends on ARM || ARM64 || COMPILE_TEST
++ depends on !GENERIC_ATOMIC64 # for IOMMU_IO_PGTABLE_LPAE
+ depends on MMU
+ select DRM_SCHED
+ select IOMMU_SUPPORT
+diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
+index 231add8b8e127..ce0ea446bd707 100644
+--- a/drivers/gpu/drm/vc4/vc4_bo.c
++++ b/drivers/gpu/drm/vc4/vc4_bo.c
+@@ -179,6 +179,7 @@ static void vc4_bo_destroy(struct vc4_bo *bo)
+ bo->validated_shader = NULL;
+ }
+
++ mutex_destroy(&bo->madv_lock);
+ drm_gem_dma_free(&bo->base);
+ }
+
+@@ -394,7 +395,6 @@ struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size)
+ {
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct vc4_bo *bo;
+- int ret;
+
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return ERR_PTR(-ENODEV);
+@@ -406,9 +406,7 @@ struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size)
+ bo->madv = VC4_MADV_WILLNEED;
+ refcount_set(&bo->usecnt, 0);
+
+- ret = drmm_mutex_init(dev, &bo->madv_lock);
+- if (ret)
+- return ERR_PTR(ret);
++ mutex_init(&bo->madv_lock);
+
+ mutex_lock(&vc4->bo_lock);
+ bo->label = VC4_BO_TYPE_KERNEL;
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg_arm64.h b/drivers/gpu/drm/vmwgfx/vmwgfx_msg_arm64.h
+old mode 100755
+new mode 100644
+diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_client.c b/drivers/hid/amd-sfh-hid/amd_sfh_client.c
+index ab125f79408f2..1fb0f7105fb21 100644
+--- a/drivers/hid/amd-sfh-hid/amd_sfh_client.c
++++ b/drivers/hid/amd-sfh-hid/amd_sfh_client.c
+@@ -282,7 +282,7 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
+ }
+ rc = mp2_ops->get_rep_desc(cl_idx, cl_data->report_descr[i]);
+ if (rc)
+- return rc;
++ goto cleanup;
+ mp2_ops->start(privdata, info);
+ status = amd_sfh_wait_for_response
+ (privdata, cl_data->sensor_idx[i], SENSOR_ENABLED);
+diff --git a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c
+index 4da2f9f62aba3..a1d6e08fab7d4 100644
+--- a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c
++++ b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c
+@@ -160,7 +160,7 @@ static int amd_sfh1_1_hid_client_init(struct amd_mp2_dev *privdata)
+ }
+ rc = mp2_ops->get_rep_desc(cl_idx, cl_data->report_descr[i]);
+ if (rc)
+- return rc;
++ goto cleanup;
+
+ writel(0, privdata->mmio + AMD_P2C_MSG(0));
+ mp2_ops->start(privdata, info);
+diff --git a/drivers/hid/hid-betopff.c b/drivers/hid/hid-betopff.c
+index 467d789f9bc2d..25ed7b9a917e4 100644
+--- a/drivers/hid/hid-betopff.c
++++ b/drivers/hid/hid-betopff.c
+@@ -60,7 +60,6 @@ static int betopff_init(struct hid_device *hid)
+ struct list_head *report_list =
+ &hid->report_enum[HID_OUTPUT_REPORT].report_list;
+ struct input_dev *dev;
+- int field_count = 0;
+ int error;
+ int i, j;
+
+@@ -86,19 +85,21 @@ static int betopff_init(struct hid_device *hid)
+ * -----------------------------------------
+ * Do init them with default value.
+ */
++ if (report->maxfield < 4) {
++ hid_err(hid, "not enough fields in the report: %d\n",
++ report->maxfield);
++ return -ENODEV;
++ }
+ for (i = 0; i < report->maxfield; i++) {
++ if (report->field[i]->report_count < 1) {
++ hid_err(hid, "no values in the field\n");
++ return -ENODEV;
++ }
+ for (j = 0; j < report->field[i]->report_count; j++) {
+ report->field[i]->value[j] = 0x00;
+- field_count++;
+ }
+ }
+
+- if (field_count < 4) {
+- hid_err(hid, "not enough fields in the report: %d\n",
+- field_count);
+- return -ENODEV;
+- }
+-
+ betopff = kzalloc(sizeof(*betopff), GFP_KERNEL);
+ if (!betopff)
+ return -ENOMEM;
+diff --git a/drivers/hid/hid-bigbenff.c b/drivers/hid/hid-bigbenff.c
+index e8c5e3ac9fff1..e8b16665860d6 100644
+--- a/drivers/hid/hid-bigbenff.c
++++ b/drivers/hid/hid-bigbenff.c
+@@ -344,6 +344,11 @@ static int bigben_probe(struct hid_device *hid,
+ }
+
+ report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
++ if (list_empty(report_list)) {
++ hid_err(hid, "no output report found\n");
++ error = -ENODEV;
++ goto error_hw_stop;
++ }
+ bigben->report = list_entry(report_list->next,
+ struct hid_report, list);
+
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index bd47628da6be0..3e1803592bd4a 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -993,8 +993,8 @@ struct hid_report *hid_validate_values(struct hid_device *hid,
+ * Validating on id 0 means we should examine the first
+ * report in the list.
+ */
+- report = list_entry(
+- hid->report_enum[type].report_list.next,
++ report = list_first_entry_or_null(
++ &hid->report_enum[type].report_list,
+ struct hid_report, list);
+ } else {
+ report = hid->report_enum[type].report_id_hash[id];
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 82713ef3aaa64..c3735848ed5db 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -274,7 +274,6 @@
+ #define USB_DEVICE_ID_CH_AXIS_295 0x001c
+
+ #define USB_VENDOR_ID_CHERRY 0x046a
+-#define USB_DEVICE_ID_CHERRY_MOUSE_000C 0x000c
+ #define USB_DEVICE_ID_CHERRY_CYMOTION 0x0023
+ #define USB_DEVICE_ID_CHERRY_CYMOTION_SOLAR 0x0027
+
+diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
+index 0e9702c7f7d6c..be3ad02573de8 100644
+--- a/drivers/hid/hid-quirks.c
++++ b/drivers/hid/hid-quirks.c
+@@ -54,7 +54,6 @@ static const struct hid_device_id hid_quirks[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FLIGHT_SIM_YOKE), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_PRO_PEDALS), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_PRO_THROTTLE), HID_QUIRK_NOGET },
+- { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_MOUSE_000C), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB_RAPIDFIRE), HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K70RGB), HID_QUIRK_NO_INIT_REPORTS },
+diff --git a/drivers/hid/intel-ish-hid/ishtp/dma-if.c b/drivers/hid/intel-ish-hid/ishtp/dma-if.c
+index 40554c8daca07..00046cbfd4ed0 100644
+--- a/drivers/hid/intel-ish-hid/ishtp/dma-if.c
++++ b/drivers/hid/intel-ish-hid/ishtp/dma-if.c
+@@ -104,6 +104,11 @@ void *ishtp_cl_get_dma_send_buf(struct ishtp_device *dev,
+ int required_slots = (size / DMA_SLOT_SIZE)
+ + 1 * (size % DMA_SLOT_SIZE != 0);
+
++ if (!dev->ishtp_dma_tx_map) {
++		dev_err(dev->devc, "Failed to allocate Tx map\n");
++ return NULL;
++ }
++
+ spin_lock_irqsave(&dev->ishtp_dma_tx_lock, flags);
+ for (i = 0; i <= (dev->ishtp_dma_num_slots - required_slots); i++) {
+ free = 1;
+@@ -150,6 +155,11 @@ void ishtp_cl_release_dma_acked_mem(struct ishtp_device *dev,
+ return;
+ }
+
++ if (!dev->ishtp_dma_tx_map) {
++		dev_err(dev->devc, "Failed to allocate Tx map\n");
++ return;
++ }
++
+ i = (msg_addr - dev->ishtp_host_dma_tx_buf) / DMA_SLOT_SIZE;
+ spin_lock_irqsave(&dev->ishtp_dma_tx_lock, flags);
+ for (j = 0; j < acked_slots; j++) {
+diff --git a/drivers/i2c/busses/i2c-designware-common.c b/drivers/i2c/busses/i2c-designware-common.c
+index c023b691441ea..bceaf70f4e237 100644
+--- a/drivers/i2c/busses/i2c-designware-common.c
++++ b/drivers/i2c/busses/i2c-designware-common.c
+@@ -351,7 +351,8 @@ u32 i2c_dw_scl_hcnt(u32 ic_clk, u32 tSYMBOL, u32 tf, int cond, int offset)
+ *
+ * If your hardware is free from tHD;STA issue, try this one.
+ */
+- return DIV_ROUND_CLOSEST(ic_clk * tSYMBOL, MICRO) - 8 + offset;
++ return DIV_ROUND_CLOSEST_ULL((u64)ic_clk * tSYMBOL, MICRO) -
++ 8 + offset;
+ else
+ /*
+ * Conditional expression:
+@@ -367,7 +368,8 @@ u32 i2c_dw_scl_hcnt(u32 ic_clk, u32 tSYMBOL, u32 tf, int cond, int offset)
+ * The reason why we need to take into account "tf" here,
+ * is the same as described in i2c_dw_scl_lcnt().
+ */
+- return DIV_ROUND_CLOSEST(ic_clk * (tSYMBOL + tf), MICRO) - 3 + offset;
++ return DIV_ROUND_CLOSEST_ULL((u64)ic_clk * (tSYMBOL + tf), MICRO) -
++ 3 + offset;
+ }
+
+ u32 i2c_dw_scl_lcnt(u32 ic_clk, u32 tLOW, u32 tf, int offset)
+@@ -383,7 +385,8 @@ u32 i2c_dw_scl_lcnt(u32 ic_clk, u32 tLOW, u32 tf, int offset)
+ * account the fall time of SCL signal (tf). Default tf value
+ * should be 0.3 us, for safety.
+ */
+- return DIV_ROUND_CLOSEST(ic_clk * (tLOW + tf), MICRO) - 1 + offset;
++ return DIV_ROUND_CLOSEST_ULL((u64)ic_clk * (tLOW + tf), MICRO) -
++ 1 + offset;
+ }
+
+ int i2c_dw_set_sda_hold(struct dw_i2c_dev *dev)
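+
+The switch to DIV_ROUND_CLOSEST_ULL() above is about 32-bit overflow:
+ic_clk is a clock rate in kHz and tSYMBOL/tLOW are periods in ns, so
+their product can exceed UINT_MAX before the division by MICRO runs.
+A small standalone demonstration (the numbers are illustrative, not
+taken from any particular SoC):
+
+#include <stdint.h>
+#include <stdio.h>
+
+int main(void)
+{
+        uint32_t ic_clk = 200000;  /* 200 MHz, expressed in kHz */
+        uint32_t t_high = 26000;   /* period in ns */
+
+        uint32_t bad  = ic_clk * t_high;           /* 5.2e9 wraps past 2^32 */
+        uint64_t good = (uint64_t)ic_clk * t_high; /* widened before multiply */
+
+        printf("32-bit product: %u\n", bad);
+        printf("64-bit product: %llu\n", (unsigned long long)good);
+        return 0;
+}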
+diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
+index ba043b5473936..74182db03a88b 100644
+--- a/drivers/i2c/busses/i2c-designware-platdrv.c
++++ b/drivers/i2c/busses/i2c-designware-platdrv.c
+@@ -351,13 +351,11 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
+
+ if (dev->flags & ACCESS_NO_IRQ_SUSPEND) {
+ dev_pm_set_driver_flags(&pdev->dev,
+- DPM_FLAG_SMART_PREPARE |
+- DPM_FLAG_MAY_SKIP_RESUME);
++ DPM_FLAG_SMART_PREPARE);
+ } else {
+ dev_pm_set_driver_flags(&pdev->dev,
+ DPM_FLAG_SMART_PREPARE |
+- DPM_FLAG_SMART_SUSPEND |
+- DPM_FLAG_MAY_SKIP_RESUME);
++ DPM_FLAG_SMART_SUSPEND);
+ }
+
+ device_enable_async_suspend(&pdev->dev);
+@@ -419,21 +417,8 @@ static int dw_i2c_plat_prepare(struct device *dev)
+ */
+ return !has_acpi_companion(dev);
+ }
+-
+-static void dw_i2c_plat_complete(struct device *dev)
+-{
+- /*
+- * The device can only be in runtime suspend at this point if it has not
+- * been resumed throughout the ending system suspend/resume cycle, so if
+- * the platform firmware might mess up with it, request the runtime PM
+- * framework to resume it.
+- */
+- if (pm_runtime_suspended(dev) && pm_resume_via_firmware())
+- pm_request_resume(dev);
+-}
+ #else
+ #define dw_i2c_plat_prepare NULL
+-#define dw_i2c_plat_complete NULL
+ #endif
+
+ #ifdef CONFIG_PM
+@@ -483,7 +468,6 @@ static int __maybe_unused dw_i2c_plat_resume(struct device *dev)
+
+ static const struct dev_pm_ops dw_i2c_dev_pm_ops = {
+ .prepare = dw_i2c_plat_prepare,
+- .complete = dw_i2c_plat_complete,
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(dw_i2c_plat_suspend, dw_i2c_plat_resume)
+ SET_RUNTIME_PM_OPS(dw_i2c_plat_runtime_suspend, dw_i2c_plat_runtime_resume, NULL)
+ };
+diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
+index 26b021f43ba40..11b1c1603aeb4 100644
+--- a/drivers/infiniband/core/verbs.c
++++ b/drivers/infiniband/core/verbs.c
+@@ -2957,15 +2957,18 @@ EXPORT_SYMBOL(__rdma_block_iter_start);
+ bool __rdma_block_iter_next(struct ib_block_iter *biter)
+ {
+ unsigned int block_offset;
++ unsigned int sg_delta;
+
+ if (!biter->__sg_nents || !biter->__sg)
+ return false;
+
+ biter->__dma_addr = sg_dma_address(biter->__sg) + biter->__sg_advance;
+ block_offset = biter->__dma_addr & (BIT_ULL(biter->__pg_bit) - 1);
+- biter->__sg_advance += BIT_ULL(biter->__pg_bit) - block_offset;
++ sg_delta = BIT_ULL(biter->__pg_bit) - block_offset;
+
+- if (biter->__sg_advance >= sg_dma_len(biter->__sg)) {
++ if (sg_dma_len(biter->__sg) - biter->__sg_advance > sg_delta) {
++ biter->__sg_advance += sg_delta;
++ } else {
+ biter->__sg_advance = 0;
+ biter->__sg = sg_next(biter->__sg);
+ biter->__sg_nents--;
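+
+The rewritten iterator advances within the current SG entry only while
+more than one block's worth of data remains; once the entry is exactly
+consumed it moves on with the advance reset, instead of letting
+__sg_advance overshoot dma_len. The decision in isolation, as plain C
+(field names shortened for the model):
+
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+
+static bool advance(uint32_t dma_len, uint32_t *sg_advance, uint32_t delta)
+{
+        if (dma_len - *sg_advance > delta) {
+                *sg_advance += delta;   /* stay within this SG entry */
+                return true;
+        }
+        *sg_advance = 0;                /* consumed: move to the next entry */
+        return false;
+}
+
+int main(void)
+{
+        uint32_t adv = 0;
+
+        /* An 8 KiB entry walked in 4 KiB blocks: one in-entry advance,
+         * then a clean hand-off to the next entry.
+         */
+        printf("%d\n", advance(8192, &adv, 4096));  /* 1, adv = 4096 */
+        printf("%d\n", advance(8192, &adv, 4096));  /* 0, adv reset */
+        return 0;
+}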
+diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.c b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
+index 186d302912606..b02f2f0809c81 100644
+--- a/drivers/infiniband/hw/hfi1/user_exp_rcv.c
++++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
+@@ -23,18 +23,25 @@ static void cacheless_tid_rb_remove(struct hfi1_filedata *fdata,
+ static bool tid_rb_invalidate(struct mmu_interval_notifier *mni,
+ const struct mmu_notifier_range *range,
+ unsigned long cur_seq);
++static bool tid_cover_invalidate(struct mmu_interval_notifier *mni,
++ const struct mmu_notifier_range *range,
++ unsigned long cur_seq);
+ static int program_rcvarray(struct hfi1_filedata *fd, struct tid_user_buf *,
+ struct tid_group *grp,
+ unsigned int start, u16 count,
+ u32 *tidlist, unsigned int *tididx,
+ unsigned int *pmapped);
+-static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo,
+- struct tid_group **grp);
++static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo);
++static void __clear_tid_node(struct hfi1_filedata *fd,
++ struct tid_rb_node *node);
+ static void clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node);
+
+ static const struct mmu_interval_notifier_ops tid_mn_ops = {
+ .invalidate = tid_rb_invalidate,
+ };
++static const struct mmu_interval_notifier_ops tid_cover_ops = {
++ .invalidate = tid_cover_invalidate,
++};
+
+ /*
+ * Initialize context and file private data needed for Expected
+@@ -253,53 +260,65 @@ int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd,
+ tididx = 0, mapped, mapped_pages = 0;
+ u32 *tidlist = NULL;
+ struct tid_user_buf *tidbuf;
++ unsigned long mmu_seq = 0;
+
+ if (!PAGE_ALIGNED(tinfo->vaddr))
+ return -EINVAL;
++ if (tinfo->length == 0)
++ return -EINVAL;
+
+ tidbuf = kzalloc(sizeof(*tidbuf), GFP_KERNEL);
+ if (!tidbuf)
+ return -ENOMEM;
+
++ mutex_init(&tidbuf->cover_mutex);
+ tidbuf->vaddr = tinfo->vaddr;
+ tidbuf->length = tinfo->length;
+ tidbuf->psets = kcalloc(uctxt->expected_count, sizeof(*tidbuf->psets),
+ GFP_KERNEL);
+ if (!tidbuf->psets) {
+- kfree(tidbuf);
+- return -ENOMEM;
++ ret = -ENOMEM;
++ goto fail_release_mem;
++ }
++
++ if (fd->use_mn) {
++ ret = mmu_interval_notifier_insert(
++ &tidbuf->notifier, current->mm,
++ tidbuf->vaddr, tidbuf->npages * PAGE_SIZE,
++ &tid_cover_ops);
++ if (ret)
++ goto fail_release_mem;
++ mmu_seq = mmu_interval_read_begin(&tidbuf->notifier);
+ }
+
+ pinned = pin_rcv_pages(fd, tidbuf);
+ if (pinned <= 0) {
+- kfree(tidbuf->psets);
+- kfree(tidbuf);
+- return pinned;
++ ret = (pinned < 0) ? pinned : -ENOSPC;
++ goto fail_unpin;
+ }
+
+ /* Find sets of physically contiguous pages */
+ tidbuf->n_psets = find_phys_blocks(tidbuf, pinned);
+
+- /*
+- * We don't need to access this under a lock since tid_used is per
+- * process and the same process cannot be in hfi1_user_exp_rcv_clear()
+- * and hfi1_user_exp_rcv_setup() at the same time.
+- */
++ /* Reserve the number of expected tids to be used. */
+ spin_lock(&fd->tid_lock);
+ if (fd->tid_used + tidbuf->n_psets > fd->tid_limit)
+ pageset_count = fd->tid_limit - fd->tid_used;
+ else
+ pageset_count = tidbuf->n_psets;
++ fd->tid_used += pageset_count;
+ spin_unlock(&fd->tid_lock);
+
+- if (!pageset_count)
+- goto bail;
++ if (!pageset_count) {
++ ret = -ENOSPC;
++ goto fail_unreserve;
++ }
+
+ ngroups = pageset_count / dd->rcv_entries.group_size;
+ tidlist = kcalloc(pageset_count, sizeof(*tidlist), GFP_KERNEL);
+ if (!tidlist) {
+ ret = -ENOMEM;
+- goto nomem;
++ goto fail_unreserve;
+ }
+
+ tididx = 0;
+@@ -395,43 +414,78 @@ int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd,
+ }
+ unlock:
+ mutex_unlock(&uctxt->exp_mutex);
+-nomem:
+ hfi1_cdbg(TID, "total mapped: tidpairs:%u pages:%u (%d)", tididx,
+ mapped_pages, ret);
+- if (tididx) {
+- spin_lock(&fd->tid_lock);
+- fd->tid_used += tididx;
+- spin_unlock(&fd->tid_lock);
+- tinfo->tidcnt = tididx;
+- tinfo->length = mapped_pages * PAGE_SIZE;
+-
+- if (copy_to_user(u64_to_user_ptr(tinfo->tidlist),
+- tidlist, sizeof(tidlist[0]) * tididx)) {
+- /*
+- * On failure to copy to the user level, we need to undo
+- * everything done so far so we don't leak resources.
+- */
+- tinfo->tidlist = (unsigned long)&tidlist;
+- hfi1_user_exp_rcv_clear(fd, tinfo);
+- tinfo->tidlist = 0;
+- ret = -EFAULT;
+- goto bail;
++
++ /* fail if nothing was programmed; set an error if one was not already set */
++ if (tididx == 0) {
++ if (ret >= 0)
++ ret = -ENOSPC;
++ goto fail_unreserve;
++ }
++
++ /* adjust reserved tid_used to actual count */
++ spin_lock(&fd->tid_lock);
++ fd->tid_used -= pageset_count - tididx;
++ spin_unlock(&fd->tid_lock);
++
++ /* unpin all pages not covered by a TID */
++ unpin_rcv_pages(fd, tidbuf, NULL, mapped_pages, pinned - mapped_pages,
++ false);
++
++ if (fd->use_mn) {
++ /* check for an invalidate during setup */
++ bool fail = false;
++
++ mutex_lock(&tidbuf->cover_mutex);
++ fail = mmu_interval_read_retry(&tidbuf->notifier, mmu_seq);
++ mutex_unlock(&tidbuf->cover_mutex);
++
++ if (fail) {
++ ret = -EBUSY;
++ goto fail_unprogram;
+ }
+ }
+
+- /*
+- * If not everything was mapped (due to insufficient RcvArray entries,
+- * for example), unpin all unmapped pages so we can pin them nex time.
+- */
+- if (mapped_pages != pinned)
+- unpin_rcv_pages(fd, tidbuf, NULL, mapped_pages,
+- (pinned - mapped_pages), false);
+-bail:
++ tinfo->tidcnt = tididx;
++ tinfo->length = mapped_pages * PAGE_SIZE;
++
++ if (copy_to_user(u64_to_user_ptr(tinfo->tidlist),
++ tidlist, sizeof(tidlist[0]) * tididx)) {
++ ret = -EFAULT;
++ goto fail_unprogram;
++ }
++
++ if (fd->use_mn)
++ mmu_interval_notifier_remove(&tidbuf->notifier);
++ kfree(tidbuf->pages);
+ kfree(tidbuf->psets);
++ kfree(tidbuf);
+ kfree(tidlist);
++ return 0;
++
++fail_unprogram:
++ /* unprogram, unmap, and unpin all allocated TIDs */
++ tinfo->tidlist = (unsigned long)tidlist;
++ hfi1_user_exp_rcv_clear(fd, tinfo);
++ tinfo->tidlist = 0;
++ pinned = 0; /* nothing left to unpin */
++ pageset_count = 0; /* nothing left reserved */
++fail_unreserve:
++ spin_lock(&fd->tid_lock);
++ fd->tid_used -= pageset_count;
++ spin_unlock(&fd->tid_lock);
++fail_unpin:
++ if (fd->use_mn)
++ mmu_interval_notifier_remove(&tidbuf->notifier);
++ if (pinned > 0)
++ unpin_rcv_pages(fd, tidbuf, NULL, 0, pinned, false);
++fail_release_mem:
+ kfree(tidbuf->pages);
++ kfree(tidbuf->psets);
+ kfree(tidbuf);
+- return ret > 0 ? 0 : ret;
++ kfree(tidlist);
++ return ret;
+ }
+
+ int hfi1_user_exp_rcv_clear(struct hfi1_filedata *fd,
+@@ -452,7 +506,7 @@ int hfi1_user_exp_rcv_clear(struct hfi1_filedata *fd,
+
+ mutex_lock(&uctxt->exp_mutex);
+ for (tididx = 0; tididx < tinfo->tidcnt; tididx++) {
+- ret = unprogram_rcvarray(fd, tidinfo[tididx], NULL);
++ ret = unprogram_rcvarray(fd, tidinfo[tididx]);
+ if (ret) {
+ hfi1_cdbg(TID, "Failed to unprogram rcv array %d",
+ ret);
+@@ -706,6 +760,7 @@ static int set_rcvarray_entry(struct hfi1_filedata *fd,
+ }
+
+ node->fdata = fd;
++ mutex_init(&node->invalidate_mutex);
+ node->phys = page_to_phys(pages[0]);
+ node->npages = npages;
+ node->rcventry = rcventry;
+@@ -721,11 +776,6 @@ static int set_rcvarray_entry(struct hfi1_filedata *fd,
+ &tid_mn_ops);
+ if (ret)
+ goto out_unmap;
+- /*
+- * FIXME: This is in the wrong order, the notifier should be
+- * established before the pages are pinned by pin_rcv_pages.
+- */
+- mmu_interval_read_begin(&node->notifier);
+ }
+ fd->entry_to_rb[node->rcventry - uctxt->expected_base] = node;
+
+@@ -745,8 +795,7 @@ out_unmap:
+ return -EFAULT;
+ }
+
+-static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo,
+- struct tid_group **grp)
++static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo)
+ {
+ struct hfi1_ctxtdata *uctxt = fd->uctxt;
+ struct hfi1_devdata *dd = uctxt->dd;
+@@ -769,9 +818,6 @@ static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo,
+ if (!node || node->rcventry != (uctxt->expected_base + rcventry))
+ return -EBADF;
+
+- if (grp)
+- *grp = node->grp;
+-
+ if (fd->use_mn)
+ mmu_interval_notifier_remove(&node->notifier);
+ cacheless_tid_rb_remove(fd, node);
+@@ -779,23 +825,34 @@ static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo,
+ return 0;
+ }
+
+-static void clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node)
++static void __clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node)
+ {
+ struct hfi1_ctxtdata *uctxt = fd->uctxt;
+ struct hfi1_devdata *dd = uctxt->dd;
+
++ mutex_lock(&node->invalidate_mutex);
++ if (node->freed)
++ goto done;
++ node->freed = true;
++
+ trace_hfi1_exp_tid_unreg(uctxt->ctxt, fd->subctxt, node->rcventry,
+ node->npages,
+ node->notifier.interval_tree.start, node->phys,
+ node->dma_addr);
+
+- /*
+- * Make sure device has seen the write before we unpin the
+- * pages.
+- */
++ /* Make sure device has seen the write before pages are unpinned */
+ hfi1_put_tid(dd, node->rcventry, PT_INVALID_FLUSH, 0, 0);
+
+ unpin_rcv_pages(fd, NULL, node, 0, node->npages, true);
++done:
++ mutex_unlock(&node->invalidate_mutex);
++}
++
++static void clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node)
++{
++ struct hfi1_ctxtdata *uctxt = fd->uctxt;
++
++ __clear_tid_node(fd, node);
+
+ node->grp->used--;
+ node->grp->map &= ~(1 << (node->rcventry - node->grp->base));
+@@ -854,10 +911,16 @@ static bool tid_rb_invalidate(struct mmu_interval_notifier *mni,
+ if (node->freed)
+ return true;
+
++ /* take action only if unmapping */
++ if (range->event != MMU_NOTIFY_UNMAP)
++ return true;
++
+ trace_hfi1_exp_tid_inval(uctxt->ctxt, fdata->subctxt,
+ node->notifier.interval_tree.start,
+ node->rcventry, node->npages, node->dma_addr);
+- node->freed = true;
++
++ /* clear the hardware rcvarray entry */
++ __clear_tid_node(fdata, node);
+
+ spin_lock(&fdata->invalid_lock);
+ if (fdata->invalid_tid_idx < uctxt->expected_count) {
+@@ -887,6 +950,23 @@ static bool tid_rb_invalidate(struct mmu_interval_notifier *mni,
+ return true;
+ }
+
++static bool tid_cover_invalidate(struct mmu_interval_notifier *mni,
++ const struct mmu_notifier_range *range,
++ unsigned long cur_seq)
++{
++ struct tid_user_buf *tidbuf =
++ container_of(mni, struct tid_user_buf, notifier);
++
++ /* take action only if unmapping */
++ if (range->event == MMU_NOTIFY_UNMAP) {
++ mutex_lock(&tidbuf->cover_mutex);
++ mmu_interval_set_seq(mni, cur_seq);
++ mutex_unlock(&tidbuf->cover_mutex);
++ }
++
++ return true;
++}
++
+ static void cacheless_tid_rb_remove(struct hfi1_filedata *fdata,
+ struct tid_rb_node *tnode)
+ {
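+
+The new cover notifier applies the standard mmu_interval_notifier
+sequence pattern: sample a sequence number before pinning, let the
+invalidate callback bump it under a lock, and abort if it moved. A
+hedged kernel-style sketch of that shape (pin_and_program() is a
+placeholder, not a real helper):
+
+unsigned long seq = mmu_interval_read_begin(&buf->notifier);
+
+pin_and_program(buf);           /* may race with a concurrent unmap */
+
+mutex_lock(&buf->cover_mutex);
+if (mmu_interval_read_retry(&buf->notifier, seq)) {
+        /* an invalidate ran during setup: undo everything */
+        mutex_unlock(&buf->cover_mutex);
+        return -EBUSY;
+}
+mutex_unlock(&buf->cover_mutex);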
+diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.h b/drivers/infiniband/hw/hfi1/user_exp_rcv.h
+index 8c53e416bf843..f8ee997d0050e 100644
+--- a/drivers/infiniband/hw/hfi1/user_exp_rcv.h
++++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.h
+@@ -16,6 +16,8 @@ struct tid_pageset {
+ };
+
+ struct tid_user_buf {
++ struct mmu_interval_notifier notifier;
++ struct mutex cover_mutex;
+ unsigned long vaddr;
+ unsigned long length;
+ unsigned int npages;
+@@ -27,6 +29,7 @@ struct tid_user_buf {
+ struct tid_rb_node {
+ struct mmu_interval_notifier notifier;
+ struct hfi1_filedata *fdata;
++ struct mutex invalidate_mutex; /* covers hw removal */
+ unsigned long phys;
+ struct tid_group *grp;
+ u32 rcventry;
+diff --git a/drivers/infiniband/sw/rxe/rxe_param.h b/drivers/infiniband/sw/rxe/rxe_param.h
+index 86c7a8bf3cbbd..fa41009ce8a9d 100644
+--- a/drivers/infiniband/sw/rxe/rxe_param.h
++++ b/drivers/infiniband/sw/rxe/rxe_param.h
+@@ -91,11 +91,11 @@ enum rxe_device_param {
+ RXE_MAX_SRQ = DEFAULT_MAX_VALUE - RXE_MIN_SRQ_INDEX,
+
+ RXE_MIN_MR_INDEX = 0x00000001,
+- RXE_MAX_MR_INDEX = DEFAULT_MAX_VALUE,
+- RXE_MAX_MR = DEFAULT_MAX_VALUE - RXE_MIN_MR_INDEX,
+- RXE_MIN_MW_INDEX = 0x00010001,
+- RXE_MAX_MW_INDEX = 0x00020000,
+- RXE_MAX_MW = 0x00001000,
++ RXE_MAX_MR_INDEX = DEFAULT_MAX_VALUE >> 1,
++ RXE_MAX_MR = RXE_MAX_MR_INDEX - RXE_MIN_MR_INDEX,
++ RXE_MIN_MW_INDEX = RXE_MAX_MR_INDEX + 1,
++ RXE_MAX_MW_INDEX = DEFAULT_MAX_VALUE,
++ RXE_MAX_MW = RXE_MAX_MW_INDEX - RXE_MIN_MW_INDEX,
+
+ RXE_MAX_PKT_PER_ACK = 64,
+
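+
+The point of the new constants is that MR and MW indices now split one
+index space in half instead of using a fixed, overlapping MW window. A
+quick standalone check of the invariant (the 1 << 20 bound is a stand-in
+for DEFAULT_MAX_VALUE, which is not shown in this hunk):
+
+#include <assert.h>
+#include <stdio.h>
+
+int main(void)
+{
+        unsigned int max   = 1u << 20;  /* assumed DEFAULT_MAX_VALUE */
+        unsigned int mr_lo = 1, mr_hi = max >> 1;
+        unsigned int mw_lo = mr_hi + 1, mw_hi = max;
+
+        assert(mr_hi < mw_lo);          /* the ranges no longer overlap */
+        printf("MR %u..%u, MW %u..%u\n", mr_lo, mr_hi, mw_lo, mw_hi);
+        return 0;
+}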
+diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c
+index f50620f5a0a14..1151c0b5cceab 100644
+--- a/drivers/infiniband/sw/rxe/rxe_pool.c
++++ b/drivers/infiniband/sw/rxe/rxe_pool.c
+@@ -23,16 +23,16 @@ static const struct rxe_type_info {
+ .size = sizeof(struct rxe_ucontext),
+ .elem_offset = offsetof(struct rxe_ucontext, elem),
+ .min_index = 1,
+- .max_index = UINT_MAX,
+- .max_elem = UINT_MAX,
++ .max_index = RXE_MAX_UCONTEXT,
++ .max_elem = RXE_MAX_UCONTEXT,
+ },
+ [RXE_TYPE_PD] = {
+ .name = "pd",
+ .size = sizeof(struct rxe_pd),
+ .elem_offset = offsetof(struct rxe_pd, elem),
+ .min_index = 1,
+- .max_index = UINT_MAX,
+- .max_elem = UINT_MAX,
++ .max_index = RXE_MAX_PD,
++ .max_elem = RXE_MAX_PD,
+ },
+ [RXE_TYPE_AH] = {
+ .name = "ah",
+@@ -40,7 +40,7 @@ static const struct rxe_type_info {
+ .elem_offset = offsetof(struct rxe_ah, elem),
+ .min_index = RXE_MIN_AH_INDEX,
+ .max_index = RXE_MAX_AH_INDEX,
+- .max_elem = RXE_MAX_AH_INDEX - RXE_MIN_AH_INDEX + 1,
++ .max_elem = RXE_MAX_AH,
+ },
+ [RXE_TYPE_SRQ] = {
+ .name = "srq",
+@@ -49,7 +49,7 @@ static const struct rxe_type_info {
+ .cleanup = rxe_srq_cleanup,
+ .min_index = RXE_MIN_SRQ_INDEX,
+ .max_index = RXE_MAX_SRQ_INDEX,
+- .max_elem = RXE_MAX_SRQ_INDEX - RXE_MIN_SRQ_INDEX + 1,
++ .max_elem = RXE_MAX_SRQ,
+ },
+ [RXE_TYPE_QP] = {
+ .name = "qp",
+@@ -58,7 +58,7 @@ static const struct rxe_type_info {
+ .cleanup = rxe_qp_cleanup,
+ .min_index = RXE_MIN_QP_INDEX,
+ .max_index = RXE_MAX_QP_INDEX,
+- .max_elem = RXE_MAX_QP_INDEX - RXE_MIN_QP_INDEX + 1,
++ .max_elem = RXE_MAX_QP,
+ },
+ [RXE_TYPE_CQ] = {
+ .name = "cq",
+@@ -66,8 +66,8 @@ static const struct rxe_type_info {
+ .elem_offset = offsetof(struct rxe_cq, elem),
+ .cleanup = rxe_cq_cleanup,
+ .min_index = 1,
+- .max_index = UINT_MAX,
+- .max_elem = UINT_MAX,
++ .max_index = RXE_MAX_CQ,
++ .max_elem = RXE_MAX_CQ,
+ },
+ [RXE_TYPE_MR] = {
+ .name = "mr",
+@@ -76,7 +76,7 @@ static const struct rxe_type_info {
+ .cleanup = rxe_mr_cleanup,
+ .min_index = RXE_MIN_MR_INDEX,
+ .max_index = RXE_MAX_MR_INDEX,
+- .max_elem = RXE_MAX_MR_INDEX - RXE_MIN_MR_INDEX + 1,
++ .max_elem = RXE_MAX_MR,
+ },
+ [RXE_TYPE_MW] = {
+ .name = "mw",
+@@ -85,7 +85,7 @@ static const struct rxe_type_info {
+ .cleanup = rxe_mw_cleanup,
+ .min_index = RXE_MIN_MW_INDEX,
+ .max_index = RXE_MAX_MW_INDEX,
+- .max_elem = RXE_MAX_MW_INDEX - RXE_MIN_MW_INDEX + 1,
++ .max_elem = RXE_MAX_MW,
+ },
+ };
+
+diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
+index b0f776448a1cd..fa021af8506e4 100644
+--- a/drivers/input/mouse/synaptics.c
++++ b/drivers/input/mouse/synaptics.c
+@@ -192,7 +192,6 @@ static const char * const smbus_pnp_ids[] = {
+ "SYN3221", /* HP 15-ay000 */
+ "SYN323d", /* HP Spectre X360 13-w013dx */
+ "SYN3257", /* HP Envy 13-ad105ng */
+- "SYN3286", /* HP Laptop 15-da3001TU */
+ NULL
+ };
+
+diff --git a/drivers/input/serio/i8042-acpipnpio.h b/drivers/input/serio/i8042-acpipnpio.h
+index 46f8a694291ed..efc61736099b9 100644
+--- a/drivers/input/serio/i8042-acpipnpio.h
++++ b/drivers/input/serio/i8042-acpipnpio.h
+@@ -1238,6 +1238,13 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ },
++ {
++ .matches = {
++ DMI_MATCH(DMI_BOARD_NAME, "PCX0DX"),
++ },
++ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
++ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
++ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "X170SM"),
+diff --git a/drivers/interconnect/qcom/msm8996.c b/drivers/interconnect/qcom/msm8996.c
+index c2903ae3b3bc3..25a1a32bc611f 100644
+--- a/drivers/interconnect/qcom/msm8996.c
++++ b/drivers/interconnect/qcom/msm8996.c
+@@ -33,6 +33,13 @@ static const char * const bus_a0noc_clocks[] = {
+ "aggre0_noc_mpu_cfg"
+ };
+
++static const char * const bus_a2noc_clocks[] = {
++ "bus",
++ "bus_a",
++ "aggre2_ufs_axi",
++ "ufs_axi"
++};
++
+ static const u16 mas_a0noc_common_links[] = {
+ MSM8996_SLAVE_A0NOC_SNOC
+ };
+@@ -1806,7 +1813,7 @@ static const struct regmap_config msm8996_a0noc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+- .max_register = 0x9000,
++ .max_register = 0x6000,
+ .fast_io = true
+ };
+
+@@ -1830,7 +1837,7 @@ static const struct regmap_config msm8996_a1noc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+- .max_register = 0x7000,
++ .max_register = 0x5000,
+ .fast_io = true
+ };
+
+@@ -1851,7 +1858,7 @@ static const struct regmap_config msm8996_a2noc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+- .max_register = 0xa000,
++ .max_register = 0x7000,
+ .fast_io = true
+ };
+
+@@ -1859,6 +1866,8 @@ static const struct qcom_icc_desc msm8996_a2noc = {
+ .type = QCOM_ICC_NOC,
+ .nodes = a2noc_nodes,
+ .num_nodes = ARRAY_SIZE(a2noc_nodes),
++ .clocks = bus_a2noc_clocks,
++ .num_clocks = ARRAY_SIZE(bus_a2noc_clocks),
+ .regmap_cfg = &msm8996_a2noc_regmap_config
+ };
+
+@@ -1877,7 +1886,7 @@ static const struct regmap_config msm8996_bimc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+- .max_register = 0x62000,
++ .max_register = 0x5a000,
+ .fast_io = true
+ };
+
+@@ -1988,7 +1997,7 @@ static const struct regmap_config msm8996_mnoc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+- .max_register = 0x20000,
++ .max_register = 0x1c000,
+ .fast_io = true
+ };
+
+diff --git a/drivers/memory/atmel-sdramc.c b/drivers/memory/atmel-sdramc.c
+index 9c49d00c2a966..ea6e9e1eaf046 100644
+--- a/drivers/memory/atmel-sdramc.c
++++ b/drivers/memory/atmel-sdramc.c
+@@ -47,19 +47,17 @@ static int atmel_ramc_probe(struct platform_device *pdev)
+ caps = of_device_get_match_data(&pdev->dev);
+
+ if (caps->has_ddrck) {
+- clk = devm_clk_get(&pdev->dev, "ddrck");
++ clk = devm_clk_get_enabled(&pdev->dev, "ddrck");
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+- clk_prepare_enable(clk);
+ }
+
+ if (caps->has_mpddr_clk) {
+- clk = devm_clk_get(&pdev->dev, "mpddr");
++ clk = devm_clk_get_enabled(&pdev->dev, "mpddr");
+ if (IS_ERR(clk)) {
+ pr_err("AT91 RAMC: couldn't get mpddr clock\n");
+ return PTR_ERR(clk);
+ }
+- clk_prepare_enable(clk);
+ }
+
+ return 0;
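+
+devm_clk_get_enabled() folds devm_clk_get() and clk_prepare_enable()
+into one call and, unlike the open-coded sequence it replaces here,
+registers the matching disable/unprepare with devres so the clock is
+released on driver detach. A minimal probe sketch (kernel-style):
+
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+
+static int example_probe(struct platform_device *pdev)
+{
+        struct clk *clk;
+
+        /* got, prepared and enabled in one step; devres undoes all
+         * three automatically when the device is unbound
+         */
+        clk = devm_clk_get_enabled(&pdev->dev, "ddrck");
+        if (IS_ERR(clk))
+                return PTR_ERR(clk);
+
+        return 0;
+}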
+diff --git a/drivers/memory/mvebu-devbus.c b/drivers/memory/mvebu-devbus.c
+index 8450638e86700..efc6c08db2b70 100644
+--- a/drivers/memory/mvebu-devbus.c
++++ b/drivers/memory/mvebu-devbus.c
+@@ -280,10 +280,9 @@ static int mvebu_devbus_probe(struct platform_device *pdev)
+ if (IS_ERR(devbus->base))
+ return PTR_ERR(devbus->base);
+
+- clk = devm_clk_get(&pdev->dev, NULL);
++ clk = devm_clk_get_enabled(&pdev->dev, NULL);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+- clk_prepare_enable(clk);
+
+ /*
+ * Obtain clock period in picoseconds,
+diff --git a/drivers/memory/tegra/tegra186.c b/drivers/memory/tegra/tegra186.c
+index 62477e592bf5f..7bb73f06fad3e 100644
+--- a/drivers/memory/tegra/tegra186.c
++++ b/drivers/memory/tegra/tegra186.c
+@@ -22,32 +22,6 @@
+ #define MC_SID_STREAMID_SECURITY_WRITE_ACCESS_DISABLED BIT(16)
+ #define MC_SID_STREAMID_SECURITY_OVERRIDE BIT(8)
+
+-static void tegra186_mc_program_sid(struct tegra_mc *mc)
+-{
+- unsigned int i;
+-
+- for (i = 0; i < mc->soc->num_clients; i++) {
+- const struct tegra_mc_client *client = &mc->soc->clients[i];
+- u32 override, security;
+-
+- override = readl(mc->regs + client->regs.sid.override);
+- security = readl(mc->regs + client->regs.sid.security);
+-
+- dev_dbg(mc->dev, "client %s: override: %x security: %x\n",
+- client->name, override, security);
+-
+- dev_dbg(mc->dev, "setting SID %u for %s\n", client->sid,
+- client->name);
+- writel(client->sid, mc->regs + client->regs.sid.override);
+-
+- override = readl(mc->regs + client->regs.sid.override);
+- security = readl(mc->regs + client->regs.sid.security);
+-
+- dev_dbg(mc->dev, "client %s: override: %x security: %x\n",
+- client->name, override, security);
+- }
+-}
+-
+ static int tegra186_mc_probe(struct tegra_mc *mc)
+ {
+ struct platform_device *pdev = to_platform_device(mc->dev);
+@@ -85,8 +59,6 @@ populate:
+ if (err < 0)
+ return err;
+
+- tegra186_mc_program_sid(mc);
+-
+ return 0;
+ }
+
+@@ -95,13 +67,6 @@ static void tegra186_mc_remove(struct tegra_mc *mc)
+ of_platform_depopulate(mc->dev);
+ }
+
+-static int tegra186_mc_resume(struct tegra_mc *mc)
+-{
+- tegra186_mc_program_sid(mc);
+-
+- return 0;
+-}
+-
+ #if IS_ENABLED(CONFIG_IOMMU_API)
+ static void tegra186_mc_client_sid_override(struct tegra_mc *mc,
+ const struct tegra_mc_client *client,
+@@ -173,7 +138,6 @@ static int tegra186_mc_probe_device(struct tegra_mc *mc, struct device *dev)
+ const struct tegra_mc_ops tegra186_mc_ops = {
+ .probe = tegra186_mc_probe,
+ .remove = tegra186_mc_remove,
+- .resume = tegra186_mc_resume,
+ .probe_device = tegra186_mc_probe_device,
+ .handle_irq = tegra30_mc_handle_irq,
+ };
+diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
+index a6a0321a89310..a736971470534 100644
+--- a/drivers/net/dsa/microchip/ksz9477.c
++++ b/drivers/net/dsa/microchip/ksz9477.c
+@@ -548,10 +548,10 @@ int ksz9477_fdb_del(struct ksz_device *dev, int port,
+ ksz_read32(dev, REG_SW_ALU_VAL_D, &alu_table[3]);
+
+ /* clear forwarding port */
+- alu_table[2] &= ~BIT(port);
++ alu_table[1] &= ~BIT(port);
+
+ /* if there is no port to forward, clear table */
+- if ((alu_table[2] & ALU_V_PORT_MAP) == 0) {
++ if ((alu_table[1] & ALU_V_PORT_MAP) == 0) {
+ alu_table[0] = 0;
+ alu_table[1] = 0;
+ alu_table[2] = 0;
+diff --git a/drivers/net/dsa/microchip/ksz9477_i2c.c b/drivers/net/dsa/microchip/ksz9477_i2c.c
+index 3763930dc6fc4..aae1dadef882d 100644
+--- a/drivers/net/dsa/microchip/ksz9477_i2c.c
++++ b/drivers/net/dsa/microchip/ksz9477_i2c.c
+@@ -105,7 +105,7 @@ static const struct of_device_id ksz9477_dt_ids[] = {
+ },
+ {
+ .compatible = "microchip,ksz8563",
+- .data = &ksz_switch_chips[KSZ9893]
++ .data = &ksz_switch_chips[KSZ8563]
+ },
+ {
+ .compatible = "microchip,ksz9567",
+diff --git a/drivers/net/ethernet/adi/adin1110.c b/drivers/net/ethernet/adi/adin1110.c
+index 9d8dfe1729948..ecce5f7a549f2 100644
+--- a/drivers/net/ethernet/adi/adin1110.c
++++ b/drivers/net/ethernet/adi/adin1110.c
+@@ -356,7 +356,7 @@ static int adin1110_read_fifo(struct adin1110_port_priv *port_priv)
+
+ if ((port_priv->flags & IFF_ALLMULTI && rxb->pkt_type == PACKET_MULTICAST) ||
+ (port_priv->flags & IFF_BROADCAST && rxb->pkt_type == PACKET_BROADCAST))
+- rxb->offload_fwd_mark = 1;
++ rxb->offload_fwd_mark = port_priv->priv->forwarding;
+
+ netif_rx(rxb);
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+index 3936543a74d8f..4030d619e84f5 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+@@ -524,19 +524,28 @@ static void xgbe_disable_vxlan(struct xgbe_prv_data *pdata)
+ netif_dbg(pdata, drv, pdata->netdev, "VXLAN acceleration disabled\n");
+ }
+
++static unsigned int xgbe_get_fc_queue_count(struct xgbe_prv_data *pdata)
++{
++ unsigned int max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
++
++ /* From MAC ver 30H, the TFCR is per priority instead of per queue */
++ if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) >= 0x30)
++ return max_q_count;
++ else
++ return min_t(unsigned int, pdata->tx_q_count, max_q_count);
++}
++
+ static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata)
+ {
+- unsigned int max_q_count, q_count;
+ unsigned int reg, reg_val;
+- unsigned int i;
++ unsigned int i, q_count;
+
+ /* Clear MTL flow control */
+ for (i = 0; i < pdata->rx_q_count; i++)
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 0);
+
+ /* Clear MAC flow control */
+- max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
+- q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
++ q_count = xgbe_get_fc_queue_count(pdata);
+ reg = MAC_Q0TFCR;
+ for (i = 0; i < q_count; i++) {
+ reg_val = XGMAC_IOREAD(pdata, reg);
+@@ -553,9 +562,8 @@ static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata)
+ {
+ struct ieee_pfc *pfc = pdata->pfc;
+ struct ieee_ets *ets = pdata->ets;
+- unsigned int max_q_count, q_count;
+ unsigned int reg, reg_val;
+- unsigned int i;
++ unsigned int i, q_count;
+
+ /* Set MTL flow control */
+ for (i = 0; i < pdata->rx_q_count; i++) {
+@@ -579,8 +587,7 @@ static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata)
+ }
+
+ /* Set MAC flow control */
+- max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
+- q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
++ q_count = xgbe_get_fc_queue_count(pdata);
+ reg = MAC_Q0TFCR;
+ for (i = 0; i < q_count; i++) {
+ reg_val = XGMAC_IOREAD(pdata, reg);
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+index 0c5c1b1556830..43fdd111235a6 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+@@ -496,6 +496,7 @@ static enum xgbe_an xgbe_an73_tx_training(struct xgbe_prv_data *pdata,
+ reg |= XGBE_KR_TRAINING_ENABLE;
+ reg |= XGBE_KR_TRAINING_START;
+ XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg);
++ pdata->kr_start_time = jiffies;
+
+ netif_dbg(pdata, link, pdata->netdev,
+ "KR training initiated\n");
+@@ -632,6 +633,8 @@ static enum xgbe_an xgbe_an73_incompat_link(struct xgbe_prv_data *pdata)
+
+ xgbe_switch_mode(pdata);
+
++ pdata->an_result = XGBE_AN_READY;
++
+ xgbe_an_restart(pdata);
+
+ return XGBE_AN_INCOMPAT_LINK;
+@@ -1275,9 +1278,30 @@ static bool xgbe_phy_aneg_done(struct xgbe_prv_data *pdata)
+ static void xgbe_check_link_timeout(struct xgbe_prv_data *pdata)
+ {
+ unsigned long link_timeout;
++ unsigned long kr_time;
++ int wait;
+
+ link_timeout = pdata->link_check + (XGBE_LINK_TIMEOUT * HZ);
+ if (time_after(jiffies, link_timeout)) {
++ if ((xgbe_cur_mode(pdata) == XGBE_MODE_KR) &&
++ pdata->phy.autoneg == AUTONEG_ENABLE) {
++ /* AN restart should not happen while KR training is in progress.
++ * The while loop ensures no AN restart during KR training,
++ * waits up to 500ms, and AN restart is triggered only if KR
++ * training has failed.
++ */
++ wait = XGBE_KR_TRAINING_WAIT_ITER;
++ while (wait--) {
++ kr_time = pdata->kr_start_time +
++ msecs_to_jiffies(XGBE_AN_MS_TIMEOUT);
++ if (time_after(jiffies, kr_time))
++ break;
++ /* AN restart is not required if the AN result is COMPLETE */
++ if (pdata->an_result == XGBE_AN_COMPLETE)
++ return;
++ usleep_range(10000, 11000);
++ }
++ }
+ netif_dbg(pdata, link, pdata->netdev, "AN link timeout\n");
+ xgbe_phy_config_aneg(pdata);
+ }
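+
+The KR guard above is a bounded polling loop: sleep in roughly 10 ms
+steps, give up once kr_start_time plus XGBE_AN_MS_TIMEOUT (500 ms) has
+passed, and return early if autonegotiation completes. The same
+jiffies-based shape in isolation (a sketch; an_complete() stands in for
+the driver's result check):
+
+unsigned long deadline = kr_start_time + msecs_to_jiffies(500);
+int wait = 50;                  /* XGBE_KR_TRAINING_WAIT_ITER */
+
+while (wait--) {
+        if (time_after(jiffies, deadline))
+                break;          /* training window expired */
+        if (an_complete())
+                return;         /* no AN restart needed */
+        usleep_range(10000, 11000);
+}
+/* fall through: restart autonegotiation */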
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
+index 71f24cb479355..7a41367c437dd 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
++++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
+@@ -290,6 +290,7 @@
+ /* Auto-negotiation */
+ #define XGBE_AN_MS_TIMEOUT 500
+ #define XGBE_LINK_TIMEOUT 5
++#define XGBE_KR_TRAINING_WAIT_ITER 50
+
+ #define XGBE_SGMII_AN_LINK_STATUS BIT(1)
+ #define XGBE_SGMII_AN_LINK_SPEED (BIT(2) | BIT(3))
+@@ -1280,6 +1281,7 @@ struct xgbe_prv_data {
+ unsigned int parallel_detect;
+ unsigned int fec_ability;
+ unsigned long an_start;
++ unsigned long kr_start_time;
+ enum xgbe_an_mode an_mode;
+
+ /* I2C support */
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+index 8cad15c458b39..703fc163235f9 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+@@ -3865,7 +3865,7 @@ void bnxt_ethtool_init(struct bnxt *bp)
+ test_info->timeout = HWRM_CMD_TIMEOUT;
+ for (i = 0; i < bp->num_tests; i++) {
+ char *str = test_info->string[i];
+- char *fw_str = resp->test0_name + i * 32;
++ char *fw_str = resp->test_name[i];
+
+ if (i == BNXT_MACLPBK_TEST_IDX) {
+ strcpy(str, "Mac loopback test (offline)");
+@@ -3876,14 +3876,9 @@ void bnxt_ethtool_init(struct bnxt *bp)
+ } else if (i == BNXT_IRQ_TEST_IDX) {
+ strcpy(str, "Interrupt_test (offline)");
+ } else {
+- strscpy(str, fw_str, ETH_GSTRING_LEN);
+- strncat(str, " test", ETH_GSTRING_LEN - strlen(str));
+- if (test_info->offline_mask & (1 << i))
+- strncat(str, " (offline)",
+- ETH_GSTRING_LEN - strlen(str));
+- else
+- strncat(str, " (online)",
+- ETH_GSTRING_LEN - strlen(str));
++ snprintf(str, ETH_GSTRING_LEN, "%s test (%s)",
++ fw_str, test_info->offline_mask & (1 << i) ?
++ "offline" : "online");
+ }
+ }
+
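+
+The deleted strscpy()/strncat() chain was unsafe because strncat()'s
+third argument limits the bytes appended, not the destination size: the
+terminating NUL lands after those bytes, so passing
+ETH_GSTRING_LEN - strlen(str) can write one byte past the buffer.
+snprintf() bounds the whole write in one call. A standalone
+illustration:
+
+#include <stdio.h>
+#include <string.h>
+
+int main(void)
+{
+        char buf[32];
+
+        /* safe: the size covers everything, including the NUL */
+        snprintf(buf, sizeof(buf), "%s test (%s)", "NVRAM", "offline");
+        printf("%s\n", buf);
+
+        /* with strncat(), up to 'n' bytes are appended and then a NUL
+         * is written, so the bound must leave room for it explicitly
+         */
+        strcpy(buf, "NVRAM");
+        strncat(buf, " test", sizeof(buf) - strlen(buf) - 1);
+        printf("%s\n", buf);
+        return 0;
+}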
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+index b753032a10474..fb78fc38530da 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+@@ -10099,14 +10099,7 @@ struct hwrm_selftest_qlist_output {
+ u8 unused_0;
+ __le16 test_timeout;
+ u8 unused_1[2];
+- char test0_name[32];
+- char test1_name[32];
+- char test2_name[32];
+- char test3_name[32];
+- char test4_name[32];
+- char test5_name[32];
+- char test6_name[32];
+- char test7_name[32];
++ char test_name[8][32];
+ u8 eyescope_target_BER_support;
+ #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E8_SUPPORTED 0x0UL
+ #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E9_SUPPORTED 0x1UL
+diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
+index 4179a12fc8819..af9ea5e4371b3 100644
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -11174,7 +11174,7 @@ static void tg3_reset_task(struct work_struct *work)
+ rtnl_lock();
+ tg3_full_lock(tp, 0);
+
+- if (!netif_running(tp->dev)) {
++ if (tp->pcierr_recovery || !netif_running(tp->dev)) {
+ tg3_flag_clear(tp, RESET_TASK_PENDING);
+ tg3_full_unlock(tp);
+ rtnl_unlock();
+@@ -18109,6 +18109,9 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
+
+ netdev_info(netdev, "PCI I/O error detected\n");
+
++ /* Want to make sure that the reset task doesn't run */
++ tg3_reset_task_cancel(tp);
++
+ rtnl_lock();
+
+ /* Could be second call or maybe we don't have netdev yet */
+@@ -18125,9 +18128,6 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
+
+ tg3_timer_stop(tp);
+
+- /* Want to make sure that the reset task doesn't run */
+- tg3_reset_task_cancel(tp);
+-
+ netif_device_detach(netdev);
+
+ /* Clean up software state, even if MMIO is blocked */
+diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
+index 4f63f1ba3161c..300f47ca42e3e 100644
+--- a/drivers/net/ethernet/cadence/macb_main.c
++++ b/drivers/net/ethernet/cadence/macb_main.c
+@@ -2188,7 +2188,6 @@ static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev)
+ bool cloned = skb_cloned(*skb) || skb_header_cloned(*skb) ||
+ skb_is_nonlinear(*skb);
+ int padlen = ETH_ZLEN - (*skb)->len;
+- int headroom = skb_headroom(*skb);
+ int tailroom = skb_tailroom(*skb);
+ struct sk_buff *nskb;
+ u32 fcs;
+@@ -2202,9 +2201,6 @@ static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev)
+ /* FCS could be appeded to tailroom. */
+ if (tailroom >= ETH_FCS_LEN)
+ goto add_fcs;
+- /* FCS could be appeded by moving data to headroom. */
+- else if (!cloned && headroom + tailroom >= ETH_FCS_LEN)
+- padlen = 0;
+ /* No room for FCS, need to reallocate skb. */
+ else
+ padlen = ETH_FCS_LEN;
+@@ -2213,10 +2209,7 @@ static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev)
+ padlen += ETH_FCS_LEN;
+ }
+
+- if (!cloned && headroom + tailroom >= padlen) {
+- (*skb)->data = memmove((*skb)->head, (*skb)->data, (*skb)->len);
+- skb_set_tail_pointer(*skb, (*skb)->len);
+- } else {
++ if (cloned || tailroom < padlen) {
+ nskb = skb_copy_expand(*skb, 0, padlen, GFP_ATOMIC);
+ if (!nskb)
+ return -ENOMEM;
+diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
+index 13d5ff4e0e020..6bf3cc11d2121 100644
+--- a/drivers/net/ethernet/engleder/tsnep_main.c
++++ b/drivers/net/ethernet/engleder/tsnep_main.c
+@@ -419,7 +419,7 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
+ /* ring full, shall not happen because queue is stopped if full
+ * below
+ */
+- netif_stop_queue(tx->adapter->netdev);
++ netif_stop_subqueue(tx->adapter->netdev, tx->queue_index);
+
+ spin_unlock_irqrestore(&tx->lock, flags);
+
+@@ -462,7 +462,7 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
+
+ if (tsnep_tx_desc_available(tx) < (MAX_SKB_FRAGS + 1)) {
+ /* ring can get full with next frame */
+- netif_stop_queue(tx->adapter->netdev);
++ netif_stop_subqueue(tx->adapter->netdev, tx->queue_index);
+ }
+
+ spin_unlock_irqrestore(&tx->lock, flags);
+@@ -472,11 +472,14 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
+
+ static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
+ {
++ struct tsnep_tx_entry *entry;
++ struct netdev_queue *nq;
+ unsigned long flags;
+ int budget = 128;
+- struct tsnep_tx_entry *entry;
+- int count;
+ int length;
++ int count;
++
++ nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index);
+
+ spin_lock_irqsave(&tx->lock, flags);
+
+@@ -533,8 +536,8 @@ static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
+ } while (likely(budget));
+
+ if ((tsnep_tx_desc_available(tx) >= ((MAX_SKB_FRAGS + 1) * 2)) &&
+- netif_queue_stopped(tx->adapter->netdev)) {
+- netif_wake_queue(tx->adapter->netdev);
++ netif_tx_queue_stopped(nq)) {
++ netif_tx_wake_queue(nq);
+ }
+
+ spin_unlock_irqrestore(&tx->lock, flags);
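+
+On a multiqueue netdev, netif_stop_queue()/netif_wake_queue() act on
+queue 0 only, so a full ring on any other queue was paused and woken
+incorrectly; the fix always addresses the ring's own subqueue. The
+pattern in isolation (kernel-style sketch; qidx is the ring's queue
+index):
+
+#include <linux/netdevice.h>
+
+static void ring_stop(struct net_device *ndev, u16 qidx)
+{
+        netif_stop_subqueue(ndev, qidx);        /* stop just this queue */
+}
+
+static void ring_wake(struct net_device *ndev, u16 qidx)
+{
+        struct netdev_queue *nq = netdev_get_tx_queue(ndev, qidx);
+
+        if (netif_tx_queue_stopped(nq))
+                netif_tx_wake_queue(nq);        /* wake just this queue */
+}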
+diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
+index 3a79ead5219ae..e96449eedfb54 100644
+--- a/drivers/net/ethernet/freescale/enetc/enetc.c
++++ b/drivers/net/ethernet/freescale/enetc/enetc.c
+@@ -2290,14 +2290,14 @@ static void enetc_tx_onestep_tstamp(struct work_struct *work)
+
+ priv = container_of(work, struct enetc_ndev_priv, tx_onestep_tstamp);
+
+- netif_tx_lock(priv->ndev);
++ netif_tx_lock_bh(priv->ndev);
+
+ clear_bit_unlock(ENETC_TX_ONESTEP_TSTAMP_IN_PROGRESS, &priv->flags);
+ skb = skb_dequeue(&priv->tx_skbs);
+ if (skb)
+ enetc_start_xmit(skb, priv->ndev);
+
+- netif_tx_unlock(priv->ndev);
++ netif_tx_unlock_bh(priv->ndev);
+ }
+
+ static void enetc_tx_onestep_tstamp_init(struct enetc_ndev_priv *priv)
+diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
+index f250b0df27fbb..6f914180f4797 100644
+--- a/drivers/net/ethernet/freescale/fec_main.c
++++ b/drivers/net/ethernet/freescale/fec_main.c
+@@ -3089,7 +3089,7 @@ static void fec_enet_free_buffers(struct net_device *ndev)
+ for (q = 0; q < fep->num_rx_queues; q++) {
+ rxq = fep->rx_queue[q];
+ for (i = 0; i < rxq->bd.ring_size; i++)
+- page_pool_release_page(rxq->page_pool, rxq->rx_skb_info[i].page);
++ page_pool_put_full_page(rxq->page_pool, rxq->rx_skb_info[i].page, false);
+
+ if (xdp_rxq_info_is_reg(&rxq->xdp_rxq))
+ xdp_rxq_info_unreg(&rxq->xdp_rxq);
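+
+page_pool_release_page() only unmaps a page and detaches it from the
+pool; it does not give the page back, so tearing down the RX rings with
+it leaked every buffer. page_pool_put_full_page() actually returns the
+page to the pool (or frees it). A hedged teardown sketch:
+
+#include <net/page_pool.h>
+
+static void free_rx_pages(struct page_pool *pool, struct page **pages,
+                          int n)
+{
+        int i;
+
+        /* 'false': not called from NAPI, so no direct recycling */
+        for (i = 0; i < n; i++)
+                page_pool_put_full_page(pool, pages[i], false);
+}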
+diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
+index 0d1bab4ac1b07..2a9f1eeeb7015 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf.h
++++ b/drivers/net/ethernet/intel/iavf/iavf.h
+@@ -249,6 +249,7 @@ struct iavf_cloud_filter {
+
+ /* board specific private data structure */
+ struct iavf_adapter {
++ struct workqueue_struct *wq;
+ struct work_struct reset_task;
+ struct work_struct adminq_task;
+ struct delayed_work client_task;
+@@ -459,7 +460,6 @@ struct iavf_device {
+
+ /* needed by iavf_ethtool.c */
+ extern char iavf_driver_name[];
+-extern struct workqueue_struct *iavf_wq;
+
+ static inline const char *iavf_state_str(enum iavf_state_t state)
+ {
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+index a056e15456153..83cfc54a47062 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
++++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+@@ -532,7 +532,7 @@ static int iavf_set_priv_flags(struct net_device *netdev, u32 flags)
+ if (changed_flags & IAVF_FLAG_LEGACY_RX) {
+ if (netif_running(netdev)) {
+ adapter->flags |= IAVF_FLAG_RESET_NEEDED;
+- queue_work(iavf_wq, &adapter->reset_task);
++ queue_work(adapter->wq, &adapter->reset_task);
+ }
+ }
+
+@@ -672,7 +672,7 @@ static int iavf_set_ringparam(struct net_device *netdev,
+
+ if (netif_running(netdev)) {
+ adapter->flags |= IAVF_FLAG_RESET_NEEDED;
+- queue_work(iavf_wq, &adapter->reset_task);
++ queue_work(adapter->wq, &adapter->reset_task);
+ }
+
+ return 0;
+@@ -1433,7 +1433,7 @@ static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
+ adapter->aq_required |= IAVF_FLAG_AQ_ADD_FDIR_FILTER;
+ spin_unlock_bh(&adapter->fdir_fltr_lock);
+
+- mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
++ mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
+
+ ret:
+ if (err && fltr)
+@@ -1474,7 +1474,7 @@ static int iavf_del_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
+ spin_unlock_bh(&adapter->fdir_fltr_lock);
+
+ if (fltr && fltr->state == IAVF_FDIR_FLTR_DEL_REQUEST)
+- mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
++ mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
+
+ return err;
+ }
+@@ -1658,7 +1658,7 @@ iavf_set_adv_rss_hash_opt(struct iavf_adapter *adapter,
+ spin_unlock_bh(&adapter->adv_rss_lock);
+
+ if (!err)
+- mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
++ mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
+
+ mutex_unlock(&adapter->crit_lock);
+
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
+index 260c55951c287..3dad834b9b8e5 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
++++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
+@@ -49,7 +49,6 @@ MODULE_DESCRIPTION("Intel(R) Ethernet Adaptive Virtual Function Network Driver")
+ MODULE_LICENSE("GPL v2");
+
+ static const struct net_device_ops iavf_netdev_ops;
+-struct workqueue_struct *iavf_wq;
+
+ int iavf_status_to_errno(enum iavf_status status)
+ {
+@@ -277,7 +276,7 @@ void iavf_schedule_reset(struct iavf_adapter *adapter)
+ if (!(adapter->flags &
+ (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED))) {
+ adapter->flags |= IAVF_FLAG_RESET_NEEDED;
+- queue_work(iavf_wq, &adapter->reset_task);
++ queue_work(adapter->wq, &adapter->reset_task);
+ }
+ }
+
+@@ -291,7 +290,7 @@ void iavf_schedule_reset(struct iavf_adapter *adapter)
+ void iavf_schedule_request_stats(struct iavf_adapter *adapter)
+ {
+ adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_STATS;
+- mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
++ mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
+ }
+
+ /**
+@@ -411,7 +410,7 @@ static irqreturn_t iavf_msix_aq(int irq, void *data)
+
+ if (adapter->state != __IAVF_REMOVE)
+ /* schedule work on the private workqueue */
+- queue_work(iavf_wq, &adapter->adminq_task);
++ queue_work(adapter->wq, &adapter->adminq_task);
+
+ return IRQ_HANDLED;
+ }
+@@ -1034,7 +1033,7 @@ int iavf_replace_primary_mac(struct iavf_adapter *adapter,
+
+ /* schedule the watchdog task to immediately process the request */
+ if (f) {
+- queue_work(iavf_wq, &adapter->watchdog_task.work);
++ mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
+ return 0;
+ }
+ return -ENOMEM;
+@@ -1257,7 +1256,7 @@ static void iavf_up_complete(struct iavf_adapter *adapter)
+ adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_QUEUES;
+ if (CLIENT_ENABLED(adapter))
+ adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_OPEN;
+- mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
++ mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
+ }
+
+ /**
+@@ -1414,7 +1413,7 @@ void iavf_down(struct iavf_adapter *adapter)
+ adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES;
+ }
+
+- mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
++ mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
+ }
+
+ /**
+@@ -2248,7 +2247,7 @@ iavf_set_vlan_offload_features(struct iavf_adapter *adapter,
+
+ if (aq_required) {
+ adapter->aq_required |= aq_required;
+- mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
++ mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
+ }
+ }
+
+@@ -2700,7 +2699,7 @@ static void iavf_watchdog_task(struct work_struct *work)
+ adapter->aq_required = 0;
+ adapter->current_op = VIRTCHNL_OP_UNKNOWN;
+ mutex_unlock(&adapter->crit_lock);
+- queue_work(iavf_wq, &adapter->reset_task);
++ queue_work(adapter->wq, &adapter->reset_task);
+ return;
+ }
+
+@@ -2708,31 +2707,31 @@ static void iavf_watchdog_task(struct work_struct *work)
+ case __IAVF_STARTUP:
+ iavf_startup(adapter);
+ mutex_unlock(&adapter->crit_lock);
+- queue_delayed_work(iavf_wq, &adapter->watchdog_task,
++ queue_delayed_work(adapter->wq, &adapter->watchdog_task,
+ msecs_to_jiffies(30));
+ return;
+ case __IAVF_INIT_VERSION_CHECK:
+ iavf_init_version_check(adapter);
+ mutex_unlock(&adapter->crit_lock);
+- queue_delayed_work(iavf_wq, &adapter->watchdog_task,
++ queue_delayed_work(adapter->wq, &adapter->watchdog_task,
+ msecs_to_jiffies(30));
+ return;
+ case __IAVF_INIT_GET_RESOURCES:
+ iavf_init_get_resources(adapter);
+ mutex_unlock(&adapter->crit_lock);
+- queue_delayed_work(iavf_wq, &adapter->watchdog_task,
++ queue_delayed_work(adapter->wq, &adapter->watchdog_task,
+ msecs_to_jiffies(1));
+ return;
+ case __IAVF_INIT_EXTENDED_CAPS:
+ iavf_init_process_extended_caps(adapter);
+ mutex_unlock(&adapter->crit_lock);
+- queue_delayed_work(iavf_wq, &adapter->watchdog_task,
++ queue_delayed_work(adapter->wq, &adapter->watchdog_task,
+ msecs_to_jiffies(1));
+ return;
+ case __IAVF_INIT_CONFIG_ADAPTER:
+ iavf_init_config_adapter(adapter);
+ mutex_unlock(&adapter->crit_lock);
+- queue_delayed_work(iavf_wq, &adapter->watchdog_task,
++ queue_delayed_work(adapter->wq, &adapter->watchdog_task,
+ msecs_to_jiffies(1));
+ return;
+ case __IAVF_INIT_FAILED:
+@@ -2751,14 +2750,14 @@ static void iavf_watchdog_task(struct work_struct *work)
+ adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
+ iavf_shutdown_adminq(hw);
+ mutex_unlock(&adapter->crit_lock);
+- queue_delayed_work(iavf_wq,
++ queue_delayed_work(adapter->wq,
+ &adapter->watchdog_task, (5 * HZ));
+ return;
+ }
+ /* Try again from failed step*/
+ iavf_change_state(adapter, adapter->last_state);
+ mutex_unlock(&adapter->crit_lock);
+- queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ);
++ queue_delayed_work(adapter->wq, &adapter->watchdog_task, HZ);
+ return;
+ case __IAVF_COMM_FAILED:
+ if (test_bit(__IAVF_IN_REMOVE_TASK,
+@@ -2789,13 +2788,14 @@ static void iavf_watchdog_task(struct work_struct *work)
+ adapter->aq_required = 0;
+ adapter->current_op = VIRTCHNL_OP_UNKNOWN;
+ mutex_unlock(&adapter->crit_lock);
+- queue_delayed_work(iavf_wq,
++ queue_delayed_work(adapter->wq,
+ &adapter->watchdog_task,
+ msecs_to_jiffies(10));
+ return;
+ case __IAVF_RESETTING:
+ mutex_unlock(&adapter->crit_lock);
+- queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2);
++ queue_delayed_work(adapter->wq, &adapter->watchdog_task,
++ HZ * 2);
+ return;
+ case __IAVF_DOWN:
+ case __IAVF_DOWN_PENDING:
+@@ -2834,9 +2834,9 @@ static void iavf_watchdog_task(struct work_struct *work)
+ adapter->aq_required = 0;
+ adapter->current_op = VIRTCHNL_OP_UNKNOWN;
+ dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
+- queue_work(iavf_wq, &adapter->reset_task);
++ queue_work(adapter->wq, &adapter->reset_task);
+ mutex_unlock(&adapter->crit_lock);
+- queue_delayed_work(iavf_wq,
++ queue_delayed_work(adapter->wq,
+ &adapter->watchdog_task, HZ * 2);
+ return;
+ }
+@@ -2845,12 +2845,13 @@ static void iavf_watchdog_task(struct work_struct *work)
+ mutex_unlock(&adapter->crit_lock);
+ restart_watchdog:
+ if (adapter->state >= __IAVF_DOWN)
+- queue_work(iavf_wq, &adapter->adminq_task);
++ queue_work(adapter->wq, &adapter->adminq_task);
+ if (adapter->aq_required)
+- queue_delayed_work(iavf_wq, &adapter->watchdog_task,
++ queue_delayed_work(adapter->wq, &adapter->watchdog_task,
+ msecs_to_jiffies(20));
+ else
+- queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2);
++ queue_delayed_work(adapter->wq, &adapter->watchdog_task,
++ HZ * 2);
+ }
+
+ /**
+@@ -2952,7 +2953,7 @@ static void iavf_reset_task(struct work_struct *work)
+ */
+ if (!mutex_trylock(&adapter->crit_lock)) {
+ if (adapter->state != __IAVF_REMOVE)
+- queue_work(iavf_wq, &adapter->reset_task);
++ queue_work(adapter->wq, &adapter->reset_task);
+
+ goto reset_finish;
+ }
+@@ -3116,7 +3117,7 @@ continue_reset:
+ bitmap_clear(adapter->vsi.active_cvlans, 0, VLAN_N_VID);
+ bitmap_clear(adapter->vsi.active_svlans, 0, VLAN_N_VID);
+
+- mod_delayed_work(iavf_wq, &adapter->watchdog_task, 2);
++ mod_delayed_work(adapter->wq, &adapter->watchdog_task, 2);
+
+ /* We were running when the reset started, so we need to restore some
+ * state here.
+@@ -3208,7 +3209,7 @@ static void iavf_adminq_task(struct work_struct *work)
+ if (adapter->state == __IAVF_REMOVE)
+ return;
+
+- queue_work(iavf_wq, &adapter->adminq_task);
++ queue_work(adapter->wq, &adapter->adminq_task);
+ goto out;
+ }
+
+@@ -4349,7 +4350,7 @@ static int iavf_change_mtu(struct net_device *netdev, int new_mtu)
+
+ if (netif_running(netdev)) {
+ adapter->flags |= IAVF_FLAG_RESET_NEEDED;
+- queue_work(iavf_wq, &adapter->reset_task);
++ queue_work(adapter->wq, &adapter->reset_task);
+ }
+
+ return 0;
+@@ -4898,6 +4899,13 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ hw = &adapter->hw;
+ hw->back = adapter;
+
++ adapter->wq = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM,
++ iavf_driver_name);
++ if (!adapter->wq) {
++ err = -ENOMEM;
++ goto err_alloc_wq;
++ }
++
+ adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
+ iavf_change_state(adapter, __IAVF_STARTUP);
+
+@@ -4942,7 +4950,7 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ INIT_WORK(&adapter->adminq_task, iavf_adminq_task);
+ INIT_DELAYED_WORK(&adapter->watchdog_task, iavf_watchdog_task);
+ INIT_DELAYED_WORK(&adapter->client_task, iavf_client_task);
+- queue_delayed_work(iavf_wq, &adapter->watchdog_task,
++ queue_delayed_work(adapter->wq, &adapter->watchdog_task,
+ msecs_to_jiffies(5 * (pdev->devfn & 0x07)));
+
+ /* Setup the wait queue for indicating transition to down status */
+@@ -4954,6 +4962,8 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ return 0;
+
+ err_ioremap:
++ destroy_workqueue(adapter->wq);
++err_alloc_wq:
+ free_netdev(netdev);
+ err_alloc_etherdev:
+ pci_disable_pcie_error_reporting(pdev);
+@@ -5023,7 +5033,7 @@ static int __maybe_unused iavf_resume(struct device *dev_d)
+ return err;
+ }
+
+- queue_work(iavf_wq, &adapter->reset_task);
++ queue_work(adapter->wq, &adapter->reset_task);
+
+ netif_device_attach(adapter->netdev);
+
+@@ -5170,6 +5180,8 @@ static void iavf_remove(struct pci_dev *pdev)
+ }
+ spin_unlock_bh(&adapter->adv_rss_lock);
+
++ destroy_workqueue(adapter->wq);
++
+ free_netdev(netdev);
+
+ pci_disable_pcie_error_reporting(pdev);
+@@ -5196,24 +5208,11 @@ static struct pci_driver iavf_driver = {
+ **/
+ static int __init iavf_init_module(void)
+ {
+- int ret;
+-
+ pr_info("iavf: %s\n", iavf_driver_string);
+
+ pr_info("%s\n", iavf_copyright);
+
+- iavf_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1,
+- iavf_driver_name);
+- if (!iavf_wq) {
+- pr_err("%s: Failed to create workqueue\n", iavf_driver_name);
+- return -ENOMEM;
+- }
+-
+- ret = pci_register_driver(&iavf_driver);
+- if (ret)
+- destroy_workqueue(iavf_wq);
+-
+- return ret;
++ return pci_register_driver(&iavf_driver);
+ }
+
+ module_init(iavf_init_module);
+@@ -5227,7 +5226,6 @@ module_init(iavf_init_module);
+ static void __exit iavf_exit_module(void)
+ {
+ pci_unregister_driver(&iavf_driver);
+- destroy_workqueue(iavf_wq);
+ }
+
+ module_exit(iavf_exit_module);
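+
+Replacing the single module-global iavf_wq with a per-adapter ordered
+workqueue means one device's reset and watchdog work no longer
+serializes behind every other adapter's, and each queue can be torn
+down with its own device. The allocation/teardown shape in brief
+(sketch; error unwinding abbreviated):
+
+/* probe: one ordered (max_active == 1) queue per adapter */
+adapter->wq = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM,
+                                      iavf_driver_name);
+if (!adapter->wq)
+        return -ENOMEM;
+
+queue_work(adapter->wq, &adapter->reset_task);
+
+/* remove: drains and frees the queue and its pending work */
+destroy_workqueue(adapter->wq);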
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+index 24a701fd140e9..0752fd67c96e5 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
++++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+@@ -1952,7 +1952,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
+ if (!(adapter->flags & IAVF_FLAG_RESET_PENDING)) {
+ adapter->flags |= IAVF_FLAG_RESET_PENDING;
+ dev_info(&adapter->pdev->dev, "Scheduling reset task\n");
+- queue_work(iavf_wq, &adapter->reset_task);
++ queue_work(adapter->wq, &adapter->reset_task);
+ }
+ break;
+ default:
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/htb.c b/drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
+index 6dac76fa58a3f..09d441ecb9f6d 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
+@@ -637,7 +637,7 @@ mlx5e_htb_update_children(struct mlx5e_htb *htb, struct mlx5e_qos_node *node,
+ if (child->bw_share == old_bw_share)
+ continue;
+
+- err_one = mlx5_qos_update_node(htb->mdev, child->hw_id, child->bw_share,
++ err_one = mlx5_qos_update_node(htb->mdev, child->bw_share,
+ child->max_average_bw, child->hw_id);
+ if (!err && err_one) {
+ err = err_one;
+@@ -671,7 +671,7 @@ mlx5e_htb_node_modify(struct mlx5e_htb *htb, u16 classid, u64 rate, u64 ceil,
+ mlx5e_htb_convert_rate(htb, rate, node->parent, &bw_share);
+ mlx5e_htb_convert_ceil(htb, ceil, &max_average_bw);
+
+- err = mlx5_qos_update_node(htb->mdev, node->parent->hw_id, bw_share,
++ err = mlx5_qos_update_node(htb->mdev, bw_share,
+ max_average_bw, node->hw_id);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Firmware error when modifying a node.");
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c
+index 1cbd2eb9d04f9..f2c2c752bd1c3 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c
+@@ -477,7 +477,6 @@ mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample,
+ struct mlx5e_sample_flow *sample_flow;
+ struct mlx5e_sample_attr *sample_attr;
+ struct mlx5_flow_attr *pre_attr;
+- u32 tunnel_id = attr->tunnel_id;
+ struct mlx5_eswitch *esw;
+ u32 default_tbl_id;
+ u32 obj_id;
+@@ -522,7 +521,7 @@ mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample,
+ restore_obj.sample.group_id = sample_attr->group_num;
+ restore_obj.sample.rate = sample_attr->rate;
+ restore_obj.sample.trunc_size = sample_attr->trunc_size;
+- restore_obj.sample.tunnel_id = tunnel_id;
++ restore_obj.sample.tunnel_id = attr->tunnel_id;
+ err = mapping_add(esw->offloads.reg_c0_obj_pool, &restore_obj, &obj_id);
+ if (err)
+ goto err_obj_id;
+@@ -548,7 +547,7 @@ mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample,
+ /* For decap action, do decap in the original flow table instead of the
+ * default flow table.
+ */
+- if (tunnel_id)
++ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_DECAP)
+ pre_attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
+ pre_attr->modify_hdr = sample_flow->restore->modify_hdr;
+ pre_attr->flags = MLX5_ATTR_FLAG_SAMPLE;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+index 4c313b7424bf5..c1cf3917baa43 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+@@ -157,6 +157,7 @@ struct mlx5_fs_chains *mlx5e_nic_chains(struct mlx5e_tc_table *tc)
+ * it's different than the ht->mutex here.
+ */
+ static struct lock_class_key tc_ht_lock_key;
++static struct lock_class_key tc_ht_wq_key;
+
+ static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow);
+ static void free_flow_post_acts(struct mlx5e_tc_flow *flow);
+@@ -4971,6 +4972,7 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
+ return err;
+
+ lockdep_set_class(&tc->ht.mutex, &tc_ht_lock_key);
++ lockdep_init_map(&tc->ht.run_work.lockdep_map, "tc_ht_wq_key", &tc_ht_wq_key, 0);
+
+ mapping_id = mlx5_query_nic_system_image_guid(dev);
+
+@@ -5077,6 +5079,7 @@ int mlx5e_tc_ht_init(struct rhashtable *tc_ht)
+ return err;
+
+ lockdep_set_class(&tc_ht->mutex, &tc_ht_lock_key);
++ lockdep_init_map(&tc_ht->run_work.lockdep_map, "tc_ht_wq_key", &tc_ht_wq_key, 0);
+
+ return 0;
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
+index 4f8a24d84a86a..75015d370922e 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
+@@ -22,15 +22,13 @@ struct mlx5_esw_rate_group {
+ };
+
+ static int esw_qos_tsar_config(struct mlx5_core_dev *dev, u32 *sched_ctx,
+- u32 parent_ix, u32 tsar_ix,
+- u32 max_rate, u32 bw_share)
++ u32 tsar_ix, u32 max_rate, u32 bw_share)
+ {
+ u32 bitmask = 0;
+
+ if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling))
+ return -EOPNOTSUPP;
+
+- MLX5_SET(scheduling_context, sched_ctx, parent_element_id, parent_ix);
+ MLX5_SET(scheduling_context, sched_ctx, max_average_bw, max_rate);
+ MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share);
+ bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW;
+@@ -51,7 +49,7 @@ static int esw_qos_group_config(struct mlx5_eswitch *esw, struct mlx5_esw_rate_g
+ int err;
+
+ err = esw_qos_tsar_config(dev, sched_ctx,
+- esw->qos.root_tsar_ix, group->tsar_ix,
++ group->tsar_ix,
+ max_rate, bw_share);
+ if (err)
+ NL_SET_ERR_MSG_MOD(extack, "E-Switch modify group TSAR element failed");
+@@ -67,23 +65,13 @@ static int esw_qos_vport_config(struct mlx5_eswitch *esw,
+ struct netlink_ext_ack *extack)
+ {
+ u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
+- struct mlx5_esw_rate_group *group = vport->qos.group;
+ struct mlx5_core_dev *dev = esw->dev;
+- u32 parent_tsar_ix;
+- void *vport_elem;
+ int err;
+
+ if (!vport->qos.enabled)
+ return -EIO;
+
+- parent_tsar_ix = group ? group->tsar_ix : esw->qos.root_tsar_ix;
+- MLX5_SET(scheduling_context, sched_ctx, element_type,
+- SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
+- vport_elem = MLX5_ADDR_OF(scheduling_context, sched_ctx,
+- element_attributes);
+- MLX5_SET(vport_element, vport_elem, vport_number, vport->vport);
+-
+- err = esw_qos_tsar_config(dev, sched_ctx, parent_tsar_ix, vport->qos.esw_tsar_ix,
++ err = esw_qos_tsar_config(dev, sched_ctx, vport->qos.esw_tsar_ix,
+ max_rate, bw_share);
+ if (err) {
+ esw_warn(esw->dev,
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+index 788a6ab5c4636..43ba00d5e36ec 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+@@ -1421,6 +1421,7 @@ void mlx5_eswitch_disable(struct mlx5_eswitch *esw)
+ mlx5_lag_disable_change(esw->dev);
+ down_write(&esw->mode_lock);
+ mlx5_eswitch_disable_locked(esw);
++ esw->mode = MLX5_ESWITCH_LEGACY;
+ up_write(&esw->mode_lock);
+ mlx5_lag_enable_change(esw->dev);
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+index 00758312df065..d4db1adae3e3d 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+@@ -2082,7 +2082,7 @@ static void mlx5_core_verify_params(void)
+ }
+ }
+
+-static int __init init(void)
++static int __init mlx5_init(void)
+ {
+ int err;
+
+@@ -2117,7 +2117,7 @@ err_debug:
+ return err;
+ }
+
+-static void __exit cleanup(void)
++static void __exit mlx5_cleanup(void)
+ {
+ mlx5e_cleanup();
+ mlx5_sf_driver_unregister();
+@@ -2125,5 +2125,5 @@ static void __exit cleanup(void)
+ mlx5_unregister_debugfs();
+ }
+
+-module_init(init);
+-module_exit(cleanup);
++module_init(mlx5_init);
++module_exit(mlx5_cleanup);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/qos.c
+index 0777be24a3074..8bce730b5c5be 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/qos.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/qos.c
+@@ -62,13 +62,12 @@ int mlx5_qos_create_root_node(struct mlx5_core_dev *mdev, u32 *id)
+ return mlx5_qos_create_inner_node(mdev, MLX5_QOS_DEFAULT_DWRR_UID, 0, 0, id);
+ }
+
+-int mlx5_qos_update_node(struct mlx5_core_dev *mdev, u32 parent_id,
++int mlx5_qos_update_node(struct mlx5_core_dev *mdev,
+ u32 bw_share, u32 max_avg_bw, u32 id)
+ {
+ u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
+ u32 bitmask = 0;
+
+- MLX5_SET(scheduling_context, sched_ctx, parent_element_id, parent_id);
+ MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share);
+ MLX5_SET(scheduling_context, sched_ctx, max_average_bw, max_avg_bw);
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qos.h b/drivers/net/ethernet/mellanox/mlx5/core/qos.h
+index 125e4e47e6f71..624ce822b7f59 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/qos.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/qos.h
+@@ -23,7 +23,7 @@ int mlx5_qos_create_leaf_node(struct mlx5_core_dev *mdev, u32 parent_id,
+ int mlx5_qos_create_inner_node(struct mlx5_core_dev *mdev, u32 parent_id,
+ u32 bw_share, u32 max_avg_bw, u32 *id);
+ int mlx5_qos_create_root_node(struct mlx5_core_dev *mdev, u32 *id);
+-int mlx5_qos_update_node(struct mlx5_core_dev *mdev, u32 parent_id, u32 bw_share,
++int mlx5_qos_update_node(struct mlx5_core_dev *mdev, u32 bw_share,
+ u32 max_avg_bw, u32 id);
+ int mlx5_qos_destroy_node(struct mlx5_core_dev *mdev, u32 id);
+
+diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
+index 20ee5b28f70a5..569108c49cbc5 100644
+--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
++++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
+@@ -1022,11 +1022,6 @@ static int lan966x_probe(struct platform_device *pdev)
+ lan966x->base_mac[5] &= 0xf0;
+ }
+
+- ports = device_get_named_child_node(&pdev->dev, "ethernet-ports");
+- if (!ports)
+- return dev_err_probe(&pdev->dev, -ENODEV,
+- "no ethernet-ports child found\n");
+-
+ err = lan966x_create_targets(pdev, lan966x);
+ if (err)
+ return dev_err_probe(&pdev->dev, err,
+@@ -1104,6 +1099,11 @@ static int lan966x_probe(struct platform_device *pdev)
+ }
+ }
+
++ ports = device_get_named_child_node(&pdev->dev, "ethernet-ports");
++ if (!ports)
++ return dev_err_probe(&pdev->dev, -ENODEV,
++ "no ethernet-ports child found\n");
++
+ /* init switch */
+ lan966x_init(lan966x);
+ lan966x_stats_init(lan966x);
+@@ -1138,6 +1138,8 @@ static int lan966x_probe(struct platform_device *pdev)
+ lan966x_port_init(lan966x->ports[p]);
+ }
+
++ fwnode_handle_put(ports);
++
+ lan966x_mdb_init(lan966x);
+ err = lan966x_fdb_init(lan966x);
+ if (err)
+@@ -1160,6 +1162,7 @@ cleanup_fdb:
+ lan966x_fdb_deinit(lan966x);
+
+ cleanup_ports:
++ fwnode_handle_put(ports);
+ fwnode_handle_put(portnp);
+
+ lan966x_cleanup_ports(lan966x);
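The lan966x reordering above follows the fwnode reference rule: device_get_named_child_node() returns a counted reference, so taking it early and bailing out on any later error leaked it. Acquiring the node just before it is used and dropping it on every exit path is the usual shape, sketched here with a hypothetical setup_ports() helper:

	struct fwnode_handle *ports;
	int err;

	ports = device_get_named_child_node(dev, "ethernet-ports"); /* +ref */
	if (!ports)
		return -ENODEV;

	err = setup_ports(ports);	/* illustrative helper */
	fwnode_handle_put(ports);	/* -ref on success and failure alike */
	if (err)
		return err;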
+diff --git a/drivers/net/ethernet/microsoft/mana/gdma.h b/drivers/net/ethernet/microsoft/mana/gdma.h
+index 65c24ee49efd9..48b0ab56bdb0a 100644
+--- a/drivers/net/ethernet/microsoft/mana/gdma.h
++++ b/drivers/net/ethernet/microsoft/mana/gdma.h
+@@ -324,9 +324,12 @@ struct gdma_queue_spec {
+ };
+ };
+
++#define MANA_IRQ_NAME_SZ 32
++
+ struct gdma_irq_context {
+ void (*handler)(void *arg);
+ void *arg;
++ char name[MANA_IRQ_NAME_SZ];
+ };
+
+ struct gdma_context {
+diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
+index a6f99b4344d93..d674ebda2053d 100644
+--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
++++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
+@@ -1233,13 +1233,20 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
+ gic->handler = NULL;
+ gic->arg = NULL;
+
++ if (!i)
++ snprintf(gic->name, MANA_IRQ_NAME_SZ, "mana_hwc@pci:%s",
++ pci_name(pdev));
++ else
++ snprintf(gic->name, MANA_IRQ_NAME_SZ, "mana_q%d@pci:%s",
++ i - 1, pci_name(pdev));
++
+ irq = pci_irq_vector(pdev, i);
+ if (irq < 0) {
+ err = irq;
+ goto free_irq;
+ }
+
+- err = request_irq(irq, mana_gd_intr, 0, "mana_intr", gic);
++ err = request_irq(irq, mana_gd_intr, 0, gic->name, gic);
+ if (err)
+ goto free_irq;
+ }
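request_irq() keeps the name pointer it is given (it is what /proc/interrupts prints) rather than copying the string, so a per-vector name must live in storage that outlasts the IRQ — hence the new name[MANA_IRQ_NAME_SZ] field in the per-IRQ context instead of the shared "mana_intr" literal. A hedged sketch of the lifetime rule, with illustrative my_* names:

#include <linux/interrupt.h>
#include <linux/pci.h>

#define MY_IRQ_NAME_SZ 32

struct my_irq_ctx {
	void (*handler)(void *arg);
	void *arg;
	char name[MY_IRQ_NAME_SZ];	/* must outlive request_irq() */
};

static int my_setup_one_irq(struct pci_dev *pdev, struct my_irq_ctx *ctx,
			    int index, irq_handler_t thunk)
{
	int irq = pci_irq_vector(pdev, index);

	if (irq < 0)
		return irq;

	/* Format once into stable storage; the kernel stores the pointer. */
	snprintf(ctx->name, MY_IRQ_NAME_SZ, "my_q%d@pci:%s",
		 index, pci_name(pdev));
	return request_irq(irq, thunk, 0, ctx->name, ctx);
}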
+diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
+index b4e0fc7f65bdf..0f54849a38235 100644
+--- a/drivers/net/ethernet/renesas/ravb_main.c
++++ b/drivers/net/ethernet/renesas/ravb_main.c
+@@ -1101,14 +1101,14 @@ static void ravb_error_interrupt(struct net_device *ndev)
+ ravb_write(ndev, ~(EIS_QFS | EIS_RESERVED), EIS);
+ if (eis & EIS_QFS) {
+ ris2 = ravb_read(ndev, RIS2);
+- ravb_write(ndev, ~(RIS2_QFF0 | RIS2_RFFF | RIS2_RESERVED),
++ ravb_write(ndev, ~(RIS2_QFF0 | RIS2_QFF1 | RIS2_RFFF | RIS2_RESERVED),
+ RIS2);
+
+ /* Receive Descriptor Empty int */
+ if (ris2 & RIS2_QFF0)
+ priv->stats[RAVB_BE].rx_over_errors++;
+
+- /* Receive Descriptor Empty int */
++ /* Receive Descriptor Empty int */
+ if (ris2 & RIS2_QFF1)
+ priv->stats[RAVB_NC].rx_over_errors++;
+
+@@ -2973,6 +2973,9 @@ static int __maybe_unused ravb_suspend(struct device *dev)
+ else
+ ret = ravb_close(ndev);
+
++ if (priv->info->ccc_gac)
++ ravb_ptp_stop(ndev);
++
+ return ret;
+ }
+
+@@ -3011,6 +3014,9 @@ static int __maybe_unused ravb_resume(struct device *dev)
+ /* Restore descriptor base address table */
+ ravb_write(ndev, priv->desc_bat_dma, DBAT);
+
++ if (priv->info->ccc_gac)
++ ravb_ptp_init(ndev, priv->pdev);
++
+ if (netif_running(ndev)) {
+ if (priv->wol_enabled) {
+ ret = ravb_wol_restore(ndev);
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
+index 9c2d40f853ed0..413f660172199 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
+@@ -186,11 +186,25 @@ static void dwmac5_handle_dma_err(struct net_device *ndev,
+ int dwmac5_safety_feat_config(void __iomem *ioaddr, unsigned int asp,
+ struct stmmac_safety_feature_cfg *safety_feat_cfg)
+ {
++ struct stmmac_safety_feature_cfg all_safety_feats = {
++ .tsoee = 1,
++ .mrxpee = 1,
++ .mestee = 1,
++ .mrxee = 1,
++ .mtxee = 1,
++ .epsi = 1,
++ .edpp = 1,
++ .prtyen = 1,
++ .tmouten = 1,
++ };
+ u32 value;
+
+ if (!asp)
+ return -EINVAL;
+
++ if (!safety_feat_cfg)
++ safety_feat_cfg = &all_safety_feats;
++
+ /* 1. Enable Safety Features */
+ value = readl(ioaddr + MTL_ECC_CONTROL);
+ value |= MEEAO; /* MTL ECC Error Addr Status Override */
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+index f453b0d093663..35c8dd92d3692 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+@@ -551,16 +551,16 @@ static void stmmac_get_per_qstats(struct stmmac_priv *priv, u64 *data)
+ p = (char *)priv + offsetof(struct stmmac_priv,
+ xstats.txq_stats[q].tx_pkt_n);
+ for (stat = 0; stat < STMMAC_TXQ_STATS; stat++) {
+- *data++ = (*(u64 *)p);
+- p += sizeof(u64 *);
++ *data++ = (*(unsigned long *)p);
++ p += sizeof(unsigned long);
+ }
+ }
+ for (q = 0; q < rx_cnt; q++) {
+ p = (char *)priv + offsetof(struct stmmac_priv,
+ xstats.rxq_stats[q].rx_pkt_n);
+ for (stat = 0; stat < STMMAC_RXQ_STATS; stat++) {
+- *data++ = (*(u64 *)p);
+- p += sizeof(u64 *);
++ *data++ = (*(unsigned long *)p);
++ p += sizeof(unsigned long);
+ }
+ }
+ }
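Two size bugs are fixed in one hunk here: the per-queue counters are unsigned long, but the old loop loaded them as u64 and advanced the cursor by sizeof(u64 *) — the size of a pointer. On 64-bit builds all three sizes are 8 bytes, which hid the bug; on 32-bit the 8-byte load drags in the neighbouring counter even though the pointer-sized stride happens to match. A standalone program makes the distinction visible:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* On a 32-bit target this prints 4, 8, 4: loading uint64_t reads
	 * two adjacent unsigned long counters at once, and the sizeof of
	 * a pointer is unrelated to the element being walked.
	 */
	printf("sizeof(unsigned long) = %zu\n", sizeof(unsigned long));
	printf("sizeof(uint64_t)      = %zu\n", sizeof(uint64_t));
	printf("sizeof(uint64_t *)    = %zu\n", sizeof(uint64_t *));
	return 0;
}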
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index feb209d4b991e..4bba0444c764a 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -1148,6 +1148,11 @@ static int stmmac_init_phy(struct net_device *dev)
+ int addr = priv->plat->phy_addr;
+ struct phy_device *phydev;
+
++ if (addr < 0) {
++ netdev_err(priv->dev, "no phy found\n");
++ return -ENODEV;
++ }
++
+ phydev = mdiobus_get_phy(priv->mii, addr);
+ if (!phydev) {
+ netdev_err(priv->dev, "no phy at addr %d\n", addr);
+diff --git a/drivers/net/ipa/ipa_interrupt.c b/drivers/net/ipa/ipa_interrupt.c
+index c269432f9c2ee..a89a3e3ff81c3 100644
+--- a/drivers/net/ipa/ipa_interrupt.c
++++ b/drivers/net/ipa/ipa_interrupt.c
+@@ -127,6 +127,16 @@ out_power_put:
+ return IRQ_HANDLED;
+ }
+
++void ipa_interrupt_irq_disable(struct ipa *ipa)
++{
++ disable_irq(ipa->interrupt->irq);
++}
++
++void ipa_interrupt_irq_enable(struct ipa *ipa)
++{
++ enable_irq(ipa->interrupt->irq);
++}
++
+ /* Common function used to enable/disable TX_SUSPEND for an endpoint */
+ static void ipa_interrupt_suspend_control(struct ipa_interrupt *interrupt,
+ u32 endpoint_id, bool enable)
+diff --git a/drivers/net/ipa/ipa_interrupt.h b/drivers/net/ipa/ipa_interrupt.h
+index f31fd9965fdc6..8a1bd5b893932 100644
+--- a/drivers/net/ipa/ipa_interrupt.h
++++ b/drivers/net/ipa/ipa_interrupt.h
+@@ -85,6 +85,22 @@ void ipa_interrupt_suspend_clear_all(struct ipa_interrupt *interrupt);
+ */
+ void ipa_interrupt_simulate_suspend(struct ipa_interrupt *interrupt);
+
++/**
++ * ipa_interrupt_irq_enable() - Enable IPA interrupts
++ * @ipa: IPA pointer
++ *
++ * This enables the IPA interrupt line
++ */
++void ipa_interrupt_irq_enable(struct ipa *ipa);
++
++/**
++ * ipa_interrupt_irq_disable() - Disable IPA interrupts
++ * @ipa: IPA pointer
++ *
++ * This disables the IPA interrupt line
++ */
++void ipa_interrupt_irq_disable(struct ipa *ipa);
++
+ /**
+ * ipa_interrupt_config() - Configure the IPA interrupt framework
+ * @ipa: IPA pointer
+diff --git a/drivers/net/ipa/ipa_power.c b/drivers/net/ipa/ipa_power.c
+index 8420f93128a26..8057be8cda801 100644
+--- a/drivers/net/ipa/ipa_power.c
++++ b/drivers/net/ipa/ipa_power.c
+@@ -181,6 +181,17 @@ static int ipa_suspend(struct device *dev)
+
+ __set_bit(IPA_POWER_FLAG_SYSTEM, ipa->power->flags);
+
++ /* Increment the disable depth to ensure that the IRQ won't
++ * be re-enabled until the matching _enable call in
++ * ipa_resume(). We do this to ensure that the interrupt
++ * handler won't run whilst PM runtime is disabled.
++ *
++ * Note that disabling the IRQ is NOT the same as disabling
++ * irq wake. If wakeup is enabled for the IPA then the IRQ
++ * will still cause the system to wake up, see irq_set_irq_wake().
++ */
++ ipa_interrupt_irq_disable(ipa);
++
+ return pm_runtime_force_suspend(dev);
+ }
+
+@@ -193,6 +204,12 @@ static int ipa_resume(struct device *dev)
+
+ __clear_bit(IPA_POWER_FLAG_SYSTEM, ipa->power->flags);
+
++ /* Now that PM runtime is enabled again it's safe
++ * to turn the IRQ back on and process any data
++ * that was received during suspend.
++ */
++ ipa_interrupt_irq_enable(ipa);
++
+ return ret;
+ }
+
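disable_irq() raises a nesting depth and waits for a running handler to finish, so after the call above the IPA handler is guaranteed not to run until the matching enable_irq() in resume — exactly the window in which PM runtime is forced off. The suspend/resume shape, as a hedged sketch with hypothetical my_* names:

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/pm_runtime.h>

struct my_ctx { int irq; };

static int my_suspend(struct device *dev)
{
	struct my_ctx *ctx = dev_get_drvdata(dev);

	/* The handler cannot run past this point; a wake-enabled IRQ can
	 * still wake the system (irq_set_irq_wake() is separate).
	 */
	disable_irq(ctx->irq);
	return pm_runtime_force_suspend(dev);
}

static int my_resume(struct device *dev)
{
	struct my_ctx *ctx = dev_get_drvdata(dev);
	int ret = pm_runtime_force_resume(dev);

	enable_irq(ctx->irq);	/* pending interrupts are serviced now */
	return ret;
}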
+diff --git a/drivers/net/mdio/mdio-mux-meson-g12a.c b/drivers/net/mdio/mdio-mux-meson-g12a.c
+index 4a2e94faf57e2..c4542ecf56230 100644
+--- a/drivers/net/mdio/mdio-mux-meson-g12a.c
++++ b/drivers/net/mdio/mdio-mux-meson-g12a.c
+@@ -4,6 +4,7 @@
+ */
+
+ #include <linux/bitfield.h>
++#include <linux/delay.h>
+ #include <linux/clk.h>
+ #include <linux/clk-provider.h>
+ #include <linux/device.h>
+@@ -150,6 +151,7 @@ static const struct clk_ops g12a_ephy_pll_ops = {
+
+ static int g12a_enable_internal_mdio(struct g12a_mdio_mux *priv)
+ {
++ u32 value;
+ int ret;
+
+ /* Enable the phy clock */
+@@ -163,18 +165,25 @@ static int g12a_enable_internal_mdio(struct g12a_mdio_mux *priv)
+
+ /* Initialize ephy control */
+ writel(EPHY_G12A_ID, priv->regs + ETH_PHY_CNTL0);
+- writel(FIELD_PREP(PHY_CNTL1_ST_MODE, 3) |
+- FIELD_PREP(PHY_CNTL1_ST_PHYADD, EPHY_DFLT_ADD) |
+- FIELD_PREP(PHY_CNTL1_MII_MODE, EPHY_MODE_RMII) |
+- PHY_CNTL1_CLK_EN |
+- PHY_CNTL1_CLKFREQ |
+- PHY_CNTL1_PHY_ENB,
+- priv->regs + ETH_PHY_CNTL1);
++
++ /* Make sure we get a 0 -> 1 transition on the enable bit */
++ value = FIELD_PREP(PHY_CNTL1_ST_MODE, 3) |
++ FIELD_PREP(PHY_CNTL1_ST_PHYADD, EPHY_DFLT_ADD) |
++ FIELD_PREP(PHY_CNTL1_MII_MODE, EPHY_MODE_RMII) |
++ PHY_CNTL1_CLK_EN |
++ PHY_CNTL1_CLKFREQ;
++ writel(value, priv->regs + ETH_PHY_CNTL1);
+ writel(PHY_CNTL2_USE_INTERNAL |
+ PHY_CNTL2_SMI_SRC_MAC |
+ PHY_CNTL2_RX_CLK_EPHY,
+ priv->regs + ETH_PHY_CNTL2);
+
++ value |= PHY_CNTL1_PHY_ENB;
++ writel(value, priv->regs + ETH_PHY_CNTL1);
++
++ /* The phy needs a bit of time to power up */
++ mdelay(10);
++
+ return 0;
+ }
+
+diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
+index 1cd604cd1fa1b..16e021b477f06 100644
+--- a/drivers/net/phy/mdio_bus.c
++++ b/drivers/net/phy/mdio_bus.c
+@@ -108,7 +108,12 @@ EXPORT_SYMBOL(mdiobus_unregister_device);
+
+ struct phy_device *mdiobus_get_phy(struct mii_bus *bus, int addr)
+ {
+- struct mdio_device *mdiodev = bus->mdio_map[addr];
++ struct mdio_device *mdiodev;
++
++ if (addr < 0 || addr >= ARRAY_SIZE(bus->mdio_map))
++ return NULL;
++
++ mdiodev = bus->mdio_map[addr];
+
+ if (!mdiodev)
+ return NULL;
+diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
+index e11f70911acc1..fb5f59d0d55d7 100644
+--- a/drivers/net/usb/cdc_ether.c
++++ b/drivers/net/usb/cdc_ether.c
+@@ -1001,6 +1001,12 @@ static const struct usb_device_id products[] = {
+ USB_CDC_SUBCLASS_ETHERNET,
+ USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&wwan_info,
++}, {
++ /* Cinterion PLS62-W modem by GEMALTO/THALES */
++ USB_DEVICE_AND_INTERFACE_INFO(0x1e2d, 0x005b, USB_CLASS_COMM,
++ USB_CDC_SUBCLASS_ETHERNET,
++ USB_CDC_PROTO_NONE),
++ .driver_info = (unsigned long)&wwan_info,
+ }, {
+ /* Cinterion PLS83/PLS63 modem by GEMALTO/THALES */
+ USB_DEVICE_AND_INTERFACE_INFO(0x1e2d, 0x0069, USB_CLASS_COMM,
+diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
+index a481a1d831e2f..23da1d9dafd1f 100644
+--- a/drivers/net/usb/r8152.c
++++ b/drivers/net/usb/r8152.c
+@@ -9836,6 +9836,7 @@ static const struct usb_device_id rtl8152_table[] = {
+ REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07ab),
+ REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07c6),
+ REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0927),
++ REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0c5e),
+ REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101),
+ REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x304f),
+ REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3054),
+diff --git a/drivers/net/usb/sr9700.c b/drivers/net/usb/sr9700.c
+index 5a53e63d33a60..3164451e1010c 100644
+--- a/drivers/net/usb/sr9700.c
++++ b/drivers/net/usb/sr9700.c
+@@ -413,7 +413,7 @@ static int sr9700_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+ /* ignore the CRC length */
+ len = (skb->data[1] | (skb->data[2] << 8)) - 4;
+
+- if (len > ETH_FRAME_LEN || len > skb->len)
++ if (len > ETH_FRAME_LEN || len > skb->len || len < 0)
+ return 0;
+
+ /* the last packet of current skb */
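In the hunk above, len is a signed int computed from two device-supplied bytes minus 4, so a reported length below 4 goes negative. The added len < 0 check rejects that case explicitly instead of leaning on signed-to-unsigned promotion in len > skb->len (where a negative int wraps to a huge unsigned value). That promotion is easy to get backwards, as this runnable snippet shows:

#include <stdio.h>

int main(void)
{
	int len = 3 - 4;		/* device claimed a 3-byte frame */
	unsigned int skb_len = 64;

	/* len is converted to unsigned for the comparison and wraps to
	 * UINT_MAX, so the frame is rejected -- but only as a side effect
	 * of promotion, not by design.
	 */
	if (len > skb_len)
		printf("rejected via promotion (len wrapped to %u)\n",
		       (unsigned int)len);

	if (len < 0)	/* the explicit, self-documenting check */
		printf("rejected explicitly: len = %d\n", len);
	return 0;
}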
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index 86e52454b5b5c..3cd15f16090f1 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -1873,8 +1873,10 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
+ */
+ if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
+ netif_stop_subqueue(dev, qnum);
+- if (!use_napi &&
+- unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
++ if (use_napi) {
++ if (unlikely(!virtqueue_enable_cb_delayed(sq->vq)))
++ virtqueue_napi_schedule(&sq->napi, sq->vq);
++ } else if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
+ /* More just got used, free them then recheck. */
+ free_old_xmit_skbs(sq, false);
+ if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
+diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c
+index 22edea6ca4b81..1c53b55469270 100644
+--- a/drivers/net/wan/fsl_ucc_hdlc.c
++++ b/drivers/net/wan/fsl_ucc_hdlc.c
+@@ -1243,9 +1243,11 @@ static int ucc_hdlc_probe(struct platform_device *pdev)
+ free_dev:
+ free_netdev(dev);
+ undo_uhdlc_init:
+- iounmap(utdm->siram);
++ if (utdm)
++ iounmap(utdm->siram);
+ unmap_si_regs:
+- iounmap(utdm->si_regs);
++ if (utdm)
++ iounmap(utdm->si_regs);
+ free_utdm:
+ if (uhdlc_priv->tsa)
+ kfree(utdm);
+diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
+index 82a7458e01aec..bf72e5fd39cf4 100644
+--- a/drivers/net/wireless/rndis_wlan.c
++++ b/drivers/net/wireless/rndis_wlan.c
+@@ -696,8 +696,8 @@ static int rndis_query_oid(struct usbnet *dev, u32 oid, void *data, int *len)
+ struct rndis_query *get;
+ struct rndis_query_c *get_c;
+ } u;
+- int ret, buflen;
+- int resplen, respoffs, copylen;
++ int ret;
++ size_t buflen, resplen, respoffs, copylen;
+
+ buflen = *len + sizeof(*u.get);
+ if (buflen < CONTROL_BUFFER_SIZE)
+@@ -732,22 +732,15 @@ static int rndis_query_oid(struct usbnet *dev, u32 oid, void *data, int *len)
+
+ if (respoffs > buflen) {
+ /* Device returned data offset outside buffer, error. */
+- netdev_dbg(dev->net, "%s(%s): received invalid "
+- "data offset: %d > %d\n", __func__,
+- oid_to_string(oid), respoffs, buflen);
++ netdev_dbg(dev->net,
++ "%s(%s): received invalid data offset: %zu > %zu\n",
++ __func__, oid_to_string(oid), respoffs, buflen);
+
+ ret = -EINVAL;
+ goto exit_unlock;
+ }
+
+- if ((resplen + respoffs) > buflen) {
+- /* Device would have returned more data if buffer would
+- * have been big enough. Copy just the bits that we got.
+- */
+- copylen = buflen - respoffs;
+- } else {
+- copylen = resplen;
+- }
++ copylen = min(resplen, buflen - respoffs);
+
+ if (copylen > *len)
+ copylen = *len;
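Moving buflen/resplen/respoffs to size_t and collapsing the branch into copylen = min(resplen, buflen - respoffs) is safe because the preceding respoffs > buflen check guarantees the subtraction cannot wrap. The clamp-after-validate shape in standalone C, mirroring the driver's order of checks:

#include <stdio.h>
#include <stddef.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* How many response bytes may be copied out; 0 when the offset is bad. */
static size_t clamp_copy(size_t buflen, size_t respoffs, size_t resplen)
{
	if (respoffs > buflen)		/* offset outside the buffer */
		return 0;
	/* Safe: respoffs <= buflen, so buflen - respoffs cannot wrap. */
	return MIN(resplen, buflen - respoffs);
}

int main(void)
{
	printf("%zu\n", clamp_copy(128, 100, 64));	/* 28: truncated */
	printf("%zu\n", clamp_copy(128, 16, 64));	/* 64: fits      */
	printf("%zu\n", clamp_copy(128, 200, 64));	/* 0: rejected   */
	return 0;
}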
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index 1ded96d1bfd21..25ade4ce8e0a7 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -1088,7 +1088,7 @@ u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode)
+ if (ns) {
+ if (ns->head->effects)
+ effects = le32_to_cpu(ns->head->effects->iocs[opcode]);
+- if (ns->head->ids.csi == NVME_CAP_CSS_NVM)
++ if (ns->head->ids.csi == NVME_CSI_NVM)
+ effects |= nvme_known_nvm_effects(opcode);
+ if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC))
+ dev_warn_once(ctrl->device,
+@@ -3903,10 +3903,11 @@ static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj,
+ return a->mode;
+ }
+
+-static const struct attribute_group nvme_dev_attrs_group = {
++const struct attribute_group nvme_dev_attrs_group = {
+ .attrs = nvme_dev_attrs,
+ .is_visible = nvme_dev_attrs_are_visible,
+ };
++EXPORT_SYMBOL_GPL(nvme_dev_attrs_group);
+
+ static const struct attribute_group *nvme_dev_attr_groups[] = {
+ &nvme_dev_attrs_group,
+@@ -4839,8 +4840,7 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
+ EXPORT_SYMBOL_GPL(nvme_complete_async_event);
+
+ int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
+- const struct blk_mq_ops *ops, unsigned int flags,
+- unsigned int cmd_size)
++ const struct blk_mq_ops *ops, unsigned int cmd_size)
+ {
+ int ret;
+
+@@ -4850,7 +4850,9 @@ int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
+ if (ctrl->ops->flags & NVME_F_FABRICS)
+ set->reserved_tags = NVMF_RESERVED_TAGS;
+ set->numa_node = ctrl->numa_node;
+- set->flags = flags;
++ set->flags = BLK_MQ_F_NO_SCHED;
++ if (ctrl->ops->flags & NVME_F_BLOCKING)
++ set->flags |= BLK_MQ_F_BLOCKING;
+ set->cmd_size = cmd_size;
+ set->driver_data = ctrl;
+ set->nr_hw_queues = 1;
+@@ -4894,8 +4896,8 @@ void nvme_remove_admin_tag_set(struct nvme_ctrl *ctrl)
+ EXPORT_SYMBOL_GPL(nvme_remove_admin_tag_set);
+
+ int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
+- const struct blk_mq_ops *ops, unsigned int flags,
+- unsigned int nr_maps, unsigned int cmd_size)
++ const struct blk_mq_ops *ops, unsigned int nr_maps,
++ unsigned int cmd_size)
+ {
+ int ret;
+
+@@ -4904,7 +4906,9 @@ int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
+ set->queue_depth = ctrl->sqsize + 1;
+ set->reserved_tags = NVMF_RESERVED_TAGS;
+ set->numa_node = ctrl->numa_node;
+- set->flags = flags;
++ set->flags = BLK_MQ_F_SHOULD_MERGE;
++ if (ctrl->ops->flags & NVME_F_BLOCKING)
++ set->flags |= BLK_MQ_F_BLOCKING;
+ set->cmd_size = cmd_size,
+ set->driver_data = ctrl;
+ set->nr_hw_queues = ctrl->queue_count - 1;
+@@ -5080,7 +5084,10 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
+ ctrl->instance);
+ ctrl->device->class = nvme_class;
+ ctrl->device->parent = ctrl->dev;
+- ctrl->device->groups = nvme_dev_attr_groups;
++ if (ops->dev_attr_groups)
++ ctrl->device->groups = ops->dev_attr_groups;
++ else
++ ctrl->device->groups = nvme_dev_attr_groups;
+ ctrl->device->release = nvme_free_ctrl;
+ dev_set_drvdata(ctrl->device, ctrl);
+ ret = dev_set_name(ctrl->device, "nvme%d", ctrl->instance);
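The flags argument disappears from the two tag-set helpers because the core now derives the blk-mq flags itself: admin sets always get BLK_MQ_F_NO_SCHED, I/O sets get BLK_MQ_F_SHOULD_MERGE, and a transport whose queueing path may sleep (TCP, per the hunks further down) opts in once with NVME_F_BLOCKING on its ctrl ops instead of at every call site. The decision both helpers now make, condensed into one hedged sketch (my_tag_set_flags is illustrative, not a function in the patch):

static unsigned int my_tag_set_flags(const struct nvme_ctrl_ops *ops,
				     bool admin)
{
	unsigned int flags = admin ? BLK_MQ_F_NO_SCHED
				   : BLK_MQ_F_SHOULD_MERGE;

	if (ops->flags & NVME_F_BLOCKING)	/* ->queue_rq may sleep */
		flags |= BLK_MQ_F_BLOCKING;
	return flags;
}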
+diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
+index 20b0c29a9a341..6c3d469eed7e3 100644
+--- a/drivers/nvme/host/fc.c
++++ b/drivers/nvme/host/fc.c
+@@ -2903,7 +2903,7 @@ nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
+ nvme_fc_init_io_queues(ctrl);
+
+ ret = nvme_alloc_io_tag_set(&ctrl->ctrl, &ctrl->tag_set,
+- &nvme_fc_mq_ops, BLK_MQ_F_SHOULD_MERGE, 1,
++ &nvme_fc_mq_ops, 1,
+ struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
+ ctrl->lport->ops->fcprqst_priv_sz));
+ if (ret)
+@@ -3508,13 +3508,6 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
+
+ nvme_fc_init_queue(ctrl, 0);
+
+- ret = nvme_alloc_admin_tag_set(&ctrl->ctrl, &ctrl->admin_tag_set,
+- &nvme_fc_admin_mq_ops, BLK_MQ_F_NO_SCHED,
+- struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
+- ctrl->lport->ops->fcprqst_priv_sz));
+- if (ret)
+- goto out_free_queues;
+-
+ /*
+ * Would have been nice to init io queues tag set as well.
+ * However, we require interaction from the controller
+@@ -3524,10 +3517,17 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
+
+ ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_fc_ctrl_ops, 0);
+ if (ret)
+- goto out_cleanup_tagset;
++ goto out_free_queues;
+
+ /* at this point, teardown path changes to ref counting on nvme ctrl */
+
++ ret = nvme_alloc_admin_tag_set(&ctrl->ctrl, &ctrl->admin_tag_set,
++ &nvme_fc_admin_mq_ops,
++ struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
++ ctrl->lport->ops->fcprqst_priv_sz));
++ if (ret)
++ goto fail_ctrl;
++
+ spin_lock_irqsave(&rport->lock, flags);
+ list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list);
+ spin_unlock_irqrestore(&rport->lock, flags);
+@@ -3579,8 +3579,6 @@ fail_ctrl:
+
+ return ERR_PTR(-EIO);
+
+-out_cleanup_tagset:
+- nvme_remove_admin_tag_set(&ctrl->ctrl);
+ out_free_queues:
+ kfree(ctrl->queues);
+ out_free_ida:
+diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
+index cbda8a19409bf..01d90424af534 100644
+--- a/drivers/nvme/host/nvme.h
++++ b/drivers/nvme/host/nvme.h
+@@ -508,6 +508,9 @@ struct nvme_ctrl_ops {
+ unsigned int flags;
+ #define NVME_F_FABRICS (1 << 0)
+ #define NVME_F_METADATA_SUPPORTED (1 << 1)
++#define NVME_F_BLOCKING (1 << 2)
++
++ const struct attribute_group **dev_attr_groups;
+ int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
+ int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
+ int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
+@@ -738,12 +741,11 @@ void nvme_start_ctrl(struct nvme_ctrl *ctrl);
+ void nvme_stop_ctrl(struct nvme_ctrl *ctrl);
+ int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl);
+ int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
+- const struct blk_mq_ops *ops, unsigned int flags,
+- unsigned int cmd_size);
++ const struct blk_mq_ops *ops, unsigned int cmd_size);
+ void nvme_remove_admin_tag_set(struct nvme_ctrl *ctrl);
+ int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
+- const struct blk_mq_ops *ops, unsigned int flags,
+- unsigned int nr_maps, unsigned int cmd_size);
++ const struct blk_mq_ops *ops, unsigned int nr_maps,
++ unsigned int cmd_size);
+ void nvme_remove_io_tag_set(struct nvme_ctrl *ctrl);
+
+ void nvme_remove_namespaces(struct nvme_ctrl *ctrl);
+@@ -857,6 +859,7 @@ int nvme_dev_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags);
+ extern const struct attribute_group *nvme_ns_id_attr_groups[];
+ extern const struct pr_ops nvme_pr_ops;
+ extern const struct block_device_operations nvme_ns_head_ops;
++extern const struct attribute_group nvme_dev_attrs_group;
+
+ struct nvme_ns *nvme_find_path(struct nvme_ns_head *head);
+ #ifdef CONFIG_NVME_MULTIPATH
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index 115d81def5671..d839689af17ce 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -158,8 +158,6 @@ struct nvme_dev {
+ unsigned int nr_allocated_queues;
+ unsigned int nr_write_queues;
+ unsigned int nr_poll_queues;
+-
+- bool attrs_added;
+ };
+
+ static int io_queue_depth_set(const char *val, const struct kernel_param *kp)
+@@ -1367,7 +1365,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
+ else
+ nvme_poll_irqdisable(nvmeq);
+
+- if (blk_mq_request_completed(req)) {
++ if (blk_mq_rq_state(req) != MQ_RQ_IN_FLIGHT) {
+ dev_warn(dev->ctrl.device,
+ "I/O %d QID %d timeout, completion polled\n",
+ req->tag, nvmeq->qid);
+@@ -2241,11 +2239,17 @@ static struct attribute *nvme_pci_attrs[] = {
+ NULL,
+ };
+
+-static const struct attribute_group nvme_pci_attr_group = {
++static const struct attribute_group nvme_pci_dev_attrs_group = {
+ .attrs = nvme_pci_attrs,
+ .is_visible = nvme_pci_attrs_are_visible,
+ };
+
++static const struct attribute_group *nvme_pci_dev_attr_groups[] = {
++ &nvme_dev_attrs_group,
++ &nvme_pci_dev_attrs_group,
++ NULL,
++};
++
+ /*
+ * nirqs is the number of interrupts available for write and read
+ * queues. The core already reserved an interrupt for the admin queue.
+@@ -2935,10 +2939,6 @@ static void nvme_reset_work(struct work_struct *work)
+ goto out;
+ }
+
+- if (!dev->attrs_added && !sysfs_create_group(&dev->ctrl.device->kobj,
+- &nvme_pci_attr_group))
+- dev->attrs_added = true;
+-
+ nvme_start_ctrl(&dev->ctrl);
+ return;
+
+@@ -3011,6 +3011,7 @@ static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
+ .name = "pcie",
+ .module = THIS_MODULE,
+ .flags = NVME_F_METADATA_SUPPORTED,
++ .dev_attr_groups = nvme_pci_dev_attr_groups,
+ .reg_read32 = nvme_pci_reg_read32,
+ .reg_write32 = nvme_pci_reg_write32,
+ .reg_read64 = nvme_pci_reg_read64,
+@@ -3209,13 +3210,6 @@ static void nvme_shutdown(struct pci_dev *pdev)
+ nvme_disable_prepare_reset(dev, true);
+ }
+
+-static void nvme_remove_attrs(struct nvme_dev *dev)
+-{
+- if (dev->attrs_added)
+- sysfs_remove_group(&dev->ctrl.device->kobj,
+- &nvme_pci_attr_group);
+-}
+-
+ /*
+ * The driver's remove may be called on a device in a partially initialized
+ * state. This function must not have any dependencies on the device state in
+@@ -3237,7 +3231,6 @@ static void nvme_remove(struct pci_dev *pdev)
+ nvme_stop_ctrl(&dev->ctrl);
+ nvme_remove_namespaces(&dev->ctrl);
+ nvme_dev_disable(dev, true);
+- nvme_remove_attrs(dev);
+ nvme_free_host_mem(dev);
+ nvme_dev_remove_admin(dev);
+ nvme_free_queues(dev, 0);
+diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
+index a55d3e8b607d5..6f918e61b6aef 100644
+--- a/drivers/nvme/host/rdma.c
++++ b/drivers/nvme/host/rdma.c
+@@ -798,7 +798,7 @@ static int nvme_rdma_alloc_tag_set(struct nvme_ctrl *ctrl)
+ NVME_RDMA_METADATA_SGL_SIZE;
+
+ return nvme_alloc_io_tag_set(ctrl, &to_rdma_ctrl(ctrl)->tag_set,
+- &nvme_rdma_mq_ops, BLK_MQ_F_SHOULD_MERGE,
++ &nvme_rdma_mq_ops,
+ ctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2,
+ cmd_size);
+ }
+@@ -848,7 +848,6 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
+ if (new) {
+ error = nvme_alloc_admin_tag_set(&ctrl->ctrl,
+ &ctrl->admin_tag_set, &nvme_rdma_admin_mq_ops,
+- BLK_MQ_F_NO_SCHED,
+ sizeof(struct nvme_rdma_request) +
+ NVME_RDMA_DATA_SGL_SIZE);
+ if (error)
+diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
+index 83735c52d34a0..eacd445b5333f 100644
+--- a/drivers/nvme/host/tcp.c
++++ b/drivers/nvme/host/tcp.c
+@@ -1867,7 +1867,6 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
+ if (new) {
+ ret = nvme_alloc_io_tag_set(ctrl, &to_tcp_ctrl(ctrl)->tag_set,
+ &nvme_tcp_mq_ops,
+- BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING,
+ ctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2,
+ sizeof(struct nvme_tcp_request));
+ if (ret)
+@@ -1943,7 +1942,7 @@ static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
+ if (new) {
+ error = nvme_alloc_admin_tag_set(ctrl,
+ &to_tcp_ctrl(ctrl)->admin_tag_set,
+- &nvme_tcp_admin_mq_ops, BLK_MQ_F_BLOCKING,
++ &nvme_tcp_admin_mq_ops,
+ sizeof(struct nvme_tcp_request));
+ if (error)
+ goto out_free_queue;
+@@ -2524,7 +2523,7 @@ static const struct blk_mq_ops nvme_tcp_admin_mq_ops = {
+ static const struct nvme_ctrl_ops nvme_tcp_ctrl_ops = {
+ .name = "tcp",
+ .module = THIS_MODULE,
+- .flags = NVME_F_FABRICS,
++ .flags = NVME_F_FABRICS | NVME_F_BLOCKING,
+ .reg_read32 = nvmf_reg_read32,
+ .reg_read64 = nvmf_reg_read64,
+ .reg_write32 = nvmf_reg_write32,
+diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
+index 08c583258e90f..c864e902e91e2 100644
+--- a/drivers/nvme/target/loop.c
++++ b/drivers/nvme/target/loop.c
+@@ -353,7 +353,7 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
+ ctrl->ctrl.queue_count = 1;
+
+ error = nvme_alloc_admin_tag_set(&ctrl->ctrl, &ctrl->admin_tag_set,
+- &nvme_loop_admin_mq_ops, BLK_MQ_F_NO_SCHED,
++ &nvme_loop_admin_mq_ops,
+ sizeof(struct nvme_loop_iod) +
+ NVME_INLINE_SG_CNT * sizeof(struct scatterlist));
+ if (error)
+@@ -494,7 +494,7 @@ static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
+ return ret;
+
+ ret = nvme_alloc_io_tag_set(&ctrl->ctrl, &ctrl->tag_set,
+- &nvme_loop_mq_ops, BLK_MQ_F_SHOULD_MERGE, 1,
++ &nvme_loop_mq_ops, 1,
+ sizeof(struct nvme_loop_iod) +
+ NVME_INLINE_SG_CNT * sizeof(struct scatterlist));
+ if (ret)
+diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c
+index b80a9b74662b1..1deb61b22bc76 100644
+--- a/drivers/perf/arm-cmn.c
++++ b/drivers/perf/arm-cmn.c
+@@ -1576,7 +1576,6 @@ static int arm_cmn_event_init(struct perf_event *event)
+ hw->dn++;
+ continue;
+ }
+- hw->dtcs_used |= arm_cmn_node_to_xp(cmn, dn)->dtc;
+ hw->num_dns++;
+ if (bynodeid)
+ break;
+@@ -1589,6 +1588,12 @@ static int arm_cmn_event_init(struct perf_event *event)
+ nodeid, nid.x, nid.y, nid.port, nid.dev, type);
+ return -EINVAL;
+ }
++ /*
++ * Keep assuming non-cycles events count in all DTC domains; turns out
++ * it's hard to make a worthwhile optimisation around this, short of
++ * going all-in with domain-local counter allocation as well.
++ */
++ hw->dtcs_used = (1U << cmn->num_dtcs) - 1;
+
+ return arm_cmn_validate_group(cmn, event);
+ }
+diff --git a/drivers/phy/phy-can-transceiver.c b/drivers/phy/phy-can-transceiver.c
+index 95c6dbb52da72..ce511ad5d3690 100644
+--- a/drivers/phy/phy-can-transceiver.c
++++ b/drivers/phy/phy-can-transceiver.c
+@@ -99,6 +99,7 @@ static int can_transceiver_phy_probe(struct platform_device *pdev)
+ struct gpio_desc *standby_gpio;
+ struct gpio_desc *enable_gpio;
+ u32 max_bitrate = 0;
++ int err;
+
+ can_transceiver_phy = devm_kzalloc(dev, sizeof(struct can_transceiver_phy), GFP_KERNEL);
+ if (!can_transceiver_phy)
+@@ -124,8 +125,8 @@ static int can_transceiver_phy_probe(struct platform_device *pdev)
+ return PTR_ERR(phy);
+ }
+
+- device_property_read_u32(dev, "max-bitrate", &max_bitrate);
+- if (!max_bitrate)
++ err = device_property_read_u32(dev, "max-bitrate", &max_bitrate);
++ if ((err != -EINVAL) && !max_bitrate)
+ dev_warn(dev, "Invalid value for transceiver max bitrate. Ignoring bitrate limit\n");
+ phy->attrs.max_link_rate = max_bitrate;
+
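device_property_read_u32() returns -EINVAL when the property is simply absent, and an absent "max-bitrate" is a valid configuration (no limit). The fix above therefore warns only when the property was present (or failed for another reason) yet left the value at zero, rather than on every board that omits it:

	u32 max_bitrate = 0;
	int err = device_property_read_u32(dev, "max-bitrate", &max_bitrate);

	/* err == -EINVAL      : property absent  -> silent, no limit    */
	/* err == 0, value == 0: present but bad  -> warn, ignore limit  */
	/* err == 0, value > 0 : present and good -> apply the limit     */
	if (err != -EINVAL && !max_bitrate)
		dev_warn(dev, "Invalid value for transceiver max bitrate. Ignoring bitrate limit\n");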
+diff --git a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
+index e6ededc515239..a0bc10aa79618 100644
+--- a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
++++ b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
+@@ -485,8 +485,10 @@ static int rockchip_usb2phy_power_on(struct phy *phy)
+ return ret;
+
+ ret = property_enable(base, &rport->port_cfg->phy_sus, false);
+- if (ret)
++ if (ret) {
++ clk_disable_unprepare(rphy->clk480m);
+ return ret;
++ }
+
+ /* waiting for the utmi_clk to become stable */
+ usleep_range(1500, 2000);
+diff --git a/drivers/phy/sunplus/phy-sunplus-usb2.c b/drivers/phy/sunplus/phy-sunplus-usb2.c
+index e827b79f6d493..56de41091d639 100644
+--- a/drivers/phy/sunplus/phy-sunplus-usb2.c
++++ b/drivers/phy/sunplus/phy-sunplus-usb2.c
+@@ -254,6 +254,9 @@ static int sp_usb_phy_probe(struct platform_device *pdev)
+ return PTR_ERR(usbphy->phy_regs);
+
+ usbphy->moon4_res_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "moon4");
++ if (!usbphy->moon4_res_mem)
++ return -EINVAL;
++
+ usbphy->moon4_regs = devm_ioremap(&pdev->dev, usbphy->moon4_res_mem->start,
+ resource_size(usbphy->moon4_res_mem));
+ if (!usbphy->moon4_regs)
+diff --git a/drivers/phy/ti/Kconfig b/drivers/phy/ti/Kconfig
+index 15a3bcf323086..b905902d57508 100644
+--- a/drivers/phy/ti/Kconfig
++++ b/drivers/phy/ti/Kconfig
+@@ -23,7 +23,7 @@ config PHY_DM816X_USB
+
+ config PHY_AM654_SERDES
+ tristate "TI AM654 SERDES support"
+- depends on OF && ARCH_K3 || COMPILE_TEST
++ depends on OF && (ARCH_K3 || COMPILE_TEST)
+ depends on COMMON_CLK
+ select GENERIC_PHY
+ select MULTIPLEXER
+@@ -35,7 +35,7 @@ config PHY_AM654_SERDES
+
+ config PHY_J721E_WIZ
+ tristate "TI J721E WIZ (SERDES Wrapper) support"
+- depends on OF && ARCH_K3 || COMPILE_TEST
++ depends on OF && (ARCH_K3 || COMPILE_TEST)
+ depends on HAS_IOMEM && OF_ADDRESS
+ depends on COMMON_CLK
+ select GENERIC_PHY
+diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
+index da974ff2d75d0..5eeac92f610a0 100644
+--- a/drivers/pinctrl/pinctrl-rockchip.c
++++ b/drivers/pinctrl/pinctrl-rockchip.c
+@@ -926,19 +926,19 @@ static struct rockchip_mux_route_data rk3568_mux_route_data[] = {
+ RK_MUXROUTE_PMU(0, RK_PB5, 4, 0x0110, WRITE_MASK_VAL(3, 2, 1)), /* PWM1 IO mux M1 */
+ RK_MUXROUTE_PMU(0, RK_PC1, 1, 0x0110, WRITE_MASK_VAL(5, 4, 0)), /* PWM2 IO mux M0 */
+ RK_MUXROUTE_PMU(0, RK_PB6, 4, 0x0110, WRITE_MASK_VAL(5, 4, 1)), /* PWM2 IO mux M1 */
+- RK_MUXROUTE_PMU(0, RK_PB3, 2, 0x0300, WRITE_MASK_VAL(0, 0, 0)), /* CAN0 IO mux M0 */
++ RK_MUXROUTE_GRF(0, RK_PB3, 2, 0x0300, WRITE_MASK_VAL(0, 0, 0)), /* CAN0 IO mux M0 */
+ RK_MUXROUTE_GRF(2, RK_PA1, 4, 0x0300, WRITE_MASK_VAL(0, 0, 1)), /* CAN0 IO mux M1 */
+ RK_MUXROUTE_GRF(1, RK_PA1, 3, 0x0300, WRITE_MASK_VAL(2, 2, 0)), /* CAN1 IO mux M0 */
+ RK_MUXROUTE_GRF(4, RK_PC3, 3, 0x0300, WRITE_MASK_VAL(2, 2, 1)), /* CAN1 IO mux M1 */
+ RK_MUXROUTE_GRF(4, RK_PB5, 3, 0x0300, WRITE_MASK_VAL(4, 4, 0)), /* CAN2 IO mux M0 */
+ RK_MUXROUTE_GRF(2, RK_PB2, 4, 0x0300, WRITE_MASK_VAL(4, 4, 1)), /* CAN2 IO mux M1 */
+ RK_MUXROUTE_GRF(4, RK_PC4, 1, 0x0300, WRITE_MASK_VAL(6, 6, 0)), /* HPDIN IO mux M0 */
+- RK_MUXROUTE_PMU(0, RK_PC2, 2, 0x0300, WRITE_MASK_VAL(6, 6, 1)), /* HPDIN IO mux M1 */
++ RK_MUXROUTE_GRF(0, RK_PC2, 2, 0x0300, WRITE_MASK_VAL(6, 6, 1)), /* HPDIN IO mux M1 */
+ RK_MUXROUTE_GRF(3, RK_PB1, 3, 0x0300, WRITE_MASK_VAL(8, 8, 0)), /* GMAC1 IO mux M0 */
+ RK_MUXROUTE_GRF(4, RK_PA7, 3, 0x0300, WRITE_MASK_VAL(8, 8, 1)), /* GMAC1 IO mux M1 */
+ RK_MUXROUTE_GRF(4, RK_PD1, 1, 0x0300, WRITE_MASK_VAL(10, 10, 0)), /* HDMITX IO mux M0 */
+- RK_MUXROUTE_PMU(0, RK_PC7, 1, 0x0300, WRITE_MASK_VAL(10, 10, 1)), /* HDMITX IO mux M1 */
+- RK_MUXROUTE_PMU(0, RK_PB6, 1, 0x0300, WRITE_MASK_VAL(14, 14, 0)), /* I2C2 IO mux M0 */
++ RK_MUXROUTE_GRF(0, RK_PC7, 1, 0x0300, WRITE_MASK_VAL(10, 10, 1)), /* HDMITX IO mux M1 */
++ RK_MUXROUTE_GRF(0, RK_PB6, 1, 0x0300, WRITE_MASK_VAL(14, 14, 0)), /* I2C2 IO mux M0 */
+ RK_MUXROUTE_GRF(4, RK_PB4, 1, 0x0300, WRITE_MASK_VAL(14, 14, 1)), /* I2C2 IO mux M1 */
+ RK_MUXROUTE_GRF(1, RK_PA0, 1, 0x0304, WRITE_MASK_VAL(0, 0, 0)), /* I2C3 IO mux M0 */
+ RK_MUXROUTE_GRF(3, RK_PB6, 4, 0x0304, WRITE_MASK_VAL(0, 0, 1)), /* I2C3 IO mux M1 */
+@@ -964,7 +964,7 @@ static struct rockchip_mux_route_data rk3568_mux_route_data[] = {
+ RK_MUXROUTE_GRF(4, RK_PC3, 1, 0x0308, WRITE_MASK_VAL(12, 12, 1)), /* PWM15 IO mux M1 */
+ RK_MUXROUTE_GRF(3, RK_PD2, 3, 0x0308, WRITE_MASK_VAL(14, 14, 0)), /* SDMMC2 IO mux M0 */
+ RK_MUXROUTE_GRF(3, RK_PA5, 5, 0x0308, WRITE_MASK_VAL(14, 14, 1)), /* SDMMC2 IO mux M1 */
+- RK_MUXROUTE_PMU(0, RK_PB5, 2, 0x030c, WRITE_MASK_VAL(0, 0, 0)), /* SPI0 IO mux M0 */
++ RK_MUXROUTE_GRF(0, RK_PB5, 2, 0x030c, WRITE_MASK_VAL(0, 0, 0)), /* SPI0 IO mux M0 */
+ RK_MUXROUTE_GRF(2, RK_PD3, 3, 0x030c, WRITE_MASK_VAL(0, 0, 1)), /* SPI0 IO mux M1 */
+ RK_MUXROUTE_GRF(2, RK_PB5, 3, 0x030c, WRITE_MASK_VAL(2, 2, 0)), /* SPI1 IO mux M0 */
+ RK_MUXROUTE_GRF(3, RK_PC3, 3, 0x030c, WRITE_MASK_VAL(2, 2, 1)), /* SPI1 IO mux M1 */
+@@ -973,8 +973,8 @@ static struct rockchip_mux_route_data rk3568_mux_route_data[] = {
+ RK_MUXROUTE_GRF(4, RK_PB3, 4, 0x030c, WRITE_MASK_VAL(6, 6, 0)), /* SPI3 IO mux M0 */
+ RK_MUXROUTE_GRF(4, RK_PC2, 2, 0x030c, WRITE_MASK_VAL(6, 6, 1)), /* SPI3 IO mux M1 */
+ RK_MUXROUTE_GRF(2, RK_PB4, 2, 0x030c, WRITE_MASK_VAL(8, 8, 0)), /* UART1 IO mux M0 */
+- RK_MUXROUTE_PMU(0, RK_PD1, 1, 0x030c, WRITE_MASK_VAL(8, 8, 1)), /* UART1 IO mux M1 */
+- RK_MUXROUTE_PMU(0, RK_PD1, 1, 0x030c, WRITE_MASK_VAL(10, 10, 0)), /* UART2 IO mux M0 */
++ RK_MUXROUTE_GRF(3, RK_PD6, 4, 0x030c, WRITE_MASK_VAL(8, 8, 1)), /* UART1 IO mux M1 */
++ RK_MUXROUTE_GRF(0, RK_PD1, 1, 0x030c, WRITE_MASK_VAL(10, 10, 0)), /* UART2 IO mux M0 */
+ RK_MUXROUTE_GRF(1, RK_PD5, 2, 0x030c, WRITE_MASK_VAL(10, 10, 1)), /* UART2 IO mux M1 */
+ RK_MUXROUTE_GRF(1, RK_PA1, 2, 0x030c, WRITE_MASK_VAL(12, 12, 0)), /* UART3 IO mux M0 */
+ RK_MUXROUTE_GRF(3, RK_PB7, 4, 0x030c, WRITE_MASK_VAL(12, 12, 1)), /* UART3 IO mux M1 */
+@@ -1004,13 +1004,13 @@ static struct rockchip_mux_route_data rk3568_mux_route_data[] = {
+ RK_MUXROUTE_GRF(3, RK_PD6, 5, 0x0314, WRITE_MASK_VAL(1, 0, 1)), /* PDM IO mux M1 */
+ RK_MUXROUTE_GRF(4, RK_PA0, 4, 0x0314, WRITE_MASK_VAL(1, 0, 1)), /* PDM IO mux M1 */
+ RK_MUXROUTE_GRF(3, RK_PC4, 5, 0x0314, WRITE_MASK_VAL(1, 0, 2)), /* PDM IO mux M2 */
+- RK_MUXROUTE_PMU(0, RK_PA5, 3, 0x0314, WRITE_MASK_VAL(3, 2, 0)), /* PCIE20 IO mux M0 */
++ RK_MUXROUTE_GRF(0, RK_PA5, 3, 0x0314, WRITE_MASK_VAL(3, 2, 0)), /* PCIE20 IO mux M0 */
+ RK_MUXROUTE_GRF(2, RK_PD0, 4, 0x0314, WRITE_MASK_VAL(3, 2, 1)), /* PCIE20 IO mux M1 */
+ RK_MUXROUTE_GRF(1, RK_PB0, 4, 0x0314, WRITE_MASK_VAL(3, 2, 2)), /* PCIE20 IO mux M2 */
+- RK_MUXROUTE_PMU(0, RK_PA4, 3, 0x0314, WRITE_MASK_VAL(5, 4, 0)), /* PCIE30X1 IO mux M0 */
++ RK_MUXROUTE_GRF(0, RK_PA4, 3, 0x0314, WRITE_MASK_VAL(5, 4, 0)), /* PCIE30X1 IO mux M0 */
+ RK_MUXROUTE_GRF(2, RK_PD2, 4, 0x0314, WRITE_MASK_VAL(5, 4, 1)), /* PCIE30X1 IO mux M1 */
+ RK_MUXROUTE_GRF(1, RK_PA5, 4, 0x0314, WRITE_MASK_VAL(5, 4, 2)), /* PCIE30X1 IO mux M2 */
+- RK_MUXROUTE_PMU(0, RK_PA6, 2, 0x0314, WRITE_MASK_VAL(7, 6, 0)), /* PCIE30X2 IO mux M0 */
++ RK_MUXROUTE_GRF(0, RK_PA6, 2, 0x0314, WRITE_MASK_VAL(7, 6, 0)), /* PCIE30X2 IO mux M0 */
+ RK_MUXROUTE_GRF(2, RK_PD4, 4, 0x0314, WRITE_MASK_VAL(7, 6, 1)), /* PCIE30X2 IO mux M1 */
+ RK_MUXROUTE_GRF(4, RK_PC2, 4, 0x0314, WRITE_MASK_VAL(7, 6, 2)), /* PCIE30X2 IO mux M2 */
+ };
+@@ -2436,10 +2436,19 @@ static int rockchip_get_pull(struct rockchip_pin_bank *bank, int pin_num)
+ case RK3308:
+ case RK3368:
+ case RK3399:
++ case RK3568:
+ case RK3588:
+ pull_type = bank->pull_type[pin_num / 8];
+ data >>= bit;
+ data &= (1 << RK3188_PULL_BITS_PER_PIN) - 1;
++ /*
++ * In the TRM, pull-up being 1 for everything except the GPIO0_D3-D6,
++ * where that pull up value becomes 3.
++ */
++ if (ctrl->type == RK3568 && bank->bank_num == 0 && pin_num >= 27 && pin_num <= 30) {
++ if (data == 3)
++ data = 1;
++ }
+
+ return rockchip_pull_list[pull_type][data];
+ default:
+@@ -2497,7 +2506,7 @@ static int rockchip_set_pull(struct rockchip_pin_bank *bank,
+ }
+ }
+ /*
+- * In the TRM, pull-up being 1 for everything except the GPIO0_D0-D6,
++ * In the TRM, pull-up being 1 for everything except the GPIO0_D3-D6,
+ * where that pull up value becomes 3.
+ */
+ if (ctrl->type == RK3568 && bank->bank_num == 0 && pin_num >= 27 && pin_num <= 30) {
+diff --git a/drivers/platform/x86/apple-gmux.c b/drivers/platform/x86/apple-gmux.c
+index ca33df7ea550e..9333f82cfa8a0 100644
+--- a/drivers/platform/x86/apple-gmux.c
++++ b/drivers/platform/x86/apple-gmux.c
+@@ -64,29 +64,6 @@ struct apple_gmux_data {
+
+ static struct apple_gmux_data *apple_gmux_data;
+
+-/*
+- * gmux port offsets. Many of these are not yet used, but may be in the
+- * future, and it's useful to have them documented here anyhow.
+- */
+-#define GMUX_PORT_VERSION_MAJOR 0x04
+-#define GMUX_PORT_VERSION_MINOR 0x05
+-#define GMUX_PORT_VERSION_RELEASE 0x06
+-#define GMUX_PORT_SWITCH_DISPLAY 0x10
+-#define GMUX_PORT_SWITCH_GET_DISPLAY 0x11
+-#define GMUX_PORT_INTERRUPT_ENABLE 0x14
+-#define GMUX_PORT_INTERRUPT_STATUS 0x16
+-#define GMUX_PORT_SWITCH_DDC 0x28
+-#define GMUX_PORT_SWITCH_EXTERNAL 0x40
+-#define GMUX_PORT_SWITCH_GET_EXTERNAL 0x41
+-#define GMUX_PORT_DISCRETE_POWER 0x50
+-#define GMUX_PORT_MAX_BRIGHTNESS 0x70
+-#define GMUX_PORT_BRIGHTNESS 0x74
+-#define GMUX_PORT_VALUE 0xc2
+-#define GMUX_PORT_READ 0xd0
+-#define GMUX_PORT_WRITE 0xd4
+-
+-#define GMUX_MIN_IO_LEN (GMUX_PORT_BRIGHTNESS + 4)
+-
+ #define GMUX_INTERRUPT_ENABLE 0xff
+ #define GMUX_INTERRUPT_DISABLE 0x00
+
+@@ -249,23 +226,6 @@ static void gmux_write32(struct apple_gmux_data *gmux_data, int port,
+ gmux_pio_write32(gmux_data, port, val);
+ }
+
+-static bool gmux_is_indexed(struct apple_gmux_data *gmux_data)
+-{
+- u16 val;
+-
+- outb(0xaa, gmux_data->iostart + 0xcc);
+- outb(0x55, gmux_data->iostart + 0xcd);
+- outb(0x00, gmux_data->iostart + 0xce);
+-
+- val = inb(gmux_data->iostart + 0xcc) |
+- (inb(gmux_data->iostart + 0xcd) << 8);
+-
+- if (val == 0x55aa)
+- return true;
+-
+- return false;
+-}
+-
+ /**
+ * DOC: Backlight control
+ *
+@@ -605,60 +565,43 @@ static int gmux_probe(struct pnp_dev *pnp, const struct pnp_device_id *id)
+ int ret = -ENXIO;
+ acpi_status status;
+ unsigned long long gpe;
++ bool indexed = false;
++ u32 version;
+
+ if (apple_gmux_data)
+ return -EBUSY;
+
++ if (!apple_gmux_detect(pnp, &indexed)) {
++ pr_info("gmux device not present\n");
++ return -ENODEV;
++ }
++
+ gmux_data = kzalloc(sizeof(*gmux_data), GFP_KERNEL);
+ if (!gmux_data)
+ return -ENOMEM;
+ pnp_set_drvdata(pnp, gmux_data);
+
+ res = pnp_get_resource(pnp, IORESOURCE_IO, 0);
+- if (!res) {
+- pr_err("Failed to find gmux I/O resource\n");
+- goto err_free;
+- }
+-
+ gmux_data->iostart = res->start;
+ gmux_data->iolen = resource_size(res);
+
+- if (gmux_data->iolen < GMUX_MIN_IO_LEN) {
+- pr_err("gmux I/O region too small (%lu < %u)\n",
+- gmux_data->iolen, GMUX_MIN_IO_LEN);
+- goto err_free;
+- }
+-
+ if (!request_region(gmux_data->iostart, gmux_data->iolen,
+ "Apple gmux")) {
+ pr_err("gmux I/O already in use\n");
+ goto err_free;
+ }
+
+- /*
+- * Invalid version information may indicate either that the gmux
+- * device isn't present or that it's a new one that uses indexed
+- * io
+- */
+-
+- ver_major = gmux_read8(gmux_data, GMUX_PORT_VERSION_MAJOR);
+- ver_minor = gmux_read8(gmux_data, GMUX_PORT_VERSION_MINOR);
+- ver_release = gmux_read8(gmux_data, GMUX_PORT_VERSION_RELEASE);
+- if (ver_major == 0xff && ver_minor == 0xff && ver_release == 0xff) {
+- if (gmux_is_indexed(gmux_data)) {
+- u32 version;
+- mutex_init(&gmux_data->index_lock);
+- gmux_data->indexed = true;
+- version = gmux_read32(gmux_data,
+- GMUX_PORT_VERSION_MAJOR);
+- ver_major = (version >> 24) & 0xff;
+- ver_minor = (version >> 16) & 0xff;
+- ver_release = (version >> 8) & 0xff;
+- } else {
+- pr_info("gmux device not present\n");
+- ret = -ENODEV;
+- goto err_release;
+- }
++ if (indexed) {
++ mutex_init(&gmux_data->index_lock);
++ gmux_data->indexed = true;
++ version = gmux_read32(gmux_data, GMUX_PORT_VERSION_MAJOR);
++ ver_major = (version >> 24) & 0xff;
++ ver_minor = (version >> 16) & 0xff;
++ ver_release = (version >> 8) & 0xff;
++ } else {
++ ver_major = gmux_read8(gmux_data, GMUX_PORT_VERSION_MAJOR);
++ ver_minor = gmux_read8(gmux_data, GMUX_PORT_VERSION_MINOR);
++ ver_release = gmux_read8(gmux_data, GMUX_PORT_VERSION_RELEASE);
+ }
+ pr_info("Found gmux version %d.%d.%d [%s]\n", ver_major, ver_minor,
+ ver_release, (gmux_data->indexed ? "indexed" : "classic"));
+diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
+index c685a705b73dd..cb15acdf14a30 100644
+--- a/drivers/platform/x86/asus-nb-wmi.c
++++ b/drivers/platform/x86/asus-nb-wmi.c
+@@ -121,6 +121,10 @@ static struct quirk_entry quirk_asus_tablet_mode = {
+ .tablet_switch_mode = asus_wmi_lid_flip_rog_devid,
+ };
+
++static struct quirk_entry quirk_asus_ignore_fan = {
++ .wmi_ignore_fan = true,
++};
++
+ static int dmi_matched(const struct dmi_system_id *dmi)
+ {
+ pr_info("Identified laptop model '%s'\n", dmi->ident);
+@@ -473,6 +477,15 @@ static const struct dmi_system_id asus_quirks[] = {
+ },
+ .driver_data = &quirk_asus_tablet_mode,
+ },
++ {
++ .callback = dmi_matched,
++ .ident = "ASUS VivoBook E410MA",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "E410MA"),
++ },
++ .driver_data = &quirk_asus_ignore_fan,
++ },
+ {},
+ };
+
+@@ -511,6 +524,7 @@ static const struct key_entry asus_nb_wmi_keymap[] = {
+ { KE_KEY, 0x30, { KEY_VOLUMEUP } },
+ { KE_KEY, 0x31, { KEY_VOLUMEDOWN } },
+ { KE_KEY, 0x32, { KEY_MUTE } },
++ { KE_KEY, 0x33, { KEY_SCREENLOCK } },
+ { KE_KEY, 0x35, { KEY_SCREENLOCK } },
+ { KE_KEY, 0x38, { KEY_PROG3 } }, /* Armoury Crate */
+ { KE_KEY, 0x40, { KEY_PREVIOUSSONG } },
+@@ -544,6 +558,7 @@ static const struct key_entry asus_nb_wmi_keymap[] = {
+ { KE_KEY, 0x7D, { KEY_BLUETOOTH } }, /* Bluetooth Enable */
+ { KE_KEY, 0x7E, { KEY_BLUETOOTH } }, /* Bluetooth Disable */
+ { KE_KEY, 0x82, { KEY_CAMERA } },
++ { KE_KEY, 0x85, { KEY_CAMERA } },
+ { KE_KEY, 0x86, { KEY_PROG1 } }, /* MyASUS Key */
+ { KE_KEY, 0x88, { KEY_RFKILL } }, /* Radio Toggle Key */
+ { KE_KEY, 0x8A, { KEY_PROG1 } }, /* Color enhancement mode */
+diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
+index f051b21653d61..02bf286924183 100644
+--- a/drivers/platform/x86/asus-wmi.c
++++ b/drivers/platform/x86/asus-wmi.c
+@@ -225,6 +225,7 @@ struct asus_wmi {
+
+ int tablet_switch_event_code;
+ u32 tablet_switch_dev_id;
++ bool tablet_switch_inverted;
+
+ enum fan_type fan_type;
+ enum fan_type gpu_fan_type;
+@@ -493,6 +494,13 @@ static bool asus_wmi_dev_is_present(struct asus_wmi *asus, u32 dev_id)
+ }
+
+ /* Input **********************************************************************/
++static void asus_wmi_tablet_sw_report(struct asus_wmi *asus, bool value)
++{
++ input_report_switch(asus->inputdev, SW_TABLET_MODE,
++ asus->tablet_switch_inverted ? !value : value);
++ input_sync(asus->inputdev);
++}
++
+ static void asus_wmi_tablet_sw_init(struct asus_wmi *asus, u32 dev_id, int event_code)
+ {
+ struct device *dev = &asus->platform_device->dev;
+@@ -501,7 +509,7 @@ static void asus_wmi_tablet_sw_init(struct asus_wmi *asus, u32 dev_id, int event
+ result = asus_wmi_get_devstate_simple(asus, dev_id);
+ if (result >= 0) {
+ input_set_capability(asus->inputdev, EV_SW, SW_TABLET_MODE);
+- input_report_switch(asus->inputdev, SW_TABLET_MODE, result);
++ asus_wmi_tablet_sw_report(asus, result);
+ asus->tablet_switch_dev_id = dev_id;
+ asus->tablet_switch_event_code = event_code;
+ } else if (result == -ENODEV) {
+@@ -534,6 +542,7 @@ static int asus_wmi_input_init(struct asus_wmi *asus)
+ case asus_wmi_no_tablet_switch:
+ break;
+ case asus_wmi_kbd_dock_devid:
++ asus->tablet_switch_inverted = true;
+ asus_wmi_tablet_sw_init(asus, ASUS_WMI_DEVID_KBD_DOCK, NOTIFY_KBD_DOCK_CHANGE);
+ break;
+ case asus_wmi_lid_flip_devid:
+@@ -573,10 +582,8 @@ static void asus_wmi_tablet_mode_get_state(struct asus_wmi *asus)
+ return;
+
+ result = asus_wmi_get_devstate_simple(asus, asus->tablet_switch_dev_id);
+- if (result >= 0) {
+- input_report_switch(asus->inputdev, SW_TABLET_MODE, result);
+- input_sync(asus->inputdev);
+- }
++ if (result >= 0)
++ asus_wmi_tablet_sw_report(asus, result);
+ }
+
+ /* dGPU ********************************************************************/
+@@ -2243,7 +2250,9 @@ static int asus_wmi_fan_init(struct asus_wmi *asus)
+ asus->fan_type = FAN_TYPE_NONE;
+ asus->agfn_pwm = -1;
+
+- if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_CPU_FAN_CTRL))
++ if (asus->driver->quirks->wmi_ignore_fan)
++ asus->fan_type = FAN_TYPE_NONE;
++ else if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_CPU_FAN_CTRL))
+ asus->fan_type = FAN_TYPE_SPEC83;
+ else if (asus_wmi_has_agfn_fan(asus))
+ asus->fan_type = FAN_TYPE_AGFN;
+diff --git a/drivers/platform/x86/asus-wmi.h b/drivers/platform/x86/asus-wmi.h
+index 65316998b898a..a478ebfd34dfa 100644
+--- a/drivers/platform/x86/asus-wmi.h
++++ b/drivers/platform/x86/asus-wmi.h
+@@ -38,6 +38,7 @@ struct quirk_entry {
+ bool store_backlight_power;
+ bool wmi_backlight_set_devstate;
+ bool wmi_force_als_set;
++ bool wmi_ignore_fan;
+ enum asus_wmi_tablet_switch_mode tablet_switch_mode;
+ int wapf;
+ /*
+diff --git a/drivers/platform/x86/simatic-ipc.c b/drivers/platform/x86/simatic-ipc.c
+index ca76076fc706a..b3622419cd1a4 100644
+--- a/drivers/platform/x86/simatic-ipc.c
++++ b/drivers/platform/x86/simatic-ipc.c
+@@ -46,7 +46,8 @@ static struct {
+ {SIMATIC_IPC_IPC427D, SIMATIC_IPC_DEVICE_427E, SIMATIC_IPC_DEVICE_NONE},
+ {SIMATIC_IPC_IPC427E, SIMATIC_IPC_DEVICE_427E, SIMATIC_IPC_DEVICE_427E},
+ {SIMATIC_IPC_IPC477E, SIMATIC_IPC_DEVICE_NONE, SIMATIC_IPC_DEVICE_427E},
+- {SIMATIC_IPC_IPC427G, SIMATIC_IPC_DEVICE_227G, SIMATIC_IPC_DEVICE_227G},
++ {SIMATIC_IPC_IPCBX_39A, SIMATIC_IPC_DEVICE_227G, SIMATIC_IPC_DEVICE_227G},
++ {SIMATIC_IPC_IPCPX_39A, SIMATIC_IPC_DEVICE_NONE, SIMATIC_IPC_DEVICE_227G},
+ };
+
+ static int register_platform_devices(u32 station_id)
+diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
+index 4e95d2243161a..7fd735c67a8e6 100644
+--- a/drivers/platform/x86/thinkpad_acpi.c
++++ b/drivers/platform/x86/thinkpad_acpi.c
+@@ -10500,8 +10500,7 @@ static int dytc_profile_set(struct platform_profile_handler *pprof,
+ if (err)
+ goto unlock;
+ }
+- }
+- if (dytc_capabilities & BIT(DYTC_FC_PSC)) {
++ } else if (dytc_capabilities & BIT(DYTC_FC_PSC)) {
+ err = dytc_command(DYTC_SET_COMMAND(DYTC_FUNCTION_PSC, perfmode, 1), &output);
+ if (err)
+ goto unlock;
+@@ -10529,14 +10528,16 @@ static void dytc_profile_refresh(void)
+ err = dytc_command(DYTC_CMD_MMC_GET, &output);
+ else
+ err = dytc_cql_command(DYTC_CMD_GET, &output);
+- } else if (dytc_capabilities & BIT(DYTC_FC_PSC))
++ funcmode = DYTC_FUNCTION_MMC;
++ } else if (dytc_capabilities & BIT(DYTC_FC_PSC)) {
+ err = dytc_command(DYTC_CMD_GET, &output);
+-
++ /* Check if we are PSC mode, or have AMT enabled */
++ funcmode = (output >> DYTC_GET_FUNCTION_BIT) & 0xF;
++ }
+ mutex_unlock(&dytc_mutex);
+ if (err)
+ return;
+
+- funcmode = (output >> DYTC_GET_FUNCTION_BIT) & 0xF;
+ perfmode = (output >> DYTC_GET_MODE_BIT) & 0xF;
+ convert_dytc_to_profile(funcmode, perfmode, &profile);
+ if (profile != dytc_current_profile) {
+diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
+index baae3120efd05..f00995390fdfe 100644
+--- a/drivers/platform/x86/touchscreen_dmi.c
++++ b/drivers/platform/x86/touchscreen_dmi.c
+@@ -264,6 +264,23 @@ static const struct ts_dmi_data connect_tablet9_data = {
+ .properties = connect_tablet9_props,
+ };
+
++static const struct property_entry csl_panther_tab_hd_props[] = {
++ PROPERTY_ENTRY_U32("touchscreen-min-x", 1),
++ PROPERTY_ENTRY_U32("touchscreen-min-y", 20),
++ PROPERTY_ENTRY_U32("touchscreen-size-x", 1980),
++ PROPERTY_ENTRY_U32("touchscreen-size-y", 1526),
++ PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
++ PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
++ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-csl-panther-tab-hd.fw"),
++ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
++ { }
++};
++
++static const struct ts_dmi_data csl_panther_tab_hd_data = {
++ .acpi_name = "MSSL1680:00",
++ .properties = csl_panther_tab_hd_props,
++};
++
+ static const struct property_entry cube_iwork8_air_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 1),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 3),
+@@ -1124,6 +1141,14 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "Tablet 9"),
+ },
+ },
++ {
++ /* CSL Panther Tab HD */
++ .driver_data = (void *)&csl_panther_tab_hd_data,
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "CSL Computer GmbH & Co. KG"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "CSL Panther Tab HD"),
++ },
++ },
+ {
+ /* CUBE iwork8 Air */
+ .driver_data = (void *)&cube_iwork8_air_data,
+diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig
+index de176c2fbad96..2a52c990d4fec 100644
+--- a/drivers/reset/Kconfig
++++ b/drivers/reset/Kconfig
+@@ -257,7 +257,7 @@ config RESET_SUNXI
+
+ config RESET_TI_SCI
+ tristate "TI System Control Interface (TI-SCI) reset driver"
+- depends on TI_SCI_PROTOCOL || COMPILE_TEST
++ depends on TI_SCI_PROTOCOL || (COMPILE_TEST && TI_SCI_PROTOCOL=n)
+ help
+ This enables the reset driver support over TI System Control Interface
+ available on some new TI's SoCs. If you wish to use reset resources
+diff --git a/drivers/reset/reset-uniphier-glue.c b/drivers/reset/reset-uniphier-glue.c
+index 146fd5d45e99d..15abac9fc72c0 100644
+--- a/drivers/reset/reset-uniphier-glue.c
++++ b/drivers/reset/reset-uniphier-glue.c
+@@ -47,7 +47,6 @@ static int uniphier_glue_reset_probe(struct platform_device *pdev)
+ struct device *dev = &pdev->dev;
+ struct uniphier_glue_reset_priv *priv;
+ struct resource *res;
+- resource_size_t size;
+ int i, ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+@@ -60,7 +59,6 @@ static int uniphier_glue_reset_probe(struct platform_device *pdev)
+ return -EINVAL;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+- size = resource_size(res);
+ priv->rdata.membase = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->rdata.membase))
+ return PTR_ERR(priv->rdata.membase);
+@@ -96,7 +94,7 @@ static int uniphier_glue_reset_probe(struct platform_device *pdev)
+
+ spin_lock_init(&priv->rdata.lock);
+ priv->rdata.rcdev.owner = THIS_MODULE;
+- priv->rdata.rcdev.nr_resets = size * BITS_PER_BYTE;
++ priv->rdata.rcdev.nr_resets = resource_size(res) * BITS_PER_BYTE;
+ priv->rdata.rcdev.ops = &reset_simple_ops;
+ priv->rdata.rcdev.of_node = dev->of_node;
+ priv->rdata.active_low = true;
+diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
+index 699b07abb6b0b..02fa3c00dcccf 100644
+--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
++++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
+@@ -714,7 +714,7 @@ static int hisi_sas_init_device(struct domain_device *device)
+ int_to_scsilun(0, &lun);
+
+ while (retry-- > 0) {
+- rc = sas_clear_task_set(device, lun.scsi_lun);
++ rc = sas_abort_task_set(device, lun.scsi_lun);
+ if (rc == TMF_RESP_FUNC_COMPLETE) {
+ hisi_sas_release_task(hisi_hba, device);
+ break;
+@@ -1334,7 +1334,7 @@ static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba)
+ device->linkrate = phy->sas_phy.linkrate;
+
+ hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
+- } else
++ } else if (!port->port_attached)
+ port->id = 0xff;
+ }
+ }
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index 4dbf51e2623ad..f6da34850af9d 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -5850,7 +5850,7 @@ static int hpsa_scsi_host_alloc(struct ctlr_info *h)
+ {
+ struct Scsi_Host *sh;
+
+- sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
++ sh = scsi_host_alloc(&hpsa_driver_template, sizeof(struct ctlr_info));
+ if (sh == NULL) {
+ dev_err(&h->pdev->dev, "scsi_host_alloc failed\n");
+ return -ENOMEM;
+diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
+index f473c002fa4d6..bf834e72595a3 100644
+--- a/drivers/scsi/scsi_transport_iscsi.c
++++ b/drivers/scsi/scsi_transport_iscsi.c
+@@ -1677,6 +1677,13 @@ static const char *iscsi_session_state_name(int state)
+ return name;
+ }
+
++static char *iscsi_session_target_state_name[] = {
++ [ISCSI_SESSION_TARGET_UNBOUND] = "UNBOUND",
++ [ISCSI_SESSION_TARGET_ALLOCATED] = "ALLOCATED",
++ [ISCSI_SESSION_TARGET_SCANNED] = "SCANNED",
++ [ISCSI_SESSION_TARGET_UNBINDING] = "UNBINDING",
++};
++
+ int iscsi_session_chkready(struct iscsi_cls_session *session)
+ {
+ int err;
+@@ -1786,9 +1793,13 @@ static int iscsi_user_scan_session(struct device *dev, void *data)
+ if ((scan_data->channel == SCAN_WILD_CARD ||
+ scan_data->channel == 0) &&
+ (scan_data->id == SCAN_WILD_CARD ||
+- scan_data->id == id))
++ scan_data->id == id)) {
+ scsi_scan_target(&session->dev, 0, id,
+ scan_data->lun, scan_data->rescan);
++ spin_lock_irqsave(&session->lock, flags);
++ session->target_state = ISCSI_SESSION_TARGET_SCANNED;
++ spin_unlock_irqrestore(&session->lock, flags);
++ }
+ }
+
+ user_scan_exit:
+@@ -1961,31 +1972,41 @@ static void __iscsi_unbind_session(struct work_struct *work)
+ struct iscsi_cls_host *ihost = shost->shost_data;
+ unsigned long flags;
+ unsigned int target_id;
++ bool remove_target = true;
+
+ ISCSI_DBG_TRANS_SESSION(session, "Unbinding session\n");
+
+ /* Prevent new scans and make sure scanning is not in progress */
+ mutex_lock(&ihost->mutex);
+ spin_lock_irqsave(&session->lock, flags);
+- if (session->target_id == ISCSI_MAX_TARGET) {
++ if (session->target_state == ISCSI_SESSION_TARGET_ALLOCATED) {
++ remove_target = false;
++ } else if (session->target_state != ISCSI_SESSION_TARGET_SCANNED) {
+ spin_unlock_irqrestore(&session->lock, flags);
+ mutex_unlock(&ihost->mutex);
+- goto unbind_session_exit;
++ ISCSI_DBG_TRANS_SESSION(session,
++ "Skipping target unbinding: Session is unbound/unbinding.\n");
++ return;
+ }
+
++ session->target_state = ISCSI_SESSION_TARGET_UNBINDING;
+ target_id = session->target_id;
+ session->target_id = ISCSI_MAX_TARGET;
+ spin_unlock_irqrestore(&session->lock, flags);
+ mutex_unlock(&ihost->mutex);
+
+- scsi_remove_target(&session->dev);
++ if (remove_target)
++ scsi_remove_target(&session->dev);
+
+ if (session->ida_used)
+ ida_free(&iscsi_sess_ida, target_id);
+
+-unbind_session_exit:
+ iscsi_session_event(session, ISCSI_KEVENT_UNBIND_SESSION);
+ ISCSI_DBG_TRANS_SESSION(session, "Completed target removal\n");
++
++ spin_lock_irqsave(&session->lock, flags);
++ session->target_state = ISCSI_SESSION_TARGET_UNBOUND;
++ spin_unlock_irqrestore(&session->lock, flags);
+ }
+
+ static void __iscsi_destroy_session(struct work_struct *work)
+@@ -2062,6 +2083,9 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
+ session->ida_used = true;
+ } else
+ session->target_id = target_id;
++ spin_lock_irqsave(&session->lock, flags);
++ session->target_state = ISCSI_SESSION_TARGET_ALLOCATED;
++ spin_unlock_irqrestore(&session->lock, flags);
+
+ dev_set_name(&session->dev, "session%u", session->sid);
+ err = device_add(&session->dev);
+@@ -4369,6 +4393,19 @@ iscsi_session_attr(def_taskmgmt_tmo, ISCSI_PARAM_DEF_TASKMGMT_TMO, 0);
+ iscsi_session_attr(discovery_parent_idx, ISCSI_PARAM_DISCOVERY_PARENT_IDX, 0);
+ iscsi_session_attr(discovery_parent_type, ISCSI_PARAM_DISCOVERY_PARENT_TYPE, 0);
+
++static ssize_t
++show_priv_session_target_state(struct device *dev, struct device_attribute *attr,
++ char *buf)
++{
++ struct iscsi_cls_session *session = iscsi_dev_to_session(dev->parent);
++
++ return sysfs_emit(buf, "%s\n",
++ iscsi_session_target_state_name[session->target_state]);
++}
++
++static ISCSI_CLASS_ATTR(priv_sess, target_state, S_IRUGO,
++ show_priv_session_target_state, NULL);
++
+ static ssize_t
+ show_priv_session_state(struct device *dev, struct device_attribute *attr,
+ char *buf)
+@@ -4471,6 +4508,7 @@ static struct attribute *iscsi_session_attrs[] = {
+ &dev_attr_sess_boot_target.attr,
+ &dev_attr_priv_sess_recovery_tmo.attr,
+ &dev_attr_priv_sess_state.attr,
++ &dev_attr_priv_sess_target_state.attr,
+ &dev_attr_priv_sess_creator.attr,
+ &dev_attr_sess_chap_out_idx.attr,
+ &dev_attr_sess_chap_in_idx.attr,
+@@ -4584,6 +4622,8 @@ static umode_t iscsi_session_attr_is_visible(struct kobject *kobj,
+ return S_IRUGO | S_IWUSR;
+ else if (attr == &dev_attr_priv_sess_state.attr)
+ return S_IRUGO;
++ else if (attr == &dev_attr_priv_sess_target_state.attr)
++ return S_IRUGO;
+ else if (attr == &dev_attr_priv_sess_creator.attr)
+ return S_IRUGO;
+ else if (attr == &dev_attr_priv_sess_target_id.attr)
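The new target_state machinery above exists so that unbind can tell an allocated-but-never-scanned target apart from a scanned one, and can bail out if another task is already unbinding. A minimal userspace sketch of that decision under a lock; the names are ours, not the kernel's:

    #include <pthread.h>

    /* The four states introduced above; transitions only under the lock. */
    enum target_state { T_UNBOUND, T_ALLOCATED, T_SCANNED, T_UNBINDING };

    struct session {
        pthread_mutex_t lock;
        enum target_state state;
    };

    /*
     * Returns 1 if the caller must remove the SCSI target (it was
     * scanned), 0 if not (allocated but never scanned), and -1 if the
     * whole unbind should be skipped (already unbound or unbinding).
     */
    static int begin_unbind(struct session *s)
    {
        int remove_target = 1;

        pthread_mutex_lock(&s->lock);
        if (s->state == T_ALLOCATED) {
            remove_target = 0;
        } else if (s->state != T_SCANNED) {
            pthread_mutex_unlock(&s->lock);
            return -1;
        }
        s->state = T_UNBINDING;
        pthread_mutex_unlock(&s->lock);
        return remove_target;
    }

    int main(void)
    {
        struct session s = { .lock = PTHREAD_MUTEX_INITIALIZER, .state = T_SCANNED };

        return begin_unbind(&s) == 1 ? 0 : 1;  /* scanned: remove it */
    }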
+diff --git a/drivers/soc/imx/imx8mp-blk-ctrl.c b/drivers/soc/imx/imx8mp-blk-ctrl.c
+index 0e3b6ba22f943..0f13853901dfe 100644
+--- a/drivers/soc/imx/imx8mp-blk-ctrl.c
++++ b/drivers/soc/imx/imx8mp-blk-ctrl.c
+@@ -212,7 +212,7 @@ static void imx8mp_hdmi_blk_ctrl_power_on(struct imx8mp_blk_ctrl *bc,
+ break;
+ case IMX8MP_HDMIBLK_PD_LCDIF:
+ regmap_set_bits(bc->regmap, HDMI_RTX_CLK_CTL0,
+- BIT(7) | BIT(16) | BIT(17) | BIT(18) |
++ BIT(16) | BIT(17) | BIT(18) |
+ BIT(19) | BIT(20));
+ regmap_set_bits(bc->regmap, HDMI_RTX_CLK_CTL1, BIT(11));
+ regmap_set_bits(bc->regmap, HDMI_RTX_RESET_CTL0,
+@@ -241,6 +241,7 @@ static void imx8mp_hdmi_blk_ctrl_power_on(struct imx8mp_blk_ctrl *bc,
+ regmap_set_bits(bc->regmap, HDMI_TX_CONTROL0, BIT(1));
+ break;
+ case IMX8MP_HDMIBLK_PD_HDMI_TX_PHY:
++ regmap_set_bits(bc->regmap, HDMI_RTX_CLK_CTL0, BIT(7));
+ regmap_set_bits(bc->regmap, HDMI_RTX_CLK_CTL1, BIT(22) | BIT(24));
+ regmap_set_bits(bc->regmap, HDMI_RTX_RESET_CTL0, BIT(12));
+ regmap_clear_bits(bc->regmap, HDMI_TX_CONTROL0, BIT(3));
+@@ -270,7 +271,7 @@ static void imx8mp_hdmi_blk_ctrl_power_off(struct imx8mp_blk_ctrl *bc,
+ BIT(4) | BIT(5) | BIT(6));
+ regmap_clear_bits(bc->regmap, HDMI_RTX_CLK_CTL1, BIT(11));
+ regmap_clear_bits(bc->regmap, HDMI_RTX_CLK_CTL0,
+- BIT(7) | BIT(16) | BIT(17) | BIT(18) |
++ BIT(16) | BIT(17) | BIT(18) |
+ BIT(19) | BIT(20));
+ break;
+ case IMX8MP_HDMIBLK_PD_PAI:
+@@ -298,6 +299,7 @@ static void imx8mp_hdmi_blk_ctrl_power_off(struct imx8mp_blk_ctrl *bc,
+ case IMX8MP_HDMIBLK_PD_HDMI_TX_PHY:
+ regmap_set_bits(bc->regmap, HDMI_TX_CONTROL0, BIT(3));
+ regmap_clear_bits(bc->regmap, HDMI_RTX_RESET_CTL0, BIT(12));
++ regmap_clear_bits(bc->regmap, HDMI_RTX_CLK_CTL0, BIT(7));
+ regmap_clear_bits(bc->regmap, HDMI_RTX_CLK_CTL1, BIT(22) | BIT(24));
+ break;
+ case IMX8MP_HDMIBLK_PD_HDCP:
+@@ -590,7 +592,6 @@ static int imx8mp_blk_ctrl_probe(struct platform_device *pdev)
+ ret = PTR_ERR(domain->power_dev);
+ goto cleanup_pds;
+ }
+- dev_set_name(domain->power_dev, "%s", data->name);
+
+ domain->genpd.name = data->name;
+ domain->genpd.power_on = imx8mp_blk_ctrl_power_on;
+diff --git a/drivers/soc/imx/soc-imx8m.c b/drivers/soc/imx/soc-imx8m.c
+index 28144c699b0c3..32ed9dc88e455 100644
+--- a/drivers/soc/imx/soc-imx8m.c
++++ b/drivers/soc/imx/soc-imx8m.c
+@@ -66,8 +66,8 @@ static u32 __init imx8mq_soc_revision(void)
+ ocotp_base = of_iomap(np, 0);
+ WARN_ON(!ocotp_base);
+ clk = of_clk_get_by_name(np, NULL);
+- if (!clk) {
+- WARN_ON(!clk);
++ if (IS_ERR(clk)) {
++ WARN_ON(IS_ERR(clk));
+ return 0;
+ }
+
+diff --git a/drivers/soc/qcom/cpr.c b/drivers/soc/qcom/cpr.c
+index e9b854ed1bdfd..144ea68e0920a 100644
+--- a/drivers/soc/qcom/cpr.c
++++ b/drivers/soc/qcom/cpr.c
+@@ -1708,12 +1708,16 @@ static int cpr_probe(struct platform_device *pdev)
+
+ ret = of_genpd_add_provider_simple(dev->of_node, &drv->pd);
+ if (ret)
+- return ret;
++ goto err_remove_genpd;
+
+ platform_set_drvdata(pdev, drv);
+ cpr_debugfs_init(drv);
+
+ return 0;
++
++err_remove_genpd:
++ pm_genpd_remove(&drv->pd);
++ return ret;
+ }
+
+ static int cpr_remove(struct platform_device *pdev)
+diff --git a/drivers/spi/spi-cadence-xspi.c b/drivers/spi/spi-cadence-xspi.c
+index 9e187f9c6c95e..d28b8bd5b70bc 100644
+--- a/drivers/spi/spi-cadence-xspi.c
++++ b/drivers/spi/spi-cadence-xspi.c
+@@ -177,7 +177,10 @@
+ #define CDNS_XSPI_CMD_FLD_DSEQ_CMD_3(op) ( \
+ FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R3_DCNT_H, \
+ ((op)->data.nbytes >> 16) & 0xffff) | \
+- FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R3_NUM_OF_DUMMY, (op)->dummy.nbytes * 8))
++ FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R3_NUM_OF_DUMMY, \
++ (op)->dummy.buswidth != 0 ? \
++ (((op)->dummy.nbytes * 8) / (op)->dummy.buswidth) : \
++ 0))
+
+ #define CDNS_XSPI_CMD_FLD_DSEQ_CMD_4(op, chipsel) ( \
+ FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R4_BANK, chipsel) | \
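The corrected macro above converts the dummy phase from bytes to bus clock cycles: nbytes dummy bytes on a bus that moves buswidth bits per clock take nbytes * 8 / buswidth cycles, and a zero bus width (no dummy phase configured) must not be divided by. A standalone sketch of the same arithmetic, with illustrative names:

    #include <assert.h>
    #include <stdio.h>

    /*
     * A dummy phase of nbytes on a bus transferring buswidth bits per
     * clock takes nbytes * 8 / buswidth cycles; a zero bus width means
     * no dummy phase and must not divide (the case the fix guards).
     */
    static unsigned int dummy_cycles(unsigned int nbytes, unsigned int buswidth)
    {
        return buswidth ? (nbytes * 8) / buswidth : 0;
    }

    int main(void)
    {
        assert(dummy_cycles(1, 1) == 8);  /* single line: 8 cycles/byte */
        assert(dummy_cycles(1, 4) == 2);  /* quad line: 2 cycles/byte */
        assert(dummy_cycles(3, 0) == 0);  /* no dummy phase at all */
        puts("dummy cycle math ok");
        return 0;
    }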
+diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
+index 6313e7d0cdf87..71c3db60e9687 100644
+--- a/drivers/spi/spidev.c
++++ b/drivers/spi/spidev.c
+@@ -601,7 +601,6 @@ static int spidev_open(struct inode *inode, struct file *filp)
+ if (!spidev->tx_buffer) {
+ spidev->tx_buffer = kmalloc(bufsiz, GFP_KERNEL);
+ if (!spidev->tx_buffer) {
+- dev_dbg(&spidev->spi->dev, "open/ENOMEM\n");
+ status = -ENOMEM;
+ goto err_find_dev;
+ }
+@@ -610,7 +609,6 @@ static int spidev_open(struct inode *inode, struct file *filp)
+ if (!spidev->rx_buffer) {
+ spidev->rx_buffer = kmalloc(bufsiz, GFP_KERNEL);
+ if (!spidev->rx_buffer) {
+- dev_dbg(&spidev->spi->dev, "open/ENOMEM\n");
+ status = -ENOMEM;
+ goto err_alloc_rx_buf;
+ }
+diff --git a/drivers/thermal/gov_fair_share.c b/drivers/thermal/gov_fair_share.c
+index a4ee4661e9cc4..1cfeac16e7ac1 100644
+--- a/drivers/thermal/gov_fair_share.c
++++ b/drivers/thermal/gov_fair_share.c
+@@ -49,11 +49,7 @@ static int get_trip_level(struct thermal_zone_device *tz)
+ static long get_target_state(struct thermal_zone_device *tz,
+ struct thermal_cooling_device *cdev, int percentage, int level)
+ {
+- unsigned long max_state;
+-
+- cdev->ops->get_max_state(cdev, &max_state);
+-
+- return (long)(percentage * level * max_state) / (100 * tz->num_trips);
++ return (long)(percentage * level * cdev->max_state) / (100 * tz->num_trips);
+ }
+
+ /**
+diff --git a/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c b/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
+index 62c0aa5d07837..0a4eaa307156d 100644
+--- a/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
++++ b/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
+@@ -44,11 +44,13 @@ static int int340x_thermal_get_trip_temp(struct thermal_zone_device *zone,
+ int trip, int *temp)
+ {
+ struct int34x_thermal_zone *d = zone->devdata;
+- int i;
++ int i, ret = 0;
+
+ if (d->override_ops && d->override_ops->get_trip_temp)
+ return d->override_ops->get_trip_temp(zone, trip, temp);
+
++ mutex_lock(&d->trip_mutex);
++
+ if (trip < d->aux_trip_nr)
+ *temp = d->aux_trips[trip];
+ else if (trip == d->crt_trip_id)
+@@ -66,10 +68,12 @@ static int int340x_thermal_get_trip_temp(struct thermal_zone_device *zone,
+ }
+ }
+ if (i == INT340X_THERMAL_MAX_ACT_TRIP_COUNT)
+- return -EINVAL;
++ ret = -EINVAL;
+ }
+
+- return 0;
++ mutex_unlock(&d->trip_mutex);
++
++ return ret;
+ }
+
+ static int int340x_thermal_get_trip_type(struct thermal_zone_device *zone,
+@@ -77,11 +81,13 @@ static int int340x_thermal_get_trip_type(struct thermal_zone_device *zone,
+ enum thermal_trip_type *type)
+ {
+ struct int34x_thermal_zone *d = zone->devdata;
+- int i;
++ int i, ret = 0;
+
+ if (d->override_ops && d->override_ops->get_trip_type)
+ return d->override_ops->get_trip_type(zone, trip, type);
+
++ mutex_lock(&d->trip_mutex);
++
+ if (trip < d->aux_trip_nr)
+ *type = THERMAL_TRIP_PASSIVE;
+ else if (trip == d->crt_trip_id)
+@@ -99,10 +105,12 @@ static int int340x_thermal_get_trip_type(struct thermal_zone_device *zone,
+ }
+ }
+ if (i == INT340X_THERMAL_MAX_ACT_TRIP_COUNT)
+- return -EINVAL;
++ ret = -EINVAL;
+ }
+
+- return 0;
++ mutex_unlock(&d->trip_mutex);
++
++ return ret;
+ }
+
+ static int int340x_thermal_set_trip_temp(struct thermal_zone_device *zone,
+@@ -180,6 +188,8 @@ int int340x_thermal_read_trips(struct int34x_thermal_zone *int34x_zone)
+ int trip_cnt = int34x_zone->aux_trip_nr;
+ int i;
+
++ mutex_lock(&int34x_zone->trip_mutex);
++
+ int34x_zone->crt_trip_id = -1;
+ if (!int340x_thermal_get_trip_config(int34x_zone->adev->handle, "_CRT",
+ &int34x_zone->crt_temp))
+@@ -207,6 +217,8 @@ int int340x_thermal_read_trips(struct int34x_thermal_zone *int34x_zone)
+ int34x_zone->act_trips[i].valid = true;
+ }
+
++ mutex_unlock(&int34x_zone->trip_mutex);
++
+ return trip_cnt;
+ }
+ EXPORT_SYMBOL_GPL(int340x_thermal_read_trips);
+@@ -230,6 +242,8 @@ struct int34x_thermal_zone *int340x_thermal_zone_add(struct acpi_device *adev,
+ if (!int34x_thermal_zone)
+ return ERR_PTR(-ENOMEM);
+
++ mutex_init(&int34x_thermal_zone->trip_mutex);
++
+ int34x_thermal_zone->adev = adev;
+ int34x_thermal_zone->override_ops = override_ops;
+
+@@ -281,6 +295,7 @@ err_thermal_zone:
+ acpi_lpat_free_conversion_table(int34x_thermal_zone->lpat_table);
+ kfree(int34x_thermal_zone->aux_trips);
+ err_trip_alloc:
++ mutex_destroy(&int34x_thermal_zone->trip_mutex);
+ kfree(int34x_thermal_zone);
+ return ERR_PTR(ret);
+ }
+@@ -292,6 +307,7 @@ void int340x_thermal_zone_remove(struct int34x_thermal_zone
+ thermal_zone_device_unregister(int34x_thermal_zone->zone);
+ acpi_lpat_free_conversion_table(int34x_thermal_zone->lpat_table);
+ kfree(int34x_thermal_zone->aux_trips);
++ mutex_destroy(&int34x_thermal_zone->trip_mutex);
+ kfree(int34x_thermal_zone);
+ }
+ EXPORT_SYMBOL_GPL(int340x_thermal_zone_remove);
+diff --git a/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.h b/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.h
+index 3b4971df1b33b..8f9872afd0d3c 100644
+--- a/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.h
++++ b/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.h
+@@ -32,6 +32,7 @@ struct int34x_thermal_zone {
+ struct thermal_zone_device_ops *override_ops;
+ void *priv_data;
+ struct acpi_lpat_conversion_table *lpat_table;
++ struct mutex trip_mutex;
+ };
+
+ struct int34x_thermal_zone *int340x_thermal_zone_add(struct acpi_device *,
+diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
+index 615fdda3a5de7..1eae4ec719a8f 100644
+--- a/drivers/thermal/thermal_core.c
++++ b/drivers/thermal/thermal_core.c
+@@ -603,8 +603,7 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
+ struct thermal_instance *pos;
+ struct thermal_zone_device *pos1;
+ struct thermal_cooling_device *pos2;
+- unsigned long max_state;
+- int result, ret;
++ int result;
+
+ if (trip >= tz->num_trips || trip < 0)
+ return -EINVAL;
+@@ -621,15 +620,11 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
+ if (tz != pos1 || cdev != pos2)
+ return -EINVAL;
+
+- ret = cdev->ops->get_max_state(cdev, &max_state);
+- if (ret)
+- return ret;
+-
+ /* lower default 0, upper default max_state */
+ lower = lower == THERMAL_NO_LIMIT ? 0 : lower;
+- upper = upper == THERMAL_NO_LIMIT ? max_state : upper;
++ upper = upper == THERMAL_NO_LIMIT ? cdev->max_state : upper;
+
+- if (lower > upper || upper > max_state)
++ if (lower > upper || upper > cdev->max_state)
+ return -EINVAL;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+@@ -896,12 +891,22 @@ __thermal_cooling_device_register(struct device_node *np,
+ cdev->updated = false;
+ cdev->device.class = &thermal_class;
+ cdev->devdata = devdata;
++
++ ret = cdev->ops->get_max_state(cdev, &cdev->max_state);
++ if (ret) {
++ kfree(cdev->type);
++ goto out_ida_remove;
++ }
++
+ thermal_cooling_device_setup_sysfs(cdev);
++
+ ret = dev_set_name(&cdev->device, "cooling_device%d", cdev->id);
+ if (ret) {
++ kfree(cdev->type);
+ thermal_cooling_device_destroy_sysfs(cdev);
+- goto out_kfree_type;
++ goto out_ida_remove;
+ }
++
+ ret = device_register(&cdev->device);
+ if (ret)
+ goto out_kfree_type;
+@@ -927,6 +932,8 @@ out_kfree_type:
+ thermal_cooling_device_destroy_sysfs(cdev);
+ kfree(cdev->type);
+ put_device(&cdev->device);
++
++ /* thermal_release() takes care of the rest */
+ cdev = NULL;
+ out_ida_remove:
+ ida_free(&thermal_cdev_ida, id);
+diff --git a/drivers/thermal/thermal_sysfs.c b/drivers/thermal/thermal_sysfs.c
+index ec495c7dff035..bd75961254615 100644
+--- a/drivers/thermal/thermal_sysfs.c
++++ b/drivers/thermal/thermal_sysfs.c
+@@ -589,13 +589,8 @@ static ssize_t max_state_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+ {
+ struct thermal_cooling_device *cdev = to_cooling_device(dev);
+- unsigned long state;
+- int ret;
+
+- ret = cdev->ops->get_max_state(cdev, &state);
+- if (ret)
+- return ret;
+- return sprintf(buf, "%ld\n", state);
++ return sprintf(buf, "%ld\n", cdev->max_state);
+ }
+
+ static ssize_t cur_state_show(struct device *dev, struct device_attribute *attr,
+@@ -625,6 +620,10 @@ cur_state_store(struct device *dev, struct device_attribute *attr,
+ if ((long)state < 0)
+ return -EINVAL;
+
++ /* Requested state should be less than max_state + 1 */
++ if (state > cdev->max_state)
++ return -EINVAL;
++
+ mutex_lock(&cdev->lock);
+
+ result = cdev->ops->set_cur_state(cdev, state);
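The thermal changes above follow one pattern: cdev->max_state is read from the driver once at registration, after which the sysfs show path and the new range check on writes use the cached value instead of calling back into the driver. A minimal sketch of that pattern, assuming our own type names:

    #include <errno.h>
    #include <stdio.h>

    /*
     * The maximum state is queried once at registration time and then
     * treated as read-only, so show/store paths never re-enter the
     * driver just to learn an immutable limit.
     */
    struct cooling_dev {
        unsigned long max_state;   /* cached once, then read-only */
        unsigned long cur_state;
    };

    static int set_cur_state(struct cooling_dev *cdev, unsigned long state)
    {
        /* Requested state must lie within [0, max_state]. */
        if (state > cdev->max_state)
            return -EINVAL;
        cdev->cur_state = state;
        return 0;
    }

    int main(void)
    {
        struct cooling_dev cdev = { .max_state = 3 };

        printf("%d\n", set_cur_state(&cdev, 2));  /* 0: accepted */
        printf("%d\n", set_cur_state(&cdev, 4));  /* -22: rejected */
        return 0;
    }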
+diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
+index b048357d21e36..fb5c9e2fc5348 100644
+--- a/drivers/ufs/core/ufshcd.c
++++ b/drivers/ufs/core/ufshcd.c
+@@ -1231,12 +1231,14 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
+ * clock scaling is in progress
+ */
+ ufshcd_scsi_block_requests(hba);
++ mutex_lock(&hba->wb_mutex);
+ down_write(&hba->clk_scaling_lock);
+
+ if (!hba->clk_scaling.is_allowed ||
+ ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
+ ret = -EBUSY;
+ up_write(&hba->clk_scaling_lock);
++ mutex_unlock(&hba->wb_mutex);
+ ufshcd_scsi_unblock_requests(hba);
+ goto out;
+ }
+@@ -1248,12 +1250,16 @@ out:
+ return ret;
+ }
+
+-static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, bool writelock)
++static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, int err, bool scale_up)
+ {
+- if (writelock)
+- up_write(&hba->clk_scaling_lock);
+- else
+- up_read(&hba->clk_scaling_lock);
++ up_write(&hba->clk_scaling_lock);
++
++ /* Enable Write Booster if we have scaled up else disable it */
++ if (ufshcd_enable_wb_if_scaling_up(hba) && !err)
++ ufshcd_wb_toggle(hba, scale_up);
++
++ mutex_unlock(&hba->wb_mutex);
++
+ ufshcd_scsi_unblock_requests(hba);
+ ufshcd_release(hba);
+ }
+@@ -1270,7 +1276,6 @@ static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, bool writelock)
+ static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
+ {
+ int ret = 0;
+- bool is_writelock = true;
+
+ ret = ufshcd_clock_scaling_prepare(hba);
+ if (ret)
+@@ -1299,15 +1304,8 @@ static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
+ }
+ }
+
+- /* Enable Write Booster if we have scaled up else disable it */
+- if (ufshcd_enable_wb_if_scaling_up(hba)) {
+- downgrade_write(&hba->clk_scaling_lock);
+- is_writelock = false;
+- ufshcd_wb_toggle(hba, scale_up);
+- }
+-
+ out_unprepare:
+- ufshcd_clock_scaling_unprepare(hba, is_writelock);
++ ufshcd_clock_scaling_unprepare(hba, ret, scale_up);
+ return ret;
+ }
+
+@@ -6104,9 +6102,11 @@ static void ufshcd_force_error_recovery(struct ufs_hba *hba)
+
+ static void ufshcd_clk_scaling_allow(struct ufs_hba *hba, bool allow)
+ {
++ mutex_lock(&hba->wb_mutex);
+ down_write(&hba->clk_scaling_lock);
+ hba->clk_scaling.is_allowed = allow;
+ up_write(&hba->clk_scaling_lock);
++ mutex_unlock(&hba->wb_mutex);
+ }
+
+ static void ufshcd_clk_scaling_suspend(struct ufs_hba *hba, bool suspend)
+@@ -9773,6 +9773,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
+ /* Initialize mutex for exception event control */
+ mutex_init(&hba->ee_ctrl_mutex);
+
++ mutex_init(&hba->wb_mutex);
+ init_rwsem(&hba->clk_scaling_lock);
+
+ ufshcd_init_clk_gating(hba);
+diff --git a/drivers/usb/dwc3/Kconfig b/drivers/usb/dwc3/Kconfig
+index 03ededa86da1f..864fef540a394 100644
+--- a/drivers/usb/dwc3/Kconfig
++++ b/drivers/usb/dwc3/Kconfig
+@@ -3,6 +3,7 @@
+ config USB_DWC3
+ tristate "DesignWare USB3 DRD Core Support"
+ depends on (USB || USB_GADGET) && HAS_DMA
++ depends on (EXTCON || EXTCON=n)
+ select USB_XHCI_PLATFORM if USB_XHCI_HCD
+ select USB_ROLE_SWITCH if USB_DWC3_DUAL_ROLE
+ help
+@@ -44,7 +45,6 @@ config USB_DWC3_GADGET
+ config USB_DWC3_DUAL_ROLE
+ bool "Dual Role mode"
+ depends on ((USB=y || USB=USB_DWC3) && (USB_GADGET=y || USB_GADGET=USB_DWC3))
+- depends on (EXTCON=y || EXTCON=USB_DWC3)
+ help
+ This is the default mode of working of DWC3 controller where
+ both host and gadget features are enabled.
+diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
+index 73dc10a77cdea..523a961b910bb 100644
+--- a/drivers/usb/gadget/function/f_fs.c
++++ b/drivers/usb/gadget/function/f_fs.c
+@@ -279,6 +279,9 @@ static int __ffs_ep0_queue_wait(struct ffs_data *ffs, char *data, size_t len)
+ struct usb_request *req = ffs->ep0req;
+ int ret;
+
++ if (!req)
++ return -EINVAL;
++
+ req->zero = len < le16_to_cpu(ffs->ev.setup.wLength);
+
+ spin_unlock_irq(&ffs->ev.waitq.lock);
+@@ -1892,10 +1895,14 @@ static void functionfs_unbind(struct ffs_data *ffs)
+ ENTER();
+
+ if (!WARN_ON(!ffs->gadget)) {
++ /* dequeue before freeing ep0req */
++ usb_ep_dequeue(ffs->gadget->ep0, ffs->ep0req);
++ mutex_lock(&ffs->mutex);
+ usb_ep_free_request(ffs->gadget->ep0, ffs->ep0req);
+ ffs->ep0req = NULL;
+ ffs->gadget = NULL;
+ clear_bit(FFS_FL_BOUND, &ffs->flags);
++ mutex_unlock(&ffs->mutex);
+ ffs_data_put(ffs);
+ }
+ }
+diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
+index eabe519013e78..1292241d581a6 100644
+--- a/drivers/usb/typec/ucsi/ucsi.c
++++ b/drivers/usb/typec/ucsi/ucsi.c
+@@ -187,6 +187,7 @@ EXPORT_SYMBOL_GPL(ucsi_send_command);
+
+ struct ucsi_work {
+ struct delayed_work work;
++ struct list_head node;
+ unsigned long delay;
+ unsigned int count;
+ struct ucsi_connector *con;
+@@ -202,6 +203,7 @@ static void ucsi_poll_worker(struct work_struct *work)
+ mutex_lock(&con->lock);
+
+ if (!con->partner) {
++ list_del(&uwork->node);
+ mutex_unlock(&con->lock);
+ kfree(uwork);
+ return;
+@@ -209,10 +211,12 @@ static void ucsi_poll_worker(struct work_struct *work)
+
+ ret = uwork->cb(con);
+
+- if (uwork->count-- && (ret == -EBUSY || ret == -ETIMEDOUT))
++ if (uwork->count-- && (ret == -EBUSY || ret == -ETIMEDOUT)) {
+ queue_delayed_work(con->wq, &uwork->work, uwork->delay);
+- else
++ } else {
++ list_del(&uwork->node);
+ kfree(uwork);
++ }
+
+ mutex_unlock(&con->lock);
+ }
+@@ -236,6 +240,7 @@ static int ucsi_partner_task(struct ucsi_connector *con,
+ uwork->con = con;
+ uwork->cb = cb;
+
++ list_add_tail(&uwork->node, &con->partner_tasks);
+ queue_delayed_work(con->wq, &uwork->work, delay);
+
+ return 0;
+@@ -1056,6 +1061,7 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
+ INIT_WORK(&con->work, ucsi_handle_connector_change);
+ init_completion(&con->complete);
+ mutex_init(&con->lock);
++ INIT_LIST_HEAD(&con->partner_tasks);
+ con->num = index + 1;
+ con->ucsi = ucsi;
+
+@@ -1420,8 +1426,20 @@ void ucsi_unregister(struct ucsi *ucsi)
+ ucsi_unregister_altmodes(&ucsi->connector[i],
+ UCSI_RECIPIENT_CON);
+ ucsi_unregister_port_psy(&ucsi->connector[i]);
+- if (ucsi->connector[i].wq)
++
++ if (ucsi->connector[i].wq) {
++ struct ucsi_work *uwork;
++
++ mutex_lock(&ucsi->connector[i].lock);
++ /*
++ * queue delayed items immediately so they can execute
++ * and free themselves before the wq is destroyed
++ */
++ list_for_each_entry(uwork, &ucsi->connector[i].partner_tasks, node)
++ mod_delayed_work(ucsi->connector[i].wq, &uwork->work, 0);
++ mutex_unlock(&ucsi->connector[i].lock);
+ destroy_workqueue(ucsi->connector[i].wq);
++ }
+ typec_unregister_port(ucsi->connector[i].port);
+ }
+
+diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h
+index c968474ee5473..60ce9fb6e7450 100644
+--- a/drivers/usb/typec/ucsi/ucsi.h
++++ b/drivers/usb/typec/ucsi/ucsi.h
+@@ -322,6 +322,7 @@ struct ucsi_connector {
+ struct work_struct work;
+ struct completion complete;
+ struct workqueue_struct *wq;
++ struct list_head partner_tasks;
+
+ struct typec_port *port;
+ struct typec_partner *partner;
+diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
+index 23c24fe98c00d..2209372f236db 100644
+--- a/drivers/vfio/vfio_iommu_type1.c
++++ b/drivers/vfio/vfio_iommu_type1.c
+@@ -1856,24 +1856,33 @@ unwind:
+ * significantly boosts non-hugetlbfs mappings and doesn't seem to hurt when
+ * hugetlbfs is in use.
+ */
+-static void vfio_test_domain_fgsp(struct vfio_domain *domain)
++static void vfio_test_domain_fgsp(struct vfio_domain *domain, struct list_head *regions)
+ {
+- struct page *pages;
+ int ret, order = get_order(PAGE_SIZE * 2);
++ struct vfio_iova *region;
++ struct page *pages;
++ dma_addr_t start;
+
+ pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
+ if (!pages)
+ return;
+
+- ret = iommu_map(domain->domain, 0, page_to_phys(pages), PAGE_SIZE * 2,
+- IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE);
+- if (!ret) {
+- size_t unmapped = iommu_unmap(domain->domain, 0, PAGE_SIZE);
++ list_for_each_entry(region, regions, list) {
++ start = ALIGN(region->start, PAGE_SIZE * 2);
++ if (start >= region->end || (region->end - start < PAGE_SIZE * 2))
++ continue;
+
+- if (unmapped == PAGE_SIZE)
+- iommu_unmap(domain->domain, PAGE_SIZE, PAGE_SIZE);
+- else
+- domain->fgsp = true;
++ ret = iommu_map(domain->domain, start, page_to_phys(pages), PAGE_SIZE * 2,
++ IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE);
++ if (!ret) {
++ size_t unmapped = iommu_unmap(domain->domain, start, PAGE_SIZE);
++
++ if (unmapped == PAGE_SIZE)
++ iommu_unmap(domain->domain, start + PAGE_SIZE, PAGE_SIZE);
++ else
++ domain->fgsp = true;
++ }
++ break;
+ }
+
+ __free_pages(pages, order);
+@@ -2326,7 +2335,7 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
+ }
+ }
+
+- vfio_test_domain_fgsp(domain);
++ vfio_test_domain_fgsp(domain, &iova_copy);
+
+ /* replay mappings on new domains */
+ ret = vfio_iommu_replay(iommu, domain);
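The reworked probe above no longer assumes IOVA 0 is mappable; it walks the valid IOVA regions for the first spot that can hold a two-page, two-page-aligned test mapping. A standalone sketch of just the alignment and size check, with assumed names:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096u

    /*
     * Pick the first 2-page-aligned address inside [start, end) that
     * still leaves room for a 2-page test mapping; return UINT64_MAX
     * if the region is too small, so the caller moves to the next one.
     */
    static uint64_t pick_test_iova(uint64_t start, uint64_t end)
    {
        uint64_t a = (start + 2 * PAGE_SIZE - 1) & ~(uint64_t)(2 * PAGE_SIZE - 1);

        if (a >= end || end - a < 2 * PAGE_SIZE)
            return UINT64_MAX;
        return a;
    }

    int main(void)
    {
        printf("%#llx\n", (unsigned long long)pick_test_iova(0x1000, 0x100000));
        printf("%#llx\n", (unsigned long long)pick_test_iova(0x1000, 0x3000));
        return 0;
    }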
+diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
+index f2ae2e563dc54..4a2ddf730a3ac 100644
+--- a/drivers/w1/w1.c
++++ b/drivers/w1/w1.c
+@@ -1166,6 +1166,8 @@ int w1_process(void *data)
+ /* remainder if it woke up early */
+ unsigned long jremain = 0;
+
++ atomic_inc(&dev->refcnt);
++
+ for (;;) {
+
+ if (!jremain && dev->search_count) {
+@@ -1193,8 +1195,10 @@ int w1_process(void *data)
+ */
+ mutex_unlock(&dev->list_mutex);
+
+- if (kthread_should_stop())
++ if (kthread_should_stop()) {
++ __set_current_state(TASK_RUNNING);
+ break;
++ }
+
+ /* Only sleep when the search is active. */
+ if (dev->search_count) {
+diff --git a/drivers/w1/w1_int.c b/drivers/w1/w1_int.c
+index b3e1792d9c49f..3a71c5eb2f837 100644
+--- a/drivers/w1/w1_int.c
++++ b/drivers/w1/w1_int.c
+@@ -51,10 +51,9 @@ static struct w1_master *w1_alloc_dev(u32 id, int slave_count, int slave_ttl,
+ dev->search_count = w1_search_count;
+ dev->enable_pullup = w1_enable_pullup;
+
+- /* 1 for w1_process to decrement
+- * 1 for __w1_remove_master_device to decrement
++ /* For __w1_remove_master_device to decrement
+ */
+- atomic_set(&dev->refcnt, 2);
++ atomic_set(&dev->refcnt, 1);
+
+ INIT_LIST_HEAD(&dev->slist);
+ INIT_LIST_HEAD(&dev->async_list);
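The w1 hunks above change reference ownership: w1_alloc_dev() now credits only the remover's reference, and w1_process() takes its own when the kthread actually starts, so a master whose thread never runs is not leaked. A userspace sketch of that ownership rule with C11 atomics; the names are illustrative:

    #include <stdatomic.h>
    #include <stdio.h>

    struct master {
        atomic_int refcnt;
    };

    static void put_master(struct master *dev)
    {
        if (atomic_fetch_sub(&dev->refcnt, 1) == 1)
            printf("freeing master\n");   /* last reference dropped */
    }

    static void thread_fn(struct master *dev)
    {
        atomic_fetch_add(&dev->refcnt, 1);  /* thread takes its own ref */
        /* ... main loop ... */
        put_master(dev);                    /* released when it exits */
    }

    int main(void)
    {
        struct master dev = { .refcnt = 1 }; /* creator's reference only */

        thread_fn(&dev);   /* simulate the thread running to completion */
        put_master(&dev);  /* creator drops its reference: now freed */
        return 0;
    }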
+diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c
+index 1826e8e671251..9b569278788a4 100644
+--- a/drivers/xen/pvcalls-front.c
++++ b/drivers/xen/pvcalls-front.c
+@@ -225,6 +225,8 @@ again:
+ return IRQ_HANDLED;
+ }
+
++static void free_active_ring(struct sock_mapping *map);
++
+ static void pvcalls_front_free_map(struct pvcalls_bedata *bedata,
+ struct sock_mapping *map)
+ {
+@@ -240,7 +242,7 @@ static void pvcalls_front_free_map(struct pvcalls_bedata *bedata,
+ for (i = 0; i < (1 << PVCALLS_RING_ORDER); i++)
+ gnttab_end_foreign_access(map->active.ring->ref[i], NULL);
+ gnttab_end_foreign_access(map->active.ref, NULL);
+- free_page((unsigned long)map->active.ring);
++ free_active_ring(map);
+
+ kfree(map);
+ }
+diff --git a/fs/affs/file.c b/fs/affs/file.c
+index cefa222f7881c..8daeed31e1af9 100644
+--- a/fs/affs/file.c
++++ b/fs/affs/file.c
+@@ -880,7 +880,7 @@ affs_truncate(struct inode *inode)
+ if (inode->i_size > AFFS_I(inode)->mmu_private) {
+ struct address_space *mapping = inode->i_mapping;
+ struct page *page;
+- void *fsdata;
++ void *fsdata = NULL;
+ loff_t isize = inode->i_size;
+ int res;
+
+diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
+index 9e6d48ff45972..a3febabacec04 100644
+--- a/fs/btrfs/ctree.h
++++ b/fs/btrfs/ctree.h
+@@ -590,6 +590,12 @@ enum {
+ /* Indicate we have to finish a zone to do next allocation. */
+ BTRFS_FS_NEED_ZONE_FINISH,
+
++ /*
++ * Indicate metadata over-commit is disabled. This is set when active
++ * zone tracking is needed.
++ */
++ BTRFS_FS_NO_OVERCOMMIT,
++
+ #if BITS_PER_LONG == 32
+ /* Indicate if we have error/warn message printed on 32bit systems */
+ BTRFS_FS_32BIT_ERROR,
+diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c
+index f171bf8756336..65c010159fb5f 100644
+--- a/fs/btrfs/space-info.c
++++ b/fs/btrfs/space-info.c
+@@ -404,7 +404,8 @@ int btrfs_can_overcommit(struct btrfs_fs_info *fs_info,
+ return 0;
+
+ used = btrfs_space_info_used(space_info, true);
+- if (btrfs_is_zoned(fs_info) && (space_info->flags & BTRFS_BLOCK_GROUP_METADATA))
++ if (test_bit(BTRFS_FS_NO_OVERCOMMIT, &fs_info->flags) &&
++ (space_info->flags & BTRFS_BLOCK_GROUP_METADATA))
+ avail = 0;
+ else
+ avail = calc_available_free_space(fs_info, space_info, flush);
+diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
+index c9e2b0c853099..056f002263db5 100644
+--- a/fs/btrfs/zoned.c
++++ b/fs/btrfs/zoned.c
+@@ -538,6 +538,8 @@ int btrfs_get_dev_zone_info(struct btrfs_device *device, bool populate_cache)
+ }
+ atomic_set(&zone_info->active_zones_left,
+ max_active_zones - nactive);
++	/* Overcommit does not work well with active zone tracking. */
++ set_bit(BTRFS_FS_NO_OVERCOMMIT, &fs_info->flags);
+ }
+
+ /* Validate superblock log */
+diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
+index 5db73c0f792a5..cbc18b4a9cb20 100644
+--- a/fs/cifs/cifsencrypt.c
++++ b/fs/cifs/cifsencrypt.c
+@@ -278,6 +278,7 @@ build_avpair_blob(struct cifs_ses *ses, const struct nls_table *nls_cp)
+ * ( for NTLMSSP_AV_NB_DOMAIN_NAME followed by NTLMSSP_AV_EOL ) +
+ * unicode length of a netbios domain name
+ */
++ kfree_sensitive(ses->auth_key.response);
+ ses->auth_key.len = size + 2 * dlen;
+ ses->auth_key.response = kzalloc(ses->auth_key.len, GFP_KERNEL);
+ if (!ses->auth_key.response) {
+diff --git a/fs/cifs/dfs_cache.c b/fs/cifs/dfs_cache.c
+index e70915ad75410..4302dc75843cb 100644
+--- a/fs/cifs/dfs_cache.c
++++ b/fs/cifs/dfs_cache.c
+@@ -792,26 +792,27 @@ static int get_dfs_referral(const unsigned int xid, struct cifs_ses *ses, const
+ */
+ static int cache_refresh_path(const unsigned int xid, struct cifs_ses *ses, const char *path)
+ {
+- int rc;
+- struct cache_entry *ce;
+ struct dfs_info3_param *refs = NULL;
++ struct cache_entry *ce;
+ int numrefs = 0;
+- bool newent = false;
++ int rc;
+
+ cifs_dbg(FYI, "%s: search path: %s\n", __func__, path);
+
+- down_write(&htable_rw_lock);
++ down_read(&htable_rw_lock);
+
+ ce = lookup_cache_entry(path);
+- if (!IS_ERR(ce)) {
+- if (!cache_entry_expired(ce)) {
+- dump_ce(ce);
+- up_write(&htable_rw_lock);
+- return 0;
+- }
+- } else {
+- newent = true;
++ if (!IS_ERR(ce) && !cache_entry_expired(ce)) {
++ up_read(&htable_rw_lock);
++ return 0;
+ }
++ /*
++ * Unlock shared access as we don't want to hold any locks while getting
++ * a new referral. The @ses used for performing the I/O could be
++ * reconnecting and it acquires @htable_rw_lock to look up the dfs cache
++ * in order to failover -- if necessary.
++ */
++ up_read(&htable_rw_lock);
+
+ /*
+ * Either the entry was not found, or it is expired.
+@@ -819,19 +820,22 @@ static int cache_refresh_path(const unsigned int xid, struct cifs_ses *ses, cons
+ */
+ rc = get_dfs_referral(xid, ses, path, &refs, &numrefs);
+ if (rc)
+- goto out_unlock;
++ goto out;
+
+ dump_refs(refs, numrefs);
+
+- if (!newent) {
+- rc = update_cache_entry_locked(ce, refs, numrefs);
+- goto out_unlock;
++ down_write(&htable_rw_lock);
++ /* Re-check as another task might have it added or refreshed already */
++ ce = lookup_cache_entry(path);
++ if (!IS_ERR(ce)) {
++ if (cache_entry_expired(ce))
++ rc = update_cache_entry_locked(ce, refs, numrefs);
++ } else {
++ rc = add_cache_entry_locked(refs, numrefs);
+ }
+
+- rc = add_cache_entry_locked(refs, numrefs);
+-
+-out_unlock:
+ up_write(&htable_rw_lock);
++out:
+ free_dfs_info_array(refs, numrefs);
+ return rc;
+ }
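The cache_refresh_path() rework above is a classic optimistic pattern: look up under the shared lock, drop every lock across the slow network fetch, then retake the lock exclusively and re-check before inserting, because another task may have refreshed the entry meanwhile. A compact sketch with POSIX rwlocks; lookup_fresh(), fetch_referral() and install_entry() are assumed helpers, not cifs functions:

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_rwlock_t cache_lock = PTHREAD_RWLOCK_INITIALIZER;

    extern bool lookup_fresh(const char *path);   /* assumed helpers */
    extern int fetch_referral(const char *path);  /* slow, no locks held */
    extern void install_entry(const char *path);

    static int refresh(const char *path)
    {
        int rc;

        pthread_rwlock_rdlock(&cache_lock);
        if (lookup_fresh(path)) {             /* fast path: still valid */
            pthread_rwlock_unlock(&cache_lock);
            return 0;
        }
        pthread_rwlock_unlock(&cache_lock);   /* no locks across the I/O */

        rc = fetch_referral(path);
        if (rc)
            return rc;

        pthread_rwlock_wrlock(&cache_lock);
        if (!lookup_fresh(path))              /* re-check before install */
            install_entry(path);
        pthread_rwlock_unlock(&cache_lock);
        return 0;
    }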
+diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
+index 0b842a07e1579..c47b254f0d1e2 100644
+--- a/fs/cifs/sess.c
++++ b/fs/cifs/sess.c
+@@ -815,6 +815,7 @@ int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len,
+ return -EINVAL;
+ }
+ if (tilen) {
++ kfree_sensitive(ses->auth_key.response);
+ ses->auth_key.response = kmemdup(bcc_ptr + tioffset, tilen,
+ GFP_KERNEL);
+ if (!ses->auth_key.response) {
+@@ -1428,6 +1429,7 @@ sess_auth_kerberos(struct sess_data *sess_data)
+ goto out_put_spnego_key;
+ }
+
++ kfree_sensitive(ses->auth_key.response);
+ ses->auth_key.response = kmemdup(msg->data, msg->sesskey_len,
+ GFP_KERNEL);
+ if (!ses->auth_key.response) {
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index 92f39052d3117..2c9ffa921e6f6 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -1453,6 +1453,7 @@ SMB2_auth_kerberos(struct SMB2_sess_data *sess_data)
+
+ /* keep session key if binding */
+ if (!is_binding) {
++ kfree_sensitive(ses->auth_key.response);
+ ses->auth_key.response = kmemdup(msg->data, msg->sesskey_len,
+ GFP_KERNEL);
+ if (!ses->auth_key.response) {
+diff --git a/fs/cifs/smbdirect.c b/fs/cifs/smbdirect.c
+index 90789aaa6567e..8c816b25ce7c6 100644
+--- a/fs/cifs/smbdirect.c
++++ b/fs/cifs/smbdirect.c
+@@ -1405,6 +1405,7 @@ void smbd_destroy(struct TCP_Server_Info *server)
+ destroy_workqueue(info->workqueue);
+ log_rdma_event(INFO, "rdma session destroyed\n");
+ kfree(info);
++ server->smbd_conn = NULL;
+ }
+
+ /*
+diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
+index cf4871834ebb2..ee7c88c9b5afa 100644
+--- a/fs/erofs/zdata.c
++++ b/fs/erofs/zdata.c
+@@ -1047,12 +1047,12 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
+
+ if (!be->decompressed_pages)
+ be->decompressed_pages =
+- kvcalloc(be->nr_pages, sizeof(struct page *),
+- GFP_KERNEL | __GFP_NOFAIL);
++ kcalloc(be->nr_pages, sizeof(struct page *),
++ GFP_KERNEL | __GFP_NOFAIL);
+ if (!be->compressed_pages)
+ be->compressed_pages =
+- kvcalloc(pclusterpages, sizeof(struct page *),
+- GFP_KERNEL | __GFP_NOFAIL);
++ kcalloc(pclusterpages, sizeof(struct page *),
++ GFP_KERNEL | __GFP_NOFAIL);
+
+ z_erofs_parse_out_bvecs(be);
+ err2 = z_erofs_parse_in_bvecs(be, &overlapped);
+@@ -1100,7 +1100,7 @@ out:
+ }
+ if (be->compressed_pages < be->onstack_pages ||
+ be->compressed_pages >= be->onstack_pages + Z_EROFS_ONSTACK_PAGES)
+- kvfree(be->compressed_pages);
++ kfree(be->compressed_pages);
+ z_erofs_fill_other_copies(be, err);
+
+ for (i = 0; i < be->nr_pages; ++i) {
+@@ -1119,7 +1119,7 @@ out:
+ }
+
+ if (be->decompressed_pages != be->onstack_pages)
+- kvfree(be->decompressed_pages);
++ kfree(be->decompressed_pages);
+
+ pcl->length = 0;
+ pcl->partial = true;
+diff --git a/fs/ksmbd/connection.c b/fs/ksmbd/connection.c
+index fd0a288af299e..56be077e5d8ac 100644
+--- a/fs/ksmbd/connection.c
++++ b/fs/ksmbd/connection.c
+@@ -280,7 +280,7 @@ int ksmbd_conn_handler_loop(void *p)
+ {
+ struct ksmbd_conn *conn = (struct ksmbd_conn *)p;
+ struct ksmbd_transport *t = conn->transport;
+- unsigned int pdu_size;
++ unsigned int pdu_size, max_allowed_pdu_size;
+ char hdr_buf[4] = {0,};
+ int size;
+
+@@ -305,13 +305,26 @@ int ksmbd_conn_handler_loop(void *p)
+ pdu_size = get_rfc1002_len(hdr_buf);
+ ksmbd_debug(CONN, "RFC1002 header %u bytes\n", pdu_size);
+
++ if (conn->status == KSMBD_SESS_GOOD)
++ max_allowed_pdu_size =
++ SMB3_MAX_MSGSIZE + conn->vals->max_write_size;
++ else
++ max_allowed_pdu_size = SMB3_MAX_MSGSIZE;
++
++ if (pdu_size > max_allowed_pdu_size) {
++ pr_err_ratelimited("PDU length(%u) excceed maximum allowed pdu size(%u) on connection(%d)\n",
++ pdu_size, max_allowed_pdu_size,
++ conn->status);
++ break;
++ }
++
+ /*
+ * Check if pdu size is valid (min : smb header size,
+ * max : 0x00FFFFFF).
+ */
+ if (pdu_size < __SMB2_HEADER_STRUCTURE_SIZE ||
+ pdu_size > MAX_STREAM_PROT_LEN) {
+- continue;
++ break;
+ }
+
+ /* 4 for rfc1002 length field */
+diff --git a/fs/ksmbd/ksmbd_netlink.h b/fs/ksmbd/ksmbd_netlink.h
+index ff07c67f4565e..ce866ff159bfe 100644
+--- a/fs/ksmbd/ksmbd_netlink.h
++++ b/fs/ksmbd/ksmbd_netlink.h
+@@ -105,7 +105,8 @@ struct ksmbd_startup_request {
+ __u32 sub_auth[3]; /* Subauth value for Security ID */
+ __u32 smb2_max_credits; /* MAX credits */
+ __u32 smbd_max_io_size; /* smbd read write size */
+- __u32 reserved[127]; /* Reserved room */
++ __u32 max_connections; /* Number of maximum simultaneous connections */
++ __u32 reserved[126]; /* Reserved room */
+ __u32 ifc_list_sz; /* interfaces list size */
+ __s8 ____payload[];
+ };
+diff --git a/fs/ksmbd/ndr.c b/fs/ksmbd/ndr.c
+index 0ae8d08d85a87..4d9e0b54e3dbf 100644
+--- a/fs/ksmbd/ndr.c
++++ b/fs/ksmbd/ndr.c
+@@ -242,7 +242,7 @@ int ndr_decode_dos_attr(struct ndr *n, struct xattr_dos_attrib *da)
+ return ret;
+
+ if (da->version != 3 && da->version != 4) {
+- pr_err("v%d version is not supported\n", da->version);
++ ksmbd_debug(VFS, "v%d version is not supported\n", da->version);
+ return -EINVAL;
+ }
+
+@@ -251,7 +251,7 @@ int ndr_decode_dos_attr(struct ndr *n, struct xattr_dos_attrib *da)
+ return ret;
+
+ if (da->version != version2) {
+- pr_err("ndr version mismatched(version: %d, version2: %d)\n",
++ ksmbd_debug(VFS, "ndr version mismatched(version: %d, version2: %d)\n",
+ da->version, version2);
+ return -EINVAL;
+ }
+@@ -457,7 +457,7 @@ int ndr_decode_v4_ntacl(struct ndr *n, struct xattr_ntacl *acl)
+ if (ret)
+ return ret;
+ if (acl->version != 4) {
+- pr_err("v%d version is not supported\n", acl->version);
++ ksmbd_debug(VFS, "v%d version is not supported\n", acl->version);
+ return -EINVAL;
+ }
+
+@@ -465,7 +465,7 @@ int ndr_decode_v4_ntacl(struct ndr *n, struct xattr_ntacl *acl)
+ if (ret)
+ return ret;
+ if (acl->version != version2) {
+- pr_err("ndr version mismatched(version: %d, version2: %d)\n",
++ ksmbd_debug(VFS, "ndr version mismatched(version: %d, version2: %d)\n",
+ acl->version, version2);
+ return -EINVAL;
+ }
+diff --git a/fs/ksmbd/server.h b/fs/ksmbd/server.h
+index ac9d932f8c8aa..db72781817603 100644
+--- a/fs/ksmbd/server.h
++++ b/fs/ksmbd/server.h
+@@ -41,6 +41,7 @@ struct ksmbd_server_config {
+ unsigned int share_fake_fscaps;
+ struct smb_sid domain_sid;
+ unsigned int auth_mechs;
++ unsigned int max_connections;
+
+ char *conf[SERVER_CONF_WORK_GROUP + 1];
+ };
+diff --git a/fs/ksmbd/smb2pdu.c b/fs/ksmbd/smb2pdu.c
+index 533742ebcb379..9b16ee657b51a 100644
+--- a/fs/ksmbd/smb2pdu.c
++++ b/fs/ksmbd/smb2pdu.c
+@@ -8657,6 +8657,7 @@ int smb3_decrypt_req(struct ksmbd_work *work)
+ bool smb3_11_final_sess_setup_resp(struct ksmbd_work *work)
+ {
+ struct ksmbd_conn *conn = work->conn;
++ struct ksmbd_session *sess = work->sess;
+ struct smb2_hdr *rsp = smb2_get_msg(work->response_buf);
+
+ if (conn->dialect < SMB30_PROT_ID)
+@@ -8666,6 +8667,7 @@ bool smb3_11_final_sess_setup_resp(struct ksmbd_work *work)
+ rsp = ksmbd_resp_buf_next(work);
+
+ if (le16_to_cpu(rsp->Command) == SMB2_SESSION_SETUP_HE &&
++ sess->user && !user_guest(sess->user) &&
+ rsp->Status == STATUS_SUCCESS)
+ return true;
+ return false;
+diff --git a/fs/ksmbd/smb2pdu.h b/fs/ksmbd/smb2pdu.h
+index 092fdd3f87505..f4baa9800f6ee 100644
+--- a/fs/ksmbd/smb2pdu.h
++++ b/fs/ksmbd/smb2pdu.h
+@@ -24,8 +24,9 @@
+
+ #define SMB21_DEFAULT_IOSIZE (1024 * 1024)
+ #define SMB3_DEFAULT_TRANS_SIZE (1024 * 1024)
+-#define SMB3_MIN_IOSIZE (64 * 1024)
+-#define SMB3_MAX_IOSIZE (8 * 1024 * 1024)
++#define SMB3_MIN_IOSIZE (64 * 1024)
++#define SMB3_MAX_IOSIZE (8 * 1024 * 1024)
++#define SMB3_MAX_MSGSIZE (4 * 4096)
+
+ /*
+ * Definitions for SMB2 Protocol Data Units (network frames)
+diff --git a/fs/ksmbd/transport_ipc.c b/fs/ksmbd/transport_ipc.c
+index c9aca21637d5b..40c721f9227e4 100644
+--- a/fs/ksmbd/transport_ipc.c
++++ b/fs/ksmbd/transport_ipc.c
+@@ -308,6 +308,9 @@ static int ipc_server_config_on_startup(struct ksmbd_startup_request *req)
+ if (req->smbd_max_io_size)
+ init_smbd_max_io_size(req->smbd_max_io_size);
+
++ if (req->max_connections)
++ server_conf.max_connections = req->max_connections;
++
+ ret = ksmbd_set_netbios_name(req->netbios_name);
+ ret |= ksmbd_set_server_string(req->server_string);
+ ret |= ksmbd_set_work_group(req->work_group);
+diff --git a/fs/ksmbd/transport_tcp.c b/fs/ksmbd/transport_tcp.c
+index 4c6bd0b699791..603893fd87f57 100644
+--- a/fs/ksmbd/transport_tcp.c
++++ b/fs/ksmbd/transport_tcp.c
+@@ -15,6 +15,8 @@
+ #define IFACE_STATE_DOWN BIT(0)
+ #define IFACE_STATE_CONFIGURED BIT(1)
+
++static atomic_t active_num_conn;
++
+ struct interface {
+ struct task_struct *ksmbd_kthread;
+ struct socket *ksmbd_socket;
+@@ -185,8 +187,10 @@ static int ksmbd_tcp_new_connection(struct socket *client_sk)
+ struct tcp_transport *t;
+
+ t = alloc_transport(client_sk);
+- if (!t)
++ if (!t) {
++ sock_release(client_sk);
+ return -ENOMEM;
++ }
+
+ csin = KSMBD_TCP_PEER_SOCKADDR(KSMBD_TRANS(t)->conn);
+ if (kernel_getpeername(client_sk, csin) < 0) {
+@@ -239,6 +243,15 @@ static int ksmbd_kthread_fn(void *p)
+ continue;
+ }
+
++ if (server_conf.max_connections &&
++ atomic_inc_return(&active_num_conn) >= server_conf.max_connections) {
++ pr_info_ratelimited("Limit the maximum number of connections(%u)\n",
++ atomic_read(&active_num_conn));
++ atomic_dec(&active_num_conn);
++ sock_release(client_sk);
++ continue;
++ }
++
+ ksmbd_debug(CONN, "connect success: accepted new connection\n");
+ client_sk->sk->sk_rcvtimeo = KSMBD_TCP_RECV_TIMEOUT;
+ client_sk->sk->sk_sndtimeo = KSMBD_TCP_SEND_TIMEOUT;
+@@ -368,6 +381,8 @@ static int ksmbd_tcp_writev(struct ksmbd_transport *t, struct kvec *iov,
+ static void ksmbd_tcp_disconnect(struct ksmbd_transport *t)
+ {
+ free_transport(TCP_TRANS(t));
++ if (server_conf.max_connections)
++ atomic_dec(&active_num_conn);
+ }
+
+ static void tcp_destroy_socket(struct socket *ksmbd_socket)
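The accept loop above enforces max_connections with a bump-then-check: the counter is atomically incremented first and rolled back if the result is over the cap, so two racing accepts cannot both slip under it. A small userspace sketch of the same pattern (our names; this variant admits exactly max concurrent connections):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_int active_conn;

    static bool try_accept(int max_connections)
    {
        if (max_connections &&
            atomic_fetch_add(&active_conn, 1) + 1 > max_connections) {
            atomic_fetch_sub(&active_conn, 1);  /* roll back our bump */
            return false;                       /* reject connection */
        }
        return true;
    }

    static void disconnect(int max_connections)
    {
        if (max_connections)
            atomic_fetch_sub(&active_conn, 1);
    }

    int main(void)
    {
        printf("%d\n", try_accept(1));  /* 1: first connection accepted */
        printf("%d\n", try_accept(1));  /* 0: second rejected at the cap */
        disconnect(1);
        printf("%d\n", try_accept(1));  /* 1: slot freed, accepted again */
        return 0;
    }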
+diff --git a/fs/nfsd/filecache.c b/fs/nfsd/filecache.c
+index ea6fb0e6b1655..142b3c928f76e 100644
+--- a/fs/nfsd/filecache.c
++++ b/fs/nfsd/filecache.c
+@@ -638,6 +638,39 @@ static struct shrinker nfsd_file_shrinker = {
+ .seeks = 1,
+ };
+
++/**
++ * nfsd_file_cond_queue - conditionally unhash and queue a nfsd_file
++ * @nf: nfsd_file to attempt to queue
++ * @dispose: private list to queue successfully-put objects
++ *
++ * Unhash an nfsd_file, try to get a reference to it, and then put that
++ * reference. If it's the last reference, queue it to the dispose list.
++ */
++static void
++nfsd_file_cond_queue(struct nfsd_file *nf, struct list_head *dispose)
++ __must_hold(RCU)
++{
++ int decrement = 1;
++
++ /* If we raced with someone else unhashing, ignore it */
++ if (!nfsd_file_unhash(nf))
++ return;
++
++ /* If we can't get a reference, ignore it */
++ if (!nfsd_file_get(nf))
++ return;
++
++ /* Extra decrement if we remove from the LRU */
++ if (nfsd_file_lru_remove(nf))
++ ++decrement;
++
++ /* If refcount goes to 0, then put on the dispose list */
++ if (refcount_sub_and_test(decrement, &nf->nf_ref)) {
++ list_add(&nf->nf_lru, dispose);
++ trace_nfsd_file_closing(nf);
++ }
++}
++
+ /**
+ * nfsd_file_queue_for_close: try to close out any open nfsd_files for an inode
+ * @inode: inode on which to close out nfsd_files
+@@ -665,30 +698,11 @@ nfsd_file_queue_for_close(struct inode *inode, struct list_head *dispose)
+
+ rcu_read_lock();
+ do {
+- int decrement = 1;
+-
+ nf = rhashtable_lookup(&nfsd_file_rhash_tbl, &key,
+ nfsd_file_rhash_params);
+ if (!nf)
+ break;
+-
+- /* If we raced with someone else unhashing, ignore it */
+- if (!nfsd_file_unhash(nf))
+- continue;
+-
+- /* If we can't get a reference, ignore it */
+- if (!nfsd_file_get(nf))
+- continue;
+-
+- /* Extra decrement if we remove from the LRU */
+- if (nfsd_file_lru_remove(nf))
+- ++decrement;
+-
+- /* If refcount goes to 0, then put on the dispose list */
+- if (refcount_sub_and_test(decrement, &nf->nf_ref)) {
+- list_add(&nf->nf_lru, dispose);
+- trace_nfsd_file_closing(nf);
+- }
++ nfsd_file_cond_queue(nf, dispose);
+ } while (1);
+ rcu_read_unlock();
+ }
+@@ -905,11 +919,8 @@ __nfsd_file_cache_purge(struct net *net)
+
+ nf = rhashtable_walk_next(&iter);
+ while (!IS_ERR_OR_NULL(nf)) {
+- if (!net || nf->nf_net == net) {
+- nfsd_file_unhash(nf);
+- nfsd_file_lru_remove(nf);
+- list_add(&nf->nf_lru, &dispose);
+- }
++ if (!net || nf->nf_net == net)
++ nfsd_file_cond_queue(nf, &dispose);
+ nf = rhashtable_walk_next(&iter);
+ }
+
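nfsd_file_cond_queue() above folds both disposal paths into one helper: the hash-table reference, plus the LRU reference if one was held, are dropped in a single atomic subtraction, and the file is queued for disposal only when that subtraction reaches zero. A toy sketch of the sub-and-test step with C11 atomics; the names are ours:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Drop n references at once; true if the count just hit zero. */
    static bool sub_and_test(atomic_int *ref, int n)
    {
        return atomic_fetch_sub(ref, n) == n;
    }

    int main(void)
    {
        atomic_int ref = 3;   /* hash table + LRU + one active user */
        int decrement = 1;    /* hash-table reference */

        decrement++;          /* ...plus the LRU reference we removed */
        if (sub_and_test(&ref, decrement))
            puts("last reference: dispose");
        else
            puts("still in use: leave it for the final put");
        return 0;
    }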
+diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
+index 30a08ec31a703..ba04ce9b9fa51 100644
+--- a/fs/nfsd/nfs4proc.c
++++ b/fs/nfsd/nfs4proc.c
+@@ -1331,6 +1331,7 @@ try_again:
+ /* allow 20secs for mount/unmount for now - revisit */
+ if (signal_pending(current) ||
+ (schedule_timeout(20*HZ) == 0)) {
++ finish_wait(&nn->nfsd_ssc_waitq, &wait);
+ kfree(work);
+ return nfserr_eagain;
+ }
+diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
+index f436d8847f085..91a95bfad0d1c 100644
+--- a/fs/overlayfs/copy_up.c
++++ b/fs/overlayfs/copy_up.c
+@@ -754,7 +754,7 @@ static int ovl_copy_up_tmpfile(struct ovl_copy_up_ctx *c)
+ if (!c->metacopy && c->stat.size) {
+ err = ovl_copy_up_file(ofs, c->dentry, tmpfile, c->stat.size);
+ if (err)
+- return err;
++ goto out_fput;
+ }
+
+ err = ovl_copy_up_metadata(c, temp);
+@@ -973,6 +973,10 @@ static int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry,
+ if (err)
+ return err;
+
++ if (!kuid_has_mapping(current_user_ns(), ctx.stat.uid) ||
++ !kgid_has_mapping(current_user_ns(), ctx.stat.gid))
++ return -EOVERFLOW;
++
+ ctx.metacopy = ovl_need_meta_copy_up(dentry, ctx.stat.mode, flags);
+
+ if (parent) {
+diff --git a/include/drm/drm_vma_manager.h b/include/drm/drm_vma_manager.h
+index 4f8c35206f7cd..6c2a2f21dbf00 100644
+--- a/include/drm/drm_vma_manager.h
++++ b/include/drm/drm_vma_manager.h
+@@ -74,6 +74,7 @@ void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr,
+ struct drm_vma_offset_node *node);
+
+ int drm_vma_node_allow(struct drm_vma_offset_node *node, struct drm_file *tag);
++int drm_vma_node_allow_once(struct drm_vma_offset_node *node, struct drm_file *tag);
+ void drm_vma_node_revoke(struct drm_vma_offset_node *node,
+ struct drm_file *tag);
+ bool drm_vma_node_is_allowed(struct drm_vma_offset_node *node,
+diff --git a/include/linux/apple-gmux.h b/include/linux/apple-gmux.h
+index ddb10aa67b143..1f68b49bcd688 100644
+--- a/include/linux/apple-gmux.h
++++ b/include/linux/apple-gmux.h
+@@ -8,18 +8,118 @@
+ #define LINUX_APPLE_GMUX_H
+
+ #include <linux/acpi.h>
++#include <linux/io.h>
++#include <linux/pnp.h>
+
+ #define GMUX_ACPI_HID "APP000B"
+
++/*
++ * gmux port offsets. Many of these are not yet used, but may be in the
++ * future, and it's useful to have them documented here anyhow.
++ */
++#define GMUX_PORT_VERSION_MAJOR 0x04
++#define GMUX_PORT_VERSION_MINOR 0x05
++#define GMUX_PORT_VERSION_RELEASE 0x06
++#define GMUX_PORT_SWITCH_DISPLAY 0x10
++#define GMUX_PORT_SWITCH_GET_DISPLAY 0x11
++#define GMUX_PORT_INTERRUPT_ENABLE 0x14
++#define GMUX_PORT_INTERRUPT_STATUS 0x16
++#define GMUX_PORT_SWITCH_DDC 0x28
++#define GMUX_PORT_SWITCH_EXTERNAL 0x40
++#define GMUX_PORT_SWITCH_GET_EXTERNAL 0x41
++#define GMUX_PORT_DISCRETE_POWER 0x50
++#define GMUX_PORT_MAX_BRIGHTNESS 0x70
++#define GMUX_PORT_BRIGHTNESS 0x74
++#define GMUX_PORT_VALUE 0xc2
++#define GMUX_PORT_READ 0xd0
++#define GMUX_PORT_WRITE 0xd4
++
++#define GMUX_MIN_IO_LEN (GMUX_PORT_BRIGHTNESS + 4)
++
+ #if IS_ENABLED(CONFIG_APPLE_GMUX)
++static inline bool apple_gmux_is_indexed(unsigned long iostart)
++{
++ u16 val;
++
++ outb(0xaa, iostart + 0xcc);
++ outb(0x55, iostart + 0xcd);
++ outb(0x00, iostart + 0xce);
++
++ val = inb(iostart + 0xcc) | (inb(iostart + 0xcd) << 8);
++ if (val == 0x55aa)
++ return true;
++
++ return false;
++}
+
+ /**
+- * apple_gmux_present() - detect if gmux is built into the machine
++ * apple_gmux_detect() - detect if gmux is built into the machine
++ *
++ * @pnp_dev: Device to probe or NULL to use the first matching device
++ * @indexed_ret: Returns (by reference) if the gmux is indexed or not
++ *
++ * Detect if a supported gmux device is present by actually probing it.
++ * This avoids the false positives returned on some models by
++ * apple_gmux_present().
++ *
++ * Return: %true if a supported gmux ACPI device is detected and the kernel
++ * was configured with CONFIG_APPLE_GMUX, %false otherwise.
++ */
++static inline bool apple_gmux_detect(struct pnp_dev *pnp_dev, bool *indexed_ret)
++{
++ u8 ver_major, ver_minor, ver_release;
++ struct device *dev = NULL;
++ struct acpi_device *adev;
++ struct resource *res;
++ bool indexed = false;
++ bool ret = false;
++
++ if (!pnp_dev) {
++ adev = acpi_dev_get_first_match_dev(GMUX_ACPI_HID, NULL, -1);
++ if (!adev)
++ return false;
++
++ dev = get_device(acpi_get_first_physical_node(adev));
++ acpi_dev_put(adev);
++ if (!dev)
++ return false;
++
++ pnp_dev = to_pnp_dev(dev);
++ }
++
++ res = pnp_get_resource(pnp_dev, IORESOURCE_IO, 0);
++ if (!res || resource_size(res) < GMUX_MIN_IO_LEN)
++ goto out;
++
++ /*
++ * Invalid version information may indicate either that the gmux
++ * device isn't present or that it's a new one that uses indexed io.
++ */
++ ver_major = inb(res->start + GMUX_PORT_VERSION_MAJOR);
++ ver_minor = inb(res->start + GMUX_PORT_VERSION_MINOR);
++ ver_release = inb(res->start + GMUX_PORT_VERSION_RELEASE);
++ if (ver_major == 0xff && ver_minor == 0xff && ver_release == 0xff) {
++ indexed = apple_gmux_is_indexed(res->start);
++ if (!indexed)
++ goto out;
++ }
++
++ if (indexed_ret)
++ *indexed_ret = indexed;
++
++ ret = true;
++out:
++ put_device(dev);
++ return ret;
++}
++
++/**
++ * apple_gmux_present() - check if gmux ACPI device is present
+ *
+ * Drivers may use this to activate quirks specific to dual GPU MacBook Pros
+ * and Mac Pros, e.g. for deferred probing, runtime pm and backlight.
+ *
+- * Return: %true if gmux is present and the kernel was configured
++ * Return: %true if gmux ACPI device is present and the kernel was configured
+ * with CONFIG_APPLE_GMUX, %false otherwise.
+ */
+ static inline bool apple_gmux_present(void)
+@@ -34,6 +134,11 @@ static inline bool apple_gmux_present(void)
+ return false;
+ }
+
++static inline bool apple_gmux_detect(struct pnp_dev *pnp_dev, bool *indexed_ret)
++{
++ return false;
++}
++
+ #endif /* !CONFIG_APPLE_GMUX */
+
+ #endif /* LINUX_APPLE_GMUX_H */
+diff --git a/include/linux/platform_data/x86/simatic-ipc.h b/include/linux/platform_data/x86/simatic-ipc.h
+index 632320ec8f082..a48bb52409777 100644
+--- a/include/linux/platform_data/x86/simatic-ipc.h
++++ b/include/linux/platform_data/x86/simatic-ipc.h
+@@ -32,7 +32,8 @@ enum simatic_ipc_station_ids {
+ SIMATIC_IPC_IPC477E = 0x00000A02,
+ SIMATIC_IPC_IPC127E = 0x00000D01,
+ SIMATIC_IPC_IPC227G = 0x00000F01,
+- SIMATIC_IPC_IPC427G = 0x00001001,
++ SIMATIC_IPC_IPCBX_39A = 0x00001001,
++ SIMATIC_IPC_IPCPX_39A = 0x00001002,
+ };
+
+ static inline u32 simatic_ipc_get_station_id(u8 *data, int max_len)
+diff --git a/include/linux/thermal.h b/include/linux/thermal.h
+index 9ecc128944a19..5e093602e8fcd 100644
+--- a/include/linux/thermal.h
++++ b/include/linux/thermal.h
+@@ -100,6 +100,7 @@ struct thermal_cooling_device_ops {
+ struct thermal_cooling_device {
+ int id;
+ char *type;
++ unsigned long max_state;
+ struct device device;
+ struct device_node *np;
+ void *devdata;
+diff --git a/include/net/mac80211.h b/include/net/mac80211.h
+index ac2bad57933f8..72b739dc6d530 100644
+--- a/include/net/mac80211.h
++++ b/include/net/mac80211.h
+@@ -1827,8 +1827,6 @@ struct ieee80211_vif_cfg {
+ * @drv_priv: data area for driver use, will always be aligned to
+ * sizeof(void \*).
+ * @txq: the multicast data TX queue (if driver uses the TXQ abstraction)
+- * @txqs_stopped: per AC flag to indicate that intermediate TXQs are stopped,
+- * protected by fq->lock.
+ * @offload_flags: 802.3 -> 802.11 enapsulation offload flags, see
+ * &enum ieee80211_offload_flags.
+ * @mbssid_tx_vif: Pointer to the transmitting interface if MBSSID is enabled.
+@@ -1857,8 +1855,6 @@ struct ieee80211_vif {
+ bool probe_req_reg;
+ bool rx_mcast_action_reg;
+
+- bool txqs_stopped[IEEE80211_NUM_ACS];
+-
+ struct ieee80211_vif *mbssid_tx_vif;
+
+ /* must be last */
+diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
+index d5517719af4ef..af4aa66aaa4eb 100644
+--- a/include/net/sch_generic.h
++++ b/include/net/sch_generic.h
+@@ -1288,4 +1288,11 @@ void mq_change_real_num_tx(struct Qdisc *sch, unsigned int new_real_tx);
+
+ int sch_frag_xmit_hook(struct sk_buff *skb, int (*xmit)(struct sk_buff *skb));
+
++/* Make sure qdisc is no longer in SCHED state. */
++static inline void qdisc_synchronize(const struct Qdisc *q)
++{
++ while (test_bit(__QDISC_STATE_SCHED, &q->state))
++ msleep(1);
++}
++
+ #endif
+diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
+index cab52b0f11d0c..34c03707fb6ef 100644
+--- a/include/scsi/scsi_transport_iscsi.h
++++ b/include/scsi/scsi_transport_iscsi.h
+@@ -236,6 +236,14 @@ enum {
+ ISCSI_SESSION_FREE,
+ };
+
++enum {
++ ISCSI_SESSION_TARGET_UNBOUND,
++ ISCSI_SESSION_TARGET_ALLOCATED,
++ ISCSI_SESSION_TARGET_SCANNED,
++ ISCSI_SESSION_TARGET_UNBINDING,
++ ISCSI_SESSION_TARGET_MAX,
++};
++
+ #define ISCSI_MAX_TARGET -1
+
+ struct iscsi_cls_session {
+@@ -264,6 +272,7 @@ struct iscsi_cls_session {
+ */
+ pid_t creator;
+ int state;
++ int target_state; /* session target bind state */
+ int sid; /* session id */
+ void *dd_data; /* LLD private data */
+ struct device dev; /* sysfs transport/container device */
+diff --git a/include/uapi/linux/netfilter/nf_conntrack_sctp.h b/include/uapi/linux/netfilter/nf_conntrack_sctp.h
+index edc6ddab0de6a..2d6f80d75ae74 100644
+--- a/include/uapi/linux/netfilter/nf_conntrack_sctp.h
++++ b/include/uapi/linux/netfilter/nf_conntrack_sctp.h
+@@ -15,7 +15,7 @@ enum sctp_conntrack {
+ SCTP_CONNTRACK_SHUTDOWN_RECD,
+ SCTP_CONNTRACK_SHUTDOWN_ACK_SENT,
+ SCTP_CONNTRACK_HEARTBEAT_SENT,
+- SCTP_CONNTRACK_HEARTBEAT_ACKED,
++ SCTP_CONNTRACK_HEARTBEAT_ACKED, /* no longer used */
+ SCTP_CONNTRACK_MAX
+ };
+
+diff --git a/include/uapi/linux/netfilter/nfnetlink_cttimeout.h b/include/uapi/linux/netfilter/nfnetlink_cttimeout.h
+index 6b20fb22717b2..aa805e6d4e284 100644
+--- a/include/uapi/linux/netfilter/nfnetlink_cttimeout.h
++++ b/include/uapi/linux/netfilter/nfnetlink_cttimeout.h
+@@ -94,7 +94,7 @@ enum ctattr_timeout_sctp {
+ CTA_TIMEOUT_SCTP_SHUTDOWN_RECD,
+ CTA_TIMEOUT_SCTP_SHUTDOWN_ACK_SENT,
+ CTA_TIMEOUT_SCTP_HEARTBEAT_SENT,
+- CTA_TIMEOUT_SCTP_HEARTBEAT_ACKED,
++ CTA_TIMEOUT_SCTP_HEARTBEAT_ACKED, /* no longer used */
+ __CTA_TIMEOUT_SCTP_MAX
+ };
+ #define CTA_TIMEOUT_SCTP_MAX (__CTA_TIMEOUT_SCTP_MAX - 1)
+diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h
+index 9f28349ebcff5..2bb89290da63c 100644
+--- a/include/ufs/ufshcd.h
++++ b/include/ufs/ufshcd.h
+@@ -806,6 +806,7 @@ struct ufs_hba_monitor {
+ * @urgent_bkops_lvl: keeps track of urgent bkops level for device
+ * @is_urgent_bkops_lvl_checked: keeps track if the urgent bkops level for
+ * device is known or not.
++ * @wb_mutex: used to serialize devfreq and sysfs write booster toggling
+ * @clk_scaling_lock: used to serialize device commands and clock scaling
+ * @desc_size: descriptor sizes reported by device
+ * @scsi_block_reqs_cnt: reference counting for scsi block requests
+@@ -948,6 +949,7 @@ struct ufs_hba {
+ enum bkops_status urgent_bkops_lvl;
+ bool is_urgent_bkops_lvl_checked;
+
++ struct mutex wb_mutex;
+ struct rw_semaphore clk_scaling_lock;
+ unsigned char desc_size[QUERY_DESC_IDN_MAX];
+ atomic_t scsi_block_reqs_cnt;
+diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
+index cea5de98c4232..862e05e6691de 100644
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -823,15 +823,19 @@ bool io_post_aux_cqe(struct io_ring_ctx *ctx,
+ return filled;
+ }
+
+-static void __io_req_complete_put(struct io_kiocb *req)
++void io_req_complete_post(struct io_kiocb *req)
+ {
++ struct io_ring_ctx *ctx = req->ctx;
++
++ io_cq_lock(ctx);
++ if (!(req->flags & REQ_F_CQE_SKIP))
++ __io_fill_cqe_req(ctx, req);
++
+ /*
+ * If we're the last reference to this request, add to our locked
+ * free_list cache.
+ */
+ if (req_ref_put_and_test(req)) {
+- struct io_ring_ctx *ctx = req->ctx;
+-
+ if (req->flags & IO_REQ_LINK_FLAGS) {
+ if (req->flags & IO_DISARM_MASK)
+ io_disarm_next(req);
+@@ -852,21 +856,6 @@ static void __io_req_complete_put(struct io_kiocb *req)
+ wq_list_add_head(&req->comp_list, &ctx->locked_free_list);
+ ctx->locked_free_nr++;
+ }
+-}
+-
+-void __io_req_complete_post(struct io_kiocb *req)
+-{
+- if (!(req->flags & REQ_F_CQE_SKIP))
+- __io_fill_cqe_req(req->ctx, req);
+- __io_req_complete_put(req);
+-}
+-
+-void io_req_complete_post(struct io_kiocb *req)
+-{
+- struct io_ring_ctx *ctx = req->ctx;
+-
+- io_cq_lock(ctx);
+- __io_req_complete_post(req);
+ io_cq_unlock_post(ctx);
+ }
+
+@@ -876,9 +865,12 @@ inline void __io_req_complete(struct io_kiocb *req, unsigned issue_flags)
+ }
+
+ void io_req_complete_failed(struct io_kiocb *req, s32 res)
++ __must_hold(&ctx->uring_lock)
+ {
+ const struct io_op_def *def = &io_op_defs[req->opcode];
+
++ lockdep_assert_held(&req->ctx->uring_lock);
++
+ req_set_fail(req);
+ io_req_set_res(req, res, io_put_kbuf(req, IO_URING_F_UNLOCKED));
+ if (def->fail)
+@@ -1133,7 +1125,7 @@ static void io_req_local_work_add(struct io_kiocb *req)
+ percpu_ref_put(&ctx->refs);
+ }
+
+-static inline void __io_req_task_work_add(struct io_kiocb *req, bool allow_local)
++void __io_req_task_work_add(struct io_kiocb *req, bool allow_local)
+ {
+ struct io_uring_task *tctx = req->task->io_uring;
+ struct io_ring_ctx *ctx = req->ctx;
+@@ -1165,11 +1157,6 @@ static inline void __io_req_task_work_add(struct io_kiocb *req, bool allow_local
+ }
+ }
+
+-void io_req_task_work_add(struct io_kiocb *req)
+-{
+- __io_req_task_work_add(req, true);
+-}
+-
+ static void __cold io_move_task_work_from_local(struct io_ring_ctx *ctx)
+ {
+ struct llist_node *node;
+@@ -1243,18 +1230,6 @@ int io_run_local_work(struct io_ring_ctx *ctx)
+ return ret;
+ }
+
+-static void io_req_tw_post(struct io_kiocb *req, bool *locked)
+-{
+- io_req_complete_post(req);
+-}
+-
+-void io_req_tw_post_queue(struct io_kiocb *req, s32 res, u32 cflags)
+-{
+- io_req_set_res(req, res, cflags);
+- req->io_task_work.func = io_req_tw_post;
+- io_req_task_work_add(req);
+-}
+-
+ static void io_req_task_cancel(struct io_kiocb *req, bool *locked)
+ {
+ /* not needed for normal modes, but SQPOLL depends on it */
+@@ -1641,6 +1616,7 @@ static u32 io_get_sequence(struct io_kiocb *req)
+ }
+
+ static __cold void io_drain_req(struct io_kiocb *req)
++ __must_hold(&ctx->uring_lock)
+ {
+ struct io_ring_ctx *ctx = req->ctx;
+ struct io_defer_entry *de;
+@@ -1658,17 +1634,12 @@ queue:
+ }
+ spin_unlock(&ctx->completion_lock);
+
+- ret = io_req_prep_async(req);
+- if (ret) {
+-fail:
+- io_req_complete_failed(req, ret);
+- return;
+- }
+ io_prep_async_link(req);
+ de = kmalloc(sizeof(*de), GFP_KERNEL);
+ if (!de) {
+ ret = -ENOMEM;
+- goto fail;
++ io_req_complete_failed(req, ret);
++ return;
+ }
+
+ spin_lock(&ctx->completion_lock);
+@@ -1942,13 +1913,16 @@ static void io_queue_sqe_fallback(struct io_kiocb *req)
+ req->flags &= ~REQ_F_HARDLINK;
+ req->flags |= REQ_F_LINK;
+ io_req_complete_failed(req, req->cqe.res);
+- } else if (unlikely(req->ctx->drain_active)) {
+- io_drain_req(req);
+ } else {
+ int ret = io_req_prep_async(req);
+
+- if (unlikely(ret))
++ if (unlikely(ret)) {
+ io_req_complete_failed(req, ret);
++ return;
++ }
++
++ if (unlikely(req->ctx->drain_active))
++ io_drain_req(req);
+ else
+ io_queue_iowq(req, NULL);
+ }
+@@ -2877,7 +2851,7 @@ static __cold bool io_cancel_defer_files(struct io_ring_ctx *ctx,
+ while (!list_empty(&list)) {
+ de = list_first_entry(&list, struct io_defer_entry, list);
+ list_del_init(&de->list);
+- io_req_complete_failed(de->req, -ECANCELED);
++ io_req_task_queue_fail(de->req, -ECANCELED);
+ kfree(de);
+ }
+ return true;
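Note how the reworked io_req_complete_failed() states its locking contract twice: __must_hold() is checked statically by sparse, while lockdep_assert_held() fires at runtime under lockdep. The same belt-and-braces pattern in isolation (names illustrative, not from this patch):

    static void update_stat(struct mutex *lock, int *stat)
            __must_hold(lock)
    {
            lockdep_assert_held(lock);      /* runtime splat if unlocked */
            (*stat)++;                      /* caller guarantees the lock */
    }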
+diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
+index 4334cd30c423d..90b675c65b840 100644
+--- a/io_uring/io_uring.h
++++ b/io_uring/io_uring.h
+@@ -33,7 +33,6 @@ int io_run_local_work(struct io_ring_ctx *ctx);
+ void io_req_complete_failed(struct io_kiocb *req, s32 res);
+ void __io_req_complete(struct io_kiocb *req, unsigned issue_flags);
+ void io_req_complete_post(struct io_kiocb *req);
+-void __io_req_complete_post(struct io_kiocb *req);
+ bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags,
+ bool allow_overflow);
+ bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags,
+@@ -51,10 +50,9 @@ static inline bool io_req_ffs_set(struct io_kiocb *req)
+ return req->flags & REQ_F_FIXED_FILE;
+ }
+
++void __io_req_task_work_add(struct io_kiocb *req, bool allow_local);
+ bool io_is_uring_fops(struct file *file);
+ bool io_alloc_async_data(struct io_kiocb *req);
+-void io_req_task_work_add(struct io_kiocb *req);
+-void io_req_tw_post_queue(struct io_kiocb *req, s32 res, u32 cflags);
+ void io_req_task_queue(struct io_kiocb *req);
+ void io_queue_iowq(struct io_kiocb *req, bool *dont_use);
+ void io_req_task_complete(struct io_kiocb *req, bool *locked);
+@@ -83,6 +81,11 @@ bool __io_alloc_req_refill(struct io_ring_ctx *ctx);
+ bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
+ bool cancel_all);
+
++static inline void io_req_task_work_add(struct io_kiocb *req)
++{
++ __io_req_task_work_add(req, true);
++}
++
+ #define io_for_each_link(pos, head) \
+ for (pos = (head); pos; pos = pos->link)
+
+@@ -376,4 +379,11 @@ static inline bool io_allowed_run_tw(struct io_ring_ctx *ctx)
+ ctx->submitter_task == current);
+ }
+
++static inline void io_req_queue_tw_complete(struct io_kiocb *req, s32 res)
++{
++ io_req_set_res(req, res, 0);
++ req->io_task_work.func = io_req_task_complete;
++ io_req_task_work_add(req);
++}
++
+ #endif
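io_req_tw_post_queue() and its private io_req_tw_post() callback are gone; io_req_queue_tw_complete() stores the result on the request and reuses the generic io_req_task_complete() handler. Call sites shrink to one line, e.g. cancelling a request from non-task context (exactly the conversion the timeout.c hunks below make):

    io_req_queue_tw_complete(req, -ECANCELED);  /* completes via task work */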
+diff --git a/io_uring/msg_ring.c b/io_uring/msg_ring.c
+index a49ccab262d53..7d5b544cfc305 100644
+--- a/io_uring/msg_ring.c
++++ b/io_uring/msg_ring.c
+@@ -30,6 +30,8 @@ static int io_msg_ring_data(struct io_kiocb *req)
+
+ if (msg->src_fd || msg->dst_fd || msg->flags)
+ return -EINVAL;
++ if (target_ctx->flags & IORING_SETUP_R_DISABLED)
++ return -EBADFD;
+
+ if (io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0, true))
+ return 0;
+@@ -84,6 +86,8 @@ static int io_msg_send_fd(struct io_kiocb *req, unsigned int issue_flags)
+
+ if (target_ctx == ctx)
+ return -EINVAL;
++ if (target_ctx->flags & IORING_SETUP_R_DISABLED)
++ return -EBADFD;
+
+ ret = io_double_lock_ctx(ctx, target_ctx, issue_flags);
+ if (unlikely(ret))
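Both MSG_RING paths now refuse target rings created with IORING_SETUP_R_DISABLED that were never enabled, since posting a CQE there could touch a ring whose task context is not set up yet. From user space the sender simply sees -EBADFD. A hedged liburing-style sketch (liburing helpers assumed; target_fd is a hypothetical fd of the disabled ring):

    struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);

    io_uring_prep_msg_ring(sqe, target_fd, 0, 0x1234, 0);
    io_uring_submit(&ring);
    /* CQE res is -EBADFD while the target ring stays R_DISABLED */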
+diff --git a/io_uring/net.c b/io_uring/net.c
+index bdd2b4e370b35..9046e269e5a58 100644
+--- a/io_uring/net.c
++++ b/io_uring/net.c
+@@ -62,6 +62,7 @@ struct io_sr_msg {
+ u16 flags;
+ /* initialised and used only by !msg send variants */
+ u16 addr_len;
++ u16 buf_group;
+ void __user *addr;
+ /* used only for send zerocopy */
+ struct io_kiocb *notif;
+@@ -565,6 +566,15 @@ int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+ if (req->opcode == IORING_OP_RECV && sr->len)
+ return -EINVAL;
+ req->flags |= REQ_F_APOLL_MULTISHOT;
++ /*
++ * Store the buffer group for this multishot receive separately,
++ * as if we end up doing an io-wq based issue that selects a
++ * buffer, it has to be committed immediately and that will
++ * clear ->buf_list. This means we lose the link to the buffer
++ * list, and the eventual buffer put on completion then cannot
++ * restore it.
++ */
++ sr->buf_group = req->buf_index;
+ }
+
+ #ifdef CONFIG_COMPAT
+@@ -581,6 +591,7 @@ static inline void io_recv_prep_retry(struct io_kiocb *req)
+
+ sr->done_io = 0;
+ sr->len = 0; /* get from the provided buffer */
++ req->buf_index = sr->buf_group;
+ }
+
+ /*
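The comment in io_recvmsg_prep() describes the bug this fixes: an io-wq based issue of a multishot receive commits the selected buffer immediately, which clears ->buf_list and with it the req->buf_index link to the buffer group, so a retry would select from group 0. Saving sr->buf_group at prep time and restoring it in io_recv_prep_retry() keeps every retry in the configured group. A hedged liburing-style setup that exercises this path (helper names assumed):

    io_uring_prep_recv_multishot(sqe, sockfd, NULL, 0, 0);
    sqe->flags |= IOSQE_BUFFER_SELECT;  /* select from provided buffers */
    sqe->buf_group = 7;                 /* group that must survive retries */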
+diff --git a/io_uring/timeout.c b/io_uring/timeout.c
+index 06200fe73a044..4c6a5666541cf 100644
+--- a/io_uring/timeout.c
++++ b/io_uring/timeout.c
+@@ -63,7 +63,7 @@ static bool io_kill_timeout(struct io_kiocb *req, int status)
+ atomic_set(&req->ctx->cq_timeouts,
+ atomic_read(&req->ctx->cq_timeouts) + 1);
+ list_del_init(&timeout->list);
+- io_req_tw_post_queue(req, status, 0);
++ io_req_queue_tw_complete(req, status);
+ return true;
+ }
+ return false;
+@@ -161,7 +161,7 @@ void io_disarm_next(struct io_kiocb *req)
+ req->flags &= ~REQ_F_ARM_LTIMEOUT;
+ if (link && link->opcode == IORING_OP_LINK_TIMEOUT) {
+ io_remove_next_linked(req);
+- io_req_tw_post_queue(link, -ECANCELED, 0);
++ io_req_queue_tw_complete(link, -ECANCELED);
+ }
+ } else if (req->flags & REQ_F_LINK_TIMEOUT) {
+ struct io_ring_ctx *ctx = req->ctx;
+@@ -170,7 +170,7 @@ void io_disarm_next(struct io_kiocb *req)
+ link = io_disarm_linked_timeout(req);
+ spin_unlock_irq(&ctx->timeout_lock);
+ if (link)
+- io_req_tw_post_queue(link, -ECANCELED, 0);
++ io_req_queue_tw_complete(link, -ECANCELED);
+ }
+ if (unlikely((req->flags & REQ_F_FAIL) &&
+ !(req->flags & REQ_F_HARDLINK)))
+@@ -284,11 +284,11 @@ static void io_req_task_link_timeout(struct io_kiocb *req, bool *locked)
+ ret = io_try_cancel(req->task->io_uring, &cd, issue_flags);
+ }
+ io_req_set_res(req, ret ?: -ETIME, 0);
+- io_req_complete_post(req);
++ io_req_task_complete(req, locked);
+ io_put_req(prev);
+ } else {
+ io_req_set_res(req, -ETIME, 0);
+- io_req_complete_post(req);
++ io_req_task_complete(req, locked);
+ }
+ }
+
+diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
+index f39ee3e055897..c4811984fafa4 100644
+--- a/kernel/bpf/hashtab.c
++++ b/kernel/bpf/hashtab.c
+@@ -152,7 +152,7 @@ static inline int htab_lock_bucket(const struct bpf_htab *htab,
+ {
+ unsigned long flags;
+
+- hash = hash & HASHTAB_MAP_LOCK_MASK;
++ hash = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets - 1);
+
+ preempt_disable();
+ if (unlikely(__this_cpu_inc_return(*(htab->map_locked[hash])) != 1)) {
+@@ -171,7 +171,7 @@ static inline void htab_unlock_bucket(const struct bpf_htab *htab,
+ struct bucket *b, u32 hash,
+ unsigned long flags)
+ {
+- hash = hash & HASHTAB_MAP_LOCK_MASK;
++ hash = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets - 1);
+ raw_spin_unlock_irqrestore(&b->raw_lock, flags);
+ __this_cpu_dec(*(htab->map_locked[hash]));
+ preempt_enable();
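HASHTAB_MAP_LOCK_COUNT is 8 (mask 7), so a map with fewer than 8 buckets could previously have two hashes that collide on one bucket yet pick different map_locked[] counters; the per-CPU re-entrancy guard then never trips and the same raw spinlock can be acquired twice, deadlocking. Worked example for n_buckets = 4 (bucket index is hash & 3):

    /* hash 1 and hash 5 share bucket 1, since (1 & 3) == (5 & 3) */
    old = hash & HASHTAB_MAP_LOCK_MASK;             /* 1 -> 1, 5 -> 5 */
    new = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK,
                       htab->n_buckets - 1);        /* 1 -> 1, 5 -> 1 */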
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index b4d5b343c1912..398a0008aff72 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -3063,7 +3063,9 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
+ bool sanitize = reg && is_spillable_regtype(reg->type);
+
+ for (i = 0; i < size; i++) {
+- if (state->stack[spi].slot_type[i] == STACK_INVALID) {
++ u8 type = state->stack[spi].slot_type[i];
++
++ if (type != STACK_MISC && type != STACK_ZERO) {
+ sanitize = true;
+ break;
+ }
+diff --git a/kernel/kcsan/kcsan_test.c b/kernel/kcsan/kcsan_test.c
+index dcec1b743c694..a60c561724be9 100644
+--- a/kernel/kcsan/kcsan_test.c
++++ b/kernel/kcsan/kcsan_test.c
+@@ -159,7 +159,7 @@ static bool __report_matches(const struct expect_report *r)
+ const bool is_assert = (r->access[0].type | r->access[1].type) & KCSAN_ACCESS_ASSERT;
+ bool ret = false;
+ unsigned long flags;
+- typeof(observed.lines) expect;
++ typeof(*observed.lines) *expect;
+ const char *end;
+ char *cur;
+ int i;
+@@ -168,6 +168,10 @@ static bool __report_matches(const struct expect_report *r)
+ if (!report_available())
+ return false;
+
++ expect = kmalloc(sizeof(observed.lines), GFP_KERNEL);
++ if (WARN_ON(!expect))
++ return false;
++
+ /* Generate expected report contents. */
+
+ /* Title */
+@@ -253,6 +257,7 @@ static bool __report_matches(const struct expect_report *r)
+ strstr(observed.lines[2], expect[1])));
+ out:
+ spin_unlock_irqrestore(&observed.lock, flags);
++ kfree(expect);
+ return ret;
+ }
+
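observed.lines is a two-dimensional char array, so the old "typeof(observed.lines) expect" placed the entire report buffer on the kernel stack and risked frame-size warnings; "typeof(*observed.lines) *expect" keeps the element type but takes the storage from the heap. The shape of the change in isolation (dimensions illustrative):

    char (*expect)[256];    /* pointer to char[256], like *observed.lines */

    expect = kmalloc(sizeof(char[2][256]), GFP_KERNEL);
    if (WARN_ON(!expect))
            return false;
    /* ... format and compare expect[0], expect[1] ... */
    kfree(expect);          /* freed on every exit path */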
+diff --git a/kernel/module/main.c b/kernel/module/main.c
+index d02d39c7174e1..7a627345d4fd9 100644
+--- a/kernel/module/main.c
++++ b/kernel/module/main.c
+@@ -2386,7 +2386,8 @@ static bool finished_loading(const char *name)
+ sched_annotate_sleep();
+ mutex_lock(&module_mutex);
+ mod = find_module_all(name, strlen(name), true);
+- ret = !mod || mod->state == MODULE_STATE_LIVE;
++ ret = !mod || mod->state == MODULE_STATE_LIVE
++ || mod->state == MODULE_STATE_GOING;
+ mutex_unlock(&module_mutex);
+
+ return ret;
+@@ -2562,20 +2563,35 @@ static int add_unformed_module(struct module *mod)
+
+ mod->state = MODULE_STATE_UNFORMED;
+
+-again:
+ mutex_lock(&module_mutex);
+ old = find_module_all(mod->name, strlen(mod->name), true);
+ if (old != NULL) {
+- if (old->state != MODULE_STATE_LIVE) {
++ if (old->state == MODULE_STATE_COMING
++ || old->state == MODULE_STATE_UNFORMED) {
+ /* Wait in case it fails to load. */
+ mutex_unlock(&module_mutex);
+ err = wait_event_interruptible(module_wq,
+ finished_loading(mod->name));
+ if (err)
+ goto out_unlocked;
+- goto again;
++
++ /* The module might have gone in the meantime. */
++ mutex_lock(&module_mutex);
++ old = find_module_all(mod->name, strlen(mod->name),
++ true);
+ }
+- err = -EEXIST;
++
++ /*
++ * We are here only when the same module was being loaded. Do
++ * not try to load it again right now. It prevents long delays
++ * caused by serialized module load failures. It might happen
++ * when several devices of the same type trigger a load of
++ * a particular module.
++ */
++ if (old && old->state == MODULE_STATE_LIVE)
++ err = -EEXIST;
++ else
++ err = -EBUSY;
+ goto out;
+ }
+ mod_update_bounds(mod);
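The user-visible change: a duplicate load still waits while the first copy is COMING or UNFORMED, but once that copy has failed (GOING) the duplicate returns -EBUSY immediately instead of jumping back to the removed "again:" label. A timeline of the failing case the comment describes:

    /*
     * task A: init_module("foo")  -> init error, state = MODULE_STATE_GOING
     * task B: init_module("foo")  -> old->state is neither COMING nor
     *                                UNFORMED, and not LIVE => -EBUSY now
     * (previously B slept in module_wq and retried, so every duplicate
     *  load serialized behind the failure)
     */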
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 0f32acb05055f..2c3d0d49c80ea 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -7213,11 +7213,11 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
+ eenv_task_busy_time(&eenv, p, prev_cpu);
+
+ for (; pd; pd = pd->next) {
++ unsigned long util_min = p_util_min, util_max = p_util_max;
+ unsigned long cpu_cap, cpu_thermal_cap, util;
+ unsigned long cur_delta, max_spare_cap = 0;
+ unsigned long rq_util_min, rq_util_max;
+- unsigned long util_min, util_max;
+- bool compute_prev_delta = false;
++ unsigned long prev_spare_cap = 0;
+ int max_spare_cap_cpu = -1;
+ unsigned long base_energy;
+
+@@ -7235,6 +7235,8 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
+ eenv.pd_cap = 0;
+
+ for_each_cpu(cpu, cpus) {
++ struct rq *rq = cpu_rq(cpu);
++
+ eenv.pd_cap += cpu_thermal_cap;
+
+ if (!cpumask_test_cpu(cpu, sched_domain_span(sd)))
+@@ -7253,24 +7255,19 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
+ * much capacity we can get out of the CPU; this is
+ * aligned with sched_cpu_util().
+ */
+- if (uclamp_is_used()) {
+- if (uclamp_rq_is_idle(cpu_rq(cpu))) {
+- util_min = p_util_min;
+- util_max = p_util_max;
+- } else {
+- /*
+- * Open code uclamp_rq_util_with() except for
+- * the clamp() part. Ie: apply max aggregation
+- * only. util_fits_cpu() logic requires to
+- * operate on non clamped util but must use the
+- * max-aggregated uclamp_{min, max}.
+- */
+- rq_util_min = uclamp_rq_get(cpu_rq(cpu), UCLAMP_MIN);
+- rq_util_max = uclamp_rq_get(cpu_rq(cpu), UCLAMP_MAX);
+-
+- util_min = max(rq_util_min, p_util_min);
+- util_max = max(rq_util_max, p_util_max);
+- }
++ if (uclamp_is_used() && !uclamp_rq_is_idle(rq)) {
++ /*
++ * Open code uclamp_rq_util_with() except for
++ * the clamp() part. Ie: apply max aggregation
++ * only. util_fits_cpu() logic requires to
++ * operate on non clamped util but must use the
++ * max-aggregated uclamp_{min, max}.
++ */
++ rq_util_min = uclamp_rq_get(rq, UCLAMP_MIN);
++ rq_util_max = uclamp_rq_get(rq, UCLAMP_MAX);
++
++ util_min = max(rq_util_min, p_util_min);
++ util_max = max(rq_util_max, p_util_max);
+ }
+ if (!util_fits_cpu(util, util_min, util_max, cpu))
+ continue;
+@@ -7279,18 +7276,19 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
+
+ if (cpu == prev_cpu) {
+ /* Always use prev_cpu as a candidate. */
+- compute_prev_delta = true;
++ prev_spare_cap = cpu_cap;
+ } else if (cpu_cap > max_spare_cap) {
+ /*
+ * Find the CPU with the maximum spare capacity
+- * in the performance domain.
++ * among the remaining CPUs in the performance
++ * domain.
+ */
+ max_spare_cap = cpu_cap;
+ max_spare_cap_cpu = cpu;
+ }
+ }
+
+- if (max_spare_cap_cpu < 0 && !compute_prev_delta)
++ if (max_spare_cap_cpu < 0 && prev_spare_cap == 0)
+ continue;
+
+ eenv_pd_busy_time(&eenv, cpus, p);
+@@ -7298,7 +7296,7 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
+ base_energy = compute_energy(&eenv, pd, cpus, p, -1);
+
+ /* Evaluate the energy impact of using prev_cpu. */
+- if (compute_prev_delta) {
++ if (prev_spare_cap > 0) {
+ prev_delta = compute_energy(&eenv, pd, cpus, p,
+ prev_cpu);
+ /* CPU utilization has changed */
+@@ -7309,7 +7307,7 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
+ }
+
+ /* Evaluate the energy impact of using max_spare_cap_cpu. */
+- if (max_spare_cap_cpu >= 0) {
++ if (max_spare_cap_cpu >= 0 && max_spare_cap > prev_spare_cap) {
+ cur_delta = compute_energy(&eenv, pd, cpus, p,
+ max_spare_cap_cpu);
+ /* CPU utilization has changed */
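Replacing the compute_prev_delta flag with prev_spare_cap lets the loop compare candidates by spare capacity and skip one expensive compute_energy() call per performance domain whenever the alternative CPU cannot beat prev_cpu anyway. Numeric trace:

    /*
     * prev_spare_cap = 512, max_spare_cap = 384:
     *   before: compute_energy() for prev_cpu AND max_spare_cap_cpu
     *   after:  384 > 512 is false -> only the prev_cpu delta is computed
     */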
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index 33236241f2364..6f726ea0fde01 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -1248,12 +1248,17 @@ static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
+ call_rcu(&hash->rcu, __free_ftrace_hash_rcu);
+ }
+
++/**
++ * ftrace_free_filter - remove all filters for an ftrace_ops
++ * @ops: the ops to remove the filters from
++ */
+ void ftrace_free_filter(struct ftrace_ops *ops)
+ {
+ ftrace_ops_init(ops);
+ free_ftrace_hash(ops->func_hash->filter_hash);
+ free_ftrace_hash(ops->func_hash->notrace_hash);
+ }
++EXPORT_SYMBOL_GPL(ftrace_free_filter);
+
+ static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
+ {
+@@ -5828,6 +5833,10 @@ EXPORT_SYMBOL_GPL(modify_ftrace_direct_multi);
+ *
+ * Filters denote which functions should be enabled when tracing is enabled
+ * If @ip is NULL, it fails to update filter.
++ *
++ * This can allocate memory which must be freed before @ops can be freed,
++ * either by removing each filtered addr or by using
++ * ftrace_free_filter(@ops).
+ */
+ int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
+ int remove, int reset)
+@@ -5847,7 +5856,11 @@ EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);
+ *
+ * Filters denote which functions should be enabled when tracing is enabled
+ * If @ips array or any ip specified within is NULL, it fails to update filter.
+- */
++ *
++ * This can allocate memory which must be freed before @ops can be freed,
++ * either by removing each filtered addr or by using
++ * ftrace_free_filter(@ops).
++ */
+ int ftrace_set_filter_ips(struct ftrace_ops *ops, unsigned long *ips,
+ unsigned int cnt, int remove, int reset)
+ {
+@@ -5889,6 +5902,10 @@ ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
+ *
+ * Filters denote which functions should be enabled when tracing is enabled.
+ * If @buf is NULL and reset is set, all functions will be enabled for tracing.
++ *
++ * This can allocate memory which must be freed before @ops can be freed,
++ * either by removing each filtered addr or by using
++ * ftrace_free_filter(@ops).
+ */
+ int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
+ int len, int reset)
+@@ -5908,6 +5925,10 @@ EXPORT_SYMBOL_GPL(ftrace_set_filter);
+ * Notrace Filters denote which functions should not be enabled when tracing
+ * is enabled. If @buf is NULL and reset is set, all functions will be enabled
+ * for tracing.
++ *
++ * This can allocate memory which must be freed before @ops can be freed,
++ * either by removing each filtered addr or by using
++ * ftrace_free_filter(@ops).
+ */
+ int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
+ int len, int reset)
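All four kernel-doc additions state the same lifetime rule, and ftrace_free_filter() is now exported so modules can follow it. A hedged sketch of the documented order (my_callback and my_target are illustrative, not from this patch):

    static void my_callback(unsigned long ip, unsigned long parent_ip,
                            struct ftrace_ops *op, struct ftrace_regs *fregs)
    { /* ... */ }

    static struct ftrace_ops my_ops = { .func = my_callback };

    /* setup: the filter hash is allocated here */
    ftrace_set_filter_ip(&my_ops, (unsigned long)my_target, 0, 0);
    register_ftrace_function(&my_ops);

    /* teardown: unregister first, then free the hashes */
    unregister_ftrace_function(&my_ops);
    ftrace_free_filter(&my_ops);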
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 3076af8dbf32e..546e84ae9993b 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -10291,6 +10291,8 @@ void __init early_trace_init(void)
+ static_key_enable(&tracepoint_printk_key.key);
+ }
+ tracer_alloc_buffers();
++
++ init_events();
+ }
+
+ void __init trace_init(void)
+diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
+index 5581754d97628..9e931f51328a2 100644
+--- a/kernel/trace/trace.h
++++ b/kernel/trace/trace.h
+@@ -1490,6 +1490,7 @@ extern void trace_event_enable_cmd_record(bool enable);
+ extern void trace_event_enable_tgid_record(bool enable);
+
+ extern int event_trace_init(void);
++extern int init_events(void);
+ extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr);
+ extern int event_trace_del_tracer(struct trace_array *tr);
+ extern void __trace_early_add_events(struct trace_array *tr);
+diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
+index c6e406995c112..da3bfe8625d96 100644
+--- a/kernel/trace/trace_events_hist.c
++++ b/kernel/trace/trace_events_hist.c
+@@ -1975,6 +1975,8 @@ static struct hist_field *create_hist_field(struct hist_trigger_data *hist_data,
+ hist_field->fn_num = flags & HIST_FIELD_FL_LOG2 ? HIST_FIELD_FN_LOG2 :
+ HIST_FIELD_FN_BUCKET;
+ hist_field->operands[0] = create_hist_field(hist_data, field, fl, NULL);
++ if (!hist_field->operands[0])
++ goto free;
+ hist_field->size = hist_field->operands[0]->size;
+ hist_field->type = kstrdup_const(hist_field->operands[0]->type, GFP_KERNEL);
+ if (!hist_field->type)
+diff --git a/kernel/trace/trace_osnoise.c b/kernel/trace/trace_osnoise.c
+index 4300c5dc4e5db..1c07efcb3d466 100644
+--- a/kernel/trace/trace_osnoise.c
++++ b/kernel/trace/trace_osnoise.c
+@@ -125,9 +125,8 @@ static void osnoise_unregister_instance(struct trace_array *tr)
+ * register/unregister serialization is provided by trace's
+ * trace_types_lock.
+ */
+- lockdep_assert_held(&trace_types_lock);
+-
+- list_for_each_entry_rcu(inst, &osnoise_instances, list) {
++ list_for_each_entry_rcu(inst, &osnoise_instances, list,
++ lockdep_is_held(&trace_types_lock)) {
+ if (inst->tr == tr) {
+ list_del_rcu(&inst->list);
+ found = 1;
+diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
+index 67f47ea27921d..5cd4fb6563068 100644
+--- a/kernel/trace/trace_output.c
++++ b/kernel/trace/trace_output.c
+@@ -1568,7 +1568,7 @@ static struct trace_event *events[] __initdata = {
+ NULL
+ };
+
+-__init static int init_events(void)
++__init int init_events(void)
+ {
+ struct trace_event *event;
+ int i, ret;
+@@ -1581,4 +1581,3 @@ __init static int init_events(void)
+
+ return 0;
+ }
+-early_initcall(init_events);
+diff --git a/lib/lockref.c b/lib/lockref.c
+index 45e93ece8ba0d..2afe4c5d89191 100644
+--- a/lib/lockref.c
++++ b/lib/lockref.c
+@@ -23,7 +23,6 @@
+ } \
+ if (!--retry) \
+ break; \
+- cpu_relax(); \
+ } \
+ } while (0)
+
+diff --git a/lib/nlattr.c b/lib/nlattr.c
+index b67a53e29b8fe..dffd60e4065fd 100644
+--- a/lib/nlattr.c
++++ b/lib/nlattr.c
+@@ -10,6 +10,7 @@
+ #include <linux/kernel.h>
+ #include <linux/errno.h>
+ #include <linux/jiffies.h>
++#include <linux/nospec.h>
+ #include <linux/skbuff.h>
+ #include <linux/string.h>
+ #include <linux/types.h>
+@@ -381,6 +382,7 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
+ if (type <= 0 || type > maxtype)
+ return 0;
+
++ type = array_index_nospec(type, maxtype + 1);
+ pt = &policy[type];
+
+ BUG_ON(pt->type > NLA_TYPE_MAX);
+@@ -596,6 +598,7 @@ static int __nla_validate_parse(const struct nlattr *head, int len, int maxtype,
+ }
+ continue;
+ }
++ type = array_index_nospec(type, maxtype + 1);
+ if (policy) {
+ int err = validate_nla(nla, maxtype, policy,
+ validate, extack, depth);
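array_index_nospec(type, maxtype + 1) clamps the index to 0 under misspeculation whenever it is out of range, so the policy[type] load that follows the bounds check can no longer serve as a Spectre v1 gadget; the ipv4 hunks below apply the identical pattern to RTAX metric types. The generic shape, from <linux/nospec.h>:

    if (idx >= ARRAY_SIZE(table))
            return -EINVAL;
    idx = array_index_nospec(idx, ARRAY_SIZE(table));
    val = table[idx];       /* safe even if the branch was speculated past */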
+diff --git a/mm/compaction.c b/mm/compaction.c
+index ca1603524bbe0..8238e83385a79 100644
+--- a/mm/compaction.c
++++ b/mm/compaction.c
+@@ -1839,6 +1839,7 @@ static unsigned long fast_find_migrateblock(struct compact_control *cc)
+ pfn = cc->zone->zone_start_pfn;
+ cc->fast_search_fail = 0;
+ found_block = true;
++ set_pageblock_skip(freepage);
+ break;
+ }
+ }
+diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
+index 8aab2e882958c..3c3b79f2e4c03 100644
+--- a/net/bluetooth/hci_conn.c
++++ b/net/bluetooth/hci_conn.c
+@@ -821,6 +821,7 @@ static void terminate_big_destroy(struct hci_dev *hdev, void *data, int err)
+ static int hci_le_terminate_big(struct hci_dev *hdev, u8 big, u8 bis)
+ {
+ struct iso_list_data *d;
++ int ret;
+
+ bt_dev_dbg(hdev, "big 0x%2.2x bis 0x%2.2x", big, bis);
+
+@@ -832,8 +833,12 @@ static int hci_le_terminate_big(struct hci_dev *hdev, u8 big, u8 bis)
+ d->big = big;
+ d->bis = bis;
+
+- return hci_cmd_sync_queue(hdev, terminate_big_sync, d,
+- terminate_big_destroy);
++ ret = hci_cmd_sync_queue(hdev, terminate_big_sync, d,
++ terminate_big_destroy);
++ if (ret)
++ kfree(d);
++
++ return ret;
+ }
+
+ static int big_terminate_sync(struct hci_dev *hdev, void *data)
+@@ -858,6 +863,7 @@ static int big_terminate_sync(struct hci_dev *hdev, void *data)
+ static int hci_le_big_terminate(struct hci_dev *hdev, u8 big, u16 sync_handle)
+ {
+ struct iso_list_data *d;
++ int ret;
+
+ bt_dev_dbg(hdev, "big 0x%2.2x sync_handle 0x%4.4x", big, sync_handle);
+
+@@ -869,8 +875,12 @@ static int hci_le_big_terminate(struct hci_dev *hdev, u8 big, u16 sync_handle)
+ d->big = big;
+ d->sync_handle = sync_handle;
+
+- return hci_cmd_sync_queue(hdev, big_terminate_sync, d,
+- terminate_big_destroy);
++ ret = hci_cmd_sync_queue(hdev, big_terminate_sync, d,
++ terminate_big_destroy);
++ if (ret)
++ kfree(d);
++
++ return ret;
+ }
+
+ /* Cleanup BIS connection
+diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
+index faca701bce2a3..0e2425eb6aa79 100644
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -3838,8 +3838,11 @@ static u8 hci_cc_le_set_cig_params(struct hci_dev *hdev, void *data,
+ conn->handle, conn->link);
+
+ /* Create CIS if LE is already connected */
+- if (conn->link && conn->link->state == BT_CONNECTED)
++ if (conn->link && conn->link->state == BT_CONNECTED) {
++ rcu_read_unlock();
+ hci_le_create_cis(conn->link);
++ rcu_read_lock();
++ }
+
+ if (i == rp->num_handles)
+ break;
+diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
+index 8d6c8cbfe1de4..3eec688a88a92 100644
+--- a/net/bluetooth/hci_sync.c
++++ b/net/bluetooth/hci_sync.c
+@@ -4703,6 +4703,7 @@ int hci_dev_open_sync(struct hci_dev *hdev)
+ hdev->flush(hdev);
+
+ if (hdev->sent_cmd) {
++ cancel_delayed_work_sync(&hdev->cmd_timer);
+ kfree_skb(hdev->sent_cmd);
+ hdev->sent_cmd = NULL;
+ }
+@@ -6168,20 +6169,13 @@ int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
+
+ static int _update_adv_data_sync(struct hci_dev *hdev, void *data)
+ {
+- u8 instance = *(u8 *)data;
+-
+- kfree(data);
++ u8 instance = PTR_ERR(data);
+
+ return hci_update_adv_data_sync(hdev, instance);
+ }
+
+ int hci_update_adv_data(struct hci_dev *hdev, u8 instance)
+ {
+- u8 *inst_ptr = kmalloc(1, GFP_KERNEL);
+-
+- if (!inst_ptr)
+- return -ENOMEM;
+-
+- *inst_ptr = instance;
+- return hci_cmd_sync_queue(hdev, _update_adv_data_sync, inst_ptr, NULL);
++ return hci_cmd_sync_queue(hdev, _update_adv_data_sync,
++ ERR_PTR(instance), NULL);
+ }
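Rather than kmalloc()ing a single byte to carry the advertising instance into the deferred call, the value is now encoded in the cookie pointer itself: ERR_PTR(instance) going in, PTR_ERR(data) truncated back to u8 coming out. That removes an allocation, its failure path, and the kfree(). The trick works because ERR_PTR()/PTR_ERR() are plain long<->pointer casts; in isolation (my_sync_fn is illustrative):

    err = hci_cmd_sync_queue(hdev, my_sync_fn, ERR_PTR(instance), NULL);

    static int my_sync_fn(struct hci_dev *hdev, void *data)
    {
            u8 instance = PTR_ERR(data);    /* cast the long back to u8 */

            return hci_update_adv_data_sync(hdev, instance);
    }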
+diff --git a/net/bluetooth/iso.c b/net/bluetooth/iso.c
+index 26db929b97c43..2dabef488eaae 100644
+--- a/net/bluetooth/iso.c
++++ b/net/bluetooth/iso.c
+@@ -261,13 +261,13 @@ static int iso_connect_bis(struct sock *sk)
+
+ if (!bis_capable(hdev)) {
+ err = -EOPNOTSUPP;
+- goto done;
++ goto unlock;
+ }
+
+ /* Fail if out PHYs are marked as disabled */
+ if (!iso_pi(sk)->qos.out.phy) {
+ err = -EINVAL;
+- goto done;
++ goto unlock;
+ }
+
+ hcon = hci_connect_bis(hdev, &iso_pi(sk)->dst, iso_pi(sk)->dst_type,
+@@ -275,22 +275,27 @@ static int iso_connect_bis(struct sock *sk)
+ iso_pi(sk)->base);
+ if (IS_ERR(hcon)) {
+ err = PTR_ERR(hcon);
+- goto done;
++ goto unlock;
+ }
+
+ conn = iso_conn_add(hcon);
+ if (!conn) {
+ hci_conn_drop(hcon);
+ err = -ENOMEM;
+- goto done;
++ goto unlock;
+ }
+
+- /* Update source addr of the socket */
+- bacpy(&iso_pi(sk)->src, &hcon->src);
++ hci_dev_unlock(hdev);
++ hci_dev_put(hdev);
+
+ err = iso_chan_add(conn, sk, NULL);
+ if (err)
+- goto done;
++ return err;
++
++ lock_sock(sk);
++
++ /* Update source addr of the socket */
++ bacpy(&iso_pi(sk)->src, &hcon->src);
+
+ if (hcon->state == BT_CONNECTED) {
+ iso_sock_clear_timer(sk);
+@@ -300,7 +305,10 @@ static int iso_connect_bis(struct sock *sk)
+ iso_sock_set_timer(sk, sk->sk_sndtimeo);
+ }
+
+-done:
++ release_sock(sk);
++ return err;
++
++unlock:
+ hci_dev_unlock(hdev);
+ hci_dev_put(hdev);
+ return err;
+@@ -324,13 +332,13 @@ static int iso_connect_cis(struct sock *sk)
+
+ if (!cis_central_capable(hdev)) {
+ err = -EOPNOTSUPP;
+- goto done;
++ goto unlock;
+ }
+
+ /* Fail if either PHYs are marked as disabled */
+ if (!iso_pi(sk)->qos.in.phy && !iso_pi(sk)->qos.out.phy) {
+ err = -EINVAL;
+- goto done;
++ goto unlock;
+ }
+
+ /* Just bind if DEFER_SETUP has been set */
+@@ -340,7 +348,7 @@ static int iso_connect_cis(struct sock *sk)
+ &iso_pi(sk)->qos);
+ if (IS_ERR(hcon)) {
+ err = PTR_ERR(hcon);
+- goto done;
++ goto unlock;
+ }
+ } else {
+ hcon = hci_connect_cis(hdev, &iso_pi(sk)->dst,
+@@ -348,7 +356,7 @@ static int iso_connect_cis(struct sock *sk)
+ &iso_pi(sk)->qos);
+ if (IS_ERR(hcon)) {
+ err = PTR_ERR(hcon);
+- goto done;
++ goto unlock;
+ }
+ }
+
+@@ -356,15 +364,20 @@ static int iso_connect_cis(struct sock *sk)
+ if (!conn) {
+ hci_conn_drop(hcon);
+ err = -ENOMEM;
+- goto done;
++ goto unlock;
+ }
+
+- /* Update source addr of the socket */
+- bacpy(&iso_pi(sk)->src, &hcon->src);
++ hci_dev_unlock(hdev);
++ hci_dev_put(hdev);
+
+ err = iso_chan_add(conn, sk, NULL);
+ if (err)
+- goto done;
++ return err;
++
++ lock_sock(sk);
++
++ /* Update source addr of the socket */
++ bacpy(&iso_pi(sk)->src, &hcon->src);
+
+ if (hcon->state == BT_CONNECTED) {
+ iso_sock_clear_timer(sk);
+@@ -377,7 +390,10 @@ static int iso_connect_cis(struct sock *sk)
+ iso_sock_set_timer(sk, sk->sk_sndtimeo);
+ }
+
+-done:
++ release_sock(sk);
++ return err;
++
++unlock:
+ hci_dev_unlock(hdev);
+ hci_dev_put(hdev);
+ return err;
+@@ -831,20 +847,23 @@ static int iso_sock_connect(struct socket *sock, struct sockaddr *addr,
+ bacpy(&iso_pi(sk)->dst, &sa->iso_bdaddr);
+ iso_pi(sk)->dst_type = sa->iso_bdaddr_type;
+
++ release_sock(sk);
++
+ if (bacmp(&iso_pi(sk)->dst, BDADDR_ANY))
+ err = iso_connect_cis(sk);
+ else
+ err = iso_connect_bis(sk);
+
+ if (err)
+- goto done;
++ return err;
++
++ lock_sock(sk);
+
+ if (!test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
+ err = bt_sock_wait_state(sk, BT_CONNECTED,
+ sock_sndtimeo(sk, flags & O_NONBLOCK));
+ }
+
+-done:
+ release_sock(sk);
+ return err;
+ }
+@@ -1099,28 +1118,22 @@ static int iso_sock_recvmsg(struct socket *sock, struct msghdr *msg,
+ {
+ struct sock *sk = sock->sk;
+ struct iso_pinfo *pi = iso_pi(sk);
+- int err;
+
+ BT_DBG("sk %p", sk);
+
+- lock_sock(sk);
+-
+ if (test_and_clear_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
+ switch (sk->sk_state) {
+ case BT_CONNECT2:
++ lock_sock(sk);
+ iso_conn_defer_accept(pi->conn->hcon);
+ sk->sk_state = BT_CONFIG;
+ release_sock(sk);
+ return 0;
+ case BT_CONNECT:
+- err = iso_connect_cis(sk);
+- release_sock(sk);
+- return err;
++ return iso_connect_cis(sk);
+ }
+ }
+
+- release_sock(sk);
+-
+ return bt_sock_recvmsg(sock, msg, len, flags);
+ }
+
+@@ -1415,33 +1428,29 @@ static void iso_conn_ready(struct iso_conn *conn)
+ struct sock *parent;
+ struct sock *sk = conn->sk;
+ struct hci_ev_le_big_sync_estabilished *ev;
++ struct hci_conn *hcon;
+
+ BT_DBG("conn %p", conn);
+
+ if (sk) {
+ iso_sock_ready(conn->sk);
+ } else {
+- iso_conn_lock(conn);
+-
+- if (!conn->hcon) {
+- iso_conn_unlock(conn);
++ hcon = conn->hcon;
++ if (!hcon)
+ return;
+- }
+
+- ev = hci_recv_event_data(conn->hcon->hdev,
++ ev = hci_recv_event_data(hcon->hdev,
+ HCI_EVT_LE_BIG_SYNC_ESTABILISHED);
+ if (ev)
+- parent = iso_get_sock_listen(&conn->hcon->src,
+- &conn->hcon->dst,
++ parent = iso_get_sock_listen(&hcon->src,
++ &hcon->dst,
+ iso_match_big, ev);
+ else
+- parent = iso_get_sock_listen(&conn->hcon->src,
++ parent = iso_get_sock_listen(&hcon->src,
+ BDADDR_ANY, NULL, NULL);
+
+- if (!parent) {
+- iso_conn_unlock(conn);
++ if (!parent)
+ return;
+- }
+
+ lock_sock(parent);
+
+@@ -1449,30 +1458,29 @@ static void iso_conn_ready(struct iso_conn *conn)
+ BTPROTO_ISO, GFP_ATOMIC, 0);
+ if (!sk) {
+ release_sock(parent);
+- iso_conn_unlock(conn);
+ return;
+ }
+
+ iso_sock_init(sk, parent);
+
+- bacpy(&iso_pi(sk)->src, &conn->hcon->src);
+- iso_pi(sk)->src_type = conn->hcon->src_type;
++ bacpy(&iso_pi(sk)->src, &hcon->src);
++ iso_pi(sk)->src_type = hcon->src_type;
+
+ /* If hcon has no destination address (BDADDR_ANY) it means it
+ * was created by HCI_EV_LE_BIG_SYNC_ESTABILISHED so we need to
+ * initialize using the parent socket destination address.
+ */
+- if (!bacmp(&conn->hcon->dst, BDADDR_ANY)) {
+- bacpy(&conn->hcon->dst, &iso_pi(parent)->dst);
+- conn->hcon->dst_type = iso_pi(parent)->dst_type;
+- conn->hcon->sync_handle = iso_pi(parent)->sync_handle;
++ if (!bacmp(&hcon->dst, BDADDR_ANY)) {
++ bacpy(&hcon->dst, &iso_pi(parent)->dst);
++ hcon->dst_type = iso_pi(parent)->dst_type;
++ hcon->sync_handle = iso_pi(parent)->sync_handle;
+ }
+
+- bacpy(&iso_pi(sk)->dst, &conn->hcon->dst);
+- iso_pi(sk)->dst_type = conn->hcon->dst_type;
++ bacpy(&iso_pi(sk)->dst, &hcon->dst);
++ iso_pi(sk)->dst_type = hcon->dst_type;
+
+- hci_conn_hold(conn->hcon);
+- __iso_chan_add(conn, sk, parent);
++ hci_conn_hold(hcon);
++ iso_chan_add(conn, sk, parent);
+
+ if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags))
+ sk->sk_state = BT_CONNECT2;
+@@ -1483,8 +1491,6 @@ static void iso_conn_ready(struct iso_conn *conn)
+ parent->sk_data_ready(parent);
+
+ release_sock(parent);
+-
+- iso_conn_unlock(conn);
+ }
+ }
+
+diff --git a/net/bluetooth/mgmt_util.h b/net/bluetooth/mgmt_util.h
+index 6a8b7e84293df..bdf978605d5a8 100644
+--- a/net/bluetooth/mgmt_util.h
++++ b/net/bluetooth/mgmt_util.h
+@@ -27,7 +27,7 @@ struct mgmt_mesh_tx {
+ struct sock *sk;
+ u8 handle;
+ u8 instance;
+- u8 param[sizeof(struct mgmt_cp_mesh_send) + 29];
++ u8 param[sizeof(struct mgmt_cp_mesh_send) + 31];
+ };
+
+ struct mgmt_pending_cmd {
+diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
+index 21e24da4847f0..4397e14ff560f 100644
+--- a/net/bluetooth/rfcomm/sock.c
++++ b/net/bluetooth/rfcomm/sock.c
+@@ -391,6 +391,7 @@ static int rfcomm_sock_connect(struct socket *sock, struct sockaddr *addr, int a
+ addr->sa_family != AF_BLUETOOTH)
+ return -EINVAL;
+
++ sock_hold(sk);
+ lock_sock(sk);
+
+ if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND) {
+@@ -410,14 +411,18 @@ static int rfcomm_sock_connect(struct socket *sock, struct sockaddr *addr, int a
+ d->sec_level = rfcomm_pi(sk)->sec_level;
+ d->role_switch = rfcomm_pi(sk)->role_switch;
+
++ /* Drop sock lock to avoid potential deadlock with the RFCOMM lock */
++ release_sock(sk);
+ err = rfcomm_dlc_open(d, &rfcomm_pi(sk)->src, &sa->rc_bdaddr,
+ sa->rc_channel);
+- if (!err)
++ lock_sock(sk);
++ if (!err && !sock_flag(sk, SOCK_ZAPPED))
+ err = bt_sock_wait_state(sk, BT_CONNECTED,
+ sock_sndtimeo(sk, flags & O_NONBLOCK));
+
+ done:
+ release_sock(sk);
++ sock_put(sk);
+ return err;
+ }
+
+diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
+index f64654df71a29..4c1707d0eb9b0 100644
+--- a/net/core/net_namespace.c
++++ b/net/core/net_namespace.c
+@@ -137,12 +137,12 @@ static int ops_init(const struct pernet_operations *ops, struct net *net)
+ return 0;
+
+ if (ops->id && ops->size) {
+-cleanup:
+ ng = rcu_dereference_protected(net->gen,
+ lockdep_is_held(&pernet_ops_rwsem));
+ ng->ptr[*ops->id] = NULL;
+ }
+
++cleanup:
+ kfree(data);
+
+ out:
+diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
+index ce9ff3c62e840..3bb890a40ed73 100644
+--- a/net/ipv4/fib_semantics.c
++++ b/net/ipv4/fib_semantics.c
+@@ -30,6 +30,7 @@
+ #include <linux/slab.h>
+ #include <linux/netlink.h>
+ #include <linux/hash.h>
++#include <linux/nospec.h>
+
+ #include <net/arp.h>
+ #include <net/inet_dscp.h>
+@@ -1022,6 +1023,7 @@ bool fib_metrics_match(struct fib_config *cfg, struct fib_info *fi)
+ if (type > RTAX_MAX)
+ return false;
+
++ type = array_index_nospec(type, RTAX_MAX + 1);
+ if (type == RTAX_CC_ALGO) {
+ char tmp[TCP_CA_NAME_MAX];
+ bool ecn_ca = false;
+diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
+index 67f5e54408020..a5711b8f4cb19 100644
+--- a/net/ipv4/inet_hashtables.c
++++ b/net/ipv4/inet_hashtables.c
+@@ -650,8 +650,20 @@ bool inet_ehash_insert(struct sock *sk, struct sock *osk, bool *found_dup_sk)
+ spin_lock(lock);
+ if (osk) {
+ WARN_ON_ONCE(sk->sk_hash != osk->sk_hash);
+- ret = sk_nulls_del_node_init_rcu(osk);
+- } else if (found_dup_sk) {
++ ret = sk_hashed(osk);
++ if (ret) {
++ /* Before deleting the node, we insert a new one to make
++ * sure that the look-up-sk process would not miss either
++ * of them and that at least one node always exists in the
++ * ehash table. Otherwise there is a tiny chance that a
++ * lookup could find nothing in the table.
++ */
++ __sk_nulls_add_node_tail_rcu(sk, list);
++ sk_nulls_del_node_init_rcu(osk);
++ }
++ goto unlock;
++ }
++ if (found_dup_sk) {
+ *found_dup_sk = inet_ehash_lookup_by_sk(sk, list);
+ if (*found_dup_sk)
+ ret = false;
+@@ -660,6 +672,7 @@ bool inet_ehash_insert(struct sock *sk, struct sock *osk, bool *found_dup_sk)
+ if (ret)
+ __sk_nulls_add_node_rcu(sk, list);
+
++unlock:
+ spin_unlock(lock);
+
+ return ret;
+diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
+index 1d77d992e6e77..beed32fff4841 100644
+--- a/net/ipv4/inet_timewait_sock.c
++++ b/net/ipv4/inet_timewait_sock.c
+@@ -91,10 +91,10 @@ void inet_twsk_put(struct inet_timewait_sock *tw)
+ }
+ EXPORT_SYMBOL_GPL(inet_twsk_put);
+
+-static void inet_twsk_add_node_rcu(struct inet_timewait_sock *tw,
+- struct hlist_nulls_head *list)
++static void inet_twsk_add_node_tail_rcu(struct inet_timewait_sock *tw,
++ struct hlist_nulls_head *list)
+ {
+- hlist_nulls_add_head_rcu(&tw->tw_node, list);
++ hlist_nulls_add_tail_rcu(&tw->tw_node, list);
+ }
+
+ static void inet_twsk_add_bind_node(struct inet_timewait_sock *tw,
+@@ -147,7 +147,7 @@ void inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
+
+ spin_lock(lock);
+
+- inet_twsk_add_node_rcu(tw, &ehead->chain);
++ inet_twsk_add_node_tail_rcu(tw, &ehead->chain);
+
+ /* Step 3: Remove SK from hash chain */
+ if (__sk_nulls_del_node_init_rcu(sk))
+diff --git a/net/ipv4/metrics.c b/net/ipv4/metrics.c
+index 25ea6ac44db95..6a1427916c7dc 100644
+--- a/net/ipv4/metrics.c
++++ b/net/ipv4/metrics.c
+@@ -1,5 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0-only
+ #include <linux/netlink.h>
++#include <linux/nospec.h>
+ #include <linux/rtnetlink.h>
+ #include <linux/types.h>
+ #include <net/ip.h>
+@@ -28,6 +29,7 @@ static int ip_metrics_convert(struct net *net, struct nlattr *fc_mx,
+ return -EINVAL;
+ }
+
++ type = array_index_nospec(type, RTAX_MAX + 1);
+ if (type == RTAX_CC_ALGO) {
+ char tmp[TCP_CA_NAME_MAX];
+
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 4f2205756cfee..ec19ed7224536 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -435,6 +435,7 @@ void tcp_init_sock(struct sock *sk)
+
+ /* There's a bubble in the pipe until at least the first ACK. */
+ tp->app_limited = ~0U;
++ tp->rate_app_limited = 1;
+
+ /* See draft-stevens-tcpca-spec-01 for discussion of the
+ * initialization of these values.
+@@ -3177,6 +3178,7 @@ int tcp_disconnect(struct sock *sk, int flags)
+ tp->last_oow_ack_time = 0;
+ /* There's a bubble in the pipe until at least the first ACK. */
+ tp->app_limited = ~0U;
++ tp->rate_app_limited = 1;
+ tp->rack.mstamp = 0;
+ tp->rack.advanced = 0;
+ tp->rack.reo_wnd_steps = 1;
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index 60fd91bb5171c..c314fdde0097c 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -547,7 +547,20 @@ int ip6_forward(struct sk_buff *skb)
+ pneigh_lookup(&nd_tbl, net, &hdr->daddr, skb->dev, 0)) {
+ int proxied = ip6_forward_proxy_check(skb);
+ if (proxied > 0) {
+- hdr->hop_limit--;
++ /* It's tempting to decrease the hop limit
++ * here by 1, as we do at the end of the
++ * function too.
++ *
++ * But that would be incorrect, as proxying is
++ * not forwarding. The ip6_input function
++ * will handle this packet locally, and it
++ * depends on the hop limit being unchanged.
++ *
++ * One example is the NDP hop limit, which
++ * always has to stay 255; others would be
++ * similar checks around RA packets, where the
++ * user can even change the desired limit.
++ */
+ return ip6_input(skb);
+ } else if (proxied < 0) {
+ __IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
+diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
+index 9a1415fe3fa78..03608d3ded4b8 100644
+--- a/net/l2tp/l2tp_core.c
++++ b/net/l2tp/l2tp_core.c
+@@ -104,9 +104,9 @@ static struct workqueue_struct *l2tp_wq;
+ /* per-net private data for this module */
+ static unsigned int l2tp_net_id;
+ struct l2tp_net {
+- struct list_head l2tp_tunnel_list;
+- /* Lock for write access to l2tp_tunnel_list */
+- spinlock_t l2tp_tunnel_list_lock;
++ /* Lock for write access to l2tp_tunnel_idr */
++ spinlock_t l2tp_tunnel_idr_lock;
++ struct idr l2tp_tunnel_idr;
+ struct hlist_head l2tp_session_hlist[L2TP_HASH_SIZE_2];
+ /* Lock for write access to l2tp_session_hlist */
+ spinlock_t l2tp_session_hlist_lock;
+@@ -208,13 +208,10 @@ struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id)
+ struct l2tp_tunnel *tunnel;
+
+ rcu_read_lock_bh();
+- list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
+- if (tunnel->tunnel_id == tunnel_id &&
+- refcount_inc_not_zero(&tunnel->ref_count)) {
+- rcu_read_unlock_bh();
+-
+- return tunnel;
+- }
++ tunnel = idr_find(&pn->l2tp_tunnel_idr, tunnel_id);
++ if (tunnel && refcount_inc_not_zero(&tunnel->ref_count)) {
++ rcu_read_unlock_bh();
++ return tunnel;
+ }
+ rcu_read_unlock_bh();
+
+@@ -224,13 +221,14 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_get);
+
+ struct l2tp_tunnel *l2tp_tunnel_get_nth(const struct net *net, int nth)
+ {
+- const struct l2tp_net *pn = l2tp_pernet(net);
++ struct l2tp_net *pn = l2tp_pernet(net);
++ unsigned long tunnel_id, tmp;
+ struct l2tp_tunnel *tunnel;
+ int count = 0;
+
+ rcu_read_lock_bh();
+- list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
+- if (++count > nth &&
++ idr_for_each_entry_ul(&pn->l2tp_tunnel_idr, tunnel, tmp, tunnel_id) {
++ if (tunnel && ++count > nth &&
+ refcount_inc_not_zero(&tunnel->ref_count)) {
+ rcu_read_unlock_bh();
+ return tunnel;
+@@ -1043,7 +1041,7 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, uns
+ IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | IPSKB_REROUTED);
+ nf_reset_ct(skb);
+
+- bh_lock_sock(sk);
++ bh_lock_sock_nested(sk);
+ if (sock_owned_by_user(sk)) {
+ kfree_skb(skb);
+ ret = NET_XMIT_DROP;
+@@ -1227,6 +1225,15 @@ static void l2tp_udp_encap_destroy(struct sock *sk)
+ l2tp_tunnel_delete(tunnel);
+ }
+
++static void l2tp_tunnel_remove(struct net *net, struct l2tp_tunnel *tunnel)
++{
++ struct l2tp_net *pn = l2tp_pernet(net);
++
++ spin_lock_bh(&pn->l2tp_tunnel_idr_lock);
++ idr_remove(&pn->l2tp_tunnel_idr, tunnel->tunnel_id);
++ spin_unlock_bh(&pn->l2tp_tunnel_idr_lock);
++}
++
+ /* Workqueue tunnel deletion function */
+ static void l2tp_tunnel_del_work(struct work_struct *work)
+ {
+@@ -1234,7 +1241,6 @@ static void l2tp_tunnel_del_work(struct work_struct *work)
+ del_work);
+ struct sock *sk = tunnel->sock;
+ struct socket *sock = sk->sk_socket;
+- struct l2tp_net *pn;
+
+ l2tp_tunnel_closeall(tunnel);
+
+@@ -1248,12 +1254,7 @@ static void l2tp_tunnel_del_work(struct work_struct *work)
+ }
+ }
+
+- /* Remove the tunnel struct from the tunnel list */
+- pn = l2tp_pernet(tunnel->l2tp_net);
+- spin_lock_bh(&pn->l2tp_tunnel_list_lock);
+- list_del_rcu(&tunnel->list);
+- spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
+-
++ l2tp_tunnel_remove(tunnel->l2tp_net, tunnel);
+ /* drop initial ref */
+ l2tp_tunnel_dec_refcount(tunnel);
+
+@@ -1384,8 +1385,6 @@ out:
+ return err;
+ }
+
+-static struct lock_class_key l2tp_socket_class;
+-
+ int l2tp_tunnel_create(int fd, int version, u32 tunnel_id, u32 peer_tunnel_id,
+ struct l2tp_tunnel_cfg *cfg, struct l2tp_tunnel **tunnelp)
+ {
+@@ -1455,12 +1454,19 @@ static int l2tp_validate_socket(const struct sock *sk, const struct net *net,
+ int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
+ struct l2tp_tunnel_cfg *cfg)
+ {
+- struct l2tp_tunnel *tunnel_walk;
+- struct l2tp_net *pn;
++ struct l2tp_net *pn = l2tp_pernet(net);
++ u32 tunnel_id = tunnel->tunnel_id;
+ struct socket *sock;
+ struct sock *sk;
+ int ret;
+
++ spin_lock_bh(&pn->l2tp_tunnel_idr_lock);
++ ret = idr_alloc_u32(&pn->l2tp_tunnel_idr, NULL, &tunnel_id, tunnel_id,
++ GFP_ATOMIC);
++ spin_unlock_bh(&pn->l2tp_tunnel_idr_lock);
++ if (ret)
++ return ret == -ENOSPC ? -EEXIST : ret;
++
+ if (tunnel->fd < 0) {
+ ret = l2tp_tunnel_sock_create(net, tunnel->tunnel_id,
+ tunnel->peer_tunnel_id, cfg,
+@@ -1474,6 +1480,7 @@ int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
+ }
+
+ sk = sock->sk;
++ lock_sock(sk);
+ write_lock_bh(&sk->sk_callback_lock);
+ ret = l2tp_validate_socket(sk, net, tunnel->encap);
+ if (ret < 0)
+@@ -1481,24 +1488,6 @@ int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
+ rcu_assign_sk_user_data(sk, tunnel);
+ write_unlock_bh(&sk->sk_callback_lock);
+
+- tunnel->l2tp_net = net;
+- pn = l2tp_pernet(net);
+-
+- sock_hold(sk);
+- tunnel->sock = sk;
+-
+- spin_lock_bh(&pn->l2tp_tunnel_list_lock);
+- list_for_each_entry(tunnel_walk, &pn->l2tp_tunnel_list, list) {
+- if (tunnel_walk->tunnel_id == tunnel->tunnel_id) {
+- spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
+- sock_put(sk);
+- ret = -EEXIST;
+- goto err_sock;
+- }
+- }
+- list_add_rcu(&tunnel->list, &pn->l2tp_tunnel_list);
+- spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
+-
+ if (tunnel->encap == L2TP_ENCAPTYPE_UDP) {
+ struct udp_tunnel_sock_cfg udp_cfg = {
+ .sk_user_data = tunnel,
+@@ -1512,9 +1501,16 @@ int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
+
+ tunnel->old_sk_destruct = sk->sk_destruct;
+ sk->sk_destruct = &l2tp_tunnel_destruct;
+- lockdep_set_class_and_name(&sk->sk_lock.slock, &l2tp_socket_class,
+- "l2tp_sock");
+ sk->sk_allocation = GFP_ATOMIC;
++ release_sock(sk);
++
++ sock_hold(sk);
++ tunnel->sock = sk;
++ tunnel->l2tp_net = net;
++
++ spin_lock_bh(&pn->l2tp_tunnel_idr_lock);
++ idr_replace(&pn->l2tp_tunnel_idr, tunnel, tunnel->tunnel_id);
++ spin_unlock_bh(&pn->l2tp_tunnel_idr_lock);
+
+ trace_register_tunnel(tunnel);
+
+@@ -1523,17 +1519,16 @@ int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
+
+ return 0;
+
+-err_sock:
+- write_lock_bh(&sk->sk_callback_lock);
+- rcu_assign_sk_user_data(sk, NULL);
+ err_inval_sock:
+ write_unlock_bh(&sk->sk_callback_lock);
++ release_sock(sk);
+
+ if (tunnel->fd < 0)
+ sock_release(sock);
+ else
+ sockfd_put(sock);
+ err:
++ l2tp_tunnel_remove(net, tunnel);
+ return ret;
+ }
+ EXPORT_SYMBOL_GPL(l2tp_tunnel_register);
+@@ -1647,8 +1642,8 @@ static __net_init int l2tp_init_net(struct net *net)
+ struct l2tp_net *pn = net_generic(net, l2tp_net_id);
+ int hash;
+
+- INIT_LIST_HEAD(&pn->l2tp_tunnel_list);
+- spin_lock_init(&pn->l2tp_tunnel_list_lock);
++ idr_init(&pn->l2tp_tunnel_idr);
++ spin_lock_init(&pn->l2tp_tunnel_idr_lock);
+
+ for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++)
+ INIT_HLIST_HEAD(&pn->l2tp_session_hlist[hash]);
+@@ -1662,11 +1657,13 @@ static __net_exit void l2tp_exit_net(struct net *net)
+ {
+ struct l2tp_net *pn = l2tp_pernet(net);
+ struct l2tp_tunnel *tunnel = NULL;
++ unsigned long tunnel_id, tmp;
+ int hash;
+
+ rcu_read_lock_bh();
+- list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
+- l2tp_tunnel_delete(tunnel);
++ idr_for_each_entry_ul(&pn->l2tp_tunnel_idr, tunnel, tmp, tunnel_id) {
++ if (tunnel)
++ l2tp_tunnel_delete(tunnel);
+ }
+ rcu_read_unlock_bh();
+
+@@ -1676,6 +1673,7 @@ static __net_exit void l2tp_exit_net(struct net *net)
+
+ for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++)
+ WARN_ON_ONCE(!hlist_empty(&pn->l2tp_session_hlist[hash]));
++ idr_destroy(&pn->l2tp_tunnel_idr);
+ }
+
+ static struct pernet_operations l2tp_net_ops = {
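The per-net tunnel list becomes an IDR keyed by tunnel_id, registered in two steps: idr_alloc_u32() with a NULL pointer reserves the id up front (a concurrent create of the same id then fails with -ENOSPC, mapped to -EEXIST), and idr_replace() publishes the tunnel only after the socket is fully set up. Because idr_find() returns NULL for a reserved-but-unpublished slot, l2tp_tunnel_get() can never hand out a half-initialized tunnel. The reserve-then-publish pattern in isolation (field names abbreviated, error unwinding elided):

    u32 id = want_id;

    spin_lock_bh(&pn->lock);
    ret = idr_alloc_u32(&pn->idr, NULL, &id, want_id, GFP_ATOMIC);
    spin_unlock_bh(&pn->lock);
    if (ret)
            return ret == -ENOSPC ? -EEXIST : ret;  /* id already taken */

    /* ... sleeping socket setup runs without the lock held ... */

    spin_lock_bh(&pn->lock);
    idr_replace(&pn->idr, tunnel, want_id);         /* publish */
    spin_unlock_bh(&pn->lock);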
+diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
+index b2e40465289d6..85d2b9e4b51ce 100644
+--- a/net/mac80211/agg-tx.c
++++ b/net/mac80211/agg-tx.c
+@@ -511,8 +511,6 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
+ */
+ clear_bit(HT_AGG_STATE_WANT_START, &tid_tx->state);
+
+- ieee80211_agg_stop_txq(sta, tid);
+-
+ /*
+ * Make sure no packets are being processed. This ensures that
+ * we have a valid starting sequence number and that in-flight
+diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
+index d3397c1248d36..b057253db28d5 100644
+--- a/net/mac80211/debugfs_sta.c
++++ b/net/mac80211/debugfs_sta.c
+@@ -167,7 +167,7 @@ static ssize_t sta_aqm_read(struct file *file, char __user *userbuf,
+ continue;
+ txqi = to_txq_info(sta->sta.txq[i]);
+ p += scnprintf(p, bufsz + buf - p,
+- "%d %d %u %u %u %u %u %u %u %u %u 0x%lx(%s%s%s)\n",
++ "%d %d %u %u %u %u %u %u %u %u %u 0x%lx(%s%s%s%s)\n",
+ txqi->txq.tid,
+ txqi->txq.ac,
+ txqi->tin.backlog_bytes,
+@@ -182,7 +182,8 @@ static ssize_t sta_aqm_read(struct file *file, char __user *userbuf,
+ txqi->flags,
+ test_bit(IEEE80211_TXQ_STOP, &txqi->flags) ? "STOP" : "RUN",
+ test_bit(IEEE80211_TXQ_AMPDU, &txqi->flags) ? " AMPDU" : "",
+- test_bit(IEEE80211_TXQ_NO_AMSDU, &txqi->flags) ? " NO-AMSDU" : "");
++ test_bit(IEEE80211_TXQ_NO_AMSDU, &txqi->flags) ? " NO-AMSDU" : "",
++ test_bit(IEEE80211_TXQ_DIRTY, &txqi->flags) ? " DIRTY" : "");
+ }
+
+ rcu_read_unlock();
+diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
+index 81e40b0a3b167..e685c12757f4b 100644
+--- a/net/mac80211/driver-ops.h
++++ b/net/mac80211/driver-ops.h
+@@ -1183,7 +1183,7 @@ static inline void drv_wake_tx_queue(struct ieee80211_local *local,
+
+ /* In reconfig don't transmit now, but mark for waking later */
+ if (local->in_reconfig) {
+- set_bit(IEEE80211_TXQ_STOP_NETIF_TX, &txq->flags);
++ set_bit(IEEE80211_TXQ_DIRTY, &txq->flags);
+ return;
+ }
+
+diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
+index 83bc41346ae7f..ae42e956eff5a 100644
+--- a/net/mac80211/ht.c
++++ b/net/mac80211/ht.c
+@@ -391,6 +391,43 @@ void ieee80211_ba_session_work(struct work_struct *work)
+
+ tid_tx = sta->ampdu_mlme.tid_start_tx[tid];
+ if (!blocked && tid_tx) {
++ struct ieee80211_sub_if_data *sdata = sta->sdata;
++ struct ieee80211_local *local = sdata->local;
++
++ if (local->ops->wake_tx_queue) {
++ struct txq_info *txqi =
++ to_txq_info(sta->sta.txq[tid]);
++ struct fq *fq = &local->fq;
++
++ spin_lock_bh(&fq->lock);
++
++ /* Allow only frags to be dequeued */
++ set_bit(IEEE80211_TXQ_STOP, &txqi->flags);
++
++ if (!skb_queue_empty(&txqi->frags)) {
++ /* Fragmented Tx is ongoing, wait for it
++ * to finish. Reschedule worker to retry
++ * later.
++ */
++
++ spin_unlock_bh(&fq->lock);
++ spin_unlock_bh(&sta->lock);
++
++ /* Give the task working on the txq a
++ * chance to send out the queued frags
++ */
++ synchronize_net();
++
++ mutex_unlock(&sta->ampdu_mlme.mtx);
++
++ ieee80211_queue_work(&sdata->local->hw,
++ work);
++ return;
++ }
++
++ spin_unlock_bh(&fq->lock);
++ }
++
+ /*
+ * Assign it over to the normal tid_tx array
+ * where it "goes live".
+diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
+index de7b8a4d4bbbb..a8862f2c64ec0 100644
+--- a/net/mac80211/ieee80211_i.h
++++ b/net/mac80211/ieee80211_i.h
+@@ -836,7 +836,7 @@ enum txq_info_flags {
+ IEEE80211_TXQ_STOP,
+ IEEE80211_TXQ_AMPDU,
+ IEEE80211_TXQ_NO_AMSDU,
+- IEEE80211_TXQ_STOP_NETIF_TX,
++ IEEE80211_TXQ_DIRTY,
+ };
+
+ /**
+diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
+index cc10ee1ff8e93..6409097a56c7a 100644
+--- a/net/mac80211/tx.c
++++ b/net/mac80211/tx.c
+@@ -1295,7 +1295,8 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
+ if (!(info->flags & IEEE80211_TX_CTL_DONTFRAG)) {
+ if (!(tx->flags & IEEE80211_TX_UNICAST) ||
+ skb->len + FCS_LEN <= local->hw.wiphy->frag_threshold ||
+- info->flags & IEEE80211_TX_CTL_AMPDU)
++ (info->flags & IEEE80211_TX_CTL_AMPDU &&
++ !local->ops->wake_tx_queue))
+ info->flags |= IEEE80211_TX_CTL_DONTFRAG;
+ }
+
+@@ -3709,13 +3710,15 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
+ struct ieee80211_local *local = hw_to_local(hw);
+ struct txq_info *txqi = container_of(txq, struct txq_info, txq);
+ struct ieee80211_hdr *hdr;
+- struct sk_buff *skb = NULL;
+ struct fq *fq = &local->fq;
+ struct fq_tin *tin = &txqi->tin;
+ struct ieee80211_tx_info *info;
+ struct ieee80211_tx_data tx;
++ struct sk_buff *skb;
+ ieee80211_tx_result r;
+ struct ieee80211_vif *vif = txq->vif;
++ int q = vif->hw_queue[txq->ac];
++ bool q_stopped;
+
+ WARN_ON_ONCE(softirq_count() == 0);
+
+@@ -3723,17 +3726,18 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
+ return NULL;
+
+ begin:
+- spin_lock_bh(&fq->lock);
++ spin_lock(&local->queue_stop_reason_lock);
++ q_stopped = local->queue_stop_reasons[q];
++ spin_unlock(&local->queue_stop_reason_lock);
+
+- if (test_bit(IEEE80211_TXQ_STOP, &txqi->flags) ||
+- test_bit(IEEE80211_TXQ_STOP_NETIF_TX, &txqi->flags))
+- goto out;
+-
+- if (vif->txqs_stopped[txq->ac]) {
+- set_bit(IEEE80211_TXQ_STOP_NETIF_TX, &txqi->flags);
+- goto out;
++ if (unlikely(q_stopped)) {
++ /* mark for waking later */
++ set_bit(IEEE80211_TXQ_DIRTY, &txqi->flags);
++ return NULL;
+ }
+
++ spin_lock_bh(&fq->lock);
++
+ /* Make sure fragments stay together. */
+ skb = __skb_dequeue(&txqi->frags);
+ if (unlikely(skb)) {
+@@ -3743,6 +3747,9 @@ begin:
+ IEEE80211_SKB_CB(skb)->control.flags &=
+ ~IEEE80211_TX_INTCFL_NEED_TXPROCESSING;
+ } else {
++ if (unlikely(test_bit(IEEE80211_TXQ_STOP, &txqi->flags)))
++ goto out;
++
+ skb = fq_tin_dequeue(fq, tin, fq_tin_dequeue_func);
+ }
+
+@@ -3793,7 +3800,8 @@ begin:
+ }
+
+ if (test_bit(IEEE80211_TXQ_AMPDU, &txqi->flags))
+- info->flags |= IEEE80211_TX_CTL_AMPDU;
++ info->flags |= (IEEE80211_TX_CTL_AMPDU |
++ IEEE80211_TX_CTL_DONTFRAG);
+ else
+ info->flags &= ~IEEE80211_TX_CTL_AMPDU;
+
+diff --git a/net/mac80211/util.c b/net/mac80211/util.c
+index b512cb37aafb7..ed53c51bbc321 100644
+--- a/net/mac80211/util.c
++++ b/net/mac80211/util.c
+@@ -301,8 +301,6 @@ static void __ieee80211_wake_txqs(struct ieee80211_sub_if_data *sdata, int ac)
+ local_bh_disable();
+ spin_lock(&fq->lock);
+
+- sdata->vif.txqs_stopped[ac] = false;
+-
+ if (!test_bit(SDATA_STATE_RUNNING, &sdata->state))
+ goto out;
+
+@@ -324,7 +322,7 @@ static void __ieee80211_wake_txqs(struct ieee80211_sub_if_data *sdata, int ac)
+ if (ac != txq->ac)
+ continue;
+
+- if (!test_and_clear_bit(IEEE80211_TXQ_STOP_NETIF_TX,
++ if (!test_and_clear_bit(IEEE80211_TXQ_DIRTY,
+ &txqi->flags))
+ continue;
+
+@@ -339,7 +337,7 @@ static void __ieee80211_wake_txqs(struct ieee80211_sub_if_data *sdata, int ac)
+
+ txqi = to_txq_info(vif->txq);
+
+- if (!test_and_clear_bit(IEEE80211_TXQ_STOP_NETIF_TX, &txqi->flags) ||
++ if (!test_and_clear_bit(IEEE80211_TXQ_DIRTY, &txqi->flags) ||
+ (ps && atomic_read(&ps->num_sta_ps)) || ac != vif->txq->ac)
+ goto out;
+
+@@ -537,16 +535,10 @@ static void __ieee80211_stop_queue(struct ieee80211_hw *hw, int queue,
+ continue;
+
+ for (ac = 0; ac < n_acs; ac++) {
+- if (sdata->vif.hw_queue[ac] == queue ||
+- sdata->vif.cab_queue == queue) {
+- if (!local->ops->wake_tx_queue) {
+- netif_stop_subqueue(sdata->dev, ac);
+- continue;
+- }
+- spin_lock(&local->fq.lock);
+- sdata->vif.txqs_stopped[ac] = true;
+- spin_unlock(&local->fq.lock);
+- }
++ if (!local->ops->wake_tx_queue &&
++ (sdata->vif.hw_queue[ac] == queue ||
++ sdata->vif.cab_queue == queue))
++ netif_stop_subqueue(sdata->dev, ac);
+ }
+ }
+ rcu_read_unlock();
+diff --git a/net/mctp/af_mctp.c b/net/mctp/af_mctp.c
+index fc9e728b6333a..45bbe3e54cc28 100644
+--- a/net/mctp/af_mctp.c
++++ b/net/mctp/af_mctp.c
+@@ -544,9 +544,6 @@ static int mctp_sk_init(struct sock *sk)
+
+ static void mctp_sk_close(struct sock *sk, long timeout)
+ {
+- struct mctp_sock *msk = container_of(sk, struct mctp_sock, sk);
+-
+- del_timer_sync(&msk->key_expiry);
+ sk_common_release(sk);
+ }
+
+@@ -580,7 +577,14 @@ static void mctp_sk_unhash(struct sock *sk)
+ spin_lock_irqsave(&key->lock, fl2);
+ __mctp_key_remove(key, net, fl2, MCTP_TRACE_KEY_CLOSED);
+ }
++ sock_set_flag(sk, SOCK_DEAD);
+ spin_unlock_irqrestore(&net->mctp.keys_lock, flags);
++
++ /* Since there are no more tag allocations (we have removed all of the
++	 * keys), stop any pending expiry events. The timer cannot be re-queued
++	 * as the sk is no longer observable.
++ */
++ del_timer_sync(&msk->key_expiry);
+ }
+
+ static struct proto mctp_proto = {
+diff --git a/net/mctp/route.c b/net/mctp/route.c
+index f9a80b82dc511..f51a05ec71624 100644
+--- a/net/mctp/route.c
++++ b/net/mctp/route.c
+@@ -147,6 +147,7 @@ static struct mctp_sk_key *mctp_key_alloc(struct mctp_sock *msk,
+ key->valid = true;
+ spin_lock_init(&key->lock);
+ refcount_set(&key->refs, 1);
++ sock_hold(key->sk);
+
+ return key;
+ }
+@@ -165,6 +166,7 @@ void mctp_key_unref(struct mctp_sk_key *key)
+ mctp_dev_release_key(key->dev, key);
+ spin_unlock_irqrestore(&key->lock, flags);
+
++ sock_put(key->sk);
+ kfree(key);
+ }
+
+@@ -177,6 +179,11 @@ static int mctp_key_add(struct mctp_sk_key *key, struct mctp_sock *msk)
+
+ spin_lock_irqsave(&net->mctp.keys_lock, flags);
+
++ if (sock_flag(&msk->sk, SOCK_DEAD)) {
++ rc = -EINVAL;
++ goto out_unlock;
++ }
++
+ hlist_for_each_entry(tmp, &net->mctp.keys, hlist) {
+ if (mctp_key_match(tmp, key->local_addr, key->peer_addr,
+ key->tag)) {
+@@ -198,6 +205,7 @@ static int mctp_key_add(struct mctp_sk_key *key, struct mctp_sock *msk)
+ hlist_add_head(&key->sklist, &msk->keys);
+ }
+
++out_unlock:
+ spin_unlock_irqrestore(&net->mctp.keys_lock, flags);
+
+ return rc;
+@@ -315,8 +323,8 @@ static int mctp_frag_queue(struct mctp_sk_key *key, struct sk_buff *skb)
+
+ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
+ {
++ struct mctp_sk_key *key, *any_key = NULL;
+ struct net *net = dev_net(skb->dev);
+- struct mctp_sk_key *key;
+ struct mctp_sock *msk;
+ struct mctp_hdr *mh;
+ unsigned long f;
+@@ -361,13 +369,11 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
+ * key for reassembly - we'll create a more specific
+ * one for future packets if required (ie, !EOM).
+ */
+- key = mctp_lookup_key(net, skb, MCTP_ADDR_ANY, &f);
+- if (key) {
+- msk = container_of(key->sk,
++ any_key = mctp_lookup_key(net, skb, MCTP_ADDR_ANY, &f);
++ if (any_key) {
++ msk = container_of(any_key->sk,
+ struct mctp_sock, sk);
+- spin_unlock_irqrestore(&key->lock, f);
+- mctp_key_unref(key);
+- key = NULL;
++ spin_unlock_irqrestore(&any_key->lock, f);
+ }
+ }
+
+@@ -419,14 +425,14 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
+ * this function.
+ */
+ rc = mctp_key_add(key, msk);
+- if (rc) {
+- kfree(key);
+- } else {
++ if (!rc)
+ trace_mctp_key_acquire(key);
+
+- /* we don't need to release key->lock on exit */
+- mctp_key_unref(key);
+- }
++		/* we don't need to release key->lock on exit, so
++		 * clean up here and suppress the unlock by setting
++		 * key to NULL
++ */
++ mctp_key_unref(key);
+ key = NULL;
+
+ } else {
+@@ -473,6 +479,8 @@ out_unlock:
+ spin_unlock_irqrestore(&key->lock, f);
+ mctp_key_unref(key);
+ }
++ if (any_key)
++ mctp_key_unref(any_key);
+ out:
+ if (rc)
+ kfree_skb(skb);
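Taken together, the MCTP hunks above make every key pin its socket (sock_hold() at key allocation paired with sock_put() at the final unref) and use SOCK_DEAD to refuse new keys once unhash has started, so the expiry timer deleted in mctp_sk_unhash() can never be re-armed. A generic user-space sketch of that hold/put pairing with a teardown flag; names are illustrative, plain reference counting stands in for the kernel's sock refcount, and the kernel's keys_lock around the dead check is omitted:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct sock_like {
        atomic_int refs;
        atomic_bool dead;   /* set once teardown starts (SOCK_DEAD) */
    };

    struct key {
        struct sock_like *sk;   /* each key pins the socket */
    };

    static void sock_hold(struct sock_like *sk) { atomic_fetch_add(&sk->refs, 1); }

    static void sock_put(struct sock_like *sk)
    {
        if (atomic_fetch_sub(&sk->refs, 1) == 1) {
            printf("socket freed\n");
            free(sk);
        }
    }

    /* fails once the socket is marked dead, as in the mctp_key_add() hunk */
    static struct key *key_alloc(struct sock_like *sk)
    {
        struct key *k;

        if (atomic_load(&sk->dead))
            return NULL;        /* -EINVAL in the kernel hunk */
        k = malloc(sizeof(*k));
        if (!k)
            return NULL;
        k->sk = sk;
        sock_hold(sk);          /* pairs with sock_put() in key_free() */
        return k;
    }

    static void key_free(struct key *k)
    {
        sock_put(k->sk);
        free(k);
    }

    int main(void)
    {
        struct sock_like *sk = calloc(1, sizeof(*sk));
        atomic_init(&sk->refs, 1);

        struct key *k = key_alloc(sk);
        atomic_store(&sk->dead, true);      /* unhash: no new keys */
        if (!key_alloc(sk))
            printf("new key refused after teardown\n");
        key_free(k);                        /* drops the key's hold */
        sock_put(sk);                       /* drops the initial ref */
        return 0;
    }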
+diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
+index 5a936334b517a..895e0ca542994 100644
+--- a/net/netfilter/nf_conntrack_proto_sctp.c
++++ b/net/netfilter/nf_conntrack_proto_sctp.c
+@@ -27,22 +27,16 @@
+ #include <net/netfilter/nf_conntrack_ecache.h>
+ #include <net/netfilter/nf_conntrack_timeout.h>
+
+-/* FIXME: Examine ipfilter's timeouts and conntrack transitions more
+- closely. They're more complex. --RR
+-
+- And so for me for SCTP :D -Kiran */
+-
+ static const char *const sctp_conntrack_names[] = {
+- "NONE",
+- "CLOSED",
+- "COOKIE_WAIT",
+- "COOKIE_ECHOED",
+- "ESTABLISHED",
+- "SHUTDOWN_SENT",
+- "SHUTDOWN_RECD",
+- "SHUTDOWN_ACK_SENT",
+- "HEARTBEAT_SENT",
+- "HEARTBEAT_ACKED",
++ [SCTP_CONNTRACK_NONE] = "NONE",
++ [SCTP_CONNTRACK_CLOSED] = "CLOSED",
++ [SCTP_CONNTRACK_COOKIE_WAIT] = "COOKIE_WAIT",
++ [SCTP_CONNTRACK_COOKIE_ECHOED] = "COOKIE_ECHOED",
++ [SCTP_CONNTRACK_ESTABLISHED] = "ESTABLISHED",
++ [SCTP_CONNTRACK_SHUTDOWN_SENT] = "SHUTDOWN_SENT",
++ [SCTP_CONNTRACK_SHUTDOWN_RECD] = "SHUTDOWN_RECD",
++ [SCTP_CONNTRACK_SHUTDOWN_ACK_SENT] = "SHUTDOWN_ACK_SENT",
++ [SCTP_CONNTRACK_HEARTBEAT_SENT] = "HEARTBEAT_SENT",
+ };
+
+ #define SECS * HZ
+@@ -54,12 +48,11 @@ static const unsigned int sctp_timeouts[SCTP_CONNTRACK_MAX] = {
+ [SCTP_CONNTRACK_CLOSED] = 10 SECS,
+ [SCTP_CONNTRACK_COOKIE_WAIT] = 3 SECS,
+ [SCTP_CONNTRACK_COOKIE_ECHOED] = 3 SECS,
+- [SCTP_CONNTRACK_ESTABLISHED] = 5 DAYS,
++ [SCTP_CONNTRACK_ESTABLISHED] = 210 SECS,
+ [SCTP_CONNTRACK_SHUTDOWN_SENT] = 300 SECS / 1000,
+ [SCTP_CONNTRACK_SHUTDOWN_RECD] = 300 SECS / 1000,
+ [SCTP_CONNTRACK_SHUTDOWN_ACK_SENT] = 3 SECS,
+ [SCTP_CONNTRACK_HEARTBEAT_SENT] = 30 SECS,
+- [SCTP_CONNTRACK_HEARTBEAT_ACKED] = 210 SECS,
+ };
+
+ #define SCTP_FLAG_HEARTBEAT_VTAG_FAILED 1
+@@ -73,7 +66,6 @@ static const unsigned int sctp_timeouts[SCTP_CONNTRACK_MAX] = {
+ #define sSR SCTP_CONNTRACK_SHUTDOWN_RECD
+ #define sSA SCTP_CONNTRACK_SHUTDOWN_ACK_SENT
+ #define sHS SCTP_CONNTRACK_HEARTBEAT_SENT
+-#define sHA SCTP_CONNTRACK_HEARTBEAT_ACKED
+ #define sIV SCTP_CONNTRACK_MAX
+
+ /*
+@@ -96,9 +88,6 @@ SHUTDOWN_ACK_SENT - We have seen a SHUTDOWN_ACK chunk in the direction opposite
+ CLOSED - We have seen a SHUTDOWN_COMPLETE chunk in the direction of
+ the SHUTDOWN chunk. Connection is closed.
+ HEARTBEAT_SENT - We have seen a HEARTBEAT in a new flow.
+-HEARTBEAT_ACKED - We have seen a HEARTBEAT-ACK in the direction opposite to
+- that of the HEARTBEAT chunk. Secondary connection is
+- established.
+ */
+
+ /* TODO
+@@ -115,33 +104,33 @@ cookie echoed to closed.
+ static const u8 sctp_conntracks[2][11][SCTP_CONNTRACK_MAX] = {
+ {
+ /* ORIGINAL */
+-/* sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA */
+-/* init */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCW, sHA},
+-/* init_ack */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL, sHA},
+-/* abort */ {sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL},
+-/* shutdown */ {sCL, sCL, sCW, sCE, sSS, sSS, sSR, sSA, sCL, sSS},
+-/* shutdown_ack */ {sSA, sCL, sCW, sCE, sES, sSA, sSA, sSA, sSA, sHA},
+-/* error */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL, sHA},/* Can't have Stale cookie*/
+-/* cookie_echo */ {sCL, sCL, sCE, sCE, sES, sSS, sSR, sSA, sCL, sHA},/* 5.2.4 - Big TODO */
+-/* cookie_ack */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL, sHA},/* Can't come in orig dir */
+-/* shutdown_comp*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sCL, sCL, sHA},
+-/* heartbeat */ {sHS, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA},
+-/* heartbeat_ack*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA}
++/* sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS */
++/* init */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCW},
++/* init_ack */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL},
++/* abort */ {sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL},
++/* shutdown */ {sCL, sCL, sCW, sCE, sSS, sSS, sSR, sSA, sCL},
++/* shutdown_ack */ {sSA, sCL, sCW, sCE, sES, sSA, sSA, sSA, sSA},
++/* error */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL},/* Can't have Stale cookie*/
++/* cookie_echo */ {sCL, sCL, sCE, sCE, sES, sSS, sSR, sSA, sCL},/* 5.2.4 - Big TODO */
++/* cookie_ack */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL},/* Can't come in orig dir */
++/* shutdown_comp*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sCL, sCL},
++/* heartbeat */ {sHS, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS},
++/* heartbeat_ack*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS},
+ },
+ {
+ /* REPLY */
+-/* sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA */
+-/* init */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sIV, sHA},/* INIT in sCL Big TODO */
+-/* init_ack */ {sIV, sCW, sCW, sCE, sES, sSS, sSR, sSA, sIV, sHA},
+-/* abort */ {sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sIV, sCL},
+-/* shutdown */ {sIV, sCL, sCW, sCE, sSR, sSS, sSR, sSA, sIV, sSR},
+-/* shutdown_ack */ {sIV, sCL, sCW, sCE, sES, sSA, sSA, sSA, sIV, sHA},
+-/* error */ {sIV, sCL, sCW, sCL, sES, sSS, sSR, sSA, sIV, sHA},
+-/* cookie_echo */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sIV, sHA},/* Can't come in reply dir */
+-/* cookie_ack */ {sIV, sCL, sCW, sES, sES, sSS, sSR, sSA, sIV, sHA},
+-/* shutdown_comp*/ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sCL, sIV, sHA},
+-/* heartbeat */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA},
+-/* heartbeat_ack*/ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHA, sHA}
++/* sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS */
++/* init */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sIV},/* INIT in sCL Big TODO */
++/* init_ack */ {sIV, sCW, sCW, sCE, sES, sSS, sSR, sSA, sIV},
++/* abort */ {sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sIV},
++/* shutdown */ {sIV, sCL, sCW, sCE, sSR, sSS, sSR, sSA, sIV},
++/* shutdown_ack */ {sIV, sCL, sCW, sCE, sES, sSA, sSA, sSA, sIV},
++/* error */ {sIV, sCL, sCW, sCL, sES, sSS, sSR, sSA, sIV},
++/* cookie_echo */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sIV},/* Can't come in reply dir */
++/* cookie_ack */ {sIV, sCL, sCW, sES, sES, sSS, sSR, sSA, sIV},
++/* shutdown_comp*/ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sCL, sIV},
++/* heartbeat */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS},
++/* heartbeat_ack*/ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sES},
+ }
+ };
+
+@@ -412,22 +401,29 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct,
+ for_each_sctp_chunk (skb, sch, _sch, offset, dataoff, count) {
+ /* Special cases of Verification tag check (Sec 8.5.1) */
+ if (sch->type == SCTP_CID_INIT) {
+- /* Sec 8.5.1 (A) */
++ /* (A) vtag MUST be zero */
+ if (sh->vtag != 0)
+ goto out_unlock;
+ } else if (sch->type == SCTP_CID_ABORT) {
+- /* Sec 8.5.1 (B) */
+- if (sh->vtag != ct->proto.sctp.vtag[dir] &&
+- sh->vtag != ct->proto.sctp.vtag[!dir])
++ /* (B) vtag MUST match own vtag if T flag is unset OR
++ * MUST match peer's vtag if T flag is set
++ */
++ if ((!(sch->flags & SCTP_CHUNK_FLAG_T) &&
++ sh->vtag != ct->proto.sctp.vtag[dir]) ||
++ ((sch->flags & SCTP_CHUNK_FLAG_T) &&
++ sh->vtag != ct->proto.sctp.vtag[!dir]))
+ goto out_unlock;
+ } else if (sch->type == SCTP_CID_SHUTDOWN_COMPLETE) {
+- /* Sec 8.5.1 (C) */
+- if (sh->vtag != ct->proto.sctp.vtag[dir] &&
+- sh->vtag != ct->proto.sctp.vtag[!dir] &&
+- sch->flags & SCTP_CHUNK_FLAG_T)
++ /* (C) vtag MUST match own vtag if T flag is unset OR
++ * MUST match peer's vtag if T flag is set
++ */
++ if ((!(sch->flags & SCTP_CHUNK_FLAG_T) &&
++ sh->vtag != ct->proto.sctp.vtag[dir]) ||
++ ((sch->flags & SCTP_CHUNK_FLAG_T) &&
++ sh->vtag != ct->proto.sctp.vtag[!dir]))
+ goto out_unlock;
+ } else if (sch->type == SCTP_CID_COOKIE_ECHO) {
+- /* Sec 8.5.1 (D) */
++ /* (D) vtag must be same as init_vtag as found in INIT_ACK */
+ if (sh->vtag != ct->proto.sctp.vtag[dir])
+ goto out_unlock;
+ } else if (sch->type == SCTP_CID_HEARTBEAT) {
+@@ -501,8 +497,12 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct,
+ }
+
+ ct->proto.sctp.state = new_state;
+- if (old_state != new_state)
++ if (old_state != new_state) {
+ nf_conntrack_event_cache(IPCT_PROTOINFO, ct);
++ if (new_state == SCTP_CONNTRACK_ESTABLISHED &&
++ !test_and_set_bit(IPS_ASSURED_BIT, &ct->status))
++ nf_conntrack_event_cache(IPCT_ASSURED, ct);
++ }
+ }
+ spin_unlock_bh(&ct->lock);
+
+@@ -516,14 +516,6 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct,
+
+ nf_ct_refresh_acct(ct, ctinfo, skb, timeouts[new_state]);
+
+- if (old_state == SCTP_CONNTRACK_COOKIE_ECHOED &&
+- dir == IP_CT_DIR_REPLY &&
+- new_state == SCTP_CONNTRACK_ESTABLISHED) {
+- pr_debug("Setting assured bit\n");
+- set_bit(IPS_ASSURED_BIT, &ct->status);
+- nf_conntrack_event_cache(IPCT_ASSURED, ct);
+- }
+-
+ return NF_ACCEPT;
+
+ out_unlock:
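The tightened vtag check above distinguishes on the T flag: an ABORT or SHUTDOWN-COMPLETE must carry our own vtag when T is unset and the peer's vtag when T is set (spec sec 8.5.1 (B)/(C)), whereas the old code accepted either tag in both cases. A condensed user-space restatement of the predicate, with illustrative names:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define SCTP_CHUNK_FLAG_T 0x1

    /* dir selects which of the two tracked tags is "ours" for this packet */
    static bool vtag_valid(uint32_t vtag, uint8_t flags,
                           const uint32_t tags[2], int dir)
    {
        if (flags & SCTP_CHUNK_FLAG_T)
            return vtag == tags[!dir];  /* reflected: peer's tag */
        return vtag == tags[dir];       /* normal: own tag */
    }

    int main(void)
    {
        uint32_t tags[2] = { 0x1111, 0x2222 };

        printf("%d\n", vtag_valid(0x1111, 0, tags, 0));  /* 1 */
        printf("%d\n", vtag_valid(0x2222, 0, tags, 0));  /* 0: the old code
                                                            accepted this */
        printf("%d\n", vtag_valid(0x2222, SCTP_CHUNK_FLAG_T, tags, 0)); /* 1 */
        return 0;
    }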
+diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
+index 6566310831779..3ac1af6f59fcc 100644
+--- a/net/netfilter/nf_conntrack_proto_tcp.c
++++ b/net/netfilter/nf_conntrack_proto_tcp.c
+@@ -1068,6 +1068,13 @@ int nf_conntrack_tcp_packet(struct nf_conn *ct,
+ ct->proto.tcp.last_flags |=
+ IP_CT_EXP_CHALLENGE_ACK;
+ }
++
++ /* possible challenge ack reply to syn */
++ if (old_state == TCP_CONNTRACK_SYN_SENT &&
++ index == TCP_ACK_SET &&
++ dir == IP_CT_DIR_REPLY)
++ ct->proto.tcp.last_ack = ntohl(th->ack_seq);
++
+ spin_unlock_bh(&ct->lock);
+ nf_ct_l4proto_log_invalid(skb, ct, state,
+ "packet (index %d) in dir %d ignored, state %s",
+@@ -1193,6 +1200,14 @@ int nf_conntrack_tcp_packet(struct nf_conn *ct,
+ * segments we ignored. */
+ goto in_window;
+ }
++
++ /* Reset in response to a challenge-ack we let through earlier */
++ if (old_state == TCP_CONNTRACK_SYN_SENT &&
++ ct->proto.tcp.last_index == TCP_ACK_SET &&
++ ct->proto.tcp.last_dir == IP_CT_DIR_REPLY &&
++ ntohl(th->seq) == ct->proto.tcp.last_ack)
++ goto in_window;
++
+ break;
+ default:
+ /* Keep compilers happy. */
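The two TCP hunks above cooperate: when a reply-direction ACK arrives while we are in SYN_SENT (a likely challenge ACK), its ack_seq is recorded, and a later RST whose seq equals that value is treated as in-window. A small stand-alone sketch of the bookkeeping; the field names are condensed from ct->proto.tcp and are illustrative only:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct ct_tcp {
        uint32_t last_ack;      /* ack_seq of a possible challenge ACK */
        bool     saw_reply_ack; /* last packet: reply-dir ACK in SYN_SENT */
    };

    static void note_reply_ack(struct ct_tcp *ct, uint32_t ack_seq)
    {
        ct->last_ack = ack_seq;
        ct->saw_reply_ack = true;
    }

    /* a RST answering the challenge ACK carries seq == that ack_seq */
    static bool rst_answers_challenge(const struct ct_tcp *ct, uint32_t seq)
    {
        return ct->saw_reply_ack && seq == ct->last_ack;
    }

    int main(void)
    {
        struct ct_tcp ct = { 0 };

        note_reply_ack(&ct, 1000);                         /* challenge ACK */
        printf("%d\n", rst_answers_challenge(&ct, 1000));  /* 1: in window */
        printf("%d\n", rst_answers_challenge(&ct, 4242));  /* 0: still invalid */
        return 0;
    }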
+diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
+index bca839ab1ae8d..460294bd4b606 100644
+--- a/net/netfilter/nf_conntrack_standalone.c
++++ b/net/netfilter/nf_conntrack_standalone.c
+@@ -601,7 +601,6 @@ enum nf_ct_sysctl_index {
+ NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_SHUTDOWN_RECD,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_SHUTDOWN_ACK_SENT,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_HEARTBEAT_SENT,
+- NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_HEARTBEAT_ACKED,
+ #endif
+ #ifdef CONFIG_NF_CT_PROTO_DCCP
+ NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_REQUEST,
+@@ -886,12 +885,6 @@ static struct ctl_table nf_ct_sysctl_table[] = {
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+- [NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_HEARTBEAT_ACKED] = {
+- .procname = "nf_conntrack_sctp_timeout_heartbeat_acked",
+- .maxlen = sizeof(unsigned int),
+- .mode = 0644,
+- .proc_handler = proc_dointvec_jiffies,
+- },
+ #endif
+ #ifdef CONFIG_NF_CT_PROTO_DCCP
+ [NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_REQUEST] = {
+@@ -1035,7 +1028,6 @@ static void nf_conntrack_standalone_init_sctp_sysctl(struct net *net,
+ XASSIGN(SHUTDOWN_RECD, sn);
+ XASSIGN(SHUTDOWN_ACK_SENT, sn);
+ XASSIGN(HEARTBEAT_SENT, sn);
+- XASSIGN(HEARTBEAT_ACKED, sn);
+ #undef XASSIGN
+ #endif
+ }
+diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
+index 7325bee7d1442..19ea4d3c35535 100644
+--- a/net/netfilter/nft_set_rbtree.c
++++ b/net/netfilter/nft_set_rbtree.c
+@@ -38,10 +38,12 @@ static bool nft_rbtree_interval_start(const struct nft_rbtree_elem *rbe)
+ return !nft_rbtree_interval_end(rbe);
+ }
+
+-static bool nft_rbtree_equal(const struct nft_set *set, const void *this,
+- const struct nft_rbtree_elem *interval)
++static int nft_rbtree_cmp(const struct nft_set *set,
++ const struct nft_rbtree_elem *e1,
++ const struct nft_rbtree_elem *e2)
+ {
+- return memcmp(this, nft_set_ext_key(&interval->ext), set->klen) == 0;
++ return memcmp(nft_set_ext_key(&e1->ext), nft_set_ext_key(&e2->ext),
++ set->klen);
+ }
+
+ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
+@@ -52,7 +54,6 @@ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set
+ const struct nft_rbtree_elem *rbe, *interval = NULL;
+ u8 genmask = nft_genmask_cur(net);
+ const struct rb_node *parent;
+- const void *this;
+ int d;
+
+ parent = rcu_dereference_raw(priv->root.rb_node);
+@@ -62,12 +63,11 @@ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set
+
+ rbe = rb_entry(parent, struct nft_rbtree_elem, node);
+
+- this = nft_set_ext_key(&rbe->ext);
+- d = memcmp(this, key, set->klen);
++ d = memcmp(nft_set_ext_key(&rbe->ext), key, set->klen);
+ if (d < 0) {
+ parent = rcu_dereference_raw(parent->rb_left);
+ if (interval &&
+- nft_rbtree_equal(set, this, interval) &&
++ !nft_rbtree_cmp(set, rbe, interval) &&
+ nft_rbtree_interval_end(rbe) &&
+ nft_rbtree_interval_start(interval))
+ continue;
+@@ -215,154 +215,216 @@ static void *nft_rbtree_get(const struct net *net, const struct nft_set *set,
+ return rbe;
+ }
+
++static int nft_rbtree_gc_elem(const struct nft_set *__set,
++ struct nft_rbtree *priv,
++ struct nft_rbtree_elem *rbe)
++{
++ struct nft_set *set = (struct nft_set *)__set;
++ struct rb_node *prev = rb_prev(&rbe->node);
++ struct nft_rbtree_elem *rbe_prev;
++ struct nft_set_gc_batch *gcb;
++
++ gcb = nft_set_gc_batch_check(set, NULL, GFP_ATOMIC);
++ if (!gcb)
++ return -ENOMEM;
++
++ /* search for expired end interval coming before this element. */
++ do {
++ rbe_prev = rb_entry(prev, struct nft_rbtree_elem, node);
++ if (nft_rbtree_interval_end(rbe_prev))
++ break;
++
++ prev = rb_prev(prev);
++ } while (prev != NULL);
++
++ rb_erase(&rbe_prev->node, &priv->root);
++ rb_erase(&rbe->node, &priv->root);
++ atomic_sub(2, &set->nelems);
++
++ nft_set_gc_batch_add(gcb, rbe);
++ nft_set_gc_batch_complete(gcb);
++
++ return 0;
++}
++
++static bool nft_rbtree_update_first(const struct nft_set *set,
++ struct nft_rbtree_elem *rbe,
++ struct rb_node *first)
++{
++ struct nft_rbtree_elem *first_elem;
++
++ first_elem = rb_entry(first, struct nft_rbtree_elem, node);
++ /* this element is closest to where the new element is to be inserted:
++ * update the first element for the node list path.
++ */
++ if (nft_rbtree_cmp(set, rbe, first_elem) < 0)
++ return true;
++
++ return false;
++}
++
+ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
+ struct nft_rbtree_elem *new,
+ struct nft_set_ext **ext)
+ {
+- bool overlap = false, dup_end_left = false, dup_end_right = false;
++ struct nft_rbtree_elem *rbe, *rbe_le = NULL, *rbe_ge = NULL;
++ struct rb_node *node, *parent, **p, *first = NULL;
+ struct nft_rbtree *priv = nft_set_priv(set);
+ u8 genmask = nft_genmask_next(net);
+- struct nft_rbtree_elem *rbe;
+- struct rb_node *parent, **p;
+- int d;
++ int d, err;
+
+- /* Detect overlaps as we descend the tree. Set the flag in these cases:
+- *
+- * a1. _ _ __>| ?_ _ __| (insert end before existing end)
+- * a2. _ _ ___| ?_ _ _>| (insert end after existing end)
+- * a3. _ _ ___? >|_ _ __| (insert start before existing end)
+- *
+- * and clear it later on, as we eventually reach the points indicated by
+- * '?' above, in the cases described below. We'll always meet these
+- * later, locally, due to tree ordering, and overlaps for the intervals
+- * that are the closest together are always evaluated last.
+- *
+- * b1. _ _ __>| !_ _ __| (insert end before existing start)
+- * b2. _ _ ___| !_ _ _>| (insert end after existing start)
+- * b3. _ _ ___! >|_ _ __| (insert start after existing end, as a leaf)
+- * '--' no nodes falling in this range
+- * b4. >|_ _ ! (insert start before existing start)
+- *
+- * Case a3. resolves to b3.:
+- * - if the inserted start element is the leftmost, because the '0'
+- * element in the tree serves as end element
+- * - otherwise, if an existing end is found immediately to the left. If
+- * there are existing nodes in between, we need to further descend the
+- * tree before we can conclude the new start isn't causing an overlap
+- *
+- * or to b4., which, preceded by a3., means we already traversed one or
+- * more existing intervals entirely, from the right.
+- *
+- * For a new, rightmost pair of elements, we'll hit cases b3. and b2.,
+- * in that order.
+- *
+- * The flag is also cleared in two special cases:
+- *
+- * b5. |__ _ _!|<_ _ _ (insert start right before existing end)
+- * b6. |__ _ >|!__ _ _ (insert end right after existing start)
+- *
+- * which always happen as last step and imply that no further
+- * overlapping is possible.
+- *
+- * Another special case comes from the fact that start elements matching
+- * an already existing start element are allowed: insertion is not
+- * performed but we return -EEXIST in that case, and the error will be
+- * cleared by the caller if NLM_F_EXCL is not present in the request.
+- * This way, request for insertion of an exact overlap isn't reported as
+- * error to userspace if not desired.
+- *
+- * However, if the existing start matches a pre-existing start, but the
+- * end element doesn't match the corresponding pre-existing end element,
+- * we need to report a partial overlap. This is a local condition that
+- * can be noticed without need for a tracking flag, by checking for a
+- * local duplicated end for a corresponding start, from left and right,
+- * separately.
++	/* Descend the tree to search for an existing element greater than the
++	 * key value to insert. This gives the first element from which to walk
++	 * the ordered elements and check for a possible overlap.
+ */
+-
+ parent = NULL;
+ p = &priv->root.rb_node;
+ while (*p != NULL) {
+ parent = *p;
+ rbe = rb_entry(parent, struct nft_rbtree_elem, node);
+- d = memcmp(nft_set_ext_key(&rbe->ext),
+- nft_set_ext_key(&new->ext),
+- set->klen);
++ d = nft_rbtree_cmp(set, rbe, new);
++
+ if (d < 0) {
+ p = &parent->rb_left;
+-
+- if (nft_rbtree_interval_start(new)) {
+- if (nft_rbtree_interval_end(rbe) &&
+- nft_set_elem_active(&rbe->ext, genmask) &&
+- !nft_set_elem_expired(&rbe->ext) && !*p)
+- overlap = false;
+- } else {
+- if (dup_end_left && !*p)
+- return -ENOTEMPTY;
+-
+- overlap = nft_rbtree_interval_end(rbe) &&
+- nft_set_elem_active(&rbe->ext,
+- genmask) &&
+- !nft_set_elem_expired(&rbe->ext);
+-
+- if (overlap) {
+- dup_end_right = true;
+- continue;
+- }
+- }
+ } else if (d > 0) {
+- p = &parent->rb_right;
++ if (!first ||
++ nft_rbtree_update_first(set, rbe, first))
++ first = &rbe->node;
+
+- if (nft_rbtree_interval_end(new)) {
+- if (dup_end_right && !*p)
+- return -ENOTEMPTY;
+-
+- overlap = nft_rbtree_interval_end(rbe) &&
+- nft_set_elem_active(&rbe->ext,
+- genmask) &&
+- !nft_set_elem_expired(&rbe->ext);
+-
+- if (overlap) {
+- dup_end_left = true;
+- continue;
+- }
+- } else if (nft_set_elem_active(&rbe->ext, genmask) &&
+- !nft_set_elem_expired(&rbe->ext)) {
+- overlap = nft_rbtree_interval_end(rbe);
+- }
++ p = &parent->rb_right;
+ } else {
+- if (nft_rbtree_interval_end(rbe) &&
+- nft_rbtree_interval_start(new)) {
++ if (nft_rbtree_interval_end(rbe))
+ p = &parent->rb_left;
+-
+- if (nft_set_elem_active(&rbe->ext, genmask) &&
+- !nft_set_elem_expired(&rbe->ext))
+- overlap = false;
+- } else if (nft_rbtree_interval_start(rbe) &&
+- nft_rbtree_interval_end(new)) {
++ else
+ p = &parent->rb_right;
++ }
++ }
++
++ if (!first)
++ first = rb_first(&priv->root);
++
++ /* Detect overlap by going through the list of valid tree nodes.
++ * Values stored in the tree are in reversed order, starting from
++ * highest to lowest value.
++ */
++ for (node = first; node != NULL; node = rb_next(node)) {
++ rbe = rb_entry(node, struct nft_rbtree_elem, node);
+
+- if (nft_set_elem_active(&rbe->ext, genmask) &&
+- !nft_set_elem_expired(&rbe->ext))
+- overlap = false;
+- } else if (nft_set_elem_active(&rbe->ext, genmask) &&
+- !nft_set_elem_expired(&rbe->ext)) {
+- *ext = &rbe->ext;
+- return -EEXIST;
+- } else {
+- overlap = false;
+- if (nft_rbtree_interval_end(rbe))
+- p = &parent->rb_left;
+- else
+- p = &parent->rb_right;
++ if (!nft_set_elem_active(&rbe->ext, genmask))
++ continue;
++
++ /* perform garbage collection to avoid bogus overlap reports. */
++ if (nft_set_elem_expired(&rbe->ext)) {
++ err = nft_rbtree_gc_elem(set, priv, rbe);
++ if (err < 0)
++ return err;
++
++ continue;
++ }
++
++ d = nft_rbtree_cmp(set, rbe, new);
++ if (d == 0) {
++ /* Matching end element: no need to look for an
++ * overlapping greater or equal element.
++ */
++ if (nft_rbtree_interval_end(rbe)) {
++ rbe_le = rbe;
++ break;
++ }
++
++ /* first element that is greater or equal to key value. */
++ if (!rbe_ge) {
++ rbe_ge = rbe;
++ continue;
++ }
++
++			/* this element is a closer greater-or-equal candidate; update it. */
++ if (nft_rbtree_cmp(set, rbe_ge, new) != 0) {
++ rbe_ge = rbe;
++ continue;
++ }
++
++			/* element is equal to the key value; make sure the
++			 * flags are the same, as an existing greater-or-equal
++			 * start element must not be replaced by a
++			 * greater-or-equal end element.
++ */
++ if ((nft_rbtree_interval_start(new) &&
++ nft_rbtree_interval_start(rbe_ge)) ||
++ (nft_rbtree_interval_end(new) &&
++ nft_rbtree_interval_end(rbe_ge))) {
++ rbe_ge = rbe;
++ continue;
+ }
++ } else if (d > 0) {
++ /* annotate element greater than the new element. */
++ rbe_ge = rbe;
++ continue;
++ } else if (d < 0) {
++ /* annotate element less than the new element. */
++ rbe_le = rbe;
++ break;
+ }
++ }
+
+- dup_end_left = dup_end_right = false;
++ /* - new start element matching existing start element: full overlap
++ * reported as -EEXIST, cleared by caller if NLM_F_EXCL is not given.
++ */
++ if (rbe_ge && !nft_rbtree_cmp(set, new, rbe_ge) &&
++ nft_rbtree_interval_start(rbe_ge) == nft_rbtree_interval_start(new)) {
++ *ext = &rbe_ge->ext;
++ return -EEXIST;
+ }
+
+- if (overlap)
++ /* - new end element matching existing end element: full overlap
++ * reported as -EEXIST, cleared by caller if NLM_F_EXCL is not given.
++ */
++ if (rbe_le && !nft_rbtree_cmp(set, new, rbe_le) &&
++ nft_rbtree_interval_end(rbe_le) == nft_rbtree_interval_end(new)) {
++ *ext = &rbe_le->ext;
++ return -EEXIST;
++ }
++
++ /* - new start element with existing closest, less or equal key value
++ * being a start element: partial overlap, reported as -ENOTEMPTY.
++	 * Anonymous sets allow two consecutive start elements since they
++	 * are constant; skip them to avoid bogus overlap reports.
++ */
++ if (!nft_set_is_anonymous(set) && rbe_le &&
++ nft_rbtree_interval_start(rbe_le) && nft_rbtree_interval_start(new))
++ return -ENOTEMPTY;
++
++ /* - new end element with existing closest, less or equal key value
++	 * being an end element: partial overlap, reported as -ENOTEMPTY.
++ */
++ if (rbe_le &&
++ nft_rbtree_interval_end(rbe_le) && nft_rbtree_interval_end(new))
+ return -ENOTEMPTY;
+
++ /* - new end element with existing closest, greater or equal key value
++ * being an end element: partial overlap, reported as -ENOTEMPTY
++ */
++ if (rbe_ge &&
++ nft_rbtree_interval_end(rbe_ge) && nft_rbtree_interval_end(new))
++ return -ENOTEMPTY;
++
++ /* Accepted element: pick insertion point depending on key value */
++ parent = NULL;
++ p = &priv->root.rb_node;
++ while (*p != NULL) {
++ parent = *p;
++ rbe = rb_entry(parent, struct nft_rbtree_elem, node);
++ d = nft_rbtree_cmp(set, rbe, new);
++
++ if (d < 0)
++ p = &parent->rb_left;
++ else if (d > 0)
++ p = &parent->rb_right;
++ else if (nft_rbtree_interval_end(rbe))
++ p = &parent->rb_left;
++ else
++ p = &parent->rb_right;
++ }
++
+ rb_link_node_rcu(&new->node, parent, p);
+ rb_insert_color(&new->node, &priv->root);
+ return 0;
+@@ -501,23 +563,37 @@ static void nft_rbtree_gc(struct work_struct *work)
+ struct nft_rbtree *priv;
+ struct rb_node *node;
+ struct nft_set *set;
++ struct net *net;
++ u8 genmask;
+
+ priv = container_of(work, struct nft_rbtree, gc_work.work);
+ set = nft_set_container_of(priv);
++ net = read_pnet(&set->net);
++ genmask = nft_genmask_cur(net);
+
+ write_lock_bh(&priv->lock);
+ write_seqcount_begin(&priv->count);
+ for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) {
+ rbe = rb_entry(node, struct nft_rbtree_elem, node);
+
++ if (!nft_set_elem_active(&rbe->ext, genmask))
++ continue;
++
++ /* elements are reversed in the rbtree for historical reasons,
++		 * from highest to lowest value, which is why the end element
++		 * is always visited before the start element.
++ */
+ if (nft_rbtree_interval_end(rbe)) {
+ rbe_end = rbe;
+ continue;
+ }
+ if (!nft_set_elem_expired(&rbe->ext))
+ continue;
+- if (nft_set_elem_mark_busy(&rbe->ext))
++
++ if (nft_set_elem_mark_busy(&rbe->ext)) {
++ rbe_end = NULL;
+ continue;
++ }
+
+ if (rbe_prev) {
+ rb_erase(&rbe_prev->node, &priv->root);
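The rewritten insertion above no longer tracks overlap with flags while descending; it walks the ordered elements once and classifies the new element against only the closest active less-or-equal and greater-or-equal entries. A toy restatement of that classification over start/end endpoints; it is illustrative only, and the anonymous-set exception and expiry handling are omitted:

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct elem {
        int key;
        bool is_end;    /* false: interval start, true: interval end */
    };

    /* classify an insertion the way the rewritten __nft_rbtree_insert() does:
     * inspect only the closest element <= new (le) and >= new (ge) */
    static int classify(const struct elem *new,
                        const struct elem *le, const struct elem *ge)
    {
        if (ge && ge->key == new->key && ge->is_end == new->is_end)
            return -EEXIST;     /* exact duplicate endpoint */
        if (le && le->key == new->key && le->is_end == new->is_end)
            return -EEXIST;
        if (le && !le->is_end && !new->is_end)
            return -ENOTEMPTY;  /* start after a start: partial overlap */
        if (le && le->is_end && new->is_end)
            return -ENOTEMPTY;  /* end after an end */
        if (ge && ge->is_end && new->is_end)
            return -ENOTEMPTY;  /* end before an end */
        return 0;
    }

    int main(void)
    {
        struct elem s10 = { 10, false }, e20 = { 20, true };
        struct elem s15 = { 15, false };

        /* a new start at 15 falls between start@10 and end@20: its closest
         * less-or-equal element is a start, so this is a partial overlap */
        printf("%d\n", classify(&s15, &s10, &e20)); /* -ENOTEMPTY (-39) */
        return 0;
    }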
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index a662e8a5ff84a..e506712967918 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -580,7 +580,9 @@ static int netlink_insert(struct sock *sk, u32 portid)
+ if (nlk_sk(sk)->bound)
+ goto err;
+
+- nlk_sk(sk)->portid = portid;
++ /* portid can be read locklessly from netlink_getname(). */
++ WRITE_ONCE(nlk_sk(sk)->portid, portid);
++
+ sock_hold(sk);
+
+ err = __netlink_insert(table, sk);
+@@ -1085,9 +1087,11 @@ static int netlink_connect(struct socket *sock, struct sockaddr *addr,
+ return -EINVAL;
+
+ if (addr->sa_family == AF_UNSPEC) {
+- sk->sk_state = NETLINK_UNCONNECTED;
+- nlk->dst_portid = 0;
+- nlk->dst_group = 0;
++ /* paired with READ_ONCE() in netlink_getsockbyportid() */
++ WRITE_ONCE(sk->sk_state, NETLINK_UNCONNECTED);
++ /* dst_portid and dst_group can be read locklessly */
++ WRITE_ONCE(nlk->dst_portid, 0);
++ WRITE_ONCE(nlk->dst_group, 0);
+ return 0;
+ }
+ if (addr->sa_family != AF_NETLINK)
+@@ -1108,9 +1112,11 @@ static int netlink_connect(struct socket *sock, struct sockaddr *addr,
+ err = netlink_autobind(sock);
+
+ if (err == 0) {
+- sk->sk_state = NETLINK_CONNECTED;
+- nlk->dst_portid = nladdr->nl_pid;
+- nlk->dst_group = ffs(nladdr->nl_groups);
++ /* paired with READ_ONCE() in netlink_getsockbyportid() */
++ WRITE_ONCE(sk->sk_state, NETLINK_CONNECTED);
++ /* dst_portid and dst_group can be read locklessly */
++ WRITE_ONCE(nlk->dst_portid, nladdr->nl_pid);
++ WRITE_ONCE(nlk->dst_group, ffs(nladdr->nl_groups));
+ }
+
+ return err;
+@@ -1127,10 +1133,12 @@ static int netlink_getname(struct socket *sock, struct sockaddr *addr,
+ nladdr->nl_pad = 0;
+
+ if (peer) {
+- nladdr->nl_pid = nlk->dst_portid;
+- nladdr->nl_groups = netlink_group_mask(nlk->dst_group);
++ /* Paired with WRITE_ONCE() in netlink_connect() */
++ nladdr->nl_pid = READ_ONCE(nlk->dst_portid);
++ nladdr->nl_groups = netlink_group_mask(READ_ONCE(nlk->dst_group));
+ } else {
+- nladdr->nl_pid = nlk->portid;
++ /* Paired with WRITE_ONCE() in netlink_insert() */
++ nladdr->nl_pid = READ_ONCE(nlk->portid);
+ netlink_lock_table();
+ nladdr->nl_groups = nlk->groups ? nlk->groups[0] : 0;
+ netlink_unlock_table();
+@@ -1157,8 +1165,9 @@ static struct sock *netlink_getsockbyportid(struct sock *ssk, u32 portid)
+
+ /* Don't bother queuing skb if kernel socket has no input function */
+ nlk = nlk_sk(sock);
+- if (sock->sk_state == NETLINK_CONNECTED &&
+- nlk->dst_portid != nlk_sk(ssk)->portid) {
++ /* dst_portid and sk_state can be changed in netlink_connect() */
++ if (READ_ONCE(sock->sk_state) == NETLINK_CONNECTED &&
++ READ_ONCE(nlk->dst_portid) != nlk_sk(ssk)->portid) {
+ sock_put(sock);
+ return ERR_PTR(-ECONNREFUSED);
+ }
+@@ -1875,8 +1884,9 @@ static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
+ goto out;
+ netlink_skb_flags |= NETLINK_SKB_DST;
+ } else {
+- dst_portid = nlk->dst_portid;
+- dst_group = nlk->dst_group;
++ /* Paired with WRITE_ONCE() in netlink_connect() */
++ dst_portid = READ_ONCE(nlk->dst_portid);
++ dst_group = READ_ONCE(nlk->dst_group);
+ }
+
+ /* Paired with WRITE_ONCE() in netlink_insert() */
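The netlink hunks above annotate fields that are written under the socket lock but read locklessly. In user-space C11, the analogue of this WRITE_ONCE()/READ_ONCE() pairing is a relaxed atomic access: it adds no ordering, it only stops the compiler from tearing, fusing, or re-reading the access. A minimal sketch with an illustrative field:

    #include <stdatomic.h>
    #include <stdio.h>

    static _Atomic unsigned int dst_portid;

    static void connect_path(unsigned int portid)
    {
        /* WRITE_ONCE(nlk->dst_portid, portid) */
        atomic_store_explicit(&dst_portid, portid, memory_order_relaxed);
    }

    static unsigned int getname_path(void)
    {
        /* READ_ONCE(nlk->dst_portid) */
        return atomic_load_explicit(&dst_portid, memory_order_relaxed);
    }

    int main(void)
    {
        connect_path(42);
        printf("%u\n", getname_path());
        return 0;
    }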
+diff --git a/net/netrom/nr_timer.c b/net/netrom/nr_timer.c
+index a8da88db7893f..4e7c968cde2dc 100644
+--- a/net/netrom/nr_timer.c
++++ b/net/netrom/nr_timer.c
+@@ -121,6 +121,7 @@ static void nr_heartbeat_expiry(struct timer_list *t)
+ is accepted() it isn't 'dead' so doesn't get removed. */
+ if (sock_flag(sk, SOCK_DESTROY) ||
+ (sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_DEAD))) {
++ sock_hold(sk);
+ bh_unlock_sock(sk);
+ nr_destroy_socket(sk);
+ goto out;
+diff --git a/net/nfc/llcp_core.c b/net/nfc/llcp_core.c
+index 3364caabef8b1..a27e1842b2a09 100644
+--- a/net/nfc/llcp_core.c
++++ b/net/nfc/llcp_core.c
+@@ -157,6 +157,7 @@ static void local_cleanup(struct nfc_llcp_local *local)
+ cancel_work_sync(&local->rx_work);
+ cancel_work_sync(&local->timeout_work);
+ kfree_skb(local->rx_pending);
++ local->rx_pending = NULL;
+ del_timer_sync(&local->sdreq_timer);
+ cancel_work_sync(&local->sdreq_timeout_work);
+ nfc_llcp_free_sdp_tlv_list(&local->pending_sdreqs);
+diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
+index a661b062cca85..872d127c9db42 100644
+--- a/net/sched/sch_gred.c
++++ b/net/sched/sch_gred.c
+@@ -377,6 +377,7 @@ static int gred_offload_dump_stats(struct Qdisc *sch)
+ /* Even if driver returns failure adjust the stats - in case offload
+ * ended but driver still wants to adjust the values.
+ */
++ sch_tree_lock(sch);
+ for (i = 0; i < MAX_DPs; i++) {
+ if (!table->tab[i])
+ continue;
+@@ -393,6 +394,7 @@ static int gred_offload_dump_stats(struct Qdisc *sch)
+ sch->qstats.overlimits += hw_stats->stats.qstats[i].overlimits;
+ }
+ _bstats_update(&sch->bstats, bytes, packets);
++ sch_tree_unlock(sch);
+
+ kfree(hw_stats);
+ return ret;
+diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
+index e5b4bbf3ce3d5..3afac9c21a763 100644
+--- a/net/sched/sch_htb.c
++++ b/net/sched/sch_htb.c
+@@ -1545,7 +1545,7 @@ static int htb_destroy_class_offload(struct Qdisc *sch, struct htb_class *cl,
+ struct tc_htb_qopt_offload offload_opt;
+ struct netdev_queue *dev_queue;
+ struct Qdisc *q = cl->leaf.q;
+- struct Qdisc *old = NULL;
++ struct Qdisc *old;
+ int err;
+
+ if (cl->level)
+@@ -1553,14 +1553,17 @@ static int htb_destroy_class_offload(struct Qdisc *sch, struct htb_class *cl,
+
+ WARN_ON(!q);
+ dev_queue = htb_offload_get_queue(cl);
+- old = htb_graft_helper(dev_queue, NULL);
+- if (destroying)
+- /* Before HTB is destroyed, the kernel grafts noop_qdisc to
+- * all queues.
++	/* When destroying, the caller (qdisc_graft) grafts the new qdisc and invokes
++ * qdisc_put for the qdisc being destroyed. htb_destroy_class_offload
++ * does not need to graft or qdisc_put the qdisc being destroyed.
++ */
++ if (!destroying) {
++ old = htb_graft_helper(dev_queue, NULL);
++ /* Last qdisc grafted should be the same as cl->leaf.q when
++ * calling htb_delete.
+ */
+- WARN_ON(!(old->flags & TCQ_F_BUILTIN));
+- else
+ WARN_ON(old != q);
++ }
+
+ if (cl->parent) {
+ _bstats_update(&cl->parent->bstats_bias,
+@@ -1577,10 +1580,12 @@ static int htb_destroy_class_offload(struct Qdisc *sch, struct htb_class *cl,
+ };
+ err = htb_offload(qdisc_dev(sch), &offload_opt);
+
+- if (!err || destroying)
+- qdisc_put(old);
+- else
+- htb_graft_helper(dev_queue, old);
++ if (!destroying) {
++ if (!err)
++ qdisc_put(old);
++ else
++ htb_graft_helper(dev_queue, old);
++ }
+
+ if (last_child)
+ return err;
+diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
+index 570389f6cdd7d..c322a61eaeeac 100644
+--- a/net/sched/sch_taprio.c
++++ b/net/sched/sch_taprio.c
+@@ -1700,6 +1700,7 @@ static void taprio_reset(struct Qdisc *sch)
+ int i;
+
+ hrtimer_cancel(&q->advance_timer);
++
+ if (q->qdiscs) {
+ for (i = 0; i < dev->num_tx_queues; i++)
+ if (q->qdiscs[i])
+@@ -1720,6 +1721,7 @@ static void taprio_destroy(struct Qdisc *sch)
+ * happens in qdisc_create(), after taprio_init() has been called.
+ */
+ hrtimer_cancel(&q->advance_timer);
++ qdisc_synchronize(sch);
+
+ taprio_disable_offload(dev, q, NULL);
+
+diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c
+index 59e653b528b1f..6b95d3ba8fe1c 100644
+--- a/net/sctp/bind_addr.c
++++ b/net/sctp/bind_addr.c
+@@ -73,6 +73,12 @@ int sctp_bind_addr_copy(struct net *net, struct sctp_bind_addr *dest,
+ }
+ }
+
++ /* If somehow no addresses were found that can be used with this
++ * scope, it's an error.
++ */
++ if (list_empty(&dest->address_list))
++ error = -ENETUNREACH;
++
+ out:
+ if (error)
+ sctp_bind_addr_clean(dest);
+diff --git a/samples/ftrace/ftrace-direct-multi-modify.c b/samples/ftrace/ftrace-direct-multi-modify.c
+index 65aa94d96f4e3..6f23b1413745c 100644
+--- a/samples/ftrace/ftrace-direct-multi-modify.c
++++ b/samples/ftrace/ftrace-direct-multi-modify.c
+@@ -149,6 +149,7 @@ static void __exit ftrace_direct_multi_exit(void)
+ {
+ kthread_stop(simple_tsk);
+ unregister_ftrace_direct_multi(&direct, my_tramp);
++ ftrace_free_filter(&direct);
+ }
+
+ module_init(ftrace_direct_multi_init);
+diff --git a/samples/ftrace/ftrace-direct-multi.c b/samples/ftrace/ftrace-direct-multi.c
+index 41ded7c615c7f..a9a5c90fb2044 100644
+--- a/samples/ftrace/ftrace-direct-multi.c
++++ b/samples/ftrace/ftrace-direct-multi.c
+@@ -77,6 +77,7 @@ static int __init ftrace_direct_multi_init(void)
+ static void __exit ftrace_direct_multi_exit(void)
+ {
+ unregister_ftrace_direct_multi(&direct, (unsigned long) my_tramp);
++ ftrace_free_filter(&direct);
+ }
+
+ module_init(ftrace_direct_multi_init);
+diff --git a/scripts/atomic/atomics.tbl b/scripts/atomic/atomics.tbl
+old mode 100755
+new mode 100644
+diff --git a/scripts/tracing/ftrace-bisect.sh b/scripts/tracing/ftrace-bisect.sh
+index 926701162bc83..bb4f59262bbe9 100755
+--- a/scripts/tracing/ftrace-bisect.sh
++++ b/scripts/tracing/ftrace-bisect.sh
+@@ -12,7 +12,7 @@
+ # (note, if this is a problem with function_graph tracing, then simply
+ # replace "function" with "function_graph" in the following steps).
+ #
+-# # cd /sys/kernel/debug/tracing
++# # cd /sys/kernel/tracing
+ # # echo schedule > set_ftrace_filter
+ # # echo function > current_tracer
+ #
+@@ -20,22 +20,40 @@
+ #
+ # # echo nop > current_tracer
+ #
+-# # cat available_filter_functions > ~/full-file
++# Starting with v5.1 this can be done with numbers, making it much faster:
++#
++# The old (slow) way, for kernels before v5.1.
++#
++# [old-way] # cat available_filter_functions > ~/full-file
++#
++# [old-way] *** Note *** this process will take several minutes to update the
++# [old-way] filters. Setting multiple functions is an O(n^2) operation, and we
++# [old-way] are dealing with thousands of functions. So go have coffee, talk
++# [old-way] with your coworkers, read facebook. And eventually, this operation
++# [old-way] will end.
++#
++# The new way (using numbers) is an O(n) operation, and usually takes less than a second.
++#
++# seq `wc -l available_filter_functions | cut -d' ' -f1` > ~/full-file
++#
++# This will create a sequence of numbers that match the functions in
++# available_filter_functions, and echoing a number into the
++# set_ftrace_filter file will enable the corresponding function in
++# O(1) time, making enabling all functions O(n), where n is the number of
++# functions to enable.
++#
++# For either the new or old way, the rest of the operations remain the same.
++#
+ # # ftrace-bisect ~/full-file ~/test-file ~/non-test-file
+ # # cat ~/test-file > set_ftrace_filter
+ #
+-# *** Note *** this will take several minutes. Setting multiple functions is
+-# an O(n^2) operation, and we are dealing with thousands of functions. So go
+-# have coffee, talk with your coworkers, read facebook. And eventually, this
+-# operation will end.
+-#
+ # # echo function > current_tracer
+ #
+ # If it crashes, we know that ~/test-file has a bad function.
+ #
+ # Reboot back to test kernel.
+ #
+-# # cd /sys/kernel/debug/tracing
++# # cd /sys/kernel/tracing
+ # # mv ~/test-file ~/full-file
+ #
+ # If it didn't crash.
+diff --git a/security/tomoyo/Makefile b/security/tomoyo/Makefile
+index cca5a3012fee2..221eaadffb09c 100644
+--- a/security/tomoyo/Makefile
++++ b/security/tomoyo/Makefile
+@@ -10,7 +10,7 @@ endef
+ quiet_cmd_policy = POLICY $@
+ cmd_policy = ($(call do_policy,profile); $(call do_policy,exception_policy); $(call do_policy,domain_policy); $(call do_policy,manager); $(call do_policy,stat)) >$@
+
+-$(obj)/builtin-policy.h: $(wildcard $(obj)/policy/*.conf $(src)/policy/*.conf.default) FORCE
++$(obj)/builtin-policy.h: $(wildcard $(obj)/policy/*.conf $(srctree)/$(src)/policy/*.conf.default) FORCE
+ $(call if_changed,policy)
+
+ $(obj)/common.o: $(obj)/builtin-policy.h
+diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c
+index 1f0b5527c5949..0d283e41f66dc 100644
+--- a/sound/soc/amd/yc/acp6x-mach.c
++++ b/sound/soc/amd/yc/acp6x-mach.c
+@@ -206,6 +206,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "UM5302TA"),
+ }
+ },
++ {
++ .driver_data = &acp6x_card,
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "M5402RA"),
++ }
++ },
+ {
+ .driver_data = &acp6x_card,
+ .matches = {
+@@ -220,6 +227,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "Redmi Book Pro 14 2022"),
+ }
+ },
++ {
++ .driver_data = &acp6x_card,
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "Razer"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Blade 14 (2022) - RZ09-0427"),
++ }
++ },
+ {}
+ };
+
+diff --git a/sound/soc/codecs/es8326.c b/sound/soc/codecs/es8326.c
+old mode 100755
+new mode 100644
+diff --git a/sound/soc/codecs/es8326.h b/sound/soc/codecs/es8326.h
+old mode 100755
+new mode 100644
+diff --git a/sound/soc/fsl/fsl-asoc-card.c b/sound/soc/fsl/fsl-asoc-card.c
+index c836848ef0a65..8d14b5593658d 100644
+--- a/sound/soc/fsl/fsl-asoc-card.c
++++ b/sound/soc/fsl/fsl-asoc-card.c
+@@ -121,11 +121,11 @@ static const struct snd_soc_dapm_route audio_map[] = {
+
+ static const struct snd_soc_dapm_route audio_map_ac97[] = {
+ /* 1st half -- Normal DAPM routes */
+- {"Playback", NULL, "AC97 Playback"},
+- {"AC97 Capture", NULL, "Capture"},
++ {"AC97 Playback", NULL, "CPU AC97 Playback"},
++ {"CPU AC97 Capture", NULL, "AC97 Capture"},
+ /* 2nd half -- ASRC DAPM routes */
+- {"AC97 Playback", NULL, "ASRC-Playback"},
+- {"ASRC-Capture", NULL, "AC97 Capture"},
++ {"CPU AC97 Playback", NULL, "ASRC-Playback"},
++ {"ASRC-Capture", NULL, "CPU AC97 Capture"},
+ };
+
+ static const struct snd_soc_dapm_route audio_map_tx[] = {
+diff --git a/sound/soc/fsl/fsl_micfil.c b/sound/soc/fsl/fsl_micfil.c
+index 4b86ef82fd930..4b8fe9b8be407 100644
+--- a/sound/soc/fsl/fsl_micfil.c
++++ b/sound/soc/fsl/fsl_micfil.c
+@@ -154,21 +154,21 @@ static int micfil_quality_set(struct snd_kcontrol *kcontrol,
+
+ static const struct snd_kcontrol_new fsl_micfil_snd_controls[] = {
+ SOC_SINGLE_SX_TLV("CH0 Volume", REG_MICFIL_OUT_CTRL,
+- MICFIL_OUTGAIN_CHX_SHIFT(0), 0xF, 0x7, gain_tlv),
++ MICFIL_OUTGAIN_CHX_SHIFT(0), 0x8, 0xF, gain_tlv),
+ SOC_SINGLE_SX_TLV("CH1 Volume", REG_MICFIL_OUT_CTRL,
+- MICFIL_OUTGAIN_CHX_SHIFT(1), 0xF, 0x7, gain_tlv),
++ MICFIL_OUTGAIN_CHX_SHIFT(1), 0x8, 0xF, gain_tlv),
+ SOC_SINGLE_SX_TLV("CH2 Volume", REG_MICFIL_OUT_CTRL,
+- MICFIL_OUTGAIN_CHX_SHIFT(2), 0xF, 0x7, gain_tlv),
++ MICFIL_OUTGAIN_CHX_SHIFT(2), 0x8, 0xF, gain_tlv),
+ SOC_SINGLE_SX_TLV("CH3 Volume", REG_MICFIL_OUT_CTRL,
+- MICFIL_OUTGAIN_CHX_SHIFT(3), 0xF, 0x7, gain_tlv),
++ MICFIL_OUTGAIN_CHX_SHIFT(3), 0x8, 0xF, gain_tlv),
+ SOC_SINGLE_SX_TLV("CH4 Volume", REG_MICFIL_OUT_CTRL,
+- MICFIL_OUTGAIN_CHX_SHIFT(4), 0xF, 0x7, gain_tlv),
++ MICFIL_OUTGAIN_CHX_SHIFT(4), 0x8, 0xF, gain_tlv),
+ SOC_SINGLE_SX_TLV("CH5 Volume", REG_MICFIL_OUT_CTRL,
+- MICFIL_OUTGAIN_CHX_SHIFT(5), 0xF, 0x7, gain_tlv),
++ MICFIL_OUTGAIN_CHX_SHIFT(5), 0x8, 0xF, gain_tlv),
+ SOC_SINGLE_SX_TLV("CH6 Volume", REG_MICFIL_OUT_CTRL,
+- MICFIL_OUTGAIN_CHX_SHIFT(6), 0xF, 0x7, gain_tlv),
++ MICFIL_OUTGAIN_CHX_SHIFT(6), 0x8, 0xF, gain_tlv),
+ SOC_SINGLE_SX_TLV("CH7 Volume", REG_MICFIL_OUT_CTRL,
+- MICFIL_OUTGAIN_CHX_SHIFT(7), 0xF, 0x7, gain_tlv),
++ MICFIL_OUTGAIN_CHX_SHIFT(7), 0x8, 0xF, gain_tlv),
+ SOC_ENUM_EXT("MICFIL Quality Select",
+ fsl_micfil_quality_enum,
+ micfil_quality_get, micfil_quality_set),
+diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
+index c9e0e31d5b34d..46a53551b955c 100644
+--- a/sound/soc/fsl/fsl_ssi.c
++++ b/sound/soc/fsl/fsl_ssi.c
+@@ -1189,14 +1189,14 @@ static struct snd_soc_dai_driver fsl_ssi_ac97_dai = {
+ .symmetric_channels = 1,
+ .probe = fsl_ssi_dai_probe,
+ .playback = {
+- .stream_name = "AC97 Playback",
++ .stream_name = "CPU AC97 Playback",
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000_48000,
+ .formats = SNDRV_PCM_FMTBIT_S16 | SNDRV_PCM_FMTBIT_S20,
+ },
+ .capture = {
+- .stream_name = "AC97 Capture",
++ .stream_name = "CPU AC97 Capture",
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_48000,
+diff --git a/sound/soc/mediatek/Kconfig b/sound/soc/mediatek/Kconfig
+index 363fa4d476800..b027fba8233df 100644
+--- a/sound/soc/mediatek/Kconfig
++++ b/sound/soc/mediatek/Kconfig
+@@ -182,10 +182,12 @@ config SND_SOC_MT8186_MT6366_DA7219_MAX98357
+ If unsure select "N".
+
+ config SND_SOC_MT8186_MT6366_RT1019_RT5682S
+- tristate "ASoC Audio driver for MT8186 with RT1019 RT5682S codec"
++ tristate "ASoC Audio driver for MT8186 with RT1019 RT5682S MAX98357A/MAX98360 codec"
+ depends on I2C && GPIOLIB
+ depends on SND_SOC_MT8186 && MTK_PMIC_WRAP
++ select SND_SOC_MAX98357A
+ select SND_SOC_MT6358
+ select SND_SOC_RT1015P
+ select SND_SOC_RT5682S
+ select SND_SOC_BT_SCO
+diff --git a/sound/soc/mediatek/mt8186/mt8186-mt6366-rt1019-rt5682s.c b/sound/soc/mediatek/mt8186/mt8186-mt6366-rt1019-rt5682s.c
+index 60fa55d0c91f0..6babadb2e6fe2 100644
+--- a/sound/soc/mediatek/mt8186/mt8186-mt6366-rt1019-rt5682s.c
++++ b/sound/soc/mediatek/mt8186/mt8186-mt6366-rt1019-rt5682s.c
+@@ -991,6 +991,21 @@ static struct snd_soc_card mt8186_mt6366_rt1019_rt5682s_soc_card = {
+ .num_configs = ARRAY_SIZE(mt8186_mt6366_rt1019_rt5682s_codec_conf),
+ };
+
++static struct snd_soc_card mt8186_mt6366_rt5682s_max98360_soc_card = {
++ .name = "mt8186_rt5682s_max98360",
++ .owner = THIS_MODULE,
++ .dai_link = mt8186_mt6366_rt1019_rt5682s_dai_links,
++ .num_links = ARRAY_SIZE(mt8186_mt6366_rt1019_rt5682s_dai_links),
++ .controls = mt8186_mt6366_rt1019_rt5682s_controls,
++ .num_controls = ARRAY_SIZE(mt8186_mt6366_rt1019_rt5682s_controls),
++ .dapm_widgets = mt8186_mt6366_rt1019_rt5682s_widgets,
++ .num_dapm_widgets = ARRAY_SIZE(mt8186_mt6366_rt1019_rt5682s_widgets),
++ .dapm_routes = mt8186_mt6366_rt1019_rt5682s_routes,
++ .num_dapm_routes = ARRAY_SIZE(mt8186_mt6366_rt1019_rt5682s_routes),
++ .codec_conf = mt8186_mt6366_rt1019_rt5682s_codec_conf,
++ .num_configs = ARRAY_SIZE(mt8186_mt6366_rt1019_rt5682s_codec_conf),
++};
++
+ static int mt8186_mt6366_rt1019_rt5682s_dev_probe(struct platform_device *pdev)
+ {
+ struct snd_soc_card *card;
+@@ -1132,9 +1147,14 @@ err_adsp_node:
+
+ #if IS_ENABLED(CONFIG_OF)
+ static const struct of_device_id mt8186_mt6366_rt1019_rt5682s_dt_match[] = {
+- { .compatible = "mediatek,mt8186-mt6366-rt1019-rt5682s-sound",
++ {
++ .compatible = "mediatek,mt8186-mt6366-rt1019-rt5682s-sound",
+ .data = &mt8186_mt6366_rt1019_rt5682s_soc_card,
+ },
++ {
++ .compatible = "mediatek,mt8186-mt6366-rt5682s-max98360-sound",
++ .data = &mt8186_mt6366_rt5682s_max98360_soc_card,
++ },
+ {}
+ };
+ #endif
+diff --git a/sound/soc/sof/debug.c b/sound/soc/sof/debug.c
+index d9a3ce7b69e16..ade0507328af4 100644
+--- a/sound/soc/sof/debug.c
++++ b/sound/soc/sof/debug.c
+@@ -353,7 +353,9 @@ int snd_sof_dbg_init(struct snd_sof_dev *sdev)
+ return err;
+ }
+
+- return 0;
++ return snd_sof_debugfs_buf_item(sdev, &sdev->fw_state,
++ sizeof(sdev->fw_state),
++ "fw_state", 0444);
+ }
+ EXPORT_SYMBOL_GPL(snd_sof_dbg_init);
+
+diff --git a/sound/soc/sof/pm.c b/sound/soc/sof/pm.c
+index df740be645e84..8722bbd7fd3d7 100644
+--- a/sound/soc/sof/pm.c
++++ b/sound/soc/sof/pm.c
+@@ -182,7 +182,7 @@ static int sof_suspend(struct device *dev, bool runtime_suspend)
+ const struct sof_ipc_pm_ops *pm_ops = sdev->ipc->ops->pm;
+ const struct sof_ipc_tplg_ops *tplg_ops = sdev->ipc->ops->tplg;
+ pm_message_t pm_state;
+- u32 target_state = 0;
++ u32 target_state = snd_sof_dsp_power_target(sdev);
+ int ret;
+
+ /* do nothing if dsp suspend callback is not set */
+@@ -192,6 +192,9 @@ static int sof_suspend(struct device *dev, bool runtime_suspend)
+ if (runtime_suspend && !sof_ops(sdev)->runtime_suspend)
+ return 0;
+
++ if (tplg_ops && tplg_ops->tear_down_all_pipelines)
++ tplg_ops->tear_down_all_pipelines(sdev, false);
++
+ if (sdev->fw_state != SOF_FW_BOOT_COMPLETE)
+ goto suspend;
+
+@@ -206,7 +209,6 @@ static int sof_suspend(struct device *dev, bool runtime_suspend)
+ }
+ }
+
+- target_state = snd_sof_dsp_power_target(sdev);
+ pm_state.event = target_state;
+
+ /* Skip to platform-specific suspend if DSP is entering D0 */
+@@ -217,9 +219,6 @@ static int sof_suspend(struct device *dev, bool runtime_suspend)
+ goto suspend;
+ }
+
+- if (tplg_ops->tear_down_all_pipelines)
+- tplg_ops->tear_down_all_pipelines(sdev, false);
+-
+ /* suspend DMA trace */
+ sof_fw_trace_suspend(sdev, pm_state);
+
+diff --git a/tools/gpio/gpio-event-mon.c b/tools/gpio/gpio-event-mon.c
+index 6c122952c5892..5dee2b98ab604 100644
+--- a/tools/gpio/gpio-event-mon.c
++++ b/tools/gpio/gpio-event-mon.c
+@@ -86,6 +86,7 @@ int monitor_device(const char *device_name,
+ gpiotools_test_bit(values.bits, i));
+ }
+
++ i = 0;
+ while (1) {
+ struct gpio_v2_line_event event;
+
+diff --git a/tools/include/nolibc/ctype.h b/tools/include/nolibc/ctype.h
+index e3000b2992d7b..6f90706d06442 100644
+--- a/tools/include/nolibc/ctype.h
++++ b/tools/include/nolibc/ctype.h
+@@ -96,4 +96,7 @@ int ispunct(int c)
+ return isgraph(c) && !isalnum(c);
+ }
+
++/* make sure to include all global symbols */
++#include "nolibc.h"
++
+ #endif /* _NOLIBC_CTYPE_H */
+diff --git a/tools/include/nolibc/errno.h b/tools/include/nolibc/errno.h
+index 06893d6dfb7a6..9dc4919c769b7 100644
+--- a/tools/include/nolibc/errno.h
++++ b/tools/include/nolibc/errno.h
+@@ -24,4 +24,7 @@ static int errno;
+ */
+ #define MAX_ERRNO 4095
+
++/* make sure to include all global symbols */
++#include "nolibc.h"
++
+ #endif /* _NOLIBC_ERRNO_H */
+diff --git a/tools/include/nolibc/signal.h b/tools/include/nolibc/signal.h
+index ef47e71e2be37..137552216e469 100644
+--- a/tools/include/nolibc/signal.h
++++ b/tools/include/nolibc/signal.h
+@@ -19,4 +19,7 @@ int raise(int signal)
+ return sys_kill(sys_getpid(), signal);
+ }
+
++/* make sure to include all global symbols */
++#include "nolibc.h"
++
+ #endif /* _NOLIBC_SIGNAL_H */
+diff --git a/tools/include/nolibc/stdio.h b/tools/include/nolibc/stdio.h
+index a3cebc4bc3ac4..96ac8afc5aeed 100644
+--- a/tools/include/nolibc/stdio.h
++++ b/tools/include/nolibc/stdio.h
+@@ -303,4 +303,7 @@ void perror(const char *msg)
+ fprintf(stderr, "%s%serrno=%d\n", (msg && *msg) ? msg : "", (msg && *msg) ? ": " : "", errno);
+ }
+
++/* make sure to include all global symbols */
++#include "nolibc.h"
++
+ #endif /* _NOLIBC_STDIO_H */
+diff --git a/tools/include/nolibc/stdlib.h b/tools/include/nolibc/stdlib.h
+index 92378c4b96605..a24000d1e8222 100644
+--- a/tools/include/nolibc/stdlib.h
++++ b/tools/include/nolibc/stdlib.h
+@@ -419,4 +419,7 @@ char *u64toa(uint64_t in)
+ return itoa_buffer;
+ }
+
++/* make sure to include all global symbols */
++#include "nolibc.h"
++
+ #endif /* _NOLIBC_STDLIB_H */
+diff --git a/tools/include/nolibc/string.h b/tools/include/nolibc/string.h
+index ad97c0d522b8e..fffdaf6ff4673 100644
+--- a/tools/include/nolibc/string.h
++++ b/tools/include/nolibc/string.h
+@@ -88,8 +88,11 @@ void *memset(void *dst, int b, size_t len)
+ {
+ char *p = dst;
+
+- while (len--)
++ while (len--) {
++ /* prevent gcc from recognizing memset() here */
++ asm volatile("");
+ *(p++) = b;
++ }
+ return dst;
+ }
+
+@@ -285,4 +288,7 @@ char *strrchr(const char *s, int c)
+ return (char *)ret;
+ }
+
++/* make sure to include all global symbols */
++#include "nolibc.h"
++
+ #endif /* _NOLIBC_STRING_H */
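The empty asm statement added to memset() above is the usual defense against compiler idiom recognition: at -O2, GCC can recognize a byte-store loop and replace it with a call to memset(), which is fatal when the loop is memset() itself. A stand-alone illustration of the idiom, with a hypothetical my_memset standing in:

    #include <stddef.h>
    #include <stdio.h>

    static void *my_memset(void *dst, int b, size_t len)
    {
        char *p = dst;

        while (len--) {
            __asm__ volatile("");   /* defeat pattern recognition */
            *p++ = (char)b;
        }
        return dst;
    }

    int main(void)
    {
        char buf[8];

        my_memset(buf, 'x', sizeof(buf) - 1);
        buf[7] = '\0';
        puts(buf);  /* xxxxxxx */
        return 0;
    }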
+diff --git a/tools/include/nolibc/sys.h b/tools/include/nolibc/sys.h
+index ce3ee03aa6794..78473d34e27cd 100644
+--- a/tools/include/nolibc/sys.h
++++ b/tools/include/nolibc/sys.h
+@@ -1243,5 +1243,7 @@ ssize_t write(int fd, const void *buf, size_t count)
+ return ret;
+ }
+
++/* make sure to include all global symbols */
++#include "nolibc.h"
+
+ #endif /* _NOLIBC_SYS_H */
+diff --git a/tools/include/nolibc/time.h b/tools/include/nolibc/time.h
+index d18b7661fdd71..84655361b9ad2 100644
+--- a/tools/include/nolibc/time.h
++++ b/tools/include/nolibc/time.h
+@@ -25,4 +25,7 @@ time_t time(time_t *tptr)
+ return tv.tv_sec;
+ }
+
++/* make sure to include all global symbols */
++#include "nolibc.h"
++
+ #endif /* _NOLIBC_TIME_H */
+diff --git a/tools/include/nolibc/types.h b/tools/include/nolibc/types.h
+index 959997034e553..fbbc0e68c001b 100644
+--- a/tools/include/nolibc/types.h
++++ b/tools/include/nolibc/types.h
+@@ -26,13 +26,13 @@
+ #define S_IFSOCK 0140000
+ #define S_IFMT 0170000
+
+-#define S_ISDIR(mode) (((mode) & S_IFDIR) == S_IFDIR)
+-#define S_ISCHR(mode) (((mode) & S_IFCHR) == S_IFCHR)
+-#define S_ISBLK(mode) (((mode) & S_IFBLK) == S_IFBLK)
+-#define S_ISREG(mode) (((mode) & S_IFREG) == S_IFREG)
+-#define S_ISFIFO(mode) (((mode) & S_IFIFO) == S_IFIFO)
+-#define S_ISLNK(mode) (((mode) & S_IFLNK) == S_IFLNK)
+-#define S_ISSOCK(mode) (((mode) & S_IFSOCK) == S_IFSOCK)
++#define S_ISDIR(mode) (((mode) & S_IFMT) == S_IFDIR)
++#define S_ISCHR(mode) (((mode) & S_IFMT) == S_IFCHR)
++#define S_ISBLK(mode) (((mode) & S_IFMT) == S_IFBLK)
++#define S_ISREG(mode) (((mode) & S_IFMT) == S_IFREG)
++#define S_ISFIFO(mode) (((mode) & S_IFMT) == S_IFIFO)
++#define S_ISLNK(mode) (((mode) & S_IFMT) == S_IFLNK)
++#define S_ISSOCK(mode) (((mode) & S_IFMT) == S_IFSOCK)
+
+ /* dirent types */
+ #define DT_UNKNOWN 0x0
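The S_ISxxx fix above matters because the file-type field is an enumeration inside the S_IFMT bits, not a set of independent flags: S_IFBLK (0060000) contains every bit of S_IFDIR (0040000), so the old test (mode & S_IFDIR) == S_IFDIR classified block devices as directories. A small self-contained demonstration (constants redefined locally to avoid the system headers):

    #include <stdio.h>

    #define S_IFDIR  0040000
    #define S_IFBLK  0060000
    #define S_IFMT   0170000

    #define OLD_S_ISDIR(m) (((m) & S_IFDIR) == S_IFDIR)  /* buggy */
    #define NEW_S_ISDIR(m) (((m) & S_IFMT)  == S_IFDIR)  /* fixed */

    int main(void)
    {
        /* a block device's type bits are a superset of the directory bits */
        printf("old: %d\n", OLD_S_ISDIR(S_IFBLK));  /* 1 -- wrong */
        printf("new: %d\n", NEW_S_ISDIR(S_IFBLK));  /* 0 -- correct */
        return 0;
    }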
+@@ -89,39 +89,46 @@
+ #define EXIT_SUCCESS 0
+ #define EXIT_FAILURE 1
+
++#define FD_SETIDXMASK (8 * sizeof(unsigned long))
++#define FD_SETBITMASK (8 * sizeof(unsigned long) - 1)
++
+ /* for select() */
+ typedef struct {
+- uint32_t fd32[(FD_SETSIZE + 31) / 32];
++ unsigned long fds[(FD_SETSIZE + FD_SETBITMASK) / FD_SETIDXMASK];
+ } fd_set;
+
+-#define FD_CLR(fd, set) do { \
+- fd_set *__set = (set); \
+- int __fd = (fd); \
+- if (__fd >= 0) \
+- __set->fd32[__fd / 32] &= ~(1U << (__fd & 31)); \
++#define FD_CLR(fd, set) do { \
++ fd_set *__set = (set); \
++ int __fd = (fd); \
++ if (__fd >= 0) \
++ __set->fds[__fd / FD_SETIDXMASK] &= \
++			~(1UL << (__fd & FD_SETBITMASK));	\
+ } while (0)
+
+-#define FD_SET(fd, set) do { \
+- fd_set *__set = (set); \
+- int __fd = (fd); \
+- if (__fd >= 0) \
+- __set->fd32[__fd / 32] |= 1U << (__fd & 31); \
++#define FD_SET(fd, set) do { \
++ fd_set *__set = (set); \
++ int __fd = (fd); \
++ if (__fd >= 0) \
++ __set->fds[__fd / FD_SETIDXMASK] |= \
++			1UL << (__fd & FD_SETBITMASK);		\
+ } while (0)
+
+-#define FD_ISSET(fd, set) ({ \
+- fd_set *__set = (set); \
+- int __fd = (fd); \
+- int __r = 0; \
+- if (__fd >= 0) \
+- __r = !!(__set->fd32[__fd / 32] & 1U << (__fd & 31)); \
+- __r; \
++#define FD_ISSET(fd, set) ({ \
++ fd_set *__set = (set); \
++ int __fd = (fd); \
++ int __r = 0; \
++ if (__fd >= 0) \
++ __r = !!(__set->fds[__fd / FD_SETIDXMASK] & \
++			 1UL << (__fd & FD_SETBITMASK));	\
++ __r; \
+ })
+
+-#define FD_ZERO(set) do { \
+- fd_set *__set = (set); \
+- int __idx; \
+- for (__idx = 0; __idx < (FD_SETSIZE+31) / 32; __idx ++) \
+- __set->fd32[__idx] = 0; \
++#define FD_ZERO(set) do { \
++ fd_set *__set = (set); \
++ int __idx; \
++ int __size = (FD_SETSIZE+FD_SETBITMASK) / FD_SETIDXMASK;\
++ for (__idx = 0; __idx < __size; __idx++) \
++ __set->fds[__idx] = 0; \
+ } while (0)
+
+ /* for poll() */
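Despite their names, FD_SETIDXMASK above is the word width in bits and FD_SETBITMASK the in-word index mask: each descriptor maps to word fd / FD_SETIDXMASK and bit fd & FD_SETBITMASK. A compact demonstration of the word/bit split, with stand-alone illustrative constants:

    #include <stdio.h>

    #define SETSIZE 256
    #define IDXBITS (8 * sizeof(unsigned long)) /* bits per word */
    #define BITMASK (IDXBITS - 1)               /* in-word mask */

    int main(void)
    {
        unsigned long fds[(SETSIZE + BITMASK) / IDXBITS] = { 0 };
        int fd = 71;    /* word 1, bit 7 on a 64-bit platform */

        fds[fd / IDXBITS] |= 1UL << (fd & BITMASK);                      /* FD_SET */
        printf("%d\n", !!(fds[fd / IDXBITS] & (1UL << (fd & BITMASK)))); /* 1 */
        fds[fd / IDXBITS] &= ~(1UL << (fd & BITMASK));                   /* FD_CLR */
        printf("%d\n", !!(fds[fd / IDXBITS] & (1UL << (fd & BITMASK)))); /* 0 */
        return 0;
    }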
+@@ -202,4 +209,7 @@ struct stat {
+ })
+ #endif
+
++/* make sure to include all global symbols */
++#include "nolibc.h"
++
+ #endif /* _NOLIBC_TYPES_H */
+diff --git a/tools/include/nolibc/unistd.h b/tools/include/nolibc/unistd.h
+index 1c25e20ee3606..1cfcd52106a42 100644
+--- a/tools/include/nolibc/unistd.h
++++ b/tools/include/nolibc/unistd.h
+@@ -51,4 +51,7 @@ int tcsetpgrp(int fd, pid_t pid)
+ return ioctl(fd, TIOCSPGRP, &pid);
+ }
+
++/* make sure to include all global symbols */
++#include "nolibc.h"
++
+ #endif /* _NOLIBC_UNISTD_H */
+diff --git a/tools/testing/selftests/bpf/prog_tests/jeq_infer_not_null.c b/tools/testing/selftests/bpf/prog_tests/jeq_infer_not_null.c
+deleted file mode 100644
+index 3add34df57678..0000000000000
+--- a/tools/testing/selftests/bpf/prog_tests/jeq_infer_not_null.c
++++ /dev/null
+@@ -1,9 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0
+-
+-#include <test_progs.h>
+-#include "jeq_infer_not_null_fail.skel.h"
+-
+-void test_jeq_infer_not_null(void)
+-{
+- RUN_TESTS(jeq_infer_not_null_fail);
+-}
+diff --git a/tools/testing/selftests/bpf/progs/jeq_infer_not_null_fail.c b/tools/testing/selftests/bpf/progs/jeq_infer_not_null_fail.c
+deleted file mode 100644
+index f46965053acb2..0000000000000
+--- a/tools/testing/selftests/bpf/progs/jeq_infer_not_null_fail.c
++++ /dev/null
+@@ -1,42 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0
+-
+-#include "vmlinux.h"
+-#include <bpf/bpf_helpers.h>
+-#include "bpf_misc.h"
+-
+-char _license[] SEC("license") = "GPL";
+-
+-struct {
+- __uint(type, BPF_MAP_TYPE_HASH);
+- __uint(max_entries, 1);
+- __type(key, u64);
+- __type(value, u64);
+-} m_hash SEC(".maps");
+-
+-SEC("?raw_tp")
+-__failure __msg("R8 invalid mem access 'map_value_or_null")
+-int jeq_infer_not_null_ptr_to_btfid(void *ctx)
+-{
+- struct bpf_map *map = (struct bpf_map *)&m_hash;
+- struct bpf_map *inner_map = map->inner_map_meta;
+- u64 key = 0, ret = 0, *val;
+-
+- val = bpf_map_lookup_elem(map, &key);
+- /* Do not mark ptr as non-null if one of them is
+- * PTR_TO_BTF_ID (R9), reject because of invalid
+- * access to map value (R8).
+- *
+- * Here, we need to inline those insns to access
+- * R8 directly, since compiler may use other reg
+- * once it figures out val==inner_map.
+- */
+- asm volatile("r8 = %[val];\n"
+- "r9 = %[inner_map];\n"
+- "if r8 != r9 goto +1;\n"
+- "%[ret] = *(u64 *)(r8 +0);\n"
+- : [ret] "+r"(ret)
+- : [inner_map] "r"(inner_map), [val] "r"(val)
+- : "r8", "r9");
+-
+- return ret;
+-}
+diff --git a/tools/testing/selftests/net/toeplitz.c b/tools/testing/selftests/net/toeplitz.c
+index 90026a27eac0c..9ba03164d73a6 100644
+--- a/tools/testing/selftests/net/toeplitz.c
++++ b/tools/testing/selftests/net/toeplitz.c
+@@ -215,7 +215,7 @@ static char *recv_frame(const struct ring_state *ring, char *frame)
+ }
+
+ /* A single TPACKET_V3 block can hold multiple frames */
+-static void recv_block(struct ring_state *ring)
++static bool recv_block(struct ring_state *ring)
+ {
+ struct tpacket_block_desc *block;
+ char *frame;
+@@ -223,7 +223,7 @@ static void recv_block(struct ring_state *ring)
+
+ block = (void *)(ring->mmap + ring->idx * ring_block_sz);
+ if (!(block->hdr.bh1.block_status & TP_STATUS_USER))
+- return;
++ return false;
+
+ frame = (char *)block;
+ frame += block->hdr.bh1.offset_to_first_pkt;
+@@ -235,6 +235,8 @@ static void recv_block(struct ring_state *ring)
+
+ block->hdr.bh1.block_status = TP_STATUS_KERNEL;
+ ring->idx = (ring->idx + 1) % ring_block_nr;
++
++ return true;
+ }
+
+ /* simple test: sleep once unconditionally and then process all rings */
+@@ -245,7 +247,7 @@ static void process_rings(void)
+ usleep(1000 * cfg_timeout_msec);
+
+ for (i = 0; i < num_cpus; i++)
+- recv_block(&rings[i]);
++ do {} while (recv_block(&rings[i]));
+
+ fprintf(stderr, "count: pass=%u nohash=%u fail=%u\n",
+ frames_received - frames_nohash - frames_error,
+@@ -257,12 +259,12 @@ static char *setup_ring(int fd)
+ struct tpacket_req3 req3 = {0};
+ void *ring;
+
+- req3.tp_retire_blk_tov = cfg_timeout_msec;
++ req3.tp_retire_blk_tov = cfg_timeout_msec / 8;
+ req3.tp_feature_req_word = TP_FT_REQ_FILL_RXHASH;
+
+ req3.tp_frame_size = 2048;
+ req3.tp_frame_nr = 1 << 10;
+- req3.tp_block_nr = 2;
++ req3.tp_block_nr = 16;
+
+ req3.tp_block_size = req3.tp_frame_size * req3.tp_frame_nr;
+ req3.tp_block_size /= req3.tp_block_nr;
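The toeplitz.c changes attack flaky runs from two directions: recv_block() now reports whether it consumed a block, so process_rings() can drain every block that is already ready instead of reading at most one per ring per wakeup, and the ring is reshaped into sixteen smaller blocks with a retire timeout of an eighth of the test timeout, presumably so blocks retire to userspace well before the single sleep expires. The drain idiom is generic for any status-flagged ring; here is a self-contained user-space model of it, with illustrative names rather than the TPACKET_V3 API:

/* Model of draining a block ring until nothing is ready; not the
 * selftest code itself.
 */
#include <stdbool.h>
#include <stdio.h>

#define NBLOCKS	16

struct block {
	int ready;		/* stands in for TP_STATUS_USER */
	int payload;
};

static struct block ring[NBLOCKS];
static int idx;

/* Consume one block if ready; mirrors recv_block()'s new contract. */
static bool consume_one(void)
{
	struct block *b = &ring[idx];

	if (!b->ready)
		return false;	/* nothing ready: caller stops draining */
	printf("consumed %d\n", b->payload);
	b->ready = 0;		/* hand the block back to the producer */
	idx = (idx + 1) % NBLOCKS;
	return true;
}

int main(void)
{
	int i;

	for (i = 0; i < 5; i++) {	/* producer filled five blocks */
		ring[i].ready = 1;
		ring[i].payload = i;
	}
	while (consume_one())		/* drain everything that is ready */
		;
	return 0;
}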
+diff --git a/virt/kvm/vfio.c b/virt/kvm/vfio.c
+index 495ceabffe88b..9584eb57e0eda 100644
+--- a/virt/kvm/vfio.c
++++ b/virt/kvm/vfio.c
+@@ -336,7 +336,7 @@ static int kvm_vfio_has_attr(struct kvm_device *dev,
+ return -ENXIO;
+ }
+
+-static void kvm_vfio_destroy(struct kvm_device *dev)
++static void kvm_vfio_release(struct kvm_device *dev)
+ {
+ struct kvm_vfio *kv = dev->private;
+ struct kvm_vfio_group *kvg, *tmp;
+@@ -355,7 +355,7 @@ static void kvm_vfio_destroy(struct kvm_device *dev)
+ kvm_vfio_update_coherency(dev);
+
+ kfree(kv);
+- kfree(dev); /* alloc by kvm_ioctl_create_device, free by .destroy */
++ kfree(dev); /* alloc by kvm_ioctl_create_device, free by .release */
+ }
+
+ static int kvm_vfio_create(struct kvm_device *dev, u32 type);
+@@ -363,7 +363,7 @@ static int kvm_vfio_create(struct kvm_device *dev, u32 type);
+ static struct kvm_device_ops kvm_vfio_ops = {
+ .name = "kvm-vfio",
+ .create = kvm_vfio_create,
+- .destroy = kvm_vfio_destroy,
++ .release = kvm_vfio_release,
+ .set_attr = kvm_vfio_set_attr,
+ .has_attr = kvm_vfio_has_attr,
+ };
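The kvm-vfio change swaps the .destroy callback for .release. In kvm_device_ops the two differ in timing: .destroy tears the device down immediately when the VM itself is destroyed, while .release runs only when the last reference to the device file descriptor is dropped, so a still-open fd cannot be left pointing at freed memory. A user-space model of that free-on-last-reference discipline, with illustrative names and not the KVM code:

/* "Free on last put" model of the .release semantics. */
#include <stdio.h>
#include <stdlib.h>

struct device {
	int refs;
	void (*release)(struct device *);
};

static void dev_release(struct device *d)
{
	printf("releasing device\n");
	free(d);		/* only safe once no reference remains */
}

static void dev_put(struct device *d)
{
	if (--d->refs == 0)
		d->release(d);	/* last reference gone: free now */
}

int main(void)
{
	struct device *d = malloc(sizeof(*d));

	if (!d)
		return 1;
	d->refs = 2;		/* the VM and an open fd each hold one */
	d->release = dev_release;
	dev_put(d);		/* VM destroyed: device must survive... */
	dev_put(d);		/* ...until the fd is closed */
	return 0;
}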