author     Mike Pagano <mpagano@gentoo.org>  2024-01-25 18:33:42 -0500
committer  Mike Pagano <mpagano@gentoo.org>  2024-01-25 18:33:42 -0500
commit     65bc4cedeb20ce2479d4d49d272ec64f9bd82000 (patch)
tree       277235f207962b84cd8c1006844fdcb30a169902
parent     Linux patch 5.15.147 (diff)
download   linux-patches-65bc4cedeb20ce2479d4d49d272ec64f9bd82000.tar.gz
           linux-patches-65bc4cedeb20ce2479d4d49d272ec64f9bd82000.tar.bz2
           linux-patches-65bc4cedeb20ce2479d4d49d272ec64f9bd82000.zip
Linux patch 5.15.148
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
-rw-r--r--  0000_README               |     4
-rw-r--r--  1147_linux-5.15.148.patch | 12540
2 files changed, 12544 insertions, 0 deletions
diff --git a/0000_README b/0000_README
index e24763df..14bc0ec9 100644
--- a/0000_README
+++ b/0000_README
@@ -631,6 +631,10 @@ Patch: 1146_linux-5.15.147.patch
From: https://www.kernel.org
Desc: Linux 5.15.147
+Patch: 1147_linux-5.15.148.patch
+From: https://www.kernel.org
+Desc: Linux 5.15.148
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1147_linux-5.15.148.patch b/1147_linux-5.15.148.patch
new file mode 100644
index 00000000..1e7f63b7
--- /dev/null
+++ b/1147_linux-5.15.148.patch
@@ -0,0 +1,12540 @@
+diff --git a/Makefile b/Makefile
+index 95170c5f75c2a..1e5ac4584ac10 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 15
+-SUBLEVEL = 147
++SUBLEVEL = 148
+ EXTRAVERSION =
+ NAME = Trick or Treat
+
+diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c
+index cb2f88502bafe..25c16eb1d90b7 100644
+--- a/arch/arc/kernel/signal.c
++++ b/arch/arc/kernel/signal.c
+@@ -61,7 +61,7 @@ struct rt_sigframe {
+ unsigned int sigret_magic;
+ };
+
+-static int save_arcv2_regs(struct sigcontext *mctx, struct pt_regs *regs)
++static int save_arcv2_regs(struct sigcontext __user *mctx, struct pt_regs *regs)
+ {
+ int err = 0;
+ #ifndef CONFIG_ISA_ARCOMPACT
+@@ -74,12 +74,12 @@ static int save_arcv2_regs(struct sigcontext *mctx, struct pt_regs *regs)
+ #else
+ v2abi.r58 = v2abi.r59 = 0;
+ #endif
+- err = __copy_to_user(&mctx->v2abi, &v2abi, sizeof(v2abi));
++ err = __copy_to_user(&mctx->v2abi, (void const *)&v2abi, sizeof(v2abi));
+ #endif
+ return err;
+ }
+
+-static int restore_arcv2_regs(struct sigcontext *mctx, struct pt_regs *regs)
++static int restore_arcv2_regs(struct sigcontext __user *mctx, struct pt_regs *regs)
+ {
+ int err = 0;
+ #ifndef CONFIG_ISA_ARCOMPACT
+diff --git a/arch/arm/boot/dts/qcom-apq8064.dtsi b/arch/arm/boot/dts/qcom-apq8064.dtsi
+index 0e830476fefd2..d70f071fd8304 100644
+--- a/arch/arm/boot/dts/qcom-apq8064.dtsi
++++ b/arch/arm/boot/dts/qcom-apq8064.dtsi
+@@ -760,7 +760,7 @@
+
+ xoadc: xoadc@197 {
+ compatible = "qcom,pm8921-adc";
+- reg = <197>;
++ reg = <0x197>;
+ interrupts-extended = <&pmicintc 78 IRQ_TYPE_EDGE_RISING>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+diff --git a/arch/arm/boot/dts/qcom-sdx55.dtsi b/arch/arm/boot/dts/qcom-sdx55.dtsi
+index 9d62487f6c8ff..73fa0ef6b69ef 100644
+--- a/arch/arm/boot/dts/qcom-sdx55.dtsi
++++ b/arch/arm/boot/dts/qcom-sdx55.dtsi
+@@ -457,7 +457,7 @@
+ reg = <0x0c264000 0x1000>;
+ };
+
+- spmi_bus: qcom,spmi@c440000 {
++ spmi_bus: spmi@c440000 {
+ compatible = "qcom,spmi-pmic-arb";
+ reg = <0x0c440000 0x0000d00>,
+ <0x0c600000 0x2000000>,
+diff --git a/arch/arm/mach-davinci/Kconfig b/arch/arm/mach-davinci/Kconfig
+index 1d3aef84287d5..889f3b9255c7d 100644
+--- a/arch/arm/mach-davinci/Kconfig
++++ b/arch/arm/mach-davinci/Kconfig
+@@ -3,12 +3,14 @@
+ menuconfig ARCH_DAVINCI
+ bool "TI DaVinci"
+ depends on ARCH_MULTI_V5
++ select CPU_ARM926T
+ select DAVINCI_TIMER
+ select ZONE_DMA
+ select PM_GENERIC_DOMAINS if PM
+ select PM_GENERIC_DOMAINS_OF if PM && OF
+ select REGMAP_MMIO
+ select RESET_CONTROLLER
++ select PINCTRL
+ select PINCTRL_SINGLE
+
+ if ARCH_DAVINCI
+diff --git a/arch/arm/mach-sunxi/mc_smp.c b/arch/arm/mach-sunxi/mc_smp.c
+index b2f5f4f28705f..f779e386b6e7d 100644
+--- a/arch/arm/mach-sunxi/mc_smp.c
++++ b/arch/arm/mach-sunxi/mc_smp.c
+@@ -804,12 +804,12 @@ static int __init sunxi_mc_smp_init(void)
+ for (i = 0; i < ARRAY_SIZE(sunxi_mc_smp_data); i++) {
+ ret = of_property_match_string(node, "enable-method",
+ sunxi_mc_smp_data[i].enable_method);
+- if (!ret)
++ if (ret >= 0)
+ break;
+ }
+
+ of_node_put(node);
+- if (ret)
++ if (ret < 0)
+ return -ENODEV;
+
+ is_a83t = sunxi_mc_smp_data[i].is_a83t;
+diff --git a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
+index b276dd77df83c..7006d6883402d 100644
+--- a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
++++ b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
+@@ -130,7 +130,7 @@
+ compatible = "microchip,mcp7940x";
+ reg = <0x6f>;
+ interrupt-parent = <&gpiosb>;
+- interrupts = <5 0>; /* GPIO2_5 */
++ interrupts = <5 IRQ_TYPE_EDGE_FALLING>; /* GPIO2_5 */
+ };
+ };
+
+diff --git a/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts b/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts
+index d3449cb52defe..9ef0afdae1481 100644
+--- a/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts
++++ b/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts
+@@ -58,8 +58,8 @@
+ user4 {
+ label = "green:user4";
+ gpios = <&pm8150_gpios 10 GPIO_ACTIVE_HIGH>;
+- linux,default-trigger = "panic-indicator";
+ default-state = "off";
++ panic-indicator;
+ };
+
+ wlan {
+diff --git a/arch/arm64/boot/dts/qcom/sc7180.dtsi b/arch/arm64/boot/dts/qcom/sc7180.dtsi
+index 12816d60e2494..410e0305eb512 100644
+--- a/arch/arm64/boot/dts/qcom/sc7180.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc7180.dtsi
+@@ -3366,7 +3366,7 @@
+ compatible = "qcom,apss-wdt-sc7180", "qcom,kpss-wdt";
+ reg = <0 0x17c10000 0 0x1000>;
+ clocks = <&sleep_clk>;
+- interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>;
++ interrupts = <GIC_SPI 0 IRQ_TYPE_EDGE_RISING>;
+ };
+
+ timer@17c20000{
+diff --git a/arch/arm64/boot/dts/qcom/sc7280.dtsi b/arch/arm64/boot/dts/qcom/sc7280.dtsi
+index dc4aab258ff6f..929fc0667e985 100644
+--- a/arch/arm64/boot/dts/qcom/sc7280.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc7280.dtsi
+@@ -485,6 +485,7 @@
+
+ bus-width = <8>;
+ supports-cqe;
++ dma-coherent;
+
+ qcom,dll-config = <0x0007642c>;
+ qcom,ddr-config = <0x80040868>;
+@@ -1174,6 +1175,7 @@
+ operating-points-v2 = <&sdhc2_opp_table>;
+
+ bus-width = <4>;
++ dma-coherent;
+
+ qcom,dll-config = <0x0007642c>;
+
+@@ -1290,8 +1292,8 @@
+ assigned-clock-rates = <19200000>, <200000000>;
+
+ interrupts-extended = <&intc GIC_SPI 240 IRQ_TYPE_LEVEL_HIGH>,
+- <&pdc 13 IRQ_TYPE_EDGE_RISING>,
+- <&pdc 12 IRQ_TYPE_EDGE_RISING>;
++ <&pdc 12 IRQ_TYPE_EDGE_BOTH>,
++ <&pdc 13 IRQ_TYPE_EDGE_BOTH>;
+ interrupt-names = "hs_phy_irq",
+ "dm_hs_phy_irq", "dp_hs_phy_irq";
+
+@@ -1711,7 +1713,7 @@
+ compatible = "qcom,apss-wdt-sc7280", "qcom,kpss-wdt";
+ reg = <0 0x17c10000 0 0x1000>;
+ clocks = <&sleep_clk>;
+- interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>;
++ interrupts = <GIC_SPI 0 IRQ_TYPE_EDGE_RISING>;
+ };
+
+ timer@17c20000 {
+diff --git a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
+index 5ce270f0b2ec1..62877311e5c24 100644
+--- a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
++++ b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
+@@ -55,8 +55,8 @@
+ user4 {
+ label = "green:user4";
+ gpios = <&pm8998_gpio 13 GPIO_ACTIVE_HIGH>;
+- linux,default-trigger = "panic-indicator";
+ default-state = "off";
++ panic-indicator;
+ };
+
+ wlan {
+diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
+index 6a0e30cbf88f1..5a78ee8622c94 100644
+--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
++++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
+@@ -4725,7 +4725,7 @@
+ compatible = "qcom,apss-wdt-sdm845", "qcom,kpss-wdt";
+ reg = <0 0x17980000 0 0x1000>;
+ clocks = <&sleep_clk>;
+- interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>;
++ interrupts = <GIC_SPI 0 IRQ_TYPE_EDGE_RISING>;
+ };
+
+ apss_shared: mailbox@17990000 {
+diff --git a/arch/arm64/boot/dts/qcom/sm8150-hdk.dts b/arch/arm64/boot/dts/qcom/sm8150-hdk.dts
+index 335aa0753fc02..716e964946ede 100644
+--- a/arch/arm64/boot/dts/qcom/sm8150-hdk.dts
++++ b/arch/arm64/boot/dts/qcom/sm8150-hdk.dts
+@@ -126,8 +126,6 @@
+ vdda_sp_sensor:
+ vdda_ufs_2ln_core_1:
+ vdda_ufs_2ln_core_2:
+- vdda_usb_ss_dp_core_1:
+- vdda_usb_ss_dp_core_2:
+ vdda_qlink_lv:
+ vdda_qlink_lv_ck:
+ vreg_l5a_0p875: ldo5 {
+@@ -209,6 +207,12 @@
+ regulator-max-microvolt = <3008000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
++
++ vreg_l18a_0p8: ldo18 {
++ regulator-min-microvolt = <880000>;
++ regulator-max-microvolt = <880000>;
++ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
++ };
+ };
+
+ pm8150l-rpmh-regulators {
+@@ -441,13 +445,13 @@
+ &usb_1_qmpphy {
+ status = "okay";
+ vdda-phy-supply = <&vreg_l3c_1p2>;
+- vdda-pll-supply = <&vdda_usb_ss_dp_core_1>;
++ vdda-pll-supply = <&vreg_l18a_0p8>;
+ };
+
+ &usb_2_qmpphy {
+ status = "okay";
+ vdda-phy-supply = <&vreg_l3c_1p2>;
+- vdda-pll-supply = <&vdda_usb_ss_dp_core_1>;
++ vdda-pll-supply = <&vreg_l5a_0p875>;
+ };
+
+ &usb_1 {
+diff --git a/arch/arm64/boot/dts/qcom/sm8150.dtsi b/arch/arm64/boot/dts/qcom/sm8150.dtsi
+index e8cb20c4cbf22..c6b6064e6b12e 100644
+--- a/arch/arm64/boot/dts/qcom/sm8150.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8150.dtsi
+@@ -3359,7 +3359,7 @@
+ compatible = "qcom,apss-wdt-sm8150", "qcom,kpss-wdt";
+ reg = <0 0x17c10000 0 0x1000>;
+ clocks = <&sleep_clk>;
+- interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>;
++ interrupts = <GIC_SPI 0 IRQ_TYPE_EDGE_RISING>;
+ };
+
+ timer@17c20000 {
+diff --git a/arch/arm64/boot/dts/qcom/sm8250.dtsi b/arch/arm64/boot/dts/qcom/sm8250.dtsi
+index 5d6551e1fcd8d..8880e9cbc9743 100644
+--- a/arch/arm64/boot/dts/qcom/sm8250.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8250.dtsi
+@@ -3947,7 +3947,7 @@
+ compatible = "qcom,apss-wdt-sm8250", "qcom,kpss-wdt";
+ reg = <0 0x17c10000 0 0x1000>;
+ clocks = <&sleep_clk>;
+- interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>;
++ interrupts = <GIC_SPI 0 IRQ_TYPE_EDGE_RISING>;
+ };
+
+ timer@17c20000 {
+diff --git a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi
+index 4f232f575ab2a..b729d2dee209e 100644
+--- a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi
++++ b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi
+@@ -855,7 +855,7 @@
+ assigned-clocks = <&k3_clks 67 2>;
+ assigned-clock-parents = <&k3_clks 67 5>;
+
+- interrupts = <GIC_SPI 166 IRQ_TYPE_EDGE_RISING>;
++ interrupts = <GIC_SPI 166 IRQ_TYPE_LEVEL_HIGH>;
+
+ dma-coherent;
+
+diff --git a/arch/arm64/kvm/vgic/vgic-its.c b/arch/arm64/kvm/vgic/vgic-its.c
+index 1d534283378a3..8e3a27fa2ffbc 100644
+--- a/arch/arm64/kvm/vgic/vgic-its.c
++++ b/arch/arm64/kvm/vgic/vgic-its.c
+@@ -584,7 +584,11 @@ static struct vgic_irq *vgic_its_check_cache(struct kvm *kvm, phys_addr_t db,
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
++
+ irq = __vgic_its_check_cache(dist, db, devid, eventid);
++ if (irq)
++ vgic_get_irq_kref(irq);
++
+ raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
+
+ return irq;
+@@ -763,6 +767,7 @@ int vgic_its_inject_cached_translation(struct kvm *kvm, struct kvm_msi *msi)
+ raw_spin_lock_irqsave(&irq->irq_lock, flags);
+ irq->pending_latch = true;
+ vgic_queue_irq_unlock(kvm, irq, flags);
++ vgic_put_irq(kvm, irq);
+
+ return 0;
+ }
+diff --git a/arch/arm64/kvm/vgic/vgic-mmio-v3.c b/arch/arm64/kvm/vgic/vgic-mmio-v3.c
+index b94a6d1de35df..fd1fa2561520e 100644
+--- a/arch/arm64/kvm/vgic/vgic-mmio-v3.c
++++ b/arch/arm64/kvm/vgic/vgic-mmio-v3.c
+@@ -363,19 +363,26 @@ static int vgic_v3_uaccess_write_pending(struct kvm_vcpu *vcpu,
+ struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+
+ raw_spin_lock_irqsave(&irq->irq_lock, flags);
+- if (test_bit(i, &val)) {
+- /*
+- * pending_latch is set irrespective of irq type
+- * (level or edge) to avoid dependency that VM should
+- * restore irq config before pending info.
+- */
+- irq->pending_latch = true;
+- vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
+- } else {
++
++ /*
++ * pending_latch is set irrespective of irq type
++ * (level or edge) to avoid dependency that VM should
++ * restore irq config before pending info.
++ */
++ irq->pending_latch = test_bit(i, &val);
++
++ if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
++ irq_set_irqchip_state(irq->host_irq,
++ IRQCHIP_STATE_PENDING,
++ irq->pending_latch);
+ irq->pending_latch = false;
+- raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
+ }
+
++ if (irq->pending_latch)
++ vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
++ else
++ raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
++
+ vgic_put_irq(vcpu->kvm, irq);
+ }
+
+diff --git a/arch/mips/alchemy/devboards/db1200.c b/arch/mips/alchemy/devboards/db1200.c
+index f521874ebb07b..67f067706af27 100644
+--- a/arch/mips/alchemy/devboards/db1200.c
++++ b/arch/mips/alchemy/devboards/db1200.c
+@@ -847,7 +847,7 @@ int __init db1200_dev_setup(void)
+ i2c_register_board_info(0, db1200_i2c_devs,
+ ARRAY_SIZE(db1200_i2c_devs));
+ spi_register_board_info(db1200_spi_devs,
+- ARRAY_SIZE(db1200_i2c_devs));
++ ARRAY_SIZE(db1200_spi_devs));
+
+ /* SWITCHES: S6.8 I2C/SPI selector (OFF=I2C ON=SPI)
+ * S6.7 AC97/I2S selector (OFF=AC97 ON=I2S)
+diff --git a/arch/mips/alchemy/devboards/db1550.c b/arch/mips/alchemy/devboards/db1550.c
+index 752b93d91ac9a..06811a5db71d7 100644
+--- a/arch/mips/alchemy/devboards/db1550.c
++++ b/arch/mips/alchemy/devboards/db1550.c
+@@ -588,7 +588,7 @@ int __init db1550_dev_setup(void)
+ i2c_register_board_info(0, db1550_i2c_devs,
+ ARRAY_SIZE(db1550_i2c_devs));
+ spi_register_board_info(db1550_spi_devs,
+- ARRAY_SIZE(db1550_i2c_devs));
++ ARRAY_SIZE(db1550_spi_devs));
+
+ c = clk_get(NULL, "psc0_intclk");
+ if (!IS_ERR(c)) {
+diff --git a/arch/mips/boot/dts/loongson/loongson64-2k1000.dtsi b/arch/mips/boot/dts/loongson/loongson64-2k1000.dtsi
+index bfc3d3243ee7f..d73d8f4fd78e6 100644
+--- a/arch/mips/boot/dts/loongson/loongson64-2k1000.dtsi
++++ b/arch/mips/boot/dts/loongson/loongson64-2k1000.dtsi
+@@ -118,8 +118,7 @@
+ compatible = "pci0014,7a03.0",
+ "pci0014,7a03",
+ "pciclass0c0320",
+- "pciclass0c03",
+- "loongson, pci-gmac";
++ "pciclass0c03";
+
+ reg = <0x1800 0x0 0x0 0x0 0x0>;
+ interrupts = <12 IRQ_TYPE_LEVEL_LOW>,
+diff --git a/arch/mips/boot/dts/loongson/ls7a-pch.dtsi b/arch/mips/boot/dts/loongson/ls7a-pch.dtsi
+index 2f45fce2cdc4a..ed99ee316febb 100644
+--- a/arch/mips/boot/dts/loongson/ls7a-pch.dtsi
++++ b/arch/mips/boot/dts/loongson/ls7a-pch.dtsi
+@@ -186,8 +186,7 @@
+ compatible = "pci0014,7a03.0",
+ "pci0014,7a03",
+ "pciclass020000",
+- "pciclass0200",
+- "loongson, pci-gmac";
++ "pciclass0200";
+
+ reg = <0x1800 0x0 0x0 0x0 0x0>;
+ interrupts = <12 IRQ_TYPE_LEVEL_HIGH>,
+diff --git a/arch/mips/include/asm/dmi.h b/arch/mips/include/asm/dmi.h
+index 27415a288adf5..dc397f630c660 100644
+--- a/arch/mips/include/asm/dmi.h
++++ b/arch/mips/include/asm/dmi.h
+@@ -5,7 +5,7 @@
+ #include <linux/io.h>
+ #include <linux/memblock.h>
+
+-#define dmi_early_remap(x, l) ioremap_cache(x, l)
++#define dmi_early_remap(x, l) ioremap(x, l)
+ #define dmi_early_unmap(x, l) iounmap(x)
+ #define dmi_remap(x, l) ioremap_cache(x, l)
+ #define dmi_unmap(x) iounmap(x)
+diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
+index 145f905fb3623..9d53498682d2e 100644
+--- a/arch/mips/kernel/setup.c
++++ b/arch/mips/kernel/setup.c
+@@ -324,11 +324,11 @@ static void __init bootmem_init(void)
+ panic("Incorrect memory mapping !!!");
+
+ if (max_pfn > PFN_DOWN(HIGHMEM_START)) {
++ max_low_pfn = PFN_DOWN(HIGHMEM_START);
+ #ifdef CONFIG_HIGHMEM
+- highstart_pfn = PFN_DOWN(HIGHMEM_START);
++ highstart_pfn = max_low_pfn;
+ highend_pfn = max_pfn;
+ #else
+- max_low_pfn = PFN_DOWN(HIGHMEM_START);
+ max_pfn = max_low_pfn;
+ #endif
+ }
+diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
+index 3dc75040a7563..11a0ccb47be32 100644
+--- a/arch/powerpc/Makefile
++++ b/arch/powerpc/Makefile
+@@ -44,18 +44,13 @@ machine-$(CONFIG_PPC64) += 64
+ machine-$(CONFIG_CPU_LITTLE_ENDIAN) += le
+ UTS_MACHINE := $(subst $(space),,$(machine-y))
+
+-# XXX This needs to be before we override LD below
+-ifdef CONFIG_PPC32
+-KBUILD_LDFLAGS_MODULE += arch/powerpc/lib/crtsavres.o
+-else
+-ifeq ($(call ld-ifversion, -ge, 22500, y),y)
++ifeq ($(CONFIG_PPC64)$(CONFIG_LD_IS_BFD),yy)
+ # Have the linker provide sfpr if possible.
+ # There is a corresponding test in arch/powerpc/lib/Makefile
+ KBUILD_LDFLAGS_MODULE += --save-restore-funcs
+ else
+ KBUILD_LDFLAGS_MODULE += arch/powerpc/lib/crtsavres.o
+ endif
+-endif
+
+ ifdef CONFIG_CPU_LITTLE_ENDIAN
+ KBUILD_CFLAGS += -mlittle-endian
+@@ -434,16 +429,7 @@ endif # CONFIG_PPC32
+ endif # CONFIG_SMP
+
+ PHONY += checkbin
+-# Check toolchain versions:
+-# - gcc-4.6 is the minimum kernel-wide version so nothing required.
+ checkbin:
+- @if test "x${CONFIG_CPU_LITTLE_ENDIAN}" = "xy" \
+- && $(LD) --version | head -1 | grep ' 2\.24$$' >/dev/null ; then \
+- echo -n '*** binutils 2.24 miscompiles weak symbols ' ; \
+- echo 'in some circumstances.' ; \
+- echo -n '*** Please use a different binutils version.' ; \
+- false ; \
+- fi
+ @if test "x${CONFIG_FTRACE_MCOUNT_USE_RECORDMCOUNT}" = "xy" -a \
+ "x${CONFIG_LD_IS_BFD}" = "xy" -a \
+ "${CONFIG_LD_VERSION}" = "23700" ; then \
+diff --git a/arch/powerpc/boot/crt0.S b/arch/powerpc/boot/crt0.S
+index e8f10a5996593..feadee18e2715 100644
+--- a/arch/powerpc/boot/crt0.S
++++ b/arch/powerpc/boot/crt0.S
+@@ -28,7 +28,7 @@ p_etext: .8byte _etext
+ p_bss_start: .8byte __bss_start
+ p_end: .8byte _end
+
+-p_toc: .8byte __toc_start + 0x8000 - p_base
++p_toc: .8byte .TOC. - p_base
+ p_dyn: .8byte __dynamic_start - p_base
+ p_rela: .8byte __rela_dyn_start - p_base
+ p_prom: .8byte 0
+diff --git a/arch/powerpc/boot/zImage.lds.S b/arch/powerpc/boot/zImage.lds.S
+index d6f0728656273..d65cd55a6f386 100644
+--- a/arch/powerpc/boot/zImage.lds.S
++++ b/arch/powerpc/boot/zImage.lds.S
+@@ -36,12 +36,9 @@ SECTIONS
+ }
+
+ #ifdef CONFIG_PPC64_BOOT_WRAPPER
+- . = ALIGN(256);
+- .got :
++ .got : ALIGN(256)
+ {
+- __toc_start = .;
+- *(.got)
+- *(.toc)
++ *(.got .toc)
+ }
+ #endif
+
+diff --git a/arch/powerpc/include/asm/sections.h b/arch/powerpc/include/asm/sections.h
+index e92d39c0cd1d9..34d82ae1774cb 100644
+--- a/arch/powerpc/include/asm/sections.h
++++ b/arch/powerpc/include/asm/sections.h
+@@ -26,16 +26,16 @@ extern char start_virt_trampolines[];
+ extern char end_virt_trampolines[];
+ #endif
+
++/*
++ * This assumes the kernel is never compiled -mcmodel=small or
++ * the total .toc is always less than 64k.
++ */
+ static inline unsigned long kernel_toc_addr(void)
+ {
+- /* Defined by the linker, see vmlinux.lds.S */
+- extern unsigned long __toc_start;
+-
+- /*
+- * The TOC register (r2) points 32kB into the TOC, so that 64kB of
+- * the TOC can be addressed using a single machine instruction.
+- */
+- return (unsigned long)(&__toc_start) + 0x8000UL;
++ unsigned long toc_ptr;
++
++ asm volatile("mr %0, 2" : "=r" (toc_ptr));
++ return toc_ptr;
+ }
+
+ static inline int overlaps_interrupt_vector_text(unsigned long start,
+diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
+index f17ae20837337..a08c050ff6459 100644
+--- a/arch/powerpc/kernel/head_64.S
++++ b/arch/powerpc/kernel/head_64.S
+@@ -904,7 +904,7 @@ _GLOBAL(relative_toc)
+ blr
+
+ .balign 8
+-p_toc: .8byte __toc_start + 0x8000 - 0b
++p_toc: .8byte .TOC. - 0b
+
+ /*
+ * This is where the main kernel code starts.
+diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
+index d4531902d8c67..70bf67ed87b52 100644
+--- a/arch/powerpc/kernel/vmlinux.lds.S
++++ b/arch/powerpc/kernel/vmlinux.lds.S
+@@ -148,6 +148,12 @@ SECTIONS
+ SOFT_MASK_TABLE(8)
+ RESTART_TABLE(8)
+
++ .opd : AT(ADDR(.opd) - LOAD_OFFSET) {
++ __start_opd = .;
++ KEEP(*(.opd))
++ __end_opd = .;
++ }
++
+ . = ALIGN(8);
+ __stf_entry_barrier_fixup : AT(ADDR(__stf_entry_barrier_fixup) - LOAD_OFFSET) {
+ __start___stf_entry_barrier_fixup = .;
+@@ -346,21 +352,13 @@ SECTIONS
+ *(.branch_lt)
+ }
+
+- .opd : AT(ADDR(.opd) - LOAD_OFFSET) {
+- __start_opd = .;
+- KEEP(*(.opd))
+- __end_opd = .;
+- }
+-
+- . = ALIGN(256);
+- .got : AT(ADDR(.got) - LOAD_OFFSET) {
+- __toc_start = .;
++ .got : AT(ADDR(.got) - LOAD_OFFSET) ALIGN(256) {
++ *(.got)
+ #ifndef CONFIG_RELOCATABLE
+ __prom_init_toc_start = .;
+- arch/powerpc/kernel/prom_init.o*(.toc .got)
++ arch/powerpc/kernel/prom_init.o*(.toc)
+ __prom_init_toc_end = .;
+ #endif
+- *(.got)
+ *(.toc)
+ }
+ #endif
+diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
+index 54be64203b2ab..6448de85f7380 100644
+--- a/arch/powerpc/lib/Makefile
++++ b/arch/powerpc/lib/Makefile
+@@ -37,8 +37,8 @@ obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
+ # 64-bit linker creates .sfpr on demand for final link (vmlinux),
+ # so it is only needed for modules, and only for older linkers which
+ # do not support --save-restore-funcs
+-ifeq ($(call ld-ifversion, -lt, 22500, y),y)
+-extra-$(CONFIG_PPC64) += crtsavres.o
++ifndef CONFIG_LD_IS_BFD
++always-$(CONFIG_PPC64) += crtsavres.o
+ endif
+
+ obj-$(CONFIG_PPC_BOOK3S_64) += copyuser_power7.o copypage_power7.o \
+diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c
+index 55a853edc3bea..e2f6fe259aa6f 100644
+--- a/arch/powerpc/perf/imc-pmu.c
++++ b/arch/powerpc/perf/imc-pmu.c
+@@ -292,6 +292,8 @@ static int update_events_in_group(struct device_node *node, struct imc_pmu *pmu)
+ attr_group->attrs = attrs;
+ do {
+ ev_val_str = kasprintf(GFP_KERNEL, "event=0x%x", pmu->events[i].value);
++ if (!ev_val_str)
++ continue;
+ dev_str = device_str_attr_create(pmu->events[i].name, ev_val_str);
+ if (!dev_str)
+ continue;
+@@ -299,6 +301,8 @@ static int update_events_in_group(struct device_node *node, struct imc_pmu *pmu)
+ attrs[j++] = dev_str;
+ if (pmu->events[i].scale) {
+ ev_scale_str = kasprintf(GFP_KERNEL, "%s.scale", pmu->events[i].name);
++ if (!ev_scale_str)
++ continue;
+ dev_str = device_str_attr_create(ev_scale_str, pmu->events[i].scale);
+ if (!dev_str)
+ continue;
+@@ -308,6 +312,8 @@ static int update_events_in_group(struct device_node *node, struct imc_pmu *pmu)
+
+ if (pmu->events[i].unit) {
+ ev_unit_str = kasprintf(GFP_KERNEL, "%s.unit", pmu->events[i].name);
++ if (!ev_unit_str)
++ continue;
+ dev_str = device_str_attr_create(ev_unit_str, pmu->events[i].unit);
+ if (!dev_str)
+ continue;
+diff --git a/arch/powerpc/platforms/44x/Kconfig b/arch/powerpc/platforms/44x/Kconfig
+index 83975ef50975e..66c54cef3cfa5 100644
+--- a/arch/powerpc/platforms/44x/Kconfig
++++ b/arch/powerpc/platforms/44x/Kconfig
+@@ -177,6 +177,7 @@ config ISS4xx
+ config CURRITUCK
+ bool "IBM Currituck (476fpe) Support"
+ depends on PPC_47x
++ select I2C
+ select SWIOTLB
+ select 476FPE
+ select FORCE_PCI
+diff --git a/arch/powerpc/platforms/powernv/opal-irqchip.c b/arch/powerpc/platforms/powernv/opal-irqchip.c
+index d55652b5f6fa4..391f505352007 100644
+--- a/arch/powerpc/platforms/powernv/opal-irqchip.c
++++ b/arch/powerpc/platforms/powernv/opal-irqchip.c
+@@ -275,6 +275,8 @@ int __init opal_event_init(void)
+ else
+ name = kasprintf(GFP_KERNEL, "opal");
+
++ if (!name)
++ continue;
+ /* Install interrupt handler */
+ rc = request_irq(r->start, opal_interrupt, r->flags & IRQD_TRIGGER_MASK,
+ name, NULL);
+diff --git a/arch/powerpc/platforms/powernv/opal-powercap.c b/arch/powerpc/platforms/powernv/opal-powercap.c
+index c16d44f6f1d12..ce9ec3962cef0 100644
+--- a/arch/powerpc/platforms/powernv/opal-powercap.c
++++ b/arch/powerpc/platforms/powernv/opal-powercap.c
+@@ -196,6 +196,12 @@ void __init opal_powercap_init(void)
+
+ j = 0;
+ pcaps[i].pg.name = kasprintf(GFP_KERNEL, "%pOFn", node);
++ if (!pcaps[i].pg.name) {
++ kfree(pcaps[i].pattrs);
++ kfree(pcaps[i].pg.attrs);
++ goto out_pcaps_pattrs;
++ }
++
+ if (has_min) {
+ powercap_add_attr(min, "powercap-min",
+ &pcaps[i].pattrs[j]);
+diff --git a/arch/powerpc/platforms/powernv/opal-xscom.c b/arch/powerpc/platforms/powernv/opal-xscom.c
+index 6b4eed2ef4fa9..f67235d1ba2c5 100644
+--- a/arch/powerpc/platforms/powernv/opal-xscom.c
++++ b/arch/powerpc/platforms/powernv/opal-xscom.c
+@@ -165,6 +165,11 @@ static int scom_debug_init_one(struct dentry *root, struct device_node *dn,
+ ent->chip = chip;
+ snprintf(ent->name, 16, "%08x", chip);
+ ent->path.data = (void *)kasprintf(GFP_KERNEL, "%pOF", dn);
++ if (!ent->path.data) {
++ kfree(ent);
++ return -ENOMEM;
++ }
++
+ ent->path.size = strlen((char *)ent->path.data);
+
+ dir = debugfs_create_dir(ent->name, root);
+diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
+index 91cf23495ccb5..642951c0d5f5c 100644
+--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
++++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
+@@ -501,14 +501,15 @@ static int dlpar_memory_remove_by_index(u32 drc_index)
+ }
+ }
+
+- if (!lmb_found)
++ if (!lmb_found) {
++ pr_debug("Failed to look up LMB for drc index %x\n", drc_index);
+ rc = -EINVAL;
+-
+- if (rc)
++ } else if (rc) {
+ pr_debug("Failed to hot-remove memory at %llx\n",
+ lmb->base_addr);
+- else
++ } else {
+ pr_debug("Memory at %llx was hot-removed\n", lmb->base_addr);
++ }
+
+ return rc;
+ }
+diff --git a/arch/riscv/include/asm/sections.h b/arch/riscv/include/asm/sections.h
+index 32336e8a17cb0..a393d5035c543 100644
+--- a/arch/riscv/include/asm/sections.h
++++ b/arch/riscv/include/asm/sections.h
+@@ -13,6 +13,7 @@ extern char _start_kernel[];
+ extern char __init_data_begin[], __init_data_end[];
+ extern char __init_text_begin[], __init_text_end[];
+ extern char __alt_start[], __alt_end[];
++extern char __exittext_begin[], __exittext_end[];
+
+ static inline bool is_va_kernel_text(uintptr_t va)
+ {
+diff --git a/arch/riscv/kernel/module.c b/arch/riscv/kernel/module.c
+index 4a48287513c37..24c3883c80d0a 100644
+--- a/arch/riscv/kernel/module.c
++++ b/arch/riscv/kernel/module.c
+@@ -423,7 +423,8 @@ void *module_alloc(unsigned long size)
+ {
+ return __vmalloc_node_range(size, 1, MODULES_VADDR,
+ MODULES_END, GFP_KERNEL,
+- PAGE_KERNEL, 0, NUMA_NO_NODE,
++ PAGE_KERNEL, VM_FLUSH_RESET_PERMS,
++ NUMA_NO_NODE,
+ __builtin_return_address(0));
+ }
+ #endif
+diff --git a/arch/riscv/kernel/patch.c b/arch/riscv/kernel/patch.c
+index e099961453cca..160e5c1caa9c4 100644
+--- a/arch/riscv/kernel/patch.c
++++ b/arch/riscv/kernel/patch.c
+@@ -13,6 +13,7 @@
+ #include <asm/fixmap.h>
+ #include <asm/ftrace.h>
+ #include <asm/patch.h>
++#include <asm/sections.h>
+
+ struct patch_insn {
+ void *addr;
+@@ -23,6 +24,14 @@ struct patch_insn {
+ int riscv_patch_in_stop_machine = false;
+
+ #ifdef CONFIG_MMU
++
++static inline bool is_kernel_exittext(uintptr_t addr)
++{
++ return system_state < SYSTEM_RUNNING &&
++ addr >= (uintptr_t)__exittext_begin &&
++ addr < (uintptr_t)__exittext_end;
++}
++
+ /*
+ * The fix_to_virt(, idx) needs a const value (not a dynamic variable of
+ * reg-a0) or BUILD_BUG_ON failed with "idx >= __end_of_fixed_addresses".
+@@ -33,7 +42,7 @@ static __always_inline void *patch_map(void *addr, const unsigned int fixmap)
+ uintptr_t uintaddr = (uintptr_t) addr;
+ struct page *page;
+
+- if (core_kernel_text(uintaddr))
++ if (core_kernel_text(uintaddr) || is_kernel_exittext(uintaddr))
+ page = phys_to_page(__pa_symbol(addr));
+ else if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
+ page = vmalloc_to_page(addr);
+diff --git a/arch/riscv/kernel/vmlinux-xip.lds.S b/arch/riscv/kernel/vmlinux-xip.lds.S
+index 9c9f35091ef04..5407eafa166b4 100644
+--- a/arch/riscv/kernel/vmlinux-xip.lds.S
++++ b/arch/riscv/kernel/vmlinux-xip.lds.S
+@@ -29,10 +29,12 @@ SECTIONS
+ HEAD_TEXT_SECTION
+ INIT_TEXT_SECTION(PAGE_SIZE)
+ /* we have to discard exit text and such at runtime, not link time */
++ __exittext_begin = .;
+ .exit.text :
+ {
+ EXIT_TEXT
+ }
++ __exittext_end = .;
+
+ .text : {
+ _text = .;
+diff --git a/arch/riscv/kernel/vmlinux.lds.S b/arch/riscv/kernel/vmlinux.lds.S
+index 5104f3a871e38..adcfe9c6a7bff 100644
+--- a/arch/riscv/kernel/vmlinux.lds.S
++++ b/arch/riscv/kernel/vmlinux.lds.S
+@@ -73,10 +73,12 @@ SECTIONS
+ __soc_builtin_dtb_table_end = .;
+ }
+ /* we have to discard exit text and such at runtime, not link time */
++ __exittext_begin = .;
+ .exit.text :
+ {
+ EXIT_TEXT
+ }
++ __exittext_end = .;
+
+ __init_text_end = .;
+ . = ALIGN(SECTION_ALIGN);
+diff --git a/arch/s390/include/asm/pci_io.h b/arch/s390/include/asm/pci_io.h
+index 287bb88f76986..2686bee800e3d 100644
+--- a/arch/s390/include/asm/pci_io.h
++++ b/arch/s390/include/asm/pci_io.h
+@@ -11,6 +11,8 @@
+ /* I/O size constraints */
+ #define ZPCI_MAX_READ_SIZE 8
+ #define ZPCI_MAX_WRITE_SIZE 128
++#define ZPCI_BOUNDARY_SIZE (1 << 12)
++#define ZPCI_BOUNDARY_MASK (ZPCI_BOUNDARY_SIZE - 1)
+
+ /* I/O Map */
+ #define ZPCI_IOMAP_SHIFT 48
+@@ -125,16 +127,18 @@ out:
+ int zpci_write_block(volatile void __iomem *dst, const void *src,
+ unsigned long len);
+
+-static inline u8 zpci_get_max_write_size(u64 src, u64 dst, int len, int max)
++static inline int zpci_get_max_io_size(u64 src, u64 dst, int len, int max)
+ {
+- int count = len > max ? max : len, size = 1;
++ int offset = dst & ZPCI_BOUNDARY_MASK;
++ int size;
+
+- while (!(src & 0x1) && !(dst & 0x1) && ((size << 1) <= count)) {
+- dst = dst >> 1;
+- src = src >> 1;
+- size = size << 1;
+- }
+- return size;
++ size = min3(len, ZPCI_BOUNDARY_SIZE - offset, max);
++ if (IS_ALIGNED(src, 8) && IS_ALIGNED(dst, 8) && IS_ALIGNED(size, 8))
++ return size;
++
++ if (size >= 8)
++ return 8;
++ return rounddown_pow_of_two(size);
+ }
+
+ static inline int zpci_memcpy_fromio(void *dst,
+@@ -144,9 +148,9 @@ static inline int zpci_memcpy_fromio(void *dst,
+ int size, rc = 0;
+
+ while (n > 0) {
+- size = zpci_get_max_write_size((u64 __force) src,
+- (u64) dst, n,
+- ZPCI_MAX_READ_SIZE);
++ size = zpci_get_max_io_size((u64 __force) src,
++ (u64) dst, n,
++ ZPCI_MAX_READ_SIZE);
+ rc = zpci_read_single(dst, src, size);
+ if (rc)
+ break;
+@@ -166,9 +170,9 @@ static inline int zpci_memcpy_toio(volatile void __iomem *dst,
+ return -EINVAL;
+
+ while (n > 0) {
+- size = zpci_get_max_write_size((u64 __force) dst,
+- (u64) src, n,
+- ZPCI_MAX_WRITE_SIZE);
++ size = zpci_get_max_io_size((u64 __force) dst,
++ (u64) src, n,
++ ZPCI_MAX_WRITE_SIZE);
+ if (size > 8) /* main path */
+ rc = zpci_write_block(dst, src, size);
+ else
+diff --git a/arch/s390/pci/pci_mmio.c b/arch/s390/pci/pci_mmio.c
+index b94163ee5632c..7e4cb95a431c9 100644
+--- a/arch/s390/pci/pci_mmio.c
++++ b/arch/s390/pci/pci_mmio.c
+@@ -96,9 +96,9 @@ static inline int __memcpy_toio_inuser(void __iomem *dst,
+ return -EINVAL;
+
+ while (n > 0) {
+- size = zpci_get_max_write_size((u64 __force) dst,
+- (u64 __force) src, n,
+- ZPCI_MAX_WRITE_SIZE);
++ size = zpci_get_max_io_size((u64 __force) dst,
++ (u64 __force) src, n,
++ ZPCI_MAX_WRITE_SIZE);
+ if (size > 8) /* main path */
+ rc = __pcistb_mio_inuser(dst, src, size, &status);
+ else
+@@ -241,9 +241,9 @@ static inline int __memcpy_fromio_inuser(void __user *dst,
+ u8 status;
+
+ while (n > 0) {
+- size = zpci_get_max_write_size((u64 __force) src,
+- (u64 __force) dst, n,
+- ZPCI_MAX_READ_SIZE);
++ size = zpci_get_max_io_size((u64 __force) src,
++ (u64 __force) dst, n,
++ ZPCI_MAX_READ_SIZE);
+ rc = __pcilg_mio_inuser(dst, src, size, &status);
+ if (rc)
+ break;
+diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
+index 73c74b961d0fd..1d986b353b3e5 100644
+--- a/arch/x86/kernel/kvmclock.c
++++ b/arch/x86/kernel/kvmclock.c
+@@ -24,8 +24,8 @@
+
+ static int kvmclock __initdata = 1;
+ static int kvmclock_vsyscall __initdata = 1;
+-static int msr_kvm_system_time __ro_after_init = MSR_KVM_SYSTEM_TIME;
+-static int msr_kvm_wall_clock __ro_after_init = MSR_KVM_WALL_CLOCK;
++static int msr_kvm_system_time __ro_after_init;
++static int msr_kvm_wall_clock __ro_after_init;
+ static u64 kvm_sched_clock_offset __ro_after_init;
+
+ static int __init parse_no_kvmclock(char *arg)
+@@ -195,7 +195,8 @@ static void kvm_setup_secondary_clock(void)
+
+ void kvmclock_disable(void)
+ {
+- native_write_msr(msr_kvm_system_time, 0, 0);
++ if (msr_kvm_system_time)
++ native_write_msr(msr_kvm_system_time, 0, 0);
+ }
+
+ static void __init kvmclock_init_mem(void)
+@@ -291,7 +292,10 @@ void __init kvmclock_init(void)
+ if (kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE2)) {
+ msr_kvm_system_time = MSR_KVM_SYSTEM_TIME_NEW;
+ msr_kvm_wall_clock = MSR_KVM_WALL_CLOCK_NEW;
+- } else if (!kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE)) {
++ } else if (kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE)) {
++ msr_kvm_system_time = MSR_KVM_SYSTEM_TIME;
++ msr_kvm_wall_clock = MSR_KVM_WALL_CLOCK;
++ } else {
+ return;
+ }
+
+diff --git a/arch/x86/lib/misc.c b/arch/x86/lib/misc.c
+index a018ec4fba53e..c97be9a1430a0 100644
+--- a/arch/x86/lib/misc.c
++++ b/arch/x86/lib/misc.c
+@@ -6,7 +6,7 @@
+ */
+ int num_digits(int val)
+ {
+- int m = 10;
++ long long m = 10;
+ int d = 1;
+
+ if (val < 0) {
+diff --git a/block/bio.c b/block/bio.c
+index ba9120d4fe499..a0080dc55c954 100644
+--- a/block/bio.c
++++ b/block/bio.c
+@@ -1026,7 +1026,7 @@ void bio_release_pages(struct bio *bio, bool mark_dirty)
+ return;
+
+ bio_for_each_segment_all(bvec, bio, iter_all) {
+- if (mark_dirty && !PageCompound(bvec->bv_page))
++ if (mark_dirty)
+ set_page_dirty_lock(bvec->bv_page);
+ put_page(bvec->bv_page);
+ }
+@@ -1345,8 +1345,7 @@ void bio_set_pages_dirty(struct bio *bio)
+ struct bvec_iter_all iter_all;
+
+ bio_for_each_segment_all(bvec, bio, iter_all) {
+- if (!PageCompound(bvec->bv_page))
+- set_page_dirty_lock(bvec->bv_page);
++ set_page_dirty_lock(bvec->bv_page);
+ }
+ }
+
+@@ -1394,7 +1393,7 @@ void bio_check_pages_dirty(struct bio *bio)
+ struct bvec_iter_all iter_all;
+
+ bio_for_each_segment_all(bvec, bio, iter_all) {
+- if (!PageDirty(bvec->bv_page) && !PageCompound(bvec->bv_page))
++ if (!PageDirty(bvec->bv_page))
+ goto defer;
+ }
+
+diff --git a/block/blk-settings.c b/block/blk-settings.c
+index 73a80895e3ae1..959b5c1e6d3b7 100644
+--- a/block/blk-settings.c
++++ b/block/blk-settings.c
+@@ -136,7 +136,7 @@ void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_secto
+ limits->max_hw_sectors = max_hw_sectors;
+
+ max_sectors = min_not_zero(max_hw_sectors, limits->max_dev_sectors);
+- max_sectors = min_t(unsigned int, max_sectors, BLK_DEF_MAX_SECTORS);
++ max_sectors = min(max_sectors, BLK_DEF_MAX_SECTORS);
+ max_sectors = round_down(max_sectors,
+ limits->logical_block_size >> SECTOR_SHIFT);
+ limits->max_sectors = max_sectors;
+diff --git a/block/genhd.c b/block/genhd.c
+index 6123f13e148e0..4d28f1d5f9b0e 100644
+--- a/block/genhd.c
++++ b/block/genhd.c
+@@ -421,7 +421,9 @@ int device_add_disk(struct device *parent, struct gendisk *disk,
+ DISK_MAX_PARTS);
+ disk->minors = DISK_MAX_PARTS;
+ }
+- if (disk->first_minor + disk->minors > MINORMASK + 1)
++ if (disk->first_minor > MINORMASK ||
++ disk->minors > MINORMASK + 1 ||
++ disk->first_minor + disk->minors > MINORMASK + 1)
+ return -EINVAL;
+ } else {
+ if (WARN_ON(disk->minors))
+@@ -538,6 +540,7 @@ out_del_integrity:
+ out_del_block_link:
+ if (!sysfs_deprecated)
+ sysfs_remove_link(block_depr, dev_name(ddev));
++ pm_runtime_set_memalloc_noio(ddev, false);
+ out_device_del:
+ device_del(ddev);
+ out_free_ext_minor:
+diff --git a/block/ioctl.c b/block/ioctl.c
+index 8f39e413f12a3..0a3cf9aeec7d9 100644
+--- a/block/ioctl.c
++++ b/block/ioctl.c
+@@ -18,7 +18,7 @@ static int blkpg_do_ioctl(struct block_device *bdev,
+ {
+ struct gendisk *disk = bdev->bd_disk;
+ struct blkpg_partition p;
+- long long start, length;
++ sector_t start, length;
+
+ if (disk->flags & GENHD_FL_NO_PART)
+ return -EINVAL;
+@@ -35,14 +35,17 @@ static int blkpg_do_ioctl(struct block_device *bdev,
+ if (op == BLKPG_DEL_PARTITION)
+ return bdev_del_partition(disk, p.pno);
+
++ if (p.start < 0 || p.length <= 0 || p.start + p.length < 0)
++ return -EINVAL;
++ /* Check that the partition is aligned to the block size */
++ if (!IS_ALIGNED(p.start | p.length, bdev_logical_block_size(bdev)))
++ return -EINVAL;
++
+ start = p.start >> SECTOR_SHIFT;
+ length = p.length >> SECTOR_SHIFT;
+
+ switch (op) {
+ case BLKPG_ADD_PARTITION:
+- /* check if partition is aligned to blocksize */
+- if (p.start & (bdev_logical_block_size(bdev) - 1))
+- return -EINVAL;
+ return bdev_add_partition(disk, p.pno, start, length);
+ case BLKPG_RESIZE_PARTITION:
+ return bdev_resize_partition(disk, p.pno, start, length);
+diff --git a/crypto/af_alg.c b/crypto/af_alg.c
+index 8bd288d2b089b..aa93501e27b95 100644
+--- a/crypto/af_alg.c
++++ b/crypto/af_alg.c
+@@ -1045,9 +1045,13 @@ EXPORT_SYMBOL_GPL(af_alg_sendpage);
+ void af_alg_free_resources(struct af_alg_async_req *areq)
+ {
+ struct sock *sk = areq->sk;
++ struct af_alg_ctx *ctx;
+
+ af_alg_free_areq_sgls(areq);
+ sock_kfree_s(sk, areq, areq->areqlen);
++
++ ctx = alg_sk(sk)->private;
++ ctx->inflight = false;
+ }
+ EXPORT_SYMBOL_GPL(af_alg_free_resources);
+
+@@ -1117,11 +1121,19 @@ EXPORT_SYMBOL_GPL(af_alg_poll);
+ struct af_alg_async_req *af_alg_alloc_areq(struct sock *sk,
+ unsigned int areqlen)
+ {
+- struct af_alg_async_req *areq = sock_kmalloc(sk, areqlen, GFP_KERNEL);
++ struct af_alg_ctx *ctx = alg_sk(sk)->private;
++ struct af_alg_async_req *areq;
++
++ /* Only one AIO request can be in flight. */
++ if (ctx->inflight)
++ return ERR_PTR(-EBUSY);
+
++ areq = sock_kmalloc(sk, areqlen, GFP_KERNEL);
+ if (unlikely(!areq))
+ return ERR_PTR(-ENOMEM);
+
++ ctx->inflight = true;
++
+ areq->areqlen = areqlen;
+ areq->sk = sk;
+ areq->last_rsgl = NULL;
+diff --git a/crypto/scompress.c b/crypto/scompress.c
+index 738f4f8f0f41a..4d6366a444007 100644
+--- a/crypto/scompress.c
++++ b/crypto/scompress.c
+@@ -124,6 +124,7 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
+ struct crypto_scomp *scomp = *tfm_ctx;
+ void **ctx = acomp_request_ctx(req);
+ struct scomp_scratch *scratch;
++ unsigned int dlen;
+ int ret;
+
+ if (!req->src || !req->slen || req->slen > SCOMP_SCRATCH_SIZE)
+@@ -135,6 +136,8 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
+ if (!req->dlen || req->dlen > SCOMP_SCRATCH_SIZE)
+ req->dlen = SCOMP_SCRATCH_SIZE;
+
++ dlen = req->dlen;
++
+ scratch = raw_cpu_ptr(&scomp_scratch);
+ spin_lock(&scratch->lock);
+
+@@ -152,6 +155,9 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
+ ret = -ENOMEM;
+ goto out;
+ }
++ } else if (req->dlen > dlen) {
++ ret = -ENOSPC;
++ goto out;
+ }
+ scatterwalk_map_and_copy(scratch->dst, req->dst, 0, req->dlen,
+ 1);
+diff --git a/drivers/acpi/acpi_extlog.c b/drivers/acpi/acpi_extlog.c
+index e648158368a7d..088db2356998f 100644
+--- a/drivers/acpi/acpi_extlog.c
++++ b/drivers/acpi/acpi_extlog.c
+@@ -145,9 +145,14 @@ static int extlog_print(struct notifier_block *nb, unsigned long val,
+ static u32 err_seq;
+
+ estatus = extlog_elog_entry_check(cpu, bank);
+- if (estatus == NULL || (mce->kflags & MCE_HANDLED_CEC))
++ if (!estatus)
+ return NOTIFY_DONE;
+
++ if (mce->kflags & MCE_HANDLED_CEC) {
++ estatus->block_status = 0;
++ return NOTIFY_DONE;
++ }
++
+ memcpy(elog_buf, (void *)estatus, ELOG_ENTRY_LEN);
+ /* clear record status to enable BIOS to update it again */
+ estatus->block_status = 0;
+diff --git a/drivers/acpi/acpi_lpit.c b/drivers/acpi/acpi_lpit.c
+index 48e5059d67cab..7de59730030c2 100644
+--- a/drivers/acpi/acpi_lpit.c
++++ b/drivers/acpi/acpi_lpit.c
+@@ -98,7 +98,7 @@ static void lpit_update_residency(struct lpit_residency_info *info,
+ struct acpi_lpit_native *lpit_native)
+ {
+ info->frequency = lpit_native->counter_frequency ?
+- lpit_native->counter_frequency : tsc_khz * 1000;
++ lpit_native->counter_frequency : mul_u32_u32(tsc_khz, 1000U);
+ if (!info->frequency)
+ info->frequency = 1;
+
+diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
+index f609f9d62efdd..332befb5f5797 100644
+--- a/drivers/acpi/acpi_lpss.c
++++ b/drivers/acpi/acpi_lpss.c
+@@ -439,8 +439,9 @@ static int register_device_clock(struct acpi_device *adev,
+ if (!clk_name)
+ return -ENOMEM;
+ clk = clk_register_fractional_divider(NULL, clk_name, parent,
++ 0, prv_base, 1, 15, 16, 15,
+ CLK_FRAC_DIVIDER_POWER_OF_TWO_PS,
+- prv_base, 1, 15, 16, 15, 0, NULL);
++ NULL);
+ parent = clk_name;
+
+ clk_name = kasprintf(GFP_KERNEL, "%s-update", devname);
+diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
+index 2b18b51f6351e..61bcdc75bee70 100644
+--- a/drivers/acpi/acpi_video.c
++++ b/drivers/acpi/acpi_video.c
+@@ -1793,12 +1793,12 @@ static void acpi_video_dev_register_backlight(struct acpi_video_device *device)
+ return;
+ count++;
+
+- acpi_get_parent(device->dev->handle, &acpi_parent);
+-
+- pdev = acpi_get_pci_dev(acpi_parent);
+- if (pdev) {
+- parent = &pdev->dev;
+- pci_dev_put(pdev);
++ if (ACPI_SUCCESS(acpi_get_parent(device->dev->handle, &acpi_parent))) {
++ pdev = acpi_get_pci_dev(acpi_parent);
++ if (pdev) {
++ parent = &pdev->dev;
++ pci_dev_put(pdev);
++ }
+ }
+
+ memset(&props, 0, sizeof(struct backlight_properties));
+diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
+index 503fb973faa5c..91628fb41ef85 100644
+--- a/drivers/acpi/resource.c
++++ b/drivers/acpi/resource.c
+@@ -480,6 +480,13 @@ static const struct dmi_system_id maingear_laptop[] = {
+ DMI_MATCH(DMI_BOARD_NAME, "GMxXGxx"),
+ },
+ },
++ {
++ /* TongFang GMxXGxx sold as Eluktronics Inc. RP-15 */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Eluktronics Inc."),
++ DMI_MATCH(DMI_BOARD_NAME, "RP-15"),
++ },
++ },
+ {
+ /* TongFang GM6XGxX/TUXEDO Stellaris 16 Gen5 AMD */
+ .matches = {
+diff --git a/drivers/android/binder.c b/drivers/android/binder.c
+index b63322e7e1011..b4b73093757f8 100644
+--- a/drivers/android/binder.c
++++ b/drivers/android/binder.c
+@@ -4836,7 +4836,7 @@ static __poll_t binder_poll(struct file *filp,
+
+ thread = binder_get_thread(proc);
+ if (!thread)
+- return POLLERR;
++ return EPOLLERR;
+
+ binder_inner_proc_lock(thread->proc);
+ thread->looper |= BINDER_LOOPER_STATE_POLL;
+diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
+index 54cee2b31c8e5..813894ba5da6b 100644
+--- a/drivers/android/binder_alloc.c
++++ b/drivers/android/binder_alloc.c
+@@ -271,7 +271,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
+ }
+ if (mm) {
+ mmap_write_unlock(mm);
+- mmput(mm);
++ mmput_async(mm);
+ }
+ return 0;
+
+@@ -304,7 +304,7 @@ err_page_ptr_cleared:
+ err_no_vma:
+ if (mm) {
+ mmap_write_unlock(mm);
+- mmput(mm);
++ mmput_async(mm);
+ }
+ return vma ? -ENOMEM : -ESRCH;
+ }
+@@ -344,8 +344,7 @@ static bool debug_low_async_space_locked(struct binder_alloc *alloc, int pid)
+ continue;
+ if (!buffer->async_transaction)
+ continue;
+- total_alloc_size += binder_alloc_buffer_size(alloc, buffer)
+- + sizeof(struct binder_buffer);
++ total_alloc_size += binder_alloc_buffer_size(alloc, buffer);
+ num_buffers++;
+ }
+
+@@ -407,17 +406,17 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
+ alloc->pid, extra_buffers_size);
+ return ERR_PTR(-EINVAL);
+ }
+- if (is_async &&
+- alloc->free_async_space < size + sizeof(struct binder_buffer)) {
++
++ /* Pad 0-size buffers so they get assigned unique addresses */
++ size = max(size, sizeof(void *));
++
++ if (is_async && alloc->free_async_space < size) {
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: binder_alloc_buf size %zd failed, no async space left\n",
+ alloc->pid, size);
+ return ERR_PTR(-ENOSPC);
+ }
+
+- /* Pad 0-size buffers so they get assigned unique addresses */
+- size = max(size, sizeof(void *));
+-
+ while (n) {
+ buffer = rb_entry(n, struct binder_buffer, rb_node);
+ BUG_ON(!buffer->free);
+@@ -519,7 +518,7 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
+ buffer->pid = pid;
+ buffer->oneway_spam_suspect = false;
+ if (is_async) {
+- alloc->free_async_space -= size + sizeof(struct binder_buffer);
++ alloc->free_async_space -= size;
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
+ "%d: binder_alloc_buf size %zd async free %zd\n",
+ alloc->pid, size, alloc->free_async_space);
+@@ -557,7 +556,7 @@ err_alloc_buf_struct_failed:
+ * is the sum of the three given sizes (each rounded up to
+ * pointer-sized boundary)
+ *
+- * Return: The allocated buffer or %NULL if error
++ * Return: The allocated buffer or %ERR_PTR(-errno) if error
+ */
+ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
+ size_t data_size,
+@@ -657,8 +656,7 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
+ BUG_ON(buffer->user_data > alloc->buffer + alloc->buffer_size);
+
+ if (buffer->async_transaction) {
+- alloc->free_async_space += buffer_size + sizeof(struct binder_buffer);
+-
++ alloc->free_async_space += buffer_size;
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
+ "%d: binder_free_buf size %zd async free %zd\n",
+ alloc->pid, size, alloc->free_async_space);
+@@ -706,7 +704,7 @@ void binder_alloc_free_buf(struct binder_alloc *alloc,
+ /*
+ * We could eliminate the call to binder_alloc_clear_buf()
+ * from binder_alloc_deferred_release() by moving this to
+- * binder_alloc_free_buf_locked(). However, that could
++ * binder_free_buf_locked(). However, that could
+ * increase contention for the alloc mutex if clear_on_free
+ * is used frequently for large buffers. The mutex is not
+ * needed for correctness here.
+@@ -1005,7 +1003,9 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
+ goto err_mmget;
+ if (!mmap_read_trylock(mm))
+ goto err_mmap_read_lock_failed;
+- vma = binder_alloc_get_vma(alloc);
++ vma = vma_lookup(mm, page_addr);
++ if (vma && vma != binder_alloc_get_vma(alloc))
++ goto err_invalid_vma;
+
+ list_lru_isolate(lru, item);
+ spin_unlock(lock);
+@@ -1031,6 +1031,8 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
+ mutex_unlock(&alloc->mutex);
+ return LRU_REMOVED_RETRY;
+
++err_invalid_vma:
++ mmap_read_unlock(mm);
+ err_mmap_read_lock_failed:
+ mmput_async(mm);
+ err_mmget:
+diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c
+index 3ba1232ce8451..15f149fc19401 100644
+--- a/drivers/base/swnode.c
++++ b/drivers/base/swnode.c
+@@ -547,6 +547,9 @@ software_node_get_reference_args(const struct fwnode_handle *fwnode,
+ if (nargs > NR_FWNODE_REFERENCE_ARGS)
+ return -EINVAL;
+
++ if (!args)
++ return 0;
++
+ args->fwnode = software_node_get(refnode);
+ args->nargs = nargs;
+
+diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
+index 686ec6bcdef3d..87791265e09bf 100644
+--- a/drivers/block/null_blk/main.c
++++ b/drivers/block/null_blk/main.c
+@@ -1900,11 +1900,8 @@ static int null_add_dev(struct nullb_device *dev)
+
+ blk_queue_logical_block_size(nullb->q, dev->blocksize);
+ blk_queue_physical_block_size(nullb->q, dev->blocksize);
+- if (!dev->max_sectors)
+- dev->max_sectors = queue_max_hw_sectors(nullb->q);
+- dev->max_sectors = min_t(unsigned int, dev->max_sectors,
+- BLK_DEF_MAX_SECTORS);
+- blk_queue_max_hw_sectors(nullb->q, dev->max_sectors);
++ if (dev->max_sectors)
++ blk_queue_max_hw_sectors(nullb->q, dev->max_sectors);
+
+ if (dev->virt_boundary)
+ blk_queue_virt_boundary(nullb->q, PAGE_SIZE - 1);
+@@ -1954,12 +1951,6 @@ static int __init null_init(void)
+ g_bs = PAGE_SIZE;
+ }
+
+- if (g_max_sectors > BLK_DEF_MAX_SECTORS) {
+- pr_warn("invalid max sectors\n");
+- pr_warn("defaults max sectors to %u\n", BLK_DEF_MAX_SECTORS);
+- g_max_sectors = BLK_DEF_MAX_SECTORS;
+- }
+-
+ if (g_home_node != NUMA_NO_NODE && g_home_node >= nr_online_nodes) {
+ pr_err("invalid home_node value\n");
+ g_home_node = NUMA_NO_NODE;
+diff --git a/drivers/bluetooth/btmtkuart.c b/drivers/bluetooth/btmtkuart.c
+index 9ba22b13b4fa0..a831d734cd9ec 100644
+--- a/drivers/bluetooth/btmtkuart.c
++++ b/drivers/bluetooth/btmtkuart.c
+@@ -471,7 +471,7 @@ mtk_stp_split(struct btmtkuart_dev *bdev, const unsigned char *data, int count,
+ return data;
+ }
+
+-static int btmtkuart_recv(struct hci_dev *hdev, const u8 *data, size_t count)
++static void btmtkuart_recv(struct hci_dev *hdev, const u8 *data, size_t count)
+ {
+ struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
+ const unsigned char *p_left = data, *p_h4;
+@@ -510,25 +510,20 @@ static int btmtkuart_recv(struct hci_dev *hdev, const u8 *data, size_t count)
+ bt_dev_err(bdev->hdev,
+ "Frame reassembly failed (%d)", err);
+ bdev->rx_skb = NULL;
+- return err;
++ return;
+ }
+
+ sz_left -= sz_h4;
+ p_left += sz_h4;
+ }
+-
+- return 0;
+ }
+
+ static int btmtkuart_receive_buf(struct serdev_device *serdev, const u8 *data,
+ size_t count)
+ {
+ struct btmtkuart_dev *bdev = serdev_device_get_drvdata(serdev);
+- int err;
+
+- err = btmtkuart_recv(bdev->hdev, data, count);
+- if (err < 0)
+- return err;
++ btmtkuart_recv(bdev->hdev, data, count);
+
+ bdev->hdev->stat.byte_rx += count;
+
+diff --git a/drivers/clk/clk-asm9260.c b/drivers/clk/clk-asm9260.c
+index bacebd457e6f3..8b3c059e19a12 100644
+--- a/drivers/clk/clk-asm9260.c
++++ b/drivers/clk/clk-asm9260.c
+@@ -80,7 +80,7 @@ struct asm9260_mux_clock {
+ u8 mask;
+ u32 *table;
+ const char *name;
+- const char **parent_names;
++ const struct clk_parent_data *parent_data;
+ u8 num_parents;
+ unsigned long offset;
+ unsigned long flags;
+@@ -232,10 +232,10 @@ static const struct asm9260_gate_data asm9260_ahb_gates[] __initconst = {
+ HW_AHBCLKCTRL1, 16 },
+ };
+
+-static const char __initdata *main_mux_p[] = { NULL, NULL };
+-static const char __initdata *i2s0_mux_p[] = { NULL, NULL, "i2s0m_div"};
+-static const char __initdata *i2s1_mux_p[] = { NULL, NULL, "i2s1m_div"};
+-static const char __initdata *clkout_mux_p[] = { NULL, NULL, "rtc"};
++static struct clk_parent_data __initdata main_mux_p[] = { { .index = 0, }, { .name = "pll" } };
++static struct clk_parent_data __initdata i2s0_mux_p[] = { { .index = 0, }, { .name = "pll" }, { .name = "i2s0m_div"} };
++static struct clk_parent_data __initdata i2s1_mux_p[] = { { .index = 0, }, { .name = "pll" }, { .name = "i2s1m_div"} };
++static struct clk_parent_data __initdata clkout_mux_p[] = { { .index = 0, }, { .name = "pll" }, { .name = "rtc"} };
+ static u32 three_mux_table[] = {0, 1, 3};
+
+ static struct asm9260_mux_clock asm9260_mux_clks[] __initdata = {
+@@ -255,9 +255,10 @@ static struct asm9260_mux_clock asm9260_mux_clks[] __initdata = {
+
+ static void __init asm9260_acc_init(struct device_node *np)
+ {
+- struct clk_hw *hw;
++ struct clk_hw *hw, *pll_hw;
+ struct clk_hw **hws;
+- const char *ref_clk, *pll_clk = "pll";
++ const char *pll_clk = "pll";
++ struct clk_parent_data pll_parent_data = { .index = 0 };
+ u32 rate;
+ int n;
+
+@@ -274,21 +275,15 @@ static void __init asm9260_acc_init(struct device_node *np)
+ /* register pll */
+ rate = (ioread32(base + HW_SYSPLLCTRL) & 0xffff) * 1000000;
+
+- /* TODO: Convert to DT parent scheme */
+- ref_clk = of_clk_get_parent_name(np, 0);
+- hw = __clk_hw_register_fixed_rate(NULL, NULL, pll_clk,
+- ref_clk, NULL, NULL, 0, rate, 0,
+- CLK_FIXED_RATE_PARENT_ACCURACY);
+-
+- if (IS_ERR(hw))
++ pll_hw = clk_hw_register_fixed_rate_parent_accuracy(NULL, pll_clk, &pll_parent_data,
++ 0, rate);
++ if (IS_ERR(pll_hw))
+ panic("%pOFn: can't register REFCLK. Check DT!", np);
+
+ for (n = 0; n < ARRAY_SIZE(asm9260_mux_clks); n++) {
+ const struct asm9260_mux_clock *mc = &asm9260_mux_clks[n];
+
+- mc->parent_names[0] = ref_clk;
+- mc->parent_names[1] = pll_clk;
+- hw = clk_hw_register_mux_table(NULL, mc->name, mc->parent_names,
++ hw = clk_hw_register_mux_table_parent_data(NULL, mc->name, mc->parent_data,
+ mc->num_parents, mc->flags, base + mc->offset,
+ 0, mc->mask, 0, mc->table, &asm9260_clk_lock);
+ }
+diff --git a/drivers/clk/clk-fixed-rate.c b/drivers/clk/clk-fixed-rate.c
+index 45501637705c3..62e994d18fe24 100644
+--- a/drivers/clk/clk-fixed-rate.c
++++ b/drivers/clk/clk-fixed-rate.c
+@@ -49,12 +49,24 @@ const struct clk_ops clk_fixed_rate_ops = {
+ };
+ EXPORT_SYMBOL_GPL(clk_fixed_rate_ops);
+
++static void devm_clk_hw_register_fixed_rate_release(struct device *dev, void *res)
++{
++ struct clk_fixed_rate *fix = res;
++
++ /*
++ * We can not use clk_hw_unregister_fixed_rate, since it will kfree()
++ * the hw, resulting in double free. Just unregister the hw and let
++ * devres code kfree() it.
++ */
++ clk_hw_unregister(&fix->hw);
++}
++
+ struct clk_hw *__clk_hw_register_fixed_rate(struct device *dev,
+ struct device_node *np, const char *name,
+ const char *parent_name, const struct clk_hw *parent_hw,
+ const struct clk_parent_data *parent_data, unsigned long flags,
+ unsigned long fixed_rate, unsigned long fixed_accuracy,
+- unsigned long clk_fixed_flags)
++ unsigned long clk_fixed_flags, bool devm)
+ {
+ struct clk_fixed_rate *fixed;
+ struct clk_hw *hw;
+@@ -62,7 +74,11 @@ struct clk_hw *__clk_hw_register_fixed_rate(struct device *dev,
+ int ret = -EINVAL;
+
+ /* allocate fixed-rate clock */
+- fixed = kzalloc(sizeof(*fixed), GFP_KERNEL);
++ if (devm)
++ fixed = devres_alloc(devm_clk_hw_register_fixed_rate_release,
++ sizeof(*fixed), GFP_KERNEL);
++ else
++ fixed = kzalloc(sizeof(*fixed), GFP_KERNEL);
+ if (!fixed)
+ return ERR_PTR(-ENOMEM);
+
+@@ -90,9 +106,13 @@ struct clk_hw *__clk_hw_register_fixed_rate(struct device *dev,
+ else if (np)
+ ret = of_clk_hw_register(np, hw);
+ if (ret) {
+- kfree(fixed);
++ if (devm)
++ devres_free(fixed);
++ else
++ kfree(fixed);
+ hw = ERR_PTR(ret);
+- }
++ } else if (devm)
++ devres_add(dev, fixed);
+
+ return hw;
+ }
+diff --git a/drivers/clk/clk-si5341.c b/drivers/clk/clk-si5341.c
+index 91a6bc74ebd5a..cda5a0f4d9dfe 100644
+--- a/drivers/clk/clk-si5341.c
++++ b/drivers/clk/clk-si5341.c
+@@ -892,10 +892,8 @@ static int si5341_output_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+ r[0] = r_div ? (r_div & 0xff) : 1;
+ r[1] = (r_div >> 8) & 0xff;
+ r[2] = (r_div >> 16) & 0xff;
+- err = regmap_bulk_write(output->data->regmap,
++ return regmap_bulk_write(output->data->regmap,
+ SI5341_OUT_R_REG(output), r, 3);
+-
+- return 0;
+ }
+
+ static int si5341_output_reparent(struct clk_si5341_output *output, u8 index)
+diff --git a/drivers/clk/qcom/gpucc-sm8150.c b/drivers/clk/qcom/gpucc-sm8150.c
+index 8422fd0474932..c89a5b59ddb7c 100644
+--- a/drivers/clk/qcom/gpucc-sm8150.c
++++ b/drivers/clk/qcom/gpucc-sm8150.c
+@@ -37,8 +37,8 @@ static struct alpha_pll_config gpu_cc_pll1_config = {
+ .config_ctl_hi_val = 0x00002267,
+ .config_ctl_hi1_val = 0x00000024,
+ .test_ctl_val = 0x00000000,
+- .test_ctl_hi_val = 0x00000002,
+- .test_ctl_hi1_val = 0x00000000,
++ .test_ctl_hi_val = 0x00000000,
++ .test_ctl_hi1_val = 0x00000020,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000805,
+ .user_ctl_hi1_val = 0x000000d0,
+diff --git a/drivers/clk/qcom/videocc-sm8150.c b/drivers/clk/qcom/videocc-sm8150.c
+index 1afdbe4a249d6..52a9a453a1432 100644
+--- a/drivers/clk/qcom/videocc-sm8150.c
++++ b/drivers/clk/qcom/videocc-sm8150.c
+@@ -33,6 +33,7 @@ static struct alpha_pll_config video_pll0_config = {
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002267,
+ .config_ctl_hi1_val = 0x00000024,
++ .test_ctl_hi1_val = 0x00000020,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000805,
+ .user_ctl_hi1_val = 0x000000D0,
+@@ -214,6 +215,10 @@ static const struct regmap_config video_cc_sm8150_regmap_config = {
+
+ static const struct qcom_reset_map video_cc_sm8150_resets[] = {
+ [VIDEO_CC_MVSC_CORE_CLK_BCR] = { 0x850, 2 },
++ [VIDEO_CC_INTERFACE_BCR] = { 0x8f0 },
++ [VIDEO_CC_MVS0_BCR] = { 0x870 },
++ [VIDEO_CC_MVS1_BCR] = { 0x8b0 },
++ [VIDEO_CC_MVSC_BCR] = { 0x810 },
+ };
+
+ static const struct qcom_cc_desc video_cc_sm8150_desc = {
+diff --git a/drivers/clk/rockchip/clk-rk3128.c b/drivers/clk/rockchip/clk-rk3128.c
+index aa53797dbfc14..7782785a86e69 100644
+--- a/drivers/clk/rockchip/clk-rk3128.c
++++ b/drivers/clk/rockchip/clk-rk3128.c
+@@ -490,7 +490,7 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
+ GATE(HCLK_I2S_2CH, "hclk_i2s_2ch", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 2, GFLAGS),
+ GATE(0, "hclk_usb_peri", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(9), 13, GFLAGS),
+ GATE(HCLK_HOST2, "hclk_host2", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 3, GFLAGS),
+- GATE(HCLK_OTG, "hclk_otg", "hclk_peri", 0, RK2928_CLKGATE_CON(3), 13, GFLAGS),
++ GATE(HCLK_OTG, "hclk_otg", "hclk_peri", 0, RK2928_CLKGATE_CON(5), 13, GFLAGS),
+ GATE(0, "hclk_peri_ahb", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(9), 14, GFLAGS),
+ GATE(HCLK_SPDIF, "hclk_spdif", "hclk_peri", 0, RK2928_CLKGATE_CON(10), 9, GFLAGS),
+ GATE(HCLK_TSP, "hclk_tsp", "hclk_peri", 0, RK2928_CLKGATE_CON(10), 12, GFLAGS),
+diff --git a/drivers/clk/zynqmp/clk-mux-zynqmp.c b/drivers/clk/zynqmp/clk-mux-zynqmp.c
+index 17afce594f288..5a040f939056c 100644
+--- a/drivers/clk/zynqmp/clk-mux-zynqmp.c
++++ b/drivers/clk/zynqmp/clk-mux-zynqmp.c
+@@ -89,7 +89,7 @@ static int zynqmp_clk_mux_set_parent(struct clk_hw *hw, u8 index)
+ static const struct clk_ops zynqmp_clk_mux_ops = {
+ .get_parent = zynqmp_clk_mux_get_parent,
+ .set_parent = zynqmp_clk_mux_set_parent,
+- .determine_rate = __clk_mux_determine_rate,
++ .determine_rate = __clk_mux_determine_rate_closest,
+ };
+
+ static const struct clk_ops zynqmp_clk_mux_ro_ops = {
+diff --git a/drivers/clk/zynqmp/divider.c b/drivers/clk/zynqmp/divider.c
+index cb49281f9cf98..e25c76ff27391 100644
+--- a/drivers/clk/zynqmp/divider.c
++++ b/drivers/clk/zynqmp/divider.c
+@@ -110,49 +110,6 @@ static unsigned long zynqmp_clk_divider_recalc_rate(struct clk_hw *hw,
+ return DIV_ROUND_UP_ULL(parent_rate, value);
+ }
+
+-static void zynqmp_get_divider2_val(struct clk_hw *hw,
+- unsigned long rate,
+- struct zynqmp_clk_divider *divider,
+- int *bestdiv)
+-{
+- int div1;
+- int div2;
+- long error = LONG_MAX;
+- unsigned long div1_prate;
+- struct clk_hw *div1_parent_hw;
+- struct clk_hw *div2_parent_hw = clk_hw_get_parent(hw);
+- struct zynqmp_clk_divider *pdivider =
+- to_zynqmp_clk_divider(div2_parent_hw);
+-
+- if (!pdivider)
+- return;
+-
+- div1_parent_hw = clk_hw_get_parent(div2_parent_hw);
+- if (!div1_parent_hw)
+- return;
+-
+- div1_prate = clk_hw_get_rate(div1_parent_hw);
+- *bestdiv = 1;
+- for (div1 = 1; div1 <= pdivider->max_div;) {
+- for (div2 = 1; div2 <= divider->max_div;) {
+- long new_error = ((div1_prate / div1) / div2) - rate;
+-
+- if (abs(new_error) < abs(error)) {
+- *bestdiv = div2;
+- error = new_error;
+- }
+- if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
+- div2 = div2 << 1;
+- else
+- div2++;
+- }
+- if (pdivider->flags & CLK_DIVIDER_POWER_OF_TWO)
+- div1 = div1 << 1;
+- else
+- div1++;
+- }
+-}
+-
+ /**
+ * zynqmp_clk_divider_round_rate() - Round rate of divider clock
+ * @hw: handle between common and hardware-specific interfaces
+@@ -171,6 +128,7 @@ static long zynqmp_clk_divider_round_rate(struct clk_hw *hw,
+ u32 div_type = divider->div_type;
+ u32 bestdiv;
+ int ret;
++ u8 width;
+
+ /* if read only, just return current value */
+ if (divider->flags & CLK_DIVIDER_READ_ONLY) {
+@@ -190,23 +148,12 @@ static long zynqmp_clk_divider_round_rate(struct clk_hw *hw,
+ return DIV_ROUND_UP_ULL((u64)*prate, bestdiv);
+ }
+
+- bestdiv = zynqmp_divider_get_val(*prate, rate, divider->flags);
+-
+- /*
+- * In case of two divisors, compute best divider values and return
+- * divider2 value based on compute value. div1 will be automatically
+- * set to optimum based on required total divider value.
+- */
+- if (div_type == TYPE_DIV2 &&
+- (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)) {
+- zynqmp_get_divider2_val(hw, rate, divider, &bestdiv);
+- }
++ width = fls(divider->max_div);
+
+- if ((clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) && divider->is_frac)
+- bestdiv = rate % *prate ? 1 : bestdiv;
++ rate = divider_round_rate(hw, rate, prate, NULL, width, divider->flags);
+
+- bestdiv = min_t(u32, bestdiv, divider->max_div);
+- *prate = rate * bestdiv;
++ if (divider->is_frac && (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) && (rate % *prate))
++ *prate = rate;
+
+ return rate;
+ }
+diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
+index e1b5975c7daa1..48ca7189a73b8 100644
+--- a/drivers/cpufreq/cpufreq-dt-platdev.c
++++ b/drivers/cpufreq/cpufreq-dt-platdev.c
+@@ -171,7 +171,7 @@ static bool __init cpu0_node_has_opp_v2_prop(void)
+ struct device_node *np = of_cpu_device_node_get(0);
+ bool ret = false;
+
+- if (of_get_property(np, "operating-points-v2", NULL))
++ if (of_property_present(np, "operating-points-v2"))
+ ret = true;
+
+ of_node_put(np);
+diff --git a/drivers/cpufreq/imx-cpufreq-dt.c b/drivers/cpufreq/imx-cpufreq-dt.c
+index 3fe9125156b44..0942498b348c3 100644
+--- a/drivers/cpufreq/imx-cpufreq-dt.c
++++ b/drivers/cpufreq/imx-cpufreq-dt.c
+@@ -89,7 +89,7 @@ static int imx_cpufreq_dt_probe(struct platform_device *pdev)
+
+ cpu_dev = get_cpu_device(0);
+
+- if (!of_find_property(cpu_dev->of_node, "cpu-supply", NULL))
++ if (!of_property_present(cpu_dev->of_node, "cpu-supply"))
+ return -ENODEV;
+
+ if (of_machine_is_compatible("fsl,imx7ulp")) {
+diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
+index 67f98a083d223..ae9ef99f7d862 100644
+--- a/drivers/cpufreq/imx6q-cpufreq.c
++++ b/drivers/cpufreq/imx6q-cpufreq.c
+@@ -230,7 +230,7 @@ static int imx6q_opp_check_speed_grading(struct device *dev)
+ u32 val;
+ int ret;
+
+- if (of_find_property(dev->of_node, "nvmem-cells", NULL)) {
++ if (of_property_present(dev->of_node, "nvmem-cells")) {
+ ret = nvmem_cell_read_u32(dev, "speed_grade", &val);
+ if (ret)
+ return ret;
+@@ -285,7 +285,7 @@ static int imx6ul_opp_check_speed_grading(struct device *dev)
+ u32 val;
+ int ret = 0;
+
+- if (of_find_property(dev->of_node, "nvmem-cells", NULL)) {
++ if (of_property_present(dev->of_node, "nvmem-cells")) {
+ ret = nvmem_cell_read_u32(dev, "speed_grade", &val);
+ if (ret)
+ return ret;
+diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c
+index 1e0cd4d165f04..c24e6373d3417 100644
+--- a/drivers/cpufreq/scmi-cpufreq.c
++++ b/drivers/cpufreq/scmi-cpufreq.c
+@@ -299,8 +299,11 @@ static int scmi_cpufreq_probe(struct scmi_device *sdev)
+
+ #ifdef CONFIG_COMMON_CLK
+ /* dummy clock provider as needed by OPP if clocks property is used */
+- if (of_find_property(dev->of_node, "#clock-cells", NULL))
+- devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, NULL);
++ if (of_property_present(dev->of_node, "#clock-cells")) {
++ ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, NULL);
++ if (ret)
++ return dev_err_probe(dev, ret, "%s: registering clock provider failed\n", __func__);
++ }
+ #endif
+
+ ret = cpufreq_register_driver(&scmi_cpufreq_driver);
+diff --git a/drivers/cpufreq/tegra20-cpufreq.c b/drivers/cpufreq/tegra20-cpufreq.c
+index e8db3d75be251..72b9c2d5f3754 100644
+--- a/drivers/cpufreq/tegra20-cpufreq.c
++++ b/drivers/cpufreq/tegra20-cpufreq.c
+@@ -25,7 +25,7 @@ static bool cpu0_node_has_opp_v2_prop(void)
+ struct device_node *np = of_cpu_device_node_get(0);
+ bool ret = false;
+
+- if (of_get_property(np, "operating-points-v2", NULL))
++ if (of_property_present(np, "operating-points-v2"))
+ ret = true;
+
+ of_node_put(np);
+diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
+index aa4e1a5006919..cb8e99936abb7 100644
+--- a/drivers/crypto/ccp/ccp-ops.c
++++ b/drivers/crypto/ccp/ccp-ops.c
+@@ -179,8 +179,11 @@ static int ccp_init_dm_workarea(struct ccp_dm_workarea *wa,
+
+ wa->dma.address = dma_map_single(wa->dev, wa->address, len,
+ dir);
+- if (dma_mapping_error(wa->dev, wa->dma.address))
++ if (dma_mapping_error(wa->dev, wa->dma.address)) {
++ kfree(wa->address);
++ wa->address = NULL;
+ return -ENOMEM;
++ }
+
+ wa->dma.length = len;
+ }
+diff --git a/drivers/crypto/sa2ul.c b/drivers/crypto/sa2ul.c
+index 544d7040cfc58..91ab33690ccf4 100644
+--- a/drivers/crypto/sa2ul.c
++++ b/drivers/crypto/sa2ul.c
+@@ -1868,9 +1868,8 @@ static int sa_aead_setkey(struct crypto_aead *authenc,
+ crypto_aead_set_flags(ctx->fallback.aead,
+ crypto_aead_get_flags(authenc) &
+ CRYPTO_TFM_REQ_MASK);
+- crypto_aead_setkey(ctx->fallback.aead, key, keylen);
+
+- return 0;
++ return crypto_aead_setkey(ctx->fallback.aead, key, keylen);
+ }
+
+ static int sa_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
+diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
+index b07ae4ba165e7..b9832978b9358 100644
+--- a/drivers/crypto/sahara.c
++++ b/drivers/crypto/sahara.c
+@@ -44,7 +44,6 @@
+ #define FLAGS_MODE_MASK 0x000f
+ #define FLAGS_ENCRYPT BIT(0)
+ #define FLAGS_CBC BIT(1)
+-#define FLAGS_NEW_KEY BIT(3)
+
+ #define SAHARA_HDR_BASE 0x00800000
+ #define SAHARA_HDR_SKHA_ALG_AES 0
+@@ -142,8 +141,6 @@ struct sahara_hw_link {
+ };
+
+ struct sahara_ctx {
+- unsigned long flags;
+-
+ /* AES-specific context */
+ int keylen;
+ u8 key[AES_KEYSIZE_128];
+@@ -152,6 +149,7 @@ struct sahara_ctx {
+
+ struct sahara_aes_reqctx {
+ unsigned long mode;
++ u8 iv_out[AES_BLOCK_SIZE];
+ struct skcipher_request fallback_req; // keep at the end
+ };
+
+@@ -447,27 +445,24 @@ static int sahara_hw_descriptor_create(struct sahara_dev *dev)
+ int ret;
+ int i, j;
+ int idx = 0;
++ u32 len;
+
+- /* Copy new key if necessary */
+- if (ctx->flags & FLAGS_NEW_KEY) {
+- memcpy(dev->key_base, ctx->key, ctx->keylen);
+- ctx->flags &= ~FLAGS_NEW_KEY;
++ memcpy(dev->key_base, ctx->key, ctx->keylen);
+
+- if (dev->flags & FLAGS_CBC) {
+- dev->hw_desc[idx]->len1 = AES_BLOCK_SIZE;
+- dev->hw_desc[idx]->p1 = dev->iv_phys_base;
+- } else {
+- dev->hw_desc[idx]->len1 = 0;
+- dev->hw_desc[idx]->p1 = 0;
+- }
+- dev->hw_desc[idx]->len2 = ctx->keylen;
+- dev->hw_desc[idx]->p2 = dev->key_phys_base;
+- dev->hw_desc[idx]->next = dev->hw_phys_desc[1];
++ if (dev->flags & FLAGS_CBC) {
++ dev->hw_desc[idx]->len1 = AES_BLOCK_SIZE;
++ dev->hw_desc[idx]->p1 = dev->iv_phys_base;
++ } else {
++ dev->hw_desc[idx]->len1 = 0;
++ dev->hw_desc[idx]->p1 = 0;
++ }
++ dev->hw_desc[idx]->len2 = ctx->keylen;
++ dev->hw_desc[idx]->p2 = dev->key_phys_base;
++ dev->hw_desc[idx]->next = dev->hw_phys_desc[1];
++ dev->hw_desc[idx]->hdr = sahara_aes_key_hdr(dev);
+
+- dev->hw_desc[idx]->hdr = sahara_aes_key_hdr(dev);
++ idx++;
+
+- idx++;
+- }
+
+ dev->nb_in_sg = sg_nents_for_len(dev->in_sg, dev->total);
+ if (dev->nb_in_sg < 0) {
+@@ -489,24 +484,27 @@ static int sahara_hw_descriptor_create(struct sahara_dev *dev)
+ DMA_TO_DEVICE);
+ if (ret != dev->nb_in_sg) {
+ dev_err(dev->device, "couldn't map in sg\n");
+- goto unmap_in;
++ return -EINVAL;
+ }
++
+ ret = dma_map_sg(dev->device, dev->out_sg, dev->nb_out_sg,
+ DMA_FROM_DEVICE);
+ if (ret != dev->nb_out_sg) {
+ dev_err(dev->device, "couldn't map out sg\n");
+- goto unmap_out;
++ goto unmap_in;
+ }
+
+ /* Create input links */
+ dev->hw_desc[idx]->p1 = dev->hw_phys_link[0];
+ sg = dev->in_sg;
++ len = dev->total;
+ for (i = 0; i < dev->nb_in_sg; i++) {
+- dev->hw_link[i]->len = sg->length;
++ dev->hw_link[i]->len = min(len, sg->length);
+ dev->hw_link[i]->p = sg->dma_address;
+ if (i == (dev->nb_in_sg - 1)) {
+ dev->hw_link[i]->next = 0;
+ } else {
++ len -= min(len, sg->length);
+ dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
+ sg = sg_next(sg);
+ }
+@@ -515,12 +513,14 @@ static int sahara_hw_descriptor_create(struct sahara_dev *dev)
+ /* Create output links */
+ dev->hw_desc[idx]->p2 = dev->hw_phys_link[i];
+ sg = dev->out_sg;
++ len = dev->total;
+ for (j = i; j < dev->nb_out_sg + i; j++) {
+- dev->hw_link[j]->len = sg->length;
++ dev->hw_link[j]->len = min(len, sg->length);
+ dev->hw_link[j]->p = sg->dma_address;
+ if (j == (dev->nb_out_sg + i - 1)) {
+ dev->hw_link[j]->next = 0;
+ } else {
++ len -= min(len, sg->length);
+ dev->hw_link[j]->next = dev->hw_phys_link[j + 1];
+ sg = sg_next(sg);
+ }
+@@ -539,9 +539,6 @@ static int sahara_hw_descriptor_create(struct sahara_dev *dev)
+
+ return 0;
+
+-unmap_out:
+- dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
+- DMA_FROM_DEVICE);
+ unmap_in:
+ dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
+ DMA_TO_DEVICE);
+@@ -549,8 +546,24 @@ unmap_in:
+ return -EINVAL;
+ }
+
++static void sahara_aes_cbc_update_iv(struct skcipher_request *req)
++{
++ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
++ struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
++ unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
++
++ /* Update IV buffer to contain the last ciphertext block */
++ if (rctx->mode & FLAGS_ENCRYPT) {
++ sg_pcopy_to_buffer(req->dst, sg_nents(req->dst), req->iv,
++ ivsize, req->cryptlen - ivsize);
++ } else {
++ memcpy(req->iv, rctx->iv_out, ivsize);
++ }
++}
++
+ static int sahara_aes_process(struct skcipher_request *req)
+ {
++ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct sahara_dev *dev = dev_ptr;
+ struct sahara_ctx *ctx;
+ struct sahara_aes_reqctx *rctx;
+@@ -572,8 +585,17 @@ static int sahara_aes_process(struct skcipher_request *req)
+ rctx->mode &= FLAGS_MODE_MASK;
+ dev->flags = (dev->flags & ~FLAGS_MODE_MASK) | rctx->mode;
+
+- if ((dev->flags & FLAGS_CBC) && req->iv)
+- memcpy(dev->iv_base, req->iv, AES_KEYSIZE_128);
++ if ((dev->flags & FLAGS_CBC) && req->iv) {
++ unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
++
++ memcpy(dev->iv_base, req->iv, ivsize);
++
++ if (!(dev->flags & FLAGS_ENCRYPT)) {
++ sg_pcopy_to_buffer(req->src, sg_nents(req->src),
++ rctx->iv_out, ivsize,
++ req->cryptlen - ivsize);
++ }
++ }
+
+ /* assign new context to device */
+ dev->ctx = ctx;
+@@ -586,16 +608,20 @@ static int sahara_aes_process(struct skcipher_request *req)
+
+ timeout = wait_for_completion_timeout(&dev->dma_completion,
+ msecs_to_jiffies(SAHARA_TIMEOUT_MS));
+- if (!timeout) {
+- dev_err(dev->device, "AES timeout\n");
+- return -ETIMEDOUT;
+- }
+
+ dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
+ DMA_FROM_DEVICE);
+ dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
+ DMA_TO_DEVICE);
+
++ if (!timeout) {
++ dev_err(dev->device, "AES timeout\n");
++ return -ETIMEDOUT;
++ }
++
++ if ((dev->flags & FLAGS_CBC) && req->iv)
++ sahara_aes_cbc_update_iv(req);
++
+ return 0;
+ }
+
+@@ -609,7 +635,6 @@ static int sahara_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ /* SAHARA only supports 128bit keys */
+ if (keylen == AES_KEYSIZE_128) {
+ memcpy(ctx->key, key, keylen);
+- ctx->flags |= FLAGS_NEW_KEY;
+ return 0;
+ }
+
+@@ -625,12 +650,40 @@ static int sahara_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ return crypto_skcipher_setkey(ctx->fallback, key, keylen);
+ }
+
++static int sahara_aes_fallback(struct skcipher_request *req, unsigned long mode)
++{
++ struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
++ struct sahara_ctx *ctx = crypto_skcipher_ctx(
++ crypto_skcipher_reqtfm(req));
++
++ skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
++ skcipher_request_set_callback(&rctx->fallback_req,
++ req->base.flags,
++ req->base.complete,
++ req->base.data);
++ skcipher_request_set_crypt(&rctx->fallback_req, req->src,
++ req->dst, req->cryptlen, req->iv);
++
++ if (mode & FLAGS_ENCRYPT)
++ return crypto_skcipher_encrypt(&rctx->fallback_req);
++
++ return crypto_skcipher_decrypt(&rctx->fallback_req);
++}
++
+ static int sahara_aes_crypt(struct skcipher_request *req, unsigned long mode)
+ {
+ struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
++ struct sahara_ctx *ctx = crypto_skcipher_ctx(
++ crypto_skcipher_reqtfm(req));
+ struct sahara_dev *dev = dev_ptr;
+ int err = 0;
+
++ if (!req->cryptlen)
++ return 0;
++
++ if (unlikely(ctx->keylen != AES_KEYSIZE_128))
++ return sahara_aes_fallback(req, mode);
++
+ dev_dbg(dev->device, "nbytes: %d, enc: %d, cbc: %d\n",
+ req->cryptlen, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC));
+
+@@ -653,81 +706,21 @@ static int sahara_aes_crypt(struct skcipher_request *req, unsigned long mode)
+
+ static int sahara_aes_ecb_encrypt(struct skcipher_request *req)
+ {
+- struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
+- struct sahara_ctx *ctx = crypto_skcipher_ctx(
+- crypto_skcipher_reqtfm(req));
+-
+- if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
+- skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
+- skcipher_request_set_callback(&rctx->fallback_req,
+- req->base.flags,
+- req->base.complete,
+- req->base.data);
+- skcipher_request_set_crypt(&rctx->fallback_req, req->src,
+- req->dst, req->cryptlen, req->iv);
+- return crypto_skcipher_encrypt(&rctx->fallback_req);
+- }
+-
+ return sahara_aes_crypt(req, FLAGS_ENCRYPT);
+ }
+
+ static int sahara_aes_ecb_decrypt(struct skcipher_request *req)
+ {
+- struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
+- struct sahara_ctx *ctx = crypto_skcipher_ctx(
+- crypto_skcipher_reqtfm(req));
+-
+- if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
+- skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
+- skcipher_request_set_callback(&rctx->fallback_req,
+- req->base.flags,
+- req->base.complete,
+- req->base.data);
+- skcipher_request_set_crypt(&rctx->fallback_req, req->src,
+- req->dst, req->cryptlen, req->iv);
+- return crypto_skcipher_decrypt(&rctx->fallback_req);
+- }
+-
+ return sahara_aes_crypt(req, 0);
+ }
+
+ static int sahara_aes_cbc_encrypt(struct skcipher_request *req)
+ {
+- struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
+- struct sahara_ctx *ctx = crypto_skcipher_ctx(
+- crypto_skcipher_reqtfm(req));
+-
+- if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
+- skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
+- skcipher_request_set_callback(&rctx->fallback_req,
+- req->base.flags,
+- req->base.complete,
+- req->base.data);
+- skcipher_request_set_crypt(&rctx->fallback_req, req->src,
+- req->dst, req->cryptlen, req->iv);
+- return crypto_skcipher_encrypt(&rctx->fallback_req);
+- }
+-
+ return sahara_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
+ }
+
+ static int sahara_aes_cbc_decrypt(struct skcipher_request *req)
+ {
+- struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
+- struct sahara_ctx *ctx = crypto_skcipher_ctx(
+- crypto_skcipher_reqtfm(req));
+-
+- if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
+- skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
+- skcipher_request_set_callback(&rctx->fallback_req,
+- req->base.flags,
+- req->base.complete,
+- req->base.data);
+- skcipher_request_set_crypt(&rctx->fallback_req, req->src,
+- req->dst, req->cryptlen, req->iv);
+- return crypto_skcipher_decrypt(&rctx->fallback_req);
+- }
+-
+ return sahara_aes_crypt(req, FLAGS_CBC);
+ }
+
+@@ -784,6 +777,7 @@ static int sahara_sha_hw_links_create(struct sahara_dev *dev,
+ int start)
+ {
+ struct scatterlist *sg;
++ unsigned int len;
+ unsigned int i;
+ int ret;
+
+@@ -805,12 +799,14 @@ static int sahara_sha_hw_links_create(struct sahara_dev *dev,
+ if (!ret)
+ return -EFAULT;
+
++ len = rctx->total;
+ for (i = start; i < dev->nb_in_sg + start; i++) {
+- dev->hw_link[i]->len = sg->length;
++ dev->hw_link[i]->len = min(len, sg->length);
+ dev->hw_link[i]->p = sg->dma_address;
+ if (i == (dev->nb_in_sg + start - 1)) {
+ dev->hw_link[i]->next = 0;
+ } else {
++ len -= min(len, sg->length);
+ dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
+ sg = sg_next(sg);
+ }
+@@ -891,24 +887,6 @@ static int sahara_sha_hw_context_descriptor_create(struct sahara_dev *dev,
+ return 0;
+ }
+
+-static int sahara_walk_and_recalc(struct scatterlist *sg, unsigned int nbytes)
+-{
+- if (!sg || !sg->length)
+- return nbytes;
+-
+- while (nbytes && sg) {
+- if (nbytes <= sg->length) {
+- sg->length = nbytes;
+- sg_mark_end(sg);
+- break;
+- }
+- nbytes -= sg->length;
+- sg = sg_next(sg);
+- }
+-
+- return nbytes;
+-}
+-
+ static int sahara_sha_prepare_request(struct ahash_request *req)
+ {
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+@@ -945,36 +923,20 @@ static int sahara_sha_prepare_request(struct ahash_request *req)
+ hash_later, 0);
+ }
+
+- /* nbytes should now be multiple of blocksize */
+- req->nbytes = req->nbytes - hash_later;
+-
+- sahara_walk_and_recalc(req->src, req->nbytes);
+-
++ rctx->total = len - hash_later;
+ /* have data from previous operation and current */
+ if (rctx->buf_cnt && req->nbytes) {
+ sg_init_table(rctx->in_sg_chain, 2);
+ sg_set_buf(rctx->in_sg_chain, rctx->rembuf, rctx->buf_cnt);
+-
+ sg_chain(rctx->in_sg_chain, 2, req->src);
+-
+- rctx->total = req->nbytes + rctx->buf_cnt;
+ rctx->in_sg = rctx->in_sg_chain;
+-
+- req->src = rctx->in_sg_chain;
+ /* only data from previous operation */
+ } else if (rctx->buf_cnt) {
+- if (req->src)
+- rctx->in_sg = req->src;
+- else
+- rctx->in_sg = rctx->in_sg_chain;
+- /* buf was copied into rembuf above */
++ rctx->in_sg = rctx->in_sg_chain;
+ sg_init_one(rctx->in_sg, rctx->rembuf, rctx->buf_cnt);
+- rctx->total = rctx->buf_cnt;
+ /* no data from previous operation */
+ } else {
+ rctx->in_sg = req->src;
+- rctx->total = req->nbytes;
+- req->src = rctx->in_sg;
+ }
+
+ /* on next call, we only have the remaining data in the buffer */
+@@ -995,7 +957,10 @@ static int sahara_sha_process(struct ahash_request *req)
+ return ret;
+
+ if (rctx->first) {
+- sahara_sha_hw_data_descriptor_create(dev, rctx, req, 0);
++ ret = sahara_sha_hw_data_descriptor_create(dev, rctx, req, 0);
++ if (ret)
++ return ret;
++
+ dev->hw_desc[0]->next = 0;
+ rctx->first = 0;
+ } else {
+@@ -1003,7 +968,10 @@ static int sahara_sha_process(struct ahash_request *req)
+
+ sahara_sha_hw_context_descriptor_create(dev, rctx, req, 0);
+ dev->hw_desc[0]->next = dev->hw_phys_desc[1];
+- sahara_sha_hw_data_descriptor_create(dev, rctx, req, 1);
++ ret = sahara_sha_hw_data_descriptor_create(dev, rctx, req, 1);
++ if (ret)
++ return ret;
++
+ dev->hw_desc[1]->next = 0;
+ }
+
+@@ -1016,18 +984,19 @@ static int sahara_sha_process(struct ahash_request *req)
+
+ timeout = wait_for_completion_timeout(&dev->dma_completion,
+ msecs_to_jiffies(SAHARA_TIMEOUT_MS));
+- if (!timeout) {
+- dev_err(dev->device, "SHA timeout\n");
+- return -ETIMEDOUT;
+- }
+
+ if (rctx->sg_in_idx)
+ dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
+ DMA_TO_DEVICE);
+
++ if (!timeout) {
++ dev_err(dev->device, "SHA timeout\n");
++ return -ETIMEDOUT;
++ }
++
+ memcpy(rctx->context, dev->context_base, rctx->context_size);
+
+- if (req->result)
++ if (req->result && rctx->last)
+ memcpy(req->result, rctx->context, rctx->digest_size);
+
+ return 0;
+@@ -1171,8 +1140,7 @@ static int sahara_sha_import(struct ahash_request *req, const void *in)
+ static int sahara_sha_cra_init(struct crypto_tfm *tfm)
+ {
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+- sizeof(struct sahara_sha_reqctx) +
+- SHA_BUFFER_LEN + SHA256_BLOCK_SIZE);
++ sizeof(struct sahara_sha_reqctx));
+
+ return 0;
+ }
+diff --git a/drivers/crypto/virtio/virtio_crypto_algs.c b/drivers/crypto/virtio/virtio_crypto_algs.c
+index 583c0b535d13b..8b577e4aa39fa 100644
+--- a/drivers/crypto/virtio/virtio_crypto_algs.c
++++ b/drivers/crypto/virtio/virtio_crypto_algs.c
+@@ -255,7 +255,7 @@ static int virtio_crypto_alg_skcipher_close_session(
+ vcrypto->ctrl_status.status,
+ destroy_session->session_id);
+
+- return -EINVAL;
++ err = -EINVAL;
+ }
+ spin_unlock(&vcrypto->ctrl_lock);
+
+diff --git a/drivers/crypto/virtio/virtio_crypto_common.h b/drivers/crypto/virtio/virtio_crypto_common.h
+index a24f85c589e7e..faa804a15299d 100644
+--- a/drivers/crypto/virtio/virtio_crypto_common.h
++++ b/drivers/crypto/virtio/virtio_crypto_common.h
+@@ -10,6 +10,7 @@
+ #include <linux/virtio.h>
+ #include <linux/crypto.h>
+ #include <linux/spinlock.h>
++#include <linux/interrupt.h>
+ #include <crypto/aead.h>
+ #include <crypto/aes.h>
+ #include <crypto/engine.h>
+@@ -27,6 +28,7 @@ struct data_queue {
+ char name[32];
+
+ struct crypto_engine *engine;
++ struct tasklet_struct done_task;
+ };
+
+ struct virtio_crypto {
+diff --git a/drivers/crypto/virtio/virtio_crypto_core.c b/drivers/crypto/virtio/virtio_crypto_core.c
+index e2375d9923087..f1eff8f752cd5 100644
+--- a/drivers/crypto/virtio/virtio_crypto_core.c
++++ b/drivers/crypto/virtio/virtio_crypto_core.c
+@@ -22,27 +22,28 @@ virtcrypto_clear_request(struct virtio_crypto_request *vc_req)
+ }
+ }
+
+-static void virtcrypto_dataq_callback(struct virtqueue *vq)
++static void virtcrypto_done_task(unsigned long data)
+ {
+- struct virtio_crypto *vcrypto = vq->vdev->priv;
++ struct data_queue *data_vq = (struct data_queue *)data;
++ struct virtqueue *vq = data_vq->vq;
+ struct virtio_crypto_request *vc_req;
+- unsigned long flags;
+ unsigned int len;
+- unsigned int qid = vq->index;
+
+- spin_lock_irqsave(&vcrypto->data_vq[qid].lock, flags);
+ do {
+ virtqueue_disable_cb(vq);
+ while ((vc_req = virtqueue_get_buf(vq, &len)) != NULL) {
+- spin_unlock_irqrestore(
+- &vcrypto->data_vq[qid].lock, flags);
+ if (vc_req->alg_cb)
+ vc_req->alg_cb(vc_req, len);
+- spin_lock_irqsave(
+- &vcrypto->data_vq[qid].lock, flags);
+ }
+ } while (!virtqueue_enable_cb(vq));
+- spin_unlock_irqrestore(&vcrypto->data_vq[qid].lock, flags);
++}
++
++static void virtcrypto_dataq_callback(struct virtqueue *vq)
++{
++ struct virtio_crypto *vcrypto = vq->vdev->priv;
++ struct data_queue *dq = &vcrypto->data_vq[vq->index];
++
++ tasklet_schedule(&dq->done_task);
+ }
+
+ static int virtcrypto_find_vqs(struct virtio_crypto *vi)
+@@ -99,6 +100,8 @@ static int virtcrypto_find_vqs(struct virtio_crypto *vi)
+ ret = -ENOMEM;
+ goto err_engine;
+ }
++ tasklet_init(&vi->data_vq[i].done_task, virtcrypto_done_task,
++ (unsigned long)&vi->data_vq[i]);
+ }
+
+ kfree(names);
+@@ -431,11 +434,14 @@ static void virtcrypto_free_unused_reqs(struct virtio_crypto *vcrypto)
+ static void virtcrypto_remove(struct virtio_device *vdev)
+ {
+ struct virtio_crypto *vcrypto = vdev->priv;
++ int i;
+
+ dev_info(&vdev->dev, "Start virtcrypto_remove.\n");
+
+ if (virtcrypto_dev_started(vcrypto))
+ virtcrypto_dev_stop(vcrypto);
++ for (i = 0; i < vcrypto->max_data_queues; i++)
++ tasklet_kill(&vcrypto->data_vq[i].done_task);
+ vdev->config->reset(vdev);
+ virtcrypto_free_unused_reqs(vcrypto);
+ virtcrypto_clear_crypto_engines(vcrypto);
+diff --git a/drivers/edac/thunderx_edac.c b/drivers/edac/thunderx_edac.c
+index f13674081cb6b..4dca21b39bf73 100644
+--- a/drivers/edac/thunderx_edac.c
++++ b/drivers/edac/thunderx_edac.c
+@@ -1133,7 +1133,7 @@ static irqreturn_t thunderx_ocx_com_threaded_isr(int irq, void *irq_id)
+ decode_register(other, OCX_OTHER_SIZE,
+ ocx_com_errors, ctx->reg_com_int);
+
+- strncat(msg, other, OCX_MESSAGE_SIZE);
++ strlcat(msg, other, OCX_MESSAGE_SIZE);
+
+ for (lane = 0; lane < OCX_RX_LANES; lane++)
+ if (ctx->reg_com_int & BIT(lane)) {
+@@ -1142,12 +1142,12 @@ static irqreturn_t thunderx_ocx_com_threaded_isr(int irq, void *irq_id)
+ lane, ctx->reg_lane_int[lane],
+ lane, ctx->reg_lane_stat11[lane]);
+
+- strncat(msg, other, OCX_MESSAGE_SIZE);
++ strlcat(msg, other, OCX_MESSAGE_SIZE);
+
+ decode_register(other, OCX_OTHER_SIZE,
+ ocx_lane_errors,
+ ctx->reg_lane_int[lane]);
+- strncat(msg, other, OCX_MESSAGE_SIZE);
++ strlcat(msg, other, OCX_MESSAGE_SIZE);
+ }
+
+ if (ctx->reg_com_int & OCX_COM_INT_CE)
+@@ -1217,7 +1217,7 @@ static irqreturn_t thunderx_ocx_lnk_threaded_isr(int irq, void *irq_id)
+ decode_register(other, OCX_OTHER_SIZE,
+ ocx_com_link_errors, ctx->reg_com_link_int);
+
+- strncat(msg, other, OCX_MESSAGE_SIZE);
++ strlcat(msg, other, OCX_MESSAGE_SIZE);
+
+ if (ctx->reg_com_link_int & OCX_COM_LINK_INT_UE)
+ edac_device_handle_ue(ocx->edac_dev, 0, 0, msg);
+@@ -1896,7 +1896,7 @@ static irqreturn_t thunderx_l2c_threaded_isr(int irq, void *irq_id)
+
+ decode_register(other, L2C_OTHER_SIZE, l2_errors, ctx->reg_int);
+
+- strncat(msg, other, L2C_MESSAGE_SIZE);
++ strlcat(msg, other, L2C_MESSAGE_SIZE);
+
+ if (ctx->reg_int & mask_ue)
+ edac_device_handle_ue(l2c->edac_dev, 0, 0, msg);
+diff --git a/drivers/firmware/meson/meson_sm.c b/drivers/firmware/meson/meson_sm.c
+index d081a6312627b..bf19dd66c2137 100644
+--- a/drivers/firmware/meson/meson_sm.c
++++ b/drivers/firmware/meson/meson_sm.c
+@@ -313,11 +313,14 @@ static int __init meson_sm_probe(struct platform_device *pdev)
+
+ platform_set_drvdata(pdev, fw);
+
+- pr_info("secure-monitor enabled\n");
++ if (devm_of_platform_populate(dev))
++ goto out_in_base;
+
+ if (sysfs_create_group(&pdev->dev.kobj, &meson_sm_sysfs_attr_group))
+ goto out_in_base;
+
++ pr_info("secure-monitor enabled\n");
++
+ return 0;
+
+ out_in_base:
+diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c
+index c2fafe49c2e85..b313337e4f19a 100644
+--- a/drivers/firmware/ti_sci.c
++++ b/drivers/firmware/ti_sci.c
+@@ -161,7 +161,7 @@ static int ti_sci_debugfs_create(struct platform_device *pdev,
+ {
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+- char debug_name[50] = "ti_sci_debug@";
++ char debug_name[50];
+
+ /* Debug region is optional */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+@@ -178,10 +178,10 @@ static int ti_sci_debugfs_create(struct platform_device *pdev,
+ /* Setup NULL termination */
+ info->debug_buffer[info->debug_region_size] = 0;
+
+- info->d = debugfs_create_file(strncat(debug_name, dev_name(dev),
+- sizeof(debug_name) -
+- sizeof("ti_sci_debug@")),
+- 0444, NULL, info, &ti_sci_debug_fops);
++ snprintf(debug_name, sizeof(debug_name), "ti_sci_debug@%s",
++ dev_name(dev));
++ info->d = debugfs_create_file(debug_name, 0444, NULL, info,
++ &ti_sci_debug_fops);
+ if (IS_ERR(info->d))
+ return PTR_ERR(info->d);
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+index beb199d13451b..aa057ceecf068 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+@@ -342,6 +342,9 @@ static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
++ if (!adev->didt_rreg)
++ return -EOPNOTSUPP;
++
+ r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
+ if (r < 0) {
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+@@ -401,6 +404,9 @@ static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
++ if (!adev->didt_wreg)
++ return -EOPNOTSUPP;
++
+ r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
+ if (r < 0) {
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+@@ -459,7 +465,7 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
+ int r;
+
+ if (!adev->smc_rreg)
+- return -EPERM;
++ return -EOPNOTSUPP;
+
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+@@ -521,7 +527,7 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
+ int r;
+
+ if (!adev->smc_wreg)
+- return -EPERM;
++ return -EOPNOTSUPP;
+
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
+index 529bb6c6ac6f5..e8c0e77e1b018 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
++++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
+@@ -1615,9 +1615,11 @@ static void soc15_common_get_clockgating_state(void *handle, u32 *flags)
+ if (amdgpu_sriov_vf(adev))
+ *flags = 0;
+
+- adev->nbio.funcs->get_clockgating_state(adev, flags);
++ if (adev->nbio.funcs && adev->nbio.funcs->get_clockgating_state)
++ adev->nbio.funcs->get_clockgating_state(adev, flags);
+
+- adev->hdp.funcs->get_clock_gating_state(adev, flags);
++ if (adev->hdp.funcs && adev->hdp.funcs->get_clock_gating_state)
++ adev->hdp.funcs->get_clock_gating_state(adev, flags);
+
+ if (adev->asic_type != CHIP_ALDEBARAN) {
+
+@@ -1633,9 +1635,11 @@ static void soc15_common_get_clockgating_state(void *handle, u32 *flags)
+ }
+
+ /* AMD_CG_SUPPORT_ROM_MGCG */
+- adev->smuio.funcs->get_clock_gating_state(adev, flags);
++ if (adev->smuio.funcs && adev->smuio.funcs->get_clock_gating_state)
++ adev->smuio.funcs->get_clock_gating_state(adev, flags);
+
+- adev->df.funcs->get_clockgating_state(adev, flags);
++ if (adev->df.funcs && adev->df.funcs->get_clockgating_state)
++ adev->df.funcs->get_clockgating_state(adev, flags);
+ }
+
+ static int soc15_common_set_powergating_state(void *handle,
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+index 611969bf45207..9bfc465d08fb0 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+@@ -2924,6 +2924,8 @@ static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
+ result = smu7_get_evv_voltages(hwmgr);
+ if (result) {
+ pr_info("Get EVV Voltage Failed. Abort Driver loading!\n");
++ kfree(hwmgr->backend);
++ hwmgr->backend = NULL;
+ return -EINVAL;
+ }
+ } else {
+@@ -2969,8 +2971,10 @@ static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
+ }
+
+ result = smu7_update_edc_leakage_table(hwmgr);
+- if (result)
++ if (result) {
++ smu7_hwmgr_backend_fini(hwmgr);
+ return result;
++ }
+
+ return 0;
+ }
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/kv_dpm.c b/drivers/gpu/drm/amd/pm/powerplay/kv_dpm.c
+index 6ba4c2ae69a63..309c9f0b8f83a 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/kv_dpm.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/kv_dpm.c
+@@ -2731,10 +2731,8 @@ static int kv_parse_power_table(struct amdgpu_device *adev)
+ non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
+ &non_clock_info_array->nonClockInfo[non_clock_array_index];
+ ps = kzalloc(sizeof(struct kv_ps), GFP_KERNEL);
+- if (ps == NULL) {
+- kfree(adev->pm.dpm.ps);
++ if (ps == NULL)
+ return -ENOMEM;
+- }
+ adev->pm.dpm.ps[i].ps_priv = ps;
+ k = 0;
+ idx = (u8 *)&power_state->v2.clockInfoIndex[0];
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/si_dpm.c b/drivers/gpu/drm/amd/pm/powerplay/si_dpm.c
+index 66fc63f1f1c17..09e78575db877 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/si_dpm.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/si_dpm.c
+@@ -7346,10 +7346,9 @@ static int si_dpm_init(struct amdgpu_device *adev)
+ kcalloc(4,
+ sizeof(struct amdgpu_clock_voltage_dependency_entry),
+ GFP_KERNEL);
+- if (!adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
+- amdgpu_free_extended_power_table(adev);
++ if (!adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries)
+ return -ENOMEM;
+- }
++
+ adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
+ adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
+ adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
+diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c
+index fccd6fbcc257f..29c04012b7011 100644
+--- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c
++++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c
+@@ -403,7 +403,8 @@ static int _cdns_mhdp_hdcp_disable(struct cdns_mhdp_device *mhdp)
+
+ static int _cdns_mhdp_hdcp_enable(struct cdns_mhdp_device *mhdp, u8 content_type)
+ {
+- int ret, tries = 3;
++ int ret = -EINVAL;
++ int tries = 3;
+ u32 i;
+
+ for (i = 0; i < tries; i++) {
+diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
+index 14d6717429639..4c6f3052156bd 100644
+--- a/drivers/gpu/drm/bridge/tc358767.c
++++ b/drivers/gpu/drm/bridge/tc358767.c
+@@ -1633,7 +1633,7 @@ static int tc_probe(struct i2c_client *client, const struct i2c_device_id *id)
+ } else {
+ if (tc->hpd_pin < 0 || tc->hpd_pin > 1) {
+ dev_err(dev, "failed to parse HPD number\n");
+- return ret;
++ return -EINVAL;
+ }
+ }
+
+diff --git a/drivers/gpu/drm/bridge/ti-tpd12s015.c b/drivers/gpu/drm/bridge/ti-tpd12s015.c
+index e0e015243a602..b588fea12502d 100644
+--- a/drivers/gpu/drm/bridge/ti-tpd12s015.c
++++ b/drivers/gpu/drm/bridge/ti-tpd12s015.c
+@@ -179,7 +179,7 @@ static int tpd12s015_probe(struct platform_device *pdev)
+ return 0;
+ }
+
+-static int __exit tpd12s015_remove(struct platform_device *pdev)
++static int tpd12s015_remove(struct platform_device *pdev)
+ {
+ struct tpd12s015_device *tpd = platform_get_drvdata(pdev);
+
+@@ -197,7 +197,7 @@ MODULE_DEVICE_TABLE(of, tpd12s015_of_match);
+
+ static struct platform_driver tpd12s015_driver = {
+ .probe = tpd12s015_probe,
+- .remove = __exit_p(tpd12s015_remove),
++ .remove = tpd12s015_remove,
+ .driver = {
+ .name = "tpd12s015",
+ .of_match_table = tpd12s015_of_match,
+diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
+index 26a77a7359056..c5e0c652766c8 100644
+--- a/drivers/gpu/drm/drm_crtc.c
++++ b/drivers/gpu/drm/drm_crtc.c
+@@ -643,8 +643,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
+ struct drm_mode_set set;
+ uint32_t __user *set_connectors_ptr;
+ struct drm_modeset_acquire_ctx ctx;
+- int ret;
+- int i;
++ int ret, i, num_connectors = 0;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EOPNOTSUPP;
+@@ -799,6 +798,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
+ connector->name);
+
+ connector_set[i] = connector;
++ num_connectors++;
+ }
+ }
+
+@@ -807,7 +807,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
+ set.y = crtc_req->y;
+ set.mode = mode;
+ set.connectors = connector_set;
+- set.num_connectors = crtc_req->count_connectors;
++ set.num_connectors = num_connectors;
+ set.fb = fb;
+
+ if (drm_drv_uses_atomic_modeset(dev))
+@@ -820,7 +820,7 @@ out:
+ drm_framebuffer_put(fb);
+
+ if (connector_set) {
+- for (i = 0; i < crtc_req->count_connectors; i++) {
++ for (i = 0; i < num_connectors; i++) {
+ if (connector_set[i])
+ drm_connector_put(connector_set[i]);
+ }
+diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
+index 6f1791613757b..23409f1916ee6 100644
+--- a/drivers/gpu/drm/drm_drv.c
++++ b/drivers/gpu/drm/drm_drv.c
+@@ -894,8 +894,11 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
+ goto err_minors;
+ }
+
+- if (drm_core_check_feature(dev, DRIVER_MODESET))
+- drm_modeset_register_all(dev);
++ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
++ ret = drm_modeset_register_all(dev);
++ if (ret)
++ goto err_unload;
++ }
+
+ DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
+ driver->name, driver->major, driver->minor,
+@@ -905,6 +908,9 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
+
+ goto out_unlock;
+
++err_unload:
++ if (dev->driver->unload)
++ dev->driver->unload(dev);
+ err_minors:
+ remove_compat_control_link(dev);
+ drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
+diff --git a/drivers/gpu/drm/exynos/exynos_drm_dma.c b/drivers/gpu/drm/exynos/exynos_drm_dma.c
+index bf33c3084cb41..6b4d6da3b1f4e 100644
+--- a/drivers/gpu/drm/exynos/exynos_drm_dma.c
++++ b/drivers/gpu/drm/exynos/exynos_drm_dma.c
+@@ -108,18 +108,16 @@ int exynos_drm_register_dma(struct drm_device *drm, struct device *dev,
+ return 0;
+
+ if (!priv->mapping) {
+- void *mapping;
++ void *mapping = NULL;
+
+ if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU))
+ mapping = arm_iommu_create_mapping(&platform_bus_type,
+ EXYNOS_DEV_ADDR_START, EXYNOS_DEV_ADDR_SIZE);
+ else if (IS_ENABLED(CONFIG_IOMMU_DMA))
+ mapping = iommu_get_domain_for_dev(priv->dma_dev);
+- else
+- mapping = ERR_PTR(-ENODEV);
+
+- if (IS_ERR(mapping))
+- return PTR_ERR(mapping);
++ if (!mapping)
++ return -ENODEV;
+ priv->mapping = mapping;
+ }
+
+diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
+index 7655142a4651c..42b6b001405a5 100644
+--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
++++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
+@@ -1861,6 +1861,8 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
+ return ret;
+
+ crtc = exynos_drm_crtc_get_by_type(drm_dev, EXYNOS_DISPLAY_TYPE_HDMI);
++ if (IS_ERR(crtc))
++ return PTR_ERR(crtc);
+ crtc->pipe_clk = &hdata->phy_clk;
+
+ ret = hdmi_create_connector(encoder);
+diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
+index 169f9de4a12a7..3100957225a70 100644
+--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
++++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
+@@ -269,6 +269,7 @@ static void mdp4_crtc_atomic_disable(struct drm_crtc *crtc,
+ {
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ struct mdp4_kms *mdp4_kms = get_kms(crtc);
++ unsigned long flags;
+
+ DBG("%s", mdp4_crtc->name);
+
+@@ -281,6 +282,14 @@ static void mdp4_crtc_atomic_disable(struct drm_crtc *crtc,
+ mdp_irq_unregister(&mdp4_kms->base, &mdp4_crtc->err);
+ mdp4_disable(mdp4_kms);
+
++ if (crtc->state->event && !crtc->state->active) {
++ WARN_ON(mdp4_crtc->event);
++ spin_lock_irqsave(&mdp4_kms->dev->event_lock, flags);
++ drm_crtc_send_vblank_event(crtc, crtc->state->event);
++ crtc->state->event = NULL;
++ spin_unlock_irqrestore(&mdp4_kms->dev->event_lock, flags);
++ }
++
+ mdp4_crtc->enabled = false;
+ }
+
+diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+index 6a917fe69a833..4b5b0a4b051b0 100644
+--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
++++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+@@ -588,7 +588,9 @@ static int dsi_phy_enable_resource(struct msm_dsi_phy *phy)
+ struct device *dev = &phy->pdev->dev;
+ int ret;
+
+- pm_runtime_get_sync(dev);
++ ret = pm_runtime_resume_and_get(dev);
++ if (ret)
++ return ret;
+
+ ret = clk_prepare_enable(phy->ahb_clk);
+ if (ret) {
+diff --git a/drivers/gpu/drm/nouveau/nv04_fence.c b/drivers/gpu/drm/nouveau/nv04_fence.c
+index 5b71a5a5cd85c..cdbc75e3d1f66 100644
+--- a/drivers/gpu/drm/nouveau/nv04_fence.c
++++ b/drivers/gpu/drm/nouveau/nv04_fence.c
+@@ -39,7 +39,7 @@ struct nv04_fence_priv {
+ static int
+ nv04_fence_emit(struct nouveau_fence *fence)
+ {
+- struct nvif_push *push = fence->channel->chan.push;
++ struct nvif_push *push = unrcu_pointer(fence->channel)->chan.push;
+ int ret = PUSH_WAIT(push, 2);
+ if (ret == 0) {
+ PUSH_NVSQ(push, NV_SW, 0x0150, fence->base.seqno);
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c
+index 6cb5eefa45e9a..5a08458fe1b7f 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c
+@@ -31,7 +31,7 @@ tu102_vmm_flush(struct nvkm_vmm *vmm, int depth)
+
+ type |= 0x00000001; /* PAGE_ALL */
+ if (atomic_read(&vmm->engref[NVKM_SUBDEV_BAR]))
+- type |= 0x00000004; /* HUB_ONLY */
++ type |= 0x00000006; /* HUB_ONLY | ALL PDB (hack) */
+
+ mutex_lock(&vmm->mmu->mutex);
+
+diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
+index f86e205781430..60871d255a4df 100644
+--- a/drivers/gpu/drm/omapdrm/omap_drv.c
++++ b/drivers/gpu/drm/omapdrm/omap_drv.c
+@@ -68,7 +68,6 @@ static void omap_atomic_commit_tail(struct drm_atomic_state *old_state)
+ {
+ struct drm_device *dev = old_state->dev;
+ struct omap_drm_private *priv = dev->dev_private;
+- bool fence_cookie = dma_fence_begin_signalling();
+
+ dispc_runtime_get(priv->dispc);
+
+@@ -91,6 +90,8 @@ static void omap_atomic_commit_tail(struct drm_atomic_state *old_state)
+ omap_atomic_wait_for_completion(dev, old_state);
+
+ drm_atomic_helper_commit_planes(dev, old_state, 0);
++
++ drm_atomic_helper_commit_hw_done(old_state);
+ } else {
+ /*
+ * OMAP3 DSS seems to have issues with the work-around above,
+@@ -100,11 +101,9 @@ static void omap_atomic_commit_tail(struct drm_atomic_state *old_state)
+ drm_atomic_helper_commit_planes(dev, old_state, 0);
+
+ drm_atomic_helper_commit_modeset_enables(dev, old_state);
+- }
+
+- drm_atomic_helper_commit_hw_done(old_state);
+-
+- dma_fence_end_signalling(fence_cookie);
++ drm_atomic_helper_commit_hw_done(old_state);
++ }
+
+ /*
+ * Wait for completion of the page flips to ensure that old buffers
+diff --git a/drivers/gpu/drm/panel/panel-elida-kd35t133.c b/drivers/gpu/drm/panel/panel-elida-kd35t133.c
+index 80227617a4d6e..ccb7d521f30ae 100644
+--- a/drivers/gpu/drm/panel/panel-elida-kd35t133.c
++++ b/drivers/gpu/drm/panel/panel-elida-kd35t133.c
+@@ -112,6 +112,8 @@ static int kd35t133_unprepare(struct drm_panel *panel)
+ return ret;
+ }
+
++ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
++
+ regulator_disable(ctx->iovcc);
+ regulator_disable(ctx->vdd);
+
+diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
+index 2dd85ba1faa27..d3ad98bd25907 100644
+--- a/drivers/gpu/drm/radeon/r100.c
++++ b/drivers/gpu/drm/radeon/r100.c
+@@ -2320,7 +2320,7 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
+ switch (prim_walk) {
+ case 1:
+ for (i = 0; i < track->num_arrays; i++) {
+- size = track->arrays[i].esize * track->max_indx * 4;
++ size = track->arrays[i].esize * track->max_indx * 4UL;
+ if (track->arrays[i].robj == NULL) {
+ DRM_ERROR("(PW %u) Vertex array %u no buffer "
+ "bound\n", prim_walk, i);
+@@ -2339,7 +2339,7 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
+ break;
+ case 2:
+ for (i = 0; i < track->num_arrays; i++) {
+- size = track->arrays[i].esize * (nverts - 1) * 4;
++ size = track->arrays[i].esize * (nverts - 1) * 4UL;
+ if (track->arrays[i].robj == NULL) {
+ DRM_ERROR("(PW %u) Vertex array %u no buffer "
+ "bound\n", prim_walk, i);
+diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
+index 8be4799a98eff..6ade5dd470d5f 100644
+--- a/drivers/gpu/drm/radeon/r600_cs.c
++++ b/drivers/gpu/drm/radeon/r600_cs.c
+@@ -1277,7 +1277,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
+ return -EINVAL;
+ }
+ tmp = (reg - CB_COLOR0_BASE) / 4;
+- track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8;
++ track->cb_color_bo_offset[tmp] = (u64)radeon_get_ib_value(p, idx) << 8;
+ ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
+ track->cb_color_base_last[tmp] = ib[idx];
+ track->cb_color_bo[tmp] = reloc->robj;
+@@ -1304,7 +1304,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+- track->htile_offset = radeon_get_ib_value(p, idx) << 8;
++ track->htile_offset = (u64)radeon_get_ib_value(p, idx) << 8;
+ ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
+ track->htile_bo = reloc->robj;
+ track->db_dirty = true;
+diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
+index 573154268d43a..6337fad441df3 100644
+--- a/drivers/gpu/drm/radeon/radeon_display.c
++++ b/drivers/gpu/drm/radeon/radeon_display.c
+@@ -681,11 +681,16 @@ static void radeon_crtc_init(struct drm_device *dev, int index)
+ if (radeon_crtc == NULL)
+ return;
+
++ radeon_crtc->flip_queue = alloc_workqueue("radeon-crtc", WQ_HIGHPRI, 0);
++ if (!radeon_crtc->flip_queue) {
++ kfree(radeon_crtc);
++ return;
++ }
++
+ drm_crtc_init(dev, &radeon_crtc->base, &radeon_crtc_funcs);
+
+ drm_mode_crtc_set_gamma_size(&radeon_crtc->base, 256);
+ radeon_crtc->crtc_id = index;
+- radeon_crtc->flip_queue = alloc_workqueue("radeon-crtc", WQ_HIGHPRI, 0);
+ rdev->mode_info.crtcs[index] = radeon_crtc;
+
+ if (rdev->family >= CHIP_BONAIRE) {
+diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
+index bb53016f3138a..f2fc8ef991397 100644
+--- a/drivers/gpu/drm/radeon/radeon_vm.c
++++ b/drivers/gpu/drm/radeon/radeon_vm.c
+@@ -1204,13 +1204,17 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
+ r = radeon_bo_create(rdev, pd_size, align, true,
+ RADEON_GEM_DOMAIN_VRAM, 0, NULL,
+ NULL, &vm->page_directory);
+- if (r)
++ if (r) {
++ kfree(vm->page_tables);
++ vm->page_tables = NULL;
+ return r;
+-
++ }
+ r = radeon_vm_clear_bo(rdev, vm->page_directory);
+ if (r) {
+ radeon_bo_unref(&vm->page_directory);
+ vm->page_directory = NULL;
++ kfree(vm->page_tables);
++ vm->page_tables = NULL;
+ return r;
+ }
+
+diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
+index 4679b798a0384..e4481e5a15e18 100644
+--- a/drivers/gpu/drm/radeon/si.c
++++ b/drivers/gpu/drm/radeon/si.c
+@@ -3611,6 +3611,10 @@ static int si_cp_start(struct radeon_device *rdev)
+ for (i = RADEON_RING_TYPE_GFX_INDEX; i <= CAYMAN_RING_TYPE_CP2_INDEX; ++i) {
+ ring = &rdev->ring[i];
+ r = radeon_ring_lock(rdev, ring, 2);
++ if (r) {
++ DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
++ return r;
++ }
+
+ /* clear the compute context state */
+ radeon_ring_write(ring, PACKET3_COMPUTE(PACKET3_CLEAR_STATE, 0));
+diff --git a/drivers/gpu/drm/radeon/sumo_dpm.c b/drivers/gpu/drm/radeon/sumo_dpm.c
+index f74f381af05fd..d49c145db4370 100644
+--- a/drivers/gpu/drm/radeon/sumo_dpm.c
++++ b/drivers/gpu/drm/radeon/sumo_dpm.c
+@@ -1493,8 +1493,10 @@ static int sumo_parse_power_table(struct radeon_device *rdev)
+ non_clock_array_index = power_state->v2.nonClockInfoIndex;
+ non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
+ &non_clock_info_array->nonClockInfo[non_clock_array_index];
+- if (!rdev->pm.power_state[i].clock_info)
++ if (!rdev->pm.power_state[i].clock_info) {
++ kfree(rdev->pm.dpm.ps);
+ return -EINVAL;
++ }
+ ps = kzalloc(sizeof(struct sumo_ps), GFP_KERNEL);
+ if (ps == NULL) {
+ kfree(rdev->pm.dpm.ps);
+diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c
+index 08ea1c864cb23..ef1cc7bad20a7 100644
+--- a/drivers/gpu/drm/radeon/trinity_dpm.c
++++ b/drivers/gpu/drm/radeon/trinity_dpm.c
+@@ -1726,8 +1726,10 @@ static int trinity_parse_power_table(struct radeon_device *rdev)
+ non_clock_array_index = power_state->v2.nonClockInfoIndex;
+ non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
+ &non_clock_info_array->nonClockInfo[non_clock_array_index];
+- if (!rdev->pm.power_state[i].clock_info)
++ if (!rdev->pm.power_state[i].clock_info) {
++ kfree(rdev->pm.dpm.ps);
+ return -EINVAL;
++ }
+ ps = kzalloc(sizeof(struct sumo_ps), GFP_KERNEL);
+ if (ps == NULL) {
+ kfree(rdev->pm.dpm.ps);
+diff --git a/drivers/gpu/drm/tidss/tidss_kms.c b/drivers/gpu/drm/tidss/tidss_kms.c
+index 666e527a0acf1..cb0ab224282f3 100644
+--- a/drivers/gpu/drm/tidss/tidss_kms.c
++++ b/drivers/gpu/drm/tidss/tidss_kms.c
+@@ -4,8 +4,6 @@
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
+ */
+
+-#include <linux/dma-fence.h>
+-
+ #include <drm/drm_atomic.h>
+ #include <drm/drm_atomic_helper.h>
+ #include <drm/drm_bridge.h>
+@@ -28,7 +26,6 @@ static void tidss_atomic_commit_tail(struct drm_atomic_state *old_state)
+ {
+ struct drm_device *ddev = old_state->dev;
+ struct tidss_device *tidss = to_tidss(ddev);
+- bool fence_cookie = dma_fence_begin_signalling();
+
+ dev_dbg(ddev->dev, "%s\n", __func__);
+
+@@ -39,7 +36,6 @@ static void tidss_atomic_commit_tail(struct drm_atomic_state *old_state)
+ drm_atomic_helper_commit_modeset_enables(ddev, old_state);
+
+ drm_atomic_helper_commit_hw_done(old_state);
+- dma_fence_end_signalling(fence_cookie);
+ drm_atomic_helper_wait_for_flip_done(ddev, old_state);
+
+ drm_atomic_helper_cleanup_planes(ddev, old_state);
+diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+index 6b03f89a98d44..6924488274ddf 100644
+--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
++++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+@@ -140,7 +140,7 @@ static int tilcdc_irq_install(struct drm_device *dev, unsigned int irq)
+ if (ret)
+ return ret;
+
+- priv->irq_enabled = false;
++ priv->irq_enabled = true;
+
+ return 0;
+ }
+diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
+index 51a8e8d10519f..6da69b1fde580 100644
+--- a/drivers/hid/wacom_wac.c
++++ b/drivers/hid/wacom_wac.c
+@@ -2623,8 +2623,8 @@ static void wacom_wac_finger_slot(struct wacom_wac *wacom_wac,
+ {
+ struct hid_data *hid_data = &wacom_wac->hid_data;
+ bool mt = wacom_wac->features.touch_max > 1;
+- bool prox = hid_data->tipswitch &&
+- report_touch_events(wacom_wac);
++ bool touch_down = hid_data->tipswitch && hid_data->confidence;
++ bool prox = touch_down && report_touch_events(wacom_wac);
+
+ if (touch_is_muted(wacom_wac)) {
+ if (!wacom_wac->shared->touch_down)
+@@ -2674,24 +2674,6 @@ static void wacom_wac_finger_slot(struct wacom_wac *wacom_wac,
+ }
+ }
+
+-static bool wacom_wac_slot_is_active(struct input_dev *dev, int key)
+-{
+- struct input_mt *mt = dev->mt;
+- struct input_mt_slot *s;
+-
+- if (!mt)
+- return false;
+-
+- for (s = mt->slots; s != mt->slots + mt->num_slots; s++) {
+- if (s->key == key &&
+- input_mt_get_value(s, ABS_MT_TRACKING_ID) >= 0) {
+- return true;
+- }
+- }
+-
+- return false;
+-}
+-
+ static void wacom_wac_finger_event(struct hid_device *hdev,
+ struct hid_field *field, struct hid_usage *usage, __s32 value)
+ {
+@@ -2742,14 +2724,8 @@ static void wacom_wac_finger_event(struct hid_device *hdev,
+ }
+
+ if (usage->usage_index + 1 == field->report_count) {
+- if (equivalent_usage == wacom_wac->hid_data.last_slot_field) {
+- bool touch_removed = wacom_wac_slot_is_active(wacom_wac->touch_input,
+- wacom_wac->hid_data.id) && !wacom_wac->hid_data.tipswitch;
+-
+- if (wacom_wac->hid_data.confidence || touch_removed) {
+- wacom_wac_finger_slot(wacom_wac, wacom_wac->touch_input);
+- }
+- }
++ if (equivalent_usage == wacom_wac->hid_data.last_slot_field)
++ wacom_wac_finger_slot(wacom_wac, wacom_wac->touch_input);
+ }
+ }
+
+diff --git a/drivers/hwmon/corsair-psu.c b/drivers/hwmon/corsair-psu.c
+index 14389fd7afb89..ae983e715110e 100644
+--- a/drivers/hwmon/corsair-psu.c
++++ b/drivers/hwmon/corsair-psu.c
+@@ -808,7 +808,23 @@ static struct hid_driver corsairpsu_driver = {
+ .reset_resume = corsairpsu_resume,
+ #endif
+ };
+-module_hid_driver(corsairpsu_driver);
++
++static int __init corsair_init(void)
++{
++ return hid_register_driver(&corsairpsu_driver);
++}
++
++static void __exit corsair_exit(void)
++{
++ hid_unregister_driver(&corsairpsu_driver);
++}
++
++/*
++ * With module_init() the driver would load before the HID bus when
++ * built-in, so use late_initcall() instead.
++ */
++late_initcall(corsair_init);
++module_exit(corsair_exit);
+
+ MODULE_LICENSE("GPL");
+ MODULE_AUTHOR("Wilken Gottwalt <wilken.gottwalt@posteo.net>");
+diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h
+index 2305f32fedf68..a0f3f0ba3380c 100644
+--- a/drivers/hwtracing/coresight/coresight-etm4x.h
++++ b/drivers/hwtracing/coresight/coresight-etm4x.h
+@@ -944,7 +944,7 @@ struct etmv4_drvdata {
+ u8 ctxid_size;
+ u8 vmid_size;
+ u8 ccsize;
+- u8 ccitmin;
++ u16 ccitmin;
+ u8 s_ex_level;
+ u8 ns_ex_level;
+ u8 q_support;
+diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c
+index 13c14eb175e94..6abcf975a2db9 100644
+--- a/drivers/i2c/busses/i2c-rk3x.c
++++ b/drivers/i2c/busses/i2c-rk3x.c
+@@ -178,6 +178,7 @@ struct rk3x_i2c_soc_data {
+ * @clk: function clk for rk3399 or function & Bus clks for others
+ * @pclk: Bus clk for rk3399
+ * @clk_rate_nb: i2c clk rate change notify
++ * @irq: irq number
+ * @t: I2C known timing information
+ * @lock: spinlock for the i2c bus
+ * @wait: the waitqueue to wait for i2c transfer
+@@ -200,6 +201,7 @@ struct rk3x_i2c {
+ struct clk *clk;
+ struct clk *pclk;
+ struct notifier_block clk_rate_nb;
++ int irq;
+
+ /* Settings */
+ struct i2c_timings t;
+@@ -1087,13 +1089,18 @@ static int rk3x_i2c_xfer_common(struct i2c_adapter *adap,
+
+ spin_unlock_irqrestore(&i2c->lock, flags);
+
+- rk3x_i2c_start(i2c);
+-
+ if (!polling) {
++ rk3x_i2c_start(i2c);
++
+ timeout = wait_event_timeout(i2c->wait, !i2c->busy,
+ msecs_to_jiffies(WAIT_TIMEOUT));
+ } else {
++ disable_irq(i2c->irq);
++ rk3x_i2c_start(i2c);
++
+ timeout = rk3x_i2c_wait_xfer_poll(i2c);
++
++ enable_irq(i2c->irq);
+ }
+
+ spin_lock_irqsave(&i2c->lock, flags);
+@@ -1301,6 +1308,8 @@ static int rk3x_i2c_probe(struct platform_device *pdev)
+ return ret;
+ }
+
++ i2c->irq = irq;
++
+ platform_set_drvdata(pdev, i2c);
+
+ if (i2c->soc_data->calc_timings == rk3x_i2c_v0_calc_timings) {
+diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
+index b49a1b170bb2f..0777848b33161 100644
+--- a/drivers/i2c/busses/i2c-s3c2410.c
++++ b/drivers/i2c/busses/i2c-s3c2410.c
+@@ -220,8 +220,17 @@ static bool is_ack(struct s3c24xx_i2c *i2c)
+ int tries;
+
+ for (tries = 50; tries; --tries) {
+- if (readl(i2c->regs + S3C2410_IICCON)
+- & S3C2410_IICCON_IRQPEND) {
++ unsigned long tmp = readl(i2c->regs + S3C2410_IICCON);
++
++ if (!(tmp & S3C2410_IICCON_ACKEN)) {
++ /*
++ * Wait a bit for the bus to stabilize,
++ * delay estimated experimentally.
++ */
++ usleep_range(100, 200);
++ return true;
++ }
++ if (tmp & S3C2410_IICCON_IRQPEND) {
+ if (!(readl(i2c->regs + S3C2410_IICSTAT)
+ & S3C2410_IICSTAT_LASTBIT))
+ return true;
+@@ -274,16 +283,6 @@ static void s3c24xx_i2c_message_start(struct s3c24xx_i2c *i2c,
+
+ stat |= S3C2410_IICSTAT_START;
+ writel(stat, i2c->regs + S3C2410_IICSTAT);
+-
+- if (i2c->quirks & QUIRK_POLL) {
+- while ((i2c->msg_num != 0) && is_ack(i2c)) {
+- i2c_s3c_irq_nextbyte(i2c, stat);
+- stat = readl(i2c->regs + S3C2410_IICSTAT);
+-
+- if (stat & S3C2410_IICSTAT_ARBITR)
+- dev_err(i2c->dev, "deal with arbitration loss\n");
+- }
+- }
+ }
+
+ static inline void s3c24xx_i2c_stop(struct s3c24xx_i2c *i2c, int ret)
+@@ -690,7 +689,7 @@ static void s3c24xx_i2c_wait_idle(struct s3c24xx_i2c *i2c)
+ static int s3c24xx_i2c_doxfer(struct s3c24xx_i2c *i2c,
+ struct i2c_msg *msgs, int num)
+ {
+- unsigned long timeout;
++ unsigned long timeout = 0;
+ int ret;
+
+ ret = s3c24xx_i2c_set_master(i2c);
+@@ -710,16 +709,19 @@ static int s3c24xx_i2c_doxfer(struct s3c24xx_i2c *i2c,
+ s3c24xx_i2c_message_start(i2c, msgs);
+
+ if (i2c->quirks & QUIRK_POLL) {
+- ret = i2c->msg_idx;
++ while ((i2c->msg_num != 0) && is_ack(i2c)) {
++ unsigned long stat = readl(i2c->regs + S3C2410_IICSTAT);
+
+- if (ret != num)
+- dev_dbg(i2c->dev, "incomplete xfer (%d)\n", ret);
++ i2c_s3c_irq_nextbyte(i2c, stat);
+
+- goto out;
++ stat = readl(i2c->regs + S3C2410_IICSTAT);
++ if (stat & S3C2410_IICSTAT_ARBITR)
++ dev_err(i2c->dev, "deal with arbitration loss\n");
++ }
++ } else {
++ timeout = wait_event_timeout(i2c->wait, i2c->msg_num == 0, HZ * 5);
+ }
+
+- timeout = wait_event_timeout(i2c->wait, i2c->msg_num == 0, HZ * 5);
+-
+ ret = i2c->msg_idx;
+
+ /*
+diff --git a/drivers/iio/adc/ad7091r-base.c b/drivers/iio/adc/ad7091r-base.c
+index 63b4d6ea4566a..811f04448d8d9 100644
+--- a/drivers/iio/adc/ad7091r-base.c
++++ b/drivers/iio/adc/ad7091r-base.c
+@@ -174,8 +174,8 @@ static const struct iio_info ad7091r_info = {
+
+ static irqreturn_t ad7091r_event_handler(int irq, void *private)
+ {
+- struct ad7091r_state *st = (struct ad7091r_state *) private;
+- struct iio_dev *iio_dev = dev_get_drvdata(st->dev);
++ struct iio_dev *iio_dev = private;
++ struct ad7091r_state *st = iio_priv(iio_dev);
+ unsigned int i, read_val;
+ int ret;
+ s64 timestamp = iio_get_time_ns(iio_dev);
+@@ -234,7 +234,7 @@ int ad7091r_probe(struct device *dev, const char *name,
+ if (irq) {
+ ret = devm_request_threaded_irq(dev, irq, NULL,
+ ad7091r_event_handler,
+- IRQF_TRIGGER_FALLING | IRQF_ONESHOT, name, st);
++ IRQF_TRIGGER_FALLING | IRQF_ONESHOT, name, iio_dev);
+ if (ret)
+ return ret;
+ }
+diff --git a/drivers/iio/adc/ad9467.c b/drivers/iio/adc/ad9467.c
+index dbfc8517cb8ab..ad94f0abc402a 100644
+--- a/drivers/iio/adc/ad9467.c
++++ b/drivers/iio/adc/ad9467.c
+@@ -119,9 +119,9 @@ struct ad9467_state {
+ struct spi_device *spi;
+ struct clk *clk;
+ unsigned int output_mode;
++ unsigned int (*scales)[2];
+
+ struct gpio_desc *pwrdown_gpio;
+- struct gpio_desc *reset_gpio;
+ };
+
+ static int ad9467_spi_read(struct spi_device *spi, unsigned int reg)
+@@ -163,9 +163,10 @@ static int ad9467_reg_access(struct adi_axi_adc_conv *conv, unsigned int reg,
+
+ if (readval == NULL) {
+ ret = ad9467_spi_write(spi, reg, writeval);
+- ad9467_spi_write(spi, AN877_ADC_REG_TRANSFER,
+- AN877_ADC_TRANSFER_SYNC);
+- return ret;
++ if (ret)
++ return ret;
++ return ad9467_spi_write(spi, AN877_ADC_REG_TRANSFER,
++ AN877_ADC_TRANSFER_SYNC);
+ }
+
+ ret = ad9467_spi_read(spi, reg);
+@@ -212,6 +213,7 @@ static void __ad9467_get_scale(struct adi_axi_adc_conv *conv, int index,
+ .channel = _chan, \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ), \
++ .info_mask_shared_by_type_available = BIT(IIO_CHAN_INFO_SCALE), \
+ .scan_index = _si, \
+ .scan_type = { \
+ .sign = _sign, \
+@@ -273,10 +275,13 @@ static int ad9467_get_scale(struct adi_axi_adc_conv *conv, int *val, int *val2)
+ const struct ad9467_chip_info *info1 = to_ad9467_chip_info(info);
+ struct ad9467_state *st = adi_axi_adc_conv_priv(conv);
+ unsigned int i, vref_val;
++ int ret;
+
+- vref_val = ad9467_spi_read(st->spi, AN877_ADC_REG_VREF);
++ ret = ad9467_spi_read(st->spi, AN877_ADC_REG_VREF);
++ if (ret < 0)
++ return ret;
+
+- vref_val &= info1->vref_mask;
++ vref_val = ret & info1->vref_mask;
+
+ for (i = 0; i < info->num_scales; i++) {
+ if (vref_val == info->scale_table[i][1])
+@@ -297,6 +302,7 @@ static int ad9467_set_scale(struct adi_axi_adc_conv *conv, int val, int val2)
+ struct ad9467_state *st = adi_axi_adc_conv_priv(conv);
+ unsigned int scale_val[2];
+ unsigned int i;
++ int ret;
+
+ if (val != 0)
+ return -EINVAL;
+@@ -306,11 +312,13 @@ static int ad9467_set_scale(struct adi_axi_adc_conv *conv, int val, int val2)
+ if (scale_val[0] != val || scale_val[1] != val2)
+ continue;
+
+- ad9467_spi_write(st->spi, AN877_ADC_REG_VREF,
+- info->scale_table[i][1]);
+- ad9467_spi_write(st->spi, AN877_ADC_REG_TRANSFER,
+- AN877_ADC_TRANSFER_SYNC);
+- return 0;
++ ret = ad9467_spi_write(st->spi, AN877_ADC_REG_VREF,
++ info->scale_table[i][1]);
++ if (ret < 0)
++ return ret;
++
++ return ad9467_spi_write(st->spi, AN877_ADC_REG_TRANSFER,
++ AN877_ADC_TRANSFER_SYNC);
+ }
+
+ return -EINVAL;
+@@ -359,6 +367,26 @@ static int ad9467_write_raw(struct adi_axi_adc_conv *conv,
+ }
+ }
+
++static int ad9467_read_avail(struct adi_axi_adc_conv *conv,
++ struct iio_chan_spec const *chan,
++ const int **vals, int *type, int *length,
++ long mask)
++{
++ const struct adi_axi_adc_chip_info *info = conv->chip_info;
++ struct ad9467_state *st = adi_axi_adc_conv_priv(conv);
++
++ switch (mask) {
++ case IIO_CHAN_INFO_SCALE:
++ *vals = (const int *)st->scales;
++ *type = IIO_VAL_INT_PLUS_MICRO;
++ /* Values are stored in a 2D matrix */
++ *length = info->num_scales * 2;
++ return IIO_AVAIL_LIST;
++ default:
++ return -EINVAL;
++ }
++}
++
+ static int ad9467_outputmode_set(struct spi_device *spi, unsigned int mode)
+ {
+ int ret;
+@@ -371,6 +399,26 @@ static int ad9467_outputmode_set(struct spi_device *spi, unsigned int mode)
+ AN877_ADC_TRANSFER_SYNC);
+ }
+
++static int ad9467_scale_fill(struct adi_axi_adc_conv *conv)
++{
++ const struct adi_axi_adc_chip_info *info = conv->chip_info;
++ struct ad9467_state *st = adi_axi_adc_conv_priv(conv);
++ unsigned int i, val1, val2;
++
++ st->scales = devm_kmalloc_array(&st->spi->dev, info->num_scales,
++ sizeof(*st->scales), GFP_KERNEL);
++ if (!st->scales)
++ return -ENOMEM;
++
++ for (i = 0; i < info->num_scales; i++) {
++ __ad9467_get_scale(conv, i, &val1, &val2);
++ st->scales[i][0] = val1;
++ st->scales[i][1] = val2;
++ }
++
++ return 0;
++}
++
+ static int ad9467_preenable_setup(struct adi_axi_adc_conv *conv)
+ {
+ struct ad9467_state *st = adi_axi_adc_conv_priv(conv);
+@@ -378,11 +426,19 @@ static int ad9467_preenable_setup(struct adi_axi_adc_conv *conv)
+ return ad9467_outputmode_set(st->spi, st->output_mode);
+ }
+
+-static void ad9467_clk_disable(void *data)
++static int ad9467_reset(struct device *dev)
+ {
+- struct ad9467_state *st = data;
++ struct gpio_desc *gpio;
+
+- clk_disable_unprepare(st->clk);
++ gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
++ if (IS_ERR_OR_NULL(gpio))
++ return PTR_ERR_OR_ZERO(gpio);
++
++ fsleep(1);
++ gpiod_set_value_cansleep(gpio, 0);
++ fsleep(10 * USEC_PER_MSEC);
++
++ return 0;
+ }
+
+ static int ad9467_probe(struct spi_device *spi)
+@@ -404,38 +460,25 @@ static int ad9467_probe(struct spi_device *spi)
+ st = adi_axi_adc_conv_priv(conv);
+ st->spi = spi;
+
+- st->clk = devm_clk_get(&spi->dev, "adc-clk");
++ st->clk = devm_clk_get_enabled(&spi->dev, "adc-clk");
+ if (IS_ERR(st->clk))
+ return PTR_ERR(st->clk);
+
+- ret = clk_prepare_enable(st->clk);
+- if (ret < 0)
+- return ret;
+-
+- ret = devm_add_action_or_reset(&spi->dev, ad9467_clk_disable, st);
+- if (ret)
+- return ret;
+-
+ st->pwrdown_gpio = devm_gpiod_get_optional(&spi->dev, "powerdown",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(st->pwrdown_gpio))
+ return PTR_ERR(st->pwrdown_gpio);
+
+- st->reset_gpio = devm_gpiod_get_optional(&spi->dev, "reset",
+- GPIOD_OUT_LOW);
+- if (IS_ERR(st->reset_gpio))
+- return PTR_ERR(st->reset_gpio);
+-
+- if (st->reset_gpio) {
+- udelay(1);
+- ret = gpiod_direction_output(st->reset_gpio, 1);
+- if (ret)
+- return ret;
+- mdelay(10);
+- }
++ ret = ad9467_reset(&spi->dev);
++ if (ret)
++ return ret;
+
+ conv->chip_info = &info->axi_adc_info;
+
++ ret = ad9467_scale_fill(conv);
++ if (ret)
++ return ret;
++
+ id = ad9467_spi_read(spi, AN877_ADC_REG_CHIP_ID);
+ if (id != conv->chip_info->id) {
+ dev_err(&spi->dev, "Mismatch CHIP_ID, got 0x%X, expected 0x%X\n",
+@@ -446,6 +489,7 @@ static int ad9467_probe(struct spi_device *spi)
+ conv->reg_access = ad9467_reg_access;
+ conv->write_raw = ad9467_write_raw;
+ conv->read_raw = ad9467_read_raw;
++ conv->read_avail = ad9467_read_avail;
+ conv->preenable_setup = ad9467_preenable_setup;
+
+ st->output_mode = info->default_output_mode |
+diff --git a/drivers/iio/adc/adi-axi-adc.c b/drivers/iio/adc/adi-axi-adc.c
+index a9e655e69eaa2..c6dac4c10c31a 100644
+--- a/drivers/iio/adc/adi-axi-adc.c
++++ b/drivers/iio/adc/adi-axi-adc.c
+@@ -142,6 +142,20 @@ static int adi_axi_adc_write_raw(struct iio_dev *indio_dev,
+ return conv->write_raw(conv, chan, val, val2, mask);
+ }
+
++static int adi_axi_adc_read_avail(struct iio_dev *indio_dev,
++ struct iio_chan_spec const *chan,
++ const int **vals, int *type, int *length,
++ long mask)
++{
++ struct adi_axi_adc_state *st = iio_priv(indio_dev);
++ struct adi_axi_adc_conv *conv = &st->client->conv;
++
++ if (!conv->read_avail)
++ return -EOPNOTSUPP;
++
++ return conv->read_avail(conv, chan, vals, type, length, mask);
++}
++
+ static int adi_axi_adc_update_scan_mode(struct iio_dev *indio_dev,
+ const unsigned long *scan_mask)
+ {
+@@ -226,69 +240,11 @@ struct adi_axi_adc_conv *devm_adi_axi_adc_conv_register(struct device *dev,
+ }
+ EXPORT_SYMBOL_GPL(devm_adi_axi_adc_conv_register);
+
+-static ssize_t in_voltage_scale_available_show(struct device *dev,
+- struct device_attribute *attr,
+- char *buf)
+-{
+- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+- struct adi_axi_adc_state *st = iio_priv(indio_dev);
+- struct adi_axi_adc_conv *conv = &st->client->conv;
+- size_t len = 0;
+- int i;
+-
+- for (i = 0; i < conv->chip_info->num_scales; i++) {
+- const unsigned int *s = conv->chip_info->scale_table[i];
+-
+- len += scnprintf(buf + len, PAGE_SIZE - len,
+- "%u.%06u ", s[0], s[1]);
+- }
+- buf[len - 1] = '\n';
+-
+- return len;
+-}
+-
+-static IIO_DEVICE_ATTR_RO(in_voltage_scale_available, 0);
+-
+-enum {
+- ADI_AXI_ATTR_SCALE_AVAIL,
+-};
+-
+-#define ADI_AXI_ATTR(_en_, _file_) \
+- [ADI_AXI_ATTR_##_en_] = &iio_dev_attr_##_file_.dev_attr.attr
+-
+-static struct attribute *adi_axi_adc_attributes[] = {
+- ADI_AXI_ATTR(SCALE_AVAIL, in_voltage_scale_available),
+- NULL
+-};
+-
+-static umode_t axi_adc_attr_is_visible(struct kobject *kobj,
+- struct attribute *attr, int n)
+-{
+- struct device *dev = kobj_to_dev(kobj);
+- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+- struct adi_axi_adc_state *st = iio_priv(indio_dev);
+- struct adi_axi_adc_conv *conv = &st->client->conv;
+-
+- switch (n) {
+- case ADI_AXI_ATTR_SCALE_AVAIL:
+- if (!conv->chip_info->num_scales)
+- return 0;
+- return attr->mode;
+- default:
+- return attr->mode;
+- }
+-}
+-
+-static const struct attribute_group adi_axi_adc_attribute_group = {
+- .attrs = adi_axi_adc_attributes,
+- .is_visible = axi_adc_attr_is_visible,
+-};
+-
+ static const struct iio_info adi_axi_adc_info = {
+ .read_raw = &adi_axi_adc_read_raw,
+ .write_raw = &adi_axi_adc_write_raw,
+- .attrs = &adi_axi_adc_attribute_group,
+ .update_scan_mode = &adi_axi_adc_update_scan_mode,
++ .read_avail = &adi_axi_adc_read_avail,
+ };
+
+ static const struct adi_axi_adc_core_info adi_axi_adc_10_0_a_info = {
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+index 6d8f3aa9d6aa9..7376f012ece12 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -5442,7 +5442,7 @@ static int hns_roce_v2_modify_srq(struct ib_srq *ibsrq,
+
+ /* Resizing SRQs is not supported yet */
+ if (srq_attr_mask & IB_SRQ_MAX_WR)
+- return -EINVAL;
++ return -EOPNOTSUPP;
+
+ if (srq_attr_mask & IB_SRQ_LIMIT) {
+ if (srq_attr->srq_limit > srq->wqe_cnt)
+diff --git a/drivers/infiniband/hw/hns/hns_roce_pd.c b/drivers/infiniband/hw/hns/hns_roce_pd.c
+index 81ffad77ae420..21bc26bd92d9d 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_pd.c
++++ b/drivers/infiniband/hw/hns/hns_roce_pd.c
+@@ -161,7 +161,7 @@ int hns_roce_alloc_xrcd(struct ib_xrcd *ib_xrcd, struct ib_udata *udata)
+ int ret;
+
+ if (!(hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC))
+- return -EINVAL;
++ return -EOPNOTSUPP;
+
+ ret = hns_roce_xrcd_alloc(hr_dev, &xrcd->xrcdn);
+ if (ret)
+diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
+index bdf5ed38de220..0307c45aa6d31 100644
+--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
++++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
+@@ -635,7 +635,7 @@ void mthca_free_mailbox(struct mthca_dev *dev, struct mthca_mailbox *mailbox)
+
+ int mthca_SYS_EN(struct mthca_dev *dev)
+ {
+- u64 out;
++ u64 out = 0;
+ int ret;
+
+ ret = mthca_cmd_imm(dev, 0, &out, 0, 0, CMD_SYS_EN, CMD_TIME_CLASS_D);
+@@ -1955,7 +1955,7 @@ int mthca_WRITE_MGM(struct mthca_dev *dev, int index,
+ int mthca_MGID_HASH(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
+ u16 *hash)
+ {
+- u64 imm;
++ u64 imm = 0;
+ int err;
+
+ err = mthca_cmd_imm(dev, mailbox->dma, &imm, 0, 0, CMD_MGID_HASH,
+diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
+index f507c4cd46d3c..f77c48b317f23 100644
+--- a/drivers/infiniband/hw/mthca/mthca_main.c
++++ b/drivers/infiniband/hw/mthca/mthca_main.c
+@@ -382,7 +382,7 @@ static int mthca_init_icm(struct mthca_dev *mdev,
+ struct mthca_init_hca_param *init_hca,
+ u64 icm_size)
+ {
+- u64 aux_pages;
++ u64 aux_pages = 0;
+ int err;
+
+ err = mthca_SET_ICM_SIZE(mdev, icm_size, &aux_pages);
+diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
+index 9f6ac0a09a782..35cc0a57e6975 100644
+--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
++++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
+@@ -321,12 +321,10 @@ struct iser_device {
+ *
+ * @mr: memory region
+ * @sig_mr: signature memory region
+- * @mr_valid: is mr valid indicator
+ */
+ struct iser_reg_resources {
+ struct ib_mr *mr;
+ struct ib_mr *sig_mr;
+- u8 mr_valid:1;
+ };
+
+ /**
+diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
+index 27a6f75a9912f..9ea88dd6a414e 100644
+--- a/drivers/infiniband/ulp/iser/iser_initiator.c
++++ b/drivers/infiniband/ulp/iser/iser_initiator.c
+@@ -602,7 +602,10 @@ iser_inv_desc(struct iser_fr_desc *desc, u32 rkey)
+ return -EINVAL;
+ }
+
+- desc->rsc.mr_valid = 0;
++ if (desc->sig_protected)
++ desc->rsc.sig_mr->need_inval = false;
++ else
++ desc->rsc.mr->need_inval = false;
+
+ return 0;
+ }
+diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
+index 9776b755d8487..c362043e7f172 100644
+--- a/drivers/infiniband/ulp/iser/iser_memory.c
++++ b/drivers/infiniband/ulp/iser/iser_memory.c
+@@ -250,7 +250,7 @@ iser_reg_sig_mr(struct iscsi_iser_task *iser_task,
+
+ iser_set_prot_checks(iser_task->sc, &sig_attrs->check_mask);
+
+- if (rsc->mr_valid)
++ if (rsc->sig_mr->need_inval)
+ iser_inv_rkey(&tx_desc->inv_wr, mr, cqe, &wr->wr);
+
+ ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
+@@ -274,7 +274,7 @@ iser_reg_sig_mr(struct iscsi_iser_task *iser_task,
+ wr->access = IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_READ |
+ IB_ACCESS_REMOTE_WRITE;
+- rsc->mr_valid = 1;
++ rsc->sig_mr->need_inval = true;
+
+ sig_reg->sge.lkey = mr->lkey;
+ sig_reg->rkey = mr->rkey;
+@@ -299,7 +299,7 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
+ struct ib_reg_wr *wr = &tx_desc->reg_wr;
+ int n;
+
+- if (rsc->mr_valid)
++ if (rsc->mr->need_inval)
+ iser_inv_rkey(&tx_desc->inv_wr, mr, cqe, &wr->wr);
+
+ ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
+@@ -322,7 +322,7 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
+ IB_ACCESS_REMOTE_WRITE |
+ IB_ACCESS_REMOTE_READ;
+
+- rsc->mr_valid = 1;
++ rsc->mr->need_inval = true;
+
+ reg->sge.lkey = mr->lkey;
+ reg->rkey = mr->rkey;
+diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
+index b566f7cb7797b..8656664a15c5f 100644
+--- a/drivers/infiniband/ulp/iser/iser_verbs.c
++++ b/drivers/infiniband/ulp/iser/iser_verbs.c
+@@ -136,7 +136,6 @@ iser_create_fastreg_desc(struct iser_device *device,
+ goto err_alloc_mr_integrity;
+ }
+ }
+- desc->rsc.mr_valid = 0;
+
+ return desc;
+
+diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
+index 3bf5c787f9149..1ff0d4e24fe68 100644
+--- a/drivers/input/joystick/xpad.c
++++ b/drivers/input/joystick/xpad.c
+@@ -268,6 +268,7 @@ static const struct xpad_device {
+ { 0x146b, 0x0604, "Bigben Interactive DAIJA Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
+ { 0x1532, 0x0a00, "Razer Atrox Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE },
+ { 0x1532, 0x0a03, "Razer Wildcat", 0, XTYPE_XBOXONE },
++ { 0x1532, 0x0a29, "Razer Wolverine V2", 0, XTYPE_XBOXONE },
+ { 0x15e4, 0x3f00, "Power A Mini Pro Elite", 0, XTYPE_XBOX360 },
+ { 0x15e4, 0x3f0a, "Xbox Airflo wired controller", 0, XTYPE_XBOX360 },
+ { 0x15e4, 0x3f10, "Batarang Xbox 360 controller", 0, XTYPE_XBOX360 },
+diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
+index fbdef95291e90..caf85ae462939 100644
+--- a/drivers/input/keyboard/atkbd.c
++++ b/drivers/input/keyboard/atkbd.c
+@@ -756,6 +756,44 @@ static void atkbd_deactivate(struct atkbd *atkbd)
+ ps2dev->serio->phys);
+ }
+
++#ifdef CONFIG_X86
++static bool atkbd_is_portable_device(void)
++{
++ static const char * const chassis_types[] = {
++ "8", /* Portable */
++ "9", /* Laptop */
++ "10", /* Notebook */
++ "14", /* Sub-Notebook */
++ "31", /* Convertible */
++ "32", /* Detachable */
++ };
++ int i;
++
++ for (i = 0; i < ARRAY_SIZE(chassis_types); i++)
++ if (dmi_match(DMI_CHASSIS_TYPE, chassis_types[i]))
++ return true;
++
++ return false;
++}
++
++/*
++ * On many modern laptops ATKBD_CMD_GETID may cause problems, on these laptops
++ * the controller is always in translated mode. In this mode mice/touchpads will
++ * not work. So in this case simply assume a keyboard is connected to avoid
++ * confusing some laptop keyboards.
++ *
++ * Skipping ATKBD_CMD_GETID ends up using a fake keyboard id. Using the standard
++ * 0xab83 id is ok in translated mode, only atkbd_select_set() checks atkbd->id
++ * and in translated mode that is a no-op.
++ */
++static bool atkbd_skip_getid(struct atkbd *atkbd)
++{
++ return atkbd->translated && atkbd_is_portable_device();
++}
++#else
++static inline bool atkbd_skip_getid(struct atkbd *atkbd) { return false; }
++#endif
++
+ /*
+ * atkbd_probe() probes for an AT keyboard on a serio port.
+ */
+@@ -764,6 +802,7 @@ static int atkbd_probe(struct atkbd *atkbd)
+ {
+ struct ps2dev *ps2dev = &atkbd->ps2dev;
+ unsigned char param[2];
++ bool skip_getid;
+
+ /*
+ * Some systems, where the bit-twiddling when testing the io-lines of the
+@@ -785,17 +824,18 @@ static int atkbd_probe(struct atkbd *atkbd)
+ */
+
+ param[0] = param[1] = 0xa5; /* initialize with invalid values */
+- if (ps2_command(ps2dev, param, ATKBD_CMD_GETID)) {
++ skip_getid = atkbd_skip_getid(atkbd);
++ if (skip_getid || ps2_command(ps2dev, param, ATKBD_CMD_GETID)) {
+
+ /*
+- * If the get ID command failed, we check if we can at least set the LEDs on
+- * the keyboard. This should work on every keyboard out there. It also turns
+- * the LEDs off, which we want anyway.
++ * If the get ID command was skipped or failed, we check if we can at least set
++ * the LEDs on the keyboard. This should work on every keyboard out there.
++ * It also turns the LEDs off, which we want anyway.
+ */
+ param[0] = 0;
+ if (ps2_command(ps2dev, param, ATKBD_CMD_SETLEDS))
+ return -1;
+- atkbd->id = 0xabba;
++ atkbd->id = skip_getid ? 0xab83 : 0xabba;
+ return 0;
+ }
+
+diff --git a/drivers/input/serio/i8042-acpipnpio.h b/drivers/input/serio/i8042-acpipnpio.h
+index 3db87ee0b70c7..6af38f53154bd 100644
+--- a/drivers/input/serio/i8042-acpipnpio.h
++++ b/drivers/input/serio/i8042-acpipnpio.h
+@@ -351,6 +351,14 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
+ },
+ .driver_data = (void *)(SERIO_QUIRK_DRITEK)
+ },
++ {
++ /* Acer TravelMate P459-G2-M */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate P459-G2-M"),
++ },
++ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
++ },
+ {
+ /* Amoi M636/A737 */
+ .matches = {
+diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
+index 50453d38400c5..272c9b35c9f0e 100644
+--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
++++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
+@@ -226,6 +226,7 @@ static int qcom_adreno_smmu_init_context(struct arm_smmu_domain *smmu_domain,
+
+ static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = {
+ { .compatible = "qcom,adreno" },
++ { .compatible = "qcom,adreno-gmu" },
+ { .compatible = "qcom,mdp4" },
+ { .compatible = "qcom,mdss" },
+ { .compatible = "qcom,sc7180-mdss" },
+diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
+index 48c6f7ff4aef1..8cd63e6ccd2cf 100644
+--- a/drivers/iommu/dma-iommu.c
++++ b/drivers/iommu/dma-iommu.c
+@@ -25,6 +25,7 @@
+ #include <linux/vmalloc.h>
+ #include <linux/crash_dump.h>
+ #include <linux/dma-direct.h>
++#include <trace/events/swiotlb.h>
+
+ struct iommu_dma_msi_page {
+ struct list_head list;
+@@ -817,6 +818,8 @@ static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
+ void *padding_start;
+ size_t padding_size, aligned_size;
+
++ trace_swiotlb_bounced(dev, phys, size, swiotlb_force);
++
+ aligned_size = iova_align(iovad, size);
+ phys = swiotlb_tbl_map_single(dev, phys, size, aligned_size,
+ iova_mask(iovad), dir, attrs);
+diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
+index 8bf545100fb04..fdfc41535c8a3 100644
+--- a/drivers/leds/Kconfig
++++ b/drivers/leds/Kconfig
+@@ -97,6 +97,7 @@ config LEDS_ARIEL
+ config LEDS_AW2013
+ tristate "LED support for Awinic AW2013"
+ depends on LEDS_CLASS && I2C && OF
++ select REGMAP_I2C
+ help
+ This option enables support for the AW2013 3-channel
+ LED driver.
+diff --git a/drivers/leds/trigger/ledtrig-tty.c b/drivers/leds/trigger/ledtrig-tty.c
+index 8ae0d2d284aff..3e69a7bde9284 100644
+--- a/drivers/leds/trigger/ledtrig-tty.c
++++ b/drivers/leds/trigger/ledtrig-tty.c
+@@ -168,6 +168,10 @@ static void ledtrig_tty_deactivate(struct led_classdev *led_cdev)
+
+ cancel_delayed_work_sync(&trigger_data->dwork);
+
++ kfree(trigger_data->ttyname);
++ tty_kref_put(trigger_data->tty);
++ trigger_data->tty = NULL;
++
+ kfree(trigger_data);
+ }
+
+diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
+index 2ff8a1b776fb4..3a83e8e092568 100644
+--- a/drivers/media/dvb-core/dvbdev.c
++++ b/drivers/media/dvb-core/dvbdev.c
+@@ -114,6 +114,8 @@ static int dvb_device_open(struct inode *inode, struct file *file)
+ err = file->f_op->open(inode, file);
+ up_read(&minor_rwsem);
+ mutex_unlock(&dvbdev_mutex);
++ if (err)
++ dvb_device_put(dvbdev);
+ return err;
+ }
+ fail:
+diff --git a/drivers/media/dvb-frontends/m88ds3103.c b/drivers/media/dvb-frontends/m88ds3103.c
+index e03fac025b512..fdf993da30011 100644
+--- a/drivers/media/dvb-frontends/m88ds3103.c
++++ b/drivers/media/dvb-frontends/m88ds3103.c
+@@ -1898,7 +1898,7 @@ static int m88ds3103_probe(struct i2c_client *client,
+ /* get frontend address */
+ ret = regmap_read(dev->regmap, 0x29, &utmp);
+ if (ret)
+- goto err_kfree;
++ goto err_del_adapters;
+ dev->dt_addr = ((utmp & 0x80) == 0) ? 0x42 >> 1 : 0x40 >> 1;
+ dev_dbg(&client->dev, "dt addr is 0x%02x\n", dev->dt_addr);
+
+@@ -1906,11 +1906,14 @@ static int m88ds3103_probe(struct i2c_client *client,
+ dev->dt_addr);
+ if (IS_ERR(dev->dt_client)) {
+ ret = PTR_ERR(dev->dt_client);
+- goto err_kfree;
++ goto err_del_adapters;
+ }
+ }
+
+ return 0;
++
++err_del_adapters:
++ i2c_mux_del_adapters(dev->muxc);
+ err_kfree:
+ kfree(dev);
+ err:
+diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c
+index 560f928c37520..79cfa99f2a64f 100644
+--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c
++++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c
+@@ -514,7 +514,7 @@ static int rkisp1_probe(struct platform_device *pdev)
+
+ ret = v4l2_device_register(rkisp1->dev, &rkisp1->v4l2_dev);
+ if (ret)
+- return ret;
++ goto err_pm_runtime_disable;
+
+ ret = media_device_register(&rkisp1->media_dev);
+ if (ret) {
+@@ -534,6 +534,7 @@ err_unreg_media_dev:
+ media_device_unregister(&rkisp1->media_dev);
+ err_unreg_v4l2_dev:
+ v4l2_device_unregister(&rkisp1->v4l2_dev);
++err_pm_runtime_disable:
+ pm_runtime_disable(&pdev->dev);
+ return ret;
+ }
+diff --git a/drivers/media/usb/cx231xx/cx231xx-core.c b/drivers/media/usb/cx231xx/cx231xx-core.c
+index 727e6268567f7..f1feccc28bf05 100644
+--- a/drivers/media/usb/cx231xx/cx231xx-core.c
++++ b/drivers/media/usb/cx231xx/cx231xx-core.c
+@@ -1024,6 +1024,7 @@ int cx231xx_init_isoc(struct cx231xx *dev, int max_packets,
+ if (!dev->video_mode.isoc_ctl.urb) {
+ dev_err(dev->dev,
+ "cannot alloc memory for usb buffers\n");
++ kfree(dma_q->p_left_data);
+ return -ENOMEM;
+ }
+
+@@ -1033,6 +1034,7 @@ int cx231xx_init_isoc(struct cx231xx *dev, int max_packets,
+ dev_err(dev->dev,
+ "cannot allocate memory for usbtransfer\n");
+ kfree(dev->video_mode.isoc_ctl.urb);
++ kfree(dma_q->p_left_data);
+ return -ENOMEM;
+ }
+
+diff --git a/drivers/media/usb/pvrusb2/pvrusb2-context.c b/drivers/media/usb/pvrusb2/pvrusb2-context.c
+index 14170a5d72b35..1764674de98bc 100644
+--- a/drivers/media/usb/pvrusb2/pvrusb2-context.c
++++ b/drivers/media/usb/pvrusb2/pvrusb2-context.c
+@@ -268,7 +268,8 @@ void pvr2_context_disconnect(struct pvr2_context *mp)
+ {
+ pvr2_hdw_disconnect(mp->hdw);
+ mp->disconnect_flag = !0;
+- pvr2_context_notify(mp);
++ if (!pvr2_context_shutok())
++ pvr2_context_notify(mp);
+ }
+
+
+diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c
+index 0e15afc39f545..b1a0cd34e8a99 100644
+--- a/drivers/mfd/intel-lpss.c
++++ b/drivers/mfd/intel-lpss.c
+@@ -301,8 +301,8 @@ static int intel_lpss_register_clock_divider(struct intel_lpss *lpss,
+
+ snprintf(name, sizeof(name), "%s-div", devname);
+ tmp = clk_register_fractional_divider(NULL, name, __clk_get_name(tmp),
++ 0, lpss->priv, 1, 15, 16, 15,
+ CLK_FRAC_DIVIDER_POWER_OF_TWO_PS,
+- lpss->priv, 1, 15, 16, 15, 0,
+ NULL);
+ if (IS_ERR(tmp))
+ return PTR_ERR(tmp);
+diff --git a/drivers/mfd/syscon.c b/drivers/mfd/syscon.c
+index 191fdb87c424d..552b1861adad4 100644
+--- a/drivers/mfd/syscon.c
++++ b/drivers/mfd/syscon.c
+@@ -103,6 +103,10 @@ static struct syscon *of_syscon_register(struct device_node *np, bool check_clk)
+
+ syscon_config.name = kasprintf(GFP_KERNEL, "%pOFn@%llx", np,
+ (u64)res.start);
++ if (!syscon_config.name) {
++ ret = -ENOMEM;
++ goto err_regmap;
++ }
+ syscon_config.reg_stride = reg_io_width;
+ syscon_config.val_bits = reg_io_width * 8;
+ syscon_config.max_register = resource_size(&res) - reg_io_width;
+diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
+index a281df78d1685..0061eeec220bc 100644
+--- a/drivers/mmc/host/Kconfig
++++ b/drivers/mmc/host/Kconfig
+@@ -1055,14 +1055,15 @@ config MMC_SDHCI_XENON
+
+ config MMC_SDHCI_OMAP
+ tristate "TI SDHCI Controller Support"
++ depends on ARCH_OMAP2PLUS || ARCH_KEYSTONE || COMPILE_TEST
+ depends on MMC_SDHCI_PLTFM && OF
+ select THERMAL
+ imply TI_SOC_THERMAL
+ select MMC_SDHCI_EXTERNAL_DMA if DMA_ENGINE
+ help
+ This selects the Secure Digital Host Controller Interface (SDHCI)
+- support present in TI's DRA7 SOCs. The controller supports
+- SD/MMC/SDIO devices.
++ support present in TI's Keystone/OMAP2+/DRA7 SOCs. The controller
++ supports SD/MMC/SDIO devices.
+
+ If you have a controller with this interface, say Y or M here.
+
+@@ -1070,14 +1071,15 @@ config MMC_SDHCI_OMAP
+
+ config MMC_SDHCI_AM654
+ tristate "Support for the SDHCI Controller in TI's AM654 SOCs"
++ depends on ARCH_K3 || COMPILE_TEST
+ depends on MMC_SDHCI_PLTFM && OF
+ select MMC_SDHCI_IO_ACCESSORS
+ select MMC_CQHCI
+ select REGMAP_MMIO
+ help
+ This selects the Secure Digital Host Controller Interface (SDHCI)
+- support present in TI's AM654 SOCs. The controller supports
+- SD/MMC/SDIO devices.
++ support present in TI's AM65x/AM64x/AM62x/J721E SOCs. The controller
++ supports SD/MMC/SDIO devices.
+
+ If you have a controller with this interface, say Y or M here.
+
+diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
+index b8ae1ec14e178..5288a6aaf1828 100644
+--- a/drivers/mtd/mtd_blkdevs.c
++++ b/drivers/mtd/mtd_blkdevs.c
+@@ -457,7 +457,7 @@ static void blktrans_notify_add(struct mtd_info *mtd)
+ {
+ struct mtd_blktrans_ops *tr;
+
+- if (mtd->type == MTD_ABSENT)
++ if (mtd->type == MTD_ABSENT || mtd->type == MTD_UBIVOLUME)
+ return;
+
+ list_for_each_entry(tr, &blktrans_majors, list)
+@@ -497,7 +497,7 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
+ mutex_lock(&mtd_table_mutex);
+ list_add(&tr->list, &blktrans_majors);
+ mtd_for_each_device(mtd)
+- if (mtd->type != MTD_ABSENT)
++ if (mtd->type != MTD_ABSENT && mtd->type != MTD_UBIVOLUME)
+ tr->add_mtd(tr, mtd);
+ mutex_unlock(&mtd_table_mutex);
+ return 0;
+diff --git a/drivers/mtd/nand/raw/fsl_ifc_nand.c b/drivers/mtd/nand/raw/fsl_ifc_nand.c
+index 02d5001768382..bea1a7d3edd78 100644
+--- a/drivers/mtd/nand/raw/fsl_ifc_nand.c
++++ b/drivers/mtd/nand/raw/fsl_ifc_nand.c
+@@ -20,7 +20,7 @@
+
+ #define ERR_BYTE 0xFF /* Value returned for read
+ bytes when read failed */
+-#define IFC_TIMEOUT_MSECS 500 /* Maximum number of mSecs to wait
++#define IFC_TIMEOUT_MSECS 1000 /* Maximum timeout to wait
+ for IFC NAND Machine */
+
+ struct fsl_ifc_ctrl;
+diff --git a/drivers/net/dsa/vitesse-vsc73xx-core.c b/drivers/net/dsa/vitesse-vsc73xx-core.c
+index 0c0bc78b1788e..6b6470ef2ae93 100644
+--- a/drivers/net/dsa/vitesse-vsc73xx-core.c
++++ b/drivers/net/dsa/vitesse-vsc73xx-core.c
+@@ -1119,6 +1119,8 @@ static int vsc73xx_gpio_probe(struct vsc73xx *vsc)
+
+ vsc->gc.label = devm_kasprintf(vsc->dev, GFP_KERNEL, "VSC%04x",
+ vsc->chipid);
++ if (!vsc->gc.label)
++ return -ENOMEM;
+ vsc->gc.ngpio = 4;
+ vsc->gc.owner = THIS_MODULE;
+ vsc->gc.parent = vsc->dev;
+diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
+index fc487a6f050a2..757138c33b753 100644
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -6448,6 +6448,14 @@ static void tg3_dump_state(struct tg3 *tp)
+ int i;
+ u32 *regs;
+
++ /* If it is a PCI error, all registers will be 0xffff,
++ * we don't dump them out, just report the error and return
++ */
++ if (tp->pdev->error_state != pci_channel_io_normal) {
++ netdev_err(tp->dev, "PCI channel ERROR!\n");
++ return;
++ }
++
+ regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
+ if (!regs)
+ return;
+@@ -11186,7 +11194,8 @@ static void tg3_reset_task(struct work_struct *work)
+ rtnl_lock();
+ tg3_full_lock(tp, 0);
+
+- if (tp->pcierr_recovery || !netif_running(tp->dev)) {
++ if (tp->pcierr_recovery || !netif_running(tp->dev) ||
++ tp->pdev->error_state != pci_channel_io_normal) {
+ tg3_flag_clear(tp, RESET_TASK_PENDING);
+ tg3_full_unlock(tp);
+ rtnl_unlock();
+diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/Makefile b/drivers/net/ethernet/mellanox/mlxbf_gige/Makefile
+index e57c1375f236a..a97c2bef846be 100644
+--- a/drivers/net/ethernet/mellanox/mlxbf_gige/Makefile
++++ b/drivers/net/ethernet/mellanox/mlxbf_gige/Makefile
+@@ -3,7 +3,6 @@
+ obj-$(CONFIG_MLXBF_GIGE) += mlxbf_gige.o
+
+ mlxbf_gige-y := mlxbf_gige_ethtool.o \
+- mlxbf_gige_gpio.o \
+ mlxbf_gige_intr.o \
+ mlxbf_gige_main.o \
+ mlxbf_gige_mdio.o \
+diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h
+index 3e8725b7f0b70..0fdf2c8ca480d 100644
+--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h
++++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h
+@@ -51,11 +51,6 @@
+ #define MLXBF_GIGE_ERROR_INTR_IDX 0
+ #define MLXBF_GIGE_RECEIVE_PKT_INTR_IDX 1
+ #define MLXBF_GIGE_LLU_PLU_INTR_IDX 2
+-#define MLXBF_GIGE_PHY_INT_N 3
+-
+-#define MLXBF_GIGE_MDIO_DEFAULT_PHY_ADDR 0x3
+-
+-#define MLXBF_GIGE_DEFAULT_PHY_INT_GPIO 12
+
+ struct mlxbf_gige_stats {
+ u64 hw_access_errors;
+@@ -82,11 +77,7 @@ struct mlxbf_gige {
+ void __iomem *mdio_io;
+ void __iomem *clk_io;
+ struct mii_bus *mdiobus;
+- void __iomem *gpio_io;
+- struct irq_domain *irqdomain;
+- u32 phy_int_gpio_mask;
+ spinlock_t lock; /* for packet processing indices */
+- spinlock_t gpio_lock; /* for GPIO bus access */
+ u16 rx_q_entries;
+ u16 tx_q_entries;
+ u64 *tx_wqe_base;
+@@ -186,7 +177,4 @@ int mlxbf_gige_poll(struct napi_struct *napi, int budget);
+ extern const struct ethtool_ops mlxbf_gige_ethtool_ops;
+ void mlxbf_gige_update_tx_wqe_next(struct mlxbf_gige *priv);
+
+-int mlxbf_gige_gpio_init(struct platform_device *pdev, struct mlxbf_gige *priv);
+-void mlxbf_gige_gpio_free(struct mlxbf_gige *priv);
+-
+ #endif /* !defined(__MLXBF_GIGE_H__) */
+diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_gpio.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_gpio.c
+deleted file mode 100644
+index a8d966db57159..0000000000000
+--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_gpio.c
++++ /dev/null
+@@ -1,212 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause
+-
+-/* Initialize and handle GPIO interrupt triggered by INT_N PHY signal.
+- * This GPIO interrupt triggers the PHY state machine to bring the link
+- * up/down.
+- *
+- * Copyright (C) 2021 NVIDIA CORPORATION & AFFILIATES
+- */
+-
+-#include <linux/acpi.h>
+-#include <linux/bitfield.h>
+-#include <linux/device.h>
+-#include <linux/err.h>
+-#include <linux/gpio/driver.h>
+-#include <linux/interrupt.h>
+-#include <linux/io.h>
+-#include <linux/irq.h>
+-#include <linux/irqdomain.h>
+-#include <linux/irqreturn.h>
+-#include <linux/platform_device.h>
+-#include <linux/property.h>
+-
+-#include "mlxbf_gige.h"
+-#include "mlxbf_gige_regs.h"
+-
+-#define MLXBF_GIGE_GPIO_CAUSE_FALL_EN 0x48
+-#define MLXBF_GIGE_GPIO_CAUSE_OR_CAUSE_EVTEN0 0x80
+-#define MLXBF_GIGE_GPIO_CAUSE_OR_EVTEN0 0x94
+-#define MLXBF_GIGE_GPIO_CAUSE_OR_CLRCAUSE 0x98
+-
+-static void mlxbf_gige_gpio_enable(struct mlxbf_gige *priv)
+-{
+- unsigned long flags;
+- u32 val;
+-
+- spin_lock_irqsave(&priv->gpio_lock, flags);
+- val = readl(priv->gpio_io + MLXBF_GIGE_GPIO_CAUSE_OR_CLRCAUSE);
+- val |= priv->phy_int_gpio_mask;
+- writel(val, priv->gpio_io + MLXBF_GIGE_GPIO_CAUSE_OR_CLRCAUSE);
+-
+- /* The INT_N interrupt level is active low.
+- * So enable cause fall bit to detect when GPIO
+- * state goes low.
+- */
+- val = readl(priv->gpio_io + MLXBF_GIGE_GPIO_CAUSE_FALL_EN);
+- val |= priv->phy_int_gpio_mask;
+- writel(val, priv->gpio_io + MLXBF_GIGE_GPIO_CAUSE_FALL_EN);
+-
+- /* Enable PHY interrupt by setting the priority level */
+- val = readl(priv->gpio_io + MLXBF_GIGE_GPIO_CAUSE_OR_EVTEN0);
+- val |= priv->phy_int_gpio_mask;
+- writel(val, priv->gpio_io + MLXBF_GIGE_GPIO_CAUSE_OR_EVTEN0);
+- spin_unlock_irqrestore(&priv->gpio_lock, flags);
+-}
+-
+-static void mlxbf_gige_gpio_disable(struct mlxbf_gige *priv)
+-{
+- unsigned long flags;
+- u32 val;
+-
+- spin_lock_irqsave(&priv->gpio_lock, flags);
+- val = readl(priv->gpio_io + MLXBF_GIGE_GPIO_CAUSE_OR_EVTEN0);
+- val &= ~priv->phy_int_gpio_mask;
+- writel(val, priv->gpio_io + MLXBF_GIGE_GPIO_CAUSE_OR_EVTEN0);
+- spin_unlock_irqrestore(&priv->gpio_lock, flags);
+-}
+-
+-static irqreturn_t mlxbf_gige_gpio_handler(int irq, void *ptr)
+-{
+- struct mlxbf_gige *priv;
+- u32 val;
+-
+- priv = ptr;
+-
+- /* Check if this interrupt is from PHY device.
+- * Return if it is not.
+- */
+- val = readl(priv->gpio_io + MLXBF_GIGE_GPIO_CAUSE_OR_CAUSE_EVTEN0);
+- if (!(val & priv->phy_int_gpio_mask))
+- return IRQ_NONE;
+-
+- /* Clear interrupt when done, otherwise, no further interrupt
+- * will be triggered.
+- */
+- val = readl(priv->gpio_io + MLXBF_GIGE_GPIO_CAUSE_OR_CLRCAUSE);
+- val |= priv->phy_int_gpio_mask;
+- writel(val, priv->gpio_io + MLXBF_GIGE_GPIO_CAUSE_OR_CLRCAUSE);
+-
+- generic_handle_irq(priv->phy_irq);
+-
+- return IRQ_HANDLED;
+-}
+-
+-static void mlxbf_gige_gpio_mask(struct irq_data *irqd)
+-{
+- struct mlxbf_gige *priv = irq_data_get_irq_chip_data(irqd);
+-
+- mlxbf_gige_gpio_disable(priv);
+-}
+-
+-static void mlxbf_gige_gpio_unmask(struct irq_data *irqd)
+-{
+- struct mlxbf_gige *priv = irq_data_get_irq_chip_data(irqd);
+-
+- mlxbf_gige_gpio_enable(priv);
+-}
+-
+-static struct irq_chip mlxbf_gige_gpio_chip = {
+- .name = "mlxbf_gige_phy",
+- .irq_mask = mlxbf_gige_gpio_mask,
+- .irq_unmask = mlxbf_gige_gpio_unmask,
+-};
+-
+-static int mlxbf_gige_gpio_domain_map(struct irq_domain *d,
+- unsigned int irq,
+- irq_hw_number_t hwirq)
+-{
+- irq_set_chip_data(irq, d->host_data);
+- irq_set_chip_and_handler(irq, &mlxbf_gige_gpio_chip, handle_simple_irq);
+- irq_set_noprobe(irq);
+-
+- return 0;
+-}
+-
+-static const struct irq_domain_ops mlxbf_gige_gpio_domain_ops = {
+- .map = mlxbf_gige_gpio_domain_map,
+- .xlate = irq_domain_xlate_twocell,
+-};
+-
+-#ifdef CONFIG_ACPI
+-static int mlxbf_gige_gpio_resources(struct acpi_resource *ares,
+- void *data)
+-{
+- struct acpi_resource_gpio *gpio;
+- u32 *phy_int_gpio = data;
+-
+- if (ares->type == ACPI_RESOURCE_TYPE_GPIO) {
+- gpio = &ares->data.gpio;
+- *phy_int_gpio = gpio->pin_table[0];
+- }
+-
+- return 1;
+-}
+-#endif
+-
+-void mlxbf_gige_gpio_free(struct mlxbf_gige *priv)
+-{
+- irq_dispose_mapping(priv->phy_irq);
+- irq_domain_remove(priv->irqdomain);
+-}
+-
+-int mlxbf_gige_gpio_init(struct platform_device *pdev,
+- struct mlxbf_gige *priv)
+-{
+- struct device *dev = &pdev->dev;
+- struct resource *res;
+- u32 phy_int_gpio = 0;
+- int ret;
+-
+- LIST_HEAD(resources);
+-
+- res = platform_get_resource(pdev, IORESOURCE_MEM, MLXBF_GIGE_RES_GPIO0);
+- if (!res)
+- return -ENODEV;
+-
+- priv->gpio_io = devm_ioremap(dev, res->start, resource_size(res));
+- if (!priv->gpio_io)
+- return -ENOMEM;
+-
+-#ifdef CONFIG_ACPI
+- ret = acpi_dev_get_resources(ACPI_COMPANION(dev),
+- &resources, mlxbf_gige_gpio_resources,
+- &phy_int_gpio);
+- acpi_dev_free_resource_list(&resources);
+- if (ret < 0 || !phy_int_gpio) {
+- dev_err(dev, "Error retrieving the gpio phy pin");
+- return -EINVAL;
+- }
+-#endif
+-
+- priv->phy_int_gpio_mask = BIT(phy_int_gpio);
+-
+- mlxbf_gige_gpio_disable(priv);
+-
+- priv->hw_phy_irq = platform_get_irq(pdev, MLXBF_GIGE_PHY_INT_N);
+-
+- priv->irqdomain = irq_domain_add_simple(NULL, 1, 0,
+- &mlxbf_gige_gpio_domain_ops,
+- priv);
+- if (!priv->irqdomain) {
+- dev_err(dev, "Failed to add IRQ domain\n");
+- return -ENOMEM;
+- }
+-
+- priv->phy_irq = irq_create_mapping(priv->irqdomain, 0);
+- if (!priv->phy_irq) {
+- irq_domain_remove(priv->irqdomain);
+- priv->irqdomain = NULL;
+- dev_err(dev, "Error mapping PHY IRQ\n");
+- return -EINVAL;
+- }
+-
+- ret = devm_request_irq(dev, priv->hw_phy_irq, mlxbf_gige_gpio_handler,
+- IRQF_ONESHOT | IRQF_SHARED, "mlxbf_gige_phy", priv);
+- if (ret) {
+- dev_err(dev, "Failed to request PHY IRQ");
+- mlxbf_gige_gpio_free(priv);
+- return ret;
+- }
+-
+- return ret;
+-}
+diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
+index b990782c1eb1f..679415a64f25c 100644
+--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
++++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
+@@ -132,9 +132,15 @@ static int mlxbf_gige_open(struct net_device *netdev)
+ {
+ struct mlxbf_gige *priv = netdev_priv(netdev);
+ struct phy_device *phydev = netdev->phydev;
++ u64 control;
+ u64 int_en;
+ int err;
+
++ /* Perform general init of GigE block */
++ control = readq(priv->base + MLXBF_GIGE_CONTROL);
++ control |= MLXBF_GIGE_CONTROL_PORT_EN;
++ writeq(control, priv->base + MLXBF_GIGE_CONTROL);
++
+ err = mlxbf_gige_request_irqs(priv);
+ if (err)
+ return err;
+@@ -149,14 +155,14 @@ static int mlxbf_gige_open(struct net_device *netdev)
+ */
+ priv->valid_polarity = 0;
+
+- err = mlxbf_gige_rx_init(priv);
++ phy_start(phydev);
++
++ err = mlxbf_gige_tx_init(priv);
+ if (err)
+ goto free_irqs;
+- err = mlxbf_gige_tx_init(priv);
++ err = mlxbf_gige_rx_init(priv);
+ if (err)
+- goto rx_deinit;
+-
+- phy_start(phydev);
++ goto tx_deinit;
+
+ netif_napi_add(netdev, &priv->napi, mlxbf_gige_poll, NAPI_POLL_WEIGHT);
+ napi_enable(&priv->napi);
+@@ -178,8 +184,8 @@ static int mlxbf_gige_open(struct net_device *netdev)
+
+ return 0;
+
+-rx_deinit:
+- mlxbf_gige_rx_deinit(priv);
++tx_deinit:
++ mlxbf_gige_tx_deinit(priv);
+
+ free_irqs:
+ mlxbf_gige_free_irqs(priv);
+@@ -280,8 +286,7 @@ static int mlxbf_gige_probe(struct platform_device *pdev)
+ void __iomem *llu_base;
+ void __iomem *plu_base;
+ void __iomem *base;
+- u64 control;
+- int addr;
++ int addr, phy_irq;
+ int err;
+
+ base = devm_platform_ioremap_resource(pdev, MLXBF_GIGE_RES_MAC);
+@@ -296,11 +301,6 @@ static int mlxbf_gige_probe(struct platform_device *pdev)
+ if (IS_ERR(plu_base))
+ return PTR_ERR(plu_base);
+
+- /* Perform general init of GigE block */
+- control = readq(base + MLXBF_GIGE_CONTROL);
+- control |= MLXBF_GIGE_CONTROL_PORT_EN;
+- writeq(control, base + MLXBF_GIGE_CONTROL);
+-
+ netdev = devm_alloc_etherdev(&pdev->dev, sizeof(*priv));
+ if (!netdev)
+ return -ENOMEM;
+@@ -316,20 +316,12 @@ static int mlxbf_gige_probe(struct platform_device *pdev)
+ priv->pdev = pdev;
+
+ spin_lock_init(&priv->lock);
+- spin_lock_init(&priv->gpio_lock);
+
+ /* Attach MDIO device */
+ err = mlxbf_gige_mdio_probe(pdev, priv);
+ if (err)
+ return err;
+
+- err = mlxbf_gige_gpio_init(pdev, priv);
+- if (err) {
+- dev_err(&pdev->dev, "PHY IRQ initialization failed\n");
+- mlxbf_gige_mdio_remove(priv);
+- return -ENODEV;
+- }
+-
+ priv->base = base;
+ priv->llu_base = llu_base;
+ priv->plu_base = plu_base;
+@@ -350,6 +342,12 @@ static int mlxbf_gige_probe(struct platform_device *pdev)
+ priv->rx_irq = platform_get_irq(pdev, MLXBF_GIGE_RECEIVE_PKT_INTR_IDX);
+ priv->llu_plu_irq = platform_get_irq(pdev, MLXBF_GIGE_LLU_PLU_INTR_IDX);
+
++ phy_irq = acpi_dev_gpio_irq_get_by(ACPI_COMPANION(&pdev->dev), "phy-gpios", 0);
++ if (phy_irq < 0) {
++ dev_err(&pdev->dev, "Error getting PHY irq. Use polling instead");
++ phy_irq = PHY_POLL;
++ }
++
+ phydev = phy_find_first(priv->mdiobus);
+ if (!phydev) {
+ err = -ENODEV;
+@@ -357,8 +355,8 @@ static int mlxbf_gige_probe(struct platform_device *pdev)
+ }
+
+ addr = phydev->mdio.addr;
+- priv->mdiobus->irq[addr] = priv->phy_irq;
+- phydev->irq = priv->phy_irq;
++ priv->mdiobus->irq[addr] = phy_irq;
++ phydev->irq = phy_irq;
+
+ err = phy_connect_direct(netdev, phydev,
+ mlxbf_gige_adjust_link,
+@@ -394,7 +392,6 @@ static int mlxbf_gige_probe(struct platform_device *pdev)
+ return 0;
+
+ out:
+- mlxbf_gige_gpio_free(priv);
+ mlxbf_gige_mdio_remove(priv);
+ return err;
+ }
+@@ -405,7 +402,6 @@ static int mlxbf_gige_remove(struct platform_device *pdev)
+
+ unregister_netdev(priv->netdev);
+ phy_disconnect(priv->netdev->phydev);
+- mlxbf_gige_gpio_free(priv);
+ mlxbf_gige_mdio_remove(priv);
+ platform_set_drvdata(pdev, NULL);
+
+diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c
+index 227d01cace3f0..6999843584934 100644
+--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c
++++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c
+@@ -142,6 +142,9 @@ int mlxbf_gige_rx_init(struct mlxbf_gige *priv)
+ writeq(MLXBF_GIGE_RX_MAC_FILTER_COUNT_PASS_EN,
+ priv->base + MLXBF_GIGE_RX_MAC_FILTER_COUNT_PASS);
+
++ writeq(ilog2(priv->rx_q_entries),
++ priv->base + MLXBF_GIGE_RX_WQE_SIZE_LOG2);
++
+ /* Clear MLXBF_GIGE_INT_MASK 'receive pkt' bit to
+ * indicate readiness to receive interrupts
+ */
+@@ -154,9 +157,6 @@ int mlxbf_gige_rx_init(struct mlxbf_gige *priv)
+ data |= MLXBF_GIGE_RX_DMA_EN;
+ writeq(data, priv->base + MLXBF_GIGE_RX_DMA);
+
+- writeq(ilog2(priv->rx_q_entries),
+- priv->base + MLXBF_GIGE_RX_WQE_SIZE_LOG2);
+-
+ return 0;
+
+ free_wqe_and_skb:
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c
+index ded4cf6586809..4b713832fdd55 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c
+@@ -119,7 +119,6 @@ mlxsw_sp_acl_atcam_region_12kb_init(struct mlxsw_sp_acl_atcam_region *aregion)
+ {
+ struct mlxsw_sp *mlxsw_sp = aregion->region->mlxsw_sp;
+ struct mlxsw_sp_acl_atcam_region_12kb *region_12kb;
+- size_t alloc_size;
+ u64 max_lkey_id;
+ int err;
+
+@@ -131,8 +130,7 @@ mlxsw_sp_acl_atcam_region_12kb_init(struct mlxsw_sp_acl_atcam_region *aregion)
+ if (!region_12kb)
+ return -ENOMEM;
+
+- alloc_size = BITS_TO_LONGS(max_lkey_id) * sizeof(unsigned long);
+- region_12kb->used_lkey_id = kzalloc(alloc_size, GFP_KERNEL);
++ region_12kb->used_lkey_id = bitmap_zalloc(max_lkey_id, GFP_KERNEL);
+ if (!region_12kb->used_lkey_id) {
+ err = -ENOMEM;
+ goto err_used_lkey_id_alloc;
+@@ -149,7 +147,7 @@ mlxsw_sp_acl_atcam_region_12kb_init(struct mlxsw_sp_acl_atcam_region *aregion)
+ return 0;
+
+ err_rhashtable_init:
+- kfree(region_12kb->used_lkey_id);
++ bitmap_free(region_12kb->used_lkey_id);
+ err_used_lkey_id_alloc:
+ kfree(region_12kb);
+ return err;
+@@ -161,7 +159,7 @@ mlxsw_sp_acl_atcam_region_12kb_fini(struct mlxsw_sp_acl_atcam_region *aregion)
+ struct mlxsw_sp_acl_atcam_region_12kb *region_12kb = aregion->priv;
+
+ rhashtable_destroy(&region_12kb->lkey_ht);
+- kfree(region_12kb->used_lkey_id);
++ bitmap_free(region_12kb->used_lkey_id);
+ kfree(region_12kb);
+ }
+
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c
+index 4c98950380d53..d231f4d2888be 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c
+@@ -301,6 +301,7 @@ mlxsw_sp_acl_erp_table_alloc(struct mlxsw_sp_acl_erp_core *erp_core,
+ unsigned long *p_index)
+ {
+ unsigned int num_rows, entry_size;
++ unsigned long index;
+
+ /* We only allow allocations of entire rows */
+ if (num_erps % erp_core->num_erp_banks != 0)
+@@ -309,10 +310,11 @@ mlxsw_sp_acl_erp_table_alloc(struct mlxsw_sp_acl_erp_core *erp_core,
+ entry_size = erp_core->erpt_entries_size[region_type];
+ num_rows = num_erps / erp_core->num_erp_banks;
+
+- *p_index = gen_pool_alloc(erp_core->erp_tables, num_rows * entry_size);
+- if (*p_index == 0)
++ index = gen_pool_alloc(erp_core->erp_tables, num_rows * entry_size);
++ if (!index)
+ return -ENOBUFS;
+- *p_index -= MLXSW_SP_ACL_ERP_GENALLOC_OFFSET;
++
++ *p_index = index - MLXSW_SP_ACL_ERP_GENALLOC_OFFSET;
+
+ return 0;
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
+index 7cccc41dd69c9..483c8b75bebb8 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
+@@ -29,70 +29,6 @@ size_t mlxsw_sp_acl_tcam_priv_size(struct mlxsw_sp *mlxsw_sp)
+ #define MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_MIN 3000 /* ms */
+ #define MLXSW_SP_ACL_TCAM_VREGION_REHASH_CREDITS 100 /* number of entries */
+
+-int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp,
+- struct mlxsw_sp_acl_tcam *tcam)
+-{
+- const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
+- u64 max_tcam_regions;
+- u64 max_regions;
+- u64 max_groups;
+- size_t alloc_size;
+- int err;
+-
+- mutex_init(&tcam->lock);
+- tcam->vregion_rehash_intrvl =
+- MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_DFLT;
+- INIT_LIST_HEAD(&tcam->vregion_list);
+-
+- max_tcam_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core,
+- ACL_MAX_TCAM_REGIONS);
+- max_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_REGIONS);
+-
+- /* Use 1:1 mapping between ACL region and TCAM region */
+- if (max_tcam_regions < max_regions)
+- max_regions = max_tcam_regions;
+-
+- alloc_size = sizeof(tcam->used_regions[0]) * BITS_TO_LONGS(max_regions);
+- tcam->used_regions = kzalloc(alloc_size, GFP_KERNEL);
+- if (!tcam->used_regions)
+- return -ENOMEM;
+- tcam->max_regions = max_regions;
+-
+- max_groups = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_GROUPS);
+- alloc_size = sizeof(tcam->used_groups[0]) * BITS_TO_LONGS(max_groups);
+- tcam->used_groups = kzalloc(alloc_size, GFP_KERNEL);
+- if (!tcam->used_groups) {
+- err = -ENOMEM;
+- goto err_alloc_used_groups;
+- }
+- tcam->max_groups = max_groups;
+- tcam->max_group_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
+- ACL_MAX_GROUP_SIZE);
+-
+- err = ops->init(mlxsw_sp, tcam->priv, tcam);
+- if (err)
+- goto err_tcam_init;
+-
+- return 0;
+-
+-err_tcam_init:
+- kfree(tcam->used_groups);
+-err_alloc_used_groups:
+- kfree(tcam->used_regions);
+- return err;
+-}
+-
+-void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp,
+- struct mlxsw_sp_acl_tcam *tcam)
+-{
+- const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
+-
+- mutex_destroy(&tcam->lock);
+- ops->fini(mlxsw_sp, tcam->priv);
+- kfree(tcam->used_groups);
+- kfree(tcam->used_regions);
+-}
+-
+ int mlxsw_sp_acl_tcam_priority_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ u32 *priority, bool fillup_priority)
+@@ -1545,6 +1481,73 @@ mlxsw_sp_acl_tcam_vregion_rehash(struct mlxsw_sp *mlxsw_sp,
+ mlxsw_sp_acl_tcam_vregion_rehash_end(mlxsw_sp, vregion, ctx);
+ }
+
++int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp,
++ struct mlxsw_sp_acl_tcam *tcam)
++{
++ const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
++ u64 max_tcam_regions;
++ u64 max_regions;
++ u64 max_groups;
++ int err;
++
++ mutex_init(&tcam->lock);
++ tcam->vregion_rehash_intrvl =
++ MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_DFLT;
++ INIT_LIST_HEAD(&tcam->vregion_list);
++
++ max_tcam_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core,
++ ACL_MAX_TCAM_REGIONS);
++ max_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_REGIONS);
++
++ /* Use 1:1 mapping between ACL region and TCAM region */
++ if (max_tcam_regions < max_regions)
++ max_regions = max_tcam_regions;
++
++ tcam->used_regions = bitmap_zalloc(max_regions, GFP_KERNEL);
++ if (!tcam->used_regions) {
++ err = -ENOMEM;
++ goto err_alloc_used_regions;
++ }
++ tcam->max_regions = max_regions;
++
++ max_groups = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_GROUPS);
++ tcam->used_groups = bitmap_zalloc(max_groups, GFP_KERNEL);
++ if (!tcam->used_groups) {
++ err = -ENOMEM;
++ goto err_alloc_used_groups;
++ }
++ tcam->max_groups = max_groups;
++ tcam->max_group_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
++ ACL_MAX_GROUP_SIZE);
++ tcam->max_group_size = min_t(unsigned int, tcam->max_group_size,
++ MLXSW_REG_PAGT_ACL_MAX_NUM);
++
++ err = ops->init(mlxsw_sp, tcam->priv, tcam);
++ if (err)
++ goto err_tcam_init;
++
++ return 0;
++
++err_tcam_init:
++ bitmap_free(tcam->used_groups);
++err_alloc_used_groups:
++ bitmap_free(tcam->used_regions);
++err_alloc_used_regions:
++ mutex_destroy(&tcam->lock);
++ return err;
++}
++
++void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp,
++ struct mlxsw_sp_acl_tcam *tcam)
++{
++ const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
++
++ ops->fini(mlxsw_sp, tcam->priv);
++ bitmap_free(tcam->used_groups);
++ bitmap_free(tcam->used_regions);
++ mutex_destroy(&tcam->lock);
++}
++
+ static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv4[] = {
+ MLXSW_AFK_ELEMENT_SRC_SYS_PORT,
+ MLXSW_AFK_ELEMENT_DMAC_32_47,
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
+index b65b93a2b9bc6..fc2257753b9b3 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
+@@ -122,7 +122,6 @@ int mlxsw_sp_counter_pool_init(struct mlxsw_sp *mlxsw_sp)
+ unsigned int sub_pools_count = ARRAY_SIZE(mlxsw_sp_counter_sub_pools);
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+ struct mlxsw_sp_counter_pool *pool;
+- unsigned int map_size;
+ int err;
+
+ pool = kzalloc(struct_size(pool, sub_pools, sub_pools_count),
+@@ -143,9 +142,7 @@ int mlxsw_sp_counter_pool_init(struct mlxsw_sp *mlxsw_sp)
+ devlink_resource_occ_get_register(devlink, MLXSW_SP_RESOURCE_COUNTERS,
+ mlxsw_sp_counter_pool_occ_get, pool);
+
+- map_size = BITS_TO_LONGS(pool->pool_size) * sizeof(unsigned long);
+-
+- pool->usage = kzalloc(map_size, GFP_KERNEL);
++ pool->usage = bitmap_zalloc(pool->pool_size, GFP_KERNEL);
+ if (!pool->usage) {
+ err = -ENOMEM;
+ goto err_usage_alloc;
+@@ -158,7 +155,7 @@ int mlxsw_sp_counter_pool_init(struct mlxsw_sp *mlxsw_sp)
+ return 0;
+
+ err_sub_pools_init:
+- kfree(pool->usage);
++ bitmap_free(pool->usage);
+ err_usage_alloc:
+ devlink_resource_occ_get_unregister(devlink,
+ MLXSW_SP_RESOURCE_COUNTERS);
+@@ -176,7 +173,7 @@ void mlxsw_sp_counter_pool_fini(struct mlxsw_sp *mlxsw_sp)
+ WARN_ON(find_first_bit(pool->usage, pool->pool_size) !=
+ pool->pool_size);
+ WARN_ON(atomic_read(&pool->active_entries_count));
+- kfree(pool->usage);
++ bitmap_free(pool->usage);
+ devlink_resource_occ_get_unregister(devlink,
+ MLXSW_SP_RESOURCE_COUNTERS);
+ kfree(pool);
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+index 22fede5cb32c2..81c7e8a7fcf5d 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+@@ -1635,16 +1635,13 @@ mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp,
+ u16 fid)
+ {
+ struct mlxsw_sp_mid *mid;
+- size_t alloc_size;
+
+ mid = kzalloc(sizeof(*mid), GFP_KERNEL);
+ if (!mid)
+ return NULL;
+
+- alloc_size = sizeof(unsigned long) *
+- BITS_TO_LONGS(mlxsw_core_max_ports(mlxsw_sp->core));
+-
+- mid->ports_in_mid = kzalloc(alloc_size, GFP_KERNEL);
++ mid->ports_in_mid = bitmap_zalloc(mlxsw_core_max_ports(mlxsw_sp->core),
++ GFP_KERNEL);
+ if (!mid->ports_in_mid)
+ goto err_ports_in_mid_alloc;
+
+@@ -1663,7 +1660,7 @@ out:
+ return mid;
+
+ err_write_mdb_entry:
+- kfree(mid->ports_in_mid);
++ bitmap_free(mid->ports_in_mid);
+ err_ports_in_mid_alloc:
+ kfree(mid);
+ return NULL;
+@@ -1680,7 +1677,7 @@ static int mlxsw_sp_port_remove_from_mid(struct mlxsw_sp_port *mlxsw_sp_port,
+ mlxsw_core_max_ports(mlxsw_sp->core))) {
+ err = mlxsw_sp_mc_remove_mdb_entry(mlxsw_sp, mid);
+ list_del(&mid->list);
+- kfree(mid->ports_in_mid);
++ bitmap_free(mid->ports_in_mid);
+ kfree(mid);
+ }
+ return err;
+diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
+index 27b1663c476e7..64b209a0ad219 100644
+--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
++++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
+@@ -391,7 +391,7 @@ nla_put_failure:
+
+ struct rtnl_link_ops rmnet_link_ops __read_mostly = {
+ .kind = "rmnet",
+- .maxtype = __IFLA_RMNET_MAX,
++ .maxtype = IFLA_RMNET_MAX,
+ .priv_size = sizeof(struct rmnet_priv),
+ .setup = rmnet_vnd_setup,
+ .validate = rmnet_rtnl_validate,
+diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
+index 1fa002c42c889..2bf5d4c208d3d 100644
+--- a/drivers/net/ethernet/renesas/ravb_main.c
++++ b/drivers/net/ethernet/renesas/ravb_main.c
+@@ -1567,7 +1567,7 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+ struct ravb_tstamp_skb *ts_skb;
+ struct ravb_tx_desc *desc;
+ unsigned long flags;
+- u32 dma_addr;
++ dma_addr_t dma_addr;
+ void *buffer;
+ u32 entry;
+ u32 len;
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
+index cef9734ef259b..da7b5d26a5897 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
+@@ -59,11 +59,6 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
+ return -ENODEV;
+ }
+
+- if (!of_device_is_compatible(np, "loongson, pci-gmac")) {
+- pr_info("dwmac_loongson_pci: Incompatible OF node\n");
+- return -ENODEV;
+- }
+-
+ plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
+ if (!plat)
+ return -ENOMEM;
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+index f03779205ade4..1a74437787c03 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+@@ -241,6 +241,7 @@ struct stmmac_priv {
+ u32 msg_enable;
+ int wolopts;
+ int wol_irq;
++ bool wol_irq_disabled;
+ int clk_csr;
+ struct timer_list eee_ctrl_timer;
+ int lpi_irq;
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+index 9e8ae4384e4fb..7b954365e5641 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+@@ -758,10 +758,16 @@ static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+ if (wol->wolopts) {
+ pr_info("stmmac: wakeup enable\n");
+ device_set_wakeup_enable(priv->device, 1);
+- enable_irq_wake(priv->wol_irq);
++ /* Avoid unbalanced enable_irq_wake calls */
++ if (priv->wol_irq_disabled)
++ enable_irq_wake(priv->wol_irq);
++ priv->wol_irq_disabled = false;
+ } else {
+ device_set_wakeup_enable(priv->device, 0);
+- disable_irq_wake(priv->wol_irq);
++ /* Avoid unbalanced disable_irq_wake calls */
++ if (!priv->wol_irq_disabled)
++ disable_irq_wake(priv->wol_irq);
++ priv->wol_irq_disabled = true;
+ }
+
+ mutex_lock(&priv->lock);
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index 08693d7458d15..ede630bfad2f4 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -3506,6 +3506,7 @@ static int stmmac_request_irq_multi_msi(struct net_device *dev)
+ /* Request the Wake IRQ in case of another line
+ * is used for WoL
+ */
++ priv->wol_irq_disabled = true;
+ if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
+ int_name = priv->int_name_wol;
+ sprintf(int_name, "%s:%s", dev->name, "wol");
+diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+index 4aa9477ac5977..1fa6f0dacd2de 100644
+--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
++++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+@@ -53,7 +53,7 @@
+ #define AM65_CPSW_MAX_PORTS 8
+
+ #define AM65_CPSW_MIN_PACKET_SIZE VLAN_ETH_ZLEN
+-#define AM65_CPSW_MAX_PACKET_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
++#define AM65_CPSW_MAX_PACKET_SIZE 2024
+
+ #define AM65_CPSW_REG_CTL 0x004
+ #define AM65_CPSW_REG_STAT_PORT_EN 0x014
+@@ -1985,7 +1985,8 @@ am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx)
+ eth_hw_addr_set(port->ndev, port->slave.mac_addr);
+
+ port->ndev->min_mtu = AM65_CPSW_MIN_PACKET_SIZE;
+- port->ndev->max_mtu = AM65_CPSW_MAX_PACKET_SIZE;
++ port->ndev->max_mtu = AM65_CPSW_MAX_PACKET_SIZE -
++ (VLAN_ETH_HLEN + ETH_FCS_LEN);
+ port->ndev->hw_features = NETIF_F_SG |
+ NETIF_F_RXCSUM |
+ NETIF_F_HW_CSUM |
+diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
+index 05a8985d71073..dc209ad8a0fed 100644
+--- a/drivers/net/phy/micrel.c
++++ b/drivers/net/phy/micrel.c
+@@ -1728,6 +1728,7 @@ static struct phy_driver ksphy_driver[] = {
+ /* PHY_GBIT_FEATURES */
+ .driver_data = &ksz9021_type,
+ .probe = kszphy_probe,
++ .soft_reset = genphy_soft_reset,
+ .config_init = ksz9131_config_init,
+ .config_intr = kszphy_config_intr,
+ .handle_interrupt = kszphy_handle_interrupt,
+diff --git a/drivers/net/wireless/ath/ath11k/ahb.c b/drivers/net/wireless/ath/ath11k/ahb.c
+index 24bd0520926bf..853a5df4f38c6 100644
+--- a/drivers/net/wireless/ath/ath11k/ahb.c
++++ b/drivers/net/wireless/ath/ath11k/ahb.c
+@@ -650,8 +650,8 @@ static int ath11k_core_get_rproc(struct ath11k_base *ab)
+
+ prproc = rproc_get_by_phandle(rproc_phandle);
+ if (!prproc) {
+- ath11k_err(ab, "failed to get rproc\n");
+- return -EINVAL;
++ ath11k_dbg(ab, ATH11K_DBG_AHB, "failed to get rproc, deferring\n");
++ return -EPROBE_DEFER;
+ }
+ ab_ahb->tgt_rproc = prproc;
+
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
+index 6d82725cb87d0..32ed8227d985a 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
+@@ -99,17 +99,6 @@ static void iwl_mvm_phy_ctxt_set_rxchain(struct iwl_mvm *mvm,
+ active_cnt = 2;
+ }
+
+- /*
+- * If the firmware requested it, then we know that it supports
+- * getting zero for the values to indicate "use one, but pick
+- * which one yourself", which means it can dynamically pick one
+- * that e.g. has better RSSI.
+- */
+- if (mvm->fw_static_smps_request && active_cnt == 1 && idle_cnt == 1) {
+- idle_cnt = 0;
+- active_cnt = 0;
+- }
+-
+ *rxchain_info = cpu_to_le32(iwl_mvm_get_valid_rx_ant(mvm) <<
+ PHY_RX_CHAIN_VALID_POS);
+ *rxchain_info |= cpu_to_le32(idle_cnt << PHY_RX_CHAIN_CNT_POS);
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+index 08dd227bad4b1..266d1e9a89f5e 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+@@ -2054,7 +2054,7 @@ int iwl_mvm_flush_sta_tids(struct iwl_mvm *mvm, u32 sta_id, u16 tids)
+ WARN_ON(!iwl_mvm_has_new_tx_api(mvm));
+
+ if (iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP, TXPATH_FLUSH, 0) > 0)
+- cmd.flags |= CMD_WANT_SKB;
++ cmd.flags |= CMD_WANT_SKB | CMD_SEND_IN_RFKILL;
+
+ IWL_DEBUG_TX_QUEUES(mvm, "flush for sta id %d tid mask 0x%x\n",
+ sta_id, tids);
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+index 29be7ed768947..17e53c7eb62d5 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+@@ -1380,7 +1380,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
+ * if it is true then one of the handlers took the page.
+ */
+
+- if (reclaim) {
++ if (reclaim && txq) {
+ u16 sequence = le16_to_cpu(pkt->hdr.sequence);
+ int index = SEQ_TO_INDEX(sequence);
+ int cmd_index = iwl_txq_get_cmd_index(txq, index);
+diff --git a/drivers/net/wireless/marvell/libertas/Kconfig b/drivers/net/wireless/marvell/libertas/Kconfig
+index 6d62ab49aa8d4..c7d02adb3eead 100644
+--- a/drivers/net/wireless/marvell/libertas/Kconfig
++++ b/drivers/net/wireless/marvell/libertas/Kconfig
+@@ -2,8 +2,6 @@
+ config LIBERTAS
+ tristate "Marvell 8xxx Libertas WLAN driver support"
+ depends on CFG80211
+- select WIRELESS_EXT
+- select WEXT_SPY
+ select LIB80211
+ select FW_LOADER
+ help
+diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+index 97f0f39364d67..109b1bef4c707 100644
+--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
++++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+@@ -1980,6 +1980,8 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
+
+ mwifiex_set_sys_config_invalid_data(bss_cfg);
+
++ memcpy(bss_cfg->mac_addr, priv->curr_addr, ETH_ALEN);
++
+ if (params->beacon_interval)
+ bss_cfg->beacon_period = params->beacon_interval;
+ if (params->dtim_period)
+diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h
+index 2ff23ab259ab3..29140949c01ca 100644
+--- a/drivers/net/wireless/marvell/mwifiex/fw.h
++++ b/drivers/net/wireless/marvell/mwifiex/fw.h
+@@ -177,6 +177,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
+ #define TLV_TYPE_STA_MAC_ADDR (PROPRIETARY_TLV_BASE_ID + 32)
+ #define TLV_TYPE_BSSID (PROPRIETARY_TLV_BASE_ID + 35)
+ #define TLV_TYPE_CHANNELBANDLIST (PROPRIETARY_TLV_BASE_ID + 42)
++#define TLV_TYPE_UAP_MAC_ADDRESS (PROPRIETARY_TLV_BASE_ID + 43)
+ #define TLV_TYPE_UAP_BEACON_PERIOD (PROPRIETARY_TLV_BASE_ID + 44)
+ #define TLV_TYPE_UAP_DTIM_PERIOD (PROPRIETARY_TLV_BASE_ID + 45)
+ #define TLV_TYPE_UAP_BCAST_SSID (PROPRIETARY_TLV_BASE_ID + 48)
+diff --git a/drivers/net/wireless/marvell/mwifiex/ioctl.h b/drivers/net/wireless/marvell/mwifiex/ioctl.h
+index 3db449efa167c..cdb5b3881782f 100644
+--- a/drivers/net/wireless/marvell/mwifiex/ioctl.h
++++ b/drivers/net/wireless/marvell/mwifiex/ioctl.h
+@@ -119,6 +119,7 @@ struct mwifiex_uap_bss_param {
+ u8 qos_info;
+ u8 power_constraint;
+ struct mwifiex_types_wmm_info wmm_info;
++ u8 mac_addr[ETH_ALEN];
+ };
+
+ enum {
+diff --git a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
+index 18e89777b7841..82aa6f0469a1b 100644
+--- a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
++++ b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
+@@ -480,6 +480,7 @@ void mwifiex_config_uap_11d(struct mwifiex_private *priv,
+ static int
+ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
+ {
++ struct host_cmd_tlv_mac_addr *mac_tlv;
+ struct host_cmd_tlv_dtim_period *dtim_period;
+ struct host_cmd_tlv_beacon_period *beacon_period;
+ struct host_cmd_tlv_ssid *ssid;
+@@ -499,6 +500,13 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
+ int i;
+ u16 cmd_size = *param_size;
+
++ mac_tlv = (struct host_cmd_tlv_mac_addr *)tlv;
++ mac_tlv->header.type = cpu_to_le16(TLV_TYPE_UAP_MAC_ADDRESS);
++ mac_tlv->header.len = cpu_to_le16(ETH_ALEN);
++ memcpy(mac_tlv->mac_addr, bss_cfg->mac_addr, ETH_ALEN);
++ cmd_size += sizeof(struct host_cmd_tlv_mac_addr);
++ tlv += sizeof(struct host_cmd_tlv_mac_addr);
++
+ if (bss_cfg->ssid.ssid_len) {
+ ssid = (struct host_cmd_tlv_ssid *)tlv;
+ ssid->header.type = cpu_to_le16(TLV_TYPE_UAP_SSID);
+diff --git a/drivers/net/wireless/mediatek/mt76/eeprom.c b/drivers/net/wireless/mediatek/mt76/eeprom.c
+index db0cd56c8dc7f..832b0792e0e9c 100644
+--- a/drivers/net/wireless/mediatek/mt76/eeprom.c
++++ b/drivers/net/wireless/mediatek/mt76/eeprom.c
+@@ -51,7 +51,7 @@ int mt76_get_of_eeprom(struct mt76_dev *dev, void *eep, int offset, int len)
+ goto out_put_node;
+ }
+
+- offset = be32_to_cpup(list);
++ offset += be32_to_cpup(list);
+ ret = mtd_read(mtd, offset, len, &retlen, eep);
+ put_mtd_device(mtd);
+ if (ret)
+diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c
+index 3776495fd9d03..679ae786cf450 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/pci.c
++++ b/drivers/net/wireless/realtek/rtlwifi/pci.c
+@@ -164,21 +164,29 @@ static bool _rtl_pci_platform_switch_device_pci_aspm(
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
++ value &= PCI_EXP_LNKCTL_ASPMC;
++
+ if (rtlhal->hw_type != HARDWARE_TYPE_RTL8192SE)
+- value |= 0x40;
++ value |= PCI_EXP_LNKCTL_CCC;
+
+- pci_write_config_byte(rtlpci->pdev, 0x80, value);
++ pcie_capability_clear_and_set_word(rtlpci->pdev, PCI_EXP_LNKCTL,
++ PCI_EXP_LNKCTL_ASPMC | value,
++ value);
+
+ return false;
+ }
+
+-/*When we set 0x01 to enable clk request. Set 0x0 to disable clk req.*/
+-static void _rtl_pci_switch_clk_req(struct ieee80211_hw *hw, u8 value)
++/* @value is PCI_EXP_LNKCTL_CLKREQ_EN or 0 to enable/disable clk request. */
++static void _rtl_pci_switch_clk_req(struct ieee80211_hw *hw, u16 value)
+ {
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+- pci_write_config_byte(rtlpci->pdev, 0x81, value);
++ value &= PCI_EXP_LNKCTL_CLKREQ_EN;
++
++ pcie_capability_clear_and_set_word(rtlpci->pdev, PCI_EXP_LNKCTL,
++ PCI_EXP_LNKCTL_CLKREQ_EN,
++ value);
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE)
+ udelay(100);
+@@ -192,11 +200,8 @@ static void rtl_pci_disable_aspm(struct ieee80211_hw *hw)
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ u8 pcibridge_vendor = pcipriv->ndis_adapter.pcibridge_vendor;
+- u8 num4bytes = pcipriv->ndis_adapter.num4bytes;
+ /*Retrieve original configuration settings. */
+ u8 linkctrl_reg = pcipriv->ndis_adapter.linkctrl_reg;
+- u16 pcibridge_linkctrlreg = pcipriv->ndis_adapter.
+- pcibridge_linkctrlreg;
+ u16 aspmlevel = 0;
+ u8 tmp_u1b = 0;
+
+@@ -221,16 +226,8 @@ static void rtl_pci_disable_aspm(struct ieee80211_hw *hw)
+ /*Set corresponding value. */
+ aspmlevel |= BIT(0) | BIT(1);
+ linkctrl_reg &= ~aspmlevel;
+- pcibridge_linkctrlreg &= ~(BIT(0) | BIT(1));
+
+ _rtl_pci_platform_switch_device_pci_aspm(hw, linkctrl_reg);
+- udelay(50);
+-
+- /*4 Disable Pci Bridge ASPM */
+- pci_write_config_byte(rtlpci->pdev, (num4bytes << 2),
+- pcibridge_linkctrlreg);
+-
+- udelay(50);
+ }
+
+ /*Enable RTL8192SE ASPM & Enable Pci Bridge ASPM for
+@@ -245,9 +242,7 @@ static void rtl_pci_enable_aspm(struct ieee80211_hw *hw)
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ u8 pcibridge_vendor = pcipriv->ndis_adapter.pcibridge_vendor;
+- u8 num4bytes = pcipriv->ndis_adapter.num4bytes;
+ u16 aspmlevel;
+- u8 u_pcibridge_aspmsetting;
+ u8 u_device_aspmsetting;
+
+ if (!ppsc->support_aspm)
+@@ -259,25 +254,6 @@ static void rtl_pci_enable_aspm(struct ieee80211_hw *hw)
+ return;
+ }
+
+- /*4 Enable Pci Bridge ASPM */
+-
+- u_pcibridge_aspmsetting =
+- pcipriv->ndis_adapter.pcibridge_linkctrlreg |
+- rtlpci->const_hostpci_aspm_setting;
+-
+- if (pcibridge_vendor == PCI_BRIDGE_VENDOR_INTEL)
+- u_pcibridge_aspmsetting &= ~BIT(0);
+-
+- pci_write_config_byte(rtlpci->pdev, (num4bytes << 2),
+- u_pcibridge_aspmsetting);
+-
+- rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+- "PlatformEnableASPM(): Write reg[%x] = %x\n",
+- (pcipriv->ndis_adapter.pcibridge_pciehdr_offset + 0x10),
+- u_pcibridge_aspmsetting);
+-
+- udelay(50);
+-
+ /*Get ASPM level (with/without Clock Req) */
+ aspmlevel = rtlpci->const_devicepci_aspm_setting;
+ u_device_aspmsetting = pcipriv->ndis_adapter.linkctrl_reg;
+@@ -291,7 +267,8 @@ static void rtl_pci_enable_aspm(struct ieee80211_hw *hw)
+
+ if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_CLK_REQ) {
+ _rtl_pci_switch_clk_req(hw, (ppsc->reg_rfps_level &
+- RT_RF_OFF_LEVL_CLK_REQ) ? 1 : 0);
++ RT_RF_OFF_LEVL_CLK_REQ) ?
++ PCI_EXP_LNKCTL_CLKREQ_EN : 0);
+ RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_CLK_REQ);
+ }
+ udelay(100);
+@@ -359,22 +336,6 @@ static bool rtl_pci_check_buddy_priv(struct ieee80211_hw *hw,
+ return find_buddy_priv;
+ }
+
+-static void rtl_pci_get_linkcontrol_field(struct ieee80211_hw *hw)
+-{
+- struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+- struct rtl_pci *rtlpci = rtl_pcidev(pcipriv);
+- u8 capabilityoffset = pcipriv->ndis_adapter.pcibridge_pciehdr_offset;
+- u8 linkctrl_reg;
+- u8 num4bbytes;
+-
+- num4bbytes = (capabilityoffset + 0x10) / 4;
+-
+- /*Read Link Control Register */
+- pci_read_config_byte(rtlpci->pdev, (num4bbytes << 2), &linkctrl_reg);
+-
+- pcipriv->ndis_adapter.pcibridge_linkctrlreg = linkctrl_reg;
+-}
+-
+ static void rtl_pci_parse_configuration(struct pci_dev *pdev,
+ struct ieee80211_hw *hw)
+ {
+@@ -2035,12 +1996,6 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
+ PCI_SLOT(bridge_pdev->devfn);
+ pcipriv->ndis_adapter.pcibridge_funcnum =
+ PCI_FUNC(bridge_pdev->devfn);
+- pcipriv->ndis_adapter.pcibridge_pciehdr_offset =
+- pci_pcie_cap(bridge_pdev);
+- pcipriv->ndis_adapter.num4bytes =
+- (pcipriv->ndis_adapter.pcibridge_pciehdr_offset + 0x10) / 4;
+-
+- rtl_pci_get_linkcontrol_field(hw);
+
+ if (pcipriv->ndis_adapter.pcibridge_vendor ==
+ PCI_BRIDGE_VENDOR_AMD) {
+@@ -2057,13 +2012,11 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
+ pdev->vendor, pcipriv->ndis_adapter.linkctrl_reg);
+
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+- "pci_bridge busnumber:devnumber:funcnumber:vendor:pcie_cap:link_ctl_reg:amd %d:%d:%d:%x:%x:%x:%x\n",
++ "pci_bridge busnumber:devnumber:funcnumber:vendor:amd %d:%d:%d:%x:%x\n",
+ pcipriv->ndis_adapter.pcibridge_busnum,
+ pcipriv->ndis_adapter.pcibridge_devnum,
+ pcipriv->ndis_adapter.pcibridge_funcnum,
+ pcibridge_vendors[pcipriv->ndis_adapter.pcibridge_vendor],
+- pcipriv->ndis_adapter.pcibridge_pciehdr_offset,
+- pcipriv->ndis_adapter.pcibridge_linkctrlreg,
+ pcipriv->ndis_adapter.amd_l1_patch);
+
+ rtl_pci_parse_configuration(pdev, hw);
+diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.h b/drivers/net/wireless/realtek/rtlwifi/pci.h
+index 866861626a0a1..d6307197dfea0 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/pci.h
++++ b/drivers/net/wireless/realtek/rtlwifi/pci.h
+@@ -236,11 +236,6 @@ struct mp_adapter {
+ u16 pcibridge_vendorid;
+ u16 pcibridge_deviceid;
+
+- u8 num4bytes;
+-
+- u8 pcibridge_pciehdr_offset;
+- u8 pcibridge_linkctrlreg;
+-
+ bool amd_l1_patch;
+ };
+
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c
+index 12d0b3a87af7c..0fab3a0c7d49d 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c
+@@ -16,12 +16,6 @@ static u32 _rtl88e_phy_rf_serial_read(struct ieee80211_hw *hw,
+ static void _rtl88e_phy_rf_serial_write(struct ieee80211_hw *hw,
+ enum radio_path rfpath, u32 offset,
+ u32 data);
+-static u32 _rtl88e_phy_calculate_bit_shift(u32 bitmask)
+-{
+- u32 i = ffs(bitmask);
+-
+- return i ? i - 1 : 32;
+-}
+ static bool _rtl88e_phy_bb8188e_config_parafile(struct ieee80211_hw *hw);
+ static bool _rtl88e_phy_config_mac_with_headerfile(struct ieee80211_hw *hw);
+ static bool phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
+@@ -51,7 +45,7 @@ u32 rtl88e_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask)
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x)\n", regaddr, bitmask);
+ originalvalue = rtl_read_dword(rtlpriv, regaddr);
+- bitshift = _rtl88e_phy_calculate_bit_shift(bitmask);
++ bitshift = calculate_bit_shift(bitmask);
+ returnvalue = (originalvalue & bitmask) >> bitshift;
+
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+@@ -74,7 +68,7 @@ void rtl88e_phy_set_bb_reg(struct ieee80211_hw *hw,
+
+ if (bitmask != MASKDWORD) {
+ originalvalue = rtl_read_dword(rtlpriv, regaddr);
+- bitshift = _rtl88e_phy_calculate_bit_shift(bitmask);
++ bitshift = calculate_bit_shift(bitmask);
+ data = ((originalvalue & (~bitmask)) | (data << bitshift));
+ }
+
+@@ -99,7 +93,7 @@ u32 rtl88e_phy_query_rf_reg(struct ieee80211_hw *hw,
+
+
+ original_value = _rtl88e_phy_rf_serial_read(hw, rfpath, regaddr);
+- bitshift = _rtl88e_phy_calculate_bit_shift(bitmask);
++ bitshift = calculate_bit_shift(bitmask);
+ readback_value = (original_value & bitmask) >> bitshift;
+
+ spin_unlock(&rtlpriv->locks.rf_lock);
+@@ -127,7 +121,7 @@ void rtl88e_phy_set_rf_reg(struct ieee80211_hw *hw,
+ original_value = _rtl88e_phy_rf_serial_read(hw,
+ rfpath,
+ regaddr);
+- bitshift = _rtl88e_phy_calculate_bit_shift(bitmask);
++ bitshift = calculate_bit_shift(bitmask);
+ data =
+ ((original_value & (~bitmask)) |
+ (data << bitshift));
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c
+index 3d29c8dbb2559..144ee780e1b6a 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c
+@@ -17,7 +17,7 @@ u32 rtl92c_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask)
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE, "regaddr(%#x), bitmask(%#x)\n",
+ regaddr, bitmask);
+ originalvalue = rtl_read_dword(rtlpriv, regaddr);
+- bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
++ bitshift = calculate_bit_shift(bitmask);
+ returnvalue = (originalvalue & bitmask) >> bitshift;
+
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+@@ -40,7 +40,7 @@ void rtl92c_phy_set_bb_reg(struct ieee80211_hw *hw,
+
+ if (bitmask != MASKDWORD) {
+ originalvalue = rtl_read_dword(rtlpriv, regaddr);
+- bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
++ bitshift = calculate_bit_shift(bitmask);
+ data = ((originalvalue & (~bitmask)) | (data << bitshift));
+ }
+
+@@ -143,14 +143,6 @@ void _rtl92c_phy_rf_serial_write(struct ieee80211_hw *hw,
+ }
+ EXPORT_SYMBOL(_rtl92c_phy_rf_serial_write);
+
+-u32 _rtl92c_phy_calculate_bit_shift(u32 bitmask)
+-{
+- u32 i = ffs(bitmask);
+-
+- return i ? i - 1 : 32;
+-}
+-EXPORT_SYMBOL(_rtl92c_phy_calculate_bit_shift);
+-
+ static void _rtl92c_phy_bb_config_1t(struct ieee80211_hw *hw)
+ {
+ rtl_set_bbreg(hw, RFPGA0_TXINFO, 0x3, 0x2);
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.h
+index 75afa6253ad02..e64d377dfe9e2 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.h
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.h
+@@ -196,7 +196,6 @@ bool rtl92c_phy_set_rf_power_state(struct ieee80211_hw *hw,
+ void rtl92ce_phy_set_rf_on(struct ieee80211_hw *hw);
+ void rtl92c_phy_set_io(struct ieee80211_hw *hw);
+ void rtl92c_bb_block_on(struct ieee80211_hw *hw);
+-u32 _rtl92c_phy_calculate_bit_shift(u32 bitmask);
+ long _rtl92c_phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw,
+ enum wireless_mode wirelessmode,
+ u8 txpwridx);
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c
+index 04735da11168a..6b98e77768e96 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c
+@@ -39,7 +39,7 @@ u32 rtl92c_phy_query_rf_reg(struct ieee80211_hw *hw,
+ rfpath, regaddr);
+ }
+
+- bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
++ bitshift = calculate_bit_shift(bitmask);
+ readback_value = (original_value & bitmask) >> bitshift;
+
+ spin_unlock(&rtlpriv->locks.rf_lock);
+@@ -110,7 +110,7 @@ void rtl92ce_phy_set_rf_reg(struct ieee80211_hw *hw,
+ original_value = _rtl92c_phy_rf_serial_read(hw,
+ rfpath,
+ regaddr);
+- bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
++ bitshift = calculate_bit_shift(bitmask);
+ data =
+ ((original_value & (~bitmask)) |
+ (data << bitshift));
+@@ -122,7 +122,7 @@ void rtl92ce_phy_set_rf_reg(struct ieee80211_hw *hw,
+ original_value = _rtl92c_phy_fw_rf_serial_read(hw,
+ rfpath,
+ regaddr);
+- bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
++ bitshift = calculate_bit_shift(bitmask);
+ data =
+ ((original_value & (~bitmask)) |
+ (data << bitshift));
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.h
+index 7582a162bd112..c7a0d4c776f0a 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.h
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.h
+@@ -94,7 +94,6 @@ u32 _rtl92c_phy_rf_serial_read(struct ieee80211_hw *hw, enum radio_path rfpath,
+ u32 offset);
+ u32 _rtl92c_phy_fw_rf_serial_read(struct ieee80211_hw *hw,
+ enum radio_path rfpath, u32 offset);
+-u32 _rtl92c_phy_calculate_bit_shift(u32 bitmask);
+ void _rtl92c_phy_rf_serial_write(struct ieee80211_hw *hw,
+ enum radio_path rfpath, u32 offset, u32 data);
+ void _rtl92c_phy_fw_rf_serial_write(struct ieee80211_hw *hw,
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c
+index a8d9fe269f313..0b8cb7e61fd80 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c
+@@ -32,7 +32,7 @@ u32 rtl92cu_phy_query_rf_reg(struct ieee80211_hw *hw,
+ original_value = _rtl92c_phy_fw_rf_serial_read(hw,
+ rfpath, regaddr);
+ }
+- bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
++ bitshift = calculate_bit_shift(bitmask);
+ readback_value = (original_value & bitmask) >> bitshift;
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), rfpath(%#x), bitmask(%#x), original_value(%#x)\n",
+@@ -56,7 +56,7 @@ void rtl92cu_phy_set_rf_reg(struct ieee80211_hw *hw,
+ original_value = _rtl92c_phy_rf_serial_read(hw,
+ rfpath,
+ regaddr);
+- bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
++ bitshift = calculate_bit_shift(bitmask);
+ data =
+ ((original_value & (~bitmask)) |
+ (data << bitshift));
+@@ -67,7 +67,7 @@ void rtl92cu_phy_set_rf_reg(struct ieee80211_hw *hw,
+ original_value = _rtl92c_phy_fw_rf_serial_read(hw,
+ rfpath,
+ regaddr);
+- bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
++ bitshift = calculate_bit_shift(bitmask);
+ data =
+ ((original_value & (~bitmask)) |
+ (data << bitshift));
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
+index 4d153bd62c53f..c3c07ca77614a 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
+@@ -169,13 +169,6 @@ static const u8 channel_all[59] = {
+ 157, 159, 161, 163, 165
+ };
+
+-static u32 _rtl92d_phy_calculate_bit_shift(u32 bitmask)
+-{
+- u32 i = ffs(bitmask);
+-
+- return i ? i - 1 : 32;
+-}
+-
+ u32 rtl92d_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask)
+ {
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+@@ -198,7 +191,7 @@ u32 rtl92d_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask)
+ } else {
+ originalvalue = rtl_read_dword(rtlpriv, regaddr);
+ }
+- bitshift = _rtl92d_phy_calculate_bit_shift(bitmask);
++ bitshift = calculate_bit_shift(bitmask);
+ returnvalue = (originalvalue & bitmask) >> bitshift;
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "BBR MASK=0x%x Addr[0x%x]=0x%x\n",
+@@ -230,7 +223,7 @@ void rtl92d_phy_set_bb_reg(struct ieee80211_hw *hw,
+ dbi_direct);
+ else
+ originalvalue = rtl_read_dword(rtlpriv, regaddr);
+- bitshift = _rtl92d_phy_calculate_bit_shift(bitmask);
++ bitshift = calculate_bit_shift(bitmask);
+ data = ((originalvalue & (~bitmask)) | (data << bitshift));
+ }
+ if (rtlhal->during_mac1init_radioa || rtlhal->during_mac0init_radiob)
+@@ -317,7 +310,7 @@ u32 rtl92d_phy_query_rf_reg(struct ieee80211_hw *hw,
+ regaddr, rfpath, bitmask);
+ spin_lock(&rtlpriv->locks.rf_lock);
+ original_value = _rtl92d_phy_rf_serial_read(hw, rfpath, regaddr);
+- bitshift = _rtl92d_phy_calculate_bit_shift(bitmask);
++ bitshift = calculate_bit_shift(bitmask);
+ readback_value = (original_value & bitmask) >> bitshift;
+ spin_unlock(&rtlpriv->locks.rf_lock);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+@@ -343,7 +336,7 @@ void rtl92d_phy_set_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
+ if (bitmask != RFREG_OFFSET_MASK) {
+ original_value = _rtl92d_phy_rf_serial_read(hw,
+ rfpath, regaddr);
+- bitshift = _rtl92d_phy_calculate_bit_shift(bitmask);
++ bitshift = calculate_bit_shift(bitmask);
+ data = ((original_value & (~bitmask)) |
+ (data << bitshift));
+ }
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c
+index cc0bcaf13e96e..73ef602bfb01a 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c
+@@ -16,7 +16,6 @@ static u32 _rtl92ee_phy_rf_serial_read(struct ieee80211_hw *hw,
+ static void _rtl92ee_phy_rf_serial_write(struct ieee80211_hw *hw,
+ enum radio_path rfpath, u32 offset,
+ u32 data);
+-static u32 _rtl92ee_phy_calculate_bit_shift(u32 bitmask);
+ static bool _rtl92ee_phy_bb8192ee_config_parafile(struct ieee80211_hw *hw);
+ static bool _rtl92ee_phy_config_mac_with_headerfile(struct ieee80211_hw *hw);
+ static bool phy_config_bb_with_hdr_file(struct ieee80211_hw *hw,
+@@ -46,7 +45,7 @@ u32 rtl92ee_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask)
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x)\n", regaddr, bitmask);
+ originalvalue = rtl_read_dword(rtlpriv, regaddr);
+- bitshift = _rtl92ee_phy_calculate_bit_shift(bitmask);
++ bitshift = calculate_bit_shift(bitmask);
+ returnvalue = (originalvalue & bitmask) >> bitshift;
+
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+@@ -68,7 +67,7 @@ void rtl92ee_phy_set_bb_reg(struct ieee80211_hw *hw, u32 regaddr,
+
+ if (bitmask != MASKDWORD) {
+ originalvalue = rtl_read_dword(rtlpriv, regaddr);
+- bitshift = _rtl92ee_phy_calculate_bit_shift(bitmask);
++ bitshift = calculate_bit_shift(bitmask);
+ data = ((originalvalue & (~bitmask)) | (data << bitshift));
+ }
+
+@@ -92,7 +91,7 @@ u32 rtl92ee_phy_query_rf_reg(struct ieee80211_hw *hw,
+ spin_lock(&rtlpriv->locks.rf_lock);
+
+ original_value = _rtl92ee_phy_rf_serial_read(hw , rfpath, regaddr);
+- bitshift = _rtl92ee_phy_calculate_bit_shift(bitmask);
++ bitshift = calculate_bit_shift(bitmask);
+ readback_value = (original_value & bitmask) >> bitshift;
+
+ spin_unlock(&rtlpriv->locks.rf_lock);
+@@ -119,7 +118,7 @@ void rtl92ee_phy_set_rf_reg(struct ieee80211_hw *hw,
+
+ if (bitmask != RFREG_OFFSET_MASK) {
+ original_value = _rtl92ee_phy_rf_serial_read(hw, rfpath, addr);
+- bitshift = _rtl92ee_phy_calculate_bit_shift(bitmask);
++ bitshift = calculate_bit_shift(bitmask);
+ data = (original_value & (~bitmask)) | (data << bitshift);
+ }
+
+@@ -201,13 +200,6 @@ static void _rtl92ee_phy_rf_serial_write(struct ieee80211_hw *hw,
+ pphyreg->rf3wire_offset, data_and_addr);
+ }
+
+-static u32 _rtl92ee_phy_calculate_bit_shift(u32 bitmask)
+-{
+- u32 i = ffs(bitmask);
+-
+- return i ? i - 1 : 32;
+-}
+-
+ bool rtl92ee_phy_mac_config(struct ieee80211_hw *hw)
+ {
+ return _rtl92ee_phy_config_mac_with_headerfile(hw);
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c
+index aaa004d4d6d0a..0e2b9698088bb 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c
+@@ -14,13 +14,6 @@
+ #include "hw.h"
+ #include "table.h"
+
+-static u32 _rtl92s_phy_calculate_bit_shift(u32 bitmask)
+-{
+- u32 i = ffs(bitmask);
+-
+- return i ? i - 1 : 32;
+-}
+-
+ u32 rtl92s_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask)
+ {
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+@@ -30,7 +23,7 @@ u32 rtl92s_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask)
+ regaddr, bitmask);
+
+ originalvalue = rtl_read_dword(rtlpriv, regaddr);
+- bitshift = _rtl92s_phy_calculate_bit_shift(bitmask);
++ bitshift = calculate_bit_shift(bitmask);
+ returnvalue = (originalvalue & bitmask) >> bitshift;
+
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE, "BBR MASK=0x%x Addr[0x%x]=0x%x\n",
+@@ -52,7 +45,7 @@ void rtl92s_phy_set_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask,
+
+ if (bitmask != MASKDWORD) {
+ originalvalue = rtl_read_dword(rtlpriv, regaddr);
+- bitshift = _rtl92s_phy_calculate_bit_shift(bitmask);
++ bitshift = calculate_bit_shift(bitmask);
+ data = ((originalvalue & (~bitmask)) | (data << bitshift));
+ }
+
+@@ -160,7 +153,7 @@ u32 rtl92s_phy_query_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
+
+ original_value = _rtl92s_phy_rf_serial_read(hw, rfpath, regaddr);
+
+- bitshift = _rtl92s_phy_calculate_bit_shift(bitmask);
++ bitshift = calculate_bit_shift(bitmask);
+ readback_value = (original_value & bitmask) >> bitshift;
+
+ spin_unlock(&rtlpriv->locks.rf_lock);
+@@ -191,7 +184,7 @@ void rtl92s_phy_set_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
+ if (bitmask != RFREG_OFFSET_MASK) {
+ original_value = _rtl92s_phy_rf_serial_read(hw, rfpath,
+ regaddr);
+- bitshift = _rtl92s_phy_calculate_bit_shift(bitmask);
++ bitshift = calculate_bit_shift(bitmask);
+ data = ((original_value & (~bitmask)) | (data << bitshift));
+ }
+
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
+index 5323ead30db03..fa1839d8ee55f 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
+@@ -29,9 +29,10 @@ static void _rtl8821ae_phy_rf_serial_write(struct ieee80211_hw *hw,
+ u32 data);
+ static u32 _rtl8821ae_phy_calculate_bit_shift(u32 bitmask)
+ {
+- u32 i = ffs(bitmask);
++ if (WARN_ON_ONCE(!bitmask))
++ return 0;
+
+- return i ? i - 1 : 32;
++ return __ffs(bitmask);
+ }
+ static bool _rtl8821ae_phy_bb8821a_config_parafile(struct ieee80211_hw *hw);
+ /*static bool _rtl8812ae_phy_config_mac_with_headerfile(struct ieee80211_hw *hw);*/
+diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h
+index aa07856411b12..a1f223c8848b9 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/wifi.h
++++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h
+@@ -3106,4 +3106,11 @@ static inline struct ieee80211_sta *rtl_find_sta(struct ieee80211_hw *hw,
+ return ieee80211_find_sta(mac->vif, mac_addr);
+ }
+
++static inline u32 calculate_bit_shift(u32 bitmask)
++{
++ if (WARN_ON_ONCE(!bitmask))
++ return 0;
++
++ return __ffs(bitmask);
++}
+ #endif
+diff --git a/drivers/net/wireless/realtek/rtw88/mac80211.c b/drivers/net/wireless/realtek/rtw88/mac80211.c
+index 6f5629852416a..942bb2ab8b50f 100644
+--- a/drivers/net/wireless/realtek/rtw88/mac80211.c
++++ b/drivers/net/wireless/realtek/rtw88/mac80211.c
+@@ -267,9 +267,9 @@ static void rtw_ops_configure_filter(struct ieee80211_hw *hw,
+
+ if (changed_flags & FIF_ALLMULTI) {
+ if (*new_flags & FIF_ALLMULTI)
+- rtwdev->hal.rcr |= BIT_AM | BIT_AB;
++ rtwdev->hal.rcr |= BIT_AM;
+ else
+- rtwdev->hal.rcr &= ~(BIT_AM | BIT_AB);
++ rtwdev->hal.rcr &= ~(BIT_AM);
+ }
+ if (changed_flags & FIF_FCSFAIL) {
+ if (*new_flags & FIF_FCSFAIL)
+diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
+index 5017033c705ae..c07f5ab4c004d 100644
+--- a/drivers/net/xen-netback/netback.c
++++ b/drivers/net/xen-netback/netback.c
+@@ -463,12 +463,25 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
+ }
+
+ for (shinfo->nr_frags = 0; nr_slots > 0 && shinfo->nr_frags < MAX_SKB_FRAGS;
+- shinfo->nr_frags++, gop++, nr_slots--) {
++ nr_slots--) {
++ if (unlikely(!txp->size)) {
++ unsigned long flags;
++
++ spin_lock_irqsave(&queue->response_lock, flags);
++ make_tx_response(queue, txp, 0, XEN_NETIF_RSP_OKAY);
++ push_tx_responses(queue);
++ spin_unlock_irqrestore(&queue->response_lock, flags);
++ ++txp;
++ continue;
++ }
++
+ index = pending_index(queue->pending_cons++);
+ pending_idx = queue->pending_ring[index];
+ xenvif_tx_create_map_op(queue, pending_idx, txp,
+ txp == first ? extra_count : 0, gop);
+ frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
++ ++shinfo->nr_frags;
++ ++gop;
+
+ if (txp == first)
+ txp = txfrags;
+@@ -481,20 +494,39 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
+ shinfo = skb_shinfo(nskb);
+ frags = shinfo->frags;
+
+- for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots;
+- shinfo->nr_frags++, txp++, gop++) {
++ for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots; ++txp) {
++ if (unlikely(!txp->size)) {
++ unsigned long flags;
++
++ spin_lock_irqsave(&queue->response_lock, flags);
++ make_tx_response(queue, txp, 0,
++ XEN_NETIF_RSP_OKAY);
++ push_tx_responses(queue);
++ spin_unlock_irqrestore(&queue->response_lock,
++ flags);
++ continue;
++ }
++
+ index = pending_index(queue->pending_cons++);
+ pending_idx = queue->pending_ring[index];
+ xenvif_tx_create_map_op(queue, pending_idx, txp, 0,
+ gop);
+ frag_set_pending_idx(&frags[shinfo->nr_frags],
+ pending_idx);
++ ++shinfo->nr_frags;
++ ++gop;
+ }
+
+- skb_shinfo(skb)->frag_list = nskb;
+- } else if (nskb) {
++ if (shinfo->nr_frags) {
++ skb_shinfo(skb)->frag_list = nskb;
++ nskb = NULL;
++ }
++ }
++
++ if (nskb) {
+ /* A frag_list skb was allocated but it is no longer needed
+- * because enough slots were converted to copy ops above.
++ * because enough slots were converted to copy ops above or some
++ * were empty.
+ */
+ kfree_skb(nskb);
+ }
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index 98a7649a0f061..8f06e5c1706ba 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -1846,9 +1846,10 @@ static void nvme_update_disk_info(struct gendisk *disk,
+
+ /*
+ * The block layer can't support LBA sizes larger than the page size
+- * yet, so catch this early and don't allow block I/O.
++ * or smaller than a sector size yet, so catch this early and don't
++ * allow block I/O.
+ */
+- if (ns->lba_shift > PAGE_SHIFT) {
++ if (ns->lba_shift > PAGE_SHIFT || ns->lba_shift < SECTOR_SHIFT) {
+ capacity = 0;
+ bs = (1 << 9);
+ }
+diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
+index 415b017075996..f9bfd6a549f37 100644
+--- a/drivers/nvme/host/nvme.h
++++ b/drivers/nvme/host/nvme.h
+@@ -365,6 +365,11 @@ struct nvme_ctrl {
+ struct nvme_fault_inject fault_inject;
+ };
+
++static inline enum nvme_ctrl_state nvme_ctrl_state(struct nvme_ctrl *ctrl)
++{
++ return READ_ONCE(ctrl->state);
++}
++
+ enum nvme_iopolicy {
+ NVME_IOPOLICY_NUMA,
+ NVME_IOPOLICY_RR,
+diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
+index 6a57cf885db10..4f2164a3f466b 100644
+--- a/drivers/nvme/target/tcp.c
++++ b/drivers/nvme/target/tcp.c
+@@ -18,6 +18,7 @@
+ #include "nvmet.h"
+
+ #define NVMET_TCP_DEF_INLINE_DATA_SIZE (4 * PAGE_SIZE)
++#define NVMET_TCP_MAXH2CDATA 0x400000 /* 16M arbitrary limit */
+
+ /* Define the socket priority to use for connections were it is desirable
+ * that the NIC consider performing optimized packet processing or filtering.
+@@ -884,7 +885,7 @@ static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
+ icresp->hdr.pdo = 0;
+ icresp->hdr.plen = cpu_to_le32(icresp->hdr.hlen);
+ icresp->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
+- icresp->maxdata = cpu_to_le32(0x400000); /* 16M arbitrary limit */
++ icresp->maxdata = cpu_to_le32(NVMET_TCP_MAXH2CDATA);
+ icresp->cpda = 0;
+ if (queue->hdr_digest)
+ icresp->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
+@@ -930,6 +931,7 @@ static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
+ {
+ struct nvme_tcp_data_pdu *data = &queue->pdu.data;
+ struct nvmet_tcp_cmd *cmd;
++ unsigned int exp_data_len;
+
+ if (likely(queue->nr_cmds)) {
+ if (unlikely(data->ttag >= queue->nr_cmds)) {
+@@ -948,12 +950,24 @@ static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
+ data->ttag, le32_to_cpu(data->data_offset),
+ cmd->rbytes_done);
+ /* FIXME: use path and transport errors */
+- nvmet_req_complete(&cmd->req,
+- NVME_SC_INVALID_FIELD | NVME_SC_DNR);
++ nvmet_tcp_fatal_error(queue);
+ return -EPROTO;
+ }
+
++ exp_data_len = le32_to_cpu(data->hdr.plen) -
++ nvmet_tcp_hdgst_len(queue) -
++ nvmet_tcp_ddgst_len(queue) -
++ sizeof(*data);
++
+ cmd->pdu_len = le32_to_cpu(data->data_length);
++ if (unlikely(cmd->pdu_len != exp_data_len ||
++ cmd->pdu_len == 0 ||
++ cmd->pdu_len > NVMET_TCP_MAXH2CDATA)) {
++ pr_err("H2CData PDU len %u is invalid\n", cmd->pdu_len);
++ /* FIXME: use proper transport errors */
++ nvmet_tcp_fatal_error(queue);
++ return -EPROTO;
++ }
+ cmd->pdu_recv = 0;
+ nvmet_tcp_map_pdu_iovec(cmd);
+ queue->cmd = cmd;
+diff --git a/drivers/nvme/target/trace.h b/drivers/nvme/target/trace.h
+index 6109b3806b12b..155334ddc13f3 100644
+--- a/drivers/nvme/target/trace.h
++++ b/drivers/nvme/target/trace.h
+@@ -53,8 +53,7 @@ static inline void __assign_req_name(char *name, struct nvmet_req *req)
+ return;
+ }
+
+- strncpy(name, req->ns->device_path,
+- min_t(size_t, DISK_NAME_LEN, strlen(req->ns->device_path)));
++ strscpy_pad(name, req->ns->device_path, DISK_NAME_LEN);
+ }
+ #endif
+
+diff --git a/drivers/of/base.c b/drivers/of/base.c
+index 54719f8156ed1..bc5abe650c5c2 100644
+--- a/drivers/of/base.c
++++ b/drivers/of/base.c
+@@ -1669,6 +1669,7 @@ int of_parse_phandle_with_args_map(const struct device_node *np,
+ out_args->np = new;
+ of_node_put(cur);
+ cur = new;
++ new = NULL;
+ }
+ put:
+ of_node_put(cur);
+diff --git a/drivers/of/unittest-data/tests-phandle.dtsi b/drivers/of/unittest-data/tests-phandle.dtsi
+index 6b33be4c4416c..aa0d7027ffa68 100644
+--- a/drivers/of/unittest-data/tests-phandle.dtsi
++++ b/drivers/of/unittest-data/tests-phandle.dtsi
+@@ -38,6 +38,13 @@
+ phandle-map-pass-thru = <0x0 0xf0>;
+ };
+
++ provider5: provider5 {
++ #phandle-cells = <2>;
++ phandle-map = <2 7 &provider4 2 3>;
++ phandle-map-mask = <0xff 0xf>;
++ phandle-map-pass-thru = <0x0 0xf0>;
++ };
++
+ consumer-a {
+ phandle-list = <&provider1 1>,
+ <&provider2 2 0>,
+@@ -64,7 +71,8 @@
+ <&provider4 4 0x100>,
+ <&provider4 0 0x61>,
+ <&provider0>,
+- <&provider4 19 0x20>;
++ <&provider4 19 0x20>,
++ <&provider5 2 7>;
+ phandle-list-bad-phandle = <12345678 0 0>;
+ phandle-list-bad-args = <&provider2 1 0>,
+ <&provider4 0>;
+diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
+index 073a3f44c4049..699daf0645d16 100644
+--- a/drivers/of/unittest.c
++++ b/drivers/of/unittest.c
+@@ -448,6 +448,9 @@ static void __init of_unittest_parse_phandle_with_args(void)
+
+ unittest(passed, "index %i - data error on node %pOF rc=%i\n",
+ i, args.np, rc);
++
++ if (rc == 0)
++ of_node_put(args.np);
+ }
+
+ /* Check for missing list property */
+@@ -537,8 +540,9 @@ static void __init of_unittest_parse_phandle_with_args(void)
+
+ static void __init of_unittest_parse_phandle_with_args_map(void)
+ {
+- struct device_node *np, *p0, *p1, *p2, *p3;
++ struct device_node *np, *p[6] = {};
+ struct of_phandle_args args;
++ unsigned int prefs[6];
+ int i, rc;
+
+ np = of_find_node_by_path("/testcase-data/phandle-tests/consumer-b");
+@@ -547,34 +551,24 @@ static void __init of_unittest_parse_phandle_with_args_map(void)
+ return;
+ }
+
+- p0 = of_find_node_by_path("/testcase-data/phandle-tests/provider0");
+- if (!p0) {
+- pr_err("missing testcase data\n");
+- return;
+- }
+-
+- p1 = of_find_node_by_path("/testcase-data/phandle-tests/provider1");
+- if (!p1) {
+- pr_err("missing testcase data\n");
+- return;
+- }
+-
+- p2 = of_find_node_by_path("/testcase-data/phandle-tests/provider2");
+- if (!p2) {
+- pr_err("missing testcase data\n");
+- return;
+- }
+-
+- p3 = of_find_node_by_path("/testcase-data/phandle-tests/provider3");
+- if (!p3) {
+- pr_err("missing testcase data\n");
+- return;
++ p[0] = of_find_node_by_path("/testcase-data/phandle-tests/provider0");
++ p[1] = of_find_node_by_path("/testcase-data/phandle-tests/provider1");
++ p[2] = of_find_node_by_path("/testcase-data/phandle-tests/provider2");
++ p[3] = of_find_node_by_path("/testcase-data/phandle-tests/provider3");
++ p[4] = of_find_node_by_path("/testcase-data/phandle-tests/provider4");
++ p[5] = of_find_node_by_path("/testcase-data/phandle-tests/provider5");
++ for (i = 0; i < ARRAY_SIZE(p); ++i) {
++ if (!p[i]) {
++ pr_err("missing testcase data\n");
++ return;
++ }
++ prefs[i] = kref_read(&p[i]->kobj.kref);
+ }
+
+ rc = of_count_phandle_with_args(np, "phandle-list", "#phandle-cells");
+- unittest(rc == 7, "of_count_phandle_with_args() returned %i, expected 7\n", rc);
++ unittest(rc == 8, "of_count_phandle_with_args() returned %i, expected 8\n", rc);
+
+- for (i = 0; i < 8; i++) {
++ for (i = 0; i < 9; i++) {
+ bool passed = true;
+
+ memset(&args, 0, sizeof(args));
+@@ -585,13 +579,13 @@ static void __init of_unittest_parse_phandle_with_args_map(void)
+ switch (i) {
+ case 0:
+ passed &= !rc;
+- passed &= (args.np == p1);
++ passed &= (args.np == p[1]);
+ passed &= (args.args_count == 1);
+ passed &= (args.args[0] == 1);
+ break;
+ case 1:
+ passed &= !rc;
+- passed &= (args.np == p3);
++ passed &= (args.np == p[3]);
+ passed &= (args.args_count == 3);
+ passed &= (args.args[0] == 2);
+ passed &= (args.args[1] == 5);
+@@ -602,28 +596,36 @@ static void __init of_unittest_parse_phandle_with_args_map(void)
+ break;
+ case 3:
+ passed &= !rc;
+- passed &= (args.np == p0);
++ passed &= (args.np == p[0]);
+ passed &= (args.args_count == 0);
+ break;
+ case 4:
+ passed &= !rc;
+- passed &= (args.np == p1);
++ passed &= (args.np == p[1]);
+ passed &= (args.args_count == 1);
+ passed &= (args.args[0] == 3);
+ break;
+ case 5:
+ passed &= !rc;
+- passed &= (args.np == p0);
++ passed &= (args.np == p[0]);
+ passed &= (args.args_count == 0);
+ break;
+ case 6:
+ passed &= !rc;
+- passed &= (args.np == p2);
++ passed &= (args.np == p[2]);
+ passed &= (args.args_count == 2);
+ passed &= (args.args[0] == 15);
+ passed &= (args.args[1] == 0x20);
+ break;
+ case 7:
++ passed &= !rc;
++ passed &= (args.np == p[3]);
++ passed &= (args.args_count == 3);
++ passed &= (args.args[0] == 2);
++ passed &= (args.args[1] == 5);
++ passed &= (args.args[2] == 3);
++ break;
++ case 8:
+ passed &= (rc == -ENOENT);
+ break;
+ default:
+@@ -632,6 +634,9 @@ static void __init of_unittest_parse_phandle_with_args_map(void)
+
+ unittest(passed, "index %i - data error on node %s rc=%i\n",
+ i, args.np->full_name, rc);
++
++ if (rc == 0)
++ of_node_put(args.np);
+ }
+
+ /* Check for missing list property */
+@@ -678,6 +683,13 @@ static void __init of_unittest_parse_phandle_with_args_map(void)
+ "OF: /testcase-data/phandle-tests/consumer-b: #phandle-cells = 2 found -1");
+
+ unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
++
++ for (i = 0; i < ARRAY_SIZE(p); ++i) {
++ unittest(prefs[i] == kref_read(&p[i]->kobj.kref),
++ "provider%d: expected:%d got:%d\n",
++ i, prefs[i], kref_read(&p[i]->kobj.kref));
++ of_node_put(p[i]);
++ }
+ }
+
+ static void __init of_unittest_property_string(void)
+diff --git a/drivers/parport/parport_serial.c b/drivers/parport/parport_serial.c
+index 9f5d784cd95d5..3644997a83425 100644
+--- a/drivers/parport/parport_serial.c
++++ b/drivers/parport/parport_serial.c
+@@ -65,6 +65,10 @@ enum parport_pc_pci_cards {
+ sunix_5069a,
+ sunix_5079a,
+ sunix_5099a,
++ brainboxes_uc257,
++ brainboxes_is300,
++ brainboxes_uc414,
++ brainboxes_px263,
+ };
+
+ /* each element directly indexed from enum list, above */
+@@ -158,6 +162,10 @@ static struct parport_pc_pci cards[] = {
+ /* sunix_5069a */ { 1, { { 1, 2 }, } },
+ /* sunix_5079a */ { 1, { { 1, 2 }, } },
+ /* sunix_5099a */ { 1, { { 1, 2 }, } },
++ /* brainboxes_uc257 */ { 1, { { 3, -1 }, } },
++ /* brainboxes_is300 */ { 1, { { 3, -1 }, } },
++ /* brainboxes_uc414 */ { 1, { { 3, -1 }, } },
++ /* brainboxes_px263 */ { 1, { { 3, -1 }, } },
+ };
+
+ static struct pci_device_id parport_serial_pci_tbl[] = {
+@@ -277,6 +285,38 @@ static struct pci_device_id parport_serial_pci_tbl[] = {
+ { PCI_VENDOR_ID_SUNIX, PCI_DEVICE_ID_SUNIX_1999, PCI_VENDOR_ID_SUNIX,
+ 0x0104, 0, 0, sunix_5099a },
+
++ /* Brainboxes UC-203 */
++ { PCI_VENDOR_ID_INTASHIELD, 0x0bc1,
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_uc257 },
++ { PCI_VENDOR_ID_INTASHIELD, 0x0bc2,
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_uc257 },
++
++ /* Brainboxes UC-257 */
++ { PCI_VENDOR_ID_INTASHIELD, 0x0861,
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_uc257 },
++ { PCI_VENDOR_ID_INTASHIELD, 0x0862,
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_uc257 },
++ { PCI_VENDOR_ID_INTASHIELD, 0x0863,
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_uc257 },
++
++ /* Brainboxes UC-414 */
++ { PCI_VENDOR_ID_INTASHIELD, 0x0e61,
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_uc414 },
++
++ /* Brainboxes UC-475 */
++ { PCI_VENDOR_ID_INTASHIELD, 0x0981,
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_uc257 },
++ { PCI_VENDOR_ID_INTASHIELD, 0x0982,
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_uc257 },
++
++ /* Brainboxes IS-300/IS-500 */
++ { PCI_VENDOR_ID_INTASHIELD, 0x0da0,
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_is300 },
++
++ /* Brainboxes PX-263/PX-295 */
++ { PCI_VENDOR_ID_INTASHIELD, 0x402c,
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_px263 },
++
+ { 0, } /* terminate list */
+ };
+ MODULE_DEVICE_TABLE(pci,parport_serial_pci_tbl);
+@@ -542,6 +582,30 @@ static struct pciserial_board pci_parport_serial_boards[] = {
+ .base_baud = 921600,
+ .uart_offset = 0x8,
+ },
++ [brainboxes_uc257] = {
++ .flags = FL_BASE2,
++ .num_ports = 2,
++ .base_baud = 115200,
++ .uart_offset = 8,
++ },
++ [brainboxes_is300] = {
++ .flags = FL_BASE2,
++ .num_ports = 1,
++ .base_baud = 115200,
++ .uart_offset = 8,
++ },
++ [brainboxes_uc414] = {
++ .flags = FL_BASE2,
++ .num_ports = 4,
++ .base_baud = 115200,
++ .uart_offset = 8,
++ },
++ [brainboxes_px263] = {
++ .flags = FL_BASE2,
++ .num_ports = 4,
++ .base_baud = 921600,
++ .uart_offset = 8,
++ },
+ };
+
+ struct parport_serial_private {
+diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
+index eacdcb0a87719..09379e5f7724a 100644
+--- a/drivers/pci/controller/dwc/pci-keystone.c
++++ b/drivers/pci/controller/dwc/pci-keystone.c
+@@ -1200,7 +1200,16 @@ static int ks_pcie_probe(struct platform_device *pdev)
+ goto err_link;
+ }
+
++ /* Obtain references to the PHYs */
++ for (i = 0; i < num_lanes; i++)
++ phy_pm_runtime_get_sync(ks_pcie->phy[i]);
++
+ ret = ks_pcie_enable_phy(ks_pcie);
++
++ /* Release references to the PHYs */
++ for (i = 0; i < num_lanes; i++)
++ phy_pm_runtime_put_sync(ks_pcie->phy[i]);
++
+ if (ret) {
+ dev_err(dev, "failed to enable phy\n");
+ goto err_link;
+diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
+index 5023b7f704d2f..61a0f33c59cf8 100644
+--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
++++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
+@@ -589,6 +589,7 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
+ }
+
+ aligned_offset = msg_addr & (epc->mem->window.page_size - 1);
++ msg_addr &= ~aligned_offset;
+ ret = dw_pcie_ep_map_addr(epc, func_no, 0, ep->msi_mem_phys, msg_addr,
+ epc->mem->window.page_size);
+ if (ret)
+diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c
+index 5273cb5ede0fd..3200d776e34d8 100644
+--- a/drivers/pci/controller/pcie-mediatek.c
++++ b/drivers/pci/controller/pcie-mediatek.c
+@@ -624,12 +624,18 @@ static void mtk_pcie_intr_handler(struct irq_desc *desc)
+ if (status & MSI_STATUS){
+ unsigned long imsi_status;
+
++ /*
++ * The interrupt status can be cleared even if the
++ * MSI status remains pending. As such, given the
++ * edge-triggered interrupt type, its status should
++ * be cleared before being dispatched to the
++ * handler of the underlying device.
++ */
++ writel(MSI_STATUS, port->base + PCIE_INT_STATUS);
+ while ((imsi_status = readl(port->base + PCIE_IMSI_STATUS))) {
+ for_each_set_bit(bit, &imsi_status, MTK_MSI_IRQS_NUM)
+ generic_handle_domain_irq(port->inner_domain, bit);
+ }
+- /* Clear MSI interrupt status */
+- writel(MSI_STATUS, port->base + PCIE_INT_STATUS);
+ }
+ }
+
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index 5d8768cd7c50a..626d9ed733f14 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -4577,17 +4577,21 @@ static int pci_quirk_xgene_acs(struct pci_dev *dev, u16 acs_flags)
+ * But the implementation could block peer-to-peer transactions between them
+ * and provide ACS-like functionality.
+ */
+-static int pci_quirk_zhaoxin_pcie_ports_acs(struct pci_dev *dev, u16 acs_flags)
++static int pci_quirk_zhaoxin_pcie_ports_acs(struct pci_dev *dev, u16 acs_flags)
+ {
+ if (!pci_is_pcie(dev) ||
+ ((pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT) &&
+ (pci_pcie_type(dev) != PCI_EXP_TYPE_DOWNSTREAM)))
+ return -ENOTTY;
+
++ /*
++ * Future Zhaoxin Root Ports and Switch Downstream Ports will
++ * implement ACS capability in accordance with the PCIe Spec.
++ */
+ switch (dev->device) {
+ case 0x0710 ... 0x071e:
+ case 0x0721:
+- case 0x0723 ... 0x0732:
++ case 0x0723 ... 0x0752:
+ return pci_acs_ctrl_enabled(acs_flags,
+ PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
+ }
+diff --git a/drivers/pinctrl/cirrus/Kconfig b/drivers/pinctrl/cirrus/Kconfig
+index 530426a74f751..b3cea8d56c4f6 100644
+--- a/drivers/pinctrl/cirrus/Kconfig
++++ b/drivers/pinctrl/cirrus/Kconfig
+@@ -1,7 +1,8 @@
+ # SPDX-License-Identifier: GPL-2.0-only
+ config PINCTRL_LOCHNAGAR
+ tristate "Cirrus Logic Lochnagar pinctrl driver"
+- depends on MFD_LOCHNAGAR
++ # Avoid clash caused by MIPS defining RST, which is used in the driver
++ depends on MFD_LOCHNAGAR && !MIPS
+ select GPIOLIB
+ select PINMUX
+ select PINCONF
+diff --git a/drivers/platform/x86/intel/vbtn.c b/drivers/platform/x86/intel/vbtn.c
+index 15f013af9e623..f5e020840d947 100644
+--- a/drivers/platform/x86/intel/vbtn.c
++++ b/drivers/platform/x86/intel/vbtn.c
+@@ -73,10 +73,10 @@ struct intel_vbtn_priv {
+ bool wakeup_mode;
+ };
+
+-static void detect_tablet_mode(struct platform_device *device)
++static void detect_tablet_mode(struct device *dev)
+ {
+- struct intel_vbtn_priv *priv = dev_get_drvdata(&device->dev);
+- acpi_handle handle = ACPI_HANDLE(&device->dev);
++ struct intel_vbtn_priv *priv = dev_get_drvdata(dev);
++ acpi_handle handle = ACPI_HANDLE(dev);
+ unsigned long long vgbs;
+ acpi_status status;
+ int m;
+@@ -89,6 +89,8 @@ static void detect_tablet_mode(struct platform_device *device)
+ input_report_switch(priv->switches_dev, SW_TABLET_MODE, m);
+ m = (vgbs & VGBS_DOCK_MODE_FLAG) ? 1 : 0;
+ input_report_switch(priv->switches_dev, SW_DOCK, m);
++
++ input_sync(priv->switches_dev);
+ }
+
+ /*
+@@ -134,7 +136,7 @@ static int intel_vbtn_input_setup(struct platform_device *device)
+ priv->switches_dev->id.bustype = BUS_HOST;
+
+ if (priv->has_switches) {
+- detect_tablet_mode(device);
++ detect_tablet_mode(&device->dev);
+
+ ret = input_register_device(priv->switches_dev);
+ if (ret)
+@@ -198,6 +200,9 @@ static void notify_handler(acpi_handle handle, u32 event, void *context)
+ autorelease = val && (!ke_rel || ke_rel->type == KE_IGNORE);
+
+ sparse_keymap_report_event(input_dev, event, val, autorelease);
++
++ /* Some devices need this to report further events */
++ acpi_evaluate_object(handle, "VBDL", NULL, NULL);
+ }
+
+ /*
+@@ -358,7 +363,13 @@ static void intel_vbtn_pm_complete(struct device *dev)
+
+ static int intel_vbtn_pm_resume(struct device *dev)
+ {
++ struct intel_vbtn_priv *priv = dev_get_drvdata(dev);
++
+ intel_vbtn_pm_complete(dev);
++
++ if (priv->has_switches)
++ detect_tablet_mode(dev);
++
+ return 0;
+ }
+
+diff --git a/drivers/power/supply/bq256xx_charger.c b/drivers/power/supply/bq256xx_charger.c
+index f501ecd492022..9fb7b44e890af 100644
+--- a/drivers/power/supply/bq256xx_charger.c
++++ b/drivers/power/supply/bq256xx_charger.c
+@@ -1514,13 +1514,16 @@ static int bq256xx_hw_init(struct bq256xx_device *bq)
+ wd_reg_val = i;
+ break;
+ }
+- if (bq->watchdog_timer > bq256xx_watchdog_time[i] &&
++ if (i + 1 < BQ256XX_NUM_WD_VAL &&
++ bq->watchdog_timer > bq256xx_watchdog_time[i] &&
+ bq->watchdog_timer < bq256xx_watchdog_time[i + 1])
+ wd_reg_val = i;
+ }
+ ret = regmap_update_bits(bq->regmap, BQ256XX_CHARGER_CONTROL_1,
+ BQ256XX_WATCHDOG_MASK, wd_reg_val <<
+ BQ256XX_WDT_BIT_SHIFT);
++ if (ret)
++ return ret;
+
+ ret = power_supply_get_battery_info(bq->charger, &bat_info);
+ if (ret) {
+diff --git a/drivers/power/supply/cw2015_battery.c b/drivers/power/supply/cw2015_battery.c
+index 091868e9e9e82..587db9fd86241 100644
+--- a/drivers/power/supply/cw2015_battery.c
++++ b/drivers/power/supply/cw2015_battery.c
+@@ -490,7 +490,7 @@ static int cw_battery_get_property(struct power_supply *psy,
+
+ case POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW:
+ if (cw_battery_valid_time_to_empty(cw_bat))
+- val->intval = cw_bat->time_to_empty;
++ val->intval = cw_bat->time_to_empty * 60;
+ else
+ val->intval = 0;
+ break;
+diff --git a/drivers/pwm/pwm-jz4740.c b/drivers/pwm/pwm-jz4740.c
+index 23dc1fb770e22..40dcf530a1753 100644
+--- a/drivers/pwm/pwm-jz4740.c
++++ b/drivers/pwm/pwm-jz4740.c
+@@ -60,9 +60,10 @@ static int jz4740_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
+ snprintf(name, sizeof(name), "timer%u", pwm->hwpwm);
+
+ clk = clk_get(chip->dev, name);
+- if (IS_ERR(clk))
+- return dev_err_probe(chip->dev, PTR_ERR(clk),
+- "Failed to get clock\n");
++ if (IS_ERR(clk)) {
++ dev_err(chip->dev, "error %pe: Failed to get clock\n", clk);
++ return PTR_ERR(clk);
++ }
+
+ err = clk_prepare_enable(clk);
+ if (err < 0) {
+diff --git a/drivers/pwm/pwm-stm32.c b/drivers/pwm/pwm-stm32.c
+index 794ca5b029681..bdcdb7f38312b 100644
+--- a/drivers/pwm/pwm-stm32.c
++++ b/drivers/pwm/pwm-stm32.c
+@@ -115,14 +115,14 @@ static int stm32_pwm_raw_capture(struct stm32_pwm *priv, struct pwm_device *pwm,
+ int ret;
+
+ /* Ensure registers have been updated, enable counter and capture */
+- regmap_update_bits(priv->regmap, TIM_EGR, TIM_EGR_UG, TIM_EGR_UG);
+- regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN, TIM_CR1_CEN);
++ regmap_set_bits(priv->regmap, TIM_EGR, TIM_EGR_UG);
++ regmap_set_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN);
+
+ /* Use cc1 or cc3 DMA resp for PWM input channels 1 & 2 or 3 & 4 */
+ dma_id = pwm->hwpwm < 2 ? STM32_TIMERS_DMA_CH1 : STM32_TIMERS_DMA_CH3;
+ ccen = pwm->hwpwm < 2 ? TIM_CCER_CC12E : TIM_CCER_CC34E;
+ ccr = pwm->hwpwm < 2 ? TIM_CCR1 : TIM_CCR3;
+- regmap_update_bits(priv->regmap, TIM_CCER, ccen, ccen);
++ regmap_set_bits(priv->regmap, TIM_CCER, ccen);
+
+ /*
+ * Timer DMA burst mode. Request 2 registers, 2 bursts, to get both
+@@ -160,8 +160,8 @@ static int stm32_pwm_raw_capture(struct stm32_pwm *priv, struct pwm_device *pwm,
+ }
+
+ stop:
+- regmap_update_bits(priv->regmap, TIM_CCER, ccen, 0);
+- regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN, 0);
++ regmap_clear_bits(priv->regmap, TIM_CCER, ccen);
++ regmap_clear_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN);
+
+ return ret;
+ }
+@@ -359,7 +359,7 @@ static int stm32_pwm_config(struct stm32_pwm *priv, int ch,
+
+ regmap_write(priv->regmap, TIM_PSC, prescaler);
+ regmap_write(priv->regmap, TIM_ARR, prd - 1);
+- regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_ARPE, TIM_CR1_ARPE);
++ regmap_set_bits(priv->regmap, TIM_CR1, TIM_CR1_ARPE);
+
+ /* Calculate the duty cycles */
+ dty = prd * duty_ns;
+@@ -377,7 +377,7 @@ static int stm32_pwm_config(struct stm32_pwm *priv, int ch,
+ else
+ regmap_update_bits(priv->regmap, TIM_CCMR2, mask, ccmr);
+
+- regmap_update_bits(priv->regmap, TIM_BDTR, TIM_BDTR_MOE, TIM_BDTR_MOE);
++ regmap_set_bits(priv->regmap, TIM_BDTR, TIM_BDTR_MOE);
+
+ return 0;
+ }
+@@ -411,13 +411,13 @@ static int stm32_pwm_enable(struct stm32_pwm *priv, int ch)
+ if (priv->have_complementary_output)
+ mask |= TIM_CCER_CC1NE << (ch * 4);
+
+- regmap_update_bits(priv->regmap, TIM_CCER, mask, mask);
++ regmap_set_bits(priv->regmap, TIM_CCER, mask);
+
+ /* Make sure that registers are updated */
+- regmap_update_bits(priv->regmap, TIM_EGR, TIM_EGR_UG, TIM_EGR_UG);
++ regmap_set_bits(priv->regmap, TIM_EGR, TIM_EGR_UG);
+
+ /* Enable controller */
+- regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN, TIM_CR1_CEN);
++ regmap_set_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN);
+
+ return 0;
+ }
+@@ -431,11 +431,11 @@ static void stm32_pwm_disable(struct stm32_pwm *priv, int ch)
+ if (priv->have_complementary_output)
+ mask |= TIM_CCER_CC1NE << (ch * 4);
+
+- regmap_update_bits(priv->regmap, TIM_CCER, mask, 0);
++ regmap_clear_bits(priv->regmap, TIM_CCER, mask);
+
+ /* When all channels are disabled, we can disable the controller */
+ if (!active_channels(priv))
+- regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN, 0);
++ regmap_clear_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN);
+
+ clk_disable(priv->clk);
+ }
+@@ -568,41 +568,30 @@ static void stm32_pwm_detect_complementary(struct stm32_pwm *priv)
+ * If complementary bit doesn't exist writing 1 will have no
+ * effect so we can detect it.
+ */
+- regmap_update_bits(priv->regmap,
+- TIM_CCER, TIM_CCER_CC1NE, TIM_CCER_CC1NE);
++ regmap_set_bits(priv->regmap, TIM_CCER, TIM_CCER_CC1NE);
+ regmap_read(priv->regmap, TIM_CCER, &ccer);
+- regmap_update_bits(priv->regmap, TIM_CCER, TIM_CCER_CC1NE, 0);
++ regmap_clear_bits(priv->regmap, TIM_CCER, TIM_CCER_CC1NE);
+
+ priv->have_complementary_output = (ccer != 0);
+ }
+
+-static int stm32_pwm_detect_channels(struct stm32_pwm *priv)
++static unsigned int stm32_pwm_detect_channels(struct stm32_pwm *priv,
++ unsigned int *num_enabled)
+ {
+- u32 ccer;
+- int npwm = 0;
++ u32 ccer, ccer_backup;
+
+ /*
+ * If channels enable bits don't exist writing 1 will have no
+ * effect so we can detect and count them.
+ */
+- regmap_update_bits(priv->regmap,
+- TIM_CCER, TIM_CCER_CCXE, TIM_CCER_CCXE);
++ regmap_read(priv->regmap, TIM_CCER, &ccer_backup);
++ regmap_set_bits(priv->regmap, TIM_CCER, TIM_CCER_CCXE);
+ regmap_read(priv->regmap, TIM_CCER, &ccer);
+- regmap_update_bits(priv->regmap, TIM_CCER, TIM_CCER_CCXE, 0);
+-
+- if (ccer & TIM_CCER_CC1E)
+- npwm++;
++ regmap_write(priv->regmap, TIM_CCER, ccer_backup);
+
+- if (ccer & TIM_CCER_CC2E)
+- npwm++;
++ *num_enabled = hweight32(ccer_backup & TIM_CCER_CCXE);
+
+- if (ccer & TIM_CCER_CC3E)
+- npwm++;
+-
+- if (ccer & TIM_CCER_CC4E)
+- npwm++;
+-
+- return npwm;
++ return hweight32(ccer & TIM_CCER_CCXE);
+ }
+
+ static int stm32_pwm_probe(struct platform_device *pdev)
+@@ -611,6 +600,8 @@ static int stm32_pwm_probe(struct platform_device *pdev)
+ struct device_node *np = dev->of_node;
+ struct stm32_timers *ddata = dev_get_drvdata(pdev->dev.parent);
+ struct stm32_pwm *priv;
++ unsigned int num_enabled;
++ unsigned int i;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+@@ -633,7 +624,11 @@ static int stm32_pwm_probe(struct platform_device *pdev)
+
+ priv->chip.dev = dev;
+ priv->chip.ops = &stm32pwm_ops;
+- priv->chip.npwm = stm32_pwm_detect_channels(priv);
++ priv->chip.npwm = stm32_pwm_detect_channels(priv, &num_enabled);
++
++ /* Initialize clock refcount to number of enabled PWM channels. */
++ for (i = 0; i < num_enabled; i++)
++ clk_enable(priv->clk);
+
+ ret = pwmchip_add(&priv->chip);
+ if (ret < 0)
+diff --git a/drivers/reset/hisilicon/hi6220_reset.c b/drivers/reset/hisilicon/hi6220_reset.c
+index 5ca145b64e63d..30951914afac7 100644
+--- a/drivers/reset/hisilicon/hi6220_reset.c
++++ b/drivers/reset/hisilicon/hi6220_reset.c
+@@ -164,7 +164,7 @@ static int hi6220_reset_probe(struct platform_device *pdev)
+ if (!data)
+ return -ENOMEM;
+
+- type = (enum hi6220_reset_ctrl_type)of_device_get_match_data(dev);
++ type = (uintptr_t)of_device_get_match_data(dev);
+
+ regmap = syscon_node_to_regmap(np);
+ if (IS_ERR(regmap)) {
+diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
+index 88cba6212ee26..e36775c14782a 100644
+--- a/drivers/s390/block/scm_blk.c
++++ b/drivers/s390/block/scm_blk.c
+@@ -18,6 +18,7 @@
+ #include <linux/genhd.h>
+ #include <linux/slab.h>
+ #include <linux/list.h>
++#include <linux/io.h>
+ #include <asm/eadm.h>
+ #include "scm_blk.h"
+
+@@ -131,7 +132,7 @@ static void scm_request_done(struct scm_request *scmrq)
+
+ for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) {
+ msb = &scmrq->aob->msb[i];
+- aidaw = msb->data_addr;
++ aidaw = (u64)phys_to_virt(msb->data_addr);
+
+ if ((msb->flags & MSB_FLAG_IDA) && aidaw &&
+ IS_ALIGNED(aidaw, PAGE_SIZE))
+@@ -196,12 +197,12 @@ static int scm_request_prepare(struct scm_request *scmrq)
+ msb->scm_addr = scmdev->address + ((u64) blk_rq_pos(req) << 9);
+ msb->oc = (rq_data_dir(req) == READ) ? MSB_OC_READ : MSB_OC_WRITE;
+ msb->flags |= MSB_FLAG_IDA;
+- msb->data_addr = (u64) aidaw;
++ msb->data_addr = (u64)virt_to_phys(aidaw);
+
+ rq_for_each_segment(bv, req, iter) {
+ WARN_ON(bv.bv_offset);
+ msb->blk_count += bv.bv_len >> 12;
+- aidaw->data_addr = (u64) page_address(bv.bv_page);
++ aidaw->data_addr = virt_to_phys(page_address(bv.bv_page));
+ aidaw++;
+ }
+
+diff --git a/drivers/scsi/fnic/fnic_debugfs.c b/drivers/scsi/fnic/fnic_debugfs.c
+index e7326505cabb1..f611e9f00a9d8 100644
+--- a/drivers/scsi/fnic/fnic_debugfs.c
++++ b/drivers/scsi/fnic/fnic_debugfs.c
+@@ -66,9 +66,10 @@ int fnic_debugfs_init(void)
+ fc_trc_flag->fnic_trace = 2;
+ fc_trc_flag->fc_trace = 3;
+ fc_trc_flag->fc_clear = 4;
++ return 0;
+ }
+
+- return 0;
++ return -ENOMEM;
+ }
+
+ /*
+diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
+index 436d174f2194b..57be32ba0109f 100644
+--- a/drivers/scsi/hisi_sas/hisi_sas.h
++++ b/drivers/scsi/hisi_sas/hisi_sas.h
+@@ -35,7 +35,7 @@
+ #define HISI_SAS_QUEUE_SLOTS 4096
+ #define HISI_SAS_MAX_ITCT_ENTRIES 1024
+ #define HISI_SAS_MAX_DEVICES HISI_SAS_MAX_ITCT_ENTRIES
+-#define HISI_SAS_RESET_BIT 0
++#define HISI_SAS_RESETTING_BIT 0
+ #define HISI_SAS_REJECT_CMD_BIT 1
+ #define HISI_SAS_PM_BIT 2
+ #define HISI_SAS_HW_FAULT_BIT 3
+diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
+index 7d93783c09a50..530f61df109a3 100644
+--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
++++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
+@@ -724,7 +724,7 @@ static int hisi_sas_init_device(struct domain_device *device)
+ */
+ local_phy = sas_get_local_phy(device);
+ if (!scsi_is_sas_phy_local(local_phy) &&
+- !test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) {
++ !test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) {
+ unsigned long deadline = ata_deadline(jiffies, 20000);
+ struct sata_device *sata_dev = &device->sata_dev;
+ struct ata_host *ata_host = sata_dev->ata_host;
+@@ -1072,7 +1072,7 @@ static void hisi_sas_dev_gone(struct domain_device *device)
+ sas_dev->device_id, sas_dev->dev_type);
+
+ down(&hisi_hba->sem);
+- if (!test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) {
++ if (!test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) {
+ hisi_sas_internal_task_abort(hisi_hba, device,
+ HISI_SAS_INT_ABT_DEV, 0, true);
+
+@@ -1548,7 +1548,6 @@ void hisi_sas_controller_reset_prepare(struct hisi_hba *hisi_hba)
+ {
+ struct Scsi_Host *shost = hisi_hba->shost;
+
+- down(&hisi_hba->sem);
+ hisi_hba->phy_state = hisi_hba->hw->get_phys_state(hisi_hba);
+
+ scsi_block_requests(shost);
+@@ -1574,9 +1573,9 @@ void hisi_sas_controller_reset_done(struct hisi_hba *hisi_hba)
+ if (hisi_hba->reject_stp_links_msk)
+ hisi_sas_terminate_stp_reject(hisi_hba);
+ hisi_sas_reset_init_all_devices(hisi_hba);
+- up(&hisi_hba->sem);
+ scsi_unblock_requests(shost);
+- clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
++ clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags);
++ up(&hisi_hba->sem);
+
+ hisi_sas_rescan_topology(hisi_hba, hisi_hba->phy_state);
+ }
+@@ -1585,10 +1584,13 @@ EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_done);
+ static int hisi_sas_controller_prereset(struct hisi_hba *hisi_hba)
+ {
+ if (!hisi_hba->hw->soft_reset)
+- return -1;
++ return -ENOENT;
+
+- if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
+- return -1;
++ down(&hisi_hba->sem);
++ if (test_and_set_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) {
++ up(&hisi_hba->sem);
++ return -EPERM;
++ }
+
+ if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct)
+ hisi_hba->hw->debugfs_snapshot_regs(hisi_hba);
+@@ -1611,7 +1613,7 @@ static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
+ clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
+ up(&hisi_hba->sem);
+ scsi_unblock_requests(shost);
+- clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
++ clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags);
+ return rc;
+ }
+
+@@ -2251,7 +2253,7 @@ void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy,
+ } else {
+ struct hisi_sas_port *port = phy->port;
+
+- if (test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags) ||
++ if (test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags) ||
+ phy->in_reset) {
+ dev_info(dev, "ignore flutter phy%d down\n", phy_no);
+ return;
+diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+index afe639994f3dd..862f4e8b7eb58 100644
+--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
++++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+@@ -1422,7 +1422,7 @@ static irqreturn_t int_bcast_v1_hw(int irq, void *p)
+ goto end;
+ }
+
+- if (!test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
++ if (!test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags))
+ sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD,
+ GFP_ATOMIC);
+
+diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+index c40588ed68a54..a6d89a1495461 100644
+--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
++++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+@@ -2831,7 +2831,7 @@ static void phy_bcast_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
+ hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 1);
+ bcast_status = hisi_sas_phy_read32(hisi_hba, phy_no, RX_PRIMS_STATUS);
+ if ((bcast_status & RX_BCAST_CHG_MSK) &&
+- !test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
++ !test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags))
+ sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD,
+ GFP_ATOMIC);
+ hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
+diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+index d1c07e7cb60df..1651d03d3b463 100644
+--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
++++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+@@ -1622,7 +1622,7 @@ static irqreturn_t phy_bcast_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
+ hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 1);
+ bcast_status = hisi_sas_phy_read32(hisi_hba, phy_no, RX_PRIMS_STATUS);
+ if ((bcast_status & RX_BCAST_CHG_MSK) &&
+- !test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
++ !test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags))
+ sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD,
+ GFP_ATOMIC);
+ hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
+@@ -3331,7 +3331,7 @@ static void debugfs_snapshot_global_reg_v3_hw(struct hisi_hba *hisi_hba)
+ u32 *databuf = hisi_hba->debugfs_regs[dump_index][DEBUGFS_GLOBAL].data;
+ int i;
+
+- for (i = 0; i < debugfs_axi_reg.count; i++, databuf++)
++ for (i = 0; i < debugfs_global_reg.count; i++, databuf++)
+ *databuf = hisi_sas_read32(hisi_hba, 4 * i);
+ }
+
+@@ -4935,7 +4935,8 @@ static void hisi_sas_reset_prepare_v3_hw(struct pci_dev *pdev)
+ int rc;
+
+ dev_info(dev, "FLR prepare\n");
+- set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
++ down(&hisi_hba->sem);
++ set_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags);
+ hisi_sas_controller_reset_prepare(hisi_hba);
+
+ rc = disable_host_v3_hw(hisi_hba);
+@@ -4947,6 +4948,7 @@ static void hisi_sas_reset_done_v3_hw(struct pci_dev *pdev)
+ {
+ struct sas_ha_struct *sha = pci_get_drvdata(pdev);
+ struct hisi_hba *hisi_hba = sha->lldd_ha;
++ struct Scsi_Host *shost = hisi_hba->shost;
+ struct device *dev = hisi_hba->dev;
+ int rc;
+
+@@ -4955,6 +4957,10 @@ static void hisi_sas_reset_done_v3_hw(struct pci_dev *pdev)
+ rc = hw_init_v3_hw(hisi_hba);
+ if (rc) {
+ dev_err(dev, "FLR: hw init failed rc=%d\n", rc);
++ clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
++ scsi_unblock_requests(shost);
++ clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags);
++ up(&hisi_hba->sem);
+ return;
+ }
+
+@@ -4981,8 +4987,8 @@ static int _suspend_v3_hw(struct device *device)
+ return -ENODEV;
+ }
+
+- if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
+- return -1;
++ if (test_and_set_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags))
++ return -EPERM;
+
+ scsi_block_requests(shost);
+ set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
+@@ -4992,7 +4998,7 @@ static int _suspend_v3_hw(struct device *device)
+ if (rc) {
+ dev_err(dev, "PM suspend: disable host failed rc=%d\n", rc);
+ clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
+- clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
++ clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags);
+ scsi_unblock_requests(shost);
+ return rc;
+ }
+@@ -5031,7 +5037,7 @@ static int _resume_v3_hw(struct device *device)
+ }
+ phys_init_v3_hw(hisi_hba);
+ sas_resume_ha(sha);
+- clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
++ clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags);
+
+ return 0;
+ }
+diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c
+index b2c650542bac5..a0d3e2eb8c1a6 100644
+--- a/drivers/scsi/mpi3mr/mpi3mr_os.c
++++ b/drivers/scsi/mpi3mr/mpi3mr_os.c
+@@ -781,8 +781,14 @@ void mpi3mr_rfresh_tgtdevs(struct mpi3mr_ioc *mrioc)
+ tgtdev = NULL;
+ list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
+ if ((tgtdev->dev_handle != MPI3MR_INVALID_DEV_HANDLE) &&
+- !tgtdev->is_hidden && !tgtdev->host_exposed)
+- mpi3mr_report_tgtdev_to_host(mrioc, tgtdev->perst_id);
++ !tgtdev->is_hidden) {
++ if (!tgtdev->host_exposed)
++ mpi3mr_report_tgtdev_to_host(mrioc,
++ tgtdev->perst_id);
++ else if (tgtdev->starget)
++ starget_for_each_device(tgtdev->starget,
++ (void *)tgtdev, mpi3mr_update_sdev);
++ }
+ }
+ }
+
+diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c
+index fabc5ce828af3..59e359e9062b4 100644
+--- a/drivers/soc/qcom/llcc-qcom.c
++++ b/drivers/soc/qcom/llcc-qcom.c
+@@ -450,14 +450,14 @@ static int _qcom_llcc_cfg_program(const struct llcc_slice_config *config,
+ u32 disable_cap_alloc, retain_pc;
+
+ disable_cap_alloc = config->dis_cap_alloc << config->slice_id;
+- ret = regmap_write(drv_data->bcast_regmap,
+- LLCC_TRP_SCID_DIS_CAP_ALLOC, disable_cap_alloc);
++ ret = regmap_update_bits(drv_data->bcast_regmap, LLCC_TRP_SCID_DIS_CAP_ALLOC,
++ BIT(config->slice_id), disable_cap_alloc);
+ if (ret)
+ return ret;
+
+ retain_pc = config->retain_on_pc << config->slice_id;
+- ret = regmap_write(drv_data->bcast_regmap,
+- LLCC_TRP_PCB_ACT, retain_pc);
++ ret = regmap_update_bits(drv_data->bcast_regmap, LLCC_TRP_PCB_ACT,
++ BIT(config->slice_id), retain_pc);
+ if (ret)
+ return ret;
+ }
+diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
+index 123689e457d12..412154732c462 100644
+--- a/drivers/spi/Kconfig
++++ b/drivers/spi/Kconfig
+@@ -984,9 +984,10 @@ config SPI_ZYNQ_QSPI
+
+ config SPI_ZYNQMP_GQSPI
+ tristate "Xilinx ZynqMP GQSPI controller"
+- depends on (SPI_MASTER && HAS_DMA) || COMPILE_TEST
++ depends on (SPI_MEM && HAS_DMA) || COMPILE_TEST
+ help
+ Enables Xilinx GQSPI controller driver for Zynq UltraScale+ MPSoC.
++ This controller only supports the SPI memory interface.
+
+ config SPI_AMD
+ tristate "AMD SPI controller"
+diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
+index f88d9acd20d94..eb2c64e0a5f7c 100644
+--- a/drivers/spi/spi-sh-msiof.c
++++ b/drivers/spi/spi-sh-msiof.c
+@@ -30,12 +30,15 @@
+
+ #include <asm/unaligned.h>
+
++#define SH_MSIOF_FLAG_FIXED_DTDL_200 BIT(0)
++
+ struct sh_msiof_chipdata {
+ u32 bits_per_word_mask;
+ u16 tx_fifo_size;
+ u16 rx_fifo_size;
+ u16 ctlr_flags;
+ u16 min_div_pow;
++ u32 flags;
+ };
+
+ struct sh_msiof_spi_priv {
+@@ -1073,6 +1076,16 @@ static const struct sh_msiof_chipdata rcar_gen3_data = {
+ .min_div_pow = 1,
+ };
+
++static const struct sh_msiof_chipdata rcar_r8a7795_data = {
++ .bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16) |
++ SPI_BPW_MASK(24) | SPI_BPW_MASK(32),
++ .tx_fifo_size = 64,
++ .rx_fifo_size = 64,
++ .ctlr_flags = SPI_CONTROLLER_MUST_TX,
++ .min_div_pow = 1,
++ .flags = SH_MSIOF_FLAG_FIXED_DTDL_200,
++};
++
+ static const struct of_device_id sh_msiof_match[] = {
+ { .compatible = "renesas,sh-mobile-msiof", .data = &sh_data },
+ { .compatible = "renesas,msiof-r8a7743", .data = &rcar_gen2_data },
+@@ -1083,6 +1096,7 @@ static const struct of_device_id sh_msiof_match[] = {
+ { .compatible = "renesas,msiof-r8a7793", .data = &rcar_gen2_data },
+ { .compatible = "renesas,msiof-r8a7794", .data = &rcar_gen2_data },
+ { .compatible = "renesas,rcar-gen2-msiof", .data = &rcar_gen2_data },
++ { .compatible = "renesas,msiof-r8a7795", .data = &rcar_r8a7795_data },
+ { .compatible = "renesas,msiof-r8a7796", .data = &rcar_gen3_data },
+ { .compatible = "renesas,rcar-gen3-msiof", .data = &rcar_gen3_data },
+ { .compatible = "renesas,sh-msiof", .data = &sh_data }, /* Deprecated */
+@@ -1279,6 +1293,9 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
+ return -ENXIO;
+ }
+
++ if (chipdata->flags & SH_MSIOF_FLAG_FIXED_DTDL_200)
++ info->dtdl = 200;
++
+ if (info->mode == MSIOF_SPI_SLAVE)
+ ctlr = spi_alloc_slave(&pdev->dev,
+ sizeof(struct sh_msiof_spi_priv));
+diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
+index 20e0703f1def1..770c4cabdf9b7 100644
+--- a/drivers/tty/serial/8250/8250_omap.c
++++ b/drivers/tty/serial/8250/8250_omap.c
+@@ -1492,7 +1492,7 @@ static int omap8250_remove(struct platform_device *pdev)
+
+ err = pm_runtime_resume_and_get(&pdev->dev);
+ if (err)
+- return err;
++ dev_err(&pdev->dev, "Failed to resume hardware\n");
+
+ serial8250_unregister_port(priv->line);
+ priv->line = -ENODEV;
+diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
+index 3b2beb98eb1e9..4504b5fcc1718 100644
+--- a/drivers/tty/serial/imx.c
++++ b/drivers/tty/serial/imx.c
+@@ -450,13 +450,13 @@ static void imx_uart_stop_tx(struct uart_port *port)
+ ucr1 = imx_uart_readl(sport, UCR1);
+ imx_uart_writel(sport, ucr1 & ~UCR1_TRDYEN, UCR1);
+
++ ucr4 = imx_uart_readl(sport, UCR4);
+ usr2 = imx_uart_readl(sport, USR2);
+- if (!(usr2 & USR2_TXDC)) {
++ if ((!(usr2 & USR2_TXDC)) && (ucr4 & UCR4_TCEN)) {
+ /* The shifter is still busy, so retry once TC triggers */
+ return;
+ }
+
+- ucr4 = imx_uart_readl(sport, UCR4);
+ ucr4 &= ~UCR4_TCEN;
+ imx_uart_writel(sport, ucr4, UCR4);
+
+@@ -2318,7 +2318,7 @@ static int imx_uart_probe(struct platform_device *pdev)
+ /* For register access, we only need to enable the ipg clock. */
+ ret = clk_prepare_enable(sport->clk_ipg);
+ if (ret) {
+- dev_err(&pdev->dev, "failed to enable per clk: %d\n", ret);
++ dev_err(&pdev->dev, "failed to enable ipg clk: %d\n", ret);
+ return ret;
+ }
+
+@@ -2330,10 +2330,8 @@ static int imx_uart_probe(struct platform_device *pdev)
+ sport->ufcr = readl(sport->port.membase + UFCR);
+
+ ret = uart_get_rs485_mode(&sport->port);
+- if (ret) {
+- clk_disable_unprepare(sport->clk_ipg);
+- return ret;
+- }
++ if (ret)
++ goto err_clk;
+
+ if (sport->port.rs485.flags & SER_RS485_ENABLED &&
+ (!sport->have_rtscts && !sport->have_rtsgpio))
+@@ -2417,8 +2415,6 @@ static int imx_uart_probe(struct platform_device *pdev)
+ imx_uart_writel(sport, ucr3, UCR3);
+ }
+
+- clk_disable_unprepare(sport->clk_ipg);
+-
+ hrtimer_init(&sport->trigger_start_tx, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ hrtimer_init(&sport->trigger_stop_tx, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ sport->trigger_start_tx.function = imx_trigger_start_tx;
+@@ -2434,7 +2430,7 @@ static int imx_uart_probe(struct platform_device *pdev)
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request rx irq: %d\n",
+ ret);
+- return ret;
++ goto err_clk;
+ }
+
+ ret = devm_request_irq(&pdev->dev, txirq, imx_uart_txint, 0,
+@@ -2442,7 +2438,7 @@ static int imx_uart_probe(struct platform_device *pdev)
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request tx irq: %d\n",
+ ret);
+- return ret;
++ goto err_clk;
+ }
+
+ ret = devm_request_irq(&pdev->dev, rtsirq, imx_uart_rtsint, 0,
+@@ -2450,14 +2446,14 @@ static int imx_uart_probe(struct platform_device *pdev)
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request rts irq: %d\n",
+ ret);
+- return ret;
++ goto err_clk;
+ }
+ } else {
+ ret = devm_request_irq(&pdev->dev, rxirq, imx_uart_int, 0,
+ dev_name(&pdev->dev), sport);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request irq: %d\n", ret);
+- return ret;
++ goto err_clk;
+ }
+ }
+
+@@ -2465,7 +2461,12 @@ static int imx_uart_probe(struct platform_device *pdev)
+
+ platform_set_drvdata(pdev, sport);
+
+- return uart_add_one_port(&imx_uart_uart_driver, &sport->port);
++ ret = uart_add_one_port(&imx_uart_uart_driver, &sport->port);
++
++err_clk:
++ clk_disable_unprepare(sport->clk_ipg);
++
++ return ret;
+ }
+
+ static int imx_uart_remove(struct platform_device *pdev)
+diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
+index adaccbdc10c36..8c09c97f98149 100644
+--- a/drivers/tty/serial/sc16is7xx.c
++++ b/drivers/tty/serial/sc16is7xx.c
+@@ -24,6 +24,7 @@
+ #include <linux/tty_flip.h>
+ #include <linux/spi/spi.h>
+ #include <linux/uaccess.h>
++#include <linux/units.h>
+ #include <uapi/linux/sched/types.h>
+
+ #define SC16IS7XX_NAME "sc16is7xx"
+@@ -1449,9 +1450,12 @@ static int sc16is7xx_spi_probe(struct spi_device *spi)
+
+ /* Setup SPI bus */
+ spi->bits_per_word = 8;
+- /* only supports mode 0 on SC16IS762 */
++ /* For all variants, only mode 0 is supported */
++ if ((spi->mode & SPI_MODE_X_MASK) != SPI_MODE_0)
++ return dev_err_probe(&spi->dev, -EINVAL, "Unsupported SPI mode\n");
++
+ spi->mode = spi->mode ? : SPI_MODE_0;
+- spi->max_speed_hz = spi->max_speed_hz ? : 15000000;
++ spi->max_speed_hz = spi->max_speed_hz ? : 4 * HZ_PER_MHZ;
+ ret = spi_setup(spi);
+ if (ret)
+ return ret;
+diff --git a/drivers/tty/tty.h b/drivers/tty/tty.h
+index 72b88aafd5361..989986f67263a 100644
+--- a/drivers/tty/tty.h
++++ b/drivers/tty/tty.h
+@@ -63,7 +63,7 @@ int tty_check_change(struct tty_struct *tty);
+ void __stop_tty(struct tty_struct *tty);
+ void __start_tty(struct tty_struct *tty);
+ void tty_write_unlock(struct tty_struct *tty);
+-int tty_write_lock(struct tty_struct *tty, int ndelay);
++int tty_write_lock(struct tty_struct *tty, bool ndelay);
+ void tty_vhangup_session(struct tty_struct *tty);
+ void tty_open_proc_set_tty(struct file *filp, struct tty_struct *tty);
+ int tty_signal_session_leader(struct tty_struct *tty, int exit_session);
+diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
+index 3d540dff42ef4..d5191065b6e91 100644
+--- a/drivers/tty/tty_io.c
++++ b/drivers/tty/tty_io.c
+@@ -956,7 +956,7 @@ void tty_write_unlock(struct tty_struct *tty)
+ wake_up_interruptible_poll(&tty->write_wait, EPOLLOUT);
+ }
+
+-int tty_write_lock(struct tty_struct *tty, int ndelay)
++int tty_write_lock(struct tty_struct *tty, bool ndelay)
+ {
+ if (!mutex_trylock(&tty->atomic_write_lock)) {
+ if (ndelay)
+@@ -1173,7 +1173,7 @@ int tty_send_xchar(struct tty_struct *tty, char ch)
+ return 0;
+ }
+
+- if (tty_write_lock(tty, 0) < 0)
++ if (tty_write_lock(tty, false) < 0)
+ return -ERESTARTSYS;
+
+ down_read(&tty->termios_rwsem);
+@@ -2504,22 +2504,25 @@ static int send_break(struct tty_struct *tty, unsigned int duration)
+ return 0;
+
+ if (tty->driver->flags & TTY_DRIVER_HARDWARE_BREAK)
+- retval = tty->ops->break_ctl(tty, duration);
+- else {
+- /* Do the work ourselves */
+- if (tty_write_lock(tty, 0) < 0)
+- return -EINTR;
+- retval = tty->ops->break_ctl(tty, -1);
+- if (retval)
+- goto out;
+- if (!signal_pending(current))
+- msleep_interruptible(duration);
++ return tty->ops->break_ctl(tty, duration);
++
++ /* Do the work ourselves */
++ if (tty_write_lock(tty, false) < 0)
++ return -EINTR;
++
++ retval = tty->ops->break_ctl(tty, -1);
++ if (!retval) {
++ msleep_interruptible(duration);
+ retval = tty->ops->break_ctl(tty, 0);
+-out:
+- tty_write_unlock(tty);
+- if (signal_pending(current))
+- retval = -EINTR;
++ } else if (retval == -EOPNOTSUPP) {
++ /* some drivers can tell only dynamically */
++ retval = 0;
+ }
++ tty_write_unlock(tty);
++
++ if (signal_pending(current))
++ retval = -EINTR;
++
+ return retval;
+ }
+
+diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c
+index 1736130f9c394..dac1e25688034 100644
+--- a/drivers/tty/tty_ioctl.c
++++ b/drivers/tty/tty_ioctl.c
+@@ -427,7 +427,7 @@ retry_write_wait:
+ if (retval < 0)
+ return retval;
+
+- if (tty_write_lock(tty, 0) < 0)
++ if (tty_write_lock(tty, false) < 0)
+ goto retry_write_wait;
+
+ /* Racing writer? */
+diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
+index ea96e319c8a0e..e9b739def70e3 100644
+--- a/drivers/uio/uio.c
++++ b/drivers/uio/uio.c
+@@ -464,13 +464,13 @@ static int uio_open(struct inode *inode, struct file *filep)
+
+ mutex_lock(&minor_lock);
+ idev = idr_find(&uio_idr, iminor(inode));
+- mutex_unlock(&minor_lock);
+ if (!idev) {
+ ret = -ENODEV;
++ mutex_unlock(&minor_lock);
+ goto out;
+ }
+-
+ get_device(&idev->dev);
++ mutex_unlock(&minor_lock);
+
+ if (!try_module_get(idev->owner)) {
+ ret = -ENODEV;
+@@ -1062,9 +1062,8 @@ void uio_unregister_device(struct uio_info *info)
+ wake_up_interruptible(&idev->wait);
+ kill_fasync(&idev->async_queue, SIGIO, POLL_HUP);
+
+- device_unregister(&idev->dev);
+-
+ uio_free_minor(minor);
++ device_unregister(&idev->dev);
+
+ return;
+ }
+diff --git a/drivers/usb/cdns3/cdns3-gadget.c b/drivers/usb/cdns3/cdns3-gadget.c
+index 69a44bd7e5d02..ccdd525bd7c80 100644
+--- a/drivers/usb/cdns3/cdns3-gadget.c
++++ b/drivers/usb/cdns3/cdns3-gadget.c
+@@ -1117,6 +1117,8 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
+ dma_addr_t trb_dma;
+ u32 togle_pcs = 1;
+ int sg_iter = 0;
++ int num_trb_req;
++ int trb_burst;
+ int num_trb;
+ int address;
+ u32 control;
+@@ -1125,15 +1127,13 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
+ struct scatterlist *s = NULL;
+ bool sg_supported = !!(request->num_mapped_sgs);
+
++ num_trb_req = sg_supported ? request->num_mapped_sgs : 1;
++
++ /* ISO transfers require a TD per SOF; each TD includes some TRBs */
+ if (priv_ep->type == USB_ENDPOINT_XFER_ISOC)
+- num_trb = priv_ep->interval;
++ num_trb = priv_ep->interval * num_trb_req;
+ else
+- num_trb = sg_supported ? request->num_mapped_sgs : 1;
+-
+- if (num_trb > priv_ep->free_trbs) {
+- priv_ep->flags |= EP_RING_FULL;
+- return -ENOBUFS;
+- }
++ num_trb = num_trb_req;
+
+ priv_req = to_cdns3_request(request);
+ address = priv_ep->endpoint.desc->bEndpointAddress;
+@@ -1182,14 +1182,31 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
+
+ link_trb->control = cpu_to_le32(((priv_ep->pcs) ? TRB_CYCLE : 0) |
+ TRB_TYPE(TRB_LINK) | TRB_TOGGLE | ch_bit);
++
++ if (priv_ep->type == USB_ENDPOINT_XFER_ISOC) {
++ /*
++ * ISO requires the LINK TRB to be the first one of a TD.
++ * Fill the remaining TRB space with LINK TRBs to simplify the software logic.
++ */
++ while (priv_ep->enqueue) {
++ *trb = *link_trb;
++ trace_cdns3_prepare_trb(priv_ep, trb);
++
++ cdns3_ep_inc_enq(priv_ep);
++ trb = priv_ep->trb_pool + priv_ep->enqueue;
++ priv_req->trb = trb;
++ }
++ }
++ }
++
++ if (num_trb > priv_ep->free_trbs) {
++ priv_ep->flags |= EP_RING_FULL;
++ return -ENOBUFS;
+ }
+
+ if (priv_dev->dev_ver <= DEV_VER_V2)
+ togle_pcs = cdns3_wa1_update_guard(priv_ep, trb);
+
+- if (sg_supported)
+- s = request->sg;
+-
+ /* set incorrect Cycle Bit for first trb*/
+ control = priv_ep->pcs ? 0 : TRB_CYCLE;
+ trb->length = 0;
+@@ -1207,6 +1224,9 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
+ do {
+ u32 length;
+
++ if (!(sg_iter % num_trb_req) && sg_supported)
++ s = request->sg;
++
+ /* fill TRB */
+ control |= TRB_TYPE(TRB_NORMAL);
+ if (sg_supported) {
+@@ -1221,7 +1241,36 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
+ total_tdl += DIV_ROUND_UP(length,
+ priv_ep->endpoint.maxpacket);
+
+- trb->length |= cpu_to_le32(TRB_BURST_LEN(priv_ep->trb_burst_size) |
++ trb_burst = priv_ep->trb_burst_size;
++
++ /*
++ * The DMA-crossing-4k-boundary problem was supposed to be fixed in DEV_VER_V2,
++ * but it is still seen on ISO transfers when sg is enabled.
++ *
++ * The data pattern looks like below when sg is enabled, packet size is 1k and mult is 2:
++ * [UVC Header(8B) ] [data(3k - 8)] ...
++ *
++ * The data received at offset 0xd000 actually contains the 0xc000 data, len 0x70.
++ * The error follows the pattern below:
++ * 0xd000: wrong
++ * 0xe000: wrong
++ * 0xf000: correct
++ * 0x10000: wrong
++ * 0x11000: wrong
++ * 0x12000: correct
++ * ...
++ *
++ * It is still unclear why the error does not happen below 0xd000, which should
++ * also cross a 4k boundary. In any case, the code below fixes the problem.
++ *
++ * To avoid DMA crossing a 4k boundary on ISO transfers, reduce the burst len to 16.
++ */
++ if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && priv_dev->dev_ver <= DEV_VER_V2)
++ if (ALIGN_DOWN(trb->buffer, SZ_4K) !=
++ ALIGN_DOWN(trb->buffer + length, SZ_4K))
++ trb_burst = 16;
++
++ trb->length |= cpu_to_le32(TRB_BURST_LEN(trb_burst) |
+ TRB_LEN(length));
+ pcs = priv_ep->pcs ? TRB_CYCLE : 0;
+
+@@ -1248,7 +1297,7 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
+ if (sg_supported) {
+ trb->control |= cpu_to_le32(TRB_ISP);
+ /* Don't set chain bit for last TRB */
+- if (sg_iter < num_trb - 1)
++ if ((sg_iter % num_trb_req) < num_trb_req - 1)
+ trb->control |= cpu_to_le32(TRB_CHAIN);
+
+ s = sg_next(s);
+@@ -1506,6 +1555,12 @@ static void cdns3_transfer_completed(struct cdns3_device *priv_dev,
+
+ /* The TRB was changed as link TRB, and the request was handled at ep_dequeue */
+ while (TRB_FIELD_TO_TYPE(le32_to_cpu(trb->control)) == TRB_LINK) {
++
++ /* ISO ep_traddr may stop at LINK TRB */
++ if (priv_ep->dequeue == cdns3_get_dma_pos(priv_dev, priv_ep) &&
++ priv_ep->type == USB_ENDPOINT_XFER_ISOC)
++ break;
++
+ trace_cdns3_complete_trb(priv_ep, trb);
+ cdns3_ep_inc_deq(priv_ep);
+ trb = priv_ep->trb_pool + priv_ep->dequeue;
+@@ -1538,6 +1593,10 @@ static void cdns3_transfer_completed(struct cdns3_device *priv_dev,
+ }
+
+ if (request_handled) {
++ /* TRBs are duplicated by priv_ep->interval times for ISO IN */
++ if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && priv_ep->dir)
++ request->actual /= priv_ep->interval;
++
+ cdns3_gadget_giveback(priv_ep, priv_req, 0);
+ request_handled = false;
+ transfer_end = false;
+@@ -2033,11 +2092,10 @@ int cdns3_ep_config(struct cdns3_endpoint *priv_ep, bool enable)
+ bool is_iso_ep = (priv_ep->type == USB_ENDPOINT_XFER_ISOC);
+ struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
+ u32 bEndpointAddress = priv_ep->num | priv_ep->dir;
+- u32 max_packet_size = 0;
+- u8 maxburst = 0;
++ u32 max_packet_size = priv_ep->wMaxPacketSize;
++ u8 maxburst = priv_ep->bMaxBurst;
+ u32 ep_cfg = 0;
+ u8 buffering;
+- u8 mult = 0;
+ int ret;
+
+ buffering = priv_dev->ep_buf_size - 1;
+@@ -2059,8 +2117,7 @@ int cdns3_ep_config(struct cdns3_endpoint *priv_ep, bool enable)
+ break;
+ default:
+ ep_cfg = EP_CFG_EPTYPE(USB_ENDPOINT_XFER_ISOC);
+- mult = priv_dev->ep_iso_burst - 1;
+- buffering = mult + 1;
++ buffering = (priv_ep->bMaxBurst + 1) * (priv_ep->mult + 1) - 1;
+ }
+
+ switch (priv_dev->gadget.speed) {
+@@ -2071,17 +2128,8 @@ int cdns3_ep_config(struct cdns3_endpoint *priv_ep, bool enable)
+ max_packet_size = is_iso_ep ? 1024 : 512;
+ break;
+ case USB_SPEED_SUPER:
+- /* It's limitation that driver assumes in driver. */
+- mult = 0;
+- max_packet_size = 1024;
+- if (priv_ep->type == USB_ENDPOINT_XFER_ISOC) {
+- maxburst = priv_dev->ep_iso_burst - 1;
+- buffering = (mult + 1) *
+- (maxburst + 1);
+-
+- if (priv_ep->interval > 1)
+- buffering++;
+- } else {
++ if (priv_ep->type != USB_ENDPOINT_XFER_ISOC) {
++ max_packet_size = 1024;
+ maxburst = priv_dev->ep_buf_size - 1;
+ }
+ break;
+@@ -2110,7 +2158,6 @@ int cdns3_ep_config(struct cdns3_endpoint *priv_ep, bool enable)
+ if (priv_dev->dev_ver < DEV_VER_V2)
+ priv_ep->trb_burst_size = 16;
+
+- mult = min_t(u8, mult, EP_CFG_MULT_MAX);
+ buffering = min_t(u8, buffering, EP_CFG_BUFFERING_MAX);
+ maxburst = min_t(u8, maxburst, EP_CFG_MAXBURST_MAX);
+
+@@ -2144,7 +2191,7 @@ int cdns3_ep_config(struct cdns3_endpoint *priv_ep, bool enable)
+ }
+
+ ep_cfg |= EP_CFG_MAXPKTSIZE(max_packet_size) |
+- EP_CFG_MULT(mult) |
++ EP_CFG_MULT(priv_ep->mult) | /* must match EP setting */
+ EP_CFG_BUFFERING(buffering) |
+ EP_CFG_MAXBURST(maxburst);
+
+@@ -2234,6 +2281,13 @@ usb_ep *cdns3_gadget_match_ep(struct usb_gadget *gadget,
+ priv_ep->type = usb_endpoint_type(desc);
+ priv_ep->flags |= EP_CLAIMED;
+ priv_ep->interval = desc->bInterval ? BIT(desc->bInterval - 1) : 0;
++ priv_ep->wMaxPacketSize = usb_endpoint_maxp(desc);
++ priv_ep->mult = USB_EP_MAXP_MULT(priv_ep->wMaxPacketSize);
++ priv_ep->wMaxPacketSize &= USB_ENDPOINT_MAXP_MASK;
++ if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && comp_desc) {
++ priv_ep->mult = USB_SS_MULT(comp_desc->bmAttributes) - 1;
++ priv_ep->bMaxBurst = comp_desc->bMaxBurst;
++ }
+
+ spin_unlock_irqrestore(&priv_dev->lock, flags);
+ return &priv_ep->endpoint;
+@@ -3015,22 +3069,40 @@ static int cdns3_gadget_check_config(struct usb_gadget *gadget)
+ struct cdns3_endpoint *priv_ep;
+ struct usb_ep *ep;
+ int n_in = 0;
++ int iso = 0;
++ int out = 1;
+ int total;
++ int n;
+
+ list_for_each_entry(ep, &gadget->ep_list, ep_list) {
+ priv_ep = ep_to_cdns3_ep(ep);
+- if ((priv_ep->flags & EP_CLAIMED) && (ep->address & USB_DIR_IN))
+- n_in++;
++ if (!(priv_ep->flags & EP_CLAIMED))
++ continue;
++
++ n = (priv_ep->mult + 1) * (priv_ep->bMaxBurst + 1);
++ if (ep->address & USB_DIR_IN) {
++ /*
++ * ISO transfer: DMA starts moving data when an ISO packet arrives and
++ * only transfers min(TD size, iso) data. There is no benefit in
++ * allocating more internal memory than 'iso'.
++ */
++ if (priv_ep->type == USB_ENDPOINT_XFER_ISOC)
++ iso += n;
++ else
++ n_in++;
++ } else {
++ if (priv_ep->type == USB_ENDPOINT_XFER_ISOC)
++ out = max_t(int, out, n);
++ }
+ }
+
+ /* 2KB are reserved for EP0, 1KB for out*/
+- total = 2 + n_in + 1;
++ total = 2 + n_in + out + iso;
+
+ if (total > priv_dev->onchip_buffers)
+ return -ENOMEM;
+
+- priv_dev->ep_buf_size = priv_dev->ep_iso_burst =
+- (priv_dev->onchip_buffers - 2) / (n_in + 1);
++ priv_dev->ep_buf_size = (priv_dev->onchip_buffers - 2 - iso) / (n_in + out);
+
+ return 0;
+ }
+diff --git a/drivers/usb/cdns3/cdns3-gadget.h b/drivers/usb/cdns3/cdns3-gadget.h
+index fbe4a8e3aa897..086a7bb838975 100644
+--- a/drivers/usb/cdns3/cdns3-gadget.h
++++ b/drivers/usb/cdns3/cdns3-gadget.h
+@@ -1168,6 +1168,9 @@ struct cdns3_endpoint {
+ u8 dir;
+ u8 num;
+ u8 type;
++ u8 mult;
++ u8 bMaxBurst;
++ u16 wMaxPacketSize;
+ int interval;
+
+ int free_trbs;
+diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
+index 0e8f4aa031f81..6541bec61cc80 100644
+--- a/drivers/usb/chipidea/core.c
++++ b/drivers/usb/chipidea/core.c
+@@ -521,6 +521,13 @@ static irqreturn_t ci_irq_handler(int irq, void *data)
+ u32 otgsc = 0;
+
+ if (ci->in_lpm) {
++ /*
++ * If we already have a wakeup irq pending there,
++ * let's just return and wait for resume to finish first.
++ */
++ if (ci->wakeup_int)
++ return IRQ_HANDLED;
++
+ disable_irq_nosync(irq);
+ ci->wakeup_int = true;
+ pm_runtime_get(ci->dev);
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index adc154b691d05..f21fd809e44ff 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -896,6 +896,9 @@ static int acm_tty_break_ctl(struct tty_struct *tty, int state)
+ struct acm *acm = tty->driver_data;
+ int retval;
+
++ if (!(acm->ctrl_caps & USB_CDC_CAP_BRK))
++ return -EOPNOTSUPP;
++
+ retval = acm_send_break(acm, state ? 0xffff : 0);
+ if (retval < 0)
+ dev_dbg(&acm->control->dev,
+diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
+index 9e42023e2962a..0f0269d28c371 100644
+--- a/drivers/usb/dwc3/core.c
++++ b/drivers/usb/dwc3/core.c
+@@ -275,48 +275,11 @@ int dwc3_core_soft_reset(struct dwc3 *dwc)
+ /*
+ * We're resetting only the device side because, if we're in host mode,
+ * XHCI driver will reset the host block. If dwc3 was configured for
+- * host-only mode or current role is host, then we can return early.
++ * host-only mode, then we can return early.
+ */
+ if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_HOST)
+ return 0;
+
+- /*
+- * If the dr_mode is host and the dwc->current_dr_role is not the
+- * corresponding DWC3_GCTL_PRTCAP_HOST, then the dwc3_core_init_mode
+- * isn't executed yet. Ensure the phy is ready before the controller
+- * updates the GCTL.PRTCAPDIR or other settings by soft-resetting
+- * the phy.
+- *
+- * Note: GUSB3PIPECTL[n] and GUSB2PHYCFG[n] are port settings where n
+- * is port index. If this is a multiport host, then we need to reset
+- * all active ports.
+- */
+- if (dwc->dr_mode == USB_DR_MODE_HOST) {
+- u32 usb3_port;
+- u32 usb2_port;
+-
+- usb3_port = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
+- usb3_port |= DWC3_GUSB3PIPECTL_PHYSOFTRST;
+- dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), usb3_port);
+-
+- usb2_port = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
+- usb2_port |= DWC3_GUSB2PHYCFG_PHYSOFTRST;
+- dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), usb2_port);
+-
+- /* Small delay for phy reset assertion */
+- usleep_range(1000, 2000);
+-
+- usb3_port &= ~DWC3_GUSB3PIPECTL_PHYSOFTRST;
+- dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), usb3_port);
+-
+- usb2_port &= ~DWC3_GUSB2PHYCFG_PHYSOFTRST;
+- dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), usb2_port);
+-
+- /* Wait for clock synchronization */
+- msleep(50);
+- return 0;
+- }
+-
+ reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+ reg |= DWC3_DCTL_CSFTRST;
+ reg &= ~DWC3_DCTL_RUN_STOP;
+diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
+index 658739410992a..52f2bfae46bce 100644
+--- a/drivers/usb/dwc3/ep0.c
++++ b/drivers/usb/dwc3/ep0.c
+@@ -236,7 +236,10 @@ static void dwc3_ep0_stall_and_restart(struct dwc3 *dwc)
+ struct dwc3_request *req;
+
+ req = next_request(&dep->pending_list);
+- dwc3_gadget_giveback(dep, req, -ECONNRESET);
++ if (!dwc->connected)
++ dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
++ else
++ dwc3_gadget_giveback(dep, req, -ECONNRESET);
+ }
+
+ dwc->ep0state = EP0_SETUP_PHASE;
+diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c
+index b55ddc1156cce..4e5c7d3fb4bc2 100644
+--- a/drivers/usb/host/xhci-mtk.c
++++ b/drivers/usb/host/xhci-mtk.c
+@@ -7,6 +7,7 @@
+ * Chunfeng Yun <chunfeng.yun@mediatek.com>
+ */
+
++#include <linux/bitfield.h>
+ #include <linux/dma-mapping.h>
+ #include <linux/iopoll.h>
+ #include <linux/kernel.h>
+@@ -72,6 +73,9 @@
+ #define FRMCNT_LEV1_RANG (0x12b << 8)
+ #define FRMCNT_LEV1_RANG_MASK GENMASK(19, 8)
+
++#define HSCH_CFG1 0x960
++#define SCH3_RXFIFO_DEPTH_MASK GENMASK(21, 20)
++
+ #define SS_GEN2_EOF_CFG 0x990
+ #define SSG2EOF_OFFSET 0x3c
+
+@@ -100,6 +104,8 @@
+ #define SSC_IP_SLEEP_EN BIT(4)
+ #define SSC_SPM_INT_EN BIT(1)
+
++#define SCH_FIFO_TO_KB(x) ((x) >> 10)
++
+ enum ssusb_uwk_vers {
+ SSUSB_UWK_V1 = 1,
+ SSUSB_UWK_V2,
+@@ -147,6 +153,35 @@ static void xhci_mtk_set_frame_interval(struct xhci_hcd_mtk *mtk)
+ writel(value, hcd->regs + SS_GEN2_EOF_CFG);
+ }
+
++/*
++ * workaround: usb3.2 gen1 isoc rx hw issue
++ * the host sends out an unexpected ACK after the device finishes a burst
++ * transfer with a short packet.
++ */
++static void xhci_mtk_rxfifo_depth_set(struct xhci_hcd_mtk *mtk)
++{
++ struct usb_hcd *hcd = mtk->hcd;
++ u32 value;
++
++ if (!mtk->rxfifo_depth)
++ return;
++
++ value = readl(hcd->regs + HSCH_CFG1);
++ value &= ~SCH3_RXFIFO_DEPTH_MASK;
++ value |= FIELD_PREP(SCH3_RXFIFO_DEPTH_MASK,
++ SCH_FIFO_TO_KB(mtk->rxfifo_depth) - 1);
++ writel(value, hcd->regs + HSCH_CFG1);
++}
++
++static void xhci_mtk_init_quirk(struct xhci_hcd_mtk *mtk)
++{
++ /* workaround only for mt8195 */
++ xhci_mtk_set_frame_interval(mtk);
++
++ /* workaround for SoCs using SSUSB IPM versions before v1.6.0 */
++ xhci_mtk_rxfifo_depth_set(mtk);
++}
++
+ static int xhci_mtk_host_enable(struct xhci_hcd_mtk *mtk)
+ {
+ struct mu3c_ippc_regs __iomem *ippc = mtk->ippc_regs;
+@@ -429,8 +464,7 @@ static int xhci_mtk_setup(struct usb_hcd *hcd)
+ if (ret)
+ return ret;
+
+- /* workaround only for mt8195 */
+- xhci_mtk_set_frame_interval(mtk);
++ xhci_mtk_init_quirk(mtk);
+ }
+
+ ret = xhci_gen_setup(hcd, xhci_mtk_quirks);
+@@ -517,6 +551,8 @@ static int xhci_mtk_probe(struct platform_device *pdev)
+ of_property_read_u32(node, "mediatek,u2p-dis-msk",
+ &mtk->u2p_dis_msk);
+
++ of_property_read_u32(node, "rx-fifo-depth", &mtk->rxfifo_depth);
++
+ ret = usb_wakeup_of_property_parse(mtk, node);
+ if (ret) {
+ dev_err(dev, "failed to parse uwk property\n");
+diff --git a/drivers/usb/host/xhci-mtk.h b/drivers/usb/host/xhci-mtk.h
+index 4b1ea89f959a4..153fc7ba1609a 100644
+--- a/drivers/usb/host/xhci-mtk.h
++++ b/drivers/usb/host/xhci-mtk.h
+@@ -161,6 +161,8 @@ struct xhci_hcd_mtk {
+ struct regmap *uwk;
+ u32 uwk_reg_base;
+ u32 uwk_vers;
++ /* quirk */
++ u32 rxfifo_depth;
+ };
+
+ static inline struct xhci_hcd_mtk *hcd_to_mtk(struct usb_hcd *hcd)
+diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c
+index 094e812e9e692..35483217b1f6c 100644
+--- a/drivers/usb/mon/mon_bin.c
++++ b/drivers/usb/mon/mon_bin.c
+@@ -1247,14 +1247,19 @@ static vm_fault_t mon_bin_vma_fault(struct vm_fault *vmf)
+ struct mon_reader_bin *rp = vmf->vma->vm_private_data;
+ unsigned long offset, chunk_idx;
+ struct page *pageptr;
++ unsigned long flags;
+
++ spin_lock_irqsave(&rp->b_lock, flags);
+ offset = vmf->pgoff << PAGE_SHIFT;
+- if (offset >= rp->b_size)
++ if (offset >= rp->b_size) {
++ spin_unlock_irqrestore(&rp->b_lock, flags);
+ return VM_FAULT_SIGBUS;
++ }
+ chunk_idx = offset / CHUNK_SIZE;
+ pageptr = rp->b_vec[chunk_idx].pg;
+ get_page(pageptr);
+ vmf->page = pageptr;
++ spin_unlock_irqrestore(&rp->b_lock, flags);
+ return 0;
+ }
+
+diff --git a/drivers/usb/phy/phy-mxs-usb.c b/drivers/usb/phy/phy-mxs-usb.c
+index 7a7eb8af60448..19b54c1cb779e 100644
+--- a/drivers/usb/phy/phy-mxs-usb.c
++++ b/drivers/usb/phy/phy-mxs-usb.c
+@@ -388,8 +388,7 @@ static void __mxs_phy_disconnect_line(struct mxs_phy *mxs_phy, bool disconnect)
+
+ static bool mxs_phy_is_otg_host(struct mxs_phy *mxs_phy)
+ {
+- return IS_ENABLED(CONFIG_USB_OTG) &&
+- mxs_phy->phy.last_event == USB_EVENT_ID;
++ return mxs_phy->phy.last_event == USB_EVENT_ID;
+ }
+
+ static void mxs_phy_disconnect_line(struct mxs_phy *mxs_phy, bool on)
+diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c
+index ea86195c75b83..173d86d120daf 100644
+--- a/drivers/usb/typec/class.c
++++ b/drivers/usb/typec/class.c
+@@ -261,11 +261,13 @@ static void typec_altmode_put_partner(struct altmode *altmode)
+ {
+ struct altmode *partner = altmode->partner;
+ struct typec_altmode *adev;
++ struct typec_altmode *partner_adev;
+
+ if (!partner)
+ return;
+
+ adev = &altmode->adev;
++ partner_adev = &partner->adev;
+
+ if (is_typec_plug(adev->dev.parent)) {
+ struct typec_plug *plug = to_typec_plug(adev->dev.parent);
+@@ -274,7 +276,7 @@ static void typec_altmode_put_partner(struct altmode *altmode)
+ } else {
+ partner->partner = NULL;
+ }
+- put_device(&adev->dev);
++ put_device(&partner_adev->dev);
+ }
+
+ /**
+diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c
+index a591d291b231a..0708e214c5a33 100644
+--- a/drivers/video/fbdev/core/fb_defio.c
++++ b/drivers/video/fbdev/core/fb_defio.c
+@@ -78,11 +78,7 @@ int fb_deferred_io_fsync(struct file *file, loff_t start, loff_t end, int datasy
+ return 0;
+
+ inode_lock(inode);
+- /* Kill off the delayed work */
+- cancel_delayed_work_sync(&info->deferred_work);
+-
+- /* Run it immediately */
+- schedule_delayed_work(&info->deferred_work, 0);
++ flush_delayed_work(&info->deferred_work);
+ inode_unlock(inode);
+
+ return 0;
+diff --git a/drivers/watchdog/bcm2835_wdt.c b/drivers/watchdog/bcm2835_wdt.c
+index 94907176a0e4f..55c0f7b0e8fba 100644
+--- a/drivers/watchdog/bcm2835_wdt.c
++++ b/drivers/watchdog/bcm2835_wdt.c
+@@ -42,6 +42,7 @@
+
+ #define SECS_TO_WDOG_TICKS(x) ((x) << 16)
+ #define WDOG_TICKS_TO_SECS(x) ((x) >> 16)
++#define WDOG_TICKS_TO_MSECS(x) ((x) * 1000 >> 16)
+
+ struct bcm2835_wdt {
+ void __iomem *base;
+@@ -140,7 +141,7 @@ static struct watchdog_device bcm2835_wdt_wdd = {
+ .info = &bcm2835_wdt_info,
+ .ops = &bcm2835_wdt_ops,
+ .min_timeout = 1,
+- .max_timeout = WDOG_TICKS_TO_SECS(PM_WDOG_TIME_SET),
++ .max_hw_heartbeat_ms = WDOG_TICKS_TO_MSECS(PM_WDOG_TIME_SET),
+ .timeout = WDOG_TICKS_TO_SECS(PM_WDOG_TIME_SET),
+ };
+
+diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
+index a5006a58e0dbb..39fc209f799ec 100644
+--- a/drivers/watchdog/hpwdt.c
++++ b/drivers/watchdog/hpwdt.c
+@@ -176,7 +176,7 @@ static int hpwdt_pretimeout(unsigned int ulReason, struct pt_regs *regs)
+ "3. OA Forward Progress Log\n"
+ "4. iLO Event Log";
+
+- if (ilo5 && ulReason == NMI_UNKNOWN && !mynmi)
++ if (ulReason == NMI_UNKNOWN && !mynmi)
+ return NMI_DONE;
+
+ if (ilo5 && !pretimeout && !mynmi)
+diff --git a/drivers/watchdog/rti_wdt.c b/drivers/watchdog/rti_wdt.c
+index 46c2a4bd9ebe9..daa00f3c5a6af 100644
+--- a/drivers/watchdog/rti_wdt.c
++++ b/drivers/watchdog/rti_wdt.c
+@@ -70,6 +70,11 @@ static int rti_wdt_start(struct watchdog_device *wdd)
+ {
+ u32 timer_margin;
+ struct rti_wdt_device *wdt = watchdog_get_drvdata(wdd);
++ int ret;
++
++ ret = pm_runtime_resume_and_get(wdd->parent);
++ if (ret)
++ return ret;
+
+ /* set timeout period */
+ timer_margin = (u64)wdd->timeout * wdt->freq;
+@@ -296,6 +301,9 @@ static int rti_wdt_probe(struct platform_device *pdev)
+ if (last_ping)
+ watchdog_set_last_hw_keepalive(wdd, last_ping);
+
++ if (!watchdog_hw_running(wdd))
++ pm_runtime_put_sync(&pdev->dev);
++
+ return 0;
+
+ err_iomap:
+@@ -310,7 +318,10 @@ static int rti_wdt_remove(struct platform_device *pdev)
+ struct rti_wdt_device *wdt = platform_get_drvdata(pdev);
+
+ watchdog_unregister_device(&wdt->wdd);
+- pm_runtime_put(&pdev->dev);
++
++ if (!pm_runtime_suspended(&pdev->dev))
++ pm_runtime_put(&pdev->dev);
++
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c
+index 5eec84fa65170..d3b5aa87c1410 100644
+--- a/drivers/watchdog/watchdog_dev.c
++++ b/drivers/watchdog/watchdog_dev.c
+@@ -1035,6 +1035,7 @@ static int watchdog_cdev_register(struct watchdog_device *wdd)
+
+ /* Fill in the data structures */
+ cdev_init(&wd_data->cdev, &watchdog_fops);
++ wd_data->cdev.owner = wdd->ops->owner;
+
+ /* Add the device */
+ err = cdev_device_add(&wd_data->cdev, &wd_data->dev);
+@@ -1049,8 +1050,6 @@ static int watchdog_cdev_register(struct watchdog_device *wdd)
+ return err;
+ }
+
+- wd_data->cdev.owner = wdd->ops->owner;
+-
+ /* Record time of most recent heartbeat as 'just before now'. */
+ wd_data->last_hw_keepalive = ktime_sub(ktime_get(), 1);
+ watchdog_set_open_deadline(wd_data);
+diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c
+index 38930d9b0bb73..df5c2162e7297 100644
+--- a/fs/debugfs/file.c
++++ b/fs/debugfs/file.c
+@@ -84,6 +84,14 @@ int debugfs_file_get(struct dentry *dentry)
+ struct debugfs_fsdata *fsd;
+ void *d_fsd;
+
++ /*
++ * This could only happen if some debugfs user erroneously calls
++ * debugfs_file_get() on a dentry that isn't even a file; let
++ * them know about it.
++ */
++ if (WARN_ON(!d_is_reg(dentry)))
++ return -EINVAL;
++
+ d_fsd = READ_ONCE(dentry->d_fsdata);
+ if (!((unsigned long)d_fsd & DEBUGFS_FSDATA_IS_REAL_FOPS_BIT)) {
+ fsd = d_fsd;
+diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
+index 26f9cd3282918..5290a721a703f 100644
+--- a/fs/debugfs/inode.c
++++ b/fs/debugfs/inode.c
+@@ -214,17 +214,19 @@ static const struct super_operations debugfs_super_operations = {
+
+ static void debugfs_release_dentry(struct dentry *dentry)
+ {
+- void *fsd = dentry->d_fsdata;
++ struct debugfs_fsdata *fsd = dentry->d_fsdata;
+
+- if (!((unsigned long)fsd & DEBUGFS_FSDATA_IS_REAL_FOPS_BIT))
+- kfree(dentry->d_fsdata);
++ if ((unsigned long)fsd & DEBUGFS_FSDATA_IS_REAL_FOPS_BIT)
++ return;
++
++ kfree(fsd);
+ }
+
+ static struct vfsmount *debugfs_automount(struct path *path)
+ {
+- debugfs_automount_t f;
+- f = (debugfs_automount_t)path->dentry->d_fsdata;
+- return f(path->dentry, d_inode(path->dentry)->i_private);
++ struct debugfs_fsdata *fsd = path->dentry->d_fsdata;
++
++ return fsd->automount(path->dentry, d_inode(path->dentry)->i_private);
+ }
+
+ static const struct dentry_operations debugfs_dops = {
+@@ -602,13 +604,23 @@ struct dentry *debugfs_create_automount(const char *name,
+ void *data)
+ {
+ struct dentry *dentry = start_creating(name, parent);
++ struct debugfs_fsdata *fsd;
+ struct inode *inode;
+
+ if (IS_ERR(dentry))
+ return dentry;
+
++ fsd = kzalloc(sizeof(*fsd), GFP_KERNEL);
++ if (!fsd) {
++ failed_creating(dentry);
++ return ERR_PTR(-ENOMEM);
++ }
++
++ fsd->automount = f;
++
+ if (!(debugfs_allow & DEBUGFS_ALLOW_API)) {
+ failed_creating(dentry);
++ kfree(fsd);
+ return ERR_PTR(-EPERM);
+ }
+
+@@ -616,13 +628,14 @@ struct dentry *debugfs_create_automount(const char *name,
+ if (unlikely(!inode)) {
+ pr_err("out of free dentries, can not create automount '%s'\n",
+ name);
++ kfree(fsd);
+ return failed_creating(dentry);
+ }
+
+ make_empty_dir_inode(inode);
+ inode->i_flags |= S_AUTOMOUNT;
+ inode->i_private = data;
+- dentry->d_fsdata = (void *)f;
++ dentry->d_fsdata = fsd;
+ /* directory inodes start off with i_nlink == 2 (for "." entry) */
+ inc_nlink(inode);
+ d_instantiate(dentry, inode);
+diff --git a/fs/debugfs/internal.h b/fs/debugfs/internal.h
+index 92af8ae313134..f7c489b5a368c 100644
+--- a/fs/debugfs/internal.h
++++ b/fs/debugfs/internal.h
+@@ -17,8 +17,14 @@ extern const struct file_operations debugfs_full_proxy_file_operations;
+
+ struct debugfs_fsdata {
+ const struct file_operations *real_fops;
+- refcount_t active_users;
+- struct completion active_users_drained;
++ union {
++ /* automount is used when real_fops is NULL */
++ debugfs_automount_t automount;
++ struct {
++ refcount_t active_users;
++ struct completion active_users_drained;
++ };
++ };
+ };
+
+ /*
+diff --git a/fs/efivarfs/super.c b/fs/efivarfs/super.c
+index 15880a68faadc..3626816b174ad 100644
+--- a/fs/efivarfs/super.c
++++ b/fs/efivarfs/super.c
+@@ -13,6 +13,7 @@
+ #include <linux/ucs2_string.h>
+ #include <linux/slab.h>
+ #include <linux/magic.h>
++#include <linux/printk.h>
+
+ #include "internal.h"
+
+@@ -231,8 +232,19 @@ static int efivarfs_get_tree(struct fs_context *fc)
+ return get_tree_single(fc, efivarfs_fill_super);
+ }
+
++static int efivarfs_reconfigure(struct fs_context *fc)
++{
++ if (!efivar_supports_writes() && !(fc->sb_flags & SB_RDONLY)) {
++ pr_err("Firmware does not support SetVariableRT. Can not remount with rw\n");
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
+ static const struct fs_context_operations efivarfs_context_ops = {
+ .get_tree = efivarfs_get_tree,
++ .reconfigure = efivarfs_reconfigure,
+ };
+
+ static int efivarfs_init_fs_context(struct fs_context *fc)
+diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
+index 58fd32db025da..d220c45239821 100644
+--- a/fs/f2fs/file.c
++++ b/fs/f2fs/file.c
+@@ -41,7 +41,7 @@ static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf)
+ vm_fault_t ret;
+
+ ret = filemap_fault(vmf);
+- if (!ret)
++ if (ret & VM_FAULT_LOCKED)
+ f2fs_update_iostat(F2FS_I_SB(inode), APP_MAPPED_READ_IO,
+ F2FS_BLKSIZE);
+
+@@ -2780,6 +2780,11 @@ static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
+ goto out;
+ }
+
++ if (f2fs_compressed_file(src) || f2fs_compressed_file(dst)) {
++ ret = -EOPNOTSUPP;
++ goto out_unlock;
++ }
++
+ ret = -EINVAL;
+ if (pos_in + len > src->i_size || pos_in + len < pos_in)
+ goto out_unlock;
+@@ -4250,6 +4255,8 @@ static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
+ {
+ struct file *file = iocb->ki_filp;
+ struct inode *inode = file_inode(file);
++ const loff_t orig_pos = iocb->ki_pos;
++ const size_t orig_count = iov_iter_count(from);
+ ssize_t ret;
+
+ if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
+@@ -4353,8 +4360,7 @@ write:
+ unlock:
+ inode_unlock(inode);
+ out:
+- trace_f2fs_file_write_iter(inode, iocb->ki_pos,
+- iov_iter_count(from), ret);
++ trace_f2fs_file_write_iter(inode, orig_pos, orig_count, ret);
+ if (ret > 0)
+ ret = generic_write_sync(iocb, ret);
+ return ret;
+diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
+index 7a86a8dcf4f1c..0d6906644feb7 100644
+--- a/fs/f2fs/namei.c
++++ b/fs/f2fs/namei.c
+@@ -1088,7 +1088,7 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
+ }
+
+ if (old_dir_entry) {
+- if (old_dir != new_dir && !whiteout)
++ if (old_dir != new_dir)
+ f2fs_set_link(old_inode, old_dir_entry,
+ old_dir_page, new_dir);
+ else
+diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
+index 797ac505a075a..1a18936bc583b 100644
+--- a/fs/f2fs/xattr.c
++++ b/fs/f2fs/xattr.c
+@@ -745,6 +745,12 @@ static int __f2fs_setxattr(struct inode *inode, int index,
+ memcpy(pval, value, size);
+ last->e_value_size = cpu_to_le16(size);
+ new_hsize += newsize;
++ /*
++ * Explicitly add the null terminator. The unused xattr space
++ * is supposed to always be zeroed, which would make this
++ * unnecessary, but don't depend on that.
++ */
++ *(u32 *)((u8 *)last + newsize) = 0;
+ }
+
+ error = write_all_xattrs(inode, new_hsize, base_addr, ipage);
+diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
+index 6901cd85f1df7..e4e85010ab5bd 100644
+--- a/fs/gfs2/rgrp.c
++++ b/fs/gfs2/rgrp.c
+@@ -2316,7 +2316,7 @@ void gfs2_rgrp_dump(struct seq_file *seq, struct gfs2_rgrpd *rgd,
+ (unsigned long long)rgd->rd_addr, rgd->rd_flags,
+ rgd->rd_free, rgd->rd_free_clone, rgd->rd_dinodes,
+ rgd->rd_requested, rgd->rd_reserved, rgd->rd_extfail_pt);
+- if (rgd->rd_sbd->sd_args.ar_rgrplvb) {
++ if (rgd->rd_sbd->sd_args.ar_rgrplvb && rgd->rd_rgl) {
+ struct gfs2_rgrp_lvb *rgl = rgd->rd_rgl;
+
+ gfs2_print_dbg(seq, "%s L: f:%02x b:%u i:%u\n", fs_id_buf,
+diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
+index e058ef1839377..f858d1152368a 100644
+--- a/fs/jbd2/commit.c
++++ b/fs/jbd2/commit.c
+@@ -300,6 +300,7 @@ static int journal_finish_inode_data_buffers(journal_t *journal,
+ if (!ret)
+ ret = err;
+ }
++ cond_resched();
+ spin_lock(&journal->j_list_lock);
+ jinode->i_flags &= ~JI_COMMIT_RUNNING;
+ smp_mb();
+diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
+index 11fbc9b6ec5cb..b7af1727a0160 100644
+--- a/fs/jbd2/journal.c
++++ b/fs/jbd2/journal.c
+@@ -1616,9 +1616,11 @@ static int jbd2_write_superblock(journal_t *journal, int write_flags)
+ return -EIO;
+ }
+
+- trace_jbd2_write_superblock(journal, write_flags);
+ if (!(journal->j_flags & JBD2_BARRIER))
+ write_flags &= ~(REQ_FUA | REQ_PREFLUSH);
++
++ trace_jbd2_write_superblock(journal, write_flags);
++
+ if (buffer_write_io_error(bh)) {
+ /*
+ * Oh, dear. A previous attempt to write the journal
+diff --git a/fs/ksmbd/smb_common.c b/fs/ksmbd/smb_common.c
+index d160363c09ebc..e90a1e8c1951d 100644
+--- a/fs/ksmbd/smb_common.c
++++ b/fs/ksmbd/smb_common.c
+@@ -158,8 +158,12 @@ int ksmbd_verify_smb_message(struct ksmbd_work *work)
+ */
+ bool ksmbd_smb_request(struct ksmbd_conn *conn)
+ {
+- __le32 *proto = (__le32 *)smb2_get_msg(conn->request_buf);
++ __le32 *proto;
+
++ if (conn->request_buf[0] != 0)
++ return false;
++
++ proto = (__le32 *)smb2_get_msg(conn->request_buf);
+ if (*proto == SMB2_COMPRESSION_TRANSFORM_ID) {
+ pr_err_ratelimited("smb2 compression not support yet");
+ return false;
+diff --git a/fs/namespace.c b/fs/namespace.c
+index 1a9df6afb90b1..932986448a98a 100644
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -2647,7 +2647,12 @@ static int do_remount(struct path *path, int ms_flags, int sb_flags,
+ if (IS_ERR(fc))
+ return PTR_ERR(fc);
+
++ /*
++ * Indicate to the filesystem that the remount request is coming
++ * from the legacy mount system call.
++ */
+ fc->oldapi = true;
++
+ err = parse_monolithic_mount_data(fc, data);
+ if (!err) {
+ down_write(&sb->s_umount);
+@@ -2981,6 +2986,12 @@ static int do_new_mount(struct path *path, const char *fstype, int sb_flags,
+ if (IS_ERR(fc))
+ return PTR_ERR(fc);
+
++ /*
++ * Indicate to the filesystem that the mount request is coming
++ * from the legacy mount system call.
++ */
++ fc->oldapi = true;
++
+ if (subtype)
+ err = vfs_parse_fs_string(fc, "subtype",
+ subtype, strlen(subtype));
+diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
+index fe860c5387476..dc657b12822dd 100644
+--- a/fs/nfs/blocklayout/blocklayout.c
++++ b/fs/nfs/blocklayout/blocklayout.c
+@@ -599,6 +599,8 @@ retry:
+ nfs4_delete_deviceid(node->ld, node->nfs_client, id);
+ goto retry;
+ }
++
++ nfs4_put_deviceid_node(node);
+ return ERR_PTR(-ENODEV);
+ }
+
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index d65af9a60c35c..a865b384f0fc0 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -173,6 +173,7 @@ static int nfs4_map_errors(int err)
+ case -NFS4ERR_RESOURCE:
+ case -NFS4ERR_LAYOUTTRYLATER:
+ case -NFS4ERR_RECALLCONFLICT:
++ case -NFS4ERR_RETURNCONFLICT:
+ return -EREMOTEIO;
+ case -NFS4ERR_WRONGSEC:
+ case -NFS4ERR_WRONG_CRED:
+@@ -560,6 +561,7 @@ static int nfs4_do_handle_exception(struct nfs_server *server,
+ case -NFS4ERR_GRACE:
+ case -NFS4ERR_LAYOUTTRYLATER:
+ case -NFS4ERR_RECALLCONFLICT:
++ case -NFS4ERR_RETURNCONFLICT:
+ exception->delay = 1;
+ return 0;
+
+@@ -9588,6 +9590,7 @@ nfs4_layoutget_handle_exception(struct rpc_task *task,
+ status = -EBUSY;
+ break;
+ case -NFS4ERR_RECALLCONFLICT:
++ case -NFS4ERR_RETURNCONFLICT:
+ status = -ERECALLCONFLICT;
+ break;
+ case -NFS4ERR_DELEG_REVOKED:
+diff --git a/fs/pstore/ram_core.c b/fs/pstore/ram_core.c
+index ccdb71c3dc51d..ec321722384dc 100644
+--- a/fs/pstore/ram_core.c
++++ b/fs/pstore/ram_core.c
+@@ -190,7 +190,7 @@ static int persistent_ram_init_ecc(struct persistent_ram_zone *prz,
+ {
+ int numerr;
+ struct persistent_ram_buffer *buffer = prz->buffer;
+- int ecc_blocks;
++ size_t ecc_blocks;
+ size_t ecc_total;
+
+ if (!ecc_info || !ecc_info->ecc_size)
+diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h
+index a5db86670bdfa..a406e281ae571 100644
+--- a/include/crypto/if_alg.h
++++ b/include/crypto/if_alg.h
+@@ -138,6 +138,7 @@ struct af_alg_async_req {
+ * recvmsg is invoked.
+ * @init: True if metadata has been sent.
+ * @len: Length of memory allocated for this data structure.
++ * @inflight: Non-zero when AIO requests are in flight.
+ */
+ struct af_alg_ctx {
+ struct list_head tsgl_list;
+@@ -156,6 +157,8 @@ struct af_alg_ctx {
+ bool init;
+
+ unsigned int len;
++
++ unsigned int inflight;
+ };
+
+ int af_alg_register_type(const struct af_alg_type *type);
+diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h
+index c84783cd5abd7..3188b7a1d2c7c 100644
+--- a/include/drm/drm_bridge.h
++++ b/include/drm/drm_bridge.h
+@@ -191,7 +191,7 @@ struct drm_bridge_funcs {
+ * or &drm_encoder_helper_funcs.dpms hook.
+ *
+ * The bridge must assume that the display pipe (i.e. clocks and timing
+- * singals) feeding it is no longer running when this callback is
++ * signals) feeding it is no longer running when this callback is
+ * called.
+ *
+ * The @post_disable callback is optional.
+diff --git a/include/dt-bindings/clock/qcom,videocc-sm8150.h b/include/dt-bindings/clock/qcom,videocc-sm8150.h
+index e24ee840cfdb8..c557b78dc572f 100644
+--- a/include/dt-bindings/clock/qcom,videocc-sm8150.h
++++ b/include/dt-bindings/clock/qcom,videocc-sm8150.h
+@@ -16,6 +16,10 @@
+
+ /* VIDEO_CC Resets */
+ #define VIDEO_CC_MVSC_CORE_CLK_BCR 0
++#define VIDEO_CC_INTERFACE_BCR 1
++#define VIDEO_CC_MVS0_BCR 2
++#define VIDEO_CC_MVS1_BCR 3
++#define VIDEO_CC_MVSC_BCR 4
+
+ /* VIDEO_CC GDSCRs */
+ #define VENUS_GDSC 0
+diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
+index 67344dfe07a7c..905844172cfd8 100644
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -1334,11 +1334,12 @@ static inline bool bdev_is_partition(struct block_device *bdev)
+ enum blk_default_limits {
+ BLK_MAX_SEGMENTS = 128,
+ BLK_SAFE_MAX_SECTORS = 255,
+- BLK_DEF_MAX_SECTORS = 2560,
+ BLK_MAX_SEGMENT_SIZE = 65536,
+ BLK_SEG_BOUNDARY_MASK = 0xFFFFFFFFUL,
+ };
+
++#define BLK_DEF_MAX_SECTORS 2560u
++
+ static inline unsigned long queue_segment_boundary(const struct request_queue *q)
+ {
+ return q->limits.seg_boundary_mask;
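The hunk above pulls BLK_DEF_MAX_SECTORS out of the enum and makes it an explicitly unsigned define. A plausible motivation (an assumption on the editor's part, not stated in this patch) is that an unsigned literal composes cleanly with the kernel's type-checked min() helpers when clamping queue limits; a minimal sketch:

#include <linux/blkdev.h>
#include <linux/minmax.h>

/* Hypothetical sketch only: clamp a requested limit against the default.
 * queue_max_hw_sectors() returns unsigned int, so the 2560u literal avoids
 * a signed/unsigned type mismatch inside min(). */
static unsigned int pick_max_sectors(struct request_queue *q)
{
	return min(BLK_DEF_MAX_SECTORS, queue_max_hw_sectors(q));
}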
+diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
+index f59c875271a0e..6cace09ba63d0 100644
+--- a/include/linux/clk-provider.h
++++ b/include/linux/clk-provider.h
+@@ -350,7 +350,7 @@ struct clk_hw *__clk_hw_register_fixed_rate(struct device *dev,
+ const char *parent_name, const struct clk_hw *parent_hw,
+ const struct clk_parent_data *parent_data, unsigned long flags,
+ unsigned long fixed_rate, unsigned long fixed_accuracy,
+- unsigned long clk_fixed_flags);
++ unsigned long clk_fixed_flags, bool devm);
+ struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
+ const char *parent_name, unsigned long flags,
+ unsigned long fixed_rate);
+@@ -365,7 +365,20 @@ struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
+ */
+ #define clk_hw_register_fixed_rate(dev, name, parent_name, flags, fixed_rate) \
+ __clk_hw_register_fixed_rate((dev), NULL, (name), (parent_name), NULL, \
+- NULL, (flags), (fixed_rate), 0, 0)
++ NULL, (flags), (fixed_rate), 0, 0, false)
++
++/**
++ * devm_clk_hw_register_fixed_rate - register fixed-rate clock with the clock
++ * framework
++ * @dev: device that is registering this clock
++ * @name: name of this clock
++ * @parent_name: name of clock's parent
++ * @flags: framework-specific flags
++ * @fixed_rate: non-adjustable clock rate
++ */
++#define devm_clk_hw_register_fixed_rate(dev, name, parent_name, flags, fixed_rate) \
++ __clk_hw_register_fixed_rate((dev), NULL, (name), (parent_name), NULL, \
++ NULL, (flags), (fixed_rate), 0, 0, true)
+ /**
+ * clk_hw_register_fixed_rate_parent_hw - register fixed-rate clock with
+ * the clock framework
+@@ -378,7 +391,7 @@ struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
+ #define clk_hw_register_fixed_rate_parent_hw(dev, name, parent_hw, flags, \
+ fixed_rate) \
+ __clk_hw_register_fixed_rate((dev), NULL, (name), NULL, (parent_hw), \
+- NULL, (flags), (fixed_rate), 0, 0)
++ NULL, (flags), (fixed_rate), 0, 0, false)
+ /**
+ * clk_hw_register_fixed_rate_parent_data - register fixed-rate clock with
+ * the clock framework
+@@ -392,7 +405,7 @@ struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
+ fixed_rate) \
+ __clk_hw_register_fixed_rate((dev), NULL, (name), NULL, NULL, \
+ (parent_data), (flags), (fixed_rate), 0, \
+- 0)
++ 0, false)
+ /**
+ * clk_hw_register_fixed_rate_with_accuracy - register fixed-rate clock with
+ * the clock framework
+@@ -408,7 +421,7 @@ struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
+ fixed_accuracy) \
+ __clk_hw_register_fixed_rate((dev), NULL, (name), (parent_name), \
+ NULL, NULL, (flags), (fixed_rate), \
+- (fixed_accuracy), 0)
++ (fixed_accuracy), 0, false)
+ /**
+ * clk_hw_register_fixed_rate_with_accuracy_parent_hw - register fixed-rate
+ * clock with the clock framework
+@@ -421,9 +434,9 @@ struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
+ */
+ #define clk_hw_register_fixed_rate_with_accuracy_parent_hw(dev, name, \
+ parent_hw, flags, fixed_rate, fixed_accuracy) \
+- __clk_hw_register_fixed_rate((dev), NULL, (name), NULL, (parent_hw) \
+- NULL, NULL, (flags), (fixed_rate), \
+- (fixed_accuracy), 0)
++ __clk_hw_register_fixed_rate((dev), NULL, (name), NULL, (parent_hw), \
++ NULL, (flags), (fixed_rate), \
++ (fixed_accuracy), 0, false)
+ /**
+ * clk_hw_register_fixed_rate_with_accuracy_parent_data - register fixed-rate
+ * clock with the clock framework
+@@ -438,7 +451,21 @@ struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
+ parent_data, flags, fixed_rate, fixed_accuracy) \
+ __clk_hw_register_fixed_rate((dev), NULL, (name), NULL, NULL, \
+ (parent_data), NULL, (flags), \
+- (fixed_rate), (fixed_accuracy), 0)
++ (fixed_rate), (fixed_accuracy), 0, false)
++/**
++ * clk_hw_register_fixed_rate_parent_accuracy - register fixed-rate clock with
++ * the clock framework
++ * @dev: device that is registering this clock
++ * @name: name of this clock
++ * @parent_name: name of clock's parent
++ * @flags: framework-specific flags
++ * @fixed_rate: non-adjustable clock rate
++ */
++#define clk_hw_register_fixed_rate_parent_accuracy(dev, name, parent_data, \
++ flags, fixed_rate) \
++ __clk_hw_register_fixed_rate((dev), NULL, (name), NULL, NULL, \
++ (parent_data), (flags), (fixed_rate), 0, \
++ CLK_FIXED_RATE_PARENT_ACCURACY, false)
+
+ void clk_unregister_fixed_rate(struct clk *clk);
+ void clk_hw_unregister_fixed_rate(struct clk_hw *hw);
+@@ -915,6 +942,13 @@ struct clk *clk_register_mux_table(struct device *dev, const char *name,
+ (parent_names), NULL, NULL, (flags), (reg), \
+ (shift), (mask), (clk_mux_flags), (table), \
+ (lock))
++#define clk_hw_register_mux_table_parent_data(dev, name, parent_data, \
++ num_parents, flags, reg, shift, mask, \
++ clk_mux_flags, table, lock) \
++ __clk_hw_register_mux((dev), NULL, (name), (num_parents), \
++ NULL, NULL, (parent_data), (flags), (reg), \
++ (shift), (mask), (clk_mux_flags), (table), \
++ (lock))
+ #define clk_hw_register_mux(dev, name, parent_names, num_parents, flags, reg, \
+ shift, width, clk_mux_flags, lock) \
+ __clk_hw_register_mux((dev), NULL, (name), (num_parents), \
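With the devm_clk_hw_register_fixed_rate() wrapper introduced above, a driver can register a fixed-rate clock without providing an explicit unregister path. A minimal, hypothetical probe sketch (the device, clock name and rate are made up for illustration):

#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int example_clk_probe(struct platform_device *pdev)
{
	struct clk_hw *hw;

	/* Managed registration: the clock is dropped automatically when the
	 * device is unbound, so no remove() counterpart is needed. */
	hw = devm_clk_hw_register_fixed_rate(&pdev->dev, "example-xo",
					     NULL, 0, 19200000);
	if (IS_ERR(hw))
		return PTR_ERR(hw);

	return 0;
}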
+diff --git a/include/linux/dma-map-ops.h b/include/linux/dma-map-ops.h
+index 0d5b06b3a4a61..bfffe494356ad 100644
+--- a/include/linux/dma-map-ops.h
++++ b/include/linux/dma-map-ops.h
+@@ -166,6 +166,7 @@ static inline void dma_pernuma_cma_reserve(void) { }
+ #ifdef CONFIG_DMA_DECLARE_COHERENT
+ int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
+ dma_addr_t device_addr, size_t size);
++void dma_release_coherent_memory(struct device *dev);
+ int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
+ dma_addr_t *dma_handle, void **ret);
+ int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr);
+@@ -177,9 +178,11 @@ static inline int dma_declare_coherent_memory(struct device *dev,
+ {
+ return -ENOSYS;
+ }
++
+ #define dma_alloc_from_dev_coherent(dev, size, handle, ret) (0)
+ #define dma_release_from_dev_coherent(dev, order, vaddr) (0)
+ #define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0)
++static inline void dma_release_coherent_memory(struct device *dev) { }
+ #endif /* CONFIG_DMA_DECLARE_COHERENT */
+
+ #ifdef CONFIG_DMA_GLOBAL_POOL
+diff --git a/include/linux/iio/adc/adi-axi-adc.h b/include/linux/iio/adc/adi-axi-adc.h
+index 52620e5b80522..b7904992d5619 100644
+--- a/include/linux/iio/adc/adi-axi-adc.h
++++ b/include/linux/iio/adc/adi-axi-adc.h
+@@ -41,6 +41,7 @@ struct adi_axi_adc_chip_info {
+ * @reg_access IIO debugfs_reg_access hook for the client ADC
+ * @read_raw IIO read_raw hook for the client ADC
+ * @write_raw IIO write_raw hook for the client ADC
++ * @read_avail IIO read_avail hook for the client ADC
+ */
+ struct adi_axi_adc_conv {
+ const struct adi_axi_adc_chip_info *chip_info;
+@@ -54,6 +55,9 @@ struct adi_axi_adc_conv {
+ int (*write_raw)(struct adi_axi_adc_conv *conv,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask);
++ int (*read_avail)(struct adi_axi_adc_conv *conv,
++ struct iio_chan_spec const *chan,
++ const int **val, int *type, int *length, long mask);
+ };
+
+ struct adi_axi_adc_conv *devm_adi_axi_adc_conv_register(struct device *dev,
+diff --git a/include/linux/of.h b/include/linux/of.h
+index 6f15e8b0f9d10..29f657101f4f8 100644
+--- a/include/linux/of.h
++++ b/include/linux/of.h
+@@ -415,130 +415,6 @@ extern int of_detach_node(struct device_node *);
+
+ #define of_match_ptr(_ptr) (_ptr)
+
+-/**
+- * of_property_read_u8_array - Find and read an array of u8 from a property.
+- *
+- * @np: device node from which the property value is to be read.
+- * @propname: name of the property to be searched.
+- * @out_values: pointer to return value, modified only if return value is 0.
+- * @sz: number of array elements to read
+- *
+- * Search for a property in a device node and read 8-bit value(s) from
+- * it.
+- *
+- * dts entry of array should be like:
+- * ``property = /bits/ 8 <0x50 0x60 0x70>;``
+- *
+- * Return: 0 on success, -EINVAL if the property does not exist,
+- * -ENODATA if property does not have a value, and -EOVERFLOW if the
+- * property data isn't large enough.
+- *
+- * The out_values is modified only if a valid u8 value can be decoded.
+- */
+-static inline int of_property_read_u8_array(const struct device_node *np,
+- const char *propname,
+- u8 *out_values, size_t sz)
+-{
+- int ret = of_property_read_variable_u8_array(np, propname, out_values,
+- sz, 0);
+- if (ret >= 0)
+- return 0;
+- else
+- return ret;
+-}
+-
+-/**
+- * of_property_read_u16_array - Find and read an array of u16 from a property.
+- *
+- * @np: device node from which the property value is to be read.
+- * @propname: name of the property to be searched.
+- * @out_values: pointer to return value, modified only if return value is 0.
+- * @sz: number of array elements to read
+- *
+- * Search for a property in a device node and read 16-bit value(s) from
+- * it.
+- *
+- * dts entry of array should be like:
+- * ``property = /bits/ 16 <0x5000 0x6000 0x7000>;``
+- *
+- * Return: 0 on success, -EINVAL if the property does not exist,
+- * -ENODATA if property does not have a value, and -EOVERFLOW if the
+- * property data isn't large enough.
+- *
+- * The out_values is modified only if a valid u16 value can be decoded.
+- */
+-static inline int of_property_read_u16_array(const struct device_node *np,
+- const char *propname,
+- u16 *out_values, size_t sz)
+-{
+- int ret = of_property_read_variable_u16_array(np, propname, out_values,
+- sz, 0);
+- if (ret >= 0)
+- return 0;
+- else
+- return ret;
+-}
+-
+-/**
+- * of_property_read_u32_array - Find and read an array of 32 bit integers
+- * from a property.
+- *
+- * @np: device node from which the property value is to be read.
+- * @propname: name of the property to be searched.
+- * @out_values: pointer to return value, modified only if return value is 0.
+- * @sz: number of array elements to read
+- *
+- * Search for a property in a device node and read 32-bit value(s) from
+- * it.
+- *
+- * Return: 0 on success, -EINVAL if the property does not exist,
+- * -ENODATA if property does not have a value, and -EOVERFLOW if the
+- * property data isn't large enough.
+- *
+- * The out_values is modified only if a valid u32 value can be decoded.
+- */
+-static inline int of_property_read_u32_array(const struct device_node *np,
+- const char *propname,
+- u32 *out_values, size_t sz)
+-{
+- int ret = of_property_read_variable_u32_array(np, propname, out_values,
+- sz, 0);
+- if (ret >= 0)
+- return 0;
+- else
+- return ret;
+-}
+-
+-/**
+- * of_property_read_u64_array - Find and read an array of 64 bit integers
+- * from a property.
+- *
+- * @np: device node from which the property value is to be read.
+- * @propname: name of the property to be searched.
+- * @out_values: pointer to return value, modified only if return value is 0.
+- * @sz: number of array elements to read
+- *
+- * Search for a property in a device node and read 64-bit value(s) from
+- * it.
+- *
+- * Return: 0 on success, -EINVAL if the property does not exist,
+- * -ENODATA if property does not have a value, and -EOVERFLOW if the
+- * property data isn't large enough.
+- *
+- * The out_values is modified only if a valid u64 value can be decoded.
+- */
+-static inline int of_property_read_u64_array(const struct device_node *np,
+- const char *propname,
+- u64 *out_values, size_t sz)
+-{
+- int ret = of_property_read_variable_u64_array(np, propname, out_values,
+- sz, 0);
+- if (ret >= 0)
+- return 0;
+- else
+- return ret;
+-}
+-
+ /*
+ * struct property *prop;
+ * const __be32 *p;
+@@ -733,32 +609,6 @@ static inline int of_property_count_elems_of_size(const struct device_node *np,
+ return -ENOSYS;
+ }
+
+-static inline int of_property_read_u8_array(const struct device_node *np,
+- const char *propname, u8 *out_values, size_t sz)
+-{
+- return -ENOSYS;
+-}
+-
+-static inline int of_property_read_u16_array(const struct device_node *np,
+- const char *propname, u16 *out_values, size_t sz)
+-{
+- return -ENOSYS;
+-}
+-
+-static inline int of_property_read_u32_array(const struct device_node *np,
+- const char *propname,
+- u32 *out_values, size_t sz)
+-{
+- return -ENOSYS;
+-}
+-
+-static inline int of_property_read_u64_array(const struct device_node *np,
+- const char *propname,
+- u64 *out_values, size_t sz)
+-{
+- return -ENOSYS;
+-}
+-
+ static inline int of_property_read_u32_index(const struct device_node *np,
+ const char *propname, u32 index, u32 *out_value)
+ {
+@@ -1223,7 +1073,8 @@ static inline int of_property_read_string_index(const struct device_node *np,
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+ *
+- * Search for a property in a device node.
++ * Search for a boolean property in a device node. Usage on non-boolean
++ * property types is deprecated.
+ *
+ * Return: true if the property exists false otherwise.
+ */
+@@ -1235,6 +1086,144 @@ static inline bool of_property_read_bool(const struct device_node *np,
+ return prop ? true : false;
+ }
+
++/**
++ * of_property_present - Test if a property is present in a node
++ * @np: device node to search for the property.
++ * @propname: name of the property to be searched.
++ *
++ * Test for a property present in a device node.
++ *
++ * Return: true if the property exists false otherwise.
++ */
++static inline bool of_property_present(const struct device_node *np, const char *propname)
++{
++ return of_property_read_bool(np, propname);
++}
++
++/**
++ * of_property_read_u8_array - Find and read an array of u8 from a property.
++ *
++ * @np: device node from which the property value is to be read.
++ * @propname: name of the property to be searched.
++ * @out_values: pointer to return value, modified only if return value is 0.
++ * @sz: number of array elements to read
++ *
++ * Search for a property in a device node and read 8-bit value(s) from
++ * it.
++ *
++ * dts entry of array should be like:
++ * ``property = /bits/ 8 <0x50 0x60 0x70>;``
++ *
++ * Return: 0 on success, -EINVAL if the property does not exist,
++ * -ENODATA if property does not have a value, and -EOVERFLOW if the
++ * property data isn't large enough.
++ *
++ * The out_values is modified only if a valid u8 value can be decoded.
++ */
++static inline int of_property_read_u8_array(const struct device_node *np,
++ const char *propname,
++ u8 *out_values, size_t sz)
++{
++ int ret = of_property_read_variable_u8_array(np, propname, out_values,
++ sz, 0);
++ if (ret >= 0)
++ return 0;
++ else
++ return ret;
++}
++
++/**
++ * of_property_read_u16_array - Find and read an array of u16 from a property.
++ *
++ * @np: device node from which the property value is to be read.
++ * @propname: name of the property to be searched.
++ * @out_values: pointer to return value, modified only if return value is 0.
++ * @sz: number of array elements to read
++ *
++ * Search for a property in a device node and read 16-bit value(s) from
++ * it.
++ *
++ * dts entry of array should be like:
++ * ``property = /bits/ 16 <0x5000 0x6000 0x7000>;``
++ *
++ * Return: 0 on success, -EINVAL if the property does not exist,
++ * -ENODATA if property does not have a value, and -EOVERFLOW if the
++ * property data isn't large enough.
++ *
++ * The out_values is modified only if a valid u16 value can be decoded.
++ */
++static inline int of_property_read_u16_array(const struct device_node *np,
++ const char *propname,
++ u16 *out_values, size_t sz)
++{
++ int ret = of_property_read_variable_u16_array(np, propname, out_values,
++ sz, 0);
++ if (ret >= 0)
++ return 0;
++ else
++ return ret;
++}
++
++/**
++ * of_property_read_u32_array - Find and read an array of 32 bit integers
++ * from a property.
++ *
++ * @np: device node from which the property value is to be read.
++ * @propname: name of the property to be searched.
++ * @out_values: pointer to return value, modified only if return value is 0.
++ * @sz: number of array elements to read
++ *
++ * Search for a property in a device node and read 32-bit value(s) from
++ * it.
++ *
++ * Return: 0 on success, -EINVAL if the property does not exist,
++ * -ENODATA if property does not have a value, and -EOVERFLOW if the
++ * property data isn't large enough.
++ *
++ * The out_values is modified only if a valid u32 value can be decoded.
++ */
++static inline int of_property_read_u32_array(const struct device_node *np,
++ const char *propname,
++ u32 *out_values, size_t sz)
++{
++ int ret = of_property_read_variable_u32_array(np, propname, out_values,
++ sz, 0);
++ if (ret >= 0)
++ return 0;
++ else
++ return ret;
++}
++
++/**
++ * of_property_read_u64_array - Find and read an array of 64 bit integers
++ * from a property.
++ *
++ * @np: device node from which the property value is to be read.
++ * @propname: name of the property to be searched.
++ * @out_values: pointer to return value, modified only if return value is 0.
++ * @sz: number of array elements to read
++ *
++ * Search for a property in a device node and read 64-bit value(s) from
++ * it.
++ *
++ * Return: 0 on success, -EINVAL if the property does not exist,
++ * -ENODATA if property does not have a value, and -EOVERFLOW if the
++ * property data isn't large enough.
++ *
++ * The out_values is modified only if a valid u64 value can be decoded.
++ */
++static inline int of_property_read_u64_array(const struct device_node *np,
++ const char *propname,
++ u64 *out_values, size_t sz)
++{
++ int ret = of_property_read_variable_u64_array(np, propname, out_values,
++ sz, 0);
++ if (ret >= 0)
++ return 0;
++ else
++ return ret;
++}
++
+ static inline int of_property_read_u8(const struct device_node *np,
+ const char *propname,
+ u8 *out_value)
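The of.h reshuffle above adds of_property_present() and relocates the fixed-size array readers below it. A short usage sketch of how a driver would typically consume these helpers (the property names are hypothetical):

#include <linux/kernel.h>
#include <linux/of.h>

static int example_parse_dt(struct device_node *np)
{
	u32 regs[3];
	int ret;

	/* Presence test for a non-boolean property; per the updated
	 * kernel-doc this is preferred over of_property_read_bool(). */
	if (!of_property_present(np, "example,enable-feature"))
		return 0;

	/* Reads ARRAY_SIZE(regs) cells; fails with -EOVERFLOW when the
	 * property data is too short. */
	ret = of_property_read_u32_array(np, "example,reg-values",
					 regs, ARRAY_SIZE(regs));
	if (ret)
		return ret;

	return 0;
}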
+diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
+index 9a21ccbb5b5c1..b6114bc0dd0f2 100644
+--- a/include/net/bluetooth/hci_core.h
++++ b/include/net/bluetooth/hci_core.h
+@@ -852,7 +852,6 @@ void hci_inquiry_cache_flush(struct hci_dev *hdev);
+ /* ----- HCI Connections ----- */
+ enum {
+ HCI_CONN_AUTH_PEND,
+- HCI_CONN_REAUTH_PEND,
+ HCI_CONN_ENCRYPT_PEND,
+ HCI_CONN_RSWITCH_PEND,
+ HCI_CONN_MODE_CHANGE_PEND,
+diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
+index f5dcf7c9b7076..6033eaddcb749 100644
+--- a/include/trace/events/f2fs.h
++++ b/include/trace/events/f2fs.h
+@@ -540,17 +540,17 @@ TRACE_EVENT(f2fs_truncate_partial_nodes,
+
+ TRACE_EVENT(f2fs_file_write_iter,
+
+- TP_PROTO(struct inode *inode, unsigned long offset,
+- unsigned long length, int ret),
++ TP_PROTO(struct inode *inode, loff_t offset, size_t length,
++ ssize_t ret),
+
+ TP_ARGS(inode, offset, length, ret),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+- __field(unsigned long, offset)
+- __field(unsigned long, length)
+- __field(int, ret)
++ __field(loff_t, offset)
++ __field(size_t, length)
++ __field(ssize_t, ret)
+ ),
+
+ TP_fast_assign(
+@@ -562,7 +562,7 @@ TRACE_EVENT(f2fs_file_write_iter,
+ ),
+
+ TP_printk("dev = (%d,%d), ino = %lu, "
+- "offset = %lu, length = %lu, written(err) = %d",
++ "offset = %lld, length = %zu, written(err) = %zd",
+ show_dev_ino(__entry),
+ __entry->offset,
+ __entry->length,
+diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
+index 9fb06a511250f..1da082a8c4ecb 100644
+--- a/include/uapi/linux/bpf.h
++++ b/include/uapi/linux/bpf.h
+@@ -4245,6 +4245,8 @@ union bpf_attr {
+ * long bpf_get_task_stack(struct task_struct *task, void *buf, u32 size, u64 flags)
+ * Description
+ * Return a user or a kernel stack in bpf program provided buffer.
++ * Note: the user stack will only be populated if the *task* is
++ * the current task; all other tasks will return -EOPNOTSUPP.
+ * To achieve this, the helper needs *task*, which is a valid
+ * pointer to **struct task_struct**. To store the stacktrace, the
+ * bpf program provides *buf* with a nonnegative *size*.
+@@ -4256,6 +4258,7 @@ union bpf_attr {
+ *
+ * **BPF_F_USER_STACK**
+ * Collect a user space stack instead of a kernel stack.
++ * The *task* must be the current task.
+ * **BPF_F_USER_BUILD_ID**
+ * Collect buildid+offset instead of ips for user stack,
+ * only valid if **BPF_F_USER_STACK** is also specified.
+diff --git a/init/do_mounts.c b/init/do_mounts.c
+index 762b534978d95..c542570a24dfa 100644
+--- a/init/do_mounts.c
++++ b/init/do_mounts.c
+@@ -666,7 +666,10 @@ struct file_system_type rootfs_fs_type = {
+
+ void __init init_rootfs(void)
+ {
+- if (IS_ENABLED(CONFIG_TMPFS) && !saved_root_name[0] &&
+- (!root_fs_names || strstr(root_fs_names, "tmpfs")))
+- is_tmpfs = true;
++ if (IS_ENABLED(CONFIG_TMPFS)) {
++ if (!saved_root_name[0] && !root_fs_names)
++ is_tmpfs = true;
++ else if (root_fs_names && !!strstr(root_fs_names, "tmpfs"))
++ is_tmpfs = true;
++ }
+ }
+diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
+index 30535d4edee75..55fd6d98fe121 100644
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -3490,14 +3490,17 @@ static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
+ struct iovec *iov = iorw->fast_iov;
+ int ret;
+
++ iorw->bytes_done = 0;
++ iorw->free_iovec = NULL;
++
+ ret = io_import_iovec(rw, req, &iov, &iorw->iter, false);
+ if (unlikely(ret < 0))
+ return ret;
+
+- iorw->bytes_done = 0;
+- iorw->free_iovec = iov;
+- if (iov)
++ if (iov) {
++ iorw->free_iovec = iov;
+ req->flags |= REQ_F_NEED_CLEANUP;
++ }
+ iov_iter_save_state(&iorw->iter, &iorw->iter_state);
+ return 0;
+ }
+diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c
+index 423549d2c52e2..4ea7fb0ca1ad4 100644
+--- a/kernel/bpf/lpm_trie.c
++++ b/kernel/bpf/lpm_trie.c
+@@ -230,6 +230,9 @@ static void *trie_lookup_elem(struct bpf_map *map, void *_key)
+ struct lpm_trie_node *node, *found = NULL;
+ struct bpf_lpm_trie_key *key = _key;
+
++ if (key->prefixlen > trie->max_prefixlen)
++ return NULL;
++
+ /* Start walking the trie from the root node ... */
+
+ for (node = rcu_dereference_check(trie->root, rcu_read_lock_bh_held());
+diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
+index 7efae3af62017..f8587abef73c1 100644
+--- a/kernel/bpf/stackmap.c
++++ b/kernel/bpf/stackmap.c
+@@ -435,6 +435,7 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
+ {
+ u32 trace_nr, copy_len, elem_size, num_elem, max_depth;
+ bool user_build_id = flags & BPF_F_USER_BUILD_ID;
++ bool crosstask = task && task != current;
+ u32 skip = flags & BPF_F_SKIP_FIELD_MASK;
+ bool user = flags & BPF_F_USER_STACK;
+ struct perf_callchain_entry *trace;
+@@ -457,6 +458,14 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
+ if (task && user && !user_mode(regs))
+ goto err_fault;
+
++ /* get_perf_callchain does not support crosstask user stack walking
++ * but returns an empty stack instead of NULL.
++ */
++ if (crosstask && user) {
++ err = -EOPNOTSUPP;
++ goto clear;
++ }
++
+ num_elem = size / elem_size;
+ max_depth = num_elem + skip;
+ if (sysctl_perf_event_max_stack < max_depth)
+@@ -468,7 +477,7 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
+ trace = get_callchain_entry_for_task(task, max_depth);
+ else
+ trace = get_perf_callchain(regs, 0, kernel, user, max_depth,
+- false, false);
++ crosstask, false);
+ if (unlikely(!trace))
+ goto err_fault;
+
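The stackmap change above makes cross-task user-stack requests fail with -EOPNOTSUPP instead of silently returning an empty trace. A rough BPF-side sketch of the resulting contract (it follows the usual task-iterator pattern; treat it as illustrative, not as selftest code):

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

char LICENSE[] SEC("license") = "GPL";

SEC("iter/task")
int dump_stacks(struct bpf_iter__task *ctx)
{
	struct task_struct *task = ctx->task;
	__u64 entries[16];

	if (!task)
		return 0;

	/* Kernel stack of an arbitrary task: still supported. */
	if (bpf_get_task_stack(task, entries, sizeof(entries), 0) < 0)
		return 0;

	/* User stack of a task other than the current one: now fails with
	 * -EOPNOTSUPP instead of producing an empty trace. */
	if (bpf_get_task_stack(task, entries, sizeof(entries),
			       BPF_F_USER_STACK) < 0)
		return 0;

	return 0;
}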
+diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
+index ad41b8230780b..dbe98040e855e 100644
+--- a/kernel/bpf/syscall.c
++++ b/kernel/bpf/syscall.c
+@@ -2761,6 +2761,10 @@ static int bpf_tracing_prog_attach(struct bpf_prog *prog,
+ *
+ * - if prog->aux->dst_trampoline and tgt_prog is NULL, the program
+ * was detached and is going for re-attachment.
++ *
++ * - if prog->aux->dst_trampoline is NULL and tgt_prog and prog->aux->attach_btf
++ * are NULL, then program was already attached and user did not provide
++ * tgt_prog_fd so we have no way to find out or create trampoline
+ */
+ if (!prog->aux->dst_trampoline && !tgt_prog) {
+ /*
+@@ -2774,6 +2778,11 @@ static int bpf_tracing_prog_attach(struct bpf_prog *prog,
+ err = -EINVAL;
+ goto out_unlock;
+ }
++ /* We can allow re-attach only if we have valid attach_btf. */
++ if (!prog->aux->attach_btf) {
++ err = -EINVAL;
++ goto out_unlock;
++ }
+ btf_id = prog->aux->attach_btf_id;
+ key = bpf_trampoline_compute_key(NULL, prog->aux->attach_btf, btf_id);
+ }
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 5d8f352faebda..88a468cc05103 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -2844,7 +2844,7 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
+ * so it's aligned access and [off, off + size) are within stack limits
+ */
+ if (!env->allow_ptr_leaks &&
+- state->stack[spi].slot_type[0] == STACK_SPILL &&
++ is_spilled_reg(&state->stack[spi]) &&
+ size != BPF_REG_SIZE) {
+ verbose(env, "attempt to corrupt spilled pointer on stack\n");
+ return -EACCES;
+@@ -4305,10 +4305,7 @@ static int check_stack_access_within_bounds(
+
+ if (tnum_is_const(reg->var_off)) {
+ min_off = reg->var_off.value + off;
+- if (access_size > 0)
+- max_off = min_off + access_size - 1;
+- else
+- max_off = min_off;
++ max_off = min_off + access_size;
+ } else {
+ if (reg->smax_value >= BPF_MAX_VAR_OFF ||
+ reg->smin_value <= -BPF_MAX_VAR_OFF) {
+@@ -4317,15 +4314,12 @@ static int check_stack_access_within_bounds(
+ return -EACCES;
+ }
+ min_off = reg->smin_value + off;
+- if (access_size > 0)
+- max_off = reg->smax_value + off + access_size - 1;
+- else
+- max_off = min_off;
++ max_off = reg->smax_value + off + access_size;
+ }
+
+ err = check_stack_slot_within_bounds(min_off, state, type);
+- if (!err)
+- err = check_stack_slot_within_bounds(max_off, state, type);
++ if (!err && max_off > 0)
++ err = -EINVAL; /* out of stack access into non-negative offsets */
+
+ if (err) {
+ if (tnum_is_const(reg->var_off)) {
+@@ -6244,6 +6238,13 @@ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
+ verbose(env, "R0 not a scalar value\n");
+ return -EACCES;
+ }
++
++ /* we are going to rely on register's precise value */
++ err = mark_reg_read(env, r0, r0->parent, REG_LIVE_READ64);
++ err = err ?: mark_chain_precision(env, BPF_REG_0);
++ if (err)
++ return err;
++
+ if (!tnum_in(range, r0->var_off)) {
+ verbose_invalid_scalar(env, r0, &range, "callback return", "R0");
+ return -EINVAL;
+@@ -7294,6 +7295,10 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
+ }
+
+ switch (base_type(ptr_reg->type)) {
++ case PTR_TO_FLOW_KEYS:
++ if (known)
++ break;
++ fallthrough;
+ case CONST_PTR_TO_MAP:
+ /* smin_val represents the known value */
+ if (known && smin_val == 0 && opcode == BPF_ADD)
+diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
+index ead4da9471270..23723c5727aac 100644
+--- a/kernel/debug/kdb/kdb_main.c
++++ b/kernel/debug/kdb/kdb_main.c
+@@ -1350,8 +1350,6 @@ do_full_getstr:
+ /* PROMPT can only be set if we have MEM_READ permission. */
+ snprintf(kdb_prompt_str, CMD_BUFLEN, kdbgetenv("PROMPT"),
+ raw_smp_processor_id());
+- if (defcmd_in_progress)
+- strncat(kdb_prompt_str, "[defcmd]", CMD_BUFLEN);
+
+ /*
+ * Fetch command from keyboard
+diff --git a/kernel/dma/coherent.c b/kernel/dma/coherent.c
+index 25fc85a7aebef..2df824fa40ef2 100644
+--- a/kernel/dma/coherent.c
++++ b/kernel/dma/coherent.c
+@@ -75,7 +75,7 @@ out_unmap_membase:
+ return ERR_PTR(-ENOMEM);
+ }
+
+-static void dma_release_coherent_memory(struct dma_coherent_mem *mem)
++static void _dma_release_coherent_memory(struct dma_coherent_mem *mem)
+ {
+ if (!mem)
+ return;
+@@ -127,10 +127,18 @@ int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
+
+ ret = dma_assign_coherent_memory(dev, mem);
+ if (ret)
+- dma_release_coherent_memory(mem);
++ _dma_release_coherent_memory(mem);
+ return ret;
+ }
+
++void dma_release_coherent_memory(struct device *dev)
++{
++ if (dev) {
++ _dma_release_coherent_memory(dev->dma_mem);
++ dev->dma_mem = NULL;
++ }
++}
++
+ static void *__dma_alloc_from_coherent(struct device *dev,
+ struct dma_coherent_mem *mem,
+ ssize_t size, dma_addr_t *dma_handle)
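dma_release_coherent_memory() is now exported through dma-map-ops.h (see the header hunk earlier), so a driver that declared a per-device coherent region can tear it down explicitly. A hypothetical probe/remove pairing, with the FOO_* values standing in for board-specific addresses:

#include <linux/dma-map-ops.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>

#define FOO_MEM_PHYS		0x40000000UL	/* hypothetical carve-out base */
#define FOO_MEM_DEV_ADDR	0x40000000UL	/* device-visible address */
#define FOO_MEM_SIZE		SZ_1M

static int foo_probe(struct platform_device *pdev)
{
	int ret;

	ret = dma_declare_coherent_memory(&pdev->dev, FOO_MEM_PHYS,
					  FOO_MEM_DEV_ADDR, FOO_MEM_SIZE);
	if (ret)
		return ret;

	/* ... allocate from the pool with dma_alloc_coherent() ... */
	return 0;
}

static int foo_remove(struct platform_device *pdev)
{
	/* New explicit counterpart to dma_declare_coherent_memory(). */
	dma_release_coherent_memory(&pdev->dev);
	return 0;
}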
+diff --git a/kernel/kprobes.c b/kernel/kprobes.c
+index 07d36cee2a800..5d713a7d7e874 100644
+--- a/kernel/kprobes.c
++++ b/kernel/kprobes.c
+@@ -549,17 +549,15 @@ static void do_unoptimize_kprobes(void)
+ /* See comment in do_optimize_kprobes() */
+ lockdep_assert_cpus_held();
+
+- /* Unoptimization must be done anytime */
+- if (list_empty(&unoptimizing_list))
+- return;
++ if (!list_empty(&unoptimizing_list))
++ arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
+
+- arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
+- /* Loop free_list for disarming */
++ /* Loop on 'freeing_list' for disarming and removing from kprobe hash list */
+ list_for_each_entry_safe(op, tmp, &freeing_list, list) {
+ /* Switching from detour code to origin */
+ op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
+- /* Disarm probes if marked disabled */
+- if (kprobe_disabled(&op->kp))
++ /* Disarm probes if marked disabled and not gone */
++ if (kprobe_disabled(&op->kp) && !kprobe_gone(&op->kp))
+ arch_disarm_kprobe(&op->kp);
+ if (kprobe_unused(&op->kp)) {
+ /*
+@@ -788,14 +786,13 @@ static void kill_optimized_kprobe(struct kprobe *p)
+ op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
+
+ if (kprobe_unused(p)) {
+- /* Enqueue if it is unused */
+- list_add(&op->list, &freeing_list);
+ /*
+- * Remove unused probes from the hash list. After waiting
+- * for synchronization, this probe is reclaimed.
+- * (reclaiming is done by do_free_cleaned_kprobes().)
++ * Unused kprobe is on unoptimizing or freeing list. We move it
++ * to freeing_list and let the kprobe_optimizer() remove it from
++ * the kprobe hash list and free it.
+ */
+- hlist_del_rcu(&op->kp.hlist);
++ if (optprobe_queued_unopt(op))
++ list_move(&op->list, &freeing_list);
+ }
+
+ /* Don't touch the code, because it is already freed. */
+diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
+index 7f5310d1a4d6a..8f4788be9e856 100644
+--- a/kernel/time/tick-sched.c
++++ b/kernel/time/tick-sched.c
+@@ -1528,13 +1528,18 @@ void tick_setup_sched_timer(void)
+ void tick_cancel_sched_timer(int cpu)
+ {
+ struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
++ ktime_t idle_sleeptime, iowait_sleeptime;
+
+ # ifdef CONFIG_HIGH_RES_TIMERS
+ if (ts->sched_timer.base)
+ hrtimer_cancel(&ts->sched_timer);
+ # endif
+
++ idle_sleeptime = ts->idle_sleeptime;
++ iowait_sleeptime = ts->iowait_sleeptime;
+ memset(ts, 0, sizeof(*ts));
++ ts->idle_sleeptime = idle_sleeptime;
++ ts->iowait_sleeptime = iowait_sleeptime;
+ }
+ #endif
+
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index 65a728b89574c..374cfba6c3068 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -3603,6 +3603,12 @@ rb_reserve_next_event(struct trace_buffer *buffer,
+ int nr_loops = 0;
+ int add_ts_default;
+
++ /* ring buffer does cmpxchg, make sure it is safe in NMI context */
++ if (!IS_ENABLED(CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG) &&
++ (unlikely(in_nmi()))) {
++ return NULL;
++ }
++
+ rb_start_commit(cpu_buffer);
+ /* The commit page can not change after this */
+
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 91ff118138c88..bac5505b243e9 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -4667,7 +4667,11 @@ static int s_show(struct seq_file *m, void *v)
+ iter->leftover = ret;
+
+ } else {
+- print_trace_line(iter);
++ ret = print_trace_line(iter);
++ if (ret == TRACE_TYPE_PARTIAL_LINE) {
++ iter->seq.full = 0;
++ trace_seq_puts(&iter->seq, "[LINE TOO BIG]\n");
++ }
+ ret = trace_print_seq(m, &iter->seq);
+ /*
+ * If we overflow the seq_file buffer, then it will
+@@ -4900,6 +4904,12 @@ int tracing_release_file_tr(struct inode *inode, struct file *filp)
+ return 0;
+ }
+
++int tracing_single_release_file_tr(struct inode *inode, struct file *filp)
++{
++ tracing_release_file_tr(inode, filp);
++ return single_release(inode, filp);
++}
++
+ static int tracing_mark_open(struct inode *inode, struct file *filp)
+ {
+ stream_open(inode, filp);
+diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
+index c6eb116dc279d..449a8bd873cf7 100644
+--- a/kernel/trace/trace.h
++++ b/kernel/trace/trace.h
+@@ -593,6 +593,7 @@ int tracing_open_generic(struct inode *inode, struct file *filp);
+ int tracing_open_generic_tr(struct inode *inode, struct file *filp);
+ int tracing_open_file_tr(struct inode *inode, struct file *filp);
+ int tracing_release_file_tr(struct inode *inode, struct file *filp);
++int tracing_single_release_file_tr(struct inode *inode, struct file *filp);
+ bool tracing_is_disabled(void);
+ bool tracer_tracing_is_on(struct trace_array *tr);
+ void tracer_tracing_on(struct trace_array *tr);
+diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
+index c32a53f089229..e7799814a3c8a 100644
+--- a/kernel/trace/trace_events_hist.c
++++ b/kernel/trace/trace_events_hist.c
+@@ -4946,10 +4946,12 @@ static int event_hist_open(struct inode *inode, struct file *file)
+ {
+ int ret;
+
+- ret = security_locked_down(LOCKDOWN_TRACEFS);
++ ret = tracing_open_file_tr(inode, file);
+ if (ret)
+ return ret;
+
++ /* Clear private_data to avoid warning in single_open() */
++ file->private_data = NULL;
+ return single_open(file, hist_show, file);
+ }
+
+@@ -4957,7 +4959,7 @@ const struct file_operations event_hist_fops = {
+ .open = event_hist_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+- .release = single_release,
++ .release = tracing_single_release_file_tr,
+ };
+
+ #ifdef CONFIG_HIST_TRIGGERS_DEBUG
+@@ -5218,10 +5220,12 @@ static int event_hist_debug_open(struct inode *inode, struct file *file)
+ {
+ int ret;
+
+- ret = security_locked_down(LOCKDOWN_TRACEFS);
++ ret = tracing_open_file_tr(inode, file);
+ if (ret)
+ return ret;
+
++ /* Clear private_data to avoid warning in single_open() */
++ file->private_data = NULL;
+ return single_open(file, hist_debug_show, file);
+ }
+
+@@ -5229,7 +5233,7 @@ const struct file_operations event_hist_debug_fops = {
+ .open = event_hist_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+- .release = single_release,
++ .release = tracing_single_release_file_tr,
+ };
+ #endif
+
+diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
+index 6b4d3f3abdae2..4c4b84e507f74 100644
+--- a/kernel/trace/trace_output.c
++++ b/kernel/trace/trace_output.c
+@@ -1446,11 +1446,12 @@ static enum print_line_t trace_print_print(struct trace_iterator *iter,
+ {
+ struct print_entry *field;
+ struct trace_seq *s = &iter->seq;
++ int max = iter->ent_size - offsetof(struct print_entry, buf);
+
+ trace_assign_type(field, iter->ent);
+
+ seq_print_ip_sym(s, field->ip, flags);
+- trace_seq_printf(s, ": %s", field->buf);
++ trace_seq_printf(s, ": %.*s", max, field->buf);
+
+ return trace_handle_return(s);
+ }
+@@ -1459,10 +1460,11 @@ static enum print_line_t trace_print_raw(struct trace_iterator *iter, int flags,
+ struct trace_event *event)
+ {
+ struct print_entry *field;
++ int max = iter->ent_size - offsetof(struct print_entry, buf);
+
+ trace_assign_type(field, iter->ent);
+
+- trace_seq_printf(&iter->seq, "# %lx %s", field->ip, field->buf);
++ trace_seq_printf(&iter->seq, "# %lx %.*s", field->ip, max, field->buf);
+
+ return trace_handle_return(&iter->seq);
+ }
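The trace_output.c hunk bounds the printed string by the bytes actually present in the ring-buffer entry. The same precision-specifier idiom applies to any possibly unterminated buffer; a small generic sketch:

#include <linux/printk.h>

/* 'buf' points into a larger record of 'avail' bytes and may lack a
 * terminating NUL; "%.*s" prints at most 'avail' characters. */
static void print_bounded(const char *buf, int avail)
{
	pr_info("payload: %.*s\n", avail, buf);
}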
+diff --git a/lib/idr.c b/lib/idr.c
+index 13f2758c23773..da36054c3ca02 100644
+--- a/lib/idr.c
++++ b/lib/idr.c
+@@ -508,7 +508,7 @@ void ida_free(struct ida *ida, unsigned int id)
+ goto delete;
+ xas_store(&xas, xa_mk_value(v));
+ } else {
+- if (!test_bit(bit, bitmap->bitmap))
++ if (!bitmap || !test_bit(bit, bitmap->bitmap))
+ goto err;
+ __clear_bit(bit, bitmap->bitmap);
+ xas_set_mark(&xas, XA_FREE_MARK);
+diff --git a/lib/kunit/debugfs.c b/lib/kunit/debugfs.c
+index 1048ef1b8d6ec..4c4b84db8f4a4 100644
+--- a/lib/kunit/debugfs.c
++++ b/lib/kunit/debugfs.c
+@@ -52,12 +52,14 @@ static void debugfs_print_result(struct seq_file *seq,
+ static int debugfs_print_results(struct seq_file *seq, void *v)
+ {
+ struct kunit_suite *suite = (struct kunit_suite *)seq->private;
+- enum kunit_status success = kunit_suite_has_succeeded(suite);
++ enum kunit_status success;
+ struct kunit_case *test_case;
+
+ if (!suite || !suite->log)
+ return 0;
+
++ success = kunit_suite_has_succeeded(suite);
++
+ seq_printf(seq, "%s", suite->log);
+
+ kunit_suite_for_each_test_case(suite, test_case)
+diff --git a/lib/test_ida.c b/lib/test_ida.c
+index b068806259615..55105baa19da9 100644
+--- a/lib/test_ida.c
++++ b/lib/test_ida.c
+@@ -150,6 +150,45 @@ static void ida_check_conv(struct ida *ida)
+ IDA_BUG_ON(ida, !ida_is_empty(ida));
+ }
+
++/*
++ * Check various situations where we attempt to free an ID we don't own.
++ */
++static void ida_check_bad_free(struct ida *ida)
++{
++ unsigned long i;
++
++ printk("vvv Ignore \"not allocated\" warnings\n");
++ /* IDA is empty; all of these will fail */
++ ida_free(ida, 0);
++ for (i = 0; i < 31; i++)
++ ida_free(ida, 1 << i);
++
++ /* IDA contains a single value entry */
++ IDA_BUG_ON(ida, ida_alloc_min(ida, 3, GFP_KERNEL) != 3);
++ ida_free(ida, 0);
++ for (i = 0; i < 31; i++)
++ ida_free(ida, 1 << i);
++
++ /* IDA contains a single bitmap */
++ IDA_BUG_ON(ida, ida_alloc_min(ida, 1023, GFP_KERNEL) != 1023);
++ ida_free(ida, 0);
++ for (i = 0; i < 31; i++)
++ ida_free(ida, 1 << i);
++
++ /* IDA contains a tree */
++ IDA_BUG_ON(ida, ida_alloc_min(ida, (1 << 20) - 1, GFP_KERNEL) != (1 << 20) - 1);
++ ida_free(ida, 0);
++ for (i = 0; i < 31; i++)
++ ida_free(ida, 1 << i);
++ printk("^^^ \"not allocated\" warnings over\n");
++
++ ida_free(ida, 3);
++ ida_free(ida, 1023);
++ ida_free(ida, (1 << 20) - 1);
++
++ IDA_BUG_ON(ida, !ida_is_empty(ida));
++}
++
+ static DEFINE_IDA(ida);
+
+ static int ida_checks(void)
+@@ -162,6 +201,7 @@ static int ida_checks(void)
+ ida_check_leaf(&ida, 1024 * 64);
+ ida_check_max(&ida);
+ ida_check_conv(&ida);
++ ida_check_bad_free(&ida);
+
+ printk("IDA: %u of %u tests passed\n", tests_passed, tests_run);
+ return (tests_run != tests_passed) ? 0 : -EINVAL;
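The new ida_check_bad_free() test (together with the NULL-bitmap guard added to ida_free() in lib/idr.c above) formalises that only IDs handed out by ida_alloc*() may be freed. A minimal correct-usage sketch:

#include <linux/gfp.h>
#include <linux/idr.h>

static DEFINE_IDA(example_ida);

static int example_get_id(void)
{
	/* Returns an ID >= 1, or a negative errno on failure. */
	return ida_alloc_min(&example_ida, 1, GFP_KERNEL);
}

static void example_put_id(int id)
{
	/* Free only IDs previously returned by ida_alloc_min(); freeing
	 * anything else triggers the "not allocated" warning exercised by
	 * ida_check_bad_free(). */
	if (id >= 0)
		ida_free(&example_ida, id);
}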
+diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
+index ce538dbe89d13..700920aea39ef 100644
+--- a/net/bluetooth/hci_conn.c
++++ b/net/bluetooth/hci_conn.c
+@@ -1436,12 +1436,10 @@ static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
+ hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
+ sizeof(cp), &cp);
+
+- /* If we're already encrypted set the REAUTH_PEND flag,
+- * otherwise set the ENCRYPT_PEND.
++ /* Set the ENCRYPT_PEND to trigger encryption after
++ * authentication.
+ */
+- if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
+- set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
+- else
++ if (!test_bit(HCI_CONN_ENCRYPT, &conn->flags))
+ set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
+ }
+
+diff --git a/net/bluetooth/hci_debugfs.c b/net/bluetooth/hci_debugfs.c
+index 841393389f7b9..d112b2bc3798d 100644
+--- a/net/bluetooth/hci_debugfs.c
++++ b/net/bluetooth/hci_debugfs.c
+@@ -1044,10 +1044,12 @@ static int min_key_size_set(void *data, u64 val)
+ {
+ struct hci_dev *hdev = data;
+
+- if (val > hdev->le_max_key_size || val < SMP_MIN_ENC_KEY_SIZE)
++ hci_dev_lock(hdev);
++ if (val > hdev->le_max_key_size || val < SMP_MIN_ENC_KEY_SIZE) {
++ hci_dev_unlock(hdev);
+ return -EINVAL;
++ }
+
+- hci_dev_lock(hdev);
+ hdev->le_min_key_size = val;
+ hci_dev_unlock(hdev);
+
+@@ -1072,10 +1074,12 @@ static int max_key_size_set(void *data, u64 val)
+ {
+ struct hci_dev *hdev = data;
+
+- if (val > SMP_MAX_ENC_KEY_SIZE || val < hdev->le_min_key_size)
++ hci_dev_lock(hdev);
++ if (val > SMP_MAX_ENC_KEY_SIZE || val < hdev->le_min_key_size) {
++ hci_dev_unlock(hdev);
+ return -EINVAL;
++ }
+
+- hci_dev_lock(hdev);
+ hdev->le_max_key_size = val;
+ hci_dev_unlock(hdev);
+
+diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
+index edf5132b5dc96..2ad2f4647847c 100644
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -3017,14 +3017,8 @@ static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
+
+ if (!ev->status) {
+ clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
+-
+- if (!hci_conn_ssp_enabled(conn) &&
+- test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
+- bt_dev_info(hdev, "re-auth of legacy device is not possible.");
+- } else {
+- set_bit(HCI_CONN_AUTH, &conn->flags);
+- conn->sec_level = conn->pending_sec_level;
+- }
++ set_bit(HCI_CONN_AUTH, &conn->flags);
++ conn->sec_level = conn->pending_sec_level;
+ } else {
+ if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
+ set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
+@@ -3033,7 +3027,6 @@ static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
+ }
+
+ clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
+- clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
+
+ if (conn->state == BT_CONFIG) {
+ if (!ev->status && hci_conn_ssp_enabled(conn)) {
+diff --git a/net/core/neighbour.c b/net/core/neighbour.c
+index 927c4db2703ff..5829477efbba5 100644
+--- a/net/core/neighbour.c
++++ b/net/core/neighbour.c
+@@ -225,9 +225,11 @@ static int neigh_forced_gc(struct neigh_table *tbl)
+ {
+ int max_clean = atomic_read(&tbl->gc_entries) -
+ READ_ONCE(tbl->gc_thresh2);
++ u64 tmax = ktime_get_ns() + NSEC_PER_MSEC;
+ unsigned long tref = jiffies - 5 * HZ;
+ struct neighbour *n, *tmp;
+ int shrunk = 0;
++ int loop = 0;
+
+ NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);
+
+@@ -250,11 +252,16 @@ static int neigh_forced_gc(struct neigh_table *tbl)
+ shrunk++;
+ if (shrunk >= max_clean)
+ break;
++ if (++loop == 16) {
++ if (ktime_get_ns() > tmax)
++ goto unlock;
++ loop = 0;
++ }
+ }
+ }
+
+ WRITE_ONCE(tbl->last_flush, jiffies);
+-
++unlock:
+ write_unlock_bh(&tbl->lock);
+
+ return shrunk;
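The neighbour GC fix bounds the time spent under tbl->lock to roughly one millisecond, sampling the clock only every 16 entries. The pattern generalises to any long scan under a lock; a hypothetical sketch (struct item and consume() are made up):

#include <linux/ktime.h>
#include <linux/list.h>

struct item { struct list_head list; };

static void consume(struct item *it) { /* hypothetical per-entry work */ }

static void bounded_scan(struct list_head *head)
{
	u64 tmax = ktime_get_ns() + NSEC_PER_MSEC;
	struct item *it, *tmp;
	int loop = 0;

	list_for_each_entry_safe(it, tmp, head, list) {
		consume(it);
		/* Check the clock only every 16 iterations to keep
		 * ktime_get_ns() off the per-entry fast path. */
		if (++loop == 16) {
			if (ktime_get_ns() > tmax)
				break;	/* budget spent; resume later */
			loop = 0;
		}
	}
}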
+diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c
+index 8324e9f970668..26a9d8434c234 100644
+--- a/net/dns_resolver/dns_key.c
++++ b/net/dns_resolver/dns_key.c
+@@ -104,7 +104,7 @@ dns_resolver_preparse(struct key_preparsed_payload *prep)
+ const struct dns_server_list_v1_header *v1;
+
+ /* It may be a server list. */
+- if (datalen <= sizeof(*v1))
++ if (datalen < sizeof(*v1))
+ return -EINVAL;
+
+ v1 = (const struct dns_server_list_v1_header *)data;
+diff --git a/net/ethtool/features.c b/net/ethtool/features.c
+index 1c9f4df273bd5..faccab84d8656 100644
+--- a/net/ethtool/features.c
++++ b/net/ethtool/features.c
+@@ -235,17 +235,20 @@ int ethnl_set_features(struct sk_buff *skb, struct genl_info *info)
+ dev = req_info.dev;
+
+ rtnl_lock();
++ ret = ethnl_ops_begin(dev);
++ if (ret < 0)
++ goto out_rtnl;
+ ethnl_features_to_bitmap(old_active, dev->features);
+ ethnl_features_to_bitmap(old_wanted, dev->wanted_features);
+ ret = ethnl_parse_bitset(req_wanted, req_mask, NETDEV_FEATURE_COUNT,
+ tb[ETHTOOL_A_FEATURES_WANTED],
+ netdev_features_strings, info->extack);
+ if (ret < 0)
+- goto out_rtnl;
++ goto out_ops;
+ if (ethnl_bitmap_to_features(req_mask) & ~NETIF_F_ETHTOOL_BITS) {
+ GENL_SET_ERR_MSG(info, "attempt to change non-ethtool features");
+ ret = -EINVAL;
+- goto out_rtnl;
++ goto out_ops;
+ }
+
+ /* set req_wanted bits not in req_mask from old_wanted */
+@@ -282,6 +285,8 @@ int ethnl_set_features(struct sk_buff *skb, struct genl_info *info)
+ if (mod)
+ netdev_features_change(dev);
+
++out_ops:
++ ethnl_ops_complete(dev);
+ out_rtnl:
+ rtnl_unlock();
+ dev_put(dev);
+diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
+index bc5d3188454d0..a41ba4b161c43 100644
+--- a/net/ipv6/ip6_tunnel.c
++++ b/net/ipv6/ip6_tunnel.c
+@@ -401,7 +401,7 @@ __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
+ const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)raw;
+ unsigned int nhoff = raw - skb->data;
+ unsigned int off = nhoff + sizeof(*ipv6h);
+- u8 next, nexthdr = ipv6h->nexthdr;
++ u8 nexthdr = ipv6h->nexthdr;
+
+ while (ipv6_ext_hdr(nexthdr) && nexthdr != NEXTHDR_NONE) {
+ struct ipv6_opt_hdr *hdr;
+@@ -412,25 +412,25 @@ __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
+
+ hdr = (struct ipv6_opt_hdr *)(skb->data + off);
+ if (nexthdr == NEXTHDR_FRAGMENT) {
+- struct frag_hdr *frag_hdr = (struct frag_hdr *) hdr;
+- if (frag_hdr->frag_off)
+- break;
+ optlen = 8;
+ } else if (nexthdr == NEXTHDR_AUTH) {
+ optlen = ipv6_authlen(hdr);
+ } else {
+ optlen = ipv6_optlen(hdr);
+ }
+- /* cache hdr->nexthdr, since pskb_may_pull() might
+- * invalidate hdr
+- */
+- next = hdr->nexthdr;
+- if (nexthdr == NEXTHDR_DEST) {
+- u16 i = 2;
+
+- /* Remember : hdr is no longer valid at this point. */
+- if (!pskb_may_pull(skb, off + optlen))
++ if (!pskb_may_pull(skb, off + optlen))
++ break;
++
++ hdr = (struct ipv6_opt_hdr *)(skb->data + off);
++ if (nexthdr == NEXTHDR_FRAGMENT) {
++ struct frag_hdr *frag_hdr = (struct frag_hdr *)hdr;
++
++ if (frag_hdr->frag_off)
+ break;
++ }
++ if (nexthdr == NEXTHDR_DEST) {
++ u16 i = 2;
+
+ while (1) {
+ struct ipv6_tlv_tnl_enc_lim *tel;
+@@ -451,7 +451,7 @@ __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
+ i++;
+ }
+ }
+- nexthdr = next;
++ nexthdr = hdr->nexthdr;
+ off += optlen;
+ }
+ return 0;
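The ip6_tnl_parse_tlv_enc_lim() rework is about pointer revalidation: pskb_may_pull() may reallocate the header, so any pointer previously derived from skb->data must be recomputed before it is dereferenced again. A condensed sketch of the safe pattern (not the tunnel code itself):

#include <linux/skbuff.h>
#include <net/ipv6.h>

static int read_opt_nexthdr(struct sk_buff *skb, unsigned int off)
{
	struct ipv6_opt_hdr *hdr;

	if (!pskb_may_pull(skb, off + sizeof(*hdr)))
		return -EINVAL;
	hdr = (struct ipv6_opt_hdr *)(skb->data + off);

	/* Pulling the full option may move skb->data ... */
	if (!pskb_may_pull(skb, off + ipv6_optlen(hdr)))
		return -EINVAL;
	/* ... so re-derive the pointer before touching it again. */
	hdr = (struct ipv6_opt_hdr *)(skb->data + off);

	return hdr->nexthdr;
}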
+diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
+index 87c699d57b366..0ea7d97cdc025 100644
+--- a/net/ipv6/mcast.c
++++ b/net/ipv6/mcast.c
+@@ -2725,8 +2725,12 @@ void ipv6_mc_down(struct inet6_dev *idev)
+ synchronize_net();
+ mld_query_stop_work(idev);
+ mld_report_stop_work(idev);
++
++ mutex_lock(&idev->mc_lock);
+ mld_ifc_stop_work(idev);
+ mld_gq_stop_work(idev);
++ mutex_unlock(&idev->mc_lock);
++
+ mld_dad_stop_work(idev);
+ }
+
+diff --git a/net/mptcp/options.c b/net/mptcp/options.c
+index aa4b0cf7c6380..3b4ce8a06f999 100644
+--- a/net/mptcp/options.c
++++ b/net/mptcp/options.c
+@@ -103,6 +103,7 @@ static void mptcp_parse_option(const struct sk_buff *skb,
+ mp_opt->suboptions |= OPTION_MPTCP_DSS;
+ mp_opt->use_map = 1;
+ mp_opt->mpc_map = 1;
++ mp_opt->use_ack = 0;
+ mp_opt->data_len = get_unaligned_be16(ptr);
+ ptr += 2;
+ }
+@@ -117,8 +118,8 @@ static void mptcp_parse_option(const struct sk_buff *skb,
+ break;
+
+ case MPTCPOPT_MP_JOIN:
+- mp_opt->suboptions |= OPTIONS_MPTCP_MPJ;
+ if (opsize == TCPOLEN_MPTCP_MPJ_SYN) {
++ mp_opt->suboptions |= OPTION_MPTCP_MPJ_SYN;
+ mp_opt->backup = *ptr++ & MPTCPOPT_BACKUP;
+ mp_opt->join_id = *ptr++;
+ mp_opt->token = get_unaligned_be32(ptr);
+@@ -129,6 +130,7 @@ static void mptcp_parse_option(const struct sk_buff *skb,
+ mp_opt->backup, mp_opt->join_id,
+ mp_opt->token, mp_opt->nonce);
+ } else if (opsize == TCPOLEN_MPTCP_MPJ_SYNACK) {
++ mp_opt->suboptions |= OPTION_MPTCP_MPJ_SYNACK;
+ mp_opt->backup = *ptr++ & MPTCPOPT_BACKUP;
+ mp_opt->join_id = *ptr++;
+ mp_opt->thmac = get_unaligned_be64(ptr);
+@@ -139,11 +141,10 @@ static void mptcp_parse_option(const struct sk_buff *skb,
+ mp_opt->backup, mp_opt->join_id,
+ mp_opt->thmac, mp_opt->nonce);
+ } else if (opsize == TCPOLEN_MPTCP_MPJ_ACK) {
++ mp_opt->suboptions |= OPTION_MPTCP_MPJ_ACK;
+ ptr += 2;
+ memcpy(mp_opt->hmac, ptr, MPTCPOPT_HMAC_LEN);
+ pr_debug("MP_JOIN hmac");
+- } else {
+- mp_opt->suboptions &= ~OPTIONS_MPTCP_MPJ;
+ }
+ break;
+
+@@ -353,8 +354,7 @@ static void mptcp_parse_option(const struct sk_buff *skb,
+ }
+ }
+
+-void mptcp_get_options(const struct sock *sk,
+- const struct sk_buff *skb,
++void mptcp_get_options(const struct sk_buff *skb,
+ struct mptcp_options_received *mp_opt)
+ {
+ const struct tcphdr *th = tcp_hdr(skb);
+@@ -1090,7 +1090,7 @@ bool mptcp_incoming_options(struct sock *sk, struct sk_buff *skb)
+ return true;
+ }
+
+- mptcp_get_options(sk, skb, &mp_opt);
++ mptcp_get_options(skb, &mp_opt);
+
+ /* The subflow can be in close state only if check_fully_established()
+ * just sent a reset. If so, tell the caller to ignore the current packet.
+diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
+index e193b710b471a..78aa6125eafbb 100644
+--- a/net/mptcp/protocol.h
++++ b/net/mptcp/protocol.h
+@@ -636,8 +636,7 @@ int __init mptcp_proto_v6_init(void);
+ struct sock *mptcp_sk_clone(const struct sock *sk,
+ const struct mptcp_options_received *mp_opt,
+ struct request_sock *req);
+-void mptcp_get_options(const struct sock *sk,
+- const struct sk_buff *skb,
++void mptcp_get_options(const struct sk_buff *skb,
+ struct mptcp_options_received *mp_opt);
+
+ void mptcp_finish_connect(struct sock *sk);
+diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
+index 666f6720db765..099bdfc12da96 100644
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -152,10 +152,10 @@ static int subflow_check_req(struct request_sock *req,
+ return -EINVAL;
+ #endif
+
+- mptcp_get_options(sk_listener, skb, &mp_opt);
++ mptcp_get_options(skb, &mp_opt);
+
+ opt_mp_capable = !!(mp_opt.suboptions & OPTIONS_MPTCP_MPC);
+- opt_mp_join = !!(mp_opt.suboptions & OPTIONS_MPTCP_MPJ);
++ opt_mp_join = !!(mp_opt.suboptions & OPTION_MPTCP_MPJ_SYN);
+ if (opt_mp_capable) {
+ SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MPCAPABLEPASSIVE);
+
+@@ -249,10 +249,10 @@ int mptcp_subflow_init_cookie_req(struct request_sock *req,
+ int err;
+
+ subflow_init_req(req, sk_listener);
+- mptcp_get_options(sk_listener, skb, &mp_opt);
++ mptcp_get_options(skb, &mp_opt);
+
+ opt_mp_capable = !!(mp_opt.suboptions & OPTIONS_MPTCP_MPC);
+- opt_mp_join = !!(mp_opt.suboptions & OPTIONS_MPTCP_MPJ);
++ opt_mp_join = !!(mp_opt.suboptions & OPTION_MPTCP_MPJ_ACK);
+ if (opt_mp_capable && opt_mp_join)
+ return -EINVAL;
+
+@@ -407,7 +407,7 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
+ subflow->ssn_offset = TCP_SKB_CB(skb)->seq;
+ pr_debug("subflow=%p synack seq=%x", subflow, subflow->ssn_offset);
+
+- mptcp_get_options(sk, skb, &mp_opt);
++ mptcp_get_options(skb, &mp_opt);
+ if (subflow->request_mptcp) {
+ if (!(mp_opt.suboptions & OPTIONS_MPTCP_MPC)) {
+ MPTCP_INC_STATS(sock_net(sk),
+@@ -432,7 +432,7 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
+ } else if (subflow->request_join) {
+ u8 hmac[SHA256_DIGEST_SIZE];
+
+- if (!(mp_opt.suboptions & OPTIONS_MPTCP_MPJ)) {
++ if (!(mp_opt.suboptions & OPTION_MPTCP_MPJ_SYNACK)) {
+ subflow->reset_reason = MPTCP_RST_EMPTCP;
+ goto do_reset;
+ }
+@@ -687,7 +687,7 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
+ * reordered MPC will cause fallback, but we don't have other
+ * options.
+ */
+- mptcp_get_options(sk, skb, &mp_opt);
++ mptcp_get_options(skb, &mp_opt);
+ if (!(mp_opt.suboptions & OPTIONS_MPTCP_MPC)) {
+ fallback = true;
+ goto create_child;
+@@ -697,8 +697,8 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
+ if (!new_msk)
+ fallback = true;
+ } else if (subflow_req->mp_join) {
+- mptcp_get_options(sk, skb, &mp_opt);
+- if (!(mp_opt.suboptions & OPTIONS_MPTCP_MPJ) ||
++ mptcp_get_options(skb, &mp_opt);
++ if (!(mp_opt.suboptions & OPTION_MPTCP_MPJ_ACK) ||
+ !subflow_hmac_valid(req, &mp_opt) ||
+ !mptcp_can_accept_new_subflow(subflow_req->msk)) {
+ SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKMAC);
+diff --git a/net/ncsi/internal.h b/net/ncsi/internal.h
+index 03757e76bb6b9..374412ed780b6 100644
+--- a/net/ncsi/internal.h
++++ b/net/ncsi/internal.h
+@@ -105,8 +105,11 @@ enum {
+
+
+ struct ncsi_channel_version {
+- u32 version; /* Supported BCD encoded NCSI version */
+- u32 alpha2; /* Supported BCD encoded NCSI version */
++ u8 major; /* NCSI version major */
++ u8 minor; /* NCSI version minor */
++ u8 update; /* NCSI version update */
++ char alpha1; /* NCSI version alpha1 */
++ char alpha2; /* NCSI version alpha2 */
+ u8 fw_name[12]; /* Firmware name string */
+ u32 fw_version; /* Firmware version */
+ u16 pci_ids[4]; /* PCI identification */
+diff --git a/net/ncsi/ncsi-netlink.c b/net/ncsi/ncsi-netlink.c
+index c189b4c8a1823..db350b8f5d88b 100644
+--- a/net/ncsi/ncsi-netlink.c
++++ b/net/ncsi/ncsi-netlink.c
+@@ -71,8 +71,8 @@ static int ncsi_write_channel_info(struct sk_buff *skb,
+ if (nc == nc->package->preferred_channel)
+ nla_put_flag(skb, NCSI_CHANNEL_ATTR_FORCED);
+
+- nla_put_u32(skb, NCSI_CHANNEL_ATTR_VERSION_MAJOR, nc->version.version);
+- nla_put_u32(skb, NCSI_CHANNEL_ATTR_VERSION_MINOR, nc->version.alpha2);
++ nla_put_u32(skb, NCSI_CHANNEL_ATTR_VERSION_MAJOR, nc->version.major);
++ nla_put_u32(skb, NCSI_CHANNEL_ATTR_VERSION_MINOR, nc->version.minor);
+ nla_put_string(skb, NCSI_CHANNEL_ATTR_VERSION_STR, nc->version.fw_name);
+
+ vid_nest = nla_nest_start_noflag(skb, NCSI_CHANNEL_ATTR_VLAN_LIST);
+diff --git a/net/ncsi/ncsi-pkt.h b/net/ncsi/ncsi-pkt.h
+index ba66c7dc3a216..c9d1da34dc4dc 100644
+--- a/net/ncsi/ncsi-pkt.h
++++ b/net/ncsi/ncsi-pkt.h
+@@ -197,9 +197,12 @@ struct ncsi_rsp_gls_pkt {
+ /* Get Version ID */
+ struct ncsi_rsp_gvi_pkt {
+ struct ncsi_rsp_pkt_hdr rsp; /* Response header */
+- __be32 ncsi_version; /* NCSI version */
++ unsigned char major; /* NCSI version major */
++ unsigned char minor; /* NCSI version minor */
++ unsigned char update; /* NCSI version update */
++ unsigned char alpha1; /* NCSI version alpha1 */
+ unsigned char reserved[3]; /* Reserved */
+- unsigned char alpha2; /* NCSI version */
++ unsigned char alpha2; /* NCSI version alpha2 */
+ unsigned char fw_name[12]; /* f/w name string */
+ __be32 fw_version; /* f/w version */
+ __be16 pci_ids[4]; /* PCI IDs */
+diff --git a/net/ncsi/ncsi-rsp.c b/net/ncsi/ncsi-rsp.c
+index 069c2659074bc..480e80e3c2836 100644
+--- a/net/ncsi/ncsi-rsp.c
++++ b/net/ncsi/ncsi-rsp.c
+@@ -19,6 +19,19 @@
+ #include "ncsi-pkt.h"
+ #include "ncsi-netlink.h"
+
++/* Nibbles within [0xA, 0xF] add zero "0" to the returned value.
++ * Optional fields (encoded as 0xFF) will default to zero.
++ */
++static u8 decode_bcd_u8(u8 x)
++{
++ int lo = x & 0xF;
++ int hi = x >> 4;
++
++ lo = lo < 0xA ? lo : 0;
++ hi = hi < 0xA ? hi : 0;
++ return lo + hi * 10;
++}
++
+ static int ncsi_validate_rsp_pkt(struct ncsi_request *nr,
+ unsigned short payload)
+ {
+@@ -755,9 +768,18 @@ static int ncsi_rsp_handler_gvi(struct ncsi_request *nr)
+ if (!nc)
+ return -ENODEV;
+
+- /* Update to channel's version info */
++ /* Update channel's version info
++ *
++ * Major, minor, and update fields are supposed to be
++ * unsigned integers encoded as packed BCD.
++ *
++ * Alpha1 and alpha2 are ISO/IEC 8859-1 characters.
++ */
+ ncv = &nc->version;
+- ncv->version = ntohl(rsp->ncsi_version);
++ ncv->major = decode_bcd_u8(rsp->major);
++ ncv->minor = decode_bcd_u8(rsp->minor);
++ ncv->update = decode_bcd_u8(rsp->update);
++ ncv->alpha1 = rsp->alpha1;
+ ncv->alpha2 = rsp->alpha2;
+ memcpy(ncv->fw_name, rsp->fw_name, 12);
+ ncv->fw_version = ntohl(rsp->fw_version);
+diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
+index cd2130e98836b..c87dbc8970023 100644
+--- a/net/netfilter/ipvs/ip_vs_xmit.c
++++ b/net/netfilter/ipvs/ip_vs_xmit.c
+@@ -271,7 +271,7 @@ static inline bool decrement_ttl(struct netns_ipvs *ipvs,
+ skb->dev = dst->dev;
+ icmpv6_send(skb, ICMPV6_TIME_EXCEED,
+ ICMPV6_EXC_HOPLIMIT, 0);
+- __IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
++ IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
+
+ return false;
+ }
+@@ -286,7 +286,7 @@ static inline bool decrement_ttl(struct netns_ipvs *ipvs,
+ {
+ if (ip_hdr(skb)->ttl <= 1) {
+ /* Tell the sender its packet died... */
+- __IP_INC_STATS(net, IPSTATS_MIB_INHDRERRORS);
++ IP_INC_STATS(net, IPSTATS_MIB_INHDRERRORS);
+ icmp_send(skb, ICMP_TIME_EXCEEDED, ICMP_EXC_TTL, 0);
+ return false;
+ }
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 3556818c7162f..e2e3ccbb635fa 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -4453,8 +4453,8 @@ static int nft_set_desc_concat_parse(const struct nlattr *attr,
+ static int nft_set_desc_concat(struct nft_set_desc *desc,
+ const struct nlattr *nla)
+ {
++ u32 num_regs = 0, key_num_regs = 0;
+ struct nlattr *attr;
+- u32 num_regs = 0;
+ int rem, err, i;
+
+ nla_for_each_nested(attr, nla, rem) {
+@@ -4469,6 +4469,10 @@ static int nft_set_desc_concat(struct nft_set_desc *desc,
+ for (i = 0; i < desc->field_count; i++)
+ num_regs += DIV_ROUND_UP(desc->field_len[i], sizeof(u32));
+
++ key_num_regs = DIV_ROUND_UP(desc->klen, sizeof(u32));
++ if (key_num_regs != num_regs)
++ return -EINVAL;
++
+ if (num_regs > NFT_REG32_COUNT)
+ return -E2BIG;
+
+@@ -4683,16 +4687,28 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
+ }
+
+ desc.policy = NFT_SET_POL_PERFORMANCE;
+- if (nla[NFTA_SET_POLICY] != NULL)
++ if (nla[NFTA_SET_POLICY] != NULL) {
+ desc.policy = ntohl(nla_get_be32(nla[NFTA_SET_POLICY]));
++ switch (desc.policy) {
++ case NFT_SET_POL_PERFORMANCE:
++ case NFT_SET_POL_MEMORY:
++ break;
++ default:
++ return -EOPNOTSUPP;
++ }
++ }
+
+ if (nla[NFTA_SET_DESC] != NULL) {
+ err = nf_tables_set_desc_parse(&desc, nla[NFTA_SET_DESC]);
+ if (err < 0)
+ return err;
+
+- if (desc.field_count > 1 && !(flags & NFT_SET_CONCAT))
++ if (desc.field_count > 1) {
++ if (!(flags & NFT_SET_CONCAT))
++ return -EINVAL;
++ } else if (flags & NFT_SET_CONCAT) {
+ return -EINVAL;
++ }
+ } else if (flags & NFT_SET_CONCAT) {
+ return -EINVAL;
+ }
+@@ -5336,7 +5352,7 @@ static int nf_tables_dump_setelem(const struct nft_ctx *ctx,
+ const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
+ struct nft_set_dump_args *args;
+
+- if (nft_set_elem_expired(ext))
++ if (nft_set_elem_expired(ext) || nft_set_elem_is_dead(ext))
+ return 0;
+
+ args = container_of(iter, struct nft_set_dump_args, iter);
+@@ -5888,7 +5904,7 @@ int nft_set_elem_expr_clone(const struct nft_ctx *ctx, struct nft_set *set,
+ int err, i, k;
+
+ for (i = 0; i < set->num_exprs; i++) {
+- expr = kzalloc(set->exprs[i]->ops->size, GFP_KERNEL);
++ expr = kzalloc(set->exprs[i]->ops->size, GFP_KERNEL_ACCOUNT);
+ if (!expr)
+ goto err_expr;
+
+@@ -6034,7 +6050,7 @@ static int nft_setelem_catchall_deactivate(const struct net *net,
+
+ list_for_each_entry(catchall, &set->catchall_list, list) {
+ ext = nft_set_elem_ext(set, catchall->elem);
+- if (!nft_is_active(net, ext))
++ if (!nft_is_active_next(net, ext))
+ continue;
+
+ kfree(elem->priv);
+@@ -9740,6 +9756,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
+ nft_trans_destroy(trans);
+ break;
+ }
++ nft_trans_set(trans)->dead = 1;
+ list_del_rcu(&nft_trans_set(trans)->list);
+ break;
+ case NFT_MSG_DELSET:
+diff --git a/net/netfilter/nft_connlimit.c b/net/netfilter/nft_connlimit.c
+index 7d0761fad37ef..f5df535bcbd08 100644
+--- a/net/netfilter/nft_connlimit.c
++++ b/net/netfilter/nft_connlimit.c
+@@ -14,7 +14,7 @@
+ #include <net/netfilter/nf_conntrack_zones.h>
+
+ struct nft_connlimit {
+- struct nf_conncount_list list;
++ struct nf_conncount_list *list;
+ u32 limit;
+ bool invert;
+ };
+@@ -43,12 +43,12 @@ static inline void nft_connlimit_do_eval(struct nft_connlimit *priv,
+ return;
+ }
+
+- if (nf_conncount_add(nft_net(pkt), &priv->list, tuple_ptr, zone)) {
++ if (nf_conncount_add(nft_net(pkt), priv->list, tuple_ptr, zone)) {
+ regs->verdict.code = NF_DROP;
+ return;
+ }
+
+- count = priv->list.count;
++ count = priv->list->count;
+
+ if ((count > priv->limit) ^ priv->invert) {
+ regs->verdict.code = NFT_BREAK;
+@@ -62,6 +62,7 @@ static int nft_connlimit_do_init(const struct nft_ctx *ctx,
+ {
+ bool invert = false;
+ u32 flags, limit;
++ int err;
+
+ if (!tb[NFTA_CONNLIMIT_COUNT])
+ return -EINVAL;
+@@ -76,18 +77,31 @@ static int nft_connlimit_do_init(const struct nft_ctx *ctx,
+ invert = true;
+ }
+
+- nf_conncount_list_init(&priv->list);
++ priv->list = kmalloc(sizeof(*priv->list), GFP_KERNEL_ACCOUNT);
++ if (!priv->list)
++ return -ENOMEM;
++
++ nf_conncount_list_init(priv->list);
+ priv->limit = limit;
+ priv->invert = invert;
+
+- return nf_ct_netns_get(ctx->net, ctx->family);
++ err = nf_ct_netns_get(ctx->net, ctx->family);
++ if (err < 0)
++ goto err_netns;
++
++ return 0;
++err_netns:
++ kfree(priv->list);
++
++ return err;
+ }
+
+ static void nft_connlimit_do_destroy(const struct nft_ctx *ctx,
+ struct nft_connlimit *priv)
+ {
+ nf_ct_netns_put(ctx->net, ctx->family);
+- nf_conncount_cache_free(&priv->list);
++ nf_conncount_cache_free(priv->list);
++ kfree(priv->list);
+ }
+
+ static int nft_connlimit_do_dump(struct sk_buff *skb,
+@@ -200,7 +214,11 @@ static int nft_connlimit_clone(struct nft_expr *dst, const struct nft_expr *src)
+ struct nft_connlimit *priv_dst = nft_expr_priv(dst);
+ struct nft_connlimit *priv_src = nft_expr_priv(src);
+
+- nf_conncount_list_init(&priv_dst->list);
++ priv_dst->list = kmalloc(sizeof(*priv_dst->list), GFP_ATOMIC);
++ if (!priv_dst->list)
++ return -ENOMEM;
++
++ nf_conncount_list_init(priv_dst->list);
+ priv_dst->limit = priv_src->limit;
+ priv_dst->invert = priv_src->invert;
+
+@@ -212,7 +230,8 @@ static void nft_connlimit_destroy_clone(const struct nft_ctx *ctx,
+ {
+ struct nft_connlimit *priv = nft_expr_priv(expr);
+
+- nf_conncount_cache_free(&priv->list);
++ nf_conncount_cache_free(priv->list);
++ kfree(priv->list);
+ }
+
+ static bool nft_connlimit_gc(struct net *net, const struct nft_expr *expr)
+@@ -221,7 +240,7 @@ static bool nft_connlimit_gc(struct net *net, const struct nft_expr *expr)
+ bool ret;
+
+ local_bh_disable();
+- ret = nf_conncount_gc_list(net, &priv->list);
++ ret = nf_conncount_gc_list(net, priv->list);
+ local_bh_enable();
+
+ return ret;
+diff --git a/net/netfilter/nft_counter.c b/net/netfilter/nft_counter.c
+index 8edd3b3c173d7..9f78f8ad4ee10 100644
+--- a/net/netfilter/nft_counter.c
++++ b/net/netfilter/nft_counter.c
+@@ -61,7 +61,7 @@ static int nft_counter_do_init(const struct nlattr * const tb[],
+ struct nft_counter __percpu *cpu_stats;
+ struct nft_counter *this_cpu;
+
+- cpu_stats = alloc_percpu(struct nft_counter);
++ cpu_stats = alloc_percpu_gfp(struct nft_counter, GFP_KERNEL_ACCOUNT);
+ if (cpu_stats == NULL)
+ return -ENOMEM;
+
+diff --git a/net/netfilter/nft_last.c b/net/netfilter/nft_last.c
+index 304e33cbed9b4..63145a5e69ef6 100644
+--- a/net/netfilter/nft_last.c
++++ b/net/netfilter/nft_last.c
+@@ -8,9 +8,13 @@
+ #include <net/netfilter/nf_tables_core.h>
+ #include <net/netfilter/nf_tables.h>
+
++struct nft_last {
++ unsigned long jiffies;
++ unsigned int set;
++};
++
+ struct nft_last_priv {
+- unsigned long last_jiffies;
+- unsigned int last_set;
++ struct nft_last *last;
+ };
+
+ static const struct nla_policy nft_last_policy[NFTA_LAST_MAX + 1] = {
+@@ -22,47 +26,55 @@ static int nft_last_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
+ const struct nlattr * const tb[])
+ {
+ struct nft_last_priv *priv = nft_expr_priv(expr);
++ struct nft_last *last;
+ u64 last_jiffies;
+- u32 last_set = 0;
+ int err;
+
+- if (tb[NFTA_LAST_SET]) {
+- last_set = ntohl(nla_get_be32(tb[NFTA_LAST_SET]));
+- if (last_set == 1)
+- priv->last_set = 1;
+- }
++ last = kzalloc(sizeof(*last), GFP_KERNEL_ACCOUNT);
++ if (!last)
++ return -ENOMEM;
+
+- if (last_set && tb[NFTA_LAST_MSECS]) {
++ if (tb[NFTA_LAST_SET])
++ last->set = ntohl(nla_get_be32(tb[NFTA_LAST_SET]));
++
++ if (last->set && tb[NFTA_LAST_MSECS]) {
+ err = nf_msecs_to_jiffies64(tb[NFTA_LAST_MSECS], &last_jiffies);
+ if (err < 0)
+- return err;
++ goto err;
+
+- priv->last_jiffies = jiffies - (unsigned long)last_jiffies;
++ last->jiffies = jiffies - (unsigned long)last_jiffies;
+ }
++ priv->last = last;
+
+ return 0;
++err:
++ kfree(last);
++
++ return err;
+ }
+
+ static void nft_last_eval(const struct nft_expr *expr,
+ struct nft_regs *regs, const struct nft_pktinfo *pkt)
+ {
+ struct nft_last_priv *priv = nft_expr_priv(expr);
++ struct nft_last *last = priv->last;
+
+- if (READ_ONCE(priv->last_jiffies) != jiffies)
+- WRITE_ONCE(priv->last_jiffies, jiffies);
+- if (READ_ONCE(priv->last_set) == 0)
+- WRITE_ONCE(priv->last_set, 1);
++ if (READ_ONCE(last->jiffies) != jiffies)
++ WRITE_ONCE(last->jiffies, jiffies);
++ if (READ_ONCE(last->set) == 0)
++ WRITE_ONCE(last->set, 1);
+ }
+
+ static int nft_last_dump(struct sk_buff *skb, const struct nft_expr *expr)
+ {
+ struct nft_last_priv *priv = nft_expr_priv(expr);
+- unsigned long last_jiffies = READ_ONCE(priv->last_jiffies);
+- u32 last_set = READ_ONCE(priv->last_set);
++ struct nft_last *last = priv->last;
++ unsigned long last_jiffies = READ_ONCE(last->jiffies);
++ u32 last_set = READ_ONCE(last->set);
+ __be64 msecs;
+
+ if (time_before(jiffies, last_jiffies)) {
+- WRITE_ONCE(priv->last_set, 0);
++ WRITE_ONCE(last->set, 0);
+ last_set = 0;
+ }
+
+@@ -81,11 +93,36 @@ nla_put_failure:
+ return -1;
+ }
+
++static void nft_last_destroy(const struct nft_ctx *ctx,
++ const struct nft_expr *expr)
++{
++ struct nft_last_priv *priv = nft_expr_priv(expr);
++
++ kfree(priv->last);
++}
++
++static int nft_last_clone(struct nft_expr *dst, const struct nft_expr *src)
++{
++ struct nft_last_priv *priv_dst = nft_expr_priv(dst);
++ struct nft_last_priv *priv_src = nft_expr_priv(src);
++
++ priv_dst->last = kzalloc(sizeof(*priv_dst->last), GFP_ATOMIC);
++ if (!priv_dst->last)
++ return -ENOMEM;
++
++ priv_dst->last->set = priv_src->last->set;
++ priv_dst->last->jiffies = priv_src->last->jiffies;
++
++ return 0;
++}
++
+ static const struct nft_expr_ops nft_last_ops = {
+ .type = &nft_last_type,
+ .size = NFT_EXPR_SIZE(sizeof(struct nft_last_priv)),
+ .eval = nft_last_eval,
+ .init = nft_last_init,
++ .destroy = nft_last_destroy,
++ .clone = nft_last_clone,
+ .dump = nft_last_dump,
+ };
+
+diff --git a/net/netfilter/nft_limit.c b/net/netfilter/nft_limit.c
+index 82ec27bdf9412..593fa07f10d5e 100644
+--- a/net/netfilter/nft_limit.c
++++ b/net/netfilter/nft_limit.c
+@@ -18,6 +18,10 @@ struct nft_limit {
+ spinlock_t lock;
+ u64 last;
+ u64 tokens;
++};
++
++struct nft_limit_priv {
++ struct nft_limit *limit;
+ u64 tokens_max;
+ u64 rate;
+ u64 nsecs;
+@@ -25,93 +29,102 @@ struct nft_limit {
+ bool invert;
+ };
+
+-static inline bool nft_limit_eval(struct nft_limit *limit, u64 cost)
++static inline bool nft_limit_eval(struct nft_limit_priv *priv, u64 cost)
+ {
+ u64 now, tokens;
+ s64 delta;
+
+- spin_lock_bh(&limit->lock);
++ spin_lock_bh(&priv->limit->lock);
+ now = ktime_get_ns();
+- tokens = limit->tokens + now - limit->last;
+- if (tokens > limit->tokens_max)
+- tokens = limit->tokens_max;
++ tokens = priv->limit->tokens + now - priv->limit->last;
++ if (tokens > priv->tokens_max)
++ tokens = priv->tokens_max;
+
+- limit->last = now;
++ priv->limit->last = now;
+ delta = tokens - cost;
+ if (delta >= 0) {
+- limit->tokens = delta;
+- spin_unlock_bh(&limit->lock);
+- return limit->invert;
++ priv->limit->tokens = delta;
++ spin_unlock_bh(&priv->limit->lock);
++ return priv->invert;
+ }
+- limit->tokens = tokens;
+- spin_unlock_bh(&limit->lock);
+- return !limit->invert;
++ priv->limit->tokens = tokens;
++ spin_unlock_bh(&priv->limit->lock);
++ return !priv->invert;
+ }
+
+ /* Use same default as in iptables. */
+ #define NFT_LIMIT_PKT_BURST_DEFAULT 5
+
+-static int nft_limit_init(struct nft_limit *limit,
++static int nft_limit_init(struct nft_limit_priv *priv,
+ const struct nlattr * const tb[], bool pkts)
+ {
++ bool invert = false;
+ u64 unit, tokens;
+
+ if (tb[NFTA_LIMIT_RATE] == NULL ||
+ tb[NFTA_LIMIT_UNIT] == NULL)
+ return -EINVAL;
+
+- limit->rate = be64_to_cpu(nla_get_be64(tb[NFTA_LIMIT_RATE]));
++ priv->rate = be64_to_cpu(nla_get_be64(tb[NFTA_LIMIT_RATE]));
+ unit = be64_to_cpu(nla_get_be64(tb[NFTA_LIMIT_UNIT]));
+- limit->nsecs = unit * NSEC_PER_SEC;
+- if (limit->rate == 0 || limit->nsecs < unit)
++ priv->nsecs = unit * NSEC_PER_SEC;
++ if (priv->rate == 0 || priv->nsecs < unit)
+ return -EOVERFLOW;
+
+ if (tb[NFTA_LIMIT_BURST])
+- limit->burst = ntohl(nla_get_be32(tb[NFTA_LIMIT_BURST]));
++ priv->burst = ntohl(nla_get_be32(tb[NFTA_LIMIT_BURST]));
+
+- if (pkts && limit->burst == 0)
+- limit->burst = NFT_LIMIT_PKT_BURST_DEFAULT;
++ if (pkts && priv->burst == 0)
++ priv->burst = NFT_LIMIT_PKT_BURST_DEFAULT;
+
+- if (limit->rate + limit->burst < limit->rate)
++ if (priv->rate + priv->burst < priv->rate)
+ return -EOVERFLOW;
+
+ if (pkts) {
+- tokens = div64_u64(limit->nsecs, limit->rate) * limit->burst;
++ tokens = div64_u64(priv->nsecs, priv->rate) * priv->burst;
+ } else {
+ /* The token bucket size limits the number of tokens can be
+ * accumulated. tokens_max specifies the bucket size.
+ * tokens_max = unit * (rate + burst) / rate.
+ */
+- tokens = div64_u64(limit->nsecs * (limit->rate + limit->burst),
+- limit->rate);
++ tokens = div64_u64(priv->nsecs * (priv->rate + priv->burst),
++ priv->rate);
+ }
+
+- limit->tokens = tokens;
+- limit->tokens_max = limit->tokens;
+-
+ if (tb[NFTA_LIMIT_FLAGS]) {
+ u32 flags = ntohl(nla_get_be32(tb[NFTA_LIMIT_FLAGS]));
+
++ if (flags & ~NFT_LIMIT_F_INV)
++ return -EOPNOTSUPP;
++
+ if (flags & NFT_LIMIT_F_INV)
+- limit->invert = true;
++ invert = true;
+ }
+- limit->last = ktime_get_ns();
+- spin_lock_init(&limit->lock);
++
++ priv->limit = kmalloc(sizeof(*priv->limit), GFP_KERNEL_ACCOUNT);
++ if (!priv->limit)
++ return -ENOMEM;
++
++ priv->limit->tokens = tokens;
++ priv->tokens_max = priv->limit->tokens;
++ priv->invert = invert;
++ priv->limit->last = ktime_get_ns();
++ spin_lock_init(&priv->limit->lock);
+
+ return 0;
+ }
+
+-static int nft_limit_dump(struct sk_buff *skb, const struct nft_limit *limit,
++static int nft_limit_dump(struct sk_buff *skb, const struct nft_limit_priv *priv,
+ enum nft_limit_type type)
+ {
+- u32 flags = limit->invert ? NFT_LIMIT_F_INV : 0;
+- u64 secs = div_u64(limit->nsecs, NSEC_PER_SEC);
++ u32 flags = priv->invert ? NFT_LIMIT_F_INV : 0;
++ u64 secs = div_u64(priv->nsecs, NSEC_PER_SEC);
+
+- if (nla_put_be64(skb, NFTA_LIMIT_RATE, cpu_to_be64(limit->rate),
++ if (nla_put_be64(skb, NFTA_LIMIT_RATE, cpu_to_be64(priv->rate),
+ NFTA_LIMIT_PAD) ||
+ nla_put_be64(skb, NFTA_LIMIT_UNIT, cpu_to_be64(secs),
+ NFTA_LIMIT_PAD) ||
+- nla_put_be32(skb, NFTA_LIMIT_BURST, htonl(limit->burst)) ||
++ nla_put_be32(skb, NFTA_LIMIT_BURST, htonl(priv->burst)) ||
+ nla_put_be32(skb, NFTA_LIMIT_TYPE, htonl(type)) ||
+ nla_put_be32(skb, NFTA_LIMIT_FLAGS, htonl(flags)))
+ goto nla_put_failure;
+@@ -121,8 +134,34 @@ nla_put_failure:
+ return -1;
+ }
+
+-struct nft_limit_pkts {
+- struct nft_limit limit;
++static void nft_limit_destroy(const struct nft_ctx *ctx,
++ const struct nft_limit_priv *priv)
++{
++ kfree(priv->limit);
++}
++
++static int nft_limit_clone(struct nft_limit_priv *priv_dst,
++ const struct nft_limit_priv *priv_src)
++{
++ priv_dst->tokens_max = priv_src->tokens_max;
++ priv_dst->rate = priv_src->rate;
++ priv_dst->nsecs = priv_src->nsecs;
++ priv_dst->burst = priv_src->burst;
++ priv_dst->invert = priv_src->invert;
++
++ priv_dst->limit = kmalloc(sizeof(*priv_dst->limit), GFP_ATOMIC);
++ if (!priv_dst->limit)
++ return -ENOMEM;
++
++ spin_lock_init(&priv_dst->limit->lock);
++ priv_dst->limit->tokens = priv_src->tokens_max;
++ priv_dst->limit->last = ktime_get_ns();
++
++ return 0;
++}
++
++struct nft_limit_priv_pkts {
++ struct nft_limit_priv limit;
+ u64 cost;
+ };
+
+@@ -130,7 +169,7 @@ static void nft_limit_pkts_eval(const struct nft_expr *expr,
+ struct nft_regs *regs,
+ const struct nft_pktinfo *pkt)
+ {
+- struct nft_limit_pkts *priv = nft_expr_priv(expr);
++ struct nft_limit_priv_pkts *priv = nft_expr_priv(expr);
+
+ if (nft_limit_eval(&priv->limit, priv->cost))
+ regs->verdict.code = NFT_BREAK;
+@@ -148,7 +187,7 @@ static int nft_limit_pkts_init(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nlattr * const tb[])
+ {
+- struct nft_limit_pkts *priv = nft_expr_priv(expr);
++ struct nft_limit_priv_pkts *priv = nft_expr_priv(expr);
+ int err;
+
+ err = nft_limit_init(&priv->limit, tb, true);
+@@ -161,17 +200,37 @@ static int nft_limit_pkts_init(const struct nft_ctx *ctx,
+
+ static int nft_limit_pkts_dump(struct sk_buff *skb, const struct nft_expr *expr)
+ {
+- const struct nft_limit_pkts *priv = nft_expr_priv(expr);
++ const struct nft_limit_priv_pkts *priv = nft_expr_priv(expr);
+
+ return nft_limit_dump(skb, &priv->limit, NFT_LIMIT_PKTS);
+ }
+
++static void nft_limit_pkts_destroy(const struct nft_ctx *ctx,
++ const struct nft_expr *expr)
++{
++ const struct nft_limit_priv_pkts *priv = nft_expr_priv(expr);
++
++ nft_limit_destroy(ctx, &priv->limit);
++}
++
++static int nft_limit_pkts_clone(struct nft_expr *dst, const struct nft_expr *src)
++{
++ struct nft_limit_priv_pkts *priv_dst = nft_expr_priv(dst);
++ struct nft_limit_priv_pkts *priv_src = nft_expr_priv(src);
++
++ priv_dst->cost = priv_src->cost;
++
++ return nft_limit_clone(&priv_dst->limit, &priv_src->limit);
++}
++
+ static struct nft_expr_type nft_limit_type;
+ static const struct nft_expr_ops nft_limit_pkts_ops = {
+ .type = &nft_limit_type,
+- .size = NFT_EXPR_SIZE(sizeof(struct nft_limit_pkts)),
++ .size = NFT_EXPR_SIZE(sizeof(struct nft_limit_priv_pkts)),
+ .eval = nft_limit_pkts_eval,
+ .init = nft_limit_pkts_init,
++ .destroy = nft_limit_pkts_destroy,
++ .clone = nft_limit_pkts_clone,
+ .dump = nft_limit_pkts_dump,
+ };
+
+@@ -179,7 +238,7 @@ static void nft_limit_bytes_eval(const struct nft_expr *expr,
+ struct nft_regs *regs,
+ const struct nft_pktinfo *pkt)
+ {
+- struct nft_limit *priv = nft_expr_priv(expr);
++ struct nft_limit_priv *priv = nft_expr_priv(expr);
+ u64 cost = div64_u64(priv->nsecs * pkt->skb->len, priv->rate);
+
+ if (nft_limit_eval(priv, cost))
+@@ -190,7 +249,7 @@ static int nft_limit_bytes_init(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nlattr * const tb[])
+ {
+- struct nft_limit *priv = nft_expr_priv(expr);
++ struct nft_limit_priv *priv = nft_expr_priv(expr);
+
+ return nft_limit_init(priv, tb, false);
+ }
+@@ -198,17 +257,35 @@ static int nft_limit_bytes_init(const struct nft_ctx *ctx,
+ static int nft_limit_bytes_dump(struct sk_buff *skb,
+ const struct nft_expr *expr)
+ {
+- const struct nft_limit *priv = nft_expr_priv(expr);
++ const struct nft_limit_priv *priv = nft_expr_priv(expr);
+
+ return nft_limit_dump(skb, priv, NFT_LIMIT_PKT_BYTES);
+ }
+
++static void nft_limit_bytes_destroy(const struct nft_ctx *ctx,
++ const struct nft_expr *expr)
++{
++ const struct nft_limit_priv *priv = nft_expr_priv(expr);
++
++ nft_limit_destroy(ctx, priv);
++}
++
++static int nft_limit_bytes_clone(struct nft_expr *dst, const struct nft_expr *src)
++{
++ struct nft_limit_priv *priv_dst = nft_expr_priv(dst);
++ struct nft_limit_priv *priv_src = nft_expr_priv(src);
++
++ return nft_limit_clone(priv_dst, priv_src);
++}
++
+ static const struct nft_expr_ops nft_limit_bytes_ops = {
+ .type = &nft_limit_type,
+- .size = NFT_EXPR_SIZE(sizeof(struct nft_limit)),
++ .size = NFT_EXPR_SIZE(sizeof(struct nft_limit_priv)),
+ .eval = nft_limit_bytes_eval,
+ .init = nft_limit_bytes_init,
+ .dump = nft_limit_bytes_dump,
++ .clone = nft_limit_bytes_clone,
++ .destroy = nft_limit_bytes_destroy,
+ };
+
+ static const struct nft_expr_ops *
+@@ -240,7 +317,7 @@ static void nft_limit_obj_pkts_eval(struct nft_object *obj,
+ struct nft_regs *regs,
+ const struct nft_pktinfo *pkt)
+ {
+- struct nft_limit_pkts *priv = nft_obj_data(obj);
++ struct nft_limit_priv_pkts *priv = nft_obj_data(obj);
+
+ if (nft_limit_eval(&priv->limit, priv->cost))
+ regs->verdict.code = NFT_BREAK;
+@@ -250,7 +327,7 @@ static int nft_limit_obj_pkts_init(const struct nft_ctx *ctx,
+ const struct nlattr * const tb[],
+ struct nft_object *obj)
+ {
+- struct nft_limit_pkts *priv = nft_obj_data(obj);
++ struct nft_limit_priv_pkts *priv = nft_obj_data(obj);
+ int err;
+
+ err = nft_limit_init(&priv->limit, tb, true);
+@@ -265,16 +342,25 @@ static int nft_limit_obj_pkts_dump(struct sk_buff *skb,
+ struct nft_object *obj,
+ bool reset)
+ {
+- const struct nft_limit_pkts *priv = nft_obj_data(obj);
++ const struct nft_limit_priv_pkts *priv = nft_obj_data(obj);
+
+ return nft_limit_dump(skb, &priv->limit, NFT_LIMIT_PKTS);
+ }
+
++static void nft_limit_obj_pkts_destroy(const struct nft_ctx *ctx,
++ struct nft_object *obj)
++{
++ struct nft_limit_priv_pkts *priv = nft_obj_data(obj);
++
++ nft_limit_destroy(ctx, &priv->limit);
++}
++
+ static struct nft_object_type nft_limit_obj_type;
+ static const struct nft_object_ops nft_limit_obj_pkts_ops = {
+ .type = &nft_limit_obj_type,
+- .size = NFT_EXPR_SIZE(sizeof(struct nft_limit_pkts)),
++ .size = NFT_EXPR_SIZE(sizeof(struct nft_limit_priv_pkts)),
+ .init = nft_limit_obj_pkts_init,
++ .destroy = nft_limit_obj_pkts_destroy,
+ .eval = nft_limit_obj_pkts_eval,
+ .dump = nft_limit_obj_pkts_dump,
+ };
+@@ -283,7 +369,7 @@ static void nft_limit_obj_bytes_eval(struct nft_object *obj,
+ struct nft_regs *regs,
+ const struct nft_pktinfo *pkt)
+ {
+- struct nft_limit *priv = nft_obj_data(obj);
++ struct nft_limit_priv *priv = nft_obj_data(obj);
+ u64 cost = div64_u64(priv->nsecs * pkt->skb->len, priv->rate);
+
+ if (nft_limit_eval(priv, cost))
+@@ -294,7 +380,7 @@ static int nft_limit_obj_bytes_init(const struct nft_ctx *ctx,
+ const struct nlattr * const tb[],
+ struct nft_object *obj)
+ {
+- struct nft_limit *priv = nft_obj_data(obj);
++ struct nft_limit_priv *priv = nft_obj_data(obj);
+
+ return nft_limit_init(priv, tb, false);
+ }
+@@ -303,16 +389,25 @@ static int nft_limit_obj_bytes_dump(struct sk_buff *skb,
+ struct nft_object *obj,
+ bool reset)
+ {
+- const struct nft_limit *priv = nft_obj_data(obj);
++ const struct nft_limit_priv *priv = nft_obj_data(obj);
+
+ return nft_limit_dump(skb, priv, NFT_LIMIT_PKT_BYTES);
+ }
+
++static void nft_limit_obj_bytes_destroy(const struct nft_ctx *ctx,
++ struct nft_object *obj)
++{
++ struct nft_limit_priv *priv = nft_obj_data(obj);
++
++ nft_limit_destroy(ctx, priv);
++}
++
+ static struct nft_object_type nft_limit_obj_type;
+ static const struct nft_object_ops nft_limit_obj_bytes_ops = {
+ .type = &nft_limit_obj_type,
+- .size = sizeof(struct nft_limit),
++ .size = sizeof(struct nft_limit_priv),
+ .init = nft_limit_obj_bytes_init,
++ .destroy = nft_limit_obj_bytes_destroy,
+ .eval = nft_limit_obj_bytes_eval,
+ .dump = nft_limit_obj_bytes_dump,
+ };
+diff --git a/net/netfilter/nft_quota.c b/net/netfilter/nft_quota.c
+index c4d1389f7185a..73de40007dfe9 100644
+--- a/net/netfilter/nft_quota.c
++++ b/net/netfilter/nft_quota.c
+@@ -15,13 +15,13 @@
+ struct nft_quota {
+ atomic64_t quota;
+ unsigned long flags;
+- atomic64_t consumed;
++ atomic64_t *consumed;
+ };
+
+ static inline bool nft_overquota(struct nft_quota *priv,
+ const struct sk_buff *skb)
+ {
+- return atomic64_add_return(skb->len, &priv->consumed) >=
++ return atomic64_add_return(skb->len, priv->consumed) >=
+ atomic64_read(&priv->quota);
+ }
+
+@@ -90,13 +90,23 @@ static int nft_quota_do_init(const struct nlattr * const tb[],
+ return -EOPNOTSUPP;
+ }
+
++ priv->consumed = kmalloc(sizeof(*priv->consumed), GFP_KERNEL_ACCOUNT);
++ if (!priv->consumed)
++ return -ENOMEM;
++
+ atomic64_set(&priv->quota, quota);
+ priv->flags = flags;
+- atomic64_set(&priv->consumed, consumed);
++ atomic64_set(priv->consumed, consumed);
+
+ return 0;
+ }
+
++static void nft_quota_do_destroy(const struct nft_ctx *ctx,
++ struct nft_quota *priv)
++{
++ kfree(priv->consumed);
++}
++
+ static int nft_quota_obj_init(const struct nft_ctx *ctx,
+ const struct nlattr * const tb[],
+ struct nft_object *obj)
+@@ -128,7 +138,7 @@ static int nft_quota_do_dump(struct sk_buff *skb, struct nft_quota *priv,
+ * that we see, don't go over the quota boundary in what we send to
+ * userspace.
+ */
+- consumed = atomic64_read(&priv->consumed);
++ consumed = atomic64_read(priv->consumed);
+ quota = atomic64_read(&priv->quota);
+ if (consumed >= quota) {
+ consumed_cap = quota;
+@@ -145,7 +155,7 @@ static int nft_quota_do_dump(struct sk_buff *skb, struct nft_quota *priv,
+ goto nla_put_failure;
+
+ if (reset) {
+- atomic64_sub(consumed, &priv->consumed);
++ atomic64_sub(consumed, priv->consumed);
+ clear_bit(NFT_QUOTA_DEPLETED_BIT, &priv->flags);
+ }
+ return 0;
+@@ -162,11 +172,20 @@ static int nft_quota_obj_dump(struct sk_buff *skb, struct nft_object *obj,
+ return nft_quota_do_dump(skb, priv, reset);
+ }
+
++static void nft_quota_obj_destroy(const struct nft_ctx *ctx,
++ struct nft_object *obj)
++{
++ struct nft_quota *priv = nft_obj_data(obj);
++
++ return nft_quota_do_destroy(ctx, priv);
++}
++
+ static struct nft_object_type nft_quota_obj_type;
+ static const struct nft_object_ops nft_quota_obj_ops = {
+ .type = &nft_quota_obj_type,
+ .size = sizeof(struct nft_quota),
+ .init = nft_quota_obj_init,
++ .destroy = nft_quota_obj_destroy,
+ .eval = nft_quota_obj_eval,
+ .dump = nft_quota_obj_dump,
+ .update = nft_quota_obj_update,
+@@ -205,12 +224,39 @@ static int nft_quota_dump(struct sk_buff *skb, const struct nft_expr *expr)
+ return nft_quota_do_dump(skb, priv, false);
+ }
+
++static void nft_quota_destroy(const struct nft_ctx *ctx,
++ const struct nft_expr *expr)
++{
++ struct nft_quota *priv = nft_expr_priv(expr);
++
++ return nft_quota_do_destroy(ctx, priv);
++}
++
++static int nft_quota_clone(struct nft_expr *dst, const struct nft_expr *src)
++{
++ struct nft_quota *priv_dst = nft_expr_priv(dst);
++ struct nft_quota *priv_src = nft_expr_priv(src);
++
++ priv_dst->quota = priv_src->quota;
++ priv_dst->flags = priv_src->flags;
++
++ priv_dst->consumed = kmalloc(sizeof(*priv_dst->consumed), GFP_ATOMIC);
++ if (!priv_dst->consumed)
++ return -ENOMEM;
++
++ *priv_dst->consumed = *priv_src->consumed;
++
++ return 0;
++}
++
+ static struct nft_expr_type nft_quota_type;
+ static const struct nft_expr_ops nft_quota_ops = {
+ .type = &nft_quota_type,
+ .size = NFT_EXPR_SIZE(sizeof(struct nft_quota)),
+ .eval = nft_quota_eval,
+ .init = nft_quota_init,
++ .destroy = nft_quota_destroy,
++ .clone = nft_quota_clone,
+ .dump = nft_quota_dump,
+ };
+
+diff --git a/net/netlabel/netlabel_calipso.c b/net/netlabel/netlabel_calipso.c
+index 91a19c3ea1a36..84ef4a29864bb 100644
+--- a/net/netlabel/netlabel_calipso.c
++++ b/net/netlabel/netlabel_calipso.c
+@@ -54,6 +54,28 @@ static const struct nla_policy calipso_genl_policy[NLBL_CALIPSO_A_MAX + 1] = {
+ [NLBL_CALIPSO_A_MTYPE] = { .type = NLA_U32 },
+ };
+
++static const struct netlbl_calipso_ops *calipso_ops;
++
++/**
++ * netlbl_calipso_ops_register - Register the CALIPSO operations
++ * @ops: ops to register
++ *
++ * Description:
++ * Register the CALIPSO packet engine operations.
++ *
++ */
++const struct netlbl_calipso_ops *
++netlbl_calipso_ops_register(const struct netlbl_calipso_ops *ops)
++{
++ return xchg(&calipso_ops, ops);
++}
++EXPORT_SYMBOL(netlbl_calipso_ops_register);
++
++static const struct netlbl_calipso_ops *netlbl_calipso_ops_get(void)
++{
++ return READ_ONCE(calipso_ops);
++}
++
+ /* NetLabel Command Handlers
+ */
+ /**
+@@ -96,15 +118,18 @@ static int netlbl_calipso_add_pass(struct genl_info *info,
+ *
+ */
+ static int netlbl_calipso_add(struct sk_buff *skb, struct genl_info *info)
+-
+ {
+ int ret_val = -EINVAL;
+ struct netlbl_audit audit_info;
++ const struct netlbl_calipso_ops *ops = netlbl_calipso_ops_get();
+
+ if (!info->attrs[NLBL_CALIPSO_A_DOI] ||
+ !info->attrs[NLBL_CALIPSO_A_MTYPE])
+ return -EINVAL;
+
++ if (!ops)
++ return -EOPNOTSUPP;
++
+ netlbl_netlink_auditinfo(&audit_info);
+ switch (nla_get_u32(info->attrs[NLBL_CALIPSO_A_MTYPE])) {
+ case CALIPSO_MAP_PASS:
+@@ -362,28 +387,6 @@ int __init netlbl_calipso_genl_init(void)
+ return genl_register_family(&netlbl_calipso_gnl_family);
+ }
+
+-static const struct netlbl_calipso_ops *calipso_ops;
+-
+-/**
+- * netlbl_calipso_ops_register - Register the CALIPSO operations
+- * @ops: ops to register
+- *
+- * Description:
+- * Register the CALIPSO packet engine operations.
+- *
+- */
+-const struct netlbl_calipso_ops *
+-netlbl_calipso_ops_register(const struct netlbl_calipso_ops *ops)
+-{
+- return xchg(&calipso_ops, ops);
+-}
+-EXPORT_SYMBOL(netlbl_calipso_ops_register);
+-
+-static const struct netlbl_calipso_ops *netlbl_calipso_ops_get(void)
+-{
+- return READ_ONCE(calipso_ops);
+-}
+-
+ /**
+ * calipso_doi_add - Add a new DOI to the CALIPSO protocol engine
+ * @doi_def: the DOI structure
+diff --git a/net/qrtr/ns.c b/net/qrtr/ns.c
+index 3e40a1ba48f79..4a13b9f7abb44 100644
+--- a/net/qrtr/ns.c
++++ b/net/qrtr/ns.c
+@@ -569,7 +569,9 @@ static int ctrl_cmd_del_server(struct sockaddr_qrtr *from,
+ if (!node)
+ return -ENOENT;
+
+- return server_del(node, port, true);
++ server_del(node, port, true);
++
++ return 0;
+ }
+
+ static int ctrl_cmd_new_lookup(struct sockaddr_qrtr *from,
+diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
+index 81a2d6cbfb441..b4c42b257ae7c 100644
+--- a/net/sched/act_ct.c
++++ b/net/sched/act_ct.c
+@@ -705,7 +705,6 @@ static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
+ if (err || !frag)
+ return err;
+
+- skb_get(skb);
+ mru = tc_skb_cb(skb)->mru;
+
+ if (family == NFPROTO_IPV4) {
+@@ -987,12 +986,8 @@ static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
+ nh_ofs = skb_network_offset(skb);
+ skb_pull_rcsum(skb, nh_ofs);
+ err = tcf_ct_handle_fragments(net, skb, family, p->zone, &defrag);
+- if (err == -EINPROGRESS) {
+- retval = TC_ACT_STOLEN;
+- goto out_clear;
+- }
+ if (err)
+- goto drop;
++ goto out_frag;
+
+ err = tcf_ct_skb_network_trim(skb, family);
+ if (err)
+@@ -1059,6 +1054,11 @@ out_clear:
+ qdisc_skb_cb(skb)->pkt_len = skb->len;
+ return retval;
+
++out_frag:
++ if (err != -EINPROGRESS)
++ tcf_action_inc_drop_qstats(&c->common);
++ return TC_ACT_CONSUMED;
++
+ drop:
+ tcf_action_inc_drop_qstats(&c->common);
+ return TC_ACT_SHOT;
+diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
+index 00e8b60af0f8f..b490f832439e4 100644
+--- a/net/vmw_vsock/virtio_transport_common.c
++++ b/net/vmw_vsock/virtio_transport_common.c
+@@ -355,6 +355,8 @@ virtio_transport_stream_do_dequeue(struct vsock_sock *vsk,
+ struct virtio_vsock_pkt *pkt;
+ size_t bytes, total = 0;
+ u32 free_space;
++ u32 fwd_cnt_delta;
++ bool low_rx_bytes;
+ int err = -EFAULT;
+
+ spin_lock_bh(&vvs->rx_lock);
+@@ -386,7 +388,10 @@ virtio_transport_stream_do_dequeue(struct vsock_sock *vsk,
+ }
+ }
+
+- free_space = vvs->buf_alloc - (vvs->fwd_cnt - vvs->last_fwd_cnt);
++ fwd_cnt_delta = vvs->fwd_cnt - vvs->last_fwd_cnt;
++ free_space = vvs->buf_alloc - fwd_cnt_delta;
++ low_rx_bytes = (vvs->rx_bytes <
++ sock_rcvlowat(sk_vsock(vsk), 0, INT_MAX));
+
+ spin_unlock_bh(&vvs->rx_lock);
+
+@@ -396,9 +401,11 @@ virtio_transport_stream_do_dequeue(struct vsock_sock *vsk,
+ * too high causes extra messages. Too low causes transmitter
+ * stalls. As stalls are in theory more expensive than extra
+ * messages, we set the limit to a high value. TODO: experiment
+- * with different values.
++ * with different values. Also send credit update message when
++ * number of bytes in rx queue is not enough to wake up reader.
+ */
+- if (free_space < VIRTIO_VSOCK_MAX_PKT_BUF_SIZE)
++ if (fwd_cnt_delta &&
++ (free_space < VIRTIO_VSOCK_MAX_PKT_BUF_SIZE || low_rx_bytes))
+ virtio_transport_send_credit_update(vsk);
+
+ return total;
+diff --git a/net/wireless/core.c b/net/wireless/core.c
+index d10686f4bf153..d51d27ff3729d 100644
+--- a/net/wireless/core.c
++++ b/net/wireless/core.c
+@@ -216,7 +216,9 @@ static void cfg80211_rfkill_poll(struct rfkill *rfkill, void *data)
+ {
+ struct cfg80211_registered_device *rdev = data;
+
++ wiphy_lock(&rdev->wiphy);
+ rdev_rfkill_poll(rdev);
++ wiphy_unlock(&rdev->wiphy);
+ }
+
+ void cfg80211_stop_p2p_device(struct cfg80211_registered_device *rdev,
+diff --git a/scripts/Makefile.compiler b/scripts/Makefile.compiler
+index 86ecd2ac874c3..60ddd47bfa1ba 100644
+--- a/scripts/Makefile.compiler
++++ b/scripts/Makefile.compiler
+@@ -68,7 +68,3 @@ cc-ifversion = $(shell [ $(CONFIG_GCC_VERSION)0 $(1) $(2)000 ] && echo $(3) || e
+ # ld-option
+ # Usage: KBUILD_LDFLAGS += $(call ld-option, -X, -Y)
+ ld-option = $(call try-run, $(LD) $(KBUILD_LDFLAGS) $(1) -v,$(1),$(2),$(3))
+-
+-# ld-ifversion
+-# Usage: $(call ld-ifversion, -ge, 22252, y)
+-ld-ifversion = $(shell [ $(CONFIG_LD_VERSION)0 $(1) $(2)0 ] && echo $(3) || echo $(4))
+diff --git a/scripts/pahole-flags.sh b/scripts/pahole-flags.sh
+index d38fa6d84d62a..5c724f6971009 100755
+--- a/scripts/pahole-flags.sh
++++ b/scripts/pahole-flags.sh
+@@ -20,5 +20,8 @@ fi
+ if [ "${pahole_ver}" -ge "124" ]; then
+ extra_paholeopt="${extra_paholeopt} --skip_encoding_btf_enum64"
+ fi
++if [ "${pahole_ver}" -ge "125" ]; then
++ extra_paholeopt="${extra_paholeopt} --skip_encoding_btf_inconsistent_proto --btf_gen_optimized"
++fi
+
+ echo ${extra_paholeopt}
+diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c
+index 5f758b289ace3..d1a385b44d631 100644
+--- a/security/apparmor/policy_unpack.c
++++ b/security/apparmor/policy_unpack.c
+@@ -696,6 +696,10 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
+
+ tmpname = aa_splitn_fqname(name, strlen(name), &tmpns, &ns_len);
+ if (tmpns) {
++ if (!tmpname) {
++ info = "empty profile name";
++ goto fail;
++ }
+ *ns_name = kstrndup(tmpns, ns_len, GFP_KERNEL);
+ if (!*ns_name) {
+ info = "out of memory";
+diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
+index 9ce029b2f2267..ec839716dbac2 100644
+--- a/security/selinux/hooks.c
++++ b/security/selinux/hooks.c
+@@ -4774,6 +4774,13 @@ static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, in
+ return -EINVAL;
+ addr4 = (struct sockaddr_in *)address;
+ if (family_sa == AF_UNSPEC) {
++ if (family == PF_INET6) {
++ /* Length check from inet6_bind_sk() */
++ if (addrlen < SIN6_LEN_RFC2133)
++ return -EINVAL;
++ /* Family check from __inet6_bind() */
++ goto err_af;
++ }
+ /* see __inet_bind(), we only want to allow
+ * AF_UNSPEC if the address is INADDR_ANY
+ */
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 5e0e19c4b509e..9a0824459d136 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -6854,6 +6854,7 @@ enum {
+ ALC290_FIXUP_SUBWOOFER_HSJACK,
+ ALC269_FIXUP_THINKPAD_ACPI,
+ ALC269_FIXUP_DMIC_THINKPAD_ACPI,
++ ALC269VB_FIXUP_CHUWI_COREBOOK_XPRO,
+ ALC255_FIXUP_ACER_MIC_NO_PRESENCE,
+ ALC255_FIXUP_ASUS_MIC_NO_PRESENCE,
+ ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+@@ -7152,6 +7153,14 @@ static const struct hda_fixup alc269_fixups[] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc269_fixup_pincfg_U7x7_headset_mic,
+ },
++ [ALC269VB_FIXUP_CHUWI_COREBOOK_XPRO] = {
++ .type = HDA_FIXUP_PINS,
++ .v.pins = (const struct hda_pintbl[]) {
++ { 0x18, 0x03a19020 }, /* headset mic */
++ { 0x1b, 0x90170150 }, /* speaker */
++ { }
++ },
++ },
+ [ALC269_FIXUP_AMIC] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+@@ -9089,6 +9098,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x103c, 0x87f5, "HP", ALC287_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x87f6, "HP Spectre x360 14", ALC245_FIXUP_HP_X360_AMP),
+ SND_PCI_QUIRK(0x103c, 0x87f7, "HP Spectre x360 14", ALC245_FIXUP_HP_X360_AMP),
++ SND_PCI_QUIRK(0x103c, 0x87fe, "HP Laptop 15s-fq2xxx", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
+ SND_PCI_QUIRK(0x103c, 0x8805, "HP ProBook 650 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x880d, "HP EliteBook 830 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8811, "HP Spectre x360 15-eb1xxx", ALC285_FIXUP_HP_SPECTRE_X360_EB1),
+@@ -9402,6 +9412,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1d72, 0x1901, "RedmiBook 14", ALC256_FIXUP_ASUS_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1d72, 0x1945, "Redmi G", ALC256_FIXUP_ASUS_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1d72, 0x1947, "RedmiBook Air", ALC255_FIXUP_XIAOMI_HEADSET_MIC),
++ SND_PCI_QUIRK(0x2782, 0x0232, "CHUWI CoreBook XPro", ALC269VB_FIXUP_CHUWI_COREBOOK_XPRO),
+ SND_PCI_QUIRK(0x8086, 0x2074, "Intel NUC 8", ALC233_FIXUP_INTEL_NUC8_DMIC),
+ SND_PCI_QUIRK(0x8086, 0x2080, "Intel NUC 8 Rugged", ALC256_FIXUP_INTEL_NUC8_RUGGED),
+ SND_PCI_QUIRK(0x8086, 0x2081, "Intel NUC 10", ALC256_FIXUP_INTEL_NUC10),
+diff --git a/sound/pci/oxygen/oxygen_mixer.c b/sound/pci/oxygen/oxygen_mixer.c
+index 46705ec77b481..eb3aca16359c5 100644
+--- a/sound/pci/oxygen/oxygen_mixer.c
++++ b/sound/pci/oxygen/oxygen_mixer.c
+@@ -718,7 +718,7 @@ static int ac97_fp_rec_volume_put(struct snd_kcontrol *ctl,
+ oldreg = oxygen_read_ac97(chip, 1, AC97_REC_GAIN);
+ newreg = oldreg & ~0x0707;
+ newreg = newreg | (value->value.integer.value[0] & 7);
+- newreg = newreg | ((value->value.integer.value[0] & 7) << 8);
++ newreg = newreg | ((value->value.integer.value[1] & 7) << 8);
+ change = newreg != oldreg;
+ if (change)
+ oxygen_write_ac97(chip, 1, AC97_REC_GAIN, newreg);
+diff --git a/sound/soc/atmel/sam9g20_wm8731.c b/sound/soc/atmel/sam9g20_wm8731.c
+index d243de5f23dc1..8a55d59a6c2aa 100644
+--- a/sound/soc/atmel/sam9g20_wm8731.c
++++ b/sound/soc/atmel/sam9g20_wm8731.c
+@@ -46,6 +46,35 @@
+ */
+ #undef ENABLE_MIC_INPUT
+
++static struct clk *mclk;
++
++static int at91sam9g20ek_set_bias_level(struct snd_soc_card *card,
++ struct snd_soc_dapm_context *dapm,
++ enum snd_soc_bias_level level)
++{
++ static int mclk_on;
++ int ret = 0;
++
++ switch (level) {
++ case SND_SOC_BIAS_ON:
++ case SND_SOC_BIAS_PREPARE:
++ if (!mclk_on)
++ ret = clk_enable(mclk);
++ if (ret == 0)
++ mclk_on = 1;
++ break;
++
++ case SND_SOC_BIAS_OFF:
++ case SND_SOC_BIAS_STANDBY:
++ if (mclk_on)
++ clk_disable(mclk);
++ mclk_on = 0;
++ break;
++ }
++
++ return ret;
++}
++
+ static const struct snd_soc_dapm_widget at91sam9g20ek_dapm_widgets[] = {
+ SND_SOC_DAPM_MIC("Int Mic", NULL),
+ SND_SOC_DAPM_SPK("Ext Spk", NULL),
+@@ -106,6 +135,7 @@ static struct snd_soc_card snd_soc_at91sam9g20ek = {
+ .owner = THIS_MODULE,
+ .dai_link = &at91sam9g20ek_dai,
+ .num_links = 1,
++ .set_bias_level = at91sam9g20ek_set_bias_level,
+
+ .dapm_widgets = at91sam9g20ek_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(at91sam9g20ek_dapm_widgets),
+@@ -118,6 +148,7 @@ static int at91sam9g20ek_audio_probe(struct platform_device *pdev)
+ {
+ struct device_node *np = pdev->dev.of_node;
+ struct device_node *codec_np, *cpu_np;
++ struct clk *pllb;
+ struct snd_soc_card *card = &snd_soc_at91sam9g20ek;
+ int ret;
+
+@@ -131,6 +162,31 @@ static int at91sam9g20ek_audio_probe(struct platform_device *pdev)
+ return -EINVAL;
+ }
+
++ /*
++ * Codec MCLK is supplied by PCK0 - set it up.
++ */
++ mclk = clk_get(NULL, "pck0");
++ if (IS_ERR(mclk)) {
++ dev_err(&pdev->dev, "Failed to get MCLK\n");
++ ret = PTR_ERR(mclk);
++ goto err;
++ }
++
++ pllb = clk_get(NULL, "pllb");
++ if (IS_ERR(pllb)) {
++ dev_err(&pdev->dev, "Failed to get PLLB\n");
++ ret = PTR_ERR(pllb);
++ goto err_mclk;
++ }
++ ret = clk_set_parent(mclk, pllb);
++ clk_put(pllb);
++ if (ret != 0) {
++ dev_err(&pdev->dev, "Failed to set MCLK parent\n");
++ goto err_mclk;
++ }
++
++ clk_set_rate(mclk, MCLK_RATE);
++
+ card->dev = &pdev->dev;
+
+ /* Parse device node info */
+@@ -174,6 +230,9 @@ static int at91sam9g20ek_audio_probe(struct platform_device *pdev)
+
+ return ret;
+
++err_mclk:
++ clk_put(mclk);
++ mclk = NULL;
+ err:
+ atmel_ssc_put_audio(0);
+ return ret;
+@@ -183,6 +242,8 @@ static int at91sam9g20ek_audio_remove(struct platform_device *pdev)
+ {
+ struct snd_soc_card *card = platform_get_drvdata(pdev);
+
++ clk_disable(mclk);
++ mclk = NULL;
+ snd_soc_unregister_card(card);
+ atmel_ssc_put_audio(0);
+
+diff --git a/sound/soc/codecs/cs35l33.c b/sound/soc/codecs/cs35l33.c
+index 2a6f5e46d0312..3ea311fda98f1 100644
+--- a/sound/soc/codecs/cs35l33.c
++++ b/sound/soc/codecs/cs35l33.c
+@@ -22,13 +22,11 @@
+ #include <sound/soc-dapm.h>
+ #include <sound/initval.h>
+ #include <sound/tlv.h>
+-#include <linux/gpio.h>
+ #include <linux/gpio/consumer.h>
+ #include <sound/cs35l33.h>
+ #include <linux/pm_runtime.h>
+ #include <linux/regulator/consumer.h>
+ #include <linux/regulator/machine.h>
+-#include <linux/of_gpio.h>
+ #include <linux/of.h>
+ #include <linux/of_device.h>
+ #include <linux/of_irq.h>
+@@ -1169,7 +1167,7 @@ static int cs35l33_i2c_probe(struct i2c_client *i2c_client,
+
+ /* We could issue !RST or skip it based on AMP topology */
+ cs35l33->reset_gpio = devm_gpiod_get_optional(&i2c_client->dev,
+- "reset-gpios", GPIOD_OUT_HIGH);
++ "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(cs35l33->reset_gpio)) {
+ dev_err(&i2c_client->dev, "%s ERROR: Can't get reset GPIO\n",
+ __func__);
+diff --git a/sound/soc/codecs/cs35l34.c b/sound/soc/codecs/cs35l34.c
+index ed678241c22bc..be0edeebddfe0 100644
+--- a/sound/soc/codecs/cs35l34.c
++++ b/sound/soc/codecs/cs35l34.c
+@@ -20,14 +20,12 @@
+ #include <linux/regulator/machine.h>
+ #include <linux/pm_runtime.h>
+ #include <linux/of_device.h>
+-#include <linux/of_gpio.h>
+ #include <linux/of_irq.h>
+ #include <sound/core.h>
+ #include <sound/pcm.h>
+ #include <sound/pcm_params.h>
+ #include <sound/soc.h>
+ #include <sound/soc-dapm.h>
+-#include <linux/gpio.h>
+ #include <linux/gpio/consumer.h>
+ #include <sound/initval.h>
+ #include <sound/tlv.h>
+@@ -1063,7 +1061,7 @@ static int cs35l34_i2c_probe(struct i2c_client *i2c_client,
+ dev_err(&i2c_client->dev, "Failed to request IRQ: %d\n", ret);
+
+ cs35l34->reset_gpio = devm_gpiod_get_optional(&i2c_client->dev,
+- "reset-gpios", GPIOD_OUT_LOW);
++ "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(cs35l34->reset_gpio)) {
+ ret = PTR_ERR(cs35l34->reset_gpio);
+ goto err_regulator;
+diff --git a/sound/soc/codecs/cs43130.c b/sound/soc/codecs/cs43130.c
+index 44b20c1ef8517..d3edc5796d669 100644
+--- a/sound/soc/codecs/cs43130.c
++++ b/sound/soc/codecs/cs43130.c
+@@ -579,7 +579,7 @@ static int cs43130_set_sp_fmt(int dai_id, unsigned int bitwidth_sclk,
+ break;
+ case SND_SOC_DAIFMT_LEFT_J:
+ hi_size = bitwidth_sclk;
+- frm_delay = 2;
++ frm_delay = 0;
+ frm_phase = 1;
+ break;
+ case SND_SOC_DAIFMT_DSP_A:
+@@ -1684,7 +1684,7 @@ static ssize_t hpload_dc_r_show(struct device *dev,
+ return cs43130_show_dc(dev, buf, HP_RIGHT);
+ }
+
+-static u16 const cs43130_ac_freq[CS43130_AC_FREQ] = {
++static const u16 cs43130_ac_freq[CS43130_AC_FREQ] = {
+ 24,
+ 43,
+ 93,
+@@ -2365,7 +2365,7 @@ static const struct regmap_config cs43130_regmap = {
+ .use_single_write = true,
+ };
+
+-static u16 const cs43130_dc_threshold[CS43130_DC_THRESHOLD] = {
++static const u16 cs43130_dc_threshold[CS43130_DC_THRESHOLD] = {
+ 50,
+ 120,
+ };
+diff --git a/sound/soc/codecs/da7219-aad.c b/sound/soc/codecs/da7219-aad.c
+index 7d18de959439f..4dc6eed6c18aa 100644
+--- a/sound/soc/codecs/da7219-aad.c
++++ b/sound/soc/codecs/da7219-aad.c
+@@ -654,7 +654,7 @@ static struct da7219_aad_pdata *da7219_aad_fw_to_pdata(struct device *dev)
+ aad_pdata->mic_det_thr =
+ da7219_aad_fw_mic_det_thr(dev, fw_val32);
+ else
+- aad_pdata->mic_det_thr = DA7219_AAD_MIC_DET_THR_500_OHMS;
++ aad_pdata->mic_det_thr = DA7219_AAD_MIC_DET_THR_200_OHMS;
+
+ if (fwnode_property_read_u32(aad_np, "dlg,jack-ins-deb", &fw_val32) >= 0)
+ aad_pdata->jack_ins_deb =
+diff --git a/sound/soc/codecs/nau8822.c b/sound/soc/codecs/nau8822.c
+index b436e532993d1..6ffd0f5e3a609 100644
+--- a/sound/soc/codecs/nau8822.c
++++ b/sound/soc/codecs/nau8822.c
+@@ -184,6 +184,7 @@ static int nau8822_eq_get(struct snd_kcontrol *kcontrol,
+ struct soc_bytes_ext *params = (void *)kcontrol->private_value;
+ int i, reg;
+ u16 reg_val, *val;
++ __be16 tmp;
+
+ val = (u16 *)ucontrol->value.bytes.data;
+ reg = NAU8822_REG_EQ1;
+@@ -192,8 +193,8 @@ static int nau8822_eq_get(struct snd_kcontrol *kcontrol,
+ /* conversion of 16-bit integers between native CPU format
+ * and big endian format
+ */
+- reg_val = cpu_to_be16(reg_val);
+- memcpy(val + i, &reg_val, sizeof(reg_val));
++ tmp = cpu_to_be16(reg_val);
++ memcpy(val + i, &tmp, sizeof(tmp));
+ }
+
+ return 0;
+@@ -216,6 +217,7 @@ static int nau8822_eq_put(struct snd_kcontrol *kcontrol,
+ void *data;
+ u16 *val, value;
+ int i, reg, ret;
++ __be16 *tmp;
+
+ data = kmemdup(ucontrol->value.bytes.data,
+ params->max, GFP_KERNEL | GFP_DMA);
+@@ -228,7 +230,8 @@ static int nau8822_eq_put(struct snd_kcontrol *kcontrol,
+ /* conversion of 16-bit integers between native CPU format
+ * and big endian format
+ */
+- value = be16_to_cpu(*(val + i));
++ tmp = (__be16 *)(val + i);
++ value = be16_to_cpup(tmp);
+ ret = snd_soc_component_write(component, reg + i, value);
+ if (ret) {
+ dev_err(component->dev,
+diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
+index 385f2cca939a0..2fdfec5051926 100644
+--- a/sound/soc/codecs/rt5645.c
++++ b/sound/soc/codecs/rt5645.c
+@@ -448,6 +448,7 @@ struct rt5645_priv {
+ struct regulator_bulk_data supplies[ARRAY_SIZE(rt5645_supply_names)];
+ struct rt5645_eq_param_s *eq_param;
+ struct timer_list btn_check_timer;
++ struct mutex jd_mutex;
+
+ int codec_type;
+ int sysclk;
+@@ -3189,6 +3190,8 @@ static int rt5645_jack_detect(struct snd_soc_component *component, int jack_inse
+ rt5645_enable_push_button_irq(component, true);
+ }
+ } else {
++ if (rt5645->en_button_func)
++ rt5645_enable_push_button_irq(component, false);
+ snd_soc_dapm_disable_pin(dapm, "Mic Det Power");
+ snd_soc_dapm_sync(dapm);
+ rt5645->jack_type = SND_JACK_HEADPHONE;
+@@ -3269,6 +3272,8 @@ static void rt5645_jack_detect_work(struct work_struct *work)
+ if (!rt5645->component)
+ return;
+
++ mutex_lock(&rt5645->jd_mutex);
++
+ switch (rt5645->pdata.jd_mode) {
+ case 0: /* Not using rt5645 JD */
+ if (rt5645->gpiod_hp_det) {
+@@ -3295,7 +3300,7 @@ static void rt5645_jack_detect_work(struct work_struct *work)
+
+ if (!val && (rt5645->jack_type == 0)) { /* jack in */
+ report = rt5645_jack_detect(rt5645->component, 1);
+- } else if (!val && rt5645->jack_type != 0) {
++ } else if (!val && rt5645->jack_type == SND_JACK_HEADSET) {
+ /* for push button and jack out */
+ btn_type = 0;
+ if (snd_soc_component_read(rt5645->component, RT5645_INT_IRQ_ST) & 0x4) {
+@@ -3351,6 +3356,8 @@ static void rt5645_jack_detect_work(struct work_struct *work)
+ rt5645_jack_detect(rt5645->component, 0);
+ }
+
++ mutex_unlock(&rt5645->jd_mutex);
++
+ snd_soc_jack_report(rt5645->hp_jack, report, SND_JACK_HEADPHONE);
+ snd_soc_jack_report(rt5645->mic_jack, report, SND_JACK_MICROPHONE);
+ if (rt5645->en_button_func)
+@@ -3821,14 +3828,6 @@ static const struct dmi_system_id dmi_platform_data[] = {
+ },
+ .driver_data = (void *)&ecs_ef20_platform_data,
+ },
+- {
+- .ident = "EF20EA",
+- .callback = cht_rt5645_ef20_quirk_cb,
+- .matches = {
+- DMI_MATCH(DMI_PRODUCT_NAME, "EF20EA"),
+- },
+- .driver_data = (void *)&ecs_ef20_platform_data,
+- },
+ { }
+ };
+
+@@ -4121,6 +4120,7 @@ static int rt5645_i2c_probe(struct i2c_client *i2c,
+ }
+ timer_setup(&rt5645->btn_check_timer, rt5645_btn_check_callback, 0);
+
++ mutex_init(&rt5645->jd_mutex);
+ INIT_DELAYED_WORK(&rt5645->jack_detect_work, rt5645_jack_detect_work);
+ INIT_DELAYED_WORK(&rt5645->rcclock_work, rt5645_rcclock_work);
+
+diff --git a/sound/soc/codecs/wm8974.c b/sound/soc/codecs/wm8974.c
+index fdc68ab497427..9eeac34435664 100644
+--- a/sound/soc/codecs/wm8974.c
++++ b/sound/soc/codecs/wm8974.c
+@@ -186,7 +186,7 @@ SOC_DAPM_SINGLE("PCM Playback Switch", WM8974_MONOMIX, 0, 1, 0),
+
+ /* Boost mixer */
+ static const struct snd_kcontrol_new wm8974_boost_mixer[] = {
+-SOC_DAPM_SINGLE("Aux Switch", WM8974_INPPGA, 6, 1, 1),
++SOC_DAPM_SINGLE("PGA Switch", WM8974_INPPGA, 6, 1, 1),
+ };
+
+ /* Input PGA */
+@@ -246,8 +246,8 @@ static const struct snd_soc_dapm_route wm8974_dapm_routes[] = {
+
+ /* Boost Mixer */
+ {"ADC", NULL, "Boost Mixer"},
+- {"Boost Mixer", "Aux Switch", "Aux Input"},
+- {"Boost Mixer", NULL, "Input PGA"},
++ {"Boost Mixer", NULL, "Aux Input"},
++ {"Boost Mixer", "PGA Switch", "Input PGA"},
+ {"Boost Mixer", NULL, "MICP"},
+
+ /* Input PGA */
+diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
+index 4df1be8170bb6..49dfbd29c5451 100644
+--- a/sound/soc/intel/boards/bytcr_rt5640.c
++++ b/sound/soc/intel/boards/bytcr_rt5640.c
+@@ -870,6 +870,18 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
+ BYT_RT5640_SSP0_AIF1 |
+ BYT_RT5640_MCLK_EN),
+ },
++ {
++ /* Medion Lifetab S10346 */
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
++ DMI_MATCH(DMI_BOARD_NAME, "Aptio CRB"),
++ /* Above strings are much too generic, also match on BIOS date */
++ DMI_MATCH(DMI_BIOS_DATE, "10/22/2015"),
++ },
++ .driver_data = (void *)(BYTCR_INPUT_DEFAULTS |
++ BYT_RT5640_SSP0_AIF1 |
++ BYT_RT5640_MCLK_EN),
++ },
+ { /* Mele PCG03 Mini PC */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Mini PC"),
+diff --git a/sound/soc/intel/skylake/skl-pcm.c b/sound/soc/intel/skylake/skl-pcm.c
+index db41bd7170650..af5af5be42f68 100644
+--- a/sound/soc/intel/skylake/skl-pcm.c
++++ b/sound/soc/intel/skylake/skl-pcm.c
+@@ -251,8 +251,10 @@ static int skl_pcm_open(struct snd_pcm_substream *substream,
+ snd_pcm_set_sync(substream);
+
+ mconfig = skl_tplg_fe_get_cpr_module(dai, substream->stream);
+- if (!mconfig)
++ if (!mconfig) {
++ kfree(dma_params);
+ return -EINVAL;
++ }
+
+ skl_tplg_d0i3_get(skl, mconfig->d0i3_caps);
+
+@@ -1465,6 +1467,7 @@ int skl_platform_register(struct device *dev)
+ dais = krealloc(skl->dais, sizeof(skl_fe_dai) +
+ sizeof(skl_platform_dai), GFP_KERNEL);
+ if (!dais) {
++ kfree(skl->dais);
+ ret = -ENOMEM;
+ goto err;
+ }
+@@ -1477,8 +1480,10 @@ int skl_platform_register(struct device *dev)
+
+ ret = devm_snd_soc_register_component(dev, &skl_component,
+ skl->dais, num_dais);
+- if (ret)
++ if (ret) {
++ kfree(skl->dais);
+ dev_err(dev, "soc component registration failed %d\n", ret);
++ }
+ err:
+ return ret;
+ }
+diff --git a/sound/soc/intel/skylake/skl-sst-ipc.c b/sound/soc/intel/skylake/skl-sst-ipc.c
+index 7a425271b08b1..fd9624ad5f72b 100644
+--- a/sound/soc/intel/skylake/skl-sst-ipc.c
++++ b/sound/soc/intel/skylake/skl-sst-ipc.c
+@@ -1003,8 +1003,10 @@ int skl_ipc_get_large_config(struct sst_generic_ipc *ipc,
+
+ reply.size = (reply.header >> 32) & IPC_DATA_OFFSET_SZ_MASK;
+ buf = krealloc(reply.data, reply.size, GFP_KERNEL);
+- if (!buf)
++ if (!buf) {
++ kfree(reply.data);
+ return -ENOMEM;
++ }
+ *payload = buf;
+ *bytes = reply.size;
+
+diff --git a/sound/soc/soc-ops.c b/sound/soc/soc-ops.c
+index 12effaa59fdb7..c56379fac9272 100644
+--- a/sound/soc/soc-ops.c
++++ b/sound/soc/soc-ops.c
+@@ -644,7 +644,7 @@ int snd_soc_limit_volume(struct snd_soc_card *card,
+ kctl = snd_soc_card_get_kcontrol(card, name);
+ if (kctl) {
+ struct soc_mixer_control *mc = (struct soc_mixer_control *)kctl->private_value;
+- if (max <= mc->max) {
++ if (max <= mc->max - mc->min) {
+ mc->platform_max = max;
+ ret = 0;
+ }
+diff --git a/sound/usb/mixer_scarlett_gen2.c b/sound/usb/mixer_scarlett_gen2.c
+index 53ebabf424722..0a9025e3c8671 100644
+--- a/sound/usb/mixer_scarlett_gen2.c
++++ b/sound/usb/mixer_scarlett_gen2.c
+@@ -1213,9 +1213,11 @@ static void scarlett2_config_save(struct usb_mixer_interface *mixer)
+ {
+ __le32 req = cpu_to_le32(SCARLETT2_USB_CONFIG_SAVE);
+
+- scarlett2_usb(mixer, SCARLETT2_USB_DATA_CMD,
+- &req, sizeof(u32),
+- NULL, 0);
++ int err = scarlett2_usb(mixer, SCARLETT2_USB_DATA_CMD,
++ &req, sizeof(u32),
++ NULL, 0);
++ if (err < 0)
++ usb_audio_err(mixer->chip, "config save failed: %d\n", err);
+ }
+
+ /* Delayed work to save config */
+@@ -1264,7 +1266,10 @@ static int scarlett2_usb_set_config(
+ size = 1;
+ offset = config_item->offset;
+
+- scarlett2_usb_get(mixer, offset, &tmp, 1);
++ err = scarlett2_usb_get(mixer, offset, &tmp, 1);
++ if (err < 0)
++ return err;
++
+ if (value)
+ tmp |= (1 << index);
+ else
+@@ -1669,14 +1674,20 @@ static int scarlett2_sync_ctl_get(struct snd_kcontrol *kctl,
+ struct usb_mixer_elem_info *elem = kctl->private_data;
+ struct usb_mixer_interface *mixer = elem->head.mixer;
+ struct scarlett2_data *private = mixer->private_data;
++ int err = 0;
+
+ mutex_lock(&private->data_mutex);
+- if (private->sync_updated)
+- scarlett2_update_sync(mixer);
++
++ if (private->sync_updated) {
++ err = scarlett2_update_sync(mixer);
++ if (err < 0)
++ goto unlock;
++ }
+ ucontrol->value.enumerated.item[0] = private->sync;
+- mutex_unlock(&private->data_mutex);
+
+- return 0;
++unlock:
++ mutex_unlock(&private->data_mutex);
++ return err;
+ }
+
+ static const struct snd_kcontrol_new scarlett2_sync_ctl = {
+@@ -1759,22 +1770,35 @@ static int scarlett2_master_volume_ctl_get(struct snd_kcontrol *kctl,
+ struct usb_mixer_elem_info *elem = kctl->private_data;
+ struct usb_mixer_interface *mixer = elem->head.mixer;
+ struct scarlett2_data *private = mixer->private_data;
++ int err = 0;
+
+ mutex_lock(&private->data_mutex);
+- if (private->vol_updated)
+- scarlett2_update_volumes(mixer);
+- mutex_unlock(&private->data_mutex);
+
++ if (private->vol_updated) {
++ err = scarlett2_update_volumes(mixer);
++ if (err < 0)
++ goto unlock;
++ }
+ ucontrol->value.integer.value[0] = private->master_vol;
+- return 0;
++
++unlock:
++ mutex_unlock(&private->data_mutex);
++ return err;
+ }
+
+ static int line_out_remap(struct scarlett2_data *private, int index)
+ {
+ const struct scarlett2_device_info *info = private->info;
++ const int (*port_count)[SCARLETT2_PORT_DIRNS] = info->port_count;
++ int line_out_count =
++ port_count[SCARLETT2_PORT_TYPE_ANALOGUE][SCARLETT2_PORT_OUT];
+
+ if (!info->line_out_remap_enable)
+ return index;
++
++ if (index >= line_out_count)
++ return index;
++
+ return info->line_out_remap[index];
+ }
+
+@@ -1785,14 +1809,20 @@ static int scarlett2_volume_ctl_get(struct snd_kcontrol *kctl,
+ struct usb_mixer_interface *mixer = elem->head.mixer;
+ struct scarlett2_data *private = mixer->private_data;
+ int index = line_out_remap(private, elem->control);
++ int err = 0;
+
+ mutex_lock(&private->data_mutex);
+- if (private->vol_updated)
+- scarlett2_update_volumes(mixer);
+- mutex_unlock(&private->data_mutex);
+
++ if (private->vol_updated) {
++ err = scarlett2_update_volumes(mixer);
++ if (err < 0)
++ goto unlock;
++ }
+ ucontrol->value.integer.value[0] = private->vol[index];
+- return 0;
++
++unlock:
++ mutex_unlock(&private->data_mutex);
++ return err;
+ }
+
+ static int scarlett2_volume_ctl_put(struct snd_kcontrol *kctl,
+@@ -1859,14 +1889,20 @@ static int scarlett2_mute_ctl_get(struct snd_kcontrol *kctl,
+ struct usb_mixer_interface *mixer = elem->head.mixer;
+ struct scarlett2_data *private = mixer->private_data;
+ int index = line_out_remap(private, elem->control);
++ int err = 0;
+
+ mutex_lock(&private->data_mutex);
+- if (private->vol_updated)
+- scarlett2_update_volumes(mixer);
+- mutex_unlock(&private->data_mutex);
+
++ if (private->vol_updated) {
++ err = scarlett2_update_volumes(mixer);
++ if (err < 0)
++ goto unlock;
++ }
+ ucontrol->value.integer.value[0] = private->mute_switch[index];
+- return 0;
++
++unlock:
++ mutex_unlock(&private->data_mutex);
++ return err;
+ }
+
+ static int scarlett2_mute_ctl_put(struct snd_kcontrol *kctl,
+@@ -2112,14 +2148,20 @@ static int scarlett2_level_enum_ctl_get(struct snd_kcontrol *kctl,
+ const struct scarlett2_device_info *info = private->info;
+
+ int index = elem->control + info->level_input_first;
++ int err = 0;
+
+ mutex_lock(&private->data_mutex);
+- if (private->input_other_updated)
+- scarlett2_update_input_other(mixer);
++
++ if (private->input_other_updated) {
++ err = scarlett2_update_input_other(mixer);
++ if (err < 0)
++ goto unlock;
++ }
+ ucontrol->value.enumerated.item[0] = private->level_switch[index];
+- mutex_unlock(&private->data_mutex);
+
+- return 0;
++unlock:
++ mutex_unlock(&private->data_mutex);
++ return err;
+ }
+
+ static int scarlett2_level_enum_ctl_put(struct snd_kcontrol *kctl,
+@@ -2170,15 +2212,21 @@ static int scarlett2_pad_ctl_get(struct snd_kcontrol *kctl,
+ struct usb_mixer_elem_info *elem = kctl->private_data;
+ struct usb_mixer_interface *mixer = elem->head.mixer;
+ struct scarlett2_data *private = mixer->private_data;
++ int err = 0;
+
+ mutex_lock(&private->data_mutex);
+- if (private->input_other_updated)
+- scarlett2_update_input_other(mixer);
++
++ if (private->input_other_updated) {
++ err = scarlett2_update_input_other(mixer);
++ if (err < 0)
++ goto unlock;
++ }
+ ucontrol->value.integer.value[0] =
+ private->pad_switch[elem->control];
+- mutex_unlock(&private->data_mutex);
+
+- return 0;
++unlock:
++ mutex_unlock(&private->data_mutex);
++ return err;
+ }
+
+ static int scarlett2_pad_ctl_put(struct snd_kcontrol *kctl,
+@@ -2228,14 +2276,20 @@ static int scarlett2_air_ctl_get(struct snd_kcontrol *kctl,
+ struct usb_mixer_elem_info *elem = kctl->private_data;
+ struct usb_mixer_interface *mixer = elem->head.mixer;
+ struct scarlett2_data *private = mixer->private_data;
++ int err = 0;
+
+ mutex_lock(&private->data_mutex);
+- if (private->input_other_updated)
+- scarlett2_update_input_other(mixer);
++
++ if (private->input_other_updated) {
++ err = scarlett2_update_input_other(mixer);
++ if (err < 0)
++ goto unlock;
++ }
+ ucontrol->value.integer.value[0] = private->air_switch[elem->control];
+- mutex_unlock(&private->data_mutex);
+
+- return 0;
++unlock:
++ mutex_unlock(&private->data_mutex);
++ return err;
+ }
+
+ static int scarlett2_air_ctl_put(struct snd_kcontrol *kctl,
+@@ -2285,15 +2339,21 @@ static int scarlett2_phantom_ctl_get(struct snd_kcontrol *kctl,
+ struct usb_mixer_elem_info *elem = kctl->private_data;
+ struct usb_mixer_interface *mixer = elem->head.mixer;
+ struct scarlett2_data *private = mixer->private_data;
++ int err = 0;
+
+ mutex_lock(&private->data_mutex);
+- if (private->input_other_updated)
+- scarlett2_update_input_other(mixer);
++
++ if (private->input_other_updated) {
++ err = scarlett2_update_input_other(mixer);
++ if (err < 0)
++ goto unlock;
++ }
+ ucontrol->value.integer.value[0] =
+ private->phantom_switch[elem->control];
+- mutex_unlock(&private->data_mutex);
+
+- return 0;
++unlock:
++ mutex_unlock(&private->data_mutex);
++ return err;
+ }
+
+ static int scarlett2_phantom_ctl_put(struct snd_kcontrol *kctl,
+@@ -2465,14 +2525,20 @@ static int scarlett2_direct_monitor_ctl_get(
+ struct usb_mixer_elem_info *elem = kctl->private_data;
+ struct usb_mixer_interface *mixer = elem->head.mixer;
+ struct scarlett2_data *private = elem->head.mixer->private_data;
++ int err = 0;
+
+ mutex_lock(&private->data_mutex);
+- if (private->monitor_other_updated)
+- scarlett2_update_monitor_other(mixer);
++
++ if (private->monitor_other_updated) {
++ err = scarlett2_update_monitor_other(mixer);
++ if (err < 0)
++ goto unlock;
++ }
+ ucontrol->value.enumerated.item[0] = private->direct_monitor_switch;
+- mutex_unlock(&private->data_mutex);
+
+- return 0;
++unlock:
++ mutex_unlock(&private->data_mutex);
++ return err;
+ }
+
+ static int scarlett2_direct_monitor_ctl_put(
+@@ -2572,14 +2638,20 @@ static int scarlett2_speaker_switch_enum_ctl_get(
+ struct usb_mixer_elem_info *elem = kctl->private_data;
+ struct usb_mixer_interface *mixer = elem->head.mixer;
+ struct scarlett2_data *private = mixer->private_data;
++ int err = 0;
+
+ mutex_lock(&private->data_mutex);
+- if (private->monitor_other_updated)
+- scarlett2_update_monitor_other(mixer);
++
++ if (private->monitor_other_updated) {
++ err = scarlett2_update_monitor_other(mixer);
++ if (err < 0)
++ goto unlock;
++ }
+ ucontrol->value.enumerated.item[0] = private->speaker_switching_switch;
+- mutex_unlock(&private->data_mutex);
+
+- return 0;
++unlock:
++ mutex_unlock(&private->data_mutex);
++ return err;
+ }
+
+ /* when speaker switching gets enabled, switch the main/alt speakers
+@@ -2727,14 +2799,20 @@ static int scarlett2_talkback_enum_ctl_get(
+ struct usb_mixer_elem_info *elem = kctl->private_data;
+ struct usb_mixer_interface *mixer = elem->head.mixer;
+ struct scarlett2_data *private = mixer->private_data;
++ int err = 0;
+
+ mutex_lock(&private->data_mutex);
+- if (private->monitor_other_updated)
+- scarlett2_update_monitor_other(mixer);
++
++ if (private->monitor_other_updated) {
++ err = scarlett2_update_monitor_other(mixer);
++ if (err < 0)
++ goto unlock;
++ }
+ ucontrol->value.enumerated.item[0] = private->talkback_switch;
+- mutex_unlock(&private->data_mutex);
+
+- return 0;
++unlock:
++ mutex_unlock(&private->data_mutex);
++ return err;
+ }
+
+ static int scarlett2_talkback_enum_ctl_put(
+@@ -2882,14 +2960,20 @@ static int scarlett2_dim_mute_ctl_get(struct snd_kcontrol *kctl,
+ struct usb_mixer_elem_info *elem = kctl->private_data;
+ struct usb_mixer_interface *mixer = elem->head.mixer;
+ struct scarlett2_data *private = mixer->private_data;
++ int err = 0;
+
+ mutex_lock(&private->data_mutex);
+- if (private->vol_updated)
+- scarlett2_update_volumes(mixer);
+- mutex_unlock(&private->data_mutex);
+
++ if (private->vol_updated) {
++ err = scarlett2_update_volumes(mixer);
++ if (err < 0)
++ goto unlock;
++ }
+ ucontrol->value.integer.value[0] = private->dim_mute[elem->control];
+- return 0;
++
++unlock:
++ mutex_unlock(&private->data_mutex);
++ return err;
+ }
+
+ static int scarlett2_dim_mute_ctl_put(struct snd_kcontrol *kctl,
+@@ -3153,7 +3237,8 @@ static int scarlett2_mixer_ctl_put(struct snd_kcontrol *kctl,
+ mutex_lock(&private->data_mutex);
+
+ oval = private->mix[index];
+- val = ucontrol->value.integer.value[0];
++ val = clamp(ucontrol->value.integer.value[0],
++ 0L, (long)SCARLETT2_MIXER_MAX_VALUE);
+ num_mixer_in = port_count[SCARLETT2_PORT_TYPE_MIX][SCARLETT2_PORT_OUT];
+ mix_num = index / num_mixer_in;
+
+@@ -3259,22 +3344,21 @@ static int scarlett2_mux_src_enum_ctl_get(struct snd_kcontrol *kctl,
+ struct usb_mixer_elem_info *elem = kctl->private_data;
+ struct usb_mixer_interface *mixer = elem->head.mixer;
+ struct scarlett2_data *private = mixer->private_data;
+- const struct scarlett2_device_info *info = private->info;
+- const int (*port_count)[SCARLETT2_PORT_DIRNS] = info->port_count;
+- int line_out_count =
+- port_count[SCARLETT2_PORT_TYPE_ANALOGUE][SCARLETT2_PORT_OUT];
+- int index = elem->control;
+-
+- if (index < line_out_count)
+- index = line_out_remap(private, index);
++ int index = line_out_remap(private, elem->control);
++ int err = 0;
+
+ mutex_lock(&private->data_mutex);
+- if (private->mux_updated)
+- scarlett2_usb_get_mux(mixer);
++
++ if (private->mux_updated) {
++ err = scarlett2_usb_get_mux(mixer);
++ if (err < 0)
++ goto unlock;
++ }
+ ucontrol->value.enumerated.item[0] = private->mux[index];
+- mutex_unlock(&private->data_mutex);
+
+- return 0;
++unlock:
++ mutex_unlock(&private->data_mutex);
++ return err;
+ }
+
+ static int scarlett2_mux_src_enum_ctl_put(struct snd_kcontrol *kctl,
+@@ -3283,16 +3367,9 @@ static int scarlett2_mux_src_enum_ctl_put(struct snd_kcontrol *kctl,
+ struct usb_mixer_elem_info *elem = kctl->private_data;
+ struct usb_mixer_interface *mixer = elem->head.mixer;
+ struct scarlett2_data *private = mixer->private_data;
+- const struct scarlett2_device_info *info = private->info;
+- const int (*port_count)[SCARLETT2_PORT_DIRNS] = info->port_count;
+- int line_out_count =
+- port_count[SCARLETT2_PORT_TYPE_ANALOGUE][SCARLETT2_PORT_OUT];
+- int index = elem->control;
++ int index = line_out_remap(private, elem->control);
+ int oval, val, err = 0;
+
+- if (index < line_out_count)
+- index = line_out_remap(private, index);
+-
+ mutex_lock(&private->data_mutex);
+
+ oval = private->mux[index];
+diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
+index 1e3e3f16eabcb..d83eaa35c5817 100644
+--- a/tools/include/uapi/linux/bpf.h
++++ b/tools/include/uapi/linux/bpf.h
+@@ -4245,6 +4245,8 @@ union bpf_attr {
+ * long bpf_get_task_stack(struct task_struct *task, void *buf, u32 size, u64 flags)
+ * Description
+ * Return a user or a kernel stack in bpf program provided buffer.
++ * Note: the user stack will only be populated if the *task* is
++ * the current task; all other tasks will return -EOPNOTSUPP.
+ * To achieve this, the helper needs *task*, which is a valid
+ * pointer to **struct task_struct**. To store the stacktrace, the
+ * bpf program provides *buf* with a nonnegative *size*.
+@@ -4256,6 +4258,7 @@ union bpf_attr {
+ *
+ * **BPF_F_USER_STACK**
+ * Collect a user space stack instead of a kernel stack.
++ * The *task* must be the current task.
+ * **BPF_F_USER_BUILD_ID**
+ * Collect buildid+offset instead of ips for user stack,
+ * only valid if **BPF_F_USER_STACK** is also specified.
+diff --git a/tools/lib/api/io.h b/tools/lib/api/io.h
+index 777c20f6b6047..458acd294237d 100644
+--- a/tools/lib/api/io.h
++++ b/tools/lib/api/io.h
+@@ -9,6 +9,7 @@
+
+ #include <stdlib.h>
+ #include <unistd.h>
++#include <linux/types.h>
+
+ struct io {
+ /* File descriptor being read/ */
+diff --git a/tools/perf/util/bpf-event.c b/tools/perf/util/bpf-event.c
+index cf1b9f6ec0dbe..ce74bc367e9c4 100644
+--- a/tools/perf/util/bpf-event.c
++++ b/tools/perf/util/bpf-event.c
+@@ -554,9 +554,9 @@ int evlist__add_bpf_sb_event(struct evlist *evlist, struct perf_env *env)
+ return evlist__add_sb_event(evlist, &attr, bpf_event__sb_cb, env);
+ }
+
+-void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info,
+- struct perf_env *env,
+- FILE *fp)
++void __bpf_event__print_bpf_prog_info(struct bpf_prog_info *info,
++ struct perf_env *env,
++ FILE *fp)
+ {
+ __u32 *prog_lens = (__u32 *)(uintptr_t)(info->jited_func_lens);
+ __u64 *prog_addrs = (__u64 *)(uintptr_t)(info->jited_ksyms);
+@@ -572,7 +572,7 @@ void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info,
+ if (info->btf_id) {
+ struct btf_node *node;
+
+- node = perf_env__find_btf(env, info->btf_id);
++ node = __perf_env__find_btf(env, info->btf_id);
+ if (node)
+ btf = btf__new((__u8 *)(node->data),
+ node->data_size);
+diff --git a/tools/perf/util/bpf-event.h b/tools/perf/util/bpf-event.h
+index 68f315c3df5be..50f7412464dfc 100644
+--- a/tools/perf/util/bpf-event.h
++++ b/tools/perf/util/bpf-event.h
+@@ -34,9 +34,9 @@ struct btf_node {
+ int machine__process_bpf(struct machine *machine, union perf_event *event,
+ struct perf_sample *sample);
+ int evlist__add_bpf_sb_event(struct evlist *evlist, struct perf_env *env);
+-void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info,
+- struct perf_env *env,
+- FILE *fp);
++void __bpf_event__print_bpf_prog_info(struct bpf_prog_info *info,
++ struct perf_env *env,
++ FILE *fp);
+ #else
+ static inline int machine__process_bpf(struct machine *machine __maybe_unused,
+ union perf_event *event __maybe_unused,
+@@ -51,9 +51,9 @@ static inline int evlist__add_bpf_sb_event(struct evlist *evlist __maybe_unused,
+ return 0;
+ }
+
+-static inline void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info __maybe_unused,
+- struct perf_env *env __maybe_unused,
+- FILE *fp __maybe_unused)
++static inline void __bpf_event__print_bpf_prog_info(struct bpf_prog_info *info __maybe_unused,
++ struct perf_env *env __maybe_unused,
++ FILE *fp __maybe_unused)
+ {
+
+ }
+diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c
+index 5b24eb010336c..d3d67ce70f556 100644
+--- a/tools/perf/util/env.c
++++ b/tools/perf/util/env.c
+@@ -20,13 +20,19 @@ struct perf_env perf_env;
+
+ void perf_env__insert_bpf_prog_info(struct perf_env *env,
+ struct bpf_prog_info_node *info_node)
++{
++ down_write(&env->bpf_progs.lock);
++ __perf_env__insert_bpf_prog_info(env, info_node);
++ up_write(&env->bpf_progs.lock);
++}
++
++void __perf_env__insert_bpf_prog_info(struct perf_env *env, struct bpf_prog_info_node *info_node)
+ {
+ __u32 prog_id = info_node->info_linear->info.id;
+ struct bpf_prog_info_node *node;
+ struct rb_node *parent = NULL;
+ struct rb_node **p;
+
+- down_write(&env->bpf_progs.lock);
+ p = &env->bpf_progs.infos.rb_node;
+
+ while (*p != NULL) {
+@@ -38,15 +44,13 @@ void perf_env__insert_bpf_prog_info(struct perf_env *env,
+ p = &(*p)->rb_right;
+ } else {
+ pr_debug("duplicated bpf prog info %u\n", prog_id);
+- goto out;
++ return;
+ }
+ }
+
+ rb_link_node(&info_node->rb_node, parent, p);
+ rb_insert_color(&info_node->rb_node, &env->bpf_progs.infos);
+ env->bpf_progs.infos_cnt++;
+-out:
+- up_write(&env->bpf_progs.lock);
+ }
+
+ struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
+@@ -75,14 +79,22 @@ out:
+ }
+
+ bool perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node)
++{
++ bool ret;
++
++ down_write(&env->bpf_progs.lock);
++ ret = __perf_env__insert_btf(env, btf_node);
++ up_write(&env->bpf_progs.lock);
++ return ret;
++}
++
++bool __perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node)
+ {
+ struct rb_node *parent = NULL;
+ __u32 btf_id = btf_node->id;
+ struct btf_node *node;
+ struct rb_node **p;
+- bool ret = true;
+
+- down_write(&env->bpf_progs.lock);
+ p = &env->bpf_progs.btfs.rb_node;
+
+ while (*p != NULL) {
+@@ -94,25 +106,31 @@ bool perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node)
+ p = &(*p)->rb_right;
+ } else {
+ pr_debug("duplicated btf %u\n", btf_id);
+- ret = false;
+- goto out;
++ return false;
+ }
+ }
+
+ rb_link_node(&btf_node->rb_node, parent, p);
+ rb_insert_color(&btf_node->rb_node, &env->bpf_progs.btfs);
+ env->bpf_progs.btfs_cnt++;
+-out:
+- up_write(&env->bpf_progs.lock);
+- return ret;
++ return true;
+ }
+
+ struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id)
++{
++ struct btf_node *res;
++
++ down_read(&env->bpf_progs.lock);
++ res = __perf_env__find_btf(env, btf_id);
++ up_read(&env->bpf_progs.lock);
++ return res;
++}
++
++struct btf_node *__perf_env__find_btf(struct perf_env *env, __u32 btf_id)
+ {
+ struct btf_node *node = NULL;
+ struct rb_node *n;
+
+- down_read(&env->bpf_progs.lock);
+ n = env->bpf_progs.btfs.rb_node;
+
+ while (n) {
+@@ -122,13 +140,9 @@ struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id)
+ else if (btf_id > node->id)
+ n = n->rb_right;
+ else
+- goto out;
++ return node;
+ }
+- node = NULL;
+-
+-out:
+- up_read(&env->bpf_progs.lock);
+- return node;
++ return NULL;
+ }
+
+ /* purge data in bpf_progs.infos tree */
+diff --git a/tools/perf/util/env.h b/tools/perf/util/env.h
+index 163e5ec503a26..192318054e12f 100644
+--- a/tools/perf/util/env.h
++++ b/tools/perf/util/env.h
+@@ -163,12 +163,16 @@ const char *perf_env__raw_arch(struct perf_env *env);
+ int perf_env__nr_cpus_avail(struct perf_env *env);
+
+ void perf_env__init(struct perf_env *env);
++void __perf_env__insert_bpf_prog_info(struct perf_env *env,
++ struct bpf_prog_info_node *info_node);
+ void perf_env__insert_bpf_prog_info(struct perf_env *env,
+ struct bpf_prog_info_node *info_node);
+ struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
+ __u32 prog_id);
+ bool perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node);
++bool __perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node);
+ struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id);
++struct btf_node *__perf_env__find_btf(struct perf_env *env, __u32 btf_id);
+
+ int perf_env__numa_node(struct perf_env *env, int cpu);
+ #endif /* __PERF_ENV_H */
+diff --git a/tools/perf/util/genelf.c b/tools/perf/util/genelf.c
+index 02cd9f75e3d2f..89a85601485d9 100644
+--- a/tools/perf/util/genelf.c
++++ b/tools/perf/util/genelf.c
+@@ -291,9 +291,9 @@ jit_write_elf(int fd, uint64_t load_addr, const char *sym,
+ */
+ phdr = elf_newphdr(e, 1);
+ phdr[0].p_type = PT_LOAD;
+- phdr[0].p_offset = 0;
+- phdr[0].p_vaddr = 0;
+- phdr[0].p_paddr = 0;
++ phdr[0].p_offset = GEN_ELF_TEXT_OFFSET;
++ phdr[0].p_vaddr = GEN_ELF_TEXT_OFFSET;
++ phdr[0].p_paddr = GEN_ELF_TEXT_OFFSET;
+ phdr[0].p_filesz = csize;
+ phdr[0].p_memsz = csize;
+ phdr[0].p_flags = PF_X | PF_R;
+diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
+index 25947d0136038..8b0a8ac7afefd 100644
+--- a/tools/perf/util/header.c
++++ b/tools/perf/util/header.c
+@@ -1735,8 +1735,8 @@ static void print_bpf_prog_info(struct feat_fd *ff, FILE *fp)
+ node = rb_entry(next, struct bpf_prog_info_node, rb_node);
+ next = rb_next(&node->rb_node);
+
+- bpf_event__print_bpf_prog_info(&node->info_linear->info,
+- env, fp);
++ __bpf_event__print_bpf_prog_info(&node->info_linear->info,
++ env, fp);
+ }
+
+ up_read(&env->bpf_progs.lock);
+@@ -3073,7 +3073,7 @@ static int process_bpf_prog_info(struct feat_fd *ff, void *data __maybe_unused)
+ /* after reading from file, translate offset to address */
+ bpf_program__bpil_offs_to_addr(info_linear);
+ info_node->info_linear = info_linear;
+- perf_env__insert_bpf_prog_info(env, info_node);
++ __perf_env__insert_bpf_prog_info(env, info_node);
+ }
+
+ up_write(&env->bpf_progs.lock);
+@@ -3120,7 +3120,7 @@ static int process_bpf_btf(struct feat_fd *ff, void *data __maybe_unused)
+ if (__do_read(ff, node->data, data_size))
+ goto out;
+
+- perf_env__insert_btf(env, node);
++ __perf_env__insert_btf(env, node);
+ node = NULL;
+ }
+
+diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
+index dc18e5ae0febc..c8ab00607698c 100644
+--- a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
++++ b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
+@@ -150,6 +150,8 @@ static void test_task_stack(void)
+ do_dummy_read(skel->progs.dump_task_stack);
+ do_dummy_read(skel->progs.get_task_user_stacks);
+
++ ASSERT_EQ(skel->bss->num_user_stacks, 1, "num_user_stacks");
++
+ bpf_iter_task_stack__destroy(skel);
+ }
+
+diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_task_stack.c b/tools/testing/selftests/bpf/progs/bpf_iter_task_stack.c
+index f2b8167b72a84..442f4ca39fd76 100644
+--- a/tools/testing/selftests/bpf/progs/bpf_iter_task_stack.c
++++ b/tools/testing/selftests/bpf/progs/bpf_iter_task_stack.c
+@@ -35,6 +35,8 @@ int dump_task_stack(struct bpf_iter__task *ctx)
+ return 0;
+ }
+
++int num_user_stacks = 0;
++
+ SEC("iter/task")
+ int get_task_user_stacks(struct bpf_iter__task *ctx)
+ {
+@@ -51,6 +53,9 @@ int get_task_user_stacks(struct bpf_iter__task *ctx)
+ if (res <= 0)
+ return 0;
+
++ /* Only one task, the current one, should succeed */
++ ++num_user_stacks;
++
+ buf_sz += res;
+
+ /* If the verifier doesn't refine bpf_get_task_stack res, and instead
+diff --git a/tools/testing/selftests/drivers/net/mlxsw/qos_pfc.sh b/tools/testing/selftests/drivers/net/mlxsw/qos_pfc.sh
+index 5d5622fc27582..56761de1ca3b7 100755
+--- a/tools/testing/selftests/drivers/net/mlxsw/qos_pfc.sh
++++ b/tools/testing/selftests/drivers/net/mlxsw/qos_pfc.sh
+@@ -121,6 +121,9 @@ h2_destroy()
+
+ switch_create()
+ {
++ local lanes_swp4
++ local pg1_size
++
+ # pools
+ # -----
+
+@@ -230,7 +233,20 @@ switch_create()
+ dcb pfc set dev $swp4 prio-pfc all:off 1:on
+ # PG0 will get autoconfigured to Xoff, give PG1 arbitrarily 100K, which
+ # is (-2*MTU) about 80K of delay provision.
+- dcb buffer set dev $swp4 buffer-size all:0 1:$_100KB
++ pg1_size=$_100KB
++
++ setup_wait_dev_with_timeout $swp4
++
++ lanes_swp4=$(ethtool $swp4 | grep 'Lanes:')
++ lanes_swp4=${lanes_swp4#*"Lanes: "}
++
++ # 8-lane ports use two buffers among which the configured buffer
++ # is split, so double the size to get twice (20K + 80K).
++ if [[ $lanes_swp4 -eq 8 ]]; then
++ pg1_size=$((pg1_size * 2))
++ fi
++
++ dcb buffer set dev $swp4 buffer-size all:0 1:$pg1_size
+
+ # bridges
+ # -------
+diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh
+index fb850e0ec8375..616d3581419ca 100755
+--- a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh
++++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh
+@@ -10,7 +10,8 @@ lib_dir=$(dirname $0)/../../../../net/forwarding
+ ALL_TESTS="single_mask_test identical_filters_test two_masks_test \
+ multiple_masks_test ctcam_edge_cases_test delta_simple_test \
+ delta_two_masks_one_key_test delta_simple_rehash_test \
+- bloom_simple_test bloom_complex_test bloom_delta_test"
++ bloom_simple_test bloom_complex_test bloom_delta_test \
++ max_erp_entries_test max_group_size_test"
+ NUM_NETIFS=2
+ source $lib_dir/lib.sh
+ source $lib_dir/tc_common.sh
+@@ -983,6 +984,109 @@ bloom_delta_test()
+ log_test "bloom delta test ($tcflags)"
+ }
+
++max_erp_entries_test()
++{
++ # The number of eRP entries is limited. Once the maximum number of eRPs
++ # has been reached, filters cannot be added. This test verifies that
++	# when this limit is reached, insertion fails without crashing.
++
++ RET=0
++
++ local num_masks=32
++ local num_regions=15
++ local chain_failed
++ local mask_failed
++ local ret
++
++ if [[ "$tcflags" != "skip_sw" ]]; then
++ return 0;
++ fi
++
++ for ((i=1; i < $num_regions; i++)); do
++ for ((j=$num_masks; j >= 0; j--)); do
++ tc filter add dev $h2 ingress chain $i protocol ip \
++ pref $i handle $j flower $tcflags \
++ dst_ip 192.1.0.0/$j &> /dev/null
++ ret=$?
++
++ if [ $ret -ne 0 ]; then
++ chain_failed=$i
++ mask_failed=$j
++ break 2
++ fi
++ done
++ done
++
++ # We expect to exceed the maximum number of eRP entries, so that
++ # insertion eventually fails. Otherwise, the test should be adjusted to
++ # add more filters.
++ check_fail $ret "expected to exceed number of eRP entries"
++
++ for ((; i >= 1; i--)); do
++ for ((j=0; j <= $num_masks; j++)); do
++ tc filter del dev $h2 ingress chain $i protocol ip \
++ pref $i handle $j flower &> /dev/null
++ done
++ done
++
++ log_test "max eRP entries test ($tcflags). " \
++ "max chain $chain_failed, mask $mask_failed"
++}
++
++max_group_size_test()
++{
++ # The number of ACLs in an ACL group is limited. Once the maximum
++ # number of ACLs has been reached, filters cannot be added. This test
++ # verifies that when this limit is reached, insertion fails without
++ # crashing.
++
++ RET=0
++
++ local num_acls=32
++ local max_size
++ local ret
++
++ if [[ "$tcflags" != "skip_sw" ]]; then
++ return 0;
++ fi
++
++ for ((i=1; i < $num_acls; i++)); do
++ if [[ $(( i % 2 )) == 1 ]]; then
++ tc filter add dev $h2 ingress pref $i proto ipv4 \
++ flower $tcflags dst_ip 198.51.100.1/32 \
++ ip_proto tcp tcp_flags 0x01/0x01 \
++ action drop &> /dev/null
++ else
++ tc filter add dev $h2 ingress pref $i proto ipv6 \
++ flower $tcflags dst_ip 2001:db8:1::1/128 \
++ action drop &> /dev/null
++ fi
++
++ ret=$?
++ [[ $ret -ne 0 ]] && max_size=$((i - 1)) && break
++ done
++
++ # We expect to exceed the maximum number of ACLs in a group, so that
++ # insertion eventually fails. Otherwise, the test should be adjusted to
++ # add more filters.
++ check_fail $ret "expected to exceed number of ACLs in a group"
++
++ for ((; i >= 1; i--)); do
++ if [[ $(( i % 2 )) == 1 ]]; then
++ tc filter del dev $h2 ingress pref $i proto ipv4 \
++ flower $tcflags dst_ip 198.51.100.1/32 \
++ ip_proto tcp tcp_flags 0x01/0x01 \
++ action drop &> /dev/null
++ else
++ tc filter del dev $h2 ingress pref $i proto ipv6 \
++ flower $tcflags dst_ip 2001:db8:1::1/128 \
++ action drop &> /dev/null
++ fi
++ done
++
++ log_test "max ACL group size test ($tcflags). max size $max_size"
++}
++
+ setup_prepare()
+ {
+ h1=${NETIFS[p1]}
+diff --git a/tools/testing/selftests/net/fib_nexthop_multiprefix.sh b/tools/testing/selftests/net/fib_nexthop_multiprefix.sh
+index 51df5e305855a..b52d59547fc59 100755
+--- a/tools/testing/selftests/net/fib_nexthop_multiprefix.sh
++++ b/tools/testing/selftests/net/fib_nexthop_multiprefix.sh
+@@ -209,12 +209,12 @@ validate_v6_exception()
+ echo "Route get"
+ ip -netns h0 -6 ro get ${dst}
+ echo "Searching for:"
+- echo " ${dst} from :: via ${r1} dev eth0 src ${h0} .* mtu ${mtu}"
++ echo " ${dst}.* via ${r1} dev eth0 src ${h0} .* mtu ${mtu}"
+ echo
+ fi
+
+ ip -netns h0 -6 ro get ${dst} | \
+- grep -q "${dst} from :: via ${r1} dev eth0 src ${h0} .* mtu ${mtu}"
++ grep -q "${dst}.* via ${r1} dev eth0 src ${h0} .* mtu ${mtu}"
+ rc=$?
+
+ log_test $rc 0 "IPv6: host 0 to host ${i}, mtu ${mtu}"
+diff --git a/tools/testing/selftests/powerpc/math/fpu_preempt.c b/tools/testing/selftests/powerpc/math/fpu_preempt.c
+index 5235bdc8c0b11..3e5b5663d2449 100644
+--- a/tools/testing/selftests/powerpc/math/fpu_preempt.c
++++ b/tools/testing/selftests/powerpc/math/fpu_preempt.c
+@@ -37,19 +37,20 @@ __thread double darray[] = {0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0,
+ int threads_starting;
+ int running;
+
+-extern void preempt_fpu(double *darray, int *threads_starting, int *running);
++extern int preempt_fpu(double *darray, int *threads_starting, int *running);
+
+ void *preempt_fpu_c(void *p)
+ {
++ long rc;
+ int i;
++
+ srand(pthread_self());
+ for (i = 0; i < 21; i++)
+ darray[i] = rand();
+
+- /* Test failed if it ever returns */
+- preempt_fpu(darray, &threads_starting, &running);
++ rc = preempt_fpu(darray, &threads_starting, &running);
+
+- return p;
++ return (void *)rc;
+ }
+
+ int test_preempt_fpu(void)
+diff --git a/tools/testing/selftests/powerpc/math/vmx_preempt.c b/tools/testing/selftests/powerpc/math/vmx_preempt.c
+index 6761d6ce30eca..6f7cf400c6875 100644
+--- a/tools/testing/selftests/powerpc/math/vmx_preempt.c
++++ b/tools/testing/selftests/powerpc/math/vmx_preempt.c
+@@ -37,19 +37,21 @@ __thread vector int varray[] = {{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10,11,12},
+ int threads_starting;
+ int running;
+
+-extern void preempt_vmx(vector int *varray, int *threads_starting, int *running);
++extern int preempt_vmx(vector int *varray, int *threads_starting, int *running);
+
+ void *preempt_vmx_c(void *p)
+ {
+ int i, j;
++ long rc;
++
+ srand(pthread_self());
+ for (i = 0; i < 12; i++)
+ for (j = 0; j < 4; j++)
+ varray[i][j] = rand();
+
+- /* Test fails if it ever returns */
+- preempt_vmx(varray, &threads_starting, &running);
+- return p;
++ rc = preempt_vmx(varray, &threads_starting, &running);
++
++ return (void *)rc;
+ }
+
+ int test_preempt_vmx(void)
+diff --git a/tools/testing/selftests/sgx/Makefile b/tools/testing/selftests/sgx/Makefile
+index 472b27ccd7dcb..394d0dde479b4 100644
+--- a/tools/testing/selftests/sgx/Makefile
++++ b/tools/testing/selftests/sgx/Makefile
+@@ -16,9 +16,9 @@ HOST_CFLAGS := -Wall -Werror -g $(INCLUDES) -fPIC -z noexecstack
+ ENCL_CFLAGS := -Wall -Werror -static -nostdlib -nostartfiles -fPIC \
+ -fno-stack-protector -mrdrnd $(INCLUDES)
+
++ifeq ($(CAN_BUILD_X86_64), 1)
+ TEST_CUSTOM_PROGS := $(OUTPUT)/test_sgx
+
+-ifeq ($(CAN_BUILD_X86_64), 1)
+ all: $(TEST_CUSTOM_PROGS) $(OUTPUT)/test_encl.elf
+ endif
+
+diff --git a/tools/testing/selftests/sgx/sigstruct.c b/tools/testing/selftests/sgx/sigstruct.c
+index 92bbc5a15c39f..a201d64f9b497 100644
+--- a/tools/testing/selftests/sgx/sigstruct.c
++++ b/tools/testing/selftests/sgx/sigstruct.c
+@@ -310,9 +310,9 @@ bool encl_measure(struct encl *encl)
+ struct sgx_sigstruct *sigstruct = &encl->sigstruct;
+ struct sgx_sigstruct_payload payload;
+ uint8_t digest[SHA256_DIGEST_LENGTH];
++ EVP_MD_CTX *ctx = NULL;
+ unsigned int siglen;
+ RSA *key = NULL;
+- EVP_MD_CTX *ctx;
+ int i;
+
+ memset(sigstruct, 0, sizeof(*sigstruct));
+@@ -376,7 +376,8 @@ bool encl_measure(struct encl *encl)
+ return true;
+
+ err:
+- EVP_MD_CTX_destroy(ctx);
++ if (ctx)
++ EVP_MD_CTX_destroy(ctx);
+ RSA_free(key);
+ return false;
+ }