summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMike Pagano <mpagano@gentoo.org>2023-11-20 06:22:41 -0500
committerMike Pagano <mpagano@gentoo.org>2023-11-20 06:22:41 -0500
commit34b054e030b4d77909f23813b69a0cb891c5e2fe (patch)
treeee7b76ddcbc1c22de50e68b0f0cc6a1d1090a807
parentLinux patch 6.1.62 (diff)
downloadlinux-patches-34b054e030b4d77909f23813b69a0cb891c5e2fe.tar.gz
linux-patches-34b054e030b4d77909f23813b69a0cb891c5e2fe.tar.bz2
linux-patches-34b054e030b4d77909f23813b69a0cb891c5e2fe.zip
Linux patch 6.1.63 (tag: 6.1-70)
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
-rw-r--r--0000_README4
-rw-r--r--1062_linux-6.1.63.patch22957
2 files changed, 22961 insertions, 0 deletions
diff --git a/0000_README b/0000_README
index 3a96d6b9..56731b47 100644
--- a/0000_README
+++ b/0000_README
@@ -291,6 +291,10 @@ Patch: 1061_linux-6.1.62.patch
From: https://www.kernel.org
Desc: Linux 6.1.62
+Patch: 1062_linux-6.1.63.patch
+From: https://www.kernel.org
+Desc: Linux 6.1.63
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1062_linux-6.1.63.patch b/1062_linux-6.1.63.patch
new file mode 100644
index 00000000..00337f2f
--- /dev/null
+++ b/1062_linux-6.1.63.patch
@@ -0,0 +1,22957 @@
+diff --git a/Documentation/devicetree/bindings/mfd/mt6397.txt b/Documentation/devicetree/bindings/mfd/mt6397.txt
+index 0088442efca1a..8f9784af92d6e 100644
+--- a/Documentation/devicetree/bindings/mfd/mt6397.txt
++++ b/Documentation/devicetree/bindings/mfd/mt6397.txt
+@@ -21,8 +21,10 @@ Required properties:
+ compatible:
+ "mediatek,mt6323" for PMIC MT6323
+ "mediatek,mt6331" for PMIC MT6331 and MT6332
+- "mediatek,mt6358" for PMIC MT6358 and MT6366
++ "mediatek,mt6357" for PMIC MT6357
++ "mediatek,mt6358" for PMIC MT6358
+ "mediatek,mt6359" for PMIC MT6359
++ "mediatek,mt6366", "mediatek,mt6358" for PMIC MT6366
+ "mediatek,mt6397" for PMIC MT6397
+
+ Optional subnodes:
+@@ -39,6 +41,7 @@ Optional subnodes:
+ - compatible: "mediatek,mt6323-regulator"
+ see ../regulator/mt6323-regulator.txt
+ - compatible: "mediatek,mt6358-regulator"
++ - compatible: "mediatek,mt6366-regulator", "mediatek-mt6358-regulator"
+ see ../regulator/mt6358-regulator.txt
+ - compatible: "mediatek,mt6397-regulator"
+ see ../regulator/mt6397-regulator.txt
+diff --git a/Documentation/virt/coco/sev-guest.rst b/Documentation/virt/coco/sev-guest.rst
+index bf593e88cfd9d..68b0d2363af82 100644
+--- a/Documentation/virt/coco/sev-guest.rst
++++ b/Documentation/virt/coco/sev-guest.rst
+@@ -37,11 +37,11 @@ along with a description:
+ the return value. General error numbers (-ENOMEM, -EINVAL)
+ are not detailed, but errors with specific meanings are.
+
+-The guest ioctl should be issued on a file descriptor of the /dev/sev-guest device.
+-The ioctl accepts struct snp_user_guest_request. The input and output structure is
+-specified through the req_data and resp_data field respectively. If the ioctl fails
+-to execute due to a firmware error, then fw_err code will be set otherwise the
+-fw_err will be set to 0x00000000000000ff.
++The guest ioctl should be issued on a file descriptor of the /dev/sev-guest
++device. The ioctl accepts struct snp_user_guest_request. The input and
++output structure is specified through the req_data and resp_data field
++respectively. If the ioctl fails to execute due to a firmware error, then
++the fw_error code will be set, otherwise fw_error will be set to -1.
+
+ The firmware checks that the message sequence counter is one greater than
+ the guests message sequence counter. If guest driver fails to increment message
+@@ -57,8 +57,14 @@ counter (e.g. counter overflow), then -EIO will be returned.
+ __u64 req_data;
+ __u64 resp_data;
+
+- /* firmware error code on failure (see psp-sev.h) */
+- __u64 fw_err;
++ /* bits[63:32]: VMM error code, bits[31:0] firmware error code (see psp-sev.h) */
++ union {
++ __u64 exitinfo2;
++ struct {
++ __u32 fw_error;
++ __u32 vmm_error;
++ };
++ };
+ };
+
+ 2.1 SNP_GET_REPORT
+diff --git a/Makefile b/Makefile
+index 2e7bc3cc1c177..7c69293b7e059 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 62
++SUBLEVEL = 63
+ EXTRAVERSION =
+ NAME = Curry Ramen
+
+diff --git a/arch/arm/boot/dts/am3517-evm.dts b/arch/arm/boot/dts/am3517-evm.dts
+index 7bab0a9dadb30..95508b7fa3bf6 100644
+--- a/arch/arm/boot/dts/am3517-evm.dts
++++ b/arch/arm/boot/dts/am3517-evm.dts
+@@ -271,13 +271,6 @@
+ >;
+ };
+
+- leds_pins: pinmux_leds_pins {
+- pinctrl-single,pins = <
+- OMAP3_WKUP_IOPAD(0x2a24, PIN_OUTPUT_PULLUP | MUX_MODE4) /* jtag_emu0.gpio_11 */
+- OMAP3_WKUP_IOPAD(0x2a26, PIN_OUTPUT_PULLUP | MUX_MODE4) /* jtag_emu1.gpio_31 */
+- >;
+- };
+-
+ mmc1_pins: pinmux_mmc1_pins {
+ pinctrl-single,pins = <
+ OMAP3_CORE1_IOPAD(0x2144, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc1_clk.sdmmc1_clk */
+@@ -355,3 +348,12 @@
+ >;
+ };
+ };
++
++&omap3_pmx_wkup {
++ leds_pins: pinmux_leds_pins {
++ pinctrl-single,pins = <
++ OMAP3_WKUP_IOPAD(0x2a24, PIN_OUTPUT_PULLUP | MUX_MODE4) /* jtag_emu0.gpio_11 */
++ OMAP3_WKUP_IOPAD(0x2a26, PIN_OUTPUT_PULLUP | MUX_MODE4) /* jtag_emu1.gpio_31 */
++ >;
++ };
++};
+diff --git a/arch/arm/boot/dts/qcom-mdm9615.dtsi b/arch/arm/boot/dts/qcom-mdm9615.dtsi
+index b47c86412de2c..17a1a06dfb3f1 100644
+--- a/arch/arm/boot/dts/qcom-mdm9615.dtsi
++++ b/arch/arm/boot/dts/qcom-mdm9615.dtsi
+@@ -82,14 +82,12 @@
+ };
+ };
+
+- regulators {
+- vsdcc_fixed: vsdcc-regulator {
+- compatible = "regulator-fixed";
+- regulator-name = "SDCC Power";
+- regulator-min-microvolt = <2700000>;
+- regulator-max-microvolt = <2700000>;
+- regulator-always-on;
+- };
++ vsdcc_fixed: vsdcc-regulator {
++ compatible = "regulator-fixed";
++ regulator-name = "SDCC Power";
++ regulator-min-microvolt = <2700000>;
++ regulator-max-microvolt = <2700000>;
++ regulator-always-on;
+ };
+
+ soc: soc {
+diff --git a/arch/arm/boot/dts/r8a7792-blanche.dts b/arch/arm/boot/dts/r8a7792-blanche.dts
+index c66de9dd12dfc..6a83923aa4612 100644
+--- a/arch/arm/boot/dts/r8a7792-blanche.dts
++++ b/arch/arm/boot/dts/r8a7792-blanche.dts
+@@ -239,7 +239,7 @@
+ };
+
+ keyboard_pins: keyboard {
+- pins = "GP_3_10", "GP_3_11", "GP_3_12", "GP_3_15", "GP_11_02";
++ pins = "GP_3_10", "GP_3_11", "GP_3_12", "GP_3_15", "GP_11_2";
+ bias-pull-up;
+ };
+
+diff --git a/arch/arm/lib/memset.S b/arch/arm/lib/memset.S
+index d71ab61430b26..de75ae4d5ab41 100644
+--- a/arch/arm/lib/memset.S
++++ b/arch/arm/lib/memset.S
+@@ -17,6 +17,7 @@ ENTRY(__memset)
+ ENTRY(mmioset)
+ WEAK(memset)
+ UNWIND( .fnstart )
++ and r1, r1, #255 @ cast to unsigned char
+ ands r3, r0, #3 @ 1 unaligned?
+ mov ip, r0 @ preserve r0 as return value
+ bne 6f @ 1
+diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
+index 93c8ccbf29828..b647306eb1608 100644
+--- a/arch/arm/xen/enlighten.c
++++ b/arch/arm/xen/enlighten.c
+@@ -164,9 +164,6 @@ static int xen_starting_cpu(unsigned int cpu)
+ BUG_ON(err);
+ per_cpu(xen_vcpu, cpu) = vcpup;
+
+- if (!xen_kernel_unmapped_at_usr())
+- xen_setup_runstate_info(cpu);
+-
+ after_register_vcpu_info:
+ enable_percpu_irq(xen_events_irq, 0);
+ return 0;
+@@ -523,9 +520,6 @@ static int __init xen_guest_init(void)
+ return -EINVAL;
+ }
+
+- if (!xen_kernel_unmapped_at_usr())
+- xen_time_setup_guest();
+-
+ if (xen_initial_domain())
+ pvclock_gtod_register_notifier(&xen_pvclock_gtod_notifier);
+
+@@ -535,7 +529,13 @@ static int __init xen_guest_init(void)
+ }
+ early_initcall(xen_guest_init);
+
+-static int __init xen_pm_init(void)
++static int xen_starting_runstate_cpu(unsigned int cpu)
++{
++ xen_setup_runstate_info(cpu);
++ return 0;
++}
++
++static int __init xen_late_init(void)
+ {
+ if (!xen_domain())
+ return -ENODEV;
+@@ -548,9 +548,16 @@ static int __init xen_pm_init(void)
+ do_settimeofday64(&ts);
+ }
+
+- return 0;
++ if (xen_kernel_unmapped_at_usr())
++ return 0;
++
++ xen_time_setup_guest();
++
++ return cpuhp_setup_state(CPUHP_AP_ARM_XEN_RUNSTATE_STARTING,
++ "arm/xen_runstate:starting",
++ xen_starting_runstate_cpu, NULL);
+ }
+-late_initcall(xen_pm_init);
++late_initcall(xen_late_init);
+
+
+ /* empty stubs */
+diff --git a/arch/arm64/boot/dts/freescale/imx8mm.dtsi b/arch/arm64/boot/dts/freescale/imx8mm.dtsi
+index 12c82bb1bb7aa..d583db18f74cc 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mm.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mm.dtsi
+@@ -398,6 +398,7 @@
+ "pll8k", "pll11k", "clkext3";
+ dmas = <&sdma2 24 25 0x80000000>;
+ dma-names = "rx";
++ #sound-dai-cells = <0>;
+ status = "disabled";
+ };
+
+diff --git a/arch/arm64/boot/dts/freescale/imx8mn.dtsi b/arch/arm64/boot/dts/freescale/imx8mn.dtsi
+index 37246ca9d9075..66fadbf19f0a3 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mn.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mn.dtsi
+@@ -370,6 +370,7 @@
+ "pll8k", "pll11k", "clkext3";
+ dmas = <&sdma2 24 25 0x80000000>;
+ dma-names = "rx";
++ #sound-dai-cells = <0>;
+ status = "disabled";
+ };
+
+diff --git a/arch/arm64/boot/dts/freescale/imx8qm-ss-img.dtsi b/arch/arm64/boot/dts/freescale/imx8qm-ss-img.dtsi
+index 7764b4146e0ab..2bbdacb1313f9 100644
+--- a/arch/arm64/boot/dts/freescale/imx8qm-ss-img.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8qm-ss-img.dtsi
+@@ -8,5 +8,5 @@
+ };
+
+ &jpegenc {
+- compatible = "nxp,imx8qm-jpgdec", "nxp,imx8qxp-jpgenc";
++ compatible = "nxp,imx8qm-jpgenc", "nxp,imx8qxp-jpgenc";
+ };
+diff --git a/arch/arm64/boot/dts/marvell/cn9130-crb.dtsi b/arch/arm64/boot/dts/marvell/cn9130-crb.dtsi
+index 8e4ec243fb8fc..e5fc6cca50e74 100644
+--- a/arch/arm64/boot/dts/marvell/cn9130-crb.dtsi
++++ b/arch/arm64/boot/dts/marvell/cn9130-crb.dtsi
+@@ -120,7 +120,7 @@
+ "mpp59", "mpp60", "mpp61";
+ marvell,function = "sdio";
+ };
+- cp0_spi0_pins: cp0-spi-pins-0 {
++ cp0_spi1_pins: cp0-spi-pins-1 {
+ marvell,pins = "mpp13", "mpp14", "mpp15", "mpp16";
+ marvell,function = "spi1";
+ };
+@@ -170,7 +170,7 @@
+
+ &cp0_spi1 {
+ pinctrl-names = "default";
+- pinctrl-0 = <&cp0_spi0_pins>;
++ pinctrl-0 = <&cp0_spi1_pins>;
+ reg = <0x700680 0x50>, /* control */
+ <0x2000000 0x1000000>; /* CS0 */
+ status = "okay";
+diff --git a/arch/arm64/boot/dts/marvell/cn9130-db.dtsi b/arch/arm64/boot/dts/marvell/cn9130-db.dtsi
+index c7de1ea0d470a..6eb6a175de38d 100644
+--- a/arch/arm64/boot/dts/marvell/cn9130-db.dtsi
++++ b/arch/arm64/boot/dts/marvell/cn9130-db.dtsi
+@@ -307,7 +307,7 @@
+ &cp0_spi1 {
+ status = "disabled";
+ pinctrl-names = "default";
+- pinctrl-0 = <&cp0_spi0_pins>;
++ pinctrl-0 = <&cp0_spi1_pins>;
+ reg = <0x700680 0x50>;
+
+ flash@0 {
+@@ -371,7 +371,7 @@
+ "mpp59", "mpp60", "mpp61";
+ marvell,function = "sdio";
+ };
+- cp0_spi0_pins: cp0-spi-pins-0 {
++ cp0_spi1_pins: cp0-spi-pins-1 {
+ marvell,pins = "mpp13", "mpp14", "mpp15", "mpp16";
+ marvell,function = "spi1";
+ };
+diff --git a/arch/arm64/boot/dts/nvidia/tegra234.dtsi b/arch/arm64/boot/dts/nvidia/tegra234.dtsi
+index dfe2cf2f4b218..6598e9ac52b81 100644
+--- a/arch/arm64/boot/dts/nvidia/tegra234.dtsi
++++ b/arch/arm64/boot/dts/nvidia/tegra234.dtsi
+@@ -532,12 +532,12 @@
+ <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
++ <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 257 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 258 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 259 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 260 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 261 IRQ_TYPE_LEVEL_HIGH>;
+ status = "okay";
+ };
+
+diff --git a/arch/arm64/boot/dts/qcom/apq8016-sbc.dts b/arch/arm64/boot/dts/qcom/apq8016-sbc.dts
+index 9650ae70c8723..9d116e1fbe10c 100644
+--- a/arch/arm64/boot/dts/qcom/apq8016-sbc.dts
++++ b/arch/arm64/boot/dts/qcom/apq8016-sbc.dts
+@@ -200,6 +200,9 @@
+ pd-gpios = <&msmgpio 32 GPIO_ACTIVE_HIGH>;
+
+ avdd-supply = <&pm8916_l6>;
++ a2vdd-supply = <&pm8916_l6>;
++ dvdd-supply = <&pm8916_l6>;
++ pvdd-supply = <&pm8916_l6>;
+ v1p2-supply = <&pm8916_l6>;
+ v3p3-supply = <&pm8916_l17>;
+
+diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi
+index f84b3c1a03c53..bafac2cf7e3d6 100644
+--- a/arch/arm64/boot/dts/qcom/msm8916.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi
+@@ -1257,7 +1257,7 @@
+ #size-cells = <1>;
+ #iommu-cells = <1>;
+ compatible = "qcom,msm8916-iommu", "qcom,msm-iommu-v1";
+- ranges = <0 0x01e20000 0x40000>;
++ ranges = <0 0x01e20000 0x20000>;
+ reg = <0x01ef0000 0x3000>;
+ clocks = <&gcc GCC_SMMU_CFG_CLK>,
+ <&gcc GCC_APSS_TCU_CLK>;
+diff --git a/arch/arm64/boot/dts/qcom/msm8992-xiaomi-libra.dts b/arch/arm64/boot/dts/qcom/msm8992-xiaomi-libra.dts
+index 3ab0ad14e8704..95eab1f379229 100644
+--- a/arch/arm64/boot/dts/qcom/msm8992-xiaomi-libra.dts
++++ b/arch/arm64/boot/dts/qcom/msm8992-xiaomi-libra.dts
+@@ -109,11 +109,6 @@
+ qcom,client-id = <1>;
+ };
+
+- audio_mem: audio@cb400000 {
+- reg = <0 0xcb000000 0 0x400000>;
+- no-mem;
+- };
+-
+ qseecom_mem: qseecom@cb400000 {
+ reg = <0 0xcb400000 0 0x1c00000>;
+ no-mem;
+diff --git a/arch/arm64/boot/dts/qcom/sc7280.dtsi b/arch/arm64/boot/dts/qcom/sc7280.dtsi
+index 0cdc579f26de7..aea356c63b9a3 100644
+--- a/arch/arm64/boot/dts/qcom/sc7280.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc7280.dtsi
+@@ -820,7 +820,8 @@
+ clocks = <&rpmhcc RPMH_CXO_CLK>,
+ <&rpmhcc RPMH_CXO_CLK_A>, <&sleep_clk>,
+ <0>, <&pcie1_lane>,
+- <0>, <0>, <0>, <0>;
++ <0>, <0>, <0>,
++ <&usb_1_ssphy>;
+ clock-names = "bi_tcxo", "bi_tcxo_ao", "sleep_clk",
+ "pcie_0_pipe_clk", "pcie_1_pipe_clk",
+ "ufs_phy_rx_symbol_0_clk", "ufs_phy_rx_symbol_1_clk",
+@@ -5337,6 +5338,14 @@
+ reg = <0 0x18591000 0 0x1000>,
+ <0 0x18592000 0 0x1000>,
+ <0 0x18593000 0 0x1000>;
++
++ interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>;
++ interrupt-names = "dcvsh-irq-0",
++ "dcvsh-irq-1",
++ "dcvsh-irq-2";
++
+ clocks = <&rpmhcc RPMH_CXO_CLK>, <&gcc GCC_GPLL0>;
+ clock-names = "xo", "alternate";
+ #freq-domain-cells = <1>;
+diff --git a/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi b/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi
+index b5f11fbcc3004..a5c0c788969fb 100644
+--- a/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi
++++ b/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi
+@@ -145,6 +145,10 @@
+ };
+ };
+
++&cpufreq_hw {
++ /delete-property/ interrupts-extended; /* reference to lmh_cluster[01] */
++};
++
+ &psci {
+ /delete-node/ cpu0;
+ /delete-node/ cpu1;
+@@ -277,6 +281,14 @@
+ &CLUSTER_SLEEP_0>;
+ };
+
++&lmh_cluster0 {
++ status = "disabled";
++};
++
++&lmh_cluster1 {
++ status = "disabled";
++};
++
+ /*
+ * Reserved memory changes
+ *
+diff --git a/arch/arm64/boot/dts/qcom/sdm845-mtp.dts b/arch/arm64/boot/dts/qcom/sdm845-mtp.dts
+index de2d10e0315af..64958dee17d8b 100644
+--- a/arch/arm64/boot/dts/qcom/sdm845-mtp.dts
++++ b/arch/arm64/boot/dts/qcom/sdm845-mtp.dts
+@@ -714,6 +714,8 @@
+ vdd-1.8-xo-supply = <&vreg_l7a_1p8>;
+ vdd-1.3-rfa-supply = <&vreg_l17a_1p3>;
+ vdd-3.3-ch0-supply = <&vreg_l25a_3p3>;
++
++ qcom,snoc-host-cap-8bit-quirk;
+ };
+
+ /* PINCTRL - additions to nodes defined in sdm845.dtsi */
+diff --git a/arch/arm64/boot/dts/qcom/sm8150.dtsi b/arch/arm64/boot/dts/qcom/sm8150.dtsi
+index de794a5078dfc..c586378fc6bc7 100644
+--- a/arch/arm64/boot/dts/qcom/sm8150.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8150.dtsi
+@@ -1839,8 +1839,12 @@
+ ranges;
+ clocks = <&gcc GCC_PCIE_PHY_AUX_CLK>,
+ <&gcc GCC_PCIE_0_CFG_AHB_CLK>,
++ <&gcc GCC_PCIE_0_CLKREF_CLK>,
+ <&gcc GCC_PCIE0_PHY_REFGEN_CLK>;
+- clock-names = "aux", "cfg_ahb", "refgen";
++ clock-names = "aux",
++ "cfg_ahb",
++ "ref",
++ "refgen";
+
+ resets = <&gcc GCC_PCIE_0_PHY_BCR>;
+ reset-names = "phy";
+@@ -1938,8 +1942,12 @@
+ ranges;
+ clocks = <&gcc GCC_PCIE_PHY_AUX_CLK>,
+ <&gcc GCC_PCIE_1_CFG_AHB_CLK>,
++ <&gcc GCC_PCIE_1_CLKREF_CLK>,
+ <&gcc GCC_PCIE1_PHY_REFGEN_CLK>;
+- clock-names = "aux", "cfg_ahb", "refgen";
++ clock-names = "aux",
++ "cfg_ahb",
++ "ref",
++ "refgen";
+
+ resets = <&gcc GCC_PCIE_1_PHY_BCR>;
+ reset-names = "phy";
+diff --git a/arch/arm64/boot/dts/qcom/sm8350.dtsi b/arch/arm64/boot/dts/qcom/sm8350.dtsi
+index b3245b13b2611..793768a2c9e1e 100644
+--- a/arch/arm64/boot/dts/qcom/sm8350.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8350.dtsi
+@@ -1778,7 +1778,7 @@
+ };
+
+ qup_uart18_default: qup-uart18-default-state {
+- pins = "gpio58", "gpio59";
++ pins = "gpio68", "gpio69";
+ function = "qup18";
+ drive-strength = <2>;
+ bias-disable;
+diff --git a/arch/arm64/boot/dts/ti/k3-am62a7-sk.dts b/arch/arm64/boot/dts/ti/k3-am62a7-sk.dts
+index b08a083d722d4..7f265c671654d 100644
+--- a/arch/arm64/boot/dts/ti/k3-am62a7-sk.dts
++++ b/arch/arm64/boot/dts/ti/k3-am62a7-sk.dts
+@@ -172,7 +172,7 @@
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_i2c1_pins_default>;
+- clock-frequency = <400000>;
++ clock-frequency = <100000>;
+
+ exp1: gpio@22 {
+ compatible = "ti,tca6424";
+diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
+index 357932938b5ab..7dce9c0aa7836 100644
+--- a/arch/arm64/include/asm/cputype.h
++++ b/arch/arm64/include/asm/cputype.h
+@@ -85,7 +85,8 @@
+ #define ARM_CPU_PART_NEOVERSE_N2 0xD49
+ #define ARM_CPU_PART_CORTEX_A78C 0xD4B
+
+-#define APM_CPU_PART_POTENZA 0x000
++#define APM_CPU_PART_XGENE 0x000
++#define APM_CPU_VAR_POTENZA 0x00
+
+ #define CAVIUM_CPU_PART_THUNDERX 0x0A1
+ #define CAVIUM_CPU_PART_THUNDERX_81XX 0x0A2
+diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
+index dd20b8688d230..f44ae09a51956 100644
+--- a/arch/arm64/kvm/guest.c
++++ b/arch/arm64/kvm/guest.c
+@@ -863,7 +863,7 @@ u32 __attribute_const__ kvm_target_cpu(void)
+ break;
+ case ARM_CPU_IMP_APM:
+ switch (part_number) {
+- case APM_CPU_PART_POTENZA:
++ case APM_CPU_PART_XGENE:
+ return KVM_ARM_TARGET_XGENE_POTENZA;
+ }
+ break;
+diff --git a/arch/powerpc/include/asm/nohash/32/pte-40x.h b/arch/powerpc/include/asm/nohash/32/pte-40x.h
+index 2d3153cfc0d79..acf61242e85bf 100644
+--- a/arch/powerpc/include/asm/nohash/32/pte-40x.h
++++ b/arch/powerpc/include/asm/nohash/32/pte-40x.h
+@@ -69,9 +69,6 @@
+
+ #define _PTE_NONE_MASK 0
+
+-/* Until my rework is finished, 40x still needs atomic PTE updates */
+-#define PTE_ATOMIC_UPDATES 1
+-
+ #define _PAGE_BASE_NC (_PAGE_PRESENT | _PAGE_ACCESSED)
+ #define _PAGE_BASE (_PAGE_BASE_NC)
+
+diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
+index 9bdd79aa51cfc..3956f32682c62 100644
+--- a/arch/powerpc/kernel/traps.c
++++ b/arch/powerpc/kernel/traps.c
+@@ -1164,6 +1164,7 @@ static void emulate_single_step(struct pt_regs *regs)
+ __single_step_exception(regs);
+ }
+
++#ifdef CONFIG_PPC_FPU_REGS
+ static inline int __parse_fpscr(unsigned long fpscr)
+ {
+ int ret = FPE_FLTUNK;
+@@ -1190,6 +1191,7 @@ static inline int __parse_fpscr(unsigned long fpscr)
+
+ return ret;
+ }
++#endif
+
+ static void parse_fpe(struct pt_regs *regs)
+ {
+diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c
+index 9d229ef7f86ef..ada817c49b722 100644
+--- a/arch/powerpc/perf/imc-pmu.c
++++ b/arch/powerpc/perf/imc-pmu.c
+@@ -51,7 +51,7 @@ static int trace_imc_mem_size;
+ * core and trace-imc
+ */
+ static struct imc_pmu_ref imc_global_refc = {
+- .lock = __SPIN_LOCK_INITIALIZER(imc_global_refc.lock),
++ .lock = __SPIN_LOCK_UNLOCKED(imc_global_refc.lock),
+ .id = 0,
+ .refc = 0,
+ };
+diff --git a/arch/powerpc/platforms/book3s/vas-api.c b/arch/powerpc/platforms/book3s/vas-api.c
+index 40f5ae5e1238d..92e60cb3163fa 100644
+--- a/arch/powerpc/platforms/book3s/vas-api.c
++++ b/arch/powerpc/platforms/book3s/vas-api.c
+@@ -4,6 +4,8 @@
+ * Copyright (C) 2019 Haren Myneni, IBM Corp
+ */
+
++#define pr_fmt(fmt) "vas-api: " fmt
++
+ #include <linux/kernel.h>
+ #include <linux/device.h>
+ #include <linux/cdev.h>
+@@ -78,7 +80,7 @@ int get_vas_user_win_ref(struct vas_user_win_ref *task_ref)
+ task_ref->mm = get_task_mm(current);
+ if (!task_ref->mm) {
+ put_pid(task_ref->pid);
+- pr_err("VAS: pid(%d): mm_struct is not found\n",
++ pr_err("pid(%d): mm_struct is not found\n",
+ current->pid);
+ return -EPERM;
+ }
+@@ -235,8 +237,7 @@ void vas_update_csb(struct coprocessor_request_block *crb,
+ rc = kill_pid_info(SIGSEGV, &info, pid);
+ rcu_read_unlock();
+
+- pr_devel("%s(): pid %d kill_proc_info() rc %d\n", __func__,
+- pid_vnr(pid), rc);
++ pr_devel("pid %d kill_proc_info() rc %d\n", pid_vnr(pid), rc);
+ }
+
+ void vas_dump_crb(struct coprocessor_request_block *crb)
+@@ -294,7 +295,7 @@ static int coproc_ioc_tx_win_open(struct file *fp, unsigned long arg)
+
+ rc = copy_from_user(&uattr, uptr, sizeof(uattr));
+ if (rc) {
+- pr_err("%s(): copy_from_user() returns %d\n", __func__, rc);
++ pr_err("copy_from_user() returns %d\n", rc);
+ return -EFAULT;
+ }
+
+@@ -311,7 +312,7 @@ static int coproc_ioc_tx_win_open(struct file *fp, unsigned long arg)
+ txwin = cp_inst->coproc->vops->open_win(uattr.vas_id, uattr.flags,
+ cp_inst->coproc->cop_type);
+ if (IS_ERR(txwin)) {
+- pr_err("%s() VAS window open failed, %ld\n", __func__,
++ pr_err_ratelimited("VAS window open failed rc=%ld\n",
+ PTR_ERR(txwin));
+ return PTR_ERR(txwin);
+ }
+@@ -405,8 +406,7 @@ static vm_fault_t vas_mmap_fault(struct vm_fault *vmf)
+ * window is not opened. Shouldn't expect this error.
+ */
+ if (!cp_inst || !cp_inst->txwin) {
+- pr_err("%s(): Unexpected fault on paste address with TX window closed\n",
+- __func__);
++ pr_err("Unexpected fault on paste address with TX window closed\n");
+ return VM_FAULT_SIGBUS;
+ }
+
+@@ -421,8 +421,7 @@ static vm_fault_t vas_mmap_fault(struct vm_fault *vmf)
+ * issue NX request.
+ */
+ if (txwin->task_ref.vma != vmf->vma) {
+- pr_err("%s(): No previous mapping with paste address\n",
+- __func__);
++ pr_err("No previous mapping with paste address\n");
+ return VM_FAULT_SIGBUS;
+ }
+
+@@ -481,19 +480,19 @@ static int coproc_mmap(struct file *fp, struct vm_area_struct *vma)
+ txwin = cp_inst->txwin;
+
+ if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) {
+- pr_debug("%s(): size 0x%zx, PAGE_SIZE 0x%zx\n", __func__,
++ pr_debug("size 0x%zx, PAGE_SIZE 0x%zx\n",
+ (vma->vm_end - vma->vm_start), PAGE_SIZE);
+ return -EINVAL;
+ }
+
+ /* Ensure instance has an open send window */
+ if (!txwin) {
+- pr_err("%s(): No send window open?\n", __func__);
++ pr_err("No send window open?\n");
+ return -EINVAL;
+ }
+
+ if (!cp_inst->coproc->vops || !cp_inst->coproc->vops->paste_addr) {
+- pr_err("%s(): VAS API is not registered\n", __func__);
++ pr_err("VAS API is not registered\n");
+ return -EACCES;
+ }
+
+@@ -510,14 +509,14 @@ static int coproc_mmap(struct file *fp, struct vm_area_struct *vma)
+ */
+ mutex_lock(&txwin->task_ref.mmap_mutex);
+ if (txwin->status != VAS_WIN_ACTIVE) {
+- pr_err("%s(): Window is not active\n", __func__);
++ pr_err("Window is not active\n");
+ rc = -EACCES;
+ goto out;
+ }
+
+ paste_addr = cp_inst->coproc->vops->paste_addr(txwin);
+ if (!paste_addr) {
+- pr_err("%s(): Window paste address failed\n", __func__);
++ pr_err("Window paste address failed\n");
+ rc = -EINVAL;
+ goto out;
+ }
+@@ -533,8 +532,8 @@ static int coproc_mmap(struct file *fp, struct vm_area_struct *vma)
+ rc = remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
+ vma->vm_end - vma->vm_start, prot);
+
+- pr_devel("%s(): paste addr %llx at %lx, rc %d\n", __func__,
+- paste_addr, vma->vm_start, rc);
++ pr_devel("paste addr %llx at %lx, rc %d\n", paste_addr,
++ vma->vm_start, rc);
+
+ txwin->task_ref.vma = vma;
+ vma->vm_ops = &vas_vm_ops;
+@@ -609,8 +608,7 @@ int vas_register_coproc_api(struct module *mod, enum vas_cop_type cop_type,
+ goto err;
+ }
+
+- pr_devel("%s: Added dev [%d,%d]\n", __func__, MAJOR(devno),
+- MINOR(devno));
++ pr_devel("Added dev [%d,%d]\n", MAJOR(devno), MINOR(devno));
+
+ return 0;
+
+diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
+index 2c2812a87d470..541199c6a587d 100644
+--- a/arch/powerpc/platforms/pseries/lpar.c
++++ b/arch/powerpc/platforms/pseries/lpar.c
+@@ -524,8 +524,10 @@ static ssize_t vcpudispatch_stats_write(struct file *file, const char __user *p,
+
+ if (cmd) {
+ rc = init_cpu_associativity();
+- if (rc)
++ if (rc) {
++ destroy_cpu_associativity();
+ goto out;
++ }
+
+ for_each_possible_cpu(cpu) {
+ disp = per_cpu_ptr(&vcpu_disp_data, cpu);
+diff --git a/arch/powerpc/platforms/pseries/vas.c b/arch/powerpc/platforms/pseries/vas.c
+index 041a25c08066b..5db8060776b0c 100644
+--- a/arch/powerpc/platforms/pseries/vas.c
++++ b/arch/powerpc/platforms/pseries/vas.c
+@@ -340,7 +340,7 @@ static struct vas_window *vas_allocate_window(int vas_id, u64 flags,
+
+ if (atomic_inc_return(&cop_feat_caps->nr_used_credits) >
+ atomic_read(&cop_feat_caps->nr_total_credits)) {
+- pr_err("Credits are not available to allocate window\n");
++ pr_err_ratelimited("Credits are not available to allocate window\n");
+ rc = -EINVAL;
+ goto out;
+ }
+@@ -423,7 +423,7 @@ static struct vas_window *vas_allocate_window(int vas_id, u64 flags,
+
+ put_vas_user_win_ref(&txwin->vas_win.task_ref);
+ rc = -EBUSY;
+- pr_err("No credit is available to allocate window\n");
++ pr_err_ratelimited("No credit is available to allocate window\n");
+
+ out_free:
+ /*
+diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c
+index 3925825954bcc..e5baa91ddd07b 100644
+--- a/arch/powerpc/sysdev/xive/native.c
++++ b/arch/powerpc/sysdev/xive/native.c
+@@ -804,7 +804,7 @@ int xive_native_get_queue_info(u32 vp_id, u32 prio,
+ if (out_qpage)
+ *out_qpage = be64_to_cpu(qpage);
+ if (out_qsize)
+- *out_qsize = be32_to_cpu(qsize);
++ *out_qsize = be64_to_cpu(qsize);
+ if (out_qeoi_page)
+ *out_qeoi_page = be64_to_cpu(qeoi_page);
+ if (out_escalate_irq)
+diff --git a/arch/riscv/kernel/cpu.c b/arch/riscv/kernel/cpu.c
+index 852ecccd8920f..0f76181dc634d 100644
+--- a/arch/riscv/kernel/cpu.c
++++ b/arch/riscv/kernel/cpu.c
+@@ -57,13 +57,14 @@ int riscv_of_processor_hartid(struct device_node *node, unsigned long *hart)
+ */
+ int riscv_of_parent_hartid(struct device_node *node, unsigned long *hartid)
+ {
+- int rc;
+-
+ for (; node; node = node->parent) {
+ if (of_device_is_compatible(node, "riscv")) {
+- rc = riscv_of_processor_hartid(node, hartid);
+- if (!rc)
+- return 0;
++ *hartid = (unsigned long)of_get_cpu_hwid(node, 0);
++ if (*hartid == ~0UL) {
++ pr_warn("Found CPU without hart ID\n");
++ return -ENODEV;
++ }
++ return 0;
+ }
+ }
+
+diff --git a/arch/sh/Kconfig.debug b/arch/sh/Kconfig.debug
+index c449e7c1b20ff..8bcd6c1431a95 100644
+--- a/arch/sh/Kconfig.debug
++++ b/arch/sh/Kconfig.debug
+@@ -22,6 +22,17 @@ config STACK_DEBUG
+ every function call and will therefore incur a major
+ performance hit. Most users should say N.
+
++config EARLY_PRINTK
++ bool "Early printk"
++ depends on SH_STANDARD_BIOS
++ help
++ Say Y here to redirect kernel printk messages to the serial port
++ used by the SH-IPL bootloader, starting very early in the boot
++ process and ending when the kernel's serial console is initialised.
++ This option is only useful while porting the kernel to a new machine,
++ when the kernel may crash or hang before the serial console is
++ initialised. If unsure, say N.
++
+ config 4KSTACKS
+ bool "Use 4Kb for kernel stacks instead of 8Kb"
+ depends on DEBUG_KERNEL && (MMU || BROKEN) && !PAGE_SIZE_64KB
+diff --git a/arch/x86/include/asm/sev-common.h b/arch/x86/include/asm/sev-common.h
+index b63be696b776a..0759af9b1acfc 100644
+--- a/arch/x86/include/asm/sev-common.h
++++ b/arch/x86/include/asm/sev-common.h
+@@ -128,10 +128,6 @@ struct snp_psc_desc {
+ struct psc_entry entries[VMGEXIT_PSC_MAX_ENTRY];
+ } __packed;
+
+-/* Guest message request error codes */
+-#define SNP_GUEST_REQ_INVALID_LEN BIT_ULL(32)
+-#define SNP_GUEST_REQ_ERR_BUSY BIT_ULL(33)
+-
+ #define GHCB_MSR_TERM_REQ 0x100
+ #define GHCB_MSR_TERM_REASON_SET_POS 12
+ #define GHCB_MSR_TERM_REASON_SET_MASK 0xf
+diff --git a/arch/x86/include/asm/sev.h b/arch/x86/include/asm/sev.h
+index a0a58c4122ec3..7ca5c9ec8b52e 100644
+--- a/arch/x86/include/asm/sev.h
++++ b/arch/x86/include/asm/sev.h
+@@ -9,6 +9,8 @@
+ #define __ASM_ENCRYPTED_STATE_H
+
+ #include <linux/types.h>
++#include <linux/sev-guest.h>
++
+ #include <asm/insn.h>
+ #include <asm/sev-common.h>
+ #include <asm/bootparam.h>
+@@ -185,6 +187,9 @@ static inline int pvalidate(unsigned long vaddr, bool rmp_psize, bool validate)
+
+ return rc;
+ }
++
++struct snp_guest_request_ioctl;
++
+ void setup_ghcb(void);
+ void __init early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr,
+ unsigned long npages);
+@@ -196,7 +201,7 @@ void snp_set_memory_private(unsigned long vaddr, unsigned long npages);
+ void snp_set_wakeup_secondary_cpu(void);
+ bool snp_init(struct boot_params *bp);
+ void __init __noreturn snp_abort(void);
+-int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, unsigned long *fw_err);
++int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, struct snp_guest_request_ioctl *rio);
+ #else
+ static inline void sev_es_ist_enter(struct pt_regs *regs) { }
+ static inline void sev_es_ist_exit(void) { }
+@@ -216,8 +221,7 @@ static inline void snp_set_memory_private(unsigned long vaddr, unsigned long npa
+ static inline void snp_set_wakeup_secondary_cpu(void) { }
+ static inline bool snp_init(struct boot_params *bp) { return false; }
+ static inline void snp_abort(void) { }
+-static inline int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input,
+- unsigned long *fw_err)
++static inline int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, struct snp_guest_request_ioctl *rio)
+ {
+ return -ENOTTY;
+ }
+diff --git a/arch/x86/include/asm/sparsemem.h b/arch/x86/include/asm/sparsemem.h
+index 64df897c0ee30..1be13b2dfe8bf 100644
+--- a/arch/x86/include/asm/sparsemem.h
++++ b/arch/x86/include/asm/sparsemem.h
+@@ -37,6 +37,8 @@ extern int phys_to_target_node(phys_addr_t start);
+ #define phys_to_target_node phys_to_target_node
+ extern int memory_add_physaddr_to_nid(u64 start);
+ #define memory_add_physaddr_to_nid memory_add_physaddr_to_nid
++extern int numa_fill_memblks(u64 start, u64 end);
++#define numa_fill_memblks numa_fill_memblks
+ #endif
+ #endif /* __ASSEMBLY__ */
+
+diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
+index 1cc756eafa447..6ca0c661cb637 100644
+--- a/arch/x86/include/asm/uaccess.h
++++ b/arch/x86/include/asm/uaccess.h
+@@ -518,7 +518,7 @@ copy_mc_to_kernel(void *to, const void *from, unsigned len);
+ #define copy_mc_to_kernel copy_mc_to_kernel
+
+ unsigned long __must_check
+-copy_mc_to_user(void *to, const void *from, unsigned len);
++copy_mc_to_user(void __user *to, const void *from, unsigned len);
+ #endif
+
+ /*
+diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
+index 7e331e8f36929..8ea24df3c5ff1 100644
+--- a/arch/x86/kernel/amd_nb.c
++++ b/arch/x86/kernel/amd_nb.c
+@@ -100,6 +100,9 @@ static const struct pci_device_id amd_nb_link_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M10H_DF_F4) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M40H_DF_F4) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F4) },
++ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M60H_DF_F4) },
++ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M70H_DF_F4) },
++ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M78H_DF_F4) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) },
+ {}
+ };
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index 263df737d5cd5..13dffc43ded02 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -2477,7 +2477,7 @@ static void __init srso_select_mitigation(void)
+ pr_info("%s%s\n", srso_strings[srso_mitigation], (has_microcode ? "" : ", no microcode"));
+
+ pred_cmd:
+- if ((boot_cpu_has(X86_FEATURE_SRSO_NO) || srso_cmd == SRSO_CMD_OFF) &&
++ if ((!boot_cpu_has_bug(X86_BUG_SRSO) || srso_cmd == SRSO_CMD_OFF) &&
+ boot_cpu_has(X86_FEATURE_SBPB))
+ x86_pred_cmd = PRED_CMD_SBPB;
+ }
+diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
+index 6a3cfaf6b72ad..84adf12a76d3c 100644
+--- a/arch/x86/kernel/head64.c
++++ b/arch/x86/kernel/head64.c
+@@ -80,7 +80,7 @@ static struct desc_struct startup_gdt[GDT_ENTRIES] = {
+ * while the kernel still uses a direct mapping.
+ */
+ static struct desc_ptr startup_gdt_descr = {
+- .size = sizeof(startup_gdt),
++ .size = sizeof(startup_gdt)-1,
+ .address = 0,
+ };
+
+diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c
+index e7968c41ecf57..68b2a9d3dbc6b 100644
+--- a/arch/x86/kernel/sev.c
++++ b/arch/x86/kernel/sev.c
+@@ -22,6 +22,8 @@
+ #include <linux/efi.h>
+ #include <linux/platform_device.h>
+ #include <linux/io.h>
++#include <linux/psp-sev.h>
++#include <uapi/linux/sev-guest.h>
+
+ #include <asm/cpu_entry_area.h>
+ #include <asm/stacktrace.h>
+@@ -2205,7 +2207,7 @@ static int __init init_sev_config(char *str)
+ }
+ __setup("sev=", init_sev_config);
+
+-int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, unsigned long *fw_err)
++int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, struct snp_guest_request_ioctl *rio)
+ {
+ struct ghcb_state state;
+ struct es_em_ctxt ctxt;
+@@ -2213,8 +2215,7 @@ int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, unsigned
+ struct ghcb *ghcb;
+ int ret;
+
+- if (!fw_err)
+- return -EINVAL;
++ rio->exitinfo2 = SEV_RET_NO_FW_CALL;
+
+ /*
+ * __sev_get_ghcb() needs to run with IRQs disabled because it is using
+@@ -2239,16 +2240,16 @@ int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, unsigned
+ if (ret)
+ goto e_put;
+
+- *fw_err = ghcb->save.sw_exit_info_2;
+- switch (*fw_err) {
++ rio->exitinfo2 = ghcb->save.sw_exit_info_2;
++ switch (rio->exitinfo2) {
+ case 0:
+ break;
+
+- case SNP_GUEST_REQ_ERR_BUSY:
++ case SNP_GUEST_VMM_ERR(SNP_GUEST_VMM_ERR_BUSY):
+ ret = -EAGAIN;
+ break;
+
+- case SNP_GUEST_REQ_INVALID_LEN:
++ case SNP_GUEST_VMM_ERR(SNP_GUEST_VMM_ERR_INVALID_LEN):
+ /* Number of expected pages are returned in RBX */
+ if (exit_code == SVM_VMGEXIT_EXT_GUEST_REQUEST) {
+ input->data_npages = ghcb_get_rbx(ghcb);
+diff --git a/arch/x86/lib/copy_mc.c b/arch/x86/lib/copy_mc.c
+index 80efd45a77617..6e8b7e600def5 100644
+--- a/arch/x86/lib/copy_mc.c
++++ b/arch/x86/lib/copy_mc.c
+@@ -70,23 +70,23 @@ unsigned long __must_check copy_mc_to_kernel(void *dst, const void *src, unsigne
+ }
+ EXPORT_SYMBOL_GPL(copy_mc_to_kernel);
+
+-unsigned long __must_check copy_mc_to_user(void *dst, const void *src, unsigned len)
++unsigned long __must_check copy_mc_to_user(void __user *dst, const void *src, unsigned len)
+ {
+ unsigned long ret;
+
+ if (copy_mc_fragile_enabled) {
+ __uaccess_begin();
+- ret = copy_mc_fragile(dst, src, len);
++ ret = copy_mc_fragile((__force void *)dst, src, len);
+ __uaccess_end();
+ return ret;
+ }
+
+ if (static_cpu_has(X86_FEATURE_ERMS)) {
+ __uaccess_begin();
+- ret = copy_mc_enhanced_fast_string(dst, src, len);
++ ret = copy_mc_enhanced_fast_string((__force void *)dst, src, len);
+ __uaccess_end();
+ return ret;
+ }
+
+- return copy_user_generic(dst, src, len);
++ return copy_user_generic((__force void *)dst, src, len);
+ }
+diff --git a/arch/x86/mm/maccess.c b/arch/x86/mm/maccess.c
+index 5a53c2cc169cc..6993f026adec9 100644
+--- a/arch/x86/mm/maccess.c
++++ b/arch/x86/mm/maccess.c
+@@ -9,12 +9,21 @@ bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
+ unsigned long vaddr = (unsigned long)unsafe_src;
+
+ /*
+- * Range covering the highest possible canonical userspace address
+- * as well as non-canonical address range. For the canonical range
+- * we also need to include the userspace guard page.
++ * Do not allow userspace addresses. This disallows
++ * normal userspace and the userspace guard page:
+ */
+- return vaddr >= TASK_SIZE_MAX + PAGE_SIZE &&
+- __is_canonical_address(vaddr, boot_cpu_data.x86_virt_bits);
++ if (vaddr < TASK_SIZE_MAX + PAGE_SIZE)
++ return false;
++
++ /*
++ * Allow everything during early boot before 'x86_virt_bits'
++ * is initialized. Needed for instruction decoding in early
++ * exception handlers.
++ */
++ if (!boot_cpu_data.x86_virt_bits)
++ return true;
++
++ return __is_canonical_address(vaddr, boot_cpu_data.x86_virt_bits);
+ }
+ #else
+ bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
+diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
+index 2aadb2019b4f2..c01c5506fd4ae 100644
+--- a/arch/x86/mm/numa.c
++++ b/arch/x86/mm/numa.c
+@@ -11,6 +11,7 @@
+ #include <linux/nodemask.h>
+ #include <linux/sched.h>
+ #include <linux/topology.h>
++#include <linux/sort.h>
+
+ #include <asm/e820/api.h>
+ #include <asm/proto.h>
+@@ -961,4 +962,83 @@ int memory_add_physaddr_to_nid(u64 start)
+ return nid;
+ }
+ EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
++
++static int __init cmp_memblk(const void *a, const void *b)
++{
++ const struct numa_memblk *ma = *(const struct numa_memblk **)a;
++ const struct numa_memblk *mb = *(const struct numa_memblk **)b;
++
++ return ma->start - mb->start;
++}
++
++static struct numa_memblk *numa_memblk_list[NR_NODE_MEMBLKS] __initdata;
++
++/**
++ * numa_fill_memblks - Fill gaps in numa_meminfo memblks
++ * @start: address to begin fill
++ * @end: address to end fill
++ *
++ * Find and extend numa_meminfo memblks to cover the @start-@end
++ * physical address range, such that the first memblk includes
++ * @start, the last memblk includes @end, and any gaps in between
++ * are filled.
++ *
++ * RETURNS:
++ * 0 : Success
++ * NUMA_NO_MEMBLK : No memblk exists in @start-@end range
++ */
++
++int __init numa_fill_memblks(u64 start, u64 end)
++{
++ struct numa_memblk **blk = &numa_memblk_list[0];
++ struct numa_meminfo *mi = &numa_meminfo;
++ int count = 0;
++ u64 prev_end;
++
++ /*
++ * Create a list of pointers to numa_meminfo memblks that
++ * overlap start, end. Exclude (start == bi->end) since
++ * end addresses in both a CFMWS range and a memblk range
++ * are exclusive.
++ *
++ * This list of pointers is used to make in-place changes
++ * that fill out the numa_meminfo memblks.
++ */
++ for (int i = 0; i < mi->nr_blks; i++) {
++ struct numa_memblk *bi = &mi->blk[i];
++
++ if (start < bi->end && end >= bi->start) {
++ blk[count] = &mi->blk[i];
++ count++;
++ }
++ }
++ if (!count)
++ return NUMA_NO_MEMBLK;
++
++ /* Sort the list of pointers in memblk->start order */
++ sort(&blk[0], count, sizeof(blk[0]), cmp_memblk, NULL);
++
++ /* Make sure the first/last memblks include start/end */
++ blk[0]->start = min(blk[0]->start, start);
++ blk[count - 1]->end = max(blk[count - 1]->end, end);
++
++ /*
++ * Fill any gaps by tracking the previous memblks
++ * end address and backfilling to it if needed.
++ */
++ prev_end = blk[0]->end;
++ for (int i = 1; i < count; i++) {
++ struct numa_memblk *curr = blk[i];
++
++ if (prev_end >= curr->start) {
++ if (prev_end < curr->end)
++ prev_end = curr->end;
++ } else {
++ curr->start = prev_end;
++ prev_end = curr->end;
++ }
++ }
++ return 0;
++}
++
+ #endif
+diff --git a/block/blk-core.c b/block/blk-core.c
+index ebb7a1689b261..6eaf2b0ad7cca 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -490,8 +490,8 @@ static inline void bio_check_ro(struct bio *bio)
+ if (op_is_write(bio_op(bio)) && bdev_read_only(bio->bi_bdev)) {
+ if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
+ return;
+- pr_warn("Trying to write to read-only block-device %pg\n",
+- bio->bi_bdev);
++ pr_warn_ratelimited("Trying to write to read-only block-device %pg\n",
++ bio->bi_bdev);
+ /* Older lvm-tools actually trigger this */
+ }
+ }
+diff --git a/drivers/acpi/device_sysfs.c b/drivers/acpi/device_sysfs.c
+index 120873dad2cc5..c727fb320eeea 100644
+--- a/drivers/acpi/device_sysfs.c
++++ b/drivers/acpi/device_sysfs.c
+@@ -158,8 +158,8 @@ static int create_pnp_modalias(struct acpi_device *acpi_dev, char *modalias,
+ return 0;
+
+ len = snprintf(modalias, size, "acpi:");
+- if (len <= 0)
+- return len;
++ if (len >= size)
++ return -ENOMEM;
+
+ size -= len;
+
+@@ -212,8 +212,10 @@ static int create_of_modalias(struct acpi_device *acpi_dev, char *modalias,
+ len = snprintf(modalias, size, "of:N%sT", (char *)buf.pointer);
+ ACPI_FREE(buf.pointer);
+
+- if (len <= 0)
+- return len;
++ if (len >= size)
++ return -ENOMEM;
++
++ size -= len;
+
+ of_compatible = acpi_dev->data.of_compatible;
+ if (of_compatible->type == ACPI_TYPE_PACKAGE) {
+diff --git a/drivers/acpi/numa/srat.c b/drivers/acpi/numa/srat.c
+index 1f4fc5f8a819d..12f330b0eac01 100644
+--- a/drivers/acpi/numa/srat.c
++++ b/drivers/acpi/numa/srat.c
+@@ -310,11 +310,16 @@ static int __init acpi_parse_cfmws(union acpi_subtable_headers *header,
+ start = cfmws->base_hpa;
+ end = cfmws->base_hpa + cfmws->window_size;
+
+- /* Skip if the SRAT already described the NUMA details for this HPA */
+- node = phys_to_target_node(start);
+- if (node != NUMA_NO_NODE)
++ /*
++ * The SRAT may have already described NUMA details for all,
++ * or a portion of, this CFMWS HPA range. Extend the memblks
++ * found for any portion of the window to cover the entire
++ * window.
++ */
++ if (!numa_fill_memblks(start, end))
+ return 0;
+
++ /* No SRAT description. Create a new node. */
+ node = acpi_map_pxm_to_node(*fake_pxm);
+
+ if (node == NUMA_NO_NODE) {
+diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
+index b8d9eb9a433ed..0565c18c2ee31 100644
+--- a/drivers/acpi/property.c
++++ b/drivers/acpi/property.c
+@@ -1114,25 +1114,26 @@ static int acpi_data_prop_read(const struct acpi_device_data *data,
+ switch (proptype) {
+ case DEV_PROP_STRING:
+ break;
+- case DEV_PROP_U8 ... DEV_PROP_U64:
++ default:
+ if (obj->type == ACPI_TYPE_BUFFER) {
+ if (nval > obj->buffer.length)
+ return -EOVERFLOW;
+- break;
++ } else {
++ if (nval > obj->package.count)
++ return -EOVERFLOW;
+ }
+- fallthrough;
+- default:
+- if (nval > obj->package.count)
+- return -EOVERFLOW;
+ break;
+ }
+ if (nval == 0)
+ return -EINVAL;
+
+- if (obj->type != ACPI_TYPE_BUFFER)
+- items = obj->package.elements;
+- else
++ if (obj->type == ACPI_TYPE_BUFFER) {
++ if (proptype != DEV_PROP_U8)
++ return -EPROTO;
+ items = obj;
++ } else {
++ items = obj->package.elements;
++ }
+
+ switch (proptype) {
+ case DEV_PROP_U8:
+diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
+index 073d26ddb6c21..60b0128a10e86 100644
+--- a/drivers/acpi/video_detect.c
++++ b/drivers/acpi/video_detect.c
+@@ -130,6 +130,16 @@ static int video_detect_force_native(const struct dmi_system_id *d)
+ return 0;
+ }
+
++static int video_detect_portege_r100(const struct dmi_system_id *d)
++{
++ struct pci_dev *dev;
++ /* Search for Trident CyberBlade XP4m32 to confirm Portégé R100 */
++ dev = pci_get_device(PCI_VENDOR_ID_TRIDENT, 0x2100, NULL);
++ if (dev)
++ acpi_backlight_dmi = acpi_backlight_vendor;
++ return 0;
++}
++
+ static const struct dmi_system_id video_detect_dmi_table[] = {
+ /*
+ * Models which should use the vendor backlight interface,
+@@ -268,6 +278,22 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
+ },
+ },
+
++ /*
++ * Toshiba Portégé R100 has working both acpi_video and toshiba_acpi
++ * vendor driver. But none of them gets activated as it has a VGA with
++ * no kernel driver (Trident CyberBlade XP4m32).
++ * The DMI strings are generic so check for the VGA chip in callback.
++ */
++ {
++ .callback = video_detect_portege_r100,
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Portable PC"),
++ DMI_MATCH(DMI_PRODUCT_VERSION, "Version 1.0"),
++ DMI_MATCH(DMI_BOARD_NAME, "Portable PC")
++ },
++ },
++
+ /*
+ * Models which need acpi_video backlight control where the GPU drivers
+ * do not call acpi_video_register_backlight() because no internal panel
+diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
+index 817eda2075aa5..1e3d205ce15a0 100644
+--- a/drivers/base/regmap/regmap-debugfs.c
++++ b/drivers/base/regmap/regmap-debugfs.c
+@@ -48,7 +48,7 @@ static ssize_t regmap_name_read_file(struct file *file,
+ name = map->dev->driver->name;
+
+ ret = snprintf(buf, PAGE_SIZE, "%s\n", name);
+- if (ret < 0) {
++ if (ret >= PAGE_SIZE) {
+ kfree(buf);
+ return ret;
+ }
+diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
+index df1f78abdf266..140af27f591ae 100644
+--- a/drivers/base/regmap/regmap.c
++++ b/drivers/base/regmap/regmap.c
+@@ -1702,17 +1702,19 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
+ }
+
+ if (!map->cache_bypass && map->format.parse_val) {
+- unsigned int ival;
++ unsigned int ival, offset;
+ int val_bytes = map->format.val_bytes;
+- for (i = 0; i < val_len / val_bytes; i++) {
+- ival = map->format.parse_val(val + (i * val_bytes));
+- ret = regcache_write(map,
+- reg + regmap_get_offset(map, i),
+- ival);
++
++ /* Cache the last written value for noinc writes */
++ i = noinc ? val_len - val_bytes : 0;
++ for (; i < val_len; i += val_bytes) {
++ ival = map->format.parse_val(val + i);
++ offset = noinc ? 0 : regmap_get_offset(map, i / val_bytes);
++ ret = regcache_write(map, reg + offset, ival);
+ if (ret) {
+ dev_err(map->dev,
+ "Error in caching of register: %x ret: %d\n",
+- reg + regmap_get_offset(map, i), ret);
++ reg + offset, ret);
+ return ret;
+ }
+ }
+diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
+index 7718c81e1dba8..e94d2ff6b1223 100644
+--- a/drivers/block/nbd.c
++++ b/drivers/block/nbd.c
+@@ -250,7 +250,6 @@ static void nbd_dev_remove(struct nbd_device *nbd)
+ struct gendisk *disk = nbd->disk;
+
+ del_gendisk(disk);
+- put_disk(disk);
+ blk_mq_free_tag_set(&nbd->tag_set);
+
+ /*
+@@ -261,7 +260,7 @@ static void nbd_dev_remove(struct nbd_device *nbd)
+ idr_remove(&nbd_index_idr, nbd->index);
+ mutex_unlock(&nbd_index_mutex);
+ destroy_workqueue(nbd->recv_workq);
+- kfree(nbd);
++ put_disk(disk);
+ }
+
+ static void nbd_dev_remove_work(struct work_struct *work)
+@@ -1608,6 +1607,13 @@ static void nbd_release(struct gendisk *disk, fmode_t mode)
+ nbd_put(nbd);
+ }
+
++static void nbd_free_disk(struct gendisk *disk)
++{
++ struct nbd_device *nbd = disk->private_data;
++
++ kfree(nbd);
++}
++
+ static const struct block_device_operations nbd_fops =
+ {
+ .owner = THIS_MODULE,
+@@ -1615,6 +1621,7 @@ static const struct block_device_operations nbd_fops =
+ .release = nbd_release,
+ .ioctl = nbd_ioctl,
+ .compat_ioctl = nbd_ioctl,
++ .free_disk = nbd_free_disk,
+ };
+
+ #if IS_ENABLED(CONFIG_DEBUG_FS)
+diff --git a/drivers/char/hw_random/bcm2835-rng.c b/drivers/char/hw_random/bcm2835-rng.c
+index e98fcac578d66..634eab4776f32 100644
+--- a/drivers/char/hw_random/bcm2835-rng.c
++++ b/drivers/char/hw_random/bcm2835-rng.c
+@@ -71,7 +71,7 @@ static int bcm2835_rng_read(struct hwrng *rng, void *buf, size_t max,
+ while ((rng_readl(priv, RNG_STATUS) >> 24) == 0) {
+ if (!wait)
+ return 0;
+- hwrng_msleep(rng, 1000);
++ hwrng_yield(rng);
+ }
+
+ num_words = rng_readl(priv, RNG_STATUS) >> 24;
+diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
+index cc002b0c2f0c3..8f31f9d810305 100644
+--- a/drivers/char/hw_random/core.c
++++ b/drivers/char/hw_random/core.c
+@@ -680,6 +680,12 @@ long hwrng_msleep(struct hwrng *rng, unsigned int msecs)
+ }
+ EXPORT_SYMBOL_GPL(hwrng_msleep);
+
++long hwrng_yield(struct hwrng *rng)
++{
++ return wait_for_completion_interruptible_timeout(&rng->dying, 1);
++}
++EXPORT_SYMBOL_GPL(hwrng_yield);
++
+ static int __init hwrng_modinit(void)
+ {
+ int ret;
+diff --git a/drivers/char/hw_random/geode-rng.c b/drivers/char/hw_random/geode-rng.c
+index 12fbe80918319..159baf00a8675 100644
+--- a/drivers/char/hw_random/geode-rng.c
++++ b/drivers/char/hw_random/geode-rng.c
+@@ -58,7 +58,8 @@ struct amd_geode_priv {
+
+ static int geode_rng_data_read(struct hwrng *rng, u32 *data)
+ {
+- void __iomem *mem = (void __iomem *)rng->priv;
++ struct amd_geode_priv *priv = (struct amd_geode_priv *)rng->priv;
++ void __iomem *mem = priv->membase;
+
+ *data = readl(mem + GEODE_RNG_DATA_REG);
+
+@@ -67,7 +68,8 @@ static int geode_rng_data_read(struct hwrng *rng, u32 *data)
+
+ static int geode_rng_data_present(struct hwrng *rng, int wait)
+ {
+- void __iomem *mem = (void __iomem *)rng->priv;
++ struct amd_geode_priv *priv = (struct amd_geode_priv *)rng->priv;
++ void __iomem *mem = priv->membase;
+ int data, i;
+
+ for (i = 0; i < 20; i++) {
+diff --git a/drivers/clk/clk-npcm7xx.c b/drivers/clk/clk-npcm7xx.c
+index e319cfa51a8a3..030186def9c69 100644
+--- a/drivers/clk/clk-npcm7xx.c
++++ b/drivers/clk/clk-npcm7xx.c
+@@ -510,7 +510,7 @@ static void __init npcm7xx_clk_init(struct device_node *clk_np)
+ return;
+
+ npcm7xx_init_fail:
+- kfree(npcm7xx_clk_data->hws);
++ kfree(npcm7xx_clk_data);
+ npcm7xx_init_np_err:
+ iounmap(clk_base);
+ npcm7xx_init_error:
+diff --git a/drivers/clk/clk-scmi.c b/drivers/clk/clk-scmi.c
+index 2c7a830ce3080..fdec715c9ba9b 100644
+--- a/drivers/clk/clk-scmi.c
++++ b/drivers/clk/clk-scmi.c
+@@ -213,6 +213,7 @@ static int scmi_clocks_probe(struct scmi_device *sdev)
+ sclk->info = scmi_proto_clk_ops->info_get(ph, idx);
+ if (!sclk->info) {
+ dev_dbg(dev, "invalid clock info for idx %d\n", idx);
++ devm_kfree(dev, sclk);
+ continue;
+ }
+
+diff --git a/drivers/clk/imx/Kconfig b/drivers/clk/imx/Kconfig
+index 25785ec9c2762..f219004b8a337 100644
+--- a/drivers/clk/imx/Kconfig
++++ b/drivers/clk/imx/Kconfig
+@@ -96,6 +96,7 @@ config CLK_IMX8QXP
+ depends on (ARCH_MXC && ARM64) || COMPILE_TEST
+ depends on IMX_SCU && HAVE_ARM_SMCCC
+ select MXC_CLK_SCU
++ select MXC_CLK
+ help
+ Build the driver for IMX8QXP SCU based clocks.
+
+diff --git a/drivers/clk/imx/clk-imx8mq.c b/drivers/clk/imx/clk-imx8mq.c
+index 882dcad4817d7..0a75814b3bc77 100644
+--- a/drivers/clk/imx/clk-imx8mq.c
++++ b/drivers/clk/imx/clk-imx8mq.c
+@@ -288,8 +288,7 @@ static int imx8mq_clocks_probe(struct platform_device *pdev)
+ void __iomem *base;
+ int err;
+
+- clk_hw_data = kzalloc(struct_size(clk_hw_data, hws,
+- IMX8MQ_CLK_END), GFP_KERNEL);
++ clk_hw_data = devm_kzalloc(dev, struct_size(clk_hw_data, hws, IMX8MQ_CLK_END), GFP_KERNEL);
+ if (WARN_ON(!clk_hw_data))
+ return -ENOMEM;
+
+@@ -306,10 +305,12 @@ static int imx8mq_clocks_probe(struct platform_device *pdev)
+ hws[IMX8MQ_CLK_EXT4] = imx_obtain_fixed_clk_hw(np, "clk_ext4");
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,imx8mq-anatop");
+- base = of_iomap(np, 0);
++ base = devm_of_iomap(dev, np, 0, NULL);
+ of_node_put(np);
+- if (WARN_ON(!base))
+- return -ENOMEM;
++ if (WARN_ON(IS_ERR(base))) {
++ err = PTR_ERR(base);
++ goto unregister_hws;
++ }
+
+ hws[IMX8MQ_ARM_PLL_REF_SEL] = imx_clk_hw_mux("arm_pll_ref_sel", base + 0x28, 16, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+ hws[IMX8MQ_GPU_PLL_REF_SEL] = imx_clk_hw_mux("gpu_pll_ref_sel", base + 0x18, 16, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+@@ -395,8 +396,10 @@ static int imx8mq_clocks_probe(struct platform_device *pdev)
+
+ np = dev->of_node;
+ base = devm_platform_ioremap_resource(pdev, 0);
+- if (WARN_ON(IS_ERR(base)))
+- return PTR_ERR(base);
++ if (WARN_ON(IS_ERR(base))) {
++ err = PTR_ERR(base);
++ goto unregister_hws;
++ }
+
+ /* CORE */
+ hws[IMX8MQ_CLK_A53_DIV] = imx8m_clk_hw_composite_core("arm_a53_div", imx8mq_a53_sels, base + 0x8000);
+diff --git a/drivers/clk/imx/clk-imx8qxp.c b/drivers/clk/imx/clk-imx8qxp.c
+index 546a3703bfeb2..273de1f293076 100644
+--- a/drivers/clk/imx/clk-imx8qxp.c
++++ b/drivers/clk/imx/clk-imx8qxp.c
+@@ -148,10 +148,10 @@ static int imx8qxp_clk_probe(struct platform_device *pdev)
+ imx_clk_scu("adc0_clk", IMX_SC_R_ADC_0, IMX_SC_PM_CLK_PER);
+ imx_clk_scu("adc1_clk", IMX_SC_R_ADC_1, IMX_SC_PM_CLK_PER);
+ imx_clk_scu("pwm_clk", IMX_SC_R_LCD_0_PWM_0, IMX_SC_PM_CLK_PER);
++ imx_clk_scu("elcdif_pll", IMX_SC_R_ELCDIF_PLL, IMX_SC_PM_CLK_PLL);
+ imx_clk_scu2("lcd_clk", lcd_sels, ARRAY_SIZE(lcd_sels), IMX_SC_R_LCD_0, IMX_SC_PM_CLK_PER);
+ imx_clk_scu2("lcd_pxl_clk", lcd_pxl_sels, ARRAY_SIZE(lcd_pxl_sels), IMX_SC_R_LCD_0, IMX_SC_PM_CLK_MISC0);
+ imx_clk_scu("lcd_pxl_bypass_div_clk", IMX_SC_R_LCD_0, IMX_SC_PM_CLK_BYPASS);
+- imx_clk_scu("elcdif_pll", IMX_SC_R_ELCDIF_PLL, IMX_SC_PM_CLK_PLL);
+
+ /* Audio SS */
+ imx_clk_scu("audio_pll0_clk", IMX_SC_R_AUDIO_PLL_0, IMX_SC_PM_CLK_PLL);
+diff --git a/drivers/clk/keystone/pll.c b/drivers/clk/keystone/pll.c
+index ee5c72369334f..6bbdd4705d71f 100644
+--- a/drivers/clk/keystone/pll.c
++++ b/drivers/clk/keystone/pll.c
+@@ -281,12 +281,13 @@ static void __init of_pll_div_clk_init(struct device_node *node)
+
+ clk = clk_register_divider(NULL, clk_name, parent_name, 0, reg, shift,
+ mask, 0, NULL);
+- if (clk) {
+- of_clk_add_provider(node, of_clk_src_simple_get, clk);
+- } else {
++ if (IS_ERR(clk)) {
+ pr_err("%s: error registering divider %s\n", __func__, clk_name);
+ iounmap(reg);
++ return;
+ }
++
++ of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ }
+ CLK_OF_DECLARE(pll_divider_clock, "ti,keystone,pll-divider-clock", of_pll_div_clk_init);
+
+@@ -328,10 +329,12 @@ static void __init of_pll_mux_clk_init(struct device_node *node)
+ clk = clk_register_mux(NULL, clk_name, (const char **)&parents,
+ ARRAY_SIZE(parents) , 0, reg, shift, mask,
+ 0, NULL);
+- if (clk)
+- of_clk_add_provider(node, of_clk_src_simple_get, clk);
+- else
++ if (IS_ERR(clk)) {
+ pr_err("%s: error registering mux %s\n", __func__, clk_name);
++ return;
++ }
++
++ of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ }
+ CLK_OF_DECLARE(pll_mux_clock, "ti,keystone,pll-mux-clock", of_pll_mux_clk_init);
+
+diff --git a/drivers/clk/mediatek/clk-mt2701.c b/drivers/clk/mediatek/clk-mt2701.c
+index 1c3a93143dc5e..00d2e81bdd43e 100644
+--- a/drivers/clk/mediatek/clk-mt2701.c
++++ b/drivers/clk/mediatek/clk-mt2701.c
+@@ -670,6 +670,8 @@ static int mtk_topckgen_init(struct platform_device *pdev)
+ return PTR_ERR(base);
+
+ clk_data = mtk_alloc_clk_data(CLK_TOP_NR);
++ if (!clk_data)
++ return -ENOMEM;
+
+ mtk_clk_register_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks),
+ clk_data);
+@@ -749,6 +751,8 @@ static void __init mtk_infrasys_init_early(struct device_node *node)
+
+ if (!infra_clk_data) {
+ infra_clk_data = mtk_alloc_clk_data(CLK_INFRA_NR);
++ if (!infra_clk_data)
++ return;
+
+ for (i = 0; i < CLK_INFRA_NR; i++)
+ infra_clk_data->hws[i] = ERR_PTR(-EPROBE_DEFER);
+@@ -776,6 +780,8 @@ static int mtk_infrasys_init(struct platform_device *pdev)
+
+ if (!infra_clk_data) {
+ infra_clk_data = mtk_alloc_clk_data(CLK_INFRA_NR);
++ if (!infra_clk_data)
++ return -ENOMEM;
+ } else {
+ for (i = 0; i < CLK_INFRA_NR; i++) {
+ if (infra_clk_data->hws[i] == ERR_PTR(-EPROBE_DEFER))
+@@ -893,6 +899,8 @@ static int mtk_pericfg_init(struct platform_device *pdev)
+ return PTR_ERR(base);
+
+ clk_data = mtk_alloc_clk_data(CLK_PERI_NR);
++ if (!clk_data)
++ return -ENOMEM;
+
+ mtk_clk_register_gates(node, peri_clks, ARRAY_SIZE(peri_clks),
+ clk_data);
+diff --git a/drivers/clk/mediatek/clk-mt6765.c b/drivers/clk/mediatek/clk-mt6765.c
+index 665981fc411f5..2c6a52ff5564e 100644
+--- a/drivers/clk/mediatek/clk-mt6765.c
++++ b/drivers/clk/mediatek/clk-mt6765.c
+@@ -738,6 +738,8 @@ static int clk_mt6765_apmixed_probe(struct platform_device *pdev)
+ }
+
+ clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK);
++ if (!clk_data)
++ return -ENOMEM;
+
+ mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
+
+@@ -773,6 +775,8 @@ static int clk_mt6765_top_probe(struct platform_device *pdev)
+ }
+
+ clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
++ if (!clk_data)
++ return -ENOMEM;
+
+ mtk_clk_register_fixed_clks(fixed_clks, ARRAY_SIZE(fixed_clks),
+ clk_data);
+@@ -813,6 +817,8 @@ static int clk_mt6765_ifr_probe(struct platform_device *pdev)
+ }
+
+ clk_data = mtk_alloc_clk_data(CLK_IFR_NR_CLK);
++ if (!clk_data)
++ return -ENOMEM;
+
+ mtk_clk_register_gates(node, ifr_clks, ARRAY_SIZE(ifr_clks),
+ clk_data);
+diff --git a/drivers/clk/mediatek/clk-mt6779.c b/drivers/clk/mediatek/clk-mt6779.c
+index 0d0a90ee5eb2c..39dadc9547088 100644
+--- a/drivers/clk/mediatek/clk-mt6779.c
++++ b/drivers/clk/mediatek/clk-mt6779.c
+@@ -1218,6 +1218,8 @@ static int clk_mt6779_apmixed_probe(struct platform_device *pdev)
+ struct device_node *node = pdev->dev.of_node;
+
+ clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK);
++ if (!clk_data)
++ return -ENOMEM;
+
+ mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
+
+@@ -1238,6 +1240,8 @@ static int clk_mt6779_top_probe(struct platform_device *pdev)
+ return PTR_ERR(base);
+
+ clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
++ if (!clk_data)
++ return -ENOMEM;
+
+ mtk_clk_register_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks),
+ clk_data);
+diff --git a/drivers/clk/mediatek/clk-mt6797.c b/drivers/clk/mediatek/clk-mt6797.c
+index 78339cb35beb0..b362e99c8f53c 100644
+--- a/drivers/clk/mediatek/clk-mt6797.c
++++ b/drivers/clk/mediatek/clk-mt6797.c
+@@ -392,6 +392,8 @@ static int mtk_topckgen_init(struct platform_device *pdev)
+ return PTR_ERR(base);
+
+ clk_data = mtk_alloc_clk_data(CLK_TOP_NR);
++ if (!clk_data)
++ return -ENOMEM;
+
+ mtk_clk_register_factors(top_fixed_divs, ARRAY_SIZE(top_fixed_divs),
+ clk_data);
+@@ -546,6 +548,8 @@ static void mtk_infrasys_init_early(struct device_node *node)
+
+ if (!infra_clk_data) {
+ infra_clk_data = mtk_alloc_clk_data(CLK_INFRA_NR);
++ if (!infra_clk_data)
++ return;
+
+ for (i = 0; i < CLK_INFRA_NR; i++)
+ infra_clk_data->hws[i] = ERR_PTR(-EPROBE_DEFER);
+@@ -571,6 +575,8 @@ static int mtk_infrasys_init(struct platform_device *pdev)
+
+ if (!infra_clk_data) {
+ infra_clk_data = mtk_alloc_clk_data(CLK_INFRA_NR);
++ if (!infra_clk_data)
++ return -ENOMEM;
+ } else {
+ for (i = 0; i < CLK_INFRA_NR; i++) {
+ if (infra_clk_data->hws[i] == ERR_PTR(-EPROBE_DEFER))
+diff --git a/drivers/clk/mediatek/clk-mt7629-eth.c b/drivers/clk/mediatek/clk-mt7629-eth.c
+index b0c8fa3b8bbec..e1d2635c72c10 100644
+--- a/drivers/clk/mediatek/clk-mt7629-eth.c
++++ b/drivers/clk/mediatek/clk-mt7629-eth.c
+@@ -79,6 +79,8 @@ static int clk_mt7629_ethsys_init(struct platform_device *pdev)
+ int r;
+
+ clk_data = mtk_alloc_clk_data(CLK_ETH_NR_CLK);
++ if (!clk_data)
++ return -ENOMEM;
+
+ mtk_clk_register_gates(node, eth_clks, CLK_ETH_NR_CLK, clk_data);
+
+@@ -101,6 +103,8 @@ static int clk_mt7629_sgmiisys_init(struct platform_device *pdev)
+ int r;
+
+ clk_data = mtk_alloc_clk_data(CLK_SGMII_NR_CLK);
++ if (!clk_data)
++ return -ENOMEM;
+
+ mtk_clk_register_gates(node, sgmii_clks[id++], CLK_SGMII_NR_CLK,
+ clk_data);
+diff --git a/drivers/clk/mediatek/clk-mt7629.c b/drivers/clk/mediatek/clk-mt7629.c
+index 0bc88b7d171b5..01ee45fcd7e34 100644
+--- a/drivers/clk/mediatek/clk-mt7629.c
++++ b/drivers/clk/mediatek/clk-mt7629.c
+@@ -557,6 +557,8 @@ static int mtk_topckgen_init(struct platform_device *pdev)
+ return PTR_ERR(base);
+
+ clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
++ if (!clk_data)
++ return -ENOMEM;
+
+ mtk_clk_register_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks),
+ clk_data);
+@@ -580,6 +582,8 @@ static int mtk_infrasys_init(struct platform_device *pdev)
+ struct clk_hw_onecell_data *clk_data;
+
+ clk_data = mtk_alloc_clk_data(CLK_INFRA_NR_CLK);
++ if (!clk_data)
++ return -ENOMEM;
+
+ mtk_clk_register_gates(node, infra_clks, ARRAY_SIZE(infra_clks),
+ clk_data);
+@@ -603,6 +607,8 @@ static int mtk_pericfg_init(struct platform_device *pdev)
+ return PTR_ERR(base);
+
+ clk_data = mtk_alloc_clk_data(CLK_PERI_NR_CLK);
++ if (!clk_data)
++ return -ENOMEM;
+
+ mtk_clk_register_gates(node, peri_clks, ARRAY_SIZE(peri_clks),
+ clk_data);
+diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
+index 76e6dee450d59..cbf55949c6493 100644
+--- a/drivers/clk/qcom/Kconfig
++++ b/drivers/clk/qcom/Kconfig
+@@ -127,6 +127,7 @@ config IPQ_APSS_6018
+ tristate "IPQ APSS Clock Controller"
+ select IPQ_APSS_PLL
+ depends on QCOM_APCS_IPC || COMPILE_TEST
++ depends on QCOM_SMEM
+ help
+ Support for APSS clock controller on IPQ platforms. The
+ APSS clock controller manages the Mux and enable block that feeds the
+diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
+index 76551534f10df..dc797bd137caf 100644
+--- a/drivers/clk/qcom/clk-rcg2.c
++++ b/drivers/clk/qcom/clk-rcg2.c
+@@ -158,17 +158,11 @@ static int clk_rcg2_set_parent(struct clk_hw *hw, u8 index)
+ static unsigned long
+ calc_rate(unsigned long rate, u32 m, u32 n, u32 mode, u32 hid_div)
+ {
+- if (hid_div) {
+- rate *= 2;
+- rate /= hid_div + 1;
+- }
++ if (hid_div)
++ rate = mult_frac(rate, 2, hid_div + 1);
+
+- if (mode) {
+- u64 tmp = rate;
+- tmp *= m;
+- do_div(tmp, n);
+- rate = tmp;
+- }
++ if (mode)
++ rate = mult_frac(rate, m, n);
+
+ return rate;
+ }
+diff --git a/drivers/clk/qcom/gcc-msm8996.c b/drivers/clk/qcom/gcc-msm8996.c
+index e161637067351..ff5a16700ef71 100644
+--- a/drivers/clk/qcom/gcc-msm8996.c
++++ b/drivers/clk/qcom/gcc-msm8996.c
+@@ -245,71 +245,6 @@ static const struct clk_parent_data gcc_xo_gpll0_gpll4_gpll0_early_div[] = {
+ { .hw = &gpll0_early_div.hw }
+ };
+
+-static const struct freq_tbl ftbl_system_noc_clk_src[] = {
+- F(19200000, P_XO, 1, 0, 0),
+- F(50000000, P_GPLL0_EARLY_DIV, 6, 0, 0),
+- F(100000000, P_GPLL0, 6, 0, 0),
+- F(150000000, P_GPLL0, 4, 0, 0),
+- F(200000000, P_GPLL0, 3, 0, 0),
+- F(240000000, P_GPLL0, 2.5, 0, 0),
+- { }
+-};
+-
+-static struct clk_rcg2 system_noc_clk_src = {
+- .cmd_rcgr = 0x0401c,
+- .hid_width = 5,
+- .parent_map = gcc_xo_gpll0_gpll0_early_div_map,
+- .freq_tbl = ftbl_system_noc_clk_src,
+- .clkr.hw.init = &(struct clk_init_data){
+- .name = "system_noc_clk_src",
+- .parent_data = gcc_xo_gpll0_gpll0_early_div,
+- .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_early_div),
+- .ops = &clk_rcg2_ops,
+- },
+-};
+-
+-static const struct freq_tbl ftbl_config_noc_clk_src[] = {
+- F(19200000, P_XO, 1, 0, 0),
+- F(37500000, P_GPLL0, 16, 0, 0),
+- F(75000000, P_GPLL0, 8, 0, 0),
+- { }
+-};
+-
+-static struct clk_rcg2 config_noc_clk_src = {
+- .cmd_rcgr = 0x0500c,
+- .hid_width = 5,
+- .parent_map = gcc_xo_gpll0_map,
+- .freq_tbl = ftbl_config_noc_clk_src,
+- .clkr.hw.init = &(struct clk_init_data){
+- .name = "config_noc_clk_src",
+- .parent_data = gcc_xo_gpll0,
+- .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
+- .ops = &clk_rcg2_ops,
+- },
+-};
+-
+-static const struct freq_tbl ftbl_periph_noc_clk_src[] = {
+- F(19200000, P_XO, 1, 0, 0),
+- F(37500000, P_GPLL0, 16, 0, 0),
+- F(50000000, P_GPLL0, 12, 0, 0),
+- F(75000000, P_GPLL0, 8, 0, 0),
+- F(100000000, P_GPLL0, 6, 0, 0),
+- { }
+-};
+-
+-static struct clk_rcg2 periph_noc_clk_src = {
+- .cmd_rcgr = 0x06014,
+- .hid_width = 5,
+- .parent_map = gcc_xo_gpll0_map,
+- .freq_tbl = ftbl_periph_noc_clk_src,
+- .clkr.hw.init = &(struct clk_init_data){
+- .name = "periph_noc_clk_src",
+- .parent_data = gcc_xo_gpll0,
+- .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
+- .ops = &clk_rcg2_ops,
+- },
+-};
+-
+ static const struct freq_tbl ftbl_usb30_master_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(120000000, P_GPLL0, 5, 0, 0),
+@@ -1298,11 +1233,7 @@ static struct clk_branch gcc_mmss_noc_cfg_ahb_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mmss_noc_cfg_ahb_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &config_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
++ .flags = CLK_IGNORE_UNUSED,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -1465,11 +1396,6 @@ static struct clk_branch gcc_usb_phy_cfg_ahb2phy_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb_phy_cfg_ahb2phy_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &periph_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -1499,11 +1425,6 @@ static struct clk_branch gcc_sdcc1_ahb_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_ahb_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &periph_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -1550,11 +1471,6 @@ static struct clk_branch gcc_sdcc2_ahb_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_ahb_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &periph_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -1584,11 +1500,6 @@ static struct clk_branch gcc_sdcc3_ahb_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc3_ahb_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &periph_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -1618,11 +1529,6 @@ static struct clk_branch gcc_sdcc4_ahb_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc4_ahb_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &periph_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -1636,11 +1542,6 @@ static struct clk_branch gcc_blsp1_ahb_clk = {
+ .enable_mask = BIT(17),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_ahb_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &periph_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -1978,11 +1879,6 @@ static struct clk_branch gcc_blsp2_ahb_clk = {
+ .enable_mask = BIT(15),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_ahb_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &periph_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -2319,11 +2215,6 @@ static struct clk_branch gcc_pdm_ahb_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm_ahb_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &periph_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -2354,11 +2245,6 @@ static struct clk_branch gcc_prng_ahb_clk = {
+ .enable_mask = BIT(13),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_prng_ahb_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &config_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -2371,11 +2257,6 @@ static struct clk_branch gcc_tsif_ahb_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_tsif_ahb_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &periph_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -2423,11 +2304,6 @@ static struct clk_branch gcc_boot_rom_ahb_clk = {
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_boot_rom_ahb_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &config_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -2521,11 +2397,6 @@ static struct clk_branch gcc_pcie_0_slv_axi_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_slv_axi_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &system_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -2538,11 +2409,6 @@ static struct clk_branch gcc_pcie_0_mstr_axi_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_mstr_axi_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &system_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -2555,11 +2421,6 @@ static struct clk_branch gcc_pcie_0_cfg_ahb_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_cfg_ahb_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &config_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -2607,11 +2468,6 @@ static struct clk_branch gcc_pcie_1_slv_axi_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_slv_axi_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &system_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -2624,11 +2480,6 @@ static struct clk_branch gcc_pcie_1_mstr_axi_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_mstr_axi_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &system_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -2641,11 +2492,6 @@ static struct clk_branch gcc_pcie_1_cfg_ahb_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_cfg_ahb_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &config_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -2693,11 +2539,6 @@ static struct clk_branch gcc_pcie_2_slv_axi_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_2_slv_axi_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &system_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -2710,11 +2551,6 @@ static struct clk_branch gcc_pcie_2_mstr_axi_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_2_mstr_axi_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &system_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -2727,11 +2563,6 @@ static struct clk_branch gcc_pcie_2_cfg_ahb_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_2_cfg_ahb_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &config_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -2779,11 +2610,6 @@ static struct clk_branch gcc_pcie_phy_cfg_ahb_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_phy_cfg_ahb_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &config_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -2830,11 +2656,6 @@ static struct clk_branch gcc_ufs_ahb_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_ahb_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &config_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -3061,11 +2882,7 @@ static struct clk_branch gcc_aggre0_snoc_axi_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre0_snoc_axi_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &system_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
++ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -3078,11 +2895,7 @@ static struct clk_branch gcc_aggre0_cnoc_ahb_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre0_cnoc_ahb_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &config_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
++ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -3095,11 +2908,7 @@ static struct clk_branch gcc_smmu_aggre0_axi_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_smmu_aggre0_axi_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &system_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
++ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -3112,11 +2921,7 @@ static struct clk_branch gcc_smmu_aggre0_ahb_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_smmu_aggre0_ahb_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &config_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
++ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -3163,10 +2968,6 @@ static struct clk_branch gcc_dcc_ahb_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_dcc_ahb_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &config_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -3179,10 +2980,6 @@ static struct clk_branch gcc_aggre0_noc_mpu_cfg_ahb_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre0_noc_mpu_cfg_ahb_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &config_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -3195,11 +2992,6 @@ static struct clk_branch gcc_qspi_ahb_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qspi_ahb_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &periph_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -3348,10 +3140,6 @@ static struct clk_branch gcc_mss_cfg_ahb_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_cfg_ahb_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &config_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -3364,10 +3152,6 @@ static struct clk_branch gcc_mss_mnoc_bimc_axi_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_mnoc_bimc_axi_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &system_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -3380,10 +3164,6 @@ static struct clk_branch gcc_mss_snoc_axi_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_snoc_axi_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &system_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -3396,10 +3176,6 @@ static struct clk_branch gcc_mss_q6_bimc_axi_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_q6_bimc_axi_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &system_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -3495,9 +3271,6 @@ static struct clk_regmap *gcc_msm8996_clocks[] = {
+ [GPLL0] = &gpll0.clkr,
+ [GPLL4_EARLY] = &gpll4_early.clkr,
+ [GPLL4] = &gpll4.clkr,
+- [SYSTEM_NOC_CLK_SRC] = &system_noc_clk_src.clkr,
+- [CONFIG_NOC_CLK_SRC] = &config_noc_clk_src.clkr,
+- [PERIPH_NOC_CLK_SRC] = &periph_noc_clk_src.clkr,
+ [USB30_MASTER_CLK_SRC] = &usb30_master_clk_src.clkr,
+ [USB30_MOCK_UTMI_CLK_SRC] = &usb30_mock_utmi_clk_src.clkr,
+ [USB3_PHY_AUX_CLK_SRC] = &usb3_phy_aux_clk_src.clkr,
+diff --git a/drivers/clk/qcom/gcc-sm8150.c b/drivers/clk/qcom/gcc-sm8150.c
+index 09cf827addabe..4501c15c4a417 100644
+--- a/drivers/clk/qcom/gcc-sm8150.c
++++ b/drivers/clk/qcom/gcc-sm8150.c
+@@ -792,7 +792,7 @@ static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
+ .name = "gcc_sdcc2_apps_clk_src",
+ .parent_data = gcc_parents_6,
+ .num_parents = ARRAY_SIZE(gcc_parents_6),
+- .flags = CLK_SET_RATE_PARENT,
++ .flags = CLK_OPS_PARENT_ENABLE,
+ .ops = &clk_rcg2_floor_ops,
+ },
+ };
+diff --git a/drivers/clk/qcom/mmcc-msm8998.c b/drivers/clk/qcom/mmcc-msm8998.c
+index c421b12916516..e5a72c2f080f8 100644
+--- a/drivers/clk/qcom/mmcc-msm8998.c
++++ b/drivers/clk/qcom/mmcc-msm8998.c
+@@ -2478,6 +2478,7 @@ static struct clk_branch fd_ahb_clk = {
+
+ static struct clk_branch mnoc_ahb_clk = {
+ .halt_reg = 0x5024,
++ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x5024,
+ .enable_mask = BIT(0),
+@@ -2493,6 +2494,7 @@ static struct clk_branch mnoc_ahb_clk = {
+
+ static struct clk_branch bimc_smmu_ahb_clk = {
+ .halt_reg = 0xe004,
++ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0xe004,
+ .hwcg_bit = 1,
+ .clkr = {
+@@ -2510,6 +2512,7 @@ static struct clk_branch bimc_smmu_ahb_clk = {
+
+ static struct clk_branch bimc_smmu_axi_clk = {
+ .halt_reg = 0xe008,
++ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0xe008,
+ .hwcg_bit = 1,
+ .clkr = {
+@@ -2650,11 +2653,13 @@ static struct gdsc camss_cpp_gdsc = {
+ static struct gdsc bimc_smmu_gdsc = {
+ .gdscr = 0xe020,
+ .gds_hw_ctrl = 0xe024,
++ .cxcs = (unsigned int []){ 0xe008 },
++ .cxc_count = 1,
+ .pd = {
+ .name = "bimc_smmu",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+- .flags = HW_CTRL | ALWAYS_ON,
++ .flags = VOTABLE,
+ };
+
+ static struct clk_regmap *mmcc_msm8998_clocks[] = {
+diff --git a/drivers/clk/renesas/rcar-cpg-lib.c b/drivers/clk/renesas/rcar-cpg-lib.c
+index e2e0447de1901..5a15f8788b922 100644
+--- a/drivers/clk/renesas/rcar-cpg-lib.c
++++ b/drivers/clk/renesas/rcar-cpg-lib.c
+@@ -70,8 +70,21 @@ void cpg_simple_notifier_register(struct raw_notifier_head *notifiers,
+ #define STPnHCK BIT(9 - SDnSRCFC_SHIFT)
+
+ static const struct clk_div_table cpg_sdh_div_table[] = {
++ /*
++ * These values are recommended by the datasheet. Because they come
++ * first, Linux will only use these.
++ */
+ { 0, 1 }, { 1, 2 }, { STPnHCK | 2, 4 }, { STPnHCK | 3, 8 },
+- { STPnHCK | 4, 16 }, { 0, 0 },
++ { STPnHCK | 4, 16 },
++ /*
++ * These values are not recommended because STPnHCK is wrong. But they
++ * have been seen because of broken firmware. So, we support reading
++ * them but Linux will sanitize them when initializing through
++ * recalc_rate.
++ */
++ { STPnHCK | 0, 1 }, { STPnHCK | 1, 2 }, { 2, 4 }, { 3, 8 }, { 4, 16 },
++ /* Sentinel */
++ { 0, 0 }
+ };
+
+ struct clk * __init cpg_sdh_clk_register(const char *name,
+diff --git a/drivers/clk/renesas/rzg2l-cpg.c b/drivers/clk/renesas/rzg2l-cpg.c
+index 2c877576c5729..84767cfc1e739 100644
+--- a/drivers/clk/renesas/rzg2l-cpg.c
++++ b/drivers/clk/renesas/rzg2l-cpg.c
+@@ -11,6 +11,7 @@
+ * Copyright (C) 2015 Renesas Electronics Corp.
+ */
+
++#include <linux/bitfield.h>
+ #include <linux/clk.h>
+ #include <linux/clk-provider.h>
+ #include <linux/clk/renesas.h>
+@@ -39,14 +40,13 @@
+ #define WARN_DEBUG(x) do { } while (0)
+ #endif
+
+-#define DIV_RSMASK(v, s, m) ((v >> s) & m)
+ #define GET_SHIFT(val) ((val >> 12) & 0xff)
+ #define GET_WIDTH(val) ((val >> 8) & 0xf)
+
+-#define KDIV(val) DIV_RSMASK(val, 16, 0xffff)
+-#define MDIV(val) DIV_RSMASK(val, 6, 0x3ff)
+-#define PDIV(val) DIV_RSMASK(val, 0, 0x3f)
+-#define SDIV(val) DIV_RSMASK(val, 0, 0x7)
++#define KDIV(val) ((s16)FIELD_GET(GENMASK(31, 16), val))
++#define MDIV(val) FIELD_GET(GENMASK(15, 6), val)
++#define PDIV(val) FIELD_GET(GENMASK(5, 0), val)
++#define SDIV(val) FIELD_GET(GENMASK(2, 0), val)
+
+ #define CLK_ON_R(reg) (reg)
+ #define CLK_MON_R(reg) (0x180 + (reg))
+@@ -192,7 +192,9 @@ static int rzg2l_cpg_sd_clk_mux_set_parent(struct clk_hw *hw, u8 index)
+ u32 off = GET_REG_OFFSET(hwdata->conf);
+ u32 shift = GET_SHIFT(hwdata->conf);
+ const u32 clk_src_266 = 2;
+- u32 bitmask;
++ u32 msk, val, bitmask;
++ unsigned long flags;
++ int ret;
+
+ /*
+ * As per the HW manual, we should not directly switch from 533 MHz to
+@@ -206,26 +208,30 @@ static int rzg2l_cpg_sd_clk_mux_set_parent(struct clk_hw *hw, u8 index)
+ * the index to value mapping is done by adding 1 to the index.
+ */
+ bitmask = (GENMASK(GET_WIDTH(hwdata->conf) - 1, 0) << shift) << 16;
++ msk = off ? CPG_CLKSTATUS_SELSDHI1_STS : CPG_CLKSTATUS_SELSDHI0_STS;
++ spin_lock_irqsave(&priv->rmw_lock, flags);
+ if (index != clk_src_266) {
+- u32 msk, val;
+- int ret;
+-
+ writel(bitmask | ((clk_src_266 + 1) << shift), priv->base + off);
+
+- msk = off ? CPG_CLKSTATUS_SELSDHI1_STS : CPG_CLKSTATUS_SELSDHI0_STS;
+-
+- ret = readl_poll_timeout(priv->base + CPG_CLKSTATUS, val,
+- !(val & msk), 100,
+- CPG_SDHI_CLK_SWITCH_STATUS_TIMEOUT_US);
+- if (ret) {
+- dev_err(priv->dev, "failed to switch clk source\n");
+- return ret;
+- }
++ ret = readl_poll_timeout_atomic(priv->base + CPG_CLKSTATUS, val,
++ !(val & msk), 10,
++ CPG_SDHI_CLK_SWITCH_STATUS_TIMEOUT_US);
++ if (ret)
++ goto unlock;
+ }
+
+ writel(bitmask | ((index + 1) << shift), priv->base + off);
+
+- return 0;
++ ret = readl_poll_timeout_atomic(priv->base + CPG_CLKSTATUS, val,
++ !(val & msk), 10,
++ CPG_SDHI_CLK_SWITCH_STATUS_TIMEOUT_US);
++unlock:
++ spin_unlock_irqrestore(&priv->rmw_lock, flags);
++
++ if (ret)
++ dev_err(priv->dev, "failed to switch clk source\n");
++
++ return ret;
+ }
+
+ static u8 rzg2l_cpg_sd_clk_mux_get_parent(struct clk_hw *hw)
+@@ -236,14 +242,8 @@ static u8 rzg2l_cpg_sd_clk_mux_get_parent(struct clk_hw *hw)
+
+ val >>= GET_SHIFT(hwdata->conf);
+ val &= GENMASK(GET_WIDTH(hwdata->conf) - 1, 0);
+- if (val) {
+- val--;
+- } else {
+- /* Prohibited clk source, change it to 533 MHz(reset value) */
+- rzg2l_cpg_sd_clk_mux_set_parent(hw, 0);
+- }
+
+- return val;
++ return val ? val - 1 : 0;
+ }
+
+ static const struct clk_ops rzg2l_cpg_sd_clk_mux_ops = {
+@@ -699,18 +699,18 @@ static unsigned long rzg2l_cpg_pll_clk_recalc_rate(struct clk_hw *hw,
+ struct pll_clk *pll_clk = to_pll(hw);
+ struct rzg2l_cpg_priv *priv = pll_clk->priv;
+ unsigned int val1, val2;
+- unsigned int mult = 1;
+- unsigned int div = 1;
++ u64 rate;
+
+ if (pll_clk->type != CLK_TYPE_SAM_PLL)
+ return parent_rate;
+
+ val1 = readl(priv->base + GET_REG_SAMPLL_CLK1(pll_clk->conf));
+ val2 = readl(priv->base + GET_REG_SAMPLL_CLK2(pll_clk->conf));
+- mult = MDIV(val1) + KDIV(val1) / 65536;
+- div = PDIV(val1) << SDIV(val2);
+
+- return DIV_ROUND_CLOSEST_ULL((u64)parent_rate * mult, div);
++ rate = mul_u64_u32_shr(parent_rate, (MDIV(val1) << 16) + KDIV(val1),
++ 16 + SDIV(val2));
++
++ return DIV_ROUND_CLOSEST_ULL(rate, PDIV(val1));
+ }
+
+ static const struct clk_ops rzg2l_cpg_pll_ops = {
+diff --git a/drivers/clk/renesas/rzg2l-cpg.h b/drivers/clk/renesas/rzg2l-cpg.h
+index b33a3e79161b6..aefa53a900597 100644
+--- a/drivers/clk/renesas/rzg2l-cpg.h
++++ b/drivers/clk/renesas/rzg2l-cpg.h
+@@ -43,7 +43,7 @@
+ #define CPG_CLKSTATUS_SELSDHI0_STS BIT(28)
+ #define CPG_CLKSTATUS_SELSDHI1_STS BIT(29)
+
+-#define CPG_SDHI_CLK_SWITCH_STATUS_TIMEOUT_US 20000
++#define CPG_SDHI_CLK_SWITCH_STATUS_TIMEOUT_US 200
+
+ /* n = 0/1/2 for PLL1/4/6 */
+ #define CPG_SAMPLL_CLK1(n) (0x04 + (16 * n))
+diff --git a/drivers/clk/ti/apll.c b/drivers/clk/ti/apll.c
+index dd0709c9c2498..93183287c58db 100644
+--- a/drivers/clk/ti/apll.c
++++ b/drivers/clk/ti/apll.c
+@@ -160,7 +160,7 @@ static void __init omap_clk_register_apll(void *user,
+ ad->clk_bypass = __clk_get_hw(clk);
+
+ name = ti_dt_clk_name(node);
+- clk = ti_clk_register_omap_hw(NULL, &clk_hw->hw, name);
++ clk = of_ti_clk_register_omap_hw(node, &clk_hw->hw, name);
+ if (!IS_ERR(clk)) {
+ of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ kfree(init->parent_names);
+@@ -400,7 +400,7 @@ static void __init of_omap2_apll_setup(struct device_node *node)
+ goto cleanup;
+
+ name = ti_dt_clk_name(node);
+- clk = ti_clk_register_omap_hw(NULL, &clk_hw->hw, name);
++ clk = of_ti_clk_register_omap_hw(node, &clk_hw->hw, name);
+ if (!IS_ERR(clk)) {
+ of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ kfree(init);
+diff --git a/drivers/clk/ti/clk-dra7-atl.c b/drivers/clk/ti/clk-dra7-atl.c
+index ff4d6a9516813..1c576599f6dbd 100644
+--- a/drivers/clk/ti/clk-dra7-atl.c
++++ b/drivers/clk/ti/clk-dra7-atl.c
+@@ -197,7 +197,7 @@ static void __init of_dra7_atl_clock_setup(struct device_node *node)
+
+ init.parent_names = parent_names;
+
+- clk = ti_clk_register(NULL, &clk_hw->hw, name);
++ clk = of_ti_clk_register(node, &clk_hw->hw, name);
+
+ if (!IS_ERR(clk)) {
+ of_clk_add_provider(node, of_clk_src_simple_get, clk);
+diff --git a/drivers/clk/ti/clk.c b/drivers/clk/ti/clk.c
+index 1dc2f15fb75b2..269355010cdce 100644
+--- a/drivers/clk/ti/clk.c
++++ b/drivers/clk/ti/clk.c
+@@ -475,7 +475,7 @@ void __init ti_clk_add_aliases(void)
+ clkspec.np = np;
+ clk = of_clk_get_from_provider(&clkspec);
+
+- ti_clk_add_alias(NULL, clk, ti_dt_clk_name(np));
++ ti_clk_add_alias(clk, ti_dt_clk_name(np));
+ }
+ }
+
+@@ -528,7 +528,6 @@ void omap2_clk_enable_init_clocks(const char **clk_names, u8 num_clocks)
+
+ /**
+ * ti_clk_add_alias - add a clock alias for a TI clock
+- * @dev: device alias for this clock
+ * @clk: clock handle to create alias for
+ * @con: connection ID for this clock
+ *
+@@ -536,7 +535,7 @@ void omap2_clk_enable_init_clocks(const char **clk_names, u8 num_clocks)
+ * and assigns the data to it. Returns 0 if successful, negative error
+ * value otherwise.
+ */
+-int ti_clk_add_alias(struct device *dev, struct clk *clk, const char *con)
++int ti_clk_add_alias(struct clk *clk, const char *con)
+ {
+ struct clk_lookup *cl;
+
+@@ -550,8 +549,6 @@ int ti_clk_add_alias(struct device *dev, struct clk *clk, const char *con)
+ if (!cl)
+ return -ENOMEM;
+
+- if (dev)
+- cl->dev_id = dev_name(dev);
+ cl->con_id = con;
+ cl->clk = clk;
+
+@@ -561,8 +558,8 @@ int ti_clk_add_alias(struct device *dev, struct clk *clk, const char *con)
+ }
+
+ /**
+- * ti_clk_register - register a TI clock to the common clock framework
+- * @dev: device for this clock
++ * of_ti_clk_register - register a TI clock to the common clock framework
++ * @node: device node for this clock
+ * @hw: hardware clock handle
+ * @con: connection ID for this clock
+ *
+@@ -570,17 +567,18 @@ int ti_clk_add_alias(struct device *dev, struct clk *clk, const char *con)
+ * alias for it. Returns a handle to the registered clock if successful,
+ * ERR_PTR value in failure.
+ */
+-struct clk *ti_clk_register(struct device *dev, struct clk_hw *hw,
+- const char *con)
++struct clk *of_ti_clk_register(struct device_node *node, struct clk_hw *hw,
++ const char *con)
+ {
+ struct clk *clk;
+ int ret;
+
+- clk = clk_register(dev, hw);
+- if (IS_ERR(clk))
+- return clk;
++ ret = of_clk_hw_register(node, hw);
++ if (ret)
++ return ERR_PTR(ret);
+
+- ret = ti_clk_add_alias(dev, clk, con);
++ clk = hw->clk;
++ ret = ti_clk_add_alias(clk, con);
+ if (ret) {
+ clk_unregister(clk);
+ return ERR_PTR(ret);
+@@ -590,8 +588,8 @@ struct clk *ti_clk_register(struct device *dev, struct clk_hw *hw,
+ }
+
+ /**
+- * ti_clk_register_omap_hw - register a clk_hw_omap to the clock framework
+- * @dev: device for this clock
++ * of_ti_clk_register_omap_hw - register a clk_hw_omap to the clock framework
++ * @node: device node for this clock
+ * @hw: hardware clock handle
+ * @con: connection ID for this clock
+ *
+@@ -600,13 +598,13 @@ struct clk *ti_clk_register(struct device *dev, struct clk_hw *hw,
+ * Returns a handle to the registered clock if successful, ERR_PTR value
+ * in failure.
+ */
+-struct clk *ti_clk_register_omap_hw(struct device *dev, struct clk_hw *hw,
+- const char *con)
++struct clk *of_ti_clk_register_omap_hw(struct device_node *node,
++ struct clk_hw *hw, const char *con)
+ {
+ struct clk *clk;
+ struct clk_hw_omap *oclk;
+
+- clk = ti_clk_register(dev, hw, con);
++ clk = of_ti_clk_register(node, hw, con);
+ if (IS_ERR(clk))
+ return clk;
+
+diff --git a/drivers/clk/ti/clkctrl.c b/drivers/clk/ti/clkctrl.c
+index 57611bfb299c1..87e5624789ef6 100644
+--- a/drivers/clk/ti/clkctrl.c
++++ b/drivers/clk/ti/clkctrl.c
+@@ -308,7 +308,7 @@ _ti_clkctrl_clk_register(struct omap_clkctrl_provider *provider,
+ init.ops = ops;
+ init.flags = 0;
+
+- clk = ti_clk_register(NULL, clk_hw, init.name);
++ clk = of_ti_clk_register(node, clk_hw, init.name);
+ if (IS_ERR_OR_NULL(clk)) {
+ ret = -EINVAL;
+ goto cleanup;
+@@ -689,7 +689,7 @@ clkdm_found:
+ init.ops = &omap4_clkctrl_clk_ops;
+ hw->hw.init = &init;
+
+- clk = ti_clk_register_omap_hw(NULL, &hw->hw, init.name);
++ clk = of_ti_clk_register_omap_hw(node, &hw->hw, init.name);
+ if (IS_ERR_OR_NULL(clk))
+ goto cleanup;
+
+diff --git a/drivers/clk/ti/clock.h b/drivers/clk/ti/clock.h
+index 37ab53339a9be..16a9f7c2280a5 100644
+--- a/drivers/clk/ti/clock.h
++++ b/drivers/clk/ti/clock.h
+@@ -199,12 +199,12 @@ extern const struct omap_clkctrl_data dm816_clkctrl_data[];
+
+ typedef void (*ti_of_clk_init_cb_t)(void *, struct device_node *);
+
+-struct clk *ti_clk_register(struct device *dev, struct clk_hw *hw,
+- const char *con);
+-struct clk *ti_clk_register_omap_hw(struct device *dev, struct clk_hw *hw,
+- const char *con);
++struct clk *of_ti_clk_register(struct device_node *node, struct clk_hw *hw,
++ const char *con);
++struct clk *of_ti_clk_register_omap_hw(struct device_node *node,
++ struct clk_hw *hw, const char *con);
+ const char *ti_dt_clk_name(struct device_node *np);
+-int ti_clk_add_alias(struct device *dev, struct clk *clk, const char *con);
++int ti_clk_add_alias(struct clk *clk, const char *con);
+ void ti_clk_add_aliases(void);
+
+ void ti_clk_latch(struct clk_omap_reg *reg, s8 shift);
+diff --git a/drivers/clk/ti/composite.c b/drivers/clk/ti/composite.c
+index 77b771dd050a9..b85382c370f7e 100644
+--- a/drivers/clk/ti/composite.c
++++ b/drivers/clk/ti/composite.c
+@@ -176,7 +176,7 @@ static void __init _register_composite(void *user,
+ &ti_composite_gate_ops, 0);
+
+ if (!IS_ERR(clk)) {
+- ret = ti_clk_add_alias(NULL, clk, name);
++ ret = ti_clk_add_alias(clk, name);
+ if (ret) {
+ clk_unregister(clk);
+ goto cleanup;
+diff --git a/drivers/clk/ti/divider.c b/drivers/clk/ti/divider.c
+index 488d3da60c317..5d5bb123ba949 100644
+--- a/drivers/clk/ti/divider.c
++++ b/drivers/clk/ti/divider.c
+@@ -309,7 +309,6 @@ static struct clk *_register_divider(struct device_node *node,
+ u32 flags,
+ struct clk_omap_divider *div)
+ {
+- struct clk *clk;
+ struct clk_init_data init;
+ const char *parent_name;
+ const char *name;
+@@ -326,12 +325,7 @@ static struct clk *_register_divider(struct device_node *node,
+ div->hw.init = &init;
+
+ /* register the clock */
+- clk = ti_clk_register(NULL, &div->hw, name);
+-
+- if (IS_ERR(clk))
+- kfree(div);
+-
+- return clk;
++ return of_ti_clk_register(node, &div->hw, name);
+ }
+
+ int ti_clk_parse_divider_data(int *div_table, int num_dividers, int max_div,
+diff --git a/drivers/clk/ti/dpll.c b/drivers/clk/ti/dpll.c
+index 8ed43bc6b7cc8..403ec81f561b6 100644
+--- a/drivers/clk/ti/dpll.c
++++ b/drivers/clk/ti/dpll.c
+@@ -187,7 +187,7 @@ static void __init _register_dpll(void *user,
+
+ /* register the clock */
+ name = ti_dt_clk_name(node);
+- clk = ti_clk_register_omap_hw(NULL, &clk_hw->hw, name);
++ clk = of_ti_clk_register_omap_hw(node, &clk_hw->hw, name);
+
+ if (!IS_ERR(clk)) {
+ of_clk_add_provider(node, of_clk_src_simple_get, clk);
+@@ -259,7 +259,7 @@ static void _register_dpll_x2(struct device_node *node,
+ #endif
+
+ /* register the clock */
+- clk = ti_clk_register_omap_hw(NULL, &clk_hw->hw, name);
++ clk = of_ti_clk_register_omap_hw(node, &clk_hw->hw, name);
+
+ if (IS_ERR(clk))
+ kfree(clk_hw);
+diff --git a/drivers/clk/ti/fixed-factor.c b/drivers/clk/ti/fixed-factor.c
+index c80cee0f5d3d7..c102c53201686 100644
+--- a/drivers/clk/ti/fixed-factor.c
++++ b/drivers/clk/ti/fixed-factor.c
+@@ -54,7 +54,7 @@ static void __init of_ti_fixed_factor_clk_setup(struct device_node *node)
+ if (!IS_ERR(clk)) {
+ of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ of_ti_clk_autoidle_setup(node);
+- ti_clk_add_alias(NULL, clk, clk_name);
++ ti_clk_add_alias(clk, clk_name);
+ }
+ }
+ CLK_OF_DECLARE(ti_fixed_factor_clk, "ti,fixed-factor-clock",
+diff --git a/drivers/clk/ti/gate.c b/drivers/clk/ti/gate.c
+index 307702921431d..8e477d50d0fdb 100644
+--- a/drivers/clk/ti/gate.c
++++ b/drivers/clk/ti/gate.c
+@@ -85,7 +85,7 @@ static int omap36xx_gate_clk_enable_with_hsdiv_restore(struct clk_hw *hw)
+ return ret;
+ }
+
+-static struct clk *_register_gate(struct device *dev, const char *name,
++static struct clk *_register_gate(struct device_node *node, const char *name,
+ const char *parent_name, unsigned long flags,
+ struct clk_omap_reg *reg, u8 bit_idx,
+ u8 clk_gate_flags, const struct clk_ops *ops,
+@@ -115,7 +115,7 @@ static struct clk *_register_gate(struct device *dev, const char *name,
+
+ init.flags = flags;
+
+- clk = ti_clk_register_omap_hw(NULL, &clk_hw->hw, name);
++ clk = of_ti_clk_register_omap_hw(node, &clk_hw->hw, name);
+
+ if (IS_ERR(clk))
+ kfree(clk_hw);
+@@ -158,7 +158,7 @@ static void __init _of_ti_gate_clk_setup(struct device_node *node,
+ clk_gate_flags |= INVERT_ENABLE;
+
+ name = ti_dt_clk_name(node);
+- clk = _register_gate(NULL, name, parent_name, flags, &reg,
++ clk = _register_gate(node, name, parent_name, flags, &reg,
+ enable_bit, clk_gate_flags, ops, hw_ops);
+
+ if (!IS_ERR(clk))
+diff --git a/drivers/clk/ti/interface.c b/drivers/clk/ti/interface.c
+index f47beeea211e8..172301c646f85 100644
+--- a/drivers/clk/ti/interface.c
++++ b/drivers/clk/ti/interface.c
+@@ -24,7 +24,8 @@ static const struct clk_ops ti_interface_clk_ops = {
+ .is_enabled = &omap2_dflt_clk_is_enabled,
+ };
+
+-static struct clk *_register_interface(struct device *dev, const char *name,
++static struct clk *_register_interface(struct device_node *node,
++ const char *name,
+ const char *parent_name,
+ struct clk_omap_reg *reg, u8 bit_idx,
+ const struct clk_hw_omap_ops *ops)
+@@ -49,7 +50,7 @@ static struct clk *_register_interface(struct device *dev, const char *name,
+ init.num_parents = 1;
+ init.parent_names = &parent_name;
+
+- clk = ti_clk_register_omap_hw(NULL, &clk_hw->hw, name);
++ clk = of_ti_clk_register_omap_hw(node, &clk_hw->hw, name);
+
+ if (IS_ERR(clk))
+ kfree(clk_hw);
+@@ -80,7 +81,7 @@ static void __init _of_ti_interface_clk_setup(struct device_node *node,
+ }
+
+ name = ti_dt_clk_name(node);
+- clk = _register_interface(NULL, name, parent_name, &reg,
++ clk = _register_interface(node, name, parent_name, &reg,
+ enable_bit, ops);
+
+ if (!IS_ERR(clk))
+diff --git a/drivers/clk/ti/mux.c b/drivers/clk/ti/mux.c
+index 46b45b3e8319a..1ebafa386be61 100644
+--- a/drivers/clk/ti/mux.c
++++ b/drivers/clk/ti/mux.c
+@@ -118,7 +118,7 @@ const struct clk_ops ti_clk_mux_ops = {
+ .restore_context = clk_mux_restore_context,
+ };
+
+-static struct clk *_register_mux(struct device *dev, const char *name,
++static struct clk *_register_mux(struct device_node *node, const char *name,
+ const char * const *parent_names,
+ u8 num_parents, unsigned long flags,
+ struct clk_omap_reg *reg, u8 shift, u32 mask,
+@@ -148,7 +148,7 @@ static struct clk *_register_mux(struct device *dev, const char *name,
+ mux->table = table;
+ mux->hw.init = &init;
+
+- clk = ti_clk_register(dev, &mux->hw, name);
++ clk = of_ti_clk_register(node, &mux->hw, name);
+
+ if (IS_ERR(clk))
+ kfree(mux);
+@@ -207,7 +207,7 @@ static void of_mux_clk_setup(struct device_node *node)
+ mask = (1 << fls(mask)) - 1;
+
+ name = ti_dt_clk_name(node);
+- clk = _register_mux(NULL, name, parent_names, num_parents,
++ clk = _register_mux(node, name, parent_names, num_parents,
+ flags, &reg, shift, mask, latch, clk_mux_flags,
+ NULL);
+
+diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
+index 239c70ac120e8..fee1c4bf10214 100644
+--- a/drivers/clocksource/arm_arch_timer.c
++++ b/drivers/clocksource/arm_arch_timer.c
+@@ -817,8 +817,9 @@ static u64 __arch_timer_check_delta(void)
+ * Note that TVAL is signed, thus has only 31 of its
+ * 32 bits to express magnitude.
+ */
+- MIDR_ALL_VERSIONS(MIDR_CPU_MODEL(ARM_CPU_IMP_APM,
+- APM_CPU_PART_POTENZA)),
++ MIDR_REV_RANGE(MIDR_CPU_MODEL(ARM_CPU_IMP_APM,
++ APM_CPU_PART_XGENE),
++ APM_CPU_VAR_POTENZA, 0x0, 0xf),
+ {},
+ };
+
+diff --git a/drivers/clocksource/timer-ti-dm.c b/drivers/clocksource/timer-ti-dm.c
+index 00af1a8e34fbd..ec86aecb748f1 100644
+--- a/drivers/clocksource/timer-ti-dm.c
++++ b/drivers/clocksource/timer-ti-dm.c
+@@ -141,6 +141,8 @@ struct dmtimer {
+ struct platform_device *pdev;
+ struct list_head node;
+ struct notifier_block nb;
++ struct notifier_block fclk_nb;
++ unsigned long fclk_rate;
+ };
+
+ static u32 omap_reserved_systimers;
+@@ -254,8 +256,7 @@ static inline void __omap_dm_timer_enable_posted(struct dmtimer *timer)
+ timer->posted = OMAP_TIMER_POSTED;
+ }
+
+-static inline void __omap_dm_timer_stop(struct dmtimer *timer,
+- unsigned long rate)
++static inline void __omap_dm_timer_stop(struct dmtimer *timer)
+ {
+ u32 l;
+
+@@ -270,7 +271,7 @@ static inline void __omap_dm_timer_stop(struct dmtimer *timer,
+ * Wait for functional clock period x 3.5 to make sure that
+ * timer is stopped
+ */
+- udelay(3500000 / rate + 1);
++ udelay(3500000 / timer->fclk_rate + 1);
+ #endif
+ }
+
+@@ -349,6 +350,21 @@ static int omap_timer_context_notifier(struct notifier_block *nb,
+ return NOTIFY_OK;
+ }
+
++static int omap_timer_fclk_notifier(struct notifier_block *nb,
++ unsigned long event, void *data)
++{
++ struct clk_notifier_data *clk_data = data;
++ struct dmtimer *timer = container_of(nb, struct dmtimer, fclk_nb);
++
++ switch (event) {
++ case POST_RATE_CHANGE:
++ timer->fclk_rate = clk_data->new_rate;
++ return NOTIFY_OK;
++ default:
++ return NOTIFY_DONE;
++ }
++}
++
+ static int omap_dm_timer_reset(struct dmtimer *timer)
+ {
+ u32 l, timeout = 100000;
+@@ -742,7 +758,6 @@ static int omap_dm_timer_stop(struct omap_dm_timer *cookie)
+ {
+ struct dmtimer *timer;
+ struct device *dev;
+- unsigned long rate = 0;
+
+ timer = to_dmtimer(cookie);
+ if (unlikely(!timer))
+@@ -750,10 +765,7 @@ static int omap_dm_timer_stop(struct omap_dm_timer *cookie)
+
+ dev = &timer->pdev->dev;
+
+- if (!timer->omap1)
+- rate = clk_get_rate(timer->fclk);
+-
+- __omap_dm_timer_stop(timer, rate);
++ __omap_dm_timer_stop(timer);
+
+ pm_runtime_put_sync(dev);
+
+@@ -1112,6 +1124,14 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
+ timer->fclk = devm_clk_get(dev, "fck");
+ if (IS_ERR(timer->fclk))
+ return PTR_ERR(timer->fclk);
++
++ timer->fclk_nb.notifier_call = omap_timer_fclk_notifier;
++ ret = devm_clk_notifier_register(dev, timer->fclk,
++ &timer->fclk_nb);
++ if (ret)
++ return ret;
++
++ timer->fclk_rate = clk_get_rate(timer->fclk);
+ } else {
+ timer->fclk = ERR_PTR(-ENODEV);
+ }
+diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
+index d3d8bb0a69900..e156238b4da90 100644
+--- a/drivers/crypto/caam/caamalg.c
++++ b/drivers/crypto/caam/caamalg.c
+@@ -566,7 +566,8 @@ static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
+ if (keylen != CHACHA_KEY_SIZE + saltlen)
+ return -EINVAL;
+
+- ctx->cdata.key_virt = key;
++ memcpy(ctx->key, key, keylen);
++ ctx->cdata.key_virt = ctx->key;
+ ctx->cdata.keylen = keylen - saltlen;
+
+ return chachapoly_set_sh_desc(aead);
+diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c
+index 4482cb145d051..56058d4992cc4 100644
+--- a/drivers/crypto/caam/caamalg_qi2.c
++++ b/drivers/crypto/caam/caamalg_qi2.c
+@@ -639,7 +639,8 @@ static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
+ if (keylen != CHACHA_KEY_SIZE + saltlen)
+ return -EINVAL;
+
+- ctx->cdata.key_virt = key;
++ memcpy(ctx->key, key, keylen);
++ ctx->cdata.key_virt = ctx->key;
+ ctx->cdata.keylen = keylen - saltlen;
+
+ return chachapoly_set_sh_desc(aead);
+diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
+index 3e583f0324874..b8e02c3a19610 100644
+--- a/drivers/crypto/ccp/sev-dev.c
++++ b/drivers/crypto/ccp/sev-dev.c
+@@ -443,10 +443,10 @@ static int __sev_init_ex_locked(int *error)
+
+ static int __sev_platform_init_locked(int *error)
+ {
++ int rc = 0, psp_ret = SEV_RET_NO_FW_CALL;
+ struct psp_device *psp = psp_master;
+- struct sev_device *sev;
+- int rc = 0, psp_ret = -1;
+ int (*init_function)(int *error);
++ struct sev_device *sev;
+
+ if (!psp || !psp->sev_data)
+ return -ENODEV;
+@@ -474,9 +474,11 @@ static int __sev_platform_init_locked(int *error)
+ * initialization function should succeed by replacing the state
+ * with a reset state.
+ */
+- dev_err(sev->dev, "SEV: retrying INIT command because of SECURE_DATA_INVALID error. Retrying once to reset PSP SEV state.");
++ dev_err(sev->dev,
++"SEV: retrying INIT command because of SECURE_DATA_INVALID error. Retrying once to reset PSP SEV state.");
+ rc = init_function(&psp_ret);
+ }
++
+ if (error)
+ *error = psp_ret;
+
+diff --git a/drivers/crypto/hisilicon/Makefile b/drivers/crypto/hisilicon/Makefile
+index 1e89269a2e4b0..8595a5a5d2288 100644
+--- a/drivers/crypto/hisilicon/Makefile
++++ b/drivers/crypto/hisilicon/Makefile
+@@ -3,6 +3,6 @@ obj-$(CONFIG_CRYPTO_DEV_HISI_HPRE) += hpre/
+ obj-$(CONFIG_CRYPTO_DEV_HISI_SEC) += sec/
+ obj-$(CONFIG_CRYPTO_DEV_HISI_SEC2) += sec2/
+ obj-$(CONFIG_CRYPTO_DEV_HISI_QM) += hisi_qm.o
+-hisi_qm-objs = qm.o sgl.o
++hisi_qm-objs = qm.o sgl.o debugfs.o
+ obj-$(CONFIG_CRYPTO_DEV_HISI_ZIP) += zip/
+ obj-$(CONFIG_CRYPTO_DEV_HISI_TRNG) += trng/
+diff --git a/drivers/crypto/hisilicon/debugfs.c b/drivers/crypto/hisilicon/debugfs.c
+new file mode 100644
+index 0000000000000..13bec8b2d7237
+--- /dev/null
++++ b/drivers/crypto/hisilicon/debugfs.c
+@@ -0,0 +1,1097 @@
++// SPDX-License-Identifier: GPL-2.0
++/* Copyright (c) 2022 HiSilicon Limited. */
++#include <linux/hisi_acc_qm.h>
++#include "qm_common.h"
++
++#define QM_DFX_BASE 0x0100000
++#define QM_DFX_STATE1 0x0104000
++#define QM_DFX_STATE2 0x01040C8
++#define QM_DFX_COMMON 0x0000
++#define QM_DFX_BASE_LEN 0x5A
++#define QM_DFX_STATE1_LEN 0x2E
++#define QM_DFX_STATE2_LEN 0x11
++#define QM_DFX_COMMON_LEN 0xC3
++#define QM_DFX_REGS_LEN 4UL
++#define QM_DBG_TMP_BUF_LEN 22
++#define CURRENT_FUN_MASK GENMASK(5, 0)
++#define CURRENT_Q_MASK GENMASK(31, 16)
++#define QM_SQE_ADDR_MASK GENMASK(7, 0)
++
++#define QM_DFX_MB_CNT_VF 0x104010
++#define QM_DFX_DB_CNT_VF 0x104020
++#define QM_DFX_SQE_CNT_VF_SQN 0x104030
++#define QM_DFX_CQE_CNT_VF_CQN 0x104040
++#define QM_DFX_QN_SHIFT 16
++#define QM_DFX_CNT_CLR_CE 0x100118
++#define QM_DBG_WRITE_LEN 1024
++
++static const char * const qm_debug_file_name[] = {
++ [CURRENT_QM] = "current_qm",
++ [CURRENT_Q] = "current_q",
++ [CLEAR_ENABLE] = "clear_enable",
++};
++
++struct qm_dfx_item {
++ const char *name;
++ u32 offset;
++};
++
++static struct qm_dfx_item qm_dfx_files[] = {
++ {"err_irq", offsetof(struct qm_dfx, err_irq_cnt)},
++ {"aeq_irq", offsetof(struct qm_dfx, aeq_irq_cnt)},
++ {"abnormal_irq", offsetof(struct qm_dfx, abnormal_irq_cnt)},
++ {"create_qp_err", offsetof(struct qm_dfx, create_qp_err_cnt)},
++ {"mb_err", offsetof(struct qm_dfx, mb_err_cnt)},
++};
++
++#define CNT_CYC_REGS_NUM 10
++static const struct debugfs_reg32 qm_dfx_regs[] = {
++ /* XXX_CNT are reading clear register */
++ {"QM_ECC_1BIT_CNT ", 0x104000ull},
++ {"QM_ECC_MBIT_CNT ", 0x104008ull},
++ {"QM_DFX_MB_CNT ", 0x104018ull},
++ {"QM_DFX_DB_CNT ", 0x104028ull},
++ {"QM_DFX_SQE_CNT ", 0x104038ull},
++ {"QM_DFX_CQE_CNT ", 0x104048ull},
++ {"QM_DFX_SEND_SQE_TO_ACC_CNT ", 0x104050ull},
++ {"QM_DFX_WB_SQE_FROM_ACC_CNT ", 0x104058ull},
++ {"QM_DFX_ACC_FINISH_CNT ", 0x104060ull},
++ {"QM_DFX_CQE_ERR_CNT ", 0x1040b4ull},
++ {"QM_DFX_FUNS_ACTIVE_ST ", 0x200ull},
++ {"QM_ECC_1BIT_INF ", 0x104004ull},
++ {"QM_ECC_MBIT_INF ", 0x10400cull},
++ {"QM_DFX_ACC_RDY_VLD0 ", 0x1040a0ull},
++ {"QM_DFX_ACC_RDY_VLD1 ", 0x1040a4ull},
++ {"QM_DFX_AXI_RDY_VLD ", 0x1040a8ull},
++ {"QM_DFX_FF_ST0 ", 0x1040c8ull},
++ {"QM_DFX_FF_ST1 ", 0x1040ccull},
++ {"QM_DFX_FF_ST2 ", 0x1040d0ull},
++ {"QM_DFX_FF_ST3 ", 0x1040d4ull},
++ {"QM_DFX_FF_ST4 ", 0x1040d8ull},
++ {"QM_DFX_FF_ST5 ", 0x1040dcull},
++ {"QM_DFX_FF_ST6 ", 0x1040e0ull},
++ {"QM_IN_IDLE_ST ", 0x1040e4ull},
++};
++
++static const struct debugfs_reg32 qm_vf_dfx_regs[] = {
++ {"QM_DFX_FUNS_ACTIVE_ST ", 0x200ull},
++};
++
++/* define the QM's dfx regs region and region length */
++static struct dfx_diff_registers qm_diff_regs[] = {
++ {
++ .reg_offset = QM_DFX_BASE,
++ .reg_len = QM_DFX_BASE_LEN,
++ }, {
++ .reg_offset = QM_DFX_STATE1,
++ .reg_len = QM_DFX_STATE1_LEN,
++ }, {
++ .reg_offset = QM_DFX_STATE2,
++ .reg_len = QM_DFX_STATE2_LEN,
++ }, {
++ .reg_offset = QM_DFX_COMMON,
++ .reg_len = QM_DFX_COMMON_LEN,
++ },
++};
++
++static struct hisi_qm *file_to_qm(struct debugfs_file *file)
++{
++ struct qm_debug *debug = file->debug;
++
++ return container_of(debug, struct hisi_qm, debug);
++}
++
++static ssize_t qm_cmd_read(struct file *filp, char __user *buffer,
++ size_t count, loff_t *pos)
++{
++ char buf[QM_DBG_READ_LEN];
++ int len;
++
++ len = scnprintf(buf, QM_DBG_READ_LEN, "%s\n",
++ "Please echo help to cmd to get help information");
++
++ return simple_read_from_buffer(buffer, count, pos, buf, len);
++}
++
++static void dump_show(struct hisi_qm *qm, void *info,
++ unsigned int info_size, char *info_name)
++{
++ struct device *dev = &qm->pdev->dev;
++ u8 *info_curr = info;
++ u32 i;
++#define BYTE_PER_DW 4
++
++ dev_info(dev, "%s DUMP\n", info_name);
++ for (i = 0; i < info_size; i += BYTE_PER_DW, info_curr += BYTE_PER_DW) {
++ pr_info("DW%u: %02X%02X %02X%02X\n", i / BYTE_PER_DW,
++ *(info_curr + 3), *(info_curr + 2), *(info_curr + 1), *(info_curr));
++ }
++}
++
++static int qm_sqc_dump(struct hisi_qm *qm, const char *s)
++{
++ struct device *dev = &qm->pdev->dev;
++ struct qm_sqc *sqc, *sqc_curr;
++ dma_addr_t sqc_dma;
++ u32 qp_id;
++ int ret;
++
++ if (!s)
++ return -EINVAL;
++
++ ret = kstrtou32(s, 0, &qp_id);
++ if (ret || qp_id >= qm->qp_num) {
++ dev_err(dev, "Please input qp num (0-%u)", qm->qp_num - 1);
++ return -EINVAL;
++ }
++
++ sqc = hisi_qm_ctx_alloc(qm, sizeof(*sqc), &sqc_dma);
++ if (IS_ERR(sqc))
++ return PTR_ERR(sqc);
++
++ ret = hisi_qm_mb(qm, QM_MB_CMD_SQC, sqc_dma, qp_id, 1);
++ if (ret) {
++ down_read(&qm->qps_lock);
++ if (qm->sqc) {
++ sqc_curr = qm->sqc + qp_id;
++
++ dump_show(qm, sqc_curr, sizeof(*sqc), "SOFT SQC");
++ }
++ up_read(&qm->qps_lock);
++
++ goto free_ctx;
++ }
++
++ dump_show(qm, sqc, sizeof(*sqc), "SQC");
++
++free_ctx:
++ hisi_qm_ctx_free(qm, sizeof(*sqc), sqc, &sqc_dma);
++ return 0;
++}
++
++static int qm_cqc_dump(struct hisi_qm *qm, const char *s)
++{
++ struct device *dev = &qm->pdev->dev;
++ struct qm_cqc *cqc, *cqc_curr;
++ dma_addr_t cqc_dma;
++ u32 qp_id;
++ int ret;
++
++ if (!s)
++ return -EINVAL;
++
++ ret = kstrtou32(s, 0, &qp_id);
++ if (ret || qp_id >= qm->qp_num) {
++ dev_err(dev, "Please input qp num (0-%u)", qm->qp_num - 1);
++ return -EINVAL;
++ }
++
++ cqc = hisi_qm_ctx_alloc(qm, sizeof(*cqc), &cqc_dma);
++ if (IS_ERR(cqc))
++ return PTR_ERR(cqc);
++
++ ret = hisi_qm_mb(qm, QM_MB_CMD_CQC, cqc_dma, qp_id, 1);
++ if (ret) {
++ down_read(&qm->qps_lock);
++ if (qm->cqc) {
++ cqc_curr = qm->cqc + qp_id;
++
++ dump_show(qm, cqc_curr, sizeof(*cqc), "SOFT CQC");
++ }
++ up_read(&qm->qps_lock);
++
++ goto free_ctx;
++ }
++
++ dump_show(qm, cqc, sizeof(*cqc), "CQC");
++
++free_ctx:
++ hisi_qm_ctx_free(qm, sizeof(*cqc), cqc, &cqc_dma);
++ return 0;
++}
++
++static int qm_eqc_aeqc_dump(struct hisi_qm *qm, char *s, size_t size,
++ int cmd, char *name)
++{
++ struct device *dev = &qm->pdev->dev;
++ dma_addr_t xeqc_dma;
++ void *xeqc;
++ int ret;
++
++ if (strsep(&s, " ")) {
++ dev_err(dev, "Please do not input extra characters!\n");
++ return -EINVAL;
++ }
++
++ xeqc = hisi_qm_ctx_alloc(qm, size, &xeqc_dma);
++ if (IS_ERR(xeqc))
++ return PTR_ERR(xeqc);
++
++ ret = hisi_qm_mb(qm, cmd, xeqc_dma, 0, 1);
++ if (ret)
++ goto err_free_ctx;
++
++ dump_show(qm, xeqc, size, name);
++
++err_free_ctx:
++ hisi_qm_ctx_free(qm, size, xeqc, &xeqc_dma);
++ return ret;
++}
++
++static int q_dump_param_parse(struct hisi_qm *qm, char *s,
++ u32 *e_id, u32 *q_id, u16 q_depth)
++{
++ struct device *dev = &qm->pdev->dev;
++ unsigned int qp_num = qm->qp_num;
++ char *presult;
++ int ret;
++
++ presult = strsep(&s, " ");
++ if (!presult) {
++ dev_err(dev, "Please input qp number!\n");
++ return -EINVAL;
++ }
++
++ ret = kstrtou32(presult, 0, q_id);
++ if (ret || *q_id >= qp_num) {
++ dev_err(dev, "Please input qp num (0-%u)", qp_num - 1);
++ return -EINVAL;
++ }
++
++ presult = strsep(&s, " ");
++ if (!presult) {
++ dev_err(dev, "Please input sqe number!\n");
++ return -EINVAL;
++ }
++
++ ret = kstrtou32(presult, 0, e_id);
++ if (ret || *e_id >= q_depth) {
++ dev_err(dev, "Please input sqe num (0-%u)", q_depth - 1);
++ return -EINVAL;
++ }
++
++ if (strsep(&s, " ")) {
++ dev_err(dev, "Please do not input extra characters!\n");
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++static int qm_sq_dump(struct hisi_qm *qm, char *s)
++{
++ u16 sq_depth = qm->qp_array->cq_depth;
++ void *sqe, *sqe_curr;
++ struct hisi_qp *qp;
++ u32 qp_id, sqe_id;
++ int ret;
++
++ ret = q_dump_param_parse(qm, s, &sqe_id, &qp_id, sq_depth);
++ if (ret)
++ return ret;
++
++ sqe = kzalloc(qm->sqe_size * sq_depth, GFP_KERNEL);
++ if (!sqe)
++ return -ENOMEM;
++
++ qp = &qm->qp_array[qp_id];
++ memcpy(sqe, qp->sqe, qm->sqe_size * sq_depth);
++ sqe_curr = sqe + (u32)(sqe_id * qm->sqe_size);
++ memset(sqe_curr + qm->debug.sqe_mask_offset, QM_SQE_ADDR_MASK,
++ qm->debug.sqe_mask_len);
++
++ dump_show(qm, sqe_curr, qm->sqe_size, "SQE");
++
++ kfree(sqe);
++
++ return 0;
++}
++
++static int qm_cq_dump(struct hisi_qm *qm, char *s)
++{
++ struct qm_cqe *cqe_curr;
++ struct hisi_qp *qp;
++ u32 qp_id, cqe_id;
++ int ret;
++
++ ret = q_dump_param_parse(qm, s, &cqe_id, &qp_id, qm->qp_array->cq_depth);
++ if (ret)
++ return ret;
++
++ qp = &qm->qp_array[qp_id];
++ cqe_curr = qp->cqe + cqe_id;
++ dump_show(qm, cqe_curr, sizeof(struct qm_cqe), "CQE");
++
++ return 0;
++}
++
++static int qm_eq_aeq_dump(struct hisi_qm *qm, const char *s,
++ size_t size, char *name)
++{
++ struct device *dev = &qm->pdev->dev;
++ void *xeqe;
++ u32 xeqe_id;
++ int ret;
++
++ if (!s)
++ return -EINVAL;
++
++ ret = kstrtou32(s, 0, &xeqe_id);
++ if (ret)
++ return -EINVAL;
++
++ if (!strcmp(name, "EQE") && xeqe_id >= qm->eq_depth) {
++ dev_err(dev, "Please input eqe num (0-%u)", qm->eq_depth - 1);
++ return -EINVAL;
++ } else if (!strcmp(name, "AEQE") && xeqe_id >= qm->aeq_depth) {
++ dev_err(dev, "Please input aeqe num (0-%u)", qm->eq_depth - 1);
++ return -EINVAL;
++ }
++
++ down_read(&qm->qps_lock);
++
++ if (qm->eqe && !strcmp(name, "EQE")) {
++ xeqe = qm->eqe + xeqe_id;
++ } else if (qm->aeqe && !strcmp(name, "AEQE")) {
++ xeqe = qm->aeqe + xeqe_id;
++ } else {
++ ret = -EINVAL;
++ goto err_unlock;
++ }
++
++ dump_show(qm, xeqe, size, name);
++
++err_unlock:
++ up_read(&qm->qps_lock);
++ return ret;
++}
++
++static int qm_dbg_help(struct hisi_qm *qm, char *s)
++{
++ struct device *dev = &qm->pdev->dev;
++
++ if (strsep(&s, " ")) {
++ dev_err(dev, "Please do not input extra characters!\n");
++ return -EINVAL;
++ }
++
++ dev_info(dev, "available commands:\n");
++ dev_info(dev, "sqc <num>\n");
++ dev_info(dev, "cqc <num>\n");
++ dev_info(dev, "eqc\n");
++ dev_info(dev, "aeqc\n");
++ dev_info(dev, "sq <num> <e>\n");
++ dev_info(dev, "cq <num> <e>\n");
++ dev_info(dev, "eq <e>\n");
++ dev_info(dev, "aeq <e>\n");
++
++ return 0;
++}
++
++static int qm_cmd_write_dump(struct hisi_qm *qm, const char *cmd_buf)
++{
++ struct device *dev = &qm->pdev->dev;
++ char *presult, *s, *s_tmp;
++ int ret;
++
++ s = kstrdup(cmd_buf, GFP_KERNEL);
++ if (!s)
++ return -ENOMEM;
++
++ s_tmp = s;
++ presult = strsep(&s, " ");
++ if (!presult) {
++ ret = -EINVAL;
++ goto err_buffer_free;
++ }
++
++ if (!strcmp(presult, "sqc"))
++ ret = qm_sqc_dump(qm, s);
++ else if (!strcmp(presult, "cqc"))
++ ret = qm_cqc_dump(qm, s);
++ else if (!strcmp(presult, "eqc"))
++ ret = qm_eqc_aeqc_dump(qm, s, sizeof(struct qm_eqc),
++ QM_MB_CMD_EQC, "EQC");
++ else if (!strcmp(presult, "aeqc"))
++ ret = qm_eqc_aeqc_dump(qm, s, sizeof(struct qm_aeqc),
++ QM_MB_CMD_AEQC, "AEQC");
++ else if (!strcmp(presult, "sq"))
++ ret = qm_sq_dump(qm, s);
++ else if (!strcmp(presult, "cq"))
++ ret = qm_cq_dump(qm, s);
++ else if (!strcmp(presult, "eq"))
++ ret = qm_eq_aeq_dump(qm, s, sizeof(struct qm_eqe), "EQE");
++ else if (!strcmp(presult, "aeq"))
++ ret = qm_eq_aeq_dump(qm, s, sizeof(struct qm_aeqe), "AEQE");
++ else if (!strcmp(presult, "help"))
++ ret = qm_dbg_help(qm, s);
++ else
++ ret = -EINVAL;
++
++ if (ret)
++ dev_info(dev, "Please echo help\n");
++
++err_buffer_free:
++ kfree(s_tmp);
++
++ return ret;
++}
++
++static ssize_t qm_cmd_write(struct file *filp, const char __user *buffer,
++ size_t count, loff_t *pos)
++{
++ struct hisi_qm *qm = filp->private_data;
++ char *cmd_buf, *cmd_buf_tmp;
++ int ret;
++
++ if (*pos)
++ return 0;
++
++ ret = hisi_qm_get_dfx_access(qm);
++ if (ret)
++ return ret;
++
++ /* Judge if the instance is being reset. */
++ if (unlikely(atomic_read(&qm->status.flags) == QM_STOP)) {
++ ret = 0;
++ goto put_dfx_access;
++ }
++
++ if (count > QM_DBG_WRITE_LEN) {
++ ret = -ENOSPC;
++ goto put_dfx_access;
++ }
++
++ cmd_buf = memdup_user_nul(buffer, count);
++ if (IS_ERR(cmd_buf)) {
++ ret = PTR_ERR(cmd_buf);
++ goto put_dfx_access;
++ }
++
++ cmd_buf_tmp = strchr(cmd_buf, '\n');
++ if (cmd_buf_tmp) {
++ *cmd_buf_tmp = '\0';
++ count = cmd_buf_tmp - cmd_buf + 1;
++ }
++
++ ret = qm_cmd_write_dump(qm, cmd_buf);
++ if (ret) {
++ kfree(cmd_buf);
++ goto put_dfx_access;
++ }
++
++ kfree(cmd_buf);
++
++ ret = count;
++
++put_dfx_access:
++ hisi_qm_put_dfx_access(qm);
++ return ret;
++}
++
++static const struct file_operations qm_cmd_fops = {
++ .owner = THIS_MODULE,
++ .open = simple_open,
++ .read = qm_cmd_read,
++ .write = qm_cmd_write,
++};
++
++/**
++ * hisi_qm_regs_dump() - Dump registers's value.
++ * @s: debugfs file handle.
++ * @regset: accelerator registers information.
++ *
++ * Dump accelerator registers.
++ */
++void hisi_qm_regs_dump(struct seq_file *s, struct debugfs_regset32 *regset)
++{
++ struct pci_dev *pdev = to_pci_dev(regset->dev);
++ struct hisi_qm *qm = pci_get_drvdata(pdev);
++ const struct debugfs_reg32 *regs = regset->regs;
++ int regs_len = regset->nregs;
++ int i, ret;
++ u32 val;
++
++ ret = hisi_qm_get_dfx_access(qm);
++ if (ret)
++ return;
++
++ for (i = 0; i < regs_len; i++) {
++ val = readl(regset->base + regs[i].offset);
++ seq_printf(s, "%s= 0x%08x\n", regs[i].name, val);
++ }
++
++ hisi_qm_put_dfx_access(qm);
++}
++EXPORT_SYMBOL_GPL(hisi_qm_regs_dump);
++
++static int qm_regs_show(struct seq_file *s, void *unused)
++{
++ struct hisi_qm *qm = s->private;
++ struct debugfs_regset32 regset;
++
++ if (qm->fun_type == QM_HW_PF) {
++ regset.regs = qm_dfx_regs;
++ regset.nregs = ARRAY_SIZE(qm_dfx_regs);
++ } else {
++ regset.regs = qm_vf_dfx_regs;
++ regset.nregs = ARRAY_SIZE(qm_vf_dfx_regs);
++ }
++
++ regset.base = qm->io_base;
++ regset.dev = &qm->pdev->dev;
++
++ hisi_qm_regs_dump(s, &regset);
++
++ return 0;
++}
++
++DEFINE_SHOW_ATTRIBUTE(qm_regs);
++
++static u32 current_q_read(struct hisi_qm *qm)
++{
++ return readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) >> QM_DFX_QN_SHIFT;
++}
++
++static int current_q_write(struct hisi_qm *qm, u32 val)
++{
++ u32 tmp;
++
++ if (val >= qm->debug.curr_qm_qp_num)
++ return -EINVAL;
++
++ tmp = val << QM_DFX_QN_SHIFT |
++ (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_FUN_MASK);
++ writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);
++
++ tmp = val << QM_DFX_QN_SHIFT |
++ (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_FUN_MASK);
++ writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);
++
++ return 0;
++}
++
++static u32 clear_enable_read(struct hisi_qm *qm)
++{
++ return readl(qm->io_base + QM_DFX_CNT_CLR_CE);
++}
++
++/* rd_clr_ctrl 1 enable read clear, otherwise 0 disable it */
++static int clear_enable_write(struct hisi_qm *qm, u32 rd_clr_ctrl)
++{
++ if (rd_clr_ctrl > 1)
++ return -EINVAL;
++
++ writel(rd_clr_ctrl, qm->io_base + QM_DFX_CNT_CLR_CE);
++
++ return 0;
++}
++
++static u32 current_qm_read(struct hisi_qm *qm)
++{
++ return readl(qm->io_base + QM_DFX_MB_CNT_VF);
++}
++
++static int qm_get_vf_qp_num(struct hisi_qm *qm, u32 fun_num)
++{
++ u32 remain_q_num, vfq_num;
++ u32 num_vfs = qm->vfs_num;
++
++ vfq_num = (qm->ctrl_qp_num - qm->qp_num) / num_vfs;
++ if (vfq_num >= qm->max_qp_num)
++ return qm->max_qp_num;
++
++ remain_q_num = (qm->ctrl_qp_num - qm->qp_num) % num_vfs;
++ if (vfq_num + remain_q_num <= qm->max_qp_num)
++ return fun_num == num_vfs ? vfq_num + remain_q_num : vfq_num;
++
++ /*
++ * if vfq_num + remain_q_num > max_qp_num, the last VFs,
++ * each with one more queue.
++ */
++ return fun_num + remain_q_num > num_vfs ? vfq_num + 1 : vfq_num;
++}
++
++static int current_qm_write(struct hisi_qm *qm, u32 val)
++{
++ u32 tmp;
++
++ if (val > qm->vfs_num)
++ return -EINVAL;
++
++ /* According PF or VF Dev ID to calculation curr_qm_qp_num and store */
++ if (!val)
++ qm->debug.curr_qm_qp_num = qm->qp_num;
++ else
++ qm->debug.curr_qm_qp_num = qm_get_vf_qp_num(qm, val);
++
++ writel(val, qm->io_base + QM_DFX_MB_CNT_VF);
++ writel(val, qm->io_base + QM_DFX_DB_CNT_VF);
++
++ tmp = val |
++ (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_Q_MASK);
++ writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);
++
++ tmp = val |
++ (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_Q_MASK);
++ writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);
++
++ return 0;
++}
++
++static ssize_t qm_debug_read(struct file *filp, char __user *buf,
++ size_t count, loff_t *pos)
++{
++ struct debugfs_file *file = filp->private_data;
++ enum qm_debug_file index = file->index;
++ struct hisi_qm *qm = file_to_qm(file);
++ char tbuf[QM_DBG_TMP_BUF_LEN];
++ u32 val;
++ int ret;
++
++ ret = hisi_qm_get_dfx_access(qm);
++ if (ret)
++ return ret;
++
++ mutex_lock(&file->lock);
++ switch (index) {
++ case CURRENT_QM:
++ val = current_qm_read(qm);
++ break;
++ case CURRENT_Q:
++ val = current_q_read(qm);
++ break;
++ case CLEAR_ENABLE:
++ val = clear_enable_read(qm);
++ break;
++ default:
++ goto err_input;
++ }
++ mutex_unlock(&file->lock);
++
++ hisi_qm_put_dfx_access(qm);
++ ret = scnprintf(tbuf, QM_DBG_TMP_BUF_LEN, "%u\n", val);
++ return simple_read_from_buffer(buf, count, pos, tbuf, ret);
++
++err_input:
++ mutex_unlock(&file->lock);
++ hisi_qm_put_dfx_access(qm);
++ return -EINVAL;
++}
++
++static ssize_t qm_debug_write(struct file *filp, const char __user *buf,
++ size_t count, loff_t *pos)
++{
++ struct debugfs_file *file = filp->private_data;
++ enum qm_debug_file index = file->index;
++ struct hisi_qm *qm = file_to_qm(file);
++ unsigned long val;
++ char tbuf[QM_DBG_TMP_BUF_LEN];
++ int len, ret;
++
++ if (*pos != 0)
++ return 0;
++
++ if (count >= QM_DBG_TMP_BUF_LEN)
++ return -ENOSPC;
++
++ len = simple_write_to_buffer(tbuf, QM_DBG_TMP_BUF_LEN - 1, pos, buf,
++ count);
++ if (len < 0)
++ return len;
++
++ tbuf[len] = '\0';
++ if (kstrtoul(tbuf, 0, &val))
++ return -EFAULT;
++
++ ret = hisi_qm_get_dfx_access(qm);
++ if (ret)
++ return ret;
++
++ mutex_lock(&file->lock);
++ switch (index) {
++ case CURRENT_QM:
++ ret = current_qm_write(qm, val);
++ break;
++ case CURRENT_Q:
++ ret = current_q_write(qm, val);
++ break;
++ case CLEAR_ENABLE:
++ ret = clear_enable_write(qm, val);
++ break;
++ default:
++ ret = -EINVAL;
++ }
++ mutex_unlock(&file->lock);
++
++ hisi_qm_put_dfx_access(qm);
++
++ if (ret)
++ return ret;
++
++ return count;
++}
++
++static const struct file_operations qm_debug_fops = {
++ .owner = THIS_MODULE,
++ .open = simple_open,
++ .read = qm_debug_read,
++ .write = qm_debug_write,
++};
++
++static void dfx_regs_uninit(struct hisi_qm *qm,
++ struct dfx_diff_registers *dregs, int reg_len)
++{
++ int i;
++
++ /* Setting the pointer is NULL to prevent double free */
++ for (i = 0; i < reg_len; i++) {
++ kfree(dregs[i].regs);
++ dregs[i].regs = NULL;
++ }
++ kfree(dregs);
++}
++
++static struct dfx_diff_registers *dfx_regs_init(struct hisi_qm *qm,
++ const struct dfx_diff_registers *cregs, u32 reg_len)
++{
++ struct dfx_diff_registers *diff_regs;
++ u32 j, base_offset;
++ int i;
++
++ diff_regs = kcalloc(reg_len, sizeof(*diff_regs), GFP_KERNEL);
++ if (!diff_regs)
++ return ERR_PTR(-ENOMEM);
++
++ for (i = 0; i < reg_len; i++) {
++ if (!cregs[i].reg_len)
++ continue;
++
++ diff_regs[i].reg_offset = cregs[i].reg_offset;
++ diff_regs[i].reg_len = cregs[i].reg_len;
++ diff_regs[i].regs = kcalloc(QM_DFX_REGS_LEN, cregs[i].reg_len,
++ GFP_KERNEL);
++ if (!diff_regs[i].regs)
++ goto alloc_error;
++
++ for (j = 0; j < diff_regs[i].reg_len; j++) {
++ base_offset = diff_regs[i].reg_offset +
++ j * QM_DFX_REGS_LEN;
++ diff_regs[i].regs[j] = readl(qm->io_base + base_offset);
++ }
++ }
++
++ return diff_regs;
++
++alloc_error:
++ while (i > 0) {
++ i--;
++ kfree(diff_regs[i].regs);
++ }
++ kfree(diff_regs);
++ return ERR_PTR(-ENOMEM);
++}
++
++static int qm_diff_regs_init(struct hisi_qm *qm,
++ struct dfx_diff_registers *dregs, u32 reg_len)
++{
++ qm->debug.qm_diff_regs = dfx_regs_init(qm, qm_diff_regs, ARRAY_SIZE(qm_diff_regs));
++ if (IS_ERR(qm->debug.qm_diff_regs))
++ return PTR_ERR(qm->debug.qm_diff_regs);
++
++ qm->debug.acc_diff_regs = dfx_regs_init(qm, dregs, reg_len);
++ if (IS_ERR(qm->debug.acc_diff_regs)) {
++ dfx_regs_uninit(qm, qm->debug.qm_diff_regs, ARRAY_SIZE(qm_diff_regs));
++ return PTR_ERR(qm->debug.acc_diff_regs);
++ }
++
++ return 0;
++}
++
++static void qm_last_regs_uninit(struct hisi_qm *qm)
++{
++ struct qm_debug *debug = &qm->debug;
++
++ if (qm->fun_type == QM_HW_VF || !debug->qm_last_words)
++ return;
++
++ kfree(debug->qm_last_words);
++ debug->qm_last_words = NULL;
++}
++
++static int qm_last_regs_init(struct hisi_qm *qm)
++{
++ int dfx_regs_num = ARRAY_SIZE(qm_dfx_regs);
++ struct qm_debug *debug = &qm->debug;
++ int i;
++
++ if (qm->fun_type == QM_HW_VF)
++ return 0;
++
++ debug->qm_last_words = kcalloc(dfx_regs_num, sizeof(unsigned int), GFP_KERNEL);
++ if (!debug->qm_last_words)
++ return -ENOMEM;
++
++ for (i = 0; i < dfx_regs_num; i++) {
++ debug->qm_last_words[i] = readl_relaxed(qm->io_base +
++ qm_dfx_regs[i].offset);
++ }
++
++ return 0;
++}
++
++static void qm_diff_regs_uninit(struct hisi_qm *qm, u32 reg_len)
++{
++ dfx_regs_uninit(qm, qm->debug.acc_diff_regs, reg_len);
++ dfx_regs_uninit(qm, qm->debug.qm_diff_regs, ARRAY_SIZE(qm_diff_regs));
++}
++
++/**
++ * hisi_qm_regs_debugfs_init() - Allocate memory for registers.
++ * @qm: device qm handle.
++ * @dregs: diff registers handle.
++ * @reg_len: diff registers region length.
++ */
++int hisi_qm_regs_debugfs_init(struct hisi_qm *qm,
++ struct dfx_diff_registers *dregs, u32 reg_len)
++{
++ int ret;
++
++ if (!qm || !dregs)
++ return -EINVAL;
++
++ if (qm->fun_type != QM_HW_PF)
++ return 0;
++
++ ret = qm_last_regs_init(qm);
++ if (ret) {
++ dev_info(&qm->pdev->dev, "failed to init qm words memory!\n");
++ return ret;
++ }
++
++ ret = qm_diff_regs_init(qm, dregs, reg_len);
++ if (ret) {
++ qm_last_regs_uninit(qm);
++ return ret;
++ }
++
++ return 0;
++}
++EXPORT_SYMBOL_GPL(hisi_qm_regs_debugfs_init);
++
++/**
++ * hisi_qm_regs_debugfs_uninit() - Free memory for registers.
++ * @qm: device qm handle.
++ * @reg_len: diff registers region length.
++ */
++void hisi_qm_regs_debugfs_uninit(struct hisi_qm *qm, u32 reg_len)
++{
++ if (!qm || qm->fun_type != QM_HW_PF)
++ return;
++
++ qm_diff_regs_uninit(qm, reg_len);
++ qm_last_regs_uninit(qm);
++}
++EXPORT_SYMBOL_GPL(hisi_qm_regs_debugfs_uninit);
++
++/**
++ * hisi_qm_acc_diff_regs_dump() - Dump registers's value.
++ * @qm: device qm handle.
++ * @s: Debugfs file handle.
++ * @dregs: diff registers handle.
++ * @regs_len: diff registers region length.
++ */
++void hisi_qm_acc_diff_regs_dump(struct hisi_qm *qm, struct seq_file *s,
++ struct dfx_diff_registers *dregs, u32 regs_len)
++{
++ u32 j, val, base_offset;
++ int i, ret;
++
++ if (!qm || !s || !dregs)
++ return;
++
++ ret = hisi_qm_get_dfx_access(qm);
++ if (ret)
++ return;
++
++ down_read(&qm->qps_lock);
++ for (i = 0; i < regs_len; i++) {
++ if (!dregs[i].reg_len)
++ continue;
++
++ for (j = 0; j < dregs[i].reg_len; j++) {
++ base_offset = dregs[i].reg_offset + j * QM_DFX_REGS_LEN;
++ val = readl(qm->io_base + base_offset);
++ if (val != dregs[i].regs[j])
++ seq_printf(s, "0x%08x = 0x%08x ---> 0x%08x\n",
++ base_offset, dregs[i].regs[j], val);
++ }
++ }
++ up_read(&qm->qps_lock);
++
++ hisi_qm_put_dfx_access(qm);
++}
++EXPORT_SYMBOL_GPL(hisi_qm_acc_diff_regs_dump);
++
++void hisi_qm_show_last_dfx_regs(struct hisi_qm *qm)
++{
++ struct qm_debug *debug = &qm->debug;
++ struct pci_dev *pdev = qm->pdev;
++ u32 val;
++ int i;
++
++ if (qm->fun_type == QM_HW_VF || !debug->qm_last_words)
++ return;
++
++ for (i = 0; i < ARRAY_SIZE(qm_dfx_regs); i++) {
++ val = readl_relaxed(qm->io_base + qm_dfx_regs[i].offset);
++ if (debug->qm_last_words[i] != val)
++ pci_info(pdev, "%s \t= 0x%08x => 0x%08x\n",
++ qm_dfx_regs[i].name, debug->qm_last_words[i], val);
++ }
++}
++
++static int qm_diff_regs_show(struct seq_file *s, void *unused)
++{
++ struct hisi_qm *qm = s->private;
++
++ hisi_qm_acc_diff_regs_dump(qm, s, qm->debug.qm_diff_regs,
++ ARRAY_SIZE(qm_diff_regs));
++
++ return 0;
++}
++DEFINE_SHOW_ATTRIBUTE(qm_diff_regs);
++
++static ssize_t qm_status_read(struct file *filp, char __user *buffer,
++ size_t count, loff_t *pos)
++{
++ struct hisi_qm *qm = filp->private_data;
++ char buf[QM_DBG_READ_LEN];
++ int val, len;
++
++ val = atomic_read(&qm->status.flags);
++ len = scnprintf(buf, QM_DBG_READ_LEN, "%s\n", qm_s[val]);
++
++ return simple_read_from_buffer(buffer, count, pos, buf, len);
++}
++
++static const struct file_operations qm_status_fops = {
++ .owner = THIS_MODULE,
++ .open = simple_open,
++ .read = qm_status_read,
++};
++
++static void qm_create_debugfs_file(struct hisi_qm *qm, struct dentry *dir,
++ enum qm_debug_file index)
++{
++ struct debugfs_file *file = qm->debug.files + index;
++
++ debugfs_create_file(qm_debug_file_name[index], 0600, dir, file,
++ &qm_debug_fops);
++
++ file->index = index;
++ mutex_init(&file->lock);
++ file->debug = &qm->debug;
++}
++
++static int qm_debugfs_atomic64_set(void *data, u64 val)
++{
++ if (val)
++ return -EINVAL;
++
++ atomic64_set((atomic64_t *)data, 0);
++
++ return 0;
++}
++
++static int qm_debugfs_atomic64_get(void *data, u64 *val)
++{
++ *val = atomic64_read((atomic64_t *)data);
++
++ return 0;
++}
++
++DEFINE_DEBUGFS_ATTRIBUTE(qm_atomic64_ops, qm_debugfs_atomic64_get,
++ qm_debugfs_atomic64_set, "%llu\n");
++
++/**
++ * hisi_qm_debug_init() - Initialize qm related debugfs files.
++ * @qm: The qm for which we want to add debugfs files.
++ *
++ * Create qm related debugfs files.
++ */
++void hisi_qm_debug_init(struct hisi_qm *qm)
++{
++ struct dfx_diff_registers *qm_regs = qm->debug.qm_diff_regs;
++ struct qm_dfx *dfx = &qm->debug.dfx;
++ struct dentry *qm_d;
++ void *data;
++ int i;
++
++ qm_d = debugfs_create_dir("qm", qm->debug.debug_root);
++ qm->debug.qm_d = qm_d;
++
++ /* only show this in PF */
++ if (qm->fun_type == QM_HW_PF) {
++ qm_create_debugfs_file(qm, qm->debug.debug_root, CURRENT_QM);
++ for (i = CURRENT_Q; i < DEBUG_FILE_NUM; i++)
++ qm_create_debugfs_file(qm, qm->debug.qm_d, i);
++ }
++
++ if (qm_regs)
++ debugfs_create_file("diff_regs", 0444, qm->debug.qm_d,
++ qm, &qm_diff_regs_fops);
++
++ debugfs_create_file("regs", 0444, qm->debug.qm_d, qm, &qm_regs_fops);
++
++ debugfs_create_file("cmd", 0600, qm->debug.qm_d, qm, &qm_cmd_fops);
++
++ debugfs_create_file("status", 0444, qm->debug.qm_d, qm,
++ &qm_status_fops);
++ for (i = 0; i < ARRAY_SIZE(qm_dfx_files); i++) {
++ data = (atomic64_t *)((uintptr_t)dfx + qm_dfx_files[i].offset);
++ debugfs_create_file(qm_dfx_files[i].name,
++ 0644,
++ qm_d,
++ data,
++ &qm_atomic64_ops);
++ }
++
++ if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps))
++ hisi_qm_set_algqos_init(qm);
++}
++EXPORT_SYMBOL_GPL(hisi_qm_debug_init);
++
++/**
++ * hisi_qm_debug_regs_clear() - clear qm debug related registers.
++ * @qm: The qm for which we want to clear its debug registers.
++ */
++void hisi_qm_debug_regs_clear(struct hisi_qm *qm)
++{
++ const struct debugfs_reg32 *regs;
++ int i;
++
++ /* clear current_qm */
++ writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF);
++ writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF);
++
++ /* clear current_q */
++ writel(0x0, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);
++ writel(0x0, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);
++
++ /*
++ * these registers are reading and clearing, so clear them after
++ * reading them.
++ */
++ writel(0x1, qm->io_base + QM_DFX_CNT_CLR_CE);
++
++ regs = qm_dfx_regs;
++ for (i = 0; i < CNT_CYC_REGS_NUM; i++) {
++ readl(qm->io_base + regs->offset);
++ regs++;
++ }
++
++ /* clear clear_enable */
++ writel(0x0, qm->io_base + QM_DFX_CNT_CLR_CE);
++}
++EXPORT_SYMBOL_GPL(hisi_qm_debug_regs_clear);
+diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
+index baf1faec7046f..ff8a5f20a5df0 100644
+--- a/drivers/crypto/hisilicon/hpre/hpre_main.c
++++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
+@@ -431,8 +431,11 @@ static u32 uacce_mode = UACCE_MODE_NOUACCE;
+ module_param_cb(uacce_mode, &hpre_uacce_mode_ops, &uacce_mode, 0444);
+ MODULE_PARM_DESC(uacce_mode, UACCE_MODE_DESC);
+
++static bool pf_q_num_flag;
+ static int pf_q_num_set(const char *val, const struct kernel_param *kp)
+ {
++ pf_q_num_flag = true;
++
+ return q_num_set(val, kp, PCI_DEVICE_ID_HUAWEI_HPRE_PF);
+ }
+
+@@ -1031,7 +1034,7 @@ static int hpre_cluster_debugfs_init(struct hisi_qm *qm)
+
+ for (i = 0; i < clusters_num; i++) {
+ ret = snprintf(buf, HPRE_DBGFS_VAL_MAX_LEN, "cluster%d", i);
+- if (ret < 0)
++ if (ret >= HPRE_DBGFS_VAL_MAX_LEN)
+ return -EINVAL;
+ tmp_d = debugfs_create_dir(buf, qm->debug.debug_root);
+
+@@ -1101,8 +1104,7 @@ static int hpre_debugfs_init(struct hisi_qm *qm)
+
+ qm->debug.sqe_mask_offset = HPRE_SQE_MASK_OFFSET;
+ qm->debug.sqe_mask_len = HPRE_SQE_MASK_LEN;
+- ret = hisi_qm_diff_regs_init(qm, hpre_diff_regs,
+- ARRAY_SIZE(hpre_diff_regs));
++ ret = hisi_qm_regs_debugfs_init(qm, hpre_diff_regs, ARRAY_SIZE(hpre_diff_regs));
+ if (ret) {
+ dev_warn(dev, "Failed to init HPRE diff regs!\n");
+ goto debugfs_remove;
+@@ -1121,7 +1123,7 @@ static int hpre_debugfs_init(struct hisi_qm *qm)
+ return 0;
+
+ failed_to_create:
+- hisi_qm_diff_regs_uninit(qm, ARRAY_SIZE(hpre_diff_regs));
++ hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(hpre_diff_regs));
+ debugfs_remove:
+ debugfs_remove_recursive(qm->debug.debug_root);
+ return ret;
+@@ -1129,7 +1131,7 @@ debugfs_remove:
+
+ static void hpre_debugfs_exit(struct hisi_qm *qm)
+ {
+- hisi_qm_diff_regs_uninit(qm, ARRAY_SIZE(hpre_diff_regs));
++ hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(hpre_diff_regs));
+
+ debugfs_remove_recursive(qm->debug.debug_root);
+ }
+@@ -1156,6 +1158,8 @@ static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
+ qm->qp_num = pf_q_num;
+ qm->debug.curr_qm_qp_num = pf_q_num;
+ qm->qm_list = &hpre_devices;
++ if (pf_q_num_flag)
++ set_bit(QM_MODULE_PARAM, &qm->misc_ctl);
+ }
+
+ ret = hisi_qm_init(qm);
+diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
+index 07e1e39a5e378..a4a3895c74181 100644
+--- a/drivers/crypto/hisilicon/qm.c
++++ b/drivers/crypto/hisilicon/qm.c
+@@ -16,6 +16,7 @@
+ #include <linux/uaccess.h>
+ #include <uapi/misc/uacce/hisi_qm.h>
+ #include <linux/hisi_acc_qm.h>
++#include "qm_common.h"
+
+ /* eq/aeq irq enable */
+ #define QM_VF_AEQ_INT_SOURCE 0x0
+@@ -119,8 +120,6 @@
+ #define QM_SQC_VFT_NUM_SHIFT_V2 45
+ #define QM_SQC_VFT_NUM_MASK_v2 GENMASK(9, 0)
+
+-#define QM_DFX_CNT_CLR_CE 0x100118
+-
+ #define QM_ABNORMAL_INT_SOURCE 0x100000
+ #define QM_ABNORMAL_INT_MASK 0x100004
+ #define QM_ABNORMAL_INT_MASK_VALUE 0x7fff
+@@ -187,14 +186,6 @@
+ #define QM_VF_RESET_WAIT_TIMEOUT_US \
+ (QM_VF_RESET_WAIT_US * QM_VF_RESET_WAIT_CNT)
+
+-#define QM_DFX_MB_CNT_VF 0x104010
+-#define QM_DFX_DB_CNT_VF 0x104020
+-#define QM_DFX_SQE_CNT_VF_SQN 0x104030
+-#define QM_DFX_CQE_CNT_VF_CQN 0x104040
+-#define QM_DFX_QN_SHIFT 16
+-#define CURRENT_FUN_MASK GENMASK(5, 0)
+-#define CURRENT_Q_MASK GENMASK(31, 16)
+-
+ #define POLL_PERIOD 10
+ #define POLL_TIMEOUT 1000
+ #define WAIT_PERIOD_US_MAX 200
+@@ -211,19 +202,13 @@
+ #define QMC_ALIGN(sz) ALIGN(sz, 32)
+
+ #define QM_DBG_READ_LEN 256
+-#define QM_DBG_WRITE_LEN 1024
+-#define QM_DBG_TMP_BUF_LEN 22
+ #define QM_PCI_COMMAND_INVALID ~0
+ #define QM_RESET_STOP_TX_OFFSET 1
+ #define QM_RESET_STOP_RX_OFFSET 2
+
+ #define WAIT_PERIOD 20
+ #define REMOVE_WAIT_DELAY 10
+-#define QM_SQE_ADDR_MASK GENMASK(7, 0)
+
+-#define QM_DRIVER_REMOVING 0
+-#define QM_RST_SCHED 1
+-#define QM_RESETTING 2
+ #define QM_QOS_PARAM_NUM 2
+ #define QM_QOS_VAL_NUM 1
+ #define QM_QOS_BDF_PARAM_NUM 4
+@@ -250,15 +235,6 @@
+ #define QM_QOS_MIN_CIR_B 100
+ #define QM_QOS_MAX_CIR_U 6
+ #define QM_QOS_MAX_CIR_S 11
+-#define QM_DFX_BASE 0x0100000
+-#define QM_DFX_STATE1 0x0104000
+-#define QM_DFX_STATE2 0x01040C8
+-#define QM_DFX_COMMON 0x0000
+-#define QM_DFX_BASE_LEN 0x5A
+-#define QM_DFX_STATE1_LEN 0x2E
+-#define QM_DFX_STATE2_LEN 0x11
+-#define QM_DFX_COMMON_LEN 0xC3
+-#define QM_DFX_REGS_LEN 4UL
+ #define QM_AUTOSUSPEND_DELAY 3000
+
+ #define QM_MK_CQC_DW3_V1(hop_num, pg_sz, buf_sz, cqe_sz) \
+@@ -368,73 +344,6 @@ static const struct hisi_qm_cap_info qm_basic_info[] = {
+ {QM_VF_IRQ_NUM_CAP, 0x311c, 0, GENMASK(15, 0), 0x1, 0x2, 0x3},
+ };
+
+-struct qm_cqe {
+- __le32 rsvd0;
+- __le16 cmd_id;
+- __le16 rsvd1;
+- __le16 sq_head;
+- __le16 sq_num;
+- __le16 rsvd2;
+- __le16 w7;
+-};
+-
+-struct qm_eqe {
+- __le32 dw0;
+-};
+-
+-struct qm_aeqe {
+- __le32 dw0;
+-};
+-
+-struct qm_sqc {
+- __le16 head;
+- __le16 tail;
+- __le32 base_l;
+- __le32 base_h;
+- __le32 dw3;
+- __le16 w8;
+- __le16 rsvd0;
+- __le16 pasid;
+- __le16 w11;
+- __le16 cq_num;
+- __le16 w13;
+- __le32 rsvd1;
+-};
+-
+-struct qm_cqc {
+- __le16 head;
+- __le16 tail;
+- __le32 base_l;
+- __le32 base_h;
+- __le32 dw3;
+- __le16 w8;
+- __le16 rsvd0;
+- __le16 pasid;
+- __le16 w11;
+- __le32 dw6;
+- __le32 rsvd1;
+-};
+-
+-struct qm_eqc {
+- __le16 head;
+- __le16 tail;
+- __le32 base_l;
+- __le32 base_h;
+- __le32 dw3;
+- __le32 rsvd[2];
+- __le32 dw6;
+-};
+-
+-struct qm_aeqc {
+- __le16 head;
+- __le16 tail;
+- __le32 base_l;
+- __le32 base_h;
+- __le32 dw3;
+- __le32 rsvd[2];
+- __le32 dw6;
+-};
+-
+ struct qm_mailbox {
+ __le16 w0;
+ __le16 queue_num;
+@@ -467,25 +376,6 @@ struct hisi_qm_hw_ops {
+ int (*set_msi)(struct hisi_qm *qm, bool set);
+ };
+
+-struct qm_dfx_item {
+- const char *name;
+- u32 offset;
+-};
+-
+-static struct qm_dfx_item qm_dfx_files[] = {
+- {"err_irq", offsetof(struct qm_dfx, err_irq_cnt)},
+- {"aeq_irq", offsetof(struct qm_dfx, aeq_irq_cnt)},
+- {"abnormal_irq", offsetof(struct qm_dfx, abnormal_irq_cnt)},
+- {"create_qp_err", offsetof(struct qm_dfx, create_qp_err_cnt)},
+- {"mb_err", offsetof(struct qm_dfx, mb_err_cnt)},
+-};
+-
+-static const char * const qm_debug_file_name[] = {
+- [CURRENT_QM] = "current_qm",
+- [CURRENT_Q] = "current_q",
+- [CLEAR_ENABLE] = "clear_enable",
+-};
+-
+ struct hisi_qm_hw_error {
+ u32 int_msk;
+ const char *msg;
+@@ -510,23 +400,6 @@ static const struct hisi_qm_hw_error qm_hw_error[] = {
+ { /* sentinel */ }
+ };
+
+-/* define the QM's dfx regs region and region length */
+-static struct dfx_diff_registers qm_diff_regs[] = {
+- {
+- .reg_offset = QM_DFX_BASE,
+- .reg_len = QM_DFX_BASE_LEN,
+- }, {
+- .reg_offset = QM_DFX_STATE1,
+- .reg_len = QM_DFX_STATE1_LEN,
+- }, {
+- .reg_offset = QM_DFX_STATE2,
+- .reg_len = QM_DFX_STATE2_LEN,
+- }, {
+- .reg_offset = QM_DFX_COMMON,
+- .reg_len = QM_DFX_COMMON_LEN,
+- },
+-};
+-
+ static const char * const qm_db_timeout[] = {
+ "sq", "cq", "eq", "aeq",
+ };
+@@ -535,10 +408,6 @@ static const char * const qm_fifo_overflow[] = {
+ "cq", "eq", "aeq",
+ };
+
+-static const char * const qm_s[] = {
+- "init", "start", "close", "stop",
+-};
+-
+ static const char * const qp_s[] = {
+ "none", "init", "start", "stop", "close",
+ };
+@@ -1324,999 +1193,158 @@ static void qm_vft_data_cfg(struct hisi_qm *qm, enum vft_type type, u32 base,
+ tmp = QM_CQC_VFT_VALID;
+ }
+ break;
+- case SHAPER_VFT:
+- if (factor) {
+- tmp = factor->cir_b |
+- (factor->cir_u << QM_SHAPER_FACTOR_CIR_U_SHIFT) |
+- (factor->cir_s << QM_SHAPER_FACTOR_CIR_S_SHIFT) |
+- (QM_SHAPER_CBS_B << QM_SHAPER_FACTOR_CBS_B_SHIFT) |
+- (factor->cbs_s << QM_SHAPER_FACTOR_CBS_S_SHIFT);
+- }
+- break;
+- }
+- }
+-
+- writel(lower_32_bits(tmp), qm->io_base + QM_VFT_CFG_DATA_L);
+- writel(upper_32_bits(tmp), qm->io_base + QM_VFT_CFG_DATA_H);
+-}
+-
+-static int qm_set_vft_common(struct hisi_qm *qm, enum vft_type type,
+- u32 fun_num, u32 base, u32 number)
+-{
+- struct qm_shaper_factor *factor = NULL;
+- unsigned int val;
+- int ret;
+-
+- if (type == SHAPER_VFT && test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps))
+- factor = &qm->factor[fun_num];
+-
+- ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
+- val & BIT(0), POLL_PERIOD,
+- POLL_TIMEOUT);
+- if (ret)
+- return ret;
+-
+- writel(0x0, qm->io_base + QM_VFT_CFG_OP_WR);
+- writel(type, qm->io_base + QM_VFT_CFG_TYPE);
+- if (type == SHAPER_VFT)
+- fun_num |= base << QM_SHAPER_VFT_OFFSET;
+-
+- writel(fun_num, qm->io_base + QM_VFT_CFG);
+-
+- qm_vft_data_cfg(qm, type, base, number, factor);
+-
+- writel(0x0, qm->io_base + QM_VFT_CFG_RDY);
+- writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE);
+-
+- return readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
+- val & BIT(0), POLL_PERIOD,
+- POLL_TIMEOUT);
+-}
+-
+-static int qm_shaper_init_vft(struct hisi_qm *qm, u32 fun_num)
+-{
+- u32 qos = qm->factor[fun_num].func_qos;
+- int ret, i;
+-
+- ret = qm_get_shaper_para(qos * QM_QOS_RATE, &qm->factor[fun_num]);
+- if (ret) {
+- dev_err(&qm->pdev->dev, "failed to calculate shaper parameter!\n");
+- return ret;
+- }
+- writel(qm->type_rate, qm->io_base + QM_SHAPER_CFG);
+- for (i = ALG_TYPE_0; i <= ALG_TYPE_1; i++) {
+- /* The base number of queue reuse for different alg type */
+- ret = qm_set_vft_common(qm, SHAPER_VFT, fun_num, i, 1);
+- if (ret)
+- return ret;
+- }
+-
+- return 0;
+-}
+-
+-/* The config should be conducted after qm_dev_mem_reset() */
+-static int qm_set_sqc_cqc_vft(struct hisi_qm *qm, u32 fun_num, u32 base,
+- u32 number)
+-{
+- int ret, i;
+-
+- for (i = SQC_VFT; i <= CQC_VFT; i++) {
+- ret = qm_set_vft_common(qm, i, fun_num, base, number);
+- if (ret)
+- return ret;
+- }
+-
+- /* init default shaper qos val */
+- if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) {
+- ret = qm_shaper_init_vft(qm, fun_num);
+- if (ret)
+- goto back_sqc_cqc;
+- }
+-
+- return 0;
+-back_sqc_cqc:
+- for (i = SQC_VFT; i <= CQC_VFT; i++)
+- qm_set_vft_common(qm, i, fun_num, 0, 0);
+-
+- return ret;
+-}
+-
+-static int qm_get_vft_v2(struct hisi_qm *qm, u32 *base, u32 *number)
+-{
+- u64 sqc_vft;
+- int ret;
+-
+- ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_VFT_V2, 0, 0, 1);
+- if (ret)
+- return ret;
+-
+- sqc_vft = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
+- ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << 32);
+- *base = QM_SQC_VFT_BASE_MASK_V2 & (sqc_vft >> QM_SQC_VFT_BASE_SHIFT_V2);
+- *number = (QM_SQC_VFT_NUM_MASK_v2 &
+- (sqc_vft >> QM_SQC_VFT_NUM_SHIFT_V2)) + 1;
+-
+- return 0;
+-}
+-
+-static int qm_get_vf_qp_num(struct hisi_qm *qm, u32 fun_num)
+-{
+- u32 remain_q_num, vfq_num;
+- u32 num_vfs = qm->vfs_num;
+-
+- vfq_num = (qm->ctrl_qp_num - qm->qp_num) / num_vfs;
+- if (vfq_num >= qm->max_qp_num)
+- return qm->max_qp_num;
+-
+- remain_q_num = (qm->ctrl_qp_num - qm->qp_num) % num_vfs;
+- if (vfq_num + remain_q_num <= qm->max_qp_num)
+- return fun_num == num_vfs ? vfq_num + remain_q_num : vfq_num;
+-
+- /*
+- * if vfq_num + remain_q_num > max_qp_num, the last VFs,
+- * each with one more queue.
+- */
+- return fun_num + remain_q_num > num_vfs ? vfq_num + 1 : vfq_num;
+-}
+-
+-static struct hisi_qm *file_to_qm(struct debugfs_file *file)
+-{
+- struct qm_debug *debug = file->debug;
+-
+- return container_of(debug, struct hisi_qm, debug);
+-}
+-
+-static u32 current_q_read(struct hisi_qm *qm)
+-{
+- return readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) >> QM_DFX_QN_SHIFT;
+-}
+-
+-static int current_q_write(struct hisi_qm *qm, u32 val)
+-{
+- u32 tmp;
+-
+- if (val >= qm->debug.curr_qm_qp_num)
+- return -EINVAL;
+-
+- tmp = val << QM_DFX_QN_SHIFT |
+- (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_FUN_MASK);
+- writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);
+-
+- tmp = val << QM_DFX_QN_SHIFT |
+- (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_FUN_MASK);
+- writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);
+-
+- return 0;
+-}
+-
+-static u32 clear_enable_read(struct hisi_qm *qm)
+-{
+- return readl(qm->io_base + QM_DFX_CNT_CLR_CE);
+-}
+-
+-/* rd_clr_ctrl 1 enable read clear, otherwise 0 disable it */
+-static int clear_enable_write(struct hisi_qm *qm, u32 rd_clr_ctrl)
+-{
+- if (rd_clr_ctrl > 1)
+- return -EINVAL;
+-
+- writel(rd_clr_ctrl, qm->io_base + QM_DFX_CNT_CLR_CE);
+-
+- return 0;
+-}
+-
+-static u32 current_qm_read(struct hisi_qm *qm)
+-{
+- return readl(qm->io_base + QM_DFX_MB_CNT_VF);
+-}
+-
+-static int current_qm_write(struct hisi_qm *qm, u32 val)
+-{
+- u32 tmp;
+-
+- if (val > qm->vfs_num)
+- return -EINVAL;
+-
+- /* According PF or VF Dev ID to calculation curr_qm_qp_num and store */
+- if (!val)
+- qm->debug.curr_qm_qp_num = qm->qp_num;
+- else
+- qm->debug.curr_qm_qp_num = qm_get_vf_qp_num(qm, val);
+-
+- writel(val, qm->io_base + QM_DFX_MB_CNT_VF);
+- writel(val, qm->io_base + QM_DFX_DB_CNT_VF);
+-
+- tmp = val |
+- (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_Q_MASK);
+- writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);
+-
+- tmp = val |
+- (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_Q_MASK);
+- writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);
+-
+- return 0;
+-}
+-
+-static ssize_t qm_debug_read(struct file *filp, char __user *buf,
+- size_t count, loff_t *pos)
+-{
+- struct debugfs_file *file = filp->private_data;
+- enum qm_debug_file index = file->index;
+- struct hisi_qm *qm = file_to_qm(file);
+- char tbuf[QM_DBG_TMP_BUF_LEN];
+- u32 val;
+- int ret;
+-
+- ret = hisi_qm_get_dfx_access(qm);
+- if (ret)
+- return ret;
+-
+- mutex_lock(&file->lock);
+- switch (index) {
+- case CURRENT_QM:
+- val = current_qm_read(qm);
+- break;
+- case CURRENT_Q:
+- val = current_q_read(qm);
+- break;
+- case CLEAR_ENABLE:
+- val = clear_enable_read(qm);
+- break;
+- default:
+- goto err_input;
+- }
+- mutex_unlock(&file->lock);
+-
+- hisi_qm_put_dfx_access(qm);
+- ret = scnprintf(tbuf, QM_DBG_TMP_BUF_LEN, "%u\n", val);
+- return simple_read_from_buffer(buf, count, pos, tbuf, ret);
+-
+-err_input:
+- mutex_unlock(&file->lock);
+- hisi_qm_put_dfx_access(qm);
+- return -EINVAL;
+-}
+-
+-static ssize_t qm_debug_write(struct file *filp, const char __user *buf,
+- size_t count, loff_t *pos)
+-{
+- struct debugfs_file *file = filp->private_data;
+- enum qm_debug_file index = file->index;
+- struct hisi_qm *qm = file_to_qm(file);
+- unsigned long val;
+- char tbuf[QM_DBG_TMP_BUF_LEN];
+- int len, ret;
+-
+- if (*pos != 0)
+- return 0;
+-
+- if (count >= QM_DBG_TMP_BUF_LEN)
+- return -ENOSPC;
+-
+- len = simple_write_to_buffer(tbuf, QM_DBG_TMP_BUF_LEN - 1, pos, buf,
+- count);
+- if (len < 0)
+- return len;
+-
+- tbuf[len] = '\0';
+- if (kstrtoul(tbuf, 0, &val))
+- return -EFAULT;
+-
+- ret = hisi_qm_get_dfx_access(qm);
+- if (ret)
+- return ret;
+-
+- mutex_lock(&file->lock);
+- switch (index) {
+- case CURRENT_QM:
+- ret = current_qm_write(qm, val);
+- break;
+- case CURRENT_Q:
+- ret = current_q_write(qm, val);
+- break;
+- case CLEAR_ENABLE:
+- ret = clear_enable_write(qm, val);
+- break;
+- default:
+- ret = -EINVAL;
+- }
+- mutex_unlock(&file->lock);
+-
+- hisi_qm_put_dfx_access(qm);
+-
+- if (ret)
+- return ret;
+-
+- return count;
+-}
+-
+-static const struct file_operations qm_debug_fops = {
+- .owner = THIS_MODULE,
+- .open = simple_open,
+- .read = qm_debug_read,
+- .write = qm_debug_write,
+-};
+-
+-#define CNT_CYC_REGS_NUM 10
+-static const struct debugfs_reg32 qm_dfx_regs[] = {
+- /* XXX_CNT are reading clear register */
+- {"QM_ECC_1BIT_CNT ", 0x104000ull},
+- {"QM_ECC_MBIT_CNT ", 0x104008ull},
+- {"QM_DFX_MB_CNT ", 0x104018ull},
+- {"QM_DFX_DB_CNT ", 0x104028ull},
+- {"QM_DFX_SQE_CNT ", 0x104038ull},
+- {"QM_DFX_CQE_CNT ", 0x104048ull},
+- {"QM_DFX_SEND_SQE_TO_ACC_CNT ", 0x104050ull},
+- {"QM_DFX_WB_SQE_FROM_ACC_CNT ", 0x104058ull},
+- {"QM_DFX_ACC_FINISH_CNT ", 0x104060ull},
+- {"QM_DFX_CQE_ERR_CNT ", 0x1040b4ull},
+- {"QM_DFX_FUNS_ACTIVE_ST ", 0x200ull},
+- {"QM_ECC_1BIT_INF ", 0x104004ull},
+- {"QM_ECC_MBIT_INF ", 0x10400cull},
+- {"QM_DFX_ACC_RDY_VLD0 ", 0x1040a0ull},
+- {"QM_DFX_ACC_RDY_VLD1 ", 0x1040a4ull},
+- {"QM_DFX_AXI_RDY_VLD ", 0x1040a8ull},
+- {"QM_DFX_FF_ST0 ", 0x1040c8ull},
+- {"QM_DFX_FF_ST1 ", 0x1040ccull},
+- {"QM_DFX_FF_ST2 ", 0x1040d0ull},
+- {"QM_DFX_FF_ST3 ", 0x1040d4ull},
+- {"QM_DFX_FF_ST4 ", 0x1040d8ull},
+- {"QM_DFX_FF_ST5 ", 0x1040dcull},
+- {"QM_DFX_FF_ST6 ", 0x1040e0ull},
+- {"QM_IN_IDLE_ST ", 0x1040e4ull},
+-};
+-
+-static const struct debugfs_reg32 qm_vf_dfx_regs[] = {
+- {"QM_DFX_FUNS_ACTIVE_ST ", 0x200ull},
+-};
+-
+-/**
+- * hisi_qm_regs_dump() - Dump registers's value.
+- * @s: debugfs file handle.
+- * @regset: accelerator registers information.
+- *
+- * Dump accelerator registers.
+- */
+-void hisi_qm_regs_dump(struct seq_file *s, struct debugfs_regset32 *regset)
+-{
+- struct pci_dev *pdev = to_pci_dev(regset->dev);
+- struct hisi_qm *qm = pci_get_drvdata(pdev);
+- const struct debugfs_reg32 *regs = regset->regs;
+- int regs_len = regset->nregs;
+- int i, ret;
+- u32 val;
+-
+- ret = hisi_qm_get_dfx_access(qm);
+- if (ret)
+- return;
+-
+- for (i = 0; i < regs_len; i++) {
+- val = readl(regset->base + regs[i].offset);
+- seq_printf(s, "%s= 0x%08x\n", regs[i].name, val);
+- }
+-
+- hisi_qm_put_dfx_access(qm);
+-}
+-EXPORT_SYMBOL_GPL(hisi_qm_regs_dump);
+-
+-static int qm_regs_show(struct seq_file *s, void *unused)
+-{
+- struct hisi_qm *qm = s->private;
+- struct debugfs_regset32 regset;
+-
+- if (qm->fun_type == QM_HW_PF) {
+- regset.regs = qm_dfx_regs;
+- regset.nregs = ARRAY_SIZE(qm_dfx_regs);
+- } else {
+- regset.regs = qm_vf_dfx_regs;
+- regset.nregs = ARRAY_SIZE(qm_vf_dfx_regs);
+- }
+-
+- regset.base = qm->io_base;
+- regset.dev = &qm->pdev->dev;
+-
+- hisi_qm_regs_dump(s, &regset);
+-
+- return 0;
+-}
+-
+-DEFINE_SHOW_ATTRIBUTE(qm_regs);
+-
+-static struct dfx_diff_registers *dfx_regs_init(struct hisi_qm *qm,
+- const struct dfx_diff_registers *cregs, int reg_len)
+-{
+- struct dfx_diff_registers *diff_regs;
+- u32 j, base_offset;
+- int i;
+-
+- diff_regs = kcalloc(reg_len, sizeof(*diff_regs), GFP_KERNEL);
+- if (!diff_regs)
+- return ERR_PTR(-ENOMEM);
+-
+- for (i = 0; i < reg_len; i++) {
+- if (!cregs[i].reg_len)
+- continue;
+-
+- diff_regs[i].reg_offset = cregs[i].reg_offset;
+- diff_regs[i].reg_len = cregs[i].reg_len;
+- diff_regs[i].regs = kcalloc(QM_DFX_REGS_LEN, cregs[i].reg_len,
+- GFP_KERNEL);
+- if (!diff_regs[i].regs)
+- goto alloc_error;
+-
+- for (j = 0; j < diff_regs[i].reg_len; j++) {
+- base_offset = diff_regs[i].reg_offset +
+- j * QM_DFX_REGS_LEN;
+- diff_regs[i].regs[j] = readl(qm->io_base + base_offset);
+- }
+- }
+-
+- return diff_regs;
+-
+-alloc_error:
+- while (i > 0) {
+- i--;
+- kfree(diff_regs[i].regs);
+- }
+- kfree(diff_regs);
+- return ERR_PTR(-ENOMEM);
+-}
+-
+-static void dfx_regs_uninit(struct hisi_qm *qm,
+- struct dfx_diff_registers *dregs, int reg_len)
+-{
+- int i;
+-
+- /* Setting the pointer is NULL to prevent double free */
+- for (i = 0; i < reg_len; i++) {
+- kfree(dregs[i].regs);
+- dregs[i].regs = NULL;
+- }
+- kfree(dregs);
+- dregs = NULL;
+-}
+-
+-/**
+- * hisi_qm_diff_regs_init() - Allocate memory for registers.
+- * @qm: device qm handle.
+- * @dregs: diff registers handle.
+- * @reg_len: diff registers region length.
+- */
+-int hisi_qm_diff_regs_init(struct hisi_qm *qm,
+- struct dfx_diff_registers *dregs, int reg_len)
+-{
+- if (!qm || !dregs || reg_len <= 0)
+- return -EINVAL;
+-
+- if (qm->fun_type != QM_HW_PF)
+- return 0;
+-
+- qm->debug.qm_diff_regs = dfx_regs_init(qm, qm_diff_regs,
+- ARRAY_SIZE(qm_diff_regs));
+- if (IS_ERR(qm->debug.qm_diff_regs))
+- return PTR_ERR(qm->debug.qm_diff_regs);
+-
+- qm->debug.acc_diff_regs = dfx_regs_init(qm, dregs, reg_len);
+- if (IS_ERR(qm->debug.acc_diff_regs)) {
+- dfx_regs_uninit(qm, qm->debug.qm_diff_regs,
+- ARRAY_SIZE(qm_diff_regs));
+- return PTR_ERR(qm->debug.acc_diff_regs);
+- }
+-
+- return 0;
+-}
+-EXPORT_SYMBOL_GPL(hisi_qm_diff_regs_init);
+-
+-/**
+- * hisi_qm_diff_regs_uninit() - Free memory for registers.
+- * @qm: device qm handle.
+- * @reg_len: diff registers region length.
+- */
+-void hisi_qm_diff_regs_uninit(struct hisi_qm *qm, int reg_len)
+-{
+- if (!qm || reg_len <= 0 || qm->fun_type != QM_HW_PF)
+- return;
+-
+- dfx_regs_uninit(qm, qm->debug.acc_diff_regs, reg_len);
+- dfx_regs_uninit(qm, qm->debug.qm_diff_regs, ARRAY_SIZE(qm_diff_regs));
+-}
+-EXPORT_SYMBOL_GPL(hisi_qm_diff_regs_uninit);
+-
+-/**
+- * hisi_qm_acc_diff_regs_dump() - Dump registers's value.
+- * @qm: device qm handle.
+- * @s: Debugfs file handle.
+- * @dregs: diff registers handle.
+- * @regs_len: diff registers region length.
+- */
+-void hisi_qm_acc_diff_regs_dump(struct hisi_qm *qm, struct seq_file *s,
+- struct dfx_diff_registers *dregs, int regs_len)
+-{
+- u32 j, val, base_offset;
+- int i, ret;
+-
+- if (!qm || !s || !dregs || regs_len <= 0)
+- return;
+-
+- ret = hisi_qm_get_dfx_access(qm);
+- if (ret)
+- return;
+-
+- down_read(&qm->qps_lock);
+- for (i = 0; i < regs_len; i++) {
+- if (!dregs[i].reg_len)
+- continue;
+-
+- for (j = 0; j < dregs[i].reg_len; j++) {
+- base_offset = dregs[i].reg_offset + j * QM_DFX_REGS_LEN;
+- val = readl(qm->io_base + base_offset);
+- if (val != dregs[i].regs[j])
+- seq_printf(s, "0x%08x = 0x%08x ---> 0x%08x\n",
+- base_offset, dregs[i].regs[j], val);
+- }
+- }
+- up_read(&qm->qps_lock);
+-
+- hisi_qm_put_dfx_access(qm);
+-}
+-EXPORT_SYMBOL_GPL(hisi_qm_acc_diff_regs_dump);
+-
+-static int qm_diff_regs_show(struct seq_file *s, void *unused)
+-{
+- struct hisi_qm *qm = s->private;
+-
+- hisi_qm_acc_diff_regs_dump(qm, s, qm->debug.qm_diff_regs,
+- ARRAY_SIZE(qm_diff_regs));
+-
+- return 0;
+-}
+-DEFINE_SHOW_ATTRIBUTE(qm_diff_regs);
+-
+-static ssize_t qm_cmd_read(struct file *filp, char __user *buffer,
+- size_t count, loff_t *pos)
+-{
+- char buf[QM_DBG_READ_LEN];
+- int len;
+-
+- len = scnprintf(buf, QM_DBG_READ_LEN, "%s\n",
+- "Please echo help to cmd to get help information");
+-
+- return simple_read_from_buffer(buffer, count, pos, buf, len);
+-}
+-
+-static void *qm_ctx_alloc(struct hisi_qm *qm, size_t ctx_size,
+- dma_addr_t *dma_addr)
+-{
+- struct device *dev = &qm->pdev->dev;
+- void *ctx_addr;
+-
+- ctx_addr = kzalloc(ctx_size, GFP_KERNEL);
+- if (!ctx_addr)
+- return ERR_PTR(-ENOMEM);
+-
+- *dma_addr = dma_map_single(dev, ctx_addr, ctx_size, DMA_FROM_DEVICE);
+- if (dma_mapping_error(dev, *dma_addr)) {
+- dev_err(dev, "DMA mapping error!\n");
+- kfree(ctx_addr);
+- return ERR_PTR(-ENOMEM);
+- }
+-
+- return ctx_addr;
+-}
+-
+-static void qm_ctx_free(struct hisi_qm *qm, size_t ctx_size,
+- const void *ctx_addr, dma_addr_t *dma_addr)
+-{
+- struct device *dev = &qm->pdev->dev;
+-
+- dma_unmap_single(dev, *dma_addr, ctx_size, DMA_FROM_DEVICE);
+- kfree(ctx_addr);
+-}
+-
+-static void dump_show(struct hisi_qm *qm, void *info,
+- unsigned int info_size, char *info_name)
+-{
+- struct device *dev = &qm->pdev->dev;
+- u8 *info_curr = info;
+- u32 i;
+-#define BYTE_PER_DW 4
+-
+- dev_info(dev, "%s DUMP\n", info_name);
+- for (i = 0; i < info_size; i += BYTE_PER_DW, info_curr += BYTE_PER_DW) {
+- pr_info("DW%u: %02X%02X %02X%02X\n", i / BYTE_PER_DW,
+- *(info_curr + 3), *(info_curr + 2), *(info_curr + 1), *(info_curr));
+- }
+-}
+-
+-static int qm_dump_sqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id)
+-{
+- return hisi_qm_mb(qm, QM_MB_CMD_SQC, dma_addr, qp_id, 1);
+-}
+-
+-static int qm_dump_cqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id)
+-{
+- return hisi_qm_mb(qm, QM_MB_CMD_CQC, dma_addr, qp_id, 1);
+-}
+-
+-static int qm_sqc_dump(struct hisi_qm *qm, const char *s)
+-{
+- struct device *dev = &qm->pdev->dev;
+- struct qm_sqc *sqc, *sqc_curr;
+- dma_addr_t sqc_dma;
+- u32 qp_id;
+- int ret;
+-
+- if (!s)
+- return -EINVAL;
+-
+- ret = kstrtou32(s, 0, &qp_id);
+- if (ret || qp_id >= qm->qp_num) {
+- dev_err(dev, "Please input qp num (0-%u)", qm->qp_num - 1);
+- return -EINVAL;
+- }
+-
+- sqc = qm_ctx_alloc(qm, sizeof(*sqc), &sqc_dma);
+- if (IS_ERR(sqc))
+- return PTR_ERR(sqc);
+-
+- ret = qm_dump_sqc_raw(qm, sqc_dma, qp_id);
+- if (ret) {
+- down_read(&qm->qps_lock);
+- if (qm->sqc) {
+- sqc_curr = qm->sqc + qp_id;
+-
+- dump_show(qm, sqc_curr, sizeof(*sqc), "SOFT SQC");
+- }
+- up_read(&qm->qps_lock);
+-
+- goto free_ctx;
+- }
+-
+- dump_show(qm, sqc, sizeof(*sqc), "SQC");
+-
+-free_ctx:
+- qm_ctx_free(qm, sizeof(*sqc), sqc, &sqc_dma);
+- return 0;
+-}
+-
+-static int qm_cqc_dump(struct hisi_qm *qm, const char *s)
+-{
+- struct device *dev = &qm->pdev->dev;
+- struct qm_cqc *cqc, *cqc_curr;
+- dma_addr_t cqc_dma;
+- u32 qp_id;
+- int ret;
+-
+- if (!s)
+- return -EINVAL;
+-
+- ret = kstrtou32(s, 0, &qp_id);
+- if (ret || qp_id >= qm->qp_num) {
+- dev_err(dev, "Please input qp num (0-%u)", qm->qp_num - 1);
+- return -EINVAL;
+- }
+-
+- cqc = qm_ctx_alloc(qm, sizeof(*cqc), &cqc_dma);
+- if (IS_ERR(cqc))
+- return PTR_ERR(cqc);
+-
+- ret = qm_dump_cqc_raw(qm, cqc_dma, qp_id);
+- if (ret) {
+- down_read(&qm->qps_lock);
+- if (qm->cqc) {
+- cqc_curr = qm->cqc + qp_id;
+-
+- dump_show(qm, cqc_curr, sizeof(*cqc), "SOFT CQC");
+- }
+- up_read(&qm->qps_lock);
+-
+- goto free_ctx;
+- }
+-
+- dump_show(qm, cqc, sizeof(*cqc), "CQC");
+-
+-free_ctx:
+- qm_ctx_free(qm, sizeof(*cqc), cqc, &cqc_dma);
+- return 0;
+-}
+-
+-static int qm_eqc_aeqc_dump(struct hisi_qm *qm, char *s, size_t size,
+- int cmd, char *name)
+-{
+- struct device *dev = &qm->pdev->dev;
+- dma_addr_t xeqc_dma;
+- void *xeqc;
+- int ret;
+-
+- if (strsep(&s, " ")) {
+- dev_err(dev, "Please do not input extra characters!\n");
+- return -EINVAL;
+- }
+-
+- xeqc = qm_ctx_alloc(qm, size, &xeqc_dma);
+- if (IS_ERR(xeqc))
+- return PTR_ERR(xeqc);
+-
+- ret = hisi_qm_mb(qm, cmd, xeqc_dma, 0, 1);
+- if (ret)
+- goto err_free_ctx;
+-
+- dump_show(qm, xeqc, size, name);
+-
+-err_free_ctx:
+- qm_ctx_free(qm, size, xeqc, &xeqc_dma);
+- return ret;
+-}
+-
+-static int q_dump_param_parse(struct hisi_qm *qm, char *s,
+- u32 *e_id, u32 *q_id, u16 q_depth)
+-{
+- struct device *dev = &qm->pdev->dev;
+- unsigned int qp_num = qm->qp_num;
+- char *presult;
+- int ret;
+-
+- presult = strsep(&s, " ");
+- if (!presult) {
+- dev_err(dev, "Please input qp number!\n");
+- return -EINVAL;
+- }
+-
+- ret = kstrtou32(presult, 0, q_id);
+- if (ret || *q_id >= qp_num) {
+- dev_err(dev, "Please input qp num (0-%u)", qp_num - 1);
+- return -EINVAL;
+- }
+-
+- presult = strsep(&s, " ");
+- if (!presult) {
+- dev_err(dev, "Please input sqe number!\n");
+- return -EINVAL;
+- }
+-
+- ret = kstrtou32(presult, 0, e_id);
+- if (ret || *e_id >= q_depth) {
+- dev_err(dev, "Please input sqe num (0-%u)", q_depth - 1);
+- return -EINVAL;
+- }
+-
+- if (strsep(&s, " ")) {
+- dev_err(dev, "Please do not input extra characters!\n");
+- return -EINVAL;
++ case SHAPER_VFT:
++ if (factor) {
++ tmp = factor->cir_b |
++ (factor->cir_u << QM_SHAPER_FACTOR_CIR_U_SHIFT) |
++ (factor->cir_s << QM_SHAPER_FACTOR_CIR_S_SHIFT) |
++ (QM_SHAPER_CBS_B << QM_SHAPER_FACTOR_CBS_B_SHIFT) |
++ (factor->cbs_s << QM_SHAPER_FACTOR_CBS_S_SHIFT);
++ }
++ break;
++ }
+ }
+
+- return 0;
++ writel(lower_32_bits(tmp), qm->io_base + QM_VFT_CFG_DATA_L);
++ writel(upper_32_bits(tmp), qm->io_base + QM_VFT_CFG_DATA_H);
+ }
+
+-static int qm_sq_dump(struct hisi_qm *qm, char *s)
++static int qm_set_vft_common(struct hisi_qm *qm, enum vft_type type,
++ u32 fun_num, u32 base, u32 number)
+ {
+- u16 sq_depth = qm->qp_array->cq_depth;
+- void *sqe, *sqe_curr;
+- struct hisi_qp *qp;
+- u32 qp_id, sqe_id;
++ struct qm_shaper_factor *factor = NULL;
++ unsigned int val;
+ int ret;
+
+- ret = q_dump_param_parse(qm, s, &sqe_id, &qp_id, sq_depth);
++ if (type == SHAPER_VFT && test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps))
++ factor = &qm->factor[fun_num];
++
++ ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
++ val & BIT(0), POLL_PERIOD,
++ POLL_TIMEOUT);
+ if (ret)
+ return ret;
+
+- sqe = kzalloc(qm->sqe_size * sq_depth, GFP_KERNEL);
+- if (!sqe)
+- return -ENOMEM;
++ writel(0x0, qm->io_base + QM_VFT_CFG_OP_WR);
++ writel(type, qm->io_base + QM_VFT_CFG_TYPE);
++ if (type == SHAPER_VFT)
++ fun_num |= base << QM_SHAPER_VFT_OFFSET;
+
+- qp = &qm->qp_array[qp_id];
+- memcpy(sqe, qp->sqe, qm->sqe_size * sq_depth);
+- sqe_curr = sqe + (u32)(sqe_id * qm->sqe_size);
+- memset(sqe_curr + qm->debug.sqe_mask_offset, QM_SQE_ADDR_MASK,
+- qm->debug.sqe_mask_len);
++ writel(fun_num, qm->io_base + QM_VFT_CFG);
+
+- dump_show(qm, sqe_curr, qm->sqe_size, "SQE");
++ qm_vft_data_cfg(qm, type, base, number, factor);
+
+- kfree(sqe);
++ writel(0x0, qm->io_base + QM_VFT_CFG_RDY);
++ writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE);
+
+- return 0;
++ return readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
++ val & BIT(0), POLL_PERIOD,
++ POLL_TIMEOUT);
+ }
+
+-static int qm_cq_dump(struct hisi_qm *qm, char *s)
++static int qm_shaper_init_vft(struct hisi_qm *qm, u32 fun_num)
+ {
+- struct qm_cqe *cqe_curr;
+- struct hisi_qp *qp;
+- u32 qp_id, cqe_id;
+- int ret;
++ u32 qos = qm->factor[fun_num].func_qos;
++ int ret, i;
+
+- ret = q_dump_param_parse(qm, s, &cqe_id, &qp_id, qm->qp_array->cq_depth);
+- if (ret)
++ ret = qm_get_shaper_para(qos * QM_QOS_RATE, &qm->factor[fun_num]);
++ if (ret) {
++ dev_err(&qm->pdev->dev, "failed to calculate shaper parameter!\n");
+ return ret;
+-
+- qp = &qm->qp_array[qp_id];
+- cqe_curr = qp->cqe + cqe_id;
+- dump_show(qm, cqe_curr, sizeof(struct qm_cqe), "CQE");
++ }
++ writel(qm->type_rate, qm->io_base + QM_SHAPER_CFG);
++ for (i = ALG_TYPE_0; i <= ALG_TYPE_1; i++) {
++ /* The base number of queue reuse for different alg type */
++ ret = qm_set_vft_common(qm, SHAPER_VFT, fun_num, i, 1);
++ if (ret)
++ return ret;
++ }
+
+ return 0;
+ }
+
+-static int qm_eq_aeq_dump(struct hisi_qm *qm, const char *s,
+- size_t size, char *name)
++/* The config should be conducted after qm_dev_mem_reset() */
++static int qm_set_sqc_cqc_vft(struct hisi_qm *qm, u32 fun_num, u32 base,
++ u32 number)
+ {
+- struct device *dev = &qm->pdev->dev;
+- void *xeqe;
+- u32 xeqe_id;
+- int ret;
+-
+- if (!s)
+- return -EINVAL;
+-
+- ret = kstrtou32(s, 0, &xeqe_id);
+- if (ret)
+- return -EINVAL;
++ int ret, i;
+
+- if (!strcmp(name, "EQE") && xeqe_id >= qm->eq_depth) {
+- dev_err(dev, "Please input eqe num (0-%u)", qm->eq_depth - 1);
+- return -EINVAL;
+- } else if (!strcmp(name, "AEQE") && xeqe_id >= qm->aeq_depth) {
+- dev_err(dev, "Please input aeqe num (0-%u)", qm->eq_depth - 1);
+- return -EINVAL;
++ for (i = SQC_VFT; i <= CQC_VFT; i++) {
++ ret = qm_set_vft_common(qm, i, fun_num, base, number);
++ if (ret)
++ return ret;
+ }
+
+- down_read(&qm->qps_lock);
+-
+- if (qm->eqe && !strcmp(name, "EQE")) {
+- xeqe = qm->eqe + xeqe_id;
+- } else if (qm->aeqe && !strcmp(name, "AEQE")) {
+- xeqe = qm->aeqe + xeqe_id;
+- } else {
+- ret = -EINVAL;
+- goto err_unlock;
++ /* init default shaper qos val */
++ if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) {
++ ret = qm_shaper_init_vft(qm, fun_num);
++ if (ret)
++ goto back_sqc_cqc;
+ }
+
+- dump_show(qm, xeqe, size, name);
++ return 0;
++back_sqc_cqc:
++ for (i = SQC_VFT; i <= CQC_VFT; i++)
++ qm_set_vft_common(qm, i, fun_num, 0, 0);
+
+-err_unlock:
+- up_read(&qm->qps_lock);
+ return ret;
+ }
+
+-static int qm_dbg_help(struct hisi_qm *qm, char *s)
++static int qm_get_vft_v2(struct hisi_qm *qm, u32 *base, u32 *number)
+ {
+- struct device *dev = &qm->pdev->dev;
++ u64 sqc_vft;
++ int ret;
+
+- if (strsep(&s, " ")) {
+- dev_err(dev, "Please do not input extra characters!\n");
+- return -EINVAL;
+- }
++ ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_VFT_V2, 0, 0, 1);
++ if (ret)
++ return ret;
+
+- dev_info(dev, "available commands:\n");
+- dev_info(dev, "sqc <num>\n");
+- dev_info(dev, "cqc <num>\n");
+- dev_info(dev, "eqc\n");
+- dev_info(dev, "aeqc\n");
+- dev_info(dev, "sq <num> <e>\n");
+- dev_info(dev, "cq <num> <e>\n");
+- dev_info(dev, "eq <e>\n");
+- dev_info(dev, "aeq <e>\n");
++ sqc_vft = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
++ ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << 32);
++ *base = QM_SQC_VFT_BASE_MASK_V2 & (sqc_vft >> QM_SQC_VFT_BASE_SHIFT_V2);
++ *number = (QM_SQC_VFT_NUM_MASK_v2 &
++ (sqc_vft >> QM_SQC_VFT_NUM_SHIFT_V2)) + 1;
+
+ return 0;
+ }
+
+-static int qm_cmd_write_dump(struct hisi_qm *qm, const char *cmd_buf)
++void *hisi_qm_ctx_alloc(struct hisi_qm *qm, size_t ctx_size,
++ dma_addr_t *dma_addr)
+ {
+ struct device *dev = &qm->pdev->dev;
+- char *presult, *s, *s_tmp;
+- int ret;
+-
+- s = kstrdup(cmd_buf, GFP_KERNEL);
+- if (!s)
+- return -ENOMEM;
+-
+- s_tmp = s;
+- presult = strsep(&s, " ");
+- if (!presult) {
+- ret = -EINVAL;
+- goto err_buffer_free;
+- }
+-
+- if (!strcmp(presult, "sqc"))
+- ret = qm_sqc_dump(qm, s);
+- else if (!strcmp(presult, "cqc"))
+- ret = qm_cqc_dump(qm, s);
+- else if (!strcmp(presult, "eqc"))
+- ret = qm_eqc_aeqc_dump(qm, s, sizeof(struct qm_eqc),
+- QM_MB_CMD_EQC, "EQC");
+- else if (!strcmp(presult, "aeqc"))
+- ret = qm_eqc_aeqc_dump(qm, s, sizeof(struct qm_aeqc),
+- QM_MB_CMD_AEQC, "AEQC");
+- else if (!strcmp(presult, "sq"))
+- ret = qm_sq_dump(qm, s);
+- else if (!strcmp(presult, "cq"))
+- ret = qm_cq_dump(qm, s);
+- else if (!strcmp(presult, "eq"))
+- ret = qm_eq_aeq_dump(qm, s, sizeof(struct qm_eqe), "EQE");
+- else if (!strcmp(presult, "aeq"))
+- ret = qm_eq_aeq_dump(qm, s, sizeof(struct qm_aeqe), "AEQE");
+- else if (!strcmp(presult, "help"))
+- ret = qm_dbg_help(qm, s);
+- else
+- ret = -EINVAL;
++ void *ctx_addr;
+
+- if (ret)
+- dev_info(dev, "Please echo help\n");
++ ctx_addr = kzalloc(ctx_size, GFP_KERNEL);
++ if (!ctx_addr)
++ return ERR_PTR(-ENOMEM);
+
+-err_buffer_free:
+- kfree(s_tmp);
++ *dma_addr = dma_map_single(dev, ctx_addr, ctx_size, DMA_FROM_DEVICE);
++ if (dma_mapping_error(dev, *dma_addr)) {
++ dev_err(dev, "DMA mapping error!\n");
++ kfree(ctx_addr);
++ return ERR_PTR(-ENOMEM);
++ }
+
+- return ret;
++ return ctx_addr;
+ }
+
+-static ssize_t qm_cmd_write(struct file *filp, const char __user *buffer,
+- size_t count, loff_t *pos)
++void hisi_qm_ctx_free(struct hisi_qm *qm, size_t ctx_size,
++ const void *ctx_addr, dma_addr_t *dma_addr)
+ {
+- struct hisi_qm *qm = filp->private_data;
+- char *cmd_buf, *cmd_buf_tmp;
+- int ret;
+-
+- if (*pos)
+- return 0;
+-
+- ret = hisi_qm_get_dfx_access(qm);
+- if (ret)
+- return ret;
+-
+- /* Judge if the instance is being reset. */
+- if (unlikely(atomic_read(&qm->status.flags) == QM_STOP)) {
+- ret = 0;
+- goto put_dfx_access;
+- }
+-
+- if (count > QM_DBG_WRITE_LEN) {
+- ret = -ENOSPC;
+- goto put_dfx_access;
+- }
+-
+- cmd_buf = memdup_user_nul(buffer, count);
+- if (IS_ERR(cmd_buf)) {
+- ret = PTR_ERR(cmd_buf);
+- goto put_dfx_access;
+- }
+-
+- cmd_buf_tmp = strchr(cmd_buf, '\n');
+- if (cmd_buf_tmp) {
+- *cmd_buf_tmp = '\0';
+- count = cmd_buf_tmp - cmd_buf + 1;
+- }
+-
+- ret = qm_cmd_write_dump(qm, cmd_buf);
+- if (ret) {
+- kfree(cmd_buf);
+- goto put_dfx_access;
+- }
+-
+- kfree(cmd_buf);
+-
+- ret = count;
++ struct device *dev = &qm->pdev->dev;
+
+-put_dfx_access:
+- hisi_qm_put_dfx_access(qm);
+- return ret;
++ dma_unmap_single(dev, *dma_addr, ctx_size, DMA_FROM_DEVICE);
++ kfree(ctx_addr);
+ }
+
+-static const struct file_operations qm_cmd_fops = {
+- .owner = THIS_MODULE,
+- .open = simple_open,
+- .read = qm_cmd_read,
+- .write = qm_cmd_write,
+-};
+-
+-static void qm_create_debugfs_file(struct hisi_qm *qm, struct dentry *dir,
+- enum qm_debug_file index)
++static int qm_dump_sqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id)
+ {
+- struct debugfs_file *file = qm->debug.files + index;
+-
+- debugfs_create_file(qm_debug_file_name[index], 0600, dir, file,
+- &qm_debug_fops);
++ return hisi_qm_mb(qm, QM_MB_CMD_SQC, dma_addr, qp_id, 1);
++}
+
+- file->index = index;
+- mutex_init(&file->lock);
+- file->debug = &qm->debug;
++static int qm_dump_cqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id)
++{
++ return hisi_qm_mb(qm, QM_MB_CMD_CQC, dma_addr, qp_id, 1);
+ }
+
+ static void qm_hw_error_init_v1(struct hisi_qm *qm)
+@@ -3100,7 +2128,7 @@ static int qm_drain_qp(struct hisi_qp *qp)
+ return ret;
+ }
+
+- addr = qm_ctx_alloc(qm, size, &dma_addr);
++ addr = hisi_qm_ctx_alloc(qm, size, &dma_addr);
+ if (IS_ERR(addr)) {
+ dev_err(dev, "Failed to alloc ctx for sqc and cqc!\n");
+ return -ENOMEM;
+@@ -3135,7 +2163,7 @@ static int qm_drain_qp(struct hisi_qp *qp)
+ usleep_range(WAIT_PERIOD_US_MIN, WAIT_PERIOD_US_MAX);
+ }
+
+- qm_ctx_free(qm, size, addr, &dma_addr);
++ hisi_qm_ctx_free(qm, size, addr, &dma_addr);
+
+ return ret;
+ }
+@@ -3659,7 +2687,6 @@ static void hisi_qm_pre_init(struct hisi_qm *qm)
+ mutex_init(&qm->mailbox_lock);
+ init_rwsem(&qm->qps_lock);
+ qm->qp_in_used = 0;
+- qm->misc_ctl = false;
+ if (test_bit(QM_SUPPORT_RPM, &qm->caps)) {
+ if (!acpi_device_power_manageable(ACPI_COMPANION(&pdev->dev)))
+ dev_info(&pdev->dev, "_PS0 and _PR0 are not defined");
+@@ -3720,17 +2747,6 @@ static void hisi_qm_set_state(struct hisi_qm *qm, u8 state)
+ writel(state, qm->io_base + QM_VF_STATE);
+ }
+
+-static void qm_last_regs_uninit(struct hisi_qm *qm)
+-{
+- struct qm_debug *debug = &qm->debug;
+-
+- if (qm->fun_type == QM_HW_VF || !debug->qm_last_words)
+- return;
+-
+- kfree(debug->qm_last_words);
+- debug->qm_last_words = NULL;
+-}
+-
+ static void hisi_qm_unint_work(struct hisi_qm *qm)
+ {
+ destroy_workqueue(qm->wq);
+@@ -3761,8 +2777,6 @@ static void hisi_qm_memory_uninit(struct hisi_qm *qm)
+ */
+ void hisi_qm_uninit(struct hisi_qm *qm)
+ {
+- qm_last_regs_uninit(qm);
+-
+ qm_cmd_uninit(qm);
+ hisi_qm_unint_work(qm);
+ down_write(&qm->qps_lock);
+@@ -4131,45 +3145,6 @@ err_unlock:
+ }
+ EXPORT_SYMBOL_GPL(hisi_qm_stop);
+
+-static ssize_t qm_status_read(struct file *filp, char __user *buffer,
+- size_t count, loff_t *pos)
+-{
+- struct hisi_qm *qm = filp->private_data;
+- char buf[QM_DBG_READ_LEN];
+- int val, len;
+-
+- val = atomic_read(&qm->status.flags);
+- len = scnprintf(buf, QM_DBG_READ_LEN, "%s\n", qm_s[val]);
+-
+- return simple_read_from_buffer(buffer, count, pos, buf, len);
+-}
+-
+-static const struct file_operations qm_status_fops = {
+- .owner = THIS_MODULE,
+- .open = simple_open,
+- .read = qm_status_read,
+-};
+-
+-static int qm_debugfs_atomic64_set(void *data, u64 val)
+-{
+- if (val)
+- return -EINVAL;
+-
+- atomic64_set((atomic64_t *)data, 0);
+-
+- return 0;
+-}
+-
+-static int qm_debugfs_atomic64_get(void *data, u64 *val)
+-{
+- *val = atomic64_read((atomic64_t *)data);
+-
+- return 0;
+-}
+-
+-DEFINE_DEBUGFS_ATTRIBUTE(qm_atomic64_ops, qm_debugfs_atomic64_get,
+- qm_debugfs_atomic64_set, "%llu\n");
+-
+ static void qm_hw_error_init(struct hisi_qm *qm)
+ {
+ if (!qm->ops->hw_error_init) {
+@@ -4708,7 +3683,7 @@ static const struct file_operations qm_algqos_fops = {
+ *
+ * Create function qos debugfs files, VF ping PF to get function qos.
+ */
+-static void hisi_qm_set_algqos_init(struct hisi_qm *qm)
++void hisi_qm_set_algqos_init(struct hisi_qm *qm)
+ {
+ if (qm->fun_type == QM_HW_PF)
+ debugfs_create_file("alg_qos", 0644, qm->debug.debug_root,
+@@ -4718,88 +3693,6 @@ static void hisi_qm_set_algqos_init(struct hisi_qm *qm)
+ qm, &qm_algqos_fops);
+ }
+
+-/**
+- * hisi_qm_debug_init() - Initialize qm related debugfs files.
+- * @qm: The qm for which we want to add debugfs files.
+- *
+- * Create qm related debugfs files.
+- */
+-void hisi_qm_debug_init(struct hisi_qm *qm)
+-{
+- struct dfx_diff_registers *qm_regs = qm->debug.qm_diff_regs;
+- struct qm_dfx *dfx = &qm->debug.dfx;
+- struct dentry *qm_d;
+- void *data;
+- int i;
+-
+- qm_d = debugfs_create_dir("qm", qm->debug.debug_root);
+- qm->debug.qm_d = qm_d;
+-
+- /* only show this in PF */
+- if (qm->fun_type == QM_HW_PF) {
+- qm_create_debugfs_file(qm, qm->debug.debug_root, CURRENT_QM);
+- for (i = CURRENT_Q; i < DEBUG_FILE_NUM; i++)
+- qm_create_debugfs_file(qm, qm->debug.qm_d, i);
+- }
+-
+- if (qm_regs)
+- debugfs_create_file("diff_regs", 0444, qm->debug.qm_d,
+- qm, &qm_diff_regs_fops);
+-
+- debugfs_create_file("regs", 0444, qm->debug.qm_d, qm, &qm_regs_fops);
+-
+- debugfs_create_file("cmd", 0600, qm->debug.qm_d, qm, &qm_cmd_fops);
+-
+- debugfs_create_file("status", 0444, qm->debug.qm_d, qm,
+- &qm_status_fops);
+- for (i = 0; i < ARRAY_SIZE(qm_dfx_files); i++) {
+- data = (atomic64_t *)((uintptr_t)dfx + qm_dfx_files[i].offset);
+- debugfs_create_file(qm_dfx_files[i].name,
+- 0644,
+- qm_d,
+- data,
+- &qm_atomic64_ops);
+- }
+-
+- if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps))
+- hisi_qm_set_algqos_init(qm);
+-}
+-EXPORT_SYMBOL_GPL(hisi_qm_debug_init);
+-
+-/**
+- * hisi_qm_debug_regs_clear() - clear qm debug related registers.
+- * @qm: The qm for which we want to clear its debug registers.
+- */
+-void hisi_qm_debug_regs_clear(struct hisi_qm *qm)
+-{
+- const struct debugfs_reg32 *regs;
+- int i;
+-
+- /* clear current_qm */
+- writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF);
+- writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF);
+-
+- /* clear current_q */
+- writel(0x0, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);
+- writel(0x0, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);
+-
+- /*
+- * these registers are reading and clearing, so clear them after
+- * reading them.
+- */
+- writel(0x1, qm->io_base + QM_DFX_CNT_CLR_CE);
+-
+- regs = qm_dfx_regs;
+- for (i = 0; i < CNT_CYC_REGS_NUM; i++) {
+- readl(qm->io_base + regs->offset);
+- regs++;
+- }
+-
+- /* clear clear_enable */
+- writel(0x0, qm->io_base + QM_DFX_CNT_CLR_CE);
+-}
+-EXPORT_SYMBOL_GPL(hisi_qm_debug_regs_clear);
+-
+ static void hisi_qm_init_vf_qos(struct hisi_qm *qm, int total_func)
+ {
+ int i;
+@@ -5438,24 +4331,6 @@ static int qm_controller_reset_done(struct hisi_qm *qm)
+ return 0;
+ }
+
+-static void qm_show_last_dfx_regs(struct hisi_qm *qm)
+-{
+- struct qm_debug *debug = &qm->debug;
+- struct pci_dev *pdev = qm->pdev;
+- u32 val;
+- int i;
+-
+- if (qm->fun_type == QM_HW_VF || !debug->qm_last_words)
+- return;
+-
+- for (i = 0; i < ARRAY_SIZE(qm_dfx_regs); i++) {
+- val = readl_relaxed(qm->io_base + qm_dfx_regs[i].offset);
+- if (debug->qm_last_words[i] != val)
+- pci_info(pdev, "%s \t= 0x%08x => 0x%08x\n",
+- qm_dfx_regs[i].name, debug->qm_last_words[i], val);
+- }
+-}
+-
+ static int qm_controller_reset(struct hisi_qm *qm)
+ {
+ struct pci_dev *pdev = qm->pdev;
+@@ -5471,7 +4346,7 @@ static int qm_controller_reset(struct hisi_qm *qm)
+ return ret;
+ }
+
+- qm_show_last_dfx_regs(qm);
++ hisi_qm_show_last_dfx_regs(qm);
+ if (qm->err_ini->show_last_dfx_regs)
+ qm->err_ini->show_last_dfx_regs(qm);
+
+@@ -6091,6 +4966,7 @@ free_eq_irq:
+
+ static int qm_get_qp_num(struct hisi_qm *qm)
+ {
++ struct device *dev = &qm->pdev->dev;
+ bool is_db_isolation;
+
+ /* VF's qp_num assigned by PF in v2, and VF can get qp_num by vft. */
+@@ -6107,13 +4983,21 @@ static int qm_get_qp_num(struct hisi_qm *qm)
+ qm->max_qp_num = hisi_qm_get_hw_info(qm, qm_basic_info,
+ QM_FUNC_MAX_QP_CAP, is_db_isolation);
+
+- /* check if qp number is valid */
+- if (qm->qp_num > qm->max_qp_num) {
+- dev_err(&qm->pdev->dev, "qp num(%u) is more than max qp num(%u)!\n",
++ if (qm->qp_num <= qm->max_qp_num)
++ return 0;
++
++ if (test_bit(QM_MODULE_PARAM, &qm->misc_ctl)) {
++ /* Check whether the set qp number is valid */
++ dev_err(dev, "qp num(%u) is more than max qp num(%u)!\n",
+ qm->qp_num, qm->max_qp_num);
+ return -EINVAL;
+ }
+
++ dev_info(dev, "Default qp num(%u) is too big, reset it to Function's max qp num(%u)!\n",
++ qm->qp_num, qm->max_qp_num);
++ qm->qp_num = qm->max_qp_num;
++ qm->debug.curr_qm_qp_num = qm->qp_num;
++
+ return 0;
+ }
+
+@@ -6358,26 +5242,6 @@ err_destroy_idr:
+ return ret;
+ }
+
+-static void qm_last_regs_init(struct hisi_qm *qm)
+-{
+- int dfx_regs_num = ARRAY_SIZE(qm_dfx_regs);
+- struct qm_debug *debug = &qm->debug;
+- int i;
+-
+- if (qm->fun_type == QM_HW_VF)
+- return;
+-
+- debug->qm_last_words = kcalloc(dfx_regs_num, sizeof(unsigned int),
+- GFP_KERNEL);
+- if (!debug->qm_last_words)
+- return;
+-
+- for (i = 0; i < dfx_regs_num; i++) {
+- debug->qm_last_words[i] = readl_relaxed(qm->io_base +
+- qm_dfx_regs[i].offset);
+- }
+-}
+-
+ /**
+ * hisi_qm_init() - Initialize configures about qm.
+ * @qm: The qm needing init.
+@@ -6426,8 +5290,6 @@ int hisi_qm_init(struct hisi_qm *qm)
+ qm_cmd_init(qm);
+ atomic_set(&qm->status.flags, QM_INIT);
+
+- qm_last_regs_init(qm);
+-
+ return 0;
+
+ err_free_qm_memory:
+diff --git a/drivers/crypto/hisilicon/qm_common.h b/drivers/crypto/hisilicon/qm_common.h
+new file mode 100644
+index 0000000000000..8e36aa9c681be
+--- /dev/null
++++ b/drivers/crypto/hisilicon/qm_common.h
+@@ -0,0 +1,86 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++/* Copyright (c) 2022 HiSilicon Limited. */
++#ifndef QM_COMMON_H
++#define QM_COMMON_H
++
++#define QM_DBG_READ_LEN 256
++
++struct qm_cqe {
++ __le32 rsvd0;
++ __le16 cmd_id;
++ __le16 rsvd1;
++ __le16 sq_head;
++ __le16 sq_num;
++ __le16 rsvd2;
++ __le16 w7;
++};
++
++struct qm_eqe {
++ __le32 dw0;
++};
++
++struct qm_aeqe {
++ __le32 dw0;
++};
++
++struct qm_sqc {
++ __le16 head;
++ __le16 tail;
++ __le32 base_l;
++ __le32 base_h;
++ __le32 dw3;
++ __le16 w8;
++ __le16 rsvd0;
++ __le16 pasid;
++ __le16 w11;
++ __le16 cq_num;
++ __le16 w13;
++ __le32 rsvd1;
++};
++
++struct qm_cqc {
++ __le16 head;
++ __le16 tail;
++ __le32 base_l;
++ __le32 base_h;
++ __le32 dw3;
++ __le16 w8;
++ __le16 rsvd0;
++ __le16 pasid;
++ __le16 w11;
++ __le32 dw6;
++ __le32 rsvd1;
++};
++
++struct qm_eqc {
++ __le16 head;
++ __le16 tail;
++ __le32 base_l;
++ __le32 base_h;
++ __le32 dw3;
++ __le32 rsvd[2];
++ __le32 dw6;
++};
++
++struct qm_aeqc {
++ __le16 head;
++ __le16 tail;
++ __le32 base_l;
++ __le32 base_h;
++ __le32 dw3;
++ __le32 rsvd[2];
++ __le32 dw6;
++};
++
++static const char * const qm_s[] = {
++ "init", "start", "close", "stop",
++};
++
++void *hisi_qm_ctx_alloc(struct hisi_qm *qm, size_t ctx_size,
++ dma_addr_t *dma_addr);
++void hisi_qm_ctx_free(struct hisi_qm *qm, size_t ctx_size,
++ const void *ctx_addr, dma_addr_t *dma_addr);
++void hisi_qm_show_last_dfx_regs(struct hisi_qm *qm);
++void hisi_qm_set_algqos_init(struct hisi_qm *qm);
++
++#endif
+diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
+index 3705412bac5f1..e384988bda917 100644
+--- a/drivers/crypto/hisilicon/sec2/sec_main.c
++++ b/drivers/crypto/hisilicon/sec2/sec_main.c
+@@ -312,8 +312,11 @@ static int sec_diff_regs_show(struct seq_file *s, void *unused)
+ }
+ DEFINE_SHOW_ATTRIBUTE(sec_diff_regs);
+
++static bool pf_q_num_flag;
+ static int sec_pf_q_num_set(const char *val, const struct kernel_param *kp)
+ {
++ pf_q_num_flag = true;
++
+ return q_num_set(val, kp, PCI_DEVICE_ID_HUAWEI_SEC_PF);
+ }
+
+@@ -899,8 +902,7 @@ static int sec_debugfs_init(struct hisi_qm *qm)
+ qm->debug.sqe_mask_offset = SEC_SQE_MASK_OFFSET;
+ qm->debug.sqe_mask_len = SEC_SQE_MASK_LEN;
+
+- ret = hisi_qm_diff_regs_init(qm, sec_diff_regs,
+- ARRAY_SIZE(sec_diff_regs));
++ ret = hisi_qm_regs_debugfs_init(qm, sec_diff_regs, ARRAY_SIZE(sec_diff_regs));
+ if (ret) {
+ dev_warn(dev, "Failed to init SEC diff regs!\n");
+ goto debugfs_remove;
+@@ -915,7 +917,7 @@ static int sec_debugfs_init(struct hisi_qm *qm)
+ return 0;
+
+ failed_to_create:
+- hisi_qm_diff_regs_uninit(qm, ARRAY_SIZE(sec_diff_regs));
++ hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(sec_diff_regs));
+ debugfs_remove:
+ debugfs_remove_recursive(sec_debugfs_root);
+ return ret;
+@@ -923,7 +925,7 @@ debugfs_remove:
+
+ static void sec_debugfs_exit(struct hisi_qm *qm)
+ {
+- hisi_qm_diff_regs_uninit(qm, ARRAY_SIZE(sec_diff_regs));
++ hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(sec_diff_regs));
+
+ debugfs_remove_recursive(qm->debug.debug_root);
+ }
+@@ -1123,6 +1125,8 @@ static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
+ qm->qp_num = pf_q_num;
+ qm->debug.curr_qm_qp_num = pf_q_num;
+ qm->qm_list = &sec_devices;
++ if (pf_q_num_flag)
++ set_bit(QM_MODULE_PARAM, &qm->misc_ctl);
+ } else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V1) {
+ /*
+ * have no way to get qm configure in VM in v1 hardware,
+diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c
+index c863435e8c75a..190b4fecfc747 100644
+--- a/drivers/crypto/hisilicon/zip/zip_main.c
++++ b/drivers/crypto/hisilicon/zip/zip_main.c
+@@ -365,8 +365,11 @@ static u32 uacce_mode = UACCE_MODE_NOUACCE;
+ module_param_cb(uacce_mode, &zip_uacce_mode_ops, &uacce_mode, 0444);
+ MODULE_PARM_DESC(uacce_mode, UACCE_MODE_DESC);
+
++static bool pf_q_num_flag;
+ static int pf_q_num_set(const char *val, const struct kernel_param *kp)
+ {
++ pf_q_num_flag = true;
++
+ return q_num_set(val, kp, PCI_DEVICE_ID_HUAWEI_ZIP_PF);
+ }
+
+@@ -849,8 +852,7 @@ static int hisi_zip_debugfs_init(struct hisi_qm *qm)
+ qm->debug.sqe_mask_offset = HZIP_SQE_MASK_OFFSET;
+ qm->debug.sqe_mask_len = HZIP_SQE_MASK_LEN;
+ qm->debug.debug_root = dev_d;
+- ret = hisi_qm_diff_regs_init(qm, hzip_diff_regs,
+- ARRAY_SIZE(hzip_diff_regs));
++ ret = hisi_qm_regs_debugfs_init(qm, hzip_diff_regs, ARRAY_SIZE(hzip_diff_regs));
+ if (ret) {
+ dev_warn(dev, "Failed to init ZIP diff regs!\n");
+ goto debugfs_remove;
+@@ -869,7 +871,7 @@ static int hisi_zip_debugfs_init(struct hisi_qm *qm)
+ return 0;
+
+ failed_to_create:
+- hisi_qm_diff_regs_uninit(qm, ARRAY_SIZE(hzip_diff_regs));
++ hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(hzip_diff_regs));
+ debugfs_remove:
+ debugfs_remove_recursive(hzip_debugfs_root);
+ return ret;
+@@ -895,7 +897,7 @@ static void hisi_zip_debug_regs_clear(struct hisi_qm *qm)
+
+ static void hisi_zip_debugfs_exit(struct hisi_qm *qm)
+ {
+- hisi_qm_diff_regs_uninit(qm, ARRAY_SIZE(hzip_diff_regs));
++ hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(hzip_diff_regs));
+
+ debugfs_remove_recursive(qm->debug.debug_root);
+
+@@ -1141,6 +1143,8 @@ static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
+ qm->qp_num = pf_q_num;
+ qm->debug.curr_qm_qp_num = pf_q_num;
+ qm->qm_list = &zip_devices;
++ if (pf_q_num_flag)
++ set_bit(QM_MODULE_PARAM, &qm->misc_ctl);
+ } else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V1) {
+ /*
+ * have no way to get qm configure in VM in v1 hardware,
+diff --git a/drivers/crypto/qat/qat_common/Makefile b/drivers/crypto/qat/qat_common/Makefile
+index 80919cfcc29da..b0587d03eac29 100644
+--- a/drivers/crypto/qat/qat_common/Makefile
++++ b/drivers/crypto/qat/qat_common/Makefile
+@@ -19,7 +19,8 @@ intel_qat-objs := adf_cfg.o \
+ qat_asym_algs.o \
+ qat_algs_send.o \
+ qat_uclo.o \
+- qat_hal.o
++ qat_hal.o \
++ qat_bl.o
+
+ intel_qat-$(CONFIG_DEBUG_FS) += adf_transport_debug.o
+ intel_qat-$(CONFIG_PCI_IOV) += adf_sriov.o adf_vf_isr.o adf_pfvf_utils.o \
+diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h
+index 20f50d0e65f89..ad01d99e6e2ba 100644
+--- a/drivers/crypto/qat/qat_common/adf_accel_devices.h
++++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h
+@@ -27,7 +27,7 @@
+ #define ADF_PCI_MAX_BARS 3
+ #define ADF_DEVICE_NAME_LENGTH 32
+ #define ADF_ETR_MAX_RINGS_PER_BANK 16
+-#define ADF_MAX_MSIX_VECTOR_NAME 16
++#define ADF_MAX_MSIX_VECTOR_NAME 48
+ #define ADF_DEVICE_NAME_PREFIX "qat_"
+
+ enum adf_accel_capabilities {
+diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h
+index bff613eec5c4b..d2bc2361cd069 100644
+--- a/drivers/crypto/qat/qat_common/adf_common_drv.h
++++ b/drivers/crypto/qat/qat_common/adf_common_drv.h
+@@ -25,6 +25,7 @@
+ #define ADF_STATUS_AE_STARTED 6
+ #define ADF_STATUS_PF_RUNNING 7
+ #define ADF_STATUS_IRQ_ALLOCATED 8
++#define ADF_STATUS_CRYPTO_ALGS_REGISTERED 9
+
+ enum adf_dev_reset_mode {
+ ADF_DEV_RESET_ASYNC = 0,
+diff --git a/drivers/crypto/qat/qat_common/adf_init.c b/drivers/crypto/qat/qat_common/adf_init.c
+index d6f3314246179..2e3481270c4ba 100644
+--- a/drivers/crypto/qat/qat_common/adf_init.c
++++ b/drivers/crypto/qat/qat_common/adf_init.c
+@@ -209,6 +209,8 @@ int adf_dev_start(struct adf_accel_dev *accel_dev)
+ clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
+ return -EFAULT;
+ }
++ set_bit(ADF_STATUS_CRYPTO_ALGS_REGISTERED, &accel_dev->status);
++
+ return 0;
+ }
+ EXPORT_SYMBOL_GPL(adf_dev_start);
+@@ -237,10 +239,12 @@ void adf_dev_stop(struct adf_accel_dev *accel_dev)
+ clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
+ clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
+
+- if (!list_empty(&accel_dev->crypto_list)) {
++ if (!list_empty(&accel_dev->crypto_list) &&
++ test_bit(ADF_STATUS_CRYPTO_ALGS_REGISTERED, &accel_dev->status)) {
+ qat_algs_unregister();
+ qat_asym_algs_unregister();
+ }
++ clear_bit(ADF_STATUS_CRYPTO_ALGS_REGISTERED, &accel_dev->status);
+
+ list_for_each(list_itr, &service_table) {
+ service = list_entry(list_itr, struct service_hndl, list);
+diff --git a/drivers/crypto/qat/qat_common/adf_sysfs.c b/drivers/crypto/qat/qat_common/adf_sysfs.c
+index 3eb6611ab1b11..81b2ecfcc8060 100644
+--- a/drivers/crypto/qat/qat_common/adf_sysfs.c
++++ b/drivers/crypto/qat/qat_common/adf_sysfs.c
+@@ -61,7 +61,9 @@ static ssize_t state_store(struct device *dev, struct device_attribute *attr,
+ dev_info(dev, "Starting device qat_dev%d\n", accel_id);
+
+ ret = adf_dev_up(accel_dev, true);
+- if (ret < 0) {
++ if (ret == -EALREADY) {
++ break;
++ } else if (ret) {
+ dev_err(dev, "Failed to start device qat_dev%d\n",
+ accel_id);
+ adf_dev_down(accel_dev, true);
+diff --git a/drivers/crypto/qat/qat_common/adf_transport_debug.c b/drivers/crypto/qat/qat_common/adf_transport_debug.c
+index 08bca1c506c0e..e2dd568b87b51 100644
+--- a/drivers/crypto/qat/qat_common/adf_transport_debug.c
++++ b/drivers/crypto/qat/qat_common/adf_transport_debug.c
+@@ -90,7 +90,7 @@ DEFINE_SEQ_ATTRIBUTE(adf_ring_debug);
+ int adf_ring_debugfs_add(struct adf_etr_ring_data *ring, const char *name)
+ {
+ struct adf_etr_ring_debug_entry *ring_debug;
+- char entry_name[8];
++ char entry_name[16];
+
+ ring_debug = kzalloc(sizeof(*ring_debug), GFP_KERNEL);
+ if (!ring_debug)
+@@ -192,7 +192,7 @@ int adf_bank_debugfs_add(struct adf_etr_bank_data *bank)
+ {
+ struct adf_accel_dev *accel_dev = bank->accel_dev;
+ struct dentry *parent = accel_dev->transport->debug;
+- char name[8];
++ char name[16];
+
+ snprintf(name, sizeof(name), "bank_%02d", bank->bank_number);
+ bank->bank_debug_dir = debugfs_create_dir(name, parent);
+diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
+index f56ee4cc5ae8b..b61ada5591586 100644
+--- a/drivers/crypto/qat/qat_common/qat_algs.c
++++ b/drivers/crypto/qat/qat_common/qat_algs.c
+@@ -23,6 +23,7 @@
+ #include "icp_qat_hw.h"
+ #include "icp_qat_fw.h"
+ #include "icp_qat_fw_la.h"
++#include "qat_bl.h"
+
+ #define QAT_AES_HW_CONFIG_ENC(alg, mode) \
+ ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
+@@ -663,189 +664,6 @@ static int qat_alg_aead_setkey(struct crypto_aead *tfm, const u8 *key,
+ return qat_alg_aead_newkey(tfm, key, keylen);
+ }
+
+-static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
+- struct qat_crypto_request *qat_req)
+-{
+- struct device *dev = &GET_DEV(inst->accel_dev);
+- struct qat_alg_buf_list *bl = qat_req->buf.bl;
+- struct qat_alg_buf_list *blout = qat_req->buf.blout;
+- dma_addr_t blp = qat_req->buf.blp;
+- dma_addr_t blpout = qat_req->buf.bloutp;
+- size_t sz = qat_req->buf.sz;
+- size_t sz_out = qat_req->buf.sz_out;
+- int bl_dma_dir;
+- int i;
+-
+- bl_dma_dir = blp != blpout ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
+-
+- for (i = 0; i < bl->num_bufs; i++)
+- dma_unmap_single(dev, bl->bufers[i].addr,
+- bl->bufers[i].len, bl_dma_dir);
+-
+- dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
+-
+- if (!qat_req->buf.sgl_src_valid)
+- kfree(bl);
+-
+- if (blp != blpout) {
+- /* If out of place operation dma unmap only data */
+- int bufless = blout->num_bufs - blout->num_mapped_bufs;
+-
+- for (i = bufless; i < blout->num_bufs; i++) {
+- dma_unmap_single(dev, blout->bufers[i].addr,
+- blout->bufers[i].len,
+- DMA_FROM_DEVICE);
+- }
+- dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
+-
+- if (!qat_req->buf.sgl_dst_valid)
+- kfree(blout);
+- }
+-}
+-
+-static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
+- struct scatterlist *sgl,
+- struct scatterlist *sglout,
+- struct qat_crypto_request *qat_req,
+- gfp_t flags)
+-{
+- struct device *dev = &GET_DEV(inst->accel_dev);
+- int i, sg_nctr = 0;
+- int n = sg_nents(sgl);
+- struct qat_alg_buf_list *bufl;
+- struct qat_alg_buf_list *buflout = NULL;
+- dma_addr_t blp = DMA_MAPPING_ERROR;
+- dma_addr_t bloutp = DMA_MAPPING_ERROR;
+- struct scatterlist *sg;
+- size_t sz_out, sz = struct_size(bufl, bufers, n);
+- int node = dev_to_node(&GET_DEV(inst->accel_dev));
+- int bufl_dma_dir;
+-
+- if (unlikely(!n))
+- return -EINVAL;
+-
+- qat_req->buf.sgl_src_valid = false;
+- qat_req->buf.sgl_dst_valid = false;
+-
+- if (n > QAT_MAX_BUFF_DESC) {
+- bufl = kzalloc_node(sz, flags, node);
+- if (unlikely(!bufl))
+- return -ENOMEM;
+- } else {
+- bufl = &qat_req->buf.sgl_src.sgl_hdr;
+- memset(bufl, 0, sizeof(struct qat_alg_buf_list));
+- qat_req->buf.sgl_src_valid = true;
+- }
+-
+- bufl_dma_dir = sgl != sglout ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
+-
+- for_each_sg(sgl, sg, n, i)
+- bufl->bufers[i].addr = DMA_MAPPING_ERROR;
+-
+- for_each_sg(sgl, sg, n, i) {
+- int y = sg_nctr;
+-
+- if (!sg->length)
+- continue;
+-
+- bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
+- sg->length,
+- bufl_dma_dir);
+- bufl->bufers[y].len = sg->length;
+- if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
+- goto err_in;
+- sg_nctr++;
+- }
+- bufl->num_bufs = sg_nctr;
+- blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
+- if (unlikely(dma_mapping_error(dev, blp)))
+- goto err_in;
+- qat_req->buf.bl = bufl;
+- qat_req->buf.blp = blp;
+- qat_req->buf.sz = sz;
+- /* Handle out of place operation */
+- if (sgl != sglout) {
+- struct qat_alg_buf *bufers;
+-
+- n = sg_nents(sglout);
+- sz_out = struct_size(buflout, bufers, n);
+- sg_nctr = 0;
+-
+- if (n > QAT_MAX_BUFF_DESC) {
+- buflout = kzalloc_node(sz_out, flags, node);
+- if (unlikely(!buflout))
+- goto err_in;
+- } else {
+- buflout = &qat_req->buf.sgl_dst.sgl_hdr;
+- memset(buflout, 0, sizeof(struct qat_alg_buf_list));
+- qat_req->buf.sgl_dst_valid = true;
+- }
+-
+- bufers = buflout->bufers;
+- for_each_sg(sglout, sg, n, i)
+- bufers[i].addr = DMA_MAPPING_ERROR;
+-
+- for_each_sg(sglout, sg, n, i) {
+- int y = sg_nctr;
+-
+- if (!sg->length)
+- continue;
+-
+- bufers[y].addr = dma_map_single(dev, sg_virt(sg),
+- sg->length,
+- DMA_FROM_DEVICE);
+- if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
+- goto err_out;
+- bufers[y].len = sg->length;
+- sg_nctr++;
+- }
+- buflout->num_bufs = sg_nctr;
+- buflout->num_mapped_bufs = sg_nctr;
+- bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
+- if (unlikely(dma_mapping_error(dev, bloutp)))
+- goto err_out;
+- qat_req->buf.blout = buflout;
+- qat_req->buf.bloutp = bloutp;
+- qat_req->buf.sz_out = sz_out;
+- } else {
+- /* Otherwise set the src and dst to the same address */
+- qat_req->buf.bloutp = qat_req->buf.blp;
+- qat_req->buf.sz_out = 0;
+- }
+- return 0;
+-
+-err_out:
+- if (!dma_mapping_error(dev, bloutp))
+- dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
+-
+- n = sg_nents(sglout);
+- for (i = 0; i < n; i++)
+- if (!dma_mapping_error(dev, buflout->bufers[i].addr))
+- dma_unmap_single(dev, buflout->bufers[i].addr,
+- buflout->bufers[i].len,
+- DMA_FROM_DEVICE);
+-
+- if (!qat_req->buf.sgl_dst_valid)
+- kfree(buflout);
+-
+-err_in:
+- if (!dma_mapping_error(dev, blp))
+- dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
+-
+- n = sg_nents(sgl);
+- for (i = 0; i < n; i++)
+- if (!dma_mapping_error(dev, bufl->bufers[i].addr))
+- dma_unmap_single(dev, bufl->bufers[i].addr,
+- bufl->bufers[i].len,
+- bufl_dma_dir);
+-
+- if (!qat_req->buf.sgl_src_valid)
+- kfree(bufl);
+-
+- dev_err(dev, "Failed to map buf for dma\n");
+- return -ENOMEM;
+-}
+-
+ static void qat_aead_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
+ struct qat_crypto_request *qat_req)
+ {
+@@ -855,7 +673,7 @@ static void qat_aead_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
+ u8 stat_filed = qat_resp->comn_resp.comn_status;
+ int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_filed);
+
+- qat_alg_free_bufl(inst, qat_req);
++ qat_bl_free_bufl(inst->accel_dev, &qat_req->buf);
+ if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
+ res = -EBADMSG;
+ areq->base.complete(&areq->base, res);
+@@ -925,7 +743,7 @@ static void qat_skcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
+ u8 stat_filed = qat_resp->comn_resp.comn_status;
+ int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_filed);
+
+- qat_alg_free_bufl(inst, qat_req);
++ qat_bl_free_bufl(inst->accel_dev, &qat_req->buf);
+ if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
+ res = -EINVAL;
+
+@@ -981,7 +799,8 @@ static int qat_alg_aead_dec(struct aead_request *areq)
+ if (cipher_len % AES_BLOCK_SIZE != 0)
+ return -EINVAL;
+
+- ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req, f);
++ ret = qat_bl_sgl_to_bufl(ctx->inst->accel_dev, areq->src, areq->dst,
++ &qat_req->buf, NULL, f);
+ if (unlikely(ret))
+ return ret;
+
+@@ -1003,7 +822,7 @@ static int qat_alg_aead_dec(struct aead_request *areq)
+
+ ret = qat_alg_send_sym_message(qat_req, ctx->inst, &areq->base);
+ if (ret == -ENOSPC)
+- qat_alg_free_bufl(ctx->inst, qat_req);
++ qat_bl_free_bufl(ctx->inst->accel_dev, &qat_req->buf);
+
+ return ret;
+ }
+@@ -1024,7 +843,8 @@ static int qat_alg_aead_enc(struct aead_request *areq)
+ if (areq->cryptlen % AES_BLOCK_SIZE != 0)
+ return -EINVAL;
+
+- ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req, f);
++ ret = qat_bl_sgl_to_bufl(ctx->inst->accel_dev, areq->src, areq->dst,
++ &qat_req->buf, NULL, f);
+ if (unlikely(ret))
+ return ret;
+
+@@ -1048,7 +868,7 @@ static int qat_alg_aead_enc(struct aead_request *areq)
+
+ ret = qat_alg_send_sym_message(qat_req, ctx->inst, &areq->base);
+ if (ret == -ENOSPC)
+- qat_alg_free_bufl(ctx->inst, qat_req);
++ qat_bl_free_bufl(ctx->inst->accel_dev, &qat_req->buf);
+
+ return ret;
+ }
+@@ -1209,7 +1029,8 @@ static int qat_alg_skcipher_encrypt(struct skcipher_request *req)
+ if (req->cryptlen == 0)
+ return 0;
+
+- ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req, f);
++ ret = qat_bl_sgl_to_bufl(ctx->inst->accel_dev, req->src, req->dst,
++ &qat_req->buf, NULL, f);
+ if (unlikely(ret))
+ return ret;
+
+@@ -1230,7 +1051,7 @@ static int qat_alg_skcipher_encrypt(struct skcipher_request *req)
+
+ ret = qat_alg_send_sym_message(qat_req, ctx->inst, &req->base);
+ if (ret == -ENOSPC)
+- qat_alg_free_bufl(ctx->inst, qat_req);
++ qat_bl_free_bufl(ctx->inst->accel_dev, &qat_req->buf);
+
+ return ret;
+ }
+@@ -1275,7 +1096,8 @@ static int qat_alg_skcipher_decrypt(struct skcipher_request *req)
+ if (req->cryptlen == 0)
+ return 0;
+
+- ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req, f);
++ ret = qat_bl_sgl_to_bufl(ctx->inst->accel_dev, req->src, req->dst,
++ &qat_req->buf, NULL, f);
+ if (unlikely(ret))
+ return ret;
+
+@@ -1297,7 +1119,7 @@ static int qat_alg_skcipher_decrypt(struct skcipher_request *req)
+
+ ret = qat_alg_send_sym_message(qat_req, ctx->inst, &req->base);
+ if (ret == -ENOSPC)
+- qat_alg_free_bufl(ctx->inst, qat_req);
++ qat_bl_free_bufl(ctx->inst->accel_dev, &qat_req->buf);
+
+ return ret;
+ }
+diff --git a/drivers/crypto/qat/qat_common/qat_algs_send.c b/drivers/crypto/qat/qat_common/qat_algs_send.c
+index ff5b4347f7831..607ed88f4b197 100644
+--- a/drivers/crypto/qat/qat_common/qat_algs_send.c
++++ b/drivers/crypto/qat/qat_common/qat_algs_send.c
+@@ -39,40 +39,44 @@ void qat_alg_send_backlog(struct qat_instance_backlog *backlog)
+ spin_unlock_bh(&backlog->lock);
+ }
+
+-static void qat_alg_backlog_req(struct qat_alg_req *req,
+- struct qat_instance_backlog *backlog)
+-{
+- INIT_LIST_HEAD(&req->list);
+-
+- spin_lock_bh(&backlog->lock);
+- list_add_tail(&req->list, &backlog->list);
+- spin_unlock_bh(&backlog->lock);
+-}
+-
+-static int qat_alg_send_message_maybacklog(struct qat_alg_req *req)
++static bool qat_alg_try_enqueue(struct qat_alg_req *req)
+ {
+ struct qat_instance_backlog *backlog = req->backlog;
+ struct adf_etr_ring_data *tx_ring = req->tx_ring;
+ u32 *fw_req = req->fw_req;
+
+- /* If any request is already backlogged, then add to backlog list */
++ /* Check if any request is already backlogged */
+ if (!list_empty(&backlog->list))
+- goto enqueue;
++ return false;
+
+- /* If ring is nearly full, then add to backlog list */
++ /* Check if ring is nearly full */
+ if (adf_ring_nearly_full(tx_ring))
+- goto enqueue;
++ return false;
+
+- /* If adding request to HW ring fails, then add to backlog list */
++ /* Try to enqueue to HW ring */
+ if (adf_send_message(tx_ring, fw_req))
+- goto enqueue;
++ return false;
+
+- return -EINPROGRESS;
++ return true;
++}
+
+-enqueue:
+- qat_alg_backlog_req(req, backlog);
+
+- return -EBUSY;
++static int qat_alg_send_message_maybacklog(struct qat_alg_req *req)
++{
++ struct qat_instance_backlog *backlog = req->backlog;
++ int ret = -EINPROGRESS;
++
++ if (qat_alg_try_enqueue(req))
++ return ret;
++
++ spin_lock_bh(&backlog->lock);
++ if (!qat_alg_try_enqueue(req)) {
++ list_add_tail(&req->list, &backlog->list);
++ ret = -EBUSY;
++ }
++ spin_unlock_bh(&backlog->lock);
++
++ return ret;
+ }
+
+ int qat_alg_send_message(struct qat_alg_req *req)
+diff --git a/drivers/crypto/qat/qat_common/qat_bl.c b/drivers/crypto/qat/qat_common/qat_bl.c
+new file mode 100644
+index 0000000000000..221a4eb610a38
+--- /dev/null
++++ b/drivers/crypto/qat/qat_common/qat_bl.c
+@@ -0,0 +1,224 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/* Copyright(c) 2014 - 2022 Intel Corporation */
++#include <linux/device.h>
++#include <linux/dma-mapping.h>
++#include <linux/pci.h>
++#include <linux/scatterlist.h>
++#include <linux/slab.h>
++#include <linux/types.h>
++#include "adf_accel_devices.h"
++#include "qat_bl.h"
++#include "qat_crypto.h"
++
++void qat_bl_free_bufl(struct adf_accel_dev *accel_dev,
++ struct qat_request_buffs *buf)
++{
++ struct device *dev = &GET_DEV(accel_dev);
++ struct qat_alg_buf_list *bl = buf->bl;
++ struct qat_alg_buf_list *blout = buf->blout;
++ dma_addr_t blp = buf->blp;
++ dma_addr_t blpout = buf->bloutp;
++ size_t sz = buf->sz;
++ size_t sz_out = buf->sz_out;
++ int bl_dma_dir;
++ int i;
++
++ bl_dma_dir = blp != blpout ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
++
++ for (i = 0; i < bl->num_bufs; i++)
++ dma_unmap_single(dev, bl->bufers[i].addr,
++ bl->bufers[i].len, bl_dma_dir);
++
++ dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
++
++ if (!buf->sgl_src_valid)
++ kfree(bl);
++
++ if (blp != blpout) {
++ for (i = 0; i < blout->num_mapped_bufs; i++) {
++ dma_unmap_single(dev, blout->bufers[i].addr,
++ blout->bufers[i].len,
++ DMA_FROM_DEVICE);
++ }
++ dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
++
++ if (!buf->sgl_dst_valid)
++ kfree(blout);
++ }
++}
++
++static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
++ struct scatterlist *sgl,
++ struct scatterlist *sglout,
++ struct qat_request_buffs *buf,
++ dma_addr_t extra_dst_buff,
++ size_t sz_extra_dst_buff,
++ gfp_t flags)
++{
++ struct device *dev = &GET_DEV(accel_dev);
++ int i, sg_nctr = 0;
++ int n = sg_nents(sgl);
++ struct qat_alg_buf_list *bufl;
++ struct qat_alg_buf_list *buflout = NULL;
++ dma_addr_t blp = DMA_MAPPING_ERROR;
++ dma_addr_t bloutp = DMA_MAPPING_ERROR;
++ struct scatterlist *sg;
++ size_t sz_out, sz = struct_size(bufl, bufers, n);
++ int node = dev_to_node(&GET_DEV(accel_dev));
++ int bufl_dma_dir;
++
++ if (unlikely(!n))
++ return -EINVAL;
++
++ buf->sgl_src_valid = false;
++ buf->sgl_dst_valid = false;
++
++ if (n > QAT_MAX_BUFF_DESC) {
++ bufl = kzalloc_node(sz, flags, node);
++ if (unlikely(!bufl))
++ return -ENOMEM;
++ } else {
++ bufl = &buf->sgl_src.sgl_hdr;
++ memset(bufl, 0, sizeof(struct qat_alg_buf_list));
++ buf->sgl_src_valid = true;
++ }
++
++ bufl_dma_dir = sgl != sglout ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
++
++ for (i = 0; i < n; i++)
++ bufl->bufers[i].addr = DMA_MAPPING_ERROR;
++
++ for_each_sg(sgl, sg, n, i) {
++ int y = sg_nctr;
++
++ if (!sg->length)
++ continue;
++
++ bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
++ sg->length,
++ bufl_dma_dir);
++ bufl->bufers[y].len = sg->length;
++ if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
++ goto err_in;
++ sg_nctr++;
++ }
++ bufl->num_bufs = sg_nctr;
++ blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
++ if (unlikely(dma_mapping_error(dev, blp)))
++ goto err_in;
++ buf->bl = bufl;
++ buf->blp = blp;
++ buf->sz = sz;
++ /* Handle out of place operation */
++ if (sgl != sglout) {
++ struct qat_alg_buf *bufers;
++ int extra_buff = extra_dst_buff ? 1 : 0;
++ int n_sglout = sg_nents(sglout);
++
++ n = n_sglout + extra_buff;
++ sz_out = struct_size(buflout, bufers, n);
++ sg_nctr = 0;
++
++ if (n > QAT_MAX_BUFF_DESC) {
++ buflout = kzalloc_node(sz_out, flags, node);
++ if (unlikely(!buflout))
++ goto err_in;
++ } else {
++ buflout = &buf->sgl_dst.sgl_hdr;
++ memset(buflout, 0, sizeof(struct qat_alg_buf_list));
++ buf->sgl_dst_valid = true;
++ }
++
++ bufers = buflout->bufers;
++ for (i = 0; i < n; i++)
++ bufers[i].addr = DMA_MAPPING_ERROR;
++
++ for_each_sg(sglout, sg, n_sglout, i) {
++ int y = sg_nctr;
++
++ if (!sg->length)
++ continue;
++
++ bufers[y].addr = dma_map_single(dev, sg_virt(sg),
++ sg->length,
++ DMA_FROM_DEVICE);
++ if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
++ goto err_out;
++ bufers[y].len = sg->length;
++ sg_nctr++;
++ }
++ if (extra_buff) {
++ bufers[sg_nctr].addr = extra_dst_buff;
++ bufers[sg_nctr].len = sz_extra_dst_buff;
++ }
++
++ buflout->num_bufs = sg_nctr;
++ buflout->num_bufs += extra_buff;
++ buflout->num_mapped_bufs = sg_nctr;
++ bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
++ if (unlikely(dma_mapping_error(dev, bloutp)))
++ goto err_out;
++ buf->blout = buflout;
++ buf->bloutp = bloutp;
++ buf->sz_out = sz_out;
++ } else {
++ /* Otherwise set the src and dst to the same address */
++ buf->bloutp = buf->blp;
++ buf->sz_out = 0;
++ }
++ return 0;
++
++err_out:
++ if (!dma_mapping_error(dev, bloutp))
++ dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
++
++ n = sg_nents(sglout);
++ for (i = 0; i < n; i++) {
++ if (buflout->bufers[i].addr == extra_dst_buff)
++ break;
++ if (!dma_mapping_error(dev, buflout->bufers[i].addr))
++ dma_unmap_single(dev, buflout->bufers[i].addr,
++ buflout->bufers[i].len,
++ DMA_FROM_DEVICE);
++ }
++
++ if (!buf->sgl_dst_valid)
++ kfree(buflout);
++
++err_in:
++ if (!dma_mapping_error(dev, blp))
++ dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
++
++ n = sg_nents(sgl);
++ for (i = 0; i < n; i++)
++ if (!dma_mapping_error(dev, bufl->bufers[i].addr))
++ dma_unmap_single(dev, bufl->bufers[i].addr,
++ bufl->bufers[i].len,
++ bufl_dma_dir);
++
++ if (!buf->sgl_src_valid)
++ kfree(bufl);
++
++ dev_err(dev, "Failed to map buf for dma\n");
++ return -ENOMEM;
++}
++
++int qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
++ struct scatterlist *sgl,
++ struct scatterlist *sglout,
++ struct qat_request_buffs *buf,
++ struct qat_sgl_to_bufl_params *params,
++ gfp_t flags)
++{
++ dma_addr_t extra_dst_buff = 0;
++ size_t sz_extra_dst_buff = 0;
++
++ if (params) {
++ extra_dst_buff = params->extra_dst_buff;
++ sz_extra_dst_buff = params->sz_extra_dst_buff;
++ }
++
++ return __qat_bl_sgl_to_bufl(accel_dev, sgl, sglout, buf,
++ extra_dst_buff, sz_extra_dst_buff,
++ flags);
++}
+diff --git a/drivers/crypto/qat/qat_common/qat_bl.h b/drivers/crypto/qat/qat_common/qat_bl.h
+new file mode 100644
+index 0000000000000..0c174fee9e645
+--- /dev/null
++++ b/drivers/crypto/qat/qat_common/qat_bl.h
+@@ -0,0 +1,55 @@
++/* SPDX-License-Identifier: GPL-2.0-only */
++/* Copyright(c) 2014 - 2022 Intel Corporation */
++#ifndef QAT_BL_H
++#define QAT_BL_H
++#include <linux/scatterlist.h>
++#include <linux/types.h>
++
++#define QAT_MAX_BUFF_DESC 4
++
++struct qat_alg_buf {
++ u32 len;
++ u32 resrvd;
++ u64 addr;
++} __packed;
++
++struct qat_alg_buf_list {
++ u64 resrvd;
++ u32 num_bufs;
++ u32 num_mapped_bufs;
++ struct qat_alg_buf bufers[];
++} __packed;
++
++struct qat_alg_fixed_buf_list {
++ struct qat_alg_buf_list sgl_hdr;
++ struct qat_alg_buf descriptors[QAT_MAX_BUFF_DESC];
++} __packed __aligned(64);
++
++struct qat_request_buffs {
++ struct qat_alg_buf_list *bl;
++ dma_addr_t blp;
++ struct qat_alg_buf_list *blout;
++ dma_addr_t bloutp;
++ size_t sz;
++ size_t sz_out;
++ bool sgl_src_valid;
++ bool sgl_dst_valid;
++ struct qat_alg_fixed_buf_list sgl_src;
++ struct qat_alg_fixed_buf_list sgl_dst;
++};
++
++struct qat_sgl_to_bufl_params {
++ dma_addr_t extra_dst_buff;
++ size_t sz_extra_dst_buff;
++};
++
++void qat_bl_free_bufl(struct adf_accel_dev *accel_dev,
++ struct qat_request_buffs *buf);
++int qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
++ struct scatterlist *sgl,
++ struct scatterlist *sglout,
++ struct qat_request_buffs *buf,
++ struct qat_sgl_to_bufl_params *params,
++ gfp_t flags);
++
++#endif
+diff --git a/drivers/crypto/qat/qat_common/qat_crypto.h b/drivers/crypto/qat/qat_common/qat_crypto.h
+index df3c738ce323a..bb116357a5684 100644
+--- a/drivers/crypto/qat/qat_common/qat_crypto.h
++++ b/drivers/crypto/qat/qat_common/qat_crypto.h
+@@ -8,6 +8,7 @@
+ #include <linux/slab.h>
+ #include "adf_accel_devices.h"
+ #include "icp_qat_fw_la.h"
++#include "qat_bl.h"
+
+ struct qat_instance_backlog {
+ struct list_head list;
+@@ -35,39 +36,6 @@ struct qat_crypto_instance {
+ struct qat_instance_backlog backlog;
+ };
+
+-#define QAT_MAX_BUFF_DESC 4
+-
+-struct qat_alg_buf {
+- u32 len;
+- u32 resrvd;
+- u64 addr;
+-} __packed;
+-
+-struct qat_alg_buf_list {
+- u64 resrvd;
+- u32 num_bufs;
+- u32 num_mapped_bufs;
+- struct qat_alg_buf bufers[];
+-} __packed;
+-
+-struct qat_alg_fixed_buf_list {
+- struct qat_alg_buf_list sgl_hdr;
+- struct qat_alg_buf descriptors[QAT_MAX_BUFF_DESC];
+-} __packed __aligned(64);
+-
+-struct qat_crypto_request_buffs {
+- struct qat_alg_buf_list *bl;
+- dma_addr_t blp;
+- struct qat_alg_buf_list *blout;
+- dma_addr_t bloutp;
+- size_t sz;
+- size_t sz_out;
+- bool sgl_src_valid;
+- bool sgl_dst_valid;
+- struct qat_alg_fixed_buf_list sgl_src;
+- struct qat_alg_fixed_buf_list sgl_dst;
+-};
+-
+ struct qat_crypto_request;
+
+ struct qat_crypto_request {
+@@ -80,7 +48,7 @@ struct qat_crypto_request {
+ struct aead_request *aead_req;
+ struct skcipher_request *skcipher_req;
+ };
+- struct qat_crypto_request_buffs buf;
++ struct qat_request_buffs buf;
+ void (*cb)(struct icp_qat_fw_la_resp *resp,
+ struct qat_crypto_request *req);
+ union {
+diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c
+index 20ce488a77540..03cf99cce7047 100644
+--- a/drivers/cxl/core/memdev.c
++++ b/drivers/cxl/core/memdev.c
+@@ -214,8 +214,8 @@ static void cxl_memdev_unregister(void *_cxlmd)
+ struct cxl_memdev *cxlmd = _cxlmd;
+ struct device *dev = &cxlmd->dev;
+
+- cxl_memdev_shutdown(dev);
+ cdev_device_del(&cxlmd->cdev, dev);
++ cxl_memdev_shutdown(dev);
+ put_device(dev);
+ }
+
+diff --git a/drivers/devfreq/event/rockchip-dfi.c b/drivers/devfreq/event/rockchip-dfi.c
+index 39ac069cabc75..74893c06aa087 100644
+--- a/drivers/devfreq/event/rockchip-dfi.c
++++ b/drivers/devfreq/event/rockchip-dfi.c
+@@ -193,14 +193,15 @@ static int rockchip_dfi_probe(struct platform_device *pdev)
+ return dev_err_probe(dev, PTR_ERR(data->clk),
+ "Cannot get the clk pclk_ddr_mon\n");
+
+- /* try to find the optional reference to the pmu syscon */
+ node = of_parse_phandle(np, "rockchip,pmu", 0);
+- if (node) {
+- data->regmap_pmu = syscon_node_to_regmap(node);
+- of_node_put(node);
+- if (IS_ERR(data->regmap_pmu))
+- return PTR_ERR(data->regmap_pmu);
+- }
++ if (!node)
++ return dev_err_probe(&pdev->dev, -ENODEV, "Can't find pmu_grf registers\n");
++
++ data->regmap_pmu = syscon_node_to_regmap(node);
++ of_node_put(node);
++ if (IS_ERR(data->regmap_pmu))
++ return PTR_ERR(data->regmap_pmu);
++
+ data->dev = dev;
+
+ desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
+diff --git a/drivers/dma/idxd/Makefile b/drivers/dma/idxd/Makefile
+index a1e9f2b3a37cc..817ffa95a9b11 100644
+--- a/drivers/dma/idxd/Makefile
++++ b/drivers/dma/idxd/Makefile
+@@ -1,12 +1,12 @@
+ ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE=IDXD
+
++obj-$(CONFIG_INTEL_IDXD_BUS) += idxd_bus.o
++idxd_bus-y := bus.o
++
+ obj-$(CONFIG_INTEL_IDXD) += idxd.o
+ idxd-y := init.o irq.o device.o sysfs.o submit.o dma.o cdev.o
+
+ idxd-$(CONFIG_INTEL_IDXD_PERFMON) += perfmon.o
+
+-obj-$(CONFIG_INTEL_IDXD_BUS) += idxd_bus.o
+-idxd_bus-y := bus.o
+-
+ obj-$(CONFIG_INTEL_IDXD_COMPAT) += idxd_compat.o
+ idxd_compat-y := compat.o
+diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
+index 22a392fe6d32b..04c1f2ee874a5 100644
+--- a/drivers/dma/pxa_dma.c
++++ b/drivers/dma/pxa_dma.c
+@@ -722,7 +722,6 @@ static void pxad_free_desc(struct virt_dma_desc *vd)
+ dma_addr_t dma;
+ struct pxad_desc_sw *sw_desc = to_pxad_sw_desc(vd);
+
+- BUG_ON(sw_desc->nb_desc == 0);
+ for (i = sw_desc->nb_desc - 1; i >= 0; i--) {
+ if (i > 0)
+ dma = sw_desc->hw_desc[i - 1]->ddadr;
+diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c
+index fa06d7e6d8e38..7ec6e5d728b03 100644
+--- a/drivers/dma/ti/edma.c
++++ b/drivers/dma/ti/edma.c
+@@ -2410,7 +2410,7 @@ static int edma_probe(struct platform_device *pdev)
+ if (irq < 0 && node)
+ irq = irq_of_parse_and_map(node, 0);
+
+- if (irq >= 0) {
++ if (irq > 0) {
+ irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccint",
+ dev_name(dev));
+ ret = devm_request_irq(dev, irq, dma_irq_handler, 0, irq_name,
+@@ -2426,7 +2426,7 @@ static int edma_probe(struct platform_device *pdev)
+ if (irq < 0 && node)
+ irq = irq_of_parse_and_map(node, 2);
+
+- if (irq >= 0) {
++ if (irq > 0) {
+ irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccerrint",
+ dev_name(dev));
+ ret = devm_request_irq(dev, irq, dma_ccerr_handler, 0, irq_name,
+diff --git a/drivers/firmware/arm_ffa/bus.c b/drivers/firmware/arm_ffa/bus.c
+index b9ce784f087df..248594b59c64d 100644
+--- a/drivers/firmware/arm_ffa/bus.c
++++ b/drivers/firmware/arm_ffa/bus.c
+@@ -193,6 +193,7 @@ struct ffa_device *ffa_device_register(const uuid_t *uuid, int vm_id,
+ dev->release = ffa_release_device;
+ dev_set_name(&ffa_dev->dev, "arm-ffa-%d", id);
+
++ ffa_dev->id = id;
+ ffa_dev->vm_id = vm_id;
+ ffa_dev->ops = ops;
+ uuid_copy(&ffa_dev->uuid, uuid);
+diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c
+index 21481fc05800f..e9f86b7573012 100644
+--- a/drivers/firmware/arm_ffa/driver.c
++++ b/drivers/firmware/arm_ffa/driver.c
+@@ -668,17 +668,9 @@ static int ffa_partition_info_get(const char *uuid_str,
+ return 0;
+ }
+
+-static void _ffa_mode_32bit_set(struct ffa_device *dev)
+-{
+- dev->mode_32bit = true;
+-}
+-
+ static void ffa_mode_32bit_set(struct ffa_device *dev)
+ {
+- if (drv_info->version > FFA_VERSION_1_0)
+- return;
+-
+- _ffa_mode_32bit_set(dev);
++ dev->mode_32bit = true;
+ }
+
+ static int ffa_sync_send_receive(struct ffa_device *dev,
+@@ -787,7 +779,7 @@ static void ffa_setup_partitions(void)
+
+ if (drv_info->version > FFA_VERSION_1_0 &&
+ !(tpbuf->properties & FFA_PARTITION_AARCH64_EXEC))
+- _ffa_mode_32bit_set(ffa_dev);
++ ffa_mode_32bit_set(ffa_dev);
+ }
+ kfree(pbuf);
+ }
+diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c
+index 4c550cfbc086c..597d1a367d96d 100644
+--- a/drivers/firmware/ti_sci.c
++++ b/drivers/firmware/ti_sci.c
+@@ -190,19 +190,6 @@ static int ti_sci_debugfs_create(struct platform_device *pdev,
+ return 0;
+ }
+
+-/**
+- * ti_sci_debugfs_destroy() - clean up log debug file
+- * @pdev: platform device pointer
+- * @info: Pointer to SCI entity information
+- */
+-static void ti_sci_debugfs_destroy(struct platform_device *pdev,
+- struct ti_sci_info *info)
+-{
+- if (IS_ERR(info->debug_region))
+- return;
+-
+- debugfs_remove(info->d);
+-}
+ #else /* CONFIG_DEBUG_FS */
+ static inline int ti_sci_debugfs_create(struct platform_device *dev,
+ struct ti_sci_info *info)
+@@ -3451,43 +3438,12 @@ out:
+ return ret;
+ }
+
+-static int ti_sci_remove(struct platform_device *pdev)
+-{
+- struct ti_sci_info *info;
+- struct device *dev = &pdev->dev;
+- int ret = 0;
+-
+- of_platform_depopulate(dev);
+-
+- info = platform_get_drvdata(pdev);
+-
+- if (info->nb.notifier_call)
+- unregister_restart_handler(&info->nb);
+-
+- mutex_lock(&ti_sci_list_mutex);
+- if (info->users)
+- ret = -EBUSY;
+- else
+- list_del(&info->node);
+- mutex_unlock(&ti_sci_list_mutex);
+-
+- if (!ret) {
+- ti_sci_debugfs_destroy(pdev, info);
+-
+- /* Safe to free channels since no more users */
+- mbox_free_channel(info->chan_tx);
+- mbox_free_channel(info->chan_rx);
+- }
+-
+- return ret;
+-}
+-
+ static struct platform_driver ti_sci_driver = {
+ .probe = ti_sci_probe,
+- .remove = ti_sci_remove,
+ .driver = {
+ .name = "ti-sci",
+ .of_match_table = of_match_ptr(ti_sci_of_match),
++ .suppress_bind_attrs = true,
+ },
+ };
+ module_platform_driver(ti_sci_driver);
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+index 63feea08904cb..d7e758c86a0b8 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+@@ -487,11 +487,11 @@ svm_range_validate_svm_bo(struct amdgpu_device *adev, struct svm_range *prange)
+
+ /* We need a new svm_bo. Spin-loop to wait for concurrent
+ * svm_range_bo_release to finish removing this range from
+- * its range list. After this, it is safe to reuse the
+- * svm_bo pointer and svm_bo_list head.
++ * its range list and set prange->svm_bo to null. After this,
++ * it is safe to reuse the svm_bo pointer and svm_bo_list head.
+ */
+- while (!list_empty_careful(&prange->svm_bo_list))
+- ;
++ while (!list_empty_careful(&prange->svm_bo_list) || prange->svm_bo)
++ cond_resched();
+
+ return false;
+ }
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 339f1f5a08339..42e266e074d1d 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -9626,16 +9626,27 @@ static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state,
+ }
+ }
+
++static void
++dm_get_plane_scale(struct drm_plane_state *plane_state,
++ int *out_plane_scale_w, int *out_plane_scale_h)
++{
++ int plane_src_w, plane_src_h;
++
++ dm_get_oriented_plane_size(plane_state, &plane_src_w, &plane_src_h);
++ *out_plane_scale_w = plane_state->crtc_w * 1000 / plane_src_w;
++ *out_plane_scale_h = plane_state->crtc_h * 1000 / plane_src_h;
++}
++
+ static int dm_check_crtc_cursor(struct drm_atomic_state *state,
+ struct drm_crtc *crtc,
+ struct drm_crtc_state *new_crtc_state)
+ {
+- struct drm_plane *cursor = crtc->cursor, *underlying;
++ struct drm_plane *cursor = crtc->cursor, *plane, *underlying;
++ struct drm_plane_state *old_plane_state, *new_plane_state;
+ struct drm_plane_state *new_cursor_state, *new_underlying_state;
+ int i;
+ int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
+- int cursor_src_w, cursor_src_h;
+- int underlying_src_w, underlying_src_h;
++ bool any_relevant_change = false;
+
+ /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
+ * cursor per pipe but it's going to inherit the scaling and
+@@ -9643,13 +9654,50 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state,
+ * blending properties match the underlying planes'.
+ */
+
+- new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
+- if (!new_cursor_state || !new_cursor_state->fb)
++ /* If no plane was enabled or changed scaling, no need to check again */
++ for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
++ int new_scale_w, new_scale_h, old_scale_w, old_scale_h;
++
++ if (!new_plane_state || !new_plane_state->fb || new_plane_state->crtc != crtc)
++ continue;
++
++ if (!old_plane_state || !old_plane_state->fb || old_plane_state->crtc != crtc) {
++ any_relevant_change = true;
++ break;
++ }
++
++ if (new_plane_state->fb == old_plane_state->fb &&
++ new_plane_state->crtc_w == old_plane_state->crtc_w &&
++ new_plane_state->crtc_h == old_plane_state->crtc_h)
++ continue;
++
++ dm_get_plane_scale(new_plane_state, &new_scale_w, &new_scale_h);
++ dm_get_plane_scale(old_plane_state, &old_scale_w, &old_scale_h);
++
++ if (new_scale_w != old_scale_w || new_scale_h != old_scale_h) {
++ any_relevant_change = true;
++ break;
++ }
++ }
++
++ if (!any_relevant_change)
++ return 0;
++
++ new_cursor_state = drm_atomic_get_plane_state(state, cursor);
++ if (IS_ERR(new_cursor_state))
++ return PTR_ERR(new_cursor_state);
++
++ if (!new_cursor_state->fb)
+ return 0;
+
+- dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h);
+- cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w;
+- cursor_scale_h = new_cursor_state->crtc_h * 1000 / cursor_src_h;
++ dm_get_plane_scale(new_cursor_state, &cursor_scale_w, &cursor_scale_h);
++
++ /* Need to check all enabled planes, even if this commit doesn't change
++ * their state
++ */
++ i = drm_atomic_add_affected_planes(state, crtc);
++ if (i)
++ return i;
+
+ for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
+ /* Narrow down to non-cursor planes on the same CRTC as the cursor */
+@@ -9660,10 +9708,8 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state,
+ if (!new_underlying_state->fb)
+ continue;
+
+- dm_get_oriented_plane_size(new_underlying_state,
+- &underlying_src_w, &underlying_src_h);
+- underlying_scale_w = new_underlying_state->crtc_w * 1000 / underlying_src_w;
+- underlying_scale_h = new_underlying_state->crtc_h * 1000 / underlying_src_h;
++ dm_get_plane_scale(new_underlying_state,
++ &underlying_scale_w, &underlying_scale_h);
+
+ if (cursor_scale_w != underlying_scale_w ||
+ cursor_scale_h != underlying_scale_h) {
+diff --git a/drivers/gpu/drm/bridge/ite-it66121.c b/drivers/gpu/drm/bridge/ite-it66121.c
+index 4f6f1deba28c6..9d7f3c99748b4 100644
+--- a/drivers/gpu/drm/bridge/ite-it66121.c
++++ b/drivers/gpu/drm/bridge/ite-it66121.c
+@@ -1464,10 +1464,14 @@ static int it66121_audio_get_eld(struct device *dev, void *data,
+ struct it66121_ctx *ctx = dev_get_drvdata(dev);
+
+ mutex_lock(&ctx->lock);
+-
+- memcpy(buf, ctx->connector->eld,
+- min(sizeof(ctx->connector->eld), len));
+-
++ if (!ctx->connector) {
++ /* Pass en empty ELD if connector not available */
++ dev_dbg(dev, "No connector present, passing empty EDID data");
++ memset(buf, 0, len);
++ } else {
++ memcpy(buf, ctx->connector->eld,
++ min(sizeof(ctx->connector->eld), len));
++ }
+ mutex_unlock(&ctx->lock);
+
+ return 0;
+diff --git a/drivers/gpu/drm/bridge/lontium-lt8912b.c b/drivers/gpu/drm/bridge/lontium-lt8912b.c
+index 5e419934d2a39..ac76c23635892 100644
+--- a/drivers/gpu/drm/bridge/lontium-lt8912b.c
++++ b/drivers/gpu/drm/bridge/lontium-lt8912b.c
+@@ -45,7 +45,6 @@ struct lt8912 {
+
+ u8 data_lanes;
+ bool is_power_on;
+- bool is_attached;
+ };
+
+ static int lt8912_write_init_config(struct lt8912 *lt)
+@@ -516,14 +515,27 @@ static int lt8912_attach_dsi(struct lt8912 *lt)
+ return 0;
+ }
+
++static void lt8912_bridge_hpd_cb(void *data, enum drm_connector_status status)
++{
++ struct lt8912 *lt = data;
++
++ if (lt->bridge.dev)
++ drm_helper_hpd_irq_event(lt->bridge.dev);
++}
++
+ static int lt8912_bridge_connector_init(struct drm_bridge *bridge)
+ {
+ int ret;
+ struct lt8912 *lt = bridge_to_lt8912(bridge);
+ struct drm_connector *connector = &lt->connector;
+
+- connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+- DRM_CONNECTOR_POLL_DISCONNECT;
++ if (lt->hdmi_port->ops & DRM_BRIDGE_OP_HPD) {
++ drm_bridge_hpd_enable(lt->hdmi_port, lt8912_bridge_hpd_cb, lt);
++ connector->polled = DRM_CONNECTOR_POLL_HPD;
++ } else {
++ connector->polled = DRM_CONNECTOR_POLL_CONNECT |
++ DRM_CONNECTOR_POLL_DISCONNECT;
++ }
+
+ ret = drm_connector_init(bridge->dev, connector,
+ &lt8912_connector_funcs,
+@@ -546,6 +558,13 @@ static int lt8912_bridge_attach(struct drm_bridge *bridge,
+ struct lt8912 *lt = bridge_to_lt8912(bridge);
+ int ret;
+
++ ret = drm_bridge_attach(bridge->encoder, lt->hdmi_port, bridge,
++ DRM_BRIDGE_ATTACH_NO_CONNECTOR);
++ if (ret < 0) {
++ dev_err(lt->dev, "Failed to attach next bridge (%d)\n", ret);
++ return ret;
++ }
++
+ if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) {
+ ret = lt8912_bridge_connector_init(bridge);
+ if (ret) {
+@@ -562,8 +581,6 @@ static int lt8912_bridge_attach(struct drm_bridge *bridge,
+ if (ret)
+ goto error;
+
+- lt->is_attached = true;
+-
+ return 0;
+
+ error:
+@@ -575,11 +592,10 @@ static void lt8912_bridge_detach(struct drm_bridge *bridge)
+ {
+ struct lt8912 *lt = bridge_to_lt8912(bridge);
+
+- if (lt->is_attached) {
+- lt8912_hard_power_off(lt);
+- drm_connector_unregister(&lt->connector);
+- drm_connector_cleanup(&lt->connector);
+- }
++ lt8912_hard_power_off(lt);
++
++ if (lt->connector.dev && lt->hdmi_port->ops & DRM_BRIDGE_OP_HPD)
++ drm_bridge_hpd_disable(lt->hdmi_port);
+ }
+
+ static enum drm_connector_status
+@@ -734,7 +750,6 @@ static void lt8912_remove(struct i2c_client *client)
+ {
+ struct lt8912 *lt = i2c_get_clientdata(client);
+
+- lt8912_bridge_detach(&lt->bridge);
+ drm_bridge_remove(&lt->bridge);
+ lt8912_free_i2c(lt);
+ lt8912_put_dt(lt);
+diff --git a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
+index fa1ee6264d921..818848b2c04dd 100644
+--- a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
++++ b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
+@@ -928,9 +928,9 @@ retry:
+ init_waitqueue_head(&lt9611uxc->wq);
+ INIT_WORK(&lt9611uxc->work, lt9611uxc_hpd_work);
+
+- ret = devm_request_threaded_irq(dev, client->irq, NULL,
+- lt9611uxc_irq_thread_handler,
+- IRQF_ONESHOT, "lt9611uxc", lt9611uxc);
++ ret = request_threaded_irq(client->irq, NULL,
++ lt9611uxc_irq_thread_handler,
++ IRQF_ONESHOT, "lt9611uxc", lt9611uxc);
+ if (ret) {
+ dev_err(dev, "failed to request irq\n");
+ goto err_disable_regulators;
+@@ -966,6 +966,8 @@ retry:
+ return lt9611uxc_audio_init(dev, lt9611uxc);
+
+ err_remove_bridge:
++ free_irq(client->irq, lt9611uxc);
++ cancel_work_sync(&lt9611uxc->work);
+ drm_bridge_remove(&lt9611uxc->bridge);
+
+ err_disable_regulators:
+@@ -982,7 +984,7 @@ static void lt9611uxc_remove(struct i2c_client *client)
+ {
+ struct lt9611uxc *lt9611uxc = i2c_get_clientdata(client);
+
+- disable_irq(client->irq);
++ free_irq(client->irq, lt9611uxc);
+ cancel_work_sync(&lt9611uxc->work);
+ lt9611uxc_audio_exit(lt9611uxc);
+ drm_bridge_remove(&lt9611uxc->bridge);
+diff --git a/drivers/gpu/drm/bridge/tc358768.c b/drivers/gpu/drm/bridge/tc358768.c
+index 2d0ac9987b58e..8429b6518b502 100644
+--- a/drivers/gpu/drm/bridge/tc358768.c
++++ b/drivers/gpu/drm/bridge/tc358768.c
+@@ -15,6 +15,7 @@
+ #include <linux/regmap.h>
+ #include <linux/regulator/consumer.h>
+ #include <linux/slab.h>
++#include <linux/units.h>
+
+ #include <drm/drm_atomic_helper.h>
+ #include <drm/drm_crtc_helper.h>
+@@ -217,6 +218,10 @@ static void tc358768_update_bits(struct tc358768_priv *priv, u32 reg, u32 mask,
+ u32 tmp, orig;
+
+ tc358768_read(priv, reg, &orig);
++
++ if (priv->error)
++ return;
++
+ tmp = orig & ~mask;
+ tmp |= val & mask;
+ if (tmp != orig)
+@@ -601,7 +606,7 @@ static int tc358768_setup_pll(struct tc358768_priv *priv,
+
+ dev_dbg(priv->dev, "PLL: refclk %lu, fbd %u, prd %u, frs %u\n",
+ clk_get_rate(priv->refclk), fbd, prd, frs);
+- dev_dbg(priv->dev, "PLL: pll_clk: %u, DSIClk %u, DSIByteClk %u\n",
++ dev_dbg(priv->dev, "PLL: pll_clk: %u, DSIClk %u, HSByteClk %u\n",
+ priv->dsiclk * 2, priv->dsiclk, priv->dsiclk / 4);
+ dev_dbg(priv->dev, "PLL: pclk %u (panel: %u)\n",
+ tc358768_pll_to_pclk(priv, priv->dsiclk * 2),
+@@ -624,15 +629,14 @@ static int tc358768_setup_pll(struct tc358768_priv *priv,
+ return tc358768_clear_error(priv);
+ }
+
+-#define TC358768_PRECISION 1000
+-static u32 tc358768_ns_to_cnt(u32 ns, u32 period_nsk)
++static u32 tc358768_ns_to_cnt(u32 ns, u32 period_ps)
+ {
+- return (ns * TC358768_PRECISION + period_nsk) / period_nsk;
++ return DIV_ROUND_UP(ns * 1000, period_ps);
+ }
+
+-static u32 tc358768_to_ns(u32 nsk)
++static u32 tc358768_ps_to_ns(u32 ps)
+ {
+- return (nsk / TC358768_PRECISION);
++ return ps / 1000;
+ }
+
+ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
+@@ -643,13 +647,15 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
+ u32 val, val2, lptxcnt, hact, data_type;
+ s32 raw_val;
+ const struct drm_display_mode *mode;
+- u32 dsibclk_nsk, dsiclk_nsk, ui_nsk, phy_delay_nsk;
+- u32 dsiclk, dsibclk, video_start;
++ u32 hsbyteclk_ps, dsiclk_ps, ui_ps;
++ u32 dsiclk, hsbyteclk, video_start;
+ const u32 internal_delay = 40;
+ int ret, i;
++ struct videomode vm;
++ struct device *dev = priv->dev;
+
+ if (mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) {
+- dev_warn_once(priv->dev, "Non-continuous mode unimplemented, falling back to continuous\n");
++ dev_warn_once(dev, "Non-continuous mode unimplemented, falling back to continuous\n");
+ mode_flags &= ~MIPI_DSI_CLOCK_NON_CONTINUOUS;
+ }
+
+@@ -657,7 +663,7 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
+
+ ret = tc358768_sw_reset(priv);
+ if (ret) {
+- dev_err(priv->dev, "Software reset failed: %d\n", ret);
++ dev_err(dev, "Software reset failed: %d\n", ret);
+ tc358768_hw_disable(priv);
+ return;
+ }
+@@ -665,45 +671,47 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
+ mode = &bridge->encoder->crtc->state->adjusted_mode;
+ ret = tc358768_setup_pll(priv, mode);
+ if (ret) {
+- dev_err(priv->dev, "PLL setup failed: %d\n", ret);
++ dev_err(dev, "PLL setup failed: %d\n", ret);
+ tc358768_hw_disable(priv);
+ return;
+ }
+
++ drm_display_mode_to_videomode(mode, &vm);
++
+ dsiclk = priv->dsiclk;
+- dsibclk = dsiclk / 4;
++ hsbyteclk = dsiclk / 4;
+
+ /* Data Format Control Register */
+ val = BIT(2) | BIT(1) | BIT(0); /* rdswap_en | dsitx_en | txdt_en */
+ switch (dsi_dev->format) {
+ case MIPI_DSI_FMT_RGB888:
+ val |= (0x3 << 4);
+- hact = mode->hdisplay * 3;
+- video_start = (mode->htotal - mode->hsync_start) * 3;
++ hact = vm.hactive * 3;
++ video_start = (vm.hsync_len + vm.hback_porch) * 3;
+ data_type = MIPI_DSI_PACKED_PIXEL_STREAM_24;
+ break;
+ case MIPI_DSI_FMT_RGB666:
+ val |= (0x4 << 4);
+- hact = mode->hdisplay * 3;
+- video_start = (mode->htotal - mode->hsync_start) * 3;
++ hact = vm.hactive * 3;
++ video_start = (vm.hsync_len + vm.hback_porch) * 3;
+ data_type = MIPI_DSI_PACKED_PIXEL_STREAM_18;
+ break;
+
+ case MIPI_DSI_FMT_RGB666_PACKED:
+ val |= (0x4 << 4) | BIT(3);
+- hact = mode->hdisplay * 18 / 8;
+- video_start = (mode->htotal - mode->hsync_start) * 18 / 8;
++ hact = vm.hactive * 18 / 8;
++ video_start = (vm.hsync_len + vm.hback_porch) * 18 / 8;
+ data_type = MIPI_DSI_PIXEL_STREAM_3BYTE_18;
+ break;
+
+ case MIPI_DSI_FMT_RGB565:
+ val |= (0x5 << 4);
+- hact = mode->hdisplay * 2;
+- video_start = (mode->htotal - mode->hsync_start) * 2;
++ hact = vm.hactive * 2;
++ video_start = (vm.hsync_len + vm.hback_porch) * 2;
+ data_type = MIPI_DSI_PACKED_PIXEL_STREAM_16;
+ break;
+ default:
+- dev_err(priv->dev, "Invalid data format (%u)\n",
++ dev_err(dev, "Invalid data format (%u)\n",
+ dsi_dev->format);
+ tc358768_hw_disable(priv);
+ return;
+@@ -723,69 +731,67 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
+ tc358768_write(priv, TC358768_D0W_CNTRL + i * 4, 0x0000);
+
+ /* DSI Timings */
+- dsibclk_nsk = (u32)div_u64((u64)1000000000 * TC358768_PRECISION,
+- dsibclk);
+- dsiclk_nsk = (u32)div_u64((u64)1000000000 * TC358768_PRECISION, dsiclk);
+- ui_nsk = dsiclk_nsk / 2;
+- phy_delay_nsk = dsibclk_nsk + 2 * dsiclk_nsk;
+- dev_dbg(priv->dev, "dsiclk_nsk: %u\n", dsiclk_nsk);
+- dev_dbg(priv->dev, "ui_nsk: %u\n", ui_nsk);
+- dev_dbg(priv->dev, "dsibclk_nsk: %u\n", dsibclk_nsk);
+- dev_dbg(priv->dev, "phy_delay_nsk: %u\n", phy_delay_nsk);
++ hsbyteclk_ps = (u32)div_u64(PICO, hsbyteclk);
++ dsiclk_ps = (u32)div_u64(PICO, dsiclk);
++ ui_ps = dsiclk_ps / 2;
++ dev_dbg(dev, "dsiclk: %u ps, ui %u ps, hsbyteclk %u ps\n", dsiclk_ps,
++ ui_ps, hsbyteclk_ps);
+
+ /* LP11 > 100us for D-PHY Rx Init */
+- val = tc358768_ns_to_cnt(100 * 1000, dsibclk_nsk) - 1;
+- dev_dbg(priv->dev, "LINEINITCNT: 0x%x\n", val);
++ val = tc358768_ns_to_cnt(100 * 1000, hsbyteclk_ps) - 1;
++ dev_dbg(dev, "LINEINITCNT: %u\n", val);
+ tc358768_write(priv, TC358768_LINEINITCNT, val);
+
+ /* LPTimeCnt > 50ns */
+- val = tc358768_ns_to_cnt(50, dsibclk_nsk) - 1;
++ val = tc358768_ns_to_cnt(50, hsbyteclk_ps) - 1;
+ lptxcnt = val;
+- dev_dbg(priv->dev, "LPTXTIMECNT: 0x%x\n", val);
++ dev_dbg(dev, "LPTXTIMECNT: %u\n", val);
+ tc358768_write(priv, TC358768_LPTXTIMECNT, val);
+
+ /* 38ns < TCLK_PREPARE < 95ns */
+- val = tc358768_ns_to_cnt(65, dsibclk_nsk) - 1;
++ val = tc358768_ns_to_cnt(65, hsbyteclk_ps) - 1;
++ dev_dbg(dev, "TCLK_PREPARECNT %u\n", val);
+ /* TCLK_PREPARE + TCLK_ZERO > 300ns */
+- val2 = tc358768_ns_to_cnt(300 - tc358768_to_ns(2 * ui_nsk),
+- dsibclk_nsk) - 2;
++ val2 = tc358768_ns_to_cnt(300 - tc358768_ps_to_ns(2 * ui_ps),
++ hsbyteclk_ps) - 2;
++ dev_dbg(dev, "TCLK_ZEROCNT %u\n", val2);
+ val |= val2 << 8;
+- dev_dbg(priv->dev, "TCLK_HEADERCNT: 0x%x\n", val);
+ tc358768_write(priv, TC358768_TCLK_HEADERCNT, val);
+
+ /* TCLK_TRAIL > 60ns AND TEOT <= 105 ns + 12*UI */
+- raw_val = tc358768_ns_to_cnt(60 + tc358768_to_ns(2 * ui_nsk), dsibclk_nsk) - 5;
++ raw_val = tc358768_ns_to_cnt(60 + tc358768_ps_to_ns(2 * ui_ps), hsbyteclk_ps) - 5;
+ val = clamp(raw_val, 0, 127);
+- dev_dbg(priv->dev, "TCLK_TRAILCNT: 0x%x\n", val);
++ dev_dbg(dev, "TCLK_TRAILCNT: %u\n", val);
+ tc358768_write(priv, TC358768_TCLK_TRAILCNT, val);
+
+ /* 40ns + 4*UI < THS_PREPARE < 85ns + 6*UI */
+- val = 50 + tc358768_to_ns(4 * ui_nsk);
+- val = tc358768_ns_to_cnt(val, dsibclk_nsk) - 1;
++ val = 50 + tc358768_ps_to_ns(4 * ui_ps);
++ val = tc358768_ns_to_cnt(val, hsbyteclk_ps) - 1;
++ dev_dbg(dev, "THS_PREPARECNT %u\n", val);
+ /* THS_PREPARE + THS_ZERO > 145ns + 10*UI */
+- raw_val = tc358768_ns_to_cnt(145 - tc358768_to_ns(3 * ui_nsk), dsibclk_nsk) - 10;
++ raw_val = tc358768_ns_to_cnt(145 - tc358768_ps_to_ns(3 * ui_ps), hsbyteclk_ps) - 10;
+ val2 = clamp(raw_val, 0, 127);
++ dev_dbg(dev, "THS_ZEROCNT %u\n", val2);
+ val |= val2 << 8;
+- dev_dbg(priv->dev, "THS_HEADERCNT: 0x%x\n", val);
+ tc358768_write(priv, TC358768_THS_HEADERCNT, val);
+
+ /* TWAKEUP > 1ms in lptxcnt steps */
+- val = tc358768_ns_to_cnt(1020000, dsibclk_nsk);
++ val = tc358768_ns_to_cnt(1020000, hsbyteclk_ps);
+ val = val / (lptxcnt + 1) - 1;
+- dev_dbg(priv->dev, "TWAKEUP: 0x%x\n", val);
++ dev_dbg(dev, "TWAKEUP: %u\n", val);
+ tc358768_write(priv, TC358768_TWAKEUP, val);
+
+ /* TCLK_POSTCNT > 60ns + 52*UI */
+- val = tc358768_ns_to_cnt(60 + tc358768_to_ns(52 * ui_nsk),
+- dsibclk_nsk) - 3;
+- dev_dbg(priv->dev, "TCLK_POSTCNT: 0x%x\n", val);
++ val = tc358768_ns_to_cnt(60 + tc358768_ps_to_ns(52 * ui_ps),
++ hsbyteclk_ps) - 3;
++ dev_dbg(dev, "TCLK_POSTCNT: %u\n", val);
+ tc358768_write(priv, TC358768_TCLK_POSTCNT, val);
+
+ /* max(60ns + 4*UI, 8*UI) < THS_TRAILCNT < 105ns + 12*UI */
+- raw_val = tc358768_ns_to_cnt(60 + tc358768_to_ns(18 * ui_nsk),
+- dsibclk_nsk) - 4;
++ raw_val = tc358768_ns_to_cnt(60 + tc358768_ps_to_ns(18 * ui_ps),
++ hsbyteclk_ps) - 4;
+ val = clamp(raw_val, 0, 15);
+- dev_dbg(priv->dev, "THS_TRAILCNT: 0x%x\n", val);
++ dev_dbg(dev, "THS_TRAILCNT: %u\n", val);
+ tc358768_write(priv, TC358768_THS_TRAILCNT, val);
+
+ val = BIT(0);
+@@ -793,16 +799,17 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
+ val |= BIT(i + 1);
+ tc358768_write(priv, TC358768_HSTXVREGEN, val);
+
+- if (!(mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS))
+- tc358768_write(priv, TC358768_TXOPTIONCNTRL, 0x1);
++ tc358768_write(priv, TC358768_TXOPTIONCNTRL,
++ (mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) ? 0 : BIT(0));
+
+ /* TXTAGOCNT[26:16] RXTASURECNT[10:0] */
+- val = tc358768_to_ns((lptxcnt + 1) * dsibclk_nsk * 4);
+- val = tc358768_ns_to_cnt(val, dsibclk_nsk) / 4 - 1;
+- val2 = tc358768_ns_to_cnt(tc358768_to_ns((lptxcnt + 1) * dsibclk_nsk),
+- dsibclk_nsk) - 2;
++ val = tc358768_ps_to_ns((lptxcnt + 1) * hsbyteclk_ps * 4);
++ val = tc358768_ns_to_cnt(val, hsbyteclk_ps) / 4 - 1;
++ dev_dbg(dev, "TXTAGOCNT: %u\n", val);
++ val2 = tc358768_ns_to_cnt(tc358768_ps_to_ns((lptxcnt + 1) * hsbyteclk_ps),
++ hsbyteclk_ps) - 2;
++ dev_dbg(dev, "RXTASURECNT: %u\n", val2);
+ val = val << 16 | val2;
+- dev_dbg(priv->dev, "BTACNTRL1: 0x%x\n", val);
+ tc358768_write(priv, TC358768_BTACNTRL1, val);
+
+ /* START[0] */
+@@ -813,43 +820,43 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
+ tc358768_write(priv, TC358768_DSI_EVENT, 0);
+
+ /* vact */
+- tc358768_write(priv, TC358768_DSI_VACT, mode->vdisplay);
++ tc358768_write(priv, TC358768_DSI_VACT, vm.vactive);
+
+ /* vsw */
+- tc358768_write(priv, TC358768_DSI_VSW,
+- mode->vsync_end - mode->vsync_start);
++ tc358768_write(priv, TC358768_DSI_VSW, vm.vsync_len);
++
+ /* vbp */
+- tc358768_write(priv, TC358768_DSI_VBPR,
+- mode->vtotal - mode->vsync_end);
++ tc358768_write(priv, TC358768_DSI_VBPR, vm.vback_porch);
+
+ /* hsw * byteclk * ndl / pclk */
+- val = (u32)div_u64((mode->hsync_end - mode->hsync_start) *
+- ((u64)priv->dsiclk / 4) * priv->dsi_lanes,
+- mode->clock * 1000);
++ val = (u32)div_u64(vm.hsync_len *
++ (u64)hsbyteclk * priv->dsi_lanes,
++ vm.pixelclock);
+ tc358768_write(priv, TC358768_DSI_HSW, val);
+
+ /* hbp * byteclk * ndl / pclk */
+- val = (u32)div_u64((mode->htotal - mode->hsync_end) *
+- ((u64)priv->dsiclk / 4) * priv->dsi_lanes,
+- mode->clock * 1000);
++ val = (u32)div_u64(vm.hback_porch *
++ (u64)hsbyteclk * priv->dsi_lanes,
++ vm.pixelclock);
+ tc358768_write(priv, TC358768_DSI_HBPR, val);
+ } else {
+ /* Set event mode */
+ tc358768_write(priv, TC358768_DSI_EVENT, 1);
+
+ /* vact */
+- tc358768_write(priv, TC358768_DSI_VACT, mode->vdisplay);
++ tc358768_write(priv, TC358768_DSI_VACT, vm.vactive);
+
+ /* vsw (+ vbp) */
+ tc358768_write(priv, TC358768_DSI_VSW,
+- mode->vtotal - mode->vsync_start);
++ vm.vsync_len + vm.vback_porch);
++
+ /* vbp (not used in event mode) */
+ tc358768_write(priv, TC358768_DSI_VBPR, 0);
+
+ /* (hsw + hbp) * byteclk * ndl / pclk */
+- val = (u32)div_u64((mode->htotal - mode->hsync_start) *
+- ((u64)priv->dsiclk / 4) * priv->dsi_lanes,
+- mode->clock * 1000);
++ val = (u32)div_u64((vm.hsync_len + vm.hback_porch) *
++ (u64)hsbyteclk * priv->dsi_lanes,
++ vm.pixelclock);
+ tc358768_write(priv, TC358768_DSI_HSW, val);
+
+ /* hbp (not used in event mode) */
+@@ -860,11 +867,12 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
+ tc358768_write(priv, TC358768_DSI_HACT, hact);
+
+ /* VSYNC polarity */
+- if (!(mode->flags & DRM_MODE_FLAG_NVSYNC))
+- tc358768_update_bits(priv, TC358768_CONFCTL, BIT(5), BIT(5));
++ tc358768_update_bits(priv, TC358768_CONFCTL, BIT(5),
++ (mode->flags & DRM_MODE_FLAG_PVSYNC) ? BIT(5) : 0);
++
+ /* HSYNC polarity */
+- if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+- tc358768_update_bits(priv, TC358768_PP_MISC, BIT(0), BIT(0));
++ tc358768_update_bits(priv, TC358768_PP_MISC, BIT(0),
++ (mode->flags & DRM_MODE_FLAG_PHSYNC) ? BIT(0) : 0);
+
+ /* Start DSI Tx */
+ tc358768_write(priv, TC358768_DSI_START, 0x1);
+@@ -894,7 +902,7 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
+
+ ret = tc358768_clear_error(priv);
+ if (ret) {
+- dev_err(priv->dev, "Bridge pre_enable failed: %d\n", ret);
++ dev_err(dev, "Bridge pre_enable failed: %d\n", ret);
+ tc358768_bridge_disable(bridge);
+ tc358768_bridge_post_disable(bridge);
+ }
+diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
+index e592c5da70cee..da0145bc104a8 100644
+--- a/drivers/gpu/drm/drm_syncobj.c
++++ b/drivers/gpu/drm/drm_syncobj.c
+@@ -1015,7 +1015,8 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
+ fence = drm_syncobj_fence_get(syncobjs[i]);
+ if (!fence || dma_fence_chain_find_seqno(&fence, points[i])) {
+ dma_fence_put(fence);
+- if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
++ if (flags & (DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
++ DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE)) {
+ continue;
+ } else {
+ timeout = -EINVAL;
+diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+index 14ddfe3a6be77..7fb52a573436e 100644
+--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
++++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+@@ -402,6 +402,9 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
+ unsigned int local_layer;
+
+ plane_state = to_mtk_plane_state(plane->state);
++
++ /* should not enable layer before crtc enabled */
++ plane_state->pending.enable = false;
+ comp = mtk_drm_ddp_comp_for_plane(crtc, plane, &local_layer);
+ if (comp)
+ mtk_ddp_comp_layer_config(comp, local_layer,
+diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.c b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
+index 2f5e007dd3800..c4a0203d17e38 100644
+--- a/drivers/gpu/drm/mediatek/mtk_drm_plane.c
++++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
+@@ -157,9 +157,9 @@ static void mtk_plane_atomic_async_update(struct drm_plane *plane,
+ plane->state->src_y = new_state->src_y;
+ plane->state->src_h = new_state->src_h;
+ plane->state->src_w = new_state->src_w;
+- swap(plane->state->fb, new_state->fb);
+
+ mtk_plane_update_new_state(new_state, new_plane_state);
++ swap(plane->state->fb, new_state->fb);
+ wmb(); /* Make sure the above parameters are set before update */
+ new_plane_state->pending.async_dirty = true;
+ mtk_drm_crtc_async_update(new_state->crtc, plane, state);
+diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
+index 9e1363c9fcdb4..3e74c7c1b89fa 100644
+--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
++++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
+@@ -406,7 +406,7 @@ static void mtk_dsi_rxtx_control(struct mtk_dsi *dsi)
+ if (dsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS)
+ tmp_reg |= HSTX_CKLP_EN;
+
+- if (!(dsi->mode_flags & MIPI_DSI_MODE_NO_EOT_PACKET))
++ if (dsi->mode_flags & MIPI_DSI_MODE_NO_EOT_PACKET)
+ tmp_reg |= DIS_EOT;
+
+ writel(tmp_reg, dsi->regs + DSI_TXRX_CTRL);
+@@ -483,7 +483,7 @@ static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi)
+ timing->da_hs_zero + timing->da_hs_exit + 3;
+
+ delta = dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST ? 18 : 12;
+- delta += dsi->mode_flags & MIPI_DSI_MODE_NO_EOT_PACKET ? 2 : 0;
++ delta += dsi->mode_flags & MIPI_DSI_MODE_NO_EOT_PACKET ? 0 : 2;
+
+ horizontal_frontporch_byte = vm->hfront_porch * dsi_tmp_buf_bpp;
+ horizontal_front_back_byte = horizontal_frontporch_byte + horizontal_backporch_byte;
+diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c
+index 8a95c744972a1..e9036e4036bc6 100644
+--- a/drivers/gpu/drm/msm/dsi/dsi.c
++++ b/drivers/gpu/drm/msm/dsi/dsi.c
+@@ -127,6 +127,7 @@ static void dsi_unbind(struct device *dev, struct device *master,
+ struct msm_drm_private *priv = dev_get_drvdata(master);
+ struct msm_dsi *msm_dsi = dev_get_drvdata(dev);
+
++ msm_dsi_tx_buf_free(msm_dsi->host);
+ priv->dsi[msm_dsi->id] = NULL;
+ }
+
+diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h
+index 2a96b4fe7839f..6b239f77fca94 100644
+--- a/drivers/gpu/drm/msm/dsi/dsi.h
++++ b/drivers/gpu/drm/msm/dsi/dsi.h
+@@ -123,6 +123,7 @@ int dsi_tx_buf_alloc_v2(struct msm_dsi_host *msm_host, int size);
+ void *dsi_tx_buf_get_6g(struct msm_dsi_host *msm_host);
+ void *dsi_tx_buf_get_v2(struct msm_dsi_host *msm_host);
+ void dsi_tx_buf_put_6g(struct msm_dsi_host *msm_host);
++void msm_dsi_tx_buf_free(struct mipi_dsi_host *mipi_host);
+ int dsi_dma_base_get_6g(struct msm_dsi_host *msm_host, uint64_t *iova);
+ int dsi_dma_base_get_v2(struct msm_dsi_host *msm_host, uint64_t *iova);
+ int dsi_clk_init_v2(struct msm_dsi_host *msm_host);
+diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
+index e20cd3dd2c6cc..a7c6e8a1754de 100644
+--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
++++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
+@@ -149,6 +149,7 @@ struct msm_dsi_host {
+
+ /* DSI 6G TX buffer*/
+ struct drm_gem_object *tx_gem_obj;
++ struct msm_gem_address_space *aspace;
+
+ /* DSI v2 TX buffer */
+ void *tx_buf;
+@@ -1127,8 +1128,10 @@ int dsi_tx_buf_alloc_6g(struct msm_dsi_host *msm_host, int size)
+ uint64_t iova;
+ u8 *data;
+
++ msm_host->aspace = msm_gem_address_space_get(priv->kms->aspace);
++
+ data = msm_gem_kernel_new(dev, size, MSM_BO_WC,
+- priv->kms->aspace,
++ msm_host->aspace,
+ &msm_host->tx_gem_obj, &iova);
+
+ if (IS_ERR(data)) {
+@@ -1157,10 +1160,10 @@ int dsi_tx_buf_alloc_v2(struct msm_dsi_host *msm_host, int size)
+ return 0;
+ }
+
+-static void dsi_tx_buf_free(struct msm_dsi_host *msm_host)
++void msm_dsi_tx_buf_free(struct mipi_dsi_host *host)
+ {
++ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+ struct drm_device *dev = msm_host->dev;
+- struct msm_drm_private *priv;
+
+ /*
+ * This is possible if we're tearing down before we've had a chance to
+@@ -1171,11 +1174,11 @@ static void dsi_tx_buf_free(struct msm_dsi_host *msm_host)
+ if (!dev)
+ return;
+
+- priv = dev->dev_private;
+ if (msm_host->tx_gem_obj) {
+- msm_gem_unpin_iova(msm_host->tx_gem_obj, priv->kms->aspace);
+- drm_gem_object_put(msm_host->tx_gem_obj);
++ msm_gem_kernel_put(msm_host->tx_gem_obj, msm_host->aspace);
++ msm_gem_address_space_put(msm_host->aspace);
+ msm_host->tx_gem_obj = NULL;
++ msm_host->aspace = NULL;
+ }
+
+ if (msm_host->tx_buf)
+@@ -2014,7 +2017,6 @@ void msm_dsi_host_destroy(struct mipi_dsi_host *host)
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+ DBG("");
+- dsi_tx_buf_free(msm_host);
+ if (msm_host->workqueue) {
+ destroy_workqueue(msm_host->workqueue);
+ msm_host->workqueue = NULL;
+diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
+index 4f06356d9ce2e..f0ae087be914e 100644
+--- a/drivers/gpu/drm/radeon/evergreen.c
++++ b/drivers/gpu/drm/radeon/evergreen.c
+@@ -4821,14 +4821,15 @@ restart_ih:
+ break;
+ case 44: /* hdmi */
+ afmt_idx = src_data;
+- if (!(afmt_status[afmt_idx] & AFMT_AZ_FORMAT_WTRIG))
+- DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+-
+ if (afmt_idx > 5) {
+ DRM_ERROR("Unhandled interrupt: %d %d\n",
+ src_id, src_data);
+ break;
+ }
++
++ if (!(afmt_status[afmt_idx] & AFMT_AZ_FORMAT_WTRIG))
++ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
+ afmt_status[afmt_idx] &= ~AFMT_AZ_FORMAT_WTRIG;
+ queue_hdmi = true;
+ DRM_DEBUG("IH: HDMI%d\n", afmt_idx + 1);
+diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c
+index 8526dda919317..0b33c3a1e6e3b 100644
+--- a/drivers/gpu/drm/rockchip/cdn-dp-core.c
++++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c
+@@ -1178,6 +1178,7 @@ static int cdn_dp_probe(struct platform_device *pdev)
+ struct cdn_dp_device *dp;
+ struct extcon_dev *extcon;
+ struct phy *phy;
++ int ret;
+ int i;
+
+ dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL);
+@@ -1218,9 +1219,19 @@ static int cdn_dp_probe(struct platform_device *pdev)
+ mutex_init(&dp->lock);
+ dev_set_drvdata(dev, dp);
+
+- cdn_dp_audio_codec_init(dp, dev);
++ ret = cdn_dp_audio_codec_init(dp, dev);
++ if (ret)
++ return ret;
++
++ ret = component_add(dev, &cdn_dp_component_ops);
++ if (ret)
++ goto err_audio_deinit;
+
+- return component_add(dev, &cdn_dp_component_ops);
++ return 0;
++
++err_audio_deinit:
++ platform_device_unregister(dp->audio_pdev);
++ return ret;
+ }
+
+ static int cdn_dp_remove(struct platform_device *pdev)
+diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+index 9426f7976d22e..10a4970ad2d8a 100644
+--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
++++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+@@ -39,7 +39,7 @@ static int rockchip_gem_iommu_map(struct rockchip_gem_object *rk_obj)
+
+ ret = iommu_map_sgtable(private->domain, rk_obj->dma_addr, rk_obj->sgt,
+ prot);
+- if (ret < rk_obj->base.size) {
++ if (ret < (ssize_t)rk_obj->base.size) {
+ DRM_ERROR("failed to map buffer: size=%zd request_size=%zd\n",
+ ret, rk_obj->base.size);
+ ret = -ENOMEM;
+diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+index 2e2e08f4359a8..ae8c532f7fc84 100644
+--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
++++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+@@ -1606,7 +1606,8 @@ static struct drm_crtc_state *vop_crtc_duplicate_state(struct drm_crtc *crtc)
+ if (WARN_ON(!crtc->state))
+ return NULL;
+
+- rockchip_state = kzalloc(sizeof(*rockchip_state), GFP_KERNEL);
++ rockchip_state = kmemdup(to_rockchip_crtc_state(crtc->state),
++ sizeof(*rockchip_state), GFP_KERNEL);
+ if (!rockchip_state)
+ return NULL;
+
+@@ -1631,7 +1632,10 @@ static void vop_crtc_reset(struct drm_crtc *crtc)
+ if (crtc->state)
+ vop_crtc_destroy_state(crtc, crtc->state);
+
+- __drm_atomic_helper_crtc_reset(crtc, &crtc_state->base);
++ if (crtc_state)
++ __drm_atomic_helper_crtc_reset(crtc, &crtc_state->base);
++ else
++ __drm_atomic_helper_crtc_reset(crtc, NULL);
+ }
+
+ #ifdef CONFIG_DRM_ANALOGIX_DP
+diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
+index 3c05ce01f73b8..b233f52675dc4 100644
+--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
++++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
+@@ -2075,30 +2075,15 @@ static const struct drm_crtc_helper_funcs vop2_crtc_helper_funcs = {
+ .atomic_disable = vop2_crtc_atomic_disable,
+ };
+
+-static void vop2_crtc_reset(struct drm_crtc *crtc)
+-{
+- struct rockchip_crtc_state *vcstate = to_rockchip_crtc_state(crtc->state);
+-
+- if (crtc->state) {
+- __drm_atomic_helper_crtc_destroy_state(crtc->state);
+- kfree(vcstate);
+- }
+-
+- vcstate = kzalloc(sizeof(*vcstate), GFP_KERNEL);
+- if (!vcstate)
+- return;
+-
+- crtc->state = &vcstate->base;
+- crtc->state->crtc = crtc;
+-}
+-
+ static struct drm_crtc_state *vop2_crtc_duplicate_state(struct drm_crtc *crtc)
+ {
+- struct rockchip_crtc_state *vcstate, *old_vcstate;
++ struct rockchip_crtc_state *vcstate;
+
+- old_vcstate = to_rockchip_crtc_state(crtc->state);
++ if (WARN_ON(!crtc->state))
++ return NULL;
+
+- vcstate = kmemdup(old_vcstate, sizeof(*old_vcstate), GFP_KERNEL);
++ vcstate = kmemdup(to_rockchip_crtc_state(crtc->state),
++ sizeof(*vcstate), GFP_KERNEL);
+ if (!vcstate)
+ return NULL;
+
+@@ -2116,6 +2101,20 @@ static void vop2_crtc_destroy_state(struct drm_crtc *crtc,
+ kfree(vcstate);
+ }
+
++static void vop2_crtc_reset(struct drm_crtc *crtc)
++{
++ struct rockchip_crtc_state *vcstate =
++ kzalloc(sizeof(*vcstate), GFP_KERNEL);
++
++ if (crtc->state)
++ vop2_crtc_destroy_state(crtc, crtc->state);
++
++ if (vcstate)
++ __drm_atomic_helper_crtc_reset(crtc, &vcstate->base);
++ else
++ __drm_atomic_helper_crtc_reset(crtc, NULL);
++}
++
+ static const struct drm_crtc_funcs vop2_crtc_funcs = {
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+diff --git a/drivers/gpu/host1x/context.c b/drivers/gpu/host1x/context.c
+index 047696432eb21..93c0c532fe5af 100644
+--- a/drivers/gpu/host1x/context.c
++++ b/drivers/gpu/host1x/context.c
+@@ -34,10 +34,10 @@ int host1x_memory_context_list_init(struct host1x *host1x)
+ if (err < 0)
+ return 0;
+
+- cdl->devs = kcalloc(err, sizeof(*cdl->devs), GFP_KERNEL);
++ cdl->len = err / 4;
++ cdl->devs = kcalloc(cdl->len, sizeof(*cdl->devs), GFP_KERNEL);
+ if (!cdl->devs)
+ return -ENOMEM;
+- cdl->len = err / 4;
+
+ for (i = 0; i < cdl->len; i++) {
+ struct iommu_fwspec *fwspec;
+diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
+index 27cadadda7c9d..2770d964133d5 100644
+--- a/drivers/hid/hid-cp2112.c
++++ b/drivers/hid/hid-cp2112.c
+@@ -163,7 +163,6 @@ struct cp2112_device {
+ atomic_t read_avail;
+ atomic_t xfer_avail;
+ struct gpio_chip gc;
+- struct irq_chip irq;
+ u8 *in_out_buffer;
+ struct mutex lock;
+
+@@ -1080,16 +1079,20 @@ static void cp2112_gpio_irq_mask(struct irq_data *d)
+ {
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct cp2112_device *dev = gpiochip_get_data(gc);
++ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+
+- __clear_bit(d->hwirq, &dev->irq_mask);
++ __clear_bit(hwirq, &dev->irq_mask);
++ gpiochip_disable_irq(gc, hwirq);
+ }
+
+ static void cp2112_gpio_irq_unmask(struct irq_data *d)
+ {
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct cp2112_device *dev = gpiochip_get_data(gc);
++ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+
+- __set_bit(d->hwirq, &dev->irq_mask);
++ gpiochip_enable_irq(gc, hwirq);
++ __set_bit(hwirq, &dev->irq_mask);
+ }
+
+ static void cp2112_gpio_poll_callback(struct work_struct *work)
+@@ -1159,8 +1162,6 @@ static unsigned int cp2112_gpio_irq_startup(struct irq_data *d)
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct cp2112_device *dev = gpiochip_get_data(gc);
+
+- INIT_DELAYED_WORK(&dev->gpio_poll_worker, cp2112_gpio_poll_callback);
+-
+ if (!dev->gpio_poll) {
+ dev->gpio_poll = true;
+ schedule_delayed_work(&dev->gpio_poll_worker, 0);
+@@ -1175,7 +1176,12 @@ static void cp2112_gpio_irq_shutdown(struct irq_data *d)
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct cp2112_device *dev = gpiochip_get_data(gc);
+
+- cancel_delayed_work_sync(&dev->gpio_poll_worker);
++ cp2112_gpio_irq_mask(d);
++
++ if (!dev->irq_mask) {
++ dev->gpio_poll = false;
++ cancel_delayed_work_sync(&dev->gpio_poll_worker);
++ }
+ }
+
+ static int cp2112_gpio_irq_type(struct irq_data *d, unsigned int type)
+@@ -1228,6 +1234,18 @@ err_desc:
+ return ret;
+ }
+
++static const struct irq_chip cp2112_gpio_irqchip = {
++ .name = "cp2112-gpio",
++ .irq_startup = cp2112_gpio_irq_startup,
++ .irq_shutdown = cp2112_gpio_irq_shutdown,
++ .irq_ack = cp2112_gpio_irq_ack,
++ .irq_mask = cp2112_gpio_irq_mask,
++ .irq_unmask = cp2112_gpio_irq_unmask,
++ .irq_set_type = cp2112_gpio_irq_type,
++ .flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_IMMUTABLE,
++ GPIOCHIP_IRQ_RESOURCE_HELPERS,
++};
++
+ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ {
+ struct cp2112_device *dev;
+@@ -1337,17 +1355,8 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ dev->gc.can_sleep = 1;
+ dev->gc.parent = &hdev->dev;
+
+- dev->irq.name = "cp2112-gpio";
+- dev->irq.irq_startup = cp2112_gpio_irq_startup;
+- dev->irq.irq_shutdown = cp2112_gpio_irq_shutdown;
+- dev->irq.irq_ack = cp2112_gpio_irq_ack;
+- dev->irq.irq_mask = cp2112_gpio_irq_mask;
+- dev->irq.irq_unmask = cp2112_gpio_irq_unmask;
+- dev->irq.irq_set_type = cp2112_gpio_irq_type;
+- dev->irq.flags = IRQCHIP_MASK_ON_SUSPEND;
+-
+ girq = &dev->gc.irq;
+- girq->chip = &dev->irq;
++ gpio_irq_chip_set_chip(girq, &cp2112_gpio_irqchip);
+ /* The event comes from the outside so no parent handler */
+ girq->parent_handler = NULL;
+ girq->num_parents = 0;
+@@ -1356,6 +1365,8 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ girq->handler = handle_simple_irq;
+ girq->threaded = true;
+
++ INIT_DELAYED_WORK(&dev->gpio_poll_worker, cp2112_gpio_poll_callback);
++
+ ret = gpiochip_add_data(&dev->gc, dev);
+ if (ret < 0) {
+ hid_err(hdev, "error registering gpio chip\n");
+diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
+index 8d0dad12b2d37..fa1c7e07e220b 100644
+--- a/drivers/hid/hid-logitech-hidpp.c
++++ b/drivers/hid/hid-logitech-hidpp.c
+@@ -31,11 +31,6 @@ MODULE_LICENSE("GPL");
+ MODULE_AUTHOR("Benjamin Tissoires <benjamin.tissoires@gmail.com>");
+ MODULE_AUTHOR("Nestor Lopez Casado <nlopezcasad@logitech.com>");
+
+-static bool disable_raw_mode;
+-module_param(disable_raw_mode, bool, 0644);
+-MODULE_PARM_DESC(disable_raw_mode,
+- "Disable Raw mode reporting for touchpads and keep firmware gestures.");
+-
+ static bool disable_tap_to_click;
+ module_param(disable_tap_to_click, bool, 0644);
+ MODULE_PARM_DESC(disable_tap_to_click,
+@@ -71,7 +66,7 @@ MODULE_PARM_DESC(disable_tap_to_click,
+ /* bits 2..20 are reserved for classes */
+ /* #define HIDPP_QUIRK_CONNECT_EVENTS BIT(21) disabled */
+ #define HIDPP_QUIRK_WTP_PHYSICAL_BUTTONS BIT(22)
+-#define HIDPP_QUIRK_NO_HIDINPUT BIT(23)
++#define HIDPP_QUIRK_DELAYED_INIT BIT(23)
+ #define HIDPP_QUIRK_FORCE_OUTPUT_REPORTS BIT(24)
+ #define HIDPP_QUIRK_UNIFYING BIT(25)
+ #define HIDPP_QUIRK_HIDPP_WHEELS BIT(26)
+@@ -88,8 +83,6 @@ MODULE_PARM_DESC(disable_tap_to_click,
+ HIDPP_CAPABILITY_HIDPP20_HI_RES_SCROLL | \
+ HIDPP_CAPABILITY_HIDPP20_HI_RES_WHEEL)
+
+-#define HIDPP_QUIRK_DELAYED_INIT HIDPP_QUIRK_NO_HIDINPUT
+-
+ #define HIDPP_CAPABILITY_HIDPP10_BATTERY BIT(0)
+ #define HIDPP_CAPABILITY_HIDPP20_BATTERY BIT(1)
+ #define HIDPP_CAPABILITY_BATTERY_MILEAGE BIT(2)
+@@ -1764,15 +1757,14 @@ static int hidpp_battery_get_property(struct power_supply *psy,
+ /* -------------------------------------------------------------------------- */
+ #define HIDPP_PAGE_WIRELESS_DEVICE_STATUS 0x1d4b
+
+-static int hidpp_set_wireless_feature_index(struct hidpp_device *hidpp)
++static int hidpp_get_wireless_feature_index(struct hidpp_device *hidpp, u8 *feature_index)
+ {
+ u8 feature_type;
+ int ret;
+
+ ret = hidpp_root_get_feature(hidpp,
+ HIDPP_PAGE_WIRELESS_DEVICE_STATUS,
+- &hidpp->wireless_feature_index,
+- &feature_type);
++ feature_index, &feature_type);
+
+ return ret;
+ }
+@@ -4006,6 +3998,13 @@ static void hidpp_connect_event(struct hidpp_device *hidpp)
+ }
+ }
+
++ if (hidpp->protocol_major >= 2) {
++ u8 feature_index;
++
++ if (!hidpp_get_wireless_feature_index(hidpp, &feature_index))
++ hidpp->wireless_feature_index = feature_index;
++ }
++
+ if (hidpp->name == hdev->name && hidpp->protocol_major >= 2) {
+ name = hidpp_get_device_name(hidpp);
+ if (name) {
+@@ -4044,7 +4043,7 @@ static void hidpp_connect_event(struct hidpp_device *hidpp)
+ if (hidpp->capabilities & HIDPP_CAPABILITY_HI_RES_SCROLL)
+ hi_res_scroll_enable(hidpp);
+
+- if (!(hidpp->quirks & HIDPP_QUIRK_NO_HIDINPUT) || hidpp->delayed_input)
++ if (!(hidpp->quirks & HIDPP_QUIRK_DELAYED_INIT) || hidpp->delayed_input)
+ /* if the input nodes are already created, we can stop now */
+ return;
+
+@@ -4149,7 +4148,6 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ bool connected;
+ unsigned int connect_mask = HID_CONNECT_DEFAULT;
+ struct hidpp_ff_private_data data;
+- bool will_restart = false;
+
+ /* report_fixup needs drvdata to be set before we call hid_parse */
+ hidpp = devm_kzalloc(&hdev->dev, sizeof(*hidpp), GFP_KERNEL);
+@@ -4190,11 +4188,6 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ hidpp_application_equals(hdev, HID_GD_KEYBOARD))
+ hidpp->quirks |= HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS;
+
+- if (disable_raw_mode) {
+- hidpp->quirks &= ~HIDPP_QUIRK_CLASS_WTP;
+- hidpp->quirks &= ~HIDPP_QUIRK_NO_HIDINPUT;
+- }
+-
+ if (hidpp->quirks & HIDPP_QUIRK_CLASS_WTP) {
+ ret = wtp_allocate(hdev, id);
+ if (ret)
+@@ -4205,10 +4198,6 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ return ret;
+ }
+
+- if (hidpp->quirks & HIDPP_QUIRK_DELAYED_INIT ||
+- hidpp->quirks & HIDPP_QUIRK_UNIFYING)
+- will_restart = true;
+-
+ INIT_WORK(&hidpp->work, delayed_work_cb);
+ mutex_init(&hidpp->send_mutex);
+ init_waitqueue_head(&hidpp->wait);
+@@ -4220,10 +4209,12 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ hdev->name);
+
+ /*
+- * Plain USB connections need to actually call start and open
+- * on the transport driver to allow incoming data.
++ * First call hid_hw_start(hdev, 0) to allow IO without connecting any
++ * hid subdrivers (hid-input, hidraw). This allows retrieving the dev's
++ * name and serial number and store these in hdev->name and hdev->uniq,
++ * before the hid-input and hidraw drivers expose these to userspace.
+ */
+- ret = hid_hw_start(hdev, will_restart ? 0 : connect_mask);
++ ret = hid_hw_start(hdev, 0);
+ if (ret) {
+ hid_err(hdev, "hw start failed\n");
+ goto hid_hw_start_fail;
+@@ -4256,15 +4247,6 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ hidpp_overwrite_name(hdev);
+ }
+
+- if (connected && hidpp->protocol_major >= 2) {
+- ret = hidpp_set_wireless_feature_index(hidpp);
+- if (ret == -ENOENT)
+- hidpp->wireless_feature_index = 0;
+- else if (ret)
+- goto hid_hw_init_fail;
+- ret = 0;
+- }
+-
+ if (connected && (hidpp->quirks & HIDPP_QUIRK_CLASS_WTP)) {
+ ret = wtp_get_config(hidpp);
+ if (ret)
+@@ -4278,21 +4260,14 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ schedule_work(&hidpp->work);
+ flush_work(&hidpp->work);
+
+- if (will_restart) {
+- /* Reset the HID node state */
+- hid_device_io_stop(hdev);
+- hid_hw_close(hdev);
+- hid_hw_stop(hdev);
++ if (hidpp->quirks & HIDPP_QUIRK_DELAYED_INIT)
++ connect_mask &= ~HID_CONNECT_HIDINPUT;
+
+- if (hidpp->quirks & HIDPP_QUIRK_NO_HIDINPUT)
+- connect_mask &= ~HID_CONNECT_HIDINPUT;
+-
+- /* Now export the actual inputs and hidraw nodes to the world */
+- ret = hid_hw_start(hdev, connect_mask);
+- if (ret) {
+- hid_err(hdev, "%s:hid_hw_start returned error\n", __func__);
+- goto hid_hw_start_fail;
+- }
++ /* Now export the actual inputs and hidraw nodes to the world */
++ ret = hid_connect(hdev, connect_mask);
++ if (ret) {
++ hid_err(hdev, "%s:hid_connect returned error %d\n", __func__, ret);
++ goto hid_hw_init_fail;
+ }
+
+ if (hidpp->quirks & HIDPP_QUIRK_CLASS_G920) {
+@@ -4303,6 +4278,11 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ ret);
+ }
+
++ /*
++ * This relies on logi_dj_ll_close() being a no-op so that DJ connection
++ * events will still be received.
++ */
++ hid_hw_close(hdev);
+ return ret;
+
+ hid_hw_init_fail:
+diff --git a/drivers/hte/hte-tegra194-test.c b/drivers/hte/hte-tegra194-test.c
+index ce8c44e792213..60f0ef2cb324f 100644
+--- a/drivers/hte/hte-tegra194-test.c
++++ b/drivers/hte/hte-tegra194-test.c
+@@ -154,8 +154,10 @@ static int tegra_hte_test_probe(struct platform_device *pdev)
+ }
+
+ cnt = of_hte_req_count(hte.pdev);
+- if (cnt < 0)
++ if (cnt < 0) {
++ ret = cnt;
+ goto free_irq;
++ }
+
+ dev_info(&pdev->dev, "Total requested lines:%d\n", cnt);
+
+diff --git a/drivers/hwmon/axi-fan-control.c b/drivers/hwmon/axi-fan-control.c
+index 6724e0dd30880..25abf28084c96 100644
+--- a/drivers/hwmon/axi-fan-control.c
++++ b/drivers/hwmon/axi-fan-control.c
+@@ -496,6 +496,21 @@ static int axi_fan_control_probe(struct platform_device *pdev)
+ return -ENODEV;
+ }
+
++ ret = axi_fan_control_init(ctl, pdev->dev.of_node);
++ if (ret) {
++ dev_err(&pdev->dev, "Failed to initialize device\n");
++ return ret;
++ }
++
++ ctl->hdev = devm_hwmon_device_register_with_info(&pdev->dev,
++ name,
++ ctl,
++ &axi_chip_info,
++ axi_fan_control_groups);
++
++ if (IS_ERR(ctl->hdev))
++ return PTR_ERR(ctl->hdev);
++
+ ctl->irq = platform_get_irq(pdev, 0);
+ if (ctl->irq < 0)
+ return ctl->irq;
+@@ -509,19 +524,7 @@ static int axi_fan_control_probe(struct platform_device *pdev)
+ return ret;
+ }
+
+- ret = axi_fan_control_init(ctl, pdev->dev.of_node);
+- if (ret) {
+- dev_err(&pdev->dev, "Failed to initialize device\n");
+- return ret;
+- }
+-
+- ctl->hdev = devm_hwmon_device_register_with_info(&pdev->dev,
+- name,
+- ctl,
+- &axi_chip_info,
+- axi_fan_control_groups);
+-
+- return PTR_ERR_OR_ZERO(ctl->hdev);
++ return 0;
+ }
+
+ static struct platform_driver axi_fan_control_driver = {
+diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
+index baaf8af4cb443..09aab5859fa75 100644
+--- a/drivers/hwmon/coretemp.c
++++ b/drivers/hwmon/coretemp.c
+@@ -41,7 +41,7 @@ MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius");
+ #define PKG_SYSFS_ATTR_NO 1 /* Sysfs attribute for package temp */
+ #define BASE_SYSFS_ATTR_NO 2 /* Sysfs Base attr no for coretemp */
+ #define NUM_REAL_CORES 128 /* Number of Real cores per cpu */
+-#define CORETEMP_NAME_LENGTH 19 /* String Length of attrs */
++#define CORETEMP_NAME_LENGTH 28 /* String Length of attrs */
+ #define MAX_CORE_ATTRS 4 /* Maximum no of basic attrs */
+ #define TOTAL_ATTRS (MAX_CORE_ATTRS + 1)
+ #define MAX_CORE_DATA (NUM_REAL_CORES + BASE_SYSFS_ATTR_NO)
+diff --git a/drivers/hwmon/nct6775-core.c b/drivers/hwmon/nct6775-core.c
+index c54233f0369b2..80310845fb993 100644
+--- a/drivers/hwmon/nct6775-core.c
++++ b/drivers/hwmon/nct6775-core.c
+@@ -1528,17 +1528,21 @@ struct nct6775_data *nct6775_update_device(struct device *dev)
+ data->fan_div[i]);
+
+ if (data->has_fan_min & BIT(i)) {
+- err = nct6775_read_value(data, data->REG_FAN_MIN[i], &reg);
++ u16 tmp;
++
++ err = nct6775_read_value(data, data->REG_FAN_MIN[i], &tmp);
+ if (err)
+ goto out;
+- data->fan_min[i] = reg;
++ data->fan_min[i] = tmp;
+ }
+
+ if (data->REG_FAN_PULSES[i]) {
+- err = nct6775_read_value(data, data->REG_FAN_PULSES[i], &reg);
++ u16 tmp;
++
++ err = nct6775_read_value(data, data->REG_FAN_PULSES[i], &tmp);
+ if (err)
+ goto out;
+- data->fan_pulses[i] = (reg >> data->FAN_PULSE_SHIFT[i]) & 0x03;
++ data->fan_pulses[i] = (tmp >> data->FAN_PULSE_SHIFT[i]) & 0x03;
+ }
+
+ err = nct6775_select_fan_div(dev, data, i, reg);
+diff --git a/drivers/hwmon/sch5627.c b/drivers/hwmon/sch5627.c
+index 25fbbd4c9a2b3..886386272b9f4 100644
+--- a/drivers/hwmon/sch5627.c
++++ b/drivers/hwmon/sch5627.c
+@@ -6,6 +6,7 @@
+
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
++#include <linux/bits.h>
+ #include <linux/module.h>
+ #include <linux/mod_devicetable.h>
+ #include <linux/init.h>
+@@ -32,6 +33,10 @@
+ #define SCH5627_REG_PRIMARY_ID 0x3f
+ #define SCH5627_REG_CTRL 0x40
+
++#define SCH5627_CTRL_START BIT(0)
++#define SCH5627_CTRL_LOCK BIT(1)
++#define SCH5627_CTRL_VBAT BIT(4)
++
+ #define SCH5627_NO_TEMPS 8
+ #define SCH5627_NO_FANS 4
+ #define SCH5627_NO_IN 5
+@@ -147,7 +152,8 @@ static int sch5627_update_in(struct sch5627_data *data)
+
+ /* Trigger a Vbat voltage measurement every 5 minutes */
+ if (time_after(jiffies, data->last_battery + 300 * HZ)) {
+- sch56xx_write_virtual_reg(data->addr, SCH5627_REG_CTRL, data->control | 0x10);
++ sch56xx_write_virtual_reg(data->addr, SCH5627_REG_CTRL,
++ data->control | SCH5627_CTRL_VBAT);
+ data->last_battery = jiffies;
+ }
+
+@@ -226,6 +232,14 @@ static int reg_to_rpm(u16 reg)
+ static umode_t sch5627_is_visible(const void *drvdata, enum hwmon_sensor_types type, u32 attr,
+ int channel)
+ {
++ const struct sch5627_data *data = drvdata;
++
++ /* Once the lock bit is set, the virtual registers become read-only
++ * until the next power cycle.
++ */
++ if (data->control & SCH5627_CTRL_LOCK)
++ return 0444;
++
+ if (type == hwmon_pwm && attr == hwmon_pwm_auto_channels_temp)
+ return 0644;
+
+@@ -483,14 +497,13 @@ static int sch5627_probe(struct platform_device *pdev)
+ return val;
+
+ data->control = val;
+- if (!(data->control & 0x01)) {
++ if (!(data->control & SCH5627_CTRL_START)) {
+ pr_err("hardware monitoring not enabled\n");
+ return -ENODEV;
+ }
+ /* Trigger a Vbat voltage measurement, so that we get a valid reading
+ the first time we read Vbat */
+- sch56xx_write_virtual_reg(data->addr, SCH5627_REG_CTRL,
+- data->control | 0x10);
++ sch56xx_write_virtual_reg(data->addr, SCH5627_REG_CTRL, data->control | SCH5627_CTRL_VBAT);
+ data->last_battery = jiffies;
+
+ /*
+diff --git a/drivers/hwmon/sch56xx-common.c b/drivers/hwmon/sch56xx-common.c
+index de3a0886c2f72..ac1f725807155 100644
+--- a/drivers/hwmon/sch56xx-common.c
++++ b/drivers/hwmon/sch56xx-common.c
+@@ -7,10 +7,8 @@
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+ #include <linux/module.h>
+-#include <linux/mod_devicetable.h>
+ #include <linux/init.h>
+ #include <linux/platform_device.h>
+-#include <linux/dmi.h>
+ #include <linux/err.h>
+ #include <linux/io.h>
+ #include <linux/acpi.h>
+@@ -21,10 +19,7 @@
+ #include <linux/slab.h>
+ #include "sch56xx-common.h"
+
+-static bool ignore_dmi;
+-module_param(ignore_dmi, bool, 0);
+-MODULE_PARM_DESC(ignore_dmi, "Omit DMI check for supported devices (default=0)");
+-
++/* Insmod parameters */
+ static bool nowayout = WATCHDOG_NOWAYOUT;
+ module_param(nowayout, bool, 0);
+ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
+@@ -523,66 +518,11 @@ static int __init sch56xx_device_add(int address, const char *name)
+ return PTR_ERR_OR_ZERO(sch56xx_pdev);
+ }
+
+-static const struct dmi_system_id sch56xx_dmi_override_table[] __initconst = {
+- {
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+- DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS W380"),
+- },
+- },
+- {
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+- DMI_MATCH(DMI_PRODUCT_NAME, "ESPRIMO P710"),
+- },
+- },
+- {
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+- DMI_MATCH(DMI_PRODUCT_NAME, "ESPRIMO E9900"),
+- },
+- },
+- { }
+-};
+-
+-/* For autoloading only */
+-static const struct dmi_system_id sch56xx_dmi_table[] __initconst = {
+- {
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+- },
+- },
+- { }
+-};
+-MODULE_DEVICE_TABLE(dmi, sch56xx_dmi_table);
+-
+ static int __init sch56xx_init(void)
+ {
+- const char *name = NULL;
+ int address;
++ const char *name = NULL;
+
+- if (!ignore_dmi) {
+- if (!dmi_check_system(sch56xx_dmi_table))
+- return -ENODEV;
+-
+- if (!dmi_check_system(sch56xx_dmi_override_table)) {
+- /*
+- * Some machines like the Esprimo P720 and Esprimo C700 have
+- * onboard devices named " Antiope"/" Theseus" instead of
+- * "Antiope"/"Theseus", so we need to check for both.
+- */
+- if (!dmi_find_device(DMI_DEV_TYPE_OTHER, "Antiope", NULL) &&
+- !dmi_find_device(DMI_DEV_TYPE_OTHER, " Antiope", NULL) &&
+- !dmi_find_device(DMI_DEV_TYPE_OTHER, "Theseus", NULL) &&
+- !dmi_find_device(DMI_DEV_TYPE_OTHER, " Theseus", NULL))
+- return -ENODEV;
+- }
+- }
+-
+- /*
+- * Some devices like the Esprimo C700 have both onboard devices,
+- * so we still have to check manually
+- */
+ address = sch56xx_find(0x4e, &name);
+ if (address < 0)
+ address = sch56xx_find(0x2e, &name);
+diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c
+index 30a2a3200bed9..86a080f24d8a2 100644
+--- a/drivers/i2c/busses/i2c-bcm-iproc.c
++++ b/drivers/i2c/busses/i2c-bcm-iproc.c
+@@ -316,26 +316,44 @@ static void bcm_iproc_i2c_slave_init(
+ iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, val);
+ }
+
+-static void bcm_iproc_i2c_check_slave_status(
+- struct bcm_iproc_i2c_dev *iproc_i2c)
++static bool bcm_iproc_i2c_check_slave_status
++ (struct bcm_iproc_i2c_dev *iproc_i2c, u32 status)
+ {
+ u32 val;
++ bool recover = false;
+
+- val = iproc_i2c_rd_reg(iproc_i2c, S_CMD_OFFSET);
+- /* status is valid only when START_BUSY is cleared after it was set */
+- if (val & BIT(S_CMD_START_BUSY_SHIFT))
+- return;
++ /* check slave transmit status only if slave is transmitting */
++ if (!iproc_i2c->slave_rx_only) {
++ val = iproc_i2c_rd_reg(iproc_i2c, S_CMD_OFFSET);
++ /* status is valid only when START_BUSY is cleared */
++ if (!(val & BIT(S_CMD_START_BUSY_SHIFT))) {
++ val = (val >> S_CMD_STATUS_SHIFT) & S_CMD_STATUS_MASK;
++ if (val == S_CMD_STATUS_TIMEOUT ||
++ val == S_CMD_STATUS_MASTER_ABORT) {
++ dev_warn(iproc_i2c->device,
++ (val == S_CMD_STATUS_TIMEOUT) ?
++ "slave random stretch time timeout\n" :
++ "Master aborted read transaction\n");
++ recover = true;
++ }
++ }
++ }
++
++ /* RX_EVENT is not valid when START_BUSY is set */
++ if ((status & BIT(IS_S_RX_EVENT_SHIFT)) &&
++ (status & BIT(IS_S_START_BUSY_SHIFT))) {
++ dev_warn(iproc_i2c->device, "Slave aborted read transaction\n");
++ recover = true;
++ }
+
+- val = (val >> S_CMD_STATUS_SHIFT) & S_CMD_STATUS_MASK;
+- if (val == S_CMD_STATUS_TIMEOUT || val == S_CMD_STATUS_MASTER_ABORT) {
+- dev_err(iproc_i2c->device, (val == S_CMD_STATUS_TIMEOUT) ?
+- "slave random stretch time timeout\n" :
+- "Master aborted read transaction\n");
++ if (recover) {
+ /* re-initialize i2c for recovery */
+ bcm_iproc_i2c_enable_disable(iproc_i2c, false);
+ bcm_iproc_i2c_slave_init(iproc_i2c, true);
+ bcm_iproc_i2c_enable_disable(iproc_i2c, true);
+ }
++
++ return recover;
+ }
+
+ static void bcm_iproc_i2c_slave_read(struct bcm_iproc_i2c_dev *iproc_i2c)
+@@ -420,48 +438,6 @@ static bool bcm_iproc_i2c_slave_isr(struct bcm_iproc_i2c_dev *iproc_i2c,
+ u32 val;
+ u8 value;
+
+- /*
+- * Slave events in case of master-write, master-write-read and,
+- * master-read
+- *
+- * Master-write : only IS_S_RX_EVENT_SHIFT event
+- * Master-write-read: both IS_S_RX_EVENT_SHIFT and IS_S_RD_EVENT_SHIFT
+- * events
+- * Master-read : both IS_S_RX_EVENT_SHIFT and IS_S_RD_EVENT_SHIFT
+- * events or only IS_S_RD_EVENT_SHIFT
+- *
+- * iproc has a slave rx fifo size of 64 bytes. Rx fifo full interrupt
+- * (IS_S_RX_FIFO_FULL_SHIFT) will be generated when RX fifo becomes
+- * full. This can happen if Master issues write requests of more than
+- * 64 bytes.
+- */
+- if (status & BIT(IS_S_RX_EVENT_SHIFT) ||
+- status & BIT(IS_S_RD_EVENT_SHIFT) ||
+- status & BIT(IS_S_RX_FIFO_FULL_SHIFT)) {
+- /* disable slave interrupts */
+- val = iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET);
+- val &= ~iproc_i2c->slave_int_mask;
+- iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, val);
+-
+- if (status & BIT(IS_S_RD_EVENT_SHIFT))
+- /* Master-write-read request */
+- iproc_i2c->slave_rx_only = false;
+- else
+- /* Master-write request only */
+- iproc_i2c->slave_rx_only = true;
+-
+- /* schedule tasklet to read data later */
+- tasklet_schedule(&iproc_i2c->slave_rx_tasklet);
+-
+- /*
+- * clear only IS_S_RX_EVENT_SHIFT and
+- * IS_S_RX_FIFO_FULL_SHIFT interrupt.
+- */
+- val = BIT(IS_S_RX_EVENT_SHIFT);
+- if (status & BIT(IS_S_RX_FIFO_FULL_SHIFT))
+- val |= BIT(IS_S_RX_FIFO_FULL_SHIFT);
+- iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET, val);
+- }
+
+ if (status & BIT(IS_S_TX_UNDERRUN_SHIFT)) {
+ iproc_i2c->tx_underrun++;
+@@ -493,8 +469,9 @@ static bool bcm_iproc_i2c_slave_isr(struct bcm_iproc_i2c_dev *iproc_i2c,
+ * less than PKT_LENGTH bytes were output on the SMBUS
+ */
+ iproc_i2c->slave_int_mask &= ~BIT(IE_S_TX_UNDERRUN_SHIFT);
+- iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET,
+- iproc_i2c->slave_int_mask);
++ val = iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET);
++ val &= ~BIT(IE_S_TX_UNDERRUN_SHIFT);
++ iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, val);
+
+ /* End of SMBUS for Master Read */
+ val = BIT(S_TX_WR_STATUS_SHIFT);
+@@ -515,9 +492,49 @@ static bool bcm_iproc_i2c_slave_isr(struct bcm_iproc_i2c_dev *iproc_i2c,
+ BIT(IS_S_START_BUSY_SHIFT));
+ }
+
+- /* check slave transmit status only if slave is transmitting */
+- if (!iproc_i2c->slave_rx_only)
+- bcm_iproc_i2c_check_slave_status(iproc_i2c);
++ /* if the controller has been reset, immediately return from the ISR */
++ if (bcm_iproc_i2c_check_slave_status(iproc_i2c, status))
++ return true;
++
++ /*
++ * Slave events in case of master-write, master-write-read and,
++ * master-read
++ *
++ * Master-write : only IS_S_RX_EVENT_SHIFT event
++ * Master-write-read: both IS_S_RX_EVENT_SHIFT and IS_S_RD_EVENT_SHIFT
++ * events
++ * Master-read : both IS_S_RX_EVENT_SHIFT and IS_S_RD_EVENT_SHIFT
++ * events or only IS_S_RD_EVENT_SHIFT
++ *
++ * iproc has a slave rx fifo size of 64 bytes. Rx fifo full interrupt
++ * (IS_S_RX_FIFO_FULL_SHIFT) will be generated when RX fifo becomes
++ * full. This can happen if Master issues write requests of more than
++ * 64 bytes.
++ */
++ if (status & BIT(IS_S_RX_EVENT_SHIFT) ||
++ status & BIT(IS_S_RD_EVENT_SHIFT) ||
++ status & BIT(IS_S_RX_FIFO_FULL_SHIFT)) {
++ /* disable slave interrupts */
++ val = iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET);
++ val &= ~iproc_i2c->slave_int_mask;
++ iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, val);
++
++ if (status & BIT(IS_S_RD_EVENT_SHIFT))
++ /* Master-write-read request */
++ iproc_i2c->slave_rx_only = false;
++ else
++ /* Master-write request only */
++ iproc_i2c->slave_rx_only = true;
++
++ /* schedule tasklet to read data later */
++ tasklet_schedule(&iproc_i2c->slave_rx_tasklet);
++
++ /* clear IS_S_RX_FIFO_FULL_SHIFT interrupt */
++ if (status & BIT(IS_S_RX_FIFO_FULL_SHIFT)) {
++ val = BIT(IS_S_RX_FIFO_FULL_SHIFT);
++ iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET, val);
++ }
++ }
+
+ return true;
+ }
+diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
+index 351c81a929a6c..ab0b5691b03e0 100644
+--- a/drivers/i3c/master.c
++++ b/drivers/i3c/master.c
+@@ -1508,9 +1508,11 @@ i3c_master_register_new_i3c_devs(struct i3c_master_controller *master)
+ desc->dev->dev.of_node = desc->boardinfo->of_node;
+
+ ret = device_register(&desc->dev->dev);
+- if (ret)
++ if (ret) {
+ dev_err(&master->dev,
+ "Failed to add I3C device (err = %d)\n", ret);
++ put_device(&desc->dev->dev);
++ }
+ }
+ }
+
+diff --git a/drivers/iio/frequency/adf4350.c b/drivers/iio/frequency/adf4350.c
+index 85e289700c3c5..4abf80f75ef5d 100644
+--- a/drivers/iio/frequency/adf4350.c
++++ b/drivers/iio/frequency/adf4350.c
+@@ -33,7 +33,6 @@ enum {
+
+ struct adf4350_state {
+ struct spi_device *spi;
+- struct regulator *reg;
+ struct gpio_desc *lock_detect_gpiod;
+ struct adf4350_platform_data *pdata;
+ struct clk *clk;
+@@ -469,6 +468,15 @@ static struct adf4350_platform_data *adf4350_parse_dt(struct device *dev)
+ return pdata;
+ }
+
++static void adf4350_power_down(void *data)
++{
++ struct iio_dev *indio_dev = data;
++ struct adf4350_state *st = iio_priv(indio_dev);
++
++ st->regs[ADF4350_REG2] |= ADF4350_REG2_POWER_DOWN_EN;
++ adf4350_sync_config(st);
++}
++
+ static int adf4350_probe(struct spi_device *spi)
+ {
+ struct adf4350_platform_data *pdata;
+@@ -491,31 +499,21 @@ static int adf4350_probe(struct spi_device *spi)
+ }
+
+ if (!pdata->clkin) {
+- clk = devm_clk_get(&spi->dev, "clkin");
++ clk = devm_clk_get_enabled(&spi->dev, "clkin");
+ if (IS_ERR(clk))
+- return -EPROBE_DEFER;
+-
+- ret = clk_prepare_enable(clk);
+- if (ret < 0)
+- return ret;
++ return PTR_ERR(clk);
+ }
+
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+- if (indio_dev == NULL) {
+- ret = -ENOMEM;
+- goto error_disable_clk;
+- }
++ if (indio_dev == NULL)
++ return -ENOMEM;
+
+ st = iio_priv(indio_dev);
+
+- st->reg = devm_regulator_get(&spi->dev, "vcc");
+- if (!IS_ERR(st->reg)) {
+- ret = regulator_enable(st->reg);
+- if (ret)
+- goto error_disable_clk;
+- }
++ ret = devm_regulator_get_enable(&spi->dev, "vcc");
++ if (ret)
++ return ret;
+
+- spi_set_drvdata(spi, indio_dev);
+ st->spi = spi;
+ st->pdata = pdata;
+
+@@ -544,47 +542,21 @@ static int adf4350_probe(struct spi_device *spi)
+
+ st->lock_detect_gpiod = devm_gpiod_get_optional(&spi->dev, NULL,
+ GPIOD_IN);
+- if (IS_ERR(st->lock_detect_gpiod)) {
+- ret = PTR_ERR(st->lock_detect_gpiod);
+- goto error_disable_reg;
+- }
++ if (IS_ERR(st->lock_detect_gpiod))
++ return PTR_ERR(st->lock_detect_gpiod);
+
+ if (pdata->power_up_frequency) {
+ ret = adf4350_set_freq(st, pdata->power_up_frequency);
+ if (ret)
+- goto error_disable_reg;
++ return ret;
+ }
+
+- ret = iio_device_register(indio_dev);
++ ret = devm_add_action_or_reset(&spi->dev, adf4350_power_down, indio_dev);
+ if (ret)
+- goto error_disable_reg;
+-
+- return 0;
+-
+-error_disable_reg:
+- if (!IS_ERR(st->reg))
+- regulator_disable(st->reg);
+-error_disable_clk:
+- clk_disable_unprepare(clk);
+-
+- return ret;
+-}
+-
+-static void adf4350_remove(struct spi_device *spi)
+-{
+- struct iio_dev *indio_dev = spi_get_drvdata(spi);
+- struct adf4350_state *st = iio_priv(indio_dev);
+- struct regulator *reg = st->reg;
+-
+- st->regs[ADF4350_REG2] |= ADF4350_REG2_POWER_DOWN_EN;
+- adf4350_sync_config(st);
+-
+- iio_device_unregister(indio_dev);
+-
+- clk_disable_unprepare(st->clk);
++ return dev_err_probe(&spi->dev, ret,
++ "Failed to add action to managed power down\n");
+
+- if (!IS_ERR(reg))
+- regulator_disable(reg);
++ return devm_iio_device_register(&spi->dev, indio_dev);
+ }
+
+ static const struct of_device_id adf4350_of_match[] = {
+@@ -607,7 +579,6 @@ static struct spi_driver adf4350_driver = {
+ .of_match_table = adf4350_of_match,
+ },
+ .probe = adf4350_probe,
+- .remove = adf4350_remove,
+ .id_table = adf4350_id,
+ };
+ module_spi_driver(adf4350_driver);
+diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
+index 3c422698a51c1..3a9b9a28d858f 100644
+--- a/drivers/infiniband/core/device.c
++++ b/drivers/infiniband/core/device.c
+@@ -804,7 +804,7 @@ static int alloc_port_data(struct ib_device *device)
+ * empty slots at the beginning.
+ */
+ pdata_rcu = kzalloc(struct_size(pdata_rcu, pdata,
+- rdma_end_port(device) + 1),
++ size_add(rdma_end_port(device), 1)),
+ GFP_KERNEL);
+ if (!pdata_rcu)
+ return -ENOMEM;
+diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
+index 0de83d9a4985d..8c69bdb5bb754 100644
+--- a/drivers/infiniband/core/sa_query.c
++++ b/drivers/infiniband/core/sa_query.c
+@@ -2220,7 +2220,9 @@ static int ib_sa_add_one(struct ib_device *device)
+ s = rdma_start_port(device);
+ e = rdma_end_port(device);
+
+- sa_dev = kzalloc(struct_size(sa_dev, port, e - s + 1), GFP_KERNEL);
++ sa_dev = kzalloc(struct_size(sa_dev, port,
++ size_add(size_sub(e, s), 1)),
++ GFP_KERNEL);
+ if (!sa_dev)
+ return -ENOMEM;
+
+diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
+index ee59d73915689..ec5efdc166601 100644
+--- a/drivers/infiniband/core/sysfs.c
++++ b/drivers/infiniband/core/sysfs.c
+@@ -903,7 +903,7 @@ alloc_hw_stats_device(struct ib_device *ibdev)
+ * Two extra attribue elements here, one for the lifespan entry and
+ * one to NULL terminate the list for the sysfs core code
+ */
+- data = kzalloc(struct_size(data, attrs, stats->num_counters + 1),
++ data = kzalloc(struct_size(data, attrs, size_add(stats->num_counters, 1)),
+ GFP_KERNEL);
+ if (!data)
+ goto err_free_stats;
+@@ -1009,7 +1009,7 @@ alloc_hw_stats_port(struct ib_port *port, struct attribute_group *group)
+ * Two extra attribue elements here, one for the lifespan entry and
+ * one to NULL terminate the list for the sysfs core code
+ */
+- data = kzalloc(struct_size(data, attrs, stats->num_counters + 1),
++ data = kzalloc(struct_size(data, attrs, size_add(stats->num_counters, 1)),
+ GFP_KERNEL);
+ if (!data)
+ goto err_free_stats;
+@@ -1140,7 +1140,7 @@ static int setup_gid_attrs(struct ib_port *port,
+ int ret;
+
+ gid_attr_group = kzalloc(struct_size(gid_attr_group, attrs_list,
+- attr->gid_tbl_len * 2),
++ size_mul(attr->gid_tbl_len, 2)),
+ GFP_KERNEL);
+ if (!gid_attr_group)
+ return -ENOMEM;
+@@ -1205,8 +1205,8 @@ static struct ib_port *setup_port(struct ib_core_device *coredev, int port_num,
+ int ret;
+
+ p = kvzalloc(struct_size(p, attrs_list,
+- attr->gid_tbl_len + attr->pkey_tbl_len),
+- GFP_KERNEL);
++ size_add(attr->gid_tbl_len, attr->pkey_tbl_len)),
++ GFP_KERNEL);
+ if (!p)
+ return ERR_PTR(-ENOMEM);
+ p->ibdev = device;
+diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
+index 98cb594cd9a69..d96c78e436f98 100644
+--- a/drivers/infiniband/core/user_mad.c
++++ b/drivers/infiniband/core/user_mad.c
+@@ -1373,7 +1373,9 @@ static int ib_umad_add_one(struct ib_device *device)
+ s = rdma_start_port(device);
+ e = rdma_end_port(device);
+
+- umad_dev = kzalloc(struct_size(umad_dev, ports, e - s + 1), GFP_KERNEL);
++ umad_dev = kzalloc(struct_size(umad_dev, ports,
++ size_add(size_sub(e, s), 1)),
++ GFP_KERNEL);
+ if (!umad_dev)
+ return -ENOMEM;
+
+diff --git a/drivers/infiniband/hw/hfi1/efivar.c b/drivers/infiniband/hw/hfi1/efivar.c
+index 7741a1d69097c..2b5d264f41e51 100644
+--- a/drivers/infiniband/hw/hfi1/efivar.c
++++ b/drivers/infiniband/hw/hfi1/efivar.c
+@@ -112,7 +112,7 @@ int read_hfi1_efi_var(struct hfi1_devdata *dd, const char *kind,
+ unsigned long *size, void **return_data)
+ {
+ char prefix_name[64];
+- char name[64];
++ char name[128];
+ int result;
+
+ /* create a common prefix */
+diff --git a/drivers/infiniband/hw/hns/hns_roce_ah.c b/drivers/infiniband/hw/hns/hns_roce_ah.c
+index 480c062dd04f1..103a7787b3712 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_ah.c
++++ b/drivers/infiniband/hw/hns/hns_roce_ah.c
+@@ -33,7 +33,9 @@
+ #include <linux/pci.h>
+ #include <rdma/ib_addr.h>
+ #include <rdma/ib_cache.h>
++#include "hnae3.h"
+ #include "hns_roce_device.h"
++#include "hns_roce_hw_v2.h"
+
+ static inline u16 get_ah_udp_sport(const struct rdma_ah_attr *ah_attr)
+ {
+@@ -58,6 +60,7 @@ int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibah->device);
+ struct hns_roce_ah *ah = to_hr_ah(ibah);
+ int ret = 0;
++ u32 max_sl;
+
+ if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 && udata)
+ return -EOPNOTSUPP;
+@@ -71,9 +74,17 @@ int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
+ ah->av.hop_limit = grh->hop_limit;
+ ah->av.flowlabel = grh->flow_label;
+ ah->av.udp_sport = get_ah_udp_sport(ah_attr);
+- ah->av.sl = rdma_ah_get_sl(ah_attr);
+ ah->av.tclass = get_tclass(grh);
+
++ ah->av.sl = rdma_ah_get_sl(ah_attr);
++ max_sl = min_t(u32, MAX_SERVICE_LEVEL, hr_dev->caps.sl_num - 1);
++ if (unlikely(ah->av.sl > max_sl)) {
++ ibdev_err_ratelimited(&hr_dev->ib_dev,
++ "failed to set sl, sl (%u) shouldn't be larger than %u.\n",
++ ah->av.sl, max_sl);
++ return -EINVAL;
++ }
++
+ memcpy(ah->av.dgid, grh->dgid.raw, HNS_ROCE_GID_SIZE);
+ memcpy(ah->av.mac, ah_attr->roce.dmac, ETH_ALEN);
+
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+index 33980485ef5ba..8a9d28f81149a 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -270,7 +270,7 @@ static bool check_inl_data_len(struct hns_roce_qp *qp, unsigned int len)
+ struct hns_roce_dev *hr_dev = to_hr_dev(qp->ibqp.device);
+ int mtu = ib_mtu_enum_to_int(qp->path_mtu);
+
+- if (len > qp->max_inline_data || len > mtu) {
++ if (mtu < 0 || len > qp->max_inline_data || len > mtu) {
+ ibdev_err(&hr_dev->ib_dev,
+ "invalid length of data, data len = %u, max inline len = %u, path mtu = %d.\n",
+ len, qp->max_inline_data, mtu);
+@@ -4883,6 +4883,9 @@ static int check_cong_type(struct ib_qp *ibqp,
+ {
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+
++ if (ibqp->qp_type == IB_QPT_UD)
++ hr_dev->caps.cong_type = CONG_TYPE_DCQCN;
++
+ /* different congestion types match different configurations */
+ switch (hr_dev->caps.cong_type) {
+ case CONG_TYPE_DCQCN:
+@@ -4979,22 +4982,32 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp,
+ struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ const struct ib_gid_attr *gid_attr = NULL;
++ u8 sl = rdma_ah_get_sl(&attr->ah_attr);
+ int is_roce_protocol;
+ u16 vlan_id = 0xffff;
+ bool is_udp = false;
++ u32 max_sl;
+ u8 ib_port;
+ u8 hr_port;
+ int ret;
+
++ max_sl = min_t(u32, MAX_SERVICE_LEVEL, hr_dev->caps.sl_num - 1);
++ if (unlikely(sl > max_sl)) {
++ ibdev_err_ratelimited(ibdev,
++ "failed to fill QPC, sl (%u) shouldn't be larger than %u.\n",
++ sl, max_sl);
++ return -EINVAL;
++ }
++
+ /*
+ * If free_mr_en of qp is set, it means that this qp comes from
+ * free mr. This qp will perform the loopback operation.
+ * In the loopback scenario, only sl needs to be set.
+ */
+ if (hr_qp->free_mr_en) {
+- hr_reg_write(context, QPC_SL, rdma_ah_get_sl(&attr->ah_attr));
++ hr_reg_write(context, QPC_SL, sl);
+ hr_reg_clear(qpc_mask, QPC_SL);
+- hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
++ hr_qp->sl = sl;
+ return 0;
+ }
+
+@@ -5061,14 +5074,7 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp,
+ memcpy(context->dgid, grh->dgid.raw, sizeof(grh->dgid.raw));
+ memset(qpc_mask->dgid, 0, sizeof(grh->dgid.raw));
+
+- hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
+- if (unlikely(hr_qp->sl > MAX_SERVICE_LEVEL)) {
+- ibdev_err(ibdev,
+- "failed to fill QPC, sl (%u) shouldn't be larger than %d.\n",
+- hr_qp->sl, MAX_SERVICE_LEVEL);
+- return -EINVAL;
+- }
+-
++ hr_qp->sl = sl;
+ hr_reg_write(context, QPC_SL, hr_qp->sl);
+ hr_reg_clear(qpc_mask, QPC_SL);
+
+@@ -5961,7 +5967,7 @@ static void hns_roce_irq_work_handle(struct work_struct *work)
+ case HNS_ROCE_EVENT_TYPE_COMM_EST:
+ break;
+ case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
+- ibdev_warn(ibdev, "send queue drained.\n");
++ ibdev_dbg(ibdev, "send queue drained.\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
+ ibdev_err(ibdev, "local work queue 0x%x catast error, sub_event type is: %d\n",
+@@ -5976,10 +5982,10 @@ static void hns_roce_irq_work_handle(struct work_struct *work)
+ irq_work->queue_num, irq_work->sub_type);
+ break;
+ case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
+- ibdev_warn(ibdev, "SRQ limit reach.\n");
++ ibdev_dbg(ibdev, "SRQ limit reach.\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
+- ibdev_warn(ibdev, "SRQ last wqe reach.\n");
++ ibdev_dbg(ibdev, "SRQ last wqe reach.\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
+ ibdev_err(ibdev, "SRQ catas error.\n");
+diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
+index 7a95f8677a02c..7b79e6b3f3baa 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
++++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
+@@ -1128,7 +1128,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
+ {
+ struct hns_roce_ib_create_qp_resp resp = {};
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+- struct hns_roce_ib_create_qp ucmd;
++ struct hns_roce_ib_create_qp ucmd = {};
+ int ret;
+
+ mutex_init(&hr_qp->mutex);
+diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
+index 0baf3b5518b46..bce31e28eb303 100644
+--- a/drivers/infiniband/hw/mlx5/main.c
++++ b/drivers/infiniband/hw/mlx5/main.c
+@@ -4027,10 +4027,8 @@ static int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev)
+ return ret;
+
+ ret = mlx5_mkey_cache_init(dev);
+- if (ret) {
++ if (ret)
+ mlx5_ib_warn(dev, "mr cache init failed %d\n", ret);
+- mlx5r_umr_resource_cleanup(dev);
+- }
+ return ret;
+ }
+
+diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
+index ac53ed79ca64c..e0df3017e241a 100644
+--- a/drivers/infiniband/hw/mlx5/qp.c
++++ b/drivers/infiniband/hw/mlx5/qp.c
+@@ -3960,6 +3960,30 @@ static unsigned int get_tx_affinity(struct ib_qp *qp,
+ return tx_affinity;
+ }
+
++static int __mlx5_ib_qp_set_raw_qp_counter(struct mlx5_ib_qp *qp, u32 set_id,
++ struct mlx5_core_dev *mdev)
++{
++ struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
++ struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
++ u32 in[MLX5_ST_SZ_DW(modify_rq_in)] = {};
++ void *rqc;
++
++ if (!qp->rq.wqe_cnt)
++ return 0;
++
++ MLX5_SET(modify_rq_in, in, rq_state, rq->state);
++ MLX5_SET(modify_rq_in, in, uid, to_mpd(qp->ibqp.pd)->uid);
++
++ rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
++ MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);
++
++ MLX5_SET64(modify_rq_in, in, modify_bitmask,
++ MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_RQ_COUNTER_SET_ID);
++ MLX5_SET(rqc, rqc, counter_set_id, set_id);
++
++ return mlx5_core_modify_rq(mdev, rq->base.mqp.qpn, in);
++}
++
+ static int __mlx5_ib_qp_set_counter(struct ib_qp *qp,
+ struct rdma_counter *counter)
+ {
+@@ -3975,6 +3999,9 @@ static int __mlx5_ib_qp_set_counter(struct ib_qp *qp,
+ else
+ set_id = mlx5_ib_get_counters_id(dev, mqp->port - 1);
+
++ if (mqp->type == IB_QPT_RAW_PACKET)
++ return __mlx5_ib_qp_set_raw_qp_counter(mqp, set_id, dev->mdev);
++
+ base = &mqp->trans_qp.base;
+ MLX5_SET(rts2rts_qp_in, in, opcode, MLX5_CMD_OP_RTS2RTS_QP);
+ MLX5_SET(rts2rts_qp_in, in, qpn, base->mqp.qpn);
+diff --git a/drivers/input/rmi4/rmi_bus.c b/drivers/input/rmi4/rmi_bus.c
+index 50a0134b6901b..e6557d5f50ce5 100644
+--- a/drivers/input/rmi4/rmi_bus.c
++++ b/drivers/input/rmi4/rmi_bus.c
+@@ -277,11 +277,11 @@ void rmi_unregister_function(struct rmi_function *fn)
+
+ device_del(&fn->dev);
+ of_node_put(fn->dev.of_node);
+- put_device(&fn->dev);
+
+ for (i = 0; i < fn->num_of_irqs; i++)
+ irq_dispose_mapping(fn->irq[i]);
+
++ put_device(&fn->dev);
+ }
+
+ /**
+diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c
+index e4b2d9ef61b4d..0c6fc954e7296 100644
+--- a/drivers/interconnect/core.c
++++ b/drivers/interconnect/core.c
+@@ -1100,15 +1100,17 @@ void icc_provider_del(struct icc_provider *provider)
+ }
+ EXPORT_SYMBOL_GPL(icc_provider_del);
+
++static const struct of_device_id __maybe_unused ignore_list[] = {
++ { .compatible = "qcom,sc7180-ipa-virt" },
++ { .compatible = "qcom,sdx55-ipa-virt" },
++ { .compatible = "qcom,sm8150-ipa-virt" },
++ {}
++};
++
+ static int of_count_icc_providers(struct device_node *np)
+ {
+ struct device_node *child;
+ int count = 0;
+- const struct of_device_id __maybe_unused ignore_list[] = {
+- { .compatible = "qcom,sc7180-ipa-virt" },
+- { .compatible = "qcom,sdx55-ipa-virt" },
+- {}
+- };
+
+ for_each_available_child_of_node(np, child) {
+ if (of_property_read_bool(child, "#interconnect-cells") &&
+diff --git a/drivers/interconnect/qcom/sc7180.c b/drivers/interconnect/qcom/sc7180.c
+index 82d5e8a8c19ea..6d0450351a5a7 100644
+--- a/drivers/interconnect/qcom/sc7180.c
++++ b/drivers/interconnect/qcom/sc7180.c
+@@ -153,30 +153,238 @@ DEFINE_QNODE(srvc_snoc, SC7180_SLAVE_SERVICE_SNOC, 1, 4);
+ DEFINE_QNODE(xs_qdss_stm, SC7180_SLAVE_QDSS_STM, 1, 4);
+ DEFINE_QNODE(xs_sys_tcu_cfg, SC7180_SLAVE_TCU, 1, 8);
+
+-DEFINE_QBCM(bcm_acv, "ACV", false, &ebi);
+-DEFINE_QBCM(bcm_mc0, "MC0", true, &ebi);
+-DEFINE_QBCM(bcm_sh0, "SH0", true, &qns_llcc);
+-DEFINE_QBCM(bcm_mm0, "MM0", false, &qns_mem_noc_hf);
+-DEFINE_QBCM(bcm_ce0, "CE0", false, &qxm_crypto);
+-DEFINE_QBCM(bcm_cn0, "CN0", true, &qnm_snoc, &xm_qdss_dap, &qhs_a1_noc_cfg, &qhs_a2_noc_cfg, &qhs_ahb2phy0, &qhs_aop, &qhs_aoss, &qhs_boot_rom, &qhs_camera_cfg, &qhs_camera_nrt_throttle_cfg, &qhs_camera_rt_throttle_cfg, &qhs_clk_ctl, &qhs_cpr_cx, &qhs_cpr_mx, &qhs_crypto0_cfg, &qhs_dcc_cfg, &qhs_ddrss_cfg, &qhs_display_cfg, &qhs_display_rt_throttle_cfg, &qhs_display_throttle_cfg, &qhs_glm, &qhs_gpuss_cfg, &qhs_imem_cfg, &qhs_ipa, &qhs_mnoc_cfg, &qhs_mss_cfg, &qhs_npu_cfg, &qhs_npu_dma_throttle_cfg, &qhs_npu_dsp_throttle_cfg, &qhs_pimem_cfg, &qhs_prng, &qhs_qdss_cfg, &qhs_qm_cfg, &qhs_qm_mpu_cfg, &qhs_qup0, &qhs_qup1, &qhs_security, &qhs_snoc_cfg, &qhs_tcsr, &qhs_tlmm_1, &qhs_tlmm_2, &qhs_tlmm_3, &qhs_ufs_mem_cfg, &qhs_usb3, &qhs_venus_cfg, &qhs_venus_throttle_cfg, &qhs_vsense_ctrl_cfg, &srvc_cnoc);
+-DEFINE_QBCM(bcm_mm1, "MM1", false, &qxm_camnoc_hf0_uncomp, &qxm_camnoc_hf1_uncomp, &qxm_camnoc_sf_uncomp, &qhm_mnoc_cfg, &qxm_mdp0, &qxm_rot, &qxm_venus0, &qxm_venus_arm9);
+-DEFINE_QBCM(bcm_sh2, "SH2", false, &acm_sys_tcu);
+-DEFINE_QBCM(bcm_mm2, "MM2", false, &qns_mem_noc_sf);
+-DEFINE_QBCM(bcm_qup0, "QUP0", false, &qup_core_master_1, &qup_core_master_2);
+-DEFINE_QBCM(bcm_sh3, "SH3", false, &qnm_cmpnoc);
+-DEFINE_QBCM(bcm_sh4, "SH4", false, &acm_apps0);
+-DEFINE_QBCM(bcm_sn0, "SN0", true, &qns_gemnoc_sf);
+-DEFINE_QBCM(bcm_co0, "CO0", false, &qns_cdsp_gemnoc);
+-DEFINE_QBCM(bcm_sn1, "SN1", false, &qxs_imem);
+-DEFINE_QBCM(bcm_cn1, "CN1", false, &qhm_qspi, &xm_sdc2, &xm_emmc, &qhs_ahb2phy2, &qhs_emmc_cfg, &qhs_pdm, &qhs_qspi, &qhs_sdc2);
+-DEFINE_QBCM(bcm_sn2, "SN2", false, &qxm_pimem, &qns_gemnoc_gc);
+-DEFINE_QBCM(bcm_co2, "CO2", false, &qnm_npu);
+-DEFINE_QBCM(bcm_sn3, "SN3", false, &qxs_pimem);
+-DEFINE_QBCM(bcm_co3, "CO3", false, &qxm_npu_dsp);
+-DEFINE_QBCM(bcm_sn4, "SN4", false, &xs_qdss_stm);
+-DEFINE_QBCM(bcm_sn7, "SN7", false, &qnm_aggre1_noc);
+-DEFINE_QBCM(bcm_sn9, "SN9", false, &qnm_aggre2_noc);
+-DEFINE_QBCM(bcm_sn12, "SN12", false, &qnm_gemnoc);
++static struct qcom_icc_bcm bcm_acv = {
++ .name = "ACV",
++ .enable_mask = BIT(3),
++ .keepalive = false,
++ .num_nodes = 1,
++ .nodes = { &ebi },
++};
++
++static struct qcom_icc_bcm bcm_mc0 = {
++ .name = "MC0",
++ .keepalive = true,
++ .num_nodes = 1,
++ .nodes = { &ebi },
++};
++
++static struct qcom_icc_bcm bcm_sh0 = {
++ .name = "SH0",
++ .keepalive = true,
++ .num_nodes = 1,
++ .nodes = { &qns_llcc },
++};
++
++static struct qcom_icc_bcm bcm_mm0 = {
++ .name = "MM0",
++ .keepalive = false,
++ .num_nodes = 1,
++ .nodes = { &qns_mem_noc_hf },
++};
++
++static struct qcom_icc_bcm bcm_ce0 = {
++ .name = "CE0",
++ .keepalive = false,
++ .num_nodes = 1,
++ .nodes = { &qxm_crypto },
++};
++
++static struct qcom_icc_bcm bcm_cn0 = {
++ .name = "CN0",
++ .keepalive = true,
++ .num_nodes = 48,
++ .nodes = { &qnm_snoc,
++ &xm_qdss_dap,
++ &qhs_a1_noc_cfg,
++ &qhs_a2_noc_cfg,
++ &qhs_ahb2phy0,
++ &qhs_aop,
++ &qhs_aoss,
++ &qhs_boot_rom,
++ &qhs_camera_cfg,
++ &qhs_camera_nrt_throttle_cfg,
++ &qhs_camera_rt_throttle_cfg,
++ &qhs_clk_ctl,
++ &qhs_cpr_cx,
++ &qhs_cpr_mx,
++ &qhs_crypto0_cfg,
++ &qhs_dcc_cfg,
++ &qhs_ddrss_cfg,
++ &qhs_display_cfg,
++ &qhs_display_rt_throttle_cfg,
++ &qhs_display_throttle_cfg,
++ &qhs_glm,
++ &qhs_gpuss_cfg,
++ &qhs_imem_cfg,
++ &qhs_ipa,
++ &qhs_mnoc_cfg,
++ &qhs_mss_cfg,
++ &qhs_npu_cfg,
++ &qhs_npu_dma_throttle_cfg,
++ &qhs_npu_dsp_throttle_cfg,
++ &qhs_pimem_cfg,
++ &qhs_prng,
++ &qhs_qdss_cfg,
++ &qhs_qm_cfg,
++ &qhs_qm_mpu_cfg,
++ &qhs_qup0,
++ &qhs_qup1,
++ &qhs_security,
++ &qhs_snoc_cfg,
++ &qhs_tcsr,
++ &qhs_tlmm_1,
++ &qhs_tlmm_2,
++ &qhs_tlmm_3,
++ &qhs_ufs_mem_cfg,
++ &qhs_usb3,
++ &qhs_venus_cfg,
++ &qhs_venus_throttle_cfg,
++ &qhs_vsense_ctrl_cfg,
++ &srvc_cnoc
++ },
++};
++
++static struct qcom_icc_bcm bcm_mm1 = {
++ .name = "MM1",
++ .keepalive = false,
++ .num_nodes = 8,
++ .nodes = { &qxm_camnoc_hf0_uncomp,
++ &qxm_camnoc_hf1_uncomp,
++ &qxm_camnoc_sf_uncomp,
++ &qhm_mnoc_cfg,
++ &qxm_mdp0,
++ &qxm_rot,
++ &qxm_venus0,
++ &qxm_venus_arm9
++ },
++};
++
++static struct qcom_icc_bcm bcm_sh2 = {
++ .name = "SH2",
++ .keepalive = false,
++ .num_nodes = 1,
++ .nodes = { &acm_sys_tcu },
++};
++
++static struct qcom_icc_bcm bcm_mm2 = {
++ .name = "MM2",
++ .keepalive = false,
++ .num_nodes = 1,
++ .nodes = { &qns_mem_noc_sf },
++};
++
++static struct qcom_icc_bcm bcm_qup0 = {
++ .name = "QUP0",
++ .keepalive = false,
++ .num_nodes = 2,
++ .nodes = { &qup_core_master_1, &qup_core_master_2 },
++};
++
++static struct qcom_icc_bcm bcm_sh3 = {
++ .name = "SH3",
++ .keepalive = false,
++ .num_nodes = 1,
++ .nodes = { &qnm_cmpnoc },
++};
++
++static struct qcom_icc_bcm bcm_sh4 = {
++ .name = "SH4",
++ .keepalive = false,
++ .num_nodes = 1,
++ .nodes = { &acm_apps0 },
++};
++
++static struct qcom_icc_bcm bcm_sn0 = {
++ .name = "SN0",
++ .keepalive = true,
++ .num_nodes = 1,
++ .nodes = { &qns_gemnoc_sf },
++};
++
++static struct qcom_icc_bcm bcm_co0 = {
++ .name = "CO0",
++ .keepalive = false,
++ .num_nodes = 1,
++ .nodes = { &qns_cdsp_gemnoc },
++};
++
++static struct qcom_icc_bcm bcm_sn1 = {
++ .name = "SN1",
++ .keepalive = false,
++ .num_nodes = 1,
++ .nodes = { &qxs_imem },
++};
++
++static struct qcom_icc_bcm bcm_cn1 = {
++ .name = "CN1",
++ .keepalive = false,
++ .num_nodes = 8,
++ .nodes = { &qhm_qspi,
++ &xm_sdc2,
++ &xm_emmc,
++ &qhs_ahb2phy2,
++ &qhs_emmc_cfg,
++ &qhs_pdm,
++ &qhs_qspi,
++ &qhs_sdc2
++ },
++};
++
++static struct qcom_icc_bcm bcm_sn2 = {
++ .name = "SN2",
++ .keepalive = false,
++ .num_nodes = 2,
++ .nodes = { &qxm_pimem, &qns_gemnoc_gc },
++};
++
++static struct qcom_icc_bcm bcm_co2 = {
++ .name = "CO2",
++ .keepalive = false,
++ .num_nodes = 1,
++ .nodes = { &qnm_npu },
++};
++
++static struct qcom_icc_bcm bcm_sn3 = {
++ .name = "SN3",
++ .keepalive = false,
++ .num_nodes = 1,
++ .nodes = { &qxs_pimem },
++};
++
++static struct qcom_icc_bcm bcm_co3 = {
++ .name = "CO3",
++ .keepalive = false,
++ .num_nodes = 1,
++ .nodes = { &qxm_npu_dsp },
++};
++
++static struct qcom_icc_bcm bcm_sn4 = {
++ .name = "SN4",
++ .keepalive = false,
++ .num_nodes = 1,
++ .nodes = { &xs_qdss_stm },
++};
++
++static struct qcom_icc_bcm bcm_sn7 = {
++ .name = "SN7",
++ .keepalive = false,
++ .num_nodes = 1,
++ .nodes = { &qnm_aggre1_noc },
++};
++
++static struct qcom_icc_bcm bcm_sn9 = {
++ .name = "SN9",
++ .keepalive = false,
++ .num_nodes = 1,
++ .nodes = { &qnm_aggre2_noc },
++};
++
++static struct qcom_icc_bcm bcm_sn12 = {
++ .name = "SN12",
++ .keepalive = false,
++ .num_nodes = 1,
++ .nodes = { &qnm_gemnoc },
++};
+
+ static struct qcom_icc_bcm * const aggre1_noc_bcms[] = {
+ &bcm_cn1,
+diff --git a/drivers/interconnect/qcom/sc7280.c b/drivers/interconnect/qcom/sc7280.c
+index 971f538bc98ad..3c39edd21b6ca 100644
+--- a/drivers/interconnect/qcom/sc7280.c
++++ b/drivers/interconnect/qcom/sc7280.c
+@@ -1284,6 +1284,7 @@ static struct qcom_icc_node srvc_snoc = {
+
+ static struct qcom_icc_bcm bcm_acv = {
+ .name = "ACV",
++ .enable_mask = BIT(3),
+ .num_nodes = 1,
+ .nodes = { &ebi },
+ };
+diff --git a/drivers/interconnect/qcom/sc8180x.c b/drivers/interconnect/qcom/sc8180x.c
+index 8e32ca958824c..83461e31774ec 100644
+--- a/drivers/interconnect/qcom/sc8180x.c
++++ b/drivers/interconnect/qcom/sc8180x.c
+@@ -1360,6 +1360,7 @@ static struct qcom_icc_node slv_qup_core_2 = {
+
+ static struct qcom_icc_bcm bcm_acv = {
+ .name = "ACV",
++ .enable_mask = BIT(3),
+ .num_nodes = 1,
+ .nodes = { &slv_ebi }
+ };
+diff --git a/drivers/interconnect/qcom/sc8280xp.c b/drivers/interconnect/qcom/sc8280xp.c
+index 507fe5f89791a..489f259a02e5b 100644
+--- a/drivers/interconnect/qcom/sc8280xp.c
++++ b/drivers/interconnect/qcom/sc8280xp.c
+@@ -1727,6 +1727,7 @@ static struct qcom_icc_node srvc_snoc = {
+
+ static struct qcom_icc_bcm bcm_acv = {
+ .name = "ACV",
++ .enable_mask = BIT(3),
+ .num_nodes = 1,
+ .nodes = { &ebi },
+ };
+diff --git a/drivers/interconnect/qcom/sdm845.c b/drivers/interconnect/qcom/sdm845.c
+index 954e7bd13fc41..02cf890684441 100644
+--- a/drivers/interconnect/qcom/sdm845.c
++++ b/drivers/interconnect/qcom/sdm845.c
+@@ -146,34 +146,256 @@ DEFINE_QNODE(srvc_snoc, SDM845_SLAVE_SERVICE_SNOC, 1, 4);
+ DEFINE_QNODE(xs_qdss_stm, SDM845_SLAVE_QDSS_STM, 1, 4);
+ DEFINE_QNODE(xs_sys_tcu_cfg, SDM845_SLAVE_TCU, 1, 8);
+
+-DEFINE_QBCM(bcm_acv, "ACV", false, &ebi);
+-DEFINE_QBCM(bcm_mc0, "MC0", true, &ebi);
+-DEFINE_QBCM(bcm_sh0, "SH0", true, &qns_llcc);
+-DEFINE_QBCM(bcm_mm0, "MM0", false, &qns_mem_noc_hf);
+-DEFINE_QBCM(bcm_sh1, "SH1", false, &qns_apps_io);
+-DEFINE_QBCM(bcm_mm1, "MM1", true, &qxm_camnoc_hf0_uncomp, &qxm_camnoc_hf1_uncomp, &qxm_camnoc_sf_uncomp, &qxm_camnoc_hf0, &qxm_camnoc_hf1, &qxm_mdp0, &qxm_mdp1);
+-DEFINE_QBCM(bcm_sh2, "SH2", false, &qns_memnoc_snoc);
+-DEFINE_QBCM(bcm_mm2, "MM2", false, &qns2_mem_noc);
+-DEFINE_QBCM(bcm_sh3, "SH3", false, &acm_tcu);
+-DEFINE_QBCM(bcm_mm3, "MM3", false, &qxm_camnoc_sf, &qxm_rot, &qxm_venus0, &qxm_venus1, &qxm_venus_arm9);
+-DEFINE_QBCM(bcm_sh5, "SH5", false, &qnm_apps);
+-DEFINE_QBCM(bcm_sn0, "SN0", true, &qns_memnoc_sf);
+-DEFINE_QBCM(bcm_ce0, "CE0", false, &qxm_crypto);
+-DEFINE_QBCM(bcm_cn0, "CN0", false, &qhm_spdm, &qhm_tic, &qnm_snoc, &xm_qdss_dap, &qhs_a1_noc_cfg, &qhs_a2_noc_cfg, &qhs_aop, &qhs_aoss, &qhs_camera_cfg, &qhs_clk_ctl, &qhs_compute_dsp_cfg, &qhs_cpr_cx, &qhs_crypto0_cfg, &qhs_dcc_cfg, &qhs_ddrss_cfg, &qhs_display_cfg, &qhs_glm, &qhs_gpuss_cfg, &qhs_imem_cfg, &qhs_ipa, &qhs_mnoc_cfg, &qhs_pcie0_cfg, &qhs_pcie_gen3_cfg, &qhs_pdm, &qhs_phy_refgen_south, &qhs_pimem_cfg, &qhs_prng, &qhs_qdss_cfg, &qhs_qupv3_north, &qhs_qupv3_south, &qhs_sdc2, &qhs_sdc4, &qhs_snoc_cfg, &qhs_spdm, &qhs_spss_cfg, &qhs_tcsr, &qhs_tlmm_north, &qhs_tlmm_south, &qhs_tsif, &qhs_ufs_card_cfg, &qhs_ufs_mem_cfg, &qhs_usb3_0, &qhs_usb3_1, &qhs_venus_cfg, &qhs_vsense_ctrl_cfg, &qns_cnoc_a2noc, &srvc_cnoc);
+-DEFINE_QBCM(bcm_qup0, "QUP0", false, &qhm_qup1, &qhm_qup2);
+-DEFINE_QBCM(bcm_sn1, "SN1", false, &qxs_imem);
+-DEFINE_QBCM(bcm_sn2, "SN2", false, &qns_memnoc_gc);
+-DEFINE_QBCM(bcm_sn3, "SN3", false, &qns_cnoc);
+-DEFINE_QBCM(bcm_sn4, "SN4", false, &qxm_pimem);
+-DEFINE_QBCM(bcm_sn5, "SN5", false, &xs_qdss_stm);
+-DEFINE_QBCM(bcm_sn6, "SN6", false, &qhs_apss, &srvc_snoc, &xs_sys_tcu_cfg);
+-DEFINE_QBCM(bcm_sn7, "SN7", false, &qxs_pcie);
+-DEFINE_QBCM(bcm_sn8, "SN8", false, &qxs_pcie_gen3);
+-DEFINE_QBCM(bcm_sn9, "SN9", false, &srvc_aggre1_noc, &qnm_aggre1_noc);
+-DEFINE_QBCM(bcm_sn11, "SN11", false, &srvc_aggre2_noc, &qnm_aggre2_noc);
+-DEFINE_QBCM(bcm_sn12, "SN12", false, &qnm_gladiator_sodv, &xm_gic);
+-DEFINE_QBCM(bcm_sn14, "SN14", false, &qnm_pcie_anoc);
+-DEFINE_QBCM(bcm_sn15, "SN15", false, &qnm_memnoc);
++static struct qcom_icc_bcm bcm_acv = {
++ .name = "ACV",
++ .enable_mask = BIT(3),
++ .keepalive = false,
++ .num_nodes = 1,
++ .nodes = { &ebi },
++};
++
++static struct qcom_icc_bcm bcm_mc0 = {
++ .name = "MC0",
++ .keepalive = true,
++ .num_nodes = 1,
++ .nodes = { &ebi },
++};
++
++static struct qcom_icc_bcm bcm_sh0 = {
++ .name = "SH0",
++ .keepalive = true,
++ .num_nodes = 1,
++ .nodes = { &qns_llcc },
++};
++
++static struct qcom_icc_bcm bcm_mm0 = {
++ .name = "MM0",
++ .keepalive = false,
++ .num_nodes = 1,
++ .nodes = { &qns_mem_noc_hf },
++};
++
++static struct qcom_icc_bcm bcm_sh1 = {
++ .name = "SH1",
++ .keepalive = false,
++ .num_nodes = 1,
++ .nodes = { &qns_apps_io },
++};
++
++static struct qcom_icc_bcm bcm_mm1 = {
++ .name = "MM1",
++ .keepalive = true,
++ .num_nodes = 7,
++ .nodes = { &qxm_camnoc_hf0_uncomp,
++ &qxm_camnoc_hf1_uncomp,
++ &qxm_camnoc_sf_uncomp,
++ &qxm_camnoc_hf0,
++ &qxm_camnoc_hf1,
++ &qxm_mdp0,
++ &qxm_mdp1
++ },
++};
++
++static struct qcom_icc_bcm bcm_sh2 = {
++ .name = "SH2",
++ .keepalive = false,
++ .num_nodes = 1,
++ .nodes = { &qns_memnoc_snoc },
++};
++
++static struct qcom_icc_bcm bcm_mm2 = {
++ .name = "MM2",
++ .keepalive = false,
++ .num_nodes = 1,
++ .nodes = { &qns2_mem_noc },
++};
++
++static struct qcom_icc_bcm bcm_sh3 = {
++ .name = "SH3",
++ .keepalive = false,
++ .num_nodes = 1,
++ .nodes = { &acm_tcu },
++};
++
++static struct qcom_icc_bcm bcm_mm3 = {
++ .name = "MM3",
++ .keepalive = false,
++ .num_nodes = 5,
++ .nodes = { &qxm_camnoc_sf, &qxm_rot, &qxm_venus0, &qxm_venus1, &qxm_venus_arm9 },
++};
++
++static struct qcom_icc_bcm bcm_sh5 = {
++ .name = "SH5",
++ .keepalive = false,
++ .num_nodes = 1,
++ .nodes = { &qnm_apps },
++};
++
++static struct qcom_icc_bcm bcm_sn0 = {
++ .name = "SN0",
++ .keepalive = true,
++ .num_nodes = 1,
++ .nodes = { &qns_memnoc_sf },
++};
++
++static struct qcom_icc_bcm bcm_ce0 = {
++ .name = "CE0",
++ .keepalive = false,
++ .num_nodes = 1,
++ .nodes = { &qxm_crypto },
++};
++
++static struct qcom_icc_bcm bcm_cn0 = {
++ .name = "CN0",
++ .keepalive = false,
++ .num_nodes = 47,
++ .nodes = { &qhm_spdm,
++ &qhm_tic,
++ &qnm_snoc,
++ &xm_qdss_dap,
++ &qhs_a1_noc_cfg,
++ &qhs_a2_noc_cfg,
++ &qhs_aop,
++ &qhs_aoss,
++ &qhs_camera_cfg,
++ &qhs_clk_ctl,
++ &qhs_compute_dsp_cfg,
++ &qhs_cpr_cx,
++ &qhs_crypto0_cfg,
++ &qhs_dcc_cfg,
++ &qhs_ddrss_cfg,
++ &qhs_display_cfg,
++ &qhs_glm,
++ &qhs_gpuss_cfg,
++ &qhs_imem_cfg,
++ &qhs_ipa,
++ &qhs_mnoc_cfg,
++ &qhs_pcie0_cfg,
++ &qhs_pcie_gen3_cfg,
++ &qhs_pdm,
++ &qhs_phy_refgen_south,
++ &qhs_pimem_cfg,
++ &qhs_prng,
++ &qhs_qdss_cfg,
++ &qhs_qupv3_north,
++ &qhs_qupv3_south,
++ &qhs_sdc2,
++ &qhs_sdc4,
++ &qhs_snoc_cfg,
++ &qhs_spdm,
++ &qhs_spss_cfg,
++ &qhs_tcsr,
++ &qhs_tlmm_north,
++ &qhs_tlmm_south,
++ &qhs_tsif,
++ &qhs_ufs_card_cfg,
++ &qhs_ufs_mem_cfg,
++ &qhs_usb3_0,
++ &qhs_usb3_1,
++ &qhs_venus_cfg,
++ &qhs_vsense_ctrl_cfg,
++ &qns_cnoc_a2noc,
++ &srvc_cnoc
++ },
++};
++
++static struct qcom_icc_bcm bcm_qup0 = {
++ .name = "QUP0",
++ .keepalive = false,
++ .num_nodes = 2,
++ .nodes = { &qhm_qup1, &qhm_qup2 },
++};
++
++static struct qcom_icc_bcm bcm_sn1 = {
++ .name = "SN1",
++ .keepalive = false,
++ .num_nodes = 1,
++ .nodes = { &qxs_imem },
++};
++
++static struct qcom_icc_bcm bcm_sn2 = {
++ .name = "SN2",
++ .keepalive = false,
++ .num_nodes = 1,
++ .nodes = { &qns_memnoc_gc },
++};
++
++static struct qcom_icc_bcm bcm_sn3 = {
++ .name = "SN3",
++ .keepalive = false,
++ .num_nodes = 1,
++ .nodes = { &qns_cnoc },
++};
++
++static struct qcom_icc_bcm bcm_sn4 = {
++ .name = "SN4",
++ .keepalive = false,
++ .num_nodes = 1,
++ .nodes = { &qxm_pimem },
++};
++
++static struct qcom_icc_bcm bcm_sn5 = {
++ .name = "SN5",
++ .keepalive = false,
++ .num_nodes = 1,
++ .nodes = { &xs_qdss_stm },
++};
++
++static struct qcom_icc_bcm bcm_sn6 = {
++ .name = "SN6",
++ .keepalive = false,
++ .num_nodes = 3,
++ .nodes = { &qhs_apss, &srvc_snoc, &xs_sys_tcu_cfg },
++};
++
++static struct qcom_icc_bcm bcm_sn7 = {
++ .name = "SN7",
++ .keepalive = false,
++ .num_nodes = 1,
++ .nodes = { &qxs_pcie },
++};
++
++static struct qcom_icc_bcm bcm_sn8 = {
++ .name = "SN8",
++ .keepalive = false,
++ .num_nodes = 1,
++ .nodes = { &qxs_pcie_gen3 },
++};
++
++static struct qcom_icc_bcm bcm_sn9 = {
++ .name = "SN9",
++ .keepalive = false,
++ .num_nodes = 2,
++ .nodes = { &srvc_aggre1_noc, &qnm_aggre1_noc },
++};
++
++static struct qcom_icc_bcm bcm_sn11 = {
++ .name = "SN11",
++ .keepalive = false,
++ .num_nodes = 2,
++ .nodes = { &srvc_aggre2_noc, &qnm_aggre2_noc },
++};
++
++static struct qcom_icc_bcm bcm_sn12 = {
++ .name = "SN12",
++ .keepalive = false,
++ .num_nodes = 2,
++ .nodes = { &qnm_gladiator_sodv, &xm_gic },
++};
++
++static struct qcom_icc_bcm bcm_sn14 = {
++ .name = "SN14",
++ .keepalive = false,
++ .num_nodes = 1,
++ .nodes = { &qnm_pcie_anoc },
++};
++
++static struct qcom_icc_bcm bcm_sn15 = {
++ .name = "SN15",
++ .keepalive = false,
++ .num_nodes = 1,
++ .nodes = { &qnm_memnoc },
++};
+
+ static struct qcom_icc_bcm * const aggre1_noc_bcms[] = {
+ &bcm_sn9,
+diff --git a/drivers/interconnect/qcom/sm6350.c b/drivers/interconnect/qcom/sm6350.c
+index a3d46e59444e0..aae4b43b730c0 100644
+--- a/drivers/interconnect/qcom/sm6350.c
++++ b/drivers/interconnect/qcom/sm6350.c
+@@ -142,31 +142,233 @@ DEFINE_QNODE(srvc_snoc, SM6350_SLAVE_SERVICE_SNOC, 1, 4);
+ DEFINE_QNODE(xs_qdss_stm, SM6350_SLAVE_QDSS_STM, 1, 4);
+ DEFINE_QNODE(xs_sys_tcu_cfg, SM6350_SLAVE_TCU, 1, 8);
+
+-DEFINE_QBCM(bcm_acv, "ACV", false, &ebi);
+-DEFINE_QBCM(bcm_ce0, "CE0", false, &qxm_crypto);
+-DEFINE_QBCM(bcm_cn0, "CN0", true, &qnm_snoc, &xm_qdss_dap, &qhs_a1_noc_cfg, &qhs_a2_noc_cfg, &qhs_ahb2phy0, &qhs_aoss, &qhs_boot_rom, &qhs_camera_cfg, &qhs_camera_nrt_thrott_cfg, &qhs_camera_rt_throttle_cfg, &qhs_clk_ctl, &qhs_cpr_cx, &qhs_cpr_mx, &qhs_crypto0_cfg, &qhs_dcc_cfg, &qhs_ddrss_cfg, &qhs_display_cfg, &qhs_display_throttle_cfg, &qhs_glm, &qhs_gpuss_cfg, &qhs_imem_cfg, &qhs_ipa, &qhs_mnoc_cfg, &qhs_mss_cfg, &qhs_npu_cfg, &qhs_pimem_cfg, &qhs_prng, &qhs_qdss_cfg, &qhs_qm_cfg, &qhs_qm_mpu_cfg, &qhs_qup0, &qhs_qup1, &qhs_security, &qhs_snoc_cfg, &qhs_tcsr, &qhs_ufs_mem_cfg, &qhs_usb3_0, &qhs_venus_cfg, &qhs_venus_throttle_cfg, &qhs_vsense_ctrl_cfg, &srvc_cnoc);
+-DEFINE_QBCM(bcm_cn1, "CN1", false, &xm_emmc, &xm_sdc2, &qhs_ahb2phy2, &qhs_emmc_cfg, &qhs_pdm, &qhs_sdc2);
+-DEFINE_QBCM(bcm_co0, "CO0", false, &qns_cdsp_gemnoc);
+-DEFINE_QBCM(bcm_co2, "CO2", false, &qnm_npu);
+-DEFINE_QBCM(bcm_co3, "CO3", false, &qxm_npu_dsp);
+-DEFINE_QBCM(bcm_mc0, "MC0", true, &ebi);
+-DEFINE_QBCM(bcm_mm0, "MM0", true, &qns_mem_noc_hf);
+-DEFINE_QBCM(bcm_mm1, "MM1", true, &qxm_camnoc_hf0_uncomp, &qxm_camnoc_icp_uncomp, &qxm_camnoc_sf_uncomp, &qxm_camnoc_hf, &qxm_mdp0);
+-DEFINE_QBCM(bcm_mm2, "MM2", false, &qns_mem_noc_sf);
+-DEFINE_QBCM(bcm_mm3, "MM3", false, &qhm_mnoc_cfg, &qnm_video0, &qnm_video_cvp, &qxm_camnoc_sf);
+-DEFINE_QBCM(bcm_qup0, "QUP0", false, &qup0_core_master, &qup1_core_master, &qup0_core_slave, &qup1_core_slave);
+-DEFINE_QBCM(bcm_sh0, "SH0", true, &qns_llcc);
+-DEFINE_QBCM(bcm_sh2, "SH2", false, &acm_sys_tcu);
+-DEFINE_QBCM(bcm_sh3, "SH3", false, &qnm_cmpnoc);
+-DEFINE_QBCM(bcm_sh4, "SH4", false, &acm_apps);
+-DEFINE_QBCM(bcm_sn0, "SN0", true, &qns_gemnoc_sf);
+-DEFINE_QBCM(bcm_sn1, "SN1", false, &qxs_imem);
+-DEFINE_QBCM(bcm_sn2, "SN2", false, &qns_gemnoc_gc);
+-DEFINE_QBCM(bcm_sn3, "SN3", false, &qxs_pimem);
+-DEFINE_QBCM(bcm_sn4, "SN4", false, &xs_qdss_stm);
+-DEFINE_QBCM(bcm_sn5, "SN5", false, &qnm_aggre1_noc);
+-DEFINE_QBCM(bcm_sn6, "SN6", false, &qnm_aggre2_noc);
+-DEFINE_QBCM(bcm_sn10, "SN10", false, &qnm_gemnoc);
++static struct qcom_icc_bcm bcm_acv = {
++ .name = "ACV",
++ .enable_mask = BIT(3),
++ .keepalive = false,
++ .num_nodes = 1,
++ .nodes = { &ebi },
++};
++
++static struct qcom_icc_bcm bcm_ce0 = {
++ .name = "CE0",
++ .keepalive = false,
++ .num_nodes = 1,
++ .nodes = { &qxm_crypto },
++};
++
++static struct qcom_icc_bcm bcm_cn0 = {
++ .name = "CN0",
++ .keepalive = true,
++ .num_nodes = 41,
++ .nodes = { &qnm_snoc,
++ &xm_qdss_dap,
++ &qhs_a1_noc_cfg,
++ &qhs_a2_noc_cfg,
++ &qhs_ahb2phy0,
++ &qhs_aoss,
++ &qhs_boot_rom,
++ &qhs_camera_cfg,
++ &qhs_camera_nrt_thrott_cfg,
++ &qhs_camera_rt_throttle_cfg,
++ &qhs_clk_ctl,
++ &qhs_cpr_cx,
++ &qhs_cpr_mx,
++ &qhs_crypto0_cfg,
++ &qhs_dcc_cfg,
++ &qhs_ddrss_cfg,
++ &qhs_display_cfg,
++ &qhs_display_throttle_cfg,
++ &qhs_glm,
++ &qhs_gpuss_cfg,
++ &qhs_imem_cfg,
++ &qhs_ipa,
++ &qhs_mnoc_cfg,
++ &qhs_mss_cfg,
++ &qhs_npu_cfg,
++ &qhs_pimem_cfg,
++ &qhs_prng,
++ &qhs_qdss_cfg,
++ &qhs_qm_cfg,
++ &qhs_qm_mpu_cfg,
++ &qhs_qup0,
++ &qhs_qup1,
++ &qhs_security,
++ &qhs_snoc_cfg,
++ &qhs_tcsr,
++ &qhs_ufs_mem_cfg,
++ &qhs_usb3_0,
++ &qhs_venus_cfg,
++ &qhs_venus_throttle_cfg,
++ &qhs_vsense_ctrl_cfg,
++ &srvc_cnoc
++ },
++};
++
++static struct qcom_icc_bcm bcm_cn1 = {
++ .name = "CN1",
++ .keepalive = false,
++ .num_nodes = 6,
++ .nodes = { &xm_emmc,
++ &xm_sdc2,
++ &qhs_ahb2phy2,
++ &qhs_emmc_cfg,
++ &qhs_pdm,
++ &qhs_sdc2
++ },
++};
++
++static struct qcom_icc_bcm bcm_co0 = {
++ .name = "CO0",
++ .keepalive = false,
++ .num_nodes = 1,
++ .nodes = { &qns_cdsp_gemnoc },
++};
++
++static struct qcom_icc_bcm bcm_co2 = {
++ .name = "CO2",
++ .keepalive = false,
++ .num_nodes = 1,
++ .nodes = { &qnm_npu },
++};
++
++static struct qcom_icc_bcm bcm_co3 = {
++ .name = "CO3",
++ .keepalive = false,
++ .num_nodes = 1,
++ .nodes = { &qxm_npu_dsp },
++};
++
++static struct qcom_icc_bcm bcm_mc0 = {
++ .name = "MC0",
++ .keepalive = true,
++ .num_nodes = 1,
++ .nodes = { &ebi },
++};
++
++static struct qcom_icc_bcm bcm_mm0 = {
++ .name = "MM0",
++ .keepalive = true,
++ .num_nodes = 1,
++ .nodes = { &qns_mem_noc_hf },
++};
++
++static struct qcom_icc_bcm bcm_mm1 = {
++ .name = "MM1",
++ .keepalive = true,
++ .num_nodes = 5,
++ .nodes = { &qxm_camnoc_hf0_uncomp,
++ &qxm_camnoc_icp_uncomp,
++ &qxm_camnoc_sf_uncomp,
++ &qxm_camnoc_hf,
++ &qxm_mdp0
++ },
++};
++
++static struct qcom_icc_bcm bcm_mm2 = {
++ .name = "MM2",
++ .keepalive = false,
++ .num_nodes = 1,
++ .nodes = { &qns_mem_noc_sf },
++};
++
++static struct qcom_icc_bcm bcm_mm3 = {
++ .name = "MM3",
++ .keepalive = false,
++ .num_nodes = 4,
++ .nodes = { &qhm_mnoc_cfg, &qnm_video0, &qnm_video_cvp, &qxm_camnoc_sf },
++};
++
++static struct qcom_icc_bcm bcm_qup0 = {
++ .name = "QUP0",
++ .keepalive = false,
++ .num_nodes = 4,
++ .nodes = { &qup0_core_master, &qup1_core_master, &qup0_core_slave, &qup1_core_slave },
++};
++
++static struct qcom_icc_bcm bcm_sh0 = {
++ .name = "SH0",
++ .keepalive = true,
++ .num_nodes = 1,
++ .nodes = { &qns_llcc },
++};
++
++static struct qcom_icc_bcm bcm_sh2 = {
++ .name = "SH2",
++ .keepalive = false,
++ .num_nodes = 1,
++ .nodes = { &acm_sys_tcu },
++};
++
++static struct qcom_icc_bcm bcm_sh3 = {
++ .name = "SH3",
++ .keepalive = false,
++ .num_nodes = 1,
++ .nodes = { &qnm_cmpnoc },
++};
++
++static struct qcom_icc_bcm bcm_sh4 = {
++ .name = "SH4",
++ .keepalive = false,
++ .num_nodes = 1,
++ .nodes = { &acm_apps },
++};
++
++static struct qcom_icc_bcm bcm_sn0 = {
++ .name = "SN0",
++ .keepalive = true,
++ .num_nodes = 1,
++ .nodes = { &qns_gemnoc_sf },
++};
++
++static struct qcom_icc_bcm bcm_sn1 = {
++ .name = "SN1",
++ .keepalive = false,
++ .num_nodes = 1,
++ .nodes = { &qxs_imem },
++};
++
++static struct qcom_icc_bcm bcm_sn2 = {
++ .name = "SN2",
++ .keepalive = false,
++ .num_nodes = 1,
++ .nodes = { &qns_gemnoc_gc },
++};
++
++static struct qcom_icc_bcm bcm_sn3 = {
++ .name = "SN3",
++ .keepalive = false,
++ .num_nodes = 1,
++ .nodes = { &qxs_pimem },
++};
++
++static struct qcom_icc_bcm bcm_sn4 = {
++ .name = "SN4",
++ .keepalive = false,
++ .num_nodes = 1,
++ .nodes = { &xs_qdss_stm },
++};
++
++static struct qcom_icc_bcm bcm_sn5 = {
++ .name = "SN5",
++ .keepalive = false,
++ .num_nodes = 1,
++ .nodes = { &qnm_aggre1_noc },
++};
++
++static struct qcom_icc_bcm bcm_sn6 = {
++ .name = "SN6",
++ .keepalive = false,
++ .num_nodes = 1,
++ .nodes = { &qnm_aggre2_noc },
++};
++
++static struct qcom_icc_bcm bcm_sn10 = {
++ .name = "SN10",
++ .keepalive = false,
++ .num_nodes = 1,
++ .nodes = { &qnm_gemnoc },
++};
+
+ static struct qcom_icc_bcm * const aggre1_noc_bcms[] = {
+ &bcm_cn1,
+diff --git a/drivers/interconnect/qcom/sm8150.c b/drivers/interconnect/qcom/sm8150.c
+index 1d04a4bfea800..685f35bbf5a7c 100644
+--- a/drivers/interconnect/qcom/sm8150.c
++++ b/drivers/interconnect/qcom/sm8150.c
+@@ -56,7 +56,6 @@ DEFINE_QNODE(qnm_pcie, SM8150_MASTER_GEM_NOC_PCIE_SNOC, 1, 16, SM8150_SLAVE_LLCC
+ DEFINE_QNODE(qnm_snoc_gc, SM8150_MASTER_SNOC_GC_MEM_NOC, 1, 8, SM8150_SLAVE_LLCC);
+ DEFINE_QNODE(qnm_snoc_sf, SM8150_MASTER_SNOC_SF_MEM_NOC, 1, 16, SM8150_SLAVE_LLCC);
+ DEFINE_QNODE(qxm_ecc, SM8150_MASTER_ECC, 2, 32, SM8150_SLAVE_LLCC);
+-DEFINE_QNODE(ipa_core_master, SM8150_MASTER_IPA_CORE, 1, 8, SM8150_SLAVE_IPA_CORE);
+ DEFINE_QNODE(llcc_mc, SM8150_MASTER_LLCC, 4, 4, SM8150_SLAVE_EBI_CH0);
+ DEFINE_QNODE(qhm_mnoc_cfg, SM8150_MASTER_CNOC_MNOC_CFG, 1, 4, SM8150_SLAVE_SERVICE_MNOC);
+ DEFINE_QNODE(qxm_camnoc_hf0, SM8150_MASTER_CAMNOC_HF0, 1, 32, SM8150_SLAVE_MNOC_HF_MEM_NOC);
+@@ -139,7 +138,6 @@ DEFINE_QNODE(qns_ecc, SM8150_SLAVE_ECC, 1, 32);
+ DEFINE_QNODE(qns_gem_noc_snoc, SM8150_SLAVE_GEM_NOC_SNOC, 1, 8, SM8150_MASTER_GEM_NOC_SNOC);
+ DEFINE_QNODE(qns_llcc, SM8150_SLAVE_LLCC, 4, 16, SM8150_MASTER_LLCC);
+ DEFINE_QNODE(srvc_gemnoc, SM8150_SLAVE_SERVICE_GEM_NOC, 1, 4);
+-DEFINE_QNODE(ipa_core_slave, SM8150_SLAVE_IPA_CORE, 1, 8);
+ DEFINE_QNODE(ebi, SM8150_SLAVE_EBI_CH0, 4, 4);
+ DEFINE_QNODE(qns2_mem_noc, SM8150_SLAVE_MNOC_SF_MEM_NOC, 1, 32, SM8150_MASTER_MNOC_SF_MEM_NOC);
+ DEFINE_QNODE(qns_mem_noc_hf, SM8150_SLAVE_MNOC_HF_MEM_NOC, 2, 32, SM8150_MASTER_MNOC_HF_MEM_NOC);
+@@ -156,35 +154,262 @@ DEFINE_QNODE(xs_pcie_1, SM8150_SLAVE_PCIE_1, 1, 8);
+ DEFINE_QNODE(xs_qdss_stm, SM8150_SLAVE_QDSS_STM, 1, 4);
+ DEFINE_QNODE(xs_sys_tcu_cfg, SM8150_SLAVE_TCU, 1, 8);
+
+-DEFINE_QBCM(bcm_acv, "ACV", false, &ebi);
+-DEFINE_QBCM(bcm_mc0, "MC0", true, &ebi);
+-DEFINE_QBCM(bcm_sh0, "SH0", true, &qns_llcc);
+-DEFINE_QBCM(bcm_mm0, "MM0", true, &qns_mem_noc_hf);
+-DEFINE_QBCM(bcm_mm1, "MM1", false, &qxm_camnoc_hf0_uncomp, &qxm_camnoc_hf1_uncomp, &qxm_camnoc_sf_uncomp, &qxm_camnoc_hf0, &qxm_camnoc_hf1, &qxm_mdp0, &qxm_mdp1);
+-DEFINE_QBCM(bcm_sh2, "SH2", false, &qns_gem_noc_snoc);
+-DEFINE_QBCM(bcm_mm2, "MM2", false, &qxm_camnoc_sf, &qns2_mem_noc);
+-DEFINE_QBCM(bcm_sh3, "SH3", false, &acm_gpu_tcu, &acm_sys_tcu);
+-DEFINE_QBCM(bcm_mm3, "MM3", false, &qxm_rot, &qxm_venus0, &qxm_venus1, &qxm_venus_arm9);
+-DEFINE_QBCM(bcm_sh4, "SH4", false, &qnm_cmpnoc);
+-DEFINE_QBCM(bcm_sh5, "SH5", false, &acm_apps);
+-DEFINE_QBCM(bcm_sn0, "SN0", true, &qns_gemnoc_sf);
+-DEFINE_QBCM(bcm_co0, "CO0", false, &qns_cdsp_mem_noc);
+-DEFINE_QBCM(bcm_ce0, "CE0", false, &qxm_crypto);
+-DEFINE_QBCM(bcm_sn1, "SN1", false, &qxs_imem);
+-DEFINE_QBCM(bcm_co1, "CO1", false, &qnm_npu);
+-DEFINE_QBCM(bcm_ip0, "IP0", false, &ipa_core_slave);
+-DEFINE_QBCM(bcm_cn0, "CN0", true, &qhm_spdm, &qnm_snoc, &qhs_a1_noc_cfg, &qhs_a2_noc_cfg, &qhs_ahb2phy_south, &qhs_aop, &qhs_aoss, &qhs_camera_cfg, &qhs_clk_ctl, &qhs_compute_dsp, &qhs_cpr_cx, &qhs_cpr_mmcx, &qhs_cpr_mx, &qhs_crypto0_cfg, &qhs_ddrss_cfg, &qhs_display_cfg, &qhs_emac_cfg, &qhs_glm, &qhs_gpuss_cfg, &qhs_imem_cfg, &qhs_ipa, &qhs_mnoc_cfg, &qhs_npu_cfg, &qhs_pcie0_cfg, &qhs_pcie1_cfg, &qhs_phy_refgen_north, &qhs_pimem_cfg, &qhs_prng, &qhs_qdss_cfg, &qhs_qspi, &qhs_qupv3_east, &qhs_qupv3_north, &qhs_qupv3_south, &qhs_sdc2, &qhs_sdc4, &qhs_snoc_cfg, &qhs_spdm, &qhs_spss_cfg, &qhs_ssc_cfg, &qhs_tcsr, &qhs_tlmm_east, &qhs_tlmm_north, &qhs_tlmm_south, &qhs_tlmm_west, &qhs_tsif, &qhs_ufs_card_cfg, &qhs_ufs_mem_cfg, &qhs_usb3_0, &qhs_usb3_1, &qhs_venus_cfg, &qhs_vsense_ctrl_cfg, &qns_cnoc_a2noc, &srvc_cnoc);
+-DEFINE_QBCM(bcm_qup0, "QUP0", false, &qhm_qup0, &qhm_qup1, &qhm_qup2);
+-DEFINE_QBCM(bcm_sn2, "SN2", false, &qns_gemnoc_gc);
+-DEFINE_QBCM(bcm_sn3, "SN3", false, &srvc_aggre1_noc, &srvc_aggre2_noc, &qns_cnoc);
+-DEFINE_QBCM(bcm_sn4, "SN4", false, &qxs_pimem);
+-DEFINE_QBCM(bcm_sn5, "SN5", false, &xs_qdss_stm);
+-DEFINE_QBCM(bcm_sn8, "SN8", false, &xs_pcie_0, &xs_pcie_1);
+-DEFINE_QBCM(bcm_sn9, "SN9", false, &qnm_aggre1_noc);
+-DEFINE_QBCM(bcm_sn11, "SN11", false, &qnm_aggre2_noc);
+-DEFINE_QBCM(bcm_sn12, "SN12", false, &qxm_pimem, &xm_gic);
+-DEFINE_QBCM(bcm_sn14, "SN14", false, &qns_pcie_mem_noc);
+-DEFINE_QBCM(bcm_sn15, "SN15", false, &qnm_gemnoc);
++static struct qcom_icc_bcm bcm_acv = {
++ .name = "ACV",
++ .enable_mask = BIT(3),
++ .keepalive = false,
++ .num_nodes = 1,
++ .nodes = { &ebi },
++};
++
++static struct qcom_icc_bcm bcm_mc0 = {
++ .name = "MC0",
++ .keepalive = true,
++ .num_nodes = 1,
++ .nodes = { &ebi },
++};
++
++static struct qcom_icc_bcm bcm_sh0 = {
++ .name = "SH0",
++ .keepalive = true,
++ .num_nodes = 1,
++ .nodes = { &qns_llcc },
++};
++
++static struct qcom_icc_bcm bcm_mm0 = {
++ .name = "MM0",
++ .keepalive = true,
++ .num_nodes = 1,
++ .nodes = { &qns_mem_noc_hf },
++};
++
++static struct qcom_icc_bcm bcm_mm1 = {
++ .name = "MM1",
++ .keepalive = false,
++ .num_nodes = 7,
++ .nodes = { &qxm_camnoc_hf0_uncomp,
++ &qxm_camnoc_hf1_uncomp,
++ &qxm_camnoc_sf_uncomp,
++ &qxm_camnoc_hf0,
++ &qxm_camnoc_hf1,
++ &qxm_mdp0,
++ &qxm_mdp1
++ },
++};
++
++static struct qcom_icc_bcm bcm_sh2 = {
++ .name = "SH2",
++ .keepalive = false,
++ .num_nodes = 1,
++ .nodes = { &qns_gem_noc_snoc },
++};
++
++static struct qcom_icc_bcm bcm_mm2 = {
++ .name = "MM2",
++ .keepalive = false,
++ .num_nodes = 2,
++ .nodes = { &qxm_camnoc_sf, &qns2_mem_noc },
++};
++
++static struct qcom_icc_bcm bcm_sh3 = {
++ .name = "SH3",
++ .keepalive = false,
++ .num_nodes = 2,
++ .nodes = { &acm_gpu_tcu, &acm_sys_tcu },
++};
++
++static struct qcom_icc_bcm bcm_mm3 = {
++ .name = "MM3",
++ .keepalive = false,
++ .num_nodes = 4,
++ .nodes = { &qxm_rot, &qxm_venus0, &qxm_venus1, &qxm_venus_arm9 },
++};
++
++static struct qcom_icc_bcm bcm_sh4 = {
++ .name = "SH4",
++ .keepalive = false,
++ .num_nodes = 1,
++ .nodes = { &qnm_cmpnoc },
++};
++
++static struct qcom_icc_bcm bcm_sh5 = {
++ .name = "SH5",
++ .keepalive = false,
++ .num_nodes = 1,
++ .nodes = { &acm_apps },
++};
++
++static struct qcom_icc_bcm bcm_sn0 = {
++ .name = "SN0",
++ .keepalive = true,
++ .num_nodes = 1,
++ .nodes = { &qns_gemnoc_sf },
++};
++
++static struct qcom_icc_bcm bcm_co0 = {
++ .name = "CO0",
++ .keepalive = false,
++ .num_nodes = 1,
++ .nodes = { &qns_cdsp_mem_noc },
++};
++
++static struct qcom_icc_bcm bcm_ce0 = {
++ .name = "CE0",
++ .keepalive = false,
++ .num_nodes = 1,
++ .nodes = { &qxm_crypto },
++};
++
++static struct qcom_icc_bcm bcm_sn1 = {
++ .name = "SN1",
++ .keepalive = false,
++ .num_nodes = 1,
++ .nodes = { &qxs_imem },
++};
++
++static struct qcom_icc_bcm bcm_co1 = {
++ .name = "CO1",
++ .keepalive = false,
++ .num_nodes = 1,
++ .nodes = { &qnm_npu },
++};
++
++static struct qcom_icc_bcm bcm_cn0 = {
++ .name = "CN0",
++ .keepalive = true,
++ .num_nodes = 53,
++ .nodes = { &qhm_spdm,
++ &qnm_snoc,
++ &qhs_a1_noc_cfg,
++ &qhs_a2_noc_cfg,
++ &qhs_ahb2phy_south,
++ &qhs_aop,
++ &qhs_aoss,
++ &qhs_camera_cfg,
++ &qhs_clk_ctl,
++ &qhs_compute_dsp,
++ &qhs_cpr_cx,
++ &qhs_cpr_mmcx,
++ &qhs_cpr_mx,
++ &qhs_crypto0_cfg,
++ &qhs_ddrss_cfg,
++ &qhs_display_cfg,
++ &qhs_emac_cfg,
++ &qhs_glm,
++ &qhs_gpuss_cfg,
++ &qhs_imem_cfg,
++ &qhs_ipa,
++ &qhs_mnoc_cfg,
++ &qhs_npu_cfg,
++ &qhs_pcie0_cfg,
++ &qhs_pcie1_cfg,
++ &qhs_phy_refgen_north,
++ &qhs_pimem_cfg,
++ &qhs_prng,
++ &qhs_qdss_cfg,
++ &qhs_qspi,
++ &qhs_qupv3_east,
++ &qhs_qupv3_north,
++ &qhs_qupv3_south,
++ &qhs_sdc2,
++ &qhs_sdc4,
++ &qhs_snoc_cfg,
++ &qhs_spdm,
++ &qhs_spss_cfg,
++ &qhs_ssc_cfg,
++ &qhs_tcsr,
++ &qhs_tlmm_east,
++ &qhs_tlmm_north,
++ &qhs_tlmm_south,
++ &qhs_tlmm_west,
++ &qhs_tsif,
++ &qhs_ufs_card_cfg,
++ &qhs_ufs_mem_cfg,
++ &qhs_usb3_0,
++ &qhs_usb3_1,
++ &qhs_venus_cfg,
++ &qhs_vsense_ctrl_cfg,
++ &qns_cnoc_a2noc,
++ &srvc_cnoc
++ },
++};
++
++static struct qcom_icc_bcm bcm_qup0 = {
++ .name = "QUP0",
++ .keepalive = false,
++ .num_nodes = 3,
++ .nodes = { &qhm_qup0, &qhm_qup1, &qhm_qup2 },
++};
++
++static struct qcom_icc_bcm bcm_sn2 = {
++ .name = "SN2",
++ .keepalive = false,
++ .num_nodes = 1,
++ .nodes = { &qns_gemnoc_gc },
++};
++
++static struct qcom_icc_bcm bcm_sn3 = {
++ .name = "SN3",
++ .keepalive = false,
++ .num_nodes = 3,
++ .nodes = { &srvc_aggre1_noc, &srvc_aggre2_noc, &qns_cnoc },
++};
++
++static struct qcom_icc_bcm bcm_sn4 = {
++ .name = "SN4",
++ .keepalive = false,
++ .num_nodes = 1,
++ .nodes = { &qxs_pimem },
++};
++
++static struct qcom_icc_bcm bcm_sn5 = {
++ .name = "SN5",
++ .keepalive = false,
++ .num_nodes = 1,
++ .nodes = { &xs_qdss_stm },
++};
++
++static struct qcom_icc_bcm bcm_sn8 = {
++ .name = "SN8",
++ .keepalive = false,
++ .num_nodes = 2,
++ .nodes = { &xs_pcie_0, &xs_pcie_1 },
++};
++
++static struct qcom_icc_bcm bcm_sn9 = {
++ .name = "SN9",
++ .keepalive = false,
++ .num_nodes = 1,
++ .nodes = { &qnm_aggre1_noc },
++};
++
++static struct qcom_icc_bcm bcm_sn11 = {
++ .name = "SN11",
++ .keepalive = false,
++ .num_nodes = 1,
++ .nodes = { &qnm_aggre2_noc },
++};
++
++static struct qcom_icc_bcm bcm_sn12 = {
++ .name = "SN12",
++ .keepalive = false,
++ .num_nodes = 2,
++ .nodes = { &qxm_pimem, &xm_gic },
++};
++
++static struct qcom_icc_bcm bcm_sn14 = {
++ .name = "SN14",
++ .keepalive = false,
++ .num_nodes = 1,
++ .nodes = { &qns_pcie_mem_noc },
++};
++
++static struct qcom_icc_bcm bcm_sn15 = {
++ .name = "SN15",
++ .keepalive = false,
++ .num_nodes = 1,
++ .nodes = { &qnm_gemnoc },
++};
+
+ static struct qcom_icc_bcm * const aggre1_noc_bcms[] = {
+ &bcm_qup0,
+@@ -398,22 +623,6 @@ static const struct qcom_icc_desc sm8150_gem_noc = {
+ .num_bcms = ARRAY_SIZE(gem_noc_bcms),
+ };
+
+-static struct qcom_icc_bcm * const ipa_virt_bcms[] = {
+- &bcm_ip0,
+-};
+-
+-static struct qcom_icc_node * const ipa_virt_nodes[] = {
+- [MASTER_IPA_CORE] = &ipa_core_master,
+- [SLAVE_IPA_CORE] = &ipa_core_slave,
+-};
+-
+-static const struct qcom_icc_desc sm8150_ipa_virt = {
+- .nodes = ipa_virt_nodes,
+- .num_nodes = ARRAY_SIZE(ipa_virt_nodes),
+- .bcms = ipa_virt_bcms,
+- .num_bcms = ARRAY_SIZE(ipa_virt_bcms),
+-};
+-
+ static struct qcom_icc_bcm * const mc_virt_bcms[] = {
+ &bcm_acv,
+ &bcm_mc0,
+@@ -517,8 +726,6 @@ static const struct of_device_id qnoc_of_match[] = {
+ .data = &sm8150_dc_noc},
+ { .compatible = "qcom,sm8150-gem-noc",
+ .data = &sm8150_gem_noc},
+- { .compatible = "qcom,sm8150-ipa-virt",
+- .data = &sm8150_ipa_virt},
+ { .compatible = "qcom,sm8150-mc-virt",
+ .data = &sm8150_mc_virt},
+ { .compatible = "qcom,sm8150-mmss-noc",
+diff --git a/drivers/interconnect/qcom/sm8150.h b/drivers/interconnect/qcom/sm8150.h
+index 97996f64d799c..023161681fb87 100644
+--- a/drivers/interconnect/qcom/sm8150.h
++++ b/drivers/interconnect/qcom/sm8150.h
+@@ -35,7 +35,7 @@
+ #define SM8150_MASTER_GPU_TCU 24
+ #define SM8150_MASTER_GRAPHICS_3D 25
+ #define SM8150_MASTER_IPA 26
+-#define SM8150_MASTER_IPA_CORE 27
++/* 27 was used by SLAVE_IPA_CORE, now represented as RPMh clock */
+ #define SM8150_MASTER_LLCC 28
+ #define SM8150_MASTER_MDP_PORT0 29
+ #define SM8150_MASTER_MDP_PORT1 30
+@@ -94,7 +94,7 @@
+ #define SM8150_SLAVE_GRAPHICS_3D_CFG 83
+ #define SM8150_SLAVE_IMEM_CFG 84
+ #define SM8150_SLAVE_IPA_CFG 85
+-#define SM8150_SLAVE_IPA_CORE 86
++/* 86 was used by SLAVE_IPA_CORE, now represented as RPMh clock */
+ #define SM8150_SLAVE_LLCC 87
+ #define SM8150_SLAVE_LLCC_CFG 88
+ #define SM8150_SLAVE_MNOC_HF_MEM_NOC 89
+diff --git a/drivers/interconnect/qcom/sm8350.c b/drivers/interconnect/qcom/sm8350.c
+index 5398e7c8d826b..e6e2dcf4574d8 100644
+--- a/drivers/interconnect/qcom/sm8350.c
++++ b/drivers/interconnect/qcom/sm8350.c
+@@ -165,38 +165,283 @@ DEFINE_QNODE(ebi_disp, SM8350_SLAVE_EBI1_DISP, 4, 4);
+ DEFINE_QNODE(qns_mem_noc_hf_disp, SM8350_SLAVE_MNOC_HF_MEM_NOC_DISP, 2, 32, SM8350_MASTER_MNOC_HF_MEM_NOC_DISP);
+ DEFINE_QNODE(qns_mem_noc_sf_disp, SM8350_SLAVE_MNOC_SF_MEM_NOC_DISP, 2, 32, SM8350_MASTER_MNOC_SF_MEM_NOC_DISP);
+
+-DEFINE_QBCM(bcm_acv, "ACV", false, &ebi);
+-DEFINE_QBCM(bcm_ce0, "CE0", false, &qxm_crypto);
+-DEFINE_QBCM(bcm_cn0, "CN0", true, &qnm_gemnoc_cnoc, &qnm_gemnoc_pcie);
+-DEFINE_QBCM(bcm_cn1, "CN1", false, &xm_qdss_dap, &qhs_ahb2phy0, &qhs_ahb2phy1, &qhs_aoss, &qhs_apss, &qhs_camera_cfg, &qhs_clk_ctl, &qhs_compute_cfg, &qhs_cpr_cx, &qhs_cpr_mmcx, &qhs_cpr_mx, &qhs_crypto0_cfg, &qhs_cx_rdpm, &qhs_dcc_cfg, &qhs_display_cfg, &qhs_gpuss_cfg, &qhs_hwkm, &qhs_imem_cfg, &qhs_ipa, &qhs_ipc_router, &qhs_mss_cfg, &qhs_mx_rdpm, &qhs_pcie0_cfg, &qhs_pcie1_cfg, &qhs_pimem_cfg, &qhs_pka_wrapper_cfg, &qhs_pmu_wrapper_cfg, &qhs_qdss_cfg, &qhs_qup0, &qhs_qup1, &qhs_qup2, &qhs_security, &qhs_spss_cfg, &qhs_tcsr, &qhs_tlmm, &qhs_ufs_card_cfg, &qhs_ufs_mem_cfg, &qhs_usb3_0, &qhs_usb3_1, &qhs_venus_cfg, &qhs_vsense_ctrl_cfg, &qns_a1_noc_cfg, &qns_a2_noc_cfg, &qns_ddrss_cfg, &qns_mnoc_cfg, &qns_snoc_cfg, &srvc_cnoc);
+-DEFINE_QBCM(bcm_cn2, "CN2", false, &qhs_lpass_cfg, &qhs_pdm, &qhs_qspi, &qhs_sdc2, &qhs_sdc4);
+-DEFINE_QBCM(bcm_co0, "CO0", false, &qns_nsp_gemnoc);
+-DEFINE_QBCM(bcm_co3, "CO3", false, &qxm_nsp);
+-DEFINE_QBCM(bcm_mc0, "MC0", true, &ebi);
+-DEFINE_QBCM(bcm_mm0, "MM0", true, &qns_mem_noc_hf);
+-DEFINE_QBCM(bcm_mm1, "MM1", false, &qnm_camnoc_hf, &qxm_mdp0, &qxm_mdp1);
+-DEFINE_QBCM(bcm_mm4, "MM4", false, &qns_mem_noc_sf);
+-DEFINE_QBCM(bcm_mm5, "MM5", false, &qnm_camnoc_icp, &qnm_camnoc_sf, &qnm_video0, &qnm_video1, &qnm_video_cvp, &qxm_rot);
+-DEFINE_QBCM(bcm_sh0, "SH0", true, &qns_llcc);
+-DEFINE_QBCM(bcm_sh2, "SH2", false, &alm_gpu_tcu, &alm_sys_tcu);
+-DEFINE_QBCM(bcm_sh3, "SH3", false, &qnm_cmpnoc);
+-DEFINE_QBCM(bcm_sh4, "SH4", false, &chm_apps);
+-DEFINE_QBCM(bcm_sn0, "SN0", true, &qns_gemnoc_sf);
+-DEFINE_QBCM(bcm_sn2, "SN2", false, &qns_gemnoc_gc);
+-DEFINE_QBCM(bcm_sn3, "SN3", false, &qxs_pimem);
+-DEFINE_QBCM(bcm_sn4, "SN4", false, &xs_qdss_stm);
+-DEFINE_QBCM(bcm_sn5, "SN5", false, &xm_pcie3_0);
+-DEFINE_QBCM(bcm_sn6, "SN6", false, &xm_pcie3_1);
+-DEFINE_QBCM(bcm_sn7, "SN7", false, &qnm_aggre1_noc);
+-DEFINE_QBCM(bcm_sn8, "SN8", false, &qnm_aggre2_noc);
+-DEFINE_QBCM(bcm_sn14, "SN14", false, &qns_pcie_mem_noc);
+-DEFINE_QBCM(bcm_acv_disp, "ACV", false, &ebi_disp);
+-DEFINE_QBCM(bcm_mc0_disp, "MC0", false, &ebi_disp);
+-DEFINE_QBCM(bcm_mm0_disp, "MM0", false, &qns_mem_noc_hf_disp);
+-DEFINE_QBCM(bcm_mm1_disp, "MM1", false, &qxm_mdp0_disp, &qxm_mdp1_disp);
+-DEFINE_QBCM(bcm_mm4_disp, "MM4", false, &qns_mem_noc_sf_disp);
+-DEFINE_QBCM(bcm_mm5_disp, "MM5", false, &qxm_rot_disp);
+-DEFINE_QBCM(bcm_sh0_disp, "SH0", false, &qns_llcc_disp);
++static struct qcom_icc_bcm bcm_acv = {
++ .name = "ACV",
++ .enable_mask = BIT(3),
++ .keepalive = false,
++ .num_nodes = 1,
++ .nodes = { &ebi },
++};
++
++static struct qcom_icc_bcm bcm_ce0 = {
++ .name = "CE0",
++ .keepalive = false,
++ .num_nodes = 1,
++ .nodes = { &qxm_crypto },
++};
++
++static struct qcom_icc_bcm bcm_cn0 = {
++ .name = "CN0",
++ .keepalive = true,
++ .num_nodes = 2,
++ .nodes = { &qnm_gemnoc_cnoc, &qnm_gemnoc_pcie },
++};
++
++static struct qcom_icc_bcm bcm_cn1 = {
++ .name = "CN1",
++ .keepalive = false,
++ .num_nodes = 47,
++ .nodes = { &xm_qdss_dap,
++ &qhs_ahb2phy0,
++ &qhs_ahb2phy1,
++ &qhs_aoss,
++ &qhs_apss,
++ &qhs_camera_cfg,
++ &qhs_clk_ctl,
++ &qhs_compute_cfg,
++ &qhs_cpr_cx,
++ &qhs_cpr_mmcx,
++ &qhs_cpr_mx,
++ &qhs_crypto0_cfg,
++ &qhs_cx_rdpm,
++ &qhs_dcc_cfg,
++ &qhs_display_cfg,
++ &qhs_gpuss_cfg,
++ &qhs_hwkm,
++ &qhs_imem_cfg,
++ &qhs_ipa,
++ &qhs_ipc_router,
++ &qhs_mss_cfg,
++ &qhs_mx_rdpm,
++ &qhs_pcie0_cfg,
++ &qhs_pcie1_cfg,
++ &qhs_pimem_cfg,
++ &qhs_pka_wrapper_cfg,
++ &qhs_pmu_wrapper_cfg,
++ &qhs_qdss_cfg,
++ &qhs_qup0,
++ &qhs_qup1,
++ &qhs_qup2,
++ &qhs_security,
++ &qhs_spss_cfg,
++ &qhs_tcsr,
++ &qhs_tlmm,
++ &qhs_ufs_card_cfg,
++ &qhs_ufs_mem_cfg,
++ &qhs_usb3_0,
++ &qhs_usb3_1,
++ &qhs_venus_cfg,
++ &qhs_vsense_ctrl_cfg,
++ &qns_a1_noc_cfg,
++ &qns_a2_noc_cfg,
++ &qns_ddrss_cfg,
++ &qns_mnoc_cfg,
++ &qns_snoc_cfg,
++ &srvc_cnoc
++ },
++};
++
++static struct qcom_icc_bcm bcm_cn2 = {
++ .name = "CN2",
++ .keepalive = false,
++ .num_nodes = 5,
++ .nodes = { &qhs_lpass_cfg, &qhs_pdm, &qhs_qspi, &qhs_sdc2, &qhs_sdc4 },
++};
++
++static struct qcom_icc_bcm bcm_co0 = {
++ .name = "CO0",
++ .keepalive = false,
++ .num_nodes = 1,
++ .nodes = { &qns_nsp_gemnoc },
++};
++
++static struct qcom_icc_bcm bcm_co3 = {
++ .name = "CO3",
++ .keepalive = false,
++ .num_nodes = 1,
++ .nodes = { &qxm_nsp },
++};
++
++static struct qcom_icc_bcm bcm_mc0 = {
++ .name = "MC0",
++ .keepalive = true,
++ .num_nodes = 1,
++ .nodes = { &ebi },
++};
++
++static struct qcom_icc_bcm bcm_mm0 = {
++ .name = "MM0",
++ .keepalive = true,
++ .num_nodes = 1,
++ .nodes = { &qns_mem_noc_hf },
++};
++
++static struct qcom_icc_bcm bcm_mm1 = {
++ .name = "MM1",
++ .keepalive = false,
++ .num_nodes = 3,
++ .nodes = { &qnm_camnoc_hf, &qxm_mdp0, &qxm_mdp1 },
++};
++
++static struct qcom_icc_bcm bcm_mm4 = {
++ .name = "MM4",
++ .keepalive = false,
++ .num_nodes = 1,
++ .nodes = { &qns_mem_noc_sf },
++};
++
++static struct qcom_icc_bcm bcm_mm5 = {
++ .name = "MM5",
++ .keepalive = false,
++ .num_nodes = 6,
++ .nodes = { &qnm_camnoc_icp,
++ &qnm_camnoc_sf,
++ &qnm_video0,
++ &qnm_video1,
++ &qnm_video_cvp,
++ &qxm_rot
++ },
++};
++
++static struct qcom_icc_bcm bcm_sh0 = {
++ .name = "SH0",
++ .keepalive = true,
++ .num_nodes = 1,
++ .nodes = { &qns_llcc },
++};
++
++static struct qcom_icc_bcm bcm_sh2 = {
++ .name = "SH2",
++ .keepalive = false,
++ .num_nodes = 2,
++ .nodes = { &alm_gpu_tcu, &alm_sys_tcu },
++};
++
++static struct qcom_icc_bcm bcm_sh3 = {
++ .name = "SH3",
++ .keepalive = false,
++ .num_nodes = 1,
++ .nodes = { &qnm_cmpnoc },
++};
++
++static struct qcom_icc_bcm bcm_sh4 = {
++ .name = "SH4",
++ .keepalive = false,
++ .num_nodes = 1,
++ .nodes = { &chm_apps },
++};
++
++static struct qcom_icc_bcm bcm_sn0 = {
++ .name = "SN0",
++ .keepalive = true,
++ .num_nodes = 1,
++ .nodes = { &qns_gemnoc_sf },
++};
++
++static struct qcom_icc_bcm bcm_sn2 = {
++ .name = "SN2",
++ .keepalive = false,
++ .num_nodes = 1,
++ .nodes = { &qns_gemnoc_gc },
++};
++
++static struct qcom_icc_bcm bcm_sn3 = {
++ .name = "SN3",
++ .keepalive = false,
++ .num_nodes = 1,
++ .nodes = { &qxs_pimem },
++};
++
++static struct qcom_icc_bcm bcm_sn4 = {
++ .name = "SN4",
++ .keepalive = false,
++ .num_nodes = 1,
++ .nodes = { &xs_qdss_stm },
++};
++
++static struct qcom_icc_bcm bcm_sn5 = {
++ .name = "SN5",
++ .keepalive = false,
++ .num_nodes = 1,
++ .nodes = { &xm_pcie3_0 },
++};
++
++static struct qcom_icc_bcm bcm_sn6 = {
++ .name = "SN6",
++ .keepalive = false,
++ .num_nodes = 1,
++ .nodes = { &xm_pcie3_1 },
++};
++
++static struct qcom_icc_bcm bcm_sn7 = {
++ .name = "SN7",
++ .keepalive = false,
++ .num_nodes = 1,
++ .nodes = { &qnm_aggre1_noc },
++};
++
++static struct qcom_icc_bcm bcm_sn8 = {
++ .name = "SN8",
++ .keepalive = false,
++ .num_nodes = 1,
++ .nodes = { &qnm_aggre2_noc },
++};
++
++static struct qcom_icc_bcm bcm_sn14 = {
++ .name = "SN14",
++ .keepalive = false,
++ .num_nodes = 1,
++ .nodes = { &qns_pcie_mem_noc },
++};
++
++static struct qcom_icc_bcm bcm_acv_disp = {
++ .name = "ACV",
++ .keepalive = false,
++ .num_nodes = 1,
++ .nodes = { &ebi_disp },
++};
++
++static struct qcom_icc_bcm bcm_mc0_disp = {
++ .name = "MC0",
++ .keepalive = false,
++ .num_nodes = 1,
++ .nodes = { &ebi_disp },
++};
++
++static struct qcom_icc_bcm bcm_mm0_disp = {
++ .name = "MM0",
++ .keepalive = false,
++ .num_nodes = 1,
++ .nodes = { &qns_mem_noc_hf_disp },
++};
++
++static struct qcom_icc_bcm bcm_mm1_disp = {
++ .name = "MM1",
++ .keepalive = false,
++ .num_nodes = 2,
++ .nodes = { &qxm_mdp0_disp, &qxm_mdp1_disp },
++};
++
++static struct qcom_icc_bcm bcm_mm4_disp = {
++ .name = "MM4",
++ .keepalive = false,
++ .num_nodes = 1,
++ .nodes = { &qns_mem_noc_sf_disp },
++};
++
++static struct qcom_icc_bcm bcm_mm5_disp = {
++ .name = "MM5",
++ .keepalive = false,
++ .num_nodes = 1,
++ .nodes = { &qxm_rot_disp },
++};
++
++static struct qcom_icc_bcm bcm_sh0_disp = {
++ .name = "SH0",
++ .keepalive = false,
++ .num_nodes = 1,
++ .nodes = { &qns_llcc_disp },
++};
+
+ static struct qcom_icc_bcm * const aggre1_noc_bcms[] = {
+ };
+diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c
+index cc892ecd52408..6d3e33e8b5f91 100644
+--- a/drivers/leds/leds-pwm.c
++++ b/drivers/leds/leds-pwm.c
+@@ -53,7 +53,7 @@ static int led_pwm_set(struct led_classdev *led_cdev,
+ duty = led_dat->pwmstate.period - duty;
+
+ led_dat->pwmstate.duty_cycle = duty;
+- led_dat->pwmstate.enabled = duty > 0;
++ led_dat->pwmstate.enabled = true;
+ return pwm_apply_state(led_dat->pwm, &led_dat->pwmstate);
+ }
+
+diff --git a/drivers/leds/leds-turris-omnia.c b/drivers/leds/leds-turris-omnia.c
+index c7c9851c894a9..179eb243da2f6 100644
+--- a/drivers/leds/leds-turris-omnia.c
++++ b/drivers/leds/leds-turris-omnia.c
+@@ -2,7 +2,7 @@
+ /*
+ * CZ.NIC's Turris Omnia LEDs driver
+ *
+- * 2020 by Marek Behún <kabel@kernel.org>
++ * 2020, 2023 by Marek Behún <kabel@kernel.org>
+ */
+
+ #include <linux/i2c.h>
+@@ -41,6 +41,37 @@ struct omnia_leds {
+ struct omnia_led leds[];
+ };
+
++static int omnia_cmd_write_u8(const struct i2c_client *client, u8 cmd, u8 val)
++{
++ u8 buf[2] = { cmd, val };
++
++ return i2c_master_send(client, buf, sizeof(buf));
++}
++
++static int omnia_cmd_read_u8(const struct i2c_client *client, u8 cmd)
++{
++ struct i2c_msg msgs[2];
++ u8 reply;
++ int ret;
++
++ msgs[0].addr = client->addr;
++ msgs[0].flags = 0;
++ msgs[0].len = 1;
++ msgs[0].buf = &cmd;
++ msgs[1].addr = client->addr;
++ msgs[1].flags = I2C_M_RD;
++ msgs[1].len = 1;
++ msgs[1].buf = &reply;
++
++ ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
++ if (likely(ret == ARRAY_SIZE(msgs)))
++ return reply;
++ else if (ret < 0)
++ return ret;
++ else
++ return -EIO;
++}
++
+ static int omnia_led_brightness_set_blocking(struct led_classdev *cdev,
+ enum led_brightness brightness)
+ {
+@@ -64,7 +95,7 @@ static int omnia_led_brightness_set_blocking(struct led_classdev *cdev,
+ if (buf[2] || buf[3] || buf[4])
+ state |= CMD_LED_STATE_ON;
+
+- ret = i2c_smbus_write_byte_data(leds->client, CMD_LED_STATE, state);
++ ret = omnia_cmd_write_u8(leds->client, CMD_LED_STATE, state);
+ if (ret >= 0 && (state & CMD_LED_STATE_ON))
+ ret = i2c_master_send(leds->client, buf, 5);
+
+@@ -114,9 +145,9 @@ static int omnia_led_register(struct i2c_client *client, struct omnia_led *led,
+ cdev->brightness_set_blocking = omnia_led_brightness_set_blocking;
+
+ /* put the LED into software mode */
+- ret = i2c_smbus_write_byte_data(client, CMD_LED_MODE,
+- CMD_LED_MODE_LED(led->reg) |
+- CMD_LED_MODE_USER);
++ ret = omnia_cmd_write_u8(client, CMD_LED_MODE,
++ CMD_LED_MODE_LED(led->reg) |
++ CMD_LED_MODE_USER);
+ if (ret < 0) {
+ dev_err(dev, "Cannot set LED %pOF to software mode: %i\n", np,
+ ret);
+@@ -124,8 +155,8 @@ static int omnia_led_register(struct i2c_client *client, struct omnia_led *led,
+ }
+
+ /* disable the LED */
+- ret = i2c_smbus_write_byte_data(client, CMD_LED_STATE,
+- CMD_LED_STATE_LED(led->reg));
++ ret = omnia_cmd_write_u8(client, CMD_LED_STATE,
++ CMD_LED_STATE_LED(led->reg));
+ if (ret < 0) {
+ dev_err(dev, "Cannot set LED %pOF brightness: %i\n", np, ret);
+ return ret;
+@@ -156,12 +187,9 @@ static ssize_t brightness_show(struct device *dev, struct device_attribute *a,
+ char *buf)
+ {
+ struct i2c_client *client = to_i2c_client(dev);
+- struct omnia_leds *leds = i2c_get_clientdata(client);
+ int ret;
+
+- mutex_lock(&leds->lock);
+- ret = i2c_smbus_read_byte_data(client, CMD_LED_GET_BRIGHTNESS);
+- mutex_unlock(&leds->lock);
++ ret = omnia_cmd_read_u8(client, CMD_LED_GET_BRIGHTNESS);
+
+ if (ret < 0)
+ return ret;
+@@ -173,7 +201,6 @@ static ssize_t brightness_store(struct device *dev, struct device_attribute *a,
+ const char *buf, size_t count)
+ {
+ struct i2c_client *client = to_i2c_client(dev);
+- struct omnia_leds *leds = i2c_get_clientdata(client);
+ unsigned long brightness;
+ int ret;
+
+@@ -183,15 +210,9 @@ static ssize_t brightness_store(struct device *dev, struct device_attribute *a,
+ if (brightness > 100)
+ return -EINVAL;
+
+- mutex_lock(&leds->lock);
+- ret = i2c_smbus_write_byte_data(client, CMD_LED_SET_BRIGHTNESS,
+- (u8)brightness);
+- mutex_unlock(&leds->lock);
+-
+- if (ret < 0)
+- return ret;
++ ret = omnia_cmd_write_u8(client, CMD_LED_SET_BRIGHTNESS, brightness);
+
+- return count;
++ return ret < 0 ? ret : count;
+ }
+ static DEVICE_ATTR_RW(brightness);
+
+@@ -247,8 +268,8 @@ static void omnia_leds_remove(struct i2c_client *client)
+ u8 buf[5];
+
+ /* put all LEDs into default (HW triggered) mode */
+- i2c_smbus_write_byte_data(client, CMD_LED_MODE,
+- CMD_LED_MODE_LED(OMNIA_BOARD_LEDS));
++ omnia_cmd_write_u8(client, CMD_LED_MODE,
++ CMD_LED_MODE_LED(OMNIA_BOARD_LEDS));
+
+ /* set all LEDs color to [255, 255, 255] */
+ buf[0] = CMD_LED_COLOR;
+diff --git a/drivers/leds/trigger/ledtrig-cpu.c b/drivers/leds/trigger/ledtrig-cpu.c
+index 8af4f9bb9cde8..05848a2fecff6 100644
+--- a/drivers/leds/trigger/ledtrig-cpu.c
++++ b/drivers/leds/trigger/ledtrig-cpu.c
+@@ -130,7 +130,7 @@ static int ledtrig_prepare_down_cpu(unsigned int cpu)
+
+ static int __init ledtrig_cpu_init(void)
+ {
+- int cpu;
++ unsigned int cpu;
+ int ret;
+
+ /* Supports up to 9999 cpu cores */
+@@ -152,7 +152,7 @@ static int __init ledtrig_cpu_init(void)
+ if (cpu >= 8)
+ continue;
+
+- snprintf(trig->name, MAX_NAME_LEN, "cpu%d", cpu);
++ snprintf(trig->name, MAX_NAME_LEN, "cpu%u", cpu);
+
+ led_trigger_register_simple(trig->name, &trig->_trig);
+ }
+diff --git a/drivers/media/cec/platform/Makefile b/drivers/media/cec/platform/Makefile
+index 26d2bc7783944..a51e98ab4958d 100644
+--- a/drivers/media/cec/platform/Makefile
++++ b/drivers/media/cec/platform/Makefile
+@@ -6,7 +6,7 @@
+ # Please keep it in alphabetic order
+ obj-$(CONFIG_CEC_CROS_EC) += cros-ec/
+ obj-$(CONFIG_CEC_GPIO) += cec-gpio/
+-obj-$(CONFIG_CEC_MESON_AO) += meson/
++obj-y += meson/
+ obj-$(CONFIG_CEC_SAMSUNG_S5P) += s5p/
+ obj-$(CONFIG_CEC_SECO) += seco/
+ obj-$(CONFIG_CEC_STI) += sti/
+diff --git a/drivers/media/i2c/max9286.c b/drivers/media/i2c/max9286.c
+index 892cd97b7cab7..e8c28902d97e9 100644
+--- a/drivers/media/i2c/max9286.c
++++ b/drivers/media/i2c/max9286.c
+@@ -1234,7 +1234,6 @@ static int max9286_parse_dt(struct max9286_priv *priv)
+
+ i2c_mux_mask |= BIT(id);
+ }
+- of_node_put(node);
+ of_node_put(i2c_mux);
+
+ /* Parse the endpoints */
+@@ -1298,7 +1297,6 @@ static int max9286_parse_dt(struct max9286_priv *priv)
+ priv->source_mask |= BIT(ep.port);
+ priv->nsources++;
+ }
+- of_node_put(node);
+
+ /*
+ * Parse the initial value of the reverse channel amplitude from
+diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c
+index 2ee832426736d..e0019668a8f86 100644
+--- a/drivers/media/i2c/ov5640.c
++++ b/drivers/media/i2c/ov5640.c
+@@ -2840,12 +2840,22 @@ static int ov5640_try_fmt_internal(struct v4l2_subdev *sd,
+ return 0;
+ }
+
++static void __v4l2_ctrl_vblank_update(struct ov5640_dev *sensor, u32 vblank)
++{
++ const struct ov5640_mode_info *mode = sensor->current_mode;
++
++ __v4l2_ctrl_modify_range(sensor->ctrls.vblank, OV5640_MIN_VBLANK,
++ OV5640_MAX_VTS - mode->height, 1, vblank);
++
++ __v4l2_ctrl_s_ctrl(sensor->ctrls.vblank, vblank);
++}
++
+ static int ov5640_update_pixel_rate(struct ov5640_dev *sensor)
+ {
+ const struct ov5640_mode_info *mode = sensor->current_mode;
+ enum ov5640_pixel_rate_id pixel_rate_id = mode->pixel_rate;
+ struct v4l2_mbus_framefmt *fmt = &sensor->fmt;
+- const struct ov5640_timings *timings;
++ const struct ov5640_timings *timings = ov5640_timings(sensor, mode);
+ s32 exposure_val, exposure_max;
+ unsigned int hblank;
+ unsigned int i = 0;
+@@ -2864,6 +2874,8 @@ static int ov5640_update_pixel_rate(struct ov5640_dev *sensor)
+ __v4l2_ctrl_s_ctrl_int64(sensor->ctrls.pixel_rate,
+ ov5640_calc_pixel_rate(sensor));
+
++ __v4l2_ctrl_vblank_update(sensor, timings->vblank_def);
++
+ return 0;
+ }
+
+@@ -2906,28 +2918,12 @@ static int ov5640_update_pixel_rate(struct ov5640_dev *sensor)
+ __v4l2_ctrl_s_ctrl_int64(sensor->ctrls.pixel_rate, pixel_rate);
+ __v4l2_ctrl_s_ctrl(sensor->ctrls.link_freq, i);
+
+- timings = ov5640_timings(sensor, mode);
+ hblank = timings->htot - mode->width;
+ __v4l2_ctrl_modify_range(sensor->ctrls.hblank,
+ hblank, hblank, 1, hblank);
+
+ vblank = timings->vblank_def;
+-
+- if (sensor->current_fr != mode->def_fps) {
+- /*
+- * Compute the vertical blanking according to the framerate
+- * configured with s_frame_interval.
+- */
+- int fie_num = sensor->frame_interval.numerator;
+- int fie_denom = sensor->frame_interval.denominator;
+-
+- vblank = ((fie_num * pixel_rate / fie_denom) / timings->htot) -
+- mode->height;
+- }
+-
+- __v4l2_ctrl_modify_range(sensor->ctrls.vblank, OV5640_MIN_VBLANK,
+- OV5640_MAX_VTS - mode->height, 1, vblank);
+- __v4l2_ctrl_s_ctrl(sensor->ctrls.vblank, vblank);
++ __v4l2_ctrl_vblank_update(sensor, vblank);
+
+ exposure_max = timings->crop.height + vblank - 4;
+ exposure_val = clamp_t(s32, sensor->ctrls.exposure->val,
+@@ -3913,7 +3909,7 @@ static int ov5640_probe(struct i2c_client *client)
+ ret = ov5640_sensor_resume(dev);
+ if (ret) {
+ dev_err(dev, "failed to power on\n");
+- goto entity_cleanup;
++ goto free_ctrls;
+ }
+
+ pm_runtime_set_active(dev);
+@@ -3937,8 +3933,9 @@ static int ov5640_probe(struct i2c_client *client)
+ err_pm_runtime:
+ pm_runtime_put_noidle(dev);
+ pm_runtime_disable(dev);
+- v4l2_ctrl_handler_free(&sensor->ctrls.handler);
+ ov5640_sensor_suspend(dev);
++free_ctrls:
++ v4l2_ctrl_handler_free(&sensor->ctrls.handler);
+ entity_cleanup:
+ media_entity_cleanup(&sensor->sd.entity);
+ mutex_destroy(&sensor->lock);
+diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
+index d40b537f4e98b..24ba5729969dc 100644
+--- a/drivers/media/pci/bt8xx/bttv-driver.c
++++ b/drivers/media/pci/bt8xx/bttv-driver.c
+@@ -4248,6 +4248,7 @@ static void bttv_remove(struct pci_dev *pci_dev)
+
+ /* free resources */
+ free_irq(btv->c.pci->irq,btv);
++ del_timer_sync(&btv->timeout);
+ iounmap(btv->bt848_mmio);
+ release_mem_region(pci_resource_start(btv->c.pci,0),
+ pci_resource_len(btv->c.pci,0));
+diff --git a/drivers/media/platform/amphion/vpu_defs.h b/drivers/media/platform/amphion/vpu_defs.h
+index 667637eedb5d4..7320852668d64 100644
+--- a/drivers/media/platform/amphion/vpu_defs.h
++++ b/drivers/media/platform/amphion/vpu_defs.h
+@@ -71,6 +71,7 @@ enum {
+ VPU_MSG_ID_TIMESTAMP_INFO,
+ VPU_MSG_ID_FIRMWARE_XCPT,
+ VPU_MSG_ID_PIC_SKIPPED,
++ VPU_MSG_ID_DBG_MSG,
+ };
+
+ enum VPU_ENC_MEMORY_RESOURSE {
+diff --git a/drivers/media/platform/amphion/vpu_helpers.c b/drivers/media/platform/amphion/vpu_helpers.c
+index 2e78666322f02..66fdb0baea746 100644
+--- a/drivers/media/platform/amphion/vpu_helpers.c
++++ b/drivers/media/platform/amphion/vpu_helpers.c
+@@ -454,6 +454,7 @@ const char *vpu_id_name(u32 id)
+ case VPU_MSG_ID_UNSUPPORTED: return "unsupported";
+ case VPU_MSG_ID_FIRMWARE_XCPT: return "exception";
+ case VPU_MSG_ID_PIC_SKIPPED: return "skipped";
++ case VPU_MSG_ID_DBG_MSG: return "debug msg";
+ }
+ return "<unknown>";
+ }
+diff --git a/drivers/media/platform/amphion/vpu_malone.c b/drivers/media/platform/amphion/vpu_malone.c
+index c2f4fb12c3b64..6b37453eef76c 100644
+--- a/drivers/media/platform/amphion/vpu_malone.c
++++ b/drivers/media/platform/amphion/vpu_malone.c
+@@ -726,6 +726,7 @@ static struct vpu_pair malone_msgs[] = {
+ {VPU_MSG_ID_UNSUPPORTED, VID_API_EVENT_UNSUPPORTED_STREAM},
+ {VPU_MSG_ID_FIRMWARE_XCPT, VID_API_EVENT_FIRMWARE_XCPT},
+ {VPU_MSG_ID_PIC_SKIPPED, VID_API_EVENT_PIC_SKIPPED},
++ {VPU_MSG_ID_DBG_MSG, VID_API_EVENT_DBG_MSG_DEC},
+ };
+
+ static void vpu_malone_pack_fs_alloc(struct vpu_rpc_event *pkt,
+diff --git a/drivers/media/platform/amphion/vpu_msgs.c b/drivers/media/platform/amphion/vpu_msgs.c
+index d0ead051f7d18..b74a407a19f22 100644
+--- a/drivers/media/platform/amphion/vpu_msgs.c
++++ b/drivers/media/platform/amphion/vpu_msgs.c
+@@ -23,6 +23,7 @@
+ struct vpu_msg_handler {
+ u32 id;
+ void (*done)(struct vpu_inst *inst, struct vpu_rpc_event *pkt);
++ u32 is_str;
+ };
+
+ static void vpu_session_handle_start_done(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
+@@ -154,7 +155,7 @@ static void vpu_session_handle_error(struct vpu_inst *inst, struct vpu_rpc_event
+ {
+ char *str = (char *)pkt->data;
+
+- if (strlen(str))
++ if (*str)
+ dev_err(inst->dev, "instance %d firmware error : %s\n", inst->id, str);
+ else
+ dev_err(inst->dev, "instance %d is unsupported stream\n", inst->id);
+@@ -180,6 +181,21 @@ static void vpu_session_handle_pic_skipped(struct vpu_inst *inst, struct vpu_rpc
+ vpu_inst_unlock(inst);
+ }
+
++static void vpu_session_handle_dbg_msg(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
++{
++ char *str = (char *)pkt->data;
++
++ if (*str)
++ dev_info(inst->dev, "instance %d firmware dbg msg : %s\n", inst->id, str);
++}
++
++static void vpu_terminate_string_msg(struct vpu_rpc_event *pkt)
++{
++ if (pkt->hdr.num == ARRAY_SIZE(pkt->data))
++ pkt->hdr.num--;
++ pkt->data[pkt->hdr.num] = 0;
++}
++
+ static struct vpu_msg_handler handlers[] = {
+ {VPU_MSG_ID_START_DONE, vpu_session_handle_start_done},
+ {VPU_MSG_ID_STOP_DONE, vpu_session_handle_stop_done},
+@@ -193,9 +209,10 @@ static struct vpu_msg_handler handlers[] = {
+ {VPU_MSG_ID_PIC_DECODED, vpu_session_handle_pic_decoded},
+ {VPU_MSG_ID_DEC_DONE, vpu_session_handle_pic_done},
+ {VPU_MSG_ID_PIC_EOS, vpu_session_handle_eos},
+- {VPU_MSG_ID_UNSUPPORTED, vpu_session_handle_error},
+- {VPU_MSG_ID_FIRMWARE_XCPT, vpu_session_handle_firmware_xcpt},
++ {VPU_MSG_ID_UNSUPPORTED, vpu_session_handle_error, true},
++ {VPU_MSG_ID_FIRMWARE_XCPT, vpu_session_handle_firmware_xcpt, true},
+ {VPU_MSG_ID_PIC_SKIPPED, vpu_session_handle_pic_skipped},
++ {VPU_MSG_ID_DBG_MSG, vpu_session_handle_dbg_msg, true},
+ };
+
+ static int vpu_session_handle_msg(struct vpu_inst *inst, struct vpu_rpc_event *msg)
+@@ -219,8 +236,12 @@ static int vpu_session_handle_msg(struct vpu_inst *inst, struct vpu_rpc_event *m
+ }
+ }
+
+- if (handler && handler->done)
+- handler->done(inst, msg);
++ if (handler) {
++ if (handler->is_str)
++ vpu_terminate_string_msg(msg);
++ if (handler->done)
++ handler->done(inst, msg);
++ }
+
+ vpu_response_cmd(inst, msg_id, 1);
+
+diff --git a/drivers/media/platform/cadence/cdns-csi2rx.c b/drivers/media/platform/cadence/cdns-csi2rx.c
+index cc3ebb0d96f66..2a23da6a0b8ee 100644
+--- a/drivers/media/platform/cadence/cdns-csi2rx.c
++++ b/drivers/media/platform/cadence/cdns-csi2rx.c
+@@ -404,8 +404,10 @@ static int csi2rx_parse_dt(struct csi2rx_priv *csi2rx)
+ asd = v4l2_async_nf_add_fwnode_remote(&csi2rx->notifier, fwh,
+ struct v4l2_async_subdev);
+ of_node_put(ep);
+- if (IS_ERR(asd))
++ if (IS_ERR(asd)) {
++ v4l2_async_nf_cleanup(&csi2rx->notifier);
+ return PTR_ERR(asd);
++ }
+
+ csi2rx->notifier.ops = &csi2rx_notifier_ops;
+
+@@ -467,6 +469,7 @@ static int csi2rx_probe(struct platform_device *pdev)
+ return 0;
+
+ err_cleanup:
++ v4l2_async_nf_unregister(&csi2rx->notifier);
+ v4l2_async_nf_cleanup(&csi2rx->notifier);
+ err_free_priv:
+ kfree(csi2rx);
+@@ -477,6 +480,8 @@ static int csi2rx_remove(struct platform_device *pdev)
+ {
+ struct csi2rx_priv *csi2rx = platform_get_drvdata(pdev);
+
++ v4l2_async_nf_unregister(&csi2rx->notifier);
++ v4l2_async_nf_cleanup(&csi2rx->notifier);
+ v4l2_async_unregister_subdev(&csi2rx->subdev);
+ kfree(csi2rx);
+
+diff --git a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.c b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.c
+index 1cf037bf72dda..8c271c38caf73 100644
+--- a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.c
++++ b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.c
+@@ -98,6 +98,7 @@ void mtk_jpeg_set_enc_params(struct mtk_jpeg_ctx *ctx, void __iomem *base)
+ u32 img_stride;
+ u32 mem_stride;
+ u32 i, enc_quality;
++ u32 nr_enc_quality = ARRAY_SIZE(mtk_jpeg_enc_quality);
+
+ value = width << 16 | height;
+ writel(value, base + JPEG_ENC_IMG_SIZE);
+@@ -128,8 +129,8 @@ void mtk_jpeg_set_enc_params(struct mtk_jpeg_ctx *ctx, void __iomem *base)
+ writel(img_stride, base + JPEG_ENC_IMG_STRIDE);
+ writel(mem_stride, base + JPEG_ENC_STRIDE);
+
+- enc_quality = mtk_jpeg_enc_quality[0].hardware_value;
+- for (i = 0; i < ARRAY_SIZE(mtk_jpeg_enc_quality); i++) {
++ enc_quality = mtk_jpeg_enc_quality[nr_enc_quality - 1].hardware_value;
++ for (i = 0; i < nr_enc_quality; i++) {
+ if (ctx->enc_quality <= mtk_jpeg_enc_quality[i].quality_param) {
+ enc_quality = mtk_jpeg_enc_quality[i].hardware_value;
+ break;
+diff --git a/drivers/media/platform/samsung/s3c-camif/camif-capture.c b/drivers/media/platform/samsung/s3c-camif/camif-capture.c
+index db106ebdf870a..bca3cae4dd8bb 100644
+--- a/drivers/media/platform/samsung/s3c-camif/camif-capture.c
++++ b/drivers/media/platform/samsung/s3c-camif/camif-capture.c
+@@ -1132,12 +1132,12 @@ int s3c_camif_register_video_node(struct camif_dev *camif, int idx)
+
+ ret = vb2_queue_init(q);
+ if (ret)
+- goto err_vd_rel;
++ return ret;
+
+ vp->pad.flags = MEDIA_PAD_FL_SINK;
+ ret = media_entity_pads_init(&vfd->entity, 1, &vp->pad);
+ if (ret)
+- goto err_vd_rel;
++ return ret;
+
+ video_set_drvdata(vfd, vp);
+
+@@ -1170,8 +1170,6 @@ err_ctrlh_free:
+ v4l2_ctrl_handler_free(&vp->ctrl_handler);
+ err_me_cleanup:
+ media_entity_cleanup(&vfd->entity);
+-err_vd_rel:
+- video_device_release(vfd);
+ return ret;
+ }
+
+diff --git a/drivers/media/platform/verisilicon/hantro_drv.c b/drivers/media/platform/verisilicon/hantro_drv.c
+index 8cb4a68c9119e..08840ba313e7a 100644
+--- a/drivers/media/platform/verisilicon/hantro_drv.c
++++ b/drivers/media/platform/verisilicon/hantro_drv.c
+@@ -125,7 +125,8 @@ void hantro_watchdog(struct work_struct *work)
+ ctx = v4l2_m2m_get_curr_priv(vpu->m2m_dev);
+ if (ctx) {
+ vpu_err("frame processing timed out!\n");
+- ctx->codec_ops->reset(ctx);
++ if (ctx->codec_ops->reset)
++ ctx->codec_ops->reset(ctx);
+ hantro_job_finish(vpu, ctx, VB2_BUF_STATE_ERROR);
+ }
+ }
+diff --git a/drivers/media/platform/verisilicon/hantro_postproc.c b/drivers/media/platform/verisilicon/hantro_postproc.c
+index 09d8cf9426895..708095cf09fe2 100644
+--- a/drivers/media/platform/verisilicon/hantro_postproc.c
++++ b/drivers/media/platform/verisilicon/hantro_postproc.c
+@@ -103,7 +103,7 @@ static void hantro_postproc_g1_enable(struct hantro_ctx *ctx)
+
+ static int down_scale_factor(struct hantro_ctx *ctx)
+ {
+- if (ctx->src_fmt.width == ctx->dst_fmt.width)
++ if (ctx->src_fmt.width <= ctx->dst_fmt.width)
+ return 0;
+
+ return DIV_ROUND_CLOSEST(ctx->src_fmt.width, ctx->dst_fmt.width);
+diff --git a/drivers/media/test-drivers/vidtv/vidtv_mux.c b/drivers/media/test-drivers/vidtv/vidtv_mux.c
+index b51e6a3b8cbeb..f99878eff7ace 100644
+--- a/drivers/media/test-drivers/vidtv/vidtv_mux.c
++++ b/drivers/media/test-drivers/vidtv/vidtv_mux.c
+@@ -504,13 +504,16 @@ struct vidtv_mux *vidtv_mux_init(struct dvb_frontend *fe,
+ m->priv = args->priv;
+ m->network_id = args->network_id;
+ m->network_name = kstrdup(args->network_name, GFP_KERNEL);
++ if (!m->network_name)
++ goto free_mux_buf;
++
+ m->timing.current_jiffies = get_jiffies_64();
+
+ if (args->channels)
+ m->channels = args->channels;
+ else
+ if (vidtv_channels_init(m) < 0)
+- goto free_mux_buf;
++ goto free_mux_network_name;
+
+ /* will alloc data for pmt_sections after initializing pat */
+ if (vidtv_channel_si_init(m) < 0)
+@@ -527,6 +530,8 @@ free_channel_si:
+ vidtv_channel_si_destroy(m);
+ free_channels:
+ vidtv_channels_destroy(m);
++free_mux_network_name:
++ kfree(m->network_name);
+ free_mux_buf:
+ vfree(m->mux_buf);
+ free_mux:
+diff --git a/drivers/media/test-drivers/vidtv/vidtv_psi.c b/drivers/media/test-drivers/vidtv/vidtv_psi.c
+index a5875380ef407..c45828bc5b278 100644
+--- a/drivers/media/test-drivers/vidtv/vidtv_psi.c
++++ b/drivers/media/test-drivers/vidtv/vidtv_psi.c
+@@ -301,16 +301,29 @@ struct vidtv_psi_desc_service *vidtv_psi_service_desc_init(struct vidtv_psi_desc
+
+ desc->service_name_len = service_name_len;
+
+- if (service_name && service_name_len)
++ if (service_name && service_name_len) {
+ desc->service_name = kstrdup(service_name, GFP_KERNEL);
++ if (!desc->service_name)
++ goto free_desc;
++ }
+
+ desc->provider_name_len = provider_name_len;
+
+- if (provider_name && provider_name_len)
++ if (provider_name && provider_name_len) {
+ desc->provider_name = kstrdup(provider_name, GFP_KERNEL);
++ if (!desc->provider_name)
++ goto free_desc_service_name;
++ }
+
+ vidtv_psi_desc_chain(head, (struct vidtv_psi_desc *)desc);
+ return desc;
++
++free_desc_service_name:
++ if (service_name && service_name_len)
++ kfree(desc->service_name);
++free_desc:
++ kfree(desc);
++ return NULL;
+ }
+
+ struct vidtv_psi_desc_registration
+@@ -355,8 +368,13 @@ struct vidtv_psi_desc_network_name
+
+ desc->length = network_name_len;
+
+- if (network_name && network_name_len)
++ if (network_name && network_name_len) {
+ desc->network_name = kstrdup(network_name, GFP_KERNEL);
++ if (!desc->network_name) {
++ kfree(desc);
++ return NULL;
++ }
++ }
+
+ vidtv_psi_desc_chain(head, (struct vidtv_psi_desc *)desc);
+ return desc;
+@@ -442,15 +460,32 @@ struct vidtv_psi_desc_short_event
+ iso_language_code = "eng";
+
+ desc->iso_language_code = kstrdup(iso_language_code, GFP_KERNEL);
++ if (!desc->iso_language_code)
++ goto free_desc;
+
+- if (event_name && event_name_len)
++ if (event_name && event_name_len) {
+ desc->event_name = kstrdup(event_name, GFP_KERNEL);
++ if (!desc->event_name)
++ goto free_desc_language_code;
++ }
+
+- if (text && text_len)
++ if (text && text_len) {
+ desc->text = kstrdup(text, GFP_KERNEL);
++ if (!desc->text)
++ goto free_desc_event_name;
++ }
+
+ vidtv_psi_desc_chain(head, (struct vidtv_psi_desc *)desc);
+ return desc;
++
++free_desc_event_name:
++ if (event_name && event_name_len)
++ kfree(desc->event_name);
++free_desc_language_code:
++ kfree(desc->iso_language_code);
++free_desc:
++ kfree(desc);
++ return NULL;
+ }
+
+ struct vidtv_psi_desc *vidtv_psi_desc_clone(struct vidtv_psi_desc *desc)
+diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c
+index cd6f5374414d4..5f9dec71ff6e0 100644
+--- a/drivers/media/usb/dvb-usb-v2/af9035.c
++++ b/drivers/media/usb/dvb-usb-v2/af9035.c
+@@ -323,8 +323,10 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
+ ret = -EOPNOTSUPP;
+ } else if ((msg[0].addr == state->af9033_i2c_addr[0]) ||
+ (msg[0].addr == state->af9033_i2c_addr[1])) {
+- if (msg[0].len < 3 || msg[1].len < 1)
+- return -EOPNOTSUPP;
++ if (msg[0].len < 3 || msg[1].len < 1) {
++ ret = -EOPNOTSUPP;
++ goto unlock;
++ }
+ /* demod access via firmware interface */
+ reg = msg[0].buf[0] << 16 | msg[0].buf[1] << 8 |
+ msg[0].buf[2];
+@@ -384,8 +386,10 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
+ ret = -EOPNOTSUPP;
+ } else if ((msg[0].addr == state->af9033_i2c_addr[0]) ||
+ (msg[0].addr == state->af9033_i2c_addr[1])) {
+- if (msg[0].len < 3)
+- return -EOPNOTSUPP;
++ if (msg[0].len < 3) {
++ ret = -EOPNOTSUPP;
++ goto unlock;
++ }
+ /* demod access via firmware interface */
+ reg = msg[0].buf[0] << 16 | msg[0].buf[1] << 8 |
+ msg[0].buf[2];
+@@ -460,6 +464,7 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
+ ret = -EOPNOTSUPP;
+ }
+
++unlock:
+ mutex_unlock(&d->i2c_mutex);
+
+ if (ret < 0)
+diff --git a/drivers/mfd/arizona-spi.c b/drivers/mfd/arizona-spi.c
+index 5c4af05ed0440..3f83a77ce69e7 100644
+--- a/drivers/mfd/arizona-spi.c
++++ b/drivers/mfd/arizona-spi.c
+@@ -159,6 +159,9 @@ static int arizona_spi_acpi_probe(struct arizona *arizona)
+ arizona->pdata.micd_ranges = arizona_micd_aosp_ranges;
+ arizona->pdata.num_micd_ranges = ARRAY_SIZE(arizona_micd_aosp_ranges);
+
++ /* Use left headphone speaker for HP vs line-out detection */
++ arizona->pdata.hpdet_channel = ARIZONA_ACCDET_MODE_HPL;
++
+ return 0;
+ }
+
+diff --git a/drivers/mfd/dln2.c b/drivers/mfd/dln2.c
+index c3149729cec2e..6cd0b0c752d6e 100644
+--- a/drivers/mfd/dln2.c
++++ b/drivers/mfd/dln2.c
+@@ -827,7 +827,6 @@ out_stop_rx:
+ dln2_stop_rx_urbs(dln2);
+
+ out_free:
+- usb_put_dev(dln2->usb_dev);
+ dln2_free(dln2);
+
+ return ret;
+diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
+index 16d1861e96823..97909e3e2c303 100644
+--- a/drivers/mfd/mfd-core.c
++++ b/drivers/mfd/mfd-core.c
+@@ -176,6 +176,7 @@ static int mfd_add_device(struct device *parent, int id,
+ struct platform_device *pdev;
+ struct device_node *np = NULL;
+ struct mfd_of_node_entry *of_entry, *tmp;
++ bool disabled = false;
+ int ret = -ENOMEM;
+ int platform_id;
+ int r;
+@@ -213,11 +214,10 @@ static int mfd_add_device(struct device *parent, int id,
+ if (IS_ENABLED(CONFIG_OF) && parent->of_node && cell->of_compatible) {
+ for_each_child_of_node(parent->of_node, np) {
+ if (of_device_is_compatible(np, cell->of_compatible)) {
+- /* Ignore 'disabled' devices error free */
++ /* Skip 'disabled' devices */
+ if (!of_device_is_available(np)) {
+- of_node_put(np);
+- ret = 0;
+- goto fail_alias;
++ disabled = true;
++ continue;
+ }
+
+ ret = mfd_match_of_node_to_dev(pdev, np, cell);
+@@ -227,10 +227,17 @@ static int mfd_add_device(struct device *parent, int id,
+ if (ret)
+ goto fail_alias;
+
+- break;
++ goto match;
+ }
+ }
+
++ if (disabled) {
++ /* Ignore 'disabled' devices error free */
++ ret = 0;
++ goto fail_alias;
++ }
++
++match:
+ if (!pdev->dev.of_node)
+ pr_warn("%s: Failed to locate of_node [id: %d]\n",
+ cell->name, platform_id);
+diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c
+index 7f6976a9f508b..48e0f8377e659 100644
+--- a/drivers/misc/ti-st/st_core.c
++++ b/drivers/misc/ti-st/st_core.c
+@@ -15,6 +15,7 @@
+ #include <linux/skbuff.h>
+
+ #include <linux/ti_wilink_st.h>
++#include <linux/netdevice.h>
+
+ extern void st_kim_recv(void *, const unsigned char *, long);
+ void st_int_recv(void *, const unsigned char *, long);
+@@ -435,7 +436,7 @@ static void st_int_enqueue(struct st_data_s *st_gdata, struct sk_buff *skb)
+ case ST_LL_AWAKE_TO_ASLEEP:
+ pr_err("ST LL is illegal state(%ld),"
+ "purging received skb.", st_ll_getstate(st_gdata));
+- kfree_skb(skb);
++ dev_kfree_skb_irq(skb);
+ break;
+ case ST_LL_ASLEEP:
+ skb_queue_tail(&st_gdata->tx_waitq, skb);
+@@ -444,7 +445,7 @@ static void st_int_enqueue(struct st_data_s *st_gdata, struct sk_buff *skb)
+ default:
+ pr_err("ST LL is illegal state(%ld),"
+ "purging received skb.", st_ll_getstate(st_gdata));
+- kfree_skb(skb);
++ dev_kfree_skb_irq(skb);
+ break;
+ }
+
+@@ -498,7 +499,7 @@ void st_tx_wakeup(struct st_data_s *st_data)
+ spin_unlock_irqrestore(&st_data->lock, flags);
+ break;
+ }
+- kfree_skb(skb);
++ dev_kfree_skb_irq(skb);
+ spin_unlock_irqrestore(&st_data->lock, flags);
+ }
+ /* if wake-up is set in another context- restart sending */
+diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
+index 4a4bab9aa7263..89cd48fcec79f 100644
+--- a/drivers/mmc/core/mmc.c
++++ b/drivers/mmc/core/mmc.c
+@@ -104,7 +104,7 @@ static int mmc_decode_cid(struct mmc_card *card)
+ case 3: /* MMC v3.1 - v3.3 */
+ case 4: /* MMC v4 */
+ card->cid.manfid = UNSTUFF_BITS(resp, 120, 8);
+- card->cid.oemid = UNSTUFF_BITS(resp, 104, 8);
++ card->cid.oemid = UNSTUFF_BITS(resp, 104, 16);
+ card->cid.prod_name[0] = UNSTUFF_BITS(resp, 96, 8);
+ card->cid.prod_name[1] = UNSTUFF_BITS(resp, 88, 8);
+ card->cid.prod_name[2] = UNSTUFF_BITS(resp, 80, 8);
+diff --git a/drivers/net/can/dev/dev.c b/drivers/net/can/dev/dev.c
+index c1956b1e9faf7..f685479eda1be 100644
+--- a/drivers/net/can/dev/dev.c
++++ b/drivers/net/can/dev/dev.c
+@@ -132,7 +132,8 @@ static void can_restart(struct net_device *dev)
+ struct can_frame *cf;
+ int err;
+
+- BUG_ON(netif_carrier_ok(dev));
++ if (netif_carrier_ok(dev))
++ netdev_err(dev, "Attempt to restart for bus-off recovery, but carrier is OK?\n");
+
+ /* No synchronization needed because the device is bus-off and
+ * no messages can come in or go out.
+@@ -153,11 +154,12 @@ restart:
+ priv->can_stats.restarts++;
+
+ /* Now restart the device */
+- err = priv->do_set_mode(dev, CAN_MODE_START);
+-
+ netif_carrier_on(dev);
+- if (err)
++ err = priv->do_set_mode(dev, CAN_MODE_START);
++ if (err) {
+ netdev_err(dev, "Error %d during restart", err);
++ netif_carrier_off(dev);
++ }
+ }
+
+ static void can_restart_work(struct work_struct *work)
+diff --git a/drivers/net/can/dev/skb.c b/drivers/net/can/dev/skb.c
+index f6d05b3ef59ab..3ebd4f779b9bd 100644
+--- a/drivers/net/can/dev/skb.c
++++ b/drivers/net/can/dev/skb.c
+@@ -49,7 +49,11 @@ int can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
+ {
+ struct can_priv *priv = netdev_priv(dev);
+
+- BUG_ON(idx >= priv->echo_skb_max);
++ if (idx >= priv->echo_skb_max) {
++ netdev_err(dev, "%s: BUG! Trying to access can_priv::echo_skb out of bounds (%u/max %u)\n",
++ __func__, idx, priv->echo_skb_max);
++ return -EINVAL;
++ }
+
+ /* check flag whether this packet has to be looped back */
+ if (!(dev->flags & IFF_ECHO) ||
+diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
+index 9609041016776..85570e40c8e9b 100644
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -18086,7 +18086,8 @@ static void tg3_shutdown(struct pci_dev *pdev)
+ if (netif_running(dev))
+ dev_close(dev);
+
+- tg3_power_down(tp);
++ if (system_state == SYSTEM_POWER_OFF)
++ tg3_power_down(tp);
+
+ rtnl_unlock();
+
+diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
+index 7750702900fa6..6f6525983130e 100644
+--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
++++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
+@@ -2259,7 +2259,7 @@ static void chtls_rx_ack(struct sock *sk, struct sk_buff *skb)
+
+ if (tp->snd_una != snd_una) {
+ tp->snd_una = snd_una;
+- tp->rcv_tstamp = tcp_time_stamp(tp);
++ tp->rcv_tstamp = tcp_jiffies32;
+ if (tp->snd_una == tp->snd_nxt &&
+ !csk_flag_nochk(csk, CSK_TX_FAILOVER))
+ csk_reset_flag(csk, CSK_TX_WAIT_IDLE);
+diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
+index 2e5e0a8872704..d3f6ad586ba1b 100644
+--- a/drivers/net/ethernet/google/gve/gve_main.c
++++ b/drivers/net/ethernet/google/gve/gve_main.c
+@@ -139,7 +139,7 @@ static int gve_alloc_stats_report(struct gve_priv *priv)
+ rx_stats_num = (GVE_RX_STATS_REPORT_NUM + NIC_RX_STATS_REPORT_NUM) *
+ priv->rx_cfg.num_queues;
+ priv->stats_report_len = struct_size(priv->stats_report, stats,
+- tx_stats_num + rx_stats_num);
++ size_add(tx_stats_num, rx_stats_num));
+ priv->stats_report =
+ dma_alloc_coherent(&priv->pdev->dev, priv->stats_report_len,
+ &priv->stats_report_bus, GFP_KERNEL);
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index 08ccf0024ce1a..68ee2c59692d1 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -16283,11 +16283,15 @@ static void i40e_remove(struct pci_dev *pdev)
+ i40e_switch_branch_release(pf->veb[i]);
+ }
+
+- /* Now we can shutdown the PF's VSI, just before we kill
++ /* Now we can shutdown the PF's VSIs, just before we kill
+ * adminq and hmc.
+ */
+- if (pf->vsi[pf->lan_vsi])
+- i40e_vsi_release(pf->vsi[pf->lan_vsi]);
++ for (i = pf->num_alloc_vsi; i--;)
++ if (pf->vsi[i]) {
++ i40e_vsi_close(pf->vsi[i]);
++ i40e_vsi_release(pf->vsi[i]);
++ pf->vsi[i] = NULL;
++ }
+
+ i40e_cloud_filter_exit(pf);
+
+diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
+index 06cfd567866c2..7389855fa307a 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf.h
++++ b/drivers/net/ethernet/intel/iavf/iavf.h
+@@ -298,8 +298,6 @@ struct iavf_adapter {
+ #define IAVF_FLAG_CLIENT_NEEDS_OPEN BIT(10)
+ #define IAVF_FLAG_CLIENT_NEEDS_CLOSE BIT(11)
+ #define IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS BIT(12)
+-#define IAVF_FLAG_PROMISC_ON BIT(13)
+-#define IAVF_FLAG_ALLMULTI_ON BIT(14)
+ #define IAVF_FLAG_LEGACY_RX BIT(15)
+ #define IAVF_FLAG_REINIT_ITR_NEEDED BIT(16)
+ #define IAVF_FLAG_QUEUES_DISABLED BIT(17)
+@@ -325,10 +323,7 @@ struct iavf_adapter {
+ #define IAVF_FLAG_AQ_SET_HENA BIT_ULL(12)
+ #define IAVF_FLAG_AQ_SET_RSS_KEY BIT_ULL(13)
+ #define IAVF_FLAG_AQ_SET_RSS_LUT BIT_ULL(14)
+-#define IAVF_FLAG_AQ_REQUEST_PROMISC BIT_ULL(15)
+-#define IAVF_FLAG_AQ_RELEASE_PROMISC BIT_ULL(16)
+-#define IAVF_FLAG_AQ_REQUEST_ALLMULTI BIT_ULL(17)
+-#define IAVF_FLAG_AQ_RELEASE_ALLMULTI BIT_ULL(18)
++#define IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE BIT_ULL(15)
+ #define IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING BIT_ULL(19)
+ #define IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING BIT_ULL(20)
+ #define IAVF_FLAG_AQ_ENABLE_CHANNELS BIT_ULL(21)
+@@ -365,6 +360,12 @@ struct iavf_adapter {
+ (IAVF_EXTENDED_CAP_SEND_VLAN_V2 | \
+ IAVF_EXTENDED_CAP_RECV_VLAN_V2)
+
++ /* Lock to prevent possible clobbering of
++ * current_netdev_promisc_flags
++ */
++ spinlock_t current_netdev_promisc_flags_lock;
++ netdev_features_t current_netdev_promisc_flags;
++
+ /* OS defined structs */
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+@@ -551,7 +552,8 @@ void iavf_add_ether_addrs(struct iavf_adapter *adapter);
+ void iavf_del_ether_addrs(struct iavf_adapter *adapter);
+ void iavf_add_vlans(struct iavf_adapter *adapter);
+ void iavf_del_vlans(struct iavf_adapter *adapter);
+-void iavf_set_promiscuous(struct iavf_adapter *adapter, int flags);
++void iavf_set_promiscuous(struct iavf_adapter *adapter);
++bool iavf_promiscuous_mode_changed(struct iavf_adapter *adapter);
+ void iavf_request_stats(struct iavf_adapter *adapter);
+ int iavf_request_reset(struct iavf_adapter *adapter);
+ void iavf_get_hena(struct iavf_adapter *adapter);
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
+index 326bb5fdf5f90..4836bac2bd09d 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
++++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
+@@ -1198,6 +1198,16 @@ static int iavf_addr_unsync(struct net_device *netdev, const u8 *addr)
+ return 0;
+ }
+
++/**
++ * iavf_promiscuous_mode_changed - check if promiscuous mode bits changed
++ * @adapter: device specific adapter
++ */
++bool iavf_promiscuous_mode_changed(struct iavf_adapter *adapter)
++{
++ return (adapter->current_netdev_promisc_flags ^ adapter->netdev->flags) &
++ (IFF_PROMISC | IFF_ALLMULTI);
++}
++
+ /**
+ * iavf_set_rx_mode - NDO callback to set the netdev filters
+ * @netdev: network interface device structure
+@@ -1211,19 +1221,10 @@ static void iavf_set_rx_mode(struct net_device *netdev)
+ __dev_mc_sync(netdev, iavf_addr_sync, iavf_addr_unsync);
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
+- if (netdev->flags & IFF_PROMISC &&
+- !(adapter->flags & IAVF_FLAG_PROMISC_ON))
+- adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_PROMISC;
+- else if (!(netdev->flags & IFF_PROMISC) &&
+- adapter->flags & IAVF_FLAG_PROMISC_ON)
+- adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_PROMISC;
+-
+- if (netdev->flags & IFF_ALLMULTI &&
+- !(adapter->flags & IAVF_FLAG_ALLMULTI_ON))
+- adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_ALLMULTI;
+- else if (!(netdev->flags & IFF_ALLMULTI) &&
+- adapter->flags & IAVF_FLAG_ALLMULTI_ON)
+- adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_ALLMULTI;
++ spin_lock_bh(&adapter->current_netdev_promisc_flags_lock);
++ if (iavf_promiscuous_mode_changed(adapter))
++ adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE;
++ spin_unlock_bh(&adapter->current_netdev_promisc_flags_lock);
+ }
+
+ /**
+@@ -2174,19 +2175,8 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter)
+ return 0;
+ }
+
+- if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_PROMISC) {
+- iavf_set_promiscuous(adapter, FLAG_VF_UNICAST_PROMISC |
+- FLAG_VF_MULTICAST_PROMISC);
+- return 0;
+- }
+-
+- if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_ALLMULTI) {
+- iavf_set_promiscuous(adapter, FLAG_VF_MULTICAST_PROMISC);
+- return 0;
+- }
+- if ((adapter->aq_required & IAVF_FLAG_AQ_RELEASE_PROMISC) ||
+- (adapter->aq_required & IAVF_FLAG_AQ_RELEASE_ALLMULTI)) {
+- iavf_set_promiscuous(adapter, 0);
++ if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE) {
++ iavf_set_promiscuous(adapter);
+ return 0;
+ }
+
+@@ -5008,6 +4998,7 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ spin_lock_init(&adapter->cloud_filter_list_lock);
+ spin_lock_init(&adapter->fdir_fltr_lock);
+ spin_lock_init(&adapter->adv_rss_lock);
++ spin_lock_init(&adapter->current_netdev_promisc_flags_lock);
+
+ INIT_LIST_HEAD(&adapter->mac_filter_list);
+ INIT_LIST_HEAD(&adapter->vlan_filter_list);
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+index 2fc8e60ef6afb..5a66b05c03222 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
++++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+@@ -948,14 +948,14 @@ void iavf_del_vlans(struct iavf_adapter *adapter)
+ /**
+ * iavf_set_promiscuous
+ * @adapter: adapter structure
+- * @flags: bitmask to control unicast/multicast promiscuous.
+ *
+ * Request that the PF enable promiscuous mode for our VSI.
+ **/
+-void iavf_set_promiscuous(struct iavf_adapter *adapter, int flags)
++void iavf_set_promiscuous(struct iavf_adapter *adapter)
+ {
++ struct net_device *netdev = adapter->netdev;
+ struct virtchnl_promisc_info vpi;
+- int promisc_all;
++ unsigned int flags;
+
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+@@ -964,36 +964,57 @@ void iavf_set_promiscuous(struct iavf_adapter *adapter, int flags)
+ return;
+ }
+
+- promisc_all = FLAG_VF_UNICAST_PROMISC |
+- FLAG_VF_MULTICAST_PROMISC;
+- if ((flags & promisc_all) == promisc_all) {
+- adapter->flags |= IAVF_FLAG_PROMISC_ON;
+- adapter->aq_required &= ~IAVF_FLAG_AQ_REQUEST_PROMISC;
+- dev_info(&adapter->pdev->dev, "Entering promiscuous mode\n");
+- }
++ /* prevent changes to promiscuous flags */
++ spin_lock_bh(&adapter->current_netdev_promisc_flags_lock);
+
+- if (flags & FLAG_VF_MULTICAST_PROMISC) {
+- adapter->flags |= IAVF_FLAG_ALLMULTI_ON;
+- adapter->aq_required &= ~IAVF_FLAG_AQ_REQUEST_ALLMULTI;
+- dev_info(&adapter->pdev->dev, "%s is entering multicast promiscuous mode\n",
+- adapter->netdev->name);
++ /* sanity check to prevent duplicate AQ calls */
++ if (!iavf_promiscuous_mode_changed(adapter)) {
++ adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE;
++ dev_dbg(&adapter->pdev->dev, "No change in promiscuous mode\n");
++ /* allow changes to promiscuous flags */
++ spin_unlock_bh(&adapter->current_netdev_promisc_flags_lock);
++ return;
+ }
+
+- if (!flags) {
+- if (adapter->flags & IAVF_FLAG_PROMISC_ON) {
+- adapter->flags &= ~IAVF_FLAG_PROMISC_ON;
+- adapter->aq_required &= ~IAVF_FLAG_AQ_RELEASE_PROMISC;
+- dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n");
+- }
++ /* there are 2 bits, but only 3 states */
++ if (!(netdev->flags & IFF_PROMISC) &&
++ netdev->flags & IFF_ALLMULTI) {
++ /* State 1 - only multicast promiscuous mode enabled
++ * - !IFF_PROMISC && IFF_ALLMULTI
++ */
++ flags = FLAG_VF_MULTICAST_PROMISC;
++ adapter->current_netdev_promisc_flags |= IFF_ALLMULTI;
++ adapter->current_netdev_promisc_flags &= ~IFF_PROMISC;
++ dev_info(&adapter->pdev->dev, "Entering multicast promiscuous mode\n");
++ } else if (!(netdev->flags & IFF_PROMISC) &&
++ !(netdev->flags & IFF_ALLMULTI)) {
++ /* State 2 - unicast/multicast promiscuous mode disabled
++ * - !IFF_PROMISC && !IFF_ALLMULTI
++ */
++ flags = 0;
++ adapter->current_netdev_promisc_flags &=
++ ~(IFF_PROMISC | IFF_ALLMULTI);
++ dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n");
++ } else {
++ /* State 3 - unicast/multicast promiscuous mode enabled
++ * - IFF_PROMISC && IFF_ALLMULTI
++ * - IFF_PROMISC && !IFF_ALLMULTI
++ */
++ flags = FLAG_VF_UNICAST_PROMISC | FLAG_VF_MULTICAST_PROMISC;
++ adapter->current_netdev_promisc_flags |= IFF_PROMISC;
++ if (netdev->flags & IFF_ALLMULTI)
++ adapter->current_netdev_promisc_flags |= IFF_ALLMULTI;
++ else
++ adapter->current_netdev_promisc_flags &= ~IFF_ALLMULTI;
+
+- if (adapter->flags & IAVF_FLAG_ALLMULTI_ON) {
+- adapter->flags &= ~IAVF_FLAG_ALLMULTI_ON;
+- adapter->aq_required &= ~IAVF_FLAG_AQ_RELEASE_ALLMULTI;
+- dev_info(&adapter->pdev->dev, "%s is leaving multicast promiscuous mode\n",
+- adapter->netdev->name);
+- }
++ dev_info(&adapter->pdev->dev, "Entering promiscuous mode\n");
+ }
+
++ adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE;
++
++ /* allow changes to promiscuous flags */
++ spin_unlock_bh(&adapter->current_netdev_promisc_flags_lock);
++
+ adapter->current_op = VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
+ vpi.vsi_id = adapter->vsi_res->vsi_id;
+ vpi.flags = flags;
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+index aadc352c2ffbd..5c9dc3f9262f5 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+@@ -1222,6 +1222,11 @@ static int rvu_dbg_npa_ctx_display(struct seq_file *m, void *unused, int ctype)
+
+ for (aura = id; aura < max_id; aura++) {
+ aq_req.aura_id = aura;
++
++ /* Skip if queue is uninitialized */
++ if (ctype == NPA_AQ_CTYPE_POOL && !test_bit(aura, pfvf->pool_bmap))
++ continue;
++
+ seq_printf(m, "======%s : %d=======\n",
+ (ctype == NPA_AQ_CTYPE_AURA) ? "AURA" : "POOL",
+ aq_req.aura_id);
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
+index 73fdb87986148..3d31ddf7c652e 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
+@@ -8,7 +8,7 @@ obj-$(CONFIG_OCTEONTX2_VF) += rvu_nicvf.o otx2_ptp.o
+
+ rvu_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \
+ otx2_flows.o otx2_tc.o cn10k.o otx2_dmac_flt.o \
+- otx2_devlink.o
++ otx2_devlink.o qos_sq.o
+ rvu_nicvf-y := otx2_vf.o otx2_devlink.o
+
+ rvu_nicpf-$(CONFIG_DCB) += otx2_dcbnl.o
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+index 011355e73696e..0f896f606c3e6 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+@@ -513,8 +513,8 @@ void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx)
+ (pfvf->hw.cq_ecount_wait - 1));
+ }
+
+-int __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
+- dma_addr_t *dma)
++static int __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
++ dma_addr_t *dma)
+ {
+ u8 *buf;
+
+@@ -532,8 +532,8 @@ int __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
+ return 0;
+ }
+
+-static int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
+- dma_addr_t *dma)
++int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
++ dma_addr_t *dma)
+ {
+ int ret;
+
+@@ -795,21 +795,21 @@ void otx2_txschq_stop(struct otx2_nic *pfvf)
+ void otx2_sqb_flush(struct otx2_nic *pfvf)
+ {
+ int qidx, sqe_tail, sqe_head;
++ struct otx2_snd_queue *sq;
+ u64 incr, *ptr, val;
+- int timeout = 1000;
+
+ ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS);
+- for (qidx = 0; qidx < pfvf->hw.tot_tx_queues; qidx++) {
++ for (qidx = 0; qidx < otx2_get_total_tx_queues(pfvf); qidx++) {
++ sq = &pfvf->qset.sq[qidx];
++ if (!sq->sqb_ptrs)
++ continue;
++
+ incr = (u64)qidx << 32;
+- while (timeout) {
+- val = otx2_atomic64_add(incr, ptr);
+- sqe_head = (val >> 20) & 0x3F;
+- sqe_tail = (val >> 28) & 0x3F;
+- if (sqe_head == sqe_tail)
+- break;
+- usleep_range(1, 3);
+- timeout--;
+- }
++ val = otx2_atomic64_add(incr, ptr);
++ sqe_head = (val >> 20) & 0x3F;
++ sqe_tail = (val >> 28) & 0x3F;
++ if (sqe_head != sqe_tail)
++ usleep_range(50, 60);
+ }
+ }
+
+@@ -899,7 +899,7 @@ int otx2_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
+ return otx2_sync_mbox_msg(&pfvf->mbox);
+ }
+
+-static int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
++int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
+ {
+ struct otx2_qset *qset = &pfvf->qset;
+ struct otx2_snd_queue *sq;
+@@ -972,9 +972,17 @@ static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx)
+ cq->cint_idx = qidx - pfvf->hw.rx_queues;
+ cq->cqe_cnt = qset->sqe_cnt;
+ } else {
+- cq->cq_type = CQ_XDP;
+- cq->cint_idx = qidx - non_xdp_queues;
+- cq->cqe_cnt = qset->sqe_cnt;
++ if (pfvf->hw.xdp_queues &&
++ qidx < non_xdp_queues + pfvf->hw.xdp_queues) {
++ cq->cq_type = CQ_XDP;
++ cq->cint_idx = qidx - non_xdp_queues;
++ cq->cqe_cnt = qset->sqe_cnt;
++ } else {
++ cq->cq_type = CQ_QOS;
++ cq->cint_idx = qidx - non_xdp_queues -
++ pfvf->hw.xdp_queues;
++ cq->cqe_cnt = qset->sqe_cnt;
++ }
+ }
+ cq->cqe_size = pfvf->qset.xqe_size;
+
+@@ -1085,7 +1093,7 @@ int otx2_config_nix_queues(struct otx2_nic *pfvf)
+ }
+
+ /* Initialize TX queues */
+- for (qidx = 0; qidx < pfvf->hw.tot_tx_queues; qidx++) {
++ for (qidx = 0; qidx < pfvf->hw.non_qos_queues; qidx++) {
+ u16 sqb_aura = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);
+
+ err = otx2_sq_init(pfvf, qidx, sqb_aura);
+@@ -1132,7 +1140,7 @@ int otx2_config_nix(struct otx2_nic *pfvf)
+
+ /* Set RQ/SQ/CQ counts */
+ nixlf->rq_cnt = pfvf->hw.rx_queues;
+- nixlf->sq_cnt = pfvf->hw.tot_tx_queues;
++ nixlf->sq_cnt = otx2_get_total_tx_queues(pfvf);
+ nixlf->cq_cnt = pfvf->qset.cq_cnt;
+ nixlf->rss_sz = MAX_RSS_INDIR_TBL_SIZE;
+ nixlf->rss_grps = MAX_RSS_GROUPS;
+@@ -1170,7 +1178,7 @@ void otx2_sq_free_sqbs(struct otx2_nic *pfvf)
+ int sqb, qidx;
+ u64 iova, pa;
+
+- for (qidx = 0; qidx < hw->tot_tx_queues; qidx++) {
++ for (qidx = 0; qidx < otx2_get_total_tx_queues(pfvf); qidx++) {
+ sq = &qset->sq[qidx];
+ if (!sq->sqb_ptrs)
+ continue;
+@@ -1238,8 +1246,8 @@ void otx2_aura_pool_free(struct otx2_nic *pfvf)
+ pfvf->qset.pool = NULL;
+ }
+
+-static int otx2_aura_init(struct otx2_nic *pfvf, int aura_id,
+- int pool_id, int numptrs)
++int otx2_aura_init(struct otx2_nic *pfvf, int aura_id,
++ int pool_id, int numptrs)
+ {
+ struct npa_aq_enq_req *aq;
+ struct otx2_pool *pool;
+@@ -1315,8 +1323,8 @@ static int otx2_aura_init(struct otx2_nic *pfvf, int aura_id,
+ return 0;
+ }
+
+-static int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
+- int stack_pages, int numptrs, int buf_size)
++int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
++ int stack_pages, int numptrs, int buf_size)
+ {
+ struct npa_aq_enq_req *aq;
+ struct otx2_pool *pool;
+@@ -1386,7 +1394,7 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf)
+ stack_pages =
+ (num_sqbs + hw->stack_pg_ptrs - 1) / hw->stack_pg_ptrs;
+
+- for (qidx = 0; qidx < hw->tot_tx_queues; qidx++) {
++ for (qidx = 0; qidx < hw->non_qos_queues; qidx++) {
+ pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);
+ /* Initialize aura context */
+ err = otx2_aura_init(pfvf, pool_id, pool_id, num_sqbs);
+@@ -1406,7 +1414,7 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf)
+ goto fail;
+
+ /* Allocate pointers and free them to aura/pool */
+- for (qidx = 0; qidx < hw->tot_tx_queues; qidx++) {
++ for (qidx = 0; qidx < hw->non_qos_queues; qidx++) {
+ pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);
+ pool = &pfvf->qset.pool[pool_id];
+
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+index 8a9793b06769f..efd66224b3dbf 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+@@ -27,6 +27,7 @@
+ #include "otx2_txrx.h"
+ #include "otx2_devlink.h"
+ #include <rvu_trace.h>
++#include "qos.h"
+
+ /* PCI device IDs */
+ #define PCI_DEVID_OCTEONTX2_RVU_PF 0xA063
+@@ -186,7 +187,8 @@ struct otx2_hw {
+ u16 rx_queues;
+ u16 tx_queues;
+ u16 xdp_queues;
+- u16 tot_tx_queues;
++ u16 tc_tx_queues;
++ u16 non_qos_queues; /* tx queues plus xdp queues */
+ u16 max_queues;
+ u16 pool_cnt;
+ u16 rqpool_cnt;
+@@ -498,6 +500,8 @@ struct otx2_nic {
+ u16 pfc_schq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
+ bool pfc_alloc_status[NIX_PF_PFC_PRIO_MAX];
+ #endif
++ /* qos */
++ struct otx2_qos qos;
+
+ /* napi event count. It is needed for adaptive irq coalescing. */
+ u32 napi_events;
+@@ -742,8 +746,7 @@ static inline void cn10k_aura_freeptr(void *dev, int aura, u64 buf)
+ /* Alloc pointer from pool/aura */
+ static inline u64 otx2_aura_allocptr(struct otx2_nic *pfvf, int aura)
+ {
+- u64 *ptr = (u64 *)otx2_get_regaddr(pfvf,
+- NPA_LF_AURA_OP_ALLOCX(0));
++ u64 *ptr = (__force u64 *)otx2_get_regaddr(pfvf, NPA_LF_AURA_OP_ALLOCX(0));
+ u64 incr = (u64)aura | BIT_ULL(63);
+
+ return otx2_atomic64_add(incr, ptr);
+@@ -885,12 +888,23 @@ static inline void otx2_dma_unmap_page(struct otx2_nic *pfvf,
+
+ static inline u16 otx2_get_smq_idx(struct otx2_nic *pfvf, u16 qidx)
+ {
++ u16 smq;
+ #ifdef CONFIG_DCB
+ if (qidx < NIX_PF_PFC_PRIO_MAX && pfvf->pfc_alloc_status[qidx])
+ return pfvf->pfc_schq_list[NIX_TXSCH_LVL_SMQ][qidx];
+ #endif
++ /* check if qidx falls under QOS queues */
++ if (qidx >= pfvf->hw.non_qos_queues)
++ smq = pfvf->qos.qid_to_sqmap[qidx - pfvf->hw.non_qos_queues];
++ else
++ smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0];
+
+- return pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0];
++ return smq;
++}
++
++static inline u16 otx2_get_total_tx_queues(struct otx2_nic *pfvf)
++{
++ return pfvf->hw.non_qos_queues + pfvf->hw.tc_tx_queues;
+ }
+
+ /* MSI-X APIs */
+@@ -919,18 +933,24 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool pfc_en);
+ int otx2_txsch_alloc(struct otx2_nic *pfvf);
+ void otx2_txschq_stop(struct otx2_nic *pfvf);
+ void otx2_txschq_free_one(struct otx2_nic *pfvf, u16 lvl, u16 schq);
++void otx2_free_pending_sqe(struct otx2_nic *pfvf);
+ void otx2_sqb_flush(struct otx2_nic *pfvf);
+-int __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
+- dma_addr_t *dma);
++int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
++ dma_addr_t *dma);
+ int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable);
+ void otx2_ctx_disable(struct mbox *mbox, int type, bool npa);
+ int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable);
+ void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq);
+ void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq);
++int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura);
+ int otx2_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura);
+ int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura);
+ int otx2_alloc_buffer(struct otx2_nic *pfvf, struct otx2_cq_queue *cq,
+ dma_addr_t *dma);
++int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
++ int stack_pages, int numptrs, int buf_size);
++int otx2_aura_init(struct otx2_nic *pfvf, int aura_id,
++ int pool_id, int numptrs);
+
+ /* RSS configuration APIs*/
+ int otx2_rss_init(struct otx2_nic *pfvf);
+@@ -1038,4 +1058,14 @@ static inline void cn10k_handle_mcs_event(struct otx2_nic *pfvf,
+ {}
+ #endif /* CONFIG_MACSEC */
+
++/* qos support */
++static inline void otx2_qos_init(struct otx2_nic *pfvf, int qos_txqs)
++{
++ struct otx2_hw *hw = &pfvf->hw;
++
++ hw->tc_tx_queues = qos_txqs;
++}
++
++u16 otx2_select_queue(struct net_device *netdev, struct sk_buff *skb,
++ struct net_device *sb_dev);
+ #endif /* OTX2_COMMON_H */
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+index 17e546d0d7e55..c724131172f3f 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+@@ -23,6 +23,7 @@
+ #include "otx2_struct.h"
+ #include "otx2_ptp.h"
+ #include "cn10k.h"
++#include "qos.h"
+ #include <rvu_trace.h>
+
+ #define DRV_NAME "rvu_nicpf"
+@@ -1194,36 +1195,38 @@ static char *nix_mnqerr_e_str[NIX_MNQERR_MAX] = {
+ };
+
+ static char *nix_snd_status_e_str[NIX_SND_STATUS_MAX] = {
+- "NIX_SND_STATUS_GOOD",
+- "NIX_SND_STATUS_SQ_CTX_FAULT",
+- "NIX_SND_STATUS_SQ_CTX_POISON",
+- "NIX_SND_STATUS_SQB_FAULT",
+- "NIX_SND_STATUS_SQB_POISON",
+- "NIX_SND_STATUS_HDR_ERR",
+- "NIX_SND_STATUS_EXT_ERR",
+- "NIX_SND_STATUS_JUMP_FAULT",
+- "NIX_SND_STATUS_JUMP_POISON",
+- "NIX_SND_STATUS_CRC_ERR",
+- "NIX_SND_STATUS_IMM_ERR",
+- "NIX_SND_STATUS_SG_ERR",
+- "NIX_SND_STATUS_MEM_ERR",
+- "NIX_SND_STATUS_INVALID_SUBDC",
+- "NIX_SND_STATUS_SUBDC_ORDER_ERR",
+- "NIX_SND_STATUS_DATA_FAULT",
+- "NIX_SND_STATUS_DATA_POISON",
+- "NIX_SND_STATUS_NPC_DROP_ACTION",
+- "NIX_SND_STATUS_LOCK_VIOL",
+- "NIX_SND_STATUS_NPC_UCAST_CHAN_ERR",
+- "NIX_SND_STATUS_NPC_MCAST_CHAN_ERR",
+- "NIX_SND_STATUS_NPC_MCAST_ABORT",
+- "NIX_SND_STATUS_NPC_VTAG_PTR_ERR",
+- "NIX_SND_STATUS_NPC_VTAG_SIZE_ERR",
+- "NIX_SND_STATUS_SEND_STATS_ERR",
++ [NIX_SND_STATUS_GOOD] = "NIX_SND_STATUS_GOOD",
++ [NIX_SND_STATUS_SQ_CTX_FAULT] = "NIX_SND_STATUS_SQ_CTX_FAULT",
++ [NIX_SND_STATUS_SQ_CTX_POISON] = "NIX_SND_STATUS_SQ_CTX_POISON",
++ [NIX_SND_STATUS_SQB_FAULT] = "NIX_SND_STATUS_SQB_FAULT",
++ [NIX_SND_STATUS_SQB_POISON] = "NIX_SND_STATUS_SQB_POISON",
++ [NIX_SND_STATUS_HDR_ERR] = "NIX_SND_STATUS_HDR_ERR",
++ [NIX_SND_STATUS_EXT_ERR] = "NIX_SND_STATUS_EXT_ERR",
++ [NIX_SND_STATUS_JUMP_FAULT] = "NIX_SND_STATUS_JUMP_FAULT",
++ [NIX_SND_STATUS_JUMP_POISON] = "NIX_SND_STATUS_JUMP_POISON",
++ [NIX_SND_STATUS_CRC_ERR] = "NIX_SND_STATUS_CRC_ERR",
++ [NIX_SND_STATUS_IMM_ERR] = "NIX_SND_STATUS_IMM_ERR",
++ [NIX_SND_STATUS_SG_ERR] = "NIX_SND_STATUS_SG_ERR",
++ [NIX_SND_STATUS_MEM_ERR] = "NIX_SND_STATUS_MEM_ERR",
++ [NIX_SND_STATUS_INVALID_SUBDC] = "NIX_SND_STATUS_INVALID_SUBDC",
++ [NIX_SND_STATUS_SUBDC_ORDER_ERR] = "NIX_SND_STATUS_SUBDC_ORDER_ERR",
++ [NIX_SND_STATUS_DATA_FAULT] = "NIX_SND_STATUS_DATA_FAULT",
++ [NIX_SND_STATUS_DATA_POISON] = "NIX_SND_STATUS_DATA_POISON",
++ [NIX_SND_STATUS_NPC_DROP_ACTION] = "NIX_SND_STATUS_NPC_DROP_ACTION",
++ [NIX_SND_STATUS_LOCK_VIOL] = "NIX_SND_STATUS_LOCK_VIOL",
++ [NIX_SND_STATUS_NPC_UCAST_CHAN_ERR] = "NIX_SND_STAT_NPC_UCAST_CHAN_ERR",
++ [NIX_SND_STATUS_NPC_MCAST_CHAN_ERR] = "NIX_SND_STAT_NPC_MCAST_CHAN_ERR",
++ [NIX_SND_STATUS_NPC_MCAST_ABORT] = "NIX_SND_STATUS_NPC_MCAST_ABORT",
++ [NIX_SND_STATUS_NPC_VTAG_PTR_ERR] = "NIX_SND_STATUS_NPC_VTAG_PTR_ERR",
++ [NIX_SND_STATUS_NPC_VTAG_SIZE_ERR] = "NIX_SND_STATUS_NPC_VTAG_SIZE_ERR",
++ [NIX_SND_STATUS_SEND_MEM_FAULT] = "NIX_SND_STATUS_SEND_MEM_FAULT",
++ [NIX_SND_STATUS_SEND_STATS_ERR] = "NIX_SND_STATUS_SEND_STATS_ERR",
+ };
+
+ static irqreturn_t otx2_q_intr_handler(int irq, void *data)
+ {
+ struct otx2_nic *pf = data;
++ struct otx2_snd_queue *sq;
+ u64 val, *ptr;
+ u64 qidx = 0;
+
+@@ -1238,14 +1241,16 @@ static irqreturn_t otx2_q_intr_handler(int irq, void *data)
+ continue;
+
+ if (val & BIT_ULL(42)) {
+- netdev_err(pf->netdev, "CQ%lld: error reading NIX_LF_CQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n",
++ netdev_err(pf->netdev,
++ "CQ%lld: error reading NIX_LF_CQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n",
+ qidx, otx2_read64(pf, NIX_LF_ERR_INT));
+ } else {
+ if (val & BIT_ULL(NIX_CQERRINT_DOOR_ERR))
+ netdev_err(pf->netdev, "CQ%lld: Doorbell error",
+ qidx);
+ if (val & BIT_ULL(NIX_CQERRINT_CQE_FAULT))
+- netdev_err(pf->netdev, "CQ%lld: Memory fault on CQE write to LLC/DRAM",
++ netdev_err(pf->netdev,
++ "CQ%lld: Memory fault on CQE write to LLC/DRAM",
+ qidx);
+ }
+
+@@ -1253,10 +1258,14 @@ static irqreturn_t otx2_q_intr_handler(int irq, void *data)
+ }
+
+ /* SQ */
+- for (qidx = 0; qidx < pf->hw.tot_tx_queues; qidx++) {
++ for (qidx = 0; qidx < otx2_get_total_tx_queues(pf); qidx++) {
+ u64 sq_op_err_dbg, mnq_err_dbg, snd_err_dbg;
+ u8 sq_op_err_code, mnq_err_code, snd_err_code;
+
++ sq = &pf->qset.sq[qidx];
++ if (!sq->sqb_ptrs)
++ continue;
++
+ /* Below debug registers captures first errors corresponding to
+ * those registers. We don't have to check against SQ qid as
+ * these are fatal errors.
+@@ -1268,7 +1277,8 @@ static irqreturn_t otx2_q_intr_handler(int irq, void *data)
+ (val & NIX_SQINT_BITS));
+
+ if (val & BIT_ULL(42)) {
+- netdev_err(pf->netdev, "SQ%lld: error reading NIX_LF_SQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n",
++ netdev_err(pf->netdev,
++ "SQ%lld: error reading NIX_LF_SQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n",
+ qidx, otx2_read64(pf, NIX_LF_ERR_INT));
+ goto done;
+ }
+@@ -1278,8 +1288,11 @@ static irqreturn_t otx2_q_intr_handler(int irq, void *data)
+ goto chk_mnq_err_dbg;
+
+ sq_op_err_code = FIELD_GET(GENMASK(7, 0), sq_op_err_dbg);
+- netdev_err(pf->netdev, "SQ%lld: NIX_LF_SQ_OP_ERR_DBG(%llx) err=%s\n",
+- qidx, sq_op_err_dbg, nix_sqoperr_e_str[sq_op_err_code]);
++ netdev_err(pf->netdev,
++ "SQ%lld: NIX_LF_SQ_OP_ERR_DBG(0x%llx) err=%s(%#x)\n",
++ qidx, sq_op_err_dbg,
++ nix_sqoperr_e_str[sq_op_err_code],
++ sq_op_err_code);
+
+ otx2_write64(pf, NIX_LF_SQ_OP_ERR_DBG, BIT_ULL(44));
+
+@@ -1296,16 +1309,21 @@ chk_mnq_err_dbg:
+ goto chk_snd_err_dbg;
+
+ mnq_err_code = FIELD_GET(GENMASK(7, 0), mnq_err_dbg);
+- netdev_err(pf->netdev, "SQ%lld: NIX_LF_MNQ_ERR_DBG(%llx) err=%s\n",
+- qidx, mnq_err_dbg, nix_mnqerr_e_str[mnq_err_code]);
++ netdev_err(pf->netdev,
++ "SQ%lld: NIX_LF_MNQ_ERR_DBG(0x%llx) err=%s(%#x)\n",
++ qidx, mnq_err_dbg, nix_mnqerr_e_str[mnq_err_code],
++ mnq_err_code);
+ otx2_write64(pf, NIX_LF_MNQ_ERR_DBG, BIT_ULL(44));
+
+ chk_snd_err_dbg:
+ snd_err_dbg = otx2_read64(pf, NIX_LF_SEND_ERR_DBG);
+ if (snd_err_dbg & BIT(44)) {
+ snd_err_code = FIELD_GET(GENMASK(7, 0), snd_err_dbg);
+- netdev_err(pf->netdev, "SQ%lld: NIX_LF_SND_ERR_DBG:0x%llx err=%s\n",
+- qidx, snd_err_dbg, nix_snd_status_e_str[snd_err_code]);
++ netdev_err(pf->netdev,
++ "SQ%lld: NIX_LF_SND_ERR_DBG:0x%llx err=%s(%#x)\n",
++ qidx, snd_err_dbg,
++ nix_snd_status_e_str[snd_err_code],
++ snd_err_code);
+ otx2_write64(pf, NIX_LF_SEND_ERR_DBG, BIT_ULL(44));
+ }
+
+@@ -1379,7 +1397,7 @@ static void otx2_free_sq_res(struct otx2_nic *pf)
+ otx2_ctx_disable(&pf->mbox, NIX_AQ_CTYPE_SQ, false);
+ /* Free SQB pointers */
+ otx2_sq_free_sqbs(pf);
+- for (qidx = 0; qidx < pf->hw.tot_tx_queues; qidx++) {
++ for (qidx = 0; qidx < otx2_get_total_tx_queues(pf); qidx++) {
+ sq = &qset->sq[qidx];
+ qmem_free(pf->dev, sq->sqe);
+ qmem_free(pf->dev, sq->tso_hdrs);
+@@ -1429,7 +1447,7 @@ static int otx2_init_hw_resources(struct otx2_nic *pf)
+ * so, aura count = pool count.
+ */
+ hw->rqpool_cnt = hw->rx_queues;
+- hw->sqpool_cnt = hw->tot_tx_queues;
++ hw->sqpool_cnt = otx2_get_total_tx_queues(pf);
+ hw->pool_cnt = hw->rqpool_cnt + hw->sqpool_cnt;
+
+ /* Maximum hardware supported transmit length */
+@@ -1578,6 +1596,7 @@ static void otx2_free_hw_resources(struct otx2_nic *pf)
+ else
+ otx2_cleanup_tx_cqes(pf, cq);
+ }
++ otx2_free_pending_sqe(pf);
+
+ otx2_free_sq_res(pf);
+
+@@ -1682,11 +1701,14 @@ int otx2_open(struct net_device *netdev)
+
+ netif_carrier_off(netdev);
+
+- pf->qset.cq_cnt = pf->hw.rx_queues + pf->hw.tot_tx_queues;
+ /* RQ and SQs are mapped to different CQs,
+ * so find out max CQ IRQs (i.e CINTs) needed.
+ */
+- pf->hw.cint_cnt = max(pf->hw.rx_queues, pf->hw.tx_queues);
++ pf->hw.cint_cnt = max3(pf->hw.rx_queues, pf->hw.tx_queues,
++ pf->hw.tc_tx_queues);
++
++ pf->qset.cq_cnt = pf->hw.rx_queues + otx2_get_total_tx_queues(pf);
++
+ qset->napi = kcalloc(pf->hw.cint_cnt, sizeof(*cq_poll), GFP_KERNEL);
+ if (!qset->napi)
+ return -ENOMEM;
+@@ -1702,7 +1724,7 @@ int otx2_open(struct net_device *netdev)
+ if (!qset->cq)
+ goto err_free_mem;
+
+- qset->sq = kcalloc(pf->hw.tot_tx_queues,
++ qset->sq = kcalloc(pf->hw.non_qos_queues,
+ sizeof(struct otx2_snd_queue), GFP_KERNEL);
+ if (!qset->sq)
+ goto err_free_mem;
+@@ -1737,6 +1759,11 @@ int otx2_open(struct net_device *netdev)
+ else
+ cq_poll->cq_ids[CQ_XDP] = CINT_INVALID_CQ;
+
++ cq_poll->cq_ids[CQ_QOS] = (qidx < pf->hw.tc_tx_queues) ?
++ (qidx + pf->hw.rx_queues +
++ pf->hw.non_qos_queues) :
++ CINT_INVALID_CQ;
++
+ cq_poll->dev = (void *)pf;
+ cq_poll->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_CQE;
+ INIT_WORK(&cq_poll->dim.work, otx2_dim_work);
+@@ -1941,6 +1968,12 @@ static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev)
+ int qidx = skb_get_queue_mapping(skb);
+ struct otx2_snd_queue *sq;
+ struct netdev_queue *txq;
++ int sq_idx;
++
++ /* XDP SQs are not mapped with TXQs
++ * advance qid to derive correct sq mapped with QOS
++ */
++ sq_idx = (qidx >= pf->hw.tx_queues) ? (qidx + pf->hw.xdp_queues) : qidx;
+
+ /* Check for minimum and maximum packet length */
+ if (skb->len <= ETH_HLEN ||
+@@ -1949,7 +1982,7 @@ static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev)
+ return NETDEV_TX_OK;
+ }
+
+- sq = &pf->qset.sq[qidx];
++ sq = &pf->qset.sq[sq_idx];
+ txq = netdev_get_tx_queue(netdev, qidx);
+
+ if (!otx2_sq_append_skb(netdev, sq, skb, qidx)) {
+@@ -1967,8 +2000,8 @@ static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev)
+ return NETDEV_TX_OK;
+ }
+
+-static u16 otx2_select_queue(struct net_device *netdev, struct sk_buff *skb,
+- struct net_device *sb_dev)
++u16 otx2_select_queue(struct net_device *netdev, struct sk_buff *skb,
++ struct net_device *sb_dev)
+ {
+ #ifdef CONFIG_DCB
+ struct otx2_nic *pf = netdev_priv(netdev);
+@@ -1990,6 +2023,7 @@ pick_tx:
+ #endif
+ return netdev_pick_tx(netdev, skb, NULL);
+ }
++EXPORT_SYMBOL(otx2_select_queue);
+
+ static netdev_features_t otx2_fix_features(struct net_device *dev,
+ netdev_features_t features)
+@@ -2520,7 +2554,7 @@ static int otx2_xdp_setup(struct otx2_nic *pf, struct bpf_prog *prog)
+ else
+ pf->hw.xdp_queues = 0;
+
+- pf->hw.tot_tx_queues += pf->hw.xdp_queues;
++ pf->hw.non_qos_queues += pf->hw.xdp_queues;
+
+ if (if_up)
+ otx2_open(pf->netdev);
+@@ -2703,10 +2737,10 @@ static void otx2_sriov_vfcfg_cleanup(struct otx2_nic *pf)
+ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ {
+ struct device *dev = &pdev->dev;
++ int err, qcount, qos_txqs;
+ struct net_device *netdev;
+ struct otx2_nic *pf;
+ struct otx2_hw *hw;
+- int err, qcount;
+ int num_vec;
+
+ err = pcim_enable_device(pdev);
+@@ -2731,8 +2765,9 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+
+ /* Set number of queues */
+ qcount = min_t(int, num_online_cpus(), OTX2_MAX_CQ_CNT);
++ qos_txqs = min_t(int, qcount, OTX2_QOS_MAX_LEAF_NODES);
+
+- netdev = alloc_etherdev_mqs(sizeof(*pf), qcount, qcount);
++ netdev = alloc_etherdev_mqs(sizeof(*pf), qcount + qos_txqs, qcount);
+ if (!netdev) {
+ err = -ENOMEM;
+ goto err_release_regions;
+@@ -2751,7 +2786,7 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ hw->pdev = pdev;
+ hw->rx_queues = qcount;
+ hw->tx_queues = qcount;
+- hw->tot_tx_queues = qcount;
++ hw->non_qos_queues = qcount;
+ hw->max_queues = qcount;
+ hw->rbuf_len = OTX2_DEFAULT_RBUF_LEN;
+ /* Use CQE of 128 byte descriptor size by default */
+@@ -2919,6 +2954,8 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ goto err_pf_sriov_init;
+ #endif
+
++ otx2_qos_init(pf, qos_txqs);
++
+ return 0;
+
+ err_pf_sriov_init:
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h
+index fa37b9f312cae..4e5899d8fa2e6 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h
+@@ -318,23 +318,23 @@ enum nix_snd_status_e {
+ NIX_SND_STATUS_EXT_ERR = 0x6,
+ NIX_SND_STATUS_JUMP_FAULT = 0x7,
+ NIX_SND_STATUS_JUMP_POISON = 0x8,
+- NIX_SND_STATUS_CRC_ERR = 0x9,
+- NIX_SND_STATUS_IMM_ERR = 0x10,
+- NIX_SND_STATUS_SG_ERR = 0x11,
+- NIX_SND_STATUS_MEM_ERR = 0x12,
+- NIX_SND_STATUS_INVALID_SUBDC = 0x13,
+- NIX_SND_STATUS_SUBDC_ORDER_ERR = 0x14,
+- NIX_SND_STATUS_DATA_FAULT = 0x15,
+- NIX_SND_STATUS_DATA_POISON = 0x16,
+- NIX_SND_STATUS_NPC_DROP_ACTION = 0x17,
+- NIX_SND_STATUS_LOCK_VIOL = 0x18,
+- NIX_SND_STATUS_NPC_UCAST_CHAN_ERR = 0x19,
+- NIX_SND_STATUS_NPC_MCAST_CHAN_ERR = 0x20,
+- NIX_SND_STATUS_NPC_MCAST_ABORT = 0x21,
+- NIX_SND_STATUS_NPC_VTAG_PTR_ERR = 0x22,
+- NIX_SND_STATUS_NPC_VTAG_SIZE_ERR = 0x23,
+- NIX_SND_STATUS_SEND_MEM_FAULT = 0x24,
+- NIX_SND_STATUS_SEND_STATS_ERR = 0x25,
++ NIX_SND_STATUS_CRC_ERR = 0x10,
++ NIX_SND_STATUS_IMM_ERR = 0x11,
++ NIX_SND_STATUS_SG_ERR = 0x12,
++ NIX_SND_STATUS_MEM_ERR = 0x13,
++ NIX_SND_STATUS_INVALID_SUBDC = 0x14,
++ NIX_SND_STATUS_SUBDC_ORDER_ERR = 0x15,
++ NIX_SND_STATUS_DATA_FAULT = 0x16,
++ NIX_SND_STATUS_DATA_POISON = 0x17,
++ NIX_SND_STATUS_NPC_DROP_ACTION = 0x20,
++ NIX_SND_STATUS_LOCK_VIOL = 0x21,
++ NIX_SND_STATUS_NPC_UCAST_CHAN_ERR = 0x22,
++ NIX_SND_STATUS_NPC_MCAST_CHAN_ERR = 0x23,
++ NIX_SND_STATUS_NPC_MCAST_ABORT = 0x24,
++ NIX_SND_STATUS_NPC_VTAG_PTR_ERR = 0x25,
++ NIX_SND_STATUS_NPC_VTAG_SIZE_ERR = 0x26,
++ NIX_SND_STATUS_SEND_MEM_FAULT = 0x27,
++ NIX_SND_STATUS_SEND_STATS_ERR = 0x28,
+ NIX_SND_STATUS_MAX,
+ };
+
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+index 5704fb75fa477..20d801d30c732 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+@@ -468,12 +468,13 @@ process_cqe:
+ break;
+ }
+
+- if (cq->cq_type == CQ_XDP) {
++ qidx = cq->cq_idx - pfvf->hw.rx_queues;
++
++ if (cq->cq_type == CQ_XDP)
+ otx2_xdp_snd_pkt_handler(pfvf, sq, cqe);
+- } else {
+- otx2_snd_pkt_handler(pfvf, cq, sq, cqe, budget,
+- &tx_pkts, &tx_bytes);
+- }
++ else
++ otx2_snd_pkt_handler(pfvf, cq, &pfvf->qset.sq[qidx],
++ cqe, budget, &tx_pkts, &tx_bytes);
+
+ cqe->hdr.cqe_type = NIX_XQE_TYPE_INVALID;
+ processed_cqe++;
+@@ -490,7 +491,11 @@ process_cqe:
+ if (likely(tx_pkts)) {
+ struct netdev_queue *txq;
+
+- txq = netdev_get_tx_queue(pfvf->netdev, cq->cint_idx);
++ qidx = cq->cq_idx - pfvf->hw.rx_queues;
++
++ if (qidx >= pfvf->hw.tx_queues)
++ qidx -= pfvf->hw.xdp_queues;
++ txq = netdev_get_tx_queue(pfvf->netdev, qidx);
+ netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);
+ /* Check if queue was stopped earlier due to ring full */
+ smp_mb();
+@@ -738,7 +743,8 @@ static void otx2_sqe_add_hdr(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
+ sqe_hdr->aura = sq->aura_id;
+ /* Post a CQE Tx after pkt transmission */
+ sqe_hdr->pnc = 1;
+- sqe_hdr->sq = qidx;
++ sqe_hdr->sq = (qidx >= pfvf->hw.tx_queues) ?
++ qidx + pfvf->hw.xdp_queues : qidx;
+ }
+ sqe_hdr->total = skb->len;
+ /* Set SQE identifier which will be used later for freeing SKB */
+@@ -1218,13 +1224,17 @@ void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
+
+ void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
+ {
++ int tx_pkts = 0, tx_bytes = 0;
+ struct sk_buff *skb = NULL;
+ struct otx2_snd_queue *sq;
+ struct nix_cqe_tx_s *cqe;
++ struct netdev_queue *txq;
+ int processed_cqe = 0;
+ struct sg_list *sg;
++ int qidx;
+
+- sq = &pfvf->qset.sq[cq->cint_idx];
++ qidx = cq->cq_idx - pfvf->hw.rx_queues;
++ sq = &pfvf->qset.sq[qidx];
+
+ if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
+ return;
+@@ -1239,12 +1249,20 @@ void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
+ sg = &sq->sg[cqe->comp.sqe_id];
+ skb = (struct sk_buff *)sg->skb;
+ if (skb) {
++ tx_bytes += skb->len;
++ tx_pkts++;
+ otx2_dma_unmap_skb_frags(pfvf, sg);
+ dev_kfree_skb_any(skb);
+ sg->skb = (u64)NULL;
+ }
+ }
+
++ if (likely(tx_pkts)) {
++ if (qidx >= pfvf->hw.tx_queues)
++ qidx -= pfvf->hw.xdp_queues;
++ txq = netdev_get_tx_queue(pfvf->netdev, qidx);
++ netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);
++ }
+ /* Free CQEs to HW */
+ otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
+ ((u64)cq->cq_idx << 32) | processed_cqe);
+@@ -1271,6 +1289,38 @@ int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable)
+ return err;
+ }
+
++void otx2_free_pending_sqe(struct otx2_nic *pfvf)
++{
++ int tx_pkts = 0, tx_bytes = 0;
++ struct sk_buff *skb = NULL;
++ struct otx2_snd_queue *sq;
++ struct netdev_queue *txq;
++ struct sg_list *sg;
++ int sq_idx, sqe;
++
++ for (sq_idx = 0; sq_idx < pfvf->hw.tx_queues; sq_idx++) {
++ sq = &pfvf->qset.sq[sq_idx];
++ for (sqe = 0; sqe < sq->sqe_cnt; sqe++) {
++ sg = &sq->sg[sqe];
++ skb = (struct sk_buff *)sg->skb;
++ if (skb) {
++ tx_bytes += skb->len;
++ tx_pkts++;
++ otx2_dma_unmap_skb_frags(pfvf, sg);
++ dev_kfree_skb_any(skb);
++ sg->skb = (u64)NULL;
++ }
++ }
++
++ if (!tx_pkts)
++ continue;
++ txq = netdev_get_tx_queue(pfvf->netdev, sq_idx);
++ netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);
++ tx_pkts = 0;
++ tx_bytes = 0;
++ }
++}
++
+ static void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq, u64 dma_addr,
+ int len, int *offset)
+ {
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
+index 93cac2c2664c2..7ab6db9a986fa 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
+@@ -102,7 +102,8 @@ enum cq_type {
+ CQ_RX,
+ CQ_TX,
+ CQ_XDP,
+- CQS_PER_CINT = 3, /* RQ + SQ + XDP */
++ CQ_QOS,
++ CQS_PER_CINT = 4, /* RQ + SQ + XDP + QOS_SQ */
+ };
+
+ struct otx2_cq_poll {
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+index f8f0c01f62a14..404855bccb4b6 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+@@ -475,6 +475,7 @@ static const struct net_device_ops otx2vf_netdev_ops = {
+ .ndo_open = otx2vf_open,
+ .ndo_stop = otx2vf_stop,
+ .ndo_start_xmit = otx2vf_xmit,
++ .ndo_select_queue = otx2_select_queue,
+ .ndo_set_rx_mode = otx2vf_set_rx_mode,
+ .ndo_set_mac_address = otx2_set_mac_address,
+ .ndo_change_mtu = otx2vf_change_mtu,
+@@ -520,10 +521,10 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ {
+ int num_vec = pci_msix_vec_count(pdev);
+ struct device *dev = &pdev->dev;
++ int err, qcount, qos_txqs;
+ struct net_device *netdev;
+ struct otx2_nic *vf;
+ struct otx2_hw *hw;
+- int err, qcount;
+
+ err = pcim_enable_device(pdev);
+ if (err) {
+@@ -546,7 +547,8 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ pci_set_master(pdev);
+
+ qcount = num_online_cpus();
+- netdev = alloc_etherdev_mqs(sizeof(*vf), qcount, qcount);
++ qos_txqs = min_t(int, qcount, OTX2_QOS_MAX_LEAF_NODES);
++ netdev = alloc_etherdev_mqs(sizeof(*vf), qcount + qos_txqs, qcount);
+ if (!netdev) {
+ err = -ENOMEM;
+ goto err_release_regions;
+@@ -566,7 +568,7 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ hw->rx_queues = qcount;
+ hw->tx_queues = qcount;
+ hw->max_queues = qcount;
+- hw->tot_tx_queues = qcount;
++ hw->non_qos_queues = qcount;
+ hw->rbuf_len = OTX2_DEFAULT_RBUF_LEN;
+ /* Use CQE of 128 byte descriptor size by default */
+ hw->xqe_size = 128;
+@@ -695,6 +697,7 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ if (err)
+ goto err_shutdown_tc;
+ #endif
++ otx2_qos_init(vf, qos_txqs);
+
+ return 0;
+
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/qos.h b/drivers/net/ethernet/marvell/octeontx2/nic/qos.h
+new file mode 100644
+index 0000000000000..73a62d092e99a
+--- /dev/null
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/qos.h
+@@ -0,0 +1,19 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++/* Marvell RVU Ethernet driver
++ *
++ * Copyright (C) 2023 Marvell.
++ *
++ */
++#ifndef OTX2_QOS_H
++#define OTX2_QOS_H
++
++#define OTX2_QOS_MAX_LEAF_NODES 16
++
++int otx2_qos_enable_sq(struct otx2_nic *pfvf, int qidx, u16 smq);
++void otx2_qos_disable_sq(struct otx2_nic *pfvf, int qidx, u16 mdq);
++
++struct otx2_qos {
++ u16 qid_to_sqmap[OTX2_QOS_MAX_LEAF_NODES];
++ };
++
++#endif
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c b/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c
+new file mode 100644
+index 0000000000000..e142d43f5a62c
+--- /dev/null
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c
+@@ -0,0 +1,282 @@
++// SPDX-License-Identifier: GPL-2.0
++/* Marvell RVU Physical Function ethernet driver
++ *
++ * Copyright (C) 2023 Marvell.
++ *
++ */
++
++#include <linux/netdevice.h>
++#include <net/tso.h>
++
++#include "cn10k.h"
++#include "otx2_reg.h"
++#include "otx2_common.h"
++#include "otx2_txrx.h"
++#include "otx2_struct.h"
++
++#define OTX2_QOS_MAX_LEAF_NODES 16
++
++static void otx2_qos_aura_pool_free(struct otx2_nic *pfvf, int pool_id)
++{
++ struct otx2_pool *pool;
++
++ if (!pfvf->qset.pool)
++ return;
++
++ pool = &pfvf->qset.pool[pool_id];
++ qmem_free(pfvf->dev, pool->stack);
++ qmem_free(pfvf->dev, pool->fc_addr);
++ pool->stack = NULL;
++ pool->fc_addr = NULL;
++}
++
++static int otx2_qos_sq_aura_pool_init(struct otx2_nic *pfvf, int qidx)
++{
++ struct otx2_qset *qset = &pfvf->qset;
++ int pool_id, stack_pages, num_sqbs;
++ struct otx2_hw *hw = &pfvf->hw;
++ struct otx2_snd_queue *sq;
++ struct otx2_pool *pool;
++ dma_addr_t bufptr;
++ int err, ptr;
++ u64 iova, pa;
++
++ /* Calculate number of SQBs needed.
++ *
++ * For a 128byte SQE, and 4K size SQB, 31 SQEs will fit in one SQB.
++ * Last SQE is used for pointing to next SQB.
++ */
++ num_sqbs = (hw->sqb_size / 128) - 1;
++ num_sqbs = (qset->sqe_cnt + num_sqbs) / num_sqbs;
++
++ /* Get no of stack pages needed */
++ stack_pages =
++ (num_sqbs + hw->stack_pg_ptrs - 1) / hw->stack_pg_ptrs;
++
++ pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);
++ pool = &pfvf->qset.pool[pool_id];
++
++ /* Initialize aura context */
++ err = otx2_aura_init(pfvf, pool_id, pool_id, num_sqbs);
++ if (err)
++ return err;
++
++ /* Initialize pool context */
++ err = otx2_pool_init(pfvf, pool_id, stack_pages,
++ num_sqbs, hw->sqb_size);
++ if (err)
++ goto aura_free;
++
++ /* Flush accumulated messages */
++ err = otx2_sync_mbox_msg(&pfvf->mbox);
++ if (err)
++ goto pool_free;
++
++ /* Allocate pointers and free them to aura/pool */
++ sq = &qset->sq[qidx];
++ sq->sqb_count = 0;
++ sq->sqb_ptrs = kcalloc(num_sqbs, sizeof(*sq->sqb_ptrs), GFP_KERNEL);
++ if (!sq->sqb_ptrs) {
++ err = -ENOMEM;
++ goto pool_free;
++ }
++
++ for (ptr = 0; ptr < num_sqbs; ptr++) {
++ err = otx2_alloc_rbuf(pfvf, pool, &bufptr);
++ if (err)
++ goto sqb_free;
++ pfvf->hw_ops->aura_freeptr(pfvf, pool_id, bufptr);
++ sq->sqb_ptrs[sq->sqb_count++] = (u64)bufptr;
++ }
++
++ return 0;
++
++sqb_free:
++ while (ptr--) {
++ if (!sq->sqb_ptrs[ptr])
++ continue;
++ iova = sq->sqb_ptrs[ptr];
++ pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
++ dma_unmap_page_attrs(pfvf->dev, iova, hw->sqb_size,
++ DMA_FROM_DEVICE,
++ DMA_ATTR_SKIP_CPU_SYNC);
++ put_page(virt_to_page(phys_to_virt(pa)));
++ otx2_aura_allocptr(pfvf, pool_id);
++ }
++ sq->sqb_count = 0;
++ kfree(sq->sqb_ptrs);
++pool_free:
++ qmem_free(pfvf->dev, pool->stack);
++aura_free:
++ qmem_free(pfvf->dev, pool->fc_addr);
++ otx2_mbox_reset(&pfvf->mbox.mbox, 0);
++ return err;
++}
++
++static void otx2_qos_sq_free_sqbs(struct otx2_nic *pfvf, int qidx)
++{
++ struct otx2_qset *qset = &pfvf->qset;
++ struct otx2_hw *hw = &pfvf->hw;
++ struct otx2_snd_queue *sq;
++ u64 iova, pa;
++ int sqb;
++
++ sq = &qset->sq[qidx];
++ if (!sq->sqb_ptrs)
++ return;
++ for (sqb = 0; sqb < sq->sqb_count; sqb++) {
++ if (!sq->sqb_ptrs[sqb])
++ continue;
++ iova = sq->sqb_ptrs[sqb];
++ pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
++ dma_unmap_page_attrs(pfvf->dev, iova, hw->sqb_size,
++ DMA_FROM_DEVICE,
++ DMA_ATTR_SKIP_CPU_SYNC);
++ put_page(virt_to_page(phys_to_virt(pa)));
++ }
++
++ sq->sqb_count = 0;
++
++ sq = &qset->sq[qidx];
++ qmem_free(pfvf->dev, sq->sqe);
++ qmem_free(pfvf->dev, sq->tso_hdrs);
++ kfree(sq->sg);
++ kfree(sq->sqb_ptrs);
++ qmem_free(pfvf->dev, sq->timestamps);
++
++ memset((void *)sq, 0, sizeof(*sq));
++}
++
++/* send queue id */
++static void otx2_qos_sqb_flush(struct otx2_nic *pfvf, int qidx)
++{
++ int sqe_tail, sqe_head;
++ u64 incr, *ptr, val;
++
++ ptr = (__force u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS);
++ incr = (u64)qidx << 32;
++ val = otx2_atomic64_add(incr, ptr);
++ sqe_head = (val >> 20) & 0x3F;
++ sqe_tail = (val >> 28) & 0x3F;
++ if (sqe_head != sqe_tail)
++ usleep_range(50, 60);
++}
++
++static int otx2_qos_ctx_disable(struct otx2_nic *pfvf, u16 qidx, int aura_id)
++{
++ struct nix_cn10k_aq_enq_req *cn10k_sq_aq;
++ struct npa_aq_enq_req *aura_aq;
++ struct npa_aq_enq_req *pool_aq;
++ struct nix_aq_enq_req *sq_aq;
++
++ if (test_bit(CN10K_LMTST, &pfvf->hw.cap_flag)) {
++ cn10k_sq_aq = otx2_mbox_alloc_msg_nix_cn10k_aq_enq(&pfvf->mbox);
++ if (!cn10k_sq_aq)
++ return -ENOMEM;
++ cn10k_sq_aq->qidx = qidx;
++ cn10k_sq_aq->sq.ena = 0;
++ cn10k_sq_aq->sq_mask.ena = 1;
++ cn10k_sq_aq->ctype = NIX_AQ_CTYPE_SQ;
++ cn10k_sq_aq->op = NIX_AQ_INSTOP_WRITE;
++ } else {
++ sq_aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);
++ if (!sq_aq)
++ return -ENOMEM;
++ sq_aq->qidx = qidx;
++ sq_aq->sq.ena = 0;
++ sq_aq->sq_mask.ena = 1;
++ sq_aq->ctype = NIX_AQ_CTYPE_SQ;
++ sq_aq->op = NIX_AQ_INSTOP_WRITE;
++ }
++
++ aura_aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
++ if (!aura_aq) {
++ otx2_mbox_reset(&pfvf->mbox.mbox, 0);
++ return -ENOMEM;
++ }
++
++ aura_aq->aura_id = aura_id;
++ aura_aq->aura.ena = 0;
++ aura_aq->aura_mask.ena = 1;
++ aura_aq->ctype = NPA_AQ_CTYPE_AURA;
++ aura_aq->op = NPA_AQ_INSTOP_WRITE;
++
++ pool_aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
++ if (!pool_aq) {
++ otx2_mbox_reset(&pfvf->mbox.mbox, 0);
++ return -ENOMEM;
++ }
++
++ pool_aq->aura_id = aura_id;
++ pool_aq->pool.ena = 0;
++ pool_aq->pool_mask.ena = 1;
++
++ pool_aq->ctype = NPA_AQ_CTYPE_POOL;
++ pool_aq->op = NPA_AQ_INSTOP_WRITE;
++
++ return otx2_sync_mbox_msg(&pfvf->mbox);
++}
++
++int otx2_qos_enable_sq(struct otx2_nic *pfvf, int qidx, u16 smq)
++{
++ struct otx2_hw *hw = &pfvf->hw;
++ int pool_id, sq_idx, err;
++
++ if (pfvf->flags & OTX2_FLAG_INTF_DOWN)
++ return -EPERM;
++
++ sq_idx = hw->non_qos_queues + qidx;
++
++ mutex_lock(&pfvf->mbox.lock);
++ err = otx2_qos_sq_aura_pool_init(pfvf, sq_idx);
++ if (err)
++ goto out;
++
++ pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, sq_idx);
++ pfvf->qos.qid_to_sqmap[qidx] = smq;
++ err = otx2_sq_init(pfvf, sq_idx, pool_id);
++ if (err)
++ goto out;
++out:
++ mutex_unlock(&pfvf->mbox.lock);
++ return err;
++}
++
++void otx2_qos_disable_sq(struct otx2_nic *pfvf, int qidx, u16 mdq)
++{
++ struct otx2_qset *qset = &pfvf->qset;
++ struct otx2_hw *hw = &pfvf->hw;
++ struct otx2_snd_queue *sq;
++ struct otx2_cq_queue *cq;
++ int pool_id, sq_idx;
++
++ sq_idx = hw->non_qos_queues + qidx;
++
++ /* If the DOWN flag is set SQs are already freed */
++ if (pfvf->flags & OTX2_FLAG_INTF_DOWN)
++ return;
++
++ sq = &pfvf->qset.sq[sq_idx];
++ if (!sq->sqb_ptrs)
++ return;
++
++ if (sq_idx < hw->non_qos_queues ||
++ sq_idx >= otx2_get_total_tx_queues(pfvf)) {
++ netdev_err(pfvf->netdev, "Send Queue is not a QoS queue\n");
++ return;
++ }
++
++ cq = &qset->cq[pfvf->hw.rx_queues + sq_idx];
++ pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, sq_idx);
++
++ otx2_qos_sqb_flush(pfvf, sq_idx);
++ otx2_smq_flush(pfvf, otx2_get_smq_idx(pfvf, sq_idx));
++ otx2_cleanup_tx_cqes(pfvf, cq);
++
++ mutex_lock(&pfvf->mbox.lock);
++ otx2_qos_ctx_disable(pfvf, sq_idx, pool_id);
++ mutex_unlock(&pfvf->mbox.lock);
++
++ otx2_qos_sq_free_sqbs(pfvf, sq_idx);
++ otx2_qos_aura_pool_free(pfvf, pool_id);
++}
+diff --git a/drivers/net/ethernet/mediatek/mtk_wed_regs.h b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
+index e270fb3361432..14cd44f8191ba 100644
+--- a/drivers/net/ethernet/mediatek/mtk_wed_regs.h
++++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
+@@ -51,8 +51,8 @@ struct mtk_wdma_desc {
+ #define MTK_WED_EXT_INT_STATUS_TKID_TITO_INVALID BIT(4)
+ #define MTK_WED_EXT_INT_STATUS_TX_FBUF_LO_TH BIT(8)
+ #define MTK_WED_EXT_INT_STATUS_TX_FBUF_HI_TH BIT(9)
+-#define MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH BIT(12)
+-#define MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH BIT(13)
++#define MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH BIT(10) /* wed v2 */
++#define MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH BIT(11) /* wed v2 */
+ #define MTK_WED_EXT_INT_STATUS_RX_DRV_R_RESP_ERR BIT(16)
+ #define MTK_WED_EXT_INT_STATUS_RX_DRV_W_RESP_ERR BIT(17)
+ #define MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT BIT(18)
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c
+index e2aced7ab4547..95f63fcf4ba1f 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c
+@@ -496,7 +496,7 @@ mlxsw_sp_acl_bf_init(struct mlxsw_sp *mlxsw_sp, unsigned int num_erp_banks)
+ * is 2^ACL_MAX_BF_LOG
+ */
+ bf_bank_size = 1 << MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_BF_LOG);
+- bf = kzalloc(struct_size(bf, refcnt, bf_bank_size * num_erp_banks),
++ bf = kzalloc(struct_size(bf, refcnt, size_mul(bf_bank_size, num_erp_banks)),
+ GFP_KERNEL);
+ if (!bf)
+ return ERR_PTR(-ENOMEM);
+diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
+index 80b6079b8a8e3..d14706265d9cb 100644
+--- a/drivers/net/ethernet/realtek/r8169_main.c
++++ b/drivers/net/ethernet/realtek/r8169_main.c
+@@ -2512,9 +2512,13 @@ static void rtl_set_rx_mode(struct net_device *dev)
+
+ if (dev->flags & IFF_PROMISC) {
+ rx_mode |= AcceptAllPhys;
++ } else if (!(dev->flags & IFF_MULTICAST)) {
++ rx_mode &= ~AcceptMulticast;
+ } else if (netdev_mc_count(dev) > MC_FILTER_LIMIT ||
+ dev->flags & IFF_ALLMULTI ||
+- tp->mac_version == RTL_GIGA_MAC_VER_35) {
++ tp->mac_version == RTL_GIGA_MAC_VER_35 ||
++ tp->mac_version == RTL_GIGA_MAC_VER_46 ||
++ tp->mac_version == RTL_GIGA_MAC_VER_48) {
+ /* accept all multicasts */
+ } else if (netdev_mc_empty(dev)) {
+ rx_mode &= ~AcceptMulticast;
+@@ -4556,12 +4560,17 @@ static int rtl8169_poll(struct napi_struct *napi, int budget)
+ static void r8169_phylink_handler(struct net_device *ndev)
+ {
+ struct rtl8169_private *tp = netdev_priv(ndev);
++ struct device *d = tp_to_dev(tp);
+
+ if (netif_carrier_ok(ndev)) {
+ rtl_link_chg_patch(tp);
+- pm_request_resume(&tp->pci_dev->dev);
++ pm_request_resume(d);
++ netif_wake_queue(tp->dev);
+ } else {
+- pm_runtime_idle(&tp->pci_dev->dev);
++ /* In few cases rx is broken after link-down otherwise */
++ if (rtl_is_8125(tp))
++ rtl_reset_work(tp);
++ pm_runtime_idle(d);
+ }
+
+ phy_print_status(tp->phydev);
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
+index 1913385df6856..880a75bf2eb1f 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
++++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
+@@ -222,7 +222,7 @@
+ ((val) << XGMAC_PPS_MINIDX(x))
+ #define XGMAC_PPSCMD_START 0x2
+ #define XGMAC_PPSCMD_STOP 0x5
+-#define XGMAC_PPSEN0 BIT(4)
++#define XGMAC_PPSENx(x) BIT(4 + (x) * 8)
+ #define XGMAC_PPSx_TARGET_TIME_SEC(x) (0x00000d80 + (x) * 0x10)
+ #define XGMAC_PPSx_TARGET_TIME_NSEC(x) (0x00000d84 + (x) * 0x10)
+ #define XGMAC_TRGTBUSY0 BIT(31)
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+index c6c4d7948fe5f..f30e08a106cbe 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+@@ -1135,7 +1135,19 @@ static int dwxgmac2_flex_pps_config(void __iomem *ioaddr, int index,
+
+ val |= XGMAC_PPSCMDx(index, XGMAC_PPSCMD_START);
+ val |= XGMAC_TRGTMODSELx(index, XGMAC_PPSCMD_START);
+- val |= XGMAC_PPSEN0;
++
++ /* XGMAC Core has 4 PPS outputs at most.
++ *
++ * Prior XGMAC Core 3.20, Fixed mode or Flexible mode are selectable for
++ * PPS0 only via PPSEN0. PPS{1,2,3} are in Flexible mode by default,
++ * and can not be switched to Fixed mode, since PPSEN{1,2,3} are
++ * read-only reserved to 0.
++ * But we always set PPSEN{1,2,3} do not make things worse ;-)
++ *
++ * From XGMAC Core 3.20 and later, PPSEN{0,1,2,3} are writable and must
++ * be set, or the PPS outputs stay in Fixed PPS mode by default.
++ */
++ val |= XGMAC_PPSENx(index);
+
+ writel(cfg->start.tv_sec, ioaddr + XGMAC_PPSx_TARGET_TIME_SEC(index));
+
+diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
+index 50d7eacfec582..87e67121477cb 100644
+--- a/drivers/net/ethernet/toshiba/spider_net.c
++++ b/drivers/net/ethernet/toshiba/spider_net.c
+@@ -2332,7 +2332,7 @@ spider_net_alloc_card(void)
+ struct spider_net_card *card;
+
+ netdev = alloc_etherdev(struct_size(card, darray,
+- tx_descriptors + rx_descriptors));
++ size_add(tx_descriptors, rx_descriptors)));
+ if (!netdev)
+ return NULL;
+
+diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
+index 59e29e08398a0..b29b7d97b7739 100644
+--- a/drivers/net/ipvlan/ipvlan_core.c
++++ b/drivers/net/ipvlan/ipvlan_core.c
+@@ -441,12 +441,12 @@ static int ipvlan_process_v4_outbound(struct sk_buff *skb)
+
+ err = ip_local_out(net, skb->sk, skb);
+ if (unlikely(net_xmit_eval(err)))
+- dev->stats.tx_errors++;
++ DEV_STATS_INC(dev, tx_errors);
+ else
+ ret = NET_XMIT_SUCCESS;
+ goto out;
+ err:
+- dev->stats.tx_errors++;
++ DEV_STATS_INC(dev, tx_errors);
+ kfree_skb(skb);
+ out:
+ return ret;
+@@ -482,12 +482,12 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb)
+
+ err = ip6_local_out(net, skb->sk, skb);
+ if (unlikely(net_xmit_eval(err)))
+- dev->stats.tx_errors++;
++ DEV_STATS_INC(dev, tx_errors);
+ else
+ ret = NET_XMIT_SUCCESS;
+ goto out;
+ err:
+- dev->stats.tx_errors++;
++ DEV_STATS_INC(dev, tx_errors);
+ kfree_skb(skb);
+ out:
+ return ret;
+diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
+index cd16bc8bf154c..fbf2d5b67aafa 100644
+--- a/drivers/net/ipvlan/ipvlan_main.c
++++ b/drivers/net/ipvlan/ipvlan_main.c
+@@ -324,6 +324,7 @@ static void ipvlan_get_stats64(struct net_device *dev,
+ s->rx_dropped = rx_errs;
+ s->tx_dropped = tx_drps;
+ }
++ s->tx_errors = DEV_STATS_READ(dev, tx_errors);
+ }
+
+ static int ipvlan_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
+diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
+index 81453e84b6413..209ee9f352754 100644
+--- a/drivers/net/macsec.c
++++ b/drivers/net/macsec.c
+@@ -3664,9 +3664,9 @@ static void macsec_get_stats64(struct net_device *dev,
+
+ dev_fetch_sw_netstats(s, dev->tstats);
+
+- s->rx_dropped = atomic_long_read(&dev->stats.__rx_dropped);
+- s->tx_dropped = atomic_long_read(&dev->stats.__tx_dropped);
+- s->rx_errors = atomic_long_read(&dev->stats.__rx_errors);
++ s->rx_dropped = DEV_STATS_READ(dev, rx_dropped);
++ s->tx_dropped = DEV_STATS_READ(dev, tx_dropped);
++ s->rx_errors = DEV_STATS_READ(dev, rx_errors);
+ }
+
+ static int macsec_get_iflink(const struct net_device *dev)
+diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
+index cb77dd6ce9665..21c6b36dc6ebb 100644
+--- a/drivers/net/wireless/ath/ath11k/mac.c
++++ b/drivers/net/wireless/ath/ath11k/mac.c
+@@ -8549,6 +8549,14 @@ static int ath11k_mac_op_get_txpower(struct ieee80211_hw *hw,
+ if (ar->state != ATH11K_STATE_ON)
+ goto err_fallback;
+
++ /* Firmware doesn't provide Tx power during CAC hence no need to fetch
++ * the stats.
++ */
++ if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
++ mutex_unlock(&ar->conf_mutex);
++ return -EAGAIN;
++ }
++
+ req_param.pdev_id = ar->pdev->pdev_id;
+ req_param.stats_id = WMI_REQUEST_PDEV_STAT;
+
+diff --git a/drivers/net/wireless/ath/ath11k/pci.c b/drivers/net/wireless/ath/ath11k/pci.c
+index 3953ebd551bf8..79d2876a46b53 100644
+--- a/drivers/net/wireless/ath/ath11k/pci.c
++++ b/drivers/net/wireless/ath/ath11k/pci.c
+@@ -853,10 +853,16 @@ unsupported_wcn6855_soc:
+ if (ret)
+ goto err_pci_disable_msi;
+
++ ret = ath11k_pci_set_irq_affinity_hint(ab_pci, cpumask_of(0));
++ if (ret) {
++ ath11k_err(ab, "failed to set irq affinity %d\n", ret);
++ goto err_pci_disable_msi;
++ }
++
+ ret = ath11k_mhi_register(ab_pci);
+ if (ret) {
+ ath11k_err(ab, "failed to register mhi: %d\n", ret);
+- goto err_pci_disable_msi;
++ goto err_irq_affinity_cleanup;
+ }
+
+ ret = ath11k_hal_srng_init(ab);
+@@ -877,12 +883,6 @@ unsupported_wcn6855_soc:
+ goto err_ce_free;
+ }
+
+- ret = ath11k_pci_set_irq_affinity_hint(ab_pci, cpumask_of(0));
+- if (ret) {
+- ath11k_err(ab, "failed to set irq affinity %d\n", ret);
+- goto err_free_irq;
+- }
+-
+ /* kernel may allocate a dummy vector before request_irq and
+ * then allocate a real vector when request_irq is called.
+ * So get msi_data here again to avoid spurious interrupt
+@@ -891,19 +891,16 @@ unsupported_wcn6855_soc:
+ ret = ath11k_pci_config_msi_data(ab_pci);
+ if (ret) {
+ ath11k_err(ab, "failed to config msi_data: %d\n", ret);
+- goto err_irq_affinity_cleanup;
++ goto err_free_irq;
+ }
+
+ ret = ath11k_core_init(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to init core: %d\n", ret);
+- goto err_irq_affinity_cleanup;
++ goto err_free_irq;
+ }
+ return 0;
+
+-err_irq_affinity_cleanup:
+- ath11k_pci_set_irq_affinity_hint(ab_pci, NULL);
+-
+ err_free_irq:
+ ath11k_pcic_free_irq(ab);
+
+@@ -916,6 +913,9 @@ err_hal_srng_deinit:
+ err_mhi_unregister:
+ ath11k_mhi_unregister(ab_pci);
+
++err_irq_affinity_cleanup:
++ ath11k_pci_set_irq_affinity_hint(ab_pci, NULL);
++
+ err_pci_disable_msi:
+ ath11k_pci_free_msi(ab_pci);
+
+diff --git a/drivers/net/wireless/ath/dfs_pattern_detector.c b/drivers/net/wireless/ath/dfs_pattern_detector.c
+index 27f4d74a41c80..2788a1b06c17c 100644
+--- a/drivers/net/wireless/ath/dfs_pattern_detector.c
++++ b/drivers/net/wireless/ath/dfs_pattern_detector.c
+@@ -206,7 +206,7 @@ channel_detector_create(struct dfs_pattern_detector *dpd, u16 freq)
+
+ INIT_LIST_HEAD(&cd->head);
+ cd->freq = freq;
+- cd->detectors = kmalloc_array(dpd->num_radar_types,
++ cd->detectors = kcalloc(dpd->num_radar_types,
+ sizeof(*cd->detectors), GFP_ATOMIC);
+ if (cd->detectors == NULL)
+ goto fail;
+diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
+index 60a7b61d59aa3..ca1daec641c4f 100644
+--- a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
++++ b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
+@@ -3,6 +3,7 @@
+ *
+ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
+ * Copyright (C) 2019 Intel Corporation
++ * Copyright (C) 2023 Intel Corporation
+ *****************************************************************************/
+
+ #include <linux/kernel.h>
+@@ -1169,7 +1170,7 @@ void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb)
+ iwlagn_check_ratid_empty(priv, sta_id, tid);
+ }
+
+- iwl_trans_reclaim(priv->trans, txq_id, ssn, &skbs);
++ iwl_trans_reclaim(priv->trans, txq_id, ssn, &skbs, false);
+
+ freed = 0;
+
+@@ -1315,7 +1316,7 @@ void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
+ * block-ack window (we assume that they've been successfully
+ * transmitted ... if not, it's too late anyway). */
+ iwl_trans_reclaim(priv->trans, scd_flow, ba_resp_scd_ssn,
+- &reclaimed_skbs);
++ &reclaimed_skbs, false);
+
+ IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
+ "sta_id = %d\n",
+diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
+index ba538d70985f4..39bee9c00e071 100644
+--- a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
++++ b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
+@@ -13,6 +13,7 @@
+ #define IWL_FW_INI_DOMAIN_ALWAYS_ON 0
+ #define IWL_FW_INI_REGION_ID_MASK GENMASK(15, 0)
+ #define IWL_FW_INI_REGION_DUMP_POLICY_MASK GENMASK(31, 16)
++#define IWL_FW_INI_PRESET_DISABLE 0xff
+
+ /**
+ * struct iwl_fw_ini_hcmd
+diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h
+index 128059ca77e60..06fb7d6653905 100644
+--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h
++++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h
+@@ -1,6 +1,6 @@
+ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+ /*
+- * Copyright (C) 2018-2022 Intel Corporation
++ * Copyright (C) 2018-2023 Intel Corporation
+ */
+ #ifndef __iwl_dbg_tlv_h__
+ #define __iwl_dbg_tlv_h__
+@@ -10,7 +10,8 @@
+ #include <fw/file.h>
+ #include <fw/api/dbg-tlv.h>
+
+-#define IWL_DBG_TLV_MAX_PRESET 15
++#define IWL_DBG_TLV_MAX_PRESET 15
++#define ENABLE_INI (IWL_DBG_TLV_MAX_PRESET + 1)
+
+ /**
+ * struct iwl_dbg_tlv_node - debug TLV node
+diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+index a2203f661321c..5eba1a355f043 100644
+--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
++++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+@@ -1722,6 +1722,22 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans)
+ #endif
+
+ drv->trans->dbg.domains_bitmap = IWL_TRANS_FW_DBG_DOMAIN(drv->trans);
++ if (iwlwifi_mod_params.enable_ini != ENABLE_INI) {
++ /* We have a non-default value in the module parameter,
++ * take its value
++ */
++ drv->trans->dbg.domains_bitmap &= 0xffff;
++ if (iwlwifi_mod_params.enable_ini != IWL_FW_INI_PRESET_DISABLE) {
++ if (iwlwifi_mod_params.enable_ini > ENABLE_INI) {
++ IWL_ERR(trans,
++ "invalid enable_ini module parameter value: max = %d, using 0 instead\n",
++ ENABLE_INI);
++ iwlwifi_mod_params.enable_ini = 0;
++ }
++ drv->trans->dbg.domains_bitmap =
++ BIT(IWL_FW_DBG_DOMAIN_POS + iwlwifi_mod_params.enable_ini);
++ }
++ }
+
+ ret = iwl_request_firmware(drv, true);
+ if (ret) {
+@@ -1770,8 +1786,6 @@ void iwl_drv_stop(struct iwl_drv *drv)
+ kfree(drv);
+ }
+
+-#define ENABLE_INI (IWL_DBG_TLV_MAX_PRESET + 1)
+-
+ /* shared module parameters */
+ struct iwl_mod_params iwlwifi_mod_params = {
+ .fw_restart = true,
+@@ -1891,38 +1905,7 @@ module_param_named(uapsd_disable, iwlwifi_mod_params.uapsd_disable, uint, 0644);
+ MODULE_PARM_DESC(uapsd_disable,
+ "disable U-APSD functionality bitmap 1: BSS 2: P2P Client (default: 3)");
+
+-static int enable_ini_set(const char *arg, const struct kernel_param *kp)
+-{
+- int ret = 0;
+- bool res;
+- __u32 new_enable_ini;
+-
+- /* in case the argument type is a number */
+- ret = kstrtou32(arg, 0, &new_enable_ini);
+- if (!ret) {
+- if (new_enable_ini > ENABLE_INI) {
+- pr_err("enable_ini cannot be %d, in range 0-16\n", new_enable_ini);
+- return -EINVAL;
+- }
+- goto out;
+- }
+-
+- /* in case the argument type is boolean */
+- ret = kstrtobool(arg, &res);
+- if (ret)
+- return ret;
+- new_enable_ini = (res ? ENABLE_INI : 0);
+-
+-out:
+- iwlwifi_mod_params.enable_ini = new_enable_ini;
+- return 0;
+-}
+-
+-static const struct kernel_param_ops enable_ini_ops = {
+- .set = enable_ini_set
+-};
+-
+-module_param_cb(enable_ini, &enable_ini_ops, &iwlwifi_mod_params.enable_ini, 0644);
++module_param_named(enable_ini, iwlwifi_mod_params.enable_ini, uint, 0444);
+ MODULE_PARM_DESC(enable_ini,
+ "0:disable, 1-15:FW_DBG_PRESET Values, 16:enabled without preset value defined,"
+ "Debug INI TLV FW debug infrastructure (default: 16)");
+diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+index d659ccd065f78..70022cadee35b 100644
+--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
++++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+@@ -56,6 +56,10 @@
+ * 6) Eventually, the free function will be called.
+ */
+
++/* default preset 0 (start from bit 16)*/
++#define IWL_FW_DBG_DOMAIN_POS 16
++#define IWL_FW_DBG_DOMAIN BIT(IWL_FW_DBG_DOMAIN_POS)
++
+ #define IWL_TRANS_FW_DBG_DOMAIN(trans) IWL_FW_INI_DOMAIN_ALWAYS_ON
+
+ #define FH_RSCSR_FRAME_SIZE_MSK 0x00003FFF /* bits 0-13 */
+@@ -563,7 +567,7 @@ struct iwl_trans_ops {
+ int (*tx)(struct iwl_trans *trans, struct sk_buff *skb,
+ struct iwl_device_tx_cmd *dev_cmd, int queue);
+ void (*reclaim)(struct iwl_trans *trans, int queue, int ssn,
+- struct sk_buff_head *skbs);
++ struct sk_buff_head *skbs, bool is_flush);
+
+ void (*set_q_ptrs)(struct iwl_trans *trans, int queue, int ptr);
+
+@@ -1187,14 +1191,15 @@ static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
+ }
+
+ static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue,
+- int ssn, struct sk_buff_head *skbs)
++ int ssn, struct sk_buff_head *skbs,
++ bool is_flush)
+ {
+ if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
+ IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
+ return;
+ }
+
+- trans->ops->reclaim(trans, queue, ssn, skbs);
++ trans->ops->reclaim(trans, queue, ssn, skbs, is_flush);
+ }
+
+ static inline void iwl_trans_set_q_ptrs(struct iwl_trans *trans, int queue,
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+index 2d01f6226b7c6..618355ecd9d7b 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+@@ -1572,7 +1572,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
+ seq_ctl = le16_to_cpu(tx_resp->seq_ctl);
+
+ /* we can free until ssn % q.n_bd not inclusive */
+- iwl_trans_reclaim(mvm->trans, txq_id, ssn, &skbs);
++ iwl_trans_reclaim(mvm->trans, txq_id, ssn, &skbs, false);
+
+ while (!skb_queue_empty(&skbs)) {
+ struct sk_buff *skb = __skb_dequeue(&skbs);
+@@ -1923,7 +1923,7 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
+ * block-ack window (we assume that they've been successfully
+ * transmitted ... if not, it's too late anyway).
+ */
+- iwl_trans_reclaim(mvm->trans, txq, index, &reclaimed_skbs);
++ iwl_trans_reclaim(mvm->trans, txq, index, &reclaimed_skbs, is_flush);
+
+ skb_queue_walk(&reclaimed_skbs, skb) {
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+index f7e4f868363df..69b95ad5993b0 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+@@ -497,6 +497,7 @@ int iwl_pcie_rx_stop(struct iwl_trans *trans);
+ void iwl_pcie_rx_free(struct iwl_trans *trans);
+ void iwl_pcie_free_rbs_pool(struct iwl_trans *trans);
+ void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq);
++void iwl_pcie_rx_napi_sync(struct iwl_trans *trans);
+ void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
+ struct iwl_rxq *rxq);
+
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+index b455e981faa1f..90a46faaaffdf 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+@@ -1,6 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+ /*
+- * Copyright (C) 2003-2014, 2018-2022 Intel Corporation
++ * Copyright (C) 2003-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
+ * Copyright (C) 2016-2017 Intel Deutschland GmbH
+ */
+@@ -1053,6 +1053,22 @@ static int iwl_pcie_napi_poll_msix(struct napi_struct *napi, int budget)
+ return ret;
+ }
+
++void iwl_pcie_rx_napi_sync(struct iwl_trans *trans)
++{
++ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
++ int i;
++
++ if (unlikely(!trans_pcie->rxq))
++ return;
++
++ for (i = 0; i < trans->num_rx_queues; i++) {
++ struct iwl_rxq *rxq = &trans_pcie->rxq[i];
++
++ if (rxq && rxq->napi.poll)
++ napi_synchronize(&rxq->napi);
++ }
++}
++
+ static int _iwl_pcie_rx_init(struct iwl_trans *trans)
+ {
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+index 94f40c4d24217..8b9e4b9c5a2e9 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+@@ -156,6 +156,8 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)
+ if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
+ IWL_DEBUG_INFO(trans,
+ "DEVICE_ENABLED bit was set and is now cleared\n");
++ iwl_pcie_synchronize_irqs(trans);
++ iwl_pcie_rx_napi_sync(trans);
+ iwl_txq_gen2_tx_free(trans);
+ iwl_pcie_rx_stop(trans);
+ }
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+index 8e95225cdd605..39ab6526e6b85 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+@@ -1261,6 +1261,8 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans)
+ if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
+ IWL_DEBUG_INFO(trans,
+ "DEVICE_ENABLED bit was set and is now cleared\n");
++ iwl_pcie_synchronize_irqs(trans);
++ iwl_pcie_rx_napi_sync(trans);
+ iwl_pcie_tx_stop(trans);
+ iwl_pcie_rx_stop(trans);
+
+diff --git a/drivers/net/wireless/intel/iwlwifi/queue/tx.c b/drivers/net/wireless/intel/iwlwifi/queue/tx.c
+index 726185d6fab8b..8cf206837eeea 100644
+--- a/drivers/net/wireless/intel/iwlwifi/queue/tx.c
++++ b/drivers/net/wireless/intel/iwlwifi/queue/tx.c
+@@ -1551,7 +1551,7 @@ void iwl_txq_progress(struct iwl_txq *txq)
+
+ /* Frees buffers until index _not_ inclusive */
+ void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
+- struct sk_buff_head *skbs)
++ struct sk_buff_head *skbs, bool is_flush)
+ {
+ struct iwl_txq *txq = trans->txqs.txq[txq_id];
+ int tfd_num = iwl_txq_get_cmd_index(txq, ssn);
+@@ -1622,9 +1622,11 @@ void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
+ if (iwl_txq_space(trans, txq) > txq->low_mark &&
+ test_bit(txq_id, trans->txqs.queue_stopped)) {
+ struct sk_buff_head overflow_skbs;
++ struct sk_buff *skb;
+
+ __skb_queue_head_init(&overflow_skbs);
+- skb_queue_splice_init(&txq->overflow_q, &overflow_skbs);
++ skb_queue_splice_init(&txq->overflow_q,
++ is_flush ? skbs : &overflow_skbs);
+
+ /*
+ * We are going to transmit from the overflow queue.
+@@ -1644,8 +1646,7 @@ void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
+ */
+ spin_unlock_bh(&txq->lock);
+
+- while (!skb_queue_empty(&overflow_skbs)) {
+- struct sk_buff *skb = __skb_dequeue(&overflow_skbs);
++ while ((skb = __skb_dequeue(&overflow_skbs))) {
+ struct iwl_device_tx_cmd *dev_cmd_ptr;
+
+ dev_cmd_ptr = *(void **)((u8 *)skb->cb +
+diff --git a/drivers/net/wireless/intel/iwlwifi/queue/tx.h b/drivers/net/wireless/intel/iwlwifi/queue/tx.h
+index eca53bfd326d1..ceb6812fe20b2 100644
+--- a/drivers/net/wireless/intel/iwlwifi/queue/tx.h
++++ b/drivers/net/wireless/intel/iwlwifi/queue/tx.h
+@@ -173,7 +173,7 @@ void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans,
+ struct iwl_txq *txq, u16 byte_cnt,
+ int num_tbs);
+ void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
+- struct sk_buff_head *skbs);
++ struct sk_buff_head *skbs, bool is_flush);
+ void iwl_txq_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr);
+ void iwl_trans_txq_freeze_timer(struct iwl_trans *trans, unsigned long txqs,
+ bool freeze);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c b/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
+index b65b0a88c1ded..808466b7de472 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
+@@ -9,6 +9,23 @@ struct beacon_bc_data {
+ int count[MT7603_MAX_INTERFACES];
+ };
+
++static void
++mt7603_mac_stuck_beacon_recovery(struct mt7603_dev *dev)
++{
++ if (dev->beacon_check % 5 != 4)
++ return;
++
++ mt76_clear(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_DMA_EN);
++ mt76_set(dev, MT_SCH_4, MT_SCH_4_RESET);
++ mt76_clear(dev, MT_SCH_4, MT_SCH_4_RESET);
++ mt76_set(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_DMA_EN);
++
++ mt76_set(dev, MT_WF_CFG_OFF_WOCCR, MT_WF_CFG_OFF_WOCCR_TMAC_GC_DIS);
++ mt76_set(dev, MT_ARB_SCR, MT_ARB_SCR_TX_DISABLE);
++ mt76_clear(dev, MT_ARB_SCR, MT_ARB_SCR_TX_DISABLE);
++ mt76_clear(dev, MT_WF_CFG_OFF_WOCCR, MT_WF_CFG_OFF_WOCCR_TMAC_GC_DIS);
++}
++
+ static void
+ mt7603_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
+ {
+@@ -16,6 +33,8 @@ mt7603_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
+ struct mt76_dev *mdev = &dev->mt76;
+ struct mt7603_vif *mvif = (struct mt7603_vif *)vif->drv_priv;
+ struct sk_buff *skb = NULL;
++ u32 om_idx = mvif->idx;
++ u32 val;
+
+ if (!(mdev->beacon_mask & BIT(mvif->idx)))
+ return;
+@@ -24,20 +43,33 @@ mt7603_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
+ if (!skb)
+ return;
+
+- mt76_tx_queue_skb(dev, dev->mphy.q_tx[MT_TXQ_BEACON],
+- MT_TXQ_BEACON, skb, &mvif->sta.wcid, NULL);
++ if (om_idx)
++ om_idx |= 0x10;
++ val = MT_DMA_FQCR0_BUSY | MT_DMA_FQCR0_MODE |
++ FIELD_PREP(MT_DMA_FQCR0_TARGET_BSS, om_idx) |
++ FIELD_PREP(MT_DMA_FQCR0_DEST_PORT_ID, 3) |
++ FIELD_PREP(MT_DMA_FQCR0_DEST_QUEUE_ID, 8);
+
+ spin_lock_bh(&dev->ps_lock);
+- mt76_wr(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY |
+- FIELD_PREP(MT_DMA_FQCR0_TARGET_WCID, mvif->sta.wcid.idx) |
+- FIELD_PREP(MT_DMA_FQCR0_TARGET_QID,
+- dev->mphy.q_tx[MT_TXQ_CAB]->hw_idx) |
+- FIELD_PREP(MT_DMA_FQCR0_DEST_PORT_ID, 3) |
+- FIELD_PREP(MT_DMA_FQCR0_DEST_QUEUE_ID, 8));
+
+- if (!mt76_poll(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY, 0, 5000))
++ mt76_wr(dev, MT_DMA_FQCR0, val |
++ FIELD_PREP(MT_DMA_FQCR0_TARGET_QID, MT_TX_HW_QUEUE_BCN));
++ if (!mt76_poll(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY, 0, 5000)) {
+ dev->beacon_check = MT7603_WATCHDOG_TIMEOUT;
++ goto out;
++ }
++
++ mt76_wr(dev, MT_DMA_FQCR0, val |
++ FIELD_PREP(MT_DMA_FQCR0_TARGET_QID, MT_TX_HW_QUEUE_BMC));
++ if (!mt76_poll(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY, 0, 5000)) {
++ dev->beacon_check = MT7603_WATCHDOG_TIMEOUT;
++ goto out;
++ }
+
++ mt76_tx_queue_skb(dev, dev->mphy.q_tx[MT_TXQ_BEACON],
++ MT_TXQ_BEACON, skb, &mvif->sta.wcid, NULL);
++
++out:
+ spin_unlock_bh(&dev->ps_lock);
+ }
+
+@@ -81,6 +113,18 @@ void mt7603_pre_tbtt_tasklet(struct tasklet_struct *t)
+ data.dev = dev;
+ __skb_queue_head_init(&data.q);
+
++ /* Flush all previous CAB queue packets and beacons */
++ mt76_wr(dev, MT_WF_ARB_CAB_FLUSH, GENMASK(30, 16) | BIT(0));
++
++ mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_CAB], false);
++ mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BEACON], false);
++
++ if (dev->mphy.q_tx[MT_TXQ_BEACON]->queued > 0)
++ dev->beacon_check++;
++ else
++ dev->beacon_check = 0;
++ mt7603_mac_stuck_beacon_recovery(dev);
++
+ q = dev->mphy.q_tx[MT_TXQ_BEACON];
+ spin_lock(&q->lock);
+ ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
+@@ -89,14 +133,9 @@ void mt7603_pre_tbtt_tasklet(struct tasklet_struct *t)
+ mt76_queue_kick(dev, q);
+ spin_unlock(&q->lock);
+
+- /* Flush all previous CAB queue packets */
+- mt76_wr(dev, MT_WF_ARB_CAB_FLUSH, GENMASK(30, 16) | BIT(0));
+-
+- mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_CAB], false);
+-
+ mt76_csa_check(mdev);
+ if (mdev->csa_complete)
+- goto out;
++ return;
+
+ q = dev->mphy.q_tx[MT_TXQ_CAB];
+ do {
+@@ -108,7 +147,7 @@ void mt7603_pre_tbtt_tasklet(struct tasklet_struct *t)
+ skb_queue_len(&data.q) < 8);
+
+ if (skb_queue_empty(&data.q))
+- goto out;
++ return;
+
+ for (i = 0; i < ARRAY_SIZE(data.tail); i++) {
+ if (!data.tail[i])
+@@ -136,11 +175,6 @@ void mt7603_pre_tbtt_tasklet(struct tasklet_struct *t)
+ MT_WF_ARB_CAB_START_BSSn(0) |
+ (MT_WF_ARB_CAB_START_BSS0n(1) *
+ ((1 << (MT7603_MAX_INTERFACES - 1)) - 1)));
+-
+-out:
+- mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BEACON], false);
+- if (dev->mphy.q_tx[MT_TXQ_BEACON]->queued > hweight8(mdev->beacon_mask))
+- dev->beacon_check++;
+ }
+
+ void mt7603_beacon_set_timer(struct mt7603_dev *dev, int idx, int intval)
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/core.c b/drivers/net/wireless/mediatek/mt76/mt7603/core.c
+index 60a996b63c0c0..915b8349146af 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7603/core.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7603/core.c
+@@ -42,11 +42,13 @@ irqreturn_t mt7603_irq_handler(int irq, void *dev_instance)
+ }
+
+ if (intr & MT_INT_RX_DONE(0)) {
++ dev->rx_pse_check = 0;
+ mt7603_irq_disable(dev, MT_INT_RX_DONE(0));
+ napi_schedule(&dev->mt76.napi[0]);
+ }
+
+ if (intr & MT_INT_RX_DONE(1)) {
++ dev->rx_pse_check = 0;
+ mt7603_irq_disable(dev, MT_INT_RX_DONE(1));
+ napi_schedule(&dev->mt76.napi[1]);
+ }
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
+index 6cff346d57a78..2980e1234d13f 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
+@@ -1430,15 +1430,6 @@ static void mt7603_mac_watchdog_reset(struct mt7603_dev *dev)
+
+ mt7603_beacon_set_timer(dev, -1, 0);
+
+- if (dev->reset_cause[RESET_CAUSE_RESET_FAILED] ||
+- dev->cur_reset_cause == RESET_CAUSE_RX_PSE_BUSY ||
+- dev->cur_reset_cause == RESET_CAUSE_BEACON_STUCK ||
+- dev->cur_reset_cause == RESET_CAUSE_TX_HANG)
+- mt7603_pse_reset(dev);
+-
+- if (dev->reset_cause[RESET_CAUSE_RESET_FAILED])
+- goto skip_dma_reset;
+-
+ mt7603_mac_stop(dev);
+
+ mt76_clear(dev, MT_WPDMA_GLO_CFG,
+@@ -1448,28 +1439,32 @@ static void mt7603_mac_watchdog_reset(struct mt7603_dev *dev)
+
+ mt7603_irq_disable(dev, mask);
+
+- mt76_set(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_FORCE_TX_EOF);
+-
+ mt7603_pse_client_reset(dev);
+
+ mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], true);
+ for (i = 0; i < __MT_TXQ_MAX; i++)
+ mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true);
+
++ mt7603_dma_sched_reset(dev);
++
++ mt76_tx_status_check(&dev->mt76, true);
++
+ mt76_for_each_q_rx(&dev->mt76, i) {
+ mt76_queue_rx_reset(dev, i);
+ }
+
+- mt76_tx_status_check(&dev->mt76, true);
++ if (dev->reset_cause[RESET_CAUSE_RESET_FAILED] ||
++ dev->cur_reset_cause == RESET_CAUSE_RX_PSE_BUSY)
++ mt7603_pse_reset(dev);
+
+- mt7603_dma_sched_reset(dev);
++ if (!dev->reset_cause[RESET_CAUSE_RESET_FAILED]) {
++ mt7603_mac_dma_start(dev);
+
+- mt7603_mac_dma_start(dev);
++ mt7603_irq_enable(dev, mask);
+
+- mt7603_irq_enable(dev, mask);
++ clear_bit(MT76_RESET, &dev->mphy.state);
++ }
+
+-skip_dma_reset:
+- clear_bit(MT76_RESET, &dev->mphy.state);
+ mutex_unlock(&dev->mt76.mutex);
+
+ mt76_worker_enable(&dev->mt76.tx_worker);
+@@ -1559,20 +1554,29 @@ static bool mt7603_rx_pse_busy(struct mt7603_dev *dev)
+ {
+ u32 addr, val;
+
+- if (mt76_rr(dev, MT_MCU_DEBUG_RESET) & MT_MCU_DEBUG_RESET_QUEUES)
+- return true;
+-
+ if (mt7603_rx_fifo_busy(dev))
+- return false;
++ goto out;
+
+ addr = mt7603_reg_map(dev, MT_CLIENT_BASE_PHYS_ADDR + MT_CLIENT_STATUS);
+ mt76_wr(dev, addr, 3);
+ val = mt76_rr(dev, addr) >> 16;
+
+- if (is_mt7628(dev) && (val & 0x4001) == 0x4001)
+- return true;
++ if (!(val & BIT(0)))
++ return false;
+
+- return (val & 0x8001) == 0x8001 || (val & 0xe001) == 0xe001;
++ if (is_mt7628(dev))
++ val &= 0xa000;
++ else
++ val &= 0x8000;
++ if (!val)
++ return false;
++
++out:
++ if (mt76_rr(dev, MT_INT_SOURCE_CSR) &
++ (MT_INT_RX_DONE(0) | MT_INT_RX_DONE(1)))
++ return false;
++
++ return true;
+ }
+
+ static bool
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/regs.h b/drivers/net/wireless/mediatek/mt76/mt7603/regs.h
+index 3b901090b29c6..9b84db233aceb 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7603/regs.h
++++ b/drivers/net/wireless/mediatek/mt76/mt7603/regs.h
+@@ -462,6 +462,11 @@ enum {
+ #define MT_WF_SEC_BASE 0x21a00
+ #define MT_WF_SEC(ofs) (MT_WF_SEC_BASE + (ofs))
+
++#define MT_WF_CFG_OFF_BASE 0x21e00
++#define MT_WF_CFG_OFF(ofs) (MT_WF_CFG_OFF_BASE + (ofs))
++#define MT_WF_CFG_OFF_WOCCR MT_WF_CFG_OFF(0x004)
++#define MT_WF_CFG_OFF_WOCCR_TMAC_GC_DIS BIT(4)
++
+ #define MT_SEC_SCR MT_WF_SEC(0x004)
+ #define MT_SEC_SCR_MASK_ORDER GENMASK(1, 0)
+
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+index bcfc30d669c20..b2ea539f697f7 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+@@ -988,13 +988,13 @@ mt7915_is_ebf_supported(struct mt7915_phy *phy, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, bool bfee)
+ {
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+- int tx_ant = hweight8(phy->mt76->chainmask) - 1;
++ int sts = hweight16(phy->mt76->chainmask);
+
+ if (vif->type != NL80211_IFTYPE_STATION &&
+ vif->type != NL80211_IFTYPE_AP)
+ return false;
+
+- if (!bfee && tx_ant < 2)
++ if (!bfee && sts < 2)
+ return false;
+
+ if (sta->deflink.he_cap.has_he) {
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c
+index 6f61d6a106272..5a34894a533be 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c
+@@ -799,7 +799,7 @@ static void rtl88e_dm_check_edca_turbo(struct ieee80211_hw *hw)
+ }
+
+ if (rtlpriv->btcoexist.bt_edca_dl != 0) {
+- edca_be_ul = rtlpriv->btcoexist.bt_edca_dl;
++ edca_be_dl = rtlpriv->btcoexist.bt_edca_dl;
+ bt_change_edca = true;
+ }
+
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c
+index 0b6a15c2e5ccd..d92aad60edfe9 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c
+@@ -640,7 +640,7 @@ static void rtl92c_dm_check_edca_turbo(struct ieee80211_hw *hw)
+ }
+
+ if (rtlpriv->btcoexist.bt_edca_dl != 0) {
+- edca_be_ul = rtlpriv->btcoexist.bt_edca_dl;
++ edca_be_dl = rtlpriv->btcoexist.bt_edca_dl;
+ bt_change_edca = true;
+ }
+
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c
+index 8ada31380efa4..0ff8e355c23a4 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c
+@@ -466,7 +466,7 @@ static void rtl8723e_dm_check_edca_turbo(struct ieee80211_hw *hw)
+ }
+
+ if (rtlpriv->btcoexist.bt_edca_dl != 0) {
+- edca_be_ul = rtlpriv->btcoexist.bt_edca_dl;
++ edca_be_dl = rtlpriv->btcoexist.bt_edca_dl;
+ bt_change_edca = true;
+ }
+
+diff --git a/drivers/net/wireless/realtek/rtw88/debug.c b/drivers/net/wireless/realtek/rtw88/debug.c
+index 9ebe544e51d0d..abd750c3c28e5 100644
+--- a/drivers/net/wireless/realtek/rtw88/debug.c
++++ b/drivers/net/wireless/realtek/rtw88/debug.c
+@@ -1191,9 +1191,9 @@ static struct rtw_debugfs_priv rtw_debug_priv_dm_cap = {
+ #define rtw_debugfs_add_core(name, mode, fopname, parent) \
+ do { \
+ rtw_debug_priv_ ##name.rtwdev = rtwdev; \
+- if (!debugfs_create_file(#name, mode, \
++ if (IS_ERR(debugfs_create_file(#name, mode, \
+ parent, &rtw_debug_priv_ ##name,\
+- &file_ops_ ##fopname)) \
++ &file_ops_ ##fopname))) \
+ pr_debug("Unable to initialize debugfs:%s\n", \
+ #name); \
+ } while (0)
+diff --git a/drivers/net/wireless/silabs/wfx/data_tx.c b/drivers/net/wireless/silabs/wfx/data_tx.c
+index 6a5e52a96d183..caa22226b01bc 100644
+--- a/drivers/net/wireless/silabs/wfx/data_tx.c
++++ b/drivers/net/wireless/silabs/wfx/data_tx.c
+@@ -226,53 +226,40 @@ static u8 wfx_tx_get_link_id(struct wfx_vif *wvif, struct ieee80211_sta *sta,
+
+ static void wfx_tx_fixup_rates(struct ieee80211_tx_rate *rates)
+ {
+- int i;
+- bool finished;
++ bool has_rate0 = false;
++ int i, j;
+
+- /* Firmware is not able to mix rates with different flags */
+- for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
+- if (rates[0].flags & IEEE80211_TX_RC_SHORT_GI)
+- rates[i].flags |= IEEE80211_TX_RC_SHORT_GI;
+- if (!(rates[0].flags & IEEE80211_TX_RC_SHORT_GI))
++ for (i = 1, j = 1; j < IEEE80211_TX_MAX_RATES; j++) {
++ if (rates[j].idx == -1)
++ break;
++ /* The device use the rates in descending order, whatever the request from minstrel.
++ * We have to trade off here. Most important is to respect the primary rate
++ * requested by minstrel. So, we drops the entries with rate higher than the
++ * previous.
++ */
++ if (rates[j].idx >= rates[i - 1].idx) {
++ rates[i - 1].count += rates[j].count;
++ rates[i - 1].count = min_t(u16, 15, rates[i - 1].count);
++ } else {
++ memcpy(rates + i, rates + j, sizeof(rates[i]));
++ if (rates[i].idx == 0)
++ has_rate0 = true;
++ /* The device apply Short GI only on the first rate */
+ rates[i].flags &= ~IEEE80211_TX_RC_SHORT_GI;
+- if (!(rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS))
+- rates[i].flags &= ~IEEE80211_TX_RC_USE_RTS_CTS;
+- }
+-
+- /* Sort rates and remove duplicates */
+- do {
+- finished = true;
+- for (i = 0; i < IEEE80211_TX_MAX_RATES - 1; i++) {
+- if (rates[i + 1].idx == rates[i].idx &&
+- rates[i].idx != -1) {
+- rates[i].count += rates[i + 1].count;
+- if (rates[i].count > 15)
+- rates[i].count = 15;
+- rates[i + 1].idx = -1;
+- rates[i + 1].count = 0;
+-
+- finished = false;
+- }
+- if (rates[i + 1].idx > rates[i].idx) {
+- swap(rates[i + 1], rates[i]);
+- finished = false;
+- }
++ i++;
+ }
+- } while (!finished);
++ }
+ /* Ensure that MCS0 or 1Mbps is present at the end of the retry list */
+- for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
+- if (rates[i].idx == 0)
+- break;
+- if (rates[i].idx == -1) {
+- rates[i].idx = 0;
+- rates[i].count = 8; /* == hw->max_rate_tries */
+- rates[i].flags = rates[i - 1].flags & IEEE80211_TX_RC_MCS;
+- break;
+- }
++ if (!has_rate0 && i < IEEE80211_TX_MAX_RATES) {
++ rates[i].idx = 0;
++ rates[i].count = 8; /* == hw->max_rate_tries */
++ rates[i].flags = rates[0].flags & IEEE80211_TX_RC_MCS;
++ i++;
++ }
++ for (; i < IEEE80211_TX_MAX_RATES; i++) {
++ memset(rates + i, 0, sizeof(rates[i]));
++ rates[i].idx = -1;
+ }
+- /* All retries use long GI */
+- for (i = 1; i < IEEE80211_TX_MAX_RATES; i++)
+- rates[i].flags &= ~IEEE80211_TX_RC_SHORT_GI;
+ }
+
+ static u8 wfx_tx_get_retry_policy_id(struct wfx_vif *wvif, struct ieee80211_tx_info *tx_info)
+diff --git a/drivers/nvdimm/of_pmem.c b/drivers/nvdimm/of_pmem.c
+index 10dbdcdfb9ce9..0243789ba914b 100644
+--- a/drivers/nvdimm/of_pmem.c
++++ b/drivers/nvdimm/of_pmem.c
+@@ -30,7 +30,13 @@ static int of_pmem_region_probe(struct platform_device *pdev)
+ if (!priv)
+ return -ENOMEM;
+
+- priv->bus_desc.provider_name = kstrdup(pdev->name, GFP_KERNEL);
++ priv->bus_desc.provider_name = devm_kstrdup(&pdev->dev, pdev->name,
++ GFP_KERNEL);
++ if (!priv->bus_desc.provider_name) {
++ kfree(priv);
++ return -ENOMEM;
++ }
++
+ priv->bus_desc.module = THIS_MODULE;
+ priv->bus_desc.of_node = np;
+
+diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
+index e0875d3697624..7995f93db2a82 100644
+--- a/drivers/nvdimm/region_devs.c
++++ b/drivers/nvdimm/region_devs.c
+@@ -892,7 +892,8 @@ unsigned int nd_region_acquire_lane(struct nd_region *nd_region)
+ {
+ unsigned int cpu, lane;
+
+- cpu = get_cpu();
++ migrate_disable();
++ cpu = smp_processor_id();
+ if (nd_region->num_lanes < nr_cpu_ids) {
+ struct nd_percpu_lane *ndl_lock, *ndl_count;
+
+@@ -911,16 +912,15 @@ EXPORT_SYMBOL(nd_region_acquire_lane);
+ void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane)
+ {
+ if (nd_region->num_lanes < nr_cpu_ids) {
+- unsigned int cpu = get_cpu();
++ unsigned int cpu = smp_processor_id();
+ struct nd_percpu_lane *ndl_lock, *ndl_count;
+
+ ndl_count = per_cpu_ptr(nd_region->lane, cpu);
+ ndl_lock = per_cpu_ptr(nd_region->lane, lane);
+ if (--ndl_count->count == 0)
+ spin_unlock(&ndl_lock->lock);
+- put_cpu();
+ }
+- put_cpu();
++ migrate_enable();
+ }
+ EXPORT_SYMBOL(nd_region_release_lane);
+
+diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
+index b33004a4bcb5a..91e6d03475798 100644
+--- a/drivers/nvme/host/ioctl.c
++++ b/drivers/nvme/host/ioctl.c
+@@ -435,10 +435,13 @@ static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,
+ void *cookie = READ_ONCE(ioucmd->cookie);
+
+ req->bio = pdu->bio;
+- if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
++ if (nvme_req(req)->flags & NVME_REQ_CANCELLED) {
+ pdu->nvme_status = -EINTR;
+- else
++ } else {
+ pdu->nvme_status = nvme_req(req)->status;
++ if (!pdu->nvme_status)
++ pdu->nvme_status = blk_status_to_errno(err);
++ }
+ pdu->u.result = le64_to_cpu(nvme_req(req)->result.u64);
+
+ /*
+diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
+index d4c9b888a79d7..5c35884c226e6 100644
+--- a/drivers/pci/controller/vmd.c
++++ b/drivers/pci/controller/vmd.c
+@@ -510,8 +510,7 @@ static void vmd_domain_reset(struct vmd_dev *vmd)
+ base = vmd->cfgbar + PCIE_ECAM_OFFSET(bus,
+ PCI_DEVFN(dev, 0), 0);
+
+- hdr_type = readb(base + PCI_HEADER_TYPE) &
+- PCI_HEADER_TYPE_MASK;
++ hdr_type = readb(base + PCI_HEADER_TYPE);
+
+ functions = (hdr_type & 0x80) ? 8 : 1;
+ for (fn = 0; fn < functions; fn++) {
+diff --git a/drivers/pcmcia/cs.c b/drivers/pcmcia/cs.c
+index f70197154a362..820cce7c8b400 100644
+--- a/drivers/pcmcia/cs.c
++++ b/drivers/pcmcia/cs.c
+@@ -605,6 +605,7 @@ static int pccardd(void *__skt)
+ dev_warn(&skt->dev, "PCMCIA: unable to register socket\n");
+ skt->thread = NULL;
+ complete(&skt->thread_done);
++ put_device(&skt->dev);
+ return 0;
+ }
+ ret = pccard_sysfs_add_socket(&skt->dev);
+diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c
+index ace133b9f7d45..2eb81d9484d27 100644
+--- a/drivers/pcmcia/ds.c
++++ b/drivers/pcmcia/ds.c
+@@ -513,9 +513,6 @@ static struct pcmcia_device *pcmcia_device_add(struct pcmcia_socket *s,
+ /* by default don't allow DMA */
+ p_dev->dma_mask = 0;
+ p_dev->dev.dma_mask = &p_dev->dma_mask;
+- dev_set_name(&p_dev->dev, "%d.%d", p_dev->socket->sock, p_dev->device_no);
+- if (!dev_name(&p_dev->dev))
+- goto err_free;
+ p_dev->devname = kasprintf(GFP_KERNEL, "pcmcia%s", dev_name(&p_dev->dev));
+ if (!p_dev->devname)
+ goto err_free;
+@@ -573,8 +570,15 @@ static struct pcmcia_device *pcmcia_device_add(struct pcmcia_socket *s,
+
+ pcmcia_device_query(p_dev);
+
+- if (device_register(&p_dev->dev))
+- goto err_unreg;
++ dev_set_name(&p_dev->dev, "%d.%d", p_dev->socket->sock, p_dev->device_no);
++ if (device_register(&p_dev->dev)) {
++ mutex_lock(&s->ops_mutex);
++ list_del(&p_dev->socket_device_list);
++ s->device_count--;
++ mutex_unlock(&s->ops_mutex);
++ put_device(&p_dev->dev);
++ return NULL;
++ }
+
+ return p_dev;
+
+diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c
+index cfb36adf4eb80..47e7c3206939f 100644
+--- a/drivers/perf/arm-cmn.c
++++ b/drivers/perf/arm-cmn.c
+@@ -44,8 +44,11 @@
+ #define CMN_MAX_DTMS (CMN_MAX_XPS + (CMN_MAX_DIMENSION - 1) * 4)
+
+ /* The CFG node has various info besides the discovery tree */
+-#define CMN_CFGM_PERIPH_ID_2 0x0010
+-#define CMN_CFGM_PID2_REVISION GENMASK(7, 4)
++#define CMN_CFGM_PERIPH_ID_01 0x0008
++#define CMN_CFGM_PID0_PART_0 GENMASK_ULL(7, 0)
++#define CMN_CFGM_PID1_PART_1 GENMASK_ULL(35, 32)
++#define CMN_CFGM_PERIPH_ID_23 0x0010
++#define CMN_CFGM_PID2_REVISION GENMASK_ULL(7, 4)
+
+ #define CMN_CFGM_INFO_GLOBAL 0x900
+ #define CMN_INFO_MULTIPLE_DTM_EN BIT_ULL(63)
+@@ -107,7 +110,9 @@
+
+ #define CMN_DTM_PMEVCNTSR 0x240
+
+-#define CMN_DTM_UNIT_INFO 0x0910
++#define CMN650_DTM_UNIT_INFO 0x0910
++#define CMN_DTM_UNIT_INFO 0x0960
++#define CMN_DTM_UNIT_INFO_DTC_DOMAIN GENMASK_ULL(1, 0)
+
+ #define CMN_DTM_NUM_COUNTERS 4
+ /* Want more local counters? Why not replicate the whole DTM! Ugh... */
+@@ -186,6 +191,7 @@
+ #define CMN_WP_DOWN 2
+
+
++/* Internal values for encoding event support */
+ enum cmn_model {
+ CMN600 = 1,
+ CMN650 = 2,
+@@ -197,26 +203,34 @@ enum cmn_model {
+ CMN_650ON = CMN650 | CMN700,
+ };
+
++/* Actual part numbers and revision IDs defined by the hardware */
++enum cmn_part {
++ PART_CMN600 = 0x434,
++ PART_CMN650 = 0x436,
++ PART_CMN700 = 0x43c,
++ PART_CI700 = 0x43a,
++};
++
+ /* CMN-600 r0px shouldn't exist in silicon, thankfully */
+ enum cmn_revision {
+- CMN600_R1P0,
+- CMN600_R1P1,
+- CMN600_R1P2,
+- CMN600_R1P3,
+- CMN600_R2P0,
+- CMN600_R3P0,
+- CMN600_R3P1,
+- CMN650_R0P0 = 0,
+- CMN650_R1P0,
+- CMN650_R1P1,
+- CMN650_R2P0,
+- CMN650_R1P2,
+- CMN700_R0P0 = 0,
+- CMN700_R1P0,
+- CMN700_R2P0,
+- CI700_R0P0 = 0,
+- CI700_R1P0,
+- CI700_R2P0,
++ REV_CMN600_R1P0,
++ REV_CMN600_R1P1,
++ REV_CMN600_R1P2,
++ REV_CMN600_R1P3,
++ REV_CMN600_R2P0,
++ REV_CMN600_R3P0,
++ REV_CMN600_R3P1,
++ REV_CMN650_R0P0 = 0,
++ REV_CMN650_R1P0,
++ REV_CMN650_R1P1,
++ REV_CMN650_R2P0,
++ REV_CMN650_R1P2,
++ REV_CMN700_R0P0 = 0,
++ REV_CMN700_R1P0,
++ REV_CMN700_R2P0,
++ REV_CI700_R0P0 = 0,
++ REV_CI700_R1P0,
++ REV_CI700_R2P0,
+ };
+
+ enum cmn_node_type {
+@@ -306,7 +320,7 @@ struct arm_cmn {
+ unsigned int state;
+
+ enum cmn_revision rev;
+- enum cmn_model model;
++ enum cmn_part part;
+ u8 mesh_x;
+ u8 mesh_y;
+ u16 num_xps;
+@@ -394,19 +408,35 @@ static struct arm_cmn_node *arm_cmn_node(const struct arm_cmn *cmn,
+ return NULL;
+ }
+
++static enum cmn_model arm_cmn_model(const struct arm_cmn *cmn)
++{
++ switch (cmn->part) {
++ case PART_CMN600:
++ return CMN600;
++ case PART_CMN650:
++ return CMN650;
++ case PART_CMN700:
++ return CMN700;
++ case PART_CI700:
++ return CI700;
++ default:
++ return 0;
++ };
++}
++
+ static u32 arm_cmn_device_connect_info(const struct arm_cmn *cmn,
+ const struct arm_cmn_node *xp, int port)
+ {
+ int offset = CMN_MXP__CONNECT_INFO(port);
+
+ if (port >= 2) {
+- if (cmn->model & (CMN600 | CMN650))
++ if (cmn->part == PART_CMN600 || cmn->part == PART_CMN650)
+ return 0;
+ /*
+ * CI-700 may have extra ports, but still has the
+ * mesh_port_connect_info registers in the way.
+ */
+- if (cmn->model == CI700)
++ if (cmn->part == PART_CI700)
+ offset += CI700_CONNECT_INFO_P2_5_OFFSET;
+ }
+
+@@ -640,7 +670,7 @@ static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj,
+
+ eattr = container_of(attr, typeof(*eattr), attr.attr);
+
+- if (!(eattr->model & cmn->model))
++ if (!(eattr->model & arm_cmn_model(cmn)))
+ return 0;
+
+ type = eattr->type;
+@@ -658,7 +688,7 @@ static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj,
+ if ((intf & 4) && !(cmn->ports_used & BIT(intf & 3)))
+ return 0;
+
+- if (chan == 4 && cmn->model == CMN600)
++ if (chan == 4 && cmn->part == PART_CMN600)
+ return 0;
+
+ if ((chan == 5 && cmn->rsp_vc_num < 2) ||
+@@ -669,19 +699,19 @@ static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj,
+ }
+
+ /* Revision-specific differences */
+- if (cmn->model == CMN600) {
+- if (cmn->rev < CMN600_R1P3) {
++ if (cmn->part == PART_CMN600) {
++ if (cmn->rev < REV_CMN600_R1P3) {
+ if (type == CMN_TYPE_CXRA && eventid > 0x10)
+ return 0;
+ }
+- if (cmn->rev < CMN600_R1P2) {
++ if (cmn->rev < REV_CMN600_R1P2) {
+ if (type == CMN_TYPE_HNF && eventid == 0x1b)
+ return 0;
+ if (type == CMN_TYPE_CXRA || type == CMN_TYPE_CXHA)
+ return 0;
+ }
+- } else if (cmn->model == CMN650) {
+- if (cmn->rev < CMN650_R2P0 || cmn->rev == CMN650_R1P2) {
++ } else if (cmn->part == PART_CMN650) {
++ if (cmn->rev < REV_CMN650_R2P0 || cmn->rev == REV_CMN650_R1P2) {
+ if (type == CMN_TYPE_HNF && eventid > 0x22)
+ return 0;
+ if (type == CMN_TYPE_SBSX && eventid == 0x17)
+@@ -689,8 +719,8 @@ static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj,
+ if (type == CMN_TYPE_RNI && eventid > 0x10)
+ return 0;
+ }
+- } else if (cmn->model == CMN700) {
+- if (cmn->rev < CMN700_R2P0) {
++ } else if (cmn->part == PART_CMN700) {
++ if (cmn->rev < REV_CMN700_R2P0) {
+ if (type == CMN_TYPE_HNF && eventid > 0x2c)
+ return 0;
+ if (type == CMN_TYPE_CCHA && eventid > 0x74)
+@@ -698,7 +728,7 @@ static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj,
+ if (type == CMN_TYPE_CCLA && eventid > 0x27)
+ return 0;
+ }
+- if (cmn->rev < CMN700_R1P0) {
++ if (cmn->rev < REV_CMN700_R1P0) {
+ if (type == CMN_TYPE_HNF && eventid > 0x2b)
+ return 0;
+ }
+@@ -1200,7 +1230,7 @@ static u32 arm_cmn_wp_config(struct perf_event *event)
+ u32 grp = CMN_EVENT_WP_GRP(event);
+ u32 exc = CMN_EVENT_WP_EXCLUSIVE(event);
+ u32 combine = CMN_EVENT_WP_COMBINE(event);
+- bool is_cmn600 = to_cmn(event->pmu)->model == CMN600;
++ bool is_cmn600 = to_cmn(event->pmu)->part == PART_CMN600;
+
+ config = FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_DEV_SEL, dev) |
+ FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_CHN_SEL, chn) |
+@@ -1520,14 +1550,14 @@ done:
+ return ret;
+ }
+
+-static enum cmn_filter_select arm_cmn_filter_sel(enum cmn_model model,
++static enum cmn_filter_select arm_cmn_filter_sel(const struct arm_cmn *cmn,
+ enum cmn_node_type type,
+ unsigned int eventid)
+ {
+ struct arm_cmn_event_attr *e;
+- int i;
++ enum cmn_model model = arm_cmn_model(cmn);
+
+- for (i = 0; i < ARRAY_SIZE(arm_cmn_event_attrs) - 1; i++) {
++ for (int i = 0; i < ARRAY_SIZE(arm_cmn_event_attrs) - 1; i++) {
+ e = container_of(arm_cmn_event_attrs[i], typeof(*e), attr.attr);
+ if (e->model & model && e->type == type && e->eventid == eventid)
+ return e->fsel;
+@@ -1570,12 +1600,12 @@ static int arm_cmn_event_init(struct perf_event *event)
+ /* ...but the DTM may depend on which port we're watching */
+ if (cmn->multi_dtm)
+ hw->dtm_offset = CMN_EVENT_WP_DEV_SEL(event) / 2;
+- } else if (type == CMN_TYPE_XP && cmn->model == CMN700) {
++ } else if (type == CMN_TYPE_XP && cmn->part == PART_CMN700) {
+ hw->wide_sel = true;
+ }
+
+ /* This is sufficiently annoying to recalculate, so cache it */
+- hw->filter_sel = arm_cmn_filter_sel(cmn->model, type, eventid);
++ hw->filter_sel = arm_cmn_filter_sel(cmn, type, eventid);
+
+ bynodeid = CMN_EVENT_BYNODEID(event);
+ nodeid = CMN_EVENT_NODEID(event);
+@@ -1966,6 +1996,16 @@ static int arm_cmn_init_dtcs(struct arm_cmn *cmn)
+ return 0;
+ }
+
++static unsigned int arm_cmn_dtc_domain(struct arm_cmn *cmn, void __iomem *xp_region)
++{
++ int offset = CMN_DTM_UNIT_INFO;
++
++ if (cmn->part == PART_CMN650 || cmn->part == PART_CI700)
++ offset = CMN650_DTM_UNIT_INFO;
++
++ return FIELD_GET(CMN_DTM_UNIT_INFO_DTC_DOMAIN, readl_relaxed(xp_region + offset));
++}
++
+ static void arm_cmn_init_node_info(struct arm_cmn *cmn, u32 offset, struct arm_cmn_node *node)
+ {
+ int level;
+@@ -2006,6 +2046,7 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset)
+ void __iomem *cfg_region;
+ struct arm_cmn_node cfg, *dn;
+ struct arm_cmn_dtm *dtm;
++ enum cmn_part part;
+ u16 child_count, child_poff;
+ u32 xp_offset[CMN_MAX_XPS];
+ u64 reg;
+@@ -2017,7 +2058,19 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset)
+ return -ENODEV;
+
+ cfg_region = cmn->base + rgn_offset;
+- reg = readl_relaxed(cfg_region + CMN_CFGM_PERIPH_ID_2);
++
++ reg = readq_relaxed(cfg_region + CMN_CFGM_PERIPH_ID_01);
++ part = FIELD_GET(CMN_CFGM_PID0_PART_0, reg);
++ part |= FIELD_GET(CMN_CFGM_PID1_PART_1, reg) << 8;
++ if (cmn->part && cmn->part != part)
++ dev_warn(cmn->dev,
++ "Firmware binding mismatch: expected part number 0x%x, found 0x%x\n",
++ cmn->part, part);
++ cmn->part = part;
++ if (!arm_cmn_model(cmn))
++ dev_warn(cmn->dev, "Unknown part number: 0x%x\n", part);
++
++ reg = readl_relaxed(cfg_region + CMN_CFGM_PERIPH_ID_23);
+ cmn->rev = FIELD_GET(CMN_CFGM_PID2_REVISION, reg);
+
+ reg = readq_relaxed(cfg_region + CMN_CFGM_INFO_GLOBAL);
+@@ -2081,10 +2134,10 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset)
+ if (xp->id == (1 << 3))
+ cmn->mesh_x = xp->logid;
+
+- if (cmn->model == CMN600)
++ if (cmn->part == PART_CMN600)
+ xp->dtc = 0xf;
+ else
+- xp->dtc = 1 << readl_relaxed(xp_region + CMN_DTM_UNIT_INFO);
++ xp->dtc = 1 << arm_cmn_dtc_domain(cmn, xp_region);
+
+ xp->dtm = dtm - cmn->dtms;
+ arm_cmn_init_dtm(dtm++, xp, 0);
+@@ -2201,7 +2254,7 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset)
+ if (cmn->num_xps == 1)
+ dev_warn(cmn->dev, "1x1 config not fully supported, translate XP events manually\n");
+
+- dev_dbg(cmn->dev, "model %d, periph_id_2 revision %d\n", cmn->model, cmn->rev);
++ dev_dbg(cmn->dev, "periph_id part 0x%03x revision %d\n", cmn->part, cmn->rev);
+ reg = cmn->ports_used;
+ dev_dbg(cmn->dev, "mesh %dx%d, ID width %d, ports %6pbl%s\n",
+ cmn->mesh_x, cmn->mesh_y, arm_cmn_xyidbits(cmn), &reg,
+@@ -2256,17 +2309,17 @@ static int arm_cmn_probe(struct platform_device *pdev)
+ return -ENOMEM;
+
+ cmn->dev = &pdev->dev;
+- cmn->model = (unsigned long)device_get_match_data(cmn->dev);
++ cmn->part = (unsigned long)device_get_match_data(cmn->dev);
+ platform_set_drvdata(pdev, cmn);
+
+- if (cmn->model == CMN600 && has_acpi_companion(cmn->dev)) {
++ if (cmn->part == PART_CMN600 && has_acpi_companion(cmn->dev)) {
+ rootnode = arm_cmn600_acpi_probe(pdev, cmn);
+ } else {
+ rootnode = 0;
+ cmn->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(cmn->base))
+ return PTR_ERR(cmn->base);
+- if (cmn->model == CMN600)
++ if (cmn->part == PART_CMN600)
+ rootnode = arm_cmn600_of_probe(pdev->dev.of_node);
+ }
+ if (rootnode < 0)
+@@ -2335,10 +2388,10 @@ static int arm_cmn_remove(struct platform_device *pdev)
+
+ #ifdef CONFIG_OF
+ static const struct of_device_id arm_cmn_of_match[] = {
+- { .compatible = "arm,cmn-600", .data = (void *)CMN600 },
+- { .compatible = "arm,cmn-650", .data = (void *)CMN650 },
+- { .compatible = "arm,cmn-700", .data = (void *)CMN700 },
+- { .compatible = "arm,ci-700", .data = (void *)CI700 },
++ { .compatible = "arm,cmn-600", .data = (void *)PART_CMN600 },
++ { .compatible = "arm,cmn-650" },
++ { .compatible = "arm,cmn-700" },
++ { .compatible = "arm,ci-700" },
+ {}
+ };
+ MODULE_DEVICE_TABLE(of, arm_cmn_of_match);
+@@ -2346,9 +2399,9 @@ MODULE_DEVICE_TABLE(of, arm_cmn_of_match);
+
+ #ifdef CONFIG_ACPI
+ static const struct acpi_device_id arm_cmn_acpi_match[] = {
+- { "ARMHC600", CMN600 },
+- { "ARMHC650", CMN650 },
+- { "ARMHC700", CMN700 },
++ { "ARMHC600", PART_CMN600 },
++ { "ARMHC650" },
++ { "ARMHC700" },
+ {}
+ };
+ MODULE_DEVICE_TABLE(acpi, arm_cmn_acpi_match);
+diff --git a/drivers/perf/hisilicon/hisi_pcie_pmu.c b/drivers/perf/hisilicon/hisi_pcie_pmu.c
+index b61f1f9aba214..c4c1cd269c577 100644
+--- a/drivers/perf/hisilicon/hisi_pcie_pmu.c
++++ b/drivers/perf/hisilicon/hisi_pcie_pmu.c
+@@ -342,6 +342,10 @@ static int hisi_pcie_pmu_event_init(struct perf_event *event)
+ struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+
++ /* Check the type first before going on, otherwise it's not our event */
++ if (event->attr.type != event->pmu->type)
++ return -ENOENT;
++
+ event->cpu = pcie_pmu->on_cpu;
+
+ if (EXT_COUNTER_IS_USED(hisi_pcie_get_event(event)))
+@@ -349,9 +353,6 @@ static int hisi_pcie_pmu_event_init(struct perf_event *event)
+ else
+ hwc->event_base = HISI_PCIE_CNT;
+
+- if (event->attr.type != event->pmu->type)
+- return -ENOENT;
+-
+ /* Sampling is not supported. */
+ if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
+ return -EOPNOTSUPP;
+diff --git a/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c
+index 47d3cc9b6eecd..d385234fa28df 100644
+--- a/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c
++++ b/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c
+@@ -416,8 +416,8 @@ static int hisi_pa_pmu_probe(struct platform_device *pdev)
+ ret = perf_pmu_register(&pa_pmu->pmu, name, -1);
+ if (ret) {
+ dev_err(pa_pmu->dev, "PMU register failed, ret = %d\n", ret);
+- cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HISI_PA_ONLINE,
+- &pa_pmu->node);
++ cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_PA_ONLINE,
++ &pa_pmu->node);
+ return ret;
+ }
+
+diff --git a/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c
+index b9c79f17230c2..7d363d475deb2 100644
+--- a/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c
++++ b/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c
+@@ -450,8 +450,8 @@ static int hisi_sllc_pmu_probe(struct platform_device *pdev)
+ ret = perf_pmu_register(&sllc_pmu->pmu, name, -1);
+ if (ret) {
+ dev_err(sllc_pmu->dev, "PMU register failed, ret = %d\n", ret);
+- cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HISI_SLLC_ONLINE,
+- &sllc_pmu->node);
++ cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_SLLC_ONLINE,
++ &sllc_pmu->node);
+ return ret;
+ }
+
+diff --git a/drivers/perf/hisilicon/hns3_pmu.c b/drivers/perf/hisilicon/hns3_pmu.c
+index e0457d84af6b3..16869bf5bf4cc 100644
+--- a/drivers/perf/hisilicon/hns3_pmu.c
++++ b/drivers/perf/hisilicon/hns3_pmu.c
+@@ -1556,8 +1556,8 @@ static int hns3_pmu_init_pmu(struct pci_dev *pdev, struct hns3_pmu *hns3_pmu)
+ ret = perf_pmu_register(&hns3_pmu->pmu, hns3_pmu->pmu.name, -1);
+ if (ret) {
+ pci_err(pdev, "failed to register perf PMU, ret = %d.\n", ret);
+- cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE,
+- &hns3_pmu->node);
++ cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE,
++ &hns3_pmu->node);
+ }
+
+ return ret;
+@@ -1568,8 +1568,8 @@ static void hns3_pmu_uninit_pmu(struct pci_dev *pdev)
+ struct hns3_pmu *hns3_pmu = pci_get_drvdata(pdev);
+
+ perf_pmu_unregister(&hns3_pmu->pmu);
+- cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE,
+- &hns3_pmu->node);
++ cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE,
++ &hns3_pmu->node);
+ }
+
+ static int hns3_pmu_init_dev(struct pci_dev *pdev)
+diff --git a/drivers/pinctrl/renesas/pinctrl-rzg2l.c b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
+index 2a617832a7e60..159812fe1c97c 100644
+--- a/drivers/pinctrl/renesas/pinctrl-rzg2l.c
++++ b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
+@@ -1173,6 +1173,8 @@ static void rzg2l_gpio_irq_disable(struct irq_data *d)
+ u32 port;
+ u8 bit;
+
++ irq_chip_disable_parent(d);
++
+ port = RZG2L_PIN_ID_TO_PORT(hwirq);
+ bit = RZG2L_PIN_ID_TO_PIN(hwirq);
+
+@@ -1187,7 +1189,6 @@ static void rzg2l_gpio_irq_disable(struct irq_data *d)
+ spin_unlock_irqrestore(&pctrl->lock, flags);
+
+ gpiochip_disable_irq(gc, hwirq);
+- irq_chip_disable_parent(d);
+ }
+
+ static void rzg2l_gpio_irq_enable(struct irq_data *d)
+diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
+index 2fe6e147785e4..2b79377cc21e2 100644
+--- a/drivers/platform/x86/wmi.c
++++ b/drivers/platform/x86/wmi.c
+@@ -849,21 +849,13 @@ static int wmi_dev_match(struct device *dev, struct device_driver *driver)
+ }
+ static int wmi_char_open(struct inode *inode, struct file *filp)
+ {
+- const char *driver_name = filp->f_path.dentry->d_iname;
+- struct wmi_block *wblock;
+- struct wmi_block *next;
+-
+- list_for_each_entry_safe(wblock, next, &wmi_block_list, list) {
+- if (!wblock->dev.dev.driver)
+- continue;
+- if (strcmp(driver_name, wblock->dev.dev.driver->name) == 0) {
+- filp->private_data = wblock;
+- break;
+- }
+- }
++ /*
++ * The miscdevice already stores a pointer to itself
++ * inside filp->private_data
++ */
++ struct wmi_block *wblock = container_of(filp->private_data, struct wmi_block, char_dev);
+
+- if (!filp->private_data)
+- return -ENODEV;
++ filp->private_data = wblock;
+
+ return nonseekable_open(inode, filp);
+ }
+@@ -1212,8 +1204,8 @@ static int parse_wdg(struct device *wmi_bus_dev, struct acpi_device *device)
+ struct wmi_block *wblock, *next;
+ union acpi_object *obj;
+ acpi_status status;
+- int retval = 0;
+ u32 i, total;
++ int retval;
+
+ status = acpi_evaluate_object(device->handle, "_WDG", NULL, &out);
+ if (ACPI_FAILURE(status))
+@@ -1224,8 +1216,8 @@ static int parse_wdg(struct device *wmi_bus_dev, struct acpi_device *device)
+ return -ENXIO;
+
+ if (obj->type != ACPI_TYPE_BUFFER) {
+- retval = -ENXIO;
+- goto out_free_pointer;
++ kfree(obj);
++ return -ENXIO;
+ }
+
+ gblock = (const struct guid_block *)obj->buffer.pointer;
+@@ -1240,8 +1232,8 @@ static int parse_wdg(struct device *wmi_bus_dev, struct acpi_device *device)
+
+ wblock = kzalloc(sizeof(*wblock), GFP_KERNEL);
+ if (!wblock) {
+- retval = -ENOMEM;
+- break;
++ dev_err(wmi_bus_dev, "Failed to allocate %pUL\n", &gblock[i].guid);
++ continue;
+ }
+
+ wblock->acpi_device = device;
+@@ -1280,9 +1272,9 @@ static int parse_wdg(struct device *wmi_bus_dev, struct acpi_device *device)
+ }
+ }
+
+-out_free_pointer:
+- kfree(out.pointer);
+- return retval;
++ kfree(obj);
++
++ return 0;
+ }
+
+ /*
+diff --git a/drivers/pwm/pwm-brcmstb.c b/drivers/pwm/pwm-brcmstb.c
+index 3db3f96edf78d..6afd34d651c77 100644
+--- a/drivers/pwm/pwm-brcmstb.c
++++ b/drivers/pwm/pwm-brcmstb.c
+@@ -290,7 +290,7 @@ static int brcmstb_pwm_suspend(struct device *dev)
+ {
+ struct brcmstb_pwm *p = dev_get_drvdata(dev);
+
+- clk_disable(p->clk);
++ clk_disable_unprepare(p->clk);
+
+ return 0;
+ }
+@@ -299,7 +299,7 @@ static int brcmstb_pwm_resume(struct device *dev)
+ {
+ struct brcmstb_pwm *p = dev_get_drvdata(dev);
+
+- clk_enable(p->clk);
++ clk_prepare_enable(p->clk);
+
+ return 0;
+ }
+diff --git a/drivers/pwm/pwm-sti.c b/drivers/pwm/pwm-sti.c
+index 44b1f93256b36..652fdb8dc7bfa 100644
+--- a/drivers/pwm/pwm-sti.c
++++ b/drivers/pwm/pwm-sti.c
+@@ -79,6 +79,7 @@ struct sti_pwm_compat_data {
+ unsigned int cpt_num_devs;
+ unsigned int max_pwm_cnt;
+ unsigned int max_prescale;
++ struct sti_cpt_ddata *ddata;
+ };
+
+ struct sti_pwm_chip {
+@@ -314,7 +315,7 @@ static int sti_pwm_capture(struct pwm_chip *chip, struct pwm_device *pwm,
+ {
+ struct sti_pwm_chip *pc = to_sti_pwmchip(chip);
+ struct sti_pwm_compat_data *cdata = pc->cdata;
+- struct sti_cpt_ddata *ddata = pwm_get_chip_data(pwm);
++ struct sti_cpt_ddata *ddata = &cdata->ddata[pwm->hwpwm];
+ struct device *dev = pc->dev;
+ unsigned int effective_ticks;
+ unsigned long long high, low;
+@@ -440,7 +441,7 @@ static irqreturn_t sti_pwm_interrupt(int irq, void *data)
+ while (cpt_int_stat) {
+ devicenum = ffs(cpt_int_stat) - 1;
+
+- ddata = pwm_get_chip_data(&pc->chip.pwms[devicenum]);
++ ddata = &pc->cdata->ddata[devicenum];
+
+ /*
+ * Capture input:
+@@ -638,30 +639,28 @@ static int sti_pwm_probe(struct platform_device *pdev)
+ dev_err(dev, "failed to prepare clock\n");
+ return ret;
+ }
++
++ cdata->ddata = devm_kzalloc(dev, cdata->cpt_num_devs * sizeof(*cdata->ddata), GFP_KERNEL);
++ if (!cdata->ddata)
++ return -ENOMEM;
+ }
+
+ pc->chip.dev = dev;
+ pc->chip.ops = &sti_pwm_ops;
+ pc->chip.npwm = pc->cdata->pwm_num_devs;
+
+- ret = pwmchip_add(&pc->chip);
+- if (ret < 0) {
+- clk_unprepare(pc->pwm_clk);
+- clk_unprepare(pc->cpt_clk);
+- return ret;
+- }
+-
+ for (i = 0; i < cdata->cpt_num_devs; i++) {
+- struct sti_cpt_ddata *ddata;
+-
+- ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL);
+- if (!ddata)
+- return -ENOMEM;
++ struct sti_cpt_ddata *ddata = &cdata->ddata[i];
+
+ init_waitqueue_head(&ddata->wait);
+ mutex_init(&ddata->lock);
++ }
+
+- pwm_set_chip_data(&pc->chip.pwms[i], ddata);
++ ret = pwmchip_add(&pc->chip);
++ if (ret < 0) {
++ clk_unprepare(pc->pwm_clk);
++ clk_unprepare(pc->cpt_clk);
++ return ret;
+ }
+
+ platform_set_drvdata(pdev, pc);
+diff --git a/drivers/regulator/mt6358-regulator.c b/drivers/regulator/mt6358-regulator.c
+index a0441b8086712..de7b5db8f7f2d 100644
+--- a/drivers/regulator/mt6358-regulator.c
++++ b/drivers/regulator/mt6358-regulator.c
+@@ -655,12 +655,18 @@ static int mt6358_regulator_probe(struct platform_device *pdev)
+ struct mt6358_regulator_info *mt6358_info;
+ int i, max_regulator;
+
+- if (mt6397->chip_id == MT6366_CHIP_ID) {
+- max_regulator = MT6366_MAX_REGULATOR;
+- mt6358_info = mt6366_regulators;
+- } else {
++ switch (mt6397->chip_id) {
++ case MT6358_CHIP_ID:
+ max_regulator = MT6358_MAX_REGULATOR;
+ mt6358_info = mt6358_regulators;
++ break;
++ case MT6366_CHIP_ID:
++ max_regulator = MT6366_MAX_REGULATOR;
++ mt6358_info = mt6366_regulators;
++ break;
++ default:
++ dev_err(&pdev->dev, "unsupported chip ID: %d\n", mt6397->chip_id);
++ return -EINVAL;
+ }
+
+ for (i = 0; i < max_regulator; i++) {
+diff --git a/drivers/rtc/rtc-pcf85363.c b/drivers/rtc/rtc-pcf85363.c
+index c05b722f00605..0d1517cb3c62d 100644
+--- a/drivers/rtc/rtc-pcf85363.c
++++ b/drivers/rtc/rtc-pcf85363.c
+@@ -402,7 +402,7 @@ static int pcf85363_probe(struct i2c_client *client)
+ if (client->irq > 0) {
+ regmap_write(pcf85363->regmap, CTRL_FLAGS, 0);
+ regmap_update_bits(pcf85363->regmap, CTRL_PIN_IO,
+- PIN_IO_INTA_OUT, PIN_IO_INTAPM);
++ PIN_IO_INTAPM, PIN_IO_INTA_OUT);
+ ret = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, pcf85363_rtc_handle_irq,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
+index 1a0c0b7289d26..41148b0430df9 100644
+--- a/drivers/scsi/ibmvscsi/ibmvfc.c
++++ b/drivers/scsi/ibmvscsi/ibmvfc.c
+@@ -22,7 +22,6 @@
+ #include <linux/bsg-lib.h>
+ #include <asm/firmware.h>
+ #include <asm/irq.h>
+-#include <asm/rtas.h>
+ #include <asm/vio.h>
+ #include <scsi/scsi.h>
+ #include <scsi/scsi_cmnd.h>
+@@ -5804,7 +5803,7 @@ static int ibmvfc_register_scsi_channel(struct ibmvfc_host *vhost,
+ irq_failed:
+ do {
+ rc = plpar_hcall_norets(H_FREE_SUB_CRQ, vdev->unit_address, scrq->cookie);
+- } while (rtas_busy_delay(rc));
++ } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
+ reg_failed:
+ LEAVE;
+ return rc;
+diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c
+index 85219b5e1f416..bc400669ee022 100644
+--- a/drivers/soc/qcom/llcc-qcom.c
++++ b/drivers/soc/qcom/llcc-qcom.c
+@@ -778,6 +778,9 @@ static int qcom_llcc_probe(struct platform_device *pdev)
+ u32 version;
+ struct regmap *regmap;
+
++ if (!IS_ERR(drv_data))
++ return -EBUSY;
++
+ drv_data = devm_kzalloc(dev, sizeof(*drv_data), GFP_KERNEL);
+ if (!drv_data) {
+ ret = -ENOMEM;
+diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
+index d4b969e68c314..946e2186d2448 100644
+--- a/drivers/spi/Kconfig
++++ b/drivers/spi/Kconfig
+@@ -1093,6 +1093,7 @@ config SPI_XTENSA_XTFPGA
+ config SPI_ZYNQ_QSPI
+ tristate "Xilinx Zynq QSPI controller"
+ depends on ARCH_ZYNQ || COMPILE_TEST
++ depends on SPI_MEM
+ help
+ This enables support for the Zynq Quad SPI controller
+ in master mode.
+diff --git a/drivers/spi/spi-nxp-fspi.c b/drivers/spi/spi-nxp-fspi.c
+index c7a4a3606547e..afecf69d3ceba 100644
+--- a/drivers/spi/spi-nxp-fspi.c
++++ b/drivers/spi/spi-nxp-fspi.c
+@@ -708,7 +708,7 @@ static int nxp_fspi_read_ahb(struct nxp_fspi *f, const struct spi_mem_op *op)
+ f->memmap_len = len > NXP_FSPI_MIN_IOMAP ?
+ len : NXP_FSPI_MIN_IOMAP;
+
+- f->ahb_addr = ioremap_wc(f->memmap_phy + f->memmap_start,
++ f->ahb_addr = ioremap(f->memmap_phy + f->memmap_start,
+ f->memmap_len);
+
+ if (!f->ahb_addr) {
+diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c
+index 148043d0c2b84..24cab56ecb7fd 100644
+--- a/drivers/spi/spi-tegra20-slink.c
++++ b/drivers/spi/spi-tegra20-slink.c
+@@ -1093,6 +1093,8 @@ static int tegra_slink_probe(struct platform_device *pdev)
+ reset_control_deassert(tspi->rst);
+
+ spi_irq = platform_get_irq(pdev, 0);
++ if (spi_irq < 0)
++ return spi_irq;
+ tspi->irq = spi_irq;
+ ret = request_threaded_irq(tspi->irq, tegra_slink_isr,
+ tegra_slink_isr_thread, IRQF_ONESHOT,
+diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_hw.c b/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
+index a6470a89851e3..fe5fbf6cf6314 100644
+--- a/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
++++ b/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
+@@ -172,12 +172,12 @@ int cedrus_hw_suspend(struct device *device)
+ {
+ struct cedrus_dev *dev = dev_get_drvdata(device);
+
+- reset_control_assert(dev->rstc);
+-
+ clk_disable_unprepare(dev->ram_clk);
+ clk_disable_unprepare(dev->mod_clk);
+ clk_disable_unprepare(dev->ahb_clk);
+
++ reset_control_assert(dev->rstc);
++
+ return 0;
+ }
+
+@@ -186,11 +186,18 @@ int cedrus_hw_resume(struct device *device)
+ struct cedrus_dev *dev = dev_get_drvdata(device);
+ int ret;
+
++ ret = reset_control_reset(dev->rstc);
++ if (ret) {
++ dev_err(dev->dev, "Failed to apply reset\n");
++
++ return ret;
++ }
++
+ ret = clk_prepare_enable(dev->ahb_clk);
+ if (ret) {
+ dev_err(dev->dev, "Failed to enable AHB clock\n");
+
+- return ret;
++ goto err_rst;
+ }
+
+ ret = clk_prepare_enable(dev->mod_clk);
+@@ -207,21 +214,14 @@ int cedrus_hw_resume(struct device *device)
+ goto err_mod_clk;
+ }
+
+- ret = reset_control_reset(dev->rstc);
+- if (ret) {
+- dev_err(dev->dev, "Failed to apply reset\n");
+-
+- goto err_ram_clk;
+- }
+-
+ return 0;
+
+-err_ram_clk:
+- clk_disable_unprepare(dev->ram_clk);
+ err_mod_clk:
+ clk_disable_unprepare(dev->mod_clk);
+ err_ahb_clk:
+ clk_disable_unprepare(dev->ahb_clk);
++err_rst:
++ reset_control_assert(dev->rstc);
+
+ return ret;
+ }
+diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
+index 1eae4ec719a8f..ebb36b2c72d5d 100644
+--- a/drivers/thermal/thermal_core.c
++++ b/drivers/thermal/thermal_core.c
+@@ -649,7 +649,8 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
+ if (result)
+ goto release_ida;
+
+- sprintf(dev->attr_name, "cdev%d_trip_point", dev->id);
++ snprintf(dev->attr_name, sizeof(dev->attr_name), "cdev%d_trip_point",
++ dev->id);
+ sysfs_attr_init(&dev->attr.attr);
+ dev->attr.attr.name = dev->attr_name;
+ dev->attr.attr.mode = 0444;
+@@ -658,7 +659,8 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
+ if (result)
+ goto remove_symbol_link;
+
+- sprintf(dev->weight_attr_name, "cdev%d_weight", dev->id);
++ snprintf(dev->weight_attr_name, sizeof(dev->weight_attr_name),
++ "cdev%d_weight", dev->id);
+ sysfs_attr_init(&dev->weight_attr.attr);
+ dev->weight_attr.attr.name = dev->weight_attr_name;
+ dev->weight_attr.attr.mode = S_IWUSR | S_IRUGO;
+diff --git a/drivers/tty/tty_jobctrl.c b/drivers/tty/tty_jobctrl.c
+index 0d04287da0984..ef8741c3e6629 100644
+--- a/drivers/tty/tty_jobctrl.c
++++ b/drivers/tty/tty_jobctrl.c
+@@ -300,12 +300,7 @@ void disassociate_ctty(int on_exit)
+ return;
+ }
+
+- spin_lock_irq(&current->sighand->siglock);
+- put_pid(current->signal->tty_old_pgrp);
+- current->signal->tty_old_pgrp = NULL;
+- tty = tty_kref_get(current->signal->tty);
+- spin_unlock_irq(&current->sighand->siglock);
+-
++ tty = get_current_tty();
+ if (tty) {
+ unsigned long flags;
+
+@@ -320,6 +315,16 @@ void disassociate_ctty(int on_exit)
+ tty_kref_put(tty);
+ }
+
++ /* If tty->ctrl.pgrp is not NULL, it may be assigned to
++ * current->signal->tty_old_pgrp in a race condition, and
++ * cause pid memleak. Release current->signal->tty_old_pgrp
++ * after tty->ctrl.pgrp set to NULL.
++ */
++ spin_lock_irq(&current->sighand->siglock);
++ put_pid(current->signal->tty_old_pgrp);
++ current->signal->tty_old_pgrp = NULL;
++ spin_unlock_irq(&current->sighand->siglock);
++
+ /* Now clear signal->tty under the lock */
+ read_lock(&tasklist_lock);
+ session_clear_tty(task_session(current));
+diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
+index 6ba4ef2c3949e..dc38d1fa77874 100644
+--- a/drivers/ufs/core/ufshcd.c
++++ b/drivers/ufs/core/ufshcd.c
+@@ -3579,7 +3579,7 @@ int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
+ */
+ ret = utf16s_to_utf8s(uc_str->uc,
+ uc_str->len - QUERY_DESC_HDR_SIZE,
+- UTF16_BIG_ENDIAN, str, ascii_len);
++ UTF16_BIG_ENDIAN, str, ascii_len - 1);
+
+ /* replace non-printable or non-ASCII characters with spaces */
+ for (i = 0; i < ret; i++)
+diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c
+index 3b08c5e811707..34bbdfadd66f3 100644
+--- a/drivers/usb/chipidea/host.c
++++ b/drivers/usb/chipidea/host.c
+@@ -30,8 +30,7 @@ struct ehci_ci_priv {
+ };
+
+ struct ci_hdrc_dma_aligned_buffer {
+- void *kmalloc_ptr;
+- void *old_xfer_buffer;
++ void *original_buffer;
+ u8 data[];
+ };
+
+@@ -380,59 +379,52 @@ static int ci_ehci_bus_suspend(struct usb_hcd *hcd)
+ return 0;
+ }
+
+-static void ci_hdrc_free_dma_aligned_buffer(struct urb *urb)
++static void ci_hdrc_free_dma_aligned_buffer(struct urb *urb, bool copy_back)
+ {
+ struct ci_hdrc_dma_aligned_buffer *temp;
+- size_t length;
+
+ if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER))
+ return;
++ urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER;
+
+ temp = container_of(urb->transfer_buffer,
+ struct ci_hdrc_dma_aligned_buffer, data);
++ urb->transfer_buffer = temp->original_buffer;
++
++ if (copy_back && usb_urb_dir_in(urb)) {
++ size_t length;
+
+- if (usb_urb_dir_in(urb)) {
+ if (usb_pipeisoc(urb->pipe))
+ length = urb->transfer_buffer_length;
+ else
+ length = urb->actual_length;
+
+- memcpy(temp->old_xfer_buffer, temp->data, length);
++ memcpy(temp->original_buffer, temp->data, length);
+ }
+- urb->transfer_buffer = temp->old_xfer_buffer;
+- kfree(temp->kmalloc_ptr);
+
+- urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER;
++ kfree(temp);
+ }
+
+ static int ci_hdrc_alloc_dma_aligned_buffer(struct urb *urb, gfp_t mem_flags)
+ {
+- struct ci_hdrc_dma_aligned_buffer *temp, *kmalloc_ptr;
+- const unsigned int ci_hdrc_usb_dma_align = 32;
+- size_t kmalloc_size;
++ struct ci_hdrc_dma_aligned_buffer *temp;
+
+- if (urb->num_sgs || urb->sg || urb->transfer_buffer_length == 0 ||
+- !((uintptr_t)urb->transfer_buffer & (ci_hdrc_usb_dma_align - 1)))
++ if (urb->num_sgs || urb->sg || urb->transfer_buffer_length == 0)
++ return 0;
++ if (IS_ALIGNED((uintptr_t)urb->transfer_buffer, 4)
++ && IS_ALIGNED(urb->transfer_buffer_length, 4))
+ return 0;
+
+- /* Allocate a buffer with enough padding for alignment */
+- kmalloc_size = urb->transfer_buffer_length +
+- sizeof(struct ci_hdrc_dma_aligned_buffer) +
+- ci_hdrc_usb_dma_align - 1;
+-
+- kmalloc_ptr = kmalloc(kmalloc_size, mem_flags);
+- if (!kmalloc_ptr)
++ temp = kmalloc(sizeof(*temp) + ALIGN(urb->transfer_buffer_length, 4), mem_flags);
++ if (!temp)
+ return -ENOMEM;
+
+- /* Position our struct dma_aligned_buffer such that data is aligned */
+- temp = PTR_ALIGN(kmalloc_ptr + 1, ci_hdrc_usb_dma_align) - 1;
+- temp->kmalloc_ptr = kmalloc_ptr;
+- temp->old_xfer_buffer = urb->transfer_buffer;
+ if (usb_urb_dir_out(urb))
+ memcpy(temp->data, urb->transfer_buffer,
+ urb->transfer_buffer_length);
+- urb->transfer_buffer = temp->data;
+
++ temp->original_buffer = urb->transfer_buffer;
++ urb->transfer_buffer = temp->data;
+ urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER;
+
+ return 0;
+@@ -449,7 +441,7 @@ static int ci_hdrc_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
+
+ ret = usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
+ if (ret)
+- ci_hdrc_free_dma_aligned_buffer(urb);
++ ci_hdrc_free_dma_aligned_buffer(urb, false);
+
+ return ret;
+ }
+@@ -457,7 +449,7 @@ static int ci_hdrc_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
+ static void ci_hdrc_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
+ {
+ usb_hcd_unmap_urb_for_dma(hcd, urb);
+- ci_hdrc_free_dma_aligned_buffer(urb);
++ ci_hdrc_free_dma_aligned_buffer(urb, true);
+ }
+
+ int ci_hdrc_host_init(struct ci_hdrc *ci)
+diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
+index 657f1f659ffaf..35c7a4df8e717 100644
+--- a/drivers/usb/dwc2/hcd.c
++++ b/drivers/usb/dwc2/hcd.c
+@@ -4769,8 +4769,8 @@ fail3:
+ if (qh_allocated && qh->channel && qh->channel->qh == qh)
+ qh->channel->qh = NULL;
+ fail2:
+- spin_unlock_irqrestore(&hsotg->lock, flags);
+ urb->hcpriv = NULL;
++ spin_unlock_irqrestore(&hsotg->lock, flags);
+ kfree(qtd);
+ fail1:
+ if (qh_allocated) {
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index 2aed88c28ef69..c4dd648710ae0 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -348,6 +348,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ /* xHC spec requires PCI devices to support D3hot and D3cold */
+ if (xhci->hci_version >= 0x120)
+ xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW;
++ else if (pdev->vendor == PCI_VENDOR_ID_AMD && xhci->hci_version >= 0x110)
++ xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW;
+
+ if (xhci->quirks & XHCI_RESET_ON_RESUME)
+ xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
+index 5fb55bf194931..c9a101f0e8d01 100644
+--- a/drivers/usb/host/xhci-plat.c
++++ b/drivers/usb/host/xhci-plat.c
+@@ -456,23 +456,38 @@ static int __maybe_unused xhci_plat_resume(struct device *dev)
+ int ret;
+
+ if (!device_may_wakeup(dev) && (xhci->quirks & XHCI_SUSPEND_RESUME_CLKS)) {
+- clk_prepare_enable(xhci->clk);
+- clk_prepare_enable(xhci->reg_clk);
++ ret = clk_prepare_enable(xhci->clk);
++ if (ret)
++ return ret;
++
++ ret = clk_prepare_enable(xhci->reg_clk);
++ if (ret) {
++ clk_disable_unprepare(xhci->clk);
++ return ret;
++ }
+ }
+
+ ret = xhci_priv_resume_quirk(hcd);
+ if (ret)
+- return ret;
++ goto disable_clks;
+
+ ret = xhci_resume(xhci, 0);
+ if (ret)
+- return ret;
++ goto disable_clks;
+
+ pm_runtime_disable(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ return 0;
++
++disable_clks:
++ if (!device_may_wakeup(dev) && (xhci->quirks & XHCI_SUSPEND_RESUME_CLKS)) {
++ clk_disable_unprepare(xhci->clk);
++ clk_disable_unprepare(xhci->reg_clk);
++ }
++
++ return ret;
+ }
+
+ static int __maybe_unused xhci_plat_runtime_suspend(struct device *dev)
+diff --git a/drivers/usb/usbip/stub_dev.c b/drivers/usb/usbip/stub_dev.c
+index 3c6d452e3bf40..4104eea03e806 100644
+--- a/drivers/usb/usbip/stub_dev.c
++++ b/drivers/usb/usbip/stub_dev.c
+@@ -462,8 +462,13 @@ static void stub_disconnect(struct usb_device *udev)
+ /* release port */
+ rc = usb_hub_release_port(udev->parent, udev->portnum,
+ (struct usb_dev_state *) udev);
+- if (rc) {
+- dev_dbg(&udev->dev, "unable to release port\n");
++ /*
++ * NOTE: If a HUB disconnect triggered disconnect of the down stream
++ * device usb_hub_release_port will return -ENODEV so we can safely ignore
++ * that error here.
++ */
++ if (rc && (rc != -ENODEV)) {
++ dev_dbg(&udev->dev, "unable to release port (%i)\n", rc);
+ return;
+ }
+
+diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
+index a2b3743723639..1f3b89c885cca 100644
+--- a/drivers/vhost/vsock.c
++++ b/drivers/vhost/vsock.c
+@@ -51,8 +51,7 @@ struct vhost_vsock {
+ struct hlist_node hash;
+
+ struct vhost_work send_pkt_work;
+- spinlock_t send_pkt_list_lock;
+- struct list_head send_pkt_list; /* host->guest pending packets */
++ struct sk_buff_head send_pkt_queue; /* host->guest pending packets */
+
+ atomic_t queued_replies;
+
+@@ -108,40 +107,31 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
+ vhost_disable_notify(&vsock->dev, vq);
+
+ do {
+- struct virtio_vsock_pkt *pkt;
++ struct virtio_vsock_hdr *hdr;
++ size_t iov_len, payload_len;
+ struct iov_iter iov_iter;
++ u32 flags_to_restore = 0;
++ struct sk_buff *skb;
+ unsigned out, in;
+ size_t nbytes;
+- size_t iov_len, payload_len;
+ int head;
+- u32 flags_to_restore = 0;
+
+- spin_lock_bh(&vsock->send_pkt_list_lock);
+- if (list_empty(&vsock->send_pkt_list)) {
+- spin_unlock_bh(&vsock->send_pkt_list_lock);
++ skb = virtio_vsock_skb_dequeue(&vsock->send_pkt_queue);
++
++ if (!skb) {
+ vhost_enable_notify(&vsock->dev, vq);
+ break;
+ }
+
+- pkt = list_first_entry(&vsock->send_pkt_list,
+- struct virtio_vsock_pkt, list);
+- list_del_init(&pkt->list);
+- spin_unlock_bh(&vsock->send_pkt_list_lock);
+-
+ head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
+ &out, &in, NULL, NULL);
+ if (head < 0) {
+- spin_lock_bh(&vsock->send_pkt_list_lock);
+- list_add(&pkt->list, &vsock->send_pkt_list);
+- spin_unlock_bh(&vsock->send_pkt_list_lock);
++ virtio_vsock_skb_queue_head(&vsock->send_pkt_queue, skb);
+ break;
+ }
+
+ if (head == vq->num) {
+- spin_lock_bh(&vsock->send_pkt_list_lock);
+- list_add(&pkt->list, &vsock->send_pkt_list);
+- spin_unlock_bh(&vsock->send_pkt_list_lock);
+-
++ virtio_vsock_skb_queue_head(&vsock->send_pkt_queue, skb);
+ /* We cannot finish yet if more buffers snuck in while
+ * re-enabling notify.
+ */
+@@ -153,26 +143,27 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
+ }
+
+ if (out) {
+- virtio_transport_free_pkt(pkt);
++ kfree_skb(skb);
+ vq_err(vq, "Expected 0 output buffers, got %u\n", out);
+ break;
+ }
+
+ iov_len = iov_length(&vq->iov[out], in);
+- if (iov_len < sizeof(pkt->hdr)) {
+- virtio_transport_free_pkt(pkt);
++ if (iov_len < sizeof(*hdr)) {
++ kfree_skb(skb);
+ vq_err(vq, "Buffer len [%zu] too small\n", iov_len);
+ break;
+ }
+
+ iov_iter_init(&iov_iter, ITER_DEST, &vq->iov[out], in, iov_len);
+- payload_len = pkt->len - pkt->off;
++ payload_len = skb->len;
++ hdr = virtio_vsock_hdr(skb);
+
+ /* If the packet is greater than the space available in the
+ * buffer, we split it using multiple buffers.
+ */
+- if (payload_len > iov_len - sizeof(pkt->hdr)) {
+- payload_len = iov_len - sizeof(pkt->hdr);
++ if (payload_len > iov_len - sizeof(*hdr)) {
++ payload_len = iov_len - sizeof(*hdr);
+
+ /* As we are copying pieces of large packet's buffer to
+ * small rx buffers, headers of packets in rx queue are
+@@ -185,31 +176,30 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
+ * bits set. After initialized header will be copied to
+ * rx buffer, these required bits will be restored.
+ */
+- if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SEQ_EOM) {
+- pkt->hdr.flags &= ~cpu_to_le32(VIRTIO_VSOCK_SEQ_EOM);
++ if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SEQ_EOM) {
++ hdr->flags &= ~cpu_to_le32(VIRTIO_VSOCK_SEQ_EOM);
+ flags_to_restore |= VIRTIO_VSOCK_SEQ_EOM;
+
+- if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SEQ_EOR) {
+- pkt->hdr.flags &= ~cpu_to_le32(VIRTIO_VSOCK_SEQ_EOR);
++ if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SEQ_EOR) {
++ hdr->flags &= ~cpu_to_le32(VIRTIO_VSOCK_SEQ_EOR);
+ flags_to_restore |= VIRTIO_VSOCK_SEQ_EOR;
+ }
+ }
+ }
+
+ /* Set the correct length in the header */
+- pkt->hdr.len = cpu_to_le32(payload_len);
++ hdr->len = cpu_to_le32(payload_len);
+
+- nbytes = copy_to_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
+- if (nbytes != sizeof(pkt->hdr)) {
+- virtio_transport_free_pkt(pkt);
++ nbytes = copy_to_iter(hdr, sizeof(*hdr), &iov_iter);
++ if (nbytes != sizeof(*hdr)) {
++ kfree_skb(skb);
+ vq_err(vq, "Faulted on copying pkt hdr\n");
+ break;
+ }
+
+- nbytes = copy_to_iter(pkt->buf + pkt->off, payload_len,
+- &iov_iter);
++ nbytes = copy_to_iter(skb->data, payload_len, &iov_iter);
+ if (nbytes != payload_len) {
+- virtio_transport_free_pkt(pkt);
++ kfree_skb(skb);
+ vq_err(vq, "Faulted on copying pkt buf\n");
+ break;
+ }
+@@ -217,31 +207,28 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
+ /* Deliver to monitoring devices all packets that we
+ * will transmit.
+ */
+- virtio_transport_deliver_tap_pkt(pkt);
++ virtio_transport_deliver_tap_pkt(skb);
+
+- vhost_add_used(vq, head, sizeof(pkt->hdr) + payload_len);
++ vhost_add_used(vq, head, sizeof(*hdr) + payload_len);
+ added = true;
+
+- pkt->off += payload_len;
++ skb_pull(skb, payload_len);
+ total_len += payload_len;
+
+ /* If we didn't send all the payload we can requeue the packet
+ * to send it with the next available buffer.
+ */
+- if (pkt->off < pkt->len) {
+- pkt->hdr.flags |= cpu_to_le32(flags_to_restore);
++ if (skb->len > 0) {
++ hdr->flags |= cpu_to_le32(flags_to_restore);
+
+- /* We are queueing the same virtio_vsock_pkt to handle
++ /* We are queueing the same skb to handle
+ * the remaining bytes, and we want to deliver it
+ * to monitoring devices in the next iteration.
+ */
+- pkt->tap_delivered = false;
+-
+- spin_lock_bh(&vsock->send_pkt_list_lock);
+- list_add(&pkt->list, &vsock->send_pkt_list);
+- spin_unlock_bh(&vsock->send_pkt_list_lock);
++ virtio_vsock_skb_clear_tap_delivered(skb);
++ virtio_vsock_skb_queue_head(&vsock->send_pkt_queue, skb);
+ } else {
+- if (pkt->reply) {
++ if (virtio_vsock_skb_reply(skb)) {
+ int val;
+
+ val = atomic_dec_return(&vsock->queued_replies);
+@@ -253,7 +240,7 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
+ restart_tx = true;
+ }
+
+- virtio_transport_free_pkt(pkt);
++ consume_skb(skb);
+ }
+ } while(likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));
+ if (added)
+@@ -278,28 +265,26 @@ static void vhost_transport_send_pkt_work(struct vhost_work *work)
+ }
+
+ static int
+-vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt)
++vhost_transport_send_pkt(struct sk_buff *skb)
+ {
++ struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
+ struct vhost_vsock *vsock;
+- int len = pkt->len;
++ int len = skb->len;
+
+ rcu_read_lock();
+
+ /* Find the vhost_vsock according to guest context id */
+- vsock = vhost_vsock_get(le64_to_cpu(pkt->hdr.dst_cid));
++ vsock = vhost_vsock_get(le64_to_cpu(hdr->dst_cid));
+ if (!vsock) {
+ rcu_read_unlock();
+- virtio_transport_free_pkt(pkt);
++ kfree_skb(skb);
+ return -ENODEV;
+ }
+
+- if (pkt->reply)
++ if (virtio_vsock_skb_reply(skb))
+ atomic_inc(&vsock->queued_replies);
+
+- spin_lock_bh(&vsock->send_pkt_list_lock);
+- list_add_tail(&pkt->list, &vsock->send_pkt_list);
+- spin_unlock_bh(&vsock->send_pkt_list_lock);
+-
++ virtio_vsock_skb_queue_tail(&vsock->send_pkt_queue, skb);
+ vhost_work_queue(&vsock->dev, &vsock->send_pkt_work);
+
+ rcu_read_unlock();
+@@ -310,10 +295,8 @@ static int
+ vhost_transport_cancel_pkt(struct vsock_sock *vsk)
+ {
+ struct vhost_vsock *vsock;
+- struct virtio_vsock_pkt *pkt, *n;
+ int cnt = 0;
+ int ret = -ENODEV;
+- LIST_HEAD(freeme);
+
+ rcu_read_lock();
+
+@@ -322,20 +305,7 @@ vhost_transport_cancel_pkt(struct vsock_sock *vsk)
+ if (!vsock)
+ goto out;
+
+- spin_lock_bh(&vsock->send_pkt_list_lock);
+- list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) {
+- if (pkt->vsk != vsk)
+- continue;
+- list_move(&pkt->list, &freeme);
+- }
+- spin_unlock_bh(&vsock->send_pkt_list_lock);
+-
+- list_for_each_entry_safe(pkt, n, &freeme, list) {
+- if (pkt->reply)
+- cnt++;
+- list_del(&pkt->list);
+- virtio_transport_free_pkt(pkt);
+- }
++ cnt = virtio_transport_purge_skbs(vsk, &vsock->send_pkt_queue);
+
+ if (cnt) {
+ struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
+@@ -352,12 +322,14 @@ out:
+ return ret;
+ }
+
+-static struct virtio_vsock_pkt *
+-vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq,
++static struct sk_buff *
++vhost_vsock_alloc_skb(struct vhost_virtqueue *vq,
+ unsigned int out, unsigned int in)
+ {
+- struct virtio_vsock_pkt *pkt;
++ struct virtio_vsock_hdr *hdr;
+ struct iov_iter iov_iter;
++ struct sk_buff *skb;
++ size_t payload_len;
+ size_t nbytes;
+ size_t len;
+
+@@ -366,50 +338,48 @@ vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq,
+ return NULL;
+ }
+
+- pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
+- if (!pkt)
++ len = iov_length(vq->iov, out);
++
++ /* len contains both payload and hdr */
++ skb = virtio_vsock_alloc_skb(len, GFP_KERNEL);
++ if (!skb)
+ return NULL;
+
+- len = iov_length(vq->iov, out);
+ iov_iter_init(&iov_iter, ITER_SOURCE, vq->iov, out, len);
+
+- nbytes = copy_from_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
+- if (nbytes != sizeof(pkt->hdr)) {
++ hdr = virtio_vsock_hdr(skb);
++ nbytes = copy_from_iter(hdr, sizeof(*hdr), &iov_iter);
++ if (nbytes != sizeof(*hdr)) {
+ vq_err(vq, "Expected %zu bytes for pkt->hdr, got %zu bytes\n",
+- sizeof(pkt->hdr), nbytes);
+- kfree(pkt);
++ sizeof(*hdr), nbytes);
++ kfree_skb(skb);
+ return NULL;
+ }
+
+- pkt->len = le32_to_cpu(pkt->hdr.len);
++ payload_len = le32_to_cpu(hdr->len);
+
+ /* No payload */
+- if (!pkt->len)
+- return pkt;
++ if (!payload_len)
++ return skb;
+
+- /* The pkt is too big */
+- if (pkt->len > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE) {
+- kfree(pkt);
++ /* The pkt is too big or the length in the header is invalid */
++ if (payload_len > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE ||
++ payload_len + sizeof(*hdr) > len) {
++ kfree_skb(skb);
+ return NULL;
+ }
+
+- pkt->buf = kvmalloc(pkt->len, GFP_KERNEL);
+- if (!pkt->buf) {
+- kfree(pkt);
+- return NULL;
+- }
++ virtio_vsock_skb_rx_put(skb);
+
+- pkt->buf_len = pkt->len;
+-
+- nbytes = copy_from_iter(pkt->buf, pkt->len, &iov_iter);
+- if (nbytes != pkt->len) {
+- vq_err(vq, "Expected %u byte payload, got %zu bytes\n",
+- pkt->len, nbytes);
+- virtio_transport_free_pkt(pkt);
++ nbytes = copy_from_iter(skb->data, payload_len, &iov_iter);
++ if (nbytes != payload_len) {
++ vq_err(vq, "Expected %zu byte payload, got %zu bytes\n",
++ payload_len, nbytes);
++ kfree_skb(skb);
+ return NULL;
+ }
+
+- return pkt;
++ return skb;
+ }
+
+ /* Is there space left for replies to rx packets? */
+@@ -496,9 +466,9 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
+ poll.work);
+ struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
+ dev);
+- struct virtio_vsock_pkt *pkt;
+ int head, pkts = 0, total_len = 0;
+ unsigned int out, in;
++ struct sk_buff *skb;
+ bool added = false;
+
+ mutex_lock(&vq->mutex);
+@@ -511,6 +481,8 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
+
+ vhost_disable_notify(&vsock->dev, vq);
+ do {
++ struct virtio_vsock_hdr *hdr;
++
+ if (!vhost_vsock_more_replies(vsock)) {
+ /* Stop tx until the device processes already
+ * pending replies. Leave tx virtqueue
+@@ -532,24 +504,26 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
+ break;
+ }
+
+- pkt = vhost_vsock_alloc_pkt(vq, out, in);
+- if (!pkt) {
++ skb = vhost_vsock_alloc_skb(vq, out, in);
++ if (!skb) {
+ vq_err(vq, "Faulted on pkt\n");
+ continue;
+ }
+
+- total_len += sizeof(pkt->hdr) + pkt->len;
++ total_len += sizeof(*hdr) + skb->len;
+
+ /* Deliver to monitoring devices all received packets */
+- virtio_transport_deliver_tap_pkt(pkt);
++ virtio_transport_deliver_tap_pkt(skb);
++
++ hdr = virtio_vsock_hdr(skb);
+
+ /* Only accept correctly addressed packets */
+- if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid &&
+- le64_to_cpu(pkt->hdr.dst_cid) ==
++ if (le64_to_cpu(hdr->src_cid) == vsock->guest_cid &&
++ le64_to_cpu(hdr->dst_cid) ==
+ vhost_transport_get_local_cid())
+- virtio_transport_recv_pkt(&vhost_transport, pkt);
++ virtio_transport_recv_pkt(&vhost_transport, skb);
+ else
+- virtio_transport_free_pkt(pkt);
++ kfree_skb(skb);
+
+ vhost_add_used(vq, head, 0);
+ added = true;
+@@ -693,8 +667,7 @@ static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
+ VHOST_VSOCK_WEIGHT, true, NULL);
+
+ file->private_data = vsock;
+- spin_lock_init(&vsock->send_pkt_list_lock);
+- INIT_LIST_HEAD(&vsock->send_pkt_list);
++ skb_queue_head_init(&vsock->send_pkt_queue);
+ vhost_work_init(&vsock->send_pkt_work, vhost_transport_send_pkt_work);
+ return 0;
+
+@@ -760,16 +733,7 @@ static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
+ vhost_vsock_flush(vsock);
+ vhost_dev_stop(&vsock->dev);
+
+- spin_lock_bh(&vsock->send_pkt_list_lock);
+- while (!list_empty(&vsock->send_pkt_list)) {
+- struct virtio_vsock_pkt *pkt;
+-
+- pkt = list_first_entry(&vsock->send_pkt_list,
+- struct virtio_vsock_pkt, list);
+- list_del_init(&pkt->list);
+- virtio_transport_free_pkt(pkt);
+- }
+- spin_unlock_bh(&vsock->send_pkt_list_lock);
++ virtio_vsock_skb_queue_purge(&vsock->send_pkt_queue);
+
+ vhost_dev_cleanup(&vsock->dev);
+ kfree(vsock->dev.vqs);
+diff --git a/drivers/video/fbdev/fsl-diu-fb.c b/drivers/video/fbdev/fsl-diu-fb.c
+index e332017c6af62..ce3c5b0b8f4ef 100644
+--- a/drivers/video/fbdev/fsl-diu-fb.c
++++ b/drivers/video/fbdev/fsl-diu-fb.c
+@@ -490,7 +490,7 @@ static enum fsl_diu_monitor_port fsl_diu_name_to_port(const char *s)
+ * Workaround for failed writing desc register of planes.
+ * Needed with MPC5121 DIU rev 2.0 silicon.
+ */
+-void wr_reg_wa(u32 *reg, u32 val)
++static void wr_reg_wa(u32 *reg, u32 val)
+ {
+ do {
+ out_be32(reg, val);
+diff --git a/drivers/video/fbdev/imsttfb.c b/drivers/video/fbdev/imsttfb.c
+index e6adb2890ecfe..b194e71f07bfc 100644
+--- a/drivers/video/fbdev/imsttfb.c
++++ b/drivers/video/fbdev/imsttfb.c
+@@ -1495,8 +1495,8 @@ static int imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+
+ if (!request_mem_region(addr, size, "imsttfb")) {
+ printk(KERN_ERR "imsttfb: Can't reserve memory region\n");
+- framebuffer_release(info);
+- return -ENODEV;
++ ret = -ENODEV;
++ goto release_info;
+ }
+
+ switch (pdev->device) {
+@@ -1513,34 +1513,39 @@ static int imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ printk(KERN_INFO "imsttfb: Device 0x%x unknown, "
+ "contact maintainer.\n", pdev->device);
+ ret = -ENODEV;
+- goto error;
++ goto release_mem_region;
+ }
+
+ info->fix.smem_start = addr;
+ info->screen_base = (__u8 *)ioremap(addr, par->ramdac == IBM ?
+ 0x400000 : 0x800000);
+ if (!info->screen_base)
+- goto error;
++ goto release_mem_region;
+ info->fix.mmio_start = addr + 0x800000;
+ par->dc_regs = ioremap(addr + 0x800000, 0x1000);
+ if (!par->dc_regs)
+- goto error;
++ goto unmap_screen_base;
+ par->cmap_regs_phys = addr + 0x840000;
+ par->cmap_regs = (__u8 *)ioremap(addr + 0x840000, 0x1000);
+ if (!par->cmap_regs)
+- goto error;
++ goto unmap_dc_regs;
+ info->pseudo_palette = par->palette;
+ ret = init_imstt(info);
+- if (!ret)
+- pci_set_drvdata(pdev, info);
+- return ret;
++ if (ret)
++ goto unmap_cmap_regs;
+
+-error:
+- if (par->dc_regs)
+- iounmap(par->dc_regs);
+- if (info->screen_base)
+- iounmap(info->screen_base);
++ pci_set_drvdata(pdev, info);
++ return 0;
++
++unmap_cmap_regs:
++ iounmap(par->cmap_regs);
++unmap_dc_regs:
++ iounmap(par->dc_regs);
++unmap_screen_base:
++ iounmap(info->screen_base);
++release_mem_region:
+ release_mem_region(addr, size);
++release_info:
+ framebuffer_release(info);
+ return ret;
+ }
+diff --git a/drivers/virt/coco/sev-guest/sev-guest.c b/drivers/virt/coco/sev-guest/sev-guest.c
+index 9e172f66a8edb..c47e54b2a865e 100644
+--- a/drivers/virt/coco/sev-guest/sev-guest.c
++++ b/drivers/virt/coco/sev-guest/sev-guest.c
+@@ -57,6 +57,11 @@ struct snp_guest_dev {
+
+ struct snp_secrets_page_layout *layout;
+ struct snp_req_data input;
++ union {
++ struct snp_report_req report;
++ struct snp_derived_key_req derived_key;
++ struct snp_ext_report_req ext_report;
++ } req;
+ u32 *os_area_msg_seqno;
+ u8 *vmpck;
+ };
+@@ -334,11 +339,12 @@ static int enc_payload(struct snp_guest_dev *snp_dev, u64 seqno, int version, u8
+ return __enc_payload(snp_dev, req, payload, sz);
+ }
+
+-static int __handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code, __u64 *fw_err)
++static int __handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code,
++ struct snp_guest_request_ioctl *rio)
+ {
+- unsigned long err = 0xff, override_err = 0;
+ unsigned long req_start = jiffies;
+ unsigned int override_npages = 0;
++ u64 override_err = 0;
+ int rc;
+
+ retry_request:
+@@ -348,7 +354,7 @@ retry_request:
+ * sequence number must be incremented or the VMPCK must be deleted to
+ * prevent reuse of the IV.
+ */
+- rc = snp_issue_guest_request(exit_code, &snp_dev->input, &err);
++ rc = snp_issue_guest_request(exit_code, &snp_dev->input, rio);
+ switch (rc) {
+ case -ENOSPC:
+ /*
+@@ -366,7 +372,7 @@ retry_request:
+ * request buffer size was too small and give the caller the
+ * required buffer size.
+ */
+- override_err = SNP_GUEST_REQ_INVALID_LEN;
++ override_err = SNP_GUEST_VMM_ERR(SNP_GUEST_VMM_ERR_INVALID_LEN);
+
+ /*
+ * If this call to the firmware succeeds, the sequence number can
+@@ -379,7 +385,7 @@ retry_request:
+ goto retry_request;
+
+ /*
+- * The host may return SNP_GUEST_REQ_ERR_EBUSY if the request has been
++ * The host may return SNP_GUEST_VMM_ERR_BUSY if the request has been
+ * throttled. Retry in the driver to avoid returning and reusing the
+ * message sequence number on a different message.
+ */
+@@ -400,27 +406,29 @@ retry_request:
+ */
+ snp_inc_msg_seqno(snp_dev);
+
+- if (fw_err)
+- *fw_err = override_err ?: err;
++ if (override_err) {
++ rio->exitinfo2 = override_err;
++
++ /*
++ * If an extended guest request was issued and the supplied certificate
++ * buffer was not large enough, a standard guest request was issued to
++ * prevent IV reuse. If the standard request was successful, return -EIO
++ * back to the caller as would have originally been returned.
++ */
++ if (!rc && override_err == SNP_GUEST_VMM_ERR(SNP_GUEST_VMM_ERR_INVALID_LEN))
++ rc = -EIO;
++ }
+
+ if (override_npages)
+ snp_dev->input.data_npages = override_npages;
+
+- /*
+- * If an extended guest request was issued and the supplied certificate
+- * buffer was not large enough, a standard guest request was issued to
+- * prevent IV reuse. If the standard request was successful, return -EIO
+- * back to the caller as would have originally been returned.
+- */
+- if (!rc && override_err == SNP_GUEST_REQ_INVALID_LEN)
+- return -EIO;
+-
+ return rc;
+ }
+
+-static int handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code, int msg_ver,
+- u8 type, void *req_buf, size_t req_sz, void *resp_buf,
+- u32 resp_sz, __u64 *fw_err)
++static int handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code,
++ struct snp_guest_request_ioctl *rio, u8 type,
++ void *req_buf, size_t req_sz, void *resp_buf,
++ u32 resp_sz)
+ {
+ u64 seqno;
+ int rc;
+@@ -434,7 +442,7 @@ static int handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code, in
+ memset(snp_dev->response, 0, sizeof(struct snp_guest_msg));
+
+ /* Encrypt the userspace provided payload in snp_dev->secret_request. */
+- rc = enc_payload(snp_dev, seqno, msg_ver, type, req_buf, req_sz);
++ rc = enc_payload(snp_dev, seqno, rio->msg_version, type, req_buf, req_sz);
+ if (rc)
+ return rc;
+
+@@ -445,12 +453,16 @@ static int handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code, in
+ memcpy(snp_dev->request, &snp_dev->secret_request,
+ sizeof(snp_dev->secret_request));
+
+- rc = __handle_guest_request(snp_dev, exit_code, fw_err);
++ rc = __handle_guest_request(snp_dev, exit_code, rio);
+ if (rc) {
+- if (rc == -EIO && *fw_err == SNP_GUEST_REQ_INVALID_LEN)
++ if (rc == -EIO &&
++ rio->exitinfo2 == SNP_GUEST_VMM_ERR(SNP_GUEST_VMM_ERR_INVALID_LEN))
+ return rc;
+
+- dev_alert(snp_dev->dev, "Detected error from ASP request. rc: %d, fw_err: %llu\n", rc, *fw_err);
++ dev_alert(snp_dev->dev,
++ "Detected error from ASP request. rc: %d, exitinfo2: 0x%llx\n",
++ rc, rio->exitinfo2);
++
+ snp_disable_vmpck(snp_dev);
+ return rc;
+ }
+@@ -468,8 +480,8 @@ static int handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code, in
+ static int get_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg)
+ {
+ struct snp_guest_crypto *crypto = snp_dev->crypto;
++ struct snp_report_req *req = &snp_dev->req.report;
+ struct snp_report_resp *resp;
+- struct snp_report_req req;
+ int rc, resp_len;
+
+ lockdep_assert_held(&snp_cmd_mutex);
+@@ -477,7 +489,7 @@ static int get_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_io
+ if (!arg->req_data || !arg->resp_data)
+ return -EINVAL;
+
+- if (copy_from_user(&req, (void __user *)arg->req_data, sizeof(req)))
++ if (copy_from_user(req, (void __user *)arg->req_data, sizeof(*req)))
+ return -EFAULT;
+
+ /*
+@@ -490,9 +502,9 @@ static int get_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_io
+ if (!resp)
+ return -ENOMEM;
+
+- rc = handle_guest_request(snp_dev, SVM_VMGEXIT_GUEST_REQUEST, arg->msg_version,
+- SNP_MSG_REPORT_REQ, &req, sizeof(req), resp->data,
+- resp_len, &arg->fw_err);
++ rc = handle_guest_request(snp_dev, SVM_VMGEXIT_GUEST_REQUEST, arg,
++ SNP_MSG_REPORT_REQ, req, sizeof(*req), resp->data,
++ resp_len);
+ if (rc)
+ goto e_free;
+
+@@ -506,9 +518,9 @@ e_free:
+
+ static int get_derived_key(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg)
+ {
++ struct snp_derived_key_req *req = &snp_dev->req.derived_key;
+ struct snp_guest_crypto *crypto = snp_dev->crypto;
+ struct snp_derived_key_resp resp = {0};
+- struct snp_derived_key_req req;
+ int rc, resp_len;
+ /* Response data is 64 bytes and max authsize for GCM is 16 bytes. */
+ u8 buf[64 + 16];
+@@ -527,12 +539,11 @@ static int get_derived_key(struct snp_guest_dev *snp_dev, struct snp_guest_reque
+ if (sizeof(buf) < resp_len)
+ return -ENOMEM;
+
+- if (copy_from_user(&req, (void __user *)arg->req_data, sizeof(req)))
++ if (copy_from_user(req, (void __user *)arg->req_data, sizeof(*req)))
+ return -EFAULT;
+
+- rc = handle_guest_request(snp_dev, SVM_VMGEXIT_GUEST_REQUEST, arg->msg_version,
+- SNP_MSG_KEY_REQ, &req, sizeof(req), buf, resp_len,
+- &arg->fw_err);
++ rc = handle_guest_request(snp_dev, SVM_VMGEXIT_GUEST_REQUEST, arg,
++ SNP_MSG_KEY_REQ, req, sizeof(*req), buf, resp_len);
+ if (rc)
+ return rc;
+
+@@ -548,8 +559,8 @@ static int get_derived_key(struct snp_guest_dev *snp_dev, struct snp_guest_reque
+
+ static int get_ext_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg)
+ {
++ struct snp_ext_report_req *req = &snp_dev->req.ext_report;
+ struct snp_guest_crypto *crypto = snp_dev->crypto;
+- struct snp_ext_report_req req;
+ struct snp_report_resp *resp;
+ int ret, npages = 0, resp_len;
+
+@@ -558,18 +569,18 @@ static int get_ext_report(struct snp_guest_dev *snp_dev, struct snp_guest_reques
+ if (!arg->req_data || !arg->resp_data)
+ return -EINVAL;
+
+- if (copy_from_user(&req, (void __user *)arg->req_data, sizeof(req)))
++ if (copy_from_user(req, (void __user *)arg->req_data, sizeof(*req)))
+ return -EFAULT;
+
+ /* userspace does not want certificate data */
+- if (!req.certs_len || !req.certs_address)
++ if (!req->certs_len || !req->certs_address)
+ goto cmd;
+
+- if (req.certs_len > SEV_FW_BLOB_MAX_SIZE ||
+- !IS_ALIGNED(req.certs_len, PAGE_SIZE))
++ if (req->certs_len > SEV_FW_BLOB_MAX_SIZE ||
++ !IS_ALIGNED(req->certs_len, PAGE_SIZE))
+ return -EINVAL;
+
+- if (!access_ok((const void __user *)req.certs_address, req.certs_len))
++ if (!access_ok((const void __user *)req->certs_address, req->certs_len))
+ return -EFAULT;
+
+ /*
+@@ -578,8 +589,8 @@ static int get_ext_report(struct snp_guest_dev *snp_dev, struct snp_guest_reques
+ * the host. If host does not supply any certs in it, then copy
+ * zeros to indicate that certificate data was not provided.
+ */
+- memset(snp_dev->certs_data, 0, req.certs_len);
+- npages = req.certs_len >> PAGE_SHIFT;
++ memset(snp_dev->certs_data, 0, req->certs_len);
++ npages = req->certs_len >> PAGE_SHIFT;
+ cmd:
+ /*
+ * The intermediate response buffer is used while decrypting the
+@@ -592,15 +603,15 @@ cmd:
+ return -ENOMEM;
+
+ snp_dev->input.data_npages = npages;
+- ret = handle_guest_request(snp_dev, SVM_VMGEXIT_EXT_GUEST_REQUEST, arg->msg_version,
+- SNP_MSG_REPORT_REQ, &req.data,
+- sizeof(req.data), resp->data, resp_len, &arg->fw_err);
++ ret = handle_guest_request(snp_dev, SVM_VMGEXIT_EXT_GUEST_REQUEST, arg,
++ SNP_MSG_REPORT_REQ, &req->data,
++ sizeof(req->data), resp->data, resp_len);
+
+ /* If certs length is invalid then copy the returned length */
+- if (arg->fw_err == SNP_GUEST_REQ_INVALID_LEN) {
+- req.certs_len = snp_dev->input.data_npages << PAGE_SHIFT;
++ if (arg->vmm_error == SNP_GUEST_VMM_ERR_INVALID_LEN) {
++ req->certs_len = snp_dev->input.data_npages << PAGE_SHIFT;
+
+- if (copy_to_user((void __user *)arg->req_data, &req, sizeof(req)))
++ if (copy_to_user((void __user *)arg->req_data, req, sizeof(*req)))
+ ret = -EFAULT;
+ }
+
+@@ -608,8 +619,8 @@ cmd:
+ goto e_free;
+
+ if (npages &&
+- copy_to_user((void __user *)req.certs_address, snp_dev->certs_data,
+- req.certs_len)) {
++ copy_to_user((void __user *)req->certs_address, snp_dev->certs_data,
++ req->certs_len)) {
+ ret = -EFAULT;
+ goto e_free;
+ }
+@@ -632,7 +643,7 @@ static long snp_guest_ioctl(struct file *file, unsigned int ioctl, unsigned long
+ if (copy_from_user(&input, argp, sizeof(input)))
+ return -EFAULT;
+
+- input.fw_err = 0xff;
++ input.exitinfo2 = 0xff;
+
+ /* Message version must be non-zero */
+ if (!input.msg_version)
+@@ -663,7 +674,7 @@ static long snp_guest_ioctl(struct file *file, unsigned int ioctl, unsigned long
+
+ mutex_unlock(&snp_cmd_mutex);
+
+- if (input.fw_err && copy_to_user(argp, &input, sizeof(input)))
++ if (input.exitinfo2 && copy_to_user(argp, &input, sizeof(input)))
+ return -EFAULT;
+
+ return ret;
+diff --git a/drivers/watchdog/ixp4xx_wdt.c b/drivers/watchdog/ixp4xx_wdt.c
+index 281a48d9889fc..0fc91e9c4a773 100644
+--- a/drivers/watchdog/ixp4xx_wdt.c
++++ b/drivers/watchdog/ixp4xx_wdt.c
+@@ -105,6 +105,25 @@ static const struct watchdog_ops ixp4xx_wdt_ops = {
+ .owner = THIS_MODULE,
+ };
+
++/*
++ * The A0 version of the IXP422 had a bug in the watchdog making
++ * is useless, but we still need to use it to restart the system
++ * as it is the only way, so in this special case we register a
++ * "dummy" watchdog that doesn't really work, but will support
++ * the restart operation.
++ */
++static int ixp4xx_wdt_dummy(struct watchdog_device *wdd)
++{
++ return 0;
++}
++
++static const struct watchdog_ops ixp4xx_wdt_restart_only_ops = {
++ .start = ixp4xx_wdt_dummy,
++ .stop = ixp4xx_wdt_dummy,
++ .restart = ixp4xx_wdt_restart,
++ .owner = THIS_MODULE,
++};
++
+ static const struct watchdog_info ixp4xx_wdt_info = {
+ .options = WDIOF_KEEPALIVEPING
+ | WDIOF_MAGICCLOSE
+@@ -120,14 +139,17 @@ static void ixp4xx_clock_action(void *d)
+
+ static int ixp4xx_wdt_probe(struct platform_device *pdev)
+ {
++ static const struct watchdog_ops *iwdt_ops;
+ struct device *dev = &pdev->dev;
+ struct ixp4xx_wdt *iwdt;
+ struct clk *clk;
+ int ret;
+
+ if (!(read_cpuid_id() & 0xf) && !cpu_is_ixp46x()) {
+- dev_err(dev, "Rev. A0 IXP42x CPU detected - watchdog disabled\n");
+- return -ENODEV;
++ dev_info(dev, "Rev. A0 IXP42x CPU detected - only restart supported\n");
++ iwdt_ops = &ixp4xx_wdt_restart_only_ops;
++ } else {
++ iwdt_ops = &ixp4xx_wdt_ops;
+ }
+
+ iwdt = devm_kzalloc(dev, sizeof(*iwdt), GFP_KERNEL);
+@@ -153,7 +175,7 @@ static int ixp4xx_wdt_probe(struct platform_device *pdev)
+ iwdt->rate = IXP4XX_TIMER_FREQ;
+
+ iwdt->wdd.info = &ixp4xx_wdt_info;
+- iwdt->wdd.ops = &ixp4xx_wdt_ops;
++ iwdt->wdd.ops = iwdt_ops;
+ iwdt->wdd.min_timeout = 1;
+ iwdt->wdd.max_timeout = U32_MAX / iwdt->rate;
+ iwdt->wdd.parent = dev;
+diff --git a/drivers/xen/xen-pciback/conf_space.c b/drivers/xen/xen-pciback/conf_space.c
+index 059de92aea7d0..d47eee6c51435 100644
+--- a/drivers/xen/xen-pciback/conf_space.c
++++ b/drivers/xen/xen-pciback/conf_space.c
+@@ -288,12 +288,6 @@ int xen_pcibk_get_interrupt_type(struct pci_dev *dev)
+ u16 val;
+ int ret = 0;
+
+- err = pci_read_config_word(dev, PCI_COMMAND, &val);
+- if (err)
+- return err;
+- if (!(val & PCI_COMMAND_INTX_DISABLE))
+- ret |= INTERRUPT_TYPE_INTX;
+-
+ /*
+ * Do not trust dev->msi(x)_enabled here, as enabling could be done
+ * bypassing the pci_*msi* functions, by the qemu.
+@@ -316,6 +310,19 @@ int xen_pcibk_get_interrupt_type(struct pci_dev *dev)
+ if (val & PCI_MSIX_FLAGS_ENABLE)
+ ret |= INTERRUPT_TYPE_MSIX;
+ }
++
++ /*
++ * PCIe spec says device cannot use INTx if MSI/MSI-X is enabled,
++ * so check for INTx only when both are disabled.
++ */
++ if (!ret) {
++ err = pci_read_config_word(dev, PCI_COMMAND, &val);
++ if (err)
++ return err;
++ if (!(val & PCI_COMMAND_INTX_DISABLE))
++ ret |= INTERRUPT_TYPE_INTX;
++ }
++
+ return ret ?: INTERRUPT_TYPE_NONE;
+ }
+
+diff --git a/drivers/xen/xen-pciback/conf_space_capability.c b/drivers/xen/xen-pciback/conf_space_capability.c
+index 097316a741268..1948a9700c8fa 100644
+--- a/drivers/xen/xen-pciback/conf_space_capability.c
++++ b/drivers/xen/xen-pciback/conf_space_capability.c
+@@ -236,10 +236,16 @@ static int msi_msix_flags_write(struct pci_dev *dev, int offset, u16 new_value,
+ return PCIBIOS_SET_FAILED;
+
+ if (new_value & field_config->enable_bit) {
+- /* don't allow enabling together with other interrupt types */
++ /*
++ * Don't allow enabling together with other interrupt type, but do
++ * allow enabling MSI(-X) while INTx is still active to please Linuxes
++ * MSI(-X) startup sequence. It is safe to do, as according to PCI
++ * spec, device with enabled MSI(-X) shouldn't use INTx.
++ */
+ int int_type = xen_pcibk_get_interrupt_type(dev);
+
+ if (int_type == INTERRUPT_TYPE_NONE ||
++ int_type == INTERRUPT_TYPE_INTX ||
+ int_type == field_config->int_type)
+ goto write;
+ return PCIBIOS_SET_FAILED;
+diff --git a/drivers/xen/xen-pciback/conf_space_header.c b/drivers/xen/xen-pciback/conf_space_header.c
+index 981435103af1a..fc03326459664 100644
+--- a/drivers/xen/xen-pciback/conf_space_header.c
++++ b/drivers/xen/xen-pciback/conf_space_header.c
+@@ -104,24 +104,9 @@ static int command_write(struct pci_dev *dev, int offset, u16 value, void *data)
+ pci_clear_mwi(dev);
+ }
+
+- if (dev_data && dev_data->allow_interrupt_control) {
+- if ((cmd->val ^ value) & PCI_COMMAND_INTX_DISABLE) {
+- if (value & PCI_COMMAND_INTX_DISABLE) {
+- pci_intx(dev, 0);
+- } else {
+- /* Do not allow enabling INTx together with MSI or MSI-X. */
+- switch (xen_pcibk_get_interrupt_type(dev)) {
+- case INTERRUPT_TYPE_NONE:
+- pci_intx(dev, 1);
+- break;
+- case INTERRUPT_TYPE_INTX:
+- break;
+- default:
+- return PCIBIOS_SET_FAILED;
+- }
+- }
+- }
+- }
++ if (dev_data && dev_data->allow_interrupt_control &&
++ ((cmd->val ^ value) & PCI_COMMAND_INTX_DISABLE))
++ pci_intx(dev, !(value & PCI_COMMAND_INTX_DISABLE));
+
+ cmd->val = value;
+
+diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
+index 639bf628389ba..3205e5d724c8c 100644
+--- a/drivers/xen/xenbus/xenbus_probe.c
++++ b/drivers/xen/xenbus/xenbus_probe.c
+@@ -1025,7 +1025,7 @@ static int __init xenbus_init(void)
+ if (err < 0) {
+ pr_err("xenstore_late_init couldn't bind irq err=%d\n",
+ err);
+- return err;
++ goto out_error;
+ }
+
+ xs_init_irq = err;
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index 9474265ee7ea3..e015e1e025b6e 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -2420,7 +2420,7 @@ static noinline int key_in_sk(struct btrfs_key *key,
+ static noinline int copy_to_sk(struct btrfs_path *path,
+ struct btrfs_key *key,
+ struct btrfs_ioctl_search_key *sk,
+- size_t *buf_size,
++ u64 *buf_size,
+ char __user *ubuf,
+ unsigned long *sk_offset,
+ int *num_found)
+@@ -2552,7 +2552,7 @@ out:
+
+ static noinline int search_ioctl(struct inode *inode,
+ struct btrfs_ioctl_search_key *sk,
+- size_t *buf_size,
++ u64 *buf_size,
+ char __user *ubuf)
+ {
+ struct btrfs_fs_info *info = btrfs_sb(inode->i_sb);
+@@ -2625,7 +2625,7 @@ static noinline int btrfs_ioctl_tree_search(struct inode *inode,
+ struct btrfs_ioctl_search_args __user *uargs = argp;
+ struct btrfs_ioctl_search_key sk;
+ int ret;
+- size_t buf_size;
++ u64 buf_size;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+@@ -2655,8 +2655,8 @@ static noinline int btrfs_ioctl_tree_search_v2(struct inode *inode,
+ struct btrfs_ioctl_search_args_v2 __user *uarg = argp;
+ struct btrfs_ioctl_search_args_v2 args;
+ int ret;
+- size_t buf_size;
+- const size_t buf_limit = SZ_16M;
++ u64 buf_size;
++ const u64 buf_limit = SZ_16M;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index 1bb55a6d79c23..aa5aadd70bbc2 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -1010,6 +1010,11 @@ static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
+ ix = curp->p_idx;
+ }
+
++ if (unlikely(ix > EXT_MAX_INDEX(curp->p_hdr))) {
++ EXT4_ERROR_INODE(inode, "ix > EXT_MAX_INDEX!");
++ return -EFSCORRUPTED;
++ }
++
+ len = EXT_LAST_INDEX(curp->p_hdr) - ix + 1;
+ BUG_ON(len < 0);
+ if (len > 0) {
+@@ -1019,11 +1024,6 @@ static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
+ memmove(ix + 1, ix, len * sizeof(struct ext4_extent_idx));
+ }
+
+- if (unlikely(ix > EXT_MAX_INDEX(curp->p_hdr))) {
+- EXT4_ERROR_INODE(inode, "ix > EXT_MAX_INDEX!");
+- return -EFSCORRUPTED;
+- }
+-
+ ix->ei_block = cpu_to_le32(logical);
+ ext4_idx_store_pblock(ix, ptr);
+ le16_add_cpu(&curp->p_hdr->eh_entries, 1);
+diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
+index a982f91b71eb2..ea05710ca9bdf 100644
+--- a/fs/f2fs/data.c
++++ b/fs/f2fs/data.c
+@@ -2263,8 +2263,10 @@ skip_reading_dnode:
+ f2fs_wait_on_block_writeback(inode, blkaddr);
+
+ if (f2fs_load_compressed_page(sbi, page, blkaddr)) {
+- if (atomic_dec_and_test(&dic->remaining_pages))
++ if (atomic_dec_and_test(&dic->remaining_pages)) {
+ f2fs_decompress_cluster(dic, true);
++ break;
++ }
+ continue;
+ }
+
+@@ -2950,7 +2952,9 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
+ {
+ int ret = 0;
+ int done = 0, retry = 0;
+- struct page *pages[F2FS_ONSTACK_PAGES];
++ struct page *pages_local[F2FS_ONSTACK_PAGES];
++ struct page **pages = pages_local;
++ struct folio_batch fbatch;
+ struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
+ struct bio *bio = NULL;
+ sector_t last_block;
+@@ -2971,7 +2975,9 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
+ .private = NULL,
+ };
+ #endif
++ int nr_folios, p, idx;
+ int nr_pages;
++ unsigned int max_pages = F2FS_ONSTACK_PAGES;
+ pgoff_t index;
+ pgoff_t end; /* Inclusive */
+ pgoff_t done_index;
+@@ -2981,6 +2987,17 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
+ int submitted = 0;
+ int i;
+
++#ifdef CONFIG_F2FS_FS_COMPRESSION
++ if (f2fs_compressed_file(inode) &&
++ 1 << cc.log_cluster_size > F2FS_ONSTACK_PAGES) {
++ pages = f2fs_kzalloc(sbi, sizeof(struct page *) <<
++ cc.log_cluster_size, GFP_NOFS | __GFP_NOFAIL);
++ max_pages = 1 << cc.log_cluster_size;
++ }
++#endif
++
++ folio_batch_init(&fbatch);
++
+ if (get_dirty_pages(mapping->host) <=
+ SM_I(F2FS_M_SB(mapping))->min_hot_blocks)
+ set_inode_flag(mapping->host, FI_HOT_DATA);
+@@ -3006,13 +3023,38 @@ retry:
+ tag_pages_for_writeback(mapping, index, end);
+ done_index = index;
+ while (!done && !retry && (index <= end)) {
+- nr_pages = find_get_pages_range_tag(mapping, &index, end,
+- tag, F2FS_ONSTACK_PAGES, pages);
+- if (nr_pages == 0)
++ nr_pages = 0;
++again:
++ nr_folios = filemap_get_folios_tag(mapping, &index, end,
++ tag, &fbatch);
++ if (nr_folios == 0) {
++ if (nr_pages)
++ goto write;
+ break;
++ }
+
++ for (i = 0; i < nr_folios; i++) {
++ struct folio *folio = fbatch.folios[i];
++
++ idx = 0;
++ p = folio_nr_pages(folio);
++add_more:
++ pages[nr_pages] = folio_page(folio, idx);
++ folio_get(folio);
++ if (++nr_pages == max_pages) {
++ index = folio->index + idx + 1;
++ folio_batch_release(&fbatch);
++ goto write;
++ }
++ if (++idx < p)
++ goto add_more;
++ }
++ folio_batch_release(&fbatch);
++ goto again;
++write:
+ for (i = 0; i < nr_pages; i++) {
+ struct page *page = pages[i];
++ struct folio *folio = page_folio(page);
+ bool need_readd;
+ readd:
+ need_readd = false;
+@@ -3029,7 +3071,7 @@ readd:
+ }
+
+ if (!f2fs_cluster_can_merge_page(&cc,
+- page->index)) {
++ folio->index)) {
+ ret = f2fs_write_multi_pages(&cc,
+ &submitted, wbc, io_type);
+ if (!ret)
+@@ -3038,27 +3080,28 @@ readd:
+ }
+
+ if (unlikely(f2fs_cp_error(sbi)))
+- goto lock_page;
++ goto lock_folio;
+
+ if (!f2fs_cluster_is_empty(&cc))
+- goto lock_page;
++ goto lock_folio;
+
+ if (f2fs_all_cluster_page_ready(&cc,
+ pages, i, nr_pages, true))
+- goto lock_page;
++ goto lock_folio;
+
+ ret2 = f2fs_prepare_compress_overwrite(
+ inode, &pagep,
+- page->index, &fsdata);
++ folio->index, &fsdata);
+ if (ret2 < 0) {
+ ret = ret2;
+ done = 1;
+ break;
+ } else if (ret2 &&
+ (!f2fs_compress_write_end(inode,
+- fsdata, page->index, 1) ||
++ fsdata, folio->index, 1) ||
+ !f2fs_all_cluster_page_ready(&cc,
+- pages, i, nr_pages, false))) {
++ pages, i, nr_pages,
++ false))) {
+ retry = 1;
+ break;
+ }
+@@ -3071,46 +3114,47 @@ readd:
+ break;
+ }
+ #ifdef CONFIG_F2FS_FS_COMPRESSION
+-lock_page:
++lock_folio:
+ #endif
+- done_index = page->index;
++ done_index = folio->index;
+ retry_write:
+- lock_page(page);
++ folio_lock(folio);
+
+- if (unlikely(page->mapping != mapping)) {
++ if (unlikely(folio->mapping != mapping)) {
+ continue_unlock:
+- unlock_page(page);
++ folio_unlock(folio);
+ continue;
+ }
+
+- if (!PageDirty(page)) {
++ if (!folio_test_dirty(folio)) {
+ /* someone wrote it for us */
+ goto continue_unlock;
+ }
+
+- if (PageWriteback(page)) {
++ if (folio_test_writeback(folio)) {
+ if (wbc->sync_mode != WB_SYNC_NONE)
+- f2fs_wait_on_page_writeback(page,
++ f2fs_wait_on_page_writeback(
++ &folio->page,
+ DATA, true, true);
+ else
+ goto continue_unlock;
+ }
+
+- if (!clear_page_dirty_for_io(page))
++ if (!folio_clear_dirty_for_io(folio))
+ goto continue_unlock;
+
+ #ifdef CONFIG_F2FS_FS_COMPRESSION
+ if (f2fs_compressed_file(inode)) {
+- get_page(page);
+- f2fs_compress_ctx_add_page(&cc, page);
++ folio_get(folio);
++ f2fs_compress_ctx_add_page(&cc, &folio->page);
+ continue;
+ }
+ #endif
+- ret = f2fs_write_single_data_page(page, &submitted,
+- &bio, &last_block, wbc, io_type,
+- 0, true);
++ ret = f2fs_write_single_data_page(&folio->page,
++ &submitted, &bio, &last_block,
++ wbc, io_type, 0, true);
+ if (ret == AOP_WRITEPAGE_ACTIVATE)
+- unlock_page(page);
++ folio_unlock(folio);
+ #ifdef CONFIG_F2FS_FS_COMPRESSION
+ result:
+ #endif
+@@ -3134,7 +3178,8 @@ result:
+ }
+ goto next;
+ }
+- done_index = page->index + 1;
++ done_index = folio->index +
++ folio_nr_pages(folio);
+ done = 1;
+ break;
+ }
+@@ -3182,6 +3227,11 @@ next:
+ if (bio)
+ f2fs_submit_merged_ipu_write(sbi, &bio, NULL);
+
++#ifdef CONFIG_F2FS_FS_COMPRESSION
++ if (pages != pages_local)
++ kfree(pages);
++#endif
++
+ return ret;
+ }
+
+diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
+index 746c71716bead..d0c17366ebf48 100644
+--- a/fs/f2fs/file.c
++++ b/fs/f2fs/file.c
+@@ -3249,6 +3249,7 @@ int f2fs_precache_extents(struct inode *inode)
+ return -EOPNOTSUPP;
+
+ map.m_lblk = 0;
++ map.m_pblk = 0;
+ map.m_next_pgofs = NULL;
+ map.m_next_extent = &m_next_extent;
+ map.m_seg_type = NO_CHECK_TYPE;
+diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
+index 2046f633fe57a..1ba85ef97cbd3 100644
+--- a/fs/f2fs/super.c
++++ b/fs/f2fs/super.c
+@@ -548,6 +548,29 @@ static int f2fs_set_test_dummy_encryption(struct super_block *sb,
+ }
+
+ #ifdef CONFIG_F2FS_FS_COMPRESSION
++static bool is_compress_extension_exist(struct f2fs_sb_info *sbi,
++ const char *new_ext, bool is_ext)
++{
++ unsigned char (*ext)[F2FS_EXTENSION_LEN];
++ int ext_cnt;
++ int i;
++
++ if (is_ext) {
++ ext = F2FS_OPTION(sbi).extensions;
++ ext_cnt = F2FS_OPTION(sbi).compress_ext_cnt;
++ } else {
++ ext = F2FS_OPTION(sbi).noextensions;
++ ext_cnt = F2FS_OPTION(sbi).nocompress_ext_cnt;
++ }
++
++ for (i = 0; i < ext_cnt; i++) {
++ if (!strcasecmp(new_ext, ext[i]))
++ return true;
++ }
++
++ return false;
++}
++
+ /*
+ * 1. The same extension name cannot not appear in both compress and non-compress extension
+ * at the same time.
+@@ -1145,6 +1168,11 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
+ return -EINVAL;
+ }
+
++ if (is_compress_extension_exist(sbi, name, true)) {
++ kfree(name);
++ break;
++ }
++
+ strcpy(ext[ext_cnt], name);
+ F2FS_OPTION(sbi).compress_ext_cnt++;
+ kfree(name);
+@@ -1169,6 +1197,11 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
+ return -EINVAL;
+ }
+
++ if (is_compress_extension_exist(sbi, name, false)) {
++ kfree(name);
++ break;
++ }
++
+ strcpy(noext[noext_cnt], name);
+ F2FS_OPTION(sbi).nocompress_ext_cnt++;
+ kfree(name);
+diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
+index a5c31a479aacc..be2d329843d44 100644
+--- a/fs/fs-writeback.c
++++ b/fs/fs-writeback.c
+@@ -611,6 +611,24 @@ out_free:
+ kfree(isw);
+ }
+
++static bool isw_prepare_wbs_switch(struct inode_switch_wbs_context *isw,
++ struct list_head *list, int *nr)
++{
++ struct inode *inode;
++
++ list_for_each_entry(inode, list, i_io_list) {
++ if (!inode_prepare_wbs_switch(inode, isw->new_wb))
++ continue;
++
++ isw->inodes[*nr] = inode;
++ (*nr)++;
++
++ if (*nr >= WB_MAX_INODES_PER_ISW - 1)
++ return true;
++ }
++ return false;
++}
++
+ /**
+ * cleanup_offline_cgwb - detach associated inodes
+ * @wb: target wb
+@@ -623,7 +641,6 @@ bool cleanup_offline_cgwb(struct bdi_writeback *wb)
+ {
+ struct cgroup_subsys_state *memcg_css;
+ struct inode_switch_wbs_context *isw;
+- struct inode *inode;
+ int nr;
+ bool restart = false;
+
+@@ -645,17 +662,17 @@ bool cleanup_offline_cgwb(struct bdi_writeback *wb)
+
+ nr = 0;
+ spin_lock(&wb->list_lock);
+- list_for_each_entry(inode, &wb->b_attached, i_io_list) {
+- if (!inode_prepare_wbs_switch(inode, isw->new_wb))
+- continue;
+-
+- isw->inodes[nr++] = inode;
+-
+- if (nr >= WB_MAX_INODES_PER_ISW - 1) {
+- restart = true;
+- break;
+- }
+- }
++ /*
++ * In addition to the inodes that have completed writeback, also switch
++ * cgwbs for those inodes only with dirty timestamps. Otherwise, those
++ * inodes won't be written back for a long time when lazytime is
++ * enabled, and thus pinning the dying cgwbs. It won't break the
++ * bandwidth restrictions, as writeback of inode metadata is not
++ * accounted for.
++ */
++ restart = isw_prepare_wbs_switch(isw, &wb->b_attached, &nr);
++ if (!restart)
++ restart = isw_prepare_wbs_switch(isw, &wb->b_dirty_time, &nr);
+ spin_unlock(&wb->list_lock);
+
+ /* no attached inodes? bail out */
+diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
+index 0c034ea399547..7787fb544621c 100644
+--- a/fs/pstore/platform.c
++++ b/fs/pstore/platform.c
+@@ -561,6 +561,8 @@ out:
+ */
+ int pstore_register(struct pstore_info *psi)
+ {
++ char *new_backend;
++
+ if (backend && strcmp(backend, psi->name)) {
+ pr_warn("ignoring unexpected backend '%s'\n", psi->name);
+ return -EPERM;
+@@ -580,11 +582,16 @@ int pstore_register(struct pstore_info *psi)
+ return -EINVAL;
+ }
+
++ new_backend = kstrdup(psi->name, GFP_KERNEL);
++ if (!new_backend)
++ return -ENOMEM;
++
+ mutex_lock(&psinfo_lock);
+ if (psinfo) {
+ pr_warn("backend '%s' already loaded: ignoring '%s'\n",
+ psinfo->name, psi->name);
+ mutex_unlock(&psinfo_lock);
++ kfree(new_backend);
+ return -EBUSY;
+ }
+
+@@ -617,7 +624,7 @@ int pstore_register(struct pstore_info *psi)
+ * Update the module parameter backend, so it is visible
+ * through /sys/module/pstore/parameters/backend
+ */
+- backend = kstrdup(psi->name, GFP_KERNEL);
++ backend = new_backend;
+
+ pr_info("Registered %s as persistent store backend\n", psi->name);
+
+diff --git a/include/kunit/visibility.h b/include/kunit/visibility.h
+new file mode 100644
+index 0000000000000..0dfe35feeec60
+--- /dev/null
++++ b/include/kunit/visibility.h
+@@ -0,0 +1,33 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++/*
++ * KUnit API to allow symbols to be conditionally visible during KUnit
++ * testing
++ *
++ * Copyright (C) 2022, Google LLC.
++ * Author: Rae Moar <rmoar@google.com>
++ */
++
++#ifndef _KUNIT_VISIBILITY_H
++#define _KUNIT_VISIBILITY_H
++
++#if IS_ENABLED(CONFIG_KUNIT)
++ /**
++ * VISIBLE_IF_KUNIT - A macro that sets symbols to be static if
++ * CONFIG_KUNIT is not enabled. Otherwise if CONFIG_KUNIT is enabled
++ * there is no change to the symbol definition.
++ */
++ #define VISIBLE_IF_KUNIT
++ /**
++ * EXPORT_SYMBOL_IF_KUNIT(symbol) - Exports symbol into
++ * EXPORTED_FOR_KUNIT_TESTING namespace only if CONFIG_KUNIT is
++ * enabled. Must use MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING)
++ * in test file in order to use symbols.
++ */
++ #define EXPORT_SYMBOL_IF_KUNIT(symbol) EXPORT_SYMBOL_NS(symbol, \
++ EXPORTED_FOR_KUNIT_TESTING)
++#else
++ #define VISIBLE_IF_KUNIT static
++ #define EXPORT_SYMBOL_IF_KUNIT(symbol)
++#endif
++
++#endif /* _KUNIT_VISIBILITY_H */
+diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
+index aefb06373720f..15e336281d1f4 100644
+--- a/include/linux/clk-provider.h
++++ b/include/linux/clk-provider.h
+@@ -72,7 +72,7 @@ void clk_hw_forward_rate_request(const struct clk_hw *core,
+ unsigned long parent_rate);
+
+ /**
+- * struct clk_duty - Struture encoding the duty cycle ratio of a clock
++ * struct clk_duty - Structure encoding the duty cycle ratio of a clock
+ *
+ * @num: Numerator of the duty cycle ratio
+ * @den: Denominator of the duty cycle ratio
+@@ -127,7 +127,7 @@ struct clk_duty {
+ * @restore_context: Restore the context of the clock after a restoration
+ * of power.
+ *
+- * @recalc_rate Recalculate the rate of this clock, by querying hardware. The
++ * @recalc_rate: Recalculate the rate of this clock, by querying hardware. The
+ * parent rate is an input parameter. It is up to the caller to
+ * ensure that the prepare_mutex is held across this call. If the
+ * driver cannot figure out a rate for this clock, it must return
+@@ -454,7 +454,7 @@ struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
+ * clock with the clock framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+- * @parent_name: name of clock's parent
++ * @parent_data: name of clock's parent
+ * @flags: framework-specific flags
+ * @fixed_rate: non-adjustable clock rate
+ * @fixed_accuracy: non-adjustable clock accuracy
+@@ -469,7 +469,7 @@ struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
+ * the clock framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+- * @parent_name: name of clock's parent
++ * @parent_data: name of clock's parent
+ * @flags: framework-specific flags
+ * @fixed_rate: non-adjustable clock rate
+ */
+@@ -628,7 +628,7 @@ struct clk_div_table {
+ * Clock with an adjustable divider affecting its output frequency. Implements
+ * .recalc_rate, .set_rate and .round_rate
+ *
+- * Flags:
++ * @flags:
+ * CLK_DIVIDER_ONE_BASED - by default the divisor is the value read from the
+ * register plus one. If CLK_DIVIDER_ONE_BASED is set then the divider is
+ * the raw value read from the register, with the value of zero considered
+@@ -1109,11 +1109,12 @@ struct clk_hw *clk_hw_register_fixed_factor_parent_hw(struct device *dev,
+ * @mwidth: width of the numerator bit field
+ * @nshift: shift to the denominator bit field
+ * @nwidth: width of the denominator bit field
++ * @approximation: clk driver's callback for calculating the divider clock
+ * @lock: register lock
+ *
+ * Clock with adjustable fractional divider affecting its output frequency.
+ *
+- * Flags:
++ * @flags:
+ * CLK_FRAC_DIVIDER_ZERO_BASED - by default the numerator and denominator
+ * is the value read from the register. If CLK_FRAC_DIVIDER_ZERO_BASED
+ * is set then the numerator and denominator are both the value read
+@@ -1172,7 +1173,7 @@ void clk_hw_unregister_fractional_divider(struct clk_hw *hw);
+ * Clock with an adjustable multiplier affecting its output frequency.
+ * Implements .recalc_rate, .set_rate and .round_rate
+ *
+- * Flags:
++ * @flags:
+ * CLK_MULTIPLIER_ZERO_BYPASS - By default, the multiplier is the value read
+ * from the register, with 0 being a valid value effectively
+ * zeroing the output clock rate. If CLK_MULTIPLIER_ZERO_BYPASS is
+diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
+index 2be2091c2b447..c7e0d80dbf6a5 100644
+--- a/include/linux/cpuhotplug.h
++++ b/include/linux/cpuhotplug.h
+@@ -191,6 +191,7 @@ enum cpuhp_state {
+ /* Must be the last timer callback */
+ CPUHP_AP_DUMMY_TIMER_STARTING,
+ CPUHP_AP_ARM_XEN_STARTING,
++ CPUHP_AP_ARM_XEN_RUNSTATE_STARTING,
+ CPUHP_AP_ARM_CORESIGHT_STARTING,
+ CPUHP_AP_ARM_CORESIGHT_CTI_STARTING,
+ CPUHP_AP_ARM64_ISNDEP_STARTING,
+diff --git a/include/linux/hisi_acc_qm.h b/include/linux/hisi_acc_qm.h
+index c3618255b1504..41203ce27d64c 100644
+--- a/include/linux/hisi_acc_qm.h
++++ b/include/linux/hisi_acc_qm.h
+@@ -145,6 +145,13 @@ enum qm_vf_state {
+ QM_NOT_READY,
+ };
+
++enum qm_misc_ctl_bits {
++ QM_DRIVER_REMOVING = 0x0,
++ QM_RST_SCHED,
++ QM_RESETTING,
++ QM_MODULE_PARAM,
++};
++
+ enum qm_cap_bits {
+ QM_SUPPORT_DB_ISOLATION = 0x0,
+ QM_SUPPORT_FUNC_QOS,
+@@ -471,11 +478,11 @@ int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen);
+ int hisi_qm_sriov_configure(struct pci_dev *pdev, int num_vfs);
+ void hisi_qm_dev_err_init(struct hisi_qm *qm);
+ void hisi_qm_dev_err_uninit(struct hisi_qm *qm);
+-int hisi_qm_diff_regs_init(struct hisi_qm *qm,
+- struct dfx_diff_registers *dregs, int reg_len);
+-void hisi_qm_diff_regs_uninit(struct hisi_qm *qm, int reg_len);
++int hisi_qm_regs_debugfs_init(struct hisi_qm *qm,
++ struct dfx_diff_registers *dregs, u32 reg_len);
++void hisi_qm_regs_debugfs_uninit(struct hisi_qm *qm, u32 reg_len);
+ void hisi_qm_acc_diff_regs_dump(struct hisi_qm *qm, struct seq_file *s,
+- struct dfx_diff_registers *dregs, int regs_len);
++ struct dfx_diff_registers *dregs, u32 regs_len);
+
+ pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev *pdev,
+ pci_channel_state_t state);
+diff --git a/include/linux/hw_random.h b/include/linux/hw_random.h
+index 77c2885c4c130..2505d58bd5829 100644
+--- a/include/linux/hw_random.h
++++ b/include/linux/hw_random.h
+@@ -63,5 +63,6 @@ extern void hwrng_unregister(struct hwrng *rng);
+ extern void devm_hwrng_unregister(struct device *dve, struct hwrng *rng);
+
+ extern long hwrng_msleep(struct hwrng *rng, unsigned int msecs);
++extern long hwrng_yield(struct hwrng *rng);
+
+ #endif /* LINUX_HWRANDOM_H_ */
+diff --git a/include/linux/idr.h b/include/linux/idr.h
+index a0dce14090a9e..da5f5fa4a3a6a 100644
+--- a/include/linux/idr.h
++++ b/include/linux/idr.h
+@@ -200,7 +200,7 @@ static inline void idr_preload_end(void)
+ */
+ #define idr_for_each_entry_ul(idr, entry, tmp, id) \
+ for (tmp = 0, id = 0; \
+- tmp <= id && ((entry) = idr_get_next_ul(idr, &(id))) != NULL; \
++ ((entry) = tmp <= id ? idr_get_next_ul(idr, &(id)) : NULL) != NULL; \
+ tmp = id, ++id)
+
+ /**
+@@ -224,10 +224,12 @@ static inline void idr_preload_end(void)
+ * @id: Entry ID.
+ *
+ * Continue to iterate over entries, continuing after the current position.
++ * After normal termination @entry is left with the value NULL. This
++ * is convenient for a "not found" value.
+ */
+ #define idr_for_each_entry_continue_ul(idr, entry, tmp, id) \
+ for (tmp = id; \
+- tmp <= id && ((entry) = idr_get_next_ul(idr, &(id))) != NULL; \
++ ((entry) = tmp <= id ? idr_get_next_ul(idr, &(id)) : NULL) != NULL; \
+ tmp = id, ++id)
+
+ /*
+diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h
+index 0bc7cba798a34..b449765b5cac1 100644
+--- a/include/linux/mfd/core.h
++++ b/include/linux/mfd/core.h
+@@ -92,7 +92,7 @@ struct mfd_cell {
+ * (above) when matching OF nodes with devices that have identical
+ * compatible strings
+ */
+- const u64 of_reg;
++ u64 of_reg;
+
+ /* Set to 'true' to use 'of_reg' (above) - allows for of_reg=0 */
+ bool use_of_reg;
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index 5a04fbf724768..0373e09359905 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -5190,5 +5190,6 @@ extern struct net_device *blackhole_netdev;
+ #define DEV_STATS_INC(DEV, FIELD) atomic_long_inc(&(DEV)->stats.__##FIELD)
+ #define DEV_STATS_ADD(DEV, FIELD, VAL) \
+ atomic_long_add((VAL), &(DEV)->stats.__##FIELD)
++#define DEV_STATS_READ(DEV, FIELD) atomic_long_read(&(DEV)->stats.__##FIELD)
+
+ #endif /* _LINUX_NETDEVICE_H */
+diff --git a/include/linux/numa.h b/include/linux/numa.h
+index 59df211d051fa..0f512c0aba54b 100644
+--- a/include/linux/numa.h
++++ b/include/linux/numa.h
+@@ -12,6 +12,7 @@
+ #define MAX_NUMNODES (1 << NODES_SHIFT)
+
+ #define NUMA_NO_NODE (-1)
++#define NUMA_NO_MEMBLK (-1)
+
+ /* optionally keep NUMA memory info available post init */
+ #ifdef CONFIG_NUMA_KEEP_MEMINFO
+@@ -43,6 +44,12 @@ static inline int phys_to_target_node(u64 start)
+ return 0;
+ }
+ #endif
++#ifndef numa_fill_memblks
++static inline int __init numa_fill_memblks(u64 start, u64 end)
++{
++ return NUMA_NO_MEMBLK;
++}
++#endif
+ #else /* !CONFIG_NUMA */
+ static inline int numa_map_to_online_node(int node)
+ {
+diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
+index bbccb40442224..03307b72de6c6 100644
+--- a/include/linux/pagemap.h
++++ b/include/linux/pagemap.h
+@@ -720,6 +720,8 @@ unsigned filemap_get_folios(struct address_space *mapping, pgoff_t *start,
+ pgoff_t end, struct folio_batch *fbatch);
+ unsigned filemap_get_folios_contig(struct address_space *mapping,
+ pgoff_t *start, pgoff_t end, struct folio_batch *fbatch);
++unsigned filemap_get_folios_tag(struct address_space *mapping, pgoff_t *start,
++ pgoff_t end, xa_mark_t tag, struct folio_batch *fbatch);
+ unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
+ pgoff_t end, xa_mark_t tag, unsigned int nr_pages,
+ struct page **pages);
+diff --git a/include/linux/string.h b/include/linux/string.h
+index cf7607b321027..26ab8928d8661 100644
+--- a/include/linux/string.h
++++ b/include/linux/string.h
+@@ -276,10 +276,12 @@ void memcpy_and_pad(void *dest, size_t dest_len, const void *src, size_t count,
+ */
+ #define strtomem_pad(dest, src, pad) do { \
+ const size_t _dest_len = __builtin_object_size(dest, 1); \
++ const size_t _src_len = __builtin_object_size(src, 1); \
+ \
+ BUILD_BUG_ON(!__builtin_constant_p(_dest_len) || \
+ _dest_len == (size_t)-1); \
+- memcpy_and_pad(dest, _dest_len, src, strnlen(src, _dest_len), pad); \
++ memcpy_and_pad(dest, _dest_len, src, \
++ strnlen(src, min(_src_len, _dest_len)), pad); \
+ } while (0)
+
+ /**
+@@ -297,10 +299,11 @@ void memcpy_and_pad(void *dest, size_t dest_len, const void *src, size_t count,
+ */
+ #define strtomem(dest, src) do { \
+ const size_t _dest_len = __builtin_object_size(dest, 1); \
++ const size_t _src_len = __builtin_object_size(src, 1); \
+ \
+ BUILD_BUG_ON(!__builtin_constant_p(_dest_len) || \
+ _dest_len == (size_t)-1); \
+- memcpy(dest, src, min(_dest_len, strnlen(src, _dest_len))); \
++ memcpy(dest, src, strnlen(src, min(_src_len, _dest_len))); \
+ } while (0)
+
+ /**
+diff --git a/include/linux/verification.h b/include/linux/verification.h
+index f34e50ebcf60a..cb2d47f280910 100644
+--- a/include/linux/verification.h
++++ b/include/linux/verification.h
+@@ -8,6 +8,7 @@
+ #ifndef _LINUX_VERIFICATION_H
+ #define _LINUX_VERIFICATION_H
+
++#include <linux/errno.h>
+ #include <linux/types.h>
+
+ /*
+diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h
+index 35d7eedb5e8e4..3f9c166113063 100644
+--- a/include/linux/virtio_vsock.h
++++ b/include/linux/virtio_vsock.h
+@@ -7,6 +7,109 @@
+ #include <net/sock.h>
+ #include <net/af_vsock.h>
+
++#define VIRTIO_VSOCK_SKB_HEADROOM (sizeof(struct virtio_vsock_hdr))
++
++struct virtio_vsock_skb_cb {
++ bool reply;
++ bool tap_delivered;
++};
++
++#define VIRTIO_VSOCK_SKB_CB(skb) ((struct virtio_vsock_skb_cb *)((skb)->cb))
++
++static inline struct virtio_vsock_hdr *virtio_vsock_hdr(struct sk_buff *skb)
++{
++ return (struct virtio_vsock_hdr *)skb->head;
++}
++
++static inline bool virtio_vsock_skb_reply(struct sk_buff *skb)
++{
++ return VIRTIO_VSOCK_SKB_CB(skb)->reply;
++}
++
++static inline void virtio_vsock_skb_set_reply(struct sk_buff *skb)
++{
++ VIRTIO_VSOCK_SKB_CB(skb)->reply = true;
++}
++
++static inline bool virtio_vsock_skb_tap_delivered(struct sk_buff *skb)
++{
++ return VIRTIO_VSOCK_SKB_CB(skb)->tap_delivered;
++}
++
++static inline void virtio_vsock_skb_set_tap_delivered(struct sk_buff *skb)
++{
++ VIRTIO_VSOCK_SKB_CB(skb)->tap_delivered = true;
++}
++
++static inline void virtio_vsock_skb_clear_tap_delivered(struct sk_buff *skb)
++{
++ VIRTIO_VSOCK_SKB_CB(skb)->tap_delivered = false;
++}
++
++static inline void virtio_vsock_skb_rx_put(struct sk_buff *skb)
++{
++ u32 len;
++
++ len = le32_to_cpu(virtio_vsock_hdr(skb)->len);
++
++ if (len > 0)
++ skb_put(skb, len);
++}
++
++static inline struct sk_buff *virtio_vsock_alloc_skb(unsigned int size, gfp_t mask)
++{
++ struct sk_buff *skb;
++
++ if (size < VIRTIO_VSOCK_SKB_HEADROOM)
++ return NULL;
++
++ skb = alloc_skb(size, mask);
++ if (!skb)
++ return NULL;
++
++ skb_reserve(skb, VIRTIO_VSOCK_SKB_HEADROOM);
++ return skb;
++}
++
++static inline void
++virtio_vsock_skb_queue_head(struct sk_buff_head *list, struct sk_buff *skb)
++{
++ spin_lock_bh(&list->lock);
++ __skb_queue_head(list, skb);
++ spin_unlock_bh(&list->lock);
++}
++
++static inline void
++virtio_vsock_skb_queue_tail(struct sk_buff_head *list, struct sk_buff *skb)
++{
++ spin_lock_bh(&list->lock);
++ __skb_queue_tail(list, skb);
++ spin_unlock_bh(&list->lock);
++}
++
++static inline struct sk_buff *virtio_vsock_skb_dequeue(struct sk_buff_head *list)
++{
++ struct sk_buff *skb;
++
++ spin_lock_bh(&list->lock);
++ skb = __skb_dequeue(list);
++ spin_unlock_bh(&list->lock);
++
++ return skb;
++}
++
++static inline void virtio_vsock_skb_queue_purge(struct sk_buff_head *list)
++{
++ spin_lock_bh(&list->lock);
++ __skb_queue_purge(list);
++ spin_unlock_bh(&list->lock);
++}
++
++static inline size_t virtio_vsock_skb_len(struct sk_buff *skb)
++{
++ return (size_t)(skb_end_pointer(skb) - skb->head);
++}
++
+ #define VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE (1024 * 4)
+ #define VIRTIO_VSOCK_MAX_BUF_SIZE 0xFFFFFFFFUL
+ #define VIRTIO_VSOCK_MAX_PKT_BUF_SIZE (1024 * 64)
+@@ -35,23 +138,10 @@ struct virtio_vsock_sock {
+ u32 last_fwd_cnt;
+ u32 rx_bytes;
+ u32 buf_alloc;
+- struct list_head rx_queue;
++ struct sk_buff_head rx_queue;
+ u32 msg_count;
+ };
+
+-struct virtio_vsock_pkt {
+- struct virtio_vsock_hdr hdr;
+- struct list_head list;
+- /* socket refcnt not held, only use for cancellation */
+- struct vsock_sock *vsk;
+- void *buf;
+- u32 buf_len;
+- u32 len;
+- u32 off;
+- bool reply;
+- bool tap_delivered;
+-};
+-
+ struct virtio_vsock_pkt_info {
+ u32 remote_cid, remote_port;
+ struct vsock_sock *vsk;
+@@ -68,7 +158,7 @@ struct virtio_transport {
+ struct vsock_transport transport;
+
+ /* Takes ownership of the packet */
+- int (*send_pkt)(struct virtio_vsock_pkt *pkt);
++ int (*send_pkt)(struct sk_buff *skb);
+ };
+
+ ssize_t
+@@ -149,11 +239,10 @@ virtio_transport_dgram_enqueue(struct vsock_sock *vsk,
+ void virtio_transport_destruct(struct vsock_sock *vsk);
+
+ void virtio_transport_recv_pkt(struct virtio_transport *t,
+- struct virtio_vsock_pkt *pkt);
+-void virtio_transport_free_pkt(struct virtio_vsock_pkt *pkt);
+-void virtio_transport_inc_tx_pkt(struct virtio_vsock_sock *vvs, struct virtio_vsock_pkt *pkt);
++ struct sk_buff *skb);
++void virtio_transport_inc_tx_pkt(struct virtio_vsock_sock *vvs, struct sk_buff *skb);
+ u32 virtio_transport_get_credit(struct virtio_vsock_sock *vvs, u32 wanted);
+ void virtio_transport_put_credit(struct virtio_vsock_sock *vvs, u32 credit);
+-void virtio_transport_deliver_tap_pkt(struct virtio_vsock_pkt *pkt);
+-
++void virtio_transport_deliver_tap_pkt(struct sk_buff *skb);
++int virtio_transport_purge_skbs(void *vsk, struct sk_buff_head *list);
+ #endif /* _LINUX_VIRTIO_VSOCK_H */
+diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
+index 7a6c3059d50b5..5bf5c1ab542ce 100644
+--- a/include/net/cfg80211.h
++++ b/include/net/cfg80211.h
+@@ -5690,6 +5690,16 @@ void wiphy_work_queue(struct wiphy *wiphy, struct wiphy_work *work);
+ */
+ void wiphy_work_cancel(struct wiphy *wiphy, struct wiphy_work *work);
+
++/**
++ * wiphy_work_flush - flush previously queued work
++ * @wiphy: the wiphy, for debug purposes
++ * @work: the work to flush, this can be %NULL to flush all work
++ *
++ * Flush the work (i.e. run it if pending). This must be called
++ * under the wiphy mutex acquired by wiphy_lock().
++ */
++void wiphy_work_flush(struct wiphy *wiphy, struct wiphy_work *work);
++
+ struct wiphy_delayed_work {
+ struct wiphy_work work;
+ struct wiphy *wiphy;
+@@ -5733,6 +5743,17 @@ void wiphy_delayed_work_queue(struct wiphy *wiphy,
+ void wiphy_delayed_work_cancel(struct wiphy *wiphy,
+ struct wiphy_delayed_work *dwork);
+
++/**
++ * wiphy_delayed_work_flush - flush previously queued delayed work
++ * @wiphy: the wiphy, for debug purposes
++ * @work: the work to flush
++ *
++ * Flush the work (i.e. run it if pending). This must be called
++ * under the wiphy mutex acquired by wiphy_lock().
++ */
++void wiphy_delayed_work_flush(struct wiphy *wiphy,
++ struct wiphy_delayed_work *dwork);
++
+ /**
+ * struct wireless_dev - wireless device state
+ *
+diff --git a/include/net/flow.h b/include/net/flow.h
+index 2f0da4f0318b5..079cc493fe67d 100644
+--- a/include/net/flow.h
++++ b/include/net/flow.h
+@@ -39,8 +39,8 @@ struct flowi_common {
+ #define FLOWI_FLAG_KNOWN_NH 0x02
+ __u32 flowic_secid;
+ kuid_t flowic_uid;
+- struct flowi_tunnel flowic_tun_key;
+ __u32 flowic_multipath_hash;
++ struct flowi_tunnel flowic_tun_key;
+ };
+
+ union flowi_uli {
+diff --git a/include/net/netfilter/nf_nat_redirect.h b/include/net/netfilter/nf_nat_redirect.h
+index 2418653a66db1..279380de904c8 100644
+--- a/include/net/netfilter/nf_nat_redirect.h
++++ b/include/net/netfilter/nf_nat_redirect.h
+@@ -6,8 +6,7 @@
+ #include <uapi/linux/netfilter/nf_nat.h>
+
+ unsigned int
+-nf_nat_redirect_ipv4(struct sk_buff *skb,
+- const struct nf_nat_ipv4_multi_range_compat *mr,
++nf_nat_redirect_ipv4(struct sk_buff *skb, const struct nf_nat_range2 *range,
+ unsigned int hooknum);
+ unsigned int
+ nf_nat_redirect_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range,
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index 548c75c8a34c7..19646fdec23dc 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -810,7 +810,7 @@ static inline u32 tcp_time_stamp(const struct tcp_sock *tp)
+ }
+
+ /* Convert a nsec timestamp into TCP TSval timestamp (ms based currently) */
+-static inline u32 tcp_ns_to_ts(u64 ns)
++static inline u64 tcp_ns_to_ts(u64 ns)
+ {
+ return div_u64(ns, NSEC_PER_SEC / TCP_TS_HZ);
+ }
+diff --git a/include/uapi/linux/psp-sev.h b/include/uapi/linux/psp-sev.h
+index 91b4c63d5cbf4..1c9da485318f9 100644
+--- a/include/uapi/linux/psp-sev.h
++++ b/include/uapi/linux/psp-sev.h
+@@ -36,6 +36,13 @@ enum {
+ * SEV Firmware status code
+ */
+ typedef enum {
++ /*
++ * This error code is not in the SEV spec. Its purpose is to convey that
++ * there was an error that prevented the SEV firmware from being called.
++ * The SEV API error codes are 16 bits, so the -1 value will not overlap
++ * with possible values from the specification.
++ */
++ SEV_RET_NO_FW_CALL = -1,
+ SEV_RET_SUCCESS = 0,
+ SEV_RET_INVALID_PLATFORM_STATE,
+ SEV_RET_INVALID_GUEST_STATE,
+diff --git a/include/uapi/linux/sev-guest.h b/include/uapi/linux/sev-guest.h
+index 256aaeff7e654..2aa39112cf8dd 100644
+--- a/include/uapi/linux/sev-guest.h
++++ b/include/uapi/linux/sev-guest.h
+@@ -52,8 +52,14 @@ struct snp_guest_request_ioctl {
+ __u64 req_data;
+ __u64 resp_data;
+
+- /* firmware error code on failure (see psp-sev.h) */
+- __u64 fw_err;
++ /* bits[63:32]: VMM error code, bits[31:0] firmware error code (see psp-sev.h) */
++ union {
++ __u64 exitinfo2;
++ struct {
++ __u32 fw_error;
++ __u32 vmm_error;
++ };
++ };
+ };
+
+ struct snp_ext_report_req {
+@@ -77,4 +83,12 @@ struct snp_ext_report_req {
+ /* Get SNP extended report as defined in the GHCB specification version 2. */
+ #define SNP_GET_EXT_REPORT _IOWR(SNP_GUEST_REQ_IOC_TYPE, 0x2, struct snp_guest_request_ioctl)
+
++/* Guest message request EXIT_INFO_2 constants */
++#define SNP_GUEST_FW_ERR_MASK GENMASK_ULL(31, 0)
++#define SNP_GUEST_VMM_ERR_SHIFT 32
++#define SNP_GUEST_VMM_ERR(x) (((u64)x) << SNP_GUEST_VMM_ERR_SHIFT)
++
++#define SNP_GUEST_VMM_ERR_INVALID_LEN 1
++#define SNP_GUEST_VMM_ERR_BUSY 2
++
+ #endif /* __UAPI_LINUX_SEV_GUEST_H_ */
+diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c
+index acc37e5a6d4e1..57ef6850c6a87 100644
+--- a/io_uring/kbuf.c
++++ b/io_uring/kbuf.c
+@@ -19,12 +19,15 @@
+
+ #define BGID_ARRAY 64
+
++/* BIDs are addressed by a 16-bit field in a CQE */
++#define MAX_BIDS_PER_BGID (1 << 16)
++
+ struct io_provide_buf {
+ struct file *file;
+ __u64 addr;
+ __u32 len;
+ __u32 bgid;
+- __u16 nbufs;
++ __u32 nbufs;
+ __u16 bid;
+ };
+
+@@ -281,7 +284,7 @@ int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+ return -EINVAL;
+
+ tmp = READ_ONCE(sqe->fd);
+- if (!tmp || tmp > USHRT_MAX)
++ if (!tmp || tmp > MAX_BIDS_PER_BGID)
+ return -EINVAL;
+
+ memset(p, 0, sizeof(*p));
+@@ -327,7 +330,7 @@ int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe
+ return -EINVAL;
+
+ tmp = READ_ONCE(sqe->fd);
+- if (!tmp || tmp > USHRT_MAX)
++ if (!tmp || tmp > MAX_BIDS_PER_BGID)
+ return -E2BIG;
+ p->nbufs = tmp;
+ p->addr = READ_ONCE(sqe->addr);
+@@ -347,7 +350,7 @@ int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe
+ tmp = READ_ONCE(sqe->off);
+ if (tmp > USHRT_MAX)
+ return -E2BIG;
+- if (tmp + p->nbufs >= USHRT_MAX)
++ if (tmp + p->nbufs > MAX_BIDS_PER_BGID)
+ return -EINVAL;
+ p->bid = tmp;
+ return 0;
+diff --git a/io_uring/net.c b/io_uring/net.c
+index 9fe1aada3ad00..57c626cb4d1a5 100644
+--- a/io_uring/net.c
++++ b/io_uring/net.c
+@@ -1433,16 +1433,6 @@ int io_connect(struct io_kiocb *req, unsigned int issue_flags)
+ int ret;
+ bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
+
+- if (connect->in_progress) {
+- struct socket *socket;
+-
+- ret = -ENOTSOCK;
+- socket = sock_from_file(req->file);
+- if (socket)
+- ret = sock_error(socket->sk);
+- goto out;
+- }
+-
+ if (req_has_async_data(req)) {
+ io = req->async_data;
+ } else {
+@@ -1462,9 +1452,7 @@ int io_connect(struct io_kiocb *req, unsigned int issue_flags)
+ && force_nonblock) {
+ if (ret == -EINPROGRESS) {
+ connect->in_progress = true;
+- return -EAGAIN;
+- }
+- if (ret == -ECONNABORTED) {
++ } else if (ret == -ECONNABORTED) {
+ if (connect->seen_econnaborted)
+ goto out;
+ connect->seen_econnaborted = true;
+@@ -1478,6 +1466,16 @@ int io_connect(struct io_kiocb *req, unsigned int issue_flags)
+ memcpy(req->async_data, &__io, sizeof(__io));
+ return -EAGAIN;
+ }
++ if (connect->in_progress) {
++ /*
++ * At least bluetooth will return -EBADFD on a re-connect
++ * attempt, and it's (supposedly) also valid to get -EISCONN
++ * which means the previous result is good. For both of these,
++ * grab the sock_error() and use that for the completion.
++ */
++ if (ret == -EBADFD || ret == -EISCONN)
++ ret = sock_error(sock_from_file(req->file)->sk);
++ }
+ if (ret == -ERESTARTSYS)
+ ret = -EINTR;
+ out:
+diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
+index e4e7f343346f9..ce0051eee746e 100644
+--- a/kernel/bpf/hashtab.c
++++ b/kernel/bpf/hashtab.c
+@@ -155,13 +155,15 @@ static inline int htab_lock_bucket(const struct bpf_htab *htab,
+ hash = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets - 1);
+
+ preempt_disable();
++ local_irq_save(flags);
+ if (unlikely(__this_cpu_inc_return(*(htab->map_locked[hash])) != 1)) {
+ __this_cpu_dec(*(htab->map_locked[hash]));
++ local_irq_restore(flags);
+ preempt_enable();
+ return -EBUSY;
+ }
+
+- raw_spin_lock_irqsave(&b->raw_lock, flags);
++ raw_spin_lock(&b->raw_lock);
+ *pflags = flags;
+
+ return 0;
+@@ -172,8 +174,9 @@ static inline void htab_unlock_bucket(const struct bpf_htab *htab,
+ unsigned long flags)
+ {
+ hash = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets - 1);
+- raw_spin_unlock_irqrestore(&b->raw_lock, flags);
++ raw_spin_unlock(&b->raw_lock);
+ __this_cpu_dec(*(htab->map_locked[hash]));
++ local_irq_restore(flags);
+ preempt_enable();
+ }
+
+diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
+index a6b04faed282b..6212e4ae084bb 100644
+--- a/kernel/bpf/helpers.c
++++ b/kernel/bpf/helpers.c
+@@ -1156,13 +1156,6 @@ BPF_CALL_3(bpf_timer_init, struct bpf_timer_kern *, timer, struct bpf_map *, map
+ ret = -EBUSY;
+ goto out;
+ }
+- if (!atomic64_read(&map->usercnt)) {
+- /* maps with timers must be either held by user space
+- * or pinned in bpffs.
+- */
+- ret = -EPERM;
+- goto out;
+- }
+ /* allocate hrtimer via map_kmalloc to use memcg accounting */
+ t = bpf_map_kmalloc_node(map, sizeof(*t), GFP_ATOMIC, map->numa_node);
+ if (!t) {
+@@ -1175,7 +1168,21 @@ BPF_CALL_3(bpf_timer_init, struct bpf_timer_kern *, timer, struct bpf_map *, map
+ rcu_assign_pointer(t->callback_fn, NULL);
+ hrtimer_init(&t->timer, clockid, HRTIMER_MODE_REL_SOFT);
+ t->timer.function = bpf_timer_cb;
+- timer->timer = t;
++ WRITE_ONCE(timer->timer, t);
++ /* Guarantee the order between timer->timer and map->usercnt. So
++ * when there are concurrent uref release and bpf timer init, either
++ * bpf_timer_cancel_and_free() called by uref release reads a no-NULL
++ * timer or atomic64_read() below returns a zero usercnt.
++ */
++ smp_mb();
++ if (!atomic64_read(&map->usercnt)) {
++ /* maps with timers must be either held by user space
++ * or pinned in bpffs.
++ */
++ WRITE_ONCE(timer->timer, NULL);
++ kfree(t);
++ ret = -EPERM;
++ }
+ out:
+ __bpf_spin_unlock_irqrestore(&timer->lock);
+ return ret;
+@@ -1343,7 +1350,7 @@ void bpf_timer_cancel_and_free(void *val)
+ /* The subsequent bpf_timer_start/cancel() helpers won't be able to use
+ * this timer, since it won't be initialized.
+ */
+- timer->timer = NULL;
++ WRITE_ONCE(timer->timer, NULL);
+ out:
+ __bpf_spin_unlock_irqrestore(&timer->lock);
+ if (!t)
+diff --git a/kernel/futex/core.c b/kernel/futex/core.c
+index 514e4582b8634..d4141b0547187 100644
+--- a/kernel/futex/core.c
++++ b/kernel/futex/core.c
+@@ -248,7 +248,17 @@ int get_futex_key(u32 __user *uaddr, bool fshared, union futex_key *key,
+ * but access_ok() should be faster than find_vma()
+ */
+ if (!fshared) {
+- key->private.mm = mm;
++ /*
++ * On no-MMU, shared futexes are treated as private, therefore
++ * we must not include the current process in the key. Since
++ * there is only one address space, the address is a unique key
++ * on its own.
++ */
++ if (IS_ENABLED(CONFIG_MMU))
++ key->private.mm = mm;
++ else
++ key->private.mm = NULL;
++
+ key->private.address = address;
+ return 0;
+ }
+diff --git a/kernel/irq/matrix.c b/kernel/irq/matrix.c
+index 1698e77645acf..75d0ae490e29c 100644
+--- a/kernel/irq/matrix.c
++++ b/kernel/irq/matrix.c
+@@ -466,16 +466,16 @@ unsigned int irq_matrix_reserved(struct irq_matrix *m)
+ }
+
+ /**
+- * irq_matrix_allocated - Get the number of allocated irqs on the local cpu
++ * irq_matrix_allocated - Get the number of allocated non-managed irqs on the local CPU
+ * @m: Pointer to the matrix to search
+ *
+- * This returns number of allocated irqs
++ * This returns number of allocated non-managed interrupts.
+ */
+ unsigned int irq_matrix_allocated(struct irq_matrix *m)
+ {
+ struct cpumap *cm = this_cpu_ptr(m->maps);
+
+- return cm->allocated;
++ return cm->allocated - cm->managed_allocated;
+ }
+
+ #ifdef CONFIG_GENERIC_IRQ_DEBUGFS
+diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
+index 9ada0bc5247be..0e651fd4cc9fc 100644
+--- a/kernel/livepatch/core.c
++++ b/kernel/livepatch/core.c
+@@ -244,7 +244,7 @@ static int klp_resolve_symbols(Elf_Shdr *sechdrs, const char *strtab,
+ * symbols are exported and normal relas can be used instead.
+ */
+ if (!sec_vmlinux && sym_vmlinux) {
+- pr_err("invalid access to vmlinux symbol '%s' from module-specific livepatch relocation section",
++ pr_err("invalid access to vmlinux symbol '%s' from module-specific livepatch relocation section\n",
+ sym_name);
+ return -EINVAL;
+ }
+diff --git a/kernel/module/decompress.c b/kernel/module/decompress.c
+index 720e719253cd1..e1e9f69c5dd16 100644
+--- a/kernel/module/decompress.c
++++ b/kernel/module/decompress.c
+@@ -100,7 +100,7 @@ static ssize_t module_gzip_decompress(struct load_info *info,
+ s.next_in = buf + gzip_hdr_len;
+ s.avail_in = size - gzip_hdr_len;
+
+- s.workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
++ s.workspace = vmalloc(zlib_inflate_workspacesize());
+ if (!s.workspace)
+ return -ENOMEM;
+
+@@ -138,7 +138,7 @@ static ssize_t module_gzip_decompress(struct load_info *info,
+ out_inflate_end:
+ zlib_inflateEnd(&s);
+ out:
+- kfree(s.workspace);
++ vfree(s.workspace);
+ return retval;
+ }
+ #elif CONFIG_MODULE_COMPRESS_XZ
+diff --git a/kernel/padata.c b/kernel/padata.c
+index de90af5fcbe6b..791d9cb07a501 100644
+--- a/kernel/padata.c
++++ b/kernel/padata.c
+@@ -1094,12 +1094,16 @@ EXPORT_SYMBOL(padata_alloc_shell);
+ */
+ void padata_free_shell(struct padata_shell *ps)
+ {
++ struct parallel_data *pd;
++
+ if (!ps)
+ return;
+
+ mutex_lock(&ps->pinst->lock);
+ list_del(&ps->list);
+- padata_free_pd(rcu_dereference_protected(ps->pd, 1));
++ pd = rcu_dereference_protected(ps->pd, 1);
++ if (refcount_dec_and_test(&pd->refcnt))
++ padata_free_pd(pd);
+ mutex_unlock(&ps->pinst->lock);
+
+ kfree(ps);
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 55d13980e29fd..18a4f8f28a25f 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -2463,9 +2463,11 @@ static int migration_cpu_stop(void *data)
+ * it.
+ */
+ WARN_ON_ONCE(!pending->stop_pending);
++ preempt_disable();
+ task_rq_unlock(rq, p, &rf);
+ stop_one_cpu_nowait(task_cpu(p), migration_cpu_stop,
+ &pending->arg, &pending->stop_work);
++ preempt_enable();
+ return 0;
+ }
+ out:
+@@ -2746,12 +2748,13 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag
+ complete = true;
+ }
+
++ preempt_disable();
+ task_rq_unlock(rq, p, rf);
+-
+ if (push_task) {
+ stop_one_cpu_nowait(rq->cpu, push_cpu_stop,
+ p, &rq->push_work);
+ }
++ preempt_enable();
+
+ if (complete)
+ complete_all(&pending->done);
+@@ -2817,12 +2820,13 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag
+ if (flags & SCA_MIGRATE_ENABLE)
+ p->migration_flags &= ~MDF_PUSH;
+
++ preempt_disable();
+ task_rq_unlock(rq, p, rf);
+-
+ if (!stop_pending) {
+ stop_one_cpu_nowait(cpu_of(rq), migration_cpu_stop,
+ &pending->arg, &pending->stop_work);
+ }
++ preempt_enable();
+
+ if (flags & SCA_MIGRATE_ENABLE)
+ return 0;
+@@ -9255,9 +9259,11 @@ static void balance_push(struct rq *rq)
+ * Temporarily drop rq->lock such that we can wake-up the stop task.
+ * Both preemption and IRQs are still disabled.
+ */
++ preempt_disable();
+ raw_spin_rq_unlock(rq);
+ stop_one_cpu_nowait(rq->cpu, __balance_push_cpu_stop, push_task,
+ this_cpu_ptr(&push_work));
++ preempt_enable();
+ /*
+ * At this point need_resched() is true and we'll take the loop in
+ * schedule(). The next pick is obviously going to be the stop task
+diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
+index 9ce9810861ba5..389290e950bea 100644
+--- a/kernel/sched/deadline.c
++++ b/kernel/sched/deadline.c
+@@ -2460,9 +2460,11 @@ skip:
+ double_unlock_balance(this_rq, src_rq);
+
+ if (push_task) {
++ preempt_disable();
+ raw_spin_rq_unlock(this_rq);
+ stop_one_cpu_nowait(src_rq->cpu, push_cpu_stop,
+ push_task, &src_rq->push_work);
++ preempt_enable();
+ raw_spin_rq_lock(this_rq);
+ }
+ }
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 612873ec2197f..2558ab9033bee 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -4279,22 +4279,6 @@ static inline unsigned long task_util_est(struct task_struct *p)
+ return max(task_util(p), _task_util_est(p));
+ }
+
+-#ifdef CONFIG_UCLAMP_TASK
+-static inline unsigned long uclamp_task_util(struct task_struct *p,
+- unsigned long uclamp_min,
+- unsigned long uclamp_max)
+-{
+- return clamp(task_util_est(p), uclamp_min, uclamp_max);
+-}
+-#else
+-static inline unsigned long uclamp_task_util(struct task_struct *p,
+- unsigned long uclamp_min,
+- unsigned long uclamp_max)
+-{
+- return task_util_est(p);
+-}
+-#endif
+-
+ static inline void util_est_enqueue(struct cfs_rq *cfs_rq,
+ struct task_struct *p)
+ {
+@@ -4585,7 +4569,7 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
+
+ static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
+ {
+- return true;
++ return !cfs_rq->nr_running;
+ }
+
+ #define UPDATE_TG 0x0
+@@ -7279,7 +7263,7 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
+ target = prev_cpu;
+
+ sync_entity_load_avg(&p->se);
+- if (!uclamp_task_util(p, p_util_min, p_util_max))
++ if (!task_util_est(p) && p_util_min == 0)
+ goto unlock;
+
+ eenv_task_busy_time(&eenv, p, prev_cpu);
+@@ -7287,11 +7271,10 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
+ for (; pd; pd = pd->next) {
+ unsigned long util_min = p_util_min, util_max = p_util_max;
+ unsigned long cpu_cap, cpu_thermal_cap, util;
+- unsigned long cur_delta, max_spare_cap = 0;
++ long prev_spare_cap = -1, max_spare_cap = -1;
+ unsigned long rq_util_min, rq_util_max;
+- unsigned long prev_spare_cap = 0;
++ unsigned long cur_delta, base_energy;
+ int max_spare_cap_cpu = -1;
+- unsigned long base_energy;
+ int fits, max_fits = -1;
+
+ cpumask_and(cpus, perf_domain_span(pd), cpu_online_mask);
+@@ -7354,7 +7337,7 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
+ prev_spare_cap = cpu_cap;
+ prev_fits = fits;
+ } else if ((fits > max_fits) ||
+- ((fits == max_fits) && (cpu_cap > max_spare_cap))) {
++ ((fits == max_fits) && ((long)cpu_cap > max_spare_cap))) {
+ /*
+ * Find the CPU with the maximum spare capacity
+ * among the remaining CPUs in the performance
+@@ -7366,7 +7349,7 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
+ }
+ }
+
+- if (max_spare_cap_cpu < 0 && prev_spare_cap == 0)
++ if (max_spare_cap_cpu < 0 && prev_spare_cap < 0)
+ continue;
+
+ eenv_pd_busy_time(&eenv, cpus, p);
+@@ -7374,7 +7357,7 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
+ base_energy = compute_energy(&eenv, pd, cpus, p, -1);
+
+ /* Evaluate the energy impact of using prev_cpu. */
+- if (prev_spare_cap > 0) {
++ if (prev_spare_cap > -1) {
+ prev_delta = compute_energy(&eenv, pd, cpus, p,
+ prev_cpu);
+ /* CPU utilization has changed */
+@@ -10730,13 +10713,15 @@ more_balance:
+ busiest->push_cpu = this_cpu;
+ active_balance = 1;
+ }
+- raw_spin_rq_unlock_irqrestore(busiest, flags);
+
++ preempt_disable();
++ raw_spin_rq_unlock_irqrestore(busiest, flags);
+ if (active_balance) {
+ stop_one_cpu_nowait(cpu_of(busiest),
+ active_load_balance_cpu_stop, busiest,
+ &busiest->active_balance_work);
+ }
++ preempt_enable();
+ }
+ } else {
+ sd->nr_balance_failed = 0;
+diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
+index 576eb2f51f043..76bafa8d331a7 100644
+--- a/kernel/sched/rt.c
++++ b/kernel/sched/rt.c
+@@ -2109,9 +2109,11 @@ retry:
+ */
+ push_task = get_push_task(rq);
+ if (push_task) {
++ preempt_disable();
+ raw_spin_rq_unlock(rq);
+ stop_one_cpu_nowait(rq->cpu, push_cpu_stop,
+ push_task, &rq->push_work);
++ preempt_enable();
+ raw_spin_rq_lock(rq);
+ }
+
+@@ -2448,9 +2450,11 @@ skip:
+ double_unlock_balance(this_rq, src_rq);
+
+ if (push_task) {
++ preempt_disable();
+ raw_spin_rq_unlock(this_rq);
+ stop_one_cpu_nowait(src_rq->cpu, push_cpu_stop,
+ push_task, &src_rq->push_work);
++ preempt_enable();
+ raw_spin_rq_lock(this_rq);
+ }
+ }
+diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
+index 56675294d7a3b..a34a4fcdab7b1 100644
+--- a/kernel/trace/trace_kprobe.c
++++ b/kernel/trace/trace_kprobe.c
+@@ -986,9 +986,9 @@ EXPORT_SYMBOL_GPL(kprobe_event_cmd_init);
+ /**
+ * __kprobe_event_gen_cmd_start - Generate a kprobe event command from arg list
+ * @cmd: A pointer to the dynevent_cmd struct representing the new event
++ * @kretprobe: Is this a return probe?
+ * @name: The name of the kprobe event
+ * @loc: The location of the kprobe event
+- * @kretprobe: Is this a return probe?
+ * @...: Variable number of arg (pairs), one pair for each field
+ *
+ * NOTE: Users normally won't want to call this function directly, but
+diff --git a/lib/kunit/executor.c b/lib/kunit/executor.c
+index 74982b83707ca..05ac4cdb6806a 100644
+--- a/lib/kunit/executor.c
++++ b/lib/kunit/executor.c
+@@ -102,8 +102,10 @@ static void kunit_free_suite_set(struct suite_set suite_set)
+ {
+ struct kunit_suite * const *suites;
+
+- for (suites = suite_set.start; suites < suite_set.end; suites++)
++ for (suites = suite_set.start; suites < suite_set.end; suites++) {
++ kfree((*suites)->test_cases);
+ kfree(*suites);
++ }
+ kfree(suite_set.start);
+ }
+
+diff --git a/mm/filemap.c b/mm/filemap.c
+index 322aea78058a0..2d930470aacaa 100644
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -2262,6 +2262,60 @@ out:
+ }
+ EXPORT_SYMBOL(filemap_get_folios_contig);
+
++/**
++ * filemap_get_folios_tag - Get a batch of folios matching @tag
++ * @mapping: The address_space to search
++ * @start: The starting page index
++ * @end: The final page index (inclusive)
++ * @tag: The tag index
++ * @fbatch: The batch to fill
++ *
++ * Same as filemap_get_folios(), but only returning folios tagged with @tag.
++ *
++ * Return: The number of folios found.
++ * Also update @start to index the next folio for traversal.
++ */
++unsigned filemap_get_folios_tag(struct address_space *mapping, pgoff_t *start,
++ pgoff_t end, xa_mark_t tag, struct folio_batch *fbatch)
++{
++ XA_STATE(xas, &mapping->i_pages, *start);
++ struct folio *folio;
++
++ rcu_read_lock();
++ while ((folio = find_get_entry(&xas, end, tag)) != NULL) {
++ /*
++ * Shadow entries should never be tagged, but this iteration
++ * is lockless so there is a window for page reclaim to evict
++ * a page we saw tagged. Skip over it.
++ */
++ if (xa_is_value(folio))
++ continue;
++ if (!folio_batch_add(fbatch, folio)) {
++ unsigned long nr = folio_nr_pages(folio);
++
++ if (folio_test_hugetlb(folio))
++ nr = 1;
++ *start = folio->index + nr;
++ goto out;
++ }
++ }
++ /*
++ * We come here when there is no page beyond @end. We take care to not
++ * overflow the index @start as it confuses some of the callers. This
++ * breaks the iteration when there is a page at index -1 but that is
++ * already broke anyway.
++ */
++ if (end == (pgoff_t)-1)
++ *start = (pgoff_t)-1;
++ else
++ *start = end + 1;
++out:
++ rcu_read_unlock();
++
++ return folio_batch_count(fbatch);
++}
++EXPORT_SYMBOL(filemap_get_folios_tag);
++
+ /**
+ * find_get_pages_range_tag - Find and return head pages matching @tag.
+ * @mapping: the address_space to search
+diff --git a/mm/readahead.c b/mm/readahead.c
+index b10f0cf81d804..ba43428043a35 100644
+--- a/mm/readahead.c
++++ b/mm/readahead.c
+@@ -749,7 +749,8 @@ ssize_t ksys_readahead(int fd, loff_t offset, size_t count)
+ */
+ ret = -EINVAL;
+ if (!f.file->f_mapping || !f.file->f_mapping->a_ops ||
+- !S_ISREG(file_inode(f.file)->i_mode))
++ (!S_ISREG(file_inode(f.file)->i_mode) &&
++ !S_ISBLK(file_inode(f.file)->i_mode)))
+ goto out;
+
+ ret = vfs_fadvise(f.file, offset, count, POSIX_FADV_WILLNEED);
+diff --git a/net/9p/client.c b/net/9p/client.c
+index af59c3f2ec2e7..a96e127ca4883 100644
+--- a/net/9p/client.c
++++ b/net/9p/client.c
+@@ -537,12 +537,14 @@ static int p9_check_errors(struct p9_client *c, struct p9_req_t *req)
+ return 0;
+
+ if (!p9_is_proto_dotl(c)) {
+- char *ename;
++ char *ename = NULL;
+
+ err = p9pdu_readf(&req->rc, c->proto_version, "s?d",
+ &ename, &ecode);
+- if (err)
++ if (err) {
++ kfree(ename);
+ goto out_err;
++ }
+
+ if (p9_is_proto_dotu(c) && ecode < 512)
+ err = -ecode;
+diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
+index 5218c4dfe0a89..d74fe13f3dceb 100644
+--- a/net/bluetooth/hci_sync.c
++++ b/net/bluetooth/hci_sync.c
+@@ -151,7 +151,7 @@ struct sk_buff *__hci_cmd_sync_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
+ struct sk_buff *skb;
+ int err = 0;
+
+- bt_dev_dbg(hdev, "Opcode 0x%4x", opcode);
++ bt_dev_dbg(hdev, "Opcode 0x%4.4x", opcode);
+
+ hci_req_init(&req, hdev);
+
+@@ -247,7 +247,7 @@ int __hci_cmd_sync_status_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
+ skb = __hci_cmd_sync_sk(hdev, opcode, plen, param, event, timeout, sk);
+ if (IS_ERR(skb)) {
+ if (!event)
+- bt_dev_err(hdev, "Opcode 0x%4x failed: %ld", opcode,
++ bt_dev_err(hdev, "Opcode 0x%4.4x failed: %ld", opcode,
+ PTR_ERR(skb));
+ return PTR_ERR(skb);
+ }
+diff --git a/net/core/page_pool.c b/net/core/page_pool.c
+index 2396c99bedeaa..caf6d950d54ad 100644
+--- a/net/core/page_pool.c
++++ b/net/core/page_pool.c
+@@ -209,8 +209,12 @@ static int page_pool_init(struct page_pool *pool,
+ return -ENOMEM;
+ #endif
+
+- if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0)
++ if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0) {
++#ifdef CONFIG_PAGE_POOL_STATS
++ free_percpu(pool->recycle_stats);
++#endif
+ return -ENOMEM;
++ }
+
+ atomic_set(&pool->pages_state_release_cnt, 0);
+
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 8dca4a7ca4a1f..73b1e0e53534e 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -3958,6 +3958,7 @@ static void skb_ts_finish(struct ts_config *conf, struct ts_state *state)
+ unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
+ unsigned int to, struct ts_config *config)
+ {
++ unsigned int patlen = config->ops->get_pattern_len(config);
+ struct ts_state state;
+ unsigned int ret;
+
+@@ -3969,7 +3970,7 @@ unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
+ skb_prepare_seq_read(skb, from, to, TS_SKB_CB(&state));
+
+ ret = textsearch_find(config, &state);
+- return (ret <= to - from ? ret : UINT_MAX);
++ return (ret + patlen <= to - from ? ret : UINT_MAX);
+ }
+ EXPORT_SYMBOL(skb_find_text);
+
+diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
+index 247179d4c8865..9fe6d96797169 100644
+--- a/net/dccp/ipv4.c
++++ b/net/dccp/ipv4.c
+@@ -628,9 +628,6 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
+ if (dccp_parse_options(sk, dreq, skb))
+ goto drop_and_free;
+
+- if (security_inet_conn_request(sk, skb, req))
+- goto drop_and_free;
+-
+ ireq = inet_rsk(req);
+ sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
+ sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
+@@ -638,6 +635,9 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
+ ireq->ireq_family = AF_INET;
+ ireq->ir_iif = READ_ONCE(sk->sk_bound_dev_if);
+
++ if (security_inet_conn_request(sk, skb, req))
++ goto drop_and_free;
++
+ /*
+ * Step 3: Process LISTEN state
+ *
+diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
+index 6fb34eaf1237a..e0b0bf75a46c2 100644
+--- a/net/dccp/ipv6.c
++++ b/net/dccp/ipv6.c
+@@ -359,15 +359,15 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
+ if (dccp_parse_options(sk, dreq, skb))
+ goto drop_and_free;
+
+- if (security_inet_conn_request(sk, skb, req))
+- goto drop_and_free;
+-
+ ireq = inet_rsk(req);
+ ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
+ ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
+ ireq->ireq_family = AF_INET6;
+ ireq->ir_mark = inet_request_mark(sk, skb);
+
++ if (security_inet_conn_request(sk, skb, req))
++ goto drop_and_free;
++
+ if (ipv6_opt_accepted(sk, skb, IP6CB(skb)) ||
+ np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
+ np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
+diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c
+index b71dab630a873..80cdc6f6b34c9 100644
+--- a/net/hsr/hsr_forward.c
++++ b/net/hsr/hsr_forward.c
+@@ -342,9 +342,7 @@ struct sk_buff *prp_create_tagged_frame(struct hsr_frame_info *frame,
+ skb = skb_copy_expand(frame->skb_std, 0,
+ skb_tailroom(frame->skb_std) + HSR_HLEN,
+ GFP_ATOMIC);
+- prp_fill_rct(skb, frame, port);
+-
+- return skb;
++ return prp_fill_rct(skb, frame, port);
+ }
+
+ static void hsr_deliver_master(struct sk_buff *skb, struct net_device *dev,
+diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
+index 26fb97d1d4d9a..f9514cf87649e 100644
+--- a/net/ipv4/syncookies.c
++++ b/net/ipv4/syncookies.c
+@@ -41,7 +41,6 @@ static siphash_aligned_key_t syncookie_secret[2];
+ * requested/supported by the syn/synack exchange.
+ */
+ #define TSBITS 6
+-#define TSMASK (((__u32)1 << TSBITS) - 1)
+
+ static u32 cookie_hash(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport,
+ u32 count, int c)
+@@ -62,27 +61,22 @@ static u32 cookie_hash(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport,
+ */
+ u64 cookie_init_timestamp(struct request_sock *req, u64 now)
+ {
+- struct inet_request_sock *ireq;
+- u32 ts, ts_now = tcp_ns_to_ts(now);
++ const struct inet_request_sock *ireq = inet_rsk(req);
++ u64 ts, ts_now = tcp_ns_to_ts(now);
+ u32 options = 0;
+
+- ireq = inet_rsk(req);
+-
+ options = ireq->wscale_ok ? ireq->snd_wscale : TS_OPT_WSCALE_MASK;
+ if (ireq->sack_ok)
+ options |= TS_OPT_SACK;
+ if (ireq->ecn_ok)
+ options |= TS_OPT_ECN;
+
+- ts = ts_now & ~TSMASK;
++ ts = (ts_now >> TSBITS) << TSBITS;
+ ts |= options;
+- if (ts > ts_now) {
+- ts >>= TSBITS;
+- ts--;
+- ts <<= TSBITS;
+- ts |= options;
+- }
+- return (u64)ts * (NSEC_PER_SEC / TCP_TS_HZ);
++ if (ts > ts_now)
++ ts -= (1UL << TSBITS);
++
++ return ts * (NSEC_PER_SEC / TCP_TS_HZ);
+ }
+
+
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index d63942202493d..65dae3d43684f 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -6420,22 +6420,23 @@ reset_and_undo:
+
+ static void tcp_rcv_synrecv_state_fastopen(struct sock *sk)
+ {
++ struct tcp_sock *tp = tcp_sk(sk);
+ struct request_sock *req;
+
+ /* If we are still handling the SYNACK RTO, see if timestamp ECR allows
+ * undo. If peer SACKs triggered fast recovery, we can't undo here.
+ */
+- if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss)
+- tcp_try_undo_loss(sk, false);
++ if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss && !tp->packets_out)
++ tcp_try_undo_recovery(sk);
+
+ /* Reset rtx states to prevent spurious retransmits_timed_out() */
+- tcp_sk(sk)->retrans_stamp = 0;
++ tp->retrans_stamp = 0;
+ inet_csk(sk)->icsk_retransmits = 0;
+
+ /* Once we leave TCP_SYN_RECV or TCP_FIN_WAIT_1,
+ * we no longer need req so release it.
+ */
+- req = rcu_dereference_protected(tcp_sk(sk)->fastopen_rsk,
++ req = rcu_dereference_protected(tp->fastopen_rsk,
+ lockdep_sock_is_held(sk));
+ reqsk_fastopen_remove(sk, req, false);
+
+diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
+index 99ac5efe244d3..a7364ff8b558d 100644
+--- a/net/ipv4/tcp_metrics.c
++++ b/net/ipv4/tcp_metrics.c
+@@ -470,11 +470,15 @@ void tcp_init_metrics(struct sock *sk)
+ u32 val, crtt = 0; /* cached RTT scaled by 8 */
+
+ sk_dst_confirm(sk);
++ /* ssthresh may have been reduced unnecessarily during.
++ * 3WHS. Restore it back to its initial default.
++ */
++ tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
+ if (!dst)
+ goto reset;
+
+ rcu_read_lock();
+- tm = tcp_get_metrics(sk, dst, true);
++ tm = tcp_get_metrics(sk, dst, false);
+ if (!tm) {
+ rcu_read_unlock();
+ goto reset;
+@@ -489,11 +493,6 @@ void tcp_init_metrics(struct sock *sk)
+ tp->snd_ssthresh = val;
+ if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
+ tp->snd_ssthresh = tp->snd_cwnd_clamp;
+- } else {
+- /* ssthresh may have been reduced unnecessarily during.
+- * 3WHS. Restore it back to its initial default.
+- */
+- tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
+ }
+ val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
+ if (val && tp->reordering != val)
+@@ -908,7 +907,7 @@ static void tcp_metrics_flush_all(struct net *net)
+ match = net ? net_eq(tm_net(tm), net) :
+ !refcount_read(&tm_net(tm)->ns.count);
+ if (match) {
+- *pp = tm->tcpm_next;
++ rcu_assign_pointer(*pp, tm->tcpm_next);
+ kfree_rcu(tm, rcu_head);
+ } else {
+ pp = &tm->tcpm_next;
+@@ -949,7 +948,7 @@ static int tcp_metrics_nl_cmd_del(struct sk_buff *skb, struct genl_info *info)
+ if (addr_same(&tm->tcpm_daddr, &daddr) &&
+ (!src || addr_same(&tm->tcpm_saddr, &saddr)) &&
+ net_eq(tm_net(tm), net)) {
+- *pp = tm->tcpm_next;
++ rcu_assign_pointer(*pp, tm->tcpm_next);
+ kfree_rcu(tm, rcu_head);
+ found = true;
+ } else {
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index b2aa7777521f6..65abc92a81bd0 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -2714,10 +2714,12 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
+ case UDP_ENCAP_ESPINUDP_NON_IKE:
+ #if IS_ENABLED(CONFIG_IPV6)
+ if (sk->sk_family == AF_INET6)
+- up->encap_rcv = ipv6_stub->xfrm6_udp_encap_rcv;
++ WRITE_ONCE(up->encap_rcv,
++ ipv6_stub->xfrm6_udp_encap_rcv);
+ else
+ #endif
+- up->encap_rcv = xfrm4_udp_encap_rcv;
++ WRITE_ONCE(up->encap_rcv,
++ xfrm4_udp_encap_rcv);
+ #endif
+ fallthrough;
+ case UDP_ENCAP_L2TPINUDP:
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index ce2c5e728745f..3c2b2a85de367 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -161,7 +161,13 @@ ip6_finish_output_gso_slowpath_drop(struct net *net, struct sock *sk,
+ int err;
+
+ skb_mark_not_on_list(segs);
+- err = ip6_fragment(net, sk, segs, ip6_finish_output2);
++ /* Last GSO segment can be smaller than gso_size (and MTU).
++ * Adding a fragment header would produce an "atomic fragment",
++ * which is considered harmful (RFC-8021). Avoid that.
++ */
++ err = segs->len > mtu ?
++ ip6_fragment(net, sk, segs, ip6_finish_output2) :
++ ip6_finish_output2(net, sk, segs);
+ if (err && ret == 0)
+ ret = err;
+ }
+diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
+index 5014aa6634527..8698b49dfc8de 100644
+--- a/net/ipv6/syncookies.c
++++ b/net/ipv6/syncookies.c
+@@ -180,14 +180,15 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
+ treq = tcp_rsk(req);
+ treq->tfo_listener = false;
+
+- if (security_inet_conn_request(sk, skb, req))
+- goto out_free;
+-
+ req->mss = mss;
+ ireq->ir_rmt_port = th->source;
+ ireq->ir_num = ntohs(th->dest);
+ ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
+ ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
++
++ if (security_inet_conn_request(sk, skb, req))
++ goto out_free;
++
+ if (ipv6_opt_accepted(sk, skb, &TCP_SKB_CB(skb)->header.h6) ||
+ np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
+ np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
+diff --git a/net/llc/llc_input.c b/net/llc/llc_input.c
+index 7cac441862e21..51bccfb00a9cd 100644
+--- a/net/llc/llc_input.c
++++ b/net/llc/llc_input.c
+@@ -127,8 +127,14 @@ static inline int llc_fixup_skb(struct sk_buff *skb)
+ skb->transport_header += llc_len;
+ skb_pull(skb, llc_len);
+ if (skb->protocol == htons(ETH_P_802_2)) {
+- __be16 pdulen = eth_hdr(skb)->h_proto;
+- s32 data_size = ntohs(pdulen) - llc_len;
++ __be16 pdulen;
++ s32 data_size;
++
++ if (skb->mac_len < ETH_HLEN)
++ return 0;
++
++ pdulen = eth_hdr(skb)->h_proto;
++ data_size = ntohs(pdulen) - llc_len;
+
+ if (data_size < 0 ||
+ !pskb_may_pull(skb, data_size))
+diff --git a/net/llc/llc_s_ac.c b/net/llc/llc_s_ac.c
+index 79d1cef8f15a9..06fb8e6944b06 100644
+--- a/net/llc/llc_s_ac.c
++++ b/net/llc/llc_s_ac.c
+@@ -153,6 +153,9 @@ int llc_sap_action_send_test_r(struct llc_sap *sap, struct sk_buff *skb)
+ int rc = 1;
+ u32 data_size;
+
++ if (skb->mac_len < ETH_HLEN)
++ return 1;
++
+ llc_pdu_decode_sa(skb, mac_da);
+ llc_pdu_decode_da(skb, mac_sa);
+ llc_pdu_decode_ssap(skb, &dsap);
+diff --git a/net/llc/llc_station.c b/net/llc/llc_station.c
+index 05c6ae0920534..f506542925109 100644
+--- a/net/llc/llc_station.c
++++ b/net/llc/llc_station.c
+@@ -76,6 +76,9 @@ static int llc_station_ac_send_test_r(struct sk_buff *skb)
+ u32 data_size;
+ struct sk_buff *nskb;
+
++ if (skb->mac_len < ETH_HLEN)
++ goto out;
++
+ /* The test request command is type U (llc_len = 3) */
+ data_size = ntohs(eth_hdr(skb)->h_proto) - 3;
+ nskb = llc_alloc_frame(NULL, skb->dev, LLC_PDU_TYPE_U, data_size);
+diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
+index 99a976ea17498..d5dd2d9e89b48 100644
+--- a/net/mac80211/ieee80211_i.h
++++ b/net/mac80211/ieee80211_i.h
+@@ -1361,7 +1361,7 @@ struct ieee80211_local {
+ /* wowlan is enabled -- don't reconfig on resume */
+ bool wowlan;
+
+- struct work_struct radar_detected_work;
++ struct wiphy_work radar_detected_work;
+
+ /* number of RX chains the hardware has */
+ u8 rx_chains;
+@@ -1438,14 +1438,14 @@ struct ieee80211_local {
+ int hw_scan_ies_bufsize;
+ struct cfg80211_scan_info scan_info;
+
+- struct work_struct sched_scan_stopped_work;
++ struct wiphy_work sched_scan_stopped_work;
+ struct ieee80211_sub_if_data __rcu *sched_scan_sdata;
+ struct cfg80211_sched_scan_request __rcu *sched_scan_req;
+ u8 scan_addr[ETH_ALEN];
+
+ unsigned long leave_oper_channel_time;
+ enum mac80211_scan_state next_scan_state;
+- struct delayed_work scan_work;
++ struct wiphy_delayed_work scan_work;
+ struct ieee80211_sub_if_data __rcu *scan_sdata;
+ /* For backward compatibility only -- do not use */
+ struct cfg80211_chan_def _oper_chandef;
+@@ -1538,9 +1538,9 @@ struct ieee80211_local {
+ /*
+ * Remain-on-channel support
+ */
+- struct delayed_work roc_work;
++ struct wiphy_delayed_work roc_work;
+ struct list_head roc_list;
+- struct work_struct hw_roc_start, hw_roc_done;
++ struct wiphy_work hw_roc_start, hw_roc_done;
+ unsigned long hw_roc_start_time;
+ u64 roc_cookie_counter;
+
+@@ -1862,7 +1862,7 @@ int ieee80211_mesh_csa_beacon(struct ieee80211_sub_if_data *sdata,
+ int ieee80211_mesh_finish_csa(struct ieee80211_sub_if_data *sdata);
+
+ /* scan/BSS handling */
+-void ieee80211_scan_work(struct work_struct *work);
++void ieee80211_scan_work(struct wiphy *wiphy, struct wiphy_work *work);
+ int ieee80211_request_ibss_scan(struct ieee80211_sub_if_data *sdata,
+ const u8 *ssid, u8 ssid_len,
+ struct ieee80211_channel **channels,
+@@ -1892,7 +1892,8 @@ int ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata,
+ struct cfg80211_sched_scan_request *req);
+ int ieee80211_request_sched_scan_stop(struct ieee80211_local *local);
+ void ieee80211_sched_scan_end(struct ieee80211_local *local);
+-void ieee80211_sched_scan_stopped_work(struct work_struct *work);
++void ieee80211_sched_scan_stopped_work(struct wiphy *wiphy,
++ struct wiphy_work *work);
+
+ /* off-channel/mgmt-tx */
+ void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local);
+@@ -2483,7 +2484,8 @@ bool ieee80211_is_radar_required(struct ieee80211_local *local);
+ void ieee80211_dfs_cac_timer(unsigned long data);
+ void ieee80211_dfs_cac_timer_work(struct work_struct *work);
+ void ieee80211_dfs_cac_cancel(struct ieee80211_local *local);
+-void ieee80211_dfs_radar_detected_work(struct work_struct *work);
++void ieee80211_dfs_radar_detected_work(struct wiphy *wiphy,
++ struct wiphy_work *work);
+ int ieee80211_send_action_csa(struct ieee80211_sub_if_data *sdata,
+ struct cfg80211_csa_settings *csa_settings);
+
+diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
+index 8dd3c10a99e0b..e00e1bf0f754a 100644
+--- a/net/mac80211/iface.c
++++ b/net/mac80211/iface.c
+@@ -697,7 +697,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_do
+ ieee80211_recalc_ps(local);
+
+ if (cancel_scan)
+- flush_delayed_work(&local->scan_work);
++ wiphy_delayed_work_flush(local->hw.wiphy, &local->scan_work);
+
+ if (local->open_count == 0) {
+ ieee80211_stop_device(local);
+diff --git a/net/mac80211/main.c b/net/mac80211/main.c
+index 02b5abc7326bc..6faba47b7b0ea 100644
+--- a/net/mac80211/main.c
++++ b/net/mac80211/main.c
+@@ -334,10 +334,7 @@ static void ieee80211_restart_work(struct work_struct *work)
+ struct ieee80211_sub_if_data *sdata;
+ int ret;
+
+- /* wait for scan work complete */
+ flush_workqueue(local->workqueue);
+- flush_work(&local->sched_scan_stopped_work);
+- flush_work(&local->radar_detected_work);
+
+ rtnl_lock();
+ /* we might do interface manipulations, so need both */
+@@ -377,8 +374,8 @@ static void ieee80211_restart_work(struct work_struct *work)
+ ieee80211_scan_cancel(local);
+
+ /* make sure any new ROC will consider local->in_reconfig */
+- flush_delayed_work(&local->roc_work);
+- flush_work(&local->hw_roc_done);
++ wiphy_delayed_work_flush(local->hw.wiphy, &local->roc_work);
++ wiphy_work_flush(local->hw.wiphy, &local->hw_roc_done);
+
+ /* wait for all packet processing to be done */
+ synchronize_net();
+@@ -807,12 +804,12 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
+ INIT_LIST_HEAD(&local->chanctx_list);
+ mutex_init(&local->chanctx_mtx);
+
+- INIT_DELAYED_WORK(&local->scan_work, ieee80211_scan_work);
++ wiphy_delayed_work_init(&local->scan_work, ieee80211_scan_work);
+
+ INIT_WORK(&local->restart_work, ieee80211_restart_work);
+
+- INIT_WORK(&local->radar_detected_work,
+- ieee80211_dfs_radar_detected_work);
++ wiphy_work_init(&local->radar_detected_work,
++ ieee80211_dfs_radar_detected_work);
+
+ INIT_WORK(&local->reconfig_filter, ieee80211_reconfig_filter);
+ local->smps_mode = IEEE80211_SMPS_OFF;
+@@ -823,8 +820,8 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
+ ieee80211_dynamic_ps_disable_work);
+ timer_setup(&local->dynamic_ps_timer, ieee80211_dynamic_ps_timer, 0);
+
+- INIT_WORK(&local->sched_scan_stopped_work,
+- ieee80211_sched_scan_stopped_work);
++ wiphy_work_init(&local->sched_scan_stopped_work,
++ ieee80211_sched_scan_stopped_work);
+
+ spin_lock_init(&local->ack_status_lock);
+ idr_init(&local->ack_status_frames);
+@@ -1471,13 +1468,15 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
+ */
+ ieee80211_remove_interfaces(local);
+
++ wiphy_lock(local->hw.wiphy);
++ wiphy_delayed_work_cancel(local->hw.wiphy, &local->roc_work);
++ wiphy_work_cancel(local->hw.wiphy, &local->sched_scan_stopped_work);
++ wiphy_work_cancel(local->hw.wiphy, &local->radar_detected_work);
++ wiphy_unlock(local->hw.wiphy);
+ rtnl_unlock();
+
+- cancel_delayed_work_sync(&local->roc_work);
+ cancel_work_sync(&local->restart_work);
+ cancel_work_sync(&local->reconfig_filter);
+- flush_work(&local->sched_scan_stopped_work);
+- flush_work(&local->radar_detected_work);
+
+ ieee80211_clear_tx_pending(local);
+ rate_control_deinitialize(local);
+diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
+index d78c82d6b6966..50dc379ca097e 100644
+--- a/net/mac80211/offchannel.c
++++ b/net/mac80211/offchannel.c
+@@ -230,7 +230,7 @@ static bool ieee80211_recalc_sw_work(struct ieee80211_local *local,
+ if (dur == LONG_MAX)
+ return false;
+
+- mod_delayed_work(local->workqueue, &local->roc_work, dur);
++ wiphy_delayed_work_queue(local->hw.wiphy, &local->roc_work, dur);
+ return true;
+ }
+
+@@ -258,7 +258,7 @@ static void ieee80211_handle_roc_started(struct ieee80211_roc_work *roc,
+ roc->notified = true;
+ }
+
+-static void ieee80211_hw_roc_start(struct work_struct *work)
++static void ieee80211_hw_roc_start(struct wiphy *wiphy, struct wiphy_work *work)
+ {
+ struct ieee80211_local *local =
+ container_of(work, struct ieee80211_local, hw_roc_start);
+@@ -285,7 +285,7 @@ void ieee80211_ready_on_channel(struct ieee80211_hw *hw)
+
+ trace_api_ready_on_channel(local);
+
+- ieee80211_queue_work(hw, &local->hw_roc_start);
++ wiphy_work_queue(hw->wiphy, &local->hw_roc_start);
+ }
+ EXPORT_SYMBOL_GPL(ieee80211_ready_on_channel);
+
+@@ -338,7 +338,7 @@ static void _ieee80211_start_next_roc(struct ieee80211_local *local)
+ tmp->started = true;
+ tmp->abort = true;
+ }
+- ieee80211_queue_work(&local->hw, &local->hw_roc_done);
++ wiphy_work_queue(local->hw.wiphy, &local->hw_roc_done);
+ return;
+ }
+
+@@ -368,8 +368,8 @@ static void _ieee80211_start_next_roc(struct ieee80211_local *local)
+ ieee80211_hw_config(local, 0);
+ }
+
+- ieee80211_queue_delayed_work(&local->hw, &local->roc_work,
+- msecs_to_jiffies(min_dur));
++ wiphy_delayed_work_queue(local->hw.wiphy, &local->roc_work,
++ msecs_to_jiffies(min_dur));
+
+ /* tell userspace or send frame(s) */
+ list_for_each_entry(tmp, &local->roc_list, list) {
+@@ -407,8 +407,8 @@ void ieee80211_start_next_roc(struct ieee80211_local *local)
+ _ieee80211_start_next_roc(local);
+ } else {
+ /* delay it a bit */
+- ieee80211_queue_delayed_work(&local->hw, &local->roc_work,
+- round_jiffies_relative(HZ/2));
++ wiphy_delayed_work_queue(local->hw.wiphy, &local->roc_work,
++ round_jiffies_relative(HZ / 2));
+ }
+ }
+
+@@ -451,7 +451,7 @@ static void __ieee80211_roc_work(struct ieee80211_local *local)
+ }
+ }
+
+-static void ieee80211_roc_work(struct work_struct *work)
++static void ieee80211_roc_work(struct wiphy *wiphy, struct wiphy_work *work)
+ {
+ struct ieee80211_local *local =
+ container_of(work, struct ieee80211_local, roc_work.work);
+@@ -461,7 +461,7 @@ static void ieee80211_roc_work(struct work_struct *work)
+ mutex_unlock(&local->mtx);
+ }
+
+-static void ieee80211_hw_roc_done(struct work_struct *work)
++static void ieee80211_hw_roc_done(struct wiphy *wiphy, struct wiphy_work *work)
+ {
+ struct ieee80211_local *local =
+ container_of(work, struct ieee80211_local, hw_roc_done);
+@@ -482,7 +482,7 @@ void ieee80211_remain_on_channel_expired(struct ieee80211_hw *hw)
+
+ trace_api_remain_on_channel_expired(local);
+
+- ieee80211_queue_work(hw, &local->hw_roc_done);
++ wiphy_work_queue(hw->wiphy, &local->hw_roc_done);
+ }
+ EXPORT_SYMBOL_GPL(ieee80211_remain_on_channel_expired);
+
+@@ -586,8 +586,8 @@ static int ieee80211_start_roc_work(struct ieee80211_local *local,
+ /* if not HW assist, just queue & schedule work */
+ if (!local->ops->remain_on_channel) {
+ list_add_tail(&roc->list, &local->roc_list);
+- ieee80211_queue_delayed_work(&local->hw,
+- &local->roc_work, 0);
++ wiphy_delayed_work_queue(local->hw.wiphy,
++ &local->roc_work, 0);
+ } else {
+ /* otherwise actually kick it off here
+ * (for error handling)
+@@ -695,7 +695,7 @@ static int ieee80211_cancel_roc(struct ieee80211_local *local,
+ if (!cookie)
+ return -ENOENT;
+
+- flush_work(&local->hw_roc_start);
++ wiphy_work_flush(local->hw.wiphy, &local->hw_roc_start);
+
+ mutex_lock(&local->mtx);
+ list_for_each_entry_safe(roc, tmp, &local->roc_list, list) {
+@@ -745,7 +745,7 @@ static int ieee80211_cancel_roc(struct ieee80211_local *local,
+ } else {
+ /* go through work struct to return to the operating channel */
+ found->abort = true;
+- mod_delayed_work(local->workqueue, &local->roc_work, 0);
++ wiphy_delayed_work_queue(local->hw.wiphy, &local->roc_work, 0);
+ }
+
+ out_unlock:
+@@ -994,9 +994,9 @@ int ieee80211_mgmt_tx_cancel_wait(struct wiphy *wiphy,
+
+ void ieee80211_roc_setup(struct ieee80211_local *local)
+ {
+- INIT_WORK(&local->hw_roc_start, ieee80211_hw_roc_start);
+- INIT_WORK(&local->hw_roc_done, ieee80211_hw_roc_done);
+- INIT_DELAYED_WORK(&local->roc_work, ieee80211_roc_work);
++ wiphy_work_init(&local->hw_roc_start, ieee80211_hw_roc_start);
++ wiphy_work_init(&local->hw_roc_done, ieee80211_hw_roc_done);
++ wiphy_delayed_work_init(&local->roc_work, ieee80211_roc_work);
+ INIT_LIST_HEAD(&local->roc_list);
+ }
+
+diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
+index dc3cdee51e660..c37e2576f1c13 100644
+--- a/net/mac80211/scan.c
++++ b/net/mac80211/scan.c
+@@ -291,8 +291,8 @@ void ieee80211_scan_rx(struct ieee80211_local *local, struct sk_buff *skb)
+ * the beacon/proberesp rx gives us an opportunity to upgrade
+ * to active scan
+ */
+- set_bit(SCAN_BEACON_DONE, &local->scanning);
+- ieee80211_queue_delayed_work(&local->hw, &local->scan_work, 0);
++ set_bit(SCAN_BEACON_DONE, &local->scanning);
++ wiphy_delayed_work_queue(local->hw.wiphy, &local->scan_work, 0);
+ }
+
+ if (ieee80211_is_probe_resp(mgmt->frame_control)) {
+@@ -522,7 +522,7 @@ void ieee80211_scan_completed(struct ieee80211_hw *hw,
+
+ memcpy(&local->scan_info, info, sizeof(*info));
+
+- ieee80211_queue_delayed_work(&local->hw, &local->scan_work, 0);
++ wiphy_delayed_work_queue(local->hw.wiphy, &local->scan_work, 0);
+ }
+ EXPORT_SYMBOL(ieee80211_scan_completed);
+
+@@ -562,8 +562,7 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local,
+ /* We need to set power level at maximum rate for scanning. */
+ ieee80211_hw_config(local, 0);
+
+- ieee80211_queue_delayed_work(&local->hw,
+- &local->scan_work, 0);
++ wiphy_delayed_work_queue(local->hw.wiphy, &local->scan_work, 0);
+
+ return 0;
+ }
+@@ -620,8 +619,8 @@ void ieee80211_run_deferred_scan(struct ieee80211_local *local)
+ lockdep_is_held(&local->mtx))))
+ return;
+
+- ieee80211_queue_delayed_work(&local->hw, &local->scan_work,
+- round_jiffies_relative(0));
++ wiphy_delayed_work_queue(local->hw.wiphy, &local->scan_work,
++ round_jiffies_relative(0));
+ }
+
+ static void ieee80211_send_scan_probe_req(struct ieee80211_sub_if_data *sdata,
+@@ -812,8 +811,8 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
+ }
+
+ /* Now, just wait a bit and we are all done! */
+- ieee80211_queue_delayed_work(&local->hw, &local->scan_work,
+- next_delay);
++ wiphy_delayed_work_queue(local->hw.wiphy, &local->scan_work,
++ next_delay);
+ return 0;
+ } else {
+ /* Do normal software scan */
+@@ -1060,7 +1059,7 @@ static void ieee80211_scan_state_resume(struct ieee80211_local *local,
+ local->next_scan_state = SCAN_SET_CHANNEL;
+ }
+
+-void ieee80211_scan_work(struct work_struct *work)
++void ieee80211_scan_work(struct wiphy *wiphy, struct wiphy_work *work)
+ {
+ struct ieee80211_local *local =
+ container_of(work, struct ieee80211_local, scan_work.work);
+@@ -1154,7 +1153,8 @@ void ieee80211_scan_work(struct work_struct *work)
+ }
+ } while (next_delay == 0);
+
+- ieee80211_queue_delayed_work(&local->hw, &local->scan_work, next_delay);
++ wiphy_delayed_work_queue(local->hw.wiphy, &local->scan_work,
++ next_delay);
+ goto out;
+
+ out_complete:
+@@ -1297,12 +1297,7 @@ void ieee80211_scan_cancel(struct ieee80211_local *local)
+ goto out;
+ }
+
+- /*
+- * If the work is currently running, it must be blocked on
+- * the mutex, but we'll set scan_sdata = NULL and it'll
+- * simply exit once it acquires the mutex.
+- */
+- cancel_delayed_work(&local->scan_work);
++ wiphy_delayed_work_cancel(local->hw.wiphy, &local->scan_work);
+ /* and clean up */
+ memset(&local->scan_info, 0, sizeof(local->scan_info));
+ __ieee80211_scan_completed(&local->hw, true);
+@@ -1444,10 +1439,11 @@ void ieee80211_sched_scan_end(struct ieee80211_local *local)
+
+ mutex_unlock(&local->mtx);
+
+- cfg80211_sched_scan_stopped(local->hw.wiphy, 0);
++ cfg80211_sched_scan_stopped_locked(local->hw.wiphy, 0);
+ }
+
+-void ieee80211_sched_scan_stopped_work(struct work_struct *work)
++void ieee80211_sched_scan_stopped_work(struct wiphy *wiphy,
++ struct wiphy_work *work)
+ {
+ struct ieee80211_local *local =
+ container_of(work, struct ieee80211_local,
+@@ -1470,6 +1466,6 @@ void ieee80211_sched_scan_stopped(struct ieee80211_hw *hw)
+ if (local->in_reconfig)
+ return;
+
+- schedule_work(&local->sched_scan_stopped_work);
++ wiphy_work_queue(hw->wiphy, &local->sched_scan_stopped_work);
+ }
+ EXPORT_SYMBOL(ieee80211_sched_scan_stopped);
+diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
+index b8c6f6a668fc9..49b71453dec37 100644
+--- a/net/mac80211/sta_info.c
++++ b/net/mac80211/sta_info.c
+@@ -2918,7 +2918,7 @@ void ieee80211_sta_set_max_amsdu_subframes(struct sta_info *sta,
+ WLAN_EXT_CAPA9_MAX_MSDU_IN_AMSDU_MSB) << 1;
+
+ if (val)
+- sta->sta.max_amsdu_subframes = 4 << val;
++ sta->sta.max_amsdu_subframes = 4 << (4 - val);
+ }
+
+ #ifdef CONFIG_LOCKDEP
+diff --git a/net/mac80211/util.c b/net/mac80211/util.c
+index 98806c359b173..1088d90e355ba 100644
+--- a/net/mac80211/util.c
++++ b/net/mac80211/util.c
+@@ -2234,8 +2234,8 @@ static void ieee80211_flush_completed_scan(struct ieee80211_local *local,
+ */
+ if (aborted)
+ set_bit(SCAN_ABORTED, &local->scanning);
+- ieee80211_queue_delayed_work(&local->hw, &local->scan_work, 0);
+- flush_delayed_work(&local->scan_work);
++ wiphy_delayed_work_queue(local->hw.wiphy, &local->scan_work, 0);
++ wiphy_delayed_work_flush(local->hw.wiphy, &local->scan_work);
+ }
+ }
+
+@@ -4069,7 +4069,8 @@ void ieee80211_dfs_cac_cancel(struct ieee80211_local *local)
+ mutex_unlock(&local->mtx);
+ }
+
+-void ieee80211_dfs_radar_detected_work(struct work_struct *work)
++void ieee80211_dfs_radar_detected_work(struct wiphy *wiphy,
++ struct wiphy_work *work)
+ {
+ struct ieee80211_local *local =
+ container_of(work, struct ieee80211_local, radar_detected_work);
+@@ -4087,9 +4088,7 @@ void ieee80211_dfs_radar_detected_work(struct work_struct *work)
+ }
+ mutex_unlock(&local->chanctx_mtx);
+
+- wiphy_lock(local->hw.wiphy);
+ ieee80211_dfs_cac_cancel(local);
+- wiphy_unlock(local->hw.wiphy);
+
+ if (num_chanctx > 1)
+ /* XXX: multi-channel is not supported yet */
+@@ -4104,7 +4103,7 @@ void ieee80211_radar_detected(struct ieee80211_hw *hw)
+
+ trace_api_radar_detected(local);
+
+- schedule_work(&local->radar_detected_work);
++ wiphy_work_queue(hw->wiphy, &local->radar_detected_work);
+ }
+ EXPORT_SYMBOL(ieee80211_radar_detected);
+
+diff --git a/net/netfilter/nf_nat_redirect.c b/net/netfilter/nf_nat_redirect.c
+index f91579c821e9a..5b37487d9d11f 100644
+--- a/net/netfilter/nf_nat_redirect.c
++++ b/net/netfilter/nf_nat_redirect.c
+@@ -10,6 +10,7 @@
+
+ #include <linux/if.h>
+ #include <linux/inetdevice.h>
++#include <linux/in.h>
+ #include <linux/ip.h>
+ #include <linux/kernel.h>
+ #include <linux/netdevice.h>
+@@ -24,81 +25,104 @@
+ #include <net/netfilter/nf_nat.h>
+ #include <net/netfilter/nf_nat_redirect.h>
+
++static unsigned int
++nf_nat_redirect(struct sk_buff *skb, const struct nf_nat_range2 *range,
++ const union nf_inet_addr *newdst)
++{
++ struct nf_nat_range2 newrange;
++ enum ip_conntrack_info ctinfo;
++ struct nf_conn *ct;
++
++ ct = nf_ct_get(skb, &ctinfo);
++
++ memset(&newrange, 0, sizeof(newrange));
++
++ newrange.flags = range->flags | NF_NAT_RANGE_MAP_IPS;
++ newrange.min_addr = *newdst;
++ newrange.max_addr = *newdst;
++ newrange.min_proto = range->min_proto;
++ newrange.max_proto = range->max_proto;
++
++ return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_DST);
++}
++
+ unsigned int
+-nf_nat_redirect_ipv4(struct sk_buff *skb,
+- const struct nf_nat_ipv4_multi_range_compat *mr,
++nf_nat_redirect_ipv4(struct sk_buff *skb, const struct nf_nat_range2 *range,
+ unsigned int hooknum)
+ {
+- struct nf_conn *ct;
+- enum ip_conntrack_info ctinfo;
+- __be32 newdst;
+- struct nf_nat_range2 newrange;
++ union nf_inet_addr newdst = {};
+
+ WARN_ON(hooknum != NF_INET_PRE_ROUTING &&
+ hooknum != NF_INET_LOCAL_OUT);
+
+- ct = nf_ct_get(skb, &ctinfo);
+- WARN_ON(!(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED)));
+-
+ /* Local packets: make them go to loopback */
+ if (hooknum == NF_INET_LOCAL_OUT) {
+- newdst = htonl(0x7F000001);
++ newdst.ip = htonl(INADDR_LOOPBACK);
+ } else {
+ const struct in_device *indev;
+
+- newdst = 0;
+-
+ indev = __in_dev_get_rcu(skb->dev);
+ if (indev) {
+ const struct in_ifaddr *ifa;
+
+ ifa = rcu_dereference(indev->ifa_list);
+ if (ifa)
+- newdst = ifa->ifa_local;
++ newdst.ip = ifa->ifa_local;
+ }
+
+- if (!newdst)
++ if (!newdst.ip)
+ return NF_DROP;
+ }
+
+- /* Transfer from original range. */
+- memset(&newrange.min_addr, 0, sizeof(newrange.min_addr));
+- memset(&newrange.max_addr, 0, sizeof(newrange.max_addr));
+- newrange.flags = mr->range[0].flags | NF_NAT_RANGE_MAP_IPS;
+- newrange.min_addr.ip = newdst;
+- newrange.max_addr.ip = newdst;
+- newrange.min_proto = mr->range[0].min;
+- newrange.max_proto = mr->range[0].max;
+-
+- /* Hand modified range to generic setup. */
+- return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_DST);
++ return nf_nat_redirect(skb, range, &newdst);
+ }
+ EXPORT_SYMBOL_GPL(nf_nat_redirect_ipv4);
+
+ static const struct in6_addr loopback_addr = IN6ADDR_LOOPBACK_INIT;
+
++static bool nf_nat_redirect_ipv6_usable(const struct inet6_ifaddr *ifa, unsigned int scope)
++{
++ unsigned int ifa_addr_type = ipv6_addr_type(&ifa->addr);
++
++ if (ifa_addr_type & IPV6_ADDR_MAPPED)
++ return false;
++
++ if ((ifa->flags & IFA_F_TENTATIVE) && (!(ifa->flags & IFA_F_OPTIMISTIC)))
++ return false;
++
++ if (scope) {
++ unsigned int ifa_scope = ifa_addr_type & IPV6_ADDR_SCOPE_MASK;
++
++ if (!(scope & ifa_scope))
++ return false;
++ }
++
++ return true;
++}
++
+ unsigned int
+ nf_nat_redirect_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range,
+ unsigned int hooknum)
+ {
+- struct nf_nat_range2 newrange;
+- struct in6_addr newdst;
+- enum ip_conntrack_info ctinfo;
+- struct nf_conn *ct;
++ union nf_inet_addr newdst = {};
+
+- ct = nf_ct_get(skb, &ctinfo);
+ if (hooknum == NF_INET_LOCAL_OUT) {
+- newdst = loopback_addr;
++ newdst.in6 = loopback_addr;
+ } else {
++ unsigned int scope = ipv6_addr_scope(&ipv6_hdr(skb)->daddr);
+ struct inet6_dev *idev;
+- struct inet6_ifaddr *ifa;
+ bool addr = false;
+
+ idev = __in6_dev_get(skb->dev);
+ if (idev != NULL) {
++ const struct inet6_ifaddr *ifa;
++
+ read_lock_bh(&idev->lock);
+ list_for_each_entry(ifa, &idev->addr_list, if_list) {
+- newdst = ifa->addr;
++ if (!nf_nat_redirect_ipv6_usable(ifa, scope))
++ continue;
++
++ newdst.in6 = ifa->addr;
+ addr = true;
+ break;
+ }
+@@ -109,12 +133,6 @@ nf_nat_redirect_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range,
+ return NF_DROP;
+ }
+
+- newrange.flags = range->flags | NF_NAT_RANGE_MAP_IPS;
+- newrange.min_addr.in6 = newdst;
+- newrange.max_addr.in6 = newdst;
+- newrange.min_proto = range->min_proto;
+- newrange.max_proto = range->max_proto;
+-
+- return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_DST);
++ return nf_nat_redirect(skb, range, &newdst);
+ }
+ EXPORT_SYMBOL_GPL(nf_nat_redirect_ipv6);
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 5c783199b4999..d6d59e36d17a7 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -3279,10 +3279,6 @@ static int __nf_tables_dump_rules(struct sk_buff *skb,
+ goto cont_skip;
+ if (*idx < s_idx)
+ goto cont;
+- if (*idx > s_idx) {
+- memset(&cb->args[1], 0,
+- sizeof(cb->args) - sizeof(cb->args[0]));
+- }
+ if (prule)
+ handle = prule->handle;
+ else
+diff --git a/net/netfilter/nft_redir.c b/net/netfilter/nft_redir.c
+index 5ed64b2bd15e8..08b408d3e113d 100644
+--- a/net/netfilter/nft_redir.c
++++ b/net/netfilter/nft_redir.c
+@@ -64,6 +64,8 @@ static int nft_redir_init(const struct nft_ctx *ctx,
+ } else {
+ priv->sreg_proto_max = priv->sreg_proto_min;
+ }
++
++ priv->flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
+ }
+
+ if (tb[NFTA_REDIR_FLAGS]) {
+@@ -98,25 +100,37 @@ nla_put_failure:
+ return -1;
+ }
+
+-static void nft_redir_ipv4_eval(const struct nft_expr *expr,
+- struct nft_regs *regs,
+- const struct nft_pktinfo *pkt)
++static void nft_redir_eval(const struct nft_expr *expr,
++ struct nft_regs *regs,
++ const struct nft_pktinfo *pkt)
+ {
+- struct nft_redir *priv = nft_expr_priv(expr);
+- struct nf_nat_ipv4_multi_range_compat mr;
++ const struct nft_redir *priv = nft_expr_priv(expr);
++ struct nf_nat_range2 range;
+
+- memset(&mr, 0, sizeof(mr));
++ memset(&range, 0, sizeof(range));
++ range.flags = priv->flags;
+ if (priv->sreg_proto_min) {
+- mr.range[0].min.all = (__force __be16)nft_reg_load16(
+- &regs->data[priv->sreg_proto_min]);
+- mr.range[0].max.all = (__force __be16)nft_reg_load16(
+- &regs->data[priv->sreg_proto_max]);
+- mr.range[0].flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
++ range.min_proto.all = (__force __be16)
++ nft_reg_load16(&regs->data[priv->sreg_proto_min]);
++ range.max_proto.all = (__force __be16)
++ nft_reg_load16(&regs->data[priv->sreg_proto_max]);
+ }
+
+- mr.range[0].flags |= priv->flags;
+-
+- regs->verdict.code = nf_nat_redirect_ipv4(pkt->skb, &mr, nft_hook(pkt));
++ switch (nft_pf(pkt)) {
++ case NFPROTO_IPV4:
++ regs->verdict.code = nf_nat_redirect_ipv4(pkt->skb, &range,
++ nft_hook(pkt));
++ break;
++#ifdef CONFIG_NF_TABLES_IPV6
++ case NFPROTO_IPV6:
++ regs->verdict.code = nf_nat_redirect_ipv6(pkt->skb, &range,
++ nft_hook(pkt));
++ break;
++#endif
++ default:
++ WARN_ON_ONCE(1);
++ break;
++ }
+ }
+
+ static void
+@@ -129,7 +143,7 @@ static struct nft_expr_type nft_redir_ipv4_type;
+ static const struct nft_expr_ops nft_redir_ipv4_ops = {
+ .type = &nft_redir_ipv4_type,
+ .size = NFT_EXPR_SIZE(sizeof(struct nft_redir)),
+- .eval = nft_redir_ipv4_eval,
++ .eval = nft_redir_eval,
+ .init = nft_redir_init,
+ .destroy = nft_redir_ipv4_destroy,
+ .dump = nft_redir_dump,
+@@ -147,28 +161,6 @@ static struct nft_expr_type nft_redir_ipv4_type __read_mostly = {
+ };
+
+ #ifdef CONFIG_NF_TABLES_IPV6
+-static void nft_redir_ipv6_eval(const struct nft_expr *expr,
+- struct nft_regs *regs,
+- const struct nft_pktinfo *pkt)
+-{
+- struct nft_redir *priv = nft_expr_priv(expr);
+- struct nf_nat_range2 range;
+-
+- memset(&range, 0, sizeof(range));
+- if (priv->sreg_proto_min) {
+- range.min_proto.all = (__force __be16)nft_reg_load16(
+- &regs->data[priv->sreg_proto_min]);
+- range.max_proto.all = (__force __be16)nft_reg_load16(
+- &regs->data[priv->sreg_proto_max]);
+- range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
+- }
+-
+- range.flags |= priv->flags;
+-
+- regs->verdict.code =
+- nf_nat_redirect_ipv6(pkt->skb, &range, nft_hook(pkt));
+-}
+-
+ static void
+ nft_redir_ipv6_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
+ {
+@@ -179,7 +171,7 @@ static struct nft_expr_type nft_redir_ipv6_type;
+ static const struct nft_expr_ops nft_redir_ipv6_ops = {
+ .type = &nft_redir_ipv6_type,
+ .size = NFT_EXPR_SIZE(sizeof(struct nft_redir)),
+- .eval = nft_redir_ipv6_eval,
++ .eval = nft_redir_eval,
+ .init = nft_redir_init,
+ .destroy = nft_redir_ipv6_destroy,
+ .dump = nft_redir_dump,
+@@ -198,20 +190,6 @@ static struct nft_expr_type nft_redir_ipv6_type __read_mostly = {
+ #endif
+
+ #ifdef CONFIG_NF_TABLES_INET
+-static void nft_redir_inet_eval(const struct nft_expr *expr,
+- struct nft_regs *regs,
+- const struct nft_pktinfo *pkt)
+-{
+- switch (nft_pf(pkt)) {
+- case NFPROTO_IPV4:
+- return nft_redir_ipv4_eval(expr, regs, pkt);
+- case NFPROTO_IPV6:
+- return nft_redir_ipv6_eval(expr, regs, pkt);
+- }
+-
+- WARN_ON_ONCE(1);
+-}
+-
+ static void
+ nft_redir_inet_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
+ {
+@@ -222,7 +200,7 @@ static struct nft_expr_type nft_redir_inet_type;
+ static const struct nft_expr_ops nft_redir_inet_ops = {
+ .type = &nft_redir_inet_type,
+ .size = NFT_EXPR_SIZE(sizeof(struct nft_redir)),
+- .eval = nft_redir_inet_eval,
++ .eval = nft_redir_eval,
+ .init = nft_redir_init,
+ .destroy = nft_redir_inet_destroy,
+ .dump = nft_redir_dump,
+diff --git a/net/netfilter/xt_REDIRECT.c b/net/netfilter/xt_REDIRECT.c
+index 353ca7801251a..ff66b56a3f97d 100644
+--- a/net/netfilter/xt_REDIRECT.c
++++ b/net/netfilter/xt_REDIRECT.c
+@@ -46,7 +46,6 @@ static void redirect_tg_destroy(const struct xt_tgdtor_param *par)
+ nf_ct_netns_put(par->net, par->family);
+ }
+
+-/* FIXME: Take multiple ranges --RR */
+ static int redirect_tg4_check(const struct xt_tgchk_param *par)
+ {
+ const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
+@@ -65,7 +64,14 @@ static int redirect_tg4_check(const struct xt_tgchk_param *par)
+ static unsigned int
+ redirect_tg4(struct sk_buff *skb, const struct xt_action_param *par)
+ {
+- return nf_nat_redirect_ipv4(skb, par->targinfo, xt_hooknum(par));
++ const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
++ struct nf_nat_range2 range = {
++ .flags = mr->range[0].flags,
++ .min_proto = mr->range[0].min,
++ .max_proto = mr->range[0].max,
++ };
++
++ return nf_nat_redirect_ipv4(skb, &range, xt_hooknum(par));
+ }
+
+ static struct xt_target redirect_tg_reg[] __read_mostly = {
+diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
+index 7ddb9a78e3fc8..ef93e0d3bee04 100644
+--- a/net/netfilter/xt_recent.c
++++ b/net/netfilter/xt_recent.c
+@@ -561,7 +561,7 @@ recent_mt_proc_write(struct file *file, const char __user *input,
+ {
+ struct recent_table *t = pde_data(file_inode(file));
+ struct recent_entry *e;
+- char buf[sizeof("+b335:1d35:1e55:dead:c0de:1715:5afe:c0de")];
++ char buf[sizeof("+b335:1d35:1e55:dead:c0de:1715:255.255.255.255")];
+ const char *c = buf;
+ union nf_inet_addr addr = {};
+ u_int16_t family;
+diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
+index 4ea41d6e36969..d676119984c09 100644
+--- a/net/smc/af_smc.c
++++ b/net/smc/af_smc.c
+@@ -274,7 +274,7 @@ static int __smc_release(struct smc_sock *smc)
+
+ if (!smc->use_fallback) {
+ rc = smc_close_active(smc);
+- sock_set_flag(sk, SOCK_DEAD);
++ smc_sock_set_flag(sk, SOCK_DEAD);
+ sk->sk_shutdown |= SHUTDOWN_MASK;
+ } else {
+ if (sk->sk_state != SMC_CLOSED) {
+@@ -1710,7 +1710,7 @@ static int smc_clcsock_accept(struct smc_sock *lsmc, struct smc_sock **new_smc)
+ if (new_clcsock)
+ sock_release(new_clcsock);
+ new_sk->sk_state = SMC_CLOSED;
+- sock_set_flag(new_sk, SOCK_DEAD);
++ smc_sock_set_flag(new_sk, SOCK_DEAD);
+ sock_put(new_sk); /* final */
+ *new_smc = NULL;
+ goto out;
+diff --git a/net/smc/smc.h b/net/smc/smc.h
+index 1d36720fc019c..bcb57e60b2155 100644
+--- a/net/smc/smc.h
++++ b/net/smc/smc.h
+@@ -377,4 +377,9 @@ int smc_nl_dump_hs_limitation(struct sk_buff *skb, struct netlink_callback *cb);
+ int smc_nl_enable_hs_limitation(struct sk_buff *skb, struct genl_info *info);
+ int smc_nl_disable_hs_limitation(struct sk_buff *skb, struct genl_info *info);
+
++static inline void smc_sock_set_flag(struct sock *sk, enum sock_flags flag)
++{
++ set_bit(flag, &sk->sk_flags);
++}
++
+ #endif /* __SMC_H */
+diff --git a/net/smc/smc_cdc.c b/net/smc/smc_cdc.c
+index 89105e95b4523..3c06625ceb200 100644
+--- a/net/smc/smc_cdc.c
++++ b/net/smc/smc_cdc.c
+@@ -28,13 +28,15 @@ static void smc_cdc_tx_handler(struct smc_wr_tx_pend_priv *pnd_snd,
+ {
+ struct smc_cdc_tx_pend *cdcpend = (struct smc_cdc_tx_pend *)pnd_snd;
+ struct smc_connection *conn = cdcpend->conn;
++ struct smc_buf_desc *sndbuf_desc;
+ struct smc_sock *smc;
+ int diff;
+
++ sndbuf_desc = conn->sndbuf_desc;
+ smc = container_of(conn, struct smc_sock, conn);
+ bh_lock_sock(&smc->sk);
+- if (!wc_status) {
+- diff = smc_curs_diff(cdcpend->conn->sndbuf_desc->len,
++ if (!wc_status && sndbuf_desc) {
++ diff = smc_curs_diff(sndbuf_desc->len,
+ &cdcpend->conn->tx_curs_fin,
+ &cdcpend->cursor);
+ /* sndbuf_space is decreased in smc_sendmsg */
+@@ -114,9 +116,6 @@ int smc_cdc_msg_send(struct smc_connection *conn,
+ union smc_host_cursor cfed;
+ int rc;
+
+- if (unlikely(!READ_ONCE(conn->sndbuf_desc)))
+- return -ENOBUFS;
+-
+ smc_cdc_add_pending_send(conn, pend);
+
+ conn->tx_cdc_seq++;
+@@ -385,7 +384,7 @@ static void smc_cdc_msg_recv_action(struct smc_sock *smc,
+ smc->sk.sk_shutdown |= RCV_SHUTDOWN;
+ if (smc->clcsock && smc->clcsock->sk)
+ smc->clcsock->sk->sk_shutdown |= RCV_SHUTDOWN;
+- sock_set_flag(&smc->sk, SOCK_DONE);
++ smc_sock_set_flag(&smc->sk, SOCK_DONE);
+ sock_hold(&smc->sk); /* sock_put in close_work */
+ if (!queue_work(smc_close_wq, &conn->close_work))
+ sock_put(&smc->sk);
+diff --git a/net/smc/smc_close.c b/net/smc/smc_close.c
+index dbdf03e8aa5b5..10219f55aad14 100644
+--- a/net/smc/smc_close.c
++++ b/net/smc/smc_close.c
+@@ -116,7 +116,8 @@ static void smc_close_cancel_work(struct smc_sock *smc)
+ struct sock *sk = &smc->sk;
+
+ release_sock(sk);
+- cancel_work_sync(&smc->conn.close_work);
++ if (cancel_work_sync(&smc->conn.close_work))
++ sock_put(sk);
+ cancel_delayed_work_sync(&smc->conn.tx_work);
+ lock_sock(sk);
+ }
+@@ -173,7 +174,7 @@ void smc_close_active_abort(struct smc_sock *smc)
+ break;
+ }
+
+- sock_set_flag(sk, SOCK_DEAD);
++ smc_sock_set_flag(sk, SOCK_DEAD);
+ sk->sk_state_change(sk);
+
+ if (release_clcsock) {
+diff --git a/net/tipc/link.c b/net/tipc/link.c
+index 2eff1c7949cbc..8715c9b05f90d 100644
+--- a/net/tipc/link.c
++++ b/net/tipc/link.c
+@@ -1446,7 +1446,7 @@ u16 tipc_get_gap_ack_blks(struct tipc_gap_ack_blks **ga, struct tipc_link *l,
+ p = (struct tipc_gap_ack_blks *)msg_data(hdr);
+ sz = ntohs(p->len);
+ /* Sanity check */
+- if (sz == struct_size(p, gacks, p->ugack_cnt + p->bgack_cnt)) {
++ if (sz == struct_size(p, gacks, size_add(p->ugack_cnt, p->bgack_cnt))) {
+ /* Good, check if the desired type exists */
+ if ((uc && p->ugack_cnt) || (!uc && p->bgack_cnt))
+ goto ok;
+@@ -1533,7 +1533,7 @@ static u16 tipc_build_gap_ack_blks(struct tipc_link *l, struct tipc_msg *hdr)
+ __tipc_build_gap_ack_blks(ga, l, ga->bgack_cnt) : 0;
+
+ /* Total len */
+- len = struct_size(ga, gacks, ga->bgack_cnt + ga->ugack_cnt);
++ len = struct_size(ga, gacks, size_add(ga->bgack_cnt, ga->ugack_cnt));
+ ga->len = htons(len);
+ return len;
+ }
+diff --git a/net/tipc/netlink.c b/net/tipc/netlink.c
+index e8fd257c0e688..1a9a5bdaccf4f 100644
+--- a/net/tipc/netlink.c
++++ b/net/tipc/netlink.c
+@@ -88,7 +88,7 @@ const struct nla_policy tipc_nl_net_policy[TIPC_NLA_NET_MAX + 1] = {
+
+ const struct nla_policy tipc_nl_link_policy[TIPC_NLA_LINK_MAX + 1] = {
+ [TIPC_NLA_LINK_UNSPEC] = { .type = NLA_UNSPEC },
+- [TIPC_NLA_LINK_NAME] = { .type = NLA_STRING,
++ [TIPC_NLA_LINK_NAME] = { .type = NLA_NUL_STRING,
+ .len = TIPC_MAX_LINK_NAME },
+ [TIPC_NLA_LINK_MTU] = { .type = NLA_U32 },
+ [TIPC_NLA_LINK_BROADCAST] = { .type = NLA_FLAG },
+@@ -125,7 +125,7 @@ const struct nla_policy tipc_nl_prop_policy[TIPC_NLA_PROP_MAX + 1] = {
+
+ const struct nla_policy tipc_nl_bearer_policy[TIPC_NLA_BEARER_MAX + 1] = {
+ [TIPC_NLA_BEARER_UNSPEC] = { .type = NLA_UNSPEC },
+- [TIPC_NLA_BEARER_NAME] = { .type = NLA_STRING,
++ [TIPC_NLA_BEARER_NAME] = { .type = NLA_NUL_STRING,
+ .len = TIPC_MAX_BEARER_NAME },
+ [TIPC_NLA_BEARER_PROP] = { .type = NLA_NESTED },
+ [TIPC_NLA_BEARER_DOMAIN] = { .type = NLA_U32 }
+diff --git a/net/tls/tls.h b/net/tls/tls.h
+index 17737a65c643a..0672acab27731 100644
+--- a/net/tls/tls.h
++++ b/net/tls/tls.h
+@@ -70,6 +70,8 @@ struct tls_rec {
+ char content_type;
+ struct scatterlist sg_content_type;
+
++ struct sock *sk;
++
+ char aad_space[TLS_AAD_SPACE_SIZE];
+ u8 iv_data[MAX_IV_SIZE];
+ struct aead_request aead_req;
+diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
+index 2af72d349192e..2e60bf06adff0 100644
+--- a/net/tls/tls_sw.c
++++ b/net/tls/tls_sw.c
+@@ -38,6 +38,7 @@
+ #include <linux/bug.h>
+ #include <linux/sched/signal.h>
+ #include <linux/module.h>
++#include <linux/kernel.h>
+ #include <linux/splice.h>
+ #include <crypto/aead.h>
+
+@@ -57,6 +58,7 @@ struct tls_decrypt_arg {
+ };
+
+ struct tls_decrypt_ctx {
++ struct sock *sk;
+ u8 iv[MAX_IV_SIZE];
+ u8 aad[TLS_MAX_AAD_SIZE];
+ u8 tail;
+@@ -179,18 +181,25 @@ static int tls_padding_length(struct tls_prot_info *prot, struct sk_buff *skb,
+ return sub;
+ }
+
+-static void tls_decrypt_done(struct crypto_async_request *req, int err)
++static void tls_decrypt_done(crypto_completion_data_t *data, int err)
+ {
+- struct aead_request *aead_req = (struct aead_request *)req;
++ struct aead_request *aead_req = crypto_get_completion_data(data);
++ struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
+ struct scatterlist *sgout = aead_req->dst;
+ struct scatterlist *sgin = aead_req->src;
+ struct tls_sw_context_rx *ctx;
++ struct tls_decrypt_ctx *dctx;
+ struct tls_context *tls_ctx;
+ struct scatterlist *sg;
+ unsigned int pages;
+ struct sock *sk;
++ int aead_size;
+
+- sk = (struct sock *)req->data;
++ aead_size = sizeof(*aead_req) + crypto_aead_reqsize(aead);
++ aead_size = ALIGN(aead_size, __alignof__(*dctx));
++ dctx = (void *)((u8 *)aead_req + aead_size);
++
++ sk = dctx->sk;
+ tls_ctx = tls_get_ctx(sk);
+ ctx = tls_sw_ctx_rx(tls_ctx);
+
+@@ -242,7 +251,7 @@ static int tls_do_decryption(struct sock *sk,
+ if (darg->async) {
+ aead_request_set_callback(aead_req,
+ CRYPTO_TFM_REQ_MAY_BACKLOG,
+- tls_decrypt_done, sk);
++ tls_decrypt_done, aead_req);
+ atomic_inc(&ctx->decrypt_pending);
+ } else {
+ aead_request_set_callback(aead_req,
+@@ -338,6 +347,8 @@ static struct tls_rec *tls_get_rec(struct sock *sk)
+ sg_set_buf(&rec->sg_aead_out[0], rec->aad_space, prot->aad_size);
+ sg_unmark_end(&rec->sg_aead_out[1]);
+
++ rec->sk = sk;
++
+ return rec;
+ }
+
+@@ -419,22 +430,27 @@ tx_err:
+ return rc;
+ }
+
+-static void tls_encrypt_done(struct crypto_async_request *req, int err)
++static void tls_encrypt_done(crypto_completion_data_t *data, int err)
+ {
+- struct aead_request *aead_req = (struct aead_request *)req;
+- struct sock *sk = req->data;
+- struct tls_context *tls_ctx = tls_get_ctx(sk);
+- struct tls_prot_info *prot = &tls_ctx->prot_info;
+- struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
++ struct aead_request *aead_req = crypto_get_completion_data(data);
++ struct tls_sw_context_tx *ctx;
++ struct tls_context *tls_ctx;
++ struct tls_prot_info *prot;
+ struct scatterlist *sge;
+ struct sk_msg *msg_en;
+ struct tls_rec *rec;
+ bool ready = false;
++ struct sock *sk;
+ int pending;
+
+ rec = container_of(aead_req, struct tls_rec, aead_req);
+ msg_en = &rec->msg_encrypted;
+
++ sk = rec->sk;
++ tls_ctx = tls_get_ctx(sk);
++ prot = &tls_ctx->prot_info;
++ ctx = tls_sw_ctx_tx(tls_ctx);
++
+ sge = sk_msg_elem(msg_en, msg_en->sg.curr);
+ sge->offset -= prot->prepend_size;
+ sge->length += prot->prepend_size;
+@@ -522,7 +538,7 @@ static int tls_do_encryption(struct sock *sk,
+ data_len, rec->iv_data);
+
+ aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+- tls_encrypt_done, sk);
++ tls_encrypt_done, aead_req);
+
+ /* Add the record in tx_list */
+ list_add_tail((struct list_head *)&rec->list, &ctx->tx_list);
+@@ -1495,7 +1511,8 @@ static int tls_decrypt_sg(struct sock *sk, struct iov_iter *out_iov,
+ * Both structs are variable length.
+ */
+ aead_size = sizeof(*aead_req) + crypto_aead_reqsize(ctx->aead_recv);
+- mem = kmalloc(aead_size + struct_size(dctx, sg, n_sgin + n_sgout),
++ aead_size = ALIGN(aead_size, __alignof__(*dctx));
++ mem = kmalloc(aead_size + struct_size(dctx, sg, size_add(n_sgin, n_sgout)),
+ sk->sk_allocation);
+ if (!mem) {
+ err = -ENOMEM;
+@@ -1505,6 +1522,7 @@ static int tls_decrypt_sg(struct sock *sk, struct iov_iter *out_iov,
+ /* Segment the allocated memory */
+ aead_req = (struct aead_request *)mem;
+ dctx = (struct tls_decrypt_ctx *)(mem + aead_size);
++ dctx->sk = sk;
+ sgin = &dctx->sg[0];
+ sgout = &dctx->sg[n_sgin];
+
+diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
+index 460e7fbb42da3..16575ea836590 100644
+--- a/net/vmw_vsock/virtio_transport.c
++++ b/net/vmw_vsock/virtio_transport.c
+@@ -42,8 +42,7 @@ struct virtio_vsock {
+ bool tx_run;
+
+ struct work_struct send_pkt_work;
+- spinlock_t send_pkt_list_lock;
+- struct list_head send_pkt_list;
++ struct sk_buff_head send_pkt_queue;
+
+ atomic_t queued_replies;
+
+@@ -101,41 +100,31 @@ virtio_transport_send_pkt_work(struct work_struct *work)
+ vq = vsock->vqs[VSOCK_VQ_TX];
+
+ for (;;) {
+- struct virtio_vsock_pkt *pkt;
+ struct scatterlist hdr, buf, *sgs[2];
+ int ret, in_sg = 0, out_sg = 0;
++ struct sk_buff *skb;
+ bool reply;
+
+- spin_lock_bh(&vsock->send_pkt_list_lock);
+- if (list_empty(&vsock->send_pkt_list)) {
+- spin_unlock_bh(&vsock->send_pkt_list_lock);
++ skb = virtio_vsock_skb_dequeue(&vsock->send_pkt_queue);
++ if (!skb)
+ break;
+- }
+-
+- pkt = list_first_entry(&vsock->send_pkt_list,
+- struct virtio_vsock_pkt, list);
+- list_del_init(&pkt->list);
+- spin_unlock_bh(&vsock->send_pkt_list_lock);
+
+- virtio_transport_deliver_tap_pkt(pkt);
++ virtio_transport_deliver_tap_pkt(skb);
++ reply = virtio_vsock_skb_reply(skb);
+
+- reply = pkt->reply;
+-
+- sg_init_one(&hdr, &pkt->hdr, sizeof(pkt->hdr));
++ sg_init_one(&hdr, virtio_vsock_hdr(skb), sizeof(*virtio_vsock_hdr(skb)));
+ sgs[out_sg++] = &hdr;
+- if (pkt->buf) {
+- sg_init_one(&buf, pkt->buf, pkt->len);
++ if (skb->len > 0) {
++ sg_init_one(&buf, skb->data, skb->len);
+ sgs[out_sg++] = &buf;
+ }
+
+- ret = virtqueue_add_sgs(vq, sgs, out_sg, in_sg, pkt, GFP_KERNEL);
++ ret = virtqueue_add_sgs(vq, sgs, out_sg, in_sg, skb, GFP_KERNEL);
+ /* Usually this means that there is no more space available in
+ * the vq
+ */
+ if (ret < 0) {
+- spin_lock_bh(&vsock->send_pkt_list_lock);
+- list_add(&pkt->list, &vsock->send_pkt_list);
+- spin_unlock_bh(&vsock->send_pkt_list_lock);
++ virtio_vsock_skb_queue_head(&vsock->send_pkt_queue, skb);
+ break;
+ }
+
+@@ -164,32 +153,32 @@ out:
+ }
+
+ static int
+-virtio_transport_send_pkt(struct virtio_vsock_pkt *pkt)
++virtio_transport_send_pkt(struct sk_buff *skb)
+ {
++ struct virtio_vsock_hdr *hdr;
+ struct virtio_vsock *vsock;
+- int len = pkt->len;
++ int len = skb->len;
++
++ hdr = virtio_vsock_hdr(skb);
+
+ rcu_read_lock();
+ vsock = rcu_dereference(the_virtio_vsock);
+ if (!vsock) {
+- virtio_transport_free_pkt(pkt);
++ kfree_skb(skb);
+ len = -ENODEV;
+ goto out_rcu;
+ }
+
+- if (le64_to_cpu(pkt->hdr.dst_cid) == vsock->guest_cid) {
+- virtio_transport_free_pkt(pkt);
++ if (le64_to_cpu(hdr->dst_cid) == vsock->guest_cid) {
++ kfree_skb(skb);
+ len = -ENODEV;
+ goto out_rcu;
+ }
+
+- if (pkt->reply)
++ if (virtio_vsock_skb_reply(skb))
+ atomic_inc(&vsock->queued_replies);
+
+- spin_lock_bh(&vsock->send_pkt_list_lock);
+- list_add_tail(&pkt->list, &vsock->send_pkt_list);
+- spin_unlock_bh(&vsock->send_pkt_list_lock);
+-
++ virtio_vsock_skb_queue_tail(&vsock->send_pkt_queue, skb);
+ queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work);
+
+ out_rcu:
+@@ -201,9 +190,7 @@ static int
+ virtio_transport_cancel_pkt(struct vsock_sock *vsk)
+ {
+ struct virtio_vsock *vsock;
+- struct virtio_vsock_pkt *pkt, *n;
+ int cnt = 0, ret;
+- LIST_HEAD(freeme);
+
+ rcu_read_lock();
+ vsock = rcu_dereference(the_virtio_vsock);
+@@ -212,20 +199,7 @@ virtio_transport_cancel_pkt(struct vsock_sock *vsk)
+ goto out_rcu;
+ }
+
+- spin_lock_bh(&vsock->send_pkt_list_lock);
+- list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) {
+- if (pkt->vsk != vsk)
+- continue;
+- list_move(&pkt->list, &freeme);
+- }
+- spin_unlock_bh(&vsock->send_pkt_list_lock);
+-
+- list_for_each_entry_safe(pkt, n, &freeme, list) {
+- if (pkt->reply)
+- cnt++;
+- list_del(&pkt->list);
+- virtio_transport_free_pkt(pkt);
+- }
++ cnt = virtio_transport_purge_skbs(vsk, &vsock->send_pkt_queue);
+
+ if (cnt) {
+ struct virtqueue *rx_vq = vsock->vqs[VSOCK_VQ_RX];
+@@ -246,38 +220,28 @@ out_rcu:
+
+ static void virtio_vsock_rx_fill(struct virtio_vsock *vsock)
+ {
+- int buf_len = VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE;
+- struct virtio_vsock_pkt *pkt;
+- struct scatterlist hdr, buf, *sgs[2];
++ int total_len = VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE + VIRTIO_VSOCK_SKB_HEADROOM;
++ struct scatterlist pkt, *p;
+ struct virtqueue *vq;
++ struct sk_buff *skb;
+ int ret;
+
+ vq = vsock->vqs[VSOCK_VQ_RX];
+
+ do {
+- pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
+- if (!pkt)
++ skb = virtio_vsock_alloc_skb(total_len, GFP_KERNEL);
++ if (!skb)
+ break;
+
+- pkt->buf = kmalloc(buf_len, GFP_KERNEL);
+- if (!pkt->buf) {
+- virtio_transport_free_pkt(pkt);
++ memset(skb->head, 0, VIRTIO_VSOCK_SKB_HEADROOM);
++ sg_init_one(&pkt, virtio_vsock_hdr(skb), total_len);
++ p = &pkt;
++ ret = virtqueue_add_sgs(vq, &p, 0, 1, skb, GFP_KERNEL);
++ if (ret < 0) {
++ kfree_skb(skb);
+ break;
+ }
+
+- pkt->buf_len = buf_len;
+- pkt->len = buf_len;
+-
+- sg_init_one(&hdr, &pkt->hdr, sizeof(pkt->hdr));
+- sgs[0] = &hdr;
+-
+- sg_init_one(&buf, pkt->buf, buf_len);
+- sgs[1] = &buf;
+- ret = virtqueue_add_sgs(vq, sgs, 0, 2, pkt, GFP_KERNEL);
+- if (ret) {
+- virtio_transport_free_pkt(pkt);
+- break;
+- }
+ vsock->rx_buf_nr++;
+ } while (vq->num_free);
+ if (vsock->rx_buf_nr > vsock->rx_buf_max_nr)
+@@ -299,12 +263,12 @@ static void virtio_transport_tx_work(struct work_struct *work)
+ goto out;
+
+ do {
+- struct virtio_vsock_pkt *pkt;
++ struct sk_buff *skb;
+ unsigned int len;
+
+ virtqueue_disable_cb(vq);
+- while ((pkt = virtqueue_get_buf(vq, &len)) != NULL) {
+- virtio_transport_free_pkt(pkt);
++ while ((skb = virtqueue_get_buf(vq, &len)) != NULL) {
++ consume_skb(skb);
+ added = true;
+ }
+ } while (!virtqueue_enable_cb(vq));
+@@ -529,7 +493,7 @@ static void virtio_transport_rx_work(struct work_struct *work)
+ do {
+ virtqueue_disable_cb(vq);
+ for (;;) {
+- struct virtio_vsock_pkt *pkt;
++ struct sk_buff *skb;
+ unsigned int len;
+
+ if (!virtio_transport_more_replies(vsock)) {
+@@ -540,23 +504,22 @@ static void virtio_transport_rx_work(struct work_struct *work)
+ goto out;
+ }
+
+- pkt = virtqueue_get_buf(vq, &len);
+- if (!pkt) {
++ skb = virtqueue_get_buf(vq, &len);
++ if (!skb)
+ break;
+- }
+
+ vsock->rx_buf_nr--;
+
+ /* Drop short/long packets */
+- if (unlikely(len < sizeof(pkt->hdr) ||
+- len > sizeof(pkt->hdr) + pkt->len)) {
+- virtio_transport_free_pkt(pkt);
++ if (unlikely(len < sizeof(struct virtio_vsock_hdr) ||
++ len > virtio_vsock_skb_len(skb))) {
++ kfree_skb(skb);
+ continue;
+ }
+
+- pkt->len = len - sizeof(pkt->hdr);
+- virtio_transport_deliver_tap_pkt(pkt);
+- virtio_transport_recv_pkt(&virtio_transport, pkt);
++ virtio_vsock_skb_rx_put(skb);
++ virtio_transport_deliver_tap_pkt(skb);
++ virtio_transport_recv_pkt(&virtio_transport, skb);
+ }
+ } while (!virtqueue_enable_cb(vq));
+
+@@ -624,7 +587,7 @@ static void virtio_vsock_vqs_start(struct virtio_vsock *vsock)
+ static void virtio_vsock_vqs_del(struct virtio_vsock *vsock)
+ {
+ struct virtio_device *vdev = vsock->vdev;
+- struct virtio_vsock_pkt *pkt;
++ struct sk_buff *skb;
+
+ /* Reset all connected sockets when the VQs disappear */
+ vsock_for_each_connected_socket(&virtio_transport.transport,
+@@ -651,23 +614,16 @@ static void virtio_vsock_vqs_del(struct virtio_vsock *vsock)
+ virtio_reset_device(vdev);
+
+ mutex_lock(&vsock->rx_lock);
+- while ((pkt = virtqueue_detach_unused_buf(vsock->vqs[VSOCK_VQ_RX])))
+- virtio_transport_free_pkt(pkt);
++ while ((skb = virtqueue_detach_unused_buf(vsock->vqs[VSOCK_VQ_RX])))
++ kfree_skb(skb);
+ mutex_unlock(&vsock->rx_lock);
+
+ mutex_lock(&vsock->tx_lock);
+- while ((pkt = virtqueue_detach_unused_buf(vsock->vqs[VSOCK_VQ_TX])))
+- virtio_transport_free_pkt(pkt);
++ while ((skb = virtqueue_detach_unused_buf(vsock->vqs[VSOCK_VQ_TX])))
++ kfree_skb(skb);
+ mutex_unlock(&vsock->tx_lock);
+
+- spin_lock_bh(&vsock->send_pkt_list_lock);
+- while (!list_empty(&vsock->send_pkt_list)) {
+- pkt = list_first_entry(&vsock->send_pkt_list,
+- struct virtio_vsock_pkt, list);
+- list_del(&pkt->list);
+- virtio_transport_free_pkt(pkt);
+- }
+- spin_unlock_bh(&vsock->send_pkt_list_lock);
++ virtio_vsock_skb_queue_purge(&vsock->send_pkt_queue);
+
+ /* Delete virtqueues and flush outstanding callbacks if any */
+ vdev->config->del_vqs(vdev);
+@@ -704,8 +660,7 @@ static int virtio_vsock_probe(struct virtio_device *vdev)
+ mutex_init(&vsock->tx_lock);
+ mutex_init(&vsock->rx_lock);
+ mutex_init(&vsock->event_lock);
+- spin_lock_init(&vsock->send_pkt_list_lock);
+- INIT_LIST_HEAD(&vsock->send_pkt_list);
++ skb_queue_head_init(&vsock->send_pkt_queue);
+ INIT_WORK(&vsock->rx_work, virtio_transport_rx_work);
+ INIT_WORK(&vsock->tx_work, virtio_transport_tx_work);
+ INIT_WORK(&vsock->event_work, virtio_transport_event_work);
+diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
+index a9980e9b93040..79e79fd6efd19 100644
+--- a/net/vmw_vsock/virtio_transport_common.c
++++ b/net/vmw_vsock/virtio_transport_common.c
+@@ -37,53 +37,58 @@ virtio_transport_get_ops(struct vsock_sock *vsk)
+ return container_of(t, struct virtio_transport, transport);
+ }
+
+-static struct virtio_vsock_pkt *
+-virtio_transport_alloc_pkt(struct virtio_vsock_pkt_info *info,
++/* Returns a new packet on success, otherwise returns NULL.
++ *
++ * If NULL is returned, errp is set to a negative errno.
++ */
++static struct sk_buff *
++virtio_transport_alloc_skb(struct virtio_vsock_pkt_info *info,
+ size_t len,
+ u32 src_cid,
+ u32 src_port,
+ u32 dst_cid,
+ u32 dst_port)
+ {
+- struct virtio_vsock_pkt *pkt;
++ const size_t skb_len = VIRTIO_VSOCK_SKB_HEADROOM + len;
++ struct virtio_vsock_hdr *hdr;
++ struct sk_buff *skb;
++ void *payload;
+ int err;
+
+- pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
+- if (!pkt)
++ skb = virtio_vsock_alloc_skb(skb_len, GFP_KERNEL);
++ if (!skb)
+ return NULL;
+
+- pkt->hdr.type = cpu_to_le16(info->type);
+- pkt->hdr.op = cpu_to_le16(info->op);
+- pkt->hdr.src_cid = cpu_to_le64(src_cid);
+- pkt->hdr.dst_cid = cpu_to_le64(dst_cid);
+- pkt->hdr.src_port = cpu_to_le32(src_port);
+- pkt->hdr.dst_port = cpu_to_le32(dst_port);
+- pkt->hdr.flags = cpu_to_le32(info->flags);
+- pkt->len = len;
+- pkt->hdr.len = cpu_to_le32(len);
+- pkt->reply = info->reply;
+- pkt->vsk = info->vsk;
++ hdr = virtio_vsock_hdr(skb);
++ hdr->type = cpu_to_le16(info->type);
++ hdr->op = cpu_to_le16(info->op);
++ hdr->src_cid = cpu_to_le64(src_cid);
++ hdr->dst_cid = cpu_to_le64(dst_cid);
++ hdr->src_port = cpu_to_le32(src_port);
++ hdr->dst_port = cpu_to_le32(dst_port);
++ hdr->flags = cpu_to_le32(info->flags);
++ hdr->len = cpu_to_le32(len);
++ hdr->buf_alloc = cpu_to_le32(0);
++ hdr->fwd_cnt = cpu_to_le32(0);
+
+ if (info->msg && len > 0) {
+- pkt->buf = kmalloc(len, GFP_KERNEL);
+- if (!pkt->buf)
+- goto out_pkt;
+-
+- pkt->buf_len = len;
+-
+- err = memcpy_from_msg(pkt->buf, info->msg, len);
++ payload = skb_put(skb, len);
++ err = memcpy_from_msg(payload, info->msg, len);
+ if (err)
+ goto out;
+
+ if (msg_data_left(info->msg) == 0 &&
+ info->type == VIRTIO_VSOCK_TYPE_SEQPACKET) {
+- pkt->hdr.flags |= cpu_to_le32(VIRTIO_VSOCK_SEQ_EOM);
++ hdr->flags |= cpu_to_le32(VIRTIO_VSOCK_SEQ_EOM);
+
+ if (info->msg->msg_flags & MSG_EOR)
+- pkt->hdr.flags |= cpu_to_le32(VIRTIO_VSOCK_SEQ_EOR);
++ hdr->flags |= cpu_to_le32(VIRTIO_VSOCK_SEQ_EOR);
+ }
+ }
+
++ if (info->reply)
++ virtio_vsock_skb_set_reply(skb);
++
+ trace_virtio_transport_alloc_pkt(src_cid, src_port,
+ dst_cid, dst_port,
+ len,
+@@ -91,19 +96,23 @@ virtio_transport_alloc_pkt(struct virtio_vsock_pkt_info *info,
+ info->op,
+ info->flags);
+
+- return pkt;
++ if (info->vsk && !skb_set_owner_sk_safe(skb, sk_vsock(info->vsk))) {
++ WARN_ONCE(1, "failed to allocate skb on vsock socket with sk_refcnt == 0\n");
++ goto out;
++ }
++
++ return skb;
+
+ out:
+- kfree(pkt->buf);
+-out_pkt:
+- kfree(pkt);
++ kfree_skb(skb);
+ return NULL;
+ }
+
+ /* Packet capture */
+ static struct sk_buff *virtio_transport_build_skb(void *opaque)
+ {
+- struct virtio_vsock_pkt *pkt = opaque;
++ struct virtio_vsock_hdr *pkt_hdr;
++ struct sk_buff *pkt = opaque;
+ struct af_vsockmon_hdr *hdr;
+ struct sk_buff *skb;
+ size_t payload_len;
+@@ -113,10 +122,11 @@ static struct sk_buff *virtio_transport_build_skb(void *opaque)
+ * the payload length from the header and the buffer pointer taking
+ * care of the offset in the original packet.
+ */
+- payload_len = le32_to_cpu(pkt->hdr.len);
+- payload_buf = pkt->buf + pkt->off;
++ pkt_hdr = virtio_vsock_hdr(pkt);
++ payload_len = pkt->len;
++ payload_buf = pkt->data;
+
+- skb = alloc_skb(sizeof(*hdr) + sizeof(pkt->hdr) + payload_len,
++ skb = alloc_skb(sizeof(*hdr) + sizeof(*pkt_hdr) + payload_len,
+ GFP_ATOMIC);
+ if (!skb)
+ return NULL;
+@@ -124,16 +134,16 @@ static struct sk_buff *virtio_transport_build_skb(void *opaque)
+ hdr = skb_put(skb, sizeof(*hdr));
+
+ /* pkt->hdr is little-endian so no need to byteswap here */
+- hdr->src_cid = pkt->hdr.src_cid;
+- hdr->src_port = pkt->hdr.src_port;
+- hdr->dst_cid = pkt->hdr.dst_cid;
+- hdr->dst_port = pkt->hdr.dst_port;
++ hdr->src_cid = pkt_hdr->src_cid;
++ hdr->src_port = pkt_hdr->src_port;
++ hdr->dst_cid = pkt_hdr->dst_cid;
++ hdr->dst_port = pkt_hdr->dst_port;
+
+ hdr->transport = cpu_to_le16(AF_VSOCK_TRANSPORT_VIRTIO);
+- hdr->len = cpu_to_le16(sizeof(pkt->hdr));
++ hdr->len = cpu_to_le16(sizeof(*pkt_hdr));
+ memset(hdr->reserved, 0, sizeof(hdr->reserved));
+
+- switch (le16_to_cpu(pkt->hdr.op)) {
++ switch (le16_to_cpu(pkt_hdr->op)) {
+ case VIRTIO_VSOCK_OP_REQUEST:
+ case VIRTIO_VSOCK_OP_RESPONSE:
+ hdr->op = cpu_to_le16(AF_VSOCK_OP_CONNECT);
+@@ -154,7 +164,7 @@ static struct sk_buff *virtio_transport_build_skb(void *opaque)
+ break;
+ }
+
+- skb_put_data(skb, &pkt->hdr, sizeof(pkt->hdr));
++ skb_put_data(skb, pkt_hdr, sizeof(*pkt_hdr));
+
+ if (payload_len) {
+ skb_put_data(skb, payload_buf, payload_len);
+@@ -163,13 +173,13 @@ static struct sk_buff *virtio_transport_build_skb(void *opaque)
+ return skb;
+ }
+
+-void virtio_transport_deliver_tap_pkt(struct virtio_vsock_pkt *pkt)
++void virtio_transport_deliver_tap_pkt(struct sk_buff *skb)
+ {
+- if (pkt->tap_delivered)
++ if (virtio_vsock_skb_tap_delivered(skb))
+ return;
+
+- vsock_deliver_tap(virtio_transport_build_skb, pkt);
+- pkt->tap_delivered = true;
++ vsock_deliver_tap(virtio_transport_build_skb, skb);
++ virtio_vsock_skb_set_tap_delivered(skb);
+ }
+ EXPORT_SYMBOL_GPL(virtio_transport_deliver_tap_pkt);
+
+@@ -192,8 +202,8 @@ static int virtio_transport_send_pkt_info(struct vsock_sock *vsk,
+ u32 src_cid, src_port, dst_cid, dst_port;
+ const struct virtio_transport *t_ops;
+ struct virtio_vsock_sock *vvs;
+- struct virtio_vsock_pkt *pkt;
+ u32 pkt_len = info->pkt_len;
++ struct sk_buff *skb;
+
+ info->type = virtio_transport_get_type(sk_vsock(vsk));
+
+@@ -224,42 +234,44 @@ static int virtio_transport_send_pkt_info(struct vsock_sock *vsk,
+ if (pkt_len == 0 && info->op == VIRTIO_VSOCK_OP_RW)
+ return pkt_len;
+
+- pkt = virtio_transport_alloc_pkt(info, pkt_len,
++ skb = virtio_transport_alloc_skb(info, pkt_len,
+ src_cid, src_port,
+ dst_cid, dst_port);
+- if (!pkt) {
++ if (!skb) {
+ virtio_transport_put_credit(vvs, pkt_len);
+ return -ENOMEM;
+ }
+
+- virtio_transport_inc_tx_pkt(vvs, pkt);
++ virtio_transport_inc_tx_pkt(vvs, skb);
+
+- return t_ops->send_pkt(pkt);
++ return t_ops->send_pkt(skb);
+ }
+
+ static bool virtio_transport_inc_rx_pkt(struct virtio_vsock_sock *vvs,
+- struct virtio_vsock_pkt *pkt)
++ u32 len)
+ {
+- if (vvs->rx_bytes + pkt->len > vvs->buf_alloc)
++ if (vvs->rx_bytes + len > vvs->buf_alloc)
+ return false;
+
+- vvs->rx_bytes += pkt->len;
++ vvs->rx_bytes += len;
+ return true;
+ }
+
+ static void virtio_transport_dec_rx_pkt(struct virtio_vsock_sock *vvs,
+- struct virtio_vsock_pkt *pkt)
++ u32 len)
+ {
+- vvs->rx_bytes -= pkt->len;
+- vvs->fwd_cnt += pkt->len;
++ vvs->rx_bytes -= len;
++ vvs->fwd_cnt += len;
+ }
+
+-void virtio_transport_inc_tx_pkt(struct virtio_vsock_sock *vvs, struct virtio_vsock_pkt *pkt)
++void virtio_transport_inc_tx_pkt(struct virtio_vsock_sock *vvs, struct sk_buff *skb)
+ {
++ struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
++
+ spin_lock_bh(&vvs->rx_lock);
+ vvs->last_fwd_cnt = vvs->fwd_cnt;
+- pkt->hdr.fwd_cnt = cpu_to_le32(vvs->fwd_cnt);
+- pkt->hdr.buf_alloc = cpu_to_le32(vvs->buf_alloc);
++ hdr->fwd_cnt = cpu_to_le32(vvs->fwd_cnt);
++ hdr->buf_alloc = cpu_to_le32(vvs->buf_alloc);
+ spin_unlock_bh(&vvs->rx_lock);
+ }
+ EXPORT_SYMBOL_GPL(virtio_transport_inc_tx_pkt);
+@@ -303,29 +315,29 @@ virtio_transport_stream_do_peek(struct vsock_sock *vsk,
+ size_t len)
+ {
+ struct virtio_vsock_sock *vvs = vsk->trans;
+- struct virtio_vsock_pkt *pkt;
+ size_t bytes, total = 0, off;
++ struct sk_buff *skb, *tmp;
+ int err = -EFAULT;
+
+ spin_lock_bh(&vvs->rx_lock);
+
+- list_for_each_entry(pkt, &vvs->rx_queue, list) {
+- off = pkt->off;
++ skb_queue_walk_safe(&vvs->rx_queue, skb, tmp) {
++ off = 0;
+
+ if (total == len)
+ break;
+
+- while (total < len && off < pkt->len) {
++ while (total < len && off < skb->len) {
+ bytes = len - total;
+- if (bytes > pkt->len - off)
+- bytes = pkt->len - off;
++ if (bytes > skb->len - off)
++ bytes = skb->len - off;
+
+ /* sk_lock is held by caller so no one else can dequeue.
+ * Unlock rx_lock since memcpy_to_msg() may sleep.
+ */
+ spin_unlock_bh(&vvs->rx_lock);
+
+- err = memcpy_to_msg(msg, pkt->buf + off, bytes);
++ err = memcpy_to_msg(msg, skb->data + off, bytes);
+ if (err)
+ goto out;
+
+@@ -352,37 +364,39 @@ virtio_transport_stream_do_dequeue(struct vsock_sock *vsk,
+ size_t len)
+ {
+ struct virtio_vsock_sock *vvs = vsk->trans;
+- struct virtio_vsock_pkt *pkt;
+ size_t bytes, total = 0;
+- u32 free_space;
++ struct sk_buff *skb;
+ int err = -EFAULT;
++ u32 free_space;
+
+ spin_lock_bh(&vvs->rx_lock);
+- while (total < len && !list_empty(&vvs->rx_queue)) {
+- pkt = list_first_entry(&vvs->rx_queue,
+- struct virtio_vsock_pkt, list);
++ while (total < len && !skb_queue_empty(&vvs->rx_queue)) {
++ skb = skb_peek(&vvs->rx_queue);
+
+ bytes = len - total;
+- if (bytes > pkt->len - pkt->off)
+- bytes = pkt->len - pkt->off;
++ if (bytes > skb->len)
++ bytes = skb->len;
+
+ /* sk_lock is held by caller so no one else can dequeue.
+ * Unlock rx_lock since memcpy_to_msg() may sleep.
+ */
+ spin_unlock_bh(&vvs->rx_lock);
+
+- err = memcpy_to_msg(msg, pkt->buf + pkt->off, bytes);
++ err = memcpy_to_msg(msg, skb->data, bytes);
+ if (err)
+ goto out;
+
+ spin_lock_bh(&vvs->rx_lock);
+
+ total += bytes;
+- pkt->off += bytes;
+- if (pkt->off == pkt->len) {
+- virtio_transport_dec_rx_pkt(vvs, pkt);
+- list_del(&pkt->list);
+- virtio_transport_free_pkt(pkt);
++ skb_pull(skb, bytes);
++
++ if (skb->len == 0) {
++ u32 pkt_len = le32_to_cpu(virtio_vsock_hdr(skb)->len);
++
++ virtio_transport_dec_rx_pkt(vvs, pkt_len);
++ __skb_unlink(skb, &vvs->rx_queue);
++ consume_skb(skb);
+ }
+ }
+
+@@ -414,10 +428,10 @@ static int virtio_transport_seqpacket_do_dequeue(struct vsock_sock *vsk,
+ int flags)
+ {
+ struct virtio_vsock_sock *vvs = vsk->trans;
+- struct virtio_vsock_pkt *pkt;
+ int dequeued_len = 0;
+ size_t user_buf_len = msg_data_left(msg);
+ bool msg_ready = false;
++ struct sk_buff *skb;
+
+ spin_lock_bh(&vvs->rx_lock);
+
+@@ -427,13 +441,18 @@ static int virtio_transport_seqpacket_do_dequeue(struct vsock_sock *vsk,
+ }
+
+ while (!msg_ready) {
+- pkt = list_first_entry(&vvs->rx_queue, struct virtio_vsock_pkt, list);
++ struct virtio_vsock_hdr *hdr;
++ size_t pkt_len;
++
++ skb = __skb_dequeue(&vvs->rx_queue);
++ if (!skb)
++ break;
++ hdr = virtio_vsock_hdr(skb);
++ pkt_len = (size_t)le32_to_cpu(hdr->len);
+
+ if (dequeued_len >= 0) {
+- size_t pkt_len;
+ size_t bytes_to_copy;
+
+- pkt_len = (size_t)le32_to_cpu(pkt->hdr.len);
+ bytes_to_copy = min(user_buf_len, pkt_len);
+
+ if (bytes_to_copy) {
+@@ -444,7 +463,7 @@ static int virtio_transport_seqpacket_do_dequeue(struct vsock_sock *vsk,
+ */
+ spin_unlock_bh(&vvs->rx_lock);
+
+- err = memcpy_to_msg(msg, pkt->buf, bytes_to_copy);
++ err = memcpy_to_msg(msg, skb->data, bytes_to_copy);
+ if (err) {
+ /* Copy of message failed. Rest of
+ * fragments will be freed without copy.
+@@ -461,17 +480,16 @@ static int virtio_transport_seqpacket_do_dequeue(struct vsock_sock *vsk,
+ dequeued_len += pkt_len;
+ }
+
+- if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SEQ_EOM) {
++ if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SEQ_EOM) {
+ msg_ready = true;
+ vvs->msg_count--;
+
+- if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SEQ_EOR)
++ if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SEQ_EOR)
+ msg->msg_flags |= MSG_EOR;
+ }
+
+- virtio_transport_dec_rx_pkt(vvs, pkt);
+- list_del(&pkt->list);
+- virtio_transport_free_pkt(pkt);
++ virtio_transport_dec_rx_pkt(vvs, pkt_len);
++ kfree_skb(skb);
+ }
+
+ spin_unlock_bh(&vvs->rx_lock);
+@@ -609,7 +627,7 @@ int virtio_transport_do_socket_init(struct vsock_sock *vsk,
+
+ spin_lock_init(&vvs->rx_lock);
+ spin_lock_init(&vvs->tx_lock);
+- INIT_LIST_HEAD(&vvs->rx_queue);
++ skb_queue_head_init(&vvs->rx_queue);
+
+ return 0;
+ }
+@@ -806,16 +824,16 @@ void virtio_transport_destruct(struct vsock_sock *vsk)
+ EXPORT_SYMBOL_GPL(virtio_transport_destruct);
+
+ static int virtio_transport_reset(struct vsock_sock *vsk,
+- struct virtio_vsock_pkt *pkt)
++ struct sk_buff *skb)
+ {
+ struct virtio_vsock_pkt_info info = {
+ .op = VIRTIO_VSOCK_OP_RST,
+- .reply = !!pkt,
++ .reply = !!skb,
+ .vsk = vsk,
+ };
+
+ /* Send RST only if the original pkt is not a RST pkt */
+- if (pkt && le16_to_cpu(pkt->hdr.op) == VIRTIO_VSOCK_OP_RST)
++ if (skb && le16_to_cpu(virtio_vsock_hdr(skb)->op) == VIRTIO_VSOCK_OP_RST)
+ return 0;
+
+ return virtio_transport_send_pkt_info(vsk, &info);
+@@ -825,29 +843,30 @@ static int virtio_transport_reset(struct vsock_sock *vsk,
+ * attempt was made to connect to a socket that does not exist.
+ */
+ static int virtio_transport_reset_no_sock(const struct virtio_transport *t,
+- struct virtio_vsock_pkt *pkt)
++ struct sk_buff *skb)
+ {
+- struct virtio_vsock_pkt *reply;
++ struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
+ struct virtio_vsock_pkt_info info = {
+ .op = VIRTIO_VSOCK_OP_RST,
+- .type = le16_to_cpu(pkt->hdr.type),
++ .type = le16_to_cpu(hdr->type),
+ .reply = true,
+ };
++ struct sk_buff *reply;
+
+ /* Send RST only if the original pkt is not a RST pkt */
+- if (le16_to_cpu(pkt->hdr.op) == VIRTIO_VSOCK_OP_RST)
++ if (le16_to_cpu(hdr->op) == VIRTIO_VSOCK_OP_RST)
+ return 0;
+
+- reply = virtio_transport_alloc_pkt(&info, 0,
+- le64_to_cpu(pkt->hdr.dst_cid),
+- le32_to_cpu(pkt->hdr.dst_port),
+- le64_to_cpu(pkt->hdr.src_cid),
+- le32_to_cpu(pkt->hdr.src_port));
++ reply = virtio_transport_alloc_skb(&info, 0,
++ le64_to_cpu(hdr->dst_cid),
++ le32_to_cpu(hdr->dst_port),
++ le64_to_cpu(hdr->src_cid),
++ le32_to_cpu(hdr->src_port));
+ if (!reply)
+ return -ENOMEM;
+
+ if (!t) {
+- virtio_transport_free_pkt(reply);
++ kfree_skb(reply);
+ return -ENOTCONN;
+ }
+
+@@ -858,16 +877,11 @@ static int virtio_transport_reset_no_sock(const struct virtio_transport *t,
+ static void virtio_transport_remove_sock(struct vsock_sock *vsk)
+ {
+ struct virtio_vsock_sock *vvs = vsk->trans;
+- struct virtio_vsock_pkt *pkt, *tmp;
+
+ /* We don't need to take rx_lock, as the socket is closing and we are
+ * removing it.
+ */
+- list_for_each_entry_safe(pkt, tmp, &vvs->rx_queue, list) {
+- list_del(&pkt->list);
+- virtio_transport_free_pkt(pkt);
+- }
+-
++ __skb_queue_purge(&vvs->rx_queue);
+ vsock_remove_sock(vsk);
+ }
+
+@@ -981,13 +995,14 @@ EXPORT_SYMBOL_GPL(virtio_transport_release);
+
+ static int
+ virtio_transport_recv_connecting(struct sock *sk,
+- struct virtio_vsock_pkt *pkt)
++ struct sk_buff *skb)
+ {
++ struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
+ struct vsock_sock *vsk = vsock_sk(sk);
+- int err;
+ int skerr;
++ int err;
+
+- switch (le16_to_cpu(pkt->hdr.op)) {
++ switch (le16_to_cpu(hdr->op)) {
+ case VIRTIO_VSOCK_OP_RESPONSE:
+ sk->sk_state = TCP_ESTABLISHED;
+ sk->sk_socket->state = SS_CONNECTED;
+@@ -1008,7 +1023,7 @@ virtio_transport_recv_connecting(struct sock *sk,
+ return 0;
+
+ destroy:
+- virtio_transport_reset(vsk, pkt);
++ virtio_transport_reset(vsk, skb);
+ sk->sk_state = TCP_CLOSE;
+ sk->sk_err = skerr;
+ sk_error_report(sk);
+@@ -1017,34 +1032,37 @@ destroy:
+
+ static void
+ virtio_transport_recv_enqueue(struct vsock_sock *vsk,
+- struct virtio_vsock_pkt *pkt)
++ struct sk_buff *skb)
+ {
+ struct virtio_vsock_sock *vvs = vsk->trans;
+ bool can_enqueue, free_pkt = false;
++ struct virtio_vsock_hdr *hdr;
++ u32 len;
+
+- pkt->len = le32_to_cpu(pkt->hdr.len);
+- pkt->off = 0;
++ hdr = virtio_vsock_hdr(skb);
++ len = le32_to_cpu(hdr->len);
+
+ spin_lock_bh(&vvs->rx_lock);
+
+- can_enqueue = virtio_transport_inc_rx_pkt(vvs, pkt);
++ can_enqueue = virtio_transport_inc_rx_pkt(vvs, len);
+ if (!can_enqueue) {
+ free_pkt = true;
+ goto out;
+ }
+
+- if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SEQ_EOM)
++ if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SEQ_EOM)
+ vvs->msg_count++;
+
+ /* Try to copy small packets into the buffer of last packet queued,
+ * to avoid wasting memory queueing the entire buffer with a small
+ * payload.
+ */
+- if (pkt->len <= GOOD_COPY_LEN && !list_empty(&vvs->rx_queue)) {
+- struct virtio_vsock_pkt *last_pkt;
++ if (len <= GOOD_COPY_LEN && !skb_queue_empty(&vvs->rx_queue)) {
++ struct virtio_vsock_hdr *last_hdr;
++ struct sk_buff *last_skb;
+
+- last_pkt = list_last_entry(&vvs->rx_queue,
+- struct virtio_vsock_pkt, list);
++ last_skb = skb_peek_tail(&vvs->rx_queue);
++ last_hdr = virtio_vsock_hdr(last_skb);
+
+ /* If there is space in the last packet queued, we copy the
+ * new packet in its buffer. We avoid this if the last packet
+@@ -1052,35 +1070,35 @@ virtio_transport_recv_enqueue(struct vsock_sock *vsk,
+ * delimiter of SEQPACKET message, so 'pkt' is the first packet
+ * of a new message.
+ */
+- if ((pkt->len <= last_pkt->buf_len - last_pkt->len) &&
+- !(le32_to_cpu(last_pkt->hdr.flags) & VIRTIO_VSOCK_SEQ_EOM)) {
+- memcpy(last_pkt->buf + last_pkt->len, pkt->buf,
+- pkt->len);
+- last_pkt->len += pkt->len;
++ if (skb->len < skb_tailroom(last_skb) &&
++ !(le32_to_cpu(last_hdr->flags) & VIRTIO_VSOCK_SEQ_EOM)) {
++ memcpy(skb_put(last_skb, skb->len), skb->data, skb->len);
+ free_pkt = true;
+- last_pkt->hdr.flags |= pkt->hdr.flags;
++ last_hdr->flags |= hdr->flags;
++ le32_add_cpu(&last_hdr->len, len);
+ goto out;
+ }
+ }
+
+- list_add_tail(&pkt->list, &vvs->rx_queue);
++ __skb_queue_tail(&vvs->rx_queue, skb);
+
+ out:
+ spin_unlock_bh(&vvs->rx_lock);
+ if (free_pkt)
+- virtio_transport_free_pkt(pkt);
++ kfree_skb(skb);
+ }
+
+ static int
+ virtio_transport_recv_connected(struct sock *sk,
+- struct virtio_vsock_pkt *pkt)
++ struct sk_buff *skb)
+ {
++ struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
+ struct vsock_sock *vsk = vsock_sk(sk);
+ int err = 0;
+
+- switch (le16_to_cpu(pkt->hdr.op)) {
++ switch (le16_to_cpu(hdr->op)) {
+ case VIRTIO_VSOCK_OP_RW:
+- virtio_transport_recv_enqueue(vsk, pkt);
++ virtio_transport_recv_enqueue(vsk, skb);
+ vsock_data_ready(sk);
+ return err;
+ case VIRTIO_VSOCK_OP_CREDIT_REQUEST:
+@@ -1090,18 +1108,23 @@ virtio_transport_recv_connected(struct sock *sk,
+ sk->sk_write_space(sk);
+ break;
+ case VIRTIO_VSOCK_OP_SHUTDOWN:
+- if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SHUTDOWN_RCV)
++ if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SHUTDOWN_RCV)
+ vsk->peer_shutdown |= RCV_SHUTDOWN;
+- if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SHUTDOWN_SEND)
++ if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SHUTDOWN_SEND)
+ vsk->peer_shutdown |= SEND_SHUTDOWN;
+- if (vsk->peer_shutdown == SHUTDOWN_MASK &&
+- vsock_stream_has_data(vsk) <= 0 &&
+- !sock_flag(sk, SOCK_DONE)) {
+- (void)virtio_transport_reset(vsk, NULL);
+-
+- virtio_transport_do_close(vsk, true);
++ if (vsk->peer_shutdown == SHUTDOWN_MASK) {
++ if (vsock_stream_has_data(vsk) <= 0 && !sock_flag(sk, SOCK_DONE)) {
++ (void)virtio_transport_reset(vsk, NULL);
++ virtio_transport_do_close(vsk, true);
++ }
++ /* Remove this socket anyway because the remote peer sent
++ * the shutdown. This way a new connection will succeed
++ * if the remote peer uses the same source port,
++ * even if the old socket is still unreleased, but now disconnected.
++ */
++ vsock_remove_sock(vsk);
+ }
+- if (le32_to_cpu(pkt->hdr.flags))
++ if (le32_to_cpu(virtio_vsock_hdr(skb)->flags))
+ sk->sk_state_change(sk);
+ break;
+ case VIRTIO_VSOCK_OP_RST:
+@@ -1112,28 +1135,30 @@ virtio_transport_recv_connected(struct sock *sk,
+ break;
+ }
+
+- virtio_transport_free_pkt(pkt);
++ kfree_skb(skb);
+ return err;
+ }
+
+ static void
+ virtio_transport_recv_disconnecting(struct sock *sk,
+- struct virtio_vsock_pkt *pkt)
++ struct sk_buff *skb)
+ {
++ struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
+ struct vsock_sock *vsk = vsock_sk(sk);
+
+- if (le16_to_cpu(pkt->hdr.op) == VIRTIO_VSOCK_OP_RST)
++ if (le16_to_cpu(hdr->op) == VIRTIO_VSOCK_OP_RST)
+ virtio_transport_do_close(vsk, true);
+ }
+
+ static int
+ virtio_transport_send_response(struct vsock_sock *vsk,
+- struct virtio_vsock_pkt *pkt)
++ struct sk_buff *skb)
+ {
++ struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
+ struct virtio_vsock_pkt_info info = {
+ .op = VIRTIO_VSOCK_OP_RESPONSE,
+- .remote_cid = le64_to_cpu(pkt->hdr.src_cid),
+- .remote_port = le32_to_cpu(pkt->hdr.src_port),
++ .remote_cid = le64_to_cpu(hdr->src_cid),
++ .remote_port = le32_to_cpu(hdr->src_port),
+ .reply = true,
+ .vsk = vsk,
+ };
+@@ -1142,8 +1167,9 @@ virtio_transport_send_response(struct vsock_sock *vsk,
+ }
+
+ static bool virtio_transport_space_update(struct sock *sk,
+- struct virtio_vsock_pkt *pkt)
++ struct sk_buff *skb)
+ {
++ struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
+ struct vsock_sock *vsk = vsock_sk(sk);
+ struct virtio_vsock_sock *vvs = vsk->trans;
+ bool space_available;
+@@ -1158,8 +1184,8 @@ static bool virtio_transport_space_update(struct sock *sk,
+
+ /* buf_alloc and fwd_cnt is always included in the hdr */
+ spin_lock_bh(&vvs->tx_lock);
+- vvs->peer_buf_alloc = le32_to_cpu(pkt->hdr.buf_alloc);
+- vvs->peer_fwd_cnt = le32_to_cpu(pkt->hdr.fwd_cnt);
++ vvs->peer_buf_alloc = le32_to_cpu(hdr->buf_alloc);
++ vvs->peer_fwd_cnt = le32_to_cpu(hdr->fwd_cnt);
+ space_available = virtio_transport_has_space(vsk);
+ spin_unlock_bh(&vvs->tx_lock);
+ return space_available;
+@@ -1167,27 +1193,28 @@ static bool virtio_transport_space_update(struct sock *sk,
+
+ /* Handle server socket */
+ static int
+-virtio_transport_recv_listen(struct sock *sk, struct virtio_vsock_pkt *pkt,
++virtio_transport_recv_listen(struct sock *sk, struct sk_buff *skb,
+ struct virtio_transport *t)
+ {
++ struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
+ struct vsock_sock *vsk = vsock_sk(sk);
+ struct vsock_sock *vchild;
+ struct sock *child;
+ int ret;
+
+- if (le16_to_cpu(pkt->hdr.op) != VIRTIO_VSOCK_OP_REQUEST) {
+- virtio_transport_reset_no_sock(t, pkt);
++ if (le16_to_cpu(hdr->op) != VIRTIO_VSOCK_OP_REQUEST) {
++ virtio_transport_reset_no_sock(t, skb);
+ return -EINVAL;
+ }
+
+ if (sk_acceptq_is_full(sk)) {
+- virtio_transport_reset_no_sock(t, pkt);
++ virtio_transport_reset_no_sock(t, skb);
+ return -ENOMEM;
+ }
+
+ child = vsock_create_connected(sk);
+ if (!child) {
+- virtio_transport_reset_no_sock(t, pkt);
++ virtio_transport_reset_no_sock(t, skb);
+ return -ENOMEM;
+ }
+
+@@ -1198,10 +1225,10 @@ virtio_transport_recv_listen(struct sock *sk, struct virtio_vsock_pkt *pkt,
+ child->sk_state = TCP_ESTABLISHED;
+
+ vchild = vsock_sk(child);
+- vsock_addr_init(&vchild->local_addr, le64_to_cpu(pkt->hdr.dst_cid),
+- le32_to_cpu(pkt->hdr.dst_port));
+- vsock_addr_init(&vchild->remote_addr, le64_to_cpu(pkt->hdr.src_cid),
+- le32_to_cpu(pkt->hdr.src_port));
++ vsock_addr_init(&vchild->local_addr, le64_to_cpu(hdr->dst_cid),
++ le32_to_cpu(hdr->dst_port));
++ vsock_addr_init(&vchild->remote_addr, le64_to_cpu(hdr->src_cid),
++ le32_to_cpu(hdr->src_port));
+
+ ret = vsock_assign_transport(vchild, vsk);
+ /* Transport assigned (looking at remote_addr) must be the same
+@@ -1209,17 +1236,17 @@ virtio_transport_recv_listen(struct sock *sk, struct virtio_vsock_pkt *pkt,
+ */
+ if (ret || vchild->transport != &t->transport) {
+ release_sock(child);
+- virtio_transport_reset_no_sock(t, pkt);
++ virtio_transport_reset_no_sock(t, skb);
+ sock_put(child);
+ return ret;
+ }
+
+- if (virtio_transport_space_update(child, pkt))
++ if (virtio_transport_space_update(child, skb))
+ child->sk_write_space(child);
+
+ vsock_insert_connected(vchild);
+ vsock_enqueue_accept(sk, child);
+- virtio_transport_send_response(vchild, pkt);
++ virtio_transport_send_response(vchild, skb);
+
+ release_sock(child);
+
+@@ -1237,29 +1264,30 @@ static bool virtio_transport_valid_type(u16 type)
+ * lock.
+ */
+ void virtio_transport_recv_pkt(struct virtio_transport *t,
+- struct virtio_vsock_pkt *pkt)
++ struct sk_buff *skb)
+ {
++ struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
+ struct sockaddr_vm src, dst;
+ struct vsock_sock *vsk;
+ struct sock *sk;
+ bool space_available;
+
+- vsock_addr_init(&src, le64_to_cpu(pkt->hdr.src_cid),
+- le32_to_cpu(pkt->hdr.src_port));
+- vsock_addr_init(&dst, le64_to_cpu(pkt->hdr.dst_cid),
+- le32_to_cpu(pkt->hdr.dst_port));
++ vsock_addr_init(&src, le64_to_cpu(hdr->src_cid),
++ le32_to_cpu(hdr->src_port));
++ vsock_addr_init(&dst, le64_to_cpu(hdr->dst_cid),
++ le32_to_cpu(hdr->dst_port));
+
+ trace_virtio_transport_recv_pkt(src.svm_cid, src.svm_port,
+ dst.svm_cid, dst.svm_port,
+- le32_to_cpu(pkt->hdr.len),
+- le16_to_cpu(pkt->hdr.type),
+- le16_to_cpu(pkt->hdr.op),
+- le32_to_cpu(pkt->hdr.flags),
+- le32_to_cpu(pkt->hdr.buf_alloc),
+- le32_to_cpu(pkt->hdr.fwd_cnt));
+-
+- if (!virtio_transport_valid_type(le16_to_cpu(pkt->hdr.type))) {
+- (void)virtio_transport_reset_no_sock(t, pkt);
++ le32_to_cpu(hdr->len),
++ le16_to_cpu(hdr->type),
++ le16_to_cpu(hdr->op),
++ le32_to_cpu(hdr->flags),
++ le32_to_cpu(hdr->buf_alloc),
++ le32_to_cpu(hdr->fwd_cnt));
++
++ if (!virtio_transport_valid_type(le16_to_cpu(hdr->type))) {
++ (void)virtio_transport_reset_no_sock(t, skb);
+ goto free_pkt;
+ }
+
+@@ -1270,30 +1298,35 @@ void virtio_transport_recv_pkt(struct virtio_transport *t,
+ if (!sk) {
+ sk = vsock_find_bound_socket(&dst);
+ if (!sk) {
+- (void)virtio_transport_reset_no_sock(t, pkt);
++ (void)virtio_transport_reset_no_sock(t, skb);
+ goto free_pkt;
+ }
+ }
+
+- if (virtio_transport_get_type(sk) != le16_to_cpu(pkt->hdr.type)) {
+- (void)virtio_transport_reset_no_sock(t, pkt);
++ if (virtio_transport_get_type(sk) != le16_to_cpu(hdr->type)) {
++ (void)virtio_transport_reset_no_sock(t, skb);
+ sock_put(sk);
+ goto free_pkt;
+ }
+
++ if (!skb_set_owner_sk_safe(skb, sk)) {
++ WARN_ONCE(1, "receiving vsock socket has sk_refcnt == 0\n");
++ goto free_pkt;
++ }
++
+ vsk = vsock_sk(sk);
+
+ lock_sock(sk);
+
+ /* Check if sk has been closed before lock_sock */
+ if (sock_flag(sk, SOCK_DONE)) {
+- (void)virtio_transport_reset_no_sock(t, pkt);
++ (void)virtio_transport_reset_no_sock(t, skb);
+ release_sock(sk);
+ sock_put(sk);
+ goto free_pkt;
+ }
+
+- space_available = virtio_transport_space_update(sk, pkt);
++ space_available = virtio_transport_space_update(sk, skb);
+
+ /* Update CID in case it has changed after a transport reset event */
+ if (vsk->local_addr.svm_cid != VMADDR_CID_ANY)
+@@ -1304,23 +1337,23 @@ void virtio_transport_recv_pkt(struct virtio_transport *t,
+
+ switch (sk->sk_state) {
+ case TCP_LISTEN:
+- virtio_transport_recv_listen(sk, pkt, t);
+- virtio_transport_free_pkt(pkt);
++ virtio_transport_recv_listen(sk, skb, t);
++ kfree_skb(skb);
+ break;
+ case TCP_SYN_SENT:
+- virtio_transport_recv_connecting(sk, pkt);
+- virtio_transport_free_pkt(pkt);
++ virtio_transport_recv_connecting(sk, skb);
++ kfree_skb(skb);
+ break;
+ case TCP_ESTABLISHED:
+- virtio_transport_recv_connected(sk, pkt);
++ virtio_transport_recv_connected(sk, skb);
+ break;
+ case TCP_CLOSING:
+- virtio_transport_recv_disconnecting(sk, pkt);
+- virtio_transport_free_pkt(pkt);
++ virtio_transport_recv_disconnecting(sk, skb);
++ kfree_skb(skb);
+ break;
+ default:
+- (void)virtio_transport_reset_no_sock(t, pkt);
+- virtio_transport_free_pkt(pkt);
++ (void)virtio_transport_reset_no_sock(t, skb);
++ kfree_skb(skb);
+ break;
+ }
+
+@@ -1333,16 +1366,42 @@ void virtio_transport_recv_pkt(struct virtio_transport *t,
+ return;
+
+ free_pkt:
+- virtio_transport_free_pkt(pkt);
++ kfree_skb(skb);
+ }
+ EXPORT_SYMBOL_GPL(virtio_transport_recv_pkt);
+
+-void virtio_transport_free_pkt(struct virtio_vsock_pkt *pkt)
++/* Remove skbs found in a queue that have a vsk that matches.
++ *
++ * Each skb is freed.
++ *
++ * Returns the count of skbs that were reply packets.
++ */
++int virtio_transport_purge_skbs(void *vsk, struct sk_buff_head *queue)
+ {
+- kvfree(pkt->buf);
+- kfree(pkt);
++ struct sk_buff_head freeme;
++ struct sk_buff *skb, *tmp;
++ int cnt = 0;
++
++ skb_queue_head_init(&freeme);
++
++ spin_lock_bh(&queue->lock);
++ skb_queue_walk_safe(queue, skb, tmp) {
++ if (vsock_sk(skb->sk) != vsk)
++ continue;
++
++ __skb_unlink(skb, queue);
++ __skb_queue_tail(&freeme, skb);
++
++ if (virtio_vsock_skb_reply(skb))
++ cnt++;
++ }
++ spin_unlock_bh(&queue->lock);
++
++ __skb_queue_purge(&freeme);
++
++ return cnt;
+ }
+-EXPORT_SYMBOL_GPL(virtio_transport_free_pkt);
++EXPORT_SYMBOL_GPL(virtio_transport_purge_skbs);
+
+ MODULE_LICENSE("GPL v2");
+ MODULE_AUTHOR("Asias He");
+diff --git a/net/vmw_vsock/vsock_loopback.c b/net/vmw_vsock/vsock_loopback.c
+index 169a8cf65b390..89905c092645a 100644
+--- a/net/vmw_vsock/vsock_loopback.c
++++ b/net/vmw_vsock/vsock_loopback.c
+@@ -15,8 +15,7 @@
+ struct vsock_loopback {
+ struct workqueue_struct *workqueue;
+
+- spinlock_t pkt_list_lock; /* protects pkt_list */
+- struct list_head pkt_list;
++ struct sk_buff_head pkt_queue;
+ struct work_struct pkt_work;
+ };
+
+@@ -27,14 +26,12 @@ static u32 vsock_loopback_get_local_cid(void)
+ return VMADDR_CID_LOCAL;
+ }
+
+-static int vsock_loopback_send_pkt(struct virtio_vsock_pkt *pkt)
++static int vsock_loopback_send_pkt(struct sk_buff *skb)
+ {
+ struct vsock_loopback *vsock = &the_vsock_loopback;
+- int len = pkt->len;
++ int len = skb->len;
+
+- spin_lock_bh(&vsock->pkt_list_lock);
+- list_add_tail(&pkt->list, &vsock->pkt_list);
+- spin_unlock_bh(&vsock->pkt_list_lock);
++ skb_queue_tail(&vsock->pkt_queue, skb);
+
+ queue_work(vsock->workqueue, &vsock->pkt_work);
+
+@@ -44,21 +41,8 @@ static int vsock_loopback_send_pkt(struct virtio_vsock_pkt *pkt)
+ static int vsock_loopback_cancel_pkt(struct vsock_sock *vsk)
+ {
+ struct vsock_loopback *vsock = &the_vsock_loopback;
+- struct virtio_vsock_pkt *pkt, *n;
+- LIST_HEAD(freeme);
+-
+- spin_lock_bh(&vsock->pkt_list_lock);
+- list_for_each_entry_safe(pkt, n, &vsock->pkt_list, list) {
+- if (pkt->vsk != vsk)
+- continue;
+- list_move(&pkt->list, &freeme);
+- }
+- spin_unlock_bh(&vsock->pkt_list_lock);
+
+- list_for_each_entry_safe(pkt, n, &freeme, list) {
+- list_del(&pkt->list);
+- virtio_transport_free_pkt(pkt);
+- }
++ virtio_transport_purge_skbs(vsk, &vsock->pkt_queue);
+
+ return 0;
+ }
+@@ -121,20 +105,18 @@ static void vsock_loopback_work(struct work_struct *work)
+ {
+ struct vsock_loopback *vsock =
+ container_of(work, struct vsock_loopback, pkt_work);
+- LIST_HEAD(pkts);
++ struct sk_buff_head pkts;
++ struct sk_buff *skb;
+
+- spin_lock_bh(&vsock->pkt_list_lock);
+- list_splice_init(&vsock->pkt_list, &pkts);
+- spin_unlock_bh(&vsock->pkt_list_lock);
++ skb_queue_head_init(&pkts);
+
+- while (!list_empty(&pkts)) {
+- struct virtio_vsock_pkt *pkt;
++ spin_lock_bh(&vsock->pkt_queue.lock);
++ skb_queue_splice_init(&vsock->pkt_queue, &pkts);
++ spin_unlock_bh(&vsock->pkt_queue.lock);
+
+- pkt = list_first_entry(&pkts, struct virtio_vsock_pkt, list);
+- list_del_init(&pkt->list);
+-
+- virtio_transport_deliver_tap_pkt(pkt);
+- virtio_transport_recv_pkt(&loopback_transport, pkt);
++ while ((skb = __skb_dequeue(&pkts))) {
++ virtio_transport_deliver_tap_pkt(skb);
++ virtio_transport_recv_pkt(&loopback_transport, skb);
+ }
+ }
+
+@@ -147,8 +129,7 @@ static int __init vsock_loopback_init(void)
+ if (!vsock->workqueue)
+ return -ENOMEM;
+
+- spin_lock_init(&vsock->pkt_list_lock);
+- INIT_LIST_HEAD(&vsock->pkt_list);
++ skb_queue_head_init(&vsock->pkt_queue);
+ INIT_WORK(&vsock->pkt_work, vsock_loopback_work);
+
+ ret = vsock_core_register(&loopback_transport.transport,
+@@ -166,20 +147,12 @@ out_wq:
+ static void __exit vsock_loopback_exit(void)
+ {
+ struct vsock_loopback *vsock = &the_vsock_loopback;
+- struct virtio_vsock_pkt *pkt;
+
+ vsock_core_unregister(&loopback_transport.transport);
+
+ flush_work(&vsock->pkt_work);
+
+- spin_lock_bh(&vsock->pkt_list_lock);
+- while (!list_empty(&vsock->pkt_list)) {
+- pkt = list_first_entry(&vsock->pkt_list,
+- struct virtio_vsock_pkt, list);
+- list_del(&pkt->list);
+- virtio_transport_free_pkt(pkt);
+- }
+- spin_unlock_bh(&vsock->pkt_list_lock);
++ virtio_vsock_skb_queue_purge(&vsock->pkt_queue);
+
+ destroy_workqueue(vsock->workqueue);
+ }
+diff --git a/net/wireless/core.c b/net/wireless/core.c
+index bf2f1f583fb12..63d75fecc2c53 100644
+--- a/net/wireless/core.c
++++ b/net/wireless/core.c
+@@ -1042,7 +1042,8 @@ void wiphy_rfkill_start_polling(struct wiphy *wiphy)
+ }
+ EXPORT_SYMBOL(wiphy_rfkill_start_polling);
+
+-void cfg80211_process_wiphy_works(struct cfg80211_registered_device *rdev)
++void cfg80211_process_wiphy_works(struct cfg80211_registered_device *rdev,
++ struct wiphy_work *end)
+ {
+ unsigned int runaway_limit = 100;
+ unsigned long flags;
+@@ -1061,6 +1062,10 @@ void cfg80211_process_wiphy_works(struct cfg80211_registered_device *rdev)
+ wk->func(&rdev->wiphy, wk);
+
+ spin_lock_irqsave(&rdev->wiphy_work_lock, flags);
++
++ if (wk == end)
++ break;
++
+ if (WARN_ON(--runaway_limit == 0))
+ INIT_LIST_HEAD(&rdev->wiphy_work_list);
+ }
+@@ -1111,7 +1116,7 @@ void wiphy_unregister(struct wiphy *wiphy)
+ #endif
+
+ /* surely nothing is reachable now, clean up work */
+- cfg80211_process_wiphy_works(rdev);
++ cfg80211_process_wiphy_works(rdev, NULL);
+ wiphy_unlock(&rdev->wiphy);
+ rtnl_unlock();
+
+@@ -1636,6 +1641,21 @@ void wiphy_work_cancel(struct wiphy *wiphy, struct wiphy_work *work)
+ }
+ EXPORT_SYMBOL_GPL(wiphy_work_cancel);
+
++void wiphy_work_flush(struct wiphy *wiphy, struct wiphy_work *work)
++{
++ struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
++ unsigned long flags;
++ bool run;
++
++ spin_lock_irqsave(&rdev->wiphy_work_lock, flags);
++ run = !work || !list_empty(&work->entry);
++ spin_unlock_irqrestore(&rdev->wiphy_work_lock, flags);
++
++ if (run)
++ cfg80211_process_wiphy_works(rdev, work);
++}
++EXPORT_SYMBOL_GPL(wiphy_work_flush);
++
+ void wiphy_delayed_work_timer(struct timer_list *t)
+ {
+ struct wiphy_delayed_work *dwork = from_timer(dwork, t, timer);
+@@ -1668,6 +1688,16 @@ void wiphy_delayed_work_cancel(struct wiphy *wiphy,
+ }
+ EXPORT_SYMBOL_GPL(wiphy_delayed_work_cancel);
+
++void wiphy_delayed_work_flush(struct wiphy *wiphy,
++ struct wiphy_delayed_work *dwork)
++{
++ lockdep_assert_held(&wiphy->mtx);
++
++ del_timer_sync(&dwork->timer);
++ wiphy_work_flush(wiphy, &dwork->work);
++}
++EXPORT_SYMBOL_GPL(wiphy_delayed_work_flush);
++
+ static int __init cfg80211_init(void)
+ {
+ int err;
+diff --git a/net/wireless/core.h b/net/wireless/core.h
+index 86fd79912254d..e1accacc6f233 100644
+--- a/net/wireless/core.h
++++ b/net/wireless/core.h
+@@ -461,7 +461,8 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
+ struct net_device *dev, enum nl80211_iftype ntype,
+ struct vif_params *params);
+ void cfg80211_process_rdev_events(struct cfg80211_registered_device *rdev);
+-void cfg80211_process_wiphy_works(struct cfg80211_registered_device *rdev);
++void cfg80211_process_wiphy_works(struct cfg80211_registered_device *rdev,
++ struct wiphy_work *end);
+ void cfg80211_process_wdev_events(struct wireless_dev *wdev);
+
+ bool cfg80211_does_bw_fit_range(const struct ieee80211_freq_range *freq_range,
+diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c
+index 4d3b658030105..a88f338c61d31 100644
+--- a/net/wireless/sysfs.c
++++ b/net/wireless/sysfs.c
+@@ -105,14 +105,14 @@ static int wiphy_suspend(struct device *dev)
+ cfg80211_leave_all(rdev);
+ cfg80211_process_rdev_events(rdev);
+ }
+- cfg80211_process_wiphy_works(rdev);
++ cfg80211_process_wiphy_works(rdev, NULL);
+ if (rdev->ops->suspend)
+ ret = rdev_suspend(rdev, rdev->wiphy.wowlan_config);
+ if (ret == 1) {
+ /* Driver refuse to configure wowlan */
+ cfg80211_leave_all(rdev);
+ cfg80211_process_rdev_events(rdev);
+- cfg80211_process_wiphy_works(rdev);
++ cfg80211_process_wiphy_works(rdev, NULL);
+ ret = rdev_suspend(rdev, NULL);
+ }
+ if (ret == 0)
+diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
+index 111d5464c12df..39e2c8883ddd4 100644
+--- a/scripts/mod/file2alias.c
++++ b/scripts/mod/file2alias.c
+@@ -1348,13 +1348,13 @@ static int do_typec_entry(const char *filename, void *symval, char *alias)
+ /* Looks like: tee:uuid */
+ static int do_tee_entry(const char *filename, void *symval, char *alias)
+ {
+- DEF_FIELD(symval, tee_client_device_id, uuid);
++ DEF_FIELD_ADDR(symval, tee_client_device_id, uuid);
+
+ sprintf(alias, "tee:%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x",
+- uuid.b[0], uuid.b[1], uuid.b[2], uuid.b[3], uuid.b[4],
+- uuid.b[5], uuid.b[6], uuid.b[7], uuid.b[8], uuid.b[9],
+- uuid.b[10], uuid.b[11], uuid.b[12], uuid.b[13], uuid.b[14],
+- uuid.b[15]);
++ uuid->b[0], uuid->b[1], uuid->b[2], uuid->b[3], uuid->b[4],
++ uuid->b[5], uuid->b[6], uuid->b[7], uuid->b[8], uuid->b[9],
++ uuid->b[10], uuid->b[11], uuid->b[12], uuid->b[13], uuid->b[14],
++ uuid->b[15]);
+
+ add_wildcard(alias);
+ return 1;
+@@ -1401,10 +1401,10 @@ static int do_mhi_ep_entry(const char *filename, void *symval, char *alias)
+ /* Looks like: ishtp:{guid} */
+ static int do_ishtp_entry(const char *filename, void *symval, char *alias)
+ {
+- DEF_FIELD(symval, ishtp_device_id, guid);
++ DEF_FIELD_ADDR(symval, ishtp_device_id, guid);
+
+ strcpy(alias, ISHTP_MODULE_PREFIX "{");
+- add_guid(alias, guid);
++ add_guid(alias, *guid);
+ strcat(alias, "}");
+
+ return 1;
+diff --git a/security/apparmor/Kconfig b/security/apparmor/Kconfig
+index cb3496e00d8a6..f334e7cccf2da 100644
+--- a/security/apparmor/Kconfig
++++ b/security/apparmor/Kconfig
+@@ -106,8 +106,8 @@ config SECURITY_APPARMOR_PARANOID_LOAD
+ Disabling the check will speed up policy loads.
+
+ config SECURITY_APPARMOR_KUNIT_TEST
+- bool "Build KUnit tests for policy_unpack.c" if !KUNIT_ALL_TESTS
+- depends on KUNIT=y && SECURITY_APPARMOR
++ tristate "Build KUnit tests for policy_unpack.c" if !KUNIT_ALL_TESTS
++ depends on KUNIT && SECURITY_APPARMOR
+ default KUNIT_ALL_TESTS
+ help
+ This builds the AppArmor KUnit tests.
+diff --git a/security/apparmor/Makefile b/security/apparmor/Makefile
+index ff23fcfefe196..065f4e346553d 100644
+--- a/security/apparmor/Makefile
++++ b/security/apparmor/Makefile
+@@ -8,6 +8,9 @@ apparmor-y := apparmorfs.o audit.o capability.o task.o ipc.o lib.o match.o \
+ resource.o secid.o file.o policy_ns.o label.o mount.o net.o
+ apparmor-$(CONFIG_SECURITY_APPARMOR_HASH) += crypto.o
+
++obj-$(CONFIG_SECURITY_APPARMOR_KUNIT_TEST) += apparmor_policy_unpack_test.o
++apparmor_policy_unpack_test-objs += policy_unpack_test.o
++
+ clean-files := capability_names.h rlim_names.h net_names.h
+
+ # Build a lower case string table of address family names
+diff --git a/security/apparmor/include/policy_unpack.h b/security/apparmor/include/policy_unpack.h
+index eb5f7d7f132bb..e89b701447bcb 100644
+--- a/security/apparmor/include/policy_unpack.h
++++ b/security/apparmor/include/policy_unpack.h
+@@ -48,6 +48,43 @@ enum {
+ AAFS_LOADDATA_NDENTS /* count of entries */
+ };
+
++/*
++ * The AppArmor interface treats data as a type byte followed by the
++ * actual data. The interface has the notion of a named entry
++ * which has a name (AA_NAME typecode followed by name string) followed by
++ * the entries typecode and data. Named types allow for optional
++ * elements and extensions to be added and tested for without breaking
++ * backwards compatibility.
++ */
++
++enum aa_code {
++ AA_U8,
++ AA_U16,
++ AA_U32,
++ AA_U64,
++ AA_NAME, /* same as string except it is items name */
++ AA_STRING,
++ AA_BLOB,
++ AA_STRUCT,
++ AA_STRUCTEND,
++ AA_LIST,
++ AA_LISTEND,
++ AA_ARRAY,
++ AA_ARRAYEND,
++};
++
++/*
++ * aa_ext is the read of the buffer containing the serialized profile. The
++ * data is copied into a kernel buffer in apparmorfs and then handed off to
++ * the unpack routines.
++ */
++struct aa_ext {
++ void *start;
++ void *end;
++ void *pos; /* pointer to current position in the buffer */
++ u32 version;
++};
++
+ /*
+ * struct aa_loaddata - buffer of policy raw_data set
+ *
+@@ -126,4 +163,17 @@ static inline void aa_put_loaddata(struct aa_loaddata *data)
+ kref_put(&data->count, aa_loaddata_kref);
+ }
+
++#if IS_ENABLED(CONFIG_KUNIT)
++bool aa_inbounds(struct aa_ext *e, size_t size);
++size_t aa_unpack_u16_chunk(struct aa_ext *e, char **chunk);
++bool aa_unpack_X(struct aa_ext *e, enum aa_code code);
++bool aa_unpack_nameX(struct aa_ext *e, enum aa_code code, const char *name);
++bool aa_unpack_u32(struct aa_ext *e, u32 *data, const char *name);
++bool aa_unpack_u64(struct aa_ext *e, u64 *data, const char *name);
++size_t aa_unpack_array(struct aa_ext *e, const char *name);
++size_t aa_unpack_blob(struct aa_ext *e, char **blob, const char *name);
++int aa_unpack_str(struct aa_ext *e, const char **string, const char *name);
++int aa_unpack_strdup(struct aa_ext *e, char **string, const char *name);
++#endif
++
+ #endif /* __POLICY_INTERFACE_H */
+diff --git a/security/apparmor/policy.c b/security/apparmor/policy.c
+index fbdfcef91c616..c7b84fb568414 100644
+--- a/security/apparmor/policy.c
++++ b/security/apparmor/policy.c
+@@ -218,6 +218,7 @@ void aa_free_profile(struct aa_profile *profile)
+
+ aa_put_ns(profile->ns);
+ kfree_sensitive(profile->rename);
++ kfree_sensitive(profile->disconnected);
+
+ aa_free_file_rules(&profile->file);
+ aa_free_cap_rules(&profile->caps);
+diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c
+index 9c3fec2c7cf6b..7012fd82f1bb1 100644
+--- a/security/apparmor/policy_unpack.c
++++ b/security/apparmor/policy_unpack.c
+@@ -14,6 +14,7 @@
+ */
+
+ #include <asm/unaligned.h>
++#include <kunit/visibility.h>
+ #include <linux/ctype.h>
+ #include <linux/errno.h>
+ #include <linux/zlib.h>
+@@ -37,43 +38,6 @@
+ #define v7 7
+ #define v8 8 /* full network masking */
+
+-/*
+- * The AppArmor interface treats data as a type byte followed by the
+- * actual data. The interface has the notion of a named entry
+- * which has a name (AA_NAME typecode followed by name string) followed by
+- * the entries typecode and data. Named types allow for optional
+- * elements and extensions to be added and tested for without breaking
+- * backwards compatibility.
+- */
+-
+-enum aa_code {
+- AA_U8,
+- AA_U16,
+- AA_U32,
+- AA_U64,
+- AA_NAME, /* same as string except it is items name */
+- AA_STRING,
+- AA_BLOB,
+- AA_STRUCT,
+- AA_STRUCTEND,
+- AA_LIST,
+- AA_LISTEND,
+- AA_ARRAY,
+- AA_ARRAYEND,
+-};
+-
+-/*
+- * aa_ext is the read of the buffer containing the serialized profile. The
+- * data is copied into a kernel buffer in apparmorfs and then handed off to
+- * the unpack routines.
+- */
+-struct aa_ext {
+- void *start;
+- void *end;
+- void *pos; /* pointer to current position in the buffer */
+- u32 version;
+-};
+-
+ /* audit callback for unpack fields */
+ static void audit_cb(struct audit_buffer *ab, void *va)
+ {
+@@ -199,10 +163,11 @@ struct aa_loaddata *aa_loaddata_alloc(size_t size)
+ }
+
+ /* test if read will be in packed data bounds */
+-static bool inbounds(struct aa_ext *e, size_t size)
++VISIBLE_IF_KUNIT bool aa_inbounds(struct aa_ext *e, size_t size)
+ {
+ return (size <= e->end - e->pos);
+ }
++EXPORT_SYMBOL_IF_KUNIT(aa_inbounds);
+
+ static void *kvmemdup(const void *src, size_t len)
+ {
+@@ -214,22 +179,22 @@ static void *kvmemdup(const void *src, size_t len)
+ }
+
+ /**
+- * unpack_u16_chunk - test and do bounds checking for a u16 size based chunk
++ * aa_unpack_u16_chunk - test and do bounds checking for a u16 size based chunk
+ * @e: serialized data read head (NOT NULL)
+ * @chunk: start address for chunk of data (NOT NULL)
+ *
+ * Returns: the size of chunk found with the read head at the end of the chunk.
+ */
+-static size_t unpack_u16_chunk(struct aa_ext *e, char **chunk)
++VISIBLE_IF_KUNIT size_t aa_unpack_u16_chunk(struct aa_ext *e, char **chunk)
+ {
+ size_t size = 0;
+ void *pos = e->pos;
+
+- if (!inbounds(e, sizeof(u16)))
++ if (!aa_inbounds(e, sizeof(u16)))
+ goto fail;
+ size = le16_to_cpu(get_unaligned((__le16 *) e->pos));
+ e->pos += sizeof(__le16);
+- if (!inbounds(e, size))
++ if (!aa_inbounds(e, size))
+ goto fail;
+ *chunk = e->pos;
+ e->pos += size;
+@@ -239,20 +204,22 @@ fail:
+ e->pos = pos;
+ return 0;
+ }
++EXPORT_SYMBOL_IF_KUNIT(aa_unpack_u16_chunk);
+
+ /* unpack control byte */
+-static bool unpack_X(struct aa_ext *e, enum aa_code code)
++VISIBLE_IF_KUNIT bool aa_unpack_X(struct aa_ext *e, enum aa_code code)
+ {
+- if (!inbounds(e, 1))
++ if (!aa_inbounds(e, 1))
+ return false;
+ if (*(u8 *) e->pos != code)
+ return false;
+ e->pos++;
+ return true;
+ }
++EXPORT_SYMBOL_IF_KUNIT(aa_unpack_X);
+
+ /**
+- * unpack_nameX - check is the next element is of type X with a name of @name
++ * aa_unpack_nameX - check is the next element is of type X with a name of @name
+ * @e: serialized data extent information (NOT NULL)
+ * @code: type code
+ * @name: name to match to the serialized element. (MAYBE NULL)
+@@ -267,7 +234,7 @@ static bool unpack_X(struct aa_ext *e, enum aa_code code)
+ *
+ * Returns: false if either match fails, the read head does not move
+ */
+-static bool unpack_nameX(struct aa_ext *e, enum aa_code code, const char *name)
++VISIBLE_IF_KUNIT bool aa_unpack_nameX(struct aa_ext *e, enum aa_code code, const char *name)
+ {
+ /*
+ * May need to reset pos if name or type doesn't match
+@@ -277,9 +244,9 @@ static bool unpack_nameX(struct aa_ext *e, enum aa_code code, const char *name)
+ * Check for presence of a tagname, and if present name size
+ * AA_NAME tag value is a u16.
+ */
+- if (unpack_X(e, AA_NAME)) {
++ if (aa_unpack_X(e, AA_NAME)) {
+ char *tag = NULL;
+- size_t size = unpack_u16_chunk(e, &tag);
++ size_t size = aa_unpack_u16_chunk(e, &tag);
+ /* if a name is specified it must match. otherwise skip tag */
+ if (name && (!size || tag[size-1] != '\0' || strcmp(name, tag)))
+ goto fail;
+@@ -289,20 +256,21 @@ static bool unpack_nameX(struct aa_ext *e, enum aa_code code, const char *name)
+ }
+
+ /* now check if type code matches */
+- if (unpack_X(e, code))
++ if (aa_unpack_X(e, code))
+ return true;
+
+ fail:
+ e->pos = pos;
+ return false;
+ }
++EXPORT_SYMBOL_IF_KUNIT(aa_unpack_nameX);
+
+ static bool unpack_u8(struct aa_ext *e, u8 *data, const char *name)
+ {
+ void *pos = e->pos;
+
+- if (unpack_nameX(e, AA_U8, name)) {
+- if (!inbounds(e, sizeof(u8)))
++ if (aa_unpack_nameX(e, AA_U8, name)) {
++ if (!aa_inbounds(e, sizeof(u8)))
+ goto fail;
+ if (data)
+ *data = *((u8 *)e->pos);
+@@ -315,12 +283,12 @@ fail:
+ return false;
+ }
+
+-static bool unpack_u32(struct aa_ext *e, u32 *data, const char *name)
++VISIBLE_IF_KUNIT bool aa_unpack_u32(struct aa_ext *e, u32 *data, const char *name)
+ {
+ void *pos = e->pos;
+
+- if (unpack_nameX(e, AA_U32, name)) {
+- if (!inbounds(e, sizeof(u32)))
++ if (aa_unpack_nameX(e, AA_U32, name)) {
++ if (!aa_inbounds(e, sizeof(u32)))
+ goto fail;
+ if (data)
+ *data = le32_to_cpu(get_unaligned((__le32 *) e->pos));
+@@ -332,13 +300,14 @@ fail:
+ e->pos = pos;
+ return false;
+ }
++EXPORT_SYMBOL_IF_KUNIT(aa_unpack_u32);
+
+-static bool unpack_u64(struct aa_ext *e, u64 *data, const char *name)
++VISIBLE_IF_KUNIT bool aa_unpack_u64(struct aa_ext *e, u64 *data, const char *name)
+ {
+ void *pos = e->pos;
+
+- if (unpack_nameX(e, AA_U64, name)) {
+- if (!inbounds(e, sizeof(u64)))
++ if (aa_unpack_nameX(e, AA_U64, name)) {
++ if (!aa_inbounds(e, sizeof(u64)))
+ goto fail;
+ if (data)
+ *data = le64_to_cpu(get_unaligned((__le64 *) e->pos));
+@@ -350,14 +319,15 @@ fail:
+ e->pos = pos;
+ return false;
+ }
++EXPORT_SYMBOL_IF_KUNIT(aa_unpack_u64);
+
+-static size_t unpack_array(struct aa_ext *e, const char *name)
++VISIBLE_IF_KUNIT size_t aa_unpack_array(struct aa_ext *e, const char *name)
+ {
+ void *pos = e->pos;
+
+- if (unpack_nameX(e, AA_ARRAY, name)) {
++ if (aa_unpack_nameX(e, AA_ARRAY, name)) {
+ int size;
+- if (!inbounds(e, sizeof(u16)))
++ if (!aa_inbounds(e, sizeof(u16)))
+ goto fail;
+ size = (int)le16_to_cpu(get_unaligned((__le16 *) e->pos));
+ e->pos += sizeof(u16);
+@@ -368,18 +338,19 @@ fail:
+ e->pos = pos;
+ return 0;
+ }
++EXPORT_SYMBOL_IF_KUNIT(aa_unpack_array);
+
+-static size_t unpack_blob(struct aa_ext *e, char **blob, const char *name)
++VISIBLE_IF_KUNIT size_t aa_unpack_blob(struct aa_ext *e, char **blob, const char *name)
+ {
+ void *pos = e->pos;
+
+- if (unpack_nameX(e, AA_BLOB, name)) {
++ if (aa_unpack_nameX(e, AA_BLOB, name)) {
+ u32 size;
+- if (!inbounds(e, sizeof(u32)))
++ if (!aa_inbounds(e, sizeof(u32)))
+ goto fail;
+ size = le32_to_cpu(get_unaligned((__le32 *) e->pos));
+ e->pos += sizeof(u32);
+- if (inbounds(e, (size_t) size)) {
++ if (aa_inbounds(e, (size_t) size)) {
+ *blob = e->pos;
+ e->pos += size;
+ return size;
+@@ -390,15 +361,16 @@ fail:
+ e->pos = pos;
+ return 0;
+ }
++EXPORT_SYMBOL_IF_KUNIT(aa_unpack_blob);
+
+-static int unpack_str(struct aa_ext *e, const char **string, const char *name)
++VISIBLE_IF_KUNIT int aa_unpack_str(struct aa_ext *e, const char **string, const char *name)
+ {
+ char *src_str;
+ size_t size = 0;
+ void *pos = e->pos;
+ *string = NULL;
+- if (unpack_nameX(e, AA_STRING, name)) {
+- size = unpack_u16_chunk(e, &src_str);
++ if (aa_unpack_nameX(e, AA_STRING, name)) {
++ size = aa_unpack_u16_chunk(e, &src_str);
+ if (size) {
+ /* strings are null terminated, length is size - 1 */
+ if (src_str[size - 1] != 0)
+@@ -413,12 +385,13 @@ fail:
+ e->pos = pos;
+ return 0;
+ }
++EXPORT_SYMBOL_IF_KUNIT(aa_unpack_str);
+
+-static int unpack_strdup(struct aa_ext *e, char **string, const char *name)
++VISIBLE_IF_KUNIT int aa_unpack_strdup(struct aa_ext *e, char **string, const char *name)
+ {
+ const char *tmp;
+ void *pos = e->pos;
+- int res = unpack_str(e, &tmp, name);
++ int res = aa_unpack_str(e, &tmp, name);
+ *string = NULL;
+
+ if (!res)
+@@ -432,6 +405,7 @@ static int unpack_strdup(struct aa_ext *e, char **string, const char *name)
+
+ return res;
+ }
++EXPORT_SYMBOL_IF_KUNIT(aa_unpack_strdup);
+
+
+ /**
+@@ -446,7 +420,7 @@ static struct aa_dfa *unpack_dfa(struct aa_ext *e)
+ size_t size;
+ struct aa_dfa *dfa = NULL;
+
+- size = unpack_blob(e, &blob, "aadfa");
++ size = aa_unpack_blob(e, &blob, "aadfa");
+ if (size) {
+ /*
+ * The dfa is aligned with in the blob to 8 bytes
+@@ -482,10 +456,10 @@ static bool unpack_trans_table(struct aa_ext *e, struct aa_profile *profile)
+ void *saved_pos = e->pos;
+
+ /* exec table is optional */
+- if (unpack_nameX(e, AA_STRUCT, "xtable")) {
++ if (aa_unpack_nameX(e, AA_STRUCT, "xtable")) {
+ int i, size;
+
+- size = unpack_array(e, NULL);
++ size = aa_unpack_array(e, NULL);
+ /* currently 4 exec bits and entries 0-3 are reserved iupcx */
+ if (size > 16 - 4)
+ goto fail;
+@@ -497,8 +471,8 @@ static bool unpack_trans_table(struct aa_ext *e, struct aa_profile *profile)
+ profile->file.trans.size = size;
+ for (i = 0; i < size; i++) {
+ char *str;
+- int c, j, pos, size2 = unpack_strdup(e, &str, NULL);
+- /* unpack_strdup verifies that the last character is
++ int c, j, pos, size2 = aa_unpack_strdup(e, &str, NULL);
++ /* aa_unpack_strdup verifies that the last character is
+ * null termination byte.
+ */
+ if (!size2)
+@@ -521,7 +495,7 @@ static bool unpack_trans_table(struct aa_ext *e, struct aa_profile *profile)
+ goto fail;
+ /* beginning with : requires an embedded \0,
+ * verify that exactly 1 internal \0 exists
+- * trailing \0 already verified by unpack_strdup
++ * trailing \0 already verified by aa_unpack_strdup
+ *
+ * convert \0 back to : for label_parse
+ */
+@@ -533,9 +507,9 @@ static bool unpack_trans_table(struct aa_ext *e, struct aa_profile *profile)
+ /* fail - all other cases with embedded \0 */
+ goto fail;
+ }
+- if (!unpack_nameX(e, AA_ARRAYEND, NULL))
++ if (!aa_unpack_nameX(e, AA_ARRAYEND, NULL))
+ goto fail;
+- if (!unpack_nameX(e, AA_STRUCTEND, NULL))
++ if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
+ goto fail;
+ }
+ return true;
+@@ -550,21 +524,21 @@ static bool unpack_xattrs(struct aa_ext *e, struct aa_profile *profile)
+ {
+ void *pos = e->pos;
+
+- if (unpack_nameX(e, AA_STRUCT, "xattrs")) {
++ if (aa_unpack_nameX(e, AA_STRUCT, "xattrs")) {
+ int i, size;
+
+- size = unpack_array(e, NULL);
++ size = aa_unpack_array(e, NULL);
+ profile->xattr_count = size;
+ profile->xattrs = kcalloc(size, sizeof(char *), GFP_KERNEL);
+ if (!profile->xattrs)
+ goto fail;
+ for (i = 0; i < size; i++) {
+- if (!unpack_strdup(e, &profile->xattrs[i], NULL))
++ if (!aa_unpack_strdup(e, &profile->xattrs[i], NULL))
+ goto fail;
+ }
+- if (!unpack_nameX(e, AA_ARRAYEND, NULL))
++ if (!aa_unpack_nameX(e, AA_ARRAYEND, NULL))
+ goto fail;
+- if (!unpack_nameX(e, AA_STRUCTEND, NULL))
++ if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
+ goto fail;
+ }
+
+@@ -580,8 +554,8 @@ static bool unpack_secmark(struct aa_ext *e, struct aa_profile *profile)
+ void *pos = e->pos;
+ int i, size;
+
+- if (unpack_nameX(e, AA_STRUCT, "secmark")) {
+- size = unpack_array(e, NULL);
++ if (aa_unpack_nameX(e, AA_STRUCT, "secmark")) {
++ size = aa_unpack_array(e, NULL);
+
+ profile->secmark = kcalloc(size, sizeof(struct aa_secmark),
+ GFP_KERNEL);
+@@ -595,12 +569,12 @@ static bool unpack_secmark(struct aa_ext *e, struct aa_profile *profile)
+ goto fail;
+ if (!unpack_u8(e, &profile->secmark[i].deny, NULL))
+ goto fail;
+- if (!unpack_strdup(e, &profile->secmark[i].label, NULL))
++ if (!aa_unpack_strdup(e, &profile->secmark[i].label, NULL))
+ goto fail;
+ }
+- if (!unpack_nameX(e, AA_ARRAYEND, NULL))
++ if (!aa_unpack_nameX(e, AA_ARRAYEND, NULL))
+ goto fail;
+- if (!unpack_nameX(e, AA_STRUCTEND, NULL))
++ if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
+ goto fail;
+ }
+
+@@ -624,26 +598,26 @@ static bool unpack_rlimits(struct aa_ext *e, struct aa_profile *profile)
+ void *pos = e->pos;
+
+ /* rlimits are optional */
+- if (unpack_nameX(e, AA_STRUCT, "rlimits")) {
++ if (aa_unpack_nameX(e, AA_STRUCT, "rlimits")) {
+ int i, size;
+ u32 tmp = 0;
+- if (!unpack_u32(e, &tmp, NULL))
++ if (!aa_unpack_u32(e, &tmp, NULL))
+ goto fail;
+ profile->rlimits.mask = tmp;
+
+- size = unpack_array(e, NULL);
++ size = aa_unpack_array(e, NULL);
+ if (size > RLIM_NLIMITS)
+ goto fail;
+ for (i = 0; i < size; i++) {
+ u64 tmp2 = 0;
+ int a = aa_map_resource(i);
+- if (!unpack_u64(e, &tmp2, NULL))
++ if (!aa_unpack_u64(e, &tmp2, NULL))
+ goto fail;
+ profile->rlimits.limits[a].rlim_max = tmp2;
+ }
+- if (!unpack_nameX(e, AA_ARRAYEND, NULL))
++ if (!aa_unpack_nameX(e, AA_ARRAYEND, NULL))
+ goto fail;
+- if (!unpack_nameX(e, AA_STRUCTEND, NULL))
++ if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
+ goto fail;
+ }
+ return true;
+@@ -682,7 +656,7 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
+ const char *info = "failed to unpack profile";
+ size_t ns_len;
+ struct rhashtable_params params = { 0 };
+- char *key = NULL;
++ char *key = NULL, *disconnected = NULL;
+ struct aa_data *data;
+ int i, error = -EPROTO;
+ kernel_cap_t tmpcap;
+@@ -691,9 +665,9 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
+ *ns_name = NULL;
+
+ /* check that we have the right struct being passed */
+- if (!unpack_nameX(e, AA_STRUCT, "profile"))
++ if (!aa_unpack_nameX(e, AA_STRUCT, "profile"))
+ goto fail;
+- if (!unpack_str(e, &name, NULL))
++ if (!aa_unpack_str(e, &name, NULL))
+ goto fail;
+ if (*name == '\0')
+ goto fail;
+@@ -713,10 +687,10 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
+ return ERR_PTR(-ENOMEM);
+
+ /* profile renaming is optional */
+- (void) unpack_str(e, &profile->rename, "rename");
++ (void) aa_unpack_str(e, &profile->rename, "rename");
+
+ /* attachment string is optional */
+- (void) unpack_str(e, &profile->attach, "attach");
++ (void) aa_unpack_str(e, &profile->attach, "attach");
+
+ /* xmatch is optional and may be NULL */
+ profile->xmatch = unpack_dfa(e);
+@@ -728,7 +702,7 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
+ }
+ /* xmatch_len is not optional if xmatch is set */
+ if (profile->xmatch) {
+- if (!unpack_u32(e, &tmp, NULL)) {
++ if (!aa_unpack_u32(e, &tmp, NULL)) {
+ info = "missing xmatch len";
+ goto fail;
+ }
+@@ -736,15 +710,16 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
+ }
+
+ /* disconnected attachment string is optional */
+- (void) unpack_str(e, &profile->disconnected, "disconnected");
++ (void) aa_unpack_strdup(e, &disconnected, "disconnected");
++ profile->disconnected = disconnected;
+
+ /* per profile debug flags (complain, audit) */
+- if (!unpack_nameX(e, AA_STRUCT, "flags")) {
++ if (!aa_unpack_nameX(e, AA_STRUCT, "flags")) {
+ info = "profile missing flags";
+ goto fail;
+ }
+ info = "failed to unpack profile flags";
+- if (!unpack_u32(e, &tmp, NULL))
++ if (!aa_unpack_u32(e, &tmp, NULL))
+ goto fail;
+ if (tmp & PACKED_FLAG_HAT)
+ profile->label.flags |= FLAG_HAT;
+@@ -752,7 +727,7 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
+ profile->label.flags |= FLAG_DEBUG1;
+ if (tmp & PACKED_FLAG_DEBUG2)
+ profile->label.flags |= FLAG_DEBUG2;
+- if (!unpack_u32(e, &tmp, NULL))
++ if (!aa_unpack_u32(e, &tmp, NULL))
+ goto fail;
+ if (tmp == PACKED_MODE_COMPLAIN || (e->version & FORCE_COMPLAIN_FLAG)) {
+ profile->mode = APPARMOR_COMPLAIN;
+@@ -766,16 +741,16 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
+ } else {
+ goto fail;
+ }
+- if (!unpack_u32(e, &tmp, NULL))
++ if (!aa_unpack_u32(e, &tmp, NULL))
+ goto fail;
+ if (tmp)
+ profile->audit = AUDIT_ALL;
+
+- if (!unpack_nameX(e, AA_STRUCTEND, NULL))
++ if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
+ goto fail;
+
+ /* path_flags is optional */
+- if (unpack_u32(e, &profile->path_flags, "path_flags"))
++ if (aa_unpack_u32(e, &profile->path_flags, "path_flags"))
+ profile->path_flags |= profile->label.flags &
+ PATH_MEDIATE_DELETED;
+ else
+@@ -783,38 +758,38 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
+ profile->path_flags = PATH_MEDIATE_DELETED;
+
+ info = "failed to unpack profile capabilities";
+- if (!unpack_u32(e, &(profile->caps.allow.cap[0]), NULL))
++ if (!aa_unpack_u32(e, &(profile->caps.allow.cap[0]), NULL))
+ goto fail;
+- if (!unpack_u32(e, &(profile->caps.audit.cap[0]), NULL))
++ if (!aa_unpack_u32(e, &(profile->caps.audit.cap[0]), NULL))
+ goto fail;
+- if (!unpack_u32(e, &(profile->caps.quiet.cap[0]), NULL))
++ if (!aa_unpack_u32(e, &(profile->caps.quiet.cap[0]), NULL))
+ goto fail;
+- if (!unpack_u32(e, &tmpcap.cap[0], NULL))
++ if (!aa_unpack_u32(e, &tmpcap.cap[0], NULL))
+ goto fail;
+
+ info = "failed to unpack upper profile capabilities";
+- if (unpack_nameX(e, AA_STRUCT, "caps64")) {
++ if (aa_unpack_nameX(e, AA_STRUCT, "caps64")) {
+ /* optional upper half of 64 bit caps */
+- if (!unpack_u32(e, &(profile->caps.allow.cap[1]), NULL))
++ if (!aa_unpack_u32(e, &(profile->caps.allow.cap[1]), NULL))
+ goto fail;
+- if (!unpack_u32(e, &(profile->caps.audit.cap[1]), NULL))
++ if (!aa_unpack_u32(e, &(profile->caps.audit.cap[1]), NULL))
+ goto fail;
+- if (!unpack_u32(e, &(profile->caps.quiet.cap[1]), NULL))
++ if (!aa_unpack_u32(e, &(profile->caps.quiet.cap[1]), NULL))
+ goto fail;
+- if (!unpack_u32(e, &(tmpcap.cap[1]), NULL))
++ if (!aa_unpack_u32(e, &(tmpcap.cap[1]), NULL))
+ goto fail;
+- if (!unpack_nameX(e, AA_STRUCTEND, NULL))
++ if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
+ goto fail;
+ }
+
+ info = "failed to unpack extended profile capabilities";
+- if (unpack_nameX(e, AA_STRUCT, "capsx")) {
++ if (aa_unpack_nameX(e, AA_STRUCT, "capsx")) {
+ /* optional extended caps mediation mask */
+- if (!unpack_u32(e, &(profile->caps.extended.cap[0]), NULL))
++ if (!aa_unpack_u32(e, &(profile->caps.extended.cap[0]), NULL))
+ goto fail;
+- if (!unpack_u32(e, &(profile->caps.extended.cap[1]), NULL))
++ if (!aa_unpack_u32(e, &(profile->caps.extended.cap[1]), NULL))
+ goto fail;
+- if (!unpack_nameX(e, AA_STRUCTEND, NULL))
++ if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
+ goto fail;
+ }
+
+@@ -833,7 +808,7 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
+ goto fail;
+ }
+
+- if (unpack_nameX(e, AA_STRUCT, "policydb")) {
++ if (aa_unpack_nameX(e, AA_STRUCT, "policydb")) {
+ /* generic policy dfa - optional and may be NULL */
+ info = "failed to unpack policydb";
+ profile->policy.dfa = unpack_dfa(e);
+@@ -845,7 +820,7 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
+ error = -EPROTO;
+ goto fail;
+ }
+- if (!unpack_u32(e, &profile->policy.start[0], "start"))
++ if (!aa_unpack_u32(e, &profile->policy.start[0], "start"))
+ /* default start state */
+ profile->policy.start[0] = DFA_START;
+ /* setup class index */
+@@ -855,7 +830,7 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
+ profile->policy.start[0],
+ i);
+ }
+- if (!unpack_nameX(e, AA_STRUCTEND, NULL))
++ if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
+ goto fail;
+ } else
+ profile->policy.dfa = aa_get_dfa(nulldfa);
+@@ -868,7 +843,7 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
+ info = "failed to unpack profile file rules";
+ goto fail;
+ } else if (profile->file.dfa) {
+- if (!unpack_u32(e, &profile->file.start, "dfa_start"))
++ if (!aa_unpack_u32(e, &profile->file.start, "dfa_start"))
+ /* default start state */
+ profile->file.start = DFA_START;
+ } else if (profile->policy.dfa &&
+@@ -883,7 +858,7 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
+ goto fail;
+ }
+
+- if (unpack_nameX(e, AA_STRUCT, "data")) {
++ if (aa_unpack_nameX(e, AA_STRUCT, "data")) {
+ info = "out of memory";
+ profile->data = kzalloc(sizeof(*profile->data), GFP_KERNEL);
+ if (!profile->data)
+@@ -901,7 +876,7 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
+ goto fail;
+ }
+
+- while (unpack_strdup(e, &key, NULL)) {
++ while (aa_unpack_strdup(e, &key, NULL)) {
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ kfree_sensitive(key);
+@@ -909,7 +884,7 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
+ }
+
+ data->key = key;
+- data->size = unpack_blob(e, &data->data, NULL);
++ data->size = aa_unpack_blob(e, &data->data, NULL);
+ data->data = kvmemdup(data->data, data->size);
+ if (data->size && !data->data) {
+ kfree_sensitive(data->key);
+@@ -926,13 +901,13 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
+ }
+ }
+
+- if (!unpack_nameX(e, AA_STRUCTEND, NULL)) {
++ if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL)) {
+ info = "failed to unpack end of key, value data table";
+ goto fail;
+ }
+ }
+
+- if (!unpack_nameX(e, AA_STRUCTEND, NULL)) {
++ if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL)) {
+ info = "failed to unpack end of profile";
+ goto fail;
+ }
+@@ -965,7 +940,7 @@ static int verify_header(struct aa_ext *e, int required, const char **ns)
+ *ns = NULL;
+
+ /* get the interface version */
+- if (!unpack_u32(e, &e->version, "version")) {
++ if (!aa_unpack_u32(e, &e->version, "version")) {
+ if (required) {
+ audit_iface(NULL, NULL, NULL, "invalid profile format",
+ e, error);
+@@ -984,7 +959,7 @@ static int verify_header(struct aa_ext *e, int required, const char **ns)
+ }
+
+ /* read the namespace if present */
+- if (unpack_str(e, &name, "namespace")) {
++ if (aa_unpack_str(e, &name, "namespace")) {
+ if (*name == '\0') {
+ audit_iface(NULL, NULL, NULL, "invalid namespace name",
+ e, error);
+@@ -1256,7 +1231,3 @@ fail:
+
+ return error;
+ }
+-
+-#ifdef CONFIG_SECURITY_APPARMOR_KUNIT_TEST
+-#include "policy_unpack_test.c"
+-#endif /* CONFIG_SECURITY_APPARMOR_KUNIT_TEST */
+diff --git a/security/apparmor/policy_unpack_test.c b/security/apparmor/policy_unpack_test.c
+index 0a969b2e03dba..f25cf2a023d57 100644
+--- a/security/apparmor/policy_unpack_test.c
++++ b/security/apparmor/policy_unpack_test.c
+@@ -4,6 +4,7 @@
+ */
+
+ #include <kunit/test.h>
++#include <kunit/visibility.h>
+
+ #include "include/policy.h"
+ #include "include/policy_unpack.h"
+@@ -43,6 +44,8 @@
+ #define TEST_ARRAY_BUF_OFFSET \
+ (TEST_NAMED_ARRAY_BUF_OFFSET + 3 + strlen(TEST_ARRAY_NAME) + 1)
+
++MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
++
+ struct policy_unpack_fixture {
+ struct aa_ext *e;
+ size_t e_size;
+@@ -125,16 +128,16 @@ static void policy_unpack_test_inbounds_when_inbounds(struct kunit *test)
+ {
+ struct policy_unpack_fixture *puf = test->priv;
+
+- KUNIT_EXPECT_TRUE(test, inbounds(puf->e, 0));
+- KUNIT_EXPECT_TRUE(test, inbounds(puf->e, puf->e_size / 2));
+- KUNIT_EXPECT_TRUE(test, inbounds(puf->e, puf->e_size));
++ KUNIT_EXPECT_TRUE(test, aa_inbounds(puf->e, 0));
++ KUNIT_EXPECT_TRUE(test, aa_inbounds(puf->e, puf->e_size / 2));
++ KUNIT_EXPECT_TRUE(test, aa_inbounds(puf->e, puf->e_size));
+ }
+
+ static void policy_unpack_test_inbounds_when_out_of_bounds(struct kunit *test)
+ {
+ struct policy_unpack_fixture *puf = test->priv;
+
+- KUNIT_EXPECT_FALSE(test, inbounds(puf->e, puf->e_size + 1));
++ KUNIT_EXPECT_FALSE(test, aa_inbounds(puf->e, puf->e_size + 1));
+ }
+
+ static void policy_unpack_test_unpack_array_with_null_name(struct kunit *test)
+@@ -144,7 +147,7 @@ static void policy_unpack_test_unpack_array_with_null_name(struct kunit *test)
+
+ puf->e->pos += TEST_ARRAY_BUF_OFFSET;
+
+- array_size = unpack_array(puf->e, NULL);
++ array_size = aa_unpack_array(puf->e, NULL);
+
+ KUNIT_EXPECT_EQ(test, array_size, (u16)TEST_ARRAY_SIZE);
+ KUNIT_EXPECT_PTR_EQ(test, puf->e->pos,
+@@ -159,7 +162,7 @@ static void policy_unpack_test_unpack_array_with_name(struct kunit *test)
+
+ puf->e->pos += TEST_NAMED_ARRAY_BUF_OFFSET;
+
+- array_size = unpack_array(puf->e, name);
++ array_size = aa_unpack_array(puf->e, name);
+
+ KUNIT_EXPECT_EQ(test, array_size, (u16)TEST_ARRAY_SIZE);
+ KUNIT_EXPECT_PTR_EQ(test, puf->e->pos,
+@@ -175,7 +178,7 @@ static void policy_unpack_test_unpack_array_out_of_bounds(struct kunit *test)
+ puf->e->pos += TEST_NAMED_ARRAY_BUF_OFFSET;
+ puf->e->end = puf->e->start + TEST_ARRAY_BUF_OFFSET + sizeof(u16);
+
+- array_size = unpack_array(puf->e, name);
++ array_size = aa_unpack_array(puf->e, name);
+
+ KUNIT_EXPECT_EQ(test, array_size, 0);
+ KUNIT_EXPECT_PTR_EQ(test, puf->e->pos,
+@@ -189,7 +192,7 @@ static void policy_unpack_test_unpack_blob_with_null_name(struct kunit *test)
+ size_t size;
+
+ puf->e->pos += TEST_BLOB_BUF_OFFSET;
+- size = unpack_blob(puf->e, &blob, NULL);
++ size = aa_unpack_blob(puf->e, &blob, NULL);
+
+ KUNIT_ASSERT_EQ(test, size, TEST_BLOB_DATA_SIZE);
+ KUNIT_EXPECT_TRUE(test,
+@@ -203,7 +206,7 @@ static void policy_unpack_test_unpack_blob_with_name(struct kunit *test)
+ size_t size;
+
+ puf->e->pos += TEST_NAMED_BLOB_BUF_OFFSET;
+- size = unpack_blob(puf->e, &blob, TEST_BLOB_NAME);
++ size = aa_unpack_blob(puf->e, &blob, TEST_BLOB_NAME);
+
+ KUNIT_ASSERT_EQ(test, size, TEST_BLOB_DATA_SIZE);
+ KUNIT_EXPECT_TRUE(test,
+@@ -222,7 +225,7 @@ static void policy_unpack_test_unpack_blob_out_of_bounds(struct kunit *test)
+ puf->e->end = puf->e->start + TEST_BLOB_BUF_OFFSET
+ + TEST_BLOB_DATA_SIZE - 1;
+
+- size = unpack_blob(puf->e, &blob, TEST_BLOB_NAME);
++ size = aa_unpack_blob(puf->e, &blob, TEST_BLOB_NAME);
+
+ KUNIT_EXPECT_EQ(test, size, 0);
+ KUNIT_EXPECT_PTR_EQ(test, puf->e->pos, start);
+@@ -235,7 +238,7 @@ static void policy_unpack_test_unpack_str_with_null_name(struct kunit *test)
+ size_t size;
+
+ puf->e->pos += TEST_STRING_BUF_OFFSET;
+- size = unpack_str(puf->e, &string, NULL);
++ size = aa_unpack_str(puf->e, &string, NULL);
+
+ KUNIT_EXPECT_EQ(test, size, strlen(TEST_STRING_DATA) + 1);
+ KUNIT_EXPECT_STREQ(test, string, TEST_STRING_DATA);
+@@ -247,7 +250,7 @@ static void policy_unpack_test_unpack_str_with_name(struct kunit *test)
+ const char *string = NULL;
+ size_t size;
+
+- size = unpack_str(puf->e, &string, TEST_STRING_NAME);
++ size = aa_unpack_str(puf->e, &string, TEST_STRING_NAME);
+
+ KUNIT_EXPECT_EQ(test, size, strlen(TEST_STRING_DATA) + 1);
+ KUNIT_EXPECT_STREQ(test, string, TEST_STRING_DATA);
+@@ -263,7 +266,7 @@ static void policy_unpack_test_unpack_str_out_of_bounds(struct kunit *test)
+ puf->e->end = puf->e->pos + TEST_STRING_BUF_OFFSET
+ + strlen(TEST_STRING_DATA) - 1;
+
+- size = unpack_str(puf->e, &string, TEST_STRING_NAME);
++ size = aa_unpack_str(puf->e, &string, TEST_STRING_NAME);
+
+ KUNIT_EXPECT_EQ(test, size, 0);
+ KUNIT_EXPECT_PTR_EQ(test, puf->e->pos, start);
+@@ -276,7 +279,7 @@ static void policy_unpack_test_unpack_strdup_with_null_name(struct kunit *test)
+ size_t size;
+
+ puf->e->pos += TEST_STRING_BUF_OFFSET;
+- size = unpack_strdup(puf->e, &string, NULL);
++ size = aa_unpack_strdup(puf->e, &string, NULL);
+
+ KUNIT_EXPECT_EQ(test, size, strlen(TEST_STRING_DATA) + 1);
+ KUNIT_EXPECT_FALSE(test,
+@@ -291,7 +294,7 @@ static void policy_unpack_test_unpack_strdup_with_name(struct kunit *test)
+ char *string = NULL;
+ size_t size;
+
+- size = unpack_strdup(puf->e, &string, TEST_STRING_NAME);
++ size = aa_unpack_strdup(puf->e, &string, TEST_STRING_NAME);
+
+ KUNIT_EXPECT_EQ(test, size, strlen(TEST_STRING_DATA) + 1);
+ KUNIT_EXPECT_FALSE(test,
+@@ -310,7 +313,7 @@ static void policy_unpack_test_unpack_strdup_out_of_bounds(struct kunit *test)
+ puf->e->end = puf->e->pos + TEST_STRING_BUF_OFFSET
+ + strlen(TEST_STRING_DATA) - 1;
+
+- size = unpack_strdup(puf->e, &string, TEST_STRING_NAME);
++ size = aa_unpack_strdup(puf->e, &string, TEST_STRING_NAME);
+
+ KUNIT_EXPECT_EQ(test, size, 0);
+ KUNIT_EXPECT_NULL(test, string);
+@@ -324,7 +327,7 @@ static void policy_unpack_test_unpack_nameX_with_null_name(struct kunit *test)
+
+ puf->e->pos += TEST_U32_BUF_OFFSET;
+
+- success = unpack_nameX(puf->e, AA_U32, NULL);
++ success = aa_unpack_nameX(puf->e, AA_U32, NULL);
+
+ KUNIT_EXPECT_TRUE(test, success);
+ KUNIT_EXPECT_PTR_EQ(test, puf->e->pos,
+@@ -338,7 +341,7 @@ static void policy_unpack_test_unpack_nameX_with_wrong_code(struct kunit *test)
+
+ puf->e->pos += TEST_U32_BUF_OFFSET;
+
+- success = unpack_nameX(puf->e, AA_BLOB, NULL);
++ success = aa_unpack_nameX(puf->e, AA_BLOB, NULL);
+
+ KUNIT_EXPECT_FALSE(test, success);
+ KUNIT_EXPECT_PTR_EQ(test, puf->e->pos,
+@@ -353,7 +356,7 @@ static void policy_unpack_test_unpack_nameX_with_name(struct kunit *test)
+
+ puf->e->pos += TEST_NAMED_U32_BUF_OFFSET;
+
+- success = unpack_nameX(puf->e, AA_U32, name);
++ success = aa_unpack_nameX(puf->e, AA_U32, name);
+
+ KUNIT_EXPECT_TRUE(test, success);
+ KUNIT_EXPECT_PTR_EQ(test, puf->e->pos,
+@@ -368,7 +371,7 @@ static void policy_unpack_test_unpack_nameX_with_wrong_name(struct kunit *test)
+
+ puf->e->pos += TEST_NAMED_U32_BUF_OFFSET;
+
+- success = unpack_nameX(puf->e, AA_U32, name);
++ success = aa_unpack_nameX(puf->e, AA_U32, name);
+
+ KUNIT_EXPECT_FALSE(test, success);
+ KUNIT_EXPECT_PTR_EQ(test, puf->e->pos,
+@@ -389,7 +392,7 @@ static void policy_unpack_test_unpack_u16_chunk_basic(struct kunit *test)
+ */
+ puf->e->end += TEST_U16_DATA;
+
+- size = unpack_u16_chunk(puf->e, &chunk);
++ size = aa_unpack_u16_chunk(puf->e, &chunk);
+
+ KUNIT_EXPECT_PTR_EQ(test, chunk,
+ puf->e->start + TEST_U16_OFFSET + 2);
+@@ -406,7 +409,7 @@ static void policy_unpack_test_unpack_u16_chunk_out_of_bounds_1(
+
+ puf->e->pos = puf->e->end - 1;
+
+- size = unpack_u16_chunk(puf->e, &chunk);
++ size = aa_unpack_u16_chunk(puf->e, &chunk);
+
+ KUNIT_EXPECT_EQ(test, size, 0);
+ KUNIT_EXPECT_NULL(test, chunk);
+@@ -428,7 +431,7 @@ static void policy_unpack_test_unpack_u16_chunk_out_of_bounds_2(
+ */
+ puf->e->end = puf->e->pos + TEST_U16_DATA - 1;
+
+- size = unpack_u16_chunk(puf->e, &chunk);
++ size = aa_unpack_u16_chunk(puf->e, &chunk);
+
+ KUNIT_EXPECT_EQ(test, size, 0);
+ KUNIT_EXPECT_NULL(test, chunk);
+@@ -443,7 +446,7 @@ static void policy_unpack_test_unpack_u32_with_null_name(struct kunit *test)
+
+ puf->e->pos += TEST_U32_BUF_OFFSET;
+
+- success = unpack_u32(puf->e, &data, NULL);
++ success = aa_unpack_u32(puf->e, &data, NULL);
+
+ KUNIT_EXPECT_TRUE(test, success);
+ KUNIT_EXPECT_EQ(test, data, TEST_U32_DATA);
+@@ -460,7 +463,7 @@ static void policy_unpack_test_unpack_u32_with_name(struct kunit *test)
+
+ puf->e->pos += TEST_NAMED_U32_BUF_OFFSET;
+
+- success = unpack_u32(puf->e, &data, name);
++ success = aa_unpack_u32(puf->e, &data, name);
+
+ KUNIT_EXPECT_TRUE(test, success);
+ KUNIT_EXPECT_EQ(test, data, TEST_U32_DATA);
+@@ -478,7 +481,7 @@ static void policy_unpack_test_unpack_u32_out_of_bounds(struct kunit *test)
+ puf->e->pos += TEST_NAMED_U32_BUF_OFFSET;
+ puf->e->end = puf->e->start + TEST_U32_BUF_OFFSET + sizeof(u32);
+
+- success = unpack_u32(puf->e, &data, name);
++ success = aa_unpack_u32(puf->e, &data, name);
+
+ KUNIT_EXPECT_FALSE(test, success);
+ KUNIT_EXPECT_PTR_EQ(test, puf->e->pos,
+@@ -493,7 +496,7 @@ static void policy_unpack_test_unpack_u64_with_null_name(struct kunit *test)
+
+ puf->e->pos += TEST_U64_BUF_OFFSET;
+
+- success = unpack_u64(puf->e, &data, NULL);
++ success = aa_unpack_u64(puf->e, &data, NULL);
+
+ KUNIT_EXPECT_TRUE(test, success);
+ KUNIT_EXPECT_EQ(test, data, TEST_U64_DATA);
+@@ -510,7 +513,7 @@ static void policy_unpack_test_unpack_u64_with_name(struct kunit *test)
+
+ puf->e->pos += TEST_NAMED_U64_BUF_OFFSET;
+
+- success = unpack_u64(puf->e, &data, name);
++ success = aa_unpack_u64(puf->e, &data, name);
+
+ KUNIT_EXPECT_TRUE(test, success);
+ KUNIT_EXPECT_EQ(test, data, TEST_U64_DATA);
+@@ -528,7 +531,7 @@ static void policy_unpack_test_unpack_u64_out_of_bounds(struct kunit *test)
+ puf->e->pos += TEST_NAMED_U64_BUF_OFFSET;
+ puf->e->end = puf->e->start + TEST_U64_BUF_OFFSET + sizeof(u64);
+
+- success = unpack_u64(puf->e, &data, name);
++ success = aa_unpack_u64(puf->e, &data, name);
+
+ KUNIT_EXPECT_FALSE(test, success);
+ KUNIT_EXPECT_PTR_EQ(test, puf->e->pos,
+@@ -538,7 +541,7 @@ static void policy_unpack_test_unpack_u64_out_of_bounds(struct kunit *test)
+ static void policy_unpack_test_unpack_X_code_match(struct kunit *test)
+ {
+ struct policy_unpack_fixture *puf = test->priv;
+- bool success = unpack_X(puf->e, AA_NAME);
++ bool success = aa_unpack_X(puf->e, AA_NAME);
+
+ KUNIT_EXPECT_TRUE(test, success);
+ KUNIT_EXPECT_TRUE(test, puf->e->pos == puf->e->start + 1);
+@@ -547,7 +550,7 @@ static void policy_unpack_test_unpack_X_code_match(struct kunit *test)
+ static void policy_unpack_test_unpack_X_code_mismatch(struct kunit *test)
+ {
+ struct policy_unpack_fixture *puf = test->priv;
+- bool success = unpack_X(puf->e, AA_STRING);
++ bool success = aa_unpack_X(puf->e, AA_STRING);
+
+ KUNIT_EXPECT_FALSE(test, success);
+ KUNIT_EXPECT_TRUE(test, puf->e->pos == puf->e->start);
+@@ -559,7 +562,7 @@ static void policy_unpack_test_unpack_X_out_of_bounds(struct kunit *test)
+ bool success;
+
+ puf->e->pos = puf->e->end;
+- success = unpack_X(puf->e, AA_NAME);
++ success = aa_unpack_X(puf->e, AA_NAME);
+
+ KUNIT_EXPECT_FALSE(test, success);
+ }
+@@ -605,3 +608,5 @@ static struct kunit_suite apparmor_policy_unpack_test_module = {
+ };
+
+ kunit_test_suite(apparmor_policy_unpack_test_module);
++
++MODULE_LICENSE("GPL");
+diff --git a/sound/pci/hda/cs35l41_hda.c b/sound/pci/hda/cs35l41_hda.c
+index a5b10a6a33a5e..c79a12e5c9ad2 100644
+--- a/sound/pci/hda/cs35l41_hda.c
++++ b/sound/pci/hda/cs35l41_hda.c
+@@ -1501,8 +1501,7 @@ int cs35l41_hda_probe(struct device *dev, const char *device_name, int id, int i
+ ret = component_add(cs35l41->dev, &cs35l41_hda_comp_ops);
+ if (ret) {
+ dev_err(cs35l41->dev, "Register component failed: %d\n", ret);
+- pm_runtime_disable(cs35l41->dev);
+- goto err;
++ goto err_pm;
+ }
+
+ dev_info(cs35l41->dev, "Cirrus Logic CS35L41 (%x), Revision: %02X\n", regid, reg_revid);
+@@ -1510,6 +1509,7 @@ int cs35l41_hda_probe(struct device *dev, const char *device_name, int id, int i
+ return 0;
+
+ err_pm:
++ pm_runtime_dont_use_autosuspend(cs35l41->dev);
+ pm_runtime_disable(cs35l41->dev);
+ pm_runtime_put_noidle(cs35l41->dev);
+
+@@ -1528,6 +1528,7 @@ void cs35l41_hda_remove(struct device *dev)
+ struct cs35l41_hda *cs35l41 = dev_get_drvdata(dev);
+
+ pm_runtime_get_sync(cs35l41->dev);
++ pm_runtime_dont_use_autosuspend(cs35l41->dev);
+ pm_runtime_disable(cs35l41->dev);
+
+ if (cs35l41->halo_initialized)
+diff --git a/sound/soc/codecs/cs35l41.c b/sound/soc/codecs/cs35l41.c
+index 2f4b0ee93aced..e91c1a4640e46 100644
+--- a/sound/soc/codecs/cs35l41.c
++++ b/sound/soc/codecs/cs35l41.c
+@@ -374,10 +374,18 @@ static irqreturn_t cs35l41_irq(int irq, void *data)
+ struct cs35l41_private *cs35l41 = data;
+ unsigned int status[4] = { 0, 0, 0, 0 };
+ unsigned int masks[4] = { 0, 0, 0, 0 };
+- int ret = IRQ_NONE;
+ unsigned int i;
++ int ret;
+
+- pm_runtime_get_sync(cs35l41->dev);
++ ret = pm_runtime_resume_and_get(cs35l41->dev);
++ if (ret < 0) {
++ dev_err(cs35l41->dev,
++ "pm_runtime_resume_and_get failed in %s: %d\n",
++ __func__, ret);
++ return IRQ_NONE;
++ }
++
++ ret = IRQ_NONE;
+
+ for (i = 0; i < ARRAY_SIZE(status); i++) {
+ regmap_read(cs35l41->regmap,
+@@ -1330,6 +1338,7 @@ int cs35l41_probe(struct cs35l41_private *cs35l41, const struct cs35l41_hw_cfg *
+ return 0;
+
+ err_pm:
++ pm_runtime_dont_use_autosuspend(cs35l41->dev);
+ pm_runtime_disable(cs35l41->dev);
+ pm_runtime_put_noidle(cs35l41->dev);
+
+@@ -1346,6 +1355,7 @@ EXPORT_SYMBOL_GPL(cs35l41_probe);
+ void cs35l41_remove(struct cs35l41_private *cs35l41)
+ {
+ pm_runtime_get_sync(cs35l41->dev);
++ pm_runtime_dont_use_autosuspend(cs35l41->dev);
+ pm_runtime_disable(cs35l41->dev);
+
+ regmap_write(cs35l41->regmap, CS35L41_IRQ1_MASK1, 0xFFFFFFFF);
+diff --git a/sound/soc/codecs/hdmi-codec.c b/sound/soc/codecs/hdmi-codec.c
+index 0b1cdb2d60498..4d3c3365488a2 100644
+--- a/sound/soc/codecs/hdmi-codec.c
++++ b/sound/soc/codecs/hdmi-codec.c
+@@ -861,18 +861,13 @@ static int hdmi_codec_set_jack(struct snd_soc_component *component,
+ void *data)
+ {
+ struct hdmi_codec_priv *hcp = snd_soc_component_get_drvdata(component);
+- int ret = -ENOTSUPP;
+
+ if (hcp->hcd.ops->hook_plugged_cb) {
+ hcp->jack = jack;
+- ret = hcp->hcd.ops->hook_plugged_cb(component->dev->parent,
+- hcp->hcd.data,
+- plugged_cb,
+- component->dev);
+- if (ret)
+- hcp->jack = NULL;
++ return 0;
+ }
+- return ret;
++
++ return -ENOTSUPP;
+ }
+
+ static int hdmi_dai_spdif_probe(struct snd_soc_dai *dai)
+@@ -948,6 +943,21 @@ static int hdmi_of_xlate_dai_id(struct snd_soc_component *component,
+ return ret;
+ }
+
++static int hdmi_probe(struct snd_soc_component *component)
++{
++ struct hdmi_codec_priv *hcp = snd_soc_component_get_drvdata(component);
++ int ret = 0;
++
++ if (hcp->hcd.ops->hook_plugged_cb) {
++ ret = hcp->hcd.ops->hook_plugged_cb(component->dev->parent,
++ hcp->hcd.data,
++ plugged_cb,
++ component->dev);
++ }
++
++ return ret;
++}
++
+ static void hdmi_remove(struct snd_soc_component *component)
+ {
+ struct hdmi_codec_priv *hcp = snd_soc_component_get_drvdata(component);
+@@ -958,6 +968,7 @@ static void hdmi_remove(struct snd_soc_component *component)
+ }
+
+ static const struct snd_soc_component_driver hdmi_driver = {
++ .probe = hdmi_probe,
+ .remove = hdmi_remove,
+ .dapm_widgets = hdmi_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(hdmi_widgets),
+diff --git a/sound/soc/fsl/fsl_easrc.c b/sound/soc/fsl/fsl_easrc.c
+index 3153d19136b29..84e6f9eb784dc 100644
+--- a/sound/soc/fsl/fsl_easrc.c
++++ b/sound/soc/fsl/fsl_easrc.c
+@@ -1966,17 +1966,21 @@ static int fsl_easrc_probe(struct platform_device *pdev)
+ &fsl_easrc_dai, 1);
+ if (ret) {
+ dev_err(dev, "failed to register ASoC DAI\n");
+- return ret;
++ goto err_pm_disable;
+ }
+
+ ret = devm_snd_soc_register_component(dev, &fsl_asrc_component,
+ NULL, 0);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register ASoC platform\n");
+- return ret;
++ goto err_pm_disable;
+ }
+
+ return 0;
++
++err_pm_disable:
++ pm_runtime_disable(&pdev->dev);
++ return ret;
+ }
+
+ static int fsl_easrc_remove(struct platform_device *pdev)
+diff --git a/sound/soc/fsl/mpc5200_dma.c b/sound/soc/fsl/mpc5200_dma.c
+index 9014978100207..3f7ccae3f6b1a 100644
+--- a/sound/soc/fsl/mpc5200_dma.c
++++ b/sound/soc/fsl/mpc5200_dma.c
+@@ -100,6 +100,9 @@ static irqreturn_t psc_dma_bcom_irq(int irq, void *_psc_dma_stream)
+
+ /**
+ * psc_dma_trigger: start and stop the DMA transfer.
++ * @component: triggered component
++ * @substream: triggered substream
++ * @cmd: triggered command
+ *
+ * This function is called by ALSA to start, stop, pause, and resume the DMA
+ * transfer of data.
+diff --git a/sound/soc/intel/skylake/skl-sst-utils.c b/sound/soc/intel/skylake/skl-sst-utils.c
+index 57ea815d3f041..b776c58dcf47a 100644
+--- a/sound/soc/intel/skylake/skl-sst-utils.c
++++ b/sound/soc/intel/skylake/skl-sst-utils.c
+@@ -299,6 +299,7 @@ int snd_skl_parse_uuids(struct sst_dsp *ctx, const struct firmware *fw,
+ module->instance_id = devm_kzalloc(ctx->dev, size, GFP_KERNEL);
+ if (!module->instance_id) {
+ ret = -ENOMEM;
++ kfree(module);
+ goto free_uuid_list;
+ }
+
+diff --git a/sound/soc/mediatek/mt8186/mt8186-mt6366-rt1019-rt5682s.c b/sound/soc/mediatek/mt8186/mt8186-mt6366-rt1019-rt5682s.c
+index 6babadb2e6fe2..f76bae1d81a09 100644
+--- a/sound/soc/mediatek/mt8186/mt8186-mt6366-rt1019-rt5682s.c
++++ b/sound/soc/mediatek/mt8186/mt8186-mt6366-rt1019-rt5682s.c
+@@ -1080,7 +1080,7 @@ static int mt8186_mt6366_rt1019_rt5682s_dev_probe(struct platform_device *pdev)
+ playback_codec = of_get_child_by_name(pdev->dev.of_node, "playback-codecs");
+ if (!playback_codec) {
+ ret = -EINVAL;
+- dev_err_probe(&pdev->dev, ret, "Property 'speaker-codecs' missing or invalid\n");
++ dev_err_probe(&pdev->dev, ret, "Property 'playback-codecs' missing or invalid\n");
+ goto err_playback_codec;
+ }
+
+@@ -1094,7 +1094,7 @@ static int mt8186_mt6366_rt1019_rt5682s_dev_probe(struct platform_device *pdev)
+ for_each_card_prelinks(card, i, dai_link) {
+ ret = mt8186_mt6366_card_set_be_link(card, dai_link, playback_codec, "I2S3");
+ if (ret) {
+- dev_err_probe(&pdev->dev, ret, "%s set speaker_codec fail\n",
++ dev_err_probe(&pdev->dev, ret, "%s set playback_codec fail\n",
+ dai_link->name);
+ goto err_probe;
+ }
+diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
+index 879cf1be67a9f..6eb8c6cb5e673 100644
+--- a/sound/soc/soc-dapm.c
++++ b/sound/soc/soc-dapm.c
+@@ -3670,7 +3670,7 @@ snd_soc_dapm_new_control_unlocked(struct snd_soc_dapm_context *dapm,
+ dapm_pinctrl_event(w, NULL, SND_SOC_DAPM_POST_PMD);
+ break;
+ case snd_soc_dapm_clock_supply:
+- w->clk = devm_clk_get(dapm->dev, w->name);
++ w->clk = devm_clk_get(dapm->dev, widget->name);
+ if (IS_ERR(w->clk)) {
+ ret = PTR_ERR(w->clk);
+ goto request_failed;
+diff --git a/sound/soc/sof/core.c b/sound/soc/sof/core.c
+index eaa16755a2704..93e1c38392a32 100644
+--- a/sound/soc/sof/core.c
++++ b/sound/soc/sof/core.c
+@@ -434,9 +434,10 @@ int snd_sof_device_remove(struct device *dev)
+ struct snd_sof_dev *sdev = dev_get_drvdata(dev);
+ struct snd_sof_pdata *pdata = sdev->pdata;
+ int ret;
++ bool aborted = false;
+
+ if (IS_ENABLED(CONFIG_SND_SOC_SOF_PROBE_WORK_QUEUE))
+- cancel_work_sync(&sdev->probe_work);
++ aborted = cancel_work_sync(&sdev->probe_work);
+
+ /*
+ * Unregister any registered client device first before IPC and debugfs
+@@ -462,6 +463,9 @@ int snd_sof_device_remove(struct device *dev)
+ snd_sof_free_debug(sdev);
+ snd_sof_remove(sdev);
+ sof_ops_free(sdev);
++ } else if (aborted) {
++ /* probe_work never ran */
++ sof_ops_free(sdev);
+ }
+
+ /* release firmware */
+diff --git a/sound/soc/ti/ams-delta.c b/sound/soc/ti/ams-delta.c
+index 438e2fa843a03..1acc4505aa9a9 100644
+--- a/sound/soc/ti/ams-delta.c
++++ b/sound/soc/ti/ams-delta.c
+@@ -303,7 +303,7 @@ static int cx81801_open(struct tty_struct *tty)
+ static void cx81801_close(struct tty_struct *tty)
+ {
+ struct snd_soc_component *component = tty->disc_data;
+- struct snd_soc_dapm_context *dapm = &component->card->dapm;
++ struct snd_soc_dapm_context *dapm;
+
+ del_timer_sync(&cx81801_timer);
+
+@@ -315,6 +315,8 @@ static void cx81801_close(struct tty_struct *tty)
+
+ v253_ops.close(tty);
+
++ dapm = &component->card->dapm;
++
+ /* Revert back to default audio input/output constellation */
+ snd_soc_dapm_mutex_lock(dapm);
+
+diff --git a/tools/iio/iio_generic_buffer.c b/tools/iio/iio_generic_buffer.c
+index 44bbf80f0cfdd..0d0a7a19d6f95 100644
+--- a/tools/iio/iio_generic_buffer.c
++++ b/tools/iio/iio_generic_buffer.c
+@@ -54,9 +54,12 @@ enum autochan {
+ static unsigned int size_from_channelarray(struct iio_channel_info *channels, int num_channels)
+ {
+ unsigned int bytes = 0;
+- int i = 0;
++ int i = 0, max = 0;
++ unsigned int misalignment;
+
+ while (i < num_channels) {
++ if (channels[i].bytes > max)
++ max = channels[i].bytes;
+ if (bytes % channels[i].bytes == 0)
+ channels[i].location = bytes;
+ else
+@@ -66,6 +69,14 @@ static unsigned int size_from_channelarray(struct iio_channel_info *channels, in
+ bytes = channels[i].location + channels[i].bytes;
+ i++;
+ }
++ /*
++ * We want the data in next sample to also be properly aligned so
++ * we'll add padding at the end if needed. Adding padding only
++ * works for channel data which size is 2^n bytes.
++ */
++ misalignment = bytes % max;
++ if (misalignment)
++ bytes += max - misalignment;
+
+ return bytes;
+ }
+diff --git a/tools/objtool/objtool.c b/tools/objtool/objtool.c
+index a7ecc32e35125..cda649644e32d 100644
+--- a/tools/objtool/objtool.c
++++ b/tools/objtool/objtool.c
+@@ -146,7 +146,5 @@ int main(int argc, const char **argv)
+ exec_cmd_init("objtool", UNUSED, UNUSED, UNUSED);
+ pager_init(UNUSED);
+
+- objtool_run(argc, argv);
+-
+- return 0;
++ return objtool_run(argc, argv);
+ }
+diff --git a/tools/perf/Documentation/perf-kwork.txt b/tools/perf/Documentation/perf-kwork.txt
+index 3c36324712b6e..482d6c52e2edf 100644
+--- a/tools/perf/Documentation/perf-kwork.txt
++++ b/tools/perf/Documentation/perf-kwork.txt
+@@ -8,7 +8,7 @@ perf-kwork - Tool to trace/measure kernel work properties (latencies)
+ SYNOPSIS
+ --------
+ [verse]
+-'perf kwork' {record}
++'perf kwork' {record|report|latency|timehist}
+
+ DESCRIPTION
+ -----------
+diff --git a/tools/perf/builtin-kwork.c b/tools/perf/builtin-kwork.c
+index fb8c63656ad89..25cba0d61736c 100644
+--- a/tools/perf/builtin-kwork.c
++++ b/tools/perf/builtin-kwork.c
+@@ -399,12 +399,14 @@ static int work_push_atom(struct perf_kwork *kwork,
+
+ work = work_findnew(&class->work_root, &key, &kwork->cmp_id);
+ if (work == NULL) {
+- free(atom);
++ atom_free(atom);
+ return -1;
+ }
+
+- if (!profile_event_match(kwork, work, sample))
++ if (!profile_event_match(kwork, work, sample)) {
++ atom_free(atom);
+ return 0;
++ }
+
+ if (dst_type < KWORK_TRACE_MAX) {
+ dst_atom = list_last_entry_or_null(&work->atom_list[dst_type],
+@@ -1670,9 +1672,10 @@ int cmd_kwork(int argc, const char **argv)
+ static struct perf_kwork kwork = {
+ .class_list = LIST_HEAD_INIT(kwork.class_list),
+ .tool = {
+- .mmap = perf_event__process_mmap,
+- .mmap2 = perf_event__process_mmap2,
+- .sample = perf_kwork__process_tracepoint_sample,
++ .mmap = perf_event__process_mmap,
++ .mmap2 = perf_event__process_mmap2,
++ .sample = perf_kwork__process_tracepoint_sample,
++ .ordered_events = true,
+ },
+ .atom_page_list = LIST_HEAD_INIT(kwork.atom_page_list),
+ .sort_list = LIST_HEAD_INIT(kwork.sort_list),
+diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
+index a2c74a34e4a44..bdd8dd54fdb63 100644
+--- a/tools/perf/builtin-stat.c
++++ b/tools/perf/builtin-stat.c
+@@ -1444,7 +1444,7 @@ static int perf_stat_init_aggr_mode(void)
+ * taking the highest cpu number to be the size of
+ * the aggregation translate cpumap.
+ */
+- if (evsel_list->core.user_requested_cpus)
++ if (!perf_cpu_map__empty(evsel_list->core.user_requested_cpus))
+ nr = perf_cpu_map__max(evsel_list->core.user_requested_cpus).cpu;
+ else
+ nr = 0;
+diff --git a/tools/perf/util/bpf_off_cpu.c b/tools/perf/util/bpf_off_cpu.c
+index 01f70b8e705a8..21f4d9ba023d9 100644
+--- a/tools/perf/util/bpf_off_cpu.c
++++ b/tools/perf/util/bpf_off_cpu.c
+@@ -98,7 +98,7 @@ static void off_cpu_finish(void *arg __maybe_unused)
+ /* v5.18 kernel added prev_state arg, so it needs to check the signature */
+ static void check_sched_switch_args(void)
+ {
+- const struct btf *btf = bpf_object__btf(skel->obj);
++ const struct btf *btf = btf__load_vmlinux_btf();
+ const struct btf_type *t1, *t2, *t3;
+ u32 type_id;
+
+@@ -116,7 +116,8 @@ static void check_sched_switch_args(void)
+ return;
+
+ t3 = btf__type_by_id(btf, t2->type);
+- if (t3 && btf_is_func_proto(t3) && btf_vlen(t3) == 4) {
++ /* btf_trace func proto has one more argument for the context */
++ if (t3 && btf_is_func_proto(t3) && btf_vlen(t3) == 5) {
+ /* new format: pass prev_state as 4th arg */
+ skel->rodata->has_prev_state = true;
+ }
+diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
+index 17a05e943b44b..bffd058cbecee 100644
+--- a/tools/perf/util/hist.c
++++ b/tools/perf/util/hist.c
+@@ -2645,8 +2645,6 @@ void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
+
+ /* If we have branch cycles always annotate them. */
+ if (bs && bs->nr && entries[0].flags.cycles) {
+- int i;
+-
+ bi = sample__resolve_bstack(sample, al);
+ if (bi) {
+ struct addr_map_symbol *prev = NULL;
+@@ -2661,7 +2659,7 @@ void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
+ * Note that perf stores branches reversed from
+ * program order!
+ */
+- for (i = bs->nr - 1; i >= 0; i--) {
++ for (int i = bs->nr - 1; i >= 0; i--) {
+ addr_map_symbol__account_cycles(&bi[i].from,
+ nonany_branch_mode ? NULL : prev,
+ bi[i].flags.cycles);
+@@ -2670,6 +2668,12 @@ void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
+ if (total_cycles)
+ *total_cycles += bi[i].flags.cycles;
+ }
++ for (unsigned int i = 0; i < bs->nr; i++) {
++ map__put(bi[i].to.ms.map);
++ maps__put(bi[i].to.ms.maps);
++ map__put(bi[i].from.ms.map);
++ maps__put(bi[i].from.ms.maps);
++ }
+ free(bi);
+ }
+ }
+diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
+index 76316e459c3de..9cd52f50ea7ac 100644
+--- a/tools/perf/util/machine.c
++++ b/tools/perf/util/machine.c
+@@ -2555,16 +2555,18 @@ static int lbr_callchain_add_lbr_ip(struct thread *thread,
+ save_lbr_cursor_node(thread, cursor, i);
+ }
+
+- /* Add LBR ip from first entries.to */
+- ip = entries[0].to;
+- flags = &entries[0].flags;
+- *branch_from = entries[0].from;
+- err = add_callchain_ip(thread, cursor, parent,
+- root_al, &cpumode, ip,
+- true, flags, NULL,
+- *branch_from);
+- if (err)
+- return err;
++ if (lbr_nr > 0) {
++ /* Add LBR ip from first entries.to */
++ ip = entries[0].to;
++ flags = &entries[0].flags;
++ *branch_from = entries[0].from;
++ err = add_callchain_ip(thread, cursor, parent,
++ root_al, &cpumode, ip,
++ true, flags, NULL,
++ *branch_from);
++ if (err)
++ return err;
++ }
+
+ return 0;
+ }
+diff --git a/tools/testing/selftests/bpf/prog_tests/tailcalls.c b/tools/testing/selftests/bpf/prog_tests/tailcalls.c
+index 58fe2c586ed76..09c189761926c 100644
+--- a/tools/testing/selftests/bpf/prog_tests/tailcalls.c
++++ b/tools/testing/selftests/bpf/prog_tests/tailcalls.c
+@@ -271,11 +271,11 @@ static void test_tailcall_count(const char *which)
+
+ data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
+ if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
+- return;
++ goto out;
+
+ data_fd = bpf_map__fd(data_map);
+- if (CHECK_FAIL(map_fd < 0))
+- return;
++ if (CHECK_FAIL(data_fd < 0))
++ goto out;
+
+ i = 0;
+ err = bpf_map_lookup_elem(data_fd, &i, &val);
+@@ -352,11 +352,11 @@ static void test_tailcall_4(void)
+
+ data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
+ if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
+- return;
++ goto out;
+
+ data_fd = bpf_map__fd(data_map);
+- if (CHECK_FAIL(map_fd < 0))
+- return;
++ if (CHECK_FAIL(data_fd < 0))
++ goto out;
+
+ for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
+ snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
+@@ -442,11 +442,11 @@ static void test_tailcall_5(void)
+
+ data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
+ if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
+- return;
++ goto out;
+
+ data_fd = bpf_map__fd(data_map);
+- if (CHECK_FAIL(map_fd < 0))
+- return;
++ if (CHECK_FAIL(data_fd < 0))
++ goto out;
+
+ for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
+ snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
+@@ -631,11 +631,11 @@ static void test_tailcall_bpf2bpf_2(void)
+
+ data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
+ if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
+- return;
++ goto out;
+
+ data_fd = bpf_map__fd(data_map);
+- if (CHECK_FAIL(map_fd < 0))
+- return;
++ if (CHECK_FAIL(data_fd < 0))
++ goto out;
+
+ i = 0;
+ err = bpf_map_lookup_elem(data_fd, &i, &val);
+@@ -805,11 +805,11 @@ static void test_tailcall_bpf2bpf_4(bool noise)
+
+ data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
+ if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
+- return;
++ goto out;
+
+ data_fd = bpf_map__fd(data_map);
+- if (CHECK_FAIL(map_fd < 0))
+- return;
++ if (CHECK_FAIL(data_fd < 0))
++ goto out;
+
+ i = 0;
+ val.noise = noise;
+@@ -872,7 +872,7 @@ static void test_tailcall_bpf2bpf_6(void)
+ ASSERT_EQ(topts.retval, 0, "tailcall retval");
+
+ data_fd = bpf_map__fd(obj->maps.bss);
+- if (!ASSERT_GE(map_fd, 0, "bss map fd"))
++ if (!ASSERT_GE(data_fd, 0, "bss map fd"))
+ goto out;
+
+ i = 0;
+diff --git a/tools/testing/selftests/net/pmtu.sh b/tools/testing/selftests/net/pmtu.sh
+index dfe3d287f01d2..0d705fdcf3b76 100755
+--- a/tools/testing/selftests/net/pmtu.sh
++++ b/tools/testing/selftests/net/pmtu.sh
+@@ -2013,7 +2013,7 @@ run_test() {
+ case $ret in
+ 0)
+ all_skipped=false
+- [ $exitcode=$ksft_skip ] && exitcode=0
++ [ $exitcode -eq $ksft_skip ] && exitcode=0
+ ;;
+ $ksft_skip)
+ [ $all_skipped = true ] && exitcode=$ksft_skip
+diff --git a/tools/testing/selftests/netfilter/Makefile b/tools/testing/selftests/netfilter/Makefile
+index 321db8850da00..bced422b78f72 100644
+--- a/tools/testing/selftests/netfilter/Makefile
++++ b/tools/testing/selftests/netfilter/Makefile
+@@ -6,13 +6,14 @@ TEST_PROGS := nft_trans_stress.sh nft_fib.sh nft_nat.sh bridge_brouter.sh \
+ nft_concat_range.sh nft_conntrack_helper.sh \
+ nft_queue.sh nft_meta.sh nf_nat_edemux.sh \
+ ipip-conntrack-mtu.sh conntrack_tcp_unreplied.sh \
+- conntrack_vrf.sh nft_synproxy.sh rpath.sh nft_audit.sh
++ conntrack_vrf.sh nft_synproxy.sh rpath.sh nft_audit.sh \
++ conntrack_sctp_collision.sh xt_string.sh
+
+ HOSTPKG_CONFIG := pkg-config
+
+ CFLAGS += $(shell $(HOSTPKG_CONFIG) --cflags libmnl 2>/dev/null)
+ LDLIBS += $(shell $(HOSTPKG_CONFIG) --libs libmnl 2>/dev/null || echo -lmnl)
+
+-TEST_GEN_FILES = nf-queue connect_close audit_logread
++TEST_GEN_FILES = nf-queue connect_close audit_logread sctp_collision
+
+ include ../lib.mk
+diff --git a/tools/testing/selftests/netfilter/conntrack_sctp_collision.sh b/tools/testing/selftests/netfilter/conntrack_sctp_collision.sh
+new file mode 100755
+index 0000000000000..a924e595cfd8b
+--- /dev/null
++++ b/tools/testing/selftests/netfilter/conntrack_sctp_collision.sh
+@@ -0,0 +1,89 @@
++#!/bin/bash
++# SPDX-License-Identifier: GPL-2.0
++#
++# Testing For SCTP COLLISION SCENARIO as Below:
++#
++# 14:35:47.655279 IP CLIENT_IP.PORT > SERVER_IP.PORT: sctp (1) [INIT] [init tag: 2017837359]
++# 14:35:48.353250 IP SERVER_IP.PORT > CLIENT_IP.PORT: sctp (1) [INIT] [init tag: 1187206187]
++# 14:35:48.353275 IP CLIENT_IP.PORT > SERVER_IP.PORT: sctp (1) [INIT ACK] [init tag: 2017837359]
++# 14:35:48.353283 IP SERVER_IP.PORT > CLIENT_IP.PORT: sctp (1) [COOKIE ECHO]
++# 14:35:48.353977 IP CLIENT_IP.PORT > SERVER_IP.PORT: sctp (1) [COOKIE ACK]
++# 14:35:48.855335 IP SERVER_IP.PORT > CLIENT_IP.PORT: sctp (1) [INIT ACK] [init tag: 164579970]
++#
++# TOPO: SERVER_NS (link0)<--->(link1) ROUTER_NS (link2)<--->(link3) CLIENT_NS
++
++CLIENT_NS=$(mktemp -u client-XXXXXXXX)
++CLIENT_IP="198.51.200.1"
++CLIENT_PORT=1234
++
++SERVER_NS=$(mktemp -u server-XXXXXXXX)
++SERVER_IP="198.51.100.1"
++SERVER_PORT=1234
++
++ROUTER_NS=$(mktemp -u router-XXXXXXXX)
++CLIENT_GW="198.51.200.2"
++SERVER_GW="198.51.100.2"
++
++# setup the topo
++setup() {
++ ip net add $CLIENT_NS
++ ip net add $SERVER_NS
++ ip net add $ROUTER_NS
++ ip -n $SERVER_NS link add link0 type veth peer name link1 netns $ROUTER_NS
++ ip -n $CLIENT_NS link add link3 type veth peer name link2 netns $ROUTER_NS
++
++ ip -n $SERVER_NS link set link0 up
++ ip -n $SERVER_NS addr add $SERVER_IP/24 dev link0
++ ip -n $SERVER_NS route add $CLIENT_IP dev link0 via $SERVER_GW
++
++ ip -n $ROUTER_NS link set link1 up
++ ip -n $ROUTER_NS link set link2 up
++ ip -n $ROUTER_NS addr add $SERVER_GW/24 dev link1
++ ip -n $ROUTER_NS addr add $CLIENT_GW/24 dev link2
++ ip net exec $ROUTER_NS sysctl -wq net.ipv4.ip_forward=1
++
++ ip -n $CLIENT_NS link set link3 up
++ ip -n $CLIENT_NS addr add $CLIENT_IP/24 dev link3
++ ip -n $CLIENT_NS route add $SERVER_IP dev link3 via $CLIENT_GW
++
++ # simulate the delay on OVS upcall by setting up a delay for INIT_ACK with
++ # tc on $SERVER_NS side
++ tc -n $SERVER_NS qdisc add dev link0 root handle 1: htb
++ tc -n $SERVER_NS class add dev link0 parent 1: classid 1:1 htb rate 100mbit
++ tc -n $SERVER_NS filter add dev link0 parent 1: protocol ip u32 match ip protocol 132 \
++ 0xff match u8 2 0xff at 32 flowid 1:1
++ tc -n $SERVER_NS qdisc add dev link0 parent 1:1 handle 10: netem delay 1200ms
++
++ # simulate the ctstate check on OVS nf_conntrack
++ ip net exec $ROUTER_NS iptables -A FORWARD -m state --state INVALID,UNTRACKED -j DROP
++ ip net exec $ROUTER_NS iptables -A INPUT -p sctp -j DROP
++
++ # use a smaller number for assoc's max_retrans to reproduce the issue
++ modprobe sctp
++ ip net exec $CLIENT_NS sysctl -wq net.sctp.association_max_retrans=3
++}
++
++cleanup() {
++ ip net exec $CLIENT_NS pkill sctp_collision 2>&1 >/dev/null
++ ip net exec $SERVER_NS pkill sctp_collision 2>&1 >/dev/null
++ ip net del "$CLIENT_NS"
++ ip net del "$SERVER_NS"
++ ip net del "$ROUTER_NS"
++}
++
++do_test() {
++ ip net exec $SERVER_NS ./sctp_collision server \
++ $SERVER_IP $SERVER_PORT $CLIENT_IP $CLIENT_PORT &
++ ip net exec $CLIENT_NS ./sctp_collision client \
++ $CLIENT_IP $CLIENT_PORT $SERVER_IP $SERVER_PORT
++}
++
++# NOTE: one way to work around the issue is set a smaller hb_interval
++# ip net exec $CLIENT_NS sysctl -wq net.sctp.hb_interval=3500
++
++# run the test case
++trap cleanup EXIT
++setup && \
++echo "Test for SCTP Collision in nf_conntrack:" && \
++do_test && echo "PASS!"
++exit $?
+diff --git a/tools/testing/selftests/netfilter/sctp_collision.c b/tools/testing/selftests/netfilter/sctp_collision.c
+new file mode 100644
+index 0000000000000..21bb1cfd8a856
+--- /dev/null
++++ b/tools/testing/selftests/netfilter/sctp_collision.c
+@@ -0,0 +1,99 @@
++// SPDX-License-Identifier: GPL-2.0
++
++#include <stdio.h>
++#include <stdlib.h>
++#include <string.h>
++#include <unistd.h>
++#include <arpa/inet.h>
++
++int main(int argc, char *argv[])
++{
++ struct sockaddr_in saddr = {}, daddr = {};
++ int sd, ret, len = sizeof(daddr);
++ struct timeval tv = {25, 0};
++ char buf[] = "hello";
++
++ if (argc != 6 || (strcmp(argv[1], "server") && strcmp(argv[1], "client"))) {
++ printf("%s <server|client> <LOCAL_IP> <LOCAL_PORT> <REMOTE_IP> <REMOTE_PORT>\n",
++ argv[0]);
++ return -1;
++ }
++
++ sd = socket(AF_INET, SOCK_SEQPACKET, IPPROTO_SCTP);
++ if (sd < 0) {
++ printf("Failed to create sd\n");
++ return -1;
++ }
++
++ saddr.sin_family = AF_INET;
++ saddr.sin_addr.s_addr = inet_addr(argv[2]);
++ saddr.sin_port = htons(atoi(argv[3]));
++
++ ret = bind(sd, (struct sockaddr *)&saddr, sizeof(saddr));
++ if (ret < 0) {
++ printf("Failed to bind to address\n");
++ goto out;
++ }
++
++ ret = listen(sd, 5);
++ if (ret < 0) {
++ printf("Failed to listen on port\n");
++ goto out;
++ }
++
++ daddr.sin_family = AF_INET;
++ daddr.sin_addr.s_addr = inet_addr(argv[4]);
++ daddr.sin_port = htons(atoi(argv[5]));
++
++ /* make test shorter than 25s */
++ ret = setsockopt(sd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
++ if (ret < 0) {
++ printf("Failed to setsockopt SO_RCVTIMEO\n");
++ goto out;
++ }
++
++ if (!strcmp(argv[1], "server")) {
++ sleep(1); /* wait a bit for client's INIT */
++ ret = connect(sd, (struct sockaddr *)&daddr, len);
++ if (ret < 0) {
++ printf("Failed to connect to peer\n");
++ goto out;
++ }
++ ret = recvfrom(sd, buf, sizeof(buf), 0, (struct sockaddr *)&daddr, &len);
++ if (ret < 0) {
++ printf("Failed to recv msg %d\n", ret);
++ goto out;
++ }
++ ret = sendto(sd, buf, strlen(buf) + 1, 0, (struct sockaddr *)&daddr, len);
++ if (ret < 0) {
++ printf("Failed to send msg %d\n", ret);
++ goto out;
++ }
++ printf("Server: sent! %d\n", ret);
++ }
++
++ if (!strcmp(argv[1], "client")) {
++ usleep(300000); /* wait a bit for server's listening */
++ ret = connect(sd, (struct sockaddr *)&daddr, len);
++ if (ret < 0) {
++ printf("Failed to connect to peer\n");
++ goto out;
++ }
++ sleep(1); /* wait a bit for server's delayed INIT_ACK to reproduce the issue */
++ ret = sendto(sd, buf, strlen(buf) + 1, 0, (struct sockaddr *)&daddr, len);
++ if (ret < 0) {
++ printf("Failed to send msg %d\n", ret);
++ goto out;
++ }
++ ret = recvfrom(sd, buf, sizeof(buf), 0, (struct sockaddr *)&daddr, &len);
++ if (ret < 0) {
++ printf("Failed to recv msg %d\n", ret);
++ goto out;
++ }
++ printf("Client: rcvd! %d\n", ret);
++ }
++ ret = 0;
++out:
++ close(sd);
++ return ret;
++}
+diff --git a/tools/testing/selftests/netfilter/xt_string.sh b/tools/testing/selftests/netfilter/xt_string.sh
+new file mode 100755
+index 0000000000000..1802653a47287
+--- /dev/null
++++ b/tools/testing/selftests/netfilter/xt_string.sh
+@@ -0,0 +1,128 @@
++#!/bin/bash
++# SPDX-License-Identifier: GPL-2.0
++
++# return code to signal skipped test
++ksft_skip=4
++rc=0
++
++if ! iptables --version >/dev/null 2>&1; then
++ echo "SKIP: Test needs iptables"
++ exit $ksft_skip
++fi
++if ! ip -V >/dev/null 2>&1; then
++ echo "SKIP: Test needs iproute2"
++ exit $ksft_skip
++fi
++if ! nc -h >/dev/null 2>&1; then
++ echo "SKIP: Test needs netcat"
++ exit $ksft_skip
++fi
++
++pattern="foo bar baz"
++patlen=11
++hdrlen=$((20 + 8)) # IPv4 + UDP
++ns="ns-$(mktemp -u XXXXXXXX)"
++trap 'ip netns del $ns' EXIT
++ip netns add "$ns"
++ip -net "$ns" link add d0 type dummy
++ip -net "$ns" link set d0 up
++ip -net "$ns" addr add 10.1.2.1/24 dev d0
++
++#ip netns exec "$ns" tcpdump -npXi d0 &
++#tcpdump_pid=$!
++#trap 'kill $tcpdump_pid; ip netns del $ns' EXIT
++
++add_rule() { # (alg, from, to)
++ ip netns exec "$ns" \
++ iptables -A OUTPUT -o d0 -m string \
++ --string "$pattern" --algo $1 --from $2 --to $3
++}
++showrules() { # ()
++ ip netns exec "$ns" iptables -v -S OUTPUT | grep '^-A'
++}
++zerorules() {
++ ip netns exec "$ns" iptables -Z OUTPUT
++}
++countrule() { # (pattern)
++ showrules | grep -c -- "$*"
++}
++send() { # (offset)
++ ( for ((i = 0; i < $1 - $hdrlen; i++)); do
++ printf " "
++ done
++ printf "$pattern"
++ ) | ip netns exec "$ns" nc -w 1 -u 10.1.2.2 27374
++}
++
++add_rule bm 1000 1500
++add_rule bm 1400 1600
++add_rule kmp 1000 1500
++add_rule kmp 1400 1600
++
++zerorules
++send 0
++send $((1000 - $patlen))
++if [ $(countrule -c 0 0) -ne 4 ]; then
++ echo "FAIL: rules match data before --from"
++ showrules
++ ((rc--))
++fi
++
++zerorules
++send 1000
++send $((1400 - $patlen))
++if [ $(countrule -c 2) -ne 2 ]; then
++ echo "FAIL: only two rules should match at low offset"
++ showrules
++ ((rc--))
++fi
++
++zerorules
++send $((1500 - $patlen))
++if [ $(countrule -c 1) -ne 4 ]; then
++ echo "FAIL: all rules should match at end of packet"
++ showrules
++ ((rc--))
++fi
++
++zerorules
++send 1495
++if [ $(countrule -c 1) -ne 1 ]; then
++ echo "FAIL: only kmp with proper --to should match pattern spanning fragments"
++ showrules
++ ((rc--))
++fi
++
++zerorules
++send 1500
++if [ $(countrule -c 1) -ne 2 ]; then
++ echo "FAIL: two rules should match pattern at start of second fragment"
++ showrules
++ ((rc--))
++fi
++
++zerorules
++send $((1600 - $patlen))
++if [ $(countrule -c 1) -ne 2 ]; then
++ echo "FAIL: two rules should match pattern at end of largest --to"
++ showrules
++ ((rc--))
++fi
++
++zerorules
++send $((1600 - $patlen + 1))
++if [ $(countrule -c 1) -ne 0 ]; then
++ echo "FAIL: no rules should match pattern extending largest --to"
++ showrules
++ ((rc--))
++fi
++
++zerorules
++send 1600
++if [ $(countrule -c 1) -ne 0 ]; then
++ echo "FAIL: no rule should match pattern past largest --to"
++ showrules
++ ((rc--))
++fi
++
++exit $rc
+diff --git a/tools/testing/selftests/pidfd/pidfd_fdinfo_test.c b/tools/testing/selftests/pidfd/pidfd_fdinfo_test.c
+index 3fd8e903118f5..3bc46d6151f44 100644
+--- a/tools/testing/selftests/pidfd/pidfd_fdinfo_test.c
++++ b/tools/testing/selftests/pidfd/pidfd_fdinfo_test.c
+@@ -62,7 +62,7 @@ static void error_report(struct error *err, const char *test_name)
+ break;
+
+ case PIDFD_PASS:
+- ksft_test_result_pass("%s test: Passed\n");
++ ksft_test_result_pass("%s test: Passed\n", test_name);
+ break;
+
+ default:
+diff --git a/tools/testing/selftests/pidfd/pidfd_test.c b/tools/testing/selftests/pidfd/pidfd_test.c
+index e2dd4ed849846..cf4f3174c83e0 100644
+--- a/tools/testing/selftests/pidfd/pidfd_test.c
++++ b/tools/testing/selftests/pidfd/pidfd_test.c
+@@ -380,13 +380,13 @@ static int test_pidfd_send_signal_syscall_support(void)
+
+ static void *test_pidfd_poll_exec_thread(void *priv)
+ {
+- ksft_print_msg("Child Thread: starting. pid %d tid %d ; and sleeping\n",
++ ksft_print_msg("Child Thread: starting. pid %d tid %ld ; and sleeping\n",
+ getpid(), syscall(SYS_gettid));
+ ksft_print_msg("Child Thread: doing exec of sleep\n");
+
+ execl("/bin/sleep", "sleep", str(CHILD_THREAD_MIN_WAIT), (char *)NULL);
+
+- ksft_print_msg("Child Thread: DONE. pid %d tid %d\n",
++ ksft_print_msg("Child Thread: DONE. pid %d tid %ld\n",
+ getpid(), syscall(SYS_gettid));
+ return NULL;
+ }
+@@ -426,7 +426,7 @@ static int child_poll_exec_test(void *args)
+ {
+ pthread_t t1;
+
+- ksft_print_msg("Child (pidfd): starting. pid %d tid %d\n", getpid(),
++ ksft_print_msg("Child (pidfd): starting. pid %d tid %ld\n", getpid(),
+ syscall(SYS_gettid));
+ pthread_create(&t1, NULL, test_pidfd_poll_exec_thread, NULL);
+ /*
+@@ -479,10 +479,10 @@ static void test_pidfd_poll_exec(int use_waitpid)
+
+ static void *test_pidfd_poll_leader_exit_thread(void *priv)
+ {
+- ksft_print_msg("Child Thread: starting. pid %d tid %d ; and sleeping\n",
++ ksft_print_msg("Child Thread: starting. pid %d tid %ld ; and sleeping\n",
+ getpid(), syscall(SYS_gettid));
+ sleep(CHILD_THREAD_MIN_WAIT);
+- ksft_print_msg("Child Thread: DONE. pid %d tid %d\n", getpid(), syscall(SYS_gettid));
++ ksft_print_msg("Child Thread: DONE. pid %d tid %ld\n", getpid(), syscall(SYS_gettid));
+ return NULL;
+ }
+
+@@ -491,7 +491,7 @@ static int child_poll_leader_exit_test(void *args)
+ {
+ pthread_t t1, t2;
+
+- ksft_print_msg("Child: starting. pid %d tid %d\n", getpid(), syscall(SYS_gettid));
++ ksft_print_msg("Child: starting. pid %d tid %ld\n", getpid(), syscall(SYS_gettid));
+ pthread_create(&t1, NULL, test_pidfd_poll_leader_exit_thread, NULL);
+ pthread_create(&t2, NULL, test_pidfd_poll_leader_exit_thread, NULL);
+
+diff --git a/tools/testing/selftests/resctrl/resctrl_tests.c b/tools/testing/selftests/resctrl/resctrl_tests.c
+index df0d8d8526fc6..4418155a879b9 100644
+--- a/tools/testing/selftests/resctrl/resctrl_tests.c
++++ b/tools/testing/selftests/resctrl/resctrl_tests.c
+@@ -228,9 +228,14 @@ int main(int argc, char **argv)
+ return ksft_exit_skip("Not running as root. Skipping...\n");
+
+ if (has_ben) {
++ if (argc - ben_ind >= BENCHMARK_ARGS)
++ ksft_exit_fail_msg("Too long benchmark command.\n");
++
+ /* Extract benchmark command from command line. */
+ for (i = ben_ind; i < argc; i++) {
+ benchmark_cmd[i - ben_ind] = benchmark_cmd_area[i];
++ if (strlen(argv[i]) >= BENCHMARK_ARG_SIZE)
++ ksft_exit_fail_msg("Too long benchmark command argument.\n");
+ sprintf(benchmark_cmd[i - ben_ind], "%s", argv[i]);
+ }
+ benchmark_cmd[ben_count] = NULL;