summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMike Pagano <mpagano@gentoo.org>2020-03-12 05:56:08 -0400
committerMike Pagano <mpagano@gentoo.org>2020-03-12 05:56:08 -0400
commit087942a4f21321a177630fffaf20442b4aa9c6d3 (patch)
treebbceb7bda6eaa5e7337f5fed87eacf029f53c4e4
parentLinux patch 5.5.8 (diff)
downloadlinux-patches-087942a4f21321a177630fffaf20442b4aa9c6d3.tar.gz
linux-patches-087942a4f21321a177630fffaf20442b4aa9c6d3.tar.bz2
linux-patches-087942a4f21321a177630fffaf20442b4aa9c6d3.zip
Linux patch 5.5.95.5-11
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
-rw-r--r--0000_README4
-rw-r--r--1008_linux-5.5.9.patch6714
2 files changed, 6718 insertions, 0 deletions
diff --git a/0000_README b/0000_README
index e58ee4ae..170d7c9b 100644
--- a/0000_README
+++ b/0000_README
@@ -75,6 +75,10 @@ Patch: 1007_linux-5.5.8.patch
From: http://www.kernel.org
Desc: Linux 5.5.8
+Patch: 1008_linux-5.5.9.patch
+From: http://www.kernel.org
+Desc: Linux 5.5.9
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1008_linux-5.5.9.patch b/1008_linux-5.5.9.patch
new file mode 100644
index 00000000..21dd99d0
--- /dev/null
+++ b/1008_linux-5.5.9.patch
@@ -0,0 +1,6714 @@
+diff --git a/Documentation/devicetree/bindings/arm/fsl.yaml b/Documentation/devicetree/bindings/arm/fsl.yaml
+index f79683a628f0..1238e68ac1b4 100644
+--- a/Documentation/devicetree/bindings/arm/fsl.yaml
++++ b/Documentation/devicetree/bindings/arm/fsl.yaml
+@@ -139,7 +139,7 @@ properties:
+ items:
+ - enum:
+ - armadeus,imx6dl-apf6 # APF6 (Solo) SoM
+- - armadeus,imx6dl-apf6dldev # APF6 (Solo) SoM on APF6Dev board
++ - armadeus,imx6dl-apf6dev # APF6 (Solo) SoM on APF6Dev board
+ - eckelmann,imx6dl-ci4x10
+ - emtrion,emcon-mx6 # emCON-MX6S or emCON-MX6DL SoM
+ - emtrion,emcon-mx6-avari # emCON-MX6S or emCON-MX6DL SoM on Avari Base
+diff --git a/Documentation/devicetree/bindings/regulator/qcom,spmi-regulator.txt b/Documentation/devicetree/bindings/regulator/qcom,spmi-regulator.txt
+index f5cdac8b2847..8b005192f6e8 100644
+--- a/Documentation/devicetree/bindings/regulator/qcom,spmi-regulator.txt
++++ b/Documentation/devicetree/bindings/regulator/qcom,spmi-regulator.txt
+@@ -161,7 +161,7 @@ The regulator node houses sub-nodes for each regulator within the device. Each
+ sub-node is identified using the node's name, with valid values listed for each
+ of the PMICs below.
+
+-pm8005:
++pm8004:
+ s2, s5
+
+ pm8005:
+diff --git a/Makefile b/Makefile
+index a1e5190e4721..8b786a8a7289 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 5
+-SUBLEVEL = 8
++SUBLEVEL = 9
+ EXTRAVERSION =
+ NAME = Kleptomaniac Octopus
+
+diff --git a/arch/arm/boot/dts/am437x-idk-evm.dts b/arch/arm/boot/dts/am437x-idk-evm.dts
+index f3ced6df0c9b..9f66f96d09c9 100644
+--- a/arch/arm/boot/dts/am437x-idk-evm.dts
++++ b/arch/arm/boot/dts/am437x-idk-evm.dts
+@@ -526,11 +526,11 @@
+ * Supply voltage supervisor on board will not allow opp50 so
+ * disable it and set opp100 as suspend OPP.
+ */
+- opp50@300000000 {
++ opp50-300000000 {
+ status = "disabled";
+ };
+
+- opp100@600000000 {
++ opp100-600000000 {
+ opp-suspend;
+ };
+ };
+diff --git a/arch/arm/boot/dts/dra76x.dtsi b/arch/arm/boot/dts/dra76x.dtsi
+index cdcba3f561c4..9f6fbe4c1fee 100644
+--- a/arch/arm/boot/dts/dra76x.dtsi
++++ b/arch/arm/boot/dts/dra76x.dtsi
+@@ -86,3 +86,8 @@
+ &usb4_tm {
+ status = "disabled";
+ };
++
++&mmc3 {
++ /* dra76x is not affected by i887 */
++ max-frequency = <96000000>;
++};
+diff --git a/arch/arm/boot/dts/dra7xx-clocks.dtsi b/arch/arm/boot/dts/dra7xx-clocks.dtsi
+index 93e1eb83bed9..d7d98d2069df 100644
+--- a/arch/arm/boot/dts/dra7xx-clocks.dtsi
++++ b/arch/arm/boot/dts/dra7xx-clocks.dtsi
+@@ -796,16 +796,6 @@
+ clock-div = <1>;
+ };
+
+- ipu1_gfclk_mux: ipu1_gfclk_mux@520 {
+- #clock-cells = <0>;
+- compatible = "ti,mux-clock";
+- clocks = <&dpll_abe_m2x2_ck>, <&dpll_core_h22x2_ck>;
+- ti,bit-shift = <24>;
+- reg = <0x0520>;
+- assigned-clocks = <&ipu1_gfclk_mux>;
+- assigned-clock-parents = <&dpll_core_h22x2_ck>;
+- };
+-
+ dummy_ck: dummy_ck {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+@@ -1564,6 +1554,8 @@
+ compatible = "ti,clkctrl";
+ reg = <0x20 0x4>;
+ #clock-cells = <2>;
++ assigned-clocks = <&ipu1_clkctrl DRA7_IPU1_MMU_IPU1_CLKCTRL 24>;
++ assigned-clock-parents = <&dpll_core_h22x2_ck>;
+ };
+
+ ipu_clkctrl: ipu-clkctrl@50 {
+diff --git a/arch/arm/boot/dts/imx6qdl-phytec-phycore-som.dtsi b/arch/arm/boot/dts/imx6qdl-phytec-phycore-som.dtsi
+index 6486df3e2942..881cea0b61ba 100644
+--- a/arch/arm/boot/dts/imx6qdl-phytec-phycore-som.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-phytec-phycore-som.dtsi
+@@ -183,7 +183,6 @@
+ pinctrl-0 = <&pinctrl_usdhc4>;
+ bus-width = <8>;
+ non-removable;
+- vmmc-supply = <&vdd_emmc_1p8>;
+ status = "disabled";
+ };
+
+diff --git a/arch/arm/boot/dts/imx7-colibri.dtsi b/arch/arm/boot/dts/imx7-colibri.dtsi
+index d05be3f0e2a7..04717cf69db0 100644
+--- a/arch/arm/boot/dts/imx7-colibri.dtsi
++++ b/arch/arm/boot/dts/imx7-colibri.dtsi
+@@ -336,7 +336,6 @@
+ assigned-clock-rates = <400000000>;
+ bus-width = <8>;
+ fsl,tuning-step = <2>;
+- max-frequency = <100000000>;
+ vmmc-supply = <&reg_module_3v3>;
+ vqmmc-supply = <&reg_DCDC3>;
+ non-removable;
+diff --git a/arch/arm/boot/dts/imx7d.dtsi b/arch/arm/boot/dts/imx7d.dtsi
+index d8acd7cc7918..497434f0629e 100644
+--- a/arch/arm/boot/dts/imx7d.dtsi
++++ b/arch/arm/boot/dts/imx7d.dtsi
+@@ -44,7 +44,7 @@
+ opp-hz = /bits/ 64 <792000000>;
+ opp-microvolt = <1000000>;
+ clock-latency-ns = <150000>;
+- opp-supported-hw = <0xd>, <0xf>;
++ opp-supported-hw = <0xd>, <0x7>;
+ opp-suspend;
+ };
+
+@@ -52,7 +52,7 @@
+ opp-hz = /bits/ 64 <996000000>;
+ opp-microvolt = <1100000>;
+ clock-latency-ns = <150000>;
+- opp-supported-hw = <0xc>, <0xf>;
++ opp-supported-hw = <0xc>, <0x7>;
+ opp-suspend;
+ };
+
+@@ -60,7 +60,7 @@
+ opp-hz = /bits/ 64 <1200000000>;
+ opp-microvolt = <1225000>;
+ clock-latency-ns = <150000>;
+- opp-supported-hw = <0x8>, <0xf>;
++ opp-supported-hw = <0x8>, <0x3>;
+ opp-suspend;
+ };
+ };
+diff --git a/arch/arm/boot/dts/ls1021a.dtsi b/arch/arm/boot/dts/ls1021a.dtsi
+index 2f6977ada447..63d9f4a066e3 100644
+--- a/arch/arm/boot/dts/ls1021a.dtsi
++++ b/arch/arm/boot/dts/ls1021a.dtsi
+@@ -728,7 +728,7 @@
+ };
+
+ mdio0: mdio@2d24000 {
+- compatible = "fsl,etsec2-mdio";
++ compatible = "gianfar";
+ device_type = "mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+@@ -737,7 +737,7 @@
+ };
+
+ mdio1: mdio@2d64000 {
+- compatible = "fsl,etsec2-mdio";
++ compatible = "gianfar";
+ device_type = "mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+diff --git a/arch/arm/mach-imx/Makefile b/arch/arm/mach-imx/Makefile
+index 35ff620537e6..03506ce46149 100644
+--- a/arch/arm/mach-imx/Makefile
++++ b/arch/arm/mach-imx/Makefile
+@@ -91,6 +91,8 @@ AFLAGS_suspend-imx6.o :=-Wa,-march=armv7-a
+ obj-$(CONFIG_SOC_IMX6) += suspend-imx6.o
+ obj-$(CONFIG_SOC_IMX53) += suspend-imx53.o
+ endif
++AFLAGS_resume-imx6.o :=-Wa,-march=armv7-a
++obj-$(CONFIG_SOC_IMX6) += resume-imx6.o
+ obj-$(CONFIG_SOC_IMX6) += pm-imx6.o
+
+ obj-$(CONFIG_SOC_IMX1) += mach-imx1.o
+diff --git a/arch/arm/mach-imx/common.h b/arch/arm/mach-imx/common.h
+index 912aeceb4ff8..5aa5796cff0e 100644
+--- a/arch/arm/mach-imx/common.h
++++ b/arch/arm/mach-imx/common.h
+@@ -109,17 +109,17 @@ void imx_cpu_die(unsigned int cpu);
+ int imx_cpu_kill(unsigned int cpu);
+
+ #ifdef CONFIG_SUSPEND
+-void v7_cpu_resume(void);
+ void imx53_suspend(void __iomem *ocram_vbase);
+ extern const u32 imx53_suspend_sz;
+ void imx6_suspend(void __iomem *ocram_vbase);
+ #else
+-static inline void v7_cpu_resume(void) {}
+ static inline void imx53_suspend(void __iomem *ocram_vbase) {}
+ static const u32 imx53_suspend_sz;
+ static inline void imx6_suspend(void __iomem *ocram_vbase) {}
+ #endif
+
++void v7_cpu_resume(void);
++
+ void imx6_pm_ccm_init(const char *ccm_compat);
+ void imx6q_pm_init(void);
+ void imx6dl_pm_init(void);
+diff --git a/arch/arm/mach-imx/resume-imx6.S b/arch/arm/mach-imx/resume-imx6.S
+new file mode 100644
+index 000000000000..5bd1ba7ef15b
+--- /dev/null
++++ b/arch/arm/mach-imx/resume-imx6.S
+@@ -0,0 +1,24 @@
++/* SPDX-License-Identifier: GPL-2.0-or-later */
++/*
++ * Copyright 2014 Freescale Semiconductor, Inc.
++ */
++
++#include <linux/linkage.h>
++#include <asm/assembler.h>
++#include <asm/asm-offsets.h>
++#include <asm/hardware/cache-l2x0.h>
++#include "hardware.h"
++
++/*
++ * The following code must assume it is running from physical address
++ * where absolute virtual addresses to the data section have to be
++ * turned into relative ones.
++ */
++
++ENTRY(v7_cpu_resume)
++ bl v7_invalidate_l1
++#ifdef CONFIG_CACHE_L2X0
++ bl l2c310_early_resume
++#endif
++ b cpu_resume
++ENDPROC(v7_cpu_resume)
+diff --git a/arch/arm/mach-imx/suspend-imx6.S b/arch/arm/mach-imx/suspend-imx6.S
+index 062391ff13da..1eabf2d2834b 100644
+--- a/arch/arm/mach-imx/suspend-imx6.S
++++ b/arch/arm/mach-imx/suspend-imx6.S
+@@ -327,17 +327,3 @@ resume:
+
+ ret lr
+ ENDPROC(imx6_suspend)
+-
+-/*
+- * The following code must assume it is running from physical address
+- * where absolute virtual addresses to the data section have to be
+- * turned into relative ones.
+- */
+-
+-ENTRY(v7_cpu_resume)
+- bl v7_invalidate_l1
+-#ifdef CONFIG_CACHE_L2X0
+- bl l2c310_early_resume
+-#endif
+- b cpu_resume
+-ENDPROC(v7_cpu_resume)
+diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts
+index f82f25c1a5f9..d5dc12878dfe 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts
++++ b/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts
+@@ -327,7 +327,7 @@
+ #size-cells = <0>;
+
+ bus-width = <4>;
+- max-frequency = <50000000>;
++ max-frequency = <60000000>;
+
+ non-removable;
+ disable-wp;
+diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts b/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts
+index a8bb3fa9fec9..cb1b48f5b8b1 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts
++++ b/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts
+@@ -593,6 +593,7 @@
+ compatible = "brcm,bcm43438-bt";
+ interrupt-parent = <&gpio_intc>;
+ interrupts = <95 IRQ_TYPE_LEVEL_HIGH>;
++ interrupt-names = "host-wakeup";
+ shutdown-gpios = <&gpio GPIOX_17 GPIO_ACTIVE_HIGH>;
+ max-speed = <2000000>;
+ clocks = <&wifi32k>;
+diff --git a/arch/arm64/boot/dts/freescale/imx8qxp-mek.dts b/arch/arm64/boot/dts/freescale/imx8qxp-mek.dts
+index d3d26cca7d52..13460a360c6a 100644
+--- a/arch/arm64/boot/dts/freescale/imx8qxp-mek.dts
++++ b/arch/arm64/boot/dts/freescale/imx8qxp-mek.dts
+@@ -52,11 +52,6 @@
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <0>;
+ };
+-
+- ethphy1: ethernet-phy@1 {
+- compatible = "ethernet-phy-ieee802.3-c22";
+- reg = <1>;
+- };
+ };
+ };
+
+diff --git a/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi b/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi
+index d43e1299c8ef..b47f2ce160a4 100644
+--- a/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi
++++ b/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi
+@@ -102,7 +102,7 @@
+ };
+
+ gmac0: ethernet@ff800000 {
+- compatible = "altr,socfpga-stmmac", "snps,dwmac-3.74a", "snps,dwmac";
++ compatible = "altr,socfpga-stmmac-a10-s10", "snps,dwmac-3.74a", "snps,dwmac";
+ reg = <0xff800000 0x2000>;
+ interrupts = <0 90 4>;
+ interrupt-names = "macirq";
+@@ -117,7 +117,7 @@
+ };
+
+ gmac1: ethernet@ff802000 {
+- compatible = "altr,socfpga-stmmac", "snps,dwmac-3.74a", "snps,dwmac";
++ compatible = "altr,socfpga-stmmac-a10-s10", "snps,dwmac-3.74a", "snps,dwmac";
+ reg = <0xff802000 0x2000>;
+ interrupts = <0 91 4>;
+ interrupt-names = "macirq";
+@@ -132,7 +132,7 @@
+ };
+
+ gmac2: ethernet@ff804000 {
+- compatible = "altr,socfpga-stmmac", "snps,dwmac-3.74a", "snps,dwmac";
++ compatible = "altr,socfpga-stmmac-a10-s10", "snps,dwmac-3.74a", "snps,dwmac";
+ reg = <0xff804000 0x2000>;
+ interrupts = <0 92 4>;
+ interrupt-names = "macirq";
+diff --git a/arch/csky/Kconfig b/arch/csky/Kconfig
+index da09c884cc30..f00b394988a2 100644
+--- a/arch/csky/Kconfig
++++ b/arch/csky/Kconfig
+@@ -37,6 +37,7 @@ config CSKY
+ select GX6605S_TIMER if CPU_CK610
+ select HAVE_ARCH_TRACEHOOK
+ select HAVE_ARCH_AUDITSYSCALL
++ select HAVE_COPY_THREAD_TLS
+ select HAVE_DYNAMIC_FTRACE
+ select HAVE_FUNCTION_TRACER
+ select HAVE_FUNCTION_GRAPH_TRACER
+@@ -75,7 +76,7 @@ config CPU_HAS_TLBI
+ config CPU_HAS_LDSTEX
+ bool
+ help
+- For SMP, CPU needs "ldex&stex" instrcutions to atomic operations.
++ For SMP, CPU needs "ldex&stex" instructions for atomic operations.
+
+ config CPU_NEED_TLBSYNC
+ bool
+diff --git a/arch/csky/abiv1/inc/abi/entry.h b/arch/csky/abiv1/inc/abi/entry.h
+index 7ab78bd0f3b1..f35a9f3315ee 100644
+--- a/arch/csky/abiv1/inc/abi/entry.h
++++ b/arch/csky/abiv1/inc/abi/entry.h
+@@ -16,14 +16,16 @@
+ #define LSAVE_A4 40
+ #define LSAVE_A5 44
+
++#define usp ss1
++
+ .macro USPTOKSP
+- mtcr sp, ss1
++ mtcr sp, usp
+ mfcr sp, ss0
+ .endm
+
+ .macro KSPTOUSP
+ mtcr sp, ss0
+- mfcr sp, ss1
++ mfcr sp, usp
+ .endm
+
+ .macro SAVE_ALL epc_inc
+@@ -45,7 +47,13 @@
+ add lr, r13
+ stw lr, (sp, 8)
+
++ mov lr, sp
++ addi lr, 32
++ addi lr, 32
++ addi lr, 16
++ bt 2f
+ mfcr lr, ss1
++2:
+ stw lr, (sp, 16)
+
+ stw a0, (sp, 20)
+@@ -79,9 +87,10 @@
+ ldw a0, (sp, 12)
+ mtcr a0, epsr
+ btsti a0, 31
++ bt 1f
+ ldw a0, (sp, 16)
+ mtcr a0, ss1
+-
++1:
+ ldw a0, (sp, 24)
+ ldw a1, (sp, 28)
+ ldw a2, (sp, 32)
+@@ -102,9 +111,9 @@
+ addi sp, 32
+ addi sp, 8
+
+- bt 1f
++ bt 2f
+ KSPTOUSP
+-1:
++2:
+ rte
+ .endm
+
+diff --git a/arch/csky/abiv2/inc/abi/entry.h b/arch/csky/abiv2/inc/abi/entry.h
+index 9897a16b45e5..94a7a58765df 100644
+--- a/arch/csky/abiv2/inc/abi/entry.h
++++ b/arch/csky/abiv2/inc/abi/entry.h
+@@ -31,7 +31,13 @@
+
+ mfcr lr, epsr
+ stw lr, (sp, 12)
++ btsti lr, 31
++ bf 1f
++ addi lr, sp, 152
++ br 2f
++1:
+ mfcr lr, usp
++2:
+ stw lr, (sp, 16)
+
+ stw a0, (sp, 20)
+@@ -64,8 +70,10 @@
+ mtcr a0, epc
+ ldw a0, (sp, 12)
+ mtcr a0, epsr
++ btsti a0, 31
+ ldw a0, (sp, 16)
+ mtcr a0, usp
++ mtcr a0, ss0
+
+ #ifdef CONFIG_CPU_HAS_HILO
+ ldw a0, (sp, 140)
+@@ -86,6 +94,9 @@
+ addi sp, 40
+ ldm r16-r30, (sp)
+ addi sp, 72
++ bf 1f
++ mfcr sp, ss0
++1:
+ rte
+ .endm
+
+diff --git a/arch/csky/include/uapi/asm/unistd.h b/arch/csky/include/uapi/asm/unistd.h
+index 211c983c7282..ba4018929733 100644
+--- a/arch/csky/include/uapi/asm/unistd.h
++++ b/arch/csky/include/uapi/asm/unistd.h
+@@ -1,7 +1,10 @@
+ /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+ // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
++#define __ARCH_WANT_STAT64
++#define __ARCH_WANT_NEW_STAT
+ #define __ARCH_WANT_SYS_CLONE
++#define __ARCH_WANT_SYS_CLONE3
+ #define __ARCH_WANT_SET_GET_RLIMIT
+ #define __ARCH_WANT_TIME32_SYSCALLS
+ #include <asm-generic/unistd.h>
+diff --git a/arch/csky/kernel/atomic.S b/arch/csky/kernel/atomic.S
+index 5b84f11485ae..3821ef9b7567 100644
+--- a/arch/csky/kernel/atomic.S
++++ b/arch/csky/kernel/atomic.S
+@@ -17,10 +17,12 @@ ENTRY(csky_cmpxchg)
+ mfcr a3, epc
+ addi a3, TRAP0_SIZE
+
+- subi sp, 8
++ subi sp, 16
+ stw a3, (sp, 0)
+ mfcr a3, epsr
+ stw a3, (sp, 4)
++ mfcr a3, usp
++ stw a3, (sp, 8)
+
+ psrset ee
+ #ifdef CONFIG_CPU_HAS_LDSTEX
+@@ -47,7 +49,9 @@ ENTRY(csky_cmpxchg)
+ mtcr a3, epc
+ ldw a3, (sp, 4)
+ mtcr a3, epsr
+- addi sp, 8
++ ldw a3, (sp, 8)
++ mtcr a3, usp
++ addi sp, 16
+ KSPTOUSP
+ rte
+ END(csky_cmpxchg)
+diff --git a/arch/csky/kernel/process.c b/arch/csky/kernel/process.c
+index f320d9248a22..397962e11bd1 100644
+--- a/arch/csky/kernel/process.c
++++ b/arch/csky/kernel/process.c
+@@ -34,10 +34,11 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
+ return sw->r15;
+ }
+
+-int copy_thread(unsigned long clone_flags,
++int copy_thread_tls(unsigned long clone_flags,
+ unsigned long usp,
+ unsigned long kthread_arg,
+- struct task_struct *p)
++ struct task_struct *p,
++ unsigned long tls)
+ {
+ struct switch_stack *childstack;
+ struct pt_regs *childregs = task_pt_regs(p);
+@@ -64,7 +65,7 @@ int copy_thread(unsigned long clone_flags,
+ childregs->usp = usp;
+ if (clone_flags & CLONE_SETTLS)
+ task_thread_info(p)->tp_value = childregs->tls
+- = childregs->regs[0];
++ = tls;
+
+ childregs->a0 = 0;
+ childstack->r15 = (unsigned long) ret_from_fork;
+diff --git a/arch/csky/kernel/smp.c b/arch/csky/kernel/smp.c
+index b753d382e4ce..0bb0954d5570 100644
+--- a/arch/csky/kernel/smp.c
++++ b/arch/csky/kernel/smp.c
+@@ -120,7 +120,7 @@ void __init setup_smp_ipi(void)
+ int rc;
+
+ if (ipi_irq == 0)
+- panic("%s IRQ mapping failed\n", __func__);
++ return;
+
+ rc = request_percpu_irq(ipi_irq, handle_ipi, "IPI Interrupt",
+ &ipi_dummy_dev);
+diff --git a/arch/csky/mm/Makefile b/arch/csky/mm/Makefile
+index c94ef6481098..efb7ebab342b 100644
+--- a/arch/csky/mm/Makefile
++++ b/arch/csky/mm/Makefile
+@@ -1,8 +1,10 @@
+ # SPDX-License-Identifier: GPL-2.0-only
+ ifeq ($(CONFIG_CPU_HAS_CACHEV2),y)
+ obj-y += cachev2.o
++CFLAGS_REMOVE_cachev2.o = $(CC_FLAGS_FTRACE)
+ else
+ obj-y += cachev1.o
++CFLAGS_REMOVE_cachev1.o = $(CC_FLAGS_FTRACE)
+ endif
+
+ obj-y += dma-mapping.o
+diff --git a/arch/csky/mm/init.c b/arch/csky/mm/init.c
+index d4c2292ea46b..00e96278b377 100644
+--- a/arch/csky/mm/init.c
++++ b/arch/csky/mm/init.c
+@@ -31,6 +31,7 @@
+
+ pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
+ pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
++EXPORT_SYMBOL(invalid_pte_table);
+ unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
+ __page_aligned_bss;
+ EXPORT_SYMBOL(empty_zero_page);
+diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
+index e745abc5457a..245be4fafe13 100644
+--- a/arch/powerpc/kernel/cputable.c
++++ b/arch/powerpc/kernel/cputable.c
+@@ -2193,11 +2193,13 @@ static struct cpu_spec * __init setup_cpu_spec(unsigned long offset,
+ * oprofile_cpu_type already has a value, then we are
+ * possibly overriding a real PVR with a logical one,
+ * and, in that case, keep the current value for
+- * oprofile_cpu_type.
++ * oprofile_cpu_type. Futhermore, let's ensure that the
++ * fix for the PMAO bug is enabled on compatibility mode.
+ */
+ if (old.oprofile_cpu_type != NULL) {
+ t->oprofile_cpu_type = old.oprofile_cpu_type;
+ t->oprofile_type = old.oprofile_type;
++ t->cpu_features |= old.cpu_features & CPU_FTR_PMAO_BUG;
+ }
+ }
+
+diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
+index f5535eae637f..ab81a727e273 100644
+--- a/arch/powerpc/mm/mem.c
++++ b/arch/powerpc/mm/mem.c
+@@ -369,7 +369,9 @@ static inline bool flush_coherent_icache(unsigned long addr)
+ */
+ if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) {
+ mb(); /* sync */
++ allow_read_from_user((const void __user *)addr, L1_CACHE_BYTES);
+ icbi((void *)addr);
++ prevent_read_from_user((const void __user *)addr, L1_CACHE_BYTES);
+ mb(); /* sync */
+ isync();
+ return true;
+diff --git a/arch/s390/Makefile b/arch/s390/Makefile
+index e0e3a465bbfd..8dfa2cf1f05c 100644
+--- a/arch/s390/Makefile
++++ b/arch/s390/Makefile
+@@ -146,7 +146,7 @@ all: bzImage
+ #KBUILD_IMAGE is necessary for packaging targets like rpm-pkg, deb-pkg...
+ KBUILD_IMAGE := $(boot)/bzImage
+
+-install: vmlinux
++install:
+ $(Q)$(MAKE) $(build)=$(boot) $@
+
+ bzImage: vmlinux
+diff --git a/arch/s390/boot/Makefile b/arch/s390/boot/Makefile
+index e2c47d3a1c89..0ff9261c915e 100644
+--- a/arch/s390/boot/Makefile
++++ b/arch/s390/boot/Makefile
+@@ -70,7 +70,7 @@ $(obj)/compressed/vmlinux: $(obj)/startup.a FORCE
+ $(obj)/startup.a: $(OBJECTS) FORCE
+ $(call if_changed,ar)
+
+-install: $(CONFIGURE) $(obj)/bzImage
++install:
+ sh -x $(srctree)/$(obj)/install.sh $(KERNELRELEASE) $(obj)/bzImage \
+ System.map "$(INSTALL_PATH)"
+
+diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
+index 7b03037a8475..9c578ad5409e 100644
+--- a/arch/s390/include/asm/pgtable.h
++++ b/arch/s390/include/asm/pgtable.h
+@@ -750,6 +750,12 @@ static inline int pmd_write(pmd_t pmd)
+ return (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) != 0;
+ }
+
++#define pud_write pud_write
++static inline int pud_write(pud_t pud)
++{
++ return (pud_val(pud) & _REGION3_ENTRY_WRITE) != 0;
++}
++
+ static inline int pmd_dirty(pmd_t pmd)
+ {
+ return (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0;
+diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h
+index 71e3f0146cda..7870cf834533 100644
+--- a/arch/s390/include/asm/qdio.h
++++ b/arch/s390/include/asm/qdio.h
+@@ -227,7 +227,7 @@ struct qdio_buffer {
+ * @sbal: absolute SBAL address
+ */
+ struct sl_element {
+- unsigned long sbal;
++ u64 sbal;
+ } __attribute__ ((packed));
+
+ /**
+diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
+index bc61ea18e88d..60716d18ce5a 100644
+--- a/arch/s390/pci/pci.c
++++ b/arch/s390/pci/pci.c
+@@ -424,7 +424,7 @@ static void zpci_map_resources(struct pci_dev *pdev)
+
+ if (zpci_use_mio(zdev))
+ pdev->resource[i].start =
+- (resource_size_t __force) zdev->bars[i].mio_wb;
++ (resource_size_t __force) zdev->bars[i].mio_wt;
+ else
+ pdev->resource[i].start = (resource_size_t __force)
+ pci_iomap_range_fh(pdev, i, 0, 0);
+@@ -531,7 +531,7 @@ static int zpci_setup_bus_resources(struct zpci_dev *zdev,
+ flags |= IORESOURCE_MEM_64;
+
+ if (zpci_use_mio(zdev))
+- addr = (unsigned long) zdev->bars[i].mio_wb;
++ addr = (unsigned long) zdev->bars[i].mio_wt;
+ else
+ addr = ZPCI_ADDR(entry);
+ size = 1UL << zdev->bars[i].size;
+diff --git a/arch/x86/boot/compressed/kaslr_64.c b/arch/x86/boot/compressed/kaslr_64.c
+index 748456c365f4..9557c5a15b91 100644
+--- a/arch/x86/boot/compressed/kaslr_64.c
++++ b/arch/x86/boot/compressed/kaslr_64.c
+@@ -29,9 +29,6 @@
+ #define __PAGE_OFFSET __PAGE_OFFSET_BASE
+ #include "../../mm/ident_map.c"
+
+-/* Used by pgtable.h asm code to force instruction serialization. */
+-unsigned long __force_order;
+-
+ /* Used to track our page table allocation area. */
+ struct alloc_pgt_data {
+ unsigned char *pgt_buf;
+diff --git a/arch/x86/include/asm/io_bitmap.h b/arch/x86/include/asm/io_bitmap.h
+index 02c6ef8f7667..07344d82e88e 100644
+--- a/arch/x86/include/asm/io_bitmap.h
++++ b/arch/x86/include/asm/io_bitmap.h
+@@ -19,7 +19,14 @@ struct task_struct;
+ void io_bitmap_share(struct task_struct *tsk);
+ void io_bitmap_exit(void);
+
+-void tss_update_io_bitmap(void);
++void native_tss_update_io_bitmap(void);
++
++#ifdef CONFIG_PARAVIRT_XXL
++#include <asm/paravirt.h>
++#else
++#define tss_update_io_bitmap native_tss_update_io_bitmap
++#endif
++
+ #else
+ static inline void io_bitmap_share(struct task_struct *tsk) { }
+ static inline void io_bitmap_exit(void) { }
+diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
+index 86e7317eb31f..694d8daf4983 100644
+--- a/arch/x86/include/asm/paravirt.h
++++ b/arch/x86/include/asm/paravirt.h
+@@ -295,6 +295,13 @@ static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
+ PVOP_VCALL3(cpu.write_idt_entry, dt, entry, g);
+ }
+
++#ifdef CONFIG_X86_IOPL_IOPERM
++static inline void tss_update_io_bitmap(void)
++{
++ PVOP_VCALL0(cpu.update_io_bitmap);
++}
++#endif
++
+ static inline void paravirt_activate_mm(struct mm_struct *prev,
+ struct mm_struct *next)
+ {
+diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
+index 84812964d3dd..732f62e04ddb 100644
+--- a/arch/x86/include/asm/paravirt_types.h
++++ b/arch/x86/include/asm/paravirt_types.h
+@@ -140,6 +140,10 @@ struct pv_cpu_ops {
+
+ void (*load_sp0)(unsigned long sp0);
+
++#ifdef CONFIG_X86_IOPL_IOPERM
++ void (*update_io_bitmap)(void);
++#endif
++
+ void (*wbinvd)(void);
+
+ /* cpuid emulation, mostly so that caps bits can be disabled */
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index 2e4d90294fe6..9761e9c56756 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -462,7 +462,7 @@ static __always_inline void setup_pku(struct cpuinfo_x86 *c)
+ * cpuid bit to be set. We need to ensure that we
+ * update that bit in this CPU's "cpu_info".
+ */
+- get_cpu_cap(c);
++ set_cpu_cap(c, X86_FEATURE_OSPKE);
+ }
+
+ #ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
+diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
+index 789f5e4f89de..c131ba4e70ef 100644
+--- a/arch/x86/kernel/paravirt.c
++++ b/arch/x86/kernel/paravirt.c
+@@ -30,6 +30,7 @@
+ #include <asm/timer.h>
+ #include <asm/special_insns.h>
+ #include <asm/tlb.h>
++#include <asm/io_bitmap.h>
+
+ /*
+ * nop stub, which must not clobber anything *including the stack* to
+@@ -341,6 +342,10 @@ struct paravirt_patch_template pv_ops = {
+ .cpu.iret = native_iret,
+ .cpu.swapgs = native_swapgs,
+
++#ifdef CONFIG_X86_IOPL_IOPERM
++ .cpu.update_io_bitmap = native_tss_update_io_bitmap,
++#endif
++
+ .cpu.start_context_switch = paravirt_nop,
+ .cpu.end_context_switch = paravirt_nop,
+
+diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
+index 61e93a318983..3363e71589dd 100644
+--- a/arch/x86/kernel/process.c
++++ b/arch/x86/kernel/process.c
+@@ -374,7 +374,7 @@ static void tss_copy_io_bitmap(struct tss_struct *tss, struct io_bitmap *iobm)
+ /**
+ * tss_update_io_bitmap - Update I/O bitmap before exiting to usermode
+ */
+-void tss_update_io_bitmap(void)
++void native_tss_update_io_bitmap(void)
+ {
+ struct tss_struct *tss = this_cpu_ptr(&cpu_tss_rw);
+ struct thread_struct *t = &current->thread;
+diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
+index 52a1e5192fa8..fe0e647411da 100644
+--- a/arch/x86/platform/efi/efi_64.c
++++ b/arch/x86/platform/efi/efi_64.c
+@@ -316,7 +316,7 @@ void efi_sync_low_kernel_mappings(void)
+ static inline phys_addr_t
+ virt_to_phys_or_null_size(void *va, unsigned long size)
+ {
+- bool bad_size;
++ phys_addr_t pa;
+
+ if (!va)
+ return 0;
+@@ -324,16 +324,13 @@ virt_to_phys_or_null_size(void *va, unsigned long size)
+ if (virt_addr_valid(va))
+ return virt_to_phys(va);
+
+- /*
+- * A fully aligned variable on the stack is guaranteed not to
+- * cross a page bounary. Try to catch strings on the stack by
+- * checking that 'size' is a power of two.
+- */
+- bad_size = size > PAGE_SIZE || !is_power_of_2(size);
++ pa = slow_virt_to_phys(va);
+
+- WARN_ON(!IS_ALIGNED((unsigned long)va, size) || bad_size);
++ /* check if the object crosses a page boundary */
++ if (WARN_ON((pa ^ (pa + size - 1)) & PAGE_MASK))
++ return 0;
+
+- return slow_virt_to_phys(va);
++ return pa;
+ }
+
+ #define virt_to_phys_or_null(addr) \
+@@ -791,6 +788,8 @@ static efi_status_t
+ efi_thunk_get_variable(efi_char16_t *name, efi_guid_t *vendor,
+ u32 *attr, unsigned long *data_size, void *data)
+ {
++ u8 buf[24] __aligned(8);
++ efi_guid_t *vnd = PTR_ALIGN((efi_guid_t *)buf, sizeof(*vnd));
+ efi_status_t status;
+ u32 phys_name, phys_vendor, phys_attr;
+ u32 phys_data_size, phys_data;
+@@ -798,14 +797,19 @@ efi_thunk_get_variable(efi_char16_t *name, efi_guid_t *vendor,
+
+ spin_lock_irqsave(&efi_runtime_lock, flags);
+
++ *vnd = *vendor;
++
+ phys_data_size = virt_to_phys_or_null(data_size);
+- phys_vendor = virt_to_phys_or_null(vendor);
++ phys_vendor = virt_to_phys_or_null(vnd);
+ phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
+ phys_attr = virt_to_phys_or_null(attr);
+ phys_data = virt_to_phys_or_null_size(data, *data_size);
+
+- status = efi_thunk(get_variable, phys_name, phys_vendor,
+- phys_attr, phys_data_size, phys_data);
++ if (!phys_name || (data && !phys_data))
++ status = EFI_INVALID_PARAMETER;
++ else
++ status = efi_thunk(get_variable, phys_name, phys_vendor,
++ phys_attr, phys_data_size, phys_data);
+
+ spin_unlock_irqrestore(&efi_runtime_lock, flags);
+
+@@ -816,19 +820,25 @@ static efi_status_t
+ efi_thunk_set_variable(efi_char16_t *name, efi_guid_t *vendor,
+ u32 attr, unsigned long data_size, void *data)
+ {
++ u8 buf[24] __aligned(8);
++ efi_guid_t *vnd = PTR_ALIGN((efi_guid_t *)buf, sizeof(*vnd));
+ u32 phys_name, phys_vendor, phys_data;
+ efi_status_t status;
+ unsigned long flags;
+
+ spin_lock_irqsave(&efi_runtime_lock, flags);
+
++ *vnd = *vendor;
++
+ phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
+- phys_vendor = virt_to_phys_or_null(vendor);
++ phys_vendor = virt_to_phys_or_null(vnd);
+ phys_data = virt_to_phys_or_null_size(data, data_size);
+
+- /* If data_size is > sizeof(u32) we've got problems */
+- status = efi_thunk(set_variable, phys_name, phys_vendor,
+- attr, data_size, phys_data);
++ if (!phys_name || !phys_data)
++ status = EFI_INVALID_PARAMETER;
++ else
++ status = efi_thunk(set_variable, phys_name, phys_vendor,
++ attr, data_size, phys_data);
+
+ spin_unlock_irqrestore(&efi_runtime_lock, flags);
+
+@@ -840,6 +850,8 @@ efi_thunk_set_variable_nonblocking(efi_char16_t *name, efi_guid_t *vendor,
+ u32 attr, unsigned long data_size,
+ void *data)
+ {
++ u8 buf[24] __aligned(8);
++ efi_guid_t *vnd = PTR_ALIGN((efi_guid_t *)buf, sizeof(*vnd));
+ u32 phys_name, phys_vendor, phys_data;
+ efi_status_t status;
+ unsigned long flags;
+@@ -847,13 +859,17 @@ efi_thunk_set_variable_nonblocking(efi_char16_t *name, efi_guid_t *vendor,
+ if (!spin_trylock_irqsave(&efi_runtime_lock, flags))
+ return EFI_NOT_READY;
+
++ *vnd = *vendor;
++
+ phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
+- phys_vendor = virt_to_phys_or_null(vendor);
++ phys_vendor = virt_to_phys_or_null(vnd);
+ phys_data = virt_to_phys_or_null_size(data, data_size);
+
+- /* If data_size is > sizeof(u32) we've got problems */
+- status = efi_thunk(set_variable, phys_name, phys_vendor,
+- attr, data_size, phys_data);
++ if (!phys_name || !phys_data)
++ status = EFI_INVALID_PARAMETER;
++ else
++ status = efi_thunk(set_variable, phys_name, phys_vendor,
++ attr, data_size, phys_data);
+
+ spin_unlock_irqrestore(&efi_runtime_lock, flags);
+
+@@ -865,21 +881,29 @@ efi_thunk_get_next_variable(unsigned long *name_size,
+ efi_char16_t *name,
+ efi_guid_t *vendor)
+ {
++ u8 buf[24] __aligned(8);
++ efi_guid_t *vnd = PTR_ALIGN((efi_guid_t *)buf, sizeof(*vnd));
+ efi_status_t status;
+ u32 phys_name_size, phys_name, phys_vendor;
+ unsigned long flags;
+
+ spin_lock_irqsave(&efi_runtime_lock, flags);
+
++ *vnd = *vendor;
++
+ phys_name_size = virt_to_phys_or_null(name_size);
+- phys_vendor = virt_to_phys_or_null(vendor);
++ phys_vendor = virt_to_phys_or_null(vnd);
+ phys_name = virt_to_phys_or_null_size(name, *name_size);
+
+- status = efi_thunk(get_next_variable, phys_name_size,
+- phys_name, phys_vendor);
++ if (!phys_name)
++ status = EFI_INVALID_PARAMETER;
++ else
++ status = efi_thunk(get_next_variable, phys_name_size,
++ phys_name, phys_vendor);
+
+ spin_unlock_irqrestore(&efi_runtime_lock, flags);
+
++ *vendor = *vnd;
+ return status;
+ }
+
+diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
+index 1f756ffffe8b..507f4fb88fa7 100644
+--- a/arch/x86/xen/enlighten_pv.c
++++ b/arch/x86/xen/enlighten_pv.c
+@@ -72,6 +72,9 @@
+ #include <asm/mwait.h>
+ #include <asm/pci_x86.h>
+ #include <asm/cpu.h>
++#ifdef CONFIG_X86_IOPL_IOPERM
++#include <asm/io_bitmap.h>
++#endif
+
+ #ifdef CONFIG_ACPI
+ #include <linux/acpi.h>
+@@ -837,6 +840,25 @@ static void xen_load_sp0(unsigned long sp0)
+ this_cpu_write(cpu_tss_rw.x86_tss.sp0, sp0);
+ }
+
++#ifdef CONFIG_X86_IOPL_IOPERM
++static void xen_update_io_bitmap(void)
++{
++ struct physdev_set_iobitmap iobitmap;
++ struct tss_struct *tss = this_cpu_ptr(&cpu_tss_rw);
++
++ native_tss_update_io_bitmap();
++
++ iobitmap.bitmap = (uint8_t *)(&tss->x86_tss) +
++ tss->x86_tss.io_bitmap_base;
++ if (tss->x86_tss.io_bitmap_base == IO_BITMAP_OFFSET_INVALID)
++ iobitmap.nr_ports = 0;
++ else
++ iobitmap.nr_ports = IO_BITMAP_BITS;
++
++ HYPERVISOR_physdev_op(PHYSDEVOP_set_iobitmap, &iobitmap);
++}
++#endif
++
+ static void xen_io_delay(void)
+ {
+ }
+@@ -896,14 +918,15 @@ static u64 xen_read_msr_safe(unsigned int msr, int *err)
+ static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high)
+ {
+ int ret;
++#ifdef CONFIG_X86_64
++ unsigned int which;
++ u64 base;
++#endif
+
+ ret = 0;
+
+ switch (msr) {
+ #ifdef CONFIG_X86_64
+- unsigned which;
+- u64 base;
+-
+ case MSR_FS_BASE: which = SEGBASE_FS; goto set;
+ case MSR_KERNEL_GS_BASE: which = SEGBASE_GS_USER; goto set;
+ case MSR_GS_BASE: which = SEGBASE_GS_KERNEL; goto set;
+@@ -1046,6 +1069,9 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = {
+ .write_idt_entry = xen_write_idt_entry,
+ .load_sp0 = xen_load_sp0,
+
++#ifdef CONFIG_X86_IOPL_IOPERM
++ .update_io_bitmap = xen_update_io_bitmap,
++#endif
+ .io_delay = xen_io_delay,
+
+ /* Xen takes care of %gs when switching to usermode for us */
+diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
+index e1419edde2ec..5a64607ce774 100644
+--- a/block/bfq-cgroup.c
++++ b/block/bfq-cgroup.c
+@@ -332,7 +332,7 @@ static void bfqg_put(struct bfq_group *bfqg)
+ kfree(bfqg);
+ }
+
+-static void bfqg_and_blkg_get(struct bfq_group *bfqg)
++void bfqg_and_blkg_get(struct bfq_group *bfqg)
+ {
+ /* see comments in bfq_bic_update_cgroup for why refcounting bfqg */
+ bfqg_get(bfqg);
+@@ -651,6 +651,12 @@ void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ bfq_bfqq_expire(bfqd, bfqd->in_service_queue,
+ false, BFQQE_PREEMPTED);
+
++ /*
++ * get extra reference to prevent bfqq from being freed in
++ * next possible deactivate
++ */
++ bfqq->ref++;
++
+ if (bfq_bfqq_busy(bfqq))
+ bfq_deactivate_bfqq(bfqd, bfqq, false, false);
+ else if (entity->on_st)
+@@ -670,6 +676,8 @@ void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+
+ if (!bfqd->in_service_queue && !bfqd->rq_in_driver)
+ bfq_schedule_dispatch(bfqd);
++ /* release extra ref taken above */
++ bfq_put_queue(bfqq);
+ }
+
+ /**
+@@ -1398,6 +1406,10 @@ struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
+ return bfqq->bfqd->root_group;
+ }
+
++void bfqg_and_blkg_get(struct bfq_group *bfqg) {}
++
++void bfqg_and_blkg_put(struct bfq_group *bfqg) {}
++
+ struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
+ {
+ struct bfq_group *bfqg;
+diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
+index 5c239c540c47..8fe4b6919511 100644
+--- a/block/bfq-iosched.c
++++ b/block/bfq-iosched.c
+@@ -614,6 +614,10 @@ bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+ bfqq->pos_root = NULL;
+ }
+
++ /* oom_bfqq does not participate in queue merging */
++ if (bfqq == &bfqd->oom_bfqq)
++ return;
++
+ /*
+ * bfqq cannot be merged any longer (see comments in
+ * bfq_setup_cooperator): no point in adding bfqq into the
+@@ -4822,9 +4826,7 @@ void bfq_put_queue(struct bfq_queue *bfqq)
+ {
+ struct bfq_queue *item;
+ struct hlist_node *n;
+-#ifdef CONFIG_BFQ_GROUP_IOSCHED
+ struct bfq_group *bfqg = bfqq_group(bfqq);
+-#endif
+
+ if (bfqq->bfqd)
+ bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p %d",
+@@ -4897,9 +4899,7 @@ void bfq_put_queue(struct bfq_queue *bfqq)
+ bfqq->bfqd->last_completed_rq_bfqq = NULL;
+
+ kmem_cache_free(bfq_pool, bfqq);
+-#ifdef CONFIG_BFQ_GROUP_IOSCHED
+ bfqg_and_blkg_put(bfqg);
+-#endif
+ }
+
+ static void bfq_put_cooperator(struct bfq_queue *bfqq)
+@@ -6387,10 +6387,10 @@ static void bfq_exit_queue(struct elevator_queue *e)
+
+ hrtimer_cancel(&bfqd->idle_slice_timer);
+
+-#ifdef CONFIG_BFQ_GROUP_IOSCHED
+ /* release oom-queue reference to root group */
+ bfqg_and_blkg_put(bfqd->root_group);
+
++#ifdef CONFIG_BFQ_GROUP_IOSCHED
+ blkcg_deactivate_policy(bfqd->queue, &blkcg_policy_bfq);
+ #else
+ spin_lock_irq(&bfqd->lock);
+diff --git a/block/bfq-iosched.h b/block/bfq-iosched.h
+index 8526f20c53bc..355b8bbd1033 100644
+--- a/block/bfq-iosched.h
++++ b/block/bfq-iosched.h
+@@ -921,6 +921,7 @@ struct bfq_group {
+
+ #else
+ struct bfq_group {
++ struct bfq_entity entity;
+ struct bfq_sched_data sched_data;
+
+ struct bfq_queue *async_bfqq[2][IOPRIO_BE_NR];
+@@ -984,6 +985,7 @@ struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd,
+ struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg);
+ struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
+ struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node);
++void bfqg_and_blkg_get(struct bfq_group *bfqg);
+ void bfqg_and_blkg_put(struct bfq_group *bfqg);
+
+ #ifdef CONFIG_BFQ_GROUP_IOSCHED
+diff --git a/block/bfq-wf2q.c b/block/bfq-wf2q.c
+index 05f0bf4a1144..44079147e396 100644
+--- a/block/bfq-wf2q.c
++++ b/block/bfq-wf2q.c
+@@ -536,7 +536,9 @@ static void bfq_get_entity(struct bfq_entity *entity)
+ bfqq->ref++;
+ bfq_log_bfqq(bfqq->bfqd, bfqq, "get_entity: %p %d",
+ bfqq, bfqq->ref);
+- }
++ } else
++ bfqg_and_blkg_get(container_of(entity, struct bfq_group,
++ entity));
+ }
+
+ /**
+@@ -650,8 +652,14 @@ static void bfq_forget_entity(struct bfq_service_tree *st,
+
+ entity->on_st = false;
+ st->wsum -= entity->weight;
+- if (bfqq && !is_in_service)
++ if (is_in_service)
++ return;
++
++ if (bfqq)
+ bfq_put_queue(bfqq);
++ else
++ bfqg_and_blkg_put(container_of(entity, struct bfq_group,
++ entity));
+ }
+
+ /**
+diff --git a/drivers/android/binder.c b/drivers/android/binder.c
+index 9fcc761031d8..59b217ffeb59 100644
+--- a/drivers/android/binder.c
++++ b/drivers/android/binder.c
+@@ -5226,6 +5226,7 @@ static int binder_open(struct inode *nodp, struct file *filp)
+ binder_dev = container_of(filp->private_data,
+ struct binder_device, miscdev);
+ }
++ refcount_inc(&binder_dev->ref);
+ proc->context = &binder_dev->context;
+ binder_alloc_init(&proc->alloc);
+
+@@ -5403,6 +5404,7 @@ static int binder_node_release(struct binder_node *node, int refs)
+ static void binder_deferred_release(struct binder_proc *proc)
+ {
+ struct binder_context *context = proc->context;
++ struct binder_device *device;
+ struct rb_node *n;
+ int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
+
+@@ -5419,6 +5421,12 @@ static void binder_deferred_release(struct binder_proc *proc)
+ context->binder_context_mgr_node = NULL;
+ }
+ mutex_unlock(&context->context_mgr_node_lock);
++ device = container_of(proc->context, struct binder_device, context);
++ if (refcount_dec_and_test(&device->ref)) {
++ kfree(context->name);
++ kfree(device);
++ }
++ proc->context = NULL;
+ binder_inner_proc_lock(proc);
+ /*
+ * Make sure proc stays alive after we
+@@ -6075,6 +6083,7 @@ static int __init init_binder_device(const char *name)
+ binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
+ binder_device->miscdev.name = name;
+
++ refcount_set(&binder_device->ref, 1);
+ binder_device->context.binder_context_mgr_uid = INVALID_UID;
+ binder_device->context.name = name;
+ mutex_init(&binder_device->context.context_mgr_node_lock);
+diff --git a/drivers/android/binder_internal.h b/drivers/android/binder_internal.h
+index ae991097d14d..283d3cb9c16e 100644
+--- a/drivers/android/binder_internal.h
++++ b/drivers/android/binder_internal.h
+@@ -8,6 +8,7 @@
+ #include <linux/list.h>
+ #include <linux/miscdevice.h>
+ #include <linux/mutex.h>
++#include <linux/refcount.h>
+ #include <linux/stddef.h>
+ #include <linux/types.h>
+ #include <linux/uidgid.h>
+@@ -33,6 +34,7 @@ struct binder_device {
+ struct miscdevice miscdev;
+ struct binder_context context;
+ struct inode *binderfs_inode;
++ refcount_t ref;
+ };
+
+ /**
+diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c
+index e2580e5316a2..110e41f920c2 100644
+--- a/drivers/android/binderfs.c
++++ b/drivers/android/binderfs.c
+@@ -154,6 +154,7 @@ static int binderfs_binder_device_create(struct inode *ref_inode,
+ if (!name)
+ goto err;
+
++ refcount_set(&device->ref, 1);
+ device->binderfs_inode = inode;
+ device->context.binder_context_mgr_uid = INVALID_UID;
+ device->context.name = name;
+@@ -257,8 +258,10 @@ static void binderfs_evict_inode(struct inode *inode)
+ ida_free(&binderfs_minors, device->miscdev.minor);
+ mutex_unlock(&binderfs_minors_mutex);
+
+- kfree(device->context.name);
+- kfree(device);
++ if (refcount_dec_and_test(&device->ref)) {
++ kfree(device->context.name);
++ kfree(device);
++ }
+ }
+
+ /**
+diff --git a/drivers/base/core.c b/drivers/base/core.c
+index 42a672456432..3306d5ae92a6 100644
+--- a/drivers/base/core.c
++++ b/drivers/base/core.c
+@@ -745,25 +745,31 @@ static void __device_links_queue_sync_state(struct device *dev,
+ /**
+ * device_links_flush_sync_list - Call sync_state() on a list of devices
+ * @list: List of devices to call sync_state() on
++ * @dont_lock_dev: Device for which lock is already held by the caller
+ *
+ * Calls sync_state() on all the devices that have been queued for it. This
+- * function is used in conjunction with __device_links_queue_sync_state().
++ * function is used in conjunction with __device_links_queue_sync_state(). The
++ * @dont_lock_dev parameter is useful when this function is called from a
++ * context where a device lock is already held.
+ */
+-static void device_links_flush_sync_list(struct list_head *list)
++static void device_links_flush_sync_list(struct list_head *list,
++ struct device *dont_lock_dev)
+ {
+ struct device *dev, *tmp;
+
+ list_for_each_entry_safe(dev, tmp, list, links.defer_sync) {
+ list_del_init(&dev->links.defer_sync);
+
+- device_lock(dev);
++ if (dev != dont_lock_dev)
++ device_lock(dev);
+
+ if (dev->bus->sync_state)
+ dev->bus->sync_state(dev);
+ else if (dev->driver && dev->driver->sync_state)
+ dev->driver->sync_state(dev);
+
+- device_unlock(dev);
++ if (dev != dont_lock_dev)
++ device_unlock(dev);
+
+ put_device(dev);
+ }
+@@ -801,7 +807,7 @@ void device_links_supplier_sync_state_resume(void)
+ out:
+ device_links_write_unlock();
+
+- device_links_flush_sync_list(&sync_list);
++ device_links_flush_sync_list(&sync_list, NULL);
+ }
+
+ static int sync_state_resume_initcall(void)
+@@ -865,6 +871,11 @@ void device_links_driver_bound(struct device *dev)
+ driver_deferred_probe_add(link->consumer);
+ }
+
++ if (defer_sync_state_count)
++ __device_links_supplier_defer_sync(dev);
++ else
++ __device_links_queue_sync_state(dev, &sync_list);
++
+ list_for_each_entry(link, &dev->links.suppliers, c_node) {
+ if (!(link->flags & DL_FLAG_MANAGED))
+ continue;
+@@ -883,7 +894,7 @@ void device_links_driver_bound(struct device *dev)
+
+ device_links_write_unlock();
+
+- device_links_flush_sync_list(&sync_list);
++ device_links_flush_sync_list(&sync_list, dev);
+ }
+
+ static void device_link_drop_managed(struct device_link *link)
+diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
+index 3d79b074f958..c42447d5d5a8 100644
+--- a/drivers/bus/ti-sysc.c
++++ b/drivers/bus/ti-sysc.c
+@@ -1406,7 +1406,7 @@ static void sysc_init_revision_quirks(struct sysc *ddata)
+ }
+
+ /* 1-wire needs module's internal clocks enabled for reset */
+-static void sysc_clk_enable_quirk_hdq1w(struct sysc *ddata)
++static void sysc_pre_reset_quirk_hdq1w(struct sysc *ddata)
+ {
+ int offset = 0x0c; /* HDQ_CTRL_STATUS */
+ u16 val;
+@@ -1494,7 +1494,7 @@ static void sysc_init_module_quirks(struct sysc *ddata)
+ return;
+
+ if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_HDQ1W) {
+- ddata->clk_enable_quirk = sysc_clk_enable_quirk_hdq1w;
++ ddata->clk_disable_quirk = sysc_pre_reset_quirk_hdq1w;
+
+ return;
+ }
+diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
+index ce41cd9b758a..2427398ff22a 100644
+--- a/drivers/dma-buf/dma-buf.c
++++ b/drivers/dma-buf/dma-buf.c
+@@ -108,6 +108,7 @@ static int dma_buf_release(struct inode *inode, struct file *file)
+ dma_resv_fini(dmabuf->resv);
+
+ module_put(dmabuf->owner);
++ kfree(dmabuf->name);
+ kfree(dmabuf);
+ return 0;
+ }
+diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
+index e51d836afcc7..1092d4ce723e 100644
+--- a/drivers/dma/coh901318.c
++++ b/drivers/dma/coh901318.c
+@@ -1947,8 +1947,6 @@ static void dma_tc_handle(struct coh901318_chan *cohc)
+ return;
+ }
+
+- spin_lock(&cohc->lock);
+-
+ /*
+ * When we reach this point, at least one queue item
+ * should have been moved over from cohc->queue to
+@@ -1969,8 +1967,6 @@ static void dma_tc_handle(struct coh901318_chan *cohc)
+ if (coh901318_queue_start(cohc) == NULL)
+ cohc->busy = 0;
+
+- spin_unlock(&cohc->lock);
+-
+ /*
+ * This tasklet will remove items from cohc->active
+ * and thus terminates them.
+diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
+index c27e206a764c..67736c801f3c 100644
+--- a/drivers/dma/imx-sdma.c
++++ b/drivers/dma/imx-sdma.c
+@@ -1328,13 +1328,14 @@ static void sdma_free_chan_resources(struct dma_chan *chan)
+
+ sdma_channel_synchronize(chan);
+
+- if (sdmac->event_id0)
++ if (sdmac->event_id0 >= 0)
+ sdma_event_disable(sdmac, sdmac->event_id0);
+ if (sdmac->event_id1)
+ sdma_event_disable(sdmac, sdmac->event_id1);
+
+ sdmac->event_id0 = 0;
+ sdmac->event_id1 = 0;
++ sdmac->context_loaded = false;
+
+ sdma_set_channel_priority(sdmac, 0);
+
+@@ -1628,7 +1629,7 @@ static int sdma_config(struct dma_chan *chan,
+ memcpy(&sdmac->slave_config, dmaengine_cfg, sizeof(*dmaengine_cfg));
+
+ /* Set ENBLn earlier to make sure dma request triggered after that */
+- if (sdmac->event_id0) {
++ if (sdmac->event_id0 >= 0) {
+ if (sdmac->event_id0 >= sdmac->sdma->drvdata->num_events)
+ return -EINVAL;
+ sdma_event_enable(sdmac, sdmac->event_id0);
+diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
+index 3a45079d11ec..4a750e29bfb5 100644
+--- a/drivers/dma/tegra20-apb-dma.c
++++ b/drivers/dma/tegra20-apb-dma.c
+@@ -281,7 +281,7 @@ static struct tegra_dma_desc *tegra_dma_desc_get(
+
+ /* Do not allocate if desc are waiting for ack */
+ list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) {
+- if (async_tx_test_ack(&dma_desc->txd)) {
++ if (async_tx_test_ack(&dma_desc->txd) && !dma_desc->cb_count) {
+ list_del(&dma_desc->node);
+ spin_unlock_irqrestore(&tdc->lock, flags);
+ dma_desc->txd.flags = 0;
+@@ -756,10 +756,6 @@ static int tegra_dma_terminate_all(struct dma_chan *dc)
+ bool was_busy;
+
+ spin_lock_irqsave(&tdc->lock, flags);
+- if (list_empty(&tdc->pending_sg_req)) {
+- spin_unlock_irqrestore(&tdc->lock, flags);
+- return 0;
+- }
+
+ if (!tdc->busy)
+ goto skip_dma_stop;
+diff --git a/drivers/edac/synopsys_edac.c b/drivers/edac/synopsys_edac.c
+index 2d263382d797..880ffd833718 100644
+--- a/drivers/edac/synopsys_edac.c
++++ b/drivers/edac/synopsys_edac.c
+@@ -479,20 +479,14 @@ static void handle_error(struct mem_ctl_info *mci, struct synps_ecc_status *p)
+ pinf = &p->ceinfo;
+ if (!priv->p_data->quirks) {
+ snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
+- "DDR ECC error type:%s Row %d Bank %d Col %d ",
+- "CE", pinf->row, pinf->bank, pinf->col);
+- snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
+- "Bit Position: %d Data: 0x%08x\n",
++ "DDR ECC error type:%s Row %d Bank %d Col %d Bit Position: %d Data: 0x%08x",
++ "CE", pinf->row, pinf->bank, pinf->col,
+ pinf->bitpos, pinf->data);
+ } else {
+ snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
+- "DDR ECC error type:%s Row %d Bank %d Col %d ",
+- "CE", pinf->row, pinf->bank, pinf->col);
+- snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
+- "BankGroup Number %d Block Number %d ",
+- pinf->bankgrpnr, pinf->blknr);
+- snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
+- "Bit Position: %d Data: 0x%08x\n",
++ "DDR ECC error type:%s Row %d Bank %d Col %d BankGroup Number %d Block Number %d Bit Position: %d Data: 0x%08x",
++ "CE", pinf->row, pinf->bank, pinf->col,
++ pinf->bankgrpnr, pinf->blknr,
+ pinf->bitpos, pinf->data);
+ }
+
+@@ -509,10 +503,8 @@ static void handle_error(struct mem_ctl_info *mci, struct synps_ecc_status *p)
+ "UE", pinf->row, pinf->bank, pinf->col);
+ } else {
+ snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
+- "DDR ECC error type :%s Row %d Bank %d Col %d ",
+- "UE", pinf->row, pinf->bank, pinf->col);
+- snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
+- "BankGroup Number %d Block Number %d",
++ "DDR ECC error type :%s Row %d Bank %d Col %d BankGroup Number %d Block Number %d",
++ "UE", pinf->row, pinf->bank, pinf->col,
+ pinf->bankgrpnr, pinf->blknr);
+ }
+
+diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
+index 2b02cb165f16..a9778591341b 100644
+--- a/drivers/firmware/efi/efi.c
++++ b/drivers/firmware/efi/efi.c
+@@ -552,7 +552,7 @@ int __init efi_config_parse_tables(void *config_tables, int count, int sz,
+
+ seed = early_memremap(efi.rng_seed, sizeof(*seed));
+ if (seed != NULL) {
+- size = seed->size;
++ size = READ_ONCE(seed->size);
+ early_memunmap(seed, sizeof(*seed));
+ } else {
+ pr_err("Could not map UEFI random seed!\n");
+@@ -562,7 +562,7 @@ int __init efi_config_parse_tables(void *config_tables, int count, int sz,
+ sizeof(*seed) + size);
+ if (seed != NULL) {
+ pr_notice("seeding entropy pool\n");
+- add_bootloader_randomness(seed->bits, seed->size);
++ add_bootloader_randomness(seed->bits, size);
+ early_memunmap(seed, sizeof(*seed) + size);
+ } else {
+ pr_err("Could not map UEFI random seed!\n");
+diff --git a/drivers/firmware/imx/imx-scu.c b/drivers/firmware/imx/imx-scu.c
+index 03b43b7a6d1d..f71eaa5bf52d 100644
+--- a/drivers/firmware/imx/imx-scu.c
++++ b/drivers/firmware/imx/imx-scu.c
+@@ -29,6 +29,7 @@ struct imx_sc_chan {
+ struct mbox_client cl;
+ struct mbox_chan *ch;
+ int idx;
++ struct completion tx_done;
+ };
+
+ struct imx_sc_ipc {
+@@ -100,6 +101,14 @@ int imx_scu_get_handle(struct imx_sc_ipc **ipc)
+ }
+ EXPORT_SYMBOL(imx_scu_get_handle);
+
++/* Callback called when the word of a message is ack-ed, eg read by SCU */
++static void imx_scu_tx_done(struct mbox_client *cl, void *mssg, int r)
++{
++ struct imx_sc_chan *sc_chan = container_of(cl, struct imx_sc_chan, cl);
++
++ complete(&sc_chan->tx_done);
++}
++
+ static void imx_scu_rx_callback(struct mbox_client *c, void *msg)
+ {
+ struct imx_sc_chan *sc_chan = container_of(c, struct imx_sc_chan, cl);
+@@ -149,6 +158,19 @@ static int imx_scu_ipc_write(struct imx_sc_ipc *sc_ipc, void *msg)
+
+ for (i = 0; i < hdr->size; i++) {
+ sc_chan = &sc_ipc->chans[i % 4];
++
++ /*
++ * SCU requires that all messages words are written
++ * sequentially but linux MU driver implements multiple
++ * independent channels for each register so ordering between
++ * different channels must be ensured by SCU API interface.
++ *
++ * Wait for tx_done before every send to ensure that no
++ * queueing happens at the mailbox channel level.
++ */
++ wait_for_completion(&sc_chan->tx_done);
++ reinit_completion(&sc_chan->tx_done);
++
+ ret = mbox_send_message(sc_chan->ch, &data[i]);
+ if (ret < 0)
+ return ret;
+@@ -247,6 +269,11 @@ static int imx_scu_probe(struct platform_device *pdev)
+ cl->knows_txdone = true;
+ cl->rx_callback = imx_scu_rx_callback;
+
++ /* Initial tx_done completion as "done" */
++ cl->tx_done = imx_scu_tx_done;
++ init_completion(&sc_chan->tx_done);
++ complete(&sc_chan->tx_done);
++
+ sc_chan->sc_ipc = sc_ipc;
+ sc_chan->idx = i % 4;
+ sc_chan->ch = mbox_request_channel_byname(cl, chan_name);
+diff --git a/drivers/firmware/imx/misc.c b/drivers/firmware/imx/misc.c
+index 4b56a587dacd..d073cb3ce699 100644
+--- a/drivers/firmware/imx/misc.c
++++ b/drivers/firmware/imx/misc.c
+@@ -16,7 +16,7 @@ struct imx_sc_msg_req_misc_set_ctrl {
+ u32 ctrl;
+ u32 val;
+ u16 resource;
+-} __packed;
++} __packed __aligned(4);
+
+ struct imx_sc_msg_req_cpu_start {
+ struct imx_sc_rpc_msg hdr;
+@@ -24,18 +24,18 @@ struct imx_sc_msg_req_cpu_start {
+ u32 address_lo;
+ u16 resource;
+ u8 enable;
+-} __packed;
++} __packed __aligned(4);
+
+ struct imx_sc_msg_req_misc_get_ctrl {
+ struct imx_sc_rpc_msg hdr;
+ u32 ctrl;
+ u16 resource;
+-} __packed;
++} __packed __aligned(4);
+
+ struct imx_sc_msg_resp_misc_get_ctrl {
+ struct imx_sc_rpc_msg hdr;
+ u32 val;
+-} __packed;
++} __packed __aligned(4);
+
+ /*
+ * This function sets a miscellaneous control value.
+diff --git a/drivers/firmware/imx/scu-pd.c b/drivers/firmware/imx/scu-pd.c
+index b556612207e5..af3ae0087de4 100644
+--- a/drivers/firmware/imx/scu-pd.c
++++ b/drivers/firmware/imx/scu-pd.c
+@@ -61,7 +61,7 @@ struct imx_sc_msg_req_set_resource_power_mode {
+ struct imx_sc_rpc_msg hdr;
+ u16 resource;
+ u8 mode;
+-} __packed;
++} __packed __aligned(4);
+
+ #define IMX_SCU_PD_NAME_SIZE 20
+ struct imx_sc_pm_domain {
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+index d9b8e3298d78..6b5b243af15d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+@@ -54,7 +54,7 @@
+ * In bring-up phase, it just used primary ring so set gfx ring number as 1 at
+ * first.
+ */
+-#define GFX10_NUM_GFX_RINGS 2
++#define GFX10_NUM_GFX_RINGS_NV1X 1
+ #define GFX10_MEC_HPD_SIZE 2048
+
+ #define F32_CE_PROGRAM_RAM_SIZE 65536
+@@ -1286,7 +1286,7 @@ static int gfx_v10_0_sw_init(void *handle)
+ case CHIP_NAVI14:
+ case CHIP_NAVI12:
+ adev->gfx.me.num_me = 1;
+- adev->gfx.me.num_pipe_per_me = 2;
++ adev->gfx.me.num_pipe_per_me = 1;
+ adev->gfx.me.num_queue_per_pipe = 1;
+ adev->gfx.mec.num_mec = 2;
+ adev->gfx.mec.num_pipe_per_mec = 4;
+@@ -2692,18 +2692,20 @@ static int gfx_v10_0_cp_gfx_start(struct amdgpu_device *adev)
+ amdgpu_ring_commit(ring);
+
+ /* submit cs packet to copy state 0 to next available state */
+- ring = &adev->gfx.gfx_ring[1];
+- r = amdgpu_ring_alloc(ring, 2);
+- if (r) {
+- DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
+- return r;
+- }
+-
+- amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
+- amdgpu_ring_write(ring, 0);
++ if (adev->gfx.num_gfx_rings > 1) {
++ /* maximum supported gfx ring is 2 */
++ ring = &adev->gfx.gfx_ring[1];
++ r = amdgpu_ring_alloc(ring, 2);
++ if (r) {
++ DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
++ return r;
++ }
+
+- amdgpu_ring_commit(ring);
++ amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
++ amdgpu_ring_write(ring, 0);
+
++ amdgpu_ring_commit(ring);
++ }
+ return 0;
+ }
+
+@@ -2800,39 +2802,41 @@ static int gfx_v10_0_cp_gfx_resume(struct amdgpu_device *adev)
+ mutex_unlock(&adev->srbm_mutex);
+
+ /* Init gfx ring 1 for pipe 1 */
+- mutex_lock(&adev->srbm_mutex);
+- gfx_v10_0_cp_gfx_switch_pipe(adev, PIPE_ID1);
+- ring = &adev->gfx.gfx_ring[1];
+- rb_bufsz = order_base_2(ring->ring_size / 8);
+- tmp = REG_SET_FIELD(0, CP_RB1_CNTL, RB_BUFSZ, rb_bufsz);
+- tmp = REG_SET_FIELD(tmp, CP_RB1_CNTL, RB_BLKSZ, rb_bufsz - 2);
+- WREG32_SOC15(GC, 0, mmCP_RB1_CNTL, tmp);
+- /* Initialize the ring buffer's write pointers */
+- ring->wptr = 0;
+- WREG32_SOC15(GC, 0, mmCP_RB1_WPTR, lower_32_bits(ring->wptr));
+- WREG32_SOC15(GC, 0, mmCP_RB1_WPTR_HI, upper_32_bits(ring->wptr));
+- /* Set the wb address wether it's enabled or not */
+- rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
+- WREG32_SOC15(GC, 0, mmCP_RB1_RPTR_ADDR, lower_32_bits(rptr_addr));
+- WREG32_SOC15(GC, 0, mmCP_RB1_RPTR_ADDR_HI, upper_32_bits(rptr_addr) &
+- CP_RB1_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);
+- wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
+- WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO,
+- lower_32_bits(wptr_gpu_addr));
+- WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI,
+- upper_32_bits(wptr_gpu_addr));
+-
+- mdelay(1);
+- WREG32_SOC15(GC, 0, mmCP_RB1_CNTL, tmp);
+-
+- rb_addr = ring->gpu_addr >> 8;
+- WREG32_SOC15(GC, 0, mmCP_RB1_BASE, rb_addr);
+- WREG32_SOC15(GC, 0, mmCP_RB1_BASE_HI, upper_32_bits(rb_addr));
+- WREG32_SOC15(GC, 0, mmCP_RB1_ACTIVE, 1);
+-
+- gfx_v10_0_cp_gfx_set_doorbell(adev, ring);
+- mutex_unlock(&adev->srbm_mutex);
+-
++ if (adev->gfx.num_gfx_rings > 1) {
++ mutex_lock(&adev->srbm_mutex);
++ gfx_v10_0_cp_gfx_switch_pipe(adev, PIPE_ID1);
++ /* maximum supported gfx ring is 2 */
++ ring = &adev->gfx.gfx_ring[1];
++ rb_bufsz = order_base_2(ring->ring_size / 8);
++ tmp = REG_SET_FIELD(0, CP_RB1_CNTL, RB_BUFSZ, rb_bufsz);
++ tmp = REG_SET_FIELD(tmp, CP_RB1_CNTL, RB_BLKSZ, rb_bufsz - 2);
++ WREG32_SOC15(GC, 0, mmCP_RB1_CNTL, tmp);
++ /* Initialize the ring buffer's write pointers */
++ ring->wptr = 0;
++ WREG32_SOC15(GC, 0, mmCP_RB1_WPTR, lower_32_bits(ring->wptr));
++ WREG32_SOC15(GC, 0, mmCP_RB1_WPTR_HI, upper_32_bits(ring->wptr));
++ /* Set the wb address wether it's enabled or not */
++ rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
++ WREG32_SOC15(GC, 0, mmCP_RB1_RPTR_ADDR, lower_32_bits(rptr_addr));
++ WREG32_SOC15(GC, 0, mmCP_RB1_RPTR_ADDR_HI, upper_32_bits(rptr_addr) &
++ CP_RB1_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);
++ wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
++ WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO,
++ lower_32_bits(wptr_gpu_addr));
++ WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI,
++ upper_32_bits(wptr_gpu_addr));
++
++ mdelay(1);
++ WREG32_SOC15(GC, 0, mmCP_RB1_CNTL, tmp);
++
++ rb_addr = ring->gpu_addr >> 8;
++ WREG32_SOC15(GC, 0, mmCP_RB1_BASE, rb_addr);
++ WREG32_SOC15(GC, 0, mmCP_RB1_BASE_HI, upper_32_bits(rb_addr));
++ WREG32_SOC15(GC, 0, mmCP_RB1_ACTIVE, 1);
++
++ gfx_v10_0_cp_gfx_set_doorbell(adev, ring);
++ mutex_unlock(&adev->srbm_mutex);
++ }
+ /* Switch to pipe 0 */
+ mutex_lock(&adev->srbm_mutex);
+ gfx_v10_0_cp_gfx_switch_pipe(adev, PIPE_ID0);
+@@ -3952,7 +3956,8 @@ static int gfx_v10_0_early_init(void *handle)
+ {
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+- adev->gfx.num_gfx_rings = GFX10_NUM_GFX_RINGS;
++ adev->gfx.num_gfx_rings = GFX10_NUM_GFX_RINGS_NV1X;
++
+ adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
+
+ gfx_v10_0_set_kiq_pm4_funcs(adev);
+diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+index 9b415f6569a2..04fea3cc0cfa 100644
+--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
++++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+@@ -222,7 +222,7 @@ int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
+ {
+ int ret = 0;
+
+- if (min <= 0 && max <= 0)
++ if (min < 0 && max < 0)
+ return -EINVAL;
+
+ if (!smu_clk_dpm_is_enabled(smu, clk_type))
+diff --git a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
+index 094cfc46adac..29c11694406d 100644
+--- a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
++++ b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
+@@ -373,9 +373,6 @@ int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_
+ {
+ int ret = 0;
+
+- if (max < min)
+- return -EINVAL;
+-
+ switch (clk_type) {
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c
+index 6d4a29e99ae2..3035584f6dc7 100644
+--- a/drivers/gpu/drm/drm_client_modeset.c
++++ b/drivers/gpu/drm/drm_client_modeset.c
+@@ -951,7 +951,8 @@ bool drm_client_rotation(struct drm_mode_set *modeset, unsigned int *rotation)
+ * depending on the hardware this may require the framebuffer
+ * to be in a specific tiling format.
+ */
+- if ((*rotation & DRM_MODE_ROTATE_MASK) != DRM_MODE_ROTATE_180 ||
++ if (((*rotation & DRM_MODE_ROTATE_MASK) != DRM_MODE_ROTATE_0 &&
++ (*rotation & DRM_MODE_ROTATE_MASK) != DRM_MODE_ROTATE_180) ||
+ !plane->rotation_property)
+ return false;
+
+diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
+index 0810d3ef6961..6c35407a50eb 100644
+--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
++++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
+@@ -254,11 +254,16 @@ static void *drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem)
+ if (ret)
+ goto err_zero_use;
+
+- if (obj->import_attach)
++ if (obj->import_attach) {
+ shmem->vaddr = dma_buf_vmap(obj->import_attach->dmabuf);
+- else
++ } else {
++ pgprot_t prot = PAGE_KERNEL;
++
++ if (!shmem->map_cached)
++ prot = pgprot_writecombine(prot);
+ shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
+- VM_MAP, pgprot_writecombine(PAGE_KERNEL));
++ VM_MAP, prot);
++ }
+
+ if (!shmem->vaddr) {
+ DRM_DEBUG_KMS("Failed to vmap pages\n");
+@@ -537,7 +542,9 @@ int drm_gem_shmem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+ }
+
+ vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
+- vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
++ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
++ if (!shmem->map_cached)
++ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+ vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+ vma->vm_ops = &drm_gem_shmem_vm_ops;
+
+diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
+index 88232698d7a0..3fd35e6b9d53 100644
+--- a/drivers/gpu/drm/drm_modes.c
++++ b/drivers/gpu/drm/drm_modes.c
+@@ -1672,6 +1672,13 @@ static int drm_mode_parse_cmdline_options(char *str, size_t len,
+ }
+ }
+
++ if (!(rotation & DRM_MODE_ROTATE_MASK))
++ rotation |= DRM_MODE_ROTATE_0;
++
++ /* Make sure there is exactly one rotation defined */
++ if (!is_power_of_2(rotation & DRM_MODE_ROTATE_MASK))
++ return -EINVAL;
++
+ mode->rotation_reflection = rotation;
+
+ return 0;
+diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_ade_reg.h b/drivers/gpu/drm/hisilicon/kirin/kirin_ade_reg.h
+index 0da860200410..e2ac09894a6d 100644
+--- a/drivers/gpu/drm/hisilicon/kirin/kirin_ade_reg.h
++++ b/drivers/gpu/drm/hisilicon/kirin/kirin_ade_reg.h
+@@ -83,7 +83,6 @@
+ #define VSIZE_OFST 20
+ #define LDI_INT_EN 0x741C
+ #define FRAME_END_INT_EN_OFST 1
+-#define UNDERFLOW_INT_EN_OFST 2
+ #define LDI_CTRL 0x7420
+ #define BPP_OFST 3
+ #define DATA_GATE_EN BIT(2)
+diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
+index 73cd28a6ea07..86000127d4ee 100644
+--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
++++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
+@@ -46,7 +46,6 @@ struct ade_hw_ctx {
+ struct clk *media_noc_clk;
+ struct clk *ade_pix_clk;
+ struct reset_control *reset;
+- struct work_struct display_reset_wq;
+ bool power_on;
+ int irq;
+
+@@ -136,7 +135,6 @@ static void ade_init(struct ade_hw_ctx *ctx)
+ */
+ ade_update_bits(base + ADE_CTRL, FRM_END_START_OFST,
+ FRM_END_START_MASK, REG_EFFECTIVE_IN_ADEEN_FRMEND);
+- ade_update_bits(base + LDI_INT_EN, UNDERFLOW_INT_EN_OFST, MASK(1), 1);
+ }
+
+ static bool ade_crtc_mode_fixup(struct drm_crtc *crtc,
+@@ -304,17 +302,6 @@ static void ade_crtc_disable_vblank(struct drm_crtc *crtc)
+ MASK(1), 0);
+ }
+
+-static void drm_underflow_wq(struct work_struct *work)
+-{
+- struct ade_hw_ctx *ctx = container_of(work, struct ade_hw_ctx,
+- display_reset_wq);
+- struct drm_device *drm_dev = ctx->crtc->dev;
+- struct drm_atomic_state *state;
+-
+- state = drm_atomic_helper_suspend(drm_dev);
+- drm_atomic_helper_resume(drm_dev, state);
+-}
+-
+ static irqreturn_t ade_irq_handler(int irq, void *data)
+ {
+ struct ade_hw_ctx *ctx = data;
+@@ -331,12 +318,6 @@ static irqreturn_t ade_irq_handler(int irq, void *data)
+ MASK(1), 1);
+ drm_crtc_handle_vblank(crtc);
+ }
+- if (status & BIT(UNDERFLOW_INT_EN_OFST)) {
+- ade_update_bits(base + LDI_INT_CLR, UNDERFLOW_INT_EN_OFST,
+- MASK(1), 1);
+- DRM_ERROR("LDI underflow!");
+- schedule_work(&ctx->display_reset_wq);
+- }
+
+ return IRQ_HANDLED;
+ }
+@@ -919,7 +900,6 @@ static void *ade_hw_ctx_alloc(struct platform_device *pdev,
+ if (ret)
+ return ERR_PTR(-EIO);
+
+- INIT_WORK(&ctx->display_reset_wq, drm_underflow_wq);
+ ctx->crtc = crtc;
+
+ return ctx;
+diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
+index 12ba74788cce..597e45977349 100644
+--- a/drivers/gpu/drm/i915/display/intel_display_power.c
++++ b/drivers/gpu/drm/i915/display/intel_display_power.c
+@@ -4471,13 +4471,19 @@ static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
+
+ static void icl_mbus_init(struct drm_i915_private *dev_priv)
+ {
+- u32 val;
++ u32 mask, val;
+
+- val = MBUS_ABOX_BT_CREDIT_POOL1(16) |
+- MBUS_ABOX_BT_CREDIT_POOL2(16) |
+- MBUS_ABOX_B_CREDIT(1) |
+- MBUS_ABOX_BW_CREDIT(1);
++ mask = MBUS_ABOX_BT_CREDIT_POOL1_MASK |
++ MBUS_ABOX_BT_CREDIT_POOL2_MASK |
++ MBUS_ABOX_B_CREDIT_MASK |
++ MBUS_ABOX_BW_CREDIT_MASK;
+
++ val = I915_READ(MBUS_ABOX_CTL);
++ val &= ~mask;
++ val |= MBUS_ABOX_BT_CREDIT_POOL1(16) |
++ MBUS_ABOX_BT_CREDIT_POOL2(16) |
++ MBUS_ABOX_B_CREDIT(1) |
++ MBUS_ABOX_BW_CREDIT(1);
+ I915_WRITE(MBUS_ABOX_CTL, val);
+ }
+
+diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+index 29b2077b73d2..e81c2726f7fd 100644
+--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
++++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+@@ -567,7 +567,7 @@ static bool assert_mmap_offset(struct drm_i915_private *i915,
+
+ obj = i915_gem_object_create_internal(i915, size);
+ if (IS_ERR(obj))
+- return PTR_ERR(obj);
++ return false;
+
+ err = create_mmap_offset(obj);
+ i915_gem_object_put(obj);
+diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
+index 2ae14bc14931..cc917200bdeb 100644
+--- a/drivers/gpu/drm/i915/i915_perf.c
++++ b/drivers/gpu/drm/i915/i915_perf.c
+@@ -1950,9 +1950,10 @@ out:
+ return i915_vma_get(oa_bo->vma);
+ }
+
+-static int emit_oa_config(struct i915_perf_stream *stream,
+- struct i915_oa_config *oa_config,
+- struct intel_context *ce)
++static struct i915_request *
++emit_oa_config(struct i915_perf_stream *stream,
++ struct i915_oa_config *oa_config,
++ struct intel_context *ce)
+ {
+ struct i915_request *rq;
+ struct i915_vma *vma;
+@@ -1960,7 +1961,7 @@ static int emit_oa_config(struct i915_perf_stream *stream,
+
+ vma = get_oa_vma(stream, oa_config);
+ if (IS_ERR(vma))
+- return PTR_ERR(vma);
++ return ERR_CAST(vma);
+
+ err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
+ if (err)
+@@ -1983,13 +1984,17 @@ static int emit_oa_config(struct i915_perf_stream *stream,
+ err = rq->engine->emit_bb_start(rq,
+ vma->node.start, 0,
+ I915_DISPATCH_SECURE);
++ if (err)
++ goto err_add_request;
++
++ i915_request_get(rq);
+ err_add_request:
+ i915_request_add(rq);
+ err_vma_unpin:
+ i915_vma_unpin(vma);
+ err_vma_put:
+ i915_vma_put(vma);
+- return err;
++ return err ? ERR_PTR(err) : rq;
+ }
+
+ static struct intel_context *oa_context(struct i915_perf_stream *stream)
+@@ -1997,7 +2002,8 @@ static struct intel_context *oa_context(struct i915_perf_stream *stream)
+ return stream->pinned_ctx ?: stream->engine->kernel_context;
+ }
+
+-static int hsw_enable_metric_set(struct i915_perf_stream *stream)
++static struct i915_request *
++hsw_enable_metric_set(struct i915_perf_stream *stream)
+ {
+ struct intel_uncore *uncore = stream->uncore;
+
+@@ -2408,7 +2414,8 @@ static int lrc_configure_all_contexts(struct i915_perf_stream *stream,
+ return oa_configure_all_contexts(stream, regs, ARRAY_SIZE(regs));
+ }
+
+-static int gen8_enable_metric_set(struct i915_perf_stream *stream)
++static struct i915_request *
++gen8_enable_metric_set(struct i915_perf_stream *stream)
+ {
+ struct intel_uncore *uncore = stream->uncore;
+ struct i915_oa_config *oa_config = stream->oa_config;
+@@ -2450,12 +2457,13 @@ static int gen8_enable_metric_set(struct i915_perf_stream *stream)
+ */
+ ret = lrc_configure_all_contexts(stream, oa_config);
+ if (ret)
+- return ret;
++ return ERR_PTR(ret);
+
+ return emit_oa_config(stream, oa_config, oa_context(stream));
+ }
+
+-static int gen12_enable_metric_set(struct i915_perf_stream *stream)
++static struct i915_request *
++gen12_enable_metric_set(struct i915_perf_stream *stream)
+ {
+ struct intel_uncore *uncore = stream->uncore;
+ struct i915_oa_config *oa_config = stream->oa_config;
+@@ -2488,7 +2496,7 @@ static int gen12_enable_metric_set(struct i915_perf_stream *stream)
+ */
+ ret = gen12_configure_all_contexts(stream, oa_config);
+ if (ret)
+- return ret;
++ return ERR_PTR(ret);
+
+ /*
+ * For Gen12, performance counters are context
+@@ -2498,7 +2506,7 @@ static int gen12_enable_metric_set(struct i915_perf_stream *stream)
+ if (stream->ctx) {
+ ret = gen12_configure_oar_context(stream, true);
+ if (ret)
+- return ret;
++ return ERR_PTR(ret);
+ }
+
+ return emit_oa_config(stream, oa_config, oa_context(stream));
+@@ -2693,6 +2701,20 @@ static const struct i915_perf_stream_ops i915_oa_stream_ops = {
+ .read = i915_oa_read,
+ };
+
++static int i915_perf_stream_enable_sync(struct i915_perf_stream *stream)
++{
++ struct i915_request *rq;
++
++ rq = stream->perf->ops.enable_metric_set(stream);
++ if (IS_ERR(rq))
++ return PTR_ERR(rq);
++
++ i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
++ i915_request_put(rq);
++
++ return 0;
++}
++
+ /**
+ * i915_oa_stream_init - validate combined props for OA stream and init
+ * @stream: An i915 perf stream
+@@ -2826,7 +2848,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
+ stream->ops = &i915_oa_stream_ops;
+ perf->exclusive_stream = stream;
+
+- ret = perf->ops.enable_metric_set(stream);
++ ret = i915_perf_stream_enable_sync(stream);
+ if (ret) {
+ DRM_DEBUG("Unable to enable metric set\n");
+ goto err_enable;
+@@ -3144,7 +3166,7 @@ static long i915_perf_config_locked(struct i915_perf_stream *stream,
+ return -EINVAL;
+
+ if (config != stream->oa_config) {
+- int err;
++ struct i915_request *rq;
+
+ /*
+ * If OA is bound to a specific context, emit the
+@@ -3155,11 +3177,13 @@ static long i915_perf_config_locked(struct i915_perf_stream *stream,
+ * When set globally, we use a low priority kernel context,
+ * so it will effectively take effect when idle.
+ */
+- err = emit_oa_config(stream, config, oa_context(stream));
+- if (err == 0)
++ rq = emit_oa_config(stream, config, oa_context(stream));
++ if (!IS_ERR(rq)) {
+ config = xchg(&stream->oa_config, config);
+- else
+- ret = err;
++ i915_request_put(rq);
++ } else {
++ ret = PTR_ERR(rq);
++ }
+ }
+
+ i915_oa_config_put(config);
+diff --git a/drivers/gpu/drm/i915/i915_perf_types.h b/drivers/gpu/drm/i915/i915_perf_types.h
+index 74ddc20a0d37..171f95a97ef6 100644
+--- a/drivers/gpu/drm/i915/i915_perf_types.h
++++ b/drivers/gpu/drm/i915/i915_perf_types.h
+@@ -339,7 +339,8 @@ struct i915_oa_ops {
+ * counter reports being sampled. May apply system constraints such as
+ * disabling EU clock gating as required.
+ */
+- int (*enable_metric_set)(struct i915_perf_stream *stream);
++ struct i915_request *
++ (*enable_metric_set)(struct i915_perf_stream *stream);
+
+ /**
+ * @disable_metric_set: Remove system constraints associated with using
+diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
+index 7f21307cda75..c26b2faa15cf 100644
+--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
++++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
+@@ -358,6 +358,7 @@ int mtk_ddp_comp_init(struct device *dev, struct device_node *node,
+ /* Only DMA capable components need the LARB property */
+ comp->larb_dev = NULL;
+ if (type != MTK_DISP_OVL &&
++ type != MTK_DISP_OVL_2L &&
+ type != MTK_DISP_RDMA &&
+ type != MTK_DISP_WDMA)
+ return 0;
+diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
+index 05cc04f729d6..e1cc541e0ef2 100644
+--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
++++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
+@@ -1109,8 +1109,8 @@ static void mdp5_crtc_wait_for_pp_done(struct drm_crtc *crtc)
+ ret = wait_for_completion_timeout(&mdp5_crtc->pp_completion,
+ msecs_to_jiffies(50));
+ if (ret == 0)
+- dev_warn(dev->dev, "pp done time out, lm=%d\n",
+- mdp5_cstate->pipeline.mixer->lm);
++ dev_warn_ratelimited(dev->dev, "pp done time out, lm=%d\n",
++ mdp5_cstate->pipeline.mixer->lm);
+ }
+
+ static void mdp5_crtc_wait_for_flush_done(struct drm_crtc *crtc)
+diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
+index 271aa7bbca92..73127948f54d 100644
+--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
++++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
+@@ -336,7 +336,7 @@ static int dsi_mgr_connector_get_modes(struct drm_connector *connector)
+ return num;
+ }
+
+-static int dsi_mgr_connector_mode_valid(struct drm_connector *connector,
++static enum drm_mode_status dsi_mgr_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+ {
+ int id = dsi_mgr_connector_get_id(connector);
+@@ -479,6 +479,7 @@ static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge)
+ struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1);
+ struct mipi_dsi_host *host = msm_dsi->host;
+ struct drm_panel *panel = msm_dsi->panel;
++ struct msm_dsi_pll *src_pll;
+ bool is_dual_dsi = IS_DUAL_DSI();
+ int ret;
+
+@@ -519,6 +520,10 @@ static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge)
+ id, ret);
+ }
+
++ /* Save PLL status if it is a clock source */
++ src_pll = msm_dsi_phy_get_pll(msm_dsi->phy);
++ msm_dsi_pll_save_state(src_pll);
++
+ ret = msm_dsi_host_power_off(host);
+ if (ret)
+ pr_err("%s: host %d power off failed,%d\n", __func__, id, ret);
+diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+index b0cfa67d2a57..f509ebd77500 100644
+--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
++++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+@@ -724,10 +724,6 @@ void msm_dsi_phy_disable(struct msm_dsi_phy *phy)
+ if (!phy || !phy->cfg->ops.disable)
+ return;
+
+- /* Save PLL status if it is a clock source */
+- if (phy->usecase != MSM_DSI_PHY_SLAVE)
+- msm_dsi_pll_save_state(phy->pll);
+-
+ phy->cfg->ops.disable(phy);
+
+ dsi_phy_regulator_disable(phy);
+diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
+index 8f6100db90ed..aa9385d5bfff 100644
+--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
++++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
+@@ -411,6 +411,12 @@ static int dsi_pll_10nm_vco_prepare(struct clk_hw *hw)
+ if (pll_10nm->slave)
+ dsi_pll_enable_pll_bias(pll_10nm->slave);
+
++ rc = dsi_pll_10nm_vco_set_rate(hw,pll_10nm->vco_current_rate, 0);
++ if (rc) {
++ pr_err("vco_set_rate failed, rc=%d\n", rc);
++ return rc;
++ }
++
+ /* Start PLL */
+ pll_write(pll_10nm->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_PLL_CNTRL,
+ 0x01);
+diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
+index 3107b0738e40..5d75f8cf6477 100644
+--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
++++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
+@@ -601,33 +601,27 @@ static irqreturn_t panfrost_mmu_irq_handler_thread(int irq, void *data)
+ source_id = (fault_status >> 16);
+
+ /* Page fault only */
+- if ((status & mask) == BIT(i)) {
+- WARN_ON(exception_type < 0xC1 || exception_type > 0xC4);
+-
++ ret = -1;
++ if ((status & mask) == BIT(i) && (exception_type & 0xF8) == 0xC0)
+ ret = panfrost_mmu_map_fault_addr(pfdev, i, addr);
+- if (!ret) {
+- mmu_write(pfdev, MMU_INT_CLEAR, BIT(i));
+- status &= ~mask;
+- continue;
+- }
+- }
+
+- /* terminal fault, print info about the fault */
+- dev_err(pfdev->dev,
+- "Unhandled Page fault in AS%d at VA 0x%016llX\n"
+- "Reason: %s\n"
+- "raw fault status: 0x%X\n"
+- "decoded fault status: %s\n"
+- "exception type 0x%X: %s\n"
+- "access type 0x%X: %s\n"
+- "source id 0x%X\n",
+- i, addr,
+- "TODO",
+- fault_status,
+- (fault_status & (1 << 10) ? "DECODER FAULT" : "SLAVE FAULT"),
+- exception_type, panfrost_exception_name(pfdev, exception_type),
+- access_type, access_type_name(pfdev, fault_status),
+- source_id);
++ if (ret)
++ /* terminal fault, print info about the fault */
++ dev_err(pfdev->dev,
++ "Unhandled Page fault in AS%d at VA 0x%016llX\n"
++ "Reason: %s\n"
++ "raw fault status: 0x%X\n"
++ "decoded fault status: %s\n"
++ "exception type 0x%X: %s\n"
++ "access type 0x%X: %s\n"
++ "source id 0x%X\n",
++ i, addr,
++ "TODO",
++ fault_status,
++ (fault_status & (1 << 10) ? "DECODER FAULT" : "SLAVE FAULT"),
++ exception_type, panfrost_exception_name(pfdev, exception_type),
++ access_type, access_type_name(pfdev, fault_status),
++ source_id);
+
+ mmu_write(pfdev, MMU_INT_CLEAR, mask);
+
+diff --git a/drivers/gpu/drm/selftests/drm_cmdline_selftests.h b/drivers/gpu/drm/selftests/drm_cmdline_selftests.h
+index 6d61a0eb5d64..84e6bc050bf2 100644
+--- a/drivers/gpu/drm/selftests/drm_cmdline_selftests.h
++++ b/drivers/gpu/drm/selftests/drm_cmdline_selftests.h
+@@ -53,6 +53,7 @@ cmdline_test(drm_cmdline_test_rotate_0)
+ cmdline_test(drm_cmdline_test_rotate_90)
+ cmdline_test(drm_cmdline_test_rotate_180)
+ cmdline_test(drm_cmdline_test_rotate_270)
++cmdline_test(drm_cmdline_test_rotate_multiple)
+ cmdline_test(drm_cmdline_test_rotate_invalid_val)
+ cmdline_test(drm_cmdline_test_rotate_truncated)
+ cmdline_test(drm_cmdline_test_hmirror)
+diff --git a/drivers/gpu/drm/selftests/test-drm_cmdline_parser.c b/drivers/gpu/drm/selftests/test-drm_cmdline_parser.c
+index 013de9d27c35..035f86c5d648 100644
+--- a/drivers/gpu/drm/selftests/test-drm_cmdline_parser.c
++++ b/drivers/gpu/drm/selftests/test-drm_cmdline_parser.c
+@@ -856,6 +856,17 @@ static int drm_cmdline_test_rotate_270(void *ignored)
+ return 0;
+ }
+
++static int drm_cmdline_test_rotate_multiple(void *ignored)
++{
++ struct drm_cmdline_mode mode = { };
++
++ FAIL_ON(drm_mode_parse_command_line_for_connector("720x480,rotate=0,rotate=90",
++ &no_connector,
++ &mode));
++
++ return 0;
++}
++
+ static int drm_cmdline_test_rotate_invalid_val(void *ignored)
+ {
+ struct drm_cmdline_mode mode = { };
+@@ -888,7 +899,7 @@ static int drm_cmdline_test_hmirror(void *ignored)
+ FAIL_ON(!mode.specified);
+ FAIL_ON(mode.xres != 720);
+ FAIL_ON(mode.yres != 480);
+- FAIL_ON(mode.rotation_reflection != DRM_MODE_REFLECT_X);
++ FAIL_ON(mode.rotation_reflection != (DRM_MODE_ROTATE_0 | DRM_MODE_REFLECT_X));
+
+ FAIL_ON(mode.refresh_specified);
+
+@@ -913,7 +924,7 @@ static int drm_cmdline_test_vmirror(void *ignored)
+ FAIL_ON(!mode.specified);
+ FAIL_ON(mode.xres != 720);
+ FAIL_ON(mode.yres != 480);
+- FAIL_ON(mode.rotation_reflection != DRM_MODE_REFLECT_Y);
++ FAIL_ON(mode.rotation_reflection != (DRM_MODE_ROTATE_0 | DRM_MODE_REFLECT_Y));
+
+ FAIL_ON(mode.refresh_specified);
+
+diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.c b/drivers/gpu/drm/sun4i/sun8i_mixer.c
+index 8b803eb903b8..18b4881f4481 100644
+--- a/drivers/gpu/drm/sun4i/sun8i_mixer.c
++++ b/drivers/gpu/drm/sun4i/sun8i_mixer.c
+@@ -106,48 +106,128 @@ static const struct de2_fmt_info de2_formats[] = {
+ .rgb = true,
+ .csc = SUN8I_CSC_MODE_OFF,
+ },
++ {
++ /* for DE2 VI layer which ignores alpha */
++ .drm_fmt = DRM_FORMAT_XRGB4444,
++ .de2_fmt = SUN8I_MIXER_FBFMT_ARGB4444,
++ .rgb = true,
++ .csc = SUN8I_CSC_MODE_OFF,
++ },
+ {
+ .drm_fmt = DRM_FORMAT_ABGR4444,
+ .de2_fmt = SUN8I_MIXER_FBFMT_ABGR4444,
+ .rgb = true,
+ .csc = SUN8I_CSC_MODE_OFF,
+ },
++ {
++ /* for DE2 VI layer which ignores alpha */
++ .drm_fmt = DRM_FORMAT_XBGR4444,
++ .de2_fmt = SUN8I_MIXER_FBFMT_ABGR4444,
++ .rgb = true,
++ .csc = SUN8I_CSC_MODE_OFF,
++ },
+ {
+ .drm_fmt = DRM_FORMAT_RGBA4444,
+ .de2_fmt = SUN8I_MIXER_FBFMT_RGBA4444,
+ .rgb = true,
+ .csc = SUN8I_CSC_MODE_OFF,
+ },
++ {
++ /* for DE2 VI layer which ignores alpha */
++ .drm_fmt = DRM_FORMAT_RGBX4444,
++ .de2_fmt = SUN8I_MIXER_FBFMT_RGBA4444,
++ .rgb = true,
++ .csc = SUN8I_CSC_MODE_OFF,
++ },
+ {
+ .drm_fmt = DRM_FORMAT_BGRA4444,
+ .de2_fmt = SUN8I_MIXER_FBFMT_BGRA4444,
+ .rgb = true,
+ .csc = SUN8I_CSC_MODE_OFF,
+ },
++ {
++ /* for DE2 VI layer which ignores alpha */
++ .drm_fmt = DRM_FORMAT_BGRX4444,
++ .de2_fmt = SUN8I_MIXER_FBFMT_BGRA4444,
++ .rgb = true,
++ .csc = SUN8I_CSC_MODE_OFF,
++ },
+ {
+ .drm_fmt = DRM_FORMAT_ARGB1555,
+ .de2_fmt = SUN8I_MIXER_FBFMT_ARGB1555,
+ .rgb = true,
+ .csc = SUN8I_CSC_MODE_OFF,
+ },
++ {
++ /* for DE2 VI layer which ignores alpha */
++ .drm_fmt = DRM_FORMAT_XRGB1555,
++ .de2_fmt = SUN8I_MIXER_FBFMT_ARGB1555,
++ .rgb = true,
++ .csc = SUN8I_CSC_MODE_OFF,
++ },
+ {
+ .drm_fmt = DRM_FORMAT_ABGR1555,
+ .de2_fmt = SUN8I_MIXER_FBFMT_ABGR1555,
+ .rgb = true,
+ .csc = SUN8I_CSC_MODE_OFF,
+ },
++ {
++ /* for DE2 VI layer which ignores alpha */
++ .drm_fmt = DRM_FORMAT_XBGR1555,
++ .de2_fmt = SUN8I_MIXER_FBFMT_ABGR1555,
++ .rgb = true,
++ .csc = SUN8I_CSC_MODE_OFF,
++ },
+ {
+ .drm_fmt = DRM_FORMAT_RGBA5551,
+ .de2_fmt = SUN8I_MIXER_FBFMT_RGBA5551,
+ .rgb = true,
+ .csc = SUN8I_CSC_MODE_OFF,
+ },
++ {
++ /* for DE2 VI layer which ignores alpha */
++ .drm_fmt = DRM_FORMAT_RGBX5551,
++ .de2_fmt = SUN8I_MIXER_FBFMT_RGBA5551,
++ .rgb = true,
++ .csc = SUN8I_CSC_MODE_OFF,
++ },
+ {
+ .drm_fmt = DRM_FORMAT_BGRA5551,
+ .de2_fmt = SUN8I_MIXER_FBFMT_BGRA5551,
+ .rgb = true,
+ .csc = SUN8I_CSC_MODE_OFF,
+ },
++ {
++ /* for DE2 VI layer which ignores alpha */
++ .drm_fmt = DRM_FORMAT_BGRX5551,
++ .de2_fmt = SUN8I_MIXER_FBFMT_BGRA5551,
++ .rgb = true,
++ .csc = SUN8I_CSC_MODE_OFF,
++ },
++ {
++ .drm_fmt = DRM_FORMAT_ARGB2101010,
++ .de2_fmt = SUN8I_MIXER_FBFMT_ARGB2101010,
++ .rgb = true,
++ .csc = SUN8I_CSC_MODE_OFF,
++ },
++ {
++ .drm_fmt = DRM_FORMAT_ABGR2101010,
++ .de2_fmt = SUN8I_MIXER_FBFMT_ABGR2101010,
++ .rgb = true,
++ .csc = SUN8I_CSC_MODE_OFF,
++ },
++ {
++ .drm_fmt = DRM_FORMAT_RGBA1010102,
++ .de2_fmt = SUN8I_MIXER_FBFMT_RGBA1010102,
++ .rgb = true,
++ .csc = SUN8I_CSC_MODE_OFF,
++ },
++ {
++ .drm_fmt = DRM_FORMAT_BGRA1010102,
++ .de2_fmt = SUN8I_MIXER_FBFMT_BGRA1010102,
++ .rgb = true,
++ .csc = SUN8I_CSC_MODE_OFF,
++ },
+ {
+ .drm_fmt = DRM_FORMAT_UYVY,
+ .de2_fmt = SUN8I_MIXER_FBFMT_UYVY,
+@@ -196,12 +276,6 @@ static const struct de2_fmt_info de2_formats[] = {
+ .rgb = false,
+ .csc = SUN8I_CSC_MODE_YUV2RGB,
+ },
+- {
+- .drm_fmt = DRM_FORMAT_YUV444,
+- .de2_fmt = SUN8I_MIXER_FBFMT_RGB888,
+- .rgb = true,
+- .csc = SUN8I_CSC_MODE_YUV2RGB,
+- },
+ {
+ .drm_fmt = DRM_FORMAT_YUV422,
+ .de2_fmt = SUN8I_MIXER_FBFMT_YUV422,
+@@ -220,12 +294,6 @@ static const struct de2_fmt_info de2_formats[] = {
+ .rgb = false,
+ .csc = SUN8I_CSC_MODE_YUV2RGB,
+ },
+- {
+- .drm_fmt = DRM_FORMAT_YVU444,
+- .de2_fmt = SUN8I_MIXER_FBFMT_RGB888,
+- .rgb = true,
+- .csc = SUN8I_CSC_MODE_YVU2RGB,
+- },
+ {
+ .drm_fmt = DRM_FORMAT_YVU422,
+ .de2_fmt = SUN8I_MIXER_FBFMT_YUV422,
+@@ -244,6 +312,18 @@ static const struct de2_fmt_info de2_formats[] = {
+ .rgb = false,
+ .csc = SUN8I_CSC_MODE_YVU2RGB,
+ },
++ {
++ .drm_fmt = DRM_FORMAT_P010,
++ .de2_fmt = SUN8I_MIXER_FBFMT_P010_YUV,
++ .rgb = false,
++ .csc = SUN8I_CSC_MODE_YUV2RGB,
++ },
++ {
++ .drm_fmt = DRM_FORMAT_P210,
++ .de2_fmt = SUN8I_MIXER_FBFMT_P210_YUV,
++ .rgb = false,
++ .csc = SUN8I_CSC_MODE_YUV2RGB,
++ },
+ };
+
+ const struct de2_fmt_info *sun8i_mixer_format_info(u32 format)
+diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.h b/drivers/gpu/drm/sun4i/sun8i_mixer.h
+index c6cc94057faf..345b28b0a80a 100644
+--- a/drivers/gpu/drm/sun4i/sun8i_mixer.h
++++ b/drivers/gpu/drm/sun4i/sun8i_mixer.h
+@@ -93,6 +93,10 @@
+ #define SUN8I_MIXER_FBFMT_ABGR1555 17
+ #define SUN8I_MIXER_FBFMT_RGBA5551 18
+ #define SUN8I_MIXER_FBFMT_BGRA5551 19
++#define SUN8I_MIXER_FBFMT_ARGB2101010 20
++#define SUN8I_MIXER_FBFMT_ABGR2101010 21
++#define SUN8I_MIXER_FBFMT_RGBA1010102 22
++#define SUN8I_MIXER_FBFMT_BGRA1010102 23
+
+ #define SUN8I_MIXER_FBFMT_YUYV 0
+ #define SUN8I_MIXER_FBFMT_UYVY 1
+@@ -109,6 +113,13 @@
+ /* format 12 is semi-planar YUV411 UVUV */
+ /* format 13 is semi-planar YUV411 VUVU */
+ #define SUN8I_MIXER_FBFMT_YUV411 14
++/* format 15 doesn't exist */
++/* format 16 is P010 YVU */
++#define SUN8I_MIXER_FBFMT_P010_YUV 17
++/* format 18 is P210 YVU */
++#define SUN8I_MIXER_FBFMT_P210_YUV 19
++/* format 20 is packed YVU444 10-bit */
++/* format 21 is packed YUV444 10-bit */
+
+ /*
+ * Sub-engines listed bellow are unused for now. The EN registers are here only
+diff --git a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
+index 42d445d23773..b8398ca18b0f 100644
+--- a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
++++ b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
+@@ -398,24 +398,66 @@ static const struct drm_plane_funcs sun8i_vi_layer_funcs = {
+ };
+
+ /*
+- * While all RGB formats are supported, VI planes don't support
+- * alpha blending, so there is no point having formats with alpha
+- * channel if their opaque analog exist.
++ * While DE2 VI layer supports same RGB formats as UI layer, alpha
++ * channel is ignored. This structure lists all unique variants
++ * where alpha channel is replaced with "don't care" (X) channel.
+ */
+ static const u32 sun8i_vi_layer_formats[] = {
++ DRM_FORMAT_BGR565,
++ DRM_FORMAT_BGR888,
++ DRM_FORMAT_BGRX4444,
++ DRM_FORMAT_BGRX5551,
++ DRM_FORMAT_BGRX8888,
++ DRM_FORMAT_RGB565,
++ DRM_FORMAT_RGB888,
++ DRM_FORMAT_RGBX4444,
++ DRM_FORMAT_RGBX5551,
++ DRM_FORMAT_RGBX8888,
++ DRM_FORMAT_XBGR1555,
++ DRM_FORMAT_XBGR4444,
++ DRM_FORMAT_XBGR8888,
++ DRM_FORMAT_XRGB1555,
++ DRM_FORMAT_XRGB4444,
++ DRM_FORMAT_XRGB8888,
++
++ DRM_FORMAT_NV16,
++ DRM_FORMAT_NV12,
++ DRM_FORMAT_NV21,
++ DRM_FORMAT_NV61,
++ DRM_FORMAT_UYVY,
++ DRM_FORMAT_VYUY,
++ DRM_FORMAT_YUYV,
++ DRM_FORMAT_YVYU,
++ DRM_FORMAT_YUV411,
++ DRM_FORMAT_YUV420,
++ DRM_FORMAT_YUV422,
++ DRM_FORMAT_YVU411,
++ DRM_FORMAT_YVU420,
++ DRM_FORMAT_YVU422,
++};
++
++static const u32 sun8i_vi_layer_de3_formats[] = {
+ DRM_FORMAT_ABGR1555,
++ DRM_FORMAT_ABGR2101010,
+ DRM_FORMAT_ABGR4444,
++ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_ARGB1555,
++ DRM_FORMAT_ARGB2101010,
+ DRM_FORMAT_ARGB4444,
++ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_BGR565,
+ DRM_FORMAT_BGR888,
++ DRM_FORMAT_BGRA1010102,
+ DRM_FORMAT_BGRA5551,
+ DRM_FORMAT_BGRA4444,
++ DRM_FORMAT_BGRA8888,
+ DRM_FORMAT_BGRX8888,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_RGB888,
++ DRM_FORMAT_RGBA1010102,
+ DRM_FORMAT_RGBA4444,
+ DRM_FORMAT_RGBA5551,
++ DRM_FORMAT_RGBA8888,
+ DRM_FORMAT_RGBX8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_XRGB8888,
+@@ -424,6 +466,8 @@ static const u32 sun8i_vi_layer_formats[] = {
+ DRM_FORMAT_NV12,
+ DRM_FORMAT_NV21,
+ DRM_FORMAT_NV61,
++ DRM_FORMAT_P010,
++ DRM_FORMAT_P210,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_VYUY,
+ DRM_FORMAT_YUYV,
+@@ -431,11 +475,9 @@ static const u32 sun8i_vi_layer_formats[] = {
+ DRM_FORMAT_YUV411,
+ DRM_FORMAT_YUV420,
+ DRM_FORMAT_YUV422,
+- DRM_FORMAT_YUV444,
+ DRM_FORMAT_YVU411,
+ DRM_FORMAT_YVU420,
+ DRM_FORMAT_YVU422,
+- DRM_FORMAT_YVU444,
+ };
+
+ struct sun8i_vi_layer *sun8i_vi_layer_init_one(struct drm_device *drm,
+@@ -443,19 +485,27 @@ struct sun8i_vi_layer *sun8i_vi_layer_init_one(struct drm_device *drm,
+ int index)
+ {
+ u32 supported_encodings, supported_ranges;
++ unsigned int plane_cnt, format_count;
+ struct sun8i_vi_layer *layer;
+- unsigned int plane_cnt;
++ const u32 *formats;
+ int ret;
+
+ layer = devm_kzalloc(drm->dev, sizeof(*layer), GFP_KERNEL);
+ if (!layer)
+ return ERR_PTR(-ENOMEM);
+
++ if (mixer->cfg->is_de3) {
++ formats = sun8i_vi_layer_de3_formats;
++ format_count = ARRAY_SIZE(sun8i_vi_layer_de3_formats);
++ } else {
++ formats = sun8i_vi_layer_formats;
++ format_count = ARRAY_SIZE(sun8i_vi_layer_formats);
++ }
++
+ /* possible crtcs are set later */
+ ret = drm_universal_plane_init(drm, &layer->plane, 0,
+ &sun8i_vi_layer_funcs,
+- sun8i_vi_layer_formats,
+- ARRAY_SIZE(sun8i_vi_layer_formats),
++ formats, format_count,
+ NULL, DRM_PLANE_TYPE_OVERLAY, NULL);
+ if (ret) {
+ dev_err(drm->dev, "Couldn't initialize layer\n");
+diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
+index 6b0883a1776e..b40915638e13 100644
+--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
++++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
+@@ -516,6 +516,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
+ fbo->base.base.resv = &fbo->base.base._resv;
+
+ dma_resv_init(&fbo->base.base._resv);
++ fbo->base.base.dev = NULL;
+ ret = dma_resv_trylock(&fbo->base.base._resv);
+ WARN_ON(!ret);
+
+diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
+index 017a9e0fc3bb..3af7ec80c7da 100644
+--- a/drivers/gpu/drm/virtio/virtgpu_object.c
++++ b/drivers/gpu/drm/virtio/virtgpu_object.c
+@@ -42,8 +42,8 @@ static int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
+ * "f91a9dd35715 Fix unlinking resources from hash
+ * table." (Feb 2019) fixes the bug.
+ */
+- static int handle;
+- handle++;
++ static atomic_t seqno = ATOMIC_INIT(0);
++ int handle = atomic_inc_return(&seqno);
+ *resid = handle + 1;
+ } else {
+ int handle = ida_alloc(&vgdev->resource_ida, GFP_KERNEL);
+@@ -99,6 +99,7 @@ struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev,
+ return NULL;
+
+ bo->base.base.funcs = &virtio_gpu_gem_funcs;
++ bo->base.map_cached = true;
+ return &bo->base.base;
+ }
+
+diff --git a/drivers/hwmon/adt7462.c b/drivers/hwmon/adt7462.c
+index 9632e2e3c4bb..319a0519ebdb 100644
+--- a/drivers/hwmon/adt7462.c
++++ b/drivers/hwmon/adt7462.c
+@@ -413,7 +413,7 @@ static int ADT7462_REG_VOLT(struct adt7462_data *data, int which)
+ return 0x95;
+ break;
+ }
+- return -ENODEV;
++ return 0;
+ }
+
+ /* Provide labels for sysfs */
+diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
+index 455b3659d84b..4decc1d4cc99 100644
+--- a/drivers/infiniband/core/cm.c
++++ b/drivers/infiniband/core/cm.c
+@@ -1202,6 +1202,7 @@ struct ib_cm_id *ib_cm_insert_listen(struct ib_device *device,
+ /* Sharing an ib_cm_id with different handlers is not
+ * supported */
+ spin_unlock_irqrestore(&cm.lock, flags);
++ ib_destroy_cm_id(cm_id);
+ return ERR_PTR(-EINVAL);
+ }
+ refcount_inc(&cm_id_priv->refcount);
+diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
+index af1afc17b8bd..0b530646f1e5 100644
+--- a/drivers/infiniband/core/cma.c
++++ b/drivers/infiniband/core/cma.c
+@@ -3182,19 +3182,26 @@ int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
+ int ret;
+
+ id_priv = container_of(id, struct rdma_id_private, id);
++ memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr));
+ if (id_priv->state == RDMA_CM_IDLE) {
+ ret = cma_bind_addr(id, src_addr, dst_addr);
+- if (ret)
++ if (ret) {
++ memset(cma_dst_addr(id_priv), 0,
++ rdma_addr_size(dst_addr));
+ return ret;
++ }
+ }
+
+- if (cma_family(id_priv) != dst_addr->sa_family)
++ if (cma_family(id_priv) != dst_addr->sa_family) {
++ memset(cma_dst_addr(id_priv), 0, rdma_addr_size(dst_addr));
+ return -EINVAL;
++ }
+
+- if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY))
++ if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY)) {
++ memset(cma_dst_addr(id_priv), 0, rdma_addr_size(dst_addr));
+ return -EINVAL;
++ }
+
+- memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr));
+ if (cma_any_addr(dst_addr)) {
+ ret = cma_resolve_loopback(id_priv);
+ } else {
+diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
+index d657d90e618b..9dc7383eef49 100644
+--- a/drivers/infiniband/core/core_priv.h
++++ b/drivers/infiniband/core/core_priv.h
+@@ -338,6 +338,21 @@ static inline struct ib_qp *_ib_create_qp(struct ib_device *dev,
+ qp->pd = pd;
+ qp->uobject = uobj;
+ qp->real_qp = qp;
++
++ qp->qp_type = attr->qp_type;
++ qp->qp_context = attr->qp_context;
++ qp->rwq_ind_tbl = attr->rwq_ind_tbl;
++ qp->send_cq = attr->send_cq;
++ qp->recv_cq = attr->recv_cq;
++ qp->srq = attr->srq;
++ qp->rwq_ind_tbl = attr->rwq_ind_tbl;
++ qp->event_handler = attr->event_handler;
++
++ atomic_set(&qp->usecnt, 0);
++ spin_lock_init(&qp->mr_lock);
++ INIT_LIST_HEAD(&qp->rdma_mrs);
++ INIT_LIST_HEAD(&qp->sig_mrs);
++
+ /*
+ * We don't track XRC QPs for now, because they don't have PD
+ * and more importantly they are created internaly by driver,
+diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
+index ade71823370f..da8adadf4755 100644
+--- a/drivers/infiniband/core/iwcm.c
++++ b/drivers/infiniband/core/iwcm.c
+@@ -159,8 +159,10 @@ static void dealloc_work_entries(struct iwcm_id_private *cm_id_priv)
+ {
+ struct list_head *e, *tmp;
+
+- list_for_each_safe(e, tmp, &cm_id_priv->work_free_list)
++ list_for_each_safe(e, tmp, &cm_id_priv->work_free_list) {
++ list_del(e);
+ kfree(list_entry(e, struct iwcm_work, free_list));
++ }
+ }
+
+ static int alloc_work_entries(struct iwcm_id_private *cm_id_priv, int count)
+diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
+index cbf6041a5d4a..ba76709897bb 100644
+--- a/drivers/infiniband/core/nldev.c
++++ b/drivers/infiniband/core/nldev.c
+@@ -1756,6 +1756,8 @@ static int nldev_stat_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
+ if (ret)
+ goto err_msg;
+ } else {
++ if (!tb[RDMA_NLDEV_ATTR_RES_LQPN])
++ goto err_msg;
+ qpn = nla_get_u32(tb[RDMA_NLDEV_ATTR_RES_LQPN]);
+ if (tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]) {
+ cntn = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]);
+diff --git a/drivers/infiniband/core/rw.c b/drivers/infiniband/core/rw.c
+index 4fad732f9b3c..06e5b6787443 100644
+--- a/drivers/infiniband/core/rw.c
++++ b/drivers/infiniband/core/rw.c
+@@ -273,6 +273,23 @@ static int rdma_rw_init_single_wr(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
+ return 1;
+ }
+
++static void rdma_rw_unmap_sg(struct ib_device *dev, struct scatterlist *sg,
++ u32 sg_cnt, enum dma_data_direction dir)
++{
++ if (is_pci_p2pdma_page(sg_page(sg)))
++ pci_p2pdma_unmap_sg(dev->dma_device, sg, sg_cnt, dir);
++ else
++ ib_dma_unmap_sg(dev, sg, sg_cnt, dir);
++}
++
++static int rdma_rw_map_sg(struct ib_device *dev, struct scatterlist *sg,
++ u32 sg_cnt, enum dma_data_direction dir)
++{
++ if (is_pci_p2pdma_page(sg_page(sg)))
++ return pci_p2pdma_map_sg(dev->dma_device, sg, sg_cnt, dir);
++ return ib_dma_map_sg(dev, sg, sg_cnt, dir);
++}
++
+ /**
+ * rdma_rw_ctx_init - initialize a RDMA READ/WRITE context
+ * @ctx: context to initialize
+@@ -295,11 +312,7 @@ int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
+ struct ib_device *dev = qp->pd->device;
+ int ret;
+
+- if (is_pci_p2pdma_page(sg_page(sg)))
+- ret = pci_p2pdma_map_sg(dev->dma_device, sg, sg_cnt, dir);
+- else
+- ret = ib_dma_map_sg(dev, sg, sg_cnt, dir);
+-
++ ret = rdma_rw_map_sg(dev, sg, sg_cnt, dir);
+ if (!ret)
+ return -ENOMEM;
+ sg_cnt = ret;
+@@ -338,7 +351,7 @@ int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
+ return ret;
+
+ out_unmap_sg:
+- ib_dma_unmap_sg(dev, sg, sg_cnt, dir);
++ rdma_rw_unmap_sg(dev, sg, sg_cnt, dir);
+ return ret;
+ }
+ EXPORT_SYMBOL(rdma_rw_ctx_init);
+@@ -588,11 +601,7 @@ void rdma_rw_ctx_destroy(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
+ break;
+ }
+
+- if (is_pci_p2pdma_page(sg_page(sg)))
+- pci_p2pdma_unmap_sg(qp->pd->device->dma_device, sg,
+- sg_cnt, dir);
+- else
+- ib_dma_unmap_sg(qp->pd->device, sg, sg_cnt, dir);
++ rdma_rw_unmap_sg(qp->pd->device, sg, sg_cnt, dir);
+ }
+ EXPORT_SYMBOL(rdma_rw_ctx_destroy);
+
+diff --git a/drivers/infiniband/core/security.c b/drivers/infiniband/core/security.c
+index 2b4d80393bd0..2d5608315dc8 100644
+--- a/drivers/infiniband/core/security.c
++++ b/drivers/infiniband/core/security.c
+@@ -340,15 +340,19 @@ static struct ib_ports_pkeys *get_new_pps(const struct ib_qp *qp,
+ return NULL;
+
+ if (qp_attr_mask & IB_QP_PORT)
+- new_pps->main.port_num =
+- (qp_pps) ? qp_pps->main.port_num : qp_attr->port_num;
++ new_pps->main.port_num = qp_attr->port_num;
++ else if (qp_pps)
++ new_pps->main.port_num = qp_pps->main.port_num;
++
+ if (qp_attr_mask & IB_QP_PKEY_INDEX)
+- new_pps->main.pkey_index = (qp_pps) ? qp_pps->main.pkey_index :
+- qp_attr->pkey_index;
++ new_pps->main.pkey_index = qp_attr->pkey_index;
++ else if (qp_pps)
++ new_pps->main.pkey_index = qp_pps->main.pkey_index;
++
+ if ((qp_attr_mask & IB_QP_PKEY_INDEX) && (qp_attr_mask & IB_QP_PORT))
+ new_pps->main.state = IB_PORT_PKEY_VALID;
+
+- if (!(qp_attr_mask & (IB_QP_PKEY_INDEX || IB_QP_PORT)) && qp_pps) {
++ if (!(qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) && qp_pps) {
+ new_pps->main.port_num = qp_pps->main.port_num;
+ new_pps->main.pkey_index = qp_pps->main.pkey_index;
+ if (qp_pps->main.state != IB_PORT_PKEY_NOT_VALID)
+diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
+index b9baf7d0a5cb..eb22cb4f26b4 100644
+--- a/drivers/infiniband/core/umem_odp.c
++++ b/drivers/infiniband/core/umem_odp.c
+@@ -187,14 +187,28 @@ ib_umem_odp_alloc_child(struct ib_umem_odp *root, unsigned long addr,
+ odp_data->page_shift = PAGE_SHIFT;
+ odp_data->notifier.ops = ops;
+
++ /*
++ * A mmget must be held when registering a notifier, the owming_mm only
++ * has a mm_grab at this point.
++ */
++ if (!mmget_not_zero(umem->owning_mm)) {
++ ret = -EFAULT;
++ goto out_free;
++ }
++
+ odp_data->tgid = get_pid(root->tgid);
+ ret = ib_init_umem_odp(odp_data, ops);
+- if (ret) {
+- put_pid(odp_data->tgid);
+- kfree(odp_data);
+- return ERR_PTR(ret);
+- }
++ if (ret)
++ goto out_tgid;
++ mmput(umem->owning_mm);
+ return odp_data;
++
++out_tgid:
++ put_pid(odp_data->tgid);
++ mmput(umem->owning_mm);
++out_free:
++ kfree(odp_data);
++ return ERR_PTR(ret);
+ }
+ EXPORT_SYMBOL(ib_umem_odp_alloc_child);
+
+diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
+index 86e93ac46d75..c3a67ad82ddd 100644
+--- a/drivers/infiniband/core/uverbs_cmd.c
++++ b/drivers/infiniband/core/uverbs_cmd.c
+@@ -1433,17 +1433,7 @@ static int create_qp(struct uverbs_attr_bundle *attrs,
+ if (ret)
+ goto err_cb;
+
+- qp->pd = pd;
+- qp->send_cq = attr.send_cq;
+- qp->recv_cq = attr.recv_cq;
+- qp->srq = attr.srq;
+- qp->rwq_ind_tbl = ind_tbl;
+- qp->event_handler = attr.event_handler;
+- qp->qp_context = attr.qp_context;
+- qp->qp_type = attr.qp_type;
+- atomic_set(&qp->usecnt, 0);
+ atomic_inc(&pd->usecnt);
+- qp->port = 0;
+ if (attr.send_cq)
+ atomic_inc(&attr.send_cq->usecnt);
+ if (attr.recv_cq)
+diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
+index dd765e176cdd..eb1c68311e52 100644
+--- a/drivers/infiniband/core/verbs.c
++++ b/drivers/infiniband/core/verbs.c
+@@ -1182,16 +1182,6 @@ struct ib_qp *ib_create_qp_user(struct ib_pd *pd,
+ if (ret)
+ goto err;
+
+- qp->qp_type = qp_init_attr->qp_type;
+- qp->rwq_ind_tbl = qp_init_attr->rwq_ind_tbl;
+-
+- atomic_set(&qp->usecnt, 0);
+- qp->mrs_used = 0;
+- spin_lock_init(&qp->mr_lock);
+- INIT_LIST_HEAD(&qp->rdma_mrs);
+- INIT_LIST_HEAD(&qp->sig_mrs);
+- qp->port = 0;
+-
+ if (qp_init_attr->qp_type == IB_QPT_XRC_TGT) {
+ struct ib_qp *xrc_qp =
+ create_xrc_qp_user(qp, qp_init_attr, udata);
+diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
+index 089e201d7550..2f6323ad9c59 100644
+--- a/drivers/infiniband/hw/hfi1/verbs.c
++++ b/drivers/infiniband/hw/hfi1/verbs.c
+@@ -515,10 +515,11 @@ static inline void hfi1_handle_packet(struct hfi1_packet *packet,
+ opa_get_lid(packet->dlid, 9B));
+ if (!mcast)
+ goto drop;
++ rcu_read_lock();
+ list_for_each_entry_rcu(p, &mcast->qp_list, list) {
+ packet->qp = p->qp;
+ if (hfi1_do_pkey_check(packet))
+- goto drop;
++ goto unlock_drop;
+ spin_lock_irqsave(&packet->qp->r_lock, flags);
+ packet_handler = qp_ok(packet);
+ if (likely(packet_handler))
+@@ -527,6 +528,7 @@ static inline void hfi1_handle_packet(struct hfi1_packet *packet,
+ ibp->rvp.n_pkt_drops++;
+ spin_unlock_irqrestore(&packet->qp->r_lock, flags);
+ }
++ rcu_read_unlock();
+ /*
+ * Notify rvt_multicast_detach() if it is waiting for us
+ * to finish.
+diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
+index b06f32ff5748..b3561e4c44e8 100644
+--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
++++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
+@@ -629,6 +629,7 @@ struct mlx5_ib_mr {
+
+ /* For ODP and implicit */
+ atomic_t num_deferred_work;
++ wait_queue_head_t q_deferred_work;
+ struct xarray implicit_children;
+ union {
+ struct rcu_head rcu;
+diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
+index 8247c26a1ce9..443de6fb578b 100644
+--- a/drivers/infiniband/hw/mlx5/odp.c
++++ b/drivers/infiniband/hw/mlx5/odp.c
+@@ -197,7 +197,8 @@ static void free_implicit_child_mr(struct mlx5_ib_mr *mr, bool need_imr_xlt)
+ mr->parent = NULL;
+ mlx5_mr_cache_free(mr->dev, mr);
+ ib_umem_odp_release(odp);
+- atomic_dec(&imr->num_deferred_work);
++ if (atomic_dec_and_test(&imr->num_deferred_work))
++ wake_up(&imr->q_deferred_work);
+ }
+
+ static void free_implicit_child_mr_work(struct work_struct *work)
+@@ -516,6 +517,7 @@ struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
+ imr->umem = &umem_odp->umem;
+ imr->is_odp_implicit = true;
+ atomic_set(&imr->num_deferred_work, 0);
++ init_waitqueue_head(&imr->q_deferred_work);
+ xa_init(&imr->implicit_children);
+
+ err = mlx5_ib_update_xlt(imr, 0,
+@@ -573,10 +575,7 @@ void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr)
+ * under xa_lock while the child is in the xarray. Thus at this point
+ * it is only decreasing, and all work holding it is now on the wq.
+ */
+- if (atomic_read(&imr->num_deferred_work)) {
+- flush_workqueue(system_unbound_wq);
+- WARN_ON(atomic_read(&imr->num_deferred_work));
+- }
++ wait_event(imr->q_deferred_work, !atomic_read(&imr->num_deferred_work));
+
+ /*
+ * Fence the imr before we destroy the children. This allows us to
+@@ -607,10 +606,7 @@ void mlx5_ib_fence_odp_mr(struct mlx5_ib_mr *mr)
+ /* Wait for all running page-fault handlers to finish. */
+ synchronize_srcu(&mr->dev->odp_srcu);
+
+- if (atomic_read(&mr->num_deferred_work)) {
+- flush_workqueue(system_unbound_wq);
+- WARN_ON(atomic_read(&mr->num_deferred_work));
+- }
++ wait_event(mr->q_deferred_work, !atomic_read(&mr->num_deferred_work));
+
+ dma_fence_odp_mr(mr);
+ }
+@@ -1682,7 +1678,8 @@ static void destroy_prefetch_work(struct prefetch_mr_work *work)
+ u32 i;
+
+ for (i = 0; i < work->num_sge; ++i)
+- atomic_dec(&work->frags[i].mr->num_deferred_work);
++ if (atomic_dec_and_test(&work->frags[i].mr->num_deferred_work))
++ wake_up(&work->frags[i].mr->q_deferred_work);
+ kvfree(work);
+ }
+
+diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
+index 33778d451b82..5ef93f8f17a1 100644
+--- a/drivers/infiniband/hw/qib/qib_verbs.c
++++ b/drivers/infiniband/hw/qib/qib_verbs.c
+@@ -329,8 +329,10 @@ void qib_ib_rcv(struct qib_ctxtdata *rcd, void *rhdr, void *data, u32 tlen)
+ if (mcast == NULL)
+ goto drop;
+ this_cpu_inc(ibp->pmastats->n_multicast_rcv);
++ rcu_read_lock();
+ list_for_each_entry_rcu(p, &mcast->qp_list, list)
+ qib_qp_rcv(rcd, hdr, 1, data, tlen, p->qp);
++ rcu_read_unlock();
+ /*
+ * Notify rvt_multicast_detach() if it is waiting for us
+ * to finish.
+diff --git a/drivers/infiniband/sw/siw/siw_main.c b/drivers/infiniband/sw/siw/siw_main.c
+index c147f0613d95..1e2fdd21ba6e 100644
+--- a/drivers/infiniband/sw/siw/siw_main.c
++++ b/drivers/infiniband/sw/siw/siw_main.c
+@@ -388,6 +388,9 @@ static struct siw_device *siw_device_create(struct net_device *netdev)
+ { .max_segment_size = SZ_2G };
+ base_dev->num_comp_vectors = num_possible_cpus();
+
++ xa_init_flags(&sdev->qp_xa, XA_FLAGS_ALLOC1);
++ xa_init_flags(&sdev->mem_xa, XA_FLAGS_ALLOC1);
++
+ ib_set_device_ops(base_dev, &siw_device_ops);
+ rv = ib_device_set_netdev(base_dev, netdev, 1);
+ if (rv)
+@@ -415,9 +418,6 @@ static struct siw_device *siw_device_create(struct net_device *netdev)
+ sdev->attrs.max_srq_wr = SIW_MAX_SRQ_WR;
+ sdev->attrs.max_srq_sge = SIW_MAX_SGE;
+
+- xa_init_flags(&sdev->qp_xa, XA_FLAGS_ALLOC1);
+- xa_init_flags(&sdev->mem_xa, XA_FLAGS_ALLOC1);
+-
+ INIT_LIST_HEAD(&sdev->cep_list);
+ INIT_LIST_HEAD(&sdev->qp_list);
+
+diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
+index d7cbca8bf2cd..b5ae9f7c0510 100644
+--- a/drivers/iommu/amd_iommu_init.c
++++ b/drivers/iommu/amd_iommu_init.c
+@@ -2533,6 +2533,7 @@ static int __init early_amd_iommu_init(void)
+ struct acpi_table_header *ivrs_base;
+ acpi_status status;
+ int i, remap_cache_sz, ret = 0;
++ u32 pci_id;
+
+ if (!amd_iommu_detected)
+ return -ENODEV;
+@@ -2620,6 +2621,16 @@ static int __init early_amd_iommu_init(void)
+ if (ret)
+ goto out;
+
++ /* Disable IOMMU if there's Stoney Ridge graphics */
++ for (i = 0; i < 32; i++) {
++ pci_id = read_pci_config(0, i, 0, 0);
++ if ((pci_id & 0xffff) == 0x1002 && (pci_id >> 16) == 0x98e4) {
++ pr_info("Disable IOMMU on Stoney Ridge\n");
++ amd_iommu_disabled = true;
++ break;
++ }
++ }
++
+ /* Disable any previously enabled IOMMUs */
+ if (!is_kdump_kernel() || amd_iommu_disabled)
+ disable_iommus();
+@@ -2728,7 +2739,7 @@ static int __init state_next(void)
+ ret = early_amd_iommu_init();
+ init_state = ret ? IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED;
+ if (init_state == IOMMU_ACPI_FINISHED && amd_iommu_disabled) {
+- pr_info("AMD IOMMU disabled on kernel command-line\n");
++ pr_info("AMD IOMMU disabled\n");
+ init_state = IOMMU_CMDLINE_DISABLED;
+ ret = -EINVAL;
+ }
+diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
+index 2d32821b3a5b..f4be63671233 100644
+--- a/drivers/md/dm-cache-target.c
++++ b/drivers/md/dm-cache-target.c
+@@ -2846,8 +2846,8 @@ static void cache_postsuspend(struct dm_target *ti)
+ prevent_background_work(cache);
+ BUG_ON(atomic_read(&cache->nr_io_migrations));
+
+- cancel_delayed_work(&cache->waker);
+- flush_workqueue(cache->wq);
++ cancel_delayed_work_sync(&cache->waker);
++ drain_workqueue(cache->wq);
+ WARN_ON(cache->tracker.in_flight);
+
+ /*
+diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
+index b225b3e445fa..e1ad0b53f681 100644
+--- a/drivers/md/dm-integrity.c
++++ b/drivers/md/dm-integrity.c
+@@ -201,17 +201,19 @@ struct dm_integrity_c {
+ __u8 log2_blocks_per_bitmap_bit;
+
+ unsigned char mode;
+- int suspending;
+
+ int failed;
+
+ struct crypto_shash *internal_hash;
+
++ struct dm_target *ti;
++
+ /* these variables are locked with endio_wait.lock */
+ struct rb_root in_progress;
+ struct list_head wait_list;
+ wait_queue_head_t endio_wait;
+ struct workqueue_struct *wait_wq;
++ struct workqueue_struct *offload_wq;
+
+ unsigned char commit_seq;
+ commit_id_t commit_ids[N_COMMIT_IDS];
+@@ -1439,7 +1441,7 @@ static void dec_in_flight(struct dm_integrity_io *dio)
+ dio->range.logical_sector += dio->range.n_sectors;
+ bio_advance(bio, dio->range.n_sectors << SECTOR_SHIFT);
+ INIT_WORK(&dio->work, integrity_bio_wait);
+- queue_work(ic->wait_wq, &dio->work);
++ queue_work(ic->offload_wq, &dio->work);
+ return;
+ }
+ do_endio_flush(ic, dio);
+@@ -1865,7 +1867,7 @@ static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map
+
+ if (need_sync_io && from_map) {
+ INIT_WORK(&dio->work, integrity_bio_wait);
+- queue_work(ic->metadata_wq, &dio->work);
++ queue_work(ic->offload_wq, &dio->work);
+ return;
+ }
+
+@@ -2315,7 +2317,7 @@ static void integrity_writer(struct work_struct *w)
+ unsigned prev_free_sectors;
+
+ /* the following test is not needed, but it tests the replay code */
+- if (READ_ONCE(ic->suspending) && !ic->meta_dev)
++ if (unlikely(dm_suspended(ic->ti)) && !ic->meta_dev)
+ return;
+
+ spin_lock_irq(&ic->endio_wait.lock);
+@@ -2376,7 +2378,7 @@ static void integrity_recalc(struct work_struct *w)
+
+ next_chunk:
+
+- if (unlikely(READ_ONCE(ic->suspending)))
++ if (unlikely(dm_suspended(ic->ti)))
+ goto unlock_ret;
+
+ range.logical_sector = le64_to_cpu(ic->sb->recalc_sector);
+@@ -2501,7 +2503,7 @@ static void bitmap_block_work(struct work_struct *w)
+ dio->range.n_sectors, BITMAP_OP_TEST_ALL_SET)) {
+ remove_range(ic, &dio->range);
+ INIT_WORK(&dio->work, integrity_bio_wait);
+- queue_work(ic->wait_wq, &dio->work);
++ queue_work(ic->offload_wq, &dio->work);
+ } else {
+ block_bitmap_op(ic, ic->journal, dio->range.logical_sector,
+ dio->range.n_sectors, BITMAP_OP_SET);
+@@ -2524,7 +2526,7 @@ static void bitmap_block_work(struct work_struct *w)
+
+ remove_range(ic, &dio->range);
+ INIT_WORK(&dio->work, integrity_bio_wait);
+- queue_work(ic->wait_wq, &dio->work);
++ queue_work(ic->offload_wq, &dio->work);
+ }
+
+ queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, ic->bitmap_flush_interval);
+@@ -2804,8 +2806,6 @@ static void dm_integrity_postsuspend(struct dm_target *ti)
+
+ del_timer_sync(&ic->autocommit_timer);
+
+- WRITE_ONCE(ic->suspending, 1);
+-
+ if (ic->recalc_wq)
+ drain_workqueue(ic->recalc_wq);
+
+@@ -2834,8 +2834,6 @@ static void dm_integrity_postsuspend(struct dm_target *ti)
+ #endif
+ }
+
+- WRITE_ONCE(ic->suspending, 0);
+-
+ BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));
+
+ ic->journal_uptodate = true;
+@@ -2888,17 +2886,24 @@ static void dm_integrity_resume(struct dm_target *ti)
+ } else {
+ replay_journal(ic);
+ if (ic->mode == 'B') {
+- int mode;
+ ic->sb->flags |= cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
+ ic->sb->log2_blocks_per_bitmap_bit = ic->log2_blocks_per_bitmap_bit;
+ r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
+ if (unlikely(r))
+ dm_integrity_io_error(ic, "writing superblock", r);
+
+- mode = ic->recalculate_flag ? BITMAP_OP_SET : BITMAP_OP_CLEAR;
+- block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, mode);
+- block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, mode);
+- block_bitmap_op(ic, ic->may_write_bitmap, 0, ic->provided_data_sectors, mode);
++ block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
++ block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
++ block_bitmap_op(ic, ic->may_write_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
++ if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
++ le64_to_cpu(ic->sb->recalc_sector) < ic->provided_data_sectors) {
++ block_bitmap_op(ic, ic->journal, le64_to_cpu(ic->sb->recalc_sector),
++ ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET);
++ block_bitmap_op(ic, ic->recalc_bitmap, le64_to_cpu(ic->sb->recalc_sector),
++ ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET);
++ block_bitmap_op(ic, ic->may_write_bitmap, le64_to_cpu(ic->sb->recalc_sector),
++ ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET);
++ }
+ rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
+ ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
+ }
+@@ -2967,7 +2972,7 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type,
+ DMEMIT(" meta_device:%s", ic->meta_dev->name);
+ if (ic->sectors_per_block != 1)
+ DMEMIT(" block_size:%u", ic->sectors_per_block << SECTOR_SHIFT);
+- if (ic->recalculate_flag)
++ if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
+ DMEMIT(" recalculate");
+ DMEMIT(" journal_sectors:%u", ic->initial_sectors - SB_SECTORS);
+ DMEMIT(" interleave_sectors:%u", 1U << ic->sb->log2_interleave_sectors);
+@@ -3623,6 +3628,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
+ }
+ ti->private = ic;
+ ti->per_io_data_size = sizeof(struct dm_integrity_io);
++ ic->ti = ti;
+
+ ic->in_progress = RB_ROOT;
+ INIT_LIST_HEAD(&ic->wait_list);
+@@ -3836,6 +3842,14 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
+ goto bad;
+ }
+
++ ic->offload_wq = alloc_workqueue("dm-integrity-offload", WQ_MEM_RECLAIM,
++ METADATA_WORKQUEUE_MAX_ACTIVE);
++ if (!ic->offload_wq) {
++ ti->error = "Cannot allocate workqueue";
++ r = -ENOMEM;
++ goto bad;
++ }
++
+ ic->commit_wq = alloc_workqueue("dm-integrity-commit", WQ_MEM_RECLAIM, 1);
+ if (!ic->commit_wq) {
+ ti->error = "Cannot allocate workqueue";
+@@ -4140,6 +4154,8 @@ static void dm_integrity_dtr(struct dm_target *ti)
+ destroy_workqueue(ic->metadata_wq);
+ if (ic->wait_wq)
+ destroy_workqueue(ic->wait_wq);
++ if (ic->offload_wq)
++ destroy_workqueue(ic->offload_wq);
+ if (ic->commit_wq)
+ destroy_workqueue(ic->commit_wq);
+ if (ic->writer_wq)
+diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
+index 8bb723f1a569..4cd8868f8004 100644
+--- a/drivers/md/dm-thin-metadata.c
++++ b/drivers/md/dm-thin-metadata.c
+@@ -960,9 +960,9 @@ int dm_pool_metadata_close(struct dm_pool_metadata *pmd)
+ DMWARN("%s: __commit_transaction() failed, error = %d",
+ __func__, r);
+ }
++ pmd_write_unlock(pmd);
+ if (!pmd->fail_io)
+ __destroy_persistent_data_objects(pmd);
+- pmd_write_unlock(pmd);
+
+ kfree(pmd);
+ return 0;
+diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
+index 9b0a3bf6a4a1..cd9e4c8a023a 100644
+--- a/drivers/md/dm-writecache.c
++++ b/drivers/md/dm-writecache.c
+@@ -625,6 +625,12 @@ static void writecache_add_to_freelist(struct dm_writecache *wc, struct wc_entry
+ wc->freelist_size++;
+ }
+
++static inline void writecache_verify_watermark(struct dm_writecache *wc)
++{
++ if (unlikely(wc->freelist_size + wc->writeback_size <= wc->freelist_high_watermark))
++ queue_work(wc->writeback_wq, &wc->writeback_work);
++}
++
+ static struct wc_entry *writecache_pop_from_freelist(struct dm_writecache *wc)
+ {
+ struct wc_entry *e;
+@@ -646,8 +652,8 @@ static struct wc_entry *writecache_pop_from_freelist(struct dm_writecache *wc)
+ list_del(&e->lru);
+ }
+ wc->freelist_size--;
+- if (unlikely(wc->freelist_size + wc->writeback_size <= wc->freelist_high_watermark))
+- queue_work(wc->writeback_wq, &wc->writeback_work);
++
++ writecache_verify_watermark(wc);
+
+ return e;
+ }
+@@ -838,7 +844,7 @@ static void writecache_suspend(struct dm_target *ti)
+ }
+ wc_unlock(wc);
+
+- flush_workqueue(wc->writeback_wq);
++ drain_workqueue(wc->writeback_wq);
+
+ wc_lock(wc);
+ if (flush_on_suspend)
+@@ -961,6 +967,8 @@ erase_this:
+ writecache_commit_flushed(wc, false);
+ }
+
++ writecache_verify_watermark(wc);
++
+ wc_unlock(wc);
+ }
+
+diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
+index 70a1063161c0..b1e64cd31647 100644
+--- a/drivers/md/dm-zoned-target.c
++++ b/drivers/md/dm-zoned-target.c
+@@ -533,8 +533,9 @@ static int dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
+
+ /* Get the BIO chunk work. If one is not active yet, create one */
+ cw = radix_tree_lookup(&dmz->chunk_rxtree, chunk);
+- if (!cw) {
+-
++ if (cw) {
++ dmz_get_chunk_work(cw);
++ } else {
+ /* Create a new chunk work */
+ cw = kmalloc(sizeof(struct dm_chunk_work), GFP_NOIO);
+ if (unlikely(!cw)) {
+@@ -543,7 +544,7 @@ static int dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
+ }
+
+ INIT_WORK(&cw->work, dmz_chunk_work);
+- refcount_set(&cw->refcount, 0);
++ refcount_set(&cw->refcount, 1);
+ cw->target = dmz;
+ cw->chunk = chunk;
+ bio_list_init(&cw->bio_list);
+@@ -556,7 +557,6 @@ static int dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
+ }
+
+ bio_list_add(&cw->bio_list, bio);
+- dmz_get_chunk_work(cw);
+
+ dmz_reclaim_bio_acc(dmz->reclaim);
+ if (queue_work(dmz->chunk_wq, &cw->work))
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index b89f07ee2eff..0413018c8305 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -1788,7 +1788,8 @@ static int dm_any_congested(void *congested_data, int bdi_bits)
+ * With request-based DM we only need to check the
+ * top-level queue for congestion.
+ */
+- r = md->queue->backing_dev_info->wb.state & bdi_bits;
++ struct backing_dev_info *bdi = md->queue->backing_dev_info;
++ r = bdi->wb.congested->state & bdi_bits;
+ } else {
+ map = dm_get_live_table_fast(md);
+ if (map)
+@@ -1854,15 +1855,6 @@ static const struct dax_operations dm_dax_ops;
+
+ static void dm_wq_work(struct work_struct *work);
+
+-static void dm_init_normal_md_queue(struct mapped_device *md)
+-{
+- /*
+- * Initialize aspects of queue that aren't relevant for blk-mq
+- */
+- md->queue->backing_dev_info->congested_data = md;
+- md->queue->backing_dev_info->congested_fn = dm_any_congested;
+-}
+-
+ static void cleanup_mapped_device(struct mapped_device *md)
+ {
+ if (md->wq)
+@@ -2249,6 +2241,12 @@ struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
+ }
+ EXPORT_SYMBOL_GPL(dm_get_queue_limits);
+
++static void dm_init_congested_fn(struct mapped_device *md)
++{
++ md->queue->backing_dev_info->congested_data = md;
++ md->queue->backing_dev_info->congested_fn = dm_any_congested;
++}
++
+ /*
+ * Setup the DM device's queue based on md's type
+ */
+@@ -2265,11 +2263,12 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
+ DMERR("Cannot initialize queue for request-based dm-mq mapped device");
+ return r;
+ }
++ dm_init_congested_fn(md);
+ break;
+ case DM_TYPE_BIO_BASED:
+ case DM_TYPE_DAX_BIO_BASED:
+ case DM_TYPE_NVME_BIO_BASED:
+- dm_init_normal_md_queue(md);
++ dm_init_congested_fn(md);
+ break;
+ case DM_TYPE_NONE:
+ WARN_ON_ONCE(true);
+@@ -2368,6 +2367,7 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
+ map = dm_get_live_table(md, &srcu_idx);
+ if (!dm_suspended_md(md)) {
+ dm_table_presuspend_targets(map);
++ set_bit(DMF_SUSPENDED, &md->flags);
+ dm_table_postsuspend_targets(map);
+ }
+ /* dm_put_live_table must be before msleep, otherwise deadlock is possible */
+diff --git a/drivers/media/mc/mc-entity.c b/drivers/media/mc/mc-entity.c
+index 7c429ce98bae..668770e9f609 100644
+--- a/drivers/media/mc/mc-entity.c
++++ b/drivers/media/mc/mc-entity.c
+@@ -639,9 +639,9 @@ int media_get_pad_index(struct media_entity *entity, bool is_sink,
+ return -EINVAL;
+
+ for (i = 0; i < entity->num_pads; i++) {
+- if (entity->pads[i].flags == MEDIA_PAD_FL_SINK)
++ if (entity->pads[i].flags & MEDIA_PAD_FL_SINK)
+ pad_is_sink = true;
+- else if (entity->pads[i].flags == MEDIA_PAD_FL_SOURCE)
++ else if (entity->pads[i].flags & MEDIA_PAD_FL_SOURCE)
+ pad_is_sink = false;
+ else
+ continue; /* This is an error! */
+diff --git a/drivers/media/platform/vicodec/codec-v4l2-fwht.c b/drivers/media/platform/vicodec/codec-v4l2-fwht.c
+index 3c93d9232c3c..b6e39fbd8ad5 100644
+--- a/drivers/media/platform/vicodec/codec-v4l2-fwht.c
++++ b/drivers/media/platform/vicodec/codec-v4l2-fwht.c
+@@ -27,17 +27,17 @@ static const struct v4l2_fwht_pixfmt_info v4l2_fwht_pixfmts[] = {
+ { V4L2_PIX_FMT_BGR24, 3, 3, 1, 3, 3, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB},
+ { V4L2_PIX_FMT_RGB24, 3, 3, 1, 3, 3, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB},
+ { V4L2_PIX_FMT_HSV24, 3, 3, 1, 3, 3, 1, 1, 3, 1, FWHT_FL_PIXENC_HSV},
+- { V4L2_PIX_FMT_BGR32, 4, 4, 1, 4, 4, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB},
+- { V4L2_PIX_FMT_XBGR32, 4, 4, 1, 4, 4, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB},
++ { V4L2_PIX_FMT_BGR32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_RGB},
++ { V4L2_PIX_FMT_XBGR32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_RGB},
+ { V4L2_PIX_FMT_ABGR32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_RGB},
+- { V4L2_PIX_FMT_RGB32, 4, 4, 1, 4, 4, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB},
+- { V4L2_PIX_FMT_XRGB32, 4, 4, 1, 4, 4, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB},
++ { V4L2_PIX_FMT_RGB32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_RGB},
++ { V4L2_PIX_FMT_XRGB32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_RGB},
+ { V4L2_PIX_FMT_ARGB32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_RGB},
+- { V4L2_PIX_FMT_BGRX32, 4, 4, 1, 4, 4, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB},
++ { V4L2_PIX_FMT_BGRX32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_RGB},
+ { V4L2_PIX_FMT_BGRA32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_RGB},
+- { V4L2_PIX_FMT_RGBX32, 4, 4, 1, 4, 4, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB},
++ { V4L2_PIX_FMT_RGBX32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_RGB},
+ { V4L2_PIX_FMT_RGBA32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_RGB},
+- { V4L2_PIX_FMT_HSV32, 4, 4, 1, 4, 4, 1, 1, 3, 1, FWHT_FL_PIXENC_HSV},
++ { V4L2_PIX_FMT_HSV32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_HSV},
+ { V4L2_PIX_FMT_GREY, 1, 1, 1, 1, 0, 1, 1, 1, 1, FWHT_FL_PIXENC_RGB},
+ };
+
+@@ -175,22 +175,14 @@ static int prepare_raw_frame(struct fwht_raw_frame *rf,
+ case V4L2_PIX_FMT_RGB32:
+ case V4L2_PIX_FMT_XRGB32:
+ case V4L2_PIX_FMT_HSV32:
+- rf->cr = rf->luma + 1;
+- rf->cb = rf->cr + 2;
+- rf->luma += 2;
+- break;
+- case V4L2_PIX_FMT_BGR32:
+- case V4L2_PIX_FMT_XBGR32:
+- rf->cb = rf->luma;
+- rf->cr = rf->cb + 2;
+- rf->luma++;
+- break;
+ case V4L2_PIX_FMT_ARGB32:
+ rf->alpha = rf->luma;
+ rf->cr = rf->luma + 1;
+ rf->cb = rf->cr + 2;
+ rf->luma += 2;
+ break;
++ case V4L2_PIX_FMT_BGR32:
++ case V4L2_PIX_FMT_XBGR32:
+ case V4L2_PIX_FMT_ABGR32:
+ rf->cb = rf->luma;
+ rf->cr = rf->cb + 2;
+@@ -198,10 +190,6 @@ static int prepare_raw_frame(struct fwht_raw_frame *rf,
+ rf->alpha = rf->cr + 1;
+ break;
+ case V4L2_PIX_FMT_BGRX32:
+- rf->cb = rf->luma + 1;
+- rf->cr = rf->cb + 2;
+- rf->luma += 2;
+- break;
+ case V4L2_PIX_FMT_BGRA32:
+ rf->alpha = rf->luma;
+ rf->cb = rf->luma + 1;
+@@ -209,10 +197,6 @@ static int prepare_raw_frame(struct fwht_raw_frame *rf,
+ rf->luma += 2;
+ break;
+ case V4L2_PIX_FMT_RGBX32:
+- rf->cr = rf->luma;
+- rf->cb = rf->cr + 2;
+- rf->luma++;
+- break;
+ case V4L2_PIX_FMT_RGBA32:
+ rf->alpha = rf->luma + 3;
+ rf->cr = rf->luma;
+diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
+index 1afd9c6ad908..cc34c5ab7009 100644
+--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
++++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
+@@ -880,12 +880,12 @@ int v4l2_m2m_register_media_controller(struct v4l2_m2m_dev *m2m_dev,
+ goto err_rel_entity1;
+
+ /* Connect the three entities */
+- ret = media_create_pad_link(m2m_dev->source, 0, &m2m_dev->proc, 1,
++ ret = media_create_pad_link(m2m_dev->source, 0, &m2m_dev->proc, 0,
+ MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
+ if (ret)
+ goto err_rel_entity2;
+
+- ret = media_create_pad_link(&m2m_dev->proc, 0, &m2m_dev->sink, 0,
++ ret = media_create_pad_link(&m2m_dev->proc, 1, &m2m_dev->sink, 0,
+ MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
+ if (ret)
+ goto err_rm_links0;
+diff --git a/drivers/misc/habanalabs/device.c b/drivers/misc/habanalabs/device.c
+index b155e9549076..b680b0caa69b 100644
+--- a/drivers/misc/habanalabs/device.c
++++ b/drivers/misc/habanalabs/device.c
+@@ -598,7 +598,9 @@ int hl_device_set_debug_mode(struct hl_device *hdev, bool enable)
+ goto out;
+ }
+
+- hdev->asic_funcs->halt_coresight(hdev);
++ if (!hdev->hard_reset_pending)
++ hdev->asic_funcs->halt_coresight(hdev);
++
+ hdev->in_debug = 0;
+
+ goto out;
+@@ -1189,6 +1191,7 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
+ if (hdev->asic_funcs->get_hw_state(hdev) == HL_DEVICE_HW_STATE_DIRTY) {
+ dev_info(hdev->dev,
+ "H/W state is dirty, must reset before initializing\n");
++ hdev->asic_funcs->halt_engines(hdev, true);
+ hdev->asic_funcs->hw_fini(hdev, true);
+ }
+
+diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c
+index 7344e8a222ae..b8a8de24aaf7 100644
+--- a/drivers/misc/habanalabs/goya/goya.c
++++ b/drivers/misc/habanalabs/goya/goya.c
+@@ -895,6 +895,11 @@ void goya_init_dma_qmans(struct hl_device *hdev)
+ */
+ static void goya_disable_external_queues(struct hl_device *hdev)
+ {
++ struct goya_device *goya = hdev->asic_specific;
++
++ if (!(goya->hw_cap_initialized & HW_CAP_DMA))
++ return;
++
+ WREG32(mmDMA_QM_0_GLBL_CFG0, 0);
+ WREG32(mmDMA_QM_1_GLBL_CFG0, 0);
+ WREG32(mmDMA_QM_2_GLBL_CFG0, 0);
+@@ -956,6 +961,11 @@ static int goya_stop_external_queues(struct hl_device *hdev)
+ {
+ int rc, retval = 0;
+
++ struct goya_device *goya = hdev->asic_specific;
++
++ if (!(goya->hw_cap_initialized & HW_CAP_DMA))
++ return retval;
++
+ rc = goya_stop_queue(hdev,
+ mmDMA_QM_0_GLBL_CFG1,
+ mmDMA_QM_0_CP_STS,
+@@ -1744,9 +1754,18 @@ void goya_init_tpc_qmans(struct hl_device *hdev)
+ */
+ static void goya_disable_internal_queues(struct hl_device *hdev)
+ {
++ struct goya_device *goya = hdev->asic_specific;
++
++ if (!(goya->hw_cap_initialized & HW_CAP_MME))
++ goto disable_tpc;
++
+ WREG32(mmMME_QM_GLBL_CFG0, 0);
+ WREG32(mmMME_CMDQ_GLBL_CFG0, 0);
+
++disable_tpc:
++ if (!(goya->hw_cap_initialized & HW_CAP_TPC))
++ return;
++
+ WREG32(mmTPC0_QM_GLBL_CFG0, 0);
+ WREG32(mmTPC0_CMDQ_GLBL_CFG0, 0);
+
+@@ -1782,8 +1801,12 @@ static void goya_disable_internal_queues(struct hl_device *hdev)
+ */
+ static int goya_stop_internal_queues(struct hl_device *hdev)
+ {
++ struct goya_device *goya = hdev->asic_specific;
+ int rc, retval = 0;
+
++ if (!(goya->hw_cap_initialized & HW_CAP_MME))
++ goto stop_tpc;
++
+ /*
+ * Each queue (QMAN) is a separate H/W logic. That means that each
+ * QMAN can be stopped independently and failure to stop one does NOT
+@@ -1810,6 +1833,10 @@ static int goya_stop_internal_queues(struct hl_device *hdev)
+ retval = -EIO;
+ }
+
++stop_tpc:
++ if (!(goya->hw_cap_initialized & HW_CAP_TPC))
++ return retval;
++
+ rc = goya_stop_queue(hdev,
+ mmTPC0_QM_GLBL_CFG1,
+ mmTPC0_QM_CP_STS,
+@@ -1975,6 +2002,11 @@ static int goya_stop_internal_queues(struct hl_device *hdev)
+
+ static void goya_dma_stall(struct hl_device *hdev)
+ {
++ struct goya_device *goya = hdev->asic_specific;
++
++ if (!(goya->hw_cap_initialized & HW_CAP_DMA))
++ return;
++
+ WREG32(mmDMA_QM_0_GLBL_CFG1, 1 << DMA_QM_0_GLBL_CFG1_DMA_STOP_SHIFT);
+ WREG32(mmDMA_QM_1_GLBL_CFG1, 1 << DMA_QM_1_GLBL_CFG1_DMA_STOP_SHIFT);
+ WREG32(mmDMA_QM_2_GLBL_CFG1, 1 << DMA_QM_2_GLBL_CFG1_DMA_STOP_SHIFT);
+@@ -1984,6 +2016,11 @@ static void goya_dma_stall(struct hl_device *hdev)
+
+ static void goya_tpc_stall(struct hl_device *hdev)
+ {
++ struct goya_device *goya = hdev->asic_specific;
++
++ if (!(goya->hw_cap_initialized & HW_CAP_TPC))
++ return;
++
+ WREG32(mmTPC0_CFG_TPC_STALL, 1 << TPC0_CFG_TPC_STALL_V_SHIFT);
+ WREG32(mmTPC1_CFG_TPC_STALL, 1 << TPC1_CFG_TPC_STALL_V_SHIFT);
+ WREG32(mmTPC2_CFG_TPC_STALL, 1 << TPC2_CFG_TPC_STALL_V_SHIFT);
+@@ -1996,6 +2033,11 @@ static void goya_tpc_stall(struct hl_device *hdev)
+
+ static void goya_mme_stall(struct hl_device *hdev)
+ {
++ struct goya_device *goya = hdev->asic_specific;
++
++ if (!(goya->hw_cap_initialized & HW_CAP_MME))
++ return;
++
+ WREG32(mmMME_STALL, 0xFFFFFFFF);
+ }
+
+@@ -4648,8 +4690,6 @@ static int goya_memset_device_memory(struct hl_device *hdev, u64 addr, u64 size,
+
+ rc = goya_send_job_on_qman0(hdev, job);
+
+- hl_cb_put(job->patched_cb);
+-
+ hl_debugfs_remove_job(hdev, job);
+ kfree(job);
+ cb->cs_cnt--;
+diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
+index d1955543acd1..b0f5280a83cb 100644
+--- a/drivers/net/dsa/bcm_sf2.c
++++ b/drivers/net/dsa/bcm_sf2.c
+@@ -69,8 +69,7 @@ static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
+ /* Force link status for IMP port */
+ reg = core_readl(priv, offset);
+ reg |= (MII_SW_OR | LINK_STS);
+- if (priv->type == BCM7278_DEVICE_ID)
+- reg |= GMII_SPEED_UP_2G;
++ reg &= ~GMII_SPEED_UP_2G;
+ core_writel(priv, reg, offset);
+
+ /* Enable Broadcast, Multicast, Unicast forwarding to IMP port */
+diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
+index cc70c606b6ef..251767c31f7e 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
++++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
+@@ -337,6 +337,8 @@ struct aq_fw_ops {
+
+ void (*enable_ptp)(struct aq_hw_s *self, int enable);
+
++ void (*adjust_ptp)(struct aq_hw_s *self, uint64_t adj);
++
+ int (*set_eee_rate)(struct aq_hw_s *self, u32 speed);
+
+ int (*get_eee_rate)(struct aq_hw_s *self, u32 *rate,
+diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+index fce587aaba33..d20d91cdece8 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
++++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+@@ -1165,6 +1165,8 @@ static int hw_atl_b0_adj_sys_clock(struct aq_hw_s *self, s64 delta)
+ {
+ self->ptp_clk_offset += delta;
+
++ self->aq_fw_ops->adjust_ptp(self, self->ptp_clk_offset);
++
+ return 0;
+ }
+
+@@ -1215,7 +1217,7 @@ static int hw_atl_b0_gpio_pulse(struct aq_hw_s *self, u32 index,
+ fwreq.ptp_gpio_ctrl.index = index;
+ fwreq.ptp_gpio_ctrl.period = period;
+ /* Apply time offset */
+- fwreq.ptp_gpio_ctrl.start = start - self->ptp_clk_offset;
++ fwreq.ptp_gpio_ctrl.start = start;
+
+ size = sizeof(fwreq.msg_id) + sizeof(fwreq.ptp_gpio_ctrl);
+ return self->aq_fw_ops->send_fw_request(self, &fwreq, size);
+diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
+index f547baa6c954..354705f9bc49 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
++++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
+@@ -22,6 +22,7 @@
+ #define HW_ATL_MIF_ADDR 0x0208U
+ #define HW_ATL_MIF_VAL 0x020CU
+
++#define HW_ATL_MPI_RPC_ADDR 0x0334U
+ #define HW_ATL_RPC_CONTROL_ADR 0x0338U
+ #define HW_ATL_RPC_STATE_ADR 0x033CU
+
+@@ -53,15 +54,14 @@ enum mcp_area {
+ };
+
+ static int hw_atl_utils_ver_match(u32 ver_expected, u32 ver_actual);
+-
+ static int hw_atl_utils_mpi_set_state(struct aq_hw_s *self,
+ enum hal_atl_utils_fw_state_e state);
+-
+ static u32 hw_atl_utils_get_mpi_mbox_tid(struct aq_hw_s *self);
+ static u32 hw_atl_utils_mpi_get_state(struct aq_hw_s *self);
+ static u32 hw_atl_utils_mif_cmd_get(struct aq_hw_s *self);
+ static u32 hw_atl_utils_mif_addr_get(struct aq_hw_s *self);
+ static u32 hw_atl_utils_rpc_state_get(struct aq_hw_s *self);
++static u32 aq_fw1x_rpc_get(struct aq_hw_s *self);
+
+ int hw_atl_utils_initfw(struct aq_hw_s *self, const struct aq_fw_ops **fw_ops)
+ {
+@@ -476,6 +476,10 @@ static int hw_atl_utils_init_ucp(struct aq_hw_s *self,
+ self, self->mbox_addr,
+ self->mbox_addr != 0U,
+ 1000U, 10000U);
++ err = readx_poll_timeout_atomic(aq_fw1x_rpc_get, self,
++ self->rpc_addr,
++ self->rpc_addr != 0U,
++ 1000U, 100000U);
+
+ return err;
+ }
+@@ -531,6 +535,12 @@ int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self,
+ self, fw.val,
+ sw.tid == fw.tid,
+ 1000U, 100000U);
++ if (err < 0)
++ goto err_exit;
++
++ err = aq_hw_err_from_flags(self);
++ if (err < 0)
++ goto err_exit;
+
+ if (fw.len == 0xFFFFU) {
+ err = hw_atl_utils_fw_rpc_call(self, sw.len);
+@@ -1025,6 +1035,11 @@ static u32 hw_atl_utils_rpc_state_get(struct aq_hw_s *self)
+ return aq_hw_read_reg(self, HW_ATL_RPC_STATE_ADR);
+ }
+
++static u32 aq_fw1x_rpc_get(struct aq_hw_s *self)
++{
++ return aq_hw_read_reg(self, HW_ATL_MPI_RPC_ADDR);
++}
++
+ const struct aq_fw_ops aq_fw_1x_ops = {
+ .init = hw_atl_utils_mpi_create,
+ .deinit = hw_atl_fw1x_deinit,
+diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
+index 97ebf849695f..77a4ed64830f 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
++++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
+@@ -30,6 +30,9 @@
+ #define HW_ATL_FW3X_EXT_CONTROL_ADDR 0x378
+ #define HW_ATL_FW3X_EXT_STATE_ADDR 0x37c
+
++#define HW_ATL_FW3X_PTP_ADJ_LSW_ADDR 0x50a0
++#define HW_ATL_FW3X_PTP_ADJ_MSW_ADDR 0x50a4
++
+ #define HW_ATL_FW2X_CAP_PAUSE BIT(CAPS_HI_PAUSE)
+ #define HW_ATL_FW2X_CAP_ASYM_PAUSE BIT(CAPS_HI_ASYMMETRIC_PAUSE)
+ #define HW_ATL_FW2X_CAP_SLEEP_PROXY BIT(CAPS_HI_SLEEP_PROXY)
+@@ -475,6 +478,14 @@ static void aq_fw3x_enable_ptp(struct aq_hw_s *self, int enable)
+ aq_hw_write_reg(self, HW_ATL_FW3X_EXT_CONTROL_ADDR, ptp_opts);
+ }
+
++static void aq_fw3x_adjust_ptp(struct aq_hw_s *self, uint64_t adj)
++{
++ aq_hw_write_reg(self, HW_ATL_FW3X_PTP_ADJ_LSW_ADDR,
++ (adj >> 0) & 0xffffffff);
++ aq_hw_write_reg(self, HW_ATL_FW3X_PTP_ADJ_MSW_ADDR,
++ (adj >> 32) & 0xffffffff);
++}
++
+ static int aq_fw2x_led_control(struct aq_hw_s *self, u32 mode)
+ {
+ if (self->fw_ver_actual < HW_ATL_FW_VER_LED)
+@@ -633,4 +644,5 @@ const struct aq_fw_ops aq_fw_2x_ops = {
+ .enable_ptp = aq_fw3x_enable_ptp,
+ .led_control = aq_fw2x_led_control,
+ .set_phyloopback = aq_fw2x_set_phyloopback,
++ .adjust_ptp = aq_fw3x_adjust_ptp,
+ };
+diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+index c4f6ec0cd183..00751771f662 100644
+--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
++++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+@@ -410,10 +410,19 @@ void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable)
+ lmac = &bgx->lmac[lmacid];
+
+ cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
+- if (enable)
++ if (enable) {
+ cfg |= CMR_PKT_RX_EN | CMR_PKT_TX_EN;
+- else
++
++ /* enable TX FIFO Underflow interrupt */
++ bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_INT_ENA_W1S,
++ GMI_TXX_INT_UNDFLW);
++ } else {
+ cfg &= ~(CMR_PKT_RX_EN | CMR_PKT_TX_EN);
++
++ /* Disable TX FIFO Underflow interrupt */
++ bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_INT_ENA_W1C,
++ GMI_TXX_INT_UNDFLW);
++ }
+ bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
+
+ if (bgx->is_rgx)
+@@ -1535,6 +1544,48 @@ static int bgx_init_phy(struct bgx *bgx)
+ return bgx_init_of_phy(bgx);
+ }
+
++static irqreturn_t bgx_intr_handler(int irq, void *data)
++{
++ struct bgx *bgx = (struct bgx *)data;
++ u64 status, val;
++ int lmac;
++
++ for (lmac = 0; lmac < bgx->lmac_count; lmac++) {
++ status = bgx_reg_read(bgx, lmac, BGX_GMP_GMI_TXX_INT);
++ if (status & GMI_TXX_INT_UNDFLW) {
++ pci_err(bgx->pdev, "BGX%d lmac%d UNDFLW\n",
++ bgx->bgx_id, lmac);
++ val = bgx_reg_read(bgx, lmac, BGX_CMRX_CFG);
++ val &= ~CMR_EN;
++ bgx_reg_write(bgx, lmac, BGX_CMRX_CFG, val);
++ val |= CMR_EN;
++ bgx_reg_write(bgx, lmac, BGX_CMRX_CFG, val);
++ }
++ /* clear interrupts */
++ bgx_reg_write(bgx, lmac, BGX_GMP_GMI_TXX_INT, status);
++ }
++
++ return IRQ_HANDLED;
++}
++
++static void bgx_register_intr(struct pci_dev *pdev)
++{
++ struct bgx *bgx = pci_get_drvdata(pdev);
++ int ret;
++
++ ret = pci_alloc_irq_vectors(pdev, BGX_LMAC_VEC_OFFSET,
++ BGX_LMAC_VEC_OFFSET, PCI_IRQ_ALL_TYPES);
++ if (ret < 0) {
++ pci_err(pdev, "Req for #%d msix vectors failed\n",
++ BGX_LMAC_VEC_OFFSET);
++ return;
++ }
++ ret = pci_request_irq(pdev, GMPX_GMI_TX_INT, bgx_intr_handler, NULL,
++ bgx, "BGX%d", bgx->bgx_id);
++ if (ret)
++ pci_free_irq(pdev, GMPX_GMI_TX_INT, bgx);
++}
++
+ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ {
+ int err;
+@@ -1550,7 +1601,7 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+
+ pci_set_drvdata(pdev, bgx);
+
+- err = pci_enable_device(pdev);
++ err = pcim_enable_device(pdev);
+ if (err) {
+ dev_err(dev, "Failed to enable PCI device\n");
+ pci_set_drvdata(pdev, NULL);
+@@ -1604,6 +1655,8 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+
+ bgx_init_hw(bgx);
+
++ bgx_register_intr(pdev);
++
+ /* Enable all LMACs */
+ for (lmac = 0; lmac < bgx->lmac_count; lmac++) {
+ err = bgx_lmac_enable(bgx, lmac);
+@@ -1620,6 +1673,7 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+
+ err_enable:
+ bgx_vnic[bgx->bgx_id] = NULL;
++ pci_free_irq(pdev, GMPX_GMI_TX_INT, bgx);
+ err_release_regions:
+ pci_release_regions(pdev);
+ err_disable_device:
+@@ -1637,6 +1691,8 @@ static void bgx_remove(struct pci_dev *pdev)
+ for (lmac = 0; lmac < bgx->lmac_count; lmac++)
+ bgx_lmac_disable(bgx, lmac);
+
++ pci_free_irq(pdev, GMPX_GMI_TX_INT, bgx);
++
+ bgx_vnic[bgx->bgx_id] = NULL;
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
+index 25888706bdcd..cdea49392185 100644
+--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
++++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
+@@ -180,6 +180,15 @@
+ #define BGX_GMP_GMI_TXX_BURST 0x38228
+ #define BGX_GMP_GMI_TXX_MIN_PKT 0x38240
+ #define BGX_GMP_GMI_TXX_SGMII_CTL 0x38300
++#define BGX_GMP_GMI_TXX_INT 0x38500
++#define BGX_GMP_GMI_TXX_INT_W1S 0x38508
++#define BGX_GMP_GMI_TXX_INT_ENA_W1C 0x38510
++#define BGX_GMP_GMI_TXX_INT_ENA_W1S 0x38518
++#define GMI_TXX_INT_PTP_LOST BIT_ULL(4)
++#define GMI_TXX_INT_LATE_COL BIT_ULL(3)
++#define GMI_TXX_INT_XSDEF BIT_ULL(2)
++#define GMI_TXX_INT_XSCOL BIT_ULL(1)
++#define GMI_TXX_INT_UNDFLW BIT_ULL(0)
+
+ #define BGX_MSIX_VEC_0_29_ADDR 0x400000 /* +(0..29) << 4 */
+ #define BGX_MSIX_VEC_0_29_CTL 0x400008
+diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
+index cce90b5925d9..70060c51854f 100644
+--- a/drivers/net/ethernet/davicom/dm9000.c
++++ b/drivers/net/ethernet/davicom/dm9000.c
+@@ -1405,6 +1405,8 @@ static struct dm9000_plat_data *dm9000_parse_dt(struct device *dev)
+ mac_addr = of_get_mac_address(np);
+ if (!IS_ERR(mac_addr))
+ ether_addr_copy(pdata->dev_addr, mac_addr);
++ else if (PTR_ERR(mac_addr) == -EPROBE_DEFER)
++ return ERR_CAST(mac_addr);
+
+ return pdata;
+ }
+diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
+index 9bd166e3dff3..594f6dbb2110 100644
+--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
++++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
+@@ -2977,13 +2977,6 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
+ else
+ return -EINVAL;
+
+- /* Tell the OS link is going down, the link will go back up when fw
+- * says it is ready asynchronously
+- */
+- ice_print_link_msg(vsi, false);
+- netif_carrier_off(netdev);
+- netif_tx_stop_all_queues(netdev);
+-
+ /* Set the FC mode and only restart AN if link is up */
+ status = ice_set_fc(pi, &aq_failures, link_up);
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
+index c6c7d1defbd7..aade62a9ee5c 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
+@@ -2307,7 +2307,9 @@ static int dr_ste_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_cmd_vport_cap *vport_cap;
+ struct mlx5dr_domain *dmn = sb->dmn;
+ struct mlx5dr_cmd_caps *caps;
++ u8 *bit_mask = sb->bit_mask;
+ u8 *tag = hw_ste->tag;
++ bool source_gvmi_set;
+
+ DR_STE_SET_TAG(src_gvmi_qp, tag, source_qp, misc, source_sqn);
+
+@@ -2328,7 +2330,8 @@ static int dr_ste_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
+ if (!vport_cap)
+ return -EINVAL;
+
+- if (vport_cap->vport_gvmi)
++ source_gvmi_set = MLX5_GET(ste_src_gvmi_qp, bit_mask, source_gvmi);
++ if (vport_cap->vport_gvmi && source_gvmi_set)
+ MLX5_SET(ste_src_gvmi_qp, tag, source_gvmi, vport_cap->vport_gvmi);
+
+ misc->source_eswitch_owner_vhca_id = 0;
+diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c
+index a41a90c589db..1c9e70c8cc30 100644
+--- a/drivers/net/ethernet/micrel/ks8851_mll.c
++++ b/drivers/net/ethernet/micrel/ks8851_mll.c
+@@ -156,24 +156,6 @@ static int msg_enable;
+ * chip is busy transferring packet data (RX/TX FIFO accesses).
+ */
+
+-/**
+- * ks_rdreg8 - read 8 bit register from device
+- * @ks : The chip information
+- * @offset: The register address
+- *
+- * Read a 8bit register from the chip, returning the result
+- */
+-static u8 ks_rdreg8(struct ks_net *ks, int offset)
+-{
+- u16 data;
+- u8 shift_bit = offset & 0x03;
+- u8 shift_data = (offset & 1) << 3;
+- ks->cmd_reg_cache = (u16) offset | (u16)(BE0 << shift_bit);
+- iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
+- data = ioread16(ks->hw_addr);
+- return (u8)(data >> shift_data);
+-}
+-
+ /**
+ * ks_rdreg16 - read 16 bit register from device
+ * @ks : The chip information
+@@ -184,27 +166,11 @@ static u8 ks_rdreg8(struct ks_net *ks, int offset)
+
+ static u16 ks_rdreg16(struct ks_net *ks, int offset)
+ {
+- ks->cmd_reg_cache = (u16)offset | ((BE1 | BE0) << (offset & 0x02));
++ ks->cmd_reg_cache = (u16)offset | ((BE3 | BE2) >> (offset & 0x02));
+ iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
+ return ioread16(ks->hw_addr);
+ }
+
+-/**
+- * ks_wrreg8 - write 8bit register value to chip
+- * @ks: The chip information
+- * @offset: The register address
+- * @value: The value to write
+- *
+- */
+-static void ks_wrreg8(struct ks_net *ks, int offset, u8 value)
+-{
+- u8 shift_bit = (offset & 0x03);
+- u16 value_write = (u16)(value << ((offset & 1) << 3));
+- ks->cmd_reg_cache = (u16)offset | (BE0 << shift_bit);
+- iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
+- iowrite16(value_write, ks->hw_addr);
+-}
+-
+ /**
+ * ks_wrreg16 - write 16bit register value to chip
+ * @ks: The chip information
+@@ -215,7 +181,7 @@ static void ks_wrreg8(struct ks_net *ks, int offset, u8 value)
+
+ static void ks_wrreg16(struct ks_net *ks, int offset, u16 value)
+ {
+- ks->cmd_reg_cache = (u16)offset | ((BE1 | BE0) << (offset & 0x02));
++ ks->cmd_reg_cache = (u16)offset | ((BE3 | BE2) >> (offset & 0x02));
+ iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
+ iowrite16(value, ks->hw_addr);
+ }
+@@ -231,7 +197,7 @@ static inline void ks_inblk(struct ks_net *ks, u16 *wptr, u32 len)
+ {
+ len >>= 1;
+ while (len--)
+- *wptr++ = (u16)ioread16(ks->hw_addr);
++ *wptr++ = be16_to_cpu(ioread16(ks->hw_addr));
+ }
+
+ /**
+@@ -245,7 +211,7 @@ static inline void ks_outblk(struct ks_net *ks, u16 *wptr, u32 len)
+ {
+ len >>= 1;
+ while (len--)
+- iowrite16(*wptr++, ks->hw_addr);
++ iowrite16(cpu_to_be16(*wptr++), ks->hw_addr);
+ }
+
+ static void ks_disable_int(struct ks_net *ks)
+@@ -324,8 +290,7 @@ static void ks_read_config(struct ks_net *ks)
+ u16 reg_data = 0;
+
+ /* Regardless of bus width, 8 bit read should always work.*/
+- reg_data = ks_rdreg8(ks, KS_CCR) & 0x00FF;
+- reg_data |= ks_rdreg8(ks, KS_CCR+1) << 8;
++ reg_data = ks_rdreg16(ks, KS_CCR);
+
+ /* addr/data bus are multiplexed */
+ ks->sharedbus = (reg_data & CCR_SHARED) == CCR_SHARED;
+@@ -429,7 +394,7 @@ static inline void ks_read_qmu(struct ks_net *ks, u16 *buf, u32 len)
+
+ /* 1. set sudo DMA mode */
+ ks_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI);
+- ks_wrreg8(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_SDA) & 0xff);
++ ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_SDA);
+
+ /* 2. read prepend data */
+ /**
+@@ -446,7 +411,7 @@ static inline void ks_read_qmu(struct ks_net *ks, u16 *buf, u32 len)
+ ks_inblk(ks, buf, ALIGN(len, 4));
+
+ /* 4. reset sudo DMA Mode */
+- ks_wrreg8(ks, KS_RXQCR, ks->rc_rxqcr);
++ ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);
+ }
+
+ /**
+@@ -679,13 +644,13 @@ static void ks_write_qmu(struct ks_net *ks, u8 *pdata, u16 len)
+ ks->txh.txw[1] = cpu_to_le16(len);
+
+ /* 1. set sudo-DMA mode */
+- ks_wrreg8(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_SDA) & 0xff);
++ ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_SDA);
+ /* 2. write status/lenth info */
+ ks_outblk(ks, ks->txh.txw, 4);
+ /* 3. write pkt data */
+ ks_outblk(ks, (u16 *)pdata, ALIGN(len, 4));
+ /* 4. reset sudo-DMA mode */
+- ks_wrreg8(ks, KS_RXQCR, ks->rc_rxqcr);
++ ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);
+ /* 5. Enqueue Tx(move the pkt from TX buffer into TXQ) */
+ ks_wrreg16(ks, KS_TXQCR, TXQCR_METFE);
+ /* 6. wait until TXQCR_METFE is auto-cleared */
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index 6bc1bdb137ae..caa4d4c687b9 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -4289,6 +4289,8 @@ static void stmmac_init_fs(struct net_device *dev)
+ {
+ struct stmmac_priv *priv = netdev_priv(dev);
+
++ rtnl_lock();
++
+ /* Create per netdev entries */
+ priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
+
+@@ -4300,14 +4302,13 @@ static void stmmac_init_fs(struct net_device *dev)
+ debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
+ &stmmac_dma_cap_fops);
+
+- register_netdevice_notifier(&stmmac_notifier);
++ rtnl_unlock();
+ }
+
+ static void stmmac_exit_fs(struct net_device *dev)
+ {
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+- unregister_netdevice_notifier(&stmmac_notifier);
+ debugfs_remove_recursive(priv->dbgfs_dir);
+ }
+ #endif /* CONFIG_DEBUG_FS */
+@@ -4825,14 +4826,14 @@ int stmmac_dvr_remove(struct device *dev)
+
+ netdev_info(priv->dev, "%s: removing driver", __func__);
+
+-#ifdef CONFIG_DEBUG_FS
+- stmmac_exit_fs(ndev);
+-#endif
+ stmmac_stop_all_dma(priv);
+
+ stmmac_mac_set(priv, priv->ioaddr, false);
+ netif_carrier_off(ndev);
+ unregister_netdev(ndev);
++#ifdef CONFIG_DEBUG_FS
++ stmmac_exit_fs(ndev);
++#endif
+ phylink_destroy(priv->phylink);
+ if (priv->plat->stmmac_rst)
+ reset_control_assert(priv->plat->stmmac_rst);
+@@ -5052,6 +5053,7 @@ static int __init stmmac_init(void)
+ /* Create debugfs main directory if it doesn't exist yet */
+ if (!stmmac_fs_dir)
+ stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
++ register_netdevice_notifier(&stmmac_notifier);
+ #endif
+
+ return 0;
+@@ -5060,6 +5062,7 @@ static int __init stmmac_init(void)
+ static void __exit stmmac_exit(void)
+ {
+ #ifdef CONFIG_DEBUG_FS
++ unregister_netdevice_notifier(&stmmac_notifier);
+ debugfs_remove_recursive(stmmac_fs_dir);
+ #endif
+ }
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index ada59df642d2..a4d8c90ee7cc 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -1165,8 +1165,8 @@ static int nvme_identify_ns(struct nvme_ctrl *ctrl,
+ static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid,
+ unsigned int dword11, void *buffer, size_t buflen, u32 *result)
+ {
++ union nvme_result res = { 0 };
+ struct nvme_command c;
+- union nvme_result res;
+ int ret;
+
+ memset(&c, 0, sizeof(c));
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index bb5e13ad1aff..d3f23d6254e4 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -2747,6 +2747,18 @@ static unsigned long check_vendor_combination_bug(struct pci_dev *pdev)
+ (dmi_match(DMI_BOARD_NAME, "PRIME B350M-A") ||
+ dmi_match(DMI_BOARD_NAME, "PRIME Z370-A")))
+ return NVME_QUIRK_NO_APST;
++ } else if ((pdev->vendor == 0x144d && (pdev->device == 0xa801 ||
++ pdev->device == 0xa808 || pdev->device == 0xa809)) ||
++ (pdev->vendor == 0x1e0f && pdev->device == 0x0001)) {
++ /*
++ * Forcing to use host managed nvme power settings for
++ * lowest idle power with quick resume latency on
++ * Samsung and Toshiba SSDs based on suspend behavior
++ * on Coffee Lake board for LENOVO C640
++ */
++ if ((dmi_match(DMI_BOARD_VENDOR, "LENOVO")) &&
++ dmi_match(DMI_BOARD_NAME, "LNVNB161216"))
++ return NVME_QUIRK_SIMPLE_SUSPEND;
+ }
+
+ return 0;
+@@ -3109,7 +3121,8 @@ static const struct pci_device_id nvme_id_table[] = {
+ .driver_data = NVME_QUIRK_NO_DEEPEST_PS |
+ NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+ { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
+- { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001) },
++ { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001),
++ .driver_data = NVME_QUIRK_SINGLE_VECTOR },
+ { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) },
+ { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2005),
+ .driver_data = NVME_QUIRK_SINGLE_VECTOR |
+diff --git a/drivers/phy/allwinner/phy-sun50i-usb3.c b/drivers/phy/allwinner/phy-sun50i-usb3.c
+index 1169f3e83a6f..b1c04f71a31d 100644
+--- a/drivers/phy/allwinner/phy-sun50i-usb3.c
++++ b/drivers/phy/allwinner/phy-sun50i-usb3.c
+@@ -49,7 +49,7 @@
+ #define SUNXI_LOS_BIAS(n) ((n) << 3)
+ #define SUNXI_LOS_BIAS_MASK GENMASK(5, 3)
+ #define SUNXI_TXVBOOSTLVL(n) ((n) << 0)
+-#define SUNXI_TXVBOOSTLVL_MASK GENMASK(0, 2)
++#define SUNXI_TXVBOOSTLVL_MASK GENMASK(2, 0)
+
+ struct sun50i_usb3_phy {
+ struct phy *phy;
+diff --git a/drivers/phy/motorola/phy-mapphone-mdm6600.c b/drivers/phy/motorola/phy-mapphone-mdm6600.c
+index f20524f0c21d..94a34cf75eb3 100644
+--- a/drivers/phy/motorola/phy-mapphone-mdm6600.c
++++ b/drivers/phy/motorola/phy-mapphone-mdm6600.c
+@@ -20,6 +20,7 @@
+
+ #define PHY_MDM6600_PHY_DELAY_MS 4000 /* PHY enable 2.2s to 3.5s */
+ #define PHY_MDM6600_ENABLED_DELAY_MS 8000 /* 8s more total for MDM6600 */
++#define PHY_MDM6600_WAKE_KICK_MS 600 /* time on after GPIO toggle */
+ #define MDM6600_MODEM_IDLE_DELAY_MS 1000 /* modem after USB suspend */
+ #define MDM6600_MODEM_WAKE_DELAY_MS 200 /* modem response after idle */
+
+@@ -243,10 +244,24 @@ static irqreturn_t phy_mdm6600_wakeirq_thread(int irq, void *data)
+ {
+ struct phy_mdm6600 *ddata = data;
+ struct gpio_desc *mode_gpio1;
++ int error, wakeup;
+
+ mode_gpio1 = ddata->mode_gpios->desc[PHY_MDM6600_MODE1];
+- dev_dbg(ddata->dev, "OOB wake on mode_gpio1: %i\n",
+- gpiod_get_value(mode_gpio1));
++ wakeup = gpiod_get_value(mode_gpio1);
++ if (!wakeup)
++ return IRQ_NONE;
++
++ dev_dbg(ddata->dev, "OOB wake on mode_gpio1: %i\n", wakeup);
++ error = pm_runtime_get_sync(ddata->dev);
++ if (error < 0) {
++ pm_runtime_put_noidle(ddata->dev);
++
++ return IRQ_NONE;
++ }
++
++ /* Just wake-up and kick the autosuspend timer */
++ pm_runtime_mark_last_busy(ddata->dev);
++ pm_runtime_put_autosuspend(ddata->dev);
+
+ return IRQ_HANDLED;
+ }
+@@ -496,8 +511,14 @@ static void phy_mdm6600_modem_wake(struct work_struct *work)
+
+ ddata = container_of(work, struct phy_mdm6600, modem_wake_work.work);
+ phy_mdm6600_wake_modem(ddata);
++
++ /*
++ * The modem does not always stay awake 1.2 seconds after toggling
++ * the wake GPIO, and sometimes it idles after about some 600 ms
++ * making writes time out.
++ */
+ schedule_delayed_work(&ddata->modem_wake_work,
+- msecs_to_jiffies(MDM6600_MODEM_IDLE_DELAY_MS));
++ msecs_to_jiffies(PHY_MDM6600_WAKE_KICK_MS));
+ }
+
+ static int __maybe_unused phy_mdm6600_runtime_suspend(struct device *dev)
+diff --git a/drivers/regulator/stm32-vrefbuf.c b/drivers/regulator/stm32-vrefbuf.c
+index bdfaf7edb75a..992bc18101ef 100644
+--- a/drivers/regulator/stm32-vrefbuf.c
++++ b/drivers/regulator/stm32-vrefbuf.c
+@@ -88,7 +88,7 @@ static int stm32_vrefbuf_disable(struct regulator_dev *rdev)
+ }
+
+ val = readl_relaxed(priv->base + STM32_VREFBUF_CSR);
+- val = (val & ~STM32_ENVR) | STM32_HIZ;
++ val &= ~STM32_ENVR;
+ writel_relaxed(val, priv->base + STM32_VREFBUF_CSR);
+
+ pm_runtime_mark_last_busy(priv->dev);
+@@ -175,6 +175,7 @@ static const struct regulator_desc stm32_vrefbuf_regu = {
+ .volt_table = stm32_vrefbuf_voltages,
+ .n_voltages = ARRAY_SIZE(stm32_vrefbuf_voltages),
+ .ops = &stm32_vrefbuf_volt_ops,
++ .off_on_delay = 1000,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ };
+diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c
+index 2a3f874a21d5..9cebff8e8d74 100644
+--- a/drivers/s390/cio/blacklist.c
++++ b/drivers/s390/cio/blacklist.c
+@@ -303,8 +303,10 @@ static void *
+ cio_ignore_proc_seq_next(struct seq_file *s, void *it, loff_t *offset)
+ {
+ struct ccwdev_iter *iter;
++ loff_t p = *offset;
+
+- if (*offset >= (__MAX_SUBCHANNEL + 1) * (__MAX_SSID + 1))
++ (*offset)++;
++ if (p >= (__MAX_SUBCHANNEL + 1) * (__MAX_SSID + 1))
+ return NULL;
+ iter = it;
+ if (iter->devno == __MAX_SUBCHANNEL) {
+@@ -314,7 +316,6 @@ cio_ignore_proc_seq_next(struct seq_file *s, void *it, loff_t *offset)
+ return NULL;
+ } else
+ iter->devno++;
+- (*offset)++;
+ return iter;
+ }
+
+diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
+index dc430bd86ade..58eaac70dba7 100644
+--- a/drivers/s390/cio/qdio_setup.c
++++ b/drivers/s390/cio/qdio_setup.c
+@@ -8,6 +8,7 @@
+ #include <linux/kernel.h>
+ #include <linux/slab.h>
+ #include <linux/export.h>
++#include <linux/io.h>
+ #include <asm/qdio.h>
+
+ #include "cio.h"
+@@ -205,7 +206,7 @@ static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr,
+
+ /* fill in sl */
+ for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
+- q->sl->element[j].sbal = (unsigned long)q->sbal[j];
++ q->sl->element[j].sbal = virt_to_phys(q->sbal[j]);
+ }
+
+ static void setup_queues(struct qdio_irq *irq_ptr,
+diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
+index 10edfd6fc930..4fd7b0ceb4ff 100644
+--- a/drivers/s390/net/qeth_core_main.c
++++ b/drivers/s390/net/qeth_core_main.c
+@@ -4749,10 +4749,10 @@ static void qeth_qdio_establish_cq(struct qeth_card *card,
+ if (card->options.cq == QETH_CQ_ENABLED) {
+ int offset = QDIO_MAX_BUFFERS_PER_Q *
+ (card->qdio.no_in_queues - 1);
+- for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) {
+- in_sbal_ptrs[offset + i] = (struct qdio_buffer *)
+- virt_to_phys(card->qdio.c_q->bufs[i].buffer);
+- }
++
++ for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++)
++ in_sbal_ptrs[offset + i] =
++ card->qdio.c_q->bufs[i].buffer;
+
+ queue_start_poll[card->qdio.no_in_queues - 1] = NULL;
+ }
+@@ -4786,10 +4786,9 @@ static int qeth_qdio_establish(struct qeth_card *card)
+ rc = -ENOMEM;
+ goto out_free_qib_param;
+ }
+- for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) {
+- in_sbal_ptrs[i] = (struct qdio_buffer *)
+- virt_to_phys(card->qdio.in_q->bufs[i].buffer);
+- }
++
++ for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++)
++ in_sbal_ptrs[i] = card->qdio.in_q->bufs[i].buffer;
+
+ queue_start_poll = kcalloc(card->qdio.no_in_queues, sizeof(void *),
+ GFP_KERNEL);
+@@ -4810,11 +4809,11 @@ static int qeth_qdio_establish(struct qeth_card *card)
+ rc = -ENOMEM;
+ goto out_free_queue_start_poll;
+ }
++
+ for (i = 0, k = 0; i < card->qdio.no_out_queues; ++i)
+- for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j, ++k) {
+- out_sbal_ptrs[k] = (struct qdio_buffer *)virt_to_phys(
+- card->qdio.out_qs[i]->bufs[j]->buffer);
+- }
++ for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++, k++)
++ out_sbal_ptrs[k] =
++ card->qdio.out_qs[i]->bufs[j]->buffer;
+
+ memset(&init_data, 0, sizeof(struct qdio_initialize));
+ init_data.cdev = CARD_DDEV(card);
+diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
+index 46bc062d873e..d86838801805 100644
+--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
++++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
+@@ -594,7 +594,8 @@ retry_alloc:
+
+ fusion->io_request_frames =
+ dma_pool_alloc(fusion->io_request_frames_pool,
+- GFP_KERNEL, &fusion->io_request_frames_phys);
++ GFP_KERNEL | __GFP_NOWARN,
++ &fusion->io_request_frames_phys);
+ if (!fusion->io_request_frames) {
+ if (instance->max_fw_cmds >= (MEGASAS_REDUCE_QD_COUNT * 2)) {
+ instance->max_fw_cmds -= MEGASAS_REDUCE_QD_COUNT;
+@@ -632,7 +633,7 @@ retry_alloc:
+
+ fusion->io_request_frames =
+ dma_pool_alloc(fusion->io_request_frames_pool,
+- GFP_KERNEL,
++ GFP_KERNEL | __GFP_NOWARN,
+ &fusion->io_request_frames_phys);
+
+ if (!fusion->io_request_frames) {
+diff --git a/drivers/soc/imx/soc-imx-scu.c b/drivers/soc/imx/soc-imx-scu.c
+index fb70b8a3f7c5..20d37eaeb5f2 100644
+--- a/drivers/soc/imx/soc-imx-scu.c
++++ b/drivers/soc/imx/soc-imx-scu.c
+@@ -25,7 +25,7 @@ struct imx_sc_msg_misc_get_soc_id {
+ u32 id;
+ } resp;
+ } data;
+-} __packed;
++} __packed __aligned(4);
+
+ struct imx_sc_msg_misc_get_soc_uid {
+ struct imx_sc_rpc_msg hdr;
+diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c
+index fd8007ebb145..13def7f78b9e 100644
+--- a/drivers/spi/atmel-quadspi.c
++++ b/drivers/spi/atmel-quadspi.c
+@@ -149,6 +149,7 @@ struct atmel_qspi {
+ struct clk *qspick;
+ struct platform_device *pdev;
+ const struct atmel_qspi_caps *caps;
++ resource_size_t mmap_size;
+ u32 pending;
+ u32 mr;
+ u32 scr;
+@@ -329,6 +330,14 @@ static int atmel_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
+ u32 sr, offset;
+ int err;
+
++ /*
++ * Check if the address exceeds the MMIO window size. An improvement
++ * would be to add support for regular SPI mode and fall back to it
++ * when the flash memories overrun the controller's memory space.
++ */
++ if (op->addr.val + op->data.nbytes > aq->mmap_size)
++ return -ENOTSUPP;
++
+ err = atmel_qspi_set_cfg(aq, op, &offset);
+ if (err)
+ return err;
+@@ -480,6 +489,8 @@ static int atmel_qspi_probe(struct platform_device *pdev)
+ goto exit;
+ }
+
++ aq->mmap_size = resource_size(res);
++
+ /* Get the peripheral clock */
+ aq->pclk = devm_clk_get(&pdev->dev, "pclk");
+ if (IS_ERR(aq->pclk))
+diff --git a/drivers/spi/spi-bcm63xx-hsspi.c b/drivers/spi/spi-bcm63xx-hsspi.c
+index 7327309ea3d5..6c235306c0e4 100644
+--- a/drivers/spi/spi-bcm63xx-hsspi.c
++++ b/drivers/spi/spi-bcm63xx-hsspi.c
+@@ -366,7 +366,6 @@ static int bcm63xx_hsspi_probe(struct platform_device *pdev)
+ goto out_disable_clk;
+
+ rate = clk_get_rate(pll_clk);
+- clk_disable_unprepare(pll_clk);
+ if (!rate) {
+ ret = -EINVAL;
+ goto out_disable_pll_clk;
+diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
+index 1e217e3e9486..2ab6e782f14c 100644
+--- a/drivers/spi/spidev.c
++++ b/drivers/spi/spidev.c
+@@ -396,6 +396,7 @@ spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+ else
+ retval = get_user(tmp, (u32 __user *)arg);
+ if (retval == 0) {
++ struct spi_controller *ctlr = spi->controller;
+ u32 save = spi->mode;
+
+ if (tmp & ~SPI_MODE_MASK) {
+@@ -403,6 +404,10 @@ spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+ break;
+ }
+
++ if (ctlr->use_gpio_descriptors && ctlr->cs_gpiods &&
++ ctlr->cs_gpiods[spi->chip_select])
++ tmp |= SPI_CS_HIGH;
++
+ tmp |= spi->mode & ~SPI_MODE_MASK;
+ spi->mode = (u16)tmp;
+ retval = spi_setup(spi);
+diff --git a/drivers/staging/media/hantro/hantro_drv.c b/drivers/staging/media/hantro/hantro_drv.c
+index 26108c96b674..37c7cf6b7d8a 100644
+--- a/drivers/staging/media/hantro/hantro_drv.c
++++ b/drivers/staging/media/hantro/hantro_drv.c
+@@ -553,13 +553,13 @@ static int hantro_attach_func(struct hantro_dev *vpu,
+ goto err_rel_entity1;
+
+ /* Connect the three entities */
+- ret = media_create_pad_link(&func->vdev.entity, 0, &func->proc, 1,
++ ret = media_create_pad_link(&func->vdev.entity, 0, &func->proc, 0,
+ MEDIA_LNK_FL_IMMUTABLE |
+ MEDIA_LNK_FL_ENABLED);
+ if (ret)
+ goto err_rel_entity2;
+
+- ret = media_create_pad_link(&func->proc, 0, &func->sink, 0,
++ ret = media_create_pad_link(&func->proc, 1, &func->sink, 0,
+ MEDIA_LNK_FL_IMMUTABLE |
+ MEDIA_LNK_FL_ENABLED);
+ if (ret)
+diff --git a/drivers/staging/speakup/selection.c b/drivers/staging/speakup/selection.c
+index a8b4d0c5ab7e..032f3264fba1 100644
+--- a/drivers/staging/speakup/selection.c
++++ b/drivers/staging/speakup/selection.c
+@@ -51,9 +51,7 @@ static void __speakup_set_selection(struct work_struct *work)
+ goto unref;
+ }
+
+- console_lock();
+ set_selection_kernel(&sel, tty);
+- console_unlock();
+
+ unref:
+ tty_kref_put(tty);
+diff --git a/drivers/tty/serdev/core.c b/drivers/tty/serdev/core.c
+index ce5309d00280..0f64a10ba51f 100644
+--- a/drivers/tty/serdev/core.c
++++ b/drivers/tty/serdev/core.c
+@@ -18,6 +18,7 @@
+ #include <linux/sched.h>
+ #include <linux/serdev.h>
+ #include <linux/slab.h>
++#include <linux/platform_data/x86/apple.h>
+
+ static bool is_registered;
+ static DEFINE_IDA(ctrl_ida);
+@@ -630,6 +631,15 @@ static int acpi_serdev_check_resources(struct serdev_controller *ctrl,
+ if (ret)
+ return ret;
+
++ /*
++ * Apple machines provide an empty resource template, so on those
++ * machines just look for immediate children with a "baud" property
++ * (from the _DSM method) instead.
++ */
++ if (!lookup.controller_handle && x86_apple_machine &&
++ !acpi_dev_get_property(adev, "baud", ACPI_TYPE_BUFFER, NULL))
++ acpi_get_parent(adev->handle, &lookup.controller_handle);
++
+ /* Make sure controller and ResourceSource handle match */
+ if (ACPI_HANDLE(ctrl->dev.parent) != lookup.controller_handle)
+ return -ENODEV;
+diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c
+index 108cd55f9c4d..405370c6eee5 100644
+--- a/drivers/tty/serial/8250/8250_exar.c
++++ b/drivers/tty/serial/8250/8250_exar.c
+@@ -25,6 +25,14 @@
+
+ #include "8250.h"
+
++#define PCI_DEVICE_ID_ACCES_COM_2S 0x1052
++#define PCI_DEVICE_ID_ACCES_COM_4S 0x105d
++#define PCI_DEVICE_ID_ACCES_COM_8S 0x106c
++#define PCI_DEVICE_ID_ACCES_COM232_8 0x10a8
++#define PCI_DEVICE_ID_ACCES_COM_2SM 0x10d2
++#define PCI_DEVICE_ID_ACCES_COM_4SM 0x10db
++#define PCI_DEVICE_ID_ACCES_COM_8SM 0x10ea
++
+ #define PCI_DEVICE_ID_COMMTECH_4224PCI335 0x0002
+ #define PCI_DEVICE_ID_COMMTECH_4222PCI335 0x0004
+ #define PCI_DEVICE_ID_COMMTECH_2324PCI335 0x000a
+@@ -677,6 +685,22 @@ static int __maybe_unused exar_resume(struct device *dev)
+
+ static SIMPLE_DEV_PM_OPS(exar_pci_pm, exar_suspend, exar_resume);
+
++static const struct exar8250_board acces_com_2x = {
++ .num_ports = 2,
++ .setup = pci_xr17c154_setup,
++};
++
++static const struct exar8250_board acces_com_4x = {
++ .num_ports = 4,
++ .setup = pci_xr17c154_setup,
++};
++
++static const struct exar8250_board acces_com_8x = {
++ .num_ports = 8,
++ .setup = pci_xr17c154_setup,
++};
++
++
+ static const struct exar8250_board pbn_fastcom335_2 = {
+ .num_ports = 2,
+ .setup = pci_fastcom335_setup,
+@@ -745,6 +769,15 @@ static const struct exar8250_board pbn_exar_XR17V8358 = {
+ }
+
+ static const struct pci_device_id exar_pci_tbl[] = {
++ EXAR_DEVICE(ACCESSIO, ACCES_COM_2S, acces_com_2x),
++ EXAR_DEVICE(ACCESSIO, ACCES_COM_4S, acces_com_4x),
++ EXAR_DEVICE(ACCESSIO, ACCES_COM_8S, acces_com_8x),
++ EXAR_DEVICE(ACCESSIO, ACCES_COM232_8, acces_com_8x),
++ EXAR_DEVICE(ACCESSIO, ACCES_COM_2SM, acces_com_2x),
++ EXAR_DEVICE(ACCESSIO, ACCES_COM_4SM, acces_com_4x),
++ EXAR_DEVICE(ACCESSIO, ACCES_COM_8SM, acces_com_8x),
++
++
+ CONNECT_DEVICE(XR17C152, UART_2_232, pbn_connect),
+ CONNECT_DEVICE(XR17C154, UART_4_232, pbn_connect),
+ CONNECT_DEVICE(XR17C158, UART_8_232, pbn_connect),
+diff --git a/drivers/tty/serial/ar933x_uart.c b/drivers/tty/serial/ar933x_uart.c
+index 3bdd56a1021b..ea12f10610b6 100644
+--- a/drivers/tty/serial/ar933x_uart.c
++++ b/drivers/tty/serial/ar933x_uart.c
+@@ -286,6 +286,10 @@ static void ar933x_uart_set_termios(struct uart_port *port,
+ ar933x_uart_rmw_set(up, AR933X_UART_CS_REG,
+ AR933X_UART_CS_HOST_INT_EN);
+
++ /* enable RX and TX ready overide */
++ ar933x_uart_rmw_set(up, AR933X_UART_CS_REG,
++ AR933X_UART_CS_TX_READY_ORIDE | AR933X_UART_CS_RX_READY_ORIDE);
++
+ /* reenable the UART */
+ ar933x_uart_rmw(up, AR933X_UART_CS_REG,
+ AR933X_UART_CS_IF_MODE_M << AR933X_UART_CS_IF_MODE_S,
+@@ -418,6 +422,10 @@ static int ar933x_uart_startup(struct uart_port *port)
+ ar933x_uart_rmw_set(up, AR933X_UART_CS_REG,
+ AR933X_UART_CS_HOST_INT_EN);
+
++ /* enable RX and TX ready overide */
++ ar933x_uart_rmw_set(up, AR933X_UART_CS_REG,
++ AR933X_UART_CS_TX_READY_ORIDE | AR933X_UART_CS_RX_READY_ORIDE);
++
+ /* Enable RX interrupts */
+ up->ier = AR933X_UART_INT_RX_VALID;
+ ar933x_uart_write(up, AR933X_UART_INT_EN_REG, up->ier);
+diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
+index 4e128d19e0ad..8a69ec282a43 100644
+--- a/drivers/tty/serial/fsl_lpuart.c
++++ b/drivers/tty/serial/fsl_lpuart.c
+@@ -268,6 +268,7 @@ struct lpuart_port {
+ int rx_dma_rng_buf_len;
+ unsigned int dma_tx_nents;
+ wait_queue_head_t dma_wait;
++ bool id_allocated;
+ };
+
+ struct lpuart_soc_data {
+@@ -2429,19 +2430,6 @@ static int lpuart_probe(struct platform_device *pdev)
+ if (!sport)
+ return -ENOMEM;
+
+- ret = of_alias_get_id(np, "serial");
+- if (ret < 0) {
+- ret = ida_simple_get(&fsl_lpuart_ida, 0, UART_NR, GFP_KERNEL);
+- if (ret < 0) {
+- dev_err(&pdev->dev, "port line is full, add device failed\n");
+- return ret;
+- }
+- }
+- if (ret >= ARRAY_SIZE(lpuart_ports)) {
+- dev_err(&pdev->dev, "serial%d out of range\n", ret);
+- return -EINVAL;
+- }
+- sport->port.line = ret;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ sport->port.membase = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(sport->port.membase))
+@@ -2485,9 +2473,25 @@ static int lpuart_probe(struct platform_device *pdev)
+ }
+ }
+
++ ret = of_alias_get_id(np, "serial");
++ if (ret < 0) {
++ ret = ida_simple_get(&fsl_lpuart_ida, 0, UART_NR, GFP_KERNEL);
++ if (ret < 0) {
++ dev_err(&pdev->dev, "port line is full, add device failed\n");
++ return ret;
++ }
++ sport->id_allocated = true;
++ }
++ if (ret >= ARRAY_SIZE(lpuart_ports)) {
++ dev_err(&pdev->dev, "serial%d out of range\n", ret);
++ ret = -EINVAL;
++ goto failed_out_of_range;
++ }
++ sport->port.line = ret;
++
+ ret = lpuart_enable_clks(sport);
+ if (ret)
+- return ret;
++ goto failed_clock_enable;
+ sport->port.uartclk = lpuart_get_baud_clk_rate(sport);
+
+ lpuart_ports[sport->port.line] = sport;
+@@ -2537,6 +2541,10 @@ static int lpuart_probe(struct platform_device *pdev)
+ failed_attach_port:
+ failed_irq_request:
+ lpuart_disable_clks(sport);
++failed_clock_enable:
++failed_out_of_range:
++ if (sport->id_allocated)
++ ida_simple_remove(&fsl_lpuart_ida, sport->port.line);
+ return ret;
+ }
+
+@@ -2546,7 +2554,8 @@ static int lpuart_remove(struct platform_device *pdev)
+
+ uart_remove_one_port(&lpuart_reg, &sport->port);
+
+- ida_simple_remove(&fsl_lpuart_ida, sport->port.line);
++ if (sport->id_allocated)
++ ida_simple_remove(&fsl_lpuart_ida, sport->port.line);
+
+ lpuart_disable_clks(sport);
+
+diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c
+index c12a12556339..4e9a590712cb 100644
+--- a/drivers/tty/serial/mvebu-uart.c
++++ b/drivers/tty/serial/mvebu-uart.c
+@@ -851,7 +851,7 @@ static int mvebu_uart_probe(struct platform_device *pdev)
+
+ port->membase = devm_ioremap_resource(&pdev->dev, reg);
+ if (IS_ERR(port->membase))
+- return -PTR_ERR(port->membase);
++ return PTR_ERR(port->membase);
+
+ mvuart = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_uart),
+ GFP_KERNEL);
+diff --git a/drivers/tty/vt/selection.c b/drivers/tty/vt/selection.c
+index 44d974d4159f..d7d2e4b844bc 100644
+--- a/drivers/tty/vt/selection.c
++++ b/drivers/tty/vt/selection.c
+@@ -16,6 +16,7 @@
+ #include <linux/tty.h>
+ #include <linux/sched.h>
+ #include <linux/mm.h>
++#include <linux/mutex.h>
+ #include <linux/slab.h>
+ #include <linux/types.h>
+
+@@ -45,6 +46,7 @@ static volatile int sel_start = -1; /* cleared by clear_selection */
+ static int sel_end;
+ static int sel_buffer_lth;
+ static char *sel_buffer;
++static DEFINE_MUTEX(sel_lock);
+
+ /* clear_selection, highlight and highlight_pointer can be called
+ from interrupt (via scrollback/front) */
+@@ -179,14 +181,14 @@ int set_selection_user(const struct tiocl_selection __user *sel,
+ return set_selection_kernel(&v, tty);
+ }
+
+-int set_selection_kernel(struct tiocl_selection *v, struct tty_struct *tty)
++static int __set_selection_kernel(struct tiocl_selection *v, struct tty_struct *tty)
+ {
+ struct vc_data *vc = vc_cons[fg_console].d;
+ int new_sel_start, new_sel_end, spc;
+ char *bp, *obp;
+ int i, ps, pe, multiplier;
+ u32 c;
+- int mode;
++ int mode, ret = 0;
+
+ poke_blanked_console();
+
+@@ -334,7 +336,21 @@ int set_selection_kernel(struct tiocl_selection *v, struct tty_struct *tty)
+ }
+ }
+ sel_buffer_lth = bp - sel_buffer;
+- return 0;
++
++ return ret;
++}
++
++int set_selection_kernel(struct tiocl_selection *v, struct tty_struct *tty)
++{
++ int ret;
++
++ mutex_lock(&sel_lock);
++ console_lock();
++ ret = __set_selection_kernel(v, tty);
++ console_unlock();
++ mutex_unlock(&sel_lock);
++
++ return ret;
+ }
+ EXPORT_SYMBOL_GPL(set_selection_kernel);
+
+@@ -364,6 +380,7 @@ int paste_selection(struct tty_struct *tty)
+ tty_buffer_lock_exclusive(&vc->port);
+
+ add_wait_queue(&vc->paste_wait, &wait);
++ mutex_lock(&sel_lock);
+ while (sel_buffer && sel_buffer_lth > pasted) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (signal_pending(current)) {
+@@ -371,7 +388,9 @@ int paste_selection(struct tty_struct *tty)
+ break;
+ }
+ if (tty_throttled(tty)) {
++ mutex_unlock(&sel_lock);
+ schedule();
++ mutex_lock(&sel_lock);
+ continue;
+ }
+ __set_current_state(TASK_RUNNING);
+@@ -380,6 +399,7 @@ int paste_selection(struct tty_struct *tty)
+ count);
+ pasted += count;
+ }
++ mutex_unlock(&sel_lock);
+ remove_wait_queue(&vc->paste_wait, &wait);
+ __set_current_state(TASK_RUNNING);
+
+diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
+index 3b4ccc2a30c1..e9e27ba69d5d 100644
+--- a/drivers/tty/vt/vt.c
++++ b/drivers/tty/vt/vt.c
+@@ -3046,10 +3046,8 @@ int tioclinux(struct tty_struct *tty, unsigned long arg)
+ switch (type)
+ {
+ case TIOCL_SETSEL:
+- console_lock();
+ ret = set_selection_user((struct tiocl_selection
+ __user *)(p+1), tty);
+- console_unlock();
+ break;
+ case TIOCL_PASTESEL:
+ ret = paste_selection(tty);
+diff --git a/drivers/usb/cdns3/gadget.c b/drivers/usb/cdns3/gadget.c
+index 02f6ca2cb1ba..f624cc87cbab 100644
+--- a/drivers/usb/cdns3/gadget.c
++++ b/drivers/usb/cdns3/gadget.c
+@@ -2107,7 +2107,7 @@ found:
+ /* Update ring only if removed request is on pending_req_list list */
+ if (req_on_hw_ring) {
+ link_trb->buffer = TRB_BUFFER(priv_ep->trb_pool_dma +
+- (priv_req->start_trb * TRB_SIZE));
++ ((priv_req->end_trb + 1) * TRB_SIZE));
+ link_trb->control = (link_trb->control & TRB_CYCLE) |
+ TRB_TYPE(TRB_LINK) | TRB_CHAIN;
+
+@@ -2152,11 +2152,21 @@ int __cdns3_gadget_ep_clear_halt(struct cdns3_endpoint *priv_ep)
+ {
+ struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
+ struct usb_request *request;
++ struct cdns3_request *priv_req;
++ struct cdns3_trb *trb = NULL;
+ int ret;
+ int val;
+
+ trace_cdns3_halt(priv_ep, 0, 0);
+
++ request = cdns3_next_request(&priv_ep->pending_req_list);
++ if (request) {
++ priv_req = to_cdns3_request(request);
++ trb = priv_req->trb;
++ if (trb)
++ trb->control = trb->control ^ TRB_CYCLE;
++ }
++
+ writel(EP_CMD_CSTALL | EP_CMD_EPRST, &priv_dev->regs->ep_cmd);
+
+ /* wait for EPRST cleared */
+@@ -2167,10 +2177,11 @@ int __cdns3_gadget_ep_clear_halt(struct cdns3_endpoint *priv_ep)
+
+ priv_ep->flags &= ~(EP_STALLED | EP_STALL_PENDING);
+
+- request = cdns3_next_request(&priv_ep->pending_req_list);
+-
+- if (request)
++ if (request) {
++ if (trb)
++ trb->control = trb->control ^ TRB_CYCLE;
+ cdns3_rearm_transfer(priv_ep, 1);
++ }
+
+ cdns3_start_all_request(priv_dev, priv_ep);
+ return ret;
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 1d212f82c69b..54cd8ef795ec 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -988,13 +988,17 @@ int usb_remove_device(struct usb_device *udev)
+ {
+ struct usb_hub *hub;
+ struct usb_interface *intf;
++ int ret;
+
+ if (!udev->parent) /* Can't remove a root hub */
+ return -EINVAL;
+ hub = usb_hub_to_struct_hub(udev->parent);
+ intf = to_usb_interface(hub->intfdev);
+
+- usb_autopm_get_interface(intf);
++ ret = usb_autopm_get_interface(intf);
++ if (ret < 0)
++ return ret;
++
+ set_bit(udev->portnum, hub->removed_bits);
+ hub_port_logical_disconnect(hub, udev->portnum);
+ usb_autopm_put_interface(intf);
+@@ -1866,7 +1870,7 @@ static int hub_probe(struct usb_interface *intf, const struct usb_device_id *id)
+
+ if (id->driver_info & HUB_QUIRK_DISABLE_AUTOSUSPEND) {
+ hub->quirk_disable_autosuspend = 1;
+- usb_autopm_get_interface(intf);
++ usb_autopm_get_interface_no_resume(intf);
+ }
+
+ if (hub_configure(hub, &desc->endpoint[0].desc) >= 0)
+diff --git a/drivers/usb/core/port.c b/drivers/usb/core/port.c
+index bbbb35fa639f..235a7c645503 100644
+--- a/drivers/usb/core/port.c
++++ b/drivers/usb/core/port.c
+@@ -213,7 +213,10 @@ static int usb_port_runtime_resume(struct device *dev)
+ if (!port_dev->is_superspeed && peer)
+ pm_runtime_get_sync(&peer->dev);
+
+- usb_autopm_get_interface(intf);
++ retval = usb_autopm_get_interface(intf);
++ if (retval < 0)
++ return retval;
++
+ retval = usb_hub_set_port_power(hdev, hub, port1, true);
+ msleep(hub_power_on_good_delay(hub));
+ if (udev && !retval) {
+@@ -266,7 +269,10 @@ static int usb_port_runtime_suspend(struct device *dev)
+ if (usb_port_block_power_off)
+ return -EBUSY;
+
+- usb_autopm_get_interface(intf);
++ retval = usb_autopm_get_interface(intf);
++ if (retval < 0)
++ return retval;
++
+ retval = usb_hub_set_port_power(hdev, hub, port1, false);
+ usb_clear_port_feature(hdev, port1, USB_PORT_FEAT_C_CONNECTION);
+ if (!port_dev->is_superspeed)
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index 2b24336a72e5..2dac3e7cdd97 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -231,6 +231,9 @@ static const struct usb_device_id usb_quirk_list[] = {
+ /* Logitech PTZ Pro Camera */
+ { USB_DEVICE(0x046d, 0x0853), .driver_info = USB_QUIRK_DELAY_INIT },
+
++ /* Logitech Screen Share */
++ { USB_DEVICE(0x046d, 0x086c), .driver_info = USB_QUIRK_NO_LPM },
++
+ /* Logitech Quickcam Fusion */
+ { USB_DEVICE(0x046d, 0x08c1), .driver_info = USB_QUIRK_RESET_RESUME },
+
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index e0cb1c2d5675..6ac02ba5e4a1 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -1068,7 +1068,14 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep,
+ unsigned int rem = length % maxp;
+ unsigned chain = true;
+
+- if (sg_is_last(s))
++ /*
++ * IOMMU driver is coalescing the list of sgs which shares a
++ * page boundary into one and giving it to USB driver. With
++ * this the number of sgs mapped is not equal to the number of
++ * sgs passed. So mark the chain bit to false if it isthe last
++ * mapped sg.
++ */
++ if (i == remaining - 1)
+ chain = false;
+
+ if (rem && usb_endpoint_dir_out(dep->endpoint.desc) && !chain) {
+diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
+index cd303a3ea680..223f72d4d9ed 100644
+--- a/drivers/usb/gadget/composite.c
++++ b/drivers/usb/gadget/composite.c
+@@ -438,9 +438,13 @@ static u8 encode_bMaxPower(enum usb_device_speed speed,
+ if (!val)
+ return 0;
+ if (speed < USB_SPEED_SUPER)
+- return DIV_ROUND_UP(val, 2);
++ return min(val, 500U) / 2;
+ else
+- return DIV_ROUND_UP(val, 8);
++ /*
++ * USB 3.x supports up to 900mA, but since 900 isn't divisible
++ * by 8 the integral division will effectively cap to 896mA.
++ */
++ return min(val, 900U) / 8;
+ }
+
+ static int config_buf(struct usb_configuration *config,
+@@ -852,6 +856,10 @@ static int set_config(struct usb_composite_dev *cdev,
+
+ /* when we return, be sure our power usage is valid */
+ power = c->MaxPower ? c->MaxPower : CONFIG_USB_GADGET_VBUS_DRAW;
++ if (gadget->speed < USB_SPEED_SUPER)
++ power = min(power, 500U);
++ else
++ power = min(power, 900U);
+ done:
+ usb_gadget_vbus_draw(gadget, power);
+ if (result >= 0 && cdev->delayed_status)
+@@ -2278,7 +2286,7 @@ void composite_resume(struct usb_gadget *gadget)
+ {
+ struct usb_composite_dev *cdev = get_gadget_data(gadget);
+ struct usb_function *f;
+- u16 maxpower;
++ unsigned maxpower;
+
+ /* REVISIT: should we have config level
+ * suspend/resume callbacks?
+@@ -2292,10 +2300,14 @@ void composite_resume(struct usb_gadget *gadget)
+ f->resume(f);
+ }
+
+- maxpower = cdev->config->MaxPower;
++ maxpower = cdev->config->MaxPower ?
++ cdev->config->MaxPower : CONFIG_USB_GADGET_VBUS_DRAW;
++ if (gadget->speed < USB_SPEED_SUPER)
++ maxpower = min(maxpower, 500U);
++ else
++ maxpower = min(maxpower, 900U);
+
+- usb_gadget_vbus_draw(gadget, maxpower ?
+- maxpower : CONFIG_USB_GADGET_VBUS_DRAW);
++ usb_gadget_vbus_draw(gadget, maxpower);
+ }
+
+ cdev->suspended = 0;
+diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
+index 6f8b67e61771..bdac92d3a8d0 100644
+--- a/drivers/usb/gadget/function/f_fs.c
++++ b/drivers/usb/gadget/function/f_fs.c
+@@ -1162,18 +1162,19 @@ static int ffs_aio_cancel(struct kiocb *kiocb)
+ {
+ struct ffs_io_data *io_data = kiocb->private;
+ struct ffs_epfile *epfile = kiocb->ki_filp->private_data;
++ unsigned long flags;
+ int value;
+
+ ENTER();
+
+- spin_lock_irq(&epfile->ffs->eps_lock);
++ spin_lock_irqsave(&epfile->ffs->eps_lock, flags);
+
+ if (likely(io_data && io_data->ep && io_data->req))
+ value = usb_ep_dequeue(io_data->ep, io_data->req);
+ else
+ value = -EINVAL;
+
+- spin_unlock_irq(&epfile->ffs->eps_lock);
++ spin_unlock_irqrestore(&epfile->ffs->eps_lock, flags);
+
+ return value;
+ }
+diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
+index f986e5c55974..8167d379e115 100644
+--- a/drivers/usb/gadget/function/u_serial.c
++++ b/drivers/usb/gadget/function/u_serial.c
+@@ -561,8 +561,10 @@ static int gs_start_io(struct gs_port *port)
+ port->n_read = 0;
+ started = gs_start_rx(port);
+
+- /* unblock any pending writes into our circular buffer */
+ if (started) {
++ gs_start_tx(port);
++ /* Unblock any pending writes into our circular buffer, in case
++ * we didn't in gs_start_tx() */
+ tty_wakeup(port->port.tty);
+ } else {
+ gs_free_requests(ep, head, &port->read_allocated);
+diff --git a/drivers/usb/misc/usb251xb.c b/drivers/usb/misc/usb251xb.c
+index 10c9e7f6273e..29fe5771c21b 100644
+--- a/drivers/usb/misc/usb251xb.c
++++ b/drivers/usb/misc/usb251xb.c
+@@ -424,10 +424,6 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
+ return err;
+ }
+
+- hub->vdd = devm_regulator_get(dev, "vdd");
+- if (IS_ERR(hub->vdd))
+- return PTR_ERR(hub->vdd);
+-
+ if (of_property_read_u16_array(np, "vendor-id", &hub->vendor_id, 1))
+ hub->vendor_id = USB251XB_DEF_VENDOR_ID;
+
+@@ -640,6 +636,13 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
+ }
+ #endif /* CONFIG_OF */
+
++static void usb251xb_regulator_disable_action(void *data)
++{
++ struct usb251xb *hub = data;
++
++ regulator_disable(hub->vdd);
++}
++
+ static int usb251xb_probe(struct usb251xb *hub)
+ {
+ struct device *dev = hub->dev;
+@@ -676,10 +679,19 @@ static int usb251xb_probe(struct usb251xb *hub)
+ if (err)
+ return err;
+
++ hub->vdd = devm_regulator_get(dev, "vdd");
++ if (IS_ERR(hub->vdd))
++ return PTR_ERR(hub->vdd);
++
+ err = regulator_enable(hub->vdd);
+ if (err)
+ return err;
+
++ err = devm_add_action_or_reset(dev,
++ usb251xb_regulator_disable_action, hub);
++ if (err)
++ return err;
++
+ err = usb251xb_connect(hub);
+ if (err) {
+ dev_err(dev, "Failed to connect hub (%d)\n", err);
+diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
+index 1cd9b6305b06..1880f3e13f57 100644
+--- a/drivers/usb/storage/unusual_devs.h
++++ b/drivers/usb/storage/unusual_devs.h
+@@ -1258,6 +1258,12 @@ UNUSUAL_DEV( 0x090a, 0x1200, 0x0000, 0x9999,
+ USB_SC_RBC, USB_PR_BULK, NULL,
+ 0 ),
+
++UNUSUAL_DEV(0x090c, 0x1000, 0x1100, 0x1100,
++ "Samsung",
++ "Flash Drive FIT",
++ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
++ US_FL_MAX_SECTORS_64),
++
+ /* aeb */
+ UNUSUAL_DEV( 0x090c, 0x1132, 0x0000, 0xffff,
+ "Feiya",
+diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
+index de7b8382aba9..998b0de1812f 100644
+--- a/drivers/video/console/vgacon.c
++++ b/drivers/video/console/vgacon.c
+@@ -1316,6 +1316,9 @@ static int vgacon_font_get(struct vc_data *c, struct console_font *font)
+ static int vgacon_resize(struct vc_data *c, unsigned int width,
+ unsigned int height, unsigned int user)
+ {
++ if ((width << 1) * height > vga_vram_size)
++ return -EINVAL;
++
+ if (width % 2 || width > screen_info.orig_video_cols ||
+ height > (screen_info.orig_video_lines * vga_default_font_height)/
+ c->vc_font.height)
+diff --git a/drivers/watchdog/da9062_wdt.c b/drivers/watchdog/da9062_wdt.c
+index e149e66a6ea9..e92f38fcb7a4 100644
+--- a/drivers/watchdog/da9062_wdt.c
++++ b/drivers/watchdog/da9062_wdt.c
+@@ -94,13 +94,6 @@ static int da9062_wdt_stop(struct watchdog_device *wdd)
+ struct da9062_watchdog *wdt = watchdog_get_drvdata(wdd);
+ int ret;
+
+- ret = da9062_reset_watchdog_timer(wdt);
+- if (ret) {
+- dev_err(wdt->hw->dev, "Failed to ping the watchdog (err = %d)\n",
+- ret);
+- return ret;
+- }
+-
+ ret = regmap_update_bits(wdt->hw->regmap,
+ DA9062AA_CONTROL_D,
+ DA9062AA_TWDSCALE_MASK,
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index e6901744a5be..e47708a9bf8b 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -8444,6 +8444,7 @@ static inline blk_status_t btrfs_lookup_and_bind_dio_csum(struct inode *inode,
+ {
+ struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
+ struct btrfs_io_bio *orig_io_bio = btrfs_io_bio(dip->orig_bio);
++ u16 csum_size;
+ blk_status_t ret;
+
+ /*
+@@ -8463,7 +8464,8 @@ static inline blk_status_t btrfs_lookup_and_bind_dio_csum(struct inode *inode,
+
+ file_offset -= dip->logical_offset;
+ file_offset >>= inode->i_sb->s_blocksize_bits;
+- io_bio->csum = (u8 *)(((u32 *)orig_io_bio->csum) + file_offset);
++ csum_size = btrfs_super_csum_size(btrfs_sb(inode->i_sb)->super_copy);
++ io_bio->csum = orig_io_bio->csum + csum_size * file_offset;
+
+ return 0;
+ }
+diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
+index 239338d57086..af789aac8ef7 100644
+--- a/fs/cifs/cifsglob.h
++++ b/fs/cifs/cifsglob.h
+@@ -1277,6 +1277,7 @@ struct cifs_fid {
+ __u64 volatile_fid; /* volatile file id for smb2 */
+ __u8 lease_key[SMB2_LEASE_KEY_SIZE]; /* lease key for smb2 */
+ __u8 create_guid[16];
++ __u32 access;
+ struct cifs_pending_open *pending_open;
+ unsigned int epoch;
+ #ifdef CONFIG_CIFS_DEBUG2
+@@ -1737,6 +1738,12 @@ static inline bool is_retryable_error(int error)
+ return false;
+ }
+
++
++/* cifs_get_writable_file() flags */
++#define FIND_WR_ANY 0
++#define FIND_WR_FSUID_ONLY 1
++#define FIND_WR_WITH_DELETE 2
++
+ #define MID_FREE 0
+ #define MID_REQUEST_ALLOCATED 1
+ #define MID_REQUEST_SUBMITTED 2
+diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
+index d6100299458a..3b583150bcd5 100644
+--- a/fs/cifs/cifsproto.h
++++ b/fs/cifs/cifsproto.h
+@@ -134,11 +134,12 @@ extern bool backup_cred(struct cifs_sb_info *);
+ extern bool is_size_safe_to_change(struct cifsInodeInfo *, __u64 eof);
+ extern void cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
+ unsigned int bytes_written);
+-extern struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *, bool);
++extern struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *, int);
+ extern int cifs_get_writable_file(struct cifsInodeInfo *cifs_inode,
+- bool fsuid_only,
++ int flags,
+ struct cifsFileInfo **ret_file);
+ extern int cifs_get_writable_path(struct cifs_tcon *tcon, const char *name,
++ int flags,
+ struct cifsFileInfo **ret_file);
+ extern struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *, bool);
+ extern int cifs_get_readable_path(struct cifs_tcon *tcon, const char *name,
+diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
+index cc86a67225d1..69c38c379f33 100644
+--- a/fs/cifs/cifssmb.c
++++ b/fs/cifs/cifssmb.c
+@@ -1492,6 +1492,7 @@ openRetry:
+ *oplock = rsp->OplockLevel;
+ /* cifs fid stays in le */
+ oparms->fid->netfid = rsp->Fid;
++ oparms->fid->access = desired_access;
+
+ /* Let caller know file was created so we can set the mode. */
+ /* Do we care about the CreateAction in any other cases? */
+@@ -2115,7 +2116,7 @@ cifs_writev_requeue(struct cifs_writedata *wdata)
+ wdata2->tailsz = tailsz;
+ wdata2->bytes = cur_len;
+
+- rc = cifs_get_writable_file(CIFS_I(inode), false,
++ rc = cifs_get_writable_file(CIFS_I(inode), FIND_WR_ANY,
+ &wdata2->cfile);
+ if (!wdata2->cfile) {
+ cifs_dbg(VFS, "No writable handle to retry writepages rc=%d\n",
+diff --git a/fs/cifs/file.c b/fs/cifs/file.c
+index 043288b5c728..5b1460486535 100644
+--- a/fs/cifs/file.c
++++ b/fs/cifs/file.c
+@@ -1964,7 +1964,7 @@ struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
+
+ /* Return -EBADF if no handle is found and general rc otherwise */
+ int
+-cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, bool fsuid_only,
++cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, int flags,
+ struct cifsFileInfo **ret_file)
+ {
+ struct cifsFileInfo *open_file, *inv_file = NULL;
+@@ -1972,7 +1972,8 @@ cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, bool fsuid_only,
+ bool any_available = false;
+ int rc = -EBADF;
+ unsigned int refind = 0;
+-
++ bool fsuid_only = flags & FIND_WR_FSUID_ONLY;
++ bool with_delete = flags & FIND_WR_WITH_DELETE;
+ *ret_file = NULL;
+
+ /*
+@@ -2004,6 +2005,8 @@ refind_writable:
+ continue;
+ if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
+ continue;
++ if (with_delete && !(open_file->fid.access & DELETE))
++ continue;
+ if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
+ if (!open_file->invalidHandle) {
+ /* found a good writable file */
+@@ -2051,12 +2054,12 @@ refind_writable:
+ }
+
+ struct cifsFileInfo *
+-find_writable_file(struct cifsInodeInfo *cifs_inode, bool fsuid_only)
++find_writable_file(struct cifsInodeInfo *cifs_inode, int flags)
+ {
+ struct cifsFileInfo *cfile;
+ int rc;
+
+- rc = cifs_get_writable_file(cifs_inode, fsuid_only, &cfile);
++ rc = cifs_get_writable_file(cifs_inode, flags, &cfile);
+ if (rc)
+ cifs_dbg(FYI, "couldn't find writable handle rc=%d", rc);
+
+@@ -2065,6 +2068,7 @@ find_writable_file(struct cifsInodeInfo *cifs_inode, bool fsuid_only)
+
+ int
+ cifs_get_writable_path(struct cifs_tcon *tcon, const char *name,
++ int flags,
+ struct cifsFileInfo **ret_file)
+ {
+ struct list_head *tmp;
+@@ -2091,7 +2095,7 @@ cifs_get_writable_path(struct cifs_tcon *tcon, const char *name,
+ kfree(full_path);
+ cinode = CIFS_I(d_inode(cfile->dentry));
+ spin_unlock(&tcon->open_file_lock);
+- return cifs_get_writable_file(cinode, 0, ret_file);
++ return cifs_get_writable_file(cinode, flags, ret_file);
+ }
+
+ spin_unlock(&tcon->open_file_lock);
+@@ -2168,7 +2172,8 @@ static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
+ if (mapping->host->i_size - offset < (loff_t)to)
+ to = (unsigned)(mapping->host->i_size - offset);
+
+- rc = cifs_get_writable_file(CIFS_I(mapping->host), false, &open_file);
++ rc = cifs_get_writable_file(CIFS_I(mapping->host), FIND_WR_ANY,
++ &open_file);
+ if (!rc) {
+ bytes_written = cifs_write(open_file, open_file->pid,
+ write_data, to - from, &offset);
+@@ -2361,7 +2366,7 @@ retry:
+ if (cfile)
+ cifsFileInfo_put(cfile);
+
+- rc = cifs_get_writable_file(CIFS_I(inode), false, &cfile);
++ rc = cifs_get_writable_file(CIFS_I(inode), FIND_WR_ANY, &cfile);
+
+ /* in case of an error store it to return later */
+ if (rc)
+diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
+index b3f3675e1878..e9a7536c2a5e 100644
+--- a/fs/cifs/inode.c
++++ b/fs/cifs/inode.c
+@@ -2074,6 +2074,7 @@ int cifs_revalidate_dentry_attr(struct dentry *dentry)
+ struct inode *inode = d_inode(dentry);
+ struct super_block *sb = dentry->d_sb;
+ char *full_path = NULL;
++ int count = 0;
+
+ if (inode == NULL)
+ return -ENOENT;
+@@ -2095,15 +2096,18 @@ int cifs_revalidate_dentry_attr(struct dentry *dentry)
+ full_path, inode, inode->i_count.counter,
+ dentry, cifs_get_time(dentry), jiffies);
+
++again:
+ if (cifs_sb_master_tcon(CIFS_SB(sb))->unix_ext)
+ rc = cifs_get_inode_info_unix(&inode, full_path, sb, xid);
+ else
+ rc = cifs_get_inode_info(&inode, full_path, NULL, sb,
+ xid, NULL);
+-
++ if (rc == -EAGAIN && count++ < 10)
++ goto again;
+ out:
+ kfree(full_path);
+ free_xid(xid);
++
+ return rc;
+ }
+
+@@ -2279,7 +2283,7 @@ cifs_set_file_size(struct inode *inode, struct iattr *attrs,
+ * writebehind data than the SMB timeout for the SetPathInfo
+ * request would allow
+ */
+- open_file = find_writable_file(cifsInode, true);
++ open_file = find_writable_file(cifsInode, FIND_WR_FSUID_ONLY);
+ if (open_file) {
+ tcon = tlink_tcon(open_file->tlink);
+ server = tcon->ses->server;
+@@ -2429,7 +2433,7 @@ cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs)
+ args->ctime = NO_CHANGE_64;
+
+ args->device = 0;
+- open_file = find_writable_file(cifsInode, true);
++ open_file = find_writable_file(cifsInode, FIND_WR_FSUID_ONLY);
+ if (open_file) {
+ u16 nfid = open_file->fid.netfid;
+ u32 npid = open_file->pid;
+@@ -2532,7 +2536,7 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
+ rc = 0;
+
+ if (attrs->ia_valid & ATTR_MTIME) {
+- rc = cifs_get_writable_file(cifsInode, false, &wfile);
++ rc = cifs_get_writable_file(cifsInode, FIND_WR_ANY, &wfile);
+ if (!rc) {
+ tcon = tlink_tcon(wfile->tlink);
+ rc = tcon->ses->server->ops->flush(xid, tcon, &wfile->fid);
+diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
+index d70a2bb062df..e523c05a4487 100644
+--- a/fs/cifs/smb1ops.c
++++ b/fs/cifs/smb1ops.c
+@@ -765,7 +765,7 @@ smb_set_file_info(struct inode *inode, const char *full_path,
+ struct cifs_tcon *tcon;
+
+ /* if the file is already open for write, just use that fileid */
+- open_file = find_writable_file(cinode, true);
++ open_file = find_writable_file(cinode, FIND_WR_FSUID_ONLY);
+ if (open_file) {
+ fid.netfid = open_file->fid.netfid;
+ netpid = open_file->pid;
+diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c
+index 5ef5e97a6d13..bd3669532a09 100644
+--- a/fs/cifs/smb2inode.c
++++ b/fs/cifs/smb2inode.c
+@@ -526,7 +526,7 @@ smb2_mkdir_setinfo(struct inode *inode, const char *name,
+ cifs_i = CIFS_I(inode);
+ dosattrs = cifs_i->cifsAttrs | ATTR_READONLY;
+ data.Attributes = cpu_to_le32(dosattrs);
+- cifs_get_writable_path(tcon, name, &cfile);
++ cifs_get_writable_path(tcon, name, FIND_WR_ANY, &cfile);
+ tmprc = smb2_compound_op(xid, tcon, cifs_sb, name,
+ FILE_WRITE_ATTRIBUTES, FILE_CREATE,
+ CREATE_NOT_FILE, ACL_NO_MODE,
+@@ -582,7 +582,7 @@ smb2_rename_path(const unsigned int xid, struct cifs_tcon *tcon,
+ {
+ struct cifsFileInfo *cfile;
+
+- cifs_get_writable_path(tcon, from_name, &cfile);
++ cifs_get_writable_path(tcon, from_name, FIND_WR_WITH_DELETE, &cfile);
+
+ return smb2_set_path_attr(xid, tcon, from_name, to_name,
+ cifs_sb, DELETE, SMB2_OP_RENAME, cfile);
+diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
+index 65f76be0f454..5b62840853ff 100644
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -1366,6 +1366,7 @@ smb2_set_fid(struct cifsFileInfo *cfile, struct cifs_fid *fid, __u32 oplock)
+
+ cfile->fid.persistent_fid = fid->persistent_fid;
+ cfile->fid.volatile_fid = fid->volatile_fid;
++ cfile->fid.access = fid->access;
+ #ifdef CONFIG_CIFS_DEBUG2
+ cfile->fid.mid = fid->mid;
+ #endif /* CIFS_DEBUG2 */
+@@ -3225,7 +3226,7 @@ static loff_t smb3_llseek(struct file *file, struct cifs_tcon *tcon, loff_t offs
+ * some servers (Windows2016) will not reflect recent writes in
+ * QUERY_ALLOCATED_RANGES until SMB2_flush is called.
+ */
+- wrcfile = find_writable_file(cifsi, false);
++ wrcfile = find_writable_file(cifsi, FIND_WR_ANY);
+ if (wrcfile) {
+ filemap_write_and_wait(inode->i_mapping);
+ smb2_flush_file(xid, tcon, &wrcfile->fid);
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index 6c9497c18f0b..fc32fe546c1a 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -2749,6 +2749,7 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
+ atomic_inc(&tcon->num_remote_opens);
+ oparms->fid->persistent_fid = rsp->PersistentFileId;
+ oparms->fid->volatile_fid = rsp->VolatileFileId;
++ oparms->fid->access = oparms->desired_access;
+ #ifdef CONFIG_CIFS_DEBUG2
+ oparms->fid->mid = le64_to_cpu(rsp->sync_hdr.MessageId);
+ #endif /* CIFS_DEBUG2 */
+diff --git a/fs/fat/inode.c b/fs/fat/inode.c
+index 5f04c5c810fb..d40cbad16659 100644
+--- a/fs/fat/inode.c
++++ b/fs/fat/inode.c
+@@ -749,6 +749,13 @@ static struct inode *fat_alloc_inode(struct super_block *sb)
+ return NULL;
+
+ init_rwsem(&ei->truncate_lock);
++ /* Zeroing to allow iput() even if partial initialized inode. */
++ ei->mmu_private = 0;
++ ei->i_start = 0;
++ ei->i_logstart = 0;
++ ei->i_attrs = 0;
++ ei->i_pos = 0;
++
+ return &ei->vfs_inode;
+ }
+
+@@ -1373,16 +1380,6 @@ out:
+ return 0;
+ }
+
+-static void fat_dummy_inode_init(struct inode *inode)
+-{
+- /* Initialize this dummy inode to work as no-op. */
+- MSDOS_I(inode)->mmu_private = 0;
+- MSDOS_I(inode)->i_start = 0;
+- MSDOS_I(inode)->i_logstart = 0;
+- MSDOS_I(inode)->i_attrs = 0;
+- MSDOS_I(inode)->i_pos = 0;
+-}
+-
+ static int fat_read_root(struct inode *inode)
+ {
+ struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
+@@ -1843,13 +1840,11 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
+ fat_inode = new_inode(sb);
+ if (!fat_inode)
+ goto out_fail;
+- fat_dummy_inode_init(fat_inode);
+ sbi->fat_inode = fat_inode;
+
+ fsinfo_inode = new_inode(sb);
+ if (!fsinfo_inode)
+ goto out_fail;
+- fat_dummy_inode_init(fsinfo_inode);
+ fsinfo_inode->i_ino = MSDOS_FSINFO_INO;
+ sbi->fsinfo_inode = fsinfo_inode;
+ insert_inode_hash(fsinfo_inode);
+diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h
+index e34a7b7f848a..294b2931c4cc 100644
+--- a/include/drm/drm_gem_shmem_helper.h
++++ b/include/drm/drm_gem_shmem_helper.h
+@@ -96,6 +96,11 @@ struct drm_gem_shmem_object {
+ * The address are un-mapped when the count reaches zero.
+ */
+ unsigned int vmap_use_count;
++
++ /**
++ * @map_cached: map object cached (instead of using writecombine).
++ */
++ bool map_cached;
+ };
+
+ #define to_drm_gem_shmem_obj(obj) \
+diff --git a/include/linux/mm.h b/include/linux/mm.h
+index cfaa8feecfe8..70f4278bb193 100644
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -2687,6 +2687,10 @@ static inline bool debug_pagealloc_enabled_static(void)
+ #if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_ARCH_HAS_SET_DIRECT_MAP)
+ extern void __kernel_map_pages(struct page *page, int numpages, int enable);
+
++/*
++ * When called in DEBUG_PAGEALLOC context, the call should most likely be
++ * guarded by debug_pagealloc_enabled() or debug_pagealloc_enabled_static()
++ */
+ static inline void
+ kernel_map_pages(struct page *page, int numpages, int enable)
+ {
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index b0ee5eedeccd..0ff2f43ac9cd 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -8315,6 +8315,8 @@ static inline void update_sg_wakeup_stats(struct sched_domain *sd,
+
+ sgs->group_capacity = group->sgc->capacity;
+
++ sgs->group_weight = group->group_weight;
++
+ sgs->group_type = group_classify(sd->imbalance_pct, group, sgs);
+
+ /*
+diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
+index a6d3016410eb..840ef7af20e0 100644
+--- a/kernel/trace/blktrace.c
++++ b/kernel/trace/blktrace.c
+@@ -1896,8 +1896,11 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
+ }
+
+ ret = 0;
+- if (bt == NULL)
++ if (bt == NULL) {
+ ret = blk_trace_setup_queue(q, bdev);
++ bt = rcu_dereference_protected(q->blk_trace,
++ lockdep_is_held(&q->blk_trace_mutex));
++ }
+
+ if (ret == 0) {
+ if (attr == &dev_attr_act_mask)
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index 54c106bdbafd..c9f8163bd5bf 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -3032,8 +3032,7 @@ void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
+ return;
+
+ flush_cache_range(vma, address, address + HPAGE_PMD_SIZE);
+- pmdval = *pvmw->pmd;
+- pmdp_invalidate(vma, address, pvmw->pmd);
++ pmdval = pmdp_invalidate(vma, address, pvmw->pmd);
+ if (pmd_dirty(pmdval))
+ set_page_dirty(page);
+ entry = make_migration_entry(page, pmd_write(pmdval));
+diff --git a/mm/memory.c b/mm/memory.c
+index 45442d9a4f52..0eba7af05777 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -2221,7 +2221,7 @@ static inline bool cow_user_page(struct page *dst, struct page *src,
+ bool ret;
+ void *kaddr;
+ void __user *uaddr;
+- bool force_mkyoung;
++ bool locked = false;
+ struct vm_area_struct *vma = vmf->vma;
+ struct mm_struct *mm = vma->vm_mm;
+ unsigned long addr = vmf->address;
+@@ -2246,11 +2246,11 @@ static inline bool cow_user_page(struct page *dst, struct page *src,
+ * On architectures with software "accessed" bits, we would
+ * take a double page fault, so mark it accessed here.
+ */
+- force_mkyoung = arch_faults_on_old_pte() && !pte_young(vmf->orig_pte);
+- if (force_mkyoung) {
++ if (arch_faults_on_old_pte() && !pte_young(vmf->orig_pte)) {
+ pte_t entry;
+
+ vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
++ locked = true;
+ if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) {
+ /*
+ * Other thread has already handled the fault
+@@ -2274,18 +2274,37 @@ static inline bool cow_user_page(struct page *dst, struct page *src,
+ * zeroes.
+ */
+ if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
++ if (locked)
++ goto warn;
++
++ /* Re-validate under PTL if the page is still mapped */
++ vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
++ locked = true;
++ if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) {
++ /* The PTE changed under us. Retry page fault. */
++ ret = false;
++ goto pte_unlock;
++ }
++
+ /*
+- * Give a warn in case there can be some obscure
+- * use-case
++ * The same page can be mapped back since last copy attampt.
++ * Try to copy again under PTL.
+ */
+- WARN_ON_ONCE(1);
+- clear_page(kaddr);
++ if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
++ /*
++ * Give a warn in case there can be some obscure
++ * use-case
++ */
++warn:
++ WARN_ON_ONCE(1);
++ clear_page(kaddr);
++ }
+ }
+
+ ret = true;
+
+ pte_unlock:
+- if (force_mkyoung)
++ if (locked)
+ pte_unmap_unlock(vmf->pte, vmf->ptl);
+ kunmap_atomic(kaddr);
+ flush_dcache_page(dst);
+diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
+index 0ddff29079c3..673900faea76 100644
+--- a/mm/memory_hotplug.c
++++ b/mm/memory_hotplug.c
+@@ -599,7 +599,13 @@ EXPORT_SYMBOL_GPL(restore_online_page_callback);
+
+ void generic_online_page(struct page *page, unsigned int order)
+ {
+- kernel_map_pages(page, 1 << order, 1);
++ /*
++ * Freeing the page with debug_pagealloc enabled will try to unmap it,
++ * so we should map it first. This is better than introducing a special
++ * case in page freeing fast path.
++ */
++ if (debug_pagealloc_enabled_static())
++ kernel_map_pages(page, 1 << order, 1);
+ __free_pages_core(page, order);
+ totalram_pages_add(1UL << order);
+ #ifdef CONFIG_HIGHMEM
+diff --git a/mm/mprotect.c b/mm/mprotect.c
+index 7a8e84f86831..311c0dadf71c 100644
+--- a/mm/mprotect.c
++++ b/mm/mprotect.c
+@@ -161,6 +161,31 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
+ return pages;
+ }
+
++/*
++ * Used when setting automatic NUMA hinting protection where it is
++ * critical that a numa hinting PMD is not confused with a bad PMD.
++ */
++static inline int pmd_none_or_clear_bad_unless_trans_huge(pmd_t *pmd)
++{
++ pmd_t pmdval = pmd_read_atomic(pmd);
++
++ /* See pmd_none_or_trans_huge_or_clear_bad for info on barrier */
++#ifdef CONFIG_TRANSPARENT_HUGEPAGE
++ barrier();
++#endif
++
++ if (pmd_none(pmdval))
++ return 1;
++ if (pmd_trans_huge(pmdval))
++ return 0;
++ if (unlikely(pmd_bad(pmdval))) {
++ pmd_clear_bad(pmd);
++ return 1;
++ }
++
++ return 0;
++}
++
+ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
+ pud_t *pud, unsigned long addr, unsigned long end,
+ pgprot_t newprot, int dirty_accountable, int prot_numa)
+@@ -178,8 +203,17 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
+ unsigned long this_pages;
+
+ next = pmd_addr_end(addr, end);
+- if (!is_swap_pmd(*pmd) && !pmd_trans_huge(*pmd) && !pmd_devmap(*pmd)
+- && pmd_none_or_clear_bad(pmd))
++
++ /*
++ * Automatic NUMA balancing walks the tables with mmap_sem
++ * held for read. It's possible a parallel update to occur
++ * between pmd_trans_huge() and a pmd_none_or_clear_bad()
++ * check leading to a false positive and clearing.
++ * Hence, it's necessary to atomically read the PMD value
++ * for all the checks.
++ */
++ if (!is_swap_pmd(*pmd) && !pmd_devmap(*pmd) &&
++ pmd_none_or_clear_bad_unless_trans_huge(pmd))
+ goto next;
+
+ /* invoke the mmu notifier if the pmd is populated */
+diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
+index 1b68a131083c..8c835ad63729 100644
+--- a/net/netfilter/xt_hashlimit.c
++++ b/net/netfilter/xt_hashlimit.c
+@@ -358,21 +358,7 @@ static int htable_create(struct net *net, struct hashlimit_cfg3 *cfg,
+ return 0;
+ }
+
+-static bool select_all(const struct xt_hashlimit_htable *ht,
+- const struct dsthash_ent *he)
+-{
+- return true;
+-}
+-
+-static bool select_gc(const struct xt_hashlimit_htable *ht,
+- const struct dsthash_ent *he)
+-{
+- return time_after_eq(jiffies, he->expires);
+-}
+-
+-static void htable_selective_cleanup(struct xt_hashlimit_htable *ht,
+- bool (*select)(const struct xt_hashlimit_htable *ht,
+- const struct dsthash_ent *he))
++static void htable_selective_cleanup(struct xt_hashlimit_htable *ht, bool select_all)
+ {
+ unsigned int i;
+
+@@ -382,7 +368,7 @@ static void htable_selective_cleanup(struct xt_hashlimit_htable *ht,
+
+ spin_lock_bh(&ht->lock);
+ hlist_for_each_entry_safe(dh, n, &ht->hash[i], node) {
+- if ((*select)(ht, dh))
++ if (time_after_eq(jiffies, dh->expires) || select_all)
+ dsthash_free(ht, dh);
+ }
+ spin_unlock_bh(&ht->lock);
+@@ -396,7 +382,7 @@ static void htable_gc(struct work_struct *work)
+
+ ht = container_of(work, struct xt_hashlimit_htable, gc_work.work);
+
+- htable_selective_cleanup(ht, select_gc);
++ htable_selective_cleanup(ht, false);
+
+ queue_delayed_work(system_power_efficient_wq,
+ &ht->gc_work, msecs_to_jiffies(ht->cfg.gc_interval));
+@@ -416,15 +402,6 @@ static void htable_remove_proc_entry(struct xt_hashlimit_htable *hinfo)
+ remove_proc_entry(hinfo->name, parent);
+ }
+
+-static void htable_destroy(struct xt_hashlimit_htable *hinfo)
+-{
+- cancel_delayed_work_sync(&hinfo->gc_work);
+- htable_remove_proc_entry(hinfo);
+- htable_selective_cleanup(hinfo, select_all);
+- kfree(hinfo->name);
+- vfree(hinfo);
+-}
+-
+ static struct xt_hashlimit_htable *htable_find_get(struct net *net,
+ const char *name,
+ u_int8_t family)
+@@ -446,8 +423,13 @@ static void htable_put(struct xt_hashlimit_htable *hinfo)
+ {
+ if (refcount_dec_and_mutex_lock(&hinfo->use, &hashlimit_mutex)) {
+ hlist_del(&hinfo->node);
++ htable_remove_proc_entry(hinfo);
+ mutex_unlock(&hashlimit_mutex);
+- htable_destroy(hinfo);
++
++ cancel_delayed_work_sync(&hinfo->gc_work);
++ htable_selective_cleanup(hinfo, true);
++ kfree(hinfo->name);
++ vfree(hinfo);
+ }
+ }
+
+diff --git a/security/integrity/platform_certs/load_uefi.c b/security/integrity/platform_certs/load_uefi.c
+index 111898aad56e..f0c908241966 100644
+--- a/security/integrity/platform_certs/load_uefi.c
++++ b/security/integrity/platform_certs/load_uefi.c
+@@ -35,16 +35,18 @@ static __init bool uefi_check_ignore_db(void)
+ * Get a certificate list blob from the named EFI variable.
+ */
+ static __init void *get_cert_list(efi_char16_t *name, efi_guid_t *guid,
+- unsigned long *size)
++ unsigned long *size, efi_status_t *status)
+ {
+- efi_status_t status;
+ unsigned long lsize = 4;
+ unsigned long tmpdb[4];
+ void *db;
+
+- status = efi.get_variable(name, guid, NULL, &lsize, &tmpdb);
+- if (status != EFI_BUFFER_TOO_SMALL) {
+- pr_err("Couldn't get size: 0x%lx\n", status);
++ *status = efi.get_variable(name, guid, NULL, &lsize, &tmpdb);
++ if (*status == EFI_NOT_FOUND)
++ return NULL;
++
++ if (*status != EFI_BUFFER_TOO_SMALL) {
++ pr_err("Couldn't get size: 0x%lx\n", *status);
+ return NULL;
+ }
+
+@@ -52,10 +54,10 @@ static __init void *get_cert_list(efi_char16_t *name, efi_guid_t *guid,
+ if (!db)
+ return NULL;
+
+- status = efi.get_variable(name, guid, NULL, &lsize, db);
+- if (status != EFI_SUCCESS) {
++ *status = efi.get_variable(name, guid, NULL, &lsize, db);
++ if (*status != EFI_SUCCESS) {
+ kfree(db);
+- pr_err("Error reading db var: 0x%lx\n", status);
++ pr_err("Error reading db var: 0x%lx\n", *status);
+ return NULL;
+ }
+
+@@ -74,6 +76,7 @@ static int __init load_uefi_certs(void)
+ efi_guid_t mok_var = EFI_SHIM_LOCK_GUID;
+ void *db = NULL, *dbx = NULL, *mok = NULL;
+ unsigned long dbsize = 0, dbxsize = 0, moksize = 0;
++ efi_status_t status;
+ int rc = 0;
+
+ if (!efi.get_variable)
+@@ -83,9 +86,12 @@ static int __init load_uefi_certs(void)
+ * an error if we can't get them.
+ */
+ if (!uefi_check_ignore_db()) {
+- db = get_cert_list(L"db", &secure_var, &dbsize);
++ db = get_cert_list(L"db", &secure_var, &dbsize, &status);
+ if (!db) {
+- pr_err("MODSIGN: Couldn't get UEFI db list\n");
++ if (status == EFI_NOT_FOUND)
++ pr_debug("MODSIGN: db variable wasn't found\n");
++ else
++ pr_err("MODSIGN: Couldn't get UEFI db list\n");
+ } else {
+ rc = parse_efi_signature_list("UEFI:db",
+ db, dbsize, get_handler_for_db);
+@@ -96,9 +102,12 @@ static int __init load_uefi_certs(void)
+ }
+ }
+
+- mok = get_cert_list(L"MokListRT", &mok_var, &moksize);
++ mok = get_cert_list(L"MokListRT", &mok_var, &moksize, &status);
+ if (!mok) {
+- pr_info("Couldn't get UEFI MokListRT\n");
++ if (status == EFI_NOT_FOUND)
++ pr_debug("MokListRT variable wasn't found\n");
++ else
++ pr_info("Couldn't get UEFI MokListRT\n");
+ } else {
+ rc = parse_efi_signature_list("UEFI:MokListRT",
+ mok, moksize, get_handler_for_db);
+@@ -107,9 +116,12 @@ static int __init load_uefi_certs(void)
+ kfree(mok);
+ }
+
+- dbx = get_cert_list(L"dbx", &secure_var, &dbxsize);
++ dbx = get_cert_list(L"dbx", &secure_var, &dbxsize, &status);
+ if (!dbx) {
+- pr_info("Couldn't get UEFI dbx list\n");
++ if (status == EFI_NOT_FOUND)
++ pr_debug("dbx variable wasn't found\n");
++ else
++ pr_info("Couldn't get UEFI dbx list\n");
+ } else {
+ rc = parse_efi_signature_list("UEFI:dbx",
+ dbx, dbxsize,
+diff --git a/sound/hda/ext/hdac_ext_controller.c b/sound/hda/ext/hdac_ext_controller.c
+index cfab60d88c92..09ff209df4a3 100644
+--- a/sound/hda/ext/hdac_ext_controller.c
++++ b/sound/hda/ext/hdac_ext_controller.c
+@@ -254,6 +254,7 @@ EXPORT_SYMBOL_GPL(snd_hdac_ext_bus_link_power_down_all);
+ int snd_hdac_ext_bus_link_get(struct hdac_bus *bus,
+ struct hdac_ext_link *link)
+ {
++ unsigned long codec_mask;
+ int ret = 0;
+
+ mutex_lock(&bus->lock);
+@@ -280,9 +281,11 @@ int snd_hdac_ext_bus_link_get(struct hdac_bus *bus,
+ * HDA spec section 4.3 - Codec Discovery
+ */
+ udelay(521);
+- bus->codec_mask = snd_hdac_chip_readw(bus, STATESTS);
+- dev_dbg(bus->dev, "codec_mask = 0x%lx\n", bus->codec_mask);
+- snd_hdac_chip_writew(bus, STATESTS, bus->codec_mask);
++ codec_mask = snd_hdac_chip_readw(bus, STATESTS);
++ dev_dbg(bus->dev, "codec_mask = 0x%lx\n", codec_mask);
++ snd_hdac_chip_writew(bus, STATESTS, codec_mask);
++ if (!bus->codec_mask)
++ bus->codec_mask = codec_mask;
+ }
+
+ mutex_unlock(&bus->lock);
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 7ba3ef6b673d..4436ebbea108 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -2447,6 +2447,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC882_FIXUP_EAPD),
+ SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3/Z87X-UD3H", ALC889_FIXUP_FRONT_HP_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1458, 0xa0b8, "Gigabyte AZ370-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
++ SND_PCI_QUIRK(0x1458, 0xa0cd, "Gigabyte X570 Aorus Master", ALC1220_FIXUP_CLEVO_P950),
+ SND_PCI_QUIRK(0x1462, 0x1228, "MSI-GP63", ALC1220_FIXUP_CLEVO_P950),
+ SND_PCI_QUIRK(0x1462, 0x1276, "MSI-GL73", ALC1220_FIXUP_CLEVO_P950),
+ SND_PCI_QUIRK(0x1462, 0x1293, "MSI-GP65", ALC1220_FIXUP_CLEVO_P950),
+@@ -5920,7 +5921,8 @@ enum {
+ ALC289_FIXUP_DUAL_SPK,
+ ALC294_FIXUP_SPK2_TO_DAC1,
+ ALC294_FIXUP_ASUS_DUAL_SPK,
+-
++ ALC285_FIXUP_THINKPAD_HEADSET_JACK,
++ ALC294_FIXUP_ASUS_HPE,
+ };
+
+ static const struct hda_fixup alc269_fixups[] = {
+@@ -6684,6 +6686,8 @@ static const struct hda_fixup alc269_fixups[] = {
+ [ALC285_FIXUP_SPEAKER2_TO_DAC1] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc285_fixup_speaker2_to_dac1,
++ .chained = true,
++ .chain_id = ALC269_FIXUP_THINKPAD_ACPI
+ },
+ [ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER] = {
+ .type = HDA_FIXUP_PINS,
+@@ -7040,7 +7044,23 @@ static const struct hda_fixup alc269_fixups[] = {
+ .chained = true,
+ .chain_id = ALC294_FIXUP_SPK2_TO_DAC1
+ },
+-
++ [ALC285_FIXUP_THINKPAD_HEADSET_JACK] = {
++ .type = HDA_FIXUP_FUNC,
++ .v.func = alc_fixup_headset_jack,
++ .chained = true,
++ .chain_id = ALC285_FIXUP_SPEAKER2_TO_DAC1
++ },
++ [ALC294_FIXUP_ASUS_HPE] = {
++ .type = HDA_FIXUP_VERBS,
++ .v.verbs = (const struct hda_verb[]) {
++ /* Set EAPD high */
++ { 0x20, AC_VERB_SET_COEF_INDEX, 0x0f },
++ { 0x20, AC_VERB_SET_PROC_COEF, 0x7774 },
++ { }
++ },
++ .chained = true,
++ .chain_id = ALC294_FIXUP_ASUS_HEADSET_MIC
++ },
+ };
+
+ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+@@ -7115,6 +7135,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1028, 0x0935, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
+ SND_PCI_QUIRK(0x1028, 0x097e, "Dell Precision", ALC289_FIXUP_DUAL_SPK),
+ SND_PCI_QUIRK(0x1028, 0x097d, "Dell Precision", ALC289_FIXUP_DUAL_SPK),
++ SND_PCI_QUIRK(0x1028, 0x098d, "Dell Precision", ALC233_FIXUP_ASUS_MIC_NO_PRESENCE),
++ SND_PCI_QUIRK(0x1028, 0x09bf, "Dell Precision", ALC233_FIXUP_ASUS_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
+@@ -7204,6 +7226,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
+ SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_DUAL_SPK),
+ SND_PCI_QUIRK(0x1043, 0x18b1, "Asus MJ401TA", ALC256_FIXUP_ASUS_HEADSET_MIC),
++ SND_PCI_QUIRK(0x1043, 0x19ce, "ASUS B9450FA", ALC294_FIXUP_ASUS_HPE),
+ SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
+ SND_PCI_QUIRK(0x1043, 0x1a30, "ASUS X705UD", ALC256_FIXUP_ASUS_MIC),
+ SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC),
+@@ -7274,8 +7297,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x17aa, 0x224c, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x224d, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x225d, "Thinkpad T480", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+- SND_PCI_QUIRK(0x17aa, 0x2292, "Thinkpad X1 Yoga 7th", ALC285_FIXUP_SPEAKER2_TO_DAC1),
+- SND_PCI_QUIRK(0x17aa, 0x2293, "Thinkpad X1 Carbon 7th", ALC285_FIXUP_SPEAKER2_TO_DAC1),
++ SND_PCI_QUIRK(0x17aa, 0x2292, "Thinkpad X1 Yoga 7th", ALC285_FIXUP_THINKPAD_HEADSET_JACK),
++ SND_PCI_QUIRK(0x17aa, 0x2293, "Thinkpad X1 Carbon 7th", ALC285_FIXUP_THINKPAD_HEADSET_JACK),
+ SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
+ SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
+ SND_PCI_QUIRK(0x17aa, 0x310c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
+diff --git a/sound/soc/codecs/pcm512x.c b/sound/soc/codecs/pcm512x.c
+index 861210f6bf4f..4cbef9affffd 100644
+--- a/sound/soc/codecs/pcm512x.c
++++ b/sound/soc/codecs/pcm512x.c
+@@ -1564,13 +1564,15 @@ int pcm512x_probe(struct device *dev, struct regmap *regmap)
+ }
+
+ pcm512x->sclk = devm_clk_get(dev, NULL);
+- if (PTR_ERR(pcm512x->sclk) == -EPROBE_DEFER)
+- return -EPROBE_DEFER;
++ if (PTR_ERR(pcm512x->sclk) == -EPROBE_DEFER) {
++ ret = -EPROBE_DEFER;
++ goto err;
++ }
+ if (!IS_ERR(pcm512x->sclk)) {
+ ret = clk_prepare_enable(pcm512x->sclk);
+ if (ret != 0) {
+ dev_err(dev, "Failed to enable SCLK: %d\n", ret);
+- return ret;
++ goto err;
+ }
+ }
+
+diff --git a/sound/soc/intel/boards/skl_hda_dsp_generic.c b/sound/soc/intel/boards/skl_hda_dsp_generic.c
+index 4e45901e3a2f..11eaee9ae41f 100644
+--- a/sound/soc/intel/boards/skl_hda_dsp_generic.c
++++ b/sound/soc/intel/boards/skl_hda_dsp_generic.c
+@@ -100,6 +100,8 @@ static struct snd_soc_card hda_soc_card = {
+ .late_probe = skl_hda_card_late_probe,
+ };
+
++static char hda_soc_components[30];
++
+ #define IDISP_DAI_COUNT 3
+ #define HDAC_DAI_COUNT 2
+ #define DMIC_DAI_COUNT 2
+@@ -183,6 +185,12 @@ static int skl_hda_audio_probe(struct platform_device *pdev)
+ hda_soc_card.dev = &pdev->dev;
+ snd_soc_card_set_drvdata(&hda_soc_card, ctx);
+
++ if (mach->mach_params.dmic_num > 0) {
++ snprintf(hda_soc_components, sizeof(hda_soc_components),
++ "cfg-dmics:%d", mach->mach_params.dmic_num);
++ hda_soc_card.components = hda_soc_components;
++ }
++
+ return devm_snd_soc_register_card(&pdev->dev, &hda_soc_card);
+ }
+
+diff --git a/sound/soc/intel/skylake/skl-debug.c b/sound/soc/intel/skylake/skl-debug.c
+index 3466675f2678..a15aa2ffa681 100644
+--- a/sound/soc/intel/skylake/skl-debug.c
++++ b/sound/soc/intel/skylake/skl-debug.c
+@@ -34,8 +34,8 @@ static ssize_t skl_print_pins(struct skl_module_pin *m_pin, char *buf,
+ int i;
+ ssize_t ret = 0;
+
+- for (i = 0; i < max_pin; i++)
+- ret += snprintf(buf + size, MOD_BUF - size,
++ for (i = 0; i < max_pin; i++) {
++ ret += scnprintf(buf + size, MOD_BUF - size,
+ "%s %d\n\tModule %d\n\tInstance %d\n\t"
+ "In-used %s\n\tType %s\n"
+ "\tState %d\n\tIndex %d\n",
+@@ -45,13 +45,15 @@ static ssize_t skl_print_pins(struct skl_module_pin *m_pin, char *buf,
+ m_pin[i].in_use ? "Used" : "Unused",
+ m_pin[i].is_dynamic ? "Dynamic" : "Static",
+ m_pin[i].pin_state, i);
++ size += ret;
++ }
+ return ret;
+ }
+
+ static ssize_t skl_print_fmt(struct skl_module_fmt *fmt, char *buf,
+ ssize_t size, bool direction)
+ {
+- return snprintf(buf + size, MOD_BUF - size,
++ return scnprintf(buf + size, MOD_BUF - size,
+ "%s\n\tCh %d\n\tFreq %d\n\tBit depth %d\n\t"
+ "Valid bit depth %d\n\tCh config %#x\n\tInterleaving %d\n\t"
+ "Sample Type %d\n\tCh Map %#x\n",
+@@ -75,16 +77,16 @@ static ssize_t module_read(struct file *file, char __user *user_buf,
+ if (!buf)
+ return -ENOMEM;
+
+- ret = snprintf(buf, MOD_BUF, "Module:\n\tUUID %pUL\n\tModule id %d\n"
++ ret = scnprintf(buf, MOD_BUF, "Module:\n\tUUID %pUL\n\tModule id %d\n"
+ "\tInstance id %d\n\tPvt_id %d\n", mconfig->guid,
+ mconfig->id.module_id, mconfig->id.instance_id,
+ mconfig->id.pvt_id);
+
+- ret += snprintf(buf + ret, MOD_BUF - ret,
++ ret += scnprintf(buf + ret, MOD_BUF - ret,
+ "Resources:\n\tCPC %#x\n\tIBS %#x\n\tOBS %#x\t\n",
+ res->cpc, res->ibs, res->obs);
+
+- ret += snprintf(buf + ret, MOD_BUF - ret,
++ ret += scnprintf(buf + ret, MOD_BUF - ret,
+ "Module data:\n\tCore %d\n\tIn queue %d\n\t"
+ "Out queue %d\n\tType %s\n",
+ mconfig->core_id, mconfig->max_in_queue,
+@@ -94,38 +96,38 @@ static ssize_t module_read(struct file *file, char __user *user_buf,
+ ret += skl_print_fmt(mconfig->in_fmt, buf, ret, true);
+ ret += skl_print_fmt(mconfig->out_fmt, buf, ret, false);
+
+- ret += snprintf(buf + ret, MOD_BUF - ret,
++ ret += scnprintf(buf + ret, MOD_BUF - ret,
+ "Fixup:\n\tParams %#x\n\tConverter %#x\n",
+ mconfig->params_fixup, mconfig->converter);
+
+- ret += snprintf(buf + ret, MOD_BUF - ret,
++ ret += scnprintf(buf + ret, MOD_BUF - ret,
+ "Module Gateway:\n\tType %#x\n\tVbus %#x\n\tHW conn %#x\n\tSlot %#x\n",
+ mconfig->dev_type, mconfig->vbus_id,
+ mconfig->hw_conn_type, mconfig->time_slot);
+
+- ret += snprintf(buf + ret, MOD_BUF - ret,
++ ret += scnprintf(buf + ret, MOD_BUF - ret,
+ "Pipeline:\n\tID %d\n\tPriority %d\n\tConn Type %d\n\t"
+ "Pages %#x\n", mconfig->pipe->ppl_id,
+ mconfig->pipe->pipe_priority, mconfig->pipe->conn_type,
+ mconfig->pipe->memory_pages);
+
+- ret += snprintf(buf + ret, MOD_BUF - ret,
++ ret += scnprintf(buf + ret, MOD_BUF - ret,
+ "\tParams:\n\t\tHost DMA %d\n\t\tLink DMA %d\n",
+ mconfig->pipe->p_params->host_dma_id,
+ mconfig->pipe->p_params->link_dma_id);
+
+- ret += snprintf(buf + ret, MOD_BUF - ret,
++ ret += scnprintf(buf + ret, MOD_BUF - ret,
+ "\tPCM params:\n\t\tCh %d\n\t\tFreq %d\n\t\tFormat %d\n",
+ mconfig->pipe->p_params->ch,
+ mconfig->pipe->p_params->s_freq,
+ mconfig->pipe->p_params->s_fmt);
+
+- ret += snprintf(buf + ret, MOD_BUF - ret,
++ ret += scnprintf(buf + ret, MOD_BUF - ret,
+ "\tLink %#x\n\tStream %#x\n",
+ mconfig->pipe->p_params->linktype,
+ mconfig->pipe->p_params->stream);
+
+- ret += snprintf(buf + ret, MOD_BUF - ret,
++ ret += scnprintf(buf + ret, MOD_BUF - ret,
+ "\tState %d\n\tPassthru %s\n",
+ mconfig->pipe->state,
+ mconfig->pipe->passthru ? "true" : "false");
+@@ -135,7 +137,7 @@ static ssize_t module_read(struct file *file, char __user *user_buf,
+ ret += skl_print_pins(mconfig->m_out_pin, buf,
+ mconfig->max_out_queue, ret, false);
+
+- ret += snprintf(buf + ret, MOD_BUF - ret,
++ ret += scnprintf(buf + ret, MOD_BUF - ret,
+ "Other:\n\tDomain %d\n\tHomogeneous Input %s\n\t"
+ "Homogeneous Output %s\n\tIn Queue Mask %d\n\t"
+ "Out Queue Mask %d\n\tDMA ID %d\n\tMem Pages %d\n\t"
+@@ -191,7 +193,7 @@ static ssize_t fw_softreg_read(struct file *file, char __user *user_buf,
+ __ioread32_copy(d->fw_read_buff, fw_reg_addr, w0_stat_sz >> 2);
+
+ for (offset = 0; offset < FW_REG_SIZE; offset += 16) {
+- ret += snprintf(tmp + ret, FW_REG_BUF - ret, "%#.4x: ", offset);
++ ret += scnprintf(tmp + ret, FW_REG_BUF - ret, "%#.4x: ", offset);
+ hex_dump_to_buffer(d->fw_read_buff + offset, 16, 16, 4,
+ tmp + ret, FW_REG_BUF - ret, 0);
+ ret += strlen(tmp + ret);
+diff --git a/sound/soc/intel/skylake/skl-ssp-clk.c b/sound/soc/intel/skylake/skl-ssp-clk.c
+index 1c0e5226cb5b..bd43885f3805 100644
+--- a/sound/soc/intel/skylake/skl-ssp-clk.c
++++ b/sound/soc/intel/skylake/skl-ssp-clk.c
+@@ -384,9 +384,11 @@ static int skl_clk_dev_probe(struct platform_device *pdev)
+ &clks[i], clk_pdata, i);
+
+ if (IS_ERR(data->clk[data->avail_clk_cnt])) {
+- ret = PTR_ERR(data->clk[data->avail_clk_cnt++]);
++ ret = PTR_ERR(data->clk[data->avail_clk_cnt]);
+ goto err_unreg_skl_clk;
+ }
++
++ data->avail_clk_cnt++;
+ }
+
+ platform_set_drvdata(pdev, data);
+diff --git a/sound/soc/soc-component.c b/sound/soc/soc-component.c
+index b94680fb26fa..a770e66b233a 100644
+--- a/sound/soc/soc-component.c
++++ b/sound/soc/soc-component.c
+@@ -452,7 +452,7 @@ int snd_soc_pcm_component_sync_stop(struct snd_pcm_substream *substream)
+ int ret;
+
+ for_each_rtd_components(rtd, rtdcom, component) {
+- if (component->driver->ioctl) {
++ if (component->driver->sync_stop) {
+ ret = component->driver->sync_stop(component,
+ substream);
+ if (ret < 0)
+diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
+index 935b5375ecc5..ebd785f9aa46 100644
+--- a/sound/soc/soc-dapm.c
++++ b/sound/soc/soc-dapm.c
+@@ -4749,7 +4749,7 @@ static void soc_dapm_shutdown_dapm(struct snd_soc_dapm_context *dapm)
+ continue;
+ if (w->power) {
+ dapm_seq_insert(w, &down_list, false);
+- w->power = 0;
++ w->new_power = 0;
+ powerdown = 1;
+ }
+ }
+diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
+index 01e7bc03d92f..8de29f48442f 100644
+--- a/sound/soc/soc-pcm.c
++++ b/sound/soc/soc-pcm.c
+@@ -3192,16 +3192,16 @@ static ssize_t dpcm_show_state(struct snd_soc_pcm_runtime *fe,
+ unsigned long flags;
+
+ /* FE state */
+- offset += snprintf(buf + offset, size - offset,
++ offset += scnprintf(buf + offset, size - offset,
+ "[%s - %s]\n", fe->dai_link->name,
+ stream ? "Capture" : "Playback");
+
+- offset += snprintf(buf + offset, size - offset, "State: %s\n",
++ offset += scnprintf(buf + offset, size - offset, "State: %s\n",
+ dpcm_state_string(fe->dpcm[stream].state));
+
+ if ((fe->dpcm[stream].state >= SND_SOC_DPCM_STATE_HW_PARAMS) &&
+ (fe->dpcm[stream].state <= SND_SOC_DPCM_STATE_STOP))
+- offset += snprintf(buf + offset, size - offset,
++ offset += scnprintf(buf + offset, size - offset,
+ "Hardware Params: "
+ "Format = %s, Channels = %d, Rate = %d\n",
+ snd_pcm_format_name(params_format(params)),
+@@ -3209,10 +3209,10 @@ static ssize_t dpcm_show_state(struct snd_soc_pcm_runtime *fe,
+ params_rate(params));
+
+ /* BEs state */
+- offset += snprintf(buf + offset, size - offset, "Backends:\n");
++ offset += scnprintf(buf + offset, size - offset, "Backends:\n");
+
+ if (list_empty(&fe->dpcm[stream].be_clients)) {
+- offset += snprintf(buf + offset, size - offset,
++ offset += scnprintf(buf + offset, size - offset,
+ " No active DSP links\n");
+ goto out;
+ }
+@@ -3222,16 +3222,16 @@ static ssize_t dpcm_show_state(struct snd_soc_pcm_runtime *fe,
+ struct snd_soc_pcm_runtime *be = dpcm->be;
+ params = &dpcm->hw_params;
+
+- offset += snprintf(buf + offset, size - offset,
++ offset += scnprintf(buf + offset, size - offset,
+ "- %s\n", be->dai_link->name);
+
+- offset += snprintf(buf + offset, size - offset,
++ offset += scnprintf(buf + offset, size - offset,
+ " State: %s\n",
+ dpcm_state_string(be->dpcm[stream].state));
+
+ if ((be->dpcm[stream].state >= SND_SOC_DPCM_STATE_HW_PARAMS) &&
+ (be->dpcm[stream].state <= SND_SOC_DPCM_STATE_STOP))
+- offset += snprintf(buf + offset, size - offset,
++ offset += scnprintf(buf + offset, size - offset,
+ " Hardware Params: "
+ "Format = %s, Channels = %d, Rate = %d\n",
+ snd_pcm_format_name(params_format(params)),
+diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
+index 0119f07cece6..56a7142f15a0 100644
+--- a/sound/soc/soc-topology.c
++++ b/sound/soc/soc-topology.c
+@@ -2335,8 +2335,11 @@ static int soc_tplg_link_elems_load(struct soc_tplg *tplg,
+ }
+
+ ret = soc_tplg_link_config(tplg, _link);
+- if (ret < 0)
++ if (ret < 0) {
++ if (!abi_match)
++ kfree(_link);
+ return ret;
++ }
+
+ /* offset by version-specific struct size and
+ * real priv data size
+@@ -2500,7 +2503,7 @@ static int soc_tplg_manifest_load(struct soc_tplg *tplg,
+ {
+ struct snd_soc_tplg_manifest *manifest, *_manifest;
+ bool abi_match;
+- int err;
++ int ret = 0;
+
+ if (tplg->pass != SOC_TPLG_PASS_MANIFEST)
+ return 0;
+@@ -2513,19 +2516,19 @@ static int soc_tplg_manifest_load(struct soc_tplg *tplg,
+ _manifest = manifest;
+ } else {
+ abi_match = false;
+- err = manifest_new_ver(tplg, manifest, &_manifest);
+- if (err < 0)
+- return err;
++ ret = manifest_new_ver(tplg, manifest, &_manifest);
++ if (ret < 0)
++ return ret;
+ }
+
+ /* pass control to component driver for optional further init */
+ if (tplg->comp && tplg->ops && tplg->ops->manifest)
+- return tplg->ops->manifest(tplg->comp, tplg->index, _manifest);
++ ret = tplg->ops->manifest(tplg->comp, tplg->index, _manifest);
+
+ if (!abi_match) /* free the duplicated one */
+ kfree(_manifest);
+
+- return 0;
++ return ret;
+ }
+
+ /* validate header magic, size and type */
+diff --git a/sound/soc/sof/intel/hda.c b/sound/soc/sof/intel/hda.c
+index 82ecadda886c..a1780259292f 100644
+--- a/sound/soc/sof/intel/hda.c
++++ b/sound/soc/sof/intel/hda.c
+@@ -351,7 +351,7 @@ static int hda_init_caps(struct snd_sof_dev *sdev)
+ const char *tplg_filename;
+ const char *idisp_str;
+ const char *dmic_str;
+- int dmic_num;
++ int dmic_num = 0;
+ int codec_num = 0;
+ int i;
+ #endif
+@@ -472,6 +472,7 @@ static int hda_init_caps(struct snd_sof_dev *sdev)
+ mach_params->codec_mask = bus->codec_mask;
+ mach_params->platform = dev_name(sdev->dev);
+ mach_params->common_hdmi_codec_drv = hda_codec_use_common_hdmi;
++ mach_params->dmic_num = dmic_num;
+ }
+
+ /* create codec instances */
+diff --git a/sound/soc/sof/ipc.c b/sound/soc/sof/ipc.c
+index dfe429f9e33f..c7a408f36733 100644
+--- a/sound/soc/sof/ipc.c
++++ b/sound/soc/sof/ipc.c
+@@ -495,7 +495,7 @@ int snd_sof_ipc_stream_posn(struct snd_sof_dev *sdev,
+
+ /* send IPC to the DSP */
+ err = sof_ipc_tx_message(sdev->ipc,
+- stream.hdr.cmd, &stream, sizeof(stream), &posn,
++ stream.hdr.cmd, &stream, sizeof(stream), posn,
+ sizeof(*posn));
+ if (err < 0) {
+ dev_err(sdev->dev, "error: failed to get stream %d position\n",
+diff --git a/tools/perf/arch/arm/util/cs-etm.c b/tools/perf/arch/arm/util/cs-etm.c
+index ede040cf82ad..20e9a189ad92 100644
+--- a/tools/perf/arch/arm/util/cs-etm.c
++++ b/tools/perf/arch/arm/util/cs-etm.c
+@@ -865,9 +865,12 @@ static int cs_etm_read_finish(struct auxtrace_record *itr, int idx)
+ struct evsel *evsel;
+
+ evlist__for_each_entry(ptr->evlist, evsel) {
+- if (evsel->core.attr.type == ptr->cs_etm_pmu->type)
++ if (evsel->core.attr.type == ptr->cs_etm_pmu->type) {
++ if (evsel->disabled)
++ return 0;
+ return perf_evlist__enable_event_idx(ptr->evlist,
+ evsel, idx);
++ }
+ }
+
+ return -EINVAL;
+diff --git a/tools/perf/arch/arm64/util/arm-spe.c b/tools/perf/arch/arm64/util/arm-spe.c
+index eba6541ec0f1..1d993c27242b 100644
+--- a/tools/perf/arch/arm64/util/arm-spe.c
++++ b/tools/perf/arch/arm64/util/arm-spe.c
+@@ -165,9 +165,12 @@ static int arm_spe_read_finish(struct auxtrace_record *itr, int idx)
+ struct evsel *evsel;
+
+ evlist__for_each_entry(sper->evlist, evsel) {
+- if (evsel->core.attr.type == sper->arm_spe_pmu->type)
++ if (evsel->core.attr.type == sper->arm_spe_pmu->type) {
++ if (evsel->disabled)
++ return 0;
+ return perf_evlist__enable_event_idx(sper->evlist,
+ evsel, idx);
++ }
+ }
+ return -EINVAL;
+ }
+diff --git a/tools/perf/arch/x86/util/intel-bts.c b/tools/perf/arch/x86/util/intel-bts.c
+index 27d9e214d068..39e363151ad7 100644
+--- a/tools/perf/arch/x86/util/intel-bts.c
++++ b/tools/perf/arch/x86/util/intel-bts.c
+@@ -420,9 +420,12 @@ static int intel_bts_read_finish(struct auxtrace_record *itr, int idx)
+ struct evsel *evsel;
+
+ evlist__for_each_entry(btsr->evlist, evsel) {
+- if (evsel->core.attr.type == btsr->intel_bts_pmu->type)
++ if (evsel->core.attr.type == btsr->intel_bts_pmu->type) {
++ if (evsel->disabled)
++ return 0;
+ return perf_evlist__enable_event_idx(btsr->evlist,
+ evsel, idx);
++ }
+ }
+ return -EINVAL;
+ }
+diff --git a/tools/perf/arch/x86/util/intel-pt.c b/tools/perf/arch/x86/util/intel-pt.c
+index 20df442fdf36..be07d6886256 100644
+--- a/tools/perf/arch/x86/util/intel-pt.c
++++ b/tools/perf/arch/x86/util/intel-pt.c
+@@ -1173,9 +1173,12 @@ static int intel_pt_read_finish(struct auxtrace_record *itr, int idx)
+ struct evsel *evsel;
+
+ evlist__for_each_entry(ptr->evlist, evsel) {
+- if (evsel->core.attr.type == ptr->intel_pt_pmu->type)
++ if (evsel->core.attr.type == ptr->intel_pt_pmu->type) {
++ if (evsel->disabled)
++ return 0;
+ return perf_evlist__enable_event_idx(ptr->evlist, evsel,
+ idx);
++ }
+ }
+ return -EINVAL;
+ }
+diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk
+index 1c8a1963d03f..3ed0134a764d 100644
+--- a/tools/testing/selftests/lib.mk
++++ b/tools/testing/selftests/lib.mk
+@@ -83,17 +83,20 @@ else
+ $(call RUN_TESTS, $(TEST_GEN_PROGS) $(TEST_CUSTOM_PROGS) $(TEST_PROGS))
+ endif
+
++define INSTALL_SINGLE_RULE
++ $(if $(INSTALL_LIST),@mkdir -p $(INSTALL_PATH))
++ $(if $(INSTALL_LIST),@echo rsync -a $(INSTALL_LIST) $(INSTALL_PATH)/)
++ $(if $(INSTALL_LIST),@rsync -a $(INSTALL_LIST) $(INSTALL_PATH)/)
++endef
++
+ define INSTALL_RULE
+- @if [ "X$(TEST_PROGS)$(TEST_PROGS_EXTENDED)$(TEST_FILES)" != "X" ]; then \
+- mkdir -p ${INSTALL_PATH}; \
+- echo "rsync -a $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES) $(INSTALL_PATH)/"; \
+- rsync -a $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES) $(INSTALL_PATH)/; \
+- fi
+- @if [ "X$(TEST_GEN_PROGS)$(TEST_CUSTOM_PROGS)$(TEST_GEN_PROGS_EXTENDED)$(TEST_GEN_FILES)" != "X" ]; then \
+- mkdir -p ${INSTALL_PATH}; \
+- echo "rsync -a $(TEST_GEN_PROGS) $(TEST_CUSTOM_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES) $(INSTALL_PATH)/"; \
+- rsync -a $(TEST_GEN_PROGS) $(TEST_CUSTOM_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES) $(INSTALL_PATH)/; \
+- fi
++ $(eval INSTALL_LIST = $(TEST_PROGS)) $(INSTALL_SINGLE_RULE)
++ $(eval INSTALL_LIST = $(TEST_PROGS_EXTENDED)) $(INSTALL_SINGLE_RULE)
++ $(eval INSTALL_LIST = $(TEST_FILES)) $(INSTALL_SINGLE_RULE)
++ $(eval INSTALL_LIST = $(TEST_GEN_PROGS)) $(INSTALL_SINGLE_RULE)
++ $(eval INSTALL_LIST = $(TEST_CUSTOM_PROGS)) $(INSTALL_SINGLE_RULE)
++ $(eval INSTALL_LIST = $(TEST_GEN_PROGS_EXTENDED)) $(INSTALL_SINGLE_RULE)
++ $(eval INSTALL_LIST = $(TEST_GEN_FILES)) $(INSTALL_SINGLE_RULE)
+ endef
+
+ install: all
+diff --git a/tools/testing/selftests/net/forwarding/mirror_gre.sh b/tools/testing/selftests/net/forwarding/mirror_gre.sh
+index e6fd7a18c655..0266443601bc 100755
+--- a/tools/testing/selftests/net/forwarding/mirror_gre.sh
++++ b/tools/testing/selftests/net/forwarding/mirror_gre.sh
+@@ -63,22 +63,23 @@ test_span_gre_mac()
+ {
+ local tundev=$1; shift
+ local direction=$1; shift
+- local prot=$1; shift
+ local what=$1; shift
+
+- local swp3mac=$(mac_get $swp3)
+- local h3mac=$(mac_get $h3)
++ case "$direction" in
++ ingress) local src_mac=$(mac_get $h1); local dst_mac=$(mac_get $h2)
++ ;;
++ egress) local src_mac=$(mac_get $h2); local dst_mac=$(mac_get $h1)
++ ;;
++ esac
+
+ RET=0
+
+ mirror_install $swp1 $direction $tundev "matchall $tcflags"
+- tc filter add dev $h3 ingress pref 77 prot $prot \
+- flower ip_proto 0x2f src_mac $swp3mac dst_mac $h3mac \
+- action pass
++ icmp_capture_install h3-${tundev} "src_mac $src_mac dst_mac $dst_mac"
+
+- mirror_test v$h1 192.0.2.1 192.0.2.2 $h3 77 10
++ mirror_test v$h1 192.0.2.1 192.0.2.2 h3-${tundev} 100 10
+
+- tc filter del dev $h3 ingress pref 77
++ icmp_capture_uninstall h3-${tundev}
+ mirror_uninstall $swp1 $direction
+
+ log_test "$direction $what: envelope MAC ($tcflags)"
+@@ -120,14 +121,14 @@ test_ip6gretap()
+
+ test_gretap_mac()
+ {
+- test_span_gre_mac gt4 ingress ip "mirror to gretap"
+- test_span_gre_mac gt4 egress ip "mirror to gretap"
++ test_span_gre_mac gt4 ingress "mirror to gretap"
++ test_span_gre_mac gt4 egress "mirror to gretap"
+ }
+
+ test_ip6gretap_mac()
+ {
+- test_span_gre_mac gt6 ingress ipv6 "mirror to ip6gretap"
+- test_span_gre_mac gt6 egress ipv6 "mirror to ip6gretap"
++ test_span_gre_mac gt6 ingress "mirror to ip6gretap"
++ test_span_gre_mac gt6 egress "mirror to ip6gretap"
+ }
+
+ test_all()
+diff --git a/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh b/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh
+index bb10e33690b2..ce6bea9675c0 100755
+--- a/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh
++++ b/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh
+@@ -516,9 +516,9 @@ test_tos()
+ RET=0
+
+ tc filter add dev v1 egress pref 77 prot ip \
+- flower ip_tos 0x40 action pass
+- vxlan_ping_test $h1 192.0.2.3 "-Q 0x40" v1 egress 77 10
+- vxlan_ping_test $h1 192.0.2.3 "-Q 0x30" v1 egress 77 0
++ flower ip_tos 0x14 action pass
++ vxlan_ping_test $h1 192.0.2.3 "-Q 0x14" v1 egress 77 10
++ vxlan_ping_test $h1 192.0.2.3 "-Q 0x18" v1 egress 77 0
+ tc filter del dev v1 egress pref 77 prot ip
+
+ log_test "VXLAN: envelope TOS inheritance"
+diff --git a/tools/testing/selftests/pidfd/.gitignore b/tools/testing/selftests/pidfd/.gitignore
+index 8d069490e17b..161facf28d6a 100644
+--- a/tools/testing/selftests/pidfd/.gitignore
++++ b/tools/testing/selftests/pidfd/.gitignore
+@@ -2,3 +2,4 @@ pidfd_open_test
+ pidfd_poll_test
+ pidfd_test
+ pidfd_wait
++pidfd_fdinfo_test
+diff --git a/tools/testing/selftests/tpm2/test_smoke.sh b/tools/testing/selftests/tpm2/test_smoke.sh
+index 8155c2ea7ccb..b630c7b5950a 100755
+--- a/tools/testing/selftests/tpm2/test_smoke.sh
++++ b/tools/testing/selftests/tpm2/test_smoke.sh
+@@ -1,8 +1,16 @@
+ #!/bin/bash
+ # SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+
+-python -m unittest -v tpm2_tests.SmokeTest
+-python -m unittest -v tpm2_tests.AsyncTest
++# Kselftest framework requirement - SKIP code is 4.
++ksft_skip=4
++
++
++if [ -e /dev/tpm0 ] ; then
++ python -m unittest -v tpm2_tests.SmokeTest
++ python -m unittest -v tpm2_tests.AsyncTest
++else
++ exit $ksft_skip
++fi
+
+ CLEAR_CMD=$(which tpm2_clear)
+ if [ -n $CLEAR_CMD ]; then
+diff --git a/tools/testing/selftests/tpm2/test_space.sh b/tools/testing/selftests/tpm2/test_space.sh
+index a6f5e346635e..180b469c53b4 100755
+--- a/tools/testing/selftests/tpm2/test_space.sh
++++ b/tools/testing/selftests/tpm2/test_space.sh
+@@ -1,4 +1,11 @@
+ #!/bin/bash
+ # SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+
+-python -m unittest -v tpm2_tests.SpaceTest
++# Kselftest framework requirement - SKIP code is 4.
++ksft_skip=4
++
++if [ -e /dev/tpmrm0 ] ; then
++ python -m unittest -v tpm2_tests.SpaceTest
++else
++ exit $ksft_skip
++fi