author     Mike Pagano <mpagano@gentoo.org>   2019-10-29 08:05:50 -0400
committer  Mike Pagano <mpagano@gentoo.org>   2019-10-29 08:05:50 -0400
commit     b5c397963982ec8b83950f7e9a2ed6c989fa8678 (patch)
tree       80658c0dcc13bb50eb3845e3fab9807be9814391
parent     Linux patch 5.3.7 (diff)
download   linux-patches-b5c39796.tar.gz
           linux-patches-b5c39796.tar.bz2
           linux-patches-b5c39796.zip

Linux patch 5.3.8 (tag: 5.3-10)
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
-rw-r--r--  0000_README             |    4
-rw-r--r--  1007_linux-5.3.8.patch  | 7745
2 files changed, 7749 insertions, 0 deletions
diff --git a/0000_README b/0000_README
index e15ba253..bc9694a4 100644
--- a/0000_README
+++ b/0000_README
@@ -71,6 +71,10 @@ Patch: 1006_linux-5.3.7.patch
From: http://www.kernel.org
Desc: Linux 5.3.7
+Patch: 1007_linux-5.3.8.patch
+From: http://www.kernel.org
+Desc: Linux 5.3.8
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1007_linux-5.3.8.patch b/1007_linux-5.3.8.patch
new file mode 100644
index 00000000..8323ef75
--- /dev/null
+++ b/1007_linux-5.3.8.patch
@@ -0,0 +1,7745 @@
+diff --git a/Documentation/arm64/silicon-errata.rst b/Documentation/arm64/silicon-errata.rst
+index 3e57d09246e6..6e52d334bc55 100644
+--- a/Documentation/arm64/silicon-errata.rst
++++ b/Documentation/arm64/silicon-errata.rst
+@@ -107,6 +107,8 @@ stable kernels.
+ +----------------+-----------------+-----------------+-----------------------------+
+ | Cavium | ThunderX2 SMMUv3| #126 | N/A |
+ +----------------+-----------------+-----------------+-----------------------------+
++| Cavium | ThunderX2 Core | #219 | CAVIUM_TX2_ERRATUM_219 |
+++----------------+-----------------+-----------------+-----------------------------+
+ +----------------+-----------------+-----------------+-----------------------------+
+ | Freescale/NXP | LS2080A/LS1043A | A-008585 | FSL_ERRATUM_A008585 |
+ +----------------+-----------------+-----------------+-----------------------------+
+diff --git a/Makefile b/Makefile
+index 7a3e659c79ae..445f9488d8ba 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 3
+-SUBLEVEL = 7
++SUBLEVEL = 8
+ EXTRAVERSION =
+ NAME = Bobtail Squid
+
+diff --git a/arch/arm/boot/dts/am335x-icev2.dts b/arch/arm/boot/dts/am335x-icev2.dts
+index 18f70b35da4c..204bccfcc110 100644
+--- a/arch/arm/boot/dts/am335x-icev2.dts
++++ b/arch/arm/boot/dts/am335x-icev2.dts
+@@ -432,7 +432,7 @@
+ pinctrl-0 = <&mmc0_pins_default>;
+ };
+
+-&gpio0 {
++&gpio0_target {
+ /* Do not idle the GPIO used for holding the VTT regulator */
+ ti,no-reset-on-init;
+ ti,no-idle-on-init;
+diff --git a/arch/arm/boot/dts/am33xx-l4.dtsi b/arch/arm/boot/dts/am33xx-l4.dtsi
+index 46849d6ecb3e..3287cf695b5a 100644
+--- a/arch/arm/boot/dts/am33xx-l4.dtsi
++++ b/arch/arm/boot/dts/am33xx-l4.dtsi
+@@ -127,7 +127,7 @@
+ ranges = <0x0 0x5000 0x1000>;
+ };
+
+- target-module@7000 { /* 0x44e07000, ap 14 20.0 */
++ gpio0_target: target-module@7000 { /* 0x44e07000, ap 14 20.0 */
+ compatible = "ti,sysc-omap2", "ti,sysc";
+ ti,hwmods = "gpio1";
+ reg = <0x7000 0x4>,
+@@ -2038,7 +2038,9 @@
+ reg = <0xe000 0x4>,
+ <0xe054 0x4>;
+ reg-names = "rev", "sysc";
+- ti,sysc-midle ;
++ ti,sysc-midle = <SYSC_IDLE_FORCE>,
++ <SYSC_IDLE_NO>,
++ <SYSC_IDLE_SMART>;
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>,
+ <SYSC_IDLE_SMART>;
+diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi
+index 848e2a8884e2..14bbc438055f 100644
+--- a/arch/arm/boot/dts/am4372.dtsi
++++ b/arch/arm/boot/dts/am4372.dtsi
+@@ -337,6 +337,8 @@
+ ti,hwmods = "dss_dispc";
+ clocks = <&disp_clk>;
+ clock-names = "fck";
++
++ max-memory-bandwidth = <230000000>;
+ };
+
+ rfbi: rfbi@4832a800 {
+diff --git a/arch/arm/boot/dts/dra7-l4.dtsi b/arch/arm/boot/dts/dra7-l4.dtsi
+index 21e5914fdd62..099d6fe2a57a 100644
+--- a/arch/arm/boot/dts/dra7-l4.dtsi
++++ b/arch/arm/boot/dts/dra7-l4.dtsi
+@@ -2762,7 +2762,7 @@
+ interrupt-names = "tx", "rx";
+ dmas = <&edma_xbar 129 1>, <&edma_xbar 128 1>;
+ dma-names = "tx", "rx";
+- clocks = <&ipu_clkctrl DRA7_IPU_MCASP1_CLKCTRL 22>,
++ clocks = <&ipu_clkctrl DRA7_IPU_MCASP1_CLKCTRL 0>,
+ <&ipu_clkctrl DRA7_IPU_MCASP1_CLKCTRL 24>,
+ <&ipu_clkctrl DRA7_IPU_MCASP1_CLKCTRL 28>;
+ clock-names = "fck", "ahclkx", "ahclkr";
+@@ -2799,8 +2799,8 @@
+ interrupt-names = "tx", "rx";
+ dmas = <&edma_xbar 131 1>, <&edma_xbar 130 1>;
+ dma-names = "tx", "rx";
+- clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP2_CLKCTRL 22>,
+- <&l4per2_clkctrl DRA7_L4PER2_MCASP2_CLKCTRL 24>,
++ clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP2_CLKCTRL 0>,
++ <&ipu_clkctrl DRA7_IPU_MCASP1_CLKCTRL 24>,
+ <&l4per2_clkctrl DRA7_L4PER2_MCASP2_CLKCTRL 28>;
+ clock-names = "fck", "ahclkx", "ahclkr";
+ status = "disabled";
+@@ -2818,9 +2818,8 @@
+ <SYSC_IDLE_SMART>;
+ /* Domains (P, C): l4per_pwrdm, l4per2_clkdm */
+ clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP3_CLKCTRL 0>,
+- <&l4per2_clkctrl DRA7_L4PER2_MCASP3_CLKCTRL 24>,
+- <&l4per2_clkctrl DRA7_L4PER2_MCASP3_CLKCTRL 28>;
+- clock-names = "fck", "ahclkx", "ahclkr";
++ <&l4per2_clkctrl DRA7_L4PER2_MCASP3_CLKCTRL 24>;
++ clock-names = "fck", "ahclkx";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x68000 0x2000>,
+@@ -2836,7 +2835,7 @@
+ interrupt-names = "tx", "rx";
+ dmas = <&edma_xbar 133 1>, <&edma_xbar 132 1>;
+ dma-names = "tx", "rx";
+- clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP3_CLKCTRL 22>,
++ clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP3_CLKCTRL 0>,
+ <&l4per2_clkctrl DRA7_L4PER2_MCASP3_CLKCTRL 24>;
+ clock-names = "fck", "ahclkx";
+ status = "disabled";
+@@ -2854,9 +2853,8 @@
+ <SYSC_IDLE_SMART>;
+ /* Domains (P, C): l4per_pwrdm, l4per2_clkdm */
+ clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP4_CLKCTRL 0>,
+- <&l4per2_clkctrl DRA7_L4PER2_MCASP4_CLKCTRL 24>,
+- <&l4per2_clkctrl DRA7_L4PER2_MCASP4_CLKCTRL 28>;
+- clock-names = "fck", "ahclkx", "ahclkr";
++ <&l4per2_clkctrl DRA7_L4PER2_MCASP4_CLKCTRL 24>;
++ clock-names = "fck", "ahclkx";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x6c000 0x2000>,
+@@ -2872,7 +2870,7 @@
+ interrupt-names = "tx", "rx";
+ dmas = <&edma_xbar 135 1>, <&edma_xbar 134 1>;
+ dma-names = "tx", "rx";
+- clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP4_CLKCTRL 22>,
++ clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP4_CLKCTRL 0>,
+ <&l4per2_clkctrl DRA7_L4PER2_MCASP4_CLKCTRL 24>;
+ clock-names = "fck", "ahclkx";
+ status = "disabled";
+@@ -2890,9 +2888,8 @@
+ <SYSC_IDLE_SMART>;
+ /* Domains (P, C): l4per_pwrdm, l4per2_clkdm */
+ clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP5_CLKCTRL 0>,
+- <&l4per2_clkctrl DRA7_L4PER2_MCASP5_CLKCTRL 24>,
+- <&l4per2_clkctrl DRA7_L4PER2_MCASP5_CLKCTRL 28>;
+- clock-names = "fck", "ahclkx", "ahclkr";
++ <&l4per2_clkctrl DRA7_L4PER2_MCASP5_CLKCTRL 24>;
++ clock-names = "fck", "ahclkx";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x70000 0x2000>,
+@@ -2908,7 +2905,7 @@
+ interrupt-names = "tx", "rx";
+ dmas = <&edma_xbar 137 1>, <&edma_xbar 136 1>;
+ dma-names = "tx", "rx";
+- clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP5_CLKCTRL 22>,
++ clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP5_CLKCTRL 0>,
+ <&l4per2_clkctrl DRA7_L4PER2_MCASP5_CLKCTRL 24>;
+ clock-names = "fck", "ahclkx";
+ status = "disabled";
+@@ -2926,9 +2923,8 @@
+ <SYSC_IDLE_SMART>;
+ /* Domains (P, C): l4per_pwrdm, l4per2_clkdm */
+ clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP6_CLKCTRL 0>,
+- <&l4per2_clkctrl DRA7_L4PER2_MCASP6_CLKCTRL 24>,
+- <&l4per2_clkctrl DRA7_L4PER2_MCASP6_CLKCTRL 28>;
+- clock-names = "fck", "ahclkx", "ahclkr";
++ <&l4per2_clkctrl DRA7_L4PER2_MCASP6_CLKCTRL 24>;
++ clock-names = "fck", "ahclkx";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x74000 0x2000>,
+@@ -2944,7 +2940,7 @@
+ interrupt-names = "tx", "rx";
+ dmas = <&edma_xbar 139 1>, <&edma_xbar 138 1>;
+ dma-names = "tx", "rx";
+- clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP6_CLKCTRL 22>,
++ clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP6_CLKCTRL 0>,
+ <&l4per2_clkctrl DRA7_L4PER2_MCASP6_CLKCTRL 24>;
+ clock-names = "fck", "ahclkx";
+ status = "disabled";
+@@ -2962,9 +2958,8 @@
+ <SYSC_IDLE_SMART>;
+ /* Domains (P, C): l4per_pwrdm, l4per2_clkdm */
+ clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP7_CLKCTRL 0>,
+- <&l4per2_clkctrl DRA7_L4PER2_MCASP7_CLKCTRL 24>,
+- <&l4per2_clkctrl DRA7_L4PER2_MCASP7_CLKCTRL 28>;
+- clock-names = "fck", "ahclkx", "ahclkr";
++ <&l4per2_clkctrl DRA7_L4PER2_MCASP7_CLKCTRL 24>;
++ clock-names = "fck", "ahclkx";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x78000 0x2000>,
+@@ -2980,7 +2975,7 @@
+ interrupt-names = "tx", "rx";
+ dmas = <&edma_xbar 141 1>, <&edma_xbar 140 1>;
+ dma-names = "tx", "rx";
+- clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP7_CLKCTRL 22>,
++ clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP7_CLKCTRL 0>,
+ <&l4per2_clkctrl DRA7_L4PER2_MCASP7_CLKCTRL 24>;
+ clock-names = "fck", "ahclkx";
+ status = "disabled";
+@@ -2998,9 +2993,8 @@
+ <SYSC_IDLE_SMART>;
+ /* Domains (P, C): l4per_pwrdm, l4per2_clkdm */
+ clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP8_CLKCTRL 0>,
+- <&l4per2_clkctrl DRA7_L4PER2_MCASP8_CLKCTRL 24>,
+- <&l4per2_clkctrl DRA7_L4PER2_MCASP8_CLKCTRL 28>;
+- clock-names = "fck", "ahclkx", "ahclkr";
++ <&l4per2_clkctrl DRA7_L4PER2_MCASP8_CLKCTRL 24>;
++ clock-names = "fck", "ahclkx";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x7c000 0x2000>,
+@@ -3016,7 +3010,7 @@
+ interrupt-names = "tx", "rx";
+ dmas = <&edma_xbar 143 1>, <&edma_xbar 142 1>;
+ dma-names = "tx", "rx";
+- clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP8_CLKCTRL 22>,
++ clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP8_CLKCTRL 0>,
+ <&l4per2_clkctrl DRA7_L4PER2_MCASP8_CLKCTRL 24>;
+ clock-names = "fck", "ahclkx";
+ status = "disabled";
+diff --git a/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c b/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c
+index adb6271f819b..7773876d165f 100644
+--- a/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c
++++ b/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c
+@@ -811,7 +811,8 @@ static struct omap_hwmod_class_sysconfig am33xx_timer_sysc = {
+ .rev_offs = 0x0000,
+ .sysc_offs = 0x0010,
+ .syss_offs = 0x0014,
+- .sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET),
++ .sysc_flags = SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
++ SYSC_HAS_RESET_STATUS,
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+ SIDLE_SMART_WKUP),
+ .sysc_fields = &omap_hwmod_sysc_type2,
+diff --git a/arch/arm/mach-omap2/omap_hwmod_33xx_data.c b/arch/arm/mach-omap2/omap_hwmod_33xx_data.c
+index c965af275e34..81d9912f17c8 100644
+--- a/arch/arm/mach-omap2/omap_hwmod_33xx_data.c
++++ b/arch/arm/mach-omap2/omap_hwmod_33xx_data.c
+@@ -231,8 +231,9 @@ static struct omap_hwmod am33xx_control_hwmod = {
+ static struct omap_hwmod_class_sysconfig lcdc_sysc = {
+ .rev_offs = 0x0,
+ .sysc_offs = 0x54,
+- .sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_MIDLEMODE),
+- .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
++ .sysc_flags = SYSC_HAS_SIDLEMODE | SYSC_HAS_MIDLEMODE,
++ .idlemodes = SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
++ MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART,
+ .sysc_fields = &omap_hwmod_sysc_type2,
+ };
+
+diff --git a/arch/arm/mach-omap2/pm.c b/arch/arm/mach-omap2/pm.c
+index 1fde1bf53fb6..7ac9af56762d 100644
+--- a/arch/arm/mach-omap2/pm.c
++++ b/arch/arm/mach-omap2/pm.c
+@@ -74,83 +74,6 @@ int omap_pm_clkdms_setup(struct clockdomain *clkdm, void *unused)
+ return 0;
+ }
+
+-/*
+- * This API is to be called during init to set the various voltage
+- * domains to the voltage as per the opp table. Typically we boot up
+- * at the nominal voltage. So this function finds out the rate of
+- * the clock associated with the voltage domain, finds out the correct
+- * opp entry and sets the voltage domain to the voltage specified
+- * in the opp entry
+- */
+-static int __init omap2_set_init_voltage(char *vdd_name, char *clk_name,
+- const char *oh_name)
+-{
+- struct voltagedomain *voltdm;
+- struct clk *clk;
+- struct dev_pm_opp *opp;
+- unsigned long freq, bootup_volt;
+- struct device *dev;
+-
+- if (!vdd_name || !clk_name || !oh_name) {
+- pr_err("%s: invalid parameters\n", __func__);
+- goto exit;
+- }
+-
+- if (!strncmp(oh_name, "mpu", 3))
+- /*
+- * All current OMAPs share voltage rail and clock
+- * source, so CPU0 is used to represent the MPU-SS.
+- */
+- dev = get_cpu_device(0);
+- else
+- dev = omap_device_get_by_hwmod_name(oh_name);
+-
+- if (IS_ERR(dev)) {
+- pr_err("%s: Unable to get dev pointer for hwmod %s\n",
+- __func__, oh_name);
+- goto exit;
+- }
+-
+- voltdm = voltdm_lookup(vdd_name);
+- if (!voltdm) {
+- pr_err("%s: unable to get vdd pointer for vdd_%s\n",
+- __func__, vdd_name);
+- goto exit;
+- }
+-
+- clk = clk_get(NULL, clk_name);
+- if (IS_ERR(clk)) {
+- pr_err("%s: unable to get clk %s\n", __func__, clk_name);
+- goto exit;
+- }
+-
+- freq = clk_get_rate(clk);
+- clk_put(clk);
+-
+- opp = dev_pm_opp_find_freq_ceil(dev, &freq);
+- if (IS_ERR(opp)) {
+- pr_err("%s: unable to find boot up OPP for vdd_%s\n",
+- __func__, vdd_name);
+- goto exit;
+- }
+-
+- bootup_volt = dev_pm_opp_get_voltage(opp);
+- dev_pm_opp_put(opp);
+-
+- if (!bootup_volt) {
+- pr_err("%s: unable to find voltage corresponding to the bootup OPP for vdd_%s\n",
+- __func__, vdd_name);
+- goto exit;
+- }
+-
+- voltdm_scale(voltdm, bootup_volt);
+- return 0;
+-
+-exit:
+- pr_err("%s: unable to set vdd_%s\n", __func__, vdd_name);
+- return -EINVAL;
+-}
+-
+ #ifdef CONFIG_SUSPEND
+ static int omap_pm_enter(suspend_state_t suspend_state)
+ {
+@@ -208,25 +131,6 @@ void omap_common_suspend_init(void *pm_suspend)
+ }
+ #endif /* CONFIG_SUSPEND */
+
+-static void __init omap3_init_voltages(void)
+-{
+- if (!soc_is_omap34xx())
+- return;
+-
+- omap2_set_init_voltage("mpu_iva", "dpll1_ck", "mpu");
+- omap2_set_init_voltage("core", "l3_ick", "l3_main");
+-}
+-
+-static void __init omap4_init_voltages(void)
+-{
+- if (!soc_is_omap44xx())
+- return;
+-
+- omap2_set_init_voltage("mpu", "dpll_mpu_ck", "mpu");
+- omap2_set_init_voltage("core", "l3_div_ck", "l3_main_1");
+- omap2_set_init_voltage("iva", "dpll_iva_m5x2_ck", "iva");
+-}
+-
+ int __maybe_unused omap_pm_nop_init(void)
+ {
+ return 0;
+@@ -246,10 +150,6 @@ int __init omap2_common_pm_late_init(void)
+ omap4_twl_init();
+ omap_voltage_late_init();
+
+- /* Initialize the voltages */
+- omap3_init_voltages();
+- omap4_init_voltages();
+-
+ /* Smartreflex device init */
+ omap_devinit_smartreflex();
+
+diff --git a/arch/arm/xen/efi.c b/arch/arm/xen/efi.c
+index d687a73044bf..cb2aaf98e243 100644
+--- a/arch/arm/xen/efi.c
++++ b/arch/arm/xen/efi.c
+@@ -19,7 +19,9 @@ void __init xen_efi_runtime_setup(void)
+ efi.get_variable = xen_efi_get_variable;
+ efi.get_next_variable = xen_efi_get_next_variable;
+ efi.set_variable = xen_efi_set_variable;
++ efi.set_variable_nonblocking = xen_efi_set_variable;
+ efi.query_variable_info = xen_efi_query_variable_info;
++ efi.query_variable_info_nonblocking = xen_efi_query_variable_info;
+ efi.update_capsule = xen_efi_update_capsule;
+ efi.query_capsule_caps = xen_efi_query_capsule_caps;
+ efi.get_next_high_mono_count = xen_efi_get_next_high_mono_count;
+diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
+index 3adcec05b1f6..e8cf56283871 100644
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -601,6 +601,23 @@ config CAVIUM_ERRATUM_30115
+
+ If unsure, say Y.
+
++config CAVIUM_TX2_ERRATUM_219
++ bool "Cavium ThunderX2 erratum 219: PRFM between TTBR change and ISB fails"
++ default y
++ help
++ On Cavium ThunderX2, a load, store or prefetch instruction between a
++ TTBR update and the corresponding context synchronizing operation can
++ cause a spurious Data Abort to be delivered to any hardware thread in
++ the CPU core.
++
++ Work around the issue by avoiding the problematic code sequence and
++ trapping KVM guest TTBRx_EL1 writes to EL2 when SMT is enabled. The
++ trap handler performs the corresponding register access, skips the
++ instruction and ensures context synchronization by virtue of the
++ exception return.
++
++ If unsure, say Y.
++
+ config QCOM_FALKOR_ERRATUM_1003
+ bool "Falkor E1003: Incorrect translation due to ASID change"
+ default y
+diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
+index f19fe4b9acc4..ac1dbca3d0cd 100644
+--- a/arch/arm64/include/asm/cpucaps.h
++++ b/arch/arm64/include/asm/cpucaps.h
+@@ -52,7 +52,9 @@
+ #define ARM64_HAS_IRQ_PRIO_MASKING 42
+ #define ARM64_HAS_DCPODP 43
+ #define ARM64_WORKAROUND_1463225 44
++#define ARM64_WORKAROUND_CAVIUM_TX2_219_TVM 45
++#define ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM 46
+
+-#define ARM64_NCAPS 45
++#define ARM64_NCAPS 47
+
+ #endif /* __ASM_CPUCAPS_H */
+diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
+index 1e43ba5c79b7..27b4a973f16d 100644
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -12,6 +12,7 @@
+ #include <asm/cpu.h>
+ #include <asm/cputype.h>
+ #include <asm/cpufeature.h>
++#include <asm/smp_plat.h>
+
+ static bool __maybe_unused
+ is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
+@@ -623,6 +624,30 @@ check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope)
+ return (need_wa > 0);
+ }
+
++static const __maybe_unused struct midr_range tx2_family_cpus[] = {
++ MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
++ MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
++ {},
++};
++
++static bool __maybe_unused
++needs_tx2_tvm_workaround(const struct arm64_cpu_capabilities *entry,
++ int scope)
++{
++ int i;
++
++ if (!is_affected_midr_range_list(entry, scope) ||
++ !is_hyp_mode_available())
++ return false;
++
++ for_each_possible_cpu(i) {
++ if (MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0) != 0)
++ return true;
++ }
++
++ return false;
++}
++
+ #ifdef CONFIG_HARDEN_EL2_VECTORS
+
+ static const struct midr_range arm64_harden_el2_vectors[] = {
+@@ -851,6 +876,19 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
+ .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+ .matches = has_cortex_a76_erratum_1463225,
+ },
++ {
++ .desc = "Cavium ThunderX2 erratum 219 (PRFM removal)",
++ .capability = ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM,
++ ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
++ },
++#endif
++#ifdef CONFIG_CAVIUM_TX2_ERRATUM_219
++ {
++ .desc = "Cavium ThunderX2 erratum 219 (KVM guest sysreg trapping)",
++ .capability = ARM64_WORKAROUND_CAVIUM_TX2_219_TVM,
++ ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
++ .matches = needs_tx2_tvm_workaround,
++ },
+ #endif
+ {
+ }
+diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
+index 84a822748c84..109894bd3194 100644
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -1070,7 +1070,9 @@ alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003
+ #else
+ ldr x30, =vectors
+ #endif
++alternative_if_not ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM
+ prfm plil1strm, [x30, #(1b - tramp_vectors)]
++alternative_else_nop_endif
+ msr vbar_el1, x30
+ add x30, x30, #(1b - tramp_vectors)
+ isb
+diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
+index adaf266d8de8..7fdc821ebb78 100644
+--- a/arch/arm64/kvm/hyp/switch.c
++++ b/arch/arm64/kvm/hyp/switch.c
+@@ -124,6 +124,9 @@ static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
+ {
+ u64 hcr = vcpu->arch.hcr_el2;
+
++ if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM))
++ hcr |= HCR_TVM;
++
+ write_sysreg(hcr, hcr_el2);
+
+ if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE))
+@@ -174,8 +177,10 @@ static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
+ * the crucial bit is "On taking a vSError interrupt,
+ * HCR_EL2.VSE is cleared to 0."
+ */
+- if (vcpu->arch.hcr_el2 & HCR_VSE)
+- vcpu->arch.hcr_el2 = read_sysreg(hcr_el2);
++ if (vcpu->arch.hcr_el2 & HCR_VSE) {
++ vcpu->arch.hcr_el2 &= ~HCR_VSE;
++ vcpu->arch.hcr_el2 |= read_sysreg(hcr_el2) & HCR_VSE;
++ }
+
+ if (has_vhe())
+ deactivate_traps_vhe();
+@@ -393,6 +398,61 @@ static bool __hyp_text __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
+ return true;
+ }
+
++static bool __hyp_text handle_tx2_tvm(struct kvm_vcpu *vcpu)
++{
++ u32 sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_hsr(vcpu));
++ int rt = kvm_vcpu_sys_get_rt(vcpu);
++ u64 val = vcpu_get_reg(vcpu, rt);
++
++ /*
++ * The normal sysreg handling code expects to see the traps,
++ * let's not do anything here.
++ */
++ if (vcpu->arch.hcr_el2 & HCR_TVM)
++ return false;
++
++ switch (sysreg) {
++ case SYS_SCTLR_EL1:
++ write_sysreg_el1(val, SYS_SCTLR);
++ break;
++ case SYS_TTBR0_EL1:
++ write_sysreg_el1(val, SYS_TTBR0);
++ break;
++ case SYS_TTBR1_EL1:
++ write_sysreg_el1(val, SYS_TTBR1);
++ break;
++ case SYS_TCR_EL1:
++ write_sysreg_el1(val, SYS_TCR);
++ break;
++ case SYS_ESR_EL1:
++ write_sysreg_el1(val, SYS_ESR);
++ break;
++ case SYS_FAR_EL1:
++ write_sysreg_el1(val, SYS_FAR);
++ break;
++ case SYS_AFSR0_EL1:
++ write_sysreg_el1(val, SYS_AFSR0);
++ break;
++ case SYS_AFSR1_EL1:
++ write_sysreg_el1(val, SYS_AFSR1);
++ break;
++ case SYS_MAIR_EL1:
++ write_sysreg_el1(val, SYS_MAIR);
++ break;
++ case SYS_AMAIR_EL1:
++ write_sysreg_el1(val, SYS_AMAIR);
++ break;
++ case SYS_CONTEXTIDR_EL1:
++ write_sysreg_el1(val, SYS_CONTEXTIDR);
++ break;
++ default:
++ return false;
++ }
++
++ __kvm_skip_instr(vcpu);
++ return true;
++}
++
+ /*
+ * Return true when we were able to fixup the guest exit and should return to
+ * the guest, false when we should restore the host state and return to the
+@@ -412,6 +472,11 @@ static bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
+ if (*exit_code != ARM_EXCEPTION_TRAP)
+ goto exit;
+
++ if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM) &&
++ kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_SYS64 &&
++ handle_tx2_tvm(vcpu))
++ return true;
++
+ /*
+ * We trap the first access to the FP/SIMD to save the host context
+ * and restore the guest context lazily.
+diff --git a/arch/mips/boot/dts/qca/ar9331.dtsi b/arch/mips/boot/dts/qca/ar9331.dtsi
+index 63a9f33aa43e..5cfc9d347826 100644
+--- a/arch/mips/boot/dts/qca/ar9331.dtsi
++++ b/arch/mips/boot/dts/qca/ar9331.dtsi
+@@ -99,7 +99,7 @@
+
+ miscintc: interrupt-controller@18060010 {
+ compatible = "qca,ar7240-misc-intc";
+- reg = <0x18060010 0x4>;
++ reg = <0x18060010 0x8>;
+
+ interrupt-parent = <&cpuintc>;
+ interrupts = <6>;
+diff --git a/arch/mips/loongson64/common/serial.c b/arch/mips/loongson64/common/serial.c
+index ffefc1cb2612..98c3a7feb10f 100644
+--- a/arch/mips/loongson64/common/serial.c
++++ b/arch/mips/loongson64/common/serial.c
+@@ -110,7 +110,7 @@ static int __init serial_init(void)
+ }
+ module_init(serial_init);
+
+-static void __init serial_exit(void)
++static void __exit serial_exit(void)
+ {
+ platform_device_unregister(&uart8250_device);
+ }
+diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
+index bece1264d1c5..b0f70006bd85 100644
+--- a/arch/mips/mm/tlbex.c
++++ b/arch/mips/mm/tlbex.c
+@@ -655,6 +655,13 @@ static void build_restore_pagemask(u32 **p, struct uasm_reloc **r,
+ int restore_scratch)
+ {
+ if (restore_scratch) {
++ /*
++ * Ensure the MFC0 below observes the value written to the
++ * KScratch register by the prior MTC0.
++ */
++ if (scratch_reg >= 0)
++ uasm_i_ehb(p);
++
+ /* Reset default page size */
+ if (PM_DEFAULT_MASK >> 16) {
+ uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
+@@ -669,12 +676,10 @@ static void build_restore_pagemask(u32 **p, struct uasm_reloc **r,
+ uasm_i_mtc0(p, 0, C0_PAGEMASK);
+ uasm_il_b(p, r, lid);
+ }
+- if (scratch_reg >= 0) {
+- uasm_i_ehb(p);
++ if (scratch_reg >= 0)
+ UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
+- } else {
++ else
+ UASM_i_LW(p, 1, scratchpad_offset(0), 0);
+- }
+ } else {
+ /* Reset default page size */
+ if (PM_DEFAULT_MASK >> 16) {
+@@ -923,6 +928,10 @@ build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
+ }
+ if (mode != not_refill && check_for_high_segbits) {
+ uasm_l_large_segbits_fault(l, *p);
++
++ if (mode == refill_scratch && scratch_reg >= 0)
++ uasm_i_ehb(p);
++
+ /*
+ * We get here if we are an xsseg address, or if we are
+ * an xuseg address above (PGDIR_SHIFT+PGDIR_BITS) boundary.
+@@ -941,12 +950,10 @@ build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
+ uasm_i_jr(p, ptr);
+
+ if (mode == refill_scratch) {
+- if (scratch_reg >= 0) {
+- uasm_i_ehb(p);
++ if (scratch_reg >= 0)
+ UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
+- } else {
++ else
+ UASM_i_LW(p, 1, scratchpad_offset(0), 0);
+- }
+ } else {
+ uasm_i_nop(p);
+ }
+diff --git a/arch/parisc/mm/ioremap.c b/arch/parisc/mm/ioremap.c
+index 92a9b5f12f98..f29f682352f0 100644
+--- a/arch/parisc/mm/ioremap.c
++++ b/arch/parisc/mm/ioremap.c
+@@ -3,7 +3,7 @@
+ * arch/parisc/mm/ioremap.c
+ *
+ * (C) Copyright 1995 1996 Linus Torvalds
+- * (C) Copyright 2001-2006 Helge Deller <deller@gmx.de>
++ * (C) Copyright 2001-2019 Helge Deller <deller@gmx.de>
+ * (C) Copyright 2005 Kyle McMartin <kyle@parisc-linux.org>
+ */
+
+@@ -84,7 +84,7 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l
+ addr = (void __iomem *) area->addr;
+ if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
+ phys_addr, pgprot)) {
+- vfree(addr);
++ vunmap(addr);
+ return NULL;
+ }
+
+@@ -92,9 +92,11 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l
+ }
+ EXPORT_SYMBOL(__ioremap);
+
+-void iounmap(const volatile void __iomem *addr)
++void iounmap(const volatile void __iomem *io_addr)
+ {
+- if (addr > high_memory)
+- return vfree((void *) (PAGE_MASK & (unsigned long __force) addr));
++ unsigned long addr = (unsigned long)io_addr & PAGE_MASK;
++
++ if (is_vmalloc_addr((void *)addr))
++ vunmap((void *)addr);
+ }
+ EXPORT_SYMBOL(iounmap);
+diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c
+index 591bfb4bfd0f..a3f9c665bb5b 100644
+--- a/arch/powerpc/kvm/book3s_xive.c
++++ b/arch/powerpc/kvm/book3s_xive.c
+@@ -1217,6 +1217,7 @@ int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
+ struct kvmppc_xive *xive = dev->private;
+ struct kvmppc_xive_vcpu *xc;
+ int i, r = -EBUSY;
++ u32 vp_id;
+
+ pr_devel("connect_vcpu(cpu=%d)\n", cpu);
+
+@@ -1228,25 +1229,32 @@ int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
+ return -EPERM;
+ if (vcpu->arch.irq_type != KVMPPC_IRQ_DEFAULT)
+ return -EBUSY;
+- if (kvmppc_xive_find_server(vcpu->kvm, cpu)) {
+- pr_devel("Duplicate !\n");
+- return -EEXIST;
+- }
+ if (cpu >= (KVM_MAX_VCPUS * vcpu->kvm->arch.emul_smt_mode)) {
+ pr_devel("Out of bounds !\n");
+ return -EINVAL;
+ }
+- xc = kzalloc(sizeof(*xc), GFP_KERNEL);
+- if (!xc)
+- return -ENOMEM;
+
+ /* We need to synchronize with queue provisioning */
+ mutex_lock(&xive->lock);
++
++ vp_id = kvmppc_xive_vp(xive, cpu);
++ if (kvmppc_xive_vp_in_use(xive->kvm, vp_id)) {
++ pr_devel("Duplicate !\n");
++ r = -EEXIST;
++ goto bail;
++ }
++
++ xc = kzalloc(sizeof(*xc), GFP_KERNEL);
++ if (!xc) {
++ r = -ENOMEM;
++ goto bail;
++ }
++
+ vcpu->arch.xive_vcpu = xc;
+ xc->xive = xive;
+ xc->vcpu = vcpu;
+ xc->server_num = cpu;
+- xc->vp_id = kvmppc_xive_vp(xive, cpu);
++ xc->vp_id = vp_id;
+ xc->mfrr = 0xff;
+ xc->valid = true;
+
+diff --git a/arch/powerpc/kvm/book3s_xive.h b/arch/powerpc/kvm/book3s_xive.h
+index 955b820ffd6d..fe3ed50e0818 100644
+--- a/arch/powerpc/kvm/book3s_xive.h
++++ b/arch/powerpc/kvm/book3s_xive.h
+@@ -220,6 +220,18 @@ static inline u32 kvmppc_xive_vp(struct kvmppc_xive *xive, u32 server)
+ return xive->vp_base + kvmppc_pack_vcpu_id(xive->kvm, server);
+ }
+
++static inline bool kvmppc_xive_vp_in_use(struct kvm *kvm, u32 vp_id)
++{
++ struct kvm_vcpu *vcpu = NULL;
++ int i;
++
++ kvm_for_each_vcpu(i, vcpu, kvm) {
++ if (vcpu->arch.xive_vcpu && vp_id == vcpu->arch.xive_vcpu->vp_id)
++ return true;
++ }
++ return false;
++}
++
+ /*
+ * Mapping between guest priorities and host priorities
+ * is as follow.
+diff --git a/arch/powerpc/kvm/book3s_xive_native.c b/arch/powerpc/kvm/book3s_xive_native.c
+index 248c1ea9e788..78b906ffa0d2 100644
+--- a/arch/powerpc/kvm/book3s_xive_native.c
++++ b/arch/powerpc/kvm/book3s_xive_native.c
+@@ -106,6 +106,7 @@ int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
+ struct kvmppc_xive *xive = dev->private;
+ struct kvmppc_xive_vcpu *xc = NULL;
+ int rc;
++ u32 vp_id;
+
+ pr_devel("native_connect_vcpu(server=%d)\n", server_num);
+
+@@ -124,7 +125,8 @@ int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
+
+ mutex_lock(&xive->lock);
+
+- if (kvmppc_xive_find_server(vcpu->kvm, server_num)) {
++ vp_id = kvmppc_xive_vp(xive, server_num);
++ if (kvmppc_xive_vp_in_use(xive->kvm, vp_id)) {
+ pr_devel("Duplicate !\n");
+ rc = -EEXIST;
+ goto bail;
+@@ -141,7 +143,7 @@ int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
+ xc->vcpu = vcpu;
+ xc->server_num = server_num;
+
+- xc->vp_id = kvmppc_xive_vp(xive, server_num);
++ xc->vp_id = vp_id;
+ xc->valid = true;
+ vcpu->arch.irq_type = KVMPPC_IRQ_XIVE;
+
+diff --git a/arch/riscv/include/asm/asm.h b/arch/riscv/include/asm/asm.h
+index 5a02b7d50940..9c992a88d858 100644
+--- a/arch/riscv/include/asm/asm.h
++++ b/arch/riscv/include/asm/asm.h
+@@ -22,6 +22,7 @@
+
+ #define REG_L __REG_SEL(ld, lw)
+ #define REG_S __REG_SEL(sd, sw)
++#define REG_SC __REG_SEL(sc.d, sc.w)
+ #define SZREG __REG_SEL(8, 4)
+ #define LGREG __REG_SEL(3, 2)
+
+diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
+index 9b60878a4469..2a82e0a5af46 100644
+--- a/arch/riscv/kernel/entry.S
++++ b/arch/riscv/kernel/entry.S
+@@ -98,7 +98,26 @@ _save_context:
+ */
+ .macro RESTORE_ALL
+ REG_L a0, PT_SSTATUS(sp)
+- REG_L a2, PT_SEPC(sp)
++ /*
++ * The current load reservation is effectively part of the processor's
++ * state, in the sense that load reservations cannot be shared between
++ * different hart contexts. We can't actually save and restore a load
++ * reservation, so instead here we clear any existing reservation --
++ * it's always legal for implementations to clear load reservations at
++ * any point (as long as the forward progress guarantee is kept, but
++ * we'll ignore that here).
++ *
++ * Dangling load reservations can be the result of taking a trap in the
++ * middle of an LR/SC sequence, but can also be the result of a taken
++ * forward branch around an SC -- which is how we implement CAS. As a
++ * result we need to clear reservations between the last CAS and the
++ * jump back to the new context. While it is unlikely the store
++ * completes, implementations are allowed to expand reservations to be
++ * arbitrarily large.
++ */
++ REG_L a2, PT_SEPC(sp)
++ REG_SC x0, a2, PT_SEPC(sp)
++
+ csrw CSR_SSTATUS, a0
+ csrw CSR_SEPC, a2
+
+diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
+index 42bf939693d3..ed9cd9944d4f 100644
+--- a/arch/riscv/mm/init.c
++++ b/arch/riscv/mm/init.c
+@@ -11,6 +11,7 @@
+ #include <linux/swap.h>
+ #include <linux/sizes.h>
+ #include <linux/of_fdt.h>
++#include <linux/libfdt.h>
+
+ #include <asm/fixmap.h>
+ #include <asm/tlbflush.h>
+@@ -82,6 +83,8 @@ disable:
+ }
+ #endif /* CONFIG_BLK_DEV_INITRD */
+
++static phys_addr_t dtb_early_pa __initdata;
++
+ void __init setup_bootmem(void)
+ {
+ struct memblock_region *reg;
+@@ -117,7 +120,12 @@ void __init setup_bootmem(void)
+ setup_initrd();
+ #endif /* CONFIG_BLK_DEV_INITRD */
+
+- early_init_fdt_reserve_self();
++ /*
++ * Avoid using early_init_fdt_reserve_self() since __pa() does
++ * not work for DTB pointers that are fixmap addresses
++ */
++ memblock_reserve(dtb_early_pa, fdt_totalsize(dtb_early_va));
++
+ early_init_fdt_scan_reserved_mem();
+ memblock_allow_resize();
+ memblock_dump_all();
+@@ -393,6 +401,8 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
+
+ /* Save pointer to DTB for early FDT parsing */
+ dtb_early_va = (void *)fix_to_virt(FIX_FDT) + (dtb_pa & ~PAGE_MASK);
++ /* Save physical address for memblock reservation */
++ dtb_early_pa = dtb_pa;
+ }
+
+ static void __init setup_vm_final(void)
+diff --git a/arch/s390/boot/startup.c b/arch/s390/boot/startup.c
+index 7b0d05414618..ceeacbeff600 100644
+--- a/arch/s390/boot/startup.c
++++ b/arch/s390/boot/startup.c
+@@ -101,10 +101,18 @@ static void handle_relocs(unsigned long offset)
+ dynsym = (Elf64_Sym *) vmlinux.dynsym_start;
+ for (rela = rela_start; rela < rela_end; rela++) {
+ loc = rela->r_offset + offset;
+- val = rela->r_addend + offset;
++ val = rela->r_addend;
+ r_sym = ELF64_R_SYM(rela->r_info);
+- if (r_sym)
+- val += dynsym[r_sym].st_value;
++ if (r_sym) {
++ if (dynsym[r_sym].st_shndx != SHN_UNDEF)
++ val += dynsym[r_sym].st_value + offset;
++ } else {
++ /*
++ * 0 == undefined symbol table index (STN_UNDEF),
++ * used for R_390_RELATIVE, only add KASLR offset
++ */
++ val += offset;
++ }
+ r_type = ELF64_R_TYPE(rela->r_info);
+ rc = arch_kexec_do_relocs(r_type, (void *) loc, val, 0);
+ if (rc)
+diff --git a/arch/s390/include/asm/hugetlb.h b/arch/s390/include/asm/hugetlb.h
+index bb59dd964590..de8f0bf5f238 100644
+--- a/arch/s390/include/asm/hugetlb.h
++++ b/arch/s390/include/asm/hugetlb.h
+@@ -12,8 +12,6 @@
+ #include <asm/page.h>
+ #include <asm/pgtable.h>
+
+-
+-#define is_hugepage_only_range(mm, addr, len) 0
+ #define hugetlb_free_pgd_range free_pgd_range
+ #define hugepages_supported() (MACHINE_HAS_EDAT1)
+
+@@ -23,6 +21,13 @@ pte_t huge_ptep_get(pte_t *ptep);
+ pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep);
+
++static inline bool is_hugepage_only_range(struct mm_struct *mm,
++ unsigned long addr,
++ unsigned long len)
++{
++ return false;
++}
++
+ /*
+ * If the arch doesn't supply something else, assume that hugepage
+ * size aligned regions are ok without further preparation.
+diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
+index 9b274fcaacb6..70ac23e50cae 100644
+--- a/arch/s390/include/asm/pgtable.h
++++ b/arch/s390/include/asm/pgtable.h
+@@ -1268,7 +1268,8 @@ static inline pte_t *pte_offset(pmd_t *pmd, unsigned long address)
+
+ #define pte_offset_kernel(pmd, address) pte_offset(pmd, address)
+ #define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
+-#define pte_unmap(pte) do { } while (0)
++
++static inline void pte_unmap(pte_t *pte) { }
+
+ static inline bool gup_fast_permitted(unsigned long start, unsigned long end)
+ {
+diff --git a/arch/s390/kernel/machine_kexec_reloc.c b/arch/s390/kernel/machine_kexec_reloc.c
+index 3b664cb3ec4d..d5035de9020e 100644
+--- a/arch/s390/kernel/machine_kexec_reloc.c
++++ b/arch/s390/kernel/machine_kexec_reloc.c
+@@ -27,6 +27,7 @@ int arch_kexec_do_relocs(int r_type, void *loc, unsigned long val,
+ *(u32 *)loc = val;
+ break;
+ case R_390_64: /* Direct 64 bit. */
++ case R_390_GLOB_DAT:
+ *(u64 *)loc = val;
+ break;
+ case R_390_PC16: /* PC relative 16 bit. */
+diff --git a/arch/x86/hyperv/hv_apic.c b/arch/x86/hyperv/hv_apic.c
+index 5c056b8aebef..e01078e93dd3 100644
+--- a/arch/x86/hyperv/hv_apic.c
++++ b/arch/x86/hyperv/hv_apic.c
+@@ -260,11 +260,21 @@ void __init hv_apic_init(void)
+ }
+
+ if (ms_hyperv.hints & HV_X64_APIC_ACCESS_RECOMMENDED) {
+- pr_info("Hyper-V: Using MSR based APIC access\n");
++ pr_info("Hyper-V: Using enlightened APIC (%s mode)",
++ x2apic_enabled() ? "x2apic" : "xapic");
++ /*
++ * With x2apic, architectural x2apic MSRs are equivalent to the
++ * respective synthetic MSRs, so there's no need to override
++ * the apic accessors. The only exception is
++ * hv_apic_eoi_write, because it benefits from lazy EOI when
++ * available, but it works for both xapic and x2apic modes.
++ */
+ apic_set_eoi_write(hv_apic_eoi_write);
+- apic->read = hv_apic_read;
+- apic->write = hv_apic_write;
+- apic->icr_write = hv_apic_icr_write;
+- apic->icr_read = hv_apic_icr_read;
++ if (!x2apic_enabled()) {
++ apic->read = hv_apic_read;
++ apic->write = hv_apic_write;
++ apic->icr_write = hv_apic_icr_write;
++ apic->icr_read = hv_apic_icr_read;
++ }
+ }
+ }
+diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
+index 35c225ede0e4..61d93f062a36 100644
+--- a/arch/x86/include/asm/uaccess.h
++++ b/arch/x86/include/asm/uaccess.h
+@@ -734,5 +734,28 @@ do { \
+ if (unlikely(__gu_err)) goto err_label; \
+ } while (0)
+
++/*
++ * We want the unsafe accessors to always be inlined and use
++ * the error labels - thus the macro games.
++ */
++#define unsafe_copy_loop(dst, src, len, type, label) \
++ while (len >= sizeof(type)) { \
++ unsafe_put_user(*(type *)src,(type __user *)dst,label); \
++ dst += sizeof(type); \
++ src += sizeof(type); \
++ len -= sizeof(type); \
++ }
++
++#define unsafe_copy_to_user(_dst,_src,_len,label) \
++do { \
++ char __user *__ucu_dst = (_dst); \
++ const char *__ucu_src = (_src); \
++ size_t __ucu_len = (_len); \
++ unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u64, label); \
++ unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u32, label); \
++ unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u16, label); \
++ unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u8, label); \
++} while (0)
++
+ #endif /* _ASM_X86_UACCESS_H */
+
+diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
+index 609e499387a1..0cad36d1457a 100644
+--- a/arch/x86/kernel/apic/x2apic_cluster.c
++++ b/arch/x86/kernel/apic/x2apic_cluster.c
+@@ -158,7 +158,8 @@ static int x2apic_dead_cpu(unsigned int dead_cpu)
+ {
+ struct cluster_mask *cmsk = per_cpu(cluster_masks, dead_cpu);
+
+- cpumask_clear_cpu(dead_cpu, &cmsk->mask);
++ if (cmsk)
++ cpumask_clear_cpu(dead_cpu, &cmsk->mask);
+ free_cpumask_var(per_cpu(ipi_mask, dead_cpu));
+ return 0;
+ }
+diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
+index 29ffa495bd1c..206a4b6144c2 100644
+--- a/arch/x86/kernel/head64.c
++++ b/arch/x86/kernel/head64.c
+@@ -222,13 +222,31 @@ unsigned long __head __startup_64(unsigned long physaddr,
+ * we might write invalid pmds, when the kernel is relocated
+ * cleanup_highmap() fixes this up along with the mappings
+ * beyond _end.
++ *
++ * Only the region occupied by the kernel image has so far
++ * been checked against the table of usable memory regions
++ * provided by the firmware, so invalidate pages outside that
++ * region. A page table entry that maps to a reserved area of
++ * memory would allow processor speculation into that area,
++ * and on some hardware (particularly the UV platform) even
++ * speculative access to some reserved areas is caught as an
++ * error, causing the BIOS to halt the system.
+ */
+
+ pmd = fixup_pointer(level2_kernel_pgt, physaddr);
+- for (i = 0; i < PTRS_PER_PMD; i++) {
++
++ /* invalidate pages before the kernel image */
++ for (i = 0; i < pmd_index((unsigned long)_text); i++)
++ pmd[i] &= ~_PAGE_PRESENT;
++
++ /* fixup pages that are part of the kernel image */
++ for (; i <= pmd_index((unsigned long)_end); i++)
+ if (pmd[i] & _PAGE_PRESENT)
+ pmd[i] += load_delta;
+- }
++
++ /* invalidate pages after the kernel image */
++ for (; i < PTRS_PER_PMD; i++)
++ pmd[i] &= ~_PAGE_PRESENT;
+
+ /*
+ * Fixup phys_base - remove the memory encryption mask to obtain
+diff --git a/arch/x86/xen/efi.c b/arch/x86/xen/efi.c
+index 0d3365cb64de..7e3eb70f411a 100644
+--- a/arch/x86/xen/efi.c
++++ b/arch/x86/xen/efi.c
+@@ -65,7 +65,9 @@ static efi_system_table_t __init *xen_efi_probe(void)
+ efi.get_variable = xen_efi_get_variable;
+ efi.get_next_variable = xen_efi_get_next_variable;
+ efi.set_variable = xen_efi_set_variable;
++ efi.set_variable_nonblocking = xen_efi_set_variable;
+ efi.query_variable_info = xen_efi_query_variable_info;
++ efi.query_variable_info_nonblocking = xen_efi_query_variable_info;
+ efi.update_capsule = xen_efi_update_capsule;
+ efi.query_capsule_caps = xen_efi_query_capsule_caps;
+ efi.get_next_high_mono_count = xen_efi_get_next_high_mono_count;
+diff --git a/arch/xtensa/include/asm/bitops.h b/arch/xtensa/include/asm/bitops.h
+index aeb15f4c755b..be8b2be5a98b 100644
+--- a/arch/xtensa/include/asm/bitops.h
++++ b/arch/xtensa/include/asm/bitops.h
+@@ -148,7 +148,7 @@ static inline void change_bit(unsigned int bit, volatile unsigned long *p)
+ " getex %0\n"
+ " beqz %0, 1b\n"
+ : "=&a" (tmp)
+- : "a" (~mask), "a" (p)
++ : "a" (mask), "a" (p)
+ : "memory");
+ }
+
+diff --git a/arch/xtensa/kernel/xtensa_ksyms.c b/arch/xtensa/kernel/xtensa_ksyms.c
+index 04f19de46700..4092555828b1 100644
+--- a/arch/xtensa/kernel/xtensa_ksyms.c
++++ b/arch/xtensa/kernel/xtensa_ksyms.c
+@@ -119,13 +119,6 @@ EXPORT_SYMBOL(__invalidate_icache_range);
+ // FIXME EXPORT_SYMBOL(screen_info);
+ #endif
+
+-EXPORT_SYMBOL(outsb);
+-EXPORT_SYMBOL(outsw);
+-EXPORT_SYMBOL(outsl);
+-EXPORT_SYMBOL(insb);
+-EXPORT_SYMBOL(insw);
+-EXPORT_SYMBOL(insl);
+-
+ extern long common_exception_return;
+ EXPORT_SYMBOL(common_exception_return);
+
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index a79b9ad1aba1..ed41cde93641 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -1998,6 +1998,8 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
+ }
+
+ blk_add_rq_to_plug(plug, rq);
++ } else if (q->elevator) {
++ blk_mq_sched_insert_request(rq, false, true, true);
+ } else if (plug && !blk_queue_nomerges(q)) {
+ /*
+ * We do limited plugging. If the bio can be merged, do that.
+@@ -2021,8 +2023,8 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
+ blk_mq_try_issue_directly(data.hctx, same_queue_rq,
+ &cookie);
+ }
+- } else if ((q->nr_hw_queues > 1 && is_sync) || (!q->elevator &&
+- !data.hctx->dispatch_busy)) {
++ } else if ((q->nr_hw_queues > 1 && is_sync) ||
++ !data.hctx->dispatch_busy) {
+ blk_mq_try_issue_directly(data.hctx, rq, &cookie);
+ } else {
+ blk_mq_sched_insert_request(rq, false, true, true);
+diff --git a/block/blk-rq-qos.h b/block/blk-rq-qos.h
+index c0f0778d5396..8378f68a21ac 100644
+--- a/block/blk-rq-qos.h
++++ b/block/blk-rq-qos.h
+@@ -103,16 +103,13 @@ static inline void rq_qos_add(struct request_queue *q, struct rq_qos *rqos)
+
+ static inline void rq_qos_del(struct request_queue *q, struct rq_qos *rqos)
+ {
+- struct rq_qos *cur, *prev = NULL;
+- for (cur = q->rq_qos; cur; cur = cur->next) {
+- if (cur == rqos) {
+- if (prev)
+- prev->next = rqos->next;
+- else
+- q->rq_qos = cur;
++ struct rq_qos **cur;
++
++ for (cur = &q->rq_qos; *cur; cur = &(*cur)->next) {
++ if (*cur == rqos) {
++ *cur = rqos->next;
+ break;
+ }
+- prev = cur;
+ }
+
+ blk_mq_debugfs_unregister_rqos(rqos);
+diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
+index 3b2525908dd8..a1a858ad4d18 100644
+--- a/drivers/acpi/cppc_acpi.c
++++ b/drivers/acpi/cppc_acpi.c
+@@ -905,8 +905,8 @@ void acpi_cppc_processor_exit(struct acpi_processor *pr)
+ pcc_data[pcc_ss_id]->refcount--;
+ if (!pcc_data[pcc_ss_id]->refcount) {
+ pcc_mbox_free_channel(pcc_data[pcc_ss_id]->pcc_channel);
+- pcc_data[pcc_ss_id]->pcc_channel_acquired = 0;
+ kfree(pcc_data[pcc_ss_id]);
++ pcc_data[pcc_ss_id] = NULL;
+ }
+ }
+ }
+diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
+index 1413324982f0..14e68f202f81 100644
+--- a/drivers/acpi/nfit/core.c
++++ b/drivers/acpi/nfit/core.c
+@@ -1322,7 +1322,7 @@ static ssize_t scrub_show(struct device *dev,
+ nfit_device_lock(dev);
+ nd_desc = dev_get_drvdata(dev);
+ if (!nd_desc) {
+- device_unlock(dev);
++ nfit_device_unlock(dev);
+ return rc;
+ }
+ acpi_desc = to_acpi_desc(nd_desc);
+diff --git a/drivers/android/binder.c b/drivers/android/binder.c
+index dc1c83eafc22..1c5278207153 100644
+--- a/drivers/android/binder.c
++++ b/drivers/android/binder.c
+@@ -95,10 +95,6 @@ DEFINE_SHOW_ATTRIBUTE(proc);
+ #define SZ_1K 0x400
+ #endif
+
+-#ifndef SZ_4M
+-#define SZ_4M 0x400000
+-#endif
+-
+ #define FORBIDDEN_MMAP_FLAGS (VM_WRITE)
+
+ enum {
+@@ -5195,9 +5191,6 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
+ if (proc->tsk != current->group_leader)
+ return -EINVAL;
+
+- if ((vma->vm_end - vma->vm_start) > SZ_4M)
+- vma->vm_end = vma->vm_start + SZ_4M;
+-
+ binder_debug(BINDER_DEBUG_OPEN_CLOSE,
+ "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
+ __func__, proc->pid, vma->vm_start, vma->vm_end,
+diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
+index 6d79a1b0d446..8fe99b20ca02 100644
+--- a/drivers/android/binder_alloc.c
++++ b/drivers/android/binder_alloc.c
+@@ -22,6 +22,7 @@
+ #include <asm/cacheflush.h>
+ #include <linux/uaccess.h>
+ #include <linux/highmem.h>
++#include <linux/sizes.h>
+ #include "binder_alloc.h"
+ #include "binder_trace.h"
+
+@@ -689,7 +690,9 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
+ alloc->buffer = (void __user *)vma->vm_start;
+ mutex_unlock(&binder_alloc_mmap_lock);
+
+- alloc->pages = kcalloc((vma->vm_end - vma->vm_start) / PAGE_SIZE,
++ alloc->buffer_size = min_t(unsigned long, vma->vm_end - vma->vm_start,
++ SZ_4M);
++ alloc->pages = kcalloc(alloc->buffer_size / PAGE_SIZE,
+ sizeof(alloc->pages[0]),
+ GFP_KERNEL);
+ if (alloc->pages == NULL) {
+@@ -697,7 +700,6 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
+ failure_string = "alloc page array";
+ goto err_alloc_pages_failed;
+ }
+- alloc->buffer_size = vma->vm_end - vma->vm_start;
+
+ buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+ if (!buffer) {
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index 3e63294304c7..691852b8bb41 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -1617,7 +1617,9 @@ static void ahci_intel_pcs_quirk(struct pci_dev *pdev, struct ahci_host_priv *hp
+ */
+ if (!id || id->vendor != PCI_VENDOR_ID_INTEL)
+ return;
+- if (((enum board_ids) id->driver_data) < board_ahci_pcs7)
++
++ /* Skip applying the quirk on Denverton and beyond */
++ if (((enum board_ids) id->driver_data) >= board_ahci_pcs7)
+ return;
+
+ /*
+diff --git a/drivers/base/core.c b/drivers/base/core.c
+index 1669d41fcddc..810329523c28 100644
+--- a/drivers/base/core.c
++++ b/drivers/base/core.c
+@@ -9,6 +9,7 @@
+ */
+
+ #include <linux/acpi.h>
++#include <linux/cpufreq.h>
+ #include <linux/device.h>
+ #include <linux/err.h>
+ #include <linux/fwnode.h>
+@@ -3150,6 +3151,8 @@ void device_shutdown(void)
+ wait_for_device_probe();
+ device_block_probing();
+
++ cpufreq_suspend();
++
+ spin_lock(&devices_kset->list_lock);
+ /*
+ * Walk the devices list backward, shutting down each in turn.
+diff --git a/drivers/base/memory.c b/drivers/base/memory.c
+index 20c39d1bcef8..9b9abc4fcfb7 100644
+--- a/drivers/base/memory.c
++++ b/drivers/base/memory.c
+@@ -554,6 +554,9 @@ static ssize_t soft_offline_page_store(struct device *dev,
+ pfn >>= PAGE_SHIFT;
+ if (!pfn_valid(pfn))
+ return -ENXIO;
++ /* Only online pages can be soft-offlined (esp., not ZONE_DEVICE). */
++ if (!pfn_to_online_page(pfn))
++ return -EIO;
+ ret = soft_offline_page(pfn_to_page(pfn), 0);
+ return ret == 0 ? count : ret;
+ }
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index 1410fa893653..f6f77eaa7217 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -994,6 +994,16 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
+ if (!(lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync)
+ blk_queue_write_cache(lo->lo_queue, true, false);
+
++ if (io_is_direct(lo->lo_backing_file) && inode->i_sb->s_bdev) {
++ /* In case of direct I/O, match underlying block size */
++ unsigned short bsize = bdev_logical_block_size(
++ inode->i_sb->s_bdev);
++
++ blk_queue_logical_block_size(lo->lo_queue, bsize);
++ blk_queue_physical_block_size(lo->lo_queue, bsize);
++ blk_queue_io_min(lo->lo_queue, bsize);
++ }
++
+ loop_update_rotational(lo);
+ loop_update_dio(lo);
+ set_capacity(lo->lo_disk, size);
+diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
+index d58a359a6622..4285e75e52c3 100644
+--- a/drivers/block/zram/zram_drv.c
++++ b/drivers/block/zram/zram_drv.c
+@@ -413,13 +413,14 @@ static void reset_bdev(struct zram *zram)
+ static ssize_t backing_dev_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+ {
++ struct file *file;
+ struct zram *zram = dev_to_zram(dev);
+- struct file *file = zram->backing_dev;
+ char *p;
+ ssize_t ret;
+
+ down_read(&zram->init_lock);
+- if (!zram->backing_dev) {
++ file = zram->backing_dev;
++ if (!file) {
+ memcpy(buf, "none\n", 5);
+ up_read(&zram->init_lock);
+ return 5;
+diff --git a/drivers/clk/ti/clk-7xx.c b/drivers/clk/ti/clk-7xx.c
+index b57fe09b428b..9dd6185a4b4e 100644
+--- a/drivers/clk/ti/clk-7xx.c
++++ b/drivers/clk/ti/clk-7xx.c
+@@ -683,7 +683,7 @@ static const struct omap_clkctrl_reg_data dra7_l4per2_clkctrl_regs[] __initconst
+ { DRA7_L4PER2_MCASP2_CLKCTRL, dra7_mcasp2_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:0154:22" },
+ { DRA7_L4PER2_MCASP3_CLKCTRL, dra7_mcasp3_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:015c:22" },
+ { DRA7_L4PER2_MCASP5_CLKCTRL, dra7_mcasp5_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:016c:22" },
+- { DRA7_L4PER2_MCASP8_CLKCTRL, dra7_mcasp8_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:0184:24" },
++ { DRA7_L4PER2_MCASP8_CLKCTRL, dra7_mcasp8_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:0184:22" },
+ { DRA7_L4PER2_MCASP4_CLKCTRL, dra7_mcasp4_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:018c:22" },
+ { DRA7_L4PER2_UART7_CLKCTRL, dra7_uart7_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:01c4:24" },
+ { DRA7_L4PER2_UART8_CLKCTRL, dra7_uart8_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:01d4:24" },
+@@ -828,8 +828,8 @@ static struct ti_dt_clk dra7xx_clks[] = {
+ DT_CLK(NULL, "mcasp6_aux_gfclk_mux", "l4per2-clkctrl:01f8:22"),
+ DT_CLK(NULL, "mcasp7_ahclkx_mux", "l4per2-clkctrl:01fc:24"),
+ DT_CLK(NULL, "mcasp7_aux_gfclk_mux", "l4per2-clkctrl:01fc:22"),
+- DT_CLK(NULL, "mcasp8_ahclkx_mux", "l4per2-clkctrl:0184:22"),
+- DT_CLK(NULL, "mcasp8_aux_gfclk_mux", "l4per2-clkctrl:0184:24"),
++ DT_CLK(NULL, "mcasp8_ahclkx_mux", "l4per2-clkctrl:0184:24"),
++ DT_CLK(NULL, "mcasp8_aux_gfclk_mux", "l4per2-clkctrl:0184:22"),
+ DT_CLK(NULL, "mmc1_clk32k", "l3init-clkctrl:0008:8"),
+ DT_CLK(NULL, "mmc1_fclk_div", "l3init-clkctrl:0008:25"),
+ DT_CLK(NULL, "mmc1_fclk_mux", "l3init-clkctrl:0008:24"),
+diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
+index c28ebf2810f1..f970f87ce86e 100644
+--- a/drivers/cpufreq/cpufreq.c
++++ b/drivers/cpufreq/cpufreq.c
+@@ -2746,14 +2746,6 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver)
+ }
+ EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
+
+-/*
+- * Stop cpufreq at shutdown to make sure it isn't holding any locks
+- * or mutexes when secondary CPUs are halted.
+- */
+-static struct syscore_ops cpufreq_syscore_ops = {
+- .shutdown = cpufreq_suspend,
+-};
+-
+ struct kobject *cpufreq_global_kobject;
+ EXPORT_SYMBOL(cpufreq_global_kobject);
+
+@@ -2765,8 +2757,6 @@ static int __init cpufreq_core_init(void)
+ cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
+ BUG_ON(!cpufreq_global_kobject);
+
+- register_syscore_ops(&cpufreq_syscore_ops);
+-
+ return 0;
+ }
+ module_param(off, int, 0444);
+diff --git a/drivers/edac/ghes_edac.c b/drivers/edac/ghes_edac.c
+index 7f19f1c672c3..2059e43ccc01 100644
+--- a/drivers/edac/ghes_edac.c
++++ b/drivers/edac/ghes_edac.c
+@@ -553,7 +553,11 @@ void ghes_edac_unregister(struct ghes *ghes)
+ if (!ghes_pvt)
+ return;
+
++ if (atomic_dec_return(&ghes_init))
++ return;
++
+ mci = ghes_pvt->mci;
++ ghes_pvt = NULL;
+ edac_mc_del_mc(mci->pdev);
+ edac_mc_free(mci);
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+index eba42c752bca..82155ac3288a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+@@ -189,7 +189,7 @@ static int acp_hw_init(void *handle)
+ u32 val = 0;
+ u32 count = 0;
+ struct device *dev;
+- struct i2s_platform_data *i2s_pdata;
++ struct i2s_platform_data *i2s_pdata = NULL;
+
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+@@ -231,20 +231,21 @@ static int acp_hw_init(void *handle)
+ adev->acp.acp_cell = kcalloc(ACP_DEVS, sizeof(struct mfd_cell),
+ GFP_KERNEL);
+
+- if (adev->acp.acp_cell == NULL)
+- return -ENOMEM;
++ if (adev->acp.acp_cell == NULL) {
++ r = -ENOMEM;
++ goto failure;
++ }
+
+ adev->acp.acp_res = kcalloc(5, sizeof(struct resource), GFP_KERNEL);
+ if (adev->acp.acp_res == NULL) {
+- kfree(adev->acp.acp_cell);
+- return -ENOMEM;
++ r = -ENOMEM;
++ goto failure;
+ }
+
+ i2s_pdata = kcalloc(3, sizeof(struct i2s_platform_data), GFP_KERNEL);
+ if (i2s_pdata == NULL) {
+- kfree(adev->acp.acp_res);
+- kfree(adev->acp.acp_cell);
+- return -ENOMEM;
++ r = -ENOMEM;
++ goto failure;
+ }
+
+ switch (adev->asic_type) {
+@@ -341,14 +342,14 @@ static int acp_hw_init(void *handle)
+ r = mfd_add_hotplug_devices(adev->acp.parent, adev->acp.acp_cell,
+ ACP_DEVS);
+ if (r)
+- return r;
++ goto failure;
+
+ for (i = 0; i < ACP_DEVS ; i++) {
+ dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
+ r = pm_genpd_add_device(&adev->acp.acp_genpd->gpd, dev);
+ if (r) {
+ dev_err(dev, "Failed to add dev to genpd\n");
+- return r;
++ goto failure;
+ }
+ }
+
+@@ -367,7 +368,8 @@ static int acp_hw_init(void *handle)
+ break;
+ if (--count == 0) {
+ dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
+- return -ETIMEDOUT;
++ r = -ETIMEDOUT;
++ goto failure;
+ }
+ udelay(100);
+ }
+@@ -384,7 +386,8 @@ static int acp_hw_init(void *handle)
+ break;
+ if (--count == 0) {
+ dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
+- return -ETIMEDOUT;
++ r = -ETIMEDOUT;
++ goto failure;
+ }
+ udelay(100);
+ }
+@@ -393,6 +396,13 @@ static int acp_hw_init(void *handle)
+ val &= ~ACP_SOFT_RESET__SoftResetAud_MASK;
+ cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);
+ return 0;
++
++failure:
++ kfree(i2s_pdata);
++ kfree(adev->acp.acp_res);
++ kfree(adev->acp.acp_cell);
++ kfree(adev->acp.acp_genpd);
++ return r;
+ }
+
+ /**
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+index 8b26c970a3cb..90df22081a25 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+@@ -536,7 +536,6 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
+
+ list_for_each_entry(lobj, validated, tv.head) {
+ struct amdgpu_bo *bo = ttm_to_amdgpu_bo(lobj->tv.bo);
+- bool binding_userptr = false;
+ struct mm_struct *usermm;
+
+ usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
+@@ -553,7 +552,6 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
+
+ amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
+ lobj->user_pages);
+- binding_userptr = true;
+ }
+
+ if (p->evictable == lobj)
+@@ -563,10 +561,8 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
+ if (r)
+ return r;
+
+- if (binding_userptr) {
+- kvfree(lobj->user_pages);
+- lobj->user_pages = NULL;
+- }
++ kvfree(lobj->user_pages);
++ lobj->user_pages = NULL;
+ }
+ return 0;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+index 5376328d3fd0..a7cd4a03bf38 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -1030,6 +1030,41 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
+ return -ENODEV;
+ }
+
++#ifdef CONFIG_DRM_AMDGPU_SI
++ if (!amdgpu_si_support) {
++ switch (flags & AMD_ASIC_MASK) {
++ case CHIP_TAHITI:
++ case CHIP_PITCAIRN:
++ case CHIP_VERDE:
++ case CHIP_OLAND:
++ case CHIP_HAINAN:
++ dev_info(&pdev->dev,
++ "SI support provided by radeon.\n");
++ dev_info(&pdev->dev,
++ "Use radeon.si_support=0 amdgpu.si_support=1 to override.\n"
++ );
++ return -ENODEV;
++ }
++ }
++#endif
++#ifdef CONFIG_DRM_AMDGPU_CIK
++ if (!amdgpu_cik_support) {
++ switch (flags & AMD_ASIC_MASK) {
++ case CHIP_KAVERI:
++ case CHIP_BONAIRE:
++ case CHIP_HAWAII:
++ case CHIP_KABINI:
++ case CHIP_MULLINS:
++ dev_info(&pdev->dev,
++ "CIK support provided by radeon.\n");
++ dev_info(&pdev->dev,
++ "Use radeon.cik_support=0 amdgpu.cik_support=1 to override.\n"
++ );
++ return -ENODEV;
++ }
++ }
++#endif
++
+ /* Get rid of things like offb */
+ ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, "amdgpudrmfb");
+ if (ret)
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+index 00beba533582..56b4c241a14b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+@@ -144,41 +144,6 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
+ struct amdgpu_device *adev;
+ int r, acpi_status;
+
+-#ifdef CONFIG_DRM_AMDGPU_SI
+- if (!amdgpu_si_support) {
+- switch (flags & AMD_ASIC_MASK) {
+- case CHIP_TAHITI:
+- case CHIP_PITCAIRN:
+- case CHIP_VERDE:
+- case CHIP_OLAND:
+- case CHIP_HAINAN:
+- dev_info(dev->dev,
+- "SI support provided by radeon.\n");
+- dev_info(dev->dev,
+- "Use radeon.si_support=0 amdgpu.si_support=1 to override.\n"
+- );
+- return -ENODEV;
+- }
+- }
+-#endif
+-#ifdef CONFIG_DRM_AMDGPU_CIK
+- if (!amdgpu_cik_support) {
+- switch (flags & AMD_ASIC_MASK) {
+- case CHIP_KAVERI:
+- case CHIP_BONAIRE:
+- case CHIP_HAWAII:
+- case CHIP_KABINI:
+- case CHIP_MULLINS:
+- dev_info(dev->dev,
+- "CIK support provided by radeon.\n");
+- dev_info(dev->dev,
+- "Use radeon.cik_support=0 amdgpu.cik_support=1 to override.\n"
+- );
+- return -ENODEV;
+- }
+- }
+-#endif
+-
+ adev = kzalloc(sizeof(struct amdgpu_device), GFP_KERNEL);
+ if (adev == NULL) {
+ return -ENOMEM;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+index b70b3c45bb29..65044b1b3d4c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+@@ -429,13 +429,14 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
+ * Open up a stream for HW test
+ */
+ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
++ struct amdgpu_bo *bo,
+ struct dma_fence **fence)
+ {
+ const unsigned ib_size_dw = 1024;
+ struct amdgpu_job *job;
+ struct amdgpu_ib *ib;
+ struct dma_fence *f = NULL;
+- uint64_t dummy;
++ uint64_t addr;
+ int i, r;
+
+ r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+@@ -444,7 +445,7 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
+
+ ib = &job->ibs[0];
+
+- dummy = ib->gpu_addr + 1024;
++ addr = amdgpu_bo_gpu_offset(bo);
+
+ /* stitch together a VCE create msg */
+ ib->length_dw = 0;
+@@ -476,8 +477,8 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
+
+ ib->ptr[ib->length_dw++] = 0x00000014; /* len */
+ ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */
+- ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
+- ib->ptr[ib->length_dw++] = dummy;
++ ib->ptr[ib->length_dw++] = upper_32_bits(addr);
++ ib->ptr[ib->length_dw++] = addr;
+ ib->ptr[ib->length_dw++] = 0x00000001;
+
+ for (i = ib->length_dw; i < ib_size_dw; ++i)
+@@ -1110,13 +1111,20 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
+ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
+ {
+ struct dma_fence *fence = NULL;
++ struct amdgpu_bo *bo = NULL;
+ long r;
+
+ /* skip vce ring1/2 ib test for now, since it's not reliable */
+ if (ring != &ring->adev->vce.ring[0])
+ return 0;
+
+- r = amdgpu_vce_get_create_msg(ring, 1, NULL);
++ r = amdgpu_bo_create_reserved(ring->adev, 512, PAGE_SIZE,
++ AMDGPU_GEM_DOMAIN_VRAM,
++ &bo, NULL, NULL);
++ if (r)
++ return r;
++
++ r = amdgpu_vce_get_create_msg(ring, 1, bo, NULL);
+ if (r)
+ goto error;
+
+@@ -1132,5 +1140,7 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
+
+ error:
+ dma_fence_put(fence);
++ amdgpu_bo_unreserve(bo);
++ amdgpu_bo_unref(&bo);
+ return r;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
+index 30ea54dd9117..e802f7d9db0a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
+@@ -59,6 +59,7 @@ int amdgpu_vce_entity_init(struct amdgpu_device *adev);
+ int amdgpu_vce_suspend(struct amdgpu_device *adev);
+ int amdgpu_vce_resume(struct amdgpu_device *adev);
+ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
++ struct amdgpu_bo *bo,
+ struct dma_fence **fence);
+ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
+ bool direct, struct dma_fence **fence);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+index 2e12eeb314a7..a3fe8b01d234 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+@@ -517,13 +517,14 @@ int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
+ }
+
+ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
+- struct dma_fence **fence)
++ struct amdgpu_bo *bo,
++ struct dma_fence **fence)
+ {
+ const unsigned ib_size_dw = 16;
+ struct amdgpu_job *job;
+ struct amdgpu_ib *ib;
+ struct dma_fence *f = NULL;
+- uint64_t dummy;
++ uint64_t addr;
+ int i, r;
+
+ r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+@@ -531,14 +532,14 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
+ return r;
+
+ ib = &job->ibs[0];
+- dummy = ib->gpu_addr + 1024;
++ addr = amdgpu_bo_gpu_offset(bo);
+
+ ib->length_dw = 0;
+ ib->ptr[ib->length_dw++] = 0x00000018;
+ ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
+ ib->ptr[ib->length_dw++] = handle;
+- ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
+- ib->ptr[ib->length_dw++] = dummy;
++ ib->ptr[ib->length_dw++] = upper_32_bits(addr);
++ ib->ptr[ib->length_dw++] = addr;
+ ib->ptr[ib->length_dw++] = 0x0000000b;
+
+ ib->ptr[ib->length_dw++] = 0x00000014;
+@@ -569,13 +570,14 @@ err:
+ }
+
+ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
+- struct dma_fence **fence)
++ struct amdgpu_bo *bo,
++ struct dma_fence **fence)
+ {
+ const unsigned ib_size_dw = 16;
+ struct amdgpu_job *job;
+ struct amdgpu_ib *ib;
+ struct dma_fence *f = NULL;
+- uint64_t dummy;
++ uint64_t addr;
+ int i, r;
+
+ r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+@@ -583,14 +585,14 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
+ return r;
+
+ ib = &job->ibs[0];
+- dummy = ib->gpu_addr + 1024;
++ addr = amdgpu_bo_gpu_offset(bo);
+
+ ib->length_dw = 0;
+ ib->ptr[ib->length_dw++] = 0x00000018;
+ ib->ptr[ib->length_dw++] = 0x00000001;
+ ib->ptr[ib->length_dw++] = handle;
+- ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
+- ib->ptr[ib->length_dw++] = dummy;
++ ib->ptr[ib->length_dw++] = upper_32_bits(addr);
++ ib->ptr[ib->length_dw++] = addr;
+ ib->ptr[ib->length_dw++] = 0x0000000b;
+
+ ib->ptr[ib->length_dw++] = 0x00000014;
+@@ -623,13 +625,20 @@ err:
+ int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
+ {
+ struct dma_fence *fence = NULL;
++ struct amdgpu_bo *bo = NULL;
+ long r;
+
+- r = amdgpu_vcn_enc_get_create_msg(ring, 1, NULL);
++ r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
++ AMDGPU_GEM_DOMAIN_VRAM,
++ &bo, NULL, NULL);
++ if (r)
++ return r;
++
++ r = amdgpu_vcn_enc_get_create_msg(ring, 1, bo, NULL);
+ if (r)
+ goto error;
+
+- r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, &fence);
++ r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, bo, &fence);
+ if (r)
+ goto error;
+
+@@ -641,6 +650,8 @@ int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
+
+ error:
+ dma_fence_put(fence);
++ amdgpu_bo_unreserve(bo);
++ amdgpu_bo_unref(&bo);
+ return r;
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+index 15c371fac469..0d131e1d6efc 100644
+--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+@@ -1086,7 +1086,7 @@ static void sdma_v5_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
+ amdgpu_ring_write(ring, addr & 0xfffffffc);
+ amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
+ amdgpu_ring_write(ring, seq); /* reference */
+- amdgpu_ring_write(ring, 0xfffffff); /* mask */
++ amdgpu_ring_write(ring, 0xffffffff); /* mask */
+ amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
+ SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+index 670784a78512..217084d56ab8 100644
+--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+@@ -206,13 +206,14 @@ static int uvd_v6_0_enc_ring_test_ring(struct amdgpu_ring *ring)
+ * Open up a stream for HW test
+ */
+ static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
++ struct amdgpu_bo *bo,
+ struct dma_fence **fence)
+ {
+ const unsigned ib_size_dw = 16;
+ struct amdgpu_job *job;
+ struct amdgpu_ib *ib;
+ struct dma_fence *f = NULL;
+- uint64_t dummy;
++ uint64_t addr;
+ int i, r;
+
+ r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+@@ -220,15 +221,15 @@ static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle
+ return r;
+
+ ib = &job->ibs[0];
+- dummy = ib->gpu_addr + 1024;
++ addr = amdgpu_bo_gpu_offset(bo);
+
+ ib->length_dw = 0;
+ ib->ptr[ib->length_dw++] = 0x00000018;
+ ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
+ ib->ptr[ib->length_dw++] = handle;
+ ib->ptr[ib->length_dw++] = 0x00010000;
+- ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
+- ib->ptr[ib->length_dw++] = dummy;
++ ib->ptr[ib->length_dw++] = upper_32_bits(addr);
++ ib->ptr[ib->length_dw++] = addr;
+
+ ib->ptr[ib->length_dw++] = 0x00000014;
+ ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
+@@ -268,13 +269,14 @@ err:
+ */
+ static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
+ uint32_t handle,
++ struct amdgpu_bo *bo,
+ struct dma_fence **fence)
+ {
+ const unsigned ib_size_dw = 16;
+ struct amdgpu_job *job;
+ struct amdgpu_ib *ib;
+ struct dma_fence *f = NULL;
+- uint64_t dummy;
++ uint64_t addr;
+ int i, r;
+
+ r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+@@ -282,15 +284,15 @@ static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
+ return r;
+
+ ib = &job->ibs[0];
+- dummy = ib->gpu_addr + 1024;
++ addr = amdgpu_bo_gpu_offset(bo);
+
+ ib->length_dw = 0;
+ ib->ptr[ib->length_dw++] = 0x00000018;
+ ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
+ ib->ptr[ib->length_dw++] = handle;
+ ib->ptr[ib->length_dw++] = 0x00010000;
+- ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
+- ib->ptr[ib->length_dw++] = dummy;
++ ib->ptr[ib->length_dw++] = upper_32_bits(addr);
++ ib->ptr[ib->length_dw++] = addr;
+
+ ib->ptr[ib->length_dw++] = 0x00000014;
+ ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
+@@ -327,13 +329,20 @@ err:
+ static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
+ {
+ struct dma_fence *fence = NULL;
++ struct amdgpu_bo *bo = NULL;
+ long r;
+
+- r = uvd_v6_0_enc_get_create_msg(ring, 1, NULL);
++ r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
++ AMDGPU_GEM_DOMAIN_VRAM,
++ &bo, NULL, NULL);
++ if (r)
++ return r;
++
++ r = uvd_v6_0_enc_get_create_msg(ring, 1, bo, NULL);
+ if (r)
+ goto error;
+
+- r = uvd_v6_0_enc_get_destroy_msg(ring, 1, &fence);
++ r = uvd_v6_0_enc_get_destroy_msg(ring, 1, bo, &fence);
+ if (r)
+ goto error;
+
+@@ -345,6 +354,8 @@ static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
+
+ error:
+ dma_fence_put(fence);
++ amdgpu_bo_unreserve(bo);
++ amdgpu_bo_unref(&bo);
+ return r;
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+index a6bfe7651d07..c5e2f8c1741b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+@@ -214,13 +214,14 @@ static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring)
+ * Open up a stream for HW test
+ */
+ static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
++ struct amdgpu_bo *bo,
+ struct dma_fence **fence)
+ {
+ const unsigned ib_size_dw = 16;
+ struct amdgpu_job *job;
+ struct amdgpu_ib *ib;
+ struct dma_fence *f = NULL;
+- uint64_t dummy;
++ uint64_t addr;
+ int i, r;
+
+ r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+@@ -228,15 +229,15 @@ static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle
+ return r;
+
+ ib = &job->ibs[0];
+- dummy = ib->gpu_addr + 1024;
++ addr = amdgpu_bo_gpu_offset(bo);
+
+ ib->length_dw = 0;
+ ib->ptr[ib->length_dw++] = 0x00000018;
+ ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
+ ib->ptr[ib->length_dw++] = handle;
+ ib->ptr[ib->length_dw++] = 0x00000000;
+- ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
+- ib->ptr[ib->length_dw++] = dummy;
++ ib->ptr[ib->length_dw++] = upper_32_bits(addr);
++ ib->ptr[ib->length_dw++] = addr;
+
+ ib->ptr[ib->length_dw++] = 0x00000014;
+ ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
+@@ -275,13 +276,14 @@ err:
+ * Close up a stream for HW test or if userspace failed to do so
+ */
+ static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
+- struct dma_fence **fence)
++ struct amdgpu_bo *bo,
++ struct dma_fence **fence)
+ {
+ const unsigned ib_size_dw = 16;
+ struct amdgpu_job *job;
+ struct amdgpu_ib *ib;
+ struct dma_fence *f = NULL;
+- uint64_t dummy;
++ uint64_t addr;
+ int i, r;
+
+ r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+@@ -289,15 +291,15 @@ static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handl
+ return r;
+
+ ib = &job->ibs[0];
+- dummy = ib->gpu_addr + 1024;
++ addr = amdgpu_bo_gpu_offset(bo);
+
+ ib->length_dw = 0;
+ ib->ptr[ib->length_dw++] = 0x00000018;
+ ib->ptr[ib->length_dw++] = 0x00000001;
+ ib->ptr[ib->length_dw++] = handle;
+ ib->ptr[ib->length_dw++] = 0x00000000;
+- ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
+- ib->ptr[ib->length_dw++] = dummy;
++ ib->ptr[ib->length_dw++] = upper_32_bits(addr);
++ ib->ptr[ib->length_dw++] = addr;
+
+ ib->ptr[ib->length_dw++] = 0x00000014;
+ ib->ptr[ib->length_dw++] = 0x00000002;
+@@ -334,13 +336,20 @@ err:
+ static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
+ {
+ struct dma_fence *fence = NULL;
++ struct amdgpu_bo *bo = NULL;
+ long r;
+
+- r = uvd_v7_0_enc_get_create_msg(ring, 1, NULL);
++ r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
++ AMDGPU_GEM_DOMAIN_VRAM,
++ &bo, NULL, NULL);
++ if (r)
++ return r;
++
++ r = uvd_v7_0_enc_get_create_msg(ring, 1, bo, NULL);
+ if (r)
+ goto error;
+
+- r = uvd_v7_0_enc_get_destroy_msg(ring, 1, &fence);
++ r = uvd_v7_0_enc_get_destroy_msg(ring, 1, bo, &fence);
+ if (r)
+ goto error;
+
+@@ -352,6 +361,8 @@ static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
+
+ error:
+ dma_fence_put(fence);
++ amdgpu_bo_unreserve(bo);
++ amdgpu_bo_unref(&bo);
+ return r;
+ }
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+index 6248c8455314..45f74219e79e 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+@@ -668,6 +668,7 @@ struct clock_source *dce100_clock_source_create(
+ return &clk_src->base;
+ }
+
++ kfree(clk_src);
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+index 764329264c3b..0cb83b0e0e1e 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+@@ -714,6 +714,7 @@ struct clock_source *dce110_clock_source_create(
+ return &clk_src->base;
+ }
+
++ kfree(clk_src);
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+index 7a04be74c9cf..918455caa9a6 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+@@ -687,6 +687,7 @@ struct clock_source *dce112_clock_source_create(
+ return &clk_src->base;
+ }
+
++ kfree(clk_src);
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+index ae38c9c7277c..49f3f0fad763 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+@@ -500,6 +500,7 @@ static struct clock_source *dce120_clock_source_create(
+ return &clk_src->base;
+ }
+
++ kfree(clk_src);
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+index 860a524ebcfa..952440893fbb 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+@@ -701,6 +701,7 @@ struct clock_source *dce80_clock_source_create(
+ return &clk_src->base;
+ }
+
++ kfree(clk_src);
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+index a12530a3ab9c..3f25e8da5396 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+@@ -786,6 +786,7 @@ struct clock_source *dcn10_clock_source_create(
+ return &clk_src->base;
+ }
+
++ kfree(clk_src);
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+index b949e202d6cb..5b7ff6c549f1 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+@@ -955,6 +955,7 @@ struct clock_source *dcn20_clock_source_create(
+ return &clk_src->base;
+ }
+
++ kfree(clk_src);
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c b/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c
+index 2851cac94d86..b72840c06ab7 100644
+--- a/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c
++++ b/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c
+@@ -43,9 +43,8 @@ komeda_wb_encoder_atomic_check(struct drm_encoder *encoder,
+ struct komeda_data_flow_cfg dflow;
+ int err;
+
+- if (!writeback_job || !writeback_job->fb) {
++ if (!writeback_job)
+ return 0;
+- }
+
+ if (!crtc_st->active) {
+ DRM_DEBUG_ATOMIC("Cannot write the composition result out on an inactive CRTC.\n");
+@@ -166,8 +165,10 @@ static int komeda_wb_connector_add(struct komeda_kms_dev *kms,
+ &komeda_wb_encoder_helper_funcs,
+ formats, n_formats);
+ komeda_put_fourcc_list(formats);
+- if (err)
++ if (err) {
++ kfree(kwb_conn);
+ return err;
++ }
+
+ drm_connector_helper_add(&wb_conn->base, &komeda_wb_conn_helper_funcs);
+
+diff --git a/drivers/gpu/drm/arm/malidp_mw.c b/drivers/gpu/drm/arm/malidp_mw.c
+index 2e812525025d..a59227b2cdb5 100644
+--- a/drivers/gpu/drm/arm/malidp_mw.c
++++ b/drivers/gpu/drm/arm/malidp_mw.c
+@@ -130,7 +130,7 @@ malidp_mw_encoder_atomic_check(struct drm_encoder *encoder,
+ struct drm_framebuffer *fb;
+ int i, n_planes;
+
+- if (!conn_state->writeback_job || !conn_state->writeback_job->fb)
++ if (!conn_state->writeback_job)
+ return 0;
+
+ fb = conn_state->writeback_job->fb;
+@@ -247,7 +247,7 @@ void malidp_mw_atomic_commit(struct drm_device *drm,
+
+ mw_state = to_mw_state(conn_state);
+
+- if (conn_state->writeback_job && conn_state->writeback_job->fb) {
++ if (conn_state->writeback_job) {
+ struct drm_framebuffer *fb = conn_state->writeback_job->fb;
+
+ DRM_DEV_DEBUG_DRIVER(drm->dev,
+diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
+index 419381abbdd1..14aeaf736321 100644
+--- a/drivers/gpu/drm/drm_atomic.c
++++ b/drivers/gpu/drm/drm_atomic.c
+@@ -430,10 +430,15 @@ static int drm_atomic_connector_check(struct drm_connector *connector,
+ return -EINVAL;
+ }
+
+- if (writeback_job->out_fence && !writeback_job->fb) {
+- DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] requesting out-fence without framebuffer\n",
+- connector->base.id, connector->name);
+- return -EINVAL;
++ if (!writeback_job->fb) {
++ if (writeback_job->out_fence) {
++ DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] requesting out-fence without framebuffer\n",
++ connector->base.id, connector->name);
++ return -EINVAL;
++ }
++
++ drm_writeback_cleanup_job(writeback_job);
++ state->writeback_job = NULL;
+ }
+
+ return 0;
+diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
+index 82a4ceed3fcf..6b0177112e18 100644
+--- a/drivers/gpu/drm/drm_edid.c
++++ b/drivers/gpu/drm/drm_edid.c
+@@ -159,6 +159,9 @@ static const struct edid_quirk {
+ /* Medion MD 30217 PG */
+ { "MED", 0x7b8, EDID_QUIRK_PREFER_LARGE_75 },
+
++ /* Lenovo G50 */
++ { "SDC", 18514, EDID_QUIRK_FORCE_6BPC },
++
+ /* Panel in Samsung NP700G7A-S01PL notebook reports 6bpc */
+ { "SEC", 0xd033, EDID_QUIRK_FORCE_8BPC },
+
+diff --git a/drivers/gpu/drm/drm_writeback.c b/drivers/gpu/drm/drm_writeback.c
+index ff138b6ec48b..43d9e3bb3a94 100644
+--- a/drivers/gpu/drm/drm_writeback.c
++++ b/drivers/gpu/drm/drm_writeback.c
+@@ -324,6 +324,9 @@ void drm_writeback_cleanup_job(struct drm_writeback_job *job)
+ if (job->fb)
+ drm_framebuffer_put(job->fb);
+
++ if (job->out_fence)
++ dma_fence_put(job->out_fence);
++
+ kfree(job);
+ }
+ EXPORT_SYMBOL(drm_writeback_cleanup_job);
+@@ -366,25 +369,29 @@ drm_writeback_signal_completion(struct drm_writeback_connector *wb_connector,
+ {
+ unsigned long flags;
+ struct drm_writeback_job *job;
++ struct dma_fence *out_fence;
+
+ spin_lock_irqsave(&wb_connector->job_lock, flags);
+ job = list_first_entry_or_null(&wb_connector->job_queue,
+ struct drm_writeback_job,
+ list_entry);
+- if (job) {
++ if (job)
+ list_del(&job->list_entry);
+- if (job->out_fence) {
+- if (status)
+- dma_fence_set_error(job->out_fence, status);
+- dma_fence_signal(job->out_fence);
+- dma_fence_put(job->out_fence);
+- }
+- }
++
+ spin_unlock_irqrestore(&wb_connector->job_lock, flags);
+
+ if (WARN_ON(!job))
+ return;
+
++ out_fence = job->out_fence;
++ if (out_fence) {
++ if (status)
++ dma_fence_set_error(out_fence, status);
++ dma_fence_signal(out_fence);
++ dma_fence_put(out_fence);
++ job->out_fence = NULL;
++ }
++
+ INIT_WORK(&job->cleanup_work, cleanup_work);
+ queue_work(system_long_wq, &job->cleanup_work);
+ }
+diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
+index 3ef4e9f573cf..b1025c248bb9 100644
+--- a/drivers/gpu/drm/i915/display/intel_bios.c
++++ b/drivers/gpu/drm/i915/display/intel_bios.c
+@@ -1269,7 +1269,7 @@ static void sanitize_ddc_pin(struct drm_i915_private *dev_priv,
+ DRM_DEBUG_KMS("port %c trying to use the same DDC pin (0x%x) as port %c, "
+ "disabling port %c DVI/HDMI support\n",
+ port_name(port), info->alternate_ddc_pin,
+- port_name(p), port_name(port));
++ port_name(p), port_name(p));
+
+ /*
+ * If we have multiple ports supposedly sharing the
+@@ -1277,9 +1277,14 @@ static void sanitize_ddc_pin(struct drm_i915_private *dev_priv,
+ * port. Otherwise they share the same ddc pin and
+ * system couldn't communicate with them separately.
+ *
+- * Give child device order the priority, first come first
+- * served.
++ * Give inverse child device order the priority,
++ * last one wins. Yes, there are real machines
++ * (eg. Asrock B250M-HDV) where VBT has both
++ * port A and port E with the same AUX ch and
++ * we must pick port E :(
+ */
++ info = &dev_priv->vbt.ddi_port_info[p];
++
+ info->supports_dvi = false;
+ info->supports_hdmi = false;
+ info->alternate_ddc_pin = 0;
+@@ -1315,7 +1320,7 @@ static void sanitize_aux_ch(struct drm_i915_private *dev_priv,
+ DRM_DEBUG_KMS("port %c trying to use the same AUX CH (0x%x) as port %c, "
+ "disabling port %c DP support\n",
+ port_name(port), info->alternate_aux_channel,
+- port_name(p), port_name(port));
++ port_name(p), port_name(p));
+
+ /*
+ * If we have multiple ports supposedly sharing the
+@@ -1323,9 +1328,14 @@ static void sanitize_aux_ch(struct drm_i915_private *dev_priv,
+ * port. Otherwise they share the same aux channel
+ * and system couldn't communicate with them separately.
+ *
+- * Give child device order the priority, first come first
+- * served.
++ * Give inverse child device order the priority,
++ * last one wins. Yes, there are real machines
++ * (eg. Asrock B250M-HDV) where VBT has both
++ * port A and port E with the same AUX ch and
++ * we must pick port E :(
+ */
++ info = &dev_priv->vbt.ddi_port_info[p];
++
+ info->supports_dp = false;
+ info->alternate_aux_channel = 0;
+ }
+diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+index c201289039fe..5bd27941811f 100644
+--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+@@ -365,6 +365,7 @@ err:
+ return VM_FAULT_OOM;
+ case -ENOSPC:
+ case -EFAULT:
++ case -ENODEV: /* bad object, how did you get here! */
+ return VM_FAULT_SIGBUS;
+ default:
+ WARN_ONCE(ret, "unhandled error in %s: %i\n", __func__, ret);
+@@ -475,10 +476,16 @@ i915_gem_mmap_gtt(struct drm_file *file,
+ if (!obj)
+ return -ENOENT;
+
++ if (i915_gem_object_never_bind_ggtt(obj)) {
++ ret = -ENODEV;
++ goto out;
++ }
++
+ ret = create_mmap_offset(obj);
+ if (ret == 0)
+ *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
+
++out:
+ i915_gem_object_put(obj);
+ return ret;
+ }
+diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
+index dfebd5706f16..e44d3f49c1d6 100644
+--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
++++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
+@@ -152,6 +152,12 @@ i915_gem_object_is_proxy(const struct drm_i915_gem_object *obj)
+ return obj->ops->flags & I915_GEM_OBJECT_IS_PROXY;
+ }
+
++static inline bool
++i915_gem_object_never_bind_ggtt(const struct drm_i915_gem_object *obj)
++{
++ return obj->ops->flags & I915_GEM_OBJECT_NO_GGTT;
++}
++
+ static inline bool
+ i915_gem_object_needs_async_cancel(const struct drm_i915_gem_object *obj)
+ {
+diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+index 18bf4f8d6d80..d5453e85df5e 100644
+--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
++++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+@@ -31,7 +31,8 @@ struct drm_i915_gem_object_ops {
+ #define I915_GEM_OBJECT_HAS_STRUCT_PAGE BIT(0)
+ #define I915_GEM_OBJECT_IS_SHRINKABLE BIT(1)
+ #define I915_GEM_OBJECT_IS_PROXY BIT(2)
+-#define I915_GEM_OBJECT_ASYNC_CANCEL BIT(3)
++#define I915_GEM_OBJECT_NO_GGTT BIT(3)
++#define I915_GEM_OBJECT_ASYNC_CANCEL BIT(4)
+
+ /* Interface between the GEM object and its backing storage.
+ * get_pages() is called once prior to the use of the associated set
+diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+index 528b61678334..cd30e83c3205 100644
+--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+@@ -694,6 +694,7 @@ i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
+ static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
+ .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
+ I915_GEM_OBJECT_IS_SHRINKABLE |
++ I915_GEM_OBJECT_NO_GGTT |
+ I915_GEM_OBJECT_ASYNC_CANCEL,
+ .get_pages = i915_gem_userptr_get_pages,
+ .put_pages = i915_gem_userptr_put_pages,
+diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
+index 8a659d3d7435..7f6af4ca0968 100644
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -1030,6 +1030,9 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
+
+ lockdep_assert_held(&obj->base.dev->struct_mutex);
+
++ if (i915_gem_object_never_bind_ggtt(obj))
++ return ERR_PTR(-ENODEV);
++
+ if (flags & PIN_MAPPABLE &&
+ (!view || view->type == I915_GGTT_VIEW_NORMAL)) {
+ /* If the required space is larger than the available
+diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
+index 9bb9260d9181..b05c7c513436 100644
+--- a/drivers/gpu/drm/panfrost/panfrost_job.c
++++ b/drivers/gpu/drm/panfrost/panfrost_job.c
+@@ -384,13 +384,19 @@ static void panfrost_job_timedout(struct drm_sched_job *sched_job)
+ job_read(pfdev, JS_TAIL_LO(js)),
+ sched_job);
+
+- mutex_lock(&pfdev->reset_lock);
++ if (!mutex_trylock(&pfdev->reset_lock))
++ return;
+
+- for (i = 0; i < NUM_JOB_SLOTS; i++)
+- drm_sched_stop(&pfdev->js->queue[i].sched, sched_job);
++ for (i = 0; i < NUM_JOB_SLOTS; i++) {
++ struct drm_gpu_scheduler *sched = &pfdev->js->queue[i].sched;
++
++ drm_sched_stop(sched, sched_job);
++ if (js != i)
++ /* Ensure any timeouts on other slots have finished */
++ cancel_delayed_work_sync(&sched->work_tdr);
++ }
+
+- if (sched_job)
+- drm_sched_increase_karma(sched_job);
++ drm_sched_increase_karma(sched_job);
+
+ /* panfrost_core_dump(pfdev); */
+
+diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
+index 5cc0fbb04ab1..7033f3a38c87 100644
+--- a/drivers/gpu/drm/radeon/radeon_drv.c
++++ b/drivers/gpu/drm/radeon/radeon_drv.c
+@@ -380,19 +380,11 @@ radeon_pci_remove(struct pci_dev *pdev)
+ static void
+ radeon_pci_shutdown(struct pci_dev *pdev)
+ {
+- struct drm_device *ddev = pci_get_drvdata(pdev);
+-
+ /* if we are running in a VM, make sure the device
+ * is torn down properly on reboot/shutdown
+ */
+ if (radeon_device_is_virtual())
+ radeon_pci_remove(pdev);
+-
+- /* Some adapters need to be suspended before a
+- * shutdown occurs in order to prevent an error
+- * during kexec.
+- */
+- radeon_suspend_kms(ddev, true, true, false);
+ }
+
+ static int radeon_pmops_suspend(struct device *dev)
+diff --git a/drivers/gpu/drm/rcar-du/rcar_du_writeback.c b/drivers/gpu/drm/rcar-du/rcar_du_writeback.c
+index ae07290bba6a..04efa78d70b6 100644
+--- a/drivers/gpu/drm/rcar-du/rcar_du_writeback.c
++++ b/drivers/gpu/drm/rcar-du/rcar_du_writeback.c
+@@ -147,7 +147,7 @@ static int rcar_du_wb_enc_atomic_check(struct drm_encoder *encoder,
+ struct drm_device *dev = encoder->dev;
+ struct drm_framebuffer *fb;
+
+- if (!conn_state->writeback_job || !conn_state->writeback_job->fb)
++ if (!conn_state->writeback_job)
+ return 0;
+
+ fb = conn_state->writeback_job->fb;
+@@ -221,7 +221,7 @@ void rcar_du_writeback_setup(struct rcar_du_crtc *rcrtc,
+ unsigned int i;
+
+ state = rcrtc->writeback.base.state;
+- if (!state || !state->writeback_job || !state->writeback_job->fb)
++ if (!state || !state->writeback_job)
+ return;
+
+ fb = state->writeback_job->fb;
+diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
+index 6dacff49c1cc..a77cd0344d22 100644
+--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
++++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
+@@ -278,15 +278,13 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
+ else
+ ret = vmf_insert_pfn(&cvma, address, pfn);
+
+- /*
+- * Somebody beat us to this PTE or prefaulting to
+- * an already populated PTE, or prefaulting error.
+- */
+-
+- if (unlikely((ret == VM_FAULT_NOPAGE && i > 0)))
+- break;
+- else if (unlikely(ret & VM_FAULT_ERROR))
+- goto out_io_unlock;
++ /* Never error on prefaulted PTEs */
++ if (unlikely((ret & VM_FAULT_ERROR))) {
++ if (i == 0)
++ goto out_io_unlock;
++ else
++ break;
++ }
+
+ address += PAGE_SIZE;
+ if (unlikely(++page_offset >= page_last))
+diff --git a/drivers/gpu/drm/vc4/vc4_txp.c b/drivers/gpu/drm/vc4/vc4_txp.c
+index 96f91c1b4b6e..e92fa1275034 100644
+--- a/drivers/gpu/drm/vc4/vc4_txp.c
++++ b/drivers/gpu/drm/vc4/vc4_txp.c
+@@ -229,7 +229,7 @@ static int vc4_txp_connector_atomic_check(struct drm_connector *conn,
+ int i;
+
+ conn_state = drm_atomic_get_new_connector_state(state, conn);
+- if (!conn_state->writeback_job || !conn_state->writeback_job->fb)
++ if (!conn_state->writeback_job)
+ return 0;
+
+ crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
+@@ -269,8 +269,7 @@ static void vc4_txp_connector_atomic_commit(struct drm_connector *conn,
+ u32 ctrl;
+ int i;
+
+- if (WARN_ON(!conn_state->writeback_job ||
+- !conn_state->writeback_job->fb))
++ if (WARN_ON(!conn_state->writeback_job))
+ return;
+
+ mode = &conn_state->crtc->state->adjusted_mode;
+diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
+index aa772ee0706f..35c284af574d 100644
+--- a/drivers/infiniband/hw/cxgb4/mem.c
++++ b/drivers/infiniband/hw/cxgb4/mem.c
+@@ -275,13 +275,17 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
+ struct sk_buff *skb, struct c4iw_wr_wait *wr_waitp)
+ {
+ int err;
+- struct fw_ri_tpte tpt;
++ struct fw_ri_tpte *tpt;
+ u32 stag_idx;
+ static atomic_t key;
+
+ if (c4iw_fatal_error(rdev))
+ return -EIO;
+
++ tpt = kmalloc(sizeof(*tpt), GFP_KERNEL);
++ if (!tpt)
++ return -ENOMEM;
++
+ stag_state = stag_state > 0;
+ stag_idx = (*stag) >> 8;
+
+@@ -291,6 +295,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
+ mutex_lock(&rdev->stats.lock);
+ rdev->stats.stag.fail++;
+ mutex_unlock(&rdev->stats.lock);
++ kfree(tpt);
+ return -ENOMEM;
+ }
+ mutex_lock(&rdev->stats.lock);
+@@ -305,28 +310,28 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
+
+ /* write TPT entry */
+ if (reset_tpt_entry)
+- memset(&tpt, 0, sizeof(tpt));
++ memset(tpt, 0, sizeof(*tpt));
+ else {
+- tpt.valid_to_pdid = cpu_to_be32(FW_RI_TPTE_VALID_F |
++ tpt->valid_to_pdid = cpu_to_be32(FW_RI_TPTE_VALID_F |
+ FW_RI_TPTE_STAGKEY_V((*stag & FW_RI_TPTE_STAGKEY_M)) |
+ FW_RI_TPTE_STAGSTATE_V(stag_state) |
+ FW_RI_TPTE_STAGTYPE_V(type) | FW_RI_TPTE_PDID_V(pdid));
+- tpt.locread_to_qpid = cpu_to_be32(FW_RI_TPTE_PERM_V(perm) |
++ tpt->locread_to_qpid = cpu_to_be32(FW_RI_TPTE_PERM_V(perm) |
+ (bind_enabled ? FW_RI_TPTE_MWBINDEN_F : 0) |
+ FW_RI_TPTE_ADDRTYPE_V((zbva ? FW_RI_ZERO_BASED_TO :
+ FW_RI_VA_BASED_TO))|
+ FW_RI_TPTE_PS_V(page_size));
+- tpt.nosnoop_pbladdr = !pbl_size ? 0 : cpu_to_be32(
++ tpt->nosnoop_pbladdr = !pbl_size ? 0 : cpu_to_be32(
+ FW_RI_TPTE_PBLADDR_V(PBL_OFF(rdev, pbl_addr)>>3));
+- tpt.len_lo = cpu_to_be32((u32)(len & 0xffffffffUL));
+- tpt.va_hi = cpu_to_be32((u32)(to >> 32));
+- tpt.va_lo_fbo = cpu_to_be32((u32)(to & 0xffffffffUL));
+- tpt.dca_mwbcnt_pstag = cpu_to_be32(0);
+- tpt.len_hi = cpu_to_be32((u32)(len >> 32));
++ tpt->len_lo = cpu_to_be32((u32)(len & 0xffffffffUL));
++ tpt->va_hi = cpu_to_be32((u32)(to >> 32));
++ tpt->va_lo_fbo = cpu_to_be32((u32)(to & 0xffffffffUL));
++ tpt->dca_mwbcnt_pstag = cpu_to_be32(0);
++ tpt->len_hi = cpu_to_be32((u32)(len >> 32));
+ }
+ err = write_adapter_mem(rdev, stag_idx +
+ (rdev->lldi.vr->stag.start >> 5),
+- sizeof(tpt), &tpt, skb, wr_waitp);
++ sizeof(*tpt), tpt, skb, wr_waitp);
+
+ if (reset_tpt_entry) {
+ c4iw_put_resource(&rdev->resource.tpt_table, stag_idx);
+@@ -334,6 +339,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
+ rdev->stats.stag.cur -= 32;
+ mutex_unlock(&rdev->stats.lock);
+ }
++ kfree(tpt);
+ return err;
+ }
+
+diff --git a/drivers/input/misc/da9063_onkey.c b/drivers/input/misc/da9063_onkey.c
+index fd355cf59397..3daf11a7df25 100644
+--- a/drivers/input/misc/da9063_onkey.c
++++ b/drivers/input/misc/da9063_onkey.c
+@@ -232,10 +232,7 @@ static int da9063_onkey_probe(struct platform_device *pdev)
+ onkey->input->phys = onkey->phys;
+ onkey->input->dev.parent = &pdev->dev;
+
+- if (onkey->key_power)
+- input_set_capability(onkey->input, EV_KEY, KEY_POWER);
+-
+- input_set_capability(onkey->input, EV_KEY, KEY_SLEEP);
++ input_set_capability(onkey->input, EV_KEY, KEY_POWER);
+
+ INIT_DELAYED_WORK(&onkey->work, da9063_poll_on);
+
+diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
+index 04fe43440a3c..2d8434b7b623 100644
+--- a/drivers/input/mouse/elantech.c
++++ b/drivers/input/mouse/elantech.c
+@@ -1827,31 +1827,6 @@ static int elantech_create_smbus(struct psmouse *psmouse,
+ leave_breadcrumbs);
+ }
+
+-static bool elantech_use_host_notify(struct psmouse *psmouse,
+- struct elantech_device_info *info)
+-{
+- if (ETP_NEW_IC_SMBUS_HOST_NOTIFY(info->fw_version))
+- return true;
+-
+- switch (info->bus) {
+- case ETP_BUS_PS2_ONLY:
+- /* expected case */
+- break;
+- case ETP_BUS_SMB_HST_NTFY_ONLY:
+- case ETP_BUS_PS2_SMB_HST_NTFY:
+- /* SMbus implementation is stable since 2018 */
+- if (dmi_get_bios_year() >= 2018)
+- return true;
+- /* fall through */
+- default:
+- psmouse_dbg(psmouse,
+- "Ignoring SMBus bus provider %d\n", info->bus);
+- break;
+- }
+-
+- return false;
+-}
+-
+ /**
+ * elantech_setup_smbus - called once the PS/2 devices are enumerated
+ * and decides to instantiate a SMBus InterTouch device.
+@@ -1871,7 +1846,7 @@ static int elantech_setup_smbus(struct psmouse *psmouse,
+ * i2c_blacklist_pnp_ids.
+ * Old ICs are up to the user to decide.
+ */
+- if (!elantech_use_host_notify(psmouse, info) ||
++ if (!ETP_NEW_IC_SMBUS_HOST_NOTIFY(info->fw_version) ||
+ psmouse_matches_pnp_id(psmouse, i2c_blacklist_pnp_ids))
+ return -ENXIO;
+ }
+@@ -1891,6 +1866,34 @@ static int elantech_setup_smbus(struct psmouse *psmouse,
+ return 0;
+ }
+
++static bool elantech_use_host_notify(struct psmouse *psmouse,
++ struct elantech_device_info *info)
++{
++ if (ETP_NEW_IC_SMBUS_HOST_NOTIFY(info->fw_version))
++ return true;
++
++ switch (info->bus) {
++ case ETP_BUS_PS2_ONLY:
++ /* expected case */
++ break;
++ case ETP_BUS_SMB_ALERT_ONLY:
++ /* fall-through */
++ case ETP_BUS_PS2_SMB_ALERT:
++ psmouse_dbg(psmouse, "Ignoring SMBus provider through alert protocol.\n");
++ break;
++ case ETP_BUS_SMB_HST_NTFY_ONLY:
++ /* fall-through */
++ case ETP_BUS_PS2_SMB_HST_NTFY:
++ return true;
++ default:
++ psmouse_dbg(psmouse,
++ "Ignoring SMBus bus provider %d.\n",
++ info->bus);
++ }
++
++ return false;
++}
++
+ int elantech_init_smbus(struct psmouse *psmouse)
+ {
+ struct elantech_device_info info;
+diff --git a/drivers/input/rmi4/rmi_driver.c b/drivers/input/rmi4/rmi_driver.c
+index 772493b1f665..190b9974526b 100644
+--- a/drivers/input/rmi4/rmi_driver.c
++++ b/drivers/input/rmi4/rmi_driver.c
+@@ -146,7 +146,7 @@ static int rmi_process_interrupt_requests(struct rmi_device *rmi_dev)
+ }
+
+ mutex_lock(&data->irq_mutex);
+- bitmap_and(data->irq_status, data->irq_status, data->current_irq_mask,
++ bitmap_and(data->irq_status, data->irq_status, data->fn_irq_bits,
+ data->irq_count);
+ /*
+ * At this point, irq_status has all bits that are set in the
+@@ -385,6 +385,8 @@ static int rmi_driver_set_irq_bits(struct rmi_device *rmi_dev,
+ bitmap_copy(data->current_irq_mask, data->new_irq_mask,
+ data->num_of_irq_regs);
+
++ bitmap_or(data->fn_irq_bits, data->fn_irq_bits, mask, data->irq_count);
++
+ error_unlock:
+ mutex_unlock(&data->irq_mutex);
+ return error;
+@@ -398,6 +400,8 @@ static int rmi_driver_clear_irq_bits(struct rmi_device *rmi_dev,
+ struct device *dev = &rmi_dev->dev;
+
+ mutex_lock(&data->irq_mutex);
++ bitmap_andnot(data->fn_irq_bits,
++ data->fn_irq_bits, mask, data->irq_count);
+ bitmap_andnot(data->new_irq_mask,
+ data->current_irq_mask, mask, data->irq_count);
+
+diff --git a/drivers/input/touchscreen/st1232.c b/drivers/input/touchscreen/st1232.c
+index 34923399ece4..1139714e72e2 100644
+--- a/drivers/input/touchscreen/st1232.c
++++ b/drivers/input/touchscreen/st1232.c
+@@ -81,8 +81,10 @@ static int st1232_ts_read_data(struct st1232_ts_data *ts)
+ for (i = 0, y = 0; i < ts->chip_info->max_fingers; i++, y += 3) {
+ finger[i].is_valid = buf[i + y] >> 7;
+ if (finger[i].is_valid) {
+- finger[i].x = ((buf[i + y] & 0x0070) << 4) | buf[i + 1];
+- finger[i].y = ((buf[i + y] & 0x0007) << 8) | buf[i + 2];
++ finger[i].x = ((buf[i + y] & 0x0070) << 4) |
++ buf[i + y + 1];
++ finger[i].y = ((buf[i + y] & 0x0007) << 8) |
++ buf[i + y + 2];
+
+ /* st1232 includes a z-axis / touch strength */
+ if (ts->chip_info->have_z)
+diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
+index c72c036aea76..daefc52b0ec5 100644
+--- a/drivers/irqchip/irq-sifive-plic.c
++++ b/drivers/irqchip/irq-sifive-plic.c
+@@ -97,7 +97,7 @@ static inline void plic_irq_toggle(const struct cpumask *mask,
+ }
+ }
+
+-static void plic_irq_enable(struct irq_data *d)
++static void plic_irq_unmask(struct irq_data *d)
+ {
+ unsigned int cpu = cpumask_any_and(irq_data_get_affinity_mask(d),
+ cpu_online_mask);
+@@ -106,7 +106,7 @@ static void plic_irq_enable(struct irq_data *d)
+ plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);
+ }
+
+-static void plic_irq_disable(struct irq_data *d)
++static void plic_irq_mask(struct irq_data *d)
+ {
+ plic_irq_toggle(cpu_possible_mask, d->hwirq, 0);
+ }
+@@ -125,10 +125,8 @@ static int plic_set_affinity(struct irq_data *d,
+ if (cpu >= nr_cpu_ids)
+ return -EINVAL;
+
+- if (!irqd_irq_disabled(d)) {
+- plic_irq_toggle(cpu_possible_mask, d->hwirq, 0);
+- plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);
+- }
++ plic_irq_toggle(cpu_possible_mask, d->hwirq, 0);
++ plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);
+
+ irq_data_update_effective_affinity(d, cpumask_of(cpu));
+
+@@ -136,14 +134,18 @@ static int plic_set_affinity(struct irq_data *d,
+ }
+ #endif
+
++static void plic_irq_eoi(struct irq_data *d)
++{
++ struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
++
++ writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
++}
++
+ static struct irq_chip plic_chip = {
+ .name = "SiFive PLIC",
+- /*
+- * There is no need to mask/unmask PLIC interrupts. They are "masked"
+- * by reading claim and "unmasked" when writing it back.
+- */
+- .irq_enable = plic_irq_enable,
+- .irq_disable = plic_irq_disable,
++ .irq_mask = plic_irq_mask,
++ .irq_unmask = plic_irq_unmask,
++ .irq_eoi = plic_irq_eoi,
+ #ifdef CONFIG_SMP
+ .irq_set_affinity = plic_set_affinity,
+ #endif
+@@ -152,7 +154,7 @@ static struct irq_chip plic_chip = {
+ static int plic_irqdomain_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hwirq)
+ {
+- irq_set_chip_and_handler(irq, &plic_chip, handle_simple_irq);
++ irq_set_chip_and_handler(irq, &plic_chip, handle_fasteoi_irq);
+ irq_set_chip_data(irq, NULL);
+ irq_set_noprobe(irq);
+ return 0;
+@@ -188,7 +190,6 @@ static void plic_handle_irq(struct pt_regs *regs)
+ hwirq);
+ else
+ generic_handle_irq(irq);
+- writel(hwirq, claim);
+ }
+ csr_set(sie, SIE_SEIE);
+ }
+diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
+index d249cf8ac277..8346e6d1816c 100644
+--- a/drivers/md/dm-cache-target.c
++++ b/drivers/md/dm-cache-target.c
+@@ -542,7 +542,7 @@ static void wake_migration_worker(struct cache *cache)
+
+ static struct dm_bio_prison_cell_v2 *alloc_prison_cell(struct cache *cache)
+ {
+- return dm_bio_prison_alloc_cell_v2(cache->prison, GFP_NOWAIT);
++ return dm_bio_prison_alloc_cell_v2(cache->prison, GFP_NOIO);
+ }
+
+ static void free_prison_cell(struct cache *cache, struct dm_bio_prison_cell_v2 *cell)
+@@ -554,9 +554,7 @@ static struct dm_cache_migration *alloc_migration(struct cache *cache)
+ {
+ struct dm_cache_migration *mg;
+
+- mg = mempool_alloc(&cache->migration_pool, GFP_NOWAIT);
+- if (!mg)
+- return NULL;
++ mg = mempool_alloc(&cache->migration_pool, GFP_NOIO);
+
+ memset(mg, 0, sizeof(*mg));
+
+@@ -664,10 +662,6 @@ static bool bio_detain_shared(struct cache *cache, dm_oblock_t oblock, struct bi
+ struct dm_bio_prison_cell_v2 *cell_prealloc, *cell;
+
+ cell_prealloc = alloc_prison_cell(cache); /* FIXME: allow wait if calling from worker */
+- if (!cell_prealloc) {
+- defer_bio(cache, bio);
+- return false;
+- }
+
+ build_key(oblock, end, &key);
+ r = dm_cell_get_v2(cache->prison, &key, lock_level(bio), bio, cell_prealloc, &cell);
+@@ -1493,11 +1487,6 @@ static int mg_lock_writes(struct dm_cache_migration *mg)
+ struct dm_bio_prison_cell_v2 *prealloc;
+
+ prealloc = alloc_prison_cell(cache);
+- if (!prealloc) {
+- DMERR_LIMIT("%s: alloc_prison_cell failed", cache_device_name(cache));
+- mg_complete(mg, false);
+- return -ENOMEM;
+- }
+
+ /*
+ * Prevent writes to the block, but allow reads to continue.
+@@ -1535,11 +1524,6 @@ static int mg_start(struct cache *cache, struct policy_work *op, struct bio *bio
+ }
+
+ mg = alloc_migration(cache);
+- if (!mg) {
+- policy_complete_background_work(cache->policy, op, false);
+- background_work_end(cache);
+- return -ENOMEM;
+- }
+
+ mg->op = op;
+ mg->overwrite_bio = bio;
+@@ -1628,10 +1612,6 @@ static int invalidate_lock(struct dm_cache_migration *mg)
+ struct dm_bio_prison_cell_v2 *prealloc;
+
+ prealloc = alloc_prison_cell(cache);
+- if (!prealloc) {
+- invalidate_complete(mg, false);
+- return -ENOMEM;
+- }
+
+ build_key(mg->invalidate_oblock, oblock_succ(mg->invalidate_oblock), &key);
+ r = dm_cell_lock_v2(cache->prison, &key,
+@@ -1669,10 +1649,6 @@ static int invalidate_start(struct cache *cache, dm_cblock_t cblock,
+ return -EPERM;
+
+ mg = alloc_migration(cache);
+- if (!mg) {
+- background_work_end(cache);
+- return -ENOMEM;
+- }
+
+ mg->overwrite_bio = bio;
+ mg->invalidate_cblock = cblock;
+diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
+index 297bbc0f41f0..c3445d2cedb9 100644
+--- a/drivers/md/raid0.c
++++ b/drivers/md/raid0.c
+@@ -151,7 +151,7 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
+ } else {
+ pr_err("md/raid0:%s: cannot assemble multi-zone RAID0 with default_layout setting\n",
+ mdname(mddev));
+- pr_err("md/raid0: please set raid.default_layout to 1 or 2\n");
++ pr_err("md/raid0: please set raid0.default_layout to 1 or 2\n");
+ err = -ENOTSUPP;
+ goto abort;
+ }
+diff --git a/drivers/memstick/host/jmb38x_ms.c b/drivers/memstick/host/jmb38x_ms.c
+index 32747425297d..64fff6abe60e 100644
+--- a/drivers/memstick/host/jmb38x_ms.c
++++ b/drivers/memstick/host/jmb38x_ms.c
+@@ -941,7 +941,7 @@ static int jmb38x_ms_probe(struct pci_dev *pdev,
+ if (!cnt) {
+ rc = -ENODEV;
+ pci_dev_busy = 1;
+- goto err_out;
++ goto err_out_int;
+ }
+
+ jm = kzalloc(sizeof(struct jmb38x_ms)
+diff --git a/drivers/mmc/host/cqhci.c b/drivers/mmc/host/cqhci.c
+index f7bdae5354c3..5047f7343ffc 100644
+--- a/drivers/mmc/host/cqhci.c
++++ b/drivers/mmc/host/cqhci.c
+@@ -611,7 +611,8 @@ static int cqhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
+ cq_host->slot[tag].flags = 0;
+
+ cq_host->qcnt += 1;
+-
++ /* Make sure descriptors are ready before ringing the doorbell */
++ wmb();
+ cqhci_writel(cq_host, 1 << tag, CQHCI_TDBR);
+ if (!(cqhci_readl(cq_host, CQHCI_TDBR) & (1 << tag)))
+ pr_debug("%s: cqhci: doorbell not set for tag %d\n",
+diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
+index b334e81c5cab..9a0bc0c5fa4b 100644
+--- a/drivers/mmc/host/mxs-mmc.c
++++ b/drivers/mmc/host/mxs-mmc.c
+@@ -17,6 +17,7 @@
+ #include <linux/interrupt.h>
+ #include <linux/dma-mapping.h>
+ #include <linux/dmaengine.h>
++#include <linux/dma/mxs-dma.h>
+ #include <linux/highmem.h>
+ #include <linux/clk.h>
+ #include <linux/err.h>
+@@ -266,7 +267,7 @@ static void mxs_mmc_bc(struct mxs_mmc_host *host)
+ ssp->ssp_pio_words[2] = cmd1;
+ ssp->dma_dir = DMA_NONE;
+ ssp->slave_dirn = DMA_TRANS_NONE;
+- desc = mxs_mmc_prep_dma(host, DMA_CTRL_ACK);
++ desc = mxs_mmc_prep_dma(host, MXS_DMA_CTRL_WAIT4END);
+ if (!desc)
+ goto out;
+
+@@ -311,7 +312,7 @@ static void mxs_mmc_ac(struct mxs_mmc_host *host)
+ ssp->ssp_pio_words[2] = cmd1;
+ ssp->dma_dir = DMA_NONE;
+ ssp->slave_dirn = DMA_TRANS_NONE;
+- desc = mxs_mmc_prep_dma(host, DMA_CTRL_ACK);
++ desc = mxs_mmc_prep_dma(host, MXS_DMA_CTRL_WAIT4END);
+ if (!desc)
+ goto out;
+
+@@ -441,7 +442,7 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host)
+ host->data = data;
+ ssp->dma_dir = dma_data_dir;
+ ssp->slave_dirn = slave_dirn;
+- desc = mxs_mmc_prep_dma(host, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
++ desc = mxs_mmc_prep_dma(host, DMA_PREP_INTERRUPT | MXS_DMA_CTRL_WAIT4END);
+ if (!desc)
+ goto out;
+
+diff --git a/drivers/mmc/host/sdhci-omap.c b/drivers/mmc/host/sdhci-omap.c
+index 41c2677c587f..083e7e053c95 100644
+--- a/drivers/mmc/host/sdhci-omap.c
++++ b/drivers/mmc/host/sdhci-omap.c
+@@ -372,7 +372,7 @@ static int sdhci_omap_execute_tuning(struct mmc_host *mmc, u32 opcode)
+ * on temperature
+ */
+ if (temperature < -20000)
+- phase_delay = min(max_window + 4 * max_len - 24,
++ phase_delay = min(max_window + 4 * (max_len - 1) - 24,
+ max_window +
+ DIV_ROUND_UP(13 * max_len, 16) * 4);
+ else if (temperature < 20000)
+diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c
+index 16f15c93a102..bbeeb8618c80 100644
+--- a/drivers/net/dsa/qca8k.c
++++ b/drivers/net/dsa/qca8k.c
+@@ -705,7 +705,7 @@ qca8k_setup(struct dsa_switch *ds)
+ BIT(0) << QCA8K_GLOBAL_FW_CTRL1_UC_DP_S);
+
+ /* Setup connection between CPU port & user ports */
+- for (i = 0; i < DSA_MAX_PORTS; i++) {
++ for (i = 0; i < QCA8K_NUM_PORTS; i++) {
+ /* CPU port gets connected to all user ports of the switch */
+ if (dsa_is_cpu_port(ds, i)) {
+ qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(QCA8K_CPU_PORT),
+@@ -1074,7 +1074,7 @@ qca8k_sw_probe(struct mdio_device *mdiodev)
+ if (id != QCA8K_ID_QCA8337)
+ return -ENODEV;
+
+- priv->ds = dsa_switch_alloc(&mdiodev->dev, DSA_MAX_PORTS);
++ priv->ds = dsa_switch_alloc(&mdiodev->dev, QCA8K_NUM_PORTS);
+ if (!priv->ds)
+ return -ENOMEM;
+
+diff --git a/drivers/net/dsa/rtl8366rb.c b/drivers/net/dsa/rtl8366rb.c
+index a268085ffad2..f5cc8b0a7c74 100644
+--- a/drivers/net/dsa/rtl8366rb.c
++++ b/drivers/net/dsa/rtl8366rb.c
+@@ -507,7 +507,8 @@ static int rtl8366rb_setup_cascaded_irq(struct realtek_smi *smi)
+ irq = of_irq_get(intc, 0);
+ if (irq <= 0) {
+ dev_err(smi->dev, "failed to get parent IRQ\n");
+- return irq ? irq : -EINVAL;
++ ret = irq ? irq : -EINVAL;
++ goto out_put_node;
+ }
+
+ /* This clears the IRQ status register */
+@@ -515,7 +516,7 @@ static int rtl8366rb_setup_cascaded_irq(struct realtek_smi *smi)
+ &val);
+ if (ret) {
+ dev_err(smi->dev, "can't read interrupt status\n");
+- return ret;
++ goto out_put_node;
+ }
+
+ /* Fetch IRQ edge information from the descriptor */
+@@ -537,7 +538,7 @@ static int rtl8366rb_setup_cascaded_irq(struct realtek_smi *smi)
+ val);
+ if (ret) {
+ dev_err(smi->dev, "could not configure IRQ polarity\n");
+- return ret;
++ goto out_put_node;
+ }
+
+ ret = devm_request_threaded_irq(smi->dev, irq, NULL,
+@@ -545,7 +546,7 @@ static int rtl8366rb_setup_cascaded_irq(struct realtek_smi *smi)
+ "RTL8366RB", smi);
+ if (ret) {
+ dev_err(smi->dev, "unable to request irq: %d\n", ret);
+- return ret;
++ goto out_put_node;
+ }
+ smi->irqdomain = irq_domain_add_linear(intc,
+ RTL8366RB_NUM_INTERRUPT,
+@@ -553,12 +554,15 @@ static int rtl8366rb_setup_cascaded_irq(struct realtek_smi *smi)
+ smi);
+ if (!smi->irqdomain) {
+ dev_err(smi->dev, "failed to create IRQ domain\n");
+- return -EINVAL;
++ ret = -EINVAL;
++ goto out_put_node;
+ }
+ for (i = 0; i < smi->num_ports; i++)
+ irq_set_parent(irq_create_mapping(smi->irqdomain, i), irq);
+
+- return 0;
++out_put_node:
++ of_node_put(intc);
++ return ret;
+ }
+
+ static int rtl8366rb_set_addr(struct realtek_smi *smi)
+diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+index b4a0fb281e69..bb65dd39f847 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
++++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+@@ -194,9 +194,7 @@ static void aq_ndev_set_multicast_settings(struct net_device *ndev)
+ {
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+
+- aq_nic_set_packet_filter(aq_nic, ndev->flags);
+-
+- aq_nic_set_multicast_list(aq_nic, ndev);
++ (void)aq_nic_set_multicast_list(aq_nic, ndev);
+ }
+
+ static int aq_ndo_vlan_rx_add_vid(struct net_device *ndev, __be16 proto,
+diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+index 8f66e7817811..2a18439b36fb 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
++++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+@@ -631,9 +631,12 @@ err_exit:
+
+ int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev)
+ {
+- unsigned int packet_filter = self->packet_filter;
++ const struct aq_hw_ops *hw_ops = self->aq_hw_ops;
++ struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
++ unsigned int packet_filter = ndev->flags;
+ struct netdev_hw_addr *ha = NULL;
+ unsigned int i = 0U;
++ int err = 0;
+
+ self->mc_list.count = 0;
+ if (netdev_uc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) {
+@@ -641,29 +644,26 @@ int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev)
+ } else {
+ netdev_for_each_uc_addr(ha, ndev) {
+ ether_addr_copy(self->mc_list.ar[i++], ha->addr);
+-
+- if (i >= AQ_HW_MULTICAST_ADDRESS_MAX)
+- break;
+ }
+ }
+
+- if (i + netdev_mc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) {
+- packet_filter |= IFF_ALLMULTI;
+- } else {
+- netdev_for_each_mc_addr(ha, ndev) {
+- ether_addr_copy(self->mc_list.ar[i++], ha->addr);
+-
+- if (i >= AQ_HW_MULTICAST_ADDRESS_MAX)
+- break;
++ cfg->is_mc_list_enabled = !!(packet_filter & IFF_MULTICAST);
++ if (cfg->is_mc_list_enabled) {
++ if (i + netdev_mc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) {
++ packet_filter |= IFF_ALLMULTI;
++ } else {
++ netdev_for_each_mc_addr(ha, ndev) {
++ ether_addr_copy(self->mc_list.ar[i++],
++ ha->addr);
++ }
+ }
+ }
+
+ if (i > 0 && i <= AQ_HW_MULTICAST_ADDRESS_MAX) {
+- packet_filter |= IFF_MULTICAST;
+ self->mc_list.count = i;
+- self->aq_hw_ops->hw_multicast_list_set(self->aq_hw,
+- self->mc_list.ar,
+- self->mc_list.count);
++ err = hw_ops->hw_multicast_list_set(self->aq_hw,
++ self->mc_list.ar,
++ self->mc_list.count);
+ }
+ return aq_nic_set_packet_filter(self, packet_filter);
+ }
+diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+index 3901d7994ca1..76bdbe1596d6 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
++++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+@@ -313,6 +313,7 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
+ break;
+
+ buff->is_error |= buff_->is_error;
++ buff->is_cso_err |= buff_->is_cso_err;
+
+ } while (!buff_->is_eop);
+
+@@ -320,7 +321,7 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
+ err = 0;
+ goto err_exit;
+ }
+- if (buff->is_error) {
++ if (buff->is_error || buff->is_cso_err) {
+ buff_ = buff;
+ do {
+ next_ = buff_->next,
+diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+index 30f7fc4c97ff..2ad3fa6316ce 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
++++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+@@ -818,14 +818,15 @@ static int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self,
+ cfg->is_vlan_force_promisc);
+
+ hw_atl_rpfl2multicast_flr_en_set(self,
+- IS_FILTER_ENABLED(IFF_ALLMULTI), 0);
++ IS_FILTER_ENABLED(IFF_ALLMULTI) &&
++ IS_FILTER_ENABLED(IFF_MULTICAST), 0);
+
+ hw_atl_rpfl2_accept_all_mc_packets_set(self,
+- IS_FILTER_ENABLED(IFF_ALLMULTI));
++ IS_FILTER_ENABLED(IFF_ALLMULTI) &&
++ IS_FILTER_ENABLED(IFF_MULTICAST));
+
+ hw_atl_rpfl2broadcast_en_set(self, IS_FILTER_ENABLED(IFF_BROADCAST));
+
+- cfg->is_mc_list_enabled = IS_FILTER_ENABLED(IFF_MULTICAST);
+
+ for (i = HW_ATL_B0_MAC_MIN; i < HW_ATL_B0_MAC_MAX; ++i)
+ hw_atl_rpfl2_uc_flr_en_set(self,
+@@ -968,14 +969,26 @@ static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self)
+
+ static int hw_atl_b0_hw_stop(struct aq_hw_s *self)
+ {
++ int err;
++ u32 val;
++
+ hw_atl_b0_hw_irq_disable(self, HW_ATL_B0_INT_MASK);
+
+ /* Invalidate Descriptor Cache to prevent writing to the cached
+ * descriptors and to the data pointer of those descriptors
+ */
+- hw_atl_rdm_rx_dma_desc_cache_init_set(self, 1);
++ hw_atl_rdm_rx_dma_desc_cache_init_tgl(self);
+
+- return aq_hw_err_from_flags(self);
++ err = aq_hw_err_from_flags(self);
++
++ if (err)
++ goto err_exit;
++
++ readx_poll_timeout_atomic(hw_atl_rdm_rx_dma_desc_cache_init_done_get,
++ self, val, val == 1, 1000U, 10000U);
++
++err_exit:
++ return err;
+ }
+
+ static int hw_atl_b0_hw_ring_tx_stop(struct aq_hw_s *self,
+diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
+index 1149812ae463..6f340695e6bd 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
++++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
+@@ -606,12 +606,25 @@ void hw_atl_rpb_rx_flow_ctl_mode_set(struct aq_hw_s *aq_hw, u32 rx_flow_ctl_mode
+ HW_ATL_RPB_RX_FC_MODE_SHIFT, rx_flow_ctl_mode);
+ }
+
+-void hw_atl_rdm_rx_dma_desc_cache_init_set(struct aq_hw_s *aq_hw, u32 init)
++void hw_atl_rdm_rx_dma_desc_cache_init_tgl(struct aq_hw_s *aq_hw)
+ {
++ u32 val;
++
++ val = aq_hw_read_reg_bit(aq_hw, HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_ADR,
++ HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_MSK,
++ HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_SHIFT);
++
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_ADR,
+ HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_MSK,
+ HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_SHIFT,
+- init);
++ val ^ 1);
++}
++
++u32 hw_atl_rdm_rx_dma_desc_cache_init_done_get(struct aq_hw_s *aq_hw)
++{
++ return aq_hw_read_reg_bit(aq_hw, RDM_RX_DMA_DESC_CACHE_INIT_DONE_ADR,
++ RDM_RX_DMA_DESC_CACHE_INIT_DONE_MSK,
++ RDM_RX_DMA_DESC_CACHE_INIT_DONE_SHIFT);
+ }
+
+ void hw_atl_rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
+diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
+index 0c37abbabca5..c3ee278c3747 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
++++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
+@@ -313,8 +313,11 @@ void hw_atl_rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 rx_pkt_buff_size_per_tc,
+ u32 buffer);
+
+-/* set rdm rx dma descriptor cache init */
+-void hw_atl_rdm_rx_dma_desc_cache_init_set(struct aq_hw_s *aq_hw, u32 init);
++/* toggle rdm rx dma descriptor cache init */
++void hw_atl_rdm_rx_dma_desc_cache_init_tgl(struct aq_hw_s *aq_hw);
++
++/* get rdm rx dma descriptor cache init done */
++u32 hw_atl_rdm_rx_dma_desc_cache_init_done_get(struct aq_hw_s *aq_hw);
+
+ /* set rx xoff enable (per tc) */
+ void hw_atl_rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw, u32 rx_xoff_en_per_tc,
+diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
+index c3febcdfa92e..35887ad89025 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
++++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
+@@ -318,6 +318,25 @@
+ /* default value of bitfield rdm_desc_init_i */
+ #define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_DEFAULT 0x0
+
++/* rdm_desc_init_done_i bitfield definitions
++ * preprocessor definitions for the bitfield rdm_desc_init_done_i.
++ * port="pif_rdm_desc_init_done_i"
++ */
++
++/* register address for bitfield rdm_desc_init_done_i */
++#define RDM_RX_DMA_DESC_CACHE_INIT_DONE_ADR 0x00005a10
++/* bitmask for bitfield rdm_desc_init_done_i */
++#define RDM_RX_DMA_DESC_CACHE_INIT_DONE_MSK 0x00000001U
++/* inverted bitmask for bitfield rdm_desc_init_done_i */
++#define RDM_RX_DMA_DESC_CACHE_INIT_DONE_MSKN 0xfffffffe
++/* lower bit position of bitfield rdm_desc_init_done_i */
++#define RDM_RX_DMA_DESC_CACHE_INIT_DONE_SHIFT 0U
++/* width of bitfield rdm_desc_init_done_i */
++#define RDM_RX_DMA_DESC_CACHE_INIT_DONE_WIDTH 1
++/* default value of bitfield rdm_desc_init_done_i */
++#define RDM_RX_DMA_DESC_CACHE_INIT_DONE_DEFAULT 0x0
++
++
+ /* rx int_desc_wrb_en bitfield definitions
+ * preprocessor definitions for the bitfield "int_desc_wrb_en".
+ * port="pif_rdm_int_desc_wrb_en_i"
+diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
+index da726489e3c8..7bc51f8d6f2f 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
++++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
+@@ -337,7 +337,7 @@ static int aq_fw2x_get_phy_temp(struct aq_hw_s *self, int *temp)
+ /* Convert PHY temperature from 1/256 degree Celsius
+ * to 1/1000 degree Celsius.
+ */
+- *temp = temp_res * 1000 / 256;
++ *temp = (temp_res & 0xFFFF) * 1000 / 256;
+
+ return 0;
+ }
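
The added mask matters because only the low 16 bits of the 32-bit firmware response word carry the temperature, in 1/256 degree Celsius units. A compilable sketch with a worked value:

#include <stdint.h>
#include <stdio.h>

/* The upper half of the response word holds unrelated data and must be
 * masked off before scaling from 1/256 to 1/1000 degree units --
 * exactly the fix above. */
static int fw_temp_to_millideg(uint32_t temp_res)
{
    return (int)((temp_res & 0xFFFFu) * 1000u / 256u);
}

int main(void)
{
    /* Low half 0x1A80 = 6784 -> 6784 * 1000 / 256 = 26500, i.e. 26.5 C.
     * The junk in the top half no longer corrupts the result. */
    printf("%d millidegrees C\n", fw_temp_to_millideg(0xDEAD1A80u));
    return 0;
}
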
+diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c
+index 6703960c7cf5..d1101eea15c2 100644
+--- a/drivers/net/ethernet/atheros/ag71xx.c
++++ b/drivers/net/ethernet/atheros/ag71xx.c
+@@ -526,7 +526,7 @@ static int ag71xx_mdio_probe(struct ag71xx *ag)
+ struct device *dev = &ag->pdev->dev;
+ struct net_device *ndev = ag->ndev;
+ static struct mii_bus *mii_bus;
+- struct device_node *np;
++ struct device_node *np, *mnp;
+ int err;
+
+ np = dev->of_node;
+@@ -571,7 +571,9 @@ static int ag71xx_mdio_probe(struct ag71xx *ag)
+ msleep(200);
+ }
+
+- err = of_mdiobus_register(mii_bus, np);
++ mnp = of_get_child_by_name(np, "mdio");
++ err = of_mdiobus_register(mii_bus, mnp);
++ of_node_put(mnp);
+ if (err)
+ goto mdio_err_put_clk;
+
+diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
+index e24f5d2b6afe..53055ce5dfd6 100644
+--- a/drivers/net/ethernet/broadcom/Kconfig
++++ b/drivers/net/ethernet/broadcom/Kconfig
+@@ -8,7 +8,6 @@ config NET_VENDOR_BROADCOM
+ default y
+ depends on (SSB_POSSIBLE && HAS_DMA) || PCI || BCM63XX || \
+ SIBYTE_SB1xxx_SOC
+- select DIMLIB
+ ---help---
+ If you have a network (Ethernet) chipset belonging to this class,
+ say Y.
+@@ -69,6 +68,7 @@ config BCMGENET
+ select FIXED_PHY
+ select BCM7XXX_PHY
+ select MDIO_BCM_UNIMAC
++ select DIMLIB
+ help
+ This driver supports the built-in Ethernet MACs found in the
+ Broadcom BCM7xxx Set Top Box family chipset.
+@@ -188,6 +188,7 @@ config SYSTEMPORT
+ select MII
+ select PHYLIB
+ select FIXED_PHY
++ select DIMLIB
+ help
+ This driver supports the built-in Ethernet MACs found in the
+ Broadcom BCM7xxx Set Top Box family chipset using an internal
+@@ -200,6 +201,7 @@ config BNXT
+ select LIBCRC32C
+ select NET_DEVLINK
+ select PAGE_POOL
++ select DIMLIB
+ ---help---
+ This driver supports Broadcom NetXtreme-C/E 10/25/40/50 gigabit
+ Ethernet cards. To compile this driver as a module, choose M here:
+diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+index 4a8fc03d82fd..dbc69d8fa05f 100644
+--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+@@ -366,6 +366,7 @@ struct bcmgenet_mib_counters {
+ #define EXT_PWR_DOWN_PHY_EN (1 << 20)
+
+ #define EXT_RGMII_OOB_CTRL 0x0C
++#define RGMII_MODE_EN_V123 (1 << 0)
+ #define RGMII_LINK (1 << 4)
+ #define OOB_DISABLE (1 << 5)
+ #define RGMII_MODE_EN (1 << 6)
+diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
+index 970e478a9017..e7c291bf4ed1 100644
+--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
++++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
+@@ -258,7 +258,11 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
+ */
+ if (priv->ext_phy) {
+ reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
+- reg |= RGMII_MODE_EN | id_mode_dis;
++ reg |= id_mode_dis;
++ if (GENET_IS_V1(priv) || GENET_IS_V2(priv) || GENET_IS_V3(priv))
++ reg |= RGMII_MODE_EN_V123;
++ else
++ reg |= RGMII_MODE_EN;
+ bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
+ }
+
+@@ -273,11 +277,12 @@ int bcmgenet_mii_probe(struct net_device *dev)
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct device_node *dn = priv->pdev->dev.of_node;
+ struct phy_device *phydev;
+- u32 phy_flags;
++ u32 phy_flags = 0;
+ int ret;
+
+ /* Communicate the integrated PHY revision */
+- phy_flags = priv->gphy_rev;
++ if (priv->internal_phy)
++ phy_flags = priv->gphy_rev;
+
+ /* Initialize link state variables that bcmgenet_mii_setup() uses */
+ priv->old_link = -1;
+diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c
+index 3e863a71c513..7df5d7d211d4 100644
+--- a/drivers/net/ethernet/hisilicon/hns_mdio.c
++++ b/drivers/net/ethernet/hisilicon/hns_mdio.c
+@@ -148,11 +148,15 @@ static int mdio_sc_cfg_reg_write(struct hns_mdio_device *mdio_dev,
+ {
+ u32 time_cnt;
+ u32 reg_value;
++ int ret;
+
+ regmap_write(mdio_dev->subctrl_vbase, cfg_reg, set_val);
+
+ for (time_cnt = MDIO_TIMEOUT; time_cnt; time_cnt--) {
+- regmap_read(mdio_dev->subctrl_vbase, st_reg, &reg_value);
++ ret = regmap_read(mdio_dev->subctrl_vbase, st_reg, &reg_value);
++ if (ret)
++ return ret;
++
+ reg_value &= st_msk;
+ if ((!!check_st) == (!!reg_value))
+ break;
+diff --git a/drivers/net/ethernet/i825xx/lasi_82596.c b/drivers/net/ethernet/i825xx/lasi_82596.c
+index 211c5f74b4c8..aec7e98bcc85 100644
+--- a/drivers/net/ethernet/i825xx/lasi_82596.c
++++ b/drivers/net/ethernet/i825xx/lasi_82596.c
+@@ -96,6 +96,8 @@
+
+ #define OPT_SWAP_PORT 0x0001 /* Need to wordswp on the MPU port */
+
++#define LIB82596_DMA_ATTR DMA_ATTR_NON_CONSISTENT
++
+ #define DMA_WBACK(ndev, addr, len) \
+ do { dma_cache_sync((ndev)->dev.parent, (void *)addr, len, DMA_TO_DEVICE); } while (0)
+
+@@ -200,7 +202,7 @@ static int __exit lan_remove_chip(struct parisc_device *pdev)
+
+ unregister_netdev (dev);
+ dma_free_attrs(&pdev->dev, sizeof(struct i596_private), lp->dma,
+- lp->dma_addr, DMA_ATTR_NON_CONSISTENT);
++ lp->dma_addr, LIB82596_DMA_ATTR);
+ free_netdev (dev);
+ return 0;
+ }
+diff --git a/drivers/net/ethernet/i825xx/lib82596.c b/drivers/net/ethernet/i825xx/lib82596.c
+index 1274ad24d6af..f9742af7f142 100644
+--- a/drivers/net/ethernet/i825xx/lib82596.c
++++ b/drivers/net/ethernet/i825xx/lib82596.c
+@@ -1065,7 +1065,7 @@ static int i82596_probe(struct net_device *dev)
+
+ dma = dma_alloc_attrs(dev->dev.parent, sizeof(struct i596_dma),
+ &lp->dma_addr, GFP_KERNEL,
+- DMA_ATTR_NON_CONSISTENT);
++ LIB82596_DMA_ATTR);
+ if (!dma) {
+ printk(KERN_ERR "%s: Couldn't get shared memory\n", __FILE__);
+ return -ENOMEM;
+@@ -1087,7 +1087,7 @@ static int i82596_probe(struct net_device *dev)
+ i = register_netdev(dev);
+ if (i) {
+ dma_free_attrs(dev->dev.parent, sizeof(struct i596_dma),
+- dma, lp->dma_addr, DMA_ATTR_NON_CONSISTENT);
++ dma, lp->dma_addr, LIB82596_DMA_ATTR);
+ return i;
+ }
+
+diff --git a/drivers/net/ethernet/i825xx/sni_82596.c b/drivers/net/ethernet/i825xx/sni_82596.c
+index 6eb6c2ff7f09..6436a98c5953 100644
+--- a/drivers/net/ethernet/i825xx/sni_82596.c
++++ b/drivers/net/ethernet/i825xx/sni_82596.c
+@@ -24,6 +24,8 @@
+
+ static const char sni_82596_string[] = "snirm_82596";
+
++#define LIB82596_DMA_ATTR 0
++
+ #define DMA_WBACK(priv, addr, len) do { } while (0)
+ #define DMA_INV(priv, addr, len) do { } while (0)
+ #define DMA_WBACK_INV(priv, addr, len) do { } while (0)
+@@ -152,7 +154,7 @@ static int sni_82596_driver_remove(struct platform_device *pdev)
+
+ unregister_netdev(dev);
+ dma_free_attrs(dev->dev.parent, sizeof(struct i596_private), lp->dma,
+- lp->dma_addr, DMA_ATTR_NON_CONSISTENT);
++ lp->dma_addr, LIB82596_DMA_ATTR);
+ iounmap(lp->ca);
+ iounmap(lp->mpu_port);
+ free_netdev (dev);
+diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
+index 5cb55ea671e3..964e7d62f4b1 100644
+--- a/drivers/net/ethernet/ibm/ibmvnic.c
++++ b/drivers/net/ethernet/ibm/ibmvnic.c
+@@ -2772,12 +2772,10 @@ static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
+
+ if (adapter->resetting &&
+ adapter->reset_reason == VNIC_RESET_MOBILITY) {
+- u64 val = (0xff000000) | scrq->hw_irq;
++ struct irq_desc *desc = irq_to_desc(scrq->irq);
++ struct irq_chip *chip = irq_desc_get_chip(desc);
+
+- rc = plpar_hcall_norets(H_EOI, val);
+- if (rc)
+- dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
+- val, rc);
++ chip->irq_eoi(&desc->irq_data);
+ }
+
+ rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
+diff --git a/drivers/net/ethernet/mscc/ocelot_board.c b/drivers/net/ethernet/mscc/ocelot_board.c
+index 2451d4a96490..041fb9f38eca 100644
+--- a/drivers/net/ethernet/mscc/ocelot_board.c
++++ b/drivers/net/ethernet/mscc/ocelot_board.c
+@@ -287,13 +287,14 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
+ continue;
+
+ phy = of_phy_find_device(phy_node);
++ of_node_put(phy_node);
+ if (!phy)
+ continue;
+
+ err = ocelot_probe_port(ocelot, port, regs, phy);
+ if (err) {
+ of_node_put(portnp);
+- return err;
++ goto out_put_ports;
+ }
+
+ phy_mode = of_get_phy_mode(portnp);
+@@ -321,7 +322,8 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
+ "invalid phy mode for port%d, (Q)SGMII only\n",
+ port);
+ of_node_put(portnp);
+- return -EINVAL;
++ err = -EINVAL;
++ goto out_put_ports;
+ }
+
+ serdes = devm_of_phy_get(ocelot->dev, portnp, NULL);
+@@ -334,7 +336,8 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
+ "missing SerDes phys for port%d\n",
+ port);
+
+- goto err_probe_ports;
++ of_node_put(portnp);
++ goto out_put_ports;
+ }
+
+ ocelot->ports[port]->serdes = serdes;
+@@ -346,9 +349,8 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
+
+ dev_info(&pdev->dev, "Ocelot switch probed\n");
+
+- return 0;
+-
+-err_probe_ports:
++out_put_ports:
++ of_node_put(ports);
+ return err;
+ }
+
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+index fc9954e4a772..9c73fb759b57 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+@@ -407,8 +407,11 @@ static void dwmac4_set_filter(struct mac_device_info *hw,
+ int numhashregs = (hw->multicast_filter_bins >> 5);
+ int mcbitslog2 = hw->mcast_bits_log2;
+ unsigned int value;
++ u32 mc_filter[8];
+ int i;
+
++ memset(mc_filter, 0, sizeof(mc_filter));
++
+ value = readl(ioaddr + GMAC_PACKET_FILTER);
+ value &= ~GMAC_PACKET_FILTER_HMC;
+ value &= ~GMAC_PACKET_FILTER_HPF;
+@@ -422,16 +425,13 @@ static void dwmac4_set_filter(struct mac_device_info *hw,
+ /* Pass all multi */
+ value |= GMAC_PACKET_FILTER_PM;
+ /* Set all the bits of the HASH tab */
+- for (i = 0; i < numhashregs; i++)
+- writel(0xffffffff, ioaddr + GMAC_HASH_TAB(i));
++ memset(mc_filter, 0xff, sizeof(mc_filter));
+ } else if (!netdev_mc_empty(dev)) {
+ struct netdev_hw_addr *ha;
+- u32 mc_filter[8];
+
+ /* Hash filter for multicast */
+ value |= GMAC_PACKET_FILTER_HMC;
+
+- memset(mc_filter, 0, sizeof(mc_filter));
+ netdev_for_each_mc_addr(ha, dev) {
+ /* The upper n bits of the calculated CRC are used to
+ * index the contents of the hash table. The number of
+@@ -446,10 +446,11 @@ static void dwmac4_set_filter(struct mac_device_info *hw,
+ */
+ mc_filter[bit_nr >> 5] |= (1 << (bit_nr & 0x1f));
+ }
+- for (i = 0; i < numhashregs; i++)
+- writel(mc_filter[i], ioaddr + GMAC_HASH_TAB(i));
+ }
+
++ for (i = 0; i < numhashregs; i++)
++ writel(mc_filter[i], ioaddr + GMAC_HASH_TAB(i));
++
+ value |= GMAC_PACKET_FILTER_HPF;
+
+ /* Handle multiple unicast addresses */
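
The fix above hoists mc_filter out so the hash registers are rewritten on every call -- including with an all-zero table -- rather than only when multicast addresses are present, which previously left stale bits set. A small sketch of the bit-index math and the unconditional write-back; bit_nr is assumed to already be the top-bits-of-CRC index the comment above describes:

#include <stdint.h>
#include <stdio.h>

/* One bit per multicast address: bit_nr selects a word (bit_nr >> 5)
 * and a bit within it (bit_nr & 0x1f), as in the code above. */
static void set_hash_bit(uint32_t mc_filter[8], unsigned bit_nr)
{
    mc_filter[bit_nr >> 5] |= 1u << (bit_nr & 0x1f);
}

int main(void)
{
    uint32_t mc_filter[8] = { 0 };  /* the hoisted, always-zeroed copy */
    int i;

    set_hash_bit(mc_filter, 7);     /* -> word 0, bit 7 */
    set_hash_bit(mc_filter, 200);   /* -> word 6, bit 8 */

    /* The write-back loop now runs unconditionally, so words that end
     * up zero still overwrite whatever an earlier config left behind. */
    for (i = 0; i < 8; i++)
        printf("GMAC_HASH_TAB(%d) = 0x%08x\n", i, mc_filter[i]);
    return 0;
}
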
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+index 85c68b7ee8c6..46d74f407aab 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+@@ -370,7 +370,7 @@ static void dwxgmac2_set_filter(struct mac_device_info *hw,
+ dwxgmac2_set_mchash(ioaddr, mc_filter, mcbitslog2);
+
+ /* Handle multiple unicast addresses */
+- if (netdev_uc_count(dev) > XGMAC_ADDR_MAX) {
++ if (netdev_uc_count(dev) > hw->unicast_filter_entries) {
+ value |= XGMAC_FILTER_PR;
+ } else {
+ struct netdev_hw_addr *ha;
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index 5c4408bdc843..fe2d3029de5e 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -626,6 +626,7 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ ptp_v2 = PTP_TCR_TSVER2ENA;
+ snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
++ ts_event_en = PTP_TCR_TSEVNTENA;
+ ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
+ ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
+ ptp_over_ethernet = PTP_TCR_TSIPENA;
+@@ -4453,11 +4454,9 @@ int stmmac_suspend(struct device *dev)
+ if (!ndev || !netif_running(ndev))
+ return 0;
+
+- mutex_lock(&priv->lock);
++ phylink_mac_change(priv->phylink, false);
+
+- rtnl_lock();
+- phylink_stop(priv->phylink);
+- rtnl_unlock();
++ mutex_lock(&priv->lock);
+
+ netif_device_detach(ndev);
+ stmmac_stop_all_queues(priv);
+@@ -4472,11 +4471,19 @@ int stmmac_suspend(struct device *dev)
+ stmmac_pmt(priv, priv->hw, priv->wolopts);
+ priv->irq_wake = 1;
+ } else {
++ mutex_unlock(&priv->lock);
++ rtnl_lock();
++ phylink_stop(priv->phylink);
++ rtnl_unlock();
++ mutex_lock(&priv->lock);
++
+ stmmac_mac_set(priv, priv->ioaddr, false);
+ pinctrl_pm_select_sleep_state(priv->device);
+ /* Disable clock in case of PWM is off */
+- clk_disable(priv->plat->pclk);
+- clk_disable(priv->plat->stmmac_clk);
++ if (priv->plat->clk_ptp_ref)
++ clk_disable_unprepare(priv->plat->clk_ptp_ref);
++ clk_disable_unprepare(priv->plat->pclk);
++ clk_disable_unprepare(priv->plat->stmmac_clk);
+ }
+ mutex_unlock(&priv->lock);
+
+@@ -4539,8 +4546,10 @@ int stmmac_resume(struct device *dev)
+ } else {
+ pinctrl_pm_select_default_state(priv->device);
+ /* enable the clk previously disabled */
+- clk_enable(priv->plat->stmmac_clk);
+- clk_enable(priv->plat->pclk);
++ clk_prepare_enable(priv->plat->stmmac_clk);
++ clk_prepare_enable(priv->plat->pclk);
++ if (priv->plat->clk_ptp_ref)
++ clk_prepare_enable(priv->plat->clk_ptp_ref);
+ /* reset the phy so that it's ready */
+ if (priv->mii)
+ stmmac_mdio_reset(priv->mii);
+@@ -4562,12 +4571,16 @@ int stmmac_resume(struct device *dev)
+
+ stmmac_start_all_queues(priv);
+
+- rtnl_lock();
+- phylink_start(priv->phylink);
+- rtnl_unlock();
+-
+ mutex_unlock(&priv->lock);
+
++ if (!device_may_wakeup(priv->device)) {
++ rtnl_lock();
++ phylink_start(priv->phylink);
++ rtnl_unlock();
++ }
++
++ phylink_mac_change(priv->phylink, true);
++
+ return 0;
+ }
+ EXPORT_SYMBOL_GPL(stmmac_resume);
+diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
+index b188fce3f641..658b399ac9ea 100644
+--- a/drivers/net/ieee802154/ca8210.c
++++ b/drivers/net/ieee802154/ca8210.c
+@@ -3152,12 +3152,12 @@ static int ca8210_probe(struct spi_device *spi_device)
+ goto error;
+ }
+
++ priv->spi->dev.platform_data = pdata;
+ ret = ca8210_get_platform_data(priv->spi, pdata);
+ if (ret) {
+ dev_crit(&spi_device->dev, "ca8210_get_platform_data failed\n");
+ goto error;
+ }
+- priv->spi->dev.platform_data = pdata;
+
+ ret = ca8210_dev_com_init(priv);
+ if (ret) {
+diff --git a/drivers/net/netdevsim/fib.c b/drivers/net/netdevsim/fib.c
+index f61d094746c0..1a251f76d09b 100644
+--- a/drivers/net/netdevsim/fib.c
++++ b/drivers/net/netdevsim/fib.c
+@@ -241,8 +241,8 @@ static struct pernet_operations nsim_fib_net_ops = {
+
+ void nsim_fib_exit(void)
+ {
+- unregister_pernet_subsys(&nsim_fib_net_ops);
+ unregister_fib_notifier(&nsim_fib_nb);
++ unregister_pernet_subsys(&nsim_fib_net_ops);
+ }
+
+ int nsim_fib_init(void)
+@@ -258,6 +258,7 @@ int nsim_fib_init(void)
+ err = register_fib_notifier(&nsim_fib_nb, nsim_fib_dump_inconsistent);
+ if (err < 0) {
+ pr_err("Failed to register fib notifier\n");
++ unregister_pernet_subsys(&nsim_fib_net_ops);
+ goto err_out;
+ }
+
+diff --git a/drivers/net/phy/mdio_device.c b/drivers/net/phy/mdio_device.c
+index e282600bd83e..c1d345c3cab3 100644
+--- a/drivers/net/phy/mdio_device.c
++++ b/drivers/net/phy/mdio_device.c
+@@ -121,7 +121,7 @@ void mdio_device_reset(struct mdio_device *mdiodev, int value)
+ return;
+
+ if (mdiodev->reset_gpio)
+- gpiod_set_value(mdiodev->reset_gpio, value);
++ gpiod_set_value_cansleep(mdiodev->reset_gpio, value);
+
+ if (mdiodev->reset_ctrl) {
+ if (value)
+diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
+index 2fea5541c35a..63dedec0433d 100644
+--- a/drivers/net/phy/micrel.c
++++ b/drivers/net/phy/micrel.c
+@@ -341,6 +341,35 @@ static int ksz8041_config_aneg(struct phy_device *phydev)
+ return genphy_config_aneg(phydev);
+ }
+
++static int ksz8051_ksz8795_match_phy_device(struct phy_device *phydev,
++ const u32 ksz_phy_id)
++{
++ int ret;
++
++ if ((phydev->phy_id & MICREL_PHY_ID_MASK) != ksz_phy_id)
++ return 0;
++
++ ret = phy_read(phydev, MII_BMSR);
++ if (ret < 0)
++ return ret;
++
++ /* The KSZ8051 PHY and the KSZ8794/KSZ8795/KSZ8765 switch report
++ * the exact same PHY ID. They can, however, be told apart by the
++ * presence of the extended capability registers: the KSZ8051 PHY
++ * has them, while the switch does not.
++ */
++ ret &= BMSR_ERCAP;
++ if (ksz_phy_id == PHY_ID_KSZ8051)
++ return ret;
++ else
++ return !ret;
++}
++
++static int ksz8051_match_phy_device(struct phy_device *phydev)
++{
++ return ksz8051_ksz8795_match_phy_device(phydev, PHY_ID_KSZ8051);
++}
++
+ static int ksz8081_config_init(struct phy_device *phydev)
+ {
+ /* KSZPHY_OMSO_FACTORY_TEST is set at de-assertion of the reset line
+@@ -364,6 +393,11 @@ static int ksz8061_config_init(struct phy_device *phydev)
+ return kszphy_config_init(phydev);
+ }
+
++static int ksz8795_match_phy_device(struct phy_device *phydev)
++{
++ return ksz8051_ksz8795_match_phy_device(phydev, PHY_ID_KSZ87XX);
++}
++
+ static int ksz9021_load_values_from_of(struct phy_device *phydev,
+ const struct device_node *of_node,
+ u16 reg,
+@@ -1017,8 +1051,6 @@ static struct phy_driver ksphy_driver[] = {
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ }, {
+- .phy_id = PHY_ID_KSZ8051,
+- .phy_id_mask = MICREL_PHY_ID_MASK,
+ .name = "Micrel KSZ8051",
+ /* PHY_BASIC_FEATURES */
+ .driver_data = &ksz8051_type,
+@@ -1029,6 +1061,7 @@ static struct phy_driver ksphy_driver[] = {
+ .get_sset_count = kszphy_get_sset_count,
+ .get_strings = kszphy_get_strings,
+ .get_stats = kszphy_get_stats,
++ .match_phy_device = ksz8051_match_phy_device,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ }, {
+@@ -1141,13 +1174,12 @@ static struct phy_driver ksphy_driver[] = {
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ }, {
+- .phy_id = PHY_ID_KSZ8795,
+- .phy_id_mask = MICREL_PHY_ID_MASK,
+- .name = "Micrel KSZ8795",
++ .name = "Micrel KSZ87XX Switch",
+ /* PHY_BASIC_FEATURES */
+ .config_init = kszphy_config_init,
+ .config_aneg = ksz8873mll_config_aneg,
+ .read_status = ksz8873mll_read_status,
++ .match_phy_device = ksz8795_match_phy_device,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ }, {
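
A compilable illustration of the tie-break the match functions above implement: same PHY ID, disambiguated by the BMSR extended-capability bit. The bmsr values below are made-up register reads standing in for phy_read(phydev, MII_BMSR):

#include <stdint.h>
#include <stdio.h>

#define BMSR_ERCAP 0x0001u  /* extended-capability bit, as in mii.h */

/* want_ksz8051 selects which side of the tie-break to test: the
 * KSZ8051 PHY has the extended capability registers, the KSZ87xx
 * switch does not. */
static int matches(uint16_t bmsr, int want_ksz8051)
{
    int has_ercap = !!(bmsr & BMSR_ERCAP);

    return want_ksz8051 ? has_ercap : !has_ercap;
}

int main(void)
{
    printf("BMSR=0x7949 -> KSZ8051? %d, KSZ87xx? %d\n",
           matches(0x7949, 1), matches(0x7949, 0)); /* 1, 0 */
    printf("BMSR=0x7948 -> KSZ8051? %d, KSZ87xx? %d\n",
           matches(0x7948, 1), matches(0x7948, 0)); /* 0, 1 */
    return 0;
}
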
+diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c
+index 7935593debb1..a1caeee12236 100644
+--- a/drivers/net/phy/phy-c45.c
++++ b/drivers/net/phy/phy-c45.c
+@@ -323,6 +323,8 @@ int genphy_c45_read_pma(struct phy_device *phydev)
+ {
+ int val;
+
++ linkmode_zero(phydev->lp_advertising);
++
+ val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL1);
+ if (val < 0)
+ return val;
+diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
+index 6b0f89369b46..0ff8df35c779 100644
+--- a/drivers/net/phy/phy.c
++++ b/drivers/net/phy/phy.c
+@@ -457,6 +457,11 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
+ val);
+ change_autoneg = true;
+ break;
++ case MII_CTRL1000:
++ mii_ctrl1000_mod_linkmode_adv_t(phydev->advertising,
++ val);
++ change_autoneg = true;
++ break;
+ default:
+ /* do nothing */
+ break;
+@@ -561,9 +566,6 @@ int phy_start_aneg(struct phy_device *phydev)
+ if (AUTONEG_DISABLE == phydev->autoneg)
+ phy_sanitize_settings(phydev);
+
+- /* Invalidate LP advertising flags */
+- linkmode_zero(phydev->lp_advertising);
+-
+ err = phy_config_aneg(phydev);
+ if (err < 0)
+ goto out_unlock;
+diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
+index 27ebc2c6c2d0..d6c9350b65bf 100644
+--- a/drivers/net/phy/phy_device.c
++++ b/drivers/net/phy/phy_device.c
+@@ -1823,7 +1823,14 @@ int genphy_read_status(struct phy_device *phydev)
+
+ linkmode_zero(phydev->lp_advertising);
+
+- if (phydev->autoneg == AUTONEG_ENABLE && phydev->autoneg_complete) {
++ if (phydev->autoneg == AUTONEG_ENABLE) {
++ if (!phydev->autoneg_complete) {
++ mii_stat1000_mod_linkmode_lpa_t(phydev->lp_advertising,
++ 0);
++ mii_lpa_mod_linkmode_lpa_t(phydev->lp_advertising, 0);
++ return 0;
++ }
++
+ if (phydev->is_gigabit_capable) {
+ lpagb = phy_read(phydev, MII_STAT1000);
+ if (lpagb < 0)
+diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
+index 04137ac373b0..9eedc0714422 100644
+--- a/drivers/net/usb/r8152.c
++++ b/drivers/net/usb/r8152.c
+@@ -4533,10 +4533,9 @@ static int rtl8152_reset_resume(struct usb_interface *intf)
+ struct r8152 *tp = usb_get_intfdata(intf);
+
+ clear_bit(SELECTIVE_SUSPEND, &tp->flags);
+- mutex_lock(&tp->control);
+ tp->rtl_ops.init(tp);
+ queue_delayed_work(system_long_wq, &tp->hw_phy_work, 0);
+- mutex_unlock(&tp->control);
++ set_ethernet_addr(tp);
+ return rtl8152_resume(intf);
+ }
+
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+index 3b12e7ad35e1..acbadfdbdd3f 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+@@ -513,31 +513,33 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
+ {IWL_PCI_DEVICE(0x24FD, 0x9074, iwl8265_2ac_cfg)},
+
+ /* 9000 Series */
+- {IWL_PCI_DEVICE(0x02F0, 0x0030, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
+- {IWL_PCI_DEVICE(0x02F0, 0x0034, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
+- {IWL_PCI_DEVICE(0x02F0, 0x0038, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
+- {IWL_PCI_DEVICE(0x02F0, 0x003C, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
+- {IWL_PCI_DEVICE(0x02F0, 0x0060, iwl9461_2ac_cfg_quz_a0_jf_b0_soc)},
+- {IWL_PCI_DEVICE(0x02F0, 0x0064, iwl9461_2ac_cfg_quz_a0_jf_b0_soc)},
+- {IWL_PCI_DEVICE(0x02F0, 0x00A0, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
+- {IWL_PCI_DEVICE(0x02F0, 0x00A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
+- {IWL_PCI_DEVICE(0x02F0, 0x0230, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
+- {IWL_PCI_DEVICE(0x02F0, 0x0234, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
+- {IWL_PCI_DEVICE(0x02F0, 0x0238, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
+- {IWL_PCI_DEVICE(0x02F0, 0x023C, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
+- {IWL_PCI_DEVICE(0x02F0, 0x0260, iwl9461_2ac_cfg_quz_a0_jf_b0_soc)},
+- {IWL_PCI_DEVICE(0x02F0, 0x0264, iwl9461_2ac_cfg_quz_a0_jf_b0_soc)},
+- {IWL_PCI_DEVICE(0x02F0, 0x02A0, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
+- {IWL_PCI_DEVICE(0x02F0, 0x02A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
+- {IWL_PCI_DEVICE(0x02F0, 0x1551, iwl9560_killer_s_2ac_cfg_quz_a0_jf_b0_soc)},
+- {IWL_PCI_DEVICE(0x02F0, 0x1552, iwl9560_killer_i_2ac_cfg_quz_a0_jf_b0_soc)},
+- {IWL_PCI_DEVICE(0x02F0, 0x2030, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
+- {IWL_PCI_DEVICE(0x02F0, 0x2034, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
+- {IWL_PCI_DEVICE(0x02F0, 0x4030, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
+- {IWL_PCI_DEVICE(0x02F0, 0x4034, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
+- {IWL_PCI_DEVICE(0x02F0, 0x40A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
+- {IWL_PCI_DEVICE(0x02F0, 0x4234, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
+- {IWL_PCI_DEVICE(0x02F0, 0x42A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
++ {IWL_PCI_DEVICE(0x02F0, 0x0030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x02F0, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x02F0, 0x0038, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x02F0, 0x003C, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x02F0, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x02F0, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x02F0, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x02F0, 0x00A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x02F0, 0x0230, iwl9560_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x02F0, 0x0234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x02F0, 0x0238, iwl9560_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x02F0, 0x023C, iwl9560_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x02F0, 0x0260, iwl9461_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x02F0, 0x0264, iwl9461_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x02F0, 0x02A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x02F0, 0x02A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x02F0, 0x1030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x02F0, 0x1551, killer1550s_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x02F0, 0x1552, killer1550i_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x02F0, 0x2030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x02F0, 0x2034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x02F0, 0x4030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x02F0, 0x4034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x02F0, 0x40A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x02F0, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x02F0, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
++
+ {IWL_PCI_DEVICE(0x06F0, 0x0030, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x0034, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x0038, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
+@@ -643,34 +645,34 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
+ {IWL_PCI_DEVICE(0x2720, 0x40A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x2720, 0x4234, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x2720, 0x42A4, iwl9462_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x30DC, 0x0030, iwl9560_2ac_160_cfg_soc)},
+- {IWL_PCI_DEVICE(0x30DC, 0x0034, iwl9560_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x30DC, 0x0038, iwl9560_2ac_160_cfg_soc)},
+- {IWL_PCI_DEVICE(0x30DC, 0x003C, iwl9560_2ac_160_cfg_soc)},
+- {IWL_PCI_DEVICE(0x30DC, 0x0060, iwl9460_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x30DC, 0x0064, iwl9461_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x30DC, 0x00A0, iwl9462_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x30DC, 0x00A4, iwl9462_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x30DC, 0x0230, iwl9560_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x30DC, 0x0234, iwl9560_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x30DC, 0x0238, iwl9560_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x30DC, 0x023C, iwl9560_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x30DC, 0x0260, iwl9461_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x30DC, 0x0264, iwl9461_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x30DC, 0x02A0, iwl9462_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x30DC, 0x02A4, iwl9462_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x30DC, 0x1010, iwl9260_2ac_cfg)},
+- {IWL_PCI_DEVICE(0x30DC, 0x1030, iwl9560_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x30DC, 0x1210, iwl9260_2ac_cfg)},
+- {IWL_PCI_DEVICE(0x30DC, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x30DC, 0x1552, iwl9560_killer_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x30DC, 0x2030, iwl9560_2ac_160_cfg_soc)},
+- {IWL_PCI_DEVICE(0x30DC, 0x2034, iwl9560_2ac_160_cfg_soc)},
+- {IWL_PCI_DEVICE(0x30DC, 0x4030, iwl9560_2ac_160_cfg_soc)},
+- {IWL_PCI_DEVICE(0x30DC, 0x4034, iwl9560_2ac_160_cfg_soc)},
+- {IWL_PCI_DEVICE(0x30DC, 0x40A4, iwl9462_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x30DC, 0x4234, iwl9560_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x30DC, 0x42A4, iwl9462_2ac_cfg_soc)},
++
++ {IWL_PCI_DEVICE(0x30DC, 0x0030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x30DC, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x30DC, 0x0038, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x30DC, 0x003C, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x30DC, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x30DC, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x30DC, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x30DC, 0x00A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x30DC, 0x0230, iwl9560_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x30DC, 0x0234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x30DC, 0x0238, iwl9560_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x30DC, 0x023C, iwl9560_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x30DC, 0x0260, iwl9461_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x30DC, 0x0264, iwl9461_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x30DC, 0x02A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x30DC, 0x02A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x30DC, 0x1030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x30DC, 0x1551, killer1550s_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x30DC, 0x1552, killer1550i_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x30DC, 0x2030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x30DC, 0x2034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x30DC, 0x4030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x30DC, 0x4034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x30DC, 0x40A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x30DC, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x30DC, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
++
+ {IWL_PCI_DEVICE(0x31DC, 0x0030, iwl9560_2ac_160_cfg_shared_clk)},
+ {IWL_PCI_DEVICE(0x31DC, 0x0034, iwl9560_2ac_cfg_shared_clk)},
+ {IWL_PCI_DEVICE(0x31DC, 0x0038, iwl9560_2ac_160_cfg_shared_clk)},
+@@ -726,62 +728,60 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
+ {IWL_PCI_DEVICE(0x34F0, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x34F0, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+
+- {IWL_PCI_DEVICE(0x3DF0, 0x0030, iwl9560_2ac_160_cfg_soc)},
+- {IWL_PCI_DEVICE(0x3DF0, 0x0034, iwl9560_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x3DF0, 0x0038, iwl9560_2ac_160_cfg_soc)},
+- {IWL_PCI_DEVICE(0x3DF0, 0x003C, iwl9560_2ac_160_cfg_soc)},
+- {IWL_PCI_DEVICE(0x3DF0, 0x0060, iwl9461_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x3DF0, 0x0064, iwl9461_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x3DF0, 0x00A0, iwl9462_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x3DF0, 0x00A4, iwl9462_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x3DF0, 0x0230, iwl9560_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x3DF0, 0x0234, iwl9560_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x3DF0, 0x0238, iwl9560_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x3DF0, 0x023C, iwl9560_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x3DF0, 0x0260, iwl9461_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x3DF0, 0x0264, iwl9461_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x3DF0, 0x02A0, iwl9462_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x3DF0, 0x02A4, iwl9462_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x3DF0, 0x1010, iwl9260_2ac_cfg)},
+- {IWL_PCI_DEVICE(0x3DF0, 0x1030, iwl9560_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x3DF0, 0x1210, iwl9260_2ac_cfg)},
+- {IWL_PCI_DEVICE(0x3DF0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x3DF0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x3DF0, 0x2030, iwl9560_2ac_160_cfg_soc)},
+- {IWL_PCI_DEVICE(0x3DF0, 0x2034, iwl9560_2ac_160_cfg_soc)},
+- {IWL_PCI_DEVICE(0x3DF0, 0x4030, iwl9560_2ac_160_cfg_soc)},
+- {IWL_PCI_DEVICE(0x3DF0, 0x4034, iwl9560_2ac_160_cfg_soc)},
+- {IWL_PCI_DEVICE(0x3DF0, 0x40A4, iwl9462_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x3DF0, 0x4234, iwl9560_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x3DF0, 0x42A4, iwl9462_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x43F0, 0x0030, iwl9560_2ac_160_cfg_soc)},
+- {IWL_PCI_DEVICE(0x43F0, 0x0034, iwl9560_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x43F0, 0x0038, iwl9560_2ac_160_cfg_soc)},
+- {IWL_PCI_DEVICE(0x43F0, 0x003C, iwl9560_2ac_160_cfg_soc)},
+- {IWL_PCI_DEVICE(0x43F0, 0x0060, iwl9461_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x43F0, 0x0064, iwl9461_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x43F0, 0x00A0, iwl9462_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x43F0, 0x00A4, iwl9462_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x43F0, 0x0230, iwl9560_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x43F0, 0x0234, iwl9560_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x43F0, 0x0238, iwl9560_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x43F0, 0x023C, iwl9560_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x43F0, 0x0260, iwl9461_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x43F0, 0x0264, iwl9461_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x43F0, 0x02A0, iwl9462_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x43F0, 0x02A4, iwl9462_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x43F0, 0x1010, iwl9260_2ac_cfg)},
+- {IWL_PCI_DEVICE(0x43F0, 0x1030, iwl9560_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x43F0, 0x1210, iwl9260_2ac_cfg)},
+- {IWL_PCI_DEVICE(0x43F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x43F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x43F0, 0x2030, iwl9560_2ac_160_cfg_soc)},
+- {IWL_PCI_DEVICE(0x43F0, 0x2034, iwl9560_2ac_160_cfg_soc)},
+- {IWL_PCI_DEVICE(0x43F0, 0x4030, iwl9560_2ac_160_cfg_soc)},
+- {IWL_PCI_DEVICE(0x43F0, 0x4034, iwl9560_2ac_160_cfg_soc)},
+- {IWL_PCI_DEVICE(0x43F0, 0x40A4, iwl9462_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x43F0, 0x4234, iwl9560_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x43F0, 0x42A4, iwl9462_2ac_cfg_soc)},
++ {IWL_PCI_DEVICE(0x3DF0, 0x0030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x3DF0, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x3DF0, 0x0038, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x3DF0, 0x003C, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x3DF0, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x3DF0, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x3DF0, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x3DF0, 0x00A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x3DF0, 0x0230, iwl9560_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x3DF0, 0x0234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x3DF0, 0x0238, iwl9560_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x3DF0, 0x023C, iwl9560_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x3DF0, 0x0260, iwl9461_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x3DF0, 0x0264, iwl9461_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x3DF0, 0x02A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x3DF0, 0x02A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x3DF0, 0x1030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x3DF0, 0x1551, killer1550s_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x3DF0, 0x1552, killer1550i_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x3DF0, 0x2030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x3DF0, 0x2034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x3DF0, 0x4030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x3DF0, 0x4034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x3DF0, 0x40A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x3DF0, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x3DF0, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
++
++ {IWL_PCI_DEVICE(0x43F0, 0x0030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x43F0, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x43F0, 0x0038, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x43F0, 0x003C, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x43F0, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x43F0, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x43F0, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x43F0, 0x00A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x43F0, 0x0230, iwl9560_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x43F0, 0x0234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x43F0, 0x0238, iwl9560_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x43F0, 0x023C, iwl9560_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x43F0, 0x0260, iwl9461_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x43F0, 0x0264, iwl9461_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x43F0, 0x02A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x43F0, 0x02A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x43F0, 0x1030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x43F0, 0x1551, killer1550s_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x43F0, 0x1552, killer1550i_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x43F0, 0x2030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x43F0, 0x2034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x43F0, 0x4030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x43F0, 0x4034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x43F0, 0x40A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x43F0, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0x43F0, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
++
+ {IWL_PCI_DEVICE(0x9DF0, 0x0000, iwl9460_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9460_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0030, iwl9560_2ac_160_cfg_soc)},
+@@ -821,34 +821,34 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
+ {IWL_PCI_DEVICE(0x9DF0, 0x40A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x4234, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x42A4, iwl9462_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0xA0F0, 0x0030, iwl9560_2ac_160_cfg_soc)},
+- {IWL_PCI_DEVICE(0xA0F0, 0x0034, iwl9560_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0xA0F0, 0x0038, iwl9560_2ac_160_cfg_soc)},
+- {IWL_PCI_DEVICE(0xA0F0, 0x003C, iwl9560_2ac_160_cfg_soc)},
+- {IWL_PCI_DEVICE(0xA0F0, 0x0060, iwl9461_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0xA0F0, 0x0064, iwl9461_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0xA0F0, 0x00A0, iwl9462_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0xA0F0, 0x00A4, iwl9462_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0xA0F0, 0x0230, iwl9560_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0xA0F0, 0x0234, iwl9560_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0xA0F0, 0x0238, iwl9560_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0xA0F0, 0x023C, iwl9560_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0xA0F0, 0x0260, iwl9461_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0xA0F0, 0x0264, iwl9461_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0xA0F0, 0x02A0, iwl9462_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0xA0F0, 0x02A4, iwl9462_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0xA0F0, 0x1010, iwl9260_2ac_cfg)},
+- {IWL_PCI_DEVICE(0xA0F0, 0x1030, iwl9560_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0xA0F0, 0x1210, iwl9260_2ac_cfg)},
+- {IWL_PCI_DEVICE(0xA0F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0xA0F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0xA0F0, 0x2030, iwl9560_2ac_160_cfg_soc)},
+- {IWL_PCI_DEVICE(0xA0F0, 0x2034, iwl9560_2ac_160_cfg_soc)},
+- {IWL_PCI_DEVICE(0xA0F0, 0x4030, iwl9560_2ac_160_cfg_soc)},
+- {IWL_PCI_DEVICE(0xA0F0, 0x4034, iwl9560_2ac_160_cfg_soc)},
+- {IWL_PCI_DEVICE(0xA0F0, 0x40A4, iwl9462_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0xA0F0, 0x4234, iwl9560_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0xA0F0, 0x42A4, iwl9462_2ac_cfg_soc)},
++
++ {IWL_PCI_DEVICE(0xA0F0, 0x0030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0xA0F0, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0xA0F0, 0x0038, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0xA0F0, 0x003C, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0xA0F0, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0xA0F0, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0xA0F0, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0xA0F0, 0x00A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0xA0F0, 0x0230, iwl9560_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0xA0F0, 0x0234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0xA0F0, 0x0238, iwl9560_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0xA0F0, 0x023C, iwl9560_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0xA0F0, 0x0260, iwl9461_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0xA0F0, 0x0264, iwl9461_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0xA0F0, 0x02A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0xA0F0, 0x02A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0xA0F0, 0x1030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0xA0F0, 0x1551, killer1550s_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0xA0F0, 0x1552, killer1550i_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0xA0F0, 0x2030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0xA0F0, 0x2034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0xA0F0, 0x4030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0xA0F0, 0x4034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0xA0F0, 0x40A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0xA0F0, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
++ {IWL_PCI_DEVICE(0xA0F0, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
++
+ {IWL_PCI_DEVICE(0xA370, 0x0030, iwl9560_2ac_160_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x0034, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x0038, iwl9560_2ac_160_cfg_soc)},
+diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
+index 240f762b3749..103ed00775eb 100644
+--- a/drivers/net/xen-netback/interface.c
++++ b/drivers/net/xen-netback/interface.c
+@@ -719,7 +719,6 @@ err_unmap:
+ xenvif_unmap_frontend_data_rings(queue);
+ netif_napi_del(&queue->napi);
+ err:
+- module_put(THIS_MODULE);
+ return err;
+ }
+
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index d3d6b7bd6903..36a5ed1eacbe 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -103,10 +103,13 @@ static void nvme_set_queue_dying(struct nvme_ns *ns)
+ */
+ if (!ns->disk || test_and_set_bit(NVME_NS_DEAD, &ns->flags))
+ return;
+- revalidate_disk(ns->disk);
+ blk_set_queue_dying(ns->queue);
+ /* Forcibly unquiesce queues to avoid blocking dispatch */
+ blk_mq_unquiesce_queue(ns->queue);
++ /*
++ * Revalidate after unblocking dispatchers that may be holding bd_mutex
++ */
++ revalidate_disk(ns->disk);
+ }
+
+ static void nvme_queue_scan(struct nvme_ctrl *ctrl)
+@@ -849,7 +852,7 @@ out:
+ static int nvme_submit_user_cmd(struct request_queue *q,
+ struct nvme_command *cmd, void __user *ubuffer,
+ unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
+- u32 meta_seed, u32 *result, unsigned timeout)
++ u32 meta_seed, u64 *result, unsigned timeout)
+ {
+ bool write = nvme_is_write(cmd);
+ struct nvme_ns *ns = q->queuedata;
+@@ -890,7 +893,7 @@ static int nvme_submit_user_cmd(struct request_queue *q,
+ else
+ ret = nvme_req(req)->status;
+ if (result)
+- *result = le32_to_cpu(nvme_req(req)->result.u32);
++ *result = le64_to_cpu(nvme_req(req)->result.u64);
+ if (meta && !ret && !write) {
+ if (copy_to_user(meta_buffer, meta, meta_len))
+ ret = -EFAULT;
+@@ -1336,6 +1339,54 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
+ struct nvme_command c;
+ unsigned timeout = 0;
+ u32 effects;
++ u64 result;
++ int status;
++
++ if (!capable(CAP_SYS_ADMIN))
++ return -EACCES;
++ if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
++ return -EFAULT;
++ if (cmd.flags)
++ return -EINVAL;
++
++ memset(&c, 0, sizeof(c));
++ c.common.opcode = cmd.opcode;
++ c.common.flags = cmd.flags;
++ c.common.nsid = cpu_to_le32(cmd.nsid);
++ c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
++ c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
++ c.common.cdw10 = cpu_to_le32(cmd.cdw10);
++ c.common.cdw11 = cpu_to_le32(cmd.cdw11);
++ c.common.cdw12 = cpu_to_le32(cmd.cdw12);
++ c.common.cdw13 = cpu_to_le32(cmd.cdw13);
++ c.common.cdw14 = cpu_to_le32(cmd.cdw14);
++ c.common.cdw15 = cpu_to_le32(cmd.cdw15);
++
++ if (cmd.timeout_ms)
++ timeout = msecs_to_jiffies(cmd.timeout_ms);
++
++ effects = nvme_passthru_start(ctrl, ns, cmd.opcode);
++ status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
++ (void __user *)(uintptr_t)cmd.addr, cmd.data_len,
++ (void __user *)(uintptr_t)cmd.metadata,
++ cmd.metadata_len, 0, &result, timeout);
++ nvme_passthru_end(ctrl, effects);
++
++ if (status >= 0) {
++ if (put_user(result, &ucmd->result))
++ return -EFAULT;
++ }
++
++ return status;
++}
++
++static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
++ struct nvme_passthru_cmd64 __user *ucmd)
++{
++ struct nvme_passthru_cmd64 cmd;
++ struct nvme_command c;
++ unsigned timeout = 0;
++ u32 effects;
+ int status;
+
+ if (!capable(CAP_SYS_ADMIN))
+@@ -1406,6 +1457,41 @@ static void nvme_put_ns_from_disk(struct nvme_ns_head *head, int idx)
+ srcu_read_unlock(&head->srcu, idx);
+ }
+
++static bool is_ctrl_ioctl(unsigned int cmd)
++{
++ if (cmd == NVME_IOCTL_ADMIN_CMD || cmd == NVME_IOCTL_ADMIN64_CMD)
++ return true;
++ if (is_sed_ioctl(cmd))
++ return true;
++ return false;
++}
++
++static int nvme_handle_ctrl_ioctl(struct nvme_ns *ns, unsigned int cmd,
++ void __user *argp,
++ struct nvme_ns_head *head,
++ int srcu_idx)
++{
++ struct nvme_ctrl *ctrl = ns->ctrl;
++ int ret;
++
++ nvme_get_ctrl(ns->ctrl);
++ nvme_put_ns_from_disk(head, srcu_idx);
++
++ switch (cmd) {
++ case NVME_IOCTL_ADMIN_CMD:
++ ret = nvme_user_cmd(ctrl, NULL, argp);
++ break;
++ case NVME_IOCTL_ADMIN64_CMD:
++ ret = nvme_user_cmd64(ctrl, NULL, argp);
++ break;
++ default:
++ ret = sed_ioctl(ctrl->opal_dev, cmd, argp);
++ break;
++ }
++ nvme_put_ctrl(ctrl);
++ return ret;
++}
++
+ static int nvme_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg)
+ {
+@@ -1423,20 +1509,8 @@ static int nvme_ioctl(struct block_device *bdev, fmode_t mode,
+ * separately and drop the ns SRCU reference early. This avoids a
+ * deadlock when deleting namespaces using the passthrough interface.
+ */
+- if (cmd == NVME_IOCTL_ADMIN_CMD || is_sed_ioctl(cmd)) {
+- struct nvme_ctrl *ctrl = ns->ctrl;
+-
+- nvme_get_ctrl(ns->ctrl);
+- nvme_put_ns_from_disk(head, srcu_idx);
+-
+- if (cmd == NVME_IOCTL_ADMIN_CMD)
+- ret = nvme_user_cmd(ctrl, NULL, argp);
+- else
+- ret = sed_ioctl(ctrl->opal_dev, cmd, argp);
+-
+- nvme_put_ctrl(ctrl);
+- return ret;
+- }
++ if (is_ctrl_ioctl(cmd))
++ return nvme_handle_ctrl_ioctl(ns, cmd, argp, head, srcu_idx);
+
+ switch (cmd) {
+ case NVME_IOCTL_ID:
+@@ -1449,6 +1523,9 @@ static int nvme_ioctl(struct block_device *bdev, fmode_t mode,
+ case NVME_IOCTL_SUBMIT_IO:
+ ret = nvme_submit_io(ns, argp);
+ break;
++ case NVME_IOCTL_IO64_CMD:
++ ret = nvme_user_cmd64(ns->ctrl, ns, argp);
++ break;
+ default:
+ if (ns->ndev)
+ ret = nvme_nvm_ioctl(ns, cmd, arg);
+@@ -2267,6 +2344,16 @@ static const struct nvme_core_quirk_entry core_quirks[] = {
+ .vid = 0x14a4,
+ .fr = "22301111",
+ .quirks = NVME_QUIRK_SIMPLE_SUSPEND,
++ },
++ {
++ /*
++ * Kingston's E8FK11.T firmware version raises no interrupt
++ * after a resume from suspend to idle; see
++ * https://bugzilla.kernel.org/show_bug.cgi?id=204887
++ */
++ .vid = 0x2646,
++ .fr = "E8FK11.T",
++ .quirks = NVME_QUIRK_SIMPLE_SUSPEND,
+ }
+ };
+
+@@ -2510,8 +2597,9 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
+ list_add_tail(&subsys->entry, &nvme_subsystems);
+ }
+
+- if (sysfs_create_link(&subsys->dev.kobj, &ctrl->device->kobj,
+- dev_name(ctrl->device))) {
++ ret = sysfs_create_link(&subsys->dev.kobj, &ctrl->device->kobj,
++ dev_name(ctrl->device));
++ if (ret) {
+ dev_err(ctrl->device,
+ "failed to create sysfs link from subsystem.\n");
+ goto out_put_subsystem;
+@@ -2812,6 +2900,8 @@ static long nvme_dev_ioctl(struct file *file, unsigned int cmd,
+ switch (cmd) {
+ case NVME_IOCTL_ADMIN_CMD:
+ return nvme_user_cmd(ctrl, NULL, argp);
++ case NVME_IOCTL_ADMIN64_CMD:
++ return nvme_user_cmd64(ctrl, NULL, argp);
+ case NVME_IOCTL_IO_CMD:
+ return nvme_dev_user_cmd(ctrl, argp);
+ case NVME_IOCTL_RESET:
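
For completeness, a hedged userspace sketch of exercising the new 64-bit passthrough path. It assumes a <linux/nvme_ioctl.h> that already defines NVME_IOCTL_ADMIN64_CMD and struct nvme_passthru_cmd64 (added by this same patch) and an NVMe character device at /dev/nvme0; it issues Identify Controller (opcode 0x06, CNS=1) and prints the now-64-bit completion result.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/nvme_ioctl.h>

int main(void)
{
    struct nvme_passthru_cmd64 cmd;
    unsigned char data[4096];
    int fd = open("/dev/nvme0", O_RDONLY);  /* needs CAP_SYS_ADMIN */

    if (fd < 0) { perror("open"); return 1; }

    memset(&cmd, 0, sizeof(cmd));
    cmd.opcode   = 0x06;                    /* Identify */
    cmd.addr     = (uintptr_t)data;
    cmd.data_len = sizeof(data);
    cmd.cdw10    = 1;                       /* CNS=1: Identify Controller */

    if (ioctl(fd, NVME_IOCTL_ADMIN64_CMD, &cmd) < 0) {
        perror("NVME_IOCTL_ADMIN64_CMD");
        return 1;
    }
    /* .result is u64 here, vs u32 in the old nvme_passthru_cmd */
    printf("completion result (64-bit): 0x%llx\n",
           (unsigned long long)cmd.result);
    return 0;
}
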
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index 732d5b63ec05..2303d44fc3cb 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -769,7 +769,8 @@ static blk_status_t nvme_setup_prp_simple(struct nvme_dev *dev,
+ struct bio_vec *bv)
+ {
+ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+- unsigned int first_prp_len = dev->ctrl.page_size - bv->bv_offset;
++ unsigned int offset = bv->bv_offset & (dev->ctrl.page_size - 1);
++ unsigned int first_prp_len = dev->ctrl.page_size - offset;
+
+ iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0);
+ if (dma_mapping_error(dev->dev, iod->first_dma))
+@@ -2894,11 +2895,21 @@ static int nvme_suspend(struct device *dev)
+ if (ret < 0)
+ goto unfreeze;
+
++ /*
++ * A saved state prevents pci pm from generically controlling the
++ * device's power. If we're using protocol specific settings, we don't
++ * want pci interfering.
++ */
++ pci_save_state(pdev);
++
+ ret = nvme_set_power_state(ctrl, ctrl->npss);
+ if (ret < 0)
+ goto unfreeze;
+
+ if (ret) {
++ /* discard the saved state */
++ pci_load_saved_state(pdev, NULL);
++
+ /*
+ * Clearing npss forces a controller reset on resume. The
+ * correct value will be rediscovered then.
+@@ -2906,14 +2917,7 @@ static int nvme_suspend(struct device *dev)
+ nvme_dev_disable(ndev, true);
+ ctrl->npss = 0;
+ ret = 0;
+- goto unfreeze;
+ }
+- /*
+- * A saved state prevents pci pm from generically controlling the
+- * device's power. If we're using protocol specific settings, we don't
+- * want pci interfering.
+- */
+- pci_save_state(pdev);
+ unfreeze:
+ nvme_unfreeze(ctrl);
+ return ret;
+@@ -3038,6 +3042,9 @@ static const struct pci_device_id nvme_id_table[] = {
+ .driver_data = NVME_QUIRK_LIGHTNVM, },
+ { PCI_DEVICE(0x10ec, 0x5762), /* ADATA SX6000LNP */
+ .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
++ { PCI_DEVICE(0x1cc1, 0x8201), /* ADATA SX8200PNP 512GB */
++ .driver_data = NVME_QUIRK_NO_DEEPEST_PS |
++ NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+ { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
+ { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001) },
+ { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) },
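
The nvme_setup_prp_simple() change above is pure arithmetic: bv_offset may exceed the controller page size, so it must be reduced modulo the page size before computing how much of the buffer the first PRP entry covers. A compilable sketch with a worked value:

#include <stdio.h>

/* The first PRP entry covers from the buffer start to the end of its
 * controller page. page_size is a power of two, so "& (page_size - 1)"
 * is the in-page offset. */
static unsigned first_prp_len(unsigned page_size, unsigned bv_offset)
{
    unsigned offset = bv_offset & (page_size - 1);

    return page_size - offset;
}

int main(void)
{
    /* 4 KiB pages, buffer starting 5120 bytes into its folio: the
     * in-page offset is 1024, so the first PRP spans 3072 bytes.
     * The old code computed 4096 - 5120 and wrapped around. */
    printf("%u\n", first_prp_len(4096, 5120));
    return 0;
}
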
+diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
+index 1a6449bc547b..842ef876724f 100644
+--- a/drivers/nvme/host/rdma.c
++++ b/drivers/nvme/host/rdma.c
+@@ -427,7 +427,7 @@ static void nvme_rdma_destroy_queue_ib(struct nvme_rdma_queue *queue)
+ static int nvme_rdma_get_max_fr_pages(struct ib_device *ibdev)
+ {
+ return min_t(u32, NVME_RDMA_MAX_SEGMENTS,
+- ibdev->attrs.max_fast_reg_page_list_len);
++ ibdev->attrs.max_fast_reg_page_list_len - 1);
+ }
+
+ static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
+@@ -437,7 +437,7 @@ static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
+ const int cq_factor = send_wr_factor + 1; /* + RECV */
+ int comp_vector, idx = nvme_rdma_queue_idx(queue);
+ enum ib_poll_context poll_ctx;
+- int ret;
++ int ret, pages_per_mr;
+
+ queue->device = nvme_rdma_find_get_device(queue->cm_id);
+ if (!queue->device) {
+@@ -479,10 +479,16 @@ static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
+ goto out_destroy_qp;
+ }
+
++ /*
++	 * Currently we don't use SG_GAPS MRs, so if the first entry is
++	 * misaligned we'll end up using two entries for a single data page;
++	 * one additional entry is therefore required.
++ */
++ pages_per_mr = nvme_rdma_get_max_fr_pages(ibdev) + 1;
+ ret = ib_mr_pool_init(queue->qp, &queue->qp->rdma_mrs,
+ queue->queue_size,
+ IB_MR_TYPE_MEM_REG,
+- nvme_rdma_get_max_fr_pages(ibdev), 0);
++ pages_per_mr, 0);
+ if (ret) {
+ dev_err(queue->ctrl->ctrl.device,
+ "failed to initialize MR pool sized %d for QID %d\n",
+@@ -614,7 +620,8 @@ static int nvme_rdma_start_queue(struct nvme_rdma_ctrl *ctrl, int idx)
+ if (!ret) {
+ set_bit(NVME_RDMA_Q_LIVE, &queue->flags);
+ } else {
+- __nvme_rdma_stop_queue(queue);
++ if (test_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags))
++ __nvme_rdma_stop_queue(queue);
+ dev_info(ctrl->ctrl.device,
+ "failed to connect queue: %d ret=%d\n", idx, ret);
+ }
+@@ -824,8 +831,8 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
+ if (error)
+ goto out_stop_queue;
+
+- ctrl->ctrl.max_hw_sectors =
+- (ctrl->max_fr_pages - 1) << (ilog2(SZ_4K) - 9);
++ ctrl->ctrl.max_segments = ctrl->max_fr_pages;
++ ctrl->ctrl.max_hw_sectors = ctrl->max_fr_pages << (ilog2(SZ_4K) - 9);
+
+ error = nvme_init_identify(&ctrl->ctrl);
+ if (error)
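
The "+ 1" when sizing the MR pool follows from simple page-count arithmetic: without SG_GAPS support, a buffer whose first byte is not page-aligned can straddle one more MR page than its length alone suggests. A compilable illustration, assuming 4 KiB MR pages:

#include <stdio.h>

#define MR_PAGE_SIZE 4096u

/* Page-list entries needed to map len bytes whose first byte sits at
 * in-page offset off. A misaligned first entry costs an extra slot --
 * hence the extra entry when sizing the MR pool above. */
static unsigned mr_pages(unsigned off, unsigned len)
{
    return (off + len + MR_PAGE_SIZE - 1) / MR_PAGE_SIZE;
}

int main(void)
{
    printf("aligned:    %u\n", mr_pages(0, 4096));    /* 1 */
    printf("misaligned: %u\n", mr_pages(512, 4096));  /* 2 */
    return 0;
}
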
+diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
+index 606b13d35d16..bdadb27b28bb 100644
+--- a/drivers/nvme/host/tcp.c
++++ b/drivers/nvme/host/tcp.c
+@@ -1039,7 +1039,7 @@ static void nvme_tcp_io_work(struct work_struct *w)
+ {
+ struct nvme_tcp_queue *queue =
+ container_of(w, struct nvme_tcp_queue, io_work);
+- unsigned long start = jiffies + msecs_to_jiffies(1);
++ unsigned long deadline = jiffies + msecs_to_jiffies(1);
+
+ do {
+ bool pending = false;
+@@ -1064,7 +1064,7 @@ static void nvme_tcp_io_work(struct work_struct *w)
+ if (!pending)
+ return;
+
+- } while (time_after(jiffies, start)); /* quota is exhausted */
++ } while (!time_after(jiffies, deadline)); /* quota is exhausted */
+
+ queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
+ }
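
The io_work fix inverts a time comparison: the loop should continue while the 1 ms budget has not yet expired, not only after it has. A userspace sketch of the corrected budget loop, using a monotonic clock in place of jiffies/time_after():

#include <stdio.h>
#include <time.h>

static long long now_ms(void)
{
    struct timespec ts;

    clock_gettime(CLOCK_MONOTONIC, &ts);
    return ts.tv_sec * 1000LL + ts.tv_nsec / 1000000;
}

/* Keep going while the deadline has NOT passed. The original code
 * looped while time_after(jiffies, start), i.e. only once the budget
 * was already gone, so the body effectively ran once per invocation. */
int main(void)
{
    long long deadline = now_ms() + 1;   /* 1 ms work budget */
    unsigned iterations = 0;

    do {
        iterations++;                    /* stand-in for one tx/rx pass */
    } while (!(now_ms() > deadline));    /* "!time_after(now, deadline)" */

    printf("ran %u iterations within the budget\n", iterations);
    return 0;
}
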
+diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
+index 7989703b883c..6bd610ee2cd7 100644
+--- a/drivers/of/of_reserved_mem.c
++++ b/drivers/of/of_reserved_mem.c
+@@ -324,8 +324,10 @@ int of_reserved_mem_device_init_by_idx(struct device *dev,
+ if (!target)
+ return -ENODEV;
+
+- if (!of_device_is_available(target))
++ if (!of_device_is_available(target)) {
++ of_node_put(target);
+ return 0;
++ }
+
+ rmem = __find_rmem(target);
+ of_node_put(target);
+diff --git a/drivers/opp/of.c b/drivers/opp/of.c
+index b313aca9894f..4c7feb3ac4cd 100644
+--- a/drivers/opp/of.c
++++ b/drivers/opp/of.c
+@@ -77,8 +77,6 @@ static struct dev_pm_opp *_find_opp_of_np(struct opp_table *opp_table,
+ {
+ struct dev_pm_opp *opp;
+
+- lockdep_assert_held(&opp_table_lock);
+-
+ mutex_lock(&opp_table->lock);
+
+ list_for_each_entry(opp, &opp_table->opp_list, node) {
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index b97d9e10c9cc..57f15a7e6f0b 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -958,19 +958,6 @@ void pci_refresh_power_state(struct pci_dev *dev)
+ pci_update_current_state(dev, dev->current_state);
+ }
+
+-/**
+- * pci_power_up - Put the given device into D0 forcibly
+- * @dev: PCI device to power up
+- */
+-void pci_power_up(struct pci_dev *dev)
+-{
+- if (platform_pci_power_manageable(dev))
+- platform_pci_set_power_state(dev, PCI_D0);
+-
+- pci_raw_set_power_state(dev, PCI_D0);
+- pci_update_current_state(dev, PCI_D0);
+-}
+-
+ /**
+ * pci_platform_power_transition - Use platform to change device power state
+ * @dev: PCI device to handle.
+@@ -1153,6 +1140,17 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
+ }
+ EXPORT_SYMBOL(pci_set_power_state);
+
++/**
++ * pci_power_up - Put the given device into D0 forcibly
++ * @dev: PCI device to power up
++ */
++void pci_power_up(struct pci_dev *dev)
++{
++ __pci_start_power_transition(dev, PCI_D0);
++ pci_raw_set_power_state(dev, PCI_D0);
++ pci_update_current_state(dev, PCI_D0);
++}
++
+ /**
+ * pci_choose_state - Choose the power state of a PCI device
+ * @dev: PCI device to be suspended
+diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
+index 03ec7a5d9d0b..bf049d1bbb87 100644
+--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
++++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
+@@ -1513,7 +1513,6 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
+ DMI_MATCH(DMI_PRODUCT_FAMILY, "Intel_Strago"),
+- DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
+ },
+ },
+ {
+@@ -1521,7 +1520,6 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Setzer"),
+- DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
+ },
+ },
+ {
+@@ -1529,7 +1527,6 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Cyan"),
+- DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
+ },
+ },
+ {
+@@ -1537,7 +1534,6 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Celes"),
+- DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
+ },
+ },
+ {}
+diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
+index 6462d3ca7ceb..f2f5fcd9a237 100644
+--- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
++++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
+@@ -183,10 +183,10 @@ static struct armada_37xx_pin_group armada_37xx_nb_groups[] = {
+ PIN_GRP_EXTRA("uart2", 9, 2, BIT(1) | BIT(13) | BIT(14) | BIT(19),
+ BIT(1) | BIT(13) | BIT(14), BIT(1) | BIT(19),
+ 18, 2, "gpio", "uart"),
+- PIN_GRP_GPIO("led0_od", 11, 1, BIT(20), "led"),
+- PIN_GRP_GPIO("led1_od", 12, 1, BIT(21), "led"),
+- PIN_GRP_GPIO("led2_od", 13, 1, BIT(22), "led"),
+- PIN_GRP_GPIO("led3_od", 14, 1, BIT(23), "led"),
++ PIN_GRP_GPIO_2("led0_od", 11, 1, BIT(20), BIT(20), 0, "led"),
++ PIN_GRP_GPIO_2("led1_od", 12, 1, BIT(21), BIT(21), 0, "led"),
++ PIN_GRP_GPIO_2("led2_od", 13, 1, BIT(22), BIT(22), 0, "led"),
++ PIN_GRP_GPIO_2("led3_od", 14, 1, BIT(23), BIT(23), 0, "led"),
+
+ };
+
+@@ -221,11 +221,11 @@ static const struct armada_37xx_pin_data armada_37xx_pin_sb = {
+ };
+
+ static inline void armada_37xx_update_reg(unsigned int *reg,
+- unsigned int offset)
++ unsigned int *offset)
+ {
+ /* We never have more than 2 registers */
+- if (offset >= GPIO_PER_REG) {
+- offset -= GPIO_PER_REG;
++ if (*offset >= GPIO_PER_REG) {
++ *offset -= GPIO_PER_REG;
+ *reg += sizeof(u32);
+ }
+ }
+@@ -376,7 +376,7 @@ static inline void armada_37xx_irq_update_reg(unsigned int *reg,
+ {
+ int offset = irqd_to_hwirq(d);
+
+- armada_37xx_update_reg(reg, offset);
++ armada_37xx_update_reg(reg, &offset);
+ }
+
+ static int armada_37xx_gpio_direction_input(struct gpio_chip *chip,
+@@ -386,7 +386,7 @@ static int armada_37xx_gpio_direction_input(struct gpio_chip *chip,
+ unsigned int reg = OUTPUT_EN;
+ unsigned int mask;
+
+- armada_37xx_update_reg(&reg, offset);
++ armada_37xx_update_reg(&reg, &offset);
+ mask = BIT(offset);
+
+ return regmap_update_bits(info->regmap, reg, mask, 0);
+@@ -399,7 +399,7 @@ static int armada_37xx_gpio_get_direction(struct gpio_chip *chip,
+ unsigned int reg = OUTPUT_EN;
+ unsigned int val, mask;
+
+- armada_37xx_update_reg(&reg, offset);
++ armada_37xx_update_reg(&reg, &offset);
+ mask = BIT(offset);
+ regmap_read(info->regmap, reg, &val);
+
+@@ -413,7 +413,7 @@ static int armada_37xx_gpio_direction_output(struct gpio_chip *chip,
+ unsigned int reg = OUTPUT_EN;
+ unsigned int mask, val, ret;
+
+- armada_37xx_update_reg(&reg, offset);
++ armada_37xx_update_reg(&reg, &offset);
+ mask = BIT(offset);
+
+ ret = regmap_update_bits(info->regmap, reg, mask, mask);
+@@ -434,7 +434,7 @@ static int armada_37xx_gpio_get(struct gpio_chip *chip, unsigned int offset)
+ unsigned int reg = INPUT_VAL;
+ unsigned int val, mask;
+
+- armada_37xx_update_reg(&reg, offset);
++ armada_37xx_update_reg(&reg, &offset);
+ mask = BIT(offset);
+
+ regmap_read(info->regmap, reg, &val);
+@@ -449,7 +449,7 @@ static void armada_37xx_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ unsigned int reg = OUTPUT_VAL;
+ unsigned int mask, val;
+
+- armada_37xx_update_reg(&reg, offset);
++ armada_37xx_update_reg(&reg, &offset);
+ mask = BIT(offset);
+ val = value ? mask : 0;
+
+diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
+index 1058b4b5cc1e..35a0e9569239 100644
+--- a/drivers/s390/crypto/zcrypt_api.c
++++ b/drivers/s390/crypto/zcrypt_api.c
+@@ -539,8 +539,7 @@ static int zcrypt_release(struct inode *inode, struct file *filp)
+ if (filp->f_inode->i_cdev == &zcrypt_cdev) {
+ struct zcdn_device *zcdndev;
+
+- if (mutex_lock_interruptible(&ap_perms_mutex))
+- return -ERESTARTSYS;
++ mutex_lock(&ap_perms_mutex);
+ zcdndev = find_zcdndev_by_devt(filp->f_inode->i_rdev);
+ mutex_unlock(&ap_perms_mutex);
+ if (zcdndev) {
+diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
+index 296bbc3c4606..cf63916814cc 100644
+--- a/drivers/s390/scsi/zfcp_fsf.c
++++ b/drivers/s390/scsi/zfcp_fsf.c
+@@ -27,6 +27,11 @@
+
+ struct kmem_cache *zfcp_fsf_qtcb_cache;
+
++static bool ber_stop = true;
++module_param(ber_stop, bool, 0600);
++MODULE_PARM_DESC(ber_stop,
++	"Shuts down FCP devices for FCP channels that report a bit-error count in excess of their threshold (default on)");
++
+ static void zfcp_fsf_request_timeout_handler(struct timer_list *t)
+ {
+ struct zfcp_fsf_req *fsf_req = from_timer(fsf_req, t, timer);
+@@ -236,10 +241,15 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
+ case FSF_STATUS_READ_SENSE_DATA_AVAIL:
+ break;
+ case FSF_STATUS_READ_BIT_ERROR_THRESHOLD:
+- dev_warn(&adapter->ccw_device->dev,
+- "The error threshold for checksum statistics "
+- "has been exceeded\n");
+ zfcp_dbf_hba_bit_err("fssrh_3", req);
++ if (ber_stop) {
++ dev_warn(&adapter->ccw_device->dev,
++ "All paths over this FCP device are disused because of excessive bit errors\n");
++ zfcp_erp_adapter_shutdown(adapter, 0, "fssrh_b");
++ } else {
++ dev_warn(&adapter->ccw_device->dev,
++ "The error threshold for checksum statistics has been exceeded\n");
++ }
+ break;
+ case FSF_STATUS_READ_LINK_DOWN:
+ zfcp_fsf_status_read_link_down(req);
+diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c
+index 5f8153c37f77..76751d6c7f0d 100644
+--- a/drivers/scsi/ch.c
++++ b/drivers/scsi/ch.c
+@@ -579,7 +579,6 @@ ch_release(struct inode *inode, struct file *file)
+ scsi_changer *ch = file->private_data;
+
+ scsi_device_put(ch->device);
+- ch->device = NULL;
+ file->private_data = NULL;
+ kref_put(&ch->ref, ch_destroy);
+ return 0;
+diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
+index 45a66048801b..ff6d4aa92421 100644
+--- a/drivers/scsi/megaraid.c
++++ b/drivers/scsi/megaraid.c
+@@ -4183,11 +4183,11 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
+ */
+ if (pdev->subsystem_vendor == PCI_VENDOR_ID_COMPAQ &&
+ pdev->subsystem_device == 0xC000)
+- return -ENODEV;
++ goto out_disable_device;
+ /* Now check the magic signature byte */
+ pci_read_config_word(pdev, PCI_CONF_AMISIG, &magic);
+ if (magic != HBA_SIGNATURE_471 && magic != HBA_SIGNATURE)
+- return -ENODEV;
++ goto out_disable_device;
+ /* Ok it is probably a megaraid */
+ }
+
+diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
+index bad2b12604f1..a2922b17b55b 100644
+--- a/drivers/scsi/qla2xxx/qla_def.h
++++ b/drivers/scsi/qla2xxx/qla_def.h
+@@ -2338,6 +2338,7 @@ typedef struct fc_port {
+ unsigned int query:1;
+ unsigned int id_changed:1;
+ unsigned int scan_needed:1;
++ unsigned int n2n_flag:1;
+
+ struct completion nvme_del_done;
+ uint32_t nvme_prli_service_param;
+@@ -2388,7 +2389,6 @@ typedef struct fc_port {
+ uint8_t fc4_type;
+ uint8_t fc4f_nvme;
+ uint8_t scan_state;
+- uint8_t n2n_flag;
+
+ unsigned long last_queue_full;
+ unsigned long last_ramp_up;
+@@ -2979,6 +2979,7 @@ enum scan_flags_t {
+ enum fc4type_t {
+ FS_FC4TYPE_FCP = BIT_0,
+ FS_FC4TYPE_NVME = BIT_1,
++ FS_FCP_IS_N2N = BIT_7,
+ };
+
+ struct fab_scan_rp {
+diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
+index afcd9a885884..cd74cc9651de 100644
+--- a/drivers/scsi/qla2xxx/qla_init.c
++++ b/drivers/scsi/qla2xxx/qla_init.c
+@@ -746,12 +746,15 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
+ break;
+ default:
+ if ((id.b24 != fcport->d_id.b24 &&
+- fcport->d_id.b24) ||
++ fcport->d_id.b24 &&
++ fcport->loop_id != FC_NO_LOOP_ID) ||
+ (fcport->loop_id != FC_NO_LOOP_ID &&
+ fcport->loop_id != loop_id)) {
+ ql_dbg(ql_dbg_disc, vha, 0x20e3,
+ "%s %d %8phC post del sess\n",
+ __func__, __LINE__, fcport->port_name);
++ if (fcport->n2n_flag)
++ fcport->d_id.b24 = 0;
+ qlt_schedule_sess_for_deletion(fcport);
+ return;
+ }
+@@ -759,6 +762,8 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
+ }
+
+ fcport->loop_id = loop_id;
++ if (fcport->n2n_flag)
++ fcport->d_id.b24 = id.b24;
+
+ wwn = wwn_to_u64(fcport->port_name);
+ qlt_find_sess_invalidate_other(vha, wwn,
+@@ -966,7 +971,7 @@ qla24xx_async_gnl_sp_done(void *s, int res)
+ wwn = wwn_to_u64(e->port_name);
+
+ ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x20e8,
+- "%s %8phC %02x:%02x:%02x state %d/%d lid %x \n",
++ "%s %8phC %02x:%02x:%02x CLS %x/%x lid %x \n",
+ __func__, (void *)&wwn, e->port_id[2], e->port_id[1],
+ e->port_id[0], e->current_login_state, e->last_login_state,
+ (loop_id & 0x7fff));
+@@ -1498,7 +1503,8 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
+ (fcport->fw_login_state == DSC_LS_PRLI_PEND)))
+ return 0;
+
+- if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) {
++ if (fcport->fw_login_state == DSC_LS_PLOGI_COMP &&
++ !N2N_TOPO(vha->hw)) {
+ if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline)) {
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ return 0;
+@@ -1569,8 +1575,9 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
+ qla24xx_post_gpdb_work(vha, fcport, 0);
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0x2118,
+- "%s %d %8phC post NVMe PRLI\n",
+- __func__, __LINE__, fcport->port_name);
++ "%s %d %8phC post %s PRLI\n",
++ __func__, __LINE__, fcport->port_name,
++ fcport->fc4f_nvme ? "NVME" : "FC");
+ qla24xx_post_prli_work(vha, fcport);
+ }
+ break;
+@@ -1924,17 +1931,38 @@ qla24xx_handle_prli_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
+ break;
+ }
+
+- if (ea->fcport->n2n_flag) {
++ if (ea->fcport->fc4f_nvme) {
+ ql_dbg(ql_dbg_disc, vha, 0x2118,
+ "%s %d %8phC post fc4 prli\n",
+ __func__, __LINE__, ea->fcport->port_name);
+ ea->fcport->fc4f_nvme = 0;
+- ea->fcport->n2n_flag = 0;
+ qla24xx_post_prli_work(vha, ea->fcport);
++ return;
++ }
++
++ /* at this point both PRLI NVME & PRLI FCP failed */
++ if (N2N_TOPO(vha->hw)) {
++ if (ea->fcport->n2n_link_reset_cnt < 3) {
++ ea->fcport->n2n_link_reset_cnt++;
++ /*
++			 * The remote port is not sending a PLOGI. Reset the
++			 * link to kick-start its state machine.
++ */
++ set_bit(N2N_LINK_RESET, &vha->dpc_flags);
++ } else {
++ ql_log(ql_log_warn, vha, 0x2119,
++ "%s %d %8phC Unable to reconnect\n",
++ __func__, __LINE__, ea->fcport->port_name);
++ }
++ } else {
++ /*
++			 * Switch connection: login failed. Take the connection
++			 * down and allow relogin to be retriggered.
++ */
++ ea->fcport->flags &= ~FCF_ASYNC_SENT;
++ ea->fcport->keep_nport_handle = 0;
++ qlt_schedule_sess_for_deletion(ea->fcport);
+ }
+- ql_dbg(ql_dbg_disc, vha, 0x2119,
+- "%s %d %8phC unhandle event of %x\n",
+- __func__, __LINE__, ea->fcport->port_name, ea->data[0]);
+ break;
+ }
+ }
+@@ -3268,7 +3296,7 @@ try_eft:
+
+ for (j = 0; j < 2; j++, fwdt++) {
+ if (!fwdt->template) {
+- ql_log(ql_log_warn, vha, 0x00ba,
++ ql_dbg(ql_dbg_init, vha, 0x00ba,
+ "-> fwdt%u no template\n", j);
+ continue;
+ }
+@@ -5078,28 +5106,47 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
+ unsigned long flags;
+
+	/* Initiate N2N login. */
+- if (test_and_clear_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags)) {
+- /* borrowing */
+- u32 *bp, i, sz;
+-
+- memset(ha->init_cb, 0, ha->init_cb_size);
+- sz = min_t(int, sizeof(struct els_plogi_payload),
+- ha->init_cb_size);
+- rval = qla24xx_get_port_login_templ(vha, ha->init_cb_dma,
+- (void *)ha->init_cb, sz);
+- if (rval == QLA_SUCCESS) {
+- bp = (uint32_t *)ha->init_cb;
+- for (i = 0; i < sz/4 ; i++, bp++)
+- *bp = cpu_to_be32(*bp);
++ if (N2N_TOPO(ha)) {
++ if (test_and_clear_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags)) {
++ /* borrowing */
++ u32 *bp, i, sz;
++
++ memset(ha->init_cb, 0, ha->init_cb_size);
++ sz = min_t(int, sizeof(struct els_plogi_payload),
++ ha->init_cb_size);
++ rval = qla24xx_get_port_login_templ(vha,
++ ha->init_cb_dma, (void *)ha->init_cb, sz);
++ if (rval == QLA_SUCCESS) {
++ bp = (uint32_t *)ha->init_cb;
++ for (i = 0; i < sz/4 ; i++, bp++)
++ *bp = cpu_to_be32(*bp);
+
+- memcpy(&ha->plogi_els_payld.data, (void *)ha->init_cb,
+- sizeof(ha->plogi_els_payld.data));
+- set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+- } else {
+- ql_dbg(ql_dbg_init, vha, 0x00d1,
+- "PLOGI ELS param read fail.\n");
++ memcpy(&ha->plogi_els_payld.data,
++ (void *)ha->init_cb,
++ sizeof(ha->plogi_els_payld.data));
++ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
++ } else {
++ ql_dbg(ql_dbg_init, vha, 0x00d1,
++ "PLOGI ELS param read fail.\n");
++ goto skip_login;
++ }
++ }
++
++ list_for_each_entry(fcport, &vha->vp_fcports, list) {
++ if (fcport->n2n_flag) {
++ qla24xx_fcport_handle_login(vha, fcport);
++ return QLA_SUCCESS;
++ }
++ }
++skip_login:
++ spin_lock_irqsave(&vha->work_lock, flags);
++ vha->scan.scan_retry++;
++ spin_unlock_irqrestore(&vha->work_lock, flags);
++
++ if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
++ set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
++ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ }
+- return QLA_SUCCESS;
+ }
+
+ found_devs = 0;
+diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
+index 133f5f6270ff..abfb9c800ce2 100644
+--- a/drivers/scsi/qla2xxx/qla_mbx.c
++++ b/drivers/scsi/qla2xxx/qla_mbx.c
+@@ -2257,7 +2257,7 @@ qla2x00_lip_reset(scsi_qla_host_t *vha)
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+- ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105a,
++ ql_dbg(ql_dbg_disc, vha, 0x105a,
+ "Entered %s.\n", __func__);
+
+ if (IS_CNA_CAPABLE(vha->hw)) {
+@@ -3891,14 +3891,24 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
+ case TOPO_N2N:
+ ha->current_topology = ISP_CFG_N;
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
++ list_for_each_entry(fcport, &vha->vp_fcports, list) {
++ fcport->scan_state = QLA_FCPORT_SCAN;
++ fcport->n2n_flag = 0;
++ }
++
+ fcport = qla2x00_find_fcport_by_wwpn(vha,
+ rptid_entry->u.f1.port_name, 1);
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+
+ if (fcport) {
+ fcport->plogi_nack_done_deadline = jiffies + HZ;
+- fcport->dm_login_expire = jiffies + 3*HZ;
++ fcport->dm_login_expire = jiffies + 2*HZ;
+ fcport->scan_state = QLA_FCPORT_FOUND;
++ fcport->n2n_flag = 1;
++ fcport->keep_nport_handle = 1;
++ if (vha->flags.nvme_enabled)
++ fcport->fc4f_nvme = 1;
++
+ switch (fcport->disc_state) {
+ case DSC_DELETED:
+ set_bit(RELOGIN_NEEDED,
+@@ -3932,7 +3942,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
+ rptid_entry->u.f1.port_name,
+ rptid_entry->u.f1.node_name,
+ NULL,
+- FC4_TYPE_UNKNOWN);
++ FS_FCP_IS_N2N);
+ }
+
+ /* if our portname is higher then initiate N2N login */
+@@ -4031,6 +4041,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
+
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ fcport->scan_state = QLA_FCPORT_SCAN;
++ fcport->n2n_flag = 0;
+ }
+
+ fcport = qla2x00_find_fcport_by_wwpn(vha,
+@@ -4040,6 +4051,14 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
+ fcport->login_retry = vha->hw->login_retry_count;
+ fcport->plogi_nack_done_deadline = jiffies + HZ;
+ fcport->scan_state = QLA_FCPORT_FOUND;
++ fcport->keep_nport_handle = 1;
++ fcport->n2n_flag = 1;
++ fcport->d_id.b.domain =
++ rptid_entry->u.f2.remote_nport_id[2];
++ fcport->d_id.b.area =
++ rptid_entry->u.f2.remote_nport_id[1];
++ fcport->d_id.b.al_pa =
++ rptid_entry->u.f2.remote_nport_id[0];
+ }
+ }
+ }
+diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
+index 4fda308c3ef5..2835afbd2edc 100644
+--- a/drivers/scsi/qla2xxx/qla_os.c
++++ b/drivers/scsi/qla2xxx/qla_os.c
+@@ -1153,6 +1153,7 @@ qla2x00_wait_for_sess_deletion(scsi_qla_host_t *vha)
+ qla2x00_mark_all_devices_lost(vha, 0);
+
+ wait_event_timeout(vha->fcport_waitQ, test_fcport_count(vha), 10*HZ);
++ flush_workqueue(vha->hw->wq);
+ }
+
+ /*
+@@ -5049,6 +5050,10 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
+
+ memcpy(fcport->port_name, e->u.new_sess.port_name,
+ WWN_SIZE);
++
++ if (e->u.new_sess.fc4_type & FS_FCP_IS_N2N)
++ fcport->n2n_flag = 1;
++
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %8phC mem alloc fail.\n",
+@@ -5145,11 +5150,9 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
+ if (dfcp)
+ qlt_schedule_sess_for_deletion(tfcp);
+
+-
+- if (N2N_TOPO(vha->hw))
+- fcport->flags &= ~FCF_FABRIC_DEVICE;
+-
+ if (N2N_TOPO(vha->hw)) {
++ fcport->flags &= ~FCF_FABRIC_DEVICE;
++ fcport->keep_nport_handle = 1;
+ if (vha->flags.nvme_enabled) {
+ fcport->fc4f_nvme = 1;
+ fcport->n2n_flag = 1;
+diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
+index 459c28aa3b94..1bb0fc9324ea 100644
+--- a/drivers/scsi/qla2xxx/qla_target.c
++++ b/drivers/scsi/qla2xxx/qla_target.c
+@@ -954,7 +954,7 @@ void qlt_free_session_done(struct work_struct *work)
+ struct qla_hw_data *ha = vha->hw;
+ unsigned long flags;
+ bool logout_started = false;
+- scsi_qla_host_t *base_vha;
++ scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
+ struct qlt_plogi_ack_t *own =
+ sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN];
+
+@@ -1021,6 +1021,7 @@ void qlt_free_session_done(struct work_struct *work)
+
+ if (logout_started) {
+ bool traced = false;
++ u16 cnt = 0;
+
+ while (!READ_ONCE(sess->logout_completed)) {
+ if (!traced) {
+@@ -1030,6 +1031,9 @@ void qlt_free_session_done(struct work_struct *work)
+ traced = true;
+ }
+ msleep(100);
++ cnt++;
++ if (cnt > 200)
++ break;
+ }
+
+ ql_dbg(ql_dbg_disc, vha, 0xf087,
+@@ -1102,6 +1106,7 @@ void qlt_free_session_done(struct work_struct *work)
+ }
+
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
++ sess->free_pending = 0;
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001,
+ "Unregistration of sess %p %8phC finished fcp_cnt %d\n",
+@@ -1110,17 +1115,8 @@ void qlt_free_session_done(struct work_struct *work)
+ if (tgt && (tgt->sess_count == 0))
+ wake_up_all(&tgt->waitQ);
+
+- if (vha->fcport_count == 0)
+- wake_up_all(&vha->fcport_waitQ);
+-
+- base_vha = pci_get_drvdata(ha->pdev);
+-
+- sess->free_pending = 0;
+-
+- if (test_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags))
+- return;
+-
+- if ((!tgt || !tgt->tgt_stop) && !LOOP_TRANSITION(vha)) {
++ if (!test_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags) &&
++ (!tgt || !tgt->tgt_stop) && !LOOP_TRANSITION(vha)) {
+ switch (vha->host->active_mode) {
+ case MODE_INITIATOR:
+ case MODE_DUAL:
+@@ -1133,6 +1129,9 @@ void qlt_free_session_done(struct work_struct *work)
+ break;
+ }
+ }
++
++ if (vha->fcport_count == 0)
++ wake_up_all(&vha->fcport_waitQ);
+ }
+
+ /* ha->tgt.sess_lock supposed to be held on entry */
+@@ -1162,7 +1161,7 @@ void qlt_unreg_sess(struct fc_port *sess)
+ sess->last_login_gen = sess->login_gen;
+
+ INIT_WORK(&sess->free_work, qlt_free_session_done);
+- schedule_work(&sess->free_work);
++ queue_work(sess->vha->hw->wq, &sess->free_work);
+ }
+ EXPORT_SYMBOL(qlt_unreg_sess);
+
+diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
+index 1c470e31ae81..ae2fa170f6ad 100644
+--- a/drivers/scsi/scsi_error.c
++++ b/drivers/scsi/scsi_error.c
+@@ -967,6 +967,7 @@ void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses,
+ ses->data_direction = scmd->sc_data_direction;
+ ses->sdb = scmd->sdb;
+ ses->result = scmd->result;
++ ses->resid_len = scmd->req.resid_len;
+ ses->underflow = scmd->underflow;
+ ses->prot_op = scmd->prot_op;
+ ses->eh_eflags = scmd->eh_eflags;
+@@ -977,6 +978,7 @@ void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses,
+ memset(scmd->cmnd, 0, BLK_MAX_CDB);
+ memset(&scmd->sdb, 0, sizeof(scmd->sdb));
+ scmd->result = 0;
++ scmd->req.resid_len = 0;
+
+ if (sense_bytes) {
+ scmd->sdb.length = min_t(unsigned, SCSI_SENSE_BUFFERSIZE,
+@@ -1029,6 +1031,7 @@ void scsi_eh_restore_cmnd(struct scsi_cmnd* scmd, struct scsi_eh_save *ses)
+ scmd->sc_data_direction = ses->data_direction;
+ scmd->sdb = ses->sdb;
+ scmd->result = ses->result;
++ scmd->req.resid_len = ses->resid_len;
+ scmd->underflow = ses->underflow;
+ scmd->prot_op = ses->prot_op;
+ scmd->eh_eflags = ses->eh_eflags;
+diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
+index 64c96c7828ee..6d7362e7367e 100644
+--- a/drivers/scsi/scsi_sysfs.c
++++ b/drivers/scsi/scsi_sysfs.c
+@@ -730,6 +730,14 @@ sdev_store_delete(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+ {
+ struct kernfs_node *kn;
++ struct scsi_device *sdev = to_scsi_device(dev);
++
++ /*
++	 * We need to try to get the module, to avoid the module being
++	 * removed during the delete.
++ */
++ if (scsi_device_get(sdev))
++ return -ENODEV;
+
+ kn = sysfs_break_active_protection(&dev->kobj, &attr->attr);
+ WARN_ON_ONCE(!kn);
+@@ -744,9 +752,10 @@ sdev_store_delete(struct device *dev, struct device_attribute *attr,
+ * state into SDEV_DEL.
+ */
+ device_remove_file(dev, attr);
+- scsi_remove_device(to_scsi_device(dev));
++ scsi_remove_device(sdev);
+ if (kn)
+ sysfs_unbreak_active_protection(kn);
++ scsi_device_put(sdev);
+ return count;
+ };
+ static DEVICE_ATTR(delete, S_IWUSR, NULL, sdev_store_delete);
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index 149d406aacc9..2d77f32e13d5 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -1655,7 +1655,8 @@ static int sd_sync_cache(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr)
+ /* we need to evaluate the error return */
+ if (scsi_sense_valid(sshdr) &&
+ (sshdr->asc == 0x3a || /* medium not present */
+- sshdr->asc == 0x20)) /* invalid command */
++ sshdr->asc == 0x20 || /* invalid command */
++ (sshdr->asc == 0x74 && sshdr->ascq == 0x71))) /* drive is password locked */
+ /* this is no error here */
+ return 0;
+
+diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
+index 029da74bb2f5..e674f6148f69 100644
+--- a/drivers/scsi/ufs/ufshcd.c
++++ b/drivers/scsi/ufs/ufshcd.c
+@@ -8095,6 +8095,9 @@ int ufshcd_shutdown(struct ufs_hba *hba)
+ {
+ int ret = 0;
+
++ if (!hba->is_powered)
++ goto out;
++
+ if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
+ goto out;
+
+diff --git a/drivers/staging/wlan-ng/cfg80211.c b/drivers/staging/wlan-ng/cfg80211.c
+index eee1998c4b18..fac38c842ac5 100644
+--- a/drivers/staging/wlan-ng/cfg80211.c
++++ b/drivers/staging/wlan-ng/cfg80211.c
+@@ -469,10 +469,8 @@ static int prism2_connect(struct wiphy *wiphy, struct net_device *dev,
+ /* Set the encryption - we only support wep */
+ if (is_wep) {
+ if (sme->key) {
+- if (sme->key_idx >= NUM_WEPKEYS) {
+- err = -EINVAL;
+- goto exit;
+- }
++ if (sme->key_idx >= NUM_WEPKEYS)
++ return -EINVAL;
+
+ result = prism2_domibset_uint32(wlandev,
+ DIDMIB_DOT11SMT_PRIVACYTABLE_WEPDEFAULTKEYID,
+diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c
+index 502e9bf1746f..4a80103675d5 100644
+--- a/drivers/usb/class/usblp.c
++++ b/drivers/usb/class/usblp.c
+@@ -445,6 +445,7 @@ static void usblp_cleanup(struct usblp *usblp)
+ kfree(usblp->readbuf);
+ kfree(usblp->device_id_string);
+ kfree(usblp->statusbuf);
++ usb_put_intf(usblp->intf);
+ kfree(usblp);
+ }
+
+@@ -1107,7 +1108,7 @@ static int usblp_probe(struct usb_interface *intf,
+ init_waitqueue_head(&usblp->wwait);
+ init_usb_anchor(&usblp->urbs);
+ usblp->ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
+- usblp->intf = intf;
++ usblp->intf = usb_get_intf(intf);
+
+ /* Malloc device ID string buffer to the largest expected length,
+ * since we can re-query it on an ioctl and a dynamic string
+@@ -1196,6 +1197,7 @@ abort:
+ kfree(usblp->readbuf);
+ kfree(usblp->statusbuf);
+ kfree(usblp->device_id_string);
++ usb_put_intf(usblp->intf);
+ kfree(usblp);
+ abort_ret:
+ return retval;
+diff --git a/drivers/usb/gadget/udc/lpc32xx_udc.c b/drivers/usb/gadget/udc/lpc32xx_udc.c
+index bb6af6b5ac97..4f1ac9f59f1c 100644
+--- a/drivers/usb/gadget/udc/lpc32xx_udc.c
++++ b/drivers/usb/gadget/udc/lpc32xx_udc.c
+@@ -1180,11 +1180,11 @@ static void udc_pop_fifo(struct lpc32xx_udc *udc, u8 *data, u32 bytes)
+ tmp = readl(USBD_RXDATA(udc->udp_baseaddr));
+
+ bl = bytes - n;
+- if (bl > 3)
+- bl = 3;
++ if (bl > 4)
++ bl = 4;
+
+ for (i = 0; i < bl; i++)
+- data[n + i] = (u8) ((tmp >> (n * 8)) & 0xFF);
++ data[n + i] = (u8) ((tmp >> (i * 8)) & 0xFF);
+ }
+ break;
+
+diff --git a/drivers/usb/misc/ldusb.c b/drivers/usb/misc/ldusb.c
+index f3108d85e768..15b5f06fb0b3 100644
+--- a/drivers/usb/misc/ldusb.c
++++ b/drivers/usb/misc/ldusb.c
+@@ -380,10 +380,7 @@ static int ld_usb_release(struct inode *inode, struct file *file)
+ goto exit;
+ }
+
+- if (mutex_lock_interruptible(&dev->mutex)) {
+- retval = -ERESTARTSYS;
+- goto exit;
+- }
++ mutex_lock(&dev->mutex);
+
+ if (dev->open_count != 1) {
+ retval = -ENODEV;
+@@ -467,7 +464,7 @@ static ssize_t ld_usb_read(struct file *file, char __user *buffer, size_t count,
+
+ /* wait for data */
+ spin_lock_irq(&dev->rbsl);
+- if (dev->ring_head == dev->ring_tail) {
++ while (dev->ring_head == dev->ring_tail) {
+ dev->interrupt_in_done = 0;
+ spin_unlock_irq(&dev->rbsl);
+ if (file->f_flags & O_NONBLOCK) {
+@@ -477,12 +474,17 @@ static ssize_t ld_usb_read(struct file *file, char __user *buffer, size_t count,
+ retval = wait_event_interruptible(dev->read_wait, dev->interrupt_in_done);
+ if (retval < 0)
+ goto unlock_exit;
+- } else {
+- spin_unlock_irq(&dev->rbsl);
++
++ spin_lock_irq(&dev->rbsl);
+ }
++ spin_unlock_irq(&dev->rbsl);
+
+ /* actual_buffer contains actual_length + interrupt_in_buffer */
+ actual_buffer = (size_t *)(dev->ring_buffer + dev->ring_tail * (sizeof(size_t)+dev->interrupt_in_endpoint_size));
++ if (*actual_buffer > dev->interrupt_in_endpoint_size) {
++ retval = -EIO;
++ goto unlock_exit;
++ }
+ bytes_to_read = min(count, *actual_buffer);
+ if (bytes_to_read < *actual_buffer)
+ dev_warn(&dev->intf->dev, "Read buffer overflow, %zd bytes dropped\n",
+@@ -693,10 +695,9 @@ static int ld_usb_probe(struct usb_interface *intf, const struct usb_device_id *
+ dev_warn(&intf->dev, "Interrupt out endpoint not found (using control endpoint instead)\n");
+
+ dev->interrupt_in_endpoint_size = usb_endpoint_maxp(dev->interrupt_in_endpoint);
+- dev->ring_buffer =
+- kmalloc_array(ring_buffer_size,
+- sizeof(size_t) + dev->interrupt_in_endpoint_size,
+- GFP_KERNEL);
++ dev->ring_buffer = kcalloc(ring_buffer_size,
++ sizeof(size_t) + dev->interrupt_in_endpoint_size,
++ GFP_KERNEL);
+ if (!dev->ring_buffer)
+ goto error;
+ dev->interrupt_in_buffer = kmalloc(dev->interrupt_in_endpoint_size, GFP_KERNEL);
+diff --git a/drivers/usb/misc/legousbtower.c b/drivers/usb/misc/legousbtower.c
+index 9d4c52a7ebe0..62dab2441ec4 100644
+--- a/drivers/usb/misc/legousbtower.c
++++ b/drivers/usb/misc/legousbtower.c
+@@ -419,10 +419,7 @@ static int tower_release (struct inode *inode, struct file *file)
+ goto exit;
+ }
+
+- if (mutex_lock_interruptible(&dev->lock)) {
+- retval = -ERESTARTSYS;
+- goto exit;
+- }
++ mutex_lock(&dev->lock);
+
+ if (dev->open_count != 1) {
+ dev_dbg(&dev->udev->dev, "%s: device not opened exactly once\n",
+diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
+index dd0ad67aa71e..9174ba2e06da 100644
+--- a/drivers/usb/serial/ti_usb_3410_5052.c
++++ b/drivers/usb/serial/ti_usb_3410_5052.c
+@@ -776,7 +776,6 @@ static void ti_close(struct usb_serial_port *port)
+ struct ti_port *tport;
+ int port_number;
+ int status;
+- int do_unlock;
+ unsigned long flags;
+
+ tdev = usb_get_serial_data(port->serial);
+@@ -800,16 +799,13 @@ static void ti_close(struct usb_serial_port *port)
+ "%s - cannot send close port command, %d\n"
+ , __func__, status);
+
+- /* if mutex_lock is interrupted, continue anyway */
+- do_unlock = !mutex_lock_interruptible(&tdev->td_open_close_lock);
++ mutex_lock(&tdev->td_open_close_lock);
+ --tport->tp_tdev->td_open_port_count;
+- if (tport->tp_tdev->td_open_port_count <= 0) {
++ if (tport->tp_tdev->td_open_port_count == 0) {
+ /* last port is closed, shut down interrupt urb */
+ usb_kill_urb(port->serial->port[0]->interrupt_in_urb);
+- tport->tp_tdev->td_open_port_count = 0;
+ }
+- if (do_unlock)
+- mutex_unlock(&tdev->td_open_close_lock);
++ mutex_unlock(&tdev->td_open_close_lock);
+ }
+
+
+diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
+index f131651502b8..c62903290f3a 100644
+--- a/fs/binfmt_elf.c
++++ b/fs/binfmt_elf.c
+@@ -899,7 +899,7 @@ out_free_interp:
+ the correct location in memory. */
+ for(i = 0, elf_ppnt = elf_phdata;
+ i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
+- int elf_prot, elf_flags, elf_fixed = MAP_FIXED_NOREPLACE;
++ int elf_prot, elf_flags;
+ unsigned long k, vaddr;
+ unsigned long total_size = 0;
+
+@@ -931,13 +931,6 @@ out_free_interp:
+ */
+ }
+ }
+-
+- /*
+- * Some binaries have overlapping elf segments and then
+- * we have to forcefully map over an existing mapping
+- * e.g. over this newly established brk mapping.
+- */
+- elf_fixed = MAP_FIXED;
+ }
+
+ elf_prot = make_prot(elf_ppnt->p_flags);
+@@ -950,7 +943,7 @@ out_free_interp:
+ * the ET_DYN load_addr calculations, proceed normally.
+ */
+ if (loc->elf_ex.e_type == ET_EXEC || load_addr_set) {
+- elf_flags |= elf_fixed;
++ elf_flags |= MAP_FIXED;
+ } else if (loc->elf_ex.e_type == ET_DYN) {
+ /*
+ * This logic is run once for the first LOAD Program
+@@ -986,7 +979,7 @@ out_free_interp:
+ load_bias = ELF_ET_DYN_BASE;
+ if (current->flags & PF_RANDOMIZE)
+ load_bias += arch_mmap_rnd();
+- elf_flags |= elf_fixed;
++ elf_flags |= MAP_FIXED;
+ } else
+ load_bias = 0;
+
+diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
+index d9541d58ce3d..e7a1ec075c65 100644
+--- a/fs/btrfs/ctree.h
++++ b/fs/btrfs/ctree.h
+@@ -908,8 +908,6 @@ struct btrfs_fs_info {
+ struct btrfs_workqueue *fixup_workers;
+ struct btrfs_workqueue *delayed_workers;
+
+- /* the extent workers do delayed refs on the extent allocation tree */
+- struct btrfs_workqueue *extent_workers;
+ struct task_struct *transaction_kthread;
+ struct task_struct *cleaner_kthread;
+ u32 thread_pool_size;
+diff --git a/fs/btrfs/delalloc-space.c b/fs/btrfs/delalloc-space.c
+index 17f7c0d38768..934521fe7e71 100644
+--- a/fs/btrfs/delalloc-space.c
++++ b/fs/btrfs/delalloc-space.c
+@@ -371,7 +371,6 @@ int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes)
+ out_qgroup:
+ btrfs_qgroup_free_meta_prealloc(root, qgroup_reserve);
+ out_fail:
+- btrfs_inode_rsv_release(inode, true);
+ if (delalloc_lock)
+ mutex_unlock(&inode->delalloc_mutex);
+ return ret;
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index 65af7eb3f7bd..46eac7ddf0f7 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -2036,7 +2036,6 @@ static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info)
+ btrfs_destroy_workqueue(fs_info->readahead_workers);
+ btrfs_destroy_workqueue(fs_info->flush_workers);
+ btrfs_destroy_workqueue(fs_info->qgroup_rescan_workers);
+- btrfs_destroy_workqueue(fs_info->extent_workers);
+ /*
+ * Now that all other work queues are destroyed, we can safely destroy
+ * the queues used for metadata I/O, since tasks from those other work
+@@ -2242,10 +2241,6 @@ static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info,
+ max_active, 2);
+ fs_info->qgroup_rescan_workers =
+ btrfs_alloc_workqueue(fs_info, "qgroup-rescan", flags, 1, 0);
+- fs_info->extent_workers =
+- btrfs_alloc_workqueue(fs_info, "extent-refs", flags,
+- min_t(u64, fs_devices->num_devices,
+- max_active), 8);
+
+ if (!(fs_info->workers && fs_info->delalloc_workers &&
+ fs_info->submit_workers && fs_info->flush_workers &&
+@@ -2256,7 +2251,6 @@ static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info,
+ fs_info->endio_freespace_worker && fs_info->rmw_workers &&
+ fs_info->caching_workers && fs_info->readahead_workers &&
+ fs_info->fixup_workers && fs_info->delayed_workers &&
+- fs_info->extent_workers &&
+ fs_info->qgroup_rescan_workers)) {
+ return -ENOMEM;
+ }
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index ef2f80825c82..d5a3a66c8f1d 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -8117,6 +8117,7 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info)
+ btrfs_err(info,
+ "bg %llu is a mixed block group but filesystem hasn't enabled mixed block groups",
+ cache->key.objectid);
++ btrfs_put_block_group(cache);
+ ret = -EINVAL;
+ goto error;
+ }
+diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
+index abcda051eee2..d68add0bf346 100644
+--- a/fs/btrfs/file.c
++++ b/fs/btrfs/file.c
+@@ -2067,25 +2067,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
+ struct btrfs_trans_handle *trans;
+ struct btrfs_log_ctx ctx;
+ int ret = 0, err;
+- u64 len;
+
+- /*
+- * If the inode needs a full sync, make sure we use a full range to
+- * avoid log tree corruption, due to hole detection racing with ordered
+- * extent completion for adjacent ranges, and assertion failures during
+- * hole detection.
+- */
+- if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
+- &BTRFS_I(inode)->runtime_flags)) {
+- start = 0;
+- end = LLONG_MAX;
+- }
+-
+- /*
+- * The range length can be represented by u64, we have to do the typecasts
+- * to avoid signed overflow if it's [0, LLONG_MAX] eg. from fsync()
+- */
+- len = (u64)end - (u64)start + 1;
+ trace_btrfs_sync_file(file, datasync);
+
+ btrfs_init_log_ctx(&ctx, inode);
+@@ -2111,6 +2093,19 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
+
+ atomic_inc(&root->log_batch);
+
++ /*
++ * If the inode needs a full sync, make sure we use a full range to
++ * avoid log tree corruption, due to hole detection racing with ordered
++ * extent completion for adjacent ranges, and assertion failures during
++ * hole detection. Do this while holding the inode lock, to avoid races
++ * with other tasks.
++ */
++ if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
++ &BTRFS_I(inode)->runtime_flags)) {
++ start = 0;
++ end = LLONG_MAX;
++ }
++
+ /*
+ * Before we acquired the inode's lock, someone may have dirtied more
+ * pages in the target range. We need to make sure that writeback for
+@@ -2138,8 +2133,11 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
+ /*
+ * We have to do this here to avoid the priority inversion of waiting on
+ * IO of a lower priority task while holding a transaction open.
++ *
++	 * Also, the range length can be represented by u64, so we have to do the
++ * typecasts to avoid signed overflow if it's [0, LLONG_MAX].
+ */
+- ret = btrfs_wait_ordered_range(inode, start, len);
++ ret = btrfs_wait_ordered_range(inode, start, (u64)end - (u64)start + 1);
+ if (ret) {
+ up_write(&BTRFS_I(inode)->dio_sem);
+ inode_unlock(inode);
+diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
+index 001efc9ba1e7..60a00f6ca18f 100644
+--- a/fs/btrfs/qgroup.c
++++ b/fs/btrfs/qgroup.c
+@@ -3617,7 +3617,7 @@ int __btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
+ return 0;
+
+ BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
+- trace_qgroup_meta_reserve(root, type, (s64)num_bytes);
++ trace_qgroup_meta_reserve(root, (s64)num_bytes, type);
+ ret = qgroup_reserve(root, num_bytes, enforce, type);
+ if (ret < 0)
+ return ret;
+@@ -3664,7 +3664,7 @@ void __btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes,
+ */
+ num_bytes = sub_root_meta_rsv(root, num_bytes, type);
+ BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
+- trace_qgroup_meta_reserve(root, type, -(s64)num_bytes);
++ trace_qgroup_meta_reserve(root, -(s64)num_bytes, type);
+ btrfs_qgroup_free_refroot(fs_info, root->root_key.objectid,
+ num_bytes, type);
+ }
+diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
+index fbd66c33dd63..074947bebd16 100644
+--- a/fs/btrfs/relocation.c
++++ b/fs/btrfs/relocation.c
+@@ -3276,6 +3276,8 @@ static int relocate_file_extent_cluster(struct inode *inode,
+ if (!page) {
+ btrfs_delalloc_release_metadata(BTRFS_I(inode),
+ PAGE_SIZE, true);
++ btrfs_delalloc_release_extents(BTRFS_I(inode),
++ PAGE_SIZE, true);
+ ret = -ENOMEM;
+ goto out;
+ }
+diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
+index b11af7d8e8e9..61282b77950f 100644
+--- a/fs/ceph/mds_client.c
++++ b/fs/ceph/mds_client.c
+@@ -384,8 +384,8 @@ static int parse_reply_info_readdir(void **p, void *end,
+ }
+
+ done:
+- if (*p != end)
+- goto bad;
++ /* Skip over any unrecognized fields */
++ *p = end;
+ return 0;
+
+ bad:
+@@ -406,12 +406,10 @@ static int parse_reply_info_filelock(void **p, void *end,
+ goto bad;
+
+ info->filelock_reply = *p;
+- *p += sizeof(*info->filelock_reply);
+
+- if (unlikely(*p != end))
+- goto bad;
++ /* Skip over any unrecognized fields */
++ *p = end;
+ return 0;
+-
+ bad:
+ return -EIO;
+ }
+@@ -425,18 +423,21 @@ static int parse_reply_info_create(void **p, void *end,
+ {
+ if (features == (u64)-1 ||
+ (features & CEPH_FEATURE_REPLY_CREATE_INODE)) {
++ /* Malformed reply? */
+ if (*p == end) {
+ info->has_create_ino = false;
+ } else {
+ info->has_create_ino = true;
+- info->ino = ceph_decode_64(p);
++ ceph_decode_64_safe(p, end, info->ino, bad);
+ }
++ } else {
++ if (*p != end)
++ goto bad;
+ }
+
+- if (unlikely(*p != end))
+- goto bad;
++ /* Skip over any unrecognized fields */
++ *p = end;
+ return 0;
+-
+ bad:
+ return -EIO;
+ }
+diff --git a/fs/cifs/file.c b/fs/cifs/file.c
+index 4c1aeb2cf7f5..53dbb6e0d390 100644
+--- a/fs/cifs/file.c
++++ b/fs/cifs/file.c
+@@ -405,10 +405,11 @@ void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_handler)
+ bool oplock_break_cancelled;
+
+ spin_lock(&tcon->open_file_lock);
+-
++ spin_lock(&cifsi->open_file_lock);
+ spin_lock(&cifs_file->file_info_lock);
+ if (--cifs_file->count > 0) {
+ spin_unlock(&cifs_file->file_info_lock);
++ spin_unlock(&cifsi->open_file_lock);
+ spin_unlock(&tcon->open_file_lock);
+ return;
+ }
+@@ -421,9 +422,7 @@ void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_handler)
+ cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);
+
+ /* remove it from the lists */
+- spin_lock(&cifsi->open_file_lock);
+ list_del(&cifs_file->flist);
+- spin_unlock(&cifsi->open_file_lock);
+ list_del(&cifs_file->tlist);
+ atomic_dec(&tcon->num_local_opens);
+
+@@ -440,6 +439,7 @@ void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_handler)
+ cifs_set_oplock_level(cifsi, 0);
+ }
+
++ spin_unlock(&cifsi->open_file_lock);
+ spin_unlock(&tcon->open_file_lock);
+
+ oplock_break_cancelled = wait_oplock_handler ?
+diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
+index 79d9a60f21ba..3c952024e10f 100644
+--- a/fs/cifs/inode.c
++++ b/fs/cifs/inode.c
+@@ -2465,9 +2465,9 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
+ rc = tcon->ses->server->ops->flush(xid, tcon, &wfile->fid);
+ cifsFileInfo_put(wfile);
+ if (rc)
+- return rc;
++ goto cifs_setattr_exit;
+ } else if (rc != -EBADF)
+- return rc;
++ goto cifs_setattr_exit;
+ else
+ rc = 0;
+ }
+diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
+index b7421a096319..514810694c0f 100644
+--- a/fs/cifs/smb1ops.c
++++ b/fs/cifs/smb1ops.c
+@@ -171,6 +171,9 @@ cifs_get_next_mid(struct TCP_Server_Info *server)
+ /* we do not want to loop forever */
+ last_mid = cur_mid;
+ cur_mid++;
++ /* avoid 0xFFFF MID */
++ if (cur_mid == 0xffff)
++ cur_mid++;
+
+ /*
+ * This nested loop looks more expensive than it is.
+diff --git a/fs/dax.c b/fs/dax.c
+index 6bf81f931de3..2cc43cd914eb 100644
+--- a/fs/dax.c
++++ b/fs/dax.c
+@@ -220,10 +220,11 @@ static void *get_unlocked_entry(struct xa_state *xas, unsigned int order)
+
+ for (;;) {
+ entry = xas_find_conflict(xas);
++ if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
++ return entry;
+ if (dax_entry_order(entry) < order)
+ return XA_RETRY_ENTRY;
+- if (!entry || WARN_ON_ONCE(!xa_is_value(entry)) ||
+- !dax_is_locked(entry))
++ if (!dax_is_locked(entry))
+ return entry;
+
+ wq = dax_entry_waitqueue(xas, entry, &ewait.key);
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index 30149652c379..ed223c33dd89 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -221,6 +221,7 @@ struct io_ring_ctx {
+ unsigned sq_entries;
+ unsigned sq_mask;
+ unsigned sq_thread_idle;
++ unsigned cached_sq_dropped;
+ struct io_uring_sqe *sq_sqes;
+
+ struct list_head defer_list;
+@@ -237,6 +238,7 @@ struct io_ring_ctx {
+ /* CQ ring */
+ struct io_cq_ring *cq_ring;
+ unsigned cached_cq_tail;
++ atomic_t cached_cq_overflow;
+ unsigned cq_entries;
+ unsigned cq_mask;
+ struct wait_queue_head cq_wait;
+@@ -431,7 +433,8 @@ static inline bool io_sequence_defer(struct io_ring_ctx *ctx,
+ if ((req->flags & (REQ_F_IO_DRAIN|REQ_F_IO_DRAINED)) != REQ_F_IO_DRAIN)
+ return false;
+
+- return req->sequence != ctx->cached_cq_tail + ctx->sq_ring->dropped;
++ return req->sequence != ctx->cached_cq_tail + ctx->sq_ring->dropped
++ + atomic_read(&ctx->cached_cq_overflow);
+ }
+
+ static struct io_kiocb *io_get_deferred_req(struct io_ring_ctx *ctx)
+@@ -511,9 +514,8 @@ static void io_cqring_fill_event(struct io_ring_ctx *ctx, u64 ki_user_data,
+ WRITE_ONCE(cqe->res, res);
+ WRITE_ONCE(cqe->flags, 0);
+ } else {
+- unsigned overflow = READ_ONCE(ctx->cq_ring->overflow);
+-
+- WRITE_ONCE(ctx->cq_ring->overflow, overflow + 1);
++ WRITE_ONCE(ctx->cq_ring->overflow,
++ atomic_inc_return(&ctx->cached_cq_overflow));
+ }
+ }
+
+@@ -687,6 +689,14 @@ static unsigned io_cqring_events(struct io_cq_ring *ring)
+ return READ_ONCE(ring->r.tail) - READ_ONCE(ring->r.head);
+ }
+
++static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
++{
++ struct io_sq_ring *ring = ctx->sq_ring;
++
++ /* make sure SQ entry isn't read before tail */
++ return smp_load_acquire(&ring->r.tail) - ctx->cached_sq_head;
++}
++
+ /*
+ * Find and free completed poll iocbs
+ */
+@@ -816,19 +826,11 @@ static void io_iopoll_reap_events(struct io_ring_ctx *ctx)
+ mutex_unlock(&ctx->uring_lock);
+ }
+
+-static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
+- long min)
++static int __io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
++ long min)
+ {
+- int iters, ret = 0;
++ int iters = 0, ret = 0;
+
+- /*
+- * We disallow the app entering submit/complete with polling, but we
+- * still need to lock the ring to prevent racing with polled issue
+- * that got punted to a workqueue.
+- */
+- mutex_lock(&ctx->uring_lock);
+-
+- iters = 0;
+ do {
+ int tmin = 0;
+
+@@ -864,6 +866,21 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
+ ret = 0;
+ } while (min && !*nr_events && !need_resched());
+
++ return ret;
++}
++
++static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
++ long min)
++{
++ int ret;
++
++ /*
++ * We disallow the app entering submit/complete with polling, but we
++ * still need to lock the ring to prevent racing with polled issue
++ * that got punted to a workqueue.
++ */
++ mutex_lock(&ctx->uring_lock);
++ ret = __io_iopoll_check(ctx, nr_events, min);
+ mutex_unlock(&ctx->uring_lock);
+ return ret;
+ }
+@@ -2150,6 +2167,8 @@ err:
+ return;
+ }
+
++ req->user_data = s->sqe->user_data;
++
+ /*
+ * If we already have a head request, queue this one for async
+ * submittal once the head completes. If we don't have a head but
+@@ -2255,12 +2274,13 @@ static bool io_get_sqring(struct io_ring_ctx *ctx, struct sqe_submit *s)
+
+ /* drop invalid entries */
+ ctx->cached_sq_head++;
+- ring->dropped++;
++ ctx->cached_sq_dropped++;
++ WRITE_ONCE(ring->dropped, ctx->cached_sq_dropped);
+ return false;
+ }
+
+-static int io_submit_sqes(struct io_ring_ctx *ctx, struct sqe_submit *sqes,
+- unsigned int nr, bool has_user, bool mm_fault)
++static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
++ bool has_user, bool mm_fault)
+ {
+ struct io_submit_state state, *statep = NULL;
+ struct io_kiocb *link = NULL;
+@@ -2273,6 +2293,11 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, struct sqe_submit *sqes,
+ }
+
+ for (i = 0; i < nr; i++) {
++ struct sqe_submit s;
++
++ if (!io_get_sqring(ctx, &s))
++ break;
++
+ /*
+ * If previous wasn't linked and we have a linked command,
+ * that's the end of the chain. Submit the previous link.
+@@ -2281,16 +2306,16 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, struct sqe_submit *sqes,
+ io_queue_sqe(ctx, link, &link->submit);
+ link = NULL;
+ }
+- prev_was_link = (sqes[i].sqe->flags & IOSQE_IO_LINK) != 0;
++ prev_was_link = (s.sqe->flags & IOSQE_IO_LINK) != 0;
+
+ if (unlikely(mm_fault)) {
+- io_cqring_add_event(ctx, sqes[i].sqe->user_data,
++ io_cqring_add_event(ctx, s.sqe->user_data,
+ -EFAULT);
+ } else {
+- sqes[i].has_user = has_user;
+- sqes[i].needs_lock = true;
+- sqes[i].needs_fixed_file = true;
+- io_submit_sqe(ctx, &sqes[i], statep, &link);
++ s.has_user = has_user;
++ s.needs_lock = true;
++ s.needs_fixed_file = true;
++ io_submit_sqe(ctx, &s, statep, &link);
+ submitted++;
+ }
+ }
+@@ -2305,7 +2330,6 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, struct sqe_submit *sqes,
+
+ static int io_sq_thread(void *data)
+ {
+- struct sqe_submit sqes[IO_IOPOLL_BATCH];
+ struct io_ring_ctx *ctx = data;
+ struct mm_struct *cur_mm = NULL;
+ mm_segment_t old_fs;
+@@ -2320,14 +2344,27 @@ static int io_sq_thread(void *data)
+
+ timeout = inflight = 0;
+ while (!kthread_should_park()) {
+- bool all_fixed, mm_fault = false;
+- int i;
++ bool mm_fault = false;
++ unsigned int to_submit;
+
+ if (inflight) {
+ unsigned nr_events = 0;
+
+ if (ctx->flags & IORING_SETUP_IOPOLL) {
+- io_iopoll_check(ctx, &nr_events, 0);
++ /*
++ * inflight is the count of the maximum possible
++ * entries we submitted, but it can be smaller
++ * if we dropped some of them. If we don't have
++ * poll entries available, then we know that we
++ * have nothing left to poll for. Reset the
++ * inflight count to zero in that case.
++ */
++ mutex_lock(&ctx->uring_lock);
++ if (!list_empty(&ctx->poll_list))
++ __io_iopoll_check(ctx, &nr_events, 0);
++ else
++ inflight = 0;
++ mutex_unlock(&ctx->uring_lock);
+ } else {
+ /*
+ * Normal IO, just pretend everything completed.
+@@ -2341,7 +2378,8 @@ static int io_sq_thread(void *data)
+ timeout = jiffies + ctx->sq_thread_idle;
+ }
+
+- if (!io_get_sqring(ctx, &sqes[0])) {
++ to_submit = io_sqring_entries(ctx);
++ if (!to_submit) {
+ /*
+ * We're polling. If we're within the defined idle
+ * period, then let us spin without work before going
+@@ -2372,7 +2410,8 @@ static int io_sq_thread(void *data)
+ /* make sure to read SQ tail after writing flags */
+ smp_mb();
+
+- if (!io_get_sqring(ctx, &sqes[0])) {
++ to_submit = io_sqring_entries(ctx);
++ if (!to_submit) {
+ if (kthread_should_park()) {
+ finish_wait(&ctx->sqo_wait, &wait);
+ break;
+@@ -2390,19 +2429,8 @@ static int io_sq_thread(void *data)
+ ctx->sq_ring->flags &= ~IORING_SQ_NEED_WAKEUP;
+ }
+
+- i = 0;
+- all_fixed = true;
+- do {
+- if (all_fixed && io_sqe_needs_user(sqes[i].sqe))
+- all_fixed = false;
+-
+- i++;
+- if (i == ARRAY_SIZE(sqes))
+- break;
+- } while (io_get_sqring(ctx, &sqes[i]));
+-
+ /* Unless all new commands are FIXED regions, grab mm */
+- if (!all_fixed && !cur_mm) {
++ if (!cur_mm) {
+ mm_fault = !mmget_not_zero(ctx->sqo_mm);
+ if (!mm_fault) {
+ use_mm(ctx->sqo_mm);
+@@ -2410,8 +2438,9 @@ static int io_sq_thread(void *data)
+ }
+ }
+
+- inflight += io_submit_sqes(ctx, sqes, i, cur_mm != NULL,
+- mm_fault);
++ to_submit = min(to_submit, ctx->sq_entries);
++ inflight += io_submit_sqes(ctx, to_submit, cur_mm != NULL,
++ mm_fault);
+
+ /* Commit SQ ring head once we've consumed all SQEs */
+ io_commit_sqring(ctx);
+@@ -2462,13 +2491,14 @@ static int io_ring_submit(struct io_ring_ctx *ctx, unsigned int to_submit)
+ submit++;
+ io_submit_sqe(ctx, &s, statep, &link);
+ }
+- io_commit_sqring(ctx);
+
+ if (link)
+ io_queue_sqe(ctx, link, &link->submit);
+ if (statep)
+ io_submit_state_end(statep);
+
++ io_commit_sqring(ctx);
++
+ return submit;
+ }
+
+diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
+index 930e3d388579..699a560efbb0 100644
+--- a/fs/ocfs2/journal.c
++++ b/fs/ocfs2/journal.c
+@@ -217,7 +217,8 @@ void ocfs2_recovery_exit(struct ocfs2_super *osb)
+ /* At this point, we know that no more recovery threads can be
+ * launched, so wait for any recovery completion work to
+ * complete. */
+- flush_workqueue(osb->ocfs2_wq);
++ if (osb->ocfs2_wq)
++ flush_workqueue(osb->ocfs2_wq);
+
+ /*
+ * Now that recovery is shut down, and the osb is about to be
+diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
+index 158e5af767fd..720e9f94957e 100644
+--- a/fs/ocfs2/localalloc.c
++++ b/fs/ocfs2/localalloc.c
+@@ -377,7 +377,8 @@ void ocfs2_shutdown_local_alloc(struct ocfs2_super *osb)
+ struct ocfs2_dinode *alloc = NULL;
+
+ cancel_delayed_work(&osb->la_enable_wq);
+- flush_workqueue(osb->ocfs2_wq);
++ if (osb->ocfs2_wq)
++ flush_workqueue(osb->ocfs2_wq);
+
+ if (osb->local_alloc_state == OCFS2_LA_UNUSED)
+ goto out;
+diff --git a/fs/proc/page.c b/fs/proc/page.c
+index 544d1ee15aee..7c952ee732e6 100644
+--- a/fs/proc/page.c
++++ b/fs/proc/page.c
+@@ -42,10 +42,12 @@ static ssize_t kpagecount_read(struct file *file, char __user *buf,
+ return -EINVAL;
+
+ while (count > 0) {
+- if (pfn_valid(pfn))
+- ppage = pfn_to_page(pfn);
+- else
+- ppage = NULL;
++ /*
++		 * TODO: ZONE_DEVICE support requires identifying
++ * memmaps that were actually initialized.
++ */
++ ppage = pfn_to_online_page(pfn);
++
+ if (!ppage || PageSlab(ppage) || page_has_type(ppage))
+ pcount = 0;
+ else
+@@ -216,10 +218,11 @@ static ssize_t kpageflags_read(struct file *file, char __user *buf,
+ return -EINVAL;
+
+ while (count > 0) {
+- if (pfn_valid(pfn))
+- ppage = pfn_to_page(pfn);
+- else
+- ppage = NULL;
++ /*
++		 * TODO: ZONE_DEVICE support requires identifying
++ * memmaps that were actually initialized.
++ */
++ ppage = pfn_to_online_page(pfn);
+
+ if (put_user(stable_page_flags(ppage), out)) {
+ ret = -EFAULT;
+@@ -261,10 +264,11 @@ static ssize_t kpagecgroup_read(struct file *file, char __user *buf,
+ return -EINVAL;
+
+ while (count > 0) {
+- if (pfn_valid(pfn))
+- ppage = pfn_to_page(pfn);
+- else
+- ppage = NULL;
++ /*
++		 * TODO: ZONE_DEVICE support requires identifying
++ * memmaps that were actually initialized.
++ */
++ ppage = pfn_to_online_page(pfn);
+
+ if (ppage)
+ ino = page_cgroup_ino(ppage);
+diff --git a/fs/readdir.c b/fs/readdir.c
+index 2f6a4534e0df..d26d5ea4de7b 100644
+--- a/fs/readdir.c
++++ b/fs/readdir.c
+@@ -20,9 +20,23 @@
+ #include <linux/syscalls.h>
+ #include <linux/unistd.h>
+ #include <linux/compat.h>
+-
+ #include <linux/uaccess.h>
+
++#include <asm/unaligned.h>
++
++/*
++ * Note the "unsafe_put_user()" semantics: we goto a
++ * label for errors.
++ */
++#define unsafe_copy_dirent_name(_dst, _src, _len, label) do { \
++ char __user *dst = (_dst); \
++ const char *src = (_src); \
++ size_t len = (_len); \
++ unsafe_put_user(0, dst+len, label); \
++ unsafe_copy_to_user(dst, src, len, label); \
++} while (0)
++
++
+ int iterate_dir(struct file *file, struct dir_context *ctx)
+ {
+ struct inode *inode = file_inode(file);
+@@ -64,6 +78,40 @@ out:
+ }
+ EXPORT_SYMBOL(iterate_dir);
+
++/*
++ * POSIX says that a dirent name cannot contain a NUL byte or a '/'.
++ *
++ * It's not 100% clear what we should really do in this case.
++ * The filesystem is clearly corrupted, but returning a hard
++ * error means that you now don't see any of the other names
++ * either, so that isn't a perfect alternative.
++ *
++ * And if you return an error, what error do you use? Several
++ * filesystems seem to have decided on EUCLEAN being the error
++ * code for EFSCORRUPTED, and that may be the error to use. Or
++ * just EIO, which is perhaps more obvious to users.
++ *
++ * In order to see the other file names in the directory, the
++ * caller might want to make this a "soft" error: skip the
++ * entry, and return the error at the end instead.
++ *
++ * Note that this should likely do a "memchr(name, 0, len)"
++ * check too, since that would be filesystem corruption as
++ * well. However, that case can't actually confuse user space,
++ * which has to do a strlen() on the name anyway to find the
++ * filename length, and the above "soft error" worry means
++ * that it's probably better left alone until we have that
++ * issue clarified.
++ */
++static int verify_dirent_name(const char *name, int len)
++{
++ if (!len)
++ return -EIO;
++ if (memchr(name, '/', len))
++ return -EIO;
++ return 0;
++}
++
+ /*
+ * Traditional linux readdir() handling..
+ *
+@@ -173,6 +221,9 @@ static int filldir(struct dir_context *ctx, const char *name, int namlen,
+ int reclen = ALIGN(offsetof(struct linux_dirent, d_name) + namlen + 2,
+ sizeof(long));
+
++ buf->error = verify_dirent_name(name, namlen);
++ if (unlikely(buf->error))
++ return buf->error;
+ buf->error = -EINVAL; /* only used if we fail.. */
+ if (reclen > buf->count)
+ return -EINVAL;
+@@ -182,28 +233,31 @@ static int filldir(struct dir_context *ctx, const char *name, int namlen,
+ return -EOVERFLOW;
+ }
+ dirent = buf->previous;
+- if (dirent) {
+- if (signal_pending(current))
+- return -EINTR;
+- if (__put_user(offset, &dirent->d_off))
+- goto efault;
+- }
+- dirent = buf->current_dir;
+- if (__put_user(d_ino, &dirent->d_ino))
+- goto efault;
+- if (__put_user(reclen, &dirent->d_reclen))
+- goto efault;
+- if (copy_to_user(dirent->d_name, name, namlen))
+- goto efault;
+- if (__put_user(0, dirent->d_name + namlen))
+- goto efault;
+- if (__put_user(d_type, (char __user *) dirent + reclen - 1))
++ if (dirent && signal_pending(current))
++ return -EINTR;
++
++ /*
++ * Note! This range-checks 'previous' (which may be NULL).
++ * The real range was checked in getdents
++ */
++ if (!user_access_begin(dirent, sizeof(*dirent)))
+ goto efault;
++ if (dirent)
++ unsafe_put_user(offset, &dirent->d_off, efault_end);
++ dirent = buf->current_dir;
++ unsafe_put_user(d_ino, &dirent->d_ino, efault_end);
++ unsafe_put_user(reclen, &dirent->d_reclen, efault_end);
++ unsafe_put_user(d_type, (char __user *) dirent + reclen - 1, efault_end);
++ unsafe_copy_dirent_name(dirent->d_name, name, namlen, efault_end);
++ user_access_end();
++
+ buf->previous = dirent;
+ dirent = (void __user *)dirent + reclen;
+ buf->current_dir = dirent;
+ buf->count -= reclen;
+ return 0;
++efault_end:
++ user_access_end();
+ efault:
+ buf->error = -EFAULT;
+ return -EFAULT;
+@@ -259,34 +313,38 @@ static int filldir64(struct dir_context *ctx, const char *name, int namlen,
+ int reclen = ALIGN(offsetof(struct linux_dirent64, d_name) + namlen + 1,
+ sizeof(u64));
+
++ buf->error = verify_dirent_name(name, namlen);
++ if (unlikely(buf->error))
++ return buf->error;
+ buf->error = -EINVAL; /* only used if we fail.. */
+ if (reclen > buf->count)
+ return -EINVAL;
+ dirent = buf->previous;
+- if (dirent) {
+- if (signal_pending(current))
+- return -EINTR;
+- if (__put_user(offset, &dirent->d_off))
+- goto efault;
+- }
+- dirent = buf->current_dir;
+- if (__put_user(ino, &dirent->d_ino))
+- goto efault;
+- if (__put_user(0, &dirent->d_off))
+- goto efault;
+- if (__put_user(reclen, &dirent->d_reclen))
+- goto efault;
+- if (__put_user(d_type, &dirent->d_type))
+- goto efault;
+- if (copy_to_user(dirent->d_name, name, namlen))
+- goto efault;
+- if (__put_user(0, dirent->d_name + namlen))
++ if (dirent && signal_pending(current))
++ return -EINTR;
++
++ /*
++ * Note! This range-checks 'previous' (which may be NULL).
++ * The real range was checked in getdents
++ */
++ if (!user_access_begin(dirent, sizeof(*dirent)))
+ goto efault;
++ if (dirent)
++ unsafe_put_user(offset, &dirent->d_off, efault_end);
++ dirent = buf->current_dir;
++ unsafe_put_user(ino, &dirent->d_ino, efault_end);
++ unsafe_put_user(reclen, &dirent->d_reclen, efault_end);
++ unsafe_put_user(d_type, &dirent->d_type, efault_end);
++ unsafe_copy_dirent_name(dirent->d_name, name, namlen, efault_end);
++ user_access_end();
++
+ buf->previous = dirent;
+ dirent = (void __user *)dirent + reclen;
+ buf->current_dir = dirent;
+ buf->count -= reclen;
+ return 0;
++efault_end:
++ user_access_end();
+ efault:
+ buf->error = -EFAULT;
+ return -EFAULT;
+diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h
+index ad24554f11f9..75f880c25bb8 100644
+--- a/include/linux/micrel_phy.h
++++ b/include/linux/micrel_phy.h
+@@ -31,7 +31,7 @@
+ #define PHY_ID_KSZ886X 0x00221430
+ #define PHY_ID_KSZ8863 0x00221435
+
+-#define PHY_ID_KSZ8795 0x00221550
++#define PHY_ID_KSZ87XX 0x00221550
+
+ #define PHY_ID_KSZ9477 0x00221631
+
+diff --git a/include/linux/mii.h b/include/linux/mii.h
+index 5cd824c1c0ca..4ce8901a1af6 100644
+--- a/include/linux/mii.h
++++ b/include/linux/mii.h
+@@ -455,6 +455,15 @@ static inline void mii_lpa_mod_linkmode_lpa_t(unsigned long *lp_advertising,
+ lp_advertising, lpa & LPA_LPACK);
+ }
+
++static inline void mii_ctrl1000_mod_linkmode_adv_t(unsigned long *advertising,
++ u32 ctrl1000)
++{
++ linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, advertising,
++ ctrl1000 & ADVERTISE_1000HALF);
++ linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, advertising,
++ ctrl1000 & ADVERTISE_1000FULL);
++}
++
+ /**
+ * linkmode_adv_to_lcl_adv_t
+ * @advertising:pointer to linkmode advertising
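The new mii_ctrl1000_mod_linkmode_adv_t() follows the other mii_*_mod_linkmode_* helpers: it folds the ADVERTISE_1000HALF/ADVERTISE_1000FULL bits of a MII_CTRL1000 register value into a linkmode bitmap. A hypothetical PHY-driver caller:

    /* Hypothetical: refresh the gigabit bits of the local advertisement
     * after reading MII_CTRL1000 from the PHY. */
    int val = phy_read(phydev, MII_CTRL1000);

    if (val < 0)
            return val;
    mii_ctrl1000_mod_linkmode_adv_t(phydev->advertising, val);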
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index ba5583522d24..9b18d33681c2 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -3465,8 +3465,9 @@ int skb_ensure_writable(struct sk_buff *skb, int write_len);
+ int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci);
+ int skb_vlan_pop(struct sk_buff *skb);
+ int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
+-int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto);
+-int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto);
++int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto,
++ int mac_len);
++int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len);
+ int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse);
+ int skb_mpls_dec_ttl(struct sk_buff *skb);
+ struct sk_buff *pskb_extract(struct sk_buff *skb, int off, int to_copy,
+diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
+index 34a038563d97..d38051dd414f 100644
+--- a/include/linux/uaccess.h
++++ b/include/linux/uaccess.h
+@@ -284,8 +284,10 @@ extern long strnlen_unsafe_user(const void __user *unsafe_addr, long count);
+ #ifndef user_access_begin
+ #define user_access_begin(ptr,len) access_ok(ptr, len)
+ #define user_access_end() do { } while (0)
+-#define unsafe_get_user(x, ptr, err) do { if (unlikely(__get_user(x, ptr))) goto err; } while (0)
+-#define unsafe_put_user(x, ptr, err) do { if (unlikely(__put_user(x, ptr))) goto err; } while (0)
++#define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0)
++#define unsafe_get_user(x,p,e) unsafe_op_wrap(__get_user(x,p),e)
++#define unsafe_put_user(x,p,e) unsafe_op_wrap(__put_user(x,p),e)
++#define unsafe_copy_to_user(d,s,l,e) unsafe_op_wrap(__copy_to_user(d,s,l),e)
+ static inline unsigned long user_access_save(void) { return 0UL; }
+ static inline void user_access_restore(unsigned long flags) { }
+ #endif
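On architectures that do not provide their own user_access_begin(), the new unsafe_copy_to_user() is just __copy_to_user() with goto-on-failure plumbing; unsafe_op_wrap() adds nothing else. Expanded long-hand, unsafe_copy_to_user(d, s, l, e) is:

    do {
            if (unlikely(__copy_to_user(d, s, l)))
                    goto e;
    } while (0)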
+diff --git a/include/scsi/scsi_eh.h b/include/scsi/scsi_eh.h
+index 3810b340551c..6bd5ed695a5e 100644
+--- a/include/scsi/scsi_eh.h
++++ b/include/scsi/scsi_eh.h
+@@ -32,6 +32,7 @@ extern int scsi_ioctl_reset(struct scsi_device *, int __user *);
+ struct scsi_eh_save {
+ /* saved state */
+ int result;
++ unsigned int resid_len;
+ int eh_eflags;
+ enum dma_data_direction data_direction;
+ unsigned underflow;
+diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
+index 2f6a669408bb..e83dee3212bd 100644
+--- a/include/trace/events/btrfs.h
++++ b/include/trace/events/btrfs.h
+@@ -1687,6 +1687,7 @@ TRACE_EVENT(qgroup_update_reserve,
+ __entry->qgid = qgroup->qgroupid;
+ __entry->cur_reserved = qgroup->rsv.values[type];
+ __entry->diff = diff;
++ __entry->type = type;
+ ),
+
+ TP_printk_btrfs("qgid=%llu type=%s cur_reserved=%llu diff=%lld",
+@@ -1709,6 +1710,7 @@ TRACE_EVENT(qgroup_meta_reserve,
+ TP_fast_assign_btrfs(root->fs_info,
+ __entry->refroot = root->root_key.objectid;
+ __entry->diff = diff;
++ __entry->type = type;
+ ),
+
+ TP_printk_btrfs("refroot=%llu(%s) type=%s diff=%lld",
+@@ -1725,7 +1727,6 @@ TRACE_EVENT(qgroup_meta_convert,
+ TP_STRUCT__entry_btrfs(
+ __field( u64, refroot )
+ __field( s64, diff )
+- __field( int, type )
+ ),
+
+ TP_fast_assign_btrfs(root->fs_info,
+diff --git a/include/uapi/linux/nvme_ioctl.h b/include/uapi/linux/nvme_ioctl.h
+index 1c215ea1798e..e168dc59e9a0 100644
+--- a/include/uapi/linux/nvme_ioctl.h
++++ b/include/uapi/linux/nvme_ioctl.h
+@@ -45,6 +45,27 @@ struct nvme_passthru_cmd {
+ __u32 result;
+ };
+
++struct nvme_passthru_cmd64 {
++ __u8 opcode;
++ __u8 flags;
++ __u16 rsvd1;
++ __u32 nsid;
++ __u32 cdw2;
++ __u32 cdw3;
++ __u64 metadata;
++ __u64 addr;
++ __u32 metadata_len;
++ __u32 data_len;
++ __u32 cdw10;
++ __u32 cdw11;
++ __u32 cdw12;
++ __u32 cdw13;
++ __u32 cdw14;
++ __u32 cdw15;
++ __u32 timeout_ms;
++ __u64 result;
++};
++
+ #define nvme_admin_cmd nvme_passthru_cmd
+
+ #define NVME_IOCTL_ID _IO('N', 0x40)
+@@ -54,5 +75,7 @@ struct nvme_passthru_cmd {
+ #define NVME_IOCTL_RESET _IO('N', 0x44)
+ #define NVME_IOCTL_SUBSYS_RESET _IO('N', 0x45)
+ #define NVME_IOCTL_RESCAN _IO('N', 0x46)
++#define NVME_IOCTL_ADMIN64_CMD _IOWR('N', 0x47, struct nvme_passthru_cmd64)
++#define NVME_IOCTL_IO64_CMD _IOWR('N', 0x48, struct nvme_passthru_cmd64)
+
+ #endif /* _UAPI_LINUX_NVME_IOCTL_H */
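The new NVME_IOCTL_ADMIN64_CMD/NVME_IOCTL_IO64_CMD ioctls carry the same fields as nvme_passthru_cmd but widen result to 64 bits. A hypothetical userspace caller issuing an Identify Controller admin command (opcode 0x06, CNS=1, 4 KiB payload) through the 64-bit variant:

    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/nvme_ioctl.h>

    /* Hypothetical helper: fd is an open /dev/nvme* char device, buf is a
     * caller-supplied 4096-byte buffer for the controller data structure. */
    int nvme_identify_ctrl64(int fd, void *buf)
    {
            struct nvme_passthru_cmd64 cmd;

            memset(&cmd, 0, sizeof(cmd));
            cmd.opcode   = 0x06;                    /* Identify */
            cmd.addr     = (uint64_t)(uintptr_t)buf;
            cmd.data_len = 4096;
            cmd.cdw10    = 1;                       /* CNS = 1: controller */
            return ioctl(fd, NVME_IOCTL_ADMIN64_CMD, &cmd);
    }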
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 0463c1151bae..a2a50b668ef3 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -6839,7 +6839,7 @@ static void __perf_event_output_stop(struct perf_event *event, void *data)
+ static int __perf_pmu_output_stop(void *info)
+ {
+ struct perf_event *event = info;
+- struct pmu *pmu = event->pmu;
++ struct pmu *pmu = event->ctx->pmu;
+ struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
+ struct remote_output ro = {
+ .rb = event->rb,
+diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
+index 0892e38ed6fb..a9dfa04ffa44 100644
+--- a/kernel/trace/trace_event_perf.c
++++ b/kernel/trace/trace_event_perf.c
+@@ -272,9 +272,11 @@ int perf_kprobe_init(struct perf_event *p_event, bool is_retprobe)
+ goto out;
+ }
+
++ mutex_lock(&event_mutex);
+ ret = perf_trace_event_init(tp_event, p_event);
+ if (ret)
+ destroy_local_trace_kprobe(tp_event);
++ mutex_unlock(&event_mutex);
+ out:
+ kfree(func);
+ return ret;
+@@ -282,8 +284,10 @@ out:
+
+ void perf_kprobe_destroy(struct perf_event *p_event)
+ {
++ mutex_lock(&event_mutex);
+ perf_trace_event_close(p_event);
+ perf_trace_event_unreg(p_event);
++ mutex_unlock(&event_mutex);
+
+ destroy_local_trace_kprobe(p_event->tp_event);
+ }
+diff --git a/lib/textsearch.c b/lib/textsearch.c
+index 4f16eec5d554..f68dea8806be 100644
+--- a/lib/textsearch.c
++++ b/lib/textsearch.c
+@@ -89,9 +89,9 @@
+ * goto errout;
+ * }
+ *
+- * pos = textsearch_find_continuous(conf, \&state, example, strlen(example));
++ * pos = textsearch_find_continuous(conf, &state, example, strlen(example));
+ * if (pos != UINT_MAX)
+- * panic("Oh my god, dancing chickens at \%d\n", pos);
++ * panic("Oh my god, dancing chickens at %d\n", pos);
+ *
+ * textsearch_destroy(conf);
+ */
+diff --git a/lib/vdso/gettimeofday.c b/lib/vdso/gettimeofday.c
+index e630e7ff57f1..45f57fd2db64 100644
+--- a/lib/vdso/gettimeofday.c
++++ b/lib/vdso/gettimeofday.c
+@@ -214,9 +214,10 @@ int __cvdso_clock_getres_common(clockid_t clock, struct __kernel_timespec *res)
+ return -1;
+ }
+
+- res->tv_sec = 0;
+- res->tv_nsec = ns;
+-
++ if (likely(res)) {
++ res->tv_sec = 0;
++ res->tv_nsec = ns;
++ }
+ return 0;
+ }
+
+@@ -245,7 +246,7 @@ __cvdso_clock_getres_time32(clockid_t clock, struct old_timespec32 *res)
+ ret = clock_getres_fallback(clock, &ts);
+ #endif
+
+- if (likely(!ret)) {
++ if (likely(!ret && res)) {
+ res->tv_sec = ts.tv_sec;
+ res->tv_nsec = ts.tv_nsec;
+ }
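Both hunks let the vDSO tolerate a NULL res pointer, which POSIX permits for clock_getres(); before this, the fast path stored through it unconditionally. A caller that depends on that, probing whether a clock is supported at all:

    #include <time.h>

    /* NULL res is valid per POSIX: the call only reports whether the
     * clock exists, and must not fault in the vDSO fast path. */
    int clock_exists(clockid_t clk)
    {
            return clock_getres(clk, NULL) == 0;
    }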
+diff --git a/mm/compaction.c b/mm/compaction.c
+index 1e994920e6ff..5ab9c2b22693 100644
+--- a/mm/compaction.c
++++ b/mm/compaction.c
+@@ -270,14 +270,15 @@ __reset_isolation_pfn(struct zone *zone, unsigned long pfn, bool check_source,
+
+ /* Ensure the start of the pageblock or zone is online and valid */
+ block_pfn = pageblock_start_pfn(pfn);
+- block_page = pfn_to_online_page(max(block_pfn, zone->zone_start_pfn));
++ block_pfn = max(block_pfn, zone->zone_start_pfn);
++ block_page = pfn_to_online_page(block_pfn);
+ if (block_page) {
+ page = block_page;
+ pfn = block_pfn;
+ }
+
+ /* Ensure the end of the pageblock or zone is online and valid */
+- block_pfn += pageblock_nr_pages;
++ block_pfn = pageblock_end_pfn(pfn) - 1;
+ block_pfn = min(block_pfn, zone_end_pfn(zone) - 1);
+ end_page = pfn_to_online_page(block_pfn);
+ if (!end_page)
+@@ -303,7 +304,7 @@ __reset_isolation_pfn(struct zone *zone, unsigned long pfn, bool check_source,
+
+ page += (1 << PAGE_ALLOC_COSTLY_ORDER);
+ pfn += (1 << PAGE_ALLOC_COSTLY_ORDER);
+- } while (page < end_page);
++ } while (page <= end_page);
+
+ return false;
+ }
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 6d7296dd11b8..843ee2f8d356 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -1084,11 +1084,10 @@ static bool pfn_range_valid_gigantic(struct zone *z,
+ struct page *page;
+
+ for (i = start_pfn; i < end_pfn; i++) {
+- if (!pfn_valid(i))
++ page = pfn_to_online_page(i);
++ if (!page)
+ return false;
+
+- page = pfn_to_page(i);
+-
+ if (page_zone(page) != z)
+ return false;
+
+diff --git a/mm/memblock.c b/mm/memblock.c
+index 7d4f61ae666a..c4b16cae2bc9 100644
+--- a/mm/memblock.c
++++ b/mm/memblock.c
+@@ -1356,9 +1356,6 @@ static phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
+ align = SMP_CACHE_BYTES;
+ }
+
+- if (end > memblock.current_limit)
+- end = memblock.current_limit;
+-
+ again:
+ found = memblock_find_in_range_node(size, align, start, end, nid,
+ flags);
+@@ -1469,6 +1466,9 @@ static void * __init memblock_alloc_internal(
+ if (WARN_ON_ONCE(slab_is_available()))
+ return kzalloc_node(size, GFP_NOWAIT, nid);
+
++ if (max_addr > memblock.current_limit)
++ max_addr = memblock.current_limit;
++
+ alloc = memblock_alloc_range_nid(size, align, min_addr, max_addr, nid);
+
+ /* retry allocation without lower limit */
+diff --git a/mm/memory-failure.c b/mm/memory-failure.c
+index 7ef849da8278..3151c87dff73 100644
+--- a/mm/memory-failure.c
++++ b/mm/memory-failure.c
+@@ -199,7 +199,6 @@ struct to_kill {
+ struct task_struct *tsk;
+ unsigned long addr;
+ short size_shift;
+- char addr_valid;
+ };
+
+ /*
+@@ -324,22 +323,27 @@ static void add_to_kill(struct task_struct *tsk, struct page *p,
+ }
+ }
+ tk->addr = page_address_in_vma(p, vma);
+- tk->addr_valid = 1;
+ if (is_zone_device_page(p))
+ tk->size_shift = dev_pagemap_mapping_shift(p, vma);
+ else
+ tk->size_shift = compound_order(compound_head(p)) + PAGE_SHIFT;
+
+ /*
+- * In theory we don't have to kill when the page was
+- * munmaped. But it could be also a mremap. Since that's
+- * likely very rare kill anyways just out of paranoia, but use
+- * a SIGKILL because the error is not contained anymore.
++ * Send SIGKILL if "tk->addr == -EFAULT". Also, as
++ * "tk->size_shift" is always non-zero for !is_zone_device_page(),
++ * so "tk->size_shift == 0" effectively checks no mapping on
++ * ZONE_DEVICE. Indeed, when a devdax page is mmapped N times
++ * to a process' address space, it's possible not all N VMAs
++ * contain mappings for the page, but at least one VMA does.
++ * Only deliver SIGBUS with payload derived from the VMA that
++ * has a mapping for the page.
+ */
+- if (tk->addr == -EFAULT || tk->size_shift == 0) {
++ if (tk->addr == -EFAULT) {
+ pr_info("Memory failure: Unable to find user space address %lx in %s\n",
+ page_to_pfn(p), tsk->comm);
+- tk->addr_valid = 0;
++ } else if (tk->size_shift == 0) {
++ kfree(tk);
++ return;
+ }
+ get_task_struct(tsk);
+ tk->tsk = tsk;
+@@ -366,7 +370,7 @@ static void kill_procs(struct list_head *to_kill, int forcekill, bool fail,
+ * make sure the process doesn't catch the
+ * signal and then access the memory. Just kill it.
+ */
+- if (fail || tk->addr_valid == 0) {
++ if (fail || tk->addr == -EFAULT) {
+ pr_err("Memory failure: %#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n",
+ pfn, tk->tsk->comm, tk->tsk->pid);
+ do_send_sig_info(SIGKILL, SEND_SIG_PRIV,
+@@ -1253,17 +1257,19 @@ int memory_failure(unsigned long pfn, int flags)
+ if (!sysctl_memory_failure_recovery)
+ panic("Memory failure on page %lx", pfn);
+
+- if (!pfn_valid(pfn)) {
++ p = pfn_to_online_page(pfn);
++ if (!p) {
++ if (pfn_valid(pfn)) {
++ pgmap = get_dev_pagemap(pfn, NULL);
++ if (pgmap)
++ return memory_failure_dev_pagemap(pfn, flags,
++ pgmap);
++ }
+ pr_err("Memory failure: %#lx: memory outside kernel control\n",
+ pfn);
+ return -ENXIO;
+ }
+
+- pgmap = get_dev_pagemap(pfn, NULL);
+- if (pgmap)
+- return memory_failure_dev_pagemap(pfn, flags, pgmap);
+-
+- p = pfn_to_page(pfn);
+ if (PageHuge(p))
+ return memory_failure_hugetlb(pfn, flags);
+ if (TestSetPageHWPoison(p)) {
+diff --git a/mm/memremap.c b/mm/memremap.c
+index ed70c4e8e52a..31f1b2953c64 100644
+--- a/mm/memremap.c
++++ b/mm/memremap.c
+@@ -104,6 +104,7 @@ static void devm_memremap_pages_release(void *data)
+ struct dev_pagemap *pgmap = data;
+ struct device *dev = pgmap->dev;
+ struct resource *res = &pgmap->res;
++ struct page *first_page;
+ unsigned long pfn;
+ int nid;
+
+@@ -112,14 +113,16 @@ static void devm_memremap_pages_release(void *data)
+ put_page(pfn_to_page(pfn));
+ dev_pagemap_cleanup(pgmap);
+
++ /* make sure to access a memmap that was actually initialized */
++ first_page = pfn_to_page(pfn_first(pgmap));
++
+ /* pages are dead and unused, undo the arch mapping */
+- nid = page_to_nid(pfn_to_page(PHYS_PFN(res->start)));
++ nid = page_to_nid(first_page);
+
+ mem_hotplug_begin();
+ if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
+- pfn = PHYS_PFN(res->start);
+- __remove_pages(page_zone(pfn_to_page(pfn)), pfn,
+- PHYS_PFN(resource_size(res)), NULL);
++ __remove_pages(page_zone(first_page), PHYS_PFN(res->start),
++ PHYS_PFN(resource_size(res)), NULL);
+ } else {
+ arch_remove_memory(nid, res->start, resource_size(res),
+ pgmap_altmap(pgmap));
+diff --git a/mm/page_owner.c b/mm/page_owner.c
+index addcbb2ae4e4..8088ab29bc2d 100644
+--- a/mm/page_owner.c
++++ b/mm/page_owner.c
+@@ -258,7 +258,8 @@ void pagetypeinfo_showmixedcount_print(struct seq_file *m,
+ * not matter as the mixed block count will still be correct
+ */
+ for (; pfn < end_pfn; ) {
+- if (!pfn_valid(pfn)) {
++ page = pfn_to_online_page(pfn);
++ if (!page) {
+ pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
+ continue;
+ }
+@@ -266,13 +267,13 @@ void pagetypeinfo_showmixedcount_print(struct seq_file *m,
+ block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
+ block_end_pfn = min(block_end_pfn, end_pfn);
+
+- page = pfn_to_page(pfn);
+ pageblock_mt = get_pageblock_migratetype(page);
+
+ for (; pfn < block_end_pfn; pfn++) {
+ if (!pfn_valid_within(pfn))
+ continue;
+
++ /* The pageblock is online, no need to recheck. */
+ page = pfn_to_page(pfn);
+
+ if (page_zone(page) != zone)
+diff --git a/mm/slab_common.c b/mm/slab_common.c
+index 807490fe217a..7f492e53a7db 100644
+--- a/mm/slab_common.c
++++ b/mm/slab_common.c
+@@ -178,10 +178,13 @@ static int init_memcg_params(struct kmem_cache *s,
+
+ static void destroy_memcg_params(struct kmem_cache *s)
+ {
+- if (is_root_cache(s))
++ if (is_root_cache(s)) {
+ kvfree(rcu_access_pointer(s->memcg_params.memcg_caches));
+- else
++ } else {
++ mem_cgroup_put(s->memcg_params.memcg);
++ WRITE_ONCE(s->memcg_params.memcg, NULL);
+ percpu_ref_exit(&s->memcg_params.refcnt);
++ }
+ }
+
+ static void free_memcg_params(struct rcu_head *rcu)
+@@ -253,8 +256,6 @@ static void memcg_unlink_cache(struct kmem_cache *s)
+ } else {
+ list_del(&s->memcg_params.children_node);
+ list_del(&s->memcg_params.kmem_caches_node);
+- mem_cgroup_put(s->memcg_params.memcg);
+- WRITE_ONCE(s->memcg_params.memcg, NULL);
+ }
+ }
+ #else
+diff --git a/mm/slub.c b/mm/slub.c
+index 8834563cdb4b..dac41cf0b94a 100644
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -4836,7 +4836,17 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
+ }
+ }
+
+- get_online_mems();
++ /*
++ * It is impossible to take "mem_hotplug_lock" here with "kernfs_mutex"
++ * already held which will conflict with an existing lock order:
++ *
++ * mem_hotplug_lock->slab_mutex->kernfs_mutex
++ *
++ * We don't really need mem_hotplug_lock (to hold off
++ * slab_mem_going_offline_callback) here because slab's memory hot
++ * unplug code doesn't destroy the kmem_cache->node[] data.
++ */
++
+ #ifdef CONFIG_SLUB_DEBUG
+ if (flags & SO_ALL) {
+ struct kmem_cache_node *n;
+@@ -4877,7 +4887,6 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
+ x += sprintf(buf + x, " N%d=%lu",
+ node, nodes[node]);
+ #endif
+- put_online_mems();
+ kfree(nodes);
+ return x + sprintf(buf + x, "\n");
+ }
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index a6c5d0b28321..8d03013b6c59 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -354,12 +354,13 @@ unsigned long zone_reclaimable_pages(struct zone *zone)
+ */
+ unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx)
+ {
+- unsigned long lru_size;
++ unsigned long lru_size = 0;
+ int zid;
+
+- if (!mem_cgroup_disabled())
+- lru_size = lruvec_page_state_local(lruvec, NR_LRU_BASE + lru);
+- else
++ if (!mem_cgroup_disabled()) {
++ for (zid = 0; zid < MAX_NR_ZONES; zid++)
++ lru_size += mem_cgroup_get_zone_lru_size(lruvec, lru, zid);
++ } else
+ lru_size = node_page_state(lruvec_pgdat(lruvec), NR_LRU_BASE + lru);
+
+ for (zid = zone_idx + 1; zid < MAX_NR_ZONES; zid++) {
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 982d8d12830e..d4a47c44daf0 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -5465,12 +5465,14 @@ static void skb_mod_eth_type(struct sk_buff *skb, struct ethhdr *hdr,
+ * @skb: buffer
+ * @mpls_lse: MPLS label stack entry to push
+ * @mpls_proto: ethertype of the new MPLS header (expects 0x8847 or 0x8848)
++ * @mac_len: length of the MAC header
+ *
+ * Expects skb->data at mac header.
+ *
+ * Returns 0 on success, -errno otherwise.
+ */
+-int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto)
++int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto,
++ int mac_len)
+ {
+ struct mpls_shim_hdr *lse;
+ int err;
+@@ -5487,15 +5489,15 @@ int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto)
+ return err;
+
+ if (!skb->inner_protocol) {
+- skb_set_inner_network_header(skb, skb->mac_len);
++ skb_set_inner_network_header(skb, mac_len);
+ skb_set_inner_protocol(skb, skb->protocol);
+ }
+
+ skb_push(skb, MPLS_HLEN);
+ memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb),
+- skb->mac_len);
++ mac_len);
+ skb_reset_mac_header(skb);
+- skb_set_network_header(skb, skb->mac_len);
++ skb_set_network_header(skb, mac_len);
+
+ lse = mpls_hdr(skb);
+ lse->label_stack_entry = mpls_lse;
+@@ -5514,29 +5516,30 @@ EXPORT_SYMBOL_GPL(skb_mpls_push);
+ *
+ * @skb: buffer
+ * @next_proto: ethertype of header after popped MPLS header
++ * @mac_len: length of the MAC header
+ *
+ * Expects skb->data at mac header.
+ *
+ * Returns 0 on success, -errno otherwise.
+ */
+-int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto)
++int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len)
+ {
+ int err;
+
+ if (unlikely(!eth_p_mpls(skb->protocol)))
+- return -EINVAL;
++ return 0;
+
+- err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
++ err = skb_ensure_writable(skb, mac_len + MPLS_HLEN);
+ if (unlikely(err))
+ return err;
+
+ skb_postpull_rcsum(skb, mpls_hdr(skb), MPLS_HLEN);
+ memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb),
+- skb->mac_len);
++ mac_len);
+
+ __skb_pull(skb, MPLS_HLEN);
+ skb_reset_mac_header(skb);
+- skb_set_network_header(skb, skb->mac_len);
++ skb_set_network_header(skb, mac_len);
+
+ if (skb->dev && skb->dev->type == ARPHRD_ETHER) {
+ struct ethhdr *hdr;
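skb_mpls_push() and skb_mpls_pop() now take the MAC header length from the caller instead of reading skb->mac_len, which is not reliably set on every path. A sketch of an egress caller deriving it from the header offsets, mirroring the net/sched/act_mpls.c change further down:

    /* Sketch only: push an MPLS unicast header on egress, computing the
     * MAC header length from the current header offsets. */
    int mac_len = skb_network_header(skb) - skb_mac_header(skb);
    int err = skb_mpls_push(skb, mpls_lse, htons(ETH_P_MPLS_UC), mac_len);

    if (err)
            return err;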
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index 14654876127e..621f83434b24 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -1482,7 +1482,7 @@ static bool rt_cache_route(struct fib_nh_common *nhc, struct rtable *rt)
+ prev = cmpxchg(p, orig, rt);
+ if (prev == orig) {
+ if (orig) {
+- dst_dev_put(&orig->dst);
++ rt_add_uncached_list(orig);
+ dst_release(&orig->dst);
+ }
+ } else {
+@@ -2470,14 +2470,17 @@ struct rtable *ip_route_output_key_hash_rcu(struct net *net, struct flowi4 *fl4,
+ int orig_oif = fl4->flowi4_oif;
+ unsigned int flags = 0;
+ struct rtable *rth;
+- int err = -ENETUNREACH;
++ int err;
+
+ if (fl4->saddr) {
+- rth = ERR_PTR(-EINVAL);
+ if (ipv4_is_multicast(fl4->saddr) ||
+ ipv4_is_lbcast(fl4->saddr) ||
+- ipv4_is_zeronet(fl4->saddr))
++ ipv4_is_zeronet(fl4->saddr)) {
++ rth = ERR_PTR(-EINVAL);
+ goto out;
++ }
++
++ rth = ERR_PTR(-ENETUNREACH);
+
+ /* I removed check for oif == dev_out->oif here.
+ It was wrong for two reasons:
+diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
+index a593aaf25748..2bb0b66181a7 100644
+--- a/net/ipv6/ip6_input.c
++++ b/net/ipv6/ip6_input.c
+@@ -80,8 +80,10 @@ static void ip6_sublist_rcv_finish(struct list_head *head)
+ {
+ struct sk_buff *skb, *next;
+
+- list_for_each_entry_safe(skb, next, head, list)
++ list_for_each_entry_safe(skb, next, head, list) {
++ skb_list_del_init(skb);
+ dst_input(skb);
++ }
+ }
+
+ static void ip6_list_rcv_finish(struct net *net, struct sock *sk,
+diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
+index b1438fd4d876..64b544ae9966 100644
+--- a/net/mac80211/debugfs_netdev.c
++++ b/net/mac80211/debugfs_netdev.c
+@@ -487,9 +487,14 @@ static ssize_t ieee80211_if_fmt_aqm(
+ const struct ieee80211_sub_if_data *sdata, char *buf, int buflen)
+ {
+ struct ieee80211_local *local = sdata->local;
+- struct txq_info *txqi = to_txq_info(sdata->vif.txq);
++ struct txq_info *txqi;
+ int len;
+
++ if (!sdata->vif.txq)
++ return 0;
++
++ txqi = to_txq_info(sdata->vif.txq);
++
+ spin_lock_bh(&local->fq.lock);
+ rcu_read_lock();
+
+@@ -658,7 +663,9 @@ static void add_common_files(struct ieee80211_sub_if_data *sdata)
+ DEBUGFS_ADD(rc_rateidx_vht_mcs_mask_5ghz);
+ DEBUGFS_ADD(hw_queues);
+
+- if (sdata->local->ops->wake_tx_queue)
++ if (sdata->local->ops->wake_tx_queue &&
++ sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE &&
++ sdata->vif.type != NL80211_IFTYPE_NAN)
+ DEBUGFS_ADD(aqm);
+ }
+
+diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
+index 4c888dc9bd81..a826f9ccc03f 100644
+--- a/net/mac80211/mlme.c
++++ b/net/mac80211/mlme.c
+@@ -2629,7 +2629,8 @@ struct sk_buff *ieee80211_ap_probereq_get(struct ieee80211_hw *hw,
+
+ rcu_read_lock();
+ ssid = ieee80211_bss_get_ie(cbss, WLAN_EID_SSID);
+- if (WARN_ON_ONCE(ssid == NULL))
++ if (WARN_ONCE(!ssid || ssid[1] > IEEE80211_MAX_SSID_LEN,
++ "invalid SSID element (len=%d)", ssid ? ssid[1] : -1))
+ ssid_len = 0;
+ else
+ ssid_len = ssid[1];
+@@ -5227,7 +5228,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
+
+ rcu_read_lock();
+ ssidie = ieee80211_bss_get_ie(req->bss, WLAN_EID_SSID);
+- if (!ssidie) {
++ if (!ssidie || ssidie[1] > sizeof(assoc_data->ssid)) {
+ rcu_read_unlock();
+ kfree(assoc_data);
+ return -EINVAL;
+diff --git a/net/netfilter/nft_connlimit.c b/net/netfilter/nft_connlimit.c
+index af1497ab9464..69d6173f91e2 100644
+--- a/net/netfilter/nft_connlimit.c
++++ b/net/netfilter/nft_connlimit.c
+@@ -218,8 +218,13 @@ static void nft_connlimit_destroy_clone(const struct nft_ctx *ctx,
+ static bool nft_connlimit_gc(struct net *net, const struct nft_expr *expr)
+ {
+ struct nft_connlimit *priv = nft_expr_priv(expr);
++ bool ret;
+
+- return nf_conncount_gc_list(net, &priv->list);
++ local_bh_disable();
++ ret = nf_conncount_gc_list(net, &priv->list);
++ local_bh_enable();
++
++ return ret;
+ }
+
+ static struct nft_expr_type nft_connlimit_type;
+diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
+index 3572e11b6f21..1c77f520f474 100644
+--- a/net/openvswitch/actions.c
++++ b/net/openvswitch/actions.c
+@@ -165,7 +165,8 @@ static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
+ {
+ int err;
+
+- err = skb_mpls_push(skb, mpls->mpls_lse, mpls->mpls_ethertype);
++ err = skb_mpls_push(skb, mpls->mpls_lse, mpls->mpls_ethertype,
++ skb->mac_len);
+ if (err)
+ return err;
+
+@@ -178,7 +179,7 @@ static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key,
+ {
+ int err;
+
+- err = skb_mpls_pop(skb, ethertype);
++ err = skb_mpls_pop(skb, ethertype, skb->mac_len);
+ if (err)
+ return err;
+
+diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c
+index c97ebdc043e4..48f67a9b1037 100644
+--- a/net/rxrpc/peer_event.c
++++ b/net/rxrpc/peer_event.c
+@@ -147,10 +147,16 @@ void rxrpc_error_report(struct sock *sk)
+ {
+ struct sock_exterr_skb *serr;
+ struct sockaddr_rxrpc srx;
+- struct rxrpc_local *local = sk->sk_user_data;
++ struct rxrpc_local *local;
+ struct rxrpc_peer *peer;
+ struct sk_buff *skb;
+
++ rcu_read_lock();
++ local = rcu_dereference_sk_user_data(sk);
++ if (unlikely(!local)) {
++ rcu_read_unlock();
++ return;
++ }
+ _enter("%p{%d}", sk, local->debug_id);
+
+ /* Clear the outstanding error value on the socket so that it doesn't
+@@ -160,6 +166,7 @@ void rxrpc_error_report(struct sock *sk)
+
+ skb = sock_dequeue_err_skb(sk);
+ if (!skb) {
++ rcu_read_unlock();
+ _leave("UDP socket errqueue empty");
+ return;
+ }
+@@ -167,11 +174,11 @@ void rxrpc_error_report(struct sock *sk)
+ serr = SKB_EXT_ERR(skb);
+ if (!skb->len && serr->ee.ee_origin == SO_EE_ORIGIN_TIMESTAMPING) {
+ _leave("UDP empty message");
++ rcu_read_unlock();
+ rxrpc_free_skb(skb, rxrpc_skb_freed);
+ return;
+ }
+
+- rcu_read_lock();
+ peer = rxrpc_lookup_peer_icmp_rcu(local, skb, &srx);
+ if (peer && !rxrpc_get_peer_maybe(peer))
+ peer = NULL;
+diff --git a/net/sched/act_api.c b/net/sched/act_api.c
+index 2558f00f6b3e..69d4676a402f 100644
+--- a/net/sched/act_api.c
++++ b/net/sched/act_api.c
+@@ -832,8 +832,7 @@ static struct tc_cookie *nla_memdup_cookie(struct nlattr **tb)
+ }
+
+ static const struct nla_policy tcf_action_policy[TCA_ACT_MAX + 1] = {
+- [TCA_ACT_KIND] = { .type = NLA_NUL_STRING,
+- .len = IFNAMSIZ - 1 },
++ [TCA_ACT_KIND] = { .type = NLA_STRING },
+ [TCA_ACT_INDEX] = { .type = NLA_U32 },
+ [TCA_ACT_COOKIE] = { .type = NLA_BINARY,
+ .len = TC_COOKIE_MAX_SIZE },
+@@ -865,8 +864,10 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
+ NL_SET_ERR_MSG(extack, "TC action kind must be specified");
+ goto err_out;
+ }
+- nla_strlcpy(act_name, kind, IFNAMSIZ);
+-
++ if (nla_strlcpy(act_name, kind, IFNAMSIZ) >= IFNAMSIZ) {
++ NL_SET_ERR_MSG(extack, "TC action name too long");
++ goto err_out;
++ }
+ if (tb[TCA_ACT_COOKIE]) {
+ cookie = nla_memdup_cookie(tb);
+ if (!cookie) {
+@@ -1352,11 +1353,16 @@ static int tcf_action_add(struct net *net, struct nlattr *nla,
+ struct netlink_ext_ack *extack)
+ {
+ size_t attr_size = 0;
+- int ret = 0;
++ int loop, ret;
+ struct tc_action *actions[TCA_ACT_MAX_PRIO] = {};
+
+- ret = tcf_action_init(net, NULL, nla, NULL, NULL, ovr, 0, actions,
+- &attr_size, true, extack);
++ for (loop = 0; loop < 10; loop++) {
++ ret = tcf_action_init(net, NULL, nla, NULL, NULL, ovr, 0,
++ actions, &attr_size, true, extack);
++ if (ret != -EAGAIN)
++ break;
++ }
++
+ if (ret < 0)
+ return ret;
+ ret = tcf_add_notify(net, n, actions, portid, attr_size, extack);
+@@ -1406,11 +1412,8 @@ static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n,
+ */
+ if (n->nlmsg_flags & NLM_F_REPLACE)
+ ovr = 1;
+-replay:
+ ret = tcf_action_add(net, tca[TCA_ACT_TAB], n, portid, ovr,
+ extack);
+- if (ret == -EAGAIN)
+- goto replay;
+ break;
+ case RTM_DELACTION:
+ ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
+diff --git a/net/sched/act_mpls.c b/net/sched/act_mpls.c
+index e168df0e008a..4cf6c553bb0b 100644
+--- a/net/sched/act_mpls.c
++++ b/net/sched/act_mpls.c
+@@ -55,7 +55,7 @@ static int tcf_mpls_act(struct sk_buff *skb, const struct tc_action *a,
+ struct tcf_mpls *m = to_mpls(a);
+ struct tcf_mpls_params *p;
+ __be32 new_lse;
+- int ret;
++ int ret, mac_len;
+
+ tcf_lastuse_update(&m->tcf_tm);
+ bstats_cpu_update(this_cpu_ptr(m->common.cpu_bstats), skb);
+@@ -63,8 +63,12 @@ static int tcf_mpls_act(struct sk_buff *skb, const struct tc_action *a,
+ /* Ensure 'data' points at mac_header prior calling mpls manipulating
+ * functions.
+ */
+- if (skb_at_tc_ingress(skb))
++ if (skb_at_tc_ingress(skb)) {
+ skb_push_rcsum(skb, skb->mac_len);
++ mac_len = skb->mac_len;
++ } else {
++ mac_len = skb_network_header(skb) - skb_mac_header(skb);
++ }
+
+ ret = READ_ONCE(m->tcf_action);
+
+@@ -72,12 +76,12 @@ static int tcf_mpls_act(struct sk_buff *skb, const struct tc_action *a,
+
+ switch (p->tcfm_action) {
+ case TCA_MPLS_ACT_POP:
+- if (skb_mpls_pop(skb, p->tcfm_proto))
++ if (skb_mpls_pop(skb, p->tcfm_proto, mac_len))
+ goto drop;
+ break;
+ case TCA_MPLS_ACT_PUSH:
+ new_lse = tcf_mpls_get_lse(NULL, p, !eth_p_mpls(skb->protocol));
+- if (skb_mpls_push(skb, new_lse, p->tcfm_proto))
++ if (skb_mpls_push(skb, new_lse, p->tcfm_proto, mac_len))
+ goto drop;
+ break;
+ case TCA_MPLS_ACT_MODIFY:
+diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
+index 9aef93300f1c..6b12883e04b8 100644
+--- a/net/sched/cls_api.c
++++ b/net/sched/cls_api.c
+@@ -160,11 +160,22 @@ static inline u32 tcf_auto_prio(struct tcf_proto *tp)
+ return TC_H_MAJ(first);
+ }
+
++static bool tcf_proto_check_kind(struct nlattr *kind, char *name)
++{
++ if (kind)
++ return nla_strlcpy(name, kind, IFNAMSIZ) >= IFNAMSIZ;
++ memset(name, 0, IFNAMSIZ);
++ return false;
++}
++
+ static bool tcf_proto_is_unlocked(const char *kind)
+ {
+ const struct tcf_proto_ops *ops;
+ bool ret;
+
++ if (strlen(kind) == 0)
++ return false;
++
+ ops = tcf_proto_lookup_ops(kind, false, NULL);
+ /* On error return false to take rtnl lock. Proto lookup/create
+ * functions will perform lookup again and properly handle errors.
+@@ -1976,6 +1987,7 @@ static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
+ {
+ struct net *net = sock_net(skb->sk);
+ struct nlattr *tca[TCA_MAX + 1];
++ char name[IFNAMSIZ];
+ struct tcmsg *t;
+ u32 protocol;
+ u32 prio;
+@@ -2032,13 +2044,19 @@ replay:
+ if (err)
+ return err;
+
++ if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
++ NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
++ err = -EINVAL;
++ goto errout;
++ }
++
+ /* Take rtnl mutex if rtnl_held was set to true on previous iteration,
+ * block is shared (no qdisc found), qdisc is not unlocked, classifier
+ * type is not specified, classifier is not unlocked.
+ */
+ if (rtnl_held ||
+ (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
+- !tca[TCA_KIND] || !tcf_proto_is_unlocked(nla_data(tca[TCA_KIND]))) {
++ !tcf_proto_is_unlocked(name)) {
+ rtnl_held = true;
+ rtnl_lock();
+ }
+@@ -2196,6 +2214,7 @@ static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
+ {
+ struct net *net = sock_net(skb->sk);
+ struct nlattr *tca[TCA_MAX + 1];
++ char name[IFNAMSIZ];
+ struct tcmsg *t;
+ u32 protocol;
+ u32 prio;
+@@ -2235,13 +2254,18 @@ static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
+ if (err)
+ return err;
+
++ if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
++ NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
++ err = -EINVAL;
++ goto errout;
++ }
+ /* Take rtnl mutex if flushing whole chain, block is shared (no qdisc
+ * found), qdisc is not unlocked, classifier type is not specified,
+ * classifier is not unlocked.
+ */
+ if (!prio ||
+ (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
+- !tca[TCA_KIND] || !tcf_proto_is_unlocked(nla_data(tca[TCA_KIND]))) {
++ !tcf_proto_is_unlocked(name)) {
+ rtnl_held = true;
+ rtnl_lock();
+ }
+@@ -2349,6 +2373,7 @@ static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
+ {
+ struct net *net = sock_net(skb->sk);
+ struct nlattr *tca[TCA_MAX + 1];
++ char name[IFNAMSIZ];
+ struct tcmsg *t;
+ u32 protocol;
+ u32 prio;
+@@ -2385,12 +2410,17 @@ static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
+ if (err)
+ return err;
+
++ if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
++ NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
++ err = -EINVAL;
++ goto errout;
++ }
+ /* Take rtnl mutex if block is shared (no qdisc found), qdisc is not
+ * unlocked, classifier type is not specified, classifier is not
+ * unlocked.
+ */
+ if ((q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
+- !tca[TCA_KIND] || !tcf_proto_is_unlocked(nla_data(tca[TCA_KIND]))) {
++ !tcf_proto_is_unlocked(name)) {
+ rtnl_held = true;
+ rtnl_lock();
+ }
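With TCA_KIND relaxed to a plain NLA_STRING, the length guarantee moves into tcf_proto_check_kind(): nla_strlcpy() returns the length of the source attribute, so a result >= IFNAMSIZ means the name did not fit the buffer. The idiom in isolation:

    char name[IFNAMSIZ];

    /* nla_strlcpy() NUL-terminates the destination and returns the source
     * length; >= IFNAMSIZ therefore means the attribute was truncated. */
    if (nla_strlcpy(name, tb[TCA_KIND], IFNAMSIZ) >= IFNAMSIZ)
            return -EINVAL;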
+diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
+index 81d58b280612..1047825d9f48 100644
+--- a/net/sched/sch_api.c
++++ b/net/sched/sch_api.c
+@@ -1390,8 +1390,7 @@ check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
+ }
+
+ const struct nla_policy rtm_tca_policy[TCA_MAX + 1] = {
+- [TCA_KIND] = { .type = NLA_NUL_STRING,
+- .len = IFNAMSIZ - 1 },
++ [TCA_KIND] = { .type = NLA_STRING },
+ [TCA_RATE] = { .type = NLA_BINARY,
+ .len = sizeof(struct tc_estimator) },
+ [TCA_STAB] = { .type = NLA_NESTED },
+diff --git a/net/sched/sch_etf.c b/net/sched/sch_etf.c
+index cebfb65d8556..b1da5589a0c6 100644
+--- a/net/sched/sch_etf.c
++++ b/net/sched/sch_etf.c
+@@ -177,7 +177,7 @@ static int etf_enqueue_timesortedlist(struct sk_buff *nskb, struct Qdisc *sch,
+
+ parent = *p;
+ skb = rb_to_skb(parent);
+- if (ktime_after(txtime, skb->tstamp)) {
++ if (ktime_compare(txtime, skb->tstamp) >= 0) {
+ p = &parent->rb_right;
+ leftmost = false;
+ } else {
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index b083d4e66230..8fd7b0e6ce9f 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -9353,7 +9353,7 @@ struct proto sctp_prot = {
+ .backlog_rcv = sctp_backlog_rcv,
+ .hash = sctp_hash,
+ .unhash = sctp_unhash,
+- .get_port = sctp_get_port,
++ .no_autobind = true,
+ .obj_size = sizeof(struct sctp_sock),
+ .useroffset = offsetof(struct sctp_sock, subscribe),
+ .usersize = offsetof(struct sctp_sock, initmsg) -
+@@ -9395,7 +9395,7 @@ struct proto sctpv6_prot = {
+ .backlog_rcv = sctp_backlog_rcv,
+ .hash = sctp_hash,
+ .unhash = sctp_unhash,
+- .get_port = sctp_get_port,
++ .no_autobind = true,
+ .obj_size = sizeof(struct sctp6_sock),
+ .useroffset = offsetof(struct sctp6_sock, sctp.subscribe),
+ .usersize = offsetof(struct sctp6_sock, sctp.initmsg) -
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index f03459ddc840..c2ce582ea143 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -6184,6 +6184,9 @@ static int nl80211_del_mpath(struct sk_buff *skb, struct genl_info *info)
+ if (!rdev->ops->del_mpath)
+ return -EOPNOTSUPP;
+
++ if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT)
++ return -EOPNOTSUPP;
++
+ return rdev_del_mpath(rdev, dev, dst);
+ }
+
+diff --git a/net/wireless/wext-sme.c b/net/wireless/wext-sme.c
+index c67d7a82ab13..73fd0eae08ca 100644
+--- a/net/wireless/wext-sme.c
++++ b/net/wireless/wext-sme.c
+@@ -202,6 +202,7 @@ int cfg80211_mgd_wext_giwessid(struct net_device *dev,
+ struct iw_point *data, char *ssid)
+ {
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
++ int ret = 0;
+
+ /* call only for station! */
+ if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION))
+@@ -219,7 +220,10 @@ int cfg80211_mgd_wext_giwessid(struct net_device *dev,
+ if (ie) {
+ data->flags = 1;
+ data->length = ie[1];
+- memcpy(ssid, ie + 2, data->length);
++ if (data->length > IW_ESSID_MAX_SIZE)
++ ret = -EINVAL;
++ else
++ memcpy(ssid, ie + 2, data->length);
+ }
+ rcu_read_unlock();
+ } else if (wdev->wext.connect.ssid && wdev->wext.connect.ssid_len) {
+@@ -229,7 +233,7 @@ int cfg80211_mgd_wext_giwessid(struct net_device *dev,
+ }
+ wdev_unlock(wdev);
+
+- return 0;
++ return ret;
+ }
+
+ int cfg80211_mgd_wext_siwap(struct net_device *dev,
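cfg80211_mgd_wext_giwessid() now refuses to copy an SSID element longer than the fixed IW_ESSID_MAX_SIZE buffer wext callers hand in. The defensive shape, stated on its own (ie points at the element, ie[1] is its length octet):

    /* Never trust an IE's length octet: clamp it against the destination
     * buffer before the memcpy(). */
    if (ie[1] > IW_ESSID_MAX_SIZE)
            return -EINVAL;
    memcpy(ssid, ie + 2, ie[1]);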
+diff --git a/scripts/namespace.pl b/scripts/namespace.pl
+index 6135574a6f39..1da7bca201a4 100755
+--- a/scripts/namespace.pl
++++ b/scripts/namespace.pl
+@@ -65,13 +65,14 @@
+ use warnings;
+ use strict;
+ use File::Find;
++use File::Spec;
+
+ my $nm = ($ENV{'NM'} || "nm") . " -p";
+ my $objdump = ($ENV{'OBJDUMP'} || "objdump") . " -s -j .comment";
+-my $srctree = "";
+-my $objtree = "";
+-$srctree = "$ENV{'srctree'}/" if (exists($ENV{'srctree'}));
+-$objtree = "$ENV{'objtree'}/" if (exists($ENV{'objtree'}));
++my $srctree = File::Spec->curdir();
++my $objtree = File::Spec->curdir();
++$srctree = File::Spec->rel2abs($ENV{'srctree'}) if (exists($ENV{'srctree'}));
++$objtree = File::Spec->rel2abs($ENV{'objtree'}) if (exists($ENV{'objtree'}));
+
+ if ($#ARGV != -1) {
+ print STDERR "usage: $0 takes no parameters\n";
+@@ -231,9 +232,9 @@ sub do_nm
+ }
+ ($source = $basename) =~ s/\.o$//;
+ if (-e "$source.c" || -e "$source.S") {
+- $source = "$objtree$File::Find::dir/$source";
++ $source = File::Spec->catfile($objtree, $File::Find::dir, $source)
+ } else {
+- $source = "$srctree$File::Find::dir/$source";
++ $source = File::Spec->catfile($srctree, $File::Find::dir, $source)
+ }
+ if (! -e "$source.c" && ! -e "$source.S") {
+ # No obvious source, exclude the object if it is conglomerate
+diff --git a/security/safesetid/securityfs.c b/security/safesetid/securityfs.c
+index d568e17dd773..74a13d432ed8 100644
+--- a/security/safesetid/securityfs.c
++++ b/security/safesetid/securityfs.c
+@@ -187,7 +187,8 @@ out_free_rule:
+ out_free_buf:
+ kfree(buf);
+ out_free_pol:
+- release_ruleset(pol);
++ if (pol)
++ release_ruleset(pol);
+ return err;
+ }
+
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index 36240def9bf5..00796c7727ea 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -3307,6 +3307,8 @@ static int patch_nvhdmi(struct hda_codec *codec)
+ nvhdmi_chmap_cea_alloc_validate_get_type;
+ spec->chmap.ops.chmap_validate = nvhdmi_chmap_validate;
+
++ codec->link_down_at_suspend = 1;
++
+ return 0;
+ }
+
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 36aee8ad2054..26249c607f2c 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -393,6 +393,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
+ case 0x10ec0700:
+ case 0x10ec0701:
+ case 0x10ec0703:
++ case 0x10ec0711:
+ alc_update_coef_idx(codec, 0x10, 1<<15, 0);
+ break;
+ case 0x10ec0662:
+@@ -5867,6 +5868,7 @@ enum {
+ ALC225_FIXUP_WYSE_AUTO_MUTE,
+ ALC225_FIXUP_WYSE_DISABLE_MIC_VREF,
+ ALC286_FIXUP_ACER_AIO_HEADSET_MIC,
++ ALC256_FIXUP_ASUS_HEADSET_MIC,
+ ALC256_FIXUP_ASUS_MIC_NO_PRESENCE,
+ ALC299_FIXUP_PREDATOR_SPK,
+ ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC,
+@@ -6901,6 +6903,15 @@ static const struct hda_fixup alc269_fixups[] = {
+ .chained = true,
+ .chain_id = ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE
+ },
++ [ALC256_FIXUP_ASUS_HEADSET_MIC] = {
++ .type = HDA_FIXUP_PINS,
++ .v.pins = (const struct hda_pintbl[]) {
++ { 0x19, 0x03a11020 }, /* headset mic with jack detect */
++ { }
++ },
++ .chained = true,
++ .chain_id = ALC256_FIXUP_ASUS_HEADSET_MODE
++ },
+ [ALC256_FIXUP_ASUS_MIC_NO_PRESENCE] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+@@ -7097,6 +7108,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
+ SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
+ SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC),
++ SND_PCI_QUIRK(0x1043, 0x18b1, "Asus MJ401TA", ALC256_FIXUP_ASUS_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
+ SND_PCI_QUIRK(0x1043, 0x1a30, "ASUS X705UD", ALC256_FIXUP_ASUS_MIC),
+ SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC),
+@@ -7965,6 +7977,7 @@ static int patch_alc269(struct hda_codec *codec)
+ case 0x10ec0700:
+ case 0x10ec0701:
+ case 0x10ec0703:
++ case 0x10ec0711:
+ spec->codec_variant = ALC269_TYPE_ALC700;
+ spec->gen.mixer_nid = 0; /* ALC700 does not have any loopback mixer path */
+ alc_update_coef_idx(codec, 0x4a, 1 << 15, 0); /* Combo jack auto trigger control */
+@@ -9105,6 +9118,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = {
+ HDA_CODEC_ENTRY(0x10ec0700, "ALC700", patch_alc269),
+ HDA_CODEC_ENTRY(0x10ec0701, "ALC701", patch_alc269),
+ HDA_CODEC_ENTRY(0x10ec0703, "ALC703", patch_alc269),
++ HDA_CODEC_ENTRY(0x10ec0711, "ALC711", patch_alc269),
+ HDA_CODEC_ENTRY(0x10ec0867, "ALC891", patch_alc662),
+ HDA_CODEC_ENTRY(0x10ec0880, "ALC880", patch_alc880),
+ HDA_CODEC_ENTRY(0x10ec0882, "ALC882", patch_alc882),
+diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c
+index 56e8dae9a15c..217f2aa06139 100644
+--- a/sound/soc/sh/rcar/core.c
++++ b/sound/soc/sh/rcar/core.c
+@@ -761,6 +761,7 @@ static int rsnd_soc_dai_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+ }
+
+ /* set format */
++ rdai->bit_clk_inv = 0;
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S:
+ rdai->sys_delay = 0;
+diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
+index 33cd26763c0e..ff5ab24f3bd1 100644
+--- a/sound/usb/pcm.c
++++ b/sound/usb/pcm.c
+@@ -348,6 +348,9 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs,
+ ep = 0x84;
+ ifnum = 0;
+ goto add_sync_ep_from_ifnum;
++ case USB_ID(0x0582, 0x01d8): /* BOSS Katana */
++ /* BOSS Katana amplifiers do not need quirks */
++ return 0;
+ }
+
+ if (attr == USB_ENDPOINT_SYNC_ASYNC &&
+diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
+index ba7849751989..fc8aeb224c03 100644
+--- a/tools/testing/selftests/kvm/Makefile
++++ b/tools/testing/selftests/kvm/Makefile
+@@ -46,7 +46,7 @@ CFLAGS += -Wall -Wstrict-prototypes -Wuninitialized -O2 -g -std=gnu99 \
+ -I$(LINUX_HDR_PATH) -Iinclude -I$(<D) -Iinclude/$(UNAME_M) -I..
+
+ no-pie-option := $(call try-run, echo 'int main() { return 0; }' | \
+- $(CC) -Werror $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS) -no-pie -x c - -o "$$TMP", -no-pie)
++ $(CC) -Werror -no-pie -x c - -o "$$TMP", -no-pie)
+
+ # On s390, build the testcases KVM-enabled
+ pgste-option = $(call try-run, echo 'int main() { return 0; }' | \