author     Mike Pagano <mpagano@gentoo.org>  2021-06-10 08:08:56 -0400
committer  Mike Pagano <mpagano@gentoo.org>  2021-06-10 08:08:56 -0400
commit     1d2a2e1dbbbafa2ae51bb6ee4258ae1441d706c3 (patch)
tree       4ac5c72e627d7f3d3ab2453721f5b5f58ffd7fdc
parent     CONFIG opt to enable a subset of Kernel Self Protection Project settings (diff)
download   linux-patches-5.10-46.tar.gz
           linux-patches-5.10-46.tar.bz2
           linux-patches-5.10-46.zip
Linux patch 5.10.43 (tag: 5.10-46)
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
-rw-r--r--  0000_README              |    4
-rw-r--r--  1042_linux-5.10.43.patch | 5652
2 files changed, 5656 insertions, 0 deletions
diff --git a/0000_README b/0000_README
index fb747998..f258b9d5 100644
--- a/0000_README
+++ b/0000_README
@@ -211,6 +211,10 @@ Patch: 1041_linux-5.10.42.patch
From: http://www.kernel.org
Desc: Linux 5.10.42
+Patch: 1042_linux-5.10.43.patch
+From: http://www.kernel.org
+Desc: Linux 5.10.43
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1042_linux-5.10.43.patch b/1042_linux-5.10.43.patch
new file mode 100644
index 00000000..7d996263
--- /dev/null
+++ b/1042_linux-5.10.43.patch
@@ -0,0 +1,5652 @@
+diff --git a/Makefile b/Makefile
+index 290903d0e7dab..ec9ee8032a985 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 10
+-SUBLEVEL = 42
++SUBLEVEL = 43
+ EXTRAVERSION =
+ NAME = Dare mighty things
+
+diff --git a/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi b/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi
+index 7d2c72562c735..9148a01ed6d9f 100644
+--- a/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi
++++ b/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi
+@@ -105,9 +105,13 @@
+ phy-reset-gpios = <&gpio1 25 GPIO_ACTIVE_LOW>;
+ phy-reset-duration = <20>;
+ phy-supply = <&sw2_reg>;
+- phy-handle = <&ethphy0>;
+ status = "okay";
+
++ fixed-link {
++ speed = <1000>;
++ full-duplex;
++ };
++
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+diff --git a/arch/arm/boot/dts/imx6q-dhcom-som.dtsi b/arch/arm/boot/dts/imx6q-dhcom-som.dtsi
+index 236fc205c3890..d0768ae429faa 100644
+--- a/arch/arm/boot/dts/imx6q-dhcom-som.dtsi
++++ b/arch/arm/boot/dts/imx6q-dhcom-som.dtsi
+@@ -406,6 +406,18 @@
+ vin-supply = <&sw1_reg>;
+ };
+
++&reg_pu {
++ vin-supply = <&sw1_reg>;
++};
++
++&reg_vdd1p1 {
++ vin-supply = <&sw2_reg>;
++};
++
++&reg_vdd2p5 {
++ vin-supply = <&sw2_reg>;
++};
++
+ &uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1>;
+diff --git a/arch/arm/boot/dts/imx6qdl-emcon-avari.dtsi b/arch/arm/boot/dts/imx6qdl-emcon-avari.dtsi
+index 828cf3e39784a..c4e146f3341bb 100644
+--- a/arch/arm/boot/dts/imx6qdl-emcon-avari.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-emcon-avari.dtsi
+@@ -126,7 +126,7 @@
+ compatible = "nxp,pca8574";
+ reg = <0x3a>;
+ gpio-controller;
+- #gpio-cells = <1>;
++ #gpio-cells = <2>;
+ };
+ };
+
+diff --git a/arch/arm/boot/dts/imx7d-meerkat96.dts b/arch/arm/boot/dts/imx7d-meerkat96.dts
+index 5339210b63d0f..dd8003bd1fc09 100644
+--- a/arch/arm/boot/dts/imx7d-meerkat96.dts
++++ b/arch/arm/boot/dts/imx7d-meerkat96.dts
+@@ -193,7 +193,7 @@
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usdhc1>;
+ keep-power-in-suspend;
+- tuning-step = <2>;
++ fsl,tuning-step = <2>;
+ vmmc-supply = <&reg_3p3v>;
+ no-1-8-v;
+ broken-cd;
+diff --git a/arch/arm/boot/dts/imx7d-pico.dtsi b/arch/arm/boot/dts/imx7d-pico.dtsi
+index e57da0d32b98d..e519897fae082 100644
+--- a/arch/arm/boot/dts/imx7d-pico.dtsi
++++ b/arch/arm/boot/dts/imx7d-pico.dtsi
+@@ -351,7 +351,7 @@
+ pinctrl-2 = <&pinctrl_usdhc1_200mhz>;
+ cd-gpios = <&gpio5 0 GPIO_ACTIVE_LOW>;
+ bus-width = <4>;
+- tuning-step = <2>;
++ fsl,tuning-step = <2>;
+ vmmc-supply = <&reg_3p3v>;
+ wakeup-source;
+ no-1-8-v;
+diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var4.dts b/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var4.dts
+index df212ed5bb942..e65d1c477e2ce 100644
+--- a/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var4.dts
++++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var4.dts
+@@ -31,11 +31,10 @@
+ reg = <0x4>;
+ eee-broken-1000t;
+ eee-broken-100tx;
+-
+ qca,clk-out-frequency = <125000000>;
+ qca,clk-out-strength = <AR803X_STRENGTH_FULL>;
+-
+- vddio-supply = <&vddh>;
++ qca,keep-pll-enabled;
++ vddio-supply = <&vddio>;
+
+ vddio: vddio-regulator {
+ regulator-name = "VDDIO";
+diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
+index 62f4dcb96e70d..f3b58bb9b8408 100644
+--- a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
++++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
+@@ -192,8 +192,8 @@
+ ddr: memory-controller@1080000 {
+ compatible = "fsl,qoriq-memory-controller";
+ reg = <0x0 0x1080000 0x0 0x1000>;
+- interrupts = <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>;
+- big-endian;
++ interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
++ little-endian;
+ };
+
+ dcfg: syscon@1e00000 {
+diff --git a/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra.dtsi b/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra.dtsi
+index fa7a041ffcfde..825c83c71a9f1 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra.dtsi
+@@ -45,8 +45,8 @@
+ reg_12p0_main: regulator-12p0-main {
+ compatible = "regulator-fixed";
+ regulator-name = "12V_MAIN";
+- regulator-min-microvolt = <5000000>;
+- regulator-max-microvolt = <5000000>;
++ regulator-min-microvolt = <12000000>;
++ regulator-max-microvolt = <12000000>;
+ regulator-always-on;
+ };
+
+diff --git a/arch/arm64/boot/dts/ti/k3-j7200-main.dtsi b/arch/arm64/boot/dts/ti/k3-j7200-main.dtsi
+index 72d6496e88dd4..689538244392c 100644
+--- a/arch/arm64/boot/dts/ti/k3-j7200-main.dtsi
++++ b/arch/arm64/boot/dts/ti/k3-j7200-main.dtsi
+@@ -78,6 +78,8 @@
+ #size-cells = <2>;
+ ranges = <0x00 0x30000000 0x00 0x30000000 0x00 0x0c400000>;
+ ti,sci-dev-id = <199>;
++ dma-coherent;
++ dma-ranges;
+
+ main_navss_intr: interrupt-controller1 {
+ compatible = "ti,sci-intr";
+diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
+index b246a4acba416..568f11e23830c 100644
+--- a/arch/arm64/kvm/sys_regs.c
++++ b/arch/arm64/kvm/sys_regs.c
+@@ -464,14 +464,14 @@ static bool trap_bvr(struct kvm_vcpu *vcpu,
+ struct sys_reg_params *p,
+ const struct sys_reg_desc *rd)
+ {
+- u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
++ u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];
+
+ if (p->is_write)
+ reg_to_dbg(vcpu, p, dbg_reg);
+ else
+ dbg_to_reg(vcpu, p, dbg_reg);
+
+- trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
++ trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);
+
+ return true;
+ }
+@@ -479,7 +479,7 @@ static bool trap_bvr(struct kvm_vcpu *vcpu,
+ static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ const struct kvm_one_reg *reg, void __user *uaddr)
+ {
+- __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
++ __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];
+
+ if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
+ return -EFAULT;
+@@ -489,7 +489,7 @@ static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ const struct kvm_one_reg *reg, void __user *uaddr)
+ {
+- __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
++ __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];
+
+ if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
+ return -EFAULT;
+@@ -499,21 +499,21 @@ static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ static void reset_bvr(struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd)
+ {
+- vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg] = rd->val;
++ vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm] = rd->val;
+ }
+
+ static bool trap_bcr(struct kvm_vcpu *vcpu,
+ struct sys_reg_params *p,
+ const struct sys_reg_desc *rd)
+ {
+- u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
++ u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];
+
+ if (p->is_write)
+ reg_to_dbg(vcpu, p, dbg_reg);
+ else
+ dbg_to_reg(vcpu, p, dbg_reg);
+
+- trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
++ trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);
+
+ return true;
+ }
+@@ -521,7 +521,7 @@ static bool trap_bcr(struct kvm_vcpu *vcpu,
+ static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ const struct kvm_one_reg *reg, void __user *uaddr)
+ {
+- __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
++ __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];
+
+ if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
+ return -EFAULT;
+@@ -532,7 +532,7 @@ static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ const struct kvm_one_reg *reg, void __user *uaddr)
+ {
+- __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
++ __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];
+
+ if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
+ return -EFAULT;
+@@ -542,22 +542,22 @@ static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ static void reset_bcr(struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd)
+ {
+- vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg] = rd->val;
++ vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm] = rd->val;
+ }
+
+ static bool trap_wvr(struct kvm_vcpu *vcpu,
+ struct sys_reg_params *p,
+ const struct sys_reg_desc *rd)
+ {
+- u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
++ u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];
+
+ if (p->is_write)
+ reg_to_dbg(vcpu, p, dbg_reg);
+ else
+ dbg_to_reg(vcpu, p, dbg_reg);
+
+- trace_trap_reg(__func__, rd->reg, p->is_write,
+- vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]);
++ trace_trap_reg(__func__, rd->CRm, p->is_write,
++ vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm]);
+
+ return true;
+ }
+@@ -565,7 +565,7 @@ static bool trap_wvr(struct kvm_vcpu *vcpu,
+ static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ const struct kvm_one_reg *reg, void __user *uaddr)
+ {
+- __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
++ __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];
+
+ if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
+ return -EFAULT;
+@@ -575,7 +575,7 @@ static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ const struct kvm_one_reg *reg, void __user *uaddr)
+ {
+- __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
++ __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];
+
+ if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
+ return -EFAULT;
+@@ -585,21 +585,21 @@ static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ static void reset_wvr(struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd)
+ {
+- vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg] = rd->val;
++ vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm] = rd->val;
+ }
+
+ static bool trap_wcr(struct kvm_vcpu *vcpu,
+ struct sys_reg_params *p,
+ const struct sys_reg_desc *rd)
+ {
+- u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
++ u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];
+
+ if (p->is_write)
+ reg_to_dbg(vcpu, p, dbg_reg);
+ else
+ dbg_to_reg(vcpu, p, dbg_reg);
+
+- trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
++ trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);
+
+ return true;
+ }
+@@ -607,7 +607,7 @@ static bool trap_wcr(struct kvm_vcpu *vcpu,
+ static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ const struct kvm_one_reg *reg, void __user *uaddr)
+ {
+- __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
++ __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];
+
+ if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
+ return -EFAULT;
+@@ -617,7 +617,7 @@ static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ const struct kvm_one_reg *reg, void __user *uaddr)
+ {
+- __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
++ __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];
+
+ if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
+ return -EFAULT;
+@@ -627,7 +627,7 @@ static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ static void reset_wcr(struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd)
+ {
+- vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg] = rd->val;
++ vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm] = rd->val;
+ }
+
+ static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
+diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
+index 01ab2163659e4..e8c2a6373157d 100644
+--- a/arch/powerpc/kernel/kprobes.c
++++ b/arch/powerpc/kernel/kprobes.c
+@@ -108,7 +108,6 @@ int arch_prepare_kprobe(struct kprobe *p)
+ int ret = 0;
+ struct kprobe *prev;
+ struct ppc_inst insn = ppc_inst_read((struct ppc_inst *)p->addr);
+- struct ppc_inst prefix = ppc_inst_read((struct ppc_inst *)(p->addr - 1));
+
+ if ((unsigned long)p->addr & 0x03) {
+ printk("Attempt to register kprobe at an unaligned address\n");
+@@ -116,7 +115,8 @@ int arch_prepare_kprobe(struct kprobe *p)
+ } else if (IS_MTMSRD(insn) || IS_RFID(insn) || IS_RFI(insn)) {
+ printk("Cannot register a kprobe on rfi/rfid or mtmsr[d]\n");
+ ret = -EINVAL;
+- } else if (ppc_inst_prefixed(prefix)) {
++ } else if ((unsigned long)p->addr & ~PAGE_MASK &&
++ ppc_inst_prefixed(ppc_inst_read((struct ppc_inst *)(p->addr - 1)))) {
+ printk("Cannot register a kprobe on the second word of prefixed instruction\n");
+ ret = -EINVAL;
+ }
+diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile
+index ca2b40dfd24b8..24d936c147cdf 100644
+--- a/arch/riscv/kernel/vdso/Makefile
++++ b/arch/riscv/kernel/vdso/Makefile
+@@ -23,7 +23,7 @@ ifneq ($(c-gettimeofday-y),)
+ endif
+
+ # Build rules
+-targets := $(obj-vdso) vdso.so vdso.so.dbg vdso.lds vdso-dummy.o
++targets := $(obj-vdso) vdso.so vdso.so.dbg vdso.lds vdso-syms.S
+ obj-vdso := $(addprefix $(obj)/, $(obj-vdso))
+
+ obj-y += vdso.o vdso-syms.o
+@@ -41,7 +41,7 @@ KASAN_SANITIZE := n
+ $(obj)/vdso.o: $(obj)/vdso.so
+
+ # link rule for the .so file, .lds has to be first
+-$(obj)/vdso.so.dbg: $(src)/vdso.lds $(obj-vdso) FORCE
++$(obj)/vdso.so.dbg: $(obj)/vdso.lds $(obj-vdso) FORCE
+ $(call if_changed,vdsold)
+ LDFLAGS_vdso.so.dbg = -shared -s -soname=linux-vdso.so.1 \
+ --build-id=sha1 --hash-style=both --eh-frame-hdr
+diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
+index 51abd44ab8c2d..3b4412c83eec0 100644
+--- a/arch/x86/include/asm/apic.h
++++ b/arch/x86/include/asm/apic.h
+@@ -174,6 +174,7 @@ static inline int apic_is_clustered_box(void)
+ extern int setup_APIC_eilvt(u8 lvt_off, u8 vector, u8 msg_type, u8 mask);
+ extern void lapic_assign_system_vectors(void);
+ extern void lapic_assign_legacy_vector(unsigned int isairq, bool replace);
++extern void lapic_update_legacy_vectors(void);
+ extern void lapic_online(void);
+ extern void lapic_offline(void);
+ extern bool apic_needs_pit(void);
+diff --git a/arch/x86/include/asm/disabled-features.h b/arch/x86/include/asm/disabled-features.h
+index 5861d34f97718..09db5b8f1444a 100644
+--- a/arch/x86/include/asm/disabled-features.h
++++ b/arch/x86/include/asm/disabled-features.h
+@@ -56,11 +56,8 @@
+ # define DISABLE_PTI (1 << (X86_FEATURE_PTI & 31))
+ #endif
+
+-#ifdef CONFIG_IOMMU_SUPPORT
+-# define DISABLE_ENQCMD 0
+-#else
+-# define DISABLE_ENQCMD (1 << (X86_FEATURE_ENQCMD & 31))
+-#endif
++/* Force disable because it's broken beyond repair */
++#define DISABLE_ENQCMD (1 << (X86_FEATURE_ENQCMD & 31))
+
+ /*
+ * Make sure to add features to the correct mask
+diff --git a/arch/x86/include/asm/fpu/api.h b/arch/x86/include/asm/fpu/api.h
+index 38f4936045ab6..8b9bfaad6e662 100644
+--- a/arch/x86/include/asm/fpu/api.h
++++ b/arch/x86/include/asm/fpu/api.h
+@@ -79,10 +79,6 @@ extern int cpu_has_xfeatures(u64 xfeatures_mask, const char **feature_name);
+ */
+ #define PASID_DISABLED 0
+
+-#ifdef CONFIG_IOMMU_SUPPORT
+-/* Update current's PASID MSR/state by mm's PASID. */
+-void update_pasid(void);
+-#else
+ static inline void update_pasid(void) { }
+-#endif
++
+ #endif /* _ASM_X86_FPU_API_H */
+diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
+index 8d33ad80704f2..ceeba9f631722 100644
+--- a/arch/x86/include/asm/fpu/internal.h
++++ b/arch/x86/include/asm/fpu/internal.h
+@@ -584,13 +584,6 @@ static inline void switch_fpu_finish(struct fpu *new_fpu)
+ pkru_val = pk->pkru;
+ }
+ __write_pkru(pkru_val);
+-
+- /*
+- * Expensive PASID MSR write will be avoided in update_pasid() because
+- * TIF_NEED_FPU_LOAD was set. And the PASID state won't be updated
+- * unless it's different from mm->pasid to reduce overhead.
+- */
+- update_pasid();
+ }
+
+ #endif /* _ASM_X86_FPU_INTERNAL_H */
+diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h
+index 3381198525126..69299878b200a 100644
+--- a/arch/x86/include/asm/kvm_para.h
++++ b/arch/x86/include/asm/kvm_para.h
+@@ -7,8 +7,6 @@
+ #include <linux/interrupt.h>
+ #include <uapi/asm/kvm_para.h>
+
+-extern void kvmclock_init(void);
+-
+ #ifdef CONFIG_KVM_GUEST
+ bool kvm_check_and_clear_guest_paused(void);
+ #else
+@@ -86,13 +84,14 @@ static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
+ }
+
+ #ifdef CONFIG_KVM_GUEST
++void kvmclock_init(void);
++void kvmclock_disable(void);
+ bool kvm_para_available(void);
+ unsigned int kvm_arch_para_features(void);
+ unsigned int kvm_arch_para_hints(void);
+ void kvm_async_pf_task_wait_schedule(u32 token);
+ void kvm_async_pf_task_wake(u32 token);
+ u32 kvm_read_and_reset_apf_flags(void);
+-void kvm_disable_steal_time(void);
+ bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token);
+
+ DECLARE_STATIC_KEY_FALSE(kvm_async_pf_enabled);
+@@ -137,11 +136,6 @@ static inline u32 kvm_read_and_reset_apf_flags(void)
+ return 0;
+ }
+
+-static inline void kvm_disable_steal_time(void)
+-{
+- return;
+-}
+-
+ static __always_inline bool kvm_handle_async_pf(struct pt_regs *regs, u32 token)
+ {
+ return false;
+diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
+index 539f3e88ca7cd..24539a05c58c7 100644
+--- a/arch/x86/kernel/apic/apic.c
++++ b/arch/x86/kernel/apic/apic.c
+@@ -2539,6 +2539,7 @@ static void __init apic_bsp_setup(bool upmode)
+ end_local_APIC_setup();
+ irq_remap_enable_fault_handling();
+ setup_IO_APIC();
++ lapic_update_legacy_vectors();
+ }
+
+ #ifdef CONFIG_UP_LATE_INIT
+diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
+index 758bbf25ef748..bd557e9f5dd8e 100644
+--- a/arch/x86/kernel/apic/vector.c
++++ b/arch/x86/kernel/apic/vector.c
+@@ -687,6 +687,26 @@ void lapic_assign_legacy_vector(unsigned int irq, bool replace)
+ irq_matrix_assign_system(vector_matrix, ISA_IRQ_VECTOR(irq), replace);
+ }
+
++void __init lapic_update_legacy_vectors(void)
++{
++ unsigned int i;
++
++ if (IS_ENABLED(CONFIG_X86_IO_APIC) && nr_ioapics > 0)
++ return;
++
++ /*
++ * If the IO/APIC is disabled via config, kernel command line or
++ * lack of enumeration then all legacy interrupts are routed
++ * through the PIC. Make sure that they are marked as legacy
++ * vectors. PIC_CASCADE_IRQ has already been marked in
++ * lapic_assign_system_vectors().
++ */
++ for (i = 0; i < nr_legacy_irqs(); i++) {
++ if (i != PIC_CASCADE_IR)
++ lapic_assign_legacy_vector(i, true);
++ }
++}
++
+ void __init lapic_assign_system_vectors(void)
+ {
+ unsigned int i, vector = 0;
+diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
+index 5d8047441a0aa..67f1a03b9b235 100644
+--- a/arch/x86/kernel/fpu/xstate.c
++++ b/arch/x86/kernel/fpu/xstate.c
+@@ -1402,60 +1402,3 @@ int proc_pid_arch_status(struct seq_file *m, struct pid_namespace *ns,
+ return 0;
+ }
+ #endif /* CONFIG_PROC_PID_ARCH_STATUS */
+-
+-#ifdef CONFIG_IOMMU_SUPPORT
+-void update_pasid(void)
+-{
+- u64 pasid_state;
+- u32 pasid;
+-
+- if (!cpu_feature_enabled(X86_FEATURE_ENQCMD))
+- return;
+-
+- if (!current->mm)
+- return;
+-
+- pasid = READ_ONCE(current->mm->pasid);
+- /* Set the valid bit in the PASID MSR/state only for valid pasid. */
+- pasid_state = pasid == PASID_DISABLED ?
+- pasid : pasid | MSR_IA32_PASID_VALID;
+-
+- /*
+- * No need to hold fregs_lock() since the task's fpstate won't
+- * be changed by others (e.g. ptrace) while the task is being
+- * switched to or is in IPI.
+- */
+- if (!test_thread_flag(TIF_NEED_FPU_LOAD)) {
+- /* The MSR is active and can be directly updated. */
+- wrmsrl(MSR_IA32_PASID, pasid_state);
+- } else {
+- struct fpu *fpu = &current->thread.fpu;
+- struct ia32_pasid_state *ppasid_state;
+- struct xregs_state *xsave;
+-
+- /*
+- * The CPU's xstate registers are not currently active. Just
+- * update the PASID state in the memory buffer here. The
+- * PASID MSR will be loaded when returning to user mode.
+- */
+- xsave = &fpu->state.xsave;
+- xsave->header.xfeatures |= XFEATURE_MASK_PASID;
+- ppasid_state = get_xsave_addr(xsave, XFEATURE_PASID);
+- /*
+- * Since XFEATURE_MASK_PASID is set in xfeatures, ppasid_state
+- * won't be NULL and no need to check its value.
+- *
+- * Only update the task's PASID state when it's different
+- * from the mm's pasid.
+- */
+- if (ppasid_state->pasid != pasid_state) {
+- /*
+- * Invalid fpregs so that state restoring will pick up
+- * the PASID state.
+- */
+- __fpu_invalidate_fpregs_state(fpu);
+- ppasid_state->pasid = pasid_state;
+- }
+- }
+-}
+-#endif /* CONFIG_IOMMU_SUPPORT */
+diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
+index 7f57ede3cb8e7..7462b79c39de6 100644
+--- a/arch/x86/kernel/kvm.c
++++ b/arch/x86/kernel/kvm.c
+@@ -26,6 +26,7 @@
+ #include <linux/kprobes.h>
+ #include <linux/nmi.h>
+ #include <linux/swait.h>
++#include <linux/syscore_ops.h>
+ #include <asm/timer.h>
+ #include <asm/cpu.h>
+ #include <asm/traps.h>
+@@ -37,6 +38,7 @@
+ #include <asm/tlb.h>
+ #include <asm/cpuidle_haltpoll.h>
+ #include <asm/ptrace.h>
++#include <asm/reboot.h>
+ #include <asm/svm.h>
+
+ DEFINE_STATIC_KEY_FALSE(kvm_async_pf_enabled);
+@@ -374,6 +376,14 @@ static void kvm_pv_disable_apf(void)
+ pr_info("Unregister pv shared memory for cpu %d\n", smp_processor_id());
+ }
+
++static void kvm_disable_steal_time(void)
++{
++ if (!has_steal_clock)
++ return;
++
++ wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
++}
++
+ static void kvm_pv_guest_cpu_reboot(void *unused)
+ {
+ /*
+@@ -416,14 +426,6 @@ static u64 kvm_steal_clock(int cpu)
+ return steal;
+ }
+
+-void kvm_disable_steal_time(void)
+-{
+- if (!has_steal_clock)
+- return;
+-
+- wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
+-}
+-
+ static inline void __set_percpu_decrypted(void *ptr, unsigned long size)
+ {
+ early_set_memory_decrypted((unsigned long) ptr, size);
+@@ -460,6 +462,27 @@ static bool pv_tlb_flush_supported(void)
+
+ static DEFINE_PER_CPU(cpumask_var_t, __pv_cpu_mask);
+
++static void kvm_guest_cpu_offline(bool shutdown)
++{
++ kvm_disable_steal_time();
++ if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
++ wrmsrl(MSR_KVM_PV_EOI_EN, 0);
++ kvm_pv_disable_apf();
++ if (!shutdown)
++ apf_task_wake_all();
++ kvmclock_disable();
++}
++
++static int kvm_cpu_online(unsigned int cpu)
++{
++ unsigned long flags;
++
++ local_irq_save(flags);
++ kvm_guest_cpu_init();
++ local_irq_restore(flags);
++ return 0;
++}
++
+ #ifdef CONFIG_SMP
+
+ static bool pv_ipi_supported(void)
+@@ -587,29 +610,46 @@ static void __init kvm_smp_prepare_boot_cpu(void)
+ kvm_spinlock_init();
+ }
+
+-static void kvm_guest_cpu_offline(void)
++static int kvm_cpu_down_prepare(unsigned int cpu)
+ {
+- kvm_disable_steal_time();
+- if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
+- wrmsrl(MSR_KVM_PV_EOI_EN, 0);
+- kvm_pv_disable_apf();
+- apf_task_wake_all();
++ unsigned long flags;
++
++ local_irq_save(flags);
++ kvm_guest_cpu_offline(false);
++ local_irq_restore(flags);
++ return 0;
+ }
+
+-static int kvm_cpu_online(unsigned int cpu)
++#endif
++
++static int kvm_suspend(void)
+ {
+- local_irq_disable();
+- kvm_guest_cpu_init();
+- local_irq_enable();
++ kvm_guest_cpu_offline(false);
++
+ return 0;
+ }
+
+-static int kvm_cpu_down_prepare(unsigned int cpu)
++static void kvm_resume(void)
+ {
+- local_irq_disable();
+- kvm_guest_cpu_offline();
+- local_irq_enable();
+- return 0;
++ kvm_cpu_online(raw_smp_processor_id());
++}
++
++static struct syscore_ops kvm_syscore_ops = {
++ .suspend = kvm_suspend,
++ .resume = kvm_resume,
++};
++
++/*
++ * After a PV feature is registered, the host will keep writing to the
++ * registered memory location. If the guest happens to shutdown, this memory
++ * won't be valid. In cases like kexec, in which you install a new kernel, this
++ * means a random memory location will be kept being written.
++ */
++#ifdef CONFIG_KEXEC_CORE
++static void kvm_crash_shutdown(struct pt_regs *regs)
++{
++ kvm_guest_cpu_offline(true);
++ native_machine_crash_shutdown(regs);
+ }
+ #endif
+
+@@ -681,6 +721,12 @@ static void __init kvm_guest_init(void)
+ kvm_guest_cpu_init();
+ #endif
+
++#ifdef CONFIG_KEXEC_CORE
++ machine_ops.crash_shutdown = kvm_crash_shutdown;
++#endif
++
++ register_syscore_ops(&kvm_syscore_ops);
++
+ /*
+ * Hard lockup detection is enabled by default. Disable it, as guests
+ * can get false positives too easily, for example if the host is
+diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
+index 5ee705b44560b..c4ac26333bc41 100644
+--- a/arch/x86/kernel/kvmclock.c
++++ b/arch/x86/kernel/kvmclock.c
+@@ -20,7 +20,6 @@
+ #include <asm/hypervisor.h>
+ #include <asm/mem_encrypt.h>
+ #include <asm/x86_init.h>
+-#include <asm/reboot.h>
+ #include <asm/kvmclock.h>
+
+ static int kvmclock __initdata = 1;
+@@ -204,28 +203,9 @@ static void kvm_setup_secondary_clock(void)
+ }
+ #endif
+
+-/*
+- * After the clock is registered, the host will keep writing to the
+- * registered memory location. If the guest happens to shutdown, this memory
+- * won't be valid. In cases like kexec, in which you install a new kernel, this
+- * means a random memory location will be kept being written. So before any
+- * kind of shutdown from our side, we unregister the clock by writing anything
+- * that does not have the 'enable' bit set in the msr
+- */
+-#ifdef CONFIG_KEXEC_CORE
+-static void kvm_crash_shutdown(struct pt_regs *regs)
+-{
+- native_write_msr(msr_kvm_system_time, 0, 0);
+- kvm_disable_steal_time();
+- native_machine_crash_shutdown(regs);
+-}
+-#endif
+-
+-static void kvm_shutdown(void)
++void kvmclock_disable(void)
+ {
+ native_write_msr(msr_kvm_system_time, 0, 0);
+- kvm_disable_steal_time();
+- native_machine_shutdown();
+ }
+
+ static void __init kvmclock_init_mem(void)
+@@ -352,10 +332,6 @@ void __init kvmclock_init(void)
+ #endif
+ x86_platform.save_sched_clock_state = kvm_save_sched_clock_state;
+ x86_platform.restore_sched_clock_state = kvm_restore_sched_clock_state;
+- machine_ops.shutdown = kvm_shutdown;
+-#ifdef CONFIG_KEXEC_CORE
+- machine_ops.crash_shutdown = kvm_crash_shutdown;
+-#endif
+ kvm_get_preset_lpj();
+
+ /*
+diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
+index 9d4eb114613c2..41d44fb5f753d 100644
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -2362,7 +2362,7 @@ static int cr_interception(struct vcpu_svm *svm)
+ err = 0;
+ if (cr >= 16) { /* mov to cr */
+ cr -= 16;
+- val = kvm_register_read(&svm->vcpu, reg);
++ val = kvm_register_readl(&svm->vcpu, reg);
+ trace_kvm_cr_write(cr, val);
+ switch (cr) {
+ case 0:
+@@ -2408,7 +2408,7 @@ static int cr_interception(struct vcpu_svm *svm)
+ kvm_queue_exception(&svm->vcpu, UD_VECTOR);
+ return 1;
+ }
+- kvm_register_write(&svm->vcpu, reg, val);
++ kvm_register_writel(&svm->vcpu, reg, val);
+ trace_kvm_cr_read(cr, val);
+ }
+ return kvm_complete_insn_gp(&svm->vcpu, err);
+@@ -2439,13 +2439,13 @@ static int dr_interception(struct vcpu_svm *svm)
+ if (dr >= 16) { /* mov to DRn */
+ if (!kvm_require_dr(&svm->vcpu, dr - 16))
+ return 1;
+- val = kvm_register_read(&svm->vcpu, reg);
++ val = kvm_register_readl(&svm->vcpu, reg);
+ kvm_set_dr(&svm->vcpu, dr - 16, val);
+ } else {
+ if (!kvm_require_dr(&svm->vcpu, dr))
+ return 1;
+ kvm_get_dr(&svm->vcpu, dr, &val);
+- kvm_register_write(&svm->vcpu, reg, val);
++ kvm_register_writel(&svm->vcpu, reg, val);
+ }
+
+ return kvm_skip_emulated_instruction(&svm->vcpu);
+diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/mm/mem_encrypt_identity.c
+index a19374d261013..65f599e9075bc 100644
+--- a/arch/x86/mm/mem_encrypt_identity.c
++++ b/arch/x86/mm/mem_encrypt_identity.c
+@@ -504,10 +504,6 @@ void __init sme_enable(struct boot_params *bp)
+ #define AMD_SME_BIT BIT(0)
+ #define AMD_SEV_BIT BIT(1)
+
+- /* Check the SEV MSR whether SEV or SME is enabled */
+- sev_status = __rdmsr(MSR_AMD64_SEV);
+- feature_mask = (sev_status & MSR_AMD64_SEV_ENABLED) ? AMD_SEV_BIT : AMD_SME_BIT;
+-
+ /*
+ * Check for the SME/SEV feature:
+ * CPUID Fn8000_001F[EAX]
+@@ -519,11 +515,16 @@ void __init sme_enable(struct boot_params *bp)
+ eax = 0x8000001f;
+ ecx = 0;
+ native_cpuid(&eax, &ebx, &ecx, &edx);
+- if (!(eax & feature_mask))
++ /* Check whether SEV or SME is supported */
++ if (!(eax & (AMD_SEV_BIT | AMD_SME_BIT)))
+ return;
+
+ me_mask = 1UL << (ebx & 0x3f);
+
++ /* Check the SEV MSR whether SEV or SME is enabled */
++ sev_status = __rdmsr(MSR_AMD64_SEV);
++ feature_mask = (sev_status & MSR_AMD64_SEV_ENABLED) ? AMD_SEV_BIT : AMD_SME_BIT;
++
+ /* Check if memory encryption is enabled */
+ if (feature_mask == AMD_SME_BIT) {
+ /*
+diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c
+index 4c0d4e4341961..72d2c0b656339 100644
+--- a/drivers/acpi/acpica/utdelete.c
++++ b/drivers/acpi/acpica/utdelete.c
+@@ -285,6 +285,14 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)
+ }
+ break;
+
++ case ACPI_TYPE_LOCAL_ADDRESS_HANDLER:
++
++ ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
++ "***** Address handler %p\n", object));
++
++ acpi_os_delete_mutex(object->address_space.context_mutex);
++ break;
++
+ default:
+
+ break;
+diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
+index 9afbe4992a1dd..818dc7f54f038 100644
+--- a/drivers/bus/ti-sysc.c
++++ b/drivers/bus/ti-sysc.c
+@@ -1330,6 +1330,34 @@ err_allow_idle:
+ return error;
+ }
+
++static int sysc_reinit_module(struct sysc *ddata, bool leave_enabled)
++{
++ struct device *dev = ddata->dev;
++ int error;
++
++ /* Disable target module if it is enabled */
++ if (ddata->enabled) {
++ error = sysc_runtime_suspend(dev);
++ if (error)
++ dev_warn(dev, "reinit suspend failed: %i\n", error);
++ }
++
++ /* Enable target module */
++ error = sysc_runtime_resume(dev);
++ if (error)
++ dev_warn(dev, "reinit resume failed: %i\n", error);
++
++ if (leave_enabled)
++ return error;
++
++ /* Disable target module if no leave_enabled was set */
++ error = sysc_runtime_suspend(dev);
++ if (error)
++ dev_warn(dev, "reinit suspend failed: %i\n", error);
++
++ return error;
++}
++
+ static int __maybe_unused sysc_noirq_suspend(struct device *dev)
+ {
+ struct sysc *ddata;
+@@ -1340,12 +1368,18 @@ static int __maybe_unused sysc_noirq_suspend(struct device *dev)
+ (SYSC_QUIRK_LEGACY_IDLE | SYSC_QUIRK_NO_IDLE))
+ return 0;
+
+- return pm_runtime_force_suspend(dev);
++ if (!ddata->enabled)
++ return 0;
++
++ ddata->needs_resume = 1;
++
++ return sysc_runtime_suspend(dev);
+ }
+
+ static int __maybe_unused sysc_noirq_resume(struct device *dev)
+ {
+ struct sysc *ddata;
++ int error = 0;
+
+ ddata = dev_get_drvdata(dev);
+
+@@ -1353,7 +1387,19 @@ static int __maybe_unused sysc_noirq_resume(struct device *dev)
+ (SYSC_QUIRK_LEGACY_IDLE | SYSC_QUIRK_NO_IDLE))
+ return 0;
+
+- return pm_runtime_force_resume(dev);
++ if (ddata->cfg.quirks & SYSC_QUIRK_REINIT_ON_RESUME) {
++ error = sysc_reinit_module(ddata, ddata->needs_resume);
++ if (error)
++ dev_warn(dev, "noirq_resume failed: %i\n", error);
++ } else if (ddata->needs_resume) {
++ error = sysc_runtime_resume(dev);
++ if (error)
++ dev_warn(dev, "noirq_resume failed: %i\n", error);
++ }
++
++ ddata->needs_resume = 0;
++
++ return error;
+ }
+
+ static const struct dev_pm_ops sysc_pm_ops = {
+@@ -1404,9 +1450,9 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
+ SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
+ /* Uarts on omap4 and later */
+ SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x50411e03, 0xffff00ff,
+- SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
++ SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
+ SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x47422e03, 0xffffffff,
+- SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
++ SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
+
+ /* Quirks that need to be set based on the module address */
+ SYSC_QUIRK("mcpdm", 0x40132000, 0, 0x10, -ENODEV, 0x50000800, 0xffffffff,
+@@ -1462,7 +1508,8 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
+ SYSC_QUIRK("usb_otg_hs", 0, 0x400, 0x404, 0x408, 0x00000050,
+ 0xffffffff, SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
+ SYSC_QUIRK("usb_otg_hs", 0, 0, 0x10, -ENODEV, 0x4ea2080d, 0xffffffff,
+- SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
++ SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY |
++ SYSC_QUIRK_REINIT_ON_RESUME),
+ SYSC_QUIRK("wdt", 0, 0, 0x10, 0x14, 0x502a0500, 0xfffff0f0,
+ SYSC_MODULE_QUIRK_WDT),
+ /* PRUSS on am3, am4 and am5 */
+diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
+index e15d484b6a5a7..ea7ca74fc1730 100644
+--- a/drivers/firmware/efi/cper.c
++++ b/drivers/firmware/efi/cper.c
+@@ -276,8 +276,7 @@ static int cper_dimm_err_location(struct cper_mem_err_compact *mem, char *msg)
+ if (!msg || !(mem->validation_bits & CPER_MEM_VALID_MODULE_HANDLE))
+ return 0;
+
+- n = 0;
+- len = CPER_REC_LEN - 1;
++ len = CPER_REC_LEN;
+ dmi_memdev_name(mem->mem_dev_handle, &bank, &device);
+ if (bank && device)
+ n = snprintf(msg, len, "DIMM location: %s %s ", bank, device);
+@@ -286,7 +285,6 @@ static int cper_dimm_err_location(struct cper_mem_err_compact *mem, char *msg)
+ "DIMM location: not present. DMI handle: 0x%.4x ",
+ mem->mem_dev_handle);
+
+- msg[n] = '\0';
+ return n;
+ }
+
+diff --git a/drivers/firmware/efi/fdtparams.c b/drivers/firmware/efi/fdtparams.c
+index bb042ab7c2be6..e901f8564ca0c 100644
+--- a/drivers/firmware/efi/fdtparams.c
++++ b/drivers/firmware/efi/fdtparams.c
+@@ -98,6 +98,9 @@ u64 __init efi_get_fdt_params(struct efi_memory_map_data *mm)
+ BUILD_BUG_ON(ARRAY_SIZE(target) != ARRAY_SIZE(name));
+ BUILD_BUG_ON(ARRAY_SIZE(target) != ARRAY_SIZE(dt_params[0].params));
+
++ if (!fdt)
++ return 0;
++
+ for (i = 0; i < ARRAY_SIZE(dt_params); i++) {
+ node = fdt_path_offset(fdt, dt_params[i].path);
+ if (node < 0)
+diff --git a/drivers/firmware/efi/libstub/file.c b/drivers/firmware/efi/libstub/file.c
+index 4e81c6077188e..dd95f330fe6e1 100644
+--- a/drivers/firmware/efi/libstub/file.c
++++ b/drivers/firmware/efi/libstub/file.c
+@@ -103,7 +103,7 @@ static int find_file_option(const efi_char16_t *cmdline, int cmdline_len,
+ return 0;
+
+ /* Skip any leading slashes */
+- while (cmdline[i] == L'/' || cmdline[i] == L'\\')
++ while (i < cmdline_len && (cmdline[i] == L'/' || cmdline[i] == L'\\'))
+ i++;
+
+ while (--result_len > 0 && i < cmdline_len) {
+diff --git a/drivers/firmware/efi/memattr.c b/drivers/firmware/efi/memattr.c
+index 5737cb0fcd44e..0a9aba5f9ceff 100644
+--- a/drivers/firmware/efi/memattr.c
++++ b/drivers/firmware/efi/memattr.c
+@@ -67,11 +67,6 @@ static bool entry_is_valid(const efi_memory_desc_t *in, efi_memory_desc_t *out)
+ return false;
+ }
+
+- if (!(in->attribute & (EFI_MEMORY_RO | EFI_MEMORY_XP))) {
+- pr_warn("Entry attributes invalid: RO and XP bits both cleared\n");
+- return false;
+- }
+-
+ if (PAGE_SIZE > EFI_PAGE_SIZE &&
+ (!PAGE_ALIGNED(in->phys_addr) ||
+ !PAGE_ALIGNED(in->num_pages << EFI_PAGE_SHIFT))) {
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+index c80d8339f58c4..2c1c5f7f98deb 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+@@ -337,7 +337,6 @@ static int amdgpu_ctx_query2(struct amdgpu_device *adev,
+ {
+ struct amdgpu_ctx *ctx;
+ struct amdgpu_ctx_mgr *mgr;
+- unsigned long ras_counter;
+
+ if (!fpriv)
+ return -EINVAL;
+@@ -362,21 +361,6 @@ static int amdgpu_ctx_query2(struct amdgpu_device *adev,
+ if (atomic_read(&ctx->guilty))
+ out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_GUILTY;
+
+- /*query ue count*/
+- ras_counter = amdgpu_ras_query_error_count(adev, false);
+- /*ras counter is monotonic increasing*/
+- if (ras_counter != ctx->ras_counter_ue) {
+- out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_UE;
+- ctx->ras_counter_ue = ras_counter;
+- }
+-
+- /*query ce count*/
+- ras_counter = amdgpu_ras_query_error_count(adev, true);
+- if (ras_counter != ctx->ras_counter_ce) {
+- out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_CE;
+- ctx->ras_counter_ce = ras_counter;
+- }
+-
+ mutex_unlock(&mgr->lock);
+ return 0;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
+index 63b3501823898..8c84e35c2719b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
++++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
+@@ -187,14 +187,14 @@ static int jpeg_v2_5_hw_init(void *handle)
+ static int jpeg_v2_5_hw_fini(void *handle)
+ {
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+- struct amdgpu_ring *ring;
+ int i;
+
++ cancel_delayed_work_sync(&adev->vcn.idle_work);
++
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ if (adev->jpeg.harvest_config & (1 << i))
+ continue;
+
+- ring = &adev->jpeg.inst[i].ring_dec;
+ if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
+ RREG32_SOC15(JPEG, i, mmUVD_JRBC_STATUS))
+ jpeg_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE);
+diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
+index 9259e35f0f55a..e00c88abeaed1 100644
+--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
+@@ -159,9 +159,9 @@ static int jpeg_v3_0_hw_init(void *handle)
+ static int jpeg_v3_0_hw_fini(void *handle)
+ {
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+- struct amdgpu_ring *ring;
+
+- ring = &adev->jpeg.inst->ring_dec;
++ cancel_delayed_work_sync(&adev->vcn.idle_work);
++
+ if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
+ RREG32_SOC15(JPEG, 0, mmUVD_JRBC_STATUS))
+ jpeg_v3_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
+diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+index 666bfa4a0b8ea..53f0899eb3166 100644
+--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+@@ -356,6 +356,7 @@ static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
+
+ error:
+ dma_fence_put(fence);
++ amdgpu_bo_unpin(bo);
+ amdgpu_bo_unreserve(bo);
+ amdgpu_bo_unref(&bo);
+ return r;
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+index 700621ddc02e2..c9c888be12285 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+@@ -345,15 +345,14 @@ done:
+ static int vcn_v3_0_hw_fini(void *handle)
+ {
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+- struct amdgpu_ring *ring;
+ int i;
+
++ cancel_delayed_work_sync(&adev->vcn.idle_work);
++
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+
+- ring = &adev->vcn.inst[i].ring_dec;
+-
+ if (!amdgpu_sriov_vf(adev)) {
+ if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
+ (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
+diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
+index e424a6d1a68c9..7a72faf29f272 100644
+--- a/drivers/gpu/drm/i915/selftests/i915_request.c
++++ b/drivers/gpu/drm/i915/selftests/i915_request.c
+@@ -1391,8 +1391,8 @@ static int live_breadcrumbs_smoketest(void *arg)
+
+ for (n = 0; n < smoke[0].ncontexts; n++) {
+ smoke[0].contexts[n] = live_context(i915, file);
+- if (!smoke[0].contexts[n]) {
+- ret = -ENOMEM;
++ if (IS_ERR(smoke[0].contexts[n])) {
++ ret = PTR_ERR(smoke[0].contexts[n]);
+ goto out_contexts;
+ }
+ }
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+index e69ea810e18d9..c8217f4858a15 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+@@ -931,8 +931,7 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
+ DPU_DEBUG("REG_DMA is not defined");
+ }
+
+- if (of_device_is_compatible(dev->dev->of_node, "qcom,sc7180-mdss"))
+- dpu_kms_parse_data_bus_icc_path(dpu_kms);
++ dpu_kms_parse_data_bus_icc_path(dpu_kms);
+
+ pm_runtime_get_sync(&dpu_kms->pdev->dev);
+
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
+index cd4078807db1b..3416e9617ee9a 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
+@@ -31,40 +31,8 @@ struct dpu_mdss {
+ void __iomem *mmio;
+ struct dss_module_power mp;
+ struct dpu_irq_controller irq_controller;
+- struct icc_path *path[2];
+- u32 num_paths;
+ };
+
+-static int dpu_mdss_parse_data_bus_icc_path(struct drm_device *dev,
+- struct dpu_mdss *dpu_mdss)
+-{
+- struct icc_path *path0 = of_icc_get(dev->dev, "mdp0-mem");
+- struct icc_path *path1 = of_icc_get(dev->dev, "mdp1-mem");
+-
+- if (IS_ERR_OR_NULL(path0))
+- return PTR_ERR_OR_ZERO(path0);
+-
+- dpu_mdss->path[0] = path0;
+- dpu_mdss->num_paths = 1;
+-
+- if (!IS_ERR_OR_NULL(path1)) {
+- dpu_mdss->path[1] = path1;
+- dpu_mdss->num_paths++;
+- }
+-
+- return 0;
+-}
+-
+-static void dpu_mdss_icc_request_bw(struct msm_mdss *mdss)
+-{
+- struct dpu_mdss *dpu_mdss = to_dpu_mdss(mdss);
+- int i;
+- u64 avg_bw = dpu_mdss->num_paths ? MAX_BW / dpu_mdss->num_paths : 0;
+-
+- for (i = 0; i < dpu_mdss->num_paths; i++)
+- icc_set_bw(dpu_mdss->path[i], avg_bw, kBps_to_icc(MAX_BW));
+-}
+-
+ static void dpu_mdss_irq(struct irq_desc *desc)
+ {
+ struct dpu_mdss *dpu_mdss = irq_desc_get_handler_data(desc);
+@@ -178,8 +146,6 @@ static int dpu_mdss_enable(struct msm_mdss *mdss)
+ struct dss_module_power *mp = &dpu_mdss->mp;
+ int ret;
+
+- dpu_mdss_icc_request_bw(mdss);
+-
+ ret = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true);
+ if (ret) {
+ DPU_ERROR("clock enable failed, ret:%d\n", ret);
+@@ -213,15 +179,12 @@ static int dpu_mdss_disable(struct msm_mdss *mdss)
+ {
+ struct dpu_mdss *dpu_mdss = to_dpu_mdss(mdss);
+ struct dss_module_power *mp = &dpu_mdss->mp;
+- int ret, i;
++ int ret;
+
+ ret = msm_dss_enable_clk(mp->clk_config, mp->num_clk, false);
+ if (ret)
+ DPU_ERROR("clock disable failed, ret:%d\n", ret);
+
+- for (i = 0; i < dpu_mdss->num_paths; i++)
+- icc_set_bw(dpu_mdss->path[i], 0, 0);
+-
+ return ret;
+ }
+
+@@ -232,7 +195,6 @@ static void dpu_mdss_destroy(struct drm_device *dev)
+ struct dpu_mdss *dpu_mdss = to_dpu_mdss(priv->mdss);
+ struct dss_module_power *mp = &dpu_mdss->mp;
+ int irq;
+- int i;
+
+ pm_runtime_suspend(dev->dev);
+ pm_runtime_disable(dev->dev);
+@@ -242,9 +204,6 @@ static void dpu_mdss_destroy(struct drm_device *dev)
+ msm_dss_put_clk(mp->clk_config, mp->num_clk);
+ devm_kfree(&pdev->dev, mp->clk_config);
+
+- for (i = 0; i < dpu_mdss->num_paths; i++)
+- icc_put(dpu_mdss->path[i]);
+-
+ if (dpu_mdss->mmio)
+ devm_iounmap(&pdev->dev, dpu_mdss->mmio);
+ dpu_mdss->mmio = NULL;
+@@ -276,12 +235,6 @@ int dpu_mdss_init(struct drm_device *dev)
+
+ DRM_DEBUG("mapped mdss address space @%pK\n", dpu_mdss->mmio);
+
+- if (!of_device_is_compatible(dev->dev->of_node, "qcom,sc7180-mdss")) {
+- ret = dpu_mdss_parse_data_bus_icc_path(dev, dpu_mdss);
+- if (ret)
+- return ret;
+- }
+-
+ mp = &dpu_mdss->mp;
+ ret = msm_dss_parse_clock(pdev, mp);
+ if (ret) {
+@@ -307,8 +260,6 @@ int dpu_mdss_init(struct drm_device *dev)
+
+ pm_runtime_enable(dev->dev);
+
+- dpu_mdss_icc_request_bw(priv->mdss);
+-
+ return ret;
+
+ irq_error:
+diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
+index 74ebfb12c360e..66b1051620390 100644
+--- a/drivers/hid/hid-logitech-hidpp.c
++++ b/drivers/hid/hid-logitech-hidpp.c
+@@ -1259,6 +1259,7 @@ static int hidpp20_battery_map_status_voltage(u8 data[3], int *voltage,
+ int status;
+
+ long flags = (long) data[2];
++ *level = POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN;
+
+ if (flags & 0x80)
+ switch (flags & 0x07) {
+diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
+index abd86903875f0..fc4c074597539 100644
+--- a/drivers/hid/hid-magicmouse.c
++++ b/drivers/hid/hid-magicmouse.c
+@@ -597,7 +597,7 @@ static int magicmouse_probe(struct hid_device *hdev,
+ if (id->vendor == USB_VENDOR_ID_APPLE &&
+ id->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 &&
+ hdev->type != HID_TYPE_USBMOUSE)
+- return 0;
++ return -ENODEV;
+
+ msc = devm_kzalloc(&hdev->dev, sizeof(*msc), GFP_KERNEL);
+ if (msc == NULL) {
+diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
+index 8429ebe7097e4..8580ace596c25 100644
+--- a/drivers/hid/hid-multitouch.c
++++ b/drivers/hid/hid-multitouch.c
+@@ -604,9 +604,13 @@ static struct mt_report_data *mt_allocate_report_data(struct mt_device *td,
+ if (!(HID_MAIN_ITEM_VARIABLE & field->flags))
+ continue;
+
+- for (n = 0; n < field->report_count; n++) {
+- if (field->usage[n].hid == HID_DG_CONTACTID)
+- rdata->is_mt_collection = true;
++ if (field->logical == HID_DG_FINGER || td->hdev->group != HID_GROUP_MULTITOUCH_WIN_8) {
++ for (n = 0; n < field->report_count; n++) {
++ if (field->usage[n].hid == HID_DG_CONTACTID) {
++ rdata->is_mt_collection = true;
++ break;
++ }
++ }
+ }
+ }
+
+diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
+index cb7758d59014e..1f08c848c33de 100644
+--- a/drivers/hid/i2c-hid/i2c-hid-core.c
++++ b/drivers/hid/i2c-hid/i2c-hid-core.c
+@@ -50,6 +50,7 @@
+ #define I2C_HID_QUIRK_BOGUS_IRQ BIT(4)
+ #define I2C_HID_QUIRK_RESET_ON_RESUME BIT(5)
+ #define I2C_HID_QUIRK_BAD_INPUT_SIZE BIT(6)
++#define I2C_HID_QUIRK_NO_WAKEUP_AFTER_RESET BIT(7)
+
+
+ /* flags */
+@@ -183,6 +184,11 @@ static const struct i2c_hid_quirks {
+ I2C_HID_QUIRK_RESET_ON_RESUME },
+ { USB_VENDOR_ID_ITE, I2C_DEVICE_ID_ITE_LENOVO_LEGION_Y720,
+ I2C_HID_QUIRK_BAD_INPUT_SIZE },
++ /*
++ * Sending the wakeup after reset actually break ELAN touchscreen controller
++ */
++ { USB_VENDOR_ID_ELAN, HID_ANY_ID,
++ I2C_HID_QUIRK_NO_WAKEUP_AFTER_RESET },
+ { 0, 0 }
+ };
+
+@@ -466,7 +472,8 @@ static int i2c_hid_hwreset(struct i2c_client *client)
+ }
+
+ /* At least some SIS devices need this after reset */
+- ret = i2c_hid_set_power(client, I2C_HID_PWR_ON);
++ if (!(ihid->quirks & I2C_HID_QUIRK_NO_WAKEUP_AFTER_RESET))
++ ret = i2c_hid_set_power(client, I2C_HID_PWR_ON);
+
+ out_unlock:
+ mutex_unlock(&ihid->reset_lock);
+@@ -1131,8 +1138,8 @@ static int i2c_hid_probe(struct i2c_client *client,
+ hid->vendor = le16_to_cpu(ihid->hdesc.wVendorID);
+ hid->product = le16_to_cpu(ihid->hdesc.wProductID);
+
+- snprintf(hid->name, sizeof(hid->name), "%s %04hX:%04hX",
+- client->name, hid->vendor, hid->product);
++ snprintf(hid->name, sizeof(hid->name), "%s %04X:%04X",
++ client->name, (u16)hid->vendor, (u16)hid->product);
+ strlcpy(hid->phys, dev_name(&client->dev), sizeof(hid->phys));
+
+ ihid->quirks = i2c_hid_lookup_quirk(hid->vendor, hid->product);
+diff --git a/drivers/hid/usbhid/hid-pidff.c b/drivers/hid/usbhid/hid-pidff.c
+index fddac7c72f645..07a9fe97d2e05 100644
+--- a/drivers/hid/usbhid/hid-pidff.c
++++ b/drivers/hid/usbhid/hid-pidff.c
+@@ -1292,6 +1292,7 @@ int hid_pidff_init(struct hid_device *hid)
+
+ if (pidff->pool[PID_DEVICE_MANAGED_POOL].value &&
+ pidff->pool[PID_DEVICE_MANAGED_POOL].value[0] == 0) {
++ error = -EPERM;
+ hid_notice(hid,
+ "device does not support device managed pool\n");
+ goto fail;
+diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
+index 73b9db9e3aab6..63b74e781c5d9 100644
+--- a/drivers/hwmon/dell-smm-hwmon.c
++++ b/drivers/hwmon/dell-smm-hwmon.c
+@@ -838,10 +838,10 @@ static struct attribute *i8k_attrs[] = {
+ static umode_t i8k_is_visible(struct kobject *kobj, struct attribute *attr,
+ int index)
+ {
+- if (disallow_fan_support && index >= 8)
++ if (disallow_fan_support && index >= 20)
+ return 0;
+ if (disallow_fan_type_call &&
+- (index == 9 || index == 12 || index == 15))
++ (index == 21 || index == 25 || index == 28))
+ return 0;
+ if (index >= 0 && index <= 1 &&
+ !(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP1))
+diff --git a/drivers/hwmon/pmbus/isl68137.c b/drivers/hwmon/pmbus/isl68137.c
+index 7cad76e07f701..3f1b826dac8a0 100644
+--- a/drivers/hwmon/pmbus/isl68137.c
++++ b/drivers/hwmon/pmbus/isl68137.c
+@@ -244,8 +244,8 @@ static int isl68137_probe(struct i2c_client *client)
+ info->read_word_data = raa_dmpvr2_read_word_data;
+ break;
+ case raa_dmpvr2_2rail_nontc:
+- info->func[0] &= ~PMBUS_HAVE_TEMP;
+- info->func[1] &= ~PMBUS_HAVE_TEMP;
++ info->func[0] &= ~PMBUS_HAVE_TEMP3;
++ info->func[1] &= ~PMBUS_HAVE_TEMP3;
+ fallthrough;
+ case raa_dmpvr2_2rail:
+ info->pages = 2;
+diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
+index 4a6dd05d6dbf9..86f028febce35 100644
+--- a/drivers/i2c/busses/i2c-qcom-geni.c
++++ b/drivers/i2c/busses/i2c-qcom-geni.c
+@@ -654,6 +654,14 @@ static int geni_i2c_remove(struct platform_device *pdev)
+ return 0;
+ }
+
++static void geni_i2c_shutdown(struct platform_device *pdev)
++{
++ struct geni_i2c_dev *gi2c = platform_get_drvdata(pdev);
++
++ /* Make client i2c transfers start failing */
++ i2c_mark_adapter_suspended(&gi2c->adap);
++}
++
+ static int __maybe_unused geni_i2c_runtime_suspend(struct device *dev)
+ {
+ int ret;
+@@ -694,6 +702,8 @@ static int __maybe_unused geni_i2c_suspend_noirq(struct device *dev)
+ {
+ struct geni_i2c_dev *gi2c = dev_get_drvdata(dev);
+
++ i2c_mark_adapter_suspended(&gi2c->adap);
++
+ if (!gi2c->suspended) {
+ geni_i2c_runtime_suspend(dev);
+ pm_runtime_disable(dev);
+@@ -703,8 +713,16 @@ static int __maybe_unused geni_i2c_suspend_noirq(struct device *dev)
+ return 0;
+ }
+
++static int __maybe_unused geni_i2c_resume_noirq(struct device *dev)
++{
++ struct geni_i2c_dev *gi2c = dev_get_drvdata(dev);
++
++ i2c_mark_adapter_resumed(&gi2c->adap);
++ return 0;
++}
++
+ static const struct dev_pm_ops geni_i2c_pm_ops = {
+- SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(geni_i2c_suspend_noirq, NULL)
++ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(geni_i2c_suspend_noirq, geni_i2c_resume_noirq)
+ SET_RUNTIME_PM_OPS(geni_i2c_runtime_suspend, geni_i2c_runtime_resume,
+ NULL)
+ };
+@@ -718,6 +736,7 @@ MODULE_DEVICE_TABLE(of, geni_i2c_dt_match);
+ static struct platform_driver geni_i2c_driver = {
+ .probe = geni_i2c_probe,
+ .remove = geni_i2c_remove,
++ .shutdown = geni_i2c_shutdown,
+ .driver = {
+ .name = "geni_i2c",
+ .pm = &geni_i2c_pm_ops,
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+index 27308600da153..2dd4869156291 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+@@ -2177,8 +2177,6 @@ int cxgb4_update_mac_filt(struct port_info *pi, unsigned int viid,
+ bool persistent, u8 *smt_idx);
+ int cxgb4_get_msix_idx_from_bmap(struct adapter *adap);
+ void cxgb4_free_msix_idx_in_bmap(struct adapter *adap, u32 msix_idx);
+-int cxgb_open(struct net_device *dev);
+-int cxgb_close(struct net_device *dev);
+ void cxgb4_enable_rx(struct adapter *adap, struct sge_rspq *q);
+ void cxgb4_quiesce_rx(struct sge_rspq *q);
+ int cxgb4_port_mirror_alloc(struct net_device *dev);
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+index 23c13f34a5727..04dcb5e4b3161 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+@@ -2834,7 +2834,7 @@ static void cxgb_down(struct adapter *adapter)
+ /*
+ * net_device operations
+ */
+-int cxgb_open(struct net_device *dev)
++static int cxgb_open(struct net_device *dev)
+ {
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+@@ -2882,7 +2882,7 @@ out_unlock:
+ return err;
+ }
+
+-int cxgb_close(struct net_device *dev)
++static int cxgb_close(struct net_device *dev)
+ {
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+index 1b88bd1c2dbe4..dd9be229819a5 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+@@ -997,20 +997,16 @@ int cxgb4_tc_flower_destroy(struct net_device *dev,
+ if (!ch_flower)
+ return -ENOENT;
+
++ rhashtable_remove_fast(&adap->flower_tbl, &ch_flower->node,
++ adap->flower_ht_params);
++
+ ret = cxgb4_flow_rule_destroy(dev, ch_flower->fs.tc_prio,
+ &ch_flower->fs, ch_flower->filter_id);
+ if (ret)
+- goto err;
++ netdev_err(dev, "Flow rule destroy failed for tid: %u, ret: %d",
++ ch_flower->filter_id, ret);
+
+- ret = rhashtable_remove_fast(&adap->flower_tbl, &ch_flower->node,
+- adap->flower_ht_params);
+- if (ret) {
+- netdev_err(dev, "Flow remove from rhashtable failed");
+- goto err;
+- }
+ kfree_rcu(ch_flower, rcu);
+-
+-err:
+ return ret;
+ }
+
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
+index 6c259de96f969..338b04f339b3d 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
+@@ -589,7 +589,8 @@ int cxgb4_setup_tc_mqprio(struct net_device *dev,
+ * down before configuring tc params.
+ */
+ if (netif_running(dev)) {
+- cxgb_close(dev);
++ netif_tx_stop_all_queues(dev);
++ netif_carrier_off(dev);
+ needs_bring_up = true;
+ }
+
+@@ -615,8 +616,10 @@ int cxgb4_setup_tc_mqprio(struct net_device *dev,
+ }
+
+ out:
+- if (needs_bring_up)
+- cxgb_open(dev);
++ if (needs_bring_up) {
++ netif_tx_start_all_queues(dev);
++ netif_carrier_on(dev);
++ }
+
+ mutex_unlock(&adap->tc_mqprio->mqprio_mutex);
+ return ret;
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
+index 546301272271d..ccb6bd002b20d 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
+@@ -2552,6 +2552,12 @@ int cxgb4_ethofld_send_flowc(struct net_device *dev, u32 eotid, u32 tc)
+ if (!eosw_txq)
+ return -ENOMEM;
+
++ if (!(adap->flags & CXGB4_FW_OK)) {
++ /* Don't stall caller when access to FW is lost */
++ complete(&eosw_txq->completion);
++ return -EIO;
++ }
++
+ skb = alloc_skb(len, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+index 011f484606a3a..c40ac82db863e 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+@@ -2205,15 +2205,20 @@ static int i40e_run_xdp(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
+ case XDP_TX:
+ xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
+ result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);
++ if (result == I40E_XDP_CONSUMED)
++ goto out_failure;
+ break;
+ case XDP_REDIRECT:
+ err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
+- result = !err ? I40E_XDP_REDIR : I40E_XDP_CONSUMED;
++ if (err)
++ goto out_failure;
++ result = I40E_XDP_REDIR;
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(act);
+ fallthrough;
+ case XDP_ABORTED:
++out_failure:
+ trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
+ fallthrough; /* handle aborts by dropping packet */
+ case XDP_DROP:
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+index 8557807b41717..86c79f71c685a 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+@@ -159,21 +159,28 @@ static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
+ xdp_prog = READ_ONCE(rx_ring->xdp_prog);
+ act = bpf_prog_run_xdp(xdp_prog, xdp);
+
++ if (likely(act == XDP_REDIRECT)) {
++ err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
++ if (err)
++ goto out_failure;
++ rcu_read_unlock();
++ return I40E_XDP_REDIR;
++ }
++
+ switch (act) {
+ case XDP_PASS:
+ break;
+ case XDP_TX:
+ xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
+ result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);
+- break;
+- case XDP_REDIRECT:
+- err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
+- result = !err ? I40E_XDP_REDIR : I40E_XDP_CONSUMED;
++ if (result == I40E_XDP_CONSUMED)
++ goto out_failure;
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(act);
+ fallthrough;
+ case XDP_ABORTED:
++out_failure:
+ trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
+ fallthrough; /* handle aborts by dropping packet */
+ case XDP_DROP:
+diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
+index d70573f5072c6..a7975afecf70f 100644
+--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
++++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
+@@ -1797,49 +1797,6 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
+ ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100GB,
+ 100000baseKR4_Full);
+ }
+-
+- /* Autoneg PHY types */
+- if (phy_types_low & ICE_PHY_TYPE_LOW_100BASE_TX ||
+- phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_T ||
+- phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_KX ||
+- phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_T ||
+- phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_KX ||
+- phy_types_low & ICE_PHY_TYPE_LOW_5GBASE_T ||
+- phy_types_low & ICE_PHY_TYPE_LOW_5GBASE_KR ||
+- phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_T ||
+- phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_KR_CR1 ||
+- phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_T ||
+- phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR ||
+- phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR_S ||
+- phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR1 ||
+- phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR ||
+- phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR_S ||
+- phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR1 ||
+- phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_CR4 ||
+- phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_KR4) {
+- ethtool_link_ksettings_add_link_mode(ks, supported,
+- Autoneg);
+- ethtool_link_ksettings_add_link_mode(ks, advertising,
+- Autoneg);
+- }
+- if (phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_CR2 ||
+- phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_KR2 ||
+- phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_CP ||
+- phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4) {
+- ethtool_link_ksettings_add_link_mode(ks, supported,
+- Autoneg);
+- ethtool_link_ksettings_add_link_mode(ks, advertising,
+- Autoneg);
+- }
+- if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_CR4 ||
+- phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_KR4 ||
+- phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4 ||
+- phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_CP2) {
+- ethtool_link_ksettings_add_link_mode(ks, supported,
+- Autoneg);
+- ethtool_link_ksettings_add_link_mode(ks, advertising,
+- Autoneg);
+- }
+ }
+
+ #define TEST_SET_BITS_TIMEOUT 50
+@@ -1996,9 +1953,7 @@ ice_get_link_ksettings(struct net_device *netdev,
+ ks->base.port = PORT_TP;
+ break;
+ case ICE_MEDIA_BACKPLANE:
+- ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, supported, Backplane);
+- ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ Backplane);
+ ks->base.port = PORT_NONE;
+@@ -2073,6 +2028,12 @@ ice_get_link_ksettings(struct net_device *netdev,
+ if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN)
+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS);
+
++ /* Set supported and advertised autoneg */
++ if (ice_is_phy_caps_an_enabled(caps)) {
++ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
++ ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
++ }
++
+ done:
+ kfree(caps);
+ return err;
+diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+index 90abc8612a6ab..406dd6bd97a7d 100644
+--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
++++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+@@ -31,6 +31,7 @@
+ #define PF_FW_ATQLEN_ATQOVFL_M BIT(29)
+ #define PF_FW_ATQLEN_ATQCRIT_M BIT(30)
+ #define VF_MBX_ARQLEN(_VF) (0x0022BC00 + ((_VF) * 4))
++#define VF_MBX_ATQLEN(_VF) (0x0022A800 + ((_VF) * 4))
+ #define PF_FW_ATQLEN_ATQENABLE_M BIT(31)
+ #define PF_FW_ATQT 0x00080400
+ #define PF_MBX_ARQBAH 0x0022E400
+diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
+index e1384503dd4d5..fb20c6971f4c7 100644
+--- a/drivers/net/ethernet/intel/ice/ice_lib.c
++++ b/drivers/net/ethernet/intel/ice/ice_lib.c
+@@ -192,6 +192,8 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
+ break;
+ case ICE_VSI_VF:
+ vf = &pf->vf[vsi->vf_id];
++ if (vf->num_req_qs)
++ vf->num_vf_qs = vf->num_req_qs;
+ vsi->alloc_txq = vf->num_vf_qs;
+ vsi->alloc_rxq = vf->num_vf_qs;
+ /* pf->num_msix_per_vf includes (VF miscellaneous vector +
+diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
+index 0f2544c420ac3..442a9bcbf60a7 100644
+--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
++++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
+@@ -537,34 +537,35 @@ static int
+ ice_run_xdp(struct ice_ring *rx_ring, struct xdp_buff *xdp,
+ struct bpf_prog *xdp_prog)
+ {
+- int err, result = ICE_XDP_PASS;
+ struct ice_ring *xdp_ring;
++ int err, result;
+ u32 act;
+
+ act = bpf_prog_run_xdp(xdp_prog, xdp);
+ switch (act) {
+ case XDP_PASS:
+- break;
++ return ICE_XDP_PASS;
+ case XDP_TX:
+ xdp_ring = rx_ring->vsi->xdp_rings[smp_processor_id()];
+ result = ice_xmit_xdp_buff(xdp, xdp_ring);
+- break;
++ if (result == ICE_XDP_CONSUMED)
++ goto out_failure;
++ return result;
+ case XDP_REDIRECT:
+ err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
+- result = !err ? ICE_XDP_REDIR : ICE_XDP_CONSUMED;
+- break;
++ if (err)
++ goto out_failure;
++ return ICE_XDP_REDIR;
+ default:
+ bpf_warn_invalid_xdp_action(act);
+ fallthrough;
+ case XDP_ABORTED:
++out_failure:
+ trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
+ fallthrough;
+ case XDP_DROP:
+- result = ICE_XDP_CONSUMED;
+- break;
++ return ICE_XDP_CONSUMED;
+ }
+-
+- return result;
+ }
+
+ /**
+@@ -2373,6 +2374,7 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
+ struct ice_tx_offload_params offload = { 0 };
+ struct ice_vsi *vsi = tx_ring->vsi;
+ struct ice_tx_buf *first;
++ struct ethhdr *eth;
+ unsigned int count;
+ int tso, csum;
+
+@@ -2419,7 +2421,9 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
+ goto out_drop;
+
+ /* allow CONTROL frames egress from main VSI if FW LLDP disabled */
+- if (unlikely(skb->priority == TC_PRIO_CONTROL &&
++ eth = (struct ethhdr *)skb_mac_header(skb);
++ if (unlikely((skb->priority == TC_PRIO_CONTROL ||
++ eth->h_proto == htons(ETH_P_LLDP)) &&
+ vsi->type == ICE_VSI_PF &&
+ vsi->port_info->qos_cfg.is_sw_lldp))
+ offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
+diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+index b3161c5def465..c9f82fd3cf48d 100644
+--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
++++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+@@ -435,13 +435,15 @@ static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
+ */
+ clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
+
+- /* VF_MBX_ARQLEN is cleared by PFR, so the driver needs to clear it
+- * in the case of VFR. If this is done for PFR, it can mess up VF
+- * resets because the VF driver may already have started cleanup
+- * by the time we get here.
++ /* VF_MBX_ARQLEN and VF_MBX_ATQLEN are cleared by PFR, so the driver
++ * needs to clear them in the case of VFR/VFLR. If this is done for
++ * PFR, it can mess up VF resets because the VF driver may already
++ * have started cleanup by the time we get here.
+ */
+- if (!is_pfr)
++ if (!is_pfr) {
+ wr32(hw, VF_MBX_ARQLEN(vf->vf_id), 0);
++ wr32(hw, VF_MBX_ATQLEN(vf->vf_id), 0);
++ }
+
+ /* In the case of a VFLR, the HW has already reset the VF and we
+ * just need to clean up, so don't hit the VFRTRIG register.
+@@ -1339,7 +1341,12 @@ bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
+ }
+
+ ice_vf_pre_vsi_rebuild(vf);
+- ice_vf_rebuild_vsi_with_release(vf);
++
++ if (ice_vf_rebuild_vsi_with_release(vf)) {
++ dev_err(dev, "Failed to release and setup the VF%u's VSI\n", vf->vf_id);
++ return false;
++ }
++
+ ice_vf_post_vsi_rebuild(vf);
+
+ return true;
+diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
+index 98101a8e2952d..9f36f8d7a9854 100644
+--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
++++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
+@@ -524,21 +524,29 @@ ice_run_xdp_zc(struct ice_ring *rx_ring, struct xdp_buff *xdp)
+ }
+
+ act = bpf_prog_run_xdp(xdp_prog, xdp);
++
++ if (likely(act == XDP_REDIRECT)) {
++ err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
++ if (err)
++ goto out_failure;
++ rcu_read_unlock();
++ return ICE_XDP_REDIR;
++ }
++
+ switch (act) {
+ case XDP_PASS:
+ break;
+ case XDP_TX:
+ xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->q_index];
+ result = ice_xmit_xdp_buff(xdp, xdp_ring);
+- break;
+- case XDP_REDIRECT:
+- err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
+- result = !err ? ICE_XDP_REDIR : ICE_XDP_CONSUMED;
++ if (result == ICE_XDP_CONSUMED)
++ goto out_failure;
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(act);
+ fallthrough;
+ case XDP_ABORTED:
++out_failure:
+ trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
+ fallthrough;
+ case XDP_DROP:
+diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
+index 368f0aac5e1d4..5c87c0a7ce3d7 100644
+--- a/drivers/net/ethernet/intel/igb/igb_main.c
++++ b/drivers/net/ethernet/intel/igb/igb_main.c
+@@ -8419,18 +8419,20 @@ static struct sk_buff *igb_run_xdp(struct igb_adapter *adapter,
+ break;
+ case XDP_TX:
+ result = igb_xdp_xmit_back(adapter, xdp);
++ if (result == IGB_XDP_CONSUMED)
++ goto out_failure;
+ break;
+ case XDP_REDIRECT:
+ err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog);
+- if (!err)
+- result = IGB_XDP_REDIR;
+- else
+- result = IGB_XDP_CONSUMED;
++ if (err)
++ goto out_failure;
++ result = IGB_XDP_REDIR;
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(act);
+ fallthrough;
+ case XDP_ABORTED:
++out_failure:
+ trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
+ fallthrough;
+ case XDP_DROP:
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+index 0b9fddbc5db4f..1bfba87f1ff60 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+@@ -2218,23 +2218,23 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
+ break;
+ case XDP_TX:
+ xdpf = xdp_convert_buff_to_frame(xdp);
+- if (unlikely(!xdpf)) {
+- result = IXGBE_XDP_CONSUMED;
+- break;
+- }
++ if (unlikely(!xdpf))
++ goto out_failure;
+ result = ixgbe_xmit_xdp_ring(adapter, xdpf);
++ if (result == IXGBE_XDP_CONSUMED)
++ goto out_failure;
+ break;
+ case XDP_REDIRECT:
+ err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog);
+- if (!err)
+- result = IXGBE_XDP_REDIR;
+- else
+- result = IXGBE_XDP_CONSUMED;
++ if (err)
++ goto out_failure;
++ result = IXGBE_XDP_REDIR;
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(act);
+ fallthrough;
+ case XDP_ABORTED:
++out_failure:
+ trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
+ fallthrough; /* handle aborts by dropping packet */
+ case XDP_DROP:
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+index 3771857cf887c..f72d2978263b9 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+@@ -104,25 +104,30 @@ static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter,
+ xdp_prog = READ_ONCE(rx_ring->xdp_prog);
+ act = bpf_prog_run_xdp(xdp_prog, xdp);
+
++ if (likely(act == XDP_REDIRECT)) {
++ err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
++ if (err)
++ goto out_failure;
++ rcu_read_unlock();
++ return IXGBE_XDP_REDIR;
++ }
++
+ switch (act) {
+ case XDP_PASS:
+ break;
+ case XDP_TX:
+ xdpf = xdp_convert_buff_to_frame(xdp);
+- if (unlikely(!xdpf)) {
+- result = IXGBE_XDP_CONSUMED;
+- break;
+- }
++ if (unlikely(!xdpf))
++ goto out_failure;
+ result = ixgbe_xmit_xdp_ring(adapter, xdpf);
+- break;
+- case XDP_REDIRECT:
+- err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
+- result = !err ? IXGBE_XDP_REDIR : IXGBE_XDP_CONSUMED;
++ if (result == IXGBE_XDP_CONSUMED)
++ goto out_failure;
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(act);
+ fallthrough;
+ case XDP_ABORTED:
++out_failure:
+ trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
+ fallthrough; /* handle aborts by dropping packet */
+ case XDP_DROP:
+diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+index 82fce27f682bb..a7d0a459969a2 100644
+--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
++++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+@@ -1072,11 +1072,14 @@ static struct sk_buff *ixgbevf_run_xdp(struct ixgbevf_adapter *adapter,
+ case XDP_TX:
+ xdp_ring = adapter->xdp_ring[rx_ring->queue_index];
+ result = ixgbevf_xmit_xdp_ring(xdp_ring, xdp);
++ if (result == IXGBEVF_XDP_CONSUMED)
++ goto out_failure;
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(act);
+ fallthrough;
+ case XDP_ABORTED:
++out_failure:
+ trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
+ fallthrough; /* handle aborts by dropping packet */
+ case XDP_DROP:
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+index 986f0d86e94dc..bc7c1962f9e66 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+@@ -1618,12 +1618,13 @@ static int mlx5e_set_fecparam(struct net_device *netdev,
+ {
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
++ unsigned long fec_bitmap;
+ u16 fec_policy = 0;
+ int mode;
+ int err;
+
+- if (bitmap_weight((unsigned long *)&fecparam->fec,
+- ETHTOOL_FEC_LLRS_BIT + 1) > 1)
++ bitmap_from_arr32(&fec_bitmap, &fecparam->fec, sizeof(fecparam->fec) * BITS_PER_BYTE);
++ if (bitmap_weight(&fec_bitmap, ETHTOOL_FEC_LLRS_BIT + 1) > 1)
+ return -EOPNOTSUPP;
+
+ for (mode = 0; mode < ARRAY_SIZE(pplm_fec_2_ethtool); mode++) {
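The mlx5 FEC hunk removes a cast of a u32 field to unsigned long *: on 64-bit targets bitmap_weight() would then read 32 bits of adjacent stack memory, and on big-endian the bit order is wrong either way. bitmap_from_arr32() is the safe conversion; a standalone sketch:

#include <linux/bitmap.h>
#include <linux/errno.h>

/* Reject a request with more than one mode bit set. fec is a u32
 * from userspace; never reinterpret its address as an unsigned
 * long bitmap. nbits must be <= 32 here.
 */
static int my_check_single_bit(u32 fec, unsigned int nbits)
{
    unsigned long bitmap;

    bitmap_from_arr32(&bitmap, &fec, sizeof(fec) * BITS_PER_BYTE);
    if (bitmap_weight(&bitmap, nbits) > 1)
        return -EOPNOTSUPP;
    return 0;
}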
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+index 1bdeb948f56d7..80abdb0b47d7e 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+@@ -2253,11 +2253,13 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
+ misc_parameters);
+ struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+ struct flow_dissector *dissector = rule->match.dissector;
++ enum fs_flow_table_type fs_type;
+ u16 addr_type = 0;
+ u8 ip_proto = 0;
+ u8 *match_level;
+ int err;
+
++ fs_type = mlx5e_is_eswitch_flow(flow) ? FS_FT_FDB : FS_FT_NIC_RX;
+ match_level = outer_match_level;
+
+ if (dissector->used_keys &
+@@ -2382,6 +2384,13 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
+ if (match.mask->vlan_id ||
+ match.mask->vlan_priority ||
+ match.mask->vlan_tpid) {
++ if (!MLX5_CAP_FLOWTABLE_TYPE(priv->mdev, ft_field_support.outer_second_vid,
++ fs_type)) {
++ NL_SET_ERR_MSG_MOD(extack,
++ "Matching on CVLAN is not supported");
++ return -EOPNOTSUPP;
++ }
++
+ if (match.key->vlan_tpid == htons(ETH_P_8021AD)) {
+ MLX5_SET(fte_match_set_misc, misc_c,
+ outer_second_svlan_tag, 1);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+index f9042e147c7f6..ee710ce007950 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+@@ -354,6 +354,9 @@ static void mlx5_sync_reset_abort_event(struct work_struct *work)
+ reset_abort_work);
+ struct mlx5_core_dev *dev = fw_reset->dev;
+
++ if (!test_bit(MLX5_FW_RESET_FLAGS_RESET_REQUESTED, &fw_reset->reset_flags))
++ return;
++
+ mlx5_sync_reset_clear_reset_requested(dev, true);
+ mlx5_core_warn(dev, "PCI Sync FW Update Reset Aborted.\n");
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c
+index 1fbcd012bb855..7ccfd40586cee 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c
+@@ -112,7 +112,8 @@ int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn,
+ int ret;
+
+ ft_attr.table_type = MLX5_FLOW_TABLE_TYPE_FDB;
+- ft_attr.level = dmn->info.caps.max_ft_level - 2;
++ ft_attr.level = min_t(int, dmn->info.caps.max_ft_level - 2,
++ MLX5_FT_MAX_MULTIPATH_LEVEL);
+ ft_attr.reformat_en = reformat_req;
+ ft_attr.decap_en = reformat_req;
+
+diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
+index 854c6624e6859..1d3bf810f2ca1 100644
+--- a/drivers/net/usb/cdc_ncm.c
++++ b/drivers/net/usb/cdc_ncm.c
+@@ -1827,6 +1827,15 @@ cdc_ncm_speed_change(struct usbnet *dev,
+ uint32_t rx_speed = le32_to_cpu(data->DLBitRRate);
+ uint32_t tx_speed = le32_to_cpu(data->ULBitRate);
+
++ /* if the speed hasn't changed, don't report it.
++ * RTL8156 shipped before 2021 sends notification about every 32ms.
++ */
++ if (dev->rx_speed == rx_speed && dev->tx_speed == tx_speed)
++ return;
++
++ dev->rx_speed = rx_speed;
++ dev->tx_speed = tx_speed;
++
+ /*
+ * Currently the USB-NET API does not support reporting the actual
+ * device speed. Do print it instead.
+@@ -1867,7 +1876,8 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
+ * USB_CDC_NOTIFY_NETWORK_CONNECTION notification shall be
+ * sent by device after USB_CDC_NOTIFY_SPEED_CHANGE.
+ */
+- usbnet_link_change(dev, !!event->wValue, 0);
++ if (netif_carrier_ok(dev->net) != !!event->wValue)
++ usbnet_link_change(dev, !!event->wValue, 0);
+ break;
+
+ case USB_CDC_NOTIFY_SPEED_CHANGE:
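Both cdc_ncm hunks are deduplication rather than throttling: cache the last value seen and only report when it actually changes, since an RTL8156 resends its speed notification roughly every 32 ms. The pattern reduced to a helper (struct my_link_state stands in for the usbnet fields the patch relies on):

#include <linux/types.h>

struct my_link_state {
    u32 rx_speed;   /* last reported values, in bps */
    u32 tx_speed;
};

/* Returns true when the caller should log/propagate the event. */
static bool my_speed_changed(struct my_link_state *st, u32 rx, u32 tx)
{
    if (st->rx_speed == rx && st->tx_speed == tx)
        return false;   /* duplicate notification, swallow it */
    st->rx_speed = rx;
    st->tx_speed = tx;
    return true;
}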
+diff --git a/drivers/net/wireguard/Makefile b/drivers/net/wireguard/Makefile
+index fc52b2cb500b3..dbe1f8514efc3 100644
+--- a/drivers/net/wireguard/Makefile
++++ b/drivers/net/wireguard/Makefile
+@@ -1,5 +1,4 @@
+-ccflags-y := -O3
+-ccflags-y += -D'pr_fmt(fmt)=KBUILD_MODNAME ": " fmt'
++ccflags-y := -D'pr_fmt(fmt)=KBUILD_MODNAME ": " fmt'
+ ccflags-$(CONFIG_WIREGUARD_DEBUG) += -DDEBUG
+ wireguard-y := main.o
+ wireguard-y += noise.o
+diff --git a/drivers/net/wireguard/allowedips.c b/drivers/net/wireguard/allowedips.c
+index 3725e9cd85f4f..b7197e80f2264 100644
+--- a/drivers/net/wireguard/allowedips.c
++++ b/drivers/net/wireguard/allowedips.c
+@@ -6,6 +6,8 @@
+ #include "allowedips.h"
+ #include "peer.h"
+
++static struct kmem_cache *node_cache;
++
+ static void swap_endian(u8 *dst, const u8 *src, u8 bits)
+ {
+ if (bits == 32) {
+@@ -28,8 +30,11 @@ static void copy_and_assign_cidr(struct allowedips_node *node, const u8 *src,
+ node->bitlen = bits;
+ memcpy(node->bits, src, bits / 8U);
+ }
+-#define CHOOSE_NODE(parent, key) \
+- parent->bit[(key[parent->bit_at_a] >> parent->bit_at_b) & 1]
++
++static inline u8 choose(struct allowedips_node *node, const u8 *key)
++{
++ return (key[node->bit_at_a] >> node->bit_at_b) & 1;
++}
+
+ static void push_rcu(struct allowedips_node **stack,
+ struct allowedips_node __rcu *p, unsigned int *len)
+@@ -40,6 +45,11 @@ static void push_rcu(struct allowedips_node **stack,
+ }
+ }
+
++static void node_free_rcu(struct rcu_head *rcu)
++{
++ kmem_cache_free(node_cache, container_of(rcu, struct allowedips_node, rcu));
++}
++
+ static void root_free_rcu(struct rcu_head *rcu)
+ {
+ struct allowedips_node *node, *stack[128] = {
+@@ -49,7 +59,7 @@ static void root_free_rcu(struct rcu_head *rcu)
+ while (len > 0 && (node = stack[--len])) {
+ push_rcu(stack, node->bit[0], &len);
+ push_rcu(stack, node->bit[1], &len);
+- kfree(node);
++ kmem_cache_free(node_cache, node);
+ }
+ }
+
+@@ -66,60 +76,6 @@ static void root_remove_peer_lists(struct allowedips_node *root)
+ }
+ }
+
+-static void walk_remove_by_peer(struct allowedips_node __rcu **top,
+- struct wg_peer *peer, struct mutex *lock)
+-{
+-#define REF(p) rcu_access_pointer(p)
+-#define DEREF(p) rcu_dereference_protected(*(p), lockdep_is_held(lock))
+-#define PUSH(p) ({ \
+- WARN_ON(IS_ENABLED(DEBUG) && len >= 128); \
+- stack[len++] = p; \
+- })
+-
+- struct allowedips_node __rcu **stack[128], **nptr;
+- struct allowedips_node *node, *prev;
+- unsigned int len;
+-
+- if (unlikely(!peer || !REF(*top)))
+- return;
+-
+- for (prev = NULL, len = 0, PUSH(top); len > 0; prev = node) {
+- nptr = stack[len - 1];
+- node = DEREF(nptr);
+- if (!node) {
+- --len;
+- continue;
+- }
+- if (!prev || REF(prev->bit[0]) == node ||
+- REF(prev->bit[1]) == node) {
+- if (REF(node->bit[0]))
+- PUSH(&node->bit[0]);
+- else if (REF(node->bit[1]))
+- PUSH(&node->bit[1]);
+- } else if (REF(node->bit[0]) == prev) {
+- if (REF(node->bit[1]))
+- PUSH(&node->bit[1]);
+- } else {
+- if (rcu_dereference_protected(node->peer,
+- lockdep_is_held(lock)) == peer) {
+- RCU_INIT_POINTER(node->peer, NULL);
+- list_del_init(&node->peer_list);
+- if (!node->bit[0] || !node->bit[1]) {
+- rcu_assign_pointer(*nptr, DEREF(
+- &node->bit[!REF(node->bit[0])]));
+- kfree_rcu(node, rcu);
+- node = DEREF(nptr);
+- }
+- }
+- --len;
+- }
+- }
+-
+-#undef REF
+-#undef DEREF
+-#undef PUSH
+-}
+-
+ static unsigned int fls128(u64 a, u64 b)
+ {
+ return a ? fls64(a) + 64U : fls64(b);
+@@ -159,7 +115,7 @@ static struct allowedips_node *find_node(struct allowedips_node *trie, u8 bits,
+ found = node;
+ if (node->cidr == bits)
+ break;
+- node = rcu_dereference_bh(CHOOSE_NODE(node, key));
++ node = rcu_dereference_bh(node->bit[choose(node, key)]);
+ }
+ return found;
+ }
+@@ -191,8 +147,7 @@ static bool node_placement(struct allowedips_node __rcu *trie, const u8 *key,
+ u8 cidr, u8 bits, struct allowedips_node **rnode,
+ struct mutex *lock)
+ {
+- struct allowedips_node *node = rcu_dereference_protected(trie,
+- lockdep_is_held(lock));
++ struct allowedips_node *node = rcu_dereference_protected(trie, lockdep_is_held(lock));
+ struct allowedips_node *parent = NULL;
+ bool exact = false;
+
+@@ -202,13 +157,24 @@ static bool node_placement(struct allowedips_node __rcu *trie, const u8 *key,
+ exact = true;
+ break;
+ }
+- node = rcu_dereference_protected(CHOOSE_NODE(parent, key),
+- lockdep_is_held(lock));
++ node = rcu_dereference_protected(parent->bit[choose(parent, key)], lockdep_is_held(lock));
+ }
+ *rnode = parent;
+ return exact;
+ }
+
++static inline void connect_node(struct allowedips_node **parent, u8 bit, struct allowedips_node *node)
++{
++ node->parent_bit_packed = (unsigned long)parent | bit;
++ rcu_assign_pointer(*parent, node);
++}
++
++static inline void choose_and_connect_node(struct allowedips_node *parent, struct allowedips_node *node)
++{
++ u8 bit = choose(parent, node->bits);
++ connect_node(&parent->bit[bit], bit, node);
++}
++
+ static int add(struct allowedips_node __rcu **trie, u8 bits, const u8 *key,
+ u8 cidr, struct wg_peer *peer, struct mutex *lock)
+ {
+@@ -218,13 +184,13 @@ static int add(struct allowedips_node __rcu **trie, u8 bits, const u8 *key,
+ return -EINVAL;
+
+ if (!rcu_access_pointer(*trie)) {
+- node = kzalloc(sizeof(*node), GFP_KERNEL);
++ node = kmem_cache_zalloc(node_cache, GFP_KERNEL);
+ if (unlikely(!node))
+ return -ENOMEM;
+ RCU_INIT_POINTER(node->peer, peer);
+ list_add_tail(&node->peer_list, &peer->allowedips_list);
+ copy_and_assign_cidr(node, key, cidr, bits);
+- rcu_assign_pointer(*trie, node);
++ connect_node(trie, 2, node);
+ return 0;
+ }
+ if (node_placement(*trie, key, cidr, bits, &node, lock)) {
+@@ -233,7 +199,7 @@ static int add(struct allowedips_node __rcu **trie, u8 bits, const u8 *key,
+ return 0;
+ }
+
+- newnode = kzalloc(sizeof(*newnode), GFP_KERNEL);
++ newnode = kmem_cache_zalloc(node_cache, GFP_KERNEL);
+ if (unlikely(!newnode))
+ return -ENOMEM;
+ RCU_INIT_POINTER(newnode->peer, peer);
+@@ -243,10 +209,10 @@ static int add(struct allowedips_node __rcu **trie, u8 bits, const u8 *key,
+ if (!node) {
+ down = rcu_dereference_protected(*trie, lockdep_is_held(lock));
+ } else {
+- down = rcu_dereference_protected(CHOOSE_NODE(node, key),
+- lockdep_is_held(lock));
++ const u8 bit = choose(node, key);
++ down = rcu_dereference_protected(node->bit[bit], lockdep_is_held(lock));
+ if (!down) {
+- rcu_assign_pointer(CHOOSE_NODE(node, key), newnode);
++ connect_node(&node->bit[bit], bit, newnode);
+ return 0;
+ }
+ }
+@@ -254,30 +220,29 @@ static int add(struct allowedips_node __rcu **trie, u8 bits, const u8 *key,
+ parent = node;
+
+ if (newnode->cidr == cidr) {
+- rcu_assign_pointer(CHOOSE_NODE(newnode, down->bits), down);
++ choose_and_connect_node(newnode, down);
+ if (!parent)
+- rcu_assign_pointer(*trie, newnode);
++ connect_node(trie, 2, newnode);
+ else
+- rcu_assign_pointer(CHOOSE_NODE(parent, newnode->bits),
+- newnode);
+- } else {
+- node = kzalloc(sizeof(*node), GFP_KERNEL);
+- if (unlikely(!node)) {
+- list_del(&newnode->peer_list);
+- kfree(newnode);
+- return -ENOMEM;
+- }
+- INIT_LIST_HEAD(&node->peer_list);
+- copy_and_assign_cidr(node, newnode->bits, cidr, bits);
++ choose_and_connect_node(parent, newnode);
++ return 0;
++ }
+
+- rcu_assign_pointer(CHOOSE_NODE(node, down->bits), down);
+- rcu_assign_pointer(CHOOSE_NODE(node, newnode->bits), newnode);
+- if (!parent)
+- rcu_assign_pointer(*trie, node);
+- else
+- rcu_assign_pointer(CHOOSE_NODE(parent, node->bits),
+- node);
++ node = kmem_cache_zalloc(node_cache, GFP_KERNEL);
++ if (unlikely(!node)) {
++ list_del(&newnode->peer_list);
++ kmem_cache_free(node_cache, newnode);
++ return -ENOMEM;
+ }
++ INIT_LIST_HEAD(&node->peer_list);
++ copy_and_assign_cidr(node, newnode->bits, cidr, bits);
++
++ choose_and_connect_node(node, down);
++ choose_and_connect_node(node, newnode);
++ if (!parent)
++ connect_node(trie, 2, node);
++ else
++ choose_and_connect_node(parent, node);
+ return 0;
+ }
+
+@@ -335,9 +300,41 @@ int wg_allowedips_insert_v6(struct allowedips *table, const struct in6_addr *ip,
+ void wg_allowedips_remove_by_peer(struct allowedips *table,
+ struct wg_peer *peer, struct mutex *lock)
+ {
++ struct allowedips_node *node, *child, **parent_bit, *parent, *tmp;
++ bool free_parent;
++
++ if (list_empty(&peer->allowedips_list))
++ return;
+ ++table->seq;
+- walk_remove_by_peer(&table->root4, peer, lock);
+- walk_remove_by_peer(&table->root6, peer, lock);
++ list_for_each_entry_safe(node, tmp, &peer->allowedips_list, peer_list) {
++ list_del_init(&node->peer_list);
++ RCU_INIT_POINTER(node->peer, NULL);
++ if (node->bit[0] && node->bit[1])
++ continue;
++ child = rcu_dereference_protected(node->bit[!rcu_access_pointer(node->bit[0])],
++ lockdep_is_held(lock));
++ if (child)
++ child->parent_bit_packed = node->parent_bit_packed;
++ parent_bit = (struct allowedips_node **)(node->parent_bit_packed & ~3UL);
++ *parent_bit = child;
++ parent = (void *)parent_bit -
++ offsetof(struct allowedips_node, bit[node->parent_bit_packed & 1]);
++ free_parent = !rcu_access_pointer(node->bit[0]) &&
++ !rcu_access_pointer(node->bit[1]) &&
++ (node->parent_bit_packed & 3) <= 1 &&
++ !rcu_access_pointer(parent->peer);
++ if (free_parent)
++ child = rcu_dereference_protected(
++ parent->bit[!(node->parent_bit_packed & 1)],
++ lockdep_is_held(lock));
++ call_rcu(&node->rcu, node_free_rcu);
++ if (!free_parent)
++ continue;
++ if (child)
++ child->parent_bit_packed = parent->parent_bit_packed;
++ *(struct allowedips_node **)(parent->parent_bit_packed & ~3UL) = child;
++ call_rcu(&parent->rcu, node_free_rcu);
++ }
+ }
+
+ int wg_allowedips_read_node(struct allowedips_node *node, u8 ip[16], u8 *cidr)
+@@ -374,4 +371,16 @@ struct wg_peer *wg_allowedips_lookup_src(struct allowedips *table,
+ return NULL;
+ }
+
++int __init wg_allowedips_slab_init(void)
++{
++ node_cache = KMEM_CACHE(allowedips_node, 0);
++ return node_cache ? 0 : -ENOMEM;
++}
++
++void wg_allowedips_slab_uninit(void)
++{
++ rcu_barrier();
++ kmem_cache_destroy(node_cache);
++}
++
+ #include "selftest/allowedips.c"
+diff --git a/drivers/net/wireguard/allowedips.h b/drivers/net/wireguard/allowedips.h
+index e5c83cafcef4c..2346c797eb4d8 100644
+--- a/drivers/net/wireguard/allowedips.h
++++ b/drivers/net/wireguard/allowedips.h
+@@ -15,14 +15,11 @@ struct wg_peer;
+ struct allowedips_node {
+ struct wg_peer __rcu *peer;
+ struct allowedips_node __rcu *bit[2];
+- /* While it may seem scandalous that we waste space for v4,
+- * we're alloc'ing to the nearest power of 2 anyway, so this
+- * doesn't actually make a difference.
+- */
+- u8 bits[16] __aligned(__alignof(u64));
+ u8 cidr, bit_at_a, bit_at_b, bitlen;
++ u8 bits[16] __aligned(__alignof(u64));
+
+- /* Keep rarely used list at bottom to be beyond cache line. */
++ /* Keep rarely used members at bottom to be beyond cache line. */
++ unsigned long parent_bit_packed;
+ union {
+ struct list_head peer_list;
+ struct rcu_head rcu;
+@@ -33,7 +30,7 @@ struct allowedips {
+ struct allowedips_node __rcu *root4;
+ struct allowedips_node __rcu *root6;
+ u64 seq;
+-};
++} __aligned(4); /* We pack the lower 2 bits of &root, but m68k only gives 16-bit alignment. */
+
+ void wg_allowedips_init(struct allowedips *table);
+ void wg_allowedips_free(struct allowedips *table, struct mutex *mutex);
+@@ -56,4 +53,7 @@ struct wg_peer *wg_allowedips_lookup_src(struct allowedips *table,
+ bool wg_allowedips_selftest(void);
+ #endif
+
++int wg_allowedips_slab_init(void);
++void wg_allowedips_slab_uninit(void);
++
+ #endif /* _WG_ALLOWEDIPS_H */
+diff --git a/drivers/net/wireguard/main.c b/drivers/net/wireguard/main.c
+index 7a7d5f1a80fc7..75dbe77b0b4b4 100644
+--- a/drivers/net/wireguard/main.c
++++ b/drivers/net/wireguard/main.c
+@@ -21,13 +21,22 @@ static int __init mod_init(void)
+ {
+ int ret;
+
++ ret = wg_allowedips_slab_init();
++ if (ret < 0)
++ goto err_allowedips;
++
+ #ifdef DEBUG
++ ret = -ENOTRECOVERABLE;
+ if (!wg_allowedips_selftest() || !wg_packet_counter_selftest() ||
+ !wg_ratelimiter_selftest())
+- return -ENOTRECOVERABLE;
++ goto err_peer;
+ #endif
+ wg_noise_init();
+
++ ret = wg_peer_init();
++ if (ret < 0)
++ goto err_peer;
++
+ ret = wg_device_init();
+ if (ret < 0)
+ goto err_device;
+@@ -44,6 +53,10 @@ static int __init mod_init(void)
+ err_netlink:
+ wg_device_uninit();
+ err_device:
++ wg_peer_uninit();
++err_peer:
++ wg_allowedips_slab_uninit();
++err_allowedips:
+ return ret;
+ }
+
+@@ -51,6 +64,8 @@ static void __exit mod_exit(void)
+ {
+ wg_genetlink_uninit();
+ wg_device_uninit();
++ wg_peer_uninit();
++ wg_allowedips_slab_uninit();
+ }
+
+ module_init(mod_init);
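mod_init() above grows the classic goto-unwind ladder: each stage that initialized successfully gets a label that later failures jump past, and mod_exit() tears down in exact reverse order. The skeleton of that pattern, with foo/bar as placeholder subsystems:

#include <linux/init.h>
#include <linux/module.h>

static int __init foo_init(void) { return 0; }
static void foo_uninit(void) { }
static int __init bar_init(void) { return 0; }
static void bar_uninit(void) { }

static int __init my_mod_init(void)
{
    int ret;

    ret = foo_init();
    if (ret < 0)
        goto err_foo;
    ret = bar_init();
    if (ret < 0)
        goto err_bar;   /* foo succeeded, so unwind it */
    return 0;

err_bar:
    foo_uninit();
err_foo:
    return ret;
}

static void __exit my_mod_exit(void)
{
    bar_uninit();   /* reverse order of init */
    foo_uninit();
}

module_init(my_mod_init);
module_exit(my_mod_exit);
MODULE_LICENSE("GPL");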
+diff --git a/drivers/net/wireguard/peer.c b/drivers/net/wireguard/peer.c
+index cd5cb0292cb67..1acd00ab2fbcb 100644
+--- a/drivers/net/wireguard/peer.c
++++ b/drivers/net/wireguard/peer.c
+@@ -15,6 +15,7 @@
+ #include <linux/rcupdate.h>
+ #include <linux/list.h>
+
++static struct kmem_cache *peer_cache;
+ static atomic64_t peer_counter = ATOMIC64_INIT(0);
+
+ struct wg_peer *wg_peer_create(struct wg_device *wg,
+@@ -29,10 +30,10 @@ struct wg_peer *wg_peer_create(struct wg_device *wg,
+ if (wg->num_peers >= MAX_PEERS_PER_DEVICE)
+ return ERR_PTR(ret);
+
+- peer = kzalloc(sizeof(*peer), GFP_KERNEL);
++ peer = kmem_cache_zalloc(peer_cache, GFP_KERNEL);
+ if (unlikely(!peer))
+ return ERR_PTR(ret);
+- if (dst_cache_init(&peer->endpoint_cache, GFP_KERNEL))
++ if (unlikely(dst_cache_init(&peer->endpoint_cache, GFP_KERNEL)))
+ goto err;
+
+ peer->device = wg;
+@@ -64,7 +65,7 @@ struct wg_peer *wg_peer_create(struct wg_device *wg,
+ return peer;
+
+ err:
+- kfree(peer);
++ kmem_cache_free(peer_cache, peer);
+ return ERR_PTR(ret);
+ }
+
+@@ -88,7 +89,7 @@ static void peer_make_dead(struct wg_peer *peer)
+ /* Mark as dead, so that we don't allow jumping contexts after. */
+ WRITE_ONCE(peer->is_dead, true);
+
+- /* The caller must now synchronize_rcu() for this to take effect. */
++ /* The caller must now synchronize_net() for this to take effect. */
+ }
+
+ static void peer_remove_after_dead(struct wg_peer *peer)
+@@ -160,7 +161,7 @@ void wg_peer_remove(struct wg_peer *peer)
+ lockdep_assert_held(&peer->device->device_update_lock);
+
+ peer_make_dead(peer);
+- synchronize_rcu();
++ synchronize_net();
+ peer_remove_after_dead(peer);
+ }
+
+@@ -178,7 +179,7 @@ void wg_peer_remove_all(struct wg_device *wg)
+ peer_make_dead(peer);
+ list_add_tail(&peer->peer_list, &dead_peers);
+ }
+- synchronize_rcu();
++ synchronize_net();
+ list_for_each_entry_safe(peer, temp, &dead_peers, peer_list)
+ peer_remove_after_dead(peer);
+ }
+@@ -193,7 +194,8 @@ static void rcu_release(struct rcu_head *rcu)
+ /* The final zeroing takes care of clearing any remaining handshake key
+ * material and other potentially sensitive information.
+ */
+- kfree_sensitive(peer);
++ memzero_explicit(peer, sizeof(*peer));
++ kmem_cache_free(peer_cache, peer);
+ }
+
+ static void kref_release(struct kref *refcount)
+@@ -225,3 +227,14 @@ void wg_peer_put(struct wg_peer *peer)
+ return;
+ kref_put(&peer->refcount, kref_release);
+ }
++
++int __init wg_peer_init(void)
++{
++ peer_cache = KMEM_CACHE(wg_peer, 0);
++ return peer_cache ? 0 : -ENOMEM;
++}
++
++void wg_peer_uninit(void)
++{
++ kmem_cache_destroy(peer_cache);
++}
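Moving struct wg_peer into a dedicated kmem_cache forces the free path to change as well: kfree_sensitive() only understands kmalloc() memory, so the patch open-codes the scrub with memzero_explicit() before kmem_cache_free(). In general form:

#include <linux/slab.h>
#include <linux/string.h>

struct my_secret {
    u8 key[32];
};

static struct kmem_cache *my_cache; /* KMEM_CACHE(my_secret, 0) at init */

static void my_secret_free(struct my_secret *s)
{
    /* memzero_explicit() cannot be optimized away, so key material
     * never lingers in the recycled slab object.
     */
    memzero_explicit(s, sizeof(*s));
    kmem_cache_free(my_cache, s);
}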
+diff --git a/drivers/net/wireguard/peer.h b/drivers/net/wireguard/peer.h
+index 0809cda08bfa4..74227aa2d5b5a 100644
+--- a/drivers/net/wireguard/peer.h
++++ b/drivers/net/wireguard/peer.h
+@@ -80,4 +80,7 @@ void wg_peer_put(struct wg_peer *peer);
+ void wg_peer_remove(struct wg_peer *peer);
+ void wg_peer_remove_all(struct wg_device *wg);
+
++int wg_peer_init(void);
++void wg_peer_uninit(void);
++
+ #endif /* _WG_PEER_H */
+diff --git a/drivers/net/wireguard/selftest/allowedips.c b/drivers/net/wireguard/selftest/allowedips.c
+index 846db14cb046b..e173204ae7d78 100644
+--- a/drivers/net/wireguard/selftest/allowedips.c
++++ b/drivers/net/wireguard/selftest/allowedips.c
+@@ -19,32 +19,22 @@
+
+ #include <linux/siphash.h>
+
+-static __init void swap_endian_and_apply_cidr(u8 *dst, const u8 *src, u8 bits,
+- u8 cidr)
+-{
+- swap_endian(dst, src, bits);
+- memset(dst + (cidr + 7) / 8, 0, bits / 8 - (cidr + 7) / 8);
+- if (cidr)
+- dst[(cidr + 7) / 8 - 1] &= ~0U << ((8 - (cidr % 8)) % 8);
+-}
+-
+ static __init void print_node(struct allowedips_node *node, u8 bits)
+ {
+ char *fmt_connection = KERN_DEBUG "\t\"%p/%d\" -> \"%p/%d\";\n";
+- char *fmt_declaration = KERN_DEBUG
+- "\t\"%p/%d\"[style=%s, color=\"#%06x\"];\n";
++ char *fmt_declaration = KERN_DEBUG "\t\"%p/%d\"[style=%s, color=\"#%06x\"];\n";
++ u8 ip1[16], ip2[16], cidr1, cidr2;
+ char *style = "dotted";
+- u8 ip1[16], ip2[16];
+ u32 color = 0;
+
++ if (node == NULL)
++ return;
+ if (bits == 32) {
+ fmt_connection = KERN_DEBUG "\t\"%pI4/%d\" -> \"%pI4/%d\";\n";
+- fmt_declaration = KERN_DEBUG
+- "\t\"%pI4/%d\"[style=%s, color=\"#%06x\"];\n";
++ fmt_declaration = KERN_DEBUG "\t\"%pI4/%d\"[style=%s, color=\"#%06x\"];\n";
+ } else if (bits == 128) {
+ fmt_connection = KERN_DEBUG "\t\"%pI6/%d\" -> \"%pI6/%d\";\n";
+- fmt_declaration = KERN_DEBUG
+- "\t\"%pI6/%d\"[style=%s, color=\"#%06x\"];\n";
++ fmt_declaration = KERN_DEBUG "\t\"%pI6/%d\"[style=%s, color=\"#%06x\"];\n";
+ }
+ if (node->peer) {
+ hsiphash_key_t key = { { 0 } };
+@@ -55,24 +45,20 @@ static __init void print_node(struct allowedips_node *node, u8 bits)
+ hsiphash_1u32(0xabad1dea, &key) % 200;
+ style = "bold";
+ }
+- swap_endian_and_apply_cidr(ip1, node->bits, bits, node->cidr);
+- printk(fmt_declaration, ip1, node->cidr, style, color);
++ wg_allowedips_read_node(node, ip1, &cidr1);
++ printk(fmt_declaration, ip1, cidr1, style, color);
+ if (node->bit[0]) {
+- swap_endian_and_apply_cidr(ip2,
+- rcu_dereference_raw(node->bit[0])->bits, bits,
+- node->cidr);
+- printk(fmt_connection, ip1, node->cidr, ip2,
+- rcu_dereference_raw(node->bit[0])->cidr);
+- print_node(rcu_dereference_raw(node->bit[0]), bits);
++ wg_allowedips_read_node(rcu_dereference_raw(node->bit[0]), ip2, &cidr2);
++ printk(fmt_connection, ip1, cidr1, ip2, cidr2);
+ }
+ if (node->bit[1]) {
+- swap_endian_and_apply_cidr(ip2,
+- rcu_dereference_raw(node->bit[1])->bits,
+- bits, node->cidr);
+- printk(fmt_connection, ip1, node->cidr, ip2,
+- rcu_dereference_raw(node->bit[1])->cidr);
+- print_node(rcu_dereference_raw(node->bit[1]), bits);
++ wg_allowedips_read_node(rcu_dereference_raw(node->bit[1]), ip2, &cidr2);
++ printk(fmt_connection, ip1, cidr1, ip2, cidr2);
+ }
++ if (node->bit[0])
++ print_node(rcu_dereference_raw(node->bit[0]), bits);
++ if (node->bit[1])
++ print_node(rcu_dereference_raw(node->bit[1]), bits);
+ }
+
+ static __init void print_tree(struct allowedips_node __rcu *top, u8 bits)
+@@ -121,8 +107,8 @@ static __init inline union nf_inet_addr horrible_cidr_to_mask(u8 cidr)
+ {
+ union nf_inet_addr mask;
+
+- memset(&mask, 0x00, 128 / 8);
+- memset(&mask, 0xff, cidr / 8);
++ memset(&mask, 0, sizeof(mask));
++ memset(&mask.all, 0xff, cidr / 8);
+ if (cidr % 32)
+ mask.all[cidr / 32] = (__force u32)htonl(
+ (0xFFFFFFFFUL << (32 - (cidr % 32))) & 0xFFFFFFFFUL);
+@@ -149,42 +135,36 @@ horrible_mask_self(struct horrible_allowedips_node *node)
+ }
+
+ static __init inline bool
+-horrible_match_v4(const struct horrible_allowedips_node *node,
+- struct in_addr *ip)
++horrible_match_v4(const struct horrible_allowedips_node *node, struct in_addr *ip)
+ {
+ return (ip->s_addr & node->mask.ip) == node->ip.ip;
+ }
+
+ static __init inline bool
+-horrible_match_v6(const struct horrible_allowedips_node *node,
+- struct in6_addr *ip)
++horrible_match_v6(const struct horrible_allowedips_node *node, struct in6_addr *ip)
+ {
+- return (ip->in6_u.u6_addr32[0] & node->mask.ip6[0]) ==
+- node->ip.ip6[0] &&
+- (ip->in6_u.u6_addr32[1] & node->mask.ip6[1]) ==
+- node->ip.ip6[1] &&
+- (ip->in6_u.u6_addr32[2] & node->mask.ip6[2]) ==
+- node->ip.ip6[2] &&
++ return (ip->in6_u.u6_addr32[0] & node->mask.ip6[0]) == node->ip.ip6[0] &&
++ (ip->in6_u.u6_addr32[1] & node->mask.ip6[1]) == node->ip.ip6[1] &&
++ (ip->in6_u.u6_addr32[2] & node->mask.ip6[2]) == node->ip.ip6[2] &&
+ (ip->in6_u.u6_addr32[3] & node->mask.ip6[3]) == node->ip.ip6[3];
+ }
+
+ static __init void
+-horrible_insert_ordered(struct horrible_allowedips *table,
+- struct horrible_allowedips_node *node)
++horrible_insert_ordered(struct horrible_allowedips *table, struct horrible_allowedips_node *node)
+ {
+ struct horrible_allowedips_node *other = NULL, *where = NULL;
+ u8 my_cidr = horrible_mask_to_cidr(node->mask);
+
+ hlist_for_each_entry(other, &table->head, table) {
+- if (!memcmp(&other->mask, &node->mask,
+- sizeof(union nf_inet_addr)) &&
+- !memcmp(&other->ip, &node->ip,
+- sizeof(union nf_inet_addr)) &&
+- other->ip_version == node->ip_version) {
++ if (other->ip_version == node->ip_version &&
++ !memcmp(&other->mask, &node->mask, sizeof(union nf_inet_addr)) &&
++ !memcmp(&other->ip, &node->ip, sizeof(union nf_inet_addr))) {
+ other->value = node->value;
+ kfree(node);
+ return;
+ }
++ }
++ hlist_for_each_entry(other, &table->head, table) {
+ where = other;
+ if (horrible_mask_to_cidr(other->mask) <= my_cidr)
+ break;
+@@ -201,8 +181,7 @@ static __init int
+ horrible_allowedips_insert_v4(struct horrible_allowedips *table,
+ struct in_addr *ip, u8 cidr, void *value)
+ {
+- struct horrible_allowedips_node *node = kzalloc(sizeof(*node),
+- GFP_KERNEL);
++ struct horrible_allowedips_node *node = kzalloc(sizeof(*node), GFP_KERNEL);
+
+ if (unlikely(!node))
+ return -ENOMEM;
+@@ -219,8 +198,7 @@ static __init int
+ horrible_allowedips_insert_v6(struct horrible_allowedips *table,
+ struct in6_addr *ip, u8 cidr, void *value)
+ {
+- struct horrible_allowedips_node *node = kzalloc(sizeof(*node),
+- GFP_KERNEL);
++ struct horrible_allowedips_node *node = kzalloc(sizeof(*node), GFP_KERNEL);
+
+ if (unlikely(!node))
+ return -ENOMEM;
+@@ -234,39 +212,43 @@ horrible_allowedips_insert_v6(struct horrible_allowedips *table,
+ }
+
+ static __init void *
+-horrible_allowedips_lookup_v4(struct horrible_allowedips *table,
+- struct in_addr *ip)
++horrible_allowedips_lookup_v4(struct horrible_allowedips *table, struct in_addr *ip)
+ {
+ struct horrible_allowedips_node *node;
+- void *ret = NULL;
+
+ hlist_for_each_entry(node, &table->head, table) {
+- if (node->ip_version != 4)
+- continue;
+- if (horrible_match_v4(node, ip)) {
+- ret = node->value;
+- break;
+- }
++ if (node->ip_version == 4 && horrible_match_v4(node, ip))
++ return node->value;
+ }
+- return ret;
++ return NULL;
+ }
+
+ static __init void *
+-horrible_allowedips_lookup_v6(struct horrible_allowedips *table,
+- struct in6_addr *ip)
++horrible_allowedips_lookup_v6(struct horrible_allowedips *table, struct in6_addr *ip)
+ {
+ struct horrible_allowedips_node *node;
+- void *ret = NULL;
+
+ hlist_for_each_entry(node, &table->head, table) {
+- if (node->ip_version != 6)
++ if (node->ip_version == 6 && horrible_match_v6(node, ip))
++ return node->value;
++ }
++ return NULL;
++}
++
++
++static __init void
++horrible_allowedips_remove_by_value(struct horrible_allowedips *table, void *value)
++{
++ struct horrible_allowedips_node *node;
++ struct hlist_node *h;
++
++ hlist_for_each_entry_safe(node, h, &table->head, table) {
++ if (node->value != value)
+ continue;
+- if (horrible_match_v6(node, ip)) {
+- ret = node->value;
+- break;
+- }
++ hlist_del(&node->table);
++ kfree(node);
+ }
+- return ret;
++
+ }
+
+ static __init bool randomized_test(void)
+@@ -296,6 +278,7 @@ static __init bool randomized_test(void)
+ goto free;
+ }
+ kref_init(&peers[i]->refcount);
++ INIT_LIST_HEAD(&peers[i]->allowedips_list);
+ }
+
+ mutex_lock(&mutex);
+@@ -333,7 +316,7 @@ static __init bool randomized_test(void)
+ if (wg_allowedips_insert_v4(&t,
+ (struct in_addr *)mutated,
+ cidr, peer, &mutex) < 0) {
+- pr_err("allowedips random malloc: FAIL\n");
++ pr_err("allowedips random self-test malloc: FAIL\n");
+ goto free_locked;
+ }
+ if (horrible_allowedips_insert_v4(&h,
+@@ -396,23 +379,33 @@ static __init bool randomized_test(void)
+ print_tree(t.root6, 128);
+ }
+
+- for (i = 0; i < NUM_QUERIES; ++i) {
+- prandom_bytes(ip, 4);
+- if (lookup(t.root4, 32, ip) !=
+- horrible_allowedips_lookup_v4(&h, (struct in_addr *)ip)) {
+- pr_err("allowedips random self-test: FAIL\n");
+- goto free;
++ for (j = 0;; ++j) {
++ for (i = 0; i < NUM_QUERIES; ++i) {
++ prandom_bytes(ip, 4);
++ if (lookup(t.root4, 32, ip) != horrible_allowedips_lookup_v4(&h, (struct in_addr *)ip)) {
++ horrible_allowedips_lookup_v4(&h, (struct in_addr *)ip);
++ pr_err("allowedips random v4 self-test: FAIL\n");
++ goto free;
++ }
++ prandom_bytes(ip, 16);
++ if (lookup(t.root6, 128, ip) != horrible_allowedips_lookup_v6(&h, (struct in6_addr *)ip)) {
++ pr_err("allowedips random v6 self-test: FAIL\n");
++ goto free;
++ }
+ }
++ if (j >= NUM_PEERS)
++ break;
++ mutex_lock(&mutex);
++ wg_allowedips_remove_by_peer(&t, peers[j], &mutex);
++ mutex_unlock(&mutex);
++ horrible_allowedips_remove_by_value(&h, peers[j]);
+ }
+
+- for (i = 0; i < NUM_QUERIES; ++i) {
+- prandom_bytes(ip, 16);
+- if (lookup(t.root6, 128, ip) !=
+- horrible_allowedips_lookup_v6(&h, (struct in6_addr *)ip)) {
+- pr_err("allowedips random self-test: FAIL\n");
+- goto free;
+- }
++ if (t.root4 || t.root6) {
++ pr_err("allowedips random self-test removal: FAIL\n");
++ goto free;
+ }
++
+ ret = true;
+
+ free:
+diff --git a/drivers/net/wireguard/socket.c b/drivers/net/wireguard/socket.c
+index c33e2c81635fa..c8cd385d233b6 100644
+--- a/drivers/net/wireguard/socket.c
++++ b/drivers/net/wireguard/socket.c
+@@ -430,7 +430,7 @@ void wg_socket_reinit(struct wg_device *wg, struct sock *new4,
+ if (new4)
+ wg->incoming_port = ntohs(inet_sk(new4)->inet_sport);
+ mutex_unlock(&wg->socket_update_lock);
+- synchronize_rcu();
++ synchronize_net();
+ sock_free(old4);
+ sock_free(old6);
+ }
+diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
+index e02a4fbb74de5..7ce9807fc24c5 100644
+--- a/drivers/net/xen-netback/interface.c
++++ b/drivers/net/xen-netback/interface.c
+@@ -685,6 +685,7 @@ static void xenvif_disconnect_queue(struct xenvif_queue *queue)
+ {
+ if (queue->task) {
+ kthread_stop(queue->task);
++ put_task_struct(queue->task);
+ queue->task = NULL;
+ }
+
+@@ -745,6 +746,11 @@ int xenvif_connect_data(struct xenvif_queue *queue,
+ if (IS_ERR(task))
+ goto kthread_err;
+ queue->task = task;
++ /*
++ * Take a reference to the task in order to prevent it from being freed
++ * if the thread function returns before kthread_stop is called.
++ */
++ get_task_struct(task);
+
+ task = kthread_run(xenvif_dealloc_kthread, queue,
+ "%s-dealloc", queue->name);
+diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
+index 8b326508a480e..e6d58402b829d 100644
+--- a/drivers/nvme/host/rdma.c
++++ b/drivers/nvme/host/rdma.c
+@@ -1327,16 +1327,17 @@ static int nvme_rdma_map_sg_inline(struct nvme_rdma_queue *queue,
+ int count)
+ {
+ struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
+- struct scatterlist *sgl = req->data_sgl.sg_table.sgl;
+ struct ib_sge *sge = &req->sge[1];
++ struct scatterlist *sgl;
+ u32 len = 0;
+ int i;
+
+- for (i = 0; i < count; i++, sgl++, sge++) {
++ for_each_sg(req->data_sgl.sg_table.sgl, sgl, count, i) {
+ sge->addr = sg_dma_address(sgl);
+ sge->length = sg_dma_len(sgl);
+ sge->lkey = queue->device->pd->local_dma_lkey;
+ len += sge->length;
++ sge++;
+ }
+
+ sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff);
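The nvme-rdma change is about chained scatterlists: sgl++ assumes the entries form one flat array, but a chained sg_table splices in link entries, so iteration has to go through sg_next(), which is exactly what for_each_sg() does. In isolation:

#include <linux/scatterlist.h>

/* Sum the DMA lengths of the first "count" mapped entries.
 * for_each_sg() follows chain links; open-coded sgl++ would walk
 * straight into a chain entry.
 */
static u32 my_total_dma_len(struct scatterlist *sgl, int count)
{
    struct scatterlist *sg;
    u32 len = 0;
    int i;

    for_each_sg(sgl, sg, count, i)
        len += sg_dma_len(sg);
    return len;
}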
+diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
+index 46e4f7ea34c8b..8b939e9db470c 100644
+--- a/drivers/nvme/target/core.c
++++ b/drivers/nvme/target/core.c
+@@ -988,19 +988,23 @@ static unsigned int nvmet_data_transfer_len(struct nvmet_req *req)
+ return req->transfer_len - req->metadata_len;
+ }
+
+-static int nvmet_req_alloc_p2pmem_sgls(struct nvmet_req *req)
++static int nvmet_req_alloc_p2pmem_sgls(struct pci_dev *p2p_dev,
++ struct nvmet_req *req)
+ {
+- req->sg = pci_p2pmem_alloc_sgl(req->p2p_dev, &req->sg_cnt,
++ req->sg = pci_p2pmem_alloc_sgl(p2p_dev, &req->sg_cnt,
+ nvmet_data_transfer_len(req));
+ if (!req->sg)
+ goto out_err;
+
+ if (req->metadata_len) {
+- req->metadata_sg = pci_p2pmem_alloc_sgl(req->p2p_dev,
++ req->metadata_sg = pci_p2pmem_alloc_sgl(p2p_dev,
+ &req->metadata_sg_cnt, req->metadata_len);
+ if (!req->metadata_sg)
+ goto out_free_sg;
+ }
++
++ req->p2p_dev = p2p_dev;
++
+ return 0;
+ out_free_sg:
+ pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
+@@ -1008,25 +1012,19 @@ out_err:
+ return -ENOMEM;
+ }
+
+-static bool nvmet_req_find_p2p_dev(struct nvmet_req *req)
++static struct pci_dev *nvmet_req_find_p2p_dev(struct nvmet_req *req)
+ {
+- if (!IS_ENABLED(CONFIG_PCI_P2PDMA))
+- return false;
+-
+- if (req->sq->ctrl && req->sq->qid && req->ns) {
+- req->p2p_dev = radix_tree_lookup(&req->sq->ctrl->p2p_ns_map,
+- req->ns->nsid);
+- if (req->p2p_dev)
+- return true;
+- }
+-
+- req->p2p_dev = NULL;
+- return false;
++ if (!IS_ENABLED(CONFIG_PCI_P2PDMA) ||
++ !req->sq->ctrl || !req->sq->qid || !req->ns)
++ return NULL;
++ return radix_tree_lookup(&req->sq->ctrl->p2p_ns_map, req->ns->nsid);
+ }
+
+ int nvmet_req_alloc_sgls(struct nvmet_req *req)
+ {
+- if (nvmet_req_find_p2p_dev(req) && !nvmet_req_alloc_p2pmem_sgls(req))
++ struct pci_dev *p2p_dev = nvmet_req_find_p2p_dev(req);
++
++ if (p2p_dev && !nvmet_req_alloc_p2pmem_sgls(p2p_dev, req))
+ return 0;
+
+ req->sg = sgl_alloc(nvmet_data_transfer_len(req), GFP_KERNEL,
+@@ -1055,6 +1053,7 @@ void nvmet_req_free_sgls(struct nvmet_req *req)
+ pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
+ if (req->metadata_sg)
+ pci_p2pmem_free_sgl(req->p2p_dev, req->metadata_sg);
++ req->p2p_dev = NULL;
+ } else {
+ sgl_free(req->sg);
+ if (req->metadata_sg)
+diff --git a/drivers/tee/optee/call.c b/drivers/tee/optee/call.c
+index 780d7c4fd7565..0790de29f0ca2 100644
+--- a/drivers/tee/optee/call.c
++++ b/drivers/tee/optee/call.c
+@@ -217,6 +217,7 @@ int optee_open_session(struct tee_context *ctx,
+ struct optee_msg_arg *msg_arg;
+ phys_addr_t msg_parg;
+ struct optee_session *sess = NULL;
++ uuid_t client_uuid;
+
+ /* +2 for the meta parameters added below */
+ shm = get_msg_arg(ctx, arg->num_params + 2, &msg_arg, &msg_parg);
+@@ -237,10 +238,11 @@ int optee_open_session(struct tee_context *ctx,
+ memcpy(&msg_arg->params[0].u.value, arg->uuid, sizeof(arg->uuid));
+ msg_arg->params[1].u.value.c = arg->clnt_login;
+
+- rc = tee_session_calc_client_uuid((uuid_t *)&msg_arg->params[1].u.value,
+- arg->clnt_login, arg->clnt_uuid);
++ rc = tee_session_calc_client_uuid(&client_uuid, arg->clnt_login,
++ arg->clnt_uuid);
+ if (rc)
+ goto out;
++ export_uuid(msg_arg->params[1].u.octets, &client_uuid);
+
+ rc = optee_to_msg_param(msg_arg->params + 2, arg->num_params, param);
+ if (rc)
+diff --git a/drivers/tee/optee/optee_msg.h b/drivers/tee/optee/optee_msg.h
+index 7b2d919da2ace..c7ac7d02d6cc9 100644
+--- a/drivers/tee/optee/optee_msg.h
++++ b/drivers/tee/optee/optee_msg.h
+@@ -9,7 +9,7 @@
+ #include <linux/types.h>
+
+ /*
+- * This file defines the OP-TEE message protocol used to communicate
++ * This file defines the OP-TEE message protocol (ABI) used to communicate
+ * with an instance of OP-TEE running in secure world.
+ *
+ * This file is divided into three sections.
+@@ -146,9 +146,10 @@ struct optee_msg_param_value {
+ * @tmem: parameter by temporary memory reference
+ * @rmem: parameter by registered memory reference
+ * @value: parameter by opaque value
++ * @octets: parameter by octet string
+ *
+ * @attr & OPTEE_MSG_ATTR_TYPE_MASK indicates if tmem, rmem or value is used in
+- * the union. OPTEE_MSG_ATTR_TYPE_VALUE_* indicates value,
++ * the union. OPTEE_MSG_ATTR_TYPE_VALUE_* indicates value or octets,
+ * OPTEE_MSG_ATTR_TYPE_TMEM_* indicates @tmem and
+ * OPTEE_MSG_ATTR_TYPE_RMEM_* indicates @rmem,
+ * OPTEE_MSG_ATTR_TYPE_NONE indicates that none of the members are used.
+@@ -159,6 +160,7 @@ struct optee_msg_param {
+ struct optee_msg_param_tmem tmem;
+ struct optee_msg_param_rmem rmem;
+ struct optee_msg_param_value value;
++ u8 octets[24];
+ } u;
+ };
+
+diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
+index 2cf9fc915510c..844059861f9e1 100644
+--- a/drivers/tty/serial/stm32-usart.c
++++ b/drivers/tty/serial/stm32-usart.c
+@@ -213,14 +213,11 @@ static void stm32_usart_receive_chars(struct uart_port *port, bool threaded)
+ struct tty_port *tport = &port->state->port;
+ struct stm32_port *stm32_port = to_stm32_port(port);
+ const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+- unsigned long c, flags;
++ unsigned long c;
+ u32 sr;
+ char flag;
+
+- if (threaded)
+- spin_lock_irqsave(&port->lock, flags);
+- else
+- spin_lock(&port->lock);
++ spin_lock(&port->lock);
+
+ while (stm32_usart_pending_rx(port, &sr, &stm32_port->last_res,
+ threaded)) {
+@@ -277,10 +274,7 @@ static void stm32_usart_receive_chars(struct uart_port *port, bool threaded)
+ uart_insert_char(port, sr, USART_SR_ORE, c, flag);
+ }
+
+- if (threaded)
+- spin_unlock_irqrestore(&port->lock, flags);
+- else
+- spin_unlock(&port->lock);
++ spin_unlock(&port->lock);
+
+ tty_flip_buffer_push(tport);
+ }
+@@ -653,7 +647,8 @@ static int stm32_usart_startup(struct uart_port *port)
+
+ ret = request_threaded_irq(port->irq, stm32_usart_interrupt,
+ stm32_usart_threaded_interrupt,
+- IRQF_NO_SUSPEND, name, port);
++ IRQF_ONESHOT | IRQF_NO_SUSPEND,
++ name, port);
+ if (ret)
+ return ret;
+
+@@ -1126,6 +1121,13 @@ static int stm32_usart_of_dma_rx_probe(struct stm32_port *stm32port,
+ struct dma_async_tx_descriptor *desc = NULL;
+ int ret;
+
++ /*
++ * Using DMA and threaded handler for the console could lead to
++ * deadlocks.
++ */
++ if (uart_console(port))
++ return -ENODEV;
++
+ /* Request DMA RX channel */
+ stm32port->rx_ch = dma_request_slave_channel(dev, "rx");
+ if (!stm32port->rx_ch) {
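Two related stm32-usart changes: the threaded handler now takes plain spin_lock() like the hard-IRQ path, and the IRQ is requested with IRQF_ONESHOT, which keeps the line masked from the moment the hard handler returns IRQ_WAKE_THREAD until the thread function finishes, so the two can no longer race on the port lock. Minimal shape of such a registration, with my_* placeholders:

#include <linux/interrupt.h>

static irqreturn_t my_hardirq(int irq, void *dev_id)
{
    /* quick check in hard-IRQ context, defer the heavy work */
    return IRQ_WAKE_THREAD;
}

static irqreturn_t my_thread_fn(int irq, void *dev_id)
{
    /* With IRQF_ONESHOT the line stays masked until this function
     * returns, so it cannot race my_hardirq() for the same device.
     */
    return IRQ_HANDLED;
}

static int my_request(int irq, void *dev_id)
{
    return request_threaded_irq(irq, my_hardirq, my_thread_fn,
                                IRQF_ONESHOT, "my-uart", dev_id);
}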
+diff --git a/drivers/usb/dwc2/core_intr.c b/drivers/usb/dwc2/core_intr.c
+index 510fd0572feb1..e3f429f1575e9 100644
+--- a/drivers/usb/dwc2/core_intr.c
++++ b/drivers/usb/dwc2/core_intr.c
+@@ -707,7 +707,11 @@ static inline void dwc_handle_gpwrdn_disc_det(struct dwc2_hsotg *hsotg,
+ dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
+
+ hsotg->hibernated = 0;
++
++#if IS_ENABLED(CONFIG_USB_DWC2_HOST) || \
++ IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
+ hsotg->bus_suspended = 0;
++#endif
+
+ if (gpwrdn & GPWRDN_IDSTS) {
+ hsotg->op_state = OTG_STATE_B_PERIPHERAL;
+diff --git a/drivers/vfio/pci/Kconfig b/drivers/vfio/pci/Kconfig
+index 0f28bf99efebc..4e1107767e29b 100644
+--- a/drivers/vfio/pci/Kconfig
++++ b/drivers/vfio/pci/Kconfig
+@@ -2,6 +2,7 @@
+ config VFIO_PCI
+ tristate "VFIO support for PCI devices"
+ depends on VFIO && PCI && EVENTFD
++ depends on MMU
+ select VFIO_VIRQFD
+ select IRQ_BYPASS_MANAGER
+ help
+diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
+index a402adee8a215..47f21a6ca7fe9 100644
+--- a/drivers/vfio/pci/vfio_pci_config.c
++++ b/drivers/vfio/pci/vfio_pci_config.c
+@@ -1581,7 +1581,7 @@ static int vfio_ecap_init(struct vfio_pci_device *vdev)
+ if (len == 0xFF) {
+ len = vfio_ext_cap_len(vdev, ecap, epos);
+ if (len < 0)
+- return ret;
++ return len;
+ }
+ }
+
+diff --git a/drivers/vfio/platform/vfio_platform_common.c b/drivers/vfio/platform/vfio_platform_common.c
+index fb4b385191f28..e83a7cd15c956 100644
+--- a/drivers/vfio/platform/vfio_platform_common.c
++++ b/drivers/vfio/platform/vfio_platform_common.c
+@@ -289,7 +289,7 @@ err_irq:
+ vfio_platform_regions_cleanup(vdev);
+ err_reg:
+ mutex_unlock(&driver_lock);
+- module_put(THIS_MODULE);
++ module_put(vdev->parent_module);
+ return ret;
+ }
+
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index 51c18da4792ec..73ebe0c5fdbc9 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -1297,16 +1297,20 @@ int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
+ for (i = 0; i < bbio->num_stripes; i++, stripe++) {
+ u64 bytes;
+ struct request_queue *req_q;
++ struct btrfs_device *device = stripe->dev;
+
+- if (!stripe->dev->bdev) {
++ if (!device->bdev) {
+ ASSERT(btrfs_test_opt(fs_info, DEGRADED));
+ continue;
+ }
+- req_q = bdev_get_queue(stripe->dev->bdev);
++ req_q = bdev_get_queue(device->bdev);
+ if (!blk_queue_discard(req_q))
+ continue;
+
+- ret = btrfs_issue_discard(stripe->dev->bdev,
++ if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
++ continue;
++
++ ret = btrfs_issue_discard(device->bdev,
+ stripe->physical,
+ stripe->length,
+ &bytes);
+@@ -1830,7 +1834,7 @@ static int cleanup_ref_head(struct btrfs_trans_handle *trans,
+ trace_run_delayed_ref_head(fs_info, head, 0);
+ btrfs_delayed_ref_unlock(head);
+ btrfs_put_delayed_ref_head(head);
+- return 0;
++ return ret;
+ }
+
+ static struct btrfs_delayed_ref_head *btrfs_obtain_ref_head(
+diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
+index 8f4f2bd6d9b95..48a2ea6d70921 100644
+--- a/fs/btrfs/file-item.c
++++ b/fs/btrfs/file-item.c
+@@ -690,7 +690,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
+ u64 end_byte = bytenr + len;
+ u64 csum_end;
+ struct extent_buffer *leaf;
+- int ret;
++ int ret = 0;
+ u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
+ int blocksize_bits = fs_info->sb->s_blocksize_bits;
+
+@@ -709,6 +709,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
+ path->leave_spinning = 1;
+ ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
+ if (ret > 0) {
++ ret = 0;
+ if (path->slots[0] == 0)
+ break;
+ path->slots[0]--;
+@@ -765,7 +766,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
+ ret = btrfs_del_items(trans, root, path,
+ path->slots[0], del_nr);
+ if (ret)
+- goto out;
++ break;
+ if (key.offset == bytenr)
+ break;
+ } else if (key.offset < bytenr && csum_end > end_byte) {
+@@ -809,8 +810,9 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
+ ret = btrfs_split_item(trans, root, path, &key, offset);
+ if (ret && ret != -EAGAIN) {
+ btrfs_abort_transaction(trans, ret);
+- goto out;
++ break;
+ }
++ ret = 0;
+
+ key.offset = end_byte - 1;
+ } else {
+@@ -820,8 +822,6 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
+ }
+ btrfs_release_path(path);
+ }
+- ret = 0;
+-out:
+ btrfs_free_path(path);
+ return ret;
+ }
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 94c24b2a211bf..4f26dae63b64a 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -2760,6 +2760,18 @@ out:
+ if (ret || truncated) {
+ u64 unwritten_start = start;
+
++ /*
++ * If we failed to finish this ordered extent for any reason we
++ * need to make sure BTRFS_ORDERED_IOERR is set on the ordered
++ * extent, and mark the inode with the error if it wasn't
++ * already set. Any error during writeback would have already
++ * set the mapping error, so we need to set it if we're the ones
++ * marking this ordered extent as failed.
++ */
++ if (ret && !test_and_set_bit(BTRFS_ORDERED_IOERR,
++ &ordered_extent->flags))
++ mapping_set_error(ordered_extent->inode->i_mapping, -EIO);
++
+ if (truncated)
+ unwritten_start += logical_len;
+ clear_extent_uptodate(io_tree, unwritten_start, end, NULL);
+@@ -8878,6 +8890,7 @@ static int btrfs_rename_exchange(struct inode *old_dir,
+ int ret2;
+ bool root_log_pinned = false;
+ bool dest_log_pinned = false;
++ bool need_abort = false;
+
+ /* we only allow rename subvolume link between subvolumes */
+ if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
+@@ -8934,6 +8947,7 @@ static int btrfs_rename_exchange(struct inode *old_dir,
+ old_idx);
+ if (ret)
+ goto out_fail;
++ need_abort = true;
+ }
+
+ /* And now for the dest. */
+@@ -8949,8 +8963,11 @@ static int btrfs_rename_exchange(struct inode *old_dir,
+ new_ino,
+ btrfs_ino(BTRFS_I(old_dir)),
+ new_idx);
+- if (ret)
++ if (ret) {
++ if (need_abort)
++ btrfs_abort_transaction(trans, ret);
+ goto out_fail;
++ }
+ }
+
+ /* Update inode version and ctime/mtime. */
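
The inode.c hunk relies on test_and_set_bit() so that exactly one path records -EIO on the inode's mapping: whoever flips BTRFS_ORDERED_IOERR first does the reporting, and anyone who finds the bit already set (e.g. writeback, which already set the mapping error) skips it. A userspace analogue of the once-only idiom using C11 atomics:

    #include <stdatomic.h>
    #include <stdio.h>

    #define ORDERED_IOERR 0   /* bit number, as with BTRFS_ORDERED_IOERR */

    /* Atomically set a bit and report whether it was already set,
     * mirroring the kernel's test_and_set_bit(). */
    static int test_and_set_bit(int nr, atomic_ulong *addr)
    {
        unsigned long mask = 1UL << nr;

        return (atomic_fetch_or(addr, mask) & mask) != 0;
    }

    int main(void)
    {
        atomic_ulong flags = 0;

        for (int attempt = 0; attempt < 2; attempt++) {
            if (!test_and_set_bit(ORDERED_IOERR, &flags))
                printf("attempt %d: record -EIO on the mapping\n", attempt);
            else
                printf("attempt %d: already marked, skip\n", attempt);
        }
        return 0;
    }
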
+diff --git a/fs/btrfs/reflink.c b/fs/btrfs/reflink.c
+index eeb66e797e0bf..96ef9fed9a656 100644
+--- a/fs/btrfs/reflink.c
++++ b/fs/btrfs/reflink.c
+@@ -207,10 +207,7 @@ static int clone_copy_inline_extent(struct inode *dst,
+ * inline extent's data to the page.
+ */
+ ASSERT(key.offset > 0);
+- ret = copy_inline_to_page(BTRFS_I(dst), new_key->offset,
+- inline_data, size, datal,
+- comp_type);
+- goto out;
++ goto copy_to_page;
+ }
+ } else if (i_size_read(dst) <= datal) {
+ struct btrfs_file_extent_item *ei;
+@@ -226,13 +223,10 @@ static int clone_copy_inline_extent(struct inode *dst,
+ BTRFS_FILE_EXTENT_INLINE)
+ goto copy_inline_extent;
+
+- ret = copy_inline_to_page(BTRFS_I(dst), new_key->offset,
+- inline_data, size, datal, comp_type);
+- goto out;
++ goto copy_to_page;
+ }
+
+ copy_inline_extent:
+- ret = 0;
+ /*
+ * We have no extent items, or we have an extent at offset 0 which may
+ * or may not be inlined. All these cases are dealt the same way.
+@@ -244,11 +238,13 @@ copy_inline_extent:
+ * clone. Deal with all these cases by copying the inline extent
+ * data into the respective page at the destination inode.
+ */
+- ret = copy_inline_to_page(BTRFS_I(dst), new_key->offset,
+- inline_data, size, datal, comp_type);
+- goto out;
++ goto copy_to_page;
+ }
+
++ /*
++ * Release path before starting a new transaction so we don't hold locks
++ * that would confuse lockdep.
++ */
+ btrfs_release_path(path);
+ /*
+ * If we end up here it means we're copying the inline extent into a leaf
+@@ -281,11 +277,6 @@ copy_inline_extent:
+ ret = btrfs_inode_set_file_extent_range(BTRFS_I(dst), 0, aligned_end);
+ out:
+ if (!ret && !trans) {
+- /*
+- * Release path before starting a new transaction so we don't
+- * hold locks that would confuse lockdep.
+- */
+- btrfs_release_path(path);
+ /*
+ * No transaction here means we copied the inline extent into a
+ * page of the destination inode.
+@@ -306,6 +297,21 @@ out:
+ *trans_out = trans;
+
+ return ret;
++
++copy_to_page:
++ /*
++ * Release our path because we don't need it anymore and also because
++ * copy_inline_to_page() needs to reserve data and metadata, which may
++ * need to flush delalloc when we are low on available space and
++ * therefore cause a deadlock if writeback of an inline extent needs to
++ * write to the same leaf or an ordered extent completion needs to write
++ * to the same leaf.
++ */
++ btrfs_release_path(path);
++
++ ret = copy_inline_to_page(BTRFS_I(dst), new_key->offset,
++ inline_data, size, datal, comp_type);
++ goto out;
+ }
+
+ /**
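
The reflink.c change consolidates three duplicated copy_inline_to_page() call sites behind a single copy_to_page label and drops the btrfs path there first, because the copy may need to flush delalloc and thus write the very leaf the path keeps locked. A compilable control-flow skeleton of that shape (stubs, not the real helpers):

    #include <stdio.h>

    /* Stubs standing in for the real btrfs helpers (hypothetical). */
    static void release_path(void)
    {
        puts("release tree locks");
    }

    static int copy_inline_to_page(void)
    {
        puts("copy into the page cache");
        return 0;
    }

    /* Every page-cache branch funnels through one label that drops the
     * tree locks *before* copying, because the copy may flush delalloc
     * and need to write the very leaf those locks protect. */
    static int clone_inline_extent(int dst_has_data, int fits_inline)
    {
        int ret = 0;

        if (dst_has_data)
            goto copy_to_page;
        if (fits_inline)
            goto copy_to_page;
        /* ... transaction-based path elided ... */
        return ret;

    copy_to_page:
        release_path();
        ret = copy_inline_to_page();
        return ret;
    }

    int main(void)
    {
        return clone_inline_extent(1, 0);
    }
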
+diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
+index 40845428b739c..d4a3a56726aa8 100644
+--- a/fs/btrfs/tree-checker.c
++++ b/fs/btrfs/tree-checker.c
+@@ -1440,22 +1440,14 @@ static int check_extent_data_ref(struct extent_buffer *leaf,
+ return -EUCLEAN;
+ }
+ for (; ptr < end; ptr += sizeof(*dref)) {
+- u64 root_objectid;
+- u64 owner;
+ u64 offset;
+- u64 hash;
+
++ /*
++ * We cannot check the extent_data_ref hash due to possible
++ * overflow from the leaf due to hash collisions.
++ */
+ dref = (struct btrfs_extent_data_ref *)ptr;
+- root_objectid = btrfs_extent_data_ref_root(leaf, dref);
+- owner = btrfs_extent_data_ref_objectid(leaf, dref);
+ offset = btrfs_extent_data_ref_offset(leaf, dref);
+- hash = hash_extent_data_ref(root_objectid, owner, offset);
+- if (hash != key->offset) {
+- extent_err(leaf, slot,
+- "invalid extent data ref hash, item has 0x%016llx key has 0x%016llx",
+- hash, key->offset);
+- return -EUCLEAN;
+- }
+ if (!IS_ALIGNED(offset, leaf->fs_info->sectorsize)) {
+ extent_err(leaf, slot,
+ "invalid extent data backref offset, have %llu expect aligned to %u",
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index 9a0cfa0e124da..300951088a11c 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -1752,6 +1752,7 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
+ break;
+
+ if (ret == 1) {
++ ret = 0;
+ if (path->slots[0] == 0)
+ break;
+ path->slots[0]--;
+@@ -1764,17 +1765,19 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
+
+ ret = btrfs_del_item(trans, root, path);
+ if (ret)
+- goto out;
++ break;
+
+ btrfs_release_path(path);
+ inode = read_one_inode(root, key.offset);
+- if (!inode)
+- return -EIO;
++ if (!inode) {
++ ret = -EIO;
++ break;
++ }
+
+ ret = fixup_inode_link_count(trans, root, inode);
+ iput(inode);
+ if (ret)
+- goto out;
++ break;
+
+ /*
+ * fixup on a directory may create new entries,
+@@ -1783,8 +1786,6 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
+ */
+ key.offset = (u64)-1;
+ }
+- ret = 0;
+-out:
+ btrfs_release_path(path);
+ return ret;
+ }
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index 12eac88373032..e6542ba264330 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -3206,7 +3206,10 @@ static int ext4_split_extent_at(handle_t *handle,
+ ext4_ext_mark_unwritten(ex2);
+
+ err = ext4_ext_insert_extent(handle, inode, ppath, &newex, flags);
+- if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
++ if (err != -ENOSPC && err != -EDQUOT)
++ goto out;
++
++ if (EXT4_EXT_MAY_ZEROOUT & split_flag) {
+ if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) {
+ if (split_flag & EXT4_EXT_DATA_VALID1) {
+ err = ext4_ext_zeroout(inode, ex2);
+@@ -3232,25 +3235,22 @@ static int ext4_split_extent_at(handle_t *handle,
+ ext4_ext_pblock(&orig_ex));
+ }
+
+- if (err)
+- goto fix_extent_len;
+- /* update the extent length and mark as initialized */
+- ex->ee_len = cpu_to_le16(ee_len);
+- ext4_ext_try_to_merge(handle, inode, path, ex);
+- err = ext4_ext_dirty(handle, inode, path + path->p_depth);
+- if (err)
+- goto fix_extent_len;
+-
+- /* update extent status tree */
+- err = ext4_zeroout_es(inode, &zero_ex);
+-
+- goto out;
+- } else if (err)
+- goto fix_extent_len;
+-
+-out:
+- ext4_ext_show_leaf(inode, path);
+- return err;
++ if (!err) {
++ /* update the extent length and mark as initialized */
++ ex->ee_len = cpu_to_le16(ee_len);
++ ext4_ext_try_to_merge(handle, inode, path, ex);
++ err = ext4_ext_dirty(handle, inode, path + path->p_depth);
++ if (!err)
++ /* update extent status tree */
++ err = ext4_zeroout_es(inode, &zero_ex);
++ /* If we failed at this point, we don't know in which
++ * state the extent tree exactly is so don't try to fix
++ * length of the original extent as it may do even more
++ * damage.
++ */
++ goto out;
++ }
++ }
+
+ fix_extent_len:
+ ex->ee_len = orig_ex.ee_len;
+@@ -3260,6 +3260,9 @@ fix_extent_len:
+ */
+ ext4_ext_dirty(handle, inode, path + path->p_depth);
+ return err;
++out:
++ ext4_ext_show_leaf(inode, path);
++ return err;
+ }
+
+ /*
+diff --git a/fs/ext4/fast_commit.c b/fs/ext4/fast_commit.c
+index 896e1176e0449..53647fa038773 100644
+--- a/fs/ext4/fast_commit.c
++++ b/fs/ext4/fast_commit.c
+@@ -1227,18 +1227,6 @@ static void ext4_fc_cleanup(journal_t *journal, int full)
+
+ /* Ext4 Replay Path Routines */
+
+-/* Get length of a particular tlv */
+-static inline int ext4_fc_tag_len(struct ext4_fc_tl *tl)
+-{
+- return le16_to_cpu(tl->fc_len);
+-}
+-
+-/* Get a pointer to "value" of a tlv */
+-static inline u8 *ext4_fc_tag_val(struct ext4_fc_tl *tl)
+-{
+- return (u8 *)tl + sizeof(*tl);
+-}
+-
+ /* Helper struct for dentry replay routines */
+ struct dentry_info_args {
+ int parent_ino, dname_len, ino, inode_len;
+@@ -1246,28 +1234,29 @@ struct dentry_info_args {
+ };
+
+ static inline void tl_to_darg(struct dentry_info_args *darg,
+- struct ext4_fc_tl *tl)
++ struct ext4_fc_tl *tl, u8 *val)
+ {
+- struct ext4_fc_dentry_info *fcd;
++ struct ext4_fc_dentry_info fcd;
+
+- fcd = (struct ext4_fc_dentry_info *)ext4_fc_tag_val(tl);
++ memcpy(&fcd, val, sizeof(fcd));
+
+- darg->parent_ino = le32_to_cpu(fcd->fc_parent_ino);
+- darg->ino = le32_to_cpu(fcd->fc_ino);
+- darg->dname = fcd->fc_dname;
+- darg->dname_len = ext4_fc_tag_len(tl) -
+- sizeof(struct ext4_fc_dentry_info);
++ darg->parent_ino = le32_to_cpu(fcd.fc_parent_ino);
++ darg->ino = le32_to_cpu(fcd.fc_ino);
++ darg->dname = val + offsetof(struct ext4_fc_dentry_info, fc_dname);
++ darg->dname_len = le16_to_cpu(tl->fc_len) -
++ sizeof(struct ext4_fc_dentry_info);
+ }
+
+ /* Unlink replay function */
+-static int ext4_fc_replay_unlink(struct super_block *sb, struct ext4_fc_tl *tl)
++static int ext4_fc_replay_unlink(struct super_block *sb, struct ext4_fc_tl *tl,
++ u8 *val)
+ {
+ struct inode *inode, *old_parent;
+ struct qstr entry;
+ struct dentry_info_args darg;
+ int ret = 0;
+
+- tl_to_darg(&darg, tl);
++ tl_to_darg(&darg, tl, val);
+
+ trace_ext4_fc_replay(sb, EXT4_FC_TAG_UNLINK, darg.ino,
+ darg.parent_ino, darg.dname_len);
+@@ -1357,13 +1346,14 @@ out:
+ }
+
+ /* Link replay function */
+-static int ext4_fc_replay_link(struct super_block *sb, struct ext4_fc_tl *tl)
++static int ext4_fc_replay_link(struct super_block *sb, struct ext4_fc_tl *tl,
++ u8 *val)
+ {
+ struct inode *inode;
+ struct dentry_info_args darg;
+ int ret = 0;
+
+- tl_to_darg(&darg, tl);
++ tl_to_darg(&darg, tl, val);
+ trace_ext4_fc_replay(sb, EXT4_FC_TAG_LINK, darg.ino,
+ darg.parent_ino, darg.dname_len);
+
+@@ -1408,9 +1398,10 @@ static int ext4_fc_record_modified_inode(struct super_block *sb, int ino)
+ /*
+ * Inode replay function
+ */
+-static int ext4_fc_replay_inode(struct super_block *sb, struct ext4_fc_tl *tl)
++static int ext4_fc_replay_inode(struct super_block *sb, struct ext4_fc_tl *tl,
++ u8 *val)
+ {
+- struct ext4_fc_inode *fc_inode;
++ struct ext4_fc_inode fc_inode;
+ struct ext4_inode *raw_inode;
+ struct ext4_inode *raw_fc_inode;
+ struct inode *inode = NULL;
+@@ -1418,9 +1409,9 @@ static int ext4_fc_replay_inode(struct super_block *sb, struct ext4_fc_tl *tl)
+ int inode_len, ino, ret, tag = le16_to_cpu(tl->fc_tag);
+ struct ext4_extent_header *eh;
+
+- fc_inode = (struct ext4_fc_inode *)ext4_fc_tag_val(tl);
++ memcpy(&fc_inode, val, sizeof(fc_inode));
+
+- ino = le32_to_cpu(fc_inode->fc_ino);
++ ino = le32_to_cpu(fc_inode.fc_ino);
+ trace_ext4_fc_replay(sb, tag, ino, 0, 0);
+
+ inode = ext4_iget(sb, ino, EXT4_IGET_NORMAL);
+@@ -1432,12 +1423,13 @@ static int ext4_fc_replay_inode(struct super_block *sb, struct ext4_fc_tl *tl)
+
+ ext4_fc_record_modified_inode(sb, ino);
+
+- raw_fc_inode = (struct ext4_inode *)fc_inode->fc_raw_inode;
++ raw_fc_inode = (struct ext4_inode *)
++ (val + offsetof(struct ext4_fc_inode, fc_raw_inode));
+ ret = ext4_get_fc_inode_loc(sb, ino, &iloc);
+ if (ret)
+ goto out;
+
+- inode_len = ext4_fc_tag_len(tl) - sizeof(struct ext4_fc_inode);
++ inode_len = le16_to_cpu(tl->fc_len) - sizeof(struct ext4_fc_inode);
+ raw_inode = ext4_raw_inode(&iloc);
+
+ memcpy(raw_inode, raw_fc_inode, offsetof(struct ext4_inode, i_block));
+@@ -1505,14 +1497,15 @@ out:
+ * inode for which we are trying to create a dentry here, should already have
+ * been replayed before we start here.
+ */
+-static int ext4_fc_replay_create(struct super_block *sb, struct ext4_fc_tl *tl)
++static int ext4_fc_replay_create(struct super_block *sb, struct ext4_fc_tl *tl,
++ u8 *val)
+ {
+ int ret = 0;
+ struct inode *inode = NULL;
+ struct inode *dir = NULL;
+ struct dentry_info_args darg;
+
+- tl_to_darg(&darg, tl);
++ tl_to_darg(&darg, tl, val);
+
+ trace_ext4_fc_replay(sb, EXT4_FC_TAG_CREAT, darg.ino,
+ darg.parent_ino, darg.dname_len);
+@@ -1591,9 +1584,9 @@ static int ext4_fc_record_regions(struct super_block *sb, int ino,
+
+ /* Replay add range tag */
+ static int ext4_fc_replay_add_range(struct super_block *sb,
+- struct ext4_fc_tl *tl)
++ struct ext4_fc_tl *tl, u8 *val)
+ {
+- struct ext4_fc_add_range *fc_add_ex;
++ struct ext4_fc_add_range fc_add_ex;
+ struct ext4_extent newex, *ex;
+ struct inode *inode;
+ ext4_lblk_t start, cur;
+@@ -1603,15 +1596,14 @@ static int ext4_fc_replay_add_range(struct super_block *sb,
+ struct ext4_ext_path *path = NULL;
+ int ret;
+
+- fc_add_ex = (struct ext4_fc_add_range *)ext4_fc_tag_val(tl);
+- ex = (struct ext4_extent *)&fc_add_ex->fc_ex;
++ memcpy(&fc_add_ex, val, sizeof(fc_add_ex));
++ ex = (struct ext4_extent *)&fc_add_ex.fc_ex;
+
+ trace_ext4_fc_replay(sb, EXT4_FC_TAG_ADD_RANGE,
+- le32_to_cpu(fc_add_ex->fc_ino), le32_to_cpu(ex->ee_block),
++ le32_to_cpu(fc_add_ex.fc_ino), le32_to_cpu(ex->ee_block),
+ ext4_ext_get_actual_len(ex));
+
+- inode = ext4_iget(sb, le32_to_cpu(fc_add_ex->fc_ino),
+- EXT4_IGET_NORMAL);
++ inode = ext4_iget(sb, le32_to_cpu(fc_add_ex.fc_ino), EXT4_IGET_NORMAL);
+ if (IS_ERR(inode)) {
+ jbd_debug(1, "Inode not found.");
+ return 0;
+@@ -1720,32 +1712,33 @@ next:
+
+ /* Replay DEL_RANGE tag */
+ static int
+-ext4_fc_replay_del_range(struct super_block *sb, struct ext4_fc_tl *tl)
++ext4_fc_replay_del_range(struct super_block *sb, struct ext4_fc_tl *tl,
++ u8 *val)
+ {
+ struct inode *inode;
+- struct ext4_fc_del_range *lrange;
++ struct ext4_fc_del_range lrange;
+ struct ext4_map_blocks map;
+ ext4_lblk_t cur, remaining;
+ int ret;
+
+- lrange = (struct ext4_fc_del_range *)ext4_fc_tag_val(tl);
+- cur = le32_to_cpu(lrange->fc_lblk);
+- remaining = le32_to_cpu(lrange->fc_len);
++ memcpy(&lrange, val, sizeof(lrange));
++ cur = le32_to_cpu(lrange.fc_lblk);
++ remaining = le32_to_cpu(lrange.fc_len);
+
+ trace_ext4_fc_replay(sb, EXT4_FC_TAG_DEL_RANGE,
+- le32_to_cpu(lrange->fc_ino), cur, remaining);
++ le32_to_cpu(lrange.fc_ino), cur, remaining);
+
+- inode = ext4_iget(sb, le32_to_cpu(lrange->fc_ino), EXT4_IGET_NORMAL);
++ inode = ext4_iget(sb, le32_to_cpu(lrange.fc_ino), EXT4_IGET_NORMAL);
+ if (IS_ERR(inode)) {
+- jbd_debug(1, "Inode %d not found", le32_to_cpu(lrange->fc_ino));
++ jbd_debug(1, "Inode %d not found", le32_to_cpu(lrange.fc_ino));
+ return 0;
+ }
+
+ ret = ext4_fc_record_modified_inode(sb, inode->i_ino);
+
+ jbd_debug(1, "DEL_RANGE, inode %ld, lblk %d, len %d\n",
+- inode->i_ino, le32_to_cpu(lrange->fc_lblk),
+- le32_to_cpu(lrange->fc_len));
++ inode->i_ino, le32_to_cpu(lrange.fc_lblk),
++ le32_to_cpu(lrange.fc_len));
+ while (remaining > 0) {
+ map.m_lblk = cur;
+ map.m_len = remaining;
+@@ -1766,8 +1759,8 @@ ext4_fc_replay_del_range(struct super_block *sb, struct ext4_fc_tl *tl)
+ }
+
+ ret = ext4_punch_hole(inode,
+- le32_to_cpu(lrange->fc_lblk) << sb->s_blocksize_bits,
+- le32_to_cpu(lrange->fc_len) << sb->s_blocksize_bits);
++ le32_to_cpu(lrange.fc_lblk) << sb->s_blocksize_bits,
++ le32_to_cpu(lrange.fc_len) << sb->s_blocksize_bits);
+ if (ret)
+ jbd_debug(1, "ext4_punch_hole returned %d", ret);
+ ext4_ext_replay_shrink_inode(inode,
+@@ -1909,11 +1902,11 @@ static int ext4_fc_replay_scan(journal_t *journal,
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ struct ext4_fc_replay_state *state;
+ int ret = JBD2_FC_REPLAY_CONTINUE;
+- struct ext4_fc_add_range *ext;
+- struct ext4_fc_tl *tl;
+- struct ext4_fc_tail *tail;
+- __u8 *start, *end;
+- struct ext4_fc_head *head;
++ struct ext4_fc_add_range ext;
++ struct ext4_fc_tl tl;
++ struct ext4_fc_tail tail;
++ __u8 *start, *end, *cur, *val;
++ struct ext4_fc_head head;
+ struct ext4_extent *ex;
+
+ state = &sbi->s_fc_replay_state;
+@@ -1940,15 +1933,17 @@ static int ext4_fc_replay_scan(journal_t *journal,
+ }
+
+ state->fc_replay_expected_off++;
+- fc_for_each_tl(start, end, tl) {
++ for (cur = start; cur < end; cur = cur + sizeof(tl) + le16_to_cpu(tl.fc_len)) {
++ memcpy(&tl, cur, sizeof(tl));
++ val = cur + sizeof(tl);
+ jbd_debug(3, "Scan phase, tag:%s, blk %lld\n",
+- tag2str(le16_to_cpu(tl->fc_tag)), bh->b_blocknr);
+- switch (le16_to_cpu(tl->fc_tag)) {
++ tag2str(le16_to_cpu(tl.fc_tag)), bh->b_blocknr);
++ switch (le16_to_cpu(tl.fc_tag)) {
+ case EXT4_FC_TAG_ADD_RANGE:
+- ext = (struct ext4_fc_add_range *)ext4_fc_tag_val(tl);
+- ex = (struct ext4_extent *)&ext->fc_ex;
++ memcpy(&ext, val, sizeof(ext));
++ ex = (struct ext4_extent *)&ext.fc_ex;
+ ret = ext4_fc_record_regions(sb,
+- le32_to_cpu(ext->fc_ino),
++ le32_to_cpu(ext.fc_ino),
+ le32_to_cpu(ex->ee_block), ext4_ext_pblock(ex),
+ ext4_ext_get_actual_len(ex));
+ if (ret < 0)
+@@ -1962,18 +1957,18 @@ static int ext4_fc_replay_scan(journal_t *journal,
+ case EXT4_FC_TAG_INODE:
+ case EXT4_FC_TAG_PAD:
+ state->fc_cur_tag++;
+- state->fc_crc = ext4_chksum(sbi, state->fc_crc, tl,
+- sizeof(*tl) + ext4_fc_tag_len(tl));
++ state->fc_crc = ext4_chksum(sbi, state->fc_crc, cur,
++ sizeof(tl) + le16_to_cpu(tl.fc_len));
+ break;
+ case EXT4_FC_TAG_TAIL:
+ state->fc_cur_tag++;
+- tail = (struct ext4_fc_tail *)ext4_fc_tag_val(tl);
+- state->fc_crc = ext4_chksum(sbi, state->fc_crc, tl,
+- sizeof(*tl) +
++ memcpy(&tail, val, sizeof(tail));
++ state->fc_crc = ext4_chksum(sbi, state->fc_crc, cur,
++ sizeof(tl) +
+ offsetof(struct ext4_fc_tail,
+ fc_crc));
+- if (le32_to_cpu(tail->fc_tid) == expected_tid &&
+- le32_to_cpu(tail->fc_crc) == state->fc_crc) {
++ if (le32_to_cpu(tail.fc_tid) == expected_tid &&
++ le32_to_cpu(tail.fc_crc) == state->fc_crc) {
+ state->fc_replay_num_tags = state->fc_cur_tag;
+ state->fc_regions_valid =
+ state->fc_regions_used;
+@@ -1984,19 +1979,19 @@ static int ext4_fc_replay_scan(journal_t *journal,
+ state->fc_crc = 0;
+ break;
+ case EXT4_FC_TAG_HEAD:
+- head = (struct ext4_fc_head *)ext4_fc_tag_val(tl);
+- if (le32_to_cpu(head->fc_features) &
++ memcpy(&head, val, sizeof(head));
++ if (le32_to_cpu(head.fc_features) &
+ ~EXT4_FC_SUPPORTED_FEATURES) {
+ ret = -EOPNOTSUPP;
+ break;
+ }
+- if (le32_to_cpu(head->fc_tid) != expected_tid) {
++ if (le32_to_cpu(head.fc_tid) != expected_tid) {
+ ret = JBD2_FC_REPLAY_STOP;
+ break;
+ }
+ state->fc_cur_tag++;
+- state->fc_crc = ext4_chksum(sbi, state->fc_crc, tl,
+- sizeof(*tl) + ext4_fc_tag_len(tl));
++ state->fc_crc = ext4_chksum(sbi, state->fc_crc, cur,
++ sizeof(tl) + le16_to_cpu(tl.fc_len));
+ break;
+ default:
+ ret = state->fc_replay_num_tags ?
+@@ -2020,11 +2015,11 @@ static int ext4_fc_replay(journal_t *journal, struct buffer_head *bh,
+ {
+ struct super_block *sb = journal->j_private;
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+- struct ext4_fc_tl *tl;
+- __u8 *start, *end;
++ struct ext4_fc_tl tl;
++ __u8 *start, *end, *cur, *val;
+ int ret = JBD2_FC_REPLAY_CONTINUE;
+ struct ext4_fc_replay_state *state = &sbi->s_fc_replay_state;
+- struct ext4_fc_tail *tail;
++ struct ext4_fc_tail tail;
+
+ if (pass == PASS_SCAN) {
+ state->fc_current_pass = PASS_SCAN;
+@@ -2051,49 +2046,52 @@ static int ext4_fc_replay(journal_t *journal, struct buffer_head *bh,
+ start = (u8 *)bh->b_data;
+ end = (__u8 *)bh->b_data + journal->j_blocksize - 1;
+
+- fc_for_each_tl(start, end, tl) {
++ for (cur = start; cur < end; cur = cur + sizeof(tl) + le16_to_cpu(tl.fc_len)) {
++ memcpy(&tl, cur, sizeof(tl));
++ val = cur + sizeof(tl);
++
+ if (state->fc_replay_num_tags == 0) {
+ ret = JBD2_FC_REPLAY_STOP;
+ ext4_fc_set_bitmaps_and_counters(sb);
+ break;
+ }
+ jbd_debug(3, "Replay phase, tag:%s\n",
+- tag2str(le16_to_cpu(tl->fc_tag)));
++ tag2str(le16_to_cpu(tl.fc_tag)));
+ state->fc_replay_num_tags--;
+- switch (le16_to_cpu(tl->fc_tag)) {
++ switch (le16_to_cpu(tl.fc_tag)) {
+ case EXT4_FC_TAG_LINK:
+- ret = ext4_fc_replay_link(sb, tl);
++ ret = ext4_fc_replay_link(sb, &tl, val);
+ break;
+ case EXT4_FC_TAG_UNLINK:
+- ret = ext4_fc_replay_unlink(sb, tl);
++ ret = ext4_fc_replay_unlink(sb, &tl, val);
+ break;
+ case EXT4_FC_TAG_ADD_RANGE:
+- ret = ext4_fc_replay_add_range(sb, tl);
++ ret = ext4_fc_replay_add_range(sb, &tl, val);
+ break;
+ case EXT4_FC_TAG_CREAT:
+- ret = ext4_fc_replay_create(sb, tl);
++ ret = ext4_fc_replay_create(sb, &tl, val);
+ break;
+ case EXT4_FC_TAG_DEL_RANGE:
+- ret = ext4_fc_replay_del_range(sb, tl);
++ ret = ext4_fc_replay_del_range(sb, &tl, val);
+ break;
+ case EXT4_FC_TAG_INODE:
+- ret = ext4_fc_replay_inode(sb, tl);
++ ret = ext4_fc_replay_inode(sb, &tl, val);
+ break;
+ case EXT4_FC_TAG_PAD:
+ trace_ext4_fc_replay(sb, EXT4_FC_TAG_PAD, 0,
+- ext4_fc_tag_len(tl), 0);
++ le16_to_cpu(tl.fc_len), 0);
+ break;
+ case EXT4_FC_TAG_TAIL:
+ trace_ext4_fc_replay(sb, EXT4_FC_TAG_TAIL, 0,
+- ext4_fc_tag_len(tl), 0);
+- tail = (struct ext4_fc_tail *)ext4_fc_tag_val(tl);
+- WARN_ON(le32_to_cpu(tail->fc_tid) != expected_tid);
++ le16_to_cpu(tl.fc_len), 0);
++ memcpy(&tail, val, sizeof(tail));
++ WARN_ON(le32_to_cpu(tail.fc_tid) != expected_tid);
+ break;
+ case EXT4_FC_TAG_HEAD:
+ break;
+ default:
+- trace_ext4_fc_replay(sb, le16_to_cpu(tl->fc_tag), 0,
+- ext4_fc_tag_len(tl), 0);
++ trace_ext4_fc_replay(sb, le16_to_cpu(tl.fc_tag), 0,
++ le16_to_cpu(tl.fc_len), 0);
+ ret = -ECANCELED;
+ break;
+ }
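
The common theme of the fast_commit.c changes: tag-length-value headers in the journal block may be unaligned (and a corrupted fc_len could point past the buffer), so each header is now memcpy()ed into a stack variable before use instead of being dereferenced through a cast pointer. A runnable sketch of that TLV-walking pattern; the struct layout is a simplification, and native endianness is used where the kernel would apply le16_to_cpu():

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Simplified tag-length header, like struct ext4_fc_tl. */
    struct tl {
        uint16_t tag;
        uint16_t len;   /* length of the value that follows */
    };

    static void scan(const uint8_t *buf, size_t size)
    {
        const uint8_t *cur = buf, *end = buf + size;
        struct tl tl;

        while (cur + sizeof(tl) <= end) {
            /* Copy the header out of the block instead of casting the
             * pointer: safe for unaligned data, and the length can be
             * validated before anything behind it is touched. */
            memcpy(&tl, cur, sizeof(tl));
            const uint8_t *val = cur + sizeof(tl);

            if (tl.len > (size_t)(end - val))   /* corrupt length: stop */
                break;
            printf("tag %u, len %u\n", (unsigned)tl.tag, (unsigned)tl.len);
            cur = val + tl.len;
        }
    }

    int main(void)
    {
        uint8_t buf[8] = { 0 };
        struct tl h = { 7, 2 };   /* one record: tag 7, 2 value bytes */

        memcpy(buf, &h, sizeof(h));
        buf[sizeof(h)] = 0xaa;
        buf[sizeof(h) + 1] = 0xbb;
        scan(buf, sizeof(h) + 2);
        return 0;
    }
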
+diff --git a/fs/ext4/fast_commit.h b/fs/ext4/fast_commit.h
+index 3a6e5a1fa1b80..d8d0998a5c163 100644
+--- a/fs/ext4/fast_commit.h
++++ b/fs/ext4/fast_commit.h
+@@ -146,12 +146,5 @@ struct ext4_fc_replay_state {
+
+ #define region_last(__region) (((__region)->lblk) + ((__region)->len) - 1)
+
+-#define fc_for_each_tl(__start, __end, __tl) \
+- for (tl = (struct ext4_fc_tl *)start; \
+- (u8 *)tl < (u8 *)end; \
+- tl = (struct ext4_fc_tl *)((u8 *)tl + \
+- sizeof(struct ext4_fc_tl) + \
+- + le16_to_cpu(tl->fc_len)))
+-
+
+ #endif /* __FAST_COMMIT_H__ */
+diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
+index c92558ede623e..b294ebcb4db4b 100644
+--- a/fs/ext4/ialloc.c
++++ b/fs/ext4/ialloc.c
+@@ -322,14 +322,16 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
+ if (is_directory) {
+ count = ext4_used_dirs_count(sb, gdp) - 1;
+ ext4_used_dirs_set(sb, gdp, count);
+- percpu_counter_dec(&sbi->s_dirs_counter);
++ if (percpu_counter_initialized(&sbi->s_dirs_counter))
++ percpu_counter_dec(&sbi->s_dirs_counter);
+ }
+ ext4_inode_bitmap_csum_set(sb, block_group, gdp, bitmap_bh,
+ EXT4_INODES_PER_GROUP(sb) / 8);
+ ext4_group_desc_csum_set(sb, block_group, gdp);
+ ext4_unlock_group(sb, block_group);
+
+- percpu_counter_inc(&sbi->s_freeinodes_counter);
++ if (percpu_counter_initialized(&sbi->s_freeinodes_counter))
++ percpu_counter_inc(&sbi->s_freeinodes_counter);
+ if (sbi->s_log_groups_per_flex) {
+ struct flex_groups *fg;
+
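
ext4_free_inode() can be reached from an error path before the superblock's per-cpu counters have been set up, so the hunk above guards each update with percpu_counter_initialized(). A toy model of the guard (the lazily-initialized counter here is a stand-in, not the kernel API):

    #include <stdio.h>

    /* Toy stand-in for a lazily-initialized per-cpu counter. */
    struct counter {
        int initialized;
        long value;
    };

    static void counter_dec(struct counter *c)
    {
        /* Mirror the fix: only touch the counter once it exists; on an
         * early error path the update is simply skipped. */
        if (c->initialized)
            c->value--;
    }

    int main(void)
    {
        struct counter dirs = { 0, 0 };

        counter_dec(&dirs);        /* early error path: harmless no-op */
        dirs.initialized = 1;
        counter_dec(&dirs);        /* normal path */
        printf("%ld\n", dirs.value);
        return 0;
    }
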
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index b6229fe1aa233..9c390c3d7fb15 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -2738,7 +2738,7 @@ static int ext4_mb_init_backend(struct super_block *sb)
+ */
+ if (sbi->s_es->s_log_groups_per_flex >= 32) {
+ ext4_msg(sb, KERN_ERR, "too many log groups per flexible block group");
+- goto err_freesgi;
++ goto err_freebuddy;
+ }
+ sbi->s_mb_prefetch = min_t(uint, 1 << sbi->s_es->s_log_groups_per_flex,
+ BLK_MAX_SEGMENT_SIZE >> (sb->s_blocksize_bits - 9));
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index c7f5b665834fc..21c4ba2513ce5 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -4451,14 +4451,20 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
+ }
+
+ if (sb->s_blocksize != blocksize) {
++ /*
++ * bh must be released before kill_bdev(), otherwise
++ * neither it nor its page will be freed. kill_bdev()
++ * is called by sb_set_blocksize().
++ */
++ brelse(bh);
+ /* Validate the filesystem blocksize */
+ if (!sb_set_blocksize(sb, blocksize)) {
+ ext4_msg(sb, KERN_ERR, "bad block size %d",
+ blocksize);
++ bh = NULL;
+ goto failed_mount;
+ }
+
+- brelse(bh);
+ logical_sb_block = sb_block * EXT4_MIN_BLOCK_SIZE;
+ offset = do_div(logical_sb_block, blocksize);
+ bh = ext4_sb_bread_unmovable(sb, logical_sb_block);
+@@ -5181,8 +5187,9 @@ failed_mount:
+ kfree(get_qf_name(sb, sbi, i));
+ #endif
+ fscrypt_free_dummy_policy(&sbi->s_dummy_enc_policy);
+- ext4_blkdev_remove(sbi);
++ /* ext4_blkdev_remove() calls kill_bdev(), release bh before it. */
+ brelse(bh);
++ ext4_blkdev_remove(sbi);
+ out_fail:
+ sb->s_fs_info = NULL;
+ kfree(sbi->s_blockgroup_lock);
+diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
+index 35a6fd103761b..ea2f2de448063 100644
+--- a/fs/gfs2/glock.c
++++ b/fs/gfs2/glock.c
+@@ -1457,9 +1457,11 @@ void gfs2_glock_dq(struct gfs2_holder *gh)
+ glock_blocked_by_withdraw(gl) &&
+ gh->gh_gl != sdp->sd_jinode_gl) {
+ sdp->sd_glock_dqs_held++;
++ spin_unlock(&gl->gl_lockref.lock);
+ might_sleep();
+ wait_on_bit(&sdp->sd_flags, SDF_WITHDRAW_RECOVERY,
+ TASK_UNINTERRUPTIBLE);
++ spin_lock(&gl->gl_lockref.lock);
+ }
+ if (gh->gh_flags & GL_NOCACHE)
+ handle_callback(gl, LM_ST_UNLOCKED, 0, false);
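
wait_on_bit() sleeps, and gfs2_glock_dq() was calling it while holding gl->gl_lockref.lock, a spinlock. The fix is the classic unlock-sleep-relock pattern; below is a pthread-based analogue (built with -pthread). Note that any state the lock protects must be revalidated after the lock is retaken.

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_spinlock_t lock;

    /* Stand-in for wait_on_bit(): anything that may block. */
    static void wait_for_recovery(void)
    {
        usleep(1000);
    }

    static void dequeue(void)
    {
        pthread_spin_lock(&lock);
        /* ... inspect state under the lock ... */

        /* Never sleep under a spinlock: drop it, block, retake it. */
        pthread_spin_unlock(&lock);
        wait_for_recovery();
        pthread_spin_lock(&lock);

        /* ... continue with possibly-changed state ... */
        pthread_spin_unlock(&lock);
    }

    int main(void)
    {
        pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE);
        dequeue();
        puts("done");
        return 0;
    }
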
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index 369ec81033d67..fdbaaf579cc60 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -545,7 +545,7 @@ struct io_statx {
+ struct io_completion {
+ struct file *file;
+ struct list_head list;
+- int cflags;
++ u32 cflags;
+ };
+
+ struct io_async_connect {
+@@ -1711,7 +1711,8 @@ static void io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
+ }
+ }
+
+-static void __io_cqring_fill_event(struct io_kiocb *req, long res, long cflags)
++static void __io_cqring_fill_event(struct io_kiocb *req, long res,
++ unsigned int cflags)
+ {
+ struct io_ring_ctx *ctx = req->ctx;
+ struct io_uring_cqe *cqe;
+@@ -6266,6 +6267,7 @@ static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
+ if (prev) {
+ io_async_find_and_cancel(ctx, req, prev->user_data, -ETIME);
+ io_put_req_deferred(prev, 1);
++ io_put_req_deferred(req, 1);
+ } else {
+ io_cqring_add_event(req, -ETIME, 0);
+ io_put_req_deferred(req, 1);
+diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
+index 8880071ee4ee0..2b296d720c9fa 100644
+--- a/fs/ocfs2/file.c
++++ b/fs/ocfs2/file.c
+@@ -1855,6 +1855,45 @@ out:
+ return ret;
+ }
+
++/*
++ * zero out partial blocks of one cluster.
++ *
++ * start: file offset where zeroing starts; rounded up to a block boundary.
++ * len: trimmed to the end of the current cluster if "start + len"
++ * extends beyond it.
++ */
++static int ocfs2_zeroout_partial_cluster(struct inode *inode,
++ u64 start, u64 len)
++{
++ int ret;
++ u64 start_block, end_block, nr_blocks;
++ u64 p_block, offset;
++ u32 cluster, p_cluster, nr_clusters;
++ struct super_block *sb = inode->i_sb;
++ u64 end = ocfs2_align_bytes_to_clusters(sb, start);
++
++ if (start + len < end)
++ end = start + len;
++
++ start_block = ocfs2_blocks_for_bytes(sb, start);
++ end_block = ocfs2_blocks_for_bytes(sb, end);
++ nr_blocks = end_block - start_block;
++ if (!nr_blocks)
++ return 0;
++
++ cluster = ocfs2_bytes_to_clusters(sb, start);
++ ret = ocfs2_get_clusters(inode, cluster, &p_cluster,
++ &nr_clusters, NULL);
++ if (ret)
++ return ret;
++ if (!p_cluster)
++ return 0;
++
++ offset = start_block - ocfs2_clusters_to_blocks(sb, cluster);
++ p_block = ocfs2_clusters_to_blocks(sb, p_cluster) + offset;
++ return sb_issue_zeroout(sb, p_block, nr_blocks, GFP_NOFS);
++}
++
+ /*
+ * Parts of this function taken from xfs_change_file_space()
+ */
+@@ -1865,7 +1904,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
+ {
+ int ret;
+ s64 llen;
+- loff_t size;
++ loff_t size, orig_isize;
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+ struct buffer_head *di_bh = NULL;
+ handle_t *handle;
+@@ -1896,6 +1935,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
+ goto out_inode_unlock;
+ }
+
++ orig_isize = i_size_read(inode);
+ switch (sr->l_whence) {
+ case 0: /*SEEK_SET*/
+ break;
+@@ -1903,7 +1943,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
+ sr->l_start += f_pos;
+ break;
+ case 2: /*SEEK_END*/
+- sr->l_start += i_size_read(inode);
++ sr->l_start += orig_isize;
+ break;
+ default:
+ ret = -EINVAL;
+@@ -1957,6 +1997,14 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
+ default:
+ ret = -EINVAL;
+ }
++
++ /* zeroout eof blocks in the cluster. */
++ if (!ret && change_size && orig_isize < size) {
++ ret = ocfs2_zeroout_partial_cluster(inode, orig_isize,
++ size - orig_isize);
++ if (!ret)
++ i_size_write(inode, size);
++ }
+ up_write(&OCFS2_I(inode)->ip_alloc_sem);
+ if (ret) {
+ mlog_errno(ret);
+@@ -1973,9 +2021,6 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
+ goto out_inode_unlock;
+ }
+
+- if (change_size && i_size_read(inode) < size)
+- i_size_write(inode, size);
+-
+ inode->i_ctime = inode->i_mtime = current_time(inode);
+ ret = ocfs2_mark_inode_dirty(handle, inode, di_bh);
+ if (ret < 0)
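
ocfs2_zeroout_partial_cluster() above converts a byte range into block numbers and trims it to the end of the cluster that contains start. A worked example of that arithmetic with illustrative sizes (4 KiB blocks, 1 MiB clusters; the real values come from the superblock):

    #include <stdint.h>
    #include <stdio.h>

    #define BLOCK_SHIFT   12   /* 4 KiB blocks   (illustrative) */
    #define CLUSTER_SHIFT 20   /* 1 MiB clusters (illustrative) */

    /* Round a byte count up to whole blocks / clusters. */
    static uint64_t blocks_for_bytes(uint64_t b)
    {
        return (b + (1ull << BLOCK_SHIFT) - 1) >> BLOCK_SHIFT;
    }

    static uint64_t align_bytes_to_cluster(uint64_t b)
    {
        uint64_t c = 1ull << CLUSTER_SHIFT;

        return (b + c - 1) / c * c;
    }

    int main(void)
    {
        uint64_t start = 3 * (1ull << 20) + 8192;  /* 8 KiB into cluster 3 */
        uint64_t len   = 4 * (1ull << 20);         /* caller asks for 4 MiB */

        /* Like the ocfs2 helper: never zero past the current cluster. */
        uint64_t end = align_bytes_to_cluster(start);
        if (start + len < end)
            end = start + len;

        uint64_t s = blocks_for_bytes(start);
        uint64_t e = blocks_for_bytes(end);

        /* Prints: zero 254 blocks starting at block 770 */
        printf("zero %llu blocks starting at block %llu\n",
               (unsigned long long)(e - s), (unsigned long long)s);
        return 0;
    }
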
+diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
+index cc9ee07769745..af8f4e2cf21d1 100644
+--- a/include/linux/mlx5/mlx5_ifc.h
++++ b/include/linux/mlx5/mlx5_ifc.h
+@@ -1223,6 +1223,8 @@ enum mlx5_fc_bulk_alloc_bitmask {
+
+ #define MLX5_FC_BULK_NUM_FCS(fc_enum) (MLX5_FC_BULK_SIZE_FACTOR * (fc_enum))
+
++#define MLX5_FT_MAX_MULTIPATH_LEVEL 63
++
+ enum {
+ MLX5_STEERING_FORMAT_CONNECTX_5 = 0,
+ MLX5_STEERING_FORMAT_CONNECTX_6DX = 1,
+diff --git a/include/linux/platform_data/ti-sysc.h b/include/linux/platform_data/ti-sysc.h
+index fafc1beea504a..9837fb011f2fb 100644
+--- a/include/linux/platform_data/ti-sysc.h
++++ b/include/linux/platform_data/ti-sysc.h
+@@ -50,6 +50,7 @@ struct sysc_regbits {
+ s8 emufree_shift;
+ };
+
++#define SYSC_QUIRK_REINIT_ON_RESUME BIT(27)
+ #define SYSC_QUIRK_GPMC_DEBUG BIT(26)
+ #define SYSC_MODULE_QUIRK_ENA_RESETDONE BIT(25)
+ #define SYSC_MODULE_QUIRK_PRUSS BIT(24)
+diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
+index 2e4f7721fc4e7..8110c29fab42d 100644
+--- a/include/linux/usb/usbnet.h
++++ b/include/linux/usb/usbnet.h
+@@ -83,6 +83,8 @@ struct usbnet {
+ # define EVENT_LINK_CHANGE 11
+ # define EVENT_SET_RX_MODE 12
+ # define EVENT_NO_IP_ALIGN 13
++ u32 rx_speed; /* in bps - NOT Mbps */
++ u32 tx_speed; /* in bps - NOT Mbps */
+ };
+
+ static inline struct usb_driver *driver_of(struct usb_interface *intf)
+diff --git a/include/net/caif/caif_dev.h b/include/net/caif/caif_dev.h
+index 48ecca8530ffa..b655d8666f555 100644
+--- a/include/net/caif/caif_dev.h
++++ b/include/net/caif/caif_dev.h
+@@ -119,7 +119,7 @@ void caif_free_client(struct cflayer *adap_layer);
+ * The link_support layer is used to add any Link Layer specific
+ * framing.
+ */
+-void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
++int caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
+ struct cflayer *link_support, int head_room,
+ struct cflayer **layer, int (**rcv_func)(
+ struct sk_buff *, struct net_device *,
+diff --git a/include/net/caif/cfcnfg.h b/include/net/caif/cfcnfg.h
+index 2aa5e91d84576..8819ff4db35a6 100644
+--- a/include/net/caif/cfcnfg.h
++++ b/include/net/caif/cfcnfg.h
+@@ -62,7 +62,7 @@ void cfcnfg_remove(struct cfcnfg *cfg);
+ * @fcs: Specify if checksum is used in CAIF Framing Layer.
+ * @head_room: Head space needed by link specific protocol.
+ */
+-void
++int
+ cfcnfg_add_phy_layer(struct cfcnfg *cnfg,
+ struct net_device *dev, struct cflayer *phy_layer,
+ enum cfcnfg_phy_preference pref,
+diff --git a/include/net/caif/cfserl.h b/include/net/caif/cfserl.h
+index 14a55e03bb3ce..67cce8757175a 100644
+--- a/include/net/caif/cfserl.h
++++ b/include/net/caif/cfserl.h
+@@ -9,4 +9,5 @@
+ #include <net/caif/caif_layer.h>
+
+ struct cflayer *cfserl_create(int instance, bool use_stx);
++void cfserl_release(struct cflayer *layer);
+ #endif
+diff --git a/include/net/tls.h b/include/net/tls.h
+index 2bdd802212fe0..43891b28fc482 100644
+--- a/include/net/tls.h
++++ b/include/net/tls.h
+@@ -193,7 +193,11 @@ struct tls_offload_context_tx {
+ (sizeof(struct tls_offload_context_tx) + TLS_DRIVER_STATE_SIZE_TX)
+
+ enum tls_context_flags {
+- TLS_RX_SYNC_RUNNING = 0,
++ /* tls_device_down was called after the netdev went down, device state
++ * was released, and kTLS works in software, even though rx_conf is
++ * still TLS_HW (needed for transition).
++ */
++ TLS_RX_DEV_DEGRADED = 0,
+ /* Unlike RX where resync is driven entirely by the core in TX only
+ * the driver knows when things went out of sync, so we need the flag
+ * to be atomic.
+@@ -265,6 +269,7 @@ struct tls_context {
+
+ /* cache cold stuff */
+ struct proto *sk_proto;
++ struct sock *sk;
+
+ void (*sk_destruct)(struct sock *sk);
+
+@@ -447,6 +452,9 @@ static inline u16 tls_user_config(struct tls_context *ctx, bool tx)
+ struct sk_buff *
+ tls_validate_xmit_skb(struct sock *sk, struct net_device *dev,
+ struct sk_buff *skb);
++struct sk_buff *
++tls_validate_xmit_skb_sw(struct sock *sk, struct net_device *dev,
++ struct sk_buff *skb);
+
+ static inline bool tls_is_sk_tx_device_offloaded(struct sock *sk)
+ {
+diff --git a/init/main.c b/init/main.c
+index d9d9141112511..b4449544390ca 100644
+--- a/init/main.c
++++ b/init/main.c
+@@ -1505,7 +1505,7 @@ static noinline void __init kernel_init_freeable(void)
+ */
+ set_mems_allowed(node_states[N_MEMORY]);
+
+- cad_pid = task_pid(current);
++ cad_pid = get_pid(task_pid(current));
+
+ smp_prepare_cpus(setup_max_cpus);
+
+diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
+index c489430cac78c..f7e99bb8c3b6c 100644
+--- a/kernel/bpf/helpers.c
++++ b/kernel/bpf/helpers.c
+@@ -14,6 +14,7 @@
+ #include <linux/jiffies.h>
+ #include <linux/pid_namespace.h>
+ #include <linux/proc_ns.h>
++#include <linux/security.h>
+
+ #include "../../lib/kstrtox.h"
+
+@@ -707,14 +708,6 @@ bpf_base_func_proto(enum bpf_func_id func_id)
+ return &bpf_spin_lock_proto;
+ case BPF_FUNC_spin_unlock:
+ return &bpf_spin_unlock_proto;
+- case BPF_FUNC_trace_printk:
+- if (!perfmon_capable())
+- return NULL;
+- return bpf_get_trace_printk_proto();
+- case BPF_FUNC_snprintf_btf:
+- if (!perfmon_capable())
+- return NULL;
+- return &bpf_snprintf_btf_proto;
+ case BPF_FUNC_jiffies64:
+ return &bpf_jiffies64_proto;
+ case BPF_FUNC_per_cpu_ptr:
+@@ -729,16 +722,22 @@ bpf_base_func_proto(enum bpf_func_id func_id)
+ return NULL;
+
+ switch (func_id) {
++ case BPF_FUNC_trace_printk:
++ return bpf_get_trace_printk_proto();
+ case BPF_FUNC_get_current_task:
+ return &bpf_get_current_task_proto;
+ case BPF_FUNC_probe_read_user:
+ return &bpf_probe_read_user_proto;
+ case BPF_FUNC_probe_read_kernel:
+- return &bpf_probe_read_kernel_proto;
++ return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
++ NULL : &bpf_probe_read_kernel_proto;
+ case BPF_FUNC_probe_read_user_str:
+ return &bpf_probe_read_user_str_proto;
+ case BPF_FUNC_probe_read_kernel_str:
+- return &bpf_probe_read_kernel_str_proto;
++ return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
++ NULL : &bpf_probe_read_kernel_str_proto;
++ case BPF_FUNC_snprintf_btf:
++ return &bpf_snprintf_btf_proto;
+ default:
+ return NULL;
+ }
+diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
+index fcbfc95649967..01710831fd02f 100644
+--- a/kernel/trace/bpf_trace.c
++++ b/kernel/trace/bpf_trace.c
+@@ -212,16 +212,11 @@ const struct bpf_func_proto bpf_probe_read_user_str_proto = {
+ static __always_inline int
+ bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr)
+ {
+- int ret = security_locked_down(LOCKDOWN_BPF_READ);
++ int ret;
+
+- if (unlikely(ret < 0))
+- goto fail;
+ ret = copy_from_kernel_nofault(dst, unsafe_ptr, size);
+ if (unlikely(ret < 0))
+- goto fail;
+- return ret;
+-fail:
+- memset(dst, 0, size);
++ memset(dst, 0, size);
+ return ret;
+ }
+
+@@ -243,10 +238,7 @@ const struct bpf_func_proto bpf_probe_read_kernel_proto = {
+ static __always_inline int
+ bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr)
+ {
+- int ret = security_locked_down(LOCKDOWN_BPF_READ);
+-
+- if (unlikely(ret < 0))
+- goto fail;
++ int ret;
+
+ /*
+ * The strncpy_from_kernel_nofault() call will likely not fill the
+@@ -259,11 +251,7 @@ bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr)
+ */
+ ret = strncpy_from_kernel_nofault(dst, unsafe_ptr, size);
+ if (unlikely(ret < 0))
+- goto fail;
+-
+- return ret;
+-fail:
+- memset(dst, 0, size);
++ memset(dst, 0, size);
+ return ret;
+ }
+
+@@ -1293,16 +1281,20 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
+ case BPF_FUNC_probe_read_user:
+ return &bpf_probe_read_user_proto;
+ case BPF_FUNC_probe_read_kernel:
+- return &bpf_probe_read_kernel_proto;
++ return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
++ NULL : &bpf_probe_read_kernel_proto;
+ case BPF_FUNC_probe_read_user_str:
+ return &bpf_probe_read_user_str_proto;
+ case BPF_FUNC_probe_read_kernel_str:
+- return &bpf_probe_read_kernel_str_proto;
++ return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
++ NULL : &bpf_probe_read_kernel_str_proto;
+ #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
+ case BPF_FUNC_probe_read:
+- return &bpf_probe_read_compat_proto;
++ return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
++ NULL : &bpf_probe_read_compat_proto;
+ case BPF_FUNC_probe_read_str:
+- return &bpf_probe_read_compat_str_proto;
++ return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
++ NULL : &bpf_probe_read_compat_str_proto;
+ #endif
+ #ifdef CONFIG_CGROUPS
+ case BPF_FUNC_get_current_cgroup_id:
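
The bpf changes in helpers.c and bpf_trace.c above move the security_locked_down() check out of the per-invocation helper bodies and into the proto-lookup functions: returning NULL there makes the verifier reject the program up front, in a context where calling the LSM hook is safe. A sketch of gating at lookup time rather than call time (all names hypothetical):

    #include <stdio.h>

    typedef int (*helper_fn)(void);

    static int locked_down;   /* stand-in for security_locked_down() */

    static int read_kernel_mem(void)
    {
        return 42;
    }

    /* Resolve the helper once, at program-load time: if policy forbids
     * it, hand back NULL and the loader rejects the program, so the
     * hot path never needs a per-call policy check. */
    static helper_fn get_helper_proto(void)
    {
        return locked_down ? NULL : read_kernel_mem;
    }

    int main(void)
    {
        locked_down = 1;
        if (!get_helper_proto())
            puts("load rejected under lockdown");

        locked_down = 0;
        printf("helper -> %d\n", get_helper_proto()());
        return 0;
    }
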
+diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c
+index 00cb0d0b73e16..8a7724a6ce2fb 100644
+--- a/lib/lz4/lz4_decompress.c
++++ b/lib/lz4/lz4_decompress.c
+@@ -263,7 +263,11 @@ static FORCE_INLINE int LZ4_decompress_generic(
+ }
+ }
+
+- LZ4_memcpy(op, ip, length);
++ /*
++ * supports overlapping memory regions; only matters
++ * for in-place decompression scenarios
++ */
++ LZ4_memmove(op, ip, length);
+ ip += length;
+ op += length;
+
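
The LZ4 change swaps LZ4_memcpy() for a new LZ4_memmove() in the literal-copy path because in-place decompression can make source and destination overlap, which memcpy does not permit. A small runnable demonstration of the distinction:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char buf[16] = "abcdefgh";

        /* Shift "abcdefgh" two bytes right within the same buffer.
         * Source [0..7] and destination [2..9] overlap, so memcpy()
         * would be undefined behaviour; memmove() handles it. */
        memmove(buf + 2, buf, 8);
        printf("%s\n", buf);   /* prints "ababcdefgh" */
        return 0;
    }
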
+diff --git a/lib/lz4/lz4defs.h b/lib/lz4/lz4defs.h
+index c91dd96ef6291..673bd206aa98b 100644
+--- a/lib/lz4/lz4defs.h
++++ b/lib/lz4/lz4defs.h
+@@ -146,6 +146,7 @@ static FORCE_INLINE void LZ4_writeLE16(void *memPtr, U16 value)
+ * environments. This is needed when decompressing the Linux Kernel, for example.
+ */
+ #define LZ4_memcpy(dst, src, size) __builtin_memcpy(dst, src, size)
++#define LZ4_memmove(dst, src, size) __builtin_memmove(dst, src, size)
+
+ static FORCE_INLINE void LZ4_copy8(void *dst, const void *src)
+ {
+diff --git a/mm/debug_vm_pgtable.c b/mm/debug_vm_pgtable.c
+index c05d9dcf78911..750bfef26be37 100644
+--- a/mm/debug_vm_pgtable.c
++++ b/mm/debug_vm_pgtable.c
+@@ -163,7 +163,7 @@ static void __init pmd_advanced_tests(struct mm_struct *mm,
+
+ pr_debug("Validating PMD advanced\n");
+ /* Align the address wrt HPAGE_PMD_SIZE */
+- vaddr = (vaddr & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE;
++ vaddr &= HPAGE_PMD_MASK;
+
+ pgtable_trans_huge_deposit(mm, pmdp, pgtable);
+
+@@ -285,7 +285,7 @@ static void __init pud_advanced_tests(struct mm_struct *mm,
+
+ pr_debug("Validating PUD advanced\n");
+ /* Align the address wrt HPAGE_PUD_SIZE */
+- vaddr = (vaddr & HPAGE_PUD_MASK) + HPAGE_PUD_SIZE;
++ vaddr &= HPAGE_PUD_MASK;
+
+ set_pud_at(mm, vaddr, pudp, pud);
+ pudp_set_wrprotect(mm, vaddr, pudp);
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 900851a4f9146..bc1006a327338 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -4708,10 +4708,20 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
+ struct page *page;
+
+ if (!*pagep) {
+- ret = -ENOMEM;
++ /* If a page already exists, then it's UFFDIO_COPY for
++ * a non-missing case. Return -EEXIST.
++ */
++ if (vm_shared &&
++ hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {
++ ret = -EEXIST;
++ goto out;
++ }
++
+ page = alloc_huge_page(dst_vma, dst_addr, 0);
+- if (IS_ERR(page))
++ if (IS_ERR(page)) {
++ ret = -ENOMEM;
+ goto out;
++ }
+
+ ret = copy_huge_page_from_user(page,
+ (const void __user *) src_addr,
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 7ffa706e5c305..81cc7fdc9c8fd 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -8870,6 +8870,8 @@ bool take_page_off_buddy(struct page *page)
+ del_page_from_free_list(page_head, zone, page_order);
+ break_down_buddy_pages(zone, page_head, page, 0,
+ page_order, migratetype);
++ if (!is_migrate_isolate(migratetype))
++ __mod_zone_freepage_state(zone, -1, migratetype);
+ ret = true;
+ break;
+ }
+diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
+index 0152bc6b67967..86ebfc6ae6986 100644
+--- a/net/bluetooth/hci_core.c
++++ b/net/bluetooth/hci_core.c
+@@ -1602,8 +1602,13 @@ setup_failed:
+ } else {
+ /* Init failed, cleanup */
+ flush_work(&hdev->tx_work);
+- flush_work(&hdev->cmd_work);
++
++ /* Since hci_rx_work() can queue new cmd_work, it should be
++ * flushed first to avoid an unexpected call of
++ * hci_cmd_work()
++ */
+ flush_work(&hdev->rx_work);
++ flush_work(&hdev->cmd_work);
+
+ skb_queue_purge(&hdev->cmd_q);
+ skb_queue_purge(&hdev->rx_q);
+diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
+index 251b9128f530a..eed0dd066e12c 100644
+--- a/net/bluetooth/hci_sock.c
++++ b/net/bluetooth/hci_sock.c
+@@ -762,7 +762,7 @@ void hci_sock_dev_event(struct hci_dev *hdev, int event)
+ /* Detach sockets from device */
+ read_lock(&hci_sk_list.lock);
+ sk_for_each(sk, &hci_sk_list.head) {
+- bh_lock_sock_nested(sk);
++ lock_sock(sk);
+ if (hci_pi(sk)->hdev == hdev) {
+ hci_pi(sk)->hdev = NULL;
+ sk->sk_err = EPIPE;
+@@ -771,7 +771,7 @@ void hci_sock_dev_event(struct hci_dev *hdev, int event)
+
+ hci_dev_put(hdev);
+ }
+- bh_unlock_sock(sk);
++ release_sock(sk);
+ }
+ read_unlock(&hci_sk_list.lock);
+ }
+diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
+index c10e5a55758d2..440139706130a 100644
+--- a/net/caif/caif_dev.c
++++ b/net/caif/caif_dev.c
+@@ -308,7 +308,7 @@ static void dev_flowctrl(struct net_device *dev, int on)
+ caifd_put(caifd);
+ }
+
+-void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
++int caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
+ struct cflayer *link_support, int head_room,
+ struct cflayer **layer,
+ int (**rcv_func)(struct sk_buff *, struct net_device *,
+@@ -319,11 +319,12 @@ void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
+ enum cfcnfg_phy_preference pref;
+ struct cfcnfg *cfg = get_cfcnfg(dev_net(dev));
+ struct caif_device_entry_list *caifdevs;
++ int res;
+
+ caifdevs = caif_device_list(dev_net(dev));
+ caifd = caif_device_alloc(dev);
+ if (!caifd)
+- return;
++ return -ENOMEM;
+ *layer = &caifd->layer;
+ spin_lock_init(&caifd->flow_lock);
+
+@@ -344,7 +345,7 @@ void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
+ strlcpy(caifd->layer.name, dev->name,
+ sizeof(caifd->layer.name));
+ caifd->layer.transmit = transmit;
+- cfcnfg_add_phy_layer(cfg,
++ res = cfcnfg_add_phy_layer(cfg,
+ dev,
+ &caifd->layer,
+ pref,
+@@ -354,6 +355,7 @@ void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
+ mutex_unlock(&caifdevs->lock);
+ if (rcv_func)
+ *rcv_func = receive;
++ return res;
+ }
+ EXPORT_SYMBOL(caif_enroll_dev);
+
+@@ -368,6 +370,7 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
+ struct cflayer *layer, *link_support;
+ int head_room = 0;
+ struct caif_device_entry_list *caifdevs;
++ int res;
+
+ cfg = get_cfcnfg(dev_net(dev));
+ caifdevs = caif_device_list(dev_net(dev));
+@@ -393,8 +396,10 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
+ break;
+ }
+ }
+- caif_enroll_dev(dev, caifdev, link_support, head_room,
++ res = caif_enroll_dev(dev, caifdev, link_support, head_room,
+ &layer, NULL);
++ if (res)
++ cfserl_release(link_support);
+ caifdev->flowctrl = dev_flowctrl;
+ break;
+
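
The caif series turns caif_enroll_dev() and cfcnfg_add_phy_layer() from void into int so allocation failures propagate to the caller, which still owns the link_support layer it created and can now free it via the new cfserl_release()/cfusbl_release(). A small sketch of that ownership rule (all names hypothetical):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct layer { const char *name; };

    static struct layer *serl_create(void)
    {
        struct layer *l = malloc(sizeof(*l));

        if (l)
            l->name = "serial";
        return l;
    }

    static void serl_release(struct layer *l)
    {
        free(l);
    }

    /* Registration now reports failure instead of swallowing it. */
    static int enroll_dev(struct layer *link_support, int simulate_failure)
    {
        if (simulate_failure)
            return -ENOMEM;   /* caller still owns link_support */
        printf("enrolled %s\n", link_support->name);
        return 0;
    }

    int main(void)
    {
        struct layer *ls = serl_create();

        if (!ls)
            return 1;
        /* Ownership transfers only on success; on error, release. */
        if (enroll_dev(ls, 1) < 0)
            serl_release(ls);
        return 0;
    }
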
+diff --git a/net/caif/caif_usb.c b/net/caif/caif_usb.c
+index a0116b9503d9d..b02e1292f7f19 100644
+--- a/net/caif/caif_usb.c
++++ b/net/caif/caif_usb.c
+@@ -115,6 +115,11 @@ static struct cflayer *cfusbl_create(int phyid, u8 ethaddr[ETH_ALEN],
+ return (struct cflayer *) this;
+ }
+
++static void cfusbl_release(struct cflayer *layer)
++{
++ kfree(layer);
++}
++
+ static struct packet_type caif_usb_type __read_mostly = {
+ .type = cpu_to_be16(ETH_P_802_EX1),
+ };
+@@ -127,6 +132,7 @@ static int cfusbl_device_notify(struct notifier_block *me, unsigned long what,
+ struct cflayer *layer, *link_support;
+ struct usbnet *usbnet;
+ struct usb_device *usbdev;
++ int res;
+
+ /* Check whether we have a NCM device, and find its VID/PID. */
+ if (!(dev->dev.parent && dev->dev.parent->driver &&
+@@ -169,8 +175,11 @@ static int cfusbl_device_notify(struct notifier_block *me, unsigned long what,
+ if (dev->num_tx_queues > 1)
+ pr_warn("USB device uses more than one tx queue\n");
+
+- caif_enroll_dev(dev, &common, link_support, CFUSB_MAX_HEADLEN,
++ res = caif_enroll_dev(dev, &common, link_support, CFUSB_MAX_HEADLEN,
+ &layer, &caif_usb_type.func);
++ if (res)
++ goto err;
++
+ if (!pack_added)
+ dev_add_pack(&caif_usb_type);
+ pack_added = true;
+@@ -178,6 +187,9 @@ static int cfusbl_device_notify(struct notifier_block *me, unsigned long what,
+ strlcpy(layer->name, dev->name, sizeof(layer->name));
+
+ return 0;
++err:
++ cfusbl_release(link_support);
++ return res;
+ }
+
+ static struct notifier_block caif_device_notifier = {
+diff --git a/net/caif/cfcnfg.c b/net/caif/cfcnfg.c
+index 399239a14420f..cac30e676ac94 100644
+--- a/net/caif/cfcnfg.c
++++ b/net/caif/cfcnfg.c
+@@ -450,7 +450,7 @@ unlock:
+ rcu_read_unlock();
+ }
+
+-void
++int
+ cfcnfg_add_phy_layer(struct cfcnfg *cnfg,
+ struct net_device *dev, struct cflayer *phy_layer,
+ enum cfcnfg_phy_preference pref,
+@@ -459,7 +459,7 @@ cfcnfg_add_phy_layer(struct cfcnfg *cnfg,
+ {
+ struct cflayer *frml;
+ struct cfcnfg_phyinfo *phyinfo = NULL;
+- int i;
++ int i, res = 0;
+ u8 phyid;
+
+ mutex_lock(&cnfg->lock);
+@@ -473,12 +473,15 @@ cfcnfg_add_phy_layer(struct cfcnfg *cnfg,
+ goto got_phyid;
+ }
+ pr_warn("Too many CAIF Link Layers (max 6)\n");
++ res = -EEXIST;
+ goto out;
+
+ got_phyid:
+ phyinfo = kzalloc(sizeof(struct cfcnfg_phyinfo), GFP_ATOMIC);
+- if (!phyinfo)
++ if (!phyinfo) {
++ res = -ENOMEM;
+ goto out_err;
++ }
+
+ phy_layer->id = phyid;
+ phyinfo->pref = pref;
+@@ -492,8 +495,10 @@ got_phyid:
+
+ frml = cffrml_create(phyid, fcs);
+
+- if (!frml)
++ if (!frml) {
++ res = -ENOMEM;
+ goto out_err;
++ }
+ phyinfo->frm_layer = frml;
+ layer_set_up(frml, cnfg->mux);
+
+@@ -511,11 +516,12 @@ got_phyid:
+ list_add_rcu(&phyinfo->node, &cnfg->phys);
+ out:
+ mutex_unlock(&cnfg->lock);
+- return;
++ return res;
+
+ out_err:
+ kfree(phyinfo);
+ mutex_unlock(&cnfg->lock);
++ return res;
+ }
+ EXPORT_SYMBOL(cfcnfg_add_phy_layer);
+
+diff --git a/net/caif/cfserl.c b/net/caif/cfserl.c
+index e11725a4bb0ed..40cd57ad0a0f4 100644
+--- a/net/caif/cfserl.c
++++ b/net/caif/cfserl.c
+@@ -31,6 +31,11 @@ static int cfserl_transmit(struct cflayer *layr, struct cfpkt *pkt);
+ static void cfserl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
+ int phyid);
+
++void cfserl_release(struct cflayer *layer)
++{
++ kfree(layer);
++}
++
+ struct cflayer *cfserl_create(int instance, bool use_stx)
+ {
+ struct cfserl *this = kzalloc(sizeof(struct cfserl), GFP_ATOMIC);
+diff --git a/net/core/devlink.c b/net/core/devlink.c
+index 5d397838bceb6..90badb6f72271 100644
+--- a/net/core/devlink.c
++++ b/net/core/devlink.c
+@@ -693,7 +693,6 @@ static int devlink_nl_port_attrs_put(struct sk_buff *msg,
+ case DEVLINK_PORT_FLAVOUR_PHYSICAL:
+ case DEVLINK_PORT_FLAVOUR_CPU:
+ case DEVLINK_PORT_FLAVOUR_DSA:
+- case DEVLINK_PORT_FLAVOUR_VIRTUAL:
+ if (nla_put_u32(msg, DEVLINK_ATTR_PORT_NUMBER,
+ attrs->phys.port_number))
+ return -EMSGSIZE;
+@@ -8376,7 +8375,6 @@ static int __devlink_port_phys_port_name_get(struct devlink_port *devlink_port,
+
+ switch (attrs->flavour) {
+ case DEVLINK_PORT_FLAVOUR_PHYSICAL:
+- case DEVLINK_PORT_FLAVOUR_VIRTUAL:
+ if (!attrs->split)
+ n = snprintf(name, len, "p%u", attrs->phys.port_number);
+ else
+@@ -8413,6 +8411,8 @@ static int __devlink_port_phys_port_name_get(struct devlink_port *devlink_port,
+ n = snprintf(name, len, "pf%uvf%u",
+ attrs->pci_vf.pf, attrs->pci_vf.vf);
+ break;
++ case DEVLINK_PORT_FLAVOUR_VIRTUAL:
++ return -EOPNOTSUPP;
+ }
+
+ if (n >= len)
+diff --git a/net/core/neighbour.c b/net/core/neighbour.c
+index a18c2973b8c6d..c452ebf209394 100644
+--- a/net/core/neighbour.c
++++ b/net/core/neighbour.c
+@@ -239,6 +239,7 @@ static int neigh_forced_gc(struct neigh_table *tbl)
+
+ write_lock(&n->lock);
+ if ((n->nud_state == NUD_FAILED) ||
++ (n->nud_state == NUD_NOARP) ||
+ (tbl->is_multicast &&
+ tbl->is_multicast(n->primary_key)) ||
+ time_after(tref, n->updated))
+diff --git a/net/core/sock.c b/net/core/sock.c
+index dee29f41beaf8..7de51ea15cdfc 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -807,10 +807,18 @@ void sock_set_rcvbuf(struct sock *sk, int val)
+ }
+ EXPORT_SYMBOL(sock_set_rcvbuf);
+
++static void __sock_set_mark(struct sock *sk, u32 val)
++{
++ if (val != sk->sk_mark) {
++ sk->sk_mark = val;
++ sk_dst_reset(sk);
++ }
++}
++
+ void sock_set_mark(struct sock *sk, u32 val)
+ {
+ lock_sock(sk);
+- sk->sk_mark = val;
++ __sock_set_mark(sk, val);
+ release_sock(sk);
+ }
+ EXPORT_SYMBOL(sock_set_mark);
+@@ -1118,10 +1126,10 @@ set_sndbuf:
+ case SO_MARK:
+ if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
+ ret = -EPERM;
+- } else if (val != sk->sk_mark) {
+- sk->sk_mark = val;
+- sk_dst_reset(sk);
++ break;
+ }
++
++ __sock_set_mark(sk, val);
+ break;
+
+ case SO_RXQ_OVFL:
+diff --git a/net/dsa/tag_8021q.c b/net/dsa/tag_8021q.c
+index 8e3e8a5b85593..a00b513c22a1d 100644
+--- a/net/dsa/tag_8021q.c
++++ b/net/dsa/tag_8021q.c
+@@ -64,7 +64,7 @@
+ #define DSA_8021Q_SUBVLAN_HI_SHIFT 9
+ #define DSA_8021Q_SUBVLAN_HI_MASK GENMASK(9, 9)
+ #define DSA_8021Q_SUBVLAN_LO_SHIFT 4
+-#define DSA_8021Q_SUBVLAN_LO_MASK GENMASK(4, 3)
++#define DSA_8021Q_SUBVLAN_LO_MASK GENMASK(5, 4)
+ #define DSA_8021Q_SUBVLAN_HI(x) (((x) & GENMASK(2, 2)) >> 2)
+ #define DSA_8021Q_SUBVLAN_LO(x) ((x) & GENMASK(1, 0))
+ #define DSA_8021Q_SUBVLAN(x) \
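
The subVLAN's two low bits are placed at VID bits 5:4 (a shift of 4), but DSA_8021Q_SUBVLAN_LO_MASK said GENMASK(4, 3), so extraction grabbed the wrong bits. A quick runnable check of the mask arithmetic, with a userspace copy of GENMASK():

    #include <stdio.h>

    /* Userspace copy of the kernel's GENMASK(h, l): bits h..l set. */
    #define GENMASK(h, l) ((~0u << (l)) & (~0u >> (31 - (h))))

    #define SUBVLAN_LO_SHIFT 4

    int main(void)
    {
        printf("GENMASK(4,3) = 0x%02x (bits 4:3 - wrong)\n", GENMASK(4, 3));
        printf("GENMASK(5,4) = 0x%02x (bits 5:4 - right)\n", GENMASK(5, 4));

        unsigned int subvlan_lo = 0x3;             /* 2-bit field value */
        unsigned int vid = subvlan_lo << SUBVLAN_LO_SHIFT;

        /* A 2-bit field shifted left by 4 occupies bits 5:4. */
        printf("extracted = %u (expect 3)\n",
               (vid & GENMASK(5, 4)) >> SUBVLAN_LO_SHIFT);
        return 0;
    }
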
+diff --git a/net/ieee802154/nl-mac.c b/net/ieee802154/nl-mac.c
+index d19c40c684e80..71be751123210 100644
+--- a/net/ieee802154/nl-mac.c
++++ b/net/ieee802154/nl-mac.c
+@@ -680,8 +680,10 @@ int ieee802154_llsec_getparams(struct sk_buff *skb, struct genl_info *info)
+ nla_put_u8(msg, IEEE802154_ATTR_LLSEC_SECLEVEL, params.out_level) ||
+ nla_put_u32(msg, IEEE802154_ATTR_LLSEC_FRAME_COUNTER,
+ be32_to_cpu(params.frame_counter)) ||
+- ieee802154_llsec_fill_key_id(msg, &params.out_key))
++ ieee802154_llsec_fill_key_id(msg, &params.out_key)) {
++ rc = -ENOBUFS;
+ goto out_free;
++ }
+
+ dev_put(dev);
+
+diff --git a/net/ieee802154/nl-phy.c b/net/ieee802154/nl-phy.c
+index 2cdc7e63fe172..88215b5c93aa4 100644
+--- a/net/ieee802154/nl-phy.c
++++ b/net/ieee802154/nl-phy.c
+@@ -241,8 +241,10 @@ int ieee802154_add_iface(struct sk_buff *skb, struct genl_info *info)
+ }
+
+ if (nla_put_string(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)) ||
+- nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name))
++ nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name)) {
++ rc = -EMSGSIZE;
+ goto nla_put_failure;
++ }
+ dev_put(dev);
+
+ wpan_phy_put(phy);
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 71e578ed8699f..ccff4738313c1 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -3671,11 +3671,11 @@ static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
+ if (nh) {
+ if (rt->fib6_src.plen) {
+ NL_SET_ERR_MSG(extack, "Nexthops can not be used with source routing");
+- goto out;
++ goto out_free;
+ }
+ if (!nexthop_get(nh)) {
+ NL_SET_ERR_MSG(extack, "Nexthop has been deleted");
+- goto out;
++ goto out_free;
+ }
+ rt->nh = nh;
+ fib6_nh = nexthop_fib6_nh(rt->nh);
+@@ -3712,6 +3712,10 @@ static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
+ out:
+ fib6_info_release(rt);
+ return ERR_PTR(err);
++out_free:
++ ip_fib_metrics_put(rt->fib6_metrics);
++ kfree(rt);
++ return ERR_PTR(err);
+ }
+
+ int ip6_route_add(struct fib6_config *cfg, gfp_t gfp_flags,
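
In ip6_route_info_create(), jumping to the common out: label from the nexthop checks would run fib6_info_release() on a route that is not fully constructed; the new out_free: label unwinds only what exists at that point (the metrics and the bare allocation). A compilable sketch of the layered-cleanup idiom (names hypothetical):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct route { int *metrics; };

    static void metrics_put(int *m)
    {
        free(m);
    }

    static int route_create(int bad_nexthop)
    {
        struct route *rt = calloc(1, sizeof(*rt));
        int err;

        if (!rt)
            return -ENOMEM;
        rt->metrics = malloc(sizeof(int));
        if (!rt->metrics) {
            err = -ENOMEM;
            goto out_free_rt;
        }

        if (bad_nexthop) {
            /* Route not registered yet: a generic release helper would
             * touch uninitialized parts, so unwind by hand. */
            err = -EINVAL;
            goto out_free;
        }

        /* ... full initialization elided; a fully built route would be
         * torn down through its own release function instead ... */
        metrics_put(rt->metrics);
        free(rt);
        return 0;

    out_free:
        metrics_put(rt->metrics);
    out_free_rt:
        free(rt);
        return err;
    }

    int main(void)
    {
        printf("route_create -> %d\n", route_create(1));
        return 0;
    }
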
+diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
+index bdd6af38a9ae3..96b6aca9d0ae7 100644
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -527,21 +527,20 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
+
+ /* if the sk is MP_CAPABLE, we try to fetch the client key */
+ if (subflow_req->mp_capable) {
+- if (TCP_SKB_CB(skb)->seq != subflow_req->ssn_offset + 1) {
+- /* here we can receive and accept an in-window,
+- * out-of-order pkt, which will not carry the MP_CAPABLE
+- * opt even on mptcp enabled paths
+- */
+- goto create_msk;
+- }
+-
++ /* we can receive and accept an in-window, out-of-order pkt,
++ * which may not carry the MP_CAPABLE opt even on mptcp enabled
++ * paths: always try to extract the peer key, and fall back
++ * for packets missing it.
++ * Even OoO DSS packets arriving legitimately after dropped or
++ * reordered MPC will cause fallback, but we don't have other
++ * options.
++ */
+ mptcp_get_options(skb, &mp_opt);
+ if (!mp_opt.mp_capable) {
+ fallback = true;
+ goto create_child;
+ }
+
+-create_msk:
+ new_msk = mptcp_sk_clone(listener->conn, &mp_opt, req);
+ if (!new_msk)
+ fallback = true;
+diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
+index d45dbcba8b49c..c25097092a060 100644
+--- a/net/netfilter/ipvs/ip_vs_ctl.c
++++ b/net/netfilter/ipvs/ip_vs_ctl.c
+@@ -1367,7 +1367,7 @@ ip_vs_add_service(struct netns_ipvs *ipvs, struct ip_vs_service_user_kern *u,
+ ip_vs_addr_copy(svc->af, &svc->addr, &u->addr);
+ svc->port = u->port;
+ svc->fwmark = u->fwmark;
+- svc->flags = u->flags;
++ svc->flags = u->flags & ~IP_VS_SVC_F_HASHED;
+ svc->timeout = u->timeout * HZ;
+ svc->netmask = u->netmask;
+ svc->ipvs = ipvs;
+diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
+index 47e9319d2cf31..71892822bbf5d 100644
+--- a/net/netfilter/nf_conntrack_proto.c
++++ b/net/netfilter/nf_conntrack_proto.c
+@@ -660,7 +660,7 @@ int nf_conntrack_proto_init(void)
+
+ #if IS_ENABLED(CONFIG_IPV6)
+ cleanup_sockopt:
+- nf_unregister_sockopt(&so_getorigdst6);
++ nf_unregister_sockopt(&so_getorigdst);
+ #endif
+ return ret;
+ }
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 7bf7bfa0c7d9c..e34d05cc57549 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -3263,8 +3263,10 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk,
+ if (n == NFT_RULE_MAXEXPRS)
+ goto err1;
+ err = nf_tables_expr_parse(&ctx, tmp, &info[n]);
+- if (err < 0)
++ if (err < 0) {
++ NL_SET_BAD_ATTR(extack, tmp);
+ goto err1;
++ }
+ size += info[n].ops->size;
+ n++;
+ }
+diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c
+index 5b0d0a77379c6..91afbf8ac8cf0 100644
+--- a/net/netfilter/nfnetlink_cthelper.c
++++ b/net/netfilter/nfnetlink_cthelper.c
+@@ -380,10 +380,14 @@ static int
+ nfnl_cthelper_update(const struct nlattr * const tb[],
+ struct nf_conntrack_helper *helper)
+ {
++ u32 size;
+ int ret;
+
+- if (tb[NFCTH_PRIV_DATA_LEN])
+- return -EBUSY;
++ if (tb[NFCTH_PRIV_DATA_LEN]) {
++ size = ntohl(nla_get_be32(tb[NFCTH_PRIV_DATA_LEN]));
++ if (size != helper->data_len)
++ return -EBUSY;
++ }
+
+ if (tb[NFCTH_POLICY]) {
+ ret = nfnl_cthelper_update_policy(helper, tb[NFCTH_POLICY]);
+diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c
+index a1b0aac46e9e0..70d46e0bbf064 100644
+--- a/net/netfilter/nft_ct.c
++++ b/net/netfilter/nft_ct.c
+@@ -1218,7 +1218,7 @@ static void nft_ct_expect_obj_eval(struct nft_object *obj,
+ struct nf_conn *ct;
+
+ ct = nf_ct_get(pkt->skb, &ctinfo);
+- if (!ct || ctinfo == IP_CT_UNTRACKED) {
++ if (!ct || nf_ct_is_confirmed(ct) || nf_ct_is_template(ct)) {
+ regs->verdict.code = NFT_BREAK;
+ return;
+ }
+diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c
+index 53dbe733f9981..6cfd30fc07985 100644
+--- a/net/nfc/llcp_sock.c
++++ b/net/nfc/llcp_sock.c
+@@ -110,6 +110,7 @@ static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
+ if (!llcp_sock->service_name) {
+ nfc_llcp_local_put(llcp_sock->local);
+ llcp_sock->local = NULL;
++ llcp_sock->dev = NULL;
+ ret = -ENOMEM;
+ goto put_dev;
+ }
+@@ -119,6 +120,7 @@ static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
+ llcp_sock->local = NULL;
+ kfree(llcp_sock->service_name);
+ llcp_sock->service_name = NULL;
++ llcp_sock->dev = NULL;
+ ret = -EADDRINUSE;
+ goto put_dev;
+ }
+diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
+index aba3cd85f284f..315a5b2f3add8 100644
+--- a/net/sched/act_ct.c
++++ b/net/sched/act_ct.c
+@@ -979,7 +979,7 @@ static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
+ */
+ cached = tcf_ct_skb_nfct_cached(net, skb, p->zone, force);
+ if (!cached) {
+- if (!commit && tcf_ct_flow_table_lookup(p, skb, family)) {
++ if (tcf_ct_flow_table_lookup(p, skb, family)) {
+ skip_add = true;
+ goto do_nat;
+ }
+@@ -1019,10 +1019,11 @@ do_nat:
+ * even if the connection is already confirmed.
+ */
+ nf_conntrack_confirm(skb);
+- } else if (!skip_add) {
+- tcf_ct_flow_table_process_conn(p->ct_ft, ct, ctinfo);
+ }
+
++ if (!skip_add)
++ tcf_ct_flow_table_process_conn(p->ct_ft, ct, ctinfo);
++
+ out_push:
+ skb_push_rcsum(skb, nh_ofs);
+
+@@ -1198,9 +1199,6 @@ static int tcf_ct_fill_params(struct net *net,
+ sizeof(p->zone));
+ }
+
+- if (p->zone == NF_CT_DEFAULT_ZONE_ID)
+- return 0;
+-
+ nf_ct_zone_init(&zone, p->zone, NF_CT_DEFAULT_ZONE_DIR, 0);
+ tmpl = nf_ct_tmpl_alloc(net, &zone, GFP_KERNEL);
+ if (!tmpl) {
+diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
+index 6504141104521..12e535b43d887 100644
+--- a/net/tipc/bearer.c
++++ b/net/tipc/bearer.c
+@@ -234,7 +234,8 @@ void tipc_bearer_remove_dest(struct net *net, u32 bearer_id, u32 dest)
+ */
+ static int tipc_enable_bearer(struct net *net, const char *name,
+ u32 disc_domain, u32 prio,
+- struct nlattr *attr[])
++ struct nlattr *attr[],
++ struct netlink_ext_ack *extack)
+ {
+ struct tipc_net *tn = tipc_net(net);
+ struct tipc_bearer_names b_names;
+@@ -245,20 +246,24 @@ static int tipc_enable_bearer(struct net *net, const char *name,
+ int bearer_id = 0;
+ int res = -EINVAL;
+ char *errstr = "";
++ u32 i;
+
+ if (!bearer_name_validate(name, &b_names)) {
+ errstr = "illegal name";
++ NL_SET_ERR_MSG(extack, "Illegal name");
+ goto rejected;
+ }
+
+ if (prio > TIPC_MAX_LINK_PRI && prio != TIPC_MEDIA_LINK_PRI) {
+ errstr = "illegal priority";
++ NL_SET_ERR_MSG(extack, "Illegal priority");
+ goto rejected;
+ }
+
+ m = tipc_media_find(b_names.media_name);
+ if (!m) {
+ errstr = "media not registered";
++ NL_SET_ERR_MSG(extack, "Media not registered");
+ goto rejected;
+ }
+
+@@ -266,33 +271,43 @@ static int tipc_enable_bearer(struct net *net, const char *name,
+ prio = m->priority;
+
+ /* Check new bearer vs existing ones and find free bearer id if any */
+- while (bearer_id < MAX_BEARERS) {
+- b = rtnl_dereference(tn->bearer_list[bearer_id]);
+- if (!b)
+- break;
++ bearer_id = MAX_BEARERS;
++ i = MAX_BEARERS;
++ while (i-- != 0) {
++ b = rtnl_dereference(tn->bearer_list[i]);
++ if (!b) {
++ bearer_id = i;
++ continue;
++ }
+ if (!strcmp(name, b->name)) {
+ errstr = "already enabled";
++ NL_SET_ERR_MSG(extack, "Already enabled");
+ goto rejected;
+ }
+- bearer_id++;
+- if (b->priority != prio)
+- continue;
+- if (++with_this_prio <= 2)
+- continue;
+- pr_warn("Bearer <%s>: already 2 bearers with priority %u\n",
+- name, prio);
+- if (prio == TIPC_MIN_LINK_PRI) {
+- errstr = "cannot adjust to lower";
+- goto rejected;
++
++ if (b->priority == prio &&
++ (++with_this_prio > 2)) {
++ pr_warn("Bearer <%s>: already 2 bearers with priority %u\n",
++ name, prio);
++
++ if (prio == TIPC_MIN_LINK_PRI) {
++ errstr = "cannot adjust to lower";
++ NL_SET_ERR_MSG(extack, "Cannot adjust to lower");
++ goto rejected;
++ }
++
++ pr_warn("Bearer <%s>: trying with adjusted priority\n",
++ name);
++ prio--;
++ bearer_id = MAX_BEARERS;
++ i = MAX_BEARERS;
++ with_this_prio = 1;
+ }
+- pr_warn("Bearer <%s>: trying with adjusted priority\n", name);
+- prio--;
+- bearer_id = 0;
+- with_this_prio = 1;
+ }
+
+ if (bearer_id >= MAX_BEARERS) {
+ errstr = "max 3 bearers permitted";
++ NL_SET_ERR_MSG(extack, "Max 3 bearers permitted");
+ goto rejected;
+ }
+
+@@ -306,6 +321,7 @@ static int tipc_enable_bearer(struct net *net, const char *name,
+ if (res) {
+ kfree(b);
+ errstr = "failed to enable media";
++ NL_SET_ERR_MSG(extack, "Failed to enable media");
+ goto rejected;
+ }
+
+@@ -322,6 +338,7 @@ static int tipc_enable_bearer(struct net *net, const char *name,
+ if (res) {
+ bearer_disable(net, b);
+ errstr = "failed to create discoverer";
++ NL_SET_ERR_MSG(extack, "Failed to create discoverer");
+ goto rejected;
+ }
+
+@@ -894,6 +911,7 @@ int tipc_nl_bearer_get(struct sk_buff *skb, struct genl_info *info)
+ bearer = tipc_bearer_find(net, name);
+ if (!bearer) {
+ err = -EINVAL;
++ NL_SET_ERR_MSG(info->extack, "Bearer not found");
+ goto err_out;
+ }
+
+@@ -933,8 +951,10 @@ int __tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info)
+ name = nla_data(attrs[TIPC_NLA_BEARER_NAME]);
+
+ bearer = tipc_bearer_find(net, name);
+- if (!bearer)
++ if (!bearer) {
++ NL_SET_ERR_MSG(info->extack, "Bearer not found");
+ return -EINVAL;
++ }
+
+ bearer_disable(net, bearer);
+
+@@ -992,7 +1012,8 @@ int __tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info)
+ prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
+ }
+
+- return tipc_enable_bearer(net, bearer, domain, prio, attrs);
++ return tipc_enable_bearer(net, bearer, domain, prio, attrs,
++ info->extack);
+ }
+
+ int tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info)
+@@ -1031,6 +1052,7 @@ int tipc_nl_bearer_add(struct sk_buff *skb, struct genl_info *info)
+ b = tipc_bearer_find(net, name);
+ if (!b) {
+ rtnl_unlock();
++ NL_SET_ERR_MSG(info->extack, "Bearer not found");
+ return -EINVAL;
+ }
+
+@@ -1071,8 +1093,10 @@ int __tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info)
+ name = nla_data(attrs[TIPC_NLA_BEARER_NAME]);
+
+ b = tipc_bearer_find(net, name);
+- if (!b)
++ if (!b) {
++ NL_SET_ERR_MSG(info->extack, "Bearer not found");
+ return -EINVAL;
++ }
+
+ if (attrs[TIPC_NLA_BEARER_PROP]) {
+ struct nlattr *props[TIPC_NLA_PROP_MAX + 1];
+@@ -1091,12 +1115,18 @@ int __tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info)
+ if (props[TIPC_NLA_PROP_WIN])
+ b->max_win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
+ if (props[TIPC_NLA_PROP_MTU]) {
+- if (b->media->type_id != TIPC_MEDIA_TYPE_UDP)
++ if (b->media->type_id != TIPC_MEDIA_TYPE_UDP) {
++ NL_SET_ERR_MSG(info->extack,
++ "MTU property is unsupported");
+ return -EINVAL;
++ }
+ #ifdef CONFIG_TIPC_MEDIA_UDP
+ if (tipc_udp_mtu_bad(nla_get_u32
+- (props[TIPC_NLA_PROP_MTU])))
++ (props[TIPC_NLA_PROP_MTU]))) {
++ NL_SET_ERR_MSG(info->extack,
++ "MTU value is out-of-range");
+ return -EINVAL;
++ }
+ b->mtu = nla_get_u32(props[TIPC_NLA_PROP_MTU]);
+ tipc_node_apply_property(net, b, TIPC_NLA_PROP_MTU);
+ #endif
+@@ -1224,6 +1254,7 @@ int tipc_nl_media_get(struct sk_buff *skb, struct genl_info *info)
+ rtnl_lock();
+ media = tipc_media_find(name);
+ if (!media) {
++ NL_SET_ERR_MSG(info->extack, "Media not found");
+ err = -EINVAL;
+ goto err_out;
+ }
+@@ -1260,9 +1291,10 @@ int __tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info)
+ name = nla_data(attrs[TIPC_NLA_MEDIA_NAME]);
+
+ m = tipc_media_find(name);
+- if (!m)
++ if (!m) {
++ NL_SET_ERR_MSG(info->extack, "Media not found");
+ return -EINVAL;
+-
++ }
+ if (attrs[TIPC_NLA_MEDIA_PROP]) {
+ struct nlattr *props[TIPC_NLA_PROP_MAX + 1];
+
+@@ -1278,12 +1310,18 @@ int __tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info)
+ if (props[TIPC_NLA_PROP_WIN])
+ m->max_win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
+ if (props[TIPC_NLA_PROP_MTU]) {
+- if (m->type_id != TIPC_MEDIA_TYPE_UDP)
++ if (m->type_id != TIPC_MEDIA_TYPE_UDP) {
++ NL_SET_ERR_MSG(info->extack,
++ "MTU property is unsupported");
+ return -EINVAL;
++ }
+ #ifdef CONFIG_TIPC_MEDIA_UDP
+ if (tipc_udp_mtu_bad(nla_get_u32
+- (props[TIPC_NLA_PROP_MTU])))
++ (props[TIPC_NLA_PROP_MTU]))) {
++ NL_SET_ERR_MSG(info->extack,
++ "MTU value is out-of-range");
+ return -EINVAL;
++ }
+ m->mtu = nla_get_u32(props[TIPC_NLA_PROP_MTU]);
+ #endif
+ }
+diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
+index a3ab2d3d4e4ea..f718c7346088f 100644
+--- a/net/tls/tls_device.c
++++ b/net/tls/tls_device.c
+@@ -50,6 +50,7 @@ static void tls_device_gc_task(struct work_struct *work);
+ static DECLARE_WORK(tls_device_gc_work, tls_device_gc_task);
+ static LIST_HEAD(tls_device_gc_list);
+ static LIST_HEAD(tls_device_list);
++static LIST_HEAD(tls_device_down_list);
+ static DEFINE_SPINLOCK(tls_device_lock);
+
+ static void tls_device_free_ctx(struct tls_context *ctx)
+@@ -680,15 +681,13 @@ static void tls_device_resync_rx(struct tls_context *tls_ctx,
+ struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);
+ struct net_device *netdev;
+
+- if (WARN_ON(test_and_set_bit(TLS_RX_SYNC_RUNNING, &tls_ctx->flags)))
+- return;
+-
+ trace_tls_device_rx_resync_send(sk, seq, rcd_sn, rx_ctx->resync_type);
++ rcu_read_lock();
+ netdev = READ_ONCE(tls_ctx->netdev);
+ if (netdev)
+ netdev->tlsdev_ops->tls_dev_resync(netdev, sk, seq, rcd_sn,
+ TLS_OFFLOAD_CTX_DIR_RX);
+- clear_bit_unlock(TLS_RX_SYNC_RUNNING, &tls_ctx->flags);
++ rcu_read_unlock();
+ TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXDEVICERESYNC);
+ }
+
+@@ -761,6 +760,8 @@ void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq)
+
+ if (tls_ctx->rx_conf != TLS_HW)
+ return;
++ if (unlikely(test_bit(TLS_RX_DEV_DEGRADED, &tls_ctx->flags)))
++ return;
+
+ prot = &tls_ctx->prot_info;
+ rx_ctx = tls_offload_ctx_rx(tls_ctx);
+@@ -963,6 +964,17 @@ int tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx,
+
+ ctx->sw.decrypted |= is_decrypted;
+
++ if (unlikely(test_bit(TLS_RX_DEV_DEGRADED, &tls_ctx->flags))) {
++ if (likely(is_encrypted || is_decrypted))
++ return 0;
++
++ /* After tls_device_down disables the offload, the next SKB will
++ * likely have initial fragments decrypted, and final ones not
++ * decrypted. We need to reencrypt that single SKB.
++ */
++ return tls_device_reencrypt(sk, skb);
++ }
++
+ /* Return immediately if the record is either entirely plaintext or
+ * entirely ciphertext. Otherwise handle reencrypt partially decrypted
+ * record.
+@@ -1290,6 +1302,26 @@ static int tls_device_down(struct net_device *netdev)
+ spin_unlock_irqrestore(&tls_device_lock, flags);
+
+ list_for_each_entry_safe(ctx, tmp, &list, list) {
++ /* Stop offloaded TX and switch to the fallback.
++ * tls_is_sk_tx_device_offloaded will return false.
++ */
++ WRITE_ONCE(ctx->sk->sk_validate_xmit_skb, tls_validate_xmit_skb_sw);
++
++ /* Stop the RX and TX resync.
++ * tls_dev_resync must not be called after tls_dev_del.
++ */
++ WRITE_ONCE(ctx->netdev, NULL);
++
++ /* Start skipping the RX resync logic completely. */
++ set_bit(TLS_RX_DEV_DEGRADED, &ctx->flags);
++
++ /* Sync with inflight packets. After this point:
++ * TX: no non-encrypted packets will be passed to the driver.
++ * RX: resync requests from the driver will be ignored.
++ */
++ synchronize_net();
++
++ /* Release the offload context on the driver side. */
+ if (ctx->tx_conf == TLS_HW)
+ netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
+ TLS_OFFLOAD_CTX_DIR_TX);
+@@ -1297,15 +1329,21 @@ static int tls_device_down(struct net_device *netdev)
+ !test_bit(TLS_RX_DEV_CLOSED, &ctx->flags))
+ netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
+ TLS_OFFLOAD_CTX_DIR_RX);
+- WRITE_ONCE(ctx->netdev, NULL);
+- smp_mb__before_atomic(); /* pairs with test_and_set_bit() */
+- while (test_bit(TLS_RX_SYNC_RUNNING, &ctx->flags))
+- usleep_range(10, 200);
++
+ dev_put(netdev);
+- list_del_init(&ctx->list);
+
+- if (refcount_dec_and_test(&ctx->refcount))
+- tls_device_free_ctx(ctx);
++ /* Move the context to a separate list for two reasons:
++ * 1. When the context is deallocated, list_del is called.
++ * 2. It's no longer an offloaded context, so we don't want to
++ * run offload-specific code on this context.
++ */
++ spin_lock_irqsave(&tls_device_lock, flags);
++ list_move_tail(&ctx->list, &tls_device_down_list);
++ spin_unlock_irqrestore(&tls_device_lock, flags);
++
++	/* Device contexts for RX and TX will be freed on sk_destruct
++ * by tls_device_free_ctx. rx_conf and tx_conf stay in TLS_HW.
++ */
+ }
+
+ up_write(&device_offload_lock);
+diff --git a/net/tls/tls_device_fallback.c b/net/tls/tls_device_fallback.c
+index 28895333701e4..0d40016bf69e0 100644
+--- a/net/tls/tls_device_fallback.c
++++ b/net/tls/tls_device_fallback.c
+@@ -430,6 +430,13 @@ struct sk_buff *tls_validate_xmit_skb(struct sock *sk,
+ }
+ EXPORT_SYMBOL_GPL(tls_validate_xmit_skb);
+
++struct sk_buff *tls_validate_xmit_skb_sw(struct sock *sk,
++ struct net_device *dev,
++ struct sk_buff *skb)
++{
++ return tls_sw_fallback(sk, skb);
++}
++
+ struct sk_buff *tls_encrypt_skb(struct sk_buff *skb)
+ {
+ return tls_sw_fallback(skb->sk, skb);
+diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
+index 8d93cea99f2cb..32a51b20509c9 100644
+--- a/net/tls/tls_main.c
++++ b/net/tls/tls_main.c
+@@ -633,6 +633,7 @@ struct tls_context *tls_ctx_create(struct sock *sk)
+ mutex_init(&ctx->tx_lock);
+ rcu_assign_pointer(icsk->icsk_ulp_data, ctx);
+ ctx->sk_proto = READ_ONCE(sk->sk_prot);
++ ctx->sk = sk;
+ return ctx;
+ }
+
+diff --git a/samples/vfio-mdev/mdpy-fb.c b/samples/vfio-mdev/mdpy-fb.c
+index 21dbf63d6e415..9ec93d90e8a5a 100644
+--- a/samples/vfio-mdev/mdpy-fb.c
++++ b/samples/vfio-mdev/mdpy-fb.c
+@@ -117,22 +117,27 @@ static int mdpy_fb_probe(struct pci_dev *pdev,
+ if (format != DRM_FORMAT_XRGB8888) {
+ pci_err(pdev, "format mismatch (0x%x != 0x%x)\n",
+ format, DRM_FORMAT_XRGB8888);
+- return -EINVAL;
++ ret = -EINVAL;
++ goto err_release_regions;
+ }
+ if (width < 100 || width > 10000) {
+ pci_err(pdev, "width (%d) out of range\n", width);
+- return -EINVAL;
++ ret = -EINVAL;
++ goto err_release_regions;
+ }
+ if (height < 100 || height > 10000) {
+ pci_err(pdev, "height (%d) out of range\n", height);
+- return -EINVAL;
++ ret = -EINVAL;
++ goto err_release_regions;
+ }
+ pci_info(pdev, "mdpy found: %dx%d framebuffer\n",
+ width, height);
+
+ info = framebuffer_alloc(sizeof(struct mdpy_fb_par), &pdev->dev);
+- if (!info)
++ if (!info) {
++ ret = -ENOMEM;
+ goto err_release_regions;
++ }
+ pci_set_drvdata(pdev, info);
+ par = info->par;
+
+diff --git a/sound/core/timer.c b/sound/core/timer.c
+index 765ea66665a8c..c15c8314671b7 100644
+--- a/sound/core/timer.c
++++ b/sound/core/timer.c
+@@ -520,9 +520,10 @@ static void snd_timer_notify1(struct snd_timer_instance *ti, int event)
+ return;
+ if (timer->hw.flags & SNDRV_TIMER_HW_SLAVE)
+ return;
++ event += 10; /* convert to SNDRV_TIMER_EVENT_MXXX */
+ list_for_each_entry(ts, &ti->slave_active_head, active_list)
+ if (ts->ccallback)
+- ts->ccallback(ts, event + 100, &tstamp, resolution);
++ ts->ccallback(ts, event, &tstamp, resolution);
+ }
+
+ /* start/continue a master timer */
+diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
+index eec1775dfffe9..4cec1bd77e6fe 100644
+--- a/sound/pci/hda/hda_codec.c
++++ b/sound/pci/hda/hda_codec.c
+@@ -2973,6 +2973,7 @@ static int hda_codec_runtime_resume(struct device *dev)
+ #ifdef CONFIG_PM_SLEEP
+ static int hda_codec_pm_prepare(struct device *dev)
+ {
++ dev->power.power_state = PMSG_SUSPEND;
+ return pm_runtime_suspended(dev);
+ }
+
+@@ -2980,6 +2981,10 @@ static void hda_codec_pm_complete(struct device *dev)
+ {
+ struct hda_codec *codec = dev_to_hda_codec(dev);
+
++ /* If no other pm-functions are called between prepare() and complete() */
++ if (dev->power.power_state.event == PM_EVENT_SUSPEND)
++ dev->power.power_state = PMSG_RESUME;
++
+ if (pm_runtime_suspended(dev) && (codec->jackpoll_interval ||
+ hda_codec_need_resume(codec) || codec->forced_resume))
+ pm_request_resume(dev);
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index d8424d226714f..cc13a68197f3c 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -8289,6 +8289,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x103c, 0x82bf, "HP G3 mini", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x103c, 0x82c0, "HP G3 mini premium", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
++ SND_PCI_QUIRK(0x103c, 0x841c, "HP Pavilion 15-CK0xx", ALC269_FIXUP_HP_MUTE_LED_MIC3),
+ SND_PCI_QUIRK(0x103c, 0x8497, "HP Envy x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
+ SND_PCI_QUIRK(0x103c, 0x84da, "HP OMEN dc0019-ur", ALC295_FIXUP_HP_OMEN),
+ SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
+diff --git a/tools/perf/util/dwarf-aux.c b/tools/perf/util/dwarf-aux.c
+index 7b2d471a6419d..4343356f3cf9a 100644
+--- a/tools/perf/util/dwarf-aux.c
++++ b/tools/perf/util/dwarf-aux.c
+@@ -975,9 +975,13 @@ static int __die_find_variable_cb(Dwarf_Die *die_mem, void *data)
+ if ((tag == DW_TAG_formal_parameter ||
+ tag == DW_TAG_variable) &&
+ die_compare_name(die_mem, fvp->name) &&
+- /* Does the DIE have location information or external instance? */
++ /*
++ * Does the DIE have location information or const value
++ * or external instance?
++ */
+ (dwarf_attr(die_mem, DW_AT_external, &attr) ||
+- dwarf_attr(die_mem, DW_AT_location, &attr)))
++ dwarf_attr(die_mem, DW_AT_location, &attr) ||
++ dwarf_attr(die_mem, DW_AT_const_value, &attr)))
+ return DIE_FIND_CB_END;
+ if (dwarf_haspc(die_mem, fvp->addr))
+ return DIE_FIND_CB_CONTINUE;
+diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
+index 76dd349aa48d8..fdafbfcef6871 100644
+--- a/tools/perf/util/probe-finder.c
++++ b/tools/perf/util/probe-finder.c
+@@ -190,6 +190,9 @@ static int convert_variable_location(Dwarf_Die *vr_die, Dwarf_Addr addr,
+ immediate_value_is_supported()) {
+ Dwarf_Sword snum;
+
++ if (!tvar)
++ return 0;
++
+ dwarf_formsdata(&attr, &snum);
+ ret = asprintf(&tvar->value, "\\%ld", (long)snum);
+
+diff --git a/tools/testing/selftests/wireguard/netns.sh b/tools/testing/selftests/wireguard/netns.sh
+index 7ed7cd95e58fe..ebc4ee0fe179f 100755
+--- a/tools/testing/selftests/wireguard/netns.sh
++++ b/tools/testing/selftests/wireguard/netns.sh
+@@ -363,6 +363,7 @@ ip1 -6 rule add table main suppress_prefixlength 0
+ ip1 -4 route add default dev wg0 table 51820
+ ip1 -4 rule add not fwmark 51820 table 51820
+ ip1 -4 rule add table main suppress_prefixlength 0
++n1 bash -c 'printf 0 > /proc/sys/net/ipv4/conf/vethc/rp_filter'
+ # Flood the pings instead of sending just one, to trigger routing table reference counting bugs.
+ n1 ping -W 1 -c 100 -f 192.168.99.7
+ n1 ping -W 1 -c 100 -f abab::1111
+diff --git a/tools/testing/selftests/wireguard/qemu/kernel.config b/tools/testing/selftests/wireguard/qemu/kernel.config
+index 4eecb432a66c1..74db83a0aedd8 100644
+--- a/tools/testing/selftests/wireguard/qemu/kernel.config
++++ b/tools/testing/selftests/wireguard/qemu/kernel.config
+@@ -19,7 +19,6 @@ CONFIG_NETFILTER_XTABLES=y
+ CONFIG_NETFILTER_XT_NAT=y
+ CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+ CONFIG_NETFILTER_XT_MARK=y
+-CONFIG_NF_CONNTRACK_IPV4=y
+ CONFIG_NF_NAT_IPV4=y
+ CONFIG_IP_NF_IPTABLES=y
+ CONFIG_IP_NF_FILTER=y