author     Mike Pagano <mpagano@gentoo.org>   2023-02-06 07:48:38 -0500
committer  Mike Pagano <mpagano@gentoo.org>   2023-02-06 07:48:38 -0500
commit     d936533a80769b87d14f376f5c093a3b7138f081 (patch)
tree       25d062a830638e326365e2b9e7a6e414ee878463
parent     gcc-plugins: drop -std=gnu++11 to fix GCC 13 build (diff)
download   linux-patches-d936533a80769b87d14f376f5c093a3b7138f081.tar.gz
           linux-patches-d936533a80769b87d14f376f5c093a3b7138f081.tar.bz2
           linux-patches-d936533a80769b87d14f376f5c093a3b7138f081.zip
Linux patch 5.4.231 (tag: 5.4-236)
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
-rw-r--r--  0000_README              |    4
-rw-r--r--  1230_linux-5.4.231.patch | 4958
2 files changed, 4962 insertions, 0 deletions
diff --git a/0000_README b/0000_README
index 3adc520a..f1efbc1b 100644
--- a/0000_README
+++ b/0000_README
@@ -963,6 +963,10 @@ Patch: 1229_linux-5.4.230.patch
From: http://www.kernel.org
Desc: Linux 5.4.230
+Patch: 1230_linux-5.4.231.patch
+From: http://www.kernel.org
+Desc: Linux 5.4.231
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1230_linux-5.4.231.patch b/1230_linux-5.4.231.patch
new file mode 100644
index 00000000..72049c2a
--- /dev/null
+++ b/1230_linux-5.4.231.patch
@@ -0,0 +1,4958 @@
+diff --git a/Documentation/ABI/testing/sysfs-kernel-oops_count b/Documentation/ABI/testing/sysfs-kernel-oops_count
+new file mode 100644
+index 0000000000000..156cca9dbc960
+--- /dev/null
++++ b/Documentation/ABI/testing/sysfs-kernel-oops_count
+@@ -0,0 +1,6 @@
++What: /sys/kernel/oops_count
++Date: November 2022
++KernelVersion: 6.2.0
++Contact: Linux Kernel Hardening List <linux-hardening@vger.kernel.org>
++Description:
++ Shows how many times the system has Oopsed since last boot.
+diff --git a/Documentation/ABI/testing/sysfs-kernel-warn_count b/Documentation/ABI/testing/sysfs-kernel-warn_count
+new file mode 100644
+index 0000000000000..90a029813717d
+--- /dev/null
++++ b/Documentation/ABI/testing/sysfs-kernel-warn_count
+@@ -0,0 +1,6 @@
++What: /sys/kernel/warn_count
++Date: November 2022
++KernelVersion: 6.2.0
++Contact: Linux Kernel Hardening List <linux-hardening@vger.kernel.org>
++Description:
++ Shows how many times the system has Warned since last boot.
+diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst
+index 9715685be6e3b..568c24ff00a72 100644
+--- a/Documentation/admin-guide/sysctl/kernel.rst
++++ b/Documentation/admin-guide/sysctl/kernel.rst
+@@ -557,6 +557,15 @@ numa_balancing_scan_size_mb is how many megabytes worth of pages are
+ scanned for a given scan.
+
+
++oops_limit
++==========
++
++Number of kernel oopses after which the kernel should panic when
++``panic_on_oops`` is not set. Setting this to 0 disables checking
++the count. Setting this to 1 has the same effect as setting
++``panic_on_oops=1``. The default value is 10000.
++
++
+ osrelease, ostype & version:
+ ============================
+
+@@ -1177,6 +1186,16 @@ entry will default to 2 instead of 0.
+ 2 Unprivileged calls to ``bpf()`` are disabled
+ = =============================================================
+
++
++warn_limit
++==========
++
++Number of kernel warnings after which the kernel should panic when
++``panic_on_warn`` is not set. Setting this to 0 disables checking
++the warning count. Setting this to 1 has the same effect as setting
++``panic_on_warn=1``. The default value is 0.
++
++
+ watchdog:
+ =========
+
+diff --git a/MAINTAINERS b/MAINTAINERS
+index 973fcc9143d1e..ea8f1c8850892 100644
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -14661,6 +14661,7 @@ L: netdev@vger.kernel.org
+ S: Maintained
+ F: drivers/net/phy/phylink.c
+ F: drivers/net/phy/sfp*
++F: include/linux/mdio/mdio-i2c.h
+ F: include/linux/phylink.h
+ F: include/linux/sfp.h
+ K: phylink
+diff --git a/Makefile b/Makefile
+index 6a947b4ed5dc4..3cbbc82e2ddf2 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 4
+-SUBLEVEL = 230
++SUBLEVEL = 231
+ EXTRAVERSION =
+ NAME = Kleptomaniac Octopus
+
+diff --git a/arch/alpha/kernel/traps.c b/arch/alpha/kernel/traps.c
+index f6b9664ac5042..f87d8e1fcfe42 100644
+--- a/arch/alpha/kernel/traps.c
++++ b/arch/alpha/kernel/traps.c
+@@ -192,7 +192,7 @@ die_if_kernel(char * str, struct pt_regs *regs, long err, unsigned long *r9_15)
+ local_irq_enable();
+ while (1);
+ }
+- do_exit(SIGSEGV);
++ make_task_dead(SIGSEGV);
+ }
+
+ #ifndef CONFIG_MATHEMU
+@@ -577,7 +577,7 @@ do_entUna(void * va, unsigned long opcode, unsigned long reg,
+
+ printk("Bad unaligned kernel access at %016lx: %p %lx %lu\n",
+ pc, va, opcode, reg);
+- do_exit(SIGSEGV);
++ make_task_dead(SIGSEGV);
+
+ got_exception:
+ /* Ok, we caught the exception, but we don't want it. Is there
+@@ -632,7 +632,7 @@ got_exception:
+ local_irq_enable();
+ while (1);
+ }
+- do_exit(SIGSEGV);
++ make_task_dead(SIGSEGV);
+ }
+
+ /*
+diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
+index 741e61ef9d3fe..a86286d2d3f3f 100644
+--- a/arch/alpha/mm/fault.c
++++ b/arch/alpha/mm/fault.c
+@@ -206,7 +206,7 @@ retry:
+ printk(KERN_ALERT "Unable to handle kernel paging request at "
+ "virtual address %016lx\n", address);
+ die_if_kernel("Oops", regs, cause, (unsigned long*)regs - 16);
+- do_exit(SIGKILL);
++ make_task_dead(SIGKILL);
+
+ /* We ran out of memory, or some other thing happened to us that
+ made us unable to handle the page fault gracefully. */
+diff --git a/arch/arm/boot/dts/imx53-ppd.dts b/arch/arm/boot/dts/imx53-ppd.dts
+index c80d1700e0949..c01dc571b55cd 100644
+--- a/arch/arm/boot/dts/imx53-ppd.dts
++++ b/arch/arm/boot/dts/imx53-ppd.dts
+@@ -461,7 +461,7 @@
+ scl-gpios = <&gpio3 21 GPIO_ACTIVE_HIGH>;
+ status = "okay";
+
+- i2c-switch@70 {
++ i2c-mux@70 {
+ compatible = "nxp,pca9547";
+ #address-cells = <1>;
+ #size-cells = <0>;
+diff --git a/arch/arm/boot/dts/imx6qdl-gw560x.dtsi b/arch/arm/boot/dts/imx6qdl-gw560x.dtsi
+index e8e36dfd0a6b0..c951834f49847 100644
+--- a/arch/arm/boot/dts/imx6qdl-gw560x.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-gw560x.dtsi
+@@ -464,7 +464,6 @@
+ &uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1>;
+- uart-has-rtscts;
+ rts-gpios = <&gpio7 1 GPIO_ACTIVE_HIGH>;
+ status = "okay";
+ };
+diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
+index 207ef9a797bd4..03dfeb1208431 100644
+--- a/arch/arm/kernel/traps.c
++++ b/arch/arm/kernel/traps.c
+@@ -341,7 +341,7 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
+ if (panic_on_oops)
+ panic("Fatal exception");
+ if (signr)
+- do_exit(signr);
++ make_task_dead(signr);
+ }
+
+ /*
+diff --git a/arch/arm/mach-imx/cpu-imx25.c b/arch/arm/mach-imx/cpu-imx25.c
+index b2e1963f473de..2ee2d2813d577 100644
+--- a/arch/arm/mach-imx/cpu-imx25.c
++++ b/arch/arm/mach-imx/cpu-imx25.c
+@@ -23,6 +23,7 @@ static int mx25_read_cpu_rev(void)
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,imx25-iim");
+ iim_base = of_iomap(np, 0);
++ of_node_put(np);
+ BUG_ON(!iim_base);
+ rev = readl(iim_base + MXC_IIMSREV);
+ iounmap(iim_base);
+diff --git a/arch/arm/mach-imx/cpu-imx27.c b/arch/arm/mach-imx/cpu-imx27.c
+index a969aa71b60fc..1d28939083683 100644
+--- a/arch/arm/mach-imx/cpu-imx27.c
++++ b/arch/arm/mach-imx/cpu-imx27.c
+@@ -9,6 +9,7 @@
+ */
+
+ #include <linux/io.h>
++#include <linux/of_address.h>
+ #include <linux/module.h>
+
+ #include "hardware.h"
+@@ -17,16 +18,24 @@ static int mx27_cpu_rev = -1;
+ static int mx27_cpu_partnumber;
+
+ #define SYS_CHIP_ID 0x00 /* The offset of CHIP ID register */
++#define SYSCTRL_OFFSET 0x800 /* Offset from CCM base address */
+
+ static int mx27_read_cpu_rev(void)
+ {
++ void __iomem *ccm_base;
++ struct device_node *np;
+ u32 val;
++
++ np = of_find_compatible_node(NULL, NULL, "fsl,imx27-ccm");
++ ccm_base = of_iomap(np, 0);
++ of_node_put(np);
++ BUG_ON(!ccm_base);
+ /*
+ * now we have access to the IO registers. As we need
+ * the silicon revision very early we read it here to
+ * avoid any further hooks
+ */
+- val = imx_readl(MX27_IO_ADDRESS(MX27_SYSCTRL_BASE_ADDR + SYS_CHIP_ID));
++ val = imx_readl(ccm_base + SYSCTRL_OFFSET + SYS_CHIP_ID);
+
+ mx27_cpu_partnumber = (int)((val >> 12) & 0xFFFF);
+
+diff --git a/arch/arm/mach-imx/cpu-imx31.c b/arch/arm/mach-imx/cpu-imx31.c
+index 3ee684b71006f..35c544924e509 100644
+--- a/arch/arm/mach-imx/cpu-imx31.c
++++ b/arch/arm/mach-imx/cpu-imx31.c
+@@ -6,6 +6,7 @@
+ */
+
+ #include <linux/module.h>
++#include <linux/of_address.h>
+ #include <linux/io.h>
+
+ #include "common.h"
+@@ -32,10 +33,17 @@ static struct {
+
+ static int mx31_read_cpu_rev(void)
+ {
++ void __iomem *iim_base;
++ struct device_node *np;
+ u32 i, srev;
+
++ np = of_find_compatible_node(NULL, NULL, "fsl,imx31-iim");
++ iim_base = of_iomap(np, 0);
++ of_node_put(np);
++ BUG_ON(!iim_base);
++
+ /* read SREV register from IIM module */
+- srev = imx_readl(MX31_IO_ADDRESS(MX31_IIM_BASE_ADDR + MXC_IIMSREV));
++ srev = imx_readl(iim_base + MXC_IIMSREV);
+ srev &= 0xff;
+
+ for (i = 0; i < ARRAY_SIZE(mx31_cpu_type); i++)
+diff --git a/arch/arm/mach-imx/cpu-imx35.c b/arch/arm/mach-imx/cpu-imx35.c
+index ebb3cdabd5068..1fe75b39c2d99 100644
+--- a/arch/arm/mach-imx/cpu-imx35.c
++++ b/arch/arm/mach-imx/cpu-imx35.c
+@@ -5,6 +5,7 @@
+ * Copyright (c) 2009 Daniel Mack <daniel@caiaq.de>
+ */
+ #include <linux/module.h>
++#include <linux/of_address.h>
+ #include <linux/io.h>
+
+ #include "hardware.h"
+@@ -14,9 +15,16 @@ static int mx35_cpu_rev = -1;
+
+ static int mx35_read_cpu_rev(void)
+ {
++ void __iomem *iim_base;
++ struct device_node *np;
+ u32 rev;
+
+- rev = imx_readl(MX35_IO_ADDRESS(MX35_IIM_BASE_ADDR + MXC_IIMSREV));
++ np = of_find_compatible_node(NULL, NULL, "fsl,imx35-iim");
++ iim_base = of_iomap(np, 0);
++ of_node_put(np);
++ BUG_ON(!iim_base);
++
++ rev = imx_readl(iim_base + MXC_IIMSREV);
+ switch (rev) {
+ case 0x00:
+ return IMX_CHIP_REVISION_1_0;
+diff --git a/arch/arm/mach-imx/cpu-imx5.c b/arch/arm/mach-imx/cpu-imx5.c
+index ad56263778f93..a67c89bf155dd 100644
+--- a/arch/arm/mach-imx/cpu-imx5.c
++++ b/arch/arm/mach-imx/cpu-imx5.c
+@@ -28,6 +28,7 @@ static u32 imx5_read_srev_reg(const char *compat)
+
+ np = of_find_compatible_node(NULL, NULL, compat);
+ iim_base = of_iomap(np, 0);
++ of_node_put(np);
+ WARN_ON(!iim_base);
+
+ srev = readl(iim_base + IIM_SREV) & 0xff;
+diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
+index bd0f4821f7e11..d623932437208 100644
+--- a/arch/arm/mm/fault.c
++++ b/arch/arm/mm/fault.c
+@@ -124,7 +124,7 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
+ show_pte(KERN_ALERT, mm, addr);
+ die("Oops", regs, fsr);
+ bust_spinlocks(0);
+- do_exit(SIGKILL);
++ make_task_dead(SIGKILL);
+ }
+
+ /*
+diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
+index a3ad8a1b0e079..74a24e6e9a2d9 100644
+--- a/arch/arm/mm/nommu.c
++++ b/arch/arm/mm/nommu.c
+@@ -161,7 +161,7 @@ void __init paging_init(const struct machine_desc *mdesc)
+ mpu_setup();
+
+ /* allocate the zero page. */
+- zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
++ zero_page = (void *)memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+ if (!zero_page)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+ __func__, PAGE_SIZE, PAGE_SIZE);
+diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
+index 4e3e9d9c81517..a436a6972ced7 100644
+--- a/arch/arm64/kernel/traps.c
++++ b/arch/arm64/kernel/traps.c
+@@ -202,7 +202,7 @@ void die(const char *str, struct pt_regs *regs, int err)
+ raw_spin_unlock_irqrestore(&die_lock, flags);
+
+ if (ret != NOTIFY_STOP)
+- do_exit(SIGSEGV);
++ make_task_dead(SIGSEGV);
+ }
+
+ static void arm64_show_signal(int signo, const char *str)
+diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
+index 2a7339aeb1ad4..a8e9c98147a19 100644
+--- a/arch/arm64/mm/fault.c
++++ b/arch/arm64/mm/fault.c
+@@ -296,7 +296,7 @@ static void die_kernel_fault(const char *msg, unsigned long addr,
+ show_pte(addr);
+ die("Oops", regs, esr);
+ bust_spinlocks(0);
+- do_exit(SIGKILL);
++ make_task_dead(SIGKILL);
+ }
+
+ static void __do_kernel_fault(unsigned long addr, unsigned int esr,
+diff --git a/arch/csky/abiv1/alignment.c b/arch/csky/abiv1/alignment.c
+index cb2a0d94a144d..2df115d0e2105 100644
+--- a/arch/csky/abiv1/alignment.c
++++ b/arch/csky/abiv1/alignment.c
+@@ -294,7 +294,7 @@ bad_area:
+ __func__, opcode, rz, rx, imm, addr);
+ show_regs(regs);
+ bust_spinlocks(0);
+- do_exit(SIGKILL);
++ make_task_dead(SIGKILL);
+ }
+
+ force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)addr);
+diff --git a/arch/csky/kernel/traps.c b/arch/csky/kernel/traps.c
+index 63715cb90ee99..8cdbbcb5ed875 100644
+--- a/arch/csky/kernel/traps.c
++++ b/arch/csky/kernel/traps.c
+@@ -85,7 +85,7 @@ void die_if_kernel(char *str, struct pt_regs *regs, int nr)
+ pr_err("%s: %08x\n", str, nr);
+ show_regs(regs);
+ add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
+- do_exit(SIGSEGV);
++ make_task_dead(SIGSEGV);
+ }
+
+ void buserr(struct pt_regs *regs)
+diff --git a/arch/h8300/kernel/traps.c b/arch/h8300/kernel/traps.c
+index e47a9e0dc278f..090adaee4b84c 100644
+--- a/arch/h8300/kernel/traps.c
++++ b/arch/h8300/kernel/traps.c
+@@ -17,6 +17,7 @@
+ #include <linux/types.h>
+ #include <linux/sched.h>
+ #include <linux/sched/debug.h>
++#include <linux/sched/task.h>
+ #include <linux/mm_types.h>
+ #include <linux/kernel.h>
+ #include <linux/errno.h>
+@@ -110,7 +111,7 @@ void die(const char *str, struct pt_regs *fp, unsigned long err)
+ dump(fp);
+
+ spin_unlock_irq(&die_lock);
+- do_exit(SIGSEGV);
++ make_task_dead(SIGSEGV);
+ }
+
+ static int kstack_depth_to_print = 24;
+diff --git a/arch/h8300/mm/fault.c b/arch/h8300/mm/fault.c
+index fabffb83930af..573825c3cb708 100644
+--- a/arch/h8300/mm/fault.c
++++ b/arch/h8300/mm/fault.c
+@@ -52,7 +52,7 @@ asmlinkage int do_page_fault(struct pt_regs *regs, unsigned long address,
+ printk(" at virtual address %08lx\n", address);
+ if (!user_mode(regs))
+ die("Oops", regs, error_code);
+- do_exit(SIGKILL);
++ make_task_dead(SIGKILL);
+
+ return 1;
+ }
+diff --git a/arch/hexagon/kernel/traps.c b/arch/hexagon/kernel/traps.c
+index 69c623b14ddd2..f69eae3f32bd2 100644
+--- a/arch/hexagon/kernel/traps.c
++++ b/arch/hexagon/kernel/traps.c
+@@ -221,7 +221,7 @@ int die(const char *str, struct pt_regs *regs, long err)
+ panic("Fatal exception");
+
+ oops_exit();
+- do_exit(err);
++ make_task_dead(err);
+ return 0;
+ }
+
+diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
+index 16714477eef42..6a6036f16abe6 100644
+--- a/arch/ia64/Kconfig
++++ b/arch/ia64/Kconfig
+@@ -360,7 +360,7 @@ config ARCH_PROC_KCORE_TEXT
+ depends on PROC_KCORE
+
+ config IA64_MCA_RECOVERY
+- tristate "MCA recovery from errors other than TLB."
++ bool "MCA recovery from errors other than TLB."
+
+ config PERFMON
+ bool "Performance monitor support"
+diff --git a/arch/ia64/kernel/mca_drv.c b/arch/ia64/kernel/mca_drv.c
+index 2a40268c3d494..c8a87798618ed 100644
+--- a/arch/ia64/kernel/mca_drv.c
++++ b/arch/ia64/kernel/mca_drv.c
+@@ -12,6 +12,7 @@
+ #include <linux/types.h>
+ #include <linux/init.h>
+ #include <linux/sched.h>
++#include <linux/sched/task.h>
+ #include <linux/interrupt.h>
+ #include <linux/irq.h>
+ #include <linux/kallsyms.h>
+@@ -176,7 +177,7 @@ mca_handler_bh(unsigned long paddr, void *iip, unsigned long ipsr)
+ spin_unlock(&mca_bh_lock);
+
+ /* This process is about to be killed itself */
+- do_exit(SIGKILL);
++ make_task_dead(SIGKILL);
+ }
+
+ /**
+diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c
+index e13cb905930fb..753642366e12e 100644
+--- a/arch/ia64/kernel/traps.c
++++ b/arch/ia64/kernel/traps.c
+@@ -85,7 +85,7 @@ die (const char *str, struct pt_regs *regs, long err)
+ if (panic_on_oops)
+ panic("Fatal exception");
+
+- do_exit(SIGSEGV);
++ make_task_dead(SIGSEGV);
+ return 0;
+ }
+
+diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
+index c2f299fe9e04a..7f8c49579a2c2 100644
+--- a/arch/ia64/mm/fault.c
++++ b/arch/ia64/mm/fault.c
+@@ -272,7 +272,7 @@ retry:
+ regs = NULL;
+ bust_spinlocks(0);
+ if (regs)
+- do_exit(SIGKILL);
++ make_task_dead(SIGKILL);
+ return;
+
+ out_of_memory:
+diff --git a/arch/m68k/kernel/traps.c b/arch/m68k/kernel/traps.c
+index 344f93d36a9a0..a245c1933d418 100644
+--- a/arch/m68k/kernel/traps.c
++++ b/arch/m68k/kernel/traps.c
+@@ -1139,7 +1139,7 @@ void die_if_kernel (char *str, struct pt_regs *fp, int nr)
+ pr_crit("%s: %08x\n", str, nr);
+ show_registers(fp);
+ add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
+- do_exit(SIGSEGV);
++ make_task_dead(SIGSEGV);
+ }
+
+ asmlinkage void set_esp0(unsigned long ssp)
+diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c
+index e9b1d7585b43b..03ebb67b413ef 100644
+--- a/arch/m68k/mm/fault.c
++++ b/arch/m68k/mm/fault.c
+@@ -48,7 +48,7 @@ int send_fault_sig(struct pt_regs *regs)
+ pr_alert("Unable to handle kernel access");
+ pr_cont(" at virtual address %p\n", addr);
+ die_if_kernel("Oops", regs, 0 /*error_code*/);
+- do_exit(SIGKILL);
++ make_task_dead(SIGKILL);
+ }
+
+ return 1;
+diff --git a/arch/microblaze/kernel/exceptions.c b/arch/microblaze/kernel/exceptions.c
+index cf99c411503e3..6d3a6a6442205 100644
+--- a/arch/microblaze/kernel/exceptions.c
++++ b/arch/microblaze/kernel/exceptions.c
+@@ -44,10 +44,10 @@ void die(const char *str, struct pt_regs *fp, long err)
+ pr_warn("Oops: %s, sig: %ld\n", str, err);
+ show_regs(fp);
+ spin_unlock_irq(&die_lock);
+- /* do_exit() should take care of panic'ing from an interrupt
++ /* make_task_dead() should take care of panic'ing from an interrupt
+ * context so we don't handle it here
+ */
+- do_exit(err);
++ make_task_dead(err);
+ }
+
+ /* for user application debugging */
+diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
+index 749089c25d5e6..5a491eca456fc 100644
+--- a/arch/mips/kernel/traps.c
++++ b/arch/mips/kernel/traps.c
+@@ -415,7 +415,7 @@ void __noreturn die(const char *str, struct pt_regs *regs)
+ if (regs && kexec_should_crash(current))
+ crash_kexec(regs);
+
+- do_exit(sig);
++ make_task_dead(sig);
+ }
+
+ extern struct exception_table_entry __start___dbe_table[];
+diff --git a/arch/nds32/kernel/fpu.c b/arch/nds32/kernel/fpu.c
+index 62bdafbc53f4c..26c62d5a55c15 100644
+--- a/arch/nds32/kernel/fpu.c
++++ b/arch/nds32/kernel/fpu.c
+@@ -223,7 +223,7 @@ inline void handle_fpu_exception(struct pt_regs *regs)
+ }
+ } else if (fpcsr & FPCSR_mskRIT) {
+ if (!user_mode(regs))
+- do_exit(SIGILL);
++ make_task_dead(SIGILL);
+ si_signo = SIGILL;
+ }
+
+diff --git a/arch/nds32/kernel/traps.c b/arch/nds32/kernel/traps.c
+index f4d386b526227..f6648845aae76 100644
+--- a/arch/nds32/kernel/traps.c
++++ b/arch/nds32/kernel/traps.c
+@@ -184,7 +184,7 @@ void die(const char *str, struct pt_regs *regs, int err)
+
+ bust_spinlocks(0);
+ spin_unlock_irq(&die_lock);
+- do_exit(SIGSEGV);
++ make_task_dead(SIGSEGV);
+ }
+
+ EXPORT_SYMBOL(die);
+@@ -288,7 +288,7 @@ void unhandled_interruption(struct pt_regs *regs)
+ pr_emerg("unhandled_interruption\n");
+ show_regs(regs);
+ if (!user_mode(regs))
+- do_exit(SIGKILL);
++ make_task_dead(SIGKILL);
+ force_sig(SIGKILL);
+ }
+
+@@ -299,7 +299,7 @@ void unhandled_exceptions(unsigned long entry, unsigned long addr,
+ addr, type);
+ show_regs(regs);
+ if (!user_mode(regs))
+- do_exit(SIGKILL);
++ make_task_dead(SIGKILL);
+ force_sig(SIGKILL);
+ }
+
+@@ -326,7 +326,7 @@ void do_revinsn(struct pt_regs *regs)
+ pr_emerg("Reserved Instruction\n");
+ show_regs(regs);
+ if (!user_mode(regs))
+- do_exit(SIGILL);
++ make_task_dead(SIGILL);
+ force_sig(SIGILL);
+ }
+
+diff --git a/arch/nios2/kernel/traps.c b/arch/nios2/kernel/traps.c
+index 486db793923c0..8e192d6564261 100644
+--- a/arch/nios2/kernel/traps.c
++++ b/arch/nios2/kernel/traps.c
+@@ -37,10 +37,10 @@ void die(const char *str, struct pt_regs *regs, long err)
+ show_regs(regs);
+ spin_unlock_irq(&die_lock);
+ /*
+- * do_exit() should take care of panic'ing from an interrupt
++ * make_task_dead() should take care of panic'ing from an interrupt
+ * context so we don't handle it here
+ */
+- do_exit(err);
++ make_task_dead(err);
+ }
+
+ void _exception(int signo, struct pt_regs *regs, int code, unsigned long addr)
+diff --git a/arch/openrisc/kernel/traps.c b/arch/openrisc/kernel/traps.c
+index 932a8ec2b520e..2804852a55924 100644
+--- a/arch/openrisc/kernel/traps.c
++++ b/arch/openrisc/kernel/traps.c
+@@ -218,7 +218,7 @@ void die(const char *str, struct pt_regs *regs, long err)
+ __asm__ __volatile__("l.nop 1");
+ do {} while (1);
+ #endif
+- do_exit(SIGSEGV);
++ make_task_dead(SIGSEGV);
+ }
+
+ /* This is normally the 'Oops' routine */
+diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
+index 2a1060d747a5d..37988f7f3abcb 100644
+--- a/arch/parisc/kernel/traps.c
++++ b/arch/parisc/kernel/traps.c
+@@ -268,7 +268,7 @@ void die_if_kernel(char *str, struct pt_regs *regs, long err)
+ panic("Fatal exception");
+
+ oops_exit();
+- do_exit(SIGSEGV);
++ make_task_dead(SIGSEGV);
+ }
+
+ /* gdb uses break 4,8 */
+diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
+index ecfa460f66d17..70b99246dec46 100644
+--- a/arch/powerpc/kernel/traps.c
++++ b/arch/powerpc/kernel/traps.c
+@@ -246,7 +246,7 @@ static void oops_end(unsigned long flags, struct pt_regs *regs,
+
+ if (panic_on_oops)
+ panic("Fatal exception");
+- do_exit(signr);
++ make_task_dead(signr);
+ }
+ NOKPROBE_SYMBOL(oops_end);
+
+diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c
+index ae462037910be..c28d4debf5926 100644
+--- a/arch/riscv/kernel/traps.c
++++ b/arch/riscv/kernel/traps.c
+@@ -57,7 +57,7 @@ void die(struct pt_regs *regs, const char *str)
+ if (panic_on_oops)
+ panic("Fatal exception");
+ if (ret != NOTIFY_STOP)
+- do_exit(SIGSEGV);
++ make_task_dead(SIGSEGV);
+ }
+
+ void do_trap(struct pt_regs *regs, int signo, int code, unsigned long addr)
+diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c
+index 247b8c859c448..1cfce62caa119 100644
+--- a/arch/riscv/mm/fault.c
++++ b/arch/riscv/mm/fault.c
+@@ -189,7 +189,7 @@ no_context:
+ (addr < PAGE_SIZE) ? "NULL pointer dereference" :
+ "paging request", addr);
+ die(regs, "Oops");
+- do_exit(SIGKILL);
++ make_task_dead(SIGKILL);
+
+ /*
+ * We ran out of memory, call the OOM killer, and return the userspace
+diff --git a/arch/s390/include/asm/debug.h b/arch/s390/include/asm/debug.h
+index 3101340155417..54f4bc5d1108b 100644
+--- a/arch/s390/include/asm/debug.h
++++ b/arch/s390/include/asm/debug.h
+@@ -4,8 +4,8 @@
+ *
+ * Copyright IBM Corp. 1999, 2000
+ */
+-#ifndef DEBUG_H
+-#define DEBUG_H
++#ifndef _ASM_S390_DEBUG_H
++#define _ASM_S390_DEBUG_H
+
+ #include <linux/string.h>
+ #include <linux/spinlock.h>
+@@ -416,4 +416,4 @@ int debug_unregister_view(debug_info_t *id, struct debug_view *view);
+ #define PRINT_FATAL(x...) printk(KERN_DEBUG PRINTK_HEADER x)
+ #endif /* DASD_DEBUG */
+
+-#endif /* DEBUG_H */
++#endif /* _ASM_S390_DEBUG_H */
+diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c
+index 34bdc60c0b11d..2100833adfb69 100644
+--- a/arch/s390/kernel/dumpstack.c
++++ b/arch/s390/kernel/dumpstack.c
+@@ -210,5 +210,5 @@ void die(struct pt_regs *regs, const char *str)
+ if (panic_on_oops)
+ panic("Fatal exception: panic_on_oops");
+ oops_exit();
+- do_exit(SIGSEGV);
++ make_task_dead(SIGSEGV);
+ }
+diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
+index 0a487fae763ee..d8951274658bd 100644
+--- a/arch/s390/kernel/nmi.c
++++ b/arch/s390/kernel/nmi.c
+@@ -179,7 +179,7 @@ void s390_handle_mcck(void)
+ "malfunction (code 0x%016lx).\n", mcck.mcck_code);
+ printk(KERN_EMERG "mcck: task: %s, pid: %d.\n",
+ current->comm, current->pid);
+- do_exit(SIGSEGV);
++ make_task_dead(SIGSEGV);
+ }
+ }
+ EXPORT_SYMBOL_GPL(s390_handle_mcck);
+diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
+index 8be5750fe5ac3..a180fe54dc687 100644
+--- a/arch/s390/kvm/interrupt.c
++++ b/arch/s390/kvm/interrupt.c
+@@ -81,8 +81,9 @@ static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id)
+ struct esca_block *sca = vcpu->kvm->arch.sca;
+ union esca_sigp_ctrl *sigp_ctrl =
+ &(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
+- union esca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl;
++ union esca_sigp_ctrl new_val = {0}, old_val;
+
++ old_val = READ_ONCE(*sigp_ctrl);
+ new_val.scn = src_id;
+ new_val.c = 1;
+ old_val.c = 0;
+@@ -93,8 +94,9 @@ static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id)
+ struct bsca_block *sca = vcpu->kvm->arch.sca;
+ union bsca_sigp_ctrl *sigp_ctrl =
+ &(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
+- union bsca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl;
++ union bsca_sigp_ctrl new_val = {0}, old_val;
+
++ old_val = READ_ONCE(*sigp_ctrl);
+ new_val.scn = src_id;
+ new_val.c = 1;
+ old_val.c = 0;
+@@ -124,16 +126,18 @@ static void sca_clear_ext_call(struct kvm_vcpu *vcpu)
+ struct esca_block *sca = vcpu->kvm->arch.sca;
+ union esca_sigp_ctrl *sigp_ctrl =
+ &(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
+- union esca_sigp_ctrl old = *sigp_ctrl;
++ union esca_sigp_ctrl old;
+
++ old = READ_ONCE(*sigp_ctrl);
+ expect = old.value;
+ rc = cmpxchg(&sigp_ctrl->value, old.value, 0);
+ } else {
+ struct bsca_block *sca = vcpu->kvm->arch.sca;
+ union bsca_sigp_ctrl *sigp_ctrl =
+ &(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
+- union bsca_sigp_ctrl old = *sigp_ctrl;
++ union bsca_sigp_ctrl old;
+
++ old = READ_ONCE(*sigp_ctrl);
+ expect = old.value;
+ rc = cmpxchg(&sigp_ctrl->value, old.value, 0);
+ }
+diff --git a/arch/sh/kernel/traps.c b/arch/sh/kernel/traps.c
+index 63cf17bc760da..6a228c00b73f4 100644
+--- a/arch/sh/kernel/traps.c
++++ b/arch/sh/kernel/traps.c
+@@ -57,7 +57,7 @@ void die(const char *str, struct pt_regs *regs, long err)
+ if (panic_on_oops)
+ panic("Fatal exception");
+
+- do_exit(SIGSEGV);
++ make_task_dead(SIGSEGV);
+ }
+
+ void die_if_kernel(const char *str, struct pt_regs *regs, long err)
+diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
+index 4ceecad556a9f..dbf068ac54ff3 100644
+--- a/arch/sparc/kernel/traps_32.c
++++ b/arch/sparc/kernel/traps_32.c
+@@ -86,9 +86,7 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
+ }
+ printk("Instruction DUMP:");
+ instruction_dump ((unsigned long *) regs->pc);
+- if(regs->psr & PSR_PS)
+- do_exit(SIGKILL);
+- do_exit(SIGSEGV);
++ make_task_dead((regs->psr & PSR_PS) ? SIGKILL : SIGSEGV);
+ }
+
+ void do_hw_interrupt(struct pt_regs *regs, unsigned long type)
+diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
+index f2b22c496fb97..17768680cbaeb 100644
+--- a/arch/sparc/kernel/traps_64.c
++++ b/arch/sparc/kernel/traps_64.c
+@@ -2564,9 +2564,7 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
+ }
+ if (panic_on_oops)
+ panic("Fatal exception");
+- if (regs->tstate & TSTATE_PRIV)
+- do_exit(SIGKILL);
+- do_exit(SIGSEGV);
++ make_task_dead((regs->tstate & TSTATE_PRIV)? SIGKILL : SIGSEGV);
+ }
+ EXPORT_SYMBOL(die_if_kernel);
+
+diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
+index 2d837fb54c31b..740df9cc21963 100644
+--- a/arch/x86/entry/entry_32.S
++++ b/arch/x86/entry/entry_32.S
+@@ -1659,13 +1659,13 @@ ENTRY(async_page_fault)
+ END(async_page_fault)
+ #endif
+
+-ENTRY(rewind_stack_do_exit)
++ENTRY(rewind_stack_and_make_dead)
+ /* Prevent any naive code from trying to unwind to our caller. */
+ xorl %ebp, %ebp
+
+ movl PER_CPU_VAR(cpu_current_top_of_stack), %esi
+ leal -TOP_OF_KERNEL_STACK_PADDING-PTREGS_SIZE(%esi), %esp
+
+- call do_exit
++ call make_task_dead
+ 1: jmp 1b
+-END(rewind_stack_do_exit)
++END(rewind_stack_and_make_dead)
+diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
+index c82136030d58f..bd7a4ad0937c4 100644
+--- a/arch/x86/entry/entry_64.S
++++ b/arch/x86/entry/entry_64.S
+@@ -1757,7 +1757,7 @@ ENTRY(ignore_sysret)
+ END(ignore_sysret)
+ #endif
+
+-ENTRY(rewind_stack_do_exit)
++ENTRY(rewind_stack_and_make_dead)
+ UNWIND_HINT_FUNC
+ /* Prevent any naive code from trying to unwind to our caller. */
+ xorl %ebp, %ebp
+@@ -1766,5 +1766,5 @@ ENTRY(rewind_stack_do_exit)
+ leaq -PTREGS_SIZE(%rax), %rsp
+ UNWIND_HINT_REGS
+
+- call do_exit
+-END(rewind_stack_do_exit)
++ call make_task_dead
++END(rewind_stack_and_make_dead)
+diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
+index 3ea8056148d84..c3555e5237cf6 100644
+--- a/arch/x86/events/amd/core.c
++++ b/arch/x86/events/amd/core.c
+@@ -969,7 +969,7 @@ static int __init amd_core_pmu_init(void)
+ * numbered counter following it.
+ */
+ for (i = 0; i < x86_pmu.num_counters - 1; i += 2)
+- even_ctr_mask |= 1 << i;
++ even_ctr_mask |= BIT_ULL(i);
+
+ pair_constraint = (struct event_constraint)
+ __EVENT_CONSTRAINT(0, even_ctr_mask, 0,
+diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
+index e07424e19274b..e72042dc9487c 100644
+--- a/arch/x86/kernel/dumpstack.c
++++ b/arch/x86/kernel/dumpstack.c
+@@ -326,7 +326,7 @@ unsigned long oops_begin(void)
+ }
+ NOKPROBE_SYMBOL(oops_begin);
+
+-void __noreturn rewind_stack_do_exit(int signr);
++void __noreturn rewind_stack_and_make_dead(int signr);
+
+ void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
+ {
+@@ -361,7 +361,7 @@ void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
+ * reuse the task stack and that existing poisons are invalid.
+ */
+ kasan_unpoison_task_stack(current);
+- rewind_stack_do_exit(signr);
++ rewind_stack_and_make_dead(signr);
+ }
+ NOKPROBE_SYMBOL(oops_end);
+
+diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
+index fe522691ac717..8821d0ab0a08c 100644
+--- a/arch/x86/kernel/i8259.c
++++ b/arch/x86/kernel/i8259.c
+@@ -114,6 +114,7 @@ static void make_8259A_irq(unsigned int irq)
+ disable_irq_nosync(irq);
+ io_apic_irqs &= ~(1<<irq);
+ irq_set_chip_and_handler(irq, &i8259A_chip, handle_level_irq);
++ irq_set_status_flags(irq, IRQ_LEVEL);
+ enable_irq(irq);
+ lapic_assign_legacy_vector(irq, true);
+ }
+diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c
+index 16919a9671fa9..faed27c8dc39f 100644
+--- a/arch/x86/kernel/irqinit.c
++++ b/arch/x86/kernel/irqinit.c
+@@ -72,8 +72,10 @@ void __init init_ISA_irqs(void)
+
+ legacy_pic->init(0);
+
+- for (i = 0; i < nr_legacy_irqs(); i++)
++ for (i = 0; i < nr_legacy_irqs(); i++) {
+ irq_set_chip_and_handler(i, chip, handle_level_irq);
++ irq_set_status_flags(i, IRQ_LEVEL);
++ }
+ }
+
+ void __init init_IRQ(void)
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index df77207d93b06..a8c8073654cf1 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -3241,18 +3241,15 @@ static u32 vmx_segment_access_rights(struct kvm_segment *var)
+ {
+ u32 ar;
+
+- if (var->unusable || !var->present)
+- ar = 1 << 16;
+- else {
+- ar = var->type & 15;
+- ar |= (var->s & 1) << 4;
+- ar |= (var->dpl & 3) << 5;
+- ar |= (var->present & 1) << 7;
+- ar |= (var->avl & 1) << 12;
+- ar |= (var->l & 1) << 13;
+- ar |= (var->db & 1) << 14;
+- ar |= (var->g & 1) << 15;
+- }
++ ar = var->type & 15;
++ ar |= (var->s & 1) << 4;
++ ar |= (var->dpl & 3) << 5;
++ ar |= (var->present & 1) << 7;
++ ar |= (var->avl & 1) << 12;
++ ar |= (var->l & 1) << 13;
++ ar |= (var->db & 1) << 14;
++ ar |= (var->g & 1) << 15;
++ ar |= (var->unusable || !var->present) << 16;
+
+ return ar;
+ }
+diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
+index a9bdf0805be04..1329d7ca05b5f 100644
+--- a/arch/x86/lib/iomap_copy_64.S
++++ b/arch/x86/lib/iomap_copy_64.S
+@@ -10,6 +10,6 @@
+ */
+ ENTRY(__iowrite32_copy)
+ movl %edx,%ecx
+- rep movsd
++ rep movsl
+ ret
+ ENDPROC(__iowrite32_copy)
+diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c
+index 4a6c495ce9b6d..16af8e514cb3b 100644
+--- a/arch/xtensa/kernel/traps.c
++++ b/arch/xtensa/kernel/traps.c
+@@ -543,5 +543,5 @@ void die(const char * str, struct pt_regs * regs, long err)
+ if (panic_on_oops)
+ panic("Fatal exception");
+
+- do_exit(err);
++ make_task_dead(err);
+ }
+diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
+index dde8d0acfb34f..cd085a0e5e4a7 100644
+--- a/block/blk-cgroup.c
++++ b/block/blk-cgroup.c
+@@ -1445,6 +1445,10 @@ retry:
+ list_for_each_entry_reverse(blkg, &q->blkg_list, q_node)
+ pol->pd_init_fn(blkg->pd[pol->plid]);
+
++ if (pol->pd_online_fn)
++ list_for_each_entry_reverse(blkg, &q->blkg_list, q_node)
++ pol->pd_online_fn(blkg->pd[pol->plid]);
++
+ __set_bit(pol->plid, q->blkcg_pols);
+ ret = 0;
+
+diff --git a/block/blk-core.c b/block/blk-core.c
+index 5808baa950c35..030de4fdf9b1d 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -793,10 +793,7 @@ static inline bool bio_check_ro(struct bio *bio, struct hd_struct *part)
+
+ if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
+ return false;
+-
+- WARN_ONCE(1,
+- "generic_make_request: Trying to write "
+- "to read-only block-device %s (partno %d)\n",
++ pr_warn("Trying to write to read-only block-device %s (partno %d)\n",
+ bio_devname(bio, b), part->partno);
+ /* Older lvm-tools actually trigger this */
+ return false;
+diff --git a/drivers/base/test/test_async_driver_probe.c b/drivers/base/test/test_async_driver_probe.c
+index 3bb7beb127a96..c157a912d6739 100644
+--- a/drivers/base/test/test_async_driver_probe.c
++++ b/drivers/base/test/test_async_driver_probe.c
+@@ -146,7 +146,7 @@ static int __init test_async_probe_init(void)
+ calltime = ktime_get();
+ for_each_online_cpu(cpu) {
+ nid = cpu_to_node(cpu);
+- pdev = &sync_dev[sync_id];
++ pdev = &async_dev[async_id];
+
+ *pdev = test_platform_device_register_node("test_async_driver",
+ async_id,
+diff --git a/drivers/clk/clk-devres.c b/drivers/clk/clk-devres.c
+index f9d5b73343417..4fb4fd4b06bda 100644
+--- a/drivers/clk/clk-devres.c
++++ b/drivers/clk/clk-devres.c
+@@ -4,42 +4,101 @@
+ #include <linux/export.h>
+ #include <linux/gfp.h>
+
++struct devm_clk_state {
++ struct clk *clk;
++ void (*exit)(struct clk *clk);
++};
++
+ static void devm_clk_release(struct device *dev, void *res)
+ {
+- clk_put(*(struct clk **)res);
++ struct devm_clk_state *state = res;
++
++ if (state->exit)
++ state->exit(state->clk);
++
++ clk_put(state->clk);
+ }
+
+-struct clk *devm_clk_get(struct device *dev, const char *id)
++static struct clk *__devm_clk_get(struct device *dev, const char *id,
++ struct clk *(*get)(struct device *dev, const char *id),
++ int (*init)(struct clk *clk),
++ void (*exit)(struct clk *clk))
+ {
+- struct clk **ptr, *clk;
++ struct devm_clk_state *state;
++ struct clk *clk;
++ int ret;
+
+- ptr = devres_alloc(devm_clk_release, sizeof(*ptr), GFP_KERNEL);
+- if (!ptr)
++ state = devres_alloc(devm_clk_release, sizeof(*state), GFP_KERNEL);
++ if (!state)
+ return ERR_PTR(-ENOMEM);
+
+- clk = clk_get(dev, id);
+- if (!IS_ERR(clk)) {
+- *ptr = clk;
+- devres_add(dev, ptr);
+- } else {
+- devres_free(ptr);
++ clk = get(dev, id);
++ if (IS_ERR(clk)) {
++ ret = PTR_ERR(clk);
++ goto err_clk_get;
+ }
+
++ if (init) {
++ ret = init(clk);
++ if (ret)
++ goto err_clk_init;
++ }
++
++ state->clk = clk;
++ state->exit = exit;
++
++ devres_add(dev, state);
++
+ return clk;
++
++err_clk_init:
++
++ clk_put(clk);
++err_clk_get:
++
++ devres_free(state);
++ return ERR_PTR(ret);
++}
++
++struct clk *devm_clk_get(struct device *dev, const char *id)
++{
++ return __devm_clk_get(dev, id, clk_get, NULL, NULL);
+ }
+ EXPORT_SYMBOL(devm_clk_get);
+
+-struct clk *devm_clk_get_optional(struct device *dev, const char *id)
++struct clk *devm_clk_get_prepared(struct device *dev, const char *id)
+ {
+- struct clk *clk = devm_clk_get(dev, id);
++ return __devm_clk_get(dev, id, clk_get, clk_prepare, clk_unprepare);
++}
++EXPORT_SYMBOL_GPL(devm_clk_get_prepared);
+
+- if (clk == ERR_PTR(-ENOENT))
+- return NULL;
++struct clk *devm_clk_get_enabled(struct device *dev, const char *id)
++{
++ return __devm_clk_get(dev, id, clk_get,
++ clk_prepare_enable, clk_disable_unprepare);
++}
++EXPORT_SYMBOL_GPL(devm_clk_get_enabled);
+
+- return clk;
++struct clk *devm_clk_get_optional(struct device *dev, const char *id)
++{
++ return __devm_clk_get(dev, id, clk_get_optional, NULL, NULL);
+ }
+ EXPORT_SYMBOL(devm_clk_get_optional);
+
++struct clk *devm_clk_get_optional_prepared(struct device *dev, const char *id)
++{
++ return __devm_clk_get(dev, id, clk_get_optional,
++ clk_prepare, clk_unprepare);
++}
++EXPORT_SYMBOL_GPL(devm_clk_get_optional_prepared);
++
++struct clk *devm_clk_get_optional_enabled(struct device *dev, const char *id)
++{
++ return __devm_clk_get(dev, id, clk_get_optional,
++ clk_prepare_enable, clk_disable_unprepare);
++}
++EXPORT_SYMBOL_GPL(devm_clk_get_optional_enabled);
++
+ struct clk_bulk_devres {
+ struct clk_bulk_data *clks;
+ int num_clks;
+diff --git a/drivers/cpufreq/armada-37xx-cpufreq.c b/drivers/cpufreq/armada-37xx-cpufreq.c
+index 2de7fd18f66a1..f0be8a43ec496 100644
+--- a/drivers/cpufreq/armada-37xx-cpufreq.c
++++ b/drivers/cpufreq/armada-37xx-cpufreq.c
+@@ -443,7 +443,7 @@ static int __init armada37xx_cpufreq_driver_init(void)
+ return -ENODEV;
+ }
+
+- clk = clk_get(cpu_dev, 0);
++ clk = clk_get(cpu_dev, NULL);
+ if (IS_ERR(clk)) {
+ dev_err(cpu_dev, "Cannot get clock for CPU0\n");
+ return PTR_ERR(clk);
+diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
+index 1200842c3da42..5d28553b69f5b 100644
+--- a/drivers/cpufreq/cpufreq-dt-platdev.c
++++ b/drivers/cpufreq/cpufreq-dt-platdev.c
+@@ -126,6 +126,7 @@ static const struct of_device_id blacklist[] __initconst = {
+
+ { .compatible = "nvidia,tegra124", },
+ { .compatible = "nvidia,tegra210", },
++ { .compatible = "nvidia,tegra234", },
+
+ { .compatible = "qcom,apq8096", },
+ { .compatible = "qcom,msm8996", },
+diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
+index 4b604086b1b3a..a8996faa85a9f 100644
+--- a/drivers/dma/dmaengine.c
++++ b/drivers/dma/dmaengine.c
+@@ -212,7 +212,8 @@ static int dma_chan_get(struct dma_chan *chan)
+ /* The channel is already in use, update client count */
+ if (chan->client_count) {
+ __module_get(owner);
+- goto out;
++ chan->client_count++;
++ return 0;
+ }
+
+ if (!try_module_get(owner))
+@@ -225,11 +226,11 @@ static int dma_chan_get(struct dma_chan *chan)
+ goto err_out;
+ }
+
++ chan->client_count++;
++
+ if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
+ balance_ref_count(chan);
+
+-out:
+- chan->client_count++;
+ return 0;
+
+ err_out:
+diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
+index 8ec7a7041e840..8dbff2f6c3b8d 100644
+--- a/drivers/dma/imx-sdma.c
++++ b/drivers/dma/imx-sdma.c
+@@ -1360,10 +1360,12 @@ static struct sdma_desc *sdma_transfer_init(struct sdma_channel *sdmac,
+ sdma_config_ownership(sdmac, false, true, false);
+
+ if (sdma_load_context(sdmac))
+- goto err_desc_out;
++ goto err_bd_out;
+
+ return desc;
+
++err_bd_out:
++ sdma_free_bd(desc);
+ err_desc_out:
+ kfree(desc);
+ err_out:
+diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
+index 3bb711e735ab9..be44c86a1e037 100644
+--- a/drivers/dma/xilinx/xilinx_dma.c
++++ b/drivers/dma/xilinx/xilinx_dma.c
+@@ -2626,7 +2626,6 @@ static int xilinx_dma_probe(struct platform_device *pdev)
+ struct device_node *node = pdev->dev.of_node;
+ struct xilinx_dma_device *xdev;
+ struct device_node *child, *np = pdev->dev.of_node;
+- struct resource *io;
+ u32 num_frames, addr_width, len_width;
+ int i, err;
+
+@@ -2652,11 +2651,11 @@ static int xilinx_dma_probe(struct platform_device *pdev)
+ return err;
+
+ /* Request and map I/O memory */
+- io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+- xdev->regs = devm_ioremap_resource(&pdev->dev, io);
+- if (IS_ERR(xdev->regs))
+- return PTR_ERR(xdev->regs);
+-
++ xdev->regs = devm_platform_ioremap_resource(pdev, 0);
++ if (IS_ERR(xdev->regs)) {
++ err = PTR_ERR(xdev->regs);
++ goto disable_clks;
++ }
+ /* Retrieve the DMA engine properties from the device tree */
+ xdev->max_buffer_len = GENMASK(XILINX_DMA_MAX_TRANS_LEN_MAX - 1, 0);
+
+@@ -2748,8 +2747,10 @@ static int xilinx_dma_probe(struct platform_device *pdev)
+ /* Initialize the channels */
+ for_each_child_of_node(node, child) {
+ err = xilinx_dma_child_probe(xdev, child);
+- if (err < 0)
+- goto disable_clks;
++ if (err < 0) {
++ of_node_put(child);
++ goto error;
++ }
+ }
+
+ if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
+@@ -2782,12 +2783,12 @@ static int xilinx_dma_probe(struct platform_device *pdev)
+
+ return 0;
+
+-disable_clks:
+- xdma_disable_allclks(xdev);
+ error:
+ for (i = 0; i < xdev->nr_channels; i++)
+ if (xdev->chan[i])
+ xilinx_dma_chan_remove(xdev->chan[i]);
++disable_clks:
++ xdma_disable_allclks(xdev);
+
+ return err;
+ }
+diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
+index 93d6e6319b3cc..0ac6c49ecdbf4 100644
+--- a/drivers/edac/edac_device.c
++++ b/drivers/edac/edac_device.c
+@@ -34,6 +34,9 @@
+ static DEFINE_MUTEX(device_ctls_mutex);
+ static LIST_HEAD(edac_device_list);
+
++/* Default workqueue processing interval on this instance, in msecs */
++#define DEFAULT_POLL_INTERVAL 1000
++
+ #ifdef CONFIG_EDAC_DEBUG
+ static void edac_device_dump_device(struct edac_device_ctl_info *edac_dev)
+ {
+@@ -366,7 +369,7 @@ static void edac_device_workq_function(struct work_struct *work_req)
+ * whole one second to save timers firing all over the period
+ * between integral seconds
+ */
+- if (edac_dev->poll_msec == 1000)
++ if (edac_dev->poll_msec == DEFAULT_POLL_INTERVAL)
+ edac_queue_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay));
+ else
+ edac_queue_work(&edac_dev->work, edac_dev->delay);
+@@ -396,7 +399,7 @@ static void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev,
+ * timers firing on sub-second basis, while they are happy
+ * to fire together on the 1 second exactly
+ */
+- if (edac_dev->poll_msec == 1000)
++ if (edac_dev->poll_msec == DEFAULT_POLL_INTERVAL)
+ edac_queue_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay));
+ else
+ edac_queue_work(&edac_dev->work, edac_dev->delay);
+@@ -430,7 +433,7 @@ void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev,
+ edac_dev->delay = msecs_to_jiffies(msec);
+
+ /* See comment in edac_device_workq_setup() above */
+- if (edac_dev->poll_msec == 1000)
++ if (edac_dev->poll_msec == DEFAULT_POLL_INTERVAL)
+ edac_mod_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay));
+ else
+ edac_mod_work(&edac_dev->work, edac_dev->delay);
+@@ -472,11 +475,7 @@ int edac_device_add_device(struct edac_device_ctl_info *edac_dev)
+ /* This instance is NOW RUNNING */
+ edac_dev->op_state = OP_RUNNING_POLL;
+
+- /*
+- * enable workq processing on this instance,
+- * default = 1000 msec
+- */
+- edac_device_workq_setup(edac_dev, 1000);
++ edac_device_workq_setup(edac_dev, edac_dev->poll_msec ?: DEFAULT_POLL_INTERVAL);
+ } else {
+ edac_dev->op_state = OP_RUNNING_INTERRUPT;
+ }
+diff --git a/drivers/edac/highbank_mc_edac.c b/drivers/edac/highbank_mc_edac.c
+index 61b76ec226af1..19fba258ae108 100644
+--- a/drivers/edac/highbank_mc_edac.c
++++ b/drivers/edac/highbank_mc_edac.c
+@@ -174,8 +174,10 @@ static int highbank_mc_probe(struct platform_device *pdev)
+ drvdata = mci->pvt_info;
+ platform_set_drvdata(pdev, mci);
+
+- if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL))
+- return -ENOMEM;
++ if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL)) {
++ res = -ENOMEM;
++ goto free;
++ }
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r) {
+@@ -243,6 +245,7 @@ err2:
+ edac_mc_del_mc(&pdev->dev);
+ err:
+ devres_release_group(&pdev->dev, NULL);
++free:
+ edac_mc_free(mci);
+ return res;
+ }
+diff --git a/drivers/edac/qcom_edac.c b/drivers/edac/qcom_edac.c
+index 97a27e42dd610..c45519f59dc11 100644
+--- a/drivers/edac/qcom_edac.c
++++ b/drivers/edac/qcom_edac.c
+@@ -252,7 +252,7 @@ clear:
+ static int
+ dump_syn_reg(struct edac_device_ctl_info *edev_ctl, int err_type, u32 bank)
+ {
+- struct llcc_drv_data *drv = edev_ctl->pvt_info;
++ struct llcc_drv_data *drv = edev_ctl->dev->platform_data;
+ int ret;
+
+ ret = dump_syn_reg_values(drv, bank, err_type);
+@@ -289,7 +289,7 @@ static irqreturn_t
+ llcc_ecc_irq_handler(int irq, void *edev_ctl)
+ {
+ struct edac_device_ctl_info *edac_dev_ctl = edev_ctl;
+- struct llcc_drv_data *drv = edac_dev_ctl->pvt_info;
++ struct llcc_drv_data *drv = edac_dev_ctl->dev->platform_data;
+ irqreturn_t irq_rc = IRQ_NONE;
+ u32 drp_error, trp_error, i;
+ int ret;
+@@ -358,7 +358,6 @@ static int qcom_llcc_edac_probe(struct platform_device *pdev)
+ edev_ctl->dev_name = dev_name(dev);
+ edev_ctl->ctl_name = "llcc";
+ edev_ctl->panic_on_ue = LLCC_ERP_PANIC_ON_UE;
+- edev_ctl->pvt_info = llcc_driv_data;
+
+ rc = edac_device_add_device(edev_ctl);
+ if (rc)
+diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c
+index c77d474185f31..2e4b6b176875d 100644
+--- a/drivers/gpio/gpio-mxc.c
++++ b/drivers/gpio/gpio-mxc.c
+@@ -229,7 +229,7 @@ static int gpio_set_irq_type(struct irq_data *d, u32 type)
+
+ writel(1 << gpio_idx, port->base + GPIO_ISR);
+
+- return 0;
++ return port->gc.direction_input(&port->gc, gpio_idx);
+ }
+
+ static void mxc_flip_edge(struct mxc_gpio_port *port, u32 gpio)
+diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
+index ca0fefeaab20b..ce739ba45c551 100644
+--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
++++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
+@@ -272,6 +272,12 @@ static const struct dmi_system_id orientation_data[] = {
+ DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad D330-10IGM"),
+ },
+ .driver_data = (void *)&lcd1200x1920_rightside_up,
++ }, { /* Lenovo Ideapad D330-10IGL (HD) */
++ .matches = {
++ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++ DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad D330-10IGL"),
++ },
++ .driver_data = (void *)&lcd800x1280_rightside_up,
+ }, { /* Lenovo Yoga Book X90F / X91F / X91L */
+ .matches = {
+ /* Non exact match to match all versions */
+diff --git a/drivers/gpu/drm/panfrost/Kconfig b/drivers/gpu/drm/panfrost/Kconfig
+index 86cdc0ce79e65..77f4d32e52045 100644
+--- a/drivers/gpu/drm/panfrost/Kconfig
++++ b/drivers/gpu/drm/panfrost/Kconfig
+@@ -3,7 +3,8 @@
+ config DRM_PANFROST
+ tristate "Panfrost (DRM support for ARM Mali Midgard/Bifrost GPUs)"
+ depends on DRM
+- depends on ARM || ARM64 || (COMPILE_TEST && !GENERIC_ATOMIC64)
++ depends on ARM || ARM64 || COMPILE_TEST
++ depends on !GENERIC_ATOMIC64 # for IOMMU_IO_PGTABLE_LPAE
+ depends on MMU
+ select DRM_SCHED
+ select IOMMU_SUPPORT
+diff --git a/drivers/hid/hid-betopff.c b/drivers/hid/hid-betopff.c
+index 467d789f9bc2d..25ed7b9a917e4 100644
+--- a/drivers/hid/hid-betopff.c
++++ b/drivers/hid/hid-betopff.c
+@@ -60,7 +60,6 @@ static int betopff_init(struct hid_device *hid)
+ struct list_head *report_list =
+ &hid->report_enum[HID_OUTPUT_REPORT].report_list;
+ struct input_dev *dev;
+- int field_count = 0;
+ int error;
+ int i, j;
+
+@@ -86,19 +85,21 @@ static int betopff_init(struct hid_device *hid)
+ * -----------------------------------------
+ * Do init them with default value.
+ */
++ if (report->maxfield < 4) {
++ hid_err(hid, "not enough fields in the report: %d\n",
++ report->maxfield);
++ return -ENODEV;
++ }
+ for (i = 0; i < report->maxfield; i++) {
++ if (report->field[i]->report_count < 1) {
++ hid_err(hid, "no values in the field\n");
++ return -ENODEV;
++ }
+ for (j = 0; j < report->field[i]->report_count; j++) {
+ report->field[i]->value[j] = 0x00;
+- field_count++;
+ }
+ }
+
+- if (field_count < 4) {
+- hid_err(hid, "not enough fields in the report: %d\n",
+- field_count);
+- return -ENODEV;
+- }
+-
+ betopff = kzalloc(sizeof(*betopff), GFP_KERNEL);
+ if (!betopff)
+ return -ENOMEM;
+diff --git a/drivers/hid/hid-bigbenff.c b/drivers/hid/hid-bigbenff.c
+index e8c5e3ac9fff1..e8b16665860d6 100644
+--- a/drivers/hid/hid-bigbenff.c
++++ b/drivers/hid/hid-bigbenff.c
+@@ -344,6 +344,11 @@ static int bigben_probe(struct hid_device *hid,
+ }
+
+ report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
++ if (list_empty(report_list)) {
++ hid_err(hid, "no output report found\n");
++ error = -ENODEV;
++ goto error_hw_stop;
++ }
+ bigben->report = list_entry(report_list->next,
+ struct hid_report, list);
+
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index eda96c92977b3..2888bd5502f3f 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -981,8 +981,8 @@ struct hid_report *hid_validate_values(struct hid_device *hid,
+ * Validating on id 0 means we should examine the first
+ * report in the list.
+ */
+- report = list_entry(
+- hid->report_enum[type].report_list.next,
++ report = list_first_entry_or_null(
++ &hid->report_enum[type].report_list,
+ struct hid_report, list);
+ } else {
+ report = hid->report_enum[type].report_id_hash[id];
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 222f525c3d045..1c034c397e3e7 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -259,7 +259,6 @@
+ #define USB_DEVICE_ID_CH_AXIS_295 0x001c
+
+ #define USB_VENDOR_ID_CHERRY 0x046a
+-#define USB_DEVICE_ID_CHERRY_MOUSE_000C 0x000c
+ #define USB_DEVICE_ID_CHERRY_CYMOTION 0x0023
+ #define USB_DEVICE_ID_CHERRY_CYMOTION_SOLAR 0x0027
+
+diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
+index baad65fcdff70..e5dcc47586ee4 100644
+--- a/drivers/hid/hid-quirks.c
++++ b/drivers/hid/hid-quirks.c
+@@ -54,7 +54,6 @@ static const struct hid_device_id hid_quirks[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FLIGHT_SIM_YOKE), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_PRO_PEDALS), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_PRO_THROTTLE), HID_QUIRK_NOGET },
+- { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_MOUSE_000C), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB_RAPIDFIRE), HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K70RGB), HID_QUIRK_NO_INIT_REPORTS },
+diff --git a/drivers/hid/intel-ish-hid/ishtp/dma-if.c b/drivers/hid/intel-ish-hid/ishtp/dma-if.c
+index 40554c8daca07..00046cbfd4ed0 100644
+--- a/drivers/hid/intel-ish-hid/ishtp/dma-if.c
++++ b/drivers/hid/intel-ish-hid/ishtp/dma-if.c
+@@ -104,6 +104,11 @@ void *ishtp_cl_get_dma_send_buf(struct ishtp_device *dev,
+ int required_slots = (size / DMA_SLOT_SIZE)
+ + 1 * (size % DMA_SLOT_SIZE != 0);
+
++ if (!dev->ishtp_dma_tx_map) {
++ dev_err(dev->devc, "Fail to allocate Tx map\n");
++ return NULL;
++ }
++
+ spin_lock_irqsave(&dev->ishtp_dma_tx_lock, flags);
+ for (i = 0; i <= (dev->ishtp_dma_num_slots - required_slots); i++) {
+ free = 1;
+@@ -150,6 +155,11 @@ void ishtp_cl_release_dma_acked_mem(struct ishtp_device *dev,
+ return;
+ }
+
++ if (!dev->ishtp_dma_tx_map) {
++ dev_err(dev->devc, "Fail to allocate Tx map\n");
++ return;
++ }
++
+ i = (msg_addr - dev->ishtp_host_dma_tx_buf) / DMA_SLOT_SIZE;
+ spin_lock_irqsave(&dev->ishtp_dma_tx_lock, flags);
+ for (j = 0; j < acked_slots; j++) {
+diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
+index 5d896f6b2b617..93a7ff1bd02c7 100644
+--- a/drivers/infiniband/core/verbs.c
++++ b/drivers/infiniband/core/verbs.c
+@@ -2840,15 +2840,18 @@ EXPORT_SYMBOL(__rdma_block_iter_start);
+ bool __rdma_block_iter_next(struct ib_block_iter *biter)
+ {
+ unsigned int block_offset;
++ unsigned int sg_delta;
+
+ if (!biter->__sg_nents || !biter->__sg)
+ return false;
+
+ biter->__dma_addr = sg_dma_address(biter->__sg) + biter->__sg_advance;
+ block_offset = biter->__dma_addr & (BIT_ULL(biter->__pg_bit) - 1);
+- biter->__sg_advance += BIT_ULL(biter->__pg_bit) - block_offset;
++ sg_delta = BIT_ULL(biter->__pg_bit) - block_offset;
+
+- if (biter->__sg_advance >= sg_dma_len(biter->__sg)) {
++ if (sg_dma_len(biter->__sg) - biter->__sg_advance > sg_delta) {
++ biter->__sg_advance += sg_delta;
++ } else {
+ biter->__sg_advance = 0;
+ biter->__sg = sg_next(biter->__sg);
+ biter->__sg_nents--;
+diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.c b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
+index 4d732353379df..e7daa65589ab9 100644
+--- a/drivers/infiniband/hw/hfi1/user_exp_rcv.c
++++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
+@@ -325,6 +325,8 @@ int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd,
+
+ if (!PAGE_ALIGNED(tinfo->vaddr))
+ return -EINVAL;
++ if (tinfo->length == 0)
++ return -EINVAL;
+
+ tidbuf = kzalloc(sizeof(*tidbuf), GFP_KERNEL);
+ if (!tidbuf)
+@@ -335,40 +337,38 @@ int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd,
+ tidbuf->psets = kcalloc(uctxt->expected_count, sizeof(*tidbuf->psets),
+ GFP_KERNEL);
+ if (!tidbuf->psets) {
+- kfree(tidbuf);
+- return -ENOMEM;
++ ret = -ENOMEM;
++ goto fail_release_mem;
+ }
+
+ pinned = pin_rcv_pages(fd, tidbuf);
+ if (pinned <= 0) {
+- kfree(tidbuf->psets);
+- kfree(tidbuf);
+- return pinned;
++ ret = (pinned < 0) ? pinned : -ENOSPC;
++ goto fail_unpin;
+ }
+
+ /* Find sets of physically contiguous pages */
+ tidbuf->n_psets = find_phys_blocks(tidbuf, pinned);
+
+- /*
+- * We don't need to access this under a lock since tid_used is per
+- * process and the same process cannot be in hfi1_user_exp_rcv_clear()
+- * and hfi1_user_exp_rcv_setup() at the same time.
+- */
++ /* Reserve the number of expected tids to be used. */
+ spin_lock(&fd->tid_lock);
+ if (fd->tid_used + tidbuf->n_psets > fd->tid_limit)
+ pageset_count = fd->tid_limit - fd->tid_used;
+ else
+ pageset_count = tidbuf->n_psets;
++ fd->tid_used += pageset_count;
+ spin_unlock(&fd->tid_lock);
+
+- if (!pageset_count)
+- goto bail;
++ if (!pageset_count) {
++ ret = -ENOSPC;
++ goto fail_unreserve;
++ }
+
+ ngroups = pageset_count / dd->rcv_entries.group_size;
+ tidlist = kcalloc(pageset_count, sizeof(*tidlist), GFP_KERNEL);
+ if (!tidlist) {
+ ret = -ENOMEM;
+- goto nomem;
++ goto fail_unreserve;
+ }
+
+ tididx = 0;
+@@ -464,43 +464,60 @@ int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd,
+ }
+ unlock:
+ mutex_unlock(&uctxt->exp_mutex);
+-nomem:
+ hfi1_cdbg(TID, "total mapped: tidpairs:%u pages:%u (%d)", tididx,
+ mapped_pages, ret);
+- if (tididx) {
+- spin_lock(&fd->tid_lock);
+- fd->tid_used += tididx;
+- spin_unlock(&fd->tid_lock);
+- tinfo->tidcnt = tididx;
+- tinfo->length = mapped_pages * PAGE_SIZE;
+-
+- if (copy_to_user(u64_to_user_ptr(tinfo->tidlist),
+- tidlist, sizeof(tidlist[0]) * tididx)) {
+- /*
+- * On failure to copy to the user level, we need to undo
+- * everything done so far so we don't leak resources.
+- */
+- tinfo->tidlist = (unsigned long)&tidlist;
+- hfi1_user_exp_rcv_clear(fd, tinfo);
+- tinfo->tidlist = 0;
+- ret = -EFAULT;
+- goto bail;
+- }
++
++ /* fail if nothing was programmed, set error if none provided */
++ if (tididx == 0) {
++ if (ret >= 0)
++ ret = -ENOSPC;
++ goto fail_unreserve;
+ }
+
+- /*
+- * If not everything was mapped (due to insufficient RcvArray entries,
+- * for example), unpin all unmapped pages so we can pin them nex time.
+- */
+- if (mapped_pages != pinned)
+- unpin_rcv_pages(fd, tidbuf, NULL, mapped_pages,
+- (pinned - mapped_pages), false);
+-bail:
++ /* adjust reserved tid_used to actual count */
++ spin_lock(&fd->tid_lock);
++ fd->tid_used -= pageset_count - tididx;
++ spin_unlock(&fd->tid_lock);
++
++ /* unpin all pages not covered by a TID */
++ unpin_rcv_pages(fd, tidbuf, NULL, mapped_pages, pinned - mapped_pages,
++ false);
++
++ tinfo->tidcnt = tididx;
++ tinfo->length = mapped_pages * PAGE_SIZE;
++
++ if (copy_to_user(u64_to_user_ptr(tinfo->tidlist),
++ tidlist, sizeof(tidlist[0]) * tididx)) {
++ ret = -EFAULT;
++ goto fail_unprogram;
++ }
++
++ kfree(tidbuf->pages);
+ kfree(tidbuf->psets);
++ kfree(tidbuf);
+ kfree(tidlist);
++ return 0;
++
++fail_unprogram:
++ /* unprogram, unmap, and unpin all allocated TIDs */
++ tinfo->tidlist = (unsigned long)tidlist;
++ hfi1_user_exp_rcv_clear(fd, tinfo);
++ tinfo->tidlist = 0;
++ pinned = 0; /* nothing left to unpin */
++ pageset_count = 0; /* nothing left reserved */
++fail_unreserve:
++ spin_lock(&fd->tid_lock);
++ fd->tid_used -= pageset_count;
++ spin_unlock(&fd->tid_lock);
++fail_unpin:
++ if (pinned > 0)
++ unpin_rcv_pages(fd, tidbuf, NULL, 0, pinned, false);
++fail_release_mem:
+ kfree(tidbuf->pages);
++ kfree(tidbuf->psets);
+ kfree(tidbuf);
+- return ret > 0 ? 0 : ret;
++ kfree(tidlist);
++ return ret;
+ }
+
+ int hfi1_user_exp_rcv_clear(struct hfi1_filedata *fd,
+diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
+index 05b007d0a89b1..4b81b2d0fe067 100644
+--- a/drivers/input/mouse/synaptics.c
++++ b/drivers/input/mouse/synaptics.c
+@@ -189,7 +189,6 @@ static const char * const smbus_pnp_ids[] = {
+ "SYN3221", /* HP 15-ay000 */
+ "SYN323d", /* HP Spectre X360 13-w013dx */
+ "SYN3257", /* HP Envy 13-ad105ng */
+- "SYN3286", /* HP Laptop 15-da3001TU */
+ NULL
+ };
+
+diff --git a/drivers/memory/atmel-sdramc.c b/drivers/memory/atmel-sdramc.c
+index 9c49d00c2a966..ea6e9e1eaf046 100644
+--- a/drivers/memory/atmel-sdramc.c
++++ b/drivers/memory/atmel-sdramc.c
+@@ -47,19 +47,17 @@ static int atmel_ramc_probe(struct platform_device *pdev)
+ caps = of_device_get_match_data(&pdev->dev);
+
+ if (caps->has_ddrck) {
+- clk = devm_clk_get(&pdev->dev, "ddrck");
++ clk = devm_clk_get_enabled(&pdev->dev, "ddrck");
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+- clk_prepare_enable(clk);
+ }
+
+ if (caps->has_mpddr_clk) {
+- clk = devm_clk_get(&pdev->dev, "mpddr");
++ clk = devm_clk_get_enabled(&pdev->dev, "mpddr");
+ if (IS_ERR(clk)) {
+ pr_err("AT91 RAMC: couldn't get mpddr clock\n");
+ return PTR_ERR(clk);
+ }
+- clk_prepare_enable(clk);
+ }
+
+ return 0;
+diff --git a/drivers/memory/mvebu-devbus.c b/drivers/memory/mvebu-devbus.c
+index 095f8a3b2cfcc..9bf477b000c0d 100644
+--- a/drivers/memory/mvebu-devbus.c
++++ b/drivers/memory/mvebu-devbus.c
+@@ -282,10 +282,9 @@ static int mvebu_devbus_probe(struct platform_device *pdev)
+ if (IS_ERR(devbus->base))
+ return PTR_ERR(devbus->base);
+
+- clk = devm_clk_get(&pdev->dev, NULL);
++ clk = devm_clk_get_enabled(&pdev->dev, NULL);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+- clk_prepare_enable(clk);
+
+ /*
+ * Obtain clock period in picoseconds,
+diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
+index 96cad561e1d8d..b3f761eca8299 100644
+--- a/drivers/mmc/host/sdhci-esdhc-imx.c
++++ b/drivers/mmc/host/sdhci-esdhc-imx.c
+@@ -89,6 +89,8 @@
+ /* NOTE: the minimum valid tuning start tap for mx6sl is 1 */
+ #define ESDHC_TUNING_START_TAP_DEFAULT 0x1
+ #define ESDHC_TUNING_START_TAP_MASK 0x7f
++#define ESDHC_TUNING_CMD_CRC_CHECK_DISABLE (1 << 7)
++#define ESDHC_TUNING_STEP_DEFAULT 0x1
+ #define ESDHC_TUNING_STEP_MASK 0x00070000
+ #define ESDHC_TUNING_STEP_SHIFT 16
+
+@@ -1180,7 +1182,8 @@ static void sdhci_esdhc_imx_hwinit(struct sdhci_host *host)
+ {
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
+- int tmp;
++ struct cqhci_host *cq_host = host->mmc->cqe_private;
++ u32 tmp;
+
+ if (esdhc_is_usdhc(imx_data)) {
+ /*
+@@ -1233,18 +1236,37 @@ static void sdhci_esdhc_imx_hwinit(struct sdhci_host *host)
+
+ if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING) {
+ tmp = readl(host->ioaddr + ESDHC_TUNING_CTRL);
+- tmp |= ESDHC_STD_TUNING_EN |
+- ESDHC_TUNING_START_TAP_DEFAULT;
+- if (imx_data->boarddata.tuning_start_tap) {
+- tmp &= ~ESDHC_TUNING_START_TAP_MASK;
++ tmp |= ESDHC_STD_TUNING_EN;
++
++ /*
++ * ROM code or the bootloader may have configured the start tap
++ * and step fields; clear them first.
++ */
++ tmp &= ~(ESDHC_TUNING_START_TAP_MASK | ESDHC_TUNING_STEP_MASK);
++ if (imx_data->boarddata.tuning_start_tap)
+ tmp |= imx_data->boarddata.tuning_start_tap;
+- }
++ else
++ tmp |= ESDHC_TUNING_START_TAP_DEFAULT;
+
+ if (imx_data->boarddata.tuning_step) {
+- tmp &= ~ESDHC_TUNING_STEP_MASK;
+ tmp |= imx_data->boarddata.tuning_step
+ << ESDHC_TUNING_STEP_SHIFT;
++ } else {
++ tmp |= ESDHC_TUNING_STEP_DEFAULT
++ << ESDHC_TUNING_STEP_SHIFT;
+ }
++
++ /* Disable the CMD CRC check for tuning; otherwise a delay would be
++ * needed after every tuning command. The hardware standard tuning
++ * logic moves on to the next step as soon as it detects a CMD CRC
++ * error, without waiting for the card to finish sending the tuning
++ * data, and raises the buffer read ready interrupt immediately. If
++ * the uSDHC then sends the next tuning command, some eMMC cards get
++ * stuck and stop responding, which either blocks the tuning
++ * procedure or leaves the first command after tuning unanswered.
++ */
++ tmp |= ESDHC_TUNING_CMD_CRC_CHECK_DISABLE;
+ writel(tmp, host->ioaddr + ESDHC_TUNING_CTRL);
+ } else if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) {
+ /*
+@@ -1256,6 +1278,21 @@ static void sdhci_esdhc_imx_hwinit(struct sdhci_host *host)
+ tmp &= ~ESDHC_STD_TUNING_EN;
+ writel(tmp, host->ioaddr + ESDHC_TUNING_CTRL);
+ }
++
++ /*
++ * On i.MX8MM we run a dual-Linux setup: the 1st Linux uses the SD card
++ * as rootfs storage and the 2nd Linux uses eMMC. The 1st Linux
++ * configures power/clock for the 2nd Linux.
++ *
++ * When the 1st Linux destroys and then restarts the 2nd Linux while it
++ * is booting into its rootfs, we hit an SDHCI dump. Clearing the
++ * pending interrupt and halting CQCTL makes the issue go away.
++ */
++ if (cq_host) {
++ tmp = cqhci_readl(cq_host, CQHCI_IS);
++ cqhci_writel(cq_host, tmp, CQHCI_IS);
++ cqhci_writel(cq_host, CQHCI_HALT, CQHCI_CTL);
++ }
+ }
+ }
+
+@@ -1571,8 +1608,6 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
+ if (err)
+ goto disable_ahb_clk;
+
+- host->tuning_delay = 1;
+-
+ sdhci_esdhc_imx_hwinit(host);
+
+ err = sdhci_add_host(host);
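
The tuning-control rework above is a read-modify-write of packed register fields: clear the start-tap and step fields first, since firmware may have left values behind, then OR in either the board-provided value or a default. The self-contained C sketch below shows the same masking arithmetic on a plain 32-bit value; the field layout mirrors the ESDHC_TUNING_* constants in the hunk, while the sample register values are invented for illustration.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define TUNING_START_TAP_MASK	0x7f
#define TUNING_START_TAP_DFLT	0x1
#define TUNING_STEP_MASK	0x00070000
#define TUNING_STEP_SHIFT	16
#define TUNING_STEP_DFLT	0x1

static uint32_t tuning_ctrl(uint32_t reg, uint32_t start_tap, uint32_t step)
{
	/* Clear both fields before applying new values. */
	reg &= ~(TUNING_START_TAP_MASK | TUNING_STEP_MASK);

	reg |= start_tap ? start_tap : TUNING_START_TAP_DFLT;
	reg |= (step ? step : TUNING_STEP_DFLT) << TUNING_STEP_SHIFT;

	return reg;
}

int main(void)
{
	uint32_t reg = 0x00054032;	/* pretend value read back from the IP */

	printf("0x%08" PRIx32 "\n", tuning_ctrl(reg, 0, 0));	/* defaults */
	printf("0x%08" PRIx32 "\n", tuning_ctrl(reg, 0x20, 2));	/* board overrides */
	return 0;
}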
+diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
+index c66e78b2870d7..ba2dc01e0f6bf 100644
+--- a/drivers/net/dsa/microchip/ksz9477.c
++++ b/drivers/net/dsa/microchip/ksz9477.c
+@@ -682,10 +682,10 @@ static int ksz9477_port_fdb_del(struct dsa_switch *ds, int port,
+ ksz_read32(dev, REG_SW_ALU_VAL_D, &alu_table[3]);
+
+ /* clear forwarding port */
+- alu_table[2] &= ~BIT(port);
++ alu_table[1] &= ~BIT(port);
+
+ /* if there is no port to forward, clear table */
+- if ((alu_table[2] & ALU_V_PORT_MAP) == 0) {
++ if ((alu_table[1] & ALU_V_PORT_MAP) == 0) {
+ alu_table[0] = 0;
+ alu_table[1] = 0;
+ alu_table[2] = 0;
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+index d5fd49dd25f33..decc1c09a031b 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+@@ -524,19 +524,28 @@ static void xgbe_disable_vxlan(struct xgbe_prv_data *pdata)
+ netif_dbg(pdata, drv, pdata->netdev, "VXLAN acceleration disabled\n");
+ }
+
++static unsigned int xgbe_get_fc_queue_count(struct xgbe_prv_data *pdata)
++{
++ unsigned int max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
++
++ /* From MAC ver 30H the TFCR is per priority, instead of per queue */
++ if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) >= 0x30)
++ return max_q_count;
++ else
++ return min_t(unsigned int, pdata->tx_q_count, max_q_count);
++}
++
+ static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata)
+ {
+- unsigned int max_q_count, q_count;
+ unsigned int reg, reg_val;
+- unsigned int i;
++ unsigned int i, q_count;
+
+ /* Clear MTL flow control */
+ for (i = 0; i < pdata->rx_q_count; i++)
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 0);
+
+ /* Clear MAC flow control */
+- max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
+- q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
++ q_count = xgbe_get_fc_queue_count(pdata);
+ reg = MAC_Q0TFCR;
+ for (i = 0; i < q_count; i++) {
+ reg_val = XGMAC_IOREAD(pdata, reg);
+@@ -553,9 +562,8 @@ static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata)
+ {
+ struct ieee_pfc *pfc = pdata->pfc;
+ struct ieee_ets *ets = pdata->ets;
+- unsigned int max_q_count, q_count;
+ unsigned int reg, reg_val;
+- unsigned int i;
++ unsigned int i, q_count;
+
+ /* Set MTL flow control */
+ for (i = 0; i < pdata->rx_q_count; i++) {
+@@ -579,8 +587,7 @@ static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata)
+ }
+
+ /* Set MAC flow control */
+- max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
+- q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
++ q_count = xgbe_get_fc_queue_count(pdata);
+ reg = MAC_Q0TFCR;
+ for (i = 0; i < q_count; i++) {
+ reg_val = XGMAC_IOREAD(pdata, reg);
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+index 97167fc9bebe7..7840eb4cdb8da 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+@@ -496,6 +496,7 @@ static enum xgbe_an xgbe_an73_tx_training(struct xgbe_prv_data *pdata,
+ reg |= XGBE_KR_TRAINING_ENABLE;
+ reg |= XGBE_KR_TRAINING_START;
+ XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg);
++ pdata->kr_start_time = jiffies;
+
+ netif_dbg(pdata, link, pdata->netdev,
+ "KR training initiated\n");
+@@ -632,6 +633,8 @@ static enum xgbe_an xgbe_an73_incompat_link(struct xgbe_prv_data *pdata)
+
+ xgbe_switch_mode(pdata);
+
++ pdata->an_result = XGBE_AN_READY;
++
+ xgbe_an_restart(pdata);
+
+ return XGBE_AN_INCOMPAT_LINK;
+@@ -1275,9 +1278,30 @@ static bool xgbe_phy_aneg_done(struct xgbe_prv_data *pdata)
+ static void xgbe_check_link_timeout(struct xgbe_prv_data *pdata)
+ {
+ unsigned long link_timeout;
++ unsigned long kr_time;
++ int wait;
+
+ link_timeout = pdata->link_check + (XGBE_LINK_TIMEOUT * HZ);
+ if (time_after(jiffies, link_timeout)) {
++ if ((xgbe_cur_mode(pdata) == XGBE_MODE_KR) &&
++ pdata->phy.autoneg == AUTONEG_ENABLE) {
++ /* An AN restart must not happen while KR training is in progress.
++ * The loop below waits up to 500 ms and triggers an AN restart
++ * only if KR training has not completed within that time.
++ */
++ wait = XGBE_KR_TRAINING_WAIT_ITER;
++ while (wait--) {
++ kr_time = pdata->kr_start_time +
++ msecs_to_jiffies(XGBE_AN_MS_TIMEOUT);
++ if (time_after(jiffies, kr_time))
++ break;
++ /* AN restart is not required, if AN result is COMPLETE */
++ if (pdata->an_result == XGBE_AN_COMPLETE)
++ return;
++ usleep_range(10000, 11000);
++ }
++ }
+ netif_dbg(pdata, link, pdata->netdev, "AN link timeout\n");
+ xgbe_phy_config_aneg(pdata);
+ }
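
The new wait loop above is a bounded poll: re-check roughly every 10 ms whether auto-negotiation already completed, and only fall through to the AN restart once about 500 ms have passed since KR training started. A userspace analog using CLOCK_MONOTONIC in place of jiffies is sketched below; the 500 ms budget, 10 ms poll interval and 50-iteration cap come from the hunk, everything else is illustrative.

#define _POSIX_C_SOURCE 200809L
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static long long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000LL + ts.tv_nsec / 1000000;
}

/* Poll 'done' every 10 ms, but never past start_ms + 500 ms. */
static bool wait_for_training(long long start_ms, volatile bool *done)
{
	int iter = 50;				/* upper bound on iterations */

	while (iter--) {
		if (now_ms() > start_ms + 500)
			break;			/* budget exhausted */
		if (*done)
			return true;		/* no restart needed */
		nanosleep(&(struct timespec){ .tv_nsec = 10 * 1000000 }, NULL);
	}
	return false;				/* caller restarts AN */
}

int main(void)
{
	volatile bool done = false;
	long long start = now_ms() - 600;	/* pretend training began 600 ms ago */

	printf("restart needed: %s\n", wait_for_training(start, &done) ? "no" : "yes");
	return 0;
}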
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
+index 0c93a552b921d..729307a96c50d 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
++++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
+@@ -290,6 +290,7 @@
+ /* Auto-negotiation */
+ #define XGBE_AN_MS_TIMEOUT 500
+ #define XGBE_LINK_TIMEOUT 5
++#define XGBE_KR_TRAINING_WAIT_ITER 50
+
+ #define XGBE_SGMII_AN_LINK_STATUS BIT(1)
+ #define XGBE_SGMII_AN_LINK_SPEED (BIT(2) | BIT(3))
+@@ -1266,6 +1267,7 @@ struct xgbe_prv_data {
+ unsigned int parallel_detect;
+ unsigned int fec_ability;
+ unsigned long an_start;
++ unsigned long kr_start_time;
+ enum xgbe_an_mode an_mode;
+
+ /* I2C support */
+diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
+index 18f4923b1723b..6a253f81c5552 100644
+--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
++++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
+@@ -18,6 +18,7 @@
+ #include <linux/of_platform.h>
+ #include <linux/of_net.h>
+ #include <linux/of_mdio.h>
++#include <linux/mdio/mdio-xgene.h>
+ #include <linux/module.h>
+ #include <net/ip.h>
+ #include <linux/prefetch.h>
+@@ -26,7 +27,6 @@
+ #include "xgene_enet_hw.h"
+ #include "xgene_enet_cle.h"
+ #include "xgene_enet_ring2.h"
+-#include "../../../phy/mdio-xgene.h"
+
+ #define XGENE_DRV_VERSION "v1.0"
+ #define ETHER_MIN_PACKET 64
+diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
+index a9962474d551d..d0cd86af29d9f 100644
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -11195,7 +11195,7 @@ static void tg3_reset_task(struct work_struct *work)
+ rtnl_lock();
+ tg3_full_lock(tp, 0);
+
+- if (!netif_running(tp->dev)) {
++ if (tp->pcierr_recovery || !netif_running(tp->dev)) {
+ tg3_flag_clear(tp, RESET_TASK_PENDING);
+ tg3_full_unlock(tp);
+ rtnl_unlock();
+@@ -18187,6 +18187,9 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
+
+ netdev_info(netdev, "PCI I/O error detected\n");
+
++ /* Want to make sure that the reset task doesn't run */
++ tg3_reset_task_cancel(tp);
++
+ rtnl_lock();
+
+ /* Could be second call or maybe we don't have netdev yet */
+@@ -18203,9 +18206,6 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
+
+ tg3_timer_stop(tp);
+
+- /* Want to make sure that the reset task doesn't run */
+- tg3_reset_task_cancel(tp);
+-
+ netif_device_detach(netdev);
+
+ /* Clean up software state, even if MMIO is blocked */
+diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
+index 78219a9943a73..d948b582f4c97 100644
+--- a/drivers/net/ethernet/cadence/macb_main.c
++++ b/drivers/net/ethernet/cadence/macb_main.c
+@@ -1752,7 +1752,6 @@ static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev)
+ bool cloned = skb_cloned(*skb) || skb_header_cloned(*skb) ||
+ skb_is_nonlinear(*skb);
+ int padlen = ETH_ZLEN - (*skb)->len;
+- int headroom = skb_headroom(*skb);
+ int tailroom = skb_tailroom(*skb);
+ struct sk_buff *nskb;
+ u32 fcs;
+@@ -1766,9 +1765,6 @@ static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev)
+ /* FCS could be appended to tailroom. */
+ if (tailroom >= ETH_FCS_LEN)
+ goto add_fcs;
+- /* FCS could be appeded by moving data to headroom. */
+- else if (!cloned && headroom + tailroom >= ETH_FCS_LEN)
+- padlen = 0;
+ /* No room for FCS, need to reallocate skb. */
+ else
+ padlen = ETH_FCS_LEN;
+@@ -1777,10 +1773,7 @@ static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev)
+ padlen += ETH_FCS_LEN;
+ }
+
+- if (!cloned && headroom + tailroom >= padlen) {
+- (*skb)->data = memmove((*skb)->head, (*skb)->data, (*skb)->len);
+- skb_set_tail_pointer(*skb, (*skb)->len);
+- } else {
++ if (cloned || tailroom < padlen) {
+ nskb = skb_copy_expand(*skb, 0, padlen, GFP_ATOMIC);
+ if (!nskb)
+ return -ENOMEM;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+index f2657cd3ffa4f..83ee9429e7c65 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+@@ -1640,7 +1640,7 @@ static void mlx5_core_verify_params(void)
+ }
+ }
+
+-static int __init init(void)
++static int __init mlx5_init(void)
+ {
+ int err;
+
+@@ -1665,7 +1665,7 @@ err_debug:
+ return err;
+ }
+
+-static void __exit cleanup(void)
++static void __exit mlx5_cleanup(void)
+ {
+ #ifdef CONFIG_MLX5_CORE_EN
+ mlx5e_cleanup();
+@@ -1674,5 +1674,5 @@ static void __exit cleanup(void)
+ mlx5_unregister_debugfs();
+ }
+
+-module_init(init);
+-module_exit(cleanup);
++module_init(mlx5_init);
++module_exit(mlx5_cleanup);
+diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
+index 3fd5155bdd5fa..231a1295c4700 100644
+--- a/drivers/net/ethernet/renesas/ravb_main.c
++++ b/drivers/net/ethernet/renesas/ravb_main.c
+@@ -736,14 +736,14 @@ static void ravb_error_interrupt(struct net_device *ndev)
+ ravb_write(ndev, ~(EIS_QFS | EIS_RESERVED), EIS);
+ if (eis & EIS_QFS) {
+ ris2 = ravb_read(ndev, RIS2);
+- ravb_write(ndev, ~(RIS2_QFF0 | RIS2_RFFF | RIS2_RESERVED),
++ ravb_write(ndev, ~(RIS2_QFF0 | RIS2_QFF1 | RIS2_RFFF | RIS2_RESERVED),
+ RIS2);
+
+ /* Receive Descriptor Empty int */
+ if (ris2 & RIS2_QFF0)
+ priv->stats[RAVB_BE].rx_over_errors++;
+
+- /* Receive Descriptor Empty int */
++ /* Receive Descriptor Empty int */
+ if (ris2 & RIS2_QFF1)
+ priv->stats[RAVB_NC].rx_over_errors++;
+
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index 9931724c4727d..3079e52546663 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -998,6 +998,11 @@ static int stmmac_init_phy(struct net_device *dev)
+ int addr = priv->plat->phy_addr;
+ struct phy_device *phydev;
+
++ if (addr < 0) {
++ netdev_err(priv->dev, "no phy found\n");
++ return -ENODEV;
++ }
++
+ phydev = mdiobus_get_phy(priv->mii, addr);
+ if (!phydev) {
+ netdev_err(priv->dev, "no phy at addr %d\n", addr);
+diff --git a/drivers/net/phy/mdio-i2c.c b/drivers/net/phy/mdio-i2c.c
+index 0dce676725488..5969878e0aa7c 100644
+--- a/drivers/net/phy/mdio-i2c.c
++++ b/drivers/net/phy/mdio-i2c.c
+@@ -10,10 +10,9 @@
+ * of their settings.
+ */
+ #include <linux/i2c.h>
++#include <linux/mdio/mdio-i2c.h>
+ #include <linux/phy.h>
+
+-#include "mdio-i2c.h"
+-
+ /*
+ * I2C bus addresses 0x50 and 0x51 are normally an EEPROM, which is
+ * specified to be present in SFP modules. These correspond with PHY
+diff --git a/drivers/net/phy/mdio-i2c.h b/drivers/net/phy/mdio-i2c.h
+deleted file mode 100644
+index 751dab281f57e..0000000000000
+--- a/drivers/net/phy/mdio-i2c.h
++++ /dev/null
+@@ -1,16 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0
+-/*
+- * MDIO I2C bridge
+- *
+- * Copyright (C) 2015 Russell King
+- */
+-#ifndef MDIO_I2C_H
+-#define MDIO_I2C_H
+-
+-struct device;
+-struct i2c_adapter;
+-struct mii_bus;
+-
+-struct mii_bus *mdio_i2c_alloc(struct device *parent, struct i2c_adapter *i2c);
+-
+-#endif
+diff --git a/drivers/net/phy/mdio-mux-meson-g12a.c b/drivers/net/phy/mdio-mux-meson-g12a.c
+index 7a9ad54582e19..aa3ad38e37d7b 100644
+--- a/drivers/net/phy/mdio-mux-meson-g12a.c
++++ b/drivers/net/phy/mdio-mux-meson-g12a.c
+@@ -4,6 +4,7 @@
+ */
+
+ #include <linux/bitfield.h>
++#include <linux/delay.h>
+ #include <linux/clk.h>
+ #include <linux/clk-provider.h>
+ #include <linux/device.h>
+@@ -148,6 +149,7 @@ static const struct clk_ops g12a_ephy_pll_ops = {
+
+ static int g12a_enable_internal_mdio(struct g12a_mdio_mux *priv)
+ {
++ u32 value;
+ int ret;
+
+ /* Enable the phy clock */
+@@ -161,18 +163,25 @@ static int g12a_enable_internal_mdio(struct g12a_mdio_mux *priv)
+
+ /* Initialize ephy control */
+ writel(EPHY_G12A_ID, priv->regs + ETH_PHY_CNTL0);
+- writel(FIELD_PREP(PHY_CNTL1_ST_MODE, 3) |
+- FIELD_PREP(PHY_CNTL1_ST_PHYADD, EPHY_DFLT_ADD) |
+- FIELD_PREP(PHY_CNTL1_MII_MODE, EPHY_MODE_RMII) |
+- PHY_CNTL1_CLK_EN |
+- PHY_CNTL1_CLKFREQ |
+- PHY_CNTL1_PHY_ENB,
+- priv->regs + ETH_PHY_CNTL1);
++
++ /* Make sure we get a 0 -> 1 transition on the enable bit */
++ value = FIELD_PREP(PHY_CNTL1_ST_MODE, 3) |
++ FIELD_PREP(PHY_CNTL1_ST_PHYADD, EPHY_DFLT_ADD) |
++ FIELD_PREP(PHY_CNTL1_MII_MODE, EPHY_MODE_RMII) |
++ PHY_CNTL1_CLK_EN |
++ PHY_CNTL1_CLKFREQ;
++ writel(value, priv->regs + ETH_PHY_CNTL1);
+ writel(PHY_CNTL2_USE_INTERNAL |
+ PHY_CNTL2_SMI_SRC_MAC |
+ PHY_CNTL2_RX_CLK_EPHY,
+ priv->regs + ETH_PHY_CNTL2);
+
++ value |= PHY_CNTL1_PHY_ENB;
++ writel(value, priv->regs + ETH_PHY_CNTL1);
++
++ /* The phy needs a bit of time to power up */
++ mdelay(10);
++
+ return 0;
+ }
+
+diff --git a/drivers/net/phy/mdio-xgene.c b/drivers/net/phy/mdio-xgene.c
+index 34990eaa3298c..461207cdf5d6e 100644
+--- a/drivers/net/phy/mdio-xgene.c
++++ b/drivers/net/phy/mdio-xgene.c
+@@ -11,6 +11,7 @@
+ #include <linux/efi.h>
+ #include <linux/if_vlan.h>
+ #include <linux/io.h>
++#include <linux/mdio/mdio-xgene.h>
+ #include <linux/module.h>
+ #include <linux/of_platform.h>
+ #include <linux/of_net.h>
+@@ -18,7 +19,6 @@
+ #include <linux/prefetch.h>
+ #include <linux/phy.h>
+ #include <net/ip.h>
+-#include "mdio-xgene.h"
+
+ static bool xgene_mdio_status;
+
+diff --git a/drivers/net/phy/mdio-xgene.h b/drivers/net/phy/mdio-xgene.h
+deleted file mode 100644
+index b1f5ccb4ad9c3..0000000000000
+--- a/drivers/net/phy/mdio-xgene.h
++++ /dev/null
+@@ -1,130 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0+
+-/* Applied Micro X-Gene SoC MDIO Driver
+- *
+- * Copyright (c) 2016, Applied Micro Circuits Corporation
+- * Author: Iyappan Subramanian <isubramanian@apm.com>
+- */
+-
+-#ifndef __MDIO_XGENE_H__
+-#define __MDIO_XGENE_H__
+-
+-#define BLOCK_XG_MDIO_CSR_OFFSET 0x5000
+-#define BLOCK_DIAG_CSR_OFFSET 0xd000
+-#define XGENET_CONFIG_REG_ADDR 0x20
+-
+-#define MAC_ADDR_REG_OFFSET 0x00
+-#define MAC_COMMAND_REG_OFFSET 0x04
+-#define MAC_WRITE_REG_OFFSET 0x08
+-#define MAC_READ_REG_OFFSET 0x0c
+-#define MAC_COMMAND_DONE_REG_OFFSET 0x10
+-
+-#define CLKEN_OFFSET 0x08
+-#define SRST_OFFSET 0x00
+-
+-#define MENET_CFG_MEM_RAM_SHUTDOWN_ADDR 0x70
+-#define MENET_BLOCK_MEM_RDY_ADDR 0x74
+-
+-#define MAC_CONFIG_1_ADDR 0x00
+-#define MII_MGMT_COMMAND_ADDR 0x24
+-#define MII_MGMT_ADDRESS_ADDR 0x28
+-#define MII_MGMT_CONTROL_ADDR 0x2c
+-#define MII_MGMT_STATUS_ADDR 0x30
+-#define MII_MGMT_INDICATORS_ADDR 0x34
+-#define SOFT_RESET BIT(31)
+-
+-#define MII_MGMT_CONFIG_ADDR 0x20
+-#define MII_MGMT_COMMAND_ADDR 0x24
+-#define MII_MGMT_ADDRESS_ADDR 0x28
+-#define MII_MGMT_CONTROL_ADDR 0x2c
+-#define MII_MGMT_STATUS_ADDR 0x30
+-#define MII_MGMT_INDICATORS_ADDR 0x34
+-
+-#define MIIM_COMMAND_ADDR 0x20
+-#define MIIM_FIELD_ADDR 0x24
+-#define MIIM_CONFIGURATION_ADDR 0x28
+-#define MIIM_LINKFAILVECTOR_ADDR 0x2c
+-#define MIIM_INDICATOR_ADDR 0x30
+-#define MIIMRD_FIELD_ADDR 0x34
+-
+-#define MDIO_CSR_OFFSET 0x5000
+-
+-#define REG_ADDR_POS 0
+-#define REG_ADDR_LEN 5
+-#define PHY_ADDR_POS 8
+-#define PHY_ADDR_LEN 5
+-
+-#define HSTMIIMWRDAT_POS 0
+-#define HSTMIIMWRDAT_LEN 16
+-#define HSTPHYADX_POS 23
+-#define HSTPHYADX_LEN 5
+-#define HSTREGADX_POS 18
+-#define HSTREGADX_LEN 5
+-#define HSTLDCMD BIT(3)
+-#define HSTMIIMCMD_POS 0
+-#define HSTMIIMCMD_LEN 3
+-
+-#define BUSY_MASK BIT(0)
+-#define READ_CYCLE_MASK BIT(0)
+-
+-enum xgene_enet_cmd {
+- XGENE_ENET_WR_CMD = BIT(31),
+- XGENE_ENET_RD_CMD = BIT(30)
+-};
+-
+-enum {
+- MIIM_CMD_IDLE,
+- MIIM_CMD_LEGACY_WRITE,
+- MIIM_CMD_LEGACY_READ,
+-};
+-
+-enum xgene_mdio_id {
+- XGENE_MDIO_RGMII = 1,
+- XGENE_MDIO_XFI
+-};
+-
+-struct xgene_mdio_pdata {
+- struct clk *clk;
+- struct device *dev;
+- void __iomem *mac_csr_addr;
+- void __iomem *diag_csr_addr;
+- void __iomem *mdio_csr_addr;
+- struct mii_bus *mdio_bus;
+- int mdio_id;
+- spinlock_t mac_lock; /* mac lock */
+-};
+-
+-/* Set the specified value into a bit-field defined by its starting position
+- * and length within a single u64.
+- */
+-static inline u64 xgene_enet_set_field_value(int pos, int len, u64 val)
+-{
+- return (val & ((1ULL << len) - 1)) << pos;
+-}
+-
+-#define SET_VAL(field, val) \
+- xgene_enet_set_field_value(field ## _POS, field ## _LEN, val)
+-
+-#define SET_BIT(field) \
+- xgene_enet_set_field_value(field ## _POS, 1, 1)
+-
+-/* Get the value from a bit-field defined by its starting position
+- * and length within the specified u64.
+- */
+-static inline u64 xgene_enet_get_field_value(int pos, int len, u64 src)
+-{
+- return (src >> pos) & ((1ULL << len) - 1);
+-}
+-
+-#define GET_VAL(field, src) \
+- xgene_enet_get_field_value(field ## _POS, field ## _LEN, src)
+-
+-#define GET_BIT(field, src) \
+- xgene_enet_get_field_value(field ## _POS, 1, src)
+-
+-u32 xgene_mdio_rd_mac(struct xgene_mdio_pdata *pdata, u32 rd_addr);
+-void xgene_mdio_wr_mac(struct xgene_mdio_pdata *pdata, u32 wr_addr, u32 data);
+-int xgene_mdio_rgmii_read(struct mii_bus *bus, int phy_id, int reg);
+-int xgene_mdio_rgmii_write(struct mii_bus *bus, int phy_id, int reg, u16 data);
+-struct phy_device *xgene_enet_phy_register(struct mii_bus *bus, int phy_addr);
+-
+-#endif /* __MDIO_XGENE_H__ */
+diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
+index 757763735e1f5..fdf8221f46fa5 100644
+--- a/drivers/net/phy/mdio_bus.c
++++ b/drivers/net/phy/mdio_bus.c
+@@ -117,7 +117,12 @@ EXPORT_SYMBOL(mdiobus_unregister_device);
+
+ struct phy_device *mdiobus_get_phy(struct mii_bus *bus, int addr)
+ {
+- struct mdio_device *mdiodev = bus->mdio_map[addr];
++ struct mdio_device *mdiodev;
++
++ if (addr < 0 || addr >= ARRAY_SIZE(bus->mdio_map))
++ return NULL;
++
++ mdiodev = bus->mdio_map[addr];
+
+ if (!mdiodev)
+ return NULL;
+diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
+index beaa00342a13f..9639aa1819685 100644
+--- a/drivers/net/phy/sfp.c
++++ b/drivers/net/phy/sfp.c
+@@ -7,6 +7,7 @@
+ #include <linux/i2c.h>
+ #include <linux/interrupt.h>
+ #include <linux/jiffies.h>
++#include <linux/mdio/mdio-i2c.h>
+ #include <linux/module.h>
+ #include <linux/mutex.h>
+ #include <linux/of.h>
+@@ -16,7 +17,6 @@
+ #include <linux/slab.h>
+ #include <linux/workqueue.h>
+
+-#include "mdio-i2c.h"
+ #include "sfp.h"
+ #include "swphy.h"
+
+diff --git a/drivers/net/usb/sr9700.c b/drivers/net/usb/sr9700.c
+index fce6713e970ba..811c8751308c6 100644
+--- a/drivers/net/usb/sr9700.c
++++ b/drivers/net/usb/sr9700.c
+@@ -410,7 +410,7 @@ static int sr9700_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+ /* ignore the CRC length */
+ len = (skb->data[1] | (skb->data[2] << 8)) - 4;
+
+- if (len > ETH_FRAME_LEN || len > skb->len)
++ if (len > ETH_FRAME_LEN || len > skb->len || len < 0)
+ return 0;
+
+ /* the last packet of current skb */
+diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c
+index 034eb6535ab7d..46077cef855b2 100644
+--- a/drivers/net/wan/fsl_ucc_hdlc.c
++++ b/drivers/net/wan/fsl_ucc_hdlc.c
+@@ -1249,9 +1249,11 @@ static int ucc_hdlc_probe(struct platform_device *pdev)
+ free_dev:
+ free_netdev(dev);
+ undo_uhdlc_init:
+- iounmap(utdm->siram);
++ if (utdm)
++ iounmap(utdm->siram);
+ unmap_si_regs:
+- iounmap(utdm->si_regs);
++ if (utdm)
++ iounmap(utdm->si_regs);
+ free_utdm:
+ if (uhdlc_priv->tsa)
+ kfree(utdm);
+diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
+index c8f8fe5497a8c..ace016967ff0e 100644
+--- a/drivers/net/wireless/rndis_wlan.c
++++ b/drivers/net/wireless/rndis_wlan.c
+@@ -700,8 +700,8 @@ static int rndis_query_oid(struct usbnet *dev, u32 oid, void *data, int *len)
+ struct rndis_query *get;
+ struct rndis_query_c *get_c;
+ } u;
+- int ret, buflen;
+- int resplen, respoffs, copylen;
++ int ret;
++ size_t buflen, resplen, respoffs, copylen;
+
+ buflen = *len + sizeof(*u.get);
+ if (buflen < CONTROL_BUFFER_SIZE)
+@@ -736,22 +736,15 @@ static int rndis_query_oid(struct usbnet *dev, u32 oid, void *data, int *len)
+
+ if (respoffs > buflen) {
+ /* Device returned data offset outside buffer, error. */
+- netdev_dbg(dev->net, "%s(%s): received invalid "
+- "data offset: %d > %d\n", __func__,
+- oid_to_string(oid), respoffs, buflen);
++ netdev_dbg(dev->net,
++ "%s(%s): received invalid data offset: %zu > %zu\n",
++ __func__, oid_to_string(oid), respoffs, buflen);
+
+ ret = -EINVAL;
+ goto exit_unlock;
+ }
+
+- if ((resplen + respoffs) > buflen) {
+- /* Device would have returned more data if buffer would
+- * have been big enough. Copy just the bits that we got.
+- */
+- copylen = buflen - respoffs;
+- } else {
+- copylen = resplen;
+- }
++ copylen = min(resplen, buflen - respoffs);
+
+ if (copylen > *len)
+ copylen = *len;
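
The change above replaces the overflow-prone signed test (resplen + respoffs) > buflen with size_t arithmetic and a single min(): once respoffs is known to lie within the buffer, buflen - respoffs cannot wrap, so the copy length is simply the smaller of what the device claims and what the buffer actually holds, clamped to the caller's length. A standalone C illustration of that clamping follows; the function name and sample sizes are invented.

#include <stddef.h>
#include <stdio.h>

static size_t min_size(size_t a, size_t b)
{
	return a < b ? a : b;
}

/* Returns how many bytes may safely be copied out of a response buffer. */
static size_t safe_copylen(size_t buflen, size_t respoffs, size_t resplen,
			   size_t userlen)
{
	if (respoffs > buflen)
		return 0;	/* offset points outside the buffer: reject */

	/* Never read past the end of the buffer we actually received. */
	return min_size(min_size(resplen, buflen - respoffs), userlen);
}

int main(void)
{
	/* Device claims 4096 bytes of payload but only 256 fit in the buffer. */
	printf("%zu\n", safe_copylen(512, 256, 4096, 1024));	/* -> 256 */
	/* Offset beyond the buffer is rejected outright. */
	printf("%zu\n", safe_copylen(512, 600, 16, 1024));	/* -> 0 */
	return 0;
}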
+diff --git a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
+index eae865ff312c1..b5f7a93543b05 100644
+--- a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
++++ b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
+@@ -467,8 +467,10 @@ static int rockchip_usb2phy_power_on(struct phy *phy)
+ return ret;
+
+ ret = property_enable(base, &rport->port_cfg->phy_sus, false);
+- if (ret)
++ if (ret) {
++ clk_disable_unprepare(rphy->clk480m);
+ return ret;
++ }
+
+ /* waiting for the utmi_clk to become stable */
+ usleep_range(1500, 2000);
+diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
+index 59b78a181723b..6424bdb33d2f0 100644
+--- a/drivers/platform/x86/asus-nb-wmi.c
++++ b/drivers/platform/x86/asus-nb-wmi.c
+@@ -528,6 +528,7 @@ static const struct key_entry asus_nb_wmi_keymap[] = {
+ { KE_KEY, 0x30, { KEY_VOLUMEUP } },
+ { KE_KEY, 0x31, { KEY_VOLUMEDOWN } },
+ { KE_KEY, 0x32, { KEY_MUTE } },
++ { KE_KEY, 0x33, { KEY_SCREENLOCK } },
+ { KE_KEY, 0x35, { KEY_SCREENLOCK } },
+ { KE_KEY, 0x40, { KEY_PREVIOUSSONG } },
+ { KE_KEY, 0x41, { KEY_NEXTSONG } },
+diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
+index 515c66ca1aecb..61cb1a4a8257c 100644
+--- a/drivers/platform/x86/touchscreen_dmi.c
++++ b/drivers/platform/x86/touchscreen_dmi.c
+@@ -169,6 +169,23 @@ static const struct ts_dmi_data connect_tablet9_data = {
+ .properties = connect_tablet9_props,
+ };
+
++static const struct property_entry csl_panther_tab_hd_props[] = {
++ PROPERTY_ENTRY_U32("touchscreen-min-x", 1),
++ PROPERTY_ENTRY_U32("touchscreen-min-y", 20),
++ PROPERTY_ENTRY_U32("touchscreen-size-x", 1980),
++ PROPERTY_ENTRY_U32("touchscreen-size-y", 1526),
++ PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
++ PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
++ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-csl-panther-tab-hd.fw"),
++ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
++ { }
++};
++
++static const struct ts_dmi_data csl_panther_tab_hd_data = {
++ .acpi_name = "MSSL1680:00",
++ .properties = csl_panther_tab_hd_props,
++};
++
+ static const struct property_entry cube_iwork8_air_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 1),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 3),
+@@ -721,6 +738,14 @@ static const struct dmi_system_id touchscreen_dmi_table[] = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "Tablet 9"),
+ },
+ },
++ {
++ /* CSL Panther Tab HD */
++ .driver_data = (void *)&csl_panther_tab_hd_data,
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "CSL Computer GmbH & Co. KG"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "CSL Panther Tab HD"),
++ },
++ },
+ {
+ /* CUBE iwork8 Air */
+ .driver_data = (void *)&cube_iwork8_air_data,
+diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
+index 031aa4043c5ea..7135bbe5abb8a 100644
+--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
++++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
+@@ -1397,7 +1397,7 @@ static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba)
+ device->linkrate = phy->sas_phy.linkrate;
+
+ hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
+- } else
++ } else if (!port->port_attached)
+ port->id = 0xff;
+ }
+ }
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index ba125ed7e06a7..e670cce0cb6ef 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -5810,7 +5810,7 @@ static int hpsa_scsi_host_alloc(struct ctlr_info *h)
+ {
+ struct Scsi_Host *sh;
+
+- sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
++ sh = scsi_host_alloc(&hpsa_driver_template, sizeof(struct ctlr_info));
+ if (sh == NULL) {
+ dev_err(&h->pdev->dev, "scsi_host_alloc failed\n");
+ return -ENOMEM;
+diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
+index 2478ae471f4ee..6d6fc7de9cf3c 100644
+--- a/drivers/spi/spidev.c
++++ b/drivers/spi/spidev.c
+@@ -588,7 +588,6 @@ static int spidev_open(struct inode *inode, struct file *filp)
+ if (!spidev->tx_buffer) {
+ spidev->tx_buffer = kmalloc(bufsiz, GFP_KERNEL);
+ if (!spidev->tx_buffer) {
+- dev_dbg(&spidev->spi->dev, "open/ENOMEM\n");
+ status = -ENOMEM;
+ goto err_find_dev;
+ }
+@@ -597,7 +596,6 @@ static int spidev_open(struct inode *inode, struct file *filp)
+ if (!spidev->rx_buffer) {
+ spidev->rx_buffer = kmalloc(bufsiz, GFP_KERNEL);
+ if (!spidev->rx_buffer) {
+- dev_dbg(&spidev->spi->dev, "open/ENOMEM\n");
+ status = -ENOMEM;
+ goto err_alloc_rx_buf;
+ }
+diff --git a/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c b/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
+index c313c4f0e8563..9090f87b44913 100644
+--- a/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
++++ b/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
+@@ -43,11 +43,13 @@ static int int340x_thermal_get_trip_temp(struct thermal_zone_device *zone,
+ int trip, int *temp)
+ {
+ struct int34x_thermal_zone *d = zone->devdata;
+- int i;
++ int i, ret = 0;
+
+ if (d->override_ops && d->override_ops->get_trip_temp)
+ return d->override_ops->get_trip_temp(zone, trip, temp);
+
++ mutex_lock(&d->trip_mutex);
++
+ if (trip < d->aux_trip_nr)
+ *temp = d->aux_trips[trip];
+ else if (trip == d->crt_trip_id)
+@@ -65,10 +67,12 @@ static int int340x_thermal_get_trip_temp(struct thermal_zone_device *zone,
+ }
+ }
+ if (i == INT340X_THERMAL_MAX_ACT_TRIP_COUNT)
+- return -EINVAL;
++ ret = -EINVAL;
+ }
+
+- return 0;
++ mutex_unlock(&d->trip_mutex);
++
++ return ret;
+ }
+
+ static int int340x_thermal_get_trip_type(struct thermal_zone_device *zone,
+@@ -76,11 +80,13 @@ static int int340x_thermal_get_trip_type(struct thermal_zone_device *zone,
+ enum thermal_trip_type *type)
+ {
+ struct int34x_thermal_zone *d = zone->devdata;
+- int i;
++ int i, ret = 0;
+
+ if (d->override_ops && d->override_ops->get_trip_type)
+ return d->override_ops->get_trip_type(zone, trip, type);
+
++ mutex_lock(&d->trip_mutex);
++
+ if (trip < d->aux_trip_nr)
+ *type = THERMAL_TRIP_PASSIVE;
+ else if (trip == d->crt_trip_id)
+@@ -98,10 +104,12 @@ static int int340x_thermal_get_trip_type(struct thermal_zone_device *zone,
+ }
+ }
+ if (i == INT340X_THERMAL_MAX_ACT_TRIP_COUNT)
+- return -EINVAL;
++ ret = -EINVAL;
+ }
+
+- return 0;
++ mutex_unlock(&d->trip_mutex);
++
++ return ret;
+ }
+
+ static int int340x_thermal_set_trip_temp(struct thermal_zone_device *zone,
+@@ -173,6 +181,8 @@ int int340x_thermal_read_trips(struct int34x_thermal_zone *int34x_zone)
+ int trip_cnt = int34x_zone->aux_trip_nr;
+ int i;
+
++ mutex_lock(&int34x_zone->trip_mutex);
++
+ int34x_zone->crt_trip_id = -1;
+ if (!int340x_thermal_get_trip_config(int34x_zone->adev->handle, "_CRT",
+ &int34x_zone->crt_temp))
+@@ -200,6 +210,8 @@ int int340x_thermal_read_trips(struct int34x_thermal_zone *int34x_zone)
+ int34x_zone->act_trips[i].valid = true;
+ }
+
++ mutex_unlock(&int34x_zone->trip_mutex);
++
+ return trip_cnt;
+ }
+ EXPORT_SYMBOL_GPL(int340x_thermal_read_trips);
+@@ -223,6 +235,8 @@ struct int34x_thermal_zone *int340x_thermal_zone_add(struct acpi_device *adev,
+ if (!int34x_thermal_zone)
+ return ERR_PTR(-ENOMEM);
+
++ mutex_init(&int34x_thermal_zone->trip_mutex);
++
+ int34x_thermal_zone->adev = adev;
+ int34x_thermal_zone->override_ops = override_ops;
+
+@@ -269,6 +283,7 @@ err_thermal_zone:
+ acpi_lpat_free_conversion_table(int34x_thermal_zone->lpat_table);
+ kfree(int34x_thermal_zone->aux_trips);
+ err_trip_alloc:
++ mutex_destroy(&int34x_thermal_zone->trip_mutex);
+ kfree(int34x_thermal_zone);
+ return ERR_PTR(ret);
+ }
+@@ -280,6 +295,7 @@ void int340x_thermal_zone_remove(struct int34x_thermal_zone
+ thermal_zone_device_unregister(int34x_thermal_zone->zone);
+ acpi_lpat_free_conversion_table(int34x_thermal_zone->lpat_table);
+ kfree(int34x_thermal_zone->aux_trips);
++ mutex_destroy(&int34x_thermal_zone->trip_mutex);
+ kfree(int34x_thermal_zone);
+ }
+ EXPORT_SYMBOL_GPL(int340x_thermal_zone_remove);
+diff --git a/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.h b/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.h
+index 3b4971df1b33b..8f9872afd0d3c 100644
+--- a/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.h
++++ b/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.h
+@@ -32,6 +32,7 @@ struct int34x_thermal_zone {
+ struct thermal_zone_device_ops *override_ops;
+ void *priv_data;
+ struct acpi_lpat_conversion_table *lpat_table;
++ struct mutex trip_mutex;
+ };
+
+ struct int34x_thermal_zone *int340x_thermal_zone_add(struct acpi_device *,
+diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
+index 5fd4fc49aef9f..431ab6d07497f 100644
+--- a/drivers/usb/gadget/function/f_fs.c
++++ b/drivers/usb/gadget/function/f_fs.c
+@@ -278,6 +278,9 @@ static int __ffs_ep0_queue_wait(struct ffs_data *ffs, char *data, size_t len)
+ struct usb_request *req = ffs->ep0req;
+ int ret;
+
++ if (!req)
++ return -EINVAL;
++
+ req->zero = len < le16_to_cpu(ffs->ev.setup.wLength);
+
+ spin_unlock_irq(&ffs->ev.waitq.lock);
+@@ -1900,10 +1903,14 @@ static void functionfs_unbind(struct ffs_data *ffs)
+ ENTER();
+
+ if (!WARN_ON(!ffs->gadget)) {
++ /* dequeue before freeing ep0req */
++ usb_ep_dequeue(ffs->gadget->ep0, ffs->ep0req);
++ mutex_lock(&ffs->mutex);
+ usb_ep_free_request(ffs->gadget->ep0, ffs->ep0req);
+ ffs->ep0req = NULL;
+ ffs->gadget = NULL;
+ clear_bit(FFS_FL_BOUND, &ffs->flags);
++ mutex_unlock(&ffs->mutex);
+ ffs_data_put(ffs);
+ }
+ }
+diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
+index 84cfa85442852..fa320006b04d2 100644
+--- a/drivers/usb/host/xhci-plat.c
++++ b/drivers/usb/host/xhci-plat.c
+@@ -276,7 +276,7 @@ static int xhci_plat_probe(struct platform_device *pdev)
+ *priv = *priv_match;
+ }
+
+- device_wakeup_enable(hcd->self.controller);
++ device_set_wakeup_capable(&pdev->dev, true);
+
+ xhci->main_hcd = hcd;
+ xhci->shared_hcd = __usb_create_hcd(driver, sysdev, &pdev->dev,
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index 35d96796854d6..b8915790a20af 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -693,6 +693,7 @@ int xhci_run(struct usb_hcd *hcd)
+ if (ret)
+ xhci_free_command(xhci, command);
+ }
++ set_bit(HCD_FLAG_DEFER_RH_REGISTER, &hcd->flags);
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "Finished xhci_run for USB2 roothub");
+
+diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
+index e58c7592008d7..2a7970a10533e 100644
+--- a/drivers/w1/w1.c
++++ b/drivers/w1/w1.c
+@@ -1131,6 +1131,8 @@ int w1_process(void *data)
+ /* remainder if it woke up early */
+ unsigned long jremain = 0;
+
++ atomic_inc(&dev->refcnt);
++
+ for (;;) {
+
+ if (!jremain && dev->search_count) {
+@@ -1158,8 +1160,10 @@ int w1_process(void *data)
+ */
+ mutex_unlock(&dev->list_mutex);
+
+- if (kthread_should_stop())
++ if (kthread_should_stop()) {
++ __set_current_state(TASK_RUNNING);
+ break;
++ }
+
+ /* Only sleep when the search is active. */
+ if (dev->search_count) {
+diff --git a/drivers/w1/w1_int.c b/drivers/w1/w1_int.c
+index b3e1792d9c49f..3a71c5eb2f837 100644
+--- a/drivers/w1/w1_int.c
++++ b/drivers/w1/w1_int.c
+@@ -51,10 +51,9 @@ static struct w1_master *w1_alloc_dev(u32 id, int slave_count, int slave_ttl,
+ dev->search_count = w1_search_count;
+ dev->enable_pullup = w1_enable_pullup;
+
+- /* 1 for w1_process to decrement
+- * 1 for __w1_remove_master_device to decrement
++ /* For __w1_remove_master_device to decrement
+ */
+- atomic_set(&dev->refcnt, 2);
++ atomic_set(&dev->refcnt, 1);
+
+ INIT_LIST_HEAD(&dev->slist);
+ INIT_LIST_HEAD(&dev->async_list);
+diff --git a/fs/affs/file.c b/fs/affs/file.c
+index ba084b0b214b9..82bb38370aa9a 100644
+--- a/fs/affs/file.c
++++ b/fs/affs/file.c
+@@ -878,7 +878,7 @@ affs_truncate(struct inode *inode)
+ if (inode->i_size > AFFS_I(inode)->mmu_private) {
+ struct address_space *mapping = inode->i_mapping;
+ struct page *page;
+- void *fsdata;
++ void *fsdata = NULL;
+ loff_t isize = inode->i_size;
+ int res;
+
+diff --git a/fs/cifs/smbdirect.c b/fs/cifs/smbdirect.c
+index 5b1b97e9e0c9b..2cea6c25d1b0e 100644
+--- a/fs/cifs/smbdirect.c
++++ b/fs/cifs/smbdirect.c
+@@ -1478,6 +1478,7 @@ void smbd_destroy(struct TCP_Server_Info *server)
+ destroy_workqueue(info->workqueue);
+ log_rdma_event(INFO, "rdma session destroyed\n");
+ kfree(info);
++ server->smbd_conn = NULL;
+ }
+
+ /*
+diff --git a/fs/nfsd/netns.h b/fs/nfsd/netns.h
+index ed53e206a2996..82329b5102c69 100644
+--- a/fs/nfsd/netns.h
++++ b/fs/nfsd/netns.h
+@@ -42,9 +42,6 @@ struct nfsd_net {
+ bool grace_ended;
+ time_t boot_time;
+
+- /* internal mount of the "nfsd" pseudofilesystem: */
+- struct vfsmount *nfsd_mnt;
+-
+ struct dentry *nfsd_client_dir;
+
+ /*
+@@ -121,6 +118,9 @@ struct nfsd_net {
+ wait_queue_head_t ntf_wq;
+ atomic_t ntf_refcnt;
+
++ /* Allow umount to wait for nfsd state cleanup */
++ struct completion nfsd_shutdown_complete;
++
+ /*
+ * clientid and stateid data for construction of net unique COPY
+ * stateids.
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index de2c3809d15aa..5922eceb01762 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -7754,14 +7754,9 @@ nfs4_state_start_net(struct net *net)
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+ int ret;
+
+- ret = get_nfsdfs(net);
+- if (ret)
+- return ret;
+ ret = nfs4_state_create_net(net);
+- if (ret) {
+- mntput(nn->nfsd_mnt);
++ if (ret)
+ return ret;
+- }
+ locks_start_grace(net, &nn->nfsd4_manager);
+ nfsd4_client_tracking_init(net);
+ if (nn->track_reclaim_completes && nn->reclaim_str_hashtbl_size == 0)
+@@ -7830,7 +7825,6 @@ nfs4_state_shutdown_net(struct net *net)
+
+ nfsd4_client_tracking_exit(net);
+ nfs4_state_destroy_net(net);
+- mntput(nn->nfsd_mnt);
+ }
+
+ void
+diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
+index 055cc0458f270..a2454739b1cfa 100644
+--- a/fs/nfsd/nfsctl.c
++++ b/fs/nfsd/nfsctl.c
+@@ -1417,6 +1417,8 @@ static void nfsd_umount(struct super_block *sb)
+ {
+ struct net *net = sb->s_fs_info;
+
++ nfsd_shutdown_threads(net);
++
+ kill_litter_super(sb);
+ put_net(net);
+ }
+@@ -1429,18 +1431,6 @@ static struct file_system_type nfsd_fs_type = {
+ };
+ MODULE_ALIAS_FS("nfsd");
+
+-int get_nfsdfs(struct net *net)
+-{
+- struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+- struct vfsmount *mnt;
+-
+- mnt = vfs_kern_mount(&nfsd_fs_type, SB_KERNMOUNT, "nfsd", NULL);
+- if (IS_ERR(mnt))
+- return PTR_ERR(mnt);
+- nn->nfsd_mnt = mnt;
+- return 0;
+-}
+-
+ #ifdef CONFIG_PROC_FS
+ static int create_proc_exports_entry(void)
+ {
+diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h
+index 4ff0c5318a02b..3ae9811c0bb98 100644
+--- a/fs/nfsd/nfsd.h
++++ b/fs/nfsd/nfsd.h
+@@ -84,11 +84,10 @@ int nfsd_get_nrthreads(int n, int *, struct net *);
+ int nfsd_set_nrthreads(int n, int *, struct net *);
+ int nfsd_pool_stats_open(struct inode *, struct file *);
+ int nfsd_pool_stats_release(struct inode *, struct file *);
++void nfsd_shutdown_threads(struct net *net);
+
+ void nfsd_destroy(struct net *net);
+
+-int get_nfsdfs(struct net *);
+-
+ struct nfsdfs_client {
+ struct kref cl_ref;
+ void (*cl_release)(struct kref *kref);
+diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
+index 70684c7ae94bd..969a227186fa8 100644
+--- a/fs/nfsd/nfssvc.c
++++ b/fs/nfsd/nfssvc.c
+@@ -594,6 +594,37 @@ static const struct svc_serv_ops nfsd_thread_sv_ops = {
+ .svo_module = THIS_MODULE,
+ };
+
++static void nfsd_complete_shutdown(struct net *net)
++{
++ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
++
++ WARN_ON(!mutex_is_locked(&nfsd_mutex));
++
++ nn->nfsd_serv = NULL;
++ complete(&nn->nfsd_shutdown_complete);
++}
++
++void nfsd_shutdown_threads(struct net *net)
++{
++ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
++ struct svc_serv *serv;
++
++ mutex_lock(&nfsd_mutex);
++ serv = nn->nfsd_serv;
++ if (serv == NULL) {
++ mutex_unlock(&nfsd_mutex);
++ return;
++ }
++
++ svc_get(serv);
++ /* Kill outstanding nfsd threads */
++ serv->sv_ops->svo_setup(serv, NULL, 0);
++ nfsd_destroy(net);
++ mutex_unlock(&nfsd_mutex);
++ /* Wait for shutdown of nfsd_serv to complete */
++ wait_for_completion(&nn->nfsd_shutdown_complete);
++}
++
+ int nfsd_create_serv(struct net *net)
+ {
+ int error;
+@@ -611,11 +642,13 @@ int nfsd_create_serv(struct net *net)
+ &nfsd_thread_sv_ops);
+ if (nn->nfsd_serv == NULL)
+ return -ENOMEM;
++ init_completion(&nn->nfsd_shutdown_complete);
+
+ nn->nfsd_serv->sv_maxconn = nn->max_connections;
+ error = svc_bind(nn->nfsd_serv, net);
+ if (error < 0) {
+ svc_destroy(nn->nfsd_serv);
++ nfsd_complete_shutdown(net);
+ return error;
+ }
+
+@@ -664,7 +697,7 @@ void nfsd_destroy(struct net *net)
+ svc_shutdown_net(nn->nfsd_serv, net);
+ svc_destroy(nn->nfsd_serv);
+ if (destroy)
+- nn->nfsd_serv = NULL;
++ nfsd_complete_shutdown(net);
+ }
+
+ int nfsd_set_nrthreads(int n, int *nthreads, struct net *net)
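
The nfsd changes above hang unmount on a completion: nfsd_complete_shutdown() fires once nfsd_serv is gone, and nfsd_shutdown_threads() blocks on nfsd_shutdown_complete so the pseudo-filesystem is only torn down afterwards. The same handshake expressed in plain POSIX C, with a condition variable standing in for the kernel completion; all names here are illustrative.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t done_cv = PTHREAD_COND_INITIALIZER;
static bool shutdown_done;

static void complete_shutdown(void)		/* ~ nfsd_complete_shutdown() */
{
	pthread_mutex_lock(&lock);
	shutdown_done = true;
	pthread_cond_signal(&done_cv);
	pthread_mutex_unlock(&lock);
}

static void *server_teardown(void *arg)
{
	(void)arg;
	puts("tearing down server threads");
	complete_shutdown();
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, server_teardown, NULL);

	/* ~ wait_for_completion(&nn->nfsd_shutdown_complete) */
	pthread_mutex_lock(&lock);
	while (!shutdown_done)
		pthread_cond_wait(&done_cv, &lock);
	pthread_mutex_unlock(&lock);

	puts("safe to finish unmount");
	pthread_join(t, NULL);
	return 0;
}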
+diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
+index d80989b6c3448..f4264dd4ea31b 100644
+--- a/fs/proc/proc_sysctl.c
++++ b/fs/proc/proc_sysctl.c
+@@ -14,6 +14,7 @@
+ #include <linux/mm.h>
+ #include <linux/module.h>
+ #include <linux/bpf-cgroup.h>
++#include <linux/kmemleak.h>
+ #include "internal.h"
+
+ static const struct dentry_operations proc_sys_dentry_operations;
+@@ -1397,6 +1398,38 @@ struct ctl_table_header *register_sysctl(const char *path, struct ctl_table *tab
+ }
+ EXPORT_SYMBOL(register_sysctl);
+
++/**
++ * __register_sysctl_init() - register sysctl table to path
++ * @path: path name for sysctl base
++ * @table: This is the sysctl table that needs to be registered to the path
++ * @table_name: The name of sysctl table, only used for log printing when
++ * registration fails
++ *
++ * The sysctl interface is used by userspace to query or modify at runtime
++ * a predefined value set on a variable. These variables however have default
++ * values pre-set. Code which depends on these variables will always work even
++ * if register_sysctl() fails. If register_sysctl() fails you'd just lose the
++ * ability to query or modify the sysctls dynamically at run time. Chances of
++ * register_sysctl() failing on init are extremely low, and so for both reasons
++ * this function does not return any error as it is used by initialization code.
++ *
++ * Context: Can only be called after your respective sysctl base path has been
++ * registered. So for instance, most base directories are registered early on
++ * init before init levels are processed through proc_sys_init() and
++ * sysctl_init().
++ */
++void __init __register_sysctl_init(const char *path, struct ctl_table *table,
++ const char *table_name)
++{
++ struct ctl_table_header *hdr = register_sysctl(path, table);
++
++ if (unlikely(!hdr)) {
++ pr_err("failed when register_sysctl %s to %s\n", table_name, path);
++ return;
++ }
++ kmemleak_not_leak(hdr);
++}
++
+ static char *append_path(const char *path, char *pos, const char *name)
+ {
+ int namelen;
+diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
+index 913f5af9bf248..0ebb6e6849082 100644
+--- a/fs/reiserfs/super.c
++++ b/fs/reiserfs/super.c
+@@ -1437,7 +1437,6 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
+ unsigned long safe_mask = 0;
+ unsigned int commit_max_age = (unsigned int)-1;
+ struct reiserfs_journal *journal = SB_JOURNAL(s);
+- char *new_opts;
+ int err;
+ char *qf_names[REISERFS_MAXQUOTAS];
+ unsigned int qfmt = 0;
+@@ -1445,10 +1444,6 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
+ int i;
+ #endif
+
+- new_opts = kstrdup(arg, GFP_KERNEL);
+- if (arg && !new_opts)
+- return -ENOMEM;
+-
+ sync_filesystem(s);
+ reiserfs_write_lock(s);
+
+@@ -1599,7 +1594,6 @@ out_ok_unlocked:
+ out_err_unlock:
+ reiserfs_write_unlock(s);
+ out_err:
+- kfree(new_opts);
+ return err;
+ }
+
+diff --git a/include/linux/clk.h b/include/linux/clk.h
+index 18b7b95a8253c..87730337e28f8 100644
+--- a/include/linux/clk.h
++++ b/include/linux/clk.h
+@@ -418,6 +418,47 @@ int __must_check devm_clk_bulk_get_all(struct device *dev,
+ */
+ struct clk *devm_clk_get(struct device *dev, const char *id);
+
++/**
++ * devm_clk_get_prepared - devm_clk_get() + clk_prepare()
++ * @dev: device for clock "consumer"
++ * @id: clock consumer ID
++ *
++ * Context: May sleep.
++ *
++ * Return: a struct clk corresponding to the clock producer, or
++ * valid IS_ERR() condition containing errno. The implementation
++ * uses @dev and @id to determine the clock consumer, and thereby
++ * the clock producer. (IOW, @id may be identical strings, but
++ * clk_get may return different clock producers depending on @dev.)
++ *
++ * The returned clk (if valid) is prepared. Drivers must however assume
++ * that the clock is not enabled.
++ *
++ * The clock will automatically be unprepared and freed when the device
++ * is unbound from the bus.
++ */
++struct clk *devm_clk_get_prepared(struct device *dev, const char *id);
++
++/**
++ * devm_clk_get_enabled - devm_clk_get() + clk_prepare_enable()
++ * @dev: device for clock "consumer"
++ * @id: clock consumer ID
++ *
++ * Context: May sleep.
++ *
++ * Return: a struct clk corresponding to the clock producer, or
++ * valid IS_ERR() condition containing errno. The implementation
++ * uses @dev and @id to determine the clock consumer, and thereby
++ * the clock producer. (IOW, @id may be identical strings, but
++ * clk_get may return different clock producers depending on @dev.)
++ *
++ * The returned clk (if valid) is prepared and enabled.
++ *
++ * The clock will automatically be disabled, unprepared and freed
++ * when the device is unbound from the bus.
++ */
++struct clk *devm_clk_get_enabled(struct device *dev, const char *id);
++
+ /**
+ * devm_clk_get_optional - lookup and obtain a managed reference to an optional
+ * clock producer.
+@@ -429,6 +470,50 @@ struct clk *devm_clk_get(struct device *dev, const char *id);
+ */
+ struct clk *devm_clk_get_optional(struct device *dev, const char *id);
+
++/**
++ * devm_clk_get_optional_prepared - devm_clk_get_optional() + clk_prepare()
++ * @dev: device for clock "consumer"
++ * @id: clock consumer ID
++ *
++ * Context: May sleep.
++ *
++ * Return: a struct clk corresponding to the clock producer, or
++ * valid IS_ERR() condition containing errno. The implementation
++ * uses @dev and @id to determine the clock consumer, and thereby
++ * the clock producer. If no such clk is found, it returns NULL
++ * which serves as a dummy clk. That's the only difference compared
++ * to devm_clk_get_prepared().
++ *
++ * The returned clk (if valid) is prepared. Drivers must however
++ * assume that the clock is not enabled.
++ *
++ * The clock will automatically be unprepared and freed when the
++ * device is unbound from the bus.
++ */
++struct clk *devm_clk_get_optional_prepared(struct device *dev, const char *id);
++
++/**
++ * devm_clk_get_optional_enabled - devm_clk_get_optional() +
++ * clk_prepare_enable()
++ * @dev: device for clock "consumer"
++ * @id: clock consumer ID
++ *
++ * Context: May sleep.
++ *
++ * Return: a struct clk corresponding to the clock producer, or
++ * valid IS_ERR() condition containing errno. The implementation
++ * uses @dev and @id to determine the clock consumer, and thereby
++ * the clock producer. If no such clk is found, it returns NULL
++ * which serves as a dummy clk. That's the only difference compared
++ * to devm_clk_get_enabled().
++ *
++ * The returned clk (if valid) is prepared and enabled.
++ *
++ * The clock will automatically be disabled, unprepared and freed
++ * when the device is unbound from the bus.
++ */
++struct clk *devm_clk_get_optional_enabled(struct device *dev, const char *id);
++
+ /**
+ * devm_get_clk_from_child - lookup and obtain a managed reference to a
+ * clock producer from child node.
+@@ -770,12 +855,36 @@ static inline struct clk *devm_clk_get(struct device *dev, const char *id)
+ return NULL;
+ }
+
++static inline struct clk *devm_clk_get_prepared(struct device *dev,
++ const char *id)
++{
++ return NULL;
++}
++
++static inline struct clk *devm_clk_get_enabled(struct device *dev,
++ const char *id)
++{
++ return NULL;
++}
++
+ static inline struct clk *devm_clk_get_optional(struct device *dev,
+ const char *id)
+ {
+ return NULL;
+ }
+
++static inline struct clk *devm_clk_get_optional_prepared(struct device *dev,
++ const char *id)
++{
++ return NULL;
++}
++
++static inline struct clk *devm_clk_get_optional_enabled(struct device *dev,
++ const char *id)
++{
++ return NULL;
++}
++
+ static inline int __must_check devm_clk_bulk_get(struct device *dev, int num_clks,
+ struct clk_bulk_data *clks)
+ {
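
The devm_clk_get_prepared()/devm_clk_get_enabled() declarations added above only state the consumer-facing contract. One plausible way such a helper can be composed from existing primitives is devm_clk_get() plus clk_prepare_enable() plus a devm action that undoes the enable on unbind; the sketch below is written under that assumption and is not the actual implementation behind these declarations.

/* Sketch only: not the real implementation of devm_clk_get_enabled(). */
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static void sketch_clk_disable_unprepare(void *clk)
{
	clk_disable_unprepare(clk);
}

static struct clk *sketch_devm_clk_get_enabled(struct device *dev,
					       const char *id)
{
	struct clk *clk;
	int ret;

	clk = devm_clk_get(dev, id);
	if (IS_ERR(clk))
		return clk;

	ret = clk_prepare_enable(clk);
	if (ret)
		return ERR_PTR(ret);

	/* Undo the enable automatically when the device is unbound. */
	ret = devm_add_action_or_reset(dev, sketch_clk_disable_unprepare, clk);
	if (ret)
		return ERR_PTR(ret);

	return clk;
}

With a helper of this shape, callers such as the atmel-sdramc and mvebu-devbus hunks earlier in the patch can drop their explicit clk_prepare_enable() calls, which is exactly what they do.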
+diff --git a/include/linux/kernel.h b/include/linux/kernel.h
+index 77c86a2236daf..1fdb251947ed4 100644
+--- a/include/linux/kernel.h
++++ b/include/linux/kernel.h
+@@ -321,6 +321,7 @@ extern long (*panic_blink)(int state);
+ __printf(1, 2)
+ void panic(const char *fmt, ...) __noreturn __cold;
+ void nmi_panic(struct pt_regs *regs, const char *msg);
++void check_panic_on_warn(const char *origin);
+ extern void oops_enter(void);
+ extern void oops_exit(void);
+ void print_oops_end_marker(void);
+diff --git a/include/linux/mdio/mdio-i2c.h b/include/linux/mdio/mdio-i2c.h
+new file mode 100644
+index 0000000000000..751dab281f57e
+--- /dev/null
++++ b/include/linux/mdio/mdio-i2c.h
+@@ -0,0 +1,16 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * MDIO I2C bridge
++ *
++ * Copyright (C) 2015 Russell King
++ */
++#ifndef MDIO_I2C_H
++#define MDIO_I2C_H
++
++struct device;
++struct i2c_adapter;
++struct mii_bus;
++
++struct mii_bus *mdio_i2c_alloc(struct device *parent, struct i2c_adapter *i2c);
++
++#endif
+diff --git a/include/linux/mdio/mdio-xgene.h b/include/linux/mdio/mdio-xgene.h
+new file mode 100644
+index 0000000000000..b1f5ccb4ad9c3
+--- /dev/null
++++ b/include/linux/mdio/mdio-xgene.h
+@@ -0,0 +1,130 @@
++// SPDX-License-Identifier: GPL-2.0+
++/* Applied Micro X-Gene SoC MDIO Driver
++ *
++ * Copyright (c) 2016, Applied Micro Circuits Corporation
++ * Author: Iyappan Subramanian <isubramanian@apm.com>
++ */
++
++#ifndef __MDIO_XGENE_H__
++#define __MDIO_XGENE_H__
++
++#define BLOCK_XG_MDIO_CSR_OFFSET 0x5000
++#define BLOCK_DIAG_CSR_OFFSET 0xd000
++#define XGENET_CONFIG_REG_ADDR 0x20
++
++#define MAC_ADDR_REG_OFFSET 0x00
++#define MAC_COMMAND_REG_OFFSET 0x04
++#define MAC_WRITE_REG_OFFSET 0x08
++#define MAC_READ_REG_OFFSET 0x0c
++#define MAC_COMMAND_DONE_REG_OFFSET 0x10
++
++#define CLKEN_OFFSET 0x08
++#define SRST_OFFSET 0x00
++
++#define MENET_CFG_MEM_RAM_SHUTDOWN_ADDR 0x70
++#define MENET_BLOCK_MEM_RDY_ADDR 0x74
++
++#define MAC_CONFIG_1_ADDR 0x00
++#define MII_MGMT_COMMAND_ADDR 0x24
++#define MII_MGMT_ADDRESS_ADDR 0x28
++#define MII_MGMT_CONTROL_ADDR 0x2c
++#define MII_MGMT_STATUS_ADDR 0x30
++#define MII_MGMT_INDICATORS_ADDR 0x34
++#define SOFT_RESET BIT(31)
++
++#define MII_MGMT_CONFIG_ADDR 0x20
++#define MII_MGMT_COMMAND_ADDR 0x24
++#define MII_MGMT_ADDRESS_ADDR 0x28
++#define MII_MGMT_CONTROL_ADDR 0x2c
++#define MII_MGMT_STATUS_ADDR 0x30
++#define MII_MGMT_INDICATORS_ADDR 0x34
++
++#define MIIM_COMMAND_ADDR 0x20
++#define MIIM_FIELD_ADDR 0x24
++#define MIIM_CONFIGURATION_ADDR 0x28
++#define MIIM_LINKFAILVECTOR_ADDR 0x2c
++#define MIIM_INDICATOR_ADDR 0x30
++#define MIIMRD_FIELD_ADDR 0x34
++
++#define MDIO_CSR_OFFSET 0x5000
++
++#define REG_ADDR_POS 0
++#define REG_ADDR_LEN 5
++#define PHY_ADDR_POS 8
++#define PHY_ADDR_LEN 5
++
++#define HSTMIIMWRDAT_POS 0
++#define HSTMIIMWRDAT_LEN 16
++#define HSTPHYADX_POS 23
++#define HSTPHYADX_LEN 5
++#define HSTREGADX_POS 18
++#define HSTREGADX_LEN 5
++#define HSTLDCMD BIT(3)
++#define HSTMIIMCMD_POS 0
++#define HSTMIIMCMD_LEN 3
++
++#define BUSY_MASK BIT(0)
++#define READ_CYCLE_MASK BIT(0)
++
++enum xgene_enet_cmd {
++ XGENE_ENET_WR_CMD = BIT(31),
++ XGENE_ENET_RD_CMD = BIT(30)
++};
++
++enum {
++ MIIM_CMD_IDLE,
++ MIIM_CMD_LEGACY_WRITE,
++ MIIM_CMD_LEGACY_READ,
++};
++
++enum xgene_mdio_id {
++ XGENE_MDIO_RGMII = 1,
++ XGENE_MDIO_XFI
++};
++
++struct xgene_mdio_pdata {
++ struct clk *clk;
++ struct device *dev;
++ void __iomem *mac_csr_addr;
++ void __iomem *diag_csr_addr;
++ void __iomem *mdio_csr_addr;
++ struct mii_bus *mdio_bus;
++ int mdio_id;
++ spinlock_t mac_lock; /* mac lock */
++};
++
++/* Set the specified value into a bit-field defined by its starting position
++ * and length within a single u64.
++ */
++static inline u64 xgene_enet_set_field_value(int pos, int len, u64 val)
++{
++ return (val & ((1ULL << len) - 1)) << pos;
++}
++
++#define SET_VAL(field, val) \
++ xgene_enet_set_field_value(field ## _POS, field ## _LEN, val)
++
++#define SET_BIT(field) \
++ xgene_enet_set_field_value(field ## _POS, 1, 1)
++
++/* Get the value from a bit-field defined by its starting position
++ * and length within the specified u64.
++ */
++static inline u64 xgene_enet_get_field_value(int pos, int len, u64 src)
++{
++ return (src >> pos) & ((1ULL << len) - 1);
++}
++
++#define GET_VAL(field, src) \
++ xgene_enet_get_field_value(field ## _POS, field ## _LEN, src)
++
++#define GET_BIT(field, src) \
++ xgene_enet_get_field_value(field ## _POS, 1, src)
++
++u32 xgene_mdio_rd_mac(struct xgene_mdio_pdata *pdata, u32 rd_addr);
++void xgene_mdio_wr_mac(struct xgene_mdio_pdata *pdata, u32 wr_addr, u32 data);
++int xgene_mdio_rgmii_read(struct mii_bus *bus, int phy_id, int reg);
++int xgene_mdio_rgmii_write(struct mii_bus *bus, int phy_id, int reg, u16 data);
++struct phy_device *xgene_enet_phy_register(struct mii_bus *bus, int phy_addr);
++
++#endif /* __MDIO_XGENE_H__ */
+diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
+index 36f3011ab6013..6f33a07858cf6 100644
+--- a/include/linux/sched/task.h
++++ b/include/linux/sched/task.h
+@@ -51,6 +51,7 @@ extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
+ extern void sched_dead(struct task_struct *p);
+
+ void __noreturn do_task_dead(void);
++void __noreturn make_task_dead(int signr);
+
+ extern void proc_caches_init(void);
+
+diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
+index 6df477329b76e..aa615a0863f5c 100644
+--- a/include/linux/sysctl.h
++++ b/include/linux/sysctl.h
+@@ -208,6 +208,9 @@ struct ctl_table_header *register_sysctl_paths(const struct ctl_path *path,
+ void unregister_sysctl_table(struct ctl_table_header * table);
+
+ extern int sysctl_init(void);
++extern void __register_sysctl_init(const char *path, struct ctl_table *table,
++ const char *table_name);
++#define register_sysctl_init(path, table) __register_sysctl_init(path, table, #table)
+
+ extern struct ctl_table sysctl_mount_point[];
+
+diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
+index 1ee396ce0eda8..e8034756cbf8e 100644
+--- a/include/net/sch_generic.h
++++ b/include/net/sch_generic.h
+@@ -1334,4 +1334,11 @@ static inline void skb_tc_reinsert(struct sk_buff *skb, struct tcf_result *res)
+ qstats_overlimit_inc(res->qstats);
+ }
+
++/* Make sure qdisc is no longer in SCHED state. */
++static inline void qdisc_synchronize(const struct Qdisc *q)
++{
++ while (test_bit(__QDISC_STATE_SCHED, &q->state))
++ msleep(1);
++}
++
+ #endif
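qdisc_synchronize() simply sleeps in 1 ms steps until __QDISC_STATE_SCHED clears, i.e. until the qdisc is no longer queued for the transmit softirq. Its only user added by this patch is taprio_destroy() further down; the teardown ordering there, reduced to a sketch with a hypothetical private struct:

/* Sketch of the destroy-path ordering used by the taprio hunk below. */
static void example_destroy(struct Qdisc *sch)
{
        struct example_sched *q = qdisc_priv(sch);  /* hypothetical priv type */

        hrtimer_cancel(&q->advance_timer);  /* no new runs get scheduled */
        qdisc_synchronize(sch);             /* wait out an already-queued run */

        /* only now is it safe to tear down what the run path still uses */
}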
+diff --git a/include/net/sock.h b/include/net/sock.h
+index f508e86a2021a..5fa255b1e0a65 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -300,7 +300,7 @@ struct bpf_sk_storage;
+ * @sk_tskey: counter to disambiguate concurrent tstamp requests
+ * @sk_zckey: counter to order MSG_ZEROCOPY notifications
+ * @sk_socket: Identd and reporting IO signals
+- * @sk_user_data: RPC layer private data
++ * @sk_user_data: RPC layer private data. Write-protected by @sk_callback_lock.
+ * @sk_frag: cached page frag
+ * @sk_peek_off: current peek_offset value
+ * @sk_send_head: front of stuff to transmit
+diff --git a/include/uapi/linux/netfilter/nf_conntrack_sctp.h b/include/uapi/linux/netfilter/nf_conntrack_sctp.h
+index edc6ddab0de6a..2d6f80d75ae74 100644
+--- a/include/uapi/linux/netfilter/nf_conntrack_sctp.h
++++ b/include/uapi/linux/netfilter/nf_conntrack_sctp.h
+@@ -15,7 +15,7 @@ enum sctp_conntrack {
+ SCTP_CONNTRACK_SHUTDOWN_RECD,
+ SCTP_CONNTRACK_SHUTDOWN_ACK_SENT,
+ SCTP_CONNTRACK_HEARTBEAT_SENT,
+- SCTP_CONNTRACK_HEARTBEAT_ACKED,
++ SCTP_CONNTRACK_HEARTBEAT_ACKED, /* no longer used */
+ SCTP_CONNTRACK_MAX
+ };
+
+diff --git a/include/uapi/linux/netfilter/nfnetlink_cttimeout.h b/include/uapi/linux/netfilter/nfnetlink_cttimeout.h
+index 6b20fb22717b2..aa805e6d4e284 100644
+--- a/include/uapi/linux/netfilter/nfnetlink_cttimeout.h
++++ b/include/uapi/linux/netfilter/nfnetlink_cttimeout.h
+@@ -94,7 +94,7 @@ enum ctattr_timeout_sctp {
+ CTA_TIMEOUT_SCTP_SHUTDOWN_RECD,
+ CTA_TIMEOUT_SCTP_SHUTDOWN_ACK_SENT,
+ CTA_TIMEOUT_SCTP_HEARTBEAT_SENT,
+- CTA_TIMEOUT_SCTP_HEARTBEAT_ACKED,
++ CTA_TIMEOUT_SCTP_HEARTBEAT_ACKED, /* no longer used */
+ __CTA_TIMEOUT_SCTP_MAX
+ };
+ #define CTA_TIMEOUT_SCTP_MAX (__CTA_TIMEOUT_SCTP_MAX - 1)
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 32b32ecad770d..ca7e05ddbb46e 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -1924,7 +1924,9 @@ static int check_stack_write(struct bpf_verifier_env *env,
+ bool sanitize = reg && is_spillable_regtype(reg->type);
+
+ for (i = 0; i < size; i++) {
+- if (state->stack[spi].slot_type[i] == STACK_INVALID) {
++ u8 type = state->stack[spi].slot_type[i];
++
++ if (type != STACK_MISC && type != STACK_ZERO) {
+ sanitize = true;
+ break;
+ }
+diff --git a/kernel/exit.c b/kernel/exit.c
+index ece64771a31f5..563bdaa766945 100644
+--- a/kernel/exit.c
++++ b/kernel/exit.c
+@@ -63,12 +63,59 @@
+ #include <linux/random.h>
+ #include <linux/rcuwait.h>
+ #include <linux/compat.h>
++#include <linux/sysfs.h>
+
+ #include <linux/uaccess.h>
+ #include <asm/unistd.h>
+ #include <asm/pgtable.h>
+ #include <asm/mmu_context.h>
+
++/*
++ * The default value should be high enough to not crash a system that randomly
++ * crashes its kernel from time to time, but low enough to at least not permit
++ * overflowing 32-bit refcounts or the ldsem writer count.
++ */
++static unsigned int oops_limit = 10000;
++
++#ifdef CONFIG_SYSCTL
++static struct ctl_table kern_exit_table[] = {
++ {
++ .procname = "oops_limit",
++ .data = &oops_limit,
++ .maxlen = sizeof(oops_limit),
++ .mode = 0644,
++ .proc_handler = proc_douintvec,
++ },
++ { }
++};
++
++static __init int kernel_exit_sysctls_init(void)
++{
++ register_sysctl_init("kernel", kern_exit_table);
++ return 0;
++}
++late_initcall(kernel_exit_sysctls_init);
++#endif
++
++static atomic_t oops_count = ATOMIC_INIT(0);
++
++#ifdef CONFIG_SYSFS
++static ssize_t oops_count_show(struct kobject *kobj, struct kobj_attribute *attr,
++ char *page)
++{
++ return sysfs_emit(page, "%d\n", atomic_read(&oops_count));
++}
++
++static struct kobj_attribute oops_count_attr = __ATTR_RO(oops_count);
++
++static __init int kernel_exit_sysfs_init(void)
++{
++ sysfs_add_file_to_group(kernel_kobj, &oops_count_attr.attr, NULL);
++ return 0;
++}
++late_initcall(kernel_exit_sysfs_init);
++#endif
++
+ static void __unhash_process(struct task_struct *p, bool group_dead)
+ {
+ nr_threads--;
+@@ -864,6 +911,31 @@ void __noreturn do_exit(long code)
+ }
+ EXPORT_SYMBOL_GPL(do_exit);
+
++void __noreturn make_task_dead(int signr)
++{
++ /*
++ * Take the task off the cpu after something catastrophic has
++ * happened.
++ */
++ unsigned int limit;
++
++ /*
++ * Every time the system oopses, if the oops happens while a reference
++ * to an object was held, the reference leaks.
++ * If the oops doesn't also leak memory, repeated oopsing can cause
++ * reference counters to wrap around (if they're not using refcount_t).
++ * This means that repeated oopsing can make unexploitable-looking bugs
++ * exploitable through repeated oopsing.
++ * To make sure this can't happen, place an upper bound on how often the
++ * kernel may oops without panic().
++ */
++ limit = READ_ONCE(oops_limit);
++ if (atomic_inc_return(&oops_count) >= limit && limit)
++ panic("Oopsed too often (kernel.oops_limit is %d)", limit);
++
++ do_exit(signr);
++}
++
+ void complete_and_exit(struct completion *comp, long code)
+ {
+ if (comp)
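The limit test in make_task_dead() above reads oops_limit once (READ_ONCE) so the comparison and the panic message stay consistent, and the trailing `&& limit` makes 0 mean "disabled", matching the new sysctl documentation. The same saturating check in isolation, as a userspace-flavoured C11 sketch (illustration only):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_uint event_count;

/* Returns true on the Nth event when limit == N; limit == 0 disables it. */
static bool hit_limit(unsigned int limit)
{
        unsigned int count = atomic_fetch_add(&event_count, 1) + 1;

        return count >= limit && limit;
}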
+diff --git a/kernel/module.c b/kernel/module.c
+index 7c724356aca31..30ac7514bd2bf 100644
+--- a/kernel/module.c
++++ b/kernel/module.c
+@@ -3654,7 +3654,8 @@ static bool finished_loading(const char *name)
+ sched_annotate_sleep();
+ mutex_lock(&module_mutex);
+ mod = find_module_all(name, strlen(name), true);
+- ret = !mod || mod->state == MODULE_STATE_LIVE;
++ ret = !mod || mod->state == MODULE_STATE_LIVE
++ || mod->state == MODULE_STATE_GOING;
+ mutex_unlock(&module_mutex);
+
+ return ret;
+@@ -3820,20 +3821,35 @@ static int add_unformed_module(struct module *mod)
+
+ mod->state = MODULE_STATE_UNFORMED;
+
+-again:
+ mutex_lock(&module_mutex);
+ old = find_module_all(mod->name, strlen(mod->name), true);
+ if (old != NULL) {
+- if (old->state != MODULE_STATE_LIVE) {
++ if (old->state == MODULE_STATE_COMING
++ || old->state == MODULE_STATE_UNFORMED) {
+ /* Wait in case it fails to load. */
+ mutex_unlock(&module_mutex);
+ err = wait_event_interruptible(module_wq,
+ finished_loading(mod->name));
+ if (err)
+ goto out_unlocked;
+- goto again;
++
++ /* The module might have gone in the meantime. */
++ mutex_lock(&module_mutex);
++ old = find_module_all(mod->name, strlen(mod->name),
++ true);
+ }
+- err = -EEXIST;
++
++ /*
++ * We are here only when the same module was being loaded. Do
++ * not try to load it again right now. It prevents long delays
++ * caused by serialized module load failures. It might happen
++ * when more devices of the same type trigger load of
++ * a particular module.
++ */
++ if (old && old->state == MODULE_STATE_LIVE)
++ err = -EEXIST;
++ else
++ err = -EBUSY;
+ goto out;
+ }
+ mod_update_bounds(mod);
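Taken together, the two module.c hunks change what a racing duplicate load sees: finished_loading() now also wakes the waiter when the earlier copy is GOING, and after re-looking the module up the caller returns -EEXIST only if that copy actually reached LIVE, -EBUSY otherwise, so userspace can retry instead of serialising behind repeated failures. Condensed into a hypothetical helper:

/* Restatement of the new error choice; not a real kernel function. */
static int duplicate_load_errno(const struct module *old)
{
        if (old && old->state == MODULE_STATE_LIVE)
                return -EEXIST;  /* the other loader finished successfully */
        return -EBUSY;           /* it failed or is going away; retry later */
}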
+diff --git a/kernel/panic.c b/kernel/panic.c
+index f470a038b05bd..cef79466f9417 100644
+--- a/kernel/panic.c
++++ b/kernel/panic.c
+@@ -31,6 +31,7 @@
+ #include <linux/bug.h>
+ #include <linux/ratelimit.h>
+ #include <linux/debugfs.h>
++#include <linux/sysfs.h>
+ #include <asm/sections.h>
+
+ #define PANIC_TIMER_STEP 100
+@@ -44,6 +45,7 @@ static int pause_on_oops_flag;
+ static DEFINE_SPINLOCK(pause_on_oops_lock);
+ bool crash_kexec_post_notifiers;
+ int panic_on_warn __read_mostly;
++static unsigned int warn_limit __read_mostly;
+
+ int panic_timeout = CONFIG_PANIC_TIMEOUT;
+ EXPORT_SYMBOL_GPL(panic_timeout);
+@@ -60,6 +62,45 @@ ATOMIC_NOTIFIER_HEAD(panic_notifier_list);
+
+ EXPORT_SYMBOL(panic_notifier_list);
+
++#ifdef CONFIG_SYSCTL
++static struct ctl_table kern_panic_table[] = {
++ {
++ .procname = "warn_limit",
++ .data = &warn_limit,
++ .maxlen = sizeof(warn_limit),
++ .mode = 0644,
++ .proc_handler = proc_douintvec,
++ },
++ { }
++};
++
++static __init int kernel_panic_sysctls_init(void)
++{
++ register_sysctl_init("kernel", kern_panic_table);
++ return 0;
++}
++late_initcall(kernel_panic_sysctls_init);
++#endif
++
++static atomic_t warn_count = ATOMIC_INIT(0);
++
++#ifdef CONFIG_SYSFS
++static ssize_t warn_count_show(struct kobject *kobj, struct kobj_attribute *attr,
++ char *page)
++{
++ return sysfs_emit(page, "%d\n", atomic_read(&warn_count));
++}
++
++static struct kobj_attribute warn_count_attr = __ATTR_RO(warn_count);
++
++static __init int kernel_panic_sysfs_init(void)
++{
++ sysfs_add_file_to_group(kernel_kobj, &warn_count_attr.attr, NULL);
++ return 0;
++}
++late_initcall(kernel_panic_sysfs_init);
++#endif
++
+ static long no_blink(int state)
+ {
+ return 0;
+@@ -156,6 +197,19 @@ static void panic_print_sys_info(void)
+ ftrace_dump(DUMP_ALL);
+ }
+
++void check_panic_on_warn(const char *origin)
++{
++ unsigned int limit;
++
++ if (panic_on_warn)
++ panic("%s: panic_on_warn set ...\n", origin);
++
++ limit = READ_ONCE(warn_limit);
++ if (atomic_inc_return(&warn_count) >= limit && limit)
++ panic("%s: system warned too often (kernel.warn_limit is %d)",
++ origin, limit);
++}
++
+ /**
+ * panic - halt the system
+ * @fmt: The text string to print
+@@ -173,6 +227,16 @@ void panic(const char *fmt, ...)
+ int old_cpu, this_cpu;
+ bool _crash_kexec_post_notifiers = crash_kexec_post_notifiers;
+
++ if (panic_on_warn) {
++ /*
++ * This thread may hit another WARN() in the panic path.
++ * Resetting this prevents additional WARN() from panicking the
++ * system on this thread. Other threads are blocked by the
++ * panic_mutex in panic().
++ */
++ panic_on_warn = 0;
++ }
++
+ /*
+ * Disable local interrupts. This will prevent panic_smp_self_stop
+ * from deadlocking the first cpu that invokes the panic, since
+@@ -571,16 +635,7 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
+ if (args)
+ vprintk(args->fmt, args->args);
+
+- if (panic_on_warn) {
+- /*
+- * This thread may hit another WARN() in the panic path.
+- * Resetting this prevents additional WARN() from panicking the
+- * system on this thread. Other threads are blocked by the
+- * panic_mutex in panic().
+- */
+- panic_on_warn = 0;
+- panic("panic_on_warn set ...\n");
+- }
++ check_panic_on_warn("kernel");
+
+ print_modules();
+
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 06b686ef36e68..8ab239fd1c8d3 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -3964,8 +3964,7 @@ static noinline void __schedule_bug(struct task_struct *prev)
+ print_ip_sym(preempt_disable_ip);
+ pr_cont("\n");
+ }
+- if (panic_on_warn)
+- panic("scheduling while atomic\n");
++ check_panic_on_warn("scheduling while atomic");
+
+ dump_stack();
+ add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
+diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
+index 4d9f818029112..1e1345cd21b4f 100644
+--- a/kernel/trace/bpf_trace.c
++++ b/kernel/trace/bpf_trace.c
+@@ -649,6 +649,9 @@ BPF_CALL_1(bpf_send_signal, u32, sig)
+ return -EPERM;
+ if (unlikely(!nmi_uaccess_okay()))
+ return -EPERM;
++ /* Task should not be pid=1 to avoid kernel panic. */
++ if (unlikely(is_global_init(current)))
++ return -EPERM;
+
+ if (irqs_disabled()) {
+ /* Do an early check on signal validity. Otherwise,
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 1090b24041104..8b87f1f74e325 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -9317,6 +9317,8 @@ void __init early_trace_init(void)
+ static_key_enable(&tracepoint_printk_key.key);
+ }
+ tracer_alloc_buffers();
++
++ init_events();
+ }
+
+ void __init trace_init(void)
+diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
+index 1d514a1a31554..f2ff39353e037 100644
+--- a/kernel/trace/trace.h
++++ b/kernel/trace/trace.h
+@@ -1590,6 +1590,7 @@ extern void trace_event_enable_cmd_record(bool enable);
+ extern void trace_event_enable_tgid_record(bool enable);
+
+ extern int event_trace_init(void);
++extern int init_events(void);
+ extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr);
+ extern int event_trace_del_tracer(struct trace_array *tr);
+
+diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
+index 7b648fb9ff115..50b6fb641e5b3 100644
+--- a/kernel/trace/trace_events_hist.c
++++ b/kernel/trace/trace_events_hist.c
+@@ -2552,6 +2552,8 @@ static struct hist_field *create_hist_field(struct hist_trigger_data *hist_data,
+ unsigned long fl = flags & ~HIST_FIELD_FL_LOG2;
+ hist_field->fn = hist_field_log2;
+ hist_field->operands[0] = create_hist_field(hist_data, field, fl, NULL);
++ if (!hist_field->operands[0])
++ goto free;
+ hist_field->size = hist_field->operands[0]->size;
+ hist_field->type = kstrdup(hist_field->operands[0]->type, GFP_KERNEL);
+ if (!hist_field->type)
+diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
+index a0a45901dc027..b194dd1c8420f 100644
+--- a/kernel/trace/trace_output.c
++++ b/kernel/trace/trace_output.c
+@@ -1366,7 +1366,7 @@ static struct trace_event *events[] __initdata = {
+ NULL
+ };
+
+-__init static int init_events(void)
++__init int init_events(void)
+ {
+ struct trace_event *event;
+ int i, ret;
+@@ -1384,4 +1384,3 @@ __init static int init_events(void)
+
+ return 0;
+ }
+-early_initcall(init_events);
+diff --git a/lib/lockref.c b/lib/lockref.c
+index 5b34bbd3eba81..81ac5f3552428 100644
+--- a/lib/lockref.c
++++ b/lib/lockref.c
+@@ -24,7 +24,6 @@
+ } \
+ if (!--retry) \
+ break; \
+- cpu_relax(); \
+ } \
+ } while (0)
+
+diff --git a/lib/nlattr.c b/lib/nlattr.c
+index 0d84f79cb4b54..b5ce5e46c06e0 100644
+--- a/lib/nlattr.c
++++ b/lib/nlattr.c
+@@ -10,6 +10,7 @@
+ #include <linux/kernel.h>
+ #include <linux/errno.h>
+ #include <linux/jiffies.h>
++#include <linux/nospec.h>
+ #include <linux/skbuff.h>
+ #include <linux/string.h>
+ #include <linux/types.h>
+@@ -169,6 +170,7 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
+ if (type <= 0 || type > maxtype)
+ return 0;
+
++ type = array_index_nospec(type, maxtype + 1);
+ pt = &policy[type];
+
+ BUG_ON(pt->type > NLA_TYPE_MAX);
+@@ -377,6 +379,7 @@ static int __nla_validate_parse(const struct nlattr *head, int len, int maxtype,
+ }
+ continue;
+ }
++ type = array_index_nospec(type, maxtype + 1);
+ if (policy) {
+ int err = validate_nla(nla, maxtype, policy,
+ validate, extack);
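Both nlattr.c call sites follow the usual Spectre-v1 mitigation: bounds-check the attacker-influenced attribute type first, then clamp it with array_index_nospec() before it indexes policy[], so a mispredicted branch cannot read past the array even speculatively. The same pattern in isolation, with a hypothetical wrapper:

#include <linux/nospec.h>
#include <net/netlink.h>

/* Sketch of the clamp-after-bounds-check pattern used above. */
static const struct nla_policy *policy_entry(const struct nla_policy *policy,
                                             int type, int maxtype)
{
        if (type <= 0 || type > maxtype)
                return NULL;                           /* architectural check */
        type = array_index_nospec(type, maxtype + 1);  /* no OOB index under
                                                          misspeculation */
        return &policy[type];
}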
+diff --git a/mm/kasan/report.c b/mm/kasan/report.c
+index 621782100eaa0..4d87df96acc1e 100644
+--- a/mm/kasan/report.c
++++ b/mm/kasan/report.c
+@@ -92,8 +92,8 @@ static void end_report(unsigned long *flags)
+ pr_err("==================================================================\n");
+ add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
+ spin_unlock_irqrestore(&report_lock, *flags);
+- if (panic_on_warn)
+- panic("panic_on_warn set ...\n");
++ if (!test_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags))
++ check_panic_on_warn("KASAN");
+ kasan_enable_current();
+ }
+
+diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
+index e5e1c139f2118..eb5b2f45deec9 100644
+--- a/net/bluetooth/hci_core.c
++++ b/net/bluetooth/hci_core.c
+@@ -1582,6 +1582,7 @@ setup_failed:
+ hdev->flush(hdev);
+
+ if (hdev->sent_cmd) {
++ cancel_delayed_work_sync(&hdev->cmd_timer);
+ kfree_skb(hdev->sent_cmd);
+ hdev->sent_cmd = NULL;
+ }
+diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
+index ff6625493c9f8..84b430986b1de 100644
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -4177,6 +4177,19 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
+ struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
+ struct hci_conn *conn;
+
++ switch (ev->link_type) {
++ case SCO_LINK:
++ case ESCO_LINK:
++ break;
++ default:
++ /* As per Core 5.3 Vol 4 Part E 7.7.35 (p.2219), Link_Type
++ * for HCI_Synchronous_Connection_Complete is limited to
++ * either SCO or eSCO
++ */
++ bt_dev_err(hdev, "Ignoring connect complete event for invalid link type");
++ return;
++ }
++
+ BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
+
+ hci_dev_lock(hdev);
+diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
+index b96df54d00365..534a53124d14c 100644
+--- a/net/core/net_namespace.c
++++ b/net/core/net_namespace.c
+@@ -140,12 +140,12 @@ static int ops_init(const struct pernet_operations *ops, struct net *net)
+ return 0;
+
+ if (ops->id && ops->size) {
+-cleanup:
+ ng = rcu_dereference_protected(net->gen,
+ lockdep_is_held(&pernet_ops_rwsem));
+ ng->ptr[*ops->id] = NULL;
+ }
+
++cleanup:
+ kfree(data);
+
+ out:
+diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
+index f45b9daf62cf3..42a4ee192f8dc 100644
+--- a/net/ipv4/fib_semantics.c
++++ b/net/ipv4/fib_semantics.c
+@@ -30,6 +30,7 @@
+ #include <linux/slab.h>
+ #include <linux/netlink.h>
+ #include <linux/hash.h>
++#include <linux/nospec.h>
+
+ #include <net/arp.h>
+ #include <net/ip.h>
+@@ -1009,6 +1010,7 @@ bool fib_metrics_match(struct fib_config *cfg, struct fib_info *fi)
+ if (type > RTAX_MAX)
+ return false;
+
++ type = array_index_nospec(type, RTAX_MAX + 1);
+ if (type == RTAX_CC_ALGO) {
+ char tmp[TCP_CA_NAME_MAX];
+ bool ecn_ca = false;
+diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
+index 25334aa3da04e..33292983b8cfb 100644
+--- a/net/ipv4/inet_hashtables.c
++++ b/net/ipv4/inet_hashtables.c
+@@ -536,8 +536,20 @@ bool inet_ehash_insert(struct sock *sk, struct sock *osk, bool *found_dup_sk)
+ spin_lock(lock);
+ if (osk) {
+ WARN_ON_ONCE(sk->sk_hash != osk->sk_hash);
+- ret = sk_nulls_del_node_init_rcu(osk);
+- } else if (found_dup_sk) {
++ ret = sk_hashed(osk);
++ if (ret) {
++ /* Before deleting the node, we insert a new one to make
++ * sure that the look-up-sk process would not miss either
++ * of them and that at least one node would exist in ehash
++ * table all the time. Otherwise there's a tiny chance
++ * that lookup process could find nothing in ehash table.
++ */
++ __sk_nulls_add_node_tail_rcu(sk, list);
++ sk_nulls_del_node_init_rcu(osk);
++ }
++ goto unlock;
++ }
++ if (found_dup_sk) {
+ *found_dup_sk = inet_ehash_lookup_by_sk(sk, list);
+ if (*found_dup_sk)
+ ret = false;
+@@ -546,6 +558,7 @@ bool inet_ehash_insert(struct sock *sk, struct sock *osk, bool *found_dup_sk)
+ if (ret)
+ __sk_nulls_add_node_rcu(sk, list);
+
++unlock:
+ spin_unlock(lock);
+
+ return ret;
+diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
+index c411c87ae865f..a00102d7c7fd4 100644
+--- a/net/ipv4/inet_timewait_sock.c
++++ b/net/ipv4/inet_timewait_sock.c
+@@ -81,10 +81,10 @@ void inet_twsk_put(struct inet_timewait_sock *tw)
+ }
+ EXPORT_SYMBOL_GPL(inet_twsk_put);
+
+-static void inet_twsk_add_node_rcu(struct inet_timewait_sock *tw,
+- struct hlist_nulls_head *list)
++static void inet_twsk_add_node_tail_rcu(struct inet_timewait_sock *tw,
++ struct hlist_nulls_head *list)
+ {
+- hlist_nulls_add_head_rcu(&tw->tw_node, list);
++ hlist_nulls_add_tail_rcu(&tw->tw_node, list);
+ }
+
+ static void inet_twsk_add_bind_node(struct inet_timewait_sock *tw,
+@@ -120,7 +120,7 @@ void inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
+
+ spin_lock(lock);
+
+- inet_twsk_add_node_rcu(tw, &ehead->chain);
++ inet_twsk_add_node_tail_rcu(tw, &ehead->chain);
+
+ /* Step 3: Remove SK from hash chain */
+ if (__sk_nulls_del_node_init_rcu(sk))
+diff --git a/net/ipv4/metrics.c b/net/ipv4/metrics.c
+index 3205d5f7c8c94..4966ac2aaf87d 100644
+--- a/net/ipv4/metrics.c
++++ b/net/ipv4/metrics.c
+@@ -1,5 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0-only
+ #include <linux/netlink.h>
++#include <linux/nospec.h>
+ #include <linux/rtnetlink.h>
+ #include <linux/types.h>
+ #include <net/ip.h>
+@@ -28,6 +29,7 @@ static int ip_metrics_convert(struct net *net, struct nlattr *fc_mx,
+ return -EINVAL;
+ }
+
++ type = array_index_nospec(type, RTAX_MAX + 1);
+ if (type == RTAX_CC_ALGO) {
+ char tmp[TCP_CA_NAME_MAX];
+
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 93825ec968aa4..a74965a6a54f4 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -429,6 +429,7 @@ void tcp_init_sock(struct sock *sk)
+
+ /* There's a bubble in the pipe until at least the first ACK. */
+ tp->app_limited = ~0U;
++ tp->rate_app_limited = 1;
+
+ /* See draft-stevens-tcpca-spec-01 for discussion of the
+ * initialization of these values.
+@@ -2675,6 +2676,7 @@ int tcp_disconnect(struct sock *sk, int flags)
+ tp->last_oow_ack_time = 0;
+ /* There's a bubble in the pipe until at least the first ACK. */
+ tp->app_limited = ~0U;
++ tp->rate_app_limited = 1;
+ tp->rack.mstamp = 0;
+ tp->rack.advanced = 0;
+ tp->rack.reo_wnd_steps = 1;
+diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
+index 4a6396d574a0d..fd4da1019e44c 100644
+--- a/net/ipv6/ip6_gre.c
++++ b/net/ipv6/ip6_gre.c
+@@ -1137,14 +1137,16 @@ static void ip6gre_tnl_link_config_route(struct ip6_tnl *t, int set_mtu,
+ dev->needed_headroom = dst_len;
+
+ if (set_mtu) {
+- dev->mtu = rt->dst.dev->mtu - t_hlen;
++ int mtu = rt->dst.dev->mtu - t_hlen;
++
+ if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
+- dev->mtu -= 8;
++ mtu -= 8;
+ if (dev->type == ARPHRD_ETHER)
+- dev->mtu -= ETH_HLEN;
++ mtu -= ETH_HLEN;
+
+- if (dev->mtu < IPV6_MIN_MTU)
+- dev->mtu = IPV6_MIN_MTU;
++ if (mtu < IPV6_MIN_MTU)
++ mtu = IPV6_MIN_MTU;
++ WRITE_ONCE(dev->mtu, mtu);
+ }
+ }
+ ip6_rt_put(rt);
+diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
+index 878a08c40fffd..acc75975edded 100644
+--- a/net/ipv6/ip6_tunnel.c
++++ b/net/ipv6/ip6_tunnel.c
+@@ -1430,6 +1430,7 @@ static void ip6_tnl_link_config(struct ip6_tnl *t)
+ struct __ip6_tnl_parm *p = &t->parms;
+ struct flowi6 *fl6 = &t->fl.u.ip6;
+ int t_hlen;
++ int mtu;
+
+ memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
+ memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));
+@@ -1472,12 +1473,13 @@ static void ip6_tnl_link_config(struct ip6_tnl *t)
+ dev->hard_header_len = rt->dst.dev->hard_header_len +
+ t_hlen;
+
+- dev->mtu = rt->dst.dev->mtu - t_hlen;
++ mtu = rt->dst.dev->mtu - t_hlen;
+ if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
+- dev->mtu -= 8;
++ mtu -= 8;
+
+- if (dev->mtu < IPV6_MIN_MTU)
+- dev->mtu = IPV6_MIN_MTU;
++ if (mtu < IPV6_MIN_MTU)
++ mtu = IPV6_MIN_MTU;
++ WRITE_ONCE(dev->mtu, mtu);
+ }
+ ip6_rt_put(rt);
+ }
+diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
+index 117d374695fe6..1179608955f5f 100644
+--- a/net/ipv6/sit.c
++++ b/net/ipv6/sit.c
+@@ -1083,10 +1083,12 @@ static void ipip6_tunnel_bind_dev(struct net_device *dev)
+
+ if (tdev && !netif_is_l3_master(tdev)) {
+ int t_hlen = tunnel->hlen + sizeof(struct iphdr);
++ int mtu;
+
+- dev->mtu = tdev->mtu - t_hlen;
+- if (dev->mtu < IPV6_MIN_MTU)
+- dev->mtu = IPV6_MIN_MTU;
++ mtu = tdev->mtu - t_hlen;
++ if (mtu < IPV6_MIN_MTU)
++ mtu = IPV6_MIN_MTU;
++ WRITE_ONCE(dev->mtu, mtu);
+ }
+ }
+
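ip6_gre, ip6_tunnel and sit all get the same fix: the MTU is computed in a local variable and only the final, clamped value is published with WRITE_ONCE(), so lockless readers of dev->mtu can no longer observe the intermediate values the old code stored one subtraction at a time. The shape of the fix as a hypothetical helper:

/* Compute fully, then publish once (pattern used in the three hunks above). */
static void tunnel_publish_mtu(struct net_device *dev, int lower_mtu, int t_hlen)
{
        int mtu = lower_mtu - t_hlen;

        if (mtu < IPV6_MIN_MTU)
                mtu = IPV6_MIN_MTU;
        WRITE_ONCE(dev->mtu, mtu);  /* single store seen by lockless readers */
}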
+diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
+index 421b2c89ce12a..d001e254badad 100644
+--- a/net/l2tp/l2tp_core.c
++++ b/net/l2tp/l2tp_core.c
+@@ -1171,8 +1171,10 @@ static void l2tp_tunnel_destruct(struct sock *sk)
+ }
+
+ /* Remove hooks into tunnel socket */
++ write_lock_bh(&sk->sk_callback_lock);
+ sk->sk_destruct = tunnel->old_sk_destruct;
+ sk->sk_user_data = NULL;
++ write_unlock_bh(&sk->sk_callback_lock);
+
+ /* Call the original destructor */
+ if (sk->sk_destruct)
+@@ -1491,20 +1493,27 @@ int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
+ sock = sockfd_lookup(tunnel->fd, &ret);
+ if (!sock)
+ goto err;
+-
+- ret = l2tp_validate_socket(sock->sk, net, tunnel->encap);
+- if (ret < 0)
+- goto err_sock;
+ }
+
++ sk = sock->sk;
++ write_lock_bh(&sk->sk_callback_lock);
++ ret = l2tp_validate_socket(sk, net, tunnel->encap);
++ if (ret < 0)
++ goto err_inval_sock;
++ rcu_assign_sk_user_data(sk, tunnel);
++ write_unlock_bh(&sk->sk_callback_lock);
++
+ tunnel->l2tp_net = net;
+ pn = l2tp_pernet(net);
+
++ sock_hold(sk);
++ tunnel->sock = sk;
++
+ spin_lock_bh(&pn->l2tp_tunnel_list_lock);
+ list_for_each_entry(tunnel_walk, &pn->l2tp_tunnel_list, list) {
+ if (tunnel_walk->tunnel_id == tunnel->tunnel_id) {
+ spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
+-
++ sock_put(sk);
+ ret = -EEXIST;
+ goto err_sock;
+ }
+@@ -1512,10 +1521,6 @@ int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
+ list_add_rcu(&tunnel->list, &pn->l2tp_tunnel_list);
+ spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
+
+- sk = sock->sk;
+- sock_hold(sk);
+- tunnel->sock = sk;
+-
+ if (tunnel->encap == L2TP_ENCAPTYPE_UDP) {
+ struct udp_tunnel_sock_cfg udp_cfg = {
+ .sk_user_data = tunnel,
+@@ -1525,8 +1530,6 @@ int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
+ };
+
+ setup_udp_tunnel_sock(net, sock, &udp_cfg);
+- } else {
+- sk->sk_user_data = tunnel;
+ }
+
+ tunnel->old_sk_destruct = sk->sk_destruct;
+@@ -1541,6 +1544,11 @@ int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
+ return 0;
+
+ err_sock:
++ write_lock_bh(&sk->sk_callback_lock);
++ rcu_assign_sk_user_data(sk, NULL);
++err_inval_sock:
++ write_unlock_bh(&sk->sk_callback_lock);
++
+ if (tunnel->fd < 0)
+ sock_release(sock);
+ else
+diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
+index 7626f3e1c70a7..cec4b16170a0b 100644
+--- a/net/netfilter/nf_conntrack_proto_sctp.c
++++ b/net/netfilter/nf_conntrack_proto_sctp.c
+@@ -27,22 +27,16 @@
+ #include <net/netfilter/nf_conntrack_ecache.h>
+ #include <net/netfilter/nf_conntrack_timeout.h>
+
+-/* FIXME: Examine ipfilter's timeouts and conntrack transitions more
+- closely. They're more complex. --RR
+-
+- And so for me for SCTP :D -Kiran */
+-
+ static const char *const sctp_conntrack_names[] = {
+- "NONE",
+- "CLOSED",
+- "COOKIE_WAIT",
+- "COOKIE_ECHOED",
+- "ESTABLISHED",
+- "SHUTDOWN_SENT",
+- "SHUTDOWN_RECD",
+- "SHUTDOWN_ACK_SENT",
+- "HEARTBEAT_SENT",
+- "HEARTBEAT_ACKED",
++ [SCTP_CONNTRACK_NONE] = "NONE",
++ [SCTP_CONNTRACK_CLOSED] = "CLOSED",
++ [SCTP_CONNTRACK_COOKIE_WAIT] = "COOKIE_WAIT",
++ [SCTP_CONNTRACK_COOKIE_ECHOED] = "COOKIE_ECHOED",
++ [SCTP_CONNTRACK_ESTABLISHED] = "ESTABLISHED",
++ [SCTP_CONNTRACK_SHUTDOWN_SENT] = "SHUTDOWN_SENT",
++ [SCTP_CONNTRACK_SHUTDOWN_RECD] = "SHUTDOWN_RECD",
++ [SCTP_CONNTRACK_SHUTDOWN_ACK_SENT] = "SHUTDOWN_ACK_SENT",
++ [SCTP_CONNTRACK_HEARTBEAT_SENT] = "HEARTBEAT_SENT",
+ };
+
+ #define SECS * HZ
+@@ -54,12 +48,11 @@ static const unsigned int sctp_timeouts[SCTP_CONNTRACK_MAX] = {
+ [SCTP_CONNTRACK_CLOSED] = 10 SECS,
+ [SCTP_CONNTRACK_COOKIE_WAIT] = 3 SECS,
+ [SCTP_CONNTRACK_COOKIE_ECHOED] = 3 SECS,
+- [SCTP_CONNTRACK_ESTABLISHED] = 5 DAYS,
++ [SCTP_CONNTRACK_ESTABLISHED] = 210 SECS,
+ [SCTP_CONNTRACK_SHUTDOWN_SENT] = 300 SECS / 1000,
+ [SCTP_CONNTRACK_SHUTDOWN_RECD] = 300 SECS / 1000,
+ [SCTP_CONNTRACK_SHUTDOWN_ACK_SENT] = 3 SECS,
+ [SCTP_CONNTRACK_HEARTBEAT_SENT] = 30 SECS,
+- [SCTP_CONNTRACK_HEARTBEAT_ACKED] = 210 SECS,
+ };
+
+ #define SCTP_FLAG_HEARTBEAT_VTAG_FAILED 1
+@@ -73,7 +66,6 @@ static const unsigned int sctp_timeouts[SCTP_CONNTRACK_MAX] = {
+ #define sSR SCTP_CONNTRACK_SHUTDOWN_RECD
+ #define sSA SCTP_CONNTRACK_SHUTDOWN_ACK_SENT
+ #define sHS SCTP_CONNTRACK_HEARTBEAT_SENT
+-#define sHA SCTP_CONNTRACK_HEARTBEAT_ACKED
+ #define sIV SCTP_CONNTRACK_MAX
+
+ /*
+@@ -96,9 +88,6 @@ SHUTDOWN_ACK_SENT - We have seen a SHUTDOWN_ACK chunk in the direction opposite
+ CLOSED - We have seen a SHUTDOWN_COMPLETE chunk in the direction of
+ the SHUTDOWN chunk. Connection is closed.
+ HEARTBEAT_SENT - We have seen a HEARTBEAT in a new flow.
+-HEARTBEAT_ACKED - We have seen a HEARTBEAT-ACK in the direction opposite to
+- that of the HEARTBEAT chunk. Secondary connection is
+- established.
+ */
+
+ /* TODO
+@@ -115,33 +104,33 @@ cookie echoed to closed.
+ static const u8 sctp_conntracks[2][11][SCTP_CONNTRACK_MAX] = {
+ {
+ /* ORIGINAL */
+-/* sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA */
+-/* init */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCW, sHA},
+-/* init_ack */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL, sHA},
+-/* abort */ {sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL},
+-/* shutdown */ {sCL, sCL, sCW, sCE, sSS, sSS, sSR, sSA, sCL, sSS},
+-/* shutdown_ack */ {sSA, sCL, sCW, sCE, sES, sSA, sSA, sSA, sSA, sHA},
+-/* error */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL, sHA},/* Can't have Stale cookie*/
+-/* cookie_echo */ {sCL, sCL, sCE, sCE, sES, sSS, sSR, sSA, sCL, sHA},/* 5.2.4 - Big TODO */
+-/* cookie_ack */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL, sHA},/* Can't come in orig dir */
+-/* shutdown_comp*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sCL, sCL, sHA},
+-/* heartbeat */ {sHS, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA},
+-/* heartbeat_ack*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA}
++/* sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS */
++/* init */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCW},
++/* init_ack */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL},
++/* abort */ {sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL},
++/* shutdown */ {sCL, sCL, sCW, sCE, sSS, sSS, sSR, sSA, sCL},
++/* shutdown_ack */ {sSA, sCL, sCW, sCE, sES, sSA, sSA, sSA, sSA},
++/* error */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL},/* Can't have Stale cookie*/
++/* cookie_echo */ {sCL, sCL, sCE, sCE, sES, sSS, sSR, sSA, sCL},/* 5.2.4 - Big TODO */
++/* cookie_ack */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL},/* Can't come in orig dir */
++/* shutdown_comp*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sCL, sCL},
++/* heartbeat */ {sHS, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS},
++/* heartbeat_ack*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS},
+ },
+ {
+ /* REPLY */
+-/* sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA */
+-/* init */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sIV, sHA},/* INIT in sCL Big TODO */
+-/* init_ack */ {sIV, sCW, sCW, sCE, sES, sSS, sSR, sSA, sIV, sHA},
+-/* abort */ {sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sIV, sCL},
+-/* shutdown */ {sIV, sCL, sCW, sCE, sSR, sSS, sSR, sSA, sIV, sSR},
+-/* shutdown_ack */ {sIV, sCL, sCW, sCE, sES, sSA, sSA, sSA, sIV, sHA},
+-/* error */ {sIV, sCL, sCW, sCL, sES, sSS, sSR, sSA, sIV, sHA},
+-/* cookie_echo */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sIV, sHA},/* Can't come in reply dir */
+-/* cookie_ack */ {sIV, sCL, sCW, sES, sES, sSS, sSR, sSA, sIV, sHA},
+-/* shutdown_comp*/ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sCL, sIV, sHA},
+-/* heartbeat */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA},
+-/* heartbeat_ack*/ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHA, sHA}
++/* sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS */
++/* init */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sIV},/* INIT in sCL Big TODO */
++/* init_ack */ {sIV, sCW, sCW, sCE, sES, sSS, sSR, sSA, sIV},
++/* abort */ {sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sIV},
++/* shutdown */ {sIV, sCL, sCW, sCE, sSR, sSS, sSR, sSA, sIV},
++/* shutdown_ack */ {sIV, sCL, sCW, sCE, sES, sSA, sSA, sSA, sIV},
++/* error */ {sIV, sCL, sCW, sCL, sES, sSS, sSR, sSA, sIV},
++/* cookie_echo */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sIV},/* Can't come in reply dir */
++/* cookie_ack */ {sIV, sCL, sCW, sES, sES, sSS, sSR, sSA, sIV},
++/* shutdown_comp*/ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sCL, sIV},
++/* heartbeat */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS},
++/* heartbeat_ack*/ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sES},
+ }
+ };
+
+@@ -412,22 +401,29 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct,
+ for_each_sctp_chunk (skb, sch, _sch, offset, dataoff, count) {
+ /* Special cases of Verification tag check (Sec 8.5.1) */
+ if (sch->type == SCTP_CID_INIT) {
+- /* Sec 8.5.1 (A) */
++ /* (A) vtag MUST be zero */
+ if (sh->vtag != 0)
+ goto out_unlock;
+ } else if (sch->type == SCTP_CID_ABORT) {
+- /* Sec 8.5.1 (B) */
+- if (sh->vtag != ct->proto.sctp.vtag[dir] &&
+- sh->vtag != ct->proto.sctp.vtag[!dir])
++ /* (B) vtag MUST match own vtag if T flag is unset OR
++ * MUST match peer's vtag if T flag is set
++ */
++ if ((!(sch->flags & SCTP_CHUNK_FLAG_T) &&
++ sh->vtag != ct->proto.sctp.vtag[dir]) ||
++ ((sch->flags & SCTP_CHUNK_FLAG_T) &&
++ sh->vtag != ct->proto.sctp.vtag[!dir]))
+ goto out_unlock;
+ } else if (sch->type == SCTP_CID_SHUTDOWN_COMPLETE) {
+- /* Sec 8.5.1 (C) */
+- if (sh->vtag != ct->proto.sctp.vtag[dir] &&
+- sh->vtag != ct->proto.sctp.vtag[!dir] &&
+- sch->flags & SCTP_CHUNK_FLAG_T)
++ /* (C) vtag MUST match own vtag if T flag is unset OR
++ * MUST match peer's vtag if T flag is set
++ */
++ if ((!(sch->flags & SCTP_CHUNK_FLAG_T) &&
++ sh->vtag != ct->proto.sctp.vtag[dir]) ||
++ ((sch->flags & SCTP_CHUNK_FLAG_T) &&
++ sh->vtag != ct->proto.sctp.vtag[!dir]))
+ goto out_unlock;
+ } else if (sch->type == SCTP_CID_COOKIE_ECHO) {
+- /* Sec 8.5.1 (D) */
++ /* (D) vtag must be same as init_vtag as found in INIT_ACK */
+ if (sh->vtag != ct->proto.sctp.vtag[dir])
+ goto out_unlock;
+ } else if (sch->type == SCTP_CID_HEARTBEAT) {
+@@ -501,8 +497,12 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct,
+ }
+
+ ct->proto.sctp.state = new_state;
+- if (old_state != new_state)
++ if (old_state != new_state) {
+ nf_conntrack_event_cache(IPCT_PROTOINFO, ct);
++ if (new_state == SCTP_CONNTRACK_ESTABLISHED &&
++ !test_and_set_bit(IPS_ASSURED_BIT, &ct->status))
++ nf_conntrack_event_cache(IPCT_ASSURED, ct);
++ }
+ }
+ spin_unlock_bh(&ct->lock);
+
+@@ -516,14 +516,6 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct,
+
+ nf_ct_refresh_acct(ct, ctinfo, skb, timeouts[new_state]);
+
+- if (old_state == SCTP_CONNTRACK_COOKIE_ECHOED &&
+- dir == IP_CT_DIR_REPLY &&
+- new_state == SCTP_CONNTRACK_ESTABLISHED) {
+- pr_debug("Setting assured bit\n");
+- set_bit(IPS_ASSURED_BIT, &ct->status);
+- nf_conntrack_event_cache(IPCT_ASSURED, ct);
+- }
+-
+ return NF_ACCEPT;
+
+ out_unlock:
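With the sHA column gone, both transition tables above have nine state columns (sNO..sHS): a HEARTBEAT-ACK in the reply direction while in HEARTBEAT_SENT now lands directly in ESTABLISHED, and the ASSURED bit is set on the state change itself rather than in the old COOKIE_ECHOED special case. For reference, the tables are indexed as direction, then chunk type, then current state:

/* Sketch of how sctp_conntracks[][][] is consulted (cf. sctp_new_state()). */
static u8 next_sctp_state(int dir, int chunk_idx, u8 cur_state)
{
        /* sIV (== SCTP_CONNTRACK_MAX) marks transitions that are not allowed. */
        return sctp_conntracks[dir][chunk_idx][cur_state];
}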
+diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
+index b8cc3339a2495..aed967e2f30fb 100644
+--- a/net/netfilter/nf_conntrack_proto_tcp.c
++++ b/net/netfilter/nf_conntrack_proto_tcp.c
+@@ -1158,6 +1158,16 @@ int nf_conntrack_tcp_packet(struct nf_conn *ct,
+ nf_ct_kill_acct(ct, ctinfo, skb);
+ return NF_ACCEPT;
+ }
++
++ if (index == TCP_SYN_SET && old_state == TCP_CONNTRACK_SYN_SENT) {
++ /* do not renew timeout on SYN retransmit.
++ *
++ * Else port reuse by client or NAT middlebox can keep
++ * entry alive indefinitely (including nat info).
++ */
++ return NF_ACCEPT;
++ }
++
+ /* ESTABLISHED without SEEN_REPLY, i.e. mid-connection
+ * pickup with loose=1. Avoid large ESTABLISHED timeout.
+ */
+diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
+index a3faeacaa1cbb..43c3c3be6defc 100644
+--- a/net/netfilter/nf_conntrack_standalone.c
++++ b/net/netfilter/nf_conntrack_standalone.c
+@@ -581,7 +581,6 @@ enum nf_ct_sysctl_index {
+ NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_SHUTDOWN_RECD,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_SHUTDOWN_ACK_SENT,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_HEARTBEAT_SENT,
+- NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_HEARTBEAT_ACKED,
+ #endif
+ #ifdef CONFIG_NF_CT_PROTO_DCCP
+ NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_REQUEST,
+@@ -851,12 +850,6 @@ static struct ctl_table nf_ct_sysctl_table[] = {
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+- [NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_HEARTBEAT_ACKED] = {
+- .procname = "nf_conntrack_sctp_timeout_heartbeat_acked",
+- .maxlen = sizeof(unsigned int),
+- .mode = 0644,
+- .proc_handler = proc_dointvec_jiffies,
+- },
+ #endif
+ #ifdef CONFIG_NF_CT_PROTO_DCCP
+ [NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_REQUEST] = {
+@@ -985,7 +978,6 @@ static void nf_conntrack_standalone_init_sctp_sysctl(struct net *net,
+ XASSIGN(SHUTDOWN_RECD, sn);
+ XASSIGN(SHUTDOWN_ACK_SENT, sn);
+ XASSIGN(HEARTBEAT_SENT, sn);
+- XASSIGN(HEARTBEAT_ACKED, sn);
+ #undef XASSIGN
+ #endif
+ }
+diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
+index ee7c29e0a9d7b..093eea02f9d28 100644
+--- a/net/netfilter/nft_set_rbtree.c
++++ b/net/netfilter/nft_set_rbtree.c
+@@ -389,23 +389,37 @@ static void nft_rbtree_gc(struct work_struct *work)
+ struct nft_rbtree *priv;
+ struct rb_node *node;
+ struct nft_set *set;
++ struct net *net;
++ u8 genmask;
+
+ priv = container_of(work, struct nft_rbtree, gc_work.work);
+ set = nft_set_container_of(priv);
++ net = read_pnet(&set->net);
++ genmask = nft_genmask_cur(net);
+
+ write_lock_bh(&priv->lock);
+ write_seqcount_begin(&priv->count);
+ for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) {
+ rbe = rb_entry(node, struct nft_rbtree_elem, node);
+
++ if (!nft_set_elem_active(&rbe->ext, genmask))
++ continue;
++
++ /* elements are reversed in the rbtree for historical reasons,
++ * from highest to lowest value, that is why end element is
++ * always visited before the start element.
++ */
+ if (nft_rbtree_interval_end(rbe)) {
+ rbe_end = rbe;
+ continue;
+ }
+ if (!nft_set_elem_expired(&rbe->ext))
+ continue;
+- if (nft_set_elem_mark_busy(&rbe->ext))
++
++ if (nft_set_elem_mark_busy(&rbe->ext)) {
++ rbe_end = NULL;
+ continue;
++ }
+
+ if (rbe_prev) {
+ rb_erase(&rbe_prev->node, &priv->root);
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index 86b70385dce3b..a232fcbd721c4 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -569,7 +569,9 @@ static int netlink_insert(struct sock *sk, u32 portid)
+ if (nlk_sk(sk)->bound)
+ goto err;
+
+- nlk_sk(sk)->portid = portid;
++ /* portid can be read locklessly from netlink_getname(). */
++ WRITE_ONCE(nlk_sk(sk)->portid, portid);
++
+ sock_hold(sk);
+
+ err = __netlink_insert(table, sk);
+@@ -1078,9 +1080,11 @@ static int netlink_connect(struct socket *sock, struct sockaddr *addr,
+ return -EINVAL;
+
+ if (addr->sa_family == AF_UNSPEC) {
+- sk->sk_state = NETLINK_UNCONNECTED;
+- nlk->dst_portid = 0;
+- nlk->dst_group = 0;
++ /* paired with READ_ONCE() in netlink_getsockbyportid() */
++ WRITE_ONCE(sk->sk_state, NETLINK_UNCONNECTED);
++ /* dst_portid and dst_group can be read locklessly */
++ WRITE_ONCE(nlk->dst_portid, 0);
++ WRITE_ONCE(nlk->dst_group, 0);
+ return 0;
+ }
+ if (addr->sa_family != AF_NETLINK)
+@@ -1101,9 +1105,11 @@ static int netlink_connect(struct socket *sock, struct sockaddr *addr,
+ err = netlink_autobind(sock);
+
+ if (err == 0) {
+- sk->sk_state = NETLINK_CONNECTED;
+- nlk->dst_portid = nladdr->nl_pid;
+- nlk->dst_group = ffs(nladdr->nl_groups);
++ /* paired with READ_ONCE() in netlink_getsockbyportid() */
++ WRITE_ONCE(sk->sk_state, NETLINK_CONNECTED);
++ /* dst_portid and dst_group can be read locklessly */
++ WRITE_ONCE(nlk->dst_portid, nladdr->nl_pid);
++ WRITE_ONCE(nlk->dst_group, ffs(nladdr->nl_groups));
+ }
+
+ return err;
+@@ -1120,10 +1126,12 @@ static int netlink_getname(struct socket *sock, struct sockaddr *addr,
+ nladdr->nl_pad = 0;
+
+ if (peer) {
+- nladdr->nl_pid = nlk->dst_portid;
+- nladdr->nl_groups = netlink_group_mask(nlk->dst_group);
++ /* Paired with WRITE_ONCE() in netlink_connect() */
++ nladdr->nl_pid = READ_ONCE(nlk->dst_portid);
++ nladdr->nl_groups = netlink_group_mask(READ_ONCE(nlk->dst_group));
+ } else {
+- nladdr->nl_pid = nlk->portid;
++ /* Paired with WRITE_ONCE() in netlink_insert() */
++ nladdr->nl_pid = READ_ONCE(nlk->portid);
+ netlink_lock_table();
+ nladdr->nl_groups = nlk->groups ? nlk->groups[0] : 0;
+ netlink_unlock_table();
+@@ -1150,8 +1158,9 @@ static struct sock *netlink_getsockbyportid(struct sock *ssk, u32 portid)
+
+ /* Don't bother queuing skb if kernel socket has no input function */
+ nlk = nlk_sk(sock);
+- if (sock->sk_state == NETLINK_CONNECTED &&
+- nlk->dst_portid != nlk_sk(ssk)->portid) {
++ /* dst_portid and sk_state can be changed in netlink_connect() */
++ if (READ_ONCE(sock->sk_state) == NETLINK_CONNECTED &&
++ READ_ONCE(nlk->dst_portid) != nlk_sk(ssk)->portid) {
+ sock_put(sock);
+ return ERR_PTR(-ECONNREFUSED);
+ }
+@@ -1887,8 +1896,9 @@ static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
+ goto out;
+ netlink_skb_flags |= NETLINK_SKB_DST;
+ } else {
+- dst_portid = nlk->dst_portid;
+- dst_group = nlk->dst_group;
++ /* Paired with WRITE_ONCE() in netlink_connect() */
++ dst_portid = READ_ONCE(nlk->dst_portid);
++ dst_group = READ_ONCE(nlk->dst_group);
+ }
+
+ /* Paired with WRITE_ONCE() in netlink_insert() */
+diff --git a/net/netrom/nr_timer.c b/net/netrom/nr_timer.c
+index a8da88db7893f..4e7c968cde2dc 100644
+--- a/net/netrom/nr_timer.c
++++ b/net/netrom/nr_timer.c
+@@ -121,6 +121,7 @@ static void nr_heartbeat_expiry(struct timer_list *t)
+ is accepted() it isn't 'dead' so doesn't get removed. */
+ if (sock_flag(sk, SOCK_DESTROY) ||
+ (sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_DEAD))) {
++ sock_hold(sk);
+ bh_unlock_sock(sk);
+ nr_destroy_socket(sk);
+ goto out;
+diff --git a/net/nfc/llcp_core.c b/net/nfc/llcp_core.c
+index cc997518f79d1..edadebb3efd2a 100644
+--- a/net/nfc/llcp_core.c
++++ b/net/nfc/llcp_core.c
+@@ -159,6 +159,7 @@ static void local_cleanup(struct nfc_llcp_local *local)
+ cancel_work_sync(&local->rx_work);
+ cancel_work_sync(&local->timeout_work);
+ kfree_skb(local->rx_pending);
++ local->rx_pending = NULL;
+ del_timer_sync(&local->sdreq_timer);
+ cancel_work_sync(&local->sdreq_timeout_work);
+ nfc_llcp_free_sdp_tlv_list(&local->pending_sdreqs);
+diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
+index 506ebae1f72cf..b7bd8c3e31586 100644
+--- a/net/sched/sch_taprio.c
++++ b/net/sched/sch_taprio.c
+@@ -1622,6 +1622,7 @@ static void taprio_reset(struct Qdisc *sch)
+ int i;
+
+ hrtimer_cancel(&q->advance_timer);
++
+ if (q->qdiscs) {
+ for (i = 0; i < dev->num_tx_queues && q->qdiscs[i]; i++)
+ qdisc_reset(q->qdiscs[i]);
+@@ -1644,6 +1645,7 @@ static void taprio_destroy(struct Qdisc *sch)
+ * happens in qdisc_create(), after taprio_init() has been called.
+ */
+ hrtimer_cancel(&q->advance_timer);
++ qdisc_synchronize(sch);
+
+ taprio_disable_offload(dev, q, NULL);
+
+diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c
+index a825e74d01fca..614bc081ca501 100644
+--- a/net/sctp/bind_addr.c
++++ b/net/sctp/bind_addr.c
+@@ -73,6 +73,12 @@ int sctp_bind_addr_copy(struct net *net, struct sctp_bind_addr *dest,
+ }
+ }
+
++ /* If somehow no addresses were found that can be used with this
++ * scope, it's an error.
++ */
++ if (list_empty(&dest->address_list))
++ error = -ENETUNREACH;
++
+ out:
+ if (error)
+ sctp_bind_addr_clean(dest);
+diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
+index e13115bbe7196..0f4d39fdb48f2 100644
+--- a/net/sunrpc/xprtrdma/verbs.c
++++ b/net/sunrpc/xprtrdma/verbs.c
+@@ -1037,7 +1037,6 @@ out4:
+ kfree(req->rl_sendbuf);
+ out3:
+ kfree(req->rl_rdmabuf);
+- rpcrdma_regbuf_free(req->rl_sendbuf);
+ out2:
+ kfree(req);
+ out1:
+diff --git a/scripts/tracing/ftrace-bisect.sh b/scripts/tracing/ftrace-bisect.sh
+index 926701162bc83..bb4f59262bbe9 100755
+--- a/scripts/tracing/ftrace-bisect.sh
++++ b/scripts/tracing/ftrace-bisect.sh
+@@ -12,7 +12,7 @@
+ # (note, if this is a problem with function_graph tracing, then simply
+ # replace "function" with "function_graph" in the following steps).
+ #
+-# # cd /sys/kernel/debug/tracing
++# # cd /sys/kernel/tracing
+ # # echo schedule > set_ftrace_filter
+ # # echo function > current_tracer
+ #
+@@ -20,22 +20,40 @@
+ #
+ # # echo nop > current_tracer
+ #
+-# # cat available_filter_functions > ~/full-file
++# Starting with v5.1 this can be done with numbers, making it much faster:
++#
++# The old (slow) way, for kernels before v5.1.
++#
++# [old-way] # cat available_filter_functions > ~/full-file
++#
++# [old-way] *** Note *** this process will take several minutes to update the
++# [old-way] filters. Setting multiple functions is an O(n^2) operation, and we
++# [old-way] are dealing with thousands of functions. So go have coffee, talk
++# [old-way] with your coworkers, read facebook. And eventually, this operation
++# [old-way] will end.
++#
++# The new way (using numbers) is an O(n) operation, and usually takes less than a second.
++#
++# seq `wc -l available_filter_functions | cut -d' ' -f1` > ~/full-file
++#
++# This will create a sequence of numbers that match the functions in
++# available_filter_functions, and when echoing in a number into the
++# set_ftrace_filter file, it will enable the corresponding function in
++# O(1) time. Making enabling all functions O(n) where n is the number of
++# functions to enable.
++#
++# For either the new or old way, the rest of the operations remain the same.
++#
+ # # ftrace-bisect ~/full-file ~/test-file ~/non-test-file
+ # # cat ~/test-file > set_ftrace_filter
+ #
+-# *** Note *** this will take several minutes. Setting multiple functions is
+-# an O(n^2) operation, and we are dealing with thousands of functions. So go
+-# have coffee, talk with your coworkers, read facebook. And eventually, this
+-# operation will end.
+-#
+ # # echo function > current_tracer
+ #
+ # If it crashes, we know that ~/test-file has a bad function.
+ #
+ # Reboot back to test kernel.
+ #
+-# # cd /sys/kernel/debug/tracing
++# # cd /sys/kernel/tracing
+ # # mv ~/test-file ~/full-file
+ #
+ # If it didn't crash.
+diff --git a/security/tomoyo/Makefile b/security/tomoyo/Makefile
+index cca5a3012fee2..221eaadffb09c 100644
+--- a/security/tomoyo/Makefile
++++ b/security/tomoyo/Makefile
+@@ -10,7 +10,7 @@ endef
+ quiet_cmd_policy = POLICY $@
+ cmd_policy = ($(call do_policy,profile); $(call do_policy,exception_policy); $(call do_policy,domain_policy); $(call do_policy,manager); $(call do_policy,stat)) >$@
+
+-$(obj)/builtin-policy.h: $(wildcard $(obj)/policy/*.conf $(src)/policy/*.conf.default) FORCE
++$(obj)/builtin-policy.h: $(wildcard $(obj)/policy/*.conf $(srctree)/$(src)/policy/*.conf.default) FORCE
+ $(call if_changed,policy)
+
+ $(obj)/common.o: $(obj)/builtin-policy.h
+diff --git a/sound/soc/fsl/fsl-asoc-card.c b/sound/soc/fsl/fsl-asoc-card.c
+index 39ea9bda13947..db663e7d17a42 100644
+--- a/sound/soc/fsl/fsl-asoc-card.c
++++ b/sound/soc/fsl/fsl-asoc-card.c
+@@ -112,11 +112,11 @@ static const struct snd_soc_dapm_route audio_map[] = {
+
+ static const struct snd_soc_dapm_route audio_map_ac97[] = {
+ /* 1st half -- Normal DAPM routes */
+- {"Playback", NULL, "AC97 Playback"},
+- {"AC97 Capture", NULL, "Capture"},
++ {"AC97 Playback", NULL, "CPU AC97 Playback"},
++ {"CPU AC97 Capture", NULL, "AC97 Capture"},
+ /* 2nd half -- ASRC DAPM routes */
+- {"AC97 Playback", NULL, "ASRC-Playback"},
+- {"ASRC-Capture", NULL, "AC97 Capture"},
++ {"CPU AC97 Playback", NULL, "ASRC-Playback"},
++ {"ASRC-Capture", NULL, "CPU AC97 Capture"},
+ };
+
+ /* Add all possible widgets into here without being redundant */
+diff --git a/sound/soc/fsl/fsl_micfil.c b/sound/soc/fsl/fsl_micfil.c
+index f7f2d29f1bfed..b33746d586337 100644
+--- a/sound/soc/fsl/fsl_micfil.c
++++ b/sound/soc/fsl/fsl_micfil.c
+@@ -87,21 +87,21 @@ static DECLARE_TLV_DB_SCALE(gain_tlv, 0, 100, 0);
+
+ static const struct snd_kcontrol_new fsl_micfil_snd_controls[] = {
+ SOC_SINGLE_SX_TLV("CH0 Volume", REG_MICFIL_OUT_CTRL,
+- MICFIL_OUTGAIN_CHX_SHIFT(0), 0xF, 0x7, gain_tlv),
++ MICFIL_OUTGAIN_CHX_SHIFT(0), 0x8, 0xF, gain_tlv),
+ SOC_SINGLE_SX_TLV("CH1 Volume", REG_MICFIL_OUT_CTRL,
+- MICFIL_OUTGAIN_CHX_SHIFT(1), 0xF, 0x7, gain_tlv),
++ MICFIL_OUTGAIN_CHX_SHIFT(1), 0x8, 0xF, gain_tlv),
+ SOC_SINGLE_SX_TLV("CH2 Volume", REG_MICFIL_OUT_CTRL,
+- MICFIL_OUTGAIN_CHX_SHIFT(2), 0xF, 0x7, gain_tlv),
++ MICFIL_OUTGAIN_CHX_SHIFT(2), 0x8, 0xF, gain_tlv),
+ SOC_SINGLE_SX_TLV("CH3 Volume", REG_MICFIL_OUT_CTRL,
+- MICFIL_OUTGAIN_CHX_SHIFT(3), 0xF, 0x7, gain_tlv),
++ MICFIL_OUTGAIN_CHX_SHIFT(3), 0x8, 0xF, gain_tlv),
+ SOC_SINGLE_SX_TLV("CH4 Volume", REG_MICFIL_OUT_CTRL,
+- MICFIL_OUTGAIN_CHX_SHIFT(4), 0xF, 0x7, gain_tlv),
++ MICFIL_OUTGAIN_CHX_SHIFT(4), 0x8, 0xF, gain_tlv),
+ SOC_SINGLE_SX_TLV("CH5 Volume", REG_MICFIL_OUT_CTRL,
+- MICFIL_OUTGAIN_CHX_SHIFT(5), 0xF, 0x7, gain_tlv),
++ MICFIL_OUTGAIN_CHX_SHIFT(5), 0x8, 0xF, gain_tlv),
+ SOC_SINGLE_SX_TLV("CH6 Volume", REG_MICFIL_OUT_CTRL,
+- MICFIL_OUTGAIN_CHX_SHIFT(6), 0xF, 0x7, gain_tlv),
++ MICFIL_OUTGAIN_CHX_SHIFT(6), 0x8, 0xF, gain_tlv),
+ SOC_SINGLE_SX_TLV("CH7 Volume", REG_MICFIL_OUT_CTRL,
+- MICFIL_OUTGAIN_CHX_SHIFT(7), 0xF, 0x7, gain_tlv),
++ MICFIL_OUTGAIN_CHX_SHIFT(7), 0x8, 0xF, gain_tlv),
+ SOC_ENUM_EXT("MICFIL Quality Select",
+ fsl_micfil_quality_enum,
+ snd_soc_get_enum_double, snd_soc_put_enum_double),
+diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
+index ed18bc69e0954..0ab35c3dc7d2f 100644
+--- a/sound/soc/fsl/fsl_ssi.c
++++ b/sound/soc/fsl/fsl_ssi.c
+@@ -1147,14 +1147,14 @@ static struct snd_soc_dai_driver fsl_ssi_ac97_dai = {
+ .symmetric_channels = 1,
+ .probe = fsl_ssi_dai_probe,
+ .playback = {
+- .stream_name = "AC97 Playback",
++ .stream_name = "CPU AC97 Playback",
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000_48000,
+ .formats = SNDRV_PCM_FMTBIT_S16 | SNDRV_PCM_FMTBIT_S20,
+ },
+ .capture = {
+- .stream_name = "AC97 Capture",
++ .stream_name = "CPU AC97 Capture",
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_48000,
+diff --git a/tools/objtool/check.c b/tools/objtool/check.c
+index ccf5580442d29..dfd67243faac0 100644
+--- a/tools/objtool/check.c
++++ b/tools/objtool/check.c
+@@ -136,6 +136,7 @@ static bool __dead_end_function(struct objtool_file *file, struct symbol *func,
+ "panic",
+ "do_exit",
+ "do_task_dead",
++ "make_task_dead",
+ "__module_put_and_exit",
+ "complete_and_exit",
+ "__reiserfs_panic",
+@@ -143,7 +144,7 @@ static bool __dead_end_function(struct objtool_file *file, struct symbol *func,
+ "fortify_panic",
+ "usercopy_abort",
+ "machine_real_restart",
+- "rewind_stack_do_exit",
++ "rewind_stack_and_make_dead",
+ "cpu_bringup_and_idle",
+ };
+
+diff --git a/tools/testing/selftests/bpf/prog_tests/jeq_infer_not_null.c b/tools/testing/selftests/bpf/prog_tests/jeq_infer_not_null.c
+deleted file mode 100644
+index 3add34df57678..0000000000000
+--- a/tools/testing/selftests/bpf/prog_tests/jeq_infer_not_null.c
++++ /dev/null
+@@ -1,9 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0
+-
+-#include <test_progs.h>
+-#include "jeq_infer_not_null_fail.skel.h"
+-
+-void test_jeq_infer_not_null(void)
+-{
+- RUN_TESTS(jeq_infer_not_null_fail);
+-}
+diff --git a/tools/testing/selftests/bpf/progs/jeq_infer_not_null_fail.c b/tools/testing/selftests/bpf/progs/jeq_infer_not_null_fail.c
+deleted file mode 100644
+index f46965053acb2..0000000000000
+--- a/tools/testing/selftests/bpf/progs/jeq_infer_not_null_fail.c
++++ /dev/null
+@@ -1,42 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0
+-
+-#include "vmlinux.h"
+-#include <bpf/bpf_helpers.h>
+-#include "bpf_misc.h"
+-
+-char _license[] SEC("license") = "GPL";
+-
+-struct {
+- __uint(type, BPF_MAP_TYPE_HASH);
+- __uint(max_entries, 1);
+- __type(key, u64);
+- __type(value, u64);
+-} m_hash SEC(".maps");
+-
+-SEC("?raw_tp")
+-__failure __msg("R8 invalid mem access 'map_value_or_null")
+-int jeq_infer_not_null_ptr_to_btfid(void *ctx)
+-{
+- struct bpf_map *map = (struct bpf_map *)&m_hash;
+- struct bpf_map *inner_map = map->inner_map_meta;
+- u64 key = 0, ret = 0, *val;
+-
+- val = bpf_map_lookup_elem(map, &key);
+- /* Do not mark ptr as non-null if one of them is
+- * PTR_TO_BTF_ID (R9), reject because of invalid
+- * access to map value (R8).
+- *
+- * Here, we need to inline those insns to access
+- * R8 directly, since compiler may use other reg
+- * once it figures out val==inner_map.
+- */
+- asm volatile("r8 = %[val];\n"
+- "r9 = %[inner_map];\n"
+- "if r8 != r9 goto +1;\n"
+- "%[ret] = *(u64 *)(r8 +0);\n"
+- : [ret] "+r"(ret)
+- : [inner_map] "r"(inner_map), [val] "r"(val)
+- : "r8", "r9");
+-
+- return ret;
+-}