author     Mike Pagano <mpagano@gentoo.org>  2018-10-18 06:26:19 -0400
committer  Mike Pagano <mpagano@gentoo.org>  2018-10-18 06:26:19 -0400
commit     a8972d12a3023216e8add6169fe295c303c10b3a (patch)
tree       c9b4e9a4a0a38052461c6b155bbecdfd6d2773f9
parent     Linux patch 4.14.76 (diff)
download   linux-patches-a8972d12.tar.gz
           linux-patches-a8972d12.tar.bz2
           linux-patches-a8972d12.zip
Linux patch 4.14.77 (4.14-84)
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
-rw-r--r--  0000_README              |    4
-rw-r--r--  1076_linux-4.14.77.patch | 4677
2 files changed, 4681 insertions, 0 deletions
diff --git a/0000_README b/0000_README
index 7d1d7b62..832ddd2a 100644
--- a/0000_README
+++ b/0000_README
@@ -347,6 +347,10 @@ Patch: 1075_linux-4.14.76.patch
From: http://www.kernel.org
Desc: Linux 4.14.76
+Patch: 1076_linux-4.14.77.patch
+From: http://www.kernel.org
+Desc: Linux 4.14.77
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1076_linux-4.14.77.patch b/1076_linux-4.14.77.patch
new file mode 100644
index 00000000..da929405
--- /dev/null
+++ b/1076_linux-4.14.77.patch
@@ -0,0 +1,4677 @@
+diff --git a/Documentation/devicetree/bindings/net/macb.txt b/Documentation/devicetree/bindings/net/macb.txt
+index 27966ae741e0..141d8c1f714f 100644
+--- a/Documentation/devicetree/bindings/net/macb.txt
++++ b/Documentation/devicetree/bindings/net/macb.txt
+@@ -10,6 +10,7 @@ Required properties:
+ Use "cdns,pc302-gem" for Picochip picoXcell pc302 and later devices based on
+ the Cadence GEM, or the generic form: "cdns,gem".
+ Use "atmel,sama5d2-gem" for the GEM IP (10/100) available on Atmel sama5d2 SoCs.
++ Use "atmel,sama5d3-macb" for the 10/100Mbit IP available on Atmel sama5d3 SoCs.
+ Use "atmel,sama5d3-gem" for the Gigabit IP available on Atmel sama5d3 SoCs.
+ Use "atmel,sama5d4-gem" for the GEM IP (10/100) available on Atmel sama5d4 SoCs.
+ Use "cdns,zynq-gem" Xilinx Zynq-7xxx SoC.
+diff --git a/Makefile b/Makefile
+index 332dd011b3b9..16d1a18496fb 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 14
+-SUBLEVEL = 76
++SUBLEVEL = 77
+ EXTRAVERSION =
+ NAME = Petit Gorille
+
+diff --git a/arch/arm/boot/dts/sama5d3_emac.dtsi b/arch/arm/boot/dts/sama5d3_emac.dtsi
+index 7cb235ef0fb6..6e9e1c2f9def 100644
+--- a/arch/arm/boot/dts/sama5d3_emac.dtsi
++++ b/arch/arm/boot/dts/sama5d3_emac.dtsi
+@@ -41,7 +41,7 @@
+ };
+
+ macb1: ethernet@f802c000 {
+- compatible = "cdns,at91sam9260-macb", "cdns,macb";
++ compatible = "atmel,sama5d3-macb", "cdns,at91sam9260-macb", "cdns,macb";
+ reg = <0xf802c000 0x100>;
+ interrupts = <35 IRQ_TYPE_LEVEL_HIGH 3>;
+ pinctrl-names = "default";
+diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
+index 9342904cccca..b17ee03d280b 100644
+--- a/arch/arm/include/asm/assembler.h
++++ b/arch/arm/include/asm/assembler.h
+@@ -447,11 +447,23 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
+ .size \name , . - \name
+ .endm
+
++ .macro csdb
++#ifdef CONFIG_THUMB2_KERNEL
++ .inst.w 0xf3af8014
++#else
++ .inst 0xe320f014
++#endif
++ .endm
++
+ .macro check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req
+ #ifndef CONFIG_CPU_USE_DOMAINS
+ adds \tmp, \addr, #\size - 1
+ sbcccs \tmp, \tmp, \limit
+ bcs \bad
++#ifdef CONFIG_CPU_SPECTRE
++ movcs \addr, #0
++ csdb
++#endif
+ #endif
+ .endm
+
+diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
+index 40f5c410fd8c..69772e742a0a 100644
+--- a/arch/arm/include/asm/barrier.h
++++ b/arch/arm/include/asm/barrier.h
+@@ -17,6 +17,12 @@
+ #define isb(option) __asm__ __volatile__ ("isb " #option : : : "memory")
+ #define dsb(option) __asm__ __volatile__ ("dsb " #option : : : "memory")
+ #define dmb(option) __asm__ __volatile__ ("dmb " #option : : : "memory")
++#ifdef CONFIG_THUMB2_KERNEL
++#define CSDB ".inst.w 0xf3af8014"
++#else
++#define CSDB ".inst 0xe320f014"
++#endif
++#define csdb() __asm__ __volatile__(CSDB : : : "memory")
+ #elif defined(CONFIG_CPU_XSC3) || __LINUX_ARM_ARCH__ == 6
+ #define isb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
+ : : "r" (0) : "memory")
+@@ -37,6 +43,13 @@
+ #define dmb(x) __asm__ __volatile__ ("" : : : "memory")
+ #endif
+
++#ifndef CSDB
++#define CSDB
++#endif
++#ifndef csdb
++#define csdb()
++#endif
++
+ #ifdef CONFIG_ARM_HEAVY_MB
+ extern void (*soc_mb)(void);
+ extern void arm_heavy_mb(void);
+@@ -63,6 +76,25 @@ extern void arm_heavy_mb(void);
+ #define __smp_rmb() __smp_mb()
+ #define __smp_wmb() dmb(ishst)
+
++#ifdef CONFIG_CPU_SPECTRE
++static inline unsigned long array_index_mask_nospec(unsigned long idx,
++ unsigned long sz)
++{
++ unsigned long mask;
++
++ asm volatile(
++ "cmp %1, %2\n"
++ " sbc %0, %1, %1\n"
++ CSDB
++ : "=r" (mask)
++ : "r" (idx), "Ir" (sz)
++ : "cc");
++
++ return mask;
++}
++#define array_index_mask_nospec array_index_mask_nospec
++#endif
++
+ #include <asm-generic/barrier.h>
+
+ #endif /* !__ASSEMBLY__ */
+diff --git a/arch/arm/include/asm/bugs.h b/arch/arm/include/asm/bugs.h
+index a97f1ea708d1..73a99c72a930 100644
+--- a/arch/arm/include/asm/bugs.h
++++ b/arch/arm/include/asm/bugs.h
+@@ -10,12 +10,14 @@
+ #ifndef __ASM_BUGS_H
+ #define __ASM_BUGS_H
+
+-#ifdef CONFIG_MMU
+ extern void check_writebuffer_bugs(void);
+
+-#define check_bugs() check_writebuffer_bugs()
++#ifdef CONFIG_MMU
++extern void check_bugs(void);
++extern void check_other_bugs(void);
+ #else
+ #define check_bugs() do { } while (0)
++#define check_other_bugs() do { } while (0)
+ #endif
+
+ #endif
+diff --git a/arch/arm/include/asm/cp15.h b/arch/arm/include/asm/cp15.h
+index 4c9fa72b59f5..07e27f212dc7 100644
+--- a/arch/arm/include/asm/cp15.h
++++ b/arch/arm/include/asm/cp15.h
+@@ -65,6 +65,9 @@
+ #define __write_sysreg(v, r, w, c, t) asm volatile(w " " c : : "r" ((t)(v)))
+ #define write_sysreg(v, ...) __write_sysreg(v, __VA_ARGS__)
+
++#define BPIALL __ACCESS_CP15(c7, 0, c5, 6)
++#define ICIALLU __ACCESS_CP15(c7, 0, c5, 0)
++
+ extern unsigned long cr_alignment; /* defined in entry-armv.S */
+
+ static inline unsigned long get_cr(void)
+diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
+index 441933311bbf..3379c2c684c2 100644
+--- a/arch/arm/include/asm/cputype.h
++++ b/arch/arm/include/asm/cputype.h
+@@ -77,8 +77,16 @@
+ #define ARM_CPU_PART_CORTEX_A12 0x4100c0d0
+ #define ARM_CPU_PART_CORTEX_A17 0x4100c0e0
+ #define ARM_CPU_PART_CORTEX_A15 0x4100c0f0
++#define ARM_CPU_PART_CORTEX_A53 0x4100d030
++#define ARM_CPU_PART_CORTEX_A57 0x4100d070
++#define ARM_CPU_PART_CORTEX_A72 0x4100d080
++#define ARM_CPU_PART_CORTEX_A73 0x4100d090
++#define ARM_CPU_PART_CORTEX_A75 0x4100d0a0
+ #define ARM_CPU_PART_MASK 0xff00fff0
+
++/* Broadcom cores */
++#define ARM_CPU_PART_BRAHMA_B15 0x420000f0
++
+ /* DEC implemented cores */
+ #define ARM_CPU_PART_SA1100 0x4400a110
+
+diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h
+index 14d68a4d826f..b598e666da4c 100644
+--- a/arch/arm/include/asm/kvm_asm.h
++++ b/arch/arm/include/asm/kvm_asm.h
+@@ -61,8 +61,6 @@ struct kvm_vcpu;
+ extern char __kvm_hyp_init[];
+ extern char __kvm_hyp_init_end[];
+
+-extern char __kvm_hyp_vector[];
+-
+ extern void __kvm_flush_vm_context(void);
+ extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
+ extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
+diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
+index 65572e14306c..b60232639984 100644
+--- a/arch/arm/include/asm/kvm_host.h
++++ b/arch/arm/include/asm/kvm_host.h
+@@ -21,6 +21,7 @@
+
+ #include <linux/types.h>
+ #include <linux/kvm_types.h>
++#include <asm/cputype.h>
+ #include <asm/kvm.h>
+ #include <asm/kvm_asm.h>
+ #include <asm/kvm_mmio.h>
+@@ -298,8 +299,17 @@ int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
+
+ static inline bool kvm_arm_harden_branch_predictor(void)
+ {
+- /* No way to detect it yet, pretend it is not there. */
+- return false;
++ switch(read_cpuid_part()) {
++#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
++ case ARM_CPU_PART_BRAHMA_B15:
++ case ARM_CPU_PART_CORTEX_A12:
++ case ARM_CPU_PART_CORTEX_A15:
++ case ARM_CPU_PART_CORTEX_A17:
++ return true;
++#endif
++ default:
++ return false;
++ }
+ }
+
+ #define KVM_SSBD_UNKNOWN -1
+diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
+index 8a098e65f5f8..ca62f95f3b4c 100644
+--- a/arch/arm/include/asm/kvm_mmu.h
++++ b/arch/arm/include/asm/kvm_mmu.h
+@@ -246,7 +246,28 @@ static inline int kvm_read_guest_lock(struct kvm *kvm,
+
+ static inline void *kvm_get_hyp_vector(void)
+ {
+- return kvm_ksym_ref(__kvm_hyp_vector);
++ switch(read_cpuid_part()) {
++#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
++ case ARM_CPU_PART_CORTEX_A12:
++ case ARM_CPU_PART_CORTEX_A17:
++ {
++ extern char __kvm_hyp_vector_bp_inv[];
++ return kvm_ksym_ref(__kvm_hyp_vector_bp_inv);
++ }
++
++ case ARM_CPU_PART_BRAHMA_B15:
++ case ARM_CPU_PART_CORTEX_A15:
++ {
++ extern char __kvm_hyp_vector_ic_inv[];
++ return kvm_ksym_ref(__kvm_hyp_vector_ic_inv);
++ }
++#endif
++ default:
++ {
++ extern char __kvm_hyp_vector[];
++ return kvm_ksym_ref(__kvm_hyp_vector);
++ }
++ }
+ }
+
+ static inline int kvm_map_vectors(void)
+diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h
+index f2e1af45bd6f..e25f4392e1b2 100644
+--- a/arch/arm/include/asm/proc-fns.h
++++ b/arch/arm/include/asm/proc-fns.h
+@@ -36,6 +36,10 @@ extern struct processor {
+ * Set up any processor specifics
+ */
+ void (*_proc_init)(void);
++ /*
++ * Check for processor bugs
++ */
++ void (*check_bugs)(void);
+ /*
+ * Disable any processor specifics
+ */
+diff --git a/arch/arm/include/asm/system_misc.h b/arch/arm/include/asm/system_misc.h
+index 78f6db114faf..8e76db83c498 100644
+--- a/arch/arm/include/asm/system_misc.h
++++ b/arch/arm/include/asm/system_misc.h
+@@ -8,6 +8,7 @@
+ #include <linux/linkage.h>
+ #include <linux/irqflags.h>
+ #include <linux/reboot.h>
++#include <linux/percpu.h>
+
+ extern void cpu_init(void);
+
+@@ -15,6 +16,20 @@ void soft_restart(unsigned long);
+ extern void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
+ extern void (*arm_pm_idle)(void);
+
++#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
++typedef void (*harden_branch_predictor_fn_t)(void);
++DECLARE_PER_CPU(harden_branch_predictor_fn_t, harden_branch_predictor_fn);
++static inline void harden_branch_predictor(void)
++{
++ harden_branch_predictor_fn_t fn = per_cpu(harden_branch_predictor_fn,
++ smp_processor_id());
++ if (fn)
++ fn();
++}
++#else
++#define harden_branch_predictor() do { } while (0)
++#endif
++
+ #define UDBG_UNDEFINED (1 << 0)
+ #define UDBG_SYSCALL (1 << 1)
+ #define UDBG_BADABORT (1 << 2)
+diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
+index 776757d1604a..57d2ad9c75ca 100644
+--- a/arch/arm/include/asm/thread_info.h
++++ b/arch/arm/include/asm/thread_info.h
+@@ -126,8 +126,8 @@ struct user_vfp_exc;
+
+ extern int vfp_preserve_user_clear_hwstate(struct user_vfp __user *,
+ struct user_vfp_exc __user *);
+-extern int vfp_restore_user_hwstate(struct user_vfp __user *,
+- struct user_vfp_exc __user *);
++extern int vfp_restore_user_hwstate(struct user_vfp *,
++ struct user_vfp_exc *);
+ #endif
+
+ /*
+diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
+index 0bf2347495f1..4140be431087 100644
+--- a/arch/arm/include/asm/uaccess.h
++++ b/arch/arm/include/asm/uaccess.h
+@@ -84,6 +84,13 @@ static inline void set_fs(mm_segment_t fs)
+ : "cc"); \
+ flag; })
+
++/*
++ * This is a type: either unsigned long, if the argument fits into
++ * that type, or otherwise unsigned long long.
++ */
++#define __inttype(x) \
++ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
++
+ /*
+ * Single-value transfer routines. They automatically use the right
+ * size if we just have the right pointer type. Note that the functions
+@@ -153,7 +160,7 @@ extern int __get_user_64t_4(void *);
+ ({ \
+ unsigned long __limit = current_thread_info()->addr_limit - 1; \
+ register const typeof(*(p)) __user *__p asm("r0") = (p);\
+- register typeof(x) __r2 asm("r2"); \
++ register __inttype(x) __r2 asm("r2"); \
+ register unsigned long __l asm("r1") = __limit; \
+ register int __e asm("r0"); \
+ unsigned int __ua_flags = uaccess_save_and_enable(); \
+@@ -243,6 +250,16 @@ static inline void set_fs(mm_segment_t fs)
+ #define user_addr_max() \
+ (uaccess_kernel() ? ~0UL : get_fs())
+
++#ifdef CONFIG_CPU_SPECTRE
++/*
++ * When mitigating Spectre variant 1, it is not worth fixing the non-
++ * verifying accessors, because we need to add verification of the
++ * address space there. Force these to use the standard get_user()
++ * version instead.
++ */
++#define __get_user(x, ptr) get_user(x, ptr)
++#else
++
+ /*
+ * The "__xxx" versions of the user access functions do not verify the
+ * address space - it must have been done previously with a separate
+@@ -259,12 +276,6 @@ static inline void set_fs(mm_segment_t fs)
+ __gu_err; \
+ })
+
+-#define __get_user_error(x, ptr, err) \
+-({ \
+- __get_user_err((x), (ptr), err); \
+- (void) 0; \
+-})
+-
+ #define __get_user_err(x, ptr, err) \
+ do { \
+ unsigned long __gu_addr = (unsigned long)(ptr); \
+@@ -324,6 +335,7 @@ do { \
+
+ #define __get_user_asm_word(x, addr, err) \
+ __get_user_asm(x, addr, err, ldr)
++#endif
+
+
+ #define __put_user_switch(x, ptr, __err, __fn) \
+diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
+index 499f978fb1fd..50de918252b7 100644
+--- a/arch/arm/kernel/Makefile
++++ b/arch/arm/kernel/Makefile
+@@ -31,6 +31,7 @@ else
+ obj-y += entry-armv.o
+ endif
+
++obj-$(CONFIG_MMU) += bugs.o
+ obj-$(CONFIG_CPU_IDLE) += cpuidle.o
+ obj-$(CONFIG_ISA_DMA_API) += dma.o
+ obj-$(CONFIG_FIQ) += fiq.o fiqasm.o
+diff --git a/arch/arm/kernel/bugs.c b/arch/arm/kernel/bugs.c
+new file mode 100644
+index 000000000000..7be511310191
+--- /dev/null
++++ b/arch/arm/kernel/bugs.c
+@@ -0,0 +1,18 @@
++// SPDX-Identifier: GPL-2.0
++#include <linux/init.h>
++#include <asm/bugs.h>
++#include <asm/proc-fns.h>
++
++void check_other_bugs(void)
++{
++#ifdef MULTI_CPU
++ if (processor.check_bugs)
++ processor.check_bugs();
++#endif
++}
++
++void __init check_bugs(void)
++{
++ check_writebuffer_bugs();
++ check_other_bugs();
++}
+diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
+index 99c908226065..54c10503d71f 100644
+--- a/arch/arm/kernel/entry-common.S
++++ b/arch/arm/kernel/entry-common.S
+@@ -241,9 +241,7 @@ local_restart:
+ tst r10, #_TIF_SYSCALL_WORK @ are we tracing syscalls?
+ bne __sys_trace
+
+- cmp scno, #NR_syscalls @ check upper syscall limit
+- badr lr, ret_fast_syscall @ return address
+- ldrcc pc, [tbl, scno, lsl #2] @ call sys_* routine
++ invoke_syscall tbl, scno, r10, ret_fast_syscall
+
+ add r1, sp, #S_OFF
+ 2: cmp scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
+@@ -277,14 +275,8 @@ __sys_trace:
+ mov r1, scno
+ add r0, sp, #S_OFF
+ bl syscall_trace_enter
+-
+- badr lr, __sys_trace_return @ return address
+- mov scno, r0 @ syscall number (possibly new)
+- add r1, sp, #S_R0 + S_OFF @ pointer to regs
+- cmp scno, #NR_syscalls @ check upper syscall limit
+- ldmccia r1, {r0 - r6} @ have to reload r0 - r6
+- stmccia sp, {r4, r5} @ and update the stack args
+- ldrcc pc, [tbl, scno, lsl #2] @ call sys_* routine
++ mov scno, r0
++ invoke_syscall tbl, scno, r10, __sys_trace_return, reload=1
+ cmp scno, #-1 @ skip the syscall?
+ bne 2b
+ add sp, sp, #S_OFF @ restore stack
+@@ -362,6 +354,10 @@ sys_syscall:
+ bic scno, r0, #__NR_OABI_SYSCALL_BASE
+ cmp scno, #__NR_syscall - __NR_SYSCALL_BASE
+ cmpne scno, #NR_syscalls @ check range
++#ifdef CONFIG_CPU_SPECTRE
++ movhs scno, #0
++ csdb
++#endif
+ stmloia sp, {r5, r6} @ shuffle args
+ movlo r0, r1
+ movlo r1, r2
+diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
+index 0f07579af472..773424843d6e 100644
+--- a/arch/arm/kernel/entry-header.S
++++ b/arch/arm/kernel/entry-header.S
+@@ -378,6 +378,31 @@
+ #endif
+ .endm
+
++ .macro invoke_syscall, table, nr, tmp, ret, reload=0
++#ifdef CONFIG_CPU_SPECTRE
++ mov \tmp, \nr
++ cmp \tmp, #NR_syscalls @ check upper syscall limit
++ movcs \tmp, #0
++ csdb
++ badr lr, \ret @ return address
++ .if \reload
++ add r1, sp, #S_R0 + S_OFF @ pointer to regs
++ ldmccia r1, {r0 - r6} @ reload r0-r6
++ stmccia sp, {r4, r5} @ update stack arguments
++ .endif
++ ldrcc pc, [\table, \tmp, lsl #2] @ call sys_* routine
++#else
++ cmp \nr, #NR_syscalls @ check upper syscall limit
++ badr lr, \ret @ return address
++ .if \reload
++ add r1, sp, #S_R0 + S_OFF @ pointer to regs
++ ldmccia r1, {r0 - r6} @ reload r0-r6
++ stmccia sp, {r4, r5} @ update stack arguments
++ .endif
++ ldrcc pc, [\table, \nr, lsl #2] @ call sys_* routine
++#endif
++ .endm
++
+ /*
+ * These are the registers used in the syscall handler, and allow us to
+ * have in theory up to 7 arguments to a function - r0 to r6.
+diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
+index b67ae12503f3..cdfe52b15a0a 100644
+--- a/arch/arm/kernel/signal.c
++++ b/arch/arm/kernel/signal.c
+@@ -149,22 +149,18 @@ static int preserve_vfp_context(struct vfp_sigframe __user *frame)
+
+ static int restore_vfp_context(char __user **auxp)
+ {
+- struct vfp_sigframe __user *frame =
+- (struct vfp_sigframe __user *)*auxp;
+- unsigned long magic;
+- unsigned long size;
+- int err = 0;
+-
+- __get_user_error(magic, &frame->magic, err);
+- __get_user_error(size, &frame->size, err);
++ struct vfp_sigframe frame;
++ int err;
+
++ err = __copy_from_user(&frame, *auxp, sizeof(frame));
+ if (err)
+- return -EFAULT;
+- if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE)
++ return err;
++
++ if (frame.magic != VFP_MAGIC || frame.size != VFP_STORAGE_SIZE)
+ return -EINVAL;
+
+- *auxp += size;
+- return vfp_restore_user_hwstate(&frame->ufp, &frame->ufp_exc);
++ *auxp += sizeof(frame);
++ return vfp_restore_user_hwstate(&frame.ufp, &frame.ufp_exc);
+ }
+
+ #endif
+@@ -184,6 +180,7 @@ struct rt_sigframe {
+
+ static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
+ {
++ struct sigcontext context;
+ char __user *aux;
+ sigset_t set;
+ int err;
+@@ -192,23 +189,26 @@ static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
+ if (err == 0)
+ set_current_blocked(&set);
+
+- __get_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err);
+- __get_user_error(regs->ARM_r1, &sf->uc.uc_mcontext.arm_r1, err);
+- __get_user_error(regs->ARM_r2, &sf->uc.uc_mcontext.arm_r2, err);
+- __get_user_error(regs->ARM_r3, &sf->uc.uc_mcontext.arm_r3, err);
+- __get_user_error(regs->ARM_r4, &sf->uc.uc_mcontext.arm_r4, err);
+- __get_user_error(regs->ARM_r5, &sf->uc.uc_mcontext.arm_r5, err);
+- __get_user_error(regs->ARM_r6, &sf->uc.uc_mcontext.arm_r6, err);
+- __get_user_error(regs->ARM_r7, &sf->uc.uc_mcontext.arm_r7, err);
+- __get_user_error(regs->ARM_r8, &sf->uc.uc_mcontext.arm_r8, err);
+- __get_user_error(regs->ARM_r9, &sf->uc.uc_mcontext.arm_r9, err);
+- __get_user_error(regs->ARM_r10, &sf->uc.uc_mcontext.arm_r10, err);
+- __get_user_error(regs->ARM_fp, &sf->uc.uc_mcontext.arm_fp, err);
+- __get_user_error(regs->ARM_ip, &sf->uc.uc_mcontext.arm_ip, err);
+- __get_user_error(regs->ARM_sp, &sf->uc.uc_mcontext.arm_sp, err);
+- __get_user_error(regs->ARM_lr, &sf->uc.uc_mcontext.arm_lr, err);
+- __get_user_error(regs->ARM_pc, &sf->uc.uc_mcontext.arm_pc, err);
+- __get_user_error(regs->ARM_cpsr, &sf->uc.uc_mcontext.arm_cpsr, err);
++ err |= __copy_from_user(&context, &sf->uc.uc_mcontext, sizeof(context));
++ if (err == 0) {
++ regs->ARM_r0 = context.arm_r0;
++ regs->ARM_r1 = context.arm_r1;
++ regs->ARM_r2 = context.arm_r2;
++ regs->ARM_r3 = context.arm_r3;
++ regs->ARM_r4 = context.arm_r4;
++ regs->ARM_r5 = context.arm_r5;
++ regs->ARM_r6 = context.arm_r6;
++ regs->ARM_r7 = context.arm_r7;
++ regs->ARM_r8 = context.arm_r8;
++ regs->ARM_r9 = context.arm_r9;
++ regs->ARM_r10 = context.arm_r10;
++ regs->ARM_fp = context.arm_fp;
++ regs->ARM_ip = context.arm_ip;
++ regs->ARM_sp = context.arm_sp;
++ regs->ARM_lr = context.arm_lr;
++ regs->ARM_pc = context.arm_pc;
++ regs->ARM_cpsr = context.arm_cpsr;
++ }
+
+ err |= !valid_user_regs(regs);
+
+diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
+index c9a0a5299827..e61af0600133 100644
+--- a/arch/arm/kernel/smp.c
++++ b/arch/arm/kernel/smp.c
+@@ -31,6 +31,7 @@
+ #include <linux/irq_work.h>
+
+ #include <linux/atomic.h>
++#include <asm/bugs.h>
+ #include <asm/smp.h>
+ #include <asm/cacheflush.h>
+ #include <asm/cpu.h>
+@@ -402,6 +403,9 @@ asmlinkage void secondary_start_kernel(void)
+ * before we continue - which happens after __cpu_up returns.
+ */
+ set_cpu_online(cpu, true);
++
++ check_other_bugs();
++
+ complete(&cpu_running);
+
+ local_irq_enable();
+diff --git a/arch/arm/kernel/suspend.c b/arch/arm/kernel/suspend.c
+index a40ebb7c0896..d08099269e35 100644
+--- a/arch/arm/kernel/suspend.c
++++ b/arch/arm/kernel/suspend.c
+@@ -3,6 +3,7 @@
+ #include <linux/slab.h>
+ #include <linux/mm_types.h>
+
++#include <asm/bugs.h>
+ #include <asm/cacheflush.h>
+ #include <asm/idmap.h>
+ #include <asm/pgalloc.h>
+@@ -36,6 +37,7 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
+ cpu_switch_mm(mm->pgd, mm);
+ local_flush_bp_all();
+ local_flush_tlb_all();
++ check_other_bugs();
+ }
+
+ return ret;
+diff --git a/arch/arm/kernel/sys_oabi-compat.c b/arch/arm/kernel/sys_oabi-compat.c
+index b9786f491873..4abe4909417f 100644
+--- a/arch/arm/kernel/sys_oabi-compat.c
++++ b/arch/arm/kernel/sys_oabi-compat.c
+@@ -329,9 +329,11 @@ asmlinkage long sys_oabi_semtimedop(int semid,
+ return -ENOMEM;
+ err = 0;
+ for (i = 0; i < nsops; i++) {
+- __get_user_error(sops[i].sem_num, &tsops->sem_num, err);
+- __get_user_error(sops[i].sem_op, &tsops->sem_op, err);
+- __get_user_error(sops[i].sem_flg, &tsops->sem_flg, err);
++ struct oabi_sembuf osb;
++ err |= __copy_from_user(&osb, tsops, sizeof(osb));
++ sops[i].sem_num = osb.sem_num;
++ sops[i].sem_op = osb.sem_op;
++ sops[i].sem_flg = osb.sem_flg;
+ tsops++;
+ }
+ if (timeout) {
+diff --git a/arch/arm/kvm/hyp/hyp-entry.S b/arch/arm/kvm/hyp/hyp-entry.S
+index 95a2faefc070..aa3f9a9837ac 100644
+--- a/arch/arm/kvm/hyp/hyp-entry.S
++++ b/arch/arm/kvm/hyp/hyp-entry.S
+@@ -16,6 +16,7 @@
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
++#include <linux/arm-smccc.h>
+ #include <linux/linkage.h>
+ #include <asm/kvm_arm.h>
+ #include <asm/kvm_asm.h>
+@@ -71,6 +72,90 @@ __kvm_hyp_vector:
+ W(b) hyp_irq
+ W(b) hyp_fiq
+
++#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
++ .align 5
++__kvm_hyp_vector_ic_inv:
++ .global __kvm_hyp_vector_ic_inv
++
++ /*
++ * We encode the exception entry in the bottom 3 bits of
++ * SP, and we have to guarantee to be 8 bytes aligned.
++ */
++ W(add) sp, sp, #1 /* Reset 7 */
++ W(add) sp, sp, #1 /* Undef 6 */
++ W(add) sp, sp, #1 /* Syscall 5 */
++ W(add) sp, sp, #1 /* Prefetch abort 4 */
++ W(add) sp, sp, #1 /* Data abort 3 */
++ W(add) sp, sp, #1 /* HVC 2 */
++ W(add) sp, sp, #1 /* IRQ 1 */
++ W(nop) /* FIQ 0 */
++
++ mcr p15, 0, r0, c7, c5, 0 /* ICIALLU */
++ isb
++
++ b decode_vectors
++
++ .align 5
++__kvm_hyp_vector_bp_inv:
++ .global __kvm_hyp_vector_bp_inv
++
++ /*
++ * We encode the exception entry in the bottom 3 bits of
++ * SP, and we have to guarantee to be 8 bytes aligned.
++ */
++ W(add) sp, sp, #1 /* Reset 7 */
++ W(add) sp, sp, #1 /* Undef 6 */
++ W(add) sp, sp, #1 /* Syscall 5 */
++ W(add) sp, sp, #1 /* Prefetch abort 4 */
++ W(add) sp, sp, #1 /* Data abort 3 */
++ W(add) sp, sp, #1 /* HVC 2 */
++ W(add) sp, sp, #1 /* IRQ 1 */
++ W(nop) /* FIQ 0 */
++
++ mcr p15, 0, r0, c7, c5, 6 /* BPIALL */
++ isb
++
++decode_vectors:
++
++#ifdef CONFIG_THUMB2_KERNEL
++ /*
++ * Yet another silly hack: Use VPIDR as a temp register.
++ * Thumb2 is really a pain, as SP cannot be used with most
++ * of the bitwise instructions. The vect_br macro ensures
++ * things gets cleaned-up.
++ */
++ mcr p15, 4, r0, c0, c0, 0 /* VPIDR */
++ mov r0, sp
++ and r0, r0, #7
++ sub sp, sp, r0
++ push {r1, r2}
++ mov r1, r0
++ mrc p15, 4, r0, c0, c0, 0 /* VPIDR */
++ mrc p15, 0, r2, c0, c0, 0 /* MIDR */
++ mcr p15, 4, r2, c0, c0, 0 /* VPIDR */
++#endif
++
++.macro vect_br val, targ
++ARM( eor sp, sp, #\val )
++ARM( tst sp, #7 )
++ARM( eorne sp, sp, #\val )
++
++THUMB( cmp r1, #\val )
++THUMB( popeq {r1, r2} )
++
++ beq \targ
++.endm
++
++ vect_br 0, hyp_fiq
++ vect_br 1, hyp_irq
++ vect_br 2, hyp_hvc
++ vect_br 3, hyp_dabt
++ vect_br 4, hyp_pabt
++ vect_br 5, hyp_svc
++ vect_br 6, hyp_undef
++ vect_br 7, hyp_reset
++#endif
++
+ .macro invalid_vector label, cause
+ .align
+ \label: mov r0, #\cause
+@@ -118,7 +203,7 @@ hyp_hvc:
+ lsr r2, r2, #16
+ and r2, r2, #0xff
+ cmp r2, #0
+- bne guest_trap @ Guest called HVC
++ bne guest_hvc_trap @ Guest called HVC
+
+ /*
+ * Getting here means host called HVC, we shift parameters and branch
+@@ -149,7 +234,14 @@ hyp_hvc:
+ bx ip
+
+ 1:
+- push {lr}
++ /*
++ * Pushing r2 here is just a way of keeping the stack aligned to
++ * 8 bytes on any path that can trigger a HYP exception. Here,
++ * we may well be about to jump into the guest, and the guest
++ * exit would otherwise be badly decoded by our fancy
++ * "decode-exception-without-a-branch" code...
++ */
++ push {r2, lr}
+
+ mov lr, r0
+ mov r0, r1
+@@ -159,7 +251,21 @@ hyp_hvc:
+ THUMB( orr lr, #1)
+ blx lr @ Call the HYP function
+
+- pop {lr}
++ pop {r2, lr}
++ eret
++
++guest_hvc_trap:
++ movw r2, #:lower16:ARM_SMCCC_ARCH_WORKAROUND_1
++ movt r2, #:upper16:ARM_SMCCC_ARCH_WORKAROUND_1
++ ldr r0, [sp] @ Guest's r0
++ teq r0, r2
++ bne guest_trap
++ add sp, sp, #12
++ @ Returns:
++ @ r0 = 0
++ @ r1 = HSR value (perfectly predictable)
++ @ r2 = ARM_SMCCC_ARCH_WORKAROUND_1
++ mov r0, #0
+ eret
+
+ guest_trap:
+diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
+index 7a4b06049001..a826df3d3814 100644
+--- a/arch/arm/lib/copy_from_user.S
++++ b/arch/arm/lib/copy_from_user.S
+@@ -90,6 +90,15 @@
+ .text
+
+ ENTRY(arm_copy_from_user)
++#ifdef CONFIG_CPU_SPECTRE
++ get_thread_info r3
++ ldr r3, [r3, #TI_ADDR_LIMIT]
++ adds ip, r1, r2 @ ip=addr+size
++ sub r3, r3, #1 @ addr_limit - 1
++ cmpcc ip, r3 @ if (addr+size > addr_limit - 1)
++ movcs r1, #0 @ addr = NULL
++ csdb
++#endif
+
+ #include "copy_template.S"
+
+diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
+index fd9077a74fce..50e0b45a22db 100644
+--- a/arch/arm/mm/Kconfig
++++ b/arch/arm/mm/Kconfig
+@@ -415,6 +415,7 @@ config CPU_V7
+ select CPU_CP15_MPU if !MMU
+ select CPU_HAS_ASID if MMU
+ select CPU_PABRT_V7
++ select CPU_SPECTRE if MMU
+ select CPU_THUMB_CAPABLE
+ select CPU_TLB_V7 if MMU
+
+@@ -826,6 +827,28 @@ config CPU_BPREDICT_DISABLE
+ help
+ Say Y here to disable branch prediction. If unsure, say N.
+
++config CPU_SPECTRE
++ bool
++
++config HARDEN_BRANCH_PREDICTOR
++ bool "Harden the branch predictor against aliasing attacks" if EXPERT
++ depends on CPU_SPECTRE
++ default y
++ help
++ Speculation attacks against some high-performance processors rely
++ on being able to manipulate the branch predictor for a victim
++ context by executing aliasing branches in the attacker context.
++ Such attacks can be partially mitigated against by clearing
++ internal branch predictor state and limiting the prediction
++ logic in some situations.
++
++ This config option will take CPU-specific actions to harden
++ the branch predictor against aliasing attacks and may rely on
++ specific instruction sequences or control bits being set by
++ the system firmware.
++
++ If unsure, say Y.
++
+ config TLS_REG_EMUL
+ bool
+ select NEED_KUSER_HELPERS
+diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
+index f353ee569f6b..93a622a18cba 100644
+--- a/arch/arm/mm/Makefile
++++ b/arch/arm/mm/Makefile
+@@ -95,7 +95,7 @@ obj-$(CONFIG_CPU_MOHAWK) += proc-mohawk.o
+ obj-$(CONFIG_CPU_FEROCEON) += proc-feroceon.o
+ obj-$(CONFIG_CPU_V6) += proc-v6.o
+ obj-$(CONFIG_CPU_V6K) += proc-v6.o
+-obj-$(CONFIG_CPU_V7) += proc-v7.o
++obj-$(CONFIG_CPU_V7) += proc-v7.o proc-v7-bugs.o
+ obj-$(CONFIG_CPU_V7M) += proc-v7m.o
+
+ AFLAGS_proc-v6.o :=-Wa,-march=armv6
+diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
+index 42f585379e19..49b1b8048635 100644
+--- a/arch/arm/mm/fault.c
++++ b/arch/arm/mm/fault.c
+@@ -164,6 +164,9 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
+ {
+ struct siginfo si;
+
++ if (addr > TASK_SIZE)
++ harden_branch_predictor();
++
+ #ifdef CONFIG_DEBUG_USER
+ if (((user_debug & UDBG_SEGV) && (sig == SIGSEGV)) ||
+ ((user_debug & UDBG_BUS) && (sig == SIGBUS))) {
+diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
+index f10e31d0730a..81d0efb055c6 100644
+--- a/arch/arm/mm/proc-macros.S
++++ b/arch/arm/mm/proc-macros.S
+@@ -273,13 +273,14 @@
+ mcr p15, 0, ip, c7, c10, 4 @ data write barrier
+ .endm
+
+-.macro define_processor_functions name:req, dabort:req, pabort:req, nommu=0, suspend=0
++.macro define_processor_functions name:req, dabort:req, pabort:req, nommu=0, suspend=0, bugs=0
+ .type \name\()_processor_functions, #object
+ .align 2
+ ENTRY(\name\()_processor_functions)
+ .word \dabort
+ .word \pabort
+ .word cpu_\name\()_proc_init
++ .word \bugs
+ .word cpu_\name\()_proc_fin
+ .word cpu_\name\()_reset
+ .word cpu_\name\()_do_idle
+diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S
+index c6141a5435c3..f8d45ad2a515 100644
+--- a/arch/arm/mm/proc-v7-2level.S
++++ b/arch/arm/mm/proc-v7-2level.S
+@@ -41,11 +41,6 @@
+ * even on Cortex-A8 revisions not affected by 430973.
+ * If IBE is not set, the flush BTAC/BTB won't do anything.
+ */
+-ENTRY(cpu_ca8_switch_mm)
+-#ifdef CONFIG_MMU
+- mov r2, #0
+- mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB
+-#endif
+ ENTRY(cpu_v7_switch_mm)
+ #ifdef CONFIG_MMU
+ mmid r1, r1 @ get mm->context.id
+@@ -66,7 +61,6 @@ ENTRY(cpu_v7_switch_mm)
+ #endif
+ bx lr
+ ENDPROC(cpu_v7_switch_mm)
+-ENDPROC(cpu_ca8_switch_mm)
+
+ /*
+ * cpu_v7_set_pte_ext(ptep, pte)
+diff --git a/arch/arm/mm/proc-v7-bugs.c b/arch/arm/mm/proc-v7-bugs.c
+new file mode 100644
+index 000000000000..5544b82a2e7a
+--- /dev/null
++++ b/arch/arm/mm/proc-v7-bugs.c
+@@ -0,0 +1,174 @@
++// SPDX-License-Identifier: GPL-2.0
++#include <linux/arm-smccc.h>
++#include <linux/kernel.h>
++#include <linux/psci.h>
++#include <linux/smp.h>
++
++#include <asm/cp15.h>
++#include <asm/cputype.h>
++#include <asm/proc-fns.h>
++#include <asm/system_misc.h>
++
++#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
++DEFINE_PER_CPU(harden_branch_predictor_fn_t, harden_branch_predictor_fn);
++
++extern void cpu_v7_iciallu_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
++extern void cpu_v7_bpiall_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
++extern void cpu_v7_smc_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
++extern void cpu_v7_hvc_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
++
++static void harden_branch_predictor_bpiall(void)
++{
++ write_sysreg(0, BPIALL);
++}
++
++static void harden_branch_predictor_iciallu(void)
++{
++ write_sysreg(0, ICIALLU);
++}
++
++static void __maybe_unused call_smc_arch_workaround_1(void)
++{
++ arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
++}
++
++static void __maybe_unused call_hvc_arch_workaround_1(void)
++{
++ arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
++}
++
++static void cpu_v7_spectre_init(void)
++{
++ const char *spectre_v2_method = NULL;
++ int cpu = smp_processor_id();
++
++ if (per_cpu(harden_branch_predictor_fn, cpu))
++ return;
++
++ switch (read_cpuid_part()) {
++ case ARM_CPU_PART_CORTEX_A8:
++ case ARM_CPU_PART_CORTEX_A9:
++ case ARM_CPU_PART_CORTEX_A12:
++ case ARM_CPU_PART_CORTEX_A17:
++ case ARM_CPU_PART_CORTEX_A73:
++ case ARM_CPU_PART_CORTEX_A75:
++ if (processor.switch_mm != cpu_v7_bpiall_switch_mm)
++ goto bl_error;
++ per_cpu(harden_branch_predictor_fn, cpu) =
++ harden_branch_predictor_bpiall;
++ spectre_v2_method = "BPIALL";
++ break;
++
++ case ARM_CPU_PART_CORTEX_A15:
++ case ARM_CPU_PART_BRAHMA_B15:
++ if (processor.switch_mm != cpu_v7_iciallu_switch_mm)
++ goto bl_error;
++ per_cpu(harden_branch_predictor_fn, cpu) =
++ harden_branch_predictor_iciallu;
++ spectre_v2_method = "ICIALLU";
++ break;
++
++#ifdef CONFIG_ARM_PSCI
++ default:
++ /* Other ARM CPUs require no workaround */
++ if (read_cpuid_implementor() == ARM_CPU_IMP_ARM)
++ break;
++ /* fallthrough */
++ /* Cortex A57/A72 require firmware workaround */
++ case ARM_CPU_PART_CORTEX_A57:
++ case ARM_CPU_PART_CORTEX_A72: {
++ struct arm_smccc_res res;
++
++ if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
++ break;
++
++ switch (psci_ops.conduit) {
++ case PSCI_CONDUIT_HVC:
++ arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
++ ARM_SMCCC_ARCH_WORKAROUND_1, &res);
++ if ((int)res.a0 != 0)
++ break;
++ if (processor.switch_mm != cpu_v7_hvc_switch_mm && cpu)
++ goto bl_error;
++ per_cpu(harden_branch_predictor_fn, cpu) =
++ call_hvc_arch_workaround_1;
++ processor.switch_mm = cpu_v7_hvc_switch_mm;
++ spectre_v2_method = "hypervisor";
++ break;
++
++ case PSCI_CONDUIT_SMC:
++ arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
++ ARM_SMCCC_ARCH_WORKAROUND_1, &res);
++ if ((int)res.a0 != 0)
++ break;
++ if (processor.switch_mm != cpu_v7_smc_switch_mm && cpu)
++ goto bl_error;
++ per_cpu(harden_branch_predictor_fn, cpu) =
++ call_smc_arch_workaround_1;
++ processor.switch_mm = cpu_v7_smc_switch_mm;
++ spectre_v2_method = "firmware";
++ break;
++
++ default:
++ break;
++ }
++ }
++#endif
++ }
++
++ if (spectre_v2_method)
++ pr_info("CPU%u: Spectre v2: using %s workaround\n",
++ smp_processor_id(), spectre_v2_method);
++ return;
++
++bl_error:
++ pr_err("CPU%u: Spectre v2: incorrect context switching function, system vulnerable\n",
++ cpu);
++}
++#else
++static void cpu_v7_spectre_init(void)
++{
++}
++#endif
++
++static __maybe_unused bool cpu_v7_check_auxcr_set(bool *warned,
++ u32 mask, const char *msg)
++{
++ u32 aux_cr;
++
++ asm("mrc p15, 0, %0, c1, c0, 1" : "=r" (aux_cr));
++
++ if ((aux_cr & mask) != mask) {
++ if (!*warned)
++ pr_err("CPU%u: %s", smp_processor_id(), msg);
++ *warned = true;
++ return false;
++ }
++ return true;
++}
++
++static DEFINE_PER_CPU(bool, spectre_warned);
++
++static bool check_spectre_auxcr(bool *warned, u32 bit)
++{
++ return IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR) &&
++ cpu_v7_check_auxcr_set(warned, bit,
++ "Spectre v2: firmware did not set auxiliary control register IBE bit, system vulnerable\n");
++}
++
++void cpu_v7_ca8_ibe(void)
++{
++ if (check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(6)))
++ cpu_v7_spectre_init();
++}
++
++void cpu_v7_ca15_ibe(void)
++{
++ if (check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(0)))
++ cpu_v7_spectre_init();
++}
++
++void cpu_v7_bugs_init(void)
++{
++ cpu_v7_spectre_init();
++}
+diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
+index 01d64c0b2563..12468d9378d8 100644
+--- a/arch/arm/mm/proc-v7.S
++++ b/arch/arm/mm/proc-v7.S
+@@ -9,6 +9,7 @@
+ *
+ * This is the "shell" of the ARMv7 processor support.
+ */
++#include <linux/arm-smccc.h>
+ #include <linux/init.h>
+ #include <linux/linkage.h>
+ #include <asm/assembler.h>
+@@ -93,6 +94,37 @@ ENTRY(cpu_v7_dcache_clean_area)
+ ret lr
+ ENDPROC(cpu_v7_dcache_clean_area)
+
++#ifdef CONFIG_ARM_PSCI
++ .arch_extension sec
++ENTRY(cpu_v7_smc_switch_mm)
++ stmfd sp!, {r0 - r3}
++ movw r0, #:lower16:ARM_SMCCC_ARCH_WORKAROUND_1
++ movt r0, #:upper16:ARM_SMCCC_ARCH_WORKAROUND_1
++ smc #0
++ ldmfd sp!, {r0 - r3}
++ b cpu_v7_switch_mm
++ENDPROC(cpu_v7_smc_switch_mm)
++ .arch_extension virt
++ENTRY(cpu_v7_hvc_switch_mm)
++ stmfd sp!, {r0 - r3}
++ movw r0, #:lower16:ARM_SMCCC_ARCH_WORKAROUND_1
++ movt r0, #:upper16:ARM_SMCCC_ARCH_WORKAROUND_1
++ hvc #0
++ ldmfd sp!, {r0 - r3}
++ b cpu_v7_switch_mm
++ENDPROC(cpu_v7_smc_switch_mm)
++#endif
++ENTRY(cpu_v7_iciallu_switch_mm)
++ mov r3, #0
++ mcr p15, 0, r3, c7, c5, 0 @ ICIALLU
++ b cpu_v7_switch_mm
++ENDPROC(cpu_v7_iciallu_switch_mm)
++ENTRY(cpu_v7_bpiall_switch_mm)
++ mov r3, #0
++ mcr p15, 0, r3, c7, c5, 6 @ flush BTAC/BTB
++ b cpu_v7_switch_mm
++ENDPROC(cpu_v7_bpiall_switch_mm)
++
+ string cpu_v7_name, "ARMv7 Processor"
+ .align
+
+@@ -158,31 +190,6 @@ ENTRY(cpu_v7_do_resume)
+ ENDPROC(cpu_v7_do_resume)
+ #endif
+
+-/*
+- * Cortex-A8
+- */
+- globl_equ cpu_ca8_proc_init, cpu_v7_proc_init
+- globl_equ cpu_ca8_proc_fin, cpu_v7_proc_fin
+- globl_equ cpu_ca8_reset, cpu_v7_reset
+- globl_equ cpu_ca8_do_idle, cpu_v7_do_idle
+- globl_equ cpu_ca8_dcache_clean_area, cpu_v7_dcache_clean_area
+- globl_equ cpu_ca8_set_pte_ext, cpu_v7_set_pte_ext
+- globl_equ cpu_ca8_suspend_size, cpu_v7_suspend_size
+-#ifdef CONFIG_ARM_CPU_SUSPEND
+- globl_equ cpu_ca8_do_suspend, cpu_v7_do_suspend
+- globl_equ cpu_ca8_do_resume, cpu_v7_do_resume
+-#endif
+-
+-/*
+- * Cortex-A9 processor functions
+- */
+- globl_equ cpu_ca9mp_proc_init, cpu_v7_proc_init
+- globl_equ cpu_ca9mp_proc_fin, cpu_v7_proc_fin
+- globl_equ cpu_ca9mp_reset, cpu_v7_reset
+- globl_equ cpu_ca9mp_do_idle, cpu_v7_do_idle
+- globl_equ cpu_ca9mp_dcache_clean_area, cpu_v7_dcache_clean_area
+- globl_equ cpu_ca9mp_switch_mm, cpu_v7_switch_mm
+- globl_equ cpu_ca9mp_set_pte_ext, cpu_v7_set_pte_ext
+ .globl cpu_ca9mp_suspend_size
+ .equ cpu_ca9mp_suspend_size, cpu_v7_suspend_size + 4 * 2
+ #ifdef CONFIG_ARM_CPU_SUSPEND
+@@ -546,12 +553,79 @@ __v7_setup_stack:
+
+ __INITDATA
+
++ .weak cpu_v7_bugs_init
++
+ @ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
+- define_processor_functions v7, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
++ define_processor_functions v7, dabort=v7_early_abort, pabort=v7_pabort, suspend=1, bugs=cpu_v7_bugs_init
++
++#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
++ @ generic v7 bpiall on context switch
++ globl_equ cpu_v7_bpiall_proc_init, cpu_v7_proc_init
++ globl_equ cpu_v7_bpiall_proc_fin, cpu_v7_proc_fin
++ globl_equ cpu_v7_bpiall_reset, cpu_v7_reset
++ globl_equ cpu_v7_bpiall_do_idle, cpu_v7_do_idle
++ globl_equ cpu_v7_bpiall_dcache_clean_area, cpu_v7_dcache_clean_area
++ globl_equ cpu_v7_bpiall_set_pte_ext, cpu_v7_set_pte_ext
++ globl_equ cpu_v7_bpiall_suspend_size, cpu_v7_suspend_size
++#ifdef CONFIG_ARM_CPU_SUSPEND
++ globl_equ cpu_v7_bpiall_do_suspend, cpu_v7_do_suspend
++ globl_equ cpu_v7_bpiall_do_resume, cpu_v7_do_resume
++#endif
++ define_processor_functions v7_bpiall, dabort=v7_early_abort, pabort=v7_pabort, suspend=1, bugs=cpu_v7_bugs_init
++
++#define HARDENED_BPIALL_PROCESSOR_FUNCTIONS v7_bpiall_processor_functions
++#else
++#define HARDENED_BPIALL_PROCESSOR_FUNCTIONS v7_processor_functions
++#endif
++
+ #ifndef CONFIG_ARM_LPAE
+- define_processor_functions ca8, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
+- define_processor_functions ca9mp, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
++ @ Cortex-A8 - always needs bpiall switch_mm implementation
++ globl_equ cpu_ca8_proc_init, cpu_v7_proc_init
++ globl_equ cpu_ca8_proc_fin, cpu_v7_proc_fin
++ globl_equ cpu_ca8_reset, cpu_v7_reset
++ globl_equ cpu_ca8_do_idle, cpu_v7_do_idle
++ globl_equ cpu_ca8_dcache_clean_area, cpu_v7_dcache_clean_area
++ globl_equ cpu_ca8_set_pte_ext, cpu_v7_set_pte_ext
++ globl_equ cpu_ca8_switch_mm, cpu_v7_bpiall_switch_mm
++ globl_equ cpu_ca8_suspend_size, cpu_v7_suspend_size
++#ifdef CONFIG_ARM_CPU_SUSPEND
++ globl_equ cpu_ca8_do_suspend, cpu_v7_do_suspend
++ globl_equ cpu_ca8_do_resume, cpu_v7_do_resume
++#endif
++ define_processor_functions ca8, dabort=v7_early_abort, pabort=v7_pabort, suspend=1, bugs=cpu_v7_ca8_ibe
++
++ @ Cortex-A9 - needs more registers preserved across suspend/resume
++ @ and bpiall switch_mm for hardening
++ globl_equ cpu_ca9mp_proc_init, cpu_v7_proc_init
++ globl_equ cpu_ca9mp_proc_fin, cpu_v7_proc_fin
++ globl_equ cpu_ca9mp_reset, cpu_v7_reset
++ globl_equ cpu_ca9mp_do_idle, cpu_v7_do_idle
++ globl_equ cpu_ca9mp_dcache_clean_area, cpu_v7_dcache_clean_area
++#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
++ globl_equ cpu_ca9mp_switch_mm, cpu_v7_bpiall_switch_mm
++#else
++ globl_equ cpu_ca9mp_switch_mm, cpu_v7_switch_mm
++#endif
++ globl_equ cpu_ca9mp_set_pte_ext, cpu_v7_set_pte_ext
++ define_processor_functions ca9mp, dabort=v7_early_abort, pabort=v7_pabort, suspend=1, bugs=cpu_v7_bugs_init
+ #endif
++
++ @ Cortex-A15 - needs iciallu switch_mm for hardening
++ globl_equ cpu_ca15_proc_init, cpu_v7_proc_init
++ globl_equ cpu_ca15_proc_fin, cpu_v7_proc_fin
++ globl_equ cpu_ca15_reset, cpu_v7_reset
++ globl_equ cpu_ca15_do_idle, cpu_v7_do_idle
++ globl_equ cpu_ca15_dcache_clean_area, cpu_v7_dcache_clean_area
++#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
++ globl_equ cpu_ca15_switch_mm, cpu_v7_iciallu_switch_mm
++#else
++ globl_equ cpu_ca15_switch_mm, cpu_v7_switch_mm
++#endif
++ globl_equ cpu_ca15_set_pte_ext, cpu_v7_set_pte_ext
++ globl_equ cpu_ca15_suspend_size, cpu_v7_suspend_size
++ globl_equ cpu_ca15_do_suspend, cpu_v7_do_suspend
++ globl_equ cpu_ca15_do_resume, cpu_v7_do_resume
++ define_processor_functions ca15, dabort=v7_early_abort, pabort=v7_pabort, suspend=1, bugs=cpu_v7_ca15_ibe
+ #ifdef CONFIG_CPU_PJ4B
+ define_processor_functions pj4b, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
+ #endif
+@@ -658,7 +732,7 @@ __v7_ca7mp_proc_info:
+ __v7_ca12mp_proc_info:
+ .long 0x410fc0d0
+ .long 0xff0ffff0
+- __v7_proc __v7_ca12mp_proc_info, __v7_ca12mp_setup
++ __v7_proc __v7_ca12mp_proc_info, __v7_ca12mp_setup, proc_fns = HARDENED_BPIALL_PROCESSOR_FUNCTIONS
+ .size __v7_ca12mp_proc_info, . - __v7_ca12mp_proc_info
+
+ /*
+@@ -668,7 +742,7 @@ __v7_ca12mp_proc_info:
+ __v7_ca15mp_proc_info:
+ .long 0x410fc0f0
+ .long 0xff0ffff0
+- __v7_proc __v7_ca15mp_proc_info, __v7_ca15mp_setup
++ __v7_proc __v7_ca15mp_proc_info, __v7_ca15mp_setup, proc_fns = ca15_processor_functions
+ .size __v7_ca15mp_proc_info, . - __v7_ca15mp_proc_info
+
+ /*
+@@ -678,7 +752,7 @@ __v7_ca15mp_proc_info:
+ __v7_b15mp_proc_info:
+ .long 0x420f00f0
+ .long 0xff0ffff0
+- __v7_proc __v7_b15mp_proc_info, __v7_b15mp_setup
++ __v7_proc __v7_b15mp_proc_info, __v7_b15mp_setup, proc_fns = ca15_processor_functions
+ .size __v7_b15mp_proc_info, . - __v7_b15mp_proc_info
+
+ /*
+@@ -688,9 +762,25 @@ __v7_b15mp_proc_info:
+ __v7_ca17mp_proc_info:
+ .long 0x410fc0e0
+ .long 0xff0ffff0
+- __v7_proc __v7_ca17mp_proc_info, __v7_ca17mp_setup
++ __v7_proc __v7_ca17mp_proc_info, __v7_ca17mp_setup, proc_fns = HARDENED_BPIALL_PROCESSOR_FUNCTIONS
+ .size __v7_ca17mp_proc_info, . - __v7_ca17mp_proc_info
+
++ /* ARM Ltd. Cortex A73 processor */
++ .type __v7_ca73_proc_info, #object
++__v7_ca73_proc_info:
++ .long 0x410fd090
++ .long 0xff0ffff0
++ __v7_proc __v7_ca73_proc_info, __v7_setup, proc_fns = HARDENED_BPIALL_PROCESSOR_FUNCTIONS
++ .size __v7_ca73_proc_info, . - __v7_ca73_proc_info
++
++ /* ARM Ltd. Cortex A75 processor */
++ .type __v7_ca75_proc_info, #object
++__v7_ca75_proc_info:
++ .long 0x410fd0a0
++ .long 0xff0ffff0
++ __v7_proc __v7_ca75_proc_info, __v7_setup, proc_fns = HARDENED_BPIALL_PROCESSOR_FUNCTIONS
++ .size __v7_ca75_proc_info, . - __v7_ca75_proc_info
++
+ /*
+ * Qualcomm Inc. Krait processors.
+ */
+diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
+index aa7496be311d..6abcd4af8274 100644
+--- a/arch/arm/vfp/vfpmodule.c
++++ b/arch/arm/vfp/vfpmodule.c
+@@ -597,13 +597,11 @@ int vfp_preserve_user_clear_hwstate(struct user_vfp __user *ufp,
+ }
+
+ /* Sanitise and restore the current VFP state from the provided structures. */
+-int vfp_restore_user_hwstate(struct user_vfp __user *ufp,
+- struct user_vfp_exc __user *ufp_exc)
++int vfp_restore_user_hwstate(struct user_vfp *ufp, struct user_vfp_exc *ufp_exc)
+ {
+ struct thread_info *thread = current_thread_info();
+ struct vfp_hard_struct *hwstate = &thread->vfpstate.hard;
+ unsigned long fpexc;
+- int err = 0;
+
+ /* Disable VFP to avoid corrupting the new thread state. */
+ vfp_flush_hwstate(thread);
+@@ -612,17 +610,16 @@ int vfp_restore_user_hwstate(struct user_vfp __user *ufp,
+ * Copy the floating point registers. There can be unused
+ * registers see asm/hwcap.h for details.
+ */
+- err |= __copy_from_user(&hwstate->fpregs, &ufp->fpregs,
+- sizeof(hwstate->fpregs));
++ memcpy(&hwstate->fpregs, &ufp->fpregs, sizeof(hwstate->fpregs));
+ /*
+ * Copy the status and control register.
+ */
+- __get_user_error(hwstate->fpscr, &ufp->fpscr, err);
++ hwstate->fpscr = ufp->fpscr;
+
+ /*
+ * Sanitise and restore the exception registers.
+ */
+- __get_user_error(fpexc, &ufp_exc->fpexc, err);
++ fpexc = ufp_exc->fpexc;
+
+ /* Ensure the VFP is enabled. */
+ fpexc |= FPEXC_EN;
+@@ -631,10 +628,10 @@ int vfp_restore_user_hwstate(struct user_vfp __user *ufp,
+ fpexc &= ~(FPEXC_EX | FPEXC_FP2V);
+ hwstate->fpexc = fpexc;
+
+- __get_user_error(hwstate->fpinst, &ufp_exc->fpinst, err);
+- __get_user_error(hwstate->fpinst2, &ufp_exc->fpinst2, err);
++ hwstate->fpinst = ufp_exc->fpinst;
++ hwstate->fpinst2 = ufp_exc->fpinst2;
+
+- return err ? -EFAULT : 0;
++ return 0;
+ }
+
+ /*
+diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
+index 1984e739f155..86249a24592d 100644
+--- a/arch/arm64/kernel/perf_event.c
++++ b/arch/arm64/kernel/perf_event.c
+@@ -824,6 +824,12 @@ static int armv8pmu_set_event_filter(struct hw_perf_event *event,
+ return 0;
+ }
+
++static int armv8pmu_filter_match(struct perf_event *event)
++{
++ unsigned long evtype = event->hw.config_base & ARMV8_PMU_EVTYPE_EVENT;
++ return evtype != ARMV8_PMUV3_PERFCTR_CHAIN;
++}
++
+ static void armv8pmu_reset(void *info)
+ {
+ struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
+@@ -970,6 +976,7 @@ static int armv8_pmu_init(struct arm_pmu *cpu_pmu)
+ cpu_pmu->reset = armv8pmu_reset,
+ cpu_pmu->max_period = (1LLU << 32) - 1,
+ cpu_pmu->set_event_filter = armv8pmu_set_event_filter;
++ cpu_pmu->filter_match = armv8pmu_filter_match;
+
+ return 0;
+ }
+diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
+index eb1f6030ab85..8bbbab611a3f 100644
+--- a/arch/mips/include/asm/processor.h
++++ b/arch/mips/include/asm/processor.h
+@@ -13,6 +13,7 @@
+
+ #include <linux/atomic.h>
+ #include <linux/cpumask.h>
++#include <linux/sizes.h>
+ #include <linux/threads.h>
+
+ #include <asm/cachectl.h>
+@@ -80,11 +81,10 @@ extern unsigned int vced_count, vcei_count;
+
+ #endif
+
+-/*
+- * One page above the stack is used for branch delay slot "emulation".
+- * See dsemul.c for details.
+- */
+-#define STACK_TOP ((TASK_SIZE & PAGE_MASK) - PAGE_SIZE)
++#define VDSO_RANDOMIZE_SIZE (TASK_IS_32BIT_ADDR ? SZ_1M : SZ_256M)
++
++extern unsigned long mips_stack_top(void);
++#define STACK_TOP mips_stack_top()
+
+ /*
+ * This decides where the kernel will search for a free chunk of vm
+diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
+index e8d772a2597d..e8b166e9146a 100644
+--- a/arch/mips/kernel/process.c
++++ b/arch/mips/kernel/process.c
+@@ -31,6 +31,7 @@
+ #include <linux/prctl.h>
+ #include <linux/nmi.h>
+
++#include <asm/abi.h>
+ #include <asm/asm.h>
+ #include <asm/bootinfo.h>
+ #include <asm/cpu.h>
+@@ -38,6 +39,7 @@
+ #include <asm/dsp.h>
+ #include <asm/fpu.h>
+ #include <asm/irq.h>
++#include <asm/mips-cps.h>
+ #include <asm/msa.h>
+ #include <asm/pgtable.h>
+ #include <asm/mipsregs.h>
+@@ -644,6 +646,29 @@ out:
+ return pc;
+ }
+
++unsigned long mips_stack_top(void)
++{
++ unsigned long top = TASK_SIZE & PAGE_MASK;
++
++ /* One page for branch delay slot "emulation" */
++ top -= PAGE_SIZE;
++
++ /* Space for the VDSO, data page & GIC user page */
++ top -= PAGE_ALIGN(current->thread.abi->vdso->size);
++ top -= PAGE_SIZE;
++ top -= mips_gic_present() ? PAGE_SIZE : 0;
++
++ /* Space for cache colour alignment */
++ if (cpu_has_dc_aliases)
++ top -= shm_align_mask + 1;
++
++ /* Space to randomize the VDSO base */
++ if (current->flags & PF_RANDOMIZE)
++ top -= VDSO_RANDOMIZE_SIZE;
++
++ return top;
++}
++
+ /*
+ * Don't forget that the stack pointer must be aligned on a 8 bytes
+ * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
+diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c
+index 8f845f6e5f42..48a9c6b90e07 100644
+--- a/arch/mips/kernel/vdso.c
++++ b/arch/mips/kernel/vdso.c
+@@ -15,6 +15,7 @@
+ #include <linux/ioport.h>
+ #include <linux/kernel.h>
+ #include <linux/mm.h>
++#include <linux/random.h>
+ #include <linux/sched.h>
+ #include <linux/slab.h>
+ #include <linux/timekeeper_internal.h>
+@@ -97,6 +98,21 @@ void update_vsyscall_tz(void)
+ }
+ }
+
++static unsigned long vdso_base(void)
++{
++ unsigned long base;
++
++ /* Skip the delay slot emulation page */
++ base = STACK_TOP + PAGE_SIZE;
++
++ if (current->flags & PF_RANDOMIZE) {
++ base += get_random_int() & (VDSO_RANDOMIZE_SIZE - 1);
++ base = PAGE_ALIGN(base);
++ }
++
++ return base;
++}
++
+ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+ {
+ struct mips_vdso_image *image = current->thread.abi->vdso;
+@@ -137,7 +153,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+ if (cpu_has_dc_aliases)
+ size += shm_align_mask + 1;
+
+- base = get_unmapped_area(NULL, 0, size, 0, 0);
++ base = get_unmapped_area(NULL, vdso_base(), size, 0, 0);
+ if (IS_ERR_VALUE(base)) {
+ ret = base;
+ goto out;
+diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
+index 9a677cd5997f..4dd13b503dbb 100644
+--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
++++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
+@@ -102,7 +102,7 @@
+ */
+ #define _HPAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \
+ _PAGE_ACCESSED | H_PAGE_THP_HUGE | _PAGE_PTE | \
+- _PAGE_SOFT_DIRTY)
++ _PAGE_SOFT_DIRTY | _PAGE_DEVMAP)
+ /*
+ * user access blocked by key
+ */
+@@ -120,7 +120,7 @@
+ */
+ #define _PAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \
+ _PAGE_ACCESSED | _PAGE_SPECIAL | _PAGE_PTE | \
+- _PAGE_SOFT_DIRTY)
++ _PAGE_SOFT_DIRTY | _PAGE_DEVMAP)
+ /*
+ * Mask of bits returned by pte_pgprot()
+ */
+diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
+index 246f15b4e64c..85f8279c885a 100644
+--- a/arch/x86/include/asm/pgtable_types.h
++++ b/arch/x86/include/asm/pgtable_types.h
+@@ -124,7 +124,7 @@
+ */
+ #define _PAGE_CHG_MASK (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT | \
+ _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY | \
+- _PAGE_SOFT_DIRTY)
++ _PAGE_SOFT_DIRTY | _PAGE_DEVMAP)
+ #define _HPAGE_CHG_MASK (_PAGE_CHG_MASK | _PAGE_PSE)
+
+ /*
+diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h
+index f3a960488eae..dcf4dc9bf327 100644
+--- a/arch/x86/include/uapi/asm/kvm.h
++++ b/arch/x86/include/uapi/asm/kvm.h
+@@ -360,5 +360,6 @@ struct kvm_sync_regs {
+
+ #define KVM_X86_QUIRK_LINT0_REENABLED (1 << 0)
+ #define KVM_X86_QUIRK_CD_NW_CLEARED (1 << 1)
++#define KVM_X86_QUIRK_LAPIC_MMIO_HOLE (1 << 2)
+
+ #endif /* _ASM_X86_KVM_H */
+diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
+index 6d0fbff71d7a..13dfb55b84db 100644
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -1282,9 +1282,8 @@ EXPORT_SYMBOL_GPL(kvm_lapic_reg_read);
+
+ static int apic_mmio_in_range(struct kvm_lapic *apic, gpa_t addr)
+ {
+- return kvm_apic_hw_enabled(apic) &&
+- addr >= apic->base_address &&
+- addr < apic->base_address + LAPIC_MMIO_LENGTH;
++ return addr >= apic->base_address &&
++ addr < apic->base_address + LAPIC_MMIO_LENGTH;
+ }
+
+ static int apic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
+@@ -1296,6 +1295,15 @@ static int apic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
+ if (!apic_mmio_in_range(apic, address))
+ return -EOPNOTSUPP;
+
++ if (!kvm_apic_hw_enabled(apic) || apic_x2apic_mode(apic)) {
++ if (!kvm_check_has_quirk(vcpu->kvm,
++ KVM_X86_QUIRK_LAPIC_MMIO_HOLE))
++ return -EOPNOTSUPP;
++
++ memset(data, 0xff, len);
++ return 0;
++ }
++
+ kvm_lapic_reg_read(apic, offset, len, data);
+
+ return 0;
+@@ -1806,6 +1814,14 @@ static int apic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
+ if (!apic_mmio_in_range(apic, address))
+ return -EOPNOTSUPP;
+
++ if (!kvm_apic_hw_enabled(apic) || apic_x2apic_mode(apic)) {
++ if (!kvm_check_has_quirk(vcpu->kvm,
++ KVM_X86_QUIRK_LAPIC_MMIO_HOLE))
++ return -EOPNOTSUPP;
++
++ return 0;
++ }
++
+ /*
+ * APIC register must be aligned on 128-bits boundary.
+ * 32/64/128 bits registers must be accessed thru 32 bits.
+diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
+index c823914b3a80..30bbe19b4b85 100644
+--- a/drivers/bluetooth/hci_ldisc.c
++++ b/drivers/bluetooth/hci_ldisc.c
+@@ -539,6 +539,8 @@ static void hci_uart_tty_close(struct tty_struct *tty)
+ }
+ clear_bit(HCI_UART_PROTO_SET, &hu->flags);
+
++ percpu_free_rwsem(&hu->proto_lock);
++
+ kfree(hu);
+ }
+
+diff --git a/drivers/clk/x86/clk-pmc-atom.c b/drivers/clk/x86/clk-pmc-atom.c
+index 08ef69945ffb..d977193842df 100644
+--- a/drivers/clk/x86/clk-pmc-atom.c
++++ b/drivers/clk/x86/clk-pmc-atom.c
+@@ -55,6 +55,7 @@ struct clk_plt_data {
+ u8 nparents;
+ struct clk_plt *clks[PMC_CLK_NUM];
+ struct clk_lookup *mclk_lookup;
++ struct clk_lookup *ether_clk_lookup;
+ };
+
+ /* Return an index in parent table */
+@@ -186,13 +187,6 @@ static struct clk_plt *plt_clk_register(struct platform_device *pdev, int id,
+ pclk->reg = base + PMC_CLK_CTL_OFFSET + id * PMC_CLK_CTL_SIZE;
+ spin_lock_init(&pclk->lock);
+
+- /*
+- * If the clock was already enabled by the firmware mark it as critical
+- * to avoid it being gated by the clock framework if no driver owns it.
+- */
+- if (plt_clk_is_enabled(&pclk->hw))
+- init.flags |= CLK_IS_CRITICAL;
+-
+ ret = devm_clk_hw_register(&pdev->dev, &pclk->hw);
+ if (ret) {
+ pclk = ERR_PTR(ret);
+@@ -351,11 +345,20 @@ static int plt_clk_probe(struct platform_device *pdev)
+ goto err_unreg_clk_plt;
+ }
+
++ data->ether_clk_lookup = clkdev_hw_create(&data->clks[4]->hw,
++ "ether_clk", NULL);
++ if (!data->ether_clk_lookup) {
++ err = -ENOMEM;
++ goto err_drop_mclk;
++ }
++
+ plt_clk_free_parent_names_loop(parent_names, data->nparents);
+
+ platform_set_drvdata(pdev, data);
+ return 0;
+
++err_drop_mclk:
++ clkdev_drop(data->mclk_lookup);
+ err_unreg_clk_plt:
+ plt_clk_unregister_loop(data, i);
+ plt_clk_unregister_parents(data);
+@@ -369,6 +372,7 @@ static int plt_clk_remove(struct platform_device *pdev)
+
+ data = platform_get_drvdata(pdev);
+
++ clkdev_drop(data->ether_clk_lookup);
+ clkdev_drop(data->mclk_lookup);
+ plt_clk_unregister_loop(data, PMC_CLK_NUM);
+ plt_clk_unregister_parents(data);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+index bdabaa3399db..e2c0ff03f386 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+@@ -576,7 +576,7 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
+
+ while (true) {
+ temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
+- if (temp & SDMA0_STATUS_REG__RB_CMD_IDLE__SHIFT)
++ if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
+ break;
+ if (timeout <= 0)
+ return -ETIME;
+diff --git a/drivers/i2c/busses/i2c-scmi.c b/drivers/i2c/busses/i2c-scmi.c
+index 7aa7b9cb6203..efefcfa24a4c 100644
+--- a/drivers/i2c/busses/i2c-scmi.c
++++ b/drivers/i2c/busses/i2c-scmi.c
+@@ -152,6 +152,7 @@ acpi_smbus_cmi_access(struct i2c_adapter *adap, u16 addr, unsigned short flags,
+ mt_params[3].type = ACPI_TYPE_INTEGER;
+ mt_params[3].integer.value = len;
+ mt_params[4].type = ACPI_TYPE_BUFFER;
++ mt_params[4].buffer.length = len;
+ mt_params[4].buffer.pointer = data->block + 1;
+ }
+ break;
+diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
+index e2ea57d5376e..b5f541112fca 100644
+--- a/drivers/md/dm-cache-target.c
++++ b/drivers/md/dm-cache-target.c
+@@ -3571,14 +3571,13 @@ static int __init dm_cache_init(void)
+ int r;
+
+ migration_cache = KMEM_CACHE(dm_cache_migration, 0);
+- if (!migration_cache) {
+- dm_unregister_target(&cache_target);
++ if (!migration_cache)
+ return -ENOMEM;
+- }
+
+ r = dm_register_target(&cache_target);
+ if (r) {
+ DMERR("cache target registration failed: %d", r);
++ kmem_cache_destroy(migration_cache);
+ return r;
+ }
+
+diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
+index b82cb1ab1eaa..0c1ef63c3461 100644
+--- a/drivers/md/dm-flakey.c
++++ b/drivers/md/dm-flakey.c
+@@ -463,7 +463,9 @@ static int flakey_iterate_devices(struct dm_target *ti, iterate_devices_callout_
+ static struct target_type flakey_target = {
+ .name = "flakey",
+ .version = {1, 5, 0},
++#ifdef CONFIG_BLK_DEV_ZONED
+ .features = DM_TARGET_ZONED_HM,
++#endif
+ .module = THIS_MODULE,
+ .ctr = flakey_ctr,
+ .dtr = flakey_dtr,
+diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
+index d5f8eff7c11d..a53de71bc30c 100644
+--- a/drivers/md/dm-linear.c
++++ b/drivers/md/dm-linear.c
+@@ -101,6 +101,7 @@ static int linear_map(struct dm_target *ti, struct bio *bio)
+ return DM_MAPIO_REMAPPED;
+ }
+
++#ifdef CONFIG_BLK_DEV_ZONED
+ static int linear_end_io(struct dm_target *ti, struct bio *bio,
+ blk_status_t *error)
+ {
+@@ -111,6 +112,7 @@ static int linear_end_io(struct dm_target *ti, struct bio *bio,
+
+ return DM_ENDIO_DONE;
+ }
++#endif
+
+ static void linear_status(struct dm_target *ti, status_type_t type,
+ unsigned status_flags, char *result, unsigned maxlen)
+@@ -187,12 +189,16 @@ static size_t linear_dax_copy_from_iter(struct dm_target *ti, pgoff_t pgoff,
+ static struct target_type linear_target = {
+ .name = "linear",
+ .version = {1, 4, 0},
++#ifdef CONFIG_BLK_DEV_ZONED
++ .end_io = linear_end_io,
+ .features = DM_TARGET_PASSES_INTEGRITY | DM_TARGET_ZONED_HM,
++#else
++ .features = DM_TARGET_PASSES_INTEGRITY,
++#endif
+ .module = THIS_MODULE,
+ .ctr = linear_ctr,
+ .dtr = linear_dtr,
+ .map = linear_map,
+- .end_io = linear_end_io,
+ .status = linear_status,
+ .prepare_ioctl = linear_prepare_ioctl,
+ .iterate_devices = linear_iterate_devices,
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index 24ec6e039448..a56008b2e7c2 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -1034,12 +1034,14 @@ void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
+ EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
+
+ /*
+- * The zone descriptors obtained with a zone report indicate
+- * zone positions within the target device. The zone descriptors
+- * must be remapped to match their position within the dm device.
+- * A target may call dm_remap_zone_report after completion of a
+- * REQ_OP_ZONE_REPORT bio to remap the zone descriptors obtained
+- * from the target device mapping to the dm device.
++ * The zone descriptors obtained with a zone report indicate zone positions
++ * within the target backing device, regardless of whether that device is a partition
++ * and regardless of the target mapping start sector on the device or partition.
++ * The zone descriptors' start sector and write pointer position must be adjusted
++ * to match their relative position within the dm device.
++ * A target may call dm_remap_zone_report() after completion of a
++ * REQ_OP_ZONE_REPORT bio to remap the zone descriptors obtained from the
++ * backing device.
+ */
+ void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start)
+ {
+@@ -1050,6 +1052,7 @@ void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start)
+ struct blk_zone *zone;
+ unsigned int nr_rep = 0;
+ unsigned int ofst;
++ sector_t part_offset;
+ struct bio_vec bvec;
+ struct bvec_iter iter;
+ void *addr;
+@@ -1057,6 +1060,15 @@ void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start)
+ if (bio->bi_status)
+ return;
+
++ /*
++	 * The bio sector was incremented by the request size on completion.
++	 * Taking into account the original request sector, the target start
++	 * offset on the backing device and the target mapping offset
++	 * (ti->begin), the start sector of the backing device is obtained.
++	 * The partition offset is always 0 if the target uses a whole device.
++ */
++ part_offset = bio->bi_iter.bi_sector + ti->begin - (start + bio_end_sector(report_bio));
++
+ /*
+ * Remap the start sector of the reported zones. For sequential zones,
+ * also remap the write pointer position.
+@@ -1074,6 +1086,7 @@ void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start)
+ /* Set zones start sector */
+ while (hdr->nr_zones && ofst < bvec.bv_len) {
+ zone = addr + ofst;
++ zone->start -= part_offset;
+ if (zone->start >= start + ti->len) {
+ hdr->nr_zones = 0;
+ break;
+@@ -1085,7 +1098,7 @@ void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start)
+ else if (zone->cond == BLK_ZONE_COND_EMPTY)
+ zone->wp = zone->start;
+ else
+- zone->wp = zone->wp + ti->begin - start;
++ zone->wp = zone->wp + ti->begin - start - part_offset;
+ }
+ ofst += sizeof(struct blk_zone);
+ hdr->nr_zones--;
+diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c
+index 7aab376ecb84..3785c638d530 100644
+--- a/drivers/mfd/omap-usb-host.c
++++ b/drivers/mfd/omap-usb-host.c
+@@ -548,8 +548,8 @@ static int usbhs_omap_get_dt_pdata(struct device *dev,
+ }
+
+ static const struct of_device_id usbhs_child_match_table[] = {
+- { .compatible = "ti,omap-ehci", },
+- { .compatible = "ti,omap-ohci", },
++ { .compatible = "ti,ehci-omap", },
++ { .compatible = "ti,ohci-omap3", },
+ { }
+ };
+
+@@ -875,6 +875,7 @@ static struct platform_driver usbhs_omap_driver = {
+ .pm = &usbhsomap_dev_pm_ops,
+ .of_match_table = usbhs_omap_dt_ids,
+ },
++ .probe = usbhs_omap_probe,
+ .remove = usbhs_omap_remove,
+ };
+
+@@ -884,9 +885,9 @@ MODULE_ALIAS("platform:" USBHS_DRIVER_NAME);
+ MODULE_LICENSE("GPL v2");
+ MODULE_DESCRIPTION("usb host common core driver for omap EHCI and OHCI");
+
+-static int __init omap_usbhs_drvinit(void)
++static int omap_usbhs_drvinit(void)
+ {
+- return platform_driver_probe(&usbhs_omap_driver, usbhs_omap_probe);
++ return platform_driver_register(&usbhs_omap_driver);
+ }
+
+ /*
+@@ -898,7 +899,7 @@ static int __init omap_usbhs_drvinit(void)
+ */
+ fs_initcall_sync(omap_usbhs_drvinit);
+
+-static void __exit omap_usbhs_drvexit(void)
++static void omap_usbhs_drvexit(void)
+ {
+ platform_driver_unregister(&usbhs_omap_driver);
+ }
+diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
+index 4281fdc0a13c..ce6dd49fbb98 100644
+--- a/drivers/mmc/core/block.c
++++ b/drivers/mmc/core/block.c
+@@ -1613,6 +1613,16 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
+ brq->data.blocks = card->host->max_blk_count;
+
+ if (brq->data.blocks > 1) {
++ /*
++ * Some SD cards in SPI mode return a CRC error or even lock up
++ * completely when trying to read the last block using a
++ * multiblock read command.
++ */
++ if (mmc_host_is_spi(card->host) && (rq_data_dir(req) == READ) &&
++ (blk_rq_pos(req) + blk_rq_sectors(req) ==
++ get_capacity(md->disk)))
++ brq->data.blocks--;
++
+ /*
+ * After a read error, we redo the request one sector
+ * at a time in order to accurately determine which
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 15aedb64a02b..cf64a365362b 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -210,6 +210,7 @@ static void bond_get_stats(struct net_device *bond_dev,
+ static void bond_slave_arr_handler(struct work_struct *work);
+ static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
+ int mod);
++static void bond_netdev_notify_work(struct work_struct *work);
+
+ /*---------------------------- General routines -----------------------------*/
+
+@@ -1176,9 +1177,27 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
+ }
+ }
+
+- /* don't change skb->dev for link-local packets */
+- if (is_link_local_ether_addr(eth_hdr(skb)->h_dest))
++ /* Link-local multicast packets should be passed to the
++	 * stack on the link they arrive on as well as passed to the
++	 * bond-master device. These packets are mostly useful when the
++	 * stack receives them on the link on which they arrive
++	 * (e.g. LLDP), but they must also be available on the master. Some of
++ * the use cases include (but are not limited to): LLDP agents
++ * that must be able to operate both on enslaved interfaces as
++ * well as on bonds themselves; linux bridges that must be able
++ * to process/pass BPDUs from attached bonds when any kind of
++ * STP version is enabled on the network.
++ */
++ if (is_link_local_ether_addr(eth_hdr(skb)->h_dest)) {
++ struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
++
++ if (nskb) {
++ nskb->dev = bond->dev;
++ nskb->queue_mapping = 0;
++ netif_rx(nskb);
++ }
+ return RX_HANDLER_PASS;
++ }
+ if (bond_should_deliver_exact_match(skb, slave, bond))
+ return RX_HANDLER_EXACT;
+
+@@ -1254,6 +1273,8 @@ static struct slave *bond_alloc_slave(struct bonding *bond)
+ return NULL;
+ }
+ }
++ INIT_DELAYED_WORK(&slave->notify_work, bond_netdev_notify_work);
++
+ return slave;
+ }
+
+@@ -1261,6 +1282,7 @@ static void bond_free_slave(struct slave *slave)
+ {
+ struct bonding *bond = bond_get_bond_by_slave(slave);
+
++ cancel_delayed_work_sync(&slave->notify_work);
+ if (BOND_MODE(bond) == BOND_MODE_8023AD)
+ kfree(SLAVE_AD_INFO(slave));
+
+@@ -1282,39 +1304,26 @@ static void bond_fill_ifslave(struct slave *slave, struct ifslave *info)
+ info->link_failure_count = slave->link_failure_count;
+ }
+
+-static void bond_netdev_notify(struct net_device *dev,
+- struct netdev_bonding_info *info)
+-{
+- rtnl_lock();
+- netdev_bonding_info_change(dev, info);
+- rtnl_unlock();
+-}
+-
+ static void bond_netdev_notify_work(struct work_struct *_work)
+ {
+- struct netdev_notify_work *w =
+- container_of(_work, struct netdev_notify_work, work.work);
++ struct slave *slave = container_of(_work, struct slave,
++ notify_work.work);
++
++ if (rtnl_trylock()) {
++ struct netdev_bonding_info binfo;
+
+- bond_netdev_notify(w->dev, &w->bonding_info);
+- dev_put(w->dev);
+- kfree(w);
++ bond_fill_ifslave(slave, &binfo.slave);
++ bond_fill_ifbond(slave->bond, &binfo.master);
++ netdev_bonding_info_change(slave->dev, &binfo);
++ rtnl_unlock();
++ } else {
++ queue_delayed_work(slave->bond->wq, &slave->notify_work, 1);
++ }
+ }
+
+ void bond_queue_slave_event(struct slave *slave)
+ {
+- struct bonding *bond = slave->bond;
+- struct netdev_notify_work *nnw = kzalloc(sizeof(*nnw), GFP_ATOMIC);
+-
+- if (!nnw)
+- return;
+-
+- dev_hold(slave->dev);
+- nnw->dev = slave->dev;
+- bond_fill_ifslave(slave, &nnw->bonding_info.slave);
+- bond_fill_ifbond(bond, &nnw->bonding_info.master);
+- INIT_DELAYED_WORK(&nnw->work, bond_netdev_notify_work);
+-
+- queue_delayed_work(slave->bond->wq, &nnw->work, 0);
++ queue_delayed_work(slave->bond->wq, &slave->notify_work, 0);
+ }
+
+ void bond_lower_state_changed(struct slave *slave)
+diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
+index 72d6ffbfd638..0132921f408a 100644
+--- a/drivers/net/dsa/bcm_sf2.c
++++ b/drivers/net/dsa/bcm_sf2.c
+@@ -772,7 +772,6 @@ static int bcm_sf2_sw_suspend(struct dsa_switch *ds)
+ static int bcm_sf2_sw_resume(struct dsa_switch *ds)
+ {
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+- unsigned int port;
+ int ret;
+
+ ret = bcm_sf2_sw_rst(priv);
+@@ -784,12 +783,7 @@ static int bcm_sf2_sw_resume(struct dsa_switch *ds)
+ if (priv->hw_params.num_gphy == 1)
+ bcm_sf2_gphy_enable_set(ds, true);
+
+- for (port = 0; port < DSA_MAX_PORTS; port++) {
+- if ((1 << port) & ds->enabled_port_mask)
+- bcm_sf2_port_setup(ds, port, NULL);
+- else if (dsa_is_cpu_port(ds, port))
+- bcm_sf2_imp_setup(ds, port);
+- }
++ ds->ops->setup(ds);
+
+ return 0;
+ }
+@@ -1270,10 +1264,10 @@ static int bcm_sf2_sw_remove(struct platform_device *pdev)
+ {
+ struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);
+
+- /* Disable all ports and interrupts */
+ priv->wol_ports_mask = 0;
+- bcm_sf2_sw_suspend(priv->dev->ds);
+ dsa_unregister_switch(priv->dev->ds);
++ /* Disable all ports and interrupts */
++ bcm_sf2_sw_suspend(priv->dev->ds);
+ bcm_sf2_mdio_unregister(priv);
+
+ return 0;
+diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+index 0654e0c76bc2..640babf752ea 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
++++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+@@ -222,9 +222,10 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
+ }
+
+ /* for single fragment packets use build_skb() */
+- if (buff->is_eop) {
++ if (buff->is_eop &&
++ buff->len <= AQ_CFG_RX_FRAME_MAX - AQ_SKB_ALIGN) {
+ skb = build_skb(page_address(buff->page),
+- buff->len + AQ_SKB_ALIGN);
++ AQ_CFG_RX_FRAME_MAX);
+ if (unlikely(!skb)) {
+ err = -ENOMEM;
+ goto err_exit;
+@@ -244,18 +245,21 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
+ buff->len - ETH_HLEN,
+ SKB_TRUESIZE(buff->len - ETH_HLEN));
+
+- for (i = 1U, next_ = buff->next,
+- buff_ = &self->buff_ring[next_]; true;
+- next_ = buff_->next,
+- buff_ = &self->buff_ring[next_], ++i) {
+- skb_add_rx_frag(skb, i, buff_->page, 0,
+- buff_->len,
+- SKB_TRUESIZE(buff->len -
+- ETH_HLEN));
+- buff_->is_cleaned = 1;
+-
+- if (buff_->is_eop)
+- break;
++ if (!buff->is_eop) {
++ for (i = 1U, next_ = buff->next,
++ buff_ = &self->buff_ring[next_];
++ true; next_ = buff_->next,
++ buff_ = &self->buff_ring[next_], ++i) {
++ skb_add_rx_frag(skb, i,
++ buff_->page, 0,
++ buff_->len,
++ SKB_TRUESIZE(buff->len -
++ ETH_HLEN));
++ buff_->is_cleaned = 1;
++
++ if (buff_->is_eop)
++ break;
++ }
+ }
+ }
+
+diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
+index 0fff2432ab4c..6e7f9a470ea1 100644
+--- a/drivers/net/ethernet/broadcom/bcmsysport.c
++++ b/drivers/net/ethernet/broadcom/bcmsysport.c
+@@ -1001,14 +1001,22 @@ static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
+ {
+ u32 reg;
+
+- /* Stop monitoring MPD interrupt */
+- intrl2_0_mask_set(priv, INTRL2_0_MPD);
+-
+ /* Clear the MagicPacket detection logic */
+ reg = umac_readl(priv, UMAC_MPD_CTRL);
+ reg &= ~MPD_EN;
+ umac_writel(priv, reg, UMAC_MPD_CTRL);
+
++ reg = intrl2_0_readl(priv, INTRL2_CPU_STATUS);
++ if (reg & INTRL2_0_MPD)
++ netdev_info(priv->netdev, "Wake-on-LAN (MPD) interrupt!\n");
++
++ if (reg & INTRL2_0_BRCM_MATCH_TAG) {
++ reg = rxchk_readl(priv, RXCHK_BRCM_TAG_MATCH_STATUS) &
++ RXCHK_BRCM_TAG_MATCH_MASK;
++ netdev_info(priv->netdev,
++ "Wake-on-LAN (filters 0x%02x) interrupt!\n", reg);
++ }
++
+ netif_dbg(priv, wol, priv->netdev, "resumed from WOL\n");
+ }
+
+@@ -1043,11 +1051,6 @@ static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
+ if (priv->irq0_stat & INTRL2_0_TX_RING_FULL)
+ bcm_sysport_tx_reclaim_all(priv);
+
+- if (priv->irq0_stat & INTRL2_0_MPD) {
+- netdev_info(priv->netdev, "Wake-on-LAN interrupt!\n");
+- bcm_sysport_resume_from_wol(priv);
+- }
+-
+ if (!priv->is_lite)
+ goto out;
+
+@@ -2248,9 +2251,6 @@ static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv)
+ /* UniMAC receive needs to be turned on */
+ umac_enable_set(priv, CMD_RX_EN, 1);
+
+- /* Enable the interrupt wake-up source */
+- intrl2_0_mask_clear(priv, INTRL2_0_MPD);
+-
+ netif_dbg(priv, wol, ndev, "entered WOL mode\n");
+
+ return 0;
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index 937db8019289..da6c73868fa0 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -1864,8 +1864,11 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
+ if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
+ tx_pkts++;
+ /* return full budget so NAPI will complete. */
+- if (unlikely(tx_pkts > bp->tx_wake_thresh))
++ if (unlikely(tx_pkts > bp->tx_wake_thresh)) {
+ rx_pkts = budget;
++ raw_cons = NEXT_RAW_CMP(raw_cons);
++ break;
++ }
+ } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
+ if (likely(budget))
+ rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event);
+@@ -1893,7 +1896,7 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
+ }
+ raw_cons = NEXT_RAW_CMP(raw_cons);
+
+- if (rx_pkts == budget)
++ if (rx_pkts && rx_pkts == budget)
+ break;
+ }
+
+@@ -2007,8 +2010,12 @@ static int bnxt_poll(struct napi_struct *napi, int budget)
+ while (1) {
+ work_done += bnxt_poll_work(bp, bnapi, budget - work_done);
+
+- if (work_done >= budget)
++ if (work_done >= budget) {
++ if (!budget)
++ BNXT_CP_DB_REARM(cpr->cp_doorbell,
++ cpr->cp_raw_cons);
+ break;
++ }
+
+ if (!bnxt_has_work(bp, cpr)) {
+ if (napi_complete_done(napi, work_done))
+@@ -2957,10 +2964,11 @@ static void bnxt_free_hwrm_resources(struct bnxt *bp)
+ {
+ struct pci_dev *pdev = bp->pdev;
+
+- dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
+- bp->hwrm_cmd_resp_dma_addr);
+-
+- bp->hwrm_cmd_resp_addr = NULL;
++ if (bp->hwrm_cmd_resp_addr) {
++ dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
++ bp->hwrm_cmd_resp_dma_addr);
++ bp->hwrm_cmd_resp_addr = NULL;
++ }
+ if (bp->hwrm_dbg_resp_addr) {
+ dma_free_coherent(&pdev->dev, HWRM_DBG_REG_BUF_SIZE,
+ bp->hwrm_dbg_resp_addr,
+@@ -8210,6 +8218,7 @@ init_err_cleanup_tc:
+ bnxt_clear_int_mode(bp);
+
+ init_err_pci_clean:
++ bnxt_free_hwrm_resources(bp);
+ bnxt_cleanup_pci(bp);
+
+ init_err_free:
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+index 6a185344b378..149d30f60459 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+@@ -78,17 +78,23 @@ static int bnxt_tc_parse_redir(struct bnxt *bp,
+ return 0;
+ }
+
+-static void bnxt_tc_parse_vlan(struct bnxt *bp,
+- struct bnxt_tc_actions *actions,
+- const struct tc_action *tc_act)
++static int bnxt_tc_parse_vlan(struct bnxt *bp,
++ struct bnxt_tc_actions *actions,
++ const struct tc_action *tc_act)
+ {
+- if (tcf_vlan_action(tc_act) == TCA_VLAN_ACT_POP) {
++ switch (tcf_vlan_action(tc_act)) {
++ case TCA_VLAN_ACT_POP:
+ actions->flags |= BNXT_TC_ACTION_FLAG_POP_VLAN;
+- } else if (tcf_vlan_action(tc_act) == TCA_VLAN_ACT_PUSH) {
++ break;
++ case TCA_VLAN_ACT_PUSH:
+ actions->flags |= BNXT_TC_ACTION_FLAG_PUSH_VLAN;
+ actions->push_vlan_tci = htons(tcf_vlan_push_vid(tc_act));
+ actions->push_vlan_tpid = tcf_vlan_push_proto(tc_act);
++ break;
++ default:
++ return -EOPNOTSUPP;
+ }
++ return 0;
+ }
+
+ static int bnxt_tc_parse_actions(struct bnxt *bp,
+@@ -122,7 +128,9 @@ static int bnxt_tc_parse_actions(struct bnxt *bp,
+
+ /* Push/pop VLAN */
+ if (is_tcf_vlan(tc_act)) {
+- bnxt_tc_parse_vlan(bp, actions, tc_act);
++ rc = bnxt_tc_parse_vlan(bp, actions, tc_act);
++ if (rc)
++ return rc;
+ continue;
+ }
+ }
+diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
+index c1787be6a258..b4f92de1efbd 100644
+--- a/drivers/net/ethernet/cadence/macb_main.c
++++ b/drivers/net/ethernet/cadence/macb_main.c
+@@ -3301,6 +3301,13 @@ static const struct macb_config at91sam9260_config = {
+ .init = macb_init,
+ };
+
++static const struct macb_config sama5d3macb_config = {
++ .caps = MACB_CAPS_SG_DISABLED
++ | MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
++ .clk_init = macb_clk_init,
++ .init = macb_init,
++};
++
+ static const struct macb_config pc302gem_config = {
+ .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
+ .dma_burst_length = 16,
+@@ -3368,6 +3375,7 @@ static const struct of_device_id macb_dt_ids[] = {
+ { .compatible = "cdns,gem", .data = &pc302gem_config },
+ { .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config },
+ { .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config },
++ { .compatible = "atmel,sama5d3-macb", .data = &sama5d3macb_config },
+ { .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
+ { .compatible = "cdns,at91rm9200-emac", .data = &emac_config },
+ { .compatible = "cdns,emac", .data = &emac_config },
+diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c
+index a051e582d541..79d03f8ee7b1 100644
+--- a/drivers/net/ethernet/hisilicon/hns/hnae.c
++++ b/drivers/net/ethernet/hisilicon/hns/hnae.c
+@@ -84,7 +84,7 @@ static void hnae_unmap_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
+ if (cb->type == DESC_TYPE_SKB)
+ dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
+ ring_to_dma_dir(ring));
+- else
++ else if (cb->length)
+ dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
+ ring_to_dma_dir(ring));
+ }
+diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+index 07d6a9cf2c55..4faadc3ffe8c 100644
+--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+@@ -40,9 +40,9 @@
+ #define SKB_TMP_LEN(SKB) \
+ (((SKB)->transport_header - (SKB)->mac_header) + tcp_hdrlen(SKB))
+
+-static void fill_v2_desc(struct hnae_ring *ring, void *priv,
+- int size, dma_addr_t dma, int frag_end,
+- int buf_num, enum hns_desc_type type, int mtu)
++static void fill_v2_desc_hw(struct hnae_ring *ring, void *priv, int size,
++ int send_sz, dma_addr_t dma, int frag_end,
++ int buf_num, enum hns_desc_type type, int mtu)
+ {
+ struct hnae_desc *desc = &ring->desc[ring->next_to_use];
+ struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
+@@ -64,7 +64,7 @@ static void fill_v2_desc(struct hnae_ring *ring, void *priv,
+ desc_cb->type = type;
+
+ desc->addr = cpu_to_le64(dma);
+- desc->tx.send_size = cpu_to_le16((u16)size);
++ desc->tx.send_size = cpu_to_le16((u16)send_sz);
+
+ /* config bd buffer end */
+ hnae_set_bit(rrcfv, HNSV2_TXD_VLD_B, 1);
+@@ -133,6 +133,14 @@ static void fill_v2_desc(struct hnae_ring *ring, void *priv,
+ ring_ptr_move_fw(ring, next_to_use);
+ }
+
++static void fill_v2_desc(struct hnae_ring *ring, void *priv,
++ int size, dma_addr_t dma, int frag_end,
++ int buf_num, enum hns_desc_type type, int mtu)
++{
++ fill_v2_desc_hw(ring, priv, size, size, dma, frag_end,
++ buf_num, type, mtu);
++}
++
+ static const struct acpi_device_id hns_enet_acpi_match[] = {
+ { "HISI00C1", 0 },
+ { "HISI00C2", 0 },
+@@ -289,15 +297,15 @@ static void fill_tso_desc(struct hnae_ring *ring, void *priv,
+
+ /* when the frag size is bigger than hardware, split this frag */
+ for (k = 0; k < frag_buf_num; k++)
+- fill_v2_desc(ring, priv,
+- (k == frag_buf_num - 1) ?
++ fill_v2_desc_hw(ring, priv, k == 0 ? size : 0,
++ (k == frag_buf_num - 1) ?
+ sizeoflast : BD_MAX_SEND_SIZE,
+- dma + BD_MAX_SEND_SIZE * k,
+- frag_end && (k == frag_buf_num - 1) ? 1 : 0,
+- buf_num,
+- (type == DESC_TYPE_SKB && !k) ?
++ dma + BD_MAX_SEND_SIZE * k,
++ frag_end && (k == frag_buf_num - 1) ? 1 : 0,
++ buf_num,
++ (type == DESC_TYPE_SKB && !k) ?
+ DESC_TYPE_SKB : DESC_TYPE_PAGE,
+- mtu);
++ mtu);
+ }
+
+ netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,
+diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
+index 529be74f609d..00e6f1d155a6 100644
+--- a/drivers/net/ethernet/marvell/mvpp2.c
++++ b/drivers/net/ethernet/marvell/mvpp2.c
+@@ -33,6 +33,7 @@
+ #include <linux/hrtimer.h>
+ #include <linux/ktime.h>
+ #include <linux/regmap.h>
++#include <linux/if_vlan.h>
+ #include <uapi/linux/ppp_defs.h>
+ #include <net/ip.h>
+ #include <net/ipv6.h>
+@@ -5101,7 +5102,7 @@ static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq)
+ }
+
+ /* Set Tx descriptors fields relevant for CSUM calculation */
+-static u32 mvpp2_txq_desc_csum(int l3_offs, int l3_proto,
++static u32 mvpp2_txq_desc_csum(int l3_offs, __be16 l3_proto,
+ int ip_hdr_len, int l4_proto)
+ {
+ u32 command;
+@@ -6065,14 +6066,15 @@ static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ int ip_hdr_len = 0;
+ u8 l4_proto;
++ __be16 l3_proto = vlan_get_protocol(skb);
+
+- if (skb->protocol == htons(ETH_P_IP)) {
++ if (l3_proto == htons(ETH_P_IP)) {
+ struct iphdr *ip4h = ip_hdr(skb);
+
+ /* Calculate IPv4 checksum and L4 checksum */
+ ip_hdr_len = ip4h->ihl;
+ l4_proto = ip4h->protocol;
+- } else if (skb->protocol == htons(ETH_P_IPV6)) {
++ } else if (l3_proto == htons(ETH_P_IPV6)) {
+ struct ipv6hdr *ip6h = ipv6_hdr(skb);
+
+ /* Read l4_protocol from one of IPv6 extra headers */
+@@ -6084,7 +6086,7 @@ static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
+ }
+
+ return mvpp2_txq_desc_csum(skb_network_offset(skb),
+- skb->protocol, ip_hdr_len, l4_proto);
++ l3_proto, ip_hdr_len, l4_proto);
+ }
+
+ return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
+@@ -6532,10 +6534,12 @@ static int mvpp2_poll(struct napi_struct *napi, int budget)
+ cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
+ }
+
+- cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
+- if (cause_tx) {
+- cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET;
+- mvpp2_tx_done(port, cause_tx, qv->sw_thread_id);
++ if (port->has_tx_irqs) {
++ cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
++ if (cause_tx) {
++ cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET;
++ mvpp2_tx_done(port, cause_tx, qv->sw_thread_id);
++ }
+ }
+
+ /* Process RX packets */
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+index e28f9dab9ceb..9e0be077df9c 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+@@ -864,6 +864,9 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio, mask->vlan_priority);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, key->vlan_priority);
+ }
++ } else {
++ MLX5_SET(fte_match_set_lyr_2_4, headers_c, svlan_tag, 1);
++ MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
+ }
+
+ if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+index de72b66df3e5..1af9894abd95 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+@@ -1922,7 +1922,7 @@ static u32 calculate_vports_min_rate_divider(struct mlx5_eswitch *esw)
+ u32 max_guarantee = 0;
+ int i;
+
+- for (i = 0; i <= esw->total_vports; i++) {
++ for (i = 0; i < esw->total_vports; i++) {
+ evport = &esw->vports[i];
+ if (!evport->enabled || evport->info.min_rate < max_guarantee)
+ continue;
+@@ -1942,7 +1942,7 @@ static int normalize_vports_min_rate(struct mlx5_eswitch *esw, u32 divider)
+ int err;
+ int i;
+
+- for (i = 0; i <= esw->total_vports; i++) {
++ for (i = 0; i < esw->total_vports; i++) {
+ evport = &esw->vports[i];
+ if (!evport->enabled)
+ continue;
+diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+index 56751990bcee..6df2c8b2ce6f 100644
+--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
++++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+@@ -2058,14 +2058,17 @@ nfp_ctrl_rx_one(struct nfp_net *nn, struct nfp_net_dp *dp,
+ return true;
+ }
+
+-static void nfp_ctrl_rx(struct nfp_net_r_vector *r_vec)
++static bool nfp_ctrl_rx(struct nfp_net_r_vector *r_vec)
+ {
+ struct nfp_net_rx_ring *rx_ring = r_vec->rx_ring;
+ struct nfp_net *nn = r_vec->nfp_net;
+ struct nfp_net_dp *dp = &nn->dp;
++ unsigned int budget = 512;
+
+- while (nfp_ctrl_rx_one(nn, dp, r_vec, rx_ring))
++ while (nfp_ctrl_rx_one(nn, dp, r_vec, rx_ring) && budget--)
+ continue;
++
++ return budget;
+ }
+
+ static void nfp_ctrl_poll(unsigned long arg)
+@@ -2077,9 +2080,13 @@ static void nfp_ctrl_poll(unsigned long arg)
+ __nfp_ctrl_tx_queued(r_vec);
+ spin_unlock_bh(&r_vec->lock);
+
+- nfp_ctrl_rx(r_vec);
+-
+- nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
++ if (nfp_ctrl_rx(r_vec)) {
++ nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
++ } else {
++ tasklet_schedule(&r_vec->tasklet);
++ nn_dp_warn(&r_vec->nfp_net->dp,
++ "control message budget exceeded!\n");
++ }
+ }
+
+ /* Setup and Configuration
+diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+index 81312924df14..0c443ea98479 100644
+--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+@@ -1800,7 +1800,8 @@ struct qlcnic_hardware_ops {
+ int (*config_loopback) (struct qlcnic_adapter *, u8);
+ int (*clear_loopback) (struct qlcnic_adapter *, u8);
+ int (*config_promisc_mode) (struct qlcnic_adapter *, u32);
+- void (*change_l2_filter) (struct qlcnic_adapter *, u64 *, u16);
++ void (*change_l2_filter)(struct qlcnic_adapter *adapter, u64 *addr,
++ u16 vlan, struct qlcnic_host_tx_ring *tx_ring);
+ int (*get_board_info) (struct qlcnic_adapter *);
+ void (*set_mac_filter_count) (struct qlcnic_adapter *);
+ void (*free_mac_list) (struct qlcnic_adapter *);
+@@ -2064,9 +2065,10 @@ static inline int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter,
+ }
+
+ static inline void qlcnic_change_filter(struct qlcnic_adapter *adapter,
+- u64 *addr, u16 id)
++ u64 *addr, u16 vlan,
++ struct qlcnic_host_tx_ring *tx_ring)
+ {
+- adapter->ahw->hw_ops->change_l2_filter(adapter, addr, id);
++ adapter->ahw->hw_ops->change_l2_filter(adapter, addr, vlan, tx_ring);
+ }
+
+ static inline int qlcnic_get_board_info(struct qlcnic_adapter *adapter)
+diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+index 46b0372dd032..1fc84d8f891b 100644
+--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+@@ -2134,7 +2134,8 @@ out:
+ }
+
+ void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *adapter, u64 *addr,
+- u16 vlan_id)
++ u16 vlan_id,
++ struct qlcnic_host_tx_ring *tx_ring)
+ {
+ u8 mac[ETH_ALEN];
+ memcpy(&mac, addr, ETH_ALEN);
+diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
+index b75a81246856..73fe2f64491d 100644
+--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
+@@ -550,7 +550,8 @@ int qlcnic_83xx_wrt_reg_indirect(struct qlcnic_adapter *, ulong, u32);
+ int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *, u32);
+ int qlcnic_83xx_config_hw_lro(struct qlcnic_adapter *, int);
+ int qlcnic_83xx_config_rss(struct qlcnic_adapter *, int);
+-void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *, u64 *, u16);
++void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *adapter, u64 *addr,
++ u16 vlan, struct qlcnic_host_tx_ring *ring);
+ int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *, struct qlcnic_pci_info *);
+ int qlcnic_83xx_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *);
+ void qlcnic_83xx_initialize_nic(struct qlcnic_adapter *, int);
+diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
+index 4bb33af8e2b3..56a3bd9e37dc 100644
+--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
+@@ -173,7 +173,8 @@ int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter,
+ struct net_device *netdev);
+ void qlcnic_82xx_get_beacon_state(struct qlcnic_adapter *);
+ void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter,
+- u64 *uaddr, u16 vlan_id);
++ u64 *uaddr, u16 vlan_id,
++ struct qlcnic_host_tx_ring *tx_ring);
+ int qlcnic_82xx_config_intr_coalesce(struct qlcnic_adapter *,
+ struct ethtool_coalesce *);
+ int qlcnic_82xx_set_rx_coalesce(struct qlcnic_adapter *);
+diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+index 84dd83031a1b..9647578cbe6a 100644
+--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+@@ -268,13 +268,12 @@ static void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter,
+ }
+
+ void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr,
+- u16 vlan_id)
++ u16 vlan_id, struct qlcnic_host_tx_ring *tx_ring)
+ {
+ struct cmd_desc_type0 *hwdesc;
+ struct qlcnic_nic_req *req;
+ struct qlcnic_mac_req *mac_req;
+ struct qlcnic_vlan_req *vlan_req;
+- struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
+ u32 producer;
+ u64 word;
+
+@@ -301,7 +300,8 @@ void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr,
+
+ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
+ struct cmd_desc_type0 *first_desc,
+- struct sk_buff *skb)
++ struct sk_buff *skb,
++ struct qlcnic_host_tx_ring *tx_ring)
+ {
+ struct vlan_ethhdr *vh = (struct vlan_ethhdr *)(skb->data);
+ struct ethhdr *phdr = (struct ethhdr *)(skb->data);
+@@ -335,7 +335,7 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
+ tmp_fil->vlan_id == vlan_id) {
+ if (jiffies > (QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
+ qlcnic_change_filter(adapter, &src_addr,
+- vlan_id);
++ vlan_id, tx_ring);
+ tmp_fil->ftime = jiffies;
+ return;
+ }
+@@ -350,7 +350,7 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
+ if (!fil)
+ return;
+
+- qlcnic_change_filter(adapter, &src_addr, vlan_id);
++ qlcnic_change_filter(adapter, &src_addr, vlan_id, tx_ring);
+ fil->ftime = jiffies;
+ fil->vlan_id = vlan_id;
+ memcpy(fil->faddr, &src_addr, ETH_ALEN);
+@@ -766,7 +766,7 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+ }
+
+ if (adapter->drv_mac_learn)
+- qlcnic_send_filter(adapter, first_desc, skb);
++ qlcnic_send_filter(adapter, first_desc, skb, tx_ring);
+
+ tx_ring->tx_stats.tx_bytes += skb->len;
+ tx_ring->tx_stats.xmit_called++;
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index 1a9a382bf1c4..bafbebeb0e00 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -2190,8 +2190,7 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
+ priv->plat->dma_cfg,
+ tx_q->dma_tx_phy, chan);
+
+- tx_q->tx_tail_addr = tx_q->dma_tx_phy +
+- (DMA_TX_SIZE * sizeof(struct dma_desc));
++ tx_q->tx_tail_addr = tx_q->dma_tx_phy;
+ priv->hw->dma->set_tx_tail_ptr(priv->ioaddr,
+ tx_q->tx_tail_addr,
+ chan);
+@@ -2963,6 +2962,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
+
+ netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
+
++ tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
+ priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr,
+ queue);
+
+@@ -3178,9 +3178,11 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
+
+ if (priv->synopsys_id < DWMAC_CORE_4_00)
+ priv->hw->dma->enable_dma_transmission(priv->ioaddr);
+- else
++ else {
++ tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
+ priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr,
+ queue);
++ }
+
+ return NETDEV_TX_OK;
+
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+index 195eb7e71473..d48cc32dc507 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+@@ -67,7 +67,7 @@ static int dwmac1000_validate_mcast_bins(int mcast_bins)
+ * Description:
+ * This function validates the number of Unicast address entries supported
+ * by a particular Synopsys 10/100/1000 controller. The Synopsys controller
+- * supports 1, 32, 64, or 128 Unicast filter entries for it's Unicast filter
++ * supports 1..32, 64, or 128 Unicast filter entries for its Unicast filter
+ * logic. This function validates a valid, supported configuration is
+ * selected, and defaults to 1 Unicast address if an unsupported
+ * configuration is selected.
+@@ -77,8 +77,7 @@ static int dwmac1000_validate_ucast_entries(int ucast_entries)
+ int x = ucast_entries;
+
+ switch (x) {
+- case 1:
+- case 32:
++ case 1 ... 32:
+ case 64:
+ case 128:
+ break;
+diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
+index aba16d81e9bb..2d90cffae9ff 100644
+--- a/drivers/net/hyperv/netvsc_drv.c
++++ b/drivers/net/hyperv/netvsc_drv.c
+@@ -2110,17 +2110,15 @@ static int netvsc_remove(struct hv_device *dev)
+
+ cancel_delayed_work_sync(&ndev_ctx->dwork);
+
+- rcu_read_lock();
+- nvdev = rcu_dereference(ndev_ctx->nvdev);
+-
+- if (nvdev)
++ rtnl_lock();
++ nvdev = rtnl_dereference(ndev_ctx->nvdev);
++ if (nvdev)
+ cancel_work_sync(&nvdev->subchan_work);
+
+ /*
+ * Call to the vsc driver to let it know that the device is being
+ * removed. Also blocks mtu and channel changes.
+ */
+- rtnl_lock();
+ vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
+ if (vf_netdev)
+ netvsc_unregister_vf(vf_netdev);
+@@ -2132,7 +2130,6 @@ static int netvsc_remove(struct hv_device *dev)
+ list_del(&ndev_ctx->list);
+
+ rtnl_unlock();
+- rcu_read_unlock();
+
+ hv_set_drvdata(dev, NULL);
+
+diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
+index 83c591713837..817451a1efd6 100644
+--- a/drivers/net/team/team.c
++++ b/drivers/net/team/team.c
+@@ -1165,6 +1165,11 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
+ return -EBUSY;
+ }
+
++ if (dev == port_dev) {
++ netdev_err(dev, "Cannot enslave team device to itself\n");
++ return -EINVAL;
++ }
++
+ if (port_dev->features & NETIF_F_VLAN_CHALLENGED &&
+ vlan_uses_dev(dev)) {
+ netdev_err(dev, "Device %s is VLAN challenged and team device has VLAN set up\n",
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index c5d4b35bb72a..11a25cef113f 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -1233,6 +1233,7 @@ static const struct usb_device_id products[] = {
+ {QMI_FIXED_INTF(0x0b3c, 0xc00b, 4)}, /* Olivetti Olicard 500 */
+ {QMI_FIXED_INTF(0x1e2d, 0x0060, 4)}, /* Cinterion PLxx */
+ {QMI_FIXED_INTF(0x1e2d, 0x0053, 4)}, /* Cinterion PHxx,PXxx */
++ {QMI_FIXED_INTF(0x1e2d, 0x0063, 10)}, /* Cinterion ALASxx (1 RmNet) */
+ {QMI_FIXED_INTF(0x1e2d, 0x0082, 4)}, /* Cinterion PHxx,PXxx (2 RmNet) */
+ {QMI_FIXED_INTF(0x1e2d, 0x0082, 5)}, /* Cinterion PHxx,PXxx (2 RmNet) */
+ {QMI_FIXED_INTF(0x1e2d, 0x0083, 4)}, /* Cinterion PHxx,PXxx (1 RmNet + USB Audio)*/
+diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
+index 05553d252446..b64b1ee56d2d 100644
+--- a/drivers/net/usb/smsc75xx.c
++++ b/drivers/net/usb/smsc75xx.c
+@@ -1517,6 +1517,7 @@ static void smsc75xx_unbind(struct usbnet *dev, struct usb_interface *intf)
+ {
+ struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
+ if (pdata) {
++ cancel_work_sync(&pdata->set_multicast);
+ netif_dbg(dev, ifdown, dev->net, "free pdata\n");
+ kfree(pdata);
+ pdata = NULL;
+diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c
+index ffc87a956d97..53d1c08cef4d 100644
+--- a/drivers/pci/host/pci-hyperv.c
++++ b/drivers/pci/host/pci-hyperv.c
+@@ -100,6 +100,9 @@ static enum pci_protocol_version_t pci_protocol_version;
+
+ #define STATUS_REVISION_MISMATCH 0xC0000059
+
++/* space for 32bit serial number as string */
++#define SLOT_NAME_SIZE 11
++
+ /*
+ * Message Types
+ */
+@@ -516,6 +519,7 @@ struct hv_pci_dev {
+ struct list_head list_entry;
+ refcount_t refs;
+ enum hv_pcichild_state state;
++ struct pci_slot *pci_slot;
+ struct pci_function_description desc;
+ bool reported_missing;
+ struct hv_pcibus_device *hbus;
+@@ -1481,6 +1485,34 @@ static void prepopulate_bars(struct hv_pcibus_device *hbus)
+ spin_unlock_irqrestore(&hbus->device_list_lock, flags);
+ }
+
++/*
++ * Assign entries in sysfs pci slot directory.
++ *
++ * Note that this function does not need to lock the children list
++ * because it is called from pci_devices_present_work which
++ * is serialized with hv_eject_device_work because they are on the
++ * same ordered workqueue. Therefore hbus->children list will not change
++ * even when pci_create_slot sleeps.
++ */
++static void hv_pci_assign_slots(struct hv_pcibus_device *hbus)
++{
++ struct hv_pci_dev *hpdev;
++ char name[SLOT_NAME_SIZE];
++ int slot_nr;
++
++ list_for_each_entry(hpdev, &hbus->children, list_entry) {
++ if (hpdev->pci_slot)
++ continue;
++
++ slot_nr = PCI_SLOT(wslot_to_devfn(hpdev->desc.win_slot.slot));
++ snprintf(name, SLOT_NAME_SIZE, "%u", hpdev->desc.ser);
++ hpdev->pci_slot = pci_create_slot(hbus->pci_bus, slot_nr,
++ name, NULL);
++ if (!hpdev->pci_slot)
++ pr_warn("pci_create slot %s failed\n", name);
++ }
++}
++
+ /**
+ * create_root_hv_pci_bus() - Expose a new root PCI bus
+ * @hbus: Root PCI bus, as understood by this driver
+@@ -1504,6 +1536,7 @@ static int create_root_hv_pci_bus(struct hv_pcibus_device *hbus)
+ pci_lock_rescan_remove();
+ pci_scan_child_bus(hbus->pci_bus);
+ pci_bus_assign_resources(hbus->pci_bus);
++ hv_pci_assign_slots(hbus);
+ pci_bus_add_devices(hbus->pci_bus);
+ pci_unlock_rescan_remove();
+ hbus->state = hv_pcibus_installed;
+@@ -1787,6 +1820,7 @@ static void pci_devices_present_work(struct work_struct *work)
+ */
+ pci_lock_rescan_remove();
+ pci_scan_child_bus(hbus->pci_bus);
++ hv_pci_assign_slots(hbus);
+ pci_unlock_rescan_remove();
+ break;
+
+@@ -1895,6 +1929,9 @@ static void hv_eject_device_work(struct work_struct *work)
+ list_del(&hpdev->list_entry);
+ spin_unlock_irqrestore(&hpdev->hbus->device_list_lock, flags);
+
++ if (hpdev->pci_slot)
++ pci_destroy_slot(hpdev->pci_slot);
++
+ memset(&ctxt, 0, sizeof(ctxt));
+ ejct_pkt = (struct pci_eject_response *)&ctxt.pkt.message;
+ ejct_pkt->message_type.type = PCI_EJECTION_COMPLETE;
+diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
+index d14fc2e67f93..5e06917b4cef 100644
+--- a/drivers/perf/arm_pmu.c
++++ b/drivers/perf/arm_pmu.c
+@@ -483,7 +483,13 @@ static int armpmu_filter_match(struct perf_event *event)
+ {
+ struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+ unsigned int cpu = smp_processor_id();
+- return cpumask_test_cpu(cpu, &armpmu->supported_cpus);
++ int ret;
++
++ ret = cpumask_test_cpu(cpu, &armpmu->supported_cpus);
++ if (ret && armpmu->filter_match)
++ return armpmu->filter_match(event);
++
++ return ret;
+ }
+
+ static ssize_t armpmu_cpumask_show(struct device *dev,
+diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c
+index db9cca4a83ff..22558bf29424 100644
+--- a/drivers/pinctrl/pinctrl-mcp23s08.c
++++ b/drivers/pinctrl/pinctrl-mcp23s08.c
+@@ -643,6 +643,14 @@ static int mcp23s08_irq_setup(struct mcp23s08 *mcp)
+ return err;
+ }
+
++ return 0;
++}
++
++static int mcp23s08_irqchip_setup(struct mcp23s08 *mcp)
++{
++ struct gpio_chip *chip = &mcp->chip;
++ int err;
++
+ err = gpiochip_irqchip_add_nested(chip,
+ &mcp23s08_irq_chip,
+ 0,
+@@ -907,7 +915,7 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
+ }
+
+ if (mcp->irq && mcp->irq_controller) {
+- ret = mcp23s08_irq_setup(mcp);
++ ret = mcp23s08_irqchip_setup(mcp);
+ if (ret)
+ goto fail;
+ }
+@@ -932,6 +940,9 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
+ goto fail;
+ }
+
++ if (mcp->irq)
++ ret = mcp23s08_irq_setup(mcp);
++
+ fail:
+ if (ret < 0)
+ dev_dbg(dev, "can't setup chip %d, --> %d\n", addr, ret);
+diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c
+index 72ce6ad95767..1419eaea03d8 100644
+--- a/drivers/s390/cio/vfio_ccw_cp.c
++++ b/drivers/s390/cio/vfio_ccw_cp.c
+@@ -172,7 +172,7 @@ static bool pfn_array_table_iova_pinned(struct pfn_array_table *pat,
+
+ for (i = 0; i < pat->pat_nr; i++, pa++)
+ for (j = 0; j < pa->pa_nr; j++)
+- if (pa->pa_iova_pfn[i] == iova_pfn)
++ if (pa->pa_iova_pfn[j] == iova_pfn)
+ return true;
+
+ return false;
+diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
+index aba58d3848a6..511a31b359c7 100644
+--- a/drivers/scsi/qla2xxx/qla_target.h
++++ b/drivers/scsi/qla2xxx/qla_target.h
+@@ -374,8 +374,8 @@ struct atio_from_isp {
+ static inline int fcpcmd_is_corrupted(struct atio *atio)
+ {
+ if (atio->entry_type == ATIO_TYPE7 &&
+- (le16_to_cpu(atio->attr_n_length & FCP_CMD_LENGTH_MASK) <
+- FCP_CMD_LENGTH_MIN))
++ ((le16_to_cpu(atio->attr_n_length) & FCP_CMD_LENGTH_MASK) <
++ FCP_CMD_LENGTH_MIN))
+ return 1;
+ else
+ return 0;
+diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
+index 52fa52c20be0..d2cafdae8317 100644
+--- a/drivers/target/iscsi/iscsi_target.c
++++ b/drivers/target/iscsi/iscsi_target.c
+@@ -1421,7 +1421,8 @@ static void iscsit_do_crypto_hash_buf(
+
+ sg_init_table(sg, ARRAY_SIZE(sg));
+ sg_set_buf(sg, buf, payload_length);
+- sg_set_buf(sg + 1, pad_bytes, padding);
++ if (padding)
++ sg_set_buf(sg + 1, pad_bytes, padding);
+
+ ahash_request_set_crypt(hash, sg, data_crc, payload_length + padding);
+
+@@ -3942,10 +3943,14 @@ static bool iscsi_target_check_conn_state(struct iscsi_conn *conn)
+ static void iscsit_get_rx_pdu(struct iscsi_conn *conn)
+ {
+ int ret;
+- u8 buffer[ISCSI_HDR_LEN], opcode;
++ u8 *buffer, opcode;
+ u32 checksum = 0, digest = 0;
+ struct kvec iov;
+
++ buffer = kcalloc(ISCSI_HDR_LEN, sizeof(*buffer), GFP_KERNEL);
++ if (!buffer)
++ return;
++
+ while (!kthread_should_stop()) {
+ /*
+ * Ensure that both TX and RX per connection kthreads
+@@ -3953,7 +3958,6 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *conn)
+ */
+ iscsit_thread_check_cpumask(conn, current, 0);
+
+- memset(buffer, 0, ISCSI_HDR_LEN);
+ memset(&iov, 0, sizeof(struct kvec));
+
+ iov.iov_base = buffer;
+@@ -3962,7 +3966,7 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *conn)
+ ret = rx_data(conn, &iov, 1, ISCSI_HDR_LEN);
+ if (ret != ISCSI_HDR_LEN) {
+ iscsit_rx_thread_wait_for_tcp(conn);
+- return;
++ break;
+ }
+
+ if (conn->conn_ops->HeaderDigest) {
+@@ -3972,7 +3976,7 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *conn)
+ ret = rx_data(conn, &iov, 1, ISCSI_CRC_LEN);
+ if (ret != ISCSI_CRC_LEN) {
+ iscsit_rx_thread_wait_for_tcp(conn);
+- return;
++ break;
+ }
+
+ iscsit_do_crypto_hash_buf(conn->conn_rx_hash,
+@@ -3996,7 +4000,7 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *conn)
+ }
+
+ if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT)
+- return;
++ break;
+
+ opcode = buffer[0] & ISCSI_OPCODE_MASK;
+
+@@ -4007,13 +4011,15 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *conn)
+ " while in Discovery Session, rejecting.\n", opcode);
+ iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
+ buffer);
+- return;
++ break;
+ }
+
+ ret = iscsi_target_rx_opcode(conn, buffer);
+ if (ret < 0)
+- return;
++ break;
+ }
++
++ kfree(buffer);
+ }
+
+ int iscsi_target_rx_thread(void *arg)
+diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
+index c01d1f3a1c7d..d2a9767a8e9c 100644
+--- a/drivers/usb/host/xhci-hub.c
++++ b/drivers/usb/host/xhci-hub.c
+@@ -1236,17 +1236,17 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+ temp = readl(port_array[wIndex]);
+ break;
+ }
+-
+- /* Software should not attempt to set
+- * port link state above '3' (U3) and the port
+- * must be enabled.
+- */
+- if ((temp & PORT_PE) == 0 ||
+- (link_state > USB_SS_PORT_LS_U3)) {
+- xhci_warn(xhci, "Cannot set link state.\n");
++ /* Port must be enabled */
++ if (!(temp & PORT_PE)) {
++ retval = -ENODEV;
++ break;
++ }
++ /* Can't set port link state above '3' (U3) */
++ if (link_state > USB_SS_PORT_LS_U3) {
++ xhci_warn(xhci, "Cannot set port %d link state %d\n",
++ wIndex, link_state);
+ goto error;
+ }
+-
+ if (link_state == USB_SS_PORT_LS_U3) {
+ slot_id = xhci_find_slot_id_by_port(hcd, xhci,
+ wIndex + 1);
+diff --git a/drivers/video/fbdev/aty/atyfb.h b/drivers/video/fbdev/aty/atyfb.h
+index 8235b285dbb2..d09bab3bf224 100644
+--- a/drivers/video/fbdev/aty/atyfb.h
++++ b/drivers/video/fbdev/aty/atyfb.h
+@@ -333,6 +333,8 @@ extern const struct aty_pll_ops aty_pll_ct; /* Integrated */
+ extern void aty_set_pll_ct(const struct fb_info *info, const union aty_pll *pll);
+ extern u8 aty_ld_pll_ct(int offset, const struct atyfb_par *par);
+
++extern const u8 aty_postdividers[8];
++
+
+ /*
+ * Hardware cursor support
+@@ -359,7 +361,6 @@ static inline void wait_for_idle(struct atyfb_par *par)
+
+ extern void aty_reset_engine(const struct atyfb_par *par);
+ extern void aty_init_engine(struct atyfb_par *par, struct fb_info *info);
+-extern u8 aty_ld_pll_ct(int offset, const struct atyfb_par *par);
+
+ void atyfb_copyarea(struct fb_info *info, const struct fb_copyarea *area);
+ void atyfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
+diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c
+index 3ec72f19114b..d4b938276d23 100644
+--- a/drivers/video/fbdev/aty/atyfb_base.c
++++ b/drivers/video/fbdev/aty/atyfb_base.c
+@@ -3087,17 +3087,18 @@ static int atyfb_setup_sparc(struct pci_dev *pdev, struct fb_info *info,
+ /*
+ * PLL Reference Divider M:
+ */
+- M = pll_regs[2];
++ M = pll_regs[PLL_REF_DIV];
+
+ /*
+ * PLL Feedback Divider N (Dependent on CLOCK_CNTL):
+ */
+- N = pll_regs[7 + (clock_cntl & 3)];
++ N = pll_regs[VCLK0_FB_DIV + (clock_cntl & 3)];
+
+ /*
+ * PLL Post Divider P (Dependent on CLOCK_CNTL):
+ */
+- P = 1 << (pll_regs[6] >> ((clock_cntl & 3) << 1));
++ P = aty_postdividers[((pll_regs[VCLK_POST_DIV] >> ((clock_cntl & 3) << 1)) & 3) |
++ ((pll_regs[PLL_EXT_CNTL] >> (2 + (clock_cntl & 3))) & 4)];
+
+ /*
+ * PLL Divider Q:
+diff --git a/drivers/video/fbdev/aty/mach64_ct.c b/drivers/video/fbdev/aty/mach64_ct.c
+index 7d3bd723d3d5..d55f4bacb41c 100644
+--- a/drivers/video/fbdev/aty/mach64_ct.c
++++ b/drivers/video/fbdev/aty/mach64_ct.c
+@@ -115,7 +115,7 @@ static void aty_st_pll_ct(int offset, u8 val, const struct atyfb_par *par)
+ */
+
+ #define Maximum_DSP_PRECISION 7
+-static u8 postdividers[] = {1,2,4,8,3};
++const u8 aty_postdividers[8] = {1,2,4,8,3,5,6,12};
+
+ static int aty_dsp_gt(const struct fb_info *info, u32 bpp, struct pll_ct *pll)
+ {
+@@ -222,7 +222,7 @@ static int aty_valid_pll_ct(const struct fb_info *info, u32 vclk_per, struct pll
+ pll->vclk_post_div += (q < 64*8);
+ pll->vclk_post_div += (q < 32*8);
+ }
+- pll->vclk_post_div_real = postdividers[pll->vclk_post_div];
++ pll->vclk_post_div_real = aty_postdividers[pll->vclk_post_div];
+ // pll->vclk_post_div <<= 6;
+ pll->vclk_fb_div = q * pll->vclk_post_div_real / 8;
+ pllvclk = (1000000 * 2 * pll->vclk_fb_div) /
+@@ -513,7 +513,7 @@ static int aty_init_pll_ct(const struct fb_info *info, union aty_pll *pll)
+ u8 mclk_fb_div, pll_ext_cntl;
+ pll->ct.pll_ref_div = aty_ld_pll_ct(PLL_REF_DIV, par);
+ pll_ext_cntl = aty_ld_pll_ct(PLL_EXT_CNTL, par);
+- pll->ct.xclk_post_div_real = postdividers[pll_ext_cntl & 0x07];
++ pll->ct.xclk_post_div_real = aty_postdividers[pll_ext_cntl & 0x07];
+ mclk_fb_div = aty_ld_pll_ct(MCLK_FB_DIV, par);
+ if (pll_ext_cntl & PLL_MFB_TIMES_4_2B)
+ mclk_fb_div <<= 1;
+@@ -535,7 +535,7 @@ static int aty_init_pll_ct(const struct fb_info *info, union aty_pll *pll)
+ xpost_div += (q < 64*8);
+ xpost_div += (q < 32*8);
+ }
+- pll->ct.xclk_post_div_real = postdividers[xpost_div];
++ pll->ct.xclk_post_div_real = aty_postdividers[xpost_div];
+ pll->ct.mclk_fb_div = q * pll->ct.xclk_post_div_real / 8;
+
+ #ifdef CONFIG_PPC
+@@ -584,7 +584,7 @@ static int aty_init_pll_ct(const struct fb_info *info, union aty_pll *pll)
+ mpost_div += (q < 64*8);
+ mpost_div += (q < 32*8);
+ }
+- sclk_post_div_real = postdividers[mpost_div];
++ sclk_post_div_real = aty_postdividers[mpost_div];
+ pll->ct.sclk_fb_div = q * sclk_post_div_real / 8;
+ pll->ct.spll_cntl2 = mpost_div << 4;
+ #ifdef DEBUG
+diff --git a/fs/dcache.c b/fs/dcache.c
+index c1a7c174a905..28b2e770bb69 100644
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -270,11 +270,25 @@ static void __d_free(struct rcu_head *head)
+ kmem_cache_free(dentry_cache, dentry);
+ }
+
++static void __d_free_external_name(struct rcu_head *head)
++{
++ struct external_name *name = container_of(head, struct external_name,
++ u.head);
++
++ mod_node_page_state(page_pgdat(virt_to_page(name)),
++ NR_INDIRECTLY_RECLAIMABLE_BYTES,
++ -ksize(name));
++
++ kfree(name);
++}
++
+ static void __d_free_external(struct rcu_head *head)
+ {
+ struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);
+- kfree(external_name(dentry));
+- kmem_cache_free(dentry_cache, dentry);
++
++ __d_free_external_name(&external_name(dentry)->u.head);
++
++ kmem_cache_free(dentry_cache, dentry);
+ }
+
+ static inline int dname_external(const struct dentry *dentry)
+@@ -305,7 +319,7 @@ void release_dentry_name_snapshot(struct name_snapshot *name)
+ struct external_name *p;
+ p = container_of(name->name, struct external_name, name[0]);
+ if (unlikely(atomic_dec_and_test(&p->u.count)))
+- kfree_rcu(p, u.head);
++ call_rcu(&p->u.head, __d_free_external_name);
+ }
+ }
+ EXPORT_SYMBOL(release_dentry_name_snapshot);
+@@ -1605,6 +1619,7 @@ EXPORT_SYMBOL(d_invalidate);
+
+ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
+ {
++ struct external_name *ext = NULL;
+ struct dentry *dentry;
+ char *dname;
+ int err;
+@@ -1625,14 +1640,13 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
+ dname = dentry->d_iname;
+ } else if (name->len > DNAME_INLINE_LEN-1) {
+ size_t size = offsetof(struct external_name, name[1]);
+- struct external_name *p = kmalloc(size + name->len,
+- GFP_KERNEL_ACCOUNT);
+- if (!p) {
++ ext = kmalloc(size + name->len, GFP_KERNEL_ACCOUNT);
++ if (!ext) {
+ kmem_cache_free(dentry_cache, dentry);
+ return NULL;
+ }
+- atomic_set(&p->u.count, 1);
+- dname = p->name;
++ atomic_set(&ext->u.count, 1);
++ dname = ext->name;
+ if (IS_ENABLED(CONFIG_DCACHE_WORD_ACCESS))
+ kasan_unpoison_shadow(dname,
+ round_up(name->len + 1, sizeof(unsigned long)));
+@@ -1675,6 +1689,12 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
+ }
+ }
+
++ if (unlikely(ext)) {
++ pg_data_t *pgdat = page_pgdat(virt_to_page(ext));
++ mod_node_page_state(pgdat, NR_INDIRECTLY_RECLAIMABLE_BYTES,
++ ksize(ext));
++ }
++
+ this_cpu_inc(nr_dentry);
+
+ return dentry;
+@@ -2769,7 +2789,7 @@ static void copy_name(struct dentry *dentry, struct dentry *target)
+ dentry->d_name.hash_len = target->d_name.hash_len;
+ }
+ if (old_name && likely(atomic_dec_and_test(&old_name->u.count)))
+- kfree_rcu(old_name, u.head);
++ call_rcu(&old_name->u.head, __d_free_external_name);
+ }
+
+ static void dentry_lock_for_move(struct dentry *dentry, struct dentry *target)
+diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
+index 4e8f77504a57..e7905d9353e8 100644
+--- a/include/linux/cgroup-defs.h
++++ b/include/linux/cgroup-defs.h
+@@ -353,6 +353,7 @@ struct cgroup {
+ * specific task are charged to the dom_cgrp.
+ */
+ struct cgroup *dom_cgrp;
++ struct cgroup *old_dom_cgrp; /* used while enabling threaded */
+
+ /*
+ * list of pidlists, up to two for each namespace (one for procs, one
+diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
+index f0938257ee6d..f679f5268467 100644
+--- a/include/linux/mmzone.h
++++ b/include/linux/mmzone.h
+@@ -180,6 +180,7 @@ enum node_stat_item {
+ NR_VMSCAN_IMMEDIATE, /* Prioritise for reclaim when writeback ends */
+ NR_DIRTIED, /* page dirtyings since bootup */
+ NR_WRITTEN, /* page writings since bootup */
++ NR_INDIRECTLY_RECLAIMABLE_BYTES, /* measured in bytes */
+ NR_VM_NODE_STAT_ITEMS
+ };
+
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index 2ea7ee1fb495..a516dbe5869f 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -2307,6 +2307,13 @@ struct netdev_notifier_info {
+ struct net_device *dev;
+ };
+
++struct netdev_notifier_info_ext {
++ struct netdev_notifier_info info; /* must be first */
++ union {
++ u32 mtu;
++ } ext;
++};
++
+ struct netdev_notifier_change_info {
+ struct netdev_notifier_info info; /* must be first */
+ unsigned int flags_changed;
+diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
+index af0f44effd44..251bc43fdcfc 100644
+--- a/include/linux/perf/arm_pmu.h
++++ b/include/linux/perf/arm_pmu.h
+@@ -110,6 +110,7 @@ struct arm_pmu {
+ void (*stop)(struct arm_pmu *);
+ void (*reset)(void *);
+ int (*map_event)(struct perf_event *event);
++ int (*filter_match)(struct perf_event *event);
+ int num_events;
+ u64 max_period;
+ bool secure_access; /* 32-bit ARM only */
+diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
+index 9397628a1967..cb462f9ab7dd 100644
+--- a/include/linux/virtio_net.h
++++ b/include/linux/virtio_net.h
+@@ -5,6 +5,24 @@
+ #include <linux/if_vlan.h>
+ #include <uapi/linux/virtio_net.h>
+
++static inline int virtio_net_hdr_set_proto(struct sk_buff *skb,
++ const struct virtio_net_hdr *hdr)
++{
++ switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
++ case VIRTIO_NET_HDR_GSO_TCPV4:
++ case VIRTIO_NET_HDR_GSO_UDP:
++ skb->protocol = cpu_to_be16(ETH_P_IP);
++ break;
++ case VIRTIO_NET_HDR_GSO_TCPV6:
++ skb->protocol = cpu_to_be16(ETH_P_IPV6);
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
+ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
+ const struct virtio_net_hdr *hdr,
+ bool little_endian)
+diff --git a/include/net/bonding.h b/include/net/bonding.h
+index 73799da57400..04008209506a 100644
+--- a/include/net/bonding.h
++++ b/include/net/bonding.h
+@@ -139,12 +139,6 @@ struct bond_parm_tbl {
+ int mode;
+ };
+
+-struct netdev_notify_work {
+- struct delayed_work work;
+- struct net_device *dev;
+- struct netdev_bonding_info bonding_info;
+-};
+-
+ struct slave {
+ struct net_device *dev; /* first - useful for panic debug */
+ struct bonding *bond; /* our master */
+@@ -172,6 +166,7 @@ struct slave {
+ #ifdef CONFIG_NET_POLL_CONTROLLER
+ struct netpoll *np;
+ #endif
++ struct delayed_work notify_work;
+ struct kobject kobj;
+ struct rtnl_link_stats64 slave_stats;
+ };
+diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
+index 8e51b4a69088..16a1492a5bd3 100644
+--- a/include/net/inet_sock.h
++++ b/include/net/inet_sock.h
+@@ -129,12 +129,6 @@ static inline int inet_request_bound_dev_if(const struct sock *sk,
+ return sk->sk_bound_dev_if;
+ }
+
+-static inline struct ip_options_rcu *ireq_opt_deref(const struct inet_request_sock *ireq)
+-{
+- return rcu_dereference_check(ireq->ireq_opt,
+- refcount_read(&ireq->req.rsk_refcnt) > 0);
+-}
+-
+ struct inet_cork {
+ unsigned int flags;
+ __be32 addr;
+diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
+index 5c5d344c0629..32df52869a14 100644
+--- a/include/net/ip_fib.h
++++ b/include/net/ip_fib.h
+@@ -372,6 +372,7 @@ int ip_fib_check_default(__be32 gw, struct net_device *dev);
+ int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force);
+ int fib_sync_down_addr(struct net_device *dev, __be32 local);
+ int fib_sync_up(struct net_device *dev, unsigned int nh_flags);
++void fib_sync_mtu(struct net_device *dev, u32 orig_mtu);
+
+ #ifdef CONFIG_IP_ROUTE_MULTIPATH
+ int fib_multipath_hash(const struct fib_info *fi, const struct flowi4 *fl4,
+diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h
+index d8afd8a5bd76..926ea701cdc4 100644
+--- a/include/sound/hdaudio.h
++++ b/include/sound/hdaudio.h
+@@ -357,6 +357,7 @@ void snd_hdac_bus_init_cmd_io(struct hdac_bus *bus);
+ void snd_hdac_bus_stop_cmd_io(struct hdac_bus *bus);
+ void snd_hdac_bus_enter_link_reset(struct hdac_bus *bus);
+ void snd_hdac_bus_exit_link_reset(struct hdac_bus *bus);
++int snd_hdac_bus_reset_link(struct hdac_bus *bus, bool full_reset);
+
+ void snd_hdac_bus_update_rirb(struct hdac_bus *bus);
+ int snd_hdac_bus_handle_stream_irq(struct hdac_bus *bus, unsigned int status,
+diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
+index 76c0ef2cb509..3fc11b8851ac 100644
+--- a/kernel/cgroup/cgroup.c
++++ b/kernel/cgroup/cgroup.c
+@@ -2780,11 +2780,12 @@ restart:
+ }
+
+ /**
+- * cgroup_save_control - save control masks of a subtree
++ * cgroup_save_control - save control masks and dom_cgrp of a subtree
+ * @cgrp: root of the target subtree
+ *
+- * Save ->subtree_control and ->subtree_ss_mask to the respective old_
+- * prefixed fields for @cgrp's subtree including @cgrp itself.
++ * Save ->subtree_control, ->subtree_ss_mask and ->dom_cgrp to the
++ * respective old_ prefixed fields for @cgrp's subtree including @cgrp
++ * itself.
+ */
+ static void cgroup_save_control(struct cgroup *cgrp)
+ {
+@@ -2794,6 +2795,7 @@ static void cgroup_save_control(struct cgroup *cgrp)
+ cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) {
+ dsct->old_subtree_control = dsct->subtree_control;
+ dsct->old_subtree_ss_mask = dsct->subtree_ss_mask;
++ dsct->old_dom_cgrp = dsct->dom_cgrp;
+ }
+ }
+
+@@ -2819,11 +2821,12 @@ static void cgroup_propagate_control(struct cgroup *cgrp)
+ }
+
+ /**
+- * cgroup_restore_control - restore control masks of a subtree
++ * cgroup_restore_control - restore control masks and dom_cgrp of a subtree
+ * @cgrp: root of the target subtree
+ *
+- * Restore ->subtree_control and ->subtree_ss_mask from the respective old_
+- * prefixed fields for @cgrp's subtree including @cgrp itself.
++ * Restore ->subtree_control, ->subtree_ss_mask and ->dom_cgrp from the
++ * respective old_ prefixed fields for @cgrp's subtree including @cgrp
++ * itself.
+ */
+ static void cgroup_restore_control(struct cgroup *cgrp)
+ {
+@@ -2833,6 +2836,7 @@ static void cgroup_restore_control(struct cgroup *cgrp)
+ cgroup_for_each_live_descendant_post(dsct, d_css, cgrp) {
+ dsct->subtree_control = dsct->old_subtree_control;
+ dsct->subtree_ss_mask = dsct->old_subtree_ss_mask;
++ dsct->dom_cgrp = dsct->old_dom_cgrp;
+ }
+ }
+
+@@ -3140,6 +3144,8 @@ static int cgroup_enable_threaded(struct cgroup *cgrp)
+ {
+ struct cgroup *parent = cgroup_parent(cgrp);
+ struct cgroup *dom_cgrp = parent->dom_cgrp;
++ struct cgroup *dsct;
++ struct cgroup_subsys_state *d_css;
+ int ret;
+
+ lockdep_assert_held(&cgroup_mutex);
+@@ -3169,12 +3175,13 @@ static int cgroup_enable_threaded(struct cgroup *cgrp)
+ */
+ cgroup_save_control(cgrp);
+
+- cgrp->dom_cgrp = dom_cgrp;
++ cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp)
++ if (dsct == cgrp || cgroup_is_threaded(dsct))
++ dsct->dom_cgrp = dom_cgrp;
++
+ ret = cgroup_apply_control(cgrp);
+ if (!ret)
+ parent->nr_threaded_children++;
+- else
+- cgrp->dom_cgrp = cgrp;
+
+ cgroup_finalize_control(cgrp, ret);
+ return ret;
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index 174612f8339c..39c1fedcfdb4 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -2843,9 +2843,6 @@ void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
+ if (!(pvmw->pmd && !pvmw->pte))
+ return;
+
+- mmu_notifier_invalidate_range_start(mm, address,
+- address + HPAGE_PMD_SIZE);
+-
+ flush_cache_range(vma, address, address + HPAGE_PMD_SIZE);
+ pmdval = *pvmw->pmd;
+ pmdp_invalidate(vma, address, pvmw->pmd);
+@@ -2858,9 +2855,6 @@ void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
+ set_pmd_at(mm, address, pvmw->pmd, pmdswp);
+ page_remove_rmap(page, true);
+ put_page(page);
+-
+- mmu_notifier_invalidate_range_end(mm, address,
+- address + HPAGE_PMD_SIZE);
+ }
+
+ void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 59ccf455fcbd..a604b5da6755 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -4557,6 +4557,13 @@ long si_mem_available(void)
+ min(global_node_page_state(NR_SLAB_RECLAIMABLE) / 2,
+ wmark_low);
+
++ /*
++ * Part of the kernel memory, which can be released under memory
++ * pressure.
++ */
++ available += global_node_page_state(NR_INDIRECTLY_RECLAIMABLE_BYTES) >>
++ PAGE_SHIFT;
++
+ if (available < 0)
+ available = 0;
+ return available;
+diff --git a/mm/percpu.c b/mm/percpu.c
+index 5fa5e79b69f0..3074148b7e0d 100644
+--- a/mm/percpu.c
++++ b/mm/percpu.c
+@@ -1208,6 +1208,7 @@ static void pcpu_free_chunk(struct pcpu_chunk *chunk)
+ {
+ if (!chunk)
+ return;
++ pcpu_mem_free(chunk->md_blocks);
+ pcpu_mem_free(chunk->bound_map);
+ pcpu_mem_free(chunk->alloc_map);
+ pcpu_mem_free(chunk);
+diff --git a/mm/util.c b/mm/util.c
+index 34e57fae959d..547e04b5cfff 100644
+--- a/mm/util.c
++++ b/mm/util.c
+@@ -635,6 +635,13 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
+ */
+ free += global_node_page_state(NR_SLAB_RECLAIMABLE);
+
++ /*
++ * Part of the kernel memory, which can be released
++ * under memory pressure.
++ */
++ free += global_node_page_state(
++ NR_INDIRECTLY_RECLAIMABLE_BYTES) >> PAGE_SHIFT;
++
+ /*
+ * Leave reserved pages. The pages are not for anonymous pages.
+ */
+diff --git a/mm/vmstat.c b/mm/vmstat.c
+index 2bdc962b2dfe..527ae727d547 100644
+--- a/mm/vmstat.c
++++ b/mm/vmstat.c
+@@ -1090,6 +1090,7 @@ const char * const vmstat_text[] = {
+ "nr_vmscan_immediate_reclaim",
+ "nr_dirtied",
+ "nr_written",
++ "", /* nr_indirectly_reclaimable */
+
+ /* enum writeback_stat_item counters */
+ "nr_dirty_threshold",
+@@ -1214,7 +1215,6 @@ const char * const vmstat_text[] = {
+ #ifdef CONFIG_DEBUG_VM_VMACACHE
+ "vmacache_find_calls",
+ "vmacache_find_hits",
+- "vmacache_full_flushes",
+ #endif
+ #ifdef CONFIG_SWAP
+ "swap_ra",
+@@ -1673,6 +1673,10 @@ static int vmstat_show(struct seq_file *m, void *arg)
+ unsigned long *l = arg;
+ unsigned long off = l - (unsigned long *)m->private;
+
++ /* Skip hidden vmstat items. */
++ if (*vmstat_text[off] == '\0')
++ return 0;
++
+ seq_puts(m, vmstat_text[off]);
+ seq_put_decimal_ull(m, " ", *l);
+ seq_putc(m, '\n');
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 85f4a1047707..e8a66ad6d07c 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -1688,6 +1688,28 @@ int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
+ }
+ EXPORT_SYMBOL(call_netdevice_notifiers);
+
++/**
++ * call_netdevice_notifiers_mtu - call all network notifier blocks
++ * @val: value passed unmodified to notifier function
++ * @dev: net_device pointer passed unmodified to notifier function
++ * @arg: additional u32 argument passed to the notifier function
++ *
++ * Call all network notifier blocks. Parameters and return value
++ * are as for raw_notifier_call_chain().
++ */
++static int call_netdevice_notifiers_mtu(unsigned long val,
++ struct net_device *dev, u32 arg)
++{
++ struct netdev_notifier_info_ext info = {
++ .info.dev = dev,
++ .ext.mtu = arg,
++ };
++
++ BUILD_BUG_ON(offsetof(struct netdev_notifier_info_ext, info) != 0);
++
++ return call_netdevice_notifiers_info(val, dev, &info.info);
++}
++
+ #ifdef CONFIG_NET_INGRESS
+ static struct static_key ingress_needed __read_mostly;
+
+@@ -6891,14 +6913,16 @@ int dev_set_mtu(struct net_device *dev, int new_mtu)
+ err = __dev_set_mtu(dev, new_mtu);
+
+ if (!err) {
+- err = call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
++ err = call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev,
++ orig_mtu);
+ err = notifier_to_errno(err);
+ if (err) {
+ /* setting mtu back and notifying everyone again,
+ * so that they have a chance to revert changes.
+ */
+ __dev_set_mtu(dev, orig_mtu);
+- call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
++ call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev,
++ new_mtu);
+ }
+ }
+ return err;
+diff --git a/net/core/ethtool.c b/net/core/ethtool.c
+index 490eab16b04b..0ae5ac5e090f 100644
+--- a/net/core/ethtool.c
++++ b/net/core/ethtool.c
+@@ -2572,6 +2572,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
+ case ETHTOOL_GPHYSTATS:
+ case ETHTOOL_GTSO:
+ case ETHTOOL_GPERMADDR:
++ case ETHTOOL_GUFO:
+ case ETHTOOL_GGSO:
+ case ETHTOOL_GGRO:
+ case ETHTOOL_GFLAGS:
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index efe396cc77b5..760364526dc1 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -2430,6 +2430,12 @@ struct net_device *rtnl_create_link(struct net *net,
+ else if (ops->get_num_rx_queues)
+ num_rx_queues = ops->get_num_rx_queues();
+
++ if (num_tx_queues < 1 || num_tx_queues > 4096)
++ return ERR_PTR(-EINVAL);
++
++ if (num_rx_queues < 1 || num_rx_queues > 4096)
++ return ERR_PTR(-EINVAL);
++
+ dev = alloc_netdev_mqs(ops->priv_size, ifname, name_assign_type,
+ ops->setup, num_tx_queues, num_rx_queues);
+ if (!dev)
+@@ -3292,16 +3298,27 @@ static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
+ int err = 0;
+ int fidx = 0;
+
+- err = nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb,
+- IFLA_MAX, ifla_policy, NULL);
+- if (err < 0) {
+- return -EINVAL;
+- } else if (err == 0) {
+- if (tb[IFLA_MASTER])
+- br_idx = nla_get_u32(tb[IFLA_MASTER]);
+- }
++ /* A hack to preserve kernel<->userspace interface.
++ * Before Linux v4.12 this code accepted ndmsg since iproute2 v3.3.0.
++ * However, ndmsg is shorter than ifinfomsg thus nlmsg_parse() bails.
++ * So, check for ndmsg with an optional u32 attribute (not used here).
++ * Fortunately these sizes don't conflict with the size of ifinfomsg
++ * with an optional attribute.
++ */
++ if (nlmsg_len(cb->nlh) != sizeof(struct ndmsg) &&
++ (nlmsg_len(cb->nlh) != sizeof(struct ndmsg) +
++ nla_attr_size(sizeof(u32)))) {
++ err = nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb,
++ IFLA_MAX, ifla_policy, NULL);
++ if (err < 0) {
++ return -EINVAL;
++ } else if (err == 0) {
++ if (tb[IFLA_MASTER])
++ br_idx = nla_get_u32(tb[IFLA_MASTER]);
++ }
+
+- brport_idx = ifm->ifi_index;
++ brport_idx = ifm->ifi_index;
++ }
+
+ if (br_idx) {
+ br_dev = __dev_get_by_index(net, br_idx);
+diff --git a/net/dccp/input.c b/net/dccp/input.c
+index fa6be9750bb4..849f399aec21 100644
+--- a/net/dccp/input.c
++++ b/net/dccp/input.c
+@@ -605,11 +605,13 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
+ if (sk->sk_state == DCCP_LISTEN) {
+ if (dh->dccph_type == DCCP_PKT_REQUEST) {
+ /* It is possible that we process SYN packets from backlog,
+- * so we need to make sure to disable BH right there.
++ * so we need to make sure to disable BH and RCU right there.
+ */
++ rcu_read_lock();
+ local_bh_disable();
+ acceptable = inet_csk(sk)->icsk_af_ops->conn_request(sk, skb) >= 0;
+ local_bh_enable();
++ rcu_read_unlock();
+ if (!acceptable)
+ return 1;
+ consume_skb(skb);
+diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
+index b08feb219b44..8e08cea6f178 100644
+--- a/net/dccp/ipv4.c
++++ b/net/dccp/ipv4.c
+@@ -493,9 +493,11 @@ static int dccp_v4_send_response(const struct sock *sk, struct request_sock *req
+
+ dh->dccph_checksum = dccp_v4_csum_finish(skb, ireq->ir_loc_addr,
+ ireq->ir_rmt_addr);
++ rcu_read_lock();
+ err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
+ ireq->ir_rmt_addr,
+- ireq_opt_deref(ireq));
++ rcu_dereference(ireq->ireq_opt));
++ rcu_read_unlock();
+ err = net_xmit_eval(err);
+ }
+
+diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
+index 5bbdd05d0cd3..1b3f860f7dcd 100644
+--- a/net/ipv4/fib_frontend.c
++++ b/net/ipv4/fib_frontend.c
+@@ -1185,7 +1185,8 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
+ static int fib_netdev_event(struct notifier_block *this, unsigned long event, void *ptr)
+ {
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+- struct netdev_notifier_changeupper_info *info;
++ struct netdev_notifier_changeupper_info *upper_info = ptr;
++ struct netdev_notifier_info_ext *info_ext = ptr;
+ struct in_device *in_dev;
+ struct net *net = dev_net(dev);
+ unsigned int flags;
+@@ -1220,16 +1221,19 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
+ fib_sync_up(dev, RTNH_F_LINKDOWN);
+ else
+ fib_sync_down_dev(dev, event, false);
+- /* fall through */
++ rt_cache_flush(net);
++ break;
+ case NETDEV_CHANGEMTU:
++ fib_sync_mtu(dev, info_ext->ext.mtu);
+ rt_cache_flush(net);
+ break;
+ case NETDEV_CHANGEUPPER:
+- info = ptr;
++ upper_info = ptr;
+ /* flush all routes if dev is linked to or unlinked from
+ * an L3 master device (e.g., VRF)
+ */
+- if (info->upper_dev && netif_is_l3_master(info->upper_dev))
++ if (upper_info->upper_dev &&
++ netif_is_l3_master(upper_info->upper_dev))
+ fib_disable_ip(dev, NETDEV_DOWN, true);
+ break;
+ }
+diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
+index b557af72cde9..e76b8a7bb891 100644
+--- a/net/ipv4/fib_semantics.c
++++ b/net/ipv4/fib_semantics.c
+@@ -1520,6 +1520,56 @@ static int call_fib_nh_notifiers(struct fib_nh *fib_nh,
+ return NOTIFY_DONE;
+ }
+
++/* Update the PMTU of exceptions when:
++ * - the new MTU of the first hop becomes smaller than the PMTU
++ * - the old MTU was the same as the PMTU, and it limited discovery of
++ * larger MTUs on the path. With that limit raised, we can now
++ * discover larger MTUs
++ * A special case is locked exceptions, for which the PMTU is smaller
++ * than the minimal accepted PMTU:
++ * - if the new MTU is greater than the PMTU, don't make any change
++ * - otherwise, unlock and set PMTU
++ */
++static void nh_update_mtu(struct fib_nh *nh, u32 new, u32 orig)
++{
++ struct fnhe_hash_bucket *bucket;
++ int i;
++
++ bucket = rcu_dereference_protected(nh->nh_exceptions, 1);
++ if (!bucket)
++ return;
++
++ for (i = 0; i < FNHE_HASH_SIZE; i++) {
++ struct fib_nh_exception *fnhe;
++
++ for (fnhe = rcu_dereference_protected(bucket[i].chain, 1);
++ fnhe;
++ fnhe = rcu_dereference_protected(fnhe->fnhe_next, 1)) {
++ if (fnhe->fnhe_mtu_locked) {
++ if (new <= fnhe->fnhe_pmtu) {
++ fnhe->fnhe_pmtu = new;
++ fnhe->fnhe_mtu_locked = false;
++ }
++ } else if (new < fnhe->fnhe_pmtu ||
++ orig == fnhe->fnhe_pmtu) {
++ fnhe->fnhe_pmtu = new;
++ }
++ }
++ }
++}
++
++void fib_sync_mtu(struct net_device *dev, u32 orig_mtu)
++{
++ unsigned int hash = fib_devindex_hashfn(dev->ifindex);
++ struct hlist_head *head = &fib_info_devhash[hash];
++ struct fib_nh *nh;
++
++ hlist_for_each_entry(nh, head, nh_hash) {
++ if (nh->nh_dev == dev)
++ nh_update_mtu(nh, dev->mtu, orig_mtu);
++ }
++}
++
+ /* Event force Flags Description
+ * NETDEV_CHANGE 0 LINKDOWN Carrier OFF, not for scope host
+ * NETDEV_DOWN 0 LINKDOWN|DEAD Link down, not for scope host
+diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
+index 0cc08c512202..9d6b172caf6c 100644
+--- a/net/ipv4/inet_connection_sock.c
++++ b/net/ipv4/inet_connection_sock.c
+@@ -542,7 +542,8 @@ struct dst_entry *inet_csk_route_req(const struct sock *sk,
+ struct ip_options_rcu *opt;
+ struct rtable *rt;
+
+- opt = ireq_opt_deref(ireq);
++ rcu_read_lock();
++ opt = rcu_dereference(ireq->ireq_opt);
+
+ flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
+ RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
+@@ -556,11 +557,13 @@ struct dst_entry *inet_csk_route_req(const struct sock *sk,
+ goto no_route;
+ if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
+ goto route_err;
++ rcu_read_unlock();
+ return &rt->dst;
+
+ route_err:
+ ip_rt_put(rt);
+ no_route:
++ rcu_read_unlock();
+ __IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
+ return NULL;
+ }
+diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
+index 048d5f6dd320..4ef92ebc4f6d 100644
+--- a/net/ipv4/ip_sockglue.c
++++ b/net/ipv4/ip_sockglue.c
+@@ -147,7 +147,6 @@ static void ip_cmsg_recv_security(struct msghdr *msg, struct sk_buff *skb)
+ static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
+ {
+ struct sockaddr_in sin;
+- const struct iphdr *iph = ip_hdr(skb);
+ __be16 *ports;
+ int end;
+
+@@ -162,7 +161,7 @@ static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
+ ports = (__be16 *)skb_transport_header(skb);
+
+ sin.sin_family = AF_INET;
+- sin.sin_addr.s_addr = iph->daddr;
++ sin.sin_addr.s_addr = ip_hdr(skb)->daddr;
+ sin.sin_port = ports[1];
+ memset(sin.sin_zero, 0, sizeof(sin.sin_zero));
+
+diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
+index 4784f3f36b7e..72eee34092ae 100644
+--- a/net/ipv4/ip_tunnel.c
++++ b/net/ipv4/ip_tunnel.c
+@@ -635,6 +635,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
+ const struct iphdr *tnl_params, u8 protocol)
+ {
+ struct ip_tunnel *tunnel = netdev_priv(dev);
++ unsigned int inner_nhdr_len = 0;
+ const struct iphdr *inner_iph;
+ struct flowi4 fl4;
+ u8 tos, ttl;
+@@ -644,6 +645,14 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
+ __be32 dst;
+ bool connected;
+
++ /* ensure we can access the inner net header, for several users below */
++ if (skb->protocol == htons(ETH_P_IP))
++ inner_nhdr_len = sizeof(struct iphdr);
++ else if (skb->protocol == htons(ETH_P_IPV6))
++ inner_nhdr_len = sizeof(struct ipv6hdr);
++ if (unlikely(!pskb_may_pull(skb, inner_nhdr_len)))
++ goto tx_error;
++
+ inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
+ connected = (tunnel->parms.iph.daddr != 0);
+
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 991f382afc1b..e24c0d7adf65 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -5913,11 +5913,13 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
+ if (th->fin)
+ goto discard;
+ /* It is possible that we process SYN packets from backlog,
+- * so we need to make sure to disable BH right there.
++ * so we need to make sure to disable BH and RCU right there.
+ */
++ rcu_read_lock();
+ local_bh_disable();
+ acceptable = icsk->icsk_af_ops->conn_request(sk, skb) >= 0;
+ local_bh_enable();
++ rcu_read_unlock();
+
+ if (!acceptable)
+ return 1;
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index 0e1a670dabd9..31b34c0c2d5f 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -875,9 +875,11 @@ static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
+ if (skb) {
+ __tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
+
++ rcu_read_lock();
+ err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
+ ireq->ir_rmt_addr,
+- ireq_opt_deref(ireq));
++ rcu_dereference(ireq->ireq_opt));
++ rcu_read_unlock();
+ err = net_xmit_eval(err);
+ }
+
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index 3de413867991..dc0ec227b9d2 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -1565,7 +1565,7 @@ busy_check:
+ *err = error;
+ return NULL;
+ }
+-EXPORT_SYMBOL_GPL(__skb_recv_udp);
++EXPORT_SYMBOL(__skb_recv_udp);
+
+ /*
+ * This should be easy, if there is something there we
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index 6a76e41e6d51..569f7c3f6b95 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -4136,7 +4136,6 @@ static struct inet6_ifaddr *if6_get_first(struct seq_file *seq, loff_t pos)
+ p++;
+ continue;
+ }
+- state->offset++;
+ return ifa;
+ }
+
+@@ -4160,13 +4159,12 @@ static struct inet6_ifaddr *if6_get_next(struct seq_file *seq,
+ return ifa;
+ }
+
++ state->offset = 0;
+ while (++state->bucket < IN6_ADDR_HSIZE) {
+- state->offset = 0;
+ hlist_for_each_entry_rcu_bh(ifa,
+ &inet6_addr_lst[state->bucket], addr_lst) {
+ if (!net_eq(dev_net(ifa->idev->dev), net))
+ continue;
+- state->offset++;
+ return ifa;
+ }
+ }
+diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
+index ee8dbd228fe2..0e9296f44ee4 100644
+--- a/net/ipv6/ip6_tunnel.c
++++ b/net/ipv6/ip6_tunnel.c
+@@ -1227,7 +1227,7 @@ static inline int
+ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+ struct ip6_tnl *t = netdev_priv(dev);
+- const struct iphdr *iph = ip_hdr(skb);
++ const struct iphdr *iph;
+ int encap_limit = -1;
+ struct flowi6 fl6;
+ __u8 dsfield;
+@@ -1235,6 +1235,11 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
+ u8 tproto;
+ int err;
+
++ /* ensure we can access the full inner ip header */
++ if (!pskb_may_pull(skb, sizeof(struct iphdr)))
++ return -1;
++
++ iph = ip_hdr(skb);
+ memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
+
+ tproto = ACCESS_ONCE(t->parms.proto);
+@@ -1298,7 +1303,7 @@ static inline int
+ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+ struct ip6_tnl *t = netdev_priv(dev);
+- struct ipv6hdr *ipv6h = ipv6_hdr(skb);
++ struct ipv6hdr *ipv6h;
+ int encap_limit = -1;
+ __u16 offset;
+ struct flowi6 fl6;
+@@ -1307,6 +1312,10 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
+ u8 tproto;
+ int err;
+
++ if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
++ return -1;
++
++ ipv6h = ipv6_hdr(skb);
+ tproto = ACCESS_ONCE(t->parms.proto);
+ if ((tproto != IPPROTO_IPV6 && tproto != 0) ||
+ ip6_tnl_addr_conflict(t, ipv6h))
+diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
+index e4462b0ff801..f08cc6527339 100644
+--- a/net/ipv6/raw.c
++++ b/net/ipv6/raw.c
+@@ -650,8 +650,6 @@ static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length,
+ skb->protocol = htons(ETH_P_IPV6);
+ skb->priority = sk->sk_priority;
+ skb->mark = sk->sk_mark;
+- skb_dst_set(skb, &rt->dst);
+- *dstp = NULL;
+
+ skb_put(skb, length);
+ skb_reset_network_header(skb);
+@@ -664,8 +662,14 @@ static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length,
+
+ skb->transport_header = skb->network_header;
+ err = memcpy_from_msg(iph, msg, length);
+- if (err)
+- goto error_fault;
++ if (err) {
++ err = -EFAULT;
++ kfree_skb(skb);
++ goto error;
++ }
++
++ skb_dst_set(skb, &rt->dst);
++ *dstp = NULL;
+
+ /* if egress device is enslaved to an L3 master device pass the
+ * skb to its handler for processing
+@@ -674,21 +678,28 @@ static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length,
+ if (unlikely(!skb))
+ return 0;
+
++ /* Acquire rcu_read_lock() in case we need to use rt->rt6i_idev
++ * in the error path. Since skb has been freed, the dst could
++ * have been queued for deletion.
++ */
++ rcu_read_lock();
+ IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
+ err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, sk, skb,
+ NULL, rt->dst.dev, dst_output);
+ if (err > 0)
+ err = net_xmit_errno(err);
+- if (err)
+- goto error;
++ if (err) {
++ IP6_INC_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
++ rcu_read_unlock();
++ goto error_check;
++ }
++ rcu_read_unlock();
+ out:
+ return 0;
+
+-error_fault:
+- err = -EFAULT;
+- kfree_skb(skb);
+ error:
+ IP6_INC_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
++error_check:
+ if (err == -ENOBUFS && !np->recverr)
+ err = 0;
+ return err;
+diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c
+index c070dfc0190a..c92894c3e40a 100644
+--- a/net/netlabel/netlabel_unlabeled.c
++++ b/net/netlabel/netlabel_unlabeled.c
+@@ -781,7 +781,8 @@ static int netlbl_unlabel_addrinfo_get(struct genl_info *info,
+ {
+ u32 addr_len;
+
+- if (info->attrs[NLBL_UNLABEL_A_IPV4ADDR]) {
++ if (info->attrs[NLBL_UNLABEL_A_IPV4ADDR] &&
++ info->attrs[NLBL_UNLABEL_A_IPV4MASK]) {
+ addr_len = nla_len(info->attrs[NLBL_UNLABEL_A_IPV4ADDR]);
+ if (addr_len != sizeof(struct in_addr) &&
+ addr_len != nla_len(info->attrs[NLBL_UNLABEL_A_IPV4MASK]))
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index 8833a58ca3ee..8d1a7c900393 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -2753,10 +2753,12 @@ tpacket_error:
+ }
+ }
+
+- if (po->has_vnet_hdr && virtio_net_hdr_to_skb(skb, vnet_hdr,
+- vio_le())) {
+- tp_len = -EINVAL;
+- goto tpacket_error;
++ if (po->has_vnet_hdr) {
++ if (virtio_net_hdr_to_skb(skb, vnet_hdr, vio_le())) {
++ tp_len = -EINVAL;
++ goto tpacket_error;
++ }
++ virtio_net_hdr_set_proto(skb, vnet_hdr);
+ }
+
+ skb->destructor = tpacket_destruct_skb;
+@@ -2952,6 +2954,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
+ if (err)
+ goto out_free;
+ len += sizeof(vnet_hdr);
++ virtio_net_hdr_set_proto(skb, &vnet_hdr);
+ }
+
+ skb_probe_transport_header(skb, reserve);
+diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
+index 22bc6fc48311..cd69aa067543 100644
+--- a/net/sched/sch_api.c
++++ b/net/sched/sch_api.c
+@@ -1216,6 +1216,16 @@ check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
+ * Delete/get qdisc.
+ */
+
++const struct nla_policy rtm_tca_policy[TCA_MAX + 1] = {
++ [TCA_KIND] = { .type = NLA_STRING },
++ [TCA_OPTIONS] = { .type = NLA_NESTED },
++ [TCA_RATE] = { .type = NLA_BINARY,
++ .len = sizeof(struct tc_estimator) },
++ [TCA_STAB] = { .type = NLA_NESTED },
++ [TCA_DUMP_INVISIBLE] = { .type = NLA_FLAG },
++ [TCA_CHAIN] = { .type = NLA_U32 },
++};
++
+ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
+ struct netlink_ext_ack *extack)
+ {
+@@ -1232,7 +1242,8 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
+ !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
+ return -EPERM;
+
+- err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL, extack);
++ err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, rtm_tca_policy,
++ extack);
+ if (err < 0)
+ return err;
+
+@@ -1302,7 +1313,8 @@ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
+
+ replay:
+ /* Reinit, just in case something touches this. */
+- err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL, extack);
++ err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, rtm_tca_policy,
++ extack);
+ if (err < 0)
+ return err;
+
+@@ -1512,7 +1524,8 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
+ idx = 0;
+ ASSERT_RTNL();
+
+- err = nlmsg_parse(nlh, sizeof(*tcm), tca, TCA_MAX, NULL, NULL);
++ err = nlmsg_parse(nlh, sizeof(*tcm), tca, TCA_MAX,
++ rtm_tca_policy, NULL);
+ if (err < 0)
+ return err;
+
+@@ -1729,7 +1742,8 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n,
+ !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
+ return -EPERM;
+
+- err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL, extack);
++ err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, rtm_tca_policy,
++ extack);
+ if (err < 0)
+ return err;
+
+diff --git a/net/sctp/transport.c b/net/sctp/transport.c
+index e0c2a4e23039..43105cf04bc4 100644
+--- a/net/sctp/transport.c
++++ b/net/sctp/transport.c
+@@ -254,6 +254,7 @@ void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk)
+ bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
+ {
+ struct dst_entry *dst = sctp_transport_dst_check(t);
++ struct sock *sk = t->asoc->base.sk;
+ bool change = true;
+
+ if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) {
+@@ -265,12 +266,19 @@ bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
+ pmtu = SCTP_TRUNC4(pmtu);
+
+ if (dst) {
+- dst->ops->update_pmtu(dst, t->asoc->base.sk, NULL, pmtu);
++ struct sctp_pf *pf = sctp_get_pf_specific(dst->ops->family);
++ union sctp_addr addr;
++
++ pf->af->from_sk(&addr, sk);
++ pf->to_sk_daddr(&t->ipaddr, sk);
++ dst->ops->update_pmtu(dst, sk, NULL, pmtu);
++ pf->to_sk_daddr(&addr, sk);
++
+ dst = sctp_transport_dst_check(t);
+ }
+
+ if (!dst) {
+- t->af_specific->get_dst(t, &t->saddr, &t->fl, t->asoc->base.sk);
++ t->af_specific->get_dst(t, &t->saddr, &t->fl, sk);
+ dst = t->dst;
+ }
+
+diff --git a/net/tipc/socket.c b/net/tipc/socket.c
+index 0aebf0695ae0..4d2125d258fe 100644
+--- a/net/tipc/socket.c
++++ b/net/tipc/socket.c
+@@ -1063,8 +1063,10 @@ static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dlen)
+ /* Handle implicit connection setup */
+ if (unlikely(dest)) {
+ rc = __tipc_sendmsg(sock, m, dlen);
+- if (dlen && (dlen == rc))
++ if (dlen && dlen == rc) {
++ tsk->peer_caps = tipc_node_get_capabilities(net, dnode);
+ tsk->snt_unacked = tsk_inc(tsk, dlen + msg_hdr_sz(hdr));
++ }
+ return rc;
+ }
+
+diff --git a/sound/hda/hdac_controller.c b/sound/hda/hdac_controller.c
+index f6d2985b2520..778b42ba90b8 100644
+--- a/sound/hda/hdac_controller.c
++++ b/sound/hda/hdac_controller.c
+@@ -40,6 +40,8 @@ static void azx_clear_corbrp(struct hdac_bus *bus)
+ */
+ void snd_hdac_bus_init_cmd_io(struct hdac_bus *bus)
+ {
++ WARN_ON_ONCE(!bus->rb.area);
++
+ spin_lock_irq(&bus->reg_lock);
+ /* CORB set up */
+ bus->corb.addr = bus->rb.addr;
+@@ -382,7 +384,7 @@ void snd_hdac_bus_exit_link_reset(struct hdac_bus *bus)
+ EXPORT_SYMBOL_GPL(snd_hdac_bus_exit_link_reset);
+
+ /* reset codec link */
+-static int azx_reset(struct hdac_bus *bus, bool full_reset)
++int snd_hdac_bus_reset_link(struct hdac_bus *bus, bool full_reset)
+ {
+ if (!full_reset)
+ goto skip_reset;
+@@ -407,7 +409,7 @@ static int azx_reset(struct hdac_bus *bus, bool full_reset)
+ skip_reset:
+ /* check to see if controller is ready */
+ if (!snd_hdac_chip_readb(bus, GCTL)) {
+- dev_dbg(bus->dev, "azx_reset: controller not ready!\n");
++ dev_dbg(bus->dev, "controller not ready!\n");
+ return -EBUSY;
+ }
+
+@@ -422,6 +424,7 @@ static int azx_reset(struct hdac_bus *bus, bool full_reset)
+
+ return 0;
+ }
++EXPORT_SYMBOL_GPL(snd_hdac_bus_reset_link);
+
+ /* enable interrupts */
+ static void azx_int_enable(struct hdac_bus *bus)
+@@ -476,15 +479,17 @@ bool snd_hdac_bus_init_chip(struct hdac_bus *bus, bool full_reset)
+ return false;
+
+ /* reset controller */
+- azx_reset(bus, full_reset);
++ snd_hdac_bus_reset_link(bus, full_reset);
+
+- /* initialize interrupts */
++ /* clear interrupts */
+ azx_int_clear(bus);
+- azx_int_enable(bus);
+
+ /* initialize the codec command I/O */
+ snd_hdac_bus_init_cmd_io(bus);
+
++ /* enable interrupts after CORB/RIRB buffers are initialized above */
++ azx_int_enable(bus);
++
+ /* program the position buffer */
+ if (bus->use_posbuf && bus->posbuf.addr) {
+ snd_hdac_chip_writel(bus, DPLBASE, (u32)bus->posbuf.addr);
+diff --git a/sound/soc/codecs/rt5514.c b/sound/soc/codecs/rt5514.c
+index 1bfc8db1826a..56ddab43da7e 100644
+--- a/sound/soc/codecs/rt5514.c
++++ b/sound/soc/codecs/rt5514.c
+@@ -64,8 +64,8 @@ static const struct reg_sequence rt5514_patch[] = {
+ {RT5514_ANA_CTRL_LDO10, 0x00028604},
+ {RT5514_ANA_CTRL_ADCFED, 0x00000800},
+ {RT5514_ASRC_IN_CTRL1, 0x00000003},
+- {RT5514_DOWNFILTER0_CTRL3, 0x10000352},
+- {RT5514_DOWNFILTER1_CTRL3, 0x10000352},
++ {RT5514_DOWNFILTER0_CTRL3, 0x10000342},
++ {RT5514_DOWNFILTER1_CTRL3, 0x10000342},
+ };
+
+ static const struct reg_default rt5514_reg[] = {
+@@ -92,10 +92,10 @@ static const struct reg_default rt5514_reg[] = {
+ {RT5514_ASRC_IN_CTRL1, 0x00000003},
+ {RT5514_DOWNFILTER0_CTRL1, 0x00020c2f},
+ {RT5514_DOWNFILTER0_CTRL2, 0x00020c2f},
+- {RT5514_DOWNFILTER0_CTRL3, 0x10000352},
++ {RT5514_DOWNFILTER0_CTRL3, 0x10000342},
+ {RT5514_DOWNFILTER1_CTRL1, 0x00020c2f},
+ {RT5514_DOWNFILTER1_CTRL2, 0x00020c2f},
+- {RT5514_DOWNFILTER1_CTRL3, 0x10000352},
++ {RT5514_DOWNFILTER1_CTRL3, 0x10000342},
+ {RT5514_ANA_CTRL_LDO10, 0x00028604},
+ {RT5514_ANA_CTRL_LDO18_16, 0x02000345},
+ {RT5514_ANA_CTRL_ADC12, 0x0000a2a8},
+diff --git a/sound/soc/codecs/sigmadsp.c b/sound/soc/codecs/sigmadsp.c
+index d53680ac78e4..6df158669420 100644
+--- a/sound/soc/codecs/sigmadsp.c
++++ b/sound/soc/codecs/sigmadsp.c
+@@ -117,8 +117,7 @@ static int sigmadsp_ctrl_write(struct sigmadsp *sigmadsp,
+ struct sigmadsp_control *ctrl, void *data)
+ {
+ /* safeload loads up to 20 bytes in a atomic operation */
+- if (ctrl->num_bytes > 4 && ctrl->num_bytes <= 20 && sigmadsp->ops &&
+- sigmadsp->ops->safeload)
++ if (ctrl->num_bytes <= 20 && sigmadsp->ops && sigmadsp->ops->safeload)
+ return sigmadsp->ops->safeload(sigmadsp, ctrl->addr, data,
+ ctrl->num_bytes);
+ else
+diff --git a/sound/soc/codecs/wm8804-i2c.c b/sound/soc/codecs/wm8804-i2c.c
+index f27464c2c5ba..79541960f45d 100644
+--- a/sound/soc/codecs/wm8804-i2c.c
++++ b/sound/soc/codecs/wm8804-i2c.c
+@@ -13,6 +13,7 @@
+ #include <linux/init.h>
+ #include <linux/module.h>
+ #include <linux/i2c.h>
++#include <linux/acpi.h>
+
+ #include "wm8804.h"
+
+@@ -40,17 +41,29 @@ static const struct i2c_device_id wm8804_i2c_id[] = {
+ };
+ MODULE_DEVICE_TABLE(i2c, wm8804_i2c_id);
+
++#if defined(CONFIG_OF)
+ static const struct of_device_id wm8804_of_match[] = {
+ { .compatible = "wlf,wm8804", },
+ { }
+ };
+ MODULE_DEVICE_TABLE(of, wm8804_of_match);
++#endif
++
++#ifdef CONFIG_ACPI
++static const struct acpi_device_id wm8804_acpi_match[] = {
++ { "1AEC8804", 0 }, /* Wolfson PCI ID + part ID */
++ { "10138804", 0 }, /* Cirrus Logic PCI ID + part ID */
++ { },
++};
++MODULE_DEVICE_TABLE(acpi, wm8804_acpi_match);
++#endif
+
+ static struct i2c_driver wm8804_i2c_driver = {
+ .driver = {
+ .name = "wm8804",
+ .pm = &wm8804_pm,
+- .of_match_table = wm8804_of_match,
++ .of_match_table = of_match_ptr(wm8804_of_match),
++ .acpi_match_table = ACPI_PTR(wm8804_acpi_match),
+ },
+ .probe = wm8804_i2c_probe,
+ .remove = wm8804_i2c_remove,
+diff --git a/sound/soc/intel/skylake/skl.c b/sound/soc/intel/skylake/skl.c
+index f94b484abb99..a0bef63b8fb1 100644
+--- a/sound/soc/intel/skylake/skl.c
++++ b/sound/soc/intel/skylake/skl.c
+@@ -698,7 +698,7 @@ static int skl_first_init(struct hdac_ext_bus *ebus)
+ return -ENXIO;
+ }
+
+- skl_init_chip(bus, true);
++ snd_hdac_bus_reset_link(bus, true);
+
+ snd_hdac_bus_parse_capabilities(bus);
+
+diff --git a/sound/soc/sh/rcar/adg.c b/sound/soc/sh/rcar/adg.c
+index e28edb1f7263..eb7879bcc6a7 100644
+--- a/sound/soc/sh/rcar/adg.c
++++ b/sound/soc/sh/rcar/adg.c
+@@ -467,6 +467,11 @@ static void rsnd_adg_get_clkout(struct rsnd_priv *priv,
+ goto rsnd_adg_get_clkout_end;
+
+ req_size = prop->length / sizeof(u32);
++ if (req_size > REQ_SIZE) {
++ dev_err(dev,
++ "too many clock-frequency, use top %d\n", REQ_SIZE);
++ req_size = REQ_SIZE;
++ }
+
+ of_property_read_u32_array(np, "clock-frequency", req_rate, req_size);
+ req_48kHz_rate = 0;
+diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c
+index 9896e736fa5c..710c01cd2ad2 100644
+--- a/sound/soc/sh/rcar/core.c
++++ b/sound/soc/sh/rcar/core.c
+@@ -486,7 +486,7 @@ static int rsnd_status_update(u32 *status,
+ (func_call && (mod)->ops->fn) ? #fn : ""); \
+ if (func_call && (mod)->ops->fn) \
+ tmp = (mod)->ops->fn(mod, io, param); \
+- if (tmp) \
++ if (tmp && (tmp != -EPROBE_DEFER)) \
+ dev_err(dev, "%s[%d] : %s error %d\n", \
+ rsnd_mod_name(mod), rsnd_mod_id(mod), \
+ #fn, tmp); \
+@@ -1469,6 +1469,14 @@ exit_snd_probe:
+ rsnd_dai_call(remove, &rdai->capture, priv);
+ }
+
++ /*
++ * adg is very special mod which can't use rsnd_dai_call(remove),
++ * and it registers ADG clock on probe.
++ * It should be unregister if probe failed.
++ * Mainly it is assuming -EPROBE_DEFER case
++ */
++ rsnd_adg_remove(priv);
++
+ return ret;
+ }
+
+diff --git a/sound/soc/sh/rcar/dma.c b/sound/soc/sh/rcar/dma.c
+index 041ec1080d52..39a46e302bab 100644
+--- a/sound/soc/sh/rcar/dma.c
++++ b/sound/soc/sh/rcar/dma.c
+@@ -330,6 +330,10 @@ static int rsnd_dmaen_attach(struct rsnd_dai_stream *io,
+ /* try to get DMAEngine channel */
+ chan = rsnd_dmaen_request_channel(io, mod_from, mod_to);
+ if (IS_ERR_OR_NULL(chan)) {
++ /* Let's follow when -EPROBE_DEFER case */
++ if (PTR_ERR(chan) == -EPROBE_DEFER)
++ return PTR_ERR(chan);
++
+ /*
+ * DMA failed. try to PIO mode
+ * see
+diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
+index e37653b0f2d0..76789523429a 100644
+--- a/tools/perf/builtin-script.c
++++ b/tools/perf/builtin-script.c
+@@ -2304,8 +2304,8 @@ static int list_available_scripts(const struct option *opt __maybe_unused,
+ }
+
+ for_each_lang(scripts_path, scripts_dir, lang_dirent) {
+- snprintf(lang_path, MAXPATHLEN, "%s/%s/bin", scripts_path,
+- lang_dirent->d_name);
++ scnprintf(lang_path, MAXPATHLEN, "%s/%s/bin", scripts_path,
++ lang_dirent->d_name);
+ lang_dir = opendir(lang_path);
+ if (!lang_dir)
+ continue;
+@@ -2314,8 +2314,8 @@ static int list_available_scripts(const struct option *opt __maybe_unused,
+ script_root = get_script_root(script_dirent, REPORT_SUFFIX);
+ if (script_root) {
+ desc = script_desc__findnew(script_root);
+- snprintf(script_path, MAXPATHLEN, "%s/%s",
+- lang_path, script_dirent->d_name);
++ scnprintf(script_path, MAXPATHLEN, "%s/%s",
++ lang_path, script_dirent->d_name);
+ read_script_info(desc, script_path);
+ free(script_root);
+ }
+@@ -2351,7 +2351,7 @@ static int check_ev_match(char *dir_name, char *scriptname,
+ int match, len;
+ FILE *fp;
+
+- sprintf(filename, "%s/bin/%s-record", dir_name, scriptname);
++ scnprintf(filename, MAXPATHLEN, "%s/bin/%s-record", dir_name, scriptname);
+
+ fp = fopen(filename, "r");
+ if (!fp)
+@@ -2427,8 +2427,8 @@ int find_scripts(char **scripts_array, char **scripts_path_array)
+ }
+
+ for_each_lang(scripts_path, scripts_dir, lang_dirent) {
+- snprintf(lang_path, MAXPATHLEN, "%s/%s", scripts_path,
+- lang_dirent->d_name);
++ scnprintf(lang_path, MAXPATHLEN, "%s/%s", scripts_path,
++ lang_dirent->d_name);
+ #ifdef NO_LIBPERL
+ if (strstr(lang_path, "perl"))
+ continue;
+@@ -2483,8 +2483,8 @@ static char *get_script_path(const char *script_root, const char *suffix)
+ return NULL;
+
+ for_each_lang(scripts_path, scripts_dir, lang_dirent) {
+- snprintf(lang_path, MAXPATHLEN, "%s/%s/bin", scripts_path,
+- lang_dirent->d_name);
++ scnprintf(lang_path, MAXPATHLEN, "%s/%s/bin", scripts_path,
++ lang_dirent->d_name);
+ lang_dir = opendir(lang_path);
+ if (!lang_dir)
+ continue;
+@@ -2495,8 +2495,8 @@ static char *get_script_path(const char *script_root, const char *suffix)
+ free(__script_root);
+ closedir(lang_dir);
+ closedir(scripts_dir);
+- snprintf(script_path, MAXPATHLEN, "%s/%s",
+- lang_path, script_dirent->d_name);
++ scnprintf(script_path, MAXPATHLEN, "%s/%s",
++ lang_path, script_dirent->d_name);
+ return strdup(script_path);
+ }
+ free(__script_root);
+diff --git a/tools/perf/scripts/python/export-to-postgresql.py b/tools/perf/scripts/python/export-to-postgresql.py
+index efcaf6cac2eb..e46f51b17513 100644
+--- a/tools/perf/scripts/python/export-to-postgresql.py
++++ b/tools/perf/scripts/python/export-to-postgresql.py
+@@ -204,14 +204,23 @@ from ctypes import *
+ libpq = CDLL("libpq.so.5")
+ PQconnectdb = libpq.PQconnectdb
+ PQconnectdb.restype = c_void_p
++PQconnectdb.argtypes = [ c_char_p ]
+ PQfinish = libpq.PQfinish
++PQfinish.argtypes = [ c_void_p ]
+ PQstatus = libpq.PQstatus
++PQstatus.restype = c_int
++PQstatus.argtypes = [ c_void_p ]
+ PQexec = libpq.PQexec
+ PQexec.restype = c_void_p
++PQexec.argtypes = [ c_void_p, c_char_p ]
+ PQresultStatus = libpq.PQresultStatus
++PQresultStatus.restype = c_int
++PQresultStatus.argtypes = [ c_void_p ]
+ PQputCopyData = libpq.PQputCopyData
++PQputCopyData.restype = c_int
+ PQputCopyData.argtypes = [ c_void_p, c_void_p, c_int ]
+ PQputCopyEnd = libpq.PQputCopyEnd
++PQputCopyEnd.restype = c_int
+ PQputCopyEnd.argtypes = [ c_void_p, c_void_p ]
+
+ sys.path.append(os.environ['PERF_EXEC_PATH'] + \
+diff --git a/tools/perf/scripts/python/export-to-sqlite.py b/tools/perf/scripts/python/export-to-sqlite.py
+index f827bf77e9d2..e4bb82c8aba9 100644
+--- a/tools/perf/scripts/python/export-to-sqlite.py
++++ b/tools/perf/scripts/python/export-to-sqlite.py
+@@ -440,7 +440,11 @@ def branch_type_table(*x):
+
+ def sample_table(*x):
+ if branches:
+- bind_exec(sample_query, 18, x)
++ for xx in x[0:15]:
++ sample_query.addBindValue(str(xx))
++ for xx in x[19:22]:
++ sample_query.addBindValue(str(xx))
++ do_query_(sample_query)
+ else:
+ bind_exec(sample_query, 22, x)
+
+diff --git a/tools/perf/tests/attr.c b/tools/perf/tests/attr.c
+index 0e1367f90af5..60fea0a376fc 100644
+--- a/tools/perf/tests/attr.c
++++ b/tools/perf/tests/attr.c
+@@ -164,8 +164,8 @@ static int run_dir(const char *d, const char *perf)
+ if (verbose > 0)
+ vcnt++;
+
+- snprintf(cmd, 3*PATH_MAX, PYTHON " %s/attr.py -d %s/attr/ -p %s %.*s",
+- d, d, perf, vcnt, v);
++ scnprintf(cmd, 3*PATH_MAX, PYTHON " %s/attr.py -d %s/attr/ -p %s %.*s",
++ d, d, perf, vcnt, v);
+
+ return system(cmd) ? TEST_FAIL : TEST_OK;
+ }
+diff --git a/tools/perf/tests/mem.c b/tools/perf/tests/mem.c
+index 21952e1e6e6d..0f82ee9fd3f7 100644
+--- a/tools/perf/tests/mem.c
++++ b/tools/perf/tests/mem.c
+@@ -16,7 +16,7 @@ static int check(union perf_mem_data_src data_src,
+
+ n = perf_mem__snp_scnprintf(out, sizeof out, &mi);
+ n += perf_mem__lvl_scnprintf(out + n, sizeof out - n, &mi);
+- snprintf(failure, sizeof failure, "unexpected %s", out);
++ scnprintf(failure, sizeof failure, "unexpected %s", out);
+ TEST_ASSERT_VAL(failure, !strcmp(string, out));
+ return 0;
+ }
+diff --git a/tools/perf/tests/pmu.c b/tools/perf/tests/pmu.c
+index 9abca267afa9..7bedf8608fdd 100644
+--- a/tools/perf/tests/pmu.c
++++ b/tools/perf/tests/pmu.c
+@@ -98,7 +98,7 @@ static char *test_format_dir_get(void)
+ struct test_format *format = &test_formats[i];
+ FILE *file;
+
+- snprintf(name, PATH_MAX, "%s/%s", dir, format->name);
++ scnprintf(name, PATH_MAX, "%s/%s", dir, format->name);
+
+ file = fopen(name, "w");
+ if (!file)
+diff --git a/tools/perf/util/cgroup.c b/tools/perf/util/cgroup.c
+index d9ffc1e6eb39..ce6bcb0a5368 100644
+--- a/tools/perf/util/cgroup.c
++++ b/tools/perf/util/cgroup.c
+@@ -78,7 +78,7 @@ static int open_cgroup(char *name)
+ if (cgroupfs_find_mountpoint(mnt, PATH_MAX + 1))
+ return -1;
+
+- snprintf(path, PATH_MAX, "%s/%s", mnt, name);
++ scnprintf(path, PATH_MAX, "%s/%s", mnt, name);
+
+ fd = open(path, O_RDONLY);
+ if (fd == -1)
+diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
+index b25635e945f3..53f620472151 100644
+--- a/tools/perf/util/parse-events.c
++++ b/tools/perf/util/parse-events.c
+@@ -202,8 +202,8 @@ struct tracepoint_path *tracepoint_id_to_path(u64 config)
+
+ for_each_event(sys_dirent, evt_dir, evt_dirent) {
+
+- snprintf(evt_path, MAXPATHLEN, "%s/%s/id", dir_path,
+- evt_dirent->d_name);
++ scnprintf(evt_path, MAXPATHLEN, "%s/%s/id", dir_path,
++ evt_dirent->d_name);
+ fd = open(evt_path, O_RDONLY);
+ if (fd < 0)
+ continue;
+diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
+index 9dff41bcc776..d87d458996b7 100644
+--- a/tools/perf/util/pmu.c
++++ b/tools/perf/util/pmu.c
+@@ -349,7 +349,7 @@ static int pmu_aliases_parse(char *dir, struct list_head *head)
+ if (pmu_alias_info_file(name))
+ continue;
+
+- snprintf(path, PATH_MAX, "%s/%s", dir, name);
++ scnprintf(path, PATH_MAX, "%s/%s", dir, name);
+
+ file = fopen(path, "r");
+ if (!file) {
+diff --git a/tools/testing/selftests/efivarfs/config b/tools/testing/selftests/efivarfs/config
+new file mode 100644
+index 000000000000..4e151f1005b2
+--- /dev/null
++++ b/tools/testing/selftests/efivarfs/config
+@@ -0,0 +1 @@
++CONFIG_EFIVAR_FS=y
+diff --git a/tools/testing/selftests/memory-hotplug/config b/tools/testing/selftests/memory-hotplug/config
+index 2fde30191a47..a7e8cd5bb265 100644
+--- a/tools/testing/selftests/memory-hotplug/config
++++ b/tools/testing/selftests/memory-hotplug/config
+@@ -2,3 +2,4 @@ CONFIG_MEMORY_HOTPLUG=y
+ CONFIG_MEMORY_HOTPLUG_SPARSE=y
+ CONFIG_NOTIFIER_ERROR_INJECTION=y
+ CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m
++CONFIG_MEMORY_HOTREMOVE=y