author     Mike Pagano <mpagano@gentoo.org>    2018-12-01 10:05:55 -0500
committer  Mike Pagano <mpagano@gentoo.org>    2018-12-01 10:05:55 -0500
commit     a0335979bdabd1a61a12b25e022f8b08e86139db (patch)
tree       d32f921ad76aa6b9e3a4825f5fddcf4e337dbdf6
parent     proj/linux-patches: Linux patch 4.14.84 (diff)
proj/linux-patches: Linux patch 4.14.85
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
-rw-r--r--  0000_README              |    4
-rw-r--r--  1084_linux-4.14.85.patch | 4286
2 files changed, 4290 insertions, 0 deletions
diff --git a/0000_README b/0000_README
index b8625f05..b328a3b6 100644
--- a/0000_README
+++ b/0000_README
@@ -379,6 +379,10 @@ Patch: 1083-4.14.84.patch
From: http://www.kernel.org
Desc: Linux 4.14.84
+Patch: 1084-4.14.85.patch
+From: http://www.kernel.org
+Desc: Linux 4.14.85
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1084_linux-4.14.85.patch b/1084_linux-4.14.85.patch
new file mode 100644
index 00000000..ce3192e0
--- /dev/null
+++ b/1084_linux-4.14.85.patch
@@ -0,0 +1,4286 @@
+diff --git a/Documentation/devicetree/bindings/net/can/holt_hi311x.txt b/Documentation/devicetree/bindings/net/can/holt_hi311x.txt
+index 23aa94eab207..4e0ec14f7abf 100644
+--- a/Documentation/devicetree/bindings/net/can/holt_hi311x.txt
++++ b/Documentation/devicetree/bindings/net/can/holt_hi311x.txt
+@@ -18,7 +18,7 @@ Example:
+ reg = <1>;
+ clocks = <&clk32m>;
+ interrupt-parent = <&gpio4>;
+- interrupts = <13 IRQ_TYPE_EDGE_RISING>;
++ interrupts = <13 IRQ_TYPE_LEVEL_HIGH>;
+ vdd-supply = <&reg5v0>;
+ xceiver-supply = <&reg5v0>;
+ };
+diff --git a/Documentation/sysctl/fs.txt b/Documentation/sysctl/fs.txt
+index 35e17f748ca7..af5859b2d0f9 100644
+--- a/Documentation/sysctl/fs.txt
++++ b/Documentation/sysctl/fs.txt
+@@ -34,7 +34,9 @@ Currently, these files are in /proc/sys/fs:
+ - overflowgid
+ - pipe-user-pages-hard
+ - pipe-user-pages-soft
++- protected_fifos
+ - protected_hardlinks
++- protected_regular
+ - protected_symlinks
+ - suid_dumpable
+ - super-max
+@@ -182,6 +184,24 @@ applied.
+
+ ==============================================================
+
++protected_fifos:
++
++The intent of this protection is to avoid unintentional writes to
++an attacker-controlled FIFO, where a program expected to create a regular
++file.
++
++When set to "0", writing to FIFOs is unrestricted.
++
++When set to "1" don't allow O_CREAT open on FIFOs that we don't own
++in world writable sticky directories, unless they are owned by the
++owner of the directory.
++
++When set to "2" it also applies to group writable sticky directories.
++
++This protection is based on the restrictions in Openwall.
++
++==============================================================
++
+ protected_hardlinks:
+
+ A long-standing class of security issues is the hardlink-based
+@@ -202,6 +222,22 @@ This protection is based on the restrictions in Openwall and grsecurity.
+
+ ==============================================================
+
++protected_regular:
++
++This protection is similar to protected_fifos, but it
++avoids writes to an attacker-controlled regular file, where a program
++expected to create one.
++
++When set to "0", writing to regular files is unrestricted.
++
++When set to "1" don't allow O_CREAT open on regular files that we
++don't own in world writable sticky directories, unless they are
++owned by the owner of the directory.
++
++When set to "2" it also applies to group writable sticky directories.
++
++==============================================================
++
+ protected_symlinks:
+
+ A long-standing class of security issues is the symlink-based
+diff --git a/MAINTAINERS b/MAINTAINERS
+index 546beb6b0176..6cb70b853323 100644
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -12662,6 +12662,7 @@ F: arch/alpha/kernel/srm_env.c
+
+ STABLE BRANCH
+ M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
++M: Sasha Levin <sashal@kernel.org>
+ L: stable@vger.kernel.org
+ S: Supported
+ F: Documentation/process/stable-kernel-rules.rst
+diff --git a/Makefile b/Makefile
+index 874d72a3e6a7..58a248264090 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 14
+-SUBLEVEL = 84
++SUBLEVEL = 85
+ EXTRAVERSION =
+ NAME = Petit Gorille
+
+@@ -480,13 +480,15 @@ endif
+ ifeq ($(cc-name),clang)
+ ifneq ($(CROSS_COMPILE),)
+ CLANG_TARGET := --target=$(notdir $(CROSS_COMPILE:%-=%))
+-GCC_TOOLCHAIN := $(realpath $(dir $(shell which $(LD)))/..)
++GCC_TOOLCHAIN_DIR := $(dir $(shell which $(LD)))
++CLANG_PREFIX := --prefix=$(GCC_TOOLCHAIN_DIR)
++GCC_TOOLCHAIN := $(realpath $(GCC_TOOLCHAIN_DIR)/..)
+ endif
+ ifneq ($(GCC_TOOLCHAIN),)
+ CLANG_GCC_TC := --gcc-toolchain=$(GCC_TOOLCHAIN)
+ endif
+-KBUILD_CFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
+-KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
++KBUILD_CFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC) $(CLANG_PREFIX)
++KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC) $(CLANG_PREFIX)
+ KBUILD_CFLAGS += $(call cc-option, -no-integrated-as)
+ KBUILD_AFLAGS += $(call cc-option, -no-integrated-as)
+ endif
+diff --git a/arch/arm/Makefile b/arch/arm/Makefile
+index 36ae4454554c..17e80f483281 100644
+--- a/arch/arm/Makefile
++++ b/arch/arm/Makefile
+@@ -106,7 +106,7 @@ tune-$(CONFIG_CPU_V6K) =$(call cc-option,-mtune=arm1136j-s,-mtune=strongarm)
+ tune-y := $(tune-y)
+
+ ifeq ($(CONFIG_AEABI),y)
+-CFLAGS_ABI :=-mabi=aapcs-linux -mno-thumb-interwork -mfpu=vfp
++CFLAGS_ABI :=-mabi=aapcs-linux -mfpu=vfp
+ else
+ CFLAGS_ABI :=$(call cc-option,-mapcs-32,-mabi=apcs-gnu) $(call cc-option,-mno-thumb-interwork,)
+ endif
+diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
+index a5889238fc9f..746c8c575f98 100644
+--- a/arch/arm/boot/compressed/Makefile
++++ b/arch/arm/boot/compressed/Makefile
+@@ -113,7 +113,7 @@ CFLAGS_fdt_ro.o := $(nossp_flags)
+ CFLAGS_fdt_rw.o := $(nossp_flags)
+ CFLAGS_fdt_wip.o := $(nossp_flags)
+
+-ccflags-y := -fpic -mno-single-pic-base -fno-builtin -I$(obj)
++ccflags-y := -fpic $(call cc-option,-mno-single-pic-base,) -fno-builtin -I$(obj)
+ asflags-y := -DZIMAGE
+
+ # Supply kernel BSS size to the decompressor via a linker symbol.
+diff --git a/arch/arm/boot/dts/exynos5420-peach-pit.dts b/arch/arm/boot/dts/exynos5420-peach-pit.dts
+index 683a4cfb4a23..c91eff8475a8 100644
+--- a/arch/arm/boot/dts/exynos5420-peach-pit.dts
++++ b/arch/arm/boot/dts/exynos5420-peach-pit.dts
+@@ -31,7 +31,7 @@
+
+ aliases {
+ /* Assign 20 so we don't get confused w/ builtin ones */
+- i2c20 = "/spi@12d40000/cros-ec@0/i2c-tunnel";
++ i2c20 = &i2c_tunnel;
+ };
+
+ backlight: backlight {
+@@ -952,7 +952,7 @@
+ samsung,spi-feedback-delay = <1>;
+ };
+
+- i2c-tunnel {
++ i2c_tunnel: i2c-tunnel {
+ compatible = "google,cros-ec-i2c-tunnel";
+ #address-cells = <1>;
+ #size-cells = <0>;
+diff --git a/arch/arm/boot/dts/exynos5800-peach-pi.dts b/arch/arm/boot/dts/exynos5800-peach-pi.dts
+index b2b95ff205e8..daad5d425cf5 100644
+--- a/arch/arm/boot/dts/exynos5800-peach-pi.dts
++++ b/arch/arm/boot/dts/exynos5800-peach-pi.dts
+@@ -29,7 +29,7 @@
+
+ aliases {
+ /* Assign 20 so we don't get confused w/ builtin ones */
+- i2c20 = "/spi@12d40000/cros-ec@0/i2c-tunnel";
++ i2c20 = &i2c_tunnel;
+ };
+
+ backlight: backlight {
+@@ -921,7 +921,7 @@
+ samsung,spi-feedback-delay = <1>;
+ };
+
+- i2c-tunnel {
++ i2c_tunnel: i2c-tunnel {
+ compatible = "google,cros-ec-i2c-tunnel";
+ #address-cells = <1>;
+ #size-cells = <0>;
+diff --git a/arch/arm/firmware/trusted_foundations.c b/arch/arm/firmware/trusted_foundations.c
+index 3fb1b5a1dce9..689e6565abfc 100644
+--- a/arch/arm/firmware/trusted_foundations.c
++++ b/arch/arm/firmware/trusted_foundations.c
+@@ -31,21 +31,25 @@
+
+ static unsigned long cpu_boot_addr;
+
+-static void __naked tf_generic_smc(u32 type, u32 arg1, u32 arg2)
++static void tf_generic_smc(u32 type, u32 arg1, u32 arg2)
+ {
++ register u32 r0 asm("r0") = type;
++ register u32 r1 asm("r1") = arg1;
++ register u32 r2 asm("r2") = arg2;
++
+ asm volatile(
+ ".arch_extension sec\n\t"
+- "stmfd sp!, {r4 - r11, lr}\n\t"
++ "stmfd sp!, {r4 - r11}\n\t"
+ __asmeq("%0", "r0")
+ __asmeq("%1", "r1")
+ __asmeq("%2", "r2")
+ "mov r3, #0\n\t"
+ "mov r4, #0\n\t"
+ "smc #0\n\t"
+- "ldmfd sp!, {r4 - r11, pc}"
++ "ldmfd sp!, {r4 - r11}\n\t"
+ :
+- : "r" (type), "r" (arg1), "r" (arg2)
+- : "memory");
++ : "r" (r0), "r" (r1), "r" (r2)
++ : "memory", "r3", "r12", "lr");
+ }
+
+ static int tf_set_cpu_boot_addr(int cpu, unsigned long boot_addr)
+diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
+index 7318165cfc90..48f2b3657507 100644
+--- a/arch/arm64/Makefile
++++ b/arch/arm64/Makefile
+@@ -10,7 +10,7 @@
+ #
+ # Copyright (C) 1995-2001 by Russell King
+
+-LDFLAGS_vmlinux :=-p --no-undefined -X
++LDFLAGS_vmlinux :=--no-undefined -X
+ CPPFLAGS_vmlinux.lds = -DTEXT_OFFSET=$(TEXT_OFFSET)
+ GZFLAGS :=-9
+
+diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h
+index 422f99cf9924..e6d33eed8202 100644
+--- a/arch/powerpc/include/asm/io.h
++++ b/arch/powerpc/include/asm/io.h
+@@ -287,19 +287,13 @@ extern void _memcpy_toio(volatile void __iomem *dest, const void *src,
+ * their hooks, a bitfield is reserved for use by the platform near the
+ * top of MMIO addresses (not PIO, those have to cope the hard way).
+ *
+- * This bit field is 12 bits and is at the top of the IO virtual
+- * addresses PCI_IO_INDIRECT_TOKEN_MASK.
++ * The highest address in the kernel virtual space are:
+ *
+- * The kernel virtual space is thus:
++ * d0003fffffffffff # with Hash MMU
++ * c00fffffffffffff # with Radix MMU
+ *
+- * 0xD000000000000000 : vmalloc
+- * 0xD000080000000000 : PCI PHB IO space
+- * 0xD000080080000000 : ioremap
+- * 0xD0000fffffffffff : end of ioremap region
+- *
+- * Since the top 4 bits are reserved as the region ID, we use thus
+- * the next 12 bits and keep 4 bits available for the future if the
+- * virtual address space is ever to be extended.
++ * The top 4 bits are reserved as the region ID on hash, leaving us 8 bits
++ * that can be used for the field.
+ *
+ * The direct IO mapping operations will then mask off those bits
+ * before doing the actual access, though that only happen when
+@@ -311,8 +305,8 @@ extern void _memcpy_toio(volatile void __iomem *dest, const void *src,
+ */
+
+ #ifdef CONFIG_PPC_INDIRECT_MMIO
+-#define PCI_IO_IND_TOKEN_MASK 0x0fff000000000000ul
+-#define PCI_IO_IND_TOKEN_SHIFT 48
++#define PCI_IO_IND_TOKEN_SHIFT 52
++#define PCI_IO_IND_TOKEN_MASK (0xfful << PCI_IO_IND_TOKEN_SHIFT)
+ #define PCI_FIX_ADDR(addr) \
+ ((PCI_IO_ADDR)(((unsigned long)(addr)) & ~PCI_IO_IND_TOKEN_MASK))
+ #define PCI_GET_ADDR_TOKEN(addr) \
+diff --git a/arch/powerpc/kvm/trace.h b/arch/powerpc/kvm/trace.h
+index 491b0f715d6b..ea1d7c808319 100644
+--- a/arch/powerpc/kvm/trace.h
++++ b/arch/powerpc/kvm/trace.h
+@@ -6,8 +6,6 @@
+
+ #undef TRACE_SYSTEM
+ #define TRACE_SYSTEM kvm
+-#define TRACE_INCLUDE_PATH .
+-#define TRACE_INCLUDE_FILE trace
+
+ /*
+ * Tracepoint for guest mode entry.
+@@ -120,4 +118,10 @@ TRACE_EVENT(kvm_check_requests,
+ #endif /* _TRACE_KVM_H */
+
+ /* This part must be outside protection */
++#undef TRACE_INCLUDE_PATH
++#undef TRACE_INCLUDE_FILE
++
++#define TRACE_INCLUDE_PATH .
++#define TRACE_INCLUDE_FILE trace
++
+ #include <trace/define_trace.h>
+diff --git a/arch/powerpc/kvm/trace_booke.h b/arch/powerpc/kvm/trace_booke.h
+index ac640e81fdc5..3837842986aa 100644
+--- a/arch/powerpc/kvm/trace_booke.h
++++ b/arch/powerpc/kvm/trace_booke.h
+@@ -6,8 +6,6 @@
+
+ #undef TRACE_SYSTEM
+ #define TRACE_SYSTEM kvm_booke
+-#define TRACE_INCLUDE_PATH .
+-#define TRACE_INCLUDE_FILE trace_booke
+
+ #define kvm_trace_symbol_exit \
+ {0, "CRITICAL"}, \
+@@ -218,4 +216,11 @@ TRACE_EVENT(kvm_booke_queue_irqprio,
+ #endif
+
+ /* This part must be outside protection */
++
++#undef TRACE_INCLUDE_PATH
++#undef TRACE_INCLUDE_FILE
++
++#define TRACE_INCLUDE_PATH .
++#define TRACE_INCLUDE_FILE trace_booke
++
+ #include <trace/define_trace.h>
+diff --git a/arch/powerpc/kvm/trace_hv.h b/arch/powerpc/kvm/trace_hv.h
+index bcfe8a987f6a..8a1e3b0047f1 100644
+--- a/arch/powerpc/kvm/trace_hv.h
++++ b/arch/powerpc/kvm/trace_hv.h
+@@ -9,8 +9,6 @@
+
+ #undef TRACE_SYSTEM
+ #define TRACE_SYSTEM kvm_hv
+-#define TRACE_INCLUDE_PATH .
+-#define TRACE_INCLUDE_FILE trace_hv
+
+ #define kvm_trace_symbol_hcall \
+ {H_REMOVE, "H_REMOVE"}, \
+@@ -497,4 +495,11 @@ TRACE_EVENT(kvmppc_run_vcpu_exit,
+ #endif /* _TRACE_KVM_HV_H */
+
+ /* This part must be outside protection */
++
++#undef TRACE_INCLUDE_PATH
++#undef TRACE_INCLUDE_FILE
++
++#define TRACE_INCLUDE_PATH .
++#define TRACE_INCLUDE_FILE trace_hv
++
+ #include <trace/define_trace.h>
+diff --git a/arch/powerpc/kvm/trace_pr.h b/arch/powerpc/kvm/trace_pr.h
+index 85785a370c0e..256530eb1354 100644
+--- a/arch/powerpc/kvm/trace_pr.h
++++ b/arch/powerpc/kvm/trace_pr.h
+@@ -8,8 +8,6 @@
+
+ #undef TRACE_SYSTEM
+ #define TRACE_SYSTEM kvm_pr
+-#define TRACE_INCLUDE_PATH .
+-#define TRACE_INCLUDE_FILE trace_pr
+
+ TRACE_EVENT(kvm_book3s_reenter,
+ TP_PROTO(int r, struct kvm_vcpu *vcpu),
+@@ -272,4 +270,11 @@ TRACE_EVENT(kvm_unmap_hva,
+ #endif /* _TRACE_KVM_H */
+
+ /* This part must be outside protection */
++
++#undef TRACE_INCLUDE_PATH
++#undef TRACE_INCLUDE_FILE
++
++#define TRACE_INCLUDE_PATH .
++#define TRACE_INCLUDE_FILE trace_pr
++
+ #include <trace/define_trace.h>
+diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
+index 9fead0796364..40fb9a8835fe 100644
+--- a/arch/powerpc/mm/numa.c
++++ b/arch/powerpc/mm/numa.c
+@@ -1261,7 +1261,7 @@ static long vphn_get_associativity(unsigned long cpu,
+
+ switch (rc) {
+ case H_FUNCTION:
+- printk(KERN_INFO
++ printk_once(KERN_INFO
+ "VPHN is not supported. Disabling polling...\n");
+ stop_topology_update();
+ break;
+diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
+index 2f66290c9b92..ec9292917d3f 100644
+--- a/arch/s390/mm/gmap.c
++++ b/arch/s390/mm/gmap.c
+@@ -689,6 +689,8 @@ void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to)
+ vmaddr |= gaddr & ~PMD_MASK;
+ /* Find vma in the parent mm */
+ vma = find_vma(gmap->mm, vmaddr);
++ if (!vma)
++ continue;
+ size = min(to - gaddr, PMD_SIZE - (gaddr & ~PMD_MASK));
+ zap_page_range(vma, vmaddr, size);
+ }
+diff --git a/arch/x86/events/intel/uncore_snb.c b/arch/x86/events/intel/uncore_snb.c
+index aee5e8496be4..aa4e6f4e6a01 100644
+--- a/arch/x86/events/intel/uncore_snb.c
++++ b/arch/x86/events/intel/uncore_snb.c
+@@ -15,6 +15,25 @@
+ #define PCI_DEVICE_ID_INTEL_SKL_HQ_IMC 0x1910
+ #define PCI_DEVICE_ID_INTEL_SKL_SD_IMC 0x190f
+ #define PCI_DEVICE_ID_INTEL_SKL_SQ_IMC 0x191f
++#define PCI_DEVICE_ID_INTEL_KBL_Y_IMC 0x590c
++#define PCI_DEVICE_ID_INTEL_KBL_U_IMC 0x5904
++#define PCI_DEVICE_ID_INTEL_KBL_UQ_IMC 0x5914
++#define PCI_DEVICE_ID_INTEL_KBL_SD_IMC 0x590f
++#define PCI_DEVICE_ID_INTEL_KBL_SQ_IMC 0x591f
++#define PCI_DEVICE_ID_INTEL_CFL_2U_IMC 0x3ecc
++#define PCI_DEVICE_ID_INTEL_CFL_4U_IMC 0x3ed0
++#define PCI_DEVICE_ID_INTEL_CFL_4H_IMC 0x3e10
++#define PCI_DEVICE_ID_INTEL_CFL_6H_IMC 0x3ec4
++#define PCI_DEVICE_ID_INTEL_CFL_2S_D_IMC 0x3e0f
++#define PCI_DEVICE_ID_INTEL_CFL_4S_D_IMC 0x3e1f
++#define PCI_DEVICE_ID_INTEL_CFL_6S_D_IMC 0x3ec2
++#define PCI_DEVICE_ID_INTEL_CFL_8S_D_IMC 0x3e30
++#define PCI_DEVICE_ID_INTEL_CFL_4S_W_IMC 0x3e18
++#define PCI_DEVICE_ID_INTEL_CFL_6S_W_IMC 0x3ec6
++#define PCI_DEVICE_ID_INTEL_CFL_8S_W_IMC 0x3e31
++#define PCI_DEVICE_ID_INTEL_CFL_4S_S_IMC 0x3e33
++#define PCI_DEVICE_ID_INTEL_CFL_6S_S_IMC 0x3eca
++#define PCI_DEVICE_ID_INTEL_CFL_8S_S_IMC 0x3e32
+
+ /* SNB event control */
+ #define SNB_UNC_CTL_EV_SEL_MASK 0x000000ff
+@@ -632,7 +651,82 @@ static const struct pci_device_id skl_uncore_pci_ids[] = {
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_SQ_IMC),
+ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+ },
+-
++ { /* IMC */
++ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_Y_IMC),
++ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
++ },
++ { /* IMC */
++ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_U_IMC),
++ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
++ },
++ { /* IMC */
++ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_UQ_IMC),
++ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
++ },
++ { /* IMC */
++ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_SD_IMC),
++ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
++ },
++ { /* IMC */
++ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_SQ_IMC),
++ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
++ },
++ { /* IMC */
++ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_2U_IMC),
++ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
++ },
++ { /* IMC */
++ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4U_IMC),
++ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
++ },
++ { /* IMC */
++ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4H_IMC),
++ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
++ },
++ { /* IMC */
++ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6H_IMC),
++ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
++ },
++ { /* IMC */
++ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_2S_D_IMC),
++ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
++ },
++ { /* IMC */
++ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4S_D_IMC),
++ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
++ },
++ { /* IMC */
++ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6S_D_IMC),
++ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
++ },
++ { /* IMC */
++ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_8S_D_IMC),
++ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
++ },
++ { /* IMC */
++ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4S_W_IMC),
++ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
++ },
++ { /* IMC */
++ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6S_W_IMC),
++ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
++ },
++ { /* IMC */
++ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_8S_W_IMC),
++ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
++ },
++ { /* IMC */
++ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4S_S_IMC),
++ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
++ },
++ { /* IMC */
++ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6S_S_IMC),
++ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
++ },
++ { /* IMC */
++ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_8S_S_IMC),
++ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
++ },
+ { /* end: all zeroes */ },
+ };
+
+@@ -681,6 +775,25 @@ static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = {
+ IMC_DEV(SKL_HQ_IMC, &skl_uncore_pci_driver), /* 6th Gen Core H Quad Core */
+ IMC_DEV(SKL_SD_IMC, &skl_uncore_pci_driver), /* 6th Gen Core S Dual Core */
+ IMC_DEV(SKL_SQ_IMC, &skl_uncore_pci_driver), /* 6th Gen Core S Quad Core */
++ IMC_DEV(KBL_Y_IMC, &skl_uncore_pci_driver), /* 7th Gen Core Y */
++ IMC_DEV(KBL_U_IMC, &skl_uncore_pci_driver), /* 7th Gen Core U */
++ IMC_DEV(KBL_UQ_IMC, &skl_uncore_pci_driver), /* 7th Gen Core U Quad Core */
++ IMC_DEV(KBL_SD_IMC, &skl_uncore_pci_driver), /* 7th Gen Core S Dual Core */
++ IMC_DEV(KBL_SQ_IMC, &skl_uncore_pci_driver), /* 7th Gen Core S Quad Core */
++ IMC_DEV(CFL_2U_IMC, &skl_uncore_pci_driver), /* 8th Gen Core U 2 Cores */
++ IMC_DEV(CFL_4U_IMC, &skl_uncore_pci_driver), /* 8th Gen Core U 4 Cores */
++ IMC_DEV(CFL_4H_IMC, &skl_uncore_pci_driver), /* 8th Gen Core H 4 Cores */
++ IMC_DEV(CFL_6H_IMC, &skl_uncore_pci_driver), /* 8th Gen Core H 6 Cores */
++ IMC_DEV(CFL_2S_D_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 2 Cores Desktop */
++ IMC_DEV(CFL_4S_D_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 4 Cores Desktop */
++ IMC_DEV(CFL_6S_D_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 6 Cores Desktop */
++ IMC_DEV(CFL_8S_D_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 8 Cores Desktop */
++ IMC_DEV(CFL_4S_W_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 4 Cores Work Station */
++ IMC_DEV(CFL_6S_W_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 6 Cores Work Station */
++ IMC_DEV(CFL_8S_W_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 8 Cores Work Station */
++ IMC_DEV(CFL_4S_S_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 4 Cores Server */
++ IMC_DEV(CFL_6S_S_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 6 Cores Server */
++ IMC_DEV(CFL_8S_S_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 8 Cores Server */
+ { /* end marker */ }
+ };
+
+diff --git a/crypto/simd.c b/crypto/simd.c
+index 88203370a62f..894c62944106 100644
+--- a/crypto/simd.c
++++ b/crypto/simd.c
+@@ -126,8 +126,9 @@ static int simd_skcipher_init(struct crypto_skcipher *tfm)
+
+ ctx->cryptd_tfm = cryptd_tfm;
+
+- reqsize = sizeof(struct skcipher_request);
+- reqsize += crypto_skcipher_reqsize(&cryptd_tfm->base);
++ reqsize = crypto_skcipher_reqsize(cryptd_skcipher_child(cryptd_tfm));
++ reqsize = max(reqsize, crypto_skcipher_reqsize(&cryptd_tfm->base));
++ reqsize += sizeof(struct skcipher_request);
+
+ crypto_skcipher_set_reqsize(tfm, reqsize);
+
+diff --git a/drivers/base/core.c b/drivers/base/core.c
+index eb066cc827ef..3f463a61f8cf 100644
+--- a/drivers/base/core.c
++++ b/drivers/base/core.c
+@@ -1973,7 +1973,6 @@ void device_del(struct device *dev)
+ blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
+ BUS_NOTIFY_DEL_DEVICE, dev);
+
+- device_links_purge(dev);
+ dpm_sysfs_remove(dev);
+ if (parent)
+ klist_del(&dev->p->knode_parent);
+@@ -2001,6 +2000,7 @@ void device_del(struct device *dev)
+ device_pm_remove(dev);
+ driver_deferred_probe_del(dev);
+ device_remove_properties(dev);
++ device_links_purge(dev);
+
+ /* Notify the platform of the removal, in case they
+ * need to do anything...
+diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
+index 3d0287e212fe..a7f212ea17bf 100644
+--- a/drivers/block/floppy.c
++++ b/drivers/block/floppy.c
+@@ -4146,10 +4146,11 @@ static int __floppy_read_block_0(struct block_device *bdev, int drive)
+ bio.bi_end_io = floppy_rb0_cb;
+ bio_set_op_attrs(&bio, REQ_OP_READ, 0);
+
++ init_completion(&cbdata.complete);
++
+ submit_bio(&bio);
+ process_fd_request();
+
+- init_completion(&cbdata.complete);
+ wait_for_completion(&cbdata.complete);
+
+ __free_page(page);
+diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c
+index 5426c04fe24b..fc2da3a617ac 100644
+--- a/drivers/bus/arm-cci.c
++++ b/drivers/bus/arm-cci.c
+@@ -2103,8 +2103,6 @@ asmlinkage void __naked cci_enable_port_for_self(void)
+ [sizeof_struct_cpu_port] "i" (sizeof(struct cpu_port)),
+ [sizeof_struct_ace_port] "i" (sizeof(struct cci_ace_port)),
+ [offsetof_port_phys] "i" (offsetof(struct cci_ace_port, phys)) );
+-
+- unreachable();
+ }
+
+ /**
+diff --git a/drivers/clk/samsung/clk-exynos5250.c b/drivers/clk/samsung/clk-exynos5250.c
+index 6a0cb8a515e8..b609219c802b 100644
+--- a/drivers/clk/samsung/clk-exynos5250.c
++++ b/drivers/clk/samsung/clk-exynos5250.c
+@@ -560,6 +560,8 @@ static const struct samsung_gate_clock exynos5250_gate_clks[] __initconst = {
+ 0),
+ GATE(CLK_GSCL3, "gscl3", "mout_aclk266_gscl_sub", GATE_IP_GSCL, 3, 0,
+ 0),
++ GATE(CLK_CAMIF_TOP, "camif_top", "mout_aclk266_gscl_sub",
++ GATE_IP_GSCL, 4, 0, 0),
+ GATE(CLK_GSCL_WA, "gscl_wa", "div_gscl_wa", GATE_IP_GSCL, 5, 0, 0),
+ GATE(CLK_GSCL_WB, "gscl_wb", "div_gscl_wb", GATE_IP_GSCL, 6, 0, 0),
+ GATE(CLK_SMMU_GSCL0, "smmu_gscl0", "mout_aclk266_gscl_sub",
+@@ -570,6 +572,10 @@ static const struct samsung_gate_clock exynos5250_gate_clks[] __initconst = {
+ GATE_IP_GSCL, 9, 0, 0),
+ GATE(CLK_SMMU_GSCL3, "smmu_gscl3", "mout_aclk266_gscl_sub",
+ GATE_IP_GSCL, 10, 0, 0),
++ GATE(CLK_SMMU_FIMC_LITE0, "smmu_fimc_lite0", "mout_aclk266_gscl_sub",
++ GATE_IP_GSCL, 11, 0, 0),
++ GATE(CLK_SMMU_FIMC_LITE1, "smmu_fimc_lite1", "mout_aclk266_gscl_sub",
++ GATE_IP_GSCL, 12, 0, 0),
+
+ GATE(CLK_FIMD1, "fimd1", "mout_aclk200_disp1_sub", GATE_IP_DISP1, 0, 0,
+ 0),
+diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
+index 14466a9b01c0..63d28323a29c 100644
+--- a/drivers/cpufreq/imx6q-cpufreq.c
++++ b/drivers/cpufreq/imx6q-cpufreq.c
+@@ -135,8 +135,13 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
+ /* Ensure the arm clock divider is what we expect */
+ ret = clk_set_rate(arm_clk, new_freq * 1000);
+ if (ret) {
++ int ret1;
++
+ dev_err(cpu_dev, "failed to set clock rate: %d\n", ret);
+- regulator_set_voltage_tol(arm_reg, volt_old, 0);
++ ret1 = regulator_set_voltage_tol(arm_reg, volt_old, 0);
++ if (ret1)
++ dev_warn(cpu_dev,
++ "failed to restore vddarm voltage: %d\n", ret1);
+ return ret;
+ }
+
+diff --git a/drivers/firmware/efi/arm-init.c b/drivers/firmware/efi/arm-init.c
+index a7c522eac640..312f9f32e168 100644
+--- a/drivers/firmware/efi/arm-init.c
++++ b/drivers/firmware/efi/arm-init.c
+@@ -265,6 +265,10 @@ void __init efi_init(void)
+ (params.mmap & ~PAGE_MASK)));
+
+ init_screen_info();
++
++ /* ARM does not permit early mappings to persist across paging_init() */
++ if (IS_ENABLED(CONFIG_ARM))
++ efi_memmap_unmap();
+ }
+
+ static int __init register_gop_device(void)
+diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c
+index 8995a48bd067..ad1530aff633 100644
+--- a/drivers/firmware/efi/arm-runtime.c
++++ b/drivers/firmware/efi/arm-runtime.c
+@@ -122,7 +122,7 @@ static int __init arm_enable_runtime_services(void)
+ {
+ u64 mapsize;
+
+- if (!efi_enabled(EFI_BOOT) || !efi_enabled(EFI_MEMMAP)) {
++ if (!efi_enabled(EFI_BOOT)) {
+ pr_info("EFI services will not be available.\n");
+ return 0;
+ }
+diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
+index adaa4a964f0c..678bc910e080 100644
+--- a/drivers/firmware/efi/libstub/Makefile
++++ b/drivers/firmware/efi/libstub/Makefile
+@@ -13,7 +13,8 @@ cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ -O2 \
+
+ cflags-$(CONFIG_ARM64) := $(subst -pg,,$(KBUILD_CFLAGS)) -fpie
+ cflags-$(CONFIG_ARM) := $(subst -pg,,$(KBUILD_CFLAGS)) \
+- -fno-builtin -fpic -mno-single-pic-base
++ -fno-builtin -fpic \
++ $(call cc-option,-mno-single-pic-base)
+
+ cflags-$(CONFIG_EFI_ARMSTUB) += -I$(srctree)/scripts/dtc/libfdt
+
+diff --git a/drivers/firmware/efi/memmap.c b/drivers/firmware/efi/memmap.c
+index 5fc70520e04c..1907db2b38d8 100644
+--- a/drivers/firmware/efi/memmap.c
++++ b/drivers/firmware/efi/memmap.c
+@@ -118,6 +118,9 @@ int __init efi_memmap_init_early(struct efi_memory_map_data *data)
+
+ void __init efi_memmap_unmap(void)
+ {
++ if (!efi_enabled(EFI_MEMMAP))
++ return;
++
+ if (!efi.memmap.late) {
+ unsigned long size;
+
+diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
+index 7d5de4ef4f22..21062cb6b85f 100644
+--- a/drivers/gpio/gpiolib.c
++++ b/drivers/gpio/gpiolib.c
+@@ -1166,7 +1166,7 @@ int gpiochip_add_data(struct gpio_chip *chip, void *data)
+ gdev->descs = kcalloc(chip->ngpio, sizeof(gdev->descs[0]), GFP_KERNEL);
+ if (!gdev->descs) {
+ status = -ENOMEM;
+- goto err_free_gdev;
++ goto err_free_ida;
+ }
+
+ if (chip->ngpio == 0) {
+@@ -1298,8 +1298,9 @@ err_free_label:
+ kfree(gdev->label);
+ err_free_descs:
+ kfree(gdev->descs);
+-err_free_gdev:
++err_free_ida:
+ ida_simple_remove(&gpio_ida, gdev->id);
++err_free_gdev:
+ /* failures here can mean systems won't boot... */
+ pr_err("%s: GPIOs %d..%d (%s) failed to register\n", __func__,
+ gdev->base, gdev->base + gdev->ngpio - 1,
+diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
+index 69dab82a3771..bf589c53b908 100644
+--- a/drivers/gpu/drm/ast/ast_drv.c
++++ b/drivers/gpu/drm/ast/ast_drv.c
+@@ -60,8 +60,29 @@ static const struct pci_device_id pciidlist[] = {
+
+ MODULE_DEVICE_TABLE(pci, pciidlist);
+
++static void ast_kick_out_firmware_fb(struct pci_dev *pdev)
++{
++ struct apertures_struct *ap;
++ bool primary = false;
++
++ ap = alloc_apertures(1);
++ if (!ap)
++ return;
++
++ ap->ranges[0].base = pci_resource_start(pdev, 0);
++ ap->ranges[0].size = pci_resource_len(pdev, 0);
++
++#ifdef CONFIG_X86
++ primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
++#endif
++ drm_fb_helper_remove_conflicting_framebuffers(ap, "astdrmfb", primary);
++ kfree(ap);
++}
++
+ static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ {
++ ast_kick_out_firmware_fb(pdev);
++
+ return drm_get_pci_dev(pdev, ent, &driver);
+ }
+
+diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
+index e9f1e6fe7b94..fae1176b2472 100644
+--- a/drivers/gpu/drm/ast/ast_mode.c
++++ b/drivers/gpu/drm/ast/ast_mode.c
+@@ -568,6 +568,7 @@ static int ast_crtc_do_set_base(struct drm_crtc *crtc,
+ }
+ ast_bo_unreserve(bo);
+
++ ast_set_offset_reg(crtc);
+ ast_set_start_address_crt1(crtc, (u32)gpu_addr);
+
+ return 0;
+@@ -1254,7 +1255,7 @@ static int ast_cursor_move(struct drm_crtc *crtc,
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc7, ((y >> 8) & 0x07));
+
+ /* dummy write to fire HWC */
+- ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xCB, 0xFF, 0x00);
++ ast_show_cursor(crtc);
+
+ return 0;
+ }
+diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
+index 690c67507cbc..aba27ea9cea5 100644
+--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
++++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
+@@ -1446,8 +1446,7 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
+ }
+
+ /* The CEC module handles HDMI hotplug detection */
+- cec_np = of_find_compatible_node(np->parent, NULL,
+- "mediatek,mt8173-cec");
++ cec_np = of_get_compatible_child(np->parent, "mediatek,mt8173-cec");
+ if (!cec_np) {
+ dev_err(dev, "Failed to find CEC node\n");
+ return -EINVAL;
+@@ -1457,8 +1456,10 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
+ if (!cec_pdev) {
+ dev_err(hdmi->dev, "Waiting for CEC device %pOF\n",
+ cec_np);
++ of_node_put(cec_np);
+ return -EPROBE_DEFER;
+ }
++ of_node_put(cec_np);
+ hdmi->cec_dev = &cec_pdev->dev;
+
+ /*
+diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
+index feb80dbb5948..6d59af07d338 100644
+--- a/drivers/infiniband/core/verbs.c
++++ b/drivers/infiniband/core/verbs.c
+@@ -1285,7 +1285,7 @@ EXPORT_SYMBOL(ib_resolve_eth_dmac);
+
+ /**
+ * ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
+- * @qp: The QP to modify.
++ * @ib_qp: The QP to modify.
+ * @attr: On input, specifies the QP attributes to modify. On output,
+ * the current values of selected QP attributes are returned.
+ * @attr_mask: A bit-mask used to specify which attributes of the QP
+@@ -1294,9 +1294,10 @@ EXPORT_SYMBOL(ib_resolve_eth_dmac);
+ * are being modified.
+ * It returns 0 on success and returns appropriate error code on error.
+ */
+-int ib_modify_qp_with_udata(struct ib_qp *qp, struct ib_qp_attr *attr,
++int ib_modify_qp_with_udata(struct ib_qp *ib_qp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata)
+ {
++ struct ib_qp *qp = ib_qp->real_qp;
+ int ret;
+
+ if (attr_mask & IB_QP_AV) {
+diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
+index 8c954a0ae3b6..c14ec04f2a89 100644
+--- a/drivers/infiniband/hw/hfi1/user_sdma.c
++++ b/drivers/infiniband/hw/hfi1/user_sdma.c
+@@ -328,7 +328,6 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
+ u8 opcode, sc, vl;
+ u16 pkey;
+ u32 slid;
+- int req_queued = 0;
+ u16 dlid;
+ u32 selector;
+
+@@ -392,7 +391,6 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
+ req->data_len = 0;
+ req->pq = pq;
+ req->cq = cq;
+- req->status = -1;
+ req->ahg_idx = -1;
+ req->iov_idx = 0;
+ req->sent = 0;
+@@ -400,12 +398,14 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
+ req->seqcomp = 0;
+ req->seqsubmitted = 0;
+ req->tids = NULL;
+- req->done = 0;
+ req->has_error = 0;
+ INIT_LIST_HEAD(&req->txps);
+
+ memcpy(&req->info, &info, sizeof(info));
+
++ /* The request is initialized, count it */
++ atomic_inc(&pq->n_reqs);
++
+ if (req_opcode(info.ctrl) == EXPECTED) {
+ /* expected must have a TID info and at least one data vector */
+ if (req->data_iovs < 2) {
+@@ -500,7 +500,6 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
+ ret = pin_vector_pages(req, &req->iovs[i]);
+ if (ret) {
+ req->data_iovs = i;
+- req->status = ret;
+ goto free_req;
+ }
+ req->data_len += req->iovs[i].iov.iov_len;
+@@ -561,14 +560,10 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
+ req->ahg_idx = sdma_ahg_alloc(req->sde);
+
+ set_comp_state(pq, cq, info.comp_idx, QUEUED, 0);
+- atomic_inc(&pq->n_reqs);
+- req_queued = 1;
+ /* Send the first N packets in the request to buy us some time */
+ ret = user_sdma_send_pkts(req, pcount);
+- if (unlikely(ret < 0 && ret != -EBUSY)) {
+- req->status = ret;
++ if (unlikely(ret < 0 && ret != -EBUSY))
+ goto free_req;
+- }
+
+ /*
+ * It is possible that the SDMA engine would have processed all the
+@@ -588,14 +583,8 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
+ while (req->seqsubmitted != req->info.npkts) {
+ ret = user_sdma_send_pkts(req, pcount);
+ if (ret < 0) {
+- if (ret != -EBUSY) {
+- req->status = ret;
+- WRITE_ONCE(req->has_error, 1);
+- if (ACCESS_ONCE(req->seqcomp) ==
+- req->seqsubmitted - 1)
+- goto free_req;
+- return ret;
+- }
++ if (ret != -EBUSY)
++ goto free_req;
+ wait_event_interruptible_timeout(
+ pq->busy.wait_dma,
+ (pq->state == SDMA_PKT_Q_ACTIVE),
+@@ -606,10 +595,19 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
+ *count += idx;
+ return 0;
+ free_req:
+- user_sdma_free_request(req, true);
+- if (req_queued)
++ /*
++ * If the submitted seqsubmitted == npkts, the completion routine
++ * controls the final state. If sequbmitted < npkts, wait for any
++ * outstanding packets to finish before cleaning up.
++ */
++ if (req->seqsubmitted < req->info.npkts) {
++ if (req->seqsubmitted)
++ wait_event(pq->busy.wait_dma,
++ (req->seqcomp == req->seqsubmitted - 1));
++ user_sdma_free_request(req, true);
+ pq_update(pq);
+- set_comp_state(pq, cq, info.comp_idx, ERROR, req->status);
++ set_comp_state(pq, cq, info.comp_idx, ERROR, ret);
++ }
+ return ret;
+ }
+
+@@ -917,7 +915,6 @@ dosend:
+ ret = sdma_send_txlist(req->sde, &pq->busy, &req->txps, &count);
+ req->seqsubmitted += count;
+ if (req->seqsubmitted == req->info.npkts) {
+- WRITE_ONCE(req->done, 1);
+ /*
+ * The txreq has already been submitted to the HW queue
+ * so we can free the AHG entry now. Corruption will not
+@@ -1347,11 +1344,15 @@ static int set_txreq_header_ahg(struct user_sdma_request *req,
+ return diff;
+ }
+
+-/*
+- * SDMA tx request completion callback. Called when the SDMA progress
+- * state machine gets notification that the SDMA descriptors for this
+- * tx request have been processed by the DMA engine. Called in
+- * interrupt context.
++/**
++ * user_sdma_txreq_cb() - SDMA tx request completion callback.
++ * @txreq: valid sdma tx request
++ * @status: success/failure of request
++ *
++ * Called when the SDMA progress state machine gets notification that
++ * the SDMA descriptors for this tx request have been processed by the
++ * DMA engine. Called in interrupt context.
++ * Only do work on completed sequences.
+ */
+ static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status)
+ {
+@@ -1360,7 +1361,7 @@ static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status)
+ struct user_sdma_request *req;
+ struct hfi1_user_sdma_pkt_q *pq;
+ struct hfi1_user_sdma_comp_q *cq;
+- u16 idx;
++ enum hfi1_sdma_comp_state state = COMPLETE;
+
+ if (!tx->req)
+ return;
+@@ -1373,31 +1374,19 @@ static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status)
+ SDMA_DBG(req, "SDMA completion with error %d",
+ status);
+ WRITE_ONCE(req->has_error, 1);
++ state = ERROR;
+ }
+
+ req->seqcomp = tx->seqnum;
+ kmem_cache_free(pq->txreq_cache, tx);
+- tx = NULL;
+-
+- idx = req->info.comp_idx;
+- if (req->status == -1 && status == SDMA_TXREQ_S_OK) {
+- if (req->seqcomp == req->info.npkts - 1) {
+- req->status = 0;
+- user_sdma_free_request(req, false);
+- pq_update(pq);
+- set_comp_state(pq, cq, idx, COMPLETE, 0);
+- }
+- } else {
+- if (status != SDMA_TXREQ_S_OK)
+- req->status = status;
+- if (req->seqcomp == (ACCESS_ONCE(req->seqsubmitted) - 1) &&
+- (READ_ONCE(req->done) ||
+- READ_ONCE(req->has_error))) {
+- user_sdma_free_request(req, false);
+- pq_update(pq);
+- set_comp_state(pq, cq, idx, ERROR, req->status);
+- }
+- }
++
++ /* sequence isn't complete? We are done */
++ if (req->seqcomp != req->info.npkts - 1)
++ return;
++
++ user_sdma_free_request(req, false);
++ set_comp_state(pq, cq, req->info.comp_idx, state, status);
++ pq_update(pq);
+ }
+
+ static inline void pq_update(struct hfi1_user_sdma_pkt_q *pq)
+@@ -1430,6 +1419,8 @@ static void user_sdma_free_request(struct user_sdma_request *req, bool unpin)
+ if (!node)
+ continue;
+
++ req->iovs[i].node = NULL;
++
+ if (unpin)
+ hfi1_mmu_rb_remove(req->pq->handler,
+ &node->rb);
+diff --git a/drivers/infiniband/hw/hfi1/user_sdma.h b/drivers/infiniband/hw/hfi1/user_sdma.h
+index 9b8bb5634c0d..5af52334b7dc 100644
+--- a/drivers/infiniband/hw/hfi1/user_sdma.h
++++ b/drivers/infiniband/hw/hfi1/user_sdma.h
+@@ -196,8 +196,6 @@ struct user_sdma_request {
+ /* Writeable fields shared with interrupt */
+ u64 seqcomp ____cacheline_aligned_in_smp;
+ u64 seqsubmitted;
+- /* status of the last txreq completed */
+- int status;
+
+ /* Send side fields */
+ struct list_head txps ____cacheline_aligned_in_smp;
+@@ -219,7 +217,6 @@ struct user_sdma_request {
+ u16 tididx;
+ /* progress index moving along the iovs array */
+ u8 iov_idx;
+- u8 done;
+ u8 has_error;
+
+ struct user_sdma_iovec iovs[MAX_VECTORS_PER_REQ];
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+index 39398dd074d6..c1021b4afb41 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+@@ -631,6 +631,7 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
+ return ERR_PTR(-ENOMEM);
+
+ iwqp = (struct i40iw_qp *)mem;
++ iwqp->allocated_buffer = mem;
+ qp = &iwqp->sc_qp;
+ qp->back_qp = (void *)iwqp;
+ qp->push_idx = I40IW_INVALID_PUSH_PAGE_INDEX;
+@@ -659,7 +660,6 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
+ goto error;
+ }
+
+- iwqp->allocated_buffer = mem;
+ iwqp->iwdev = iwdev;
+ iwqp->iwpd = iwpd;
+ iwqp->ibqp.qp_num = qp_num;
+diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
+index 53f775c41cd1..2e52015634f9 100644
+--- a/drivers/input/joystick/xpad.c
++++ b/drivers/input/joystick/xpad.c
+@@ -89,8 +89,10 @@
+
+ #define XPAD_PKT_LEN 64
+
+-/* xbox d-pads should map to buttons, as is required for DDR pads
+- but we map them to axes when possible to simplify things */
++/*
++ * xbox d-pads should map to buttons, as is required for DDR pads
++ * but we map them to axes when possible to simplify things
++ */
+ #define MAP_DPAD_TO_BUTTONS (1 << 0)
+ #define MAP_TRIGGERS_TO_BUTTONS (1 << 1)
+ #define MAP_STICKS_TO_NULL (1 << 2)
+@@ -231,6 +233,8 @@ static const struct xpad_device {
+ { 0x0e6f, 0x021f, "Rock Candy Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
+ { 0x0e6f, 0x0246, "Rock Candy Gamepad for Xbox One 2015", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x02ab, "PDP Controller for Xbox One", 0, XTYPE_XBOXONE },
++ { 0x0e6f, 0x02a4, "PDP Wired Controller for Xbox One - Stealth Series", 0, XTYPE_XBOXONE },
++ { 0x0e6f, 0x02a6, "PDP Wired Controller for Xbox One - Camo Series", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x0301, "Logic3 Controller", 0, XTYPE_XBOX360 },
+ { 0x0e6f, 0x0346, "Rock Candy Gamepad for Xbox One 2016", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x0401, "Logic3 Controller", 0, XTYPE_XBOX360 },
+@@ -390,15 +394,15 @@ static const signed short xpad_abs_triggers[] = {
+ * match against vendor id as well. Wired Xbox 360 devices have protocol 1,
+ * wireless controllers have protocol 129.
+ */
+-#define XPAD_XBOX360_VENDOR_PROTOCOL(vend,pr) \
++#define XPAD_XBOX360_VENDOR_PROTOCOL(vend, pr) \
+ .match_flags = USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_INT_INFO, \
+ .idVendor = (vend), \
+ .bInterfaceClass = USB_CLASS_VENDOR_SPEC, \
+ .bInterfaceSubClass = 93, \
+ .bInterfaceProtocol = (pr)
+ #define XPAD_XBOX360_VENDOR(vend) \
+- { XPAD_XBOX360_VENDOR_PROTOCOL(vend,1) }, \
+- { XPAD_XBOX360_VENDOR_PROTOCOL(vend,129) }
++ { XPAD_XBOX360_VENDOR_PROTOCOL((vend), 1) }, \
++ { XPAD_XBOX360_VENDOR_PROTOCOL((vend), 129) }
+
+ /* The Xbox One controller uses subclass 71 and protocol 208. */
+ #define XPAD_XBOXONE_VENDOR_PROTOCOL(vend, pr) \
+@@ -408,7 +412,7 @@ static const signed short xpad_abs_triggers[] = {
+ .bInterfaceSubClass = 71, \
+ .bInterfaceProtocol = (pr)
+ #define XPAD_XBOXONE_VENDOR(vend) \
+- { XPAD_XBOXONE_VENDOR_PROTOCOL(vend, 208) }
++ { XPAD_XBOXONE_VENDOR_PROTOCOL((vend), 208) }
+
+ static const struct usb_device_id xpad_table[] = {
+ { USB_INTERFACE_INFO('X', 'B', 0) }, /* X-Box USB-IF not approved class */
+@@ -480,7 +484,8 @@ static const u8 xboxone_hori_init[] = {
+
+ /*
+ * This packet is required for some of the PDP pads to start
+- * sending input reports. One of those pads is (0x0e6f:0x02ab).
++ * sending input reports. These pads include: (0x0e6f:0x02ab),
++ * (0x0e6f:0x02a4).
+ */
+ static const u8 xboxone_pdp_init1[] = {
+ 0x0a, 0x20, 0x00, 0x03, 0x00, 0x01, 0x14
+@@ -488,7 +493,8 @@ static const u8 xboxone_pdp_init1[] = {
+
+ /*
+ * This packet is required for some of the PDP pads to start
+- * sending input reports. One of those pads is (0x0e6f:0x02ab).
++ * sending input reports. These pads include: (0x0e6f:0x02ab),
++ * (0x0e6f:0x02a4).
+ */
+ static const u8 xboxone_pdp_init2[] = {
+ 0x06, 0x20, 0x00, 0x02, 0x01, 0x00
+@@ -526,6 +532,10 @@ static const struct xboxone_init_packet xboxone_init_packets[] = {
+ XBOXONE_INIT_PKT(0x0000, 0x0000, xboxone_fw2015_init),
+ XBOXONE_INIT_PKT(0x0e6f, 0x02ab, xboxone_pdp_init1),
+ XBOXONE_INIT_PKT(0x0e6f, 0x02ab, xboxone_pdp_init2),
++ XBOXONE_INIT_PKT(0x0e6f, 0x02a4, xboxone_pdp_init1),
++ XBOXONE_INIT_PKT(0x0e6f, 0x02a4, xboxone_pdp_init2),
++ XBOXONE_INIT_PKT(0x0e6f, 0x02a6, xboxone_pdp_init1),
++ XBOXONE_INIT_PKT(0x0e6f, 0x02a6, xboxone_pdp_init2),
+ XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumblebegin_init),
+ XBOXONE_INIT_PKT(0x24c6, 0x542a, xboxone_rumblebegin_init),
+ XBOXONE_INIT_PKT(0x24c6, 0x543a, xboxone_rumblebegin_init),
+@@ -1573,7 +1583,6 @@ static void xpad_close(struct input_dev *dev)
+ static void xpad_set_up_abs(struct input_dev *input_dev, signed short abs)
+ {
+ struct usb_xpad *xpad = input_get_drvdata(input_dev);
+- set_bit(abs, input_dev->absbit);
+
+ switch (abs) {
+ case ABS_X:
+@@ -1593,6 +1602,9 @@ static void xpad_set_up_abs(struct input_dev *input_dev, signed short abs)
+ case ABS_HAT0Y: /* the d-pad (only if dpad is mapped to axes */
+ input_set_abs_params(input_dev, abs, -1, 1, 0, 0);
+ break;
++ default:
++ input_set_abs_params(input_dev, abs, 0, 0, 0, 0);
++ break;
+ }
+ }
+
+@@ -1633,10 +1645,7 @@ static int xpad_init_input(struct usb_xpad *xpad)
+ input_dev->close = xpad_close;
+ }
+
+- __set_bit(EV_KEY, input_dev->evbit);
+-
+ if (!(xpad->mapping & MAP_STICKS_TO_NULL)) {
+- __set_bit(EV_ABS, input_dev->evbit);
+ /* set up axes */
+ for (i = 0; xpad_abs[i] >= 0; i++)
+ xpad_set_up_abs(input_dev, xpad_abs[i]);
+@@ -1644,21 +1653,22 @@ static int xpad_init_input(struct usb_xpad *xpad)
+
+ /* set up standard buttons */
+ for (i = 0; xpad_common_btn[i] >= 0; i++)
+- __set_bit(xpad_common_btn[i], input_dev->keybit);
++ input_set_capability(input_dev, EV_KEY, xpad_common_btn[i]);
+
+ /* set up model-specific ones */
+ if (xpad->xtype == XTYPE_XBOX360 || xpad->xtype == XTYPE_XBOX360W ||
+ xpad->xtype == XTYPE_XBOXONE) {
+ for (i = 0; xpad360_btn[i] >= 0; i++)
+- __set_bit(xpad360_btn[i], input_dev->keybit);
++ input_set_capability(input_dev, EV_KEY, xpad360_btn[i]);
+ } else {
+ for (i = 0; xpad_btn[i] >= 0; i++)
+- __set_bit(xpad_btn[i], input_dev->keybit);
++ input_set_capability(input_dev, EV_KEY, xpad_btn[i]);
+ }
+
+ if (xpad->mapping & MAP_DPAD_TO_BUTTONS) {
+ for (i = 0; xpad_btn_pad[i] >= 0; i++)
+- __set_bit(xpad_btn_pad[i], input_dev->keybit);
++ input_set_capability(input_dev, EV_KEY,
++ xpad_btn_pad[i]);
+ }
+
+ /*
+@@ -1675,7 +1685,8 @@ static int xpad_init_input(struct usb_xpad *xpad)
+
+ if (xpad->mapping & MAP_TRIGGERS_TO_BUTTONS) {
+ for (i = 0; xpad_btn_triggers[i] >= 0; i++)
+- __set_bit(xpad_btn_triggers[i], input_dev->keybit);
++ input_set_capability(input_dev, EV_KEY,
++ xpad_btn_triggers[i]);
+ } else {
+ for (i = 0; xpad_abs_triggers[i] >= 0; i++)
+ xpad_set_up_abs(input_dev, xpad_abs_triggers[i]);
+diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
+index 6c4bbd38700e..6f36e2d01e2e 100644
+--- a/drivers/input/mouse/synaptics.c
++++ b/drivers/input/mouse/synaptics.c
+@@ -99,9 +99,7 @@ static int synaptics_mode_cmd(struct psmouse *psmouse, u8 mode)
+ int synaptics_detect(struct psmouse *psmouse, bool set_properties)
+ {
+ struct ps2dev *ps2dev = &psmouse->ps2dev;
+- u8 param[4];
+-
+- param[0] = 0;
++ u8 param[4] = { 0 };
+
+ ps2_command(ps2dev, param, PSMOUSE_CMD_SETRES);
+ ps2_command(ps2dev, param, PSMOUSE_CMD_SETRES);
+diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
+index 44da037b13ba..0e386f5cc836 100644
+--- a/drivers/mmc/host/sdhci-pci-core.c
++++ b/drivers/mmc/host/sdhci-pci-core.c
+@@ -1607,8 +1607,13 @@ static struct sdhci_pci_slot *sdhci_pci_probe_slot(
+ host->mmc->caps2 |= MMC_CAP2_NO_PRESCAN_POWERUP;
+
+ if (slot->cd_idx >= 0) {
+- ret = mmc_gpiod_request_cd(host->mmc, NULL, slot->cd_idx,
++ ret = mmc_gpiod_request_cd(host->mmc, "cd", slot->cd_idx,
+ slot->cd_override_level, 0, NULL);
++ if (ret && ret != -EPROBE_DEFER)
++ ret = mmc_gpiod_request_cd(host->mmc, NULL,
++ slot->cd_idx,
++ slot->cd_override_level,
++ 0, NULL);
+ if (ret == -EPROBE_DEFER)
+ goto remove;
+
+diff --git a/drivers/mtd/nand/atmel/nand-controller.c b/drivers/mtd/nand/atmel/nand-controller.c
+index 32a2f947a454..0b93f152d993 100644
+--- a/drivers/mtd/nand/atmel/nand-controller.c
++++ b/drivers/mtd/nand/atmel/nand-controller.c
+@@ -2077,8 +2077,7 @@ atmel_hsmc_nand_controller_legacy_init(struct atmel_hsmc_nand_controller *nc)
+ int ret;
+
+ nand_np = dev->of_node;
+- nfc_np = of_find_compatible_node(dev->of_node, NULL,
+- "atmel,sama5d3-nfc");
++ nfc_np = of_get_compatible_child(dev->of_node, "atmel,sama5d3-nfc");
+ if (!nfc_np) {
+ dev_err(dev, "Could not find device node for sama5d3-nfc\n");
+ return -ENODEV;
+@@ -2492,15 +2491,19 @@ static int atmel_nand_controller_probe(struct platform_device *pdev)
+ }
+
+ if (caps->legacy_of_bindings) {
++ struct device_node *nfc_node;
+ u32 ale_offs = 21;
+
+ /*
+ * If we are parsing legacy DT props and the DT contains a
+ * valid NFC node, forward the request to the sama5 logic.
+ */
+- if (of_find_compatible_node(pdev->dev.of_node, NULL,
+- "atmel,sama5d3-nfc"))
++ nfc_node = of_get_compatible_child(pdev->dev.of_node,
++ "atmel,sama5d3-nfc");
++ if (nfc_node) {
+ caps = &atmel_sama5_nand_caps;
++ of_node_put(nfc_node);
++ }
+
+ /*
+ * Even if the compatible says we are dealing with an
+diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
+index 18a72da759a0..6445c693d935 100644
+--- a/drivers/mtd/ubi/build.c
++++ b/drivers/mtd/ubi/build.c
+@@ -526,6 +526,7 @@ void ubi_free_internal_volumes(struct ubi_device *ubi)
+ for (i = ubi->vtbl_slots;
+ i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) {
+ ubi_eba_replace_table(ubi->volumes[i], NULL);
++ ubi_fastmap_destroy_checkmap(ubi->volumes[i]);
+ kfree(ubi->volumes[i]);
+ }
+ }
+diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
+index d0884bd9d955..c4d4b8f07630 100644
+--- a/drivers/mtd/ubi/eba.c
++++ b/drivers/mtd/ubi/eba.c
+@@ -517,6 +517,9 @@ static int check_mapping(struct ubi_device *ubi, struct ubi_volume *vol, int lnu
+ if (!ubi->fast_attach)
+ return 0;
+
++ if (!vol->checkmap || test_bit(lnum, vol->checkmap))
++ return 0;
++
+ vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
+ if (!vidb)
+ return -ENOMEM;
+@@ -551,6 +554,7 @@ static int check_mapping(struct ubi_device *ubi, struct ubi_volume *vol, int lnu
+ goto out_free;
+ }
+
++ set_bit(lnum, vol->checkmap);
+ err = 0;
+
+ out_free:
+diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
+index 5a832bc79b1b..63e8527f7b65 100644
+--- a/drivers/mtd/ubi/fastmap.c
++++ b/drivers/mtd/ubi/fastmap.c
+@@ -1101,6 +1101,26 @@ free_fm_sb:
+ goto out;
+ }
+
++int ubi_fastmap_init_checkmap(struct ubi_volume *vol, int leb_count)
++{
++ struct ubi_device *ubi = vol->ubi;
++
++ if (!ubi->fast_attach)
++ return 0;
++
++ vol->checkmap = kcalloc(BITS_TO_LONGS(leb_count), sizeof(unsigned long),
++ GFP_KERNEL);
++ if (!vol->checkmap)
++ return -ENOMEM;
++
++ return 0;
++}
++
++void ubi_fastmap_destroy_checkmap(struct ubi_volume *vol)
++{
++ kfree(vol->checkmap);
++}
++
+ /**
+ * ubi_write_fastmap - writes a fastmap.
+ * @ubi: UBI device object
+diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
+index 5fe62653995e..f5ba97c46160 100644
+--- a/drivers/mtd/ubi/ubi.h
++++ b/drivers/mtd/ubi/ubi.h
+@@ -334,6 +334,9 @@ struct ubi_eba_leb_desc {
+ * @changing_leb: %1 if the atomic LEB change ioctl command is in progress
+ * @direct_writes: %1 if direct writes are enabled for this volume
+ *
++ * @checkmap: bitmap to remember which PEB->LEB mappings got checked,
++ * protected by UBI LEB lock tree.
++ *
+ * The @corrupted field indicates that the volume's contents is corrupted.
+ * Since UBI protects only static volumes, this field is not relevant to
+ * dynamic volumes - it is user's responsibility to assure their data
+@@ -377,6 +380,10 @@ struct ubi_volume {
+ unsigned int updating:1;
+ unsigned int changing_leb:1;
+ unsigned int direct_writes:1;
++
++#ifdef CONFIG_MTD_UBI_FASTMAP
++ unsigned long *checkmap;
++#endif
+ };
+
+ /**
+@@ -965,8 +972,12 @@ size_t ubi_calc_fm_size(struct ubi_device *ubi);
+ int ubi_update_fastmap(struct ubi_device *ubi);
+ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
+ struct ubi_attach_info *scan_ai);
++int ubi_fastmap_init_checkmap(struct ubi_volume *vol, int leb_count);
++void ubi_fastmap_destroy_checkmap(struct ubi_volume *vol);
+ #else
+ static inline int ubi_update_fastmap(struct ubi_device *ubi) { return 0; }
++int static inline ubi_fastmap_init_checkmap(struct ubi_volume *vol, int leb_count) { return 0; }
++static inline void ubi_fastmap_destroy_checkmap(struct ubi_volume *vol) {}
+ #endif
+
+ /* block.c */
+diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
+index 3fd8d7ff7a02..0be516780e92 100644
+--- a/drivers/mtd/ubi/vmt.c
++++ b/drivers/mtd/ubi/vmt.c
+@@ -139,6 +139,7 @@ static void vol_release(struct device *dev)
+ struct ubi_volume *vol = container_of(dev, struct ubi_volume, dev);
+
+ ubi_eba_replace_table(vol, NULL);
++ ubi_fastmap_destroy_checkmap(vol);
+ kfree(vol);
+ }
+
+diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
+index 263743e7b741..94d7a865b135 100644
+--- a/drivers/mtd/ubi/vtbl.c
++++ b/drivers/mtd/ubi/vtbl.c
+@@ -534,7 +534,7 @@ static int init_volumes(struct ubi_device *ubi,
+ const struct ubi_attach_info *ai,
+ const struct ubi_vtbl_record *vtbl)
+ {
+- int i, reserved_pebs = 0;
++ int i, err, reserved_pebs = 0;
+ struct ubi_ainf_volume *av;
+ struct ubi_volume *vol;
+
+@@ -620,6 +620,16 @@ static int init_volumes(struct ubi_device *ubi,
+ (long long)(vol->used_ebs - 1) * vol->usable_leb_size;
+ vol->used_bytes += av->last_data_size;
+ vol->last_eb_bytes = av->last_data_size;
++
++ /*
++ * We use ubi->peb_count and not vol->reserved_pebs because
++ * we want to keep the code simple. Otherwise we'd have to
++ * resize/check the bitmap upon volume resize too.
++ * Allocating a few bytes more does not hurt.
++ */
++ err = ubi_fastmap_init_checkmap(vol, ubi->peb_count);
++ if (err)
++ return err;
+ }
+
+ /* And add the layout volume */
+@@ -645,6 +655,9 @@ static int init_volumes(struct ubi_device *ubi,
+ reserved_pebs += vol->reserved_pebs;
+ ubi->vol_count += 1;
+ vol->ubi = ubi;
++ err = ubi_fastmap_init_checkmap(vol, UBI_LAYOUT_VOLUME_EBS);
++ if (err)
++ return err;
+
+ if (reserved_pebs > ubi->avail_pebs) {
+ ubi_err(ubi, "not enough PEBs, required %d, available %d",
+@@ -849,6 +862,7 @@ int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_attach_info *ai)
+ out_free:
+ vfree(ubi->vtbl);
+ for (i = 0; i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) {
++ ubi_fastmap_destroy_checkmap(ubi->volumes[i]);
+ kfree(ubi->volumes[i]);
+ ubi->volumes[i] = NULL;
+ }
+diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
+index b6a681bce400..035daca63168 100644
+--- a/drivers/net/can/dev.c
++++ b/drivers/net/can/dev.c
+@@ -476,6 +476,34 @@ void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
+ }
+ EXPORT_SYMBOL_GPL(can_put_echo_skb);
+
++struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr)
++{
++ struct can_priv *priv = netdev_priv(dev);
++ struct sk_buff *skb = priv->echo_skb[idx];
++ struct canfd_frame *cf;
++
++ if (idx >= priv->echo_skb_max) {
++ netdev_err(dev, "%s: BUG! Trying to access can_priv::echo_skb out of bounds (%u/max %u)\n",
++ __func__, idx, priv->echo_skb_max);
++ return NULL;
++ }
++
++ if (!skb) {
++ netdev_err(dev, "%s: BUG! Trying to echo non existing skb: can_priv::echo_skb[%u]\n",
++ __func__, idx);
++ return NULL;
++ }
++
++ /* Using "struct canfd_frame::len" for the frame
++ * length is supported on both CAN and CANFD frames.
++ */
++ cf = (struct canfd_frame *)skb->data;
++ *len_ptr = cf->len;
++ priv->echo_skb[idx] = NULL;
++
++ return skb;
++}
++
+ /*
+ * Get the skb from the stack and loop it back locally
+ *
+@@ -485,22 +513,16 @@ EXPORT_SYMBOL_GPL(can_put_echo_skb);
+ */
+ unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx)
+ {
+- struct can_priv *priv = netdev_priv(dev);
+-
+- BUG_ON(idx >= priv->echo_skb_max);
+-
+- if (priv->echo_skb[idx]) {
+- struct sk_buff *skb = priv->echo_skb[idx];
+- struct can_frame *cf = (struct can_frame *)skb->data;
+- u8 dlc = cf->can_dlc;
++ struct sk_buff *skb;
++ u8 len;
+
+- netif_rx(priv->echo_skb[idx]);
+- priv->echo_skb[idx] = NULL;
++ skb = __can_get_echo_skb(dev, idx, &len);
++ if (!skb)
++ return 0;
+
+- return dlc;
+- }
++ netif_rx(skb);
+
+- return 0;
++ return len;
+ }
+ EXPORT_SYMBOL_GPL(can_get_echo_skb);
+
+diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
+index ed8a2a7ce500..9ef501fd153f 100644
+--- a/drivers/net/can/flexcan.c
++++ b/drivers/net/can/flexcan.c
+@@ -599,7 +599,7 @@ static void flexcan_irq_bus_err(struct net_device *dev, u32 reg_esr)
+ if (tx_errors)
+ dev->stats.tx_errors++;
+
+- can_rx_offload_irq_queue_err_skb(&priv->offload, skb);
++ can_rx_offload_queue_tail(&priv->offload, skb);
+ }
+
+ static void flexcan_irq_state(struct net_device *dev, u32 reg_esr)
+@@ -639,7 +639,7 @@ static void flexcan_irq_state(struct net_device *dev, u32 reg_esr)
+ if (unlikely(new_state == CAN_STATE_BUS_OFF))
+ can_bus_off(dev);
+
+- can_rx_offload_irq_queue_err_skb(&priv->offload, skb);
++ can_rx_offload_queue_tail(&priv->offload, skb);
+ }
+
+ static inline struct flexcan_priv *rx_offload_to_priv(struct can_rx_offload *offload)
+diff --git a/drivers/net/can/rx-offload.c b/drivers/net/can/rx-offload.c
+index f394f77d7528..d227db45fec9 100644
+--- a/drivers/net/can/rx-offload.c
++++ b/drivers/net/can/rx-offload.c
+@@ -209,7 +209,54 @@ int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload)
+ }
+ EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_fifo);
+
+-int can_rx_offload_irq_queue_err_skb(struct can_rx_offload *offload, struct sk_buff *skb)
++int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
++ struct sk_buff *skb, u32 timestamp)
++{
++ struct can_rx_offload_cb *cb;
++ unsigned long flags;
++
++ if (skb_queue_len(&offload->skb_queue) >
++ offload->skb_queue_len_max)
++ return -ENOMEM;
++
++ cb = can_rx_offload_get_cb(skb);
++ cb->timestamp = timestamp;
++
++ spin_lock_irqsave(&offload->skb_queue.lock, flags);
++ __skb_queue_add_sort(&offload->skb_queue, skb, can_rx_offload_compare);
++ spin_unlock_irqrestore(&offload->skb_queue.lock, flags);
++
++ can_rx_offload_schedule(offload);
++
++ return 0;
++}
++EXPORT_SYMBOL_GPL(can_rx_offload_queue_sorted);
++
++unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload,
++ unsigned int idx, u32 timestamp)
++{
++ struct net_device *dev = offload->dev;
++ struct net_device_stats *stats = &dev->stats;
++ struct sk_buff *skb;
++ u8 len;
++ int err;
++
++ skb = __can_get_echo_skb(dev, idx, &len);
++ if (!skb)
++ return 0;
++
++ err = can_rx_offload_queue_sorted(offload, skb, timestamp);
++ if (err) {
++ stats->rx_errors++;
++ stats->tx_fifo_errors++;
++ }
++
++ return len;
++}
++EXPORT_SYMBOL_GPL(can_rx_offload_get_echo_skb);
++
++int can_rx_offload_queue_tail(struct can_rx_offload *offload,
++ struct sk_buff *skb)
+ {
+ if (skb_queue_len(&offload->skb_queue) >
+ offload->skb_queue_len_max)
+@@ -220,7 +267,7 @@ int can_rx_offload_irq_queue_err_skb(struct can_rx_offload *offload, struct sk_b
+
+ return 0;
+ }
+-EXPORT_SYMBOL_GPL(can_rx_offload_irq_queue_err_skb);
++EXPORT_SYMBOL_GPL(can_rx_offload_queue_tail);
+
+ static int can_rx_offload_init_queue(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight)
+ {
+diff --git a/drivers/net/can/spi/hi311x.c b/drivers/net/can/spi/hi311x.c
+index 53e320c92a8b..ddaf46239e39 100644
+--- a/drivers/net/can/spi/hi311x.c
++++ b/drivers/net/can/spi/hi311x.c
+@@ -760,7 +760,7 @@ static int hi3110_open(struct net_device *net)
+ {
+ struct hi3110_priv *priv = netdev_priv(net);
+ struct spi_device *spi = priv->spi;
+- unsigned long flags = IRQF_ONESHOT | IRQF_TRIGGER_RISING;
++ unsigned long flags = IRQF_ONESHOT | IRQF_TRIGGER_HIGH;
+ int ret;
+
+ ret = open_candev(net);
+diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
+index abbd2894f870..c421e2753c8c 100644
+--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
++++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
+@@ -360,7 +360,7 @@ static struct device_node *bcmgenet_mii_of_find_mdio(struct bcmgenet_priv *priv)
+ if (!compat)
+ return NULL;
+
+- priv->mdio_dn = of_find_compatible_node(dn, NULL, compat);
++ priv->mdio_dn = of_get_compatible_child(dn, compat);
+ kfree(compat);
+ if (!priv->mdio_dn) {
+ dev_err(kdev, "unable to find MDIO bus node\n");
+diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+index b26da0952a4d..a5381b091710 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
++++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+@@ -611,7 +611,6 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev)
+ MLX4_MAX_PORTS;
+ else
+ res_alloc->guaranteed[t] = 0;
+- res_alloc->res_free -= res_alloc->guaranteed[t];
+ break;
+ default:
+ break;
+diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
+index 50e2e10a9050..e069b310d6a6 100644
+--- a/drivers/net/usb/lan78xx.c
++++ b/drivers/net/usb/lan78xx.c
+@@ -37,6 +37,7 @@
+ #include <linux/irqchip/chained_irq.h>
+ #include <linux/microchipphy.h>
+ #include <linux/phy.h>
++#include <linux/of_net.h>
+ #include "lan78xx.h"
+
+ #define DRIVER_AUTHOR "WOOJUNG HUH <woojung.huh@microchip.com>"
+@@ -1645,34 +1646,31 @@ static void lan78xx_init_mac_address(struct lan78xx_net *dev)
+ addr[5] = (addr_hi >> 8) & 0xFF;
+
+ if (!is_valid_ether_addr(addr)) {
+- /* reading mac address from EEPROM or OTP */
+- if ((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
+- addr) == 0) ||
+- (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
+- addr) == 0)) {
+- if (is_valid_ether_addr(addr)) {
+- /* eeprom values are valid so use them */
+- netif_dbg(dev, ifup, dev->net,
+- "MAC address read from EEPROM");
+- } else {
+- /* generate random MAC */
+- random_ether_addr(addr);
+- netif_dbg(dev, ifup, dev->net,
+- "MAC address set to random addr");
+- }
+-
+- addr_lo = addr[0] | (addr[1] << 8) |
+- (addr[2] << 16) | (addr[3] << 24);
+- addr_hi = addr[4] | (addr[5] << 8);
+-
+- ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
+- ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
++ if (!eth_platform_get_mac_address(&dev->udev->dev, addr)) {
++ /* valid address present in Device Tree */
++ netif_dbg(dev, ifup, dev->net,
++ "MAC address read from Device Tree");
++ } else if (((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET,
++ ETH_ALEN, addr) == 0) ||
++ (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET,
++ ETH_ALEN, addr) == 0)) &&
++ is_valid_ether_addr(addr)) {
++ /* eeprom values are valid so use them */
++ netif_dbg(dev, ifup, dev->net,
++ "MAC address read from EEPROM");
+ } else {
+ /* generate random MAC */
+ random_ether_addr(addr);
+ netif_dbg(dev, ifup, dev->net,
+ "MAC address set to random addr");
+ }
++
++ addr_lo = addr[0] | (addr[1] << 8) |
++ (addr[2] << 16) | (addr[3] << 24);
++ addr_hi = addr[4] | (addr[5] << 8);
++
++ ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
++ ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
+ }
+
+ ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+index 083e5ce7eac7..cd6c5ece9a5d 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+@@ -6098,7 +6098,8 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg,
+ * for subsequent chanspecs.
+ */
+ channel->flags = IEEE80211_CHAN_NO_HT40 |
+- IEEE80211_CHAN_NO_80MHZ;
++ IEEE80211_CHAN_NO_80MHZ |
++ IEEE80211_CHAN_NO_160MHZ;
+ ch.bw = BRCMU_CHAN_BW_20;
+ cfg->d11inf.encchspec(&ch);
+ chaninfo = ch.chspec;
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+index b71a9d11a50f..cebf0ce76d27 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+@@ -590,7 +590,7 @@ static int iwl_mvm_config_ltr(struct iwl_mvm *mvm)
+ #define ACPI_WRDS_WIFI_DATA_SIZE (IWL_MVM_SAR_TABLE_SIZE + 2)
+ #define ACPI_EWRD_WIFI_DATA_SIZE ((IWL_MVM_SAR_PROFILE_NUM - 1) * \
+ IWL_MVM_SAR_TABLE_SIZE + 3)
+-#define ACPI_WGDS_WIFI_DATA_SIZE 18
++#define ACPI_WGDS_WIFI_DATA_SIZE 19
+ #define ACPI_WGDS_NUM_BANDS 2
+ #define ACPI_WGDS_TABLE_SIZE 3
+
+@@ -964,7 +964,7 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
+ IWL_DEBUG_RADIO(mvm, "Sending GEO_TX_POWER_LIMIT\n");
+
+ BUILD_BUG_ON(IWL_NUM_GEO_PROFILES * ACPI_WGDS_NUM_BANDS *
+- ACPI_WGDS_TABLE_SIZE != ACPI_WGDS_WIFI_DATA_SIZE);
++ ACPI_WGDS_TABLE_SIZE + 1 != ACPI_WGDS_WIFI_DATA_SIZE);
+
+ for (i = 0; i < IWL_NUM_GEO_PROFILES; i++) {
+ struct iwl_per_chain_offset *chain =
+@@ -997,6 +997,11 @@ static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm)
+ return -ENOENT;
+ }
+
++static int iwl_mvm_sar_get_wgds_table(struct iwl_mvm *mvm)
++{
++ return -ENOENT;
++}
++
+ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
+ {
+ return 0;
+@@ -1023,8 +1028,11 @@ static int iwl_mvm_sar_init(struct iwl_mvm *mvm)
+ IWL_DEBUG_RADIO(mvm,
+ "WRDS SAR BIOS table invalid or unavailable. (%d)\n",
+ ret);
+- /* if not available, don't fail and don't bother with EWRD */
+- return 0;
++ /*
++ * If not available, don't fail and don't bother with EWRD.
++ * Return 1 to tell that we can't use WGDS either.
++ */
++ return 1;
+ }
+
+ ret = iwl_mvm_sar_get_ewrd_table(mvm);
+@@ -1037,9 +1045,13 @@ static int iwl_mvm_sar_init(struct iwl_mvm *mvm)
+ /* choose profile 1 (WRDS) as default for both chains */
+ ret = iwl_mvm_sar_select_profile(mvm, 1, 1);
+
+- /* if we don't have profile 0 from BIOS, just skip it */
++ /*
++ * If we don't have profile 0 from BIOS, just skip it. This
++ * means that SAR Geo will not be enabled either, even if we
++ * have other valid profiles.
++ */
+ if (ret == -ENOENT)
+- return 0;
++ return 1;
+
+ return ret;
+ }
+@@ -1229,11 +1241,19 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
+ iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);
+
+ ret = iwl_mvm_sar_init(mvm);
+- if (ret)
+- goto error;
++ if (ret == 0) {
++ ret = iwl_mvm_sar_geo_init(mvm);
++ } else if (ret > 0 && !iwl_mvm_sar_get_wgds_table(mvm)) {
++ /*
++ * If basic SAR is not available, we check for WGDS,
++ * which should *not* be available either. If it is
++ * available, issue an error, because we can't use SAR
++ * Geo without basic SAR.
++ */
++ IWL_ERR(mvm, "BIOS contains WGDS but no WRDS\n");
++ }
+
+- ret = iwl_mvm_sar_geo_init(mvm);
+- if (ret)
++ if (ret < 0)
+ goto error;
+
+ iwl_mvm_leds_sync(mvm);
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+index 80a653950e86..77ed6ecf5ee5 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+@@ -328,8 +328,12 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
+ goto out;
+ }
+
+- if (changed)
+- *changed = (resp->status == MCC_RESP_NEW_CHAN_PROFILE);
++ if (changed) {
++ u32 status = le32_to_cpu(resp->status);
++
++ *changed = (status == MCC_RESP_NEW_CHAN_PROFILE ||
++ status == MCC_RESP_ILLEGAL);
++ }
+
+ regd = iwl_parse_nvm_mcc_info(mvm->trans->dev, mvm->cfg,
+ __le32_to_cpu(resp->n_channels),
+@@ -4189,10 +4193,6 @@ static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
+ sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL_AVG);
+ }
+
+- if (!fw_has_capa(&mvm->fw->ucode_capa,
+- IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
+- return;
+-
+ /* if beacon filtering isn't on mac80211 does it anyway */
+ if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER))
+ return;
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
+index fb25b6f29323..ca2d66ce8424 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
+@@ -732,9 +732,8 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
+ }
+
+ IWL_DEBUG_LAR(mvm,
+- "MCC response status: 0x%x. new MCC: 0x%x ('%c%c') change: %d n_chans: %d\n",
+- status, mcc, mcc >> 8, mcc & 0xff,
+- !!(status == MCC_RESP_NEW_CHAN_PROFILE), n_channels);
++ "MCC response status: 0x%x. new MCC: 0x%x ('%c%c') n_chans: %d\n",
++ status, mcc, mcc >> 8, mcc & 0xff, n_channels);
+
+ exit:
+ iwl_free_resp(&cmd);
+diff --git a/drivers/nfc/nfcmrvl/uart.c b/drivers/nfc/nfcmrvl/uart.c
+index 91162f8e0366..9a22056e8d9e 100644
+--- a/drivers/nfc/nfcmrvl/uart.c
++++ b/drivers/nfc/nfcmrvl/uart.c
+@@ -73,10 +73,9 @@ static int nfcmrvl_uart_parse_dt(struct device_node *node,
+ struct device_node *matched_node;
+ int ret;
+
+- matched_node = of_find_compatible_node(node, NULL, "marvell,nfc-uart");
++ matched_node = of_get_compatible_child(node, "marvell,nfc-uart");
+ if (!matched_node) {
+- matched_node = of_find_compatible_node(node, NULL,
+- "mrvl,nfc-uart");
++ matched_node = of_get_compatible_child(node, "mrvl,nfc-uart");
+ if (!matched_node)
+ return -ENODEV;
+ }
+diff --git a/drivers/of/base.c b/drivers/of/base.c
+index 63897531cd75..ce8a6e0c9b6a 100644
+--- a/drivers/of/base.c
++++ b/drivers/of/base.c
+@@ -737,6 +737,31 @@ struct device_node *of_get_next_available_child(const struct device_node *node,
+ }
+ EXPORT_SYMBOL(of_get_next_available_child);
+
++/**
++ * of_get_compatible_child - Find compatible child node
++ * @parent: parent node
++ * @compatible: compatible string
++ *
++ * Lookup child node whose compatible property contains the given compatible
++ * string.
++ *
++ * Returns a node pointer with refcount incremented, use of_node_put() on it
++ * when done; or NULL if not found.
++ */
++struct device_node *of_get_compatible_child(const struct device_node *parent,
++ const char *compatible)
++{
++ struct device_node *child;
++
++ for_each_child_of_node(parent, child) {
++ if (of_device_is_compatible(child, compatible))
++ break;
++ }
++
++ return child;
++}
++EXPORT_SYMBOL(of_get_compatible_child);
++
+ /**
+ * of_get_child_by_name - Find the child node by name for a given parent
+ * @node: parent node
+diff --git a/drivers/pci/endpoint/pci-ep-cfs.c b/drivers/pci/endpoint/pci-ep-cfs.c
+index 16cec66b1d0b..8fdb9d07c50a 100644
+--- a/drivers/pci/endpoint/pci-ep-cfs.c
++++ b/drivers/pci/endpoint/pci-ep-cfs.c
+@@ -97,16 +97,10 @@ static int pci_epc_epf_link(struct config_item *epc_item,
+ {
+ int ret;
+ u32 func_no = 0;
+- struct pci_epc *epc;
+- struct pci_epf *epf;
+ struct pci_epf_group *epf_group = to_pci_epf_group(epf_item);
+ struct pci_epc_group *epc_group = to_pci_epc_group(epc_item);
+-
+- epc = epc_group->epc;
+- epf = epf_group->epf;
+- ret = pci_epc_add_epf(epc, epf);
+- if (ret)
+- goto err_add_epf;
++ struct pci_epc *epc = epc_group->epc;
++ struct pci_epf *epf = epf_group->epf;
+
+ func_no = find_first_zero_bit(&epc_group->function_num_map,
+ BITS_PER_LONG);
+@@ -116,6 +110,10 @@ static int pci_epc_epf_link(struct config_item *epc_item,
+ set_bit(func_no, &epc_group->function_num_map);
+ epf->func_no = func_no;
+
++ ret = pci_epc_add_epf(epc, epf);
++ if (ret)
++ goto err_add_epf;
++
+ ret = pci_epf_bind(epf);
+ if (ret)
+ goto err_epf_bind;
+diff --git a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c
+index 66ed70c12733..6c43322dbb97 100644
+--- a/drivers/pinctrl/meson/pinctrl-meson.c
++++ b/drivers/pinctrl/meson/pinctrl-meson.c
+@@ -273,7 +273,7 @@ static int meson_pinconf_set(struct pinctrl_dev *pcdev, unsigned int pin,
+ dev_dbg(pc->dev, "pin %u: disable bias\n", pin);
+
+ meson_calc_reg_and_bit(bank, pin, REG_PULL, &reg, &bit);
+- ret = regmap_update_bits(pc->reg_pull, reg,
++ ret = regmap_update_bits(pc->reg_pullen, reg,
+ BIT(bit), 0);
+ if (ret)
+ return ret;
+diff --git a/drivers/power/supply/twl4030_charger.c b/drivers/power/supply/twl4030_charger.c
+index a5915f498eea..0cc12bfe7b02 100644
+--- a/drivers/power/supply/twl4030_charger.c
++++ b/drivers/power/supply/twl4030_charger.c
+@@ -996,12 +996,13 @@ static int twl4030_bci_probe(struct platform_device *pdev)
+ if (bci->dev->of_node) {
+ struct device_node *phynode;
+
+- phynode = of_find_compatible_node(bci->dev->of_node->parent,
+- NULL, "ti,twl4030-usb");
++ phynode = of_get_compatible_child(bci->dev->of_node->parent,
++ "ti,twl4030-usb");
+ if (phynode) {
+ bci->usb_nb.notifier_call = twl4030_bci_usb_ncb;
+ bci->transceiver = devm_usb_get_phy_by_node(
+ bci->dev, phynode, &bci->usb_nb);
++ of_node_put(phynode);
+ if (IS_ERR(bci->transceiver)) {
+ ret = PTR_ERR(bci->transceiver);
+ if (ret == -EPROBE_DEFER)
+diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
+index ac6e6a6a194c..ae6506a8b4f5 100644
+--- a/drivers/rtc/rtc-omap.c
++++ b/drivers/rtc/rtc-omap.c
+@@ -823,7 +823,8 @@ static int omap_rtc_probe(struct platform_device *pdev)
+ rtc->pctldev = pinctrl_register(&rtc_pinctrl_desc, &pdev->dev, rtc);
+ if (IS_ERR(rtc->pctldev)) {
+ dev_err(&pdev->dev, "Couldn't register pinctrl driver\n");
+- return PTR_ERR(rtc->pctldev);
++ ret = PTR_ERR(rtc->pctldev);
++ goto err;
+ }
+
+ if (rtc->is_pmic_controller) {
+diff --git a/drivers/rtc/rtc-pcf2127.c b/drivers/rtc/rtc-pcf2127.c
+index f33447c5db85..9f1b14bf91ae 100644
+--- a/drivers/rtc/rtc-pcf2127.c
++++ b/drivers/rtc/rtc-pcf2127.c
+@@ -248,6 +248,9 @@ static int pcf2127_i2c_gather_write(void *context,
+ memcpy(buf + 1, val, val_size);
+
+ ret = i2c_master_send(client, buf, val_size + 1);
++
++ kfree(buf);
++
+ if (ret != val_size + 1)
+ return ret < 0 ? ret : -EIO;
+
+diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
+index 0475f9685a41..904fc9c37fde 100644
+--- a/drivers/tty/n_tty.c
++++ b/drivers/tty/n_tty.c
+@@ -154,17 +154,28 @@ static inline unsigned char *echo_buf_addr(struct n_tty_data *ldata, size_t i)
+ return &ldata->echo_buf[i & (N_TTY_BUF_SIZE - 1)];
+ }
+
++/* If we are not echoing the data, perhaps this is a secret so erase it */
++static void zero_buffer(struct tty_struct *tty, u8 *buffer, int size)
++{
++ bool icanon = !!L_ICANON(tty);
++ bool no_echo = !L_ECHO(tty);
++
++ if (icanon && no_echo)
++ memset(buffer, 0x00, size);
++}
++
+ static int tty_copy_to_user(struct tty_struct *tty, void __user *to,
+ size_t tail, size_t n)
+ {
+ struct n_tty_data *ldata = tty->disc_data;
+ size_t size = N_TTY_BUF_SIZE - tail;
+- const void *from = read_buf_addr(ldata, tail);
++ void *from = read_buf_addr(ldata, tail);
+ int uncopied;
+
+ if (n > size) {
+ tty_audit_add_data(tty, from, size);
+ uncopied = copy_to_user(to, from, size);
++ zero_buffer(tty, from, size - uncopied);
+ if (uncopied)
+ return uncopied;
+ to += size;
+@@ -173,7 +184,9 @@ static int tty_copy_to_user(struct tty_struct *tty, void __user *to,
+ }
+
+ tty_audit_add_data(tty, from, n);
+- return copy_to_user(to, from, n);
++ uncopied = copy_to_user(to, from, n);
++ zero_buffer(tty, from, n - uncopied);
++ return uncopied;
+ }
+
+ /**
+@@ -1962,11 +1975,12 @@ static int copy_from_read_buf(struct tty_struct *tty,
+ n = min(head - ldata->read_tail, N_TTY_BUF_SIZE - tail);
+ n = min(*nr, n);
+ if (n) {
+- const unsigned char *from = read_buf_addr(ldata, tail);
++ unsigned char *from = read_buf_addr(ldata, tail);
+ retval = copy_to_user(*b, from, n);
+ n -= retval;
+ is_eof = n == 1 && *from == EOF_CHAR(tty);
+ tty_audit_add_data(tty, from, n);
++ zero_buffer(tty, from, n);
+ smp_store_release(&ldata->read_tail, ldata->read_tail + n);
+ /* Turn single EOF into zero-length read */
+ if (L_EXTPROC(tty) && ldata->icanon && is_eof &&
+diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
+index 677fa99b7747..217114227f8d 100644
+--- a/drivers/tty/tty_buffer.c
++++ b/drivers/tty/tty_buffer.c
+@@ -467,11 +467,15 @@ receive_buf(struct tty_port *port, struct tty_buffer *head, int count)
+ {
+ unsigned char *p = char_buf_ptr(head, head->read);
+ char *f = NULL;
++ int n;
+
+ if (~head->flags & TTYB_NORMAL)
+ f = flag_buf_ptr(head, head->read);
+
+- return port->client_ops->receive_buf(port, p, f, count);
++ n = port->client_ops->receive_buf(port, p, f, count);
++ if (n > 0)
++ memset(p, 0, n);
++ return n;
+ }
+
+ /**
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index a9db0887edca..638dc6f66d70 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -2815,7 +2815,9 @@ static int hub_port_reset(struct usb_hub *hub, int port1,
+ USB_PORT_FEAT_C_BH_PORT_RESET);
+ usb_clear_port_feature(hub->hdev, port1,
+ USB_PORT_FEAT_C_PORT_LINK_STATE);
+- usb_clear_port_feature(hub->hdev, port1,
++
++ if (udev)
++ usb_clear_port_feature(hub->hdev, port1,
+ USB_PORT_FEAT_C_CONNECTION);
+
+ /*
+diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
+index 8b323a360e03..783d16a53466 100644
+--- a/drivers/usb/dwc3/core.c
++++ b/drivers/usb/dwc3/core.c
+@@ -1276,6 +1276,7 @@ static int dwc3_probe(struct platform_device *pdev)
+
+ err5:
+ dwc3_event_buffers_cleanup(dwc);
++ dwc3_ulpi_exit(dwc);
+
+ err4:
+ dwc3_free_scratch_buffers(dwc);
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index d7fae66a0681..ac8d619ff887 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -1088,7 +1088,7 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep,
+ /* Now prepare one extra TRB to align transfer size */
+ trb = &dep->trb_pool[dep->trb_enqueue];
+ __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr,
+- maxp - rem, false, 0,
++ maxp - rem, false, 1,
+ req->request.stream_id,
+ req->request.short_not_ok,
+ req->request.no_interrupt);
+@@ -1120,7 +1120,7 @@ static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep,
+ /* Now prepare one extra TRB to align transfer size */
+ trb = &dep->trb_pool[dep->trb_enqueue];
+ __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, maxp - rem,
+- false, 0, req->request.stream_id,
++ false, 1, req->request.stream_id,
+ req->request.short_not_ok,
+ req->request.no_interrupt);
+ } else if (req->request.zero && req->request.length &&
+@@ -1136,7 +1136,7 @@ static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep,
+ /* Now prepare one extra TRB to handle ZLP */
+ trb = &dep->trb_pool[dep->trb_enqueue];
+ __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, 0,
+- false, 0, req->request.stream_id,
++ false, 1, req->request.stream_id,
+ req->request.short_not_ok,
+ req->request.no_interrupt);
+ } else {
+@@ -2249,7 +2249,7 @@ static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
+ * with one TRB pending in the ring. We need to manually clear HWO bit
+ * from that TRB.
+ */
+- if ((req->zero || req->unaligned) && (trb->ctrl & DWC3_TRB_CTRL_HWO)) {
++ if ((req->zero || req->unaligned) && !(trb->ctrl & DWC3_TRB_CTRL_CHN)) {
+ trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
+ return 1;
+ }
+diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
+index d2a9767a8e9c..6b2f6c41e2a9 100644
+--- a/drivers/usb/host/xhci-hub.c
++++ b/drivers/usb/host/xhci-hub.c
+@@ -895,7 +895,7 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
+ status |= USB_PORT_STAT_SUSPEND;
+ }
+ if ((raw_port_status & PORT_PLS_MASK) == XDEV_RESUME &&
+- !DEV_SUPERSPEED_ANY(raw_port_status)) {
++ !DEV_SUPERSPEED_ANY(raw_port_status) && hcd->speed < HCD_USB3) {
+ if ((raw_port_status & PORT_RESET) ||
+ !(raw_port_status & PORT_PE))
+ return 0xffffffff;
+@@ -941,7 +941,7 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
+ time_left = wait_for_completion_timeout(
+ &bus_state->rexit_done[wIndex],
+ msecs_to_jiffies(
+- XHCI_MAX_REXIT_TIMEOUT));
++ XHCI_MAX_REXIT_TIMEOUT_MS));
+ spin_lock_irqsave(&xhci->lock, flags);
+
+ if (time_left) {
+@@ -955,7 +955,7 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
+ } else {
+ int port_status = readl(port_array[wIndex]);
+ xhci_warn(xhci, "Port resume took longer than %i msec, port status = 0x%x\n",
+- XHCI_MAX_REXIT_TIMEOUT,
++ XHCI_MAX_REXIT_TIMEOUT_MS,
+ port_status);
+ status |= USB_PORT_STAT_SUSPEND;
+ clear_bit(wIndex, &bus_state->rexit_ports);
+@@ -1481,13 +1481,16 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
+ __le32 __iomem **port_array;
+ struct xhci_bus_state *bus_state;
+ unsigned long flags;
++ u32 portsc_buf[USB_MAXCHILDREN];
++ bool wake_enabled;
+
+ max_ports = xhci_get_ports(hcd, &port_array);
+ bus_state = &xhci->bus_state[hcd_index(hcd)];
++ wake_enabled = hcd->self.root_hub->do_remote_wakeup;
+
+ spin_lock_irqsave(&xhci->lock, flags);
+
+- if (hcd->self.root_hub->do_remote_wakeup) {
++ if (wake_enabled) {
+ if (bus_state->resuming_ports || /* USB2 */
+ bus_state->port_remote_wakeup) { /* USB3 */
+ spin_unlock_irqrestore(&xhci->lock, flags);
+@@ -1495,26 +1498,36 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
+ return -EBUSY;
+ }
+ }
+-
+- port_index = max_ports;
++ /*
++ * Prepare ports for suspend, but don't write anything before all ports
++ * are checked and we know bus suspend can proceed
++ */
+ bus_state->bus_suspended = 0;
++ port_index = max_ports;
+ while (port_index--) {
+- /* suspend the port if the port is not suspended */
+ u32 t1, t2;
+- int slot_id;
+
+ t1 = readl(port_array[port_index]);
+ t2 = xhci_port_state_to_neutral(t1);
++ portsc_buf[port_index] = 0;
+
+- if ((t1 & PORT_PE) && !(t1 & PORT_PLS_MASK)) {
+- xhci_dbg(xhci, "port %d not suspended\n", port_index);
+- slot_id = xhci_find_slot_id_by_port(hcd, xhci,
+- port_index + 1);
+- if (slot_id) {
++ /* Bail out if a USB3 port has a new device in link training */
++ if ((t1 & PORT_PLS_MASK) == XDEV_POLLING) {
++ bus_state->bus_suspended = 0;
++ spin_unlock_irqrestore(&xhci->lock, flags);
++ xhci_dbg(xhci, "Bus suspend bailout, port in polling\n");
++ return -EBUSY;
++ }
++
++ /* suspend ports in U0, or bail out for new connect changes */
++ if ((t1 & PORT_PE) && (t1 & PORT_PLS_MASK) == XDEV_U0) {
++ if ((t1 & PORT_CSC) && wake_enabled) {
++ bus_state->bus_suspended = 0;
+ spin_unlock_irqrestore(&xhci->lock, flags);
+- xhci_stop_device(xhci, slot_id, 1);
+- spin_lock_irqsave(&xhci->lock, flags);
++ xhci_dbg(xhci, "Bus suspend bailout, port connect change\n");
++ return -EBUSY;
+ }
++ xhci_dbg(xhci, "port %d not suspended\n", port_index);
+ t2 &= ~PORT_PLS_MASK;
+ t2 |= PORT_LINK_STROBE | XDEV_U3;
+ set_bit(port_index, &bus_state->bus_suspended);
+@@ -1523,7 +1536,7 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
+ * including the USB 3.0 roothub, but only if CONFIG_PM
+ * is enabled, so also enable remote wake here.
+ */
+- if (hcd->self.root_hub->do_remote_wakeup) {
++ if (wake_enabled) {
+ if (t1 & PORT_CONNECT) {
+ t2 |= PORT_WKOC_E | PORT_WKDISC_E;
+ t2 &= ~PORT_WKCONN_E;
+@@ -1543,7 +1556,26 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
+
+ t1 = xhci_port_state_to_neutral(t1);
+ if (t1 != t2)
+- writel(t2, port_array[port_index]);
++ portsc_buf[port_index] = t2;
++ }
++
++ /* write port settings, stopping and suspending ports if needed */
++ port_index = max_ports;
++ while (port_index--) {
++ if (!portsc_buf[port_index])
++ continue;
++ if (test_bit(port_index, &bus_state->bus_suspended)) {
++ int slot_id;
++
++ slot_id = xhci_find_slot_id_by_port(hcd, xhci,
++ port_index + 1);
++ if (slot_id) {
++ spin_unlock_irqrestore(&xhci->lock, flags);
++ xhci_stop_device(xhci, slot_id, 1);
++ spin_lock_irqsave(&xhci->lock, flags);
++ }
++ }
++ writel(portsc_buf[port_index], port_array[port_index]);
+ }
+ hcd->state = HC_STATE_SUSPENDED;
+ bus_state->next_statechange = jiffies + msecs_to_jiffies(10);
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index 9218f506f8e3..4b07b6859b4c 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -236,6 +236,11 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ if (pdev->vendor == PCI_VENDOR_ID_TI && pdev->device == 0x8241)
+ xhci->quirks |= XHCI_LIMIT_ENDPOINT_INTERVAL_7;
+
++ if ((pdev->vendor == PCI_VENDOR_ID_BROADCOM ||
++ pdev->vendor == PCI_VENDOR_ID_CAVIUM) &&
++ pdev->device == 0x9026)
++ xhci->quirks |= XHCI_RESET_PLL_ON_DISCONNECT;
++
+ if (xhci->quirks & XHCI_RESET_ON_RESUME)
+ xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+ "QUIRK: Resetting on resume");
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 6996235e34a9..aa230706b875 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -1568,6 +1568,35 @@ static void handle_device_notification(struct xhci_hcd *xhci,
+ usb_wakeup_notification(udev->parent, udev->portnum);
+ }
+
++/*
++ * Quirk handler for errata seen on Cavium ThunderX2 processor XHCI
++ * Controller.
++ * As per ThunderX2 errata-129, a USB 2 device may come up as USB 1
++ * if a connection to a USB 1 device is followed by another connection
++ * to a USB 2 device.
++ *
++ * Reset the PHY after the USB device is disconnected if device speed
++ * is less than HCD_USB3.
++ * Retry the reset sequence max of 4 times checking the PLL lock status.
++ *
++ */
++static void xhci_cavium_reset_phy_quirk(struct xhci_hcd *xhci)
++{
++ struct usb_hcd *hcd = xhci_to_hcd(xhci);
++ u32 pll_lock_check;
++ u32 retry_count = 4;
++
++ do {
++ /* Assert PHY reset */
++ writel(0x6F, hcd->regs + 0x1048);
++ udelay(10);
++ /* De-assert the PHY reset */
++ writel(0x7F, hcd->regs + 0x1048);
++ udelay(200);
++ pll_lock_check = readl(hcd->regs + 0x1070);
++ } while (!(pll_lock_check & 0x1) && --retry_count);
++}
++
+ static void handle_port_status(struct xhci_hcd *xhci,
+ union xhci_trb *event)
+ {
+@@ -1717,7 +1746,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
+ * RExit to a disconnect state). If so, let the the driver know it's
+ * out of the RExit state.
+ */
+- if (!DEV_SUPERSPEED_ANY(portsc) &&
++ if (!DEV_SUPERSPEED_ANY(portsc) && hcd->speed < HCD_USB3 &&
+ test_and_clear_bit(faked_port_index,
+ &bus_state->rexit_ports)) {
+ complete(&bus_state->rexit_done[faked_port_index]);
+@@ -1725,9 +1754,13 @@ static void handle_port_status(struct xhci_hcd *xhci,
+ goto cleanup;
+ }
+
+- if (hcd->speed < HCD_USB3)
++ if (hcd->speed < HCD_USB3) {
+ xhci_test_and_clear_bit(xhci, port_array, faked_port_index,
+ PORT_PLC);
++ if ((xhci->quirks & XHCI_RESET_PLL_ON_DISCONNECT) &&
++ (portsc & PORT_CSC) && !(portsc & PORT_CONNECT))
++ xhci_cavium_reset_phy_quirk(xhci);
++ }
+
+ cleanup:
+ /* Update event ring dequeue pointer before dropping the lock */
+@@ -2335,6 +2368,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
+ goto cleanup;
+ case COMP_RING_UNDERRUN:
+ case COMP_RING_OVERRUN:
++ case COMP_STOPPED_LENGTH_INVALID:
+ goto cleanup;
+ default:
+ xhci_err(xhci, "ERROR Transfer event for unknown stream ring slot %u ep %u\n",
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index 64ddba3f79a9..faf048682194 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -43,8 +43,8 @@ static int link_quirk;
+ module_param(link_quirk, int, S_IRUGO | S_IWUSR);
+ MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");
+
+-static unsigned int quirks;
+-module_param(quirks, uint, S_IRUGO);
++static unsigned long long quirks;
++module_param(quirks, ullong, S_IRUGO);
+ MODULE_PARM_DESC(quirks, "Bit flags for quirks to be enabled as default");
+
+ static bool td_on_ring(struct xhci_td *td, struct xhci_ring *ring)
+@@ -4956,7 +4956,7 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
+ return retval;
+ xhci_dbg(xhci, "Called HCD init\n");
+
+- xhci_info(xhci, "hcc params 0x%08x hci version 0x%x quirks 0x%08x\n",
++ xhci_info(xhci, "hcc params 0x%08x hci version 0x%x quirks 0x%016llx\n",
+ xhci->hcc_params, xhci->hci_version, xhci->quirks);
+
+ return 0;
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index 11232e62b898..74ba20556020 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -1684,7 +1684,7 @@ struct xhci_bus_state {
+ * It can take up to 20 ms to transition from RExit to U0 on the
+ * Intel Lynx Point LP xHCI host.
+ */
+-#define XHCI_MAX_REXIT_TIMEOUT (20 * 1000)
++#define XHCI_MAX_REXIT_TIMEOUT_MS 20
+
+ static inline unsigned int hcd_index(struct usb_hcd *hcd)
+ {
+@@ -1794,12 +1794,12 @@ struct xhci_hcd {
+ #define XHCI_STATE_DYING (1 << 0)
+ #define XHCI_STATE_HALTED (1 << 1)
+ #define XHCI_STATE_REMOVING (1 << 2)
+- unsigned int quirks;
+-#define XHCI_LINK_TRB_QUIRK (1 << 0)
+-#define XHCI_RESET_EP_QUIRK (1 << 1)
+-#define XHCI_NEC_HOST (1 << 2)
+-#define XHCI_AMD_PLL_FIX (1 << 3)
+-#define XHCI_SPURIOUS_SUCCESS (1 << 4)
++ unsigned long long quirks;
++#define XHCI_LINK_TRB_QUIRK BIT_ULL(0)
++#define XHCI_RESET_EP_QUIRK BIT_ULL(1)
++#define XHCI_NEC_HOST BIT_ULL(2)
++#define XHCI_AMD_PLL_FIX BIT_ULL(3)
++#define XHCI_SPURIOUS_SUCCESS BIT_ULL(4)
+ /*
+ * Certain Intel host controllers have a limit to the number of endpoint
+ * contexts they can handle. Ideally, they would signal that they can't handle
+@@ -1809,33 +1809,36 @@ struct xhci_hcd {
+ * commands, reset device commands, disable slot commands, and address device
+ * commands.
+ */
+-#define XHCI_EP_LIMIT_QUIRK (1 << 5)
+-#define XHCI_BROKEN_MSI (1 << 6)
+-#define XHCI_RESET_ON_RESUME (1 << 7)
+-#define XHCI_SW_BW_CHECKING (1 << 8)
+-#define XHCI_AMD_0x96_HOST (1 << 9)
+-#define XHCI_TRUST_TX_LENGTH (1 << 10)
+-#define XHCI_LPM_SUPPORT (1 << 11)
+-#define XHCI_INTEL_HOST (1 << 12)
+-#define XHCI_SPURIOUS_REBOOT (1 << 13)
+-#define XHCI_COMP_MODE_QUIRK (1 << 14)
+-#define XHCI_AVOID_BEI (1 << 15)
+-#define XHCI_PLAT (1 << 16)
+-#define XHCI_SLOW_SUSPEND (1 << 17)
+-#define XHCI_SPURIOUS_WAKEUP (1 << 18)
++#define XHCI_EP_LIMIT_QUIRK BIT_ULL(5)
++#define XHCI_BROKEN_MSI BIT_ULL(6)
++#define XHCI_RESET_ON_RESUME BIT_ULL(7)
++#define XHCI_SW_BW_CHECKING BIT_ULL(8)
++#define XHCI_AMD_0x96_HOST BIT_ULL(9)
++#define XHCI_TRUST_TX_LENGTH BIT_ULL(10)
++#define XHCI_LPM_SUPPORT BIT_ULL(11)
++#define XHCI_INTEL_HOST BIT_ULL(12)
++#define XHCI_SPURIOUS_REBOOT BIT_ULL(13)
++#define XHCI_COMP_MODE_QUIRK BIT_ULL(14)
++#define XHCI_AVOID_BEI BIT_ULL(15)
++#define XHCI_PLAT BIT_ULL(16)
++#define XHCI_SLOW_SUSPEND BIT_ULL(17)
++#define XHCI_SPURIOUS_WAKEUP BIT_ULL(18)
+ /* For controllers with a broken beyond repair streams implementation */
+-#define XHCI_BROKEN_STREAMS (1 << 19)
+-#define XHCI_PME_STUCK_QUIRK (1 << 20)
+-#define XHCI_MTK_HOST (1 << 21)
+-#define XHCI_SSIC_PORT_UNUSED (1 << 22)
+-#define XHCI_NO_64BIT_SUPPORT (1 << 23)
+-#define XHCI_MISSING_CAS (1 << 24)
++#define XHCI_BROKEN_STREAMS BIT_ULL(19)
++#define XHCI_PME_STUCK_QUIRK BIT_ULL(20)
++#define XHCI_MTK_HOST BIT_ULL(21)
++#define XHCI_SSIC_PORT_UNUSED BIT_ULL(22)
++#define XHCI_NO_64BIT_SUPPORT BIT_ULL(23)
++#define XHCI_MISSING_CAS BIT_ULL(24)
+ /* For controller with a broken Port Disable implementation */
+-#define XHCI_BROKEN_PORT_PED (1 << 25)
+-#define XHCI_LIMIT_ENDPOINT_INTERVAL_7 (1 << 26)
+-#define XHCI_U2_DISABLE_WAKE (1 << 27)
+-#define XHCI_ASMEDIA_MODIFY_FLOWCONTROL (1 << 28)
+-#define XHCI_SUSPEND_DELAY (1 << 30)
++#define XHCI_BROKEN_PORT_PED BIT_ULL(25)
++#define XHCI_LIMIT_ENDPOINT_INTERVAL_7 BIT_ULL(26)
++#define XHCI_U2_DISABLE_WAKE BIT_ULL(27)
++#define XHCI_ASMEDIA_MODIFY_FLOWCONTROL BIT_ULL(28)
++#define XHCI_HW_LPM_DISABLE BIT_ULL(29)
++#define XHCI_SUSPEND_DELAY BIT_ULL(30)
++#define XHCI_INTEL_USB_ROLE_SW BIT_ULL(31)
++#define XHCI_RESET_PLL_ON_DISCONNECT BIT_ULL(34)
+
+ unsigned int num_active_eps;
+ unsigned int limit_active_eps;
+diff --git a/fs/9p/vfs_dir.c b/fs/9p/vfs_dir.c
+index b0405d6aac85..48db9a9f13f9 100644
+--- a/fs/9p/vfs_dir.c
++++ b/fs/9p/vfs_dir.c
+@@ -76,15 +76,6 @@ static inline int dt_type(struct p9_wstat *mistat)
+ return rettype;
+ }
+
+-static void p9stat_init(struct p9_wstat *stbuf)
+-{
+- stbuf->name = NULL;
+- stbuf->uid = NULL;
+- stbuf->gid = NULL;
+- stbuf->muid = NULL;
+- stbuf->extension = NULL;
+-}
+-
+ /**
+ * v9fs_alloc_rdir_buf - Allocate buffer used for read and readdir
+ * @filp: opened file structure
+@@ -145,12 +136,10 @@ static int v9fs_dir_readdir(struct file *file, struct dir_context *ctx)
+ rdir->tail = n;
+ }
+ while (rdir->head < rdir->tail) {
+- p9stat_init(&st);
+ err = p9stat_read(fid->clnt, rdir->buf + rdir->head,
+ rdir->tail - rdir->head, &st);
+ if (err) {
+ p9_debug(P9_DEBUG_VFS, "returned %d\n", err);
+- p9stat_free(&st);
+ return -EIO;
+ }
+ reclen = st.size+2;
+diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c
+index 9a69392f1fb3..d81c148682e7 100644
+--- a/fs/bfs/inode.c
++++ b/fs/bfs/inode.c
+@@ -350,7 +350,8 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent)
+
+ s->s_magic = BFS_MAGIC;
+
+- if (le32_to_cpu(bfs_sb->s_start) > le32_to_cpu(bfs_sb->s_end)) {
++ if (le32_to_cpu(bfs_sb->s_start) > le32_to_cpu(bfs_sb->s_end) ||
++ le32_to_cpu(bfs_sb->s_start) < BFS_BSIZE) {
+ printf("Superblock is corrupted\n");
+ goto out1;
+ }
+@@ -359,9 +360,11 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent)
+ sizeof(struct bfs_inode)
+ + BFS_ROOT_INO - 1;
+ imap_len = (info->si_lasti / 8) + 1;
+- info->si_imap = kzalloc(imap_len, GFP_KERNEL);
+- if (!info->si_imap)
++ info->si_imap = kzalloc(imap_len, GFP_KERNEL | __GFP_NOWARN);
++ if (!info->si_imap) {
++ printf("Cannot allocate %u bytes\n", imap_len);
+ goto out1;
++ }
+ for (i = 0; i < BFS_ROOT_INO; i++)
+ set_bit(i, info->si_imap);
+
+diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
+index 28d6c65c8bb3..057be88eb1b4 100644
+--- a/fs/gfs2/ops_fstype.c
++++ b/fs/gfs2/ops_fstype.c
+@@ -72,13 +72,13 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb)
+ if (!sdp)
+ return NULL;
+
+- sb->s_fs_info = sdp;
+ sdp->sd_vfs = sb;
+ sdp->sd_lkstats = alloc_percpu(struct gfs2_pcpu_lkstats);
+ if (!sdp->sd_lkstats) {
+ kfree(sdp);
+ return NULL;
+ }
++ sb->s_fs_info = sdp;
+
+ set_bit(SDF_NOJOURNALID, &sdp->sd_flags);
+ gfs2_tune_init(&sdp->sd_tune);
+diff --git a/fs/namei.c b/fs/namei.c
+index 0b46b858cd42..d1e467b7b9de 100644
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -904,6 +904,8 @@ static inline void put_link(struct nameidata *nd)
+
+ int sysctl_protected_symlinks __read_mostly = 0;
+ int sysctl_protected_hardlinks __read_mostly = 0;
++int sysctl_protected_fifos __read_mostly;
++int sysctl_protected_regular __read_mostly;
+
+ /**
+ * may_follow_link - Check symlink following for unsafe situations
+@@ -1017,6 +1019,45 @@ static int may_linkat(struct path *link)
+ return -EPERM;
+ }
+
++/**
++ * may_create_in_sticky - Check whether an O_CREAT open in a sticky directory
++ * should be allowed, or not, on files that already
++ * exist.
++ * @dir: the sticky parent directory
++ * @inode: the inode of the file to open
++ *
++ * Block an O_CREAT open of a FIFO (or a regular file) when:
++ * - sysctl_protected_fifos (or sysctl_protected_regular) is enabled
++ * - the file already exists
++ * - we are in a sticky directory
++ * - we don't own the file
++ * - the owner of the directory doesn't own the file
++ * - the directory is world writable
++ * If the sysctl_protected_fifos (or sysctl_protected_regular) is set to 2
++ * the directory doesn't have to be world writable: being group writable will
++ * be enough.
++ *
++ * Returns 0 if the open is allowed, -ve on error.
++ */
++static int may_create_in_sticky(struct dentry * const dir,
++ struct inode * const inode)
++{
++ if ((!sysctl_protected_fifos && S_ISFIFO(inode->i_mode)) ||
++ (!sysctl_protected_regular && S_ISREG(inode->i_mode)) ||
++ likely(!(dir->d_inode->i_mode & S_ISVTX)) ||
++ uid_eq(inode->i_uid, dir->d_inode->i_uid) ||
++ uid_eq(current_fsuid(), inode->i_uid))
++ return 0;
++
++ if (likely(dir->d_inode->i_mode & 0002) ||
++ (dir->d_inode->i_mode & 0020 &&
++ ((sysctl_protected_fifos >= 2 && S_ISFIFO(inode->i_mode)) ||
++ (sysctl_protected_regular >= 2 && S_ISREG(inode->i_mode))))) {
++ return -EACCES;
++ }
++ return 0;
++}
++
+ static __always_inline
+ const char *get_link(struct nameidata *nd)
+ {
+@@ -3355,9 +3396,15 @@ finish_open:
+ if (error)
+ return error;
+ audit_inode(nd->name, nd->path.dentry, 0);
+- error = -EISDIR;
+- if ((open_flag & O_CREAT) && d_is_dir(nd->path.dentry))
+- goto out;
++ if (open_flag & O_CREAT) {
++ error = -EISDIR;
++ if (d_is_dir(nd->path.dentry))
++ goto out;
++ error = may_create_in_sticky(dir,
++ d_backing_inode(nd->path.dentry));
++ if (unlikely(error))
++ goto out;
++ }
+ error = -ENOTDIR;
+ if ((nd->flags & LOOKUP_DIRECTORY) && !d_can_lookup(nd->path.dentry))
+ goto out;
+diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
+index 61f1cf2d9f44..c0c0b992210e 100644
+--- a/include/linux/can/dev.h
++++ b/include/linux/can/dev.h
+@@ -163,6 +163,7 @@ void can_change_state(struct net_device *dev, struct can_frame *cf,
+
+ void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
+ unsigned int idx);
++struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr);
+ unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx);
+ void can_free_echo_skb(struct net_device *dev, unsigned int idx);
+
+diff --git a/include/linux/can/rx-offload.h b/include/linux/can/rx-offload.h
+index cb31683bbe15..8268811a697e 100644
+--- a/include/linux/can/rx-offload.h
++++ b/include/linux/can/rx-offload.h
+@@ -41,7 +41,12 @@ int can_rx_offload_add_timestamp(struct net_device *dev, struct can_rx_offload *
+ int can_rx_offload_add_fifo(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight);
+ int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload, u64 reg);
+ int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload);
+-int can_rx_offload_irq_queue_err_skb(struct can_rx_offload *offload, struct sk_buff *skb);
++int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
++ struct sk_buff *skb, u32 timestamp);
++unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload,
++ unsigned int idx, u32 timestamp);
++int can_rx_offload_queue_tail(struct can_rx_offload *offload,
++ struct sk_buff *skb);
+ void can_rx_offload_reset(struct can_rx_offload *offload);
+ void can_rx_offload_del(struct can_rx_offload *offload);
+ void can_rx_offload_enable(struct can_rx_offload *offload);
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index 7374639f0aa0..f6a577edec67 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -72,6 +72,8 @@ extern struct inodes_stat_t inodes_stat;
+ extern int leases_enable, lease_break_time;
+ extern int sysctl_protected_symlinks;
+ extern int sysctl_protected_hardlinks;
++extern int sysctl_protected_fifos;
++extern int sysctl_protected_regular;
+
+ typedef __kernel_rwf_t rwf_t;
+
+diff --git a/include/linux/integrity.h b/include/linux/integrity.h
+index c2d6082a1a4c..858d3f4a2241 100644
+--- a/include/linux/integrity.h
++++ b/include/linux/integrity.h
+@@ -14,6 +14,7 @@
+
+ enum integrity_status {
+ INTEGRITY_PASS = 0,
++ INTEGRITY_PASS_IMMUTABLE,
+ INTEGRITY_FAIL,
+ INTEGRITY_NOLABEL,
+ INTEGRITY_NOXATTRS,
+diff --git a/include/linux/of.h b/include/linux/of.h
+index b240ed69dc96..70b7dacf9238 100644
+--- a/include/linux/of.h
++++ b/include/linux/of.h
+@@ -288,6 +288,8 @@ extern struct device_node *of_get_next_child(const struct device_node *node,
+ extern struct device_node *of_get_next_available_child(
+ const struct device_node *node, struct device_node *prev);
+
++extern struct device_node *of_get_compatible_child(const struct device_node *parent,
++ const char *compatible);
+ extern struct device_node *of_get_child_by_name(const struct device_node *node,
+ const char *name);
+
+@@ -625,6 +627,12 @@ static inline bool of_have_populated_dt(void)
+ return false;
+ }
+
++static inline struct device_node *of_get_compatible_child(const struct device_node *parent,
++ const char *compatible)
++{
++ return NULL;
++}
++
+ static inline struct device_node *of_get_child_by_name(
+ const struct device_node *node,
+ const char *name)
+diff --git a/include/linux/pfn_t.h b/include/linux/pfn_t.h
+index 43b1d7648e82..a8aef18c6244 100644
+--- a/include/linux/pfn_t.h
++++ b/include/linux/pfn_t.h
+@@ -10,7 +10,7 @@
+ * PFN_DEV - pfn is not covered by system memmap by default
+ * PFN_MAP - pfn has a dynamic page mapping established by a device driver
+ */
+-#define PFN_FLAGS_MASK (((u64) ~PAGE_MASK) << (BITS_PER_LONG_LONG - PAGE_SHIFT))
++#define PFN_FLAGS_MASK (((u64) (~PAGE_MASK)) << (BITS_PER_LONG_LONG - PAGE_SHIFT))
+ #define PFN_SG_CHAIN (1ULL << (BITS_PER_LONG_LONG - 1))
+ #define PFN_SG_LAST (1ULL << (BITS_PER_LONG_LONG - 2))
+ #define PFN_DEV (1ULL << (BITS_PER_LONG_LONG - 3))
+diff --git a/include/net/sock.h b/include/net/sock.h
+index 9bd5d68076d9..64a330544dad 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -1452,6 +1452,7 @@ static inline void lock_sock(struct sock *sk)
+ lock_sock_nested(sk, 0);
+ }
+
++void __release_sock(struct sock *sk);
+ void release_sock(struct sock *sk);
+
+ /* BH context may only use the following locking interface. */
+diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c
+index ed5d34925ad0..6a4b41484afe 100644
+--- a/kernel/debug/kdb/kdb_io.c
++++ b/kernel/debug/kdb/kdb_io.c
+@@ -216,7 +216,7 @@ static char *kdb_read(char *buffer, size_t bufsize)
+ int count;
+ int i;
+ int diag, dtab_count;
+- int key;
++ int key, buf_size, ret;
+
+
+ diag = kdbgetintenv("DTABCOUNT", &dtab_count);
+@@ -336,9 +336,8 @@ poll_again:
+ else
+ p_tmp = tmpbuffer;
+ len = strlen(p_tmp);
+- count = kallsyms_symbol_complete(p_tmp,
+- sizeof(tmpbuffer) -
+- (p_tmp - tmpbuffer));
++ buf_size = sizeof(tmpbuffer) - (p_tmp - tmpbuffer);
++ count = kallsyms_symbol_complete(p_tmp, buf_size);
+ if (tab == 2 && count > 0) {
+ kdb_printf("\n%d symbols are found.", count);
+ if (count > dtab_count) {
+@@ -350,9 +349,13 @@ poll_again:
+ }
+ kdb_printf("\n");
+ for (i = 0; i < count; i++) {
+- if (WARN_ON(!kallsyms_symbol_next(p_tmp, i)))
++ ret = kallsyms_symbol_next(p_tmp, i, buf_size);
++ if (WARN_ON(!ret))
+ break;
+- kdb_printf("%s ", p_tmp);
++ if (ret != -E2BIG)
++ kdb_printf("%s ", p_tmp);
++ else
++ kdb_printf("%s... ", p_tmp);
+ *(p_tmp + len) = '\0';
+ }
+ if (i >= dtab_count)
+diff --git a/kernel/debug/kdb/kdb_private.h b/kernel/debug/kdb/kdb_private.h
+index fc224fbcf954..f2158e463a0f 100644
+--- a/kernel/debug/kdb/kdb_private.h
++++ b/kernel/debug/kdb/kdb_private.h
+@@ -83,7 +83,7 @@ typedef struct __ksymtab {
+ unsigned long sym_start;
+ unsigned long sym_end;
+ } kdb_symtab_t;
+-extern int kallsyms_symbol_next(char *prefix_name, int flag);
++extern int kallsyms_symbol_next(char *prefix_name, int flag, int buf_size);
+ extern int kallsyms_symbol_complete(char *prefix_name, int max_len);
+
+ /* Exported Symbols for kernel loadable modules to use. */
+diff --git a/kernel/debug/kdb/kdb_support.c b/kernel/debug/kdb/kdb_support.c
+index 84422d2b95c0..014f6fbb3832 100644
+--- a/kernel/debug/kdb/kdb_support.c
++++ b/kernel/debug/kdb/kdb_support.c
+@@ -221,11 +221,13 @@ int kallsyms_symbol_complete(char *prefix_name, int max_len)
+ * Parameters:
+ * prefix_name prefix of a symbol name to lookup
+ * flag 0 means search from the head, 1 means continue search.
++ * buf_size maximum length that can be written to prefix_name
++ * buffer
+ * Returns:
+ * 1 if a symbol matches the given prefix.
+ * 0 if no string found
+ */
+-int kallsyms_symbol_next(char *prefix_name, int flag)
++int kallsyms_symbol_next(char *prefix_name, int flag, int buf_size)
+ {
+ int prefix_len = strlen(prefix_name);
+ static loff_t pos;
+@@ -235,10 +237,8 @@ int kallsyms_symbol_next(char *prefix_name, int flag)
+ pos = 0;
+
+ while ((name = kdb_walk_kallsyms(&pos))) {
+- if (strncmp(name, prefix_name, prefix_len) == 0) {
+- strncpy(prefix_name, name, strlen(name)+1);
+- return 1;
+- }
++ if (!strncmp(name, prefix_name, prefix_len))
++ return strscpy(prefix_name, name, buf_size);
+ }
+ return 0;
+ }
+diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
+index 3e3650e94ae6..710ce1d6b982 100644
+--- a/kernel/rcu/tree.c
++++ b/kernel/rcu/tree.c
+@@ -2772,6 +2772,15 @@ void rcu_check_callbacks(int user)
+ rcu_bh_qs();
+ }
+ rcu_preempt_check_callbacks();
++ /* The load-acquire pairs with the store-release setting to true. */
++ if (smp_load_acquire(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs))) {
++ /* Idle and userspace execution already are quiescent states. */
++ if (!rcu_is_cpu_rrupt_from_idle() && !user) {
++ set_tsk_need_resched(current);
++ set_preempt_need_resched();
++ }
++ __this_cpu_write(rcu_dynticks.rcu_urgent_qs, false);
++ }
+ if (rcu_pending())
+ invoke_rcu_core();
+ if (user)
+diff --git a/kernel/sysctl.c b/kernel/sysctl.c
+index 069550540a39..d330b1ce3b94 100644
+--- a/kernel/sysctl.c
++++ b/kernel/sysctl.c
+@@ -1793,6 +1793,24 @@ static struct ctl_table fs_table[] = {
+ .extra1 = &zero,
+ .extra2 = &one,
+ },
++ {
++ .procname = "protected_fifos",
++ .data = &sysctl_protected_fifos,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = proc_dointvec_minmax,
++ .extra1 = &zero,
++ .extra2 = &two,
++ },
++ {
++ .procname = "protected_regular",
++ .data = &sysctl_protected_regular,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = proc_dointvec_minmax,
++ .extra1 = &zero,
++ .extra2 = &two,
++ },
+ {
+ .procname = "suid_dumpable",
+ .data = &suid_dumpable,
+diff --git a/mm/memory.c b/mm/memory.c
+index 93d5d324904b..b6cfe0cf0ead 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -3697,10 +3697,36 @@ static int do_fault(struct vm_fault *vmf)
+ struct vm_area_struct *vma = vmf->vma;
+ int ret;
+
+- /* The VMA was not fully populated on mmap() or missing VM_DONTEXPAND */
+- if (!vma->vm_ops->fault)
+- ret = VM_FAULT_SIGBUS;
+- else if (!(vmf->flags & FAULT_FLAG_WRITE))
++ /*
++ * The VMA was not fully populated on mmap() or missing VM_DONTEXPAND
++ */
++ if (!vma->vm_ops->fault) {
++ /*
++ * If we find a migration pmd entry or a none pmd entry, which
++ * should never happen, return SIGBUS
++ */
++ if (unlikely(!pmd_present(*vmf->pmd)))
++ ret = VM_FAULT_SIGBUS;
++ else {
++ vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm,
++ vmf->pmd,
++ vmf->address,
++ &vmf->ptl);
++ /*
++ * Make sure this is not a temporary clearing of pte
++ * by holding ptl and checking again. A R/M/W update
++ * of pte involves: take ptl, clearing the pte so that
++ * we don't have concurrent modification by hardware
++ * followed by an update.
++ */
++ if (unlikely(pte_none(*vmf->pte)))
++ ret = VM_FAULT_SIGBUS;
++ else
++ ret = VM_FAULT_NOPAGE;
++
++ pte_unmap_unlock(vmf->pte, vmf->ptl);
++ }
++ } else if (!(vmf->flags & FAULT_FLAG_WRITE))
+ ret = do_read_fault(vmf);
+ else if (!(vma->vm_flags & VM_SHARED))
+ ret = do_cow_fault(vmf);
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index a604b5da6755..2074f424dabf 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -3867,17 +3867,6 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
+ unsigned int cpuset_mems_cookie;
+ int reserve_flags;
+
+- /*
+- * In the slowpath, we sanity check order to avoid ever trying to
+- * reclaim >= MAX_ORDER areas which will never succeed. Callers may
+- * be using allocators in order of preference for an area that is
+- * too large.
+- */
+- if (order >= MAX_ORDER) {
+- WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
+- return NULL;
+- }
+-
+ /*
+ * We also sanity check to catch abuse of atomic reserves being used by
+ * callers that are not in atomic context.
+@@ -4179,6 +4168,15 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
+ gfp_t alloc_mask; /* The gfp_t that was actually used for allocation */
+ struct alloc_context ac = { };
+
++ /*
++ * There are several places where we assume that the order value is sane
++ * so bail out early if the request is out of bound.
++ */
++ if (unlikely(order >= MAX_ORDER)) {
++ WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
++ return NULL;
++ }
++
+ gfp_mask &= gfp_allowed_mask;
+ alloc_mask = gfp_mask;
+ if (!prepare_alloc_pages(gfp_mask, order, preferred_nid, nodemask, &ac, &alloc_mask, &alloc_flags))
+diff --git a/mm/shmem.c b/mm/shmem.c
+index ea786a504e1b..fa08f56fd5e5 100644
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -2590,9 +2590,7 @@ static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
+ inode_lock(inode);
+ /* We're holding i_mutex so we can access i_size directly */
+
+- if (offset < 0)
+- offset = -EINVAL;
+- else if (offset >= inode->i_size)
++ if (offset < 0 || offset >= inode->i_size)
+ offset = -ENXIO;
+ else {
+ start = offset >> PAGE_SHIFT;
+diff --git a/mm/slab.c b/mm/slab.c
+index 198c1e2c5358..68ab88e2920e 100644
+--- a/mm/slab.c
++++ b/mm/slab.c
+@@ -3670,6 +3670,8 @@ __do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
+ struct kmem_cache *cachep;
+ void *ret;
+
++ if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
++ return NULL;
+ cachep = kmalloc_slab(size, flags);
+ if (unlikely(ZERO_OR_NULL_PTR(cachep)))
+ return cachep;
+@@ -3705,6 +3707,8 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
+ struct kmem_cache *cachep;
+ void *ret;
+
++ if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
++ return NULL;
+ cachep = kmalloc_slab(size, flags);
+ if (unlikely(ZERO_OR_NULL_PTR(cachep)))
+ return cachep;
+diff --git a/mm/slab_common.c b/mm/slab_common.c
+index 91d271b90600..f6764cf162b8 100644
+--- a/mm/slab_common.c
++++ b/mm/slab_common.c
+@@ -971,18 +971,18 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
+ {
+ int index;
+
+- if (unlikely(size > KMALLOC_MAX_SIZE)) {
+- WARN_ON_ONCE(!(flags & __GFP_NOWARN));
+- return NULL;
+- }
+-
+ if (size <= 192) {
+ if (!size)
+ return ZERO_SIZE_PTR;
+
+ index = size_index[size_index_elem(size)];
+- } else
++ } else {
++ if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
++ WARN_ON(1);
++ return NULL;
++ }
+ index = fls(size - 1);
++ }
+
+ #ifdef CONFIG_ZONE_DMA
+ if (unlikely((flags & GFP_DMA)))
+diff --git a/mm/z3fold.c b/mm/z3fold.c
+index f33403d718ac..2813cdfa46b9 100644
+--- a/mm/z3fold.c
++++ b/mm/z3fold.c
+@@ -99,6 +99,7 @@ struct z3fold_header {
+ #define NCHUNKS ((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)
+
+ #define BUDDY_MASK (0x3)
++#define BUDDY_SHIFT 2
+
+ /**
+ * struct z3fold_pool - stores metadata for each z3fold pool
+@@ -145,7 +146,7 @@ enum z3fold_page_flags {
+ MIDDLE_CHUNK_MAPPED,
+ NEEDS_COMPACTING,
+ PAGE_STALE,
+- UNDER_RECLAIM
++ PAGE_CLAIMED, /* by either reclaim or free */
+ };
+
+ /*****************
+@@ -174,7 +175,7 @@ static struct z3fold_header *init_z3fold_page(struct page *page,
+ clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
+ clear_bit(NEEDS_COMPACTING, &page->private);
+ clear_bit(PAGE_STALE, &page->private);
+- clear_bit(UNDER_RECLAIM, &page->private);
++ clear_bit(PAGE_CLAIMED, &page->private);
+
+ spin_lock_init(&zhdr->page_lock);
+ kref_init(&zhdr->refcount);
+@@ -223,8 +224,11 @@ static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
+ unsigned long handle;
+
+ handle = (unsigned long)zhdr;
+- if (bud != HEADLESS)
+- handle += (bud + zhdr->first_num) & BUDDY_MASK;
++ if (bud != HEADLESS) {
++ handle |= (bud + zhdr->first_num) & BUDDY_MASK;
++ if (bud == LAST)
++ handle |= (zhdr->last_chunks << BUDDY_SHIFT);
++ }
+ return handle;
+ }
+
+@@ -234,6 +238,12 @@ static struct z3fold_header *handle_to_z3fold_header(unsigned long handle)
+ return (struct z3fold_header *)(handle & PAGE_MASK);
+ }
+
++/* only for LAST bud, returns zero otherwise */
++static unsigned short handle_to_chunks(unsigned long handle)
++{
++ return (handle & ~PAGE_MASK) >> BUDDY_SHIFT;
++}
++
+ /*
+ * (handle & BUDDY_MASK) < zhdr->first_num is possible in encode_handle
+ * but that doesn't matter. because the masking will result in the
+@@ -717,37 +727,39 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
+ page = virt_to_page(zhdr);
+
+ if (test_bit(PAGE_HEADLESS, &page->private)) {
+- /* HEADLESS page stored */
+- bud = HEADLESS;
+- } else {
+- z3fold_page_lock(zhdr);
+- bud = handle_to_buddy(handle);
+-
+- switch (bud) {
+- case FIRST:
+- zhdr->first_chunks = 0;
+- break;
+- case MIDDLE:
+- zhdr->middle_chunks = 0;
+- zhdr->start_middle = 0;
+- break;
+- case LAST:
+- zhdr->last_chunks = 0;
+- break;
+- default:
+- pr_err("%s: unknown bud %d\n", __func__, bud);
+- WARN_ON(1);
+- z3fold_page_unlock(zhdr);
+- return;
++ /* if a headless page is under reclaim, just leave.
++ * NB: we use test_and_set_bit for a reason: if the bit
++ * has not been set before, we release this page
++ * immediately so we don't care about its value any more.
++ */
++ if (!test_and_set_bit(PAGE_CLAIMED, &page->private)) {
++ spin_lock(&pool->lock);
++ list_del(&page->lru);
++ spin_unlock(&pool->lock);
++ free_z3fold_page(page);
++ atomic64_dec(&pool->pages_nr);
+ }
++ return;
+ }
+
+- if (bud == HEADLESS) {
+- spin_lock(&pool->lock);
+- list_del(&page->lru);
+- spin_unlock(&pool->lock);
+- free_z3fold_page(page);
+- atomic64_dec(&pool->pages_nr);
++ /* Non-headless case */
++ z3fold_page_lock(zhdr);
++ bud = handle_to_buddy(handle);
++
++ switch (bud) {
++ case FIRST:
++ zhdr->first_chunks = 0;
++ break;
++ case MIDDLE:
++ zhdr->middle_chunks = 0;
++ break;
++ case LAST:
++ zhdr->last_chunks = 0;
++ break;
++ default:
++ pr_err("%s: unknown bud %d\n", __func__, bud);
++ WARN_ON(1);
++ z3fold_page_unlock(zhdr);
+ return;
+ }
+
+@@ -755,7 +767,7 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
+ atomic64_dec(&pool->pages_nr);
+ return;
+ }
+- if (test_bit(UNDER_RECLAIM, &page->private)) {
++ if (test_bit(PAGE_CLAIMED, &page->private)) {
+ z3fold_page_unlock(zhdr);
+ return;
+ }
+@@ -833,20 +845,30 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
+ }
+ list_for_each_prev(pos, &pool->lru) {
+ page = list_entry(pos, struct page, lru);
++
++ /* this bit could have been set by free, in which case
++ * we pass over to the next page in the pool.
++ */
++ if (test_and_set_bit(PAGE_CLAIMED, &page->private))
++ continue;
++
++ zhdr = page_address(page);
+ if (test_bit(PAGE_HEADLESS, &page->private))
+- /* candidate found */
+ break;
+
+- zhdr = page_address(page);
+- if (!z3fold_page_trylock(zhdr))
++ if (!z3fold_page_trylock(zhdr)) {
++ zhdr = NULL;
+ continue; /* can't evict at this point */
++ }
+ kref_get(&zhdr->refcount);
+ list_del_init(&zhdr->buddy);
+ zhdr->cpu = -1;
+- set_bit(UNDER_RECLAIM, &page->private);
+ break;
+ }
+
++ if (!zhdr)
++ break;
++
+ list_del_init(&page->lru);
+ spin_unlock(&pool->lock);
+
+@@ -895,6 +917,7 @@ next:
+ if (test_bit(PAGE_HEADLESS, &page->private)) {
+ if (ret == 0) {
+ free_z3fold_page(page);
++ atomic64_dec(&pool->pages_nr);
+ return 0;
+ }
+ spin_lock(&pool->lock);
+@@ -902,7 +925,7 @@ next:
+ spin_unlock(&pool->lock);
+ } else {
+ z3fold_page_lock(zhdr);
+- clear_bit(UNDER_RECLAIM, &page->private);
++ clear_bit(PAGE_CLAIMED, &page->private);
+ if (kref_put(&zhdr->refcount,
+ release_z3fold_page_locked)) {
+ atomic64_dec(&pool->pages_nr);
+@@ -961,7 +984,7 @@ static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle)
+ set_bit(MIDDLE_CHUNK_MAPPED, &page->private);
+ break;
+ case LAST:
+- addr += PAGE_SIZE - (zhdr->last_chunks << CHUNK_SHIFT);
++ addr += PAGE_SIZE - (handle_to_chunks(handle) << CHUNK_SHIFT);
+ break;
+ default:
+ pr_err("unknown buddy id %d\n", buddy);
+diff --git a/net/can/raw.c b/net/can/raw.c
+index 864c80dbdb72..e1f26441b49a 100644
+--- a/net/can/raw.c
++++ b/net/can/raw.c
+@@ -745,18 +745,19 @@ static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
+ } else
+ ifindex = ro->ifindex;
+
+- if (ro->fd_frames) {
++ dev = dev_get_by_index(sock_net(sk), ifindex);
++ if (!dev)
++ return -ENXIO;
++
++ err = -EINVAL;
++ if (ro->fd_frames && dev->mtu == CANFD_MTU) {
+ if (unlikely(size != CANFD_MTU && size != CAN_MTU))
+- return -EINVAL;
++ goto put_dev;
+ } else {
+ if (unlikely(size != CAN_MTU))
+- return -EINVAL;
++ goto put_dev;
+ }
+
+- dev = dev_get_by_index(sock_net(sk), ifindex);
+- if (!dev)
+- return -ENXIO;
+-
+ skb = sock_alloc_send_skb(sk, size + sizeof(struct can_skb_priv),
+ msg->msg_flags & MSG_DONTWAIT, &err);
+ if (!skb)
+diff --git a/net/core/sock.c b/net/core/sock.c
+index 68d08ed5521e..36f19458e2fe 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -2242,7 +2242,7 @@ static void __lock_sock(struct sock *sk)
+ finish_wait(&sk->sk_lock.wq, &wait);
+ }
+
+-static void __release_sock(struct sock *sk)
++void __release_sock(struct sock *sk)
+ __releases(&sk->sk_lock.slock)
+ __acquires(&sk->sk_lock.slock)
+ {
+diff --git a/net/ieee802154/6lowpan/6lowpan_i.h b/net/ieee802154/6lowpan/6lowpan_i.h
+index b8d95cb71c25..44a7e16bf3b5 100644
+--- a/net/ieee802154/6lowpan/6lowpan_i.h
++++ b/net/ieee802154/6lowpan/6lowpan_i.h
+@@ -20,8 +20,8 @@ typedef unsigned __bitwise lowpan_rx_result;
+ struct frag_lowpan_compare_key {
+ u16 tag;
+ u16 d_size;
+- const struct ieee802154_addr src;
+- const struct ieee802154_addr dst;
++ struct ieee802154_addr src;
++ struct ieee802154_addr dst;
+ };
+
+ /* Equivalent of ipv4 struct ipq
+diff --git a/net/ieee802154/6lowpan/reassembly.c b/net/ieee802154/6lowpan/reassembly.c
+index 1790b65944b3..2cc224106b69 100644
+--- a/net/ieee802154/6lowpan/reassembly.c
++++ b/net/ieee802154/6lowpan/reassembly.c
+@@ -75,14 +75,14 @@ fq_find(struct net *net, const struct lowpan_802154_cb *cb,
+ {
+ struct netns_ieee802154_lowpan *ieee802154_lowpan =
+ net_ieee802154_lowpan(net);
+- struct frag_lowpan_compare_key key = {
+- .tag = cb->d_tag,
+- .d_size = cb->d_size,
+- .src = *src,
+- .dst = *dst,
+- };
++ struct frag_lowpan_compare_key key = {};
+ struct inet_frag_queue *q;
+
++ key.tag = cb->d_tag;
++ key.d_size = cb->d_size;
++ key.src = *src;
++ key.dst = *dst;
++
+ q = inet_frag_find(&ieee802154_lowpan->frags, &key);
+ if (!q)
+ return NULL;
+@@ -372,7 +372,7 @@ int lowpan_frag_rcv(struct sk_buff *skb, u8 frag_type)
+ struct lowpan_frag_queue *fq;
+ struct net *net = dev_net(skb->dev);
+ struct lowpan_802154_cb *cb = lowpan_802154_cb(skb);
+- struct ieee802154_hdr hdr;
++ struct ieee802154_hdr hdr = {};
+ int err;
+
+ if (ieee802154_hdr_peek_addrs(skb, &hdr) < 0)
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index f9c985460faa..8109985e78a1 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -2217,16 +2217,10 @@ adjudge_to_death:
+ sock_hold(sk);
+ sock_orphan(sk);
+
+- /* It is the last release_sock in its life. It will remove backlog. */
+- release_sock(sk);
+-
+-
+- /* Now socket is owned by kernel and we acquire BH lock
+- * to finish close. No need to check for user refs.
+- */
+ local_bh_disable();
+ bh_lock_sock(sk);
+- WARN_ON(sock_owned_by_user(sk));
++ /* remove backlog if any, without releasing ownership. */
++ __release_sock(sk);
+
+ percpu_counter_inc(sk->sk_prot->orphan_count);
+
+@@ -2295,6 +2289,7 @@ adjudge_to_death:
+ out:
+ bh_unlock_sock(sk);
+ local_bh_enable();
++ release_sock(sk);
+ sock_put(sk);
+ }
+ EXPORT_SYMBOL(tcp_close);
+diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
+index b49f5afab405..2e472d5c3ea4 100644
+--- a/net/llc/af_llc.c
++++ b/net/llc/af_llc.c
+@@ -730,7 +730,6 @@ static int llc_ui_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ struct sk_buff *skb = NULL;
+ struct sock *sk = sock->sk;
+ struct llc_sock *llc = llc_sk(sk);
+- unsigned long cpu_flags;
+ size_t copied = 0;
+ u32 peek_seq = 0;
+ u32 *seq, skb_len;
+@@ -855,9 +854,8 @@ static int llc_ui_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ goto copy_uaddr;
+
+ if (!(flags & MSG_PEEK)) {
+- spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags);
+- sk_eat_skb(sk, skb);
+- spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags);
++ skb_unlink(skb, &sk->sk_receive_queue);
++ kfree_skb(skb);
+ *seq = 0;
+ }
+
+@@ -878,9 +876,8 @@ copy_uaddr:
+ llc_cmsg_rcv(msg, skb);
+
+ if (!(flags & MSG_PEEK)) {
+- spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags);
+- sk_eat_skb(sk, skb);
+- spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags);
++ skb_unlink(skb, &sk->sk_receive_queue);
++ kfree_skb(skb);
+ *seq = 0;
+ }
+
+diff --git a/net/sctp/associola.c b/net/sctp/associola.c
+index 58f7d8cfd748..4982b31fec8e 100644
+--- a/net/sctp/associola.c
++++ b/net/sctp/associola.c
+@@ -497,8 +497,9 @@ void sctp_assoc_set_primary(struct sctp_association *asoc,
+ void sctp_assoc_rm_peer(struct sctp_association *asoc,
+ struct sctp_transport *peer)
+ {
+- struct list_head *pos;
+- struct sctp_transport *transport;
++ struct sctp_transport *transport;
++ struct list_head *pos;
++ struct sctp_chunk *ch;
+
+ pr_debug("%s: association:%p addr:%pISpc\n",
+ __func__, asoc, &peer->ipaddr.sa);
+@@ -562,7 +563,6 @@ void sctp_assoc_rm_peer(struct sctp_association *asoc,
+ */
+ if (!list_empty(&peer->transmitted)) {
+ struct sctp_transport *active = asoc->peer.active_path;
+- struct sctp_chunk *ch;
+
+ /* Reset the transport of each chunk on this list */
+ list_for_each_entry(ch, &peer->transmitted,
+@@ -584,6 +584,10 @@ void sctp_assoc_rm_peer(struct sctp_association *asoc,
+ sctp_transport_hold(active);
+ }
+
++ list_for_each_entry(ch, &asoc->outqueue.out_chunk_list, list)
++ if (ch->transport == peer)
++ ch->transport = NULL;
++
+ asoc->peer.transport_count--;
+
+ sctp_transport_free(peer);
+diff --git a/net/sunrpc/auth_generic.c b/net/sunrpc/auth_generic.c
+index f1df9837f1ac..1ac08dcbf85d 100644
+--- a/net/sunrpc/auth_generic.c
++++ b/net/sunrpc/auth_generic.c
+@@ -281,13 +281,7 @@ static bool generic_key_to_expire(struct rpc_cred *cred)
+ {
+ struct auth_cred *acred = &container_of(cred, struct generic_cred,
+ gc_base)->acred;
+- bool ret;
+-
+- get_rpccred(cred);
+- ret = test_bit(RPC_CRED_KEY_EXPIRE_SOON, &acred->ac_flags);
+- put_rpccred(cred);
+-
+- return ret;
++ return test_bit(RPC_CRED_KEY_EXPIRE_SOON, &acred->ac_flags);
+ }
+
+ static const struct rpc_credops generic_credops = {
+diff --git a/security/integrity/evm/evm.h b/security/integrity/evm/evm.h
+index f5f12727771a..2ff02459fcfd 100644
+--- a/security/integrity/evm/evm.h
++++ b/security/integrity/evm/evm.h
+@@ -48,7 +48,7 @@ int evm_calc_hmac(struct dentry *dentry, const char *req_xattr_name,
+ size_t req_xattr_value_len, char *digest);
+ int evm_calc_hash(struct dentry *dentry, const char *req_xattr_name,
+ const char *req_xattr_value,
+- size_t req_xattr_value_len, char *digest);
++ size_t req_xattr_value_len, char type, char *digest);
+ int evm_init_hmac(struct inode *inode, const struct xattr *xattr,
+ char *hmac_val);
+ int evm_init_secfs(void);
+diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c
+index ee9c3de5065a..f1f030ae363b 100644
+--- a/security/integrity/evm/evm_crypto.c
++++ b/security/integrity/evm/evm_crypto.c
+@@ -139,7 +139,7 @@ out:
+ * protection.)
+ */
+ static void hmac_add_misc(struct shash_desc *desc, struct inode *inode,
+- char *digest)
++ char type, char *digest)
+ {
+ struct h_misc {
+ unsigned long ino;
+@@ -150,8 +150,13 @@ static void hmac_add_misc(struct shash_desc *desc, struct inode *inode,
+ } hmac_misc;
+
+ memset(&hmac_misc, 0, sizeof(hmac_misc));
+- hmac_misc.ino = inode->i_ino;
+- hmac_misc.generation = inode->i_generation;
++ /* Don't include the inode or generation number in portable
++ * signatures
++ */
++ if (type != EVM_XATTR_PORTABLE_DIGSIG) {
++ hmac_misc.ino = inode->i_ino;
++ hmac_misc.generation = inode->i_generation;
++ }
+ /* The hmac uid and gid must be encoded in the initial user
+ * namespace (not the filesystems user namespace) as encoding
+ * them in the filesystems user namespace allows an attack
+@@ -164,7 +169,8 @@ static void hmac_add_misc(struct shash_desc *desc, struct inode *inode,
+ hmac_misc.gid = from_kgid(&init_user_ns, inode->i_gid);
+ hmac_misc.mode = inode->i_mode;
+ crypto_shash_update(desc, (const u8 *)&hmac_misc, sizeof(hmac_misc));
+- if (evm_hmac_attrs & EVM_ATTR_FSUUID)
++ if ((evm_hmac_attrs & EVM_ATTR_FSUUID) &&
++ type != EVM_XATTR_PORTABLE_DIGSIG)
+ crypto_shash_update(desc, &inode->i_sb->s_uuid.b[0],
+ sizeof(inode->i_sb->s_uuid));
+ crypto_shash_final(desc, digest);
+@@ -190,6 +196,7 @@ static int evm_calc_hmac_or_hash(struct dentry *dentry,
+ char *xattr_value = NULL;
+ int error;
+ int size;
++ bool ima_present = false;
+
+ if (!(inode->i_opflags & IOP_XATTR))
+ return -EOPNOTSUPP;
+@@ -200,11 +207,18 @@ static int evm_calc_hmac_or_hash(struct dentry *dentry,
+
+ error = -ENODATA;
+ for (xattrname = evm_config_xattrnames; *xattrname != NULL; xattrname++) {
++ bool is_ima = false;
++
++ if (strcmp(*xattrname, XATTR_NAME_IMA) == 0)
++ is_ima = true;
++
+ if ((req_xattr_name && req_xattr_value)
+ && !strcmp(*xattrname, req_xattr_name)) {
+ error = 0;
+ crypto_shash_update(desc, (const u8 *)req_xattr_value,
+ req_xattr_value_len);
++ if (is_ima)
++ ima_present = true;
+ continue;
+ }
+ size = vfs_getxattr_alloc(dentry, *xattrname,
+@@ -219,9 +233,14 @@ static int evm_calc_hmac_or_hash(struct dentry *dentry,
+ error = 0;
+ xattr_size = size;
+ crypto_shash_update(desc, (const u8 *)xattr_value, xattr_size);
++ if (is_ima)
++ ima_present = true;
+ }
+- hmac_add_misc(desc, inode, digest);
++ hmac_add_misc(desc, inode, type, digest);
+
++ /* Portable EVM signatures must include an IMA hash */
++ if (type == EVM_XATTR_PORTABLE_DIGSIG && !ima_present)
++ return -EPERM;
+ out:
+ kfree(xattr_value);
+ kfree(desc);
+@@ -233,17 +252,45 @@ int evm_calc_hmac(struct dentry *dentry, const char *req_xattr_name,
+ char *digest)
+ {
+ return evm_calc_hmac_or_hash(dentry, req_xattr_name, req_xattr_value,
+- req_xattr_value_len, EVM_XATTR_HMAC, digest);
++ req_xattr_value_len, EVM_XATTR_HMAC, digest);
+ }
+
+ int evm_calc_hash(struct dentry *dentry, const char *req_xattr_name,
+ const char *req_xattr_value, size_t req_xattr_value_len,
+- char *digest)
++ char type, char *digest)
+ {
+ return evm_calc_hmac_or_hash(dentry, req_xattr_name, req_xattr_value,
+- req_xattr_value_len, IMA_XATTR_DIGEST, digest);
++ req_xattr_value_len, type, digest);
++}
++
++static int evm_is_immutable(struct dentry *dentry, struct inode *inode)
++{
++ const struct evm_ima_xattr_data *xattr_data = NULL;
++ struct integrity_iint_cache *iint;
++ int rc = 0;
++
++ iint = integrity_iint_find(inode);
++ if (iint && (iint->flags & EVM_IMMUTABLE_DIGSIG))
++ return 1;
++
++ /* Do this the hard way */
++ rc = vfs_getxattr_alloc(dentry, XATTR_NAME_EVM, (char **)&xattr_data, 0,
++ GFP_NOFS);
++ if (rc <= 0) {
++ if (rc == -ENODATA)
++ return 0;
++ return rc;
++ }
++ if (xattr_data->type == EVM_XATTR_PORTABLE_DIGSIG)
++ rc = 1;
++ else
++ rc = 0;
++
++ kfree(xattr_data);
++ return rc;
+ }
+
++
+ /*
+ * Calculate the hmac and update security.evm xattr
+ *
+@@ -256,6 +303,16 @@ int evm_update_evmxattr(struct dentry *dentry, const char *xattr_name,
+ struct evm_ima_xattr_data xattr_data;
+ int rc = 0;
+
++ /*
++ * Don't permit any transformation of the EVM xattr if the signature
++ * is of an immutable type
++ */
++ rc = evm_is_immutable(dentry, inode);
++ if (rc < 0)
++ return rc;
++ if (rc)
++ return -EPERM;
++
+ rc = evm_calc_hmac(dentry, xattr_name, xattr_value,
+ xattr_value_len, xattr_data.digest);
+ if (rc == 0) {
+@@ -281,7 +338,7 @@ int evm_init_hmac(struct inode *inode, const struct xattr *lsm_xattr,
+ }
+
+ crypto_shash_update(desc, lsm_xattr->value, lsm_xattr->value_len);
+- hmac_add_misc(desc, inode, hmac_val);
++ hmac_add_misc(desc, inode, EVM_XATTR_HMAC, hmac_val);
+ kfree(desc);
+ return 0;
+ }
+diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c
+index 063d38aef64e..1d1a7053144b 100644
+--- a/security/integrity/evm/evm_main.c
++++ b/security/integrity/evm/evm_main.c
+@@ -31,7 +31,7 @@
+ int evm_initialized;
+
+ static char *integrity_status_msg[] = {
+- "pass", "fail", "no_label", "no_xattrs", "unknown"
++ "pass", "pass_immutable", "fail", "no_label", "no_xattrs", "unknown"
+ };
+ char *evm_hmac = "hmac(sha1)";
+ char *evm_hash = "sha1";
+@@ -120,7 +120,8 @@ static enum integrity_status evm_verify_hmac(struct dentry *dentry,
+ enum integrity_status evm_status = INTEGRITY_PASS;
+ int rc, xattr_len;
+
+- if (iint && iint->evm_status == INTEGRITY_PASS)
++ if (iint && (iint->evm_status == INTEGRITY_PASS ||
++ iint->evm_status == INTEGRITY_PASS_IMMUTABLE))
+ return iint->evm_status;
+
+ /* if status is not PASS, try to check again - against -ENOMEM */
+@@ -161,22 +162,26 @@ static enum integrity_status evm_verify_hmac(struct dentry *dentry,
+ rc = -EINVAL;
+ break;
+ case EVM_IMA_XATTR_DIGSIG:
++ case EVM_XATTR_PORTABLE_DIGSIG:
+ rc = evm_calc_hash(dentry, xattr_name, xattr_value,
+- xattr_value_len, calc.digest);
++ xattr_value_len, xattr_data->type,
++ calc.digest);
+ if (rc)
+ break;
+ rc = integrity_digsig_verify(INTEGRITY_KEYRING_EVM,
+ (const char *)xattr_data, xattr_len,
+ calc.digest, sizeof(calc.digest));
+ if (!rc) {
+- /* Replace RSA with HMAC if not mounted readonly and
+- * not immutable
+- */
+- if (!IS_RDONLY(d_backing_inode(dentry)) &&
+- !IS_IMMUTABLE(d_backing_inode(dentry)))
++ if (xattr_data->type == EVM_XATTR_PORTABLE_DIGSIG) {
++ if (iint)
++ iint->flags |= EVM_IMMUTABLE_DIGSIG;
++ evm_status = INTEGRITY_PASS_IMMUTABLE;
++ } else if (!IS_RDONLY(d_backing_inode(dentry)) &&
++ !IS_IMMUTABLE(d_backing_inode(dentry))) {
+ evm_update_evmxattr(dentry, xattr_name,
+ xattr_value,
+ xattr_value_len);
++ }
+ }
+ break;
+ default:
+@@ -277,7 +282,7 @@ static enum integrity_status evm_verify_current_integrity(struct dentry *dentry)
+ * affect security.evm. An interesting side affect of writing posix xattr
+ * acls is their modifying of the i_mode, which is included in security.evm.
+ * For posix xattr acls only, permit security.evm, even if it currently
+- * doesn't exist, to be updated.
++ * doesn't exist, to be updated unless the EVM signature is immutable.
+ */
+ static int evm_protect_xattr(struct dentry *dentry, const char *xattr_name,
+ const void *xattr_value, size_t xattr_value_len)
+@@ -345,7 +350,8 @@ int evm_inode_setxattr(struct dentry *dentry, const char *xattr_name,
+ if (strcmp(xattr_name, XATTR_NAME_EVM) == 0) {
+ if (!xattr_value_len)
+ return -EINVAL;
+- if (xattr_data->type != EVM_IMA_XATTR_DIGSIG)
++ if (xattr_data->type != EVM_IMA_XATTR_DIGSIG &&
++ xattr_data->type != EVM_XATTR_PORTABLE_DIGSIG)
+ return -EPERM;
+ }
+ return evm_protect_xattr(dentry, xattr_name, xattr_value,
+@@ -422,6 +428,9 @@ void evm_inode_post_removexattr(struct dentry *dentry, const char *xattr_name)
+ /**
+ * evm_inode_setattr - prevent updating an invalid EVM extended attribute
+ * @dentry: pointer to the affected dentry
++ *
++ * Permit update of file attributes when files have a valid EVM signature,
++ * except when they have an immutable portable signature.
+ */
+ int evm_inode_setattr(struct dentry *dentry, struct iattr *attr)
+ {
+diff --git a/security/integrity/iint.c b/security/integrity/iint.c
+index 6fc888ca468e..f4a40fb84b1e 100644
+--- a/security/integrity/iint.c
++++ b/security/integrity/iint.c
+@@ -74,6 +74,7 @@ static void iint_free(struct integrity_iint_cache *iint)
+ iint->ima_hash = NULL;
+ iint->version = 0;
+ iint->flags = 0UL;
++ iint->atomic_flags = 0UL;
+ iint->ima_file_status = INTEGRITY_UNKNOWN;
+ iint->ima_mmap_status = INTEGRITY_UNKNOWN;
+ iint->ima_bprm_status = INTEGRITY_UNKNOWN;
+@@ -155,12 +156,14 @@ static void init_once(void *foo)
+ memset(iint, 0, sizeof(*iint));
+ iint->version = 0;
+ iint->flags = 0UL;
++ iint->atomic_flags = 0;
+ iint->ima_file_status = INTEGRITY_UNKNOWN;
+ iint->ima_mmap_status = INTEGRITY_UNKNOWN;
+ iint->ima_bprm_status = INTEGRITY_UNKNOWN;
+ iint->ima_read_status = INTEGRITY_UNKNOWN;
+ iint->evm_status = INTEGRITY_UNKNOWN;
+ iint->measured_pcrs = 0;
++ mutex_init(&iint->mutex);
+ }
+
+ static int __init integrity_iintcache_init(void)
+diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
+index c2edba8de35e..c7e8db0ea4c0 100644
+--- a/security/integrity/ima/ima_api.c
++++ b/security/integrity/ima/ima_api.c
+@@ -199,42 +199,59 @@ int ima_collect_measurement(struct integrity_iint_cache *iint,
+ struct inode *inode = file_inode(file);
+ const char *filename = file->f_path.dentry->d_name.name;
+ int result = 0;
++ int length;
++ void *tmpbuf;
++ u64 i_version;
+ struct {
+ struct ima_digest_data hdr;
+ char digest[IMA_MAX_DIGEST_SIZE];
+ } hash;
+
+- if (!(iint->flags & IMA_COLLECTED)) {
+- u64 i_version = file_inode(file)->i_version;
++ if (iint->flags & IMA_COLLECTED)
++ goto out;
+
+- if (file->f_flags & O_DIRECT) {
+- audit_cause = "failed(directio)";
+- result = -EACCES;
+- goto out;
+- }
++ /*
++ * Detecting file change is based on i_version. On filesystems
++ * which do not support i_version, support is limited to an initial
++ * measurement/appraisal/audit.
++ */
++ i_version = file_inode(file)->i_version;
++ hash.hdr.algo = algo;
+
+- hash.hdr.algo = algo;
+-
+- result = (!buf) ? ima_calc_file_hash(file, &hash.hdr) :
+- ima_calc_buffer_hash(buf, size, &hash.hdr);
+- if (!result) {
+- int length = sizeof(hash.hdr) + hash.hdr.length;
+- void *tmpbuf = krealloc(iint->ima_hash, length,
+- GFP_NOFS);
+- if (tmpbuf) {
+- iint->ima_hash = tmpbuf;
+- memcpy(iint->ima_hash, &hash, length);
+- iint->version = i_version;
+- iint->flags |= IMA_COLLECTED;
+- } else
+- result = -ENOMEM;
+- }
++ /* Initialize hash digest to 0's in case of failure */
++ memset(&hash.digest, 0, sizeof(hash.digest));
++
++ if (buf)
++ result = ima_calc_buffer_hash(buf, size, &hash.hdr);
++ else
++ result = ima_calc_file_hash(file, &hash.hdr);
++
++ if (result && result != -EBADF && result != -EINVAL)
++ goto out;
++
++ length = sizeof(hash.hdr) + hash.hdr.length;
++ tmpbuf = krealloc(iint->ima_hash, length, GFP_NOFS);
++ if (!tmpbuf) {
++ result = -ENOMEM;
++ goto out;
+ }
++
++ iint->ima_hash = tmpbuf;
++ memcpy(iint->ima_hash, &hash, length);
++ iint->version = i_version;
++
++ /* Possibly temporary failure due to type of read (e.g. O_DIRECT) */
++ if (!result)
++ iint->flags |= IMA_COLLECTED;
+ out:
+- if (result)
++ if (result) {
++ if (file->f_flags & O_DIRECT)
++ audit_cause = "failed(directio)";
++
+ integrity_audit_msg(AUDIT_INTEGRITY_DATA, inode,
+ filename, "collect_data", audit_cause,
+ result, 0);
++ }
+ return result;
+ }
+
+@@ -278,7 +295,7 @@ void ima_store_measurement(struct integrity_iint_cache *iint,
+ }
+
+ result = ima_store_template(entry, violation, inode, filename, pcr);
+- if (!result || result == -EEXIST) {
++ if ((!result || result == -EEXIST) && !(file->f_flags & O_DIRECT)) {
+ iint->flags |= IMA_MEASURED;
+ iint->measured_pcrs |= (0x1 << pcr);
+ }
+diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c
+index 348db9b78681..84eb6cc956bf 100644
+--- a/security/integrity/ima/ima_appraise.c
++++ b/security/integrity/ima/ima_appraise.c
+@@ -230,7 +230,9 @@ int ima_appraise_measurement(enum ima_hooks func,
+ }
+
+ status = evm_verifyxattr(dentry, XATTR_NAME_IMA, xattr_value, rc, iint);
+- if ((status != INTEGRITY_PASS) && (status != INTEGRITY_UNKNOWN)) {
++ if ((status != INTEGRITY_PASS) &&
++ (status != INTEGRITY_PASS_IMMUTABLE) &&
++ (status != INTEGRITY_UNKNOWN)) {
+ if ((status == INTEGRITY_NOLABEL)
+ || (status == INTEGRITY_NOXATTRS))
+ cause = "missing-HMAC";
+@@ -249,6 +251,7 @@ int ima_appraise_measurement(enum ima_hooks func,
+ status = INTEGRITY_FAIL;
+ break;
+ }
++ clear_bit(IMA_DIGSIG, &iint->atomic_flags);
+ if (xattr_len - sizeof(xattr_value->type) - hash_start >=
+ iint->ima_hash->length)
+ /* xattr length may be longer. md5 hash in previous
+@@ -267,7 +270,7 @@ int ima_appraise_measurement(enum ima_hooks func,
+ status = INTEGRITY_PASS;
+ break;
+ case EVM_IMA_XATTR_DIGSIG:
+- iint->flags |= IMA_DIGSIG;
++ set_bit(IMA_DIGSIG, &iint->atomic_flags);
+ rc = integrity_digsig_verify(INTEGRITY_KEYRING_IMA,
+ (const char *)xattr_value, rc,
+ iint->ima_hash->digest,
+@@ -318,7 +321,7 @@ void ima_update_xattr(struct integrity_iint_cache *iint, struct file *file)
+ int rc = 0;
+
+ /* do not collect and update hash for digital signatures */
+- if (iint->flags & IMA_DIGSIG)
++ if (test_bit(IMA_DIGSIG, &iint->atomic_flags))
+ return;
+
+ if (iint->ima_file_status != INTEGRITY_PASS)
+@@ -328,7 +331,9 @@ void ima_update_xattr(struct integrity_iint_cache *iint, struct file *file)
+ if (rc < 0)
+ return;
+
++ inode_lock(file_inode(file));
+ ima_fix_xattr(dentry, iint);
++ inode_unlock(file_inode(file));
+ }
+
+ /**
+@@ -351,16 +356,14 @@ void ima_inode_post_setattr(struct dentry *dentry)
+ return;
+
+ must_appraise = ima_must_appraise(inode, MAY_ACCESS, POST_SETATTR);
++ if (!must_appraise)
++ __vfs_removexattr(dentry, XATTR_NAME_IMA);
+ iint = integrity_iint_find(inode);
+ if (iint) {
+- iint->flags &= ~(IMA_APPRAISE | IMA_APPRAISED |
+- IMA_APPRAISE_SUBMASK | IMA_APPRAISED_SUBMASK |
+- IMA_ACTION_RULE_FLAGS);
+- if (must_appraise)
+- iint->flags |= IMA_APPRAISE;
++ set_bit(IMA_CHANGE_ATTR, &iint->atomic_flags);
++ if (!must_appraise)
++ clear_bit(IMA_UPDATE_XATTR, &iint->atomic_flags);
+ }
+- if (!must_appraise)
+- __vfs_removexattr(dentry, XATTR_NAME_IMA);
+ }
+
+ /*
+@@ -389,12 +392,12 @@ static void ima_reset_appraise_flags(struct inode *inode, int digsig)
+ iint = integrity_iint_find(inode);
+ if (!iint)
+ return;
+-
+- iint->flags &= ~IMA_DONE_MASK;
+ iint->measured_pcrs = 0;
++ set_bit(IMA_CHANGE_XATTR, &iint->atomic_flags);
+ if (digsig)
+- iint->flags |= IMA_DIGSIG;
+- return;
++ set_bit(IMA_DIGSIG, &iint->atomic_flags);
++ else
++ clear_bit(IMA_DIGSIG, &iint->atomic_flags);
+ }
+
+ int ima_inode_setxattr(struct dentry *dentry, const char *xattr_name,
+diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c
+index 90453aa1c813..cb041af9eddb 100644
+--- a/security/integrity/ima/ima_crypto.c
++++ b/security/integrity/ima/ima_crypto.c
+@@ -443,6 +443,16 @@ int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash)
+ loff_t i_size;
+ int rc;
+
++ /*
++ * For consistency, fail files opened with the O_DIRECT flag on
++ * filesystems mounted with/without DAX option.
++ */
++ if (file->f_flags & O_DIRECT) {
++ hash->length = hash_digest_size[ima_hash_algo];
++ hash->algo = ima_hash_algo;
++ return -EINVAL;
++ }
++
+ i_size = i_size_read(file_inode(file));
+
+ if (ima_ahash_minsize && i_size >= ima_ahash_minsize) {
+diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
+index f8553179bdd7..92d0ca7309f9 100644
+--- a/security/integrity/ima/ima_main.c
++++ b/security/integrity/ima/ima_main.c
+@@ -99,10 +99,13 @@ static void ima_rdwr_violation_check(struct file *file,
+ if (!iint)
+ iint = integrity_iint_find(inode);
+ /* IMA_MEASURE is set from reader side */
+- if (iint && (iint->flags & IMA_MEASURE))
++ if (iint && test_bit(IMA_MUST_MEASURE,
++ &iint->atomic_flags))
+ send_tomtou = true;
+ }
+ } else {
++ if (must_measure)
++ set_bit(IMA_MUST_MEASURE, &iint->atomic_flags);
+ if ((atomic_read(&inode->i_writecount) > 0) && must_measure)
+ send_writers = true;
+ }
+@@ -124,21 +127,24 @@ static void ima_check_last_writer(struct integrity_iint_cache *iint,
+ struct inode *inode, struct file *file)
+ {
+ fmode_t mode = file->f_mode;
++ bool update;
+
+ if (!(mode & FMODE_WRITE))
+ return;
+
+- inode_lock(inode);
++ mutex_lock(&iint->mutex);
+ if (atomic_read(&inode->i_writecount) == 1) {
++ update = test_and_clear_bit(IMA_UPDATE_XATTR,
++ &iint->atomic_flags);
+ if ((iint->version != inode->i_version) ||
+ (iint->flags & IMA_NEW_FILE)) {
+ iint->flags &= ~(IMA_DONE_MASK | IMA_NEW_FILE);
+ iint->measured_pcrs = 0;
+- if (iint->flags & IMA_APPRAISE)
++ if (update)
+ ima_update_xattr(iint, file);
+ }
+ }
+- inode_unlock(inode);
++ mutex_unlock(&iint->mutex);
+ }
+
+ /**
+@@ -171,7 +177,7 @@ static int process_measurement(struct file *file, char *buf, loff_t size,
+ char *pathbuf = NULL;
+ char filename[NAME_MAX];
+ const char *pathname = NULL;
+- int rc = -ENOMEM, action, must_appraise;
++ int rc = 0, action, must_appraise = 0;
+ int pcr = CONFIG_IMA_MEASURE_PCR_IDX;
+ struct evm_ima_xattr_data *xattr_value = NULL;
+ int xattr_len = 0;
+@@ -202,17 +208,31 @@ static int process_measurement(struct file *file, char *buf, loff_t size,
+ if (action) {
+ iint = integrity_inode_get(inode);
+ if (!iint)
+- goto out;
++ rc = -ENOMEM;
+ }
+
+- if (violation_check) {
++ if (!rc && violation_check)
+ ima_rdwr_violation_check(file, iint, action & IMA_MEASURE,
+ &pathbuf, &pathname);
+- if (!action) {
+- rc = 0;
+- goto out_free;
+- }
+- }
++
++ inode_unlock(inode);
++
++ if (rc)
++ goto out;
++ if (!action)
++ goto out;
++
++ mutex_lock(&iint->mutex);
++
++ if (test_and_clear_bit(IMA_CHANGE_ATTR, &iint->atomic_flags))
++ /* reset appraisal flags if ima_inode_post_setattr was called */
++ iint->flags &= ~(IMA_APPRAISE | IMA_APPRAISED |
++ IMA_APPRAISE_SUBMASK | IMA_APPRAISED_SUBMASK |
++ IMA_ACTION_FLAGS);
++
++ if (test_and_clear_bit(IMA_CHANGE_XATTR, &iint->atomic_flags))
++ /* reset all flags if ima_inode_setxattr was called */
++ iint->flags &= ~IMA_DONE_MASK;
+
+ /* Determine if already appraised/measured based on bitmask
+ * (IMA_MEASURE, IMA_MEASURED, IMA_XXXX_APPRAISE, IMA_XXXX_APPRAISED,
+@@ -230,7 +250,7 @@ static int process_measurement(struct file *file, char *buf, loff_t size,
+ if (!action) {
+ if (must_appraise)
+ rc = ima_get_cache_status(iint, func);
+- goto out_digsig;
++ goto out_locked;
+ }
+
+ template_desc = ima_template_desc_current();
+@@ -242,11 +262,8 @@ static int process_measurement(struct file *file, char *buf, loff_t size,
+ hash_algo = ima_get_hash_algo(xattr_value, xattr_len);
+
+ rc = ima_collect_measurement(iint, file, buf, size, hash_algo);
+- if (rc != 0) {
+- if (file->f_flags & O_DIRECT)
+- rc = (iint->flags & IMA_PERMIT_DIRECTIO) ? 0 : -EACCES;
+- goto out_digsig;
+- }
++ if (rc != 0 && rc != -EBADF && rc != -EINVAL)
++ goto out_locked;
+
+ if (!pathbuf) /* ima_rdwr_violation possibly pre-fetched */
+ pathname = ima_d_path(&file->f_path, &pathbuf, filename);
+@@ -254,24 +271,32 @@ static int process_measurement(struct file *file, char *buf, loff_t size,
+ if (action & IMA_MEASURE)
+ ima_store_measurement(iint, file, pathname,
+ xattr_value, xattr_len, pcr);
+- if (action & IMA_APPRAISE_SUBMASK)
++ if (rc == 0 && (action & IMA_APPRAISE_SUBMASK)) {
++ inode_lock(inode);
+ rc = ima_appraise_measurement(func, iint, file, pathname,
+ xattr_value, xattr_len, opened);
++ inode_unlock(inode);
++ }
+ if (action & IMA_AUDIT)
+ ima_audit_measurement(iint, pathname);
+
+-out_digsig:
+- if ((mask & MAY_WRITE) && (iint->flags & IMA_DIGSIG) &&
++ if ((file->f_flags & O_DIRECT) && (iint->flags & IMA_PERMIT_DIRECTIO))
++ rc = 0;
++out_locked:
++ if ((mask & MAY_WRITE) && test_bit(IMA_DIGSIG, &iint->atomic_flags) &&
+ !(iint->flags & IMA_NEW_FILE))
+ rc = -EACCES;
++ mutex_unlock(&iint->mutex);
+ kfree(xattr_value);
+-out_free:
++out:
+ if (pathbuf)
+ __putname(pathbuf);
+-out:
+- inode_unlock(inode);
+- if ((rc && must_appraise) && (ima_appraise & IMA_APPRAISE_ENFORCE))
+- return -EACCES;
++ if (must_appraise) {
++ if (rc && (ima_appraise & IMA_APPRAISE_ENFORCE))
++ return -EACCES;
++ if (file->f_mode & FMODE_WRITE)
++ set_bit(IMA_UPDATE_XATTR, &iint->atomic_flags);
++ }
+ return 0;
+ }
+
+diff --git a/security/integrity/integrity.h b/security/integrity/integrity.h
+index a53e7e4ab06c..f43ac351c172 100644
+--- a/security/integrity/integrity.h
++++ b/security/integrity/integrity.h
+@@ -29,10 +29,10 @@
+ /* iint cache flags */
+ #define IMA_ACTION_FLAGS 0xff000000
+ #define IMA_ACTION_RULE_FLAGS 0x06000000
+-#define IMA_DIGSIG 0x01000000
+-#define IMA_DIGSIG_REQUIRED 0x02000000
+-#define IMA_PERMIT_DIRECTIO 0x04000000
+-#define IMA_NEW_FILE 0x08000000
++#define IMA_DIGSIG_REQUIRED 0x01000000
++#define IMA_PERMIT_DIRECTIO 0x02000000
++#define IMA_NEW_FILE 0x04000000
++#define EVM_IMMUTABLE_DIGSIG 0x08000000
+
+ #define IMA_DO_MASK (IMA_MEASURE | IMA_APPRAISE | IMA_AUDIT | \
+ IMA_APPRAISE_SUBMASK)
+@@ -53,11 +53,19 @@
+ #define IMA_APPRAISED_SUBMASK (IMA_FILE_APPRAISED | IMA_MMAP_APPRAISED | \
+ IMA_BPRM_APPRAISED | IMA_READ_APPRAISED)
+
++/* iint cache atomic_flags */
++#define IMA_CHANGE_XATTR 0
++#define IMA_UPDATE_XATTR 1
++#define IMA_CHANGE_ATTR 2
++#define IMA_DIGSIG 3
++#define IMA_MUST_MEASURE 4
++
+ enum evm_ima_xattr_type {
+ IMA_XATTR_DIGEST = 0x01,
+ EVM_XATTR_HMAC,
+ EVM_IMA_XATTR_DIGSIG,
+ IMA_XATTR_DIGEST_NG,
++ EVM_XATTR_PORTABLE_DIGSIG,
+ IMA_XATTR_LAST
+ };
+
+@@ -100,10 +108,12 @@ struct signature_v2_hdr {
+ /* integrity data associated with an inode */
+ struct integrity_iint_cache {
+ struct rb_node rb_node; /* rooted in integrity_iint_tree */
++ struct mutex mutex; /* protects: version, flags, digest */
+ struct inode *inode; /* back pointer to inode in question */
+ u64 version; /* track inode changes */
+ unsigned long flags;
+ unsigned long measured_pcrs;
++ unsigned long atomic_flags;
+ enum integrity_status ima_file_status:4;
+ enum integrity_status ima_mmap_status:4;
+ enum integrity_status ima_bprm_status:4;
+diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
+index 6e8c8056d7ad..6688ac5b991e 100644
+--- a/security/selinux/ss/policydb.c
++++ b/security/selinux/ss/policydb.c
+@@ -1099,7 +1099,7 @@ static int str_read(char **strp, gfp_t flags, void *fp, u32 len)
+ if ((len == 0) || (len == (u32)-1))
+ return -EINVAL;
+
+- str = kmalloc(len + 1, flags);
++ str = kmalloc(len + 1, flags | __GFP_NOWARN);
+ if (!str)
+ return -ENOMEM;
+
+diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
+index b4f954e6d2db..df358e838b5b 100644
+--- a/sound/core/oss/pcm_oss.c
++++ b/sound/core/oss/pcm_oss.c
+@@ -1062,8 +1062,8 @@ static int snd_pcm_oss_change_params_locked(struct snd_pcm_substream *substream)
+ runtime->oss.channels = params_channels(params);
+ runtime->oss.rate = params_rate(params);
+
+- vfree(runtime->oss.buffer);
+- runtime->oss.buffer = vmalloc(runtime->oss.period_bytes);
++ kvfree(runtime->oss.buffer);
++ runtime->oss.buffer = kvzalloc(runtime->oss.period_bytes, GFP_KERNEL);
+ if (!runtime->oss.buffer) {
+ err = -ENOMEM;
+ goto failure;
+@@ -2328,7 +2328,7 @@ static void snd_pcm_oss_release_substream(struct snd_pcm_substream *substream)
+ {
+ struct snd_pcm_runtime *runtime;
+ runtime = substream->runtime;
+- vfree(runtime->oss.buffer);
++ kvfree(runtime->oss.buffer);
+ runtime->oss.buffer = NULL;
+ #ifdef CONFIG_SND_PCM_OSS_PLUGINS
+ snd_pcm_oss_plugin_clear(substream);
+diff --git a/sound/core/oss/pcm_plugin.c b/sound/core/oss/pcm_plugin.c
+index 85a56af104bd..617845d4a811 100644
+--- a/sound/core/oss/pcm_plugin.c
++++ b/sound/core/oss/pcm_plugin.c
+@@ -66,8 +66,8 @@ static int snd_pcm_plugin_alloc(struct snd_pcm_plugin *plugin, snd_pcm_uframes_t
+ return -ENXIO;
+ size /= 8;
+ if (plugin->buf_frames < frames) {
+- vfree(plugin->buf);
+- plugin->buf = vmalloc(size);
++ kvfree(plugin->buf);
++ plugin->buf = kvzalloc(size, GFP_KERNEL);
+ plugin->buf_frames = frames;
+ }
+ if (!plugin->buf) {
+@@ -191,7 +191,7 @@ int snd_pcm_plugin_free(struct snd_pcm_plugin *plugin)
+ if (plugin->private_free)
+ plugin->private_free(plugin);
+ kfree(plugin->buf_channels);
+- vfree(plugin->buf);
++ kvfree(plugin->buf);
+ kfree(plugin);
+ return 0;
+ }
+diff --git a/tools/power/cpupower/bench/Makefile b/tools/power/cpupower/bench/Makefile
+index d79ab161cc75..f68b4bc55273 100644
+--- a/tools/power/cpupower/bench/Makefile
++++ b/tools/power/cpupower/bench/Makefile
+@@ -9,7 +9,7 @@ endif
+ ifeq ($(strip $(STATIC)),true)
+ LIBS = -L../ -L$(OUTPUT) -lm
+ OBJS = $(OUTPUT)main.o $(OUTPUT)parse.o $(OUTPUT)system.o $(OUTPUT)benchmark.o \
+- $(OUTPUT)../lib/cpufreq.o $(OUTPUT)../lib/sysfs.o
++ $(OUTPUT)../lib/cpufreq.o $(OUTPUT)../lib/cpupower.o
+ else
+ LIBS = -L../ -L$(OUTPUT) -lm -lcpupower
+ OBJS = $(OUTPUT)main.o $(OUTPUT)parse.o $(OUTPUT)system.o $(OUTPUT)benchmark.o
+diff --git a/tools/power/cpupower/lib/cpufreq.c b/tools/power/cpupower/lib/cpufreq.c
+index 1b993fe1ce23..0c0f3e3f0d80 100644
+--- a/tools/power/cpupower/lib/cpufreq.c
++++ b/tools/power/cpupower/lib/cpufreq.c
+@@ -28,7 +28,7 @@ static unsigned int sysfs_cpufreq_read_file(unsigned int cpu, const char *fname,
+
+ snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/cpufreq/%s",
+ cpu, fname);
+- return sysfs_read_file(path, buf, buflen);
++ return cpupower_read_sysfs(path, buf, buflen);
+ }
+
+ /* helper function to write a new value to a /sys file */
+diff --git a/tools/power/cpupower/lib/cpuidle.c b/tools/power/cpupower/lib/cpuidle.c
+index 9bd4c7655fdb..852d25462388 100644
+--- a/tools/power/cpupower/lib/cpuidle.c
++++ b/tools/power/cpupower/lib/cpuidle.c
+@@ -319,7 +319,7 @@ static unsigned int sysfs_cpuidle_read_file(const char *fname, char *buf,
+
+ snprintf(path, sizeof(path), PATH_TO_CPU "cpuidle/%s", fname);
+
+- return sysfs_read_file(path, buf, buflen);
++ return cpupower_read_sysfs(path, buf, buflen);
+ }
+
+
+diff --git a/tools/power/cpupower/lib/cpupower.c b/tools/power/cpupower/lib/cpupower.c
+index 9c395ec924de..9711d628b0f4 100644
+--- a/tools/power/cpupower/lib/cpupower.c
++++ b/tools/power/cpupower/lib/cpupower.c
+@@ -15,7 +15,7 @@
+ #include "cpupower.h"
+ #include "cpupower_intern.h"
+
+-unsigned int sysfs_read_file(const char *path, char *buf, size_t buflen)
++unsigned int cpupower_read_sysfs(const char *path, char *buf, size_t buflen)
+ {
+ int fd;
+ ssize_t numread;
+@@ -95,7 +95,7 @@ static int sysfs_topology_read_file(unsigned int cpu, const char *fname, int *re
+
+ snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/topology/%s",
+ cpu, fname);
+- if (sysfs_read_file(path, linebuf, MAX_LINE_LEN) == 0)
++ if (cpupower_read_sysfs(path, linebuf, MAX_LINE_LEN) == 0)
+ return -1;
+ *result = strtol(linebuf, &endp, 0);
+ if (endp == linebuf || errno == ERANGE)
+diff --git a/tools/power/cpupower/lib/cpupower_intern.h b/tools/power/cpupower/lib/cpupower_intern.h
+index 92affdfbe417..4887c76d23f8 100644
+--- a/tools/power/cpupower/lib/cpupower_intern.h
++++ b/tools/power/cpupower/lib/cpupower_intern.h
+@@ -3,4 +3,4 @@
+ #define MAX_LINE_LEN 4096
+ #define SYSFS_PATH_MAX 255
+
+-unsigned int sysfs_read_file(const char *path, char *buf, size_t buflen);
++unsigned int cpupower_read_sysfs(const char *path, char *buf, size_t buflen);