author:    Mike Pagano <mpagano@gentoo.org>  2016-05-04 19:51:26 -0400
committer: Mike Pagano <mpagano@gentoo.org>  2016-05-04 19:51:26 -0400
commit:    767fef5527df9042fe8c078084c89159c991d530 (patch)
tree:      3234839426313d027dfd0b1802a5d5e81541a92c
parent:    Linux patch 4.4.8 (diff)
download:  linux-patches-767fef55.tar.gz
           linux-patches-767fef55.tar.bz2
           linux-patches-767fef55.zip

Linux patch 4.4.9 (tag: 4.4-11)
-rw-r--r--  0000_README            |    4
-rw-r--r--  1008_linux-4.4.9.patch | 6094
2 files changed, 6098 insertions, 0 deletions
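
For reference, an incremental patch like this one is applied on top of the
previous point release; a minimal sketch (the tree and patch paths below are
illustrative, not part of this commit):

    # Illustrative only: apply the 4.4.8 -> 4.4.9 incremental patch
    cd linux-4.4.8
    patch -p1 < ../1008_linux-4.4.9.patch   # -p1 strips the a/ and b/ prefixes
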
diff --git a/0000_README b/0000_README
index 55963089..18110faf 100644
--- a/0000_README
+++ b/0000_README
@@ -75,6 +75,10 @@ Patch: 1007_linux-4.4.8.patch
From: http://www.kernel.org
Desc: Linux 4.4.8
+Patch: 1008_linux-4.4.9.patch
+From: http://www.kernel.org
+Desc: Linux 4.4.9
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1008_linux-4.4.9.patch b/1008_linux-4.4.9.patch
new file mode 100644
index 00000000..57b3baaf
--- /dev/null
+++ b/1008_linux-4.4.9.patch
@@ -0,0 +1,6094 @@
+diff --git a/Makefile b/Makefile
+index 1928fcd539cc..0722cdf52152 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 4
+-SUBLEVEL = 8
++SUBLEVEL = 9
+ EXTRAVERSION =
+ NAME = Blurry Fish Butt
+
+diff --git a/arch/arm/boot/dts/am43x-epos-evm.dts b/arch/arm/boot/dts/am43x-epos-evm.dts
+index 47954ed990f8..00707aac72fc 100644
+--- a/arch/arm/boot/dts/am43x-epos-evm.dts
++++ b/arch/arm/boot/dts/am43x-epos-evm.dts
+@@ -792,3 +792,8 @@
+ tx-num-evt = <32>;
+ rx-num-evt = <32>;
+ };
++
++&synctimer_32kclk {
++ assigned-clocks = <&mux_synctimer32k_ck>;
++ assigned-clock-parents = <&clkdiv32k_ick>;
++};
+diff --git a/arch/arm/boot/dts/armada-375.dtsi b/arch/arm/boot/dts/armada-375.dtsi
+index 7ccce7529b0c..cc952cf8ec30 100644
+--- a/arch/arm/boot/dts/armada-375.dtsi
++++ b/arch/arm/boot/dts/armada-375.dtsi
+@@ -529,7 +529,7 @@
+ };
+
+ sata@a0000 {
+- compatible = "marvell,orion-sata";
++ compatible = "marvell,armada-370-sata";
+ reg = <0xa0000 0x5000>;
+ interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gateclk 14>, <&gateclk 20>;
+diff --git a/arch/arm/boot/dts/armada-385-linksys.dtsi b/arch/arm/boot/dts/armada-385-linksys.dtsi
+index 3710755c6d76..85d2c377c332 100644
+--- a/arch/arm/boot/dts/armada-385-linksys.dtsi
++++ b/arch/arm/boot/dts/armada-385-linksys.dtsi
+@@ -117,7 +117,7 @@
+ };
+
+ /* USB part of the eSATA/USB 2.0 port */
+- usb@50000 {
++ usb@58000 {
+ status = "okay";
+ };
+
+diff --git a/arch/arm/boot/dts/pxa3xx.dtsi b/arch/arm/boot/dts/pxa3xx.dtsi
+index cf6998a0804d..564341af7e97 100644
+--- a/arch/arm/boot/dts/pxa3xx.dtsi
++++ b/arch/arm/boot/dts/pxa3xx.dtsi
+@@ -30,7 +30,7 @@
+ reg = <0x43100000 90>;
+ interrupts = <45>;
+ clocks = <&clks CLK_NAND>;
+- dmas = <&pdma 97>;
++ dmas = <&pdma 97 3>;
+ dma-names = "data";
+ #address-cells = <1>;
+ #size-cells = <1>;
+diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig
+index 3a10f1a8317a..bfd8bb371477 100644
+--- a/arch/arm/mach-exynos/Kconfig
++++ b/arch/arm/mach-exynos/Kconfig
+@@ -26,6 +26,7 @@ menuconfig ARCH_EXYNOS
+ select S5P_DEV_MFC
+ select SRAM
+ select THERMAL
++ select THERMAL_OF
+ select MFD_SYSCON
+ help
+ Support for SAMSUNG EXYNOS SoCs (EXYNOS4/5)
+diff --git a/arch/arm/mach-omap2/cpuidle34xx.c b/arch/arm/mach-omap2/cpuidle34xx.c
+index aa7b379e2661..2a3db0bd9e15 100644
+--- a/arch/arm/mach-omap2/cpuidle34xx.c
++++ b/arch/arm/mach-omap2/cpuidle34xx.c
+@@ -34,6 +34,7 @@
+ #include "pm.h"
+ #include "control.h"
+ #include "common.h"
++#include "soc.h"
+
+ /* Mach specific information to be recorded in the C-state driver_data */
+ struct omap3_idle_statedata {
+@@ -315,6 +316,69 @@ static struct cpuidle_driver omap3_idle_driver = {
+ .safe_state_index = 0,
+ };
+
++/*
++ * Numbers based on measurements made in October 2009 for PM optimized kernel
++ * with CPU freq enabled on device Nokia N900. Assumes OPP2 (main idle OPP,
++ * and worst case latencies).
++ */
++static struct cpuidle_driver omap3430_idle_driver = {
++ .name = "omap3430_idle",
++ .owner = THIS_MODULE,
++ .states = {
++ {
++ .enter = omap3_enter_idle_bm,
++ .exit_latency = 110 + 162,
++ .target_residency = 5,
++ .name = "C1",
++ .desc = "MPU ON + CORE ON",
++ },
++ {
++ .enter = omap3_enter_idle_bm,
++ .exit_latency = 106 + 180,
++ .target_residency = 309,
++ .name = "C2",
++ .desc = "MPU ON + CORE ON",
++ },
++ {
++ .enter = omap3_enter_idle_bm,
++ .exit_latency = 107 + 410,
++ .target_residency = 46057,
++ .name = "C3",
++ .desc = "MPU RET + CORE ON",
++ },
++ {
++ .enter = omap3_enter_idle_bm,
++ .exit_latency = 121 + 3374,
++ .target_residency = 46057,
++ .name = "C4",
++ .desc = "MPU OFF + CORE ON",
++ },
++ {
++ .enter = omap3_enter_idle_bm,
++ .exit_latency = 855 + 1146,
++ .target_residency = 46057,
++ .name = "C5",
++ .desc = "MPU RET + CORE RET",
++ },
++ {
++ .enter = omap3_enter_idle_bm,
++ .exit_latency = 7580 + 4134,
++ .target_residency = 484329,
++ .name = "C6",
++ .desc = "MPU OFF + CORE RET",
++ },
++ {
++ .enter = omap3_enter_idle_bm,
++ .exit_latency = 7505 + 15274,
++ .target_residency = 484329,
++ .name = "C7",
++ .desc = "MPU OFF + CORE OFF",
++ },
++ },
++ .state_count = ARRAY_SIZE(omap3_idle_data),
++ .safe_state_index = 0,
++};
++
+ /* Public functions */
+
+ /**
+@@ -333,5 +397,8 @@ int __init omap3_idle_init(void)
+ if (!mpu_pd || !core_pd || !per_pd || !cam_pd)
+ return -ENODEV;
+
+- return cpuidle_register(&omap3_idle_driver, NULL);
++ if (cpu_is_omap3430())
++ return cpuidle_register(&omap3430_idle_driver, NULL);
++ else
++ return cpuidle_register(&omap3_idle_driver, NULL);
+ }
+diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
+index 3eaeaca5da05..3a911d8dea8b 100644
+--- a/arch/arm/mach-omap2/io.c
++++ b/arch/arm/mach-omap2/io.c
+@@ -368,6 +368,7 @@ void __init omap5_map_io(void)
+ void __init dra7xx_map_io(void)
+ {
+ iotable_init(dra7xx_io_desc, ARRAY_SIZE(dra7xx_io_desc));
++ omap_barriers_init();
+ }
+ #endif
+ /*
+diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
+index 8e0bd5939e5a..147c90e70b2e 100644
+--- a/arch/arm/mach-omap2/omap_hwmod.c
++++ b/arch/arm/mach-omap2/omap_hwmod.c
+@@ -1416,9 +1416,7 @@ static void _enable_sysc(struct omap_hwmod *oh)
+ (sf & SYSC_HAS_CLOCKACTIVITY))
+ _set_clockactivity(oh, oh->class->sysc->clockact, &v);
+
+- /* If the cached value is the same as the new value, skip the write */
+- if (oh->_sysc_cache != v)
+- _write_sysconfig(v, oh);
++ _write_sysconfig(v, oh);
+
+ /*
+ * Set the autoidle bit only after setting the smartidle bit
+@@ -1481,7 +1479,9 @@ static void _idle_sysc(struct omap_hwmod *oh)
+ _set_master_standbymode(oh, idlemode, &v);
+ }
+
+- _write_sysconfig(v, oh);
++ /* If the cached value is the same as the new value, skip the write */
++ if (oh->_sysc_cache != v)
++ _write_sysconfig(v, oh);
+ }
+
+ /**
+diff --git a/arch/arm/mach-prima2/Kconfig b/arch/arm/mach-prima2/Kconfig
+index 9ab8932403e5..56e55fd37d13 100644
+--- a/arch/arm/mach-prima2/Kconfig
++++ b/arch/arm/mach-prima2/Kconfig
+@@ -1,6 +1,7 @@
+ menuconfig ARCH_SIRF
+ bool "CSR SiRF" if ARCH_MULTI_V7
+ select ARCH_HAS_RESET_CONTROLLER
++ select RESET_CONTROLLER
+ select ARCH_REQUIRE_GPIOLIB
+ select GENERIC_IRQ_CHIP
+ select NO_IOPORT_MAP
+diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
+index eaa9cabf4066..c63868ae9a4a 100644
+--- a/arch/arm64/include/asm/pgtable.h
++++ b/arch/arm64/include/asm/pgtable.h
+@@ -69,11 +69,11 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
+ #define PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
+ #define PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
+
+-#define PROT_DEVICE_nGnRnE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRnE))
+-#define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE))
+-#define PROT_NORMAL_NC (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL_NC))
+-#define PROT_NORMAL_WT (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL_WT))
+-#define PROT_NORMAL (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL))
++#define PROT_DEVICE_nGnRnE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRnE))
++#define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRE))
++#define PROT_NORMAL_NC (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_NC))
++#define PROT_NORMAL_WT (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_WT))
++#define PROT_NORMAL (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL))
+
+ #define PROT_SECT_DEVICE_nGnRE (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE))
+ #define PROT_SECT_NORMAL (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))
+@@ -83,7 +83,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
+
+ #define PAGE_KERNEL __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE)
+ #define PAGE_KERNEL_RO __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
+-#define PAGE_KERNEL_ROX __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
++#define PAGE_KERNEL_ROX __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
+ #define PAGE_KERNEL_EXEC __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE)
+ #define PAGE_KERNEL_EXEC_CONT __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_CONT)
+
+@@ -155,6 +155,7 @@ extern struct page *empty_zero_page;
+ #define pte_write(pte) (!!(pte_val(pte) & PTE_WRITE))
+ #define pte_exec(pte) (!(pte_val(pte) & PTE_UXN))
+ #define pte_cont(pte) (!!(pte_val(pte) & PTE_CONT))
++#define pte_user(pte) (!!(pte_val(pte) & PTE_USER))
+
+ #ifdef CONFIG_ARM64_HW_AFDBM
+ #define pte_hw_dirty(pte) (pte_write(pte) && !(pte_val(pte) & PTE_RDONLY))
+@@ -165,8 +166,6 @@ extern struct page *empty_zero_page;
+ #define pte_dirty(pte) (pte_sw_dirty(pte) || pte_hw_dirty(pte))
+
+ #define pte_valid(pte) (!!(pte_val(pte) & PTE_VALID))
+-#define pte_valid_user(pte) \
+- ((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))
+ #define pte_valid_not_user(pte) \
+ ((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)
+
+@@ -264,13 +263,13 @@ extern void __sync_icache_dcache(pte_t pteval, unsigned long addr);
+ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte)
+ {
+- if (pte_valid_user(pte)) {
+- if (!pte_special(pte) && pte_exec(pte))
+- __sync_icache_dcache(pte, addr);
++ if (pte_present(pte)) {
+ if (pte_sw_dirty(pte) && pte_write(pte))
+ pte_val(pte) &= ~PTE_RDONLY;
+ else
+ pte_val(pte) |= PTE_RDONLY;
++ if (pte_user(pte) && pte_exec(pte) && !pte_special(pte))
++ __sync_icache_dcache(pte, addr);
+ }
+
+ /*
+@@ -641,6 +640,7 @@ extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
+ * bits 0-1: present (must be zero)
+ * bits 2-7: swap type
+ * bits 8-57: swap offset
++ * bit 58: PTE_PROT_NONE (must be zero)
+ */
+ #define __SWP_TYPE_SHIFT 2
+ #define __SWP_TYPE_BITS 6
+diff --git a/arch/powerpc/include/uapi/asm/cputable.h b/arch/powerpc/include/uapi/asm/cputable.h
+index 43686043e297..2734c005da21 100644
+--- a/arch/powerpc/include/uapi/asm/cputable.h
++++ b/arch/powerpc/include/uapi/asm/cputable.h
+@@ -31,6 +31,7 @@
+ #define PPC_FEATURE_PSERIES_PERFMON_COMPAT \
+ 0x00000040
+
++/* Reserved - do not use 0x00000004 */
+ #define PPC_FEATURE_TRUE_LE 0x00000002
+ #define PPC_FEATURE_PPC_LE 0x00000001
+
+diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
+index 7030b035905d..a15fe1d4e84a 100644
+--- a/arch/powerpc/kernel/prom.c
++++ b/arch/powerpc/kernel/prom.c
+@@ -148,23 +148,25 @@ static struct ibm_pa_feature {
+ unsigned long cpu_features; /* CPU_FTR_xxx bit */
+ unsigned long mmu_features; /* MMU_FTR_xxx bit */
+ unsigned int cpu_user_ftrs; /* PPC_FEATURE_xxx bit */
++ unsigned int cpu_user_ftrs2; /* PPC_FEATURE2_xxx bit */
+ unsigned char pabyte; /* byte number in ibm,pa-features */
+ unsigned char pabit; /* bit number (big-endian) */
+ unsigned char invert; /* if 1, pa bit set => clear feature */
+ } ibm_pa_features[] __initdata = {
+- {0, 0, PPC_FEATURE_HAS_MMU, 0, 0, 0},
+- {0, 0, PPC_FEATURE_HAS_FPU, 0, 1, 0},
+- {CPU_FTR_CTRL, 0, 0, 0, 3, 0},
+- {CPU_FTR_NOEXECUTE, 0, 0, 0, 6, 0},
+- {CPU_FTR_NODSISRALIGN, 0, 0, 1, 1, 1},
+- {0, MMU_FTR_CI_LARGE_PAGE, 0, 1, 2, 0},
+- {CPU_FTR_REAL_LE, PPC_FEATURE_TRUE_LE, 5, 0, 0},
++ {0, 0, PPC_FEATURE_HAS_MMU, 0, 0, 0, 0},
++ {0, 0, PPC_FEATURE_HAS_FPU, 0, 0, 1, 0},
++ {CPU_FTR_CTRL, 0, 0, 0, 0, 3, 0},
++ {CPU_FTR_NOEXECUTE, 0, 0, 0, 0, 6, 0},
++ {CPU_FTR_NODSISRALIGN, 0, 0, 0, 1, 1, 1},
++ {0, MMU_FTR_CI_LARGE_PAGE, 0, 0, 1, 2, 0},
++ {CPU_FTR_REAL_LE, 0, PPC_FEATURE_TRUE_LE, 0, 5, 0, 0},
+ /*
+- * If the kernel doesn't support TM (ie. CONFIG_PPC_TRANSACTIONAL_MEM=n),
+- * we don't want to turn on CPU_FTR_TM here, so we use CPU_FTR_TM_COMP
+- * which is 0 if the kernel doesn't support TM.
++ * If the kernel doesn't support TM (ie CONFIG_PPC_TRANSACTIONAL_MEM=n),
++ * we don't want to turn on TM here, so we use the *_COMP versions
++ * which are 0 if the kernel doesn't support TM.
+ */
+- {CPU_FTR_TM_COMP, 0, 0, 22, 0, 0},
++ {CPU_FTR_TM_COMP, 0, 0,
++ PPC_FEATURE2_HTM_COMP|PPC_FEATURE2_HTM_NOSC_COMP, 22, 0, 0},
+ };
+
+ static void __init scan_features(unsigned long node, const unsigned char *ftrs,
+@@ -195,10 +197,12 @@ static void __init scan_features(unsigned long node, const unsigned char *ftrs,
+ if (bit ^ fp->invert) {
+ cur_cpu_spec->cpu_features |= fp->cpu_features;
+ cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftrs;
++ cur_cpu_spec->cpu_user_features2 |= fp->cpu_user_ftrs2;
+ cur_cpu_spec->mmu_features |= fp->mmu_features;
+ } else {
+ cur_cpu_spec->cpu_features &= ~fp->cpu_features;
+ cur_cpu_spec->cpu_user_features &= ~fp->cpu_user_ftrs;
++ cur_cpu_spec->cpu_user_features2 &= ~fp->cpu_user_ftrs2;
+ cur_cpu_spec->mmu_features &= ~fp->mmu_features;
+ }
+ }
+diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
+index 2b2ced9dc00a..6dafabb6ae1a 100644
+--- a/arch/s390/include/asm/pci.h
++++ b/arch/s390/include/asm/pci.h
+@@ -45,7 +45,8 @@ struct zpci_fmb {
+ u64 rpcit_ops;
+ u64 dma_rbytes;
+ u64 dma_wbytes;
+-} __packed __aligned(64);
++ u64 pad[2];
++} __packed __aligned(128);
+
+ enum zpci_state {
+ ZPCI_FN_STATE_RESERVED,
+diff --git a/arch/x86/crypto/sha-mb/sha1_mb.c b/arch/x86/crypto/sha-mb/sha1_mb.c
+index a841e9765bd6..8381c09d2870 100644
+--- a/arch/x86/crypto/sha-mb/sha1_mb.c
++++ b/arch/x86/crypto/sha-mb/sha1_mb.c
+@@ -453,10 +453,10 @@ static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,
+
+ req = cast_mcryptd_ctx_to_req(req_ctx);
+ if (irqs_disabled())
+- rctx->complete(&req->base, ret);
++ req_ctx->complete(&req->base, ret);
+ else {
+ local_bh_disable();
+- rctx->complete(&req->base, ret);
++ req_ctx->complete(&req->base, ret);
+ local_bh_enable();
+ }
+ }
+diff --git a/arch/x86/include/asm/hugetlb.h b/arch/x86/include/asm/hugetlb.h
+index f8a29d2c97b0..e6a8613fbfb0 100644
+--- a/arch/x86/include/asm/hugetlb.h
++++ b/arch/x86/include/asm/hugetlb.h
+@@ -4,6 +4,7 @@
+ #include <asm/page.h>
+ #include <asm-generic/hugetlb.h>
+
++#define hugepages_supported() cpu_has_pse
+
+ static inline int is_hugepage_only_range(struct mm_struct *mm,
+ unsigned long addr,
+diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
+index 7af2505f20c2..df6b4eeac0bd 100644
+--- a/arch/x86/kernel/apic/vector.c
++++ b/arch/x86/kernel/apic/vector.c
+@@ -254,7 +254,8 @@ static void clear_irq_vector(int irq, struct apic_chip_data *data)
+ struct irq_desc *desc;
+ int cpu, vector;
+
+- BUG_ON(!data->cfg.vector);
++ if (!data->cfg.vector)
++ return;
+
+ vector = data->cfg.vector;
+ for_each_cpu_and(cpu, data->domain, cpu_online_mask)
+diff --git a/arch/x86/kernel/cpu/mcheck/mce-genpool.c b/arch/x86/kernel/cpu/mcheck/mce-genpool.c
+index 0a850100c594..2658e2af74ec 100644
+--- a/arch/x86/kernel/cpu/mcheck/mce-genpool.c
++++ b/arch/x86/kernel/cpu/mcheck/mce-genpool.c
+@@ -29,7 +29,7 @@ static char gen_pool_buf[MCE_POOLSZ];
+ void mce_gen_pool_process(void)
+ {
+ struct llist_node *head;
+- struct mce_evt_llist *node;
++ struct mce_evt_llist *node, *tmp;
+ struct mce *mce;
+
+ head = llist_del_all(&mce_event_llist);
+@@ -37,7 +37,7 @@ void mce_gen_pool_process(void)
+ return;
+
+ head = llist_reverse_order(head);
+- llist_for_each_entry(node, head, llnode) {
++ llist_for_each_entry_safe(node, tmp, head, llnode) {
+ mce = &node->mce;
+ atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, mce);
+ gen_pool_free(mce_evt_pool, (unsigned long)node, sizeof(*node));
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 7eb4ebd3ebea..605cea75eb0d 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -697,7 +697,6 @@ static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
+ if ((xcr0 & XFEATURE_MASK_AVX512) != XFEATURE_MASK_AVX512)
+ return 1;
+ }
+- kvm_put_guest_xcr0(vcpu);
+ vcpu->arch.xcr0 = xcr0;
+
+ if ((xcr0 ^ old_xcr0) & XFEATURE_MASK_EXTEND)
+@@ -6495,8 +6494,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
+ kvm_x86_ops->prepare_guest_switch(vcpu);
+ if (vcpu->fpu_active)
+ kvm_load_guest_fpu(vcpu);
+- kvm_load_guest_xcr0(vcpu);
+-
+ vcpu->mode = IN_GUEST_MODE;
+
+ srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
+@@ -6519,6 +6516,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
+ goto cancel_injection;
+ }
+
++ kvm_load_guest_xcr0(vcpu);
++
+ if (req_immediate_exit)
+ smp_send_reschedule(vcpu->cpu);
+
+@@ -6568,6 +6567,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
+ vcpu->mode = OUTSIDE_GUEST_MODE;
+ smp_wmb();
+
++ kvm_put_guest_xcr0(vcpu);
++
+ /* Interrupt is enabled by handle_external_intr() */
+ kvm_x86_ops->handle_external_intr(vcpu);
+
+@@ -7215,7 +7216,6 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
+ * and assume host would use all available bits.
+ * Guest xcr0 would be loaded later.
+ */
+- kvm_put_guest_xcr0(vcpu);
+ vcpu->guest_fpu_loaded = 1;
+ __kernel_fpu_begin();
+ __copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state);
+@@ -7224,8 +7224,6 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
+
+ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
+ {
+- kvm_put_guest_xcr0(vcpu);
+-
+ if (!vcpu->guest_fpu_loaded) {
+ vcpu->fpu_counter = 0;
+ return;
+diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c
+index 637ab34ed632..ddb2244b06a1 100644
+--- a/arch/x86/mm/kmmio.c
++++ b/arch/x86/mm/kmmio.c
+@@ -33,7 +33,7 @@
+ struct kmmio_fault_page {
+ struct list_head list;
+ struct kmmio_fault_page *release_next;
+- unsigned long page; /* location of the fault page */
++ unsigned long addr; /* the requested address */
+ pteval_t old_presence; /* page presence prior to arming */
+ bool armed;
+
+@@ -70,9 +70,16 @@ unsigned int kmmio_count;
+ static struct list_head kmmio_page_table[KMMIO_PAGE_TABLE_SIZE];
+ static LIST_HEAD(kmmio_probes);
+
+-static struct list_head *kmmio_page_list(unsigned long page)
++static struct list_head *kmmio_page_list(unsigned long addr)
+ {
+- return &kmmio_page_table[hash_long(page, KMMIO_PAGE_HASH_BITS)];
++ unsigned int l;
++ pte_t *pte = lookup_address(addr, &l);
++
++ if (!pte)
++ return NULL;
++ addr &= page_level_mask(l);
++
++ return &kmmio_page_table[hash_long(addr, KMMIO_PAGE_HASH_BITS)];
+ }
+
+ /* Accessed per-cpu */
+@@ -98,15 +105,19 @@ static struct kmmio_probe *get_kmmio_probe(unsigned long addr)
+ }
+
+ /* You must be holding RCU read lock. */
+-static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long page)
++static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long addr)
+ {
+ struct list_head *head;
+ struct kmmio_fault_page *f;
++ unsigned int l;
++ pte_t *pte = lookup_address(addr, &l);
+
+- page &= PAGE_MASK;
+- head = kmmio_page_list(page);
++ if (!pte)
++ return NULL;
++ addr &= page_level_mask(l);
++ head = kmmio_page_list(addr);
+ list_for_each_entry_rcu(f, head, list) {
+- if (f->page == page)
++ if (f->addr == addr)
+ return f;
+ }
+ return NULL;
+@@ -137,10 +148,10 @@ static void clear_pte_presence(pte_t *pte, bool clear, pteval_t *old)
+ static int clear_page_presence(struct kmmio_fault_page *f, bool clear)
+ {
+ unsigned int level;
+- pte_t *pte = lookup_address(f->page, &level);
++ pte_t *pte = lookup_address(f->addr, &level);
+
+ if (!pte) {
+- pr_err("no pte for page 0x%08lx\n", f->page);
++ pr_err("no pte for addr 0x%08lx\n", f->addr);
+ return -1;
+ }
+
+@@ -156,7 +167,7 @@ static int clear_page_presence(struct kmmio_fault_page *f, bool clear)
+ return -1;
+ }
+
+- __flush_tlb_one(f->page);
++ __flush_tlb_one(f->addr);
+ return 0;
+ }
+
+@@ -176,12 +187,12 @@ static int arm_kmmio_fault_page(struct kmmio_fault_page *f)
+ int ret;
+ WARN_ONCE(f->armed, KERN_ERR pr_fmt("kmmio page already armed.\n"));
+ if (f->armed) {
+- pr_warning("double-arm: page 0x%08lx, ref %d, old %d\n",
+- f->page, f->count, !!f->old_presence);
++ pr_warning("double-arm: addr 0x%08lx, ref %d, old %d\n",
++ f->addr, f->count, !!f->old_presence);
+ }
+ ret = clear_page_presence(f, true);
+- WARN_ONCE(ret < 0, KERN_ERR pr_fmt("arming 0x%08lx failed.\n"),
+- f->page);
++ WARN_ONCE(ret < 0, KERN_ERR pr_fmt("arming at 0x%08lx failed.\n"),
++ f->addr);
+ f->armed = true;
+ return ret;
+ }
+@@ -191,7 +202,7 @@ static void disarm_kmmio_fault_page(struct kmmio_fault_page *f)
+ {
+ int ret = clear_page_presence(f, false);
+ WARN_ONCE(ret < 0,
+- KERN_ERR "kmmio disarming 0x%08lx failed.\n", f->page);
++ KERN_ERR "kmmio disarming at 0x%08lx failed.\n", f->addr);
+ f->armed = false;
+ }
+
+@@ -215,6 +226,12 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
+ struct kmmio_context *ctx;
+ struct kmmio_fault_page *faultpage;
+ int ret = 0; /* default to fault not handled */
++ unsigned long page_base = addr;
++ unsigned int l;
++ pte_t *pte = lookup_address(addr, &l);
++ if (!pte)
++ return -EINVAL;
++ page_base &= page_level_mask(l);
+
+ /*
+ * Preemption is now disabled to prevent process switch during
+@@ -227,7 +244,7 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
+ preempt_disable();
+ rcu_read_lock();
+
+- faultpage = get_kmmio_fault_page(addr);
++ faultpage = get_kmmio_fault_page(page_base);
+ if (!faultpage) {
+ /*
+ * Either this page fault is not caused by kmmio, or
+@@ -239,7 +256,7 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
+
+ ctx = &get_cpu_var(kmmio_ctx);
+ if (ctx->active) {
+- if (addr == ctx->addr) {
++ if (page_base == ctx->addr) {
+ /*
+ * A second fault on the same page means some other
+ * condition needs handling by do_page_fault(), the
+@@ -267,9 +284,9 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
+ ctx->active++;
+
+ ctx->fpage = faultpage;
+- ctx->probe = get_kmmio_probe(addr);
++ ctx->probe = get_kmmio_probe(page_base);
+ ctx->saved_flags = (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF));
+- ctx->addr = addr;
++ ctx->addr = page_base;
+
+ if (ctx->probe && ctx->probe->pre_handler)
+ ctx->probe->pre_handler(ctx->probe, regs, addr);
+@@ -354,12 +371,11 @@ out:
+ }
+
+ /* You must be holding kmmio_lock. */
+-static int add_kmmio_fault_page(unsigned long page)
++static int add_kmmio_fault_page(unsigned long addr)
+ {
+ struct kmmio_fault_page *f;
+
+- page &= PAGE_MASK;
+- f = get_kmmio_fault_page(page);
++ f = get_kmmio_fault_page(addr);
+ if (f) {
+ if (!f->count)
+ arm_kmmio_fault_page(f);
+@@ -372,26 +388,25 @@ static int add_kmmio_fault_page(unsigned long page)
+ return -1;
+
+ f->count = 1;
+- f->page = page;
++ f->addr = addr;
+
+ if (arm_kmmio_fault_page(f)) {
+ kfree(f);
+ return -1;
+ }
+
+- list_add_rcu(&f->list, kmmio_page_list(f->page));
++ list_add_rcu(&f->list, kmmio_page_list(f->addr));
+
+ return 0;
+ }
+
+ /* You must be holding kmmio_lock. */
+-static void release_kmmio_fault_page(unsigned long page,
++static void release_kmmio_fault_page(unsigned long addr,
+ struct kmmio_fault_page **release_list)
+ {
+ struct kmmio_fault_page *f;
+
+- page &= PAGE_MASK;
+- f = get_kmmio_fault_page(page);
++ f = get_kmmio_fault_page(addr);
+ if (!f)
+ return;
+
+@@ -420,18 +435,27 @@ int register_kmmio_probe(struct kmmio_probe *p)
+ int ret = 0;
+ unsigned long size = 0;
+ const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
++ unsigned int l;
++ pte_t *pte;
+
+ spin_lock_irqsave(&kmmio_lock, flags);
+ if (get_kmmio_probe(p->addr)) {
+ ret = -EEXIST;
+ goto out;
+ }
++
++ pte = lookup_address(p->addr, &l);
++ if (!pte) {
++ ret = -EINVAL;
++ goto out;
++ }
++
+ kmmio_count++;
+ list_add_rcu(&p->list, &kmmio_probes);
+ while (size < size_lim) {
+ if (add_kmmio_fault_page(p->addr + size))
+ pr_err("Unable to set page fault.\n");
+- size += PAGE_SIZE;
++ size += page_level_size(l);
+ }
+ out:
+ spin_unlock_irqrestore(&kmmio_lock, flags);
+@@ -506,11 +530,17 @@ void unregister_kmmio_probe(struct kmmio_probe *p)
+ const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
+ struct kmmio_fault_page *release_list = NULL;
+ struct kmmio_delayed_release *drelease;
++ unsigned int l;
++ pte_t *pte;
++
++ pte = lookup_address(p->addr, &l);
++ if (!pte)
++ return;
+
+ spin_lock_irqsave(&kmmio_lock, flags);
+ while (size < size_lim) {
+ release_kmmio_fault_page(p->addr + size, &release_list);
+- size += PAGE_SIZE;
++ size += page_level_size(l);
+ }
+ list_del_rcu(&p->list);
+ kmmio_count--;
+diff --git a/block/partition-generic.c b/block/partition-generic.c
+index 746935a5973c..a241e3900bc9 100644
+--- a/block/partition-generic.c
++++ b/block/partition-generic.c
+@@ -349,15 +349,20 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno,
+ goto out_del;
+ }
+
++ err = hd_ref_init(p);
++ if (err) {
++ if (flags & ADDPART_FLAG_WHOLEDISK)
++ goto out_remove_file;
++ goto out_del;
++ }
++
+ /* everything is up and running, commence */
+ rcu_assign_pointer(ptbl->part[partno], p);
+
+ /* suppress uevent if the disk suppresses it */
+ if (!dev_get_uevent_suppress(ddev))
+ kobject_uevent(&pdev->kobj, KOBJ_ADD);
+-
+- if (!hd_ref_init(p))
+- return p;
++ return p;
+
+ out_free_info:
+ free_part_info(p);
+@@ -366,6 +371,8 @@ out_free_stats:
+ out_free:
+ kfree(p);
+ return ERR_PTR(err);
++out_remove_file:
++ device_remove_file(pdev, &dev_attr_whole_disk);
+ out_del:
+ kobject_put(p->holder_dir);
+ device_del(pdev);
+diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
+index 65f50eccd49b..a48824deabc5 100644
+--- a/drivers/base/power/domain.c
++++ b/drivers/base/power/domain.c
+@@ -1381,7 +1381,7 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
+
+ mutex_lock(&genpd->lock);
+
+- if (!list_empty(&subdomain->slave_links) || subdomain->device_count) {
++ if (!list_empty(&subdomain->master_links) || subdomain->device_count) {
+ pr_warn("%s: unable to remove subdomain %s\n", genpd->name,
+ subdomain->name);
+ ret = -EBUSY;
+diff --git a/drivers/base/power/opp/core.c b/drivers/base/power/opp/core.c
+index b8e76f75073b..f8580900c273 100644
+--- a/drivers/base/power/opp/core.c
++++ b/drivers/base/power/opp/core.c
+@@ -809,8 +809,14 @@ static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev)
+ }
+
+ opp->u_volt = microvolt[0];
+- opp->u_volt_min = microvolt[1];
+- opp->u_volt_max = microvolt[2];
++
++ if (count == 1) {
++ opp->u_volt_min = opp->u_volt;
++ opp->u_volt_max = opp->u_volt;
++ } else {
++ opp->u_volt_min = microvolt[1];
++ opp->u_volt_max = microvolt[2];
++ }
+
+ if (!of_property_read_u32(opp->np, "opp-microamp", &val))
+ opp->u_amp = val;
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index 423f4ca7d712..80cf8add46ff 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -488,6 +488,12 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
+ bvec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
+ iov_iter_bvec(&iter, ITER_BVEC | rw, bvec,
+ bio_segments(bio), blk_rq_bytes(cmd->rq));
++ /*
++ * This bio may be started from the middle of the 'bvec'
++ * because of bio splitting, so offset from the bvec must
++ * be passed to iov iterator
++ */
++ iter.iov_offset = bio->bi_iter.bi_bvec_done;
+
+ cmd->iocb.ki_pos = pos;
+ cmd->iocb.ki_filp = file;
+diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
+index 562b5a4ca7b7..78a39f736c64 100644
+--- a/drivers/block/paride/pd.c
++++ b/drivers/block/paride/pd.c
+@@ -126,7 +126,7 @@
+ */
+ #include <linux/types.h>
+
+-static bool verbose = 0;
++static int verbose = 0;
+ static int major = PD_MAJOR;
+ static char *name = PD_NAME;
+ static int cluster = 64;
+@@ -161,7 +161,7 @@ enum {D_PRT, D_PRO, D_UNI, D_MOD, D_GEO, D_SBY, D_DLY, D_SLV};
+ static DEFINE_MUTEX(pd_mutex);
+ static DEFINE_SPINLOCK(pd_lock);
+
+-module_param(verbose, bool, 0);
++module_param(verbose, int, 0);
+ module_param(major, int, 0);
+ module_param(name, charp, 0);
+ module_param(cluster, int, 0);
+diff --git a/drivers/block/paride/pt.c b/drivers/block/paride/pt.c
+index 1740d75e8a32..216a94fed5b4 100644
+--- a/drivers/block/paride/pt.c
++++ b/drivers/block/paride/pt.c
+@@ -117,7 +117,7 @@
+
+ */
+
+-static bool verbose = 0;
++static int verbose = 0;
+ static int major = PT_MAJOR;
+ static char *name = PT_NAME;
+ static int disable = 0;
+@@ -152,7 +152,7 @@ static int (*drives[4])[6] = {&drive0, &drive1, &drive2, &drive3};
+
+ #include <asm/uaccess.h>
+
+-module_param(verbose, bool, 0);
++module_param(verbose, int, 0);
+ module_param(major, int, 0);
+ module_param(name, charp, 0);
+ module_param_array(drive0, int, NULL, 0);
+diff --git a/drivers/bus/imx-weim.c b/drivers/bus/imx-weim.c
+index e98d15eaa799..1827fc4d15c1 100644
+--- a/drivers/bus/imx-weim.c
++++ b/drivers/bus/imx-weim.c
+@@ -150,7 +150,7 @@ static int __init weim_parse_dt(struct platform_device *pdev,
+ return ret;
+ }
+
+- for_each_child_of_node(pdev->dev.of_node, child) {
++ for_each_available_child_of_node(pdev->dev.of_node, child) {
+ if (!child->name)
+ continue;
+
+diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
+index 98fb8821382d..f53b02a6bc05 100644
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -667,6 +667,11 @@ static int core_get_max_pstate(void)
+ if (err)
+ goto skip_tar;
+
++ /* For level 1 and 2, bits[23:16] contain the ratio */
++ if (tdp_ctrl)
++ tdp_ratio >>= 16;
++
++ tdp_ratio &= 0xff; /* ratios are only 8 bits long */
+ if (tdp_ratio - 1 == tar) {
+ max_pstate = tar;
+ pr_debug("max_pstate=TAC %x\n", max_pstate);
+diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
+index 3d9acc53d247..60fc0fa26fd3 100644
+--- a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
++++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
+@@ -225,6 +225,9 @@ static int ccp_aes_cmac_export(struct ahash_request *req, void *out)
+ struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
+ struct ccp_aes_cmac_exp_ctx state;
+
++ /* Don't let anything leak to 'out' */
++ memset(&state, 0, sizeof(state));
++
+ state.null_msg = rctx->null_msg;
+ memcpy(state.iv, rctx->iv, sizeof(state.iv));
+ state.buf_count = rctx->buf_count;
+diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
+index 8ef06fad8b14..ab9945f2cb7a 100644
+--- a/drivers/crypto/ccp/ccp-crypto-sha.c
++++ b/drivers/crypto/ccp/ccp-crypto-sha.c
+@@ -212,6 +212,9 @@ static int ccp_sha_export(struct ahash_request *req, void *out)
+ struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
+ struct ccp_sha_exp_ctx state;
+
++ /* Don't let anything leak to 'out' */
++ memset(&state, 0, sizeof(state));
++
+ state.type = rctx->type;
+ state.msg_bits = rctx->msg_bits;
+ state.first = rctx->first;
+diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
+index b6f9f42e2985..a04fea4d0063 100644
+--- a/drivers/crypto/talitos.c
++++ b/drivers/crypto/talitos.c
+@@ -63,6 +63,14 @@ static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
+ ptr->eptr = upper_32_bits(dma_addr);
+ }
+
++static void copy_talitos_ptr(struct talitos_ptr *dst_ptr,
++ struct talitos_ptr *src_ptr, bool is_sec1)
++{
++ dst_ptr->ptr = src_ptr->ptr;
++ if (!is_sec1)
++ dst_ptr->eptr = src_ptr->eptr;
++}
++
+ static void to_talitos_ptr_len(struct talitos_ptr *ptr, unsigned int len,
+ bool is_sec1)
+ {
+@@ -1083,21 +1091,20 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
+ sg_count = dma_map_sg(dev, areq->src, edesc->src_nents ?: 1,
+ (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
+ : DMA_TO_DEVICE);
+-
+ /* hmac data */
+ desc->ptr[1].len = cpu_to_be16(areq->assoclen);
+ if (sg_count > 1 &&
+ (ret = sg_to_link_tbl_offset(areq->src, sg_count, 0,
+ areq->assoclen,
+ &edesc->link_tbl[tbl_off])) > 1) {
+- tbl_off += ret;
+-
+ to_talitos_ptr(&desc->ptr[1], edesc->dma_link_tbl + tbl_off *
+ sizeof(struct talitos_ptr), 0);
+ desc->ptr[1].j_extent = DESC_PTR_LNKTBL_JUMP;
+
+ dma_sync_single_for_device(dev, edesc->dma_link_tbl,
+ edesc->dma_len, DMA_BIDIRECTIONAL);
++
++ tbl_off += ret;
+ } else {
+ to_talitos_ptr(&desc->ptr[1], sg_dma_address(areq->src), 0);
+ desc->ptr[1].j_extent = 0;
+@@ -1126,11 +1133,13 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
+ if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
+ sg_link_tbl_len += authsize;
+
+- if (sg_count > 1 &&
+- (ret = sg_to_link_tbl_offset(areq->src, sg_count, areq->assoclen,
+- sg_link_tbl_len,
+- &edesc->link_tbl[tbl_off])) > 1) {
+- tbl_off += ret;
++ if (sg_count == 1) {
++ to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src) +
++ areq->assoclen, 0);
++ } else if ((ret = sg_to_link_tbl_offset(areq->src, sg_count,
++ areq->assoclen, sg_link_tbl_len,
++ &edesc->link_tbl[tbl_off])) >
++ 1) {
+ desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
+ to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl +
+ tbl_off *
+@@ -1138,8 +1147,10 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
+ dma_sync_single_for_device(dev, edesc->dma_link_tbl,
+ edesc->dma_len,
+ DMA_BIDIRECTIONAL);
+- } else
+- to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src), 0);
++ tbl_off += ret;
++ } else {
++ copy_talitos_ptr(&desc->ptr[4], &edesc->link_tbl[tbl_off], 0);
++ }
+
+ /* cipher out */
+ desc->ptr[5].len = cpu_to_be16(cryptlen);
+@@ -1151,11 +1162,13 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
+
+ edesc->icv_ool = false;
+
+- if (sg_count > 1 &&
+- (sg_count = sg_to_link_tbl_offset(areq->dst, sg_count,
++ if (sg_count == 1) {
++ to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst) +
++ areq->assoclen, 0);
++ } else if ((sg_count =
++ sg_to_link_tbl_offset(areq->dst, sg_count,
+ areq->assoclen, cryptlen,
+- &edesc->link_tbl[tbl_off])) >
+- 1) {
++ &edesc->link_tbl[tbl_off])) > 1) {
+ struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
+
+ to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl +
+@@ -1178,8 +1191,9 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
+ edesc->dma_len, DMA_BIDIRECTIONAL);
+
+ edesc->icv_ool = true;
+- } else
+- to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst), 0);
++ } else {
++ copy_talitos_ptr(&desc->ptr[5], &edesc->link_tbl[tbl_off], 0);
++ }
+
+ /* iv out */
+ map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
+@@ -2519,21 +2533,11 @@ struct talitos_crypto_alg {
+ struct talitos_alg_template algt;
+ };
+
+-static int talitos_cra_init(struct crypto_tfm *tfm)
++static int talitos_init_common(struct talitos_ctx *ctx,
++ struct talitos_crypto_alg *talitos_alg)
+ {
+- struct crypto_alg *alg = tfm->__crt_alg;
+- struct talitos_crypto_alg *talitos_alg;
+- struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct talitos_private *priv;
+
+- if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
+- talitos_alg = container_of(__crypto_ahash_alg(alg),
+- struct talitos_crypto_alg,
+- algt.alg.hash);
+- else
+- talitos_alg = container_of(alg, struct talitos_crypto_alg,
+- algt.alg.crypto);
+-
+ /* update context with ptr to dev */
+ ctx->dev = talitos_alg->dev;
+
+@@ -2551,10 +2555,33 @@ static int talitos_cra_init(struct crypto_tfm *tfm)
+ return 0;
+ }
+
++static int talitos_cra_init(struct crypto_tfm *tfm)
++{
++ struct crypto_alg *alg = tfm->__crt_alg;
++ struct talitos_crypto_alg *talitos_alg;
++ struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
++
++ if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
++ talitos_alg = container_of(__crypto_ahash_alg(alg),
++ struct talitos_crypto_alg,
++ algt.alg.hash);
++ else
++ talitos_alg = container_of(alg, struct talitos_crypto_alg,
++ algt.alg.crypto);
++
++ return talitos_init_common(ctx, talitos_alg);
++}
++
+ static int talitos_cra_init_aead(struct crypto_aead *tfm)
+ {
+- talitos_cra_init(crypto_aead_tfm(tfm));
+- return 0;
++ struct aead_alg *alg = crypto_aead_alg(tfm);
++ struct talitos_crypto_alg *talitos_alg;
++ struct talitos_ctx *ctx = crypto_aead_ctx(tfm);
++
++ talitos_alg = container_of(alg, struct talitos_crypto_alg,
++ algt.alg.aead);
++
++ return talitos_init_common(ctx, talitos_alg);
+ }
+
+ static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
+diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
+index 4f099ea29f83..c66133b5e852 100644
+--- a/drivers/dma/dw/core.c
++++ b/drivers/dma/dw/core.c
+@@ -130,26 +130,14 @@ static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
+ static void dwc_initialize(struct dw_dma_chan *dwc)
+ {
+ struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+- struct dw_dma_slave *dws = dwc->chan.private;
+ u32 cfghi = DWC_CFGH_FIFO_MODE;
+ u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);
+
+ if (dwc->initialized == true)
+ return;
+
+- if (dws) {
+- /*
+- * We need controller-specific data to set up slave
+- * transfers.
+- */
+- BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);
+-
+- cfghi |= DWC_CFGH_DST_PER(dws->dst_id);
+- cfghi |= DWC_CFGH_SRC_PER(dws->src_id);
+- } else {
+- cfghi |= DWC_CFGH_DST_PER(dwc->dst_id);
+- cfghi |= DWC_CFGH_SRC_PER(dwc->src_id);
+- }
++ cfghi |= DWC_CFGH_DST_PER(dwc->dst_id);
++ cfghi |= DWC_CFGH_SRC_PER(dwc->src_id);
+
+ channel_writel(dwc, CFG_LO, cfglo);
+ channel_writel(dwc, CFG_HI, cfghi);
+@@ -936,7 +924,7 @@ bool dw_dma_filter(struct dma_chan *chan, void *param)
+ struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+ struct dw_dma_slave *dws = param;
+
+- if (!dws || dws->dma_dev != chan->device->dev)
++ if (dws->dma_dev != chan->device->dev)
+ return false;
+
+ /* We have to copy data since dws can be temporary storage */
+@@ -1160,6 +1148,14 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
+ * doesn't mean what you think it means), and status writeback.
+ */
+
++ /*
++ * We need controller-specific data to set up slave transfers.
++ */
++ if (chan->private && !dw_dma_filter(chan, chan->private)) {
++ dev_warn(chan2dev(chan), "Wrong controller-specific data\n");
++ return -EINVAL;
++ }
++
+ /* Enable controller here if needed */
+ if (!dw->in_use)
+ dw_dma_on(dw);
+@@ -1221,6 +1217,14 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
+ spin_lock_irqsave(&dwc->lock, flags);
+ list_splice_init(&dwc->free_list, &list);
+ dwc->descs_allocated = 0;
++
++ /* Clear custom channel configuration */
++ dwc->src_id = 0;
++ dwc->dst_id = 0;
++
++ dwc->src_master = 0;
++ dwc->dst_master = 0;
++
+ dwc->initialized = false;
+
+ /* Disable interrupts */
+diff --git a/drivers/dma/hsu/hsu.c b/drivers/dma/hsu/hsu.c
+index 823ad728aecf..efc02b98e6ba 100644
+--- a/drivers/dma/hsu/hsu.c
++++ b/drivers/dma/hsu/hsu.c
+@@ -135,7 +135,7 @@ static u32 hsu_dma_chan_get_sr(struct hsu_dma_chan *hsuc)
+ sr = hsu_chan_readl(hsuc, HSU_CH_SR);
+ spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
+
+- return sr;
++ return sr & ~(HSU_CH_SR_DESCE_ANY | HSU_CH_SR_CDESC_ANY);
+ }
+
+ irqreturn_t hsu_dma_irq(struct hsu_dma_chip *chip, unsigned short nr)
+diff --git a/drivers/dma/hsu/hsu.h b/drivers/dma/hsu/hsu.h
+index f06579c6d548..26da2865b025 100644
+--- a/drivers/dma/hsu/hsu.h
++++ b/drivers/dma/hsu/hsu.h
+@@ -41,6 +41,9 @@
+ #define HSU_CH_SR_DESCTO(x) BIT(8 + (x))
+ #define HSU_CH_SR_DESCTO_ANY (BIT(11) | BIT(10) | BIT(9) | BIT(8))
+ #define HSU_CH_SR_CHE BIT(15)
++#define HSU_CH_SR_DESCE(x) BIT(16 + (x))
++#define HSU_CH_SR_DESCE_ANY (BIT(19) | BIT(18) | BIT(17) | BIT(16))
++#define HSU_CH_SR_CDESC_ANY (BIT(31) | BIT(30))
+
+ /* Bits in HSU_CH_CR */
+ #define HSU_CH_CR_CHA BIT(0)
+diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
+index a59061e4221a..55f5d33f6dc7 100644
+--- a/drivers/dma/pxa_dma.c
++++ b/drivers/dma/pxa_dma.c
+@@ -122,6 +122,7 @@ struct pxad_chan {
+ struct pxad_device {
+ struct dma_device slave;
+ int nr_chans;
++ int nr_requestors;
+ void __iomem *base;
+ struct pxad_phy *phys;
+ spinlock_t phy_lock; /* Phy association */
+@@ -473,7 +474,7 @@ static void pxad_free_phy(struct pxad_chan *chan)
+ return;
+
+ /* clear the channel mapping in DRCMR */
+- if (chan->drcmr <= DRCMR_CHLNUM) {
++ if (chan->drcmr <= pdev->nr_requestors) {
+ reg = pxad_drcmr(chan->drcmr);
+ writel_relaxed(0, chan->phy->base + reg);
+ }
+@@ -509,6 +510,7 @@ static bool is_running_chan_misaligned(struct pxad_chan *chan)
+
+ static void phy_enable(struct pxad_phy *phy, bool misaligned)
+ {
++ struct pxad_device *pdev;
+ u32 reg, dalgn;
+
+ if (!phy->vchan)
+@@ -518,7 +520,8 @@ static void phy_enable(struct pxad_phy *phy, bool misaligned)
+ "%s(); phy=%p(%d) misaligned=%d\n", __func__,
+ phy, phy->idx, misaligned);
+
+- if (phy->vchan->drcmr <= DRCMR_CHLNUM) {
++ pdev = to_pxad_dev(phy->vchan->vc.chan.device);
++ if (phy->vchan->drcmr <= pdev->nr_requestors) {
+ reg = pxad_drcmr(phy->vchan->drcmr);
+ writel_relaxed(DRCMR_MAPVLD | phy->idx, phy->base + reg);
+ }
+@@ -914,6 +917,7 @@ static void pxad_get_config(struct pxad_chan *chan,
+ {
+ u32 maxburst = 0, dev_addr = 0;
+ enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
++ struct pxad_device *pdev = to_pxad_dev(chan->vc.chan.device);
+
+ *dcmd = 0;
+ if (dir == DMA_DEV_TO_MEM) {
+@@ -922,7 +926,7 @@ static void pxad_get_config(struct pxad_chan *chan,
+ dev_addr = chan->cfg.src_addr;
+ *dev_src = dev_addr;
+ *dcmd |= PXA_DCMD_INCTRGADDR;
+- if (chan->drcmr <= DRCMR_CHLNUM)
++ if (chan->drcmr <= pdev->nr_requestors)
+ *dcmd |= PXA_DCMD_FLOWSRC;
+ }
+ if (dir == DMA_MEM_TO_DEV) {
+@@ -931,7 +935,7 @@ static void pxad_get_config(struct pxad_chan *chan,
+ dev_addr = chan->cfg.dst_addr;
+ *dev_dst = dev_addr;
+ *dcmd |= PXA_DCMD_INCSRCADDR;
+- if (chan->drcmr <= DRCMR_CHLNUM)
++ if (chan->drcmr <= pdev->nr_requestors)
+ *dcmd |= PXA_DCMD_FLOWTRG;
+ }
+ if (dir == DMA_MEM_TO_MEM)
+@@ -1341,13 +1345,15 @@ static struct dma_chan *pxad_dma_xlate(struct of_phandle_args *dma_spec,
+
+ static int pxad_init_dmadev(struct platform_device *op,
+ struct pxad_device *pdev,
+- unsigned int nr_phy_chans)
++ unsigned int nr_phy_chans,
++ unsigned int nr_requestors)
+ {
+ int ret;
+ unsigned int i;
+ struct pxad_chan *c;
+
+ pdev->nr_chans = nr_phy_chans;
++ pdev->nr_requestors = nr_requestors;
+ INIT_LIST_HEAD(&pdev->slave.channels);
+ pdev->slave.device_alloc_chan_resources = pxad_alloc_chan_resources;
+ pdev->slave.device_free_chan_resources = pxad_free_chan_resources;
+@@ -1382,7 +1388,7 @@ static int pxad_probe(struct platform_device *op)
+ const struct of_device_id *of_id;
+ struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev);
+ struct resource *iores;
+- int ret, dma_channels = 0;
++ int ret, dma_channels = 0, nb_requestors = 0;
+ const enum dma_slave_buswidth widths =
+ DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES |
+ DMA_SLAVE_BUSWIDTH_4_BYTES;
+@@ -1399,13 +1405,23 @@ static int pxad_probe(struct platform_device *op)
+ return PTR_ERR(pdev->base);
+
+ of_id = of_match_device(pxad_dt_ids, &op->dev);
+- if (of_id)
++ if (of_id) {
+ of_property_read_u32(op->dev.of_node, "#dma-channels",
+ &dma_channels);
+- else if (pdata && pdata->dma_channels)
++ ret = of_property_read_u32(op->dev.of_node, "#dma-requests",
++ &nb_requestors);
++ if (ret) {
++ dev_warn(pdev->slave.dev,
++ "#dma-requests set to default 32 as missing in OF: %d",
++ ret);
++ nb_requestors = 32;
++ };
++ } else if (pdata && pdata->dma_channels) {
+ dma_channels = pdata->dma_channels;
+- else
++ nb_requestors = pdata->nb_requestors;
++ } else {
+ dma_channels = 32; /* default 32 channel */
++ }
+
+ dma_cap_set(DMA_SLAVE, pdev->slave.cap_mask);
+ dma_cap_set(DMA_MEMCPY, pdev->slave.cap_mask);
+@@ -1422,7 +1438,7 @@ static int pxad_probe(struct platform_device *op)
+ pdev->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+
+ pdev->slave.dev = &op->dev;
+- ret = pxad_init_dmadev(op, pdev, dma_channels);
++ ret = pxad_init_dmadev(op, pdev, dma_channels, nb_requestors);
+ if (ret) {
+ dev_err(pdev->slave.dev, "unable to register\n");
+ return ret;
+@@ -1441,7 +1457,8 @@ static int pxad_probe(struct platform_device *op)
+
+ platform_set_drvdata(op, pdev);
+ pxad_init_debugfs(pdev);
+- dev_info(pdev->slave.dev, "initialized %d channels\n", dma_channels);
++ dev_info(pdev->slave.dev, "initialized %d channels on %d requestors\n",
++ dma_channels, nb_requestors);
+ return 0;
+ }
+
+diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
+index 01087a38da22..792bdae2b91d 100644
+--- a/drivers/edac/i7core_edac.c
++++ b/drivers/edac/i7core_edac.c
+@@ -1866,7 +1866,7 @@ static int i7core_mce_check_error(struct notifier_block *nb, unsigned long val,
+
+ i7_dev = get_i7core_dev(mce->socketid);
+ if (!i7_dev)
+- return NOTIFY_BAD;
++ return NOTIFY_DONE;
+
+ mci = i7_dev->mci;
+ pvt = mci->pvt_info;
+diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
+index cbee3179ec08..37649221f81c 100644
+--- a/drivers/edac/sb_edac.c
++++ b/drivers/edac/sb_edac.c
+@@ -1396,7 +1396,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
+ }
+
+ ch_way = TAD_CH(reg) + 1;
+- sck_way = 1 << TAD_SOCK(reg);
++ sck_way = TAD_SOCK(reg);
+
+ if (ch_way == 3)
+ idx = addr >> 6;
+@@ -1435,7 +1435,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
+ switch(ch_way) {
+ case 2:
+ case 4:
+- sck_xch = 1 << sck_way * (ch_way >> 1);
++ sck_xch = (1 << sck_way) * (ch_way >> 1);
+ break;
+ default:
+ sprintf(msg, "Invalid mirror set. Can't decode addr");
+@@ -1471,7 +1471,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
+
+ ch_addr = addr - offset;
+ ch_addr >>= (6 + shiftup);
+- ch_addr /= ch_way * sck_way;
++ ch_addr /= sck_xch;
+ ch_addr <<= (6 + shiftup);
+ ch_addr |= addr & ((1 << (6 + shiftup)) - 1);
+
+@@ -2254,7 +2254,7 @@ static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val,
+
+ mci = get_mci_for_node_id(mce->socketid);
+ if (!mci)
+- return NOTIFY_BAD;
++ return NOTIFY_DONE;
+ pvt = mci->pvt_info;
+
+ /*
+diff --git a/drivers/extcon/extcon-max77843.c b/drivers/extcon/extcon-max77843.c
+index 9f9ea334399c..b6cb30d207be 100644
+--- a/drivers/extcon/extcon-max77843.c
++++ b/drivers/extcon/extcon-max77843.c
+@@ -803,7 +803,7 @@ static int max77843_muic_probe(struct platform_device *pdev)
+ /* Clear IRQ bits before request IRQs */
+ ret = regmap_bulk_read(max77843->regmap_muic,
+ MAX77843_MUIC_REG_INT1, info->status,
+- MAX77843_MUIC_IRQ_NUM);
++ MAX77843_MUIC_STATUS_NUM);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to Clear IRQ bits\n");
+ goto err_muic_irq;
+diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
+index 027ca212179f..3b52677f459a 100644
+--- a/drivers/firmware/efi/efi.c
++++ b/drivers/firmware/efi/efi.c
+@@ -180,6 +180,7 @@ static int generic_ops_register(void)
+ {
+ generic_ops.get_variable = efi.get_variable;
+ generic_ops.set_variable = efi.set_variable;
++ generic_ops.set_variable_nonblocking = efi.set_variable_nonblocking;
+ generic_ops.get_next_variable = efi.get_next_variable;
+ generic_ops.query_variable_store = efi_query_variable_store;
+
+diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
+index 7f2ea21c730d..6f182fd91a6d 100644
+--- a/drivers/firmware/efi/vars.c
++++ b/drivers/firmware/efi/vars.c
+@@ -202,29 +202,44 @@ static const struct variable_validate variable_validate[] = {
+ { NULL_GUID, "", NULL },
+ };
+
++/*
++ * Check if @var_name matches the pattern given in @match_name.
++ *
++ * @var_name: an array of @len non-NUL characters.
++ * @match_name: a NUL-terminated pattern string, optionally ending in "*". A
++ * final "*" character matches any trailing characters @var_name,
++ * including the case when there are none left in @var_name.
++ * @match: on output, the number of non-wildcard characters in @match_name
++ * that @var_name matches, regardless of the return value.
++ * @return: whether @var_name fully matches @match_name.
++ */
+ static bool
+ variable_matches(const char *var_name, size_t len, const char *match_name,
+ int *match)
+ {
+ for (*match = 0; ; (*match)++) {
+ char c = match_name[*match];
+- char u = var_name[*match];
+
+- /* Wildcard in the matching name means we've matched */
+- if (c == '*')
++ switch (c) {
++ case '*':
++ /* Wildcard in @match_name means we've matched. */
+ return true;
+
+- /* Case sensitive match */
+- if (!c && *match == len)
+- return true;
++ case '\0':
++ /* @match_name has ended. Has @var_name too? */
++ return (*match == len);
+
+- if (c != u)
++ default:
++ /*
++ * We've reached a non-wildcard char in @match_name.
++ * Continue only if there's an identical character in
++ * @var_name.
++ */
++ if (*match < len && c == var_name[*match])
++ continue;
+ return false;
+-
+- if (!c)
+- return true;
++ }
+ }
+- return true;
+ }
+
+ bool
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index bb1099c549df..053fc2f465df 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -1673,6 +1673,7 @@ struct amdgpu_uvd {
+ struct amdgpu_bo *vcpu_bo;
+ void *cpu_addr;
+ uint64_t gpu_addr;
++ unsigned fw_version;
+ atomic_t handles[AMDGPU_MAX_UVD_HANDLES];
+ struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES];
+ struct delayed_work idle_work;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+index 8ac49812a716..5a8fbadbd27b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+@@ -63,10 +63,6 @@ bool amdgpu_has_atpx(void) {
+ return amdgpu_atpx_priv.atpx_detected;
+ }
+
+-bool amdgpu_has_atpx_dgpu_power_cntl(void) {
+- return amdgpu_atpx_priv.atpx.functions.power_cntl;
+-}
+-
+ /**
+ * amdgpu_atpx_call - call an ATPX method
+ *
+@@ -146,6 +142,10 @@ static void amdgpu_atpx_parse_functions(struct amdgpu_atpx_functions *f, u32 mas
+ */
+ static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx)
+ {
++ /* make sure required functions are enabled */
++ /* dGPU power control is required */
++ atpx->functions.power_cntl = true;
++
+ if (atpx->functions.px_params) {
+ union acpi_object *info;
+ struct atpx_px_params output;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 9d88023df836..c961fe093e12 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -61,12 +61,6 @@ static const char *amdgpu_asic_name[] = {
+ "LAST",
+ };
+
+-#if defined(CONFIG_VGA_SWITCHEROO)
+-bool amdgpu_has_atpx_dgpu_power_cntl(void);
+-#else
+-static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; }
+-#endif
+-
+ bool amdgpu_device_is_px(struct drm_device *dev)
+ {
+ struct amdgpu_device *adev = dev->dev_private;
+@@ -1475,7 +1469,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
+
+ if (amdgpu_runtime_pm == 1)
+ runtime = true;
+- if (amdgpu_device_is_px(ddev) && amdgpu_has_atpx_dgpu_power_cntl())
++ if (amdgpu_device_is_px(ddev))
+ runtime = true;
+ vga_switcheroo_register_client(adev->pdev, &amdgpu_switcheroo_ops, runtime);
+ if (runtime)
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+index e23843f4d877..4488e82f87b0 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+@@ -303,7 +303,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
+ fw_info.feature = adev->vce.fb_version;
+ break;
+ case AMDGPU_INFO_FW_UVD:
+- fw_info.ver = 0;
++ fw_info.ver = adev->uvd.fw_version;
+ fw_info.feature = 0;
+ break;
+ case AMDGPU_INFO_FW_GMC:
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+index 064ebb347074..89df7871653d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+@@ -52,7 +52,7 @@ struct amdgpu_hpd;
+
+ #define AMDGPU_MAX_HPD_PINS 6
+ #define AMDGPU_MAX_CRTCS 6
+-#define AMDGPU_MAX_AFMT_BLOCKS 7
++#define AMDGPU_MAX_AFMT_BLOCKS 9
+
+ enum amdgpu_rmx_type {
+ RMX_OFF,
+@@ -308,8 +308,8 @@ struct amdgpu_mode_info {
+ struct atom_context *atom_context;
+ struct card_info *atom_card_info;
+ bool mode_config_initialized;
+- struct amdgpu_crtc *crtcs[6];
+- struct amdgpu_afmt *afmt[7];
++ struct amdgpu_crtc *crtcs[AMDGPU_MAX_CRTCS];
++ struct amdgpu_afmt *afmt[AMDGPU_MAX_AFMT_BLOCKS];
+ /* DVI-I properties */
+ struct drm_property *coherent_mode_property;
+ /* DAC enable load detect */
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+index 53f987aeeacf..3b35ad83867c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+@@ -156,6 +156,9 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
+ DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n",
+ version_major, version_minor, family_id);
+
++ adev->uvd.fw_version = ((version_major << 24) | (version_minor << 16) |
++ (family_id << 8));
++
+ bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
+ + AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE;
+ r = amdgpu_bo_create(adev, bo_size, PAGE_SIZE, true,
+@@ -273,6 +276,8 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
+ memcpy(adev->uvd.cpu_addr, (adev->uvd.fw->data) + offset,
+ (adev->uvd.fw->size) - offset);
+
++ cancel_delayed_work_sync(&adev->uvd.idle_work);
++
+ size = amdgpu_bo_size(adev->uvd.vcpu_bo);
+ size -= le32_to_cpu(hdr->ucode_size_bytes);
+ ptr = adev->uvd.cpu_addr;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+index a745eeeb5d82..bb0da76051a1 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+@@ -220,6 +220,7 @@ int amdgpu_vce_suspend(struct amdgpu_device *adev)
+ if (i == AMDGPU_MAX_VCE_HANDLES)
+ return 0;
+
++ cancel_delayed_work_sync(&adev->vce.idle_work);
+ /* TODO: suspending running encoding sessions isn't supported */
+ return -EINVAL;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+index aa491540ba85..946300764609 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+@@ -3628,7 +3628,7 @@ static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
+ unsigned vm_id, uint64_t pd_addr)
+ {
+ int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
+- uint32_t seq = ring->fence_drv.sync_seq;
++ uint32_t seq = ring->fence_drv.sync_seq[ring->idx];
+ uint64_t addr = ring->fence_drv.gpu_addr;
+
+ amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
+index c34c393e9aea..d5e19b5fbbfb 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
+@@ -513,7 +513,7 @@ static int dbgdev_wave_control_set_registers(
+ union SQ_CMD_BITS *in_reg_sq_cmd,
+ union GRBM_GFX_INDEX_BITS *in_reg_gfx_index)
+ {
+- int status;
++ int status = 0;
+ union SQ_CMD_BITS reg_sq_cmd;
+ union GRBM_GFX_INDEX_BITS reg_gfx_index;
+ struct HsaDbgWaveMsgAMDGen2 *pMsg;
+diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
+index 39d7e2e15c11..d268bf18a662 100644
+--- a/drivers/gpu/drm/drm_dp_mst_topology.c
++++ b/drivers/gpu/drm/drm_dp_mst_topology.c
+@@ -1665,13 +1665,19 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_branch *mstb;
+ int len, ret, port_num;
+
++ port = drm_dp_get_validated_port_ref(mgr, port);
++ if (!port)
++ return -EINVAL;
++
+ port_num = port->port_num;
+ mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
+ if (!mstb) {
+ mstb = drm_dp_get_last_connected_port_and_mstb(mgr, port->parent, &port_num);
+
+- if (!mstb)
++ if (!mstb) {
++ drm_dp_put_port(port);
+ return -EINVAL;
++ }
+ }
+
+ txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
+@@ -1697,6 +1703,7 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
+ kfree(txmsg);
+ fail_put:
+ drm_dp_put_mst_branch_device(mstb);
++ drm_dp_put_port(port);
+ return ret;
+ }
+
+@@ -1779,6 +1786,11 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
+ req_payload.start_slot = cur_slots;
+ if (mgr->proposed_vcpis[i]) {
+ port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
++ port = drm_dp_get_validated_port_ref(mgr, port);
++ if (!port) {
++ mutex_unlock(&mgr->payload_lock);
++ return -EINVAL;
++ }
+ req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots;
+ } else {
+ port = NULL;
+@@ -1804,6 +1816,9 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
+ mgr->payloads[i].payload_state = req_payload.payload_state;
+ }
+ cur_slots += req_payload.num_slots;
++
++ if (port)
++ drm_dp_put_port(port);
+ }
+
+ for (i = 0; i < mgr->max_payloads; i++) {
+@@ -2109,6 +2124,8 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
+
+ if (mgr->mst_primary) {
+ int sret;
++ u8 guid[16];
++
+ sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
+ if (sret != DP_RECEIVER_CAP_SIZE) {
+ DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
+@@ -2123,6 +2140,16 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
+ ret = -1;
+ goto out_unlock;
+ }
++
++ /* Some hubs forget their guids after they resume */
++ sret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
++ if (sret != 16) {
++ DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
++ ret = -1;
++ goto out_unlock;
++ }
++ drm_dp_check_mstb_guid(mgr->mst_primary, guid);
++
+ ret = 0;
+ } else
+ ret = -1;
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index f859a5b87ed4..afa81691163d 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -4447,7 +4447,7 @@ int skl_update_scaler_crtc(struct intel_crtc_state *state)
+ intel_crtc->base.base.id, intel_crtc->pipe, SKL_CRTC_INDEX);
+
+ return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
+- &state->scaler_state.scaler_id, DRM_ROTATE_0,
++ &state->scaler_state.scaler_id, BIT(DRM_ROTATE_0),
+ state->pipe_src_w, state->pipe_src_h,
+ adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay);
+ }
+diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
+index 0639275fc471..06bd9257acdc 100644
+--- a/drivers/gpu/drm/i915/intel_dp_mst.c
++++ b/drivers/gpu/drm/i915/intel_dp_mst.c
+@@ -477,6 +477,8 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct drm_device *dev = connector->dev;
+
++ intel_connector->unregister(intel_connector);
++
+ /* need to nuke the connector */
+ drm_modeset_lock_all(dev);
+ if (connector->state->crtc) {
+@@ -490,11 +492,7 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
+
+ WARN(ret, "Disabling mst crtc failed with %i\n", ret);
+ }
+- drm_modeset_unlock_all(dev);
+
+- intel_connector->unregister(intel_connector);
+-
+- drm_modeset_lock_all(dev);
+ intel_connector_remove_from_fbdev(intel_connector);
+ drm_connector_cleanup(connector);
+ drm_modeset_unlock_all(dev);
+diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
+index d69547a65dbb..7058f75c7b42 100644
+--- a/drivers/gpu/drm/i915/intel_lrc.c
++++ b/drivers/gpu/drm/i915/intel_lrc.c
+@@ -776,11 +776,11 @@ static int logical_ring_prepare(struct drm_i915_gem_request *req, int bytes)
+ if (unlikely(total_bytes > remain_usable)) {
+ /*
+ * The base request will fit but the reserved space
+- * falls off the end. So only need to to wait for the
+- * reserved size after flushing out the remainder.
++ * falls off the end. So we don't need an immediate wrap,
++ * and only need to effectively wait for the reserved
++ * size from the start of the ringbuffer.
+ */
+ wait_bytes = remain_actual + ringbuf->reserved_size;
+- need_wrap = true;
+ } else if (total_bytes > ringbuf->space) {
+ /* No wrapping required, just waiting. */
+ wait_bytes = total_bytes;
+diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
+index f6b2a814e629..9d48443bca2e 100644
+--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
++++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
+@@ -1922,6 +1922,17 @@ i915_dispatch_execbuffer(struct drm_i915_gem_request *req,
+ return 0;
+ }
+
++static void cleanup_phys_status_page(struct intel_engine_cs *ring)
++{
++ struct drm_i915_private *dev_priv = to_i915(ring->dev);
++
++ if (!dev_priv->status_page_dmah)
++ return;
++
++ drm_pci_free(ring->dev, dev_priv->status_page_dmah);
++ ring->status_page.page_addr = NULL;
++}
++
+ static void cleanup_status_page(struct intel_engine_cs *ring)
+ {
+ struct drm_i915_gem_object *obj;
+@@ -1938,9 +1949,9 @@ static void cleanup_status_page(struct intel_engine_cs *ring)
+
+ static int init_status_page(struct intel_engine_cs *ring)
+ {
+- struct drm_i915_gem_object *obj;
++ struct drm_i915_gem_object *obj = ring->status_page.obj;
+
+- if ((obj = ring->status_page.obj) == NULL) {
++ if (obj == NULL) {
+ unsigned flags;
+ int ret;
+
+@@ -2134,7 +2145,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
+ if (ret)
+ goto error;
+ } else {
+- BUG_ON(ring->id != RCS);
++ WARN_ON(ring->id != RCS);
+ ret = init_phys_status_page(ring);
+ if (ret)
+ goto error;
+@@ -2179,7 +2190,12 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
+ if (ring->cleanup)
+ ring->cleanup(ring);
+
+- cleanup_status_page(ring);
++ if (I915_NEED_GFX_HWS(ring->dev)) {
++ cleanup_status_page(ring);
++ } else {
++ WARN_ON(ring->id != RCS);
++ cleanup_phys_status_page(ring);
++ }
+
+ i915_cmd_parser_fini_ring(ring);
+ i915_gem_batch_pool_fini(&ring->batch_pool);
+@@ -2341,11 +2357,11 @@ static int __intel_ring_prepare(struct intel_engine_cs *ring, int bytes)
+ if (unlikely(total_bytes > remain_usable)) {
+ /*
+ * The base request will fit but the reserved space
+- * falls off the end. So only need to to wait for the
+- * reserved size after flushing out the remainder.
++ * falls off the end. So we don't need an immediate wrap,
++ * and only need to effectively wait for the reserved
++ * size from the start of the ringbuffer.
+ */
+ wait_bytes = remain_actual + ringbuf->reserved_size;
+- need_wrap = true;
+ } else if (total_bytes > ringbuf->space) {
+ /* No wrapping required, just waiting. */
+ wait_bytes = total_bytes;
+diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
+index 43cba129a0c0..cc91ae832ffb 100644
+--- a/drivers/gpu/drm/i915/intel_uncore.c
++++ b/drivers/gpu/drm/i915/intel_uncore.c
+@@ -1132,7 +1132,11 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
+ } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+ dev_priv->uncore.funcs.force_wake_get =
+ fw_domains_get_with_thread_status;
+- dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
++ if (IS_HASWELL(dev))
++ dev_priv->uncore.funcs.force_wake_put =
++ fw_domains_put_with_fifo;
++ else
++ dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
+ fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
+ FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
+ } else if (IS_IVYBRIDGE(dev)) {
+diff --git a/drivers/gpu/drm/nouveau/nvkm/core/ramht.c b/drivers/gpu/drm/nouveau/nvkm/core/ramht.c
+index 3216e157a8a0..89da47234016 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/core/ramht.c
++++ b/drivers/gpu/drm/nouveau/nvkm/core/ramht.c
+@@ -131,7 +131,7 @@ nvkm_ramht_del(struct nvkm_ramht **pramht)
+ struct nvkm_ramht *ramht = *pramht;
+ if (ramht) {
+ nvkm_gpuobj_del(&ramht->gpuobj);
+- kfree(*pramht);
++ vfree(*pramht);
+ *pramht = NULL;
+ }
+ }
+@@ -143,8 +143,8 @@ nvkm_ramht_new(struct nvkm_device *device, u32 size, u32 align,
+ struct nvkm_ramht *ramht;
+ int ret, i;
+
+- if (!(ramht = *pramht = kzalloc(sizeof(*ramht) + (size >> 3) *
+- sizeof(*ramht->data), GFP_KERNEL)))
++ if (!(ramht = *pramht = vzalloc(sizeof(*ramht) +
++ (size >> 3) * sizeof(*ramht->data))))
+ return -ENOMEM;
+
+ ramht->device = device;
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+index 9f5dfc85147a..36655a74c538 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+@@ -1717,6 +1717,8 @@ gf100_gr_init(struct gf100_gr *gr)
+
+ gf100_gr_mmio(gr, gr->func->mmio);
+
++ nvkm_mask(device, TPC_UNIT(0, 0, 0x05c), 0x00000001, 0x00000001);
++
+ memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
+ for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
+ do {
+diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
+index 183aea1abebc..5edebf495c07 100644
+--- a/drivers/gpu/drm/qxl/qxl_display.c
++++ b/drivers/gpu/drm/qxl/qxl_display.c
+@@ -375,10 +375,15 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc,
+
+ qxl_bo_kunmap(user_bo);
+
++ qcrtc->cur_x += qcrtc->hot_spot_x - hot_x;
++ qcrtc->cur_y += qcrtc->hot_spot_y - hot_y;
++ qcrtc->hot_spot_x = hot_x;
++ qcrtc->hot_spot_y = hot_y;
++
+ cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
+ cmd->type = QXL_CURSOR_SET;
+- cmd->u.set.position.x = qcrtc->cur_x;
+- cmd->u.set.position.y = qcrtc->cur_y;
++ cmd->u.set.position.x = qcrtc->cur_x + qcrtc->hot_spot_x;
++ cmd->u.set.position.y = qcrtc->cur_y + qcrtc->hot_spot_y;
+
+ cmd->u.set.shape = qxl_bo_physical_address(qdev, cursor_bo, 0);
+
+@@ -441,8 +446,8 @@ static int qxl_crtc_cursor_move(struct drm_crtc *crtc,
+
+ cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
+ cmd->type = QXL_CURSOR_MOVE;
+- cmd->u.position.x = qcrtc->cur_x;
+- cmd->u.position.y = qcrtc->cur_y;
++ cmd->u.position.x = qcrtc->cur_x + qcrtc->hot_spot_x;
++ cmd->u.position.y = qcrtc->cur_y + qcrtc->hot_spot_y;
+ qxl_release_unmap(qdev, release, &cmd->release_info);
+
+ qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
+diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
+index 01a86948eb8c..3ab90179e9ab 100644
+--- a/drivers/gpu/drm/qxl/qxl_drv.h
++++ b/drivers/gpu/drm/qxl/qxl_drv.h
+@@ -135,6 +135,8 @@ struct qxl_crtc {
+ int index;
+ int cur_x;
+ int cur_y;
++ int hot_spot_x;
++ int hot_spot_y;
+ };
+
+ struct qxl_output {
+diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
+index 2ad462896896..32491355a1d4 100644
+--- a/drivers/gpu/drm/radeon/evergreen.c
++++ b/drivers/gpu/drm/radeon/evergreen.c
+@@ -2608,10 +2608,152 @@ static void evergreen_agp_enable(struct radeon_device *rdev)
+ WREG32(VM_CONTEXT1_CNTL, 0);
+ }
+
++static const unsigned ni_dig_offsets[] =
++{
++ NI_DIG0_REGISTER_OFFSET,
++ NI_DIG1_REGISTER_OFFSET,
++ NI_DIG2_REGISTER_OFFSET,
++ NI_DIG3_REGISTER_OFFSET,
++ NI_DIG4_REGISTER_OFFSET,
++ NI_DIG5_REGISTER_OFFSET
++};
++
++static const unsigned ni_tx_offsets[] =
++{
++ NI_DCIO_UNIPHY0_UNIPHY_TX_CONTROL1,
++ NI_DCIO_UNIPHY1_UNIPHY_TX_CONTROL1,
++ NI_DCIO_UNIPHY2_UNIPHY_TX_CONTROL1,
++ NI_DCIO_UNIPHY3_UNIPHY_TX_CONTROL1,
++ NI_DCIO_UNIPHY4_UNIPHY_TX_CONTROL1,
++ NI_DCIO_UNIPHY5_UNIPHY_TX_CONTROL1
++};
++
++static const unsigned evergreen_dp_offsets[] =
++{
++ EVERGREEN_DP0_REGISTER_OFFSET,
++ EVERGREEN_DP1_REGISTER_OFFSET,
++ EVERGREEN_DP2_REGISTER_OFFSET,
++ EVERGREEN_DP3_REGISTER_OFFSET,
++ EVERGREEN_DP4_REGISTER_OFFSET,
++ EVERGREEN_DP5_REGISTER_OFFSET
++};
++
++
++/*
++ * Assumption is that EVERGREEN_CRTC_MASTER_EN is enabled for the requested crtc.
++ * We go from crtc to connector, which is not reliable since it
++ * should be the opposite direction. If the crtc is enabled, then
++ * find the dig_fe which selects this crtc and ensure that it is enabled.
++ * If such a dig_fe is found, then find the dig_be which selects that dig_fe and
++ * ensure that it is enabled and in DP_SST mode.
++ * If UNIPHY_PLL_CONTROL1.enable is set, then we should disconnect the timing
++ * from the dp symbol clocks.
++ */
++static bool evergreen_is_dp_sst_stream_enabled(struct radeon_device *rdev,
++ unsigned crtc_id, unsigned *ret_dig_fe)
++{
++ unsigned i;
++ unsigned dig_fe;
++ unsigned dig_be;
++ unsigned dig_en_be;
++ unsigned uniphy_pll;
++ unsigned digs_fe_selected;
++ unsigned dig_be_mode;
++ unsigned dig_fe_mask;
++ bool is_enabled = false;
++ bool found_crtc = false;
++
++ /* loop through all running dig_fe to find selected crtc */
++ for (i = 0; i < ARRAY_SIZE(ni_dig_offsets); i++) {
++ dig_fe = RREG32(NI_DIG_FE_CNTL + ni_dig_offsets[i]);
++ if (dig_fe & NI_DIG_FE_CNTL_SYMCLK_FE_ON &&
++ crtc_id == NI_DIG_FE_CNTL_SOURCE_SELECT(dig_fe)) {
++ /* found running pipe */
++ found_crtc = true;
++ dig_fe_mask = 1 << i;
++ dig_fe = i;
++ break;
++ }
++ }
++
++ if (found_crtc) {
++ /* loop through all running dig_be to find selected dig_fe */
++ for (i = 0; i < ARRAY_SIZE(ni_dig_offsets); i++) {
++ dig_be = RREG32(NI_DIG_BE_CNTL + ni_dig_offsets[i]);
++ /* is dig_fe selected by this dig_be? */
++ digs_fe_selected = NI_DIG_BE_CNTL_FE_SOURCE_SELECT(dig_be);
++ dig_be_mode = NI_DIG_FE_CNTL_MODE(dig_be);
++ if (dig_fe_mask & digs_fe_selected &&
++ /* is dig_be in SST mode? */
++ dig_be_mode == NI_DIG_BE_DPSST) {
++ dig_en_be = RREG32(NI_DIG_BE_EN_CNTL +
++ ni_dig_offsets[i]);
++ uniphy_pll = RREG32(NI_DCIO_UNIPHY0_PLL_CONTROL1 +
++ ni_tx_offsets[i]);
++ /* dig_be is enabled and tx is running */
++ if (dig_en_be & NI_DIG_BE_EN_CNTL_ENABLE &&
++ dig_en_be & NI_DIG_BE_EN_CNTL_SYMBCLK_ON &&
++ uniphy_pll & NI_DCIO_UNIPHY0_PLL_CONTROL1_ENABLE) {
++ is_enabled = true;
++ *ret_dig_fe = dig_fe;
++ break;
++ }
++ }
++ }
++ }
++
++ return is_enabled;
++}
++
++/*
++ * Blank the dig when in dp sst mode.
++ * The dig ignores the crtc timing.
++ */
++static void evergreen_blank_dp_output(struct radeon_device *rdev,
++ unsigned dig_fe)
++{
++ unsigned stream_ctrl;
++ unsigned fifo_ctrl;
++ unsigned counter = 0;
++
++ if (dig_fe >= ARRAY_SIZE(evergreen_dp_offsets)) {
++ DRM_ERROR("invalid dig_fe %d\n", dig_fe);
++ return;
++ }
++
++ stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
++ evergreen_dp_offsets[dig_fe]);
++ if (!(stream_ctrl & EVERGREEN_DP_VID_STREAM_CNTL_ENABLE)) {
++ DRM_ERROR("dig %d , should be enable\n", dig_fe);
++ return;
++ }
++
++ stream_ctrl &=~EVERGREEN_DP_VID_STREAM_CNTL_ENABLE;
++ WREG32(EVERGREEN_DP_VID_STREAM_CNTL +
++ evergreen_dp_offsets[dig_fe], stream_ctrl);
++
++ stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
++ evergreen_dp_offsets[dig_fe]);
++ while (counter < 32 && stream_ctrl & EVERGREEN_DP_VID_STREAM_STATUS) {
++ msleep(1);
++ counter++;
++ stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
++ evergreen_dp_offsets[dig_fe]);
++ }
++ if (counter >= 32)
++ DRM_ERROR("counter exceeds %d\n", counter);
++
++ fifo_ctrl = RREG32(EVERGREEN_DP_STEER_FIFO + evergreen_dp_offsets[dig_fe]);
++ fifo_ctrl |= EVERGREEN_DP_STEER_FIFO_RESET;
++ WREG32(EVERGREEN_DP_STEER_FIFO + evergreen_dp_offsets[dig_fe], fifo_ctrl);
++
++}
++
+ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
+ {
+ u32 crtc_enabled, tmp, frame_count, blackout;
+ int i, j;
++ unsigned dig_fe;
+
+ if (!ASIC_IS_NODCE(rdev)) {
+ save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
+@@ -2651,7 +2793,17 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
+ break;
+ udelay(1);
+ }
+-
++ /*
++ * We should disable the dig if it drives dp sst, but we are in
++ * radeon_device_init and the topology is unknown; it only becomes
++ * available after radeon_modeset_init. The method
++ * radeon_atom_encoder_dpms_dig does the job if we initialize it
++ * properly, so for now we do it manually.
++ */
++ if (ASIC_IS_DCE5(rdev) &&
++ evergreen_is_dp_sst_stream_enabled(rdev, i, &dig_fe))
++ evergreen_blank_dp_output(rdev, dig_fe);
++ /* we could remove the 6 lines below */
+ /* XXX this is a hack to avoid strange behavior with EFI on certain systems */
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+ tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
+diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
+index aa939dfed3a3..b436badf9efa 100644
+--- a/drivers/gpu/drm/radeon/evergreen_reg.h
++++ b/drivers/gpu/drm/radeon/evergreen_reg.h
+@@ -250,8 +250,43 @@
+
+ /* HDMI blocks at 0x7030, 0x7c30, 0x10830, 0x11430, 0x12030, 0x12c30 */
+ #define EVERGREEN_HDMI_BASE 0x7030
++/* DIG block */
++#define NI_DIG0_REGISTER_OFFSET (0x7000 - 0x7000)
++#define NI_DIG1_REGISTER_OFFSET (0x7C00 - 0x7000)
++#define NI_DIG2_REGISTER_OFFSET (0x10800 - 0x7000)
++#define NI_DIG3_REGISTER_OFFSET (0x11400 - 0x7000)
++#define NI_DIG4_REGISTER_OFFSET (0x12000 - 0x7000)
++#define NI_DIG5_REGISTER_OFFSET (0x12C00 - 0x7000)
++
++
++#define NI_DIG_FE_CNTL 0x7000
++# define NI_DIG_FE_CNTL_SOURCE_SELECT(x) ((x) & 0x3)
++# define NI_DIG_FE_CNTL_SYMCLK_FE_ON (1<<24)
++
++
++#define NI_DIG_BE_CNTL 0x7140
++# define NI_DIG_BE_CNTL_FE_SOURCE_SELECT(x) (((x) >> 8) & 0x3F)
++# define NI_DIG_FE_CNTL_MODE(x) (((x) >> 16) & 0x7)
++
++#define NI_DIG_BE_EN_CNTL 0x7144
++# define NI_DIG_BE_EN_CNTL_ENABLE (1 << 0)
++# define NI_DIG_BE_EN_CNTL_SYMBCLK_ON (1 << 8)
++# define NI_DIG_BE_DPSST 0
+
+ /* Display Port block */
++#define EVERGREEN_DP0_REGISTER_OFFSET (0x730C - 0x730C)
++#define EVERGREEN_DP1_REGISTER_OFFSET (0x7F0C - 0x730C)
++#define EVERGREEN_DP2_REGISTER_OFFSET (0x10B0C - 0x730C)
++#define EVERGREEN_DP3_REGISTER_OFFSET (0x1170C - 0x730C)
++#define EVERGREEN_DP4_REGISTER_OFFSET (0x1230C - 0x730C)
++#define EVERGREEN_DP5_REGISTER_OFFSET (0x12F0C - 0x730C)
++
++
++#define EVERGREEN_DP_VID_STREAM_CNTL 0x730C
++# define EVERGREEN_DP_VID_STREAM_CNTL_ENABLE (1 << 0)
++# define EVERGREEN_DP_VID_STREAM_STATUS (1 << 16)
++#define EVERGREEN_DP_STEER_FIFO 0x7310
++# define EVERGREEN_DP_STEER_FIFO_RESET (1 << 0)
+ #define EVERGREEN_DP_SEC_CNTL 0x7280
+ # define EVERGREEN_DP_SEC_STREAM_ENABLE (1 << 0)
+ # define EVERGREEN_DP_SEC_ASP_ENABLE (1 << 4)
+@@ -266,4 +301,15 @@
+ # define EVERGREEN_DP_SEC_N_BASE_MULTIPLE(x) (((x) & 0xf) << 24)
+ # define EVERGREEN_DP_SEC_SS_EN (1 << 28)
+
++/* DCIO_UNIPHY block */
++#define NI_DCIO_UNIPHY0_UNIPHY_TX_CONTROL1 (0x6600 - 0x6600)
++#define NI_DCIO_UNIPHY1_UNIPHY_TX_CONTROL1 (0x6640 - 0x6600)
++#define NI_DCIO_UNIPHY2_UNIPHY_TX_CONTROL1 (0x6680 - 0x6600)
++#define NI_DCIO_UNIPHY3_UNIPHY_TX_CONTROL1 (0x66C0 - 0x6600)
++#define NI_DCIO_UNIPHY4_UNIPHY_TX_CONTROL1 (0x6700 - 0x6600)
++#define NI_DCIO_UNIPHY5_UNIPHY_TX_CONTROL1 (0x6740 - 0x6600)
++
++#define NI_DCIO_UNIPHY0_PLL_CONTROL1 0x6618
++# define NI_DCIO_UNIPHY0_PLL_CONTROL1_ENABLE (1 << 0)
++
+ #endif
+diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+index 9bc408c9f9f6..c4b4f298a283 100644
+--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
++++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+@@ -62,10 +62,6 @@ bool radeon_has_atpx(void) {
+ return radeon_atpx_priv.atpx_detected;
+ }
+
+-bool radeon_has_atpx_dgpu_power_cntl(void) {
+- return radeon_atpx_priv.atpx.functions.power_cntl;
+-}
+-
+ /**
+ * radeon_atpx_call - call an ATPX method
+ *
+@@ -145,6 +141,10 @@ static void radeon_atpx_parse_functions(struct radeon_atpx_functions *f, u32 mas
+ */
+ static int radeon_atpx_validate(struct radeon_atpx *atpx)
+ {
++ /* make sure required functions are enabled */
++ /* dGPU power control is required */
++ atpx->functions.power_cntl = true;
++
+ if (atpx->functions.px_params) {
+ union acpi_object *info;
+ struct atpx_px_params output;
+diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
+index 340f3f549f29..9cfc1c3e1965 100644
+--- a/drivers/gpu/drm/radeon/radeon_connectors.c
++++ b/drivers/gpu/drm/radeon/radeon_connectors.c
+@@ -1996,10 +1996,12 @@ radeon_add_atom_connector(struct drm_device *dev,
+ rdev->mode_info.dither_property,
+ RADEON_FMT_DITHER_DISABLE);
+
+- if (radeon_audio != 0)
++ if (radeon_audio != 0) {
+ drm_object_attach_property(&radeon_connector->base.base,
+ rdev->mode_info.audio_property,
+ RADEON_AUDIO_AUTO);
++ radeon_connector->audio = RADEON_AUDIO_AUTO;
++ }
+ if (ASIC_IS_DCE5(rdev))
+ drm_object_attach_property(&radeon_connector->base.base,
+ rdev->mode_info.output_csc_property,
+@@ -2124,6 +2126,7 @@ radeon_add_atom_connector(struct drm_device *dev,
+ drm_object_attach_property(&radeon_connector->base.base,
+ rdev->mode_info.audio_property,
+ RADEON_AUDIO_AUTO);
++ radeon_connector->audio = RADEON_AUDIO_AUTO;
+ }
+ if (connector_type == DRM_MODE_CONNECTOR_DVII) {
+ radeon_connector->dac_load_detect = true;
+@@ -2179,6 +2182,7 @@ radeon_add_atom_connector(struct drm_device *dev,
+ drm_object_attach_property(&radeon_connector->base.base,
+ rdev->mode_info.audio_property,
+ RADEON_AUDIO_AUTO);
++ radeon_connector->audio = RADEON_AUDIO_AUTO;
+ }
+ if (ASIC_IS_DCE5(rdev))
+ drm_object_attach_property(&radeon_connector->base.base,
+@@ -2231,6 +2235,7 @@ radeon_add_atom_connector(struct drm_device *dev,
+ drm_object_attach_property(&radeon_connector->base.base,
+ rdev->mode_info.audio_property,
+ RADEON_AUDIO_AUTO);
++ radeon_connector->audio = RADEON_AUDIO_AUTO;
+ }
+ if (ASIC_IS_DCE5(rdev))
+ drm_object_attach_property(&radeon_connector->base.base,
+diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
+index f78f111e68de..c566993a2ec3 100644
+--- a/drivers/gpu/drm/radeon/radeon_device.c
++++ b/drivers/gpu/drm/radeon/radeon_device.c
+@@ -103,12 +103,6 @@ static const char radeon_family_name[][16] = {
+ "LAST",
+ };
+
+-#if defined(CONFIG_VGA_SWITCHEROO)
+-bool radeon_has_atpx_dgpu_power_cntl(void);
+-#else
+-static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; }
+-#endif
+-
+ #define RADEON_PX_QUIRK_DISABLE_PX (1 << 0)
+ #define RADEON_PX_QUIRK_LONG_WAKEUP (1 << 1)
+
+@@ -1439,7 +1433,7 @@ int radeon_device_init(struct radeon_device *rdev,
+ * ignore it */
+ vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
+
+- if ((rdev->flags & RADEON_IS_PX) && radeon_has_atpx_dgpu_power_cntl())
++ if (rdev->flags & RADEON_IS_PX)
+ runtime = true;
+ vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops, runtime);
+ if (runtime)
+diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
+index e06ac546a90f..f342aad79cc6 100644
+--- a/drivers/gpu/drm/radeon/radeon_ttm.c
++++ b/drivers/gpu/drm/radeon/radeon_ttm.c
+@@ -235,6 +235,8 @@ static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
+ {
+ struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
+
++ if (radeon_ttm_tt_has_userptr(bo->ttm))
++ return -EPERM;
+ return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp);
+ }
+
+diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
+index 7285adb27099..caa73de584a5 100644
+--- a/drivers/gpu/drm/radeon/si_dpm.c
++++ b/drivers/gpu/drm/radeon/si_dpm.c
+@@ -2931,6 +2931,7 @@ static struct si_dpm_quirk si_dpm_quirk_list[] = {
+ { PCI_VENDOR_ID_ATI, 0x6811, 0x1462, 0x2015, 0, 120000 },
+ { PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 },
+ { PCI_VENDOR_ID_ATI, 0x6811, 0x148c, 0x2015, 0, 120000 },
++ { PCI_VENDOR_ID_ATI, 0x6810, 0x1682, 0x9275, 0, 120000 },
+ { 0, 0, 0, 0 },
+ };
+
+diff --git a/drivers/hwtracing/stm/Kconfig b/drivers/hwtracing/stm/Kconfig
+index 83e9f591a54b..e7a348807f0c 100644
+--- a/drivers/hwtracing/stm/Kconfig
++++ b/drivers/hwtracing/stm/Kconfig
+@@ -1,6 +1,7 @@
+ config STM
+ tristate "System Trace Module devices"
+ select CONFIGFS_FS
++ select SRCU
+ help
+ A System Trace Module (STM) is a device exporting data in System
+ Trace Protocol (STP) format as defined by MIPI STP standards.
+diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c
+index 714bdc837769..b167ab25310a 100644
+--- a/drivers/i2c/busses/i2c-cpm.c
++++ b/drivers/i2c/busses/i2c-cpm.c
+@@ -116,8 +116,8 @@ struct cpm_i2c {
+ cbd_t __iomem *rbase;
+ u_char *txbuf[CPM_MAXBD];
+ u_char *rxbuf[CPM_MAXBD];
+- u32 txdma[CPM_MAXBD];
+- u32 rxdma[CPM_MAXBD];
++ dma_addr_t txdma[CPM_MAXBD];
++ dma_addr_t rxdma[CPM_MAXBD];
+ };
+
+ static irqreturn_t cpm_i2c_interrupt(int irq, void *dev_id)
+diff --git a/drivers/i2c/busses/i2c-exynos5.c b/drivers/i2c/busses/i2c-exynos5.c
+index b29c7500461a..f54ece8fce78 100644
+--- a/drivers/i2c/busses/i2c-exynos5.c
++++ b/drivers/i2c/busses/i2c-exynos5.c
+@@ -671,7 +671,9 @@ static int exynos5_i2c_xfer(struct i2c_adapter *adap,
+ return -EIO;
+ }
+
+- clk_prepare_enable(i2c->clk);
++ ret = clk_enable(i2c->clk);
++ if (ret)
++ return ret;
+
+ for (i = 0; i < num; i++, msgs++) {
+ stop = (i == num - 1);
+@@ -695,7 +697,7 @@ static int exynos5_i2c_xfer(struct i2c_adapter *adap,
+ }
+
+ out:
+- clk_disable_unprepare(i2c->clk);
++ clk_disable(i2c->clk);
+ return ret;
+ }
+
+@@ -747,7 +749,9 @@ static int exynos5_i2c_probe(struct platform_device *pdev)
+ return -ENOENT;
+ }
+
+- clk_prepare_enable(i2c->clk);
++ ret = clk_prepare_enable(i2c->clk);
++ if (ret)
++ return ret;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ i2c->regs = devm_ioremap_resource(&pdev->dev, mem);
+@@ -799,6 +803,10 @@ static int exynos5_i2c_probe(struct platform_device *pdev)
+
+ platform_set_drvdata(pdev, i2c);
+
++ clk_disable(i2c->clk);
++
++ return 0;
++
+ err_clk:
+ clk_disable_unprepare(i2c->clk);
+ return ret;
+@@ -810,6 +818,8 @@ static int exynos5_i2c_remove(struct platform_device *pdev)
+
+ i2c_del_adapter(&i2c->adap);
+
++ clk_unprepare(i2c->clk);
++
+ return 0;
+ }
+
+@@ -821,6 +831,8 @@ static int exynos5_i2c_suspend_noirq(struct device *dev)
+
+ i2c->suspended = 1;
+
++ clk_unprepare(i2c->clk);
++
+ return 0;
+ }
+
+@@ -830,7 +842,9 @@ static int exynos5_i2c_resume_noirq(struct device *dev)
+ struct exynos5_i2c *i2c = platform_get_drvdata(pdev);
+ int ret = 0;
+
+- clk_prepare_enable(i2c->clk);
++ ret = clk_prepare_enable(i2c->clk);
++ if (ret)
++ return ret;
+
+ ret = exynos5_hsi2c_clock_setup(i2c);
+ if (ret) {
+@@ -839,7 +853,7 @@ static int exynos5_i2c_resume_noirq(struct device *dev)
+ }
+
+ exynos5_i2c_init(i2c);
+- clk_disable_unprepare(i2c->clk);
++ clk_disable(i2c->clk);
+ i2c->suspended = 0;
+
+ return 0;
+diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
+index 6b4e8a008bc0..564adf3116e8 100644
+--- a/drivers/infiniband/core/ucm.c
++++ b/drivers/infiniband/core/ucm.c
+@@ -48,6 +48,7 @@
+
+ #include <asm/uaccess.h>
+
++#include <rdma/ib.h>
+ #include <rdma/ib_cm.h>
+ #include <rdma/ib_user_cm.h>
+ #include <rdma/ib_marshall.h>
+@@ -1103,6 +1104,9 @@ static ssize_t ib_ucm_write(struct file *filp, const char __user *buf,
+ struct ib_ucm_cmd_hdr hdr;
+ ssize_t result;
+
++ if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
++ return -EACCES;
++
+ if (len < sizeof(hdr))
+ return -EINVAL;
+
+diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
+index 8b5a934e1133..886f61ea6cc7 100644
+--- a/drivers/infiniband/core/ucma.c
++++ b/drivers/infiniband/core/ucma.c
+@@ -1574,6 +1574,9 @@ static ssize_t ucma_write(struct file *filp, const char __user *buf,
+ struct rdma_ucm_cmd_hdr hdr;
+ ssize_t ret;
+
++ if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
++ return -EACCES;
++
+ if (len < sizeof(hdr))
+ return -EINVAL;
+
+diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
+index e3ef28861be6..24f3ca2c4ad7 100644
+--- a/drivers/infiniband/core/uverbs_main.c
++++ b/drivers/infiniband/core/uverbs_main.c
+@@ -48,6 +48,8 @@
+
+ #include <asm/uaccess.h>
+
++#include <rdma/ib.h>
++
+ #include "uverbs.h"
+
+ MODULE_AUTHOR("Roland Dreier");
+@@ -682,6 +684,9 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
+ int srcu_key;
+ ssize_t ret;
+
++ if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
++ return -EACCES;
++
+ if (count < sizeof hdr)
+ return -EINVAL;
+
+diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
+index c4e091528390..721d63f5b461 100644
+--- a/drivers/infiniband/hw/mlx5/main.c
++++ b/drivers/infiniband/hw/mlx5/main.c
+@@ -273,7 +273,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
+ sizeof(struct mlx5_wqe_ctrl_seg)) /
+ sizeof(struct mlx5_wqe_data_seg);
+ props->max_sge = min(max_rq_sg, max_sq_sg);
+- props->max_sge_rd = props->max_sge;
++ props->max_sge_rd = MLX5_MAX_SGE_RD;
+ props->max_cq = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
+ props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) - 1;
+ props->max_mr = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
+diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
+index e449e394963f..24f4a782e0f4 100644
+--- a/drivers/infiniband/hw/qib/qib_file_ops.c
++++ b/drivers/infiniband/hw/qib/qib_file_ops.c
+@@ -45,6 +45,8 @@
+ #include <linux/export.h>
+ #include <linux/uio.h>
+
++#include <rdma/ib.h>
++
+ #include "qib.h"
+ #include "qib_common.h"
+ #include "qib_user_sdma.h"
+@@ -2067,6 +2069,9 @@ static ssize_t qib_write(struct file *fp, const char __user *data,
+ ssize_t ret = 0;
+ void *dest;
+
++ if (WARN_ON_ONCE(!ib_safe_file_access(fp)))
++ return -EACCES;
++
+ if (count < sizeof(cmd.type)) {
+ ret = -EINVAL;
+ goto bail;
+diff --git a/drivers/input/misc/pmic8xxx-pwrkey.c b/drivers/input/misc/pmic8xxx-pwrkey.c
+index 3f02e0e03d12..67aab86048ad 100644
+--- a/drivers/input/misc/pmic8xxx-pwrkey.c
++++ b/drivers/input/misc/pmic8xxx-pwrkey.c
+@@ -353,7 +353,8 @@ static int pmic8xxx_pwrkey_probe(struct platform_device *pdev)
+ if (of_property_read_u32(pdev->dev.of_node, "debounce", &kpd_delay))
+ kpd_delay = 15625;
+
+- if (kpd_delay > 62500 || kpd_delay == 0) {
++ /* Valid range of pwr key trigger delay is 1/64 sec to 2 seconds. */
++ if (kpd_delay > USEC_PER_SEC * 2 || kpd_delay < USEC_PER_SEC / 64) {
+ dev_err(&pdev->dev, "invalid power key trigger delay\n");
+ return -EINVAL;
+ }
+@@ -385,8 +386,8 @@ static int pmic8xxx_pwrkey_probe(struct platform_device *pdev)
+ pwr->name = "pmic8xxx_pwrkey";
+ pwr->phys = "pmic8xxx_pwrkey/input0";
+
+- delay = (kpd_delay << 10) / USEC_PER_SEC;
+- delay = 1 + ilog2(delay);
++ delay = (kpd_delay << 6) / USEC_PER_SEC;
++ delay = ilog2(delay);
+
+ err = regmap_read(regmap, PON_CNTL_1, &pon_cntl);
+ if (err < 0) {
+diff --git a/drivers/input/tablet/gtco.c b/drivers/input/tablet/gtco.c
+index 3a7f3a4a4396..7c18249d6c8e 100644
+--- a/drivers/input/tablet/gtco.c
++++ b/drivers/input/tablet/gtco.c
+@@ -858,6 +858,14 @@ static int gtco_probe(struct usb_interface *usbinterface,
+ goto err_free_buf;
+ }
+
++ /* Sanity check that a device has an endpoint */
++ if (usbinterface->altsetting[0].desc.bNumEndpoints < 1) {
++ dev_err(&usbinterface->dev,
++ "Invalid number of endpoints\n");
++ error = -EINVAL;
++ goto err_free_urb;
++ }
++
+ /*
+ * The endpoint is always altsetting 0, we know this since we know
+ * this device only has one interrupt endpoint
+@@ -879,7 +887,7 @@ static int gtco_probe(struct usb_interface *usbinterface,
+ * HID report descriptor
+ */
+ if (usb_get_extra_descriptor(usbinterface->cur_altsetting,
+- HID_DEVICE_TYPE, &hid_desc) != 0){
++ HID_DEVICE_TYPE, &hid_desc) != 0) {
+ dev_err(&usbinterface->dev,
+ "Can't retrieve exta USB descriptor to get hid report descriptor length\n");
+ error = -EIO;
+diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
+index fc836f523afa..b9319b76a8a1 100644
+--- a/drivers/iommu/amd_iommu.c
++++ b/drivers/iommu/amd_iommu.c
+@@ -91,6 +91,7 @@ struct iommu_dev_data {
+ struct list_head dev_data_list; /* For global dev_data_list */
+ struct protection_domain *domain; /* Domain the device is bound to */
+ u16 devid; /* PCI Device ID */
++ u16 alias; /* Alias Device ID */
+ bool iommu_v2; /* Device can make use of IOMMUv2 */
+ bool passthrough; /* Device is identity mapped */
+ struct {
+@@ -125,6 +126,13 @@ static struct protection_domain *to_pdomain(struct iommu_domain *dom)
+ return container_of(dom, struct protection_domain, domain);
+ }
+
++static inline u16 get_device_id(struct device *dev)
++{
++ struct pci_dev *pdev = to_pci_dev(dev);
++
++ return PCI_DEVID(pdev->bus->number, pdev->devfn);
++}
++
+ static struct iommu_dev_data *alloc_dev_data(u16 devid)
+ {
+ struct iommu_dev_data *dev_data;
+@@ -162,6 +170,68 @@ out_unlock:
+ return dev_data;
+ }
+
++static int __last_alias(struct pci_dev *pdev, u16 alias, void *data)
++{
++ *(u16 *)data = alias;
++ return 0;
++}
++
++static u16 get_alias(struct device *dev)
++{
++ struct pci_dev *pdev = to_pci_dev(dev);
++ u16 devid, ivrs_alias, pci_alias;
++
++ devid = get_device_id(dev);
++ ivrs_alias = amd_iommu_alias_table[devid];
++ pci_for_each_dma_alias(pdev, __last_alias, &pci_alias);
++
++ if (ivrs_alias == pci_alias)
++ return ivrs_alias;
++
++ /*
++ * DMA alias showdown
++ *
++ * The IVRS is fairly reliable in telling us about aliases, but it
++ * can't know about every screwy device. If we don't have an IVRS
++ * reported alias, use the PCI reported alias. In that case we may
++ * still need to initialize the rlookup and dev_table entries if the
++ * alias is to a non-existent device.
++ */
++ if (ivrs_alias == devid) {
++ if (!amd_iommu_rlookup_table[pci_alias]) {
++ amd_iommu_rlookup_table[pci_alias] =
++ amd_iommu_rlookup_table[devid];
++ memcpy(amd_iommu_dev_table[pci_alias].data,
++ amd_iommu_dev_table[devid].data,
++ sizeof(amd_iommu_dev_table[pci_alias].data));
++ }
++
++ return pci_alias;
++ }
++
++ pr_info("AMD-Vi: Using IVRS reported alias %02x:%02x.%d "
++ "for device %s[%04x:%04x], kernel reported alias "
++ "%02x:%02x.%d\n", PCI_BUS_NUM(ivrs_alias), PCI_SLOT(ivrs_alias),
++ PCI_FUNC(ivrs_alias), dev_name(dev), pdev->vendor, pdev->device,
++ PCI_BUS_NUM(pci_alias), PCI_SLOT(pci_alias),
++ PCI_FUNC(pci_alias));
++
++ /*
++ * If we don't have a PCI DMA alias and the IVRS alias is on the same
++ * bus, then the IVRS table may know about a quirk that we don't.
++ */
++ if (pci_alias == devid &&
++ PCI_BUS_NUM(ivrs_alias) == pdev->bus->number) {
++ pdev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVFN;
++ pdev->dma_alias_devfn = ivrs_alias & 0xff;
++ pr_info("AMD-Vi: Added PCI DMA alias %02x.%d for %s\n",
++ PCI_SLOT(ivrs_alias), PCI_FUNC(ivrs_alias),
++ dev_name(dev));
++ }
++
++ return ivrs_alias;
++}
++
+ static struct iommu_dev_data *find_dev_data(u16 devid)
+ {
+ struct iommu_dev_data *dev_data;
+@@ -174,13 +244,6 @@ static struct iommu_dev_data *find_dev_data(u16 devid)
+ return dev_data;
+ }
+
+-static inline u16 get_device_id(struct device *dev)
+-{
+- struct pci_dev *pdev = to_pci_dev(dev);
+-
+- return PCI_DEVID(pdev->bus->number, pdev->devfn);
+-}
+-
+ static struct iommu_dev_data *get_dev_data(struct device *dev)
+ {
+ return dev->archdata.iommu;
+@@ -308,6 +371,8 @@ static int iommu_init_device(struct device *dev)
+ if (!dev_data)
+ return -ENOMEM;
+
++ dev_data->alias = get_alias(dev);
++
+ if (pci_iommuv2_capable(pdev)) {
+ struct amd_iommu *iommu;
+
+@@ -328,7 +393,7 @@ static void iommu_ignore_device(struct device *dev)
+ u16 devid, alias;
+
+ devid = get_device_id(dev);
+- alias = amd_iommu_alias_table[devid];
++ alias = get_alias(dev);
+
+ memset(&amd_iommu_dev_table[devid], 0, sizeof(struct dev_table_entry));
+ memset(&amd_iommu_dev_table[alias], 0, sizeof(struct dev_table_entry));
+@@ -1017,7 +1082,7 @@ static int device_flush_dte(struct iommu_dev_data *dev_data)
+ int ret;
+
+ iommu = amd_iommu_rlookup_table[dev_data->devid];
+- alias = amd_iommu_alias_table[dev_data->devid];
++ alias = dev_data->alias;
+
+ ret = iommu_flush_dte(iommu, dev_data->devid);
+ if (!ret && alias != dev_data->devid)
+@@ -1891,7 +1956,7 @@ static void do_attach(struct iommu_dev_data *dev_data,
+ bool ats;
+
+ iommu = amd_iommu_rlookup_table[dev_data->devid];
+- alias = amd_iommu_alias_table[dev_data->devid];
++ alias = dev_data->alias;
+ ats = dev_data->ats.enabled;
+
+ /* Update data structures */
+@@ -1925,7 +1990,7 @@ static void do_detach(struct iommu_dev_data *dev_data)
+ return;
+
+ iommu = amd_iommu_rlookup_table[dev_data->devid];
+- alias = amd_iommu_alias_table[dev_data->devid];
++ alias = dev_data->alias;
+
+ /* decrease reference counters */
+ dev_data->domain->dev_iommu[iommu->index] -= 1;
+diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
+index 72d6182666cb..58f2fe687a24 100644
+--- a/drivers/iommu/dma-iommu.c
++++ b/drivers/iommu/dma-iommu.c
+@@ -403,7 +403,7 @@ static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
+ unsigned int s_length = sg_dma_len(s);
+ unsigned int s_dma_len = s->length;
+
+- s->offset = s_offset;
++ s->offset += s_offset;
+ s->length = s_length;
+ sg_dma_address(s) = dma_addr + s_offset;
+ dma_addr += s_dma_len;
+@@ -422,7 +422,7 @@ static void __invalidate_sg(struct scatterlist *sg, int nents)
+
+ for_each_sg(sg, s, nents, i) {
+ if (sg_dma_address(s) != DMA_ERROR_CODE)
+- s->offset = sg_dma_address(s);
++ s->offset += sg_dma_address(s);
+ if (sg_dma_len(s))
+ s->length = sg_dma_len(s);
+ sg_dma_address(s) = DMA_ERROR_CODE;
+diff --git a/drivers/irqchip/irq-mxs.c b/drivers/irqchip/irq-mxs.c
+index efe50845939d..17304705f2cf 100644
+--- a/drivers/irqchip/irq-mxs.c
++++ b/drivers/irqchip/irq-mxs.c
+@@ -183,7 +183,7 @@ static void __iomem * __init icoll_init_iobase(struct device_node *np)
+ void __iomem *icoll_base;
+
+ icoll_base = of_io_request_and_map(np, 0, np->name);
+- if (!icoll_base)
++ if (IS_ERR(icoll_base))
+ panic("%s: unable to map resource", np->full_name);
+ return icoll_base;
+ }
+diff --git a/drivers/irqchip/irq-sunxi-nmi.c b/drivers/irqchip/irq-sunxi-nmi.c
+index 4ef178078e5b..1254e98f6b57 100644
+--- a/drivers/irqchip/irq-sunxi-nmi.c
++++ b/drivers/irqchip/irq-sunxi-nmi.c
+@@ -154,9 +154,9 @@ static int __init sunxi_sc_nmi_irq_init(struct device_node *node,
+
+ gc = irq_get_domain_generic_chip(domain, 0);
+ gc->reg_base = of_io_request_and_map(node, 0, of_node_full_name(node));
+- if (!gc->reg_base) {
++ if (IS_ERR(gc->reg_base)) {
+ pr_err("unable to map resource\n");
+- ret = -ENOMEM;
++ ret = PTR_ERR(gc->reg_base);
+ goto fail_irqd_remove;
+ }
+
+diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
+index 27f2ef300f8b..3970cda10080 100644
+--- a/drivers/md/dm-cache-metadata.c
++++ b/drivers/md/dm-cache-metadata.c
+@@ -867,39 +867,55 @@ static int blocks_are_unmapped_or_clean(struct dm_cache_metadata *cmd,
+ return 0;
+ }
+
+-#define WRITE_LOCK(cmd) \
+- down_write(&cmd->root_lock); \
+- if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \
+- up_write(&cmd->root_lock); \
+- return -EINVAL; \
++static bool cmd_write_lock(struct dm_cache_metadata *cmd)
++{
++ down_write(&cmd->root_lock);
++ if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) {
++ up_write(&cmd->root_lock);
++ return false;
+ }
++ return true;
++}
+
+-#define WRITE_LOCK_VOID(cmd) \
+- down_write(&cmd->root_lock); \
+- if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \
+- up_write(&cmd->root_lock); \
+- return; \
+- }
++#define WRITE_LOCK(cmd) \
++ do { \
++ if (!cmd_write_lock((cmd))) \
++ return -EINVAL; \
++ } while(0)
++
++#define WRITE_LOCK_VOID(cmd) \
++ do { \
++ if (!cmd_write_lock((cmd))) \
++ return; \
++ } while(0)
+
+ #define WRITE_UNLOCK(cmd) \
+- up_write(&cmd->root_lock)
++ up_write(&(cmd)->root_lock)
+
+-#define READ_LOCK(cmd) \
+- down_read(&cmd->root_lock); \
+- if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \
+- up_read(&cmd->root_lock); \
+- return -EINVAL; \
++static bool cmd_read_lock(struct dm_cache_metadata *cmd)
++{
++ down_read(&cmd->root_lock);
++ if (cmd->fail_io) {
++ up_read(&cmd->root_lock);
++ return false;
+ }
++ return true;
++}
+
+-#define READ_LOCK_VOID(cmd) \
+- down_read(&cmd->root_lock); \
+- if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \
+- up_read(&cmd->root_lock); \
+- return; \
+- }
++#define READ_LOCK(cmd) \
++ do { \
++ if (!cmd_read_lock((cmd))) \
++ return -EINVAL; \
++ } while(0)
++
++#define READ_LOCK_VOID(cmd) \
++ do { \
++ if (!cmd_read_lock((cmd))) \
++ return; \
++ } while(0)
+
+ #define READ_UNLOCK(cmd) \
+- up_read(&cmd->root_lock)
++ up_read(&(cmd)->root_lock)
+
+ int dm_cache_resize(struct dm_cache_metadata *cmd, dm_cblock_t new_cache_size)
+ {
+diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
+index 33bdd81065e8..11f39791ec33 100644
+--- a/drivers/media/v4l2-core/videobuf2-core.c
++++ b/drivers/media/v4l2-core/videobuf2-core.c
+@@ -1502,7 +1502,7 @@ static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)
+ * Will sleep if required for nonblocking == false.
+ */
+ static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
+- int nonblocking)
++ void *pb, int nonblocking)
+ {
+ unsigned long flags;
+ int ret;
+@@ -1523,10 +1523,10 @@ static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
+ /*
+ * Only remove the buffer from done_list if v4l2_buffer can handle all
+ * the planes.
+- * Verifying planes is NOT necessary since it already has been checked
+- * before the buffer is queued/prepared. So it can never fail.
+ */
+- list_del(&(*vb)->done_entry);
++ ret = call_bufop(q, verify_planes_array, *vb, pb);
++ if (!ret)
++ list_del(&(*vb)->done_entry);
+ spin_unlock_irqrestore(&q->done_lock, flags);
+
+ return ret;
+@@ -1604,7 +1604,7 @@ int vb2_core_dqbuf(struct vb2_queue *q, void *pb, bool nonblocking)
+ struct vb2_buffer *vb = NULL;
+ int ret;
+
+- ret = __vb2_get_done_vb(q, &vb, nonblocking);
++ ret = __vb2_get_done_vb(q, &vb, pb, nonblocking);
+ if (ret < 0)
+ return ret;
+
+diff --git a/drivers/media/v4l2-core/videobuf2-memops.c b/drivers/media/v4l2-core/videobuf2-memops.c
+index dbec5923fcf0..3c3b517f1d1c 100644
+--- a/drivers/media/v4l2-core/videobuf2-memops.c
++++ b/drivers/media/v4l2-core/videobuf2-memops.c
+@@ -49,7 +49,7 @@ struct frame_vector *vb2_create_framevec(unsigned long start,
+ vec = frame_vector_create(nr);
+ if (!vec)
+ return ERR_PTR(-ENOMEM);
+- ret = get_vaddr_frames(start, nr, write, 1, vec);
++ ret = get_vaddr_frames(start & PAGE_MASK, nr, write, true, vec);
+ if (ret < 0)
+ goto out_destroy;
+ /* We accept only complete set of PFNs */
+diff --git a/drivers/media/v4l2-core/videobuf2-v4l2.c b/drivers/media/v4l2-core/videobuf2-v4l2.c
+index 502984c724ff..6c441be8f893 100644
+--- a/drivers/media/v4l2-core/videobuf2-v4l2.c
++++ b/drivers/media/v4l2-core/videobuf2-v4l2.c
+@@ -67,6 +67,11 @@ static int __verify_planes_array(struct vb2_buffer *vb, const struct v4l2_buffer
+ return 0;
+ }
+
++static int __verify_planes_array_core(struct vb2_buffer *vb, const void *pb)
++{
++ return __verify_planes_array(vb, pb);
++}
++
+ /**
+ * __verify_length() - Verify that the bytesused value for each plane fits in
+ * the plane length and that the data offset doesn't exceed the bytesused value.
+@@ -432,6 +437,7 @@ static int __fill_vb2_buffer(struct vb2_buffer *vb,
+ }
+
+ static const struct vb2_buf_ops v4l2_buf_ops = {
++ .verify_planes_array = __verify_planes_array_core,
+ .fill_user_buffer = __fill_v4l2_buffer,
+ .fill_vb2_buffer = __fill_vb2_buffer,
+ .set_timestamp = __set_timestamp,
+diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
+index 22892c701c63..4bf7d50b1bc7 100644
+--- a/drivers/misc/Kconfig
++++ b/drivers/misc/Kconfig
+@@ -439,7 +439,7 @@ config ARM_CHARLCD
+ still useful.
+
+ config BMP085
+- bool
++ tristate
+ depends on SYSFS
+
+ config BMP085_I2C
+diff --git a/drivers/misc/ad525x_dpot.c b/drivers/misc/ad525x_dpot.c
+index 15e88078ba1e..f1a0b99f5a9a 100644
+--- a/drivers/misc/ad525x_dpot.c
++++ b/drivers/misc/ad525x_dpot.c
+@@ -216,7 +216,7 @@ static s32 dpot_read_i2c(struct dpot_data *dpot, u8 reg)
+ */
+ value = swab16(value);
+
+- if (dpot->uid == DPOT_UID(AD5271_ID))
++ if (dpot->uid == DPOT_UID(AD5274_ID))
+ value = value >> 2;
+ return value;
+ default:
+diff --git a/drivers/misc/cxl/irq.c b/drivers/misc/cxl/irq.c
+index 09a406058c46..efbb6945eb18 100644
+--- a/drivers/misc/cxl/irq.c
++++ b/drivers/misc/cxl/irq.c
+@@ -288,7 +288,6 @@ unsigned int cxl_map_irq(struct cxl *adapter, irq_hw_number_t hwirq,
+ void cxl_unmap_irq(unsigned int virq, void *cookie)
+ {
+ free_irq(virq, cookie);
+- irq_dispose_mapping(virq);
+ }
+
+ static int cxl_register_one_irq(struct cxl *adapter,
+diff --git a/drivers/misc/mic/scif/scif_rma.c b/drivers/misc/mic/scif/scif_rma.c
+index 8310b4dbff06..6a451bd65bf3 100644
+--- a/drivers/misc/mic/scif/scif_rma.c
++++ b/drivers/misc/mic/scif/scif_rma.c
+@@ -1511,7 +1511,7 @@ off_t scif_register_pinned_pages(scif_epd_t epd,
+ if ((map_flags & SCIF_MAP_FIXED) &&
+ ((ALIGN(offset, PAGE_SIZE) != offset) ||
+ (offset < 0) ||
+- (offset + (off_t)len < offset)))
++ (len > LONG_MAX - offset)))
+ return -EINVAL;
+
+ might_sleep();
+@@ -1614,7 +1614,7 @@ off_t scif_register(scif_epd_t epd, void *addr, size_t len, off_t offset,
+ if ((map_flags & SCIF_MAP_FIXED) &&
+ ((ALIGN(offset, PAGE_SIZE) != offset) ||
+ (offset < 0) ||
+- (offset + (off_t)len < offset)))
++ (len > LONG_MAX - offset)))
+ return -EINVAL;
+
+ /* Unsupported protection requested */
+@@ -1732,7 +1732,8 @@ scif_unregister(scif_epd_t epd, off_t offset, size_t len)
+
+ /* Offset is not page aligned or offset+len wraps around */
+ if ((ALIGN(offset, PAGE_SIZE) != offset) ||
+- (offset + (off_t)len < offset))
++ (offset < 0) ||
++ (len > LONG_MAX - offset))
+ return -EINVAL;
+
+ err = scif_verify_epd(ep);
+diff --git a/drivers/mtd/nand/brcmnand/brcmnand.c b/drivers/mtd/nand/brcmnand/brcmnand.c
+index 12c6190c6e33..4a07ba1195b5 100644
+--- a/drivers/mtd/nand/brcmnand/brcmnand.c
++++ b/drivers/mtd/nand/brcmnand/brcmnand.c
+@@ -309,6 +309,36 @@ static const u16 brcmnand_regs_v60[] = {
+ [BRCMNAND_FC_BASE] = 0x400,
+ };
+
++/* BRCMNAND v7.1 */
++static const u16 brcmnand_regs_v71[] = {
++ [BRCMNAND_CMD_START] = 0x04,
++ [BRCMNAND_CMD_EXT_ADDRESS] = 0x08,
++ [BRCMNAND_CMD_ADDRESS] = 0x0c,
++ [BRCMNAND_INTFC_STATUS] = 0x14,
++ [BRCMNAND_CS_SELECT] = 0x18,
++ [BRCMNAND_CS_XOR] = 0x1c,
++ [BRCMNAND_LL_OP] = 0x20,
++ [BRCMNAND_CS0_BASE] = 0x50,
++ [BRCMNAND_CS1_BASE] = 0,
++ [BRCMNAND_CORR_THRESHOLD] = 0xdc,
++ [BRCMNAND_CORR_THRESHOLD_EXT] = 0xe0,
++ [BRCMNAND_UNCORR_COUNT] = 0xfc,
++ [BRCMNAND_CORR_COUNT] = 0x100,
++ [BRCMNAND_CORR_EXT_ADDR] = 0x10c,
++ [BRCMNAND_CORR_ADDR] = 0x110,
++ [BRCMNAND_UNCORR_EXT_ADDR] = 0x114,
++ [BRCMNAND_UNCORR_ADDR] = 0x118,
++ [BRCMNAND_SEMAPHORE] = 0x150,
++ [BRCMNAND_ID] = 0x194,
++ [BRCMNAND_ID_EXT] = 0x198,
++ [BRCMNAND_LL_RDATA] = 0x19c,
++ [BRCMNAND_OOB_READ_BASE] = 0x200,
++ [BRCMNAND_OOB_READ_10_BASE] = 0,
++ [BRCMNAND_OOB_WRITE_BASE] = 0x280,
++ [BRCMNAND_OOB_WRITE_10_BASE] = 0,
++ [BRCMNAND_FC_BASE] = 0x400,
++};
++
+ enum brcmnand_cs_reg {
+ BRCMNAND_CS_CFG_EXT = 0,
+ BRCMNAND_CS_CFG,
+@@ -404,7 +434,9 @@ static int brcmnand_revision_init(struct brcmnand_controller *ctrl)
+ }
+
+ /* Register offsets */
+- if (ctrl->nand_version >= 0x0600)
++ if (ctrl->nand_version >= 0x0701)
++ ctrl->reg_offsets = brcmnand_regs_v71;
++ else if (ctrl->nand_version >= 0x0600)
+ ctrl->reg_offsets = brcmnand_regs_v60;
+ else if (ctrl->nand_version >= 0x0500)
+ ctrl->reg_offsets = brcmnand_regs_v50;
+diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
+index 3ff583f165cd..ce7b2cab5762 100644
+--- a/drivers/mtd/nand/nand_base.c
++++ b/drivers/mtd/nand/nand_base.c
+@@ -3979,7 +3979,6 @@ static int nand_dt_init(struct mtd_info *mtd, struct nand_chip *chip,
+ * This is the first phase of the normal nand_scan() function. It reads the
+ * flash ID and sets up MTD fields accordingly.
+ *
+- * The mtd->owner field must be set to the module of the caller.
+ */
+ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
+ struct nand_flash_dev *table)
+@@ -4403,19 +4402,12 @@ EXPORT_SYMBOL(nand_scan_tail);
+ *
+ * This fills out all the uninitialized function pointers with the defaults.
+ * The flash ID is read and the mtd/chip structures are filled with the
+- * appropriate values. The mtd->owner field must be set to the module of the
+- * caller.
++ * appropriate values.
+ */
+ int nand_scan(struct mtd_info *mtd, int maxchips)
+ {
+ int ret;
+
+- /* Many callers got this wrong, so check for it for a while... */
+- if (!mtd->owner && caller_is_module()) {
+- pr_crit("%s called with NULL mtd->owner!\n", __func__);
+- BUG();
+- }
+-
+ ret = nand_scan_ident(mtd, maxchips, NULL);
+ if (!ret)
+ ret = nand_scan_tail(mtd);
+diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
+index 32477c4eb421..37e4135ab213 100644
+--- a/drivers/mtd/spi-nor/spi-nor.c
++++ b/drivers/mtd/spi-nor/spi-nor.c
+@@ -1067,45 +1067,6 @@ static int spansion_quad_enable(struct spi_nor *nor)
+ return 0;
+ }
+
+-static int micron_quad_enable(struct spi_nor *nor)
+-{
+- int ret;
+- u8 val;
+-
+- ret = nor->read_reg(nor, SPINOR_OP_RD_EVCR, &val, 1);
+- if (ret < 0) {
+- dev_err(nor->dev, "error %d reading EVCR\n", ret);
+- return ret;
+- }
+-
+- write_enable(nor);
+-
+- /* set EVCR, enable quad I/O */
+- nor->cmd_buf[0] = val & ~EVCR_QUAD_EN_MICRON;
+- ret = nor->write_reg(nor, SPINOR_OP_WD_EVCR, nor->cmd_buf, 1);
+- if (ret < 0) {
+- dev_err(nor->dev, "error while writing EVCR register\n");
+- return ret;
+- }
+-
+- ret = spi_nor_wait_till_ready(nor);
+- if (ret)
+- return ret;
+-
+- /* read EVCR and check it */
+- ret = nor->read_reg(nor, SPINOR_OP_RD_EVCR, &val, 1);
+- if (ret < 0) {
+- dev_err(nor->dev, "error %d reading EVCR\n", ret);
+- return ret;
+- }
+- if (val & EVCR_QUAD_EN_MICRON) {
+- dev_err(nor->dev, "Micron EVCR Quad bit not clear\n");
+- return -EINVAL;
+- }
+-
+- return 0;
+-}
+-
+ static int set_quad_mode(struct spi_nor *nor, const struct flash_info *info)
+ {
+ int status;
+@@ -1119,12 +1080,7 @@ static int set_quad_mode(struct spi_nor *nor, const struct flash_info *info)
+ }
+ return status;
+ case SNOR_MFR_MICRON:
+- status = micron_quad_enable(nor);
+- if (status) {
+- dev_err(nor->dev, "Micron quad-read not enabled\n");
+- return -EINVAL;
+- }
+- return status;
++ return 0;
+ default:
+ status = spansion_quad_enable(nor);
+ if (status) {
+diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
+index 973dade2d07f..1257b18e6b90 100644
+--- a/drivers/net/ethernet/jme.c
++++ b/drivers/net/ethernet/jme.c
+@@ -270,11 +270,17 @@ jme_reset_mac_processor(struct jme_adapter *jme)
+ }
+
+ static inline void
+-jme_clear_pm(struct jme_adapter *jme)
++jme_clear_pm_enable_wol(struct jme_adapter *jme)
+ {
+ jwrite32(jme, JME_PMCS, PMCS_STMASK | jme->reg_pmcs);
+ }
+
++static inline void
++jme_clear_pm_disable_wol(struct jme_adapter *jme)
++{
++ jwrite32(jme, JME_PMCS, PMCS_STMASK);
++}
++
+ static int
+ jme_reload_eeprom(struct jme_adapter *jme)
+ {
+@@ -1853,7 +1859,7 @@ jme_open(struct net_device *netdev)
+ struct jme_adapter *jme = netdev_priv(netdev);
+ int rc;
+
+- jme_clear_pm(jme);
++ jme_clear_pm_disable_wol(jme);
+ JME_NAPI_ENABLE(jme);
+
+ tasklet_init(&jme->linkch_task, jme_link_change_tasklet,
+@@ -1925,11 +1931,11 @@ jme_wait_link(struct jme_adapter *jme)
+ static void
+ jme_powersave_phy(struct jme_adapter *jme)
+ {
+- if (jme->reg_pmcs) {
++ if (jme->reg_pmcs && device_may_wakeup(&jme->pdev->dev)) {
+ jme_set_100m_half(jme);
+ if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN))
+ jme_wait_link(jme);
+- jme_clear_pm(jme);
++ jme_clear_pm_enable_wol(jme);
+ } else {
+ jme_phy_off(jme);
+ }
+@@ -2646,9 +2652,6 @@ jme_set_wol(struct net_device *netdev,
+ if (wol->wolopts & WAKE_MAGIC)
+ jme->reg_pmcs |= PMCS_MFEN;
+
+- jwrite32(jme, JME_PMCS, jme->reg_pmcs);
+- device_set_wakeup_enable(&jme->pdev->dev, !!(jme->reg_pmcs));
+-
+ return 0;
+ }
+
+@@ -3172,8 +3175,8 @@ jme_init_one(struct pci_dev *pdev,
+ jme->mii_if.mdio_read = jme_mdio_read;
+ jme->mii_if.mdio_write = jme_mdio_write;
+
+- jme_clear_pm(jme);
+- device_set_wakeup_enable(&pdev->dev, true);
++ jme_clear_pm_disable_wol(jme);
++ device_init_wakeup(&pdev->dev, true);
+
+ jme_set_phyfifo_5level(jme);
+ jme->pcirev = pdev->revision;
+@@ -3304,7 +3307,7 @@ jme_resume(struct device *dev)
+ if (!netif_running(netdev))
+ return 0;
+
+- jme_clear_pm(jme);
++ jme_clear_pm_disable_wol(jme);
+ jme_phy_on(jme);
+ if (test_bit(JME_FLAG_SSET, &jme->flags))
+ jme_set_settings(netdev, &jme->old_ecmd);
+diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+index e88afac51c5d..f96ab2f4b90e 100644
+--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
++++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+@@ -1557,6 +1557,8 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
+ /* the fw is stopped, the aux sta is dead: clean up driver state */
+ iwl_mvm_del_aux_sta(mvm);
+
++ iwl_free_fw_paging(mvm);
++
+ /*
+ * Clear IN_HW_RESTART flag when stopping the hw (as restart_complete()
+ * won't be called in this case).
+diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c
+index c3adf2bcdc85..13c97f665ba8 100644
+--- a/drivers/net/wireless/iwlwifi/mvm/ops.c
++++ b/drivers/net/wireless/iwlwifi/mvm/ops.c
+@@ -645,8 +645,6 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
+ for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++)
+ kfree(mvm->nvm_sections[i].data);
+
+- iwl_free_fw_paging(mvm);
+-
+ iwl_mvm_tof_clean(mvm);
+
+ ieee80211_free_hw(mvm->hw);
+diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
+index 8c7204738aa3..00e0332e2544 100644
+--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
++++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
+@@ -731,8 +731,8 @@ static int iwl_pcie_rsa_race_bug_wa(struct iwl_trans *trans)
+ */
+ val = iwl_read_prph(trans, PREG_AUX_BUS_WPROT_0);
+ if (val & (BIT(1) | BIT(17))) {
+- IWL_INFO(trans,
+- "can't access the RSA semaphore it is write protected\n");
++ IWL_DEBUG_INFO(trans,
++ "can't access the RSA semaphore it is write protected\n");
+ return 0;
+ }
+
+diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
+index 5c717275a7fa..3d8019eb3d84 100644
+--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
++++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
+@@ -939,7 +939,8 @@ static int mtk_gpio_set_debounce(struct gpio_chip *chip, unsigned offset,
+ struct mtk_pinctrl *pctl = dev_get_drvdata(chip->dev);
+ int eint_num, virq, eint_offset;
+ unsigned int set_offset, bit, clr_bit, clr_offset, rst, i, unmask, dbnc;
+- static const unsigned int dbnc_arr[] = {0 , 1, 16, 32, 64, 128, 256};
++ static const unsigned int debounce_time[] = {500, 1000, 16000, 32000, 64000,
++ 128000, 256000};
+ const struct mtk_desc_pin *pin;
+ struct irq_data *d;
+
+@@ -957,9 +958,9 @@ static int mtk_gpio_set_debounce(struct gpio_chip *chip, unsigned offset,
+ if (!mtk_eint_can_en_debounce(pctl, eint_num))
+ return -ENOSYS;
+
+- dbnc = ARRAY_SIZE(dbnc_arr);
+- for (i = 0; i < ARRAY_SIZE(dbnc_arr); i++) {
+- if (debounce <= dbnc_arr[i]) {
++ dbnc = ARRAY_SIZE(debounce_time);
++ for (i = 0; i < ARRAY_SIZE(debounce_time); i++) {
++ if (debounce <= debounce_time[i]) {
+ dbnc = i;
+ break;
+ }
+diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
+index ef04b962c3d5..23b6b8c29a99 100644
+--- a/drivers/pinctrl/pinctrl-single.c
++++ b/drivers/pinctrl/pinctrl-single.c
+@@ -1273,9 +1273,9 @@ static int pcs_parse_bits_in_pinctrl_entry(struct pcs_device *pcs,
+
+ /* Parse pins in each row from LSB */
+ while (mask) {
+- bit_pos = ffs(mask);
++ bit_pos = __ffs(mask);
+ pin_num_from_lsb = bit_pos / pcs->bits_per_pin;
+- mask_pos = ((pcs->fmask) << (bit_pos - 1));
++ mask_pos = ((pcs->fmask) << bit_pos);
+ val_pos = val & mask_pos;
+ submask = mask & mask_pos;
+
+@@ -1847,7 +1847,7 @@ static int pcs_probe(struct platform_device *pdev)
+ ret = of_property_read_u32(np, "pinctrl-single,function-mask",
+ &pcs->fmask);
+ if (!ret) {
+- pcs->fshift = ffs(pcs->fmask) - 1;
++ pcs->fshift = __ffs(pcs->fmask);
+ pcs->fmax = pcs->fmask >> pcs->fshift;
+ } else {
+ /* If mask property doesn't exist, function mux is invalid. */
+diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
+index b0f62141ea4d..f774cb576ffa 100644
+--- a/drivers/platform/x86/toshiba_acpi.c
++++ b/drivers/platform/x86/toshiba_acpi.c
+@@ -131,7 +131,7 @@ MODULE_LICENSE("GPL");
+ /* Field definitions */
+ #define HCI_ACCEL_MASK 0x7fff
+ #define HCI_HOTKEY_DISABLE 0x0b
+-#define HCI_HOTKEY_ENABLE 0x01
++#define HCI_HOTKEY_ENABLE 0x09
+ #define HCI_HOTKEY_SPECIAL_FUNCTIONS 0x10
+ #define HCI_LCD_BRIGHTNESS_BITS 3
+ #define HCI_LCD_BRIGHTNESS_SHIFT (16-HCI_LCD_BRIGHTNESS_BITS)
+diff --git a/drivers/pwm/pwm-brcmstb.c b/drivers/pwm/pwm-brcmstb.c
+index 423ce087cd9c..5d5adee16886 100644
+--- a/drivers/pwm/pwm-brcmstb.c
++++ b/drivers/pwm/pwm-brcmstb.c
+@@ -274,8 +274,8 @@ static int brcmstb_pwm_probe(struct platform_device *pdev)
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ p->base = devm_ioremap_resource(&pdev->dev, res);
+- if (!p->base) {
+- ret = -ENOMEM;
++ if (IS_ERR(p->base)) {
++ ret = PTR_ERR(p->base);
+ goto out_clk;
+ }
+
+diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
+index 7b94b8ee087c..732ac71b82cd 100644
+--- a/drivers/regulator/core.c
++++ b/drivers/regulator/core.c
+@@ -148,7 +148,7 @@ static void regulator_lock_supply(struct regulator_dev *rdev)
+ {
+ int i;
+
+- for (i = 0; rdev->supply; rdev = rdev_get_supply(rdev), i++)
++ for (i = 0; rdev; rdev = rdev_get_supply(rdev), i++)
+ mutex_lock_nested(&rdev->mutex, i);
+ }
+
+diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c
+index 58f5d3b8e981..27343e1c43ef 100644
+--- a/drivers/regulator/s5m8767.c
++++ b/drivers/regulator/s5m8767.c
+@@ -202,9 +202,10 @@ static int s5m8767_get_register(struct s5m8767_info *s5m8767, int reg_id,
+ }
+ }
+
+- if (i < s5m8767->num_regulators)
+- *enable_ctrl =
+- s5m8767_opmode_reg[reg_id][mode] << S5M8767_ENCTRL_SHIFT;
++ if (i >= s5m8767->num_regulators)
++ return -EINVAL;
++
++ *enable_ctrl = s5m8767_opmode_reg[reg_id][mode] << S5M8767_ENCTRL_SHIFT;
+
+ return 0;
+ }
+@@ -937,8 +938,12 @@ static int s5m8767_pmic_probe(struct platform_device *pdev)
+ else
+ regulators[id].vsel_mask = 0xff;
+
+- s5m8767_get_register(s5m8767, id, &enable_reg,
++ ret = s5m8767_get_register(s5m8767, id, &enable_reg,
+ &enable_val);
++ if (ret) {
++ dev_err(s5m8767->dev, "error reading registers\n");
++ return ret;
++ }
+ regulators[id].enable_reg = enable_reg;
+ regulators[id].enable_mask = S5M8767_ENCTRL_MASK;
+ regulators[id].enable_val = enable_val;
+diff --git a/drivers/rtc/rtc-ds1685.c b/drivers/rtc/rtc-ds1685.c
+index 05a51ef52703..d5c1b057a739 100644
+--- a/drivers/rtc/rtc-ds1685.c
++++ b/drivers/rtc/rtc-ds1685.c
+@@ -187,9 +187,9 @@ ds1685_rtc_end_data_access(struct ds1685_priv *rtc)
+ * Only use this where you are certain another lock will not be held.
+ */
+ static inline void
+-ds1685_rtc_begin_ctrl_access(struct ds1685_priv *rtc, unsigned long flags)
++ds1685_rtc_begin_ctrl_access(struct ds1685_priv *rtc, unsigned long *flags)
+ {
+- spin_lock_irqsave(&rtc->lock, flags);
++ spin_lock_irqsave(&rtc->lock, *flags);
+ ds1685_rtc_switch_to_bank1(rtc);
+ }
+
+@@ -1304,7 +1304,7 @@ ds1685_rtc_sysfs_ctrl_regs_store(struct device *dev,
+ {
+ struct ds1685_priv *rtc = dev_get_drvdata(dev);
+ u8 reg = 0, bit = 0, tmp;
+- unsigned long flags = 0;
++ unsigned long flags;
+ long int val = 0;
+ const struct ds1685_rtc_ctrl_regs *reg_info =
+ ds1685_rtc_sysfs_ctrl_regs_lookup(attr->attr.name);
+@@ -1325,7 +1325,7 @@ ds1685_rtc_sysfs_ctrl_regs_store(struct device *dev,
+ bit = reg_info->bit;
+
+ /* Safe to spinlock during a write. */
+- ds1685_rtc_begin_ctrl_access(rtc, flags);
++ ds1685_rtc_begin_ctrl_access(rtc, &flags);
+ tmp = rtc->read(rtc, reg);
+ rtc->write(rtc, reg, (val ? (tmp | bit) : (tmp & ~(bit))));
+ ds1685_rtc_end_ctrl_access(rtc, flags);
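The signature change matters because spin_lock_irqsave() is a macro that writes the saved IRQ state into the variable it is handed; a wrapper taking `unsigned long flags` by value saves the state into its own copy, and the caller later restores stale flags. A userspace analogue of the two shapes (save_state is a stand-in for the irqsave side effect):

#include <stdio.h>

#define save_state(flags)   ((flags) = 0xdead)  /* stand-in for irqsave */

static void begin_access_by_value(unsigned long flags)
{
        save_state(flags);              /* lost when the helper returns */
}

static void begin_access_by_ptr(unsigned long *flags)
{
        save_state(*flags);             /* visible to the caller */
}

int main(void)
{
        unsigned long flags = 0;

        begin_access_by_value(flags);
        printf("by value:   %#lx\n", flags);    /* 0 -- state lost */
        begin_access_by_ptr(&flags);
        printf("by pointer: %#lx\n", flags);    /* 0xdead */
        return 0;
}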
+diff --git a/drivers/rtc/rtc-hym8563.c b/drivers/rtc/rtc-hym8563.c
+index 097325d96db5..b1b4746a0eab 100644
+--- a/drivers/rtc/rtc-hym8563.c
++++ b/drivers/rtc/rtc-hym8563.c
+@@ -144,7 +144,7 @@ static int hym8563_rtc_set_time(struct device *dev, struct rtc_time *tm)
+ * it does not seem to carry it over a subsequent write/read.
+	 * So we'll limit ourselves to 100 years, starting at 2000 for now.
+ */
+- buf[6] = tm->tm_year - 100;
++ buf[6] = bin2bcd(tm->tm_year - 100);
+
+ /*
+ * CTL1 only contains TEST-mode bits apart from stop,
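The one-line fix matters because the RTC stores the year in BCD, one decimal digit per nibble, so writing raw binary silently corrupts the date. A sketch of the conversion (this mirrors the kernel's bin2bcd()):

#include <stdio.h>

static unsigned char bin2bcd(unsigned char val)
{
        return ((val / 10) << 4) | (val % 10);
}

int main(void)
{
        int year = 2016 - 2000;         /* tm_year - 100 for 2016 */

        printf("binary: %#x, bcd: %#x\n", year, bin2bcd(year));
        /* Raw binary 16 is 0x10, which the hardware decodes as BCD "10",
         * i.e. year 2010 -- six years lost; BCD 0x16 is what it wants. */
        return 0;
}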
+diff --git a/drivers/rtc/rtc-max77686.c b/drivers/rtc/rtc-max77686.c
+index 7184a0eda793..725dccae24e7 100644
+--- a/drivers/rtc/rtc-max77686.c
++++ b/drivers/rtc/rtc-max77686.c
+@@ -465,7 +465,7 @@ static int max77686_rtc_probe(struct platform_device *pdev)
+
+ info->virq = regmap_irq_get_virq(max77686->rtc_irq_data,
+ MAX77686_RTCIRQ_RTCA1);
+- if (!info->virq) {
++ if (info->virq <= 0) {
+ ret = -ENXIO;
+ goto err_rtc;
+ }
+diff --git a/drivers/rtc/rtc-rx8025.c b/drivers/rtc/rtc-rx8025.c
+index bd911bafb809..17341feadad1 100644
+--- a/drivers/rtc/rtc-rx8025.c
++++ b/drivers/rtc/rtc-rx8025.c
+@@ -65,7 +65,6 @@
+
+ static const struct i2c_device_id rx8025_id[] = {
+ { "rx8025", 0 },
+- { "rv8803", 1 },
+ { }
+ };
+ MODULE_DEVICE_TABLE(i2c, rx8025_id);
+diff --git a/drivers/rtc/rtc-vr41xx.c b/drivers/rtc/rtc-vr41xx.c
+index f64c282275b3..e1b86bb01062 100644
+--- a/drivers/rtc/rtc-vr41xx.c
++++ b/drivers/rtc/rtc-vr41xx.c
+@@ -272,12 +272,13 @@ static irqreturn_t rtclong1_interrupt(int irq, void *dev_id)
+ }
+
+ static const struct rtc_class_ops vr41xx_rtc_ops = {
+- .release = vr41xx_rtc_release,
+- .ioctl = vr41xx_rtc_ioctl,
+- .read_time = vr41xx_rtc_read_time,
+- .set_time = vr41xx_rtc_set_time,
+- .read_alarm = vr41xx_rtc_read_alarm,
+- .set_alarm = vr41xx_rtc_set_alarm,
++ .release = vr41xx_rtc_release,
++ .ioctl = vr41xx_rtc_ioctl,
++ .read_time = vr41xx_rtc_read_time,
++ .set_time = vr41xx_rtc_set_time,
++ .read_alarm = vr41xx_rtc_read_alarm,
++ .set_alarm = vr41xx_rtc_set_alarm,
++ .alarm_irq_enable = vr41xx_rtc_alarm_irq_enable,
+ };
+
+ static int rtc_probe(struct platform_device *pdev)
+diff --git a/drivers/scsi/device_handler/Kconfig b/drivers/scsi/device_handler/Kconfig
+index e5647d59224f..0b331c9c0a8f 100644
+--- a/drivers/scsi/device_handler/Kconfig
++++ b/drivers/scsi/device_handler/Kconfig
+@@ -13,13 +13,13 @@ menuconfig SCSI_DH
+
+ config SCSI_DH_RDAC
+ tristate "LSI RDAC Device Handler"
+- depends on SCSI_DH
++ depends on SCSI_DH && SCSI
+ help
+	  If you have an LSI RDAC, select y. Otherwise, say N.
+
+ config SCSI_DH_HP_SW
+ tristate "HP/COMPAQ MSA Device Handler"
+- depends on SCSI_DH
++ depends on SCSI_DH && SCSI
+ help
+ If you have a HP/COMPAQ MSA device that requires START_STOP to
+ be sent to start it and cannot upgrade the firmware then select y.
+@@ -27,13 +27,13 @@ config SCSI_DH_HP_SW
+
+ config SCSI_DH_EMC
+ tristate "EMC CLARiiON Device Handler"
+- depends on SCSI_DH
++ depends on SCSI_DH && SCSI
+ help
+	  If you have an EMC CLARiiON, select y. Otherwise, say N.
+
+ config SCSI_DH_ALUA
+ tristate "SPC-3 ALUA Device Handler"
+- depends on SCSI_DH
++ depends on SCSI_DH && SCSI
+ help
+ SCSI Device handler for generic SPC-3 Asymmetric Logical Unit
+ Access (ALUA).
+diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
+index 97a1c1c33b05..00ce3e269a43 100644
+--- a/drivers/scsi/megaraid/megaraid_sas_base.c
++++ b/drivers/scsi/megaraid/megaraid_sas_base.c
+@@ -6282,12 +6282,13 @@ out:
+ }
+
+ for (i = 0; i < ioc->sge_count; i++) {
+- if (kbuff_arr[i])
++ if (kbuff_arr[i]) {
+ dma_free_coherent(&instance->pdev->dev,
+ le32_to_cpu(kern_sge32[i].length),
+ kbuff_arr[i],
+ le32_to_cpu(kern_sge32[i].phys_addr));
+ kbuff_arr[i] = NULL;
++ }
+ }
+
+ megasas_return_cmd(instance, cmd);
+diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
+index 79a8bc4f6cec..035767c02072 100644
+--- a/drivers/spi/spi-rockchip.c
++++ b/drivers/spi/spi-rockchip.c
+@@ -265,7 +265,10 @@ static inline u32 rx_max(struct rockchip_spi *rs)
+ static void rockchip_spi_set_cs(struct spi_device *spi, bool enable)
+ {
+ u32 ser;
+- struct rockchip_spi *rs = spi_master_get_devdata(spi->master);
++ struct spi_master *master = spi->master;
++ struct rockchip_spi *rs = spi_master_get_devdata(master);
++
++ pm_runtime_get_sync(rs->dev);
+
+ ser = readl_relaxed(rs->regs + ROCKCHIP_SPI_SER) & SER_MASK;
+
+@@ -290,6 +293,8 @@ static void rockchip_spi_set_cs(struct spi_device *spi, bool enable)
+ ser &= ~(1 << spi->chip_select);
+
+ writel_relaxed(ser, rs->regs + ROCKCHIP_SPI_SER);
++
++ pm_runtime_put_sync(rs->dev);
+ }
+
+ static int rockchip_spi_prepare_message(struct spi_master *master,
+diff --git a/drivers/staging/rdma/hfi1/TODO b/drivers/staging/rdma/hfi1/TODO
+index 05de0dad8762..4c6f1d7d2eaf 100644
+--- a/drivers/staging/rdma/hfi1/TODO
++++ b/drivers/staging/rdma/hfi1/TODO
+@@ -3,4 +3,4 @@ July, 2015
+ - Remove unneeded file entries in sysfs
+ - Remove software processing of IB protocol and place in library for use
+ by qib, ipath (if still present), hfi1, and eventually soft-roce
+-
++- Replace incorrect uAPI
+diff --git a/drivers/staging/rdma/hfi1/file_ops.c b/drivers/staging/rdma/hfi1/file_ops.c
+index aae9826ec62b..c851e51b1dc3 100644
+--- a/drivers/staging/rdma/hfi1/file_ops.c
++++ b/drivers/staging/rdma/hfi1/file_ops.c
+@@ -62,6 +62,8 @@
+ #include <linux/cred.h>
+ #include <linux/uio.h>
+
++#include <rdma/ib.h>
++
+ #include "hfi.h"
+ #include "pio.h"
+ #include "device.h"
+@@ -214,6 +216,10 @@ static ssize_t hfi1_file_write(struct file *fp, const char __user *data,
+ int uctxt_required = 1;
+ int must_be_root = 0;
+
++ /* FIXME: This interface cannot continue out of staging */
++ if (WARN_ON_ONCE(!ib_safe_file_access(fp)))
++ return -EACCES;
++
+ if (count < sizeof(cmd)) {
+ ret = -EINVAL;
+ goto bail;
+diff --git a/drivers/thermal/rockchip_thermal.c b/drivers/thermal/rockchip_thermal.c
+index e845841ab036..7106288efae3 100644
+--- a/drivers/thermal/rockchip_thermal.c
++++ b/drivers/thermal/rockchip_thermal.c
+@@ -545,15 +545,14 @@ static int rockchip_configure_from_dt(struct device *dev,
+ thermal->chip->tshut_temp);
+ thermal->tshut_temp = thermal->chip->tshut_temp;
+ } else {
++ if (shut_temp > INT_MAX) {
++ dev_err(dev, "Invalid tshut temperature specified: %d\n",
++ shut_temp);
++ return -ERANGE;
++ }
+ thermal->tshut_temp = shut_temp;
+ }
+
+- if (thermal->tshut_temp > INT_MAX) {
+- dev_err(dev, "Invalid tshut temperature specified: %d\n",
+- thermal->tshut_temp);
+- return -ERANGE;
+- }
+-
+ if (of_property_read_u32(np, "rockchip,hw-tshut-mode", &tshut_mode)) {
+ dev_warn(dev,
+ "Missing tshut mode property, using default (%s)\n",
+diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
+index 51c7507b0444..63a06ab6ba03 100644
+--- a/drivers/tty/serial/sh-sci.c
++++ b/drivers/tty/serial/sh-sci.c
+@@ -38,7 +38,6 @@
+ #include <linux/major.h>
+ #include <linux/module.h>
+ #include <linux/mm.h>
+-#include <linux/notifier.h>
+ #include <linux/of.h>
+ #include <linux/platform_device.h>
+ #include <linux/pm_runtime.h>
+@@ -116,8 +115,6 @@ struct sci_port {
+ struct timer_list rx_timer;
+ unsigned int rx_timeout;
+ #endif
+-
+- struct notifier_block freq_transition;
+ };
+
+ #define SCI_NPORTS CONFIG_SERIAL_SH_SCI_NR_UARTS
+@@ -1606,29 +1603,6 @@ static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr)
+ return ret;
+ }
+
+-/*
+- * Here we define a transition notifier so that we can update all of our
+- * ports' baud rate when the peripheral clock changes.
+- */
+-static int sci_notifier(struct notifier_block *self,
+- unsigned long phase, void *p)
+-{
+- struct sci_port *sci_port;
+- unsigned long flags;
+-
+- sci_port = container_of(self, struct sci_port, freq_transition);
+-
+- if (phase == CPUFREQ_POSTCHANGE) {
+- struct uart_port *port = &sci_port->port;
+-
+- spin_lock_irqsave(&port->lock, flags);
+- port->uartclk = clk_get_rate(sci_port->iclk);
+- spin_unlock_irqrestore(&port->lock, flags);
+- }
+-
+- return NOTIFY_OK;
+-}
+-
+ static const struct sci_irq_desc {
+ const char *desc;
+ irq_handler_t handler;
+@@ -2559,9 +2533,6 @@ static int sci_remove(struct platform_device *dev)
+ {
+ struct sci_port *port = platform_get_drvdata(dev);
+
+- cpufreq_unregister_notifier(&port->freq_transition,
+- CPUFREQ_TRANSITION_NOTIFIER);
+-
+ uart_remove_one_port(&sci_uart_driver, &port->port);
+
+ sci_cleanup_single(port);
+@@ -2714,16 +2685,6 @@ static int sci_probe(struct platform_device *dev)
+ if (ret)
+ return ret;
+
+- sp->freq_transition.notifier_call = sci_notifier;
+-
+- ret = cpufreq_register_notifier(&sp->freq_transition,
+- CPUFREQ_TRANSITION_NOTIFIER);
+- if (unlikely(ret < 0)) {
+- uart_remove_one_port(&sci_uart_driver, &sp->port);
+- sci_cleanup_single(sp);
+- return ret;
+- }
+-
+ #ifdef CONFIG_SH_STANDARD_BIOS
+ sh_bios_gdb_detach();
+ #endif
+diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
+index 9eb1cff28bd4..b8b580e5ae6e 100644
+--- a/drivers/usb/core/hcd-pci.c
++++ b/drivers/usb/core/hcd-pci.c
+@@ -74,6 +74,15 @@ static void for_each_companion(struct pci_dev *pdev, struct usb_hcd *hcd,
+ if (companion->bus != pdev->bus ||
+ PCI_SLOT(companion->devfn) != slot)
+ continue;
++
++ /*
++	 * Companion device should be a UHCI, OHCI or EHCI host
++ * controller, otherwise skip.
++ */
++ if (companion->class != CL_UHCI && companion->class != CL_OHCI &&
++ companion->class != CL_EHCI)
++ continue;
++
+ companion_hcd = pci_get_drvdata(companion);
+ if (!companion_hcd || !companion_hcd->self.root_hub)
+ continue;
+diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
+index cf43e9e18368..79d895c2dd71 100644
+--- a/drivers/usb/gadget/function/f_fs.c
++++ b/drivers/usb/gadget/function/f_fs.c
+@@ -646,6 +646,7 @@ static void ffs_user_copy_worker(struct work_struct *work)
+ work);
+ int ret = io_data->req->status ? io_data->req->status :
+ io_data->req->actual;
++ bool kiocb_has_eventfd = io_data->kiocb->ki_flags & IOCB_EVENTFD;
+
+ if (io_data->read && ret > 0) {
+ use_mm(io_data->mm);
+@@ -657,13 +658,11 @@ static void ffs_user_copy_worker(struct work_struct *work)
+
+ io_data->kiocb->ki_complete(io_data->kiocb, ret, ret);
+
+- if (io_data->ffs->ffs_eventfd &&
+- !(io_data->kiocb->ki_flags & IOCB_EVENTFD))
++ if (io_data->ffs->ffs_eventfd && !kiocb_has_eventfd)
+ eventfd_signal(io_data->ffs->ffs_eventfd, 1);
+
+ usb_ep_free_request(io_data->ep, io_data->req);
+
+- io_data->kiocb->private = NULL;
+ if (io_data->read)
+ kfree(io_data->to_free);
+ kfree(io_data->buf);
+diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
+index c48cbe731356..d8dbd7e5194b 100644
+--- a/drivers/usb/host/xhci-mem.c
++++ b/drivers/usb/host/xhci-mem.c
+@@ -1875,6 +1875,12 @@ no_bw:
+ kfree(xhci->rh_bw);
+ kfree(xhci->ext_caps);
+
++ xhci->usb2_ports = NULL;
++ xhci->usb3_ports = NULL;
++ xhci->port_array = NULL;
++ xhci->rh_bw = NULL;
++ xhci->ext_caps = NULL;
++
+ xhci->page_size = 0;
+ xhci->page_shift = 0;
+ xhci->bus_state[0].bus_suspended = 0;
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index c2d65206ec6c..ea4fb4b0cd44 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -48,6 +48,7 @@
+ #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI 0xa12f
+ #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI 0x9d2f
+ #define PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI 0x0aa8
++#define PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI 0x1aa8
+
+ static const char hcd_name[] = "xhci_hcd";
+
+@@ -156,7 +157,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ (pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
+- pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI)) {
++ pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI ||
++ pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI)) {
+ xhci->quirks |= XHCI_PME_STUCK_QUIRK;
+ }
+ if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
+@@ -299,6 +301,7 @@ static void xhci_pci_remove(struct pci_dev *dev)
+ struct xhci_hcd *xhci;
+
+ xhci = hcd_to_xhci(pci_get_drvdata(dev));
++ xhci->xhc_state |= XHCI_STATE_REMOVING;
+ if (xhci->shared_hcd) {
+ usb_remove_hcd(xhci->shared_hcd);
+ usb_put_hcd(xhci->shared_hcd);
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index db0f0831b94f..2b63969c2bbf 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -4008,7 +4008,8 @@ static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
+ int reserved_trbs = xhci->cmd_ring_reserved_trbs;
+ int ret;
+
+- if (xhci->xhc_state) {
++ if ((xhci->xhc_state & XHCI_STATE_DYING) ||
++ (xhci->xhc_state & XHCI_STATE_HALTED)) {
+ xhci_dbg(xhci, "xHCI dying or halted, can't queue_command\n");
+ return -ESHUTDOWN;
+ }
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index 776d59c32bc5..ec9e758d5fcd 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -146,7 +146,8 @@ static int xhci_start(struct xhci_hcd *xhci)
+ "waited %u microseconds.\n",
+ XHCI_MAX_HALT_USEC);
+ if (!ret)
+- xhci->xhc_state &= ~(XHCI_STATE_HALTED | XHCI_STATE_DYING);
++		/* Clear state flags, including dying, halted or removing */
++ xhci->xhc_state = 0;
+
+ return ret;
+ }
+@@ -1103,8 +1104,8 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
+ /* Resume root hubs only when have pending events. */
+ status = readl(&xhci->op_regs->status);
+ if (status & STS_EINT) {
+- usb_hcd_resume_root_hub(hcd);
+ usb_hcd_resume_root_hub(xhci->shared_hcd);
++ usb_hcd_resume_root_hub(hcd);
+ }
+ }
+
+@@ -1119,10 +1120,10 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
+
+ /* Re-enable port polling. */
+ xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
+- set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+- usb_hcd_poll_rh_status(hcd);
+ set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
+ usb_hcd_poll_rh_status(xhci->shared_hcd);
++ set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
++ usb_hcd_poll_rh_status(hcd);
+
+ return retval;
+ }
+@@ -2753,7 +2754,8 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
+ if (ret <= 0)
+ return ret;
+ xhci = hcd_to_xhci(hcd);
+- if (xhci->xhc_state & XHCI_STATE_DYING)
++ if ((xhci->xhc_state & XHCI_STATE_DYING) ||
++ (xhci->xhc_state & XHCI_STATE_REMOVING))
+ return -ENODEV;
+
+ xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
+@@ -3800,7 +3802,7 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
+
+ mutex_lock(&xhci->mutex);
+
+- if (xhci->xhc_state) /* dying or halted */
++ if (xhci->xhc_state) /* dying, removing or halted */
+ goto out;
+
+ if (!udev->slot_id) {
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index 0b9451250e33..99ac2289dbf3 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -1596,6 +1596,7 @@ struct xhci_hcd {
+ */
+ #define XHCI_STATE_DYING (1 << 0)
+ #define XHCI_STATE_HALTED (1 << 1)
++#define XHCI_STATE_REMOVING (1 << 2)
+ /* Statistics */
+ int error_bitmask;
+ unsigned int quirks;
+diff --git a/drivers/usb/usbip/usbip_common.c b/drivers/usb/usbip/usbip_common.c
+index facaaf003f19..e40da7759a0e 100644
+--- a/drivers/usb/usbip/usbip_common.c
++++ b/drivers/usb/usbip/usbip_common.c
+@@ -741,6 +741,17 @@ int usbip_recv_xbuff(struct usbip_device *ud, struct urb *urb)
+ if (!(size > 0))
+ return 0;
+
++ if (size > urb->transfer_buffer_length) {
++ /* should not happen, probably malicious packet */
++ if (ud->side == USBIP_STUB) {
++ usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
++ return 0;
++ } else {
++ usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
++ return -EPIPE;
++ }
++ }
++
+ ret = usbip_recv(ud->tcp_socket, urb->transfer_buffer, size);
+ if (ret != size) {
+ dev_err(&urb->dev->dev, "recv xbuf, %d\n", ret);
+diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
+index e6d16d65e4e6..f07a0974fda2 100644
+--- a/drivers/video/fbdev/Kconfig
++++ b/drivers/video/fbdev/Kconfig
+@@ -2249,7 +2249,6 @@ config XEN_FBDEV_FRONTEND
+ select FB_SYS_IMAGEBLIT
+ select FB_SYS_FOPS
+ select FB_DEFERRED_IO
+- select INPUT_XEN_KBDDEV_FRONTEND if INPUT_MISC
+ select XEN_XENBUS_FRONTEND
+ default y
+ help
+diff --git a/drivers/video/fbdev/da8xx-fb.c b/drivers/video/fbdev/da8xx-fb.c
+index 0081725c6b5b..d00510029c93 100644
+--- a/drivers/video/fbdev/da8xx-fb.c
++++ b/drivers/video/fbdev/da8xx-fb.c
+@@ -209,8 +209,7 @@ static struct fb_videomode known_lcd_panels[] = {
+ .lower_margin = 2,
+ .hsync_len = 0,
+ .vsync_len = 0,
+- .sync = FB_SYNC_CLK_INVERT |
+- FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
++ .sync = FB_SYNC_CLK_INVERT,
+ },
+ /* Sharp LK043T1DG01 */
+ [1] = {
+@@ -224,7 +223,7 @@ static struct fb_videomode known_lcd_panels[] = {
+ .lower_margin = 2,
+ .hsync_len = 41,
+ .vsync_len = 10,
+- .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
++ .sync = 0,
+ .flag = 0,
+ },
+ [2] = {
+@@ -239,7 +238,7 @@ static struct fb_videomode known_lcd_panels[] = {
+ .lower_margin = 10,
+ .hsync_len = 10,
+ .vsync_len = 10,
+- .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
++ .sync = 0,
+ .flag = 0,
+ },
+ [3] = {
+diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
+index b7fcc0de0b2f..0f5d05bf2131 100644
+--- a/fs/debugfs/inode.c
++++ b/fs/debugfs/inode.c
+@@ -457,7 +457,7 @@ struct dentry *debugfs_create_automount(const char *name,
+ if (unlikely(!inode))
+ return failed_creating(dentry);
+
+- inode->i_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
++ make_empty_dir_inode(inode);
+ inode->i_flags |= S_AUTOMOUNT;
+ inode->i_private = data;
+ dentry->d_fsdata = (void *)f;
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index d4156e1c128d..b7e921d207fb 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -933,6 +933,15 @@ struct ext4_inode_info {
+ * by other means, so we have i_data_sem.
+ */
+ struct rw_semaphore i_data_sem;
++ /*
++ * i_mmap_sem is for serializing page faults with truncate / punch hole
++ * operations. We have to make sure that new page cannot be faulted in
++	 * operations. We have to make sure that a new page cannot be faulted
++	 * into a section of the inode that is being punched. We cannot easily use
++ * operation and i_data_sem ranks below transaction start so we have
++ * to occasionally drop it.
++ */
++ struct rw_semaphore i_mmap_sem;
+ struct inode vfs_inode;
+ struct jbd2_inode *jinode;
+
+@@ -2507,6 +2516,7 @@ extern int ext4_chunk_trans_blocks(struct inode *, int nrblocks);
+ extern int ext4_zero_partial_blocks(handle_t *handle, struct inode *inode,
+ loff_t lstart, loff_t lend);
+ extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
++extern int ext4_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+ extern qsize_t *ext4_get_reserved_space(struct inode *inode);
+ extern void ext4_da_update_reserve_space(struct inode *inode,
+ int used, int quota_claim);
+@@ -2871,6 +2881,9 @@ static inline int ext4_update_inode_size(struct inode *inode, loff_t newsize)
+ return changed;
+ }
+
++int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset,
++ loff_t len);
++
+ struct ext4_group_info {
+ unsigned long bb_state;
+ struct rb_root bb_free_root;
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index 551353b1b17a..3578b25fccfd 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -4685,10 +4685,6 @@ static int ext4_alloc_file_blocks(struct file *file, ext4_lblk_t offset,
+ if (len <= EXT_UNWRITTEN_MAX_LEN)
+ flags |= EXT4_GET_BLOCKS_NO_NORMALIZE;
+
+- /* Wait all existing dio workers, newcomers will block on i_mutex */
+- ext4_inode_block_unlocked_dio(inode);
+- inode_dio_wait(inode);
+-
+ /*
+ * credits to insert 1 extent into extent tree
+ */
+@@ -4752,8 +4748,6 @@ retry:
+ goto retry;
+ }
+
+- ext4_inode_resume_unlocked_dio(inode);
+-
+ return ret > 0 ? ret2 : ret;
+ }
+
+@@ -4770,7 +4764,6 @@ static long ext4_zero_range(struct file *file, loff_t offset,
+ int partial_begin, partial_end;
+ loff_t start, end;
+ ext4_lblk_t lblk;
+- struct address_space *mapping = inode->i_mapping;
+ unsigned int blkbits = inode->i_blkbits;
+
+ trace_ext4_zero_range(inode, offset, len, mode);
+@@ -4786,17 +4779,6 @@ static long ext4_zero_range(struct file *file, loff_t offset,
+ }
+
+ /*
+- * Write out all dirty pages to avoid race conditions
+- * Then release them.
+- */
+- if (mapping->nrpages && mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
+- ret = filemap_write_and_wait_range(mapping, offset,
+- offset + len - 1);
+- if (ret)
+- return ret;
+- }
+-
+- /*
+	 * Round up offset. This is not fallocate, we need to zero out
+ * blocks, so convert interior block aligned part of the range to
+ * unwritten and possibly manually zero out unaligned parts of the
+@@ -4839,6 +4821,10 @@ static long ext4_zero_range(struct file *file, loff_t offset,
+ if (mode & FALLOC_FL_KEEP_SIZE)
+ flags |= EXT4_GET_BLOCKS_KEEP_SIZE;
+
++ /* Wait all existing dio workers, newcomers will block on i_mutex */
++ ext4_inode_block_unlocked_dio(inode);
++ inode_dio_wait(inode);
++
+ /* Preallocate the range including the unaligned edges */
+ if (partial_begin || partial_end) {
+ ret = ext4_alloc_file_blocks(file,
+@@ -4847,7 +4833,7 @@ static long ext4_zero_range(struct file *file, loff_t offset,
+ round_down(offset, 1 << blkbits)) >> blkbits,
+ new_size, flags, mode);
+ if (ret)
+- goto out_mutex;
++ goto out_dio;
+
+ }
+
+@@ -4856,16 +4842,23 @@ static long ext4_zero_range(struct file *file, loff_t offset,
+ flags |= (EXT4_GET_BLOCKS_CONVERT_UNWRITTEN |
+ EXT4_EX_NOCACHE);
+
+- /* Now release the pages and zero block aligned part of pages*/
++ /*
++ * Prevent page faults from reinstantiating pages we have
++ * released from page cache.
++ */
++ down_write(&EXT4_I(inode)->i_mmap_sem);
++ ret = ext4_update_disksize_before_punch(inode, offset, len);
++ if (ret) {
++ up_write(&EXT4_I(inode)->i_mmap_sem);
++ goto out_dio;
++ }
++ /* Now release the pages and zero block aligned part of pages */
+ truncate_pagecache_range(inode, start, end - 1);
+ inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
+
+- /* Wait all existing dio workers, newcomers will block on i_mutex */
+- ext4_inode_block_unlocked_dio(inode);
+- inode_dio_wait(inode);
+-
+ ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size,
+ flags, mode);
++ up_write(&EXT4_I(inode)->i_mmap_sem);
+ if (ret)
+ goto out_dio;
+ }
+@@ -4998,8 +4991,13 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
+ goto out;
+ }
+
++ /* Wait all existing dio workers, newcomers will block on i_mutex */
++ ext4_inode_block_unlocked_dio(inode);
++ inode_dio_wait(inode);
++
+ ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size,
+ flags, mode);
++ ext4_inode_resume_unlocked_dio(inode);
+ if (ret)
+ goto out;
+
+@@ -5494,21 +5492,7 @@ int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
+ return ret;
+ }
+
+- /*
+- * Need to round down offset to be aligned with page size boundary
+- * for page size > block size.
+- */
+- ioffset = round_down(offset, PAGE_SIZE);
+-
+- /* Write out all dirty pages */
+- ret = filemap_write_and_wait_range(inode->i_mapping, ioffset,
+- LLONG_MAX);
+- if (ret)
+- return ret;
+-
+- /* Take mutex lock */
+ mutex_lock(&inode->i_mutex);
+-
+ /*
+ * There is no need to overlap collapse range with EOF, in which case
+ * it is effectively a truncate operation
+@@ -5524,17 +5508,43 @@ int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
+ goto out_mutex;
+ }
+
+- truncate_pagecache(inode, ioffset);
+-
+ /* Wait for existing dio to complete */
+ ext4_inode_block_unlocked_dio(inode);
+ inode_dio_wait(inode);
+
++ /*
++ * Prevent page faults from reinstantiating pages we have released from
++ * page cache.
++ */
++ down_write(&EXT4_I(inode)->i_mmap_sem);
++ /*
++ * Need to round down offset to be aligned with page size boundary
++ * for page size > block size.
++ */
++ ioffset = round_down(offset, PAGE_SIZE);
++ /*
++ * Write tail of the last page before removed range since it will get
++ * removed from the page cache below.
++ */
++ ret = filemap_write_and_wait_range(inode->i_mapping, ioffset, offset);
++ if (ret)
++ goto out_mmap;
++ /*
++ * Write data that will be shifted to preserve them when discarding
++ * page cache below. We are also protected from pages becoming dirty
++ * by i_mmap_sem.
++ */
++ ret = filemap_write_and_wait_range(inode->i_mapping, offset + len,
++ LLONG_MAX);
++ if (ret)
++ goto out_mmap;
++ truncate_pagecache(inode, ioffset);
++
+ credits = ext4_writepage_trans_blocks(inode);
+ handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
+ if (IS_ERR(handle)) {
+ ret = PTR_ERR(handle);
+- goto out_dio;
++ goto out_mmap;
+ }
+
+ down_write(&EXT4_I(inode)->i_data_sem);
+@@ -5573,7 +5583,8 @@ int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
+
+ out_stop:
+ ext4_journal_stop(handle);
+-out_dio:
++out_mmap:
++ up_write(&EXT4_I(inode)->i_mmap_sem);
+ ext4_inode_resume_unlocked_dio(inode);
+ out_mutex:
+ mutex_unlock(&inode->i_mutex);
+@@ -5627,21 +5638,7 @@ int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len)
+ return ret;
+ }
+
+- /*
+- * Need to round down to align start offset to page size boundary
+- * for page size > block size.
+- */
+- ioffset = round_down(offset, PAGE_SIZE);
+-
+- /* Write out all dirty pages */
+- ret = filemap_write_and_wait_range(inode->i_mapping, ioffset,
+- LLONG_MAX);
+- if (ret)
+- return ret;
+-
+- /* Take mutex lock */
+ mutex_lock(&inode->i_mutex);
+-
+ /* Currently just for extent based files */
+ if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
+ ret = -EOPNOTSUPP;
+@@ -5660,17 +5657,32 @@ int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len)
+ goto out_mutex;
+ }
+
+- truncate_pagecache(inode, ioffset);
+-
+ /* Wait for existing dio to complete */
+ ext4_inode_block_unlocked_dio(inode);
+ inode_dio_wait(inode);
+
++ /*
++ * Prevent page faults from reinstantiating pages we have released from
++ * page cache.
++ */
++ down_write(&EXT4_I(inode)->i_mmap_sem);
++ /*
++ * Need to round down to align start offset to page size boundary
++ * for page size > block size.
++ */
++ ioffset = round_down(offset, PAGE_SIZE);
++ /* Write out all dirty pages */
++ ret = filemap_write_and_wait_range(inode->i_mapping, ioffset,
++ LLONG_MAX);
++ if (ret)
++ goto out_mmap;
++ truncate_pagecache(inode, ioffset);
++
+ credits = ext4_writepage_trans_blocks(inode);
+ handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
+ if (IS_ERR(handle)) {
+ ret = PTR_ERR(handle);
+- goto out_dio;
++ goto out_mmap;
+ }
+
+ /* Expand file to avoid data loss if there is error while shifting */
+@@ -5741,7 +5753,8 @@ int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len)
+
+ out_stop:
+ ext4_journal_stop(handle);
+-out_dio:
++out_mmap:
++ up_write(&EXT4_I(inode)->i_mmap_sem);
+ ext4_inode_resume_unlocked_dio(inode);
+ out_mutex:
+ mutex_unlock(&inode->i_mutex);
+diff --git a/fs/ext4/file.c b/fs/ext4/file.c
+index 113837e7ba98..0d24ebcd7c9e 100644
+--- a/fs/ext4/file.c
++++ b/fs/ext4/file.c
+@@ -209,15 +209,18 @@ static int ext4_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+ {
+ int result;
+ handle_t *handle = NULL;
+- struct super_block *sb = file_inode(vma->vm_file)->i_sb;
++ struct inode *inode = file_inode(vma->vm_file);
++ struct super_block *sb = inode->i_sb;
+ bool write = vmf->flags & FAULT_FLAG_WRITE;
+
+ if (write) {
+ sb_start_pagefault(sb);
+ file_update_time(vma->vm_file);
++ down_read(&EXT4_I(inode)->i_mmap_sem);
+ handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
+ EXT4_DATA_TRANS_BLOCKS(sb));
+- }
++ } else
++ down_read(&EXT4_I(inode)->i_mmap_sem);
+
+ if (IS_ERR(handle))
+ result = VM_FAULT_SIGBUS;
+@@ -228,8 +231,10 @@ static int ext4_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+ if (write) {
+ if (!IS_ERR(handle))
+ ext4_journal_stop(handle);
++ up_read(&EXT4_I(inode)->i_mmap_sem);
+ sb_end_pagefault(sb);
+- }
++ } else
++ up_read(&EXT4_I(inode)->i_mmap_sem);
+
+ return result;
+ }
+@@ -246,10 +251,12 @@ static int ext4_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
+ if (write) {
+ sb_start_pagefault(sb);
+ file_update_time(vma->vm_file);
++ down_read(&EXT4_I(inode)->i_mmap_sem);
+ handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
+ ext4_chunk_trans_blocks(inode,
+ PMD_SIZE / PAGE_SIZE));
+- }
++ } else
++ down_read(&EXT4_I(inode)->i_mmap_sem);
+
+ if (IS_ERR(handle))
+ result = VM_FAULT_SIGBUS;
+@@ -260,30 +267,71 @@ static int ext4_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
+ if (write) {
+ if (!IS_ERR(handle))
+ ext4_journal_stop(handle);
++ up_read(&EXT4_I(inode)->i_mmap_sem);
+ sb_end_pagefault(sb);
+- }
++ } else
++ up_read(&EXT4_I(inode)->i_mmap_sem);
+
+ return result;
+ }
+
+ static int ext4_dax_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
+ {
+- return dax_mkwrite(vma, vmf, ext4_get_block_dax,
+- ext4_end_io_unwritten);
++ int err;
++ struct inode *inode = file_inode(vma->vm_file);
++
++ sb_start_pagefault(inode->i_sb);
++ file_update_time(vma->vm_file);
++ down_read(&EXT4_I(inode)->i_mmap_sem);
++ err = __dax_mkwrite(vma, vmf, ext4_get_block_dax,
++ ext4_end_io_unwritten);
++ up_read(&EXT4_I(inode)->i_mmap_sem);
++ sb_end_pagefault(inode->i_sb);
++
++ return err;
++}
++
++/*
++ * Handle write fault for VM_MIXEDMAP mappings. Similarly to ext4_dax_mkwrite()
++ * handler we check for races against truncate. Note that since we cycle through
++ * i_mmap_sem, we are sure that any hole punching that began before we
++ * were called is finished by now and so if it included part of the file we
++ * are working on, our pte will get unmapped and the check for pte_same() in
++ * wp_pfn_shared() fails. Thus the fault gets retried and things work out as
++ * desired.
++ */
++static int ext4_dax_pfn_mkwrite(struct vm_area_struct *vma,
++ struct vm_fault *vmf)
++{
++ struct inode *inode = file_inode(vma->vm_file);
++ struct super_block *sb = inode->i_sb;
++ int ret = VM_FAULT_NOPAGE;
++ loff_t size;
++
++ sb_start_pagefault(sb);
++ file_update_time(vma->vm_file);
++ down_read(&EXT4_I(inode)->i_mmap_sem);
++ size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
++ if (vmf->pgoff >= size)
++ ret = VM_FAULT_SIGBUS;
++ up_read(&EXT4_I(inode)->i_mmap_sem);
++ sb_end_pagefault(sb);
++
++ return ret;
+ }
+
+ static const struct vm_operations_struct ext4_dax_vm_ops = {
+ .fault = ext4_dax_fault,
+ .pmd_fault = ext4_dax_pmd_fault,
+ .page_mkwrite = ext4_dax_mkwrite,
+- .pfn_mkwrite = dax_pfn_mkwrite,
++ .pfn_mkwrite = ext4_dax_pfn_mkwrite,
+ };
+ #else
+ #define ext4_dax_vm_ops ext4_file_vm_ops
+ #endif
+
+ static const struct vm_operations_struct ext4_file_vm_ops = {
+- .fault = filemap_fault,
++ .fault = ext4_filemap_fault,
+ .map_pages = filemap_map_pages,
+ .page_mkwrite = ext4_page_mkwrite,
+ };
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 06bda0361e7c..e31d762eedce 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -3587,6 +3587,35 @@ int ext4_can_truncate(struct inode *inode)
+ }
+
+ /*
++ * We have to make sure i_disksize gets properly updated before we truncate
++ * page cache due to hole punching or zero range. Otherwise i_disksize update
++ * can get lost as it may have been postponed to submission of writeback but
++ * that will never happen after we truncate page cache.
++ */
++int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset,
++ loff_t len)
++{
++ handle_t *handle;
++ loff_t size = i_size_read(inode);
++
++ WARN_ON(!mutex_is_locked(&inode->i_mutex));
++ if (offset > size || offset + len < size)
++ return 0;
++
++ if (EXT4_I(inode)->i_disksize >= size)
++ return 0;
++
++ handle = ext4_journal_start(inode, EXT4_HT_MISC, 1);
++ if (IS_ERR(handle))
++ return PTR_ERR(handle);
++ ext4_update_i_disksize(inode, size);
++ ext4_mark_inode_dirty(handle, inode);
++ ext4_journal_stop(handle);
++
++ return 0;
++}
++
++/*
+ * ext4_punch_hole: punches a hole in a file by releasing the blocks
+ * associated with the given offset and length
+ *
+@@ -3651,17 +3680,26 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
+
+ }
+
++ /* Wait all existing dio workers, newcomers will block on i_mutex */
++ ext4_inode_block_unlocked_dio(inode);
++ inode_dio_wait(inode);
++
++ /*
++ * Prevent page faults from reinstantiating pages we have released from
++ * page cache.
++ */
++ down_write(&EXT4_I(inode)->i_mmap_sem);
+ first_block_offset = round_up(offset, sb->s_blocksize);
+ last_block_offset = round_down((offset + length), sb->s_blocksize) - 1;
+
+ /* Now release the pages and zero block aligned part of pages*/
+- if (last_block_offset > first_block_offset)
++ if (last_block_offset > first_block_offset) {
++ ret = ext4_update_disksize_before_punch(inode, offset, length);
++ if (ret)
++ goto out_dio;
+ truncate_pagecache_range(inode, first_block_offset,
+ last_block_offset);
+-
+- /* Wait all existing dio workers, newcomers will block on i_mutex */
+- ext4_inode_block_unlocked_dio(inode);
+- inode_dio_wait(inode);
++ }
+
+ if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
+ credits = ext4_writepage_trans_blocks(inode);
+@@ -3708,16 +3746,12 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
+ if (IS_SYNC(inode))
+ ext4_handle_sync(handle);
+
+- /* Now release the pages again to reduce race window */
+- if (last_block_offset > first_block_offset)
+- truncate_pagecache_range(inode, first_block_offset,
+- last_block_offset);
+-
+ inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
+ ext4_mark_inode_dirty(handle, inode);
+ out_stop:
+ ext4_journal_stop(handle);
+ out_dio:
++ up_write(&EXT4_I(inode)->i_mmap_sem);
+ ext4_inode_resume_unlocked_dio(inode);
+ out_mutex:
+ mutex_unlock(&inode->i_mutex);
+@@ -4851,6 +4885,7 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
+ } else
+ ext4_wait_for_tail_page_commit(inode);
+ }
++ down_write(&EXT4_I(inode)->i_mmap_sem);
+ /*
+ * Truncate pagecache after we've waited for commit
+ * in data=journal mode to make pages freeable.
+@@ -4858,6 +4893,7 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
+ truncate_pagecache(inode, inode->i_size);
+ if (shrink)
+ ext4_truncate(inode);
++ up_write(&EXT4_I(inode)->i_mmap_sem);
+ }
+
+ if (!rc) {
+@@ -5109,6 +5145,8 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
+ might_sleep();
+ trace_ext4_mark_inode_dirty(inode, _RET_IP_);
+ err = ext4_reserve_inode_write(handle, inode, &iloc);
++ if (err)
++ return err;
+ if (ext4_handle_valid(handle) &&
+ EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize &&
+ !ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND)) {
+@@ -5139,9 +5177,7 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
+ }
+ }
+ }
+- if (!err)
+- err = ext4_mark_iloc_dirty(handle, inode, &iloc);
+- return err;
++ return ext4_mark_iloc_dirty(handle, inode, &iloc);
+ }
+
+ /*
+@@ -5306,6 +5342,8 @@ int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
+
+ sb_start_pagefault(inode->i_sb);
+ file_update_time(vma->vm_file);
++
++ down_read(&EXT4_I(inode)->i_mmap_sem);
+ /* Delalloc case is easy... */
+ if (test_opt(inode->i_sb, DELALLOC) &&
+ !ext4_should_journal_data(inode) &&
+@@ -5375,6 +5413,19 @@ retry_alloc:
+ out_ret:
+ ret = block_page_mkwrite_return(ret);
+ out:
++ up_read(&EXT4_I(inode)->i_mmap_sem);
+ sb_end_pagefault(inode->i_sb);
+ return ret;
+ }
++
++int ext4_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
++{
++ struct inode *inode = file_inode(vma->vm_file);
++ int err;
++
++ down_read(&EXT4_I(inode)->i_mmap_sem);
++ err = filemap_fault(vma, vmf);
++ up_read(&EXT4_I(inode)->i_mmap_sem);
++
++ return err;
++}
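The ext4 hunks above all hang off one idea: i_mmap_sem is a per-inode rw-semaphore taken shared by every page-fault path and exclusively by truncate, punch-hole and zero-range, so a fault can never reinstantiate a page inside a range that is being dropped. A pthread sketch of the same discipline (the struct and helpers are illustrative, not ext4 code):

#include <pthread.h>
#include <stdio.h>

struct inode_sketch {
        pthread_rwlock_t mmap_sem;      /* analogue of i_mmap_sem */
};

static void fault_in_page(struct inode_sketch *inode)
{
        pthread_rwlock_rdlock(&inode->mmap_sem);
        /* map the page: truncate cannot run concurrently here */
        pthread_rwlock_unlock(&inode->mmap_sem);
}

static void punch_hole(struct inode_sketch *inode)
{
        pthread_rwlock_wrlock(&inode->mmap_sem);
        /* update i_disksize, drop the page cache, free blocks;
         * no fault can reinstantiate a page in the range meanwhile */
        pthread_rwlock_unlock(&inode->mmap_sem);
}

int main(void)
{
        struct inode_sketch inode;

        pthread_rwlock_init(&inode.mmap_sem, NULL);
        fault_in_page(&inode);          /* readers may run concurrently */
        punch_hole(&inode);             /* the writer excludes all faults */
        pthread_rwlock_destroy(&inode.mmap_sem);
        puts("ordering enforced");
        return 0;
}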
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index ba1cf0bf2f81..852c26806af2 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -958,6 +958,7 @@ static void init_once(void *foo)
+ INIT_LIST_HEAD(&ei->i_orphan);
+ init_rwsem(&ei->xattr_sem);
+ init_rwsem(&ei->i_data_sem);
++ init_rwsem(&ei->i_mmap_sem);
+ inode_init_once(&ei->vfs_inode);
+ }
+
+diff --git a/fs/ext4/truncate.h b/fs/ext4/truncate.h
+index 011ba6670d99..c70d06a383e2 100644
+--- a/fs/ext4/truncate.h
++++ b/fs/ext4/truncate.h
+@@ -10,8 +10,10 @@
+ */
+ static inline void ext4_truncate_failed_write(struct inode *inode)
+ {
++ down_write(&EXT4_I(inode)->i_mmap_sem);
+ truncate_inode_pages(inode->i_mapping, inode->i_size);
+ ext4_truncate(inode);
++ up_write(&EXT4_I(inode)->i_mmap_sem);
+ }
+
+ /*
+diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
+index 09cd3edde08a..f6478301db00 100644
+--- a/fs/proc/task_mmu.c
++++ b/fs/proc/task_mmu.c
+@@ -1435,6 +1435,32 @@ static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
+ return page;
+ }
+
++#ifdef CONFIG_TRANSPARENT_HUGEPAGE
++static struct page *can_gather_numa_stats_pmd(pmd_t pmd,
++ struct vm_area_struct *vma,
++ unsigned long addr)
++{
++ struct page *page;
++ int nid;
++
++ if (!pmd_present(pmd))
++ return NULL;
++
++ page = vm_normal_page_pmd(vma, addr, pmd);
++ if (!page)
++ return NULL;
++
++ if (PageReserved(page))
++ return NULL;
++
++ nid = page_to_nid(page);
++ if (!node_isset(nid, node_states[N_MEMORY]))
++ return NULL;
++
++ return page;
++}
++#endif
++
+ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
+ unsigned long end, struct mm_walk *walk)
+ {
+@@ -1444,13 +1470,13 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
+ pte_t *orig_pte;
+ pte_t *pte;
+
++#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
+- pte_t huge_pte = *(pte_t *)pmd;
+ struct page *page;
+
+- page = can_gather_numa_stats(huge_pte, vma, addr);
++ page = can_gather_numa_stats_pmd(*pmd, vma, addr);
+ if (page)
+- gather_stats(page, md, pte_dirty(huge_pte),
++ gather_stats(page, md, pmd_dirty(*pmd),
+ HPAGE_PMD_SIZE/PAGE_SIZE);
+ spin_unlock(ptl);
+ return 0;
+@@ -1458,6 +1484,7 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
+
+ if (pmd_trans_unstable(pmd))
+ return 0;
++#endif
+ orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
+ do {
+ struct page *page = can_gather_numa_stats(*pte, vma, addr);
+diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h
+index e56272c919b5..bf2d34c9d804 100644
+--- a/include/asm-generic/futex.h
++++ b/include/asm-generic/futex.h
+@@ -108,11 +108,15 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+ u32 val;
+
+ preempt_disable();
+- if (unlikely(get_user(val, uaddr) != 0))
++ if (unlikely(get_user(val, uaddr) != 0)) {
++ preempt_enable();
+ return -EFAULT;
++ }
+
+- if (val == oldval && unlikely(put_user(newval, uaddr) != 0))
++ if (val == oldval && unlikely(put_user(newval, uaddr) != 0)) {
++ preempt_enable();
+ return -EFAULT;
++ }
+
+ *uval = val;
+ preempt_enable();
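Both error paths need the same fix because preempt_disable()/preempt_enable() must balance on every exit; returning -EFAULT with preemption still disabled leaks a count and eventually trips scheduling-while-atomic warnings. A sketch of the balanced flow (the counter macros are a stand-in for the real primitives):

#include <stdio.h>

static int preempt_depth;
#define preempt_disable()  (preempt_depth++)
#define preempt_enable()   (preempt_depth--)

static int cmpxchg_inatomic(int *uaddr, int oldval, int newval, int fail_get)
{
        int val;

        preempt_disable();
        if (fail_get) {                 /* get_user() faulted */
                preempt_enable();       /* the line the patch adds */
                return -14;             /* -EFAULT */
        }
        val = *uaddr;
        if (val == oldval)
                *uaddr = newval;
        preempt_enable();
        return 0;
}

int main(void)
{
        int word = 1;

        cmpxchg_inatomic(&word, 1, 2, 1);
        printf("depth after fault path: %d\n", preempt_depth);  /* 0 */
        return 0;
}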
+diff --git a/include/drm/drm_cache.h b/include/drm/drm_cache.h
+index 461a0558bca4..cebecff536a3 100644
+--- a/include/drm/drm_cache.h
++++ b/include/drm/drm_cache.h
+@@ -39,6 +39,8 @@ static inline bool drm_arch_can_wc_memory(void)
+ {
+ #if defined(CONFIG_PPC) && !defined(CONFIG_NOT_COHERENT_CACHE)
+ return false;
++#elif defined(CONFIG_MIPS) && defined(CONFIG_CPU_LOONGSON3)
++ return false;
+ #else
+ return true;
+ #endif
+diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
+index a7c7f74808a4..8da263299754 100644
+--- a/include/linux/cgroup-defs.h
++++ b/include/linux/cgroup-defs.h
+@@ -434,6 +434,7 @@ struct cgroup_subsys {
+ int (*can_attach)(struct cgroup_taskset *tset);
+ void (*cancel_attach)(struct cgroup_taskset *tset);
+ void (*attach)(struct cgroup_taskset *tset);
++ void (*post_attach)(void);
+ int (*can_fork)(struct task_struct *task, void **priv_p);
+ void (*cancel_fork)(struct task_struct *task, void *priv);
+ void (*fork)(struct task_struct *task, void *priv);
+diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
+index fea160ee5803..85a868ccb493 100644
+--- a/include/linux/cpuset.h
++++ b/include/linux/cpuset.h
+@@ -137,8 +137,6 @@ static inline void set_mems_allowed(nodemask_t nodemask)
+ task_unlock(current);
+ }
+
+-extern void cpuset_post_attach_flush(void);
+-
+ #else /* !CONFIG_CPUSETS */
+
+ static inline bool cpusets_enabled(void) { return false; }
+@@ -245,10 +243,6 @@ static inline bool read_mems_allowed_retry(unsigned int seq)
+ return false;
+ }
+
+-static inline void cpuset_post_attach_flush(void)
+-{
+-}
+-
+ #endif /* !CONFIG_CPUSETS */
+
+ #endif /* _LINUX_CPUSET_H */
+diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
+index 0b473cbfa7ef..a91b67b18a73 100644
+--- a/include/linux/mlx5/device.h
++++ b/include/linux/mlx5/device.h
+@@ -334,6 +334,17 @@ enum {
+ MLX5_CAP_OFF_CMDIF_CSUM = 46,
+ };
+
++enum {
++ /*
++ * Max wqe size for rdma read is 512 bytes, so this
++ * limits our max_sge_rd as the wqe needs to fit:
++ * - ctrl segment (16 bytes)
++ * - rdma segment (16 bytes)
++ * - scatter elements (16 bytes each)
++ */
++ MLX5_MAX_SGE_RD = (512 - 16 - 16) / 16
++};
++
+ struct mlx5_inbox_hdr {
+ __be16 opcode;
+ u8 rsvd[4];
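For reference, the arithmetic the new comment describes: a 512-byte read WQE minus the 16-byte control segment and the 16-byte RDMA segment leaves 480 bytes of scatter space, and at 16 bytes per scatter element that gives MLX5_MAX_SGE_RD = (512 - 16 - 16) / 16 = 30 entries.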
+diff --git a/include/linux/mm.h b/include/linux/mm.h
+index 00bad7793788..fb8b20e5d021 100644
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -1084,6 +1084,8 @@ struct zap_details {
+
+ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
+ pte_t pte);
++struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
++ pmd_t pmd);
+
+ int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
+ unsigned long size);
+diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
+index 647ebfe5174f..d4227a8a2a23 100644
+--- a/include/media/videobuf2-core.h
++++ b/include/media/videobuf2-core.h
+@@ -363,6 +363,7 @@ struct vb2_ops {
+ };
+
+ struct vb2_buf_ops {
++ int (*verify_planes_array)(struct vb2_buffer *vb, const void *pb);
+ int (*fill_user_buffer)(struct vb2_buffer *vb, void *pb);
+ int (*fill_vb2_buffer)(struct vb2_buffer *vb, const void *pb,
+ struct vb2_plane *planes);
+diff --git a/include/rdma/ib.h b/include/rdma/ib.h
+index cf8f9e700e48..a6b93706b0fc 100644
+--- a/include/rdma/ib.h
++++ b/include/rdma/ib.h
+@@ -34,6 +34,7 @@
+ #define _RDMA_IB_H
+
+ #include <linux/types.h>
++#include <linux/sched.h>
+
+ struct ib_addr {
+ union {
+@@ -86,4 +87,19 @@ struct sockaddr_ib {
+ __u64 sib_scope_id;
+ };
+
++/*
++ * The IB interfaces that use write() as bi-directional ioctl() are
++ * fundamentally unsafe, since there are lots of ways to trigger "write()"
++ * calls from various contexts with elevated privileges. That includes the
++ * traditional suid executable error message writes, but also various kernel
++ * interfaces that can write to file descriptors.
++ *
++ * This function provides protection for the legacy API by restricting the
++ * calling context.
++ */
++static inline bool ib_safe_file_access(struct file *filp)
++{
++ return filp->f_cred == current_cred() && segment_eq(get_fs(), USER_DS);
++}
++
+ #endif /* _RDMA_IB_H */
+diff --git a/include/uapi/linux/v4l2-dv-timings.h b/include/uapi/linux/v4l2-dv-timings.h
+index c039f1d68a09..086168e18ca8 100644
+--- a/include/uapi/linux/v4l2-dv-timings.h
++++ b/include/uapi/linux/v4l2-dv-timings.h
+@@ -183,7 +183,8 @@
+
+ #define V4L2_DV_BT_CEA_3840X2160P24 { \
+ .type = V4L2_DV_BT_656_1120, \
+- V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
++ V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
++ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 297000000, 1276, 88, 296, 8, 10, 72, 0, 0, 0, \
+ V4L2_DV_BT_STD_CEA861, \
+ V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
+@@ -191,14 +192,16 @@
+
+ #define V4L2_DV_BT_CEA_3840X2160P25 { \
+ .type = V4L2_DV_BT_656_1120, \
+- V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
++ V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
++ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 297000000, 1056, 88, 296, 8, 10, 72, 0, 0, 0, \
+ V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
+ }
+
+ #define V4L2_DV_BT_CEA_3840X2160P30 { \
+ .type = V4L2_DV_BT_656_1120, \
+- V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
++ V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
++ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 297000000, 176, 88, 296, 8, 10, 72, 0, 0, 0, \
+ V4L2_DV_BT_STD_CEA861, \
+ V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
+@@ -206,14 +209,16 @@
+
+ #define V4L2_DV_BT_CEA_3840X2160P50 { \
+ .type = V4L2_DV_BT_656_1120, \
+- V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
++ V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
++ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 594000000, 1056, 88, 296, 8, 10, 72, 0, 0, 0, \
+ V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
+ }
+
+ #define V4L2_DV_BT_CEA_3840X2160P60 { \
+ .type = V4L2_DV_BT_656_1120, \
+- V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
++ V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
++ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 594000000, 176, 88, 296, 8, 10, 72, 0, 0, 0, \
+ V4L2_DV_BT_STD_CEA861, \
+ V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
+@@ -221,7 +226,8 @@
+
+ #define V4L2_DV_BT_CEA_4096X2160P24 { \
+ .type = V4L2_DV_BT_656_1120, \
+- V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
++ V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
++ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 297000000, 1020, 88, 296, 8, 10, 72, 0, 0, 0, \
+ V4L2_DV_BT_STD_CEA861, \
+ V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
+@@ -229,14 +235,16 @@
+
+ #define V4L2_DV_BT_CEA_4096X2160P25 { \
+ .type = V4L2_DV_BT_656_1120, \
+- V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
++ V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
++ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 297000000, 968, 88, 128, 8, 10, 72, 0, 0, 0, \
+ V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
+ }
+
+ #define V4L2_DV_BT_CEA_4096X2160P30 { \
+ .type = V4L2_DV_BT_656_1120, \
+- V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
++ V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
++ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 297000000, 88, 88, 128, 8, 10, 72, 0, 0, 0, \
+ V4L2_DV_BT_STD_CEA861, \
+ V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
+@@ -244,14 +252,16 @@
+
+ #define V4L2_DV_BT_CEA_4096X2160P50 { \
+ .type = V4L2_DV_BT_656_1120, \
+- V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
++ V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
++ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 594000000, 968, 88, 128, 8, 10, 72, 0, 0, 0, \
+ V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
+ }
+
+ #define V4L2_DV_BT_CEA_4096X2160P60 { \
+ .type = V4L2_DV_BT_656_1120, \
+- V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
++ V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
++ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 594000000, 88, 88, 128, 8, 10, 72, 0, 0, 0, \
+ V4L2_DV_BT_STD_CEA861, \
+ V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
+diff --git a/kernel/cgroup.c b/kernel/cgroup.c
+index dc94f8beb097..1c9d701f7a72 100644
+--- a/kernel/cgroup.c
++++ b/kernel/cgroup.c
+@@ -2721,9 +2721,10 @@ static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
+ size_t nbytes, loff_t off, bool threadgroup)
+ {
+ struct task_struct *tsk;
++ struct cgroup_subsys *ss;
+ struct cgroup *cgrp;
+ pid_t pid;
+- int ret;
++ int ssid, ret;
+
+ if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0)
+ return -EINVAL;
+@@ -2771,8 +2772,10 @@ out_unlock_rcu:
+ rcu_read_unlock();
+ out_unlock_threadgroup:
+ percpu_up_write(&cgroup_threadgroup_rwsem);
++ for_each_subsys(ss, ssid)
++ if (ss->post_attach)
++ ss->post_attach();
+ cgroup_kn_unlock(of->kn);
+- cpuset_post_attach_flush();
+ return ret ?: nbytes;
+ }
+
+@@ -4689,14 +4692,15 @@ static void css_free_work_fn(struct work_struct *work)
+
+ if (ss) {
+ /* css free path */
++ struct cgroup_subsys_state *parent = css->parent;
+ int id = css->id;
+
+- if (css->parent)
+- css_put(css->parent);
+-
+ ss->css_free(css);
+ cgroup_idr_remove(&ss->css_idr, id);
+ cgroup_put(cgrp);
++
++ if (parent)
++ css_put(parent);
+ } else {
+ /* cgroup free path */
+ atomic_dec(&cgrp->root->nr_cgrps);
+diff --git a/kernel/cpuset.c b/kernel/cpuset.c
+index 2ade632197d5..11eaf14b52c2 100644
+--- a/kernel/cpuset.c
++++ b/kernel/cpuset.c
+@@ -57,7 +57,6 @@
+ #include <asm/uaccess.h>
+ #include <linux/atomic.h>
+ #include <linux/mutex.h>
+-#include <linux/workqueue.h>
+ #include <linux/cgroup.h>
+ #include <linux/wait.h>
+
+@@ -1015,7 +1014,7 @@ static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
+ }
+ }
+
+-void cpuset_post_attach_flush(void)
++static void cpuset_post_attach(void)
+ {
+ flush_workqueue(cpuset_migrate_mm_wq);
+ }
+@@ -2083,6 +2082,7 @@ struct cgroup_subsys cpuset_cgrp_subsys = {
+ .can_attach = cpuset_can_attach,
+ .cancel_attach = cpuset_cancel_attach,
+ .attach = cpuset_attach,
++ .post_attach = cpuset_post_attach,
+ .bind = cpuset_bind,
+ .legacy_cftypes = files,
+ .early_init = 1,
+diff --git a/kernel/futex.c b/kernel/futex.c
+index 461c72b2dac2..9d8163afd87c 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -1244,10 +1244,20 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this,
+ if (unlikely(should_fail_futex(true)))
+ ret = -EFAULT;
+
+- if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
++ if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)) {
+ ret = -EFAULT;
+- else if (curval != uval)
+- ret = -EINVAL;
++ } else if (curval != uval) {
++ /*
++		 * If an unconditional UNLOCK_PI operation (user space did not
++ * try the TID->0 transition) raced with a waiter setting the
++ * FUTEX_WAITERS flag between get_user() and locking the hash
++ * bucket lock, retry the operation.
++ */
++ if ((FUTEX_TID_MASK & curval) == uval)
++ ret = -EAGAIN;
++ else
++ ret = -EINVAL;
++ }
+ if (ret) {
+ raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
+ return ret;
+@@ -1474,8 +1484,8 @@ void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
+ if (likely(&hb1->chain != &hb2->chain)) {
+ plist_del(&q->list, &hb1->chain);
+ hb_waiters_dec(hb1);
+- plist_add(&q->list, &hb2->chain);
+ hb_waiters_inc(hb2);
++ plist_add(&q->list, &hb2->chain);
+ q->lock_ptr = &hb2->lock;
+ }
+ get_futex_key_refs(key2);
+@@ -2538,6 +2548,15 @@ retry:
+ if (ret == -EFAULT)
+ goto pi_faulted;
+ /*
++		 * An unconditional UNLOCK_PI op raced against a waiter
++ * setting the FUTEX_WAITERS bit. Try again.
++ */
++ if (ret == -EAGAIN) {
++ spin_unlock(&hb->lock);
++ put_futex_key(&key);
++ goto retry;
++ }
++ /*
+ * wake_futex_pi has detected invalid state. Tell user
+ * space.
+ */
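The new -EAGAIN branch distinguishes two CAS failures: if only the FUTEX_WAITERS bit changed underneath us, the TID bits still match and the operation is simply retried; any other mismatch is genuine corruption. A C11 sketch of that classification (hand_over and the error plumbing are illustrative):

#include <errno.h>
#include <stdatomic.h>
#include <stdio.h>

#define FUTEX_WAITERS  0x80000000u
#define FUTEX_TID_MASK 0x3fffffffu

static int hand_over(atomic_uint *uaddr, unsigned tid, unsigned new_tid)
{
        unsigned cur = tid;

        if (atomic_compare_exchange_strong(uaddr, &cur, new_tid))
                return 0;                       /* clean hand-over */
        if ((cur & FUTEX_TID_MASK) == tid)
                return -EAGAIN;                 /* waiter raced us: retry */
        return -EINVAL;                         /* genuinely bad state */
}

int main(void)
{
        atomic_uint word = 7 | FUTEX_WAITERS;   /* a waiter raced in */

        printf("%d\n", hand_over(&word, 7, 9)); /* -EAGAIN: just retry */
        return 0;
}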
+diff --git a/kernel/locking/mcs_spinlock.h b/kernel/locking/mcs_spinlock.h
+index 5b9102a47ea5..c835270f0c2f 100644
+--- a/kernel/locking/mcs_spinlock.h
++++ b/kernel/locking/mcs_spinlock.h
+@@ -67,7 +67,13 @@ void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
+ node->locked = 0;
+ node->next = NULL;
+
+- prev = xchg_acquire(lock, node);
++ /*
++ * We rely on the full barrier with global transitivity implied by the
++ * below xchg() to order the initialization stores above against any
++ * observation of @node. And to provide the ACQUIRE ordering associated
++ * with a LOCK primitive.
++ */
++ prev = xchg(lock, node);
+ if (likely(prev == NULL)) {
+ /*
+ * Lock acquired, don't need to set node->locked to 1. Threads
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 70e5e09341f1..55bebf924946 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -7693,7 +7693,7 @@ void set_curr_task(int cpu, struct task_struct *p)
+ /* task_group_lock serializes the addition/removal of task groups */
+ static DEFINE_SPINLOCK(task_group_lock);
+
+-static void free_sched_group(struct task_group *tg)
++static void sched_free_group(struct task_group *tg)
+ {
+ free_fair_sched_group(tg);
+ free_rt_sched_group(tg);
+@@ -7719,7 +7719,7 @@ struct task_group *sched_create_group(struct task_group *parent)
+ return tg;
+
+ err:
+- free_sched_group(tg);
++ sched_free_group(tg);
+ return ERR_PTR(-ENOMEM);
+ }
+
+@@ -7739,17 +7739,16 @@ void sched_online_group(struct task_group *tg, struct task_group *parent)
+ }
+
+ /* rcu callback to free various structures associated with a task group */
+-static void free_sched_group_rcu(struct rcu_head *rhp)
++static void sched_free_group_rcu(struct rcu_head *rhp)
+ {
+ /* now it should be safe to free those cfs_rqs */
+- free_sched_group(container_of(rhp, struct task_group, rcu));
++ sched_free_group(container_of(rhp, struct task_group, rcu));
+ }
+
+-/* Destroy runqueue etc associated with a task group */
+ void sched_destroy_group(struct task_group *tg)
+ {
+ /* wait for possible concurrent references to cfs_rqs complete */
+- call_rcu(&tg->rcu, free_sched_group_rcu);
++ call_rcu(&tg->rcu, sched_free_group_rcu);
+ }
+
+ void sched_offline_group(struct task_group *tg)
+@@ -8210,31 +8209,26 @@ cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
+ if (IS_ERR(tg))
+ return ERR_PTR(-ENOMEM);
+
++ sched_online_group(tg, parent);
++
+ return &tg->css;
+ }
+
+-static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
++static void cpu_cgroup_css_released(struct cgroup_subsys_state *css)
+ {
+ struct task_group *tg = css_tg(css);
+- struct task_group *parent = css_tg(css->parent);
+
+- if (parent)
+- sched_online_group(tg, parent);
+- return 0;
++ sched_offline_group(tg);
+ }
+
+ static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
+ {
+ struct task_group *tg = css_tg(css);
+
+- sched_destroy_group(tg);
+-}
+-
+-static void cpu_cgroup_css_offline(struct cgroup_subsys_state *css)
+-{
+- struct task_group *tg = css_tg(css);
+-
+- sched_offline_group(tg);
++ /*
++ * Relies on the RCU grace period between css_released() and this.
++ */
++ sched_free_group(tg);
+ }
+
+ static void cpu_cgroup_fork(struct task_struct *task, void *private)
+@@ -8594,9 +8588,8 @@ static struct cftype cpu_files[] = {
+
+ struct cgroup_subsys cpu_cgrp_subsys = {
+ .css_alloc = cpu_cgroup_css_alloc,
++ .css_released = cpu_cgroup_css_released,
+ .css_free = cpu_cgroup_css_free,
+- .css_online = cpu_cgroup_css_online,
+- .css_offline = cpu_cgroup_css_offline,
+ .fork = cpu_cgroup_fork,
+ .can_attach = cpu_cgroup_can_attach,
+ .attach = cpu_cgroup_attach,
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index 450c21fd0e6e..0ec05948a97b 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -649,6 +649,35 @@ static void set_work_pool_and_clear_pending(struct work_struct *work,
+ */
+ smp_wmb();
+ set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0);
++ /*
++ * The following mb guarantees that the previous clear of a PENDING
++ * bit will not be reordered with any speculative LOADS or STORES
++ * from work->current_func, which is executed afterwards. This
++ * possible reordering can lead to a missed execution on an attempt
++ * to queue the same @work. E.g. consider this case:
++ *
++ * CPU#0 CPU#1
++ * ---------------------------- --------------------------------
++ *
++ * 1 STORE event_indicated
++ * 2 queue_work_on() {
++ * 3 test_and_set_bit(PENDING)
++ * 4 } set_..._and_clear_pending() {
++ * 5 set_work_data() # clear bit
++ * 6 smp_mb()
++ * 7 work->current_func() {
++ * 8 LOAD event_indicated
++ * }
++ *
++ * Without an explicit full barrier, the speculative LOAD on line 8
++ * can be executed before CPU#0 does the STORE on line 1. If that
++ * happens, CPU#0 observes that the PENDING bit is still set and does
++ * not queue a new execution of @work, in the hope that CPU#1 will
++ * eventually finish the queued @work. Meanwhile CPU#1 does not see
++ * that event_indicated is set, because the speculative LOAD was
++ * executed before the actual STORE.
++ */
++ smp_mb();
+ }
+
+ static void clear_work_data(struct work_struct *work)
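
(Aside, not part of the patch: the race described in the comment above, reduced to a standalone C11 sketch. One thread plays CPU#0 (queueing side), the other CPU#1 (worker side); without the seq_cst fence after clearing PENDING, CPU#1's load of event_indicated can be satisfied before CPU#0's store is visible while CPU#0 still sees PENDING set, and the event is lost. Names are illustrative:)

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool pending;
static atomic_bool event_indicated;

static void cpu0_queue_side(void)		/* queue_work_on() caller */
{
	atomic_store_explicit(&event_indicated, true, memory_order_relaxed);
	if (atomic_exchange(&pending, true))	/* test_and_set_bit() */
		return;	/* already pending: rely on the worker's fence */
	/* ... enqueue the work here ... */
}

static void cpu1_worker_side(void)	/* set_..._and_clear_pending() */
{
	atomic_store_explicit(&pending, false, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* the added smp_mb() */
	if (atomic_load_explicit(&event_indicated, memory_order_relaxed)) {
		/* work->current_func() observes the event */
	}
}
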
+diff --git a/lib/assoc_array.c b/lib/assoc_array.c
+index 03dd576e6773..59fd7c0b119c 100644
+--- a/lib/assoc_array.c
++++ b/lib/assoc_array.c
+@@ -524,7 +524,9 @@ static bool assoc_array_insert_into_terminal_node(struct assoc_array_edit *edit,
+ free_slot = i;
+ continue;
+ }
+- if (ops->compare_object(assoc_array_ptr_to_leaf(ptr), index_key)) {
++ if (assoc_array_ptr_is_leaf(ptr) &&
++ ops->compare_object(assoc_array_ptr_to_leaf(ptr),
++ index_key)) {
+ pr_devel("replace in slot %d\n", i);
+ edit->leaf_p = &node->slots[i];
+ edit->dead_leaf = node->slots[i];
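
(Aside, not part of the patch: the fix above gates compare_object() on a leaf check because a terminal node's slots can also hold pointers to internal metadata, distinguished by tag bits in the pointer itself. A hypothetical sketch of that discrimination — the tag layout here is assumed, not lifted from assoc_array internals:)

#include <stdbool.h>
#include <stdint.h>

#define PTR_META_TAG	0x1UL	/* assumed: metadata pointers set bit 0 */

static inline bool ptr_is_leaf(const void *x)
{
	return ((uintptr_t)x & PTR_META_TAG) == 0;
}

/* Only untagged (leaf) slots may be handed to the compare callback. */
static bool slot_matches(const void *slot,
			 bool (*compare_object)(const void *, const void *),
			 const void *index_key)
{
	return ptr_is_leaf(slot) && compare_object(slot, index_key);
}
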
+diff --git a/lib/lz4/lz4defs.h b/lib/lz4/lz4defs.h
+index abcecdc2d0f2..0710a62ad2f6 100644
+--- a/lib/lz4/lz4defs.h
++++ b/lib/lz4/lz4defs.h
+@@ -11,8 +11,7 @@
+ /*
+ * Detects 64 bits mode
+ */
+-#if (defined(__x86_64__) || defined(__x86_64) || defined(__amd64__) \
+- || defined(__ppc64__) || defined(__LP64__))
++#if defined(CONFIG_64BIT)
+ #define LZ4_ARCH64 1
+ #else
+ #define LZ4_ARCH64 0
+@@ -35,6 +34,10 @@ typedef struct _U64_S { u64 v; } U64_S;
+
+ #define PUT4(s, d) (A32(d) = A32(s))
+ #define PUT8(s, d) (A64(d) = A64(s))
++
++#define LZ4_READ_LITTLEENDIAN_16(d, s, p) \
++ (d = s - A16(p))
++
+ #define LZ4_WRITE_LITTLEENDIAN_16(p, v) \
+ do { \
+ A16(p) = v; \
+@@ -51,10 +54,13 @@ typedef struct _U64_S { u64 v; } U64_S;
+ #define PUT8(s, d) \
+ put_unaligned(get_unaligned((const u64 *) s), (u64 *) d)
+
+-#define LZ4_WRITE_LITTLEENDIAN_16(p, v) \
+- do { \
+- put_unaligned(v, (u16 *)(p)); \
+- p += 2; \
++#define LZ4_READ_LITTLEENDIAN_16(d, s, p) \
++ (d = s - get_unaligned_le16(p))
++
++#define LZ4_WRITE_LITTLEENDIAN_16(p, v) \
++ do { \
++ put_unaligned_le16(v, (u16 *)(p)); \
++ p += 2; \
+ } while (0)
+ #endif
+
+@@ -140,9 +146,6 @@ typedef struct _U64_S { u64 v; } U64_S;
+
+ #endif
+
+-#define LZ4_READ_LITTLEENDIAN_16(d, s, p) \
+- (d = s - get_unaligned_le16(p))
+-
+ #define LZ4_WILDCOPY(s, d, e) \
+ do { \
+ LZ4_COPYPACKET(s, d); \
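
(Aside, not part of the patch: the hunk above makes LZ4_READ_LITTLEENDIAN_16 per-architecture — a raw 16-bit load where unaligned access is cheap, get_unaligned_le16() elsewhere. A portable userspace equivalent of the safe variant, as a sketch:)

#include <stdint.h>
#include <string.h>

static inline uint16_t load_le16(const void *p)
{
	uint8_t b[2];

	memcpy(b, p, sizeof(b));		/* legal at any alignment */
	return (uint16_t)(b[0] | (b[1] << 8));	/* explicit little-endian */
}

/* The macro in the hunk computes an LZ4 back-reference: d = s - le16(p). */
#define READ_LE16_OFFSET(d, s, p)	((d) = (s) - load_le16(p))
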
+diff --git a/lib/mpi/mpicoder.c b/lib/mpi/mpicoder.c
+index 3db76b8c1115..e00ff00e861c 100644
+--- a/lib/mpi/mpicoder.c
++++ b/lib/mpi/mpicoder.c
+@@ -128,6 +128,23 @@ leave:
+ }
+ EXPORT_SYMBOL_GPL(mpi_read_from_buffer);
+
++static int count_lzeros(MPI a)
++{
++ mpi_limb_t alimb;
++ int i, lzeros = 0;
++
++ for (i = a->nlimbs - 1; i >= 0; i--) {
++ alimb = a->d[i];
++ if (alimb == 0) {
++ lzeros += sizeof(mpi_limb_t);
++ } else {
++ lzeros += count_leading_zeros(alimb) / 8;
++ break;
++ }
++ }
++ return lzeros;
++}
++
+ /**
+ * mpi_read_buffer() - read MPI to a buffer provided by user (msb first)
+ *
+@@ -146,7 +163,7 @@ int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes,
+ uint8_t *p;
+ mpi_limb_t alimb;
+ unsigned int n = mpi_get_size(a);
+- int i, lzeros = 0;
++ int i, lzeros;
+
+ if (buf_len < n || !buf || !nbytes)
+ return -EINVAL;
+@@ -154,14 +171,7 @@ int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes,
+ if (sign)
+ *sign = a->sign;
+
+- p = (void *)&a->d[a->nlimbs] - 1;
+-
+- for (i = a->nlimbs * sizeof(alimb) - 1; i >= 0; i--, p--) {
+- if (!*p)
+- lzeros++;
+- else
+- break;
+- }
++ lzeros = count_lzeros(a);
+
+ p = buf;
+ *nbytes = n - lzeros;
+@@ -343,7 +353,7 @@ int mpi_write_to_sgl(MPI a, struct scatterlist *sgl, unsigned *nbytes,
+ u8 *p, *p2;
+ mpi_limb_t alimb, alimb2;
+ unsigned int n = mpi_get_size(a);
+- int i, x, y = 0, lzeros = 0, buf_len;
++ int i, x, y = 0, lzeros, buf_len;
+
+ if (!nbytes || *nbytes < n)
+ return -EINVAL;
+@@ -351,14 +361,7 @@ int mpi_write_to_sgl(MPI a, struct scatterlist *sgl, unsigned *nbytes,
+ if (sign)
+ *sign = a->sign;
+
+- p = (void *)&a->d[a->nlimbs] - 1;
+-
+- for (i = a->nlimbs * sizeof(alimb) - 1; i >= 0; i--, p--) {
+- if (!*p)
+- lzeros++;
+- else
+- break;
+- }
++ lzeros = count_lzeros(a);
+
+ *nbytes = n - lzeros;
+ buf_len = sgl->length;
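
(Aside, not part of the patch: the new count_lzeros() replaces a byte-walking loop over the raw limb array, which depended on the in-memory byte order of the limbs, with a per-limb count that is endian-safe. A userspace sketch of the same logic; the kernel's count_leading_zeros() is stood in for by the GCC/Clang builtin:)

typedef unsigned long limb_t;

static int count_lzeros(const limb_t *d, int nlimbs)
{
	int i, lzeros = 0;

	for (i = nlimbs - 1; i >= 0; i--) {
		if (d[i] == 0) {
			lzeros += (int)sizeof(limb_t);	/* whole zero limb */
		} else {
			/* builtin is undefined for 0, guarded above */
			lzeros += __builtin_clzl(d[i]) / 8;
			break;
		}
	}
	return lzeros;
}
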
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index 62fe06bb7d04..530e6427f823 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -2134,10 +2134,9 @@ int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
+ * page fault if needed.
+ */
+ return 0;
+- if (vma->vm_ops)
++ if (vma->vm_ops || (vm_flags & VM_NO_THP))
+ /* khugepaged not yet working on file or special mappings */
+ return 0;
+- VM_BUG_ON_VMA(vm_flags & VM_NO_THP, vma);
+ hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
+ hend = vma->vm_end & HPAGE_PMD_MASK;
+ if (hstart < hend)
+@@ -2498,8 +2497,7 @@ static bool hugepage_vma_check(struct vm_area_struct *vma)
+ return false;
+ if (is_vma_temporary_stack(vma))
+ return false;
+- VM_BUG_ON_VMA(vma->vm_flags & VM_NO_THP, vma);
+- return true;
++ return !(vma->vm_flags & VM_NO_THP);
+ }
+
+ static void collapse_huge_page(struct mm_struct *mm,
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index fc0bcc41d57f..6ba4dd988e2e 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -196,6 +196,7 @@ static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
+ /* "mc" and its members are protected by cgroup_mutex */
+ static struct move_charge_struct {
+ spinlock_t lock; /* for from, to */
++ struct mm_struct *mm;
+ struct mem_cgroup *from;
+ struct mem_cgroup *to;
+ unsigned long flags;
+@@ -4800,6 +4801,8 @@ static void __mem_cgroup_clear_mc(void)
+
+ static void mem_cgroup_clear_mc(void)
+ {
++ struct mm_struct *mm = mc.mm;
++
+ /*
+ * we must clear moving_task before waking up waiters at the end of
+ * task migration.
+@@ -4809,7 +4812,10 @@ static void mem_cgroup_clear_mc(void)
+ spin_lock(&mc.lock);
+ mc.from = NULL;
+ mc.to = NULL;
++ mc.mm = NULL;
+ spin_unlock(&mc.lock);
++
++ mmput(mm);
+ }
+
+ static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
+@@ -4866,6 +4872,7 @@ static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
+ VM_BUG_ON(mc.moved_swap);
+
+ spin_lock(&mc.lock);
++ mc.mm = mm;
+ mc.from = from;
+ mc.to = memcg;
+ mc.flags = move_flags;
+@@ -4875,8 +4882,9 @@ static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
+ ret = mem_cgroup_precharge_mc(mm);
+ if (ret)
+ mem_cgroup_clear_mc();
++ } else {
++ mmput(mm);
+ }
+- mmput(mm);
+ return ret;
+ }
+
+@@ -4985,11 +4993,11 @@ put: /* get_mctgt_type() gets the page */
+ return ret;
+ }
+
+-static void mem_cgroup_move_charge(struct mm_struct *mm)
++static void mem_cgroup_move_charge(void)
+ {
+ struct mm_walk mem_cgroup_move_charge_walk = {
+ .pmd_entry = mem_cgroup_move_charge_pte_range,
+- .mm = mm,
++ .mm = mc.mm,
+ };
+
+ lru_add_drain_all();
+@@ -5001,7 +5009,7 @@ static void mem_cgroup_move_charge(struct mm_struct *mm)
+ atomic_inc(&mc.from->moving_account);
+ synchronize_rcu();
+ retry:
+- if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
++ if (unlikely(!down_read_trylock(&mc.mm->mmap_sem))) {
+ /*
+ * Someone who is holding the mmap_sem might be waiting in
+ * waitq. So we cancel all extra charges, wake up all waiters,
+@@ -5018,23 +5026,16 @@ retry:
+ * additional charge, the page walk just aborts.
+ */
+ walk_page_range(0, ~0UL, &mem_cgroup_move_charge_walk);
+- up_read(&mm->mmap_sem);
++ up_read(&mc.mm->mmap_sem);
+ atomic_dec(&mc.from->moving_account);
+ }
+
+-static void mem_cgroup_move_task(struct cgroup_taskset *tset)
++static void mem_cgroup_move_task(void)
+ {
+- struct cgroup_subsys_state *css;
+- struct task_struct *p = cgroup_taskset_first(tset, &css);
+- struct mm_struct *mm = get_task_mm(p);
+-
+- if (mm) {
+- if (mc.to)
+- mem_cgroup_move_charge(mm);
+- mmput(mm);
+- }
+- if (mc.to)
++ if (mc.to) {
++ mem_cgroup_move_charge();
+ mem_cgroup_clear_mc();
++ }
+ }
+ #else /* !CONFIG_MMU */
+ static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
+@@ -5044,7 +5045,7 @@ static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
+ static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
+ {
+ }
+-static void mem_cgroup_move_task(struct cgroup_taskset *tset)
++static void mem_cgroup_move_task(void)
+ {
+ }
+ #endif
+@@ -5258,7 +5259,7 @@ struct cgroup_subsys memory_cgrp_subsys = {
+ .css_reset = mem_cgroup_css_reset,
+ .can_attach = mem_cgroup_can_attach,
+ .cancel_attach = mem_cgroup_cancel_attach,
+- .attach = mem_cgroup_move_task,
++ .post_attach = mem_cgroup_move_task,
+ .bind = mem_cgroup_bind,
+ .dfl_cftypes = memory_files,
+ .legacy_cftypes = mem_cgroup_legacy_files,
+diff --git a/mm/memory.c b/mm/memory.c
+index b80bf4746b67..76dcee317714 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -797,6 +797,46 @@ out:
+ return pfn_to_page(pfn);
+ }
+
++#ifdef CONFIG_TRANSPARENT_HUGEPAGE
++struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
++ pmd_t pmd)
++{
++ unsigned long pfn = pmd_pfn(pmd);
++
++ /*
++ * There is no pmd_special() but there may be special pmds, e.g.
++ * in a direct-access (dax) mapping, so let's just replicate the
++ * !HAVE_PTE_SPECIAL case from vm_normal_page() here.
++ */
++ if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
++ if (vma->vm_flags & VM_MIXEDMAP) {
++ if (!pfn_valid(pfn))
++ return NULL;
++ goto out;
++ } else {
++ unsigned long off;
++ off = (addr - vma->vm_start) >> PAGE_SHIFT;
++ if (pfn == vma->vm_pgoff + off)
++ return NULL;
++ if (!is_cow_mapping(vma->vm_flags))
++ return NULL;
++ }
++ }
++
++ if (is_zero_pfn(pfn))
++ return NULL;
++ if (unlikely(pfn > highest_memmap_pfn))
++ return NULL;
++
++ /*
++ * NOTE! We still have PageReserved() pages in the page tables.
++ * eg. VDSO mappings can cause them to exist.
++ */
++out:
++ return pfn_to_page(pfn);
++}
++#endif
++
+ /*
+ * copy one vm_area from one task to the other. Assumes the page tables
+ * already present in the new task to be cleared in the whole range
+diff --git a/mm/migrate.c b/mm/migrate.c
+index 6d17e0ab42d4..bbeb0b71fcf4 100644
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -963,7 +963,13 @@ out:
+ dec_zone_page_state(page, NR_ISOLATED_ANON +
+ page_is_file_cache(page));
+ /* Soft-offlined page shouldn't go through lru cache list */
+- if (reason == MR_MEMORY_FAILURE) {
++ if (reason == MR_MEMORY_FAILURE && rc == MIGRATEPAGE_SUCCESS) {
++ /*
++ * With this release, we free the successfully migrated
++ * page and intentionally set PG_HWPoison on the just-freed
++ * page. Although it's rather weird, it's how the
++ * HWPoison flag works at the moment.
++ */
+ put_page(page);
+ if (!test_set_page_hwpoison(page))
+ num_poisoned_pages_inc();
+diff --git a/mm/slub.c b/mm/slub.c
+index 46997517406e..65d5f92d51d2 100644
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -2819,6 +2819,7 @@ struct detached_freelist {
+ void *tail;
+ void *freelist;
+ int cnt;
++ struct kmem_cache *s;
+ };
+
+ /*
+@@ -2833,8 +2834,9 @@ struct detached_freelist {
+ * synchronization primitive. Look ahead in the array is limited due
+ * to performance reasons.
+ */
+-static int build_detached_freelist(struct kmem_cache *s, size_t size,
+- void **p, struct detached_freelist *df)
++static inline
++int build_detached_freelist(struct kmem_cache *s, size_t size,
++ void **p, struct detached_freelist *df)
+ {
+ size_t first_skipped_index = 0;
+ int lookahead = 3;
+@@ -2850,8 +2852,11 @@ static int build_detached_freelist(struct kmem_cache *s, size_t size,
+ if (!object)
+ return 0;
+
++ /* Support for memcg; the compiler can optimize this out */
++ df->s = cache_from_obj(s, object);
++
+ /* Start new detached freelist */
+- set_freepointer(s, object, NULL);
++ set_freepointer(df->s, object, NULL);
+ df->page = virt_to_head_page(object);
+ df->tail = object;
+ df->freelist = object;
+@@ -2866,7 +2871,7 @@ static int build_detached_freelist(struct kmem_cache *s, size_t size,
+ /* df->page is always set at this point */
+ if (df->page == virt_to_head_page(object)) {
+ /* Opportunity build freelist */
+- set_freepointer(s, object, df->freelist);
++ set_freepointer(df->s, object, df->freelist);
+ df->freelist = object;
+ df->cnt++;
+ p[size] = NULL; /* mark object processed */
+@@ -2885,25 +2890,20 @@ static int build_detached_freelist(struct kmem_cache *s, size_t size,
+ return first_skipped_index;
+ }
+
+-
+ /* Note that interrupts must be enabled when calling this function. */
+-void kmem_cache_free_bulk(struct kmem_cache *orig_s, size_t size, void **p)
++void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
+ {
+ if (WARN_ON(!size))
+ return;
+
+ do {
+ struct detached_freelist df;
+- struct kmem_cache *s;
+-
+- /* Support for memcg */
+- s = cache_from_obj(orig_s, p[size - 1]);
+
+ size = build_detached_freelist(s, size, p, &df);
+ if (unlikely(!df.page))
+ continue;
+
+- slab_free(s, df.page, df.freelist, df.tail, df.cnt, _RET_IP_);
++ slab_free(df.s, df.page, df.freelist, df.tail, df.cnt, _RET_IP_);
+ } while (likely(size));
+ }
+ EXPORT_SYMBOL(kmem_cache_free_bulk);
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index 2aec4241b42a..0c114e2b01d3 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -2534,7 +2534,7 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
+ sc->gfp_mask |= __GFP_HIGHMEM;
+
+ for_each_zone_zonelist_nodemask(zone, z, zonelist,
+- requested_highidx, sc->nodemask) {
++ gfp_zone(sc->gfp_mask), sc->nodemask) {
+ enum zone_type classzone_idx;
+
+ if (!populated_zone(zone))
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index 59651af8cc27..992b35fb8615 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -1305,7 +1305,7 @@ static int netlink_release(struct socket *sock)
+
+ skb_queue_purge(&sk->sk_write_queue);
+
+- if (nlk->portid) {
++ if (nlk->portid && nlk->bound) {
+ struct netlink_notify n = {
+ .net = sock_net(sk),
+ .protocol = sk->sk_protocol,
+diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
+index 21e20353178e..63fb5ee212cf 100644
+--- a/net/sunrpc/cache.c
++++ b/net/sunrpc/cache.c
+@@ -1182,14 +1182,14 @@ int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h)
+ }
+
+ crq->q.reader = 0;
+- crq->item = cache_get(h);
+ crq->buf = buf;
+ crq->len = 0;
+ crq->readers = 0;
+ spin_lock(&queue_lock);
+- if (test_bit(CACHE_PENDING, &h->flags))
++ if (test_bit(CACHE_PENDING, &h->flags)) {
++ crq->item = cache_get(h);
+ list_add_tail(&crq->q.list, &detail->queue);
+- else
++ } else
+ /* Lost a race, no longer PENDING, so don't enqueue */
+ ret = -EAGAIN;
+ spin_unlock(&queue_lock);
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index 75b0d23ee882..5d89f13a98db 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -13161,7 +13161,7 @@ static int nl80211_netlink_notify(struct notifier_block * nb,
+ struct wireless_dev *wdev;
+ struct cfg80211_beacon_registration *reg, *tmp;
+
+- if (state != NETLINK_URELEASE)
++ if (state != NETLINK_URELEASE || notify->protocol != NETLINK_GENERIC)
+ return NOTIFY_DONE;
+
+ rcu_read_lock();
+diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c
+index 0b7dc2fd7bac..dd243d2abd87 100644
+--- a/scripts/kconfig/confdata.c
++++ b/scripts/kconfig/confdata.c
+@@ -267,10 +267,8 @@ int conf_read_simple(const char *name, int def)
+ if (in)
+ goto load;
+ sym_add_change_count(1);
+- if (!sym_defconfig_list) {
+- sym_calc_value(modules_sym);
++ if (!sym_defconfig_list)
+ return 1;
+- }
+
+ for_all_defaults(sym_defconfig_list, prop) {
+ if (expr_calc_value(prop->visible.expr) == no ||
+@@ -403,7 +401,6 @@ setsym:
+ }
+ free(line);
+ fclose(in);
+- sym_calc_value(modules_sym);
+ return 0;
+ }
+
+@@ -414,8 +411,12 @@ int conf_read(const char *name)
+
+ sym_set_change_count(0);
+
+- if (conf_read_simple(name, S_DEF_USER))
++ if (conf_read_simple(name, S_DEF_USER)) {
++ sym_calc_value(modules_sym);
+ return 1;
++ }
++
++ sym_calc_value(modules_sym);
+
+ for_all_symbols(i, sym) {
+ sym_calc_value(sym);
+@@ -846,6 +847,7 @@ static int conf_split_config(void)
+
+ name = conf_get_autoconfig_name();
+ conf_read_simple(name, S_DEF_AUTO);
++ sym_calc_value(modules_sym);
+
+ if (chdir("include/config"))
+ return 1;
+diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
+index 5c4fa8eba1d0..367dbf0d285e 100644
+--- a/sound/pci/hda/hda_generic.c
++++ b/sound/pci/hda/hda_generic.c
+@@ -843,7 +843,7 @@ static hda_nid_t path_power_update(struct hda_codec *codec,
+ bool allow_powerdown)
+ {
+ hda_nid_t nid, changed = 0;
+- int i, state;
++ int i, state, power;
+
+ for (i = 0; i < path->depth; i++) {
+ nid = path->path[i];
+@@ -855,7 +855,9 @@ static hda_nid_t path_power_update(struct hda_codec *codec,
+ state = AC_PWRST_D0;
+ else
+ state = AC_PWRST_D3;
+- if (!snd_hda_check_power_state(codec, nid, state)) {
++ power = snd_hda_codec_read(codec, nid, 0,
++ AC_VERB_GET_POWER_STATE, 0);
++ if (power != (state | (state << 4))) {
+ snd_hda_codec_write(codec, nid, 0,
+ AC_VERB_SET_POWER_STATE, state);
+ changed = nid;
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index 2ff692dd2c5f..411630e9c034 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -2207,6 +2207,9 @@ static const struct pci_device_id azx_ids[] = {
+ /* Broxton-P(Apollolake) */
+ { PCI_DEVICE(0x8086, 0x5a98),
+ .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON },
++ /* Broxton-T */
++ { PCI_DEVICE(0x8086, 0x1a98),
++ .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON },
+ /* Haswell */
+ { PCI_DEVICE(0x8086, 0x0a0c),
+ .driver_data = AZX_DRIVER_HDMI | AZX_DCAPS_INTEL_HASWELL },
+diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
+index a47e8ae0eb30..80bbadc83721 100644
+--- a/sound/pci/hda/patch_cirrus.c
++++ b/sound/pci/hda/patch_cirrus.c
+@@ -361,6 +361,7 @@ static int cs_parse_auto_config(struct hda_codec *codec)
+ {
+ struct cs_spec *spec = codec->spec;
+ int err;
++ int i;
+
+ err = snd_hda_parse_pin_defcfg(codec, &spec->gen.autocfg, NULL, 0);
+ if (err < 0)
+@@ -370,6 +371,19 @@ static int cs_parse_auto_config(struct hda_codec *codec)
+ if (err < 0)
+ return err;
+
++ /* keep the ADCs powered up when dynamic ADC switching is in use */
++ if (spec->gen.dyn_adc_switch) {
++ unsigned int done = 0;
++ for (i = 0; i < spec->gen.input_mux.num_items; i++) {
++ int idx = spec->gen.dyn_adc_idx[i];
++ if (done & (1 << idx))
++ continue;
++ snd_hda_gen_fix_pin_power(codec,
++ spec->gen.adc_nids[idx]);
++ done |= 1 << idx;
++ }
++ }
++
+ return 0;
+ }
+
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 1402ba954b3d..ac4490a96863 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -5449,6 +5449,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1028, 0x064a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x064b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x0665, "Dell XPS 13", ALC288_FIXUP_DELL_XPS_13),
++ SND_PCI_QUIRK(0x1028, 0x0669, "Dell Optiplex 9020m", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x069a, "Dell Vostro 5480", ALC290_FIXUP_SUBWOOFER_HSJACK),
+ SND_PCI_QUIRK(0x1028, 0x06c7, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x06d9, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+@@ -5583,6 +5584,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x17aa, 0x5034, "Thinkpad T450", ALC292_FIXUP_TPT440_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x5036, "Thinkpad T450s", ALC292_FIXUP_TPT440_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x503c, "Thinkpad L450", ALC292_FIXUP_TPT440_DOCK),
++ SND_PCI_QUIRK(0x17aa, 0x504a, "ThinkPad X260", ALC292_FIXUP_TPT440_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x504b, "Thinkpad", ALC293_FIXUP_LENOVO_SPK_NOISE),
+ SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
+diff --git a/sound/pci/pcxhr/pcxhr_core.c b/sound/pci/pcxhr/pcxhr_core.c
+index c5194f5b150a..d7e71f309299 100644
+--- a/sound/pci/pcxhr/pcxhr_core.c
++++ b/sound/pci/pcxhr/pcxhr_core.c
+@@ -1341,5 +1341,6 @@ irqreturn_t pcxhr_threaded_irq(int irq, void *dev_id)
+ }
+
+ pcxhr_msg_thread(mgr);
++ mutex_unlock(&mgr->lock);
+ return IRQ_HANDLED;
+ }
+diff --git a/sound/soc/codecs/rt5640.c b/sound/soc/codecs/rt5640.c
+index f2beb1aa5763..b1c8bb39cdf1 100644
+--- a/sound/soc/codecs/rt5640.c
++++ b/sound/soc/codecs/rt5640.c
+@@ -359,7 +359,7 @@ static const DECLARE_TLV_DB_RANGE(bst_tlv,
+
+ /* Interface data select */
+ static const char * const rt5640_data_select[] = {
+- "Normal", "left copy to right", "right copy to left", "Swap"};
++ "Normal", "Swap", "left copy to right", "right copy to left"};
+
+ static SOC_ENUM_SINGLE_DECL(rt5640_if1_dac_enum, RT5640_DIG_INF_DATA,
+ RT5640_IF1_DAC_SEL_SFT, rt5640_data_select);
+diff --git a/sound/soc/codecs/rt5640.h b/sound/soc/codecs/rt5640.h
+index 3deb8babeabb..243f42633989 100644
+--- a/sound/soc/codecs/rt5640.h
++++ b/sound/soc/codecs/rt5640.h
+@@ -442,39 +442,39 @@
+ #define RT5640_IF1_DAC_SEL_MASK (0x3 << 14)
+ #define RT5640_IF1_DAC_SEL_SFT 14
+ #define RT5640_IF1_DAC_SEL_NOR (0x0 << 14)
+-#define RT5640_IF1_DAC_SEL_L2R (0x1 << 14)
+-#define RT5640_IF1_DAC_SEL_R2L (0x2 << 14)
+-#define RT5640_IF1_DAC_SEL_SWAP (0x3 << 14)
++#define RT5640_IF1_DAC_SEL_SWAP (0x1 << 14)
++#define RT5640_IF1_DAC_SEL_L2R (0x2 << 14)
++#define RT5640_IF1_DAC_SEL_R2L (0x3 << 14)
+ #define RT5640_IF1_ADC_SEL_MASK (0x3 << 12)
+ #define RT5640_IF1_ADC_SEL_SFT 12
+ #define RT5640_IF1_ADC_SEL_NOR (0x0 << 12)
+-#define RT5640_IF1_ADC_SEL_L2R (0x1 << 12)
+-#define RT5640_IF1_ADC_SEL_R2L (0x2 << 12)
+-#define RT5640_IF1_ADC_SEL_SWAP (0x3 << 12)
++#define RT5640_IF1_ADC_SEL_SWAP (0x1 << 12)
++#define RT5640_IF1_ADC_SEL_L2R (0x2 << 12)
++#define RT5640_IF1_ADC_SEL_R2L (0x3 << 12)
+ #define RT5640_IF2_DAC_SEL_MASK (0x3 << 10)
+ #define RT5640_IF2_DAC_SEL_SFT 10
+ #define RT5640_IF2_DAC_SEL_NOR (0x0 << 10)
+-#define RT5640_IF2_DAC_SEL_L2R (0x1 << 10)
+-#define RT5640_IF2_DAC_SEL_R2L (0x2 << 10)
+-#define RT5640_IF2_DAC_SEL_SWAP (0x3 << 10)
++#define RT5640_IF2_DAC_SEL_SWAP (0x1 << 10)
++#define RT5640_IF2_DAC_SEL_L2R (0x2 << 10)
++#define RT5640_IF2_DAC_SEL_R2L (0x3 << 10)
+ #define RT5640_IF2_ADC_SEL_MASK (0x3 << 8)
+ #define RT5640_IF2_ADC_SEL_SFT 8
+ #define RT5640_IF2_ADC_SEL_NOR (0x0 << 8)
+-#define RT5640_IF2_ADC_SEL_L2R (0x1 << 8)
+-#define RT5640_IF2_ADC_SEL_R2L (0x2 << 8)
+-#define RT5640_IF2_ADC_SEL_SWAP (0x3 << 8)
++#define RT5640_IF2_ADC_SEL_SWAP (0x1 << 8)
++#define RT5640_IF2_ADC_SEL_L2R (0x2 << 8)
++#define RT5640_IF2_ADC_SEL_R2L (0x3 << 8)
+ #define RT5640_IF3_DAC_SEL_MASK (0x3 << 6)
+ #define RT5640_IF3_DAC_SEL_SFT 6
+ #define RT5640_IF3_DAC_SEL_NOR (0x0 << 6)
+-#define RT5640_IF3_DAC_SEL_L2R (0x1 << 6)
+-#define RT5640_IF3_DAC_SEL_R2L (0x2 << 6)
+-#define RT5640_IF3_DAC_SEL_SWAP (0x3 << 6)
++#define RT5640_IF3_DAC_SEL_SWAP (0x1 << 6)
++#define RT5640_IF3_DAC_SEL_L2R (0x2 << 6)
++#define RT5640_IF3_DAC_SEL_R2L (0x3 << 6)
+ #define RT5640_IF3_ADC_SEL_MASK (0x3 << 4)
+ #define RT5640_IF3_ADC_SEL_SFT 4
+ #define RT5640_IF3_ADC_SEL_NOR (0x0 << 4)
+-#define RT5640_IF3_ADC_SEL_L2R (0x1 << 4)
+-#define RT5640_IF3_ADC_SEL_R2L (0x2 << 4)
+-#define RT5640_IF3_ADC_SEL_SWAP (0x3 << 4)
++#define RT5640_IF3_ADC_SEL_SWAP (0x1 << 4)
++#define RT5640_IF3_ADC_SEL_L2R (0x2 << 4)
++#define RT5640_IF3_ADC_SEL_R2L (0x3 << 4)
+
+ /* REC Left Mixer Control 1 (0x3b) */
+ #define RT5640_G_HP_L_RM_L_MASK (0x7 << 13)
+diff --git a/sound/soc/codecs/ssm4567.c b/sound/soc/codecs/ssm4567.c
+index e619d5651b09..080c78e88e10 100644
+--- a/sound/soc/codecs/ssm4567.c
++++ b/sound/soc/codecs/ssm4567.c
+@@ -352,6 +352,11 @@ static int ssm4567_set_power(struct ssm4567 *ssm4567, bool enable)
+ regcache_cache_only(ssm4567->regmap, !enable);
+
+ if (enable) {
++ ret = regmap_write(ssm4567->regmap, SSM4567_REG_SOFT_RESET,
++ 0x00);
++ if (ret)
++ return ret;
++
+ ret = regmap_update_bits(ssm4567->regmap,
+ SSM4567_REG_POWER_CTRL,
+ SSM4567_POWER_SPWDN, 0x00);
+diff --git a/sound/soc/samsung/s3c-i2s-v2.c b/sound/soc/samsung/s3c-i2s-v2.c
+index df65c5b494b1..b6ab3fc5789e 100644
+--- a/sound/soc/samsung/s3c-i2s-v2.c
++++ b/sound/soc/samsung/s3c-i2s-v2.c
+@@ -709,7 +709,7 @@ static int s3c2412_i2s_resume(struct snd_soc_dai *dai)
+ #endif
+
+ int s3c_i2sv2_register_component(struct device *dev, int id,
+- struct snd_soc_component_driver *cmp_drv,
++ const struct snd_soc_component_driver *cmp_drv,
+ struct snd_soc_dai_driver *dai_drv)
+ {
+ struct snd_soc_dai_ops *ops = (struct snd_soc_dai_ops *)dai_drv->ops;
+diff --git a/sound/soc/samsung/s3c-i2s-v2.h b/sound/soc/samsung/s3c-i2s-v2.h
+index 90abab364b49..d0684145ed1f 100644
+--- a/sound/soc/samsung/s3c-i2s-v2.h
++++ b/sound/soc/samsung/s3c-i2s-v2.h
+@@ -101,7 +101,7 @@ extern int s3c_i2sv2_probe(struct snd_soc_dai *dai,
+ * soc core.
+ */
+ extern int s3c_i2sv2_register_component(struct device *dev, int id,
+- struct snd_soc_component_driver *cmp_drv,
++ const struct snd_soc_component_driver *cmp_drv,
+ struct snd_soc_dai_driver *dai_drv);
+
+ #endif /* __SND_SOC_S3C24XX_S3C_I2SV2_I2S_H */
+diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
+index 416514fe9e63..afb70a5d4fd3 100644
+--- a/sound/soc/soc-dapm.c
++++ b/sound/soc/soc-dapm.c
+@@ -2188,6 +2188,13 @@ static ssize_t dapm_widget_show_component(struct snd_soc_component *cmpnt,
+ int count = 0;
+ char *state = "not set";
+
++ /* card won't be set for the dummy component; as a spot fix
++ * we check for that case specifically here, but in future
++ * we will ensure that the dummy component looks like the others.
++ */
++ if (!cmpnt->card)
++ return 0;
++
+ list_for_each_entry(w, &cmpnt->card->widgets, list) {
+ if (w->dapm != dapm)
+ continue;
+diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt
+index 4e074a660826..90c3558c2c12 100644
+--- a/tools/perf/Documentation/perf-stat.txt
++++ b/tools/perf/Documentation/perf-stat.txt
+@@ -62,6 +62,14 @@ OPTIONS
+ --scale::
+ scale/normalize counter values
+
++-d::
++--detailed::
++ print more detailed statistics; can be specified up to 3 times
++
++ -d: detailed events, L1 and LLC data cache
++ -d -d: more detailed events, dTLB and iTLB events
++ -d -d -d: very detailed events, adding prefetch events
++
+ -r::
+ --repeat=<n>::
+ repeat command and print average + stddev (max: 100). 0 means forever.
+diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
+index 81def6c3f24b..3900386a3629 100644
+--- a/tools/perf/ui/browsers/hists.c
++++ b/tools/perf/ui/browsers/hists.c
+@@ -2059,10 +2059,12 @@ skip_annotation:
+ *
+ * See hist_browser__show_entry.
+ */
+- nr_options += add_script_opt(browser,
+- &actions[nr_options],
+- &options[nr_options],
+- NULL, browser->selection->sym);
++ if (sort__has_sym && browser->selection->sym) {
++ nr_options += add_script_opt(browser,
++ &actions[nr_options],
++ &options[nr_options],
++ NULL, browser->selection->sym);
++ }
+ }
+ nr_options += add_script_opt(browser, &actions[nr_options],
+ &options[nr_options], NULL, NULL);
+diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
+index 8b10621b415c..956187bf1a85 100644
+--- a/tools/perf/util/event.c
++++ b/tools/perf/util/event.c
+@@ -274,7 +274,7 @@ int perf_event__synthesize_mmap_events(struct perf_tool *tool,
+ strcpy(execname, "");
+
+ /* 00400000-0040c000 r-xp 00000000 fd:01 41038 /bin/cat */
+- n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %x:%x %u %s\n",
++ n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %x:%x %u %[^\n]\n",
+ &event->mmap2.start, &event->mmap2.len, prot,
+ &event->mmap2.pgoff, &event->mmap2.maj,
+ &event->mmap2.min,
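
(Aside, not part of the patch: why the conversion change matters. "%s" stops at the first whitespace, so an executable path containing spaces loses everything after the first word; "%[^\n]" consumes the rest of the line. A standalone sketch:)

#include <stdio.h>

int main(void)
{
	const char *line = "00400000-0040c000 r-xp 41038 /opt/My App/cat";
	char trunc[256], full[256];

	sscanf(line, "%*s %*s %*s %255s", trunc);	/* "/opt/My" */
	sscanf(line, "%*s %*s %*s %255[^\n]", full);	/* "/opt/My App/cat" */
	printf("%s\n%s\n", trunc, full);
	return 0;
}
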
+diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
+index d1392194a9a9..b4b96120fc3b 100644
+--- a/tools/perf/util/evlist.c
++++ b/tools/perf/util/evlist.c
+@@ -1211,12 +1211,12 @@ void perf_evlist__set_maps(struct perf_evlist *evlist, struct cpu_map *cpus,
+ */
+ if (cpus != evlist->cpus) {
+ cpu_map__put(evlist->cpus);
+- evlist->cpus = cpus;
++ evlist->cpus = cpu_map__get(cpus);
+ }
+
+ if (threads != evlist->threads) {
+ thread_map__put(evlist->threads);
+- evlist->threads = threads;
++ evlist->threads = thread_map__get(threads);
+ }
+
+ perf_evlist__propagate_maps(evlist);
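
(Aside, not part of the patch: the rule the hunk enforces is that a structure storing a refcounted pointer must take its own reference on assignment, because the caller keeps and later drops its own. A sketch with an illustrative refcount — the real cpu_map__get()/cpu_map__put() bodies differ:)

#include <stdatomic.h>
#include <stdlib.h>

struct cpu_map {
	atomic_int refcnt;
	/* ... map contents ... */
};

static struct cpu_map *cpu_map__get(struct cpu_map *map)
{
	if (map)
		atomic_fetch_add(&map->refcnt, 1);
	return map;
}

static void cpu_map__put(struct cpu_map *map)
{
	if (map && atomic_fetch_sub(&map->refcnt, 1) == 1)
		free(map);
}

/* On reassignment: drop the old reference, take one on the new map. */
static void evlist_set_cpus(struct cpu_map **slot, struct cpu_map *cpus)
{
	if (*slot != cpus) {
		cpu_map__put(*slot);
		*slot = cpu_map__get(cpus);
	}
}
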
+diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
+index 97f963a3dcb9..9227c2f076c3 100644
+--- a/tools/perf/util/intel-pt.c
++++ b/tools/perf/util/intel-pt.c
+@@ -1127,7 +1127,7 @@ static int intel_pt_synth_transaction_sample(struct intel_pt_queue *ptq)
+ pr_err("Intel Processor Trace: failed to deliver transaction event, error %d\n",
+ ret);
+
+- if (pt->synth_opts.callchain)
++ if (pt->synth_opts.last_branch)
+ intel_pt_reset_last_branch_rb(ptq);
+
+ return ret;
+diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
+index ea6064696fe4..a7b9022b5c8f 100644
+--- a/virt/kvm/arm/arch_timer.c
++++ b/virt/kvm/arm/arch_timer.c
+@@ -86,6 +86,8 @@ static void kvm_timer_inject_irq_work(struct work_struct *work)
+ vcpu = container_of(work, struct kvm_vcpu, arch.timer_cpu.expired);
+ vcpu->arch.timer_cpu.armed = false;
+
++ WARN_ON(!kvm_timer_should_fire(vcpu));
++
+ /*
+ * If the vcpu is blocked we want to wake it up so that it will see
+ * the timer has expired when entering the guest.
+@@ -93,10 +95,46 @@ static void kvm_timer_inject_irq_work(struct work_struct *work)
+ kvm_vcpu_kick(vcpu);
+ }
+
++static u64 kvm_timer_compute_delta(struct kvm_vcpu *vcpu)
++{
++ cycle_t cval, now;
++
++ cval = vcpu->arch.timer_cpu.cntv_cval;
++ now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
++
++ if (now < cval) {
++ u64 ns;
++
++ ns = cyclecounter_cyc2ns(timecounter->cc,
++ cval - now,
++ timecounter->mask,
++ &timecounter->frac);
++ return ns;
++ }
++
++ return 0;
++}
++
+ static enum hrtimer_restart kvm_timer_expire(struct hrtimer *hrt)
+ {
+ struct arch_timer_cpu *timer;
++ struct kvm_vcpu *vcpu;
++ u64 ns;
++
+ timer = container_of(hrt, struct arch_timer_cpu, timer);
++ vcpu = container_of(timer, struct kvm_vcpu, arch.timer_cpu);
++
++ /*
++ * Check that the timer has really expired from the guest's
++ * PoV (NTP on the host may have forced it to expire
++ * early). If we should have slept longer, restart it.
++ */
++ ns = kvm_timer_compute_delta(vcpu);
++ if (unlikely(ns)) {
++ hrtimer_forward_now(hrt, ns_to_ktime(ns));
++ return HRTIMER_RESTART;
++ }
++
+ queue_work(wqueue, &timer->expired);
+ return HRTIMER_NORESTART;
+ }
+@@ -170,8 +208,6 @@ static int kvm_timer_update_state(struct kvm_vcpu *vcpu)
+ void kvm_timer_schedule(struct kvm_vcpu *vcpu)
+ {
+ struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+- u64 ns;
+- cycle_t cval, now;
+
+ BUG_ON(timer_is_armed(timer));
+
+@@ -191,14 +227,7 @@ void kvm_timer_schedule(struct kvm_vcpu *vcpu)
+ return;
+
+ /* The timer has not yet expired, schedule a background timer */
+- cval = timer->cntv_cval;
+- now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
+-
+- ns = cyclecounter_cyc2ns(timecounter->cc,
+- cval - now,
+- timecounter->mask,
+- &timecounter->frac);
+- timer_arm(timer, ns);
++ timer_arm(timer, kvm_timer_compute_delta(vcpu));
+ }
+
+ void kvm_timer_unschedule(struct kvm_vcpu *vcpu)
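
(Aside, not part of the patch: the pattern above — recompute the remaining delta in the expiry handler and restart the timer if it fired early — in a standalone sketch. phys_timer_read() and cycles_to_ns() are hypothetical stand-ins for the host counter and cycle conversion:)

#include <stdint.h>

/* hypothetical stand-ins, assumptions for this sketch only */
static uint64_t phys_timer_read(void) { static uint64_t c; return c += 100; }
static uint64_t cycles_to_ns(uint64_t cyc) { return cyc * 16; }

struct vtimer {
	uint64_t cval;		/* guest compare value, in cycles */
	uint64_t cntvoff;	/* guest/host counter offset */
};

/* ns until expiry from the guest's point of view; 0 if already expired */
static uint64_t timer_compute_delta(const struct vtimer *t)
{
	uint64_t now = phys_timer_read() - t->cntvoff;

	return now < t->cval ? cycles_to_ns(t->cval - now) : 0;
}

/* Expiry handler: NTP on the host may have made the backing timer fire
 * early, so recheck and return a re-arm delay instead of delivering a
 * spurious wakeup. */
static uint64_t timer_expire(const struct vtimer *t)
{
	uint64_t ns = timer_compute_delta(t);

	if (ns)
		return ns;	/* HRTIMER_RESTART analogue */
	/* ... otherwise queue the vcpu wakeup work here ... */
	return 0;
}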