author    Mike Pagano <mpagano@gentoo.org>  2015-03-24 19:19:25 -0400
committer Mike Pagano <mpagano@gentoo.org>  2015-03-24 19:19:25 -0400
commit    2c07eb276f5dc53f6200006e696e9af2077bfecc (patch)
tree      f5e494edeb73f889323c25d61a2abd2f30721958
parent    Update gcc >= 4.9 optimization patch. See bug #544028. (diff)
download  linux-patches-2c07eb276f5dc53f6200006e696e9af2077bfecc.tar.gz
          linux-patches-2c07eb276f5dc53f6200006e696e9af2077bfecc.tar.bz2
          linux-patches-2c07eb276f5dc53f6200006e696e9af2077bfecc.zip
Linux patch 3.18.10 (3.18-12)
-rw-r--r--  0000_README               |    4
-rw-r--r--  1009_linux-3.18.10.patch  | 6057
2 files changed, 6061 insertions(+), 0 deletions(-)
diff --git a/0000_README b/0000_README
index 81613045..ab13f61a 100644
--- a/0000_README
+++ b/0000_README
@@ -79,6 +79,10 @@ Patch: 1008_linux-3.18.9.patch
From: http://www.kernel.org
Desc: Linux 3.18.9
+Patch: 1009_linux-3.18.10.patch
+From: http://www.kernel.org
+Desc: Linux 3.18.10
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1009_linux-3.18.10.patch b/1009_linux-3.18.10.patch
new file mode 100644
index 00000000..7a9284fd
--- /dev/null
+++ b/1009_linux-3.18.10.patch
@@ -0,0 +1,6057 @@
+diff --git a/Documentation/stable_kernel_rules.txt b/Documentation/stable_kernel_rules.txt
+index aee73e78c7d4..02f8331edb8b 100644
+--- a/Documentation/stable_kernel_rules.txt
++++ b/Documentation/stable_kernel_rules.txt
+@@ -32,18 +32,42 @@ Procedure for submitting patches to the -stable tree:
+ - If the patch covers files in net/ or drivers/net please follow netdev stable
+ submission guidelines as described in
+ Documentation/networking/netdev-FAQ.txt
+- - Send the patch, after verifying that it follows the above rules, to
+- stable@vger.kernel.org. You must note the upstream commit ID in the
+- changelog of your submission, as well as the kernel version you wish
+- it to be applied to.
+- - To have the patch automatically included in the stable tree, add the tag
++ - Security patches should not be handled (solely) by the -stable review
++ process but should follow the procedures in Documentation/SecurityBugs.
++
++For all other submissions, choose one of the following procedures:
++
++ --- Option 1 ---
++
++ To have the patch automatically included in the stable tree, add the tag
+ Cc: stable@vger.kernel.org
+ in the sign-off area. Once the patch is merged it will be applied to
+ the stable tree without anything else needing to be done by the author
+ or subsystem maintainer.
+- - If the patch requires other patches as prerequisites which can be
+- cherry-picked, then this can be specified in the following format in
+- the sign-off area:
++
++ --- Option 2 ---
++
++ After the patch has been merged to Linus' tree, send an email to
++ stable@vger.kernel.org containing the subject of the patch, the commit ID,
++ why you think it should be applied, and what kernel version you wish it to
++ be applied to.
++
++ --- Option 3 ---
++
++ Send the patch, after verifying that it follows the above rules, to
++ stable@vger.kernel.org. You must note the upstream commit ID in the
++ changelog of your submission, as well as the kernel version you wish
++ it to be applied to.
++
++Option 1 is probably the easiest and most common. Options 2 and 3 are more
++useful if the patch isn't deemed worthy at the time it is applied to a public
++git tree (for instance, because it deserves more regression testing first).
++Option 3 is especially useful if the patch needs some special handling to apply
++to an older kernel (e.g., if API's have changed in the meantime).
++
++Additionally, some patches submitted via Option 1 may have additional patch
++prerequisites which can be cherry-picked. This can be specified in the following
++format in the sign-off area:
+
+ Cc: <stable@vger.kernel.org> # 3.3.x: a1f84a3: sched: Check for idle
+ Cc: <stable@vger.kernel.org> # 3.3.x: 1b9508f: sched: Rate-limit newidle
+@@ -57,13 +81,13 @@ Procedure for submitting patches to the -stable tree:
+ git cherry-pick fd21073
+ git cherry-pick <this commit>
+
++Following the submission:
++
+ - The sender will receive an ACK when the patch has been accepted into the
+ queue, or a NAK if the patch is rejected. This response might take a few
+ days, according to the developer's schedules.
+ - If accepted, the patch will be added to the -stable queue, for review by
+ other developers and by the relevant subsystem maintainer.
+- - Security patches should not be sent to this alias, but instead to the
+- documented security@kernel.org address.
+
+
+ Review cycle:
+diff --git a/Makefile b/Makefile
+index 62b333802a0e..d4ce2cb674c8 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 18
+-SUBLEVEL = 9
++SUBLEVEL = 10
+ EXTRAVERSION =
+ NAME = Diseased Newt
+
+diff --git a/arch/arc/include/asm/processor.h b/arch/arc/include/asm/processor.h
+index 210fe97464c3..c750af161979 100644
+--- a/arch/arc/include/asm/processor.h
++++ b/arch/arc/include/asm/processor.h
+@@ -75,18 +75,19 @@ unsigned long thread_saved_pc(struct task_struct *t);
+ #define release_segments(mm) do { } while (0)
+
+ #define KSTK_EIP(tsk) (task_pt_regs(tsk)->ret)
++#define KSTK_ESP(tsk) (task_pt_regs(tsk)->sp)
+
+ /*
+ * Where abouts of Task's sp, fp, blink when it was last seen in kernel mode.
+ * Look in process.c for details of kernel stack layout
+ */
+-#define KSTK_ESP(tsk) (tsk->thread.ksp)
++#define TSK_K_ESP(tsk) (tsk->thread.ksp)
+
+-#define KSTK_REG(tsk, off) (*((unsigned int *)(KSTK_ESP(tsk) + \
++#define TSK_K_REG(tsk, off) (*((unsigned int *)(TSK_K_ESP(tsk) + \
+ sizeof(struct callee_regs) + off)))
+
+-#define KSTK_BLINK(tsk) KSTK_REG(tsk, 4)
+-#define KSTK_FP(tsk) KSTK_REG(tsk, 0)
++#define TSK_K_BLINK(tsk) TSK_K_REG(tsk, 4)
++#define TSK_K_FP(tsk) TSK_K_REG(tsk, 0)
+
+ extern void start_thread(struct pt_regs * regs, unsigned long pc,
+ unsigned long usp);
+diff --git a/arch/arc/kernel/stacktrace.c b/arch/arc/kernel/stacktrace.c
+index 9ce47cfe2303..fb98769b6a98 100644
+--- a/arch/arc/kernel/stacktrace.c
++++ b/arch/arc/kernel/stacktrace.c
+@@ -64,9 +64,9 @@ static void seed_unwind_frame_info(struct task_struct *tsk,
+
+ frame_info->task = tsk;
+
+- frame_info->regs.r27 = KSTK_FP(tsk);
+- frame_info->regs.r28 = KSTK_ESP(tsk);
+- frame_info->regs.r31 = KSTK_BLINK(tsk);
++ frame_info->regs.r27 = TSK_K_FP(tsk);
++ frame_info->regs.r28 = TSK_K_ESP(tsk);
++ frame_info->regs.r31 = TSK_K_BLINK(tsk);
+ frame_info->regs.r63 = (unsigned int)__switch_to;
+
+ /* In the prologue of __switch_to, first FP is saved on stack
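
For context on this rename: KSTK_EIP()/KSTK_ESP() are consumed by generic code such as fs/proc to report a task's last user-mode PC/SP, so on ARC they must read the user pt_regs; the kernel-mode stack pointer keeps its role under the new TSK_K_* names. A minimal sketch of the consumer side, illustrative and not part of this patch:

	/* fs/proc-style usage: these must be user-mode registers */
	unsigned long eip = KSTK_EIP(task);	/* task_pt_regs(task)->ret */
	unsigned long esp = KSTK_ESP(task);	/* task_pt_regs(task)->sp  */
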
+diff --git a/arch/mips/kvm/trace.h b/arch/mips/kvm/trace.h
+index c1388d40663b..bd6437f67dc0 100644
+--- a/arch/mips/kvm/trace.h
++++ b/arch/mips/kvm/trace.h
+@@ -24,18 +24,18 @@ TRACE_EVENT(kvm_exit,
+ TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason),
+ TP_ARGS(vcpu, reason),
+ TP_STRUCT__entry(
+- __field(struct kvm_vcpu *, vcpu)
++ __field(unsigned long, pc)
+ __field(unsigned int, reason)
+ ),
+
+ TP_fast_assign(
+- __entry->vcpu = vcpu;
++ __entry->pc = vcpu->arch.pc;
+ __entry->reason = reason;
+ ),
+
+ TP_printk("[%s]PC: 0x%08lx",
+ kvm_mips_exit_types_str[__entry->reason],
+- __entry->vcpu->arch.pc)
++ __entry->pc)
+ );
+
+ #endif /* _TRACE_KVM_H */
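
The rule this hunk enforces: TP_fast_assign() runs when the event fires, but TP_printk() runs later, when the trace buffer is read, so an entry must store plain values rather than pointers that may be stale (or freed) by read time. The safe shape, as the fix uses it:

	TP_fast_assign(
		__entry->pc = vcpu->arch.pc;	/* copy the value now */
	),
	TP_printk("PC: 0x%08lx", __entry->pc)	/* no vcpu dereference at read time */
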
+diff --git a/arch/powerpc/include/asm/pnv-pci.h b/arch/powerpc/include/asm/pnv-pci.h
+index f09a22fa1bd7..bfa8f8ac51fa 100644
+--- a/arch/powerpc/include/asm/pnv-pci.h
++++ b/arch/powerpc/include/asm/pnv-pci.h
+@@ -19,7 +19,7 @@ int pnv_cxl_ioda_msi_setup(struct pci_dev *dev, unsigned int hwirq,
+ int pnv_cxl_alloc_hwirqs(struct pci_dev *dev, int num);
+ void pnv_cxl_release_hwirqs(struct pci_dev *dev, int hwirq, int num);
+ int pnv_cxl_get_irq_count(struct pci_dev *dev);
+-struct device_node *pnv_pci_to_phb_node(struct pci_dev *dev);
++struct device_node *pnv_pci_get_phb_node(struct pci_dev *dev);
+
+ #ifdef CONFIG_CXL_BASE
+ int pnv_cxl_alloc_hwirq_ranges(struct cxl_irq_ranges *irqs,
+diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
+index 3ba435ec3dcd..3f596706a5b7 100644
+--- a/arch/powerpc/platforms/powernv/pci-ioda.c
++++ b/arch/powerpc/platforms/powernv/pci-ioda.c
+@@ -1355,13 +1355,13 @@ static void set_msi_irq_chip(struct pnv_phb *phb, unsigned int virq)
+
+ #ifdef CONFIG_CXL_BASE
+
+-struct device_node *pnv_pci_to_phb_node(struct pci_dev *dev)
++struct device_node *pnv_pci_get_phb_node(struct pci_dev *dev)
+ {
+ struct pci_controller *hose = pci_bus_to_host(dev->bus);
+
+- return hose->dn;
++ return of_node_get(hose->dn);
+ }
+-EXPORT_SYMBOL(pnv_pci_to_phb_node);
++EXPORT_SYMBOL(pnv_pci_get_phb_node);
+
+ int pnv_phb_to_cxl(struct pci_dev *dev)
+ {
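
The get-style name advertises the new contract: the function now returns the device_node with a reference held, so callers own that reference and must release it. The expected caller pattern (assumed, not shown in this diff):

	struct device_node *np = pnv_pci_get_phb_node(dev);
	/* ... use np ... */
	of_node_put(np);	/* balances the of_node_get() */
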
+diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
+index 7e7a79ada658..d82b80405e45 100644
+--- a/arch/x86/include/asm/xsave.h
++++ b/arch/x86/include/asm/xsave.h
+@@ -81,18 +81,15 @@ static inline int xsave_state_booting(struct xsave_struct *fx, u64 mask)
+ if (boot_cpu_has(X86_FEATURE_XSAVES))
+ asm volatile("1:"XSAVES"\n\t"
+ "2:\n\t"
+- : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
++ xstate_fault
++ : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
+ : "memory");
+ else
+ asm volatile("1:"XSAVE"\n\t"
+ "2:\n\t"
+- : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
++ xstate_fault
++ : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
+ : "memory");
+-
+- asm volatile(xstate_fault
+- : "0" (0)
+- : "memory");
+-
+ return err;
+ }
+
+@@ -111,18 +108,15 @@ static inline int xrstor_state_booting(struct xsave_struct *fx, u64 mask)
+ if (boot_cpu_has(X86_FEATURE_XSAVES))
+ asm volatile("1:"XRSTORS"\n\t"
+ "2:\n\t"
+- : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
++ xstate_fault
++ : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
+ : "memory");
+ else
+ asm volatile("1:"XRSTOR"\n\t"
+ "2:\n\t"
+- : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
++ xstate_fault
++ : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
+ : "memory");
+-
+- asm volatile(xstate_fault
+- : "0" (0)
+- : "memory");
+-
+ return err;
+ }
+
+@@ -148,9 +142,9 @@ static inline int xsave_state(struct xsave_struct *fx, u64 mask)
+ */
+ alternative_input_2(
+ "1:"XSAVE,
+- "1:"XSAVEOPT,
++ XSAVEOPT,
+ X86_FEATURE_XSAVEOPT,
+- "1:"XSAVES,
++ XSAVES,
+ X86_FEATURE_XSAVES,
+ [fx] "D" (fx), "a" (lmask), "d" (hmask) :
+ "memory");
+@@ -177,7 +171,7 @@ static inline int xrstor_state(struct xsave_struct *fx, u64 mask)
+ */
+ alternative_input(
+ "1: " XRSTOR,
+- "1: " XRSTORS,
++ XRSTORS,
+ X86_FEATURE_XSAVES,
+ "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
+ : "memory");
+diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
+index c0226ab54106..f1dc27f457f1 100644
+--- a/arch/x86/kernel/entry_64.S
++++ b/arch/x86/kernel/entry_64.S
+@@ -334,11 +334,14 @@ ENTRY(ret_from_fork)
+ testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
+ jz 1f
+
+- testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
+- jnz int_ret_from_sys_call
+-
+- RESTORE_TOP_OF_STACK %rdi, -ARGOFFSET
+- jmp ret_from_sys_call # go to the SYSRET fastpath
++ /*
++ * By the time we get here, we have no idea whether our pt_regs,
++ * ti flags, and ti status came from the 64-bit SYSCALL fast path,
++ * the slow path, or one of the ia32entry paths.
++ * Use int_ret_from_sys_call to return, since it can safely handle
++ * all of the above.
++ */
++ jmp int_ret_from_sys_call
+
+ 1:
+ subq $REST_SKIP, %rsp # leave space for volatiles
+diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
+index c7327a7761ca..974e4d98ed29 100644
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -4829,7 +4829,8 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
+ if (rc != X86EMUL_CONTINUE)
+ goto done;
+ }
+- ctxt->dst.orig_val = ctxt->dst.val;
++ /* Copy full 64-bit value for CMPXCHG8B. */
++ ctxt->dst.orig_val64 = ctxt->dst.val64;
+
+ special_insn:
+
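
Why val64 matters here: on a 32-bit host the operand's val member is an unsigned long, so copying through it would truncate the 64-bit comparand CMPXCHG8B needs, while val64 is an explicit u64. Reduced illustration, assuming a 32-bit unsigned long:

	u64 full = 0x1122334455667788ULL;
	unsigned long lost = full;	/* 0x55667788: high half dropped */
	u64 kept = full;		/* all 64 bits preserved */
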
+diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
+index 41e9c199e874..fdb5701bed75 100644
+--- a/drivers/acpi/acpi_lpss.c
++++ b/drivers/acpi/acpi_lpss.c
+@@ -65,6 +65,7 @@ struct lpss_private_data;
+
+ struct lpss_device_desc {
+ unsigned int flags;
++ const char *clk_con_id;
+ unsigned int prv_offset;
+ size_t prv_size_override;
+ void (*setup)(struct lpss_private_data *pdata);
+@@ -140,6 +141,7 @@ static struct lpss_device_desc lpt_i2c_dev_desc = {
+
+ static struct lpss_device_desc lpt_uart_dev_desc = {
+ .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR,
++ .clk_con_id = "baudclk",
+ .prv_offset = 0x800,
+ .setup = lpss_uart_setup,
+ };
+@@ -156,6 +158,7 @@ static struct lpss_device_desc byt_pwm_dev_desc = {
+
+ static struct lpss_device_desc byt_uart_dev_desc = {
+ .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
++ .clk_con_id = "baudclk",
+ .prv_offset = 0x800,
+ .setup = lpss_uart_setup,
+ };
+@@ -313,7 +316,7 @@ out:
+ return PTR_ERR(clk);
+
+ pdata->clk = clk;
+- clk_register_clkdev(clk, NULL, devname);
++ clk_register_clkdev(clk, dev_desc->clk_con_id, devname);
+ return 0;
+ }
+
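
The new clk_con_id field lets the clkdev entry carry a connection id, so a driver that requests its clock by name can find it; the LPSS UARTs' driver looks the clock up as "baudclk". Roughly, on the consumer side (a sketch, not from this patch):

	struct clk *clk = devm_clk_get(&pdev->dev, "baudclk");
	if (!IS_ERR(clk))
		rate = clk_get_rate(clk);	/* drives the UART baud divisor */
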
+diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
+index 41322591fb43..ff7bc22b6135 100644
+--- a/drivers/acpi/video.c
++++ b/drivers/acpi/video.c
+@@ -2124,6 +2124,17 @@ EXPORT_SYMBOL(acpi_video_unregister_backlight);
+
+ static int __init acpi_video_init(void)
+ {
++ /*
++ * Let the module load even if ACPI is disabled (e.g. due to
++ * a broken BIOS) so that i915.ko can still be loaded on such
++ * old systems without an AcpiOpRegion.
++ *
++ * acpi_video_register() will report -ENODEV later as well due
++ * to acpi_disabled when i915.ko tries to register itself afterwards.
++ */
++ if (acpi_disabled)
++ return 0;
++
+ dmi_check_system(video_dmi_table);
+
+ if (intel_opregion_present())
+diff --git a/drivers/clk/clk-gate.c b/drivers/clk/clk-gate.c
+index 51fd87fb7ba6..da00eeb95dad 100644
+--- a/drivers/clk/clk-gate.c
++++ b/drivers/clk/clk-gate.c
+@@ -128,7 +128,7 @@ struct clk *clk_register_gate(struct device *dev, const char *name,
+ struct clk_init_data init;
+
+ if (clk_gate_flags & CLK_GATE_HIWORD_MASK) {
+- if (bit_idx > 16) {
++ if (bit_idx > 15) {
+ pr_err("gate bit exceeds LOWORD field\n");
+ return ERR_PTR(-EINVAL);
+ }
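
Background on the tightened bound: a HIWORD-masked register keeps the value in bits 15:0 and a per-bit write-enable mask in bits 31:16, so a gate bit index above 15 has no corresponding mask bit. The write pattern is roughly:

	/* set gate bit 'bit_idx' and enable the write for only that bit */
	writel(BIT(bit_idx) | BIT(bit_idx + 16), reg);	/* needs bit_idx <= 15 */
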
+diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
+index 26bed0889e97..7d74830e2ced 100644
+--- a/drivers/clk/clk.c
++++ b/drivers/clk/clk.c
+@@ -343,13 +343,9 @@ unlock:
+ static void clk_debug_unregister(struct clk *clk)
+ {
+ mutex_lock(&clk_debug_lock);
+- if (!clk->dentry)
+- goto out;
+-
+ hlist_del_init(&clk->debug_node);
+ debugfs_remove_recursive(clk->dentry);
+ clk->dentry = NULL;
+-out:
+ mutex_unlock(&clk_debug_lock);
+ }
+
+diff --git a/drivers/clk/sunxi/clk-sunxi.c b/drivers/clk/sunxi/clk-sunxi.c
+index d5dc951264ca..b18e22fb25a6 100644
+--- a/drivers/clk/sunxi/clk-sunxi.c
++++ b/drivers/clk/sunxi/clk-sunxi.c
+@@ -419,6 +419,7 @@ static struct clk_factors_config sun6i_a31_pll1_config = {
+ .kwidth = 2,
+ .mshift = 0,
+ .mwidth = 2,
++ .n_start = 1,
+ };
+
+ static struct clk_factors_config sun8i_a23_pll1_config = {
+diff --git a/drivers/clk/zynq/clkc.c b/drivers/clk/zynq/clkc.c
+index 9037bebd69f7..f870aad57711 100644
+--- a/drivers/clk/zynq/clkc.c
++++ b/drivers/clk/zynq/clkc.c
+@@ -303,6 +303,7 @@ static void __init zynq_clk_setup(struct device_node *np)
+ clks[cpu_2x] = clk_register_gate(NULL, clk_output_name[cpu_2x],
+ "cpu_2x_div", CLK_IGNORE_UNUSED, SLCR_ARM_CLK_CTRL,
+ 26, 0, &armclk_lock);
++ clk_prepare_enable(clks[cpu_2x]);
+
+ clk = clk_register_fixed_factor(NULL, "cpu_1x_div", "cpu_div", 0, 1,
+ 4 + 2 * tmp);
+diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c
+index 3c97c8fa8d02..8a37af923094 100644
+--- a/drivers/dma-buf/reservation.c
++++ b/drivers/dma-buf/reservation.c
+@@ -402,8 +402,6 @@ reservation_object_test_signaled_single(struct fence *passed_fence)
+ int ret = 1;
+
+ if (!test_bit(FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) {
+- int ret;
+-
+ fence = fence_get_rcu(lfence);
+ if (!fence)
+ return -1;
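
The removed line was plain variable shadowing: the inner int ret hid the function-scope one, so assignments inside the branch never reached the caller. Reduced to its essence:

	int ret = 1;
	if (cond) {
		int ret;	/* shadows the outer ret */
		ret = -1;	/* lost when the block ends */
	}
	return ret;		/* still 1; deleting the inner declaration fixes it */
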
+diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
+index a920fec8fe88..5186eb01945a 100644
+--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
++++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
+@@ -170,12 +170,12 @@ again:
+ start = desc->phys_addr;
+ end = start + desc->num_pages * (1UL << EFI_PAGE_SHIFT);
+
+- if ((start + size) > end || (start + size) > max)
+- continue;
+-
+- if (end - size > max)
++ if (end > max)
+ end = max;
+
++ if ((start + size) > end)
++ continue;
++
+ if (round_down(end - size, align) < start)
+ continue;
+
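
The reordered checks fix the clamping logic: clamp the region's end to max first, then require that size bytes still fit below the clamped end. The old sequence could leave end above max (when end - size <= max) and later place the allocation across the max boundary. Worked example with illustrative numbers:

	/* start = 0x1000, end = 0xB000, max = 0xA000, size = 0x2000 */
	if (end > max)
		end = max;		/* end becomes 0xA000 */
	if ((start + size) > end)	/* 0x3000 <= 0xA000: still usable */
		continue;
	/* round_down(end - size, align) now stays at or below max - size */
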
+diff --git a/drivers/firmware/efi/runtime-map.c b/drivers/firmware/efi/runtime-map.c
+index 018c29a26615..87b8e3b900d2 100644
+--- a/drivers/firmware/efi/runtime-map.c
++++ b/drivers/firmware/efi/runtime-map.c
+@@ -191,7 +191,7 @@ int __init efi_runtime_map_init(struct kobject *efi_kobj)
+
+ return 0;
+ out_add_entry:
+- for (j = i - 1; j > 0; j--) {
++ for (j = i - 1; j >= 0; j--) {
+ entry = *(map_entries + j);
+ kobject_put(&entry->kobj);
+ }
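
A textbook unwind off-by-one: entries 0..i-1 were registered before the failure, so the release loop must run j from i-1 down to 0 inclusive; with j > 0 the kobject at index 0 leaked. In short:

	for (j = i - 1; j >= 0; j--)	/* >=, so index 0 is also released */
		kobject_put(&(*(map_entries + j))->kobj);
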
+diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
+index c33327d5c543..45434333b289 100644
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -2077,6 +2077,7 @@ struct drm_i915_cmd_table {
+ (INTEL_DEVID(dev) & 0xFF00) == 0x0C00)
+ #define IS_BDW_ULT(dev) (IS_BROADWELL(dev) && \
+ ((INTEL_DEVID(dev) & 0xf) == 0x6 || \
++ (INTEL_DEVID(dev) & 0xf) == 0xb || \
+ (INTEL_DEVID(dev) & 0xf) == 0xe))
+ #define IS_HSW_ULT(dev) (IS_HASWELL(dev) && \
+ (INTEL_DEVID(dev) & 0xFF00) == 0x0A00)
+diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
+index 2b1eaa29ada4..6765148ea5bc 100644
+--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
++++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
+@@ -315,9 +315,10 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
+ return -EINVAL;
+ }
+
++ mutex_lock(&dev->struct_mutex);
+ if (i915_gem_obj_is_pinned(obj) || obj->framebuffer_references) {
+- drm_gem_object_unreference_unlocked(&obj->base);
+- return -EBUSY;
++ ret = -EBUSY;
++ goto err;
+ }
+
+ if (args->tiling_mode == I915_TILING_NONE) {
+@@ -349,7 +350,6 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
+ }
+ }
+
+- mutex_lock(&dev->struct_mutex);
+ if (args->tiling_mode != obj->tiling_mode ||
+ args->stride != obj->stride) {
+ /* We need to rebind the object if its current allocation
+@@ -395,6 +395,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
+ obj->bit_17 = NULL;
+ }
+
++err:
+ drm_gem_object_unreference(&obj->base);
+ mutex_unlock(&dev->struct_mutex);
+
+diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
+index d182058383a9..1719078c763a 100644
+--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
++++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
+@@ -113,7 +113,10 @@ restart:
+ continue;
+
+ obj = mo->obj;
+- drm_gem_object_reference(&obj->base);
++
++ if (!kref_get_unless_zero(&obj->base.refcount))
++ continue;
++
+ spin_unlock(&mn->lock);
+
+ cancel_userptr(obj);
+@@ -149,7 +152,20 @@ static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
+ it = interval_tree_iter_first(&mn->objects, start, end);
+ if (it != NULL) {
+ obj = container_of(it, struct i915_mmu_object, it)->obj;
+- drm_gem_object_reference(&obj->base);
++
++ /* The mmu_object is released late when destroying the
++ * GEM object so it is entirely possible to gain a
++ * reference on an object in the process of being freed
++ * since our serialisation is via the spinlock and not
++ * the struct_mutex - and consequently use it after it
++ * is freed and then double free it.
++ */
++ if (!kref_get_unless_zero(&obj->base.refcount)) {
++ spin_unlock(&mn->lock);
++ serial = 0;
++ continue;
++ }
++
+ serial = mn->serial;
+ }
+ spin_unlock(&mn->lock);
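
Both hunks apply the same lookup-under-spinlock idiom: the interval tree can hand back an object whose last reference is concurrently being dropped, so the lookup must take its reference with kref_get_unless_zero() and treat failure as "object already being freed". The generic shape:

	spin_lock(&mn->lock);
	obj = lookup(...);
	if (obj && !kref_get_unless_zero(&obj->base.refcount))
		obj = NULL;	/* racing with free: behave as not found */
	spin_unlock(&mn->lock);
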
+diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
+index 9ba1177200b2..0ab77f319cef 100644
+--- a/drivers/gpu/drm/i915/i915_irq.c
++++ b/drivers/gpu/drm/i915/i915_irq.c
+@@ -2123,6 +2123,9 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
+ u32 iir, gt_iir, pm_iir;
+ irqreturn_t ret = IRQ_NONE;
+
++ if (!intel_irqs_enabled(dev_priv))
++ return IRQ_NONE;
++
+ while (true) {
+ /* Find, clear, then process each source of interrupt */
+
+@@ -2167,6 +2170,9 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
+ u32 master_ctl, iir;
+ irqreturn_t ret = IRQ_NONE;
+
++ if (!intel_irqs_enabled(dev_priv))
++ return IRQ_NONE;
++
+ for (;;) {
+ master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
+ iir = I915_READ(VLV_IIR);
+@@ -2455,6 +2461,9 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
+ u32 de_iir, gt_iir, de_ier, sde_ier = 0;
+ irqreturn_t ret = IRQ_NONE;
+
++ if (!intel_irqs_enabled(dev_priv))
++ return IRQ_NONE;
++
+ /* We get interrupts on unclaimed registers, so check for this before we
+ * do any I915_{READ,WRITE}. */
+ intel_uncore_check_errors(dev);
+@@ -2525,6 +2534,9 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
+ uint32_t tmp = 0;
+ enum pipe pipe;
+
++ if (!intel_irqs_enabled(dev_priv))
++ return IRQ_NONE;
++
+ master_ctl = I915_READ(GEN8_MASTER_IRQ);
+ master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
+ if (!master_ctl)
+@@ -4052,6 +4064,9 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
+ I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
+ I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
+
++ if (!intel_irqs_enabled(dev_priv))
++ return IRQ_NONE;
++
+ iir = I915_READ16(IIR);
+ if (iir == 0)
+ return IRQ_NONE;
+@@ -4238,6 +4253,9 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
+ I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
+ int pipe, ret = IRQ_NONE;
+
++ if (!intel_irqs_enabled(dev_priv))
++ return IRQ_NONE;
++
+ iir = I915_READ(IIR);
+ do {
+ bool irq_received = (iir & ~flip_mask) != 0;
+@@ -4466,6 +4484,9 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
+ I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
+ I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
+
++ if (!intel_irqs_enabled(dev_priv))
++ return IRQ_NONE;
++
+ iir = I915_READ(IIR);
+
+ for (;;) {
+@@ -4777,4 +4798,5 @@ void intel_runtime_pm_restore_interrupts(struct drm_device *dev)
+ dev_priv->pm._irqs_disabled = false;
+ dev->driver->irq_preinstall(dev);
+ dev->driver->irq_postinstall(dev);
++ synchronize_irq(dev_priv->dev->irq);
+ }
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index 31b96643b59c..7a7c445b07b4 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -12895,6 +12895,9 @@ static struct intel_quirk intel_quirks[] = {
+
+ /* HP Chromebook 14 (Celeron 2955U) */
+ { 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },
++
++ /* Dell Chromebook 11 */
++ { 0x0a06, 0x1028, 0x0a35, quirk_backlight_present },
+ };
+
+ static void intel_init_quirks(struct drm_device *dev)
+diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
+index 4bcd91757321..740d9ebbebde 100644
+--- a/drivers/gpu/drm/i915/intel_dp.c
++++ b/drivers/gpu/drm/i915/intel_dp.c
+@@ -3645,8 +3645,6 @@ intel_dp_link_down(struct intel_dp *intel_dp)
+ enum port port = intel_dig_port->port;
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+- struct intel_crtc *intel_crtc =
+- to_intel_crtc(intel_dig_port->base.base.crtc);
+ uint32_t DP = intel_dp->DP;
+
+ if (WARN_ON(HAS_DDI(dev)))
+@@ -3671,8 +3669,6 @@ intel_dp_link_down(struct intel_dp *intel_dp)
+
+ if (HAS_PCH_IBX(dev) &&
+ I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
+- struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
+-
+ /* Hardware workaround: leaving our transcoder select
+ * set to transcoder B while it's off will prevent the
+ * corresponding HDMI output on transcoder A.
+@@ -3683,18 +3679,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
+ */
+ DP &= ~DP_PIPEB_SELECT;
+ I915_WRITE(intel_dp->output_reg, DP);
+-
+- /* Changes to enable or select take place the vblank
+- * after being written.
+- */
+- if (WARN_ON(crtc == NULL)) {
+- /* We should never try to disable a port without a crtc
+- * attached. For paranoia keep the code around for a
+- * bit. */
+- POSTING_READ(intel_dp->output_reg);
+- msleep(50);
+- } else
+- intel_wait_for_vblank(dev, intel_crtc->pipe);
++ POSTING_READ(intel_dp->output_reg);
+ }
+
+ DP &= ~DP_AUDIO_OUTPUT_ENABLE;
+diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
+index bafd38b5703e..a97b83b78ae7 100644
+--- a/drivers/gpu/drm/i915/intel_lrc.c
++++ b/drivers/gpu/drm/i915/intel_lrc.c
+@@ -1106,15 +1106,17 @@ static int gen8_emit_flush(struct intel_ringbuffer *ringbuf,
+
+ cmd = MI_FLUSH_DW + 1;
+
+- if (ring == &dev_priv->ring[VCS]) {
+- if (invalidate_domains & I915_GEM_GPU_DOMAINS)
+- cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD |
+- MI_FLUSH_DW_STORE_INDEX |
+- MI_FLUSH_DW_OP_STOREDW;
+- } else {
+- if (invalidate_domains & I915_GEM_DOMAIN_RENDER)
+- cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX |
+- MI_FLUSH_DW_OP_STOREDW;
++ /* We always require a command barrier so that subsequent
++ * commands, such as breadcrumb interrupts, are strictly ordered
++ * wrt the contents of the write cache being flushed to memory
++ * (and thus being coherent from the CPU).
++ */
++ cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
++
++ if (invalidate_domains & I915_GEM_GPU_DOMAINS) {
++ cmd |= MI_INVALIDATE_TLB;
++ if (ring == &dev_priv->ring[VCS])
++ cmd |= MI_INVALIDATE_BSD;
+ }
+
+ intel_logical_ring_emit(ringbuf, cmd);
+diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
+index ae17e77dc08d..9f10b771319f 100644
+--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
++++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
+@@ -2139,6 +2139,14 @@ static int gen6_bsd_ring_flush(struct intel_engine_cs *ring,
+ cmd = MI_FLUSH_DW;
+ if (INTEL_INFO(ring->dev)->gen >= 8)
+ cmd += 1;
++
++ /* We always require a command barrier so that subsequent
++ * commands, such as breadcrumb interrupts, are strictly ordered
++ * wrt the contents of the write cache being flushed to memory
++ * (and thus being coherent from the CPU).
++ */
++ cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
++
+ /*
+ * Bspec vol 1c.5 - video engine command streamer:
+ * "If ENABLED, all TLBs will be invalidated once the flush
+@@ -2146,8 +2154,8 @@ static int gen6_bsd_ring_flush(struct intel_engine_cs *ring,
+ * Post-Sync Operation field is a value of 1h or 3h."
+ */
+ if (invalidate & I915_GEM_GPU_DOMAINS)
+- cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD |
+- MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
++ cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
++
+ intel_ring_emit(ring, cmd);
+ intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
+ if (INTEL_INFO(ring->dev)->gen >= 8) {
+@@ -2242,6 +2250,14 @@ static int gen6_ring_flush(struct intel_engine_cs *ring,
+ cmd = MI_FLUSH_DW;
+ if (INTEL_INFO(ring->dev)->gen >= 8)
+ cmd += 1;
++
++ /* We always require a command barrier so that subsequent
++ * commands, such as breadcrumb interrupts, are strictly ordered
++ * wrt the contents of the write cache being flushed to memory
++ * (and thus being coherent from the CPU).
++ */
++ cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
++
+ /*
+ * Bspec vol 1c.3 - blitter engine command streamer:
+ * "If ENABLED, all TLBs will be invalidated once the flush
+@@ -2249,8 +2265,7 @@ static int gen6_ring_flush(struct intel_engine_cs *ring,
+ * Post-Sync Operation field is a value of 1h or 3h."
+ */
+ if (invalidate & I915_GEM_DOMAIN_RENDER)
+- cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX |
+- MI_FLUSH_DW_OP_STOREDW;
++ cmd |= MI_INVALIDATE_TLB;
+ intel_ring_emit(ring, cmd);
+ intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
+ if (INTEL_INFO(ring->dev)->gen >= 8) {
+diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c
+index 01d841ea3140..731b10a09aa0 100644
+--- a/drivers/gpu/drm/i915/intel_sideband.c
++++ b/drivers/gpu/drm/i915/intel_sideband.c
+@@ -82,7 +82,7 @@ u32 vlv_punit_read(struct drm_i915_private *dev_priv, u8 addr)
+ WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+
+ mutex_lock(&dev_priv->dpio_lock);
+- vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_PUNIT,
++ vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT,
+ SB_CRRDDA_NP, addr, &val);
+ mutex_unlock(&dev_priv->dpio_lock);
+
+@@ -94,7 +94,7 @@ void vlv_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val)
+ WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+
+ mutex_lock(&dev_priv->dpio_lock);
+- vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_PUNIT,
++ vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT,
+ SB_CRWRDA_NP, addr, &val);
+ mutex_unlock(&dev_priv->dpio_lock);
+ }
+@@ -103,7 +103,7 @@ u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg)
+ {
+ u32 val = 0;
+
+- vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_BUNIT,
++ vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_BUNIT,
+ SB_CRRDDA_NP, reg, &val);
+
+ return val;
+@@ -111,7 +111,7 @@ u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg)
+
+ void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
+ {
+- vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_BUNIT,
++ vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_BUNIT,
+ SB_CRWRDA_NP, reg, &val);
+ }
+
+@@ -122,7 +122,7 @@ u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr)
+ WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+
+ mutex_lock(&dev_priv->dpio_lock);
+- vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_NC,
++ vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_NC,
+ SB_CRRDDA_NP, addr, &val);
+ mutex_unlock(&dev_priv->dpio_lock);
+
+@@ -132,56 +132,56 @@ u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr)
+ u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg)
+ {
+ u32 val = 0;
+- vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPIO_NC,
++ vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPIO_NC,
+ SB_CRRDDA_NP, reg, &val);
+ return val;
+ }
+
+ void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
+ {
+- vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPIO_NC,
++ vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPIO_NC,
+ SB_CRWRDA_NP, reg, &val);
+ }
+
+ u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg)
+ {
+ u32 val = 0;
+- vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCK,
++ vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_CCK,
+ SB_CRRDDA_NP, reg, &val);
+ return val;
+ }
+
+ void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
+ {
+- vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCK,
++ vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_CCK,
+ SB_CRWRDA_NP, reg, &val);
+ }
+
+ u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg)
+ {
+ u32 val = 0;
+- vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCU,
++ vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_CCU,
+ SB_CRRDDA_NP, reg, &val);
+ return val;
+ }
+
+ void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
+ {
+- vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCU,
++ vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_CCU,
+ SB_CRWRDA_NP, reg, &val);
+ }
+
+ u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg)
+ {
+ u32 val = 0;
+- vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPS_CORE,
++ vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPS_CORE,
+ SB_CRRDDA_NP, reg, &val);
+ return val;
+ }
+
+ void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
+ {
+- vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPS_CORE,
++ vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPS_CORE,
+ SB_CRWRDA_NP, reg, &val);
+ }
+
+diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
+index db42a670f995..5bf825dfaa09 100644
+--- a/drivers/gpu/drm/radeon/atombios_dp.c
++++ b/drivers/gpu/drm/radeon/atombios_dp.c
+@@ -623,10 +623,8 @@ static int radeon_dp_link_train_init(struct radeon_dp_link_train_info *dp_info)
+ drm_dp_dpcd_writeb(dp_info->aux,
+ DP_DOWNSPREAD_CTRL, 0);
+
+- if ((dp_info->connector->connector_type == DRM_MODE_CONNECTOR_eDP) &&
+- (dig->panel_mode == DP_PANEL_MODE_INTERNAL_DP2_MODE)) {
++ if (dig->panel_mode == DP_PANEL_MODE_INTERNAL_DP2_MODE)
+ drm_dp_dpcd_writeb(dp_info->aux, DP_EDP_CONFIGURATION_SET, 1);
+- }
+
+ /* set the lane count on the sink */
+ tmp = dp_info->dp_lane_count;
+diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
+index 9328fb3dcfce..5f395be9b3e3 100644
+--- a/drivers/gpu/drm/radeon/cik.c
++++ b/drivers/gpu/drm/radeon/cik.c
+@@ -3880,7 +3880,21 @@ void cik_fence_gfx_ring_emit(struct radeon_device *rdev,
+ struct radeon_ring *ring = &rdev->ring[fence->ring];
+ u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
+
+- /* EVENT_WRITE_EOP - flush caches, send int */
++ /* Workaround for cache flush problems. First send a dummy EOP
++ * event down the pipe with seq one below.
++ */
++ radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
++ radeon_ring_write(ring, (EOP_TCL1_ACTION_EN |
++ EOP_TC_ACTION_EN |
++ EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
++ EVENT_INDEX(5)));
++ radeon_ring_write(ring, addr & 0xfffffffc);
++ radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
++ DATA_SEL(1) | INT_SEL(0));
++ radeon_ring_write(ring, fence->seq - 1);
++ radeon_ring_write(ring, 0);
++
++ /* Then send the real EOP event down the pipe. */
+ radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
+ radeon_ring_write(ring, (EOP_TCL1_ACTION_EN |
+ EOP_TC_ACTION_EN |
+@@ -7295,7 +7309,6 @@ int cik_irq_set(struct radeon_device *rdev)
+ u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
+ u32 grbm_int_cntl = 0;
+ u32 dma_cntl, dma_cntl1;
+- u32 thermal_int;
+
+ if (!rdev->irq.installed) {
+ WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
+@@ -7332,13 +7345,6 @@ int cik_irq_set(struct radeon_device *rdev)
+ cp_m2p2 = RREG32(CP_ME2_PIPE2_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
+ cp_m2p3 = RREG32(CP_ME2_PIPE3_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
+
+- if (rdev->flags & RADEON_IS_IGP)
+- thermal_int = RREG32_SMC(CG_THERMAL_INT_CTRL) &
+- ~(THERM_INTH_MASK | THERM_INTL_MASK);
+- else
+- thermal_int = RREG32_SMC(CG_THERMAL_INT) &
+- ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
+-
+ /* enable CP interrupts on all rings */
+ if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
+ DRM_DEBUG("cik_irq_set: sw int gfx\n");
+@@ -7496,14 +7502,6 @@ int cik_irq_set(struct radeon_device *rdev)
+ hpd6 |= DC_HPDx_INT_EN;
+ }
+
+- if (rdev->irq.dpm_thermal) {
+- DRM_DEBUG("dpm thermal\n");
+- if (rdev->flags & RADEON_IS_IGP)
+- thermal_int |= THERM_INTH_MASK | THERM_INTL_MASK;
+- else
+- thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
+- }
+-
+ WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
+
+ WREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET, dma_cntl);
+@@ -7557,11 +7555,6 @@ int cik_irq_set(struct radeon_device *rdev)
+ WREG32(DC_HPD5_INT_CONTROL, hpd5);
+ WREG32(DC_HPD6_INT_CONTROL, hpd6);
+
+- if (rdev->flags & RADEON_IS_IGP)
+- WREG32_SMC(CG_THERMAL_INT_CTRL, thermal_int);
+- else
+- WREG32_SMC(CG_THERMAL_INT, thermal_int);
+-
+ return 0;
+ }
+
+diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c
+index e3e9c10cfba9..85a109e1e56b 100644
+--- a/drivers/gpu/drm/radeon/kv_dpm.c
++++ b/drivers/gpu/drm/radeon/kv_dpm.c
+@@ -1169,6 +1169,19 @@ void kv_dpm_enable_bapm(struct radeon_device *rdev, bool enable)
+ }
+ }
+
++static void kv_enable_thermal_int(struct radeon_device *rdev, bool enable)
++{
++ u32 thermal_int;
++
++ thermal_int = RREG32_SMC(CG_THERMAL_INT_CTRL);
++ if (enable)
++ thermal_int |= THERM_INTH_MASK | THERM_INTL_MASK;
++ else
++ thermal_int &= ~(THERM_INTH_MASK | THERM_INTL_MASK);
++ WREG32_SMC(CG_THERMAL_INT_CTRL, thermal_int);
++
++}
++
+ int kv_dpm_enable(struct radeon_device *rdev)
+ {
+ struct kv_power_info *pi = kv_get_pi(rdev);
+@@ -1280,8 +1293,7 @@ int kv_dpm_late_enable(struct radeon_device *rdev)
+ DRM_ERROR("kv_set_thermal_temperature_range failed\n");
+ return ret;
+ }
+- rdev->irq.dpm_thermal = true;
+- radeon_irq_set(rdev);
++ kv_enable_thermal_int(rdev, true);
+ }
+
+ /* powerdown unused blocks for now */
+@@ -1312,6 +1324,7 @@ void kv_dpm_disable(struct radeon_device *rdev)
+ kv_stop_dpm(rdev);
+ kv_enable_ulv(rdev, false);
+ kv_reset_am(rdev);
++ kv_enable_thermal_int(rdev, false);
+
+ kv_update_current_ps(rdev, rdev->pm.dpm.boot_ps);
+ }
+diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
+index 3faee58946dd..8a83c917cf53 100644
+--- a/drivers/gpu/drm/radeon/ni.c
++++ b/drivers/gpu/drm/radeon/ni.c
+@@ -1085,12 +1085,12 @@ static void cayman_gpu_init(struct radeon_device *rdev)
+
+ if ((rdev->config.cayman.max_backends_per_se == 1) &&
+ (rdev->flags & RADEON_IS_IGP)) {
+- if ((disabled_rb_mask & 3) == 1) {
+- /* RB0 disabled, RB1 enabled */
+- tmp = 0x11111111;
+- } else {
++ if ((disabled_rb_mask & 3) == 2) {
+ /* RB1 disabled, RB0 enabled */
+ tmp = 0x00000000;
++ } else {
++ /* RB0 disabled, RB1 enabled */
++ tmp = 0x11111111;
+ }
+ } else {
+ tmp = gb_addr_config & NUM_PIPES_MASK;
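
The old test was simply inverted. With two render backends, the low two bits of disabled_rb_mask say which one is off, and the remap value must point at the survivor:

	disabled_rb_mask & 3 == 1  ->  RB0 disabled, RB1 alive  ->  tmp = 0x11111111
	disabled_rb_mask & 3 == 2  ->  RB1 disabled, RB0 alive  ->  tmp = 0x00000000
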
+diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c
+index b5c73df8e202..65a0c1c03c69 100644
+--- a/drivers/gpu/drm/radeon/r600_dpm.c
++++ b/drivers/gpu/drm/radeon/r600_dpm.c
+@@ -188,7 +188,7 @@ u32 r600_dpm_get_vrefresh(struct radeon_device *rdev)
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ radeon_crtc = to_radeon_crtc(crtc);
+ if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
+- vrefresh = radeon_crtc->hw_mode.vrefresh;
++ vrefresh = drm_mode_vrefresh(&radeon_crtc->hw_mode);
+ break;
+ }
+ }
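
hw_mode.vrefresh is only a cached field and may legitimately be 0; drm_mode_vrefresh() recomputes the rate from the timings instead. To first order it evaluates:

	vrefresh = mode->clock * 1000 / (mode->htotal * mode->vtotal);
	/* mode->clock is in kHz; interlace/doublescan/vscan adjust the result */
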
+diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
+index df69b92ba164..d79e892093b5 100644
+--- a/drivers/gpu/drm/radeon/radeon_atombios.c
++++ b/drivers/gpu/drm/radeon/radeon_atombios.c
+@@ -3280,6 +3280,7 @@ int radeon_atom_get_voltage_evv(struct radeon_device *rdev,
+
+ args.in.ucVoltageType = VOLTAGE_TYPE_VDDC;
+ args.in.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE;
++ args.in.usVoltageLevel = cpu_to_le16(virtual_voltage_id);
+ args.in.ulSCLKFreq =
+ cpu_to_le32(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[entry_id].clk);
+
+diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
+index 6b670b0bc47b..3a297037cc17 100644
+--- a/drivers/gpu/drm/radeon/radeon_encoders.c
++++ b/drivers/gpu/drm/radeon/radeon_encoders.c
+@@ -179,9 +179,12 @@ static void radeon_encoder_add_backlight(struct radeon_encoder *radeon_encoder,
+ (rdev->pdev->subsystem_vendor == 0x1734) &&
+ (rdev->pdev->subsystem_device == 0x1107))
+ use_bl = false;
++/* Older PPC macs use on-GPU backlight controller */
++#ifndef CONFIG_PPC_PMAC
+ /* disable native backlight control on older asics */
+ else if (rdev->family < CHIP_R600)
+ use_bl = false;
++#endif
+ else
+ use_bl = true;
+ }
+diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
+index 4c0d786d5c7a..194f6245c379 100644
+--- a/drivers/gpu/drm/radeon/radeon_object.c
++++ b/drivers/gpu/drm/radeon/radeon_object.c
+@@ -218,6 +218,18 @@ int radeon_bo_create(struct radeon_device *rdev,
+ * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
+ */
+ bo->flags &= ~RADEON_GEM_GTT_WC;
++#elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
++ /* Don't try to enable write-combining when it can't work, or things
++ * may be slow
++ * See https://bugs.freedesktop.org/show_bug.cgi?id=88758
++ */
++
++#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
++ thanks to write-combining
++
++ DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
++ "better performance thanks to write-combining\n");
++ bo->flags &= ~RADEON_GEM_GTT_WC;
+ #endif
+
+ radeon_ttm_placement_from_domain(bo, domain);
+diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
+index 59736bb810cd..1218419c12f6 100644
+--- a/drivers/gpu/drm/tegra/drm.c
++++ b/drivers/gpu/drm/tegra/drm.c
+@@ -152,7 +152,7 @@ static int host1x_reloc_copy_from_user(struct host1x_reloc *dest,
+ if (err < 0)
+ return err;
+
+- err = get_user(dest->target.offset, &src->cmdbuf.offset);
++ err = get_user(dest->target.offset, &src->target.offset);
+ if (err < 0)
+ return err;
+
+diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
+index 8df8ceb47659..01c7a08a66e1 100644
+--- a/drivers/hid/hid-input.c
++++ b/drivers/hid/hid-input.c
+@@ -1104,6 +1104,23 @@ void hidinput_hid_event(struct hid_device *hid, struct hid_field *field, struct
+ return;
+ }
+
++ /*
++ * Ignore reports for absolute data if the data didn't change. This is
++ * not only an optimization but also fixes 'dead' key reports. Some
++ * RollOver implementations for localized keys (like BACKSLASH/PIPE; HID
++ * 0x31 and 0x32) report multiple keys, even though a localized keyboard
++ * can only have one of them physically available. The 'dead' keys
++ * report constant 0. As all map to the same keycode, they'd confuse
++ * the input layer. If we filter the 'dead' keys on the HID level, we
++ * skip the keycode translation and only forward real events.
++ */
++ if (!(field->flags & (HID_MAIN_ITEM_RELATIVE |
++ HID_MAIN_ITEM_BUFFERED_BYTE)) &&
++ (field->flags & HID_MAIN_ITEM_VARIABLE) &&
++ usage->usage_index < field->maxusage &&
++ value == field->value[usage->usage_index])
++ return;
++
+ /* report the usage code as scancode if the key status has changed */
+ if (usage->type == EV_KEY && !!test_bit(usage->code, input->key) != value)
+ input_event(input, EV_MSC, MSC_SCAN, usage->hid);
+diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
+index 7cf998cdd011..c673eda71460 100644
+--- a/drivers/hid/wacom_wac.c
++++ b/drivers/hid/wacom_wac.c
+@@ -756,6 +756,12 @@ static int wacom_intuos_irq(struct wacom_wac *wacom)
+ input_report_key(input, BTN_7, (data[4] & 0x40)); /* Left */
+ input_report_key(input, BTN_8, (data[4] & 0x80)); /* Down */
+ input_report_key(input, BTN_0, (data[3] & 0x01)); /* Center */
++
++ if (data[4] | (data[3] & 0x01)) {
++ input_report_abs(input, ABS_MISC, PAD_DEVICE_ID);
++ } else {
++ input_report_abs(input, ABS_MISC, 0);
++ }
+ } else if (features->type >= INTUOS5S && features->type <= INTUOSPL) {
+ int i;
+
+diff --git a/drivers/iio/adc/mcp3422.c b/drivers/iio/adc/mcp3422.c
+index 51672256072b..b96c636470ef 100644
+--- a/drivers/iio/adc/mcp3422.c
++++ b/drivers/iio/adc/mcp3422.c
+@@ -58,20 +58,11 @@
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ }
+
+-/* LSB is in nV to eliminate floating point */
+-static const u32 rates_to_lsb[] = {1000000, 250000, 62500, 15625};
+-
+-/*
+- * scales calculated as:
+- * rates_to_lsb[sample_rate] / (1 << pga);
+- * pga is 1 for 0, 2
+- */
+-
+ static const int mcp3422_scales[4][4] = {
+- { 1000000, 250000, 62500, 15625 },
+- { 500000 , 125000, 31250, 7812 },
+- { 250000 , 62500 , 15625, 3906 },
+- { 125000 , 31250 , 7812 , 1953 } };
++ { 1000000, 500000, 250000, 125000 },
++ { 250000 , 125000, 62500 , 31250 },
++ { 62500 , 31250 , 15625 , 7812 },
++ { 15625 , 7812 , 3906 , 1953 } };
+
+ /* Constant msleep times for data acquisitions */
+ static const int mcp3422_read_times[4] = {
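
The corrected table is now genuinely rates_to_lsb[rate] / (1 << pga): rows are indexed by the sample-rate setting, columns by the PGA gain (x1, x2, x4, x8). Spot-checking row 0, where the LSB is 1000000 nV:

	1000000 >> 0 == 1000000		/* gain x1 */
	1000000 >> 1 ==  500000		/* gain x2 */
	1000000 >> 2 ==  250000		/* gain x4 */
	1000000 >> 3 ==  125000		/* gain x8 */
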
+diff --git a/drivers/iio/dac/ad5686.c b/drivers/iio/dac/ad5686.c
+index f57562aa396f..15c73e20272d 100644
+--- a/drivers/iio/dac/ad5686.c
++++ b/drivers/iio/dac/ad5686.c
+@@ -322,7 +322,7 @@ static int ad5686_probe(struct spi_device *spi)
+ st = iio_priv(indio_dev);
+ spi_set_drvdata(spi, indio_dev);
+
+- st->reg = devm_regulator_get(&spi->dev, "vcc");
++ st->reg = devm_regulator_get_optional(&spi->dev, "vcc");
+ if (!IS_ERR(st->reg)) {
+ ret = regulator_enable(st->reg);
+ if (ret)
+diff --git a/drivers/iio/imu/adis16400_core.c b/drivers/iio/imu/adis16400_core.c
+index b70873de04ea..fa795dcd5f75 100644
+--- a/drivers/iio/imu/adis16400_core.c
++++ b/drivers/iio/imu/adis16400_core.c
+@@ -26,6 +26,7 @@
+ #include <linux/list.h>
+ #include <linux/module.h>
+ #include <linux/debugfs.h>
++#include <linux/bitops.h>
+
+ #include <linux/iio/iio.h>
+ #include <linux/iio/sysfs.h>
+@@ -414,7 +415,7 @@ static int adis16400_read_raw(struct iio_dev *indio_dev,
+ mutex_unlock(&indio_dev->mlock);
+ if (ret)
+ return ret;
+- val16 = ((val16 & 0xFFF) << 4) >> 4;
++ val16 = sign_extend32(val16, 11);
+ *val = val16;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_OFFSET:
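
sign_extend32(val16, 11) treats bit 11 as the sign bit of a 12-bit two's-complement sample, which is what the open-coded shift pair attempted; the helper makes the intent explicit and is correct regardless of the intermediate type. An equivalent standalone form:

	/* sign-extend a 12-bit sample: bit 11 is the sign bit */
	static inline s32 sign_extend12(u32 v)
	{
		return (s32)(v << 20) >> 20;
	}
	/* sign_extend12(0x800) == -2048, sign_extend12(0x7ff) == 2047 */
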
+diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
+index 56a4b7ca7ee3..45d67e9228d7 100644
+--- a/drivers/infiniband/core/ucma.c
++++ b/drivers/infiniband/core/ucma.c
+@@ -1124,6 +1124,9 @@ static int ucma_set_ib_path(struct ucma_context *ctx,
+ if (!optlen)
+ return -EINVAL;
+
++ memset(&sa_path, 0, sizeof(sa_path));
++ sa_path.vlan_id = 0xffff;
++
+ ib_sa_unpack_path(path_data->path_rec, &sa_path);
+ ret = rdma_set_ib_paths(ctx->cm_id, &sa_path, 1);
+ if (ret)
+diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
+index 5ba2a86aab6a..63a9f04bdb6c 100644
+--- a/drivers/infiniband/core/uverbs_cmd.c
++++ b/drivers/infiniband/core/uverbs_cmd.c
+@@ -2057,20 +2057,21 @@ ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
+ if (qp->real_qp == qp) {
+ ret = ib_resolve_eth_l2_attrs(qp, attr, &cmd.attr_mask);
+ if (ret)
+- goto out;
++ goto release_qp;
+ ret = qp->device->modify_qp(qp, attr,
+ modify_qp_mask(qp->qp_type, cmd.attr_mask), &udata);
+ } else {
+ ret = ib_modify_qp(qp, attr, modify_qp_mask(qp->qp_type, cmd.attr_mask));
+ }
+
+- put_qp_read(qp);
+-
+ if (ret)
+- goto out;
++ goto release_qp;
+
+ ret = in_len;
+
++release_qp:
++ put_qp_read(qp);
++
+ out:
+ kfree(attr);
+
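
The restructuring is the standard goto-unwind pattern: once the qp read reference is taken, every exit must pass through the label that drops it; the old early "goto out" paths skipped put_qp_read(). Schematically:

	ret = step(qp);
	if (ret)
		goto release_qp;	/* was "goto out": leaked the qp reference */
	/* ... */
release_qp:
	put_qp_read(qp);
out:
	kfree(attr);
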
+diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
+index 8b72cf392b34..3b619b10a372 100644
+--- a/drivers/infiniband/hw/mlx4/main.c
++++ b/drivers/infiniband/hw/mlx4/main.c
+@@ -1221,8 +1221,7 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
+ struct mlx4_ib_qp *mqp = to_mqp(ibqp);
+ u64 reg_id;
+ struct mlx4_ib_steering *ib_steering = NULL;
+- enum mlx4_protocol prot = (gid->raw[1] == 0x0e) ?
+- MLX4_PROT_IB_IPV4 : MLX4_PROT_IB_IPV6;
++ enum mlx4_protocol prot = MLX4_PROT_IB_IPV6;
+
+ if (mdev->dev->caps.steering_mode ==
+ MLX4_STEERING_MODE_DEVICE_MANAGED) {
+@@ -1235,8 +1234,10 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
+ !!(mqp->flags &
+ MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
+ prot, &reg_id);
+- if (err)
++ if (err) {
++ pr_err("multicast attach op failed, err %d\n", err);
+ goto err_malloc;
++ }
+
+ err = add_gid_entry(ibqp, gid);
+ if (err)
+@@ -1284,8 +1285,7 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
+ struct net_device *ndev;
+ struct mlx4_ib_gid_entry *ge;
+ u64 reg_id = 0;
+- enum mlx4_protocol prot = (gid->raw[1] == 0x0e) ?
+- MLX4_PROT_IB_IPV4 : MLX4_PROT_IB_IPV6;
++ enum mlx4_protocol prot = MLX4_PROT_IB_IPV6;
+
+ if (mdev->dev->caps.steering_mode ==
+ MLX4_STEERING_MODE_DEVICE_MANAGED) {
+diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
+index 9c5150c3cb31..03045dd9e5de 100644
+--- a/drivers/infiniband/hw/mlx4/qp.c
++++ b/drivers/infiniband/hw/mlx4/qp.c
+@@ -1669,8 +1669,10 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
+ qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI ||
+ qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_GSI) {
+ err = handle_eth_ud_smac_index(dev, qp, (u8 *)attr->smac, context);
+- if (err)
+- return -EINVAL;
++ if (err) {
++ err = -EINVAL;
++ goto out;
++ }
+ if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI)
+ dev->qp1_proxy[qp->port - 1] = qp;
+ }
+diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
+index 1ba6c42e4df8..820fb8009ed7 100644
+--- a/drivers/infiniband/hw/mlx5/main.c
++++ b/drivers/infiniband/hw/mlx5/main.c
+@@ -987,7 +987,7 @@ static int get_port_caps(struct mlx5_ib_dev *dev)
+ struct ib_device_attr *dprops = NULL;
+ struct ib_port_attr *pprops = NULL;
+ struct mlx5_general_caps *gen;
+- int err = 0;
++ int err = -ENOMEM;
+ int port;
+
+ gen = &dev->mdev->caps.gen;
+diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
+index c00ae093b6f8..b218254ee41b 100644
+--- a/drivers/infiniband/hw/qib/qib.h
++++ b/drivers/infiniband/hw/qib/qib.h
+@@ -1082,12 +1082,6 @@ struct qib_devdata {
+ /* control high-level access to EEPROM */
+ struct mutex eep_lock;
+ uint64_t traffic_wds;
+- /* active time is kept in seconds, but logged in hours */
+- atomic_t active_time;
+- /* Below are nominal shadow of EEPROM, new since last EEPROM update */
+- uint8_t eep_st_errs[QIB_EEP_LOG_CNT];
+- uint8_t eep_st_new_errs[QIB_EEP_LOG_CNT];
+- uint16_t eep_hrs;
+ /*
+ * masks for which bits of errs, hwerrs that cause
+ * each of the counters to increment.
+@@ -1309,8 +1303,7 @@ int qib_twsi_blk_rd(struct qib_devdata *dd, int dev, int addr, void *buffer,
+ int qib_twsi_blk_wr(struct qib_devdata *dd, int dev, int addr,
+ const void *buffer, int len);
+ void qib_get_eeprom_info(struct qib_devdata *);
+-int qib_update_eeprom_log(struct qib_devdata *dd);
+-void qib_inc_eeprom_err(struct qib_devdata *dd, u32 eidx, u32 incr);
++#define qib_inc_eeprom_err(dd, eidx, incr)
+ void qib_dump_lookup_output_queue(struct qib_devdata *);
+ void qib_force_pio_avail_update(struct qib_devdata *);
+ void qib_clear_symerror_on_linkup(unsigned long opaque);
+diff --git a/drivers/infiniband/hw/qib/qib_eeprom.c b/drivers/infiniband/hw/qib/qib_eeprom.c
+index 4d5d71aaa2b4..e2280b07df02 100644
+--- a/drivers/infiniband/hw/qib/qib_eeprom.c
++++ b/drivers/infiniband/hw/qib/qib_eeprom.c
+@@ -267,190 +267,9 @@ void qib_get_eeprom_info(struct qib_devdata *dd)
+ "Board SN %s did not pass functional test: %s\n",
+ dd->serial, ifp->if_comment);
+
+- memcpy(&dd->eep_st_errs, &ifp->if_errcntp, QIB_EEP_LOG_CNT);
+- /*
+- * Power-on (actually "active") hours are kept as little-endian value
+- * in EEPROM, but as seconds in a (possibly as small as 24-bit)
+- * atomic_t while running.
+- */
+- atomic_set(&dd->active_time, 0);
+- dd->eep_hrs = ifp->if_powerhour[0] | (ifp->if_powerhour[1] << 8);
+-
+ done:
+ vfree(buf);
+
+ bail:;
+ }
+
+-/**
+- * qib_update_eeprom_log - copy active-time and error counters to eeprom
+- * @dd: the qlogic_ib device
+- *
+- * Although the time is kept as seconds in the qib_devdata struct, it is
+- * rounded to hours for re-write, as we have only 16 bits in EEPROM.
+- * First-cut code reads whole (expected) struct qib_flash, modifies,
+- * re-writes. Future direction: read/write only what we need, assuming
+- * that the EEPROM had to have been "good enough" for driver init, and
+- * if not, we aren't making it worse.
+- *
+- */
+-int qib_update_eeprom_log(struct qib_devdata *dd)
+-{
+- void *buf;
+- struct qib_flash *ifp;
+- int len, hi_water;
+- uint32_t new_time, new_hrs;
+- u8 csum;
+- int ret, idx;
+- unsigned long flags;
+-
+- /* first, check if we actually need to do anything. */
+- ret = 0;
+- for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) {
+- if (dd->eep_st_new_errs[idx]) {
+- ret = 1;
+- break;
+- }
+- }
+- new_time = atomic_read(&dd->active_time);
+-
+- if (ret == 0 && new_time < 3600)
+- goto bail;
+-
+- /*
+- * The quick-check above determined that there is something worthy
+- * of logging, so get current contents and do a more detailed idea.
+- * read full flash, not just currently used part, since it may have
+- * been written with a newer definition
+- */
+- len = sizeof(struct qib_flash);
+- buf = vmalloc(len);
+- ret = 1;
+- if (!buf) {
+- qib_dev_err(dd,
+- "Couldn't allocate memory to read %u bytes from eeprom for logging\n",
+- len);
+- goto bail;
+- }
+-
+- /* Grab semaphore and read current EEPROM. If we get an
+- * error, let go, but if not, keep it until we finish write.
+- */
+- ret = mutex_lock_interruptible(&dd->eep_lock);
+- if (ret) {
+- qib_dev_err(dd, "Unable to acquire EEPROM for logging\n");
+- goto free_bail;
+- }
+- ret = qib_twsi_blk_rd(dd, dd->twsi_eeprom_dev, 0, buf, len);
+- if (ret) {
+- mutex_unlock(&dd->eep_lock);
+- qib_dev_err(dd, "Unable read EEPROM for logging\n");
+- goto free_bail;
+- }
+- ifp = (struct qib_flash *)buf;
+-
+- csum = flash_csum(ifp, 0);
+- if (csum != ifp->if_csum) {
+- mutex_unlock(&dd->eep_lock);
+- qib_dev_err(dd, "EEPROM cks err (0x%02X, S/B 0x%02X)\n",
+- csum, ifp->if_csum);
+- ret = 1;
+- goto free_bail;
+- }
+- hi_water = 0;
+- spin_lock_irqsave(&dd->eep_st_lock, flags);
+- for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) {
+- int new_val = dd->eep_st_new_errs[idx];
+- if (new_val) {
+- /*
+- * If we have seen any errors, add to EEPROM values
+- * We need to saturate at 0xFF (255) and we also
+- * would need to adjust the checksum if we were
+- * trying to minimize EEPROM traffic
+- * Note that we add to actual current count in EEPROM,
+- * in case it was altered while we were running.
+- */
+- new_val += ifp->if_errcntp[idx];
+- if (new_val > 0xFF)
+- new_val = 0xFF;
+- if (ifp->if_errcntp[idx] != new_val) {
+- ifp->if_errcntp[idx] = new_val;
+- hi_water = offsetof(struct qib_flash,
+- if_errcntp) + idx;
+- }
+- /*
+- * update our shadow (used to minimize EEPROM
+- * traffic), to match what we are about to write.
+- */
+- dd->eep_st_errs[idx] = new_val;
+- dd->eep_st_new_errs[idx] = 0;
+- }
+- }
+- /*
+- * Now update active-time. We would like to round to the nearest hour
+- * but unless atomic_t are sure to be proper signed ints we cannot,
+- * because we need to account for what we "transfer" to EEPROM and
+- * if we log an hour at 31 minutes, then we would need to set
+- * active_time to -29 to accurately count the _next_ hour.
+- */
+- if (new_time >= 3600) {
+- new_hrs = new_time / 3600;
+- atomic_sub((new_hrs * 3600), &dd->active_time);
+- new_hrs += dd->eep_hrs;
+- if (new_hrs > 0xFFFF)
+- new_hrs = 0xFFFF;
+- dd->eep_hrs = new_hrs;
+- if ((new_hrs & 0xFF) != ifp->if_powerhour[0]) {
+- ifp->if_powerhour[0] = new_hrs & 0xFF;
+- hi_water = offsetof(struct qib_flash, if_powerhour);
+- }
+- if ((new_hrs >> 8) != ifp->if_powerhour[1]) {
+- ifp->if_powerhour[1] = new_hrs >> 8;
+- hi_water = offsetof(struct qib_flash, if_powerhour) + 1;
+- }
+- }
+- /*
+- * There is a tiny possibility that we could somehow fail to write
+- * the EEPROM after updating our shadows, but problems from holding
+- * the spinlock too long are a much bigger issue.
+- */
+- spin_unlock_irqrestore(&dd->eep_st_lock, flags);
+- if (hi_water) {
+- /* we made some change to the data, uopdate cksum and write */
+- csum = flash_csum(ifp, 1);
+- ret = eeprom_write_with_enable(dd, 0, buf, hi_water + 1);
+- }
+- mutex_unlock(&dd->eep_lock);
+- if (ret)
+- qib_dev_err(dd, "Failed updating EEPROM\n");
+-
+-free_bail:
+- vfree(buf);
+-bail:
+- return ret;
+-}
+-
+-/**
+- * qib_inc_eeprom_err - increment one of the four error counters
+- * that are logged to EEPROM.
+- * @dd: the qlogic_ib device
+- * @eidx: 0..3, the counter to increment
+- * @incr: how much to add
+- *
+- * Each counter is 8-bits, and saturates at 255 (0xFF). They
+- * are copied to the EEPROM (aka flash) whenever qib_update_eeprom_log()
+- * is called, but it can only be called in a context that allows sleep.
+- * This function can be called even at interrupt level.
+- */
+-void qib_inc_eeprom_err(struct qib_devdata *dd, u32 eidx, u32 incr)
+-{
+- uint new_val;
+- unsigned long flags;
+-
+- spin_lock_irqsave(&dd->eep_st_lock, flags);
+- new_val = dd->eep_st_new_errs[eidx] + incr;
+- if (new_val > 255)
+- new_val = 255;
+- dd->eep_st_new_errs[eidx] = new_val;
+- spin_unlock_irqrestore(&dd->eep_st_lock, flags);
+-}
+diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c
+index d68266ac7619..f7f49a6c34b0 100644
+--- a/drivers/infiniband/hw/qib/qib_iba6120.c
++++ b/drivers/infiniband/hw/qib/qib_iba6120.c
+@@ -2681,8 +2681,6 @@ static void qib_get_6120_faststats(unsigned long opaque)
+ spin_lock_irqsave(&dd->eep_st_lock, flags);
+ traffic_wds -= dd->traffic_wds;
+ dd->traffic_wds += traffic_wds;
+- if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
+- atomic_add(5, &dd->active_time); /* S/B #define */
+ spin_unlock_irqrestore(&dd->eep_st_lock, flags);
+
+ qib_chk_6120_errormask(dd);
+diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c
+index 7dec89fdc124..f5fa106e1992 100644
+--- a/drivers/infiniband/hw/qib/qib_iba7220.c
++++ b/drivers/infiniband/hw/qib/qib_iba7220.c
+@@ -3297,8 +3297,6 @@ static void qib_get_7220_faststats(unsigned long opaque)
+ spin_lock_irqsave(&dd->eep_st_lock, flags);
+ traffic_wds -= dd->traffic_wds;
+ dd->traffic_wds += traffic_wds;
+- if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
+- atomic_add(5, &dd->active_time); /* S/B #define */
+ spin_unlock_irqrestore(&dd->eep_st_lock, flags);
+ done:
+ mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
+diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
+index a7eb32517a04..23ca2aca1ad6 100644
+--- a/drivers/infiniband/hw/qib/qib_iba7322.c
++++ b/drivers/infiniband/hw/qib/qib_iba7322.c
+@@ -5178,8 +5178,6 @@ static void qib_get_7322_faststats(unsigned long opaque)
+ spin_lock_irqsave(&ppd->dd->eep_st_lock, flags);
+ traffic_wds -= ppd->dd->traffic_wds;
+ ppd->dd->traffic_wds += traffic_wds;
+- if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
+- atomic_add(ACTIVITY_TIMER, &ppd->dd->active_time);
+ spin_unlock_irqrestore(&ppd->dd->eep_st_lock, flags);
+ if (ppd->cpspec->qdr_dfe_on && (ppd->link_speed_active &
+ QIB_IB_QDR) &&
+diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
+index 729da39c49ed..738269b46d83 100644
+--- a/drivers/infiniband/hw/qib/qib_init.c
++++ b/drivers/infiniband/hw/qib/qib_init.c
+@@ -931,7 +931,6 @@ static void qib_shutdown_device(struct qib_devdata *dd)
+ qib_free_pportdata(ppd);
+ }
+
+- qib_update_eeprom_log(dd);
+ }
+
+ /**
+diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c
+index 3c8e4e3caca6..b9ccbda7817d 100644
+--- a/drivers/infiniband/hw/qib/qib_sysfs.c
++++ b/drivers/infiniband/hw/qib/qib_sysfs.c
+@@ -611,28 +611,6 @@ bail:
+ return ret < 0 ? ret : count;
+ }
+
+-static ssize_t show_logged_errs(struct device *device,
+- struct device_attribute *attr, char *buf)
+-{
+- struct qib_ibdev *dev =
+- container_of(device, struct qib_ibdev, ibdev.dev);
+- struct qib_devdata *dd = dd_from_dev(dev);
+- int idx, count;
+-
+- /* force consistency with actual EEPROM */
+- if (qib_update_eeprom_log(dd) != 0)
+- return -ENXIO;
+-
+- count = 0;
+- for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) {
+- count += scnprintf(buf + count, PAGE_SIZE - count, "%d%c",
+- dd->eep_st_errs[idx],
+- idx == (QIB_EEP_LOG_CNT - 1) ? '\n' : ' ');
+- }
+-
+- return count;
+-}
+-
+ /*
+ * Dump tempsense regs. in decimal, to ease shell-scripts.
+ */
+@@ -679,7 +657,6 @@ static DEVICE_ATTR(nctxts, S_IRUGO, show_nctxts, NULL);
+ static DEVICE_ATTR(nfreectxts, S_IRUGO, show_nfreectxts, NULL);
+ static DEVICE_ATTR(serial, S_IRUGO, show_serial, NULL);
+ static DEVICE_ATTR(boardversion, S_IRUGO, show_boardversion, NULL);
+-static DEVICE_ATTR(logged_errors, S_IRUGO, show_logged_errs, NULL);
+ static DEVICE_ATTR(tempsense, S_IRUGO, show_tempsense, NULL);
+ static DEVICE_ATTR(localbus_info, S_IRUGO, show_localbus_info, NULL);
+ static DEVICE_ATTR(chip_reset, S_IWUSR, NULL, store_chip_reset);
+@@ -693,7 +670,6 @@ static struct device_attribute *qib_attributes[] = {
+ &dev_attr_nfreectxts,
+ &dev_attr_serial,
+ &dev_attr_boardversion,
+- &dev_attr_logged_errors,
+ &dev_attr_tempsense,
+ &dev_attr_localbus_info,
+ &dev_attr_chip_reset,
+diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
+index f14c3849e568..e4bc9409243f 100644
+--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
++++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
+@@ -654,7 +654,9 @@ int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
+ enum dma_data_direction dma_dir);
+
+ void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task,
+- struct iser_data_buf *data);
++ struct iser_data_buf *data,
++ enum dma_data_direction dir);
++
+ int iser_initialize_task_headers(struct iscsi_task *task,
+ struct iser_tx_desc *tx_desc);
+ int iser_alloc_rx_descriptors(struct iser_conn *iser_conn,
+diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
+index 3821633f1065..20e859a6f1a6 100644
+--- a/drivers/infiniband/ulp/iser/iser_initiator.c
++++ b/drivers/infiniband/ulp/iser/iser_initiator.c
+@@ -320,9 +320,6 @@ void iser_free_rx_descriptors(struct iser_conn *iser_conn)
+ struct ib_conn *ib_conn = &iser_conn->ib_conn;
+ struct iser_device *device = ib_conn->device;
+
+- if (!iser_conn->rx_descs)
+- goto free_login_buf;
+-
+ if (device->iser_free_rdma_reg_res)
+ device->iser_free_rdma_reg_res(ib_conn);
+
+@@ -334,7 +331,6 @@ void iser_free_rx_descriptors(struct iser_conn *iser_conn)
+ /* make sure we never redo any unmapping */
+ iser_conn->rx_descs = NULL;
+
+-free_login_buf:
+ iser_free_login_buf(iser_conn);
+ }
+
+@@ -714,19 +710,23 @@ void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
+ device->iser_unreg_rdma_mem(iser_task, ISER_DIR_IN);
+ if (is_rdma_data_aligned)
+ iser_dma_unmap_task_data(iser_task,
+- &iser_task->data[ISER_DIR_IN]);
++ &iser_task->data[ISER_DIR_IN],
++ DMA_FROM_DEVICE);
+ if (prot_count && is_rdma_prot_aligned)
+ iser_dma_unmap_task_data(iser_task,
+- &iser_task->prot[ISER_DIR_IN]);
++ &iser_task->prot[ISER_DIR_IN],
++ DMA_FROM_DEVICE);
+ }
+
+ if (iser_task->dir[ISER_DIR_OUT]) {
+ device->iser_unreg_rdma_mem(iser_task, ISER_DIR_OUT);
+ if (is_rdma_data_aligned)
+ iser_dma_unmap_task_data(iser_task,
+- &iser_task->data[ISER_DIR_OUT]);
++ &iser_task->data[ISER_DIR_OUT],
++ DMA_TO_DEVICE);
+ if (prot_count && is_rdma_prot_aligned)
+ iser_dma_unmap_task_data(iser_task,
+- &iser_task->prot[ISER_DIR_OUT]);
++ &iser_task->prot[ISER_DIR_OUT],
++ DMA_TO_DEVICE);
+ }
+ }
+diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
+index 6c5ce357fba6..424783f3e0af 100644
+--- a/drivers/infiniband/ulp/iser/iser_memory.c
++++ b/drivers/infiniband/ulp/iser/iser_memory.c
+@@ -333,12 +333,13 @@ int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
+ }
+
+ void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task,
+- struct iser_data_buf *data)
++ struct iser_data_buf *data,
++ enum dma_data_direction dir)
+ {
+ struct ib_device *dev;
+
+ dev = iser_task->iser_conn->ib_conn.device->ib_device;
+- ib_dma_unmap_sg(dev, data->buf, data->size, DMA_FROM_DEVICE);
++ ib_dma_unmap_sg(dev, data->buf, data->size, dir);
+ }
+
+ static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task,
+@@ -358,7 +359,9 @@ static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task,
+ iser_data_buf_dump(mem, ibdev);
+
+ /* unmap the command data before accessing it */
+- iser_dma_unmap_task_data(iser_task, mem);
++ iser_dma_unmap_task_data(iser_task, mem,
++ (cmd_dir == ISER_DIR_OUT) ?
++ DMA_TO_DEVICE : DMA_FROM_DEVICE);
+
+ /* allocate copy buf, if we are writing, copy the */
+ /* unaligned scatterlist, dma map the copy */
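
The hunks above thread an explicit dma_data_direction through iser_dma_unmap_task_data(), so buffers that were mapped for a write are also unmapped with DMA_TO_DEVICE instead of the previous unconditional DMA_FROM_DEVICE. A schematic sketch of the pairing rule (the struct and helpers are illustrative, not iser's):

	struct my_task {
		struct scatterlist *sg;
		int nents;
		enum dma_data_direction dir;	/* remembered at map time */
	};

	static int task_map(struct ib_device *dev, struct my_task *t,
			    bool is_write)
	{
		t->dir = is_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
		return ib_dma_map_sg(dev, t->sg, t->nents, t->dir);
	}

	static void task_unmap(struct ib_device *dev, struct my_task *t)
	{
		/* must match the direction used by ib_dma_map_sg() */
		ib_dma_unmap_sg(dev, t->sg, t->nents, t->dir);
	}

Unmapping with the wrong direction is at best a missed cache writeback and at worst data corruption on non-coherent platforms, which is why the direction is now carried by the caller.
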
+diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
+index 67225bb82bb5..d004d6ee2c1a 100644
+--- a/drivers/infiniband/ulp/iser/iser_verbs.c
++++ b/drivers/infiniband/ulp/iser/iser_verbs.c
+@@ -567,16 +567,16 @@ void iser_release_work(struct work_struct *work)
+ /**
+ * iser_free_ib_conn_res - release IB related resources
+ * @iser_conn: iser connection struct
+- * @destroy_device: indicator if we need to try to release
+- * the iser device (only iscsi shutdown and DEVICE_REMOVAL
+- * will use this.
++ * @destroy: indicator if we need to try to release the
++ * iser device and memory regions pool (only iscsi
++ * shutdown and DEVICE_REMOVAL will use this).
+ *
+ * This routine is called with the iser state mutex held
+ * so the cm_id removal is out of here. It is safe to
+ * be invoked multiple times.
+ */
+ static void iser_free_ib_conn_res(struct iser_conn *iser_conn,
+- bool destroy_device)
++ bool destroy)
+ {
+ struct ib_conn *ib_conn = &iser_conn->ib_conn;
+ struct iser_device *device = ib_conn->device;
+@@ -584,17 +584,20 @@ static void iser_free_ib_conn_res(struct iser_conn *iser_conn,
+ iser_info("freeing conn %p cma_id %p qp %p\n",
+ iser_conn, ib_conn->cma_id, ib_conn->qp);
+
+- iser_free_rx_descriptors(iser_conn);
+-
+ if (ib_conn->qp != NULL) {
+ ib_conn->comp->active_qps--;
+ rdma_destroy_qp(ib_conn->cma_id);
+ ib_conn->qp = NULL;
+ }
+
+- if (destroy_device && device != NULL) {
+- iser_device_try_release(device);
+- ib_conn->device = NULL;
++ if (destroy) {
++ if (iser_conn->rx_descs)
++ iser_free_rx_descriptors(iser_conn);
++
++ if (device != NULL) {
++ iser_device_try_release(device);
++ ib_conn->device = NULL;
++ }
+ }
+ }
+
+@@ -803,7 +806,7 @@ static void iser_disconnected_handler(struct rdma_cm_id *cma_id)
+ }
+
+ static void iser_cleanup_handler(struct rdma_cm_id *cma_id,
+- bool destroy_device)
++ bool destroy)
+ {
+ struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;
+
+@@ -813,7 +816,7 @@ static void iser_cleanup_handler(struct rdma_cm_id *cma_id,
+ * and flush errors.
+ */
+ iser_disconnected_handler(cma_id);
+- iser_free_ib_conn_res(iser_conn, destroy_device);
++ iser_free_ib_conn_res(iser_conn, destroy);
+ complete(&iser_conn->ib_completion);
+ };
+
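
Besides the rename, the teardown above becomes two-stage: the QP is always destroyed, but the RX descriptors and the shared iser device are now released only when destroy is set, so a plain disconnect leaves them available for reuse. A condensed sketch of that split (the struct and the two free helpers are illustrative):

	static void conn_free_res(struct my_conn *conn, bool destroy)
	{
		if (conn->qp) {			/* per-connection: always */
			rdma_destroy_qp(conn->cma_id);
			conn->qp = NULL;
		}
		if (destroy) {			/* final teardown only */
			if (conn->rx_descs)
				free_rx_descriptors(conn);  /* illustrative */
			put_shared_device(conn);	    /* illustrative */
		}
	}
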
+diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
+index c09359db3a90..37de0173b6d2 100644
+--- a/drivers/md/dm-io.c
++++ b/drivers/md/dm-io.c
+@@ -290,6 +290,12 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
+ unsigned short logical_block_size = queue_logical_block_size(q);
+ sector_t num_sectors;
+
++ /* Reject unsupported discard requests */
++ if ((rw & REQ_DISCARD) && !blk_queue_discard(q)) {
++ dec_count(io, region, -EOPNOTSUPP);
++ return;
++ }
++
+ /*
+ * where->count may be zero if rw holds a flush and we need to
+ * send a zero-sized flush.
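
The added guard completes unsupported REQ_DISCARD requests with -EOPNOTSUPP up front instead of letting them fail further down the stack. In the 3.18-era block API the capability test looks like this (the wrapper function is illustrative):

	static bool dev_supports_discard(struct block_device *bdev)
	{
		struct request_queue *q = bdev_get_queue(bdev);

		/* blk_queue_discard() tests QUEUE_FLAG_DISCARD */
		return q && blk_queue_discard(q);
	}

Callers that cannot honor a discard should fail it early and explicitly, as do_region() now does, rather than surface a confusing I/O error from a lower layer.
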
+diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
+index 7dfdb5c746d6..089d62751f7f 100644
+--- a/drivers/md/dm-raid1.c
++++ b/drivers/md/dm-raid1.c
+@@ -604,6 +604,15 @@ static void write_callback(unsigned long error, void *context)
+ return;
+ }
+
++ /*
++ * If the bio is discard, return an error, but do not
++ * degrade the array.
++ */
++ if (bio->bi_rw & REQ_DISCARD) {
++ bio_endio(bio, -EOPNOTSUPP);
++ return;
++ }
++
+ for (i = 0; i < ms->nr_mirrors; i++)
+ if (test_bit(i, &error))
+ fail_mirror(ms->mirror + i, DM_RAID1_WRITE_ERROR);
+diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
+index 864b03f47727..8b204ae216ab 100644
+--- a/drivers/md/dm-snap.c
++++ b/drivers/md/dm-snap.c
+@@ -1432,8 +1432,6 @@ out:
+ full_bio->bi_private = pe->full_bio_private;
+ atomic_inc(&full_bio->bi_remaining);
+ }
+- free_pending_exception(pe);
+-
+ increment_pending_exceptions_done_count();
+
+ up_write(&s->lock);
+@@ -1450,6 +1448,8 @@ out:
+ }
+
+ retry_origin_bios(s, origin_bios);
++
++ free_pending_exception(pe);
+ }
+
+ static void commit_callback(void *context, int success)
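
The dm-snap change is purely an ordering fix: free_pending_exception() moves after retry_origin_bios(), because the retried bios can still depend on state pinned by the pending exception, and freeing it first opened a small use-after-free window. The shape of the rule, with hypothetical names:

	struct bio_list bios;

	bio_list_init(&bios);
	bio_list_merge(&bios, &pe->origin_bios); /* detach survivors */
	bio_list_init(&pe->origin_bios);

	retry_bios(&bios);	/* may still reach state pe pins */
	free_pending(pe);	/* drop the object only after its last use */
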
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index 62c51364cf9e..cec85c5bae9e 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -2462,7 +2462,7 @@ int dm_setup_md_queue(struct mapped_device *md)
+ return 0;
+ }
+
+-static struct mapped_device *dm_find_md(dev_t dev)
++struct mapped_device *dm_get_md(dev_t dev)
+ {
+ struct mapped_device *md;
+ unsigned minor = MINOR(dev);
+@@ -2473,12 +2473,15 @@ static struct mapped_device *dm_find_md(dev_t dev)
+ spin_lock(&_minor_lock);
+
+ md = idr_find(&_minor_idr, minor);
+- if (md && (md == MINOR_ALLOCED ||
+- (MINOR(disk_devt(dm_disk(md))) != minor) ||
+- dm_deleting_md(md) ||
+- test_bit(DMF_FREEING, &md->flags))) {
+- md = NULL;
+- goto out;
++ if (md) {
++ if ((md == MINOR_ALLOCED ||
++ (MINOR(disk_devt(dm_disk(md))) != minor) ||
++ dm_deleting_md(md) ||
++ test_bit(DMF_FREEING, &md->flags))) {
++ md = NULL;
++ goto out;
++ }
++ dm_get(md);
+ }
+
+ out:
+@@ -2486,16 +2489,6 @@ out:
+
+ return md;
+ }
+-
+-struct mapped_device *dm_get_md(dev_t dev)
+-{
+- struct mapped_device *md = dm_find_md(dev);
+-
+- if (md)
+- dm_get(md);
+-
+- return md;
+-}
+ EXPORT_SYMBOL_GPL(dm_get_md);
+
+ void *dm_get_mdptr(struct mapped_device *md)
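
Folding dm_find_md() into dm_get_md() is the point of this hunk: dm_get() now runs inside _minor_lock, so the mapped_device returned by idr_find() cannot be torn down between lookup and reference acquisition. The general lookup-and-get-under-lock pattern (obj_is_dying() is illustrative):

	spin_lock(&table_lock);
	obj = idr_find(&table, id);
	if (obj && !obj_is_dying(obj))
		kref_get(&obj->ref);	/* safe while the lock is held */
	else
		obj = NULL;
	spin_unlock(&table_lock);
	return obj;

Taking the reference outside the lock, as the removed wrapper did, races with any free path that removes the object from the IDR under the same lock and then frees it.
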
+diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h
+index 6ee785da574e..9ff67b1e1d39 100644
+--- a/drivers/misc/cxl/cxl.h
++++ b/drivers/misc/cxl/cxl.h
+@@ -471,6 +471,7 @@ void cxl_release_one_irq(struct cxl *adapter, int hwirq);
+ int cxl_alloc_irq_ranges(struct cxl_irq_ranges *irqs, struct cxl *adapter, unsigned int num);
+ void cxl_release_irq_ranges(struct cxl_irq_ranges *irqs, struct cxl *adapter);
+ int cxl_setup_irq(struct cxl *adapter, unsigned int hwirq, unsigned int virq);
++int cxl_update_image_control(struct cxl *adapter);
+
+ /* common == phyp + powernv */
+ struct cxl_process_element_common {
+diff --git a/drivers/misc/cxl/irq.c b/drivers/misc/cxl/irq.c
+index 336020c8e1af..6fe4027feb7d 100644
+--- a/drivers/misc/cxl/irq.c
++++ b/drivers/misc/cxl/irq.c
+@@ -174,6 +174,7 @@ static irqreturn_t cxl_irq(int irq, void *data)
+ }
+
+ cxl_ack_irq(ctx, CXL_PSL_TFC_An_A, 0);
++ return IRQ_HANDLED;
+ }
+ if (dsisr & CXL_PSL_DSISR_An_OC)
+ pr_devel("CXL interrupt: OS Context Warning\n");
+diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
+index 0f2cc9f8b4db..eee4fd606dc1 100644
+--- a/drivers/misc/cxl/pci.c
++++ b/drivers/misc/cxl/pci.c
+@@ -316,7 +316,7 @@ static int init_implementation_adapter_regs(struct cxl *adapter, struct pci_dev
+ u64 psl_dsnctl;
+ u64 chipid;
+
+- if (!(np = pnv_pci_to_phb_node(dev)))
++ if (!(np = pnv_pci_get_phb_node(dev)))
+ return -ENODEV;
+
+ while (np && !(prop = of_get_property(np, "ibm,chip-id", NULL)))
+@@ -361,6 +361,41 @@ int cxl_setup_irq(struct cxl *adapter, unsigned int hwirq,
+ return pnv_cxl_ioda_msi_setup(dev, hwirq, virq);
+ }
+
++int cxl_update_image_control(struct cxl *adapter)
++{
++ struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
++ int rc;
++ int vsec;
++ u8 image_state;
++
++ if (!(vsec = find_cxl_vsec(dev))) {
++ dev_err(&dev->dev, "ABORTING: CXL VSEC not found!\n");
++ return -ENODEV;
++ }
++
++ if ((rc = CXL_READ_VSEC_IMAGE_STATE(dev, vsec, &image_state))) {
++ dev_err(&dev->dev, "failed to read image state: %i\n", rc);
++ return rc;
++ }
++
++ if (adapter->perst_loads_image)
++ image_state |= CXL_VSEC_PERST_LOADS_IMAGE;
++ else
++ image_state &= ~CXL_VSEC_PERST_LOADS_IMAGE;
++
++ if (adapter->perst_select_user)
++ image_state |= CXL_VSEC_PERST_SELECT_USER;
++ else
++ image_state &= ~CXL_VSEC_PERST_SELECT_USER;
++
++ if ((rc = CXL_WRITE_VSEC_IMAGE_STATE(dev, vsec, image_state))) {
++ dev_err(&dev->dev, "failed to update image control: %i\n", rc);
++ return rc;
++ }
++
++ return 0;
++}
++
+ int cxl_alloc_one_irq(struct cxl *adapter)
+ {
+ struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
+@@ -770,8 +805,8 @@ static int cxl_read_vsec(struct cxl *adapter, struct pci_dev *dev)
+ CXL_READ_VSEC_BASE_IMAGE(dev, vsec, &adapter->base_image);
+ CXL_READ_VSEC_IMAGE_STATE(dev, vsec, &image_state);
+ adapter->user_image_loaded = !!(image_state & CXL_VSEC_USER_IMAGE_LOADED);
+- adapter->perst_loads_image = !!(image_state & CXL_VSEC_PERST_LOADS_IMAGE);
+- adapter->perst_select_user = !!(image_state & CXL_VSEC_PERST_SELECT_USER);
++ adapter->perst_loads_image = true;
++ adapter->perst_select_user = !!(image_state & CXL_VSEC_USER_IMAGE_LOADED);
+
+ CXL_READ_VSEC_NAFUS(dev, vsec, &adapter->slices);
+ CXL_READ_VSEC_AFU_DESC_OFF(dev, vsec, &afu_desc_off);
+@@ -879,6 +914,9 @@ static struct cxl *cxl_init_adapter(struct pci_dev *dev)
+ if ((rc = cxl_vsec_looks_ok(adapter, dev)))
+ goto err2;
+
++ if ((rc = cxl_update_image_control(adapter)))
++ goto err2;
++
+ if ((rc = cxl_map_adapter_regs(adapter, dev)))
+ goto err2;
+
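
The new cxl_update_image_control() is a read-modify-write over the VSEC image-state byte: read the current state, set or clear the two PERST bits from the adapter flags, write the byte back, and propagate any config-access error. The bit manipulation in isolation (the bit values below are placeholders, not the real VSEC encoding):

	#define FLAG_PERST_LOADS_IMAGE	0x01	/* illustrative */
	#define FLAG_PERST_SELECT_USER	0x02	/* illustrative */

	static u8 image_state_bits(u8 state, bool loads_image,
				   bool select_user)
	{
		if (loads_image)
			state |= FLAG_PERST_LOADS_IMAGE;
		else
			state &= ~FLAG_PERST_LOADS_IMAGE;

		if (select_user)
			state |= FLAG_PERST_SELECT_USER;
		else
			state &= ~FLAG_PERST_SELECT_USER;

		return state;
	}

Note also the deliberate policy change in cxl_read_vsec(): perst_loads_image is now forced true, and perst_select_user follows whichever image is currently loaded.
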
+diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
+index 7901d076c127..6c267162f151 100644
+--- a/drivers/misc/mei/init.c
++++ b/drivers/misc/mei/init.c
+@@ -313,6 +313,8 @@ void mei_stop(struct mei_device *dev)
+
+ dev->dev_state = MEI_DEV_POWER_DOWN;
+ mei_reset(dev);
++ /* move device to disabled state unconditionally */
++ dev->dev_state = MEI_DEV_DISABLED;
+
+ mutex_unlock(&dev->device_lock);
+
+diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+index da1a2500c91c..bb27028d392b 100644
+--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+@@ -872,13 +872,14 @@ static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_priv *priv,
+ }
+
+ /* Unlocked version of the reclaim routine */
+-static void __bcmgenet_tx_reclaim(struct net_device *dev,
+- struct bcmgenet_tx_ring *ring)
++static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
++ struct bcmgenet_tx_ring *ring)
+ {
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ int last_tx_cn, last_c_index, num_tx_bds;
+ struct enet_cb *tx_cb_ptr;
+ struct netdev_queue *txq;
++ unsigned int pkts_compl = 0;
+ unsigned int bds_compl;
+ unsigned int c_index;
+
+@@ -906,6 +907,7 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev,
+ tx_cb_ptr = ring->cbs + last_c_index;
+ bds_compl = 0;
+ if (tx_cb_ptr->skb) {
++ pkts_compl++;
+ bds_compl = skb_shinfo(tx_cb_ptr->skb)->nr_frags + 1;
+ dev->stats.tx_bytes += tx_cb_ptr->skb->len;
+ dma_unmap_single(&dev->dev,
+@@ -929,23 +931,45 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev,
+ last_c_index &= (num_tx_bds - 1);
+ }
+
+- if (ring->free_bds > (MAX_SKB_FRAGS + 1))
+- ring->int_disable(priv, ring);
+-
+- if (netif_tx_queue_stopped(txq))
+- netif_tx_wake_queue(txq);
++ if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
++ if (netif_tx_queue_stopped(txq))
++ netif_tx_wake_queue(txq);
++ }
+
+ ring->c_index = c_index;
++
++ return pkts_compl;
+ }
+
+-static void bcmgenet_tx_reclaim(struct net_device *dev,
++static unsigned int bcmgenet_tx_reclaim(struct net_device *dev,
+ struct bcmgenet_tx_ring *ring)
+ {
++ unsigned int released;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ring->lock, flags);
+- __bcmgenet_tx_reclaim(dev, ring);
++ released = __bcmgenet_tx_reclaim(dev, ring);
+ spin_unlock_irqrestore(&ring->lock, flags);
++
++ return released;
++}
++
++static int bcmgenet_tx_poll(struct napi_struct *napi, int budget)
++{
++ struct bcmgenet_tx_ring *ring =
++ container_of(napi, struct bcmgenet_tx_ring, napi);
++ unsigned int work_done = 0;
++
++ work_done = bcmgenet_tx_reclaim(ring->priv->dev, ring);
++
++ if (work_done == 0) {
++ napi_complete(napi);
++ ring->int_enable(ring->priv, ring);
++
++ return 0;
++ }
++
++ return budget;
+ }
+
+ static void bcmgenet_tx_reclaim_all(struct net_device *dev)
+@@ -1201,10 +1225,8 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
+ bcmgenet_tdma_ring_writel(priv, ring->index,
+ ring->prod_index, TDMA_PROD_INDEX);
+
+- if (ring->free_bds <= (MAX_SKB_FRAGS + 1)) {
++ if (ring->free_bds <= (MAX_SKB_FRAGS + 1))
+ netif_tx_stop_queue(txq);
+- ring->int_enable(priv, ring);
+- }
+
+ out:
+ spin_unlock_irqrestore(&ring->lock, flags);
+@@ -1517,6 +1539,7 @@ static int init_umac(struct bcmgenet_priv *priv)
+ struct device *kdev = &priv->pdev->dev;
+ int ret;
+ u32 reg, cpu_mask_clear;
++ int index;
+
+ dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n");
+
+@@ -1543,7 +1566,7 @@ static int init_umac(struct bcmgenet_priv *priv)
+
+ bcmgenet_intr_disable(priv);
+
+- cpu_mask_clear = UMAC_IRQ_RXDMA_BDONE;
++ cpu_mask_clear = UMAC_IRQ_RXDMA_BDONE | UMAC_IRQ_TXDMA_BDONE;
+
+ dev_dbg(kdev, "%s:Enabling RXDMA_BDONE interrupt\n", __func__);
+
+@@ -1570,6 +1593,10 @@ static int init_umac(struct bcmgenet_priv *priv)
+
+ bcmgenet_intrl2_0_writel(priv, cpu_mask_clear, INTRL2_CPU_MASK_CLEAR);
+
++ for (index = 0; index < priv->hw_params->tx_queues; index++)
++ bcmgenet_intrl2_1_writel(priv, (1 << index),
++ INTRL2_CPU_MASK_CLEAR);
++
+ /* Enable rx/tx engine.*/
+ dev_dbg(kdev, "done init umac\n");
+
+@@ -1589,6 +1616,8 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
+ unsigned int first_bd;
+
+ spin_lock_init(&ring->lock);
++ ring->priv = priv;
++ netif_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, 64);
+ ring->index = index;
+ if (index == DESC_INDEX) {
+ ring->queue = 0;
+@@ -1634,6 +1663,17 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
+ TDMA_WRITE_PTR);
+ bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
+ DMA_END_ADDR);
++
++ napi_enable(&ring->napi);
++}
++
++static void bcmgenet_fini_tx_ring(struct bcmgenet_priv *priv,
++ unsigned int index)
++{
++ struct bcmgenet_tx_ring *ring = &priv->tx_rings[index];
++
++ napi_disable(&ring->napi);
++ netif_napi_del(&ring->napi);
+ }
+
+ /* Initialize a RDMA ring */
+@@ -1803,7 +1843,7 @@ static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
+ return ret;
+ }
+
+-static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
++static void __bcmgenet_fini_dma(struct bcmgenet_priv *priv)
+ {
+ int i;
+
+@@ -1822,6 +1862,18 @@ static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
+ kfree(priv->tx_cbs);
+ }
+
++static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
++{
++ int i;
++
++ bcmgenet_fini_tx_ring(priv, DESC_INDEX);
++
++ for (i = 0; i < priv->hw_params->tx_queues; i++)
++ bcmgenet_fini_tx_ring(priv, i);
++
++ __bcmgenet_fini_dma(priv);
++}
++
+ /* init_edma: Initialize DMA control register */
+ static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
+ {
+@@ -1848,7 +1900,7 @@ static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
+ priv->tx_cbs = kcalloc(priv->num_tx_bds, sizeof(struct enet_cb),
+ GFP_KERNEL);
+ if (!priv->tx_cbs) {
+- bcmgenet_fini_dma(priv);
++ __bcmgenet_fini_dma(priv);
+ return -ENOMEM;
+ }
+
+@@ -1871,9 +1923,6 @@ static int bcmgenet_poll(struct napi_struct *napi, int budget)
+ struct bcmgenet_priv, napi);
+ unsigned int work_done;
+
+- /* tx reclaim */
+- bcmgenet_tx_reclaim(priv->dev, &priv->tx_rings[DESC_INDEX]);
+-
+ work_done = bcmgenet_desc_rx(priv, budget);
+
+ /* Advancing our consumer index*/
+@@ -1918,28 +1967,34 @@ static void bcmgenet_irq_task(struct work_struct *work)
+ static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
+ {
+ struct bcmgenet_priv *priv = dev_id;
++ struct bcmgenet_tx_ring *ring;
+ unsigned int index;
+
+ /* Save irq status for bottom-half processing. */
+ priv->irq1_stat =
+ bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
+- ~priv->int1_mask;
++ ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
+ /* clear interrupts */
+ bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);
+
+ netif_dbg(priv, intr, priv->dev,
+ "%s: IRQ=0x%x\n", __func__, priv->irq1_stat);
++
+ /* Check the MBDONE interrupts.
+ * packet is done, reclaim descriptors
+ */
+- if (priv->irq1_stat & 0x0000ffff) {
+- index = 0;
+- for (index = 0; index < 16; index++) {
+- if (priv->irq1_stat & (1 << index))
+- bcmgenet_tx_reclaim(priv->dev,
+- &priv->tx_rings[index]);
++ for (index = 0; index < priv->hw_params->tx_queues; index++) {
++ if (!(priv->irq1_stat & BIT(index)))
++ continue;
++
++ ring = &priv->tx_rings[index];
++
++ if (likely(napi_schedule_prep(&ring->napi))) {
++ ring->int_disable(priv, ring);
++ __napi_schedule(&ring->napi);
+ }
+ }
++
+ return IRQ_HANDLED;
+ }
+
+@@ -1971,8 +2026,12 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
+ }
+ if (priv->irq0_stat &
+ (UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE)) {
+- /* Tx reclaim */
+- bcmgenet_tx_reclaim(priv->dev, &priv->tx_rings[DESC_INDEX]);
++ struct bcmgenet_tx_ring *ring = &priv->tx_rings[DESC_INDEX];
++
++ if (likely(napi_schedule_prep(&ring->napi))) {
++ ring->int_disable(priv, ring);
++ __napi_schedule(&ring->napi);
++ }
+ }
+ if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R |
+ UMAC_IRQ_PHY_DET_F |
+diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+index 31b2da5f9b82..eeda0281c684 100644
+--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+@@ -495,6 +495,7 @@ struct bcmgenet_hw_params {
+
+ struct bcmgenet_tx_ring {
+ spinlock_t lock; /* ring lock */
++ struct napi_struct napi; /* NAPI per tx queue */
+ unsigned int index; /* ring index */
+ unsigned int queue; /* queue index */
+ struct enet_cb *cbs; /* tx ring buffer control block*/
+@@ -509,6 +510,7 @@ struct bcmgenet_tx_ring {
+ struct bcmgenet_tx_ring *);
+ void (*int_disable)(struct bcmgenet_priv *priv,
+ struct bcmgenet_tx_ring *);
++ struct bcmgenet_priv *priv;
+ };
+
+ /* device context */
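
Taken together, the bcmgenet hunks convert TX reclaim from work done directly in the hard interrupt into per-ring NAPI: each bcmgenet_tx_ring gains a napi_struct, the ISRs only mask the ring's interrupt and schedule its NAPI context, and the poll callback reclaims completed descriptors, re-enabling the interrupt once the ring is idle. A skeletal version of that split (the ring helpers are illustrative):

	static irqreturn_t ring_isr(int irq, void *data)
	{
		struct tx_ring *ring = data;

		if (likely(napi_schedule_prep(&ring->napi))) {
			ring_int_disable(ring);	/* mask this ring only */
			__napi_schedule(&ring->napi); /* defer to softirq */
		}
		return IRQ_HANDLED;
	}

	static int ring_poll(struct napi_struct *napi, int budget)
	{
		struct tx_ring *ring =
			container_of(napi, struct tx_ring, napi);
		unsigned int done = ring_reclaim(ring);	/* free sent skbs */

		if (done == 0) {
			napi_complete(napi);	/* idle: stop polling */
			ring_int_enable(ring);	/* unmask the interrupt */
			return 0;
		}
		return budget;			/* more work: stay scheduled */
	}

This also explains the xmit-path change: stopping a full queue no longer re-enables the interrupt by hand, since the interrupt mask is owned by the schedule/complete cycle.
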
+diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
+index cf154f74cba1..54390b3e0344 100644
+--- a/drivers/net/ethernet/realtek/r8169.c
++++ b/drivers/net/ethernet/realtek/r8169.c
+@@ -5062,8 +5062,6 @@ static void rtl_hw_reset(struct rtl8169_private *tp)
+ RTL_W8(ChipCmd, CmdReset);
+
+ rtl_udelay_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100);
+-
+- netdev_reset_queue(tp->dev);
+ }
+
+ static void rtl_request_uncached_firmware(struct rtl8169_private *tp)
+@@ -7073,8 +7071,6 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
+
+ txd->opts2 = cpu_to_le32(opts[1]);
+
+- netdev_sent_queue(dev, skb->len);
+-
+ skb_tx_timestamp(skb);
+
+ wmb();
+@@ -7174,7 +7170,6 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev)
+ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
+ {
+ unsigned int dirty_tx, tx_left;
+- unsigned int bytes_compl = 0, pkts_compl = 0;
+
+ dirty_tx = tp->dirty_tx;
+ smp_rmb();
+@@ -7193,8 +7188,10 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
+ rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
+ tp->TxDescArray + entry);
+ if (status & LastFrag) {
+- pkts_compl++;
+- bytes_compl += tx_skb->skb->len;
++ u64_stats_update_begin(&tp->tx_stats.syncp);
++ tp->tx_stats.packets++;
++ tp->tx_stats.bytes += tx_skb->skb->len;
++ u64_stats_update_end(&tp->tx_stats.syncp);
+ dev_kfree_skb_any(tx_skb->skb);
+ tx_skb->skb = NULL;
+ }
+@@ -7203,13 +7200,6 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
+ }
+
+ if (tp->dirty_tx != dirty_tx) {
+- netdev_completed_queue(tp->dev, pkts_compl, bytes_compl);
+-
+- u64_stats_update_begin(&tp->tx_stats.syncp);
+- tp->tx_stats.packets += pkts_compl;
+- tp->tx_stats.bytes += bytes_compl;
+- u64_stats_update_end(&tp->tx_stats.syncp);
+-
+ tp->dirty_tx = dirty_tx;
+ /* Sync with rtl8169_start_xmit:
+ * - publish dirty_tx ring index (write barrier)
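
The r8169 hunks back out the byte-queue-limits accounting (netdev_sent_queue(), netdev_completed_queue() and the reset in rtl_hw_reset()) and fold the counter updates into the reclaim loop, still under the u64_stats seqcount so 64-bit counters read consistently on 32-bit machines. The writer/reader pattern in general form:

	struct ring_stats {
		u64 packets;
		u64 bytes;
		struct u64_stats_sync syncp;
	};

	/* writer (TX completion path) */
	u64_stats_update_begin(&stats->syncp);
	stats->packets++;
	stats->bytes += skb->len;
	u64_stats_update_end(&stats->syncp);

	/* reader (e.g. ndo_get_stats64): retry against writers */
	unsigned int start;
	u64 packets, bytes;

	do {
		start = u64_stats_fetch_begin_irq(&stats->syncp);
		packets = stats->packets;
		bytes   = stats->bytes;
	} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
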
+diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
+index 880cc090dc44..91d0c6a86e37 100644
+--- a/drivers/net/macvtap.c
++++ b/drivers/net/macvtap.c
+@@ -637,12 +637,15 @@ static void macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
+ } /* else everything is zero */
+ }
+
++/* Neighbour code has some assumptions on HH_DATA_MOD alignment */
++#define MACVTAP_RESERVE HH_DATA_OFF(ETH_HLEN)
++
+ /* Get packet from user space buffer */
+ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
+ const struct iovec *iv, unsigned long total_len,
+ size_t count, int noblock)
+ {
+- int good_linear = SKB_MAX_HEAD(NET_IP_ALIGN);
++ int good_linear = SKB_MAX_HEAD(MACVTAP_RESERVE);
+ struct sk_buff *skb;
+ struct macvlan_dev *vlan;
+ unsigned long len = total_len;
+@@ -701,7 +704,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
+ linear = vnet_hdr.hdr_len;
+ }
+
+- skb = macvtap_alloc_skb(&q->sk, NET_IP_ALIGN, copylen,
++ skb = macvtap_alloc_skb(&q->sk, MACVTAP_RESERVE, copylen,
+ linear, noblock, &err);
+ if (!skb)
+ goto err;
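
The macvtap fix replaces a NET_IP_ALIGN (typically 2-byte) reserve with HH_DATA_OFF(ETH_HLEN). HH_DATA_OFF(len), as defined in include/linux/netdevice.h, is the padding that rounds len up to a multiple of HH_DATA_MOD (16), so the arithmetic works out as:

	HH_DATA_OFF(14) = 16 - (((14 - 1) & 15) + 1) = 2
	reserve (2) + ETH_HLEN (14) = 16 = HH_DATA_MOD

Once forwarding pulls the 14-byte Ethernet header, the skb is left with a full 16 bytes of headroom, which is what the neighbour code's aligned hard-header copy assumes. The numeric value happens to equal NET_IP_ALIGN on most arches; expressing it via HH_DATA_OFF() encodes the invariant actually being relied on.
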
+diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
+index 767cd110f496..dc1f6f07326a 100644
+--- a/drivers/net/phy/phy.c
++++ b/drivers/net/phy/phy.c
+@@ -236,6 +236,25 @@ static inline unsigned int phy_find_valid(unsigned int idx, u32 features)
+ }
+
+ /**
++ * phy_check_valid - check if there is a valid PHY setting which matches
++ * speed, duplex, and feature mask
++ * @speed: speed to match
++ * @duplex: duplex to match
++ * @features: A mask of the valid settings
++ *
++ * Description: Returns true if there is a valid setting, false otherwise.
++ */
++static inline bool phy_check_valid(int speed, int duplex, u32 features)
++{
++ unsigned int idx;
++
++ idx = phy_find_valid(phy_find_setting(speed, duplex), features);
++
++ return settings[idx].speed == speed && settings[idx].duplex == duplex &&
++ (settings[idx].setting & features);
++}
++
++/**
+ * phy_sanitize_settings - make sure the PHY is set to supported speed and duplex
+ * @phydev: the target phy_device struct
+ *
+@@ -1042,7 +1061,6 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
+ int eee_lp, eee_cap, eee_adv;
+ u32 lp, cap, adv;
+ int status;
+- unsigned int idx;
+
+ /* Read phy status to properly get the right settings */
+ status = phy_read_status(phydev);
+@@ -1074,8 +1092,7 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
+
+ adv = mmd_eee_adv_to_ethtool_adv_t(eee_adv);
+ lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp);
+- idx = phy_find_setting(phydev->speed, phydev->duplex);
+- if (!(lp & adv & settings[idx].setting))
++ if (!phy_check_valid(phydev->speed, phydev->duplex, lp & adv))
+ goto eee_exit_err;
+
+ if (clk_stop_enable) {
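
phy_init_eee() previously trusted phy_find_setting() to return a usable index and then tested a single settings[] entry; phy_check_valid() instead confirms that the nearest valid entry really matches the requested speed and duplex and is present in the intersection of local and link-partner advertisements (lp & adv). The essence of the validation, as a self-contained sketch with made-up capability bits:

	struct setting { int speed; int duplex; unsigned int bit; };

	static const struct setting tbl[] = {
		{ 1000, 1, 1u << 0 },	/* illustrative bits */
		{  100, 1, 1u << 1 },
		{  100, 0, 1u << 2 },
		{   10, 1, 1u << 3 },
		{   10, 0, 1u << 4 },
	};

	static bool setting_valid(int speed, int duplex,
				  unsigned int features)
	{
		size_t i;

		for (i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++)
			if (tbl[i].speed == speed &&
			    tbl[i].duplex == duplex &&
			    (tbl[i].bit & features))
				return true; /* exact match, advertised */
		return false;
	}
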
+diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
+index 9c505c4dbe04..ebc95a3771a4 100644
+--- a/drivers/net/team/team.c
++++ b/drivers/net/team/team.c
+@@ -42,9 +42,7 @@
+
+ static struct team_port *team_port_get_rcu(const struct net_device *dev)
+ {
+- struct team_port *port = rcu_dereference(dev->rx_handler_data);
+-
+- return team_port_exists(dev) ? port : NULL;
++ return rcu_dereference(dev->rx_handler_data);
+ }
+
+ static struct team_port *team_port_get_rtnl(const struct net_device *dev)
+@@ -1735,11 +1733,11 @@ static int team_set_mac_address(struct net_device *dev, void *p)
+ if (dev->type == ARPHRD_ETHER && !is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+- rcu_read_lock();
+- list_for_each_entry_rcu(port, &team->port_list, list)
++ mutex_lock(&team->lock);
++ list_for_each_entry(port, &team->port_list, list)
+ if (team->ops.port_change_dev_addr)
+ team->ops.port_change_dev_addr(team, port);
+- rcu_read_unlock();
++ mutex_unlock(&team->lock);
+ return 0;
+ }
+
+diff --git a/drivers/net/usb/plusb.c b/drivers/net/usb/plusb.c
+index 3d18bb0eee85..1bfe0fcaccf5 100644
+--- a/drivers/net/usb/plusb.c
++++ b/drivers/net/usb/plusb.c
+@@ -134,6 +134,11 @@ static const struct usb_device_id products [] = {
+ }, {
+ USB_DEVICE(0x050d, 0x258a), /* Belkin F5U258/F5U279 (PL-25A1) */
+ .driver_info = (unsigned long) &prolific_info,
++}, {
++ USB_DEVICE(0x3923, 0x7825), /* National Instruments USB
++ * Host-to-Host Cable
++ */
++ .driver_info = (unsigned long) &prolific_info,
+ },
+
+ { }, // END
+diff --git a/drivers/net/wireless/ath/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c
+index a3399c4f13a9..b9b651ea9851 100644
+--- a/drivers/net/wireless/ath/ath5k/reset.c
++++ b/drivers/net/wireless/ath/ath5k/reset.c
+@@ -478,7 +478,7 @@ ath5k_hw_wisoc_reset(struct ath5k_hw *ah, u32 flags)
+ regval = ioread32(reg);
+ iowrite32(regval | val, reg);
+ regval = ioread32(reg);
+- usleep_range(100, 150);
++ udelay(100); /* NB: should be atomic */
+
+ /* Bring BB/MAC out of reset */
+ iowrite32(regval & ~val, reg);
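
The ath5k one-liner is a context fix: usleep_range() sleeps and must not be called from atomic context, and this reset path can run with interrupts off, so it reverts to a 100-microsecond busy-wait, as the added "NB: should be atomic" comment records. The usual rule (per Documentation/timers/timers-howto.txt): udelay()/mdelay() where sleeping is forbidden, usleep_range() from process context where the scheduler may run. For example, with an illustrative lock:

	spin_lock_irqsave(&lock, flags);  /* atomic from here on */
	udelay(100);                      /* busy-wait: never sleeps */
	spin_unlock_irqrestore(&lock, flags);

	usleep_range(100, 150);           /* process context only */
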
+diff --git a/drivers/of/of_pci.c b/drivers/of/of_pci.c
+index 8882b467be95..ecc5fa5640d2 100644
+--- a/drivers/of/of_pci.c
++++ b/drivers/of/of_pci.c
+@@ -140,6 +140,7 @@ int of_pci_get_host_bridge_resources(struct device_node *dev,
+ unsigned char busno, unsigned char bus_max,
+ struct list_head *resources, resource_size_t *io_base)
+ {
++ struct pci_host_bridge_window *window;
+ struct resource *res;
+ struct resource *bus_range;
+ struct of_pci_range range;
+@@ -225,7 +226,10 @@ int of_pci_get_host_bridge_resources(struct device_node *dev,
+ conversion_failed:
+ kfree(res);
+ parse_failed:
++ list_for_each_entry(window, resources, list)
++ kfree(window->res);
+ pci_free_resource_list(resources);
++ kfree(bus_range);
+ return err;
+ }
+ EXPORT_SYMBOL_GPL(of_pci_get_host_bridge_resources);
+diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c
+index f2446769247f..6f806f93662a 100644
+--- a/drivers/pinctrl/freescale/pinctrl-imx.c
++++ b/drivers/pinctrl/freescale/pinctrl-imx.c
+@@ -365,7 +365,7 @@ static void imx_pinconf_dbg_show(struct pinctrl_dev *pctldev,
+ const struct imx_pin_reg *pin_reg = &info->pin_regs[pin_id];
+ unsigned long config;
+
+- if (!pin_reg || !pin_reg->conf_reg) {
++ if (!pin_reg || pin_reg->conf_reg == -1) {
+ seq_printf(s, "N/A");
+ return;
+ }
+diff --git a/drivers/pinctrl/freescale/pinctrl-imx25.c b/drivers/pinctrl/freescale/pinctrl-imx25.c
+index 550e6d77ac2b..b5fabf684632 100644
+--- a/drivers/pinctrl/freescale/pinctrl-imx25.c
++++ b/drivers/pinctrl/freescale/pinctrl-imx25.c
+@@ -27,150 +27,148 @@
+
+ enum imx25_pads {
+ MX25_PAD_RESERVE0 = 1,
+- MX25_PAD_RESERVE1 = 2,
+- MX25_PAD_A10 = 3,
+- MX25_PAD_A13 = 4,
+- MX25_PAD_A14 = 5,
+- MX25_PAD_A15 = 6,
+- MX25_PAD_A16 = 7,
+- MX25_PAD_A17 = 8,
+- MX25_PAD_A18 = 9,
+- MX25_PAD_A19 = 10,
+- MX25_PAD_A20 = 11,
+- MX25_PAD_A21 = 12,
+- MX25_PAD_A22 = 13,
+- MX25_PAD_A23 = 14,
+- MX25_PAD_A24 = 15,
+- MX25_PAD_A25 = 16,
+- MX25_PAD_EB0 = 17,
+- MX25_PAD_EB1 = 18,
+- MX25_PAD_OE = 19,
+- MX25_PAD_CS0 = 20,
+- MX25_PAD_CS1 = 21,
+- MX25_PAD_CS4 = 22,
+- MX25_PAD_CS5 = 23,
+- MX25_PAD_NF_CE0 = 24,
+- MX25_PAD_ECB = 25,
+- MX25_PAD_LBA = 26,
+- MX25_PAD_BCLK = 27,
+- MX25_PAD_RW = 28,
+- MX25_PAD_NFWE_B = 29,
+- MX25_PAD_NFRE_B = 30,
+- MX25_PAD_NFALE = 31,
+- MX25_PAD_NFCLE = 32,
+- MX25_PAD_NFWP_B = 33,
+- MX25_PAD_NFRB = 34,
+- MX25_PAD_D15 = 35,
+- MX25_PAD_D14 = 36,
+- MX25_PAD_D13 = 37,
+- MX25_PAD_D12 = 38,
+- MX25_PAD_D11 = 39,
+- MX25_PAD_D10 = 40,
+- MX25_PAD_D9 = 41,
+- MX25_PAD_D8 = 42,
+- MX25_PAD_D7 = 43,
+- MX25_PAD_D6 = 44,
+- MX25_PAD_D5 = 45,
+- MX25_PAD_D4 = 46,
+- MX25_PAD_D3 = 47,
+- MX25_PAD_D2 = 48,
+- MX25_PAD_D1 = 49,
+- MX25_PAD_D0 = 50,
+- MX25_PAD_LD0 = 51,
+- MX25_PAD_LD1 = 52,
+- MX25_PAD_LD2 = 53,
+- MX25_PAD_LD3 = 54,
+- MX25_PAD_LD4 = 55,
+- MX25_PAD_LD5 = 56,
+- MX25_PAD_LD6 = 57,
+- MX25_PAD_LD7 = 58,
+- MX25_PAD_LD8 = 59,
+- MX25_PAD_LD9 = 60,
+- MX25_PAD_LD10 = 61,
+- MX25_PAD_LD11 = 62,
+- MX25_PAD_LD12 = 63,
+- MX25_PAD_LD13 = 64,
+- MX25_PAD_LD14 = 65,
+- MX25_PAD_LD15 = 66,
+- MX25_PAD_HSYNC = 67,
+- MX25_PAD_VSYNC = 68,
+- MX25_PAD_LSCLK = 69,
+- MX25_PAD_OE_ACD = 70,
+- MX25_PAD_CONTRAST = 71,
+- MX25_PAD_PWM = 72,
+- MX25_PAD_CSI_D2 = 73,
+- MX25_PAD_CSI_D3 = 74,
+- MX25_PAD_CSI_D4 = 75,
+- MX25_PAD_CSI_D5 = 76,
+- MX25_PAD_CSI_D6 = 77,
+- MX25_PAD_CSI_D7 = 78,
+- MX25_PAD_CSI_D8 = 79,
+- MX25_PAD_CSI_D9 = 80,
+- MX25_PAD_CSI_MCLK = 81,
+- MX25_PAD_CSI_VSYNC = 82,
+- MX25_PAD_CSI_HSYNC = 83,
+- MX25_PAD_CSI_PIXCLK = 84,
+- MX25_PAD_I2C1_CLK = 85,
+- MX25_PAD_I2C1_DAT = 86,
+- MX25_PAD_CSPI1_MOSI = 87,
+- MX25_PAD_CSPI1_MISO = 88,
+- MX25_PAD_CSPI1_SS0 = 89,
+- MX25_PAD_CSPI1_SS1 = 90,
+- MX25_PAD_CSPI1_SCLK = 91,
+- MX25_PAD_CSPI1_RDY = 92,
+- MX25_PAD_UART1_RXD = 93,
+- MX25_PAD_UART1_TXD = 94,
+- MX25_PAD_UART1_RTS = 95,
+- MX25_PAD_UART1_CTS = 96,
+- MX25_PAD_UART2_RXD = 97,
+- MX25_PAD_UART2_TXD = 98,
+- MX25_PAD_UART2_RTS = 99,
+- MX25_PAD_UART2_CTS = 100,
+- MX25_PAD_SD1_CMD = 101,
+- MX25_PAD_SD1_CLK = 102,
+- MX25_PAD_SD1_DATA0 = 103,
+- MX25_PAD_SD1_DATA1 = 104,
+- MX25_PAD_SD1_DATA2 = 105,
+- MX25_PAD_SD1_DATA3 = 106,
+- MX25_PAD_KPP_ROW0 = 107,
+- MX25_PAD_KPP_ROW1 = 108,
+- MX25_PAD_KPP_ROW2 = 109,
+- MX25_PAD_KPP_ROW3 = 110,
+- MX25_PAD_KPP_COL0 = 111,
+- MX25_PAD_KPP_COL1 = 112,
+- MX25_PAD_KPP_COL2 = 113,
+- MX25_PAD_KPP_COL3 = 114,
+- MX25_PAD_FEC_MDC = 115,
+- MX25_PAD_FEC_MDIO = 116,
+- MX25_PAD_FEC_TDATA0 = 117,
+- MX25_PAD_FEC_TDATA1 = 118,
+- MX25_PAD_FEC_TX_EN = 119,
+- MX25_PAD_FEC_RDATA0 = 120,
+- MX25_PAD_FEC_RDATA1 = 121,
+- MX25_PAD_FEC_RX_DV = 122,
+- MX25_PAD_FEC_TX_CLK = 123,
+- MX25_PAD_RTCK = 124,
+- MX25_PAD_DE_B = 125,
+- MX25_PAD_GPIO_A = 126,
+- MX25_PAD_GPIO_B = 127,
+- MX25_PAD_GPIO_C = 128,
+- MX25_PAD_GPIO_D = 129,
+- MX25_PAD_GPIO_E = 130,
+- MX25_PAD_GPIO_F = 131,
+- MX25_PAD_EXT_ARMCLK = 132,
+- MX25_PAD_UPLL_BYPCLK = 133,
+- MX25_PAD_VSTBY_REQ = 134,
+- MX25_PAD_VSTBY_ACK = 135,
+- MX25_PAD_POWER_FAIL = 136,
+- MX25_PAD_CLKO = 137,
+- MX25_PAD_BOOT_MODE0 = 138,
+- MX25_PAD_BOOT_MODE1 = 139,
++ MX25_PAD_A10 = 2,
++ MX25_PAD_A13 = 3,
++ MX25_PAD_A14 = 4,
++ MX25_PAD_A15 = 5,
++ MX25_PAD_A16 = 6,
++ MX25_PAD_A17 = 7,
++ MX25_PAD_A18 = 8,
++ MX25_PAD_A19 = 9,
++ MX25_PAD_A20 = 10,
++ MX25_PAD_A21 = 11,
++ MX25_PAD_A22 = 12,
++ MX25_PAD_A23 = 13,
++ MX25_PAD_A24 = 14,
++ MX25_PAD_A25 = 15,
++ MX25_PAD_EB0 = 16,
++ MX25_PAD_EB1 = 17,
++ MX25_PAD_OE = 18,
++ MX25_PAD_CS0 = 19,
++ MX25_PAD_CS1 = 20,
++ MX25_PAD_CS4 = 21,
++ MX25_PAD_CS5 = 22,
++ MX25_PAD_NF_CE0 = 23,
++ MX25_PAD_ECB = 24,
++ MX25_PAD_LBA = 25,
++ MX25_PAD_BCLK = 26,
++ MX25_PAD_RW = 27,
++ MX25_PAD_NFWE_B = 28,
++ MX25_PAD_NFRE_B = 29,
++ MX25_PAD_NFALE = 30,
++ MX25_PAD_NFCLE = 31,
++ MX25_PAD_NFWP_B = 32,
++ MX25_PAD_NFRB = 33,
++ MX25_PAD_D15 = 34,
++ MX25_PAD_D14 = 35,
++ MX25_PAD_D13 = 36,
++ MX25_PAD_D12 = 37,
++ MX25_PAD_D11 = 38,
++ MX25_PAD_D10 = 39,
++ MX25_PAD_D9 = 40,
++ MX25_PAD_D8 = 41,
++ MX25_PAD_D7 = 42,
++ MX25_PAD_D6 = 43,
++ MX25_PAD_D5 = 44,
++ MX25_PAD_D4 = 45,
++ MX25_PAD_D3 = 46,
++ MX25_PAD_D2 = 47,
++ MX25_PAD_D1 = 48,
++ MX25_PAD_D0 = 49,
++ MX25_PAD_LD0 = 50,
++ MX25_PAD_LD1 = 51,
++ MX25_PAD_LD2 = 52,
++ MX25_PAD_LD3 = 53,
++ MX25_PAD_LD4 = 54,
++ MX25_PAD_LD5 = 55,
++ MX25_PAD_LD6 = 56,
++ MX25_PAD_LD7 = 57,
++ MX25_PAD_LD8 = 58,
++ MX25_PAD_LD9 = 59,
++ MX25_PAD_LD10 = 60,
++ MX25_PAD_LD11 = 61,
++ MX25_PAD_LD12 = 62,
++ MX25_PAD_LD13 = 63,
++ MX25_PAD_LD14 = 64,
++ MX25_PAD_LD15 = 65,
++ MX25_PAD_HSYNC = 66,
++ MX25_PAD_VSYNC = 67,
++ MX25_PAD_LSCLK = 68,
++ MX25_PAD_OE_ACD = 69,
++ MX25_PAD_CONTRAST = 70,
++ MX25_PAD_PWM = 71,
++ MX25_PAD_CSI_D2 = 72,
++ MX25_PAD_CSI_D3 = 73,
++ MX25_PAD_CSI_D4 = 74,
++ MX25_PAD_CSI_D5 = 75,
++ MX25_PAD_CSI_D6 = 76,
++ MX25_PAD_CSI_D7 = 77,
++ MX25_PAD_CSI_D8 = 78,
++ MX25_PAD_CSI_D9 = 79,
++ MX25_PAD_CSI_MCLK = 80,
++ MX25_PAD_CSI_VSYNC = 81,
++ MX25_PAD_CSI_HSYNC = 82,
++ MX25_PAD_CSI_PIXCLK = 83,
++ MX25_PAD_I2C1_CLK = 84,
++ MX25_PAD_I2C1_DAT = 85,
++ MX25_PAD_CSPI1_MOSI = 86,
++ MX25_PAD_CSPI1_MISO = 87,
++ MX25_PAD_CSPI1_SS0 = 88,
++ MX25_PAD_CSPI1_SS1 = 89,
++ MX25_PAD_CSPI1_SCLK = 90,
++ MX25_PAD_CSPI1_RDY = 91,
++ MX25_PAD_UART1_RXD = 92,
++ MX25_PAD_UART1_TXD = 93,
++ MX25_PAD_UART1_RTS = 94,
++ MX25_PAD_UART1_CTS = 95,
++ MX25_PAD_UART2_RXD = 96,
++ MX25_PAD_UART2_TXD = 97,
++ MX25_PAD_UART2_RTS = 98,
++ MX25_PAD_UART2_CTS = 99,
++ MX25_PAD_SD1_CMD = 100,
++ MX25_PAD_SD1_CLK = 101,
++ MX25_PAD_SD1_DATA0 = 102,
++ MX25_PAD_SD1_DATA1 = 103,
++ MX25_PAD_SD1_DATA2 = 104,
++ MX25_PAD_SD1_DATA3 = 105,
++ MX25_PAD_KPP_ROW0 = 106,
++ MX25_PAD_KPP_ROW1 = 107,
++ MX25_PAD_KPP_ROW2 = 108,
++ MX25_PAD_KPP_ROW3 = 109,
++ MX25_PAD_KPP_COL0 = 110,
++ MX25_PAD_KPP_COL1 = 111,
++ MX25_PAD_KPP_COL2 = 112,
++ MX25_PAD_KPP_COL3 = 113,
++ MX25_PAD_FEC_MDC = 114,
++ MX25_PAD_FEC_MDIO = 115,
++ MX25_PAD_FEC_TDATA0 = 116,
++ MX25_PAD_FEC_TDATA1 = 117,
++ MX25_PAD_FEC_TX_EN = 118,
++ MX25_PAD_FEC_RDATA0 = 119,
++ MX25_PAD_FEC_RDATA1 = 120,
++ MX25_PAD_FEC_RX_DV = 121,
++ MX25_PAD_FEC_TX_CLK = 122,
++ MX25_PAD_RTCK = 123,
++ MX25_PAD_DE_B = 124,
++ MX25_PAD_GPIO_A = 125,
++ MX25_PAD_GPIO_B = 126,
++ MX25_PAD_GPIO_C = 127,
++ MX25_PAD_GPIO_D = 128,
++ MX25_PAD_GPIO_E = 129,
++ MX25_PAD_GPIO_F = 130,
++ MX25_PAD_EXT_ARMCLK = 131,
++ MX25_PAD_UPLL_BYPCLK = 132,
++ MX25_PAD_VSTBY_REQ = 133,
++ MX25_PAD_VSTBY_ACK = 134,
++ MX25_PAD_POWER_FAIL = 135,
++ MX25_PAD_CLKO = 136,
++ MX25_PAD_BOOT_MODE0 = 137,
++ MX25_PAD_BOOT_MODE1 = 138,
+ };
+
+ /* Pad names for the pinmux subsystem */
+ static const struct pinctrl_pin_desc imx25_pinctrl_pads[] = {
+ IMX_PINCTRL_PIN(MX25_PAD_RESERVE0),
+- IMX_PINCTRL_PIN(MX25_PAD_RESERVE1),
+ IMX_PINCTRL_PIN(MX25_PAD_A10),
+ IMX_PINCTRL_PIN(MX25_PAD_A13),
+ IMX_PINCTRL_PIN(MX25_PAD_A14),
+diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
+index 30d74a06b993..15a8998bd161 100644
+--- a/drivers/scsi/be2iscsi/be_main.c
++++ b/drivers/scsi/be2iscsi/be_main.c
+@@ -586,7 +586,6 @@ static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
+ "beiscsi_hba_alloc - iscsi_host_alloc failed\n");
+ return NULL;
+ }
+- shost->dma_boundary = pcidev->dma_mask;
+ shost->max_id = BE2_MAX_SESSIONS;
+ shost->max_channel = 0;
+ shost->max_cmd_len = BEISCSI_MAX_CMD_LEN;
+diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
+index 843594c2583d..07b2ea1fbf0d 100644
+--- a/drivers/scsi/sg.c
++++ b/drivers/scsi/sg.c
+@@ -546,7 +546,7 @@ static ssize_t
+ sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp)
+ {
+ sg_io_hdr_t *hp = &srp->header;
+- int err = 0;
++ int err = 0, err2;
+ int len;
+
+ if (count < SZ_SG_IO_HDR) {
+@@ -575,8 +575,8 @@ sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp)
+ goto err_out;
+ }
+ err_out:
+- err = sg_finish_rem_req(srp);
+- return (0 == err) ? count : err;
++ err2 = sg_finish_rem_req(srp);
++ return err ? : err2 ? : count;
+ }
+
+ static ssize_t
+diff --git a/drivers/staging/comedi/comedi_compat32.c b/drivers/staging/comedi/comedi_compat32.c
+index 9b6f96f1591c..25e0b40881ca 100644
+--- a/drivers/staging/comedi/comedi_compat32.c
++++ b/drivers/staging/comedi/comedi_compat32.c
+@@ -262,7 +262,7 @@ static int compat_cmd(struct file *file, unsigned long arg)
+ {
+ struct comedi_cmd __user *cmd;
+ struct comedi32_cmd_struct __user *cmd32;
+- int rc;
++ int rc, err;
+
+ cmd32 = compat_ptr(arg);
+ cmd = compat_alloc_user_space(sizeof(*cmd));
+@@ -271,7 +271,15 @@ static int compat_cmd(struct file *file, unsigned long arg)
+ if (rc)
+ return rc;
+
+- return translated_ioctl(file, COMEDI_CMD, (unsigned long)cmd);
++ rc = translated_ioctl(file, COMEDI_CMD, (unsigned long)cmd);
++ if (rc == -EAGAIN) {
++ /* Special case: copy cmd back to user. */
++ err = put_compat_cmd(cmd32, cmd);
++ if (err)
++ rc = err;
++ }
++
++ return rc;
+ }
+
+ /* Handle 32-bit COMEDI_CMDTEST ioctl. */
+diff --git a/drivers/staging/comedi/drivers/cb_pcidas64.c b/drivers/staging/comedi/drivers/cb_pcidas64.c
+index 3b6bffc66918..1eb13b134b32 100644
+--- a/drivers/staging/comedi/drivers/cb_pcidas64.c
++++ b/drivers/staging/comedi/drivers/cb_pcidas64.c
+@@ -439,6 +439,29 @@ static const struct comedi_lrange ai_ranges_64xx = {
+ }
+ };
+
++static const uint8_t ai_range_code_64xx[8] = {
++	0x0, 0x1, 0x2, 0x3,	/* bipolar 10, 5, 2.5, 1.25 */
++ 0x8, 0x9, 0xa, 0xb /* unipolar 10, 5, 2.5, 1.25 */
++};
++
++/* analog input ranges for 64-Mx boards */
++static const struct comedi_lrange ai_ranges_64_mx = {
++ 7, {
++ BIP_RANGE(5),
++ BIP_RANGE(2.5),
++ BIP_RANGE(1.25),
++ BIP_RANGE(0.625),
++ UNI_RANGE(5),
++ UNI_RANGE(2.5),
++ UNI_RANGE(1.25)
++ }
++};
++
++static const uint8_t ai_range_code_64_mx[7] = {
++ 0x0, 0x1, 0x2, 0x3, /* bipolar 5, 2.5, 1.25, 0.625 */
++ 0x9, 0xa, 0xb /* unipolar 5, 2.5, 1.25 */
++};
++
+ /* analog input ranges for 60xx boards */
+ static const struct comedi_lrange ai_ranges_60xx = {
+ 4, {
+@@ -449,6 +472,10 @@ static const struct comedi_lrange ai_ranges_60xx = {
+ }
+ };
+
++static const uint8_t ai_range_code_60xx[4] = {
++ 0x0, 0x1, 0x4, 0x7 /* bipolar 10, 5, 0.5, 0.05 */
++};
++
+ /* analog input ranges for 6030, etc boards */
+ static const struct comedi_lrange ai_ranges_6030 = {
+ 14, {
+@@ -469,6 +496,11 @@ static const struct comedi_lrange ai_ranges_6030 = {
+ }
+ };
+
++static const uint8_t ai_range_code_6030[14] = {
++ 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, /* bip 10, 5, 2, 1, 0.5, 0.2, 0.1 */
++ 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf /* uni 10, 5, 2, 1, 0.5, 0.2, 0.1 */
++};
++
+ /* analog input ranges for 6052, etc boards */
+ static const struct comedi_lrange ai_ranges_6052 = {
+ 15, {
+@@ -490,6 +522,11 @@ static const struct comedi_lrange ai_ranges_6052 = {
+ }
+ };
+
++static const uint8_t ai_range_code_6052[15] = {
++ 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, /* bipolar 10 ... 0.05 */
++ 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf /* unipolar 10 ... 0.1 */
++};
++
+ /* analog input ranges for 4020 board */
+ static const struct comedi_lrange ai_ranges_4020 = {
+ 2, {
+@@ -593,6 +630,7 @@ struct pcidas64_board {
+ int ai_bits; /* analog input resolution */
+ int ai_speed; /* fastest conversion period in ns */
+ const struct comedi_lrange *ai_range_table;
++ const uint8_t *ai_range_code;
+ int ao_nchan; /* number of analog out channels */
+ int ao_bits; /* analog output resolution */
+ int ao_scan_speed; /* analog output scan speed */
+@@ -651,6 +689,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
+ .ao_scan_speed = 10000,
+ .layout = LAYOUT_64XX,
+ .ai_range_table = &ai_ranges_64xx,
++ .ai_range_code = ai_range_code_64xx,
+ .ao_range_table = &ao_ranges_64xx,
+ .ao_range_code = ao_range_code_64xx,
+ .ai_fifo = &ai_fifo_64xx,
+@@ -666,6 +705,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
+ .ao_scan_speed = 10000,
+ .layout = LAYOUT_64XX,
+ .ai_range_table = &ai_ranges_64xx,
++ .ai_range_code = ai_range_code_64xx,
+ .ao_range_table = &ao_ranges_64xx,
+ .ao_range_code = ao_range_code_64xx,
+ .ai_fifo = &ai_fifo_64xx,
+@@ -680,7 +720,8 @@ static const struct pcidas64_board pcidas64_boards[] = {
+ .ao_bits = 16,
+ .ao_scan_speed = 10000,
+ .layout = LAYOUT_64XX,
+- .ai_range_table = &ai_ranges_64xx,
++ .ai_range_table = &ai_ranges_64_mx,
++ .ai_range_code = ai_range_code_64_mx,
+ .ao_range_table = &ao_ranges_64xx,
+ .ao_range_code = ao_range_code_64xx,
+ .ai_fifo = &ai_fifo_64xx,
+@@ -695,7 +736,8 @@ static const struct pcidas64_board pcidas64_boards[] = {
+ .ao_bits = 16,
+ .ao_scan_speed = 10000,
+ .layout = LAYOUT_64XX,
+- .ai_range_table = &ai_ranges_64xx,
++ .ai_range_table = &ai_ranges_64_mx,
++ .ai_range_code = ai_range_code_64_mx,
+ .ao_range_table = &ao_ranges_64xx,
+ .ao_range_code = ao_range_code_64xx,
+ .ai_fifo = &ai_fifo_64xx,
+@@ -710,7 +752,8 @@ static const struct pcidas64_board pcidas64_boards[] = {
+ .ao_bits = 16,
+ .ao_scan_speed = 10000,
+ .layout = LAYOUT_64XX,
+- .ai_range_table = &ai_ranges_64xx,
++ .ai_range_table = &ai_ranges_64_mx,
++ .ai_range_code = ai_range_code_64_mx,
+ .ao_range_table = &ao_ranges_64xx,
+ .ao_range_code = ao_range_code_64xx,
+ .ai_fifo = &ai_fifo_64xx,
+@@ -725,6 +768,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
+ .ao_bits = 16,
+ .layout = LAYOUT_60XX,
+ .ai_range_table = &ai_ranges_60xx,
++ .ai_range_code = ai_range_code_60xx,
+ .ao_range_table = &range_bipolar10,
+ .ao_range_code = ao_range_code_60xx,
+ .ai_fifo = &ai_fifo_60xx,
+@@ -740,6 +784,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
+ .ao_scan_speed = 100000,
+ .layout = LAYOUT_60XX,
+ .ai_range_table = &ai_ranges_60xx,
++ .ai_range_code = ai_range_code_60xx,
+ .ao_range_table = &range_bipolar10,
+ .ao_range_code = ao_range_code_60xx,
+ .ai_fifo = &ai_fifo_60xx,
+@@ -754,6 +799,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
+ .ao_scan_speed = 100000,
+ .layout = LAYOUT_60XX,
+ .ai_range_table = &ai_ranges_60xx,
++ .ai_range_code = ai_range_code_60xx,
+ .ao_range_table = &range_bipolar10,
+ .ao_range_code = ao_range_code_60xx,
+ .ai_fifo = &ai_fifo_60xx,
+@@ -769,6 +815,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
+ .ao_scan_speed = 100000,
+ .layout = LAYOUT_60XX,
+ .ai_range_table = &ai_ranges_60xx,
++ .ai_range_code = ai_range_code_60xx,
+ .ao_range_table = &range_bipolar10,
+ .ao_range_code = ao_range_code_60xx,
+ .ai_fifo = &ai_fifo_60xx,
+@@ -784,6 +831,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
+ .ao_scan_speed = 10000,
+ .layout = LAYOUT_60XX,
+ .ai_range_table = &ai_ranges_6030,
++ .ai_range_code = ai_range_code_6030,
+ .ao_range_table = &ao_ranges_6030,
+ .ao_range_code = ao_range_code_6030,
+ .ai_fifo = &ai_fifo_60xx,
+@@ -799,6 +847,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
+ .ao_scan_speed = 10000,
+ .layout = LAYOUT_60XX,
+ .ai_range_table = &ai_ranges_6030,
++ .ai_range_code = ai_range_code_6030,
+ .ao_range_table = &ao_ranges_6030,
+ .ao_range_code = ao_range_code_6030,
+ .ai_fifo = &ai_fifo_60xx,
+@@ -812,6 +861,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
+ .ao_nchan = 0,
+ .layout = LAYOUT_60XX,
+ .ai_range_table = &ai_ranges_6030,
++ .ai_range_code = ai_range_code_6030,
+ .ai_fifo = &ai_fifo_60xx,
+ .has_8255 = 0,
+ },
+@@ -823,6 +873,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
+ .ao_nchan = 0,
+ .layout = LAYOUT_60XX,
+ .ai_range_table = &ai_ranges_6030,
++ .ai_range_code = ai_range_code_6030,
+ .ai_fifo = &ai_fifo_60xx,
+ .has_8255 = 0,
+ },
+@@ -835,6 +886,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
+ .ao_scan_speed = 0,
+ .layout = LAYOUT_60XX,
+ .ai_range_table = &ai_ranges_60xx,
++ .ai_range_code = ai_range_code_60xx,
+ .ai_fifo = &ai_fifo_60xx,
+ .has_8255 = 0,
+ },
+@@ -848,6 +900,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
+ .ao_scan_speed = 100000,
+ .layout = LAYOUT_60XX,
+ .ai_range_table = &ai_ranges_60xx,
++ .ai_range_code = ai_range_code_60xx,
+ .ao_range_table = &range_bipolar10,
+ .ao_range_code = ao_range_code_60xx,
+ .ai_fifo = &ai_fifo_60xx,
+@@ -863,6 +916,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
+ .ao_scan_speed = 100000,
+ .layout = LAYOUT_60XX,
+ .ai_range_table = &ai_ranges_60xx,
++ .ai_range_code = ai_range_code_60xx,
+ .ao_range_table = &range_bipolar10,
+ .ao_range_code = ao_range_code_60xx,
+ .ai_fifo = &ai_fifo_60xx,
+@@ -878,6 +932,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
+ .ao_scan_speed = 1000,
+ .layout = LAYOUT_60XX,
+ .ai_range_table = &ai_ranges_6052,
++ .ai_range_code = ai_range_code_6052,
+ .ao_range_table = &ao_ranges_6030,
+ .ao_range_code = ao_range_code_6030,
+ .ai_fifo = &ai_fifo_60xx,
+@@ -893,6 +948,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
+ .ao_scan_speed = 3333,
+ .layout = LAYOUT_60XX,
+ .ai_range_table = &ai_ranges_6052,
++ .ai_range_code = ai_range_code_6052,
+ .ao_range_table = &ao_ranges_6030,
+ .ao_range_code = ao_range_code_6030,
+ .ai_fifo = &ai_fifo_60xx,
+@@ -908,6 +964,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
+ .ao_scan_speed = 1000,
+ .layout = LAYOUT_60XX,
+ .ai_range_table = &ai_ranges_6052,
++ .ai_range_code = ai_range_code_6052,
+ .ao_range_table = &ao_ranges_6030,
+ .ao_range_code = ao_range_code_6030,
+ .ai_fifo = &ai_fifo_60xx,
+@@ -923,6 +980,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
+ .ao_scan_speed = 1000,
+ .layout = LAYOUT_60XX,
+ .ai_range_table = &ai_ranges_6052,
++ .ai_range_code = ai_range_code_6052,
+ .ao_range_table = &ao_ranges_6030,
+ .ao_range_code = ao_range_code_6030,
+ .ai_fifo = &ai_fifo_60xx,
+@@ -957,6 +1015,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
+ .ao_scan_speed = 10000,
+ .layout = LAYOUT_64XX,
+ .ai_range_table = &ai_ranges_64xx,
++ .ai_range_code = ai_range_code_64xx,
+ .ai_fifo = ai_fifo_64xx,
+ .has_8255 = 1,
+ },
+@@ -968,7 +1027,8 @@ static const struct pcidas64_board pcidas64_boards[] = {
+ .ao_nchan = 0,
+ .ao_scan_speed = 10000,
+ .layout = LAYOUT_64XX,
+- .ai_range_table = &ai_ranges_64xx,
++ .ai_range_table = &ai_ranges_64_mx,
++ .ai_range_code = ai_range_code_64_mx,
+ .ai_fifo = ai_fifo_64xx,
+ .has_8255 = 1,
+ },
+@@ -980,7 +1040,8 @@ static const struct pcidas64_board pcidas64_boards[] = {
+ .ao_nchan = 0,
+ .ao_scan_speed = 10000,
+ .layout = LAYOUT_64XX,
+- .ai_range_table = &ai_ranges_64xx,
++ .ai_range_table = &ai_ranges_64_mx,
++ .ai_range_code = ai_range_code_64_mx,
+ .ai_fifo = ai_fifo_64xx,
+ .has_8255 = 1,
+ },
+@@ -992,7 +1053,8 @@ static const struct pcidas64_board pcidas64_boards[] = {
+ .ao_nchan = 0,
+ .ao_scan_speed = 10000,
+ .layout = LAYOUT_64XX,
+- .ai_range_table = &ai_ranges_64xx,
++ .ai_range_table = &ai_ranges_64_mx,
++ .ai_range_code = ai_range_code_64_mx,
+ .ai_fifo = ai_fifo_64xx,
+ .has_8255 = 1,
+ },
+@@ -1004,7 +1066,8 @@ static const struct pcidas64_board pcidas64_boards[] = {
+ .ao_nchan = 2,
+ .ao_scan_speed = 10000,
+ .layout = LAYOUT_64XX,
+- .ai_range_table = &ai_ranges_64xx,
++ .ai_range_table = &ai_ranges_64_mx,
++ .ai_range_code = ai_range_code_64_mx,
+ .ai_fifo = ai_fifo_64xx,
+ .has_8255 = 1,
+ },
+@@ -1016,7 +1079,8 @@ static const struct pcidas64_board pcidas64_boards[] = {
+ .ao_nchan = 2,
+ .ao_scan_speed = 10000,
+ .layout = LAYOUT_64XX,
+- .ai_range_table = &ai_ranges_64xx,
++ .ai_range_table = &ai_ranges_64_mx,
++ .ai_range_code = ai_range_code_64_mx,
+ .ai_fifo = ai_fifo_64xx,
+ .has_8255 = 1,
+ },
+@@ -1028,7 +1092,8 @@ static const struct pcidas64_board pcidas64_boards[] = {
+ .ao_nchan = 2,
+ .ao_scan_speed = 10000,
+ .layout = LAYOUT_64XX,
+- .ai_range_table = &ai_ranges_64xx,
++ .ai_range_table = &ai_ranges_64_mx,
++ .ai_range_code = ai_range_code_64_mx,
+ .ai_fifo = ai_fifo_64xx,
+ .has_8255 = 1,
+ },
+@@ -1122,45 +1187,8 @@ static unsigned int ai_range_bits_6xxx(const struct comedi_device *dev,
+ unsigned int range_index)
+ {
+ const struct pcidas64_board *thisboard = dev->board_ptr;
+- const struct comedi_krange *range =
+- &thisboard->ai_range_table->range[range_index];
+- unsigned int bits = 0;
+
+- switch (range->max) {
+- case 10000000:
+- bits = 0x000;
+- break;
+- case 5000000:
+- bits = 0x100;
+- break;
+- case 2000000:
+- case 2500000:
+- bits = 0x200;
+- break;
+- case 1000000:
+- case 1250000:
+- bits = 0x300;
+- break;
+- case 500000:
+- bits = 0x400;
+- break;
+- case 200000:
+- case 250000:
+- bits = 0x500;
+- break;
+- case 100000:
+- bits = 0x600;
+- break;
+- case 50000:
+- bits = 0x700;
+- break;
+- default:
+- dev_err(dev->class_dev, "bug! in %s\n", __func__);
+- break;
+- }
+- if (range->min == 0)
+- bits += 0x900;
+- return bits;
++ return thisboard->ai_range_code[range_index] << 8;
+ }
+
+ static unsigned int hw_revision(const struct comedi_device *dev,
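
The cb_pcidas64 rework replaces the switch over range->max in ai_range_bits_6xxx(), including its dev_err() "bug!" fallback for unlisted ranges, with per-board ai_range_code[] arrays kept parallel to each ai_range_table, so the function shrinks to a single indexed load. The table-driven shape in miniature:

	static const uint8_t range_code[] = {
		0x0, 0x1, 0x2, 0x3,	/* bipolar entries, table order */
		0x8, 0x9, 0xa, 0xb	/* unipolar entries, table order */
	};

	static unsigned int range_bits(unsigned int range_index)
	{
		/* caller guarantees the index fits the board's table */
		return range_code[range_index] << 8; /* hw bits 8..11 */
	}

Keeping the code array literally parallel to the range table is also what lets the 64-Mx boards get their own corrected 7-entry range table instead of reusing the 8-entry 64xx one.
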
+diff --git a/drivers/staging/iio/adc/mxs-lradc.c b/drivers/staging/iio/adc/mxs-lradc.c
+index 2a29b9baec0d..ffd42071a12e 100644
+--- a/drivers/staging/iio/adc/mxs-lradc.c
++++ b/drivers/staging/iio/adc/mxs-lradc.c
+@@ -214,11 +214,17 @@ struct mxs_lradc {
+ unsigned long is_divided;
+
+ /*
+-	 * Touchscreen LRADC channels receive a private slot in the CTRL4
+- * register, the slot #7. Therefore only 7 slots instead of 8 in the
+- * CTRL4 register can be mapped to LRADC channels when using the
+- * touchscreen.
+- *
++ * When the touchscreen is enabled, we give it two private virtual
++ * channels: #6 and #7. This means that only 6 virtual channels (instead
++ * of 8) will be available for buffered capture.
++ */
++#define TOUCHSCREEN_VCHANNEL1 7
++#define TOUCHSCREEN_VCHANNEL2 6
++#define BUFFER_VCHANS_LIMITED 0x3f
++#define BUFFER_VCHANS_ALL 0xff
++ u8 buffer_vchans;
++
++ /*
+ * Furthermore, certain LRADC channels are shared between touchscreen
+ * and/or touch-buttons and generic LRADC block. Therefore when using
+ * either of these, these channels are not available for the regular
+@@ -342,6 +348,9 @@ struct mxs_lradc {
+ #define LRADC_CTRL4 0x140
+ #define LRADC_CTRL4_LRADCSELECT_MASK(n) (0xf << ((n) * 4))
+ #define LRADC_CTRL4_LRADCSELECT_OFFSET(n) ((n) * 4)
++#define LRADC_CTRL4_LRADCSELECT(n, x) \
++ (((x) << LRADC_CTRL4_LRADCSELECT_OFFSET(n)) & \
++ LRADC_CTRL4_LRADCSELECT_MASK(n))
+
+ #define LRADC_RESOLUTION 12
+ #define LRADC_SINGLE_SAMPLE_MASK ((1 << LRADC_RESOLUTION) - 1)
+@@ -416,6 +425,14 @@ static bool mxs_lradc_check_touch_event(struct mxs_lradc *lradc)
+ LRADC_STATUS_TOUCH_DETECT_RAW);
+ }
+
++static void mxs_lradc_map_channel(struct mxs_lradc *lradc, unsigned vch,
++ unsigned ch)
++{
++ mxs_lradc_reg_clear(lradc, LRADC_CTRL4_LRADCSELECT_MASK(vch),
++ LRADC_CTRL4);
++ mxs_lradc_reg_set(lradc, LRADC_CTRL4_LRADCSELECT(vch, ch), LRADC_CTRL4);
++}
++
+ static void mxs_lradc_setup_ts_channel(struct mxs_lradc *lradc, unsigned ch)
+ {
+ /*
+@@ -443,12 +460,8 @@ static void mxs_lradc_setup_ts_channel(struct mxs_lradc *lradc, unsigned ch)
+ LRADC_DELAY_DELAY(lradc->over_sample_delay - 1),
+ LRADC_DELAY(3));
+
+- mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ(2) |
+- LRADC_CTRL1_LRADC_IRQ(3) | LRADC_CTRL1_LRADC_IRQ(4) |
+- LRADC_CTRL1_LRADC_IRQ(5), LRADC_CTRL1);
++ mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ(ch), LRADC_CTRL1);
+
+- /* wake us again, when the complete conversion is done */
+- mxs_lradc_reg_set(lradc, LRADC_CTRL1_LRADC_IRQ_EN(ch), LRADC_CTRL1);
+ /*
+ * after changing the touchscreen plates setting
+ * the signals need some initial time to settle. Start the
+@@ -501,12 +514,8 @@ static void mxs_lradc_setup_ts_pressure(struct mxs_lradc *lradc, unsigned ch1,
+ LRADC_DELAY_DELAY(lradc->over_sample_delay - 1),
+ LRADC_DELAY(3));
+
+- mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ(2) |
+- LRADC_CTRL1_LRADC_IRQ(3) | LRADC_CTRL1_LRADC_IRQ(4) |
+- LRADC_CTRL1_LRADC_IRQ(5), LRADC_CTRL1);
++ mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ(ch2), LRADC_CTRL1);
+
+- /* wake us again, when the conversions are done */
+- mxs_lradc_reg_set(lradc, LRADC_CTRL1_LRADC_IRQ_EN(ch2), LRADC_CTRL1);
+ /*
+ * after changing the touchscreen plates setting
+ * the signals need some initial time to settle. Start the
+@@ -571,36 +580,6 @@ static unsigned mxs_lradc_read_ts_pressure(struct mxs_lradc *lradc,
+ #define TS_CH_XM 4
+ #define TS_CH_YM 5
+
+-static int mxs_lradc_read_ts_channel(struct mxs_lradc *lradc)
+-{
+- u32 reg;
+- int val;
+-
+- reg = readl(lradc->base + LRADC_CTRL1);
+-
+- /* only channels 3 to 5 are of interest here */
+- if (reg & LRADC_CTRL1_LRADC_IRQ(TS_CH_YP)) {
+- mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ_EN(TS_CH_YP) |
+- LRADC_CTRL1_LRADC_IRQ(TS_CH_YP), LRADC_CTRL1);
+- val = mxs_lradc_read_raw_channel(lradc, TS_CH_YP);
+- } else if (reg & LRADC_CTRL1_LRADC_IRQ(TS_CH_XM)) {
+- mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ_EN(TS_CH_XM) |
+- LRADC_CTRL1_LRADC_IRQ(TS_CH_XM), LRADC_CTRL1);
+- val = mxs_lradc_read_raw_channel(lradc, TS_CH_XM);
+- } else if (reg & LRADC_CTRL1_LRADC_IRQ(TS_CH_YM)) {
+- mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ_EN(TS_CH_YM) |
+- LRADC_CTRL1_LRADC_IRQ(TS_CH_YM), LRADC_CTRL1);
+- val = mxs_lradc_read_raw_channel(lradc, TS_CH_YM);
+- } else {
+- return -EIO;
+- }
+-
+- mxs_lradc_reg_wrt(lradc, 0, LRADC_DELAY(2));
+- mxs_lradc_reg_wrt(lradc, 0, LRADC_DELAY(3));
+-
+- return val;
+-}
+-
+ /*
+ * YP(open)--+-------------+
+ * | |--+
+@@ -644,7 +623,8 @@ static void mxs_lradc_prepare_x_pos(struct mxs_lradc *lradc)
+ mxs_lradc_reg_set(lradc, mxs_lradc_drive_x_plate(lradc), LRADC_CTRL0);
+
+ lradc->cur_plate = LRADC_SAMPLE_X;
+- mxs_lradc_setup_ts_channel(lradc, TS_CH_YP);
++ mxs_lradc_map_channel(lradc, TOUCHSCREEN_VCHANNEL1, TS_CH_YP);
++ mxs_lradc_setup_ts_channel(lradc, TOUCHSCREEN_VCHANNEL1);
+ }
+
+ /*
+@@ -665,7 +645,8 @@ static void mxs_lradc_prepare_y_pos(struct mxs_lradc *lradc)
+ mxs_lradc_reg_set(lradc, mxs_lradc_drive_y_plate(lradc), LRADC_CTRL0);
+
+ lradc->cur_plate = LRADC_SAMPLE_Y;
+- mxs_lradc_setup_ts_channel(lradc, TS_CH_XM);
++ mxs_lradc_map_channel(lradc, TOUCHSCREEN_VCHANNEL1, TS_CH_XM);
++ mxs_lradc_setup_ts_channel(lradc, TOUCHSCREEN_VCHANNEL1);
+ }
+
+ /*
+@@ -686,7 +667,10 @@ static void mxs_lradc_prepare_pressure(struct mxs_lradc *lradc)
+ mxs_lradc_reg_set(lradc, mxs_lradc_drive_pressure(lradc), LRADC_CTRL0);
+
+ lradc->cur_plate = LRADC_SAMPLE_PRESSURE;
+- mxs_lradc_setup_ts_pressure(lradc, TS_CH_XP, TS_CH_YM);
++ mxs_lradc_map_channel(lradc, TOUCHSCREEN_VCHANNEL1, TS_CH_YM);
++ mxs_lradc_map_channel(lradc, TOUCHSCREEN_VCHANNEL2, TS_CH_XP);
++ mxs_lradc_setup_ts_pressure(lradc, TOUCHSCREEN_VCHANNEL2,
++ TOUCHSCREEN_VCHANNEL1);
+ }
+
+ static void mxs_lradc_enable_touch_detection(struct mxs_lradc *lradc)
+@@ -699,6 +683,19 @@ static void mxs_lradc_enable_touch_detection(struct mxs_lradc *lradc)
+ mxs_lradc_reg_set(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ_EN, LRADC_CTRL1);
+ }
+
++static void mxs_lradc_start_touch_event(struct mxs_lradc *lradc)
++{
++ mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ_EN,
++ LRADC_CTRL1);
++ mxs_lradc_reg_set(lradc,
++ LRADC_CTRL1_LRADC_IRQ_EN(TOUCHSCREEN_VCHANNEL1), LRADC_CTRL1);
++ /*
++ * start with the Y-pos, because it uses nearly the same plate
++ * settings as the touch detection
++ */
++ mxs_lradc_prepare_y_pos(lradc);
++}
++
+ static void mxs_lradc_report_ts_event(struct mxs_lradc *lradc)
+ {
+ input_report_abs(lradc->ts_input, ABS_X, lradc->ts_x_pos);
+@@ -716,10 +713,12 @@ static void mxs_lradc_complete_touch_event(struct mxs_lradc *lradc)
+ * start a dummy conversion to burn time to settle the signals
+ * note: we are not interested in the conversion's value
+ */
+- mxs_lradc_reg_wrt(lradc, 0, LRADC_CH(5));
+- mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ(5), LRADC_CTRL1);
+- mxs_lradc_reg_set(lradc, LRADC_CTRL1_LRADC_IRQ_EN(5), LRADC_CTRL1);
+- mxs_lradc_reg_wrt(lradc, LRADC_DELAY_TRIGGER(1 << 5) |
++ mxs_lradc_reg_wrt(lradc, 0, LRADC_CH(TOUCHSCREEN_VCHANNEL1));
++ mxs_lradc_reg_clear(lradc,
++ LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL1) |
++ LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL2), LRADC_CTRL1);
++ mxs_lradc_reg_wrt(lradc,
++ LRADC_DELAY_TRIGGER(1 << TOUCHSCREEN_VCHANNEL1) |
+ LRADC_DELAY_KICK | LRADC_DELAY_DELAY(10), /* waste 5 ms */
+ LRADC_DELAY(2));
+ }
+@@ -751,59 +750,45 @@ static void mxs_lradc_finish_touch_event(struct mxs_lradc *lradc, bool valid)
+
+ /* if it is released, wait for the next touch via IRQ */
+ lradc->cur_plate = LRADC_TOUCH;
+- mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ, LRADC_CTRL1);
++ mxs_lradc_reg_wrt(lradc, 0, LRADC_DELAY(2));
++ mxs_lradc_reg_wrt(lradc, 0, LRADC_DELAY(3));
++ mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ |
++ LRADC_CTRL1_LRADC_IRQ_EN(TOUCHSCREEN_VCHANNEL1) |
++ LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL1), LRADC_CTRL1);
+ mxs_lradc_reg_set(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ_EN, LRADC_CTRL1);
+ }
+
+ /* touchscreen's state machine */
+ static void mxs_lradc_handle_touch(struct mxs_lradc *lradc)
+ {
+- int val;
+-
+ switch (lradc->cur_plate) {
+ case LRADC_TOUCH:
+- /*
+- * start with the Y-pos, because it uses nearly the same plate
+- * settings like the touch detection
+- */
+- if (mxs_lradc_check_touch_event(lradc)) {
+- mxs_lradc_reg_clear(lradc,
+- LRADC_CTRL1_TOUCH_DETECT_IRQ_EN,
+- LRADC_CTRL1);
+- mxs_lradc_prepare_y_pos(lradc);
+- }
++ if (mxs_lradc_check_touch_event(lradc))
++ mxs_lradc_start_touch_event(lradc);
+ mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ,
+ LRADC_CTRL1);
+ return;
+
+ case LRADC_SAMPLE_Y:
+- val = mxs_lradc_read_ts_channel(lradc);
+- if (val < 0) {
+- mxs_lradc_enable_touch_detection(lradc); /* re-start */
+- return;
+- }
+- lradc->ts_y_pos = val;
++ lradc->ts_y_pos = mxs_lradc_read_raw_channel(lradc,
++ TOUCHSCREEN_VCHANNEL1);
+ mxs_lradc_prepare_x_pos(lradc);
+ return;
+
+ case LRADC_SAMPLE_X:
+- val = mxs_lradc_read_ts_channel(lradc);
+- if (val < 0) {
+- mxs_lradc_enable_touch_detection(lradc); /* re-start */
+- return;
+- }
+- lradc->ts_x_pos = val;
++ lradc->ts_x_pos = mxs_lradc_read_raw_channel(lradc,
++ TOUCHSCREEN_VCHANNEL1);
+ mxs_lradc_prepare_pressure(lradc);
+ return;
+
+ case LRADC_SAMPLE_PRESSURE:
+- lradc->ts_pressure =
+- mxs_lradc_read_ts_pressure(lradc, TS_CH_XP, TS_CH_YM);
++ lradc->ts_pressure = mxs_lradc_read_ts_pressure(lradc,
++ TOUCHSCREEN_VCHANNEL2,
++ TOUCHSCREEN_VCHANNEL1);
+ mxs_lradc_complete_touch_event(lradc);
+ return;
+
+ case LRADC_SAMPLE_VALID:
+- val = mxs_lradc_read_ts_channel(lradc); /* ignore the value */
+ mxs_lradc_finish_touch_event(lradc, 1);
+ break;
+ }
+@@ -835,9 +820,9 @@ static int mxs_lradc_read_single(struct iio_dev *iio_dev, int chan, int *val)
+ * used if doing raw sampling.
+ */
+ if (lradc->soc == IMX28_LRADC)
+- mxs_lradc_reg_clear(lradc, LRADC_CTRL1_MX28_LRADC_IRQ_EN_MASK,
++ mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ_EN(0),
+ LRADC_CTRL1);
+- mxs_lradc_reg_clear(lradc, 0xff, LRADC_CTRL0);
++ mxs_lradc_reg_clear(lradc, 0x1, LRADC_CTRL0);
+
+ /* Enable / disable the divider per requirement */
+ if (test_bit(chan, &lradc->is_divided))
+@@ -1081,9 +1066,8 @@ static void mxs_lradc_disable_ts(struct mxs_lradc *lradc)
+ {
+ /* stop all interrupts from firing */
+ mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ_EN |
+- LRADC_CTRL1_LRADC_IRQ_EN(2) | LRADC_CTRL1_LRADC_IRQ_EN(3) |
+- LRADC_CTRL1_LRADC_IRQ_EN(4) | LRADC_CTRL1_LRADC_IRQ_EN(5),
+- LRADC_CTRL1);
++ LRADC_CTRL1_LRADC_IRQ_EN(TOUCHSCREEN_VCHANNEL1) |
++ LRADC_CTRL1_LRADC_IRQ_EN(TOUCHSCREEN_VCHANNEL2), LRADC_CTRL1);
+
+ /* Power-down touchscreen touch-detect circuitry. */
+ mxs_lradc_reg_clear(lradc, mxs_lradc_plate_mask(lradc), LRADC_CTRL0);
+@@ -1149,26 +1133,31 @@ static irqreturn_t mxs_lradc_handle_irq(int irq, void *data)
+ struct iio_dev *iio = data;
+ struct mxs_lradc *lradc = iio_priv(iio);
+ unsigned long reg = readl(lradc->base + LRADC_CTRL1);
++ uint32_t clr_irq = mxs_lradc_irq_mask(lradc);
+ const uint32_t ts_irq_mask =
+ LRADC_CTRL1_TOUCH_DETECT_IRQ |
+- LRADC_CTRL1_LRADC_IRQ(2) |
+- LRADC_CTRL1_LRADC_IRQ(3) |
+- LRADC_CTRL1_LRADC_IRQ(4) |
+- LRADC_CTRL1_LRADC_IRQ(5);
++ LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL1) |
++ LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL2);
+
+ if (!(reg & mxs_lradc_irq_mask(lradc)))
+ return IRQ_NONE;
+
+- if (lradc->use_touchscreen && (reg & ts_irq_mask))
++ if (lradc->use_touchscreen && (reg & ts_irq_mask)) {
+ mxs_lradc_handle_touch(lradc);
+
+- if (iio_buffer_enabled(iio))
+- iio_trigger_poll(iio->trig);
+- else if (reg & LRADC_CTRL1_LRADC_IRQ(0))
++ /* Make sure we don't clear the next conversion's interrupt. */
++ clr_irq &= ~(LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL1) |
++ LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL2));
++ }
++
++ if (iio_buffer_enabled(iio)) {
++ if (reg & lradc->buffer_vchans)
++ iio_trigger_poll(iio->trig);
++ } else if (reg & LRADC_CTRL1_LRADC_IRQ(0)) {
+ complete(&lradc->completion);
++ }
+
+- mxs_lradc_reg_clear(lradc, reg & mxs_lradc_irq_mask(lradc),
+- LRADC_CTRL1);
++ mxs_lradc_reg_clear(lradc, reg & clr_irq, LRADC_CTRL1);
+
+ return IRQ_HANDLED;
+ }
+@@ -1280,9 +1269,10 @@ static int mxs_lradc_buffer_preenable(struct iio_dev *iio)
+ }
+
+ if (lradc->soc == IMX28_LRADC)
+- mxs_lradc_reg_clear(lradc, LRADC_CTRL1_MX28_LRADC_IRQ_EN_MASK,
+- LRADC_CTRL1);
+- mxs_lradc_reg_clear(lradc, 0xff, LRADC_CTRL0);
++ mxs_lradc_reg_clear(lradc,
++ lradc->buffer_vchans << LRADC_CTRL1_LRADC_IRQ_EN_OFFSET,
++ LRADC_CTRL1);
++ mxs_lradc_reg_clear(lradc, lradc->buffer_vchans, LRADC_CTRL0);
+
+ for_each_set_bit(chan, iio->active_scan_mask, LRADC_MAX_TOTAL_CHANS) {
+ ctrl4_set |= chan << LRADC_CTRL4_LRADCSELECT_OFFSET(ofs);
+@@ -1315,10 +1305,11 @@ static int mxs_lradc_buffer_postdisable(struct iio_dev *iio)
+ mxs_lradc_reg_clear(lradc, LRADC_DELAY_TRIGGER_LRADCS_MASK |
+ LRADC_DELAY_KICK, LRADC_DELAY(0));
+
+- mxs_lradc_reg_clear(lradc, 0xff, LRADC_CTRL0);
++ mxs_lradc_reg_clear(lradc, lradc->buffer_vchans, LRADC_CTRL0);
+ if (lradc->soc == IMX28_LRADC)
+- mxs_lradc_reg_clear(lradc, LRADC_CTRL1_MX28_LRADC_IRQ_EN_MASK,
+- LRADC_CTRL1);
++ mxs_lradc_reg_clear(lradc,
++ lradc->buffer_vchans << LRADC_CTRL1_LRADC_IRQ_EN_OFFSET,
++ LRADC_CTRL1);
+
+ kfree(lradc->buffer);
+ mutex_unlock(&lradc->lock);
+@@ -1344,7 +1335,7 @@ static bool mxs_lradc_validate_scan_mask(struct iio_dev *iio,
+ if (lradc->use_touchbutton)
+ rsvd_chans++;
+ if (lradc->use_touchscreen)
+- rsvd_chans++;
++ rsvd_chans += 2;
+
+ /* Test for attempts to map channels with special mode of operation. */
+ if (bitmap_intersects(mask, &rsvd_mask, LRADC_MAX_TOTAL_CHANS))
+@@ -1404,6 +1395,13 @@ static const struct iio_chan_spec mxs_lradc_chan_spec[] = {
+ .channel = 8,
+ .scan_type = {.sign = 'u', .realbits = 18, .storagebits = 32,},
+ },
++ /* Hidden channel to keep indexes */
++ {
++ .type = IIO_TEMP,
++ .indexed = 1,
++ .scan_index = -1,
++ .channel = 9,
++ },
+ MXS_ADC_CHAN(10, IIO_VOLTAGE), /* VDDIO */
+ MXS_ADC_CHAN(11, IIO_VOLTAGE), /* VTH */
+ MXS_ADC_CHAN(12, IIO_VOLTAGE), /* VDDA */
+@@ -1556,6 +1554,11 @@ static int mxs_lradc_probe(struct platform_device *pdev)
+
+ touch_ret = mxs_lradc_probe_touchscreen(lradc, node);
+
++ if (touch_ret == 0)
++ lradc->buffer_vchans = BUFFER_VCHANS_LIMITED;
++ else
++ lradc->buffer_vchans = BUFFER_VCHANS_ALL;
++
+ /* Grab all IRQ sources */
+ for (i = 0; i < of_cfg->irq_count; i++) {
+ lradc->irq[i] = platform_get_irq(pdev, i);
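The new LRADC_CTRL4_LRADCSELECT() macro and mxs_lradc_map_channel() together route a physical input onto a virtual conversion slot by rewriting one nibble of CTRL4. A rough userspace sketch of that clear-then-set nibble update (the register image, channel numbers and main() are illustrative, not driver code):

    #include <stdint.h>
    #include <stdio.h>

    #define LRADCSELECT_MASK(vch)  (0xfu << ((vch) * 4))
    #define LRADCSELECT(vch, ch)   (((uint32_t)(ch) << ((vch) * 4)) & \
                                    LRADCSELECT_MASK(vch))

    /* Map physical channel 'ch' onto virtual channel 'vch' in a CTRL4 image. */
    static uint32_t map_channel(uint32_t ctrl4, unsigned vch, unsigned ch)
    {
        ctrl4 &= ~LRADCSELECT_MASK(vch);   /* clear the old mapping */
        ctrl4 |= LRADCSELECT(vch, ch);     /* install the new one   */
        return ctrl4;
    }

    int main(void)
    {
        uint32_t ctrl4 = 0;

        ctrl4 = map_channel(ctrl4, 7, 5);  /* e.g. TOUCHSCREEN_VCHANNEL1 <- YM */
        ctrl4 = map_channel(ctrl4, 6, 2);  /* e.g. TOUCHSCREEN_VCHANNEL2 <- XP */
        printf("CTRL4 = 0x%08x\n", ctrl4); /* prints 0x52000000 */
        return 0;
    }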
+diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
+index 9f93b8234095..45837a4e950d 100644
+--- a/drivers/target/target_core_pr.c
++++ b/drivers/target/target_core_pr.c
+@@ -1857,8 +1857,8 @@ static int core_scsi3_update_aptpl_buf(
+ }
+
+ if ((len + strlen(tmp) >= pr_aptpl_buf_len)) {
+- pr_err("Unable to update renaming"
+- " APTPL metadata\n");
++ pr_err("Unable to update renaming APTPL metadata,"
++ " reallocating larger buffer\n");
+ ret = -EMSGSIZE;
+ goto out;
+ }
+@@ -1875,8 +1875,8 @@ static int core_scsi3_update_aptpl_buf(
+ lun->lun_sep->sep_rtpi, lun->unpacked_lun, reg_count);
+
+ if ((len + strlen(tmp) >= pr_aptpl_buf_len)) {
+- pr_err("Unable to update renaming"
+- " APTPL metadata\n");
++ pr_err("Unable to update renaming APTPL metadata,"
++ " reallocating larger buffer\n");
+ ret = -EMSGSIZE;
+ goto out;
+ }
+@@ -1939,7 +1939,7 @@ static int __core_scsi3_write_aptpl_to_file(
+ static sense_reason_t core_scsi3_update_and_write_aptpl(struct se_device *dev, bool aptpl)
+ {
+ unsigned char *buf;
+- int rc;
++ int rc, len = PR_APTPL_BUF_LEN;
+
+ if (!aptpl) {
+ char *null_buf = "No Registrations or Reservations\n";
+@@ -1953,25 +1953,26 @@ static sense_reason_t core_scsi3_update_and_write_aptpl(struct se_device *dev, b
+
+ return 0;
+ }
+-
+- buf = kzalloc(PR_APTPL_BUF_LEN, GFP_KERNEL);
++retry:
++ buf = vzalloc(len);
+ if (!buf)
+ return TCM_OUT_OF_RESOURCES;
+
+- rc = core_scsi3_update_aptpl_buf(dev, buf, PR_APTPL_BUF_LEN);
++ rc = core_scsi3_update_aptpl_buf(dev, buf, len);
+ if (rc < 0) {
+- kfree(buf);
+- return TCM_OUT_OF_RESOURCES;
++ vfree(buf);
++ len *= 2;
++ goto retry;
+ }
+
+ rc = __core_scsi3_write_aptpl_to_file(dev, buf);
+ if (rc != 0) {
+ pr_err("SPC-3 PR: Could not update APTPL\n");
+- kfree(buf);
++ vfree(buf);
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ }
+ dev->t10_pr.pr_aptpl_active = 1;
+- kfree(buf);
++ vfree(buf);
+ pr_debug("SPC-3 PR: Set APTPL Bit Activated\n");
+ return 0;
+ }
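The target_core_pr.c change drops the fixed kzalloc() in favour of vzalloc() plus a retry label: whenever the formatter reports -EMSGSIZE, the buffer is freed, doubled, and the format pass runs again. A minimal sketch of that grow-until-it-fits loop, with a hypothetical format_metadata() standing in for core_scsi3_update_aptpl_buf():

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Pretend formatter: fails with -EMSGSIZE when the buffer is too small. */
    static int format_metadata(char *buf, size_t len, const char *payload)
    {
        if (strlen(payload) + 1 > len)
            return -EMSGSIZE;
        memcpy(buf, payload, strlen(payload) + 1);
        return 0;
    }

    static char *format_with_retry(const char *payload, size_t start_len)
    {
        size_t len = start_len;
        char *buf;

        /* Like the patch, this relies on the output eventually fitting. */
        for (;;) {
            buf = calloc(1, len);
            if (!buf)
                return NULL;
            if (format_metadata(buf, len, payload) == 0)
                return buf;
            free(buf);
            len *= 2;   /* too small: double and try again */
        }
    }

    int main(void)
    {
        char *out = format_with_retry("a string longer than eight bytes", 8);

        if (out) {
            puts(out);
            free(out);
        }
        return 0;
    }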
+diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
+index 7a88af0e32d6..565c0da9d99d 100644
+--- a/drivers/target/target_core_sbc.c
++++ b/drivers/target/target_core_sbc.c
+@@ -251,6 +251,8 @@ static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
+ static sense_reason_t
+ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *ops)
+ {
++ struct se_device *dev = cmd->se_dev;
++ sector_t end_lba = dev->transport->get_blocks(dev) + 1;
+ unsigned int sectors = sbc_get_write_same_sectors(cmd);
+
+ if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
+@@ -264,6 +266,16 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o
+ sectors, cmd->se_dev->dev_attrib.max_write_same_len);
+ return TCM_INVALID_CDB_FIELD;
+ }
++ /*
++ * Sanity check for LBA wrap and request past end of device.
++ */
++ if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) ||
++ ((cmd->t_task_lba + sectors) > end_lba)) {
++ pr_err("WRITE_SAME exceeds last lba %llu (lba %llu, sectors %u)\n",
++ (unsigned long long)end_lba, cmd->t_task_lba, sectors);
++ return TCM_ADDRESS_OUT_OF_RANGE;
++ }
++
+ /* We always have ANC_SUP == 0 so setting ANCHOR is always an error */
+ if (flags[0] & 0x10) {
+ pr_warn("WRITE SAME with ANCHOR not supported\n");
+@@ -955,7 +967,8 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
+ unsigned long long end_lba;
+ check_lba:
+ end_lba = dev->transport->get_blocks(dev) + 1;
+- if (cmd->t_task_lba + sectors > end_lba) {
++ if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) ||
++ ((cmd->t_task_lba + sectors) > end_lba)) {
+ pr_err("cmd exceeds last lba %llu "
+ "(lba %llu, sectors %u)\n",
+ end_lba, cmd->t_task_lba, sectors);
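Both target_core_sbc.c hunks harden the range check: besides rejecting requests that run past end_lba, they now reject 64-bit wraparound, since an overflowed lba + sectors compares less than lba itself. The check in isolation, with a small self-test:

    #include <stdbool.h>
    #include <stdint.h>

    /*
     * Reject a request that wraps past UINT64_MAX or runs past the last
     * addressable block. end_lba is get_blocks() + 1, i.e. one past the
     * highest valid LBA, matching the kernel code above.
     */
    static bool lba_range_ok(uint64_t lba, uint32_t sectors, uint64_t end_lba)
    {
        if (lba + sectors < lba)        /* unsigned wraparound */
            return false;
        return lba + sectors <= end_lba;
    }

    int main(void)
    {
        int ok = lba_range_ok(0, 8, 16)
              && !lba_range_ok(UINT64_MAX - 2, 8, 16)  /* wraps around    */
              && !lba_range_ok(10, 8, 16);             /* runs past end   */
        return ok ? 0 : 1;
    }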
+diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
+index ca5cfdc1459a..e5c31eadb0ac 100644
+--- a/drivers/tty/serial/8250/8250_core.c
++++ b/drivers/tty/serial/8250/8250_core.c
+@@ -2088,8 +2088,8 @@ int serial8250_do_startup(struct uart_port *port)
+ /*
+ * Clear the interrupt registers.
+ */
+- if (serial_port_in(port, UART_LSR) & UART_LSR_DR)
+- serial_port_in(port, UART_RX);
++ serial_port_in(port, UART_LSR);
++ serial_port_in(port, UART_RX);
+ serial_port_in(port, UART_IIR);
+ serial_port_in(port, UART_MSR);
+
+@@ -2250,8 +2250,8 @@ dont_test_tx_en:
+ * saved flags to avoid getting false values from polling
+ * routines or the previous session.
+ */
+- if (serial_port_in(port, UART_LSR) & UART_LSR_DR)
+- serial_port_in(port, UART_RX);
++ serial_port_in(port, UART_LSR);
++ serial_port_in(port, UART_RX);
+ serial_port_in(port, UART_IIR);
+ serial_port_in(port, UART_MSR);
+ up->lsr_saved_flags = 0;
+@@ -2344,8 +2344,7 @@ void serial8250_do_shutdown(struct uart_port *port)
+ * Read data port to reset things, and then unlink from
+ * the IRQ chain.
+ */
+- if (serial_port_in(port, UART_LSR) & UART_LSR_DR)
+- serial_port_in(port, UART_RX);
++ serial_port_in(port, UART_RX);
+ serial8250_rpm_put(up);
+
+ del_timer_sync(&up->timer);
+diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
+index 0508a1d8e4cd..0a0a6305c511 100644
+--- a/drivers/tty/tty_io.c
++++ b/drivers/tty/tty_io.c
+@@ -975,8 +975,8 @@ EXPORT_SYMBOL(start_tty);
+ /* We limit tty time update visibility to every 8 seconds or so. */
+ static void tty_update_time(struct timespec *time)
+ {
+- unsigned long sec = get_seconds() & ~7;
+- if ((long)(sec - time->tv_sec) > 0)
++ unsigned long sec = get_seconds();
++ if (abs(sec - time->tv_sec) & ~7)
+ time->tv_sec = sec;
+ }
+
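tty_update_time() previously compared signed differences, so a timestamp already in the future was never corrected; the fix instead looks at the magnitude of the difference and resyncs once it reaches 8 seconds in either direction. Both variants, sketched over a plain time_t (names are illustrative):

    #include <stdlib.h>
    #include <time.h>

    /* Old behaviour: only ever moved the stamp forward, in 8 s steps. */
    static void update_time_old(time_t *stamp, time_t now)
    {
        time_t sec = now & ~(time_t)7;

        if ((long)(sec - *stamp) > 0)
            *stamp = sec;
    }

    /* New behaviour: resync whenever the stamp is 8 s or more off, either way. */
    static void update_time_new(time_t *stamp, time_t now)
    {
        if (labs((long)(now - *stamp)) & ~7L)
            *stamp = now;
    }

    int main(void)
    {
        time_t stamp = 1000;

        update_time_old(&stamp, 990);  /* stamp in the future: old code ignores it */
        update_time_new(&stamp, 990);  /* |diff| >= 8: new code resyncs */
        return stamp == 990 ? 0 : 1;
    }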
+diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c
+index 62380ccf70fb..e43b6e559b3d 100644
+--- a/drivers/tty/tty_ioctl.c
++++ b/drivers/tty/tty_ioctl.c
+@@ -217,11 +217,17 @@ void tty_wait_until_sent(struct tty_struct *tty, long timeout)
+ #endif
+ if (!timeout)
+ timeout = MAX_SCHEDULE_TIMEOUT;
++
+ if (wait_event_interruptible_timeout(tty->write_wait,
+- !tty_chars_in_buffer(tty), timeout) >= 0) {
+- if (tty->ops->wait_until_sent)
+- tty->ops->wait_until_sent(tty, timeout);
++ !tty_chars_in_buffer(tty), timeout) < 0) {
++ return;
+ }
++
++ if (timeout == MAX_SCHEDULE_TIMEOUT)
++ timeout = 0;
++
++ if (tty->ops->wait_until_sent)
++ tty->ops->wait_until_sent(tty, timeout);
+ }
+ EXPORT_SYMBOL(tty_wait_until_sent);
+
+diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
+index 0b59731c3021..e500243803d8 100644
+--- a/drivers/usb/core/devio.c
++++ b/drivers/usb/core/devio.c
+@@ -501,6 +501,7 @@ static void async_completed(struct urb *urb)
+ as->status = urb->status;
+ signr = as->signr;
+ if (signr) {
++ memset(&sinfo, 0, sizeof(sinfo));
+ sinfo.si_signo = as->signr;
+ sinfo.si_errno = as->status;
+ sinfo.si_code = SI_ASYNCIO;
+@@ -2371,6 +2372,7 @@ static void usbdev_remove(struct usb_device *udev)
+ wake_up_all(&ps->wait);
+ list_del_init(&ps->list);
+ if (ps->discsignr) {
++ memset(&sinfo, 0, sizeof(sinfo));
+ sinfo.si_signo = ps->discsignr;
+ sinfo.si_errno = EPIPE;
+ sinfo.si_code = SI_ASYNCIO;
+diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
+index a0aa9f3da441..3eb0f5eace79 100644
+--- a/drivers/usb/dwc3/dwc3-omap.c
++++ b/drivers/usb/dwc3/dwc3-omap.c
+@@ -205,6 +205,18 @@ static void dwc3_omap_write_irq0_set(struct dwc3_omap *omap, u32 value)
+ omap->irq0_offset, value);
+ }
+
++static void dwc3_omap_write_irqmisc_clr(struct dwc3_omap *omap, u32 value)
++{
++ dwc3_omap_writel(omap->base, USBOTGSS_IRQENABLE_CLR_MISC +
++ omap->irqmisc_offset, value);
++}
++
++static void dwc3_omap_write_irq0_clr(struct dwc3_omap *omap, u32 value)
++{
++ dwc3_omap_writel(omap->base, USBOTGSS_IRQENABLE_CLR_0 -
++ omap->irq0_offset, value);
++}
++
+ static void dwc3_omap_set_mailbox(struct dwc3_omap *omap,
+ enum omap_dwc3_vbus_id_status status)
+ {
+@@ -345,9 +357,23 @@ static void dwc3_omap_enable_irqs(struct dwc3_omap *omap)
+
+ static void dwc3_omap_disable_irqs(struct dwc3_omap *omap)
+ {
++ u32 reg;
++
+ /* disable all IRQs */
+- dwc3_omap_write_irqmisc_set(omap, 0x00);
+- dwc3_omap_write_irq0_set(omap, 0x00);
++ reg = USBOTGSS_IRQO_COREIRQ_ST;
++ dwc3_omap_write_irq0_clr(omap, reg);
++
++ reg = (USBOTGSS_IRQMISC_OEVT |
++ USBOTGSS_IRQMISC_DRVVBUS_RISE |
++ USBOTGSS_IRQMISC_CHRGVBUS_RISE |
++ USBOTGSS_IRQMISC_DISCHRGVBUS_RISE |
++ USBOTGSS_IRQMISC_IDPULLUP_RISE |
++ USBOTGSS_IRQMISC_DRVVBUS_FALL |
++ USBOTGSS_IRQMISC_CHRGVBUS_FALL |
++ USBOTGSS_IRQMISC_DISCHRGVBUS_FALL |
++ USBOTGSS_IRQMISC_IDPULLUP_FALL);
++
++ dwc3_omap_write_irqmisc_clr(omap, reg);
+ }
+
+ static u64 dwc3_omap_dma_mask = DMA_BIT_MASK(32);
+diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
+index 34034333f7f6..28d3dd3637b3 100644
+--- a/drivers/usb/gadget/configfs.c
++++ b/drivers/usb/gadget/configfs.c
+@@ -1161,7 +1161,6 @@ static ssize_t interf_grp_compatible_id_store(struct usb_os_desc *desc,
+ if (desc->opts_mutex)
+ mutex_lock(desc->opts_mutex);
+ memcpy(desc->ext_compat_id, page, l);
+- desc->ext_compat_id[l] = '\0';
+
+ if (desc->opts_mutex)
+ mutex_unlock(desc->opts_mutex);
+@@ -1192,7 +1191,6 @@ static ssize_t interf_grp_sub_compatible_id_store(struct usb_os_desc *desc,
+ if (desc->opts_mutex)
+ mutex_lock(desc->opts_mutex);
+ memcpy(desc->ext_compat_id + 8, page, l);
+- desc->ext_compat_id[l + 8] = '\0';
+
+ if (desc->opts_mutex)
+ mutex_unlock(desc->opts_mutex);
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index 7f76c8a12f89..fd53c9ebd662 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -37,6 +37,9 @@
+
+ #define PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI 0x8c31
+ #define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31
++#define PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI 0x22b5
++#define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI 0xa12f
++#define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI 0x9d2f
+
+ static const char hcd_name[] = "xhci_hcd";
+
+@@ -133,6 +136,12 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI) {
+ xhci->quirks |= XHCI_SPURIOUS_REBOOT;
+ }
++ if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
++ (pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||
++ pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI ||
++ pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI)) {
++ xhci->quirks |= XHCI_PME_STUCK_QUIRK;
++ }
+ if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
+ pdev->device == PCI_DEVICE_ID_EJ168) {
+ xhci->quirks |= XHCI_RESET_ON_RESUME;
+@@ -159,6 +168,21 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ "QUIRK: Resetting on resume");
+ }
+
++/*
++ * Make sure PME works on some Intel xHCI controllers by writing 1 to clear
++ * the Internal PME flag bit in the vendor-specific PMCTRL register at offset 0x80a4
++ */
++static void xhci_pme_quirk(struct xhci_hcd *xhci)
++{
++ u32 val;
++ void __iomem *reg;
++
++ reg = (void __iomem *) xhci->cap_regs + 0x80a4;
++ val = readl(reg);
++ writel(val | BIT(28), reg);
++ readl(reg);
++}
++
+ /* called during probe() after chip reset completes */
+ static int xhci_pci_setup(struct usb_hcd *hcd)
+ {
+@@ -283,6 +307,9 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
+ if (xhci->quirks & XHCI_COMP_MODE_QUIRK)
+ pdev->no_d3cold = true;
+
++ if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
++ xhci_pme_quirk(xhci);
++
+ return xhci_suspend(xhci, do_wakeup);
+ }
+
+@@ -313,6 +340,9 @@ static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated)
+ if (pdev->vendor == PCI_VENDOR_ID_INTEL)
+ usb_enable_intel_xhci_ports(pdev);
+
++ if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
++ xhci_pme_quirk(xhci);
++
+ retval = xhci_resume(xhci, hibernated);
+ return retval;
+ }
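xhci_pme_quirk() is the standard sequence for a write-1-to-clear status bit behind posted writes: read the register, write it back with the bit set, then read once more so the write is flushed before continuing. A sketch against a fake BAR; the 0x80a4 offset and bit 28 come straight from the hunk above, and the accessors merely stand in for readl()/writel():

    #include <stdint.h>

    /* volatile stands in for the kernel's MMIO accessors in this sketch */
    static inline uint32_t mmio_read(volatile uint32_t *reg) { return *reg; }
    static inline void mmio_write(volatile uint32_t *reg, uint32_t v) { *reg = v; }

    #define PMCTRL_OFFSET  0x80a4       /* vendor-specific PMCTRL register */
    #define PME_STUCK_BIT  (1u << 28)   /* write 1 to clear the internal PME flag */

    static void clear_stuck_pme(volatile uint8_t *cap_regs)
    {
        volatile uint32_t *reg = (volatile uint32_t *)(cap_regs + PMCTRL_OFFSET);
        uint32_t val = mmio_read(reg);

        mmio_write(reg, val | PME_STUCK_BIT);  /* W1C: set the bit to clear it */
        (void)mmio_read(reg);                  /* flush the posted write */
    }

    int main(void)
    {
        static uint32_t fake_bar[0x80a8 / 4];  /* fake mapped BAR for the demo */

        clear_stuck_pme((volatile uint8_t *)fake_bar);
        return 0;
    }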
+diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
+index 646300cbe5f7..22516f41c6f4 100644
+--- a/drivers/usb/host/xhci-plat.c
++++ b/drivers/usb/host/xhci-plat.c
+@@ -87,15 +87,6 @@ static int xhci_plat_probe(struct platform_device *pdev)
+ if (!res)
+ return -ENODEV;
+
+- if (of_device_is_compatible(pdev->dev.of_node,
+- "marvell,armada-375-xhci") ||
+- of_device_is_compatible(pdev->dev.of_node,
+- "marvell,armada-380-xhci")) {
+- ret = xhci_mvebu_mbus_init_quirk(pdev);
+- if (ret)
+- return ret;
+- }
+-
+ /* Initialize dma_mask and coherent_dma_mask to 32-bits */
+ ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret)
+@@ -129,6 +120,15 @@ static int xhci_plat_probe(struct platform_device *pdev)
+ goto put_hcd;
+ }
+
++ if (of_device_is_compatible(pdev->dev.of_node,
++ "marvell,armada-375-xhci") ||
++ of_device_is_compatible(pdev->dev.of_node,
++ "marvell,armada-380-xhci")) {
++ ret = xhci_mvebu_mbus_init_quirk(pdev);
++ if (ret)
++ goto disable_clk;
++ }
++
+ ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
+ if (ret)
+ goto disable_clk;
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 06433aec81d7..338f19cc0973 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -1957,7 +1957,7 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
+ if (event_trb != ep_ring->dequeue) {
+ /* The event was for the status stage */
+ if (event_trb == td->last_trb) {
+- if (td->urb->actual_length != 0) {
++ if (td->urb_length_set) {
+ /* Don't overwrite a previously set error code
+ */
+ if ((*status == -EINPROGRESS || *status == 0) &&
+@@ -1971,7 +1971,13 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
+ td->urb->transfer_buffer_length;
+ }
+ } else {
+- /* Maybe the event was for the data stage? */
++ /*
++ * Maybe the event was for the data stage? If so, update
++ * the URB's actual_length now and flag it as set, so
++ * that it is not overwritten by the event for the
++ * last TRB.
++ */
++ td->urb_length_set = true;
+ td->urb->actual_length =
+ td->urb->transfer_buffer_length -
+ EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index d745715a1e2f..94251141adae 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -1,3 +1,4 @@
++
+ /*
+ * xHCI host controller driver
+ *
+@@ -88,9 +89,10 @@ struct xhci_cap_regs {
+ #define HCS_IST(p) (((p) >> 0) & 0xf)
+ /* bits 4:7, max number of Event Ring segments */
+ #define HCS_ERST_MAX(p) (((p) >> 4) & 0xf)
++/* bits 21:25 Hi 5 bits of Scratchpad buffers SW must allocate for the HW */
+ /* bit 26 Scratchpad restore - for save/restore HW state - not used yet */
+-/* bits 27:31 number of Scratchpad buffers SW must allocate for the HW */
+-#define HCS_MAX_SCRATCHPAD(p) (((p) >> 27) & 0x1f)
++/* bits 27:31 Lo 5 bits of Scratchpad buffers SW must allocate for the HW */
++#define HCS_MAX_SCRATCHPAD(p) ((((p) >> 16) & 0x3e0) | (((p) >> 27) & 0x1f))
+
+ /* HCSPARAMS3 - hcs_params3 - bitmasks */
+ /* bits 0:7, Max U1 to U0 latency for the roothub ports */
+@@ -1288,6 +1290,8 @@ struct xhci_td {
+ struct xhci_segment *start_seg;
+ union xhci_trb *first_trb;
+ union xhci_trb *last_trb;
++ /* actual_length of the URB has already been set */
++ bool urb_length_set;
+ };
+
+ /* xHCI command default timeout value */
+@@ -1560,6 +1564,7 @@ struct xhci_hcd {
+ #define XHCI_SPURIOUS_WAKEUP (1 << 18)
+ /* For controllers with a broken beyond repair streams implementation */
+ #define XHCI_BROKEN_STREAMS (1 << 19)
++#define XHCI_PME_STUCK_QUIRK (1 << 20)
+ unsigned int num_active_eps;
+ unsigned int limit_active_eps;
+ /* There are two roothubs to keep track of bus suspend info for */
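The reworked HCS_MAX_SCRATCHPAD() reassembles a count that HCSPARAMS2 splits in two: bits 27:31 carry the low five bits and bits 21:25 the high five. Shifting the word right by 16 parks the high half at bit positions 5:9, where the 0x3e0 mask picks it up, and OR-ing in the low half completes the 10-bit value. A small self-check of the arithmetic:

    #include <assert.h>
    #include <stdint.h>

    #define MAX_SCRATCHPAD(p) ((((p) >> 16) & 0x3e0u) | (((p) >> 27) & 0x1fu))

    int main(void)
    {
        /* hi = 0b00001 (bits 21:25), lo = 0b00011 (bits 27:31) -> 0b0000100011 */
        uint32_t hcs2 = (1u << 21) | (3u << 27);

        assert(MAX_SCRATCHPAD(hcs2) == 0x23);
        return 0;
    }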
+diff --git a/drivers/usb/serial/bus.c b/drivers/usb/serial/bus.c
+index 9374bd2aba20..6f91eb9ae81a 100644
+--- a/drivers/usb/serial/bus.c
++++ b/drivers/usb/serial/bus.c
+@@ -51,6 +51,7 @@ static int usb_serial_device_probe(struct device *dev)
+ {
+ struct usb_serial_driver *driver;
+ struct usb_serial_port *port;
++ struct device *tty_dev;
+ int retval = 0;
+ int minor;
+
+@@ -75,12 +76,20 @@ static int usb_serial_device_probe(struct device *dev)
+ retval = device_create_file(dev, &dev_attr_port_number);
+ if (retval) {
+ if (driver->port_remove)
+- retval = driver->port_remove(port);
++ driver->port_remove(port);
+ goto exit_with_autopm;
+ }
+
+ minor = port->minor;
+- tty_register_device(usb_serial_tty_driver, minor, dev);
++ tty_dev = tty_register_device(usb_serial_tty_driver, minor, dev);
++ if (IS_ERR(tty_dev)) {
++ retval = PTR_ERR(tty_dev);
++ device_remove_file(dev, &dev_attr_port_number);
++ if (driver->port_remove)
++ driver->port_remove(port);
++ goto exit_with_autopm;
++ }
++
+ dev_info(&port->serial->dev->dev,
+ "%s converter now attached to ttyUSB%d\n",
+ driver->description, minor);
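The bus.c hunk makes the probe path honour tty_register_device() failures: everything set up so far has to be torn down in reverse order before PTR_ERR() is propagated. The same goto-unwind shape in miniature, with hypothetical setup steps:

    #include <errno.h>
    #include <stdio.h>

    static int step_a(void) { return 0; }        /* e.g. create sysfs attribute */
    static int step_b(void) { return -ENOMEM; }  /* e.g. register tty device */
    static void undo_a(void) { puts("undo a"); }

    static int probe(void)
    {
        int ret;

        ret = step_a();
        if (ret)
            return ret;

        ret = step_b();
        if (ret)
            goto err_undo_a;   /* failure: roll back in reverse order */

        return 0;

    err_undo_a:
        undo_a();
        return ret;
    }

    int main(void)
    {
        return probe() == -ENOMEM ? 0 : 1;
    }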
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index f40c856ff758..84ce2d74894c 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -147,6 +147,8 @@ static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(0x166A, 0x0305) }, /* Clipsal C-5000CT2 C-Bus Spectrum Colour Touchscreen */
+ { USB_DEVICE(0x166A, 0x0401) }, /* Clipsal L51xx C-Bus Architectural Dimmer */
+ { USB_DEVICE(0x166A, 0x0101) }, /* Clipsal 5560884 C-Bus Multi-room Audio Matrix Switcher */
++ { USB_DEVICE(0x16C0, 0x09B0) }, /* Lunatico Seletek */
++ { USB_DEVICE(0x16C0, 0x09B1) }, /* Lunatico Seletek */
+ { USB_DEVICE(0x16D6, 0x0001) }, /* Jablotron serial interface */
+ { USB_DEVICE(0x16DC, 0x0010) }, /* W-IE-NE-R Plein & Baus GmbH PL512 Power Supply */
+ { USB_DEVICE(0x16DC, 0x0011) }, /* W-IE-NE-R Plein & Baus GmbH RCM Remote Control for MARATON Power Supply */
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index 1ebb351b9e9a..3086dec0ef53 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -799,6 +799,8 @@ static const struct usb_device_id id_table_combined[] = {
+ { USB_DEVICE(FTDI_VID, FTDI_ELSTER_UNICOM_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_PROPOX_JTAGCABLEII_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_PROPOX_ISPCABLEIII_PID) },
++ { USB_DEVICE(FTDI_VID, CYBER_CORTEX_AV_PID),
++ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ { USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_PID),
+ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ { USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_H_PID),
+@@ -978,6 +980,23 @@ static const struct usb_device_id id_table_combined[] = {
+ { USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_PID, 1) },
+ /* GE Healthcare devices */
+ { USB_DEVICE(GE_HEALTHCARE_VID, GE_HEALTHCARE_NEMO_TRACKER_PID) },
++ /* Active Research (Actisense) devices */
++ { USB_DEVICE(FTDI_VID, ACTISENSE_NDC_PID) },
++ { USB_DEVICE(FTDI_VID, ACTISENSE_USG_PID) },
++ { USB_DEVICE(FTDI_VID, ACTISENSE_NGT_PID) },
++ { USB_DEVICE(FTDI_VID, ACTISENSE_NGW_PID) },
++ { USB_DEVICE(FTDI_VID, ACTISENSE_D9AC_PID) },
++ { USB_DEVICE(FTDI_VID, ACTISENSE_D9AD_PID) },
++ { USB_DEVICE(FTDI_VID, ACTISENSE_D9AE_PID) },
++ { USB_DEVICE(FTDI_VID, ACTISENSE_D9AF_PID) },
++ { USB_DEVICE(FTDI_VID, CHETCO_SEAGAUGE_PID) },
++ { USB_DEVICE(FTDI_VID, CHETCO_SEASWITCH_PID) },
++ { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_NMEA2000_PID) },
++ { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_ETHERNET_PID) },
++ { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_WIFI_PID) },
++ { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_DISPLAY_PID) },
++ { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_LITE_PID) },
++ { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_ANALOG_PID) },
+ { } /* Terminating entry */
+ };
+
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index e52409c9be99..56b1b55c4751 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -38,6 +38,9 @@
+
+ #define FTDI_LUMEL_PD12_PID 0x6002
+
++/* Cyber Cortex AV by Fabulous Silicon (http://fabuloussilicon.com) */
++#define CYBER_CORTEX_AV_PID 0x8698
++
+ /*
+ * Marvell OpenRD Base, Client
+ * http://www.open-rd.org
+@@ -1438,3 +1441,23 @@
+ */
+ #define GE_HEALTHCARE_VID 0x1901
+ #define GE_HEALTHCARE_NEMO_TRACKER_PID 0x0015
++
++/*
++ * Active Research (Actisense) devices
++ */
++#define ACTISENSE_NDC_PID 0xD9A8 /* NDC USB Serial Adapter */
++#define ACTISENSE_USG_PID 0xD9A9 /* USG USB Serial Adapter */
++#define ACTISENSE_NGT_PID 0xD9AA /* NGT NMEA2000 Interface */
++#define ACTISENSE_NGW_PID 0xD9AB /* NGW NMEA2000 Gateway */
++#define ACTISENSE_D9AC_PID 0xD9AC /* Actisense Reserved */
++#define ACTISENSE_D9AD_PID 0xD9AD /* Actisense Reserved */
++#define ACTISENSE_D9AE_PID 0xD9AE /* Actisense Reserved */
++#define ACTISENSE_D9AF_PID 0xD9AF /* Actisense Reserved */
++#define CHETCO_SEAGAUGE_PID 0xA548 /* SeaGauge USB Adapter */
++#define CHETCO_SEASWITCH_PID 0xA549 /* SeaSwitch USB Adapter */
++#define CHETCO_SEASMART_NMEA2000_PID 0xA54A /* SeaSmart NMEA2000 Gateway */
++#define CHETCO_SEASMART_ETHERNET_PID 0xA54B /* SeaSmart Ethernet Gateway */
++#define CHETCO_SEASMART_WIFI_PID 0xA5AC /* SeaSmart Wifi Gateway */
++#define CHETCO_SEASMART_DISPLAY_PID 0xA5AD /* SeaSmart NMEA2000 Display */
++#define CHETCO_SEASMART_LITE_PID 0xA5AE /* SeaSmart Lite USB Adapter */
++#define CHETCO_SEASMART_ANALOG_PID 0xA5AF /* SeaSmart Analog Adapter */
+diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
+index 1bd192290b08..904ab353ecf2 100644
+--- a/drivers/usb/serial/generic.c
++++ b/drivers/usb/serial/generic.c
+@@ -258,7 +258,8 @@ void usb_serial_generic_wait_until_sent(struct tty_struct *tty, long timeout)
+ * character or at least one jiffy.
+ */
+ period = max_t(unsigned long, (10 * HZ / bps), 1);
+- period = min_t(unsigned long, period, timeout);
++ if (timeout)
++ period = min_t(unsigned long, period, timeout);
+
+ dev_dbg(&port->dev, "%s - timeout = %u ms, period = %u ms\n",
+ __func__, jiffies_to_msecs(timeout),
+@@ -268,7 +269,7 @@ void usb_serial_generic_wait_until_sent(struct tty_struct *tty, long timeout)
+ schedule_timeout_interruptible(period);
+ if (signal_pending(current))
+ break;
+- if (time_after(jiffies, expire))
++ if (timeout && time_after(jiffies, expire))
+ break;
+ }
+ }
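In generic.c a timeout of 0 now means "no deadline": both the clamping of the poll period and the expiry test are skipped in that case. A userspace rendering of the loop, with fake buffer and clock helpers standing in for tty_chars_in_buffer() and jiffies:

    #include <stdbool.h>
    #include <stdio.h>

    static int pending = 3;       /* fake: characters left in the buffer */
    static unsigned long ticks;   /* fake monotonic clock */

    static bool chars_in_buffer(void) { return pending > 0; }
    static void sleep_ticks(unsigned long t) { ticks += t; pending--; }

    /* timeout == 0 means: no deadline, poll until the buffer drains. */
    static void wait_until_sent(unsigned long period, unsigned long timeout)
    {
        unsigned long expire = ticks + timeout;

        if (timeout && period > timeout)
            period = timeout;     /* never sleep past the deadline */

        while (chars_in_buffer()) {
            sleep_ticks(period);
            if (timeout && (long)(ticks - expire) > 0)
                break;            /* like time_after(jiffies, expire) */
        }
    }

    int main(void)
    {
        wait_until_sent(10, 0);   /* no deadline: drains all 3 characters */
        printf("pending=%d ticks=%lu\n", pending, ticks);
        return 0;
    }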
+diff --git a/drivers/usb/serial/mxuport.c b/drivers/usb/serial/mxuport.c
+index ab1d690274ae..460a40669967 100644
+--- a/drivers/usb/serial/mxuport.c
++++ b/drivers/usb/serial/mxuport.c
+@@ -1284,7 +1284,8 @@ static int mxuport_open(struct tty_struct *tty, struct usb_serial_port *port)
+ }
+
+ /* Initial port termios */
+- mxuport_set_termios(tty, port, NULL);
++ if (tty)
++ mxuport_set_termios(tty, port, NULL);
+
+ /*
+ * TODO: use RQ_VENDOR_GET_MSR, once we know what it
+diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
+index 475723c006f9..19842370a07f 100644
+--- a/drivers/usb/serial/usb-serial.c
++++ b/drivers/usb/serial/usb-serial.c
+@@ -940,8 +940,9 @@ static int usb_serial_probe(struct usb_interface *interface,
+ port = serial->port[i];
+ if (kfifo_alloc(&port->write_fifo, PAGE_SIZE, GFP_KERNEL))
+ goto probe_error;
+- buffer_size = max_t(int, serial->type->bulk_out_size,
+- usb_endpoint_maxp(endpoint));
++ buffer_size = serial->type->bulk_out_size;
++ if (!buffer_size)
++ buffer_size = usb_endpoint_maxp(endpoint);
+ port->bulk_out_size = buffer_size;
+ port->bulk_out_endpointAddress = endpoint->bEndpointAddress;
+
+diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
+index 2706a434fdbb..cd047d0cc7a6 100644
+--- a/drivers/usb/storage/unusual_uas.h
++++ b/drivers/usb/storage/unusual_uas.h
+@@ -103,6 +103,13 @@ UNUSUAL_DEV(0x0bc2, 0xab2a, 0x0000, 0x9999,
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_NO_ATA_1X),
+
++/* Reported-by: Tom Arild Naess <tanaess@gmail.com> */
++UNUSUAL_DEV(0x152d, 0x0539, 0x0000, 0x9999,
++ "JMicron",
++ "JMS539",
++ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
++ US_FL_NO_REPORT_OPCODES),
++
+ /* Reported-by: Claudio Bizzarri <claudio.bizzarri@gmail.com> */
+ UNUSUAL_DEV(0x152d, 0x0567, 0x0000, 0x9999,
+ "JMicron",
+diff --git a/fs/autofs4/dev-ioctl.c b/fs/autofs4/dev-ioctl.c
+index aaf96cb25452..ac7d921ed984 100644
+--- a/fs/autofs4/dev-ioctl.c
++++ b/fs/autofs4/dev-ioctl.c
+@@ -95,7 +95,7 @@ static int check_dev_ioctl_version(int cmd, struct autofs_dev_ioctl *param)
+ */
+ static struct autofs_dev_ioctl *copy_dev_ioctl(struct autofs_dev_ioctl __user *in)
+ {
+- struct autofs_dev_ioctl tmp;
++ struct autofs_dev_ioctl tmp, *res;
+
+ if (copy_from_user(&tmp, in, sizeof(tmp)))
+ return ERR_PTR(-EFAULT);
+@@ -106,7 +106,11 @@ static struct autofs_dev_ioctl *copy_dev_ioctl(struct autofs_dev_ioctl __user *i
+ if (tmp.size > (PATH_MAX + sizeof(tmp)))
+ return ERR_PTR(-ENAMETOOLONG);
+
+- return memdup_user(in, tmp.size);
++ res = memdup_user(in, tmp.size);
++ if (!IS_ERR(res))
++ res->size = tmp.size;
++
++ return res;
+ }
+
+ static inline void free_dev_ioctl(struct autofs_dev_ioctl *param)
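copy_dev_ioctl() is a double-fetch defence: it first copies only the fixed header to learn and bounds-check the caller's declared size, then copies the full structure, and the fix re-stamps the validated size into the duplicate so a concurrent userspace write between the two copies cannot smuggle a different one in. The shape of it, with memcpy() standing in for copy_from_user()/memdup_user():

    #include <stdlib.h>
    #include <string.h>

    struct hdr {
        size_t size;            /* caller-declared total size */
    };

    #define MAX_TOTAL 4096      /* stand-in for PATH_MAX + sizeof(tmp) */

    static struct hdr *copy_ioctl(const void *user, size_t user_len)
    {
        struct hdr tmp, *res;

        if (user_len < sizeof(tmp))
            return NULL;
        memcpy(&tmp, user, sizeof(tmp));    /* first fetch: header only */

        if (tmp.size < sizeof(tmp) || tmp.size > MAX_TOTAL ||
            tmp.size > user_len)
            return NULL;

        res = malloc(tmp.size);
        if (!res)
            return NULL;
        memcpy(res, user, tmp.size);        /* second fetch: full struct */

        res->size = tmp.size;   /* re-stamp the validated size (the actual fix) */
        return res;
    }

    int main(void)
    {
        struct { struct hdr h; char path[8]; } in = { { sizeof(in) }, "ok" };
        struct hdr *res = copy_ioctl(&in, sizeof(in));
        int ok = res != NULL;

        free(res);
        return ok ? 0 : 1;
    }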
+diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
+index a18ceabd99a8..5193c7844315 100644
+--- a/fs/btrfs/file.c
++++ b/fs/btrfs/file.c
+@@ -1803,22 +1803,10 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
+ mutex_unlock(&inode->i_mutex);
+
+ /*
+- * we want to make sure fsync finds this change
+- * but we haven't joined a transaction running right now.
+- *
+- * Later on, someone is sure to update the inode and get the
+- * real transid recorded.
+- *
+- * We set last_trans now to the fs_info generation + 1,
+- * this will either be one more than the running transaction
+- * or the generation used for the next transaction if there isn't
+- * one running right now.
+- *
+ * We also have to set last_sub_trans to the current log transid,
+ * otherwise subsequent syncs to a file that's been synced in this
+ * transaction will appear to have already occurred.
+ */
+- BTRFS_I(inode)->last_trans = root->fs_info->generation + 1;
+ BTRFS_I(inode)->last_sub_trans = root->log_transid;
+ if (num_written > 0) {
+ err = generic_write_sync(file, pos, num_written);
+@@ -1954,25 +1942,37 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
+ atomic_inc(&root->log_batch);
+
+ /*
+- * check the transaction that last modified this inode
+- * and see if its already been committed
+- */
+- if (!BTRFS_I(inode)->last_trans) {
+- mutex_unlock(&inode->i_mutex);
+- goto out;
+- }
+-
+- /*
+- * if the last transaction that changed this file was before
+- * the current transaction, we can bail out now without any
+- * syncing
++ * If the last transaction that changed this file was before the current
++ * transaction and we have the full sync flag set in our inode, we can
++ * bail out now without any syncing.
++ *
++ * Note that we can't bail out if the full sync flag isn't set. This is
++ * because when the full sync flag is set we start all ordered extents
++ * and wait for them to fully complete - when they complete they update
++ * the inode's last_trans field through:
++ *
++ * btrfs_finish_ordered_io() ->
++ * btrfs_update_inode_fallback() ->
++ * btrfs_update_inode() ->
++ * btrfs_set_inode_last_trans()
++ *
++ * So we are sure that last_trans is up to date and can do this check to
++ * bail out safely. For the fast path, when the full sync flag is not
++ * set in our inode, we can not do it because we start only our ordered
++ * extents and don't wait for them to complete (that is when
++ * btrfs_finish_ordered_io runs), so here at this point their last_trans
++ * value might be less than or equal to fs_info->last_trans_committed,
++ * and setting a speculative last_trans for an inode when a buffered
++ * write is made (such as fs_info->generation + 1) would not
++ * be reliable since after setting the value and before fsync is called
++ * any number of transactions can start and commit (transaction kthread
++ * commits the current transaction periodically), and a transaction
++ * commit does not start nor waits for ordered extents to complete.
+ */
+ smp_mb();
+ if (btrfs_inode_in_log(inode, root->fs_info->generation) ||
+- BTRFS_I(inode)->last_trans <=
+- root->fs_info->last_trans_committed) {
+- BTRFS_I(inode)->last_trans = 0;
+-
++ (full_sync && BTRFS_I(inode)->last_trans <=
++ root->fs_info->last_trans_committed)) {
+ /*
+ * We've had everything committed since the last time we were
+ * modified so clear this flag in case it was set for whatever
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index d23362f4464e..edaa6178b4ec 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -7151,7 +7151,6 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
+ ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
+ em->block_start != EXTENT_MAP_HOLE)) {
+ int type;
+- int ret;
+ u64 block_start, orig_start, orig_block_len, ram_bytes;
+
+ if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
+diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
+index 269e21dd1506..b23d024c0234 100644
+--- a/fs/btrfs/ordered-data.c
++++ b/fs/btrfs/ordered-data.c
+@@ -442,9 +442,7 @@ void btrfs_get_logged_extents(struct inode *inode,
+ spin_lock_irq(&tree->lock);
+ for (n = rb_first(&tree->tree); n; n = rb_next(n)) {
+ ordered = rb_entry(n, struct btrfs_ordered_extent, rb_node);
+- if (!list_empty(&ordered->log_list))
+- continue;
+- if (test_bit(BTRFS_ORDERED_LOGGED, &ordered->flags))
++ if (test_and_set_bit(BTRFS_ORDERED_LOGGED, &ordered->flags))
+ continue;
+ list_add_tail(&ordered->log_list, logged_list);
+ atomic_inc(&ordered->refs);
+@@ -501,8 +499,7 @@ void btrfs_wait_logged_extents(struct btrfs_trans_handle *trans,
+ wait_event(ordered->wait, test_bit(BTRFS_ORDERED_IO_DONE,
+ &ordered->flags));
+
+- if (!test_and_set_bit(BTRFS_ORDERED_LOGGED, &ordered->flags))
+- list_add_tail(&ordered->trans_list, &trans->ordered);
++ list_add_tail(&ordered->trans_list, &trans->ordered);
+ spin_lock_irq(&log->log_extents_lock[index]);
+ }
+ spin_unlock_irq(&log->log_extents_lock[index]);
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index ee1c60454a5f..8b40b35e5e0e 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -1010,7 +1010,7 @@ again:
+ base = btrfs_item_ptr_offset(leaf, path->slots[0]);
+
+ while (cur_offset < item_size) {
+- extref = (struct btrfs_inode_extref *)base + cur_offset;
++ extref = (struct btrfs_inode_extref *)(base + cur_offset);
+
+ victim_name_len = btrfs_inode_extref_name_len(leaf, extref);
+
+diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
+index 05f2960ed7c3..6f0ce531e221 100644
+--- a/fs/debugfs/inode.c
++++ b/fs/debugfs/inode.c
+@@ -246,10 +246,19 @@ static int debugfs_show_options(struct seq_file *m, struct dentry *root)
+ return 0;
+ }
+
++static void debugfs_evict_inode(struct inode *inode)
++{
++ truncate_inode_pages_final(&inode->i_data);
++ clear_inode(inode);
++ if (S_ISLNK(inode->i_mode))
++ kfree(inode->i_private);
++}
++
+ static const struct super_operations debugfs_super_operations = {
+ .statfs = simple_statfs,
+ .remount_fs = debugfs_remount,
+ .show_options = debugfs_show_options,
++ .evict_inode = debugfs_evict_inode,
+ };
+
+ static int debug_fill_super(struct super_block *sb, void *data, int silent)
+@@ -466,23 +475,14 @@ static int __debugfs_remove(struct dentry *dentry, struct dentry *parent)
+ int ret = 0;
+
+ if (debugfs_positive(dentry)) {
+- if (dentry->d_inode) {
+- dget(dentry);
+- switch (dentry->d_inode->i_mode & S_IFMT) {
+- case S_IFDIR:
+- ret = simple_rmdir(parent->d_inode, dentry);
+- break;
+- case S_IFLNK:
+- kfree(dentry->d_inode->i_private);
+- /* fall through */
+- default:
+- simple_unlink(parent->d_inode, dentry);
+- break;
+- }
+- if (!ret)
+- d_delete(dentry);
+- dput(dentry);
+- }
++ dget(dentry);
++ if (S_ISDIR(dentry->d_inode->i_mode))
++ ret = simple_rmdir(parent->d_inode, dentry);
++ else
++ simple_unlink(parent->d_inode, dentry);
++ if (!ret)
++ d_delete(dentry);
++ dput(dentry);
+ }
+ return ret;
+ }
+diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
+index 54742f9a67a8..77a3db3791c7 100644
+--- a/fs/ecryptfs/file.c
++++ b/fs/ecryptfs/file.c
+@@ -303,9 +303,22 @@ ecryptfs_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ struct file *lower_file = ecryptfs_file_to_lower(file);
+ long rc = -ENOTTY;
+
+- if (lower_file->f_op->unlocked_ioctl)
++ if (!lower_file->f_op->unlocked_ioctl)
++ return rc;
++
++ switch (cmd) {
++ case FITRIM:
++ case FS_IOC_GETFLAGS:
++ case FS_IOC_SETFLAGS:
++ case FS_IOC_GETVERSION:
++ case FS_IOC_SETVERSION:
+ rc = lower_file->f_op->unlocked_ioctl(lower_file, cmd, arg);
+- return rc;
++ fsstack_copy_attr_all(file_inode(file), file_inode(lower_file));
++
++ return rc;
++ default:
++ return rc;
++ }
+ }
+
+ #ifdef CONFIG_COMPAT
+@@ -315,9 +328,22 @@ ecryptfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ struct file *lower_file = ecryptfs_file_to_lower(file);
+ long rc = -ENOIOCTLCMD;
+
+- if (lower_file->f_op->compat_ioctl)
++ if (!lower_file->f_op->compat_ioctl)
++ return rc;
++
++ switch (cmd) {
++ case FITRIM:
++ case FS_IOC32_GETFLAGS:
++ case FS_IOC32_SETFLAGS:
++ case FS_IOC32_GETVERSION:
++ case FS_IOC32_SETVERSION:
+ rc = lower_file->f_op->compat_ioctl(lower_file, cmd, arg);
+- return rc;
++ fsstack_copy_attr_all(file_inode(file), file_inode(lower_file));
++
++ return rc;
++ default:
++ return rc;
++ }
+ }
+ #endif
+
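Rather than blindly forwarding every ioctl to the lower filesystem, the eCryptfs hunks enumerate the commands known to be safe and copy the lower inode's attributes back after a successful call. The whitelist itself is a plain switch with shared cases (command values here are made up):

    #include <errno.h>

    enum {
        CMD_GETFLAGS = 1, CMD_SETFLAGS, CMD_GETVERSION, CMD_SETVERSION, CMD_OTHER
    };

    static long lower_ioctl(unsigned int cmd, unsigned long arg)
    {
        (void)arg;
        return cmd;     /* pretend the lower filesystem handled it */
    }

    static long filtered_ioctl(unsigned int cmd, unsigned long arg)
    {
        switch (cmd) {
        case CMD_GETFLAGS:      /* fall through: all whitelisted commands */
        case CMD_SETFLAGS:
        case CMD_GETVERSION:
        case CMD_SETVERSION:
            return lower_ioctl(cmd, arg);  /* + copy attrs back, in the real code */
        default:
            return -ENOTTY;                /* everything else is refused */
        }
    }

    int main(void)
    {
        return filtered_ioctl(CMD_OTHER, 0) == -ENOTTY ? 0 : 1;
    }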
+diff --git a/fs/gfs2/acl.c b/fs/gfs2/acl.c
+index 3088e2a38e30..7b3143064af1 100644
+--- a/fs/gfs2/acl.c
++++ b/fs/gfs2/acl.c
+@@ -73,7 +73,7 @@ int gfs2_set_acl(struct inode *inode, struct posix_acl *acl, int type)
+
+ BUG_ON(name == NULL);
+
+- if (acl->a_count > GFS2_ACL_MAX_ENTRIES(GFS2_SB(inode)))
++ if (acl && acl->a_count > GFS2_ACL_MAX_ENTRIES(GFS2_SB(inode)))
+ return -E2BIG;
+
+ if (type == ACL_TYPE_ACCESS) {
+diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
+index 7f3f60641344..4030b558b07e 100644
+--- a/fs/nfs/delegation.c
++++ b/fs/nfs/delegation.c
+@@ -177,8 +177,8 @@ void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred,
+ &delegation->flags);
+ NFS_I(inode)->delegation_state = delegation->type;
+ spin_unlock(&delegation->lock);
+- put_rpccred(oldcred);
+ rcu_read_unlock();
++ put_rpccred(oldcred);
+ trace_nfs4_reclaim_delegation(inode, res->delegation_type);
+ } else {
+ /* We appear to have raced with a delegation return. */
+diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
+index 6e62155abf26..7a8d67cd823d 100644
+--- a/fs/nfs/dir.c
++++ b/fs/nfs/dir.c
+@@ -469,6 +469,8 @@ void nfs_prime_dcache(struct dentry *parent, struct nfs_entry *entry)
+ struct inode *inode;
+ int status;
+
++ if (!(entry->fattr->valid & NFS_ATTR_FATTR_FSID))
++ return;
+ if (filename.name[0] == '.') {
+ if (filename.len == 1)
+ return;
+@@ -479,6 +481,10 @@ void nfs_prime_dcache(struct dentry *parent, struct nfs_entry *entry)
+
+ dentry = d_lookup(parent, &filename);
+ if (dentry != NULL) {
++ /* Is there a mountpoint here? If so, just exit */
++ if (!nfs_fsid_equal(&NFS_SB(dentry->d_sb)->fsid,
++ &entry->fattr->fsid))
++ goto out;
+ if (nfs_same_file(dentry, entry)) {
+ nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
+ status = nfs_refresh_inode(dentry->d_inode, entry->fattr);
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index d66e3ad1de48..5c9c13ee72f9 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -1650,7 +1650,7 @@ __destroy_client(struct nfs4_client *clp)
+ nfs4_put_stid(&dp->dl_stid);
+ }
+ while (!list_empty(&clp->cl_revoked)) {
+- dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
++ dp = list_entry(clp->cl_revoked.next, struct nfs4_delegation, dl_recall_lru);
+ list_del_init(&dp->dl_recall_lru);
+ nfs4_put_stid(&dp->dl_stid);
+ }
+diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c
+index b2e3ff347620..ecdbae19a766 100644
+--- a/fs/nilfs2/btree.c
++++ b/fs/nilfs2/btree.c
+@@ -31,6 +31,8 @@
+ #include "alloc.h"
+ #include "dat.h"
+
++static void __nilfs_btree_init(struct nilfs_bmap *bmap);
++
+ static struct nilfs_btree_path *nilfs_btree_alloc_path(void)
+ {
+ struct nilfs_btree_path *path;
+@@ -368,6 +370,34 @@ static int nilfs_btree_node_broken(const struct nilfs_btree_node *node,
+ return ret;
+ }
+
++/**
++ * nilfs_btree_root_broken - verify consistency of btree root node
++ * @node: btree root node to be examined
++ * @ino: inode number
++ *
++ * Return Value: If node is broken, 1 is returned. Otherwise, 0 is returned.
++ */
++static int nilfs_btree_root_broken(const struct nilfs_btree_node *node,
++ unsigned long ino)
++{
++ int level, flags, nchildren;
++ int ret = 0;
++
++ level = nilfs_btree_node_get_level(node);
++ flags = nilfs_btree_node_get_flags(node);
++ nchildren = nilfs_btree_node_get_nchildren(node);
++
++ if (unlikely(level < NILFS_BTREE_LEVEL_NODE_MIN ||
++ level > NILFS_BTREE_LEVEL_MAX ||
++ nchildren < 0 ||
++ nchildren > NILFS_BTREE_ROOT_NCHILDREN_MAX)) {
++ pr_crit("NILFS: bad btree root (inode number=%lu): level = %d, flags = 0x%x, nchildren = %d\n",
++ ino, level, flags, nchildren);
++ ret = 1;
++ }
++ return ret;
++}
++
+ int nilfs_btree_broken_node_block(struct buffer_head *bh)
+ {
+ int ret;
+@@ -1713,7 +1743,7 @@ nilfs_btree_commit_convert_and_insert(struct nilfs_bmap *btree,
+
+ /* convert and insert */
+ dat = NILFS_BMAP_USE_VBN(btree) ? nilfs_bmap_get_dat(btree) : NULL;
+- nilfs_btree_init(btree);
++ __nilfs_btree_init(btree);
+ if (nreq != NULL) {
+ nilfs_bmap_commit_alloc_ptr(btree, dreq, dat);
+ nilfs_bmap_commit_alloc_ptr(btree, nreq, dat);
+@@ -2294,12 +2324,23 @@ static const struct nilfs_bmap_operations nilfs_btree_ops_gc = {
+ .bop_gather_data = NULL,
+ };
+
+-int nilfs_btree_init(struct nilfs_bmap *bmap)
++static void __nilfs_btree_init(struct nilfs_bmap *bmap)
+ {
+ bmap->b_ops = &nilfs_btree_ops;
+ bmap->b_nchildren_per_block =
+ NILFS_BTREE_NODE_NCHILDREN_MAX(nilfs_btree_node_size(bmap));
+- return 0;
++}
++
++int nilfs_btree_init(struct nilfs_bmap *bmap)
++{
++ int ret = 0;
++
++ __nilfs_btree_init(bmap);
++
++ if (nilfs_btree_root_broken(nilfs_btree_get_root(bmap),
++ bmap->b_inode->i_ino))
++ ret = -EIO;
++ return ret;
+ }
+
+ void nilfs_btree_init_gc(struct nilfs_bmap *bmap)
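nilfs_btree_root_broken() applies the same sanity limits the per-node checker uses, but with the root's smaller fan-out, and nilfs_btree_init() now fails with -EIO rather than trusting on-disk metadata. A generic validate-before-use rendering with invented limits:

    #include <errno.h>
    #include <stdio.h>

    #define LEVEL_MIN           1
    #define LEVEL_MAX           14
    #define ROOT_NCHILDREN_MAX  3   /* made-up limit for the sketch */

    struct btree_root { int level; int nchildren; };

    static int root_broken(const struct btree_root *r)
    {
        if (r->level < LEVEL_MIN || r->level > LEVEL_MAX ||
            r->nchildren < 0 || r->nchildren > ROOT_NCHILDREN_MAX) {
            fprintf(stderr, "bad btree root: level=%d nchildren=%d\n",
                    r->level, r->nchildren);
            return 1;
        }
        return 0;
    }

    static int btree_init(const struct btree_root *r)
    {
        /* ... set up operations as before ... */
        return root_broken(r) ? -EIO : 0;   /* reject corrupt metadata */
    }

    int main(void)
    {
        struct btree_root bad = { .level = 99, .nchildren = 1 };

        return btree_init(&bad) == -EIO ? 0 : 1;
    }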
+diff --git a/fs/proc/generic.c b/fs/proc/generic.c
+index 317b72641ebf..228cc4eeeb4a 100644
+--- a/fs/proc/generic.c
++++ b/fs/proc/generic.c
+@@ -19,7 +19,6 @@
+ #include <linux/mount.h>
+ #include <linux/init.h>
+ #include <linux/idr.h>
+-#include <linux/namei.h>
+ #include <linux/bitops.h>
+ #include <linux/spinlock.h>
+ #include <linux/completion.h>
+@@ -162,17 +161,6 @@ void proc_free_inum(unsigned int inum)
+ spin_unlock_irqrestore(&proc_inum_lock, flags);
+ }
+
+-static void *proc_follow_link(struct dentry *dentry, struct nameidata *nd)
+-{
+- nd_set_link(nd, __PDE_DATA(dentry->d_inode));
+- return NULL;
+-}
+-
+-static const struct inode_operations proc_link_inode_operations = {
+- .readlink = generic_readlink,
+- .follow_link = proc_follow_link,
+-};
+-
+ /*
+ * Don't create negative dentries here, return -ENOENT by hand
+ * instead.
+diff --git a/fs/proc/inode.c b/fs/proc/inode.c
+index 333080d7a671..54ed31cfb398 100644
+--- a/fs/proc/inode.c
++++ b/fs/proc/inode.c
+@@ -23,6 +23,7 @@
+ #include <linux/slab.h>
+ #include <linux/mount.h>
+ #include <linux/magic.h>
++#include <linux/namei.h>
+
+ #include <asm/uaccess.h>
+
+@@ -401,6 +402,26 @@ static const struct file_operations proc_reg_file_ops_no_compat = {
+ };
+ #endif
+
++static void *proc_follow_link(struct dentry *dentry, struct nameidata *nd)
++{
++ struct proc_dir_entry *pde = PDE(dentry->d_inode);
++ if (unlikely(!use_pde(pde)))
++ return ERR_PTR(-EINVAL);
++ nd_set_link(nd, pde->data);
++ return pde;
++}
++
++static void proc_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
++{
++ unuse_pde(p);
++}
++
++const struct inode_operations proc_link_inode_operations = {
++ .readlink = generic_readlink,
++ .follow_link = proc_follow_link,
++ .put_link = proc_put_link,
++};
++
+ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
+ {
+ struct inode *inode = new_inode_pseudo(sb);
+diff --git a/fs/proc/internal.h b/fs/proc/internal.h
+index aa7a0ee182e1..73f8190de795 100644
+--- a/fs/proc/internal.h
++++ b/fs/proc/internal.h
+@@ -199,6 +199,7 @@ struct pde_opener {
+ int closing;
+ struct completion *c;
+ };
++extern const struct inode_operations proc_link_inode_operations;
+
+ extern const struct inode_operations proc_pid_link_inode_operations;
+
+diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
+index a70d45647898..47116c87ab10 100644
+--- a/include/drm/i915_pciids.h
++++ b/include/drm/i915_pciids.h
+@@ -214,9 +214,9 @@
+ INTEL_VGA_DEVICE((((gt) - 1) << 4) | (id), info)
+
+ #define _INTEL_BDW_M_IDS(gt, info) \
+- _INTEL_BDW_M(gt, 0x1602, info), /* ULT */ \
++ _INTEL_BDW_M(gt, 0x1602, info), /* Halo */ \
+ _INTEL_BDW_M(gt, 0x1606, info), /* ULT */ \
+- _INTEL_BDW_M(gt, 0x160B, info), /* Iris */ \
++ _INTEL_BDW_M(gt, 0x160B, info), /* ULT */ \
+ _INTEL_BDW_M(gt, 0x160E, info) /* ULX */
+
+ #define _INTEL_BDW_D_IDS(gt, info) \
+diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h
+index 9bb547c7bce7..704a1ab8240c 100644
+--- a/include/linux/usb/serial.h
++++ b/include/linux/usb/serial.h
+@@ -190,8 +190,7 @@ static inline void usb_set_serial_data(struct usb_serial *serial, void *data)
+ * @num_ports: the number of different ports this device will have.
+ * @bulk_in_size: minimum number of bytes to allocate for bulk-in buffer
+ * (0 = end-point size)
+- * @bulk_out_size: minimum number of bytes to allocate for bulk-out buffer
+- * (0 = end-point size)
++ * @bulk_out_size: bytes to allocate for bulk-out buffer (0 = end-point size)
+ * @calc_num_ports: pointer to a function to determine how many ports this
+ * device has dynamically. It will be called after the probe()
+ * callback is called, but before attach()
+diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
+index 23c518a0340c..1fbd69cfd0b7 100644
+--- a/include/target/target_core_base.h
++++ b/include/target/target_core_base.h
+@@ -409,7 +409,7 @@ struct t10_reservation {
+ /* Activate Persistence across Target Power Loss enabled
+ * for SCSI device */
+ int pr_aptpl_active;
+-#define PR_APTPL_BUF_LEN 8192
++#define PR_APTPL_BUF_LEN 262144
+ u32 pr_generation;
+ spinlock_t registration_lock;
+ spinlock_t aptpl_reg_lock;
+diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
+index aece1346ceb7..4ad10baecd4d 100644
+--- a/include/trace/events/kmem.h
++++ b/include/trace/events/kmem.h
+@@ -268,11 +268,11 @@ TRACE_EVENT(mm_page_alloc_extfrag,
+
+ TP_PROTO(struct page *page,
+ int alloc_order, int fallback_order,
+- int alloc_migratetype, int fallback_migratetype, int new_migratetype),
++ int alloc_migratetype, int fallback_migratetype),
+
+ TP_ARGS(page,
+ alloc_order, fallback_order,
+- alloc_migratetype, fallback_migratetype, new_migratetype),
++ alloc_migratetype, fallback_migratetype),
+
+ TP_STRUCT__entry(
+ __field( struct page *, page )
+@@ -289,7 +289,8 @@ TRACE_EVENT(mm_page_alloc_extfrag,
+ __entry->fallback_order = fallback_order;
+ __entry->alloc_migratetype = alloc_migratetype;
+ __entry->fallback_migratetype = fallback_migratetype;
+- __entry->change_ownership = (new_migratetype == alloc_migratetype);
++ __entry->change_ownership = (alloc_migratetype ==
++ get_pageblock_migratetype(page));
+ ),
+
+ TP_printk("page=%p pfn=%lu alloc_order=%d fallback_order=%d pageblock_order=%d alloc_migratetype=%d fallback_migratetype=%d fragmenting=%d change_ownership=%d",
+diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
+index 7c98873a3077..41d53e515914 100644
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -1193,7 +1193,8 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
+ set_current_state(TASK_RUNNING);
+
+ if (unlikely(ret)) {
+- remove_waiter(lock, &waiter);
++ if (rt_mutex_has_waiters(lock))
++ remove_waiter(lock, &waiter);
+ rt_mutex_handle_deadlock(ret, chwalk, &waiter);
+ }
+
+diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
+index 8a2e230fb86a..eae160dd669d 100644
+--- a/kernel/sched/auto_group.c
++++ b/kernel/sched/auto_group.c
+@@ -87,8 +87,7 @@ static inline struct autogroup *autogroup_create(void)
+ * so we don't have to move tasks around upon policy change,
+ * or flail around trying to allocate bandwidth on the fly.
+ * A bandwidth exception in __sched_setscheduler() allows
+- * the policy change to proceed. Thereafter, task_group()
+- * returns &root_task_group, so zero bandwidth is required.
++ * the policy change to proceed.
+ */
+ free_rt_sched_group(tg);
+ tg->rt_se = root_task_group.rt_se;
+@@ -115,9 +114,6 @@ bool task_wants_autogroup(struct task_struct *p, struct task_group *tg)
+ if (tg != &root_task_group)
+ return false;
+
+- if (p->sched_class != &fair_sched_class)
+- return false;
+-
+ /*
+ * We can only assume the task group can't go away on us if
+ * autogroup_move_group() can see us on ->thread_group list.
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index efdca2f08222..9f5ed5e70eaa 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -490,6 +490,11 @@ static __init void init_hrtick(void)
+ */
+ void hrtick_start(struct rq *rq, u64 delay)
+ {
++ /*
++ * Don't schedule slices shorter than 10000ns, that just
++ * doesn't make sense. Rely on vruntime for fairness.
++ */
++ delay = max_t(u64, delay, 10000LL);
+ __hrtimer_start_range_ns(&rq->hrtick_timer, ns_to_ktime(delay), 0,
+ HRTIMER_MODE_REL_PINNED, 0);
+ }
+@@ -7465,6 +7470,12 @@ static inline int tg_has_rt_tasks(struct task_group *tg)
+ {
+ struct task_struct *g, *p;
+
++ /*
++ * Autogroups do not have RT tasks; see autogroup_create().
++ */
++ if (task_group_is_autogroup(tg))
++ return 0;
++
+ for_each_process_thread(g, p) {
+ if (rt_task(p) && task_group(p) == tg)
+ return 1;
+diff --git a/kernel/sysctl.c b/kernel/sysctl.c
+index 15f2511a1b7c..cd0e835ecb85 100644
+--- a/kernel/sysctl.c
++++ b/kernel/sysctl.c
+@@ -1232,7 +1232,6 @@ static struct ctl_table vm_table[] = {
+ .maxlen = sizeof(unsigned long),
+ .mode = 0644,
+ .proc_handler = hugetlb_sysctl_handler,
+- .extra1 = &zero,
+ },
+ #ifdef CONFIG_NUMA
+ {
+@@ -1241,7 +1240,6 @@ static struct ctl_table vm_table[] = {
+ .maxlen = sizeof(unsigned long),
+ .mode = 0644,
+ .proc_handler = &hugetlb_mempolicy_sysctl_handler,
+- .extra1 = &zero,
+ },
+ #endif
+ {
+@@ -1264,7 +1262,6 @@ static struct ctl_table vm_table[] = {
+ .maxlen = sizeof(unsigned long),
+ .mode = 0644,
+ .proc_handler = hugetlb_overcommit_handler,
+- .extra1 = &zero,
+ },
+ #endif
+ {
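The dropped .extra1 bounds pointed an int named zero at handlers that parse unsigned long values, so the bound was read with the wrong width. A userspace sketch of the size mismatch; the variable names are illustrative, not the kernel's:

#include <stdio.h>

static int zero_int;           /* what the removed .extra1 pointed at */
static unsigned long zero_ul;  /* what an unsigned-long handler needs */

int main(void)
{
    printf("sizeof(int)=%zu sizeof(unsigned long)=%zu\n",
           sizeof zero_int, sizeof zero_ul);
    /* On LP64 targets, reading sizeof(unsigned long) bytes through a
     * pointer to zero_int over-reads the object; dropping the bound,
     * as the patch does, removes the mismatch entirely. */
    return 0;
}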
+diff --git a/mm/compaction.c b/mm/compaction.c
+index f9792ba3537c..b47f08e159d4 100644
+--- a/mm/compaction.c
++++ b/mm/compaction.c
+@@ -1027,8 +1027,10 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
+ low_pfn = isolate_migratepages_block(cc, low_pfn, end_pfn,
+ isolate_mode);
+
+- if (!low_pfn || cc->contended)
++ if (!low_pfn || cc->contended) {
++ acct_isolated(zone, cc);
+ return ISOLATE_ABORT;
++ }
+
+ /*
+ * Either we isolated something and proceed with migration. Or
+@@ -1100,7 +1102,7 @@ static int compact_finished(struct zone *zone, struct compact_control *cc,
+ return COMPACT_PARTIAL;
+
+ /* Job done if allocation would set block type */
+- if (cc->order >= pageblock_order && area->nr_free)
++ if (order >= pageblock_order && area->nr_free)
+ return COMPACT_PARTIAL;
+ }
+
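The first hunk flushes isolation accounting on the abort path, not only on the normal return; the second compares against the loop's current order rather than cc->order. A toy sketch of the first pattern (the names are stand-ins for acct_isolated(), not kernel API):

#include <stdio.h>

static long pending, total;

static void flush_pending(void) { total += pending; pending = 0; }

static int scan(int abort_early)
{
    pending += 32;            /* pages isolated so far */
    if (abort_early) {
        flush_pending();      /* the added call before ISOLATE_ABORT */
        return -1;
    }
    flush_pending();
    return 0;
}

int main(void)
{
    scan(1);
    printf("total=%ld pending=%ld\n", total, pending);  /* 32, 0 */
    return 0;
}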
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index f08fec71ec5a..4cacc6a8a6c1 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -2653,9 +2653,10 @@ again:
+ goto unlock;
+
+ /*
+- * HWPoisoned hugepage is already unmapped and dropped reference
++ * Migrating hugepage or HWPoisoned hugepage is already
++ * unmapped and its refcount is dropped, so just clear pte here.
+ */
+- if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
++ if (unlikely(!pte_present(pte))) {
+ huge_pte_clear(mm, address, ptep);
+ goto unlock;
+ }
+@@ -3128,6 +3129,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+ struct page *pagecache_page = NULL;
+ struct hstate *h = hstate_vma(vma);
+ struct address_space *mapping;
++ int need_wait_lock = 0;
+
+ address &= huge_page_mask(h);
+
+@@ -3166,6 +3168,16 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+ ret = 0;
+
+ /*
++ * entry could be a migration/hwpoison entry at this point, so this
++ * check prevents the kernel from proceeding below on the assumption
++ * that we have an active hugepage in the pagecache. This goto expects
++ * the 2nd page fault, and the is_hugetlb_entry_(migration|hwpoisoned)
++ * check will properly handle it.
++ */
++ if (!pte_present(entry))
++ goto out_mutex;
++
++ /*
+ * If we are going to COW the mapping later, we examine the pending
+ * reservations for this page now. This will ensure that any
+ * allocations necessary to record that reservation occur outside the
+@@ -3184,30 +3196,31 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+ vma, address);
+ }
+
++ ptl = huge_pte_lock(h, mm, ptep);
++
++ /* Check for a racing update before calling hugetlb_cow */
++ if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
++ goto out_ptl;
++
+ /*
+ * hugetlb_cow() requires page locks of pte_page(entry) and
+ * pagecache_page, so here we need to take the former one
+ * when page != pagecache_page or !pagecache_page.
+- * Note that locking order is always pagecache_page -> page,
+- * so no worry about deadlock.
+ */
+ page = pte_page(entry);
+- get_page(page);
+ if (page != pagecache_page)
+- lock_page(page);
+-
+- ptl = huge_pte_lockptr(h, mm, ptep);
+- spin_lock(ptl);
+- /* Check for a racing update before calling hugetlb_cow */
+- if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
+- goto out_ptl;
++ if (!trylock_page(page)) {
++ need_wait_lock = 1;
++ goto out_ptl;
++ }
+
++ get_page(page);
+
+ if (flags & FAULT_FLAG_WRITE) {
+ if (!huge_pte_write(entry)) {
+ ret = hugetlb_cow(mm, vma, address, ptep, entry,
+ pagecache_page, ptl);
+- goto out_ptl;
++ goto out_put_page;
+ }
+ entry = huge_pte_mkdirty(entry);
+ }
+@@ -3215,7 +3228,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+ if (huge_ptep_set_access_flags(vma, address, ptep, entry,
+ flags & FAULT_FLAG_WRITE))
+ update_mmu_cache(vma, address, ptep);
+-
++out_put_page:
++ if (page != pagecache_page)
++ unlock_page(page);
++ put_page(page);
+ out_ptl:
+ spin_unlock(ptl);
+
+@@ -3223,12 +3239,17 @@ out_ptl:
+ unlock_page(pagecache_page);
+ put_page(pagecache_page);
+ }
+- if (page != pagecache_page)
+- unlock_page(page);
+- put_page(page);
+-
+ out_mutex:
+ mutex_unlock(&htlb_fault_mutex_table[hash]);
++ /*
++ * Generally it's safe to hold a refcount while waiting for a page
++ * lock. But here we only wait to defer the next page fault and avoid
++ * a busy loop; the page is not touched after it is unlocked and before
++ * the current page fault returns. So we are safe from accessing a
++ * freed page even if we wait here without taking a refcount.
++ */
++ if (need_wait_lock)
++ wait_on_page_locked(page);
+ return ret;
+ }
+
+@@ -3358,7 +3379,26 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
+ spin_unlock(ptl);
+ continue;
+ }
+- if (!huge_pte_none(huge_ptep_get(ptep))) {
++ pte = huge_ptep_get(ptep);
++ if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
++ spin_unlock(ptl);
++ continue;
++ }
++ if (unlikely(is_hugetlb_entry_migration(pte))) {
++ swp_entry_t entry = pte_to_swp_entry(pte);
++
++ if (is_write_migration_entry(entry)) {
++ pte_t newpte;
++
++ make_migration_entry_read(&entry);
++ newpte = swp_entry_to_pte(entry);
++ set_huge_pte_at(mm, address, ptep, newpte);
++ pages++;
++ }
++ spin_unlock(ptl);
++ continue;
++ }
++ if (!huge_pte_none(pte)) {
+ pte = huge_ptep_get_and_clear(mm, address, ptep);
+ pte = pte_mkhuge(huge_pte_modify(pte, newprot));
+ pte = arch_make_huge_pte(pte, vma, NULL, 0);
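The fault path above now takes the page-table lock first and only trylocks the page, deferring to the next fault (via wait_on_page_locked()) when the trylock fails, instead of blocking in the wrong lock order. A minimal pthreads sketch of the trylock-and-defer shape, assuming nothing about the kernel's locking beyond what the hunk shows:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t page_lock = PTHREAD_MUTEX_INITIALIZER;

int main(void)
{
    int need_wait_lock = 0;

    if (pthread_mutex_trylock(&page_lock) != 0) {
        need_wait_lock = 1;                  /* back out, don't block here */
    } else {
        /* ... work that needed both locks ... */
        pthread_mutex_unlock(&page_lock);
    }

    if (need_wait_lock) {
        /* wait for the holder to finish, then let the retry happen */
        pthread_mutex_lock(&page_lock);
        pthread_mutex_unlock(&page_lock);
    }
    printf("done\n");
    return 0;
}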
+diff --git a/mm/memory.c b/mm/memory.c
+index d442584fd281..4ffa7b571fb8 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -3558,7 +3558,7 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
+ if (follow_phys(vma, addr, write, &prot, &phys_addr))
+ return -EINVAL;
+
+- maddr = ioremap_prot(phys_addr, PAGE_SIZE, prot);
++ maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot);
+ if (write)
+ memcpy_toio(maddr + offset, buf, len);
+ else
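The old code always mapped one page, but an access of len bytes starting at offset within that page can cross into the next one. A quick arithmetic check with illustrative macros:

#include <stdio.h>

#define PAGE_SIZE     4096UL
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
    unsigned long offset = 4092, len = 8;   /* access crosses a page */

    printf("old mapping: %lu bytes, needed: %lu bytes\n",
           PAGE_SIZE, PAGE_ALIGN(len + offset));  /* 4096 vs 8192 */
    return 0;
}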
+diff --git a/mm/mmap.c b/mm/mmap.c
+index 1620adbbd77f..3c83bec2274c 100644
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -152,7 +152,7 @@ EXPORT_SYMBOL_GPL(vm_memory_committed);
+ */
+ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
+ {
+- unsigned long free, allowed, reserve;
++ long free, allowed, reserve;
+
+ VM_WARN_ONCE(percpu_counter_read(&vm_committed_as) <
+ -(s64)vm_committed_as_batch * num_online_cpus(),
+@@ -220,7 +220,7 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
+ */
+ if (mm) {
+ reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);
+- allowed -= min(mm->total_vm / 32, reserve);
++ allowed -= min_t(long, mm->total_vm / 32, reserve);
+ }
+
+ if (percpu_counter_read_positive(&vm_committed_as) < allowed)
+diff --git a/mm/nommu.c b/mm/nommu.c
+index bd1808e194a7..b5ba5bc02e4b 100644
+--- a/mm/nommu.c
++++ b/mm/nommu.c
+@@ -1905,7 +1905,7 @@ EXPORT_SYMBOL(unmap_mapping_range);
+ */
+ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
+ {
+- unsigned long free, allowed, reserve;
++ long free, allowed, reserve;
+
+ vm_acct_memory(pages);
+
+@@ -1969,7 +1969,7 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
+ */
+ if (mm) {
+ reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);
+- allowed -= min(mm->total_vm / 32, reserve);
++ allowed -= min_t(long, mm->total_vm / 32, reserve);
+ }
+
+ if (percpu_counter_read_positive(&vm_committed_as) < allowed)
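Both __vm_enough_memory() variants get the same treatment: with unsigned arithmetic, subtracting a reserve larger than allowed wraps around to a huge value, and the overcommit check passes when it should fail. A self-contained demonstration:

#include <stdio.h>

int main(void)
{
    unsigned long allowed = 100, reserve = 4000;  /* illustrative values */

    /* unsigned subtraction wraps to a huge number */
    printf("unsigned: %lu\n", allowed - reserve);
    /* signed subtraction, as the patch switches to, goes negative */
    printf("signed:   %ld\n", (long)allowed - (long)reserve);
    return 0;
}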
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 616a2c956b4b..c32cb64a1277 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -1073,8 +1073,8 @@ static void change_pageblock_range(struct page *pageblock_page,
+ * nor move CMA pages to different free lists. We don't want unmovable pages
+ * to be allocated from MIGRATE_CMA areas.
+ *
+- * Returns the new migratetype of the pageblock (or the same old migratetype
+- * if it was unchanged).
++ * Returns the allocation migratetype if free pages were stolen, or the
++ * fallback migratetype if it was decided not to steal.
+ */
+ static int try_to_steal_freepages(struct zone *zone, struct page *page,
+ int start_type, int fallback_type)
+@@ -1105,12 +1105,10 @@ static int try_to_steal_freepages(struct zone *zone, struct page *page,
+
+ /* Claim the whole block if over half of it is free */
+ if (pages >= (1 << (pageblock_order-1)) ||
+- page_group_by_mobility_disabled) {
+-
++ page_group_by_mobility_disabled)
+ set_pageblock_migratetype(page, start_type);
+- return start_type;
+- }
+
++ return start_type;
+ }
+
+ return fallback_type;
+@@ -1162,7 +1160,7 @@ __rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
+ set_freepage_migratetype(page, new_type);
+
+ trace_mm_page_alloc_extfrag(page, order, current_order,
+- start_migratetype, migratetype, new_type);
++ start_migratetype, migratetype);
+
+ return page;
+ }
+diff --git a/mm/vmstat.c b/mm/vmstat.c
+index 1b12d390dc68..4590aa42b6cd 100644
+--- a/mm/vmstat.c
++++ b/mm/vmstat.c
+@@ -1348,7 +1348,7 @@ static void __init start_shepherd_timer(void)
+ int cpu;
+
+ for_each_possible_cpu(cpu)
+- INIT_DEFERRABLE_WORK(per_cpu_ptr(&vmstat_work, cpu),
++ INIT_DELAYED_WORK(per_cpu_ptr(&vmstat_work, cpu),
+ vmstat_update);
+
+ if (!alloc_cpumask_var(&cpu_stat_off, GFP_KERNEL))
+diff --git a/net/compat.c b/net/compat.c
+index bc8aeefddf3f..c48930373e65 100644
+--- a/net/compat.c
++++ b/net/compat.c
+@@ -738,24 +738,18 @@ static unsigned char nas[21] = {
+
+ COMPAT_SYSCALL_DEFINE3(sendmsg, int, fd, struct compat_msghdr __user *, msg, unsigned int, flags)
+ {
+- if (flags & MSG_CMSG_COMPAT)
+- return -EINVAL;
+ return __sys_sendmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
+ }
+
+ COMPAT_SYSCALL_DEFINE4(sendmmsg, int, fd, struct compat_mmsghdr __user *, mmsg,
+ unsigned int, vlen, unsigned int, flags)
+ {
+- if (flags & MSG_CMSG_COMPAT)
+- return -EINVAL;
+ return __sys_sendmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
+ flags | MSG_CMSG_COMPAT);
+ }
+
+ COMPAT_SYSCALL_DEFINE3(recvmsg, int, fd, struct compat_msghdr __user *, msg, unsigned int, flags)
+ {
+- if (flags & MSG_CMSG_COMPAT)
+- return -EINVAL;
+ return __sys_recvmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
+ }
+
+@@ -778,9 +772,6 @@ COMPAT_SYSCALL_DEFINE5(recvmmsg, int, fd, struct compat_mmsghdr __user *, mmsg,
+ int datagrams;
+ struct timespec ktspec;
+
+- if (flags & MSG_CMSG_COMPAT)
+- return -EINVAL;
+-
+ if (timeout == NULL)
+ return __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
+ flags | MSG_CMSG_COMPAT, NULL);
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 9704a5c1103e..5db3a3f96198 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -943,7 +943,7 @@ bool dev_valid_name(const char *name)
+ return false;
+
+ while (*name) {
+- if (*name == '/' || isspace(*name))
++ if (*name == '/' || *name == ':' || isspace(*name))
+ return false;
+ name++;
+ }
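A userspace re-creation of the tightened validator, assuming the 3.18 shape of dev_valid_name(); ':' is rejected because it collides with interface-alias notation such as "eth0:1":

#include <ctype.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define IFNAMSIZ 16

static bool dev_valid_name(const char *name)
{
    if (*name == '\0' || strlen(name) >= IFNAMSIZ)
        return false;
    if (!strcmp(name, ".") || !strcmp(name, ".."))
        return false;
    while (*name) {
        if (*name == '/' || *name == ':' || isspace((unsigned char)*name))
            return false;
        name++;
    }
    return true;
}

int main(void)
{
    printf("eth0 -> %d, eth0:1 -> %d\n",
           dev_valid_name("eth0"), dev_valid_name("eth0:1"));  /* 1, 0 */
    return 0;
}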
+diff --git a/net/core/flow.c b/net/core/flow.c
+index a0348fde1fdf..1033725be40b 100644
+--- a/net/core/flow.c
++++ b/net/core/flow.c
+@@ -379,7 +379,7 @@ done:
+ static void flow_cache_flush_task(struct work_struct *work)
+ {
+ struct netns_xfrm *xfrm = container_of(work, struct netns_xfrm,
+- flow_cache_gc_work);
++ flow_cache_flush_work);
+ struct net *net = container_of(xfrm, struct net, xfrm);
+
+ flow_cache_flush(net);
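container_of() recovers the enclosing struct from a member pointer by subtracting the member's offset, so naming the wrong member (flow_cache_gc_work instead of flow_cache_flush_work) yields a pointer skewed by the offset difference. A standalone illustration with stand-in types:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct netns_xfrm_like {
    int flow_cache_gc_work;     /* stand-ins for the two work_structs */
    int flow_cache_flush_work;
};

int main(void)
{
    struct netns_xfrm_like x;
    int *work = &x.flow_cache_flush_work;   /* the member actually queued */

    /* naming the right member recovers &x; the old, wrong member name
     * produced a pointer past the struct's start */
    printf("container=%p struct=%p\n",
           (void *)container_of(work, struct netns_xfrm_like,
                                flow_cache_flush_work),
           (void *)&x);
    return 0;
}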
+diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c
+index 0c08062d1796..1e2f46a69d50 100644
+--- a/net/core/gen_stats.c
++++ b/net/core/gen_stats.c
+@@ -32,6 +32,9 @@ gnet_stats_copy(struct gnet_dump *d, int type, void *buf, int size)
+ return 0;
+
+ nla_put_failure:
++ kfree(d->xstats);
++ d->xstats = NULL;
++ d->xstats_len = 0;
+ spin_unlock_bh(d->lock);
+ return -1;
+ }
+@@ -305,7 +308,9 @@ int
+ gnet_stats_copy_app(struct gnet_dump *d, void *st, int len)
+ {
+ if (d->compat_xstats) {
+- d->xstats = st;
++ d->xstats = kmemdup(st, len, GFP_ATOMIC);
++ if (!d->xstats)
++ goto err_out;
+ d->xstats_len = len;
+ }
+
+@@ -313,6 +318,11 @@ gnet_stats_copy_app(struct gnet_dump *d, void *st, int len)
+ return gnet_stats_copy(d, TCA_STATS_APP, st, len);
+
+ return 0;
++
++err_out:
++ d->xstats_len = 0;
++ spin_unlock_bh(d->lock);
++ return -1;
+ }
+ EXPORT_SYMBOL(gnet_stats_copy_app);
+
+@@ -345,6 +355,9 @@ gnet_stats_finish_copy(struct gnet_dump *d)
+ return -1;
+ }
+
++ kfree(d->xstats);
++ d->xstats = NULL;
++ d->xstats_len = 0;
+ spin_unlock_bh(d->lock);
+ return 0;
+ }
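gnet_stats_copy_app() previously stored the caller's pointer; if the caller freed it before the dump finished, the stats were read after free. The fix keeps a private copy (kmemdup) and frees it on every exit path. A userspace analogue with malloc/memcpy standing in for kmemdup:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void *kmemdup_like(const void *src, size_t len)
{
    void *p = malloc(len);

    if (p)
        memcpy(p, src, len);
    return p;
}

int main(void)
{
    char caller_buf[] = "xstats payload";
    char *saved = kmemdup_like(caller_buf, sizeof(caller_buf));

    memset(caller_buf, 0, sizeof(caller_buf));  /* caller reuses its buffer */
    printf("%s\n", saved);                      /* the copy is intact */
    free(saved);
    return 0;
}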
+diff --git a/net/core/pktgen.c b/net/core/pktgen.c
+index 443256bdcddc..0b320d93fb56 100644
+--- a/net/core/pktgen.c
++++ b/net/core/pktgen.c
+@@ -1134,6 +1134,9 @@ static ssize_t pktgen_if_write(struct file *file,
+ return len;
+
+ i += len;
++ if ((value > 1) &&
++ (!(pkt_dev->odev->priv_flags & IFF_TX_SKB_SHARING)))
++ return -ENOTSUPP;
+ pkt_dev->burst = value < 1 ? 1 : value;
+ sprintf(pg_result, "OK: burst=%d", pkt_dev->burst);
+ return count;
+@@ -2842,25 +2845,25 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
+ skb->dev = odev;
+ skb->pkt_type = PACKET_HOST;
+
++ pktgen_finalize_skb(pkt_dev, skb, datalen);
++
+ if (!(pkt_dev->flags & F_UDPCSUM)) {
+ skb->ip_summed = CHECKSUM_NONE;
+ } else if (odev->features & NETIF_F_V4_CSUM) {
+ skb->ip_summed = CHECKSUM_PARTIAL;
+ skb->csum = 0;
+- udp4_hwcsum(skb, udph->source, udph->dest);
++ udp4_hwcsum(skb, iph->saddr, iph->daddr);
+ } else {
+- __wsum csum = udp_csum(skb);
++ __wsum csum = skb_checksum(skb, skb_transport_offset(skb), datalen + 8, 0);
+
+ /* add protocol-dependent pseudo-header */
+- udph->check = csum_tcpudp_magic(udph->source, udph->dest,
++ udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
+ datalen + 8, IPPROTO_UDP, csum);
+
+ if (udph->check == 0)
+ udph->check = CSUM_MANGLED_0;
+ }
+
+- pktgen_finalize_skb(pkt_dev, skb, datalen);
+-
+ #ifdef CONFIG_XFRM
+ if (!process_ipsec(pkt_dev, skb, protocol))
+ return NULL;
+@@ -2976,6 +2979,8 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
+ skb->dev = odev;
+ skb->pkt_type = PACKET_HOST;
+
++ pktgen_finalize_skb(pkt_dev, skb, datalen);
++
+ if (!(pkt_dev->flags & F_UDPCSUM)) {
+ skb->ip_summed = CHECKSUM_NONE;
+ } else if (odev->features & NETIF_F_V6_CSUM) {
+@@ -2984,7 +2989,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
+ skb->csum_offset = offsetof(struct udphdr, check);
+ udph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, udplen, IPPROTO_UDP, 0);
+ } else {
+- __wsum csum = udp_csum(skb);
++ __wsum csum = skb_checksum(skb, skb_transport_offset(skb), udplen, 0);
+
+ /* add protocol-dependent pseudo-header */
+ udph->check = csum_ipv6_magic(&iph->saddr, &iph->daddr, udplen, IPPROTO_UDP, csum);
+@@ -2993,8 +2998,6 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
+ udph->check = CSUM_MANGLED_0;
+ }
+
+- pktgen_finalize_skb(pkt_dev, skb, datalen);
+-
+ return skb;
+ }
+
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index ca82629de0b2..c522f7a00eab 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -1212,18 +1212,12 @@ static const struct nla_policy ifla_vfinfo_policy[IFLA_VF_INFO_MAX+1] = {
+ };
+
+ static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = {
+- [IFLA_VF_MAC] = { .type = NLA_BINARY,
+- .len = sizeof(struct ifla_vf_mac) },
+- [IFLA_VF_VLAN] = { .type = NLA_BINARY,
+- .len = sizeof(struct ifla_vf_vlan) },
+- [IFLA_VF_TX_RATE] = { .type = NLA_BINARY,
+- .len = sizeof(struct ifla_vf_tx_rate) },
+- [IFLA_VF_SPOOFCHK] = { .type = NLA_BINARY,
+- .len = sizeof(struct ifla_vf_spoofchk) },
+- [IFLA_VF_RATE] = { .type = NLA_BINARY,
+- .len = sizeof(struct ifla_vf_rate) },
+- [IFLA_VF_LINK_STATE] = { .type = NLA_BINARY,
+- .len = sizeof(struct ifla_vf_link_state) },
++ [IFLA_VF_MAC] = { .len = sizeof(struct ifla_vf_mac) },
++ [IFLA_VF_VLAN] = { .len = sizeof(struct ifla_vf_vlan) },
++ [IFLA_VF_TX_RATE] = { .len = sizeof(struct ifla_vf_tx_rate) },
++ [IFLA_VF_SPOOFCHK] = { .len = sizeof(struct ifla_vf_spoofchk) },
++ [IFLA_VF_RATE] = { .len = sizeof(struct ifla_vf_rate) },
++ [IFLA_VF_LINK_STATE] = { .len = sizeof(struct ifla_vf_link_state) },
+ };
+
+ static const struct nla_policy ifla_port_policy[IFLA_PORT_MAX+1] = {
+@@ -1255,7 +1249,6 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
+ s_h = cb->args[0];
+ s_idx = cb->args[1];
+
+- rcu_read_lock();
+ cb->seq = net->dev_base_seq;
+
+ /* A hack to preserve kernel<->userspace interface.
+@@ -1277,7 +1270,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
+ for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
+ idx = 0;
+ head = &net->dev_index_head[h];
+- hlist_for_each_entry_rcu(dev, head, index_hlist) {
++ hlist_for_each_entry(dev, head, index_hlist) {
+ if (idx < s_idx)
+ goto cont;
+ err = rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
+@@ -1299,7 +1292,6 @@ cont:
+ }
+ }
+ out:
+- rcu_read_unlock();
+ cb->args[1] = idx;
+ cb->args[0] = h;
+
+@@ -2105,8 +2097,16 @@ replay:
+ }
+ }
+ err = rtnl_configure_link(dev, ifm);
+- if (err < 0)
+- unregister_netdevice(dev);
++ if (err < 0) {
++ if (ops->newlink) {
++ LIST_HEAD(list_kill);
++
++ ops->dellink(dev, &list_kill);
++ unregister_netdevice_many(&list_kill);
++ } else {
++ unregister_netdevice(dev);
++ }
++ }
+ out:
+ put_net(dest_net);
+ return err;
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index d7543d0fd744..79589ae84a5d 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -3515,13 +3515,14 @@ struct sk_buff *sock_dequeue_err_skb(struct sock *sk)
+ {
+ struct sk_buff_head *q = &sk->sk_error_queue;
+ struct sk_buff *skb, *skb_next;
++ unsigned long flags;
+ int err = 0;
+
+- spin_lock_bh(&q->lock);
++ spin_lock_irqsave(&q->lock, flags);
+ skb = __skb_dequeue(q);
+ if (skb && (skb_next = skb_peek(q)))
+ err = SKB_EXT_ERR(skb_next)->ee.ee_errno;
+- spin_unlock_bh(&q->lock);
++ spin_unlock_irqrestore(&q->lock, flags);
+
+ sk->sk_err = err;
+ if (err)
+diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
+index 2811cc18701a..b48e03cd6656 100644
+--- a/net/ipv4/ip_fragment.c
++++ b/net/ipv4/ip_fragment.c
+@@ -660,27 +660,30 @@ EXPORT_SYMBOL(ip_defrag);
+ struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user)
+ {
+ struct iphdr iph;
++ int netoff;
+ u32 len;
+
+ if (skb->protocol != htons(ETH_P_IP))
+ return skb;
+
+- if (!skb_copy_bits(skb, 0, &iph, sizeof(iph)))
++ netoff = skb_network_offset(skb);
++
++ if (skb_copy_bits(skb, netoff, &iph, sizeof(iph)) < 0)
+ return skb;
+
+ if (iph.ihl < 5 || iph.version != 4)
+ return skb;
+
+ len = ntohs(iph.tot_len);
+- if (skb->len < len || len < (iph.ihl * 4))
++ if (skb->len < netoff + len || len < (iph.ihl * 4))
+ return skb;
+
+ if (ip_is_fragment(&iph)) {
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (skb) {
+- if (!pskb_may_pull(skb, iph.ihl*4))
++ if (!pskb_may_pull(skb, netoff + iph.ihl * 4))
+ return skb;
+- if (pskb_trim_rcsum(skb, len))
++ if (pskb_trim_rcsum(skb, netoff + len))
+ return skb;
+ memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
+ if (ip_defrag(skb, user))
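Every bound in ip_check_defrag() must be relative to the network-header offset, not the start of the buffer; otherwise a packet preceded by link-layer headers passes checks it should fail. A small sketch of the corrected comparison, with illustrative numbers:

#include <stdbool.h>
#include <stdio.h>

static bool frame_holds_packet(unsigned buf_len, unsigned netoff,
                               unsigned tot_len)
{
    return buf_len >= netoff + tot_len;  /* the old check ignored netoff */
}

int main(void)
{
    /* 1500-byte frame, IP header at offset 14, packet claims 1492
     * bytes: the old check (1500 >= 1492) passed, this one fails */
    printf("%d\n", frame_holds_packet(1500, 14, 1492));  /* 0 */
    return 0;
}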
+diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
+index 4a2a074bfb4a..357c2a914e77 100644
+--- a/net/ipv4/ip_output.c
++++ b/net/ipv4/ip_output.c
+@@ -890,7 +890,8 @@ static int __ip_append_data(struct sock *sk,
+ cork->length += length;
+ if (((length > mtu) || (skb && skb_is_gso(skb))) &&
+ (sk->sk_protocol == IPPROTO_UDP) &&
+- (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len) {
++ (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len &&
++ (sk->sk_type == SOCK_DGRAM)) {
+ err = ip_ufo_append_data(sk, queue, getfrag, from, length,
+ hh_len, fragheaderlen, transhdrlen,
+ maxfraglen, flags);
+diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
+index 5638b179b355..a5c49d657ab1 100644
+--- a/net/ipv4/ping.c
++++ b/net/ipv4/ping.c
+@@ -259,6 +259,9 @@ int ping_init_sock(struct sock *sk)
+ kgid_t low, high;
+ int ret = 0;
+
++ if (sk->sk_family == AF_INET6)
++ sk->sk_ipv6only = 1;
++
+ inet_get_ping_group_range_net(net, &low, &high);
+ if (gid_lte(low, group) && gid_lte(group, high))
+ return 0;
+@@ -305,6 +308,11 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
+ if (addr_len < sizeof(*addr))
+ return -EINVAL;
+
++ if (addr->sin_family != AF_INET &&
++ !(addr->sin_family == AF_UNSPEC &&
++ addr->sin_addr.s_addr == htonl(INADDR_ANY)))
++ return -EAFNOSUPPORT;
++
+ pr_debug("ping_check_bind_addr(sk=%p,addr=%pI4,port=%d)\n",
+ sk, &addr->sin_addr.s_addr, ntohs(addr->sin_port));
+
+@@ -330,7 +338,7 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
+ return -EINVAL;
+
+ if (addr->sin6_family != AF_INET6)
+- return -EINVAL;
++ return -EAFNOSUPPORT;
+
+ pr_debug("ping_check_bind_addr(sk=%p,addr=%pI6c,port=%d)\n",
+ sk, addr->sin6_addr.s6_addr, ntohs(addr->sin6_port));
+@@ -716,7 +724,7 @@ static int ping_v4_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
+ if (msg->msg_namelen < sizeof(*usin))
+ return -EINVAL;
+ if (usin->sin_family != AF_INET)
+- return -EINVAL;
++ return -EAFNOSUPPORT;
+ daddr = usin->sin_addr.s_addr;
+ /* no remote port */
+ } else {
+diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
+index 815c85e3b1e0..c73077280ad4 100644
+--- a/net/ipv4/tcp_fastopen.c
++++ b/net/ipv4/tcp_fastopen.c
+@@ -134,6 +134,7 @@ static bool tcp_fastopen_create_child(struct sock *sk,
+ struct tcp_sock *tp;
+ struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
+ struct sock *child;
++ u32 end_seq;
+
+ req->num_retrans = 0;
+ req->num_timeout = 0;
+@@ -185,20 +186,35 @@ static bool tcp_fastopen_create_child(struct sock *sk,
+
+ /* Queue the data carried in the SYN packet. We need to first
+ * bump skb's refcnt because the caller will attempt to free it.
++ * Note that IPv6 might also have used the skb_get() trick
++ * in tcp_v6_conn_request() to keep this SYN around (treq->pktopts),
++ * so we may eventually need a clone of the packet
++ * before inserting it in sk_receive_queue.
+ *
+ * XXX (TFO) - we honor a zero-payload TFO request for now,
+ * (any reason not to?) but no need to queue the skb since
+ * there is no data. How about SYN+FIN?
+ */
+- if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1) {
+- skb = skb_get(skb);
+- skb_dst_drop(skb);
+- __skb_pull(skb, tcp_hdr(skb)->doff * 4);
+- skb_set_owner_r(skb, child);
+- __skb_queue_tail(&child->sk_receive_queue, skb);
+- tp->syn_data_acked = 1;
++ end_seq = TCP_SKB_CB(skb)->end_seq;
++ if (end_seq != TCP_SKB_CB(skb)->seq + 1) {
++ struct sk_buff *skb2;
++
++ if (unlikely(skb_shared(skb)))
++ skb2 = skb_clone(skb, GFP_ATOMIC);
++ else
++ skb2 = skb_get(skb);
++
++ if (likely(skb2)) {
++ skb_dst_drop(skb2);
++ __skb_pull(skb2, tcp_hdrlen(skb));
++ skb_set_owner_r(skb2, child);
++ __skb_queue_tail(&child->sk_receive_queue, skb2);
++ tp->syn_data_acked = 1;
++ } else {
++ end_seq = TCP_SKB_CB(skb)->seq + 1;
++ }
+ }
+- tcp_rsk(req)->rcv_nxt = tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
++ tcp_rsk(req)->rcv_nxt = tp->rcv_nxt = end_seq;
+ sk->sk_data_ready(sk);
+ bh_unlock_sock(child);
+ sock_put(child);
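The TFO fix clones the SYN's skb when it may be shared rather than mutating a buffer someone else still references. A toy refcounted buffer showing the clone-if-shared decision; the struct and helpers are stand-ins, not kernel API:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct buf { int refcnt; char data[32]; };

static struct buf *buf_get(struct buf *b) { b->refcnt++; return b; }

static struct buf *buf_clone(const struct buf *b)
{
    struct buf *c = malloc(sizeof(*c));

    if (c) {
        memcpy(c, b, sizeof(*c));
        c->refcnt = 1;
    }
    return c;
}

int main(void)
{
    struct buf b = { .refcnt = 2, .data = "syn+data" };  /* shared */
    struct buf *mine = b.refcnt > 1 ? buf_clone(&b) : buf_get(&b);

    printf("%s (private=%d)\n", mine->data, mine != &b);
    if (mine != &b)
        free(mine);
    return 0;
}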
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index 0169ccf5aa4f..17a025847999 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -4536,6 +4536,22 @@ static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token)
+ return 0;
+ }
+
++static const struct nla_policy inet6_af_policy[IFLA_INET6_MAX + 1] = {
++ [IFLA_INET6_ADDR_GEN_MODE] = { .type = NLA_U8 },
++ [IFLA_INET6_TOKEN] = { .len = sizeof(struct in6_addr) },
++};
++
++static int inet6_validate_link_af(const struct net_device *dev,
++ const struct nlattr *nla)
++{
++ struct nlattr *tb[IFLA_INET6_MAX + 1];
++
++ if (dev && !__in6_dev_get(dev))
++ return -EAFNOSUPPORT;
++
++ return nla_parse_nested(tb, IFLA_INET6_MAX, nla, inet6_af_policy);
++}
++
+ static int inet6_set_link_af(struct net_device *dev, const struct nlattr *nla)
+ {
+ int err = -EINVAL;
+@@ -5351,6 +5367,7 @@ static struct rtnl_af_ops inet6_ops = {
+ .family = AF_INET6,
+ .fill_link_af = inet6_fill_link_af,
+ .get_link_af_size = inet6_get_link_af_size,
++ .validate_link_af = inet6_validate_link_af,
+ .set_link_af = inet6_set_link_af,
+ };
+
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index 8e950c250ada..51add023b723 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -1296,7 +1296,8 @@ emsgsize:
+ if (((length > mtu) ||
+ (skb && skb_is_gso(skb))) &&
+ (sk->sk_protocol == IPPROTO_UDP) &&
+- (rt->dst.dev->features & NETIF_F_UFO)) {
++ (rt->dst.dev->features & NETIF_F_UFO) &&
++ (sk->sk_type == SOCK_DGRAM)) {
+ err = ip6_ufo_append_data(sk, getfrag, from, length,
+ hh_len, fragheaderlen,
+ transhdrlen, mtu, flags, rt);
+diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
+index 5b7a1ed2aba9..2d452a382128 100644
+--- a/net/ipv6/ping.c
++++ b/net/ipv6/ping.c
+@@ -102,9 +102,10 @@ int ping_v6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+
+ if (msg->msg_name) {
+ DECLARE_SOCKADDR(struct sockaddr_in6 *, u, msg->msg_name);
+- if (msg->msg_namelen < sizeof(struct sockaddr_in6) ||
+- u->sin6_family != AF_INET6) {
++ if (msg->msg_namelen < sizeof(*u))
+ return -EINVAL;
++ if (u->sin6_family != AF_INET6) {
++ return -EAFNOSUPPORT;
+ }
+ if (sk->sk_bound_dev_if &&
+ sk->sk_bound_dev_if != u->sin6_scope_id) {
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index d02ee019382e..2d9aca57e7c7 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -141,7 +141,7 @@ static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old)
+ u32 *p = NULL;
+
+ if (!(rt->dst.flags & DST_HOST))
+- return NULL;
++ return dst_cow_metrics_generic(dst, old);
+
+ peer = rt6_get_peer_create(rt);
+ if (peer) {
+diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
+index 61ceb4cdb4a2..23ad419361fb 100644
+--- a/net/irda/ircomm/ircomm_tty.c
++++ b/net/irda/ircomm/ircomm_tty.c
+@@ -816,7 +816,9 @@ static void ircomm_tty_wait_until_sent(struct tty_struct *tty, int timeout)
+ orig_jiffies = jiffies;
+
+ /* Set poll time to 200 ms */
+- poll_time = IRDA_MIN(timeout, msecs_to_jiffies(200));
++ poll_time = msecs_to_jiffies(200);
++ if (timeout)
++ poll_time = min_t(unsigned long, timeout, poll_time);
+
+ spin_lock_irqsave(&self->spinlock, flags);
+ while (self->tx_skb && self->tx_skb->len) {
+diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
+index 900632a250ec..80ce44f6693d 100644
+--- a/net/mac80211/tx.c
++++ b/net/mac80211/tx.c
+@@ -563,6 +563,7 @@ ieee80211_tx_h_check_control_port_protocol(struct ieee80211_tx_data *tx)
+ if (tx->sdata->control_port_no_encrypt)
+ info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
+ info->control.flags |= IEEE80211_TX_CTRL_PORT_CTRL_PROTO;
++ info->flags |= IEEE80211_TX_CTL_USE_MINRATE;
+ }
+
+ return TX_CONTINUE;
+diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
+index f9e556b56086..68ccddb5e2c4 100644
+--- a/net/openvswitch/datapath.c
++++ b/net/openvswitch/datapath.c
+@@ -2060,14 +2060,55 @@ static int __net_init ovs_init_net(struct net *net)
+ return 0;
+ }
+
+-static void __net_exit ovs_exit_net(struct net *net)
++static void __net_exit list_vports_from_net(struct net *net, struct net *dnet,
++ struct list_head *head)
+ {
+- struct datapath *dp, *dp_next;
+ struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
++ struct datapath *dp;
++
++ list_for_each_entry(dp, &ovs_net->dps, list_node) {
++ int i;
++
++ for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
++ struct vport *vport;
++
++ hlist_for_each_entry(vport, &dp->ports[i], dp_hash_node) {
++ struct netdev_vport *netdev_vport;
++
++ if (vport->ops->type != OVS_VPORT_TYPE_INTERNAL)
++ continue;
++
++ netdev_vport = netdev_vport_priv(vport);
++ if (dev_net(netdev_vport->dev) == dnet)
++ list_add(&vport->detach_list, head);
++ }
++ }
++ }
++}
++
++static void __net_exit ovs_exit_net(struct net *dnet)
++{
++ struct datapath *dp, *dp_next;
++ struct ovs_net *ovs_net = net_generic(dnet, ovs_net_id);
++ struct vport *vport, *vport_next;
++ struct net *net;
++ LIST_HEAD(head);
+
+ ovs_lock();
+ list_for_each_entry_safe(dp, dp_next, &ovs_net->dps, list_node)
+ __dp_destroy(dp);
++
++ rtnl_lock();
++ for_each_net(net)
++ list_vports_from_net(net, dnet, &head);
++ rtnl_unlock();
++
++ /* Detach all vports from given namespace. */
++ list_for_each_entry_safe(vport, vport_next, &head, detach_list) {
++ list_del(&vport->detach_list);
++ ovs_dp_detach_port(vport);
++ }
++
+ ovs_unlock();
+
+ cancel_work_sync(&ovs_net->dp_notify_work);
+diff --git a/net/openvswitch/vport.h b/net/openvswitch/vport.h
+index 8942125de3a6..ab01c65fb3de 100644
+--- a/net/openvswitch/vport.h
++++ b/net/openvswitch/vport.h
+@@ -93,6 +93,7 @@ struct vport_portids {
+ * @ops: Class structure.
+ * @percpu_stats: Points to per-CPU statistics used and maintained by vport
+ * @err_stats: Points to error statistics used and maintained by vport
++ * @detach_list: List head used to detach the vport during net exit.
+ */
+ struct vport {
+ struct rcu_head rcu;
+@@ -107,6 +108,7 @@ struct vport {
+ struct pcpu_sw_netstats __percpu *percpu_stats;
+
+ struct vport_err_stats err_stats;
++ struct list_head detach_list;
+ };
+
+ /**
+diff --git a/net/sched/ematch.c b/net/sched/ematch.c
+index 6742200b1307..fbb7ebfc58c6 100644
+--- a/net/sched/ematch.c
++++ b/net/sched/ematch.c
+@@ -228,6 +228,7 @@ static int tcf_em_validate(struct tcf_proto *tp,
+ * to replay the request.
+ */
+ module_put(em->ops->owner);
++ em->ops = NULL;
+ err = -EAGAIN;
+ }
+ #endif
+diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c
+index 9761a0da964d..1bb4d26fbd6b 100644
+--- a/net/sunrpc/backchannel_rqst.c
++++ b/net/sunrpc/backchannel_rqst.c
+@@ -309,12 +309,15 @@ void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied)
+ struct rpc_xprt *xprt = req->rq_xprt;
+ struct svc_serv *bc_serv = xprt->bc_serv;
+
++ spin_lock(&xprt->bc_pa_lock);
++ list_del(&req->rq_bc_pa_list);
++ spin_unlock(&xprt->bc_pa_lock);
++
+ req->rq_private_buf.len = copied;
+ set_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
+
+ dprintk("RPC: add callback request to list\n");
+ spin_lock(&bc_serv->sv_cb_lock);
+- list_del(&req->rq_bc_pa_list);
+ list_add(&req->rq_bc_list, &bc_serv->sv_cb_list);
+ wake_up(&bc_serv->sv_cb_waitq);
+ spin_unlock(&bc_serv->sv_cb_lock);
+diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
+index 066362141133..48f14003af10 100644
+--- a/net/sunrpc/cache.c
++++ b/net/sunrpc/cache.c
+@@ -920,7 +920,7 @@ static unsigned int cache_poll(struct file *filp, poll_table *wait,
+ poll_wait(filp, &queue_wait, wait);
+
+ /* always allow write */
+- mask = POLL_OUT | POLLWRNORM;
++ mask = POLLOUT | POLLWRNORM;
+
+ if (!rp)
+ return mask;
+diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
+index 166d59cdc86b..9c823cfdfff0 100644
+--- a/sound/core/pcm_native.c
++++ b/sound/core/pcm_native.c
+@@ -1523,6 +1523,8 @@ static int snd_pcm_do_drain_init(struct snd_pcm_substream *substream, int state)
+ if (! snd_pcm_playback_empty(substream)) {
+ snd_pcm_do_start(substream, SNDRV_PCM_STATE_DRAINING);
+ snd_pcm_post_start(substream, SNDRV_PCM_STATE_DRAINING);
++ } else {
++ runtime->status->state = SNDRV_PCM_STATE_SETUP;
+ }
+ break;
+ case SNDRV_PCM_STATE_RUNNING:
+diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c
+index 8337645aa7a5..84c94301bfaf 100644
+--- a/sound/pci/hda/hda_controller.c
++++ b/sound/pci/hda/hda_controller.c
+@@ -957,7 +957,6 @@ static int azx_alloc_cmd_io(struct azx *chip)
+ dev_err(chip->card->dev, "cannot allocate CORB/RIRB\n");
+ return err;
+ }
+-EXPORT_SYMBOL_GPL(azx_alloc_cmd_io);
+
+ static void azx_init_cmd_io(struct azx *chip)
+ {
+@@ -1022,7 +1021,6 @@ static void azx_init_cmd_io(struct azx *chip)
+ azx_writeb(chip, RIRBCTL, AZX_RBCTL_DMA_EN | AZX_RBCTL_IRQ_EN);
+ spin_unlock_irq(&chip->reg_lock);
+ }
+-EXPORT_SYMBOL_GPL(azx_init_cmd_io);
+
+ static void azx_free_cmd_io(struct azx *chip)
+ {
+@@ -1032,7 +1030,6 @@ static void azx_free_cmd_io(struct azx *chip)
+ azx_writeb(chip, CORBCTL, 0);
+ spin_unlock_irq(&chip->reg_lock);
+ }
+-EXPORT_SYMBOL_GPL(azx_free_cmd_io);
+
+ static unsigned int azx_command_addr(u32 cmd)
+ {
+@@ -1312,7 +1309,6 @@ static int azx_send_cmd(struct hda_bus *bus, unsigned int val)
+ else
+ return azx_corb_send_cmd(bus, val);
+ }
+-EXPORT_SYMBOL_GPL(azx_send_cmd);
+
+ /* get a response */
+ static unsigned int azx_get_response(struct hda_bus *bus,
+@@ -1326,7 +1322,6 @@ static unsigned int azx_get_response(struct hda_bus *bus,
+ else
+ return azx_rirb_get_response(bus, addr);
+ }
+-EXPORT_SYMBOL_GPL(azx_get_response);
+
+ #ifdef CONFIG_SND_HDA_DSP_LOADER
+ /*
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index 48b6c5a3884f..8413797ba38d 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -1995,7 +1995,7 @@ static const struct pci_device_id azx_ids[] = {
+ .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH_NOPM },
+ /* Panther Point */
+ { PCI_DEVICE(0x8086, 0x1e20),
+- .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
++ .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH_NOPM },
+ /* Lynx Point */
+ { PCI_DEVICE(0x8086, 0x8c20),
+ .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 50762cf62b2d..8375bc424e2d 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -5074,6 +5074,13 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
+ {0x17, 0x40000000},
+ {0x1d, 0x40700001},
+ {0x21, 0x02211040}),
++ SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
++ ALC255_STANDARD_PINS,
++ {0x12, 0x90a60170},
++ {0x14, 0x90170140},
++ {0x17, 0x40000000},
++ {0x1d, 0x40700001},
++ {0x21, 0x02211050}),
+ SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC280_FIXUP_HP_GPIO4,
+ {0x12, 0x90a60130},
+ {0x13, 0x40000000},
+diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
+index 6d36c5b78805..87eff3173ce9 100644
+--- a/sound/pci/hda/patch_sigmatel.c
++++ b/sound/pci/hda/patch_sigmatel.c
+@@ -79,6 +79,7 @@ enum {
+ STAC_ALIENWARE_M17X,
+ STAC_92HD89XX_HP_FRONT_JACK,
+ STAC_92HD89XX_HP_Z1_G2_RIGHT_MIC_JACK,
++ STAC_92HD73XX_ASUS_MOBO,
+ STAC_92HD73XX_MODELS
+ };
+
+@@ -1911,7 +1912,18 @@ static const struct hda_fixup stac92hd73xx_fixups[] = {
+ [STAC_92HD89XX_HP_Z1_G2_RIGHT_MIC_JACK] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = stac92hd89xx_hp_z1_g2_right_mic_jack_pin_configs,
+- }
++ },
++ [STAC_92HD73XX_ASUS_MOBO] = {
++ .type = HDA_FIXUP_PINS,
++ .v.pins = (const struct hda_pintbl[]) {
++ /* enable 5.1 and SPDIF out */
++ { 0x0c, 0x01014411 },
++ { 0x0d, 0x01014410 },
++ { 0x0e, 0x01014412 },
++ { 0x22, 0x014b1180 },
++ { }
++ }
++ },
+ };
+
+ static const struct hda_model_fixup stac92hd73xx_models[] = {
+@@ -1923,6 +1935,7 @@ static const struct hda_model_fixup stac92hd73xx_models[] = {
+ { .id = STAC_DELL_M6_BOTH, .name = "dell-m6" },
+ { .id = STAC_DELL_EQ, .name = "dell-eq" },
+ { .id = STAC_ALIENWARE_M17X, .name = "alienware" },
++ { .id = STAC_92HD73XX_ASUS_MOBO, .name = "asus-mobo" },
+ {}
+ };
+
+@@ -1975,6 +1988,8 @@ static const struct snd_pci_quirk stac92hd73xx_fixup_tbl[] = {
+ "HP Z1 G2", STAC_92HD89XX_HP_Z1_G2_RIGHT_MIC_JACK),
+ SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x2b17,
+ "unknown HP", STAC_92HD89XX_HP_FRONT_JACK),
++ SND_PCI_QUIRK(PCI_VENDOR_ID_ASUSTEK, 0x83f8, "ASUS AT4NM10",
++ STAC_92HD73XX_ASUS_MOBO),
+ {} /* terminator */
+ };
+
+diff --git a/sound/soc/codecs/rt5670.c b/sound/soc/codecs/rt5670.c
+index 7134f9ebf2f3..a8a9c1f36b2e 100644
+--- a/sound/soc/codecs/rt5670.c
++++ b/sound/soc/codecs/rt5670.c
+@@ -222,7 +222,6 @@ static bool rt5670_volatile_register(struct device *dev, unsigned int reg)
+ case RT5670_ADC_EQ_CTRL1:
+ case RT5670_EQ_CTRL1:
+ case RT5670_ALC_CTRL_1:
+- case RT5670_IRQ_CTRL1:
+ case RT5670_IRQ_CTRL2:
+ case RT5670_INT_IRQ_ST:
+ case RT5670_IL_CMD:
+diff --git a/sound/soc/omap/omap-pcm.c b/sound/soc/omap/omap-pcm.c
+index f4b05bc23e4b..1343ecbf0bd5 100644
+--- a/sound/soc/omap/omap-pcm.c
++++ b/sound/soc/omap/omap-pcm.c
+@@ -201,7 +201,7 @@ static int omap_pcm_new(struct snd_soc_pcm_runtime *rtd)
+ struct snd_pcm *pcm = rtd->pcm;
+ int ret;
+
+- ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(64));
++ ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+