author    Mike Pagano <mpagano@gentoo.org>    2015-11-03 13:38:25 -0500
committer Mike Pagano <mpagano@gentoo.org>    2015-11-03 13:38:25 -0500
commit    ba218a025ac9caddccdbd233481a4ffffaebc4d3 (patch)
tree      779f74c2135d69d68dfa2d19d5abbbc122ece609
parent    Update README (diff)
download  linux-patches-ba218a02.tar.gz
          linux-patches-ba218a02.tar.bz2
          linux-patches-ba218a02.zip
Linux patch 3.12.50 (3.12-47)
-rw-r--r--  0000_README              |    4
-rw-r--r--  1049_linux-3.12.50.patch | 4795
2 files changed, 4799 insertions(+), 0 deletions(-)
diff --git a/0000_README b/0000_README
index e5655112..ce73ef27 100644
--- a/0000_README
+++ b/0000_README
@@ -238,6 +238,10 @@ Patch: 1048_linux-3.12.49.patch
From: http://www.kernel.org
Desc: Linux 3.12.49
+Patch: 1049_linux-3.12.50.patch
+From: http://www.kernel.org
+Desc: Linux 3.12.50
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1049_linux-3.12.50.patch b/1049_linux-3.12.50.patch
new file mode 100644
index 00000000..c49a1458
--- /dev/null
+++ b/1049_linux-3.12.50.patch
@@ -0,0 +1,4795 @@
+diff --git a/Makefile b/Makefile
+index b2985713121c..cbb29f4a4c43 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 49
++SUBLEVEL = 50
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+
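
For context: VERSION, PATCHLEVEL and SUBLEVEL combine into LINUX_VERSION_CODE via the KERNEL_VERSION() macro in <linux/version.h>, which is how out-of-tree code tests the running kernel. A quick illustration of what this one-line bump changes:

    /* KERNEL_VERSION(a, b, c) expands to (a << 16) + (b << 8) + (c),
     * so this hunk moves LINUX_VERSION_CODE from 0x030c31 to 0x030c32. */
    #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 50)
    /* rely on a fix first shipped in 3.12.50 */
    #endif
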
+diff --git a/arch/alpha/include/asm/barrier.h b/arch/alpha/include/asm/barrier.h
+index ce8860a0b32d..3832bdb794fe 100644
+--- a/arch/alpha/include/asm/barrier.h
++++ b/arch/alpha/include/asm/barrier.h
+@@ -3,33 +3,18 @@
+
+ #include <asm/compiler.h>
+
+-#define mb() \
+-__asm__ __volatile__("mb": : :"memory")
++#define mb() __asm__ __volatile__("mb": : :"memory")
++#define rmb() __asm__ __volatile__("mb": : :"memory")
++#define wmb() __asm__ __volatile__("wmb": : :"memory")
+
+-#define rmb() \
+-__asm__ __volatile__("mb": : :"memory")
+-
+-#define wmb() \
+-__asm__ __volatile__("wmb": : :"memory")
+-
+-#define read_barrier_depends() \
+-__asm__ __volatile__("mb": : :"memory")
++#define read_barrier_depends() __asm__ __volatile__("mb": : :"memory")
+
+ #ifdef CONFIG_SMP
+ #define __ASM_SMP_MB "\tmb\n"
+-#define smp_mb() mb()
+-#define smp_rmb() rmb()
+-#define smp_wmb() wmb()
+-#define smp_read_barrier_depends() read_barrier_depends()
+ #else
+ #define __ASM_SMP_MB
+-#define smp_mb() barrier()
+-#define smp_rmb() barrier()
+-#define smp_wmb() barrier()
+-#define smp_read_barrier_depends() do { } while (0)
+ #endif
+
+-#define set_mb(var, value) \
+-do { var = value; mb(); } while (0)
++#include <asm-generic/barrier.h>
+
+ #endif /* __BARRIER_H */
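
This alpha hunk sets the pattern repeated for every architecture below: keep only the barriers the CPU actually needs and pull the rest from <asm-generic/barrier.h>, whose guarded defaults look roughly like this simplified sketch (the real header covers more macros):

    /* Simplified sketch of the asm-generic fallback idiom */
    #ifndef mb
    #define mb()    barrier()
    #endif

    #ifdef CONFIG_SMP
    #define smp_mb()    mb()
    #define smp_rmb()   rmb()
    #define smp_wmb()   wmb()
    #else
    #define smp_mb()    barrier()
    #define smp_rmb()   barrier()
    #define smp_wmb()   barrier()
    #endif

    #ifndef set_mb
    #define set_mb(var, value) do { (var) = (value); mb(); } while (0)
    #endif
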
+diff --git a/arch/arc/include/asm/Kbuild b/arch/arc/include/asm/Kbuild
+index d8dd660898b9..5c359cf55934 100644
+--- a/arch/arc/include/asm/Kbuild
++++ b/arch/arc/include/asm/Kbuild
+@@ -46,3 +46,4 @@ generic-y += ucontext.h
+ generic-y += user.h
+ generic-y += vga.h
+ generic-y += xor.h
++generic-y += barrier.h
+diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
+index 83f03ca6caf6..03e494f695d1 100644
+--- a/arch/arc/include/asm/atomic.h
++++ b/arch/arc/include/asm/atomic.h
+@@ -190,6 +190,11 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
+
+ #endif /* !CONFIG_ARC_HAS_LLSC */
+
++#define smp_mb__before_atomic_dec() barrier()
++#define smp_mb__after_atomic_dec() barrier()
++#define smp_mb__before_atomic_inc() barrier()
++#define smp_mb__after_atomic_inc() barrier()
++
+ /**
+ * __atomic_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic_t
+diff --git a/arch/arc/include/asm/barrier.h b/arch/arc/include/asm/barrier.h
+deleted file mode 100644
+index f6cb7c4ffb35..000000000000
+--- a/arch/arc/include/asm/barrier.h
++++ /dev/null
+@@ -1,42 +0,0 @@
+-/*
+- * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License version 2 as
+- * published by the Free Software Foundation.
+- */
+-
+-#ifndef __ASM_BARRIER_H
+-#define __ASM_BARRIER_H
+-
+-#ifndef __ASSEMBLY__
+-
+-/* TODO-vineetg: Need to see what this does, don't we need sync anywhere */
+-#define mb() __asm__ __volatile__ ("" : : : "memory")
+-#define rmb() mb()
+-#define wmb() mb()
+-#define set_mb(var, value) do { var = value; mb(); } while (0)
+-#define set_wmb(var, value) do { var = value; wmb(); } while (0)
+-#define read_barrier_depends() mb()
+-
+-/* TODO-vineetg verify the correctness of macros here */
+-#ifdef CONFIG_SMP
+-#define smp_mb() mb()
+-#define smp_rmb() rmb()
+-#define smp_wmb() wmb()
+-#else
+-#define smp_mb() barrier()
+-#define smp_rmb() barrier()
+-#define smp_wmb() barrier()
+-#endif
+-
+-#define smp_mb__before_atomic_dec() barrier()
+-#define smp_mb__after_atomic_dec() barrier()
+-#define smp_mb__before_atomic_inc() barrier()
+-#define smp_mb__after_atomic_inc() barrier()
+-
+-#define smp_read_barrier_depends() do { } while (0)
+-
+-#endif
+-
+-#endif
+diff --git a/arch/arm/Makefile b/arch/arm/Makefile
+index db50b626be98..a4254e8ab36c 100644
+--- a/arch/arm/Makefile
++++ b/arch/arm/Makefile
+@@ -55,6 +55,14 @@ endif
+
+ comma = ,
+
++#
++# The Scalar Replacement of Aggregates (SRA) optimization pass in GCC 4.9 and
++# later may result in code being generated that handles signed short and signed
++# char struct members incorrectly. So disable it.
++# (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=65932)
++#
++KBUILD_CFLAGS += $(call cc-option,-fno-ipa-sra)
++
+ # This selects which instruction set is used.
+ # Note that GCC does not numerically define an architecture version
+ # macro, but instead defines a whole series of macros which makes
+diff --git a/arch/arm/boot/dts/omap5-uevm.dts b/arch/arm/boot/dts/omap5-uevm.dts
+index 65d7b601651c..542e21da2425 100644
+--- a/arch/arm/boot/dts/omap5-uevm.dts
++++ b/arch/arm/boot/dts/omap5-uevm.dts
+@@ -143,8 +143,8 @@
+
+ i2c5_pins: pinmux_i2c5_pins {
+ pinctrl-single,pins = <
+- 0x184 (PIN_INPUT | MUX_MODE0) /* i2c5_scl */
+- 0x186 (PIN_INPUT | MUX_MODE0) /* i2c5_sda */
++ 0x186 (PIN_INPUT | MUX_MODE0) /* i2c5_scl */
++ 0x188 (PIN_INPUT | MUX_MODE0) /* i2c5_sda */
+ >;
+ };
+
+diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
+index ab3304225272..ab5b238ba59a 100644
+--- a/arch/arm/kernel/signal.c
++++ b/arch/arm/kernel/signal.c
+@@ -375,12 +375,23 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
+ */
+ thumb = handler & 1;
+
++#if __LINUX_ARM_ARCH__ >= 6
++ /*
++ * Clear the If-Then Thumb-2 execution state. ARM spec
++ * requires this to be all 000s in ARM mode. Snapdragon
++ * S4/Krait misbehaves on a Thumb=>ARM signal transition
++ * without this.
++ *
++ * We must do this whenever we are running on a Thumb-2
++ * capable CPU, which includes ARMv6T2. However, we elect
++ * to do this whenever we're on an ARMv6 or later CPU for
++ * simplicity.
++ */
++ cpsr &= ~PSR_IT_MASK;
++#endif
++
+ if (thumb) {
+ cpsr |= PSR_T_BIT;
+-#if __LINUX_ARM_ARCH__ >= 7
+- /* clear the If-Then Thumb-2 execution state */
+- cpsr &= ~PSR_IT_MASK;
+-#endif
+ } else
+ cpsr &= ~PSR_T_BIT;
+ }
+diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
+index 4148c05df99a..e06f99f5e37a 100644
+--- a/arch/arm64/Makefile
++++ b/arch/arm64/Makefile
+@@ -29,7 +29,7 @@ comma = ,
+ CHECKFLAGS += -D__aarch64__
+
+ ifeq ($(CONFIG_ARM64_ERRATUM_843419), y)
+-CFLAGS_MODULE += -mcmodel=large
++KBUILD_CFLAGS_MODULE += -mcmodel=large
+ endif
+
+ # Default value
+diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
+index c23751b06120..cc083b6e4ce7 100644
+--- a/arch/arm64/mm/fault.c
++++ b/arch/arm64/mm/fault.c
+@@ -278,6 +278,7 @@ retry:
+ * starvation.
+ */
+ mm_flags &= ~FAULT_FLAG_ALLOW_RETRY;
++ mm_flags |= FAULT_FLAG_TRIED;
+ goto retry;
+ }
+ }
+diff --git a/arch/avr32/include/asm/barrier.h b/arch/avr32/include/asm/barrier.h
+index 0961275373db..715100790fd0 100644
+--- a/arch/avr32/include/asm/barrier.h
++++ b/arch/avr32/include/asm/barrier.h
+@@ -8,22 +8,15 @@
+ #ifndef __ASM_AVR32_BARRIER_H
+ #define __ASM_AVR32_BARRIER_H
+
+-#define nop() asm volatile("nop")
+-
+-#define mb() asm volatile("" : : : "memory")
+-#define rmb() mb()
+-#define wmb() asm volatile("sync 0" : : : "memory")
+-#define read_barrier_depends() do { } while(0)
+-#define set_mb(var, value) do { var = value; mb(); } while(0)
++/*
++ * Weirdest thing ever.. no full barrier, but it has a write barrier!
++ */
++#define wmb() asm volatile("sync 0" : : : "memory")
+
+ #ifdef CONFIG_SMP
+ # error "The AVR32 port does not support SMP"
+-#else
+-# define smp_mb() barrier()
+-# define smp_rmb() barrier()
+-# define smp_wmb() barrier()
+-# define smp_read_barrier_depends() do { } while(0)
+ #endif
+
++#include <asm-generic/barrier.h>
+
+ #endif /* __ASM_AVR32_BARRIER_H */
+diff --git a/arch/blackfin/include/asm/barrier.h b/arch/blackfin/include/asm/barrier.h
+index ebb189507dd7..19283a16ac08 100644
+--- a/arch/blackfin/include/asm/barrier.h
++++ b/arch/blackfin/include/asm/barrier.h
+@@ -23,26 +23,10 @@
+ # define rmb() do { barrier(); smp_check_barrier(); } while (0)
+ # define wmb() do { barrier(); smp_mark_barrier(); } while (0)
+ # define read_barrier_depends() do { barrier(); smp_check_barrier(); } while (0)
+-#else
+-# define mb() barrier()
+-# define rmb() barrier()
+-# define wmb() barrier()
+-# define read_barrier_depends() do { } while (0)
+ #endif
+
+-#else /* !CONFIG_SMP */
+-
+-#define mb() barrier()
+-#define rmb() barrier()
+-#define wmb() barrier()
+-#define read_barrier_depends() do { } while (0)
+-
+ #endif /* !CONFIG_SMP */
+
+-#define smp_mb() mb()
+-#define smp_rmb() rmb()
+-#define smp_wmb() wmb()
+-#define set_mb(var, value) do { var = value; mb(); } while (0)
+-#define smp_read_barrier_depends() read_barrier_depends()
++#include <asm-generic/barrier.h>
+
+ #endif /* _BLACKFIN_BARRIER_H */
+diff --git a/arch/cris/include/asm/Kbuild b/arch/cris/include/asm/Kbuild
+index c8325455520e..497776e4777d 100644
+--- a/arch/cris/include/asm/Kbuild
++++ b/arch/cris/include/asm/Kbuild
+@@ -11,3 +11,4 @@ generic-y += module.h
+ generic-y += trace_clock.h
+ generic-y += vga.h
+ generic-y += xor.h
++generic-y += barrier.h
+diff --git a/arch/cris/include/asm/barrier.h b/arch/cris/include/asm/barrier.h
+deleted file mode 100644
+index 198ad7fa6b25..000000000000
+--- a/arch/cris/include/asm/barrier.h
++++ /dev/null
+@@ -1,25 +0,0 @@
+-#ifndef __ASM_CRIS_BARRIER_H
+-#define __ASM_CRIS_BARRIER_H
+-
+-#define nop() __asm__ __volatile__ ("nop");
+-
+-#define barrier() __asm__ __volatile__("": : :"memory")
+-#define mb() barrier()
+-#define rmb() mb()
+-#define wmb() mb()
+-#define read_barrier_depends() do { } while(0)
+-#define set_mb(var, value) do { var = value; mb(); } while (0)
+-
+-#ifdef CONFIG_SMP
+-#define smp_mb() mb()
+-#define smp_rmb() rmb()
+-#define smp_wmb() wmb()
+-#define smp_read_barrier_depends() read_barrier_depends()
+-#else
+-#define smp_mb() barrier()
+-#define smp_rmb() barrier()
+-#define smp_wmb() barrier()
+-#define smp_read_barrier_depends() do { } while(0)
+-#endif
+-
+-#endif /* __ASM_CRIS_BARRIER_H */
+diff --git a/arch/frv/include/asm/barrier.h b/arch/frv/include/asm/barrier.h
+index 06776ad9f5e9..abbef470154c 100644
+--- a/arch/frv/include/asm/barrier.h
++++ b/arch/frv/include/asm/barrier.h
+@@ -17,13 +17,7 @@
+ #define mb() asm volatile ("membar" : : :"memory")
+ #define rmb() asm volatile ("membar" : : :"memory")
+ #define wmb() asm volatile ("membar" : : :"memory")
+-#define read_barrier_depends() do { } while (0)
+
+-#define smp_mb() barrier()
+-#define smp_rmb() barrier()
+-#define smp_wmb() barrier()
+-#define smp_read_barrier_depends() do {} while(0)
+-#define set_mb(var, value) \
+- do { var = (value); barrier(); } while (0)
++#include <asm-generic/barrier.h>
+
+ #endif /* _ASM_BARRIER_H */
+diff --git a/arch/hexagon/include/asm/Kbuild b/arch/hexagon/include/asm/Kbuild
+index 1da17caac23c..a214fa4502f0 100644
+--- a/arch/hexagon/include/asm/Kbuild
++++ b/arch/hexagon/include/asm/Kbuild
+@@ -53,3 +53,4 @@ generic-y += types.h
+ generic-y += ucontext.h
+ generic-y += unaligned.h
+ generic-y += xor.h
++generic-y += barrier.h
+diff --git a/arch/hexagon/include/asm/atomic.h b/arch/hexagon/include/asm/atomic.h
+index 8a64ff2337f6..7aae4cb2a29a 100644
+--- a/arch/hexagon/include/asm/atomic.h
++++ b/arch/hexagon/include/asm/atomic.h
+@@ -160,8 +160,12 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+ #define atomic_sub_and_test(i, v) (atomic_sub_return(i, (v)) == 0)
+ #define atomic_add_negative(i, v) (atomic_add_return(i, (v)) < 0)
+
+-
+ #define atomic_inc_return(v) (atomic_add_return(1, v))
+ #define atomic_dec_return(v) (atomic_sub_return(1, v))
+
++#define smp_mb__before_atomic_dec() barrier()
++#define smp_mb__after_atomic_dec() barrier()
++#define smp_mb__before_atomic_inc() barrier()
++#define smp_mb__after_atomic_inc() barrier()
++
+ #endif
+diff --git a/arch/hexagon/include/asm/barrier.h b/arch/hexagon/include/asm/barrier.h
+deleted file mode 100644
+index 1041a8e70ce8..000000000000
+--- a/arch/hexagon/include/asm/barrier.h
++++ /dev/null
+@@ -1,41 +0,0 @@
+-/*
+- * Memory barrier definitions for the Hexagon architecture
+- *
+- * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License version 2 and
+- * only version 2 as published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+- * GNU General Public License for more details.
+- *
+- * You should have received a copy of the GNU General Public License
+- * along with this program; if not, write to the Free Software
+- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+- * 02110-1301, USA.
+- */
+-
+-#ifndef _ASM_BARRIER_H
+-#define _ASM_BARRIER_H
+-
+-#define rmb() barrier()
+-#define read_barrier_depends() barrier()
+-#define wmb() barrier()
+-#define mb() barrier()
+-#define smp_rmb() barrier()
+-#define smp_read_barrier_depends() barrier()
+-#define smp_wmb() barrier()
+-#define smp_mb() barrier()
+-#define smp_mb__before_atomic_dec() barrier()
+-#define smp_mb__after_atomic_dec() barrier()
+-#define smp_mb__before_atomic_inc() barrier()
+-#define smp_mb__after_atomic_inc() barrier()
+-
+-/* Set a value and use a memory barrier. Used by the scheduler somewhere. */
+-#define set_mb(var, value) \
+- do { var = value; mb(); } while (0)
+-
+-#endif /* _ASM_BARRIER_H */
+diff --git a/arch/m32r/include/asm/barrier.h b/arch/m32r/include/asm/barrier.h
+index 6976621efd3f..1a40265e8d88 100644
+--- a/arch/m32r/include/asm/barrier.h
++++ b/arch/m32r/include/asm/barrier.h
+@@ -11,84 +11,6 @@
+
+ #define nop() __asm__ __volatile__ ("nop" : : )
+
+-/*
+- * Memory barrier.
+- *
+- * mb() prevents loads and stores being reordered across this point.
+- * rmb() prevents loads being reordered across this point.
+- * wmb() prevents stores being reordered across this point.
+- */
+-#define mb() barrier()
+-#define rmb() mb()
+-#define wmb() mb()
+-
+-/**
+- * read_barrier_depends - Flush all pending reads that subsequents reads
+- * depend on.
+- *
+- * No data-dependent reads from memory-like regions are ever reordered
+- * over this barrier. All reads preceding this primitive are guaranteed
+- * to access memory (but not necessarily other CPUs' caches) before any
+- * reads following this primitive that depend on the data return by
+- * any of the preceding reads. This primitive is much lighter weight than
+- * rmb() on most CPUs, and is never heavier weight than is
+- * rmb().
+- *
+- * These ordering constraints are respected by both the local CPU
+- * and the compiler.
+- *
+- * Ordering is not guaranteed by anything other than these primitives,
+- * not even by data dependencies. See the documentation for
+- * memory_barrier() for examples and URLs to more information.
+- *
+- * For example, the following code would force ordering (the initial
+- * value of "a" is zero, "b" is one, and "p" is "&a"):
+- *
+- * <programlisting>
+- * CPU 0 CPU 1
+- *
+- * b = 2;
+- * memory_barrier();
+- * p = &b; q = p;
+- * read_barrier_depends();
+- * d = *q;
+- * </programlisting>
+- *
+- *
+- * because the read of "*q" depends on the read of "p" and these
+- * two reads are separated by a read_barrier_depends(). However,
+- * the following code, with the same initial values for "a" and "b":
+- *
+- * <programlisting>
+- * CPU 0 CPU 1
+- *
+- * a = 2;
+- * memory_barrier();
+- * b = 3; y = b;
+- * read_barrier_depends();
+- * x = a;
+- * </programlisting>
+- *
+- * does not enforce ordering, since there is no data dependency between
+- * the read of "a" and the read of "b". Therefore, on some CPUs, such
+- * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
+- * in cases like this where there are no data dependencies.
+- **/
+-
+-#define read_barrier_depends() do { } while (0)
+-
+-#ifdef CONFIG_SMP
+-#define smp_mb() mb()
+-#define smp_rmb() rmb()
+-#define smp_wmb() wmb()
+-#define smp_read_barrier_depends() read_barrier_depends()
+-#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
+-#else
+-#define smp_mb() barrier()
+-#define smp_rmb() barrier()
+-#define smp_wmb() barrier()
+-#define smp_read_barrier_depends() do { } while (0)
+-#define set_mb(var, value) do { var = value; barrier(); } while (0)
+-#endif
++#include <asm-generic/barrier.h>
+
+ #endif /* _ASM_M32R_BARRIER_H */
+diff --git a/arch/m68k/include/asm/barrier.h b/arch/m68k/include/asm/barrier.h
+index 445ce22c23cb..15c5f77c1614 100644
+--- a/arch/m68k/include/asm/barrier.h
++++ b/arch/m68k/include/asm/barrier.h
+@@ -1,20 +1,8 @@
+ #ifndef _M68K_BARRIER_H
+ #define _M68K_BARRIER_H
+
+-/*
+- * Force strict CPU ordering.
+- * Not really required on m68k...
+- */
+ #define nop() do { asm volatile ("nop"); barrier(); } while (0)
+-#define mb() barrier()
+-#define rmb() barrier()
+-#define wmb() barrier()
+-#define read_barrier_depends() ((void)0)
+-#define set_mb(var, value) ({ (var) = (value); wmb(); })
+
+-#define smp_mb() barrier()
+-#define smp_rmb() barrier()
+-#define smp_wmb() barrier()
+-#define smp_read_barrier_depends() ((void)0)
++#include <asm-generic/barrier.h>
+
+ #endif /* _M68K_BARRIER_H */
+diff --git a/arch/m68k/include/asm/linkage.h b/arch/m68k/include/asm/linkage.h
+index 5a822bb790f7..066e74f666ae 100644
+--- a/arch/m68k/include/asm/linkage.h
++++ b/arch/m68k/include/asm/linkage.h
+@@ -4,4 +4,34 @@
+ #define __ALIGN .align 4
+ #define __ALIGN_STR ".align 4"
+
++/*
++ * Make sure the compiler doesn't do anything stupid with the
++ * arguments on the stack - they are owned by the *caller*, not
++ * the callee. This just fools gcc into not spilling into them,
++ * and keeps it from doing tailcall recursion and/or using the
++ * stack slots for temporaries, since they are live and "used"
++ * all the way to the end of the function.
++ */
++#define asmlinkage_protect(n, ret, args...) \
++ __asmlinkage_protect##n(ret, ##args)
++#define __asmlinkage_protect_n(ret, args...) \
++ __asm__ __volatile__ ("" : "=r" (ret) : "0" (ret), ##args)
++#define __asmlinkage_protect0(ret) \
++ __asmlinkage_protect_n(ret)
++#define __asmlinkage_protect1(ret, arg1) \
++ __asmlinkage_protect_n(ret, "m" (arg1))
++#define __asmlinkage_protect2(ret, arg1, arg2) \
++ __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2))
++#define __asmlinkage_protect3(ret, arg1, arg2, arg3) \
++ __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3))
++#define __asmlinkage_protect4(ret, arg1, arg2, arg3, arg4) \
++ __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \
++ "m" (arg4))
++#define __asmlinkage_protect5(ret, arg1, arg2, arg3, arg4, arg5) \
++ __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \
++ "m" (arg4), "m" (arg5))
++#define __asmlinkage_protect6(ret, arg1, arg2, arg3, arg4, arg5, arg6) \
++ __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \
++ "m" (arg4), "m" (arg5), "m" (arg6))
++
+ #endif
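
A hedged usage sketch for asmlinkage_protect() (function names here are illustrative, not from this patch): a syscall whose arguments live in the caller's stack frame pins them after their last real use, so gcc can neither reuse the slots as temporaries nor tail-call through them:

    asmlinkage long sys_example(unsigned int fd, unsigned long len)
    {
            long ret = do_example(fd, len);

            /* keep fd/len "live" to the end of the function */
            asmlinkage_protect(2, ret, fd, len);
            return ret;
    }
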
+diff --git a/arch/microblaze/include/asm/Kbuild b/arch/microblaze/include/asm/Kbuild
+index d3c51a6a601d..9197b379d005 100644
+--- a/arch/microblaze/include/asm/Kbuild
++++ b/arch/microblaze/include/asm/Kbuild
+@@ -3,3 +3,4 @@ generic-y += clkdev.h
+ generic-y += exec.h
+ generic-y += trace_clock.h
+ generic-y += syscalls.h
++generic-y += barrier.h
+diff --git a/arch/microblaze/include/asm/barrier.h b/arch/microblaze/include/asm/barrier.h
+deleted file mode 100644
+index df5be3e87044..000000000000
+--- a/arch/microblaze/include/asm/barrier.h
++++ /dev/null
+@@ -1,27 +0,0 @@
+-/*
+- * Copyright (C) 2006 Atmark Techno, Inc.
+- *
+- * This file is subject to the terms and conditions of the GNU General Public
+- * License. See the file "COPYING" in the main directory of this archive
+- * for more details.
+- */
+-
+-#ifndef _ASM_MICROBLAZE_BARRIER_H
+-#define _ASM_MICROBLAZE_BARRIER_H
+-
+-#define nop() asm volatile ("nop")
+-
+-#define smp_read_barrier_depends() do {} while (0)
+-#define read_barrier_depends() do {} while (0)
+-
+-#define mb() barrier()
+-#define rmb() mb()
+-#define wmb() mb()
+-#define set_mb(var, value) do { var = value; mb(); } while (0)
+-#define set_wmb(var, value) do { var = value; wmb(); } while (0)
+-
+-#define smp_mb() mb()
+-#define smp_rmb() rmb()
+-#define smp_wmb() wmb()
+-
+-#endif /* _ASM_MICROBLAZE_BARRIER_H */
+diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
+index 5f8b95512580..7dd78fc991bf 100644
+--- a/arch/mips/mm/dma-default.c
++++ b/arch/mips/mm/dma-default.c
+@@ -92,7 +92,7 @@ static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
+ else
+ #endif
+ #if defined(CONFIG_ZONE_DMA) && !defined(CONFIG_ZONE_DMA32)
+- if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
++ if (dev->coherent_dma_mask < DMA_BIT_MASK(sizeof(phys_addr_t) * 8))
+ dma_flag = __GFP_DMA;
+ else
+ #endif
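
For reference, DMA_BIT_MASK() is defined in <linux/dma-mapping.h> as below; substituting the real width of phys_addr_t for the hard-coded 64 stops 32-bit physical-address configurations from forcing every allocation into the DMA zone:

    #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

    /* With a 32-bit phys_addr_t, sizeof(phys_addr_t) * 8 == 32 and the
     * test becomes coherent_dma_mask < 0xffffffffULL. */
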
+diff --git a/arch/mn10300/include/asm/Kbuild b/arch/mn10300/include/asm/Kbuild
+index c5d767028306..a530bca92014 100644
+--- a/arch/mn10300/include/asm/Kbuild
++++ b/arch/mn10300/include/asm/Kbuild
+@@ -2,3 +2,4 @@
+ generic-y += clkdev.h
+ generic-y += exec.h
+ generic-y += trace_clock.h
++generic-y += barrier.h
+diff --git a/arch/mn10300/include/asm/barrier.h b/arch/mn10300/include/asm/barrier.h
+deleted file mode 100644
+index 2bd97a5c8af7..000000000000
+--- a/arch/mn10300/include/asm/barrier.h
++++ /dev/null
+@@ -1,37 +0,0 @@
+-/* MN10300 memory barrier definitions
+- *
+- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+- * Written by David Howells (dhowells@redhat.com)
+- *
+- * This program is free software; you can redistribute it and/or
+- * modify it under the terms of the GNU General Public Licence
+- * as published by the Free Software Foundation; either version
+- * 2 of the Licence, or (at your option) any later version.
+- */
+-#ifndef _ASM_BARRIER_H
+-#define _ASM_BARRIER_H
+-
+-#define nop() asm volatile ("nop")
+-
+-#define mb() asm volatile ("": : :"memory")
+-#define rmb() mb()
+-#define wmb() asm volatile ("": : :"memory")
+-
+-#ifdef CONFIG_SMP
+-#define smp_mb() mb()
+-#define smp_rmb() rmb()
+-#define smp_wmb() wmb()
+-#define set_mb(var, value) do { xchg(&var, value); } while (0)
+-#else /* CONFIG_SMP */
+-#define smp_mb() barrier()
+-#define smp_rmb() barrier()
+-#define smp_wmb() barrier()
+-#define set_mb(var, value) do { var = value; mb(); } while (0)
+-#endif /* CONFIG_SMP */
+-
+-#define set_wmb(var, value) do { var = value; wmb(); } while (0)
+-
+-#define read_barrier_depends() do {} while (0)
+-#define smp_read_barrier_depends() do {} while (0)
+-
+-#endif /* _ASM_BARRIER_H */
+diff --git a/arch/parisc/include/asm/Kbuild b/arch/parisc/include/asm/Kbuild
+index ff4c9faed546..827a8465a536 100644
+--- a/arch/parisc/include/asm/Kbuild
++++ b/arch/parisc/include/asm/Kbuild
+@@ -4,3 +4,4 @@ generic-y += word-at-a-time.h auxvec.h user.h cputime.h emergency-restart.h \
+ div64.h irq_regs.h kdebug.h kvm_para.h local64.h local.h param.h \
+ poll.h xor.h clkdev.h exec.h
+ generic-y += trace_clock.h
++generic-y += barrier.h
+diff --git a/arch/parisc/include/asm/barrier.h b/arch/parisc/include/asm/barrier.h
+deleted file mode 100644
+index e77d834aa803..000000000000
+--- a/arch/parisc/include/asm/barrier.h
++++ /dev/null
+@@ -1,35 +0,0 @@
+-#ifndef __PARISC_BARRIER_H
+-#define __PARISC_BARRIER_H
+-
+-/*
+-** This is simply the barrier() macro from linux/kernel.h but when serial.c
+-** uses tqueue.h uses smp_mb() defined using barrier(), linux/kernel.h
+-** hasn't yet been included yet so it fails, thus repeating the macro here.
+-**
+-** PA-RISC architecture allows for weakly ordered memory accesses although
+-** none of the processors use it. There is a strong ordered bit that is
+-** set in the O-bit of the page directory entry. Operating systems that
+-** can not tolerate out of order accesses should set this bit when mapping
+-** pages. The O-bit of the PSW should also be set to 1 (I don't believe any
+-** of the processor implemented the PSW O-bit). The PCX-W ERS states that
+-** the TLB O-bit is not implemented so the page directory does not need to
+-** have the O-bit set when mapping pages (section 3.1). This section also
+-** states that the PSW Y, Z, G, and O bits are not implemented.
+-** So it looks like nothing needs to be done for parisc-linux (yet).
+-** (thanks to chada for the above comment -ggg)
+-**
+-** The __asm__ op below simple prevents gcc/ld from reordering
+-** instructions across the mb() "call".
+-*/
+-#define mb() __asm__ __volatile__("":::"memory") /* barrier() */
+-#define rmb() mb()
+-#define wmb() mb()
+-#define smp_mb() mb()
+-#define smp_rmb() mb()
+-#define smp_wmb() mb()
+-#define smp_read_barrier_depends() do { } while(0)
+-#define read_barrier_depends() do { } while(0)
+-
+-#define set_mb(var, value) do { var = value; mb(); } while (0)
+-
+-#endif /* __PARISC_BARRIER_H */
+diff --git a/arch/score/include/asm/Kbuild b/arch/score/include/asm/Kbuild
+index e1c7bb999b06..825c7184fced 100644
+--- a/arch/score/include/asm/Kbuild
++++ b/arch/score/include/asm/Kbuild
+@@ -4,3 +4,4 @@ header-y +=
+ generic-y += clkdev.h
+ generic-y += trace_clock.h
+ generic-y += xor.h
++generic-y += barrier.h
+diff --git a/arch/score/include/asm/barrier.h b/arch/score/include/asm/barrier.h
+deleted file mode 100644
+index 0eacb6471e6d..000000000000
+--- a/arch/score/include/asm/barrier.h
++++ /dev/null
+@@ -1,16 +0,0 @@
+-#ifndef _ASM_SCORE_BARRIER_H
+-#define _ASM_SCORE_BARRIER_H
+-
+-#define mb() barrier()
+-#define rmb() barrier()
+-#define wmb() barrier()
+-#define smp_mb() barrier()
+-#define smp_rmb() barrier()
+-#define smp_wmb() barrier()
+-
+-#define read_barrier_depends() do {} while (0)
+-#define smp_read_barrier_depends() do {} while (0)
+-
+-#define set_mb(var, value) do {var = value; wmb(); } while (0)
+-
+-#endif /* _ASM_SCORE_BARRIER_H */
+diff --git a/arch/sh/include/asm/barrier.h b/arch/sh/include/asm/barrier.h
+index 72c103dae300..43715308b068 100644
+--- a/arch/sh/include/asm/barrier.h
++++ b/arch/sh/include/asm/barrier.h
+@@ -26,29 +26,14 @@
+ #if defined(CONFIG_CPU_SH4A) || defined(CONFIG_CPU_SH5)
+ #define mb() __asm__ __volatile__ ("synco": : :"memory")
+ #define rmb() mb()
+-#define wmb() __asm__ __volatile__ ("synco": : :"memory")
++#define wmb() mb()
+ #define ctrl_barrier() __icbi(PAGE_OFFSET)
+-#define read_barrier_depends() do { } while(0)
+ #else
+-#define mb() __asm__ __volatile__ ("": : :"memory")
+-#define rmb() mb()
+-#define wmb() __asm__ __volatile__ ("": : :"memory")
+ #define ctrl_barrier() __asm__ __volatile__ ("nop;nop;nop;nop;nop;nop;nop;nop")
+-#define read_barrier_depends() do { } while(0)
+-#endif
+-
+-#ifdef CONFIG_SMP
+-#define smp_mb() mb()
+-#define smp_rmb() rmb()
+-#define smp_wmb() wmb()
+-#define smp_read_barrier_depends() read_barrier_depends()
+-#else
+-#define smp_mb() barrier()
+-#define smp_rmb() barrier()
+-#define smp_wmb() barrier()
+-#define smp_read_barrier_depends() do { } while(0)
+ #endif
+
+ #define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
+
++#include <asm-generic/barrier.h>
++
+ #endif /* __ASM_SH_BARRIER_H */
+diff --git a/arch/sparc/crypto/aes_glue.c b/arch/sparc/crypto/aes_glue.c
+index ded4cee35318..dc78cdd43e0a 100644
+--- a/arch/sparc/crypto/aes_glue.c
++++ b/arch/sparc/crypto/aes_glue.c
+@@ -433,6 +433,7 @@ static struct crypto_alg algs[] = { {
+ .blkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
++ .ivsize = AES_BLOCK_SIZE,
+ .setkey = aes_set_key,
+ .encrypt = cbc_encrypt,
+ .decrypt = cbc_decrypt,
+@@ -452,6 +453,7 @@ static struct crypto_alg algs[] = { {
+ .blkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
++ .ivsize = AES_BLOCK_SIZE,
+ .setkey = aes_set_key,
+ .encrypt = ctr_crypt,
+ .decrypt = ctr_crypt,
+diff --git a/arch/sparc/crypto/camellia_glue.c b/arch/sparc/crypto/camellia_glue.c
+index 641f55cb61c3..eb87d6dd86b1 100644
+--- a/arch/sparc/crypto/camellia_glue.c
++++ b/arch/sparc/crypto/camellia_glue.c
+@@ -274,6 +274,7 @@ static struct crypto_alg algs[] = { {
+ .blkcipher = {
+ .min_keysize = CAMELLIA_MIN_KEY_SIZE,
+ .max_keysize = CAMELLIA_MAX_KEY_SIZE,
++ .ivsize = CAMELLIA_BLOCK_SIZE,
+ .setkey = camellia_set_key,
+ .encrypt = cbc_encrypt,
+ .decrypt = cbc_decrypt,
+diff --git a/arch/sparc/crypto/des_glue.c b/arch/sparc/crypto/des_glue.c
+index d11500972994..1359bfc544e4 100644
+--- a/arch/sparc/crypto/des_glue.c
++++ b/arch/sparc/crypto/des_glue.c
+@@ -429,6 +429,7 @@ static struct crypto_alg algs[] = { {
+ .blkcipher = {
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
++ .ivsize = DES_BLOCK_SIZE,
+ .setkey = des_set_key,
+ .encrypt = cbc_encrypt,
+ .decrypt = cbc_decrypt,
+@@ -485,6 +486,7 @@ static struct crypto_alg algs[] = { {
+ .blkcipher = {
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
++ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .setkey = des3_ede_set_key,
+ .encrypt = cbc3_encrypt,
+ .decrypt = cbc3_decrypt,
+diff --git a/arch/sparc/include/asm/barrier_32.h b/arch/sparc/include/asm/barrier_32.h
+index c1b76654ee76..ae69eda288f4 100644
+--- a/arch/sparc/include/asm/barrier_32.h
++++ b/arch/sparc/include/asm/barrier_32.h
+@@ -1,15 +1,7 @@
+ #ifndef __SPARC_BARRIER_H
+ #define __SPARC_BARRIER_H
+
+-/* XXX Change this if we ever use a PSO mode kernel. */
+-#define mb() __asm__ __volatile__ ("" : : : "memory")
+-#define rmb() mb()
+-#define wmb() mb()
+-#define read_barrier_depends() do { } while(0)
+-#define set_mb(__var, __value) do { __var = __value; mb(); } while(0)
+-#define smp_mb() __asm__ __volatile__("":::"memory")
+-#define smp_rmb() __asm__ __volatile__("":::"memory")
+-#define smp_wmb() __asm__ __volatile__("":::"memory")
+-#define smp_read_barrier_depends() do { } while(0)
++#include <asm/processor.h> /* for nop() */
++#include <asm-generic/barrier.h>
+
+ #endif /* !(__SPARC_BARRIER_H) */
+diff --git a/arch/tile/include/asm/barrier.h b/arch/tile/include/asm/barrier.h
+index a9a73da5865d..b5a05d050a8f 100644
+--- a/arch/tile/include/asm/barrier.h
++++ b/arch/tile/include/asm/barrier.h
+@@ -22,59 +22,6 @@
+ #include <arch/spr_def.h>
+ #include <asm/timex.h>
+
+-/*
+- * read_barrier_depends - Flush all pending reads that subsequents reads
+- * depend on.
+- *
+- * No data-dependent reads from memory-like regions are ever reordered
+- * over this barrier. All reads preceding this primitive are guaranteed
+- * to access memory (but not necessarily other CPUs' caches) before any
+- * reads following this primitive that depend on the data return by
+- * any of the preceding reads. This primitive is much lighter weight than
+- * rmb() on most CPUs, and is never heavier weight than is
+- * rmb().
+- *
+- * These ordering constraints are respected by both the local CPU
+- * and the compiler.
+- *
+- * Ordering is not guaranteed by anything other than these primitives,
+- * not even by data dependencies. See the documentation for
+- * memory_barrier() for examples and URLs to more information.
+- *
+- * For example, the following code would force ordering (the initial
+- * value of "a" is zero, "b" is one, and "p" is "&a"):
+- *
+- * <programlisting>
+- * CPU 0 CPU 1
+- *
+- * b = 2;
+- * memory_barrier();
+- * p = &b; q = p;
+- * read_barrier_depends();
+- * d = *q;
+- * </programlisting>
+- *
+- * because the read of "*q" depends on the read of "p" and these
+- * two reads are separated by a read_barrier_depends(). However,
+- * the following code, with the same initial values for "a" and "b":
+- *
+- * <programlisting>
+- * CPU 0 CPU 1
+- *
+- * a = 2;
+- * memory_barrier();
+- * b = 3; y = b;
+- * read_barrier_depends();
+- * x = a;
+- * </programlisting>
+- *
+- * does not enforce ordering, since there is no data dependency between
+- * the read of "a" and the read of "b". Therefore, on some CPUs, such
+- * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
+- * in cases like this where there are no data dependencies.
+- */
+-#define read_barrier_depends() do { } while (0)
+-
+ #define __sync() __insn_mf()
+
+ #include <hv/syscall_public.h>
+@@ -125,20 +72,7 @@ mb_incoherent(void)
+ #define mb() fast_mb()
+ #define iob() fast_iob()
+
+-#ifdef CONFIG_SMP
+-#define smp_mb() mb()
+-#define smp_rmb() rmb()
+-#define smp_wmb() wmb()
+-#define smp_read_barrier_depends() read_barrier_depends()
+-#else
+-#define smp_mb() barrier()
+-#define smp_rmb() barrier()
+-#define smp_wmb() barrier()
+-#define smp_read_barrier_depends() do { } while (0)
+-#endif
+-
+-#define set_mb(var, value) \
+- do { var = value; mb(); } while (0)
++#include <asm-generic/barrier.h>
+
+ #endif /* !__ASSEMBLY__ */
+ #endif /* _ASM_TILE_BARRIER_H */
+diff --git a/arch/unicore32/include/asm/barrier.h b/arch/unicore32/include/asm/barrier.h
+index a6620e5336b6..83d6a520f4bd 100644
+--- a/arch/unicore32/include/asm/barrier.h
++++ b/arch/unicore32/include/asm/barrier.h
+@@ -14,15 +14,6 @@
+ #define dsb() __asm__ __volatile__ ("" : : : "memory")
+ #define dmb() __asm__ __volatile__ ("" : : : "memory")
+
+-#define mb() barrier()
+-#define rmb() barrier()
+-#define wmb() barrier()
+-#define smp_mb() barrier()
+-#define smp_rmb() barrier()
+-#define smp_wmb() barrier()
+-#define read_barrier_depends() do { } while (0)
+-#define smp_read_barrier_depends() do { } while (0)
+-
+-#define set_mb(var, value) do { var = value; smp_mb(); } while (0)
++#include <asm-generic/barrier.h>
+
+ #endif /* __UNICORE_BARRIER_H__ */
+diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
+index 7170f1738793..5c2742b75be1 100644
+--- a/arch/x86/kernel/apic/apic.c
++++ b/arch/x86/kernel/apic/apic.c
+@@ -351,6 +351,13 @@ static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen)
+ apic_write(APIC_LVTT, lvtt_value);
+
+ if (lvtt_value & APIC_LVT_TIMER_TSCDEADLINE) {
++ /*
++ * See Intel SDM: TSC-Deadline Mode chapter. In xAPIC mode,
++ * writing to the APIC LVTT and TSC_DEADLINE MSR isn't serialized.
++ * According to Intel, MFENCE can do the serialization here.
++ */
++ asm volatile("mfence" : : : "memory");
++
+ printk_once(KERN_DEBUG "TSC deadline timer enabled\n");
+ return;
+ }
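
The new MFENCE matters because arming the deadline timer is a later, otherwise-unserialized MSR write. A sketch of that consumer side, using 3.12-era helper names as an assumption rather than quoting this patch:

    /* illustrative only: program the deadline after the fence above */
    u64 tsc;

    rdtscll(tsc);
    wrmsrl(MSR_IA32_TSC_DEADLINE, tsc + delta);  /* delta: ticks ahead */
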
+diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
+index 7ed99df028ca..ead3e7c9672e 100644
+--- a/arch/x86/kernel/entry_64.S
++++ b/arch/x86/kernel/entry_64.S
+@@ -1675,7 +1675,18 @@ END(error_exit)
+ /* runs on exception stack */
+ ENTRY(nmi)
+ INTR_FRAME
++ /*
++ * Fix up the exception frame if we're on Xen.
++ * PARAVIRT_ADJUST_EXCEPTION_FRAME is guaranteed to push at most
++ * one value to the stack on native, so it may clobber the rdx
++ * scratch slot, but it won't clobber any of the important
++ * slots past it.
++ *
++ * Xen is a different story, because the Xen frame itself overlaps
++ * the "NMI executing" variable.
++ */
+ PARAVIRT_ADJUST_EXCEPTION_FRAME
++
+ /*
+ * We allow breakpoints in NMIs. If a breakpoint occurs, then
+ * the iretq it performs will take us out of NMI context.
+@@ -1727,9 +1738,12 @@ ENTRY(nmi)
+ * we don't want to enable interrupts, because then we'll end
+ * up in an awkward situation in which IRQs are on but NMIs
+ * are off.
++ *
++ * We also must not push anything to the stack before switching
++ * stacks lest we corrupt the "NMI executing" variable.
+ */
+
+- SWAPGS
++ SWAPGS_UNSAFE_STACK
+ cld
+ movq %rsp, %rdx
+ movq PER_CPU_VAR(kernel_stack), %rsp
+diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
+index 1b10af835c31..45c2045692bd 100644
+--- a/arch/x86/kernel/paravirt.c
++++ b/arch/x86/kernel/paravirt.c
+@@ -40,10 +40,18 @@
+ #include <asm/timer.h>
+ #include <asm/special_insns.h>
+
+-/* nop stub */
+-void _paravirt_nop(void)
+-{
+-}
++/*
++ * nop stub, which must not clobber anything *including the stack* to
++ * avoid confusing the entry prologues.
++ */
++extern void _paravirt_nop(void);
++asm (".pushsection .entry.text, \"ax\"\n"
++ ".global _paravirt_nop\n"
++ "_paravirt_nop:\n\t"
++ "ret\n\t"
++ ".size _paravirt_nop, . - _paravirt_nop\n\t"
++ ".type _paravirt_nop, @function\n\t"
++ ".popsection");
+
+ /* identity function, which can be inlined */
+ u32 _paravirt_ident_32(u32 x)
+diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
+index cefe57ce4ebd..b40765803d05 100644
+--- a/arch/x86/kernel/tsc.c
++++ b/arch/x86/kernel/tsc.c
+@@ -20,6 +20,7 @@
+ #include <asm/hypervisor.h>
+ #include <asm/nmi.h>
+ #include <asm/x86_init.h>
++#include <asm/geode.h>
+
+ unsigned int __read_mostly cpu_khz; /* TSC clocks / usec, not used here */
+ EXPORT_SYMBOL(cpu_khz);
+@@ -812,15 +813,17 @@ EXPORT_SYMBOL_GPL(mark_tsc_unstable);
+
+ static void __init check_system_tsc_reliable(void)
+ {
+-#ifdef CONFIG_MGEODE_LX
+- /* RTSC counts during suspend */
++#if defined(CONFIG_MGEODEGX1) || defined(CONFIG_MGEODE_LX) || defined(CONFIG_X86_GENERIC)
++ if (is_geode_lx()) {
++ /* RTSC counts during suspend */
+ #define RTSC_SUSP 0x100
+- unsigned long res_low, res_high;
++ unsigned long res_low, res_high;
+
+- rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high);
+- /* Geode_LX - the OLPC CPU has a very reliable TSC */
+- if (res_low & RTSC_SUSP)
+- tsc_clocksource_reliable = 1;
++ rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high);
++ /* Geode_LX - the OLPC CPU has a very reliable TSC */
++ if (res_low & RTSC_SUSP)
++ tsc_clocksource_reliable = 1;
++ }
+ #endif
+ if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE))
+ tsc_clocksource_reliable = 1;
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index 2996635196d3..d1a065ec683f 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -496,7 +496,7 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
+ struct vcpu_svm *svm = to_svm(vcpu);
+
+ if (svm->vmcb->control.next_rip != 0) {
+- WARN_ON(!static_cpu_has(X86_FEATURE_NRIPS));
++ WARN_ON_ONCE(!static_cpu_has(X86_FEATURE_NRIPS));
+ svm->next_rip = svm->vmcb->control.next_rip;
+ }
+
+diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
+index b599241aea81..a93e32722ab1 100644
+--- a/arch/x86/mm/init_64.c
++++ b/arch/x86/mm/init_64.c
+@@ -1131,7 +1131,7 @@ void mark_rodata_ro(void)
+ * has been zapped already via cleanup_highmem().
+ */
+ all_end = roundup((unsigned long)_brk_end, PMD_SIZE);
+- set_memory_nx(rodata_start, (all_end - rodata_start) >> PAGE_SHIFT);
++ set_memory_nx(text_end, (all_end - text_end) >> PAGE_SHIFT);
+
+ rodata_test();
+
+diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
+index 2cbc2f2cf43e..b2de632861c2 100644
+--- a/arch/x86/xen/enlighten.c
++++ b/arch/x86/xen/enlighten.c
+@@ -33,6 +33,10 @@
+ #include <linux/memblock.h>
+ #include <linux/edd.h>
+
++#ifdef CONFIG_KEXEC
++#include <linux/kexec.h>
++#endif
++
+ #include <xen/xen.h>
+ #include <xen/events.h>
+ #include <xen/interface/xen.h>
+@@ -1746,6 +1750,21 @@ static struct notifier_block xen_hvm_cpu_notifier = {
+ .notifier_call = xen_hvm_cpu_notify,
+ };
+
++#ifdef CONFIG_KEXEC
++static void xen_hvm_shutdown(void)
++{
++ native_machine_shutdown();
++ if (kexec_in_progress)
++ xen_reboot(SHUTDOWN_soft_reset);
++}
++
++static void xen_hvm_crash_shutdown(struct pt_regs *regs)
++{
++ native_machine_crash_shutdown(regs);
++ xen_reboot(SHUTDOWN_soft_reset);
++}
++#endif
++
+ static void __init xen_hvm_guest_init(void)
+ {
+ init_hvm_pv_info();
+@@ -1762,6 +1781,10 @@ static void __init xen_hvm_guest_init(void)
+ x86_init.irqs.intr_init = xen_init_IRQ;
+ xen_hvm_init_time_ops();
+ xen_hvm_init_mmu_ops();
++#ifdef CONFIG_KEXEC
++ machine_ops.shutdown = xen_hvm_shutdown;
++ machine_ops.crash_shutdown = xen_hvm_crash_shutdown;
++#endif
+ }
+
+ static uint32_t __init xen_hvm_platform(void)
+diff --git a/arch/xtensa/include/asm/barrier.h b/arch/xtensa/include/asm/barrier.h
+index ef021677d536..e1ee6b51dfc5 100644
+--- a/arch/xtensa/include/asm/barrier.h
++++ b/arch/xtensa/include/asm/barrier.h
+@@ -9,21 +9,14 @@
+ #ifndef _XTENSA_SYSTEM_H
+ #define _XTENSA_SYSTEM_H
+
+-#define smp_read_barrier_depends() do { } while(0)
+-#define read_barrier_depends() do { } while(0)
+-
+ #define mb() ({ __asm__ __volatile__("memw" : : : "memory"); })
+ #define rmb() barrier()
+ #define wmb() mb()
+
+ #ifdef CONFIG_SMP
+ #error smp_* not defined
+-#else
+-#define smp_mb() barrier()
+-#define smp_rmb() barrier()
+-#define smp_wmb() barrier()
+ #endif
+
+-#define set_mb(var, value) do { var = value; mb(); } while (0)
++#include <asm-generic/barrier.h>
+
+ #endif /* _XTENSA_SYSTEM_H */
+diff --git a/crypto/ahash.c b/crypto/ahash.c
+index 793a27f2493e..857ae2b2a2a2 100644
+--- a/crypto/ahash.c
++++ b/crypto/ahash.c
+@@ -462,7 +462,8 @@ static int ahash_prepare_alg(struct ahash_alg *alg)
+ struct crypto_alg *base = &alg->halg.base;
+
+ if (alg->halg.digestsize > PAGE_SIZE / 8 ||
+- alg->halg.statesize > PAGE_SIZE / 8)
++ alg->halg.statesize > PAGE_SIZE / 8 ||
++ alg->halg.statesize == 0)
+ return -EINVAL;
+
+ base->cra_type = &crypto_ahash_type;
+diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
+index b18c7da77067..8135feff72a2 100644
+--- a/drivers/base/regmap/regmap-debugfs.c
++++ b/drivers/base/regmap/regmap-debugfs.c
+@@ -23,8 +23,7 @@ static struct dentry *regmap_debugfs_root;
+ /* Calculate the length of a fixed format */
+ static size_t regmap_calc_reg_len(int max_val, char *buf, size_t buf_size)
+ {
+- snprintf(buf, buf_size, "%x", max_val);
+- return strlen(buf);
++ return snprintf(NULL, 0, "%x", max_val);
+ }
+
+ static ssize_t regmap_name_read_file(struct file *file,
+@@ -423,7 +422,7 @@ static ssize_t regmap_access_read_file(struct file *file,
+ /* If we're in the region the user is trying to read */
+ if (p >= *ppos) {
+ /* ...but not beyond it */
+- if (buf_pos >= count - 1 - tot_len)
++ if (buf_pos + tot_len + 1 >= count)
+ break;
+
+ /* Format the register */
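
Both regmap hunks are defensive rewrites: regmap_calc_reg_len() now leans on the C99 guarantee that snprintf() with a NULL buffer and size 0 writes nothing and returns the length the output would need, and the access-file bounds test is reordered so the unsigned expression count - 1 - tot_len can no longer underflow. A minimal illustration of the snprintf idiom:

    /* snprintf(NULL, 0, ...) == formatted length, no buffer needed */
    size_t len = snprintf(NULL, 0, "%x", 0x3fff);   /* len == 4 */
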
+diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
+index 63ff17fc23df..66f632730969 100644
+--- a/drivers/block/rbd.c
++++ b/drivers/block/rbd.c
+@@ -4868,7 +4868,6 @@ static int rbd_dev_probe_parent(struct rbd_device *rbd_dev)
+ out_err:
+ if (parent) {
+ rbd_dev_unparent(rbd_dev);
+- kfree(rbd_dev->header_name);
+ rbd_dev_destroy(parent);
+ } else {
+ rbd_put_client(rbdc);
+diff --git a/drivers/cpuidle/cpuidle-ux500.c b/drivers/cpuidle/cpuidle-ux500.c
+index e0564652af35..5e35804b1a95 100644
+--- a/drivers/cpuidle/cpuidle-ux500.c
++++ b/drivers/cpuidle/cpuidle-ux500.c
+@@ -111,7 +111,7 @@ static struct cpuidle_driver ux500_idle_driver = {
+ .state_count = 2,
+ };
+
+-static int __init dbx500_cpuidle_probe(struct platform_device *pdev)
++static int dbx500_cpuidle_probe(struct platform_device *pdev)
+ {
+ /* Configure wake up reasons */
+ prcmu_enable_wakeups(PRCMU_WAKEUP(ARM) | PRCMU_WAKEUP(RTC) |
+diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
+index a8884b8aaa9e..c128aab076ab 100644
+--- a/drivers/dma/dw/core.c
++++ b/drivers/dma/dw/core.c
+@@ -1585,7 +1585,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
+ INIT_LIST_HEAD(&dw->dma.channels);
+ for (i = 0; i < nr_channels; i++) {
+ struct dw_dma_chan *dwc = &dw->chan[i];
+- int r = nr_channels - i - 1;
+
+ dwc->chan.device = &dw->dma;
+ dma_cookie_init(&dwc->chan);
+@@ -1597,7 +1596,7 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
+
+ /* 7 is highest priority & 0 is lowest. */
+ if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
+- dwc->priority = r;
++ dwc->priority = nr_channels - i - 1;
+ else
+ dwc->priority = i;
+
+@@ -1617,6 +1616,7 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
+ /* Hardware configuration */
+ if (autocfg) {
+ unsigned int dwc_params;
++ unsigned int r = DW_DMA_MAX_NR_CHANNELS - i - 1;
+ void __iomem *addr = chip->regs + r * sizeof(u32);
+
+ dwc_params = dma_read_byaddr(addr, DWC_PARAMS);
+diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
+index d752c96d6090..bdceb60998d3 100644
+--- a/drivers/gpu/drm/drm_lock.c
++++ b/drivers/gpu/drm/drm_lock.c
+@@ -58,6 +58,9 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
+ struct drm_master *master = file_priv->master;
+ int ret = 0;
+
++ if (drm_core_check_feature(dev, DRIVER_MODESET))
++ return -EINVAL;
++
+ ++file_priv->lock_count;
+
+ if (lock->context == DRM_KERNEL_CONTEXT) {
+@@ -151,6 +154,9 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
+ struct drm_lock *lock = data;
+ struct drm_master *master = file_priv->master;
+
++ if (drm_core_check_feature(dev, DRIVER_MODESET))
++ return -EINVAL;
++
+ if (lock->context == DRM_KERNEL_CONTEXT) {
+ DRM_ERROR("Process %d using kernel context %d\n",
+ task_pid_nr(current), lock->context);
+diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+index a86ecf65c164..2268dd52f3c6 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
++++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+@@ -183,8 +183,30 @@ nouveau_fbcon_sync(struct fb_info *info)
+ return 0;
+ }
+
++static int
++nouveau_fbcon_open(struct fb_info *info, int user)
++{
++ struct nouveau_fbdev *fbcon = info->par;
++ struct nouveau_drm *drm = nouveau_drm(fbcon->dev);
++ int ret = pm_runtime_get_sync(drm->dev->dev);
++ if (ret < 0 && ret != -EACCES)
++ return ret;
++ return 0;
++}
++
++static int
++nouveau_fbcon_release(struct fb_info *info, int user)
++{
++ struct nouveau_fbdev *fbcon = info->par;
++ struct nouveau_drm *drm = nouveau_drm(fbcon->dev);
++ pm_runtime_put(drm->dev->dev);
++ return 0;
++}
++
+ static struct fb_ops nouveau_fbcon_ops = {
+ .owner = THIS_MODULE,
++ .fb_open = nouveau_fbcon_open,
++ .fb_release = nouveau_fbcon_release,
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = drm_fb_helper_set_par,
+ .fb_fillrect = nouveau_fbcon_fillrect,
+@@ -200,6 +222,8 @@ static struct fb_ops nouveau_fbcon_ops = {
+
+ static struct fb_ops nouveau_fbcon_sw_ops = {
+ .owner = THIS_MODULE,
++ .fb_open = nouveau_fbcon_open,
++ .fb_release = nouveau_fbcon_release,
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = drm_fb_helper_set_par,
+ .fb_fillrect = cfb_fillrect,
+diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
+index ea0904875c74..98976f054597 100644
+--- a/drivers/gpu/drm/qxl/qxl_display.c
++++ b/drivers/gpu/drm/qxl/qxl_display.c
+@@ -537,7 +537,7 @@ static int qxl_crtc_mode_set(struct drm_crtc *crtc,
+ adjusted_mode->hdisplay,
+ adjusted_mode->vdisplay);
+
+- if (qcrtc->index == 0)
++ if (bo->is_primary == false)
+ recreate_primary = true;
+
+ if (bo->surf.stride * bo->surf.height > qdev->vram_size) {
+@@ -799,13 +799,15 @@ static enum drm_connector_status qxl_conn_detect(
+ drm_connector_to_qxl_output(connector);
+ struct drm_device *ddev = connector->dev;
+ struct qxl_device *qdev = ddev->dev_private;
+- int connected;
++ bool connected = false;
+
+ /* The first monitor is always connected */
+- connected = (output->index == 0) ||
+- (qdev->client_monitors_config &&
+- qdev->client_monitors_config->count > output->index &&
+- qxl_head_enabled(&qdev->client_monitors_config->heads[output->index]));
++ if (!qdev->client_monitors_config) {
++ if (output->index == 0)
++ connected = true;
++ } else
++ connected = qdev->client_monitors_config->count > output->index &&
++ qxl_head_enabled(&qdev->client_monitors_config->heads[output->index]);
+
+ DRM_DEBUG("\n");
+ return connected ? connector_status_connected
+diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
+index 6d9649471f28..68fd96a50fc7 100644
+--- a/drivers/hid/hid-apple.c
++++ b/drivers/hid/hid-apple.c
+@@ -546,6 +546,12 @@ static const struct hid_device_id apple_devices[] = {
+ .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS),
+ .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI),
++ .driver_data = APPLE_HAS_FN },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ISO),
++ .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_JIS),
++ .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI),
+ .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO),
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index eb23021390cb..85b0da8c33f4 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -1695,6 +1695,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ISO) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_JIS) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) },
+@@ -2370,6 +2373,9 @@ static const struct hid_device_id hid_mouse_ignore_list[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ISO) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
+ { }
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 6da09931a987..50b25fad982d 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -139,6 +139,9 @@
+ #define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI 0x0290
+ #define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO 0x0291
+ #define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS 0x0292
++#define USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI 0x0272
++#define USB_DEVICE_ID_APPLE_WELLSPRING9_ISO 0x0273
++#define USB_DEVICE_ID_APPLE_WELLSPRING9_JIS 0x0274
+ #define USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY 0x030a
+ #define USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY 0x030b
+ #define USB_DEVICE_ID_APPLE_IRCONTROL 0x8240
+@@ -878,7 +881,8 @@
+ #define USB_DEVICE_ID_TOUCHPACK_RTS 0x1688
+
+ #define USB_VENDOR_ID_TPV 0x25aa
+-#define USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN 0x8883
++#define USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN_8882 0x8882
++#define USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN_8883 0x8883
+
+ #define USB_VENDOR_ID_TURBOX 0x062a
+ #define USB_DEVICE_ID_TURBOX_KEYBOARD 0x0201
+diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
+index 7bc98db768eb..7166d7fb43de 100644
+--- a/drivers/hid/usbhid/hid-quirks.c
++++ b/drivers/hid/usbhid/hid-quirks.c
+@@ -106,7 +106,8 @@ static const struct hid_blacklist {
+ { USB_VENDOR_ID_SUN, USB_DEVICE_ID_RARITAN_KVM_DONGLE, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_SYMBOL, USB_DEVICE_ID_SYMBOL_SCANNER_1, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_SYMBOL, USB_DEVICE_ID_SYMBOL_SCANNER_2, HID_QUIRK_NOGET },
+- { USB_VENDOR_ID_TPV, USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN, HID_QUIRK_NOGET },
++ { USB_VENDOR_ID_TPV, USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN_8882, HID_QUIRK_NOGET },
++ { USB_VENDOR_ID_TPV, USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN_8883, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_KEYBOARD, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_PF1209, HID_QUIRK_MULTI_INPUT },
+ { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U, HID_QUIRK_MULTI_INPUT },
+diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
+index b6d28439f1b9..96dc7e7a58e3 100644
+--- a/drivers/hwmon/nct6775.c
++++ b/drivers/hwmon/nct6775.c
+@@ -348,6 +348,10 @@ static const u16 NCT6775_REG_TEMP_CRIT[ARRAY_SIZE(nct6775_temp_label) - 1]
+
+ /* NCT6776 specific data */
+
++/* STEP_UP_TIME and STEP_DOWN_TIME regs are swapped for all chips but NCT6775 */
++#define NCT6776_REG_FAN_STEP_UP_TIME NCT6775_REG_FAN_STEP_DOWN_TIME
++#define NCT6776_REG_FAN_STEP_DOWN_TIME NCT6775_REG_FAN_STEP_UP_TIME
++
+ static const s8 NCT6776_ALARM_BITS[] = {
+ 0, 1, 2, 3, 8, 21, 20, 16, /* in0.. in7 */
+ 17, -1, -1, -1, -1, -1, -1, /* in8..in14 */
+@@ -3492,8 +3496,8 @@ static int nct6775_probe(struct platform_device *pdev)
+ data->REG_FAN_PULSES = NCT6776_REG_FAN_PULSES;
+ data->FAN_PULSE_SHIFT = NCT6775_FAN_PULSE_SHIFT;
+ data->REG_FAN_TIME[0] = NCT6775_REG_FAN_STOP_TIME;
+- data->REG_FAN_TIME[1] = NCT6775_REG_FAN_STEP_UP_TIME;
+- data->REG_FAN_TIME[2] = NCT6775_REG_FAN_STEP_DOWN_TIME;
++ data->REG_FAN_TIME[1] = NCT6776_REG_FAN_STEP_UP_TIME;
++ data->REG_FAN_TIME[2] = NCT6776_REG_FAN_STEP_DOWN_TIME;
+ data->REG_TOLERANCE_H = NCT6776_REG_TOLERANCE_H;
+ data->REG_PWM[0] = NCT6775_REG_PWM;
+ data->REG_PWM[1] = NCT6775_REG_FAN_START_OUTPUT;
+@@ -3562,8 +3566,8 @@ static int nct6775_probe(struct platform_device *pdev)
+ data->REG_FAN_PULSES = NCT6779_REG_FAN_PULSES;
+ data->FAN_PULSE_SHIFT = NCT6775_FAN_PULSE_SHIFT;
+ data->REG_FAN_TIME[0] = NCT6775_REG_FAN_STOP_TIME;
+- data->REG_FAN_TIME[1] = NCT6775_REG_FAN_STEP_UP_TIME;
+- data->REG_FAN_TIME[2] = NCT6775_REG_FAN_STEP_DOWN_TIME;
++ data->REG_FAN_TIME[1] = NCT6776_REG_FAN_STEP_UP_TIME;
++ data->REG_FAN_TIME[2] = NCT6776_REG_FAN_STEP_DOWN_TIME;
+ data->REG_TOLERANCE_H = NCT6776_REG_TOLERANCE_H;
+ data->REG_PWM[0] = NCT6775_REG_PWM;
+ data->REG_PWM[1] = NCT6775_REG_FAN_START_OUTPUT;
+@@ -3636,8 +3640,8 @@ static int nct6775_probe(struct platform_device *pdev)
+ data->REG_FAN_PULSES = NCT6779_REG_FAN_PULSES;
+ data->FAN_PULSE_SHIFT = NCT6775_FAN_PULSE_SHIFT;
+ data->REG_FAN_TIME[0] = NCT6775_REG_FAN_STOP_TIME;
+- data->REG_FAN_TIME[1] = NCT6775_REG_FAN_STEP_UP_TIME;
+- data->REG_FAN_TIME[2] = NCT6775_REG_FAN_STEP_DOWN_TIME;
++ data->REG_FAN_TIME[1] = NCT6776_REG_FAN_STEP_UP_TIME;
++ data->REG_FAN_TIME[2] = NCT6776_REG_FAN_STEP_DOWN_TIME;
+ data->REG_TOLERANCE_H = NCT6776_REG_TOLERANCE_H;
+ data->REG_PWM[0] = NCT6775_REG_PWM;
+ data->REG_PWM[1] = NCT6775_REG_FAN_START_OUTPUT;
+diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
+index d0bdac0498ce..f7439c556413 100644
+--- a/drivers/i2c/busses/i2c-designware-platdrv.c
++++ b/drivers/i2c/busses/i2c-designware-platdrv.c
+@@ -28,6 +28,7 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/delay.h>
++#include <linux/dmi.h>
+ #include <linux/i2c.h>
+ #include <linux/clk.h>
+ #include <linux/errno.h>
+@@ -53,6 +54,22 @@ static u32 i2c_dw_get_clk_rate_khz(struct dw_i2c_dev *dev)
+ }
+
+ #ifdef CONFIG_ACPI
++/*
++ * The HCNT/LCNT information coming from ACPI should be the most accurate
++ * for given platform. However, some systems get it wrong. On such systems
++ * we get better results by calculating those based on the input clock.
++ */
++static const struct dmi_system_id dw_i2c_no_acpi_params[] = {
++ {
++ .ident = "Dell Inspiron 7348",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7348"),
++ },
++ },
++ { }
++};
++
+ static void dw_i2c_acpi_params(struct platform_device *pdev, char method[],
+ u16 *hcnt, u16 *lcnt, u32 *sda_hold)
+ {
+@@ -60,6 +77,9 @@ static void dw_i2c_acpi_params(struct platform_device *pdev, char method[],
+ acpi_handle handle = ACPI_HANDLE(&pdev->dev);
+ union acpi_object *obj;
+
++ if (dmi_check_system(dw_i2c_no_acpi_params))
++ return;
++
+ if (ACPI_FAILURE(acpi_evaluate_object(handle, method, NULL, &buf)))
+ return;
+
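
The designware change consults a DMI blacklist before trusting ACPI-supplied HCNT/LCNT timing parameters. A small userspace sketch of the same table-driven match, with simplified stand-ins for dmi_check_system() and struct dmi_system_id (the entry below mirrors the one in the patch):

    #include <stdio.h>
    #include <string.h>

    struct dmi_match { const char *vendor; const char *product; };

    /* Systems whose ACPI HCNT/LCNT values are known to be wrong;
     * NULL-terminated, like the driver's dmi_system_id list. */
    static const struct dmi_match no_acpi_params[] = {
        { "Dell Inc.", "Inspiron 7348" },
        { NULL, NULL }
    };

    static int dmi_blacklisted(const char *vendor, const char *product)
    {
        const struct dmi_match *m;

        for (m = no_acpi_params; m->vendor; m++)
            if (!strcmp(m->vendor, vendor) && !strcmp(m->product, product))
                return 1;
        return 0;
    }

    int main(void)
    {
        printf("%d\n", dmi_blacklisted("Dell Inc.", "Inspiron 7348"));
        return 0;
    }
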
+diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
+index c8a42602205b..622b6fce149b 100644
+--- a/drivers/i2c/busses/i2c-rcar.c
++++ b/drivers/i2c/busses/i2c-rcar.c
+@@ -690,15 +690,16 @@ static int rcar_i2c_probe(struct platform_device *pdev)
+ return ret;
+ }
+
++ pm_runtime_enable(dev);
++ platform_set_drvdata(pdev, priv);
++
+ ret = i2c_add_numbered_adapter(adap);
+ if (ret < 0) {
+ dev_err(dev, "reg adap failed: %d\n", ret);
++ pm_runtime_disable(dev);
+ return ret;
+ }
+
+- pm_runtime_enable(dev);
+- platform_set_drvdata(pdev, priv);
+-
+ dev_info(dev, "probed\n");
+
+ return 0;
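
The i2c-rcar reorder matters because i2c_add_numbered_adapter() can probe child clients synchronously, and those probes may already rely on drvdata and runtime PM being set up. A sketch of the ordering, with stub functions standing in for the kernel APIs:

    #include <stdio.h>

    /* Stubs standing in for the kernel APIs touched by the reorder. */
    static void pm_runtime_enable(void)  { puts("runtime PM on"); }
    static void pm_runtime_disable(void) { puts("runtime PM off"); }
    static int  i2c_add_numbered_adapter(void) { return 0; /* may probe clients */ }

    static int probe(void)
    {
        int ret;

        /* Make the device fully usable *before* registering the adapter:
         * registration can trigger client probes that depend on this state. */
        pm_runtime_enable();
        /* platform_set_drvdata(...) would go here as well */

        ret = i2c_add_numbered_adapter();
        if (ret < 0) {
            pm_runtime_disable();   /* unwind on failure */
            return ret;
        }
        return 0;
    }

    int main(void) { return probe(); }
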
+diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
+index ce09bf932831..8983e7fa0fb4 100644
+--- a/drivers/i2c/busses/i2c-s3c2410.c
++++ b/drivers/i2c/busses/i2c-s3c2410.c
+@@ -1151,17 +1151,19 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
+ i2c->adap.nr = i2c->pdata->bus_num;
+ i2c->adap.dev.of_node = pdev->dev.of_node;
+
++ platform_set_drvdata(pdev, i2c);
++
++ pm_runtime_enable(&pdev->dev);
++
+ ret = i2c_add_numbered_adapter(&i2c->adap);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to add bus to i2c core\n");
++ pm_runtime_disable(&pdev->dev);
+ s3c24xx_i2c_deregister_cpufreq(i2c);
+ clk_unprepare(i2c->clk);
+ return ret;
+ }
+
+- platform_set_drvdata(pdev, i2c);
+-
+- pm_runtime_enable(&pdev->dev);
+ pm_runtime_enable(&i2c->adap.dev);
+
+ dev_info(&pdev->dev, "%s: S3C I2C adapter\n", dev_name(&i2c->adap.dev));
+diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
+index 9a51eb2242a0..2e04d5253130 100644
+--- a/drivers/infiniband/ulp/isert/ib_isert.c
++++ b/drivers/infiniband/ulp/isert/ib_isert.c
+@@ -2541,9 +2541,16 @@ isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
+ static int
+ isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
+ {
+- int ret;
++ struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
++ int ret = 0;
+
+ switch (state) {
++ case ISTATE_REMOVE:
++ spin_lock_bh(&conn->cmd_lock);
++ list_del_init(&cmd->i_conn_node);
++ spin_unlock_bh(&conn->cmd_lock);
++ isert_put_cmd(isert_cmd, true);
++ break;
+ case ISTATE_SEND_NOPIN_WANT_RESPONSE:
+ ret = isert_put_nopin(cmd, conn, false);
+ break;
+diff --git a/drivers/input/joystick/Kconfig b/drivers/input/joystick/Kconfig
+index 56eb471b5576..4215b5382092 100644
+--- a/drivers/input/joystick/Kconfig
++++ b/drivers/input/joystick/Kconfig
+@@ -196,6 +196,7 @@ config JOYSTICK_TWIDJOY
+ config JOYSTICK_ZHENHUA
+ tristate "5-byte Zhenhua RC transmitter"
+ select SERIO
++ select BITREVERSE
+ help
+ Say Y here if you have a Zhen Hua PPM-4CH transmitter which is
+ supplied with ready-to-fly micro electric indoor helicopters
+diff --git a/drivers/input/keyboard/omap4-keypad.c b/drivers/input/keyboard/omap4-keypad.c
+index 30acfd49fa6c..1ba3490b9ffe 100644
+--- a/drivers/input/keyboard/omap4-keypad.c
++++ b/drivers/input/keyboard/omap4-keypad.c
+@@ -284,7 +284,7 @@ static int omap4_keypad_probe(struct platform_device *pdev)
+ } else {
+ error = omap4_keypad_parse_dt(&pdev->dev, keypad_data);
+ if (error)
+- return error;
++ goto err_free_keypad;
+ }
+
+ res = request_mem_region(res->start, resource_size(res), pdev->name);
+diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
+index cff065f6261c..de3d92077c77 100644
+--- a/drivers/input/mouse/psmouse-base.c
++++ b/drivers/input/mouse/psmouse-base.c
+@@ -1441,6 +1441,10 @@ static int psmouse_connect(struct serio *serio, struct serio_driver *drv)
+ if (error)
+ goto err_clear_drvdata;
+
++ /* give PT device some time to settle down before probing */
++ if (serio->id.type == SERIO_PS_PSTHRU)
++ usleep_range(10000, 15000);
++
+ if (psmouse_probe(psmouse) < 0) {
+ error = -ENODEV;
+ goto err_close_serio;
+diff --git a/drivers/input/serio/parkbd.c b/drivers/input/serio/parkbd.c
+index 26b45936f9fd..1e8cd6f1fe9e 100644
+--- a/drivers/input/serio/parkbd.c
++++ b/drivers/input/serio/parkbd.c
+@@ -194,6 +194,7 @@ static int __init parkbd_init(void)
+ parkbd_port = parkbd_allocate_serio();
+ if (!parkbd_port) {
+ parport_release(parkbd_dev);
++ parport_unregister_device(parkbd_dev);
+ return -ENOMEM;
+ }
+
+diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
+index b853bb47fc7d..d22b4af761f5 100644
+--- a/drivers/iommu/amd_iommu.c
++++ b/drivers/iommu/amd_iommu.c
+@@ -1750,14 +1750,16 @@ static unsigned long dma_ops_area_alloc(struct device *dev,
+ unsigned long next_bit = dom->next_address % APERTURE_RANGE_SIZE;
+ int max_index = dom->aperture_size >> APERTURE_RANGE_SHIFT;
+ int i = start >> APERTURE_RANGE_SHIFT;
+- unsigned long boundary_size;
++ unsigned long boundary_size, mask;
+ unsigned long address = -1;
+ unsigned long limit;
+
+ next_bit >>= PAGE_SHIFT;
+
+- boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
+- PAGE_SIZE) >> PAGE_SHIFT;
++ mask = dma_get_seg_boundary(dev);
++
++ boundary_size = mask + 1 ? ALIGN(mask + 1, PAGE_SIZE) >> PAGE_SHIFT :
++ 1UL << (BITS_PER_LONG - PAGE_SHIFT);
+
+ for (;i < max_index; ++i) {
+ unsigned long offset = dom->aperture[i]->offset >> PAGE_SHIFT;
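
The amd_iommu hunk guards against dma_get_seg_boundary() returning ~0UL ("no boundary"), where mask + 1 wraps to 0 and the old ALIGN-based expression produced a boundary size of zero. A runnable demonstration of the wrap and the fixed expression:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))
    #define BITS_PER_LONG (8 * sizeof(unsigned long))

    int main(void)
    {
        unsigned long mask = ~0UL;   /* "no boundary" segment mask */
        unsigned long old, fixed;

        /* Old code: mask + 1 wraps to 0, ALIGN(0, PAGE_SIZE) is 0, and the
         * allocator is handed a zero boundary size. */
        old = ALIGN(mask + 1, PAGE_SIZE) >> PAGE_SHIFT;

        /* Fixed code: detect the wrap and substitute the largest
         * representable boundary, in pages. */
        fixed = mask + 1 ? ALIGN(mask + 1, PAGE_SIZE) >> PAGE_SHIFT
                         : 1UL << (BITS_PER_LONG - PAGE_SHIFT);

        printf("old=%lu fixed=%lu\n", old, fixed);
        return 0;
    }
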
+diff --git a/drivers/macintosh/windfarm_core.c b/drivers/macintosh/windfarm_core.c
+index 3ee198b65843..cc7ece1712b5 100644
+--- a/drivers/macintosh/windfarm_core.c
++++ b/drivers/macintosh/windfarm_core.c
+@@ -435,7 +435,7 @@ int wf_unregister_client(struct notifier_block *nb)
+ {
+ mutex_lock(&wf_lock);
+ blocking_notifier_chain_unregister(&wf_client_list, nb);
+- wf_client_count++;
++ wf_client_count--;
+ if (wf_client_count == 0)
+ wf_stop_thread();
+ mutex_unlock(&wf_lock);
+diff --git a/drivers/md/dm-cache-policy-cleaner.c b/drivers/md/dm-cache-policy-cleaner.c
+index b04d1f904d07..2eca9084defe 100644
+--- a/drivers/md/dm-cache-policy-cleaner.c
++++ b/drivers/md/dm-cache-policy-cleaner.c
+@@ -434,7 +434,7 @@ static struct dm_cache_policy *wb_create(dm_cblock_t cache_size,
+ static struct dm_cache_policy_type wb_policy_type = {
+ .name = "cleaner",
+ .version = {1, 0, 0},
+- .hint_size = 0,
++ .hint_size = 4,
+ .owner = THIS_MODULE,
+ .create = wb_create
+ };
+diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
+index 59715389b3cf..19cfd7affebe 100644
+--- a/drivers/md/dm-raid.c
++++ b/drivers/md/dm-raid.c
+@@ -325,8 +325,7 @@ static int validate_region_size(struct raid_set *rs, unsigned long region_size)
+ */
+ if (min_region_size > (1 << 13)) {
+ /* If not a power of 2, make it the next power of 2 */
+- if (min_region_size & (min_region_size - 1))
+- region_size = 1 << fls(region_size);
++ region_size = roundup_pow_of_two(min_region_size);
+ DMINFO("Choosing default region size of %lu sectors",
+ region_size);
+ } else {
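
roundup_pow_of_two() both removes the old code's variable mix-up (it tested min_region_size but shifted region_size) and behaves as the identity on exact powers of two, where 1 << fls(x) would double the value. A userspace contrast, with local stand-ins for the kernel's fls() and roundup_pow_of_two():

    #include <stdio.h>

    /* Local stand-ins for the kernel's fls() and roundup_pow_of_two(). */
    static int fls_(unsigned long x)
    {
        int r = 0;

        while (x) {
            x >>= 1;
            r++;
        }
        return r;
    }

    static unsigned long roundup_pow_of_two_(unsigned long x)
    {
        return 1UL << fls_(x - 1);
    }

    int main(void)
    {
        unsigned long min_region_size = 1UL << 14;  /* already a power of two */

        /* 1 << fls(x) doubles an exact power of two... */
        printf("1 << fls(x)           = %lu\n", 1UL << fls_(min_region_size));
        /* ...while roundup_pow_of_two(x) leaves it alone. */
        printf("roundup_pow_of_two(x) = %lu\n", roundup_pow_of_two_(min_region_size));
        return 0;
    }
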
+diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
+index d2b3563129c2..5ff934102f30 100644
+--- a/drivers/md/dm-thin.c
++++ b/drivers/md/dm-thin.c
+@@ -2153,7 +2153,7 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
+ metadata_low_callback,
+ pool);
+ if (r)
+- goto out_free_pt;
++ goto out_flags_changed;
+
+ pt->callbacks.congested_fn = pool_is_congested;
+ dm_table_add_target_callbacks(ti->table, &pt->callbacks);
+diff --git a/drivers/md/persistent-data/dm-btree-internal.h b/drivers/md/persistent-data/dm-btree-internal.h
+index bf2b80d5c470..8731b6ea026b 100644
+--- a/drivers/md/persistent-data/dm-btree-internal.h
++++ b/drivers/md/persistent-data/dm-btree-internal.h
+@@ -138,4 +138,10 @@ int lower_bound(struct btree_node *n, uint64_t key);
+
+ extern struct dm_block_validator btree_node_validator;
+
++/*
++ * Value type for upper levels of multi-level btrees.
++ */
++extern void init_le64_type(struct dm_transaction_manager *tm,
++ struct dm_btree_value_type *vt);
++
+ #endif /* DM_BTREE_INTERNAL_H */
+diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c
+index a03178e91a79..7c0d75547ccf 100644
+--- a/drivers/md/persistent-data/dm-btree-remove.c
++++ b/drivers/md/persistent-data/dm-btree-remove.c
+@@ -544,14 +544,6 @@ static int remove_raw(struct shadow_spine *s, struct dm_btree_info *info,
+ return r;
+ }
+
+-static struct dm_btree_value_type le64_type = {
+- .context = NULL,
+- .size = sizeof(__le64),
+- .inc = NULL,
+- .dec = NULL,
+- .equal = NULL
+-};
+-
+ int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
+ uint64_t *keys, dm_block_t *new_root)
+ {
+@@ -559,12 +551,14 @@ int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
+ int index = 0, r = 0;
+ struct shadow_spine spine;
+ struct btree_node *n;
++ struct dm_btree_value_type le64_vt;
+
++ init_le64_type(info->tm, &le64_vt);
+ init_shadow_spine(&spine, info);
+ for (level = 0; level < info->levels; level++) {
+ r = remove_raw(&spine, info,
+ (level == last_level ?
+- &info->value_type : &le64_type),
++ &info->value_type : &le64_vt),
+ root, keys[level], (unsigned *)&index);
+ if (r < 0)
+ break;
+diff --git a/drivers/md/persistent-data/dm-btree-spine.c b/drivers/md/persistent-data/dm-btree-spine.c
+index 1b5e13ec7f96..0dee514ba4c5 100644
+--- a/drivers/md/persistent-data/dm-btree-spine.c
++++ b/drivers/md/persistent-data/dm-btree-spine.c
+@@ -249,3 +249,40 @@ int shadow_root(struct shadow_spine *s)
+ {
+ return s->root;
+ }
++
++static void le64_inc(void *context, const void *value_le)
++{
++ struct dm_transaction_manager *tm = context;
++ __le64 v_le;
++
++ memcpy(&v_le, value_le, sizeof(v_le));
++ dm_tm_inc(tm, le64_to_cpu(v_le));
++}
++
++static void le64_dec(void *context, const void *value_le)
++{
++ struct dm_transaction_manager *tm = context;
++ __le64 v_le;
++
++ memcpy(&v_le, value_le, sizeof(v_le));
++ dm_tm_dec(tm, le64_to_cpu(v_le));
++}
++
++static int le64_equal(void *context, const void *value1_le, const void *value2_le)
++{
++ __le64 v1_le, v2_le;
++
++ memcpy(&v1_le, value1_le, sizeof(v1_le));
++ memcpy(&v2_le, value2_le, sizeof(v2_le));
++ return v1_le == v2_le;
++}
++
++void init_le64_type(struct dm_transaction_manager *tm,
++ struct dm_btree_value_type *vt)
++{
++ vt->context = tm;
++ vt->size = sizeof(__le64);
++ vt->inc = le64_inc;
++ vt->dec = le64_dec;
++ vt->equal = le64_equal;
++}
+diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
+index 8dad9849649e..50cf11119af9 100644
+--- a/drivers/md/persistent-data/dm-btree.c
++++ b/drivers/md/persistent-data/dm-btree.c
+@@ -667,12 +667,7 @@ static int insert(struct dm_btree_info *info, dm_block_t root,
+ struct btree_node *n;
+ struct dm_btree_value_type le64_type;
+
+- le64_type.context = NULL;
+- le64_type.size = sizeof(__le64);
+- le64_type.inc = NULL;
+- le64_type.dec = NULL;
+- le64_type.equal = NULL;
+-
++ init_le64_type(info->tm, &le64_type);
+ init_shadow_spine(&spine, info);
+
+ for (level = 0; level < (info->levels - 1); level++) {
+diff --git a/drivers/media/platform/vsp1/vsp1_regs.h b/drivers/media/platform/vsp1/vsp1_regs.h
+index 1d3304f1365b..72faf593427e 100644
+--- a/drivers/media/platform/vsp1/vsp1_regs.h
++++ b/drivers/media/platform/vsp1/vsp1_regs.h
+@@ -238,7 +238,7 @@
+ #define VI6_WPF_SZCLIP_EN (1 << 28)
+ #define VI6_WPF_SZCLIP_OFST_MASK (0xff << 16)
+ #define VI6_WPF_SZCLIP_OFST_SHIFT 16
+-#define VI6_WPF_SZCLIP_SIZE_MASK (0x1fff << 0)
++#define VI6_WPF_SZCLIP_SIZE_MASK (0xfff << 0)
+ #define VI6_WPF_SZCLIP_SIZE_SHIFT 0
+
+ #define VI6_WPF_OUTFMT 0x100c
+@@ -304,9 +304,9 @@
+ #define VI6_DPR_HST_ROUTE 0x2044
+ #define VI6_DPR_HSI_ROUTE 0x2048
+ #define VI6_DPR_BRU_ROUTE 0x204c
+-#define VI6_DPR_ROUTE_FXA_MASK (0xff << 8)
++#define VI6_DPR_ROUTE_FXA_MASK (0xff << 16)
+ #define VI6_DPR_ROUTE_FXA_SHIFT 16
+-#define VI6_DPR_ROUTE_FP_MASK (0xff << 8)
++#define VI6_DPR_ROUTE_FP_MASK (0x3f << 8)
+ #define VI6_DPR_ROUTE_FP_SHIFT 8
+ #define VI6_DPR_ROUTE_RT_MASK (0x3f << 0)
+ #define VI6_DPR_ROUTE_RT_SHIFT 0
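
The vsp1 register fixes are all mask/shift mismatches: a field's mask must be its width positioned at its shift. A quick consistency check one could run over such pairs (the values are copied from the corrected header):

    #include <assert.h>
    #include <stdio.h>

    #define VI6_DPR_ROUTE_FXA_MASK  (0xffu << 16)
    #define VI6_DPR_ROUTE_FXA_SHIFT 16
    #define VI6_DPR_ROUTE_FP_MASK   (0x3fu << 8)
    #define VI6_DPR_ROUTE_FP_SHIFT  8

    /* A mask is consistent with its shift when no bits sit below the
     * shift and shifting it back down leaves a contiguous run of low
     * bits. The old FXA mask (0xff << 8 with shift 16) fails this. */
    static int mask_matches_shift(unsigned mask, unsigned shift)
    {
        unsigned w = mask >> shift;

        return (mask & ((1u << shift) - 1)) == 0 && (w & (w + 1)) == 0;
    }

    int main(void)
    {
        assert(mask_matches_shift(VI6_DPR_ROUTE_FXA_MASK, VI6_DPR_ROUTE_FXA_SHIFT));
        assert(mask_matches_shift(VI6_DPR_ROUTE_FP_MASK, VI6_DPR_ROUTE_FP_SHIFT));
        puts("masks consistent");
        return 0;
    }
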
+diff --git a/drivers/media/usb/gspca/m5602/m5602_s5k83a.c b/drivers/media/usb/gspca/m5602/m5602_s5k83a.c
+index 7cbc3a00bda8..bf6b215438e3 100644
+--- a/drivers/media/usb/gspca/m5602/m5602_s5k83a.c
++++ b/drivers/media/usb/gspca/m5602/m5602_s5k83a.c
+@@ -177,7 +177,7 @@ static int rotation_thread_function(void *data)
+ __s32 vflip, hflip;
+
+ set_current_state(TASK_INTERRUPTIBLE);
+- while (!schedule_timeout(100)) {
++ while (!schedule_timeout(msecs_to_jiffies(100))) {
+ if (mutex_lock_interruptible(&sd->gspca_dev.usb_lock))
+ break;
+
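
schedule_timeout() takes jiffies, so a bare 100 sleeps for a HZ-dependent interval; msecs_to_jiffies(100) pins it to 100 ms. A userspace model of the conversion (rounding up, as the kernel helper does):

    #include <stdio.h>

    /* Userspace model of msecs_to_jiffies(): milliseconds to ticks,
     * rounded up so the sleep is never shorter than requested. */
    static unsigned long msecs_to_jiffies_(unsigned int ms, unsigned int hz)
    {
        return ((unsigned long)ms * hz + 999) / 1000;
    }

    int main(void)
    {
        unsigned int hz;

        /* A bare "100" passed to schedule_timeout() means 100 *ticks*:
         * 1 s at HZ=100 but only 100 ms at HZ=1000. */
        for (hz = 100; hz <= 1000; hz *= 10)
            printf("HZ=%4u: 100 ms = %lu jiffies, 100 jiffies = %lu ms\n",
                   hz, msecs_to_jiffies_(100, hz), 100UL * 1000 / hz);
        return 0;
    }
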
+diff --git a/drivers/media/usb/usbvision/usbvision-video.c b/drivers/media/usb/usbvision/usbvision-video.c
+index 661f7f2a9e8b..ea5ec8ed67a7 100644
+--- a/drivers/media/usb/usbvision/usbvision-video.c
++++ b/drivers/media/usb/usbvision/usbvision-video.c
+@@ -435,6 +435,7 @@ static int usbvision_v4l2_close(struct file *file)
+ usbvision_scratch_free(usbvision);
+
+ usbvision->user--;
++ mutex_unlock(&usbvision->v4l2_lock);
+
+ if (power_on_at_open) {
+ /* power off in a little while
+@@ -448,7 +449,6 @@ static int usbvision_v4l2_close(struct file *file)
+ usbvision_release(usbvision);
+ return 0;
+ }
+- mutex_unlock(&usbvision->v4l2_lock);
+
+ PDEBUG(DBG_IO, "success");
+ return 0;
+diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
+index bf79def40126..8822e880833b 100644
+--- a/drivers/mtd/ubi/io.c
++++ b/drivers/mtd/ubi/io.c
+@@ -931,6 +931,11 @@ static int validate_vid_hdr(const struct ubi_device *ubi,
+ goto bad;
+ }
+
++ if (data_size > ubi->leb_size) {
++ ubi_err("bad data_size");
++ goto bad;
++ }
++
+ if (vol_type == UBI_VID_STATIC) {
+ /*
+ * Although from high-level point of view static volumes may
+diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
+index d77b1c1d7c72..bebf49e0dbe9 100644
+--- a/drivers/mtd/ubi/vtbl.c
++++ b/drivers/mtd/ubi/vtbl.c
+@@ -651,6 +651,7 @@ static int init_volumes(struct ubi_device *ubi,
+ if (ubi->corr_peb_count)
+ ubi_err("%d PEBs are corrupted and not used",
+ ubi->corr_peb_count);
++ return -ENOSPC;
+ }
+ ubi->rsvd_pebs += reserved_pebs;
+ ubi->avail_pebs -= reserved_pebs;
+diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
+index c08254016fe8..3375bfb1b246 100644
+--- a/drivers/mtd/ubi/wl.c
++++ b/drivers/mtd/ubi/wl.c
+@@ -1978,6 +1978,7 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
+ if (ubi->corr_peb_count)
+ ubi_err("%d PEBs are corrupted and not used",
+ ubi->corr_peb_count);
++ err = -ENOSPC;
+ goto out_free;
+ }
+ ubi->avail_pebs -= reserved_pebs;
+diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
+index addd23246eb6..d66cf214e95e 100644
+--- a/drivers/net/ppp/pppoe.c
++++ b/drivers/net/ppp/pppoe.c
+@@ -313,7 +313,6 @@ static void pppoe_flush_dev(struct net_device *dev)
+ if (po->pppoe_dev == dev &&
+ sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND | PPPOX_ZOMBIE)) {
+ pppox_unbind_sock(sk);
+- sk->sk_state = PPPOX_ZOMBIE;
+ sk->sk_state_change(sk);
+ po->pppoe_dev = NULL;
+ dev_put(dev);
+diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
+index 20643833f0e6..31e607afb1d0 100644
+--- a/drivers/net/usb/asix_devices.c
++++ b/drivers/net/usb/asix_devices.c
+@@ -466,19 +466,7 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
+ return ret;
+ }
+
+- ret = asix_sw_reset(dev, AX_SWRESET_IPPD | AX_SWRESET_PRL);
+- if (ret < 0)
+- return ret;
+-
+- msleep(150);
+-
+- ret = asix_sw_reset(dev, AX_SWRESET_CLEAR);
+- if (ret < 0)
+- return ret;
+-
+- msleep(150);
+-
+- ret = asix_sw_reset(dev, embd_phy ? AX_SWRESET_IPRL : AX_SWRESET_PRTE);
++ ax88772_reset(dev);
+
+ /* Read PHYID register *AFTER* the PHY was reset properly */
+ phyid = asix_get_phyid(dev);
+@@ -891,7 +879,7 @@ static const struct driver_info ax88772_info = {
+ .unbind = ax88772_unbind,
+ .status = asix_status,
+ .link_reset = ax88772_link_reset,
+- .reset = ax88772_reset,
++ .reset = ax88772_link_reset,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR | FLAG_MULTI_PACKET,
+ .rx_fixup = asix_rx_fixup_common,
+ .tx_fixup = asix_tx_fixup,
+diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
+index c9887cb60650..f900dfd551e8 100644
+--- a/drivers/net/wireless/ath/ath9k/init.c
++++ b/drivers/net/wireless/ath/ath9k/init.c
+@@ -893,6 +893,7 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
+ hw->max_rate_tries = 10;
+ hw->sta_data_size = sizeof(struct ath_node);
+ hw->vif_data_size = sizeof(struct ath_vif);
++ hw->extra_tx_headroom = 4;
+
+ hw->wiphy->available_antennas_rx = BIT(ah->caps.max_rxchains) - 1;
+ hw->wiphy->available_antennas_tx = BIT(ah->caps.max_txchains) - 1;
+diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c
+index bb6b0df50b33..efb6e13dc788 100644
+--- a/drivers/s390/char/con3270.c
++++ b/drivers/s390/char/con3270.c
+@@ -407,6 +407,10 @@ con3270_irq(struct con3270 *cp, struct raw3270_request *rq, struct irb *irb)
+ else
+ /* Normal end. Copy residual count. */
+ rq->rescnt = irb->scsw.cmd.count;
++ } else if (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) {
++ /* Interrupt without an outstanding request -> update all */
++ cp->update_flags = CON_UPDATE_ALL;
++ con3270_set_timer(cp, 1);
+ }
+ return RAW3270_IO_DONE;
+ }
+diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c
+index 34629ea913d4..49f034facf77 100644
+--- a/drivers/s390/char/tty3270.c
++++ b/drivers/s390/char/tty3270.c
+@@ -662,6 +662,10 @@ tty3270_irq(struct tty3270 *tp, struct raw3270_request *rq, struct irb *irb)
+ else
+ /* Normal end. Copy residual count. */
+ rq->rescnt = irb->scsw.cmd.count;
++ } else if (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) {
++ /* Interrupt without an outstanding request -> update all */
++ tp->update_flags = TTY_UPDATE_ALL;
++ tty3270_set_timer(tp, 1);
+ }
+ return RAW3270_IO_DONE;
+ }
+diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
+index 5f57e3d35e26..6adf9abdf955 100644
+--- a/drivers/scsi/3w-9xxx.c
++++ b/drivers/scsi/3w-9xxx.c
+@@ -225,6 +225,17 @@ static const struct file_operations twa_fops = {
+ .llseek = noop_llseek,
+ };
+
++/*
++ * The controllers use an inline buffer instead of a mapped SGL for small,
++ * single entry buffers. Note that we treat a zero-length transfer like
++ * a mapped SGL.
++ */
++static bool twa_command_mapped(struct scsi_cmnd *cmd)
++{
++ return scsi_sg_count(cmd) != 1 ||
++ scsi_bufflen(cmd) >= TW_MIN_SGL_LENGTH;
++}
++
+ /* This function will complete an aen request from the isr */
+ static int twa_aen_complete(TW_Device_Extension *tw_dev, int request_id)
+ {
+@@ -1351,7 +1362,8 @@ static irqreturn_t twa_interrupt(int irq, void *dev_instance)
+ }
+
+ /* Now complete the io */
+- scsi_dma_unmap(cmd);
++ if (twa_command_mapped(cmd))
++ scsi_dma_unmap(cmd);
+ cmd->scsi_done(cmd);
+ tw_dev->state[request_id] = TW_S_COMPLETED;
+ twa_free_request_id(tw_dev, request_id);
+@@ -1594,7 +1606,8 @@ static int twa_reset_device_extension(TW_Device_Extension *tw_dev)
+ struct scsi_cmnd *cmd = tw_dev->srb[i];
+
+ cmd->result = (DID_RESET << 16);
+- scsi_dma_unmap(cmd);
++ if (twa_command_mapped(cmd))
++ scsi_dma_unmap(cmd);
+ cmd->scsi_done(cmd);
+ }
+ }
+@@ -1777,12 +1790,14 @@ static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_
+ retval = twa_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL);
+ switch (retval) {
+ case SCSI_MLQUEUE_HOST_BUSY:
+- scsi_dma_unmap(SCpnt);
++ if (twa_command_mapped(SCpnt))
++ scsi_dma_unmap(SCpnt);
+ twa_free_request_id(tw_dev, request_id);
+ break;
+ case 1:
+ SCpnt->result = (DID_ERROR << 16);
+- scsi_dma_unmap(SCpnt);
++ if (twa_command_mapped(SCpnt))
++ scsi_dma_unmap(SCpnt);
+ done(SCpnt);
+ tw_dev->state[request_id] = TW_S_COMPLETED;
+ twa_free_request_id(tw_dev, request_id);
+@@ -1843,8 +1858,7 @@ static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
+ /* Map sglist from scsi layer to cmd packet */
+
+ if (scsi_sg_count(srb)) {
+- if ((scsi_sg_count(srb) == 1) &&
+- (scsi_bufflen(srb) < TW_MIN_SGL_LENGTH)) {
++ if (!twa_command_mapped(srb)) {
+ if (srb->sc_data_direction == DMA_TO_DEVICE ||
+ srb->sc_data_direction == DMA_BIDIRECTIONAL)
+ scsi_sg_copy_to_buffer(srb,
+@@ -1917,7 +1931,7 @@ static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int re
+ {
+ struct scsi_cmnd *cmd = tw_dev->srb[request_id];
+
+- if (scsi_bufflen(cmd) < TW_MIN_SGL_LENGTH &&
++ if (!twa_command_mapped(cmd) &&
+ (cmd->sc_data_direction == DMA_FROM_DEVICE ||
+ cmd->sc_data_direction == DMA_BIDIRECTIONAL)) {
+ if (scsi_sg_count(cmd) == 1) {
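
twa_command_mapped() centralizes the "inline buffer vs. mapped SGL" decision so that every unmap and copy-back path agrees with the mapping path. A sketch of the predicate (the TW_MIN_SGL_LENGTH value below is an assumed placeholder, not taken from the driver header):

    #include <stdbool.h>
    #include <stdio.h>

    #define TW_MIN_SGL_LENGTH 512   /* assumed threshold, for illustration */

    /* A single, short SG entry goes through the controller's inline
     * buffer instead of being DMA-mapped; a zero-length transfer is
     * treated as mapped. */
    static bool command_mapped(int sg_count, unsigned int bufflen)
    {
        return sg_count != 1 || bufflen >= TW_MIN_SGL_LENGTH;
    }

    int main(void)
    {
        printf("1 entry,   64 bytes -> %s\n", command_mapped(1,   64) ? "mapped" : "inline");
        printf("1 entry, 4096 bytes -> %s\n", command_mapped(1, 4096) ? "mapped" : "inline");
        printf("0 entries,  0 bytes -> %s\n", command_mapped(0,    0) ? "mapped" : "inline");
        return 0;
    }
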
+diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
+index 066e3198838d..ff2689d01209 100644
+--- a/drivers/scsi/scsi_error.c
++++ b/drivers/scsi/scsi_error.c
+@@ -1906,8 +1906,17 @@ int scsi_error_handler(void *data)
+ * We never actually get interrupted because kthread_run
+ * disables signal delivery for the created thread.
+ */
+- while (!kthread_should_stop()) {
++ while (true) {
++ /*
++ * The sequence in kthread_stop() sets the stop flag first
++ * then wakes the process. To avoid missed wakeups, the task
++ * should always be in a non running state before the stop
++ * flag is checked
++ */
+ set_current_state(TASK_INTERRUPTIBLE);
++ if (kthread_should_stop())
++ break;
++
+ if ((shost->host_failed == 0 && shost->host_eh_scheduled == 0) ||
+ shost->host_failed != shost->host_busy) {
+ SCSI_LOG_ERROR_RECOVERY(1,
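
The scsi_error change is the canonical fix for a lost stop notification: publish "about to sleep" before checking the stop flag. A loose userspace analogue, using a mutex and condition variable in place of set_current_state()/schedule():

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  wake = PTHREAD_COND_INITIALIZER;
    static bool should_stop;

    static void *worker(void *arg)
    {
        (void)arg;
        pthread_mutex_lock(&lock);
        for (;;) {
            /* Check the stop flag only while holding the lock -- the
             * kernel equivalent is setting TASK_INTERRUPTIBLE *before*
             * kthread_should_stop(), so a concurrent stop cannot slip
             * in between the check and the sleep. */
            if (should_stop)
                break;
            pthread_cond_wait(&wake, &lock);   /* kernel: schedule() */
        }
        pthread_mutex_unlock(&lock);
        return NULL;
    }

    int main(void)
    {
        pthread_t t;

        pthread_create(&t, NULL, worker, NULL);

        pthread_mutex_lock(&lock);   /* kernel: kthread_stop() sets the flag... */
        should_stop = true;
        pthread_cond_signal(&wake);  /* ...then wakes the thread */
        pthread_mutex_unlock(&lock);

        pthread_join(t, NULL);
        puts("worker stopped");
        return 0;
    }
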
+diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
+index d01ae4d353d4..bb4a919d2fdf 100644
+--- a/drivers/spi/spi-pxa2xx.c
++++ b/drivers/spi/spi-pxa2xx.c
+@@ -562,6 +562,10 @@ static irqreturn_t ssp_int(int irq, void *dev_id)
+ if (!(sccr1_reg & SSCR1_TIE))
+ mask &= ~SSSR_TFS;
+
++ /* Ignore RX timeout interrupt if it is disabled */
++ if (!(sccr1_reg & SSCR1_TINTE))
++ mask &= ~SSSR_TINT;
++
+ if (!(status & mask))
+ return IRQ_NONE;
+
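
The pxa2xx ISR fix follows a general rule: only react to status bits whose interrupt source is actually enabled, so a stale latched bit cannot make a shared handler claim an interrupt it does not own. A sketch of the mask construction (the bit positions are illustrative, not the real SSCR1/SSSR layout):

    #include <stdio.h>

    #define SSCR1_TIE   (1u << 1)    /* TX FIFO interrupt enable (illustrative) */
    #define SSCR1_TINTE (1u << 19)   /* RX timeout interrupt enable (illustrative) */
    #define SSSR_TFS    (1u << 5)    /* TX FIFO service request (illustrative) */
    #define SSSR_TINT   (1u << 19)   /* RX timeout status (illustrative) */

    /* Drop status bits whose interrupt source is disabled in SSCR1. */
    static unsigned irq_mask(unsigned sccr1, unsigned base_mask)
    {
        unsigned mask = base_mask;

        if (!(sccr1 & SSCR1_TIE))
            mask &= ~SSSR_TFS;
        if (!(sccr1 & SSCR1_TINTE))
            mask &= ~SSSR_TINT;
        return mask;
    }

    int main(void)
    {
        unsigned status = SSSR_TINT;   /* stale latched timeout bit */
        unsigned mask = irq_mask(0 /* both sources disabled */,
                                 SSSR_TFS | SSSR_TINT);

        printf("handle irq: %s\n", (status & mask) ? "yes" : "no (IRQ_NONE)");
        return 0;
    }
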
+diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
+index d254477372b9..5ddda10472c6 100644
+--- a/drivers/spi/spi.c
++++ b/drivers/spi/spi.c
+@@ -1087,8 +1087,7 @@ static struct class spi_master_class = {
+ *
+ * The caller is responsible for assigning the bus number and initializing
+ * the master's methods before calling spi_register_master(); and (after errors
+- * adding the device) calling spi_master_put() and kfree() to prevent a memory
+- * leak.
++ * adding the device) calling spi_master_put() to prevent a memory leak.
+ */
+ struct spi_master *spi_alloc_master(struct device *dev, unsigned size)
+ {
+diff --git a/drivers/staging/speakup/fakekey.c b/drivers/staging/speakup/fakekey.c
+index 4299cf45f947..5e1f16c36b49 100644
+--- a/drivers/staging/speakup/fakekey.c
++++ b/drivers/staging/speakup/fakekey.c
+@@ -81,6 +81,7 @@ void speakup_fake_down_arrow(void)
+ __this_cpu_write(reporting_keystroke, true);
+ input_report_key(virt_keyboard, KEY_DOWN, PRESSED);
+ input_report_key(virt_keyboard, KEY_DOWN, RELEASED);
++ input_sync(virt_keyboard);
+ __this_cpu_write(reporting_keystroke, false);
+
+ /* reenable preemption */
+diff --git a/drivers/usb/chipidea/debug.c b/drivers/usb/chipidea/debug.c
+index 92f0cc442d46..eac6a3212de2 100644
+--- a/drivers/usb/chipidea/debug.c
++++ b/drivers/usb/chipidea/debug.c
+@@ -62,9 +62,11 @@ static int ci_port_test_show(struct seq_file *s, void *data)
+ unsigned long flags;
+ unsigned mode;
+
++ pm_runtime_get_sync(ci->dev);
+ spin_lock_irqsave(&ci->lock, flags);
+ mode = hw_port_test_get(ci);
+ spin_unlock_irqrestore(&ci->lock, flags);
++ pm_runtime_put_sync(ci->dev);
+
+ seq_printf(s, "mode = %u\n", mode);
+
+@@ -94,9 +96,11 @@ static ssize_t ci_port_test_write(struct file *file, const char __user *ubuf,
+ if (sscanf(buf, "%u", &mode) != 1)
+ return -EINVAL;
+
++ pm_runtime_get_sync(ci->dev);
+ spin_lock_irqsave(&ci->lock, flags);
+ ret = hw_port_test_set(ci, mode);
+ spin_unlock_irqrestore(&ci->lock, flags);
++ pm_runtime_put_sync(ci->dev);
+
+ return ret ? ret : count;
+ }
+@@ -238,8 +242,10 @@ static ssize_t ci_role_write(struct file *file, const char __user *ubuf,
+ if (role == CI_ROLE_END || role == ci->role)
+ return -EINVAL;
+
++ pm_runtime_get_sync(ci->dev);
+ ci_role_stop(ci);
+ ret = ci_role_start(ci, role);
++ pm_runtime_put_sync(ci->dev);
+
+ return ret ? ret : count;
+ }
+diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
+index 09de131ee0cb..c997ee9122bc 100644
+--- a/drivers/usb/class/usbtmc.c
++++ b/drivers/usb/class/usbtmc.c
+@@ -110,6 +110,7 @@ struct usbtmc_ID_rigol_quirk {
+
+ static const struct usbtmc_ID_rigol_quirk usbtmc_id_quirk[] = {
+ { 0x1ab1, 0x0588 },
++ { 0x1ab1, 0x04b0 },
+ { 0, 0 }
+ };
+
+diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
+index 98cb09617b20..b9560f485d21 100644
+--- a/drivers/usb/core/config.c
++++ b/drivers/usb/core/config.c
+@@ -114,7 +114,7 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
+ cfgno, inum, asnum, ep->desc.bEndpointAddress);
+ ep->ss_ep_comp.bmAttributes = 16;
+ } else if (usb_endpoint_xfer_isoc(&ep->desc) &&
+- desc->bmAttributes > 2) {
++ USB_SS_MULT(desc->bmAttributes) > 3) {
+ dev_warn(ddev, "Isoc endpoint has Mult of %d in "
+ "config %d interface %d altsetting %d ep %d: "
+ "setting to 3\n", desc->bmAttributes + 1,
+@@ -123,7 +123,8 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
+ }
+
+ if (usb_endpoint_xfer_isoc(&ep->desc))
+- max_tx = (desc->bMaxBurst + 1) * (desc->bmAttributes + 1) *
++ max_tx = (desc->bMaxBurst + 1) *
++ (USB_SS_MULT(desc->bmAttributes)) *
+ usb_endpoint_maxp(&ep->desc);
+ else if (usb_endpoint_xfer_int(&ep->desc))
+ max_tx = usb_endpoint_maxp(&ep->desc) *
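
The config.c change matters because bmAttributes stores Mult - 1 plus reserved bits; USB_SS_MULT() (a real macro from the USB ch9 headers, reproduced below) masks and adds 1, whereas the old bmAttributes + 1 arithmetic silently depended on the reserved bits being zero. A small per-interval bandwidth calculation using the macro:

    #include <stdio.h>

    /* Mirrors USB_SS_MULT() from <uapi/linux/usb/ch9.h>: the descriptor
     * field stores Mult - 1 in its low two bits. */
    #define USB_SS_MULT(p) (1 + ((p) & 0x3))

    static unsigned max_isoc_bytes_per_interval(unsigned bMaxBurst,
                                                unsigned bmAttributes,
                                                unsigned maxp)
    {
        /* (burst + 1) packets per burst, Mult bursts, maxp bytes each */
        return (bMaxBurst + 1) * USB_SS_MULT(bmAttributes) * maxp;
    }

    int main(void)
    {
        /* bmAttributes = 1 means Mult = 2, so 3 * 2 * 1024 bytes. */
        printf("%u\n", max_isoc_bytes_per_interval(2, 1, 1024));
        return 0;
    }
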
+diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
+index 5e1a1790c2f6..04b21577e8ed 100644
+--- a/drivers/usb/core/hcd-pci.c
++++ b/drivers/usb/core/hcd-pci.c
+@@ -215,6 +215,9 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+ goto disable_pci;
+ }
+
++ hcd->amd_resume_bug = (usb_hcd_amd_remote_wakeup_quirk(dev) &&
++ driver->flags & (HCD_USB11 | HCD_USB3)) ? 1 : 0;
++
+ if (driver->flags & HCD_MEMORY) {
+ /* EHCI, OHCI */
+ hcd->rsrc_start = pci_resource_start(dev, 0);
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 78141993dfd0..f9af3bf33e1b 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -2539,9 +2539,6 @@ static unsigned hub_is_wusb(struct usb_hub *hub)
+ #define HUB_LONG_RESET_TIME 200
+ #define HUB_RESET_TIMEOUT 800
+
+-static int hub_port_reset(struct usb_hub *hub, int port1,
+- struct usb_device *udev, unsigned int delay, bool warm);
+-
+/* Is a USB 3.0 port in the Inactive or Compliance Mode state?
+ * Port warm reset is required to recover
+ */
+@@ -2622,44 +2619,6 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
+ return 0;
+ }
+
+-static void hub_port_finish_reset(struct usb_hub *hub, int port1,
+- struct usb_device *udev, int *status)
+-{
+- switch (*status) {
+- case 0:
+- /* TRSTRCY = 10 ms; plus some extra */
+- msleep(10 + 40);
+- if (udev) {
+- struct usb_hcd *hcd = bus_to_hcd(udev->bus);
+-
+- update_devnum(udev, 0);
+- /* The xHC may think the device is already reset,
+- * so ignore the status.
+- */
+- if (hcd->driver->reset_device)
+- hcd->driver->reset_device(hcd, udev);
+- }
+- /* FALL THROUGH */
+- case -ENOTCONN:
+- case -ENODEV:
+- usb_clear_port_feature(hub->hdev,
+- port1, USB_PORT_FEAT_C_RESET);
+- if (hub_is_superspeed(hub->hdev)) {
+- usb_clear_port_feature(hub->hdev, port1,
+- USB_PORT_FEAT_C_BH_PORT_RESET);
+- usb_clear_port_feature(hub->hdev, port1,
+- USB_PORT_FEAT_C_PORT_LINK_STATE);
+- usb_clear_port_feature(hub->hdev, port1,
+- USB_PORT_FEAT_C_CONNECTION);
+- }
+- if (udev)
+- usb_set_device_state(udev, *status
+- ? USB_STATE_NOTATTACHED
+- : USB_STATE_DEFAULT);
+- break;
+- }
+-}
+-
+ /* Handle port reset and port warm(BH) reset (for USB3 protocol ports) */
+ static int hub_port_reset(struct usb_hub *hub, int port1,
+ struct usb_device *udev, unsigned int delay, bool warm)
+@@ -2682,13 +2641,10 @@ static int hub_port_reset(struct usb_hub *hub, int port1,
+ * If the caller hasn't explicitly requested a warm reset,
+ * double check and see if one is needed.
+ */
+- status = hub_port_status(hub, port1,
+- &portstatus, &portchange);
+- if (status < 0)
+- goto done;
+-
+- if (hub_port_warm_reset_required(hub, portstatus))
+- warm = true;
++ if (hub_port_status(hub, port1, &portstatus, &portchange) == 0)
++ if (hub_port_warm_reset_required(hub,
++ portstatus))
++ warm = true;
+ }
+
+ /* Reset the port */
+@@ -2713,11 +2669,19 @@ static int hub_port_reset(struct usb_hub *hub, int port1,
+
+ /* Check for disconnect or reset */
+ if (status == 0 || status == -ENOTCONN || status == -ENODEV) {
+- hub_port_finish_reset(hub, port1, udev, &status);
++ usb_clear_port_feature(hub->hdev, port1,
++ USB_PORT_FEAT_C_RESET);
+
+ if (!hub_is_superspeed(hub->hdev))
+ goto done;
+
++ usb_clear_port_feature(hub->hdev, port1,
++ USB_PORT_FEAT_C_BH_PORT_RESET);
++ usb_clear_port_feature(hub->hdev, port1,
++ USB_PORT_FEAT_C_PORT_LINK_STATE);
++ usb_clear_port_feature(hub->hdev, port1,
++ USB_PORT_FEAT_C_CONNECTION);
++
+ /*
+ * If a USB 3.0 device migrates from reset to an error
+ * state, re-issue the warm reset.
+@@ -2751,6 +2715,26 @@ static int hub_port_reset(struct usb_hub *hub, int port1,
+ port1);
+
+ done:
++ if (status == 0) {
++ /* TRSTRCY = 10 ms; plus some extra */
++ msleep(10 + 40);
++ if (udev) {
++ struct usb_hcd *hcd = bus_to_hcd(udev->bus);
++
++ update_devnum(udev, 0);
++ /* The xHC may think the device is already reset,
++ * so ignore the status.
++ */
++ if (hcd->driver->reset_device)
++ hcd->driver->reset_device(hcd, udev);
++
++ usb_set_device_state(udev, USB_STATE_DEFAULT);
++ }
++ } else {
++ if (udev)
++ usb_set_device_state(udev, USB_STATE_NOTATTACHED);
++ }
++
+ if (!hub_is_superspeed(hub->hdev))
+ up_read(&ehci_cf_port_reset_rwsem);
+
+diff --git a/drivers/usb/core/otg_whitelist.h b/drivers/usb/core/otg_whitelist.h
+index e8cdce571bb1..2753cec61aaf 100644
+--- a/drivers/usb/core/otg_whitelist.h
++++ b/drivers/usb/core/otg_whitelist.h
+@@ -59,6 +59,11 @@ static int is_targeted(struct usb_device *dev)
+ le16_to_cpu(dev->descriptor.idProduct) == 0xbadd))
+ return 0;
+
++ /* OTG PET device is always targeted (see OTG 2.0 ECN 6.4.2) */
++ if ((le16_to_cpu(dev->descriptor.idVendor) == 0x1a0a &&
++ le16_to_cpu(dev->descriptor.idProduct) == 0x0200))
++ return 1;
++
+ /* NOTE: can't use usb_match_id() since interface caches
+ * aren't set up yet. this is cut/paste from that code.
+ */
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index 5014a4282352..08f321904fb7 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -13,6 +13,7 @@
+
+ #include <linux/usb.h>
+ #include <linux/usb/quirks.h>
++#include <linux/usb/hcd.h>
+ #include "usb.h"
+
+ /* Lists of quirky USB devices, split in device quirks and interface quirks.
+@@ -53,6 +54,13 @@ static const struct usb_device_id usb_quirk_list[] = {
+ { USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT },
+ { USB_DEVICE(0x046d, 0x0843), .driver_info = USB_QUIRK_DELAY_INIT },
+
++ /* Logitech ConferenceCam CC3000e */
++ { USB_DEVICE(0x046d, 0x0847), .driver_info = USB_QUIRK_DELAY_INIT },
++ { USB_DEVICE(0x046d, 0x0848), .driver_info = USB_QUIRK_DELAY_INIT },
++
++ /* Logitech PTZ Pro Camera */
++ { USB_DEVICE(0x046d, 0x0853), .driver_info = USB_QUIRK_DELAY_INIT },
++
+ /* Logitech Quickcam Fusion */
+ { USB_DEVICE(0x046d, 0x08c1), .driver_info = USB_QUIRK_RESET_RESUME },
+
+@@ -77,6 +85,12 @@ static const struct usb_device_id usb_quirk_list[] = {
+ /* Philips PSC805 audio device */
+ { USB_DEVICE(0x0471, 0x0155), .driver_info = USB_QUIRK_RESET_RESUME },
+
++ /* Plantronic Audio 655 DSP */
++ { USB_DEVICE(0x047f, 0xc008), .driver_info = USB_QUIRK_RESET_RESUME },
++
++ /* Plantronic Audio 648 USB */
++ { USB_DEVICE(0x047f, 0xc013), .driver_info = USB_QUIRK_RESET_RESUME },
++
+ /* Artisman Watchdog Dongle */
+ { USB_DEVICE(0x04b4, 0x0526), .driver_info =
+ USB_QUIRK_CONFIG_INTF_STRINGS },
+@@ -120,9 +134,6 @@ static const struct usb_device_id usb_quirk_list[] = {
+ /* Alcor Micro Corp. Hub */
+ { USB_DEVICE(0x058f, 0x9254), .driver_info = USB_QUIRK_RESET_RESUME },
+
+- /* MicroTouch Systems touchscreen */
+- { USB_DEVICE(0x0596, 0x051e), .driver_info = USB_QUIRK_RESET_RESUME },
+-
+ /* appletouch */
+ { USB_DEVICE(0x05ac, 0x021a), .driver_info = USB_QUIRK_RESET_RESUME },
+
+@@ -184,6 +195,10 @@ static const struct usb_device_id usb_quirk_list[] = {
+ { USB_DEVICE(0x0b05, 0x17e0), .driver_info =
+ USB_QUIRK_IGNORE_REMOTE_WAKEUP },
+
++ /* Protocol and OTG Electrical Test Device */
++ { USB_DEVICE(0x1a0a, 0x0200), .driver_info =
++ USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL },
++
+ { } /* terminating entry must be last */
+ };
+
+@@ -192,9 +207,20 @@ static const struct usb_device_id usb_interface_quirk_list[] = {
+ { USB_VENDOR_AND_INTERFACE_INFO(0x046d, USB_CLASS_VIDEO, 1, 0),
+ .driver_info = USB_QUIRK_RESET_RESUME },
+
+- /* ASUS Base Station(T100) */
+- { USB_DEVICE(0x0b05, 0x17e0), .driver_info =
+- USB_QUIRK_IGNORE_REMOTE_WAKEUP },
++ { } /* terminating entry must be last */
++};
++
++static const struct usb_device_id usb_amd_resume_quirk_list[] = {
++ /* Lenovo Mouse with Pixart controller */
++ { USB_DEVICE(0x17ef, 0x602e), .driver_info = USB_QUIRK_RESET_RESUME },
++
++ /* Pixart Mouse */
++ { USB_DEVICE(0x093a, 0x2500), .driver_info = USB_QUIRK_RESET_RESUME },
++ { USB_DEVICE(0x093a, 0x2510), .driver_info = USB_QUIRK_RESET_RESUME },
++ { USB_DEVICE(0x093a, 0x2521), .driver_info = USB_QUIRK_RESET_RESUME },
++
++ /* Logitech Optical Mouse M90/M100 */
++ { USB_DEVICE(0x046d, 0xc05a), .driver_info = USB_QUIRK_RESET_RESUME },
+
+ { } /* terminating entry must be last */
+ };
+@@ -225,6 +251,18 @@ static bool usb_match_any_interface(struct usb_device *udev,
+ return false;
+ }
+
++int usb_amd_resume_quirk(struct usb_device *udev)
++{
++ struct usb_hcd *hcd;
++
++ hcd = bus_to_hcd(udev->bus);
++ /* The device should be attached directly to root hub */
++ if (udev->level == 1 && hcd->amd_resume_bug == 1)
++ return 1;
++
++ return 0;
++}
++
+ static u32 __usb_detect_quirks(struct usb_device *udev,
+ const struct usb_device_id *id)
+ {
+@@ -250,6 +288,15 @@ static u32 __usb_detect_quirks(struct usb_device *udev,
+ void usb_detect_quirks(struct usb_device *udev)
+ {
+ udev->quirks = __usb_detect_quirks(udev, usb_quirk_list);
++
++ /*
++	 * Pixart-based mice can trigger a remote wakeup issue on the AMD
++	 * Yangtze chipset, so flag them with RESET_RESUME.
++ */
++ if (usb_amd_resume_quirk(udev))
++ udev->quirks |= __usb_detect_quirks(udev,
++ usb_amd_resume_quirk_list);
++
+ if (udev->quirks)
+ dev_dbg(&udev->dev, "USB quirks for this device: %x\n",
+ udev->quirks);
+diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
+index 5a45437da097..a47ff42e620a 100644
+--- a/drivers/usb/host/pci-quirks.c
++++ b/drivers/usb/host/pci-quirks.c
+@@ -250,6 +250,18 @@ commit:
+ }
+ EXPORT_SYMBOL_GPL(usb_amd_find_chipset_info);
+
++int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *pdev)
++{
++ /* Make sure amd chipset type has already been initialized */
++ usb_amd_find_chipset_info();
++ if (amd_chipset.sb_type.gen != AMD_CHIPSET_YANGTZE)
++ return 0;
++
++ dev_dbg(&pdev->dev, "QUIRK: Enable AMD remote wakeup fix\n");
++ return 1;
++}
++EXPORT_SYMBOL_GPL(usb_hcd_amd_remote_wakeup_quirk);
++
+ bool usb_amd_hang_symptom_quirk(void)
+ {
+ u8 rev;
+diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
+index 9af524c1f48f..9552d2080d12 100644
+--- a/drivers/usb/host/xhci-mem.c
++++ b/drivers/usb/host/xhci-mem.c
+@@ -1402,10 +1402,10 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
+ * use Event Data TRBs, and we don't chain in a link TRB on short
+ * transfers, we're basically dividing by 1.
+ *
+- * xHCI 1.0 specification indicates that the Average TRB Length should
+- * be set to 8 for control endpoints.
++ * xHCI 1.0 and 1.1 specification indicates that the Average TRB Length
++ * should be set to 8 for control endpoints.
+ */
+- if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version == 0x100)
++ if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version >= 0x100)
+ ep_ctx->tx_info |= cpu_to_le32(AVG_TRB_LENGTH_FOR_EP(8));
+ else
+ ep_ctx->tx_info |=
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index 4ddceb7e05c3..68b8bc2e82d9 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -37,6 +37,9 @@
+
+ #define PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI 0x8c31
+ #define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31
++#define PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI 0x22b5
++#define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI 0xa12f
++#define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI 0x9d2f
+
+ static const char hcd_name[] = "xhci_hcd";
+
+@@ -129,6 +132,12 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI) {
+ xhci->quirks |= XHCI_SPURIOUS_REBOOT;
+ }
++ if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
++ (pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||
++ pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI ||
++ pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI)) {
++ xhci->quirks |= XHCI_PME_STUCK_QUIRK;
++ }
+ if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
+ pdev->device == PCI_DEVICE_ID_ASROCK_P67) {
+ xhci->quirks |= XHCI_RESET_ON_RESUME;
+@@ -143,6 +152,21 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ xhci->quirks |= XHCI_RESET_ON_RESUME;
+ }
+
++/*
++ * Make sure PME works on some Intel xHCI controllers by writing 1 to clear
++ * the Internal PME flag bit in the vendor-specific PMCTRL register at offset 0x80a4
++ */
++static void xhci_pme_quirk(struct xhci_hcd *xhci)
++{
++ u32 val;
++ void __iomem *reg;
++
++ reg = (void __iomem *) xhci->cap_regs + 0x80a4;
++ val = readl(reg);
++ writel(val | BIT(28), reg);
++ readl(reg);
++}
++
+ /* called during probe() after chip reset completes */
+ static int xhci_pci_setup(struct usb_hcd *hcd)
+ {
+@@ -269,6 +293,9 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
+ if (xhci_compliance_mode_recovery_timer_quirk_check())
+ pdev->no_d3cold = true;
+
++ if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
++ xhci_pme_quirk(xhci);
++
+ return xhci_suspend(xhci, do_wakeup);
+ }
+
+@@ -299,6 +326,9 @@ static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated)
+ if (pdev->vendor == PCI_VENDOR_ID_INTEL)
+ usb_enable_intel_xhci_ports(pdev);
+
++ if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
++ xhci_pme_quirk(xhci);
++
+ retval = xhci_resume(xhci, hibernated);
+ return retval;
+ }
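
xhci_pme_quirk() above is a read-modify-write of a vendor register where bit 28 is write-1-to-clear, followed by a readback so the posted MMIO write actually reaches the device. A userspace model of that sequence (the W1C behaviour is inferred from the patch comment):

    #include <stdint.h>
    #include <stdio.h>

    #define BIT(n) (1u << (n))

    /* Fake MMIO word standing in for PMCTRL at cap_regs + 0x80a4;
     * bit 28 models the write-1-to-clear "Internal PME" flag. */
    static uint32_t pmctrl = BIT(28);          /* flag starts out stuck */

    static uint32_t mmio_read(void) { return pmctrl; }

    static void mmio_write(uint32_t v)
    {
        uint32_t pme = pmctrl & BIT(28);

        if (v & BIT(28))
            pme = 0;                           /* write-1-to-clear */
        pmctrl = (v & ~BIT(28)) | pme;
    }

    int main(void)
    {
        uint32_t val = mmio_read();            /* read-modify-write... */

        mmio_write(val | BIT(28));             /* ...setting the W1C bit */
        (void)mmio_read();                     /* flush the posted write */
        printf("PME flag: %s\n", (pmctrl & BIT(28)) ? "stuck" : "clear");
        return 0;
    }
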
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 66deb0af258e..ad381c22e5ac 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -554,9 +554,12 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
+ struct xhci_virt_device *dev = xhci->devs[slot_id];
+ struct xhci_virt_ep *ep = &dev->eps[ep_index];
+ struct xhci_ring *ep_ring;
+- struct xhci_generic_trb *trb;
++ struct xhci_segment *new_seg;
++ union xhci_trb *new_deq;
+ dma_addr_t addr;
+ u64 hw_dequeue;
++ bool cycle_found = false;
++ bool td_last_trb_found = false;
+
+ ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
+ ep_index, stream_id);
+@@ -581,45 +584,45 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
+ hw_dequeue = le64_to_cpu(ep_ctx->deq);
+ }
+
+- /* Find virtual address and segment of hardware dequeue pointer */
+- state->new_deq_seg = ep_ring->deq_seg;
+- state->new_deq_ptr = ep_ring->dequeue;
+- while (xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr)
+- != (dma_addr_t)(hw_dequeue & ~0xf)) {
+- next_trb(xhci, ep_ring, &state->new_deq_seg,
+- &state->new_deq_ptr);
+- if (state->new_deq_ptr == ep_ring->dequeue) {
+- WARN_ON(1);
+- return;
+- }
+- }
++ new_seg = ep_ring->deq_seg;
++ new_deq = ep_ring->dequeue;
++ state->new_cycle_state = hw_dequeue & 0x1;
++
+ /*
+- * Find cycle state for last_trb, starting at old cycle state of
+- * hw_dequeue. If there is only one segment ring, find_trb_seg() will
+- * return immediately and cannot toggle the cycle state if this search
+- * wraps around, so add one more toggle manually in that case.
++ * We want to find the pointer, segment and cycle state of the new trb
++	 * (the one after the current TD's last_trb). We know the cycle state at
++ * hw_dequeue, so walk the ring until both hw_dequeue and last_trb are
++ * found.
+ */
+- state->new_cycle_state = hw_dequeue & 0x1;
+- if (ep_ring->first_seg == ep_ring->first_seg->next &&
+- cur_td->last_trb < state->new_deq_ptr)
+- state->new_cycle_state ^= 0x1;
++ do {
++ if (!cycle_found && xhci_trb_virt_to_dma(new_seg, new_deq)
++ == (dma_addr_t)(hw_dequeue & ~0xf)) {
++ cycle_found = true;
++ if (td_last_trb_found)
++ break;
++ }
++ if (new_deq == cur_td->last_trb)
++ td_last_trb_found = true;
+
+- state->new_deq_ptr = cur_td->last_trb;
+- xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+- "Finding segment containing last TRB in TD.");
+- state->new_deq_seg = find_trb_seg(state->new_deq_seg,
+- state->new_deq_ptr, &state->new_cycle_state);
+- if (!state->new_deq_seg) {
+- WARN_ON(1);
+- return;
+- }
++ if (cycle_found &&
++ TRB_TYPE_LINK_LE32(new_deq->generic.field[3]) &&
++ new_deq->generic.field[3] & cpu_to_le32(LINK_TOGGLE))
++ state->new_cycle_state ^= 0x1;
+
+- /* Increment to find next TRB after last_trb. Cycle if appropriate. */
+- trb = &state->new_deq_ptr->generic;
+- if (TRB_TYPE_LINK_LE32(trb->field[3]) &&
+- (trb->field[3] & cpu_to_le32(LINK_TOGGLE)))
+- state->new_cycle_state ^= 0x1;
+- next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr);
++ next_trb(xhci, ep_ring, &new_seg, &new_deq);
++
++ /* Search wrapped around, bail out */
++ if (new_deq == ep->ring->dequeue) {
++ xhci_err(xhci, "Error: Failed finding new dequeue state\n");
++ state->new_deq_seg = NULL;
++ state->new_deq_ptr = NULL;
++ return;
++ }
++
++ } while (!cycle_found || !td_last_trb_found);
++
++ state->new_deq_seg = new_seg;
++ state->new_deq_ptr = new_deq;
+
+ /* Don't update the ring cycle state for the producer (us). */
+ xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+@@ -3190,9 +3193,11 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ struct xhci_td *td;
+ struct scatterlist *sg;
+ int num_sgs;
+- int trb_buff_len, this_sg_len, running_total;
++ int trb_buff_len, this_sg_len, running_total, ret;
+ unsigned int total_packet_count;
++ bool zero_length_needed;
+ bool first_trb;
++ int last_trb_num;
+ u64 addr;
+ bool more_trbs_coming;
+
+@@ -3208,13 +3213,27 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ total_packet_count = DIV_ROUND_UP(urb->transfer_buffer_length,
+ usb_endpoint_maxp(&urb->ep->desc));
+
+- trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id],
++ ret = prepare_transfer(xhci, xhci->devs[slot_id],
+ ep_index, urb->stream_id,
+ num_trbs, urb, 0, mem_flags);
+- if (trb_buff_len < 0)
+- return trb_buff_len;
++ if (ret < 0)
++ return ret;
+
+ urb_priv = urb->hcpriv;
++
++ /* Deal with URB_ZERO_PACKET - need one more td/trb */
++ zero_length_needed = urb->transfer_flags & URB_ZERO_PACKET &&
++ urb_priv->length == 2;
++ if (zero_length_needed) {
++ num_trbs++;
++ xhci_dbg(xhci, "Creating zero length td.\n");
++ ret = prepare_transfer(xhci, xhci->devs[slot_id],
++ ep_index, urb->stream_id,
++ 1, urb, 1, mem_flags);
++ if (ret < 0)
++ return ret;
++ }
++
+ td = urb_priv->td[0];
+
+ /*
+@@ -3244,6 +3263,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ trb_buff_len = urb->transfer_buffer_length;
+
+ first_trb = true;
++ last_trb_num = zero_length_needed ? 2 : 1;
+ /* Queue the first TRB, even if it's zero-length */
+ do {
+ u32 field = 0;
+@@ -3261,12 +3281,15 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ /* Chain all the TRBs together; clear the chain bit in the last
+ * TRB to indicate it's the last TRB in the chain.
+ */
+- if (num_trbs > 1) {
++ if (num_trbs > last_trb_num) {
+ field |= TRB_CHAIN;
+- } else {
+- /* FIXME - add check for ZERO_PACKET flag before this */
++ } else if (num_trbs == last_trb_num) {
+ td->last_trb = ep_ring->enqueue;
+ field |= TRB_IOC;
++ } else if (zero_length_needed && num_trbs == 1) {
++ trb_buff_len = 0;
++ urb_priv->td[1]->last_trb = ep_ring->enqueue;
++ field |= TRB_IOC;
+ }
+
+ /* Only set interrupt on short packet for IN endpoints */
+@@ -3328,7 +3351,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ if (running_total + trb_buff_len > urb->transfer_buffer_length)
+ trb_buff_len =
+ urb->transfer_buffer_length - running_total;
+- } while (running_total < urb->transfer_buffer_length);
++ } while (num_trbs > 0);
+
+ check_trb_math(urb, num_trbs, running_total);
+ giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
+@@ -3346,7 +3369,9 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ int num_trbs;
+ struct xhci_generic_trb *start_trb;
+ bool first_trb;
++ int last_trb_num;
+ bool more_trbs_coming;
++ bool zero_length_needed;
+ int start_cycle;
+ u32 field, length_field;
+
+@@ -3377,7 +3402,6 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ num_trbs++;
+ running_total += TRB_MAX_BUFF_SIZE;
+ }
+- /* FIXME: this doesn't deal with URB_ZERO_PACKET - need one more */
+
+ ret = prepare_transfer(xhci, xhci->devs[slot_id],
+ ep_index, urb->stream_id,
+@@ -3386,6 +3410,20 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ return ret;
+
+ urb_priv = urb->hcpriv;
++
++ /* Deal with URB_ZERO_PACKET - need one more td/trb */
++ zero_length_needed = urb->transfer_flags & URB_ZERO_PACKET &&
++ urb_priv->length == 2;
++ if (zero_length_needed) {
++ num_trbs++;
++ xhci_dbg(xhci, "Creating zero length td.\n");
++ ret = prepare_transfer(xhci, xhci->devs[slot_id],
++ ep_index, urb->stream_id,
++ 1, urb, 1, mem_flags);
++ if (ret < 0)
++ return ret;
++ }
++
+ td = urb_priv->td[0];
+
+ /*
+@@ -3407,7 +3445,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ trb_buff_len = urb->transfer_buffer_length;
+
+ first_trb = true;
+-
++ last_trb_num = zero_length_needed ? 2 : 1;
+ /* Queue the first TRB, even if it's zero-length */
+ do {
+ u32 remainder = 0;
+@@ -3424,12 +3462,15 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ /* Chain all the TRBs together; clear the chain bit in the last
+ * TRB to indicate it's the last TRB in the chain.
+ */
+- if (num_trbs > 1) {
++ if (num_trbs > last_trb_num) {
+ field |= TRB_CHAIN;
+- } else {
+- /* FIXME - add check for ZERO_PACKET flag before this */
++ } else if (num_trbs == last_trb_num) {
+ td->last_trb = ep_ring->enqueue;
+ field |= TRB_IOC;
++ } else if (zero_length_needed && num_trbs == 1) {
++ trb_buff_len = 0;
++ urb_priv->td[1]->last_trb = ep_ring->enqueue;
++ field |= TRB_IOC;
+ }
+
+ /* Only set interrupt on short packet for IN endpoints */
+@@ -3467,7 +3508,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ trb_buff_len = urb->transfer_buffer_length - running_total;
+ if (trb_buff_len > TRB_MAX_BUFF_SIZE)
+ trb_buff_len = TRB_MAX_BUFF_SIZE;
+- } while (running_total < urb->transfer_buffer_length);
++ } while (num_trbs > 0);
+
+ check_trb_math(urb, num_trbs, running_total);
+ giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
+@@ -3534,8 +3575,8 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ if (start_cycle == 0)
+ field |= 0x1;
+
+- /* xHCI 1.0 6.4.1.2.1: Transfer Type field */
+- if (xhci->hci_version == 0x100) {
++ /* xHCI 1.0/1.1 6.4.1.2.1: Transfer Type field */
++ if (xhci->hci_version >= 0x100) {
+ if (urb->transfer_buffer_length > 0) {
+ if (setup->bRequestType & USB_DIR_IN)
+ field |= TRB_TX_TYPE(TRB_DATA_IN);
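
The rewritten xhci_find_new_dequeue_state() walks the ring once, starting from the known cycle state at hw_dequeue, until it has seen both hw_dequeue and the TD's last TRB, toggling the cycle only on link TRBs passed after hw_dequeue. A heavily simplified single-segment model of that loop (real TRBs, segments, and DMA comparisons are much richer):

    #include <stdbool.h>
    #include <stdio.h>

    struct trb { bool link; bool toggle; };

    int main(void)
    {
        struct trb ring[8] = { { false, false } };
        int hw_dequeue = 2, last_trb = 5, start = 0;
        int i = start, new_cycle = 1;      /* cycle state at hw_dequeue */
        bool cycle_found = false, td_last_trb_found = false;

        ring[7].link = ring[7].toggle = true;   /* segment's link TRB */

        do {
            if (!cycle_found && i == hw_dequeue) {
                cycle_found = true;
                if (td_last_trb_found)
                    break;
            }
            if (i == last_trb)
                td_last_trb_found = true;

            /* Toggle only once we are past hw_dequeue, as in the fix. */
            if (cycle_found && ring[i].link && ring[i].toggle)
                new_cycle ^= 1;

            i = (i + 1) % 8;
            if (i == start) {       /* wrapped: bail out like the driver */
                puts("failed to find new dequeue state");
                return 1;
            }
        } while (!cycle_found || !td_last_trb_found);

        printf("new dequeue index %d, cycle %d\n", i, new_cycle);
        return 0;
    }
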
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index 00686a8c4fa0..3d98a3a82c79 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -143,7 +143,8 @@ static int xhci_start(struct xhci_hcd *xhci)
+ "waited %u microseconds.\n",
+ XHCI_MAX_HALT_USEC);
+ if (!ret)
+- xhci->xhc_state &= ~XHCI_STATE_HALTED;
++ xhci->xhc_state &= ~(XHCI_STATE_HALTED | XHCI_STATE_DYING);
++
+ return ret;
+ }
+
+@@ -1318,6 +1319,11 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
+
+ if (usb_endpoint_xfer_isoc(&urb->ep->desc))
+ size = urb->number_of_packets;
++ else if (usb_endpoint_is_bulk_out(&urb->ep->desc) &&
++ urb->transfer_buffer_length > 0 &&
++ urb->transfer_flags & URB_ZERO_PACKET &&
++ !(urb->transfer_buffer_length % usb_endpoint_maxp(&urb->ep->desc)))
++ size = 2;
+ else
+ size = 1;
+
+@@ -2902,6 +2908,9 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
+ ep_index, ep->stopped_stream, ep->stopped_td,
+ &deq_state);
+
++ if (!deq_state.new_deq_ptr || !deq_state.new_deq_seg)
++ return;
++
+ /* HW with the reset endpoint quirk will use the saved dequeue state to
+ * issue a configure endpoint command later.
+ */
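
The size = 2 case above reserves a second TD for the trailing zero-length packet: it is needed exactly when a bulk OUT URB has URB_ZERO_PACKET set, a non-zero length, and a length that is a whole multiple of the endpoint's max packet size. The decision as a standalone function:

    #include <stdbool.h>
    #include <stdio.h>

    /* Number of TDs to allocate for a bulk OUT URB. */
    static int num_tds(bool bulk_out, bool zero_packet_flag,
                       unsigned len, unsigned maxp)
    {
        if (bulk_out && zero_packet_flag && len > 0 && len % maxp == 0)
            return 2;   /* extra TD carries the zero-length packet */
        return 1;
    }

    int main(void)
    {
        printf("%d\n", num_tds(true, true, 1024, 512));  /* 2: ZLP needed */
        printf("%d\n", num_tds(true, true, 1000, 512));  /* 1: short last packet */
        return 0;
    }
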
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index 8686a06d83d4..0419137c4732 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -1554,6 +1554,7 @@ struct xhci_hcd {
+ #define XHCI_PLAT (1 << 16)
+ #define XHCI_SLOW_SUSPEND (1 << 17)
+ #define XHCI_SPURIOUS_WAKEUP (1 << 18)
++#define XHCI_PME_STUCK_QUIRK (1 << 20)
+ unsigned int num_active_eps;
+ unsigned int limit_active_eps;
+ /* There are two roothubs to keep track of bus suspend info for */
+diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c
+index 77b475a43dad..2ed1695ff5ad 100644
+--- a/drivers/usb/musb/musb_cppi41.c
++++ b/drivers/usb/musb/musb_cppi41.c
+@@ -507,10 +507,18 @@ static int cppi41_dma_channel_abort(struct dma_channel *channel)
+ csr &= ~MUSB_TXCSR_DMAENAB;
+ musb_writew(epio, MUSB_TXCSR, csr);
+ } else {
++ cppi41_set_autoreq_mode(cppi41_channel, EP_MODE_AUTOREQ_NONE);
++
++ /* delay to drain to cppi dma pipeline for isoch */
++ udelay(250);
++
+ csr = musb_readw(epio, MUSB_RXCSR);
+ csr &= ~(MUSB_RXCSR_H_REQPKT | MUSB_RXCSR_DMAENAB);
+ musb_writew(epio, MUSB_RXCSR, csr);
+
++ /* wait to drain cppi dma pipe line */
++ udelay(50);
++
+ csr = musb_readw(epio, MUSB_RXCSR);
+ if (csr & MUSB_RXCSR_RXPKTRDY) {
+ csr |= MUSB_RXCSR_FLUSHFIFO;
+@@ -524,13 +532,14 @@ static int cppi41_dma_channel_abort(struct dma_channel *channel)
+ tdbit <<= 16;
+
+ do {
+- musb_writel(musb->ctrl_base, USB_TDOWN, tdbit);
++ if (is_tx)
++ musb_writel(musb->ctrl_base, USB_TDOWN, tdbit);
+ ret = dmaengine_terminate_all(cppi41_channel->dc);
+ } while (ret == -EAGAIN);
+
+- musb_writel(musb->ctrl_base, USB_TDOWN, tdbit);
+-
+ if (is_tx) {
++ musb_writel(musb->ctrl_base, USB_TDOWN, tdbit);
++
+ csr = musb_readw(epio, MUSB_TXCSR);
+ if (csr & MUSB_TXCSR_TXPKTRDY) {
+ csr |= MUSB_TXCSR_FLUSHFIFO;
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 096438e4fb0c..c918075e5eae 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -276,6 +276,10 @@ static void option_instat_callback(struct urb *urb);
+ #define ZTE_PRODUCT_MF622 0x0001
+ #define ZTE_PRODUCT_MF628 0x0015
+ #define ZTE_PRODUCT_MF626 0x0031
++#define ZTE_PRODUCT_ZM8620_X 0x0396
++#define ZTE_PRODUCT_ME3620_MBIM 0x0426
++#define ZTE_PRODUCT_ME3620_X 0x1432
++#define ZTE_PRODUCT_ME3620_L 0x1433
+ #define ZTE_PRODUCT_AC2726 0xfff1
+ #define ZTE_PRODUCT_CDMA_TECH 0xfffe
+ #define ZTE_PRODUCT_AC8710T 0xffff
+@@ -549,6 +553,18 @@ static const struct option_blacklist_info zte_mc2716_z_blacklist = {
+ .sendsetup = BIT(1) | BIT(2) | BIT(3),
+ };
+
++static const struct option_blacklist_info zte_me3620_mbim_blacklist = {
++ .reserved = BIT(2) | BIT(3) | BIT(4),
++};
++
++static const struct option_blacklist_info zte_me3620_xl_blacklist = {
++ .reserved = BIT(3) | BIT(4) | BIT(5),
++};
++
++static const struct option_blacklist_info zte_zm8620_x_blacklist = {
++ .reserved = BIT(3) | BIT(4) | BIT(5),
++};
++
+ static const struct option_blacklist_info huawei_cdc12_blacklist = {
+ .reserved = BIT(1) | BIT(2),
+ };
+@@ -1579,6 +1595,14 @@ static const struct usb_device_id option_ids[] = {
+ .driver_info = (kernel_ulong_t)&zte_ad3812_z_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2716, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&zte_mc2716_z_blacklist },
++ { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_L),
++ .driver_info = (kernel_ulong_t)&zte_me3620_xl_blacklist },
++ { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_MBIM),
++ .driver_info = (kernel_ulong_t)&zte_me3620_mbim_blacklist },
++ { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_X),
++ .driver_info = (kernel_ulong_t)&zte_me3620_xl_blacklist },
++ { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ZM8620_X),
++ .driver_info = (kernel_ulong_t)&zte_zm8620_x_blacklist },
+ { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x01) },
+ { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x05) },
+ { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x86, 0x10) },
+diff --git a/drivers/usb/serial/symbolserial.c b/drivers/usb/serial/symbolserial.c
+index 1e2d86d4f539..aa6d2bea856b 100644
+--- a/drivers/usb/serial/symbolserial.c
++++ b/drivers/usb/serial/symbolserial.c
+@@ -61,17 +61,15 @@ static void symbol_int_callback(struct urb *urb)
+
+ usb_serial_debug_data(&port->dev, __func__, urb->actual_length, data);
+
++ /*
++ * Data from the device comes with a 1 byte header:
++ *
++ * <size of data> <data>...
++ */
+ if (urb->actual_length > 1) {
+- data_length = urb->actual_length - 1;
+-
+- /*
+- * Data from the device comes with a 1 byte header:
+- *
+- * <size of data>data...
+- * This is real data to be sent to the tty layer
+- * we pretty much just ignore the size and send everything
+- * else to the tty layer.
+- */
++ data_length = data[0];
++ if (data_length > (urb->actual_length - 1))
++ data_length = urb->actual_length - 1;
+ tty_insert_flip_string(&port->port, &data[1], data_length);
+ tty_flip_buffer_push(&port->port);
+ } else {
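
The symbolserial fix stops trusting the device-supplied length byte beyond what the URB actually delivered, clamping it to actual_length - 1. A minimal sketch of the clamping:

    #include <stdio.h>

    /* Trust the device's 1-byte length header only up to what arrived. */
    static unsigned payload_len(const unsigned char *data,
                                unsigned actual_length)
    {
        unsigned data_length;

        if (actual_length <= 1)
            return 0;                          /* header only, or nothing */
        data_length = data[0];                 /* device-declared size */
        if (data_length > actual_length - 1)
            data_length = actual_length - 1;   /* never read past the URB */
        return data_length;
    }

    int main(void)
    {
        unsigned char urb[] = { 200, 'h', 'i' };   /* lies: claims 200 bytes */

        printf("%u\n", payload_len(urb, sizeof(urb)));   /* prints 2 */
        return 0;
    }
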
+diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c
+index cc5a430dc357..69fec1a99b3e 100644
+--- a/drivers/usb/serial/whiteheat.c
++++ b/drivers/usb/serial/whiteheat.c
+@@ -81,6 +81,8 @@ static int whiteheat_firmware_download(struct usb_serial *serial,
+ static int whiteheat_firmware_attach(struct usb_serial *serial);
+
+ /* function prototypes for the Connect Tech WhiteHEAT serial converter */
++static int whiteheat_probe(struct usb_serial *serial,
++ const struct usb_device_id *id);
+ static int whiteheat_attach(struct usb_serial *serial);
+ static void whiteheat_release(struct usb_serial *serial);
+ static int whiteheat_port_probe(struct usb_serial_port *port);
+@@ -117,6 +119,7 @@ static struct usb_serial_driver whiteheat_device = {
+ .description = "Connect Tech - WhiteHEAT",
+ .id_table = id_table_std,
+ .num_ports = 4,
++ .probe = whiteheat_probe,
+ .attach = whiteheat_attach,
+ .release = whiteheat_release,
+ .port_probe = whiteheat_port_probe,
+@@ -218,6 +221,34 @@ static int whiteheat_firmware_attach(struct usb_serial *serial)
+ /*****************************************************************************
+ * Connect Tech's White Heat serial driver functions
+ *****************************************************************************/
++
++static int whiteheat_probe(struct usb_serial *serial,
++ const struct usb_device_id *id)
++{
++ struct usb_host_interface *iface_desc;
++ struct usb_endpoint_descriptor *endpoint;
++ size_t num_bulk_in = 0;
++ size_t num_bulk_out = 0;
++ size_t min_num_bulk;
++ unsigned int i;
++
++ iface_desc = serial->interface->cur_altsetting;
++
++ for (i = 0; i < iface_desc->desc.bNumEndpoints; i++) {
++ endpoint = &iface_desc->endpoint[i].desc;
++ if (usb_endpoint_is_bulk_in(endpoint))
++ ++num_bulk_in;
++ if (usb_endpoint_is_bulk_out(endpoint))
++ ++num_bulk_out;
++ }
++
++ min_num_bulk = COMMAND_PORT + 1;
++ if (num_bulk_in < min_num_bulk || num_bulk_out < min_num_bulk)
++ return -ENODEV;
++
++ return 0;
++}
++
+ static int whiteheat_attach(struct usb_serial *serial)
+ {
+ struct usb_serial_port *command_port;
+diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
+index 53039de1495d..db6818878462 100644
+--- a/fs/btrfs/backref.c
++++ b/fs/btrfs/backref.c
+@@ -1668,7 +1668,6 @@ static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root,
+ int found = 0;
+ struct extent_buffer *eb;
+ struct btrfs_inode_extref *extref;
+- struct extent_buffer *leaf;
+ u32 item_size;
+ u32 cur_offset;
+ unsigned long ptr;
+@@ -1693,9 +1692,8 @@ static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root,
+ btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
+ btrfs_release_path(path);
+
+- leaf = path->nodes[0];
+- item_size = btrfs_item_size_nr(leaf, path->slots[0]);
+- ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
++ item_size = btrfs_item_size_nr(eb, path->slots[0]);
++ ptr = btrfs_item_ptr_offset(eb, path->slots[0]);
+ cur_offset = 0;
+
+ while (cur_offset < item_size) {
+@@ -1709,7 +1707,7 @@ static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root,
+ if (ret)
+ break;
+
+- cur_offset += btrfs_inode_extref_name_len(leaf, extref);
++ cur_offset += btrfs_inode_extref_name_len(eb, extref);
+ cur_offset += sizeof(*extref);
+ }
+ btrfs_tree_read_unlock_blocking(eb);
+diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
+index 855f6668cb8e..85bcb25384c0 100644
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -2642,7 +2642,8 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree,
+ bio_end_io_t end_io_func,
+ int mirror_num,
+ unsigned long prev_bio_flags,
+- unsigned long bio_flags)
++ unsigned long bio_flags,
++ bool force_bio_submit)
+ {
+ int ret = 0;
+ struct bio *bio;
+@@ -2660,6 +2661,7 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree,
+ contig = bio_end_sector(bio) == sector;
+
+ if (prev_bio_flags != bio_flags || !contig ||
++ force_bio_submit ||
+ merge_bio(rw, tree, page, offset, page_size, bio, bio_flags) ||
+ bio_add_page(bio, page, page_size, offset) < page_size) {
+ ret = submit_one_bio(rw, bio, mirror_num,
+@@ -2751,7 +2753,8 @@ static int __do_readpage(struct extent_io_tree *tree,
+ get_extent_t *get_extent,
+ struct extent_map **em_cached,
+ struct bio **bio, int mirror_num,
+- unsigned long *bio_flags, int rw)
++ unsigned long *bio_flags, int rw,
++ u64 *prev_em_start)
+ {
+ struct inode *inode = page->mapping->host;
+ u64 start = page_offset(page);
+@@ -2799,6 +2802,7 @@ static int __do_readpage(struct extent_io_tree *tree,
+ }
+ while (cur <= end) {
+ unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
++ bool force_bio_submit = false;
+
+ if (cur >= last_byte) {
+ char *userpage;
+@@ -2849,6 +2853,49 @@ static int __do_readpage(struct extent_io_tree *tree,
+ block_start = em->block_start;
+ if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
+ block_start = EXTENT_MAP_HOLE;
++
++ /*
++ * If we have a file range that points to a compressed extent
++	 * and it's followed by a consecutive file range that points
++ * to the same compressed extent (possibly with a different
++ * offset and/or length, so it either points to the whole extent
++ * or only part of it), we must make sure we do not submit a
++ * single bio to populate the pages for the 2 ranges because
++ * this makes the compressed extent read zero out the pages
++ * belonging to the 2nd range. Imagine the following scenario:
++ *
++ * File layout
++ * [0 - 8K] [8K - 24K]
++ * | |
++ * | |
++ * points to extent X, points to extent X,
++ * offset 4K, length of 8K offset 0, length 16K
++ *
++ * [extent X, compressed length = 4K uncompressed length = 16K]
++ *
++ * If the bio to read the compressed extent covers both ranges,
++ * it will decompress extent X into the pages belonging to the
++ * first range and then it will stop, zeroing out the remaining
++ * pages that belong to the other range that points to extent X.
++ * So here we make sure we submit 2 bios, one for the first
++	 * range and another one for the second range. Both will target
++ * the same physical extent from disk, but we can't currently
++ * make the compressed bio endio callback populate the pages
++ * for both ranges because each compressed bio is tightly
++ * coupled with a single extent map, and each range can have
++ * an extent map with a different offset value relative to the
++ * uncompressed data of our extent and different lengths. This
++	 * is a corner case, so we accept the non-optimal behavior of
++	 * submitting 2 bios for the same extent in exchange for correctness.
++ */
++ if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) &&
++ prev_em_start && *prev_em_start != (u64)-1 &&
++ *prev_em_start != em->orig_start)
++ force_bio_submit = true;
++
++ if (prev_em_start)
++ *prev_em_start = em->orig_start;
++
+ free_extent_map(em);
+ em = NULL;
+
+@@ -2898,7 +2945,8 @@ static int __do_readpage(struct extent_io_tree *tree,
+ bdev, bio, pnr,
+ end_bio_extent_readpage, mirror_num,
+ *bio_flags,
+- this_bio_flag);
++ this_bio_flag,
++ force_bio_submit);
+ if (!ret) {
+ nr++;
+ *bio_flags = this_bio_flag;
+@@ -2925,7 +2973,8 @@ static inline void __do_contiguous_readpages(struct extent_io_tree *tree,
+ get_extent_t *get_extent,
+ struct extent_map **em_cached,
+ struct bio **bio, int mirror_num,
+- unsigned long *bio_flags, int rw)
++ unsigned long *bio_flags, int rw,
++ u64 *prev_em_start)
+ {
+ struct inode *inode;
+ struct btrfs_ordered_extent *ordered;
+@@ -2945,7 +2994,7 @@ static inline void __do_contiguous_readpages(struct extent_io_tree *tree,
+
+ for (index = 0; index < nr_pages; index++) {
+ __do_readpage(tree, pages[index], get_extent, em_cached, bio,
+- mirror_num, bio_flags, rw);
++ mirror_num, bio_flags, rw, prev_em_start);
+ page_cache_release(pages[index]);
+ }
+ }
+@@ -2955,7 +3004,8 @@ static void __extent_readpages(struct extent_io_tree *tree,
+ int nr_pages, get_extent_t *get_extent,
+ struct extent_map **em_cached,
+ struct bio **bio, int mirror_num,
+- unsigned long *bio_flags, int rw)
++ unsigned long *bio_flags, int rw,
++ u64 *prev_em_start)
+ {
+ u64 start = 0;
+ u64 end = 0;
+@@ -2976,7 +3026,7 @@ static void __extent_readpages(struct extent_io_tree *tree,
+ index - first_index, start,
+ end, get_extent, em_cached,
+ bio, mirror_num, bio_flags,
+- rw);
++ rw, prev_em_start);
+ start = page_start;
+ end = start + PAGE_CACHE_SIZE - 1;
+ first_index = index;
+@@ -2987,7 +3037,8 @@ static void __extent_readpages(struct extent_io_tree *tree,
+ __do_contiguous_readpages(tree, &pages[first_index],
+ index - first_index, start,
+ end, get_extent, em_cached, bio,
+- mirror_num, bio_flags, rw);
++ mirror_num, bio_flags, rw,
++ prev_em_start);
+ }
+
+ static int __extent_read_full_page(struct extent_io_tree *tree,
+@@ -3013,7 +3064,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
+ }
+
+ ret = __do_readpage(tree, page, get_extent, NULL, bio, mirror_num,
+- bio_flags, rw);
++ bio_flags, rw, NULL);
+ return ret;
+ }
+
+@@ -3039,7 +3090,7 @@ int extent_read_full_page_nolock(struct extent_io_tree *tree, struct page *page,
+ int ret;
+
+ ret = __do_readpage(tree, page, get_extent, NULL, &bio, mirror_num,
+- &bio_flags, READ);
++ &bio_flags, READ, NULL);
+ if (bio)
+ ret = submit_one_bio(READ, bio, mirror_num, bio_flags);
+ return ret;
+@@ -3308,7 +3359,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
+ sector, iosize, pg_offset,
+ bdev, &epd->bio, max_nr,
+ end_bio_extent_writepage,
+- 0, 0, 0);
++ 0, 0, 0, false);
+ if (ret)
+ SetPageError(page);
+ }
+@@ -3479,7 +3530,7 @@ static int write_one_eb(struct extent_buffer *eb,
+ ret = submit_extent_page(rw, eb->tree, p, offset >> 9,
+ PAGE_CACHE_SIZE, 0, bdev, &epd->bio,
+ -1, end_bio_extent_buffer_writepage,
+- 0, epd->bio_flags, bio_flags);
++ 0, epd->bio_flags, bio_flags, false);
+ epd->bio_flags = bio_flags;
+ if (ret) {
+ set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
+@@ -3882,6 +3933,7 @@ int extent_readpages(struct extent_io_tree *tree,
+ struct page *page;
+ struct extent_map *em_cached = NULL;
+ int nr = 0;
++ u64 prev_em_start = (u64)-1;
+
+ for (page_idx = 0; page_idx < nr_pages; page_idx++) {
+ page = list_entry(pages->prev, struct page, lru);
+@@ -3898,12 +3950,12 @@ int extent_readpages(struct extent_io_tree *tree,
+ if (nr < ARRAY_SIZE(pagepool))
+ continue;
+ __extent_readpages(tree, pagepool, nr, get_extent, &em_cached,
+- &bio, 0, &bio_flags, READ);
++ &bio, 0, &bio_flags, READ, &prev_em_start);
+ nr = 0;
+ }
+ if (nr)
+ __extent_readpages(tree, pagepool, nr, get_extent, &em_cached,
+- &bio, 0, &bio_flags, READ);
++ &bio, 0, &bio_flags, READ, &prev_em_start);
+
+ if (em_cached)
+ free_extent_map(em_cached);
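
All the plumbing above exists to thread one u64 cursor, prev_em_start, through the readpages path: whenever the next page's extent map has a different origin than the previous one and the extent is compressed, the bio built so far is submitted before the new range is added, so two file ranges sharing a compressed extent never share a bio. The batching rule in isolation, as a toy program (hypothetical names, not btrfs code):

#include <stdio.h>

#define ORIGIN_NONE ((unsigned long long)-1)

struct page_req {
	int page;			/* page index, demo only */
	unsigned long long origin;	/* stand-in for em->orig_start */
	int compressed;
};

static void submit_batch(const int *batch, int n)
{
	int i;

	if (!n)
		return;
	printf("bio:");
	for (i = 0; i < n; i++)
		printf(" page %d", batch[i]);
	printf("\n");
}

int main(void)
{
	/* two adjacent compressed ranges with different origins */
	struct page_req reqs[] = {
		{ 0, 100, 1 }, { 1, 100, 1 },	/* first range  */
		{ 2, 200, 1 }, { 3, 200, 1 },	/* second range */
	};
	unsigned long long prev_origin = ORIGIN_NONE;
	int batch[8], n = 0;
	size_t i;

	for (i = 0; i < sizeof(reqs) / sizeof(reqs[0]); i++) {
		int force = reqs[i].compressed &&
			    prev_origin != ORIGIN_NONE &&
			    prev_origin != reqs[i].origin;

		if (force) {	/* never mix origins in one bio */
			submit_batch(batch, n);
			n = 0;
		}
		prev_origin = reqs[i].origin;
		batch[n++] = reqs[i].page;
	}
	submit_batch(batch, n);	/* prints two 2-page bios */
	return 0;
}
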
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 904ed6d7e4bb..50f08d5f9cbb 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -4516,7 +4516,8 @@ void btrfs_evict_inode(struct inode *inode)
+ goto no_delete;
+ }
+ /* do we really want it for ->i_nlink > 0 and zero btrfs_root_refs? */
+- btrfs_wait_ordered_range(inode, 0, (u64)-1);
++ if (!special_file(inode->i_mode))
++ btrfs_wait_ordered_range(inode, 0, (u64)-1);
+
+ if (root->fs_info->log_root_recovering) {
+ BUG_ON(test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
+diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
+index fc6f4f3a1a9d..134ed52f616f 100644
+--- a/fs/cifs/cifsencrypt.c
++++ b/fs/cifs/cifsencrypt.c
+@@ -441,6 +441,48 @@ find_domain_name(struct cifs_ses *ses, const struct nls_table *nls_cp)
+ return 0;
+ }
+
++/* Server has provided av pairs/target info in the type 2 challenge
++ * packet and we have plucked it and stored it within the smb session.
++ * We parse that blob here to find the server-given timestamp, used
++ * as part of ntlmv2 authentication (or the local current time as a
++ * default in case of failure).
++ */
++static __le64
++find_timestamp(struct cifs_ses *ses)
++{
++ unsigned int attrsize;
++ unsigned int type;
++ unsigned int onesize = sizeof(struct ntlmssp2_name);
++ unsigned char *blobptr;
++ unsigned char *blobend;
++ struct ntlmssp2_name *attrptr;
++
++ if (!ses->auth_key.len || !ses->auth_key.response)
++ return 0;
++
++ blobptr = ses->auth_key.response;
++ blobend = blobptr + ses->auth_key.len;
++
++ while (blobptr + onesize < blobend) {
++ attrptr = (struct ntlmssp2_name *) blobptr;
++ type = le16_to_cpu(attrptr->type);
++ if (type == NTLMSSP_AV_EOL)
++ break;
++ blobptr += 2; /* advance attr type */
++ attrsize = le16_to_cpu(attrptr->length);
++ blobptr += 2; /* advance attr size */
++ if (blobptr + attrsize > blobend)
++ break;
++ if (type == NTLMSSP_AV_TIMESTAMP) {
++ if (attrsize == sizeof(u64))
++ return *((__le64 *)blobptr);
++ }
++ blobptr += attrsize; /* advance attr value */
++ }
++
++ return cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME));
++}
++
+ static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash,
+ const struct nls_table *nls_cp)
+ {
+@@ -630,6 +672,7 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
+ struct ntlmv2_resp *buf;
+ char ntlmv2_hash[16];
+ unsigned char *tiblob = NULL; /* target info blob */
++ __le64 rsp_timestamp;
+
+ if (ses->server->negflavor == CIFS_NEGFLAVOR_EXTENDED) {
+ if (!ses->domainName) {
+@@ -648,6 +691,12 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
+ }
+ }
+
++ /* Must be within 5 minutes of the server (or in range +/-2h
++ * in case of Mac OS X), so simply carry over server timestamp
++ * (as Windows 7 does)
++ */
++ rsp_timestamp = find_timestamp(ses);
++
+ baselen = CIFS_SESS_KEY_SIZE + sizeof(struct ntlmv2_resp);
+ tilen = ses->auth_key.len;
+ tiblob = ses->auth_key.response;
+@@ -664,7 +713,8 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
+ (ses->auth_key.response + CIFS_SESS_KEY_SIZE);
+ buf->blob_signature = cpu_to_le32(0x00000101);
+ buf->reserved = 0;
+- buf->time = cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME));
++ buf->time = rsp_timestamp;
++
+ get_random_bytes(&buf->client_chal, sizeof(buf->client_chal));
+ buf->reserved2 = 0;
+
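
find_timestamp() above is a classic defensive TLV walk: before reading any attribute it checks that a full header still fits, and before consuming a value it checks that the advertised length does not run past the end of the blob. The same walk, reduced to a standalone sketch with simplified little-endian parsing (toy layout; the real code uses struct ntlmssp2_name):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define AV_EOL		0	/* MsvAvEOL */
#define AV_TIMESTAMP	7	/* MsvAvTimestamp */

/*
 * Walk a blob of little-endian TLV attributes: u16 type, u16 length,
 * then 'length' value bytes. Returns 1 and fills *ts if a well-formed
 * 8-byte timestamp attribute is found; every advance is bounds-checked.
 */
static int find_ts(const uint8_t *blob, size_t len, uint64_t *ts)
{
	const uint8_t *p = blob, *end = blob + len;

	while (p + 4 <= end) {			/* room for a header? */
		uint16_t type = p[0] | (p[1] << 8);
		uint16_t size = p[2] | (p[3] << 8);

		if (type == AV_EOL)
			break;
		p += 4;
		if (p + size > end)		/* value overruns blob */
			break;
		if (type == AV_TIMESTAMP && size == sizeof(*ts)) {
			memcpy(ts, p, sizeof(*ts));
			return 1;
		}
		p += size;			/* next attribute */
	}
	return 0;
}

int main(void)
{
	/* one unrelated attribute, then an 8-byte timestamp, then EOL */
	uint8_t blob[] = {
		2, 0, 3, 0, 'a', 'b', 'c',
		7, 0, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8,
		0, 0, 0, 0,
	};
	uint64_t ts;

	if (find_ts(blob, sizeof(blob), &ts))
		printf("timestamp found: 0x%016llx\n",
		       (unsigned long long)ts);
	return 0;
}
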
+diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
+index 5f1f3285479e..ea938a8bf240 100644
+--- a/fs/cifs/cifssmb.c
++++ b/fs/cifs/cifssmb.c
+@@ -629,9 +629,8 @@ CIFSSMBNegotiate(const unsigned int xid, struct cifs_ses *ses)
+ server->negflavor = CIFS_NEGFLAVOR_UNENCAP;
+ memcpy(ses->server->cryptkey, pSMBr->u.EncryptionKey,
+ CIFS_CRYPTO_KEY_SIZE);
+- } else if ((pSMBr->hdr.Flags2 & SMBFLG2_EXT_SEC ||
+- server->capabilities & CAP_EXTENDED_SECURITY) &&
+- (pSMBr->EncryptionKeyLength == 0)) {
++ } else if (pSMBr->hdr.Flags2 & SMBFLG2_EXT_SEC ||
++ server->capabilities & CAP_EXTENDED_SECURITY) {
+ server->negflavor = CIFS_NEGFLAVOR_EXTENDED;
+ rc = decode_ext_sec_blob(ses, pSMBr);
+ } else if (server->sec_mode & SECMODE_PW_ENCRYPT) {
+diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
+index 6f79cd867a2e..57519567b2ac 100644
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -49,9 +49,13 @@ change_conf(struct TCP_Server_Info *server)
+ break;
+ default:
+ server->echoes = true;
+- server->oplocks = true;
++ if (enable_oplocks) {
++ server->oplocks = true;
++ server->oplock_credits = 1;
++ } else
++ server->oplocks = false;
++
+ server->echo_credits = 1;
+- server->oplock_credits = 1;
+ }
+ server->credits -= server->echo_credits + server->oplock_credits;
+ return 0;
+diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
+index b892355f1944..d4c7e470dec8 100644
+--- a/fs/jbd2/checkpoint.c
++++ b/fs/jbd2/checkpoint.c
+@@ -475,14 +475,15 @@ int jbd2_cleanup_journal_tail(journal_t *journal)
+ * journal_clean_one_cp_list
+ *
+ * Find all the written-back checkpoint buffers in the given list and
+- * release them.
++ * release them. If 'destroy' is set, clean all buffers unconditionally.
+ *
+ * Called with the journal locked.
+ * Called with j_list_lock held.
+ * Returns number of buffers reaped (for debug)
+ */
+
+-static int journal_clean_one_cp_list(struct journal_head *jh, int *released)
++static int journal_clean_one_cp_list(struct journal_head *jh, bool destroy,
++ int *released)
+ {
+ struct journal_head *last_jh;
+ struct journal_head *next_jh = jh;
+@@ -496,7 +497,10 @@ static int journal_clean_one_cp_list(struct journal_head *jh, int *released)
+ do {
+ jh = next_jh;
+ next_jh = jh->b_cpnext;
+- ret = __try_to_free_cp_buf(jh);
++ if (!destroy)
++ ret = __try_to_free_cp_buf(jh);
++ else
++ ret = __jbd2_journal_remove_checkpoint(jh) + 1;
+ if (ret) {
+ freed++;
+ if (ret == 2) {
+@@ -521,13 +525,14 @@ static int journal_clean_one_cp_list(struct journal_head *jh, int *released)
+ * journal_clean_checkpoint_list
+ *
+ * Find all the written-back checkpoint buffers in the journal and release them.
++ * If 'destroy' is set, release all buffers unconditionally.
+ *
+ * Called with the journal locked.
+ * Called with j_list_lock held.
+ * Returns number of buffers reaped (for debug)
+ */
+
+-int __jbd2_journal_clean_checkpoint_list(journal_t *journal)
++int __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy)
+ {
+ transaction_t *transaction, *last_transaction, *next_transaction;
+ int ret = 0;
+@@ -543,7 +548,7 @@ int __jbd2_journal_clean_checkpoint_list(journal_t *journal)
+ transaction = next_transaction;
+ next_transaction = transaction->t_cpnext;
+ ret += journal_clean_one_cp_list(transaction->
+- t_checkpoint_list, &released);
++ t_checkpoint_list, destroy, &released);
+ /*
+ * This function only frees up some memory if possible so we
+	 * don't have an obligation to finish processing. Bail out if
+@@ -559,7 +564,7 @@ int __jbd2_journal_clean_checkpoint_list(journal_t *journal)
+ * we can possibly see not yet submitted buffers on io_list
+ */
+ ret += journal_clean_one_cp_list(transaction->
+- t_checkpoint_io_list, &released);
++ t_checkpoint_io_list, destroy, &released);
+ if (need_resched())
+ goto out;
+ } while (transaction != last_transaction);
+@@ -568,6 +573,28 @@ out:
+ }
+
+ /*
++ * Remove buffers from all checkpoint lists as journal is aborted and we just
++ * need to free memory
++ */
++void jbd2_journal_destroy_checkpoint(journal_t *journal)
++{
++ /*
++ * We loop because __jbd2_journal_clean_checkpoint_list() may abort
++	 * early when it needs to reschedule.
++ */
++ while (1) {
++ spin_lock(&journal->j_list_lock);
++ if (!journal->j_checkpoint_transactions) {
++ spin_unlock(&journal->j_list_lock);
++ break;
++ }
++ __jbd2_journal_clean_checkpoint_list(journal, true);
++ spin_unlock(&journal->j_list_lock);
++ cond_resched();
++ }
++}
++
++/*
+ * journal_remove_checkpoint: called after a buffer has been committed
+ * to disk (either by being write-back flushed to disk, or being
+ * committed to the log).
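
jbd2_journal_destroy_checkpoint() is a standard drain loop: take the lock, stop if the list is empty, free a bounded chunk, drop the lock, and yield before the next pass, so neither the lock nor the CPU is monopolized while a large checkpoint list is torn down. A userspace analogue of that shape, using pthreads (illustrative only):

#include <pthread.h>
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>

struct node { struct node *next; };

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *head;

/* Free up to 'budget' nodes; caller holds list_lock. */
static void clean_chunk(int budget)
{
	while (head && budget--) {
		struct node *n = head;

		head = n->next;
		free(n);
	}
}

static void destroy_all(void)
{
	for (;;) {
		pthread_mutex_lock(&list_lock);
		if (!head) {
			pthread_mutex_unlock(&list_lock);
			break;
		}
		clean_chunk(64);	/* bounded work per pass */
		pthread_mutex_unlock(&list_lock);
		sched_yield();		/* stand-in for cond_resched() */
	}
}

int main(void)
{
	int i;

	for (i = 0; i < 1000; i++) {
		struct node *n = malloc(sizeof(*n));

		n->next = head;
		head = n;
	}
	destroy_all();
	printf("drained: %s\n", head ? "no" : "yes");
	return 0;
}
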
+diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
+index 9181c2b22b3c..4207cf2caa87 100644
+--- a/fs/jbd2/commit.c
++++ b/fs/jbd2/commit.c
+@@ -510,7 +510,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
+ * frees some memory
+ */
+ spin_lock(&journal->j_list_lock);
+- __jbd2_journal_clean_checkpoint_list(journal);
++ __jbd2_journal_clean_checkpoint_list(journal, false);
+ spin_unlock(&journal->j_list_lock);
+
+ jbd_debug(3, "JBD2: commit phase 1\n");
+diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
+index 614ecbf8a48c..2ebb7aadb381 100644
+--- a/fs/jbd2/journal.c
++++ b/fs/jbd2/journal.c
+@@ -1710,8 +1710,17 @@ int jbd2_journal_destroy(journal_t *journal)
+ while (journal->j_checkpoint_transactions != NULL) {
+ spin_unlock(&journal->j_list_lock);
+ mutex_lock(&journal->j_checkpoint_mutex);
+- jbd2_log_do_checkpoint(journal);
++ err = jbd2_log_do_checkpoint(journal);
+ mutex_unlock(&journal->j_checkpoint_mutex);
++ /*
++ * If checkpointing failed, just free the buffers to avoid
++ * looping forever
++ */
++ if (err) {
++ jbd2_journal_destroy_checkpoint(journal);
++ spin_lock(&journal->j_list_lock);
++ break;
++ }
+ spin_lock(&journal->j_list_lock);
+ }
+
+diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
+index 01613b382b0e..6f692f8ac664 100644
+--- a/include/asm-generic/barrier.h
++++ b/include/asm-generic/barrier.h
+@@ -1,4 +1,5 @@
+-/* Generic barrier definitions, based on MN10300 definitions.
++/*
++ * Generic barrier definitions, originally based on MN10300 definitions.
+ *
+ * It should be possible to use these on really simple architectures,
+ * but it serves more as a starting point for new ports.
+@@ -16,35 +17,50 @@
+
+ #ifndef __ASSEMBLY__
+
+-#define nop() asm volatile ("nop")
++#include <linux/compiler.h>
++
++#ifndef nop
++#define nop() asm volatile ("nop")
++#endif
+
+ /*
+- * Force strict CPU ordering.
+- * And yes, this is required on UP too when we're talking
+- * to devices.
++ * Force strict CPU ordering. And yes, this is required on UP too when we're
++ * talking to devices.
+ *
+- * This implementation only contains a compiler barrier.
++ * Fall back to compiler barriers if nothing better is provided.
+ */
+
+-#define mb() asm volatile ("": : :"memory")
++#ifndef mb
++#define mb() barrier()
++#endif
++
++#ifndef rmb
+ #define rmb() mb()
+-#define wmb() asm volatile ("": : :"memory")
++#endif
++
++#ifndef wmb
++#define wmb() mb()
++#endif
++
++#ifndef read_barrier_depends
++#define read_barrier_depends() do { } while (0)
++#endif
+
+ #ifdef CONFIG_SMP
+ #define smp_mb() mb()
+ #define smp_rmb() rmb()
+ #define smp_wmb() wmb()
++#define smp_read_barrier_depends() read_barrier_depends()
+ #else
+ #define smp_mb() barrier()
+ #define smp_rmb() barrier()
+ #define smp_wmb() barrier()
++#define smp_read_barrier_depends() do { } while (0)
+ #endif
+
+-#define set_mb(var, value) do { var = value; mb(); } while (0)
+-#define set_wmb(var, value) do { var = value; wmb(); } while (0)
+-
+-#define read_barrier_depends() do {} while (0)
+-#define smp_read_barrier_depends() do {} while (0)
++#ifndef set_mb
++#define set_mb(var, value) do { (var) = (value); mb(); } while (0)
++#endif
+
+ #define smp_store_release(p, v) \
+ do { \
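
Every definition in the reworked generic header is wrapped in #ifndef, so an architecture can define just the primitives it actually implements and then include the generic file to inherit fallbacks for the rest. The shape of that override pattern, as a compilable toy in which printf calls stand in for real barrier instructions:

#include <stdio.h>

/* --- "arch" header: provides only mb() --- */
#define mb() printf("arch mb\n")

/* --- "generic" header: fills in whatever the arch left out --- */
#ifndef mb
#define mb() printf("generic mb\n")	/* unused here */
#endif

#ifndef rmb
#define rmb() mb()			/* falls back to the arch mb() */
#endif

#ifndef wmb
#define wmb() mb()
#endif

int main(void)
{
	mb();	/* arch version */
	rmb();	/* generic fallback, expanding to the arch mb() */
	wmb();
	return 0;
}
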
+diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
+index e1fb0f613a99..385593d748f6 100644
+--- a/include/linux/jbd2.h
++++ b/include/linux/jbd2.h
+@@ -1042,8 +1042,9 @@ void jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block);
+ extern void jbd2_journal_commit_transaction(journal_t *);
+
+ /* Checkpoint list management */
+-int __jbd2_journal_clean_checkpoint_list(journal_t *journal);
++int __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy);
+ int __jbd2_journal_remove_checkpoint(struct journal_head *);
++void jbd2_journal_destroy_checkpoint(journal_t *journal);
+ void __jbd2_journal_insert_checkpoint(struct journal_head *, transaction_t *);
+
+
+diff --git a/include/linux/security.h b/include/linux/security.h
+index 9d37e2b9d3ec..dd7c1a16ab5e 100644
+--- a/include/linux/security.h
++++ b/include/linux/security.h
+@@ -2441,7 +2441,7 @@ static inline int security_task_prctl(int option, unsigned long arg2,
+ unsigned long arg4,
+ unsigned long arg5)
+ {
+- return cap_task_prctl(option, arg2, arg3, arg3, arg5);
++ return cap_task_prctl(option, arg2, arg3, arg4, arg5);
+ }
+
+ static inline void security_task_to_inode(struct task_struct *p, struct inode *inode)
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index 79147dc9630d..16e753a9922a 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -2264,6 +2264,9 @@ static inline void skb_postpull_rcsum(struct sk_buff *skb,
+ {
+ if (skb->ip_summed == CHECKSUM_COMPLETE)
+ skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
++ else if (skb->ip_summed == CHECKSUM_PARTIAL &&
++ skb_checksum_start_offset(skb) < 0)
++ skb->ip_summed = CHECKSUM_NONE;
+ }
+
+ unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);
+@@ -2350,7 +2353,8 @@ extern int skb_copy_datagram_iovec(const struct sk_buff *from,
+ int size);
+ extern int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
+ int hlen,
+- struct iovec *iov);
++ struct iovec *iov,
++ int len);
+ extern int skb_copy_datagram_from_iovec(struct sk_buff *skb,
+ int offset,
+ const struct iovec *from,
+diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
+index d8ee9fd7ca4e..914ce51fa056 100644
+--- a/include/linux/usb/hcd.h
++++ b/include/linux/usb/hcd.h
+@@ -140,6 +140,7 @@ struct usb_hcd {
+ unsigned wireless:1; /* Wireless USB HCD */
+ unsigned authorized_default:1;
+ unsigned has_tt:1; /* Integrated TT in root hub */
++ unsigned amd_resume_bug:1; /* AMD remote wakeup quirk */
+
+ unsigned int irq; /* irq allocated */
+ void __iomem *regs; /* device memory/io */
+@@ -428,6 +429,8 @@ extern int usb_hcd_pci_probe(struct pci_dev *dev,
+ extern void usb_hcd_pci_remove(struct pci_dev *dev);
+ extern void usb_hcd_pci_shutdown(struct pci_dev *dev);
+
++extern int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *dev);
++
+ #ifdef CONFIG_PM
+ extern const struct dev_pm_ops usb_hcd_pci_pm_ops;
+ #endif
+diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h
+index 3fb428883460..a4abaeb3fb00 100644
+--- a/include/linux/usb/quirks.h
++++ b/include/linux/usb/quirks.h
+@@ -41,13 +41,10 @@
+ */
+ #define USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL 0x00000080
+
+-/* device generates spurious wakeup, ignore remote wakeup capability */
+-#define USB_QUIRK_IGNORE_REMOTE_WAKEUP 0x00000200
++/* device can't handle device_qualifier descriptor requests */
++#define USB_QUIRK_DEVICE_QUALIFIER 0x00000100
+
+ /* device generates spurious wakeup, ignore remote wakeup capability */
+ #define USB_QUIRK_IGNORE_REMOTE_WAKEUP 0x00000200
+
+-/* device can't handle device_qualifier descriptor requests */
+-#define USB_QUIRK_DEVICE_QUALIFIER 0x00000100
+-
+ #endif /* __LINUX_USB_QUIRKS_H */
+diff --git a/include/net/af_unix.h b/include/net/af_unix.h
+index a175ba4a7adb..dfe4ddfbb43c 100644
+--- a/include/net/af_unix.h
++++ b/include/net/af_unix.h
+@@ -64,7 +64,11 @@ struct unix_sock {
+ #define UNIX_GC_MAYBE_CYCLE 1
+ struct socket_wq peer_wq;
+ };
+-#define unix_sk(__sk) ((struct unix_sock *)__sk)
++
++static inline struct unix_sock *unix_sk(struct sock *sk)
++{
++ return (struct unix_sock *)sk;
++}
+
+ #define peer_wait peer_wq.wait
+
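
Turning unix_sk() from a macro into a static inline changes nothing at runtime, but the typed parameter means the compiler now diagnoses a call with anything other than a struct sock pointer, where the macro's cast accepted any pointer silently. A small sketch of the difference:

#include <stdio.h>

struct sock { int proto; };
struct unix_sock { struct sock sk; int extra; };

/* old style: the cast swallows any pointer type without complaint */
#define unix_sk_macro(__sk) ((struct unix_sock *)__sk)

/* new style: the argument type is checked at every call site */
static inline struct unix_sock *unix_sk_inline(struct sock *sk)
{
	return (struct unix_sock *)sk;
}

int main(void)
{
	struct unix_sock u = { { 1 }, 42 };
	int not_a_sock = 0;

	printf("%d\n", unix_sk_inline(&u.sk)->extra);

	(void)unix_sk_macro(&not_a_sock);	/* compiles silently */
	/*
	 * The inline version rejects the same mistake at compile time:
	 * (void)unix_sk_inline(&not_a_sock);
	 */
	return 0;
}
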
+diff --git a/include/net/sock.h b/include/net/sock.h
+index d157f4f56f01..4f355e69e5d2 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -788,6 +788,14 @@ static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *s
+ if (sk_rcvqueues_full(sk, skb, limit))
+ return -ENOBUFS;
+
++ /*
++ * If the skb was allocated from pfmemalloc reserves, only
++ * allow SOCK_MEMALLOC sockets to use it as this socket is
++ * helping free memory
++ */
++ if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC))
++ return -ENOMEM;
++
+ __sk_add_backlog(sk, skb);
+ sk->sk_backlog.len += skb->truesize;
+ return 0;
+diff --git a/include/xen/interface/sched.h b/include/xen/interface/sched.h
+index 9ce083960a25..f18490985fc8 100644
+--- a/include/xen/interface/sched.h
++++ b/include/xen/interface/sched.h
+@@ -107,5 +107,13 @@ struct sched_watchdog {
+ #define SHUTDOWN_suspend 2 /* Clean up, save suspend info, kill. */
+ #define SHUTDOWN_crash 3 /* Tell controller we've crashed. */
+ #define SHUTDOWN_watchdog 4 /* Restart because watchdog time expired. */
++/*
++ * The domain has asked to perform a 'soft reset'. The expected behavior is
++ * to reset internal Xen state for the domain, returning it to the point
++ * where it was created, but leaving the domain's memory contents and vCPU
++ * contexts intact. This will allow the domain to start over and set up all
++ * Xen-specific interfaces again.
++ */
++#define SHUTDOWN_soft_reset 5
+
+ #endif /* __XEN_PUBLIC_SCHED_H__ */
+diff --git a/ipc/msg.c b/ipc/msg.c
+index 52770bfde2a5..32aaaab15c5c 100644
+--- a/ipc/msg.c
++++ b/ipc/msg.c
+@@ -202,13 +202,6 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params)
+ return retval;
+ }
+
+- /* ipc_addid() locks msq upon success. */
+- id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni);
+- if (id < 0) {
+- ipc_rcu_putref(msq, msg_rcu_free);
+- return id;
+- }
+-
+ msq->q_stime = msq->q_rtime = 0;
+ msq->q_ctime = get_seconds();
+ msq->q_cbytes = msq->q_qnum = 0;
+@@ -218,6 +211,13 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params)
+ INIT_LIST_HEAD(&msq->q_receivers);
+ INIT_LIST_HEAD(&msq->q_senders);
+
++ /* ipc_addid() locks msq upon success. */
++ id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni);
++ if (id < 0) {
++ ipc_rcu_putref(msq, msg_rcu_free);
++ return id;
++ }
++
+ ipc_unlock_object(&msq->q_perm);
+ rcu_read_unlock();
+
+diff --git a/ipc/shm.c b/ipc/shm.c
+index 623bc3877118..02f7125c8a0f 100644
+--- a/ipc/shm.c
++++ b/ipc/shm.c
+@@ -545,12 +545,6 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
+ if (IS_ERR(file))
+ goto no_file;
+
+- id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
+- if (id < 0) {
+- error = id;
+- goto no_id;
+- }
+-
+ shp->shm_cprid = task_tgid_vnr(current);
+ shp->shm_lprid = 0;
+ shp->shm_atim = shp->shm_dtim = 0;
+@@ -560,6 +554,12 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
+ shp->shm_file = file;
+ shp->shm_creator = current;
+
++ id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
++ if (id < 0) {
++ error = id;
++ goto no_id;
++ }
++
+ /*
+ * shmid gets reported as "inode#" in /proc/pid/maps.
+ * proc-ps tools use this. Changing this will break them.
+diff --git a/ipc/util.c b/ipc/util.c
+index 7684f41bce76..735342570a87 100644
+--- a/ipc/util.c
++++ b/ipc/util.c
+@@ -292,6 +292,10 @@ int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size)
+ rcu_read_lock();
+ spin_lock(&new->lock);
+
++ current_euid_egid(&euid, &egid);
++ new->cuid = new->uid = euid;
++ new->gid = new->cgid = egid;
++
+ id = idr_alloc(&ids->ipcs_idr, new,
+ (next_id < 0) ? 0 : ipcid_to_idx(next_id), 0,
+ GFP_NOWAIT);
+@@ -304,10 +308,6 @@ int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size)
+
+ ids->in_use++;
+
+- current_euid_egid(&euid, &egid);
+- new->cuid = new->uid = euid;
+- new->gid = new->cgid = egid;
+-
+ if (next_id < 0) {
+ new->seq = ids->seq++;
+ if (ids->seq > ids->seq_max)
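
The msg.c, shm.c, and util.c hunks all enforce one rule: finish initializing an object, including its uid/gid ownership, before ipc_addid() makes it visible in the IDR, because a concurrent task can look it up the instant it is published. In portable C11 terms, the publishing store must come last and carry release semantics; a sketch:

#include <stdatomic.h>
#include <stdio.h>

struct msgq {
	int ctime;
	int uid;	/* must be set before publication */
};

static _Atomic(struct msgq *) registry;	/* stand-in for the IDR slot */

static void publish(struct msgq *q, int uid, int now)
{
	/* 1. initialize every field first */
	q->uid = uid;
	q->ctime = now;

	/*
	 * 2. only then make the object reachable; release ordering
	 * guarantees a reader that sees the pointer also sees the
	 * initialized fields.
	 */
	atomic_store_explicit(&registry, q, memory_order_release);
}

static struct msgq *lookup(void)
{
	return atomic_load_explicit(&registry, memory_order_acquire);
}

int main(void)
{
	static struct msgq q;
	struct msgq *p;

	publish(&q, 1000, 42);
	p = lookup();
	if (p)
		printf("uid=%d ctime=%d\n", p->uid, p->ctime);
	return 0;
}
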
+diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
+index 095cd7230aef..56d7272199ff 100644
+--- a/kernel/irq/proc.c
++++ b/kernel/irq/proc.c
+@@ -12,6 +12,7 @@
+ #include <linux/seq_file.h>
+ #include <linux/interrupt.h>
+ #include <linux/kernel_stat.h>
++#include <linux/mutex.h>
+
+ #include "internals.h"
+
+@@ -326,18 +327,29 @@ void register_handler_proc(unsigned int irq, struct irqaction *action)
+
+ void register_irq_proc(unsigned int irq, struct irq_desc *desc)
+ {
++ static DEFINE_MUTEX(register_lock);
+ char name [MAX_NAMELEN];
+
+- if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip) || desc->dir)
++ if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip))
+ return;
+
++ /*
++ * irq directories are registered only when a handler is
++ * added, not when the descriptor is created, so multiple
++ * tasks might try to register at the same time.
++ */
++ mutex_lock(&register_lock);
++
++ if (desc->dir)
++ goto out_unlock;
++
+ memset(name, 0, MAX_NAMELEN);
+ sprintf(name, "%d", irq);
+
+ /* create /proc/irq/1234 */
+ desc->dir = proc_mkdir(name, root_irq_dir);
+ if (!desc->dir)
+- return;
++ goto out_unlock;
+
+ #ifdef CONFIG_SMP
+ /* create /proc/irq/<irq>/smp_affinity */
+@@ -358,6 +370,9 @@ void register_irq_proc(unsigned int irq, struct irq_desc *desc)
+
+ proc_create_data("spurious", 0444, desc->dir,
+ &irq_spurious_proc_fops, (void *)(long)irq);
++
++out_unlock:
++ mutex_unlock(&register_lock);
+ }
+
+ void unregister_irq_proc(unsigned int irq, struct irq_desc *desc)
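
register_irq_proc() now serializes the whole create-if-absent sequence with a function-local mutex and re-checks desc->dir after taking it, so two tasks registering handlers for the same irq can no longer both see a NULL directory and race into proc_mkdir(). The idiom in miniature, with pthreads standing in for the kernel mutex:

#include <pthread.h>
#include <stdio.h>

static char *dir;	/* stand-in for desc->dir */

static char *make_dir(void)
{
	static char storage[] = "irq-dir";

	return storage;	/* pretend we created the directory */
}

static void register_proc_dir(void)
{
	/* function-local lock, like the static DEFINE_MUTEX above */
	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

	pthread_mutex_lock(&lock);
	/*
	 * Re-check under the lock: a racing caller may have created
	 * the directory before we acquired the mutex.
	 */
	if (!dir)
		dir = make_dir();
	pthread_mutex_unlock(&lock);
}

static void *worker(void *arg)
{
	(void)arg;
	register_proc_dir();
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, worker, NULL);
	pthread_create(&b, NULL, worker, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	printf("dir = %s (created exactly once)\n", dir);
	return 0;
}
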
+diff --git a/kernel/rcutree.c b/kernel/rcutree.c
+index e27526232b5f..a92bd6bd2bf1 100644
+--- a/kernel/rcutree.c
++++ b/kernel/rcutree.c
+@@ -802,8 +802,11 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
+
+ static void record_gp_stall_check_time(struct rcu_state *rsp)
+ {
+- rsp->gp_start = jiffies;
+- rsp->jiffies_stall = jiffies + rcu_jiffies_till_stall_check();
++ unsigned long j = ACCESS_ONCE(jiffies);
++
++ rsp->gp_start = j;
++ smp_wmb(); /* Record start time before stall time. */
++ rsp->jiffies_stall = j + rcu_jiffies_till_stall_check();
+ }
+
+ /*
+@@ -932,17 +935,48 @@ static void print_cpu_stall(struct rcu_state *rsp)
+
+ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
+ {
++ unsigned long completed;
++ unsigned long gpnum;
++ unsigned long gps;
+ unsigned long j;
+ unsigned long js;
+ struct rcu_node *rnp;
+
+- if (rcu_cpu_stall_suppress)
++ if (rcu_cpu_stall_suppress || !rcu_gp_in_progress(rsp))
+ return;
+ j = ACCESS_ONCE(jiffies);
++
++ /*
++ * Lots of memory barriers to reject false positives.
++ *
++ * The idea is to pick up rsp->gpnum, then rsp->jiffies_stall,
++ * then rsp->gp_start, and finally rsp->completed. These values
++ * are updated in the opposite order with memory barriers (or
++ * equivalent) during grace-period initialization and cleanup.
++ * Now, a false positive can occur if we get a new value of
++ * rsp->gp_start and an old value of rsp->jiffies_stall. But given
++ * the memory barriers, the only way that this can happen is if one
++ * grace period ends and another starts between these two fetches.
++ * Detect this by comparing rsp->completed with the previous fetch
++ * from rsp->gpnum.
++ *
++ * Given this check, comparisons of jiffies, rsp->jiffies_stall,
++ * and rsp->gp_start suffice to forestall false positives.
++ */
++ gpnum = ACCESS_ONCE(rsp->gpnum);
++ smp_rmb(); /* Pick up ->gpnum first... */
+ js = ACCESS_ONCE(rsp->jiffies_stall);
++ smp_rmb(); /* ...then ->jiffies_stall before the rest... */
++ gps = ACCESS_ONCE(rsp->gp_start);
++ smp_rmb(); /* ...and finally ->gp_start before ->completed. */
++ completed = ACCESS_ONCE(rsp->completed);
++ if (ULONG_CMP_GE(completed, gpnum) ||
++ ULONG_CMP_LT(j, js) ||
++ ULONG_CMP_GE(gps, js))
++ return; /* No stall or GP completed since entering function. */
+ rnp = rdp->mynode;
+ if (rcu_gp_in_progress(rsp) &&
+- (ACCESS_ONCE(rnp->qsmask) & rdp->grpmask) && ULONG_CMP_GE(j, js)) {
++ (ACCESS_ONCE(rnp->qsmask) & rdp->grpmask)) {
+
+ /* We haven't checked in, so go dump stack. */
+ print_cpu_stall(rsp);
+@@ -1331,9 +1365,10 @@ static int rcu_gp_init(struct rcu_state *rsp)
+ }
+
+ /* Advance to a new grace period and initialize state. */
++ record_gp_stall_check_time(rsp);
++ smp_wmb(); /* Record GP times before starting GP. */
+ rsp->gpnum++;
+ trace_rcu_grace_period(rsp->name, rsp->gpnum, TPS("start"));
+- record_gp_stall_check_time(rsp);
+ raw_spin_unlock_irq(&rnp->lock);
+
+ /* Exclude any concurrent CPU-hotplug operations. */
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 0030db473c99..0bcdceaca6e2 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -1873,11 +1873,11 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
+ * If a task dies, then it sets TASK_DEAD in tsk->state and calls
+ * schedule one last time. The schedule call will never return, and
+ * the scheduled task must drop that reference.
+- * The test for TASK_DEAD must occur while the runqueue locks are
+- * still held, otherwise prev could be scheduled on another cpu, die
+- * there before we look at prev->state, and then the reference would
+- * be dropped twice.
+- * Manfred Spraul <manfred@colorfullife.com>
++ *
++ * We must observe prev->state before clearing prev->on_cpu (in
++ * finish_lock_switch), otherwise a concurrent wakeup can get prev
++ * running on another CPU and we could race with its RUNNING -> DEAD
++ * transition, resulting in a double drop.
+ */
+ prev_state = prev->state;
+ vtime_task_switch(prev);
+@@ -4729,6 +4729,14 @@ static int sched_cpu_active(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+ {
+ switch (action & ~CPU_TASKS_FROZEN) {
++ case CPU_ONLINE:
++ /*
++ * At this point a starting CPU has marked itself as online via
++ * set_cpu_online(). But it might not yet have marked itself
++ * as active, which is essential from here on.
++ *
++ * Thus, fall-through and help the starting CPU along.
++ */
+ case CPU_DOWN_FAILED:
+ set_cpu_active((long)hcpu, true);
+ return NOTIFY_OK;
+diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
+index 4f310592b1ba..1a1cdc3783ed 100644
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -845,9 +845,10 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
+ * After ->on_cpu is cleared, the task can be moved to a different CPU.
+ * We must ensure this doesn't happen until the switch is completely
+ * finished.
++ *
++ * Pairs with the control dependency and rmb in try_to_wake_up().
+ */
+- smp_wmb();
+- prev->on_cpu = 0;
++ smp_store_release(&prev->on_cpu, 0);
+ #endif
+ #ifdef CONFIG_DEBUG_SPINLOCK
+ /* this is a valid case when another task releases the spinlock */
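
Replacing the smp_wmb()/plain-store pair with smp_store_release() states the contract directly: everything the outgoing task wrote, including its prev->state update, must be visible before any CPU observes on_cpu == 0. In C11 this is a release store paired with an acquire load on the waker's side; a sketch:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static int task_state;			/* written before the release */
static atomic_int on_cpu = 1;

static void *outgoing(void *arg)
{
	(void)arg;
	task_state = 42;		/* e.g. the prev->state update */
	/* release: all prior writes are ordered before this store */
	atomic_store_explicit(&on_cpu, 0, memory_order_release);
	return NULL;
}

static void *waker(void *arg)
{
	(void)arg;
	/* acquire: once 0 is seen, task_state == 42 is guaranteed */
	while (atomic_load_explicit(&on_cpu, memory_order_acquire))
		;
	printf("state seen by waker: %d\n", task_state);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&b, NULL, waker, NULL);
	pthread_create(&a, NULL, outgoing, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}
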
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index bb5f920268d7..bba4e426ccbc 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -1468,13 +1468,13 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
+ timer_stats_timer_set_start_info(&dwork->timer);
+
+ dwork->wq = wq;
++	/* the timer isn't guaranteed to run on this CPU, so record it earlier */
++ if (cpu == WORK_CPU_UNBOUND)
++ cpu = raw_smp_processor_id();
+ dwork->cpu = cpu;
+ timer->expires = jiffies + delay;
+
+- if (unlikely(cpu != WORK_CPU_UNBOUND))
+- add_timer_on(timer, cpu);
+- else
+- add_timer(timer);
++ add_timer_on(timer, cpu);
+ }
+
+ /**
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index c91c347bb3ea..a3a9676c65cf 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -2605,6 +2605,14 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
+ continue;
+
+ /*
++ * Shared VMAs have their own reserves and do not affect
++ * MAP_PRIVATE accounting but it is possible that a shared
++ * VMA is using the same page so check and skip such VMAs.
++ */
++ if (iter_vma->vm_flags & VM_MAYSHARE)
++ continue;
++
++ /*
+ * Unmap the page from other VMAs without their own reserves.
+ * They get marked to be SIGKILLed if they fault in these
+ * areas. This is because a future no-page fault on this VMA
+diff --git a/mm/slab.c b/mm/slab.c
+index c180fbb8460b..e160d9c39796 100644
+--- a/mm/slab.c
++++ b/mm/slab.c
+@@ -2304,9 +2304,16 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
+ size += BYTES_PER_WORD;
+ }
+ #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
+- if (size >= kmalloc_size(INDEX_NODE + 1)
+- && cachep->object_size > cache_line_size()
+- && ALIGN(size, cachep->align) < PAGE_SIZE) {
++ /*
++	 * To activate debug pagealloc, off-slab management is a necessary
++	 * requirement. In the early phase of initialization, small sized slabs
++	 * don't get initialized, so it would not be possible. So, we need
++	 * to check size >= 256, which guarantees that all necessary small
++	 * sized slabs are initialized in the current slab initialization sequence.
++ */
++ if (!slab_early_init && size >= kmalloc_size(INDEX_NODE) &&
++ size >= 256 && cachep->object_size > cache_line_size() &&
++ ALIGN(size, cachep->align) < PAGE_SIZE) {
+ cachep->obj_offset += PAGE_SIZE - ALIGN(size, cachep->align);
+ size = PAGE_SIZE;
+ }
+diff --git a/net/core/datagram.c b/net/core/datagram.c
+index 98e3d61e7476..f22f120771ef 100644
+--- a/net/core/datagram.c
++++ b/net/core/datagram.c
+@@ -796,6 +796,7 @@ EXPORT_SYMBOL(__skb_checksum_complete);
+ * @skb: skbuff
+ * @hlen: hardware length
+ * @iov: io vector
++ * @len: amount of data to copy from skb to iov
+ *
+ * Caller _must_ check that skb will fit to this iovec.
+ *
+@@ -805,11 +806,14 @@ EXPORT_SYMBOL(__skb_checksum_complete);
+ * can be modified!
+ */
+ int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
+- int hlen, struct iovec *iov)
++ int hlen, struct iovec *iov, int len)
+ {
+ __wsum csum;
+ int chunk = skb->len - hlen;
+
++ if (chunk > len)
++ chunk = len;
++
+ if (!chunk)
+ return 0;
+
+diff --git a/net/core/ethtool.c b/net/core/ethtool.c
+index 78e9d9223e40..944c60ce15d8 100644
+--- a/net/core/ethtool.c
++++ b/net/core/ethtool.c
+@@ -1077,7 +1077,7 @@ static int ethtool_get_strings(struct net_device *dev, void __user *useraddr)
+
+ gstrings.len = ret;
+
+- data = kmalloc(gstrings.len * ETH_GSTRING_LEN, GFP_USER);
++ data = kcalloc(gstrings.len, ETH_GSTRING_LEN, GFP_USER);
+ if (!data)
+ return -ENOMEM;
+
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index b01dd5f421da..de76393a9916 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -2726,11 +2726,12 @@ EXPORT_SYMBOL(skb_append_datato_frags);
+ */
+ unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
+ {
++ unsigned char *data = skb->data;
++
+ BUG_ON(len > skb->len);
+- skb->len -= len;
+- BUG_ON(skb->len < skb->data_len);
+- skb_postpull_rcsum(skb, skb->data, len);
+- return skb->data += len;
++ __skb_pull(skb, len);
++ skb_postpull_rcsum(skb, data, len);
++ return skb->data;
+ }
+ EXPORT_SYMBOL_GPL(skb_pull_rcsum);
+
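
The rewritten skb_pull_rcsum() snapshots skb->data before __skb_pull() advances it, so the checksum adjustment covers exactly the bytes that were removed, at the address where they actually lived. The rule (capture a pointer before the operation that moves it) in isolation, with a toy additive checksum:

#include <stdio.h>

struct buf {
	unsigned char *data;
	unsigned int len;
	unsigned int sum;	/* toy running checksum */
};

static unsigned int toy_csum(const unsigned char *p, unsigned int n)
{
	unsigned int s = 0;

	while (n--)
		s += *p++;
	return s;
}

/* Pull 'len' bytes off the front and subtract them from the checksum. */
static unsigned char *pull_rcsum(struct buf *b, unsigned int len)
{
	unsigned char *data = b->data;	/* snapshot before the pull */

	b->data += len;			/* the pull moves the pointer */
	b->len -= len;
	b->sum -= toy_csum(data, len);	/* old pointer, pulled bytes */
	return b->data;
}

int main(void)
{
	unsigned char bytes[] = { 1, 2, 3, 4, 5 };
	struct buf b = { bytes, 5, 0 };

	b.sum = toy_csum(bytes, 5);		/* 15 */
	pull_rcsum(&b, 2);			/* removes 1 + 2 */
	printf("len=%u sum=%u\n", b.len, b.sum);	/* len=3 sum=12 */
	return 0;
}
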
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 49c87a39948f..4829750aa424 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -4892,7 +4892,7 @@ static int tcp_copy_to_iovec(struct sock *sk, struct sk_buff *skb, int hlen)
+ err = skb_copy_datagram_iovec(skb, hlen, tp->ucopy.iov, chunk);
+ else
+ err = skb_copy_and_csum_datagram_iovec(skb, hlen,
+- tp->ucopy.iov);
++ tp->ucopy.iov, chunk);
+
+ if (!err) {
+ tp->ucopy.len -= chunk;
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index 268ed25f2d65..4908eaa1cdec 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -1245,7 +1245,7 @@ try_again:
+ else {
+ err = skb_copy_and_csum_datagram_iovec(skb,
+ sizeof(struct udphdr),
+- msg->msg_iov);
++ msg->msg_iov, copied);
+
+ if (err == -EINVAL)
+ goto csum_copy_err;
+diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
+index 430067cb9210..0d51ebc176a7 100644
+--- a/net/ipv6/raw.c
++++ b/net/ipv6/raw.c
+@@ -489,7 +489,7 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk,
+ goto csum_copy_err;
+ err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+ } else {
+- err = skb_copy_and_csum_datagram_iovec(skb, 0, msg->msg_iov);
++ err = skb_copy_and_csum_datagram_iovec(skb, 0, msg->msg_iov, copied);
+ if (err == -EINVAL)
+ goto csum_copy_err;
+ }
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index e09ca285e8f5..946ee8efe74b 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -410,7 +410,8 @@ try_again:
+ err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
+ msg->msg_iov, copied);
+ else {
+- err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov);
++ err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr),
++ msg->msg_iov, copied);
+ if (err == -EINVAL)
+ goto csum_copy_err;
+ }
+diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
+index b076e8309bc2..6639bc27edb9 100644
+--- a/net/l2tp/l2tp_core.c
++++ b/net/l2tp/l2tp_core.c
+@@ -1438,7 +1438,7 @@ static void l2tp_tunnel_del_work(struct work_struct *work)
+ tunnel = container_of(work, struct l2tp_tunnel, del_work);
+ sk = l2tp_tunnel_sock_lookup(tunnel);
+ if (!sk)
+- return;
++ goto out;
+
+ sock = sk->sk_socket;
+
+@@ -1459,6 +1459,8 @@ static void l2tp_tunnel_del_work(struct work_struct *work)
+ }
+
+ l2tp_tunnel_sock_put(sk);
++out:
++ l2tp_tunnel_dec_refcount(tunnel);
+ }
+
+ /* Create a socket for the tunnel, if one isn't set up by
+@@ -1788,8 +1790,13 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_create);
+ */
+ int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
+ {
++ l2tp_tunnel_inc_refcount(tunnel);
+ l2tp_tunnel_closeall(tunnel);
+- return (false == queue_work(l2tp_wq, &tunnel->del_work));
++ if (false == queue_work(l2tp_wq, &tunnel->del_work)) {
++ l2tp_tunnel_dec_refcount(tunnel);
++ return 1;
++ }
++ return 0;
+ }
+ EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);
+
+diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
+index 6d91d760a896..3e3e4f4f594a 100644
+--- a/net/netfilter/ipvs/ip_vs_sync.c
++++ b/net/netfilter/ipvs/ip_vs_sync.c
+@@ -612,7 +612,7 @@ static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp,
+ pkts = atomic_add_return(1, &cp->in_pkts);
+ else
+ pkts = sysctl_sync_threshold(ipvs);
+- ip_vs_sync_conn(net, cp->control, pkts);
++ ip_vs_sync_conn(net, cp, pkts);
+ }
+ }
+
+diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
+index 1692e7534759..c3d204973dbc 100644
+--- a/net/netfilter/ipvs/ip_vs_xmit.c
++++ b/net/netfilter/ipvs/ip_vs_xmit.c
+@@ -129,7 +129,6 @@ static struct rtable *do_output_route4(struct net *net, __be32 daddr,
+
+ memset(&fl4, 0, sizeof(fl4));
+ fl4.daddr = daddr;
+- fl4.saddr = (rt_mode & IP_VS_RT_MODE_CONNECT) ? *saddr : 0;
+ fl4.flowi4_flags = (rt_mode & IP_VS_RT_MODE_KNOWN_NH) ?
+ FLOWI_FLAG_KNOWN_NH : 0;
+
+diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
+index 4fd1ca94fd4a..71c46f463969 100644
+--- a/net/netfilter/nf_conntrack_expect.c
++++ b/net/netfilter/nf_conntrack_expect.c
+@@ -202,7 +202,8 @@ static inline int expect_clash(const struct nf_conntrack_expect *a,
+ a->mask.src.u3.all[count] & b->mask.src.u3.all[count];
+ }
+
+- return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask);
++ return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask) &&
++ nf_ct_zone(a->master) == nf_ct_zone(b->master);
+ }
+
+ static inline int expect_matches(const struct nf_conntrack_expect *a,
+diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
+index eea936b70d15..db744dd68707 100644
+--- a/net/netfilter/nf_conntrack_netlink.c
++++ b/net/netfilter/nf_conntrack_netlink.c
+@@ -2925,11 +2925,6 @@ ctnetlink_create_expect(struct net *net, u16 zone,
+ }
+
+ err = nf_ct_expect_related_report(exp, portid, report);
+- if (err < 0)
+- goto err_exp;
+-
+- return 0;
+-err_exp:
+ nf_ct_expect_put(exp);
+ err_ct:
+ nf_ct_put(ct);
+diff --git a/net/rxrpc/ar-recvmsg.c b/net/rxrpc/ar-recvmsg.c
+index 5cc2da5d295d..c67f5d3f6e61 100644
+--- a/net/rxrpc/ar-recvmsg.c
++++ b/net/rxrpc/ar-recvmsg.c
+@@ -185,7 +185,8 @@ int rxrpc_recvmsg(struct kiocb *iocb, struct socket *sock,
+ msg->msg_iov, copy);
+ } else {
+ ret = skb_copy_and_csum_datagram_iovec(skb, offset,
+- msg->msg_iov);
++ msg->msg_iov,
++ copy);
+ if (ret == -EINVAL)
+ goto csum_copy_error;
+ }
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index 9afa362d8a31..157b3595ef62 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -1954,6 +1954,11 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
+ goto out;
+ }
+
++ if (flags & MSG_PEEK)
++ skip = sk_peek_offset(sk, flags);
++ else
++ skip = 0;
++
+ do {
+ int chunk;
+ struct sk_buff *skb, *last;
+@@ -2000,7 +2005,6 @@ again:
+ break;
+ }
+
+- skip = sk_peek_offset(sk, flags);
+ while (skip >= unix_skb_len(skb)) {
+ skip -= unix_skb_len(skb);
+ last = skb;
+@@ -2064,6 +2068,16 @@ again:
+
+ sk_peek_offset_fwd(sk, chunk);
+
++ if (UNIXCB(skb).fp)
++ break;
++
++ skip = 0;
++ last = skb;
++ unix_state_lock(sk);
++ skb = skb_peek_next(skb, &sk->sk_receive_queue);
++ if (skb)
++ goto again;
++ unix_state_unlock(sk);
+ break;
+ }
+ } while (size);
+diff --git a/sound/arm/Kconfig b/sound/arm/Kconfig
+index 885683a3b0bd..e0406211716b 100644
+--- a/sound/arm/Kconfig
++++ b/sound/arm/Kconfig
+@@ -9,6 +9,14 @@ menuconfig SND_ARM
+ Drivers that are implemented on ASoC can be found in
+ "ALSA for SoC audio support" section.
+
++config SND_PXA2XX_LIB
++ tristate
++ select SND_AC97_CODEC if SND_PXA2XX_LIB_AC97
++ select SND_DMAENGINE_PCM
++
++config SND_PXA2XX_LIB_AC97
++ bool
++
+ if SND_ARM
+
+ config SND_ARMAACI
+@@ -21,13 +29,6 @@ config SND_PXA2XX_PCM
+ tristate
+ select SND_PCM
+
+-config SND_PXA2XX_LIB
+- tristate
+- select SND_AC97_CODEC if SND_PXA2XX_LIB_AC97
+-
+-config SND_PXA2XX_LIB_AC97
+- bool
+-
+ config SND_PXA2XX_AC97
+ tristate "AC97 driver for the Intel PXA2xx chip"
+ depends on ARCH_PXA
+diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
+index d54d218fe810..3c90743fa50b 100644
+--- a/sound/pci/hda/patch_cirrus.c
++++ b/sound/pci/hda/patch_cirrus.c
+@@ -47,6 +47,10 @@ struct cs_spec {
+ unsigned int spdif_present:1;
+ unsigned int sense_b:1;
+ hda_nid_t vendor_nid;
++
++ /* for MBP SPDIF control */
++ int (*spdif_sw_put)(struct snd_kcontrol *kcontrol,
++ struct snd_ctl_elem_value *ucontrol);
+ };
+
+ /* available models with CS420x */
+@@ -331,10 +335,21 @@ static int cs_init(struct hda_codec *codec)
+ return 0;
+ }
+
++static int cs_build_controls(struct hda_codec *codec)
++{
++ int err;
++
++ err = snd_hda_gen_build_controls(codec);
++ if (err < 0)
++ return err;
++ snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_BUILD);
++ return 0;
++}
++
+ #define cs_free snd_hda_gen_free
+
+ static const struct hda_codec_ops cs_patch_ops = {
+- .build_controls = snd_hda_gen_build_controls,
++ .build_controls = cs_build_controls,
+ .build_pcms = snd_hda_gen_build_pcms,
+ .init = cs_init,
+ .free = cs_free,
+@@ -601,12 +616,14 @@ static int patch_cs420x(struct hda_codec *codec)
+ enum {
+ CS4208_MAC_AUTO,
+ CS4208_MBA6,
++ CS4208_MBP11,
+ CS4208_GPIO0,
+ };
+
+ static const struct hda_model_fixup cs4208_models[] = {
+ { .id = CS4208_GPIO0, .name = "gpio0" },
+ { .id = CS4208_MBA6, .name = "mba6" },
++ { .id = CS4208_MBP11, .name = "mbp11" },
+ {}
+ };
+
+@@ -617,8 +634,10 @@ static const struct snd_pci_quirk cs4208_fixup_tbl[] = {
+
+ /* codec SSID matching */
+ static const struct snd_pci_quirk cs4208_mac_fixup_tbl[] = {
++ SND_PCI_QUIRK(0x106b, 0x5e00, "MacBookPro 11,2", CS4208_MBP11),
+ SND_PCI_QUIRK(0x106b, 0x7100, "MacBookAir 6,1", CS4208_MBA6),
+ SND_PCI_QUIRK(0x106b, 0x7200, "MacBookAir 6,2", CS4208_MBA6),
++ SND_PCI_QUIRK(0x106b, 0x7b00, "MacBookPro 12,1", CS4208_MBP11),
+ {} /* terminator */
+ };
+
+@@ -648,6 +667,36 @@ static void cs4208_fixup_mac(struct hda_codec *codec,
+ snd_hda_apply_fixup(codec, action);
+ }
+
++static int cs4208_spdif_sw_put(struct snd_kcontrol *kcontrol,
++ struct snd_ctl_elem_value *ucontrol)
++{
++ struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
++ struct cs_spec *spec = codec->spec;
++ hda_nid_t pin = spec->gen.autocfg.dig_out_pins[0];
++ int pinctl = ucontrol->value.integer.value[0] ? PIN_OUT : 0;
++
++ snd_hda_set_pin_ctl_cache(codec, pin, pinctl);
++ return spec->spdif_sw_put(kcontrol, ucontrol);
++}
++
++/* hook the SPDIF switch */
++static void cs4208_fixup_spdif_switch(struct hda_codec *codec,
++ const struct hda_fixup *fix, int action)
++{
++ if (action == HDA_FIXUP_ACT_BUILD) {
++ struct cs_spec *spec = codec->spec;
++ struct snd_kcontrol *kctl;
++
++ if (!spec->gen.autocfg.dig_out_pins[0])
++ return;
++ kctl = snd_hda_find_mixer_ctl(codec, "IEC958 Playback Switch");
++ if (!kctl)
++ return;
++ spec->spdif_sw_put = kctl->put;
++ kctl->put = cs4208_spdif_sw_put;
++ }
++}
++
+ static const struct hda_fixup cs4208_fixups[] = {
+ [CS4208_MBA6] = {
+ .type = HDA_FIXUP_PINS,
+@@ -655,6 +704,12 @@ static const struct hda_fixup cs4208_fixups[] = {
+ .chained = true,
+ .chain_id = CS4208_GPIO0,
+ },
++ [CS4208_MBP11] = {
++ .type = HDA_FIXUP_FUNC,
++ .v.func = cs4208_fixup_spdif_switch,
++ .chained = true,
++ .chain_id = CS4208_GPIO0,
++ },
+ [CS4208_GPIO0] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = cs4208_fixup_gpio0,
+diff --git a/sound/soc/dwc/designware_i2s.c b/sound/soc/dwc/designware_i2s.c
+index 2f6357578616..1b6cbbc95456 100644
+--- a/sound/soc/dwc/designware_i2s.c
++++ b/sound/soc/dwc/designware_i2s.c
+@@ -100,10 +100,10 @@ static inline void i2s_clear_irqs(struct dw_i2s_dev *dev, u32 stream)
+
+ if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ for (i = 0; i < 4; i++)
+- i2s_write_reg(dev->i2s_base, TOR(i), 0);
++ i2s_read_reg(dev->i2s_base, TOR(i));
+ } else {
+ for (i = 0; i < 4; i++)
+- i2s_write_reg(dev->i2s_base, ROR(i), 0);
++ i2s_read_reg(dev->i2s_base, ROR(i));
+ }
+ }
+
+diff --git a/sound/soc/pxa/Kconfig b/sound/soc/pxa/Kconfig
+index 4db74a083db1..dbaba4f4fa53 100644
+--- a/sound/soc/pxa/Kconfig
++++ b/sound/soc/pxa/Kconfig
+@@ -1,7 +1,6 @@
+ config SND_PXA2XX_SOC
+ tristate "SoC Audio for the Intel PXA2xx chip"
+ depends on ARCH_PXA
+- select SND_ARM
+ select SND_PXA2XX_LIB
+ help
+ Say Y or M if you want to add support for codecs attached to
+@@ -24,7 +23,6 @@ config SND_PXA2XX_AC97
+ config SND_PXA2XX_SOC_AC97
+ tristate
+ select AC97_BUS
+- select SND_ARM
+ select SND_PXA2XX_LIB_AC97
+ select SND_SOC_AC97_BUS
+
+diff --git a/sound/soc/pxa/pxa2xx-ac97.c b/sound/soc/pxa/pxa2xx-ac97.c
+index f1059d999de6..ae939cf22ebd 100644
+--- a/sound/soc/pxa/pxa2xx-ac97.c
++++ b/sound/soc/pxa/pxa2xx-ac97.c
+@@ -49,7 +49,7 @@ static struct snd_ac97_bus_ops pxa2xx_ac97_ops = {
+ .reset = pxa2xx_ac97_cold_reset,
+ };
+
+-static unsigned long pxa2xx_ac97_pcm_stereo_in_req = 12;
++static unsigned long pxa2xx_ac97_pcm_stereo_in_req = 11;
+ static struct snd_dmaengine_dai_dma_data pxa2xx_ac97_pcm_stereo_in = {
+ .addr = __PREG(PCDR),
+ .addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+@@ -57,7 +57,7 @@ static struct snd_dmaengine_dai_dma_data pxa2xx_ac97_pcm_stereo_in = {
+ .filter_data = &pxa2xx_ac97_pcm_stereo_in_req,
+ };
+
+-static unsigned long pxa2xx_ac97_pcm_stereo_out_req = 11;
++static unsigned long pxa2xx_ac97_pcm_stereo_out_req = 12;
+ static struct snd_dmaengine_dai_dma_data pxa2xx_ac97_pcm_stereo_out = {
+ .addr = __PREG(PCDR),
+ .addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+diff --git a/sound/synth/emux/emux_oss.c b/sound/synth/emux/emux_oss.c
+index daf61abc3670..646b66703bd8 100644
+--- a/sound/synth/emux/emux_oss.c
++++ b/sound/synth/emux/emux_oss.c
+@@ -69,7 +69,8 @@ snd_emux_init_seq_oss(struct snd_emux *emu)
+ struct snd_seq_oss_reg *arg;
+ struct snd_seq_device *dev;
+
+- if (snd_seq_device_new(emu->card, 0, SNDRV_SEQ_DEV_ID_OSS,
++ /* using device#1 here for avoiding conflicts with OPL3 */
++ if (snd_seq_device_new(emu->card, 1, SNDRV_SEQ_DEV_ID_OSS,
+ sizeof(struct snd_seq_oss_reg), &dev) < 0)
+ return;
+
+diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
+index 5098f144b92d..df4784df26b3 100644
+--- a/tools/perf/builtin-stat.c
++++ b/tools/perf/builtin-stat.c
+@@ -945,7 +945,7 @@ static void abs_printout(int cpu, int nr, struct perf_evsel *evsel, double avg)
+ static void print_aggr(char *prefix)
+ {
+ struct perf_evsel *counter;
+- int cpu, cpu2, s, s2, id, nr;
++ int cpu, s, s2, id, nr;
+ u64 ena, run, val;
+
+ if (!(aggr_map || aggr_get_id))
+@@ -957,8 +957,7 @@ static void print_aggr(char *prefix)
+ val = ena = run = 0;
+ nr = 0;
+ for (cpu = 0; cpu < perf_evsel__nr_cpus(counter); cpu++) {
+- cpu2 = perf_evsel__cpus(counter)->map[cpu];
+- s2 = aggr_get_id(evsel_list->cpus, cpu2);
++ s2 = aggr_get_id(perf_evsel__cpus(counter), cpu);
+ if (s2 != id)
+ continue;
+ val += counter->counts->cpu[cpu].val;
+diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
+index c3e5a3b817ab..3f82a2f65a65 100644
+--- a/tools/perf/util/header.c
++++ b/tools/perf/util/header.c
+@@ -1718,7 +1718,7 @@ static int process_nrcpus(struct perf_file_section *section __maybe_unused,
+ if (ph->needs_swap)
+ nr = bswap_32(nr);
+
+- ph->env.nr_cpus_online = nr;
++ ph->env.nr_cpus_avail = nr;
+
+ ret = readn(fd, &nr, sizeof(nr));
+ if (ret != sizeof(nr))
+@@ -1727,7 +1727,7 @@ static int process_nrcpus(struct perf_file_section *section __maybe_unused,
+ if (ph->needs_swap)
+ nr = bswap_32(nr);
+
+- ph->env.nr_cpus_avail = nr;
++ ph->env.nr_cpus_online = nr;
+ return 0;
+ }
+
+diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
+index 9ff6cf3e9a99..b1c914413c5f 100644
+--- a/tools/perf/util/hist.c
++++ b/tools/perf/util/hist.c
+@@ -160,6 +160,9 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
+ hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
+ hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
+ hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);
++
++ if (h->srcline)
++ hists__new_col_len(hists, HISTC_SRCLINE, strlen(h->srcline));
+ }
+
+ void hists__output_recalc_col_len(struct hists *hists, int max_rows)
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index a3510441f7d7..235b3f0cc97e 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -2813,10 +2813,25 @@ static void kvm_io_bus_destroy(struct kvm_io_bus *bus)
+ static inline int kvm_io_bus_cmp(const struct kvm_io_range *r1,
+ const struct kvm_io_range *r2)
+ {
+- if (r1->addr < r2->addr)
++ gpa_t addr1 = r1->addr;
++ gpa_t addr2 = r2->addr;
++
++ if (addr1 < addr2)
+ return -1;
+- if (r1->addr + r1->len > r2->addr + r2->len)
++
++ /* If r2->len == 0, match the exact address. If r2->len != 0,
++ * accept any overlapping write. Any order is acceptable for
++ * overlapping ranges, because kvm_io_bus_get_first_dev ensures
++ * we process all of them.
++ */
++ if (r2->len) {
++ addr1 += r1->len;
++ addr2 += r2->len;
++ }
++
++ if (addr1 > addr2)
+ return 1;
++
+ return 0;
+ }
+