author     Mike Pagano <mpagano@gentoo.org>  2015-02-02 18:21:19 -0500
committer  Mike Pagano <mpagano@gentoo.org>  2015-02-02 18:21:19 -0500
commit     a1d2f4883e1b6a2e2ca1849e6c9e1efd51c287d0 (patch)
tree       4a1746b9c5ff53c453909262d24259b827ecf05f
parent     Add DEVPTS_MULTIPLE_INSTANCES when GENTOO_LINUX_INIT_SYSTEMD is selected. See... (diff)
download   linux-patches-a1d2f4883e1b6a2e2ca1849e6c9e1efd51c287d0.tar.gz
           linux-patches-a1d2f4883e1b6a2e2ca1849e6c9e1efd51c287d0.tar.bz2
           linux-patches-a1d2f4883e1b6a2e2ca1849e6c9e1efd51c287d0.zip
Linux patch 3.4.106 (tag: 3.4-89)
-rw-r--r--  0000_README                 |    4
-rw-r--r--  1105_linux-3.4.106.patch    | 6748
2 files changed, 6752 insertions, 0 deletions
diff --git a/0000_README b/0000_README
index 5ff15066..ba104e45 100644
--- a/0000_README
+++ b/0000_README
@@ -459,6 +459,10 @@ Patch: 1104_linux-3.4.105.patch
From: http://www.kernel.org
Desc: Linux 3.4.105
+Patch: 1105_linux-3.4.106.patch
+From: http://www.kernel.org
+Desc: Linux 3.4.106
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1105_linux-3.4.106.patch b/1105_linux-3.4.106.patch
new file mode 100644
index 00000000..61b5c3cf
--- /dev/null
+++ b/1105_linux-3.4.106.patch
@@ -0,0 +1,6748 @@
+diff --git a/Documentation/lzo.txt b/Documentation/lzo.txt
+new file mode 100644
+index 000000000000..ea45dd3901e3
+--- /dev/null
++++ b/Documentation/lzo.txt
+@@ -0,0 +1,164 @@
++
++LZO stream format as understood by Linux's LZO decompressor
++===========================================================
++
++Introduction
++
++ This is not a specification. No specification seems to be publicly available
++ for the LZO stream format. This document describes what input format the LZO
++ decompressor as implemented in the Linux kernel understands. The file
++ analysed here is lib/lzo/lzo1x_decompress_safe.c. No analysis was made of
++ the compressor or of any other implementation, though it seems likely that
++ the format matches the standard one. The purpose of this document is to
++ better understand what the code does in order to propose more efficient
++ fixes for future bug reports.
++
++Description
++
++ The stream is composed of a series of instructions, operands, and data. The
++ instructions consist of a few bits representing an opcode, and of bits
++ forming the operands for the instruction, whose size and position depend on
++ the opcode and on the number of literals copied by the previous instruction.
++ The operands are used to indicate :
++
++ - a distance when copying data from the dictionary (past output buffer)
++ - a length (number of bytes to copy from the dictionary)
++ - the number of literals to copy, which is retained in the variable "state"
++   as a piece of information for the next instructions.
++
++ Optionally, depending on the opcode and operands, extra data may follow.
++ This extra data can be a complement to the operand (e.g. a length or a
++ distance encoded on larger values), or a literal to be copied to the output
++ buffer.
++
++ The first byte of the block follows a different encoding from the other
++ bytes; it seems to be optimized for literal use only, since there is no
++ dictionary yet prior to that byte.
++
++ Lengths are always encoded with a variable size, starting with a small
++ number of bits in the operand. If that number of bits isn't enough to
++ represent the length, more bytes are consumed, each extra byte adding at
++ most 255 to the length (thus the compression ratio cannot exceed around
++ 255:1). The variable length encoding using #bits is always the same :
++
++ length = byte & ((1 << #bits) - 1)
++ if (!length) {
++ length = ((1 << #bits) - 1)
++ length += 255*(number of zero bytes)
++ length += first-non-zero-byte
++ }
++ length += constant (generally 2 or 3)
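++
++ As a rough C sketch only (decode_length and its arguments are names made
++ up for this document, not taken from the kernel sources), the scheme above
++ could be written as:
++
++   static size_t decode_length(const unsigned char **in,
++                               unsigned char byte, int bits, size_t base)
++   {
++       size_t len = byte & ((1 << bits) - 1);
++
++       if (!len) {
++           len = (1 << bits) - 1;
++           while (**in == 0) {        /* each zero byte adds 255 */
++               len += 255;
++               (*in)++;
++           }
++           len += *(*in)++;           /* first non-zero byte */
++       }
++       return len + base;             /* "constant", generally 2 or 3 */
++   }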
++
++ For references to the dictionary, distances are relative to the output
++ pointer. Distances are encoded using very few bits belonging to certain
++ ranges, resulting in multiple copy instructions using different encodings.
++ Certain encodings involve one extra byte, others involve two extra bytes
++ forming a little-endian 16-bit quantity (marked LE16 below).
++
++ After any instruction except the large literal copy, 0, 1, 2 or 3 literals
++ are copied before starting the next instruction. The number of literals that
++ were copied may change the meaning and behaviour of the next instruction. In
++ practice, only one instruction needs to know whether 0, less than 4, or more
++ literals were copied. This is the information stored in the <state> variable
++ in this implementation. This number of immediate literals to be copied is
++ generally encoded in the last two bits of the instruction but may also be
++ taken from the last two bits of an extra operand (eg: distance).
++
++ End of stream is declared when a block copy of distance 0 is seen. Only one
++ instruction may encode this distance (0001HLLL); it takes one LE16 operand
++ for the distance, thus requiring 3 bytes.
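++
++ In a decoder this is a simple test on the decoded distance; as a hedged
++ sketch (variable names here are illustrative, not the kernel's):
++
++   /* 0001HLLL with H == 0 and a zero D field yields distance 16384,
++    * which is reserved as the end-of-stream marker.
++    */
++   if (op >= 16 && op < 32 && distance == 16384)
++       return 0;   /* stream complete */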
++
++ IMPORTANT NOTE : in the code some length checks are missing because certain
++ instructions are called under the assumption that a certain number of bytes
++ follow, which has already been guaranteed before parsing the instructions.
++ They just have to "refill" this credit if they consume extra bytes. This is
++ an implementation design choice independent of the algorithm or encoding.
++
++Byte sequences
++
++ First byte encoding :
++
++ 0..17 : follow regular instruction encoding, see below. It is worth
++ noting that codes 16 and 17 will represent a block copy from
++ the dictionary which is empty, and that they will always be
++ invalid at this place.
++
++ 18..21 : copy 0..3 literals
++ state = (byte - 17) = 0..3 [ copy <state> literals ]
++ skip byte
++
++ 22..255 : copy literal string
++ length = (byte - 17) = 4..238
++ state = 4 [ don't copy extra literals ]
++ skip byte
++
++ Instruction encoding :
++
++ 0 0 0 0 X X X X (0..15)
++ Depends on the number of literals copied by the last instruction.
++ If the last instruction did not copy any literal (state == 0), this
++ encoding will be a copy of 4 or more literals, and must be interpreted
++ like this :
++
++ 0 0 0 0 L L L L (0..15) : copy long literal string
++ length = 3 + (L ?: 15 + (zero_bytes * 255) + non_zero_byte)
++ state = 4 (no extra literals are copied)
++
++ If the last instruction copied between 1 and 3 literals (encoded in
++ the instruction's opcode or distance), the instruction is a copy of a
++ 2-byte block from the dictionary within a 1kB distance. It is worth
++ noting that this instruction provides little savings since it uses 2
++ bytes to encode a copy of 2 other bytes, but it encodes the number of
++ following literals for free. It must be interpreted like this :
++
++ 0 0 0 0 D D S S (0..15) : copy 2 bytes from <= 1kB distance
++ length = 2
++ state = S (copy S literals after this block)
++ Always followed by exactly one byte : H H H H H H H H
++ distance = (H << 2) + D + 1
++
++ If the last instruction copied 4 or more literals (as detected by
++ state == 4), the instruction becomes a copy of a 3-byte block from the
++ dictionary within a 2..3kB distance, and must be interpreted like this :
++
++ 0 0 0 0 D D S S (0..15) : copy 3 bytes from 2..3 kB distance
++ length = 3
++ state = S (copy S literals after this block)
++ Always followed by exactly one byte : H H H H H H H H
++ distance = (H << 2) + D + 2049
++
++ 0 0 0 1 H L L L (16..31)
++ Copy of a block within 16..48kB distance (preferably less than 10B)
++ length = 2 + (L ?: 7 + (zero_bytes * 255) + non_zero_byte)
++ Always followed by exactly one LE16 : D D D D D D D D : D D D D D D S S
++ distance = 16384 + (H << 14) + D
++ state = S (copy S literals after this block)
++ End of stream is reached if distance == 16384
++
++ 0 0 1 L L L L L (32..63)
++ Copy of small block within 16kB distance (preferably less than 34B)
++ length = 2 + (L ?: 31 + (zero_bytes * 255) + non_zero_byte)
++ Always followed by exactly one LE16 : D D D D D D D D : D D D D D D S S
++ distance = D + 1
++ state = S (copy S literals after this block)
++
++ 0 1 L D D D S S (64..127)
++ Copy 3-4 bytes from block within 2kB distance
++ state = S (copy S literals after this block)
++ length = 3 + L
++ Always followed by exactly one byte : H H H H H H H H
++ distance = (H << 3) + D + 1
++
++ 1 L L D D D S S (128..255)
++ Copy 5-8 bytes from block within 2kB distance
++ state = S (copy S literals after this block)
++ length = 5 + L
++ Always followed by exactly one byte : H H H H H H H H
++ distance = (H << 3) + D + 1
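++
++ To summarize, the ranges above suggest a dispatch of the following shape;
++ this is a rough sketch for orientation only, not the structure of the
++ actual decoder loop in lzo1x_decompress_safe.c :
++
++   unsigned char op = *in++;
++
++   if (op < 16) {
++       /* depends on state: long literal run, or a 2/3-byte copy */
++   } else if (op < 32) {
++       /* 0 0 0 1 H L L L : 16..48kB copy, or end of stream */
++   } else if (op < 64) {
++       /* 0 0 1 L L L L L : small copy within 16kB */
++   } else if (op < 128) {
++       /* 0 1 L D D D S S : copy 3-4 bytes within 2kB */
++   } else {
++       /* 1 L L D D D S S : copy 5-8 bytes within 2kB */
++   }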
++
++Authors
++
++ This document was written by Willy Tarreau <w@1wt.eu> on 2014/07/19 during
++ an analysis of the decompression code available in Linux 3.16-rc5. The code
++ is tricky; it is possible that this document contains mistakes or that a few
++ corner cases were overlooked. In any case, please report any doubts, fixes,
++ or proposed updates to the author(s) so that the document can be updated.
+diff --git a/Makefile b/Makefile
+index cf2c8a82ca3e..649f1462ebf8 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 4
+-SUBLEVEL = 105
++SUBLEVEL = 106
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/m68k/mm/hwtest.c b/arch/m68k/mm/hwtest.c
+index 2c7dde3c6430..2a5259fd23eb 100644
+--- a/arch/m68k/mm/hwtest.c
++++ b/arch/m68k/mm/hwtest.c
+@@ -28,9 +28,11 @@
+ int hwreg_present( volatile void *regp )
+ {
+ int ret = 0;
++ unsigned long flags;
+ long save_sp, save_vbr;
+ long tmp_vectors[3];
+
++ local_irq_save(flags);
+ __asm__ __volatile__
+ ( "movec %/vbr,%2\n\t"
+ "movel #Lberr1,%4@(8)\n\t"
+@@ -46,6 +48,7 @@ int hwreg_present( volatile void *regp )
+ : "=&d" (ret), "=&r" (save_sp), "=&r" (save_vbr)
+ : "a" (regp), "a" (tmp_vectors)
+ );
++ local_irq_restore(flags);
+
+ return( ret );
+ }
+@@ -58,9 +61,11 @@ EXPORT_SYMBOL(hwreg_present);
+ int hwreg_write( volatile void *regp, unsigned short val )
+ {
+ int ret;
++ unsigned long flags;
+ long save_sp, save_vbr;
+ long tmp_vectors[3];
+
++ local_irq_save(flags);
+ __asm__ __volatile__
+ ( "movec %/vbr,%2\n\t"
+ "movel #Lberr2,%4@(8)\n\t"
+@@ -78,6 +83,7 @@ int hwreg_write( volatile void *regp, unsigned short val )
+ : "=&d" (ret), "=&r" (save_sp), "=&r" (save_vbr)
+ : "a" (regp), "a" (tmp_vectors), "g" (val)
+ );
++ local_irq_restore(flags);
+
+ return( ret );
+ }
+diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
+index 0bc485b3cd60..6d64efe77026 100644
+--- a/arch/mips/mm/tlbex.c
++++ b/arch/mips/mm/tlbex.c
+@@ -1041,6 +1041,7 @@ static void __cpuinit build_update_entries(u32 **p, unsigned int tmp,
+ struct mips_huge_tlb_info {
+ int huge_pte;
+ int restore_scratch;
++ bool need_reload_pte;
+ };
+
+ static struct mips_huge_tlb_info __cpuinit
+@@ -1055,6 +1056,7 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
+
+ rv.huge_pte = scratch;
+ rv.restore_scratch = 0;
++ rv.need_reload_pte = false;
+
+ if (check_for_high_segbits) {
+ UASM_i_MFC0(p, tmp, C0_BADVADDR);
+@@ -1247,6 +1249,7 @@ static void __cpuinit build_r4000_tlb_refill_handler(void)
+ } else {
+ htlb_info.huge_pte = K0;
+ htlb_info.restore_scratch = 0;
++ htlb_info.need_reload_pte = true;
+ vmalloc_mode = refill_noscratch;
+ /*
+ * create the plain linear handler
+@@ -1283,6 +1286,8 @@ static void __cpuinit build_r4000_tlb_refill_handler(void)
+ }
+ #ifdef CONFIG_HUGETLB_PAGE
+ uasm_l_tlb_huge_update(&l, p);
++ if (htlb_info.need_reload_pte)
++ UASM_i_LW(&p, htlb_info.huge_pte, 0, K1);
+ build_huge_update_entries(&p, htlb_info.huge_pte, K1);
+ build_huge_tlb_write_entry(&p, &l, &r, K0, tlb_random,
+ htlb_info.restore_scratch);
+diff --git a/arch/mips/oprofile/backtrace.c b/arch/mips/oprofile/backtrace.c
+index 6854ed5097d2..83a1dfd8f0e3 100644
+--- a/arch/mips/oprofile/backtrace.c
++++ b/arch/mips/oprofile/backtrace.c
+@@ -92,7 +92,7 @@ static inline int unwind_user_frame(struct stackframe *old_frame,
+ /* This marks the end of the previous function,
+ which means we overran. */
+ break;
+- stack_size = (unsigned) stack_adjustment;
++ stack_size = (unsigned long) stack_adjustment;
+ } else if (is_ra_save_ins(&ip)) {
+ int ra_slot = ip.i_format.simmediate;
+ if (ra_slot < 0)
+diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
+index e500969bea0c..c3fc39ee57a5 100644
+--- a/arch/powerpc/kernel/entry_64.S
++++ b/arch/powerpc/kernel/entry_64.S
+@@ -813,7 +813,13 @@ user_work:
+ b .ret_from_except_lite
+
+ 1: bl .save_nvgprs
++ /*
++ * Use a non volatile GPR to save and restore our thread_info flags
++ * across the call to restore_interrupts.
++ */
++ mr r30,r4
+ bl .restore_interrupts
++ mr r4,r30
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl .do_notify_resume
+ b .ret_from_except
+diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
+index 10e13b331d38..df69bcb13c79 100644
+--- a/arch/s390/kvm/interrupt.c
++++ b/arch/s390/kvm/interrupt.c
+@@ -43,6 +43,7 @@ static int __interrupt_is_deliverable(struct kvm_vcpu *vcpu,
+ return 0;
+ if (vcpu->arch.sie_block->gcr[0] & 0x2000ul)
+ return 1;
++ return 0;
+ case KVM_S390_INT_EMERGENCY:
+ if (psw_extint_disabled(vcpu))
+ return 0;
+diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
+index e95822d683f4..fa9c8c7bc500 100644
+--- a/arch/x86/include/asm/desc.h
++++ b/arch/x86/include/asm/desc.h
+@@ -250,7 +250,8 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
+ gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
+ }
+
+-#define _LDT_empty(info) \
++/* This intentionally ignores lm, since 32-bit apps don't have that field. */
++#define LDT_empty(info) \
+ ((info)->base_addr == 0 && \
+ (info)->limit == 0 && \
+ (info)->contents == 0 && \
+@@ -260,11 +261,18 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
+ (info)->seg_not_present == 1 && \
+ (info)->useable == 0)
+
+-#ifdef CONFIG_X86_64
+-#define LDT_empty(info) (_LDT_empty(info) && ((info)->lm == 0))
+-#else
+-#define LDT_empty(info) (_LDT_empty(info))
+-#endif
++/* Lots of programs expect an all-zero user_desc to mean "no segment at all". */
++static inline bool LDT_zero(const struct user_desc *info)
++{
++ return (info->base_addr == 0 &&
++ info->limit == 0 &&
++ info->contents == 0 &&
++ info->read_exec_only == 0 &&
++ info->seg_32bit == 0 &&
++ info->limit_in_pages == 0 &&
++ info->seg_not_present == 0 &&
++ info->useable == 0);
++}
+
+ static inline void clear_LDT(void)
+ {
+diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
+index 5939f44fe0c0..06ec1fe26d98 100644
+--- a/arch/x86/include/asm/elf.h
++++ b/arch/x86/include/asm/elf.h
+@@ -155,8 +155,9 @@ do { \
+ #define elf_check_arch(x) \
+ ((x)->e_machine == EM_X86_64)
+
+-#define compat_elf_check_arch(x) \
+- (elf_check_arch_ia32(x) || (x)->e_machine == EM_X86_64)
++#define compat_elf_check_arch(x) \
++ (elf_check_arch_ia32(x) || \
++ (IS_ENABLED(CONFIG_X86_X32_ABI) && (x)->e_machine == EM_X86_64))
+
+ #if __USER32_DS != __USER_DS
+ # error "The following code assumes __USER32_DS == __USER_DS"
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index 944471f4d142..4f787579b329 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -453,6 +453,7 @@ struct kvm_vcpu_arch {
+ u64 mmio_gva;
+ unsigned access;
+ gfn_t mmio_gfn;
++ u64 mmio_gen;
+
+ struct kvm_pmu pmu;
+
+@@ -881,6 +882,20 @@ static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
+ kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
+ }
+
++static inline u64 get_canonical(u64 la)
++{
++ return ((int64_t)la << 16) >> 16;
++}
++
++static inline bool is_noncanonical_address(u64 la)
++{
++#ifdef CONFIG_X86_64
++ return get_canonical(la) != la;
++#else
++ return false;
++#endif
++}
++
+ #define TSS_IOPB_BASE_OFFSET 0x66
+ #define TSS_BASE_SIZE 0x68
+ #define TSS_IOPB_SIZE (65536 / 8)
+@@ -939,7 +954,7 @@ int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
+ int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
+
+ void kvm_define_shared_msr(unsigned index, u32 msr);
+-void kvm_set_shared_msr(unsigned index, u64 val, u64 mask);
++int kvm_set_shared_msr(unsigned index, u64 val, u64 mask);
+
+ bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip);
+
+diff --git a/arch/x86/include/asm/page_32_types.h b/arch/x86/include/asm/page_32_types.h
+index ade619ff9e2a..88dae6b3d7d5 100644
+--- a/arch/x86/include/asm/page_32_types.h
++++ b/arch/x86/include/asm/page_32_types.h
+@@ -18,7 +18,6 @@
+ #define THREAD_ORDER 1
+ #define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
+
+-#define STACKFAULT_STACK 0
+ #define DOUBLEFAULT_STACK 1
+ #define NMI_STACK 0
+ #define DEBUG_STACK 0
+diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
+index 7639dbf5d223..a9e9937b9a62 100644
+--- a/arch/x86/include/asm/page_64_types.h
++++ b/arch/x86/include/asm/page_64_types.h
+@@ -14,12 +14,11 @@
+ #define IRQ_STACK_ORDER 2
+ #define IRQ_STACK_SIZE (PAGE_SIZE << IRQ_STACK_ORDER)
+
+-#define STACKFAULT_STACK 1
+-#define DOUBLEFAULT_STACK 2
+-#define NMI_STACK 3
+-#define DEBUG_STACK 4
+-#define MCE_STACK 5
+-#define N_EXCEPTION_STACKS 5 /* hw limit: 7 */
++#define DOUBLEFAULT_STACK 1
++#define NMI_STACK 2
++#define DEBUG_STACK 3
++#define MCE_STACK 4
++#define N_EXCEPTION_STACKS 4 /* hw limit: 7 */
+
+ #define PUD_PAGE_SIZE (_AC(1, UL) << PUD_SHIFT)
+ #define PUD_PAGE_MASK (~(PUD_PAGE_SIZE-1))
+diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
+index 31f180c21ce9..504d1cf9def8 100644
+--- a/arch/x86/include/asm/vmx.h
++++ b/arch/x86/include/asm/vmx.h
+@@ -279,6 +279,8 @@ enum vmcs_field {
+ #define EXIT_REASON_APIC_ACCESS 44
+ #define EXIT_REASON_EPT_VIOLATION 48
+ #define EXIT_REASON_EPT_MISCONFIG 49
++#define EXIT_REASON_INVEPT 50
++#define EXIT_REASON_INVVPID 53
+ #define EXIT_REASON_WBINVD 54
+ #define EXIT_REASON_XSETBV 55
+
+diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
+index edc24480469f..cb5b54e796eb 100644
+--- a/arch/x86/kernel/apic/apic.c
++++ b/arch/x86/kernel/apic/apic.c
+@@ -1229,7 +1229,7 @@ void __cpuinit setup_local_APIC(void)
+ unsigned int value, queued;
+ int i, j, acked = 0;
+ unsigned long long tsc = 0, ntsc;
+- long long max_loops = cpu_khz;
++ long long max_loops = cpu_khz ? cpu_khz : 1000000;
+
+ if (cpu_has_tsc)
+ rdtscll(tsc);
+@@ -1325,7 +1325,7 @@ void __cpuinit setup_local_APIC(void)
+ acked);
+ break;
+ }
+- if (cpu_has_tsc) {
++ if (cpu_has_tsc && cpu_khz) {
+ rdtscll(ntsc);
+ max_loops = (cpu_khz << 10) - (ntsc - tsc);
+ } else
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index cf79302198a6..114db0fee86c 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -142,6 +142,8 @@ EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
+
+ static int __init x86_xsave_setup(char *s)
+ {
++ if (strlen(s))
++ return 0;
+ setup_clear_cpu_cap(X86_FEATURE_XSAVE);
+ setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
+ return 1;
+diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
+index 3e6ff6cbf42a..e7a64dd602d9 100644
+--- a/arch/x86/kernel/cpu/intel.c
++++ b/arch/x86/kernel/cpu/intel.c
+@@ -143,6 +143,21 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
+ setup_clear_cpu_cap(X86_FEATURE_ERMS);
+ }
+ }
++
++ /*
++ * Intel Quark Core DevMan_001.pdf section 6.4.11
++ * "The operating system also is required to invalidate (i.e., flush)
++ * the TLB when any changes are made to any of the page table entries.
++ * The operating system must reload CR3 to cause the TLB to be flushed"
++ *
++ * As a result cpu_has_pge() in arch/x86/include/asm/tlbflush.h should
++ * be false so that __flush_tlb_all() causes CR3 instead of CR4.PGE
++ * to be modified
++ */
++ if (c->x86 == 5 && c->x86_model == 9) {
++ pr_info("Disabling PGE capability bit\n");
++ setup_clear_cpu_cap(X86_FEATURE_PGE);
++ }
+ }
+
+ #ifdef CONFIG_X86_32
+diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
+index 17107bd6e1f0..e8206060a0a8 100644
+--- a/arch/x86/kernel/dumpstack_64.c
++++ b/arch/x86/kernel/dumpstack_64.c
+@@ -24,7 +24,6 @@ static char x86_stack_ids[][8] = {
+ [ DEBUG_STACK-1 ] = "#DB",
+ [ NMI_STACK-1 ] = "NMI",
+ [ DOUBLEFAULT_STACK-1 ] = "#DF",
+- [ STACKFAULT_STACK-1 ] = "#SS",
+ [ MCE_STACK-1 ] = "#MC",
+ #if DEBUG_STKSZ > EXCEPTION_STKSZ
+ [ N_EXCEPTION_STACKS ...
+diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
+index 42b055e24691..45f9c70f1246 100644
+--- a/arch/x86/kernel/entry_64.S
++++ b/arch/x86/kernel/entry_64.S
+@@ -912,13 +912,16 @@ ENTRY(native_iret)
+ jnz native_irq_return_ldt
+ #endif
+
++.global native_irq_return_iret
+ native_irq_return_iret:
++ /*
++ * This may fault. Non-paranoid faults on return to userspace are
++ * handled by fixup_bad_iret. These include #SS, #GP, and #NP.
++ * Double-faults due to espfix64 are handled in do_double_fault.
++ * Other faults here are fatal.
++ */
+ iretq
+
+- .section __ex_table,"a"
+- .quad native_irq_return_iret, bad_iret
+- .previous
+-
+ #ifdef CONFIG_X86_ESPFIX64
+ native_irq_return_ldt:
+ pushq_cfi %rax
+@@ -945,25 +948,6 @@ native_irq_return_ldt:
+ jmp native_irq_return_iret
+ #endif
+
+- .section .fixup,"ax"
+-bad_iret:
+- /*
+- * The iret traps when the %cs or %ss being restored is bogus.
+- * We've lost the original trap vector and error code.
+- * #GPF is the most likely one to get for an invalid selector.
+- * So pretend we completed the iret and took the #GPF in user mode.
+- *
+- * We are now running with the kernel GS after exception recovery.
+- * But error_entry expects us to have user GS to match the user %cs,
+- * so swap back.
+- */
+- pushq $0
+-
+- SWAPGS
+- jmp general_protection
+-
+- .previous
+-
+ /* edi: workmask, edx: work */
+ retint_careful:
+ CFI_RESTORE_STATE
+@@ -1011,37 +995,6 @@ ENTRY(retint_kernel)
+ CFI_ENDPROC
+ END(common_interrupt)
+
+- /*
+- * If IRET takes a fault on the espfix stack, then we
+- * end up promoting it to a doublefault. In that case,
+- * modify the stack to make it look like we just entered
+- * the #GP handler from user space, similar to bad_iret.
+- */
+-#ifdef CONFIG_X86_ESPFIX64
+- ALIGN
+-__do_double_fault:
+- XCPT_FRAME 1 RDI+8
+- movq RSP(%rdi),%rax /* Trap on the espfix stack? */
+- sarq $PGDIR_SHIFT,%rax
+- cmpl $ESPFIX_PGD_ENTRY,%eax
+- jne do_double_fault /* No, just deliver the fault */
+- cmpl $__KERNEL_CS,CS(%rdi)
+- jne do_double_fault
+- movq RIP(%rdi),%rax
+- cmpq $native_irq_return_iret,%rax
+- jne do_double_fault /* This shouldn't happen... */
+- movq PER_CPU_VAR(kernel_stack),%rax
+- subq $(6*8-KERNEL_STACK_OFFSET),%rax /* Reset to original stack */
+- movq %rax,RSP(%rdi)
+- movq $0,(%rax) /* Missing (lost) #GP error code */
+- movq $general_protection,RIP(%rdi)
+- retq
+- CFI_ENDPROC
+-END(__do_double_fault)
+-#else
+-# define __do_double_fault do_double_fault
+-#endif
+-
+ /*
+ * End of kprobes section
+ */
+@@ -1217,7 +1170,7 @@ zeroentry overflow do_overflow
+ zeroentry bounds do_bounds
+ zeroentry invalid_op do_invalid_op
+ zeroentry device_not_available do_device_not_available
+-paranoiderrorentry double_fault __do_double_fault
++paranoiderrorentry double_fault do_double_fault
+ zeroentry coprocessor_segment_overrun do_coprocessor_segment_overrun
+ errorentry invalid_TSS do_invalid_TSS
+ errorentry segment_not_present do_segment_not_present
+@@ -1431,7 +1384,7 @@ apicinterrupt XEN_HVM_EVTCHN_CALLBACK \
+
+ paranoidzeroentry_ist debug do_debug DEBUG_STACK
+ paranoidzeroentry_ist int3 do_int3 DEBUG_STACK
+-paranoiderrorentry stack_segment do_stack_segment
++errorentry stack_segment do_stack_segment
+ #ifdef CONFIG_XEN
+ zeroentry xen_debug do_debug
+ zeroentry xen_int3 do_int3
+@@ -1541,16 +1494,15 @@ error_sti:
+
+ /*
+ * There are two places in the kernel that can potentially fault with
+- * usergs. Handle them here. The exception handlers after iret run with
+- * kernel gs again, so don't set the user space flag. B stepping K8s
+- * sometimes report an truncated RIP for IRET exceptions returning to
+- * compat mode. Check for these here too.
++ * usergs. Handle them here. B stepping K8s sometimes report a
++ * truncated RIP for IRET exceptions returning to compat mode. Check
++ * for these here too.
+ */
+ error_kernelspace:
+ incl %ebx
+ leaq native_irq_return_iret(%rip),%rcx
+ cmpq %rcx,RIP+8(%rsp)
+- je error_swapgs
++ je error_bad_iret
+ movl %ecx,%eax /* zero extend */
+ cmpq %rax,RIP+8(%rsp)
+ je bstep_iret
+@@ -1561,7 +1513,15 @@ error_kernelspace:
+ bstep_iret:
+ /* Fix truncated RIP */
+ movq %rcx,RIP+8(%rsp)
+- jmp error_swapgs
++ /* fall through */
++
++error_bad_iret:
++ SWAPGS
++ mov %rsp,%rdi
++ call fixup_bad_iret
++ mov %rax,%rsp
++ decl %ebx /* Return to usergs */
++ jmp error_sti
+ CFI_ENDPROC
+ END(error_entry)
+
+diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
+index e554e5ad2fe8..226f28413f76 100644
+--- a/arch/x86/kernel/kvm.c
++++ b/arch/x86/kernel/kvm.c
+@@ -258,7 +258,14 @@ do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
+ static void __init paravirt_ops_setup(void)
+ {
+ pv_info.name = "KVM";
+- pv_info.paravirt_enabled = 1;
++
++ /*
++ * KVM isn't paravirt in the sense of paravirt_enabled. A KVM
++ * guest kernel works like a bare metal kernel with additional
++ * features, and paravirt_enabled is about features that are
++ * missing.
++ */
++ pv_info.paravirt_enabled = 0;
+
+ if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
+ pv_cpu_ops.io_delay = kvm_io_delay;
+diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
+index f8492da65bfc..5e3f91bb6ec3 100644
+--- a/arch/x86/kernel/kvmclock.c
++++ b/arch/x86/kernel/kvmclock.c
+@@ -212,7 +212,6 @@ void __init kvmclock_init(void)
+ #endif
+ kvm_get_preset_lpj();
+ clocksource_register_hz(&kvm_clock, NSEC_PER_SEC);
+- pv_info.paravirt_enabled = 1;
+ pv_info.name = "KVM";
+
+ if (kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE_STABLE_BIT))
+diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
+index 9d9d2f9e77a5..9d25a6eef1e1 100644
+--- a/arch/x86/kernel/tls.c
++++ b/arch/x86/kernel/tls.c
+@@ -27,6 +27,42 @@ static int get_free_idx(void)
+ return -ESRCH;
+ }
+
++static bool tls_desc_okay(const struct user_desc *info)
++{
++ /*
++ * For historical reasons (i.e. no one ever documented how any
++ * of the segmentation APIs work), user programs can and do
++ * assume that a struct user_desc that's all zeros except for
++ * entry_number means "no segment at all". This never actually
++ * worked. In fact, up to Linux 3.19, a struct user_desc like
++ * this would create a 16-bit read-write segment with base and
++ * limit both equal to zero.
++ *
++ * That was close enough to "no segment at all" until we
++ * hardened this function to disallow 16-bit TLS segments. Fix
++ * it up by interpreting these zeroed segments the way that they
++ * were almost certainly intended to be interpreted.
++ *
++ * The correct way to ask for "no segment at all" is to specify
++ * a user_desc that satisfies LDT_empty. To keep everything
++ * working, we accept both.
++ *
++ * Note that there's a similar kludge in modify_ldt -- look at
++ * the distinction between modes 1 and 0x11.
++ */
++ if (LDT_empty(info) || LDT_zero(info))
++ return true;
++
++ /*
++ * espfix is required for 16-bit data segments, but espfix
++ * only works for LDT segments.
++ */
++ if (!info->seg_32bit)
++ return false;
++
++ return true;
++}
++
+ static void set_tls_desc(struct task_struct *p, int idx,
+ const struct user_desc *info, int n)
+ {
+@@ -40,7 +76,7 @@ static void set_tls_desc(struct task_struct *p, int idx,
+ cpu = get_cpu();
+
+ while (n-- > 0) {
+- if (LDT_empty(info))
++ if (LDT_empty(info) || LDT_zero(info))
+ desc->a = desc->b = 0;
+ else
+ fill_ldt(desc, info);
+@@ -66,6 +102,9 @@ int do_set_thread_area(struct task_struct *p, int idx,
+ if (copy_from_user(&info, u_info, sizeof(info)))
+ return -EFAULT;
+
++ if (!tls_desc_okay(&info))
++ return -EINVAL;
++
+ if (idx == -1)
+ idx = info.entry_number;
+
+@@ -196,6 +235,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
+ {
+ struct user_desc infobuf[GDT_ENTRY_TLS_ENTRIES];
+ const struct user_desc *info;
++ int i;
+
+ if (pos >= GDT_ENTRY_TLS_ENTRIES * sizeof(struct user_desc) ||
+ (pos % sizeof(struct user_desc)) != 0 ||
+@@ -209,6 +249,10 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
+ else
+ info = infobuf;
+
++ for (i = 0; i < count / sizeof(struct user_desc); i++)
++ if (!tls_desc_okay(info + i))
++ return -EINVAL;
++
+ set_tls_desc(target,
+ GDT_ENTRY_TLS_MIN + (pos / sizeof(struct user_desc)),
+ info, count / sizeof(struct user_desc));
+diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
+index ff9281f16029..9bfe95fda57c 100644
+--- a/arch/x86/kernel/traps.c
++++ b/arch/x86/kernel/traps.c
+@@ -213,29 +213,41 @@ DO_ERROR(X86_TRAP_OLD_MF, SIGFPE, "coprocessor segment overrun",
+ coprocessor_segment_overrun)
+ DO_ERROR(X86_TRAP_TS, SIGSEGV, "invalid TSS", invalid_TSS)
+ DO_ERROR(X86_TRAP_NP, SIGBUS, "segment not present", segment_not_present)
+-#ifdef CONFIG_X86_32
+ DO_ERROR(X86_TRAP_SS, SIGBUS, "stack segment", stack_segment)
+-#endif
+ DO_ERROR_INFO(X86_TRAP_AC, SIGBUS, "alignment check", alignment_check,
+ BUS_ADRALN, 0)
+
+ #ifdef CONFIG_X86_64
+ /* Runs on IST stack */
+-dotraplinkage void do_stack_segment(struct pt_regs *regs, long error_code)
+-{
+- if (notify_die(DIE_TRAP, "stack segment", regs, error_code,
+- X86_TRAP_SS, SIGBUS) == NOTIFY_STOP)
+- return;
+- preempt_conditional_sti(regs);
+- do_trap(X86_TRAP_SS, SIGBUS, "stack segment", regs, error_code, NULL);
+- preempt_conditional_cli(regs);
+-}
+-
+ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
+ {
+ static const char str[] = "double fault";
+ struct task_struct *tsk = current;
+
++#ifdef CONFIG_X86_ESPFIX64
++ extern unsigned char native_irq_return_iret[];
++
++ /*
++ * If IRET takes a non-IST fault on the espfix64 stack, then we
++ * end up promoting it to a doublefault. In that case, modify
++ * the stack to make it look like we just entered the #GP
++ * handler from user space, similar to bad_iret.
++ */
++ if (((long)regs->sp >> PGDIR_SHIFT) == ESPFIX_PGD_ENTRY &&
++ regs->cs == __KERNEL_CS &&
++ regs->ip == (unsigned long)native_irq_return_iret)
++ {
++ struct pt_regs *normal_regs = task_pt_regs(current);
++
++ /* Fake a #GP(0) from userspace. */
++ memmove(&normal_regs->ip, (void *)regs->sp, 5*8);
++ normal_regs->orig_ax = 0; /* Missing (lost) #GP error code */
++ regs->ip = (unsigned long)general_protection;
++ regs->sp = (unsigned long)&normal_regs->orig_ax;
++ return;
++ }
++#endif
++
+ /* Return not checked because double check cannot be ignored */
+ notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);
+
+@@ -332,7 +344,7 @@ dotraplinkage void __kprobes do_int3(struct pt_regs *regs, long error_code)
+ * for scheduling or signal handling. The actual stack switch is done in
+ * entry.S
+ */
+-asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
++asmlinkage notrace __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
+ {
+ struct pt_regs *regs = eregs;
+ /* Did already sync */
+@@ -351,6 +363,35 @@ asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
+ *regs = *eregs;
+ return regs;
+ }
++
++struct bad_iret_stack {
++ void *error_entry_ret;
++ struct pt_regs regs;
++};
++
++asmlinkage notrace __kprobes
++struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
++{
++ /*
++ * This is called from entry_64.S early in handling a fault
++ * caused by a bad iret to user mode. To handle the fault
++ * correctly, we want move our stack frame to task_pt_regs
++ * and we want to pretend that the exception came from the
++ * iret target.
++ */
++ struct bad_iret_stack *new_stack =
++ container_of(task_pt_regs(current),
++ struct bad_iret_stack, regs);
++
++ /* Copy the IRET target to the new stack. */
++ memmove(&new_stack->regs.ip, (void *)s->regs.sp, 5*8);
++
++ /* Copy the remainder of the stack from the current stack. */
++ memmove(new_stack, s, offsetof(struct bad_iret_stack, regs.ip));
++
++ BUG_ON(!user_mode_vm(&new_stack->regs));
++ return new_stack;
++}
+ #endif
+
+ /*
+@@ -694,7 +735,7 @@ void __init trap_init(void)
+ set_intr_gate(X86_TRAP_OLD_MF, &coprocessor_segment_overrun);
+ set_intr_gate(X86_TRAP_TS, &invalid_TSS);
+ set_intr_gate(X86_TRAP_NP, &segment_not_present);
+- set_intr_gate_ist(X86_TRAP_SS, &stack_segment, STACKFAULT_STACK);
++ set_intr_gate(X86_TRAP_SS, stack_segment);
+ set_intr_gate(X86_TRAP_GP, &general_protection);
+ set_intr_gate(X86_TRAP_SPURIOUS, &spurious_interrupt_bug);
+ set_intr_gate(X86_TRAP_MF, &coprocessor_error);
+diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
+index fc0a147e3727..8652aa408ae0 100644
+--- a/arch/x86/kernel/tsc.c
++++ b/arch/x86/kernel/tsc.c
+@@ -959,14 +959,17 @@ void __init tsc_init(void)
+
+ x86_init.timers.tsc_pre_init();
+
+- if (!cpu_has_tsc)
++ if (!cpu_has_tsc) {
++ setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
+ return;
++ }
+
+ tsc_khz = x86_platform.calibrate_tsc();
+ cpu_khz = tsc_khz;
+
+ if (!tsc_khz) {
+ mark_tsc_unstable("could not calculate TSC khz");
++ setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
+ return;
+ }
+
+diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
+index 83756223f8aa..91e8680ec239 100644
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -459,11 +459,6 @@ register_address_increment(struct x86_emulate_ctxt *ctxt, unsigned long *reg, in
+ *reg = (*reg & ~ad_mask(ctxt)) | ((*reg + inc) & ad_mask(ctxt));
+ }
+
+-static inline void jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
+-{
+- register_address_increment(ctxt, &ctxt->_eip, rel);
+-}
+-
+ static u32 desc_limit_scaled(struct desc_struct *desc)
+ {
+ u32 limit = get_desc_limit(desc);
+@@ -537,6 +532,40 @@ static int emulate_nm(struct x86_emulate_ctxt *ctxt)
+ return emulate_exception(ctxt, NM_VECTOR, 0, false);
+ }
+
++static inline int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
++ int cs_l)
++{
++ switch (ctxt->op_bytes) {
++ case 2:
++ ctxt->_eip = (u16)dst;
++ break;
++ case 4:
++ ctxt->_eip = (u32)dst;
++ break;
++#ifdef CONFIG_X86_64
++ case 8:
++ if ((cs_l && is_noncanonical_address(dst)) ||
++ (!cs_l && (dst >> 32) != 0))
++ return emulate_gp(ctxt, 0);
++ ctxt->_eip = dst;
++ break;
++#endif
++ default:
++ WARN(1, "unsupported eip assignment size\n");
++ }
++ return X86EMUL_CONTINUE;
++}
++
++static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
++{
++ return assign_eip_far(ctxt, dst, ctxt->mode == X86EMUL_MODE_PROT64);
++}
++
++static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
++{
++ return assign_eip_near(ctxt, ctxt->_eip + rel);
++}
++
+ static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
+ {
+ u16 selector;
+@@ -1224,11 +1253,13 @@ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
+ }
+
+ /* Does not support long mode */
+-static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
+- u16 selector, int seg)
++static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
++ u16 selector, int seg, u8 cpl,
++ bool in_task_switch,
++ struct desc_struct *desc)
+ {
+ struct desc_struct seg_desc;
+- u8 dpl, rpl, cpl;
++ u8 dpl, rpl;
+ unsigned err_vec = GP_VECTOR;
+ u32 err_code = 0;
+ bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
+@@ -1279,7 +1310,6 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
+
+ rpl = selector & 3;
+ dpl = seg_desc.dpl;
+- cpl = ctxt->ops->cpl(ctxt);
+
+ switch (seg) {
+ case VCPU_SREG_SS:
+@@ -1336,12 +1366,21 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
+ }
+ load:
+ ctxt->ops->set_segment(ctxt, selector, &seg_desc, 0, seg);
++ if (desc)
++ *desc = seg_desc;
+ return X86EMUL_CONTINUE;
+ exception:
+ emulate_exception(ctxt, err_vec, err_code, true);
+ return X86EMUL_PROPAGATE_FAULT;
+ }
+
++static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
++ u16 selector, int seg)
++{
++ u8 cpl = ctxt->ops->cpl(ctxt);
++ return __load_segment_descriptor(ctxt, selector, seg, cpl, false, NULL);
++}
++
+ static void write_register_operand(struct operand *op)
+ {
+ /* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
+@@ -1681,17 +1720,31 @@ static int em_iret(struct x86_emulate_ctxt *ctxt)
+ static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
+ {
+ int rc;
+- unsigned short sel;
++ unsigned short sel, old_sel;
++ struct desc_struct old_desc, new_desc;
++ const struct x86_emulate_ops *ops = ctxt->ops;
++ u8 cpl = ctxt->ops->cpl(ctxt);
++
++ /* Assignment of RIP may only fail in 64-bit mode */
++ if (ctxt->mode == X86EMUL_MODE_PROT64)
++ ops->get_segment(ctxt, &old_sel, &old_desc, NULL,
++ VCPU_SREG_CS);
+
+ memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
+
+- rc = load_segment_descriptor(ctxt, sel, VCPU_SREG_CS);
++ rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, false,
++ &new_desc);
+ if (rc != X86EMUL_CONTINUE)
+ return rc;
+
+- ctxt->_eip = 0;
+- memcpy(&ctxt->_eip, ctxt->src.valptr, ctxt->op_bytes);
+- return X86EMUL_CONTINUE;
++ rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
++ if (rc != X86EMUL_CONTINUE) {
++ WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
++ /* assigning eip failed; restore the old cs */
++ ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS);
++ return rc;
++ }
++ return rc;
+ }
+
+ static int em_grp2(struct x86_emulate_ctxt *ctxt)
+@@ -1785,13 +1838,15 @@ static int em_grp45(struct x86_emulate_ctxt *ctxt)
+ case 2: /* call near abs */ {
+ long int old_eip;
+ old_eip = ctxt->_eip;
+- ctxt->_eip = ctxt->src.val;
++ rc = assign_eip_near(ctxt, ctxt->src.val);
++ if (rc != X86EMUL_CONTINUE)
++ break;
+ ctxt->src.val = old_eip;
+ rc = em_push(ctxt);
+ break;
+ }
+ case 4: /* jmp abs */
+- ctxt->_eip = ctxt->src.val;
++ rc = assign_eip_near(ctxt, ctxt->src.val);
+ break;
+ case 5: /* jmp far */
+ rc = em_jmp_far(ctxt);
+@@ -1823,26 +1878,43 @@ static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
+
+ static int em_ret(struct x86_emulate_ctxt *ctxt)
+ {
+- ctxt->dst.type = OP_REG;
+- ctxt->dst.addr.reg = &ctxt->_eip;
+- ctxt->dst.bytes = ctxt->op_bytes;
+- return em_pop(ctxt);
++ int rc;
++ unsigned long eip;
++
++ rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
++ if (rc != X86EMUL_CONTINUE)
++ return rc;
++
++ return assign_eip_near(ctxt, eip);
+ }
+
+ static int em_ret_far(struct x86_emulate_ctxt *ctxt)
+ {
+ int rc;
+- unsigned long cs;
++ unsigned long eip, cs;
++ u16 old_cs;
++ struct desc_struct old_desc, new_desc;
++ const struct x86_emulate_ops *ops = ctxt->ops;
++
++ if (ctxt->mode == X86EMUL_MODE_PROT64)
++ ops->get_segment(ctxt, &old_cs, &old_desc, NULL,
++ VCPU_SREG_CS);
+
+- rc = emulate_pop(ctxt, &ctxt->_eip, ctxt->op_bytes);
++ rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
+ if (rc != X86EMUL_CONTINUE)
+ return rc;
+- if (ctxt->op_bytes == 4)
+- ctxt->_eip = (u32)ctxt->_eip;
+ rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
+ if (rc != X86EMUL_CONTINUE)
+ return rc;
+- rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
++ rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, 0, false,
++ &new_desc);
++ if (rc != X86EMUL_CONTINUE)
++ return rc;
++ rc = assign_eip_far(ctxt, eip, new_desc.l);
++ if (rc != X86EMUL_CONTINUE) {
++ WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
++ ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
++ }
+ return rc;
+ }
+
+@@ -2091,7 +2163,7 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt)
+ {
+ struct x86_emulate_ops *ops = ctxt->ops;
+ struct desc_struct cs, ss;
+- u64 msr_data;
++ u64 msr_data, rcx, rdx;
+ int usermode;
+ u16 cs_sel = 0, ss_sel = 0;
+
+@@ -2107,6 +2179,9 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt)
+ else
+ usermode = X86EMUL_MODE_PROT32;
+
++ rcx = ctxt->regs[VCPU_REGS_RCX];
++ rdx = ctxt->regs[VCPU_REGS_RDX];
++
+ cs.dpl = 3;
+ ss.dpl = 3;
+ ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
+@@ -2124,6 +2199,9 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt)
+ ss_sel = cs_sel + 8;
+ cs.d = 0;
+ cs.l = 1;
++ if (is_noncanonical_address(rcx) ||
++ is_noncanonical_address(rdx))
++ return emulate_gp(ctxt, 0);
+ break;
+ }
+ cs_sel |= SELECTOR_RPL_MASK;
+@@ -2132,8 +2210,8 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt)
+ ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
+ ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
+
+- ctxt->_eip = ctxt->regs[VCPU_REGS_RDX];
+- ctxt->regs[VCPU_REGS_RSP] = ctxt->regs[VCPU_REGS_RCX];
++ ctxt->_eip = rdx;
++ ctxt->regs[VCPU_REGS_RSP] = rcx;
+
+ return X86EMUL_CONTINUE;
+ }
+@@ -2222,6 +2300,7 @@ static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
+ struct tss_segment_16 *tss)
+ {
+ int ret;
++ u8 cpl;
+
+ ctxt->_eip = tss->ip;
+ ctxt->eflags = tss->flag | 2;
+@@ -2244,23 +2323,30 @@ static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
+ set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
+ set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
+
++ cpl = tss->cs & 3;
++
+ /*
+ * Now load segment descriptors. If a fault happens at this stage
+ * it is handled in the context of the new task
+ */
+- ret = load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR);
++ ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
++ true, NULL);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+- ret = load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES);
++ ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
++ true, NULL);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+- ret = load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS);
++ ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
++ true, NULL);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+- ret = load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS);
++ ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
++ true, NULL);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+- ret = load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS);
++ ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
++ true, NULL);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+
+@@ -2339,6 +2425,7 @@ static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
+ struct tss_segment_32 *tss)
+ {
+ int ret;
++ u8 cpl;
+
+ if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
+ return emulate_gp(ctxt, 0);
+@@ -2357,7 +2444,8 @@ static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
+
+ /*
+ * SDM says that segment selectors are loaded before segment
+- * descriptors
++ * descriptors. This is important because CPL checks will
++ * use CS.RPL.
+ */
+ set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
+ set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
+@@ -2371,43 +2459,45 @@ static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
+ * If we're switching between Protected Mode and VM86, we need to make
+ * sure to update the mode before loading the segment descriptors so
+ * that the selectors are interpreted correctly.
+- *
+- * Need to get rflags to the vcpu struct immediately because it
+- * influences the CPL which is checked at least when loading the segment
+- * descriptors and when pushing an error code to the new kernel stack.
+- *
+- * TODO Introduce a separate ctxt->ops->set_cpl callback
+ */
+- if (ctxt->eflags & X86_EFLAGS_VM)
++ if (ctxt->eflags & X86_EFLAGS_VM) {
+ ctxt->mode = X86EMUL_MODE_VM86;
+- else
++ cpl = 3;
++ } else {
+ ctxt->mode = X86EMUL_MODE_PROT32;
+-
+- ctxt->ops->set_rflags(ctxt, ctxt->eflags);
++ cpl = tss->cs & 3;
++ }
+
+ /*
+ * Now load segment descriptors. If a fault happens at this stage
+ * it is handled in the context of the new task
+ */
+- ret = load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
++ ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
++ cpl, true, NULL);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+- ret = load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES);
++ ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
++ true, NULL);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+- ret = load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS);
++ ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
++ true, NULL);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+- ret = load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS);
++ ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
++ true, NULL);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+- ret = load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS);
++ ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
++ true, NULL);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+- ret = load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS);
++ ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
++ true, NULL);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+- ret = load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS);
++ ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
++ true, NULL);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+
+@@ -2629,10 +2719,13 @@ static int em_das(struct x86_emulate_ctxt *ctxt)
+
+ static int em_call(struct x86_emulate_ctxt *ctxt)
+ {
++ int rc;
+ long rel = ctxt->src.val;
+
+ ctxt->src.val = (unsigned long)ctxt->_eip;
+- jmp_rel(ctxt, rel);
++ rc = jmp_rel(ctxt, rel);
++ if (rc != X86EMUL_CONTINUE)
++ return rc;
+ return em_push(ctxt);
+ }
+
+@@ -2641,34 +2734,50 @@ static int em_call_far(struct x86_emulate_ctxt *ctxt)
+ u16 sel, old_cs;
+ ulong old_eip;
+ int rc;
++ struct desc_struct old_desc, new_desc;
++ const struct x86_emulate_ops *ops = ctxt->ops;
++ int cpl = ctxt->ops->cpl(ctxt);
+
+- old_cs = get_segment_selector(ctxt, VCPU_SREG_CS);
+ old_eip = ctxt->_eip;
++ ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
+
+ memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
+- if (load_segment_descriptor(ctxt, sel, VCPU_SREG_CS))
++ rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, false,
++ &new_desc);
++ if (rc != X86EMUL_CONTINUE)
+ return X86EMUL_CONTINUE;
+
+- ctxt->_eip = 0;
+- memcpy(&ctxt->_eip, ctxt->src.valptr, ctxt->op_bytes);
++ rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
++ if (rc != X86EMUL_CONTINUE)
++ goto fail;
+
+ ctxt->src.val = old_cs;
+ rc = em_push(ctxt);
+ if (rc != X86EMUL_CONTINUE)
+- return rc;
++ goto fail;
+
+ ctxt->src.val = old_eip;
+- return em_push(ctxt);
++ rc = em_push(ctxt);
++	/* If we failed, we tainted the memory, but at the very least we
++	   should restore cs */
++ if (rc != X86EMUL_CONTINUE)
++ goto fail;
++ return rc;
++fail:
++ ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
++ return rc;
++
+ }
+
+ static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
+ {
+ int rc;
++ unsigned long eip;
+
+- ctxt->dst.type = OP_REG;
+- ctxt->dst.addr.reg = &ctxt->_eip;
+- ctxt->dst.bytes = ctxt->op_bytes;
+- rc = emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
++ rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
++ if (rc != X86EMUL_CONTINUE)
++ return rc;
++ rc = assign_eip_near(ctxt, eip);
+ if (rc != X86EMUL_CONTINUE)
+ return rc;
+ register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], ctxt->src.val);
+@@ -2977,20 +3086,24 @@ static int em_lmsw(struct x86_emulate_ctxt *ctxt)
+
+ static int em_loop(struct x86_emulate_ctxt *ctxt)
+ {
++ int rc = X86EMUL_CONTINUE;
++
+ register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RCX], -1);
+ if ((address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) != 0) &&
+ (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
+- jmp_rel(ctxt, ctxt->src.val);
++ rc = jmp_rel(ctxt, ctxt->src.val);
+
+- return X86EMUL_CONTINUE;
++ return rc;
+ }
+
+ static int em_jcxz(struct x86_emulate_ctxt *ctxt)
+ {
++ int rc = X86EMUL_CONTINUE;
++
+ if (address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) == 0)
+- jmp_rel(ctxt, ctxt->src.val);
++ rc = jmp_rel(ctxt, ctxt->src.val);
+
+- return X86EMUL_CONTINUE;
++ return rc;
+ }
+
+ static int em_in(struct x86_emulate_ctxt *ctxt)
+@@ -4168,7 +4281,7 @@ special_insn:
+ break;
+ case 0x70 ... 0x7f: /* jcc (short) */
+ if (test_cc(ctxt->b, ctxt->eflags))
+- jmp_rel(ctxt, ctxt->src.val);
++ rc = jmp_rel(ctxt, ctxt->src.val);
+ break;
+ case 0x8d: /* lea r16/r32, m */
+ ctxt->dst.val = ctxt->src.addr.mem.ea;
+@@ -4207,7 +4320,7 @@ special_insn:
+ break;
+ case 0xe9: /* jmp rel */
+ case 0xeb: /* jmp rel short */
+- jmp_rel(ctxt, ctxt->src.val);
++ rc = jmp_rel(ctxt, ctxt->src.val);
+ ctxt->dst.type = OP_NONE; /* Disable writeback. */
+ break;
+ case 0xf4: /* hlt */
+@@ -4310,7 +4423,7 @@ twobyte_insn:
+ break;
+ case 0x80 ... 0x8f: /* jnz rel, etc*/
+ if (test_cc(ctxt->b, ctxt->eflags))
+- jmp_rel(ctxt, ctxt->src.val);
++ rc = jmp_rel(ctxt, ctxt->src.val);
+ break;
+ case 0x90 ... 0x9f: /* setcc r/m8 */
+ ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
+diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
+index d68f99df690c..db336f9f2c8c 100644
+--- a/arch/x86/kvm/i8254.c
++++ b/arch/x86/kvm/i8254.c
+@@ -263,8 +263,10 @@ void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu)
+ return;
+
+ timer = &pit->pit_state.pit_timer.timer;
++ mutex_lock(&pit->pit_state.lock);
+ if (hrtimer_cancel(timer))
+ hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
++ mutex_unlock(&pit->pit_state.lock);
+ }
+
+ static void destroy_pit_timer(struct kvm_pit *pit)
+diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
+index fd6dec6ffa47..84f4bca0ca2c 100644
+--- a/arch/x86/kvm/mmu.c
++++ b/arch/x86/kvm/mmu.c
+@@ -2842,7 +2842,7 @@ static void mmu_sync_roots(struct kvm_vcpu *vcpu)
+ if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+ return;
+
+- vcpu_clear_mmio_info(vcpu, ~0ul);
++ vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
+ kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);
+ if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) {
+ hpa_t root = vcpu->arch.mmu.root_hpa;
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index b567285efceb..86c74c0cd876 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -3212,7 +3212,7 @@ static int wrmsr_interception(struct vcpu_svm *svm)
+
+
+ svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
+- if (svm_set_msr(&svm->vcpu, ecx, data)) {
++ if (kvm_set_msr(&svm->vcpu, ecx, data)) {
+ trace_kvm_msr_write_ex(ecx, data);
+ kvm_inject_gp(&svm->vcpu, 0);
+ } else {
+@@ -3494,9 +3494,9 @@ static int handle_exit(struct kvm_vcpu *vcpu)
+
+ if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
+ || !svm_exit_handlers[exit_code]) {
+- kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
+- kvm_run->hw.hardware_exit_reason = exit_code;
+- return 0;
++ WARN_ONCE(1, "vmx: unexpected exit reason 0x%x\n", exit_code);
++ kvm_queue_exception(vcpu, UD_VECTOR);
++ return 1;
+ }
+
+ return svm_exit_handlers[exit_code](svm);
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index 617b00b4857b..2eb4e5af8816 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -388,6 +388,7 @@ struct vcpu_vmx {
+ u16 fs_sel, gs_sel, ldt_sel;
+ int gs_ldt_reload_needed;
+ int fs_reload_needed;
++ unsigned long vmcs_host_cr4; /* May not match real cr4 */
+ } host_state;
+ struct {
+ int vm86_active;
+@@ -2209,12 +2210,15 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
+ break;
+ msr = find_msr_entry(vmx, msr_index);
+ if (msr) {
++ u64 old_msr_data = msr->data;
+ msr->data = data;
+ if (msr - vmx->guest_msrs < vmx->save_nmsrs) {
+ preempt_disable();
+- kvm_set_shared_msr(msr->index, msr->data,
+- msr->mask);
++ ret = kvm_set_shared_msr(msr->index, msr->data,
++ msr->mask);
+ preempt_enable();
++ if (ret)
++ msr->data = old_msr_data;
+ }
+ break;
+ }
+@@ -3622,16 +3626,21 @@ static void vmx_disable_intercept_for_msr(u32 msr, bool longmode_only)
+ * Note that host-state that does change is set elsewhere. E.g., host-state
+ * that is set differently for each CPU is set in vmx_vcpu_load(), not here.
+ */
+-static void vmx_set_constant_host_state(void)
++static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
+ {
+ u32 low32, high32;
+ unsigned long tmpl;
+ struct desc_ptr dt;
++ unsigned long cr4;
+
+ vmcs_writel(HOST_CR0, read_cr0() | X86_CR0_TS); /* 22.2.3 */
+- vmcs_writel(HOST_CR4, read_cr4()); /* 22.2.3, 22.2.5 */
+ vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
+
++ /* Save the most likely value for this task's CR4 in the VMCS. */
++ cr4 = read_cr4();
++ vmcs_writel(HOST_CR4, cr4); /* 22.2.3, 22.2.5 */
++ vmx->host_state.vmcs_host_cr4 = cr4;
++
+ vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */
+ vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS); /* 22.2.4 */
+ vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS); /* 22.2.4 */
+@@ -3753,7 +3762,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
+
+ vmcs_write16(HOST_FS_SELECTOR, 0); /* 22.2.4 */
+ vmcs_write16(HOST_GS_SELECTOR, 0); /* 22.2.4 */
+- vmx_set_constant_host_state();
++ vmx_set_constant_host_state(vmx);
+ #ifdef CONFIG_X86_64
+ rdmsrl(MSR_FS_BASE, a);
+ vmcs_writel(HOST_FS_BASE, a); /* 22.2.4 */
+@@ -4539,7 +4548,7 @@ static int handle_wrmsr(struct kvm_vcpu *vcpu)
+ u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u)
+ | ((u64)(vcpu->arch.regs[VCPU_REGS_RDX] & -1u) << 32);
+
+- if (vmx_set_msr(vcpu, ecx, data) != 0) {
++ if (kvm_set_msr(vcpu, ecx, data) != 0) {
+ trace_kvm_msr_write_ex(ecx, data);
+ kvm_inject_gp(vcpu, 0);
+ return 1;
+@@ -5557,6 +5566,18 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu)
+ return 1;
+ }
+
++static int handle_invept(struct kvm_vcpu *vcpu)
++{
++ kvm_queue_exception(vcpu, UD_VECTOR);
++ return 1;
++}
++
++static int handle_invvpid(struct kvm_vcpu *vcpu)
++{
++ kvm_queue_exception(vcpu, UD_VECTOR);
++ return 1;
++}
++
+ /*
+ * The exit handlers return 1 if the exit was handled fully and guest execution
+ * may resume. Otherwise they set the kvm_run parameter to indicate what needs
+@@ -5599,6 +5620,8 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
+ [EXIT_REASON_PAUSE_INSTRUCTION] = handle_pause,
+ [EXIT_REASON_MWAIT_INSTRUCTION] = handle_invalid_op,
+ [EXIT_REASON_MONITOR_INSTRUCTION] = handle_invalid_op,
++ [EXIT_REASON_INVEPT] = handle_invept,
++ [EXIT_REASON_INVVPID] = handle_invvpid,
+ };
+
+ static const int kvm_vmx_max_exit_handlers =
+@@ -5783,6 +5806,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
+ case EXIT_REASON_VMPTRST: case EXIT_REASON_VMREAD:
+ case EXIT_REASON_VMRESUME: case EXIT_REASON_VMWRITE:
+ case EXIT_REASON_VMOFF: case EXIT_REASON_VMON:
++ case EXIT_REASON_INVEPT: case EXIT_REASON_INVVPID:
+ /*
+ * VMX instructions trap unconditionally. This allows L1 to
+ * emulate them for its L2 guest, i.e., allows 3-level nesting!
+@@ -5912,10 +5936,10 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
+ && kvm_vmx_exit_handlers[exit_reason])
+ return kvm_vmx_exit_handlers[exit_reason](vcpu);
+ else {
+- vcpu->run->exit_reason = KVM_EXIT_UNKNOWN;
+- vcpu->run->hw.hardware_exit_reason = exit_reason;
++ WARN_ONCE(1, "vmx: unexpected exit reason 0x%x\n", exit_reason);
++ kvm_queue_exception(vcpu, UD_VECTOR);
++ return 1;
+ }
+- return 0;
+ }
+
+ static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
+@@ -6101,6 +6125,7 @@ static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
+ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+ {
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
++ unsigned long cr4;
+
+ if (is_guest_mode(vcpu) && !vmx->nested.nested_run_pending) {
+ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+@@ -6131,6 +6156,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+ if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty))
+ vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
+
++ cr4 = read_cr4();
++ if (unlikely(cr4 != vmx->host_state.vmcs_host_cr4)) {
++ vmcs_writel(HOST_CR4, cr4);
++ vmx->host_state.vmcs_host_cr4 = cr4;
++ }
++
+ /* When single-stepping over STI and MOV SS, we must clear the
+ * corresponding interruptibility bits in the guest state. Otherwise
+ * vmentry fails as it then expects bit 14 (BS) in pending debug
+@@ -6589,7 +6620,7 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
+ * Other fields are different per CPU, and will be set later when
+ * vmx_vcpu_load() is called, and when vmx_save_host_state() is called.
+ */
+- vmx_set_constant_host_state();
++ vmx_set_constant_host_state(vmx);
+
+ /*
+ * HOST_RSP is normally set correctly in vmx_vcpu_run() just before
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 4b1be290f6e3..318a2454366f 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -220,19 +220,24 @@ static void kvm_shared_msr_cpu_online(void)
+ shared_msr_update(i, shared_msrs_global.msrs[i]);
+ }
+
+-void kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
++int kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
+ {
+ struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs);
++ int err;
+
+ if (((value ^ smsr->values[slot].curr) & mask) == 0)
+- return;
++ return 0;
+ smsr->values[slot].curr = value;
+- wrmsrl(shared_msrs_global.msrs[slot], value);
++ err = checking_wrmsrl(shared_msrs_global.msrs[slot], value);
++ if (err)
++ return 1;
++
+ if (!smsr->registered) {
+ smsr->urn.on_user_return = kvm_on_user_return;
+ user_return_notifier_register(&smsr->urn);
+ smsr->registered = true;
+ }
++ return 0;
+ }
+ EXPORT_SYMBOL_GPL(kvm_set_shared_msr);
+
+@@ -858,7 +863,6 @@ void kvm_enable_efer_bits(u64 mask)
+ }
+ EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
+
+-
+ /*
+ * Writes msr value into the appropriate "register".
+ * Returns 0 on success, non-0 otherwise.
+@@ -866,8 +870,34 @@ EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
+ */
+ int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
+ {
++ switch (msr_index) {
++ case MSR_FS_BASE:
++ case MSR_GS_BASE:
++ case MSR_KERNEL_GS_BASE:
++ case MSR_CSTAR:
++ case MSR_LSTAR:
++ if (is_noncanonical_address(data))
++ return 1;
++ break;
++ case MSR_IA32_SYSENTER_EIP:
++ case MSR_IA32_SYSENTER_ESP:
++ /*
++ * IA32_SYSENTER_ESP and IA32_SYSENTER_EIP cause #GP if a
++ * non-canonical address is written on Intel but not on
++ * AMD (which ignores the top 32 bits, because it does
++ * not implement 64-bit SYSENTER).
++ *
++ * 64-bit code should hence be able to write a non-canonical
++ * value on AMD. Making the address canonical ensures that
++ * vmentry does not fail on Intel after writing a non-canonical
++ * value, and that something deterministic happens if the guest
++ * invokes 64-bit SYSENTER.
++ */
++ data = get_canonical(data);
++ }
+ return kvm_x86_ops->set_msr(vcpu, msr_index, data);
+ }
++EXPORT_SYMBOL_GPL(kvm_set_msr);
+
+ /*
+ * Adapt set_msr() to msr_io()'s calling convention
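The canonical-address checks added to kvm_set_msr() above depend on two helpers. Assuming 48 virtual address bits, as the 3.x kernel hard-codes, they can be sketched in userspace C as:

#include <stdbool.h>
#include <stdint.h>

/* Sign-extend bit 47 into bits 63..48, assuming 48 virtual address
 * bits as the 3.x kernel hard-codes. */
static uint64_t get_canonical(uint64_t la)
{
    return (uint64_t)((int64_t)(la << 16) >> 16);
}

/* A canonical address is one the sign extension leaves unchanged. */
static bool is_noncanonical_address(uint64_t la)
{
    return get_canonical(la) != la;
}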
+diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
+index cb80c293cdd8..1ce5611fdb09 100644
+--- a/arch/x86/kvm/x86.h
++++ b/arch/x86/kvm/x86.h
+@@ -78,15 +78,23 @@ static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
+ vcpu->arch.mmio_gva = gva & PAGE_MASK;
+ vcpu->arch.access = access;
+ vcpu->arch.mmio_gfn = gfn;
++ vcpu->arch.mmio_gen = kvm_memslots(vcpu->kvm)->generation;
++}
++
++static inline bool vcpu_match_mmio_gen(struct kvm_vcpu *vcpu)
++{
++ return vcpu->arch.mmio_gen == kvm_memslots(vcpu->kvm)->generation;
+ }
+
+ /*
+- * Clear the mmio cache info for the given gva,
+- * specially, if gva is ~0ul, we clear all mmio cache info.
++ * Clear the mmio cache info for the given gva. If gva is MMIO_GVA_ANY, we
++ * clear all mmio cache info.
+ */
++#define MMIO_GVA_ANY (~(gva_t)0)
++
+ static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)
+ {
+- if (gva != (~0ul) && vcpu->arch.mmio_gva != (gva & PAGE_MASK))
++ if (gva != MMIO_GVA_ANY && vcpu->arch.mmio_gva != (gva & PAGE_MASK))
+ return;
+
+ vcpu->arch.mmio_gva = 0;
+@@ -94,7 +102,8 @@ static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)
+
+ static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva)
+ {
+- if (vcpu->arch.mmio_gva && vcpu->arch.mmio_gva == (gva & PAGE_MASK))
++ if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gva &&
++ vcpu->arch.mmio_gva == (gva & PAGE_MASK))
+ return true;
+
+ return false;
+@@ -102,7 +111,8 @@ static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva)
+
+ static inline bool vcpu_match_mmio_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
+ {
+- if (vcpu->arch.mmio_gfn && vcpu->arch.mmio_gfn == gpa >> PAGE_SHIFT)
++ if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gfn &&
++ vcpu->arch.mmio_gfn == gpa >> PAGE_SHIFT)
+ return true;
+
+ return false;
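The x86.h changes stamp each MMIO cache fill with the current memslot generation, so an entry silently expires when the memslot layout changes. The same generation-tagged cache pattern in a standalone sketch (names are illustrative, not KVM's):

#include <stdbool.h>
#include <stdint.h>

/* Generation-stamped cache entry: valid only while the global
 * generation has not moved since the entry was filled. */
struct gen_cache {
    uint64_t key;
    uint64_t value;
    uint64_t gen;                   /* generation at fill time */
};

static uint64_t global_gen;         /* bumped when the backing data changes */

static void cache_fill(struct gen_cache *c, uint64_t key, uint64_t value)
{
    c->key = key;
    c->value = value;
    c->gen = global_gen;
}

static bool cache_lookup(const struct gen_cache *c, uint64_t key,
                         uint64_t *value)
{
    if (c->gen != global_gen || c->key != key)
        return false;               /* stale or wrong key: a miss */
    *value = c->value;
    return true;
}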
+diff --git a/arch/xtensa/include/asm/unistd.h b/arch/xtensa/include/asm/unistd.h
+index 798ee6d285a1..7ab1f52f1fdd 100644
+--- a/arch/xtensa/include/asm/unistd.h
++++ b/arch/xtensa/include/asm/unistd.h
+@@ -394,7 +394,8 @@ __SYSCALL(174, sys_chroot, 1)
+ #define __NR_pivot_root 175
+ __SYSCALL(175, sys_pivot_root, 2)
+ #define __NR_umount 176
+-__SYSCALL(176, sys_umount, 2)
++__SYSCALL(176, sys_oldumount, 1)
++#define __ARCH_WANT_SYS_OLDUMOUNT
+ #define __NR_swapoff 177
+ __SYSCALL(177, sys_swapoff, 1)
+ #define __NR_sync 178
+diff --git a/block/blk-settings.c b/block/blk-settings.c
+index b74cc58bc038..14f1d3083cae 100644
+--- a/block/blk-settings.c
++++ b/block/blk-settings.c
+@@ -538,7 +538,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
+ bottom = max(b->physical_block_size, b->io_min) + alignment;
+
+ /* Verify that top and bottom intervals line up */
+- if (max(top, bottom) & (min(top, bottom) - 1)) {
++ if (max(top, bottom) % min(top, bottom)) {
+ t->misaligned = 1;
+ ret = -1;
+ }
+@@ -579,7 +579,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
+
+ /* Find lowest common alignment_offset */
+ t->alignment_offset = lcm(t->alignment_offset, alignment)
+- & (max(t->physical_block_size, t->io_min) - 1);
++ % max(t->physical_block_size, t->io_min);
+
+ /* Verify that new alignment_offset is on a logical block boundary */
+ if (t->alignment_offset & (t->logical_block_size - 1)) {
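Both blk_stack_limits() hunks replace a power-of-two mask with a modulo: x & (y - 1) computes x % y only when y is a power of two, and physical block sizes need not be (e.g. 4096 + 64 bytes of protection information = 4160). A small demonstration:

#include <stdio.h>

int main(void)
{
    unsigned top = 12480, bottom = 4160;  /* 4160 = 4096 + 64 PI bytes */

    /* Mask form: only a true remainder when the divisor is a power of two. */
    printf("mask:   %u\n", top & (bottom - 1));  /* 4096: bogus misalignment */
    /* Modulo form: correct for any divisor. */
    printf("modulo: %u\n", top % bottom);        /* 0: correctly aligned */
    return 0;
}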
+diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
+index 9a87daa6f4fb..f1c00c9aec1a 100644
+--- a/block/scsi_ioctl.c
++++ b/block/scsi_ioctl.c
+@@ -505,7 +505,7 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
+
+ if (bytes && blk_rq_map_kern(q, rq, buffer, bytes, __GFP_WAIT)) {
+ err = DRIVER_ERROR << 24;
+- goto out;
++ goto error;
+ }
+
+ memset(sense, 0, sizeof(sense));
+@@ -515,7 +515,6 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
+
+ blk_execute_rq(q, disk, rq, 0);
+
+-out:
+ err = rq->errors & 0xff; /* only 8 bit SCSI status */
+ if (err) {
+ if (rq->sense_len && rq->sense) {
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index d366a75e6705..ca9a287b5864 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -313,6 +313,11 @@ static const struct pci_device_id ahci_pci_tbl[] = {
+ { PCI_VDEVICE(INTEL, 0x8c87), board_ahci }, /* 9 Series RAID */
+ { PCI_VDEVICE(INTEL, 0x8c8e), board_ahci }, /* 9 Series RAID */
+ { PCI_VDEVICE(INTEL, 0x8c8f), board_ahci }, /* 9 Series RAID */
++ { PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H AHCI */
++ { PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H RAID */
++ { PCI_VDEVICE(INTEL, 0xa105), board_ahci }, /* Sunrise Point-H RAID */
++ { PCI_VDEVICE(INTEL, 0xa107), board_ahci }, /* Sunrise Point-H RAID */
++ { PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */
+
+ /* JMicron 360/1/3/5/6, match class to avoid IDE function */
+ { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
+diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
+index d8af325a6bda..3723e5ec2b4d 100644
+--- a/drivers/ata/libata-sff.c
++++ b/drivers/ata/libata-sff.c
+@@ -2008,13 +2008,15 @@ static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
+
+ DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
+
+- /* software reset. causes dev0 to be selected */
+- iowrite8(ap->ctl, ioaddr->ctl_addr);
+- udelay(20); /* FIXME: flush */
+- iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
+- udelay(20); /* FIXME: flush */
+- iowrite8(ap->ctl, ioaddr->ctl_addr);
+- ap->last_ctl = ap->ctl;
++ if (ap->ioaddr.ctl_addr) {
++ /* software reset. causes dev0 to be selected */
++ iowrite8(ap->ctl, ioaddr->ctl_addr);
++ udelay(20); /* FIXME: flush */
++ iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
++ udelay(20); /* FIXME: flush */
++ iowrite8(ap->ctl, ioaddr->ctl_addr);
++ ap->last_ctl = ap->ctl;
++ }
+
+ /* wait the port to become ready */
+ return ata_sff_wait_after_reset(&ap->link, devmask, deadline);
+@@ -2215,10 +2217,6 @@ void ata_sff_error_handler(struct ata_port *ap)
+
+ spin_unlock_irqrestore(ap->lock, flags);
+
+- /* ignore ata_sff_softreset if ctl isn't accessible */
+- if (softreset == ata_sff_softreset && !ap->ioaddr.ctl_addr)
+- softreset = NULL;
+-
+ /* ignore built-in hardresets if SCR access is not available */
+ if ((hardreset == sata_std_hardreset ||
+ hardreset == sata_sff_hardreset) && !sata_scr_valid(&ap->link))
+diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c
+index 71eaf385e970..5929dde07c91 100644
+--- a/drivers/ata/pata_serverworks.c
++++ b/drivers/ata/pata_serverworks.c
+@@ -252,12 +252,18 @@ static void serverworks_set_dmamode(struct ata_port *ap, struct ata_device *adev
+ pci_write_config_byte(pdev, 0x54, ultra_cfg);
+ }
+
+-static struct scsi_host_template serverworks_sht = {
++static struct scsi_host_template serverworks_osb4_sht = {
++ ATA_BMDMA_SHT(DRV_NAME),
++ .sg_tablesize = LIBATA_DUMB_MAX_PRD,
++};
++
++static struct scsi_host_template serverworks_csb_sht = {
+ ATA_BMDMA_SHT(DRV_NAME),
+ };
+
+ static struct ata_port_operations serverworks_osb4_port_ops = {
+ .inherits = &ata_bmdma_port_ops,
++ .qc_prep = ata_bmdma_dumb_qc_prep,
+ .cable_detect = serverworks_cable_detect,
+ .mode_filter = serverworks_osb4_filter,
+ .set_piomode = serverworks_set_piomode,
+@@ -266,6 +272,7 @@ static struct ata_port_operations serverworks_osb4_port_ops = {
+
+ static struct ata_port_operations serverworks_csb_port_ops = {
+ .inherits = &serverworks_osb4_port_ops,
++ .qc_prep = ata_bmdma_qc_prep,
+ .mode_filter = serverworks_csb_filter,
+ };
+
+@@ -405,6 +412,7 @@ static int serverworks_init_one(struct pci_dev *pdev, const struct pci_device_id
+ }
+ };
+ const struct ata_port_info *ppi[] = { &info[id->driver_data], NULL };
++ struct scsi_host_template *sht = &serverworks_csb_sht;
+ int rc;
+
+ rc = pcim_enable_device(pdev);
+@@ -418,6 +426,7 @@ static int serverworks_init_one(struct pci_dev *pdev, const struct pci_device_id
+ /* Select non UDMA capable OSB4 if we can't do fixups */
+ if (rc < 0)
+ ppi[0] = &info[1];
++ sht = &serverworks_osb4_sht;
+ }
+ /* setup CSB5/CSB6 : South Bridge and IDE option RAID */
+ else if ((pdev->device == PCI_DEVICE_ID_SERVERWORKS_CSB5IDE) ||
+@@ -434,7 +443,7 @@ static int serverworks_init_one(struct pci_dev *pdev, const struct pci_device_id
+ ppi[1] = &ata_dummy_port_info;
+ }
+
+- return ata_pci_bmdma_init_one(pdev, ppi, &serverworks_sht, NULL, 0);
++ return ata_pci_bmdma_init_one(pdev, ppi, sht, NULL, 0);
+ }
+
+ #ifdef CONFIG_PM
+diff --git a/drivers/base/core.c b/drivers/base/core.c
+index e28ce9898af4..32e86d6f141c 100644
+--- a/drivers/base/core.c
++++ b/drivers/base/core.c
+@@ -718,12 +718,12 @@ class_dir_create_and_add(struct class *class, struct kobject *parent_kobj)
+ return &dir->kobj;
+ }
+
++static DEFINE_MUTEX(gdp_mutex);
+
+ static struct kobject *get_device_parent(struct device *dev,
+ struct device *parent)
+ {
+ if (dev->class) {
+- static DEFINE_MUTEX(gdp_mutex);
+ struct kobject *kobj = NULL;
+ struct kobject *parent_kobj;
+ struct kobject *k;
+@@ -787,7 +787,9 @@ static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir)
+ glue_dir->kset != &dev->class->p->glue_dirs)
+ return;
+
++ mutex_lock(&gdp_mutex);
+ kobject_put(glue_dir);
++ mutex_unlock(&gdp_mutex);
+ }
+
+ static void cleanup_device_parent(struct device *dev)
+diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
+index 5401814c874d..b7a4fe586f8a 100644
+--- a/drivers/base/firmware_class.c
++++ b/drivers/base/firmware_class.c
+@@ -588,6 +588,9 @@ request_firmware(const struct firmware **firmware_p, const char *name,
+ struct firmware_priv *fw_priv;
+ int ret;
+
++ if (!name || name[0] == '\0')
++ return -EINVAL;
++
+ fw_priv = _request_firmware_prepare(firmware_p, name, device, true,
+ false);
+ if (IS_ERR_OR_NULL(fw_priv))
+diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
+index 8ab1eab90be7..1db12895110a 100644
+--- a/drivers/base/regmap/regmap-debugfs.c
++++ b/drivers/base/regmap/regmap-debugfs.c
+@@ -244,7 +244,12 @@ static const struct file_operations regmap_access_fops = {
+
+ void regmap_debugfs_init(struct regmap *map)
+ {
+- map->debugfs = debugfs_create_dir(dev_name(map->dev),
++ const char *devname = "dummy";
++
++ if (map->dev)
++ devname = dev_name(map->dev);
++
++ map->debugfs = debugfs_create_dir(devname,
+ regmap_debugfs_root);
+ if (!map->debugfs) {
+ dev_warn(map->dev, "Failed to create debugfs directory\n");
+diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
+index e5545427b46b..8e81f85b1ba0 100644
+--- a/drivers/base/regmap/regmap.c
++++ b/drivers/base/regmap/regmap.c
+@@ -600,6 +600,11 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
+ if (val_bytes == 1) {
+ wval = (void *)val;
+ } else {
++ if (!val_count) {
++ ret = -EINVAL;
++ goto out;
++ }
++
+ wval = kmemdup(val, val_count * val_bytes, GFP_KERNEL);
+ if (!wval) {
+ ret = -ENOMEM;
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 3cc242535012..155a61841e2b 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -304,6 +304,9 @@ static void btusb_intr_complete(struct urb *urb)
+ BT_ERR("%s corrupted event packet", hdev->name);
+ hdev->stat.err_rx++;
+ }
++ } else if (urb->status == -ENOENT) {
++ /* URB was canceled, e.g. by usb_kill_urb() during suspend; don't resubmit */
++ return;
+ }
+
+ if (!test_bit(BTUSB_INTR_RUNNING, &data->flags))
+@@ -392,6 +395,9 @@ static void btusb_bulk_complete(struct urb *urb)
+ BT_ERR("%s corrupted ACL packet", hdev->name);
+ hdev->stat.err_rx++;
+ }
++ } else if (urb->status == -ENOENT) {
++ /* URB was canceled, e.g. by usb_kill_urb() during suspend; don't resubmit */
++ return;
+ }
+
+ if (!test_bit(BTUSB_BULK_RUNNING, &data->flags))
+@@ -486,6 +492,9 @@ static void btusb_isoc_complete(struct urb *urb)
+ hdev->stat.err_rx++;
+ }
+ }
++ } else if (urb->status == -ENOENT) {
++ /* URB was canceled, e.g. by usb_kill_urb() during suspend; don't resubmit */
++ return;
+ }
+
+ if (!test_bit(BTUSB_ISOC_RUNNING, &data->flags))
+diff --git a/drivers/char/random.c b/drivers/char/random.c
+index 1052fc4cae66..85172faa1569 100644
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -932,8 +932,8 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
+ * pool while mixing, and hash one final time.
+ */
+ sha_transform(hash.w, extract, workspace);
+- memset(extract, 0, sizeof(extract));
+- memset(workspace, 0, sizeof(workspace));
++ memzero_explicit(extract, sizeof(extract));
++ memzero_explicit(workspace, sizeof(workspace));
+
+ /*
+ * In case the hash function has some recognizable output
+@@ -956,7 +956,7 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
+ }
+
+ memcpy(out, &hash, EXTRACT_SIZE);
+- memset(&hash, 0, sizeof(hash));
++ memzero_explicit(&hash, sizeof(hash));
+ }
+
+ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
+@@ -989,7 +989,7 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
+ }
+
+ /* Wipe data just returned from memory */
+- memset(tmp, 0, sizeof(tmp));
++ memzero_explicit(tmp, sizeof(tmp));
+
+ return ret;
+ }
+@@ -1027,7 +1027,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
+ }
+
+ /* Wipe data just returned from memory */
+- memset(tmp, 0, sizeof(tmp));
++ memzero_explicit(tmp, sizeof(tmp));
+
+ return ret;
+ }
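memzero_explicit() exists because a memset() of a buffer that is never read again is a dead store the compiler may legally delete, leaving key material in memory. A userspace approximation, assuming GCC/Clang extended inline assembly:

#include <string.h>

/* Userspace approximation of memzero_explicit(): the empty asm claims
 * to read *s, so the compiler cannot prove the memset is a dead store
 * and delete it. Assumes GCC/Clang extended asm. */
static void memzero_explicit(void *s, size_t count)
{
    memset(s, 0, count);
    __asm__ __volatile__("" : : "r"(s) : "memory");
}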
+diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
+index 7f2f149ae40f..cf864ef8d181 100644
+--- a/drivers/cpufreq/cpufreq.c
++++ b/drivers/cpufreq/cpufreq.c
+@@ -371,7 +371,18 @@ show_one(cpuinfo_max_freq, cpuinfo.max_freq);
+ show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
+ show_one(scaling_min_freq, min);
+ show_one(scaling_max_freq, max);
+-show_one(scaling_cur_freq, cur);
++
++static ssize_t show_scaling_cur_freq(
++ struct cpufreq_policy *policy, char *buf)
++{
++ ssize_t ret;
++
++ if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
++ ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
++ else
++ ret = sprintf(buf, "%u\n", policy->cur);
++ return ret;
++}
+
+ static int __cpufreq_set_policy(struct cpufreq_policy *data,
+ struct cpufreq_policy *policy);
+@@ -818,11 +829,11 @@ static int cpufreq_add_dev_interface(unsigned int cpu,
+ if (ret)
+ goto err_out_kobj_put;
+ }
+- if (cpufreq_driver->target) {
+- ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
+- if (ret)
+- goto err_out_kobj_put;
+- }
++
++ ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
++ if (ret)
++ goto err_out_kobj_put;
++
+ if (cpufreq_driver->bios_limit) {
+ ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
+ if (ret)
+diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
+index 73464a62adf7..0f0bf1a2ae1a 100644
+--- a/drivers/edac/mpc85xx_edac.c
++++ b/drivers/edac/mpc85xx_edac.c
+@@ -577,7 +577,8 @@ static int __devinit mpc85xx_l2_err_probe(struct platform_device *op)
+ if (edac_op_state == EDAC_OPSTATE_INT) {
+ pdata->irq = irq_of_parse_and_map(op->dev.of_node, 0);
+ res = devm_request_irq(&op->dev, pdata->irq,
+- mpc85xx_l2_isr, IRQF_DISABLED,
++ mpc85xx_l2_isr,
++ IRQF_DISABLED | IRQF_SHARED,
+ "[EDAC] L2 err", edac_dev);
+ if (res < 0) {
+ printk(KERN_ERR
+diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
+index b558810b2da0..b449572cb800 100644
+--- a/drivers/firewire/core-cdev.c
++++ b/drivers/firewire/core-cdev.c
+@@ -1619,8 +1619,7 @@ static int dispatch_ioctl(struct client *client,
+ _IOC_SIZE(cmd) > sizeof(buffer))
+ return -ENOTTY;
+
+- if (_IOC_DIR(cmd) == _IOC_READ)
+- memset(&buffer, 0, _IOC_SIZE(cmd));
++ memset(&buffer, 0, sizeof(buffer));
+
+ if (_IOC_DIR(cmd) & _IOC_WRITE)
+ if (copy_from_user(&buffer, arg, _IOC_SIZE(cmd)))
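The firewire hunk clears the whole ioctl buffer up front instead of only for _IOC_READ commands, so any bytes a handler leaves untouched cannot leak stale kernel stack contents. The general clear-then-copy shape, with a hypothetical dispatcher:

#include <string.h>

/* Hypothetical ioctl-style dispatcher: clear the whole scratch buffer
 * before use so bytes a handler never writes cannot carry stale stack
 * contents back to user space. */
int dispatch(const void *in, size_t size, char *scratch, size_t scratch_size)
{
    if (size > scratch_size)
        return -1;
    memset(scratch, 0, scratch_size);   /* all of it, not just 'size' */
    memcpy(scratch, in, size);          /* stand-in for copy_from_user() */
    /* ... handle the command, then copy the result back out ... */
    return 0;
}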
+diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
+index df62c393f2f5..01434ef9e00f 100644
+--- a/drivers/gpu/drm/radeon/evergreen.c
++++ b/drivers/gpu/drm/radeon/evergreen.c
+@@ -1176,6 +1176,7 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+ tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
+ WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
++ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+ }
+ } else {
+ tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+index 00fb5aa2bf77..7ca1d472d7cb 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+@@ -1915,6 +1915,14 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
+ };
+ int i;
++ u32 assumed_bpp = 2;
++
++ /*
++ * If using screen objects, then assume 32-bpp because that's what the
++ * SVGA device is assuming.
++ */
++ if (dev_priv->sou_priv)
++ assumed_bpp = 4;
+
+ /* Add preferred mode */
+ {
+@@ -1925,8 +1933,9 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector,
+ mode->vdisplay = du->pref_height;
+ vmw_guess_mode_timing(mode);
+
+- if (vmw_kms_validate_mode_vram(dev_priv, mode->hdisplay * 2,
+- mode->vdisplay)) {
++ if (vmw_kms_validate_mode_vram(dev_priv,
++ mode->hdisplay * assumed_bpp,
++ mode->vdisplay)) {
+ drm_mode_probed_add(connector, mode);
+ } else {
+ drm_mode_destroy(dev, mode);
+@@ -1948,7 +1957,8 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector,
+ bmode->vdisplay > max_height)
+ continue;
+
+- if (!vmw_kms_validate_mode_vram(dev_priv, bmode->hdisplay * 2,
++ if (!vmw_kms_validate_mode_vram(dev_priv,
++ bmode->hdisplay * assumed_bpp,
+ bmode->vdisplay))
+ continue;
+
+diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
+index f4c3d28cd1fc..3c8b2c473b81 100644
+--- a/drivers/hv/channel.c
++++ b/drivers/hv/channel.c
+@@ -207,8 +207,10 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
+ ret = vmbus_post_msg(open_msg,
+ sizeof(struct vmbus_channel_open_channel));
+
+- if (ret != 0)
++ if (ret != 0) {
++ err = ret;
+ goto error1;
++ }
+
+ t = wait_for_completion_timeout(&open_info->waitevent, 5*HZ);
+ if (t == 0) {
+@@ -400,7 +402,6 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
+ u32 next_gpadl_handle;
+ unsigned long flags;
+ int ret = 0;
+- int t;
+
+ next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
+ atomic_inc(&vmbus_connection.next_gpadl_handle);
+@@ -447,9 +448,7 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
+
+ }
+ }
+- t = wait_for_completion_timeout(&msginfo->waitevent, 5*HZ);
+- BUG_ON(t == 0);
+-
++ wait_for_completion(&msginfo->waitevent);
+
+ /* At this point, we received the gpadl created msg */
+ *gpadl_handle = gpadlmsg->gpadl;
+@@ -472,7 +471,7 @@ int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle)
+ struct vmbus_channel_gpadl_teardown *msg;
+ struct vmbus_channel_msginfo *info;
+ unsigned long flags;
+- int ret, t;
++ int ret;
+
+ info = kmalloc(sizeof(*info) +
+ sizeof(struct vmbus_channel_gpadl_teardown), GFP_KERNEL);
+@@ -494,11 +493,12 @@ int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle)
+ ret = vmbus_post_msg(msg,
+ sizeof(struct vmbus_channel_gpadl_teardown));
+
+- BUG_ON(ret != 0);
+- t = wait_for_completion_timeout(&info->waitevent, 5*HZ);
+- BUG_ON(t == 0);
++ if (ret)
++ goto post_msg_err;
++
++ wait_for_completion(&info->waitevent);
+
+- /* Received a torndown response */
++post_msg_err:
+ spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
+ list_del(&info->msglistentry);
+ spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+@@ -531,11 +531,28 @@ void vmbus_close(struct vmbus_channel *channel)
+
+ ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_close_channel));
+
+- BUG_ON(ret != 0);
++ if (ret) {
++ pr_err("Close failed: close post msg return is %d\n", ret);
++ /*
++ * If we failed to post the close msg, leaking the ring
++ * buffer is safer than freeing memory the host may still use.
++ */
++ return;
++ }
++
+ /* Tear down the gpadl for the channel's ring buffer */
+- if (channel->ringbuffer_gpadlhandle)
+- vmbus_teardown_gpadl(channel,
+- channel->ringbuffer_gpadlhandle);
++ if (channel->ringbuffer_gpadlhandle) {
++ ret = vmbus_teardown_gpadl(channel,
++ channel->ringbuffer_gpadlhandle);
++ if (ret) {
++ pr_err("Close failed: teardown gpadl return %d\n", ret);
++ /*
++ * If we failed to tear down the gpadl, leaking the ring
++ * buffer is safer than freeing memory the host may still use.
++ */
++ return;
++ }
++ }
+
+ /* Cleanup the ring buffers for this channel */
+ hv_ringbuffer_cleanup(&channel->outbound);
+@@ -543,8 +560,6 @@ void vmbus_close(struct vmbus_channel *channel)
+
+ free_pages((unsigned long)channel->ringbuffer_pages,
+ get_order(channel->ringbuffer_pagecount * PAGE_SIZE));
+-
+-
+ }
+ EXPORT_SYMBOL_GPL(vmbus_close);
+
+diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
+index 650c9f0b6642..2d52a1b15b35 100644
+--- a/drivers/hv/connection.c
++++ b/drivers/hv/connection.c
+@@ -294,10 +294,21 @@ int vmbus_post_msg(void *buffer, size_t buflen)
+ * insufficient resources. Retry the operation a couple of
+ * times before giving up.
+ */
+- while (retries < 3) {
+- ret = hv_post_message(conn_id, 1, buffer, buflen);
+- if (ret != HV_STATUS_INSUFFICIENT_BUFFERS)
++ while (retries < 10) {
++ ret = hv_post_message(conn_id, 1, buffer, buflen);
++
++ switch (ret) {
++ case HV_STATUS_INSUFFICIENT_BUFFERS:
++ ret = -ENOMEM;
++ case -ENOMEM:
++ break;
++ case HV_STATUS_SUCCESS:
+ return ret;
++ default:
++ pr_err("hv_post_msg() failed; error code:%d\n", ret);
++ return -EINVAL;
++ }
++
+ retries++;
+ msleep(100);
+ }
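The rewritten vmbus_post_msg() treats HV_STATUS_INSUFFICIENT_BUFFERS as a transient -ENOMEM and retries up to ten times with a 100 ms sleep between attempts. The loop's shape in plain C, with try_once() as a hypothetical stand-in for hv_post_message():

#include <errno.h>
#include <unistd.h>

/* Bounded retry for an operation that can fail transiently with
 * -ENOMEM; try_once() is a hypothetical stand-in for the real post. */
int post_with_retry(int (*try_once)(void))
{
    int retries, ret = -ENOMEM;

    for (retries = 0; retries < 10; retries++) {
        ret = try_once();
        if (ret != -ENOMEM)         /* success, or an error a retry won't fix */
            return ret;
        usleep(100 * 1000);         /* back off 100 ms, like msleep(100) */
    }
    return ret;                     /* still -ENOMEM after all retries */
}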
+diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
+index ff0b71a7e8e1..6bd8e81cfd9d 100644
+--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
++++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
+@@ -2146,6 +2146,7 @@ static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
+ if (!qp_init)
+ goto out;
+
++retry:
+ ch->cq = ib_create_cq(sdev->device, srpt_completion, NULL, ch,
+ ch->rq_size + srp_sq_size, 0);
+ if (IS_ERR(ch->cq)) {
+@@ -2169,6 +2170,13 @@ static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
+ ch->qp = ib_create_qp(sdev->pd, qp_init);
+ if (IS_ERR(ch->qp)) {
+ ret = PTR_ERR(ch->qp);
++ if (ret == -ENOMEM) {
++ srp_sq_size /= 2;
++ if (srp_sq_size >= MIN_SRPT_SQ_SIZE) {
++ ib_destroy_cq(ch->cq);
++ goto retry;
++ }
++ }
+ printk(KERN_ERR "failed to create_qp ret= %d\n", ret);
+ goto err_destroy_cq;
+ }
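When ib_create_qp() fails with -ENOMEM, the hunk above destroys the CQ, halves srp_sq_size, and retries until a minimum size is reached. That allocate-with-degradation pattern, sketched generically:

#include <errno.h>
#include <stddef.h>

#define MIN_SQ_SIZE 16              /* illustrative floor */

/* Retry an allocation with progressively smaller sizes until it
 * succeeds or the size drops below a usable minimum; alloc() is a
 * hypothetical stand-in returning 0 or -ENOMEM. */
int alloc_degrading(int (*alloc)(size_t), size_t size)
{
    int ret = -ENOMEM;

    for (; size >= MIN_SQ_SIZE; size /= 2) {
        ret = alloc(size);
        if (ret != -ENOMEM)         /* success, or a hard error */
            return ret;
    }
    return ret;
}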
+diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
+index 4c6a72d3d48c..9854a1ff5ab5 100644
+--- a/drivers/input/mouse/alps.c
++++ b/drivers/input/mouse/alps.c
+@@ -787,7 +787,13 @@ static psmouse_ret_t alps_process_byte(struct psmouse *psmouse)
+ struct alps_data *priv = psmouse->private;
+ const struct alps_model_info *model = priv->i;
+
+- if ((psmouse->packet[0] & 0xc8) == 0x08) { /* PS/2 packet */
++ /*
++ * Check if we are dealing with a bare PS/2 packet, presumably from
++ * a device connected to the external PS/2 port. Because the bare PS/2
++ * protocol does not have enough constant bits to self-synchronize
++ * properly, we only do this if the device is fully synchronized.
++ */
++ if (!psmouse->out_of_sync_cnt && (psmouse->packet[0] & 0xc8) == 0x08) {
+ if (psmouse->pktcnt == 3) {
+ alps_report_bare_ps2_packet(psmouse, psmouse->packet,
+ true);
+@@ -1619,6 +1625,9 @@ int alps_init(struct psmouse *psmouse)
+ /* We are having trouble resyncing ALPS touchpads so disable it for now */
+ psmouse->resync_time = 0;
+
++ /* Allow 2 invalid packets without resetting device */
++ psmouse->resetafter = psmouse->pktsize * 2;
++
+ return 0;
+
+ init_fail:
+diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
+index 32b1363f7ace..97e5f6f797b4 100644
+--- a/drivers/input/mouse/synaptics.c
++++ b/drivers/input/mouse/synaptics.c
+@@ -506,6 +506,8 @@ static void synaptics_parse_agm(const unsigned char buf[],
+ priv->agm_pending = true;
+ }
+
++static bool is_forcepad;
++
+ static int synaptics_parse_hw_state(const unsigned char buf[],
+ struct synaptics_data *priv,
+ struct synaptics_hw_state *hw)
+@@ -535,7 +537,7 @@ static int synaptics_parse_hw_state(const unsigned char buf[],
+ hw->left = (buf[0] & 0x01) ? 1 : 0;
+ hw->right = (buf[0] & 0x02) ? 1 : 0;
+
+- if (SYN_CAP_FORCEPAD(priv->ext_cap_0c)) {
++ if (is_forcepad) {
+ /*
+ * ForcePads, like Clickpads, use middle button
+ * bits to report primary button clicks.
+@@ -1512,6 +1514,18 @@ static const struct dmi_system_id min_max_dmi_table[] __initconst = {
+ { }
+ };
+
++static const struct dmi_system_id forcepad_dmi_table[] __initconst = {
++#if defined(CONFIG_DMI) && defined(CONFIG_X86)
++ {
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook Folio 1040 G1"),
++ },
++ },
++#endif
++ { }
++};
++
+ void __init synaptics_module_init(void)
+ {
+ const struct dmi_system_id *min_max_dmi;
+@@ -1522,6 +1536,12 @@ void __init synaptics_module_init(void)
+ min_max_dmi = dmi_first_match(min_max_dmi_table);
+ if (min_max_dmi)
+ quirk_min_max = min_max_dmi->driver_data;
++
++ /*
++ * Unfortunately ForcePad capability is not exported over PS/2,
++ * so we have to resort to checking DMI.
++ */
++ is_forcepad = dmi_check_system(forcepad_dmi_table);
+ }
+
+ static int __synaptics_init(struct psmouse *psmouse, bool absolute_mode)
+diff --git a/drivers/input/mouse/synaptics.h b/drivers/input/mouse/synaptics.h
+index ac1b77354cac..20d861b4e326 100644
+--- a/drivers/input/mouse/synaptics.h
++++ b/drivers/input/mouse/synaptics.h
+@@ -76,12 +76,9 @@
+ * for noise.
+ * 2 0x08 image sensor image sensor tracks 5 fingers, but only
+ * reports 2.
++ * 2 0x01 uniform clickpad whole clickpad moves instead of being
++ * hinged at the top.
+ * 2 0x20 report min query 0x0f gives min coord reported
+- * 2 0x80 forcepad forcepad is a variant of clickpad that
+- * does not have physical buttons but rather
+- * uses pressure above certain threshold to
+- * report primary clicks. Forcepads also have
+- * clickpad bit set.
+ */
+ #define SYN_CAP_CLICKPAD(ex0c) ((ex0c) & 0x100000) /* 1-button ClickPad */
+ #define SYN_CAP_CLICKPAD2BTN(ex0c) ((ex0c) & 0x000100) /* 2-button ClickPad */
+@@ -90,7 +87,6 @@
+ #define SYN_CAP_ADV_GESTURE(ex0c) ((ex0c) & 0x080000)
+ #define SYN_CAP_REDUCED_FILTERING(ex0c) ((ex0c) & 0x000400)
+ #define SYN_CAP_IMAGE_SENSOR(ex0c) ((ex0c) & 0x000800)
+-#define SYN_CAP_FORCEPAD(ex0c) ((ex0c) & 0x008000)
+
+ /* synaptics modes query bits */
+ #define SYN_MODE_ABSOLUTE(m) ((m) & (1 << 7))
+diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
+index 1291673bd57e..ce715b1bee46 100644
+--- a/drivers/input/serio/i8042-x86ia64io.h
++++ b/drivers/input/serio/i8042-x86ia64io.h
+@@ -101,6 +101,12 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
+ },
+ {
+ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "X750LN"),
++ },
++ },
++ {
++ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Compaq"),
+ DMI_MATCH(DMI_PRODUCT_NAME , "ProLiant"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "8500"),
+@@ -609,6 +615,22 @@ static const struct dmi_system_id __initconst i8042_dmi_notimeout_table[] = {
+ },
+ },
+ {
++ /* Fujitsu A544 laptop */
++ /* https://bugzilla.redhat.com/show_bug.cgi?id=1111138 */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK A544"),
++ },
++ },
++ {
++ /* Fujitsu AH544 laptop */
++ /* https://bugzilla.kernel.org/show_bug.cgi?id=69731 */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK AH544"),
++ },
++ },
++ {
+ /* Fujitsu U574 laptop */
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=69731 */
+ .matches = {
+diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
+index 6f99500790b3..535b09cd3cd8 100644
+--- a/drivers/md/dm-bufio.c
++++ b/drivers/md/dm-bufio.c
+@@ -467,6 +467,7 @@ static void __relink_lru(struct dm_buffer *b, int dirty)
+ b->list_mode = dirty;
+ list_del(&b->lru_list);
+ list_add(&b->lru_list, &c->lru[dirty]);
++ b->last_accessed = jiffies;
+ }
+
+ /*----------------------------------------------------------------
+@@ -1378,9 +1379,9 @@ static void drop_buffers(struct dm_bufio_client *c)
+
+ /*
+ * Test if the buffer is unused and too old, and commit it.
+- * At if noio is set, we must not do any I/O because we hold
+- * dm_bufio_clients_lock and we would risk deadlock if the I/O gets rerouted to
+- * different bufio client.
++ * And if GFP_NOFS is used, we must not do any I/O because we hold
++ * dm_bufio_clients_lock and we would risk deadlock if the I/O gets
++ * rerouted to a different bufio client.
+ */
+ static int __cleanup_old_buffer(struct dm_buffer *b, gfp_t gfp,
+ unsigned long max_jiffies)
+@@ -1388,7 +1389,7 @@ static int __cleanup_old_buffer(struct dm_buffer *b, gfp_t gfp,
+ if (jiffies - b->last_accessed < max_jiffies)
+ return 1;
+
+- if (!(gfp & __GFP_IO)) {
++ if (!(gfp & __GFP_FS)) {
+ if (test_bit(B_READING, &b->state) ||
+ test_bit(B_WRITING, &b->state) ||
+ test_bit(B_DIRTY, &b->state))
+@@ -1427,7 +1428,7 @@ static int shrink(struct shrinker *shrinker, struct shrink_control *sc)
+ unsigned long r;
+ unsigned long nr_to_scan = sc->nr_to_scan;
+
+- if (sc->gfp_mask & __GFP_IO)
++ if (sc->gfp_mask & __GFP_FS)
+ dm_bufio_lock(c);
+ else if (!dm_bufio_trylock(c))
+ return !nr_to_scan ? 0 : -1;
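The dm-bufio hunks gate blocking work on __GFP_FS rather than __GFP_IO: reclaim entered from a filesystem path (GFP_NOFS) may already hold locks the buffer I/O would need, so the shrinker must only trylock there. A simplified model of that gate:

#include <stdbool.h>

#define GFP_FS (1u << 0)            /* simplified: caller allows fs recursion */

/* Simplified model of the dm-bufio gate above: sleeping on the lock is
 * only safe when the allocation context allows fs recursion; under
 * GFP_NOFS the shrinker must trylock and skip the client if contended. */
bool shrink_prepare(unsigned gfp_mask,
                    bool (*lock)(void), bool (*trylock)(void))
{
    if (gfp_mask & GFP_FS)
        return lock();              /* may sleep */
    return trylock();               /* must never block */
}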
+diff --git a/drivers/md/dm-log-userspace-transfer.c b/drivers/md/dm-log-userspace-transfer.c
+index 08d9a207259a..c69d0b787746 100644
+--- a/drivers/md/dm-log-userspace-transfer.c
++++ b/drivers/md/dm-log-userspace-transfer.c
+@@ -272,7 +272,7 @@ int dm_ulog_tfr_init(void)
+
+ r = cn_add_callback(&ulog_cn_id, "dmlogusr", cn_ulog_callback);
+ if (r) {
+- cn_del_callback(&ulog_cn_id);
++ kfree(prealloced_cn_msg);
+ return r;
+ }
+
+diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
+index ead5ca99a749..5dea02cff622 100644
+--- a/drivers/md/dm-raid.c
++++ b/drivers/md/dm-raid.c
+@@ -592,8 +592,7 @@ struct dm_raid_superblock {
+ __le32 layout;
+ __le32 stripe_sectors;
+
+- __u8 pad[452]; /* Round struct to 512 bytes. */
+- /* Always set to 0 when writing. */
++ /* Remainder of a logical block is zero-filled when writing (see super_sync()). */
+ } __packed;
+
+ static int read_disk_sb(struct md_rdev *rdev, int size)
+@@ -628,7 +627,7 @@ static void super_sync(struct mddev *mddev, struct md_rdev *rdev)
+ if ((r->raid_disk >= 0) && test_bit(Faulty, &r->flags))
+ failed_devices |= (1ULL << r->raid_disk);
+
+- memset(sb, 0, sizeof(*sb));
++ memset(sb + 1, 0, rdev->sb_size - sizeof(*sb));
+
+ sb->magic = cpu_to_le32(DM_RAID_MAGIC);
+ sb->features = cpu_to_le32(0); /* No features yet */
+@@ -663,7 +662,11 @@ static int super_load(struct md_rdev *rdev, struct md_rdev *refdev)
+ uint64_t events_sb, events_refsb;
+
+ rdev->sb_start = 0;
+- rdev->sb_size = sizeof(*sb);
++ rdev->sb_size = bdev_logical_block_size(rdev->meta_bdev);
++ if (rdev->sb_size < sizeof(*sb) || rdev->sb_size > PAGE_SIZE) {
++ DMERR("superblock size of a logical block is no longer valid");
++ return -EINVAL;
++ }
+
+ ret = read_disk_sb(rdev, rdev->sb_size);
+ if (ret)
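super_load() now sizes the superblock I/O to the device's logical block size (a 4Kn disk cannot read 512 bytes), and super_sync() zeroes only the tail of that block beyond the struct. Note the pointer arithmetic: sb + 1 points one whole struct past sb. A sketch:

#include <string.h>

struct sb { unsigned char fields[512]; };   /* simplified fixed layout */

/* Zero only the tail of the logical block beyond the struct, as
 * super_sync() above does; sb + 1 points one whole struct past sb.
 * Caller guarantees sizeof(struct sb) <= sb_size <= page size. */
void zero_sb_tail(struct sb *sb, size_t sb_size)
{
    memset(sb + 1, 0, sb_size - sizeof(*sb));
}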
+diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
+index c2cdefa1651e..d4f7f9537db2 100644
+--- a/drivers/net/can/dev.c
++++ b/drivers/net/can/dev.c
+@@ -359,7 +359,7 @@ void can_free_echo_skb(struct net_device *dev, unsigned int idx)
+ BUG_ON(idx >= priv->echo_skb_max);
+
+ if (priv->echo_skb[idx]) {
+- kfree_skb(priv->echo_skb[idx]);
++ dev_kfree_skb_any(priv->echo_skb[idx]);
+ priv->echo_skb[idx] = NULL;
+ }
+ }
+diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
+index 09b1da5bc512..6c8423411dd8 100644
+--- a/drivers/net/can/usb/esd_usb2.c
++++ b/drivers/net/can/usb/esd_usb2.c
+@@ -1094,6 +1094,7 @@ static void esd_usb2_disconnect(struct usb_interface *intf)
+ }
+ }
+ unlink_all_urbs(dev);
++ kfree(dev);
+ }
+ }
+
+diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
+index f5b9de48bb82..c19f9447b200 100644
+--- a/drivers/net/macvtap.c
++++ b/drivers/net/macvtap.c
+@@ -17,6 +17,7 @@
+ #include <linux/idr.h>
+ #include <linux/fs.h>
+
++#include <net/ipv6.h>
+ #include <net/net_namespace.h>
+ #include <net/rtnetlink.h>
+ #include <net/sock.h>
+@@ -578,6 +579,8 @@ static int macvtap_skb_from_vnet_hdr(struct sk_buff *skb,
+ break;
+ case VIRTIO_NET_HDR_GSO_UDP:
+ gso_type = SKB_GSO_UDP;
++ if (skb->protocol == htons(ETH_P_IPV6))
++ ipv6_proxy_select_ident(skb);
+ break;
+ default:
+ return -EINVAL;
+@@ -634,6 +637,8 @@ static int macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ vnet_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
+ vnet_hdr->csum_start = skb_checksum_start_offset(skb);
++ if (vlan_tx_tag_present(skb))
++ vnet_hdr->csum_start += VLAN_HLEN;
+ vnet_hdr->csum_offset = skb->csum_offset;
+ } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
+ vnet_hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID;
+diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
+index 21d7151fb0ab..1207bb19ba58 100644
+--- a/drivers/net/ppp/ppp_generic.c
++++ b/drivers/net/ppp/ppp_generic.c
+@@ -588,7 +588,7 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ if (file == ppp->owner)
+ ppp_shutdown_interface(ppp);
+ }
+- if (atomic_long_read(&file->f_count) <= 2) {
++ if (atomic_long_read(&file->f_count) < 2) {
+ ppp_release(NULL, file);
+ err = 0;
+ } else
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index 5b1a1b51fdb0..84b95c9b15f6 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -64,6 +64,7 @@
+ #include <linux/nsproxy.h>
+ #include <linux/virtio_net.h>
+ #include <linux/rcupdate.h>
++#include <net/ipv6.h>
+ #include <net/net_namespace.h>
+ #include <net/netns/generic.h>
+ #include <net/rtnetlink.h>
+@@ -696,6 +697,8 @@ static ssize_t tun_get_user(struct tun_struct *tun,
+ break;
+ }
+
++ skb_reset_network_header(skb);
++
+ if (gso.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
+ pr_debug("GSO!\n");
+ switch (gso.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
+@@ -707,6 +710,8 @@ static ssize_t tun_get_user(struct tun_struct *tun,
+ break;
+ case VIRTIO_NET_HDR_GSO_UDP:
+ skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
++ if (skb->protocol == htons(ETH_P_IPV6))
++ ipv6_proxy_select_ident(skb);
+ break;
+ default:
+ tun->dev->stats.rx_frame_errors++;
+diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h
+index 063bfa8b91f4..9105493445bd 100644
+--- a/drivers/net/wireless/rt2x00/rt2800.h
++++ b/drivers/net/wireless/rt2x00/rt2800.h
+@@ -1751,7 +1751,7 @@ struct mac_iveiv_entry {
+ * 2 - drop tx power by 12dBm,
+ * 3 - increase tx power by 6dBm
+ */
+-#define BBP1_TX_POWER_CTRL FIELD8(0x07)
++#define BBP1_TX_POWER_CTRL FIELD8(0x03)
+ #define BBP1_TX_ANTENNA FIELD8(0x18)
+
+ /*
+diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
+index 664e93d2a682..49baf0cfe304 100644
+--- a/drivers/net/wireless/rt2x00/rt2800usb.c
++++ b/drivers/net/wireless/rt2x00/rt2800usb.c
+@@ -1081,6 +1081,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
+ /* Ovislink */
+ { USB_DEVICE(0x1b75, 0x3071) },
+ { USB_DEVICE(0x1b75, 0x3072) },
++ { USB_DEVICE(0x1b75, 0xa200) },
+ /* Para */
+ { USB_DEVICE(0x20b8, 0x8888) },
+ /* Pegatron */
+diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
+index 4d792a242c5e..c5bdbe94aec8 100644
+--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
++++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
+@@ -148,55 +148,29 @@ void rt2x00queue_align_frame(struct sk_buff *skb)
+ skb_trim(skb, frame_length);
+ }
+
+-void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int header_length)
++/*
++ * H/W needs L2 padding between the header and the payload if the
++ * header size is not 4-byte aligned.
++ */
++void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int hdr_len)
+ {
+- unsigned int payload_length = skb->len - header_length;
+- unsigned int header_align = ALIGN_SIZE(skb, 0);
+- unsigned int payload_align = ALIGN_SIZE(skb, header_length);
+- unsigned int l2pad = payload_length ? L2PAD_SIZE(header_length) : 0;
++ unsigned int l2pad = (skb->len > hdr_len) ? L2PAD_SIZE(hdr_len) : 0;
+
+- /*
+- * Adjust the header alignment if the payload needs to be moved more
+- * than the header.
+- */
+- if (payload_align > header_align)
+- header_align += 4;
+-
+- /* There is nothing to do if no alignment is needed */
+- if (!header_align)
++ if (!l2pad)
+ return;
+
+- /* Reserve the amount of space needed in front of the frame */
+- skb_push(skb, header_align);
+-
+- /*
+- * Move the header.
+- */
+- memmove(skb->data, skb->data + header_align, header_length);
+-
+- /* Move the payload, if present and if required */
+- if (payload_length && payload_align)
+- memmove(skb->data + header_length + l2pad,
+- skb->data + header_length + l2pad + payload_align,
+- payload_length);
+-
+- /* Trim the skb to the correct size */
+- skb_trim(skb, header_length + l2pad + payload_length);
++ skb_push(skb, l2pad);
++ memmove(skb->data, skb->data + l2pad, hdr_len);
+ }
+
+-void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int header_length)
++void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int hdr_len)
+ {
+- /*
+- * L2 padding is only present if the skb contains more than just the
+- * IEEE 802.11 header.
+- */
+- unsigned int l2pad = (skb->len > header_length) ?
+- L2PAD_SIZE(header_length) : 0;
++ unsigned int l2pad = (skb->len > hdr_len) ? L2PAD_SIZE(hdr_len) : 0;
+
+ if (!l2pad)
+ return;
+
+- memmove(skb->data + l2pad, skb->data, header_length);
++ memmove(skb->data + l2pad, skb->data, hdr_len);
+ skb_pull(skb, l2pad);
+ }
+
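L2PAD_SIZE(hdr_len) is the byte count that brings the payload to a 4-byte boundary after a hdr_len-byte header; the simplified insert_l2pad() above pushes the header forward by that amount rather than moving the usually larger payload. The computation, assuming the kernel's (-len) & 3 definition:

#include <stdio.h>

/* Bytes of padding needed after a hdr_len-byte header so the payload
 * starts 4-byte aligned; matches the kernel's (-len) & 3 definition. */
static unsigned l2pad_size(unsigned hdr_len)
{
    return (unsigned)(-(int)hdr_len) & 3;
}

int main(void)
{
    unsigned len;

    for (len = 24; len <= 32; len++)    /* typical 802.11 header sizes */
        printf("hdr %u -> pad %u\n", len, l2pad_size(len));
    return 0;
}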
+diff --git a/drivers/of/address.c b/drivers/of/address.c
+index 66d96f14c274..c059ce1dd338 100644
+--- a/drivers/of/address.c
++++ b/drivers/of/address.c
+@@ -328,6 +328,21 @@ static struct of_bus *of_match_bus(struct device_node *np)
+ return NULL;
+ }
+
++static int of_empty_ranges_quirk(void)
++{
++ if (IS_ENABLED(CONFIG_PPC)) {
++ /* To save cycles, we cache the result */
++ static int quirk_state = -1;
++
++ if (quirk_state < 0)
++ quirk_state =
++ of_machine_is_compatible("Power Macintosh") ||
++ of_machine_is_compatible("MacRISC");
++ return quirk_state;
++ }
++ return false;
++}
++
+ static int of_translate_one(struct device_node *parent, struct of_bus *bus,
+ struct of_bus *pbus, u32 *addr,
+ int na, int ns, int pna, const char *rprop)
+@@ -353,12 +368,10 @@ static int of_translate_one(struct device_node *parent, struct of_bus *bus,
+ * This code is only enabled on powerpc. --gcl
+ */
+ ranges = of_get_property(parent, rprop, &rlen);
+-#if !defined(CONFIG_PPC)
+- if (ranges == NULL) {
++ if (ranges == NULL && !of_empty_ranges_quirk()) {
+ pr_err("OF: no ranges; cannot translate\n");
+ return 1;
+ }
+-#endif /* !defined(CONFIG_PPC) */
+ if (ranges == NULL || rlen == 0) {
+ offset = of_read_number(addr, na);
+ memset(addr, 0, pna * 4);
+diff --git a/drivers/of/base.c b/drivers/of/base.c
+index 1c207f23b114..d439d0611559 100644
+--- a/drivers/of/base.c
++++ b/drivers/of/base.c
+@@ -716,52 +716,6 @@ int of_property_read_string(struct device_node *np, const char *propname,
+ EXPORT_SYMBOL_GPL(of_property_read_string);
+
+ /**
+- * of_property_read_string_index - Find and read a string from a multiple
+- * strings property.
+- * @np: device node from which the property value is to be read.
+- * @propname: name of the property to be searched.
+- * @index: index of the string in the list of strings
+- * @out_string: pointer to null terminated return string, modified only if
+- * return value is 0.
+- *
+- * Search for a property in a device tree node and retrieve a null
+- * terminated string value (pointer to data, not a copy) in the list of strings
+- * contained in that property.
+- * Returns 0 on success, -EINVAL if the property does not exist, -ENODATA if
+- * property does not have a value, and -EILSEQ if the string is not
+- * null-terminated within the length of the property data.
+- *
+- * The out_string pointer is modified only if a valid string can be decoded.
+- */
+-int of_property_read_string_index(struct device_node *np, const char *propname,
+- int index, const char **output)
+-{
+- struct property *prop = of_find_property(np, propname, NULL);
+- int i = 0;
+- size_t l = 0, total = 0;
+- const char *p;
+-
+- if (!prop)
+- return -EINVAL;
+- if (!prop->value)
+- return -ENODATA;
+- if (strnlen(prop->value, prop->length) >= prop->length)
+- return -EILSEQ;
+-
+- p = prop->value;
+-
+- for (i = 0; total < prop->length; total += l, p += l) {
+- l = strlen(p) + 1;
+- if (i++ == index) {
+- *output = p;
+- return 0;
+- }
+- }
+- return -ENODATA;
+-}
+-EXPORT_SYMBOL_GPL(of_property_read_string_index);
+-
+-/**
+ * of_property_match_string() - Find string in a list and return index
+ * @np: pointer to node containing string list property
+ * @propname: string list property name
+@@ -787,7 +741,7 @@ int of_property_match_string(struct device_node *np, const char *propname,
+ end = p + prop->length;
+
+ for (i = 0; p < end; i++, p += l) {
+- l = strlen(p) + 1;
++ l = strnlen(p, end - p) + 1;
+ if (p + l > end)
+ return -EILSEQ;
+ pr_debug("comparing %s with %s\n", string, p);
+@@ -799,39 +753,41 @@ int of_property_match_string(struct device_node *np, const char *propname,
+ EXPORT_SYMBOL_GPL(of_property_match_string);
+
+ /**
+- * of_property_count_strings - Find and return the number of strings from a
+- * multiple strings property.
++ * of_property_read_string_util() - Utility helper for parsing string properties
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
++ * @out_strs: output array of string pointers.
++ * @sz: number of array elements to read.
++ * @skip: Number of strings to skip over at beginning of list.
+ *
+- * Search for a property in a device tree node and retrieve the number of null
+- * terminated string contain in it. Returns the number of strings on
+- * success, -EINVAL if the property does not exist, -ENODATA if property
+- * does not have a value, and -EILSEQ if the string is not null-terminated
+- * within the length of the property data.
++ * Don't call this function directly. It is a utility helper for the
++ * of_property_read_string*() family of functions.
+ */
+-int of_property_count_strings(struct device_node *np, const char *propname)
++int of_property_read_string_helper(struct device_node *np, const char *propname,
++ const char **out_strs, size_t sz, int skip)
+ {
+ struct property *prop = of_find_property(np, propname, NULL);
+- int i = 0;
+- size_t l = 0, total = 0;
+- const char *p;
++ int l = 0, i = 0;
++ const char *p, *end;
+
+ if (!prop)
+ return -EINVAL;
+ if (!prop->value)
+ return -ENODATA;
+- if (strnlen(prop->value, prop->length) >= prop->length)
+- return -EILSEQ;
+-
+ p = prop->value;
++ end = p + prop->length;
+
+- for (i = 0; total < prop->length; total += l, p += l, i++)
+- l = strlen(p) + 1;
+-
+- return i;
++ for (i = 0; p < end && (!out_strs || i < skip + sz); i++, p += l) {
++ l = strnlen(p, end - p) + 1;
++ if (p + l > end)
++ return -EILSEQ;
++ if (out_strs && i >= skip)
++ *out_strs++ = p;
++ }
++ i -= skip;
++ return i <= 0 ? -ENODATA : i;
+ }
+-EXPORT_SYMBOL_GPL(of_property_count_strings);
++EXPORT_SYMBOL_GPL(of_property_read_string_helper);
+
+ /**
+ * of_parse_phandle - Resolve a phandle property to a device_node pointer
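of_property_read_string_helper() walks a single buffer of back-to-back NUL-terminated strings, optionally skipping some and optionally recording pointers, so the count and index variants become thin wrappers around it. A standalone sketch of the walk (error returns collapsed to -1 instead of -EILSEQ/-ENODATA):

#include <stddef.h>
#include <string.h>

/* Walk a buffer of back-to-back NUL-terminated strings in the style of
 * of_property_read_string_helper(): skip the first 'skip' strings, then
 * record up to 'sz' pointers in out_strs when it is non-NULL. Returns
 * the number of strings seen after the skip, or -1 on malformed data
 * or when nothing is left. */
int read_strings(const char *buf, size_t len,
                 const char **out_strs, int sz, int skip)
{
    const char *p = buf, *end = buf + len;
    int i, l;

    for (i = 0; p < end && (!out_strs || i < skip + sz); i++, p += l) {
        l = (int)strnlen(p, (size_t)(end - p)) + 1;
        if (p + l > end)
            return -1;              /* string runs off the buffer */
        if (out_strs && i >= skip)
            *out_strs++ = p;
    }
    i -= skip;
    return i <= 0 ? -1 : i;
}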
+diff --git a/drivers/of/selftest.c b/drivers/of/selftest.c
+index f24ffd7088d2..5a0771cc8987 100644
+--- a/drivers/of/selftest.c
++++ b/drivers/of/selftest.c
+@@ -120,8 +120,9 @@ static void __init of_selftest_parse_phandle_with_args(void)
+ pr_info("end - %s\n", passed_all ? "PASS" : "FAIL");
+ }
+
+-static void __init of_selftest_property_match_string(void)
++static void __init of_selftest_property_string(void)
+ {
++ const char *strings[4];
+ struct device_node *np;
+ int rc;
+
+@@ -139,13 +140,66 @@ static void __init of_selftest_property_match_string(void)
+ rc = of_property_match_string(np, "phandle-list-names", "third");
+ selftest(rc == 2, "third expected:0 got:%i\n", rc);
+ rc = of_property_match_string(np, "phandle-list-names", "fourth");
+- selftest(rc == -ENODATA, "unmatched string; rc=%i", rc);
++ selftest(rc == -ENODATA, "unmatched string; rc=%i\n", rc);
+ rc = of_property_match_string(np, "missing-property", "blah");
+- selftest(rc == -EINVAL, "missing property; rc=%i", rc);
++ selftest(rc == -EINVAL, "missing property; rc=%i\n", rc);
+ rc = of_property_match_string(np, "empty-property", "blah");
+- selftest(rc == -ENODATA, "empty property; rc=%i", rc);
++ selftest(rc == -ENODATA, "empty property; rc=%i\n", rc);
+ rc = of_property_match_string(np, "unterminated-string", "blah");
+- selftest(rc == -EILSEQ, "unterminated string; rc=%i", rc);
++ selftest(rc == -EILSEQ, "unterminated string; rc=%i\n", rc);
++
++ /* of_property_count_strings() tests */
++ rc = of_property_count_strings(np, "string-property");
++ selftest(rc == 1, "Incorrect string count; rc=%i\n", rc);
++ rc = of_property_count_strings(np, "phandle-list-names");
++ selftest(rc == 3, "Incorrect string count; rc=%i\n", rc);
++ rc = of_property_count_strings(np, "unterminated-string");
++ selftest(rc == -EILSEQ, "unterminated string; rc=%i\n", rc);
++ rc = of_property_count_strings(np, "unterminated-string-list");
++ selftest(rc == -EILSEQ, "unterminated string array; rc=%i\n", rc);
++
++ /* of_property_read_string_index() tests */
++ rc = of_property_read_string_index(np, "string-property", 0, strings);
++ selftest(rc == 0 && !strcmp(strings[0], "foobar"), "of_property_read_string_index() failure; rc=%i\n", rc);
++ strings[0] = NULL;
++ rc = of_property_read_string_index(np, "string-property", 1, strings);
++ selftest(rc == -ENODATA && strings[0] == NULL, "of_property_read_string_index() failure; rc=%i\n", rc);
++ rc = of_property_read_string_index(np, "phandle-list-names", 0, strings);
++ selftest(rc == 0 && !strcmp(strings[0], "first"), "of_property_read_string_index() failure; rc=%i\n", rc);
++ rc = of_property_read_string_index(np, "phandle-list-names", 1, strings);
++ selftest(rc == 0 && !strcmp(strings[0], "second"), "of_property_read_string_index() failure; rc=%i\n", rc);
++ rc = of_property_read_string_index(np, "phandle-list-names", 2, strings);
++ selftest(rc == 0 && !strcmp(strings[0], "third"), "of_property_read_string_index() failure; rc=%i\n", rc);
++ strings[0] = NULL;
++ rc = of_property_read_string_index(np, "phandle-list-names", 3, strings);
++ selftest(rc == -ENODATA && strings[0] == NULL, "of_property_read_string_index() failure; rc=%i\n", rc);
++ strings[0] = NULL;
++ rc = of_property_read_string_index(np, "unterminated-string", 0, strings);
++ selftest(rc == -EILSEQ && strings[0] == NULL, "of_property_read_string_index() failure; rc=%i\n", rc);
++ rc = of_property_read_string_index(np, "unterminated-string-list", 0, strings);
++ selftest(rc == 0 && !strcmp(strings[0], "first"), "of_property_read_string_index() failure; rc=%i\n", rc);
++ strings[0] = NULL;
++ rc = of_property_read_string_index(np, "unterminated-string-list", 2, strings); /* should fail */
++ selftest(rc == -EILSEQ && strings[0] == NULL, "of_property_read_string_index() failure; rc=%i\n", rc);
++ strings[1] = NULL;
++
++ /* of_property_read_string_array() tests */
++ rc = of_property_read_string_array(np, "string-property", strings, 4);
++ selftest(rc == 1, "Incorrect string count; rc=%i\n", rc);
++ rc = of_property_read_string_array(np, "phandle-list-names", strings, 4);
++ selftest(rc == 3, "Incorrect string count; rc=%i\n", rc);
++ rc = of_property_read_string_array(np, "unterminated-string", strings, 4);
++ selftest(rc == -EILSEQ, "unterminated string; rc=%i\n", rc);
++ /* -- An incorrectly formed string should cause a failure */
++ rc = of_property_read_string_array(np, "unterminated-string-list", strings, 4);
++ selftest(rc == -EILSEQ, "unterminated string array; rc=%i\n", rc);
++ /* -- parsing the correctly formed strings should still work: */
++ strings[2] = NULL;
++ rc = of_property_read_string_array(np, "unterminated-string-list", strings, 2);
++ selftest(rc == 2 && strings[2] == NULL, "of_property_read_string_array() failure; rc=%i\n", rc);
++ strings[1] = NULL;
++ rc = of_property_read_string_array(np, "phandle-list-names", strings, 1);
++ selftest(rc == 1 && strings[1] == NULL, "Overwrote end of string array; rc=%i, str='%s'\n", rc, strings[1]);
+ }
+
+ static int __init of_selftest(void)
+@@ -161,7 +215,7 @@ static int __init of_selftest(void)
+
+ pr_info("start of selftest - you will see error messages\n");
+ of_selftest_parse_phandle_with_args();
+- of_selftest_property_match_string();
++ of_selftest_property_string();
+ pr_info("end of selftest - %s\n", selftest_passed ? "PASS" : "FAIL");
+ return 0;
+ }
+diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
+index 9e39df969560..75dc402f0347 100644
+--- a/drivers/pci/hotplug/pciehp_core.c
++++ b/drivers/pci/hotplug/pciehp_core.c
+@@ -237,6 +237,13 @@ static int pciehp_probe(struct pcie_device *dev)
+ else if (pciehp_acpi_slot_detection_check(dev->port))
+ goto err_out_none;
+
++ if (!dev->port->subordinate) {
++ /* Can happen if we run out of bus numbers during probe */
++ dev_err(&dev->device,
++ "Hotplug bridge without secondary bus, ignoring\n");
++ goto err_out_none;
++ }
++
+ ctrl = pcie_init(dev);
+ if (!ctrl) {
+ dev_err(&dev->device, "Controller initialization failed\n");
+diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
+index a55e248618cd..985ada79191e 100644
+--- a/drivers/pci/pci-sysfs.c
++++ b/drivers/pci/pci-sysfs.c
+@@ -173,7 +173,7 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
+ {
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+
+- return sprintf(buf, "pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02x\n",
++ return sprintf(buf, "pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02X\n",
+ pci_dev->vendor, pci_dev->device,
+ pci_dev->subsystem_vendor, pci_dev->subsystem_device,
+ (u8)(pci_dev->class >> 16), (u8)(pci_dev->class >> 8),
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index 61bc33ed1116..e587d0035a74 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -28,6 +28,7 @@
+ #include <linux/ioport.h>
+ #include <linux/sched.h>
+ #include <linux/ktime.h>
++#include <linux/mm.h>
+ #include <asm/dma.h> /* isa_dma_bridge_buggy */
+ #include "pci.h"
+
+@@ -291,6 +292,25 @@ static void __devinit quirk_citrine(struct pci_dev *dev)
+ }
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, quirk_citrine);
+
++/* On IBM Crocodile ipr SAS adapters, expand BAR to system page size */
++static void quirk_extend_bar_to_page(struct pci_dev *dev)
++{
++ int i;
++
++ for (i = 0; i < PCI_STD_RESOURCE_END; i++) {
++ struct resource *r = &dev->resource[i];
++
++ if (r->flags & IORESOURCE_MEM && resource_size(r) < PAGE_SIZE) {
++ r->end = PAGE_SIZE - 1;
++ r->start = 0;
++ r->flags |= IORESOURCE_UNSET;
++ dev_info(&dev->dev, "expanded BAR %d to page size: %pR\n",
++ i, r);
++ }
++ }
++}
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM, 0x034a, quirk_extend_bar_to_page);
++
+ /*
+ * S3 868 and 968 chips report region size equal to 32M, but they decode 64M.
+ * If it's needed, re-allocate the region.
+diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
+index c1a3fd8e1243..4d047316e831 100644
+--- a/drivers/platform/x86/acer-wmi.c
++++ b/drivers/platform/x86/acer-wmi.c
+@@ -523,6 +523,17 @@ static const struct dmi_system_id video_vendor_dmi_table[] = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 4750"),
+ },
+ },
++ {
++ /*
++ * Note no video_set_backlight_video_vendor, we must use the
++ * acer interface, as there is no native backlight interface.
++ */
++ .ident = "Acer KAV80",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "KAV80"),
++ },
++ },
+ {}
+ };
+
+diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c
+index de9f432cf22d..28c1bdb2e59b 100644
+--- a/drivers/platform/x86/samsung-laptop.c
++++ b/drivers/platform/x86/samsung-laptop.c
+@@ -1517,6 +1517,16 @@ static struct dmi_system_id __initdata samsung_dmi_table[] = {
+ },
+ .driver_data = &samsung_broken_acpi_video,
+ },
++ {
++ .callback = samsung_dmi_matched,
++ .ident = "NC210",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "NC210/NC110"),
++ DMI_MATCH(DMI_BOARD_NAME, "NC210/NC110"),
++ },
++ .driver_data = &samsung_broken_acpi_video,
++ },
+ { },
+ };
+ MODULE_DEVICE_TABLE(dmi, samsung_dmi_table);
+diff --git a/drivers/power/charger-manager.c b/drivers/power/charger-manager.c
+index 4c449b26de46..102267fc713d 100644
+--- a/drivers/power/charger-manager.c
++++ b/drivers/power/charger-manager.c
+@@ -808,6 +808,11 @@ static int charger_manager_probe(struct platform_device *pdev)
+ goto err_no_charger_stat;
+ }
+
++ if (!desc->psy_fuel_gauge) {
++ dev_err(&pdev->dev, "No fuel gauge power supply defined\n");
++ return -EINVAL;
++ }
++
+ /* Counting index only */
+ while (desc->psy_charger_stat[i])
+ i++;
+diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
+index dcc39b612780..185971c2b41e 100644
+--- a/drivers/scsi/scsi_error.c
++++ b/drivers/scsi/scsi_error.c
+@@ -1679,8 +1679,10 @@ static void scsi_restart_operations(struct Scsi_Host *shost)
+ * is no point trying to lock the door of an off-line device.
+ */
+ shost_for_each_device(sdev, shost) {
+- if (scsi_device_online(sdev) && sdev->locked)
++ if (scsi_device_online(sdev) && sdev->was_reset && sdev->locked) {
+ scsi_eh_lock_door(sdev);
++ sdev->was_reset = 0;
++ }
+ }
+
+ /*
+diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c
+index b9f0192758d6..efc494a65b43 100644
+--- a/drivers/spi/spi-dw-mid.c
++++ b/drivers/spi/spi-dw-mid.c
+@@ -89,7 +89,10 @@ err_exit:
+
+ static void mid_spi_dma_exit(struct dw_spi *dws)
+ {
++ dmaengine_terminate_all(dws->txchan);
+ dma_release_channel(dws->txchan);
++
++ dmaengine_terminate_all(dws->rxchan);
+ dma_release_channel(dws->rxchan);
+ }
+
+@@ -136,7 +139,7 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
+ txconf.dst_addr = dws->dma_addr;
+ txconf.dst_maxburst = LNW_DMA_MSIZE_16;
+ txconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+- txconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
++ txconf.dst_addr_width = dws->dma_width;
+ txconf.device_fc = false;
+
+ txchan->device->device_control(txchan, DMA_SLAVE_CONFIG,
+@@ -159,7 +162,7 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
+ rxconf.src_addr = dws->dma_addr;
+ rxconf.src_maxburst = LNW_DMA_MSIZE_16;
+ rxconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+- rxconf.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
++ rxconf.src_addr_width = dws->dma_width;
+ rxconf.device_fc = false;
+
+ rxchan->device->device_control(rxchan, DMA_SLAVE_CONFIG,
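
The spi-dw-mid change above stops hardcoding a 16-bit DMA bus width and instead takes it from the configured frame size (dws->dma_width). A minimal userspace sketch of that mapping, with enum values mirroring linux/dmaengine.h (the helper name is illustrative, not from the driver):

    enum dma_slave_buswidth {
        DMA_SLAVE_BUSWIDTH_1_BYTE  = 1,
        DMA_SLAVE_BUSWIDTH_2_BYTES = 2,
        DMA_SLAVE_BUSWIDTH_4_BYTES = 4,
    };

    /* Pick a DMA bus width matching the SPI word size instead of
     * assuming 16-bit frames; illustrative only. */
    static enum dma_slave_buswidth spi_dma_width(unsigned bits_per_word)
    {
        if (bits_per_word <= 8)
            return DMA_SLAVE_BUSWIDTH_1_BYTE;
        if (bits_per_word <= 16)
            return DMA_SLAVE_BUSWIDTH_2_BYTES;
        return DMA_SLAVE_BUSWIDTH_4_BYTES;
    }

A mismatched width makes the DMA engine move the wrong number of bytes per SPI frame, corrupting transfers for anything other than 16-bit words.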
+diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
+index 469eb28e8328..e3b845ae93c6 100644
+--- a/drivers/spi/spi-pl022.c
++++ b/drivers/spi/spi-pl022.c
+@@ -1061,7 +1061,7 @@ err_rxdesc:
+ pl022->sgt_tx.nents, DMA_TO_DEVICE);
+ err_tx_sgmap:
+ dma_unmap_sg(rxchan->device->dev, pl022->sgt_rx.sgl,
+- pl022->sgt_tx.nents, DMA_FROM_DEVICE);
++ pl022->sgt_rx.nents, DMA_FROM_DEVICE);
+ err_rx_sgmap:
+ sg_free_table(&pl022->sgt_tx);
+ err_alloc_tx_sg:
+diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c
+index cd82b56d58af..2db80b1fda82 100644
+--- a/drivers/staging/iio/impedance-analyzer/ad5933.c
++++ b/drivers/staging/iio/impedance-analyzer/ad5933.c
+@@ -109,15 +109,44 @@ static struct ad5933_platform_data ad5933_default_pdata = {
+ };
+
+ static struct iio_chan_spec ad5933_channels[] = {
+- IIO_CHAN(IIO_TEMP, 0, 1, 1, NULL, 0, 0, 0,
+- 0, AD5933_REG_TEMP_DATA, IIO_ST('s', 14, 16, 0), 0),
+- /* Ring Channels */
+- IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, "real_raw", 0, 0,
+- IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
+- AD5933_REG_REAL_DATA, 0, IIO_ST('s', 16, 16, 0), 0),
+- IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, "imag_raw", 0, 0,
+- IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
+- AD5933_REG_IMAG_DATA, 1, IIO_ST('s', 16, 16, 0), 0),
++ {
++ .type = IIO_TEMP,
++ .indexed = 1,
++ .processed_val = 1,
++ .channel = 0,
++ .address = AD5933_REG_TEMP_DATA,
++ .scan_type = {
++ .sign = 's',
++ .realbits = 14,
++ .storagebits = 16,
++ },
++ }, { /* Ring Channels */
++ .type = IIO_VOLTAGE,
++ .indexed = 1,
++ .channel = 0,
++ .extend_name = "real",
++ .info_mask = IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
++ .address = AD5933_REG_REAL_DATA,
++ .scan_index = 0,
++ .scan_type = {
++ .sign = 's',
++ .realbits = 16,
++ .storagebits = 16,
++ },
++ }, {
++ .type = IIO_VOLTAGE,
++ .indexed = 1,
++ .channel = 0,
++ .extend_name = "imag",
++ .info_mask = IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
++ .address = AD5933_REG_IMAG_DATA,
++ .scan_index = 1,
++ .scan_type = {
++ .sign = 's',
++ .realbits = 16,
++ .storagebits = 16,
++ },
++ },
+ };
+
+ static int ad5933_i2c_write(struct i2c_client *client,
+diff --git a/drivers/staging/iio/meter/ade7758_ring.c b/drivers/staging/iio/meter/ade7758_ring.c
+index c45b23bb1229..629a6ed2c6ed 100644
+--- a/drivers/staging/iio/meter/ade7758_ring.c
++++ b/drivers/staging/iio/meter/ade7758_ring.c
+@@ -96,7 +96,7 @@ static int ade7758_ring_preenable(struct iio_dev *indio_dev)
+ size_t d_size;
+ unsigned channel;
+
+- if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength))
++ if (bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength))
+ return -EINVAL;
+
+ channel = find_first_bit(indio_dev->active_scan_mask,
+diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
+index 34df0b2a630e..b4b308ef6cf5 100644
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -3284,8 +3284,7 @@ static void transport_complete_qf(struct se_cmd *cmd)
+
+ if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
+ ret = cmd->se_tfo->queue_status(cmd);
+- if (ret)
+- goto out;
++ goto out;
+ }
+
+ switch (cmd->data_direction) {
+diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
+index d53f39668044..6f8f985e7805 100644
+--- a/drivers/tty/serial/8250/8250_pci.c
++++ b/drivers/tty/serial/8250/8250_pci.c
+@@ -1164,6 +1164,7 @@ pci_xr17c154_setup(struct serial_private *priv,
+ #define PCI_DEVICE_ID_PLX_CRONYX_OMEGA 0xc001
+ #define PCI_DEVICE_ID_INTEL_PATSBURG_KT 0x1d3d
+ #define PCI_DEVICE_ID_BROADCOM_TRUMANAGE 0x160a
++#define PCI_DEVICE_ID_INTEL_QRK_UART 0x0936
+
+ /* Unknown vendors/cards - this should not be in linux/pci_ids.h */
+ #define PCI_SUBDEVICE_ID_UNKNOWN_0x1584 0x1584
+@@ -1686,6 +1687,13 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
+ .init = pci_eg20t_init,
+ .setup = pci_default_setup,
+ },
++ {
++ .vendor = PCI_VENDOR_ID_INTEL,
++ .device = PCI_DEVICE_ID_INTEL_QRK_UART,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++ .setup = pci_default_setup,
++ },
+ /*
+ * Cronyx Omega PCI (PLX-chip based)
+ */
+@@ -1894,6 +1902,7 @@ enum pci_board_num_t {
+ pbn_ADDIDATA_PCIe_4_3906250,
+ pbn_ADDIDATA_PCIe_8_3906250,
+ pbn_ce4100_1_115200,
++ pbn_qrk,
+ pbn_omegapci,
+ pbn_NETMOS9900_2s_115200,
+ pbn_brcm_trumanage,
+@@ -2592,6 +2601,12 @@ static struct pciserial_board pci_boards[] __devinitdata = {
+ .base_baud = 921600,
+ .reg_shift = 2,
+ },
++ [pbn_qrk] = {
++ .flags = FL_BASE0,
++ .num_ports = 1,
++ .base_baud = 2764800,
++ .reg_shift = 2,
++ },
+ [pbn_omegapci] = {
+ .flags = FL_BASE0,
+ .num_ports = 8,
+@@ -4164,6 +4179,12 @@ static struct pci_device_id serial_pci_tbl[] = {
+ pbn_ce4100_1_115200 },
+
+ /*
++ * Intel Quark x1000
++ */
++ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_QRK_UART,
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++ pbn_qrk },
++ /*
+ * Cronyx Omega PCI
+ */
+ { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_CRONYX_OMEGA,
+diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
+index 4185cc5332ab..82aac2920e19 100644
+--- a/drivers/tty/serial/serial_core.c
++++ b/drivers/tty/serial/serial_core.c
+@@ -355,7 +355,7 @@ uart_get_baud_rate(struct uart_port *port, struct ktermios *termios,
+ * The spd_hi, spd_vhi, spd_shi, spd_warp kludge...
+ * Die! Die! Die!
+ */
+- if (baud == 38400)
++ if (try == 0 && baud == 38400)
+ baud = altbaud;
+
+ /*
+diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
+index b28d6356a142..a07eb4c068a0 100644
+--- a/drivers/tty/tty_io.c
++++ b/drivers/tty/tty_io.c
+@@ -1633,6 +1633,8 @@ int tty_release(struct inode *inode, struct file *filp)
+ int devpts;
+ int idx;
+ char buf[64];
++ long timeout = 0;
++ int once = 1;
+
+ if (tty_paranoia_check(tty, inode, __func__))
+ return 0;
+@@ -1713,11 +1715,18 @@ int tty_release(struct inode *inode, struct file *filp)
+ if (!do_sleep)
+ break;
+
+- printk(KERN_WARNING "%s: %s: read/write wait queue active!\n",
++ if (once) {
++ once = 0;
++ printk(KERN_WARNING "%s: %s: read/write wait queue active!\n",
+ __func__, tty_name(tty, buf));
++ }
+ tty_unlock();
+ mutex_unlock(&tty_mutex);
+- schedule();
++ schedule_timeout_killable(timeout);
++ if (timeout < 120 * HZ)
++ timeout = 2 * timeout + 1;
++ else
++ timeout = MAX_SCHEDULE_TIMEOUT;
+ }
+
+ /*
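
The tty_release() change above replaces an unbounded schedule() busy-wait with killable sleeps whose interval doubles up to a cap, and prints the warning only once. A self-contained sketch of the backoff sequence (HZ value assumed here; the kernel uses schedule_timeout_killable() and MAX_SCHEDULE_TIMEOUT):

    #include <stdio.h>

    #define HZ 100

    int main(void)
    {
        long timeout = 0;
        /* prints 0, 1, 3, 7, 15, ... ticks until the 120-second cap */
        while (timeout < 120 * HZ) {
            printf("sleep %ld ticks\n", timeout);
            timeout = 2 * timeout + 1;
        }
        return 0;
    }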
+diff --git a/drivers/tty/vt/consolemap.c b/drivers/tty/vt/consolemap.c
+index 8308fc7cdc26..87025d01aaec 100644
+--- a/drivers/tty/vt/consolemap.c
++++ b/drivers/tty/vt/consolemap.c
+@@ -518,6 +518,10 @@ int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list)
+
+ /* Save original vc_unipagdir_loc in case we allocate a new one */
+ p = (struct uni_pagedir *)*vc->vc_uni_pagedir_loc;
++
++ if (!p)
++ return -EINVAL;
++
+ if (p->readonly) return -EIO;
+
+ if (!ct) return 0;
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index 2f2540ff21f6..8f4a628d3382 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -910,11 +910,12 @@ static void acm_tty_set_termios(struct tty_struct *tty,
+ /* FIXME: Needs to clear unsupported bits in the termios */
+ acm->clocal = ((termios->c_cflag & CLOCAL) != 0);
+
+- if (!newline.dwDTERate) {
++ if (C_BAUD(tty) == B0) {
+ newline.dwDTERate = acm->line.dwDTERate;
+ newctrl &= ~ACM_CTRL_DTR;
+- } else
++ } else if (termios_old && (termios_old->c_cflag & CBAUD) == B0) {
+ newctrl |= ACM_CTRL_DTR;
++ }
+
+ if (newctrl != acm->ctrlout)
+ acm_set_control(acm, acm->ctrlout = newctrl);
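
The termios change above keys DTR handling off the requested baud rather than the device's line coding: B0 drops DTR (the POSIX hang-up convention), and DTR is raised again only on a transition away from B0. From userspace the same convention looks like this (standard termios calls; error handling is minimal for illustration):

    #include <termios.h>

    /* Request a modem hang-up: B0 tells the driver to deassert DTR. */
    int drop_dtr(int fd)
    {
        struct termios tio;

        if (tcgetattr(fd, &tio) < 0)
            return -1;
        cfsetispeed(&tio, B0);
        cfsetospeed(&tio, B0);
        return tcsetattr(fd, TCSANOW, &tio);
    }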
+@@ -1601,6 +1602,7 @@ static const struct usb_device_id acm_ids[] = {
+ { USB_DEVICE(0x0572, 0x1328), /* Shiro / Aztech USB MODEM UM-3100 */
+ .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
+ },
++ { USB_DEVICE(0x2184, 0x001c) }, /* GW Instek AFG-2225 */
+ { USB_DEVICE(0x22b8, 0x6425), /* Motorola MOTOMAGX phones */
+ },
+ /* Motorola H24 HSPA module: */
+diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
+index e2cc8df3d87b..6baeada782eb 100644
+--- a/drivers/usb/core/hcd.c
++++ b/drivers/usb/core/hcd.c
+@@ -1882,6 +1882,8 @@ int usb_alloc_streams(struct usb_interface *interface,
+ return -EINVAL;
+ if (dev->speed != USB_SPEED_SUPER)
+ return -EINVAL;
++ if (dev->state < USB_STATE_CONFIGURED)
++ return -ENODEV;
+
+ /* Streams only apply to bulk endpoints. */
+ for (i = 0; i < num_eps; i++)
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 62a9e44bfef6..93f2538b16cc 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -1638,8 +1638,10 @@ void usb_set_device_state(struct usb_device *udev,
+ || new_state == USB_STATE_SUSPENDED)
+ ; /* No change to wakeup settings */
+ else if (new_state == USB_STATE_CONFIGURED)
+- wakeup = udev->actconfig->desc.bmAttributes
+- & USB_CONFIG_ATT_WAKEUP;
++ wakeup = (udev->quirks &
++ USB_QUIRK_IGNORE_REMOTE_WAKEUP) ? 0 :
++ udev->actconfig->desc.bmAttributes &
++ USB_CONFIG_ATT_WAKEUP;
+ else
+ wakeup = 0;
+ }
+@@ -3359,6 +3361,9 @@ check_highspeed (struct usb_hub *hub, struct usb_device *udev, int port1)
+ struct usb_qualifier_descriptor *qual;
+ int status;
+
++ if (udev->quirks & USB_QUIRK_DEVICE_QUALIFIER)
++ return;
++
+ qual = kmalloc (sizeof *qual, GFP_KERNEL);
+ if (qual == NULL)
+ return;
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index bcde6f65b1c6..980a9d8c6504 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -88,6 +88,12 @@ static const struct usb_device_id usb_quirk_list[] = {
+ { USB_DEVICE(0x04e8, 0x6601), .driver_info =
+ USB_QUIRK_CONFIG_INTF_STRINGS },
+
++ { USB_DEVICE(0x04f3, 0x009b), .driver_info =
++ USB_QUIRK_DEVICE_QUALIFIER },
++
++ { USB_DEVICE(0x04f3, 0x016f), .driver_info =
++ USB_QUIRK_DEVICE_QUALIFIER },
++
+ /* Roland SC-8820 */
+ { USB_DEVICE(0x0582, 0x0007), .driver_info = USB_QUIRK_RESET_RESUME },
+
+@@ -158,6 +164,10 @@ static const struct usb_device_id usb_interface_quirk_list[] = {
+ { USB_VENDOR_AND_INTERFACE_INFO(0x046d, USB_CLASS_VIDEO, 1, 0),
+ .driver_info = USB_QUIRK_RESET_RESUME },
+
++ /* ASUS Base Station(T100) */
++ { USB_DEVICE(0x0b05, 0x17e0), .driver_info =
++ USB_QUIRK_IGNORE_REMOTE_WAKEUP },
++
+ { } /* terminating entry must be last */
+ };
+
+diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
+index 5bf2bc00821b..8a7a8ee176fa 100644
+--- a/drivers/usb/dwc3/ep0.c
++++ b/drivers/usb/dwc3/ep0.c
+@@ -209,7 +209,7 @@ static void dwc3_ep0_stall_and_restart(struct dwc3 *dwc)
+ struct dwc3_ep *dep = dwc->eps[0];
+
+ /* stall is always issued on EP0 */
+- __dwc3_gadget_ep_set_halt(dep, 1);
++ __dwc3_gadget_ep_set_halt(dep, 1, false);
+ dep->flags = DWC3_EP_ENABLED;
+ dwc->delayed_status = false;
+
+@@ -382,7 +382,7 @@ static int dwc3_ep0_handle_feature(struct dwc3 *dwc,
+ return -EINVAL;
+ if (set == 0 && (dep->flags & DWC3_EP_WEDGE))
+ break;
+- ret = __dwc3_gadget_ep_set_halt(dep, set);
++ ret = __dwc3_gadget_ep_set_halt(dep, set, true);
+ if (ret)
+ return -EINVAL;
+ break;
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index 895497d42270..1acb3a419539 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -485,12 +485,11 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
+ if (!usb_endpoint_xfer_isoc(desc))
+ return 0;
+
+- memset(&trb_link, 0, sizeof(trb_link));
+-
+ /* Link TRB for ISOC. The HWO bit is never reset */
+ trb_st_hw = &dep->trb_pool[0];
+
+ trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1];
++ memset(trb_link, 0, sizeof(*trb_link));
+
+ trb_link->bpl = lower_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
+ trb_link->bph = upper_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
+@@ -533,7 +532,7 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
+
+ /* make sure HW endpoint isn't stalled */
+ if (dep->flags & DWC3_EP_STALL)
+- __dwc3_gadget_ep_set_halt(dep, 0);
++ __dwc3_gadget_ep_set_halt(dep, 0, false);
+
+ reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
+ reg &= ~DWC3_DALEPENA_EP(dep->number);
+@@ -1078,7 +1077,7 @@ out0:
+ return ret;
+ }
+
+-int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value)
++int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
+ {
+ struct dwc3_gadget_ep_cmd_params params;
+ struct dwc3 *dwc = dep->dwc;
+@@ -1087,6 +1086,14 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value)
+ memset(&params, 0x00, sizeof(params));
+
+ if (value) {
++ if (!protocol && ((dep->direction && dep->flags & DWC3_EP_BUSY) ||
++ (!list_empty(&dep->req_queued) ||
++ !list_empty(&dep->request_list)))) {
++ dev_dbg(dwc->dev, "%s: pending request, cannot halt\n",
++ dep->name);
++ return -EAGAIN;
++ }
++
+ if (dep->number == 0 || dep->number == 1) {
+ /*
+ * Whenever EP0 is stalled, we will restart
+@@ -1135,7 +1142,7 @@ static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value)
+ goto out;
+ }
+
+- ret = __dwc3_gadget_ep_set_halt(dep, value);
++ ret = __dwc3_gadget_ep_set_halt(dep, value, false);
+ out:
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+diff --git a/drivers/usb/dwc3/gadget.h b/drivers/usb/dwc3/gadget.h
+index a8600084348c..6f498fc4f568 100644
+--- a/drivers/usb/dwc3/gadget.h
++++ b/drivers/usb/dwc3/gadget.h
+@@ -108,7 +108,7 @@ void dwc3_ep0_interrupt(struct dwc3 *dwc,
+ void dwc3_ep0_out_start(struct dwc3 *dwc);
+ int dwc3_gadget_ep0_queue(struct usb_ep *ep, struct usb_request *request,
+ gfp_t gfp_flags);
+-int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value);
++int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol);
+ int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
+ unsigned cmd, struct dwc3_gadget_ep_cmd_params *params);
+
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index 8882d654b0d1..c8835d591b37 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -118,20 +118,6 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ xhci->quirks |= XHCI_SPURIOUS_REBOOT;
+ xhci->quirks |= XHCI_AVOID_BEI;
+ }
+- if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+- (pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI ||
+- pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI)) {
+- /* Workaround for occasional spurious wakeups from S5 (or
+- * any other sleep) on Haswell machines with LPT and LPT-LP
+- * with the new Intel BIOS
+- */
+- /* Limit the quirk to only known vendors, as this triggers
+- * yet another BIOS bug on some other machines
+- * https://bugzilla.kernel.org/show_bug.cgi?id=66171
+- */
+- if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP)
+- xhci->quirks |= XHCI_SPURIOUS_WAKEUP;
+- }
+ if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
+ pdev->device == PCI_DEVICE_ID_ASROCK_P67) {
+ xhci->quirks |= XHCI_RESET_ON_RESUME;
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index ac339570a805..19074db60896 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -128,6 +128,7 @@ static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */
+ { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */
+ { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */
++ { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */
+ { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
+ { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
+ { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
+@@ -160,7 +161,9 @@ static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
+ { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
+ { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
++ { USB_DEVICE(0x1BA4, 0x0002) }, /* Silicon Labs 358x factory default */
+ { USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */
++ { USB_DEVICE(0x1D6F, 0x0010) }, /* Seluxit ApS RF Dongle */
+ { USB_DEVICE(0x1E29, 0x0102) }, /* Festo CPX-USB */
+ { USB_DEVICE(0x1E29, 0x0501) }, /* Festo CMSP */
+ { USB_DEVICE(0x1FB9, 0x0100) }, /* Lake Shore Model 121 Current Source */
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index 8425e9e9e127..a89433bd5314 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -156,6 +156,7 @@ static struct ftdi_sio_quirk ftdi_8u2232c_quirk = {
+ * /sys/bus/usb/ftdi_sio/new_id, then send patch/report!
+ */
+ static struct usb_device_id id_table_combined [] = {
++ { USB_DEVICE(FTDI_VID, FTDI_BRICK_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_ZEITCONTROL_TAGTRACE_MIFARE_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_CTI_MINI_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_CTI_NANO_PID) },
+@@ -685,6 +686,10 @@ static struct usb_device_id id_table_combined [] = {
+ { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_5_PID) },
+ { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_6_PID) },
+ { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_7_PID) },
++ { USB_DEVICE(XSENS_VID, XSENS_AWINDA_DONGLE_PID) },
++ { USB_DEVICE(XSENS_VID, XSENS_AWINDA_STATION_PID) },
++ { USB_DEVICE(XSENS_VID, XSENS_CONVERTER_PID) },
++ { USB_DEVICE(XSENS_VID, XSENS_MTW_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_OMNI1509) },
+ { USB_DEVICE(MOBILITY_VID, MOBILITY_USB_SERIAL_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_ACTIVE_ROBOTS_PID) },
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index 7628b91017ba..64ee791687d9 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -30,6 +30,12 @@
+
+ /*** third-party PIDs (using FTDI_VID) ***/
+
++/*
++ * Certain versions of the official Windows FTDI driver reprogrammed
++ * counterfeit FTDI devices to PID 0. Support these devices anyway.
++ */
++#define FTDI_BRICK_PID 0x0000
++
+ #define FTDI_LUMEL_PD12_PID 0x6002
+
+ /*
+@@ -142,12 +148,19 @@
+ /*
+ * Xsens Technologies BV products (http://www.xsens.com).
+ */
+-#define XSENS_CONVERTER_0_PID 0xD388
+-#define XSENS_CONVERTER_1_PID 0xD389
++#define XSENS_VID 0x2639
++#define XSENS_AWINDA_STATION_PID 0x0101
++#define XSENS_AWINDA_DONGLE_PID 0x0102
++#define XSENS_MTW_PID 0x0200 /* Xsens MTw */
++#define XSENS_CONVERTER_PID 0xD00D /* Xsens USB-serial converter */
++
++/* Xsens devices using FTDI VID */
++#define XSENS_CONVERTER_0_PID 0xD388 /* Xsens USB converter */
++#define XSENS_CONVERTER_1_PID 0xD389 /* Xsens Wireless Receiver */
+ #define XSENS_CONVERTER_2_PID 0xD38A
+-#define XSENS_CONVERTER_3_PID 0xD38B
+-#define XSENS_CONVERTER_4_PID 0xD38C
+-#define XSENS_CONVERTER_5_PID 0xD38D
++#define XSENS_CONVERTER_3_PID 0xD38B /* Xsens USB-serial converter */
++#define XSENS_CONVERTER_4_PID 0xD38C /* Xsens Wireless Receiver */
++#define XSENS_CONVERTER_5_PID 0xD38D /* Xsens Awinda Station */
+ #define XSENS_CONVERTER_6_PID 0xD38E
+ #define XSENS_CONVERTER_7_PID 0xD38F
+
+diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c
+index 4a9a75eb9b95..c3a53acda67a 100644
+--- a/drivers/usb/serial/kobil_sct.c
++++ b/drivers/usb/serial/kobil_sct.c
+@@ -447,7 +447,7 @@ static int kobil_write(struct tty_struct *tty, struct usb_serial_port *port,
+ );
+
+ priv->cur_pos = priv->cur_pos + length;
+- result = usb_submit_urb(port->write_urb, GFP_NOIO);
++ result = usb_submit_urb(port->write_urb, GFP_ATOMIC);
+ dbg("%s - port %d Send write URB returns: %i",
+ __func__, port->number, result);
+ todo = priv->filled - priv->cur_pos;
+@@ -463,7 +463,7 @@ static int kobil_write(struct tty_struct *tty, struct usb_serial_port *port,
+ if (priv->device_type == KOBIL_ADAPTER_B_PRODUCT_ID ||
+ priv->device_type == KOBIL_ADAPTER_K_PRODUCT_ID) {
+ result = usb_submit_urb(port->interrupt_in_urb,
+- GFP_NOIO);
++ GFP_ATOMIC);
+ dbg("%s - port %d Send read URB returns: %i",
+ __func__, port->number, result);
+ }
+diff --git a/drivers/usb/serial/opticon.c b/drivers/usb/serial/opticon.c
+index 1f850065d159..58b7cecd682f 100644
+--- a/drivers/usb/serial/opticon.c
++++ b/drivers/usb/serial/opticon.c
+@@ -293,7 +293,7 @@ static int opticon_write(struct tty_struct *tty, struct usb_serial_port *port,
+
+ /* The connected devices do not have a bulk write endpoint,
+ * to transmit data to the barcode device the control endpoint is used */
+- dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO);
++ dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_ATOMIC);
+ if (!dr) {
+ dev_err(&port->dev, "out of memory\n");
+ count = -ENOMEM;
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 703ebe7eaa93..d8232df2c211 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -269,6 +269,7 @@ static void option_instat_callback(struct urb *urb);
+ #define TELIT_PRODUCT_DE910_DUAL 0x1010
+ #define TELIT_PRODUCT_UE910_V2 0x1012
+ #define TELIT_PRODUCT_LE920 0x1200
++#define TELIT_PRODUCT_LE910 0x1201
+
+ /* ZTE PRODUCTS */
+ #define ZTE_VENDOR_ID 0x19d2
+@@ -362,6 +363,7 @@ static void option_instat_callback(struct urb *urb);
+
+ /* Haier products */
+ #define HAIER_VENDOR_ID 0x201e
++#define HAIER_PRODUCT_CE81B 0x10f8
+ #define HAIER_PRODUCT_CE100 0x2009
+
+ /* Cinterion (formerly Siemens) products */
+@@ -589,6 +591,11 @@ static const struct option_blacklist_info zte_1255_blacklist = {
+ .reserved = BIT(3) | BIT(4),
+ };
+
++static const struct option_blacklist_info telit_le910_blacklist = {
++ .sendsetup = BIT(0),
++ .reserved = BIT(1) | BIT(2),
++};
++
+ static const struct option_blacklist_info telit_le920_blacklist = {
+ .sendsetup = BIT(0),
+ .reserved = BIT(1) | BIT(5),
+@@ -1138,6 +1145,8 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_SINGLE) },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_DE910_DUAL) },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UE910_V2) },
++ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
++ .driver_info = (kernel_ulong_t)&telit_le910_blacklist },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),
+ .driver_info = (kernel_ulong_t)&telit_le920_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
+@@ -1614,6 +1623,7 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE(LONGCHEER_VENDOR_ID, ZOOM_PRODUCT_4597) },
+ { USB_DEVICE(LONGCHEER_VENDOR_ID, IBALL_3_5G_CONNECT) },
+ { USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) },
++ { USB_DEVICE_AND_INTERFACE_INFO(HAIER_VENDOR_ID, HAIER_PRODUCT_CE81B, 0xff, 0xff, 0xff) },
+ /* Pirelli */
+ { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_C100_1)},
+ { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_C100_2)},
+diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
+index c70109e5d60b..d8d26f4f14dd 100644
+--- a/drivers/usb/storage/transport.c
++++ b/drivers/usb/storage/transport.c
+@@ -1120,6 +1120,31 @@ int usb_stor_Bulk_transport(struct scsi_cmnd *srb, struct us_data *us)
+ */
+ if (result == USB_STOR_XFER_LONG)
+ fake_sense = 1;
++
++ /*
++ * Sometimes a device will mistakenly skip the data phase
++ * and go directly to the status phase without sending a
++ * zero-length packet. If we get a 13-byte response here,
++ * check whether it really is a CSW.
++ */
++ if (result == USB_STOR_XFER_SHORT &&
++ srb->sc_data_direction == DMA_FROM_DEVICE &&
++ transfer_length - scsi_get_resid(srb) ==
++ US_BULK_CS_WRAP_LEN) {
++ struct scatterlist *sg = NULL;
++ unsigned int offset = 0;
++
++ if (usb_stor_access_xfer_buf((unsigned char *) bcs,
++ US_BULK_CS_WRAP_LEN, srb, &sg,
++ &offset, FROM_XFER_BUF) ==
++ US_BULK_CS_WRAP_LEN &&
++ bcs->Signature ==
++ cpu_to_le32(US_BULK_CS_SIGN)) {
++ US_DEBUGP("Device skipped data phase\n");
++ scsi_set_resid(srb, transfer_length);
++ goto skipped_data_phase;
++ }
++ }
+ }
+
+ /* See flow chart on pg 15 of the Bulk Only Transport spec for
+@@ -1155,6 +1180,7 @@ int usb_stor_Bulk_transport(struct scsi_cmnd *srb, struct us_data *us)
+ if (result != USB_STOR_XFER_GOOD)
+ return USB_STOR_TRANSPORT_ERROR;
+
++ skipped_data_phase:
+ /* check bulk status */
+ residue = le32_to_cpu(bcs->Residue);
+ US_DEBUGP("Bulk Status S 0x%x T 0x%x R %u Stat 0x%x\n",
+diff --git a/drivers/video/console/bitblit.c b/drivers/video/console/bitblit.c
+index 28b1a834906b..6cbb2069531d 100644
+--- a/drivers/video/console/bitblit.c
++++ b/drivers/video/console/bitblit.c
+@@ -205,7 +205,6 @@ static void bit_putcs(struct vc_data *vc, struct fb_info *info,
+ static void bit_clear_margins(struct vc_data *vc, struct fb_info *info,
+ int bottom_only)
+ {
+- int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
+ unsigned int cw = vc->vc_font.width;
+ unsigned int ch = vc->vc_font.height;
+ unsigned int rw = info->var.xres - (vc->vc_cols*cw);
+@@ -214,7 +213,7 @@ static void bit_clear_margins(struct vc_data *vc, struct fb_info *info,
+ unsigned int bs = info->var.yres - bh;
+ struct fb_fillrect region;
+
+- region.color = attr_bgcol_ec(bgshift, vc, info);
++ region.color = 0;
+ region.rop = ROP_COPY;
+
+ if (rw && !bottom_only) {
+diff --git a/drivers/video/console/fbcon_ccw.c b/drivers/video/console/fbcon_ccw.c
+index 41b32ae23dac..5a3cbf6dff4d 100644
+--- a/drivers/video/console/fbcon_ccw.c
++++ b/drivers/video/console/fbcon_ccw.c
+@@ -197,9 +197,8 @@ static void ccw_clear_margins(struct vc_data *vc, struct fb_info *info,
+ unsigned int bh = info->var.xres - (vc->vc_rows*ch);
+ unsigned int bs = vc->vc_rows*ch;
+ struct fb_fillrect region;
+- int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
+
+- region.color = attr_bgcol_ec(bgshift,vc,info);
++ region.color = 0;
+ region.rop = ROP_COPY;
+
+ if (rw && !bottom_only) {
+diff --git a/drivers/video/console/fbcon_cw.c b/drivers/video/console/fbcon_cw.c
+index 6a737827beb1..7d3fd9bda66c 100644
+--- a/drivers/video/console/fbcon_cw.c
++++ b/drivers/video/console/fbcon_cw.c
+@@ -181,9 +181,8 @@ static void cw_clear_margins(struct vc_data *vc, struct fb_info *info,
+ unsigned int bh = info->var.xres - (vc->vc_rows*ch);
+ unsigned int rs = info->var.yres - rw;
+ struct fb_fillrect region;
+- int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
+
+- region.color = attr_bgcol_ec(bgshift,vc,info);
++ region.color = 0;
+ region.rop = ROP_COPY;
+
+ if (rw && !bottom_only) {
+diff --git a/drivers/video/console/fbcon_ud.c b/drivers/video/console/fbcon_ud.c
+index ff0872c0498b..19e3714abfe8 100644
+--- a/drivers/video/console/fbcon_ud.c
++++ b/drivers/video/console/fbcon_ud.c
+@@ -227,9 +227,8 @@ static void ud_clear_margins(struct vc_data *vc, struct fb_info *info,
+ unsigned int rw = info->var.xres - (vc->vc_cols*cw);
+ unsigned int bh = info->var.yres - (vc->vc_rows*ch);
+ struct fb_fillrect region;
+- int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
+
+- region.color = attr_bgcol_ec(bgshift,vc,info);
++ region.color = 0;
+ region.rop = ROP_COPY;
+
+ if (rw && !bottom_only) {
+diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
+index 2e03d416b9af..a41f264dc23d 100644
+--- a/drivers/virtio/virtio_pci.c
++++ b/drivers/virtio/virtio_pci.c
+@@ -745,6 +745,7 @@ static int virtio_pci_restore(struct device *dev)
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
+ struct virtio_driver *drv;
++ unsigned status = 0;
+ int ret;
+
+ drv = container_of(vp_dev->vdev.dev.driver,
+@@ -755,14 +756,40 @@ static int virtio_pci_restore(struct device *dev)
+ return ret;
+
+ pci_set_master(pci_dev);
++ /* We always start by resetting the device, in case a previous
++ * driver messed it up. */
++ vp_reset(&vp_dev->vdev);
++
++ /* Acknowledge that we've seen the device. */
++ status |= VIRTIO_CONFIG_S_ACKNOWLEDGE;
++ vp_set_status(&vp_dev->vdev, status);
++
++ /* Maybe driver failed before freeze.
++ * Restore the failed status, for debugging. */
++ status |= vp_dev->saved_status & VIRTIO_CONFIG_S_FAILED;
++ vp_set_status(&vp_dev->vdev, status);
++
++ if (!drv)
++ return 0;
++
++ /* We have a driver! */
++ status |= VIRTIO_CONFIG_S_DRIVER;
++ vp_set_status(&vp_dev->vdev, status);
++
+ vp_finalize_features(&vp_dev->vdev);
+
+- if (drv && drv->restore)
++ if (drv->restore) {
+ ret = drv->restore(&vp_dev->vdev);
++ if (ret) {
++ status |= VIRTIO_CONFIG_S_FAILED;
++ vp_set_status(&vp_dev->vdev, status);
++ return ret;
++ }
++ }
+
+ /* Finally, tell the device we're all set */
+- if (!ret)
+- vp_set_status(&vp_dev->vdev, vp_dev->saved_status);
++ status |= VIRTIO_CONFIG_S_DRIVER_OK;
++ vp_set_status(&vp_dev->vdev, status);
+
+ return ret;
+ }
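
The restore path above now replays the full virtio status handshake instead of blindly writing back the saved status byte: reset, ACKNOWLEDGE, DRIVER, feature negotiation, the driver's own restore, and only then DRIVER_OK (with FAILED recorded on error). The bit progression, using the status values from the virtio specification:

    #include <stdio.h>

    #define VIRTIO_CONFIG_S_ACKNOWLEDGE 1    /* guest saw the device */
    #define VIRTIO_CONFIG_S_DRIVER      2    /* guest knows how to drive it */
    #define VIRTIO_CONFIG_S_DRIVER_OK   4    /* device is live */
    #define VIRTIO_CONFIG_S_FAILED      0x80 /* setup gave up */

    int main(void)
    {
        unsigned status = 0;                    /* after reset */
        status |= VIRTIO_CONFIG_S_ACKNOWLEDGE;
        status |= VIRTIO_CONFIG_S_DRIVER;
        /* ... features finalized, drv->restore() runs here ... */
        status |= VIRTIO_CONFIG_S_DRIVER_OK;
        printf("status = 0x%02x\n", status);    /* 0x07 */
        return 0;
    }

Jumping straight to DRIVER_OK can confuse devices that enforce the ordering, which is why the stepwise sequence matters.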
+diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
+index 5d158d320233..6eab2dd16e94 100644
+--- a/fs/btrfs/file-item.c
++++ b/fs/btrfs/file-item.c
+@@ -393,7 +393,7 @@ int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
+ ret = 0;
+ fail:
+ while (ret < 0 && !list_empty(&tmplist)) {
+- sums = list_entry(&tmplist, struct btrfs_ordered_sum, list);
++ sums = list_entry(tmplist.next, struct btrfs_ordered_sum, list);
+ list_del(&sums->list);
+ kfree(sums);
+ }
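
The one-liner above fixes a classic container_of misuse: list_entry() was applied to the address of the list head itself rather than to its first member, so the computed btrfs_ordered_sum pointer was garbage and the kfree() freed the wrong memory. Reduced to its essence (the macro mirrors the kernel's list_entry):

    #include <stddef.h>

    struct list_head { struct list_head *next, *prev; };
    struct item { int value; struct list_head list; };

    #define list_entry(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    /* correct: convert the first real node back to its container */
    /*   struct item *it = list_entry(head.next, struct item, list); */
    /* wrong: &head is not embedded in any struct item              */
    /*   struct item *it = list_entry(&head,     struct item, list); */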
+diff --git a/fs/buffer.c b/fs/buffer.c
+index f235e1834e39..ed2dc709883a 100644
+--- a/fs/buffer.c
++++ b/fs/buffer.c
+@@ -1982,6 +1982,7 @@ int generic_write_end(struct file *file, struct address_space *mapping,
+ struct page *page, void *fsdata)
+ {
+ struct inode *inode = mapping->host;
++ loff_t old_size = inode->i_size;
+ int i_size_changed = 0;
+
+ copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
+@@ -2001,6 +2002,8 @@ int generic_write_end(struct file *file, struct address_space *mapping,
+ unlock_page(page);
+ page_cache_release(page);
+
++ if (old_size < pos)
++ pagecache_isize_extended(inode, old_size, pos);
+ /*
+ * Don't mark the inode dirty under page lock. First, it unnecessarily
+ * makes the holding time of page lock longer. Second, it forces lock
+@@ -2221,6 +2224,11 @@ static int cont_expand_zero(struct file *file, struct address_space *mapping,
+ err = 0;
+
+ balance_dirty_pages_ratelimited(mapping);
++
++ if (unlikely(fatal_signal_pending(current))) {
++ err = -EINTR;
++ goto out;
++ }
+ }
+
+ /* page covers the boundary, find the boundary offset */
+diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
+index 11030b2fd3b4..b5b9b4086143 100644
+--- a/fs/ecryptfs/inode.c
++++ b/fs/ecryptfs/inode.c
+@@ -1093,7 +1093,7 @@ ecryptfs_setxattr(struct dentry *dentry, const char *name, const void *value,
+ }
+
+ rc = vfs_setxattr(lower_dentry, name, value, size, flags);
+- if (!rc)
++ if (!rc && dentry->d_inode)
+ fsstack_copy_attr_all(dentry->d_inode, lower_dentry->d_inode);
+ out:
+ return rc;
+diff --git a/fs/ext3/super.c b/fs/ext3/super.c
+index ef4c812c7a63..564f9429b3b1 100644
+--- a/fs/ext3/super.c
++++ b/fs/ext3/super.c
+@@ -1292,13 +1292,6 @@ set_qf_format:
+ "not specified.");
+ return 0;
+ }
+- } else {
+- if (sbi->s_jquota_fmt) {
+- ext3_msg(sb, KERN_ERR, "error: journaled quota format "
+- "specified with no journaling "
+- "enabled.");
+- return 0;
+- }
+ }
+ #endif
+ return 1;
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index 521ba9d18ce6..b9cdb6df8d2b 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -1891,6 +1891,7 @@ int ext4_get_block(struct inode *inode, sector_t iblock,
+ struct buffer_head *bh_result, int create);
+
+ extern struct inode *ext4_iget(struct super_block *, unsigned long);
++extern struct inode *ext4_iget_normal(struct super_block *, unsigned long);
+ extern int ext4_write_inode(struct inode *, struct writeback_control *);
+ extern int ext4_setattr(struct dentry *, struct iattr *);
+ extern int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry,
+diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
+index 75c4f36bced8..97ca4b6fb2a9 100644
+--- a/fs/ext4/ialloc.c
++++ b/fs/ext4/ialloc.c
+@@ -725,6 +725,10 @@ got:
+ struct buffer_head *block_bitmap_bh;
+
+ block_bitmap_bh = ext4_read_block_bitmap(sb, group);
++ if (!block_bitmap_bh) {
++ err = -EIO;
++ goto out;
++ }
+ BUFFER_TRACE(block_bitmap_bh, "get block bitmap access");
+ err = ext4_journal_get_write_access(handle, block_bitmap_bh);
+ if (err) {
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 5b6dcba304b1..9e9db425c613 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -157,16 +157,14 @@ void ext4_evict_inode(struct inode *inode)
+ goto no_delete;
+ }
+
+- if (!is_bad_inode(inode))
+- dquot_initialize(inode);
++ if (is_bad_inode(inode))
++ goto no_delete;
++ dquot_initialize(inode);
+
+ if (ext4_should_order_data(inode))
+ ext4_begin_ordered_truncate(inode, 0);
+ truncate_inode_pages(&inode->i_data, 0);
+
+- if (is_bad_inode(inode))
+- goto no_delete;
+-
+ handle = ext4_journal_start(inode, ext4_blocks_for_truncate(inode)+3);
+ if (IS_ERR(handle)) {
+ ext4_std_error(inode->i_sb, PTR_ERR(handle));
+@@ -2410,6 +2408,20 @@ static int ext4_nonda_switch(struct super_block *sb)
+ return 0;
+ }
+
++/* We always reserve for an inode update; the superblock could be there too */
++static int ext4_da_write_credits(struct inode *inode, loff_t pos, unsigned len)
++{
++ if (likely(EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
++ EXT4_FEATURE_RO_COMPAT_LARGE_FILE)))
++ return 1;
++
++ if (pos + len <= 0x7fffffffULL)
++ return 1;
++
++ /* We might need to update the superblock to set LARGE_FILE */
++ return 2;
++}
++
+ static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned flags,
+ struct page **pagep, void **fsdata)
+@@ -2436,7 +2448,8 @@ retry:
+ * to journalling the i_disksize update if writes to the end
+ * of file which has an already mapped buffer.
+ */
+- handle = ext4_journal_start(inode, 1);
++ handle = ext4_journal_start(inode,
++ ext4_da_write_credits(inode, pos, len));
+ if (IS_ERR(handle)) {
+ ret = PTR_ERR(handle);
+ goto out;
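
The new ext4_da_write_credits() helper above reserves one journal credit for the inode update, plus a second when a delalloc write may push i_size past 2^31 - 1 bytes on a filesystem that does not yet carry the LARGE_FILE feature, since crossing that boundary forces a superblock update inside the same transaction. The decision itself is plain arithmetic (userspace rendering; has_large_file stands in for the EXT4_FEATURE_RO_COMPAT_LARGE_FILE test):

    static int da_write_credits(int has_large_file,
                                unsigned long long pos, unsigned len)
    {
        if (has_large_file)
            return 1;
        if (pos + len <= 0x7fffffffULL)
            return 1;
        return 2;  /* superblock may need the LARGE_FILE bit set */
    }

Under-reserving here would trip journal assertions when the superblock is dirtied with no credit left.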
+@@ -3840,6 +3853,13 @@ bad_inode:
+ return ERR_PTR(ret);
+ }
+
++struct inode *ext4_iget_normal(struct super_block *sb, unsigned long ino)
++{
++ if (ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO)
++ return ERR_PTR(-EIO);
++ return ext4_iget(sb, ino);
++}
++
+ static int ext4_inode_blocks_set(handle_t *handle,
+ struct ext4_inode *raw_inode,
+ struct ext4_inode_info *ei)
+diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
+index 665e55ca208c..dc5852301da7 100644
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -1051,7 +1051,7 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, stru
+ dentry->d_name.name);
+ return ERR_PTR(-EIO);
+ }
+- inode = ext4_iget(dir->i_sb, ino);
++ inode = ext4_iget_normal(dir->i_sb, ino);
+ if (inode == ERR_PTR(-ESTALE)) {
+ EXT4_ERROR_INODE(dir,
+ "deleted inode referenced: %u",
+@@ -1087,7 +1087,7 @@ struct dentry *ext4_get_parent(struct dentry *child)
+ return ERR_PTR(-EIO);
+ }
+
+- return d_obtain_alias(ext4_iget(child->d_inode->i_sb, ino));
++ return d_obtain_alias(ext4_iget_normal(child->d_inode->i_sb, ino));
+ }
+
+ #define S_SHIFT 12
+@@ -1421,31 +1421,38 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
+ hinfo.hash_version += EXT4_SB(dir->i_sb)->s_hash_unsigned;
+ hinfo.seed = EXT4_SB(dir->i_sb)->s_hash_seed;
+ ext4fs_dirhash(name, namelen, &hinfo);
++ memset(frames, 0, sizeof(frames));
+ frame = frames;
+ frame->entries = entries;
+ frame->at = entries;
+ frame->bh = bh;
+ bh = bh2;
+
+- ext4_handle_dirty_metadata(handle, dir, frame->bh);
+- ext4_handle_dirty_metadata(handle, dir, bh);
++ retval = ext4_handle_dirty_metadata(handle, dir, frame->bh);
++ if (retval)
++ goto out_frames;
++ retval = ext4_handle_dirty_metadata(handle, dir, bh);
++ if (retval)
++ goto out_frames;
+
+ de = do_split(handle,dir, &bh, frame, &hinfo, &retval);
+ if (!de) {
+- /*
+- * Even if the block split failed, we have to properly write
+- * out all the changes we did so far. Otherwise we can end up
+- * with corrupted filesystem.
+- */
+- ext4_mark_inode_dirty(handle, dir);
+- dx_release(frames);
+- return retval;
++ goto out_frames;
+ }
+ dx_release(frames);
+
+ retval = add_dirent_to_buf(handle, dentry, inode, de, bh);
+ brelse(bh);
+ return retval;
++out_frames:
++ /*
++ * Even if the block split failed, we have to properly write
++ * out all the changes we did so far. Otherwise we can end up
++ * with corrupted filesystem.
++ */
++ ext4_mark_inode_dirty(handle, dir);
++ dx_release(frames);
++ return retval;
+ }
+
+ /*
+@@ -1992,7 +1999,7 @@ int ext4_orphan_add(handle_t *handle, struct inode *inode)
+ struct ext4_iloc iloc;
+ int err = 0, rc;
+
+- if (!ext4_handle_valid(handle))
++ if (!ext4_handle_valid(handle) || is_bad_inode(inode))
+ return 0;
+
+ mutex_lock(&EXT4_SB(sb)->s_orphan_lock);
+diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
+index a43e43c835d1..cfd321104250 100644
+--- a/fs/ext4/resize.c
++++ b/fs/ext4/resize.c
+@@ -991,7 +991,7 @@ static void update_backups(struct super_block *sb,
+ (err = ext4_journal_restart(handle, EXT4_MAX_TRANS_DATA)))
+ break;
+
+- bh = sb_getblk(sb, group * bpg + blk_off);
++ bh = sb_getblk(sb, ((ext4_fsblk_t)group) * bpg + blk_off);
+ if (!bh) {
+ err = -ENOMEM;
+ break;
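
The cast above fixes a 32-bit overflow: group and bpg are 32-bit, so their product wraps before being assigned to the 64-bit block number, sending backup-superblock writes to the wrong place on large filesystems. The failure in miniature (values are hypothetical):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t group = 140000, bpg = 32768;
        uint64_t wraps = group * bpg;           /* 32-bit multiply wraps */
        uint64_t right = (uint64_t)group * bpg; /* widen first */

        printf("wraps=%llu right=%llu\n",
               (unsigned long long)wraps, (unsigned long long)right);
        return 0;
    }

Widening one operand before the multiply, as the patch does with the ext4_fsblk_t cast, keeps the full 64-bit product.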
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index f0e4e46867f7..92ea560efcc7 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -1041,7 +1041,7 @@ static struct inode *ext4_nfs_get_inode(struct super_block *sb,
+ * Currently we don't know the generation for parent directory, so
+ * a generation of 0 means "accept any"
+ */
+- inode = ext4_iget(sb, ino);
++ inode = ext4_iget_normal(sb, ino);
+ if (IS_ERR(inode))
+ return ERR_CAST(inode);
+ if (generation && inode->i_generation != generation) {
+@@ -1642,13 +1642,6 @@ static int parse_options(char *options, struct super_block *sb,
+ "not specified");
+ return 0;
+ }
+- } else {
+- if (sbi->s_jquota_fmt) {
+- ext4_msg(sb, KERN_ERR, "journaled quota format "
+- "specified with no journaling "
+- "enabled");
+- return 0;
+- }
+ }
+ #endif
+ if (test_opt(sb, DIOREAD_NOLOCK)) {
+diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
+index 5743e9db8027..96455e6988fe 100644
+--- a/fs/ext4/xattr.c
++++ b/fs/ext4/xattr.c
+@@ -144,14 +144,28 @@ ext4_listxattr(struct dentry *dentry, char *buffer, size_t size)
+ }
+
+ static int
+-ext4_xattr_check_names(struct ext4_xattr_entry *entry, void *end)
++ext4_xattr_check_names(struct ext4_xattr_entry *entry, void *end,
++ void *value_start)
+ {
+- while (!IS_LAST_ENTRY(entry)) {
+- struct ext4_xattr_entry *next = EXT4_XATTR_NEXT(entry);
++ struct ext4_xattr_entry *e = entry;
++
++ while (!IS_LAST_ENTRY(e)) {
++ struct ext4_xattr_entry *next = EXT4_XATTR_NEXT(e);
+ if ((void *)next >= end)
+ return -EIO;
+- entry = next;
++ e = next;
+ }
++
++ while (!IS_LAST_ENTRY(entry)) {
++ if (entry->e_value_size != 0 &&
++ (value_start + le16_to_cpu(entry->e_value_offs) <
++ (void *)e + sizeof(__u32) ||
++ value_start + le16_to_cpu(entry->e_value_offs) +
++ le32_to_cpu(entry->e_value_size) > end))
++ return -EIO;
++ entry = EXT4_XATTR_NEXT(entry);
++ }
++
+ return 0;
+ }
+
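
The hardening above extends ext4_xattr_check_names() from merely walking the entry list to also validating every value region: for each entry with a nonzero size, value_start + e_value_offs must begin past the entry table (plus its 4-byte terminator) and value_start + e_value_offs + e_value_size must not run past the end of the buffer. In schematic form (a sketch, not the kernel helper):

    /* Nonzero-sized values must live inside [entries_end + 4, end). */
    static int value_in_bounds(const char *value_start, const char *end,
                               const char *entries_end,
                               unsigned offs, unsigned size)
    {
        const char *v = value_start + offs;

        if (size == 0)
            return 1;
        return v >= entries_end + 4 && v + size <= end;
    }

Without this, a crafted xattr block or inode could point a value outside the buffer and turn a getxattr into an out-of-bounds read.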
+@@ -161,7 +175,8 @@ ext4_xattr_check_block(struct buffer_head *bh)
+ if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) ||
+ BHDR(bh)->h_blocks != cpu_to_le32(1))
+ return -EIO;
+- return ext4_xattr_check_names(BFIRST(bh), bh->b_data + bh->b_size);
++ return ext4_xattr_check_names(BFIRST(bh), bh->b_data + bh->b_size,
++ bh->b_data);
+ }
+
+ static inline int
+@@ -274,7 +289,7 @@ ext4_xattr_ibody_get(struct inode *inode, int name_index, const char *name,
+ header = IHDR(inode, raw_inode);
+ entry = IFIRST(header);
+ end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
+- error = ext4_xattr_check_names(entry, end);
++ error = ext4_xattr_check_names(entry, end, entry);
+ if (error)
+ goto cleanup;
+ error = ext4_xattr_find_entry(&entry, name_index, name,
+@@ -402,7 +417,7 @@ ext4_xattr_ibody_list(struct dentry *dentry, char *buffer, size_t buffer_size)
+ raw_inode = ext4_raw_inode(&iloc);
+ header = IHDR(inode, raw_inode);
+ end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
+- error = ext4_xattr_check_names(IFIRST(header), end);
++ error = ext4_xattr_check_names(IFIRST(header), end, IFIRST(header));
+ if (error)
+ goto cleanup;
+ error = ext4_xattr_list_entries(dentry, IFIRST(header),
+@@ -914,7 +929,8 @@ ext4_xattr_ibody_find(struct inode *inode, struct ext4_xattr_info *i,
+ is->s.here = is->s.first;
+ is->s.end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
+ if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
+- error = ext4_xattr_check_names(IFIRST(header), is->s.end);
++ error = ext4_xattr_check_names(IFIRST(header), is->s.end,
++ IFIRST(header));
+ if (error)
+ return error;
+ /* Find the named attribute. */
+diff --git a/fs/ioprio.c b/fs/ioprio.c
+index 0f1b9515213b..0dd6a2a7ae82 100644
+--- a/fs/ioprio.c
++++ b/fs/ioprio.c
+@@ -153,14 +153,16 @@ out:
+
+ int ioprio_best(unsigned short aprio, unsigned short bprio)
+ {
+- unsigned short aclass = IOPRIO_PRIO_CLASS(aprio);
+- unsigned short bclass = IOPRIO_PRIO_CLASS(bprio);
++ unsigned short aclass;
++ unsigned short bclass;
+
+- if (aclass == IOPRIO_CLASS_NONE)
+- aclass = IOPRIO_CLASS_BE;
+- if (bclass == IOPRIO_CLASS_NONE)
+- bclass = IOPRIO_CLASS_BE;
++ if (!ioprio_valid(aprio))
++ aprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);
++ if (!ioprio_valid(bprio))
++ bprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);
+
++ aclass = IOPRIO_PRIO_CLASS(aprio);
++ bclass = IOPRIO_PRIO_CLASS(bprio);
+ if (aclass == bclass)
+ return min(aprio, bprio);
+ if (aclass > bclass)
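
ioprio_best() above now validates each priority before splitting out its class: an unset priority (class IOPRIO_CLASS_NONE) is normalized to the best-effort default rather than compared raw, where its zero class would always win. The fixed logic in standalone form, with macros mirroring include/linux/ioprio.h (the single min() stands in for the kernel's class comparison and is equivalent because the class occupies the top bits):

    #define IOPRIO_CLASS_SHIFT      13
    #define IOPRIO_PRIO_CLASS(p)    ((p) >> IOPRIO_CLASS_SHIFT)
    #define IOPRIO_PRIO_VALUE(c, d) (((c) << IOPRIO_CLASS_SHIFT) | (d))
    #define IOPRIO_NORM             4

    enum { IOPRIO_CLASS_NONE, IOPRIO_CLASS_RT, IOPRIO_CLASS_BE,
           IOPRIO_CLASS_IDLE };

    #define ioprio_valid(p) (IOPRIO_PRIO_CLASS(p) != IOPRIO_CLASS_NONE)

    static unsigned short ioprio_best(unsigned short aprio,
                                      unsigned short bprio)
    {
        if (!ioprio_valid(aprio))
            aprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);
        if (!ioprio_valid(bprio))
            bprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);

        /* lower numeric class wins; within a class, lower data wins */
        return aprio < bprio ? aprio : bprio;
    }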
+diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c
+index 606a8dd8818c..0a68e0b22839 100644
+--- a/fs/lockd/mon.c
++++ b/fs/lockd/mon.c
+@@ -114,6 +114,12 @@ static int nsm_mon_unmon(struct nsm_handle *nsm, u32 proc, struct nsm_res *res,
+
+ msg.rpc_proc = &clnt->cl_procinfo[proc];
+ status = rpc_call_sync(clnt, &msg, 0);
++ if (status == -ECONNREFUSED) {
++ dprintk("lockd: NSM upcall RPC failed, status=%d, forcing rebind\n",
++ status);
++ rpc_force_rebind(clnt);
++ status = rpc_call_sync(clnt, &msg, RPC_TASK_SOFTCONN);
++ }
+ if (status < 0)
+ dprintk("lockd: NSM upcall RPC failed, status=%d\n",
+ status);
+diff --git a/fs/namespace.c b/fs/namespace.c
+index f0f2e067c5df..f7be8d9c1cd6 100644
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -2508,6 +2508,9 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
+ /* make sure we can reach put_old from new_root */
+ if (!is_path_reachable(real_mount(old.mnt), old.dentry, &new))
+ goto out4;
++ /* make certain new is below the root */
++ if (!is_path_reachable(new_mnt, new.dentry, &root))
++ goto out4;
+ br_write_lock(vfsmount_lock);
+ detach_mnt(new_mnt, &parent_path);
+ detach_mnt(root_mnt, &root_parent);
+diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
+index 9bb4e5c541b0..a6d59054e8b3 100644
+--- a/fs/nfs/inode.c
++++ b/fs/nfs/inode.c
+@@ -512,7 +512,7 @@ int nfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
+ {
+ struct inode *inode = dentry->d_inode;
+ int need_atime = NFS_I(inode)->cache_validity & NFS_INO_INVALID_ATIME;
+- int err;
++ int err = 0;
+
+ /* Flush out writes to the server in order to update c/mtime. */
+ if (S_ISREG(inode->i_mode)) {
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 527a4fc12546..3d344ab0bdb3 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -1740,6 +1740,28 @@ static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *sta
+ return ret;
+ }
+
++static void nfs_finish_clear_delegation_stateid(struct nfs4_state *state)
++{
++ nfs_remove_bad_delegation(state->inode);
++ write_seqlock(&state->seqlock);
++ nfs4_stateid_copy(&state->stateid, &state->open_stateid);
++ write_sequnlock(&state->seqlock);
++ clear_bit(NFS_DELEGATED_STATE, &state->flags);
++}
++
++static void nfs40_clear_delegation_stateid(struct nfs4_state *state)
++{
++ if (rcu_access_pointer(NFS_I(state->inode)->delegation) != NULL)
++ nfs_finish_clear_delegation_stateid(state);
++}
++
++static int nfs40_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
++{
++ /* NFSv4.0 doesn't allow for delegation recovery on open expire */
++ nfs40_clear_delegation_stateid(state);
++ return nfs4_open_expired(sp, state);
++}
++
+ #if defined(CONFIG_NFS_V4_1)
+ static int nfs41_check_expired_stateid(struct nfs4_state *state, nfs4_stateid *stateid, unsigned int flags)
+ {
+@@ -5796,7 +5818,7 @@ static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cr
+ int ret = 0;
+
+ if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0)
+- return 0;
++ return -EAGAIN;
+ task = _nfs41_proc_sequence(clp, cred, &nfs41_sequence_ops);
+ if (IS_ERR(task))
+ ret = PTR_ERR(task);
+@@ -6547,7 +6569,7 @@ static const struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = {
+ static const struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = {
+ .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
+ .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
+- .recover_open = nfs4_open_expired,
++ .recover_open = nfs40_open_expired,
+ .recover_lock = nfs4_lock_expired,
+ .establish_clid = nfs4_init_clientid,
+ .get_clid_cred = nfs4_get_setclientid_cred,
+diff --git a/fs/nfs/nfs4renewd.c b/fs/nfs/nfs4renewd.c
+index dc484c0eae7f..78071cf90079 100644
+--- a/fs/nfs/nfs4renewd.c
++++ b/fs/nfs/nfs4renewd.c
+@@ -88,10 +88,18 @@ nfs4_renew_state(struct work_struct *work)
+ }
+ nfs_expire_all_delegations(clp);
+ } else {
++ int ret;
++
+ /* Queue an asynchronous RENEW. */
+- ops->sched_state_renewal(clp, cred, renew_flags);
++ ret = ops->sched_state_renewal(clp, cred, renew_flags);
+ put_rpccred(cred);
+- goto out_exp;
++ switch (ret) {
++ default:
++ goto out_exp;
++ case -EAGAIN:
++ case -ENOMEM:
++ break;
++ }
+ }
+ } else {
+ dprintk("%s: failed to call renewd. Reason: lease not expired \n",
+diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
+index 461816beff13..c4600b59744a 100644
+--- a/fs/nfs/nfs4state.c
++++ b/fs/nfs/nfs4state.c
+@@ -1515,7 +1515,8 @@ restart:
+ if (status < 0) {
+ set_bit(ops->owner_flag_bit, &sp->so_flags);
+ nfs4_put_state_owner(sp);
+- return nfs4_recovery_handle_error(clp, status);
++ status = nfs4_recovery_handle_error(clp, status);
++ return (status != 0) ? status : -EAGAIN;
+ }
+
+ nfs4_put_state_owner(sp);
+@@ -1524,7 +1525,7 @@ restart:
+ spin_unlock(&clp->cl_lock);
+ }
+ rcu_read_unlock();
+- return status;
++ return 0;
+ }
+
+ static int nfs4_check_lease(struct nfs_client *clp)
+@@ -1796,23 +1797,18 @@ static void nfs4_state_manager(struct nfs_client *clp)
+ if (test_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state)) {
+ status = nfs4_do_reclaim(clp,
+ clp->cl_mvops->reboot_recovery_ops);
+- if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) ||
+- test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state))
+- continue;
+- nfs4_state_end_reclaim_reboot(clp);
+- if (test_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state))
++ if (status == -EAGAIN)
+ continue;
+ if (status < 0)
+ goto out_error;
++ nfs4_state_end_reclaim_reboot(clp);
+ }
+
+ /* Now recover expired state... */
+ if (test_and_clear_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state)) {
+ status = nfs4_do_reclaim(clp,
+ clp->cl_mvops->nograce_recovery_ops);
+- if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) ||
+- test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state) ||
+- test_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state))
++ if (status == -EAGAIN)
+ continue;
+ if (status < 0)
+ goto out_error;
+diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
+index 22beaff3544a..b2ce878080be 100644
+--- a/fs/nfsd/nfs4proc.c
++++ b/fs/nfsd/nfs4proc.c
+@@ -1132,7 +1132,8 @@ static bool need_wrongsec_check(struct svc_rqst *rqstp)
+ */
+ if (argp->opcnt == resp->opcnt)
+ return false;
+-
++ if (next->opnum == OP_ILLEGAL)
++ return false;
+ nextd = OPDESC(next);
+ /*
+ * Rest of 2.6.3.1.1: certain operations will return WRONGSEC
+diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
+index 48bc91d60fce..97d91f046265 100644
+--- a/fs/notify/fanotify/fanotify_user.c
++++ b/fs/notify/fanotify/fanotify_user.c
+@@ -67,7 +67,7 @@ static int create_fd(struct fsnotify_group *group, struct fsnotify_event *event)
+
+ pr_debug("%s: group=%p event=%p\n", __func__, group, event);
+
+- client_fd = get_unused_fd();
++ client_fd = get_unused_fd_flags(group->fanotify_data.f_flags);
+ if (client_fd < 0)
+ return client_fd;
+
+diff --git a/fs/super.c b/fs/super.c
+index 3c520a5ed715..d0154e52c76b 100644
+--- a/fs/super.c
++++ b/fs/super.c
+@@ -69,6 +69,8 @@ static int prune_super(struct shrinker *shrink, struct shrink_control *sc)
+
+ total_objects = sb->s_nr_dentry_unused +
+ sb->s_nr_inodes_unused + fs_objects + 1;
++ if (!total_objects)
++ total_objects = 1;
+
+ if (sc->nr_to_scan) {
+ int dentries;
+diff --git a/fs/ubifs/commit.c b/fs/ubifs/commit.c
+index fb3b5c813a30..b2ca12fd593b 100644
+--- a/fs/ubifs/commit.c
++++ b/fs/ubifs/commit.c
+@@ -166,15 +166,10 @@ static int do_commit(struct ubifs_info *c)
+ err = ubifs_orphan_end_commit(c);
+ if (err)
+ goto out;
+- old_ltail_lnum = c->ltail_lnum;
+- err = ubifs_log_end_commit(c, new_ltail_lnum);
+- if (err)
+- goto out;
+ err = dbg_check_old_index(c, &zroot);
+ if (err)
+ goto out;
+
+- mutex_lock(&c->mst_mutex);
+ c->mst_node->cmt_no = cpu_to_le64(c->cmt_no);
+ c->mst_node->log_lnum = cpu_to_le32(new_ltail_lnum);
+ c->mst_node->root_lnum = cpu_to_le32(zroot.lnum);
+@@ -203,8 +198,9 @@ static int do_commit(struct ubifs_info *c)
+ c->mst_node->flags |= cpu_to_le32(UBIFS_MST_NO_ORPHS);
+ else
+ c->mst_node->flags &= ~cpu_to_le32(UBIFS_MST_NO_ORPHS);
+- err = ubifs_write_master(c);
+- mutex_unlock(&c->mst_mutex);
++
++ old_ltail_lnum = c->ltail_lnum;
++ err = ubifs_log_end_commit(c, new_ltail_lnum);
+ if (err)
+ goto out;
+
+diff --git a/fs/ubifs/log.c b/fs/ubifs/log.c
+index f9fd068d1ae0..843beda25767 100644
+--- a/fs/ubifs/log.c
++++ b/fs/ubifs/log.c
+@@ -110,10 +110,14 @@ static inline long long empty_log_bytes(const struct ubifs_info *c)
+ h = (long long)c->lhead_lnum * c->leb_size + c->lhead_offs;
+ t = (long long)c->ltail_lnum * c->leb_size;
+
+- if (h >= t)
++ if (h > t)
+ return c->log_bytes - h + t;
+- else
++ else if (h != t)
+ return t - h;
++ else if (c->lhead_lnum != c->ltail_lnum)
++ return 0;
++ else
++ return c->log_bytes;
+ }
+
+ /**
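
The empty_log_bytes() fix above resolves a wraparound ambiguity: the old code treated equal head and tail byte positions as an empty log, over-reporting free space when the head had in fact wrapped all the way around onto the tail. Only the LEB numbers can tell the two cases apart. A standalone rendering of the patched computation:

    static long long empty_log_bytes(long long h, long long t,
                                     int lhead_lnum, int ltail_lnum,
                                     long long log_bytes)
    {
        if (h > t)
            return log_bytes - h + t;  /* head wrapped past the end */
        else if (h != t)
            return t - h;              /* head strictly behind tail */
        else if (lhead_lnum != ltail_lnum)
            return 0;                  /* same position, different LEBs: full */
        else
            return log_bytes;          /* truly the same point: empty */
    }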
+@@ -453,9 +457,9 @@ out:
+ * @ltail_lnum: new log tail LEB number
+ *
+ * This function is called when the commit operation has finished. It
+- * moves log tail to new position and unmaps LEBs which contain obsolete data.
+- * Returns zero in case of success and a negative error code in case of
+- * failure.
++ * moves log tail to new position and updates the master node so that it stores
++ * the new log tail LEB number. Returns zero in case of success and a negative
++ * error code in case of failure.
+ */
+ int ubifs_log_end_commit(struct ubifs_info *c, int ltail_lnum)
+ {
+@@ -483,7 +487,12 @@ int ubifs_log_end_commit(struct ubifs_info *c, int ltail_lnum)
+ spin_unlock(&c->buds_lock);
+
+ err = dbg_check_bud_bytes(c);
++ if (err)
++ goto out;
+
++ err = ubifs_write_master(c);
++
++out:
+ mutex_unlock(&c->log_mutex);
+ return err;
+ }
+diff --git a/fs/ubifs/master.c b/fs/ubifs/master.c
+index 278c2382e8c2..bb9f48107815 100644
+--- a/fs/ubifs/master.c
++++ b/fs/ubifs/master.c
+@@ -352,10 +352,9 @@ int ubifs_read_master(struct ubifs_info *c)
+ * ubifs_write_master - write master node.
+ * @c: UBIFS file-system description object
+ *
+- * This function writes the master node. The caller has to take the
+- * @c->mst_mutex lock before calling this function. Returns zero in case of
+- * success and a negative error code in case of failure. The master node is
+- * written twice to enable recovery.
++ * This function writes the master node. Returns zero in case of success and a
++ * negative error code in case of failure. The master node is written twice to
++ * enable recovery.
+ */
+ int ubifs_write_master(struct ubifs_info *c)
+ {
+diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
+index d867bd97bc60..129bb488ce75 100644
+--- a/fs/ubifs/super.c
++++ b/fs/ubifs/super.c
+@@ -1984,7 +1984,6 @@ static struct ubifs_info *alloc_ubifs_info(struct ubi_volume_desc *ubi)
+ mutex_init(&c->lp_mutex);
+ mutex_init(&c->tnc_mutex);
+ mutex_init(&c->log_mutex);
+- mutex_init(&c->mst_mutex);
+ mutex_init(&c->umount_mutex);
+ mutex_init(&c->bu_mutex);
+ mutex_init(&c->write_reserve_mutex);
+diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h
+index 3f962617e29b..cd62067aea85 100644
+--- a/fs/ubifs/ubifs.h
++++ b/fs/ubifs/ubifs.h
+@@ -1041,7 +1041,6 @@ struct ubifs_debug_info;
+ *
+ * @mst_node: master node
+ * @mst_offs: offset of valid master node
+- * @mst_mutex: protects the master node area, @mst_node, and @mst_offs
+ *
+ * @max_bu_buf_len: maximum bulk-read buffer length
+ * @bu_mutex: protects the pre-allocated bulk-read buffer and @c->bu
+@@ -1281,7 +1280,6 @@ struct ubifs_info {
+
+ struct ubifs_mst_node *mst_node;
+ int mst_offs;
+- struct mutex mst_mutex;
+
+ int max_bu_buf_len;
+ struct mutex bu_mutex;
+diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
+index 757f98066d6b..53baa0d7c34f 100644
+--- a/include/drm/drm_pciids.h
++++ b/include/drm/drm_pciids.h
+@@ -56,7 +56,6 @@
+ {0x1002, 0x4C64, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \
+ {0x1002, 0x4C66, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \
+ {0x1002, 0x4C67, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \
+- {0x1002, 0x4C6E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280|RADEON_IS_MOBILITY}, \
+ {0x1002, 0x4E44, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
+ {0x1002, 0x4E45, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
+ {0x1002, 0x4E46, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
+diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
+index 4d4ac24a263e..01b7047b60df 100644
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -1069,10 +1069,9 @@ static inline int queue_alignment_offset(struct request_queue *q)
+ static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector)
+ {
+ unsigned int granularity = max(lim->physical_block_size, lim->io_min);
+- unsigned int alignment = (sector << 9) & (granularity - 1);
++ unsigned int alignment = sector_div(sector, granularity >> 9) << 9;
+
+- return (granularity + lim->alignment_offset - alignment)
+- & (granularity - 1);
++ return (granularity + lim->alignment_offset - alignment) % granularity;
+ }
+
+ static inline int bdev_alignment_offset(struct block_device *bdev)
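
The rewrite above drops a power-of-two assumption: x & (granularity - 1) equals x % granularity only when granularity is a power of two, and io_min on striped devices need not be one (three-disk RAID parity layouts give, e.g., 192 KiB). sector_div() is used because a plain 64-bit modulo is not available on all 32-bit kernels. The arithmetic difference:

    #include <stdio.h>

    int main(void)
    {
        unsigned long long off = 655360; /* byte offset: 640 KiB */
        unsigned g = 196608;             /* granularity: 192 KiB */

        printf("mask: %llu\n", off & (g - 1ULL)); /* 131072 -- wrong */
        printf("mod : %llu\n", off % g);          /*  65536 -- right */
        return 0;
    }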
+diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
+index 7970e31c8c0e..ea9cffe22ec4 100644
+--- a/include/linux/compiler-gcc.h
++++ b/include/linux/compiler-gcc.h
+@@ -37,6 +37,9 @@
+ __asm__ ("" : "=r"(__ptr) : "0"(ptr)); \
+ (typeof(ptr)) (__ptr + (off)); })
+
++/* Make the optimizer believe the variable can be manipulated arbitrarily. */
++#define OPTIMIZER_HIDE_VAR(var) __asm__ ("" : "=r" (var) : "0" (var))
++
+ #ifdef __CHECKER__
+ #define __must_be_array(arr) 0
+ #else
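
OPTIMIZER_HIDE_VAR(), added above, is an empty asm that claims to consume and reproduce the variable in a register, so gcc must treat the value as opaque at that point. A typical use is defeating optimizations that would reintroduce data-dependent timing, as in this sketch of a constant-time comparison (modeled on the pattern the kernel's crypto_memneq later adopted; the function name here is illustrative):

    #define OPTIMIZER_HIDE_VAR(var) __asm__ ("" : "=r" (var) : "0" (var))

    /* Accumulate differences without giving the compiler a chance to
     * short-circuit once a mismatch is known. */
    static int memneq_sketch(const unsigned char *a,
                             const unsigned char *b, unsigned n)
    {
        unsigned char neq = 0;

        while (n--) {
            neq |= *a++ ^ *b++;
            OPTIMIZER_HIDE_VAR(neq);
        }
        return neq;
    }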
+diff --git a/include/linux/compiler-gcc5.h b/include/linux/compiler-gcc5.h
+new file mode 100644
+index 000000000000..cdd1cc202d51
+--- /dev/null
++++ b/include/linux/compiler-gcc5.h
+@@ -0,0 +1,66 @@
++#ifndef __LINUX_COMPILER_H
++#error "Please don't include <linux/compiler-gcc5.h> directly, include <linux/compiler.h> instead."
++#endif
++
++#define __used __attribute__((__used__))
++#define __must_check __attribute__((warn_unused_result))
++#define __compiler_offsetof(a, b) __builtin_offsetof(a, b)
++
++/* Mark functions as cold. gcc will assume any path leading to a call
++ to them will be unlikely. This means a lot of manual unlikely()s
++ are unnecessary now for any paths leading to the usual suspects
++ like BUG(), printk(), panic() etc. [but let's keep them for now for
++ older compilers]
++
++ Early snapshots of gcc 4.3 don't support this and we can't detect this
++ in the preprocessor, but we can live with this because they're unreleased.
++ Maketime probing would be overkill here.
++
++ gcc also has a __attribute__((__hot__)) to move hot functions into
++ a special section, but I don't see any sense in this right now in
++ the kernel context */
++#define __cold __attribute__((__cold__))
++
++#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
++
++#ifndef __CHECKER__
++# define __compiletime_warning(message) __attribute__((warning(message)))
++# define __compiletime_error(message) __attribute__((error(message)))
++#endif /* __CHECKER__ */
++
++/*
++ * Mark a position in code as unreachable. This can be used to
++ * suppress control flow warnings after asm blocks that transfer
++ * control elsewhere.
++ *
++ * Early snapshots of gcc 4.5 don't support this and we can't detect
++ * this in the preprocessor, but we can live with this because they're
++ * unreleased. Really, we need to have autoconf for the kernel.
++ */
++#define unreachable() __builtin_unreachable()
++
++/* Mark a function definition as prohibited from being cloned. */
++#define __noclone __attribute__((__noclone__))
++
++/*
++ * Tell the optimizer that something else uses this function or variable.
++ */
++#define __visible __attribute__((externally_visible))
++
++/*
++ * GCC 'asm goto' miscompiles certain code sequences:
++ *
++ * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
++ *
++ * Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
++ * Fixed in GCC 4.8.2 and later versions.
++ *
++ * (asm goto is automatically volatile - the naming reflects this.)
++ */
++#define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)
++
++#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
++#define __HAVE_BUILTIN_BSWAP32__
++#define __HAVE_BUILTIN_BSWAP64__
++#define __HAVE_BUILTIN_BSWAP16__
++#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
+diff --git a/include/linux/compiler-intel.h b/include/linux/compiler-intel.h
+index cba9593c4047..1a97cac7dcb2 100644
+--- a/include/linux/compiler-intel.h
++++ b/include/linux/compiler-intel.h
+@@ -15,6 +15,7 @@
+ */
+ #undef barrier
+ #undef RELOC_HIDE
++#undef OPTIMIZER_HIDE_VAR
+
+ #define barrier() __memory_barrier()
+
+@@ -23,6 +24,12 @@
+ __ptr = (unsigned long) (ptr); \
+ (typeof(ptr)) (__ptr + (off)); })
+
++/* This should act as an optimization barrier on var.
++ * Given that this compiler does not have inline assembly, a compiler barrier
++ * is the best we can do.
++ */
++#define OPTIMIZER_HIDE_VAR(var) barrier()
++
+ /* Intel ECC compiler doesn't support __builtin_types_compatible_p() */
+ #define __must_be_array(a) 0
+
+diff --git a/include/linux/init_task.h b/include/linux/init_task.h
+index e7bafa432aa3..b11e298e989f 100644
+--- a/include/linux/init_task.h
++++ b/include/linux/init_task.h
+@@ -38,6 +38,7 @@ extern struct fs_struct init_fs;
+
+ #define INIT_SIGNALS(sig) { \
+ .nr_threads = 1, \
++ .thread_head = LIST_HEAD_INIT(init_task.thread_node), \
+ .wait_chldexit = __WAIT_QUEUE_HEAD_INITIALIZER(sig.wait_chldexit),\
+ .shared_pending = { \
+ .list = LIST_HEAD_INIT(sig.shared_pending.list), \
+@@ -202,6 +203,7 @@ extern struct task_group root_task_group;
+ [PIDTYPE_SID] = INIT_PID_LINK(PIDTYPE_SID), \
+ }, \
+ .thread_group = LIST_HEAD_INIT(tsk.thread_group), \
++ .thread_node = LIST_HEAD_INIT(init_signals.thread_head), \
+ INIT_IDS \
+ INIT_PERF_EVENTS(tsk) \
+ INIT_TRACE_IRQFLAGS \
+diff --git a/include/linux/khugepaged.h b/include/linux/khugepaged.h
+index 6b394f0b5148..eeb307985715 100644
+--- a/include/linux/khugepaged.h
++++ b/include/linux/khugepaged.h
+@@ -6,7 +6,8 @@
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ extern int __khugepaged_enter(struct mm_struct *mm);
+ extern void __khugepaged_exit(struct mm_struct *mm);
+-extern int khugepaged_enter_vma_merge(struct vm_area_struct *vma);
++extern int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
++ unsigned long vm_flags);
+
+ #define khugepaged_enabled() \
+ (transparent_hugepage_flags & \
+@@ -35,13 +36,13 @@ static inline void khugepaged_exit(struct mm_struct *mm)
+ __khugepaged_exit(mm);
+ }
+
+-static inline int khugepaged_enter(struct vm_area_struct *vma)
++static inline int khugepaged_enter(struct vm_area_struct *vma,
++ unsigned long vm_flags)
+ {
+ if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags))
+ if ((khugepaged_always() ||
+- (khugepaged_req_madv() &&
+- vma->vm_flags & VM_HUGEPAGE)) &&
+- !(vma->vm_flags & VM_NOHUGEPAGE))
++ (khugepaged_req_madv() && (vm_flags & VM_HUGEPAGE))) &&
++ !(vm_flags & VM_NOHUGEPAGE))
+ if (__khugepaged_enter(vma->vm_mm))
+ return -ENOMEM;
+ return 0;
+@@ -54,11 +55,13 @@ static inline int khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
+ static inline void khugepaged_exit(struct mm_struct *mm)
+ {
+ }
+-static inline int khugepaged_enter(struct vm_area_struct *vma)
++static inline int khugepaged_enter(struct vm_area_struct *vma,
++ unsigned long vm_flags)
+ {
+ return 0;
+ }
+-static inline int khugepaged_enter_vma_merge(struct vm_area_struct *vma)
++static inline int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
++ unsigned long vm_flags)
+ {
+ return 0;
+ }
+diff --git a/include/linux/mm.h b/include/linux/mm.h
+index dbca4b21b7d3..656b4e968991 100644
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -953,6 +953,7 @@ static inline void unmap_shared_mapping_range(struct address_space *mapping,
+
+ extern void truncate_pagecache(struct inode *inode, loff_t old, loff_t new);
+ extern void truncate_setsize(struct inode *inode, loff_t newsize);
++void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
+ extern int vmtruncate(struct inode *inode, loff_t offset);
+ extern int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end);
+ void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
+diff --git a/include/linux/of.h b/include/linux/of.h
+index fa7fb1d97458..ac58796df055 100644
+--- a/include/linux/of.h
++++ b/include/linux/of.h
+@@ -212,14 +212,12 @@ extern int of_property_read_u64(const struct device_node *np,
+ extern int of_property_read_string(struct device_node *np,
+ const char *propname,
+ const char **out_string);
+-extern int of_property_read_string_index(struct device_node *np,
+- const char *propname,
+- int index, const char **output);
+ extern int of_property_match_string(struct device_node *np,
+ const char *propname,
+ const char *string);
+-extern int of_property_count_strings(struct device_node *np,
+- const char *propname);
++extern int of_property_read_string_helper(struct device_node *np,
++ const char *propname,
++ const char **out_strs, size_t sz, int index);
+ extern int of_device_is_compatible(const struct device_node *device,
+ const char *);
+ extern int of_device_is_available(const struct device_node *device);
+@@ -304,15 +302,9 @@ static inline int of_property_read_string(struct device_node *np,
+ return -ENOSYS;
+ }
+
+-static inline int of_property_read_string_index(struct device_node *np,
+- const char *propname, int index,
+- const char **out_string)
+-{
+- return -ENOSYS;
+-}
+-
+-static inline int of_property_count_strings(struct device_node *np,
+- const char *propname)
++static inline int of_property_read_string_helper(struct device_node *np,
++ const char *propname,
++ const char **out_strs, size_t sz, int index)
+ {
+ return -ENOSYS;
+ }
+@@ -352,6 +344,70 @@ static inline int of_machine_is_compatible(const char *compat)
+ #endif /* CONFIG_OF */
+
+ /**
++ * of_property_read_string_array() - Read an array of strings from a multiple
++ * strings property.
++ * @np: device node from which the property value is to be read.
++ * @propname: name of the property to be searched.
++ * @out_strs: output array of string pointers.
++ * @sz: number of array elements to read.
++ *
++ * Search for a property in a device tree node and retrieve a list of
++ * null-terminated string values (pointer to data, not a copy) in that property.
++ *
++ * If @out_strs is NULL, the number of strings in the property is returned.
++ */
++static inline int of_property_read_string_array(struct device_node *np,
++ const char *propname, const char **out_strs,
++ size_t sz)
++{
++ return of_property_read_string_helper(np, propname, out_strs, sz, 0);
++}
++
++/**
++ * of_property_count_strings() - Find and return the number of strings from a
++ * multiple strings property.
++ * @np: device node from which the property value is to be read.
++ * @propname: name of the property to be searched.
++ *
++ * Search for a property in a device tree node and retrieve the number of
++ * null-terminated strings contained in it. Returns the number of strings on
++ * success, -EINVAL if the property does not exist, -ENODATA if the property
++ * does not have a value, and -EILSEQ if the string is not null-terminated
++ * within the length of the property data.
++ */
++static inline int of_property_count_strings(struct device_node *np,
++ const char *propname)
++{
++ return of_property_read_string_helper(np, propname, NULL, 0, 0);
++}
++
++/**
++ * of_property_read_string_index() - Find and read a string from a multiple
++ * strings property.
++ * @np: device node from which the property value is to be read.
++ * @propname: name of the property to be searched.
++ * @index: index of the string in the list of strings
++ * @out_string: pointer to null terminated return string, modified only if
++ * return value is 0.
++ *
++ * Search for a property in a device tree node and retrieve a null
++ * terminated string value (pointer to data, not a copy) in the list of strings
++ * contained in that property.
++ * Returns 0 on success, -EINVAL if the property does not exist, -ENODATA if
++ * property does not have a value, and -EILSEQ if the string is not
++ * null-terminated within the length of the property data.
++ *
++ * The out_string pointer is modified only if a valid string can be decoded.
++ */
++static inline int of_property_read_string_index(struct device_node *np,
++ const char *propname,
++ int index, const char **output)
++{
++ int rc = of_property_read_string_helper(np, propname, output, 1, index);
++ return rc < 0 ? rc : 0;
++}
++
++/**
++ * of_property_read_bool - Find a property
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
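Editorial aside: the three wrappers above all funnel into the one new parser. A hedged usage sketch, as they might be called from a driver probe path ("clock-names" and the surrounding logic are illustrative assumptions, not part of this patch):

    #include <linux/of.h>
    #include <linux/kernel.h>

    static int parse_clock_names(struct device_node *np)
    {
        const char *names[4];
        const char *first;
        int n, i;

        n = of_property_count_strings(np, "clock-names");
        if (n < 0)
            return n;                       /* -EINVAL, -ENODATA or -EILSEQ */

        /* reads up to 4 pointers into the property data (no copies) */
        n = of_property_read_string_array(np, "clock-names", names,
                                          min(n, 4));
        if (n < 0)
            return n;

        for (i = 0; i < n; i++)
            pr_info("clock %d: %s\n", i, names[i]);

        /* indexed lookup is the same helper with a nonzero skip count */
        return of_property_read_string_index(np, "clock-names", 0, &first);
    }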
+diff --git a/include/linux/oom.h b/include/linux/oom.h
+index 3d7647536b03..d6ed7b05e31c 100644
+--- a/include/linux/oom.h
++++ b/include/linux/oom.h
+@@ -45,6 +45,10 @@ extern int test_set_oom_score_adj(int new_val);
+
+ extern unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
+ const nodemask_t *nodemask, unsigned long totalpages);
++
++extern int oom_kills_count(void);
++extern void note_oom_kill(void);
++
+ extern int try_set_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_flags);
+ extern void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_flags);
+
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 56d8233c5de7..d529bd9e6680 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -534,6 +534,7 @@ struct signal_struct {
+ atomic_t sigcnt;
+ atomic_t live;
+ int nr_threads;
++ struct list_head thread_head;
+
+ wait_queue_head_t wait_chldexit; /* for wait4() */
+
+@@ -1394,6 +1395,7 @@ struct task_struct {
+ /* PID/PID hash table linkage. */
+ struct pid_link pids[PIDTYPE_MAX];
+ struct list_head thread_group;
++ struct list_head thread_node;
+
+ struct completion *vfork_done; /* for vfork() */
+ int __user *set_child_tid; /* CLONE_CHILD_SETTID */
+@@ -2397,6 +2399,16 @@ extern bool current_is_single_threaded(void);
+ #define while_each_thread(g, t) \
+ while ((t = next_thread(t)) != g)
+
++#define __for_each_thread(signal, t) \
++ list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node)
++
++#define for_each_thread(p, t) \
++ __for_each_thread((p)->signal, t)
++
++/* Careful: this is a double loop, 'break' won't work as expected. */
++#define for_each_process_thread(p, t) \
++ for_each_process(p) for_each_thread(p, t)
++
+ static inline int get_nr_threads(struct task_struct *tsk)
+ {
+ return tsk->signal->nr_threads;
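Editorial aside: a short sketch of the new iterators in use; the check_frozen_processes() hunk later in this patch is the real consumer. tasklist_lock or RCU must be held, and as the comment warns, a break only leaves the inner loop:

    #include <linux/sched.h>
    #include <linux/rcupdate.h>

    static int count_all_threads(void)
    {
        struct task_struct *g, *t;
        int n = 0;

        rcu_read_lock();
        for_each_process_thread(g, t)
            n++;    /* a 'break' here would only stop this process's threads */
        rcu_read_unlock();
        return n;
    }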
+diff --git a/include/linux/string.h b/include/linux/string.h
+index e033564f10ba..3d9feb70dc20 100644
+--- a/include/linux/string.h
++++ b/include/linux/string.h
+@@ -133,7 +133,7 @@ int bprintf(u32 *bin_buf, size_t size, const char *fmt, ...) __printf(3, 4);
+ #endif
+
+ extern ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos,
+- const void *from, size_t available);
++ const void *from, size_t available);
+
+ /**
+ * strstarts - does @str start with @prefix?
+@@ -144,5 +144,7 @@ static inline bool strstarts(const char *str, const char *prefix)
+ {
+ return strncmp(str, prefix, strlen(prefix)) == 0;
+ }
++
++void memzero_explicit(void *s, size_t count);
+ #endif
+ #endif /* _LINUX_STRING_H_ */
+diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h
+index 3e93de7ecbc3..d0d2af09dcc6 100644
+--- a/include/linux/usb/quirks.h
++++ b/include/linux/usb/quirks.h
+@@ -30,4 +30,10 @@
+ descriptor */
+ #define USB_QUIRK_DELAY_INIT 0x00000040
+
++/* device generates spurious wakeup, ignore remote wakeup capability */
++#define USB_QUIRK_IGNORE_REMOTE_WAKEUP 0x00000200
++
++/* device can't handle device_qualifier descriptor requests */
++#define USB_QUIRK_DEVICE_QUALIFIER 0x00000100
++
+ #endif /* __LINUX_USB_QUIRKS_H */
+diff --git a/include/net/ipv6.h b/include/net/ipv6.h
+index 117eaa578d0d..8898a191929a 100644
+--- a/include/net/ipv6.h
++++ b/include/net/ipv6.h
+@@ -485,6 +485,7 @@ static inline int ipv6_addr_diff(const struct in6_addr *a1, const struct in6_add
+ }
+
+ extern void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt);
++void ipv6_proxy_select_ident(struct sk_buff *skb);
+
+ /*
+ * Prototypes exported by ipv6
+diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
+index 0caf1f8de0fb..8a142844318f 100644
+--- a/kernel/audit_tree.c
++++ b/kernel/audit_tree.c
+@@ -154,6 +154,7 @@ static struct audit_chunk *alloc_chunk(int count)
+ chunk->owners[i].index = i;
+ }
+ fsnotify_init_mark(&chunk->mark, audit_tree_destroy_watch);
++ chunk->mark.mask = FS_IN_IGNORED;
+ return chunk;
+ }
+
+diff --git a/kernel/exit.c b/kernel/exit.c
+index 3eb4dcfc658a..38980ea0b2d6 100644
+--- a/kernel/exit.c
++++ b/kernel/exit.c
+@@ -74,6 +74,7 @@ static void __unhash_process(struct task_struct *p, bool group_dead)
+ __this_cpu_dec(process_counts);
+ }
+ list_del_rcu(&p->thread_group);
++ list_del_rcu(&p->thread_node);
+ }
+
+ /*
+diff --git a/kernel/fork.c b/kernel/fork.c
+index 878dcb2eec55..be2495f0eed7 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -1026,6 +1026,11 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
+ sig->nr_threads = 1;
+ atomic_set(&sig->live, 1);
+ atomic_set(&sig->sigcnt, 1);
++
++ /* list_add(thread_node, thread_head) without INIT_LIST_HEAD() */
++ sig->thread_head = (struct list_head)LIST_HEAD_INIT(tsk->thread_node);
++ tsk->thread_node = (struct list_head)LIST_HEAD_INIT(sig->thread_head);
++
+ init_waitqueue_head(&sig->wait_chldexit);
+ if (clone_flags & CLONE_NEWPID)
+ sig->flags |= SIGNAL_UNKILLABLE;
+@@ -1412,14 +1417,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
+ goto bad_fork_free_pid;
+ }
+
+- if (clone_flags & CLONE_THREAD) {
+- current->signal->nr_threads++;
+- atomic_inc(&current->signal->live);
+- atomic_inc(&current->signal->sigcnt);
+- p->group_leader = current->group_leader;
+- list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);
+- }
+-
+ if (likely(p->pid)) {
+ ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
+
+@@ -1434,6 +1431,15 @@ static struct task_struct *copy_process(unsigned long clone_flags,
+ list_add_tail(&p->sibling, &p->real_parent->children);
+ list_add_tail_rcu(&p->tasks, &init_task.tasks);
+ __this_cpu_inc(process_counts);
++ } else {
++ current->signal->nr_threads++;
++ atomic_inc(&current->signal->live);
++ atomic_inc(&current->signal->sigcnt);
++ p->group_leader = current->group_leader;
++ list_add_tail_rcu(&p->thread_group,
++ &p->group_leader->thread_group);
++ list_add_tail_rcu(&p->thread_node,
++ &p->signal->thread_head);
+ }
+ attach_pid(p, PIDTYPE_PID, pid);
+ nr_threads++;
+diff --git a/kernel/freezer.c b/kernel/freezer.c
+index 11f82a4d4eae..2f8ecd994d47 100644
+--- a/kernel/freezer.c
++++ b/kernel/freezer.c
+@@ -36,6 +36,9 @@ bool freezing_slow_path(struct task_struct *p)
+ if (p->flags & PF_NOFREEZE)
+ return false;
+
++ if (test_thread_flag(TIF_MEMDIE))
++ return false;
++
+ if (pm_nosig_freezing || cgroup_freezing(p))
+ return true;
+
+diff --git a/kernel/futex.c b/kernel/futex.c
+index 41dfb1866f95..6b320c2ad6fa 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -212,6 +212,8 @@ static void drop_futex_key_refs(union futex_key *key)
+ case FUT_OFF_MMSHARED:
+ mmdrop(key->private.mm);
+ break;
++ default:
++ smp_mb(); /* explicit MB (B) */
+ }
+ }
+
+@@ -484,8 +486,14 @@ static struct futex_pi_state * alloc_pi_state(void)
+ return pi_state;
+ }
+
++/*
++ * Must be called with the hb lock held.
++ */
+ static void free_pi_state(struct futex_pi_state *pi_state)
+ {
++ if (!pi_state)
++ return;
++
+ if (!atomic_dec_and_test(&pi_state->refcount))
+ return;
+
+@@ -1399,15 +1407,6 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
+ }
+
+ retry:
+- if (pi_state != NULL) {
+- /*
+- * We will have to lookup the pi_state again, so free this one
+- * to keep the accounting correct.
+- */
+- free_pi_state(pi_state);
+- pi_state = NULL;
+- }
+-
+ ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ);
+ if (unlikely(ret != 0))
+ goto out;
+@@ -1495,6 +1494,8 @@ retry_private:
+ case 0:
+ break;
+ case -EFAULT:
++ free_pi_state(pi_state);
++ pi_state = NULL;
+ double_unlock_hb(hb1, hb2);
+ put_futex_key(&key2);
+ put_futex_key(&key1);
+@@ -1504,6 +1505,8 @@ retry_private:
+ goto out;
+ case -EAGAIN:
+ /* The owner was exiting, try again. */
++ free_pi_state(pi_state);
++ pi_state = NULL;
+ double_unlock_hb(hb1, hb2);
+ put_futex_key(&key2);
+ put_futex_key(&key1);
+@@ -1580,6 +1583,7 @@ retry_private:
+ }
+
+ out_unlock:
++ free_pi_state(pi_state);
+ double_unlock_hb(hb1, hb2);
+
+ /*
+@@ -1596,8 +1600,6 @@ out_put_keys:
+ out_put_key1:
+ put_futex_key(&key1);
+ out:
+- if (pi_state != NULL)
+- free_pi_state(pi_state);
+ return ret ? ret : task_count;
+ }
+
+diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
+index e885be1e8a11..02824a5c2693 100644
+--- a/kernel/posix-timers.c
++++ b/kernel/posix-timers.c
+@@ -589,6 +589,7 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
+ goto out;
+ }
+ } else {
++ memset(&event.sigev_value, 0, sizeof(event.sigev_value));
+ event.sigev_notify = SIGEV_SIGNAL;
+ event.sigev_signo = SIGALRM;
+ event.sigev_value.sival_int = new_timer->it_id;
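Editorial aside: the added memset() closes a stack infoleak. sigev_value is a union, and setting only sival_int leaves the remaining bytes of the larger sival_ptr member holding stale kernel stack data that can later reach user space. A standalone sketch of the pattern:

    #include <string.h>

    union example_value {
        int   sival_int;
        void *sival_ptr;
    };

    static void init_value(union example_value *v, int id)
    {
        memset(v, 0, sizeof(*v));   /* clear sival_ptr's high bytes too */
        v->sival_int = id;          /* now only the low 4 bytes are set */
    }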
+diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
+index 52a18173c845..dd875cbe0d1a 100644
+--- a/kernel/power/hibernate.c
++++ b/kernel/power/hibernate.c
+@@ -486,8 +486,14 @@ int hibernation_restore(int platform_mode)
+ error = dpm_suspend_start(PMSG_QUIESCE);
+ if (!error) {
+ error = resume_target_kernel(platform_mode);
+- dpm_resume_end(PMSG_RECOVER);
++ /*
++ * The above should either succeed and jump to the new kernel,
++ * or return with an error. Otherwise things are just
++ * undefined, so let's be paranoid.
++ */
++ BUG_ON(!error);
+ }
++ dpm_resume_end(PMSG_RECOVER);
+ pm_restore_gfp_mask();
+ ftrace_start();
+ resume_console();
+diff --git a/kernel/power/process.c b/kernel/power/process.c
+index f27d0c8cd9e8..286ff570f3b7 100644
+--- a/kernel/power/process.c
++++ b/kernel/power/process.c
+@@ -114,6 +114,28 @@ static int try_to_freeze_tasks(bool user_only)
+ return todo ? -EBUSY : 0;
+ }
+
++/*
++ * Returns true if all freezable tasks (except for current) are frozen already
++ */
++static bool check_frozen_processes(void)
++{
++ struct task_struct *g, *p;
++ bool ret = true;
++
++ read_lock(&tasklist_lock);
++ for_each_process_thread(g, p) {
++ if (p != current && !freezer_should_skip(p) &&
++ !frozen(p)) {
++ ret = false;
++ goto done;
++ }
++ }
++done:
++ read_unlock(&tasklist_lock);
++
++ return ret;
++}
++
+ /**
+ * freeze_processes - Signal user space processes to enter the refrigerator.
+ *
+@@ -122,6 +144,7 @@ static int try_to_freeze_tasks(bool user_only)
+ int freeze_processes(void)
+ {
+ int error;
++ int oom_kills_saved;
+
+ error = __usermodehelper_disable(UMH_FREEZING);
+ if (error)
+@@ -132,12 +155,27 @@ int freeze_processes(void)
+
+ printk("Freezing user space processes ... ");
+ pm_freezing = true;
++ oom_kills_saved = oom_kills_count();
+ error = try_to_freeze_tasks(true);
+ if (!error) {
+- printk("done.");
+ __usermodehelper_set_disable_depth(UMH_DISABLED);
+ oom_killer_disable();
++
++ /*
++ * There might have been an OOM kill while we were
++ * freezing tasks and the killed task might still be
++ * on the way out, so we have to double check for the race.
++ */
++ if (oom_kills_count() != oom_kills_saved &&
++ !check_frozen_processes()) {
++ __usermodehelper_set_disable_depth(UMH_ENABLED);
++ printk("OOM in progress.");
++ error = -EBUSY;
++ goto done;
++ }
++ printk("done.");
+ }
++done:
+ printk("\n");
+ BUG_ON(in_atomic());
+
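Editorial aside: the freezer change above is a snapshot-and-compare pattern: cheaply record a global event counter before the slow operation, and run the expensive re-scan only if the counter moved. A rough standalone analogue using C11 atomics (names are illustrative):

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_int oom_kills;

    /* slow work; the counter may move while it runs */
    static void freeze_tasks(void) { }
    static bool recheck_all_frozen(void) { return true; }

    static bool freeze_all(void)
    {
        int saved = atomic_load(&oom_kills);

        freeze_tasks();

        /* only pay for the full re-check when a kill raced with us */
        if (atomic_load(&oom_kills) != saved)
            return recheck_all_frozen();
        return true;
    }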
+diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
+index c9ce09addacd..4d0a209ecfda 100644
+--- a/kernel/trace/trace_syscalls.c
++++ b/kernel/trace/trace_syscalls.c
+@@ -309,7 +309,7 @@ void ftrace_syscall_enter(void *ignore, struct pt_regs *regs, long id)
+ int pc;
+
+ syscall_nr = syscall_get_nr(current, regs);
+- if (syscall_nr < 0)
++ if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
+ return;
+ if (!test_bit(syscall_nr, enabled_enter_syscalls))
+ return;
+@@ -349,7 +349,7 @@ void ftrace_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
+ int pc;
+
+ syscall_nr = syscall_get_nr(current, regs);
+- if (syscall_nr < 0)
++ if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
+ return;
+ if (!test_bit(syscall_nr, enabled_exit_syscalls))
+ return;
+@@ -519,6 +519,8 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
+ int size;
+
+ syscall_nr = syscall_get_nr(current, regs);
++ if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
++ return;
+ if (!test_bit(syscall_nr, enabled_perf_enter_syscalls))
+ return;
+
+@@ -593,6 +595,8 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
+ int size;
+
+ syscall_nr = syscall_get_nr(current, regs);
++ if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
++ return;
+ if (!test_bit(syscall_nr, enabled_perf_exit_syscalls))
+ return;
+
+diff --git a/lib/bitmap.c b/lib/bitmap.c
+index b5a8b6ad2454..6ccf2120b406 100644
+--- a/lib/bitmap.c
++++ b/lib/bitmap.c
+@@ -131,7 +131,9 @@ void __bitmap_shift_right(unsigned long *dst,
+ lower = src[off + k];
+ if (left && off + k == lim - 1)
+ lower &= mask;
+- dst[k] = upper << (BITS_PER_LONG - rem) | lower >> rem;
++ dst[k] = lower >> rem;
++ if (rem)
++ dst[k] |= upper << (BITS_PER_LONG - rem);
+ if (left && k == lim - 1)
+ dst[k] &= mask;
+ }
+@@ -172,7 +174,9 @@ void __bitmap_shift_left(unsigned long *dst,
+ upper = src[k];
+ if (left && k == lim - 1)
+ upper &= (1UL << left) - 1;
+- dst[k + off] = lower >> (BITS_PER_LONG - rem) | upper << rem;
++ dst[k + off] = upper << rem;
++ if (rem)
++ dst[k + off] |= lower >> (BITS_PER_LONG - rem);
+ if (left && k + off == lim - 1)
+ dst[k + off] &= (1UL << left) - 1;
+ }
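Editorial aside: the `if (rem)` guards matter because shifting an unsigned long by BITS_PER_LONG is undefined behaviour in C; on x86 the CPU masks the shift count, so `upper << (64 - 0)` leaves upper unchanged instead of yielding 0 and corrupts the rem == 0 case. Standalone sketch:

    #include <stdio.h>

    #define BITS_PER_LONG (8 * (int)sizeof(long))

    int main(void)
    {
        unsigned long upper = ~0UL, lower = 0x1UL;
        unsigned int rem = 0;           /* shift by a whole-word multiple */
        unsigned long dst;

        /* old form: dst = upper << (BITS_PER_LONG - rem) | lower >> rem;
         * is undefined when rem == 0 (shift count == word size)         */
        dst = lower >> rem;
        if (rem)
            dst |= upper << (BITS_PER_LONG - rem);

        printf("%#lx\n", dst);          /* 0x1, as intended */
        return 0;
    }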
+diff --git a/lib/lzo/lzo1x_decompress_safe.c b/lib/lzo/lzo1x_decompress_safe.c
+index 8563081e8da3..a1c387f6afba 100644
+--- a/lib/lzo/lzo1x_decompress_safe.c
++++ b/lib/lzo/lzo1x_decompress_safe.c
+@@ -19,31 +19,21 @@
+ #include <linux/lzo.h>
+ #include "lzodefs.h"
+
+-#define HAVE_IP(t, x) \
+- (((size_t)(ip_end - ip) >= (size_t)(t + x)) && \
+- (((t + x) >= t) && ((t + x) >= x)))
++#define HAVE_IP(x) ((size_t)(ip_end - ip) >= (size_t)(x))
++#define HAVE_OP(x) ((size_t)(op_end - op) >= (size_t)(x))
++#define NEED_IP(x) if (!HAVE_IP(x)) goto input_overrun
++#define NEED_OP(x) if (!HAVE_OP(x)) goto output_overrun
++#define TEST_LB(m_pos) if ((m_pos) < out) goto lookbehind_overrun
+
+-#define HAVE_OP(t, x) \
+- (((size_t)(op_end - op) >= (size_t)(t + x)) && \
+- (((t + x) >= t) && ((t + x) >= x)))
+-
+-#define NEED_IP(t, x) \
+- do { \
+- if (!HAVE_IP(t, x)) \
+- goto input_overrun; \
+- } while (0)
+-
+-#define NEED_OP(t, x) \
+- do { \
+- if (!HAVE_OP(t, x)) \
+- goto output_overrun; \
+- } while (0)
+-
+-#define TEST_LB(m_pos) \
+- do { \
+- if ((m_pos) < out) \
+- goto lookbehind_overrun; \
+- } while (0)
++/* This MAX_255_COUNT is the maximum number of times we can add 255 to a base
++ * count without overflowing an integer. The multiply will overflow when
++ * multiplying 255 by more than MAXINT/255. The sum will overflow earlier
++ * depending on the base count. Since the base count is taken from a u8
++ * and a few bits, it is safe to assume that it will always be lower than
++ * or equal to 2*255, thus we can always prevent any overflow by accepting
++ * two less 255 steps. See Documentation/lzo.txt for more information.
++ */
++#define MAX_255_COUNT ((((size_t)~0) / 255) - 2)
+
+ int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
+ unsigned char *out, size_t *out_len)
+@@ -75,17 +65,24 @@ int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
+ if (t < 16) {
+ if (likely(state == 0)) {
+ if (unlikely(t == 0)) {
++ size_t offset;
++ const unsigned char *ip_last = ip;
++
+ while (unlikely(*ip == 0)) {
+- t += 255;
+ ip++;
+- NEED_IP(1, 0);
++ NEED_IP(1);
+ }
+- t += 15 + *ip++;
++ offset = ip - ip_last;
++ if (unlikely(offset > MAX_255_COUNT))
++ return LZO_E_ERROR;
++
++ offset = (offset << 8) - offset;
++ t += offset + 15 + *ip++;
+ }
+ t += 3;
+ copy_literal_run:
+ #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+- if (likely(HAVE_IP(t, 15) && HAVE_OP(t, 15))) {
++ if (likely(HAVE_IP(t + 15) && HAVE_OP(t + 15))) {
+ const unsigned char *ie = ip + t;
+ unsigned char *oe = op + t;
+ do {
+@@ -101,8 +98,8 @@ copy_literal_run:
+ } else
+ #endif
+ {
+- NEED_OP(t, 0);
+- NEED_IP(t, 3);
++ NEED_OP(t);
++ NEED_IP(t + 3);
+ do {
+ *op++ = *ip++;
+ } while (--t > 0);
+@@ -115,7 +112,7 @@ copy_literal_run:
+ m_pos -= t >> 2;
+ m_pos -= *ip++ << 2;
+ TEST_LB(m_pos);
+- NEED_OP(2, 0);
++ NEED_OP(2);
+ op[0] = m_pos[0];
+ op[1] = m_pos[1];
+ op += 2;
+@@ -136,13 +133,20 @@ copy_literal_run:
+ } else if (t >= 32) {
+ t = (t & 31) + (3 - 1);
+ if (unlikely(t == 2)) {
++ size_t offset;
++ const unsigned char *ip_last = ip;
++
+ while (unlikely(*ip == 0)) {
+- t += 255;
+ ip++;
+- NEED_IP(1, 0);
++ NEED_IP(1);
+ }
+- t += 31 + *ip++;
+- NEED_IP(2, 0);
++ offset = ip - ip_last;
++ if (unlikely(offset > MAX_255_COUNT))
++ return LZO_E_ERROR;
++
++ offset = (offset << 8) - offset;
++ t += offset + 31 + *ip++;
++ NEED_IP(2);
+ }
+ m_pos = op - 1;
+ next = get_unaligned_le16(ip);
+@@ -154,13 +158,20 @@ copy_literal_run:
+ m_pos -= (t & 8) << 11;
+ t = (t & 7) + (3 - 1);
+ if (unlikely(t == 2)) {
++ size_t offset;
++ const unsigned char *ip_last = ip;
++
+ while (unlikely(*ip == 0)) {
+- t += 255;
+ ip++;
+- NEED_IP(1, 0);
++ NEED_IP(1);
+ }
+- t += 7 + *ip++;
+- NEED_IP(2, 0);
++ offset = ip - ip_last;
++ if (unlikely(offset > MAX_255_COUNT))
++ return LZO_E_ERROR;
++
++ offset = (offset << 8) - offset;
++ t += offset + 7 + *ip++;
++ NEED_IP(2);
+ }
+ next = get_unaligned_le16(ip);
+ ip += 2;
+@@ -174,7 +185,7 @@ copy_literal_run:
+ #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+ if (op - m_pos >= 8) {
+ unsigned char *oe = op + t;
+- if (likely(HAVE_OP(t, 15))) {
++ if (likely(HAVE_OP(t + 15))) {
+ do {
+ COPY8(op, m_pos);
+ op += 8;
+@@ -184,7 +195,7 @@ copy_literal_run:
+ m_pos += 8;
+ } while (op < oe);
+ op = oe;
+- if (HAVE_IP(6, 0)) {
++ if (HAVE_IP(6)) {
+ state = next;
+ COPY4(op, ip);
+ op += next;
+@@ -192,7 +203,7 @@ copy_literal_run:
+ continue;
+ }
+ } else {
+- NEED_OP(t, 0);
++ NEED_OP(t);
+ do {
+ *op++ = *m_pos++;
+ } while (op < oe);
+@@ -201,7 +212,7 @@ copy_literal_run:
+ #endif
+ {
+ unsigned char *oe = op + t;
+- NEED_OP(t, 0);
++ NEED_OP(t);
+ op[0] = m_pos[0];
+ op[1] = m_pos[1];
+ op += 2;
+@@ -214,15 +225,15 @@ match_next:
+ state = next;
+ t = next;
+ #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+- if (likely(HAVE_IP(6, 0) && HAVE_OP(4, 0))) {
++ if (likely(HAVE_IP(6) && HAVE_OP(4))) {
+ COPY4(op, ip);
+ op += t;
+ ip += t;
+ } else
+ #endif
+ {
+- NEED_IP(t, 3);
+- NEED_OP(t, 0);
++ NEED_IP(t + 3);
++ NEED_OP(t);
+ while (t > 0) {
+ *op++ = *ip++;
+ t--;
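Editorial aside: all three run-length sites above stop incrementing t by 255 per zero byte, which can overflow, and instead count the zero bytes first, bound the count with MAX_255_COUNT, then form count*255 in one step as (count << 8) - count. A standalone sketch of that decode step (function and buffer names are illustrative):

    #include <stddef.h>
    #include <stdio.h>

    #define MAX_255_COUNT ((((size_t)~0) / 255) - 2)

    /* Decode one counted run; 'base' is 15, 31 or 7 depending on the opcode.
     * Returns 0 on overflow or truncated input. */
    static size_t decode_run(const unsigned char **pip,
                             const unsigned char *end, size_t base)
    {
        const unsigned char *ip = *pip, *ip_last = ip;
        size_t count;

        while (ip < end && *ip == 0)
            ip++;
        if (ip == end)
            return 0;

        count = (size_t)(ip - ip_last);
        if (count > MAX_255_COUNT)
            return 0;

        count = (count << 8) - count;   /* count * 255, no overflow possible */
        *pip = ip + 1;
        return count + base + ip[0];
    }

    int main(void)
    {
        const unsigned char buf[] = { 0, 0, 42 };
        const unsigned char *p = buf;

        printf("%zu\n", decode_run(&p, buf + sizeof(buf), 15));  /* 567 */
        return 0;
    }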
+diff --git a/lib/string.c b/lib/string.c
+index e5878de4f101..43d0781daf47 100644
+--- a/lib/string.c
++++ b/lib/string.c
+@@ -586,6 +586,22 @@ void *memset(void *s, int c, size_t count)
+ EXPORT_SYMBOL(memset);
+ #endif
+
++/**
++ * memzero_explicit - Fill a region of memory (e.g. sensitive
++ * keying data) with 0s.
++ * @s: Pointer to the start of the area.
++ * @count: The size of the area.
++ *
++ * memzero_explicit() doesn't need an arch-specific version as
++ * it just invokes the architecture's memset() implementation.
++ */
++void memzero_explicit(void *s, size_t count)
++{
++ memset(s, 0, count);
++ OPTIMIZER_HIDE_VAR(s);
++}
++EXPORT_SYMBOL(memzero_explicit);
++
+ #ifndef __HAVE_ARCH_MEMCPY
+ /**
+ * memcpy - Copy one area of memory to another
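Editorial aside: typical intended use of the new helper is wiping key material in an object that is about to die, where a plain memset() is exactly the store a compiler may prove dead and delete. Hedged sketch:

    #include <linux/string.h>
    #include <linux/types.h>

    static void use_session_key(void)
    {
        u8 key[32];

        /* ... derive key and encrypt with it ... */

        memzero_explicit(key, sizeof(key)); /* survives dead-store elimination */
    }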
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index 3da5c0bff3b0..8978c1bf91e4 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -711,7 +711,7 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
+ if (haddr >= vma->vm_start && haddr + HPAGE_PMD_SIZE <= vma->vm_end) {
+ if (unlikely(anon_vma_prepare(vma)))
+ return VM_FAULT_OOM;
+- if (unlikely(khugepaged_enter(vma)))
++ if (unlikely(khugepaged_enter(vma, vma->vm_flags)))
+ return VM_FAULT_OOM;
+ page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
+ vma, haddr, numa_node_id(), 0);
+@@ -1505,7 +1505,7 @@ int hugepage_madvise(struct vm_area_struct *vma,
+ * register it here without waiting a page fault that
+ * may not happen any time soon.
+ */
+- if (unlikely(khugepaged_enter_vma_merge(vma)))
++ if (unlikely(khugepaged_enter_vma_merge(vma, *vm_flags)))
+ return -ENOMEM;
+ break;
+ case MADV_NOHUGEPAGE:
+@@ -1637,7 +1637,8 @@ int __khugepaged_enter(struct mm_struct *mm)
+ return 0;
+ }
+
+-int khugepaged_enter_vma_merge(struct vm_area_struct *vma)
++int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
++ unsigned long vm_flags)
+ {
+ unsigned long hstart, hend;
+ if (!vma->anon_vma)
+@@ -1653,11 +1654,11 @@ int khugepaged_enter_vma_merge(struct vm_area_struct *vma)
+ * If is_pfn_mapping() is true is_learn_pfn_mapping() must be
+ * true too, verify it here.
+ */
+- VM_BUG_ON(is_linear_pfn_mapping(vma) || vma->vm_flags & VM_NO_THP);
++ VM_BUG_ON(is_linear_pfn_mapping(vma) || vm_flags & VM_NO_THP);
+ hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
+ hend = vma->vm_end & HPAGE_PMD_MASK;
+ if (hstart < hend)
+- return khugepaged_enter(vma);
++ return khugepaged_enter(vma, vm_flags);
+ return 0;
+ }
+
+diff --git a/mm/memory.c b/mm/memory.c
+index ffd74f370e8d..c34e60a950aa 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -1164,8 +1164,10 @@ again:
+ if (unlikely(page_mapcount(page) < 0))
+ print_bad_pte(vma, addr, ptent, page);
+ force_flush = !__tlb_remove_page(tlb, page);
+- if (force_flush)
++ if (force_flush) {
++ addr += PAGE_SIZE;
+ break;
++ }
+ continue;
+ }
+ /*
+diff --git a/mm/mmap.c b/mm/mmap.c
+index 69367e43e52e..94fdbe8f3b99 100644
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -826,7 +826,7 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
+ end, prev->vm_pgoff, NULL);
+ if (err)
+ return NULL;
+- khugepaged_enter_vma_merge(prev);
++ khugepaged_enter_vma_merge(prev, vm_flags);
+ return prev;
+ }
+
+@@ -845,7 +845,7 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
+ next->vm_pgoff - pglen, NULL);
+ if (err)
+ return NULL;
+- khugepaged_enter_vma_merge(area);
++ khugepaged_enter_vma_merge(area, vm_flags);
+ return area;
+ }
+
+@@ -1820,7 +1820,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
+ }
+ }
+ vma_unlock_anon_vma(vma);
+- khugepaged_enter_vma_merge(vma);
++ khugepaged_enter_vma_merge(vma, vma->vm_flags);
+ return error;
+ }
+ #endif /* CONFIG_STACK_GROWSUP || CONFIG_IA64 */
+@@ -1871,7 +1871,7 @@ int expand_downwards(struct vm_area_struct *vma,
+ }
+ }
+ vma_unlock_anon_vma(vma);
+- khugepaged_enter_vma_merge(vma);
++ khugepaged_enter_vma_merge(vma, vma->vm_flags);
+ return error;
+ }
+
+diff --git a/mm/oom_kill.c b/mm/oom_kill.c
+index 597ecac5731a..cb1f046faa68 100644
+--- a/mm/oom_kill.c
++++ b/mm/oom_kill.c
+@@ -435,6 +435,23 @@ static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order,
+ dump_tasks(memcg, nodemask);
+ }
+
++/*
++ * Number of OOM killer invocations (including memcg OOM killer).
++ * Primarily used by PM freezer to check for potential races with
++ * OOM killed frozen task.
++ */
++static atomic_t oom_kills = ATOMIC_INIT(0);
++
++int oom_kills_count(void)
++{
++ return atomic_read(&oom_kills);
++}
++
++void note_oom_kill(void)
++{
++ atomic_inc(&oom_kills);
++}
++
+ #define K(x) ((x) << (PAGE_SHIFT-10))
+ static void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
+ unsigned int points, unsigned long totalpages,
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index ff0b0997b953..2891a9059f8a 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -1982,6 +1982,14 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
+ }
+
+ /*
++ * PM-freezer should be notified that there might be an OOM killer on
++ * its way to kill and wake somebody up. This is too early and we might
++ * end up not killing anything but false positives are acceptable.
++ * See freeze_processes.
++ */
++ note_oom_kill();
++
++ /*
+ * Go through the zonelist yet one more time, keep very high watermark
+ * here, this is only to catch a parallel oom killing, we must fail if
+ * we're still under heavy pressure.
+diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c
+index 1ccbd714059c..b7693fdc6bb5 100644
+--- a/mm/page_cgroup.c
++++ b/mm/page_cgroup.c
+@@ -170,6 +170,7 @@ static void free_page_cgroup(void *addr)
+ sizeof(struct page_cgroup) * PAGES_PER_SECTION;
+
+ BUG_ON(PageReserved(page));
++ kmemleak_free(addr);
+ free_pages_exact(addr, table_size);
+ }
+ }
+diff --git a/mm/percpu.c b/mm/percpu.c
+index 5f6042b61ca8..13b2eefabfdd 100644
+--- a/mm/percpu.c
++++ b/mm/percpu.c
+@@ -1907,8 +1907,6 @@ void __init setup_per_cpu_areas(void)
+
+ if (pcpu_setup_first_chunk(ai, fc) < 0)
+ panic("Failed to initialize percpu areas.");
+-
+- pcpu_free_alloc_info(ai);
+ }
+
+ #endif /* CONFIG_SMP */
+diff --git a/mm/truncate.c b/mm/truncate.c
+index f38055cb8af6..57625f7ed8e1 100644
+--- a/mm/truncate.c
++++ b/mm/truncate.c
+@@ -20,6 +20,7 @@
+ #include <linux/buffer_head.h> /* grr. try_to_release_page,
+ do_invalidatepage */
+ #include <linux/cleancache.h>
++#include <linux/rmap.h>
+ #include "internal.h"
+
+
+@@ -571,16 +572,70 @@ EXPORT_SYMBOL(truncate_pagecache);
+ */
+ void truncate_setsize(struct inode *inode, loff_t newsize)
+ {
+- loff_t oldsize;
+-
+- oldsize = inode->i_size;
++ loff_t oldsize = inode->i_size;
+ i_size_write(inode, newsize);
+
++ if (newsize > oldsize)
++ pagecache_isize_extended(inode, oldsize, newsize);
+ truncate_pagecache(inode, oldsize, newsize);
+ }
+ EXPORT_SYMBOL(truncate_setsize);
+
+ /**
++ * pagecache_isize_extended - update pagecache after extension of i_size
++ * @inode: inode for which i_size was extended
++ * @from: original inode size
++ * @to: new inode size
++ *
++ * Handle extension of inode size either caused by extending truncate or by
++ * write starting after current i_size. We mark the page straddling current
++ * i_size RO so that page_mkwrite() is called on the nearest write access to
++ * the page. This way the filesystem can be sure that page_mkwrite() is called on
++ * the page before user writes to the page via mmap after the i_size has been
++ * changed.
++ *
++ * The function must be called after i_size is updated so that page fault
++ * coming after we unlock the page will already see the new i_size.
++ * The function must be called while we still hold i_mutex - this not only
++ * makes sure i_size is stable but also that userspace cannot observe new
++ * i_size value before we are prepared to store mmap writes at new inode size.
++ */
++void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to)
++{
++ int bsize = 1 << inode->i_blkbits;
++ loff_t rounded_from;
++ struct page *page;
++ pgoff_t index;
++
++ WARN_ON(to > inode->i_size);
++
++ if (from >= to || bsize == PAGE_CACHE_SIZE)
++ return;
++ /* Page straddling @from will not have any hole block created? */
++ rounded_from = round_up(from, bsize);
++ if (to <= rounded_from || !(rounded_from & (PAGE_CACHE_SIZE - 1)))
++ return;
++
++ index = from >> PAGE_CACHE_SHIFT;
++ page = find_lock_page(inode->i_mapping, index);
++ /* Page not cached? Nothing to do */
++ if (!page)
++ return;
++ /*
++ * See clear_page_dirty_for_io() for details why set_page_dirty()
++ * is needed.
++ */
++ if (page_mkclean(page))
++ set_page_dirty(page);
++ unlock_page(page);
++ page_cache_release(page);
++}
++EXPORT_SYMBOL(pagecache_isize_extended);
++
++/**
++ * truncate_pagecache_range - unmap and remove pagecache that is hole-punched
++ * @inode: inode
++ * @lstart: offset of beginning of hole
+ * vmtruncate - unmap mappings "freed" by truncate() syscall
+ * @inode: inode of the file used
+ * @newsize: file offset to start truncating
+diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
+index 605156f13899..61e2494dc188 100644
+--- a/net/bluetooth/smp.c
++++ b/net/bluetooth/smp.c
+@@ -325,8 +325,11 @@ static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth,
+ }
+
+ /* Not Just Works/Confirm results in MITM Authentication */
+- if (method != JUST_CFM)
++ if (method != JUST_CFM) {
+ set_bit(SMP_FLAG_MITM_AUTH, &smp->smp_flags);
++ if (hcon->pending_sec_level < BT_SECURITY_HIGH)
++ hcon->pending_sec_level = BT_SECURITY_HIGH;
++ }
+
+ /* If both devices have Keyboard-Display I/O, the master
+ * Confirms and the slave Enters the passkey.
+diff --git a/net/ceph/crypto.c b/net/ceph/crypto.c
+index 9da7fdd3cd8a..3d1be9911b8e 100644
+--- a/net/ceph/crypto.c
++++ b/net/ceph/crypto.c
+@@ -89,11 +89,82 @@ static struct crypto_blkcipher *ceph_crypto_alloc_cipher(void)
+
+ static const u8 *aes_iv = (u8 *)CEPH_AES_IV;
+
++/*
++ * Should be used for buffers allocated with ceph_kvmalloc().
++ * Currently these are encrypt out-buffer (ceph_buffer) and decrypt
++ * in-buffer (msg front).
++ *
++ * Dispose of @sgt with teardown_sgtable().
++ *
++ * @prealloc_sg is to avoid memory allocation inside sg_alloc_table()
++ * in cases where a single sg is sufficient. No attempt to reduce the
++ * number of sgs by squeezing physically contiguous pages together is
++ * made though, for simplicity.
++ */
++static int setup_sgtable(struct sg_table *sgt, struct scatterlist *prealloc_sg,
++ const void *buf, unsigned int buf_len)
++{
++ struct scatterlist *sg;
++ const bool is_vmalloc = is_vmalloc_addr(buf);
++ unsigned int off = offset_in_page(buf);
++ unsigned int chunk_cnt = 1;
++ unsigned int chunk_len = PAGE_ALIGN(off + buf_len);
++ int i;
++ int ret;
++
++ if (buf_len == 0) {
++ memset(sgt, 0, sizeof(*sgt));
++ return -EINVAL;
++ }
++
++ if (is_vmalloc) {
++ chunk_cnt = chunk_len >> PAGE_SHIFT;
++ chunk_len = PAGE_SIZE;
++ }
++
++ if (chunk_cnt > 1) {
++ ret = sg_alloc_table(sgt, chunk_cnt, GFP_NOFS);
++ if (ret)
++ return ret;
++ } else {
++ WARN_ON(chunk_cnt != 1);
++ sg_init_table(prealloc_sg, 1);
++ sgt->sgl = prealloc_sg;
++ sgt->nents = sgt->orig_nents = 1;
++ }
++
++ for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
++ struct page *page;
++ unsigned int len = min(chunk_len - off, buf_len);
++
++ if (is_vmalloc)
++ page = vmalloc_to_page(buf);
++ else
++ page = virt_to_page(buf);
++
++ sg_set_page(sg, page, len, off);
++
++ off = 0;
++ buf += len;
++ buf_len -= len;
++ }
++ WARN_ON(buf_len != 0);
++
++ return 0;
++}
++
++static void teardown_sgtable(struct sg_table *sgt)
++{
++ if (sgt->orig_nents > 1)
++ sg_free_table(sgt);
++}
++
+ static int ceph_aes_encrypt(const void *key, int key_len,
+ void *dst, size_t *dst_len,
+ const void *src, size_t src_len)
+ {
+- struct scatterlist sg_in[2], sg_out[1];
++ struct scatterlist sg_in[2], prealloc_sg;
++ struct sg_table sg_out;
+ struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
+ struct blkcipher_desc desc = { .tfm = tfm, .flags = 0 };
+ int ret;
+@@ -109,16 +180,18 @@ static int ceph_aes_encrypt(const void *key, int key_len,
+
+ *dst_len = src_len + zero_padding;
+
+- crypto_blkcipher_setkey((void *)tfm, key, key_len);
+ sg_init_table(sg_in, 2);
+ sg_set_buf(&sg_in[0], src, src_len);
+ sg_set_buf(&sg_in[1], pad, zero_padding);
+- sg_init_table(sg_out, 1);
+- sg_set_buf(sg_out, dst, *dst_len);
++ ret = setup_sgtable(&sg_out, &prealloc_sg, dst, *dst_len);
++ if (ret)
++ goto out_tfm;
++
++ crypto_blkcipher_setkey((void *)tfm, key, key_len);
+ iv = crypto_blkcipher_crt(tfm)->iv;
+ ivsize = crypto_blkcipher_ivsize(tfm);
+-
+ memcpy(iv, aes_iv, ivsize);
++
+ /*
+ print_hex_dump(KERN_ERR, "enc key: ", DUMP_PREFIX_NONE, 16, 1,
+ key, key_len, 1);
+@@ -127,16 +200,22 @@ static int ceph_aes_encrypt(const void *key, int key_len,
+ print_hex_dump(KERN_ERR, "enc pad: ", DUMP_PREFIX_NONE, 16, 1,
+ pad, zero_padding, 1);
+ */
+- ret = crypto_blkcipher_encrypt(&desc, sg_out, sg_in,
++ ret = crypto_blkcipher_encrypt(&desc, sg_out.sgl, sg_in,
+ src_len + zero_padding);
+- crypto_free_blkcipher(tfm);
+- if (ret < 0)
++ if (ret < 0) {
+ pr_err("ceph_aes_crypt failed %d\n", ret);
++ goto out_sg;
++ }
+ /*
+ print_hex_dump(KERN_ERR, "enc out: ", DUMP_PREFIX_NONE, 16, 1,
+ dst, *dst_len, 1);
+ */
+- return 0;
++
++out_sg:
++ teardown_sgtable(&sg_out);
++out_tfm:
++ crypto_free_blkcipher(tfm);
++ return ret;
+ }
+
+ static int ceph_aes_encrypt2(const void *key, int key_len, void *dst,
+@@ -144,7 +223,8 @@ static int ceph_aes_encrypt2(const void *key, int key_len, void *dst,
+ const void *src1, size_t src1_len,
+ const void *src2, size_t src2_len)
+ {
+- struct scatterlist sg_in[3], sg_out[1];
++ struct scatterlist sg_in[3], prealloc_sg;
++ struct sg_table sg_out;
+ struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
+ struct blkcipher_desc desc = { .tfm = tfm, .flags = 0 };
+ int ret;
+@@ -160,17 +240,19 @@ static int ceph_aes_encrypt2(const void *key, int key_len, void *dst,
+
+ *dst_len = src1_len + src2_len + zero_padding;
+
+- crypto_blkcipher_setkey((void *)tfm, key, key_len);
+ sg_init_table(sg_in, 3);
+ sg_set_buf(&sg_in[0], src1, src1_len);
+ sg_set_buf(&sg_in[1], src2, src2_len);
+ sg_set_buf(&sg_in[2], pad, zero_padding);
+- sg_init_table(sg_out, 1);
+- sg_set_buf(sg_out, dst, *dst_len);
++ ret = setup_sgtable(&sg_out, &prealloc_sg, dst, *dst_len);
++ if (ret)
++ goto out_tfm;
++
++ crypto_blkcipher_setkey((void *)tfm, key, key_len);
+ iv = crypto_blkcipher_crt(tfm)->iv;
+ ivsize = crypto_blkcipher_ivsize(tfm);
+-
+ memcpy(iv, aes_iv, ivsize);
++
+ /*
+ print_hex_dump(KERN_ERR, "enc key: ", DUMP_PREFIX_NONE, 16, 1,
+ key, key_len, 1);
+@@ -181,23 +263,30 @@ static int ceph_aes_encrypt2(const void *key, int key_len, void *dst,
+ print_hex_dump(KERN_ERR, "enc pad: ", DUMP_PREFIX_NONE, 16, 1,
+ pad, zero_padding, 1);
+ */
+- ret = crypto_blkcipher_encrypt(&desc, sg_out, sg_in,
++ ret = crypto_blkcipher_encrypt(&desc, sg_out.sgl, sg_in,
+ src1_len + src2_len + zero_padding);
+- crypto_free_blkcipher(tfm);
+- if (ret < 0)
++ if (ret < 0) {
+ pr_err("ceph_aes_crypt2 failed %d\n", ret);
++ goto out_sg;
++ }
+ /*
+ print_hex_dump(KERN_ERR, "enc out: ", DUMP_PREFIX_NONE, 16, 1,
+ dst, *dst_len, 1);
+ */
+- return 0;
++
++out_sg:
++ teardown_sgtable(&sg_out);
++out_tfm:
++ crypto_free_blkcipher(tfm);
++ return ret;
+ }
+
+ static int ceph_aes_decrypt(const void *key, int key_len,
+ void *dst, size_t *dst_len,
+ const void *src, size_t src_len)
+ {
+- struct scatterlist sg_in[1], sg_out[2];
++ struct sg_table sg_in;
++ struct scatterlist sg_out[2], prealloc_sg;
+ struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
+ struct blkcipher_desc desc = { .tfm = tfm };
+ char pad[16];
+@@ -209,16 +298,16 @@ static int ceph_aes_decrypt(const void *key, int key_len,
+ if (IS_ERR(tfm))
+ return PTR_ERR(tfm);
+
+- crypto_blkcipher_setkey((void *)tfm, key, key_len);
+- sg_init_table(sg_in, 1);
+ sg_init_table(sg_out, 2);
+- sg_set_buf(sg_in, src, src_len);
+ sg_set_buf(&sg_out[0], dst, *dst_len);
+ sg_set_buf(&sg_out[1], pad, sizeof(pad));
++ ret = setup_sgtable(&sg_in, &prealloc_sg, src, src_len);
++ if (ret)
++ goto out_tfm;
+
++ crypto_blkcipher_setkey((void *)tfm, key, key_len);
+ iv = crypto_blkcipher_crt(tfm)->iv;
+ ivsize = crypto_blkcipher_ivsize(tfm);
+-
+ memcpy(iv, aes_iv, ivsize);
+
+ /*
+@@ -227,12 +316,10 @@ static int ceph_aes_decrypt(const void *key, int key_len,
+ print_hex_dump(KERN_ERR, "dec in: ", DUMP_PREFIX_NONE, 16, 1,
+ src, src_len, 1);
+ */
+-
+- ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in, src_len);
+- crypto_free_blkcipher(tfm);
++ ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in.sgl, src_len);
+ if (ret < 0) {
+ pr_err("ceph_aes_decrypt failed %d\n", ret);
+- return ret;
++ goto out_sg;
+ }
+
+ if (src_len <= *dst_len)
+@@ -250,7 +337,12 @@ static int ceph_aes_decrypt(const void *key, int key_len,
+ print_hex_dump(KERN_ERR, "dec out: ", DUMP_PREFIX_NONE, 16, 1,
+ dst, *dst_len, 1);
+ */
+- return 0;
++
++out_sg:
++ teardown_sgtable(&sg_in);
++out_tfm:
++ crypto_free_blkcipher(tfm);
++ return ret;
+ }
+
+ static int ceph_aes_decrypt2(const void *key, int key_len,
+@@ -258,7 +350,8 @@ static int ceph_aes_decrypt2(const void *key, int key_len,
+ void *dst2, size_t *dst2_len,
+ const void *src, size_t src_len)
+ {
+- struct scatterlist sg_in[1], sg_out[3];
++ struct sg_table sg_in;
++ struct scatterlist sg_out[3], prealloc_sg;
+ struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
+ struct blkcipher_desc desc = { .tfm = tfm };
+ char pad[16];
+@@ -270,17 +363,17 @@ static int ceph_aes_decrypt2(const void *key, int key_len,
+ if (IS_ERR(tfm))
+ return PTR_ERR(tfm);
+
+- sg_init_table(sg_in, 1);
+- sg_set_buf(sg_in, src, src_len);
+ sg_init_table(sg_out, 3);
+ sg_set_buf(&sg_out[0], dst1, *dst1_len);
+ sg_set_buf(&sg_out[1], dst2, *dst2_len);
+ sg_set_buf(&sg_out[2], pad, sizeof(pad));
++ ret = setup_sgtable(&sg_in, &prealloc_sg, src, src_len);
++ if (ret)
++ goto out_tfm;
+
+ crypto_blkcipher_setkey((void *)tfm, key, key_len);
+ iv = crypto_blkcipher_crt(tfm)->iv;
+ ivsize = crypto_blkcipher_ivsize(tfm);
+-
+ memcpy(iv, aes_iv, ivsize);
+
+ /*
+@@ -289,12 +382,10 @@ static int ceph_aes_decrypt2(const void *key, int key_len,
+ print_hex_dump(KERN_ERR, "dec in: ", DUMP_PREFIX_NONE, 16, 1,
+ src, src_len, 1);
+ */
+-
+- ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in, src_len);
+- crypto_free_blkcipher(tfm);
++ ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in.sgl, src_len);
+ if (ret < 0) {
+ pr_err("ceph_aes_decrypt failed %d\n", ret);
+- return ret;
++ goto out_sg;
+ }
+
+ if (src_len <= *dst1_len)
+@@ -324,7 +415,11 @@ static int ceph_aes_decrypt2(const void *key, int key_len,
+ dst2, *dst2_len, 1);
+ */
+
+- return 0;
++out_sg:
++ teardown_sgtable(&sg_in);
++out_tfm:
++ crypto_free_blkcipher(tfm);
++ return ret;
+ }
+
+
+diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
+index 7827436ae843..330be870c1ef 100644
+--- a/net/ipv4/ip_output.c
++++ b/net/ipv4/ip_output.c
+@@ -1346,10 +1346,10 @@ struct sk_buff *__ip_make_skb(struct sock *sk,
+ iph->ihl = 5;
+ iph->tos = inet->tos;
+ iph->frag_off = df;
+- ip_select_ident(skb, sk);
+ iph->ttl = ttl;
+ iph->protocol = sk->sk_protocol;
+ ip_copy_addrs(iph, fl4);
++ ip_select_ident(skb, sk);
+
+ if (opt) {
+ iph->ihl += opt->optlen>>2;
+diff --git a/net/ipv6/Makefile b/net/ipv6/Makefile
+index 686934acfac1..4b20d5606f6d 100644
+--- a/net/ipv6/Makefile
++++ b/net/ipv6/Makefile
+@@ -37,6 +37,6 @@ obj-$(CONFIG_NETFILTER) += netfilter/
+ obj-$(CONFIG_IPV6_SIT) += sit.o
+ obj-$(CONFIG_IPV6_TUNNEL) += ip6_tunnel.o
+
+-obj-y += addrconf_core.o exthdrs_core.o
++obj-y += addrconf_core.o exthdrs_core.o output_core.o
+
+ obj-$(subst m,y,$(CONFIG_IPV6)) += inet6_hashtables.o
+diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
+new file mode 100644
+index 000000000000..a6126c62a9be
+--- /dev/null
++++ b/net/ipv6/output_core.c
+@@ -0,0 +1,38 @@
++#include <linux/export.h>
++#include <linux/skbuff.h>
++#include <net/ip.h>
++#include <net/ipv6.h>
++
++/* This function exists only for tap drivers that must support broken
++ * clients requesting UFO without specifying an IPv6 fragment ID.
++ *
++ * This is similar to ipv6_select_ident() but we use an independent hash
++ * seed to limit information leakage.
++ */
++void ipv6_proxy_select_ident(struct sk_buff *skb)
++{
++ static u32 ip6_proxy_idents_hashrnd __read_mostly;
++ static bool hashrnd_initialized = false;
++ struct in6_addr buf[2];
++ struct in6_addr *addrs;
++ u32 hash, id;
++
++ addrs = skb_header_pointer(skb,
++ skb_network_offset(skb) +
++ offsetof(struct ipv6hdr, saddr),
++ sizeof(buf), buf);
++ if (!addrs)
++ return;
++
++ if (unlikely(!hashrnd_initialized)) {
++ hashrnd_initialized = true;
++ get_random_bytes(&ip6_proxy_idents_hashrnd,
++ sizeof(ip6_proxy_idents_hashrnd));
++ }
++ hash = __ipv6_addr_jhash(&addrs[1], ip6_proxy_idents_hashrnd);
++ hash = __ipv6_addr_jhash(&addrs[0], hash);
++
++ id = ip_idents_reserve(hash, 1);
++ skb_shinfo(skb)->ip6_frag_id = htonl(id);
++}
++EXPORT_SYMBOL_GPL(ipv6_proxy_select_ident);
+diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
+index 95a04f02f30b..9f32756a302a 100644
+--- a/net/mac80211/iface.c
++++ b/net/mac80211/iface.c
+@@ -395,10 +395,12 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
+ u32 hw_reconf_flags = 0;
+ int i;
+ enum nl80211_channel_type orig_ct;
++ bool cancel_scan;
+
+ clear_bit(SDATA_STATE_RUNNING, &sdata->state);
+
+- if (local->scan_sdata == sdata)
++ cancel_scan = local->scan_sdata == sdata;
++ if (cancel_scan)
+ ieee80211_scan_cancel(local);
+
+ /*
+@@ -562,6 +564,9 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
+
+ ieee80211_recalc_ps(local, -1);
+
++ if (cancel_scan)
++ flush_delayed_work(&local->scan_work);
++
+ if (local->open_count == 0) {
+ if (local->ops->napi_poll)
+ napi_disable(&local->napi);
+diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
+index f5ed86388555..32929b07269f 100644
+--- a/net/mac80211/rx.c
++++ b/net/mac80211/rx.c
+@@ -1486,11 +1486,14 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
+ sc = le16_to_cpu(hdr->seq_ctrl);
+ frag = sc & IEEE80211_SCTL_FRAG;
+
+- if (likely((!ieee80211_has_morefrags(fc) && frag == 0) ||
+- is_multicast_ether_addr(hdr->addr1))) {
+- /* not fragmented */
++ if (likely(!ieee80211_has_morefrags(fc) && frag == 0))
++ goto out;
++
++ if (is_multicast_ether_addr(hdr->addr1)) {
++ rx->local->dot11MulticastReceivedFrameCount++;
+ goto out;
+ }
++
+ I802_DEBUG_INC(rx->local->rx_handlers_fragments);
+
+ if (skb_linearize(rx->skb))
+@@ -1583,10 +1586,7 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
+ out:
+ if (rx->sta)
+ rx->sta->rx_packets++;
+- if (is_multicast_ether_addr(hdr->addr1))
+- rx->local->dot11MulticastReceivedFrameCount++;
+- else
+- ieee80211_led_rx(rx->local);
++ ieee80211_led_rx(rx->local);
+ return RX_CONTINUE;
+ }
+
+diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c
+index c487715698ab..d96b7f6a3b44 100644
+--- a/security/integrity/evm/evm_main.c
++++ b/security/integrity/evm/evm_main.c
+@@ -282,9 +282,12 @@ int evm_inode_setxattr(struct dentry *dentry, const char *xattr_name,
+ {
+ const struct evm_ima_xattr_data *xattr_data = xattr_value;
+
+- if ((strcmp(xattr_name, XATTR_NAME_EVM) == 0)
+- && (xattr_data->type == EVM_XATTR_HMAC))
+- return -EPERM;
++ if (strcmp(xattr_name, XATTR_NAME_EVM) == 0) {
++ if (!xattr_value_len)
++ return -EINVAL;
++ if (xattr_data->type != EVM_IMA_XATTR_DIGSIG)
++ return -EPERM;
++ }
+ return evm_protect_xattr(dentry, xattr_name, xattr_value,
+ xattr_value_len);
+ }
+diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
+index 639e5c4028ff..cbae6d392087 100644
+--- a/security/selinux/hooks.c
++++ b/security/selinux/hooks.c
+@@ -436,6 +436,7 @@ next_inode:
+ list_entry(sbsec->isec_head.next,
+ struct inode_security_struct, list);
+ struct inode *inode = isec->inode;
++ list_del_init(&isec->list);
+ spin_unlock(&sbsec->isec_lock);
+ inode = igrab(inode);
+ if (inode) {
+@@ -444,7 +445,6 @@ next_inode:
+ iput(inode);
+ }
+ spin_lock(&sbsec->isec_lock);
+- list_del_init(&isec->list);
+ goto next_inode;
+ }
+ spin_unlock(&sbsec->isec_lock);
+diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
+index 91cdf9435fec..4dbb66ef435d 100644
+--- a/sound/core/pcm_compat.c
++++ b/sound/core/pcm_compat.c
+@@ -204,6 +204,8 @@ static int snd_pcm_status_user_compat(struct snd_pcm_substream *substream,
+ if (err < 0)
+ return err;
+
++ if (clear_user(src, sizeof(*src)))
++ return -EFAULT;
+ if (put_user(status.state, &src->state) ||
+ put_user(status.trigger_tstamp.tv_sec, &src->trigger_tstamp.tv_sec) ||
+ put_user(status.trigger_tstamp.tv_nsec, &src->trigger_tstamp.tv_nsec) ||
+diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
+index d776291d09a0..3a907935fa09 100644
+--- a/sound/core/pcm_native.c
++++ b/sound/core/pcm_native.c
+@@ -3171,7 +3171,7 @@ static const struct vm_operations_struct snd_pcm_vm_ops_data_fault = {
+
+ #ifndef ARCH_HAS_DMA_MMAP_COHERENT
+ /* This should be defined / handled globally! */
+-#ifdef CONFIG_ARM
++#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
+ #define ARCH_HAS_DMA_MMAP_COHERENT
+ #endif
+ #endif
+diff --git a/sound/pci/emu10k1/emu10k1_callback.c b/sound/pci/emu10k1/emu10k1_callback.c
+index a0afa5057488..f35284be7b02 100644
+--- a/sound/pci/emu10k1/emu10k1_callback.c
++++ b/sound/pci/emu10k1/emu10k1_callback.c
+@@ -85,6 +85,8 @@ snd_emu10k1_ops_setup(struct snd_emux *emux)
+ * get more voice for pcm
+ *
+ * terminate the most inactive voice and give it as a pcm voice.
++ *
++ * voice_lock is already held.
+ */
+ int
+ snd_emu10k1_synth_get_voice(struct snd_emu10k1 *hw)
+@@ -92,12 +94,10 @@ snd_emu10k1_synth_get_voice(struct snd_emu10k1 *hw)
+ struct snd_emux *emu;
+ struct snd_emux_voice *vp;
+ struct best_voice best[V_END];
+- unsigned long flags;
+ int i;
+
+ emu = hw->synth;
+
+- spin_lock_irqsave(&emu->voice_lock, flags);
+ lookup_voices(emu, hw, best, 1); /* no OFF voices */
+ for (i = 0; i < V_END; i++) {
+ if (best[i].voice >= 0) {
+@@ -113,11 +113,9 @@ snd_emu10k1_synth_get_voice(struct snd_emu10k1 *hw)
+ vp->emu->num_voices--;
+ vp->ch = -1;
+ vp->state = SNDRV_EMUX_ST_OFF;
+- spin_unlock_irqrestore(&emu->voice_lock, flags);
+ return ch;
+ }
+ }
+- spin_unlock_irqrestore(&emu->voice_lock, flags);
+
+ /* not found */
+ return -ENOMEM;
+diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
+index f0b8d8e38f71..c40b7ca7a143 100644
+--- a/sound/soc/codecs/sgtl5000.c
++++ b/sound/soc/codecs/sgtl5000.c
+@@ -1313,8 +1313,7 @@ static int sgtl5000_probe(struct snd_soc_codec *codec)
+
+ /* enable small pop, introduce 400ms delay in turning off */
+ snd_soc_update_bits(codec, SGTL5000_CHIP_REF_CTRL,
+- SGTL5000_SMALL_POP,
+- SGTL5000_SMALL_POP);
++ SGTL5000_SMALL_POP, 1);
+
+ /* disable short cut detector */
+ snd_soc_write(codec, SGTL5000_CHIP_SHORT_CTRL, 0);
+diff --git a/sound/soc/codecs/sgtl5000.h b/sound/soc/codecs/sgtl5000.h
+index d3a68bbfea00..0bd6e1cd8200 100644
+--- a/sound/soc/codecs/sgtl5000.h
++++ b/sound/soc/codecs/sgtl5000.h
+@@ -275,7 +275,7 @@
+ #define SGTL5000_BIAS_CTRL_MASK 0x000e
+ #define SGTL5000_BIAS_CTRL_SHIFT 1
+ #define SGTL5000_BIAS_CTRL_WIDTH 3
+-#define SGTL5000_SMALL_POP 0x0001
++#define SGTL5000_SMALL_POP 0
+
+ /*
+ * SGTL5000_CHIP_MIC_CTRL
+diff --git a/sound/soc/sh/fsi.c b/sound/soc/sh/fsi.c
+index 91b728774dba..eb0599f29768 100644
+--- a/sound/soc/sh/fsi.c
++++ b/sound/soc/sh/fsi.c
+@@ -1393,8 +1393,7 @@ static const struct snd_soc_dai_ops fsi_dai_ops = {
+ static struct snd_pcm_hardware fsi_pcm_hardware = {
+ .info = SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_MMAP |
+- SNDRV_PCM_INFO_MMAP_VALID |
+- SNDRV_PCM_INFO_PAUSE,
++ SNDRV_PCM_INFO_MMAP_VALID,
+ .formats = FSI_FMTS,
+ .rates = FSI_RATES,
+ .rate_min = 8000,
+diff --git a/sound/usb/card.c b/sound/usb/card.c
+index 658ea1118a8e..43fca5231628 100644
+--- a/sound/usb/card.c
++++ b/sound/usb/card.c
+@@ -568,18 +568,19 @@ static void snd_usb_audio_disconnect(struct usb_device *dev,
+ {
+ struct snd_card *card;
+ struct list_head *p;
++ bool was_shutdown;
+
+ if (chip == (void *)-1L)
+ return;
+
+ card = chip->card;
+ down_write(&chip->shutdown_rwsem);
++ was_shutdown = chip->shutdown;
+ chip->shutdown = 1;
+ up_write(&chip->shutdown_rwsem);
+
+ mutex_lock(&register_mutex);
+- chip->num_interfaces--;
+- if (chip->num_interfaces <= 0) {
++ if (!was_shutdown) {
+ snd_card_disconnect(card);
+ /* release the pcm resources */
+ list_for_each(p, &chip->pcm_list) {
+@@ -593,6 +594,10 @@ static void snd_usb_audio_disconnect(struct usb_device *dev,
+ list_for_each(p, &chip->mixer_list) {
+ snd_usb_mixer_disconnect(p);
+ }
++ }
++
++ chip->num_interfaces--;
++ if (chip->num_interfaces <= 0) {
+ usb_chip[chip->index] = NULL;
+ mutex_unlock(&register_mutex);
+ snd_card_free_when_closed(card);
+diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
+index 915bc2cf73d9..5ef357983d92 100644
+--- a/sound/usb/quirks-table.h
++++ b/sound/usb/quirks-table.h
+@@ -301,6 +301,36 @@ YAMAHA_DEVICE(0x105d, NULL),
+ }
+ }
+ },
++{
++ USB_DEVICE(0x0499, 0x1509),
++ .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ /* .vendor_name = "Yamaha", */
++ /* .product_name = "Steinberg UR22", */
++ .ifnum = QUIRK_ANY_INTERFACE,
++ .type = QUIRK_COMPOSITE,
++ .data = (const struct snd_usb_audio_quirk[]) {
++ {
++ .ifnum = 1,
++ .type = QUIRK_AUDIO_STANDARD_INTERFACE
++ },
++ {
++ .ifnum = 2,
++ .type = QUIRK_AUDIO_STANDARD_INTERFACE
++ },
++ {
++ .ifnum = 3,
++ .type = QUIRK_MIDI_YAMAHA
++ },
++ {
++ .ifnum = 4,
++ .type = QUIRK_IGNORE_INTERFACE
++ },
++ {
++ .ifnum = -1
++ }
++ }
++ }
++},
+ YAMAHA_DEVICE(0x2000, "DGP-7"),
+ YAMAHA_DEVICE(0x2001, "DGP-5"),
+ YAMAHA_DEVICE(0x2002, NULL),
+diff --git a/virt/kvm/iommu.c b/virt/kvm/iommu.c
+index defc9baa9a45..3225903ec91b 100644
+--- a/virt/kvm/iommu.c
++++ b/virt/kvm/iommu.c
+@@ -43,13 +43,13 @@ static void kvm_iommu_put_pages(struct kvm *kvm,
+ gfn_t base_gfn, unsigned long npages);
+
+ static pfn_t kvm_pin_pages(struct kvm *kvm, struct kvm_memory_slot *slot,
+- gfn_t gfn, unsigned long size)
++ gfn_t gfn, unsigned long npages)
+ {
+ gfn_t end_gfn;
+ pfn_t pfn;
+
+ pfn = gfn_to_pfn_memslot(kvm, slot, gfn);
+- end_gfn = gfn + (size >> PAGE_SHIFT);
++ end_gfn = gfn + npages;
+ gfn += 1;
+
+ if (is_error_pfn(pfn))
+@@ -117,7 +117,7 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
+ * Pin all pages we are about to map in memory. This is
+ * important because we unmap and unpin in 4kb steps later.
+ */
+- pfn = kvm_pin_pages(kvm, slot, gfn, page_size);
++ pfn = kvm_pin_pages(kvm, slot, gfn, page_size >> PAGE_SHIFT);
+ if (is_error_pfn(pfn)) {
+ gfn += 1;
+ continue;
+@@ -129,7 +129,7 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
+ if (r) {
+ printk(KERN_ERR "kvm_iommu_map_address:"
+ "iommu failed to map pfn=%llx\n", pfn);
+- kvm_unpin_pages(kvm, pfn, page_size);
++ kvm_unpin_pages(kvm, pfn, page_size >> PAGE_SHIFT);
+ goto unmap_pages;
+ }
+
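Editorial aside: the kvm_pin_pages() change is a unit fix: the function now takes a page count, while callers had been handing it a byte size, so the wrong number of pages was pinned and unpinned. A tiny standalone sketch of the convention (names are hypothetical):

    #include <stdio.h>

    #define PAGE_SHIFT 12

    static void unpin_pages(unsigned long first_pfn, unsigned long npages)
    {
        printf("unpinning %lu page(s) starting at pfn %#lx\n",
               npages, first_pfn);
    }

    int main(void)
    {
        unsigned long page_size = 1UL << 21;        /* one 2 MiB mapping */

        /* wrong: unpin_pages(pfn, page_size) passes bytes where pages
         * are expected; convert explicitly at the call site instead.  */
        unpin_pages(0x1000, page_size >> PAGE_SHIFT);   /* 512 pages */
        return 0;
    }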
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index bc5ed1412382..f4732bd2816c 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -52,6 +52,7 @@
+
+ #include <asm/processor.h>
+ #include <asm/io.h>
++#include <asm/ioctl.h>
+ #include <asm/uaccess.h>
+ #include <asm/pgtable.h>
+
+@@ -1744,6 +1745,9 @@ static long kvm_vcpu_ioctl(struct file *filp,
+ if (vcpu->kvm->mm != current->mm)
+ return -EIO;
+
++ if (unlikely(_IOC_TYPE(ioctl) != KVMIO))
++ return -EINVAL;
++
+ #if defined(CONFIG_S390) || defined(CONFIG_PPC)
+ /*
+ * Special cases: vcpu ioctls that are asynchronous to vcpu execution,